Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/audit.cpp133
-rw-r--r--src/mongo/db/audit.h623
-rw-r--r--src/mongo/db/auth/action_set.cpp175
-rw-r--r--src/mongo/db/auth/action_set.h96
-rw-r--r--src/mongo/db/auth/action_set_test.cpp238
-rw-r--r--src/mongo/db/auth/auth_decorations.cpp170
-rw-r--r--src/mongo/db/auth/auth_index_d.cpp124
-rw-r--r--src/mongo/db/auth/auth_index_d.h26
-rw-r--r--src/mongo/db/auth/authentication_session.h65
-rw-r--r--src/mongo/db/auth/authorization_manager.cpp1123
-rw-r--r--src/mongo/db/auth/authorization_manager.h720
-rw-r--r--src/mongo/db/auth/authorization_manager_global.cpp75
-rw-r--r--src/mongo/db/auth/authorization_manager_global.h14
-rw-r--r--src/mongo/db/auth/authorization_manager_mock_init.cpp22
-rw-r--r--src/mongo/db/auth/authorization_manager_test.cpp337
-rw-r--r--src/mongo/db/auth/authorization_session.cpp899
-rw-r--r--src/mongo/db/auth/authorization_session.h441
-rw-r--r--src/mongo/db/auth/authorization_session_test.cpp849
-rw-r--r--src/mongo/db/auth/authz_manager_external_state.cpp6
-rw-r--r--src/mongo/db/auth/authz_manager_external_state.h194
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_d.cpp72
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_d.h42
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.cpp695
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.h194
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.cpp457
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.h151
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_s.cpp260
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_s.h49
-rw-r--r--src/mongo/db/auth/authz_session_external_state.cpp12
-rw-r--r--src/mongo/db/auth/authz_session_external_state.h97
-rw-r--r--src/mongo/db/auth/authz_session_external_state_d.cpp39
-rw-r--r--src/mongo/db/auth/authz_session_external_state_d.h26
-rw-r--r--src/mongo/db/auth/authz_session_external_state_mock.h64
-rw-r--r--src/mongo/db/auth/authz_session_external_state_s.cpp15
-rw-r--r--src/mongo/db/auth/authz_session_external_state_s.h22
-rw-r--r--src/mongo/db/auth/authz_session_external_state_server_common.cpp75
-rw-r--r--src/mongo/db/auth/authz_session_external_state_server_common.h42
-rw-r--r--src/mongo/db/auth/impersonation_session.cpp43
-rw-r--r--src/mongo/db/auth/impersonation_session.h30
-rw-r--r--src/mongo/db/auth/internal_user_auth.cpp116
-rw-r--r--src/mongo/db/auth/internal_user_auth.h64
-rw-r--r--src/mongo/db/auth/mongo_authentication_session.cpp8
-rw-r--r--src/mongo/db/auth/mongo_authentication_session.h41
-rw-r--r--src/mongo/db/auth/native_sasl_authentication_session.cpp186
-rw-r--r--src/mongo/db/auth/native_sasl_authentication_session.h42
-rw-r--r--src/mongo/db/auth/privilege.cpp69
-rw-r--r--src/mongo/db/auth/privilege.h67
-rw-r--r--src/mongo/db/auth/privilege_parser.cpp705
-rw-r--r--src/mongo/db/auth/privilege_parser.h304
-rw-r--r--src/mongo/db/auth/privilege_parser_test.cpp341
-rw-r--r--src/mongo/db/auth/resource_pattern.cpp12
-rw-r--r--src/mongo/db/auth/resource_pattern.h331
-rw-r--r--src/mongo/db/auth/role_graph.cpp930
-rw-r--r--src/mongo/db/auth/role_graph.h528
-rw-r--r--src/mongo/db/auth/role_graph_builtin_roles.cpp1390
-rw-r--r--src/mongo/db/auth/role_graph_test.cpp1343
-rw-r--r--src/mongo/db/auth/role_graph_update.cpp458
-rw-r--r--src/mongo/db/auth/role_name.cpp27
-rw-r--r--src/mongo/db/auth/role_name.h248
-rw-r--r--src/mongo/db/auth/sasl_authentication_session.cpp76
-rw-r--r--src/mongo/db/auth/sasl_authentication_session.h252
-rw-r--r--src/mongo/db/auth/sasl_commands.cpp559
-rw-r--r--src/mongo/db/auth/sasl_options.cpp294
-rw-r--r--src/mongo/db/auth/sasl_options.h33
-rw-r--r--src/mongo/db/auth/sasl_plain_server_conversation.cpp113
-rw-r--r--src/mongo/db/auth/sasl_plain_server_conversation.h31
-rw-r--r--src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp505
-rw-r--r--src/mongo/db/auth/sasl_scramsha1_server_conversation.h67
-rw-r--r--src/mongo/db/auth/sasl_server_conversation.cpp12
-rw-r--r--src/mongo/db/auth/sasl_server_conversation.h87
-rw-r--r--src/mongo/db/auth/sasl_test_crutch.cpp2
-rw-r--r--src/mongo/db/auth/security_key.cpp156
-rw-r--r--src/mongo/db/auth/security_key.h18
-rw-r--r--src/mongo/db/auth/user.cpp228
-rw-r--r--src/mongo/db/auth/user.h369
-rw-r--r--src/mongo/db/auth/user_cache_invalidator_job.cpp215
-rw-r--r--src/mongo/db/auth/user_cache_invalidator_job.h44
-rw-r--r--src/mongo/db/auth/user_document_parser.cpp787
-rw-r--r--src/mongo/db/auth/user_document_parser.h58
-rw-r--r--src/mongo/db/auth/user_document_parser_test.cpp776
-rw-r--r--src/mongo/db/auth/user_management_commands_parser.cpp1032
-rw-r--r--src/mongo/db/auth/user_management_commands_parser.h369
-rw-r--r--src/mongo/db/auth/user_name.cpp27
-rw-r--r--src/mongo/db/auth/user_name.h219
-rw-r--r--src/mongo/db/auth/user_name_hash.h11
-rw-r--r--src/mongo/db/auth/user_set.cpp155
-rw-r--r--src/mongo/db/auth/user_set.h161
-rw-r--r--src/mongo/db/auth/user_set_test.cpp144
-rw-r--r--src/mongo/db/background.cpp227
-rw-r--r--src/mongo/db/background.h50
-rw-r--r--src/mongo/db/catalog/apply_ops.cpp279
-rw-r--r--src/mongo/db/catalog/apply_ops.h24
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp340
-rw-r--r--src/mongo/db/catalog/capped_utils.h45
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp250
-rw-r--r--src/mongo/db/catalog/coll_mod.h25
-rw-r--r--src/mongo/db/catalog/collection.cpp1211
-rw-r--r--src/mongo/db/catalog/collection.h681
-rw-r--r--src/mongo/db/catalog/collection_catalog_entry.h107
-rw-r--r--src/mongo/db/catalog/collection_compact.cpp240
-rw-r--r--src/mongo/db/catalog/collection_info_cache.cpp204
-rw-r--r--src/mongo/db/catalog/collection_info_cache.h108
-rw-r--r--src/mongo/db/catalog/collection_options.cpp280
-rw-r--r--src/mongo/db/catalog/collection_options.h127
-rw-r--r--src/mongo/db/catalog/collection_options_test.cpp304
-rw-r--r--src/mongo/db/catalog/create_collection.cpp83
-rw-r--r--src/mongo/db/catalog/create_collection.h16
-rw-r--r--src/mongo/db/catalog/cursor_manager.cpp785
-rw-r--r--src/mongo/db/catalog/cursor_manager.h222
-rw-r--r--src/mongo/db/catalog/database.cpp868
-rw-r--r--src/mongo/db/catalog/database.h206
-rw-r--r--src/mongo/db/catalog/database_catalog_entry.h103
-rw-r--r--src/mongo/db/catalog/database_holder.cpp228
-rw-r--r--src/mongo/db/catalog/database_holder.h94
-rw-r--r--src/mongo/db/catalog/document_validation.cpp4
-rw-r--r--src/mongo/db/catalog/document_validation.h62
-rw-r--r--src/mongo/db/catalog/drop_collection.cpp75
-rw-r--r--src/mongo/db/catalog/drop_collection.h22
-rw-r--r--src/mongo/db/catalog/drop_database.cpp60
-rw-r--r--src/mongo/db/catalog/drop_database.h12
-rw-r--r--src/mongo/db/catalog/drop_indexes.cpp183
-rw-r--r--src/mongo/db/catalog/drop_indexes.h26
-rw-r--r--src/mongo/db/catalog/head_manager.h22
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp1905
-rw-r--r--src/mongo/db/catalog/index_catalog.h481
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.cpp384
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.h197
-rw-r--r--src/mongo/db/catalog/index_create.cpp540
-rw-r--r--src/mongo/db/catalog/index_create.h348
-rw-r--r--src/mongo/db/catalog/index_key_validate.cpp112
-rw-r--r--src/mongo/db/catalog/index_key_validate.h12
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp326
-rw-r--r--src/mongo/db/catalog/rename_collection.h26
-rw-r--r--src/mongo/db/client.cpp162
-rw-r--r--src/mongo/db/client.h221
-rw-r--r--src/mongo/db/client_basic.cpp6
-rw-r--r--src/mongo/db/client_basic.h81
-rw-r--r--src/mongo/db/clientcursor.cpp563
-rw-r--r--src/mongo/db/clientcursor.h624
-rw-r--r--src/mongo/db/clientlistplugin.cpp281
-rw-r--r--src/mongo/db/cloner.cpp1023
-rw-r--r--src/mongo/db/cloner.h173
-rw-r--r--src/mongo/db/commands.cpp901
-rw-r--r--src/mongo/db/commands.h708
-rw-r--r--src/mongo/db/commands/apply_ops.cpp177
-rw-r--r--src/mongo/db/commands/authentication_commands.cpp604
-rw-r--r--src/mongo/db/commands/authentication_commands.h89
-rw-r--r--src/mongo/db/commands/cleanup_orphaned_cmd.cpp443
-rw-r--r--src/mongo/db/commands/clone.cpp151
-rw-r--r--src/mongo/db/commands/clone_collection.cpp171
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp216
-rw-r--r--src/mongo/db/commands/compact.cpp204
-rw-r--r--src/mongo/db/commands/connection_status.cpp169
-rw-r--r--src/mongo/db/commands/copydb.cpp326
-rw-r--r--src/mongo/db/commands/copydb.h14
-rw-r--r--src/mongo/db/commands/copydb_common.cpp92
-rw-r--r--src/mongo/db/commands/copydb_start_commands.cpp292
-rw-r--r--src/mongo/db/commands/copydb_start_commands.h10
-rw-r--r--src/mongo/db/commands/count_cmd.cpp229
-rw-r--r--src/mongo/db/commands/cpuprofile.cpp205
-rw-r--r--src/mongo/db/commands/create_indexes.cpp414
-rw-r--r--src/mongo/db/commands/current_op.cpp177
-rw-r--r--src/mongo/db/commands/dbhash.cpp331
-rw-r--r--src/mongo/db/commands/dbhash.h78
-rw-r--r--src/mongo/db/commands/distinct.cpp235
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp246
-rw-r--r--src/mongo/db/commands/explain_cmd.cpp142
-rw-r--r--src/mongo/db/commands/explain_cmd.h96
-rw-r--r--src/mongo/db/commands/fail_point_cmd.cpp228
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp671
-rw-r--r--src/mongo/db/commands/find_and_modify.h16
-rw-r--r--src/mongo/db/commands/find_and_modify_common.cpp59
-rw-r--r--src/mongo/db/commands/find_cmd.cpp541
-rw-r--r--src/mongo/db/commands/fsync.cpp395
-rw-r--r--src/mongo/db/commands/fsync.h6
-rw-r--r--src/mongo/db/commands/geo_near_cmd.cpp449
-rw-r--r--src/mongo/db/commands/get_last_error.cpp467
-rw-r--r--src/mongo/db/commands/getmore_cmd.cpp590
-rw-r--r--src/mongo/db/commands/group.cpp280
-rw-r--r--src/mongo/db/commands/group.h118
-rw-r--r--src/mongo/db/commands/hashcmd.cpp106
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp602
-rw-r--r--src/mongo/db/commands/index_filter_commands.h266
-rw-r--r--src/mongo/db/commands/index_filter_commands_test.cpp562
-rw-r--r--src/mongo/db/commands/isself.cpp62
-rw-r--r--src/mongo/db/commands/kill_op.cpp85
-rw-r--r--src/mongo/db/commands/list_collections.cpp284
-rw-r--r--src/mongo/db/commands/list_databases.cpp156
-rw-r--r--src/mongo/db/commands/list_indexes.cpp272
-rw-r--r--src/mongo/db/commands/merge_chunks_cmd.cpp244
-rw-r--r--src/mongo/db/commands/mr.cpp2894
-rw-r--r--src/mongo/db/commands/mr.h689
-rw-r--r--src/mongo/db/commands/mr_common.cpp185
-rw-r--r--src/mongo/db/commands/mr_test.cpp266
-rw-r--r--src/mongo/db/commands/oplog_note.cpp92
-rw-r--r--src/mongo/db/commands/parallel_collection_scan.cpp211
-rw-r--r--src/mongo/db/commands/parameters.cpp1007
-rw-r--r--src/mongo/db/commands/pipeline_command.cpp509
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp656
-rw-r--r--src/mongo/db/commands/plan_cache_commands.h271
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp689
-rw-r--r--src/mongo/db/commands/rename_collection.cpp159
-rw-r--r--src/mongo/db/commands/rename_collection.h14
-rw-r--r--src/mongo/db/commands/rename_collection_common.cpp100
-rw-r--r--src/mongo/db/commands/repair_cursor.cpp144
-rw-r--r--src/mongo/db/commands/server_status.cpp462
-rw-r--r--src/mongo/db/commands/server_status.h99
-rw-r--r--src/mongo/db/commands/server_status_internal.cpp78
-rw-r--r--src/mongo/db/commands/server_status_internal.h23
-rw-r--r--src/mongo/db/commands/server_status_metric.cpp31
-rw-r--r--src/mongo/db/commands/server_status_metric.h93
-rw-r--r--src/mongo/db/commands/shutdown.h41
-rw-r--r--src/mongo/db/commands/test_commands.cpp366
-rw-r--r--src/mongo/db/commands/top_command.cpp90
-rw-r--r--src/mongo/db/commands/touch.cpp105
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp4666
-rw-r--r--src/mongo/db/commands/user_management_commands.h153
-rw-r--r--src/mongo/db/commands/user_management_commands_common.cpp851
-rw-r--r--src/mongo/db/commands/validate.cpp142
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp2197
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.h266
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.cpp447
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.h170
-rw-r--r--src/mongo/db/commands/write_commands/write_commands_common.cpp87
-rw-r--r--src/mongo/db/commands/write_commands/write_commands_common.h9
-rw-r--r--src/mongo/db/commands/writeback_compatibility_shim.cpp160
-rw-r--r--src/mongo/db/concurrency/d_concurrency.cpp254
-rw-r--r--src/mongo/db/concurrency/d_concurrency.h405
-rw-r--r--src/mongo/db/concurrency/d_concurrency_test.cpp370
-rw-r--r--src/mongo/db/concurrency/deadlock_detection_test.cpp230
-rw-r--r--src/mongo/db/concurrency/fast_map_noalloc.h357
-rw-r--r--src/mongo/db/concurrency/fast_map_noalloc_test.cpp182
-rw-r--r--src/mongo/db/concurrency/lock_manager.cpp1697
-rw-r--r--src/mongo/db/concurrency/lock_manager.h418
-rw-r--r--src/mongo/db/concurrency/lock_manager_defs.h626
-rw-r--r--src/mongo/db/concurrency/lock_manager_test.cpp1289
-rw-r--r--src/mongo/db/concurrency/lock_manager_test_help.h60
-rw-r--r--src/mongo/db/concurrency/lock_request_list.h127
-rw-r--r--src/mongo/db/concurrency/lock_state.cpp1293
-rw-r--r--src/mongo/db/concurrency/lock_state.h475
-rw-r--r--src/mongo/db/concurrency/lock_state_test.cpp412
-rw-r--r--src/mongo/db/concurrency/lock_stats.cpp182
-rw-r--r--src/mongo/db/concurrency/lock_stats.h257
-rw-r--r--src/mongo/db/concurrency/lock_stats_test.cpp96
-rw-r--r--src/mongo/db/concurrency/locker.h511
-rw-r--r--src/mongo/db/concurrency/locker_noop.h212
-rw-r--r--src/mongo/db/concurrency/write_conflict_exception.cpp65
-rw-r--r--src/mongo/db/concurrency/write_conflict_exception.h76
-rw-r--r--src/mongo/db/conn_pool_options.cpp58
-rw-r--r--src/mongo/db/conn_pool_options.h32
-rw-r--r--src/mongo/db/curop.cpp954
-rw-r--r--src/mongo/db/curop.h789
-rw-r--r--src/mongo/db/curop_metrics.cpp92
-rw-r--r--src/mongo/db/curop_metrics.h4
-rw-r--r--src/mongo/db/curop_test.cpp100
-rw-r--r--src/mongo/db/db.cpp884
-rw-r--r--src/mongo/db/db.h8
-rw-r--r--src/mongo/db/db_raii.cpp272
-rw-r--r--src/mongo/db/db_raii.h301
-rw-r--r--src/mongo/db/dbcommands.cpp2023
-rw-r--r--src/mongo/db/dbcommands_generic.cpp698
-rw-r--r--src/mongo/db/dbdirectclient.cpp249
-rw-r--r--src/mongo/db/dbdirectclient.h106
-rw-r--r--src/mongo/db/dbeval.cpp206
-rw-r--r--src/mongo/db/dbhelpers.cpp927
-rw-r--r--src/mongo/db/dbhelpers.h352
-rw-r--r--src/mongo/db/dbmessage.cpp271
-rw-r--r--src/mongo/db/dbmessage.h432
-rw-r--r--src/mongo/db/dbmessage_test.cpp155
-rw-r--r--src/mongo/db/dbwebserver.cpp826
-rw-r--r--src/mongo/db/dbwebserver.h167
-rw-r--r--src/mongo/db/driverHelpers.cpp66
-rw-r--r--src/mongo/db/exec/and_common-inl.h105
-rw-r--r--src/mongo/db/exec/and_hash.cpp781
-rw-r--r--src/mongo/db/exec/and_hash.h150
-rw-r--r--src/mongo/db/exec/and_sorted.cpp453
-rw-r--r--src/mongo/db/exec/and_sorted.h110
-rw-r--r--src/mongo/db/exec/cached_plan.cpp551
-rw-r--r--src/mongo/db/exec/cached_plan.h218
-rw-r--r--src/mongo/db/exec/collection_scan.cpp366
-rw-r--r--src/mongo/db/exec/collection_scan.h102
-rw-r--r--src/mongo/db/exec/collection_scan_common.h43
-rw-r--r--src/mongo/db/exec/count.cpp301
-rw-r--r--src/mongo/db/exec/count.h102
-rw-r--r--src/mongo/db/exec/count_scan.cpp249
-rw-r--r--src/mongo/db/exec/count_scan.h104
-rw-r--r--src/mongo/db/exec/delete.cpp461
-rw-r--r--src/mongo/db/exec/delete.h161
-rw-r--r--src/mongo/db/exec/distinct_scan.cpp191
-rw-r--r--src/mongo/db/exec/distinct_scan.h124
-rw-r--r--src/mongo/db/exec/eof.cpp74
-rw-r--r--src/mongo/db/exec/eof.h44
-rw-r--r--src/mongo/db/exec/fetch.cpp370
-rw-r--r--src/mongo/db/exec/fetch.h106
-rw-r--r--src/mongo/db/exec/filter.h219
-rw-r--r--src/mongo/db/exec/geo_near.cpp2034
-rw-r--r--src/mongo/db/exec/geo_near.h193
-rw-r--r--src/mongo/db/exec/group.cpp459
-rw-r--r--src/mongo/db/exec/group.h197
-rw-r--r--src/mongo/db/exec/idhack.cpp372
-rw-r--r--src/mongo/db/exec/idhack.h115
-rw-r--r--src/mongo/db/exec/index_scan.cpp418
-rw-r--r--src/mongo/db/exec/index_scan.h204
-rw-r--r--src/mongo/db/exec/keep_mutations.cpp210
-rw-r--r--src/mongo/db/exec/keep_mutations.h84
-rw-r--r--src/mongo/db/exec/limit.cpp170
-rw-r--r--src/mongo/db/exec/limit.h64
-rw-r--r--src/mongo/db/exec/merge_sort.cpp412
-rw-r--r--src/mongo/db/exec/merge_sort.h211
-rw-r--r--src/mongo/db/exec/multi_iterator.cpp188
-rw-r--r--src/mongo/db/exec/multi_iterator.h86
-rw-r--r--src/mongo/db/exec/multi_plan.cpp736
-rw-r--r--src/mongo/db/exec/multi_plan.h368
-rw-r--r--src/mongo/db/exec/near.cpp556
-rw-r--r--src/mongo/db/exec/near.h362
-rw-r--r--src/mongo/db/exec/oplogstart.cpp299
-rw-r--r--src/mongo/db/exec/oplogstart.h160
-rw-r--r--src/mongo/db/exec/or.cpp247
-rw-r--r--src/mongo/db/exec/or.h82
-rw-r--r--src/mongo/db/exec/pipeline_proxy.cpp175
-rw-r--r--src/mongo/db/exec/pipeline_proxy.h102
-rw-r--r--src/mongo/db/exec/plan_stage.h446
-rw-r--r--src/mongo/db/exec/plan_stats.cpp24
-rw-r--r--src/mongo/db/exec/plan_stats.h948
-rw-r--r--src/mongo/db/exec/plan_stats_test.cpp122
-rw-r--r--src/mongo/db/exec/projection.cpp449
-rw-r--r--src/mongo/db/exec/projection.h173
-rw-r--r--src/mongo/db/exec/projection_exec.cpp788
-rw-r--r--src/mongo/db/exec/projection_exec.h316
-rw-r--r--src/mongo/db/exec/projection_exec_test.cpp329
-rw-r--r--src/mongo/db/exec/queued_data_stage.cpp122
-rw-r--r--src/mongo/db/exec/queued_data_stage.h116
-rw-r--r--src/mongo/db/exec/queued_data_stage_test.cpp114
-rw-r--r--src/mongo/db/exec/scoped_timer.cpp14
-rw-r--r--src/mongo/db/exec/scoped_timer.h43
-rw-r--r--src/mongo/db/exec/shard_filter.cpp212
-rw-r--r--src/mongo/db/exec/shard_filter.h144
-rw-r--r--src/mongo/db/exec/skip.cpp167
-rw-r--r--src/mongo/db/exec/skip.h62
-rw-r--r--src/mongo/db/exec/sort.cpp930
-rw-r--r--src/mongo/db/exec/sort.h373
-rw-r--r--src/mongo/db/exec/sort_test.cpp364
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp723
-rw-r--r--src/mongo/db/exec/subplan.cpp810
-rw-r--r--src/mongo/db/exec/subplan.h285
-rw-r--r--src/mongo/db/exec/text.cpp689
-rw-r--r--src/mongo/db/exec/text.h230
-rw-r--r--src/mongo/db/exec/update.cpp1819
-rw-r--r--src/mongo/db/exec/update.h333
-rw-r--r--src/mongo/db/exec/working_set.cpp393
-rw-r--r--src/mongo/db/exec/working_set.h477
-rw-r--r--src/mongo/db/exec/working_set_common.cpp295
-rw-r--r--src/mongo/db/exec/working_set_common.h198
-rw-r--r--src/mongo/db/exec/working_set_computed_data.h130
-rw-r--r--src/mongo/db/exec/working_set_test.cpp354
-rw-r--r--src/mongo/db/field_parser-inl.h534
-rw-r--r--src/mongo/db/field_parser.cpp716
-rw-r--r--src/mongo/db/field_parser.h603
-rw-r--r--src/mongo/db/field_parser_test.cpp747
-rw-r--r--src/mongo/db/field_ref.cpp394
-rw-r--r--src/mongo/db/field_ref.h307
-rw-r--r--src/mongo/db/field_ref_set.cpp206
-rw-r--r--src/mongo/db/field_ref_set.h180
-rw-r--r--src/mongo/db/field_ref_set_test.cpp218
-rw-r--r--src/mongo/db/field_ref_test.cpp552
-rw-r--r--src/mongo/db/fts/fts_basic_tokenizer.cpp84
-rw-r--r--src/mongo/db/fts/fts_basic_tokenizer.h67
-rw-r--r--src/mongo/db/fts/fts_basic_tokenizer_test.cpp78
-rw-r--r--src/mongo/db/fts/fts_element_iterator.cpp269
-rw-r--r--src/mongo/db/fts/fts_element_iterator.h246
-rw-r--r--src/mongo/db/fts/fts_element_iterator_test.cpp536
-rw-r--r--src/mongo/db/fts/fts_enabled.cpp68
-rw-r--r--src/mongo/db/fts/fts_index_format.cpp318
-rw-r--r--src/mongo/db/fts/fts_index_format.h62
-rw-r--r--src/mongo/db/fts/fts_index_format_test.cpp323
-rw-r--r--src/mongo/db/fts/fts_language.cpp395
-rw-r--r--src/mongo/db/fts/fts_language.h203
-rw-r--r--src/mongo/db/fts/fts_language_test.cpp197
-rw-r--r--src/mongo/db/fts/fts_matcher.cpp212
-rw-r--r--src/mongo/db/fts/fts_matcher.h138
-rw-r--r--src/mongo/db/fts/fts_matcher_test.cpp383
-rw-r--r--src/mongo/db/fts/fts_query.cpp345
-rw-r--r--src/mongo/db/fts/fts_query.h109
-rw-r--r--src/mongo/db/fts/fts_query_parser.cpp112
-rw-r--r--src/mongo/db/fts/fts_query_parser.h89
-rw-r--r--src/mongo/db/fts/fts_query_test.cpp456
-rw-r--r--src/mongo/db/fts/fts_spec.cpp753
-rw-r--r--src/mongo/db/fts/fts_spec.h264
-rw-r--r--src/mongo/db/fts/fts_spec_legacy.cpp470
-rw-r--r--src/mongo/db/fts/fts_spec_test.cpp1087
-rw-r--r--src/mongo/db/fts/fts_tokenizer.h84
-rw-r--r--src/mongo/db/fts/fts_util.cpp10
-rw-r--r--src/mongo/db/fts/fts_util.h18
-rw-r--r--src/mongo/db/fts/stemmer.cpp51
-rw-r--r--src/mongo/db/fts/stemmer.h39
-rw-r--r--src/mongo/db/fts/stemmer_test.cpp25
-rw-r--r--src/mongo/db/fts/stop_words.cpp55
-rw-r--r--src/mongo/db/fts/stop_words.h34
-rw-r--r--src/mongo/db/fts/stop_words_test.cpp15
-rw-r--r--src/mongo/db/fts/tokenizer.cpp194
-rw-r--r--src/mongo/db/fts/tokenizer.h54
-rw-r--r--src/mongo/db/fts/tokenizer_test.cpp129
-rw-r--r--src/mongo/db/geo/big_polygon.cpp298
-rw-r--r--src/mongo/db/geo/big_polygon.h102
-rw-r--r--src/mongo/db/geo/big_polygon_test.cpp1010
-rw-r--r--src/mongo/db/geo/geoconstants.h6
-rw-r--r--src/mongo/db/geo/geometry_container.cpp1901
-rw-r--r--src/mongo/db/geo/geometry_container.h245
-rw-r--r--src/mongo/db/geo/geoparser.cpp1228
-rw-r--r--src/mongo/db/geo/geoparser.h111
-rw-r--r--src/mongo/db/geo/geoparser_test.cpp690
-rw-r--r--src/mongo/db/geo/hash.cpp1491
-rw-r--r--src/mongo/db/geo/hash.h453
-rw-r--r--src/mongo/db/geo/hash_test.cpp766
-rw-r--r--src/mongo/db/geo/haystack.cpp137
-rw-r--r--src/mongo/db/geo/r2_region_coverer.cpp446
-rw-r--r--src/mongo/db/geo/r2_region_coverer.h217
-rw-r--r--src/mongo/db/geo/r2_region_coverer_test.cpp1117
-rw-r--r--src/mongo/db/geo/s2.h1
-rw-r--r--src/mongo/db/geo/shapes.cpp1290
-rw-r--r--src/mongo/db/geo/shapes.h583
-rw-r--r--src/mongo/db/global_timestamp.cpp68
-rw-r--r--src/mongo/db/global_timestamp.h18
-rw-r--r--src/mongo/db/hasher.cpp120
-rw-r--r--src/mongo/db/hasher.h155
-rw-r--r--src/mongo/db/hasher_test.cpp665
-rw-r--r--src/mongo/db/index/2d_access_method.cpp34
-rw-r--r--src/mongo/db/index/2d_access_method.h33
-rw-r--r--src/mongo/db/index/2d_common.h10
-rw-r--r--src/mongo/db/index/btree_access_method.cpp49
-rw-r--r--src/mongo/db/index/btree_access_method.h32
-rw-r--r--src/mongo/db/index/btree_key_generator.cpp595
-rw-r--r--src/mongo/db/index/btree_key_generator.h364
-rw-r--r--src/mongo/db/index/btree_key_generator_test.cpp1543
-rw-r--r--src/mongo/db/index/expression_keys_private.cpp774
-rw-r--r--src/mongo/db/index/expression_keys_private.h163
-rw-r--r--src/mongo/db/index/expression_params.h269
-rw-r--r--src/mongo/db/index/external_key_generator.cpp235
-rw-r--r--src/mongo/db/index/external_key_generator.h4
-rw-r--r--src/mongo/db/index/fts_access_method.cpp10
-rw-r--r--src/mongo/db/index/fts_access_method.h22
-rw-r--r--src/mongo/db/index/hash_access_method.cpp41
-rw-r--r--src/mongo/db/index/hash_access_method.h32
-rw-r--r--src/mongo/db/index/haystack_access_method.cpp151
-rw-r--r--src/mongo/db/index/haystack_access_method.h68
-rw-r--r--src/mongo/db/index/haystack_access_method_internal.h100
-rw-r--r--src/mongo/db/index/index_access_method.cpp641
-rw-r--r--src/mongo/db/index/index_access_method.h410
-rw-r--r--src/mongo/db/index/index_descriptor.cpp89
-rw-r--r--src/mongo/db/index/index_descriptor.h372
-rw-r--r--src/mongo/db/index/s2_access_method.cpp111
-rw-r--r--src/mongo/db/index/s2_access_method.h34
-rw-r--r--src/mongo/db/index/s2_common.h84
-rw-r--r--src/mongo/db/index_builder.cpp265
-rw-r--r--src/mongo/db/index_builder.h93
-rw-r--r--src/mongo/db/index_legacy.cpp74
-rw-r--r--src/mongo/db/index_legacy.h67
-rw-r--r--src/mongo/db/index_names.cpp96
-rw-r--r--src/mongo/db/index_names.h100
-rw-r--r--src/mongo/db/index_rebuilder.cpp193
-rw-r--r--src/mongo/db/index_rebuilder.h12
-rw-r--r--src/mongo/db/initialize_server_global_state.cpp501
-rw-r--r--src/mongo/db/initialize_server_global_state.h36
-rw-r--r--src/mongo/db/instance.cpp2049
-rw-r--r--src/mongo/db/instance.h62
-rw-r--r--src/mongo/db/introspect.cpp215
-rw-r--r--src/mongo/db/introspect.h22
-rw-r--r--src/mongo/db/invalidation_type.h14
-rw-r--r--src/mongo/db/jsobj.h1
-rw-r--r--src/mongo/db/keypattern.cpp105
-rw-r--r--src/mongo/db/keypattern.h141
-rw-r--r--src/mongo/db/keypattern_test.cpp191
-rw-r--r--src/mongo/db/lasterror.cpp128
-rw-r--r--src/mongo/db/lasterror.h156
-rw-r--r--src/mongo/db/log_process_details.cpp39
-rw-r--r--src/mongo/db/log_process_details.h18
-rw-r--r--src/mongo/db/matcher/expression.cpp62
-rw-r--r--src/mongo/db/matcher/expression.h417
-rw-r--r--src/mongo/db/matcher/expression_algo.cpp291
-rw-r--r--src/mongo/db/matcher/expression_algo.h50
-rw-r--r--src/mongo/db/matcher/expression_algo_test.cpp1173
-rw-r--r--src/mongo/db/matcher/expression_array.cpp322
-rw-r--r--src/mongo/db/matcher/expression_array.h179
-rw-r--r--src/mongo/db/matcher/expression_array_test.cpp864
-rw-r--r--src/mongo/db/matcher/expression_geo.cpp666
-rw-r--r--src/mongo/db/matcher/expression_geo.h255
-rw-r--r--src/mongo/db/matcher/expression_geo_test.cpp235
-rw-r--r--src/mongo/db/matcher/expression_leaf.cpp822
-rw-r--r--src/mongo/db/matcher/expression_leaf.h593
-rw-r--r--src/mongo/db/matcher/expression_leaf_test.cpp3271
-rw-r--r--src/mongo/db/matcher/expression_parser.cpp1149
-rw-r--r--src/mongo/db/matcher/expression_parser.h286
-rw-r--r--src/mongo/db/matcher/expression_parser_array_test.cpp1292
-rw-r--r--src/mongo/db/matcher/expression_parser_geo.cpp81
-rw-r--r--src/mongo/db/matcher/expression_parser_geo_test.cpp61
-rw-r--r--src/mongo/db/matcher/expression_parser_leaf_test.cpp1141
-rw-r--r--src/mongo/db/matcher/expression_parser_test.cpp148
-rw-r--r--src/mongo/db/matcher/expression_parser_text.cpp92
-rw-r--r--src/mongo/db/matcher/expression_parser_text_test.cpp82
-rw-r--r--src/mongo/db/matcher/expression_parser_tree.cpp123
-rw-r--r--src/mongo/db/matcher/expression_parser_tree_test.cpp266
-rw-r--r--src/mongo/db/matcher/expression_test.cpp111
-rw-r--r--src/mongo/db/matcher/expression_text.cpp111
-rw-r--r--src/mongo/db/matcher/expression_text.h45
-rw-r--r--src/mongo/db/matcher/expression_tree.cpp231
-rw-r--r--src/mongo/db/matcher/expression_tree.h271
-rw-r--r--src/mongo/db/matcher/expression_tree_test.cpp1062
-rw-r--r--src/mongo/db/matcher/expression_where.cpp249
-rw-r--r--src/mongo/db/matcher/expression_where_noop.cpp150
-rw-r--r--src/mongo/db/matcher/match_details.cpp54
-rw-r--r--src/mongo/db/matcher/match_details.h54
-rw-r--r--src/mongo/db/matcher/matchable.cpp11
-rw-r--r--src/mongo/db/matcher/matchable.h116
-rw-r--r--src/mongo/db/matcher/matcher.cpp34
-rw-r--r--src/mongo/db/matcher/matcher.h36
-rw-r--r--src/mongo/db/matcher/path.cpp431
-rw-r--r--src/mongo/db/matcher/path.h216
-rw-r--r--src/mongo/db/matcher/path_internal.cpp47
-rw-r--r--src/mongo/db/matcher/path_internal.h10
-rw-r--r--src/mongo/db/matcher/path_test.cpp716
-rw-r--r--src/mongo/db/max_time.h7
-rw-r--r--src/mongo/db/mongod_options.cpp2099
-rw-r--r--src/mongo/db/mongod_options.h76
-rw-r--r--src/mongo/db/mongod_options_init.cpp77
-rw-r--r--src/mongo/db/namespace_string-inl.h220
-rw-r--r--src/mongo/db/namespace_string.cpp144
-rw-r--r--src/mongo/db/namespace_string.h513
-rw-r--r--src/mongo/db/namespace_string_test.cpp393
-rw-r--r--src/mongo/db/op_observer.cpp299
-rw-r--r--src/mongo/db/op_observer.h101
-rw-r--r--src/mongo/db/operation_context.cpp31
-rw-r--r--src/mongo/db/operation_context.h455
-rw-r--r--src/mongo/db/operation_context_impl.cpp285
-rw-r--r--src/mongo/db/operation_context_impl.h45
-rw-r--r--src/mongo/db/operation_context_noop.h144
-rw-r--r--src/mongo/db/ops/delete.cpp64
-rw-r--r--src/mongo/db/ops/delete.h23
-rw-r--r--src/mongo/db/ops/delete_request.h125
-rw-r--r--src/mongo/db/ops/field_checker.cpp65
-rw-r--r--src/mongo/db/ops/field_checker.h50
-rw-r--r--src/mongo/db/ops/field_checker_test.cpp132
-rw-r--r--src/mongo/db/ops/insert.cpp292
-rw-r--r--src/mongo/db/ops/insert.h35
-rw-r--r--src/mongo/db/ops/log_builder.cpp235
-rw-r--r--src/mongo/db/ops/log_builder.h149
-rw-r--r--src/mongo/db/ops/log_builder_test.cpp453
-rw-r--r--src/mongo/db/ops/modifier_add_to_set.cpp572
-rw-r--r--src/mongo/db/ops/modifier_add_to_set.h68
-rw-r--r--src/mongo/db/ops/modifier_add_to_set_test.cpp678
-rw-r--r--src/mongo/db/ops/modifier_bit.cpp394
-rw-r--r--src/mongo/db/ops/modifier_bit.h82
-rw-r--r--src/mongo/db/ops/modifier_bit_test.cpp1288
-rw-r--r--src/mongo/db/ops/modifier_compare.cpp233
-rw-r--r--src/mongo/db/ops/modifier_compare.h135
-rw-r--r--src/mongo/db/ops/modifier_compare_test.cpp487
-rw-r--r--src/mongo/db/ops/modifier_current_date.cpp358
-rw-r--r--src/mongo/db/ops/modifier_current_date.h73
-rw-r--r--src/mongo/db/ops/modifier_current_date_test.cpp600
-rw-r--r--src/mongo/db/ops/modifier_inc.cpp399
-rw-r--r--src/mongo/db/ops/modifier_inc.h115
-rw-r--r--src/mongo/db/ops/modifier_inc_test.cpp933
-rw-r--r--src/mongo/db/ops/modifier_interface.h275
-rw-r--r--src/mongo/db/ops/modifier_object_replace.cpp240
-rw-r--r--src/mongo/db/ops/modifier_object_replace.h113
-rw-r--r--src/mongo/db/ops/modifier_object_replace_test.cpp506
-rw-r--r--src/mongo/db/ops/modifier_pop.cpp269
-rw-r--r--src/mongo/db/ops/modifier_pop.h67
-rw-r--r--src/mongo/db/ops/modifier_pop_test.cpp462
-rw-r--r--src/mongo/db/ops/modifier_pull.cpp399
-rw-r--r--src/mongo/db/ops/modifier_pull.h67
-rw-r--r--src/mongo/db/ops/modifier_pull_all.cpp325
-rw-r--r--src/mongo/db/ops/modifier_pull_all.h64
-rw-r--r--src/mongo/db/ops/modifier_pull_all_test.cpp389
-rw-r--r--src/mongo/db/ops/modifier_pull_test.cpp901
-rw-r--r--src/mongo/db/ops/modifier_push.cpp1000
-rw-r--r--src/mongo/db/ops/modifier_push.h182
-rw-r--r--src/mongo/db/ops/modifier_push_sorter.h57
-rw-r--r--src/mongo/db/ops/modifier_push_sorter_test.cpp232
-rw-r--r--src/mongo/db/ops/modifier_push_test.cpp2445
-rw-r--r--src/mongo/db/ops/modifier_rename.cpp429
-rw-r--r--src/mongo/db/ops/modifier_rename.h101
-rw-r--r--src/mongo/db/ops/modifier_rename_test.cpp676
-rw-r--r--src/mongo/db/ops/modifier_set.cpp389
-rw-r--r--src/mongo/db/ops/modifier_set.h142
-rw-r--r--src/mongo/db/ops/modifier_set_test.cpp1126
-rw-r--r--src/mongo/db/ops/modifier_table.cpp125
-rw-r--r--src/mongo/db/ops/modifier_table.h64
-rw-r--r--src/mongo/db/ops/modifier_table_test.cpp32
-rw-r--r--src/mongo/db/ops/modifier_unset.cpp214
-rw-r--r--src/mongo/db/ops/modifier_unset.h120
-rw-r--r--src/mongo/db/ops/modifier_unset_test.cpp628
-rw-r--r--src/mongo/db/ops/parsed_delete.cpp142
-rw-r--r--src/mongo/db/ops/parsed_delete.h152
-rw-r--r--src/mongo/db/ops/parsed_update.cpp206
-rw-r--r--src/mongo/db/ops/parsed_update.h187
-rw-r--r--src/mongo/db/ops/path_support.cpp609
-rw-r--r--src/mongo/db/ops/path_support.h295
-rw-r--r--src/mongo/db/ops/path_support_test.cpp1606
-rw-r--r--src/mongo/db/ops/update.cpp124
-rw-r--r--src/mongo/db/ops/update.h42
-rw-r--r--src/mongo/db/ops/update_driver.cpp576
-rw-r--r--src/mongo/db/ops/update_driver.h283
-rw-r--r--src/mongo/db/ops/update_driver_test.cpp695
-rw-r--r--src/mongo/db/ops/update_lifecycle.h63
-rw-r--r--src/mongo/db/ops/update_lifecycle_impl.cpp69
-rw-r--r--src/mongo/db/ops/update_lifecycle_impl.h39
-rw-r--r--src/mongo/db/ops/update_request.h392
-rw-r--r--src/mongo/db/ops/update_result.cpp29
-rw-r--r--src/mongo/db/ops/update_result.h52
-rw-r--r--src/mongo/db/pipeline/accumulator.h272
-rw-r--r--src/mongo/db/pipeline/accumulator_add_to_set.cpp75
-rw-r--r--src/mongo/db/pipeline/accumulator_avg.cpp84
-rw-r--r--src/mongo/db/pipeline/accumulator_first.cpp54
-rw-r--r--src/mongo/db/pipeline/accumulator_last.cpp44
-rw-r--r--src/mongo/db/pipeline/accumulator_min_max.cpp63
-rw-r--r--src/mongo/db/pipeline/accumulator_push.cpp71
-rw-r--r--src/mongo/db/pipeline/accumulator_std_dev.cpp135
-rw-r--r--src/mongo/db/pipeline/accumulator_sum.cpp95
-rw-r--r--src/mongo/db/pipeline/dependencies.cpp224
-rw-r--r--src/mongo/db/pipeline/dependencies.h57
-rw-r--r--src/mongo/db/pipeline/document.cpp655
-rw-r--r--src/mongo/db/pipeline/document.h953
-rw-r--r--src/mongo/db/pipeline/document_internal.h594
-rw-r--r--src/mongo/db/pipeline/document_source.cpp56
-rw-r--r--src/mongo/db/pipeline/document_source.h1768
-rw-r--r--src/mongo/db/pipeline/document_source_bson_array.cpp52
-rw-r--r--src/mongo/db/pipeline/document_source_command_shards.cpp148
-rw-r--r--src/mongo/db/pipeline/document_source_cursor.cpp274
-rw-r--r--src/mongo/db/pipeline/document_source_geo_near.cpp288
-rw-r--r--src/mongo/db/pipeline/document_source_group.cpp916
-rw-r--r--src/mongo/db/pipeline/document_source_limit.cpp89
-rw-r--r--src/mongo/db/pipeline/document_source_match.cpp355
-rw-r--r--src/mongo/db/pipeline/document_source_merge_cursors.cpp221
-rw-r--r--src/mongo/db/pipeline/document_source_out.cpp293
-rw-r--r--src/mongo/db/pipeline/document_source_project.cpp168
-rw-r--r--src/mongo/db/pipeline/document_source_redact.cpp211
-rw-r--r--src/mongo/db/pipeline/document_source_skip.cpp108
-rw-r--r--src/mongo/db/pipeline/document_source_sort.cpp515
-rw-r--r--src/mongo/db/pipeline/document_source_unwind.cpp247
-rw-r--r--src/mongo/db/pipeline/document_value_test.cpp3064
-rw-r--r--src/mongo/db/pipeline/expression.cpp4221
-rw-r--r--src/mongo/db/pipeline/expression.h1675
-rw-r--r--src/mongo/db/pipeline/expression_context.h47
-rw-r--r--src/mongo/db/pipeline/field_path.cpp134
-rw-r--r--src/mongo/db/pipeline/field_path.h163
-rw-r--r--src/mongo/db/pipeline/field_path_test.cpp433
-rw-r--r--src/mongo/db/pipeline/pipeline.cpp1037
-rw-r--r--src/mongo/db/pipeline/pipeline.h294
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp350
-rw-r--r--src/mongo/db/pipeline/pipeline_d.h91
-rw-r--r--src/mongo/db/pipeline/pipeline_optimizations.h160
-rw-r--r--src/mongo/db/pipeline/value.cpp797
-rw-r--r--src/mongo/db/pipeline/value.h588
-rw-r--r--src/mongo/db/pipeline/value_internal.h461
-rw-r--r--src/mongo/db/prefetch.cpp325
-rw-r--r--src/mongo/db/prefetch.h16
-rw-r--r--src/mongo/db/query/canonical_query.cpp1068
-rw-r--r--src/mongo/db/query/canonical_query.h377
-rw-r--r--src/mongo/db/query/canonical_query_test.cpp1003
-rw-r--r--src/mongo/db/query/count_request.cpp134
-rw-r--r--src/mongo/db/query/count_request.h103
-rw-r--r--src/mongo/db/query/count_request_test.cpp190
-rw-r--r--src/mongo/db/query/cursor_responses.cpp42
-rw-r--r--src/mongo/db/query/cursor_responses.h60
-rw-r--r--src/mongo/db/query/explain.cpp1147
-rw-r--r--src/mongo/db/query/explain.h289
-rw-r--r--src/mongo/db/query/explain_common.cpp49
-rw-r--r--src/mongo/db/query/explain_common.h69
-rw-r--r--src/mongo/db/query/expression_index.cpp300
-rw-r--r--src/mongo/db/query/expression_index.h41
-rw-r--r--src/mongo/db/query/expression_index_knobs.cpp4
-rw-r--r--src/mongo/db/query/expression_index_knobs.h22
-rw-r--r--src/mongo/db/query/find.cpp1178
-rw-r--r--src/mongo/db/query/find.h222
-rw-r--r--src/mongo/db/query/find_and_modify_request.cpp259
-rw-r--r--src/mongo/db/query/find_and_modify_request.h233
-rw-r--r--src/mongo/db/query/find_and_modify_request_test.cpp444
-rw-r--r--src/mongo/db/query/find_constants.h6
-rw-r--r--src/mongo/db/query/get_executor.cpp2408
-rw-r--r--src/mongo/db/query/get_executor.h318
-rw-r--r--src/mongo/db/query/get_executor_test.cpp187
-rw-r--r--src/mongo/db/query/getmore_request.cpp165
-rw-r--r--src/mongo/db/query/getmore_request.h56
-rw-r--r--src/mongo/db/query/getmore_request_test.cpp284
-rw-r--r--src/mongo/db/query/index_bounds.cpp853
-rw-r--r--src/mongo/db/query/index_bounds.h375
-rw-r--r--src/mongo/db/query/index_bounds_builder.cpp1673
-rw-r--r--src/mongo/db/query/index_bounds_builder.h283
-rw-r--r--src/mongo/db/query/index_bounds_builder_test.cpp2686
-rw-r--r--src/mongo/db/query/index_bounds_test.cpp1282
-rw-r--r--src/mongo/db/query/index_entry.cpp42
-rw-r--r--src/mongo/db/query/index_entry.h161
-rw-r--r--src/mongo/db/query/index_tag.cpp126
-rw-r--r--src/mongo/db/query/index_tag.h182
-rw-r--r--src/mongo/db/query/indexability.h204
-rw-r--r--src/mongo/db/query/internal_plans.cpp147
-rw-r--r--src/mongo/db/query/internal_plans.h85
-rw-r--r--src/mongo/db/query/interval.cpp356
-rw-r--r--src/mongo/db/query/interval.h275
-rw-r--r--src/mongo/db/query/interval_test.cpp448
-rw-r--r--src/mongo/db/query/lite_parsed_query.cpp1490
-rw-r--r--src/mongo/db/query/lite_parsed_query.h537
-rw-r--r--src/mongo/db/query/lite_parsed_query_test.cpp1914
-rw-r--r--src/mongo/db/query/lru_key_value.h318
-rw-r--r--src/mongo/db/query/lru_key_value_test.cpp280
-rw-r--r--src/mongo/db/query/parsed_projection.cpp477
-rw-r--r--src/mongo/db/query/parsed_projection.h214
-rw-r--r--src/mongo/db/query/parsed_projection_test.cpp354
-rw-r--r--src/mongo/db/query/plan_cache.cpp1099
-rw-r--r--src/mongo/db/query/plan_cache.h678
-rw-r--r--src/mongo/db/query/plan_cache_indexability.cpp92
-rw-r--r--src/mongo/db/query/plan_cache_indexability.h103
-rw-r--r--src/mongo/db/query/plan_cache_indexability_test.cpp310
-rw-r--r--src/mongo/db/query/plan_cache_test.cpp2175
-rw-r--r--src/mongo/db/query/plan_enumerator.cpp2083
-rw-r--r--src/mongo/db/query/plan_enumerator.h780
-rw-r--r--src/mongo/db/query/plan_executor.cpp837
-rw-r--r--src/mongo/db/query/plan_executor.h781
-rw-r--r--src/mongo/db/query/plan_ranker.cpp368
-rw-r--r--src/mongo/db/query/plan_ranker.h135
-rw-r--r--src/mongo/db/query/plan_yield_policy.cpp126
-rw-r--r--src/mongo/db/query/plan_yield_policy.h125
-rw-r--r--src/mongo/db/query/planner_access.cpp2233
-rw-r--r--src/mongo/db/query/planner_access.h675
-rw-r--r--src/mongo/db/query/planner_analysis.cpp1226
-rw-r--r--src/mongo/db/query/planner_analysis.h136
-rw-r--r--src/mongo/db/query/planner_analysis_test.cpp238
-rw-r--r--src/mongo/db/query/planner_ixselect.cpp1079
-rw-r--r--src/mongo/db/query/planner_ixselect.h272
-rw-r--r--src/mongo/db/query/planner_ixselect_test.cpp391
-rw-r--r--src/mongo/db/query/query_knobs.cpp38
-rw-r--r--src/mongo/db/query/query_knobs.h104
-rw-r--r--src/mongo/db/query/query_planner.cpp1457
-rw-r--r--src/mongo/db/query/query_planner.h146
-rw-r--r--src/mongo/db/query/query_planner_array_test.cpp1902
-rw-r--r--src/mongo/db/query/query_planner_common.cpp75
-rw-r--r--src/mongo/db/query/query_planner_common.h78
-rw-r--r--src/mongo/db/query/query_planner_geo_test.cpp1536
-rw-r--r--src/mongo/db/query/query_planner_params.h140
-rw-r--r--src/mongo/db/query/query_planner_test.cpp7109
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.cpp523
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.h250
-rw-r--r--src/mongo/db/query/query_planner_test_lib.cpp803
-rw-r--r--src/mongo/db/query/query_planner_test_lib.h28
-rw-r--r--src/mongo/db/query/query_planner_text_test.cpp670
-rw-r--r--src/mongo/db/query/query_settings.cpp215
-rw-r--r--src/mongo/db/query/query_settings.h193
-rw-r--r--src/mongo/db/query/query_solution.cpp1456
-rw-r--r--src/mongo/db/query/query_solution.h1353
-rw-r--r--src/mongo/db/query/query_yield.cpp56
-rw-r--r--src/mongo/db/query/query_yield.h33
-rw-r--r--src/mongo/db/query/stage_builder.cpp505
-rw-r--r--src/mongo/db/query/stage_builder.h36
-rw-r--r--src/mongo/db/query/stage_types.h102
-rw-r--r--src/mongo/db/range_arithmetic.cpp179
-rw-r--r--src/mongo/db/range_arithmetic.h227
-rw-r--r--src/mongo/db/range_arithmetic_test.cpp203
-rw-r--r--src/mongo/db/range_deleter.cpp842
-rw-r--r--src/mongo/db/range_deleter.h515
-rw-r--r--src/mongo/db/range_deleter_db_env.cpp163
-rw-r--r--src/mongo/db/range_deleter_db_env.h55
-rw-r--r--src/mongo/db/range_deleter_mock_env.cpp178
-rw-r--r--src/mongo/db/range_deleter_mock_env.h242
-rw-r--r--src/mongo/db/range_deleter_service.cpp16
-rw-r--r--src/mongo/db/range_deleter_service.h8
-rw-r--r--src/mongo/db/range_deleter_test.cpp539
-rw-r--r--src/mongo/db/range_preserver.h58
-rw-r--r--src/mongo/db/record_id.h186
-rw-r--r--src/mongo/db/record_id_test.cpp60
-rw-r--r--src/mongo/db/repair_database.cpp317
-rw-r--r--src/mongo/db/repair_database.h29
-rw-r--r--src/mongo/db/repl/applier.cpp318
-rw-r--r--src/mongo/db/repl/applier.h243
-rw-r--r--src/mongo/db/repl/applier_test.cpp1177
-rw-r--r--src/mongo/db/repl/base_cloner.h76
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.cpp433
-rw-r--r--src/mongo/db/repl/base_cloner_test_fixture.h201
-rw-r--r--src/mongo/db/repl/bgsync.cpp861
-rw-r--r--src/mongo/db/repl/bgsync.h220
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change.cpp409
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change.h200
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp1440
-rw-r--r--src/mongo/db/repl/collection_cloner.cpp438
-rw-r--r--src/mongo/db/repl/collection_cloner.h403
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp745
-rw-r--r--src/mongo/db/repl/data_replicator.cpp2163
-rw-r--r--src/mongo/db/repl/data_replicator.h71
-rw-r--r--src/mongo/db/repl/data_replicator_test.cpp1067
-rw-r--r--src/mongo/db/repl/database_cloner.cpp483
-rw-r--r--src/mongo/db/repl/database_cloner.h303
-rw-r--r--src/mongo/db/repl/database_cloner_test.cpp913
-rw-r--r--src/mongo/db/repl/database_task.cpp117
-rw-r--r--src/mongo/db/repl/database_task.h54
-rw-r--r--src/mongo/db/repl/database_task_test.cpp282
-rw-r--r--src/mongo/db/repl/elect_cmd_runner.cpp203
-rw-r--r--src/mongo/db/repl/elect_cmd_runner.h151
-rw-r--r--src/mongo/db/repl/elect_cmd_runner_test.cpp672
-rw-r--r--src/mongo/db/repl/election_winner_declarer.cpp148
-rw-r--r--src/mongo/db/repl/election_winner_declarer.h143
-rw-r--r--src/mongo/db/repl/election_winner_declarer_test.cpp324
-rw-r--r--src/mongo/db/repl/freshness_checker.cpp330
-rw-r--r--src/mongo/db/repl/freshness_checker.h225
-rw-r--r--src/mongo/db/repl/freshness_checker_test.cpp1825
-rw-r--r--src/mongo/db/repl/handshake_args.cpp118
-rw-r--r--src/mongo/db/repl/handshake_args.h116
-rw-r--r--src/mongo/db/repl/heartbeat_response_action.cpp58
-rw-r--r--src/mongo/db/repl/heartbeat_response_action.h158
-rw-r--r--src/mongo/db/repl/initial_sync.cpp37
-rw-r--r--src/mongo/db/repl/initial_sync.h46
-rw-r--r--src/mongo/db/repl/is_master_response.cpp714
-rw-r--r--src/mongo/db/repl/is_master_response.h293
-rw-r--r--src/mongo/db/repl/isself.cpp423
-rw-r--r--src/mongo/db/repl/isself.h48
-rw-r--r--src/mongo/db/repl/isself_test.cpp58
-rw-r--r--src/mongo/db/repl/last_vote.cpp77
-rw-r--r--src/mongo/db/repl/last_vote.h34
-rw-r--r--src/mongo/db/repl/master_slave.cpp2280
-rw-r--r--src/mongo/db/repl/master_slave.h281
-rw-r--r--src/mongo/db/repl/member_config.cpp482
-rw-r--r--src/mongo/db/repl/member_config.h291
-rw-r--r--src/mongo/db/repl/member_config_test.cpp733
-rw-r--r--src/mongo/db/repl/member_heartbeat_data.cpp120
-rw-r--r--src/mongo/db/repl/member_heartbeat_data.h172
-rw-r--r--src/mongo/db/repl/member_state.h140
-rw-r--r--src/mongo/db/repl/minvalid.cpp122
-rw-r--r--src/mongo/db/repl/minvalid.h58
-rw-r--r--src/mongo/db/repl/operation_context_repl_mock.cpp68
-rw-r--r--src/mongo/db/repl/operation_context_repl_mock.h48
-rw-r--r--src/mongo/db/repl/oplog.cpp1329
-rw-r--r--src/mongo/db/repl/oplog.h189
-rw-r--r--src/mongo/db/repl/oplog_interface.h56
-rw-r--r--src/mongo/db/repl/oplog_interface_local.cpp106
-rw-r--r--src/mongo/db/repl/oplog_interface_local.h33
-rw-r--r--src/mongo/db/repl/oplog_interface_mock.cpp72
-rw-r--r--src/mongo/db/repl/oplog_interface_mock.h36
-rw-r--r--src/mongo/db/repl/oplog_interface_remote.cpp81
-rw-r--r--src/mongo/db/repl/oplog_interface_remote.h33
-rw-r--r--src/mongo/db/repl/oplogreader.cpp264
-rw-r--r--src/mongo/db/repl/oplogreader.h223
-rw-r--r--src/mongo/db/repl/optime.cpp46
-rw-r--r--src/mongo/db/repl/optime.h92
-rw-r--r--src/mongo/db/repl/read_after_optime_args.cpp94
-rw-r--r--src/mongo/db/repl/read_after_optime_args.h56
-rw-r--r--src/mongo/db/repl/read_after_optime_args_test.cpp145
-rw-r--r--src/mongo/db/repl/read_after_optime_response.cpp62
-rw-r--r--src/mongo/db/repl/read_after_optime_response.h100
-rw-r--r--src/mongo/db/repl/read_after_optime_response_test.cpp66
-rw-r--r--src/mongo/db/repl/repl_client_info.cpp14
-rw-r--r--src/mongo/db/repl/repl_client_info.h58
-rw-r--r--src/mongo/db/repl/repl_set_command.cpp20
-rw-r--r--src/mongo/db/repl/repl_set_command.h42
-rw-r--r--src/mongo/db/repl/repl_set_declare_election_winner.cpp55
-rw-r--r--src/mongo/db/repl/repl_set_declare_election_winner_args.cpp237
-rw-r--r--src/mongo/db/repl/repl_set_declare_election_winner_args.h64
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_args.cpp245
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_args.h210
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp178
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_args_v1.h168
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_response.cpp618
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_response.h399
-rw-r--r--src/mongo/db/repl/repl_set_heartbeat_response_test.cpp1450
-rw-r--r--src/mongo/db/repl/repl_set_html_summary.cpp306
-rw-r--r--src/mongo/db/repl/repl_set_html_summary.h121
-rw-r--r--src/mongo/db/repl/repl_set_request_votes.cpp55
-rw-r--r--src/mongo/db/repl/repl_set_request_votes_args.cpp326
-rw-r--r--src/mongo/db/repl/repl_set_request_votes_args.h106
-rw-r--r--src/mongo/db/repl/repl_settings.cpp13
-rw-r--r--src/mongo/db/repl/repl_settings.h171
-rw-r--r--src/mongo/db/repl/replica_set_config.cpp919
-rw-r--r--src/mongo/db/repl/replica_set_config.h446
-rw-r--r--src/mongo/db/repl/replica_set_config_checks.cpp409
-rw-r--r--src/mongo/db/repl/replica_set_config_checks.h100
-rw-r--r--src/mongo/db/repl/replica_set_config_checks_test.cpp1327
-rw-r--r--src/mongo/db/repl/replica_set_config_test.cpp2103
-rw-r--r--src/mongo/db/repl/replica_set_tag.cpp374
-rw-r--r--src/mongo/db/repl/replica_set_tag.h481
-rw-r--r--src/mongo/db/repl/replica_set_tag_test.cpp238
-rw-r--r--src/mongo/db/repl/replication_coordinator.cpp38
-rw-r--r--src/mongo/db/repl/replication_coordinator.h1192
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state.cpp8
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state.h288
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp464
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.h96
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_mock.cpp247
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_mock.h201
-rw-r--r--src/mongo/db/repl/replication_coordinator_global.cpp23
-rw-r--r--src/mongo/db/repl/replication_coordinator_global.h8
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp4668
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.h1772
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect.cpp397
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp688
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp407
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp1085
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp715
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp394
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp394
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp999
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp4106
-rw-r--r--src/mongo/db/repl/replication_coordinator_mock.cpp615
-rw-r--r--src/mongo/db/repl/replication_coordinator_mock.h215
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.cpp503
-rw-r--r--src/mongo/db/repl/replication_coordinator_test_fixture.h310
-rw-r--r--src/mongo/db/repl/replication_executor.cpp1016
-rw-r--r--src/mongo/db/repl/replication_executor.h638
-rw-r--r--src/mongo/db/repl/replication_executor_test.cpp943
-rw-r--r--src/mongo/db/repl/replication_executor_test_fixture.cpp65
-rw-r--r--src/mongo/db/repl/replication_executor_test_fixture.h123
-rw-r--r--src/mongo/db/repl/replication_info.cpp332
-rw-r--r--src/mongo/db/repl/replset_commands.cpp1365
-rw-r--r--src/mongo/db/repl/replset_web_handler.cpp85
-rw-r--r--src/mongo/db/repl/reporter.cpp204
-rw-r--r--src/mongo/db/repl/reporter.h174
-rw-r--r--src/mongo/db/repl/reporter_test.cpp476
-rw-r--r--src/mongo/db/repl/resync.cpp164
-rw-r--r--src/mongo/db/repl/roll_back_local_operations.cpp266
-rw-r--r--src/mongo/db/repl/roll_back_local_operations.h90
-rw-r--r--src/mongo/db/repl/roll_back_local_operations_test.cpp733
-rw-r--r--src/mongo/db/repl/rollback_source.h95
-rw-r--r--src/mongo/db/repl/rollback_source_impl.cpp88
-rw-r--r--src/mongo/db/repl/rollback_source_impl.h44
-rw-r--r--src/mongo/db/repl/rs_initialsync.cpp805
-rw-r--r--src/mongo/db/repl/rs_initialsync.h10
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp1251
-rw-r--r--src/mongo/db/repl/rs_rollback.h90
-rw-r--r--src/mongo/db/repl/rs_rollback_test.cpp985
-rw-r--r--src/mongo/db/repl/rs_sync.cpp140
-rw-r--r--src/mongo/db/repl/rs_sync.h8
-rw-r--r--src/mongo/db/repl/rslog.cpp14
-rw-r--r--src/mongo/db/repl/rslog.h14
-rw-r--r--src/mongo/db/repl/scatter_gather_algorithm.cpp2
-rw-r--r--src/mongo/db/repl/scatter_gather_algorithm.h65
-rw-r--r--src/mongo/db/repl/scatter_gather_runner.cpp200
-rw-r--r--src/mongo/db/repl/scatter_gather_runner.h154
-rw-r--r--src/mongo/db/repl/scatter_gather_test.cpp602
-rw-r--r--src/mongo/db/repl/storage_interface.cpp8
-rw-r--r--src/mongo/db/repl/storage_interface.h32
-rw-r--r--src/mongo/db/repl/storage_interface_impl.cpp20
-rw-r--r--src/mongo/db/repl/storage_interface_impl.h17
-rw-r--r--src/mongo/db/repl/storage_interface_mock.cpp12
-rw-r--r--src/mongo/db/repl/storage_interface_mock.h16
-rw-r--r--src/mongo/db/repl/sync_source_feedback.cpp241
-rw-r--r--src/mongo/db/repl/sync_source_feedback.h88
-rw-r--r--src/mongo/db/repl/sync_tail.cpp1290
-rw-r--r--src/mongo/db/repl/sync_tail.h286
-rw-r--r--src/mongo/db/repl/sync_tail_test.cpp486
-rw-r--r--src/mongo/db/repl/task_runner.cpp255
-rw-r--r--src/mongo/db/repl/task_runner.h240
-rw-r--r--src/mongo/db/repl/task_runner_test.cpp567
-rw-r--r--src/mongo/db/repl/task_runner_test_fixture.cpp113
-rw-r--r--src/mongo/db/repl/task_runner_test_fixture.h65
-rw-r--r--src/mongo/db/repl/topology_coordinator.cpp24
-rw-r--r--src/mongo/db/repl/topology_coordinator.h850
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl.cpp3770
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl.h744
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_test.cpp9615
-rw-r--r--src/mongo/db/repl/update_position_args.cpp171
-rw-r--r--src/mongo/db/repl/update_position_args.h77
-rw-r--r--src/mongo/db/repl/vote_requester.cpp219
-rw-r--r--src/mongo/db/repl/vote_requester.h143
-rw-r--r--src/mongo/db/repl/vote_requester_test.cpp623
-rw-r--r--src/mongo/db/resource.h10
-rw-r--r--src/mongo/db/restapi.cpp445
-rw-r--r--src/mongo/db/restapi.h12
-rw-r--r--src/mongo/db/server_extra_log_context.cpp53
-rw-r--r--src/mongo/db/server_options.cpp16
-rw-r--r--src/mongo/db/server_options.h214
-rw-r--r--src/mongo/db/server_options_helpers.cpp1570
-rw-r--r--src/mongo/db/server_options_helpers.h49
-rw-r--r--src/mongo/db/server_options_test.cpp634
-rw-r--r--src/mongo/db/server_parameters.cpp142
-rw-r--r--src/mongo/db/server_parameters.h189
-rw-r--r--src/mongo/db/server_parameters_inline.h31
-rw-r--r--src/mongo/db/server_parameters_test.cpp84
-rw-r--r--src/mongo/db/service_context.cpp347
-rw-r--r--src/mongo/db/service_context.h568
-rw-r--r--src/mongo/db/service_context_d.cpp400
-rw-r--r--src/mongo/db/service_context_d.h125
-rw-r--r--src/mongo/db/service_context_noop.cpp84
-rw-r--r--src/mongo/db/service_context_noop.h42
-rw-r--r--src/mongo/db/sorter/sorter.cpp1460
-rw-r--r--src/mongo/db/sorter/sorter.h280
-rw-r--r--src/mongo/db/sorter/sorter_test.cpp948
-rw-r--r--src/mongo/db/startup_warnings_common.cpp71
-rw-r--r--src/mongo/db/startup_warnings_common.h8
-rw-r--r--src/mongo/db/startup_warnings_mongod.cpp499
-rw-r--r--src/mongo/db/startup_warnings_mongod.h44
-rw-r--r--src/mongo/db/startup_warnings_mongod_test.cpp162
-rw-r--r--src/mongo/db/stats/counters.cpp196
-rw-r--r--src/mongo/db/stats/counters.h114
-rw-r--r--src/mongo/db/stats/fill_locker_info.cpp63
-rw-r--r--src/mongo/db/stats/fill_locker_info.h10
-rw-r--r--src/mongo/db/stats/fine_clock.h68
-rw-r--r--src/mongo/db/stats/lock_server_status_section.cpp154
-rw-r--r--src/mongo/db/stats/range_deleter_server_status.cpp109
-rw-r--r--src/mongo/db/stats/snapshots.cpp124
-rw-r--r--src/mongo/db/stats/snapshots.h136
-rw-r--r--src/mongo/db/stats/snapshots_webplugins.cpp146
-rw-r--r--src/mongo/db/stats/timer_stats.cpp67
-rw-r--r--src/mongo/db/stats/timer_stats.h84
-rw-r--r--src/mongo/db/stats/top.cpp188
-rw-r--r--src/mongo/db/stats/top.h121
-rw-r--r--src/mongo/db/stats/top_test.cpp10
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.cpp248
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.h97
-rw-r--r--src/mongo/db/storage/capped_callback.h37
-rw-r--r--src/mongo/db/storage/devnull/devnull_init.cpp55
-rw-r--r--src/mongo/db/storage/devnull/devnull_kv_engine.cpp384
-rw-r--r--src/mongo/db/storage/devnull/devnull_kv_engine.h135
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp726
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_btree_impl.h16
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_btree_impl_test.cpp35
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_engine.cpp108
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_engine.h86
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_engine_test.cpp34
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_init.cpp58
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_record_store.cpp963
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_record_store.h253
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp30
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_recovery_unit.cpp40
-rw-r--r--src/mongo/db/storage/in_memory/in_memory_recovery_unit.h50
-rw-r--r--src/mongo/db/storage/index_entry_comparison.cpp234
-rw-r--r--src/mongo/db/storage/index_entry_comparison.h302
-rw-r--r--src/mongo/db/storage/key_string.cpp1979
-rw-r--r--src/mongo/db/storage/key_string.h500
-rw-r--r--src/mongo/db/storage/key_string_test.cpp223
-rw-r--r--src/mongo/db/storage/kv/kv_catalog.cpp621
-rw-r--r--src/mongo/db/storage/kv/kv_catalog.h164
-rw-r--r--src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp250
-rw-r--r--src/mongo/db/storage/kv/kv_collection_catalog_entry.h79
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry.cpp498
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry.h87
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_get_index.cpp49
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_get_index_mock.cpp13
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp94
-rw-r--r--src/mongo/db/storage/kv/kv_engine.h193
-rw-r--r--src/mongo/db/storage/kv/kv_engine_test_harness.cpp622
-rw-r--r--src/mongo/db/storage/kv/kv_engine_test_harness.h16
-rw-r--r--src/mongo/db/storage/kv/kv_storage_engine.cpp364
-rw-r--r--src/mongo/db/storage/kv/kv_storage_engine.h106
-rw-r--r--src/mongo/db/storage/mmap_v1/aligned_builder.cpp207
-rw-r--r--src/mongo/db/storage/mmap_v1/aligned_builder.h211
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp545
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_interface.h14
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp54
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp4066
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic.h823
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp3986
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp32
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_ondisk.h519
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp343
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_test_help.h196
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/key.cpp1040
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/key.h213
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/hashtab.cpp82
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/hashtab.h158
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/index_details.cpp9
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/index_details.h51
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace-inl.h69
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace.cpp17
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace.h116
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp344
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.h346
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp553
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h101
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp258
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h91
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp305
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_index.h70
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_test.cpp49
-rw-r--r--src/mongo/db/storage/mmap_v1/compress.cpp36
-rw-r--r--src/mongo/db/storage/mmap_v1/compress.h19
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.cpp323
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.h273
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file_sync.cpp141
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file_sync.h42
-rw-r--r--src/mongo/db/storage/mmap_v1/diskloc.h271
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.cpp1272
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.h188
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_commitjob.cpp107
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_commitjob.h304
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journal.cpp1263
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journal.h85
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp419
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journal_writer.h264
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journalformat.h304
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journalimpl.h136
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp255
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.cpp1019
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.h88
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp432
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recovery_unit.h236
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_stats.h85
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp408
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.h361
-rw-r--r--src/mongo/db/storage/mmap_v1/durop.cpp233
-rw-r--r--src/mongo/db/storage/mmap_v1/durop.h165
-rw-r--r--src/mongo/db/storage/mmap_v1/extent.cpp124
-rw-r--r--src/mongo/db/storage/mmap_v1/extent.h63
-rw-r--r--src/mongo/db/storage/mmap_v1/extent_manager.cpp96
-rw-r--r--src/mongo/db/storage/mmap_v1/extent_manager.h260
-rw-r--r--src/mongo/db/storage/mmap_v1/file_allocator.cpp644
-rw-r--r--src/mongo/db/storage/mmap_v1/file_allocator.h100
-rw-r--r--src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp208
-rw-r--r--src/mongo/db/storage/mmap_v1/heap_record_store_btree.h326
-rw-r--r--src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp176
-rw-r--r--src/mongo/db/storage/mmap_v1/logfile.cpp294
-rw-r--r--src/mongo/db/storage/mmap_v1/logfile.h55
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.cpp363
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.h392
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_posix.cpp394
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp1274
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h261
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp480
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.h98
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp980
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h322
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_init.cpp66
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp158
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_options.cpp110
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_options.h97
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp77
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_windows.cpp762
-rw-r--r--src/mongo/db/storage/mmap_v1/record.h211
-rw-r--r--src/mongo/db/storage/mmap_v1/record_access_tracker.cpp497
-rw-r--r--src/mongo/db/storage/mmap_v1/record_access_tracker.h199
-rw-r--r--src/mongo/db/storage/mmap_v1/record_access_tracker_test.cpp198
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp1517
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.h471
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp1068
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped.h179
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp290
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h100
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp1274
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp234
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h109
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp720
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.h127
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp156
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h60
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp786
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp1016
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h243
-rw-r--r--src/mongo/db/storage/mmap_v1/repair_database.cpp700
-rw-r--r--src/mongo/db/storage/oplog_hack.cpp58
-rw-r--r--src/mongo/db/storage/oplog_hack.h22
-rw-r--r--src/mongo/db/storage/paths.cpp122
-rw-r--r--src/mongo/db/storage/paths.h99
-rw-r--r--src/mongo/db/storage/record_data.h113
-rw-r--r--src/mongo/db/storage/record_fetcher.h38
-rw-r--r--src/mongo/db/storage/record_store.h935
-rw-r--r--src/mongo/db/storage/record_store_test_datafor.cpp144
-rw-r--r--src/mongo/db/storage/record_store_test_datasize.cpp90
-rw-r--r--src/mongo/db/storage/record_store_test_deleterecord.cpp150
-rw-r--r--src/mongo/db/storage/record_store_test_docwriter.h36
-rw-r--r--src/mongo/db/storage/record_store_test_harness.cpp588
-rw-r--r--src/mongo/db/storage/record_store_test_harness.h32
-rw-r--r--src/mongo/db/storage/record_store_test_insertrecord.cpp218
-rw-r--r--src/mongo/db/storage/record_store_test_manyiter.cpp108
-rw-r--r--src/mongo/db/storage/record_store_test_recorditer.cpp572
-rw-r--r--src/mongo/db/storage/record_store_test_recordstore.cpp46
-rw-r--r--src/mongo/db/storage/record_store_test_repairiter.cpp204
-rw-r--r--src/mongo/db/storage/record_store_test_storagesize.cpp62
-rw-r--r--src/mongo/db/storage/record_store_test_touch.cpp202
-rw-r--r--src/mongo/db/storage/record_store_test_truncate.cpp116
-rw-r--r--src/mongo/db/storage/record_store_test_updaterecord.cpp304
-rw-r--r--src/mongo/db/storage/record_store_test_updaterecord.h77
-rw-r--r--src/mongo/db/storage/record_store_test_updatewithdamages.cpp372
-rw-r--r--src/mongo/db/storage/record_store_test_validate.cpp352
-rw-r--r--src/mongo/db/storage/record_store_test_validate.h122
-rw-r--r--src/mongo/db/storage/recovery_unit.h249
-rw-r--r--src/mongo/db/storage/recovery_unit_noop.h74
-rw-r--r--src/mongo/db/storage/snapshot.h108
-rw-r--r--src/mongo/db/storage/sorted_data_interface.h549
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp324
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor.cpp198
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp966
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp728
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp945
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp843
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp194
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp188
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp62
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.cpp789
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.h169
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_insert.cpp476
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_isempty.cpp64
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_rollback.cpp178
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp106
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_touch.cpp74
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_unindex.cpp362
-rw-r--r--src/mongo/db/storage/storage_engine.h289
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file.h110
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_posix.cpp247
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_test.cpp244
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_windows.cpp250
-rw-r--r--src/mongo/db/storage/storage_engine_metadata.cpp339
-rw-r--r--src/mongo/db/storage/storage_engine_metadata.h151
-rw-r--r--src/mongo/db/storage/storage_engine_metadata_test.cpp436
-rw-r--r--src/mongo/db/storage/storage_init.cpp23
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp54
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h53
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp232
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h54
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp1851
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.h278
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp153
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp129
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp221
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp677
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h159
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp45
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp29
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp34
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h28
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp1882
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h426
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mock.cpp16
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp200
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp1310
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp576
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h191
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp61
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h28
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp300
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h167
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp310
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h74
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp660
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.h422
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp537
-rw-r--r--src/mongo/db/storage_options.cpp52
-rw-r--r--src/mongo/db/storage_options.h143
-rw-r--r--src/mongo/db/ttl.cpp464
-rw-r--r--src/mongo/db/ttl.h2
-rw-r--r--src/mongo/db/update_index_data.cpp198
-rw-r--r--src/mongo/db/update_index_data.h70
-rw-r--r--src/mongo/db/update_index_data_test.cpp185
-rw-r--r--src/mongo/db/wire_version.h64
-rw-r--r--src/mongo/db/write_concern.cpp336
-rw-r--r--src/mongo/db/write_concern.h135
-rw-r--r--src/mongo/db/write_concern_options.cpp251
-rw-r--r--src/mongo/db/write_concern_options.h152
1242 files changed, 232393 insertions, 234023 deletions
diff --git a/src/mongo/db/audit.cpp b/src/mongo/db/audit.cpp
index b07c137c8ab..f781b237f96 100644
--- a/src/mongo/db/audit.cpp
+++ b/src/mongo/db/audit.cpp
@@ -31,16 +31,17 @@
#if MONGO_ENTERPRISE_VERSION
#define MONGO_AUDIT_STUB ;
#else
-#define MONGO_AUDIT_STUB {}
+#define MONGO_AUDIT_STUB \
+ {}
#endif
namespace mongo {
namespace audit {
- void logAuthentication(ClientBasic* client,
- StringData mechanism,
- const UserName& user,
- ErrorCodes::Error result) MONGO_AUDIT_STUB
+void logAuthentication(ClientBasic* client,
+ StringData mechanism,
+ const UserName& user,
+ ErrorCodes::Error result) MONGO_AUDIT_STUB
void logCommandAuthzCheck(ClientBasic* client,
const std::string& dbname,
@@ -48,44 +49,38 @@ namespace audit {
Command* command,
ErrorCodes::Error result) MONGO_AUDIT_STUB
- void logDeleteAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- const BSONObj& pattern,
- ErrorCodes::Error result) MONGO_AUDIT_STUB
-
- void logGetMoreAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- long long cursorId,
- ErrorCodes::Error result) MONGO_AUDIT_STUB
-
- void logInsertAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- const BSONObj& insertedObj,
- ErrorCodes::Error result) MONGO_AUDIT_STUB
-
- void logKillCursorsAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- long long cursorId,
- ErrorCodes::Error result) MONGO_AUDIT_STUB
-
- void logQueryAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- const BSONObj& query,
- ErrorCodes::Error result) MONGO_AUDIT_STUB
-
- void logUpdateAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- const BSONObj& query,
- const BSONObj& updateObj,
- bool isUpsert,
- bool isMulti,
- ErrorCodes::Error result) MONGO_AUDIT_STUB
+ void logDeleteAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ const BSONObj& pattern,
+ ErrorCodes::Error result) MONGO_AUDIT_STUB
+
+ void logGetMoreAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ long long cursorId,
+ ErrorCodes::Error result) MONGO_AUDIT_STUB
+
+ void logInsertAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ const BSONObj& insertedObj,
+ ErrorCodes::Error result) MONGO_AUDIT_STUB
+
+ void logKillCursorsAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ long long cursorId,
+ ErrorCodes::Error result) MONGO_AUDIT_STUB
+
+ void logQueryAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ const BSONObj& query,
+ ErrorCodes::Error result) MONGO_AUDIT_STUB
+
+ void logUpdateAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ const BSONObj& query,
+ const BSONObj& updateObj,
+ bool isUpsert,
+ bool isMulti,
+ ErrorCodes::Error result) MONGO_AUDIT_STUB
void logCreateUser(ClientBasic* client,
const UserName& username,
@@ -93,11 +88,9 @@ namespace audit {
const BSONObj* customData,
const std::vector<RoleName>& roles) MONGO_AUDIT_STUB
- void logDropUser(ClientBasic* client,
- const UserName& username) MONGO_AUDIT_STUB
+ void logDropUser(ClientBasic* client, const UserName& username) MONGO_AUDIT_STUB
- void logDropAllUsersFromDatabase(ClientBasic* client,
- StringData dbname) MONGO_AUDIT_STUB
+ void logDropAllUsersFromDatabase(ClientBasic* client, StringData dbname) MONGO_AUDIT_STUB
void logUpdateUser(ClientBasic* client,
const UserName& username,
@@ -123,11 +116,9 @@ namespace audit {
const std::vector<RoleName>* roles,
const PrivilegeVector* privileges) MONGO_AUDIT_STUB
- void logDropRole(ClientBasic* client,
- const RoleName& role) MONGO_AUDIT_STUB
+ void logDropRole(ClientBasic* client, const RoleName& role) MONGO_AUDIT_STUB
- void logDropAllRolesFromDatabase(ClientBasic* client,
- StringData dbname) MONGO_AUDIT_STUB
+ void logDropAllRolesFromDatabase(ClientBasic* client, StringData dbname) MONGO_AUDIT_STUB
void logGrantRolesToRole(ClientBasic* client,
const RoleName& role,
@@ -149,8 +140,7 @@ namespace audit {
const BSONObj* oldConfig,
const BSONObj* newConfig) MONGO_AUDIT_STUB
- void logApplicationMessage(ClientBasic* client,
- StringData msg) MONGO_AUDIT_STUB
+ void logApplicationMessage(ClientBasic* client, StringData msg) MONGO_AUDIT_STUB
void logShutdown(ClientBasic* client) MONGO_AUDIT_STUB
@@ -159,37 +149,29 @@ namespace audit {
StringData indexname,
StringData nsname) MONGO_AUDIT_STUB
- void logCreateCollection(ClientBasic* client,
- StringData nsname) MONGO_AUDIT_STUB
+ void logCreateCollection(ClientBasic* client, StringData nsname) MONGO_AUDIT_STUB
- void logCreateDatabase(ClientBasic* client,
- StringData dbname) MONGO_AUDIT_STUB
+ void logCreateDatabase(ClientBasic* client, StringData dbname) MONGO_AUDIT_STUB
- void logDropIndex(ClientBasic* client,
- StringData indexname,
- StringData nsname) MONGO_AUDIT_STUB
+ void logDropIndex(ClientBasic* client, StringData indexname, StringData nsname) MONGO_AUDIT_STUB
- void logDropCollection(ClientBasic* client,
- StringData nsname) MONGO_AUDIT_STUB
+ void logDropCollection(ClientBasic* client, StringData nsname) MONGO_AUDIT_STUB
- void logDropDatabase(ClientBasic* client,
- StringData dbname) MONGO_AUDIT_STUB
+ void logDropDatabase(ClientBasic* client, StringData dbname) MONGO_AUDIT_STUB
void logRenameCollection(ClientBasic* client,
StringData source,
StringData target) MONGO_AUDIT_STUB
- void logEnableSharding(ClientBasic* client,
- StringData dbname) MONGO_AUDIT_STUB
+ void logEnableSharding(ClientBasic* client, StringData dbname) MONGO_AUDIT_STUB
void logAddShard(ClientBasic* client,
StringData name,
const std::string& servers,
long long maxSize) MONGO_AUDIT_STUB
- void logRemoveShard(ClientBasic* client,
- StringData shardname) MONGO_AUDIT_STUB
+ void logRemoveShard(ClientBasic* client, StringData shardname) MONGO_AUDIT_STUB
void logShardCollection(ClientBasic* client,
StringData ns,
@@ -198,16 +180,13 @@ namespace audit {
void writeImpersonatedUsersToMetadata(BSONObjBuilder* metadata) MONGO_AUDIT_STUB
- void parseAndRemoveImpersonatedUsersField(
- BSONObj cmdObj,
- std::vector<UserName>* parsedUserNames,
- bool* fieldIsPresent) MONGO_AUDIT_STUB
+ void parseAndRemoveImpersonatedUsersField(BSONObj cmdObj,
+ std::vector<UserName>* parsedUserNames,
+ bool* fieldIsPresent) MONGO_AUDIT_STUB
- void parseAndRemoveImpersonatedRolesField(
- BSONObj cmdObj,
- std::vector<RoleName>* parsedRoleNames,
- bool* fieldIsPresent) MONGO_AUDIT_STUB
+ void parseAndRemoveImpersonatedRolesField(BSONObj cmdObj,
+ std::vector<RoleName>* parsedRoleNames,
+ bool* fieldIsPresent) MONGO_AUDIT_STUB
} // namespace audit
} // namespace mongo
-
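[Editor's note] The hunk above shows the community-build stub trick: MONGO_AUDIT_STUB expands to "{}" so every audit declaration in audit.cpp becomes an empty definition, while enterprise builds expand it to ";" and supply real bodies elsewhere. A minimal, self-contained sketch of the same pattern follows; the names MY_ENTERPRISE_BUILD, MY_AUDIT_STUB, and logSomething are hypothetical and not part of MongoDB.

    #include <iostream>

    #if defined(MY_ENTERPRISE_BUILD)
    #define MY_AUDIT_STUB ;   // declaration only; another module supplies the body
    #else
    #define MY_AUDIT_STUB {}  // empty body: each "declaration" becomes a no-op definition
    #endif

    // With the community-style expansion this is a complete, do-nothing function.
    void logSomething(int code) MY_AUDIT_STUB

    int main() {
        logSomething(42);  // links and does nothing in the stubbed build
        std::cout << "stub called\n";
        return 0;
    }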
diff --git a/src/mongo/db/audit.h b/src/mongo/db/audit.h
index fa8604871f5..4dc187da2b1 100644
--- a/src/mongo/db/audit.h
+++ b/src/mongo/db/audit.h
@@ -39,335 +39,306 @@
namespace mongo {
- class AuthorizationSession;
- class BSONObj;
- class ClientBasic;
- class Command;
- class NamespaceString;
- class ReplSetConfig;
- class StringData;
- class UserName;
+class AuthorizationSession;
+class BSONObj;
+class ClientBasic;
+class Command;
+class NamespaceString;
+class ReplSetConfig;
+class StringData;
+class UserName;
namespace audit {
- /**
- * Logs the result of an authentication attempt.
- */
- void logAuthentication(ClientBasic* client,
- StringData mechanism,
- const UserName& user,
- ErrorCodes::Error result);
-
- //
- // Authorization (authz) logging functions.
- //
- // These functions generate log messages describing the disposition of access control
- // checks.
- //
-
- /**
- * Logs the result of a command authorization check.
- */
- void logCommandAuthzCheck(
- ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj,
- Command* command,
- ErrorCodes::Error result);
-
- /**
- * Logs the result of an authorization check for an OP_DELETE wire protocol message.
- */
- void logDeleteAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- const BSONObj& pattern,
- ErrorCodes::Error result);
-
- /**
- * Logs the result of an authorization check for an OP_GET_MORE wire protocol message.
- */
- void logGetMoreAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- long long cursorId,
- ErrorCodes::Error result);
-
- /**
- * Logs the result of an authorization check for an OP_INSERT wire protocol message.
- */
- void logInsertAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- const BSONObj& insertedObj,
- ErrorCodes::Error result);
-
- /**
- * Logs the result of an authorization check for an OP_KILL_CURSORS wire protocol message.
- */
- void logKillCursorsAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- long long cursorId,
- ErrorCodes::Error result);
-
- /**
- * Logs the result of an authorization check for an OP_QUERY wire protocol message.
- */
- void logQueryAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- const BSONObj& query,
- ErrorCodes::Error result);
-
- /**
- * Logs the result of an authorization check for an OP_UPDATE wire protocol message.
- */
- void logUpdateAuthzCheck(
- ClientBasic* client,
- const NamespaceString& ns,
- const BSONObj& query,
- const BSONObj& updateObj,
- bool isUpsert,
- bool isMulti,
- ErrorCodes::Error result);
-
- /**
- * Logs the result of a createUser command.
- */
- void logCreateUser(ClientBasic* client,
- const UserName& username,
- bool password,
- const BSONObj* customData,
- const std::vector<RoleName>& roles);
-
- /**
- * Logs the result of a dropUser command.
- */
- void logDropUser(ClientBasic* client,
- const UserName& username);
-
- /**
- * Logs the result of a dropAllUsersFromDatabase command.
- */
- void logDropAllUsersFromDatabase(ClientBasic* client,
- StringData dbname);
-
- /**
- * Logs the result of a updateUser command.
- */
- void logUpdateUser(ClientBasic* client,
- const UserName& username,
- bool password,
- const BSONObj* customData,
- const std::vector<RoleName>* roles);
-
- /**
- * Logs the result of a grantRolesToUser command.
- */
- void logGrantRolesToUser(ClientBasic* client,
- const UserName& username,
- const std::vector<RoleName>& roles);
-
- /**
- * Logs the result of a revokeRolesFromUser command.
- */
- void logRevokeRolesFromUser(ClientBasic* client,
- const UserName& username,
- const std::vector<RoleName>& roles);
-
- /**
- * Logs the result of a createRole command.
- */
- void logCreateRole(ClientBasic* client,
- const RoleName& role,
- const std::vector<RoleName>& roles,
- const PrivilegeVector& privileges);
-
- /**
- * Logs the result of a updateRole command.
- */
- void logUpdateRole(ClientBasic* client,
- const RoleName& role,
- const std::vector<RoleName>* roles,
- const PrivilegeVector* privileges);
-
- /**
- * Logs the result of a dropRole command.
- */
- void logDropRole(ClientBasic* client,
- const RoleName& role);
-
- /**
- * Logs the result of a dropAllRolesForDatabase command.
- */
- void logDropAllRolesFromDatabase(ClientBasic* client,
- StringData dbname);
-
- /**
- * Logs the result of a grantRolesToRole command.
- */
- void logGrantRolesToRole(ClientBasic* client,
- const RoleName& role,
- const std::vector<RoleName>& roles);
-
- /**
- * Logs the result of a revokeRolesFromRole command.
- */
- void logRevokeRolesFromRole(ClientBasic* client,
- const RoleName& role,
- const std::vector<RoleName>& roles);
-
- /**
- * Logs the result of a grantPrivilegesToRole command.
- */
- void logGrantPrivilegesToRole(ClientBasic* client,
- const RoleName& role,
- const PrivilegeVector& privileges);
-
- /**
- * Logs the result of a revokePrivilegesFromRole command.
- */
- void logRevokePrivilegesFromRole(ClientBasic* client,
- const RoleName& role,
- const PrivilegeVector& privileges);
-
- /**
- * Logs the result of a replSet(Re)config command.
- */
- void logReplSetReconfig(ClientBasic* client,
- const BSONObj* oldConfig,
- const BSONObj* newConfig);
-
- /**
- * Logs the result of an ApplicationMessage command.
- */
- void logApplicationMessage(ClientBasic* client,
- StringData msg);
-
- /**
- * Logs the result of a shutdown command.
- */
- void logShutdown(ClientBasic* client);
-
- /**
- * Logs the result of a createIndex command.
- */
- void logCreateIndex(ClientBasic* client,
- const BSONObj* indexSpec,
- StringData indexname,
- StringData nsname);
-
- /**
- * Logs the result of a createCollection command.
- */
- void logCreateCollection(ClientBasic* client,
- StringData nsname);
-
- /**
- * Logs the result of a createDatabase command.
- */
- void logCreateDatabase(ClientBasic* client,
- StringData dbname);
-
-
- /**
- * Logs the result of a dropIndex command.
- */
- void logDropIndex(ClientBasic* client,
- StringData indexname,
- StringData nsname);
-
- /**
- * Logs the result of a dropCollection command.
- */
- void logDropCollection(ClientBasic* client,
- StringData nsname);
-
- /**
- * Logs the result of a dropDatabase command.
- */
- void logDropDatabase(ClientBasic* client,
- StringData dbname);
-
- /**
- * Logs a collection rename event.
- */
- void logRenameCollection(ClientBasic* client,
- StringData source,
- StringData target);
-
- /**
- * Logs the result of a enableSharding command.
- */
- void logEnableSharding(ClientBasic* client,
- StringData dbname);
-
- /**
- * Logs the result of a addShard command.
- */
- void logAddShard(ClientBasic* client,
- StringData name,
- const std::string& servers,
- long long maxSize);
-
- /**
- * Logs the result of a removeShard command.
- */
- void logRemoveShard(ClientBasic* client,
- StringData shardname);
-
- /**
- * Logs the result of a shardCollection command.
- */
- void logShardCollection(ClientBasic* client,
- StringData ns,
- const BSONObj& keyPattern,
- bool unique);
-
-
- /*
- * Appends an array of user/db pairs and an array of role/db pairs
- * to the provided metadata builder. The users and roles are extracted from the current client.
- * They are to be the impersonated users and roles for a Command run by an internal user.
- */
- void writeImpersonatedUsersToMetadata(BSONObjBuilder* metadataBob);
-
- /*
- * Looks for an 'impersonatedUsers' field. This field is used by mongos to
- * transmit the usernames of the currently authenticated user when it runs commands
- * on a shard using internal user authentication. Auditing uses this information
- * to properly ascribe users to actions. This is necessary only for implicit actions that
- * mongos cannot properly audit itself; examples are implicit collection and database creation.
- * This function requires that the field is the last field in the bson object; it edits the
- * command BSON to efficiently remove the field before returning.
- *
- * cmdObj [in, out]: If any impersonated users field exists, it will be parsed and removed.
- * parsedUserNames [out]: populated with parsed usernames
- * fieldIsPresent [out]: true if impersonatedUsers field was present in the object
- */
- void parseAndRemoveImpersonatedUsersField(
- BSONObj cmdObj,
- std::vector<UserName>* parsedUserNames,
- bool* fieldIsPresent);
-
- /*
- * Looks for an 'impersonatedRoles' field. This field is used by mongos to
- * transmit the roles of the currently authenticated user when it runs commands
- * on a shard using internal user authentication. Auditing uses this information
- * to properly ascribe user roles to actions. This is necessary only for implicit actions that
- * mongos cannot properly audit itself; examples are implicit collection and database creation.
- * This function requires that the field is the last field in the bson object; it edits the
- * command BSON to efficiently remove the field before returning.
- *
- * cmdObj [in, out]: If any impersonated roles field exists, it will be parsed and removed.
- * parsedRoleNames [out]: populated with parsed user rolenames
- * fieldIsPresent [out]: true if impersonatedRoles field was present in the object
- */
- void parseAndRemoveImpersonatedRolesField(
- BSONObj cmdObj,
- std::vector<RoleName>* parsedRoleNames,
- bool* fieldIsPresent);
+/**
+ * Logs the result of an authentication attempt.
+ */
+void logAuthentication(ClientBasic* client,
+ StringData mechanism,
+ const UserName& user,
+ ErrorCodes::Error result);
+
+//
+// Authorization (authz) logging functions.
+//
+// These functions generate log messages describing the disposition of access control
+// checks.
+//
+
+/**
+ * Logs the result of a command authorization check.
+ */
+void logCommandAuthzCheck(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ Command* command,
+ ErrorCodes::Error result);
+
+/**
+ * Logs the result of an authorization check for an OP_DELETE wire protocol message.
+ */
+void logDeleteAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ const BSONObj& pattern,
+ ErrorCodes::Error result);
+
+/**
+ * Logs the result of an authorization check for an OP_GET_MORE wire protocol message.
+ */
+void logGetMoreAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ long long cursorId,
+ ErrorCodes::Error result);
+
+/**
+ * Logs the result of an authorization check for an OP_INSERT wire protocol message.
+ */
+void logInsertAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ const BSONObj& insertedObj,
+ ErrorCodes::Error result);
+
+/**
+ * Logs the result of an authorization check for an OP_KILL_CURSORS wire protocol message.
+ */
+void logKillCursorsAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ long long cursorId,
+ ErrorCodes::Error result);
+
+/**
+ * Logs the result of an authorization check for an OP_QUERY wire protocol message.
+ */
+void logQueryAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ const BSONObj& query,
+ ErrorCodes::Error result);
+
+/**
+ * Logs the result of an authorization check for an OP_UPDATE wire protocol message.
+ */
+void logUpdateAuthzCheck(ClientBasic* client,
+ const NamespaceString& ns,
+ const BSONObj& query,
+ const BSONObj& updateObj,
+ bool isUpsert,
+ bool isMulti,
+ ErrorCodes::Error result);
+
+/**
+ * Logs the result of a createUser command.
+ */
+void logCreateUser(ClientBasic* client,
+ const UserName& username,
+ bool password,
+ const BSONObj* customData,
+ const std::vector<RoleName>& roles);
+
+/**
+ * Logs the result of a dropUser command.
+ */
+void logDropUser(ClientBasic* client, const UserName& username);
+
+/**
+ * Logs the result of a dropAllUsersFromDatabase command.
+ */
+void logDropAllUsersFromDatabase(ClientBasic* client, StringData dbname);
+
+/**
+ * Logs the result of a updateUser command.
+ */
+void logUpdateUser(ClientBasic* client,
+ const UserName& username,
+ bool password,
+ const BSONObj* customData,
+ const std::vector<RoleName>* roles);
+
+/**
+ * Logs the result of a grantRolesToUser command.
+ */
+void logGrantRolesToUser(ClientBasic* client,
+ const UserName& username,
+ const std::vector<RoleName>& roles);
+
+/**
+ * Logs the result of a revokeRolesFromUser command.
+ */
+void logRevokeRolesFromUser(ClientBasic* client,
+ const UserName& username,
+ const std::vector<RoleName>& roles);
+
+/**
+ * Logs the result of a createRole command.
+ */
+void logCreateRole(ClientBasic* client,
+ const RoleName& role,
+ const std::vector<RoleName>& roles,
+ const PrivilegeVector& privileges);
+
+/**
+ * Logs the result of a updateRole command.
+ */
+void logUpdateRole(ClientBasic* client,
+ const RoleName& role,
+ const std::vector<RoleName>* roles,
+ const PrivilegeVector* privileges);
+
+/**
+ * Logs the result of a dropRole command.
+ */
+void logDropRole(ClientBasic* client, const RoleName& role);
+
+/**
+ * Logs the result of a dropAllRolesForDatabase command.
+ */
+void logDropAllRolesFromDatabase(ClientBasic* client, StringData dbname);
+
+/**
+ * Logs the result of a grantRolesToRole command.
+ */
+void logGrantRolesToRole(ClientBasic* client,
+ const RoleName& role,
+ const std::vector<RoleName>& roles);
+
+/**
+ * Logs the result of a revokeRolesFromRole command.
+ */
+void logRevokeRolesFromRole(ClientBasic* client,
+ const RoleName& role,
+ const std::vector<RoleName>& roles);
+
+/**
+ * Logs the result of a grantPrivilegesToRole command.
+ */
+void logGrantPrivilegesToRole(ClientBasic* client,
+ const RoleName& role,
+ const PrivilegeVector& privileges);
+
+/**
+ * Logs the result of a revokePrivilegesFromRole command.
+ */
+void logRevokePrivilegesFromRole(ClientBasic* client,
+ const RoleName& role,
+ const PrivilegeVector& privileges);
+
+/**
+ * Logs the result of a replSet(Re)config command.
+ */
+void logReplSetReconfig(ClientBasic* client, const BSONObj* oldConfig, const BSONObj* newConfig);
+
+/**
+ * Logs the result of an ApplicationMessage command.
+ */
+void logApplicationMessage(ClientBasic* client, StringData msg);
+
+/**
+ * Logs the result of a shutdown command.
+ */
+void logShutdown(ClientBasic* client);
+
+/**
+ * Logs the result of a createIndex command.
+ */
+void logCreateIndex(ClientBasic* client,
+ const BSONObj* indexSpec,
+ StringData indexname,
+ StringData nsname);
+
+/**
+ * Logs the result of a createCollection command.
+ */
+void logCreateCollection(ClientBasic* client, StringData nsname);
+
+/**
+ * Logs the result of a createDatabase command.
+ */
+void logCreateDatabase(ClientBasic* client, StringData dbname);
+
+
+/**
+ * Logs the result of a dropIndex command.
+ */
+void logDropIndex(ClientBasic* client, StringData indexname, StringData nsname);
+
+/**
+ * Logs the result of a dropCollection command.
+ */
+void logDropCollection(ClientBasic* client, StringData nsname);
+
+/**
+ * Logs the result of a dropDatabase command.
+ */
+void logDropDatabase(ClientBasic* client, StringData dbname);
+
+/**
+ * Logs a collection rename event.
+ */
+void logRenameCollection(ClientBasic* client, StringData source, StringData target);
+
+/**
+ * Logs the result of a enableSharding command.
+ */
+void logEnableSharding(ClientBasic* client, StringData dbname);
+
+/**
+ * Logs the result of a addShard command.
+ */
+void logAddShard(ClientBasic* client,
+ StringData name,
+ const std::string& servers,
+ long long maxSize);
+
+/**
+ * Logs the result of a removeShard command.
+ */
+void logRemoveShard(ClientBasic* client, StringData shardname);
+
+/**
+ * Logs the result of a shardCollection command.
+ */
+void logShardCollection(ClientBasic* client, StringData ns, const BSONObj& keyPattern, bool unique);
+
+
+/*
+ * Appends an array of user/db pairs and an array of role/db pairs
+ * to the provided metadata builder. The users and roles are extracted from the current client.
+ * They are to be the impersonated users and roles for a Command run by an internal user.
+ */
+void writeImpersonatedUsersToMetadata(BSONObjBuilder* metadataBob);
+
+/*
+ * Looks for an 'impersonatedUsers' field. This field is used by mongos to
+ * transmit the usernames of the currently authenticated user when it runs commands
+ * on a shard using internal user authentication. Auditing uses this information
+ * to properly ascribe users to actions. This is necessary only for implicit actions that
+ * mongos cannot properly audit itself; examples are implicit collection and database creation.
+ * This function requires that the field is the last field in the bson object; it edits the
+ * command BSON to efficiently remove the field before returning.
+ *
+ * cmdObj [in, out]: If any impersonated users field exists, it will be parsed and removed.
+ * parsedUserNames [out]: populated with parsed usernames
+ * fieldIsPresent [out]: true if impersonatedUsers field was present in the object
+ */
+void parseAndRemoveImpersonatedUsersField(BSONObj cmdObj,
+ std::vector<UserName>* parsedUserNames,
+ bool* fieldIsPresent);
+
+/*
+ * Looks for an 'impersonatedRoles' field. This field is used by mongos to
+ * transmit the roles of the currently authenticated user when it runs commands
+ * on a shard using internal user authentication. Auditing uses this information
+ * to properly ascribe user roles to actions. This is necessary only for implicit actions that
+ * mongos cannot properly audit itself; examples are implicit collection and database creation.
+ * This function requires that the field is the last field in the bson object; it edits the
+ * command BSON to efficiently remove the field before returning.
+ *
+ * cmdObj [in, out]: If any impersonated roles field exists, it will be parsed and removed.
+ * parsedRoleNames [out]: populated with parsed user rolenames
+ * fieldIsPresent [out]: true if impersonatedRoles field was present in the object
+ */
+void parseAndRemoveImpersonatedRolesField(BSONObj cmdObj,
+ std::vector<RoleName>* parsedRoleNames,
+ bool* fieldIsPresent);
} // namespace audit
} // namespace mongo
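[Editor's note] The reformatted header above documents the audit hook API. As a purely illustrative, hypothetical call-site sketch (it only builds inside the MongoDB source tree, and onAuthAttempt is an invented wrapper, not code from this commit), server code might report an authentication outcome like this; only the logAuthentication signature is taken from the header.

    #include "mongo/db/audit.h"

    // Hypothetical helper: forwards an authentication result to the audit subsystem.
    void onAuthAttempt(mongo::ClientBasic* client,
                       const mongo::UserName& user,
                       mongo::ErrorCodes::Error result) {
        // Mechanism string is an example value; StringData converts from a C string.
        mongo::audit::logAuthentication(client, "SCRAM-SHA-1", user, result);
    }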
diff --git a/src/mongo/db/auth/action_set.cpp b/src/mongo/db/auth/action_set.cpp
index 85bcc079814..6f89222679b 100644
--- a/src/mongo/db/auth/action_set.cpp
+++ b/src/mongo/db/auth/action_set.cpp
@@ -41,110 +41,109 @@
namespace mongo {
- void ActionSet::addAction(const ActionType& action) {
- if (action == ActionType::anyAction) {
- addAllActions();
- return;
- }
- _actions.set(action.getIdentifier(), true);
+void ActionSet::addAction(const ActionType& action) {
+ if (action == ActionType::anyAction) {
+ addAllActions();
+ return;
}
+ _actions.set(action.getIdentifier(), true);
+}
- void ActionSet::addAllActionsFromSet(const ActionSet& actions) {
- if (actions.contains(ActionType::anyAction)) {
- addAllActions();
- return;
- }
- _actions |= actions._actions;
+void ActionSet::addAllActionsFromSet(const ActionSet& actions) {
+ if (actions.contains(ActionType::anyAction)) {
+ addAllActions();
+ return;
}
+ _actions |= actions._actions;
+}
- void ActionSet::addAllActions() {
- _actions = ~std::bitset<ActionType::NUM_ACTION_TYPES>();
- }
+void ActionSet::addAllActions() {
+ _actions = ~std::bitset<ActionType::NUM_ACTION_TYPES>();
+}
+
+void ActionSet::removeAction(const ActionType& action) {
+ _actions.set(action.getIdentifier(), false);
+ _actions.set(ActionType::anyAction.getIdentifier(), false);
+}
- void ActionSet::removeAction(const ActionType& action) {
- _actions.set(action.getIdentifier(), false);
+void ActionSet::removeAllActionsFromSet(const ActionSet& other) {
+ _actions &= ~other._actions;
+ if (!other.empty()) {
_actions.set(ActionType::anyAction.getIdentifier(), false);
}
-
- void ActionSet::removeAllActionsFromSet(const ActionSet& other) {
- _actions &= ~other._actions;
- if (!other.empty()) {
- _actions.set(ActionType::anyAction.getIdentifier(), false);
+}
+
+void ActionSet::removeAllActions() {
+ _actions = std::bitset<ActionType::NUM_ACTION_TYPES>();
+}
+
+bool ActionSet::contains(const ActionType& action) const {
+ return _actions[action.getIdentifier()];
+}
+
+bool ActionSet::isSupersetOf(const ActionSet& other) const {
+ return (_actions & other._actions) == other._actions;
+}
+
+Status ActionSet::parseActionSetFromString(const std::string& actionsString, ActionSet* result) {
+ std::vector<std::string> actionsList;
+ splitStringDelim(actionsString, &actionsList, ',');
+ return parseActionSetFromStringVector(actionsList, result);
+}
+
+Status ActionSet::parseActionSetFromStringVector(const std::vector<std::string>& actionsVector,
+ ActionSet* result) {
+ ActionSet actions;
+ for (size_t i = 0; i < actionsVector.size(); i++) {
+ ActionType action;
+ Status status = ActionType::parseActionFromString(actionsVector[i], &action);
+ if (status != Status::OK()) {
+ ActionSet empty;
+ *result = empty;
+ return status;
}
+ if (action == ActionType::anyAction) {
+ actions.addAllActions();
+ break;
+ }
+ actions.addAction(action);
}
+ *result = actions;
+ return Status::OK();
+}
- void ActionSet::removeAllActions() {
- _actions = std::bitset<ActionType::NUM_ACTION_TYPES>();
- }
-
- bool ActionSet::contains(const ActionType& action) const {
- return _actions[action.getIdentifier()];
- }
-
- bool ActionSet::isSupersetOf(const ActionSet& other) const {
- return (_actions & other._actions) == other._actions;
- }
-
- Status ActionSet::parseActionSetFromString(const std::string& actionsString,
- ActionSet* result) {
- std::vector<std::string> actionsList;
- splitStringDelim(actionsString, &actionsList, ',');
- return parseActionSetFromStringVector(actionsList, result);
+std::string ActionSet::toString() const {
+ if (contains(ActionType::anyAction)) {
+ return ActionType::anyAction.toString();
}
-
- Status ActionSet::parseActionSetFromStringVector(const std::vector<std::string>& actionsVector,
- ActionSet* result) {
- ActionSet actions;
- for (size_t i = 0; i < actionsVector.size(); i++) {
- ActionType action;
- Status status = ActionType::parseActionFromString(actionsVector[i], &action);
- if (status != Status::OK()) {
- ActionSet empty;
- *result = empty;
- return status;
+ StringBuilder str;
+ bool addedOne = false;
+ for (int i = 0; i < ActionType::actionTypeEndValue; i++) {
+ ActionType action(i);
+ if (contains(action)) {
+ if (addedOne) {
+ str << ",";
}
- if (action == ActionType::anyAction) {
- actions.addAllActions();
- break;
- }
- actions.addAction(action);
+ str << ActionType::actionToString(action);
+ addedOne = true;
}
- *result = actions;
- return Status::OK();
}
+ return str.str();
+}
- std::string ActionSet::toString() const {
- if (contains(ActionType::anyAction)) {
- return ActionType::anyAction.toString();
- }
- StringBuilder str;
- bool addedOne = false;
- for (int i = 0; i < ActionType::actionTypeEndValue; i++) {
- ActionType action(i);
- if (contains(action)) {
- if (addedOne) {
- str << ",";
- }
- str << ActionType::actionToString(action);
- addedOne = true;
- }
- }
- return str.str();
+std::vector<std::string> ActionSet::getActionsAsStrings() const {
+ std::vector<std::string> result;
+ if (contains(ActionType::anyAction)) {
+ result.push_back(ActionType::anyAction.toString());
+ return result;
}
-
- std::vector<std::string> ActionSet::getActionsAsStrings() const {
- std::vector<std::string> result;
- if (contains(ActionType::anyAction)) {
- result.push_back(ActionType::anyAction.toString());
- return result;
+ for (int i = 0; i < ActionType::actionTypeEndValue; i++) {
+ ActionType action(i);
+ if (contains(action)) {
+ result.push_back(ActionType::actionToString(action));
}
- for (int i = 0; i < ActionType::actionTypeEndValue; i++) {
- ActionType action(i);
- if (contains(action)) {
- result.push_back(ActionType::actionToString(action));
- }
- }
- return result;
}
+ return result;
+}
-} // namespace mongo
+} // namespace mongo
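[Editor's note] The ActionSet implementation above is a bitmask of ActionTypes with a special anyAction bit. A short usage sketch, assuming the mongo headers from this tree (it is not part of the commit and compiles only inside the MongoDB source); the behavior shown mirrors parseActionSetFromString, contains, toString, and removeAction as reformatted above.

    #include "mongo/db/auth/action_set.h"
    #include "mongo/db/auth/action_type.h"

    using namespace mongo;

    void actionSetExample() {
        ActionSet set;
        // Parse a comma-separated list of action names.
        Status status = ActionSet::parseActionSetFromString("find,insert", &set);
        if (!status.isOK()) {
            return;
        }
        bool canFind = set.contains(ActionType::find);     // true
        bool canRemove = set.contains(ActionType::remove);  // false for this set
        std::string repr = set.toString();                   // "find,insert"

        // Granting anyAction sets every bit; removing one action afterwards
        // also clears the anyAction bit, per removeAction() above.
        set.addAction(ActionType::anyAction);
        set.removeAction(ActionType::find);
        bool stillAny = set.contains(ActionType::anyAction);  // false
        (void)canFind; (void)canRemove; (void)repr; (void)stillAny;
    }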
diff --git a/src/mongo/db/auth/action_set.h b/src/mongo/db/auth/action_set.h
index 4a5c6ef45d0..339a68f019c 100644
--- a/src/mongo/db/auth/action_set.h
+++ b/src/mongo/db/auth/action_set.h
@@ -35,60 +35,62 @@
namespace mongo {
- /*
- * An ActionSet is a bitmask of ActionTypes that represents a set of actions.
- * These are the actions that a Privilege can grant a user to perform on a resource.
- * If the special ActionType::anyAction is granted to this set, it automatically sets all bits
- * in the bitmask, indicating that it contains all possible actions.
- */
- class ActionSet {
- public:
-
- ActionSet() : _actions(0) {}
-
- void addAction(const ActionType& action);
- void addAllActionsFromSet(const ActionSet& actionSet);
- void addAllActions();
-
- // Removes action from the set. Also removes the "anyAction" action, if present.
- // Note: removing the "anyAction" action does *not* remove all other actions.
- void removeAction(const ActionType& action);
- void removeAllActionsFromSet(const ActionSet& actionSet);
- void removeAllActions();
-
- bool empty() const { return _actions.none(); }
-
- bool equals(const ActionSet& other) const { return this->_actions == other._actions; }
+/*
+ * An ActionSet is a bitmask of ActionTypes that represents a set of actions.
+ * These are the actions that a Privilege can grant a user to perform on a resource.
+ * If the special ActionType::anyAction is granted to this set, it automatically sets all bits
+ * in the bitmask, indicating that it contains all possible actions.
+ */
+class ActionSet {
+public:
+ ActionSet() : _actions(0) {}
+
+ void addAction(const ActionType& action);
+ void addAllActionsFromSet(const ActionSet& actionSet);
+ void addAllActions();
+
+ // Removes action from the set. Also removes the "anyAction" action, if present.
+ // Note: removing the "anyAction" action does *not* remove all other actions.
+ void removeAction(const ActionType& action);
+ void removeAllActionsFromSet(const ActionSet& actionSet);
+ void removeAllActions();
+
+ bool empty() const {
+ return _actions.none();
+ }
- bool contains(const ActionType& action) const;
+ bool equals(const ActionSet& other) const {
+ return this->_actions == other._actions;
+ }
- // Returns true only if this ActionSet contains all the actions present in the 'other'
- // ActionSet.
- bool isSupersetOf(const ActionSet& other) const;
+ bool contains(const ActionType& action) const;
- // Returns the std::string representation of this ActionSet
- std::string toString() const;
+ // Returns true only if this ActionSet contains all the actions present in the 'other'
+ // ActionSet.
+ bool isSupersetOf(const ActionSet& other) const;
- // Returns a vector of strings representing the actions in the ActionSet.
- std::vector<std::string> getActionsAsStrings() const;
+ // Returns the std::string representation of this ActionSet
+ std::string toString() const;
- // Takes a comma-separated std::string of action type std::string representations and returns
- // an int bitmask of the actions.
- static Status parseActionSetFromString(const std::string& actionsString, ActionSet* result);
+ // Returns a vector of strings representing the actions in the ActionSet.
+ std::vector<std::string> getActionsAsStrings() const;
- // Takes a vector of action type std::string representations and returns an ActionSet of the
- // actions.
- static Status parseActionSetFromStringVector(const std::vector<std::string>& actionsVector,
- ActionSet* result);
+ // Takes a comma-separated std::string of action type std::string representations and returns
+ // an int bitmask of the actions.
+ static Status parseActionSetFromString(const std::string& actionsString, ActionSet* result);
- private:
+ // Takes a vector of action type std::string representations and returns an ActionSet of the
+ // actions.
+ static Status parseActionSetFromStringVector(const std::vector<std::string>& actionsVector,
+ ActionSet* result);
- // bitmask of actions this privilege grants
- std::bitset<ActionType::NUM_ACTION_TYPES> _actions;
- };
+private:
+ // bitmask of actions this privilege grants
+ std::bitset<ActionType::NUM_ACTION_TYPES> _actions;
+};
- static inline bool operator==(const ActionSet& lhs, const ActionSet& rhs) {
- return lhs.equals(rhs);
- }
+static inline bool operator==(const ActionSet& lhs, const ActionSet& rhs) {
+ return lhs.equals(rhs);
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/action_set_test.cpp b/src/mongo/db/auth/action_set_test.cpp
index efe9f6f2761..9689a656549 100644
--- a/src/mongo/db/auth/action_set_test.cpp
+++ b/src/mongo/db/auth/action_set_test.cpp
@@ -36,125 +36,125 @@
namespace mongo {
namespace {
- TEST(ActionSetTest, ParseActionSetFromString) {
- ActionSet result;
- ASSERT_OK(ActionSet::parseActionSetFromString("find,insert,update,remove", &result));
- ASSERT_TRUE(result.contains(ActionType::find));
- ASSERT_TRUE(result.contains(ActionType::insert));
- ASSERT_TRUE(result.contains(ActionType::update));
- ASSERT_TRUE(result.contains(ActionType::remove));
-
- // Order of the strings doesn't matter
- ASSERT_OK(ActionSet::parseActionSetFromString("update,find,remove,insert", &result));
- ASSERT_TRUE(result.contains(ActionType::find));
- ASSERT_TRUE(result.contains(ActionType::insert));
- ASSERT_TRUE(result.contains(ActionType::update));
- ASSERT_TRUE(result.contains(ActionType::remove));
-
- ASSERT_OK(ActionSet::parseActionSetFromString("find", &result));
-
- ASSERT_TRUE(result.contains(ActionType::find));
- ASSERT_FALSE(result.contains(ActionType::insert));
- ASSERT_FALSE(result.contains(ActionType::update));
- ASSERT_FALSE(result.contains(ActionType::remove));
-
- ASSERT_OK(ActionSet::parseActionSetFromString("", &result));
-
- ASSERT_FALSE(result.contains(ActionType::find));
- ASSERT_FALSE(result.contains(ActionType::insert));
- ASSERT_FALSE(result.contains(ActionType::update));
- ASSERT_FALSE(result.contains(ActionType::remove));
-
- ASSERT_EQUALS(ErrorCodes::FailedToParse,
- ActionSet::parseActionSetFromString("INVALID INPUT", &result).code());
- }
-
- TEST(ActionSetTest, ToString) {
- ActionSet actionSet;
-
- ASSERT_EQUALS("", actionSet.toString());
- actionSet.addAction(ActionType::find);
- ASSERT_EQUALS("find", actionSet.toString());
- actionSet.addAction(ActionType::insert);
- ASSERT_EQUALS("find,insert", actionSet.toString());
- actionSet.addAction(ActionType::update);
- ASSERT_EQUALS("find,insert,update", actionSet.toString());
- actionSet.addAction(ActionType::remove);
- ASSERT_EQUALS("find,insert,remove,update", actionSet.toString());
-
- // Now make sure adding actions in a different order doesn't change anything.
- ActionSet actionSet2;
- ASSERT_EQUALS("", actionSet2.toString());
- actionSet2.addAction(ActionType::insert);
- ASSERT_EQUALS("insert", actionSet2.toString());
- actionSet2.addAction(ActionType::remove);
- ASSERT_EQUALS("insert,remove", actionSet2.toString());
- actionSet2.addAction(ActionType::find);
- ASSERT_EQUALS("find,insert,remove", actionSet2.toString());
- actionSet2.addAction(ActionType::update);
- ASSERT_EQUALS("find,insert,remove,update", actionSet2.toString());
- }
-
- TEST(ActionSetTest, IsSupersetOf) {
- ActionSet set1, set2, set3;
- ASSERT_OK(ActionSet::parseActionSetFromString("find,update,insert", &set1));
- ASSERT_OK(ActionSet::parseActionSetFromString("find,update,remove", &set2));
- ASSERT_OK(ActionSet::parseActionSetFromString("find,update", &set3));
-
- ASSERT_FALSE(set1.isSupersetOf(set2));
- ASSERT_TRUE(set1.isSupersetOf(set3));
-
- ASSERT_FALSE(set2.isSupersetOf(set1));
- ASSERT_TRUE(set2.isSupersetOf(set3));
-
- ASSERT_FALSE(set3.isSupersetOf(set1));
- ASSERT_FALSE(set3.isSupersetOf(set2));
- }
-
- TEST(ActionSetTest, anyAction) {
- ActionSet set;
-
- ASSERT_OK(ActionSet::parseActionSetFromString("anyAction", &set));
- ASSERT_TRUE(set.contains(ActionType::find));
- ASSERT_TRUE(set.contains(ActionType::insert));
- ASSERT_TRUE(set.contains(ActionType::anyAction));
-
- set.removeAllActions();
- set.addAllActions();
- ASSERT_TRUE(set.contains(ActionType::find));
- ASSERT_TRUE(set.contains(ActionType::insert));
- ASSERT_TRUE(set.contains(ActionType::anyAction));
-
- set.removeAllActions();
- set.addAction(ActionType::anyAction);
- ASSERT_TRUE(set.contains(ActionType::find));
- ASSERT_TRUE(set.contains(ActionType::insert));
- ASSERT_TRUE(set.contains(ActionType::anyAction));
-
- set.removeAction(ActionType::find);
- ASSERT_FALSE(set.contains(ActionType::find));
- ASSERT_TRUE(set.contains(ActionType::insert));
- ASSERT_FALSE(set.contains(ActionType::anyAction));
-
- set.addAction(ActionType::find);
- ASSERT_TRUE(set.contains(ActionType::find));
- ASSERT_TRUE(set.contains(ActionType::insert));
- ASSERT_FALSE(set.contains(ActionType::anyAction));
-
- set.addAction(ActionType::anyAction);
- ASSERT_TRUE(set.contains(ActionType::find));
- ASSERT_TRUE(set.contains(ActionType::insert));
- ASSERT_TRUE(set.contains(ActionType::anyAction));
-
- ASSERT_EQUALS("anyAction", set.toString());
-
- set.removeAction(ActionType::anyAction);
- ASSERT_TRUE(set.contains(ActionType::find));
- ASSERT_TRUE(set.contains(ActionType::insert));
- ASSERT_FALSE(set.contains(ActionType::anyAction));
-
- ASSERT_NOT_EQUALS("anyAction", set.toString());
- }
+TEST(ActionSetTest, ParseActionSetFromString) {
+ ActionSet result;
+ ASSERT_OK(ActionSet::parseActionSetFromString("find,insert,update,remove", &result));
+ ASSERT_TRUE(result.contains(ActionType::find));
+ ASSERT_TRUE(result.contains(ActionType::insert));
+ ASSERT_TRUE(result.contains(ActionType::update));
+ ASSERT_TRUE(result.contains(ActionType::remove));
+
+ // Order of the strings doesn't matter
+ ASSERT_OK(ActionSet::parseActionSetFromString("update,find,remove,insert", &result));
+ ASSERT_TRUE(result.contains(ActionType::find));
+ ASSERT_TRUE(result.contains(ActionType::insert));
+ ASSERT_TRUE(result.contains(ActionType::update));
+ ASSERT_TRUE(result.contains(ActionType::remove));
+
+ ASSERT_OK(ActionSet::parseActionSetFromString("find", &result));
+
+ ASSERT_TRUE(result.contains(ActionType::find));
+ ASSERT_FALSE(result.contains(ActionType::insert));
+ ASSERT_FALSE(result.contains(ActionType::update));
+ ASSERT_FALSE(result.contains(ActionType::remove));
+
+ ASSERT_OK(ActionSet::parseActionSetFromString("", &result));
+
+ ASSERT_FALSE(result.contains(ActionType::find));
+ ASSERT_FALSE(result.contains(ActionType::insert));
+ ASSERT_FALSE(result.contains(ActionType::update));
+ ASSERT_FALSE(result.contains(ActionType::remove));
+
+ ASSERT_EQUALS(ErrorCodes::FailedToParse,
+ ActionSet::parseActionSetFromString("INVALID INPUT", &result).code());
+}
+
+TEST(ActionSetTest, ToString) {
+ ActionSet actionSet;
+
+ ASSERT_EQUALS("", actionSet.toString());
+ actionSet.addAction(ActionType::find);
+ ASSERT_EQUALS("find", actionSet.toString());
+ actionSet.addAction(ActionType::insert);
+ ASSERT_EQUALS("find,insert", actionSet.toString());
+ actionSet.addAction(ActionType::update);
+ ASSERT_EQUALS("find,insert,update", actionSet.toString());
+ actionSet.addAction(ActionType::remove);
+ ASSERT_EQUALS("find,insert,remove,update", actionSet.toString());
+
+ // Now make sure adding actions in a different order doesn't change anything.
+ ActionSet actionSet2;
+ ASSERT_EQUALS("", actionSet2.toString());
+ actionSet2.addAction(ActionType::insert);
+ ASSERT_EQUALS("insert", actionSet2.toString());
+ actionSet2.addAction(ActionType::remove);
+ ASSERT_EQUALS("insert,remove", actionSet2.toString());
+ actionSet2.addAction(ActionType::find);
+ ASSERT_EQUALS("find,insert,remove", actionSet2.toString());
+ actionSet2.addAction(ActionType::update);
+ ASSERT_EQUALS("find,insert,remove,update", actionSet2.toString());
+}
+
+TEST(ActionSetTest, IsSupersetOf) {
+ ActionSet set1, set2, set3;
+ ASSERT_OK(ActionSet::parseActionSetFromString("find,update,insert", &set1));
+ ASSERT_OK(ActionSet::parseActionSetFromString("find,update,remove", &set2));
+ ASSERT_OK(ActionSet::parseActionSetFromString("find,update", &set3));
+
+ ASSERT_FALSE(set1.isSupersetOf(set2));
+ ASSERT_TRUE(set1.isSupersetOf(set3));
+
+ ASSERT_FALSE(set2.isSupersetOf(set1));
+ ASSERT_TRUE(set2.isSupersetOf(set3));
+
+ ASSERT_FALSE(set3.isSupersetOf(set1));
+ ASSERT_FALSE(set3.isSupersetOf(set2));
+}
+
+TEST(ActionSetTest, anyAction) {
+ ActionSet set;
+
+ ASSERT_OK(ActionSet::parseActionSetFromString("anyAction", &set));
+ ASSERT_TRUE(set.contains(ActionType::find));
+ ASSERT_TRUE(set.contains(ActionType::insert));
+ ASSERT_TRUE(set.contains(ActionType::anyAction));
+
+ set.removeAllActions();
+ set.addAllActions();
+ ASSERT_TRUE(set.contains(ActionType::find));
+ ASSERT_TRUE(set.contains(ActionType::insert));
+ ASSERT_TRUE(set.contains(ActionType::anyAction));
+
+ set.removeAllActions();
+ set.addAction(ActionType::anyAction);
+ ASSERT_TRUE(set.contains(ActionType::find));
+ ASSERT_TRUE(set.contains(ActionType::insert));
+ ASSERT_TRUE(set.contains(ActionType::anyAction));
+
+ set.removeAction(ActionType::find);
+ ASSERT_FALSE(set.contains(ActionType::find));
+ ASSERT_TRUE(set.contains(ActionType::insert));
+ ASSERT_FALSE(set.contains(ActionType::anyAction));
+
+ set.addAction(ActionType::find);
+ ASSERT_TRUE(set.contains(ActionType::find));
+ ASSERT_TRUE(set.contains(ActionType::insert));
+ ASSERT_FALSE(set.contains(ActionType::anyAction));
+
+ set.addAction(ActionType::anyAction);
+ ASSERT_TRUE(set.contains(ActionType::find));
+ ASSERT_TRUE(set.contains(ActionType::insert));
+ ASSERT_TRUE(set.contains(ActionType::anyAction));
+
+ ASSERT_EQUALS("anyAction", set.toString());
+
+ set.removeAction(ActionType::anyAction);
+ ASSERT_TRUE(set.contains(ActionType::find));
+ ASSERT_TRUE(set.contains(ActionType::insert));
+ ASSERT_FALSE(set.contains(ActionType::anyAction));
+
+ ASSERT_NOT_EQUALS("anyAction", set.toString());
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/auth/auth_decorations.cpp b/src/mongo/db/auth/auth_decorations.cpp
index cbd4ca3083a..bc93e494ff2 100644
--- a/src/mongo/db/auth/auth_decorations.cpp
+++ b/src/mongo/db/auth/auth_decorations.cpp
@@ -45,99 +45,93 @@
namespace mongo {
namespace {
- MONGO_INITIALIZER_WITH_PREREQUISITES(CreateAuthorizationManager,
- ("SetupInternalSecurityUser",
- "OIDGeneration",
- "SetGlobalEnvironment",
- "CreateAuthorizationExternalStateFactory",
- "EndStartupOptionStorage"))
- (InitializerContext* context) {
- auto authzManager = stdx::make_unique<AuthorizationManager>(
- AuthzManagerExternalState::create());
- authzManager->setAuthEnabled(serverGlobalParams.isAuthEnabled);
- AuthorizationManager::set(getGlobalServiceContext(), std::move(authzManager));
- return Status::OK();
+MONGO_INITIALIZER_WITH_PREREQUISITES(CreateAuthorizationManager,
+ ("SetupInternalSecurityUser",
+ "OIDGeneration",
+ "SetGlobalEnvironment",
+ "CreateAuthorizationExternalStateFactory",
+ "EndStartupOptionStorage"))
+(InitializerContext* context) {
+ auto authzManager =
+ stdx::make_unique<AuthorizationManager>(AuthzManagerExternalState::create());
+ authzManager->setAuthEnabled(serverGlobalParams.isAuthEnabled);
+ AuthorizationManager::set(getGlobalServiceContext(), std::move(authzManager));
+ return Status::OK();
+}
+
+const auto getAuthenticationSession =
+ ClientBasic::declareDecoration<std::unique_ptr<AuthenticationSession>>();
+
+const auto getAuthorizationManager =
+ ServiceContext::declareDecoration<std::unique_ptr<AuthorizationManager>>();
+
+const auto getAuthorizationSession =
+ ClientBasic::declareDecoration<std::unique_ptr<AuthorizationSession>>();
+
+class AuthzClientObserver final : public ServiceContext::ClientObserver {
+public:
+ void onCreateClient(Client* client) override {
+ auto service = client->getServiceContext();
+ AuthorizationSession::set(client,
+ AuthorizationManager::get(service)->makeAuthorizationSession());
}
- const auto getAuthenticationSession =
- ClientBasic::declareDecoration<std::unique_ptr<AuthenticationSession>>();
+ void onDestroyClient(Client* client) override {}
- const auto getAuthorizationManager =
- ServiceContext::declareDecoration<std::unique_ptr<AuthorizationManager>>();
-
- const auto getAuthorizationSession =
- ClientBasic::declareDecoration<std::unique_ptr<AuthorizationSession>>();
-
- class AuthzClientObserver final : public ServiceContext::ClientObserver {
- public:
- void onCreateClient(Client* client) override {
- auto service = client->getServiceContext();
- AuthorizationSession::set(
- client,
- AuthorizationManager::get(service)->makeAuthorizationSession());
- }
-
- void onDestroyClient(Client* client) override {}
-
- void onCreateOperationContext(OperationContext* opCtx) override {}
- void onDestroyOperationContext(OperationContext* opCtx) override {}
- };
+ void onCreateOperationContext(OperationContext* opCtx) override {}
+ void onDestroyOperationContext(OperationContext* opCtx) override {}
+};
} // namespace
- void AuthenticationSession::set(
- ClientBasic* client,
- std::unique_ptr<AuthenticationSession> newSession) {
- getAuthenticationSession(client) = std::move(newSession);
- }
-
- void AuthenticationSession::swap(
- ClientBasic* client,
- std::unique_ptr<AuthenticationSession>& other) {
- using std::swap;
- swap(getAuthenticationSession(client), other);
- }
-
- AuthorizationManager* AuthorizationManager::get(ServiceContext* service) {
- return getAuthorizationManager(service).get();
- }
-
- AuthorizationManager* AuthorizationManager::get(ServiceContext& service) {
- return getAuthorizationManager(service).get();
- }
-
- void AuthorizationManager::set(ServiceContext* service,
- std::unique_ptr<AuthorizationManager> authzManager) {
- auto& manager = getAuthorizationManager(service);
- invariant(authzManager);
- invariant(!manager);
- manager = std::move(authzManager);
- service->registerClientObserver(stdx::make_unique<AuthzClientObserver>());
- }
-
- AuthorizationSession* AuthorizationSession::get(ClientBasic* client) {
- return get(*client);
- }
-
- AuthorizationSession* AuthorizationSession::get(ClientBasic& client) {
- AuthorizationSession* retval = getAuthorizationSession(client).get();
- massert(16481,
- "No AuthorizationManager has been set up for this connection",
- retval);
- return retval;
- }
-
- bool AuthorizationSession::exists(ClientBasic* client) {
- return getAuthorizationSession(client).get();
- }
-
- void AuthorizationSession::set(
- ClientBasic* client,
- std::unique_ptr<AuthorizationSession> authorizationSession) {
- auto& authzSession = getAuthorizationSession(client);
- invariant(authorizationSession);
- invariant(!authzSession);
- authzSession = std::move(authorizationSession);
- }
+void AuthenticationSession::set(ClientBasic* client,
+ std::unique_ptr<AuthenticationSession> newSession) {
+ getAuthenticationSession(client) = std::move(newSession);
+}
+
+void AuthenticationSession::swap(ClientBasic* client,
+ std::unique_ptr<AuthenticationSession>& other) {
+ using std::swap;
+ swap(getAuthenticationSession(client), other);
+}
+
+AuthorizationManager* AuthorizationManager::get(ServiceContext* service) {
+ return getAuthorizationManager(service).get();
+}
+
+AuthorizationManager* AuthorizationManager::get(ServiceContext& service) {
+ return getAuthorizationManager(service).get();
+}
+
+void AuthorizationManager::set(ServiceContext* service,
+ std::unique_ptr<AuthorizationManager> authzManager) {
+ auto& manager = getAuthorizationManager(service);
+ invariant(authzManager);
+ invariant(!manager);
+ manager = std::move(authzManager);
+ service->registerClientObserver(stdx::make_unique<AuthzClientObserver>());
+}
+
+AuthorizationSession* AuthorizationSession::get(ClientBasic* client) {
+ return get(*client);
+}
+
+AuthorizationSession* AuthorizationSession::get(ClientBasic& client) {
+ AuthorizationSession* retval = getAuthorizationSession(client).get();
+ massert(16481, "No AuthorizationManager has been set up for this connection", retval);
+ return retval;
+}
+
+bool AuthorizationSession::exists(ClientBasic* client) {
+ return getAuthorizationSession(client).get();
+}
+
+void AuthorizationSession::set(ClientBasic* client,
+ std::unique_ptr<AuthorizationSession> authorizationSession) {
+ auto& authzSession = getAuthorizationSession(client);
+ invariant(authorizationSession);
+ invariant(!authzSession);
+ authzSession = std::move(authorizationSession);
+}
} // namespace mongo
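The decorations registered above are the canonical access path to authorization state: the service-level AuthorizationManager is installed once by the CreateAuthorizationManager initializer, and AuthzClientObserver attaches a fresh AuthorizationSession to every new Client. A minimal sketch of the lookup side, using only the accessors defined in this file (the helper function itself is hypothetical):

    #include "mongo/db/auth/authorization_manager.h"
    #include "mongo/db/auth/authorization_session.h"
    #include "mongo/db/client.h"

    namespace mongo {

    // Hypothetical helper, not part of this patch; shown only to illustrate the accessors.
    bool clientHasUsableAuthzSession(Client* client) {
        // Per-service decoration, set at startup by CreateAuthorizationManager.
        AuthorizationManager* manager = AuthorizationManager::get(client->getServiceContext());
        if (!manager->isAuthEnabled()) {
            return false;  // Authorization is globally disabled.
        }
        // Per-client decoration, installed by AuthzClientObserver::onCreateClient().
        // AuthorizationSession::get() would massert if none were installed, so probe
        // with exists() first.
        return AuthorizationSession::exists(client);
    }

    }  // namespace mongo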
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index 8cbd093c2c7..546df4c1da9 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -46,85 +46,79 @@
namespace mongo {
- using std::endl;
+using std::endl;
namespace authindex {
namespace {
- BSONObj v1SystemUsersKeyPattern;
- BSONObj v3SystemUsersKeyPattern;
- BSONObj v3SystemRolesKeyPattern;
- std::string v3SystemUsersIndexName;
- std::string v3SystemRolesIndexName;
-
- MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
- v1SystemUsersKeyPattern = BSON("user" << 1 << "userSource" << 1);
- v3SystemUsersKeyPattern = BSON(AuthorizationManager::USER_NAME_FIELD_NAME << 1 <<
- AuthorizationManager::USER_DB_FIELD_NAME << 1);
- v3SystemRolesKeyPattern = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
- v3SystemUsersIndexName = std::string(
- str::stream() <<
- AuthorizationManager::USER_NAME_FIELD_NAME << "_1_" <<
- AuthorizationManager::USER_DB_FIELD_NAME << "_1");
- v3SystemRolesIndexName = std::string(
- str::stream() <<
- AuthorizationManager::ROLE_NAME_FIELD_NAME << "_1_" <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << "_1");
-
- return Status::OK();
- }
+BSONObj v1SystemUsersKeyPattern;
+BSONObj v3SystemUsersKeyPattern;
+BSONObj v3SystemRolesKeyPattern;
+std::string v3SystemUsersIndexName;
+std::string v3SystemRolesIndexName;
+
+MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
+ v1SystemUsersKeyPattern = BSON("user" << 1 << "userSource" << 1);
+ v3SystemUsersKeyPattern = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
+ v3SystemRolesKeyPattern = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
+ v3SystemUsersIndexName =
+ std::string(str::stream() << AuthorizationManager::USER_NAME_FIELD_NAME << "_1_"
+ << AuthorizationManager::USER_DB_FIELD_NAME << "_1");
+ v3SystemRolesIndexName =
+ std::string(str::stream() << AuthorizationManager::ROLE_NAME_FIELD_NAME << "_1_"
+ << AuthorizationManager::ROLE_DB_FIELD_NAME << "_1");
+
+ return Status::OK();
+}
} // namespace
- Status verifySystemIndexes(OperationContext* txn) {
- const NamespaceString systemUsers = AuthorizationManager::usersCollectionNamespace;
+Status verifySystemIndexes(OperationContext* txn) {
+ const NamespaceString systemUsers = AuthorizationManager::usersCollectionNamespace;
- // Make sure the old unique index from v2.4 on system.users doesn't exist.
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, systemUsers.db(), MODE_X);
- if (!autoDb.getDb()) {
- return Status::OK();
- }
-
- Collection* collection = autoDb.getDb()->getCollection(NamespaceString(systemUsers));
- if (!collection) {
- return Status::OK();
- }
+ // Make sure the old unique index from v2.4 on system.users doesn't exist.
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetDb autoDb(txn, systemUsers.db(), MODE_X);
+ if (!autoDb.getDb()) {
+ return Status::OK();
+ }
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
- IndexDescriptor* oldIndex = NULL;
+ Collection* collection = autoDb.getDb()->getCollection(NamespaceString(systemUsers));
+ if (!collection) {
+ return Status::OK();
+ }
- if (indexCatalog &&
- (oldIndex = indexCatalog->findIndexByKeyPattern(txn, v1SystemUsersKeyPattern))) {
- return Status(ErrorCodes::AuthSchemaIncompatible,
- "Old 2.4 style user index identified. "
- "The authentication schema needs to be updated by "
- "running authSchemaUpgrade on a 2.6 server.");
- }
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ IndexDescriptor* oldIndex = NULL;
- return Status::OK();
+ if (indexCatalog &&
+ (oldIndex = indexCatalog->findIndexByKeyPattern(txn, v1SystemUsersKeyPattern))) {
+ return Status(ErrorCodes::AuthSchemaIncompatible,
+ "Old 2.4 style user index identified. "
+ "The authentication schema needs to be updated by "
+ "running authSchemaUpgrade on a 2.6 server.");
}
- void createSystemIndexes(OperationContext* txn, Collection* collection) {
- invariant( collection );
- const NamespaceString& ns = collection->ns();
- if (ns == AuthorizationManager::usersCollectionNamespace) {
- collection->getIndexCatalog()->createIndexOnEmptyCollection(
- txn,
- BSON("name" << v3SystemUsersIndexName
- << "ns" << collection->ns().ns()
- << "key" << v3SystemUsersKeyPattern
- << "unique" << true));
- } else if (ns == AuthorizationManager::rolesCollectionNamespace) {
- collection->getIndexCatalog()->createIndexOnEmptyCollection(
- txn,
- BSON("name" << v3SystemRolesIndexName
- << "ns" << collection->ns().ns()
- << "key" << v3SystemRolesKeyPattern
- << "unique" << true));
- }
+ return Status::OK();
+}
+
+void createSystemIndexes(OperationContext* txn, Collection* collection) {
+ invariant(collection);
+ const NamespaceString& ns = collection->ns();
+ if (ns == AuthorizationManager::usersCollectionNamespace) {
+ collection->getIndexCatalog()->createIndexOnEmptyCollection(
+ txn,
+ BSON("name" << v3SystemUsersIndexName << "ns" << collection->ns().ns() << "key"
+ << v3SystemUsersKeyPattern << "unique" << true));
+ } else if (ns == AuthorizationManager::rolesCollectionNamespace) {
+ collection->getIndexCatalog()->createIndexOnEmptyCollection(
+ txn,
+ BSON("name" << v3SystemRolesIndexName << "ns" << collection->ns().ns() << "key"
+ << v3SystemRolesKeyPattern << "unique" << true));
}
+}
} // namespace authindex
} // namespace mongo
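For reference, evaluating the BSON builders in AuthIndexKeyPatterns with the field-name constants from authorization_manager.cpp ("user", "db", "role", "db") yields the shapes below; the shell-style JSON is illustrative only:

    // v1SystemUsersKeyPattern: { user: 1, userSource: 1 }
    //     The legacy 2.4 unique index; verifySystemIndexes() fails with
    //     AuthSchemaIncompatible if it is still present on admin.system.users.
    // v3SystemUsersKeyPattern: { user: 1, db: 1 }
    //     Built as a unique index named "user_1_db_1" on admin.system.users
    //     by createSystemIndexes().
    // v3SystemRolesKeyPattern: { role: 1, db: 1 }
    //     Built as a unique index named "role_1_db_1" on admin.system.roles
    //     by createSystemIndexes().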
diff --git a/src/mongo/db/auth/auth_index_d.h b/src/mongo/db/auth/auth_index_d.h
index b643e7c8a11..9b85e02e000 100644
--- a/src/mongo/db/auth/auth_index_d.h
+++ b/src/mongo/db/auth/auth_index_d.h
@@ -32,22 +32,22 @@
namespace mongo {
- class Collection;
- class OperationContext;
+class Collection;
+class OperationContext;
namespace authindex {
- /**
- * Creates the appropriate indexes on _new_ system collections supporting authentication and
- * authorization.
- */
- void createSystemIndexes(OperationContext* txn, Collection* collection);
-
- /**
- * Verifies that only the appropriate indexes to support authentication and authorization
- * are present in the admin database
- */
- Status verifySystemIndexes(OperationContext* txn);
+/**
+ * Creates the appropriate indexes on _new_ system collections supporting authentication and
+ * authorization.
+ */
+void createSystemIndexes(OperationContext* txn, Collection* collection);
+
+/**
+ * Verifies that only the appropriate indexes to support authentication and authorization
+ * are present in the admin database
+ */
+Status verifySystemIndexes(OperationContext* txn);
} // namespace authindex
} // namespace mongo
diff --git a/src/mongo/db/auth/authentication_session.h b/src/mongo/db/auth/authentication_session.h
index 8870abefc76..1c3b34b16ee 100644
--- a/src/mongo/db/auth/authentication_session.h
+++ b/src/mongo/db/auth/authentication_session.h
@@ -33,44 +33,47 @@
namespace mongo {
- class ClientBasic;
+class ClientBasic;
+
+/**
+ * Abstract type representing an ongoing authentication session.
+ *
+ * An example subclass is MongoAuthenticationSession.
+ */
+class AuthenticationSession {
+ MONGO_DISALLOW_COPYING(AuthenticationSession);
+
+public:
+ enum SessionType {
+ SESSION_TYPE_MONGO, // The mongo-specific challenge-response authentication mechanism.
+ SESSION_TYPE_SASL // SASL authentication mechanism.
+ };
/**
- * Abstract type representing an ongoing authentication session.
- *
- * An example subclass is MongoAuthenticationSession.
+ * Sets the authentication session for the given "client" to "newSession".
*/
- class AuthenticationSession {
- MONGO_DISALLOW_COPYING(AuthenticationSession);
- public:
- enum SessionType {
- SESSION_TYPE_MONGO, // The mongo-specific challenge-response authentication mechanism.
- SESSION_TYPE_SASL // SASL authentication mechanism.
- };
-
- /**
- * Sets the authentication session for the given "client" to "newSession".
- */
- static void set(ClientBasic* client, std::unique_ptr<AuthenticationSession> newSession);
+ static void set(ClientBasic* client, std::unique_ptr<AuthenticationSession> newSession);
- /**
- * Swaps "client"'s current authentication session with "other".
- */
- static void swap(ClientBasic* client, std::unique_ptr<AuthenticationSession>& other);
+ /**
+ * Swaps "client"'s current authentication session with "other".
+ */
+ static void swap(ClientBasic* client, std::unique_ptr<AuthenticationSession>& other);
- virtual ~AuthenticationSession() = default;
+ virtual ~AuthenticationSession() = default;
- /**
- * Return an identifer of the type of session, so that a caller can safely cast it and
- * extract the type-specific data stored within.
- */
- SessionType getType() const { return _sessionType; }
+ /**
+     * Return an identifier of the type of session, so that a caller can safely cast it and
+ * extract the type-specific data stored within.
+ */
+ SessionType getType() const {
+ return _sessionType;
+ }
- protected:
- explicit AuthenticationSession(SessionType sessionType) : _sessionType(sessionType) {}
+protected:
+ explicit AuthenticationSession(SessionType sessionType) : _sessionType(sessionType) {}
- private:
- const SessionType _sessionType;
- };
+private:
+ const SessionType _sessionType;
+};
} // namespace mongo
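As the comments above indicate, a concrete session is attached to a ClientBasic with set(), retrieved again by swapping it out, and discriminated via getType() before any downcast. A minimal sketch of that pattern with a hypothetical subclass (the real subclasses, such as MongoAuthenticationSession, live in their own headers):

    #include <memory>

    #include "mongo/db/auth/authentication_session.h"
    #include "mongo/stdx/memory.h"

    namespace mongo {

    // Hypothetical subclass used only for illustration.
    class ExampleAuthenticationSession : public AuthenticationSession {
    public:
        ExampleAuthenticationSession() : AuthenticationSession(SESSION_TYPE_MONGO) {}
        int exampleState = 0;  // Stand-in for mechanism-specific data.
    };

    void exampleUsage(ClientBasic* client) {
        // Install a new session on the client.
        AuthenticationSession::set(client, stdx::make_unique<ExampleAuthenticationSession>());

        // Later: swap it out, check the discriminator, then cast safely.
        std::unique_ptr<AuthenticationSession> session;
        AuthenticationSession::swap(client, session);
        if (session && session->getType() == AuthenticationSession::SESSION_TYPE_MONGO) {
            auto* example = static_cast<ExampleAuthenticationSession*>(session.get());
            example->exampleState = 42;
        }
        AuthenticationSession::swap(client, session);  // Put it back on the client.
    }

    }  // namespace mongo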
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index 016d8da33c6..30123e78177 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -64,428 +64,418 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::vector;
-
- AuthInfo internalSecurity;
-
- MONGO_INITIALIZER_WITH_PREREQUISITES(SetupInternalSecurityUser, MONGO_NO_PREREQUISITES)(
- InitializerContext* context) {
-
- User* user = new User(UserName("__system", "local"));
-
- user->incrementRefCount(); // Pin this user so the ref count never drops below 1.
- ActionSet allActions;
- allActions.addAllActions();
- PrivilegeVector privileges;
- RoleGraph::generateUniversalPrivileges(&privileges);
- user->addPrivileges(privileges);
- internalSecurity.user = user;
-
- return Status::OK();
- }
-
- const std::string AuthorizationManager::USER_NAME_FIELD_NAME = "user";
- const std::string AuthorizationManager::USER_DB_FIELD_NAME = "db";
- const std::string AuthorizationManager::ROLE_NAME_FIELD_NAME = "role";
- const std::string AuthorizationManager::ROLE_DB_FIELD_NAME = "db";
- const std::string AuthorizationManager::PASSWORD_FIELD_NAME = "pwd";
- const std::string AuthorizationManager::V1_USER_NAME_FIELD_NAME = "user";
- const std::string AuthorizationManager::V1_USER_SOURCE_FIELD_NAME = "userSource";
-
- const NamespaceString AuthorizationManager::adminCommandNamespace("admin.$cmd");
- const NamespaceString AuthorizationManager::rolesCollectionNamespace("admin.system.roles");
- const NamespaceString AuthorizationManager::usersAltCollectionNamespace(
- "admin.system.new_users");
- const NamespaceString AuthorizationManager::usersBackupCollectionNamespace(
- "admin.system.backup_users");
- const NamespaceString AuthorizationManager::usersCollectionNamespace("admin.system.users");
- const NamespaceString AuthorizationManager::versionCollectionNamespace("admin.system.version");
- const NamespaceString AuthorizationManager::defaultTempUsersCollectionNamespace(
- "admin.tempusers");
- const NamespaceString AuthorizationManager::defaultTempRolesCollectionNamespace(
- "admin.temproles");
-
- const BSONObj AuthorizationManager::versionDocumentQuery = BSON("_id" << "authSchema");
-
- const std::string AuthorizationManager::schemaVersionFieldName = "currentVersion";
+using std::endl;
+using std::string;
+using std::vector;
+
+AuthInfo internalSecurity;
+
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupInternalSecurityUser,
+ MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+ User* user = new User(UserName("__system", "local"));
+
+ user->incrementRefCount(); // Pin this user so the ref count never drops below 1.
+ ActionSet allActions;
+ allActions.addAllActions();
+ PrivilegeVector privileges;
+ RoleGraph::generateUniversalPrivileges(&privileges);
+ user->addPrivileges(privileges);
+ internalSecurity.user = user;
+
+ return Status::OK();
+}
+
+const std::string AuthorizationManager::USER_NAME_FIELD_NAME = "user";
+const std::string AuthorizationManager::USER_DB_FIELD_NAME = "db";
+const std::string AuthorizationManager::ROLE_NAME_FIELD_NAME = "role";
+const std::string AuthorizationManager::ROLE_DB_FIELD_NAME = "db";
+const std::string AuthorizationManager::PASSWORD_FIELD_NAME = "pwd";
+const std::string AuthorizationManager::V1_USER_NAME_FIELD_NAME = "user";
+const std::string AuthorizationManager::V1_USER_SOURCE_FIELD_NAME = "userSource";
+
+const NamespaceString AuthorizationManager::adminCommandNamespace("admin.$cmd");
+const NamespaceString AuthorizationManager::rolesCollectionNamespace("admin.system.roles");
+const NamespaceString AuthorizationManager::usersAltCollectionNamespace("admin.system.new_users");
+const NamespaceString AuthorizationManager::usersBackupCollectionNamespace(
+ "admin.system.backup_users");
+const NamespaceString AuthorizationManager::usersCollectionNamespace("admin.system.users");
+const NamespaceString AuthorizationManager::versionCollectionNamespace("admin.system.version");
+const NamespaceString AuthorizationManager::defaultTempUsersCollectionNamespace("admin.tempusers");
+const NamespaceString AuthorizationManager::defaultTempRolesCollectionNamespace("admin.temproles");
+
+const BSONObj AuthorizationManager::versionDocumentQuery = BSON("_id"
+ << "authSchema");
+
+const std::string AuthorizationManager::schemaVersionFieldName = "currentVersion";
#ifndef _MSC_EXTENSIONS
- const int AuthorizationManager::schemaVersion24;
- const int AuthorizationManager::schemaVersion26Upgrade;
- const int AuthorizationManager::schemaVersion26Final;
- const int AuthorizationManager::schemaVersion28SCRAM;
+const int AuthorizationManager::schemaVersion24;
+const int AuthorizationManager::schemaVersion26Upgrade;
+const int AuthorizationManager::schemaVersion26Final;
+const int AuthorizationManager::schemaVersion28SCRAM;
#endif
+/**
+ * Guard object for synchronizing accesses to data cached in AuthorizationManager instances.
+ * This guard allows one thread to access the cache at a time, and provides an exception-safe
+ * mechanism for a thread to release the cache mutex while performing network or disk operations
+ * and still allow other readers to proceed.
+ *
+ * There are two ways to use this guard. One may simply instantiate the guard like a
+ * std::lock_guard, and perform reads or writes of the cache.
+ *
+ * Alternatively, one may instantiate the guard, examine the cache, and then enter into an
+ * update mode by first wait()ing until otherUpdateInFetchPhase() is false, and then
+ * calling beginFetchPhase(). At this point, other threads may acquire the guard in the simple
+ * manner and do reads, but other threads may not enter into a fetch phase. During the fetch
+ * phase, the thread should perform required network or disk activity to determine what update
+ * it will make to the cache. Then, it should call endFetchPhase(), to reacquire the user cache
+ * mutex. At that point, the thread can make its modifications to the cache and let the guard
+ * go out of scope.
+ *
+ * All updates by guards using a fetch-phase are totally ordered with respect to one another,
+ * and all guards using no fetch phase are totally ordered with respect to one another, but
+ * there is not a total ordering among all guard objects.
+ *
+ * The cached data has an associated counter, called the cache generation. If the cache
+ * generation changes while a guard is in fetch phase, the fetched data should not be stored
+ * into the cache, because some invalidation event occurred during the fetch phase.
+ *
+ * NOTE: It is not safe to enter fetch phase while holding a database lock. Fetch phase
+ * operations are allowed to acquire database locks themselves, so entering fetch while holding
+ * a database lock may lead to deadlock.
+ */
+class AuthorizationManager::CacheGuard {
+ MONGO_DISALLOW_COPYING(CacheGuard);
+
+public:
+ enum FetchSynchronization { fetchSynchronizationAutomatic, fetchSynchronizationManual };
+
/**
- * Guard object for synchronizing accesses to data cached in AuthorizationManager instances.
- * This guard allows one thread to access the cache at a time, and provides an exception-safe
- * mechanism for a thread to release the cache mutex while performing network or disk operations
- * while allowing other readers to proceed.
- *
- * There are two ways to use this guard. One may simply instantiate the guard like a
- * std::lock_guard, and perform reads or writes of the cache.
- *
- * Alternatively, one may instantiate the guard, examine the cache, and then enter into an
- * update mode by first wait()ing until otherUpdateInFetchPhase() is false, and then
- * calling beginFetchPhase(). At this point, other threads may acquire the guard in the simple
- * manner and do reads, but other threads may not enter into a fetch phase. During the fetch
- * phase, the thread should perform required network or disk activity to determine what update
- * it will make to the cache. Then, it should call endFetchPhase(), to reacquire the user cache
- * mutex. At that point, the thread can make its modifications to the cache and let the guard
- * go out of scope.
- *
- * All updates by guards using a fetch-phase are totally ordered with respect to one another,
- * and all guards using no fetch phase are totally ordered with respect to one another, but
- * there is not a total ordering among all guard objects.
- *
- * The cached data has an associated counter, called the cache generation. If the cache
- * generation changes while a guard is in fetch phase, the fetched data should not be stored
- * into the cache, because some invalidation event occurred during the fetch phase.
- *
- * NOTE: It is not safe to enter fetch phase while holding a database lock. Fetch phase
- * operations are allowed to acquire database locks themselves, so entering fetch while holding
- * a database lock may lead to deadlock.
+ * Constructs a cache guard, locking the mutex that synchronizes user cache accesses.
*/
- class AuthorizationManager::CacheGuard {
- MONGO_DISALLOW_COPYING(CacheGuard);
- public:
- enum FetchSynchronization {
- fetchSynchronizationAutomatic,
- fetchSynchronizationManual
- };
-
- /**
- * Constructs a cache guard, locking the mutex that synchronizes user cache accesses.
- */
- CacheGuard(AuthorizationManager* authzManager,
- const FetchSynchronization sync = fetchSynchronizationAutomatic) :
- _isThisGuardInFetchPhase(false),
- _authzManager(authzManager),
- _lock(authzManager->_cacheMutex) {
-
- if (fetchSynchronizationAutomatic == sync) {
- synchronizeWithFetchPhase();
- }
- }
-
- /**
- * Releases the mutex that synchronizes user cache access, if held, and notifies
- * any threads waiting for their own opportunity to update the user cache.
- */
- ~CacheGuard() {
- if (!_lock.owns_lock()) {
- _lock.lock();
- }
- if (_isThisGuardInFetchPhase) {
- fassert(17190, _authzManager->_isFetchPhaseBusy);
- _authzManager->_isFetchPhaseBusy = false;
- _authzManager->_fetchPhaseIsReady.notify_all();
- }
- }
-
- /**
- * Returns true of the authzManager reports that it is in fetch phase.
- */
- bool otherUpdateInFetchPhase() { return _authzManager->_isFetchPhaseBusy; }
-
- /**
- * Waits on the _authzManager->_fetchPhaseIsReady condition.
- */
- void wait() {
- fassert(17222, !_isThisGuardInFetchPhase);
- _authzManager->_fetchPhaseIsReady.wait(_lock);
- }
-
- /**
- * Enters fetch phase, releasing the _authzManager->_cacheMutex after recording the current
- * cache generation.
- */
- void beginFetchPhase() {
- fassert(17191, !_authzManager->_isFetchPhaseBusy);
- _isThisGuardInFetchPhase = true;
- _authzManager->_isFetchPhaseBusy = true;
- _startGeneration = _authzManager->_cacheGeneration;
- _lock.unlock();
+ CacheGuard(AuthorizationManager* authzManager,
+ const FetchSynchronization sync = fetchSynchronizationAutomatic)
+ : _isThisGuardInFetchPhase(false),
+ _authzManager(authzManager),
+ _lock(authzManager->_cacheMutex) {
+ if (fetchSynchronizationAutomatic == sync) {
+ synchronizeWithFetchPhase();
}
+ }
- /**
- * Exits the fetch phase, reacquiring the _authzManager->_cacheMutex.
- */
- void endFetchPhase() {
+ /**
+ * Releases the mutex that synchronizes user cache access, if held, and notifies
+ * any threads waiting for their own opportunity to update the user cache.
+ */
+ ~CacheGuard() {
+ if (!_lock.owns_lock()) {
_lock.lock();
- // We do not clear _authzManager->_isFetchPhaseBusy or notify waiters until
- // ~CacheGuard(), for two reasons. First, there's no value to notifying the waiters
- // before you're ready to release the mutex, because they'll just go to sleep on the
- // mutex. Second, in order to meaningfully check the preconditions of
- // isSameCacheGeneration(), we need a state that means "fetch phase was entered and now
- // has been exited." That state is _isThisGuardInFetchPhase == true and
- // _lock.owns_lock() == true.
}
-
- /**
- * Returns true if _authzManager->_cacheGeneration remained the same while this guard was
- * in fetch phase. Behavior is undefined if this guard never entered fetch phase.
- *
- * If this returns true, do not update the cached data with this
- */
- bool isSameCacheGeneration() const {
- fassert(17223, _isThisGuardInFetchPhase);
- fassert(17231, _lock.owns_lock());
- return _startGeneration == _authzManager->_cacheGeneration;
- }
-
- private:
- void synchronizeWithFetchPhase() {
- while (otherUpdateInFetchPhase())
- wait();
- fassert(17192, !_authzManager->_isFetchPhaseBusy);
- _isThisGuardInFetchPhase = true;
- _authzManager->_isFetchPhaseBusy = true;
- }
-
- OID _startGeneration;
- bool _isThisGuardInFetchPhase;
- AuthorizationManager* _authzManager;
- stdx::unique_lock<stdx::mutex> _lock;
- };
-
- AuthorizationManager::AuthorizationManager(
- std::unique_ptr<AuthzManagerExternalState> externalState) :
- _authEnabled(false),
- _privilegeDocsExist(false),
- _externalState(std::move(externalState)),
- _version(schemaVersionInvalid),
- _isFetchPhaseBusy(false) {
- _updateCacheGeneration_inlock();
- }
-
- AuthorizationManager::~AuthorizationManager() {
- for (unordered_map<UserName, User*>::iterator it = _userCache.begin();
- it != _userCache.end(); ++it) {
- fassert(17265, it->second != internalSecurity.user);
- delete it->second ;
+ if (_isThisGuardInFetchPhase) {
+ fassert(17190, _authzManager->_isFetchPhaseBusy);
+ _authzManager->_isFetchPhaseBusy = false;
+ _authzManager->_fetchPhaseIsReady.notify_all();
}
}
- std::unique_ptr<AuthorizationSession> AuthorizationManager::makeAuthorizationSession() {
- return stdx::make_unique<AuthorizationSession>(
- _externalState->makeAuthzSessionExternalState(this));
- }
-
- Status AuthorizationManager::getAuthorizationVersion(OperationContext* txn, int* version) {
- CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
- int newVersion = _version;
- if (schemaVersionInvalid == newVersion) {
- while (guard.otherUpdateInFetchPhase())
- guard.wait();
- guard.beginFetchPhase();
- Status status = _externalState->getStoredAuthorizationVersion(txn, &newVersion);
- guard.endFetchPhase();
- if (!status.isOK()) {
- warning() << "Problem fetching the stored schema version of authorization data: "
- << status;
- *version = schemaVersionInvalid;
- return status;
- }
-
- if (guard.isSameCacheGeneration()) {
- _version = newVersion;
- }
- }
- *version = newVersion;
- return Status::OK();
+ /**
+     * Returns true if the authzManager reports that it is in fetch phase.
+ */
+ bool otherUpdateInFetchPhase() {
+ return _authzManager->_isFetchPhaseBusy;
}
- OID AuthorizationManager::getCacheGeneration() {
- CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
- return _cacheGeneration;
+ /**
+ * Waits on the _authzManager->_fetchPhaseIsReady condition.
+ */
+ void wait() {
+ fassert(17222, !_isThisGuardInFetchPhase);
+ _authzManager->_fetchPhaseIsReady.wait(_lock);
}
- void AuthorizationManager::setAuthEnabled(bool enabled) {
- _authEnabled = enabled;
+ /**
+ * Enters fetch phase, releasing the _authzManager->_cacheMutex after recording the current
+ * cache generation.
+ */
+ void beginFetchPhase() {
+ fassert(17191, !_authzManager->_isFetchPhaseBusy);
+ _isThisGuardInFetchPhase = true;
+ _authzManager->_isFetchPhaseBusy = true;
+ _startGeneration = _authzManager->_cacheGeneration;
+ _lock.unlock();
}
- bool AuthorizationManager::isAuthEnabled() const {
- return _authEnabled;
+ /**
+ * Exits the fetch phase, reacquiring the _authzManager->_cacheMutex.
+ */
+ void endFetchPhase() {
+ _lock.lock();
+ // We do not clear _authzManager->_isFetchPhaseBusy or notify waiters until
+ // ~CacheGuard(), for two reasons. First, there's no value to notifying the waiters
+ // before you're ready to release the mutex, because they'll just go to sleep on the
+ // mutex. Second, in order to meaningfully check the preconditions of
+ // isSameCacheGeneration(), we need a state that means "fetch phase was entered and now
+ // has been exited." That state is _isThisGuardInFetchPhase == true and
+ // _lock.owns_lock() == true.
}
- bool AuthorizationManager::hasAnyPrivilegeDocuments(OperationContext* txn) {
- stdx::unique_lock<stdx::mutex> lk(_privilegeDocsExistMutex);
- if (_privilegeDocsExist) {
- // If we know that a user exists, don't re-check.
- return true;
- }
-
- lk.unlock();
- bool privDocsExist = _externalState->hasAnyPrivilegeDocuments(txn);
- lk.lock();
-
- if (privDocsExist) {
- _privilegeDocsExist = true;
- }
-
- return _privilegeDocsExist;
+ /**
+ * Returns true if _authzManager->_cacheGeneration remained the same while this guard was
+ * in fetch phase. Behavior is undefined if this guard never entered fetch phase.
+ *
+     * If this returns false, do not update the cached data with the results of the fetch.
+ */
+ bool isSameCacheGeneration() const {
+ fassert(17223, _isThisGuardInFetchPhase);
+ fassert(17231, _lock.owns_lock());
+ return _startGeneration == _authzManager->_cacheGeneration;
}
- Status AuthorizationManager::getBSONForPrivileges(const PrivilegeVector& privileges,
- mutablebson::Element resultArray) {
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- std::string errmsg;
- ParsedPrivilege privilege;
- if (!ParsedPrivilege::privilegeToParsedPrivilege(*it, &privilege, &errmsg)) {
- return Status(ErrorCodes::BadValue, errmsg);
- }
- resultArray.appendObject("privileges", privilege.toBSON());
- }
- return Status::OK();
+private:
+ void synchronizeWithFetchPhase() {
+ while (otherUpdateInFetchPhase())
+ wait();
+ fassert(17192, !_authzManager->_isFetchPhaseBusy);
+ _isThisGuardInFetchPhase = true;
+ _authzManager->_isFetchPhaseBusy = true;
}
- Status AuthorizationManager::getBSONForRole(RoleGraph* graph,
- const RoleName& roleName,
- mutablebson::Element result) {
- if (!graph->roleExists(roleName)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << roleName.getFullName() <<
- "does not name an existing role");
- }
- std::string id = mongoutils::str::stream() << roleName.getDB() << "." << roleName.getRole();
- result.appendString("_id", id);
- result.appendString(ROLE_NAME_FIELD_NAME, roleName.getRole());
- result.appendString(ROLE_DB_FIELD_NAME, roleName.getDB());
-
- // Build privileges array
- mutablebson::Element privilegesArrayElement =
- result.getDocument().makeElementArray("privileges");
- result.pushBack(privilegesArrayElement);
- const PrivilegeVector& privileges = graph->getDirectPrivileges(roleName);
- Status status = getBSONForPrivileges(privileges, privilegesArrayElement);
+ OID _startGeneration;
+ bool _isThisGuardInFetchPhase;
+ AuthorizationManager* _authzManager;
+ stdx::unique_lock<stdx::mutex> _lock;
+};
+
+AuthorizationManager::AuthorizationManager(std::unique_ptr<AuthzManagerExternalState> externalState)
+ : _authEnabled(false),
+ _privilegeDocsExist(false),
+ _externalState(std::move(externalState)),
+ _version(schemaVersionInvalid),
+ _isFetchPhaseBusy(false) {
+ _updateCacheGeneration_inlock();
+}
+
+AuthorizationManager::~AuthorizationManager() {
+ for (unordered_map<UserName, User*>::iterator it = _userCache.begin(); it != _userCache.end();
+ ++it) {
+ fassert(17265, it->second != internalSecurity.user);
+ delete it->second;
+ }
+}
+
+std::unique_ptr<AuthorizationSession> AuthorizationManager::makeAuthorizationSession() {
+ return stdx::make_unique<AuthorizationSession>(
+ _externalState->makeAuthzSessionExternalState(this));
+}
+
+Status AuthorizationManager::getAuthorizationVersion(OperationContext* txn, int* version) {
+ CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
+ int newVersion = _version;
+ if (schemaVersionInvalid == newVersion) {
+ while (guard.otherUpdateInFetchPhase())
+ guard.wait();
+ guard.beginFetchPhase();
+ Status status = _externalState->getStoredAuthorizationVersion(txn, &newVersion);
+ guard.endFetchPhase();
if (!status.isOK()) {
+ warning() << "Problem fetching the stored schema version of authorization data: "
+ << status;
+ *version = schemaVersionInvalid;
return status;
}
- // Build roles array
- mutablebson::Element rolesArrayElement = result.getDocument().makeElementArray("roles");
- result.pushBack(rolesArrayElement);
- for (RoleNameIterator roles = graph->getDirectSubordinates(roleName);
- roles.more();
- roles.next()) {
-
- const RoleName& subRole = roles.get();
- mutablebson::Element roleObj = result.getDocument().makeElementObject("");
- roleObj.appendString(ROLE_NAME_FIELD_NAME, subRole.getRole());
- roleObj.appendString(ROLE_DB_FIELD_NAME, subRole.getDB());
- rolesArrayElement.pushBack(roleObj);
+ if (guard.isSameCacheGeneration()) {
+ _version = newVersion;
}
+ }
+ *version = newVersion;
+ return Status::OK();
+}
+
+OID AuthorizationManager::getCacheGeneration() {
+ CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
+ return _cacheGeneration;
+}
+
+void AuthorizationManager::setAuthEnabled(bool enabled) {
+ _authEnabled = enabled;
+}
+
+bool AuthorizationManager::isAuthEnabled() const {
+ return _authEnabled;
+}
+
+bool AuthorizationManager::hasAnyPrivilegeDocuments(OperationContext* txn) {
+ stdx::unique_lock<stdx::mutex> lk(_privilegeDocsExistMutex);
+ if (_privilegeDocsExist) {
+ // If we know that a user exists, don't re-check.
+ return true;
+ }
- return Status::OK();
+ lk.unlock();
+ bool privDocsExist = _externalState->hasAnyPrivilegeDocuments(txn);
+ lk.lock();
+
+ if (privDocsExist) {
+ _privilegeDocsExist = true;
}
- Status AuthorizationManager::_initializeUserFromPrivilegeDocument(
- User* user, const BSONObj& privDoc) {
- V2UserDocumentParser parser;
- std::string userName = parser.extractUserNameFromUserDocument(privDoc);
- if (userName != user->getName().getUser()) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << "User name from privilege document \""
- << userName
- << "\" doesn't match name of provided User \""
- << user->getName().getUser()
- << "\"",
- 0);
- }
+ return _privilegeDocsExist;
+}
- Status status = parser.initializeUserCredentialsFromUserDocument(user, privDoc);
- if (!status.isOK()) {
- return status;
- }
- status = parser.initializeUserRolesFromUserDocument(privDoc, user);
- if (!status.isOK()) {
- return status;
- }
- status = parser.initializeUserIndirectRolesFromUserDocument(privDoc, user);
- if (!status.isOK()) {
- return status;
+Status AuthorizationManager::getBSONForPrivileges(const PrivilegeVector& privileges,
+ mutablebson::Element resultArray) {
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ std::string errmsg;
+ ParsedPrivilege privilege;
+ if (!ParsedPrivilege::privilegeToParsedPrivilege(*it, &privilege, &errmsg)) {
+ return Status(ErrorCodes::BadValue, errmsg);
}
- status = parser.initializeUserPrivilegesFromUserDocument(privDoc, user);
- if (!status.isOK()) {
- return status;
- }
-
- return Status::OK();
+ resultArray.appendObject("privileges", privilege.toBSON());
+ }
+ return Status::OK();
+}
+
+Status AuthorizationManager::getBSONForRole(RoleGraph* graph,
+ const RoleName& roleName,
+ mutablebson::Element result) {
+ if (!graph->roleExists(roleName)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << roleName.getFullName()
+                                                << " does not name an existing role");
+ }
+ std::string id = mongoutils::str::stream() << roleName.getDB() << "." << roleName.getRole();
+ result.appendString("_id", id);
+ result.appendString(ROLE_NAME_FIELD_NAME, roleName.getRole());
+ result.appendString(ROLE_DB_FIELD_NAME, roleName.getDB());
+
+ // Build privileges array
+ mutablebson::Element privilegesArrayElement =
+ result.getDocument().makeElementArray("privileges");
+ result.pushBack(privilegesArrayElement);
+ const PrivilegeVector& privileges = graph->getDirectPrivileges(roleName);
+ Status status = getBSONForPrivileges(privileges, privilegesArrayElement);
+ if (!status.isOK()) {
+ return status;
}
- Status AuthorizationManager::getUserDescription(OperationContext* txn,
- const UserName& userName,
- BSONObj* result) {
- return _externalState->getUserDescription(txn, userName, result);
+ // Build roles array
+ mutablebson::Element rolesArrayElement = result.getDocument().makeElementArray("roles");
+ result.pushBack(rolesArrayElement);
+ for (RoleNameIterator roles = graph->getDirectSubordinates(roleName); roles.more();
+ roles.next()) {
+ const RoleName& subRole = roles.get();
+ mutablebson::Element roleObj = result.getDocument().makeElementObject("");
+ roleObj.appendString(ROLE_NAME_FIELD_NAME, subRole.getRole());
+ roleObj.appendString(ROLE_DB_FIELD_NAME, subRole.getDB());
+ rolesArrayElement.pushBack(roleObj);
}
- Status AuthorizationManager::getRoleDescription(const RoleName& roleName,
- bool showPrivileges,
- BSONObj* result) {
- return _externalState->getRoleDescription(roleName, showPrivileges, result);
+ return Status::OK();
+}
+
+Status AuthorizationManager::_initializeUserFromPrivilegeDocument(User* user,
+ const BSONObj& privDoc) {
+ V2UserDocumentParser parser;
+ std::string userName = parser.extractUserNameFromUserDocument(privDoc);
+ if (userName != user->getName().getUser()) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "User name from privilege document \""
+ << userName
+ << "\" doesn't match name of provided User \""
+ << user->getName().getUser() << "\"",
+ 0);
}
- Status AuthorizationManager::getRoleDescriptionsForDB(const std::string dbname,
- bool showPrivileges,
- bool showBuiltinRoles,
- vector<BSONObj>* result) {
- return _externalState->getRoleDescriptionsForDB(dbname,
- showPrivileges,
- showBuiltinRoles,
- result);
+ Status status = parser.initializeUserCredentialsFromUserDocument(user, privDoc);
+ if (!status.isOK()) {
+ return status;
+ }
+ status = parser.initializeUserRolesFromUserDocument(privDoc, user);
+ if (!status.isOK()) {
+ return status;
+ }
+ status = parser.initializeUserIndirectRolesFromUserDocument(privDoc, user);
+ if (!status.isOK()) {
+ return status;
+ }
+ status = parser.initializeUserPrivilegesFromUserDocument(privDoc, user);
+ if (!status.isOK()) {
+ return status;
}
- Status AuthorizationManager::acquireUser(
- OperationContext* txn, const UserName& userName, User** acquiredUser) {
- if (userName == internalSecurity.user->getName()) {
- *acquiredUser = internalSecurity.user;
- return Status::OK();
- }
+ return Status::OK();
+}
+
+Status AuthorizationManager::getUserDescription(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result) {
+ return _externalState->getUserDescription(txn, userName, result);
+}
+
+Status AuthorizationManager::getRoleDescription(const RoleName& roleName,
+ bool showPrivileges,
+ BSONObj* result) {
+ return _externalState->getRoleDescription(roleName, showPrivileges, result);
+}
+
+Status AuthorizationManager::getRoleDescriptionsForDB(const std::string dbname,
+ bool showPrivileges,
+ bool showBuiltinRoles,
+ vector<BSONObj>* result) {
+ return _externalState->getRoleDescriptionsForDB(
+ dbname, showPrivileges, showBuiltinRoles, result);
+}
+
+Status AuthorizationManager::acquireUser(OperationContext* txn,
+ const UserName& userName,
+ User** acquiredUser) {
+ if (userName == internalSecurity.user->getName()) {
+ *acquiredUser = internalSecurity.user;
+ return Status::OK();
+ }
- unordered_map<UserName, User*>::iterator it;
+ unordered_map<UserName, User*>::iterator it;
- CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
- while ((_userCache.end() == (it = _userCache.find(userName))) &&
- guard.otherUpdateInFetchPhase()) {
+ CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
+ while ((_userCache.end() == (it = _userCache.find(userName))) &&
+ guard.otherUpdateInFetchPhase()) {
+ guard.wait();
+ }
- guard.wait();
- }
+ if (it != _userCache.end()) {
+ fassert(16914, it->second);
+ fassert(17003, it->second->isValid());
+ fassert(17008, it->second->getRefCount() > 0);
+ it->second->incrementRefCount();
+ *acquiredUser = it->second;
+ return Status::OK();
+ }
- if (it != _userCache.end()) {
- fassert(16914, it->second);
- fassert(17003, it->second->isValid());
- fassert(17008, it->second->getRefCount() > 0);
- it->second->incrementRefCount();
- *acquiredUser = it->second;
- return Status::OK();
- }
+ std::unique_ptr<User> user;
- std::unique_ptr<User> user;
+ int authzVersion = _version;
+ guard.beginFetchPhase();
- int authzVersion = _version;
- guard.beginFetchPhase();
+    // Number of times to retry fetching a user document when the fetch fails with a transient
+    // AuthSchemaIncompatible error. These errors should only ever occur during and shortly
+ // after schema upgrades.
+ static const int maxAcquireRetries = 2;
+ Status status = Status::OK();
+ for (int i = 0; i < maxAcquireRetries; ++i) {
+ if (authzVersion == schemaVersionInvalid) {
+ Status status = _externalState->getStoredAuthorizationVersion(txn, &authzVersion);
+ if (!status.isOK())
+ return status;
+ }
- // Number of times to retry a user document that fetches due to transient
- // AuthSchemaIncompatible errors. These errors should only ever occur during and shortly
- // after schema upgrades.
- static const int maxAcquireRetries = 2;
- Status status = Status::OK();
- for (int i = 0; i < maxAcquireRetries; ++i) {
- if (authzVersion == schemaVersionInvalid) {
- Status status = _externalState->getStoredAuthorizationVersion(txn, &authzVersion);
- if (!status.isOK())
- return status;
- }
-
- switch (authzVersion) {
+ switch (authzVersion) {
default:
- status = Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Illegal value for authorization data schema version, " <<
- authzVersion);
+ status = Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Illegal value for authorization data schema version, "
+ << authzVersion);
break;
case schemaVersion28SCRAM:
case schemaVersion26Final:
@@ -493,182 +483,174 @@ namespace mongo {
status = _fetchUserV2(txn, userName, &user);
break;
case schemaVersion24:
- status = Status(ErrorCodes::AuthSchemaIncompatible, mongoutils::str::stream() <<
- "Authorization data schema version " << schemaVersion24 <<
- " not supported after MongoDB version 2.6.");
- break;
- }
- if (status.isOK())
+ status = Status(ErrorCodes::AuthSchemaIncompatible,
+ mongoutils::str::stream()
+ << "Authorization data schema version " << schemaVersion24
+ << " not supported after MongoDB version 2.6.");
break;
- if (status != ErrorCodes::AuthSchemaIncompatible)
- return status;
-
- authzVersion = schemaVersionInvalid;
}
- if (!status.isOK())
+ if (status.isOK())
+ break;
+ if (status != ErrorCodes::AuthSchemaIncompatible)
return status;
- guard.endFetchPhase();
-
- user->incrementRefCount();
- // NOTE: It is not safe to throw an exception from here to the end of the method.
- if (guard.isSameCacheGeneration()) {
- _userCache.insert(std::make_pair(userName, user.get()));
- if (_version == schemaVersionInvalid)
- _version = authzVersion;
- }
- else {
- // If the cache generation changed while this thread was in fetch mode, the data
- // associated with the user may now be invalid, so we must mark it as such. The caller
- // may still opt to use the information for a short while, but not indefinitely.
- user->invalidate();
- }
- *acquiredUser = user.release();
-
- return Status::OK();
+ authzVersion = schemaVersionInvalid;
+ }
+ if (!status.isOK())
+ return status;
+
+ guard.endFetchPhase();
+
+ user->incrementRefCount();
+ // NOTE: It is not safe to throw an exception from here to the end of the method.
+ if (guard.isSameCacheGeneration()) {
+ _userCache.insert(std::make_pair(userName, user.get()));
+ if (_version == schemaVersionInvalid)
+ _version = authzVersion;
+ } else {
+ // If the cache generation changed while this thread was in fetch mode, the data
+ // associated with the user may now be invalid, so we must mark it as such. The caller
+ // may still opt to use the information for a short while, but not indefinitely.
+ user->invalidate();
+ }
+ *acquiredUser = user.release();
+
+ return Status::OK();
+}
+
+Status AuthorizationManager::_fetchUserV2(OperationContext* txn,
+ const UserName& userName,
+ std::unique_ptr<User>* acquiredUser) {
+ BSONObj userObj;
+ Status status = getUserDescription(txn, userName, &userObj);
+ if (!status.isOK()) {
+ return status;
}
- Status AuthorizationManager::_fetchUserV2(OperationContext* txn,
- const UserName& userName,
- std::unique_ptr<User>* acquiredUser) {
- BSONObj userObj;
- Status status = getUserDescription(txn, userName, &userObj);
- if (!status.isOK()) {
- return status;
- }
-
- // Put the new user into an unique_ptr temporarily in case there's an error while
- // initializing the user.
- std::unique_ptr<User> user(new User(userName));
+    // Put the new user into a unique_ptr temporarily in case there's an error while
+ // initializing the user.
+ std::unique_ptr<User> user(new User(userName));
- status = _initializeUserFromPrivilegeDocument(user.get(), userObj);
- if (!status.isOK()) {
- return status;
- }
- acquiredUser->reset(user.release());
- return Status::OK();
+ status = _initializeUserFromPrivilegeDocument(user.get(), userObj);
+ if (!status.isOK()) {
+ return status;
}
+ acquiredUser->reset(user.release());
+ return Status::OK();
+}
- void AuthorizationManager::releaseUser(User* user) {
- if (user == internalSecurity.user) {
- return;
- }
+void AuthorizationManager::releaseUser(User* user) {
+ if (user == internalSecurity.user) {
+ return;
+ }
- CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
- user->decrementRefCount();
- if (user->getRefCount() == 0) {
- // If it's been invalidated then it's not in the _userCache anymore.
- if (user->isValid()) {
- MONGO_COMPILER_VARIABLE_UNUSED bool erased = _userCache.erase(user->getName());
- dassert(erased);
- }
- delete user;
+ CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
+ user->decrementRefCount();
+ if (user->getRefCount() == 0) {
+ // If it's been invalidated then it's not in the _userCache anymore.
+ if (user->isValid()) {
+ MONGO_COMPILER_VARIABLE_UNUSED bool erased = _userCache.erase(user->getName());
+ dassert(erased);
}
+ delete user;
+ }
+}
+
+void AuthorizationManager::invalidateUserByName(const UserName& userName) {
+ CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
+ _updateCacheGeneration_inlock();
+ unordered_map<UserName, User*>::iterator it = _userCache.find(userName);
+ if (it == _userCache.end()) {
+ return;
}
- void AuthorizationManager::invalidateUserByName(const UserName& userName) {
- CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
- _updateCacheGeneration_inlock();
- unordered_map<UserName, User*>::iterator it = _userCache.find(userName);
- if (it == _userCache.end()) {
- return;
- }
+ User* user = it->second;
+ _userCache.erase(it);
+ user->invalidate();
+}
+void AuthorizationManager::invalidateUsersFromDB(const std::string& dbname) {
+ CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
+ _updateCacheGeneration_inlock();
+ unordered_map<UserName, User*>::iterator it = _userCache.begin();
+ while (it != _userCache.end()) {
User* user = it->second;
- _userCache.erase(it);
- user->invalidate();
- }
-
- void AuthorizationManager::invalidateUsersFromDB(const std::string& dbname) {
- CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
- _updateCacheGeneration_inlock();
- unordered_map<UserName, User*>::iterator it = _userCache.begin();
- while (it != _userCache.end()) {
- User* user = it->second;
- if (user->getName().getDB() == dbname) {
- _userCache.erase(it++);
- user->invalidate();
- } else {
- ++it;
- }
+ if (user->getName().getDB() == dbname) {
+ _userCache.erase(it++);
+ user->invalidate();
+ } else {
+ ++it;
}
}
-
- void AuthorizationManager::invalidateUserCache() {
- CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
- _invalidateUserCache_inlock();
+}
+
+void AuthorizationManager::invalidateUserCache() {
+ CacheGuard guard(this, CacheGuard::fetchSynchronizationManual);
+ _invalidateUserCache_inlock();
+}
+
+void AuthorizationManager::_invalidateUserCache_inlock() {
+ _updateCacheGeneration_inlock();
+ for (unordered_map<UserName, User*>::iterator it = _userCache.begin(); it != _userCache.end();
+ ++it) {
+ fassert(17266, it->second != internalSecurity.user);
+ it->second->invalidate();
}
+ _userCache.clear();
- void AuthorizationManager::_invalidateUserCache_inlock() {
- _updateCacheGeneration_inlock();
- for (unordered_map<UserName, User*>::iterator it = _userCache.begin();
- it != _userCache.end(); ++it) {
- fassert(17266, it->second != internalSecurity.user);
- it->second->invalidate();
- }
- _userCache.clear();
+ // Reread the schema version before acquiring the next user.
+ _version = schemaVersionInvalid;
+}
- // Reread the schema version before acquiring the next user.
- _version = schemaVersionInvalid;
- }
+Status AuthorizationManager::initialize(OperationContext* txn) {
+ invalidateUserCache();
+ Status status = _externalState->initialize(txn);
+ if (!status.isOK())
+ return status;
- Status AuthorizationManager::initialize(OperationContext* txn) {
- invalidateUserCache();
- Status status = _externalState->initialize(txn);
- if (!status.isOK())
- return status;
-
- return Status::OK();
- }
+ return Status::OK();
+}
namespace {
- bool isAuthzNamespace(StringData ns) {
- return (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
- ns == AuthorizationManager::usersCollectionNamespace.ns() ||
- ns == AuthorizationManager::versionCollectionNamespace.ns());
- }
-
- bool isAuthzCollection(StringData coll) {
- return (coll == AuthorizationManager::rolesCollectionNamespace.coll() ||
- coll == AuthorizationManager::usersCollectionNamespace.coll() ||
- coll == AuthorizationManager::versionCollectionNamespace.coll());
- }
-
- bool loggedCommandOperatesOnAuthzData(const char* ns, const BSONObj& cmdObj) {
- if (ns != AuthorizationManager::adminCommandNamespace.ns())
- return false;
- const StringData cmdName(cmdObj.firstElement().fieldNameStringData());
- if (cmdName == "drop") {
- return isAuthzCollection(cmdObj.firstElement().valueStringData());
- }
- else if (cmdName == "dropDatabase") {
- return true;
- }
- else if (cmdName == "renameCollection") {
- return isAuthzCollection(cmdObj.firstElement().str()) ||
- isAuthzCollection(cmdObj["to"].str());
- }
- else if (cmdName == "dropIndexes" || cmdName == "deleteIndexes") {
- return false;
- }
- else if (cmdName == "create") {
- return false;
- }
- else {
- return true;
- }
+bool isAuthzNamespace(StringData ns) {
+ return (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
+ ns == AuthorizationManager::usersCollectionNamespace.ns() ||
+ ns == AuthorizationManager::versionCollectionNamespace.ns());
+}
+
+bool isAuthzCollection(StringData coll) {
+ return (coll == AuthorizationManager::rolesCollectionNamespace.coll() ||
+ coll == AuthorizationManager::usersCollectionNamespace.coll() ||
+ coll == AuthorizationManager::versionCollectionNamespace.coll());
+}
+
+bool loggedCommandOperatesOnAuthzData(const char* ns, const BSONObj& cmdObj) {
+ if (ns != AuthorizationManager::adminCommandNamespace.ns())
+ return false;
+ const StringData cmdName(cmdObj.firstElement().fieldNameStringData());
+ if (cmdName == "drop") {
+ return isAuthzCollection(cmdObj.firstElement().valueStringData());
+ } else if (cmdName == "dropDatabase") {
+ return true;
+ } else if (cmdName == "renameCollection") {
+ return isAuthzCollection(cmdObj.firstElement().str()) ||
+ isAuthzCollection(cmdObj["to"].str());
+ } else if (cmdName == "dropIndexes" || cmdName == "deleteIndexes") {
+ return false;
+ } else if (cmdName == "create") {
+ return false;
+ } else {
+ return true;
}
+}
- bool appliesToAuthzData(
- const char* op,
- const char* ns,
- const BSONObj& o) {
-
- switch (*op) {
+bool appliesToAuthzData(const char* op, const char* ns, const BSONObj& o) {
+ switch (*op) {
case 'i':
case 'u':
case 'd':
- if (op[1] != '\0') return false; // "db" op type
+ if (op[1] != '\0')
+ return false; // "db" op type
return isAuthzNamespace(ns);
case 'c':
return loggedCommandOperatesOnAuthzData(ns, o);
@@ -677,71 +659,66 @@ namespace {
return false;
default:
return true;
- }
}
-
- // Updates to users in the oplog are done by matching on the _id, which will always have the
- // form "<dbname>.<username>". This function extracts the UserName from that string.
- StatusWith<UserName> extractUserNameFromIdString(StringData idstr) {
- size_t splitPoint = idstr.find('.');
- if (splitPoint == string::npos) {
- return StatusWith<UserName>(
- ErrorCodes::FailedToParse,
- mongoutils::str::stream() << "_id entries for user documents must be of "
- "the form <dbname>.<username>. Found: " << idstr);
- }
- return StatusWith<UserName>(UserName(idstr.substr(splitPoint + 1),
- idstr.substr(0, splitPoint)));
+}
+
+// Updates to users in the oplog are done by matching on the _id, which will always have the
+// form "<dbname>.<username>". This function extracts the UserName from that string.
+StatusWith<UserName> extractUserNameFromIdString(StringData idstr) {
+ size_t splitPoint = idstr.find('.');
+ if (splitPoint == string::npos) {
+ return StatusWith<UserName>(ErrorCodes::FailedToParse,
+ mongoutils::str::stream()
+ << "_id entries for user documents must be of "
+ "the form <dbname>.<username>. Found: " << idstr);
}
+ return StatusWith<UserName>(
+ UserName(idstr.substr(splitPoint + 1), idstr.substr(0, splitPoint)));
+}
} // namespace
- void AuthorizationManager::_updateCacheGeneration_inlock() {
- _cacheGeneration = OID::gen();
+void AuthorizationManager::_updateCacheGeneration_inlock() {
+ _cacheGeneration = OID::gen();
+}
+
+void AuthorizationManager::_invalidateRelevantCacheData(const char* op,
+ const char* ns,
+ const BSONObj& o,
+ const BSONObj* o2) {
+ if (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
+ ns == AuthorizationManager::versionCollectionNamespace.ns()) {
+ invalidateUserCache();
+ return;
}
- void AuthorizationManager::_invalidateRelevantCacheData(const char* op,
- const char* ns,
- const BSONObj& o,
- const BSONObj* o2) {
- if (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
- ns == AuthorizationManager::versionCollectionNamespace.ns()) {
- invalidateUserCache();
- return;
- }
+ if (*op == 'i' || *op == 'd' || *op == 'u') {
+ // If you got into this function isAuthzNamespace() must have returned true, and we've
+ // already checked that it's not the roles or version collection.
+ invariant(ns == AuthorizationManager::usersCollectionNamespace.ns());
- if (*op == 'i' || *op == 'd' || *op == 'u') {
- // If you got into this function isAuthzNamespace() must have returned true, and we've
- // already checked that it's not the roles or version collection.
- invariant(ns == AuthorizationManager::usersCollectionNamespace.ns());
-
- StatusWith<UserName> userName = (*op == 'u') ?
- extractUserNameFromIdString((*o2)["_id"].str()) :
- extractUserNameFromIdString(o["_id"].str());
-
- if (!userName.isOK()) {
- warning() << "Invalidating user cache based on user being updated failed, will "
- "invalidate the entire cache instead: " << userName.getStatus() << endl;
- invalidateUserCache();
- return;
- }
- invalidateUserByName(userName.getValue());
- } else {
+ StatusWith<UserName> userName = (*op == 'u')
+ ? extractUserNameFromIdString((*o2)["_id"].str())
+ : extractUserNameFromIdString(o["_id"].str());
+
+ if (!userName.isOK()) {
+ warning() << "Invalidating user cache based on user being updated failed, will "
+ "invalidate the entire cache instead: " << userName.getStatus() << endl;
invalidateUserCache();
+ return;
}
+ invalidateUserByName(userName.getValue());
+ } else {
+ invalidateUserCache();
}
+}
- void AuthorizationManager::logOp(
- OperationContext* txn,
- const char* op,
- const char* ns,
- const BSONObj& o,
- BSONObj* o2) {
-
- _externalState->logOp(txn, op, ns, o, o2);
- if (appliesToAuthzData(op, ns, o)) {
- _invalidateRelevantCacheData(op, ns, o, o2);
- }
+void AuthorizationManager::logOp(
+ OperationContext* txn, const char* op, const char* ns, const BSONObj& o, BSONObj* o2) {
+ _externalState->logOp(txn, op, ns, o, o2);
+ if (appliesToAuthzData(op, ns, o)) {
+ _invalidateRelevantCacheData(op, ns, o, o2);
}
+}
-} // namespace mongo
+} // namespace mongo
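The invalidation path above keys off the _id of the affected user document, which always has the form "<dbname>.<username>"; find('.') locates the first dot, so a username may itself contain dots. A minimal standalone sketch of that split, using the hypothetical helper name splitUserIdString (not part of the MongoDB sources):

// Standalone sketch; splitUserIdString is a made-up name illustrating the
// "<dbname>.<username>" _id convention described in the diff above.
#include <iostream>
#include <string>
#include <utility>

// Returns {db, user}; both fields empty signals a malformed _id with no '.'.
std::pair<std::string, std::string> splitUserIdString(const std::string& idstr) {
    const std::size_t splitPoint = idstr.find('.');
    if (splitPoint == std::string::npos) {
        return {"", ""};  // caller should treat this as a parse failure
    }
    return {idstr.substr(0, splitPoint), idstr.substr(splitPoint + 1)};
}

int main() {
    // Only the first dot separates db from user, so "jane.doe" stays intact.
    const auto parsed = splitUserIdString("admin.jane.doe");
    std::cout << "db=" << parsed.first << " user=" << parsed.second << "\n";
    return 0;
}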
diff --git a/src/mongo/db/auth/authorization_manager.h b/src/mongo/db/auth/authorization_manager.h
index 8f7afa9f3fc..9c7fdbaf9d0 100644
--- a/src/mongo/db/auth/authorization_manager.h
+++ b/src/mongo/db/auth/authorization_manager.h
@@ -50,363 +50,363 @@
namespace mongo {
- class AuthorizationSession;
- class AuthzManagerExternalState;
- class OperationContext;
- class ServiceContext;
- class UserDocumentParser;
-
- /**
- * Internal secret key info.
- */
- struct AuthInfo {
- User* user;
- };
- extern AuthInfo internalSecurity; // set at startup and not changed after initialization.
-
- /**
- * Contains server/cluster-wide information about Authorization.
- */
- class AuthorizationManager {
- MONGO_DISALLOW_COPYING(AuthorizationManager);
- public:
- static AuthorizationManager* get(ServiceContext* service);
- static AuthorizationManager* get(ServiceContext& service);
- static void set(ServiceContext* service,
- std::unique_ptr<AuthorizationManager> authzManager);
-
- // The newly constructed AuthorizationManager takes ownership of "externalState"
- explicit AuthorizationManager(std::unique_ptr<AuthzManagerExternalState> externalState);
-
- ~AuthorizationManager();
-
- static const std::string USER_NAME_FIELD_NAME;
- static const std::string USER_DB_FIELD_NAME;
- static const std::string ROLE_NAME_FIELD_NAME;
- static const std::string ROLE_DB_FIELD_NAME;
- static const std::string PASSWORD_FIELD_NAME;
- static const std::string V1_USER_NAME_FIELD_NAME;
- static const std::string V1_USER_SOURCE_FIELD_NAME;
-
- static const NamespaceString adminCommandNamespace;
- static const NamespaceString rolesCollectionNamespace;
- static const NamespaceString usersAltCollectionNamespace;
- static const NamespaceString usersBackupCollectionNamespace;
- static const NamespaceString usersCollectionNamespace;
- static const NamespaceString versionCollectionNamespace;
- static const NamespaceString defaultTempUsersCollectionNamespace; // for mongorestore
- static const NamespaceString defaultTempRolesCollectionNamespace; // for mongorestore
-
- /**
- * Query to match the auth schema version document in the versionCollectionNamespace.
- */
- static const BSONObj versionDocumentQuery;
-
- /**
- * Name of the field in the auth schema version document containing the current schema
- * version.
- */
- static const std::string schemaVersionFieldName;
-
- /**
- * Value used to represent that the schema version is not cached or invalid.
- */
- static const int schemaVersionInvalid = 0;
-
- /**
- * Auth schema version for MongoDB v2.4 and prior.
- */
- static const int schemaVersion24 = 1;
-
- /**
- * Auth schema version for MongoDB v2.6 during the upgrade process. Same as
- * schemaVersion26Final, except that user documents are found in admin.new.users, and user
- * management commands are disabled.
- */
- static const int schemaVersion26Upgrade = 2;
-
- /**
- * Auth schema version for MongoDB 2.6 and 3.0 MONGODB-CR/SCRAM mixed auth mode.
- * Users are stored in admin.system.users, roles in admin.system.roles.
- */
- static const int schemaVersion26Final = 3;
-
- /**
- * Auth schema version for MongoDB 3.0 SCRAM only mode.
- * Users are stored in admin.system.users, roles in admin.system.roles.
- * MONGODB-CR credentials have been replaced with SCRAM credentials in the user documents.
- */
- static const int schemaVersion28SCRAM = 5;
-
- // TODO: Make the following functions no longer static.
-
- /**
- * Takes a vector of privileges and fills the output param "resultArray" with a BSON array
- * representation of the privileges.
- */
- static Status getBSONForPrivileges(const PrivilegeVector& privileges,
- mutablebson::Element resultArray);
-
- /**
- * Takes a role name and a role graph and fills the output param "result" with a BSON
- * representation of the role object.
- * This function does no locking - it is up to the caller to synchronize access to the
- * role graph.
- * Note: The passed in RoleGraph can't be marked const because some of its accessors can
- * actually modify it internally (to set up built-in roles).
- */
- static Status getBSONForRole(/*const*/ RoleGraph* graph,
- const RoleName& roleName,
- mutablebson::Element result);
-
- /**
- * Returns a new AuthorizationSession for use with this AuthorizationManager.
- */
- std::unique_ptr<AuthorizationSession> makeAuthorizationSession();
-
- /**
- * Sets whether or not access control enforcement is enabled for this manager.
- */
- void setAuthEnabled(bool enabled);
-
- /**
- * Returns true if access control is enabled for this manager .
- */
- bool isAuthEnabled() const;
-
- /**
- * Returns via the output parameter "version" the version number of the authorization
- * system. Returns Status::OK() if it was able to successfully fetch the current
- * authorization version. If it has problems fetching the most up to date version it
- * returns a non-OK status. When returning a non-OK status, *version will be set to
- * schemaVersionInvalid (0).
- */
- Status getAuthorizationVersion(OperationContext* txn, int* version);
-
- /**
- * Returns the user cache generation identifier.
- */
- OID getCacheGeneration();
-
- /**
- * Returns true if there exists at least one privilege document in the system.
- * Used by the AuthorizationSession to determine whether localhost connections should be
- * granted special access to bootstrap the system.
- * NOTE: If this method ever returns true, the result is cached in _privilegeDocsExist,
- * meaning that once this method returns true it will continue to return true for the
- * lifetime of this process, even if all users are subsequently dropped from the system.
- */
- bool hasAnyPrivilegeDocuments(OperationContext* txn);
-
- // Checks to see if "doc" is a valid privilege document, assuming it is stored in the
- // "system.users" collection of database "dbname".
- //
- // Returns Status::OK() if the document is good, or Status(ErrorCodes::BadValue), otherwise.
- Status checkValidPrivilegeDocument(StringData dbname, const BSONObj& doc);
-
- // Given a database name and a readOnly flag return an ActionSet describing all the actions
- // that an old-style user with those attributes should be given.
- ActionSet getActionsForOldStyleUser(const std::string& dbname, bool readOnly) const;
-
- /**
- * Writes into "result" a document describing the named user and returns Status::OK(). The
- * description includes the user credentials and customData, if present, the user's role
- * membership and delegation information, a full list of the user's privileges, and a full
- * list of the user's roles, including those roles held implicitly through other roles
- * (indirect roles). In the event that some of this information is inconsistent, the
- * document will contain a "warnings" array, with std::string messages describing
- * inconsistencies.
- *
- * If the user does not exist, returns ErrorCodes::UserNotFound.
- */
- Status getUserDescription(OperationContext* txn, const UserName& userName, BSONObj* result);
-
- /**
- * Writes into "result" a document describing the named role and returns Status::OK(). The
- * description includes the roles in which the named role has membership and a full list of
- * the roles of which the named role is a member, including those roles memberships held
- * implicitly through other roles (indirect roles). If "showPrivileges" is true, then the
- * description documents will also include a full list of the role's privileges.
- * In the event that some of this information is inconsistent, the document will contain a
- * "warnings" array, with std::string messages describing inconsistencies.
- *
- * If the role does not exist, returns ErrorCodes::RoleNotFound.
- */
- Status getRoleDescription(const RoleName& roleName, bool showPrivileges, BSONObj* result);
-
- /**
- * Writes into "result" documents describing the roles that are defined on the given
- * database. Each role description document includes the other roles in which the role has
- * membership and a full list of the roles of which the named role is a member,
- * including those roles memberships held implicitly through other roles (indirect roles).
- * If showPrivileges is true, then the description documents will also include a full list
- * of the role's privileges. If showBuiltinRoles is true, then the result array will
- * contain description documents for all the builtin roles for the given database, if it
- * is false the result will just include user defined roles.
- * In the event that some of the information in a given role description is inconsistent,
- * the document will contain a "warnings" array, with std::string messages describing
- * inconsistencies.
- */
- Status getRoleDescriptionsForDB(const std::string dbname,
- bool showPrivileges,
- bool showBuiltinRoles,
- std::vector<BSONObj>* result);
-
- /**
- * Returns the User object for the given userName in the out parameter "acquiredUser".
- * If the user cache already has a user object for this user, it increments the refcount
- * on that object and gives out a pointer to it. If no user object for this user name
- * exists yet in the cache, reads the user's privilege document from disk, builds up
- * a User object, sets the refcount to 1, and gives that out. The returned user may
- * be invalid by the time the caller gets access to it.
- * The AuthorizationManager retains ownership of the returned User object.
- * On non-OK Status return values, acquiredUser will not be modified.
- */
- Status acquireUser(OperationContext* txn, const UserName& userName, User** acquiredUser);
-
- /**
- * Decrements the refcount of the given User object. If the refcount has gone to zero,
- * deletes the User. Caller must stop using its pointer to "user" after calling this.
- */
- void releaseUser(User* user);
-
- /**
- * Marks the given user as invalid and removes it from the user cache.
- */
- void invalidateUserByName(const UserName& user);
-
- /**
- * Invalidates all users who's source is "dbname" and removes them from the user cache.
- */
- void invalidateUsersFromDB(const std::string& dbname);
-
- /**
- * Initializes the authorization manager. Depending on what version the authorization
- * system is at, this may involve building up the user cache and/or the roles graph.
- * Call this function at startup and after resynchronizing a slave/secondary.
- */
- Status initialize(OperationContext* txn);
-
- /**
- * Invalidates all of the contents of the user cache.
- */
- void invalidateUserCache();
-
- /**
- * Parses privDoc and fully initializes the user object (credentials, roles, and privileges)
- * with the information extracted from the privilege document.
- * This should never be called from outside the AuthorizationManager - the only reason it's
- * public instead of private is so it can be unit tested.
- */
- Status _initializeUserFromPrivilegeDocument(User* user, const BSONObj& privDoc);
-
- /**
- * Hook called by replication code to let the AuthorizationManager observe changes
- * to relevant collections.
- */
- void logOp(OperationContext* txn,
- const char* opstr,
- const char* ns,
- const BSONObj& obj,
- BSONObj* patt);
-
- private:
- /**
- * Type used to guard accesses and updates to the user cache.
- */
- class CacheGuard;
- friend class AuthorizationManager::CacheGuard;
-
- /**
- * Invalidates all User objects in the cache and removes them from the cache.
- * Should only be called when already holding _cacheMutex.
- */
- void _invalidateUserCache_inlock();
-
- /**
- * Given the objects describing an oplog entry that affects authorization data, invalidates
- * the portion of the user cache that is affected by that operation. Should only be called
- * with oplog entries that have been pre-verified to actually affect authorization data.
- */
- void _invalidateRelevantCacheData(const char* op,
- const char* ns,
- const BSONObj& o,
- const BSONObj* o2);
-
- /**
- * Updates _cacheGeneration to a new OID
- */
- void _updateCacheGeneration_inlock();
-
- /**
- * Fetches user information from a v2-schema user document for the named user,
- * and stores a pointer to a new user object into *acquiredUser on success.
- */
- Status _fetchUserV2(OperationContext* txn,
- const UserName& userName,
- std::unique_ptr<User>* acquiredUser);
-
- /**
- * True if access control enforcement is enabled in this AuthorizationManager.
- *
- * Defaults to false. Changes to its value are not synchronized, so it should only be set
- * at initalization-time.
- */
- bool _authEnabled;
-
- /**
- * A cache of whether there are any users set up for the cluster.
- */
- bool _privilegeDocsExist;
-
- // Protects _privilegeDocsExist
- mutable stdx::mutex _privilegeDocsExistMutex;
-
- std::unique_ptr<AuthzManagerExternalState> _externalState;
-
- /**
- * Cached value of the authorization schema version.
- *
- * May be set by acquireUser() and getAuthorizationVersion(). Invalidated by
- * invalidateUserCache().
- *
- * Reads and writes guarded by CacheGuard.
- */
- int _version;
-
- /**
- * Caches User objects with information about user privileges, to avoid the need to
- * go to disk to read user privilege documents whenever possible. Every User object
- * has a reference count - the AuthorizationManager must not delete a User object in the
- * cache unless its reference count is zero.
- */
- unordered_map<UserName, User*> _userCache;
-
- /**
- * Current generation of cached data. Updated every time part of the cache gets
- * invalidated. Protected by CacheGuard.
- */
- OID _cacheGeneration;
-
- /**
- * True if there is an update to the _userCache in progress, and that update is currently in
- * the "fetch phase", during which it does not hold the _cacheMutex.
- *
- * Manipulated via CacheGuard.
- */
- bool _isFetchPhaseBusy;
-
- /**
- * Protects _userCache, _cacheGeneration, _version and _isFetchPhaseBusy. Manipulated
- * via CacheGuard.
- */
- stdx::mutex _cacheMutex;
-
- /**
- * Condition used to signal that it is OK for another CacheGuard to enter a fetch phase.
- * Manipulated via CacheGuard.
- */
- stdx::condition_variable _fetchPhaseIsReady;
- };
-
-} // namespace mongo
+class AuthorizationSession;
+class AuthzManagerExternalState;
+class OperationContext;
+class ServiceContext;
+class UserDocumentParser;
+
+/**
+ * Internal secret key info.
+ */
+struct AuthInfo {
+ User* user;
+};
+extern AuthInfo internalSecurity; // set at startup and not changed after initialization.
+
+/**
+ * Contains server/cluster-wide information about Authorization.
+ */
+class AuthorizationManager {
+ MONGO_DISALLOW_COPYING(AuthorizationManager);
+
+public:
+ static AuthorizationManager* get(ServiceContext* service);
+ static AuthorizationManager* get(ServiceContext& service);
+ static void set(ServiceContext* service, std::unique_ptr<AuthorizationManager> authzManager);
+
+ // The newly constructed AuthorizationManager takes ownership of "externalState"
+ explicit AuthorizationManager(std::unique_ptr<AuthzManagerExternalState> externalState);
+
+ ~AuthorizationManager();
+
+ static const std::string USER_NAME_FIELD_NAME;
+ static const std::string USER_DB_FIELD_NAME;
+ static const std::string ROLE_NAME_FIELD_NAME;
+ static const std::string ROLE_DB_FIELD_NAME;
+ static const std::string PASSWORD_FIELD_NAME;
+ static const std::string V1_USER_NAME_FIELD_NAME;
+ static const std::string V1_USER_SOURCE_FIELD_NAME;
+
+ static const NamespaceString adminCommandNamespace;
+ static const NamespaceString rolesCollectionNamespace;
+ static const NamespaceString usersAltCollectionNamespace;
+ static const NamespaceString usersBackupCollectionNamespace;
+ static const NamespaceString usersCollectionNamespace;
+ static const NamespaceString versionCollectionNamespace;
+ static const NamespaceString defaultTempUsersCollectionNamespace; // for mongorestore
+ static const NamespaceString defaultTempRolesCollectionNamespace; // for mongorestore
+
+ /**
+ * Query to match the auth schema version document in the versionCollectionNamespace.
+ */
+ static const BSONObj versionDocumentQuery;
+
+ /**
+ * Name of the field in the auth schema version document containing the current schema
+ * version.
+ */
+ static const std::string schemaVersionFieldName;
+
+ /**
+ * Value used to represent that the schema version is not cached or invalid.
+ */
+ static const int schemaVersionInvalid = 0;
+
+ /**
+ * Auth schema version for MongoDB v2.4 and prior.
+ */
+ static const int schemaVersion24 = 1;
+
+ /**
+ * Auth schema version for MongoDB v2.6 during the upgrade process. Same as
+ * schemaVersion26Final, except that user documents are found in admin.new.users, and user
+ * management commands are disabled.
+ */
+ static const int schemaVersion26Upgrade = 2;
+
+ /**
+ * Auth schema version for MongoDB 2.6 and 3.0 MONGODB-CR/SCRAM mixed auth mode.
+ * Users are stored in admin.system.users, roles in admin.system.roles.
+ */
+ static const int schemaVersion26Final = 3;
+
+ /**
+ * Auth schema version for MongoDB 3.0 SCRAM only mode.
+ * Users are stored in admin.system.users, roles in admin.system.roles.
+ * MONGODB-CR credentials have been replaced with SCRAM credentials in the user documents.
+ */
+ static const int schemaVersion28SCRAM = 5;
+
+ // TODO: Make the following functions no longer static.
+
+ /**
+ * Takes a vector of privileges and fills the output param "resultArray" with a BSON array
+ * representation of the privileges.
+ */
+ static Status getBSONForPrivileges(const PrivilegeVector& privileges,
+ mutablebson::Element resultArray);
+
+ /**
+ * Takes a role name and a role graph and fills the output param "result" with a BSON
+ * representation of the role object.
+ * This function does no locking - it is up to the caller to synchronize access to the
+ * role graph.
+ * Note: The passed in RoleGraph can't be marked const because some of its accessors can
+ * actually modify it internally (to set up built-in roles).
+ */
+ static Status getBSONForRole(/*const*/ RoleGraph* graph,
+ const RoleName& roleName,
+ mutablebson::Element result);
+
+ /**
+ * Returns a new AuthorizationSession for use with this AuthorizationManager.
+ */
+ std::unique_ptr<AuthorizationSession> makeAuthorizationSession();
+
+ /**
+ * Sets whether or not access control enforcement is enabled for this manager.
+ */
+ void setAuthEnabled(bool enabled);
+
+ /**
+ * Returns true if access control is enabled for this manager.
+ */
+ bool isAuthEnabled() const;
+
+ /**
+ * Returns via the output parameter "version" the version number of the authorization
+ * system. Returns Status::OK() if it was able to successfully fetch the current
+ * authorization version. If it has problems fetching the most up to date version it
+ * returns a non-OK status. When returning a non-OK status, *version will be set to
+ * schemaVersionInvalid (0).
+ */
+ Status getAuthorizationVersion(OperationContext* txn, int* version);
+
+ /**
+ * Returns the user cache generation identifier.
+ */
+ OID getCacheGeneration();
+
+ /**
+ * Returns true if there exists at least one privilege document in the system.
+ * Used by the AuthorizationSession to determine whether localhost connections should be
+ * granted special access to bootstrap the system.
+ * NOTE: If this method ever returns true, the result is cached in _privilegeDocsExist,
+ * meaning that once this method returns true it will continue to return true for the
+ * lifetime of this process, even if all users are subsequently dropped from the system.
+ */
+ bool hasAnyPrivilegeDocuments(OperationContext* txn);
+
+ // Checks to see if "doc" is a valid privilege document, assuming it is stored in the
+ // "system.users" collection of database "dbname".
+ //
+ // Returns Status::OK() if the document is good, or Status(ErrorCodes::BadValue), otherwise.
+ Status checkValidPrivilegeDocument(StringData dbname, const BSONObj& doc);
+
+ // Given a database name and a readOnly flag return an ActionSet describing all the actions
+ // that an old-style user with those attributes should be given.
+ ActionSet getActionsForOldStyleUser(const std::string& dbname, bool readOnly) const;
+
+ /**
+ * Writes into "result" a document describing the named user and returns Status::OK(). The
+ * description includes the user credentials and customData, if present, the user's role
+ * membership and delegation information, a full list of the user's privileges, and a full
+ * list of the user's roles, including those roles held implicitly through other roles
+ * (indirect roles). In the event that some of this information is inconsistent, the
+ * document will contain a "warnings" array, with std::string messages describing
+ * inconsistencies.
+ *
+ * If the user does not exist, returns ErrorCodes::UserNotFound.
+ */
+ Status getUserDescription(OperationContext* txn, const UserName& userName, BSONObj* result);
+
+ /**
+ * Writes into "result" a document describing the named role and returns Status::OK(). The
+ * description includes the roles in which the named role has membership and a full list of
+ * the roles of which the named role is a member, including those role memberships held
+ * implicitly through other roles (indirect roles). If "showPrivileges" is true, then the
+ * description documents will also include a full list of the role's privileges.
+ * In the event that some of this information is inconsistent, the document will contain a
+ * "warnings" array, with std::string messages describing inconsistencies.
+ *
+ * If the role does not exist, returns ErrorCodes::RoleNotFound.
+ */
+ Status getRoleDescription(const RoleName& roleName, bool showPrivileges, BSONObj* result);
+
+ /**
+ * Writes into "result" documents describing the roles that are defined on the given
+ * database. Each role description document includes the other roles in which the role has
+ * membership and a full list of the roles of which the named role is a member,
+ * including those role memberships held implicitly through other roles (indirect roles).
+ * If showPrivileges is true, then the description documents will also include a full list
+ * of the role's privileges. If showBuiltinRoles is true, then the result array will
+ * contain description documents for all the builtin roles for the given database; if it
+ * is false, the result will just include user-defined roles.
+ * In the event that some of the information in a given role description is inconsistent,
+ * the document will contain a "warnings" array, with std::string messages describing
+ * inconsistencies.
+ */
+ Status getRoleDescriptionsForDB(const std::string dbname,
+ bool showPrivileges,
+ bool showBuiltinRoles,
+ std::vector<BSONObj>* result);
+
+ /**
+ * Returns the User object for the given userName in the out parameter "acquiredUser".
+ * If the user cache already has a user object for this user, it increments the refcount
+ * on that object and gives out a pointer to it. If no user object for this user name
+ * exists yet in the cache, reads the user's privilege document from disk, builds up
+ * a User object, sets the refcount to 1, and gives that out. The returned user may
+ * be invalid by the time the caller gets access to it.
+ * The AuthorizationManager retains ownership of the returned User object.
+ * On non-OK Status return values, acquiredUser will not be modified.
+ */
+ Status acquireUser(OperationContext* txn, const UserName& userName, User** acquiredUser);
+
+ /**
+ * Decrements the refcount of the given User object. If the refcount has gone to zero,
+ * deletes the User. Caller must stop using its pointer to "user" after calling this.
+ */
+ void releaseUser(User* user);
+
+ /**
+ * Marks the given user as invalid and removes it from the user cache.
+ */
+ void invalidateUserByName(const UserName& user);
+
+ /**
+ * Invalidates all users whose source is "dbname" and removes them from the user cache.
+ */
+ void invalidateUsersFromDB(const std::string& dbname);
+
+ /**
+ * Initializes the authorization manager. Depending on what version the authorization
+ * system is at, this may involve building up the user cache and/or the roles graph.
+ * Call this function at startup and after resynchronizing a slave/secondary.
+ */
+ Status initialize(OperationContext* txn);
+
+ /**
+ * Invalidates all of the contents of the user cache.
+ */
+ void invalidateUserCache();
+
+ /**
+ * Parses privDoc and fully initializes the user object (credentials, roles, and privileges)
+ * with the information extracted from the privilege document.
+ * This should never be called from outside the AuthorizationManager - the only reason it's
+ * public instead of private is so it can be unit tested.
+ */
+ Status _initializeUserFromPrivilegeDocument(User* user, const BSONObj& privDoc);
+
+ /**
+ * Hook called by replication code to let the AuthorizationManager observe changes
+ * to relevant collections.
+ */
+ void logOp(OperationContext* txn,
+ const char* opstr,
+ const char* ns,
+ const BSONObj& obj,
+ BSONObj* patt);
+
+private:
+ /**
+ * Type used to guard accesses and updates to the user cache.
+ */
+ class CacheGuard;
+ friend class AuthorizationManager::CacheGuard;
+
+ /**
+ * Invalidates all User objects in the cache and removes them from the cache.
+ * Should only be called when already holding _cacheMutex.
+ */
+ void _invalidateUserCache_inlock();
+
+ /**
+ * Given the objects describing an oplog entry that affects authorization data, invalidates
+ * the portion of the user cache that is affected by that operation. Should only be called
+ * with oplog entries that have been pre-verified to actually affect authorization data.
+ */
+ void _invalidateRelevantCacheData(const char* op,
+ const char* ns,
+ const BSONObj& o,
+ const BSONObj* o2);
+
+ /**
+ * Updates _cacheGeneration to a new OID
+ */
+ void _updateCacheGeneration_inlock();
+
+ /**
+ * Fetches user information from a v2-schema user document for the named user,
+ * and stores a pointer to a new user object into *acquiredUser on success.
+ */
+ Status _fetchUserV2(OperationContext* txn,
+ const UserName& userName,
+ std::unique_ptr<User>* acquiredUser);
+
+ /**
+ * True if access control enforcement is enabled in this AuthorizationManager.
+ *
+ * Defaults to false. Changes to its value are not synchronized, so it should only be set
+ * at initialization-time.
+ */
+ bool _authEnabled;
+
+ /**
+ * A cache of whether there are any users set up for the cluster.
+ */
+ bool _privilegeDocsExist;
+
+ // Protects _privilegeDocsExist
+ mutable stdx::mutex _privilegeDocsExistMutex;
+
+ std::unique_ptr<AuthzManagerExternalState> _externalState;
+
+ /**
+ * Cached value of the authorization schema version.
+ *
+ * May be set by acquireUser() and getAuthorizationVersion(). Invalidated by
+ * invalidateUserCache().
+ *
+ * Reads and writes guarded by CacheGuard.
+ */
+ int _version;
+
+ /**
+ * Caches User objects with information about user privileges, to avoid the need to
+ * go to disk to read user privilege documents whenever possible. Every User object
+ * has a reference count - the AuthorizationManager must not delete a User object in the
+ * cache unless its reference count is zero.
+ */
+ unordered_map<UserName, User*> _userCache;
+
+ /**
+ * Current generation of cached data. Updated every time part of the cache gets
+ * invalidated. Protected by CacheGuard.
+ */
+ OID _cacheGeneration;
+
+ /**
+ * True if there is an update to the _userCache in progress, and that update is currently in
+ * the "fetch phase", during which it does not hold the _cacheMutex.
+ *
+ * Manipulated via CacheGuard.
+ */
+ bool _isFetchPhaseBusy;
+
+ /**
+ * Protects _userCache, _cacheGeneration, _version and _isFetchPhaseBusy. Manipulated
+ * via CacheGuard.
+ */
+ stdx::mutex _cacheMutex;
+
+ /**
+ * Condition used to signal that it is OK for another CacheGuard to enter a fetch phase.
+ * Manipulated via CacheGuard.
+ */
+ stdx::condition_variable _fetchPhaseIsReady;
+};
+
+} // namespace mongo
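The acquireUser()/releaseUser() comments in this header describe a manually refcounted cache: every successful acquire must be paired with a release, and a cached User is only deleted once its count drops to zero. A toy standalone model of that discipline follows; TinyUserCache and CachedUser are hypothetical stand-ins, not the real User or AuthorizationManager types.

// Toy model of the refcounting contract documented above (hypothetical types).
#include <cassert>
#include <map>
#include <string>

struct CachedUser {
    std::string name;
    int refCount = 0;
};

class TinyUserCache {
public:
    // Analogous to acquireUser(): hand out a pointer and bump the refcount.
    CachedUser* acquire(const std::string& name) {
        CachedUser& u = _cache[name];
        u.name = name;
        ++u.refCount;
        return &u;
    }

    // Analogous to releaseUser(): the caller must stop using the pointer afterwards.
    void release(CachedUser* user) {
        assert(user->refCount > 0);
        const std::string name = user->name;  // copy before the entry may be destroyed
        if (--user->refCount == 0) {
            _cache.erase(name);  // delete only once nobody holds a reference
        }
    }

private:
    std::map<std::string, CachedUser> _cache;
};

int main() {
    TinyUserCache cache;
    CachedUser* u = cache.acquire("test.v2read");
    assert(u->refCount == 1);
    cache.release(u);  // count hits zero, entry is dropped
    return 0;
}

The unit test further down (testAcquireV2User) follows the same rule: each acquireUser() call is matched by a releaseUser() so the refcount is back to zero before the test ends.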
diff --git a/src/mongo/db/auth/authorization_manager_global.cpp b/src/mongo/db/auth/authorization_manager_global.cpp
index cc5ee6513f5..2fc20deef25 100644
--- a/src/mongo/db/auth/authorization_manager_global.cpp
+++ b/src/mongo/db/auth/authorization_manager_global.cpp
@@ -38,51 +38,50 @@
namespace mongo {
namespace {
- class AuthzVersionParameter : public ServerParameter {
- MONGO_DISALLOW_COPYING(AuthzVersionParameter);
- public:
- AuthzVersionParameter(ServerParameterSet* sps, const std::string& name);
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
- virtual Status set(const BSONElement& newValueElement);
- virtual Status setFromString(const std::string& str);
- };
+class AuthzVersionParameter : public ServerParameter {
+ MONGO_DISALLOW_COPYING(AuthzVersionParameter);
- MONGO_INITIALIZER_GENERAL(AuthzSchemaParameter,
- MONGO_NO_PREREQUISITES,
- ("BeginStartupOptionParsing"))(InitializerContext*) {
- new AuthzVersionParameter(ServerParameterSet::getGlobal(),
- authSchemaVersionServerParameter);
- return Status::OK();
- }
+public:
+ AuthzVersionParameter(ServerParameterSet* sps, const std::string& name);
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
+ virtual Status set(const BSONElement& newValueElement);
+ virtual Status setFromString(const std::string& str);
+};
- AuthzVersionParameter::AuthzVersionParameter(ServerParameterSet* sps, const std::string& name) :
- ServerParameter(sps, name, false, false) {}
+MONGO_INITIALIZER_GENERAL(AuthzSchemaParameter,
+ MONGO_NO_PREREQUISITES,
+ ("BeginStartupOptionParsing"))(InitializerContext*) {
+ new AuthzVersionParameter(ServerParameterSet::getGlobal(), authSchemaVersionServerParameter);
+ return Status::OK();
+}
- void AuthzVersionParameter::append(
- OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- int authzVersion;
- uassertStatusOK(
- getGlobalAuthorizationManager()->getAuthorizationVersion(txn, &authzVersion));
- b.append(name, authzVersion);
- }
+AuthzVersionParameter::AuthzVersionParameter(ServerParameterSet* sps, const std::string& name)
+ : ServerParameter(sps, name, false, false) {}
- Status AuthzVersionParameter::set(const BSONElement& newValueElement) {
- return Status(ErrorCodes::InternalError, "set called on unsettable server parameter");
- }
+void AuthzVersionParameter::append(OperationContext* txn,
+ BSONObjBuilder& b,
+ const std::string& name) {
+ int authzVersion;
+ uassertStatusOK(getGlobalAuthorizationManager()->getAuthorizationVersion(txn, &authzVersion));
+ b.append(name, authzVersion);
+}
- Status AuthzVersionParameter::setFromString(const std::string& newValueString) {
- return Status(ErrorCodes::InternalError, "set called on unsettable server parameter");
- }
+Status AuthzVersionParameter::set(const BSONElement& newValueElement) {
+ return Status(ErrorCodes::InternalError, "set called on unsettable server parameter");
+}
+
+Status AuthzVersionParameter::setFromString(const std::string& newValueString) {
+ return Status(ErrorCodes::InternalError, "set called on unsettable server parameter");
+}
} // namespace
- const std::string authSchemaVersionServerParameter = "authSchemaVersion";
+const std::string authSchemaVersionServerParameter = "authSchemaVersion";
- AuthorizationManager* getGlobalAuthorizationManager() {
- AuthorizationManager* globalAuthManager = AuthorizationManager::get(
- getGlobalServiceContext());
- fassert(16842, globalAuthManager != nullptr);
- return globalAuthManager;
- }
+AuthorizationManager* getGlobalAuthorizationManager() {
+ AuthorizationManager* globalAuthManager = AuthorizationManager::get(getGlobalServiceContext());
+ fassert(16842, globalAuthManager != nullptr);
+ return globalAuthManager;
+}
-} // namespace mongo
+} // namespace mongo
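AuthzVersionParameter above is deliberately read-only: append() reports the current auth schema version, while set() and setFromString() always return an error. A standalone sketch of the same pattern, using hypothetical Status and ReadOnlyIntParameter types rather than the real ServerParameter interface:

// Read-only parameter sketch (hypothetical types, not the mongo ServerParameter API).
#include <iostream>
#include <string>

struct Status {
    bool ok;
    std::string reason;
};

class ReadOnlyIntParameter {
public:
    explicit ReadOnlyIntParameter(int value) : _value(value) {}

    // Reads succeed, as append() does above.
    int get() const { return _value; }

    // Writes always fail, mirroring set()/setFromString() above.
    Status set(const std::string&) {
        return {false, "set called on unsettable server parameter"};
    }

private:
    int _value;
};

int main() {
    ReadOnlyIntParameter authSchemaVersion(3);  // 3 == schemaVersion26Final in the header above
    std::cout << authSchemaVersion.get() << "\n";
    std::cout << authSchemaVersion.set("5").reason << "\n";
    return 0;
}

In the real server the value is surfaced through getParameter under the name "authSchemaVersion", as registered by the initializer above.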
diff --git a/src/mongo/db/auth/authorization_manager_global.h b/src/mongo/db/auth/authorization_manager_global.h
index b0ef39f0069..3e6e936e520 100644
--- a/src/mongo/db/auth/authorization_manager_global.h
+++ b/src/mongo/db/auth/authorization_manager_global.h
@@ -32,12 +32,12 @@
namespace mongo {
- /**
- * Name of the server parameter used to report the auth schema version (via getParameter).
- */
- extern const std::string authSchemaVersionServerParameter;
+/**
+ * Name of the server parameter used to report the auth schema version (via getParameter).
+ */
+extern const std::string authSchemaVersionServerParameter;
- // Gets the singleton AuthorizationManager object for this server process.
- AuthorizationManager* getGlobalAuthorizationManager();
+// Gets the singleton AuthorizationManager object for this server process.
+AuthorizationManager* getGlobalAuthorizationManager();
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/authorization_manager_mock_init.cpp b/src/mongo/db/auth/authorization_manager_mock_init.cpp
index eadf9711604..890ee2c0a2b 100644
--- a/src/mongo/db/auth/authorization_manager_mock_init.cpp
+++ b/src/mongo/db/auth/authorization_manager_mock_init.cpp
@@ -41,18 +41,18 @@
namespace mongo {
namespace {
- std::unique_ptr<AuthzManagerExternalState> createAuthzManagerExternalStateMock() {
- return stdx::make_unique<AuthzManagerExternalStateMock>();
- }
+std::unique_ptr<AuthzManagerExternalState> createAuthzManagerExternalStateMock() {
+ return stdx::make_unique<AuthzManagerExternalStateMock>();
+}
- MONGO_INITIALIZER(CreateAuthorizationExternalStateFactory) (InitializerContext* context) {
- AuthzManagerExternalState::create = &createAuthzManagerExternalStateMock;
- return Status::OK();
- }
+MONGO_INITIALIZER(CreateAuthorizationExternalStateFactory)(InitializerContext* context) {
+ AuthzManagerExternalState::create = &createAuthzManagerExternalStateMock;
+ return Status::OK();
+}
- MONGO_INITIALIZER(SetGlobalEnvironment)(InitializerContext* context) {
- setGlobalServiceContext(stdx::make_unique<ServiceContextNoop>());
- return Status::OK();
- }
+MONGO_INITIALIZER(SetGlobalEnvironment)(InitializerContext* context) {
+ setGlobalServiceContext(stdx::make_unique<ServiceContextNoop>());
+ return Status::OK();
+}
}
}
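These initializers point the AuthzManagerExternalState::create factory at one returning the mock, so unit-test binaries never touch real user and role storage. A standalone sketch of that swappable-factory idea follows; ExternalState, RealState, MockState, and createExternalState are hypothetical names, and only the ::create hook itself comes from the diff.

// Swappable factory sketch (hypothetical types; the real hook is
// AuthzManagerExternalState::create, assigned by the initializer above).
#include <functional>
#include <iostream>
#include <memory>

struct ExternalState {
    virtual ~ExternalState() = default;
    virtual const char* kind() const = 0;
};
struct RealState : ExternalState {
    const char* kind() const override { return "real"; }
};
struct MockState : ExternalState {
    const char* kind() const override { return "mock"; }
};

// Global factory; production wires in RealState, test binaries wire in MockState.
std::function<std::unique_ptr<ExternalState>()> createExternalState = [] {
    return std::unique_ptr<ExternalState>(new RealState());
};

int main() {
    // A test initializer replaces the factory before anything consumes it.
    createExternalState = [] { return std::unique_ptr<ExternalState>(new MockState()); };
    std::cout << createExternalState()->kind() << "\n";  // prints "mock"
    return 0;
}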
diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp
index 65a2f84243c..9114e8268c0 100644
--- a/src/mongo/db/auth/authorization_manager_test.cpp
+++ b/src/mongo/db/auth/authorization_manager_test.cpp
@@ -49,172 +49,185 @@
namespace mongo {
namespace {
- using std::vector;
-
- TEST(RoleParsingTest, BuildRoleBSON) {
- RoleGraph graph;
- RoleName roleA("roleA", "dbA");
- RoleName roleB("roleB", "dbB");
- RoleName roleC("roleC", "dbC");
- ActionSet actions;
- actions.addAction(ActionType::find);
- actions.addAction(ActionType::insert);
-
- ASSERT_OK(graph.createRole(roleA));
- ASSERT_OK(graph.createRole(roleB));
- ASSERT_OK(graph.createRole(roleC));
-
- ASSERT_OK(graph.addRoleToRole(roleA, roleC));
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
- ASSERT_OK(graph.addRoleToRole(roleB, roleC));
-
- ASSERT_OK(graph.addPrivilegeToRole(
- roleA, Privilege(ResourcePattern::forAnyNormalResource(), actions)));
- ASSERT_OK(graph.addPrivilegeToRole(
- roleB, Privilege(ResourcePattern::forExactNamespace(NamespaceString("dbB.foo")),
- actions)));
- ASSERT_OK(graph.addPrivilegeToRole(
- roleC, Privilege(ResourcePattern::forClusterResource(), actions)));
- ASSERT_OK(graph.recomputePrivilegeData());
-
-
- // Role A
- mutablebson::Document doc;
- ASSERT_OK(AuthorizationManager::getBSONForRole(&graph, roleA, doc.root()));
- BSONObj roleDoc = doc.getObject();
-
- ASSERT_EQUALS("dbA.roleA", roleDoc["_id"].String());
- ASSERT_EQUALS("roleA", roleDoc["role"].String());
- ASSERT_EQUALS("dbA", roleDoc["db"].String());
-
- vector<BSONElement> privs = roleDoc["privileges"].Array();
- ASSERT_EQUALS(1U, privs.size());
- ASSERT_EQUALS("", privs[0].Obj()["resource"].Obj()["db"].String());
- ASSERT_EQUALS("", privs[0].Obj()["resource"].Obj()["collection"].String());
- ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].eoo());
- vector<BSONElement> actionElements = privs[0].Obj()["actions"].Array();
- ASSERT_EQUALS(2U, actionElements.size());
- ASSERT_EQUALS("find", actionElements[0].String());
- ASSERT_EQUALS("insert", actionElements[1].String());
-
- vector<BSONElement> roles = roleDoc["roles"].Array();
- ASSERT_EQUALS(2U, roles.size());
- ASSERT_EQUALS("roleC", roles[0].Obj()["role"].String());
- ASSERT_EQUALS("dbC", roles[0].Obj()["db"].String());
- ASSERT_EQUALS("roleB", roles[1].Obj()["role"].String());
- ASSERT_EQUALS("dbB", roles[1].Obj()["db"].String());
-
- // Role B
- doc.reset();
- ASSERT_OK(AuthorizationManager::getBSONForRole(&graph, roleB, doc.root()));
- roleDoc = doc.getObject();
-
- ASSERT_EQUALS("dbB.roleB", roleDoc["_id"].String());
- ASSERT_EQUALS("roleB", roleDoc["role"].String());
- ASSERT_EQUALS("dbB", roleDoc["db"].String());
-
- privs = roleDoc["privileges"].Array();
- ASSERT_EQUALS(1U, privs.size());
- ASSERT_EQUALS("dbB", privs[0].Obj()["resource"].Obj()["db"].String());
- ASSERT_EQUALS("foo", privs[0].Obj()["resource"].Obj()["collection"].String());
- ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].eoo());
- actionElements = privs[0].Obj()["actions"].Array();
- ASSERT_EQUALS(2U, actionElements.size());
- ASSERT_EQUALS("find", actionElements[0].String());
- ASSERT_EQUALS("insert", actionElements[1].String());
-
- roles = roleDoc["roles"].Array();
- ASSERT_EQUALS(1U, roles.size());
- ASSERT_EQUALS("roleC", roles[0].Obj()["role"].String());
- ASSERT_EQUALS("dbC", roles[0].Obj()["db"].String());
-
- // Role C
- doc.reset();
- ASSERT_OK(AuthorizationManager::getBSONForRole(&graph, roleC, doc.root()));
- roleDoc = doc.getObject();
-
- ASSERT_EQUALS("dbC.roleC", roleDoc["_id"].String());
- ASSERT_EQUALS("roleC", roleDoc["role"].String());
- ASSERT_EQUALS("dbC", roleDoc["db"].String());
-
- privs = roleDoc["privileges"].Array();
- ASSERT_EQUALS(1U, privs.size());
- ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].Bool());
- ASSERT(privs[0].Obj()["resource"].Obj()["db"].eoo());
- ASSERT(privs[0].Obj()["resource"].Obj()["collection"].eoo());
- actionElements = privs[0].Obj()["actions"].Array();
- ASSERT_EQUALS(2U, actionElements.size());
- ASSERT_EQUALS("find", actionElements[0].String());
- ASSERT_EQUALS("insert", actionElements[1].String());
-
- roles = roleDoc["roles"].Array();
- ASSERT_EQUALS(0U, roles.size());
+using std::vector;
+
+TEST(RoleParsingTest, BuildRoleBSON) {
+ RoleGraph graph;
+ RoleName roleA("roleA", "dbA");
+ RoleName roleB("roleB", "dbB");
+ RoleName roleC("roleC", "dbC");
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ actions.addAction(ActionType::insert);
+
+ ASSERT_OK(graph.createRole(roleA));
+ ASSERT_OK(graph.createRole(roleB));
+ ASSERT_OK(graph.createRole(roleC));
+
+ ASSERT_OK(graph.addRoleToRole(roleA, roleC));
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+ ASSERT_OK(graph.addRoleToRole(roleB, roleC));
+
+ ASSERT_OK(graph.addPrivilegeToRole(
+ roleA, Privilege(ResourcePattern::forAnyNormalResource(), actions)));
+ ASSERT_OK(graph.addPrivilegeToRole(
+ roleB, Privilege(ResourcePattern::forExactNamespace(NamespaceString("dbB.foo")), actions)));
+ ASSERT_OK(
+ graph.addPrivilegeToRole(roleC, Privilege(ResourcePattern::forClusterResource(), actions)));
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+
+ // Role A
+ mutablebson::Document doc;
+ ASSERT_OK(AuthorizationManager::getBSONForRole(&graph, roleA, doc.root()));
+ BSONObj roleDoc = doc.getObject();
+
+ ASSERT_EQUALS("dbA.roleA", roleDoc["_id"].String());
+ ASSERT_EQUALS("roleA", roleDoc["role"].String());
+ ASSERT_EQUALS("dbA", roleDoc["db"].String());
+
+ vector<BSONElement> privs = roleDoc["privileges"].Array();
+ ASSERT_EQUALS(1U, privs.size());
+ ASSERT_EQUALS("", privs[0].Obj()["resource"].Obj()["db"].String());
+ ASSERT_EQUALS("", privs[0].Obj()["resource"].Obj()["collection"].String());
+ ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].eoo());
+ vector<BSONElement> actionElements = privs[0].Obj()["actions"].Array();
+ ASSERT_EQUALS(2U, actionElements.size());
+ ASSERT_EQUALS("find", actionElements[0].String());
+ ASSERT_EQUALS("insert", actionElements[1].String());
+
+ vector<BSONElement> roles = roleDoc["roles"].Array();
+ ASSERT_EQUALS(2U, roles.size());
+ ASSERT_EQUALS("roleC", roles[0].Obj()["role"].String());
+ ASSERT_EQUALS("dbC", roles[0].Obj()["db"].String());
+ ASSERT_EQUALS("roleB", roles[1].Obj()["role"].String());
+ ASSERT_EQUALS("dbB", roles[1].Obj()["db"].String());
+
+ // Role B
+ doc.reset();
+ ASSERT_OK(AuthorizationManager::getBSONForRole(&graph, roleB, doc.root()));
+ roleDoc = doc.getObject();
+
+ ASSERT_EQUALS("dbB.roleB", roleDoc["_id"].String());
+ ASSERT_EQUALS("roleB", roleDoc["role"].String());
+ ASSERT_EQUALS("dbB", roleDoc["db"].String());
+
+ privs = roleDoc["privileges"].Array();
+ ASSERT_EQUALS(1U, privs.size());
+ ASSERT_EQUALS("dbB", privs[0].Obj()["resource"].Obj()["db"].String());
+ ASSERT_EQUALS("foo", privs[0].Obj()["resource"].Obj()["collection"].String());
+ ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].eoo());
+ actionElements = privs[0].Obj()["actions"].Array();
+ ASSERT_EQUALS(2U, actionElements.size());
+ ASSERT_EQUALS("find", actionElements[0].String());
+ ASSERT_EQUALS("insert", actionElements[1].String());
+
+ roles = roleDoc["roles"].Array();
+ ASSERT_EQUALS(1U, roles.size());
+ ASSERT_EQUALS("roleC", roles[0].Obj()["role"].String());
+ ASSERT_EQUALS("dbC", roles[0].Obj()["db"].String());
+
+ // Role C
+ doc.reset();
+ ASSERT_OK(AuthorizationManager::getBSONForRole(&graph, roleC, doc.root()));
+ roleDoc = doc.getObject();
+
+ ASSERT_EQUALS("dbC.roleC", roleDoc["_id"].String());
+ ASSERT_EQUALS("roleC", roleDoc["role"].String());
+ ASSERT_EQUALS("dbC", roleDoc["db"].String());
+
+ privs = roleDoc["privileges"].Array();
+ ASSERT_EQUALS(1U, privs.size());
+ ASSERT(privs[0].Obj()["resource"].Obj()["cluster"].Bool());
+ ASSERT(privs[0].Obj()["resource"].Obj()["db"].eoo());
+ ASSERT(privs[0].Obj()["resource"].Obj()["collection"].eoo());
+ actionElements = privs[0].Obj()["actions"].Array();
+ ASSERT_EQUALS(2U, actionElements.size());
+ ASSERT_EQUALS("find", actionElements[0].String());
+ ASSERT_EQUALS("insert", actionElements[1].String());
+
+ roles = roleDoc["roles"].Array();
+ ASSERT_EQUALS(0U, roles.size());
+}
+
+class AuthorizationManagerTest : public ::mongo::unittest::Test {
+public:
+ virtual ~AuthorizationManagerTest() {
+ if (authzManager)
+ authzManager->invalidateUserCache();
}
- class AuthorizationManagerTest : public ::mongo::unittest::Test {
- public:
- virtual ~AuthorizationManagerTest() {
- if (authzManager)
- authzManager->invalidateUserCache();
- }
-
- void setUp() {
- auto localExternalState = stdx::make_unique<AuthzManagerExternalStateMock>();
- externalState = localExternalState.get();
- externalState->setAuthzVersion(AuthorizationManager::schemaVersion26Final);
- authzManager = stdx::make_unique<AuthorizationManager>(std::move(localExternalState));
- externalState->setAuthorizationManager(authzManager.get());
- authzManager->setAuthEnabled(true);
- }
-
- std::unique_ptr<AuthorizationManager> authzManager;
- AuthzManagerExternalStateMock* externalState;
- };
-
- TEST_F(AuthorizationManagerTest, testAcquireV2User) {
+ void setUp() {
+ auto localExternalState = stdx::make_unique<AuthzManagerExternalStateMock>();
+ externalState = localExternalState.get();
externalState->setAuthzVersion(AuthorizationManager::schemaVersion26Final);
-
- OperationContextNoop txn;
-
- ASSERT_OK(externalState->insertPrivilegeDocument(
- &txn,
- BSON("_id" << "admin.v2read" <<
- "user" << "v2read" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "password") <<
- "roles" << BSON_ARRAY(BSON("role" << "read" << "db" << "test"))),
- BSONObj()));
- ASSERT_OK(externalState->insertPrivilegeDocument(
- &txn,
- BSON("_id" << "admin.v2cluster" <<
- "user" << "v2cluster" <<
- "db" << "admin" <<
- "credentials" << BSON("MONGODB-CR" << "password") <<
- "roles" << BSON_ARRAY(BSON("role" << "clusterAdmin" << "db" << "admin"))),
- BSONObj()));
-
- User* v2read;
- ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2read", "test"), &v2read));
- ASSERT_EQUALS(UserName("v2read", "test"), v2read->getName());
- ASSERT(v2read->isValid());
- ASSERT_EQUALS(1U, v2read->getRefCount());
- RoleNameIterator roles = v2read->getRoles();
- ASSERT_EQUALS(RoleName("read", "test"), roles.next());
- ASSERT_FALSE(roles.more());
- // Make sure user's refCount is 0 at the end of the test to avoid an assertion failure
- authzManager->releaseUser(v2read);
-
- User* v2cluster;
- ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2cluster", "admin"), &v2cluster));
- ASSERT_EQUALS(UserName("v2cluster", "admin"), v2cluster->getName());
- ASSERT(v2cluster->isValid());
- ASSERT_EQUALS(1U, v2cluster->getRefCount());
- RoleNameIterator clusterRoles = v2cluster->getRoles();
- ASSERT_EQUALS(RoleName("clusterAdmin", "admin"), clusterRoles.next());
- ASSERT_FALSE(clusterRoles.more());
- // Make sure user's refCount is 0 at the end of the test to avoid an assertion failure
- authzManager->releaseUser(v2cluster);
+ authzManager = stdx::make_unique<AuthorizationManager>(std::move(localExternalState));
+ externalState->setAuthorizationManager(authzManager.get());
+ authzManager->setAuthEnabled(true);
}
+ std::unique_ptr<AuthorizationManager> authzManager;
+ AuthzManagerExternalStateMock* externalState;
+};
+
+TEST_F(AuthorizationManagerTest, testAcquireV2User) {
+ externalState->setAuthzVersion(AuthorizationManager::schemaVersion26Final);
+
+ OperationContextNoop txn;
+
+ ASSERT_OK(
+ externalState->insertPrivilegeDocument(&txn,
+ BSON("_id"
+ << "admin.v2read"
+ << "user"
+ << "v2read"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "password")
+ << "roles" << BSON_ARRAY(BSON("role"
+ << "read"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(
+ externalState->insertPrivilegeDocument(&txn,
+ BSON("_id"
+ << "admin.v2cluster"
+ << "user"
+ << "v2cluster"
+ << "db"
+ << "admin"
+ << "credentials" << BSON("MONGODB-CR"
+ << "password")
+ << "roles" << BSON_ARRAY(BSON("role"
+ << "clusterAdmin"
+ << "db"
+ << "admin"))),
+ BSONObj()));
+
+ User* v2read;
+ ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2read", "test"), &v2read));
+ ASSERT_EQUALS(UserName("v2read", "test"), v2read->getName());
+ ASSERT(v2read->isValid());
+ ASSERT_EQUALS(1U, v2read->getRefCount());
+ RoleNameIterator roles = v2read->getRoles();
+ ASSERT_EQUALS(RoleName("read", "test"), roles.next());
+ ASSERT_FALSE(roles.more());
+ // Make sure user's refCount is 0 at the end of the test to avoid an assertion failure
+ authzManager->releaseUser(v2read);
+
+ User* v2cluster;
+ ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2cluster", "admin"), &v2cluster));
+ ASSERT_EQUALS(UserName("v2cluster", "admin"), v2cluster->getName());
+ ASSERT(v2cluster->isValid());
+ ASSERT_EQUALS(1U, v2cluster->getRefCount());
+ RoleNameIterator clusterRoles = v2cluster->getRoles();
+ ASSERT_EQUALS(RoleName("clusterAdmin", "admin"), clusterRoles.next());
+ ASSERT_FALSE(clusterRoles.more());
+ // Make sure user's refCount is 0 at the end of the test to avoid an assertion failure
+ authzManager->releaseUser(v2cluster);
+}
+
} // namespace
} // namespace mongo
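For reference, the assertions in BuildRoleBSON above pin down the shape that getBSONForRole() produces; reconstructed from those assertions, the document for roleA looks roughly like the raw string below (illustrative only, field order as asserted):

// Role document shape for roleA, reconstructed from the test assertions above.
#include <iostream>

int main() {
    const char* roleADoc = R"({
        "_id": "dbA.roleA",
        "role": "roleA",
        "db": "dbA",
        "privileges": [
            { "resource": { "db": "", "collection": "" },
              "actions": [ "find", "insert" ] }
        ],
        "roles": [
            { "role": "roleC", "db": "dbC" },
            { "role": "roleB", "db": "dbB" }
        ]
    })";
    std::cout << roleADoc << "\n";
    return 0;
}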
diff --git a/src/mongo/db/auth/authorization_session.cpp b/src/mongo/db/auth/authorization_session.cpp
index bcc66f45d17..22bd1b1f191 100644
--- a/src/mongo/db/auth/authorization_session.cpp
+++ b/src/mongo/db/auth/authorization_session.cpp
@@ -51,461 +51,437 @@
namespace mongo {
- using std::vector;
+using std::vector;
namespace {
- const std::string ADMIN_DBNAME = "admin";
+const std::string ADMIN_DBNAME = "admin";
} // namespace
- AuthorizationSession::AuthorizationSession(
- std::unique_ptr<AuthzSessionExternalState> externalState)
- : _externalState(std::move(externalState)),
- _impersonationFlag(false) {}
-
- AuthorizationSession::~AuthorizationSession() {
- for (UserSet::iterator it = _authenticatedUsers.begin();
- it != _authenticatedUsers.end(); ++it) {
- getAuthorizationManager().releaseUser(*it);
+AuthorizationSession::AuthorizationSession(std::unique_ptr<AuthzSessionExternalState> externalState)
+ : _externalState(std::move(externalState)), _impersonationFlag(false) {}
+
+AuthorizationSession::~AuthorizationSession() {
+ for (UserSet::iterator it = _authenticatedUsers.begin(); it != _authenticatedUsers.end();
+ ++it) {
+ getAuthorizationManager().releaseUser(*it);
+ }
+}
+
+AuthorizationManager& AuthorizationSession::getAuthorizationManager() {
+ return _externalState->getAuthorizationManager();
+}
+
+void AuthorizationSession::startRequest(OperationContext* txn) {
+ _externalState->startRequest(txn);
+ _refreshUserInfoAsNeeded(txn);
+}
+
+Status AuthorizationSession::addAndAuthorizeUser(OperationContext* txn, const UserName& userName) {
+ User* user;
+ Status status = getAuthorizationManager().acquireUser(txn, userName, &user);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ // Calling add() on the UserSet may return a user that was replaced because it was from the
+ // same database.
+ User* replacedUser = _authenticatedUsers.add(user);
+ if (replacedUser) {
+ getAuthorizationManager().releaseUser(replacedUser);
+ }
+
+ // If there are any users and roles in the impersonation data, clear it out.
+ clearImpersonatedUserData();
+
+ _buildAuthenticatedRolesVector();
+ return Status::OK();
+}
+
+User* AuthorizationSession::lookupUser(const UserName& name) {
+ return _authenticatedUsers.lookup(name);
+}
+
+void AuthorizationSession::logoutDatabase(const std::string& dbname) {
+ User* removedUser = _authenticatedUsers.removeByDBName(dbname);
+ if (removedUser) {
+ getAuthorizationManager().releaseUser(removedUser);
+ }
+ clearImpersonatedUserData();
+ _buildAuthenticatedRolesVector();
+}
+
+UserNameIterator AuthorizationSession::getAuthenticatedUserNames() {
+ return _authenticatedUsers.getNames();
+}
+
+RoleNameIterator AuthorizationSession::getAuthenticatedRoleNames() {
+ return makeRoleNameIterator(_authenticatedRoleNames.begin(), _authenticatedRoleNames.end());
+}
+
+std::string AuthorizationSession::getAuthenticatedUserNamesToken() {
+ std::string ret;
+ for (UserNameIterator nameIter = getAuthenticatedUserNames(); nameIter.more();
+ nameIter.next()) {
+ ret += '\0'; // Using a NUL byte which isn't valid in usernames to separate them.
+ ret += nameIter->getFullName();
+ }
+
+ return ret;
+}
+
+void AuthorizationSession::grantInternalAuthorization() {
+ _authenticatedUsers.add(internalSecurity.user);
+ _buildAuthenticatedRolesVector();
+}
+
+PrivilegeVector AuthorizationSession::getDefaultPrivileges() {
+ PrivilegeVector defaultPrivileges;
+
+ // If localhost exception is active (and no users exist),
+ // return a vector of the minimum privileges required to bootstrap
+ // a system and add the first user.
+ if (_externalState->shouldAllowLocalhost()) {
+ ResourcePattern adminDBResource = ResourcePattern::forDatabaseName(ADMIN_DBNAME);
+ ActionSet setupAdminUserActionSet;
+ setupAdminUserActionSet.addAction(ActionType::createUser);
+ setupAdminUserActionSet.addAction(ActionType::grantRole);
+ Privilege setupAdminUserPrivilege = Privilege(adminDBResource, setupAdminUserActionSet);
+
+ ResourcePattern externalDBResource = ResourcePattern::forDatabaseName("$external");
+ Privilege setupExternalUserPrivilege =
+ Privilege(externalDBResource, ActionType::createUser);
+
+ ActionSet setupServerConfigActionSet;
+
+ // If this server is an arbiter, add specific privileges meant to circumvent
+ // the behavior of an arbiter in an authenticated replset. See SERVER-5479.
+ if (_externalState->serverIsArbiter()) {
+ setupServerConfigActionSet.addAction(ActionType::getCmdLineOpts);
+ setupServerConfigActionSet.addAction(ActionType::getParameter);
+ setupServerConfigActionSet.addAction(ActionType::serverStatus);
+ setupServerConfigActionSet.addAction(ActionType::shutdown);
}
- }
-
- AuthorizationManager& AuthorizationSession::getAuthorizationManager() {
- return _externalState->getAuthorizationManager();
- }
- void AuthorizationSession::startRequest(OperationContext* txn) {
- _externalState->startRequest(txn);
- _refreshUserInfoAsNeeded(txn);
- }
-
- Status AuthorizationSession::addAndAuthorizeUser(
- OperationContext* txn, const UserName& userName) {
- User* user;
- Status status = getAuthorizationManager().acquireUser(txn, userName, &user);
- if (!status.isOK()) {
- return status;
- }
+ setupServerConfigActionSet.addAction(ActionType::addShard);
+ setupServerConfigActionSet.addAction(ActionType::replSetConfigure);
+ setupServerConfigActionSet.addAction(ActionType::replSetGetStatus);
+ Privilege setupServerConfigPrivilege =
+ Privilege(ResourcePattern::forClusterResource(), setupServerConfigActionSet);
- // Calling add() on the UserSet may return a user that was replaced because it was from the
- // same database.
- User* replacedUser = _authenticatedUsers.add(user);
- if (replacedUser) {
- getAuthorizationManager().releaseUser(replacedUser);
- }
-
- // If there are any users and roles in the impersonation data, clear it out.
- clearImpersonatedUserData();
-
- _buildAuthenticatedRolesVector();
- return Status::OK();
- }
-
- User* AuthorizationSession::lookupUser(const UserName& name) {
- return _authenticatedUsers.lookup(name);
- }
-
- void AuthorizationSession::logoutDatabase(const std::string& dbname) {
- User* removedUser = _authenticatedUsers.removeByDBName(dbname);
- if (removedUser) {
- getAuthorizationManager().releaseUser(removedUser);
- }
- clearImpersonatedUserData();
- _buildAuthenticatedRolesVector();
- }
-
- UserNameIterator AuthorizationSession::getAuthenticatedUserNames() {
- return _authenticatedUsers.getNames();
- }
-
- RoleNameIterator AuthorizationSession::getAuthenticatedRoleNames() {
- return makeRoleNameIterator(_authenticatedRoleNames.begin(),
- _authenticatedRoleNames.end());
+ Privilege::addPrivilegeToPrivilegeVector(&defaultPrivileges, setupAdminUserPrivilege);
+ Privilege::addPrivilegeToPrivilegeVector(&defaultPrivileges, setupExternalUserPrivilege);
+ Privilege::addPrivilegeToPrivilegeVector(&defaultPrivileges, setupServerConfigPrivilege);
+ return defaultPrivileges;
}
- std::string AuthorizationSession::getAuthenticatedUserNamesToken() {
- std::string ret;
- for (UserNameIterator nameIter = getAuthenticatedUserNames();
- nameIter.more();
- nameIter.next()) {
- ret += '\0'; // Using a NUL byte which isn't valid in usernames to separate them.
- ret += nameIter->getFullName();
- }
+ return defaultPrivileges;
+}
- return ret;
+Status AuthorizationSession::checkAuthForQuery(const NamespaceString& ns, const BSONObj& query) {
+ if (MONGO_unlikely(ns.isCommand())) {
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Checking query auth on command namespace " << ns.ns());
}
-
- void AuthorizationSession::grantInternalAuthorization() {
- _authenticatedUsers.add(internalSecurity.user);
- _buildAuthenticatedRolesVector();
+ if (!isAuthorizedForActionsOnNamespace(ns, ActionType::find)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized for query on " << ns.ns());
}
+ return Status::OK();
+}
- PrivilegeVector AuthorizationSession::getDefaultPrivileges() {
- PrivilegeVector defaultPrivileges;
-
- // If localhost exception is active (and no users exist),
- // return a vector of the minimum privileges required to bootstrap
- // a system and add the first user.
- if (_externalState->shouldAllowLocalhost()) {
- ResourcePattern adminDBResource = ResourcePattern::forDatabaseName(ADMIN_DBNAME);
- ActionSet setupAdminUserActionSet;
- setupAdminUserActionSet.addAction(ActionType::createUser);
- setupAdminUserActionSet.addAction(ActionType::grantRole);
- Privilege setupAdminUserPrivilege =
- Privilege(adminDBResource, setupAdminUserActionSet);
-
- ResourcePattern externalDBResource = ResourcePattern::forDatabaseName("$external");
- Privilege setupExternalUserPrivilege =
- Privilege(externalDBResource, ActionType::createUser);
-
- ActionSet setupServerConfigActionSet;
-
- // If this server is an arbiter, add specific privileges meant to circumvent
- // the behavior of an arbiter in an authenticated replset. See SERVER-5479.
- if (_externalState->serverIsArbiter()) {
- setupServerConfigActionSet.addAction(ActionType::getCmdLineOpts);
- setupServerConfigActionSet.addAction(ActionType::getParameter);
- setupServerConfigActionSet.addAction(ActionType::serverStatus);
- setupServerConfigActionSet.addAction(ActionType::shutdown);
- }
-
- setupServerConfigActionSet.addAction(ActionType::addShard);
- setupServerConfigActionSet.addAction(ActionType::replSetConfigure);
- setupServerConfigActionSet.addAction(ActionType::replSetGetStatus);
- Privilege setupServerConfigPrivilege =
- Privilege(ResourcePattern::forClusterResource(), setupServerConfigActionSet);
-
- Privilege::addPrivilegeToPrivilegeVector(&defaultPrivileges, setupAdminUserPrivilege);
- Privilege::addPrivilegeToPrivilegeVector(&defaultPrivileges,
- setupExternalUserPrivilege);
- Privilege::addPrivilegeToPrivilegeVector(&defaultPrivileges,
- setupServerConfigPrivilege);
- return defaultPrivileges;
+Status AuthorizationSession::checkAuthForGetMore(const NamespaceString& ns, long long cursorID) {
+ // "ns" can be in one of three formats: "listCollections" format, "listIndexes" format, and
+ // normal format.
+ if (ns.isListCollectionsGetMore()) {
+ // "ns" is of the form "<db>.$cmd.listCollections". Check if we can perform the
+ // listCollections action on the database resource for "<db>".
+ if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(ns.db()),
+ ActionType::listCollections)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized for listCollections getMore on "
+ << ns.ns());
}
-
- return defaultPrivileges;
- }
-
- Status AuthorizationSession::checkAuthForQuery(const NamespaceString& ns,
- const BSONObj& query) {
- if (MONGO_unlikely(ns.isCommand())) {
- return Status(ErrorCodes::InternalError, str::stream() <<
- "Checking query auth on command namespace " << ns.ns());
+ } else if (ns.isListIndexesGetMore()) {
+ // "ns" is of the form "<db>.$cmd.listIndexes.<coll>". Check if we can perform the
+ // listIndexes action on the "<db>.<coll>" namespace.
+ NamespaceString targetNS = ns.getTargetNSForListIndexesGetMore();
+ if (!isAuthorizedForActionsOnNamespace(targetNS, ActionType::listIndexes)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized for listIndexes getMore on " << ns.ns());
}
+ } else {
+ // "ns" is a regular namespace string. Check if we can perform the find action on it.
if (!isAuthorizedForActionsOnNamespace(ns, ActionType::find)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for query on " << ns.ns());
+ str::stream() << "not authorized for getMore on " << ns.ns());
}
- return Status::OK();
}
+ return Status::OK();
+}
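
Illustrative sketch (editor's note, not part of this change): the three getMore namespace forms map to different required actions; the authzSession pointer and cursor id below are assumed to be in scope.

    // "<db>.$cmd.listIndexes.<coll>" is checked as listIndexes on "<db>.<coll>".
    Status listIndexesStatus = authzSession->checkAuthForGetMore(
        NamespaceString("test.$cmd.listIndexes.foo"), 12345LL);
    // A regular namespace falls through to the find check on that namespace.
    Status findStatus = authzSession->checkAuthForGetMore(NamespaceString("test.foo"), 12345LL);
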
- Status AuthorizationSession::checkAuthForGetMore(const NamespaceString& ns,
- long long cursorID) {
- // "ns" can be in one of three formats: "listCollections" format, "listIndexes" format, and
- // normal format.
- if (ns.isListCollectionsGetMore()) {
- // "ns" is of the form "<db>.$cmd.listCollections". Check if we can perform the
- // listCollections action on the database resource for "<db>".
- if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(ns.db()),
- ActionType::listCollections)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for listCollections getMore on "
- << ns.ns());
- }
+Status AuthorizationSession::checkAuthForInsert(const NamespaceString& ns,
+ const BSONObj& document) {
+ if (ns.coll() == StringData("system.indexes", StringData::LiteralTag())) {
+ BSONElement nsElement = document["ns"];
+ if (nsElement.type() != String) {
+ return Status(ErrorCodes::Unauthorized,
+ "Cannot authorize inserting into "
+ "system.indexes documents without a string-typed \"ns\" field.");
}
- else if (ns.isListIndexesGetMore()) {
- // "ns" is of the form "<db>.$cmd.listIndexes.<coll>". Check if we can perform the
- // listIndexes action on the "<db>.<coll>" namespace.
- NamespaceString targetNS = ns.getTargetNSForListIndexesGetMore();
- if (!isAuthorizedForActionsOnNamespace(targetNS, ActionType::listIndexes)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for listIndexes getMore on "
- << ns.ns());
- }
+ NamespaceString indexNS(nsElement.str());
+ if (!isAuthorizedForActionsOnNamespace(indexNS, ActionType::createIndex)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized to create index on " << indexNS.ns());
}
- else {
- // "ns" is a regular namespace string. Check if we can perform the find action on it.
- if (!isAuthorizedForActionsOnNamespace(ns, ActionType::find)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for getMore on " << ns.ns());
- }
+ } else {
+ if (!isAuthorizedForActionsOnNamespace(ns, ActionType::insert)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized for insert on " << ns.ns());
}
- return Status::OK();
}
- Status AuthorizationSession::checkAuthForInsert(const NamespaceString& ns,
- const BSONObj& document) {
- if (ns.coll() == StringData("system.indexes", StringData::LiteralTag())) {
- BSONElement nsElement = document["ns"];
- if (nsElement.type() != String) {
- return Status(ErrorCodes::Unauthorized, "Cannot authorize inserting into "
- "system.indexes documents without a string-typed \"ns\" field.");
- }
- NamespaceString indexNS(nsElement.str());
- if (!isAuthorizedForActionsOnNamespace(indexNS, ActionType::createIndex)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized to create index on " <<
- indexNS.ns());
- }
- } else {
- if (!isAuthorizedForActionsOnNamespace(ns, ActionType::insert)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for insert on " << ns.ns());
- }
- }
-
- return Status::OK();
- }
+ return Status::OK();
+}
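
Illustrative sketch (editor's note, not part of this change): an insert into "<db>.system.indexes" is authorized against the index's target namespace rather than the system collection itself, so the required action is createIndex on that target; authzSession is assumed to be in scope.

    BSONObj indexSpec = BSON("ns" << "test.foo" << "key" << BSON("a" << 1) << "name" << "a_1");
    // Succeeds only if the session holds ActionType::createIndex on "test.foo".
    Status status =
        authzSession->checkAuthForInsert(NamespaceString("test.system.indexes"), indexSpec);
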
- Status AuthorizationSession::checkAuthForUpdate(const NamespaceString& ns,
- const BSONObj& query,
- const BSONObj& update,
- bool upsert) {
- if (!upsert) {
- if (!isAuthorizedForActionsOnNamespace(ns, ActionType::update)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for update on " << ns.ns());
- }
+Status AuthorizationSession::checkAuthForUpdate(const NamespaceString& ns,
+ const BSONObj& query,
+ const BSONObj& update,
+ bool upsert) {
+ if (!upsert) {
+ if (!isAuthorizedForActionsOnNamespace(ns, ActionType::update)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized for update on " << ns.ns());
}
- else {
- ActionSet required;
- required.addAction(ActionType::update);
- required.addAction(ActionType::insert);
- if (!isAuthorizedForActionsOnNamespace(ns, required)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized for upsert on " << ns.ns());
- }
+ } else {
+ ActionSet required;
+ required.addAction(ActionType::update);
+ required.addAction(ActionType::insert);
+ if (!isAuthorizedForActionsOnNamespace(ns, required)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized for upsert on " << ns.ns());
}
- return Status::OK();
}
+ return Status::OK();
+}
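
Illustrative sketch (editor's note, not part of this change): an upsert requires both the update and insert actions on the target namespace, while a plain update needs only update.

    // Requires ActionType::update and ActionType::insert on "test.foo".
    Status upsertStatus = authzSession->checkAuthForUpdate(
        NamespaceString("test.foo"), BSONObj(), BSON("$set" << BSON("a" << 1)), true /* upsert */);
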
- Status AuthorizationSession::checkAuthForDelete(const NamespaceString& ns,
- const BSONObj& query) {
- if (!isAuthorizedForActionsOnNamespace(ns, ActionType::remove)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized to remove from " << ns.ns());
- }
- return Status::OK();
+Status AuthorizationSession::checkAuthForDelete(const NamespaceString& ns, const BSONObj& query) {
+ if (!isAuthorizedForActionsOnNamespace(ns, ActionType::remove)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized to remove from " << ns.ns());
}
+ return Status::OK();
+}
- Status AuthorizationSession::checkAuthForKillCursors(const NamespaceString& ns,
- long long cursorID) {
- // See implementation comments in checkAuthForGetMore(). This method looks very similar.
- if (ns.isListCollectionsGetMore()) {
- if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(ns.db()),
- ActionType::killCursors)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized to kill listCollections cursor on "
- << ns.ns());
- }
+Status AuthorizationSession::checkAuthForKillCursors(const NamespaceString& ns,
+ long long cursorID) {
+ // See implementation comments in checkAuthForGetMore(). This method looks very similar.
+ if (ns.isListCollectionsGetMore()) {
+ if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(ns.db()),
+ ActionType::killCursors)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized to kill listCollections cursor on "
+ << ns.ns());
}
- else if (ns.isListIndexesGetMore()) {
- NamespaceString targetNS = ns.getTargetNSForListIndexesGetMore();
- if (!isAuthorizedForActionsOnNamespace(targetNS, ActionType::killCursors)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized to kill listIndexes cursor on "
- << ns.ns());
- }
+ } else if (ns.isListIndexesGetMore()) {
+ NamespaceString targetNS = ns.getTargetNSForListIndexesGetMore();
+ if (!isAuthorizedForActionsOnNamespace(targetNS, ActionType::killCursors)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized to kill listIndexes cursor on "
+ << ns.ns());
}
- else {
- if (!isAuthorizedForActionsOnNamespace(ns, ActionType::killCursors)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized to kill cursor on " << ns.ns());
- }
+ } else {
+ if (!isAuthorizedForActionsOnNamespace(ns, ActionType::killCursors)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized to kill cursor on " << ns.ns());
}
- return Status::OK();
}
+ return Status::OK();
+}
- Status AuthorizationSession::checkAuthorizedToGrantPrivilege(const Privilege& privilege) {
- const ResourcePattern& resource = privilege.getResourcePattern();
- if (resource.isDatabasePattern() || resource.isExactNamespacePattern()) {
- if (!isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(resource.databaseToMatch()),
- ActionType::grantRole)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to grant privileges on the "
- << resource.databaseToMatch() << "database");
- }
- } else if (!isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName("admin"),
- ActionType::grantRole)) {
+Status AuthorizationSession::checkAuthorizedToGrantPrivilege(const Privilege& privilege) {
+ const ResourcePattern& resource = privilege.getResourcePattern();
+ if (resource.isDatabasePattern() || resource.isExactNamespacePattern()) {
+ if (!isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(resource.databaseToMatch()),
+ ActionType::grantRole)) {
return Status(ErrorCodes::Unauthorized,
- "To grant privileges affecting multiple databases or the cluster,"
- " must be authorized to grant roles from the admin database");
+ str::stream() << "Not authorized to grant privileges on the "
+                                        << resource.databaseToMatch() << " database");
}
- return Status::OK();
- }
-
-
- Status AuthorizationSession::checkAuthorizedToRevokePrivilege(const Privilege& privilege) {
- const ResourcePattern& resource = privilege.getResourcePattern();
- if (resource.isDatabasePattern() || resource.isExactNamespacePattern()) {
- if (!isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(resource.databaseToMatch()),
- ActionType::revokeRole)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to revoke privileges on the "
- << resource.databaseToMatch() << "database");
- }
- } else if (!isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName("admin"),
- ActionType::revokeRole)) {
+ } else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
+ ActionType::grantRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ "To grant privileges affecting multiple databases or the cluster,"
+ " must be authorized to grant roles from the admin database");
+ }
+ return Status::OK();
+}
+
+
+Status AuthorizationSession::checkAuthorizedToRevokePrivilege(const Privilege& privilege) {
+ const ResourcePattern& resource = privilege.getResourcePattern();
+ if (resource.isDatabasePattern() || resource.isExactNamespacePattern()) {
+ if (!isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(resource.databaseToMatch()),
+ ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
- "To revoke privileges affecting multiple databases or the cluster,"
- " must be authorized to revoke roles from the admin database");
+ str::stream() << "Not authorized to revoke privileges on the "
+                                        << resource.databaseToMatch() << " database");
}
- return Status::OK();
- }
-
- bool AuthorizationSession::isAuthorizedToGrantRole(const RoleName& role) {
- return isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(role.getDB()),
- ActionType::grantRole);
- }
-
- bool AuthorizationSession::isAuthorizedToRevokeRole(const RoleName& role) {
- return isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(role.getDB()),
- ActionType::revokeRole);
- }
-
- bool AuthorizationSession::isAuthorizedForPrivilege(const Privilege& privilege) {
- if (_externalState->shouldIgnoreAuthChecks())
- return true;
-
- return _isAuthorizedForPrivilege(privilege);
- }
-
- bool AuthorizationSession::isAuthorizedForPrivileges(const vector<Privilege>& privileges) {
- if (_externalState->shouldIgnoreAuthChecks())
- return true;
+ } else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
+ ActionType::revokeRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ "To revoke privileges affecting multiple databases or the cluster,"
+ " must be authorized to revoke roles from the admin database");
+ }
+ return Status::OK();
+}
+
+bool AuthorizationSession::isAuthorizedToGrantRole(const RoleName& role) {
+ return isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(role.getDB()),
+ ActionType::grantRole);
+}
+
+bool AuthorizationSession::isAuthorizedToRevokeRole(const RoleName& role) {
+ return isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(role.getDB()),
+ ActionType::revokeRole);
+}
+
+bool AuthorizationSession::isAuthorizedForPrivilege(const Privilege& privilege) {
+ if (_externalState->shouldIgnoreAuthChecks())
+ return true;
- for (size_t i = 0; i < privileges.size(); ++i) {
- if (!_isAuthorizedForPrivilege(privileges[i]))
- return false;
- }
+ return _isAuthorizedForPrivilege(privilege);
+}
+bool AuthorizationSession::isAuthorizedForPrivileges(const vector<Privilege>& privileges) {
+ if (_externalState->shouldIgnoreAuthChecks())
return true;
- }
- bool AuthorizationSession::isAuthorizedForActionsOnResource(const ResourcePattern& resource,
- ActionType action) {
- return isAuthorizedForPrivilege(Privilege(resource, action));
+ for (size_t i = 0; i < privileges.size(); ++i) {
+ if (!_isAuthorizedForPrivilege(privileges[i]))
+ return false;
}
- bool AuthorizationSession::isAuthorizedForActionsOnResource(const ResourcePattern& resource,
- const ActionSet& actions) {
- return isAuthorizedForPrivilege(Privilege(resource, actions));
- }
+ return true;
+}
- bool AuthorizationSession::isAuthorizedForActionsOnNamespace(const NamespaceString& ns,
- ActionType action) {
- return isAuthorizedForPrivilege(
- Privilege(ResourcePattern::forExactNamespace(ns), action));
- }
+bool AuthorizationSession::isAuthorizedForActionsOnResource(const ResourcePattern& resource,
+ ActionType action) {
+ return isAuthorizedForPrivilege(Privilege(resource, action));
+}
- bool AuthorizationSession::isAuthorizedForActionsOnNamespace(const NamespaceString& ns,
- const ActionSet& actions) {
- return isAuthorizedForPrivilege(
- Privilege(ResourcePattern::forExactNamespace(ns), actions));
- }
+bool AuthorizationSession::isAuthorizedForActionsOnResource(const ResourcePattern& resource,
+ const ActionSet& actions) {
+ return isAuthorizedForPrivilege(Privilege(resource, actions));
+}
- static const int resourceSearchListCapacity = 5;
- /**
- * Builds from "target" an exhaustive list of all ResourcePatterns that match "target".
- *
- * Stores the resulting list into resourceSearchList, and returns the length.
- *
- * The seach lists are as follows, depending on the type of "target":
- *
- * target is ResourcePattern::forAnyResource():
- * searchList = { ResourcePattern::forAnyResource(), ResourcePattern::forAnyResource() }
- * target is the ResourcePattern::forClusterResource():
- * searchList = { ResourcePattern::forAnyResource(), ResourcePattern::forClusterResource() }
- * target is a database, db:
- * searchList = { ResourcePattern::forAnyResource(),
- * ResourcePattern::forAnyNormalResource(),
- * db }
- * target is a non-system collection, db.coll:
- * searchList = { ResourcePattern::forAnyResource(),
- * ResourcePattern::forAnyNormalResource(),
- * db,
- * coll,
- * db.coll }
- * target is a system collection, db.system.coll:
- * searchList = { ResourcePattern::forAnyResource(),
- * system.coll,
- * db.system.coll }
- */
- static int buildResourceSearchList(
- const ResourcePattern& target,
- ResourcePattern resourceSearchList[resourceSearchListCapacity]) {
-
- int size = 0;
- resourceSearchList[size++] = ResourcePattern::forAnyResource();
- if (target.isExactNamespacePattern()) {
- if (!target.ns().isSystem()) {
- resourceSearchList[size++] = ResourcePattern::forAnyNormalResource();
- resourceSearchList[size++] = ResourcePattern::forDatabaseName(target.ns().db());
- }
- resourceSearchList[size++] = ResourcePattern::forCollectionName(target.ns().coll());
- }
- else if (target.isDatabasePattern()) {
- resourceSearchList[size++] = ResourcePattern::forAnyNormalResource();
- }
- resourceSearchList[size++] = target;
- dassert(size <= resourceSearchListCapacity);
- return size;
- }
+bool AuthorizationSession::isAuthorizedForActionsOnNamespace(const NamespaceString& ns,
+ ActionType action) {
+ return isAuthorizedForPrivilege(Privilege(ResourcePattern::forExactNamespace(ns), action));
+}
- bool AuthorizationSession::isAuthorizedToChangeAsUser(const UserName& userName, ActionType actionType) {
- User* user = lookupUser(userName);
- if (!user) {
- return false;
- }
- ResourcePattern resourceSearchList[resourceSearchListCapacity];
- const int resourceSearchListLength =
- buildResourceSearchList(ResourcePattern::forDatabaseName(userName.getDB()),
- resourceSearchList);
+bool AuthorizationSession::isAuthorizedForActionsOnNamespace(const NamespaceString& ns,
+ const ActionSet& actions) {
+ return isAuthorizedForPrivilege(Privilege(ResourcePattern::forExactNamespace(ns), actions));
+}
- ActionSet actions;
- for (int i = 0; i < resourceSearchListLength; ++i) {
- actions.addAllActionsFromSet(user->getActionsForResource(resourceSearchList[i]));
+static const int resourceSearchListCapacity = 5;
+/**
+ * Builds from "target" an exhaustive list of all ResourcePatterns that match "target".
+ *
+ * Stores the resulting list into resourceSearchList, and returns the length.
+ *
+ * The search lists are as follows, depending on the type of "target":
+ *
+ * target is ResourcePattern::forAnyResource():
+ * searchList = { ResourcePattern::forAnyResource(), ResourcePattern::forAnyResource() }
+ * target is the ResourcePattern::forClusterResource():
+ * searchList = { ResourcePattern::forAnyResource(), ResourcePattern::forClusterResource() }
+ * target is a database, db:
+ * searchList = { ResourcePattern::forAnyResource(),
+ * ResourcePattern::forAnyNormalResource(),
+ * db }
+ * target is a non-system collection, db.coll:
+ * searchList = { ResourcePattern::forAnyResource(),
+ * ResourcePattern::forAnyNormalResource(),
+ * db,
+ * coll,
+ * db.coll }
+ * target is a system collection, db.system.coll:
+ * searchList = { ResourcePattern::forAnyResource(),
+ * system.coll,
+ * db.system.coll }
+ */
+static int buildResourceSearchList(const ResourcePattern& target,
+ ResourcePattern resourceSearchList[resourceSearchListCapacity]) {
+ int size = 0;
+ resourceSearchList[size++] = ResourcePattern::forAnyResource();
+ if (target.isExactNamespacePattern()) {
+ if (!target.ns().isSystem()) {
+ resourceSearchList[size++] = ResourcePattern::forAnyNormalResource();
+ resourceSearchList[size++] = ResourcePattern::forDatabaseName(target.ns().db());
}
- return actions.contains(actionType);
+ resourceSearchList[size++] = ResourcePattern::forCollectionName(target.ns().coll());
+ } else if (target.isDatabasePattern()) {
+ resourceSearchList[size++] = ResourcePattern::forAnyNormalResource();
+ }
+ resourceSearchList[size++] = target;
+ dassert(size <= resourceSearchListCapacity);
+ return size;
+}
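
Worked example (editor's note, not part of this change), following the search lists described in the comment above:

    ResourcePattern searchList[resourceSearchListCapacity];
    // For the exact namespace "test.foo" the list is: anyResource, anyNormalResource,
    // the database "test", the collection "foo", and the exact namespace itself.
    int size = buildResourceSearchList(
        ResourcePattern::forExactNamespace(NamespaceString("test.foo")), searchList);
    invariant(size == 5);
    // For "test.system.users" the two "normal" patterns are skipped, leaving anyResource,
    // the collection "system.users", and the exact namespace (size == 3).
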
+
+bool AuthorizationSession::isAuthorizedToChangeAsUser(const UserName& userName,
+ ActionType actionType) {
+ User* user = lookupUser(userName);
+ if (!user) {
+ return false;
}
+ ResourcePattern resourceSearchList[resourceSearchListCapacity];
+ const int resourceSearchListLength = buildResourceSearchList(
+ ResourcePattern::forDatabaseName(userName.getDB()), resourceSearchList);
- bool AuthorizationSession::isAuthorizedToChangeOwnPasswordAsUser(const UserName& userName) {
- return AuthorizationSession::isAuthorizedToChangeAsUser(userName, ActionType::changeOwnPassword);
+ ActionSet actions;
+ for (int i = 0; i < resourceSearchListLength; ++i) {
+ actions.addAllActionsFromSet(user->getActionsForResource(resourceSearchList[i]));
}
+ return actions.contains(actionType);
+}
- bool AuthorizationSession::isAuthorizedToChangeOwnCustomDataAsUser(const UserName& userName) {
- return AuthorizationSession::isAuthorizedToChangeAsUser(userName, ActionType::changeOwnCustomData);
- }
+bool AuthorizationSession::isAuthorizedToChangeOwnPasswordAsUser(const UserName& userName) {
+ return AuthorizationSession::isAuthorizedToChangeAsUser(userName,
+ ActionType::changeOwnPassword);
+}
- bool AuthorizationSession::isAuthenticatedAsUserWithRole(const RoleName& roleName) {
- for (UserSet::iterator it = _authenticatedUsers.begin();
- it != _authenticatedUsers.end(); ++it) {
- if ((*it)->hasRole(roleName)) {
- return true;
- }
+bool AuthorizationSession::isAuthorizedToChangeOwnCustomDataAsUser(const UserName& userName) {
+ return AuthorizationSession::isAuthorizedToChangeAsUser(userName,
+ ActionType::changeOwnCustomData);
+}
+
+bool AuthorizationSession::isAuthenticatedAsUserWithRole(const RoleName& roleName) {
+ for (UserSet::iterator it = _authenticatedUsers.begin(); it != _authenticatedUsers.end();
+ ++it) {
+ if ((*it)->hasRole(roleName)) {
+ return true;
}
- return false;
}
+ return false;
+}
- void AuthorizationSession::_refreshUserInfoAsNeeded(OperationContext* txn) {
- AuthorizationManager& authMan = getAuthorizationManager();
- UserSet::iterator it = _authenticatedUsers.begin();
- while (it != _authenticatedUsers.end()) {
- User* user = *it;
+void AuthorizationSession::_refreshUserInfoAsNeeded(OperationContext* txn) {
+ AuthorizationManager& authMan = getAuthorizationManager();
+ UserSet::iterator it = _authenticatedUsers.begin();
+ while (it != _authenticatedUsers.end()) {
+ User* user = *it;
- if (!user->isValid()) {
- // Make a good faith effort to acquire an up-to-date user object, since the one
- // we've cached is marked "out-of-date."
- UserName name = user->getName();
- User* updatedUser;
+ if (!user->isValid()) {
+ // Make a good faith effort to acquire an up-to-date user object, since the one
+ // we've cached is marked "out-of-date."
+ UserName name = user->getName();
+ User* updatedUser;
- Status status = authMan.acquireUser(txn, name, &updatedUser);
- switch (status.code()) {
+ Status status = authMan.acquireUser(txn, name, &updatedUser);
+ switch (status.code()) {
case ErrorCodes::OK: {
// Success! Replace the old User object with the updated one.
fassert(17067, _authenticatedUsers.replaceAt(it, updatedUser) == user);
@@ -517,103 +493,98 @@ namespace {
// User does not exist anymore; remove it from _authenticatedUsers.
fassert(17068, _authenticatedUsers.removeAt(it) == user);
authMan.releaseUser(user);
- log() << "Removed deleted user " << name <<
- " from session cache of user information.";
+ log() << "Removed deleted user " << name
+ << " from session cache of user information.";
continue; // No need to advance "it" in this case.
}
default:
// Unrecognized error; assume that it's transient, and continue working with the
// out-of-date privilege data.
- warning() << "Could not fetch updated user privilege information for " <<
- name << "; continuing to use old information. Reason is " << status;
+ warning() << "Could not fetch updated user privilege information for " << name
+ << "; continuing to use old information. Reason is " << status;
break;
- }
}
- ++it;
}
- _buildAuthenticatedRolesVector();
- }
-
- void AuthorizationSession::_buildAuthenticatedRolesVector() {
- _authenticatedRoleNames.clear();
- for (UserSet::iterator it = _authenticatedUsers.begin();
- it != _authenticatedUsers.end();
- ++it) {
- RoleNameIterator roles = (*it)->getIndirectRoles();
- while (roles.more()) {
- RoleName roleName = roles.next();
- _authenticatedRoleNames.push_back(RoleName(roleName.getRole(),
- roleName.getDB()));
- }
+ ++it;
+ }
+ _buildAuthenticatedRolesVector();
+}
+
+void AuthorizationSession::_buildAuthenticatedRolesVector() {
+ _authenticatedRoleNames.clear();
+ for (UserSet::iterator it = _authenticatedUsers.begin(); it != _authenticatedUsers.end();
+ ++it) {
+ RoleNameIterator roles = (*it)->getIndirectRoles();
+ while (roles.more()) {
+ RoleName roleName = roles.next();
+ _authenticatedRoleNames.push_back(RoleName(roleName.getRole(), roleName.getDB()));
}
}
+}
- bool AuthorizationSession::_isAuthorizedForPrivilege(const Privilege& privilege) {
- const ResourcePattern& target(privilege.getResourcePattern());
-
- ResourcePattern resourceSearchList[resourceSearchListCapacity];
- const int resourceSearchListLength = buildResourceSearchList(target, resourceSearchList);
+bool AuthorizationSession::_isAuthorizedForPrivilege(const Privilege& privilege) {
+ const ResourcePattern& target(privilege.getResourcePattern());
- ActionSet unmetRequirements = privilege.getActions();
+ ResourcePattern resourceSearchList[resourceSearchListCapacity];
+ const int resourceSearchListLength = buildResourceSearchList(target, resourceSearchList);
- PrivilegeVector defaultPrivileges = getDefaultPrivileges();
- for (PrivilegeVector::iterator it = defaultPrivileges.begin();
- it != defaultPrivileges.end(); ++it) {
+ ActionSet unmetRequirements = privilege.getActions();
- for (int i = 0; i < resourceSearchListLength; ++i) {
- if (!(it->getResourcePattern() == resourceSearchList[i]))
- continue;
+ PrivilegeVector defaultPrivileges = getDefaultPrivileges();
+ for (PrivilegeVector::iterator it = defaultPrivileges.begin(); it != defaultPrivileges.end();
+ ++it) {
+ for (int i = 0; i < resourceSearchListLength; ++i) {
+ if (!(it->getResourcePattern() == resourceSearchList[i]))
+ continue;
- ActionSet userActions = it->getActions();
- unmetRequirements.removeAllActionsFromSet(userActions);
+ ActionSet userActions = it->getActions();
+ unmetRequirements.removeAllActionsFromSet(userActions);
- if (unmetRequirements.empty())
- return true;
- }
+ if (unmetRequirements.empty())
+ return true;
}
+ }
- for (UserSet::iterator it = _authenticatedUsers.begin();
- it != _authenticatedUsers.end(); ++it) {
- User* user = *it;
- for (int i = 0; i < resourceSearchListLength; ++i) {
- ActionSet userActions = user->getActionsForResource(resourceSearchList[i]);
- unmetRequirements.removeAllActionsFromSet(userActions);
+ for (UserSet::iterator it = _authenticatedUsers.begin(); it != _authenticatedUsers.end();
+ ++it) {
+ User* user = *it;
+ for (int i = 0; i < resourceSearchListLength; ++i) {
+ ActionSet userActions = user->getActionsForResource(resourceSearchList[i]);
+ unmetRequirements.removeAllActionsFromSet(userActions);
- if (unmetRequirements.empty())
- return true;
- }
+ if (unmetRequirements.empty())
+ return true;
}
-
- return false;
}
- void AuthorizationSession::setImpersonatedUserData(std::vector<UserName> usernames,
- std::vector<RoleName> roles) {
- _impersonatedUserNames = usernames;
- _impersonatedRoleNames = roles;
- _impersonationFlag = true;
- }
+ return false;
+}
- UserNameIterator AuthorizationSession::getImpersonatedUserNames() {
- return makeUserNameIterator(_impersonatedUserNames.begin(),
- _impersonatedUserNames.end());
- }
+void AuthorizationSession::setImpersonatedUserData(std::vector<UserName> usernames,
+ std::vector<RoleName> roles) {
+ _impersonatedUserNames = usernames;
+ _impersonatedRoleNames = roles;
+ _impersonationFlag = true;
+}
- RoleNameIterator AuthorizationSession::getImpersonatedRoleNames() {
- return makeRoleNameIterator(_impersonatedRoleNames.begin(),
- _impersonatedRoleNames.end());
- }
+UserNameIterator AuthorizationSession::getImpersonatedUserNames() {
+ return makeUserNameIterator(_impersonatedUserNames.begin(), _impersonatedUserNames.end());
+}
- // Clear the vectors of impersonated usernames and roles.
- void AuthorizationSession::clearImpersonatedUserData() {
- _impersonatedUserNames.clear();
- _impersonatedRoleNames.clear();
- _impersonationFlag = false;
- }
+RoleNameIterator AuthorizationSession::getImpersonatedRoleNames() {
+ return makeRoleNameIterator(_impersonatedRoleNames.begin(), _impersonatedRoleNames.end());
+}
+// Clear the vectors of impersonated usernames and roles.
+void AuthorizationSession::clearImpersonatedUserData() {
+ _impersonatedUserNames.clear();
+ _impersonatedRoleNames.clear();
+ _impersonationFlag = false;
+}
- bool AuthorizationSession::isImpersonating() const {
- return _impersonationFlag;
- }
-} // namespace mongo
+bool AuthorizationSession::isImpersonating() const {
+ return _impersonationFlag;
+}
+
+} // namespace mongo
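
Usage sketch (editor's note, not part of this change): the impersonation setters and accessors are paired; the example assumes an authzSession pointer in scope.

    std::vector<UserName> impersonatedUsers;
    impersonatedUsers.push_back(UserName("appUser", "admin"));
    std::vector<RoleName> impersonatedRoles;
    impersonatedRoles.push_back(RoleName("root", "admin"));
    authzSession->setImpersonatedUserData(impersonatedUsers, impersonatedRoles);
    // isImpersonating() now returns true; audit records will carry these names.
    authzSession->clearImpersonatedUserData();  // resets the flag and both vectors
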
diff --git a/src/mongo/db/auth/authorization_session.h b/src/mongo/db/auth/authorization_session.h
index 0fe4f1c46cb..d6fe06e11fa 100644
--- a/src/mongo/db/auth/authorization_session.h
+++ b/src/mongo/db/auth/authorization_session.h
@@ -44,234 +44,233 @@
#include "mongo/db/namespace_string.h"
namespace mongo {
- class ClientBasic;
+class ClientBasic;
+/**
+ * Contains all the authorization logic for a single client connection. It contains a set of
+ * the users which have been authenticated, as well as a set of privileges that have been
+ * granted to those users to perform various actions.
+ *
+ * An AuthorizationSession object is present within every mongo::ClientBasic object.
+ *
+ * Users in the _authenticatedUsers cache may get marked as invalid by the AuthorizationManager,
+ * for instance if their privileges are changed by a user or role modification command. At the
+ * beginning of every user-initiated operation startRequest() gets called which updates
+ * the cached information about any users who have been marked as invalid. This guarantees that
+ * every operation looks at one consistent view of each user for every auth check required over
+ * the lifetime of the operation.
+ */
+class AuthorizationSession {
+ MONGO_DISALLOW_COPYING(AuthorizationSession);
+
+public:
/**
- * Contains all the authorization logic for a single client connection. It contains a set of
- * the users which have been authenticated, as well as a set of privileges that have been
- * granted to those users to perform various actions.
+ * Gets the AuthorizationSession associated with the given "client", or nullptr.
*
- * An AuthorizationSession object is present within every mongo::ClientBasic object.
+ * The "client" object continues to own the returned AuthorizationSession.
+ */
+ static AuthorizationSession* get(ClientBasic* client);
+
+ /**
+ * Gets the AuthorizationSession associated with the given "client", or nullptr.
+ *
+ * The "client" object continues to own the returned AuthorizationSession.
+ */
+ static AuthorizationSession* get(ClientBasic& client);
+
+ /**
+ * Returns false if AuthorizationSession::get(client) would return nullptr.
+ */
+ static bool exists(ClientBasic* client);
+
+ /**
+ * Sets the AuthorizationSession associated with "client" to "session".
*
- * Users in the _authenticatedUsers cache may get marked as invalid by the AuthorizationManager,
- * for instance if their privileges are changed by a user or role modification command. At the
- * beginning of every user-initiated operation startRequest() gets called which updates
- * the cached information about any users who have been marked as invalid. This guarantees that
- * every operation looks at one consistent view of each user for every auth check required over
- * the lifetime of the operation.
+ * "session" must not be NULL, and it is only legal to call this function once
+ * on each instance of "client".
*/
- class AuthorizationSession {
- MONGO_DISALLOW_COPYING(AuthorizationSession);
- public:
- /**
- * Gets the AuthorizationSession associated with the given "client", or nullptr.
- *
- * The "client" object continues to own the returned AuthorizationSession.
- */
- static AuthorizationSession* get(ClientBasic* client);
-
- /**
- * Gets the AuthorizationSession associated with the given "client", or nullptr.
- *
- * The "client" object continues to own the returned AuthorizationSession.
- */
- static AuthorizationSession* get(ClientBasic& client);
-
- /**
- * Returns false if AuthorizationSession::get(client) would return nullptr.
- */
- static bool exists(ClientBasic* client);
-
- /**
- * Sets the AuthorizationSession associated with "client" to "session".
- *
- * "session" must not be NULL, and it is only legal to call this function once
- * on each instance of "client".
- */
- static void set(ClientBasic* client, std::unique_ptr<AuthorizationSession> session);
-
- // Takes ownership of the externalState.
- explicit AuthorizationSession(std::unique_ptr<AuthzSessionExternalState> externalState);
- ~AuthorizationSession();
-
- AuthorizationManager& getAuthorizationManager();
-
- // Should be called at the beginning of every new request. This performs the checks
- // necessary to determine if localhost connections should be given full access.
- // TODO: try to eliminate the need for this call.
- void startRequest(OperationContext* txn);
-
- /**
- * Adds the User identified by "UserName" to the authorization session, acquiring privileges
- * for it in the process.
- */
- Status addAndAuthorizeUser(OperationContext* txn, const UserName& userName);
-
- // Returns the authenticated user with the given name. Returns NULL
- // if no such user is found.
- // The user remains in the _authenticatedUsers set for this AuthorizationSession,
- // and ownership of the user stays with the AuthorizationManager
- User* lookupUser(const UserName& name);
-
- // Gets an iterator over the names of all authenticated users stored in this manager.
- UserNameIterator getAuthenticatedUserNames();
-
- // Gets an iterator over the roles of all authenticated users stored in this manager.
- RoleNameIterator getAuthenticatedRoleNames();
-
- // Returns a std::string representing all logged-in users on the current session.
- // WARNING: this std::string will contain NUL bytes so don't call c_str()!
- std::string getAuthenticatedUserNamesToken();
-
- // Removes any authenticated principals whose authorization credentials came from the given
- // database, and revokes any privileges that were granted via that principal.
- void logoutDatabase(const std::string& dbname);
-
- // Adds the internalSecurity user to the set of authenticated users.
- // Used to grant internal threads full access.
- void grantInternalAuthorization();
-
- // Generates a vector of default privileges that are granted to any user,
- // regardless of which roles that user does or does not possess.
- // If localhost exception is active, the permissions include the ability to create
- // the first user and the ability to run the commands needed to bootstrap the system
- // into a state where the first user can be created.
- PrivilegeVector getDefaultPrivileges();
-
- // Checks if this connection has the privileges necessary to perform the given query on the
- // given namespace.
- Status checkAuthForQuery(const NamespaceString& ns, const BSONObj& query);
-
- // Checks if this connection has the privileges necessary to perform a getMore operation on
- // the identified cursor, supposing that cursor is associated with the supplied namespace
- // identifier.
- Status checkAuthForGetMore(const NamespaceString& ns, long long cursorID);
-
- // Checks if this connection has the privileges necessary to perform the given update on the
- // given namespace.
- Status checkAuthForUpdate(const NamespaceString& ns,
- const BSONObj& query,
- const BSONObj& update,
- bool upsert);
-
- // Checks if this connection has the privileges necessary to insert the given document
- // to the given namespace. Correctly interprets inserts to system.indexes and performs
- // the proper auth checks for index building.
- Status checkAuthForInsert(const NamespaceString& ns, const BSONObj& document);
-
- // Checks if this connection has the privileges necessary to perform a delete on the given
- // namespace.
- Status checkAuthForDelete(const NamespaceString& ns, const BSONObj& query);
-
- // Checks if this connection has the privileges necessary to perform a killCursor on
- // the identified cursor, supposing that cursor is associated with the supplied namespace
- // identifier.
- Status checkAuthForKillCursors(const NamespaceString& ns, long long cursorID);
-
- // Checks if this connection has the privileges necessary to grant the given privilege
- // to a role.
- Status checkAuthorizedToGrantPrivilege(const Privilege& privilege);
-
- // Checks if this connection has the privileges necessary to revoke the given privilege
- // from a role.
- Status checkAuthorizedToRevokePrivilege(const Privilege& privilege);
-
- // Utility function for isAuthorizedForActionsOnResource(
- // ResourcePattern::forDatabaseName(role.getDB()), ActionType::grantAnyRole)
- bool isAuthorizedToGrantRole(const RoleName& role);
-
- // Utility function for isAuthorizedForActionsOnResource(
- // ResourcePattern::forDatabaseName(role.getDB()), ActionType::grantAnyRole)
- bool isAuthorizedToRevokeRole(const RoleName& role);
-
- // Utility function for isAuthorizedToChangeOwnPasswordAsUser and isAuthorizedToChangeOwnCustomDataAsUser
- bool isAuthorizedToChangeAsUser(const UserName& userName, ActionType actionType);
-
- // Returns true if the current session is authenticated as the given user and that user
- // is allowed to change his/her own password
- bool isAuthorizedToChangeOwnPasswordAsUser(const UserName& userName);
-
- // Returns true if the current session is authenticated as the given user and that user
- // is allowed to change his/her own customData.
- bool isAuthorizedToChangeOwnCustomDataAsUser(const UserName& userName);
-
- // Returns true if any of the authenticated users on this session have the given role.
- // NOTE: this does not refresh any of the users even if they are marked as invalid.
- bool isAuthenticatedAsUserWithRole(const RoleName& roleName);
-
- // Returns true if this session is authorized for the given Privilege.
- //
- // Contains all the authorization logic including handling things like the localhost
- // exception.
- bool isAuthorizedForPrivilege(const Privilege& privilege);
-
- // Like isAuthorizedForPrivilege, above, except returns true if the session is authorized
- // for all of the listed privileges.
- bool isAuthorizedForPrivileges(const std::vector<Privilege>& privileges);
-
- // Utility function for isAuthorizedForPrivilege(Privilege(resource, action)).
- bool isAuthorizedForActionsOnResource(const ResourcePattern& resource, ActionType action);
-
- // Utility function for isAuthorizedForPrivilege(Privilege(resource, actions)).
- bool isAuthorizedForActionsOnResource(const ResourcePattern& resource,
- const ActionSet& actions);
-
- // Utility function for
- // isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns), action).
- bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, ActionType action);
-
- // Utility function for
- // isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns), actions).
- bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns,
- const ActionSet& actions);
-
- // Replaces the data for users that a system user is impersonating with new data.
- // The auditing system adds these users and their roles to each audit record in the log.
- void setImpersonatedUserData(std::vector<UserName> usernames, std::vector<RoleName> roles);
-
- // Gets an iterator over the names of all users that the system user is impersonating.
- UserNameIterator getImpersonatedUserNames();
-
- // Gets an iterator over the roles of all users that the system user is impersonating.
- RoleNameIterator getImpersonatedRoleNames();
-
- // Clears the data for impersonated users.
- void clearImpersonatedUserData();
-
- // Tells whether impersonation is active or not. This state is set when
- // setImpersonatedUserData is called and cleared when clearImpersonatedUserData is
- // called.
- bool isImpersonating() const;
-
- private:
-
- // If any users authenticated on this session are marked as invalid this updates them with
- // up-to-date information. May require a read lock on the "admin" db to read the user data.
- void _refreshUserInfoAsNeeded(OperationContext* txn);
-
- // Builds a vector of all roles held by users who are authenticated on this connection. The
- // vector is stored in _authenticatedRoleNames. This function is called when users are
- // logged in or logged out, as well as when the user cache is determined to be out of date.
- void _buildAuthenticatedRolesVector();
+ static void set(ClientBasic* client, std::unique_ptr<AuthorizationSession> session);
- // Checks if this connection is authorized for the given Privilege, ignoring whether or not
- // we should even be doing authorization checks in general. Note: this may acquire a read
- // lock on the admin database (to update out-of-date user privilege information).
- bool _isAuthorizedForPrivilege(const Privilege& privilege);
+ // Takes ownership of the externalState.
+ explicit AuthorizationSession(std::unique_ptr<AuthzSessionExternalState> externalState);
+ ~AuthorizationSession();
- std::unique_ptr<AuthzSessionExternalState> _externalState;
+ AuthorizationManager& getAuthorizationManager();
- // All Users who have been authenticated on this connection.
- UserSet _authenticatedUsers;
- // The roles of the authenticated users. This vector is generated when the authenticated
- // users set is changed.
- std::vector<RoleName> _authenticatedRoleNames;
+ // Should be called at the beginning of every new request. This performs the checks
+ // necessary to determine if localhost connections should be given full access.
+ // TODO: try to eliminate the need for this call.
+ void startRequest(OperationContext* txn);
+
+ /**
+ * Adds the User identified by "UserName" to the authorization session, acquiring privileges
+ * for it in the process.
+ */
+ Status addAndAuthorizeUser(OperationContext* txn, const UserName& userName);
+
+ // Returns the authenticated user with the given name. Returns NULL
+ // if no such user is found.
+ // The user remains in the _authenticatedUsers set for this AuthorizationSession,
+ // and ownership of the user stays with the AuthorizationManager
+ User* lookupUser(const UserName& name);
+
+ // Gets an iterator over the names of all authenticated users stored in this manager.
+ UserNameIterator getAuthenticatedUserNames();
+
+ // Gets an iterator over the roles of all authenticated users stored in this manager.
+ RoleNameIterator getAuthenticatedRoleNames();
+
+ // Returns a std::string representing all logged-in users on the current session.
+ // WARNING: this std::string will contain NUL bytes so don't call c_str()!
+ std::string getAuthenticatedUserNamesToken();
+
+ // Removes any authenticated principals whose authorization credentials came from the given
+ // database, and revokes any privileges that were granted via that principal.
+ void logoutDatabase(const std::string& dbname);
+
+ // Adds the internalSecurity user to the set of authenticated users.
+ // Used to grant internal threads full access.
+ void grantInternalAuthorization();
+
+ // Generates a vector of default privileges that are granted to any user,
+ // regardless of which roles that user does or does not possess.
+ // If localhost exception is active, the permissions include the ability to create
+ // the first user and the ability to run the commands needed to bootstrap the system
+ // into a state where the first user can be created.
+ PrivilegeVector getDefaultPrivileges();
+
+ // Checks if this connection has the privileges necessary to perform the given query on the
+ // given namespace.
+ Status checkAuthForQuery(const NamespaceString& ns, const BSONObj& query);
+
+ // Checks if this connection has the privileges necessary to perform a getMore operation on
+ // the identified cursor, supposing that cursor is associated with the supplied namespace
+ // identifier.
+ Status checkAuthForGetMore(const NamespaceString& ns, long long cursorID);
+
+ // Checks if this connection has the privileges necessary to perform the given update on the
+ // given namespace.
+ Status checkAuthForUpdate(const NamespaceString& ns,
+ const BSONObj& query,
+ const BSONObj& update,
+ bool upsert);
+
+ // Checks if this connection has the privileges necessary to insert the given document
+ // to the given namespace. Correctly interprets inserts to system.indexes and performs
+ // the proper auth checks for index building.
+ Status checkAuthForInsert(const NamespaceString& ns, const BSONObj& document);
+
+ // Checks if this connection has the privileges necessary to perform a delete on the given
+ // namespace.
+ Status checkAuthForDelete(const NamespaceString& ns, const BSONObj& query);
+
+ // Checks if this connection has the privileges necessary to perform a killCursor on
+ // the identified cursor, supposing that cursor is associated with the supplied namespace
+ // identifier.
+ Status checkAuthForKillCursors(const NamespaceString& ns, long long cursorID);
+
+ // Checks if this connection has the privileges necessary to grant the given privilege
+ // to a role.
+ Status checkAuthorizedToGrantPrivilege(const Privilege& privilege);
+
+ // Checks if this connection has the privileges necessary to revoke the given privilege
+ // from a role.
+ Status checkAuthorizedToRevokePrivilege(const Privilege& privilege);
+
+ // Utility function for isAuthorizedForActionsOnResource(
+ // ResourcePattern::forDatabaseName(role.getDB()), ActionType::grantRole)
+ bool isAuthorizedToGrantRole(const RoleName& role);
+
+ // Utility function for isAuthorizedForActionsOnResource(
+ // ResourcePattern::forDatabaseName(role.getDB()), ActionType::revokeRole)
+ bool isAuthorizedToRevokeRole(const RoleName& role);
+
+ // Utility function for isAuthorizedToChangeOwnPasswordAsUser and
+ // isAuthorizedToChangeOwnCustomDataAsUser.
+ bool isAuthorizedToChangeAsUser(const UserName& userName, ActionType actionType);
+
+ // Returns true if the current session is authenticated as the given user and that user
+ // is allowed to change his/her own password
+ bool isAuthorizedToChangeOwnPasswordAsUser(const UserName& userName);
+
+ // Returns true if the current session is authenticated as the given user and that user
+ // is allowed to change his/her own customData.
+ bool isAuthorizedToChangeOwnCustomDataAsUser(const UserName& userName);
+
+ // Returns true if any of the authenticated users on this session have the given role.
+ // NOTE: this does not refresh any of the users even if they are marked as invalid.
+ bool isAuthenticatedAsUserWithRole(const RoleName& roleName);
+
+ // Returns true if this session is authorized for the given Privilege.
+ //
+ // Contains all the authorization logic including handling things like the localhost
+ // exception.
+ bool isAuthorizedForPrivilege(const Privilege& privilege);
+
+ // Like isAuthorizedForPrivilege, above, except returns true if the session is authorized
+ // for all of the listed privileges.
+ bool isAuthorizedForPrivileges(const std::vector<Privilege>& privileges);
+
+ // Utility function for isAuthorizedForPrivilege(Privilege(resource, action)).
+ bool isAuthorizedForActionsOnResource(const ResourcePattern& resource, ActionType action);
+
+ // Utility function for isAuthorizedForPrivilege(Privilege(resource, actions)).
+ bool isAuthorizedForActionsOnResource(const ResourcePattern& resource,
+ const ActionSet& actions);
+
+ // Utility function for
+ // isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns), action).
+ bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, ActionType action);
+
+ // Utility function for
+ // isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns), actions).
+ bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, const ActionSet& actions);
+
+ // Replaces the data for users that a system user is impersonating with new data.
+ // The auditing system adds these users and their roles to each audit record in the log.
+ void setImpersonatedUserData(std::vector<UserName> usernames, std::vector<RoleName> roles);
+
+ // Gets an iterator over the names of all users that the system user is impersonating.
+ UserNameIterator getImpersonatedUserNames();
+
+ // Gets an iterator over the roles of all users that the system user is impersonating.
+ RoleNameIterator getImpersonatedRoleNames();
+
+ // Clears the data for impersonated users.
+ void clearImpersonatedUserData();
+
+ // Tells whether impersonation is active or not. This state is set when
+ // setImpersonatedUserData is called and cleared when clearImpersonatedUserData is
+ // called.
+ bool isImpersonating() const;
+
+private:
+ // If any users authenticated on this session are marked as invalid this updates them with
+ // up-to-date information. May require a read lock on the "admin" db to read the user data.
+ void _refreshUserInfoAsNeeded(OperationContext* txn);
+
+ // Builds a vector of all roles held by users who are authenticated on this connection. The
+ // vector is stored in _authenticatedRoleNames. This function is called when users are
+ // logged in or logged out, as well as when the user cache is determined to be out of date.
+ void _buildAuthenticatedRolesVector();
+
+ // Checks if this connection is authorized for the given Privilege, ignoring whether or not
+ // we should even be doing authorization checks in general. Note: this may acquire a read
+ // lock on the admin database (to update out-of-date user privilege information).
+ bool _isAuthorizedForPrivilege(const Privilege& privilege);
+
+ std::unique_ptr<AuthzSessionExternalState> _externalState;
- // A vector of impersonated UserNames and a vector of those users' RoleNames.
- // These are used in the auditing system. They are not used for authz checks.
- std::vector<UserName> _impersonatedUserNames;
- std::vector<RoleName> _impersonatedRoleNames;
- bool _impersonationFlag;
- };
+ // All Users who have been authenticated on this connection.
+ UserSet _authenticatedUsers;
+ // The roles of the authenticated users. This vector is generated when the authenticated
+ // users set is changed.
+ std::vector<RoleName> _authenticatedRoleNames;
+
+ // A vector of impersonated UserNames and a vector of those users' RoleNames.
+ // These are used in the auditing system. They are not used for authz checks.
+ std::vector<UserName> _impersonatedUserNames;
+ std::vector<RoleName> _impersonatedRoleNames;
+ bool _impersonationFlag;
+};
-} // namespace mongo
+} // namespace mongo
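
Usage sketch (editor's note, not part of this change): command code typically resolves the session from the client and performs a resource- or namespace-level check; the client and ns variables are assumed to be in scope.

    AuthorizationSession* authSession = AuthorizationSession::get(client);
    if (!authSession->isAuthorizedForActionsOnNamespace(ns, ActionType::find)) {
        return Status(ErrorCodes::Unauthorized,
                      str::stream() << "not authorized for query on " << ns.ns());
    }
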
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index 5ce06edb4a7..0f5b4936e9d 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -46,409 +46,464 @@
namespace mongo {
namespace {
- class FailureCapableAuthzManagerExternalStateMock :
- public AuthzManagerExternalStateMock {
- public:
- FailureCapableAuthzManagerExternalStateMock() : _findsShouldFail(false) {}
- virtual ~FailureCapableAuthzManagerExternalStateMock() {}
-
- void setFindsShouldFail(bool enable) { _findsShouldFail = enable; }
-
- virtual Status findOne(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- BSONObj* result) {
- if (_findsShouldFail &&
- collectionName == AuthorizationManager::usersCollectionNamespace) {
-
- return Status(ErrorCodes::UnknownError,
- "findOne on admin.system.users set to fail in mock.");
- }
- return AuthzManagerExternalStateMock::findOne(txn, collectionName, query, result);
- }
-
- private:
- bool _findsShouldFail;
- };
-
- class AuthorizationSessionTest : public ::mongo::unittest::Test {
- public:
- FailureCapableAuthzManagerExternalStateMock* managerState;
- OperationContextNoop _txn;
- AuthzSessionExternalStateMock* sessionState;
- std::unique_ptr<AuthorizationManager> authzManager;
- std::unique_ptr<AuthorizationSession> authzSession;
-
- void setUp() {
- auto localManagerState = stdx::make_unique<FailureCapableAuthzManagerExternalStateMock>();
- managerState = localManagerState.get();
- managerState->setAuthzVersion(AuthorizationManager::schemaVersion26Final);
- authzManager = stdx::make_unique<AuthorizationManager>(std::move(localManagerState));
- auto localSessionState = stdx::make_unique<AuthzSessionExternalStateMock>(authzManager.get());
- sessionState = localSessionState.get();
- authzSession = stdx::make_unique<AuthorizationSession>(std::move(localSessionState));
- authzManager->setAuthEnabled(true);
- }
- };
-
- const ResourcePattern testDBResource(ResourcePattern::forDatabaseName("test"));
- const ResourcePattern otherDBResource(ResourcePattern::forDatabaseName("other"));
- const ResourcePattern adminDBResource(ResourcePattern::forDatabaseName("admin"));
- const ResourcePattern testFooCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("test.foo")));
- const ResourcePattern otherFooCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("other.foo")));
- const ResourcePattern thirdFooCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("third.foo")));
- const ResourcePattern adminFooCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("admin.foo")));
- const ResourcePattern testUsersCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("test.system.users")));
- const ResourcePattern otherUsersCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("other.system.users")));
- const ResourcePattern thirdUsersCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("third.system.users")));
- const ResourcePattern testIndexesCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("test.system.indexes")));
- const ResourcePattern otherIndexesCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("other.system.indexes")));
- const ResourcePattern thirdIndexesCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("third.system.indexes")));
- const ResourcePattern testProfileCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("test.system.profile")));
- const ResourcePattern otherProfileCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("other.system.profile")));
- const ResourcePattern thirdProfileCollResource(
- ResourcePattern::forExactNamespace(NamespaceString("third.system.profile")));
-
- TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
- // Check that disabling auth checks works
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
- sessionState->setReturnValueForShouldIgnoreAuthChecks(true);
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
- sessionState->setReturnValueForShouldIgnoreAuthChecks(false);
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
-
- // Check that you can't authorize a user that doesn't exist.
- ASSERT_EQUALS(ErrorCodes::UserNotFound,
- authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
-
- // Add a user with readWrite and dbAdmin on the test DB
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "readWrite" <<
- "db" << "test") <<
- BSON("role" << "dbAdmin" <<
- "db" << "test"))),
- BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
-
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testDBResource, ActionType::dbStats));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherFooCollResource, ActionType::insert));
-
- // Add an admin user with readWriteAnyDatabase
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "admin" <<
- "db" << "admin" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "readWriteAnyDatabase" <<
- "db" << "admin"))),
- BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("admin", "admin")));
-
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(
- NamespaceString("anydb.somecollection")),
- ActionType::insert));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- otherDBResource, ActionType::insert));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- otherFooCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherFooCollResource, ActionType::collMod));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
-
- authzSession->logoutDatabase("test");
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- otherFooCollResource, ActionType::insert));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::collMod));
-
- authzSession->logoutDatabase("admin");
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherFooCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::collMod));
- }
+class FailureCapableAuthzManagerExternalStateMock : public AuthzManagerExternalStateMock {
+public:
+ FailureCapableAuthzManagerExternalStateMock() : _findsShouldFail(false) {}
+ virtual ~FailureCapableAuthzManagerExternalStateMock() {}
- TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
- // Add a user with doubled-up readWrite and single dbAdmin on the test DB
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "readWrite" <<
- "db" << "test") <<
- BSON("role" << "dbAdmin" <<
- "db" << "test") <<
- BSON("role" << "readWrite" <<
- "db" << "test"))),
- BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
-
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testDBResource, ActionType::dbStats));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherFooCollResource, ActionType::insert));
+ void setFindsShouldFail(bool enable) {
+ _findsShouldFail = enable;
}
- TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "rw" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "readWrite" <<
- "db" << "test") <<
- BSON("role" << "dbAdmin" <<
- "db" << "test"))),
- BSONObj()));
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "useradmin" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "userAdmin" <<
- "db" << "test"))),
- BSONObj()));
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "rwany" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "readWriteAnyDatabase" <<
- "db" << "admin") <<
- BSON("role" << "dbAdminAnyDatabase" <<
- "db" << "admin"))),
- BSONObj()));
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "useradminany" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "userAdminAnyDatabase" <<
- "db" << "admin"))),
- BSONObj()));
-
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("rwany", "test")));
-
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testUsersCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testUsersCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherUsersCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherUsersCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testIndexesCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testProfileCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- otherIndexesCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- otherProfileCollResource, ActionType::find));
-
- // Logging in as useradminany@test implicitly logs out rwany@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("useradminany", "test")));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testUsersCollResource, ActionType::insert));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testUsersCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherUsersCollResource, ActionType::insert));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- otherUsersCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testIndexesCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testProfileCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherIndexesCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherProfileCollResource, ActionType::find));
-
- // Logging in as rw@test implicitly logs out useradminany@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("rw", "test")));
-
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testUsersCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testUsersCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherUsersCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherUsersCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testIndexesCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testProfileCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherIndexesCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherProfileCollResource, ActionType::find));
-
-
- // Logging in as useradmin@test implicitly logs out rw@test.
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("useradmin", "test")));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testUsersCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testUsersCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherUsersCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherUsersCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testIndexesCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testProfileCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherIndexesCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- otherProfileCollResource, ActionType::find));
- }
-
- TEST_F(AuthorizationSessionTest, InvalidateUser) {
- // Add a readWrite user
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "readWrite" <<
- "db" << "test"))),
- BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
-
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
-
- User* user = authzSession->lookupUser(UserName("spencer", "test"));
- ASSERT(user->isValid());
-
- // Change the user to be read-only
- int ignored;
- managerState->remove(
- &_txn,
- AuthorizationManager::usersCollectionNamespace,
- BSONObj(),
- BSONObj(),
- &ignored);
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "read" <<
- "db" << "test"))),
- BSONObj()));
-
- // Make sure that invalidating the user causes the session to reload its privileges.
- authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
-
- user = authzSession->lookupUser(UserName("spencer", "test"));
- ASSERT(user->isValid());
-
- // Delete the user.
- managerState->remove(
- &_txn,
- AuthorizationManager::usersCollectionNamespace,
- BSONObj(),
- BSONObj(),
- &ignored);
- // Make sure that invalidating the user causes the session to reload its privileges.
- authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
- ASSERT_FALSE(authzSession->lookupUser(UserName("spencer", "test")));
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ BSONObj* result) {
+ if (_findsShouldFail && collectionName == AuthorizationManager::usersCollectionNamespace) {
+ return Status(ErrorCodes::UnknownError,
+ "findOne on admin.system.users set to fail in mock.");
+ }
+ return AuthzManagerExternalStateMock::findOne(txn, collectionName, query, result);
}
- TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
- // Add a readWrite user
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "readWrite" <<
- "db" << "test"))),
- BSONObj()));
- ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
-
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
-
- User* user = authzSession->lookupUser(UserName("spencer", "test"));
- ASSERT(user->isValid());
-
- // Change the user to be read-only
- int ignored;
- managerState->setFindsShouldFail(true);
- managerState->remove(
- &_txn,
- AuthorizationManager::usersCollectionNamespace,
- BSONObj(),
- BSONObj(),
- &ignored);
- ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "read" <<
- "db" << "test"))),
- BSONObj()));
-
- // Even though the user's privileges have been reduced, since we've configured user
- // document lookup to fail, the authz session should continue to use its known out-of-date
- // privilege data.
- authzManager->invalidateUserByName(user->getName());
- authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::find));
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
-
- // Once we configure document lookup to succeed again, authorization checks should
- // observe the new values.
- managerState->setFindsShouldFail(false);
- authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
- ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::find));
- ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(
- testFooCollResource, ActionType::insert));
+private:
+ bool _findsShouldFail;
+};
+
+class AuthorizationSessionTest : public ::mongo::unittest::Test {
+public:
+ FailureCapableAuthzManagerExternalStateMock* managerState;
+ OperationContextNoop _txn;
+ AuthzSessionExternalStateMock* sessionState;
+ std::unique_ptr<AuthorizationManager> authzManager;
+ std::unique_ptr<AuthorizationSession> authzSession;
+
+ void setUp() {
+ auto localManagerState = stdx::make_unique<FailureCapableAuthzManagerExternalStateMock>();
+ managerState = localManagerState.get();
+ managerState->setAuthzVersion(AuthorizationManager::schemaVersion26Final);
+ authzManager = stdx::make_unique<AuthorizationManager>(std::move(localManagerState));
+ auto localSessionState =
+ stdx::make_unique<AuthzSessionExternalStateMock>(authzManager.get());
+ sessionState = localSessionState.get();
+ authzSession = stdx::make_unique<AuthorizationSession>(std::move(localSessionState));
+ authzManager->setAuthEnabled(true);
}
+};
+
+const ResourcePattern testDBResource(ResourcePattern::forDatabaseName("test"));
+const ResourcePattern otherDBResource(ResourcePattern::forDatabaseName("other"));
+const ResourcePattern adminDBResource(ResourcePattern::forDatabaseName("admin"));
+const ResourcePattern testFooCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("test.foo")));
+const ResourcePattern otherFooCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("other.foo")));
+const ResourcePattern thirdFooCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("third.foo")));
+const ResourcePattern adminFooCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("admin.foo")));
+const ResourcePattern testUsersCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("test.system.users")));
+const ResourcePattern otherUsersCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("other.system.users")));
+const ResourcePattern thirdUsersCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("third.system.users")));
+const ResourcePattern testIndexesCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("test.system.indexes")));
+const ResourcePattern otherIndexesCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("other.system.indexes")));
+const ResourcePattern thirdIndexesCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("third.system.indexes")));
+const ResourcePattern testProfileCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("test.system.profile")));
+const ResourcePattern otherProfileCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("other.system.profile")));
+const ResourcePattern thirdProfileCollResource(
+ ResourcePattern::forExactNamespace(NamespaceString("third.system.profile")));
+
+TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
+ // Check that disabling auth checks works
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+ sessionState->setReturnValueForShouldIgnoreAuthChecks(true);
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+ sessionState->setReturnValueForShouldIgnoreAuthChecks(false);
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+
+ // Check that you can't authorize a user that doesn't exist.
+ ASSERT_EQUALS(ErrorCodes::UserNotFound,
+ authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+
+ // Add a user with readWrite and dbAdmin on the test DB
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "readWrite"
+ << "db"
+ << "test")
+ << BSON("role"
+ << "dbAdmin"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testDBResource, ActionType::dbStats));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherFooCollResource, ActionType::insert));
+
+ // Add an admin user with readWriteAnyDatabase
+ ASSERT_OK(
+ managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "admin"
+ << "db"
+ << "admin"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "readWriteAnyDatabase"
+ << "db"
+ << "admin"))),
+ BSONObj()));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("admin", "admin")));
+
+ ASSERT_TRUE(authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString("anydb.somecollection")),
+ ActionType::insert));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(otherDBResource, ActionType::insert));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(otherFooCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherFooCollResource, ActionType::collMod));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+
+ authzSession->logoutDatabase("test");
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(otherFooCollResource, ActionType::insert));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::collMod));
+
+ authzSession->logoutDatabase("admin");
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherFooCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::collMod));
+}
+
+TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
+ // Add a user with doubled-up readWrite and single dbAdmin on the test DB
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "readWrite"
+ << "db"
+ << "test")
+ << BSON("role"
+ << "dbAdmin"
+ << "db"
+ << "test")
+ << BSON("role"
+ << "readWrite"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testDBResource, ActionType::dbStats));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherFooCollResource, ActionType::insert));
+}
+
+TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "rw"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "readWrite"
+ << "db"
+ << "test")
+ << BSON("role"
+ << "dbAdmin"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "useradmin"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "userAdmin"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(
+ managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "rwany"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "readWriteAnyDatabase"
+ << "db"
+ << "admin")
+ << BSON("role"
+ << "dbAdminAnyDatabase"
+ << "db"
+ << "admin"))),
+ BSONObj()));
+ ASSERT_OK(
+ managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "useradminany"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "userAdminAnyDatabase"
+ << "db"
+ << "admin"))),
+ BSONObj()));
+
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("rwany", "test")));
+
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testIndexesCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testProfileCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(otherIndexesCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
+
+ // Logging in as useradminany@test implicitly logs out rwany@test.
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("useradminany", "test")));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::insert));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::insert));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testIndexesCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testProfileCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherIndexesCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
+
+ // Logging in as rw@test implicitly logs out useradminany@test.
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("rw", "test")));
+
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testIndexesCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testProfileCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherIndexesCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
+
+
+ // Logging in as useradmin@test implicitly logs out rw@test.
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("useradmin", "test")));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testUsersCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::insert));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherUsersCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testIndexesCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testProfileCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherIndexesCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(otherProfileCollResource, ActionType::find));
+}
+
+TEST_F(AuthorizationSessionTest, InvalidateUser) {
+ // Add a readWrite user
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "readWrite"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+
+ User* user = authzSession->lookupUser(UserName("spencer", "test"));
+ ASSERT(user->isValid());
+
+ // Change the user to be read-only
+ int ignored;
+ managerState->remove(
+ &_txn, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "read"
+ << "db"
+ << "test"))),
+ BSONObj()));
+
+ // Make sure that invalidating the user causes the session to reload its privileges.
+ authzManager->invalidateUserByName(user->getName());
+ authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+
+ user = authzSession->lookupUser(UserName("spencer", "test"));
+ ASSERT(user->isValid());
+
+ // Delete the user.
+ managerState->remove(
+ &_txn, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ // Make sure that invalidating the user causes the session to reload its privileges.
+ authzManager->invalidateUserByName(user->getName());
+ authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+ ASSERT_FALSE(authzSession->lookupUser(UserName("spencer", "test")));
+}
+
+TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
+ // Add a readWrite user
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "readWrite"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("spencer", "test")));
+
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+
+ User* user = authzSession->lookupUser(UserName("spencer", "test"));
+ ASSERT(user->isValid());
+
+ // Change the user to be read-only
+ int ignored;
+ managerState->setFindsShouldFail(true);
+ managerState->remove(
+ &_txn, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "read"
+ << "db"
+ << "test"))),
+ BSONObj()));
+
+ // Even though the user's privileges have been reduced, since we've configured user
+ // document lookup to fail, the authz session should continue to use its known out-of-date
+ // privilege data.
+ authzManager->invalidateUserByName(user->getName());
+ authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+
+ // Once we configure document lookup to succeed again, authorization checks should
+ // observe the new values.
+ managerState->setFindsShouldFail(false);
+ authzSession->startRequest(&_txn); // Refreshes cached data for invalid users
+ ASSERT_TRUE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
+ ASSERT_FALSE(
+ authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
+}
} // namespace
} // namespace mongo
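
Note: the fixture above builds a real AuthorizationManager/AuthorizationSession pair on top of the failure-capable mock external state, so additional cases follow the same pattern: insert a privilege document, authorize the user, then assert on individual actions. As a minimal sketch (not part of this change; the "reader" user is hypothetical), a test exercising the plain "read" role on the same fixture could look like this:

TEST_F(AuthorizationSessionTest, ReadRoleGrantsFindButNotInsert) {
    // Grant only the "read" role on the test DB.
    ASSERT_OK(managerState->insertPrivilegeDocument(&_txn,
                                                    BSON("user"
                                                         << "reader"
                                                         << "db"
                                                         << "test"
                                                         << "credentials" << BSON("MONGODB-CR"
                                                                                  << "a")
                                                         << "roles" << BSON_ARRAY(BSON("role"
                                                                                       << "read"
                                                                                       << "db"
                                                                                       << "test"))),
                                                    BSONObj()));
    ASSERT_OK(authzSession->addAndAuthorizeUser(&_txn, UserName("reader", "test")));

    // "read" should permit find but not insert on an ordinary collection.
    ASSERT_TRUE(
        authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::find));
    ASSERT_FALSE(
        authzSession->isAuthorizedForActionsOnResource(testFooCollResource, ActionType::insert));
}
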
diff --git a/src/mongo/db/auth/authz_manager_external_state.cpp b/src/mongo/db/auth/authz_manager_external_state.cpp
index 62d6683b78b..fae54cbf5e9 100644
--- a/src/mongo/db/auth/authz_manager_external_state.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state.cpp
@@ -32,9 +32,9 @@
namespace mongo {
- stdx::function<std::unique_ptr<AuthzManagerExternalState>()> AuthzManagerExternalState::create;
+stdx::function<std::unique_ptr<AuthzManagerExternalState>()> AuthzManagerExternalState::create;
- AuthzManagerExternalState::AuthzManagerExternalState() = default;
- AuthzManagerExternalState::~AuthzManagerExternalState() = default;
+AuthzManagerExternalState::AuthzManagerExternalState() = default;
+AuthzManagerExternalState::~AuthzManagerExternalState() = default;
} // namespace mongo
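
Note: the "create" member defined above is a process-wide factory hook; process-specific initialization code is expected to assign it, and code that needs an external state then invokes it. A minimal sketch only, assuming a default-constructible mock implementation (the helper name is illustrative):

void installMockAuthzExternalStateFactory() {
    AuthzManagerExternalState::create = [] {
        return stdx::make_unique<AuthzManagerExternalStateMock>();
    };
}

// Callers then obtain an instance with:
//     std::unique_ptr<AuthzManagerExternalState> state = AuthzManagerExternalState::create();
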
diff --git a/src/mongo/db/auth/authz_manager_external_state.h b/src/mongo/db/auth/authz_manager_external_state.h
index 6fd94effb69..e3bdcdb8c43 100644
--- a/src/mongo/db/auth/authz_manager_external_state.h
+++ b/src/mongo/db/auth/authz_manager_external_state.h
@@ -41,105 +41,101 @@
namespace mongo {
- class AuthorizationManager;
- class AuthzSessionExternalState;
- class OperationContext;
+class AuthorizationManager;
+class AuthzSessionExternalState;
+class OperationContext;
+
+/**
+ * Public interface for a class that encapsulates all the information related to system
+ * state not stored in AuthorizationManager. This is primarily to make AuthorizationManager
+ * easier to test as well as to allow different implementations for mongos and mongod.
+ */
+class AuthzManagerExternalState {
+ MONGO_DISALLOW_COPYING(AuthzManagerExternalState);
+
+public:
+ static stdx::function<std::unique_ptr<AuthzManagerExternalState>()> create;
+
+ virtual ~AuthzManagerExternalState();
/**
- * Public interface for a class that encapsulates all the information related to system
- * state not stored in AuthorizationManager. This is primarily to make AuthorizationManager
- * easier to test as well as to allow different implementations for mongos and mongod.
+ * Initializes the external state object. Must be called after construction and before
+ * calling other methods. Object may not be used after this method returns something other
+ * than Status::OK().
*/
- class AuthzManagerExternalState {
- MONGO_DISALLOW_COPYING(AuthzManagerExternalState);
-
- public:
-
- static stdx::function<std::unique_ptr<AuthzManagerExternalState>()> create;
-
- virtual ~AuthzManagerExternalState();
-
- /**
- * Initializes the external state object. Must be called after construction and before
- * calling other methods. Object may not be used after this method returns something other
- * than Status::OK().
- */
- virtual Status initialize(OperationContext* txn) = 0;
-
- /**
- * Creates an external state manipulator for an AuthorizationSession whose
- * AuthorizationManager uses this object as its own external state manipulator.
- */
- virtual std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
- AuthorizationManager* authzManager) = 0;
-
- /**
- * Retrieves the schema version of the persistent data describing users and roles.
- * Will leave *outVersion unmodified on non-OK status return values.
- */
- virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion) = 0;
-
- /**
- * Writes into "result" a document describing the named user and returns Status::OK(). The
- * description includes the user credentials, if present, the user's role membership and
- * delegation information, a full list of the user's privileges, and a full list of the
- * user's roles, including those roles held implicitly through other roles (indirect roles).
- * In the event that some of this information is inconsistent, the document will contain a
- * "warnings" array, with std::string messages describing inconsistencies.
- *
- * If the user does not exist, returns ErrorCodes::UserNotFound.
- */
- virtual Status getUserDescription(
- OperationContext* txn, const UserName& userName, BSONObj* result) = 0;
-
- /**
- * Writes into "result" a document describing the named role and returns Status::OK(). The
- * description includes the roles in which the named role has membership and a full list of
- * the roles of which the named role is a member, including those roles memberships held
- * implicitly through other roles (indirect roles). If "showPrivileges" is true, then the
- * description documents will also include a full list of the role's privileges.
- * In the event that some of this information is inconsistent, the document will contain a
- * "warnings" array, with std::string messages describing inconsistencies.
- *
- * If the role does not exist, returns ErrorCodes::RoleNotFound.
- */
- virtual Status getRoleDescription(const RoleName& roleName,
- bool showPrivileges,
- BSONObj* result) = 0;
-
- /**
- * Writes into "result" documents describing the roles that are defined on the given
- * database. Each role description document includes the other roles in which the role has
- * membership and a full list of the roles of which the named role is a member,
- * including those roles memberships held implicitly through other roles (indirect roles).
- * If showPrivileges is true, then the description documents will also include a full list
- * of the role's privileges. If showBuiltinRoles is true, then the result array will
- * contain description documents for all the builtin roles for the given database, if it
- * is false the result will just include user defined roles.
- * In the event that some of the information in a given role description is inconsistent,
- * the document will contain a "warnings" array, with std::string messages describing
- * inconsistencies.
- */
- virtual Status getRoleDescriptionsForDB(const std::string dbname,
- bool showPrivileges,
- bool showBuiltinRoles,
- std::vector<BSONObj>* result) = 0;
-
- /**
- * Returns true if there exists at least one privilege document in the system.
- */
- virtual bool hasAnyPrivilegeDocuments(OperationContext* txn) = 0;
-
- virtual void logOp(
- OperationContext* txn,
- const char* op,
- const char* ns,
- const BSONObj& o,
- BSONObj* o2) {}
-
-
- protected:
- AuthzManagerExternalState(); // This class should never be instantiated directly.
- };
-
-} // namespace mongo
+ virtual Status initialize(OperationContext* txn) = 0;
+
+ /**
+ * Creates an external state manipulator for an AuthorizationSession whose
+ * AuthorizationManager uses this object as its own external state manipulator.
+ */
+ virtual std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
+ AuthorizationManager* authzManager) = 0;
+
+ /**
+ * Retrieves the schema version of the persistent data describing users and roles.
+ * Will leave *outVersion unmodified on non-OK status return values.
+ */
+ virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion) = 0;
+
+ /**
+ * Writes into "result" a document describing the named user and returns Status::OK(). The
+ * description includes the user credentials, if present, the user's role membership and
+ * delegation information, a full list of the user's privileges, and a full list of the
+ * user's roles, including those roles held implicitly through other roles (indirect roles).
+ * In the event that some of this information is inconsistent, the document will contain a
+ * "warnings" array, with std::string messages describing inconsistencies.
+ *
+ * If the user does not exist, returns ErrorCodes::UserNotFound.
+ */
+ virtual Status getUserDescription(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result) = 0;
+
+ /**
+ * Writes into "result" a document describing the named role and returns Status::OK(). The
+ * description includes the roles in which the named role has membership and a full list of
+     * the roles of which the named role is a member, including those role memberships held
+ * implicitly through other roles (indirect roles). If "showPrivileges" is true, then the
+ * description documents will also include a full list of the role's privileges.
+ * In the event that some of this information is inconsistent, the document will contain a
+ * "warnings" array, with std::string messages describing inconsistencies.
+ *
+ * If the role does not exist, returns ErrorCodes::RoleNotFound.
+ */
+ virtual Status getRoleDescription(const RoleName& roleName,
+ bool showPrivileges,
+ BSONObj* result) = 0;
+
+ /**
+ * Writes into "result" documents describing the roles that are defined on the given
+ * database. Each role description document includes the other roles in which the role has
+ * membership and a full list of the roles of which the named role is a member,
+     * including those role memberships held implicitly through other roles (indirect roles).
+ * If showPrivileges is true, then the description documents will also include a full list
+ * of the role's privileges. If showBuiltinRoles is true, then the result array will
+     * contain description documents for all the builtin roles for the given database; if it
+     * is false, the result will include only user-defined roles.
+ * In the event that some of the information in a given role description is inconsistent,
+ * the document will contain a "warnings" array, with std::string messages describing
+ * inconsistencies.
+ */
+ virtual Status getRoleDescriptionsForDB(const std::string dbname,
+ bool showPrivileges,
+ bool showBuiltinRoles,
+ std::vector<BSONObj>* result) = 0;
+
+ /**
+ * Returns true if there exists at least one privilege document in the system.
+ */
+ virtual bool hasAnyPrivilegeDocuments(OperationContext* txn) = 0;
+
+ virtual void logOp(
+ OperationContext* txn, const char* op, const char* ns, const BSONObj& o, BSONObj* o2) {}
+
+
+protected:
+ AuthzManagerExternalState(); // This class should never be instantiated directly.
+};
+
+} // namespace mongo
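
Note: the comment blocks above spell out the contracts of the pure virtual methods. A minimal caller-side sketch for getUserDescription(), assuming an already-initialized implementation and an active operation context supplied by the caller (the helper name is illustrative only):

Status describeUser(OperationContext* txn,
                    AuthzManagerExternalState* externalState,
                    const UserName& userName,
                    BSONObj* description) {
    Status status = externalState->getUserDescription(txn, userName, description);
    if (status == ErrorCodes::UserNotFound) {
        // Documented behavior: a missing user is reported via UserNotFound,
        // not via an empty description.
        return status;
    }
    if (!status.isOK())
        return status;
    // On success, *description carries the credentials (if present), role
    // membership, inherited roles, the full privilege list, and possibly a
    // "warnings" array describing any inconsistencies.
    return Status::OK();
}
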
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 1c529f037d3..601c14decff 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -51,49 +51,43 @@
namespace mongo {
- AuthzManagerExternalStateMongod::AuthzManagerExternalStateMongod() = default;
- AuthzManagerExternalStateMongod::~AuthzManagerExternalStateMongod() = default;
+AuthzManagerExternalStateMongod::AuthzManagerExternalStateMongod() = default;
+AuthzManagerExternalStateMongod::~AuthzManagerExternalStateMongod() = default;
- std::unique_ptr<AuthzSessionExternalState>
- AuthzManagerExternalStateMongod::makeAuthzSessionExternalState(
- AuthorizationManager* authzManager) {
+std::unique_ptr<AuthzSessionExternalState>
+AuthzManagerExternalStateMongod::makeAuthzSessionExternalState(AuthorizationManager* authzManager) {
+ return stdx::make_unique<AuthzSessionExternalStateMongod>(authzManager);
+}
- return stdx::make_unique<AuthzSessionExternalStateMongod>(authzManager);
+Status AuthzManagerExternalStateMongod::query(
+ OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& projection,
+ const stdx::function<void(const BSONObj&)>& resultProcessor) {
+ try {
+ DBDirectClient client(txn);
+ client.query(resultProcessor, collectionName.ns(), query, &projection);
+ return Status::OK();
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
- Status AuthzManagerExternalStateMongod::query(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& projection,
- const stdx::function<void(const BSONObj&)>& resultProcessor) {
- try {
- DBDirectClient client(txn);
- client.query(resultProcessor, collectionName.ns(), query, &projection);
- return Status::OK();
- } catch (const DBException& e) {
- return e.toStatus();
- }
- }
-
- Status AuthzManagerExternalStateMongod::findOne(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- BSONObj* result) {
-
- AutoGetCollectionForRead ctx(txn, collectionName);
+Status AuthzManagerExternalStateMongod::findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ BSONObj* result) {
+ AutoGetCollectionForRead ctx(txn, collectionName);
- BSONObj found;
- if (Helpers::findOne(txn,
- ctx.getCollection(),
- query,
- found)) {
- *result = found.getOwned();
- return Status::OK();
- }
- return Status(ErrorCodes::NoMatchingDocument, mongoutils::str::stream() <<
- "No document in " << collectionName.ns() << " matches " << query);
+ BSONObj found;
+ if (Helpers::findOne(txn, ctx.getCollection(), query, found)) {
+ *result = found.getOwned();
+ return Status::OK();
}
+ return Status(ErrorCodes::NoMatchingDocument,
+ mongoutils::str::stream() << "No document in " << collectionName.ns()
+ << " matches " << query);
+}
-} // namespace mongo
+} // namespace mongo
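
Note: findOne() above reports an unmatched query as ErrorCodes::NoMatchingDocument rather than as success, and query() streams every matching document through a callback. A minimal sketch of collecting user documents through that callback interface (the helper name is illustrative; documents are copied with getOwned() since the BSONObj handed to the callback may not outlive the call, for the same reason findOne() returns found.getOwned()):

Status collectUserDocuments(OperationContext* txn,
                            AuthzManagerExternalStateMongod* state,
                            std::vector<BSONObj>* out) {
    return state->query(txn,
                        AuthorizationManager::usersCollectionNamespace,
                        BSONObj(),  // match every document
                        BSONObj(),  // no projection
                        [out](const BSONObj& doc) { out->push_back(doc.getOwned()); });
}
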
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.h b/src/mongo/db/auth/authz_manager_external_state_d.h
index fd2c6537166..f0fb7f91568 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.h
+++ b/src/mongo/db/auth/authz_manager_external_state_d.h
@@ -39,28 +39,28 @@
namespace mongo {
- /**
- * The implementation of AuthzManagerExternalState functionality for mongod.
- */
- class AuthzManagerExternalStateMongod : public AuthzManagerExternalStateLocal {
- MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongod);
+/**
+ * The implementation of AuthzManagerExternalState functionality for mongod.
+ */
+class AuthzManagerExternalStateMongod : public AuthzManagerExternalStateLocal {
+ MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongod);
- public:
- AuthzManagerExternalStateMongod();
- virtual ~AuthzManagerExternalStateMongod();
+public:
+ AuthzManagerExternalStateMongod();
+ virtual ~AuthzManagerExternalStateMongod();
- std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
- AuthorizationManager* authzManager) override;
+ std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
+ AuthorizationManager* authzManager) override;
- virtual Status findOne(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- BSONObj* result);
- virtual Status query(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& projection,
- const stdx::function<void(const BSONObj&)>& resultProcessor);
- };
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ BSONObj* result);
+ virtual Status query(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& projection,
+ const stdx::function<void(const BSONObj&)>& resultProcessor);
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index e1f4b8e0301..ed0c8e560a5 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -42,422 +42,385 @@
namespace mongo {
- using std::vector;
+using std::vector;
- Status AuthzManagerExternalStateLocal::initialize(OperationContext* txn) {
- Status status = _initializeRoleGraph(txn);
- if (!status.isOK()) {
- if (status == ErrorCodes::GraphContainsCycle) {
- error() << "Cycle detected in admin.system.roles; role inheritance disabled. "
- "Remove the listed cycle and any others to re-enable role inheritance. " <<
- status.reason();
- }
- else {
- error() << "Could not generate role graph from admin.system.roles; "
- "only system roles available: " << status;
- }
+Status AuthzManagerExternalStateLocal::initialize(OperationContext* txn) {
+ Status status = _initializeRoleGraph(txn);
+ if (!status.isOK()) {
+ if (status == ErrorCodes::GraphContainsCycle) {
+ error() << "Cycle detected in admin.system.roles; role inheritance disabled. "
+ "Remove the listed cycle and any others to re-enable role inheritance. "
+ << status.reason();
+ } else {
+ error() << "Could not generate role graph from admin.system.roles; "
+ "only system roles available: " << status;
}
-
- return Status::OK();
}
- Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(
- OperationContext* txn, int* outVersion) {
- BSONObj versionDoc;
- Status status = findOne(txn,
- AuthorizationManager::versionCollectionNamespace,
- AuthorizationManager::versionDocumentQuery,
- &versionDoc);
- if (status.isOK()) {
- BSONElement versionElement = versionDoc[AuthorizationManager::schemaVersionFieldName];
- if (versionElement.isNumber()) {
- *outVersion = versionElement.numberInt();
- return Status::OK();
- }
- else if (versionElement.eoo()) {
- return Status(ErrorCodes::NoSuchKey, mongoutils::str::stream() <<
- "No " << AuthorizationManager::schemaVersionFieldName <<
- " field in version document.");
- }
- else {
- return Status(ErrorCodes::TypeMismatch, mongoutils::str::stream() <<
- "Could not determine schema version of authorization data. "
- "Bad (non-numeric) type " << typeName(versionElement.type()) <<
- " (" << versionElement.type() << ") for " <<
- AuthorizationManager::schemaVersionFieldName <<
- " field in version document");
- }
- }
- else if (status == ErrorCodes::NoMatchingDocument) {
- *outVersion = AuthorizationManager::schemaVersion28SCRAM;
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(OperationContext* txn,
+ int* outVersion) {
+ BSONObj versionDoc;
+ Status status = findOne(txn,
+ AuthorizationManager::versionCollectionNamespace,
+ AuthorizationManager::versionDocumentQuery,
+ &versionDoc);
+ if (status.isOK()) {
+ BSONElement versionElement = versionDoc[AuthorizationManager::schemaVersionFieldName];
+ if (versionElement.isNumber()) {
+ *outVersion = versionElement.numberInt();
return Status::OK();
+ } else if (versionElement.eoo()) {
+ return Status(ErrorCodes::NoSuchKey,
+ mongoutils::str::stream() << "No "
+ << AuthorizationManager::schemaVersionFieldName
+ << " field in version document.");
+ } else {
+ return Status(ErrorCodes::TypeMismatch,
+ mongoutils::str::stream()
+ << "Could not determine schema version of authorization data. "
+ "Bad (non-numeric) type " << typeName(versionElement.type())
+ << " (" << versionElement.type() << ") for "
+ << AuthorizationManager::schemaVersionFieldName
+ << " field in version document");
}
- else {
- return status;
- }
+ } else if (status == ErrorCodes::NoMatchingDocument) {
+ *outVersion = AuthorizationManager::schemaVersion28SCRAM;
+ return Status::OK();
+ } else {
+ return status;
}
+}
namespace {
- void addRoleNameToObjectElement(mutablebson::Element object, const RoleName& role) {
- fassert(17153, object.appendString(AuthorizationManager::ROLE_NAME_FIELD_NAME,
- role.getRole()));
- fassert(17154, object.appendString(AuthorizationManager::ROLE_DB_FIELD_NAME,
- role.getDB()));
- }
-
- void addRoleNameObjectsToArrayElement(mutablebson::Element array, RoleNameIterator roles) {
- for (; roles.more(); roles.next()) {
- mutablebson::Element roleElement = array.getDocument().makeElementObject("");
- addRoleNameToObjectElement(roleElement, roles.get());
- fassert(17155, array.pushBack(roleElement));
- }
+void addRoleNameToObjectElement(mutablebson::Element object, const RoleName& role) {
+ fassert(17153, object.appendString(AuthorizationManager::ROLE_NAME_FIELD_NAME, role.getRole()));
+ fassert(17154, object.appendString(AuthorizationManager::ROLE_DB_FIELD_NAME, role.getDB()));
+}
+
+void addRoleNameObjectsToArrayElement(mutablebson::Element array, RoleNameIterator roles) {
+ for (; roles.more(); roles.next()) {
+ mutablebson::Element roleElement = array.getDocument().makeElementObject("");
+ addRoleNameToObjectElement(roleElement, roles.get());
+ fassert(17155, array.pushBack(roleElement));
}
-
- void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privilegesElement,
- mutablebson::Element warningsElement,
- const PrivilegeVector& privileges) {
- std::string errmsg;
- for (size_t i = 0; i < privileges.size(); ++i) {
- ParsedPrivilege pp;
- if (ParsedPrivilege::privilegeToParsedPrivilege(privileges[i], &pp, &errmsg)) {
- fassert(17156, privilegesElement.appendObject("", pp.toBSON()));
- } else {
- fassert(17157,
- warningsElement.appendString(
+}
+
+void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privilegesElement,
+ mutablebson::Element warningsElement,
+ const PrivilegeVector& privileges) {
+ std::string errmsg;
+ for (size_t i = 0; i < privileges.size(); ++i) {
+ ParsedPrivilege pp;
+ if (ParsedPrivilege::privilegeToParsedPrivilege(privileges[i], &pp, &errmsg)) {
+ fassert(17156, privilegesElement.appendObject("", pp.toBSON()));
+ } else {
+ fassert(17157,
+ warningsElement.appendString(
"",
- std::string(mongoutils::str::stream() <<
- "Skipped privileges on resource " <<
- privileges[i].getResourcePattern().toString() <<
- ". Reason: " << errmsg)));
- }
+ std::string(mongoutils::str::stream()
+ << "Skipped privileges on resource "
+ << privileges[i].getResourcePattern().toString()
+ << ". Reason: " << errmsg)));
}
}
+}
} // namespace
- bool AuthzManagerExternalStateLocal::hasAnyPrivilegeDocuments(OperationContext* txn) {
- BSONObj userBSONObj;
- Status status = findOne(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSONObj(),
- &userBSONObj);
- // If we were unable to complete the query,
- // it's best to assume that there _are_ privilege documents.
- return status != ErrorCodes::NoMatchingDocument;
- }
-
- Status AuthzManagerExternalStateLocal::getUserDescription(
- OperationContext* txn,
- const UserName& userName,
- BSONObj* result) {
-
- BSONObj userDoc;
- Status status = _getUserDocument(txn, userName, &userDoc);
- if (!status.isOK())
- return status;
+bool AuthzManagerExternalStateLocal::hasAnyPrivilegeDocuments(OperationContext* txn) {
+ BSONObj userBSONObj;
+ Status status =
+ findOne(txn, AuthorizationManager::usersCollectionNamespace, BSONObj(), &userBSONObj);
+ // If we were unable to complete the query,
+ // it's best to assume that there _are_ privilege documents.
+ return status != ErrorCodes::NoMatchingDocument;
+}
+
+Status AuthzManagerExternalStateLocal::getUserDescription(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result) {
+ BSONObj userDoc;
+ Status status = _getUserDocument(txn, userName, &userDoc);
+ if (!status.isOK())
+ return status;
- BSONElement directRolesElement;
- status = bsonExtractTypedField(userDoc, "roles", Array, &directRolesElement);
- if (!status.isOK())
- return status;
- std::vector<RoleName> directRoles;
- status = V2UserDocumentParser::parseRoleVector(BSONArray(directRolesElement.Obj()),
- &directRoles);
- if (!status.isOK())
- return status;
+ BSONElement directRolesElement;
+ status = bsonExtractTypedField(userDoc, "roles", Array, &directRolesElement);
+ if (!status.isOK())
+ return status;
+ std::vector<RoleName> directRoles;
+ status =
+ V2UserDocumentParser::parseRoleVector(BSONArray(directRolesElement.Obj()), &directRoles);
+ if (!status.isOK())
+ return status;
- unordered_set<RoleName> indirectRoles;
- PrivilegeVector allPrivileges;
- bool isRoleGraphInconsistent;
- {
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
- isRoleGraphInconsistent = _roleGraphState == roleGraphStateConsistent;
- for (size_t i = 0; i < directRoles.size(); ++i) {
- const RoleName& role(directRoles[i]);
- indirectRoles.insert(role);
- if (isRoleGraphInconsistent) {
- for (RoleNameIterator subordinates = _roleGraph.getIndirectSubordinates(role);
- subordinates.more();
- subordinates.next()) {
-
- indirectRoles.insert(subordinates.get());
- }
- }
- const PrivilegeVector& rolePrivileges(
- isRoleGraphInconsistent ?
- _roleGraph.getAllPrivileges(role) :
- _roleGraph.getDirectPrivileges(role));
- for (PrivilegeVector::const_iterator priv = rolePrivileges.begin(),
- end = rolePrivileges.end();
- priv != end;
- ++priv) {
-
- Privilege::addPrivilegeToPrivilegeVector(&allPrivileges, *priv);
+ unordered_set<RoleName> indirectRoles;
+ PrivilegeVector allPrivileges;
+ bool isRoleGraphInconsistent;
+ {
+ stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ isRoleGraphInconsistent = _roleGraphState == roleGraphStateConsistent;
+ for (size_t i = 0; i < directRoles.size(); ++i) {
+ const RoleName& role(directRoles[i]);
+ indirectRoles.insert(role);
+ if (isRoleGraphInconsistent) {
+ for (RoleNameIterator subordinates = _roleGraph.getIndirectSubordinates(role);
+ subordinates.more();
+ subordinates.next()) {
+ indirectRoles.insert(subordinates.get());
}
}
+ const PrivilegeVector& rolePrivileges(isRoleGraphInconsistent
+ ? _roleGraph.getAllPrivileges(role)
+ : _roleGraph.getDirectPrivileges(role));
+ for (PrivilegeVector::const_iterator priv = rolePrivileges.begin(),
+ end = rolePrivileges.end();
+ priv != end;
+ ++priv) {
+ Privilege::addPrivilegeToPrivilegeVector(&allPrivileges, *priv);
+ }
}
-
- mutablebson::Document resultDoc(userDoc, mutablebson::Document::kInPlaceDisabled);
- mutablebson::Element inheritedRolesElement = resultDoc.makeElementArray("inheritedRoles");
- mutablebson::Element privilegesElement = resultDoc.makeElementArray("inheritedPrivileges");
- mutablebson::Element warningsElement = resultDoc.makeElementArray("warnings");
- fassert(17159, resultDoc.root().pushBack(inheritedRolesElement));
- fassert(17158, resultDoc.root().pushBack(privilegesElement));
- if (!isRoleGraphInconsistent) {
- fassert(17160, warningsElement.appendString(
- "", "Role graph inconsistent, only direct privileges available."));
- }
- addRoleNameObjectsToArrayElement(inheritedRolesElement,
- makeRoleNameIteratorForContainer(indirectRoles));
- addPrivilegeObjectsOrWarningsToArrayElement(
- privilegesElement, warningsElement, allPrivileges);
- if (warningsElement.hasChildren()) {
- fassert(17161, resultDoc.root().pushBack(warningsElement));
- }
- *result = resultDoc.getObject();
- return Status::OK();
}
- Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* txn,
- const UserName& userName,
- BSONObj* userDoc) {
- Status status = findOne(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME << userName.getUser() <<
- AuthorizationManager::USER_DB_FIELD_NAME << userName.getDB()),
- userDoc);
- if (status == ErrorCodes::NoMatchingDocument) {
- status = Status(ErrorCodes::UserNotFound, mongoutils::str::stream() <<
- "Could not find user " << userName.getFullName());
- }
- return status;
+ mutablebson::Document resultDoc(userDoc, mutablebson::Document::kInPlaceDisabled);
+ mutablebson::Element inheritedRolesElement = resultDoc.makeElementArray("inheritedRoles");
+ mutablebson::Element privilegesElement = resultDoc.makeElementArray("inheritedPrivileges");
+ mutablebson::Element warningsElement = resultDoc.makeElementArray("warnings");
+ fassert(17159, resultDoc.root().pushBack(inheritedRolesElement));
+ fassert(17158, resultDoc.root().pushBack(privilegesElement));
+ if (!isRoleGraphInconsistent) {
+ fassert(17160,
+ warningsElement.appendString(
+ "", "Role graph inconsistent, only direct privileges available."));
}
-
- Status AuthzManagerExternalStateLocal::getRoleDescription(const RoleName& roleName,
- bool showPrivileges,
- BSONObj* result) {
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
- return _getRoleDescription_inlock(roleName, showPrivileges, result);
+ addRoleNameObjectsToArrayElement(inheritedRolesElement,
+ makeRoleNameIteratorForContainer(indirectRoles));
+ addPrivilegeObjectsOrWarningsToArrayElement(privilegesElement, warningsElement, allPrivileges);
+ if (warningsElement.hasChildren()) {
+ fassert(17161, resultDoc.root().pushBack(warningsElement));
+ }
+ *result = resultDoc.getObject();
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* userDoc) {
+ Status status = findOne(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB()),
+ userDoc);
+ if (status == ErrorCodes::NoMatchingDocument) {
+ status =
+ Status(ErrorCodes::UserNotFound,
+ mongoutils::str::stream() << "Could not find user " << userName.getFullName());
+ }
+ return status;
+}
+
+Status AuthzManagerExternalStateLocal::getRoleDescription(const RoleName& roleName,
+ bool showPrivileges,
+ BSONObj* result) {
+ stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ return _getRoleDescription_inlock(roleName, showPrivileges, result);
+}
+
+Status AuthzManagerExternalStateLocal::_getRoleDescription_inlock(const RoleName& roleName,
+ bool showPrivileges,
+ BSONObj* result) {
+ if (!_roleGraph.roleExists(roleName))
+ return Status(ErrorCodes::RoleNotFound, "No role named " + roleName.toString());
+
+ mutablebson::Document resultDoc;
+ fassert(17162,
+ resultDoc.root().appendString(AuthorizationManager::ROLE_NAME_FIELD_NAME,
+ roleName.getRole()));
+ fassert(
+ 17163,
+ resultDoc.root().appendString(AuthorizationManager::ROLE_DB_FIELD_NAME, roleName.getDB()));
+ fassert(17267, resultDoc.root().appendBool("isBuiltin", _roleGraph.isBuiltinRole(roleName)));
+ mutablebson::Element rolesElement = resultDoc.makeElementArray("roles");
+ fassert(17164, resultDoc.root().pushBack(rolesElement));
+ mutablebson::Element inheritedRolesElement = resultDoc.makeElementArray("inheritedRoles");
+ fassert(17165, resultDoc.root().pushBack(inheritedRolesElement));
+ mutablebson::Element privilegesElement = resultDoc.makeElementArray("privileges");
+ mutablebson::Element inheritedPrivilegesElement =
+ resultDoc.makeElementArray("inheritedPrivileges");
+ if (showPrivileges) {
+ fassert(17166, resultDoc.root().pushBack(privilegesElement));
}
+ mutablebson::Element warningsElement = resultDoc.makeElementArray("warnings");
- Status AuthzManagerExternalStateLocal::_getRoleDescription_inlock(const RoleName& roleName,
- bool showPrivileges,
- BSONObj* result) {
- if (!_roleGraph.roleExists(roleName))
- return Status(ErrorCodes::RoleNotFound, "No role named " + roleName.toString());
-
- mutablebson::Document resultDoc;
- fassert(17162, resultDoc.root().appendString(
- AuthorizationManager::ROLE_NAME_FIELD_NAME, roleName.getRole()));
- fassert(17163, resultDoc.root().appendString(
- AuthorizationManager::ROLE_DB_FIELD_NAME, roleName.getDB()));
- fassert(17267,
- resultDoc.root().appendBool("isBuiltin", _roleGraph.isBuiltinRole(roleName)));
- mutablebson::Element rolesElement = resultDoc.makeElementArray("roles");
- fassert(17164, resultDoc.root().pushBack(rolesElement));
- mutablebson::Element inheritedRolesElement = resultDoc.makeElementArray("inheritedRoles");
- fassert(17165, resultDoc.root().pushBack(inheritedRolesElement));
- mutablebson::Element privilegesElement = resultDoc.makeElementArray("privileges");
- mutablebson::Element inheritedPrivilegesElement =
- resultDoc.makeElementArray("inheritedPrivileges");
+ addRoleNameObjectsToArrayElement(rolesElement, _roleGraph.getDirectSubordinates(roleName));
+ if (_roleGraphState == roleGraphStateConsistent) {
+ addRoleNameObjectsToArrayElement(inheritedRolesElement,
+ _roleGraph.getIndirectSubordinates(roleName));
if (showPrivileges) {
- fassert(17166, resultDoc.root().pushBack(privilegesElement));
- }
- mutablebson::Element warningsElement = resultDoc.makeElementArray("warnings");
-
- addRoleNameObjectsToArrayElement(rolesElement, _roleGraph.getDirectSubordinates(roleName));
- if (_roleGraphState == roleGraphStateConsistent) {
- addRoleNameObjectsToArrayElement(
- inheritedRolesElement, _roleGraph.getIndirectSubordinates(roleName));
- if (showPrivileges) {
- addPrivilegeObjectsOrWarningsToArrayElement(
- privilegesElement,
- warningsElement,
- _roleGraph.getDirectPrivileges(roleName));
-
- addPrivilegeObjectsOrWarningsToArrayElement(
- inheritedPrivilegesElement,
- warningsElement,
- _roleGraph.getAllPrivileges(roleName));
-
- fassert(17323, resultDoc.root().pushBack(inheritedPrivilegesElement));
- }
- }
- else if (showPrivileges) {
- warningsElement.appendString(
- "", "Role graph state inconsistent; only direct privileges available.");
addPrivilegeObjectsOrWarningsToArrayElement(
- privilegesElement, warningsElement, _roleGraph.getDirectPrivileges(roleName));
- }
- if (warningsElement.hasChildren()) {
- fassert(17167, resultDoc.root().pushBack(warningsElement));
- }
- *result = resultDoc.getObject();
- return Status::OK();
- }
+ privilegesElement, warningsElement, _roleGraph.getDirectPrivileges(roleName));
- Status AuthzManagerExternalStateLocal::getRoleDescriptionsForDB(const std::string dbname,
- bool showPrivileges,
- bool showBuiltinRoles,
- vector<BSONObj>* result) {
- stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+ addPrivilegeObjectsOrWarningsToArrayElement(
+ inheritedPrivilegesElement, warningsElement, _roleGraph.getAllPrivileges(roleName));
- for (RoleNameIterator it = _roleGraph.getRolesForDatabase(dbname);
- it.more(); it.next()) {
- if (!showBuiltinRoles && _roleGraph.isBuiltinRole(it.get())) {
- continue;
- }
- BSONObj roleDoc;
- Status status = _getRoleDescription_inlock(it.get(), showPrivileges, &roleDoc);
- if (!status.isOK()) {
- return status;
- }
- result->push_back(roleDoc);
+ fassert(17323, resultDoc.root().pushBack(inheritedPrivilegesElement));
}
- return Status::OK();
+ } else if (showPrivileges) {
+ warningsElement.appendString(
+ "", "Role graph state inconsistent; only direct privileges available.");
+ addPrivilegeObjectsOrWarningsToArrayElement(
+ privilegesElement, warningsElement, _roleGraph.getDirectPrivileges(roleName));
}
+ if (warningsElement.hasChildren()) {
+ fassert(17167, resultDoc.root().pushBack(warningsElement));
+ }
+ *result = resultDoc.getObject();
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateLocal::getRoleDescriptionsForDB(const std::string dbname,
+ bool showPrivileges,
+ bool showBuiltinRoles,
+ vector<BSONObj>* result) {
+ stdx::lock_guard<stdx::mutex> lk(_roleGraphMutex);
+
+ for (RoleNameIterator it = _roleGraph.getRolesForDatabase(dbname); it.more(); it.next()) {
+ if (!showBuiltinRoles && _roleGraph.isBuiltinRole(it.get())) {
+ continue;
+ }
+ BSONObj roleDoc;
+ Status status = _getRoleDescription_inlock(it.get(), showPrivileges, &roleDoc);
+ if (!status.isOK()) {
+ return status;
+ }
+ result->push_back(roleDoc);
+ }
+ return Status::OK();
+}
namespace {
- /**
- * Adds the role described in "doc" to "roleGraph". If the role cannot be added, due to
- * some error in "doc", logs a warning.
- */
- void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
- Status status = roleGraph->addRoleFromDocument(doc);
- if (!status.isOK()) {
- warning() << "Skipping invalid admin.system.roles document while calculating privileges"
- " for user-defined roles: " << status << "; document " << doc;
- }
+/**
+ * Adds the role described in "doc" to "roleGraph". If the role cannot be added, due to
+ * some error in "doc", logs a warning.
+ */
+void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
+ Status status = roleGraph->addRoleFromDocument(doc);
+ if (!status.isOK()) {
+ warning() << "Skipping invalid admin.system.roles document while calculating privileges"
+ " for user-defined roles: " << status << "; document " << doc;
}
+}
} // namespace
- Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* txn) {
- stdx::lock_guard<stdx::mutex> lkInitialzeRoleGraph(_roleGraphMutex);
+Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* txn) {
+ stdx::lock_guard<stdx::mutex> lkInitialzeRoleGraph(_roleGraphMutex);
- _roleGraphState = roleGraphStateInitial;
- _roleGraph = RoleGraph();
+ _roleGraphState = roleGraphStateInitial;
+ _roleGraph = RoleGraph();
- RoleGraph newRoleGraph;
- Status status = query(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSONObj(),
- BSONObj(),
- stdx::bind(addRoleFromDocumentOrWarn, &newRoleGraph, stdx::placeholders::_1));
- if (!status.isOK())
- return status;
+ RoleGraph newRoleGraph;
+ Status status =
+ query(txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSONObj(),
+ BSONObj(),
+ stdx::bind(addRoleFromDocumentOrWarn, &newRoleGraph, stdx::placeholders::_1));
+ if (!status.isOK())
+ return status;
- status = newRoleGraph.recomputePrivilegeData();
+ status = newRoleGraph.recomputePrivilegeData();
+
+ RoleGraphState newState;
+ if (status == ErrorCodes::GraphContainsCycle) {
+ error() << "Inconsistent role graph during authorization manager initialization. Only "
+ "direct privileges available. " << status.reason();
+ newState = roleGraphStateHasCycle;
+ status = Status::OK();
+ } else if (status.isOK()) {
+ newState = roleGraphStateConsistent;
+ } else {
+ newState = roleGraphStateInitial;
+ }
- RoleGraphState newState;
- if (status == ErrorCodes::GraphContainsCycle) {
- error() << "Inconsistent role graph during authorization manager initialization. Only "
- "direct privileges available. " << status.reason();
- newState = roleGraphStateHasCycle;
- status = Status::OK();
- }
- else if (status.isOK()) {
- newState = roleGraphStateConsistent;
- }
- else {
- newState = roleGraphStateInitial;
+ if (status.isOK()) {
+ _roleGraph.swap(newRoleGraph);
+ _roleGraphState = newState;
+ }
+ return status;
+}
+
+class AuthzManagerExternalStateLocal::AuthzManagerLogOpHandler : public RecoveryUnit::Change {
+public:
+ // None of the parameters below (except externalState) need to live longer than
+ // the instantiations of this class
+ AuthzManagerLogOpHandler(AuthzManagerExternalStateLocal* externalState,
+ const char* op,
+ const char* ns,
+ const BSONObj& o,
+ const BSONObj* o2)
+ : _externalState(externalState),
+ _op(op),
+ _ns(ns),
+ _o(o.getOwned()),
+
+ _isO2Set(o2 ? true : false),
+ _o2(_isO2Set ? o2->getOwned() : BSONObj()) {}
+
+ virtual void commit() {
+ stdx::lock_guard<stdx::mutex> lk(_externalState->_roleGraphMutex);
+ Status status = _externalState->_roleGraph.handleLogOp(
+ _op.c_str(), NamespaceString(_ns.c_str()), _o, _isO2Set ? &_o2 : NULL);
+
+ if (status == ErrorCodes::OplogOperationUnsupported) {
+ _externalState->_roleGraph = RoleGraph();
+ _externalState->_roleGraphState = _externalState->roleGraphStateInitial;
+ BSONObjBuilder oplogEntryBuilder;
+ oplogEntryBuilder << "op" << _op << "ns" << _ns << "o" << _o;
+ if (_isO2Set)
+ oplogEntryBuilder << "o2" << _o2;
+ error() << "Unsupported modification to roles collection in oplog; "
+ "restart this process to reenable user-defined roles; " << status.reason()
+ << "; Oplog entry: " << oplogEntryBuilder.done();
+ } else if (!status.isOK()) {
+ warning() << "Skipping bad update to roles collection in oplog. " << status
+ << " Oplog entry: " << _op;
}
-
- if (status.isOK()) {
- _roleGraph.swap(newRoleGraph);
- _roleGraphState = newState;
+ status = _externalState->_roleGraph.recomputePrivilegeData();
+ if (status == ErrorCodes::GraphContainsCycle) {
+ _externalState->_roleGraphState = _externalState->roleGraphStateHasCycle;
+ error() << "Inconsistent role graph during authorization manager initialization. "
+ "Only direct privileges available. " << status.reason()
+ << " after applying oplog entry " << _op;
+ } else {
+ fassert(17183, status);
+ _externalState->_roleGraphState = _externalState->roleGraphStateConsistent;
}
- return status;
}
- class AuthzManagerExternalStateLocal::AuthzManagerLogOpHandler : public RecoveryUnit::Change {
- public:
+ virtual void rollback() {}
- // None of the parameters below (except externalState) need to live longer than
- // the instantiations of this class
- AuthzManagerLogOpHandler(AuthzManagerExternalStateLocal* externalState,
- const char* op,
- const char* ns,
- const BSONObj& o,
- const BSONObj* o2):
- _externalState(externalState),
- _op(op),
- _ns(ns),
- _o(o.getOwned()),
+private:
+ AuthzManagerExternalStateLocal* _externalState;
+ const std::string _op;
+ const std::string _ns;
+ const BSONObj _o;
- _isO2Set(o2 ? true : false),
- _o2(_isO2Set ? o2->getOwned() : BSONObj()) {
+ const bool _isO2Set;
+ const BSONObj _o2;
+};
- }
-
- virtual void commit() {
- stdx::lock_guard<stdx::mutex> lk(_externalState->_roleGraphMutex);
- Status status = _externalState->_roleGraph.handleLogOp(_op.c_str(),
- NamespaceString(_ns.c_str()),
- _o,
- _isO2Set ? &_o2 : NULL);
-
- if (status == ErrorCodes::OplogOperationUnsupported) {
- _externalState->_roleGraph = RoleGraph();
- _externalState->_roleGraphState = _externalState->roleGraphStateInitial;
- BSONObjBuilder oplogEntryBuilder;
- oplogEntryBuilder << "op" << _op << "ns" << _ns << "o" << _o;
- if (_isO2Set)
- oplogEntryBuilder << "o2" << _o2;
- error() << "Unsupported modification to roles collection in oplog; "
- "restart this process to reenable user-defined roles; " << status.reason() <<
- "; Oplog entry: " << oplogEntryBuilder.done();
- }
- else if (!status.isOK()) {
- warning() << "Skipping bad update to roles collection in oplog. " << status <<
- " Oplog entry: " << _op;
- }
- status = _externalState->_roleGraph.recomputePrivilegeData();
- if (status == ErrorCodes::GraphContainsCycle) {
- _externalState->_roleGraphState = _externalState->roleGraphStateHasCycle;
- error() << "Inconsistent role graph during authorization manager initialization. "
- "Only direct privileges available. " << status.reason() <<
- " after applying oplog entry " << _op;
- }
- else {
- fassert(17183, status);
- _externalState->_roleGraphState = _externalState->roleGraphStateConsistent;
- }
-
- }
-
- virtual void rollback() { }
-
- private:
- AuthzManagerExternalStateLocal* _externalState;
- const std::string _op;
- const std::string _ns;
- const BSONObj _o;
-
- const bool _isO2Set;
- const BSONObj _o2;
- };
-
- void AuthzManagerExternalStateLocal::logOp(
- OperationContext* txn,
- const char* op,
- const char* ns,
- const BSONObj& o,
- BSONObj* o2) {
-
- if (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
- ns == AuthorizationManager::adminCommandNamespace.ns()) {
-
- txn->recoveryUnit()->registerChange(new AuthzManagerLogOpHandler(this,
- op,
- ns,
- o,
- o2));
- }
+void AuthzManagerExternalStateLocal::logOp(
+ OperationContext* txn, const char* op, const char* ns, const BSONObj& o, BSONObj* o2) {
+ if (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
+ ns == AuthorizationManager::adminCommandNamespace.ns()) {
+ txn->recoveryUnit()->registerChange(new AuthzManagerLogOpHandler(this, op, ns, o, o2));
}
+}
} // namespace mongo
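
The AuthzManagerLogOpHandler reformatted above defers role-graph maintenance until the storage transaction actually commits, by registering a RecoveryUnit::Change. The following self-contained sketch is illustrative only: RecoveryUnit, Change, RoleCache, and RoleCacheLogOpHandler here are simplified stand-ins, not the MongoDB classes or their real APIs.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Stand-in for RecoveryUnit::Change: work is deferred until the enclosing
// unit of work either commits or rolls back.
class Change {
public:
    virtual ~Change() = default;
    virtual void commit() = 0;
    virtual void rollback() = 0;
};

// Minimal recovery unit that owns registered changes and fires them in order.
class RecoveryUnit {
public:
    void registerChange(std::unique_ptr<Change> change) {
        _changes.push_back(std::move(change));
    }
    void commitUnitOfWork() {
        for (auto& c : _changes)
            c->commit();
        _changes.clear();
    }
    void abandonUnitOfWork() {
        for (auto it = _changes.rbegin(); it != _changes.rend(); ++it)
            (*it)->rollback();
        _changes.clear();
    }

private:
    std::vector<std::unique_ptr<Change>> _changes;
};

// Hypothetical cache that must only observe writes that actually committed.
struct RoleCache {
    std::vector<std::string> appliedOps;
};

class RoleCacheLogOpHandler : public Change {
public:
    RoleCacheLogOpHandler(RoleCache* cache, std::string op) : _cache(cache), _op(std::move(op)) {}
    void commit() override {
        _cache->appliedOps.push_back(_op);  // apply the op only once the write is durable
    }
    void rollback() override {}  // nothing was applied, so nothing to undo

private:
    RoleCache* _cache;
    const std::string _op;
};

int main() {
    RoleCache cache;
    RecoveryUnit ru;
    ru.registerChange(std::make_unique<RoleCacheLogOpHandler>(&cache, "u:admin.system.roles"));
    ru.commitUnitOfWork();  // the cache sees the op only now
    std::cout << "ops applied: " << cache.appliedOps.size() << "\n";  // prints: ops applied: 1
}

The point of the pattern is that an in-memory observer such as the role graph never sees writes from units of work that are later rolled back.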
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index f8243aff00e..fe4a90ed1cd 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -40,105 +40,103 @@
namespace mongo {
+/**
+ * Common implementation of AuthzManagerExternalState for systems where role
+ * and user information are stored locally.
+ */
+class AuthzManagerExternalStateLocal : public AuthzManagerExternalState {
+ MONGO_DISALLOW_COPYING(AuthzManagerExternalStateLocal);
+
+public:
+ virtual ~AuthzManagerExternalStateLocal() = default;
+
+ virtual Status initialize(OperationContext* txn);
+
+ virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion);
+ virtual Status getUserDescription(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result);
+ virtual Status getRoleDescription(const RoleName& roleName,
+ bool showPrivileges,
+ BSONObj* result);
+ virtual Status getRoleDescriptionsForDB(const std::string dbname,
+ bool showPrivileges,
+ bool showBuiltinRoles,
+ std::vector<BSONObj>* result);
+
+ bool hasAnyPrivilegeDocuments(OperationContext* txn) override;
+
/**
- * Common implementation of AuthzManagerExternalState for systems where role
- * and user information are stored locally.
+ * Finds a document matching "query" in "collectionName", and stores a shared-ownership
+ * copy into "result".
+ *
+ * Returns Status::OK() on success. If no match is found, returns
+ * ErrorCodes::NoMatchingDocument. Other errors returned as appropriate.
*/
- class AuthzManagerExternalStateLocal : public AuthzManagerExternalState {
- MONGO_DISALLOW_COPYING(AuthzManagerExternalStateLocal);
-
- public:
- virtual ~AuthzManagerExternalStateLocal() = default;
-
- virtual Status initialize(OperationContext* txn);
-
- virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion);
- virtual Status getUserDescription(
- OperationContext* txn, const UserName& userName, BSONObj* result);
- virtual Status getRoleDescription(const RoleName& roleName,
- bool showPrivileges,
- BSONObj* result);
- virtual Status getRoleDescriptionsForDB(const std::string dbname,
- bool showPrivileges,
- bool showBuiltinRoles,
- std::vector<BSONObj>* result);
-
- bool hasAnyPrivilegeDocuments(OperationContext* txn) override;
-
- /**
-         * Finds a document matching "query" in "collectionName", and stores a shared-ownership
- * copy into "result".
- *
- * Returns Status::OK() on success. If no match is found, returns
- * ErrorCodes::NoMatchingDocument. Other errors returned as appropriate.
- */
- virtual Status findOne(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- BSONObj* result) = 0;
-
- /**
- * Finds all documents matching "query" in "collectionName". For each document returned,
- * calls the function resultProcessor on it.
- */
- virtual Status query(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& projection,
- const stdx::function<void(const BSONObj&)>& resultProcessor) = 0;
-
- virtual void logOp(
- OperationContext* txn,
- const char* op,
- const char* ns,
- const BSONObj& o,
- BSONObj* o2);
-
- protected:
- AuthzManagerExternalStateLocal() = default;
-
- /**
- * Fetches the user document for "userName" from local storage, and stores it into "result".
- */
- virtual Status _getUserDocument(OperationContext* txn,
- const UserName& userName,
- BSONObj* result);
- private:
- enum RoleGraphState {
- roleGraphStateInitial = 0,
- roleGraphStateConsistent,
- roleGraphStateHasCycle
- };
-
- /**
- * RecoveryUnit::Change subclass used to commit work for AuthzManager logOp listener.
- */
- class AuthzManagerLogOpHandler;
-
- /**
- * Initializes the role graph from the contents of the admin.system.roles collection.
- */
- Status _initializeRoleGraph(OperationContext* txn);
-
- Status _getRoleDescription_inlock(const RoleName& roleName,
- bool showPrivileges,
- BSONObj* result);
- /**
- * Eventually consistent, in-memory representation of all roles in the system (both
- * user-defined and built-in). Synchronized via _roleGraphMutex.
- */
- RoleGraph _roleGraph;
-
- /**
- * State of _roleGraph, one of "initial", "consistent" and "has cycle". Synchronized via
- * _roleGraphMutex.
- */
- RoleGraphState _roleGraphState = roleGraphStateInitial;
-
- /**
- * Guards _roleGraphState and _roleGraph.
- */
- stdx::mutex _roleGraphMutex;
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ BSONObj* result) = 0;
+
+ /**
+ * Finds all documents matching "query" in "collectionName". For each document returned,
+ * calls the function resultProcessor on it.
+ */
+ virtual Status query(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& projection,
+ const stdx::function<void(const BSONObj&)>& resultProcessor) = 0;
+
+ virtual void logOp(
+ OperationContext* txn, const char* op, const char* ns, const BSONObj& o, BSONObj* o2);
+
+protected:
+ AuthzManagerExternalStateLocal() = default;
+
+ /**
+ * Fetches the user document for "userName" from local storage, and stores it into "result".
+ */
+ virtual Status _getUserDocument(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result);
+
+private:
+ enum RoleGraphState {
+ roleGraphStateInitial = 0,
+ roleGraphStateConsistent,
+ roleGraphStateHasCycle
};
-} // namespace mongo
+ /**
+ * RecoveryUnit::Change subclass used to commit work for AuthzManager logOp listener.
+ */
+ class AuthzManagerLogOpHandler;
+
+ /**
+ * Initializes the role graph from the contents of the admin.system.roles collection.
+ */
+ Status _initializeRoleGraph(OperationContext* txn);
+
+ Status _getRoleDescription_inlock(const RoleName& roleName,
+ bool showPrivileges,
+ BSONObj* result);
+ /**
+ * Eventually consistent, in-memory representation of all roles in the system (both
+ * user-defined and built-in). Synchronized via _roleGraphMutex.
+ */
+ RoleGraph _roleGraph;
+
+ /**
+ * State of _roleGraph, one of "initial", "consistent" and "has cycle". Synchronized via
+ * _roleGraphMutex.
+ */
+ RoleGraphState _roleGraphState = roleGraphStateInitial;
+
+ /**
+ * Guards _roleGraphState and _roleGraph.
+ */
+ stdx::mutex _roleGraphMutex;
+};
+
+} // namespace mongo
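
The header above keeps the in-memory role graph in one of three states (initial, consistent, has-cycle) and, as the .cpp diff shows, falls back to reporting only direct privileges once a cycle is detected. A minimal sketch of that classification on a toy adjacency map follows; the names and types are illustrative, not the real RoleGraph API.

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

// Toy role graph: role name -> directly granted (subordinate) roles.
using ToyRoleGraph = std::map<std::string, std::vector<std::string>>;

// Mirrors the three-value state tracked by the header above.
enum class GraphState { kInitial, kConsistent, kHasCycle };

// Depth-first search; returns true if a node is reached again while still on the stack.
static bool reachesCycle(const ToyRoleGraph& g,
                         const std::string& node,
                         std::set<std::string>& onStack,
                         std::set<std::string>& done) {
    if (done.count(node))
        return false;
    if (!onStack.insert(node).second)
        return true;  // back edge: the inheritance relation has a cycle
    auto it = g.find(node);
    if (it != g.end()) {
        for (const auto& sub : it->second) {
            if (reachesCycle(g, sub, onStack, done))
                return true;
        }
    }
    onStack.erase(node);
    done.insert(node);
    return false;
}

static GraphState recompute(const ToyRoleGraph& g) {
    std::set<std::string> onStack, done;
    for (const auto& entry : g) {
        if (reachesCycle(g, entry.first, onStack, done))
            return GraphState::kHasCycle;
    }
    return GraphState::kConsistent;
}

int main() {
    ToyRoleGraph g{{"a", {"b"}}, {"b", {"c"}}, {"c", {}}};
    std::cout << (recompute(g) == GraphState::kConsistent) << "\n";  // 1
    g["c"] = {"a"};  // introduce a -> b -> c -> a
    std::cout << (recompute(g) == GraphState::kHasCycle) << "\n";    // 1
}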
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index dc15b46f58d..6dc24b6a8d6 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -48,282 +48,249 @@
namespace mongo {
namespace {
- void addRoleNameToObjectElement(mutablebson::Element object, const RoleName& role) {
- fassert(17175, object.appendString(AuthorizationManager::ROLE_NAME_FIELD_NAME, role.getRole()));
- fassert(17176, object.appendString(AuthorizationManager::ROLE_DB_FIELD_NAME, role.getDB()));
+void addRoleNameToObjectElement(mutablebson::Element object, const RoleName& role) {
+ fassert(17175, object.appendString(AuthorizationManager::ROLE_NAME_FIELD_NAME, role.getRole()));
+ fassert(17176, object.appendString(AuthorizationManager::ROLE_DB_FIELD_NAME, role.getDB()));
+}
+
+void addRoleNameObjectsToArrayElement(mutablebson::Element array, RoleNameIterator roles) {
+ for (; roles.more(); roles.next()) {
+ mutablebson::Element roleElement = array.getDocument().makeElementObject("");
+ addRoleNameToObjectElement(roleElement, roles.get());
+ fassert(17177, array.pushBack(roleElement));
}
-
- void addRoleNameObjectsToArrayElement(mutablebson::Element array, RoleNameIterator roles) {
- for (; roles.more(); roles.next()) {
- mutablebson::Element roleElement = array.getDocument().makeElementObject("");
- addRoleNameToObjectElement(roleElement, roles.get());
- fassert(17177, array.pushBack(roleElement));
- }
- }
-
- void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privilegesElement,
- mutablebson::Element warningsElement,
- const PrivilegeVector& privileges) {
- std::string errmsg;
- for (size_t i = 0; i < privileges.size(); ++i) {
- ParsedPrivilege pp;
- if (ParsedPrivilege::privilegeToParsedPrivilege(privileges[i], &pp, &errmsg)) {
- fassert(17178, privilegesElement.appendObject("", pp.toBSON()));
- } else {
- fassert(17179,
- warningsElement.appendString(
+}
+
+void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privilegesElement,
+ mutablebson::Element warningsElement,
+ const PrivilegeVector& privileges) {
+ std::string errmsg;
+ for (size_t i = 0; i < privileges.size(); ++i) {
+ ParsedPrivilege pp;
+ if (ParsedPrivilege::privilegeToParsedPrivilege(privileges[i], &pp, &errmsg)) {
+ fassert(17178, privilegesElement.appendObject("", pp.toBSON()));
+ } else {
+ fassert(17179,
+ warningsElement.appendString(
"",
- std::string(mongoutils::str::stream() <<
- "Skipped privileges on resource " <<
- privileges[i].getResourcePattern().toString() <<
- ". Reason: " << errmsg)));
- }
+ std::string(mongoutils::str::stream()
+ << "Skipped privileges on resource "
+ << privileges[i].getResourcePattern().toString()
+ << ". Reason: " << errmsg)));
}
}
+}
} // namespace
- AuthzManagerExternalStateMock::AuthzManagerExternalStateMock() : _authzManager(NULL) {}
- AuthzManagerExternalStateMock::~AuthzManagerExternalStateMock() {}
-
- void AuthzManagerExternalStateMock::setAuthorizationManager(
- AuthorizationManager* authzManager) {
- _authzManager = authzManager;
+AuthzManagerExternalStateMock::AuthzManagerExternalStateMock() : _authzManager(NULL) {}
+AuthzManagerExternalStateMock::~AuthzManagerExternalStateMock() {}
+
+void AuthzManagerExternalStateMock::setAuthorizationManager(AuthorizationManager* authzManager) {
+ _authzManager = authzManager;
+}
+
+void AuthzManagerExternalStateMock::setAuthzVersion(int version) {
+ OperationContextNoop opCtx;
+ uassertStatusOK(
+ updateOne(&opCtx,
+ AuthorizationManager::versionCollectionNamespace,
+ AuthorizationManager::versionDocumentQuery,
+ BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName << version)),
+ true,
+ BSONObj()));
+}
+
+std::unique_ptr<AuthzSessionExternalState>
+AuthzManagerExternalStateMock::makeAuthzSessionExternalState(AuthorizationManager* authzManager) {
+ return stdx::make_unique<AuthzSessionExternalStateMock>(authzManager);
+}
+
+Status AuthzManagerExternalStateMock::findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ BSONObj* result) {
+ BSONObjCollection::iterator iter;
+ Status status = _findOneIter(collectionName, query, &iter);
+ if (!status.isOK())
+ return status;
+ *result = iter->copy();
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateMock::query(
+ OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj&,
+ const stdx::function<void(const BSONObj&)>& resultProcessor) {
+ std::vector<BSONObjCollection::iterator> iterVector;
+ Status status = _queryVector(collectionName, query, &iterVector);
+ if (!status.isOK()) {
+ return status;
}
-
- void AuthzManagerExternalStateMock::setAuthzVersion(int version) {
- OperationContextNoop opCtx;
- uassertStatusOK(
- updateOne(&opCtx,
- AuthorizationManager::versionCollectionNamespace,
- AuthorizationManager::versionDocumentQuery,
- BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName <<
- version)),
- true,
- BSONObj()));
+ try {
+ for (std::vector<BSONObjCollection::iterator>::iterator it = iterVector.begin();
+ it != iterVector.end();
+ ++it) {
+ resultProcessor(**it);
+ }
+ } catch (const DBException& ex) {
+ status = ex.toStatus();
}
-
- std::unique_ptr<AuthzSessionExternalState>
- AuthzManagerExternalStateMock::makeAuthzSessionExternalState(
- AuthorizationManager* authzManager) {
-
- return stdx::make_unique<AuthzSessionExternalStateMock>(authzManager);
+ return status;
+}
+
+Status AuthzManagerExternalStateMock::insert(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& document,
+ const BSONObj&) {
+ BSONObj toInsert;
+ if (document["_id"].eoo()) {
+ BSONObjBuilder docWithIdBuilder;
+ docWithIdBuilder.append("_id", OID::gen());
+ docWithIdBuilder.appendElements(document);
+ toInsert = docWithIdBuilder.obj();
+ } else {
+ toInsert = document.copy();
}
+ _documents[collectionName].push_back(toInsert);
- Status AuthzManagerExternalStateMock::findOne(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- BSONObj* result) {
- BSONObjCollection::iterator iter;
- Status status = _findOneIter(collectionName, query, &iter);
- if (!status.isOK())
- return status;
- *result = iter->copy();
- return Status::OK();
+ if (_authzManager) {
+ _authzManager->logOp(txn, "i", collectionName.ns().c_str(), toInsert, NULL);
}
- Status AuthzManagerExternalStateMock::query(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj&,
- const stdx::function<void(const BSONObj&)>& resultProcessor) {
- std::vector<BSONObjCollection::iterator> iterVector;
- Status status = _queryVector(collectionName, query, &iterVector);
- if (!status.isOK()) {
- return status;
- }
- try {
- for (std::vector<BSONObjCollection::iterator>::iterator it = iterVector.begin();
- it != iterVector.end(); ++it) {
- resultProcessor(**it);
- }
- }
- catch (const DBException& ex) {
- status = ex.toStatus();
- }
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateMock::insertPrivilegeDocument(OperationContext* txn,
+ const BSONObj& userObj,
+ const BSONObj& writeConcern) {
+ return insert(txn, AuthorizationManager::usersCollectionNamespace, userObj, writeConcern);
+}
+
+Status AuthzManagerExternalStateMock::updateOne(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& updatePattern,
+ bool upsert,
+ const BSONObj& writeConcern) {
+ namespace mmb = mutablebson;
+ UpdateDriver::Options updateOptions;
+ UpdateDriver driver(updateOptions);
+ Status status = driver.parse(updatePattern);
+ if (!status.isOK())
return status;
- }
- Status AuthzManagerExternalStateMock::insert(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& document,
- const BSONObj&) {
- BSONObj toInsert;
- if (document["_id"].eoo()) {
- BSONObjBuilder docWithIdBuilder;
- docWithIdBuilder.append("_id", OID::gen());
- docWithIdBuilder.appendElements(document);
- toInsert = docWithIdBuilder.obj();
- }
- else {
- toInsert = document.copy();
- }
- _documents[collectionName].push_back(toInsert);
+ BSONObjCollection::iterator iter;
+ status = _findOneIter(collectionName, query, &iter);
+ mmb::Document document;
+ if (status.isOK()) {
+ document.reset(*iter, mmb::Document::kInPlaceDisabled);
+ BSONObj logObj;
+ status = driver.update(StringData(), &document, &logObj);
+ if (!status.isOK())
+ return status;
+ BSONObj newObj = document.getObject().copy();
+ *iter = newObj;
+ BSONObj idQuery = driver.makeOplogEntryQuery(newObj, false);
if (_authzManager) {
- _authzManager->logOp(
- txn,
- "i",
- collectionName.ns().c_str(),
- toInsert,
- NULL);
+ _authzManager->logOp(txn, "u", collectionName.ns().c_str(), logObj, &idQuery);
}
return Status::OK();
- }
-
- Status AuthzManagerExternalStateMock::insertPrivilegeDocument(
- OperationContext* txn,
- const BSONObj& userObj,
- const BSONObj& writeConcern) {
- return insert(txn, AuthorizationManager::usersCollectionNamespace, userObj, writeConcern);
- }
-
- Status AuthzManagerExternalStateMock::updateOne(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& updatePattern,
- bool upsert,
- const BSONObj& writeConcern) {
-
- namespace mmb = mutablebson;
- UpdateDriver::Options updateOptions;
- UpdateDriver driver(updateOptions);
- Status status = driver.parse(updatePattern);
- if (!status.isOK())
- return status;
-
- BSONObjCollection::iterator iter;
- status = _findOneIter(collectionName, query, &iter);
- mmb::Document document;
- if (status.isOK()) {
- document.reset(*iter, mmb::Document::kInPlaceDisabled);
- BSONObj logObj;
- status = driver.update(StringData(), &document, &logObj);
- if (!status.isOK())
- return status;
- BSONObj newObj = document.getObject().copy();
- *iter = newObj;
- BSONObj idQuery = driver.makeOplogEntryQuery(newObj, false);
-
- if (_authzManager) {
- _authzManager->logOp(
- txn,
- "u",
- collectionName.ns().c_str(),
- logObj,
- &idQuery);
- }
-
- return Status::OK();
+ } else if (status == ErrorCodes::NoMatchingDocument && upsert) {
+ if (query.hasField("_id")) {
+ document.root().appendElement(query["_id"]);
}
- else if (status == ErrorCodes::NoMatchingDocument && upsert) {
- if (query.hasField("_id")) {
- document.root().appendElement(query["_id"]);
- }
- status = driver.populateDocumentWithQueryFields(query, NULL, document);
- if (!status.isOK()) {
- return status;
- }
- status = driver.update(StringData(), &document);
- if (!status.isOK()) {
- return status;
- }
- return insert(txn, collectionName, document.getObject(), writeConcern);
+ status = driver.populateDocumentWithQueryFields(query, NULL, document);
+ if (!status.isOK()) {
+ return status;
}
- else {
+ status = driver.update(StringData(), &document);
+ if (!status.isOK()) {
return status;
}
+ return insert(txn, collectionName, document.getObject(), writeConcern);
+ } else {
+ return status;
}
+}
+
+Status AuthzManagerExternalStateMock::update(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& updatePattern,
+ bool upsert,
+ bool multi,
+ const BSONObj& writeConcern,
+ int* nMatched) {
+ return Status(ErrorCodes::InternalError,
+ "AuthzManagerExternalStateMock::update not implemented in mock.");
+}
+
+Status AuthzManagerExternalStateMock::remove(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj&,
+ int* numRemoved) {
+ int n = 0;
+ BSONObjCollection::iterator iter;
+ while (_findOneIter(collectionName, query, &iter).isOK()) {
+ BSONObj idQuery = (*iter)["_id"].wrap();
+ _documents[collectionName].erase(iter);
+ ++n;
- Status AuthzManagerExternalStateMock::update(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& updatePattern,
- bool upsert,
- bool multi,
- const BSONObj& writeConcern,
- int* nMatched) {
- return Status(ErrorCodes::InternalError,
- "AuthzManagerExternalStateMock::update not implemented in mock.");
- }
-
- Status AuthzManagerExternalStateMock::remove(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj&,
- int* numRemoved) {
- int n = 0;
- BSONObjCollection::iterator iter;
- while (_findOneIter(collectionName, query, &iter).isOK()) {
- BSONObj idQuery = (*iter)["_id"].wrap();
- _documents[collectionName].erase(iter);
- ++n;
-
- if (_authzManager) {
- _authzManager->logOp(
- txn,
- "d",
- collectionName.ns().c_str(),
- idQuery,
- NULL);
- }
-
+ if (_authzManager) {
+ _authzManager->logOp(txn, "d", collectionName.ns().c_str(), idQuery, NULL);
}
- *numRemoved = n;
- return Status::OK();
}
-
- std::vector<BSONObj> AuthzManagerExternalStateMock::getCollectionContents(
- const NamespaceString& collectionName) {
- return mapFindWithDefault(_documents, collectionName, std::vector<BSONObj>());
+ *numRemoved = n;
+ return Status::OK();
+}
+
+std::vector<BSONObj> AuthzManagerExternalStateMock::getCollectionContents(
+ const NamespaceString& collectionName) {
+ return mapFindWithDefault(_documents, collectionName, std::vector<BSONObj>());
+}
+
+Status AuthzManagerExternalStateMock::_findOneIter(const NamespaceString& collectionName,
+ const BSONObj& query,
+ BSONObjCollection::iterator* result) {
+ std::vector<BSONObjCollection::iterator> iterVector;
+ Status status = _queryVector(collectionName, query, &iterVector);
+ if (!status.isOK()) {
+ return status;
}
-
- Status AuthzManagerExternalStateMock::_findOneIter(
- const NamespaceString& collectionName,
- const BSONObj& query,
- BSONObjCollection::iterator* result) {
- std::vector<BSONObjCollection::iterator> iterVector;
- Status status = _queryVector(collectionName, query, &iterVector);
- if (!status.isOK()) {
- return status;
- }
- if (!iterVector.size()) {
- return Status(ErrorCodes::NoMatchingDocument, "No matching document");
- }
- *result = iterVector.front();
- return Status::OK();
+ if (!iterVector.size()) {
+ return Status(ErrorCodes::NoMatchingDocument, "No matching document");
}
+ *result = iterVector.front();
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateMock::_queryVector(
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ std::vector<BSONObjCollection::iterator>* result) {
+ StatusWithMatchExpression parseResult =
+ MatchExpressionParser::parse(query, MatchExpressionParser::WhereCallback());
+ if (!parseResult.isOK()) {
+ return parseResult.getStatus();
+ }
+ const std::unique_ptr<MatchExpression> matcher(parseResult.getValue());
- Status AuthzManagerExternalStateMock::_queryVector(
- const NamespaceString& collectionName,
- const BSONObj& query,
- std::vector<BSONObjCollection::iterator>* result) {
-
- StatusWithMatchExpression parseResult =
- MatchExpressionParser::parse(query, MatchExpressionParser::WhereCallback());
- if (!parseResult.isOK()) {
- return parseResult.getStatus();
- }
- const std::unique_ptr<MatchExpression> matcher(parseResult.getValue());
-
- NamespaceDocumentMap::iterator mapIt = _documents.find(collectionName);
- if (mapIt == _documents.end())
- return Status::OK();
-
- for (BSONObjCollection::iterator vecIt = mapIt->second.begin();
- vecIt != mapIt->second.end();
- ++vecIt) {
+ NamespaceDocumentMap::iterator mapIt = _documents.find(collectionName);
+ if (mapIt == _documents.end())
+ return Status::OK();
- if (matcher->matchesBSON(*vecIt)) {
- result->push_back(vecIt);
- }
+ for (BSONObjCollection::iterator vecIt = mapIt->second.begin(); vecIt != mapIt->second.end();
+ ++vecIt) {
+ if (matcher->matchesBSON(*vecIt)) {
+ result->push_back(vecIt);
}
- return Status::OK();
}
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
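
The mock reformatted above keeps documents in plain per-namespace vectors and answers findOne()/remove() by scanning them with a parsed matcher. A stripped-down sketch of the same idea, where a std::function predicate stands in for the MatchExpression and a string map stands in for BSON (all names here are hypothetical):

#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Doc = std::map<std::string, std::string>;
using Predicate = std::function<bool(const Doc&)>;

class InMemoryStore {
public:
    void insert(const std::string& ns, Doc doc) {
        _docs[ns].push_back(std::move(doc));
    }

    // Copies the first matching document into *result, mirroring the mock's findOne().
    bool findOne(const std::string& ns, const Predicate& query, Doc* result) const {
        auto it = _docs.find(ns);
        if (it == _docs.end())
            return false;
        for (const Doc& d : it->second) {
            if (query(d)) {
                *result = d;
                return true;
            }
        }
        return false;
    }

    // Erases every matching document and reports how many were removed.
    int remove(const std::string& ns, const Predicate& query) {
        auto it = _docs.find(ns);
        if (it == _docs.end())
            return 0;
        int n = 0;
        for (auto d = it->second.begin(); d != it->second.end();) {
            if (query(*d)) {
                d = it->second.erase(d);
                ++n;
            } else {
                ++d;
            }
        }
        return n;
    }

private:
    std::map<std::string, std::vector<Doc>> _docs;
};

int main() {
    InMemoryStore store;
    store.insert("admin.system.users", {{"user", "alice"}, {"db", "admin"}});
    store.insert("admin.system.users", {{"user", "bob"}, {"db", "test"}});
    auto isAlice = [](const Doc& d) { return d.at("user") == "alice"; };
    Doc found;
    std::cout << store.findOne("admin.system.users", isAlice, &found) << "\n";  // 1
    std::cout << store.remove("admin.system.users", isAlice) << "\n";           // 1
}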
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.h b/src/mongo/db/auth/authz_manager_external_state_mock.h
index 43f3abb2546..d6b457e0de9 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.h
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.h
@@ -42,88 +42,87 @@
namespace mongo {
- class AuthorizationManager;
+class AuthorizationManager;
- /**
- * Mock of the AuthzManagerExternalState class used only for testing.
- */
- class AuthzManagerExternalStateMock : public AuthzManagerExternalStateLocal {
- MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMock);
+/**
+ * Mock of the AuthzManagerExternalState class used only for testing.
+ */
+class AuthzManagerExternalStateMock : public AuthzManagerExternalStateLocal {
+ MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMock);
- public:
+public:
+ AuthzManagerExternalStateMock();
+ virtual ~AuthzManagerExternalStateMock();
- AuthzManagerExternalStateMock();
- virtual ~AuthzManagerExternalStateMock();
+ void setAuthorizationManager(AuthorizationManager* authzManager);
+ void setAuthzVersion(int version);
- void setAuthorizationManager(AuthorizationManager* authzManager);
- void setAuthzVersion(int version);
+ std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
+ AuthorizationManager* authzManager) override;
- std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
- AuthorizationManager* authzManager) override;
+ virtual Status findOne(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ BSONObj* result);
- virtual Status findOne(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- BSONObj* result);
+ virtual Status query(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& projection, // Currently unused in mock
+ const stdx::function<void(const BSONObj&)>& resultProcessor);
- virtual Status query(OperationContext* txn,
+ /**
+ * Inserts the given user object into the "admin" database.
+ */
+ Status insertPrivilegeDocument(OperationContext* txn,
+ const BSONObj& userObj,
+ const BSONObj& writeConcern);
+
+ // This implementation does not understand uniqueness constraints.
+ virtual Status insert(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& document,
+ const BSONObj& writeConcern);
+
+ // This implementation does not understand uniqueness constraints, ignores writeConcern,
+ // and only correctly handles some upsert behaviors.
+ virtual Status updateOne(OperationContext* txn,
const NamespaceString& collectionName,
const BSONObj& query,
- const BSONObj& projection, // Currently unused in mock
- const stdx::function<void(const BSONObj&)>& resultProcessor);
-
- /**
- * Inserts the given user object into the "admin" database.
- */
- Status insertPrivilegeDocument(OperationContext* txn,
- const BSONObj& userObj,
- const BSONObj& writeConcern);
-
- // This implementation does not understand uniqueness constraints.
- virtual Status insert(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& document,
- const BSONObj& writeConcern);
-
- // This implementation does not understand uniqueness constraints, ignores writeConcern,
- // and only correctly handles some upsert behaviors.
- virtual Status updateOne(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& updatePattern,
- bool upsert,
- const BSONObj& writeConcern);
- virtual Status update(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& updatePattern,
- bool upsert,
- bool multi,
- const BSONObj& writeConcern,
- int* nMatched);
- virtual Status remove(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& writeConcern,
- int* numRemoved);
-
- std::vector<BSONObj> getCollectionContents(const NamespaceString& collectionName);
-
- private:
- typedef std::vector<BSONObj> BSONObjCollection;
- typedef std::map<NamespaceString, BSONObjCollection> NamespaceDocumentMap;
-
- Status _findOneIter(const NamespaceString& collectionName,
- const BSONObj& query,
- BSONObjCollection::iterator* result);
-
- Status _queryVector(const NamespaceString& collectionName,
- const BSONObj& query,
- std::vector<BSONObjCollection::iterator>* result);
-
-
- AuthorizationManager* _authzManager; // For reporting logOps.
- NamespaceDocumentMap _documents; // Mock database.
- };
-
-} // namespace mongo
+ const BSONObj& updatePattern,
+ bool upsert,
+ const BSONObj& writeConcern);
+ virtual Status update(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& updatePattern,
+ bool upsert,
+ bool multi,
+ const BSONObj& writeConcern,
+ int* nMatched);
+ virtual Status remove(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& writeConcern,
+ int* numRemoved);
+
+ std::vector<BSONObj> getCollectionContents(const NamespaceString& collectionName);
+
+private:
+ typedef std::vector<BSONObj> BSONObjCollection;
+ typedef std::map<NamespaceString, BSONObjCollection> NamespaceDocumentMap;
+
+ Status _findOneIter(const NamespaceString& collectionName,
+ const BSONObj& query,
+ BSONObjCollection::iterator* result);
+
+ Status _queryVector(const NamespaceString& collectionName,
+ const BSONObj& query,
+ std::vector<BSONObjCollection::iterator>* result);
+
+
+ AuthorizationManager* _authzManager; // For reporting logOps.
+ NamespaceDocumentMap _documents; // Mock database.
+};
+
+} // namespace mongo
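
One limitation called out in the mock header above is that updateOne() "only correctly handles some upsert behaviors": a matching document is modified in place, otherwise a new document is seeded from the query fields before the update is applied. A simplified standalone sketch of that flow, with string maps standing in for BSON and the UpdateDriver (names are illustrative only):

#include <iostream>
#include <map>
#include <string>
#include <vector>

using Doc = std::map<std::string, std::string>;

// True if every field in the query is present in the document with the same value.
static bool matches(const Doc& doc, const Doc& query) {
    for (const auto& kv : query) {
        auto it = doc.find(kv.first);
        if (it == doc.end() || it->second != kv.second)
            return false;
    }
    return true;
}

// Update the first matching document, or insert a query-seeded one when upsert is set.
static void updateOne(std::vector<Doc>& coll, const Doc& query, const Doc& set, bool upsert) {
    for (Doc& doc : coll) {
        if (matches(doc, query)) {
            for (const auto& kv : set)
                doc[kv.first] = kv.second;  // apply the "$set"-style update
            return;
        }
    }
    if (upsert) {
        Doc doc = query;  // seed the new document from the query fields
        for (const auto& kv : set)
            doc[kv.first] = kv.second;
        coll.push_back(std::move(doc));
    }
}

int main() {
    std::vector<Doc> users;
    updateOne(users, {{"user", "alice"}}, {{"roles", "readWrite"}}, /*upsert=*/true);
    updateOne(users, {{"user", "alice"}}, {{"roles", "dbAdmin"}}, /*upsert=*/true);
    std::cout << users.size() << " " << users[0]["roles"] << "\n";  // prints: 1 dbAdmin
}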
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index 11d0bf3720f..91ca85ee3ef 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -49,153 +49,139 @@
namespace mongo {
- AuthzManagerExternalStateMongos::AuthzManagerExternalStateMongos() = default;
-
- AuthzManagerExternalStateMongos::~AuthzManagerExternalStateMongos() = default;
-
- Status AuthzManagerExternalStateMongos::initialize(OperationContext* txn) {
- return Status::OK();
+AuthzManagerExternalStateMongos::AuthzManagerExternalStateMongos() = default;
+
+AuthzManagerExternalStateMongos::~AuthzManagerExternalStateMongos() = default;
+
+Status AuthzManagerExternalStateMongos::initialize(OperationContext* txn) {
+ return Status::OK();
+}
+
+std::unique_ptr<AuthzSessionExternalState>
+AuthzManagerExternalStateMongos::makeAuthzSessionExternalState(AuthorizationManager* authzManager) {
+ return stdx::make_unique<AuthzSessionExternalStateMongos>(authzManager);
+}
+
+Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationContext* txn,
+ int* outVersion) {
+ // Note: we are treating
+ // { 'getParameter' : 1, <authSchemaVersionServerParameter> : 1 }
+ // as a user management command since this is the *only* part of mongos
+ // that runs this command
+ BSONObj getParameterCmd = BSON("getParameter" << 1 << authSchemaVersionServerParameter << 1);
+ BSONObjBuilder builder;
+ const bool ok =
+ grid.catalogManager()->runUserManagementReadCommand("admin", getParameterCmd, &builder);
+ BSONObj cmdResult = builder.obj();
+ if (!ok) {
+ return Command::getStatusFromCommandResult(cmdResult);
}
- std::unique_ptr<AuthzSessionExternalState>
- AuthzManagerExternalStateMongos::makeAuthzSessionExternalState(
- AuthorizationManager* authzManager) {
-
- return stdx::make_unique<AuthzSessionExternalStateMongos>(authzManager);
+ BSONElement versionElement = cmdResult[authSchemaVersionServerParameter];
+ if (versionElement.eoo()) {
+ return Status(ErrorCodes::UnknownError, "getParameter misbehaved.");
}
-
- Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(
- OperationContext* txn, int* outVersion) {
- // Note: we are treating
- // { 'getParameter' : 1, <authSchemaVersionServerParameter> : 1 }
- // as a user management command since this is the *only* part of mongos
- // that runs this command
- BSONObj getParameterCmd = BSON("getParameter" << 1 <<
- authSchemaVersionServerParameter << 1);
- BSONObjBuilder builder;
- const bool ok = grid.catalogManager()->runUserManagementReadCommand("admin",
- getParameterCmd,
- &builder);
- BSONObj cmdResult = builder.obj();
- if (!ok) {
- return Command::getStatusFromCommandResult(cmdResult);
- }
-
- BSONElement versionElement = cmdResult[authSchemaVersionServerParameter];
- if (versionElement.eoo()) {
- return Status(ErrorCodes::UnknownError, "getParameter misbehaved.");
- }
- *outVersion = versionElement.numberInt();
-
- return Status::OK();
+ *outVersion = versionElement.numberInt();
+
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result) {
+ BSONObj usersInfoCmd =
+ BSON("usersInfo" << BSON_ARRAY(BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB())) << "showPrivileges" << true
+ << "showCredentials" << true);
+ BSONObjBuilder builder;
+ const bool ok =
+ grid.catalogManager()->runUserManagementReadCommand("admin", usersInfoCmd, &builder);
+ BSONObj cmdResult = builder.obj();
+ if (!ok) {
+ return Command::getStatusFromCommandResult(cmdResult);
}
- Status AuthzManagerExternalStateMongos::getUserDescription(
- OperationContext* txn, const UserName& userName, BSONObj* result) {
- BSONObj usersInfoCmd = BSON("usersInfo" <<
- BSON_ARRAY(BSON(AuthorizationManager::USER_NAME_FIELD_NAME <<
- userName.getUser() <<
- AuthorizationManager::USER_DB_FIELD_NAME <<
- userName.getDB())) <<
- "showPrivileges" << true <<
- "showCredentials" << true);
- BSONObjBuilder builder;
- const bool ok = grid.catalogManager()->runUserManagementReadCommand("admin",
- usersInfoCmd,
- &builder);
- BSONObj cmdResult = builder.obj();
- if (!ok) {
- return Command::getStatusFromCommandResult(cmdResult);
- }
-
- std::vector<BSONElement> foundUsers = cmdResult["users"].Array();
- if (foundUsers.size() == 0) {
- return Status(ErrorCodes::UserNotFound,
- "User \"" + userName.toString() + "\" not found");
- }
-
- if (foundUsers.size() > 1) {
- return Status(ErrorCodes::UserDataInconsistent,
- str::stream() << "Found multiple users on the \""
- << userName.getDB() << "\" database with name \""
- << userName.getUser() << "\"");
- }
- *result = foundUsers[0].Obj().getOwned();
- return Status::OK();
+ std::vector<BSONElement> foundUsers = cmdResult["users"].Array();
+ if (foundUsers.size() == 0) {
+ return Status(ErrorCodes::UserNotFound, "User \"" + userName.toString() + "\" not found");
}
- Status AuthzManagerExternalStateMongos::getRoleDescription(const RoleName& roleName,
- bool showPrivileges,
- BSONObj* result) {
- BSONObj rolesInfoCmd = BSON("rolesInfo" <<
- BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB())) <<
- "showPrivileges" << showPrivileges);
- BSONObjBuilder builder;
- const bool ok = grid.catalogManager()->runUserManagementReadCommand("admin",
- rolesInfoCmd,
- &builder);
- BSONObj cmdResult = builder.obj();
- if (!ok) {
- return Command::getStatusFromCommandResult(cmdResult);
- }
-
- std::vector<BSONElement> foundRoles = cmdResult["roles"].Array();
- if (foundRoles.size() == 0) {
- return Status(ErrorCodes::RoleNotFound,
- "Role \"" + roleName.toString() + "\" not found");
- }
-
- if (foundRoles.size() > 1) {
- return Status(ErrorCodes::RoleDataInconsistent,
- str::stream() << "Found multiple roles on the \""
- << roleName.getDB() << "\" database with name \""
- << roleName.getRole() << "\"");
- }
- *result = foundRoles[0].Obj().getOwned();
- return Status::OK();
+ if (foundUsers.size() > 1) {
+ return Status(ErrorCodes::UserDataInconsistent,
+ str::stream() << "Found multiple users on the \"" << userName.getDB()
+ << "\" database with name \"" << userName.getUser() << "\"");
+ }
+ *result = foundUsers[0].Obj().getOwned();
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateMongos::getRoleDescription(const RoleName& roleName,
+ bool showPrivileges,
+ BSONObj* result) {
+ BSONObj rolesInfoCmd =
+ BSON("rolesInfo" << BSON_ARRAY(BSON(
+ AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB())) << "showPrivileges" << showPrivileges);
+ BSONObjBuilder builder;
+ const bool ok =
+ grid.catalogManager()->runUserManagementReadCommand("admin", rolesInfoCmd, &builder);
+ BSONObj cmdResult = builder.obj();
+ if (!ok) {
+ return Command::getStatusFromCommandResult(cmdResult);
}
- Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(const std::string dbname,
- bool showPrivileges,
- bool showBuiltinRoles,
- std::vector<BSONObj>* result) {
- BSONObj rolesInfoCmd = BSON("rolesInfo" << 1 <<
- "showPrivileges" << showPrivileges <<
- "showBuiltinRoles" << showBuiltinRoles);
- BSONObjBuilder builder;
- const bool ok = grid.catalogManager()->runUserManagementReadCommand(dbname,
- rolesInfoCmd,
- &builder);
- BSONObj cmdResult = builder.obj();
- if (!ok) {
- return Command::getStatusFromCommandResult(cmdResult);
- }
- for (BSONObjIterator it(cmdResult["roles"].Obj()); it.more(); it.next()) {
- result->push_back((*it).Obj().getOwned());
- }
- return Status::OK();
+ std::vector<BSONElement> foundRoles = cmdResult["roles"].Array();
+ if (foundRoles.size() == 0) {
+ return Status(ErrorCodes::RoleNotFound, "Role \"" + roleName.toString() + "\" not found");
}
- bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext* txn) {
- BSONObj usersInfoCmd = BSON("usersInfo" << 1);
- BSONObjBuilder builder;
- const bool ok = grid.catalogManager()->runUserManagementReadCommand("admin",
- usersInfoCmd,
- &builder);
- if (!ok) {
- // If we were unable to complete the query,
- // it's best to assume that there _are_ privilege documents. This might happen
-            // if the node containing the users collection becomes transiently unavailable.
- // See SERVER-12616, for example.
- return true;
- }
-
- BSONObj cmdResult = builder.obj();
- std::vector<BSONElement> foundUsers = cmdResult["users"].Array();
- return foundUsers.size() > 0;
+ if (foundRoles.size() > 1) {
+ return Status(ErrorCodes::RoleDataInconsistent,
+ str::stream() << "Found multiple roles on the \"" << roleName.getDB()
+ << "\" database with name \"" << roleName.getRole() << "\"");
}
+ *result = foundRoles[0].Obj().getOwned();
+ return Status::OK();
+}
+
+Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(const std::string dbname,
+ bool showPrivileges,
+ bool showBuiltinRoles,
+ std::vector<BSONObj>* result) {
+ BSONObj rolesInfoCmd = BSON("rolesInfo" << 1 << "showPrivileges" << showPrivileges
+ << "showBuiltinRoles" << showBuiltinRoles);
+ BSONObjBuilder builder;
+ const bool ok =
+ grid.catalogManager()->runUserManagementReadCommand(dbname, rolesInfoCmd, &builder);
+ BSONObj cmdResult = builder.obj();
+ if (!ok) {
+ return Command::getStatusFromCommandResult(cmdResult);
+ }
+ for (BSONObjIterator it(cmdResult["roles"].Obj()); it.more(); it.next()) {
+ result->push_back((*it).Obj().getOwned());
+ }
+ return Status::OK();
+}
+
+bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext* txn) {
+ BSONObj usersInfoCmd = BSON("usersInfo" << 1);
+ BSONObjBuilder builder;
+ const bool ok =
+ grid.catalogManager()->runUserManagementReadCommand("admin", usersInfoCmd, &builder);
+ if (!ok) {
+ // If we were unable to complete the query,
+ // it's best to assume that there _are_ privilege documents. This might happen
+        // if the node containing the users collection becomes transiently unavailable.
+ // See SERVER-12616, for example.
+ return true;
+ }
+
+ BSONObj cmdResult = builder.obj();
+ std::vector<BSONElement> foundUsers = cmdResult["users"].Array();
+ return foundUsers.size() > 0;
+}
-} // namespace mongo
+} // namespace mongo
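
hasAnyPrivilegeDocuments() above deliberately fails closed: when the usersInfo read cannot be completed (see the SERVER-12616 comment), mongos assumes privilege documents do exist, so the localhost exception is not granted. A hypothetical standalone sketch of that decision, with a callback standing in for runUserManagementReadCommand:

#include <functional>
#include <iostream>
#include <vector>

// Placeholder result of a usersInfo read: did the command complete, and which users came back.
struct UsersInfoResult {
    bool ok;
    std::vector<int> users;  // stand-in for the returned user documents
};

static bool hasAnyPrivilegeDocuments(const std::function<UsersInfoResult()>& runUsersInfo) {
    UsersInfoResult res = runUsersInfo();
    if (!res.ok) {
        // Transient failure (e.g. the node holding the users collection is unreachable):
        // err on the side of requiring authentication rather than opening the
        // localhost exception.
        return true;
    }
    return !res.users.empty();
}

int main() {
    auto unreachable = [] { return UsersInfoResult{false, {}}; };
    auto noUsers = [] { return UsersInfoResult{true, {}}; };
    auto oneUser = [] { return UsersInfoResult{true, {1}}; };
    std::cout << hasAnyPrivilegeDocuments(unreachable) << " "  // 1 (fail closed)
              << hasAnyPrivilegeDocuments(noUsers) << " "      // 0
              << hasAnyPrivilegeDocuments(oneUser) << "\n";    // 1
}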
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.h b/src/mongo/db/auth/authz_manager_external_state_s.h
index 047ff018006..8de98a53552 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.h
+++ b/src/mongo/db/auth/authz_manager_external_state_s.h
@@ -40,31 +40,32 @@
namespace mongo {
- /**
- * The implementation of AuthzManagerExternalState functionality for mongos.
- */
- class AuthzManagerExternalStateMongos : public AuthzManagerExternalState{
- MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongos);
+/**
+ * The implementation of AuthzManagerExternalState functionality for mongos.
+ */
+class AuthzManagerExternalStateMongos : public AuthzManagerExternalState {
+ MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongos);
- public:
- AuthzManagerExternalStateMongos();
- virtual ~AuthzManagerExternalStateMongos();
+public:
+ AuthzManagerExternalStateMongos();
+ virtual ~AuthzManagerExternalStateMongos();
- virtual Status initialize(OperationContext* txn);
- std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
- AuthorizationManager* authzManager) override;
- virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion);
- virtual Status getUserDescription(
- OperationContext* txn, const UserName& userName, BSONObj* result);
- virtual Status getRoleDescription(const RoleName& roleName,
- bool showPrivileges,
- BSONObj* result);
- virtual Status getRoleDescriptionsForDB(const std::string dbname,
- bool showPrivileges,
- bool showBuiltinRoles,
- std::vector<BSONObj>* result);
+ virtual Status initialize(OperationContext* txn);
+ std::unique_ptr<AuthzSessionExternalState> makeAuthzSessionExternalState(
+ AuthorizationManager* authzManager) override;
+ virtual Status getStoredAuthorizationVersion(OperationContext* txn, int* outVersion);
+ virtual Status getUserDescription(OperationContext* txn,
+ const UserName& userName,
+ BSONObj* result);
+ virtual Status getRoleDescription(const RoleName& roleName,
+ bool showPrivileges,
+ BSONObj* result);
+ virtual Status getRoleDescriptionsForDB(const std::string dbname,
+ bool showPrivileges,
+ bool showBuiltinRoles,
+ std::vector<BSONObj>* result);
- bool hasAnyPrivilegeDocuments(OperationContext* txn) override;
- };
+ bool hasAnyPrivilegeDocuments(OperationContext* txn) override;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state.cpp b/src/mongo/db/auth/authz_session_external_state.cpp
index 2a7db8c1ff1..c0dceb9617b 100644
--- a/src/mongo/db/auth/authz_session_external_state.cpp
+++ b/src/mongo/db/auth/authz_session_external_state.cpp
@@ -35,12 +35,12 @@
namespace mongo {
- AuthzSessionExternalState::AuthzSessionExternalState(AuthorizationManager* authzManager) :
- _authzManager(authzManager) {}
- AuthzSessionExternalState::~AuthzSessionExternalState() {}
+AuthzSessionExternalState::AuthzSessionExternalState(AuthorizationManager* authzManager)
+ : _authzManager(authzManager) {}
+AuthzSessionExternalState::~AuthzSessionExternalState() {}
- AuthorizationManager& AuthzSessionExternalState::getAuthorizationManager() {
- return *_authzManager;
- }
+AuthorizationManager& AuthzSessionExternalState::getAuthorizationManager() {
+ return *_authzManager;
+}
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state.h b/src/mongo/db/auth/authz_session_external_state.h
index d49e5eb35bf..5ce7ab155ed 100644
--- a/src/mongo/db/auth/authz_session_external_state.h
+++ b/src/mongo/db/auth/authz_session_external_state.h
@@ -38,53 +38,52 @@
namespace mongo {
- class Principal;
- class OperationContext;
+class Principal;
+class OperationContext;
- /**
- * Public interface for a class that encapsulates all the session information related to system
- * state not stored in AuthorizationSession. This is primarily to make AuthorizationSession
- * easier to test as well as to allow different implementations in mongos and mongod.
- */
- class AuthzSessionExternalState {
- MONGO_DISALLOW_COPYING(AuthzSessionExternalState);
-
- public:
-
- virtual ~AuthzSessionExternalState();
-
- AuthorizationManager& getAuthorizationManager();
-
- // Returns true if this connection should be treated as if it has full access to do
- // anything, regardless of the current auth state. Currently the reasons why this could be
- // are that auth isn't enabled or the connection is a "god" connection.
- virtual bool shouldIgnoreAuthChecks() const = 0;
-
- // Returns true if this connection should be treated as a localhost connection with no
- // admin authentication users created. This condition is used to allow the creation of
- // the first user on a server with authorization enabled.
- // NOTE: _checkShouldAllowLocalhost MUST be called at least once before any call to
- // shouldAllowLocalhost or we could ignore auth checks incorrectly.
- virtual bool shouldAllowLocalhost() const = 0;
-
- // Returns true if this connection should allow extra server configuration actions under
- // the localhost exception. This condition is used to allow special privileges on arbiters.
- // See SERVER-5479 for details on when this may be removed.
- virtual bool serverIsArbiter() const = 0;
-
- // Should be called at the beginning of every new request. This performs the checks
- // necessary to determine if localhost connections should be given full access.
- virtual void startRequest(OperationContext* txn) = 0;
-
- protected:
- // This class should never be instantiated directly.
- AuthzSessionExternalState(AuthorizationManager* authzManager);
-
- // Pointer to the authorization manager associated with the authorization session
- // that owns this object.
- //
- // TODO(schwerin): Eliminate this back pointer.
- AuthorizationManager* _authzManager;
- };
-
-} // namespace mongo
+/**
+ * Public interface for a class that encapsulates all the session information related to system
+ * state not stored in AuthorizationSession. This is primarily to make AuthorizationSession
+ * easier to test as well as to allow different implementations in mongos and mongod.
+ */
+class AuthzSessionExternalState {
+ MONGO_DISALLOW_COPYING(AuthzSessionExternalState);
+
+public:
+ virtual ~AuthzSessionExternalState();
+
+ AuthorizationManager& getAuthorizationManager();
+
+ // Returns true if this connection should be treated as if it has full access to do
+ // anything, regardless of the current auth state. Currently the reasons why this could be
+ // are that auth isn't enabled or the connection is a "god" connection.
+ virtual bool shouldIgnoreAuthChecks() const = 0;
+
+ // Returns true if this connection should be treated as a localhost connection with no
+ // admin authentication users created. This condition is used to allow the creation of
+ // the first user on a server with authorization enabled.
+ // NOTE: _checkShouldAllowLocalhost MUST be called at least once before any call to
+ // shouldAllowLocalhost or we could ignore auth checks incorrectly.
+ virtual bool shouldAllowLocalhost() const = 0;
+
+ // Returns true if this connection should allow extra server configuration actions under
+ // the localhost exception. This condition is used to allow special privileges on arbiters.
+ // See SERVER-5479 for details on when this may be removed.
+ virtual bool serverIsArbiter() const = 0;
+
+ // Should be called at the beginning of every new request. This performs the checks
+ // necessary to determine if localhost connections should be given full access.
+ virtual void startRequest(OperationContext* txn) = 0;
+
+protected:
+ // This class should never be instantiated directly.
+ AuthzSessionExternalState(AuthorizationManager* authzManager);
+
+ // Pointer to the authorization manager associated with the authorization session
+ // that owns this object.
+ //
+ // TODO(schwerin): Eliminate this back pointer.
+ AuthorizationManager* _authzManager;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_d.cpp b/src/mongo/db/auth/authz_session_external_state_d.cpp
index 65f30b5ce75..dee99046240 100644
--- a/src/mongo/db/auth/authz_session_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_d.cpp
@@ -43,29 +43,28 @@
namespace mongo {
- AuthzSessionExternalStateMongod::AuthzSessionExternalStateMongod(
- AuthorizationManager* authzManager) :
- AuthzSessionExternalStateServerCommon(authzManager) {}
- AuthzSessionExternalStateMongod::~AuthzSessionExternalStateMongod() {}
+AuthzSessionExternalStateMongod::AuthzSessionExternalStateMongod(AuthorizationManager* authzManager)
+ : AuthzSessionExternalStateServerCommon(authzManager) {}
+AuthzSessionExternalStateMongod::~AuthzSessionExternalStateMongod() {}
- void AuthzSessionExternalStateMongod::startRequest(OperationContext* txn) {
- // No locks should be held as this happens before any database accesses occur
- dassert(!txn->lockState()->isLocked());
+void AuthzSessionExternalStateMongod::startRequest(OperationContext* txn) {
+ // No locks should be held as this happens before any database accesses occur
+ dassert(!txn->lockState()->isLocked());
- _checkShouldAllowLocalhost(txn);
- }
+ _checkShouldAllowLocalhost(txn);
+}
- bool AuthzSessionExternalStateMongod::shouldIgnoreAuthChecks() const {
- // TODO(spencer): get "isInDirectClient" from OperationContext
- return cc().isInDirectClient() ||
- AuthzSessionExternalStateServerCommon::shouldIgnoreAuthChecks();
- }
+bool AuthzSessionExternalStateMongod::shouldIgnoreAuthChecks() const {
+ // TODO(spencer): get "isInDirectClient" from OperationContext
+ return cc().isInDirectClient() ||
+ AuthzSessionExternalStateServerCommon::shouldIgnoreAuthChecks();
+}
- bool AuthzSessionExternalStateMongod::serverIsArbiter() const {
- // Arbiters have access to extra privileges under localhost. See SERVER-5479.
- return (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
+bool AuthzSessionExternalStateMongod::serverIsArbiter() const {
+ // Arbiters have access to extra privileges under localhost. See SERVER-5479.
+ return (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
repl::ReplicationCoordinator::modeReplSet &&
- repl::getGlobalReplicationCoordinator()->getMemberState().arbiter());
- }
+ repl::getGlobalReplicationCoordinator()->getMemberState().arbiter());
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_d.h b/src/mongo/db/auth/authz_session_external_state_d.h
index 8af5cc41e61..0761ad5ea39 100644
--- a/src/mongo/db/auth/authz_session_external_state_d.h
+++ b/src/mongo/db/auth/authz_session_external_state_d.h
@@ -35,21 +35,21 @@
namespace mongo {
- /**
- * The implementation of AuthzSessionExternalState functionality for mongod.
- */
- class AuthzSessionExternalStateMongod : public AuthzSessionExternalStateServerCommon {
- MONGO_DISALLOW_COPYING(AuthzSessionExternalStateMongod);
+/**
+ * The implementation of AuthzSessionExternalState functionality for mongod.
+ */
+class AuthzSessionExternalStateMongod : public AuthzSessionExternalStateServerCommon {
+ MONGO_DISALLOW_COPYING(AuthzSessionExternalStateMongod);
- public:
- AuthzSessionExternalStateMongod(AuthorizationManager* authzManager);
- virtual ~AuthzSessionExternalStateMongod();
+public:
+ AuthzSessionExternalStateMongod(AuthorizationManager* authzManager);
+ virtual ~AuthzSessionExternalStateMongod();
- virtual bool shouldIgnoreAuthChecks() const;
+ virtual bool shouldIgnoreAuthChecks() const;
- virtual bool serverIsArbiter() const;
+ virtual bool serverIsArbiter() const;
- virtual void startRequest(OperationContext* txn);
- };
+ virtual void startRequest(OperationContext* txn);
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_mock.h b/src/mongo/db/auth/authz_session_external_state_mock.h
index 6e26c1bbceb..f1aa1d7a166 100644
--- a/src/mongo/db/auth/authz_session_external_state_mock.h
+++ b/src/mongo/db/auth/authz_session_external_state_mock.h
@@ -35,43 +35,45 @@
namespace mongo {
- /**
- * Mock of the AuthzSessionExternalState class used only for testing.
- */
- class AuthzSessionExternalStateMock : public AuthzSessionExternalState {
- MONGO_DISALLOW_COPYING(AuthzSessionExternalStateMock);
+/**
+ * Mock of the AuthzSessionExternalState class used only for testing.
+ */
+class AuthzSessionExternalStateMock : public AuthzSessionExternalState {
+ MONGO_DISALLOW_COPYING(AuthzSessionExternalStateMock);
- public:
- AuthzSessionExternalStateMock(AuthorizationManager* authzManager) :
- AuthzSessionExternalState(authzManager), _ignoreAuthChecksReturnValue(false),
- _allowLocalhostReturnValue(false), _serverIsArbiterReturnValue(false) {}
+public:
+ AuthzSessionExternalStateMock(AuthorizationManager* authzManager)
+ : AuthzSessionExternalState(authzManager),
+ _ignoreAuthChecksReturnValue(false),
+ _allowLocalhostReturnValue(false),
+ _serverIsArbiterReturnValue(false) {}
- virtual bool shouldIgnoreAuthChecks() const {
- return _ignoreAuthChecksReturnValue;
- }
+ virtual bool shouldIgnoreAuthChecks() const {
+ return _ignoreAuthChecksReturnValue;
+ }
- virtual bool shouldAllowLocalhost() const {
- return _allowLocalhostReturnValue;
- }
+ virtual bool shouldAllowLocalhost() const {
+ return _allowLocalhostReturnValue;
+ }
- virtual bool serverIsArbiter() const {
- return _serverIsArbiterReturnValue;
- }
+ virtual bool serverIsArbiter() const {
+ return _serverIsArbiterReturnValue;
+ }
- void setReturnValueForShouldIgnoreAuthChecks(bool returnValue) {
- _ignoreAuthChecksReturnValue = returnValue;
- }
+ void setReturnValueForShouldIgnoreAuthChecks(bool returnValue) {
+ _ignoreAuthChecksReturnValue = returnValue;
+ }
- void setReturnValueForShouldAllowLocalhost(bool returnValue) {
- _allowLocalhostReturnValue = returnValue;
- }
+ void setReturnValueForShouldAllowLocalhost(bool returnValue) {
+ _allowLocalhostReturnValue = returnValue;
+ }
- virtual void startRequest(OperationContext* txn) {}
+ virtual void startRequest(OperationContext* txn) {}
- private:
- bool _ignoreAuthChecksReturnValue;
- bool _allowLocalhostReturnValue;
- bool _serverIsArbiterReturnValue;
- };
+private:
+ bool _ignoreAuthChecksReturnValue;
+ bool _allowLocalhostReturnValue;
+ bool _serverIsArbiterReturnValue;
+};
-} // namespace mongo
+} // namespace mongo
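
The mock above lets tests dictate the three policy answers directly. A hedged sketch of how a unit test might use it, assuming the mongo headers and an AuthorizationManager* supplied by the test fixture; the function name exampleMockUsage is illustrative only.

#include "mongo/db/auth/authz_session_external_state_mock.h"

void exampleMockUsage(mongo::AuthorizationManager* authzManager) {
    mongo::AuthzSessionExternalStateMock state(authzManager);

    // Defaults are all false, so auth checks are enforced and the localhost
    // exception is closed.
    bool enforced = !state.shouldIgnoreAuthChecks() && !state.shouldAllowLocalhost();

    // A test can open either door explicitly.
    state.setReturnValueForShouldIgnoreAuthChecks(true);
    state.setReturnValueForShouldAllowLocalhost(true);

    bool bypassed = state.shouldIgnoreAuthChecks() && state.shouldAllowLocalhost();
    (void)enforced;
    (void)bypassed;
}
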
diff --git a/src/mongo/db/auth/authz_session_external_state_s.cpp b/src/mongo/db/auth/authz_session_external_state_s.cpp
index 4009670c6c4..9576bc79a2d 100644
--- a/src/mongo/db/auth/authz_session_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_s.cpp
@@ -38,13 +38,12 @@
namespace mongo {
- AuthzSessionExternalStateMongos::AuthzSessionExternalStateMongos(
- AuthorizationManager* authzManager) :
- AuthzSessionExternalStateServerCommon(authzManager) {}
- AuthzSessionExternalStateMongos::~AuthzSessionExternalStateMongos() {}
+AuthzSessionExternalStateMongos::AuthzSessionExternalStateMongos(AuthorizationManager* authzManager)
+ : AuthzSessionExternalStateServerCommon(authzManager) {}
+AuthzSessionExternalStateMongos::~AuthzSessionExternalStateMongos() {}
- void AuthzSessionExternalStateMongos::startRequest(OperationContext* txn) {
- _checkShouldAllowLocalhost(txn);
- }
+void AuthzSessionExternalStateMongos::startRequest(OperationContext* txn) {
+ _checkShouldAllowLocalhost(txn);
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_s.h b/src/mongo/db/auth/authz_session_external_state_s.h
index 777082faadc..7db5078db2a 100644
--- a/src/mongo/db/auth/authz_session_external_state_s.h
+++ b/src/mongo/db/auth/authz_session_external_state_s.h
@@ -35,17 +35,17 @@
namespace mongo {
- /**
- * The implementation of AuthzSessionExternalState functionality for mongos.
- */
- class AuthzSessionExternalStateMongos : public AuthzSessionExternalStateServerCommon {
- MONGO_DISALLOW_COPYING(AuthzSessionExternalStateMongos);
+/**
+ * The implementation of AuthzSessionExternalState functionality for mongos.
+ */
+class AuthzSessionExternalStateMongos : public AuthzSessionExternalStateServerCommon {
+ MONGO_DISALLOW_COPYING(AuthzSessionExternalStateMongos);
- public:
- AuthzSessionExternalStateMongos(AuthorizationManager* authzManager);
- virtual ~AuthzSessionExternalStateMongos();
+public:
+ AuthzSessionExternalStateMongos(AuthorizationManager* authzManager);
+ virtual ~AuthzSessionExternalStateMongos();
- virtual void startRequest(OperationContext* txn);
- };
+ virtual void startRequest(OperationContext* txn);
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.cpp b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
index 453980e19f7..a85ab1c5ac2 100644
--- a/src/mongo/db/auth/authz_session_external_state_server_common.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
@@ -42,50 +42,49 @@
namespace mongo {
namespace {
- MONGO_EXPORT_STARTUP_SERVER_PARAMETER(enableLocalhostAuthBypass, bool, true);
-} // namespace
+MONGO_EXPORT_STARTUP_SERVER_PARAMETER(enableLocalhostAuthBypass, bool, true);
+} // namespace
- // NOTE: we default _allowLocalhost to true under the assumption that _checkShouldAllowLocalhost
- // will always be called before any calls to shouldAllowLocalhost. If this is not the case,
- // it could cause a security hole.
- AuthzSessionExternalStateServerCommon::AuthzSessionExternalStateServerCommon(
- AuthorizationManager* authzManager) :
- AuthzSessionExternalState(authzManager),
- _allowLocalhost(enableLocalhostAuthBypass) {}
- AuthzSessionExternalStateServerCommon::~AuthzSessionExternalStateServerCommon() {}
+// NOTE: we default _allowLocalhost to true under the assumption that _checkShouldAllowLocalhost
+// will always be called before any calls to shouldAllowLocalhost. If this is not the case,
+// it could cause a security hole.
+AuthzSessionExternalStateServerCommon::AuthzSessionExternalStateServerCommon(
+ AuthorizationManager* authzManager)
+ : AuthzSessionExternalState(authzManager), _allowLocalhost(enableLocalhostAuthBypass) {}
+AuthzSessionExternalStateServerCommon::~AuthzSessionExternalStateServerCommon() {}
- void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost(OperationContext* txn) {
- if (!_authzManager->isAuthEnabled())
- return;
- // If we know that an admin user exists, don't re-check.
- if (!_allowLocalhost)
- return;
- // Don't bother checking if we're not on a localhost connection
- if (!ClientBasic::getCurrent()->getIsLocalHostConnection()) {
- _allowLocalhost = false;
- return;
- }
+void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost(OperationContext* txn) {
+ if (!_authzManager->isAuthEnabled())
+ return;
+ // If we know that an admin user exists, don't re-check.
+ if (!_allowLocalhost)
+ return;
+ // Don't bother checking if we're not on a localhost connection
+ if (!ClientBasic::getCurrent()->getIsLocalHostConnection()) {
+ _allowLocalhost = false;
+ return;
+ }
- _allowLocalhost = !_authzManager->hasAnyPrivilegeDocuments(txn);
- if (_allowLocalhost) {
- ONCE {
- log() << "note: no users configured in admin.system.users, allowing localhost "
- "access" << std::endl;
- }
+ _allowLocalhost = !_authzManager->hasAnyPrivilegeDocuments(txn);
+ if (_allowLocalhost) {
+ ONCE {
+ log() << "note: no users configured in admin.system.users, allowing localhost "
+ "access" << std::endl;
}
}
+}
- bool AuthzSessionExternalStateServerCommon::serverIsArbiter() const {
- return false;
- }
+bool AuthzSessionExternalStateServerCommon::serverIsArbiter() const {
+ return false;
+}
- bool AuthzSessionExternalStateServerCommon::shouldAllowLocalhost() const {
- ClientBasic* client = ClientBasic::getCurrent();
- return _allowLocalhost && client->getIsLocalHostConnection();
- }
+bool AuthzSessionExternalStateServerCommon::shouldAllowLocalhost() const {
+ ClientBasic* client = ClientBasic::getCurrent();
+ return _allowLocalhost && client->getIsLocalHostConnection();
+}
- bool AuthzSessionExternalStateServerCommon::shouldIgnoreAuthChecks() const {
- return !_authzManager->isAuthEnabled();
- }
+bool AuthzSessionExternalStateServerCommon::shouldIgnoreAuthChecks() const {
+ return !_authzManager->isAuthEnabled();
+}
-} // namespace mongo
+} // namespace mongo
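
The hunk above is the core of the localhost auth bypass: _allowLocalhost starts at the enableLocalhostAuthBypass server parameter and is only ever narrowed by _checkShouldAllowLocalhost(), which is why startRequest() must run before shouldAllowLocalhost() is trusted. The following self-contained sketch models that state machine; LocalhostBypass and onRequestStart are hypothetical names, not the MongoDB classes.

#include <iostream>

// Condensed model of the bypass logic shown above.
class LocalhostBypass {
public:
    explicit LocalhostBypass(bool bypassParamEnabled) : _allowLocalhost(bypassParamEnabled) {}

    // Mirrors _checkShouldAllowLocalhost(): run at the start of every request,
    // before shouldAllowLocalhost() is consulted.
    void onRequestStart(bool authEnabled, bool isLocalhostConn, bool anyPrivilegeDocs) {
        if (!authEnabled)
            return;  // nothing to decide when auth is off
        if (!_allowLocalhost)
            return;  // already latched off; never re-opened
        if (!isLocalhostConn) {
            _allowLocalhost = false;
            return;
        }
        _allowLocalhost = !anyPrivilegeDocs;  // allow only while no users exist
    }

    bool shouldAllowLocalhost(bool isLocalhostConn) const {
        return _allowLocalhost && isLocalhostConn;
    }

private:
    bool _allowLocalhost;
};

int main() {
    LocalhostBypass bypass(/*enableLocalhostAuthBypass=*/true);
    bypass.onRequestStart(/*authEnabled=*/true, /*isLocalhostConn=*/true, /*anyPrivilegeDocs=*/false);
    std::cout << bypass.shouldAllowLocalhost(true) << "\n";  // 1: first-user creation allowed
    bypass.onRequestStart(true, true, /*anyPrivilegeDocs=*/true);
    std::cout << bypass.shouldAllowLocalhost(true) << "\n";  // 0: a user exists now
}
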
diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.h b/src/mongo/db/auth/authz_session_external_state_server_common.h
index f96035dcade..df7ceb6c9f4 100644
--- a/src/mongo/db/auth/authz_session_external_state_server_common.h
+++ b/src/mongo/db/auth/authz_session_external_state_server_common.h
@@ -35,31 +35,29 @@
namespace mongo {
- /**
- * The implementation of AuthzSessionExternalState functionality common to mongod and mongos.
- */
- class AuthzSessionExternalStateServerCommon : public AuthzSessionExternalState {
- MONGO_DISALLOW_COPYING(AuthzSessionExternalStateServerCommon);
-
- public:
- virtual ~AuthzSessionExternalStateServerCommon();
-
- virtual bool shouldAllowLocalhost() const;
- virtual bool shouldIgnoreAuthChecks() const;
- virtual bool serverIsArbiter() const;
+/**
+ * The implementation of AuthzSessionExternalState functionality common to mongod and mongos.
+ */
+class AuthzSessionExternalStateServerCommon : public AuthzSessionExternalState {
+ MONGO_DISALLOW_COPYING(AuthzSessionExternalStateServerCommon);
- protected:
- AuthzSessionExternalStateServerCommon(AuthorizationManager* authzManager);
+public:
+ virtual ~AuthzSessionExternalStateServerCommon();
- // Checks whether or not localhost connections should be given full access and stores the
- // result in _allowLocalhost. Currently localhost connections are only given full access
- // if there are no users in the admin database.
- void _checkShouldAllowLocalhost(OperationContext* txn);
+ virtual bool shouldAllowLocalhost() const;
+ virtual bool shouldIgnoreAuthChecks() const;
+ virtual bool serverIsArbiter() const;
- private:
+protected:
+ AuthzSessionExternalStateServerCommon(AuthorizationManager* authzManager);
- bool _allowLocalhost;
+ // Checks whether or not localhost connections should be given full access and stores the
+ // result in _allowLocalhost. Currently localhost connections are only given full access
+ // if there are no users in the admin database.
+ void _checkShouldAllowLocalhost(OperationContext* txn);
- };
+private:
+ bool _allowLocalhost;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/impersonation_session.cpp b/src/mongo/db/auth/impersonation_session.cpp
index eb8bf54a6f9..228d5567807 100644
--- a/src/mongo/db/auth/impersonation_session.cpp
+++ b/src/mongo/db/auth/impersonation_session.cpp
@@ -44,36 +44,29 @@
namespace mongo {
- ImpersonationSessionGuard::ImpersonationSessionGuard(OperationContext* txn)
- : _txn(txn) {
+ImpersonationSessionGuard::ImpersonationSessionGuard(OperationContext* txn) : _txn(txn) {
+ auto authSession = AuthorizationSession::get(_txn->getClient());
- auto authSession = AuthorizationSession::get(_txn->getClient());
+ const auto& impersonatedUsersAndRoles =
+ rpc::AuditMetadata::get(txn).getImpersonatedUsersAndRoles();
- const auto& impersonatedUsersAndRoles =
- rpc::AuditMetadata::get(txn).getImpersonatedUsersAndRoles();
+ if (impersonatedUsersAndRoles != boost::none) {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized use of impersonation metadata.",
+ authSession->isAuthorizedForPrivilege(
+ Privilege(ResourcePattern::forClusterResource(), ActionType::impersonate)));
- if (impersonatedUsersAndRoles != boost::none) {
+ fassert(ErrorCodes::InternalError, !authSession->isImpersonating());
- uassert(ErrorCodes::Unauthorized,
- "Unauthorized use of impersonation metadata.",
- authSession->isAuthorizedForPrivilege(
- Privilege(ResourcePattern::forClusterResource(),
- ActionType::impersonate)));
-
- fassert(ErrorCodes::InternalError, !authSession->isImpersonating());
-
- authSession->setImpersonatedUserData(std::get<0>(*impersonatedUsersAndRoles),
- std::get<1>(*impersonatedUsersAndRoles));
- _active = true;
- }
+ authSession->setImpersonatedUserData(std::get<0>(*impersonatedUsersAndRoles),
+ std::get<1>(*impersonatedUsersAndRoles));
+ _active = true;
}
+}
- ImpersonationSessionGuard::~ImpersonationSessionGuard() {
- DESTRUCTOR_GUARD(
- if (_active) {
- AuthorizationSession::get(_txn->getClient())->clearImpersonatedUserData();
- }
- )
- }
+ImpersonationSessionGuard::~ImpersonationSessionGuard() {
+ DESTRUCTOR_GUARD(
+ if (_active) { AuthorizationSession::get(_txn->getClient())->clearImpersonatedUserData(); })
+}
} // namespace mongo
diff --git a/src/mongo/db/auth/impersonation_session.h b/src/mongo/db/auth/impersonation_session.h
index ab05bf1e5c3..af2dab2f774 100644
--- a/src/mongo/db/auth/impersonation_session.h
+++ b/src/mongo/db/auth/impersonation_session.h
@@ -29,20 +29,22 @@
#include "mongo/base/disallow_copying.h"
namespace mongo {
- class OperationContext;
+class OperationContext;
- /**
- * RAII class to optionally set an impersonated username list into the authorization session
- * for the duration of the life of this object.
- */
- class ImpersonationSessionGuard {
- MONGO_DISALLOW_COPYING(ImpersonationSessionGuard);
- public:
- ImpersonationSessionGuard(OperationContext* txn);
- ~ImpersonationSessionGuard();
- private:
- OperationContext* _txn;
- bool _active{false};
- };
+/**
+ * RAII class to optionally set an impersonated username list into the authorization session
+ * for the duration of the life of this object.
+ */
+class ImpersonationSessionGuard {
+ MONGO_DISALLOW_COPYING(ImpersonationSessionGuard);
+
+public:
+ ImpersonationSessionGuard(OperationContext* txn);
+ ~ImpersonationSessionGuard();
+
+private:
+ OperationContext* _txn;
+ bool _active{false};
+};
} // namespace mongo
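
ImpersonationSessionGuard is plain RAII: the constructor validates and applies any impersonation metadata carried on the operation, and the destructor clears it again. A hedged usage sketch, assuming an OperationContext* from the surrounding dispatch code; runTheCommand() is a placeholder for whatever runs while the impersonation is active.

#include "mongo/db/auth/impersonation_session.h"

void runTheCommand(mongo::OperationContext* txn);  // placeholder

void dispatchWithImpersonation(mongo::OperationContext* txn) {
    // Throws Unauthorized if impersonation metadata is present but the caller
    // lacks the impersonate action on the cluster resource.
    mongo::ImpersonationSessionGuard guard(txn);

    runTheCommand(txn);

    // The guard's destructor clears the impersonated user data, even if
    // runTheCommand() throws.
}
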
diff --git a/src/mongo/db/auth/internal_user_auth.cpp b/src/mongo/db/auth/internal_user_auth.cpp
index 6c8190845ea..5cc90c29ea9 100644
--- a/src/mongo/db/auth/internal_user_auth.cpp
+++ b/src/mongo/db/auth/internal_user_auth.cpp
@@ -39,78 +39,78 @@
#include "mongo/util/log.h"
namespace mongo {
- namespace mmb = mongo::mutablebson;
+namespace mmb = mongo::mutablebson;
- // not guarded by the authParams mutex never changed in
- // multi-threaded operation
- static bool authParamsSet = false;
+// Not guarded by the authParams mutex; never changed in
+// multi-threaded operation.
+static bool authParamsSet = false;
- // Store default authentication parameters for internal authentication to cluster members,
- // guarded by the authParams mutex
- static BSONObj authParams;
+// Store default authentication parameters for internal authentication to cluster members,
+// guarded by the authParams mutex
+static BSONObj authParams;
- static stdx::mutex authParamMutex;
+static stdx::mutex authParamMutex;
- bool isInternalAuthSet() {
- return authParamsSet;
- }
-
- void setInternalUserAuthParams(const BSONObj& authParamsIn) {
- if (!isInternalAuthSet()) {
- authParamsSet = true;
- }
- stdx::lock_guard<stdx::mutex> lk(authParamMutex);
+bool isInternalAuthSet() {
+ return authParamsSet;
+}
- if (authParamsIn["mechanism"].String() != "SCRAM-SHA-1") {
- authParams = authParamsIn.copy();
- return;
- }
+void setInternalUserAuthParams(const BSONObj& authParamsIn) {
+ if (!isInternalAuthSet()) {
+ authParamsSet = true;
+ }
+ stdx::lock_guard<stdx::mutex> lk(authParamMutex);
- // Create authParams for legacy MONGODB-CR authentication for 2.6/3.0 mixed
- // mode if applicable.
- mmb::Document fallback(authParamsIn);
- fallback.root().findFirstChildNamed("mechanism").setValueString("MONGODB-CR");
+ if (authParamsIn["mechanism"].String() != "SCRAM-SHA-1") {
+ authParams = authParamsIn.copy();
+ return;
+ }
- mmb::Document doc(authParamsIn);
- mmb::Element fallbackEl = doc.makeElementObject("fallbackParams");
- fallbackEl.setValueObject(fallback.getObject());
- doc.root().pushBack(fallbackEl);
- authParams = doc.getObject().copy();
+ // Create authParams for legacy MONGODB-CR authentication for 2.6/3.0 mixed
+ // mode if applicable.
+ mmb::Document fallback(authParamsIn);
+ fallback.root().findFirstChildNamed("mechanism").setValueString("MONGODB-CR");
+
+ mmb::Document doc(authParamsIn);
+ mmb::Element fallbackEl = doc.makeElementObject("fallbackParams");
+ fallbackEl.setValueObject(fallback.getObject());
+ doc.root().pushBack(fallbackEl);
+ authParams = doc.getObject().copy();
+}
+
+BSONObj getInternalUserAuthParamsWithFallback() {
+ if (!authParamsSet) {
+ return BSONObj();
}
- BSONObj getInternalUserAuthParamsWithFallback() {
- if (!authParamsSet) {
- return BSONObj();
- }
+ stdx::lock_guard<stdx::mutex> lk(authParamMutex);
+ return authParams.copy();
+}
- stdx::lock_guard<stdx::mutex> lk(authParamMutex);
- return authParams.copy();
+BSONObj getFallbackAuthParams(BSONObj params) {
+ if (params["fallbackParams"].type() != Object) {
+ return BSONObj();
}
+ return params["fallbackParams"].Obj();
+}
- BSONObj getFallbackAuthParams(BSONObj params) {
- if (params["fallbackParams"].type() != Object) {
- return BSONObj();
+bool authenticateInternalUser(DBClientWithCommands* conn) {
+ if (!isInternalAuthSet()) {
+ if (!serverGlobalParams.quiet) {
+ log() << "ERROR: No authentication parameters set for internal user";
}
- return params["fallbackParams"].Obj();
+ return false;
}
- bool authenticateInternalUser(DBClientWithCommands* conn) {
- if (!isInternalAuthSet()) {
- if (!serverGlobalParams.quiet) {
- log() << "ERROR: No authentication parameters set for internal user";
- }
- return false;
- }
-
- try {
- conn->auth(getInternalUserAuthParamsWithFallback());
- return true;
- } catch(const UserException& ex) {
- if (!serverGlobalParams.quiet) {
- log() << "can't authenticate to " << conn->toString()
- << " as internal user, error: "<< ex.what();
- }
- return false;
+ try {
+ conn->auth(getInternalUserAuthParamsWithFallback());
+ return true;
+ } catch (const UserException& ex) {
+ if (!serverGlobalParams.quiet) {
+ log() << "can't authenticate to " << conn->toString()
+ << " as internal user, error: " << ex.what();
}
+ return false;
}
-} // namespace mongo
+}
+} // namespace mongo
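
setInternalUserAuthParams() above stores the parameters verbatim unless the mechanism is SCRAM-SHA-1, in which case it embeds a MONGODB-CR copy under a fallbackParams field that getFallbackAuthParams() later extracts for 2.6/3.0 mixed-mode clusters. A hedged sketch of the document shapes involved; the credential values are placeholders and the include paths are assumed.

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/auth/internal_user_auth.h"

void exampleInternalAuthSetup() {
    mongo::BSONObjBuilder b;
    b.append("mechanism", "SCRAM-SHA-1");
    b.append("user", "__system");                   // placeholder principal
    b.append("db", "local");                        // placeholder database
    b.append("pwd", "<keyfile-derived-password>");  // placeholder secret
    mongo::setInternalUserAuthParams(b.obj());

    // Stored form: the original params plus a fallbackParams sub-document whose
    // mechanism has been rewritten to MONGODB-CR.
    mongo::BSONObj stored = mongo::getInternalUserAuthParamsWithFallback();
    mongo::BSONObj fallback = mongo::getFallbackAuthParams(stored);
    (void)fallback;
}
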
diff --git a/src/mongo/db/auth/internal_user_auth.h b/src/mongo/db/auth/internal_user_auth.h
index 547c9ba76a8..772eef1b322 100644
--- a/src/mongo/db/auth/internal_user_auth.h
+++ b/src/mongo/db/auth/internal_user_auth.h
@@ -29,40 +29,40 @@
#pragma once
namespace mongo {
- class BSONObj;
- class DBClientWithCommands;
+class BSONObj;
+class DBClientWithCommands;
- /**
- * @return true if internal authentication parameters has been set up
- */
- bool isInternalAuthSet();
+/**
+ * @return true if internal authentication parameters have been set up
+ */
+bool isInternalAuthSet();
- /**
- * This method initializes the authParams object with authentication
- * credentials to be used by authenticateInternalUser.
- */
- void setInternalUserAuthParams(const BSONObj& authParamsIn);
+/**
+ * This method initializes the authParams object with authentication
+ * credentials to be used by authenticateInternalUser.
+ */
+void setInternalUserAuthParams(const BSONObj& authParamsIn);
- /**
- * Returns a copy of the authParams object to be used by authenticateInternalUser
- *
- * The format of the return object is { authparams, fallbackParams:params}
- *
- * If SCRAM-SHA-1 is the internal auth mechanism the fallbackParams sub document is
- * for MONGODB-CR auth is included. For MONGODB-XC509 no fallbackParams document is
- * returned.
- **/
- BSONObj getInternalUserAuthParamsWithFallback();
+/**
+ * Returns a copy of the authParams object to be used by authenticateInternalUser
+ *
+ * The format of the return object is { authparams, fallbackParams:params}
+ *
+ * If SCRAM-SHA-1 is the internal auth mechanism the fallbackParams sub document is
+ * for MONGODB-CR auth is included. For MONGODB-XC509 no fallbackParams document is
+ * returned.
+ **/
+BSONObj getInternalUserAuthParamsWithFallback();
- /**
- * Returns a copy of the fallback parameter portion of an internal auth parameter object
- **/
- BSONObj getFallbackAuthParams(BSONObj params);
+/**
+ * Returns a copy of the fallback parameter portion of an internal auth parameter object
+ **/
+BSONObj getFallbackAuthParams(BSONObj params);
- /**
- * Authenticates to another cluster member using appropriate authentication data.
- * Uses getInternalUserAuthParams() to retrive authentication parameters.
- * @return true if the authentication was succesful
- */
- bool authenticateInternalUser(DBClientWithCommands* conn);
-} // namespace mongo
+/**
+ * Authenticates to another cluster member using appropriate authentication data.
+ * Uses getInternalUserAuthParamsWithFallback() to retrieve authentication parameters.
+ * @return true if the authentication was successful
+ */
+bool authenticateInternalUser(DBClientWithCommands* conn);
+} // namespace mongo
diff --git a/src/mongo/db/auth/mongo_authentication_session.cpp b/src/mongo/db/auth/mongo_authentication_session.cpp
index 117f3a047f2..2e25b4f561c 100644
--- a/src/mongo/db/auth/mongo_authentication_session.cpp
+++ b/src/mongo/db/auth/mongo_authentication_session.cpp
@@ -29,11 +29,9 @@
namespace mongo {
- MongoAuthenticationSession::MongoAuthenticationSession(nonce64 nonce) :
- AuthenticationSession(AuthenticationSession::SESSION_TYPE_MONGO),
- _nonce(nonce) {
- }
+MongoAuthenticationSession::MongoAuthenticationSession(nonce64 nonce)
+ : AuthenticationSession(AuthenticationSession::SESSION_TYPE_MONGO), _nonce(nonce) {}
- MongoAuthenticationSession::~MongoAuthenticationSession() {}
+MongoAuthenticationSession::~MongoAuthenticationSession() {}
} // namespace mongo
diff --git a/src/mongo/db/auth/mongo_authentication_session.h b/src/mongo/db/auth/mongo_authentication_session.h
index 19d2c44d387..e60bcf6ac85 100644
--- a/src/mongo/db/auth/mongo_authentication_session.h
+++ b/src/mongo/db/auth/mongo_authentication_session.h
@@ -31,24 +31,27 @@
namespace mongo {
- typedef unsigned long long nonce64;
-
- /**
- * Authentication session data for a nonce-challenge-response authentication of the
- * type used in the Mongo nonce-authenticate protocol.
- *
- * The only session data is the nonce sent to the client.
- */
- class MongoAuthenticationSession : public AuthenticationSession {
- MONGO_DISALLOW_COPYING(MongoAuthenticationSession);
- public:
- explicit MongoAuthenticationSession(nonce64 nonce);
- virtual ~MongoAuthenticationSession();
-
- nonce64 getNonce() const { return _nonce; }
-
- private:
- const nonce64 _nonce;
- };
+typedef unsigned long long nonce64;
+
+/**
+ * Authentication session data for a nonce-challenge-response authentication of the
+ * type used in the Mongo nonce-authenticate protocol.
+ *
+ * The only session data is the nonce sent to the client.
+ */
+class MongoAuthenticationSession : public AuthenticationSession {
+ MONGO_DISALLOW_COPYING(MongoAuthenticationSession);
+
+public:
+ explicit MongoAuthenticationSession(nonce64 nonce);
+ virtual ~MongoAuthenticationSession();
+
+ nonce64 getNonce() const {
+ return _nonce;
+ }
+
+private:
+ const nonce64 _nonce;
+};
} // namespace mongo
diff --git a/src/mongo/db/auth/native_sasl_authentication_session.cpp b/src/mongo/db/auth/native_sasl_authentication_session.cpp
index e2392beacd3..9566ba37487 100644
--- a/src/mongo/db/auth/native_sasl_authentication_session.cpp
+++ b/src/mongo/db/auth/native_sasl_authentication_session.cpp
@@ -52,122 +52,108 @@
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
namespace {
- SaslAuthenticationSession* createNativeSaslAuthenticationSession(
- AuthorizationSession* authzSession,
- const std::string& mechanism) {
- return new NativeSaslAuthenticationSession(authzSession);
- }
-
- MONGO_INITIALIZER(NativeSaslServerCore)(InitializerContext* context) {
- if (saslGlobalParams.hostName.empty())
- saslGlobalParams.hostName = getHostNameCached();
- if (saslGlobalParams.serviceName.empty())
- saslGlobalParams.serviceName = "mongodb";
-
- SaslAuthenticationSession::create = createNativeSaslAuthenticationSession;
- return Status::OK();
- }
-
- // PostSaslCommands is reversely dependent on CyrusSaslCommands having been run
- MONGO_INITIALIZER_WITH_PREREQUISITES(PostSaslCommands,
- ("NativeSaslServerCore"))
- (InitializerContext*) {
-
- AuthorizationManager authzManager(stdx::make_unique<AuthzManagerExternalStateMock>());
- std::unique_ptr<AuthorizationSession> authzSession =
- authzManager.makeAuthorizationSession();
-
- for (size_t i = 0; i < saslGlobalParams.authenticationMechanisms.size(); ++i) {
- const std::string& mechanism = saslGlobalParams.authenticationMechanisms[i];
- if (mechanism == "MONGODB-CR" || mechanism == "MONGODB-X509") {
- // Not a SASL mechanism; no need to smoke test built-in mechanisms.
- continue;
- }
- unique_ptr<SaslAuthenticationSession>
- session(SaslAuthenticationSession::create(authzSession.get(), mechanism));
- Status status = session->start("test",
- mechanism,
- saslGlobalParams.serviceName,
- saslGlobalParams.hostName,
- 1,
- true);
- if (!status.isOK())
- return status;
+SaslAuthenticationSession* createNativeSaslAuthenticationSession(AuthorizationSession* authzSession,
+ const std::string& mechanism) {
+ return new NativeSaslAuthenticationSession(authzSession);
+}
+
+MONGO_INITIALIZER(NativeSaslServerCore)(InitializerContext* context) {
+ if (saslGlobalParams.hostName.empty())
+ saslGlobalParams.hostName = getHostNameCached();
+ if (saslGlobalParams.serviceName.empty())
+ saslGlobalParams.serviceName = "mongodb";
+
+ SaslAuthenticationSession::create = createNativeSaslAuthenticationSession;
+ return Status::OK();
+}
+
+// PostSaslCommands is reversely dependent on CyrusSaslCommands having been run
+MONGO_INITIALIZER_WITH_PREREQUISITES(PostSaslCommands, ("NativeSaslServerCore"))
+(InitializerContext*) {
+ AuthorizationManager authzManager(stdx::make_unique<AuthzManagerExternalStateMock>());
+ std::unique_ptr<AuthorizationSession> authzSession = authzManager.makeAuthorizationSession();
+
+ for (size_t i = 0; i < saslGlobalParams.authenticationMechanisms.size(); ++i) {
+ const std::string& mechanism = saslGlobalParams.authenticationMechanisms[i];
+ if (mechanism == "MONGODB-CR" || mechanism == "MONGODB-X509") {
+ // Not a SASL mechanism; no need to smoke test built-in mechanisms.
+ continue;
}
-
- return Status::OK();
- }
-} //namespace
-
- NativeSaslAuthenticationSession::NativeSaslAuthenticationSession(
- AuthorizationSession* authzSession) :
- SaslAuthenticationSession(authzSession),
- _mechanism("") {
+ unique_ptr<SaslAuthenticationSession> session(
+ SaslAuthenticationSession::create(authzSession.get(), mechanism));
+ Status status = session->start(
+ "test", mechanism, saslGlobalParams.serviceName, saslGlobalParams.hostName, 1, true);
+ if (!status.isOK())
+ return status;
}
- NativeSaslAuthenticationSession::~NativeSaslAuthenticationSession() {}
+ return Status::OK();
+}
+} // namespace
- Status NativeSaslAuthenticationSession::start(StringData authenticationDatabase,
- StringData mechanism,
- StringData serviceName,
- StringData serviceHostname,
- int64_t conversationId,
- bool autoAuthorize) {
- fassert(18626, conversationId > 0);
+NativeSaslAuthenticationSession::NativeSaslAuthenticationSession(AuthorizationSession* authzSession)
+ : SaslAuthenticationSession(authzSession), _mechanism("") {}
- if (_conversationId != 0) {
- return Status(ErrorCodes::AlreadyInitialized,
- "Cannot call start() twice on same NativeSaslAuthenticationSession.");
- }
+NativeSaslAuthenticationSession::~NativeSaslAuthenticationSession() {}
- _authenticationDatabase = authenticationDatabase.toString();
- _mechanism = mechanism.toString();
- _serviceName = serviceName.toString();
- _serviceHostname = serviceHostname.toString();
- _conversationId = conversationId;
- _autoAuthorize = autoAuthorize;
+Status NativeSaslAuthenticationSession::start(StringData authenticationDatabase,
+ StringData mechanism,
+ StringData serviceName,
+ StringData serviceHostname,
+ int64_t conversationId,
+ bool autoAuthorize) {
+ fassert(18626, conversationId > 0);
- if (mechanism == "PLAIN") {
- _saslConversation.reset(new SaslPLAINServerConversation(this));
- }
- else if (mechanism == "SCRAM-SHA-1") {
- _saslConversation.reset(new SaslSCRAMSHA1ServerConversation(this));
- }
- else {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << "SASL mechanism " << mechanism <<
- " is not supported");
- }
+ if (_conversationId != 0) {
+ return Status(ErrorCodes::AlreadyInitialized,
+ "Cannot call start() twice on same NativeSaslAuthenticationSession.");
+ }
- return Status::OK();
+ _authenticationDatabase = authenticationDatabase.toString();
+ _mechanism = mechanism.toString();
+ _serviceName = serviceName.toString();
+ _serviceHostname = serviceHostname.toString();
+ _conversationId = conversationId;
+ _autoAuthorize = autoAuthorize;
+
+ if (mechanism == "PLAIN") {
+ _saslConversation.reset(new SaslPLAINServerConversation(this));
+ } else if (mechanism == "SCRAM-SHA-1") {
+ _saslConversation.reset(new SaslSCRAMSHA1ServerConversation(this));
+ } else {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "SASL mechanism " << mechanism
+ << " is not supported");
}
- Status NativeSaslAuthenticationSession::step(StringData inputData,
- std::string* outputData) {
- if (!_saslConversation) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() <<
- "The authentication session has not been properly initialized");
- }
+ return Status::OK();
+}
- StatusWith<bool> status = _saslConversation->step(inputData, outputData);
- if (status.isOK()) {
- _done = status.getValue();
- } else {
- _done = true;
- }
- return status.getStatus();
+Status NativeSaslAuthenticationSession::step(StringData inputData, std::string* outputData) {
+ if (!_saslConversation) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "The authentication session has not been properly initialized");
}
- std::string NativeSaslAuthenticationSession::getPrincipalId() const {
- return _saslConversation->getPrincipalId();
+ StatusWith<bool> status = _saslConversation->step(inputData, outputData);
+ if (status.isOK()) {
+ _done = status.getValue();
+ } else {
+ _done = true;
}
+ return status.getStatus();
+}
- const char* NativeSaslAuthenticationSession::getMechanism() const {
- return _mechanism.c_str();
- }
+std::string NativeSaslAuthenticationSession::getPrincipalId() const {
+ return _saslConversation->getPrincipalId();
+}
+
+const char* NativeSaslAuthenticationSession::getMechanism() const {
+ return _mechanism.c_str();
+}
} // namespace mongo
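
The session above is driven in two phases: start() fixes the mechanism and picks the server-side conversation (PLAIN or SCRAM-SHA-1), and step() is then called once per client payload until the exchange completes. A hedged sketch of one such call pair; the database, hostname, and conversation id values are placeholders.

#include <string>

#include "mongo/db/auth/native_sasl_authentication_session.h"

mongo::Status runOneSaslStep(mongo::NativeSaslAuthenticationSession* session,
                             mongo::StringData clientPayload,
                             std::string* serverPayload) {
    // start() may only be called once per session; a second call fails with
    // AlreadyInitialized.
    mongo::Status status = session->start("admin",            // authentication database
                                          "SCRAM-SHA-1",      // mechanism
                                          "mongodb",          // service name
                                          "db1.example.net",  // service hostname (placeholder)
                                          1,                  // conversation id, must be > 0
                                          true);              // autoAuthorize
    if (!status.isOK())
        return status;

    // Each step() consumes one client message and produces the next server reply.
    return session->step(clientPayload, serverPayload);
}
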
diff --git a/src/mongo/db/auth/native_sasl_authentication_session.h b/src/mongo/db/auth/native_sasl_authentication_session.h
index 57b90a145be..156ffe685f3 100644
--- a/src/mongo/db/auth/native_sasl_authentication_session.h
+++ b/src/mongo/db/auth/native_sasl_authentication_session.h
@@ -40,31 +40,31 @@
namespace mongo {
- /**
- * Authentication session data for the server side of SASL authentication.
- */
- class NativeSaslAuthenticationSession : public SaslAuthenticationSession {
- MONGO_DISALLOW_COPYING(NativeSaslAuthenticationSession);
- public:
+/**
+ * Authentication session data for the server side of SASL authentication.
+ */
+class NativeSaslAuthenticationSession : public SaslAuthenticationSession {
+ MONGO_DISALLOW_COPYING(NativeSaslAuthenticationSession);
- explicit NativeSaslAuthenticationSession(AuthorizationSession* authSession);
- virtual ~NativeSaslAuthenticationSession();
+public:
+ explicit NativeSaslAuthenticationSession(AuthorizationSession* authSession);
+ virtual ~NativeSaslAuthenticationSession();
- virtual Status start(StringData authenticationDatabase,
- StringData mechanism,
- StringData serviceName,
- StringData serviceHostname,
- int64_t conversationId,
- bool autoAuthorize);
+ virtual Status start(StringData authenticationDatabase,
+ StringData mechanism,
+ StringData serviceName,
+ StringData serviceHostname,
+ int64_t conversationId,
+ bool autoAuthorize);
- virtual Status step(StringData inputData, std::string* outputData);
+ virtual Status step(StringData inputData, std::string* outputData);
- virtual std::string getPrincipalId() const;
+ virtual std::string getPrincipalId() const;
- virtual const char* getMechanism() const;
+ virtual const char* getMechanism() const;
- private:
- std::string _mechanism;
- std::unique_ptr<SaslServerConversation> _saslConversation;
- };
+private:
+ std::string _mechanism;
+ std::unique_ptr<SaslServerConversation> _saslConversation;
+};
} // namespace mongo
diff --git a/src/mongo/db/auth/privilege.cpp b/src/mongo/db/auth/privilege.cpp
index c98385f2bf6..25cd3141151 100644
--- a/src/mongo/db/auth/privilege.cpp
+++ b/src/mongo/db/auth/privilege.cpp
@@ -32,47 +32,46 @@
namespace mongo {
- void Privilege::addPrivilegeToPrivilegeVector(PrivilegeVector* privileges,
- const Privilege& privilegeToAdd) {
- for (PrivilegeVector::iterator it = privileges->begin(); it != privileges->end(); ++it) {
- if (it->getResourcePattern() == privilegeToAdd.getResourcePattern()) {
- it->addActions(privilegeToAdd.getActions());
- return;
- }
+void Privilege::addPrivilegeToPrivilegeVector(PrivilegeVector* privileges,
+ const Privilege& privilegeToAdd) {
+ for (PrivilegeVector::iterator it = privileges->begin(); it != privileges->end(); ++it) {
+ if (it->getResourcePattern() == privilegeToAdd.getResourcePattern()) {
+ it->addActions(privilegeToAdd.getActions());
+ return;
}
- // No privilege exists yet for this resource
- privileges->push_back(privilegeToAdd);
}
+ // No privilege exists yet for this resource
+ privileges->push_back(privilegeToAdd);
+}
- Privilege::Privilege(const ResourcePattern& resource, const ActionType& action) :
- _resource(resource) {
+Privilege::Privilege(const ResourcePattern& resource, const ActionType& action)
+ : _resource(resource) {
+ _actions.addAction(action);
+}
+Privilege::Privilege(const ResourcePattern& resource, const ActionSet& actions)
+ : _resource(resource), _actions(actions) {}
- _actions.addAction(action);
- }
- Privilege::Privilege(const ResourcePattern& resource, const ActionSet& actions) :
- _resource(resource), _actions(actions) {}
+void Privilege::addActions(const ActionSet& actionsToAdd) {
+ _actions.addAllActionsFromSet(actionsToAdd);
+}
- void Privilege::addActions(const ActionSet& actionsToAdd) {
- _actions.addAllActionsFromSet(actionsToAdd);
- }
+void Privilege::removeActions(const ActionSet& actionsToRemove) {
+ _actions.removeAllActionsFromSet(actionsToRemove);
+}
- void Privilege::removeActions(const ActionSet& actionsToRemove) {
- _actions.removeAllActionsFromSet(actionsToRemove);
- }
+bool Privilege::includesAction(const ActionType& action) const {
+ return _actions.contains(action);
+}
- bool Privilege::includesAction(const ActionType& action) const {
- return _actions.contains(action);
- }
+bool Privilege::includesActions(const ActionSet& actions) const {
+ return _actions.isSupersetOf(actions);
+}
- bool Privilege::includesActions(const ActionSet& actions) const {
- return _actions.isSupersetOf(actions);
- }
-
- BSONObj Privilege::toBSON() const {
- ParsedPrivilege pp;
- std::string errmsg;
- invariant(ParsedPrivilege::privilegeToParsedPrivilege(*this, &pp, &errmsg));
- return pp.toBSON();
- }
+BSONObj Privilege::toBSON() const {
+ ParsedPrivilege pp;
+ std::string errmsg;
+ invariant(ParsedPrivilege::privilegeToParsedPrivilege(*this, &pp, &errmsg));
+ return pp.toBSON();
+}
-} // namespace mongo
+} // namespace mongo
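
addPrivilegeToPrivilegeVector() above merges actions into an existing entry when the vector already holds a privilege on the same resource pattern, so repeated grants never duplicate resources. The sketch below uses only names visible in this diff plus ActionType::shutdown, which is assumed here for illustration.

#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/resource_pattern.h"

void exampleDedup() {
    mongo::PrivilegeVector privileges;
    const mongo::ResourcePattern cluster = mongo::ResourcePattern::forClusterResource();

    // First insertion creates the entry for the cluster resource.
    mongo::Privilege::addPrivilegeToPrivilegeVector(
        &privileges, mongo::Privilege(cluster, mongo::ActionType::impersonate));

    // Second insertion on the same resource merges into that entry instead of
    // appending, so privileges.size() stays 1 and the entry gains the action.
    mongo::Privilege::addPrivilegeToPrivilegeVector(
        &privileges, mongo::Privilege(cluster, mongo::ActionType::shutdown));

    bool merged = privileges.size() == 1 &&
        privileges[0].includesAction(mongo::ActionType::impersonate) &&
        privileges[0].includesAction(mongo::ActionType::shutdown);
    (void)merged;
}
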
diff --git a/src/mongo/db/auth/privilege.h b/src/mongo/db/auth/privilege.h
index 1009172a30c..da6b054e012 100644
--- a/src/mongo/db/auth/privilege.h
+++ b/src/mongo/db/auth/privilege.h
@@ -36,47 +36,50 @@
namespace mongo {
- class Privilege;
- typedef std::vector<Privilege> PrivilegeVector;
+class Privilege;
+typedef std::vector<Privilege> PrivilegeVector;
+/**
+ * A representation of the permission to perform a set of actions on a resource.
+ */
+class Privilege {
+public:
/**
- * A representation of the permission to perform a set of actions on a resource.
+ * Adds "privilegeToAdd" to "privileges", de-duping "privilegeToAdd" if the vector already
+ * contains a privilege on the same resource.
+ *
+ * This method is the preferred way to add privileges to privilege vectors.
*/
- class Privilege {
- public:
- /**
- * Adds "privilegeToAdd" to "privileges", de-duping "privilegeToAdd" if the vector already
- * contains a privilege on the same resource.
- *
- * This method is the preferred way to add privileges to privilege vectors.
- */
- static void addPrivilegeToPrivilegeVector(PrivilegeVector* privileges,
- const Privilege& privilegeToAdd);
-
+ static void addPrivilegeToPrivilegeVector(PrivilegeVector* privileges,
+ const Privilege& privilegeToAdd);
- Privilege() {};
- Privilege(const ResourcePattern& resource, const ActionType& action);
- Privilege(const ResourcePattern& resource, const ActionSet& actions);
- ~Privilege() {}
- const ResourcePattern& getResourcePattern() const { return _resource; }
+ Privilege(){};
+ Privilege(const ResourcePattern& resource, const ActionType& action);
+ Privilege(const ResourcePattern& resource, const ActionSet& actions);
+ ~Privilege() {}
- const ActionSet& getActions() const { return _actions; }
+ const ResourcePattern& getResourcePattern() const {
+ return _resource;
+ }
- void addActions(const ActionSet& actionsToAdd);
- void removeActions(const ActionSet& actionsToRemove);
+ const ActionSet& getActions() const {
+ return _actions;
+ }
- // Checks if the given action is present in the Privilege.
- bool includesAction(const ActionType& action) const;
- // Checks if the given actions are present in the Privilege.
- bool includesActions(const ActionSet& actions) const;
+ void addActions(const ActionSet& actionsToAdd);
+ void removeActions(const ActionSet& actionsToRemove);
- BSONObj toBSON() const;
+ // Checks if the given action is present in the Privilege.
+ bool includesAction(const ActionType& action) const;
+ // Checks if the given actions are present in the Privilege.
+ bool includesActions(const ActionSet& actions) const;
- private:
+ BSONObj toBSON() const;
- ResourcePattern _resource;
- ActionSet _actions; // bitmask of actions this privilege grants
- };
+private:
+ ResourcePattern _resource;
+ ActionSet _actions; // bitmask of actions this privilege grants
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/privilege_parser.cpp b/src/mongo/db/auth/privilege_parser.cpp
index f1b4777a74c..140fbde2d7f 100644
--- a/src/mongo/db/auth/privilege_parser.cpp
+++ b/src/mongo/db/auth/privilege_parser.cpp
@@ -37,420 +37,429 @@
namespace mongo {
- using std::string;
- using std::vector;
+using std::string;
+using std::vector;
- using mongoutils::str::stream;
+using mongoutils::str::stream;
- const BSONField<bool> ParsedResource::anyResource("anyResource");
- const BSONField<bool> ParsedResource::cluster("cluster");
- const BSONField<string> ParsedResource::db("db");
- const BSONField<string> ParsedResource::collection("collection");
+const BSONField<bool> ParsedResource::anyResource("anyResource");
+const BSONField<bool> ParsedResource::cluster("cluster");
+const BSONField<string> ParsedResource::db("db");
+const BSONField<string> ParsedResource::collection("collection");
- ParsedResource::ParsedResource() {
- clear();
- }
-
- ParsedResource::~ParsedResource() {
- }
-
- bool ParsedResource::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+ParsedResource::ParsedResource() {
+ clear();
+}
- int numCandidateTypes = 0;
- if (isAnyResourceSet()) ++numCandidateTypes;
- if (isClusterSet()) ++numCandidateTypes;
- if (isDbSet() || isCollectionSet()) ++numCandidateTypes;
+ParsedResource::~ParsedResource() {}
- if (isDbSet() != isCollectionSet()) {
- *errMsg = stream() << "resource must set both " << db.name() << " and " <<
- collection.name() << " or neither, but not exactly one.";
- return false;
- }
- if (numCandidateTypes != 1) {
- *errMsg = stream() << "resource must have exactly " << db.name() << " and " <<
- collection.name() << " set, or have only " << cluster.name() << " set " <<
- " or have only " << anyResource.name() << " set";
- return false;
- }
- if (isAnyResourceSet() && !getAnyResource()) {
- *errMsg = stream() << anyResource.name() << " must be true when specified";
- return false;
- }
- if (isClusterSet() && !getCluster()) {
- *errMsg = stream() << cluster.name() << " must be true when specified";
- return false;
- }
- if (isDbSet() && (!NamespaceString::validDBName(getDb()) && !getDb().empty())) {
- *errMsg = stream() << getDb() << " is not a valid database name";
- return false;
- }
- if (isCollectionSet() && (!NamespaceString::validCollectionName(getCollection()) &&
- !getCollection().empty())) {
- *errMsg = stream() << getCollection() << " is not a valid collection name";
- return false;
- }
- return true;
+bool ParsedResource::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- BSONObj ParsedResource::toBSON() const {
- BSONObjBuilder builder;
-
- if (_isAnyResourceSet) builder.append(anyResource(), _anyResource);
-
- if (_isClusterSet) builder.append(cluster(), _cluster);
-
- if (_isDbSet) builder.append(db(), _db);
-
- if (_isCollectionSet) builder.append(collection(), _collection);
+ int numCandidateTypes = 0;
+ if (isAnyResourceSet())
+ ++numCandidateTypes;
+ if (isClusterSet())
+ ++numCandidateTypes;
+ if (isDbSet() || isCollectionSet())
+ ++numCandidateTypes;
- return builder.obj();
+ if (isDbSet() != isCollectionSet()) {
+ *errMsg = stream() << "resource must set both " << db.name() << " and " << collection.name()
+ << " or neither, but not exactly one.";
+ return false;
}
-
- bool ParsedResource::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
-
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
-
- FieldParser::FieldState fieldState;
- fieldState = FieldParser::extract(source, anyResource, &_anyResource, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isAnyResourceSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, cluster, &_cluster, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isClusterSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, db, &_db, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isDbSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, collection, &_collection, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isCollectionSet = fieldState == FieldParser::FIELD_SET;
-
- return true;
+ if (numCandidateTypes != 1) {
+ *errMsg = stream() << "resource must have exactly " << db.name() << " and "
+ << collection.name() << " set, or have only " << cluster.name()
+ << " set "
+ << " or have only " << anyResource.name() << " set";
+ return false;
}
-
- void ParsedResource::clear() {
- _anyResource = false;
- _isAnyResourceSet = false;
-
- _cluster = false;
- _isClusterSet = false;
-
- _db.clear();
- _isDbSet = false;
-
- _collection.clear();
- _isCollectionSet = false;
-
+ if (isAnyResourceSet() && !getAnyResource()) {
+ *errMsg = stream() << anyResource.name() << " must be true when specified";
+ return false;
}
-
- void ParsedResource::cloneTo(ParsedResource* other) const {
- other->clear();
-
- other->_anyResource = _anyResource;
- other->_isAnyResourceSet = _isAnyResourceSet;
-
- other->_cluster = _cluster;
- other->_isClusterSet = _isClusterSet;
-
- other->_db = _db;
- other->_isDbSet = _isDbSet;
-
- other->_collection = _collection;
- other->_isCollectionSet = _isCollectionSet;
+ if (isClusterSet() && !getCluster()) {
+ *errMsg = stream() << cluster.name() << " must be true when specified";
+ return false;
}
-
- std::string ParsedResource::toString() const {
- return toBSON().toString();
+ if (isDbSet() && (!NamespaceString::validDBName(getDb()) && !getDb().empty())) {
+ *errMsg = stream() << getDb() << " is not a valid database name";
+ return false;
}
-
- void ParsedResource::setAnyResource(bool anyResource) {
- _anyResource = anyResource;
- _isAnyResourceSet = true;
+ if (isCollectionSet() &&
+ (!NamespaceString::validCollectionName(getCollection()) && !getCollection().empty())) {
+ *errMsg = stream() << getCollection() << " is not a valid collection name";
+ return false;
}
+ return true;
+}
- void ParsedResource::unsetAnyResource() {
- _isAnyResourceSet = false;
- }
+BSONObj ParsedResource::toBSON() const {
+ BSONObjBuilder builder;
- bool ParsedResource::isAnyResourceSet() const {
- return _isAnyResourceSet;
- }
+ if (_isAnyResourceSet)
+ builder.append(anyResource(), _anyResource);
- bool ParsedResource::getAnyResource() const {
- dassert(_isAnyResourceSet);
- return _anyResource;
- }
-
- void ParsedResource::setCluster(bool cluster) {
- _cluster = cluster;
- _isClusterSet = true;
- }
+ if (_isClusterSet)
+ builder.append(cluster(), _cluster);
- void ParsedResource::unsetCluster() {
- _isClusterSet = false;
- }
+ if (_isDbSet)
+ builder.append(db(), _db);
- bool ParsedResource::isClusterSet() const {
- return _isClusterSet;
- }
+ if (_isCollectionSet)
+ builder.append(collection(), _collection);
- bool ParsedResource::getCluster() const {
- dassert(_isClusterSet);
- return _cluster;
- }
+ return builder.obj();
+}
- void ParsedResource::setDb(StringData db) {
- _db = db.toString();
- _isDbSet = true;
- }
+bool ParsedResource::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- void ParsedResource::unsetDb() {
- _isDbSet = false;
- }
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- bool ParsedResource::isDbSet() const {
- return _isDbSet;
- }
+ FieldParser::FieldState fieldState;
+ fieldState = FieldParser::extract(source, anyResource, &_anyResource, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isAnyResourceSet = fieldState == FieldParser::FIELD_SET;
- const std::string& ParsedResource::getDb() const {
- dassert(_isDbSet);
- return _db;
- }
+ fieldState = FieldParser::extract(source, cluster, &_cluster, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isClusterSet = fieldState == FieldParser::FIELD_SET;
- void ParsedResource::setCollection(StringData collection) {
- _collection = collection.toString();
- _isCollectionSet = true;
- }
+ fieldState = FieldParser::extract(source, db, &_db, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isDbSet = fieldState == FieldParser::FIELD_SET;
- void ParsedResource::unsetCollection() {
- _isCollectionSet = false;
- }
+ fieldState = FieldParser::extract(source, collection, &_collection, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isCollectionSet = fieldState == FieldParser::FIELD_SET;
- bool ParsedResource::isCollectionSet() const {
- return _isCollectionSet;
- }
+ return true;
+}
+
+void ParsedResource::clear() {
+ _anyResource = false;
+ _isAnyResourceSet = false;
+
+ _cluster = false;
+ _isClusterSet = false;
+
+ _db.clear();
+ _isDbSet = false;
- const std::string& ParsedResource::getCollection() const {
- dassert(_isCollectionSet);
- return _collection;
- }
+ _collection.clear();
+ _isCollectionSet = false;
+}
+
+void ParsedResource::cloneTo(ParsedResource* other) const {
+ other->clear();
- const BSONField<std::vector<string> > ParsedPrivilege::actions("actions");
- const BSONField<ParsedResource> ParsedPrivilege::resource("resource");
+ other->_anyResource = _anyResource;
+ other->_isAnyResourceSet = _isAnyResourceSet;
- ParsedPrivilege::ParsedPrivilege() {
- clear();
- }
+ other->_cluster = _cluster;
+ other->_isClusterSet = _isClusterSet;
- ParsedPrivilege::~ParsedPrivilege() {
- }
+ other->_db = _db;
+ other->_isDbSet = _isDbSet;
- bool ParsedPrivilege::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+ other->_collection = _collection;
+ other->_isCollectionSet = _isCollectionSet;
+}
- // All the mandatory fields must be present.
- if (!_isActionsSet || !_actions.size()) {
- *errMsg = stream() << "missing " << actions.name() << " field";
- return false;
- }
+std::string ParsedResource::toString() const {
+ return toBSON().toString();
+}
- if (!_isResourceSet) {
- *errMsg = stream() << "missing " << resource.name() << " field";
- return false;
- }
+void ParsedResource::setAnyResource(bool anyResource) {
+ _anyResource = anyResource;
+ _isAnyResourceSet = true;
+}
- return getResource().isValid(errMsg);
- }
+void ParsedResource::unsetAnyResource() {
+ _isAnyResourceSet = false;
+}
- BSONObj ParsedPrivilege::toBSON() const {
- BSONObjBuilder builder;
+bool ParsedResource::isAnyResourceSet() const {
+ return _isAnyResourceSet;
+}
- if (_isResourceSet) builder.append(resource(), _resource.toBSON());
+bool ParsedResource::getAnyResource() const {
+ dassert(_isAnyResourceSet);
+ return _anyResource;
+}
- if (_isActionsSet) {
- BSONArrayBuilder actionsBuilder(builder.subarrayStart(actions()));
- for (std::vector<string>::const_iterator it = _actions.begin();
- it != _actions.end();
- ++it) {
- actionsBuilder.append(*it);
- }
- actionsBuilder.doneFast();
- }
+void ParsedResource::setCluster(bool cluster) {
+ _cluster = cluster;
+ _isClusterSet = true;
+}
- return builder.obj().getOwned();
- }
+void ParsedResource::unsetCluster() {
+ _isClusterSet = false;
+}
- bool ParsedPrivilege::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
+bool ParsedResource::isClusterSet() const {
+ return _isClusterSet;
+}
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
+bool ParsedResource::getCluster() const {
+ dassert(_isClusterSet);
+ return _cluster;
+}
- FieldParser::FieldState fieldState;
- fieldState = FieldParser::extract(source, actions, &_actions, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isActionsSet = fieldState == FieldParser::FIELD_SET;
+void ParsedResource::setDb(StringData db) {
+ _db = db.toString();
+ _isDbSet = true;
+}
- fieldState = FieldParser::extract(source, resource, &_resource, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isResourceSet = fieldState == FieldParser::FIELD_SET;
+void ParsedResource::unsetDb() {
+ _isDbSet = false;
+}
- return true;
- }
+bool ParsedResource::isDbSet() const {
+ return _isDbSet;
+}
- void ParsedPrivilege::clear() {
- _actions.clear();
- _isActionsSet = false;
- _resource.clear();
- _isResourceSet = false;
+const std::string& ParsedResource::getDb() const {
+ dassert(_isDbSet);
+ return _db;
+}
- }
+void ParsedResource::setCollection(StringData collection) {
+ _collection = collection.toString();
+ _isCollectionSet = true;
+}
- std::string ParsedPrivilege::toString() const {
- return toBSON().toString();
- }
+void ParsedResource::unsetCollection() {
+ _isCollectionSet = false;
+}
- void ParsedPrivilege::setActions(const std::vector<string>& actions) {
- for (std::vector<string>::const_iterator it = actions.begin();
- it != actions.end();
- ++it) {
- addToActions((*it));
- }
- _isActionsSet = actions.size() > 0;
- }
+bool ParsedResource::isCollectionSet() const {
+ return _isCollectionSet;
+}
- void ParsedPrivilege::addToActions(const string& actions) {
- _actions.push_back(actions);
- _isActionsSet = true;
- }
+const std::string& ParsedResource::getCollection() const {
+ dassert(_isCollectionSet);
+ return _collection;
+}
- void ParsedPrivilege::unsetActions() {
- _actions.clear();
- _isActionsSet = false;
- }
+const BSONField<std::vector<string>> ParsedPrivilege::actions("actions");
+const BSONField<ParsedResource> ParsedPrivilege::resource("resource");
- bool ParsedPrivilege::isActionsSet() const {
- return _isActionsSet;
- }
+ParsedPrivilege::ParsedPrivilege() {
+ clear();
+}
- size_t ParsedPrivilege::sizeActions() const {
- return _actions.size();
- }
+ParsedPrivilege::~ParsedPrivilege() {}
- const std::vector<string>& ParsedPrivilege::getActions() const {
- dassert(_isActionsSet);
- return _actions;
+bool ParsedPrivilege::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- const string& ParsedPrivilege::getActionsAt(size_t pos) const {
- dassert(_isActionsSet);
- dassert(_actions.size() > pos);
- return _actions.at(pos);
+ // All the mandatory fields must be present.
+ if (!_isActionsSet || !_actions.size()) {
+ *errMsg = stream() << "missing " << actions.name() << " field";
+ return false;
}
- void ParsedPrivilege::setResource(const ParsedResource& resource) {
- resource.cloneTo(&_resource);
- _isResourceSet = true;
+ if (!_isResourceSet) {
+ *errMsg = stream() << "missing " << resource.name() << " field";
+ return false;
}
- void ParsedPrivilege::unsetResource() {
- _isResourceSet = false;
- }
+ return getResource().isValid(errMsg);
+}
- bool ParsedPrivilege::isResourceSet() const {
- return _isResourceSet;
- }
+BSONObj ParsedPrivilege::toBSON() const {
+ BSONObjBuilder builder;
- const ParsedResource& ParsedPrivilege::getResource() const {
- dassert(_isResourceSet);
- return _resource;
- }
-
- bool ParsedPrivilege::parsedPrivilegeToPrivilege(const ParsedPrivilege& parsedPrivilege,
- Privilege* result,
- std::string* errmsg) {
- if (!parsedPrivilege.isValid(errmsg)) {
- return false;
- }
+ if (_isResourceSet)
+ builder.append(resource(), _resource.toBSON());
- // Build actions
- ActionSet actions;
- const vector<std::string>& parsedActions = parsedPrivilege.getActions();
- Status status = ActionSet::parseActionSetFromStringVector(parsedActions, &actions);
- if (!status.isOK()) {
- *errmsg = status.reason();
- return false;
+ if (_isActionsSet) {
+ BSONArrayBuilder actionsBuilder(builder.subarrayStart(actions()));
+ for (std::vector<string>::const_iterator it = _actions.begin(); it != _actions.end();
+ ++it) {
+ actionsBuilder.append(*it);
}
-
- // Build resource
- ResourcePattern resource;
- const ParsedResource& parsedResource = parsedPrivilege.getResource();
- if (parsedResource.isAnyResourceSet() && parsedResource.getAnyResource()) {
- resource = ResourcePattern::forAnyResource();
- } else if (parsedResource.isClusterSet() && parsedResource.getCluster()) {
- resource = ResourcePattern::forClusterResource();
+ actionsBuilder.doneFast();
+ }
+
+ return builder.obj().getOwned();
+}
+
+bool ParsedPrivilege::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
+
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
+
+ FieldParser::FieldState fieldState;
+ fieldState = FieldParser::extract(source, actions, &_actions, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isActionsSet = fieldState == FieldParser::FIELD_SET;
+
+ fieldState = FieldParser::extract(source, resource, &_resource, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isResourceSet = fieldState == FieldParser::FIELD_SET;
+
+ return true;
+}
+
+void ParsedPrivilege::clear() {
+ _actions.clear();
+ _isActionsSet = false;
+ _resource.clear();
+ _isResourceSet = false;
+}
+
+std::string ParsedPrivilege::toString() const {
+ return toBSON().toString();
+}
+
+void ParsedPrivilege::setActions(const std::vector<string>& actions) {
+ for (std::vector<string>::const_iterator it = actions.begin(); it != actions.end(); ++it) {
+ addToActions((*it));
+ }
+ _isActionsSet = actions.size() > 0;
+}
+
+void ParsedPrivilege::addToActions(const string& actions) {
+ _actions.push_back(actions);
+ _isActionsSet = true;
+}
+
+void ParsedPrivilege::unsetActions() {
+ _actions.clear();
+ _isActionsSet = false;
+}
+
+bool ParsedPrivilege::isActionsSet() const {
+ return _isActionsSet;
+}
+
+size_t ParsedPrivilege::sizeActions() const {
+ return _actions.size();
+}
+
+const std::vector<string>& ParsedPrivilege::getActions() const {
+ dassert(_isActionsSet);
+ return _actions;
+}
+
+const string& ParsedPrivilege::getActionsAt(size_t pos) const {
+ dassert(_isActionsSet);
+ dassert(_actions.size() > pos);
+ return _actions.at(pos);
+}
+
+void ParsedPrivilege::setResource(const ParsedResource& resource) {
+ resource.cloneTo(&_resource);
+ _isResourceSet = true;
+}
+
+void ParsedPrivilege::unsetResource() {
+ _isResourceSet = false;
+}
+
+bool ParsedPrivilege::isResourceSet() const {
+ return _isResourceSet;
+}
+
+const ParsedResource& ParsedPrivilege::getResource() const {
+ dassert(_isResourceSet);
+ return _resource;
+}
+
+bool ParsedPrivilege::parsedPrivilegeToPrivilege(const ParsedPrivilege& parsedPrivilege,
+ Privilege* result,
+ std::string* errmsg) {
+ if (!parsedPrivilege.isValid(errmsg)) {
+ return false;
+ }
+
+ // Build actions
+ ActionSet actions;
+ const vector<std::string>& parsedActions = parsedPrivilege.getActions();
+ Status status = ActionSet::parseActionSetFromStringVector(parsedActions, &actions);
+ if (!status.isOK()) {
+ *errmsg = status.reason();
+ return false;
+ }
+
+ // Build resource
+ ResourcePattern resource;
+ const ParsedResource& parsedResource = parsedPrivilege.getResource();
+ if (parsedResource.isAnyResourceSet() && parsedResource.getAnyResource()) {
+ resource = ResourcePattern::forAnyResource();
+ } else if (parsedResource.isClusterSet() && parsedResource.getCluster()) {
+ resource = ResourcePattern::forClusterResource();
+ } else {
+ if (parsedResource.isDbSet() && !parsedResource.getDb().empty()) {
+ if (parsedResource.isCollectionSet() && !parsedResource.getCollection().empty()) {
+ resource = ResourcePattern::forExactNamespace(
+ NamespaceString(parsedResource.getDb(), parsedResource.getCollection()));
+ } else {
+ resource = ResourcePattern::forDatabaseName(parsedResource.getDb());
+ }
} else {
- if (parsedResource.isDbSet() && !parsedResource.getDb().empty()) {
- if (parsedResource.isCollectionSet() && !parsedResource.getCollection().empty()) {
- resource = ResourcePattern::forExactNamespace(
- NamespaceString(parsedResource.getDb(),
- parsedResource.getCollection()));
- } else {
- resource = ResourcePattern::forDatabaseName(parsedResource.getDb());
- }
+ if (parsedResource.isCollectionSet() && !parsedResource.getCollection().empty()) {
+ resource = ResourcePattern::forCollectionName(parsedResource.getCollection());
} else {
- if (parsedResource.isCollectionSet() && !parsedResource.getCollection().empty()) {
- resource = ResourcePattern::forCollectionName(parsedResource.getCollection());
- } else {
- resource = ResourcePattern::forAnyNormalResource();
- }
+ resource = ResourcePattern::forAnyNormalResource();
}
}
-
- *result = Privilege(resource, actions);
- return true;
}
- bool ParsedPrivilege::privilegeToParsedPrivilege(const Privilege& privilege,
- ParsedPrivilege* result,
- std::string* errmsg) {
- ParsedResource parsedResource;
- if (privilege.getResourcePattern().isExactNamespacePattern()) {
- parsedResource.setDb(privilege.getResourcePattern().databaseToMatch());
- parsedResource.setCollection(privilege.getResourcePattern().collectionToMatch());
- } else if (privilege.getResourcePattern().isDatabasePattern()) {
- parsedResource.setDb(privilege.getResourcePattern().databaseToMatch());
- parsedResource.setCollection("");
- } else if (privilege.getResourcePattern().isCollectionPattern()) {
- parsedResource.setDb("");
- parsedResource.setCollection(privilege.getResourcePattern().collectionToMatch());
- } else if (privilege.getResourcePattern().isAnyNormalResourcePattern()) {
- parsedResource.setDb("");
- parsedResource.setCollection("");
- } else if (privilege.getResourcePattern().isClusterResourcePattern()) {
- parsedResource.setCluster(true);
- } else if (privilege.getResourcePattern().isAnyResourcePattern()) {
- parsedResource.setAnyResource(true);
- } else {
- *errmsg = stream() << privilege.getResourcePattern().toString() <<
- " is not a valid user-grantable resource pattern";
- return false;
- }
-
- result->clear();
- result->setResource(parsedResource);
- result->setActions(privilege.getActions().getActionsAsStrings());
- return result->isValid(errmsg);
- }
-} // namespace mongo
+ *result = Privilege(resource, actions);
+ return true;
+}
+
+bool ParsedPrivilege::privilegeToParsedPrivilege(const Privilege& privilege,
+ ParsedPrivilege* result,
+ std::string* errmsg) {
+ ParsedResource parsedResource;
+ if (privilege.getResourcePattern().isExactNamespacePattern()) {
+ parsedResource.setDb(privilege.getResourcePattern().databaseToMatch());
+ parsedResource.setCollection(privilege.getResourcePattern().collectionToMatch());
+ } else if (privilege.getResourcePattern().isDatabasePattern()) {
+ parsedResource.setDb(privilege.getResourcePattern().databaseToMatch());
+ parsedResource.setCollection("");
+ } else if (privilege.getResourcePattern().isCollectionPattern()) {
+ parsedResource.setDb("");
+ parsedResource.setCollection(privilege.getResourcePattern().collectionToMatch());
+ } else if (privilege.getResourcePattern().isAnyNormalResourcePattern()) {
+ parsedResource.setDb("");
+ parsedResource.setCollection("");
+ } else if (privilege.getResourcePattern().isClusterResourcePattern()) {
+ parsedResource.setCluster(true);
+ } else if (privilege.getResourcePattern().isAnyResourcePattern()) {
+ parsedResource.setAnyResource(true);
+ } else {
+ *errmsg = stream() << privilege.getResourcePattern().toString()
+ << " is not a valid user-grantable resource pattern";
+ return false;
+ }
+
+ result->clear();
+ result->setResource(parsedResource);
+ result->setActions(privilege.getActions().getActionsAsStrings());
+ return result->isValid(errmsg);
+}
+} // namespace mongo
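Editorial aside, not part of the patch above: the two static converters defined in privilege_parser.cpp are what round-trip between the BSON form stored in role documents and the in-memory Privilege type. A minimal sketch of that round trip, assuming the usual mongo BSON headers and the BSON()/BSON_ARRAY() macros used in the test file below:

    #include "mongo/db/auth/privilege.h"
    #include "mongo/db/auth/privilege_parser.h"
    #include "mongo/db/jsobj.h"

    using namespace mongo;

    bool roundTripExample() {
        std::string errmsg;
        ParsedPrivilege parsed;

        // Parse a privilege document of the shape accepted by the role management
        // commands: a "resource" sub-document plus an "actions" array.
        parsed.parseBSON(BSON("resource" << BSON("db"
                                                 << "test"
                                                 << "collection"
                                                 << "foo") << "actions" << BSON_ARRAY("find")),
                         &errmsg);
        if (!parsed.isValid(&errmsg))
            return false;

        // Convert to the in-memory representation used by the authorization code.
        Privilege privilege;
        if (!ParsedPrivilege::parsedPrivilegeToPrivilege(parsed, &privilege, &errmsg))
            return false;

        // ...and back again, e.g. when reporting a role's privileges.
        ParsedPrivilege reparsed;
        if (!ParsedPrivilege::privilegeToParsedPrivilege(privilege, &reparsed, &errmsg))
            return false;
        return reparsed.isValid(&errmsg);
    }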
diff --git a/src/mongo/db/auth/privilege_parser.h b/src/mongo/db/auth/privilege_parser.h
index aec390e3973..a48297c862f 100644
--- a/src/mongo/db/auth/privilege_parser.h
+++ b/src/mongo/db/auth/privilege_parser.h
@@ -37,160 +37,160 @@
namespace mongo {
- class Privilege;
+class Privilege;
+
+/**
+ * This class is used to parse documents describing resources as they are represented as part
+ * of privileges granted to roles in the role management commands.
+ */
+class ParsedResource : BSONSerializable {
+ MONGO_DISALLOW_COPYING(ParsedResource);
+
+public:
+ //
+ // schema declarations
+ //
+
+ static const BSONField<bool> anyResource;
+ static const BSONField<bool> cluster;
+ static const BSONField<std::string> db;
+ static const BSONField<std::string> collection;
+
+ //
+ // construction / destruction
+ //
+
+ ParsedResource();
+ ~ParsedResource();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(ParsedResource* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ bool isValid(std::string* errMsg) const;
+ BSONObj toBSON() const;
+ bool parseBSON(const BSONObj& source, std::string* errMsg);
+ void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setAnyResource(bool anyResource);
+ void unsetAnyResource();
+ bool isAnyResourceSet() const;
+ bool getAnyResource() const;
+
+ void setCluster(bool cluster);
+ void unsetCluster();
+ bool isClusterSet() const;
+ bool getCluster() const;
+
+ void setDb(StringData db);
+ void unsetDb();
+ bool isDbSet() const;
+ const std::string& getDb() const;
+
+ void setCollection(StringData collection);
+ void unsetCollection();
+ bool isCollectionSet() const;
+ const std::string& getCollection() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (O) Only present if the resource matches anything.
+ bool _anyResource;
+ bool _isAnyResourceSet;
+
+ // (O) Only present if the resource is the cluster
+ bool _cluster;
+ bool _isClusterSet;
+
+ // (O) database portion of the resource
+ std::string _db;
+ bool _isDbSet;
+
+ // (O) collection portion of the resource
+ std::string _collection;
+ bool _isCollectionSet;
+};
+
+/**
+ * This class is used to parse documents describing privileges in the role management commands.
+ */
+class ParsedPrivilege : BSONSerializable {
+ MONGO_DISALLOW_COPYING(ParsedPrivilege);
+
+public:
+ //
+ // schema declarations
+ //
+
+ static const BSONField<std::vector<std::string>> actions;
+ static const BSONField<ParsedResource> resource;
+
+ //
+ // construction / destruction
+ //
+
+ ParsedPrivilege();
+ ~ParsedPrivilege();
/**
- * This class is used to parse documents describing resources as they are represented as part
- * of privileges granted to roles in the role management commands.
+ * Takes a parsedPrivilege and turns it into a true Privilege object.
*/
- class ParsedResource : BSONSerializable {
- MONGO_DISALLOW_COPYING(ParsedResource);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<bool> anyResource;
- static const BSONField<bool> cluster;
- static const BSONField<std::string> db;
- static const BSONField<std::string> collection;
-
- //
- // construction / destruction
- //
-
- ParsedResource();
- ~ParsedResource();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(ParsedResource* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- bool isValid(std::string* errMsg) const;
- BSONObj toBSON() const;
- bool parseBSON(const BSONObj& source, std::string* errMsg);
- void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setAnyResource(bool anyResource);
- void unsetAnyResource();
- bool isAnyResourceSet() const;
- bool getAnyResource() const;
-
- void setCluster(bool cluster);
- void unsetCluster();
- bool isClusterSet() const;
- bool getCluster() const;
-
- void setDb(StringData db);
- void unsetDb();
- bool isDbSet() const;
- const std::string& getDb() const;
-
- void setCollection(StringData collection);
- void unsetCollection();
- bool isCollectionSet() const;
- const std::string& getCollection() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (O) Only present if the resource matches anything.
- bool _anyResource;
- bool _isAnyResourceSet;
-
- // (O) Only present if the resource is the cluster
- bool _cluster;
- bool _isClusterSet;
-
- // (O) database portion of the resource
- std::string _db;
- bool _isDbSet;
-
- // (O) collection portion of the resource
- std::string _collection;
- bool _isCollectionSet;
- };
-
+ static bool parsedPrivilegeToPrivilege(const ParsedPrivilege& parsedPrivilege,
+ Privilege* result,
+ std::string* errmsg);
/**
- * This class is used to parse documents describing privileges in the role managment commands.
+ * Takes a Privilege object and turns it into a ParsedPrivilege.
*/
- class ParsedPrivilege : BSONSerializable {
- MONGO_DISALLOW_COPYING(ParsedPrivilege);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<std::vector<std::string> > actions;
- static const BSONField<ParsedResource> resource;
-
- //
- // construction / destruction
- //
-
- ParsedPrivilege();
- ~ParsedPrivilege();
-
- /**
- * Takes a parsedPrivilege and turns it into a true Privilege object.
- */
- static bool parsedPrivilegeToPrivilege(const ParsedPrivilege& parsedPrivilege,
- Privilege* result,
- std::string* errmsg);
- /**
- * Takes a Privilege object and turns it into a ParsedPrivilege.
- */
- static bool privilegeToParsedPrivilege(const Privilege& privilege,
- ParsedPrivilege* result,
- std::string* errmsg);
-
- //
- // bson serializable interface implementation
- //
-
- bool isValid(std::string* errMsg) const;
- BSONObj toBSON() const;
- bool parseBSON(const BSONObj& source, std::string* errMsg);
- void clear();
- std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setActions(const std::vector<std::string>& actions);
- void addToActions(const std::string& actions);
- void unsetActions();
- bool isActionsSet() const;
- size_t sizeActions() const;
- const std::vector<std::string>& getActions() const;
- const std::string& getActionsAt(size_t pos) const;
-
- void setResource(const ParsedResource& resource);
- void unsetResource();
- bool isResourceSet() const;
- const ParsedResource& getResource() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) Array of action types
- std::vector<std::string> _actions;
- bool _isActionsSet;
-
- // (M) Object describing the resource pattern of this privilege
- ParsedResource _resource;
- bool _isResourceSet;
- };
-
-} // namespace mongo
+ static bool privilegeToParsedPrivilege(const Privilege& privilege,
+ ParsedPrivilege* result,
+ std::string* errmsg);
+
+ //
+ // bson serializable interface implementation
+ //
+
+ bool isValid(std::string* errMsg) const;
+ BSONObj toBSON() const;
+ bool parseBSON(const BSONObj& source, std::string* errMsg);
+ void clear();
+ std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setActions(const std::vector<std::string>& actions);
+ void addToActions(const std::string& actions);
+ void unsetActions();
+ bool isActionsSet() const;
+ size_t sizeActions() const;
+ const std::vector<std::string>& getActions() const;
+ const std::string& getActionsAt(size_t pos) const;
+
+ void setResource(const ParsedResource& resource);
+ void unsetResource();
+ bool isResourceSet() const;
+ const ParsedResource& getResource() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) Array of action types
+ std::vector<std::string> _actions;
+ bool _isActionsSet;
+
+ // (M) Object describing the resource pattern of this privilege
+ ParsedResource _resource;
+ bool _isResourceSet;
+};
+
+} // namespace mongo
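Another aside, not part of the patch: the set/unset/isSet/get triads declared in the header above follow the optional-field convention these parser classes use, so a privilege document can also be built programmatically rather than parsed. A small sketch, under the assumption that the header above pulls in the BSON types it needs:

    #include <string>
    #include <vector>

    #include "mongo/db/auth/privilege_parser.h"

    using namespace mongo;

    BSONObj buildFindOnTestFoo() {
        // Describe the resource: a concrete namespace, so both db and collection are set.
        ParsedResource resource;
        resource.setDb("test");
        resource.setCollection("foo");

        // Attach the resource and a single action to a privilege description.
        ParsedPrivilege privilege;
        privilege.setResource(resource);
        privilege.setActions(std::vector<std::string>(1, "find"));

        // Produces a document of the form
        //   { resource: { db: "test", collection: "foo" }, actions: [ "find" ] }
        return privilege.toBSON();
    }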
diff --git a/src/mongo/db/auth/privilege_parser_test.cpp b/src/mongo/db/auth/privilege_parser_test.cpp
index 798a197ae91..8885922b1dd 100644
--- a/src/mongo/db/auth/privilege_parser_test.cpp
+++ b/src/mongo/db/auth/privilege_parser_test.cpp
@@ -38,173 +38,180 @@
namespace mongo {
namespace {
- TEST(PrivilegeParserTest, IsValidTest) {
- ParsedPrivilege parsedPrivilege;
- std::string errmsg;
-
- // must have resource
- parsedPrivilege.parseBSON(BSON("actions" << BSON_ARRAY("find")), &errmsg);
- ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
-
- // must have actions
- parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true)), &errmsg);
- ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
-
- // resource can't have cluster as well as db or collection
- parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true <<
- "db" << "" <<
- "collection" << "") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
-
- // resource can't have db without collection
- parsedPrivilege.parseBSON(BSON("resource" << BSON("db" << "") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
-
- // resource can't have collection without db
- parsedPrivilege.parseBSON(BSON("resource" << BSON("collection" << "") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
-
- // Works with wildcard db and resource
- parsedPrivilege.parseBSON(BSON("resource" << BSON("db" << "" << "collection" << "") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT(parsedPrivilege.isValid(&errmsg));
-
- // Works with real db and collection
- parsedPrivilege.parseBSON(BSON("resource" << BSON("db" << "test" <<
- "collection" << "foo") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT(parsedPrivilege.isValid(&errmsg));
-
- // Works with cluster resource
- parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true) <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT(parsedPrivilege.isValid(&errmsg));
- }
-
- TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
- ParsedPrivilege parsedPrivilege;
- Privilege privilege;
- std::string errmsg;
- std::vector<std::string> actionsVector;
- actionsVector.push_back("find");
-
- // Works with wildcard db and resource
- parsedPrivilege.parseBSON(BSON("resource" << BSON("db" << "" << "collection" << "") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
- ASSERT(privilege.getActions().contains(ActionType::find));
- ASSERT(!privilege.getActions().contains(ActionType::insert));
- ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forAnyNormalResource());
-
- ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(parsedPrivilege.isResourceSet());
- ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet());
- ASSERT(parsedPrivilege.getResource().isDbSet());
- ASSERT(parsedPrivilege.getResource().isCollectionSet());
- ASSERT_EQUALS("", parsedPrivilege.getResource().getDb());
- ASSERT_EQUALS("", parsedPrivilege.getResource().getCollection());
- ASSERT(parsedPrivilege.isActionsSet());
- ASSERT(actionsVector == parsedPrivilege.getActions());
-
- // Works with exact namespaces
- parsedPrivilege.parseBSON(BSON("resource" << BSON("db" << "test" <<
- "collection" << "foo") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
- ASSERT(privilege.getActions().contains(ActionType::find));
- ASSERT(!privilege.getActions().contains(ActionType::insert));
- ASSERT_EQUALS(privilege.getResourcePattern(),
- ResourcePattern::forExactNamespace(NamespaceString("test.foo")));
-
- ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(parsedPrivilege.isResourceSet());
- ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet());
- ASSERT(parsedPrivilege.getResource().isDbSet());
- ASSERT(parsedPrivilege.getResource().isCollectionSet());
- ASSERT_EQUALS("test", parsedPrivilege.getResource().getDb());
- ASSERT_EQUALS("foo", parsedPrivilege.getResource().getCollection());
- ASSERT(parsedPrivilege.isActionsSet());
- ASSERT(actionsVector == parsedPrivilege.getActions());
-
- // Works with database resource
- parsedPrivilege.parseBSON(BSON("resource" << BSON("db" << "test" <<
- "collection" << "") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
- ASSERT(privilege.getActions().contains(ActionType::find));
- ASSERT(!privilege.getActions().contains(ActionType::insert));
- ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forDatabaseName("test"));
-
- ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(parsedPrivilege.isResourceSet());
- ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet());
- ASSERT(parsedPrivilege.getResource().isDbSet());
- ASSERT(parsedPrivilege.getResource().isCollectionSet());
- ASSERT_EQUALS("test", parsedPrivilege.getResource().getDb());
- ASSERT_EQUALS("", parsedPrivilege.getResource().getCollection());
- ASSERT(parsedPrivilege.isActionsSet());
- ASSERT(actionsVector == parsedPrivilege.getActions());
-
- // Works with collection resource
- parsedPrivilege.parseBSON(BSON("resource" << BSON("db" << "" <<
- "collection" << "foo") <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
- ASSERT(privilege.getActions().contains(ActionType::find));
- ASSERT(!privilege.getActions().contains(ActionType::insert));
- ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forCollectionName("foo"));
-
- ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(parsedPrivilege.isResourceSet());
- ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet());
- ASSERT(parsedPrivilege.getResource().isDbSet());
- ASSERT(parsedPrivilege.getResource().isCollectionSet());
- ASSERT_EQUALS("", parsedPrivilege.getResource().getDb());
- ASSERT_EQUALS("foo", parsedPrivilege.getResource().getCollection());
- ASSERT(parsedPrivilege.isActionsSet());
- ASSERT(actionsVector == parsedPrivilege.getActions());
-
- // Works with cluster resource
- parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true) <<
- "actions" << BSON_ARRAY("find")),
- &errmsg);
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
- ASSERT(privilege.getActions().contains(ActionType::find));
- ASSERT(!privilege.getActions().contains(ActionType::insert));
- ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forClusterResource());
-
- ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
- ASSERT(parsedPrivilege.isValid(&errmsg));
- ASSERT(parsedPrivilege.isResourceSet());
- ASSERT(parsedPrivilege.getResource().isClusterSet());
- ASSERT(parsedPrivilege.getResource().getCluster());
- ASSERT_FALSE(parsedPrivilege.getResource().isDbSet());
- ASSERT_FALSE(parsedPrivilege.getResource().isCollectionSet());
- ASSERT(parsedPrivilege.isActionsSet());
- ASSERT(actionsVector == parsedPrivilege.getActions());
- }
+TEST(PrivilegeParserTest, IsValidTest) {
+ ParsedPrivilege parsedPrivilege;
+ std::string errmsg;
+
+ // must have resource
+ parsedPrivilege.parseBSON(BSON("actions" << BSON_ARRAY("find")), &errmsg);
+ ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
+
+ // must have actions
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true)), &errmsg);
+ ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
+
+ // resource can't have cluster as well as db or collection
+ parsedPrivilege.parseBSON(
+ BSON("resource" << BSON("cluster" << true << "db"
+ << ""
+ << "collection"
+ << "") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
+
+ // resource can't have db without collection
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << "") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
+
+ // resource can't have collection without db
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("collection"
+ << "") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
+
+ // Works with wildcard db and resource
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << ""
+ << "collection"
+ << "") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+
+ // Works with real db and collection
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << "test"
+ << "collection"
+ << "foo") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+
+ // Works with cluster resource
+ parsedPrivilege.parseBSON(
+ BSON("resource" << BSON("cluster" << true) << "actions" << BSON_ARRAY("find")), &errmsg);
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+}
+
+TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
+ ParsedPrivilege parsedPrivilege;
+ Privilege privilege;
+ std::string errmsg;
+ std::vector<std::string> actionsVector;
+ actionsVector.push_back("find");
+
+ // Works with wildcard db and resource
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << ""
+ << "collection"
+ << "") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
+ ASSERT(privilege.getActions().contains(ActionType::find));
+ ASSERT(!privilege.getActions().contains(ActionType::insert));
+ ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forAnyNormalResource());
+
+ ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(parsedPrivilege.isResourceSet());
+ ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet());
+ ASSERT(parsedPrivilege.getResource().isDbSet());
+ ASSERT(parsedPrivilege.getResource().isCollectionSet());
+ ASSERT_EQUALS("", parsedPrivilege.getResource().getDb());
+ ASSERT_EQUALS("", parsedPrivilege.getResource().getCollection());
+ ASSERT(parsedPrivilege.isActionsSet());
+ ASSERT(actionsVector == parsedPrivilege.getActions());
+
+ // Works with exact namespaces
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << "test"
+ << "collection"
+ << "foo") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
+ ASSERT(privilege.getActions().contains(ActionType::find));
+ ASSERT(!privilege.getActions().contains(ActionType::insert));
+ ASSERT_EQUALS(privilege.getResourcePattern(),
+ ResourcePattern::forExactNamespace(NamespaceString("test.foo")));
+
+ ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(parsedPrivilege.isResourceSet());
+ ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet());
+ ASSERT(parsedPrivilege.getResource().isDbSet());
+ ASSERT(parsedPrivilege.getResource().isCollectionSet());
+ ASSERT_EQUALS("test", parsedPrivilege.getResource().getDb());
+ ASSERT_EQUALS("foo", parsedPrivilege.getResource().getCollection());
+ ASSERT(parsedPrivilege.isActionsSet());
+ ASSERT(actionsVector == parsedPrivilege.getActions());
+
+ // Works with database resource
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << "test"
+ << "collection"
+ << "") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
+ ASSERT(privilege.getActions().contains(ActionType::find));
+ ASSERT(!privilege.getActions().contains(ActionType::insert));
+ ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forDatabaseName("test"));
+
+ ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(parsedPrivilege.isResourceSet());
+ ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet());
+ ASSERT(parsedPrivilege.getResource().isDbSet());
+ ASSERT(parsedPrivilege.getResource().isCollectionSet());
+ ASSERT_EQUALS("test", parsedPrivilege.getResource().getDb());
+ ASSERT_EQUALS("", parsedPrivilege.getResource().getCollection());
+ ASSERT(parsedPrivilege.isActionsSet());
+ ASSERT(actionsVector == parsedPrivilege.getActions());
+
+ // Works with collection resource
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << ""
+ << "collection"
+ << "foo") << "actions" << BSON_ARRAY("find")),
+ &errmsg);
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
+ ASSERT(privilege.getActions().contains(ActionType::find));
+ ASSERT(!privilege.getActions().contains(ActionType::insert));
+ ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forCollectionName("foo"));
+
+ ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(parsedPrivilege.isResourceSet());
+ ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet());
+ ASSERT(parsedPrivilege.getResource().isDbSet());
+ ASSERT(parsedPrivilege.getResource().isCollectionSet());
+ ASSERT_EQUALS("", parsedPrivilege.getResource().getDb());
+ ASSERT_EQUALS("foo", parsedPrivilege.getResource().getCollection());
+ ASSERT(parsedPrivilege.isActionsSet());
+ ASSERT(actionsVector == parsedPrivilege.getActions());
+
+ // Works with cluster resource
+ parsedPrivilege.parseBSON(
+ BSON("resource" << BSON("cluster" << true) << "actions" << BSON_ARRAY("find")), &errmsg);
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg));
+ ASSERT(privilege.getActions().contains(ActionType::find));
+ ASSERT(!privilege.getActions().contains(ActionType::insert));
+ ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forClusterResource());
+
+ ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg));
+ ASSERT(parsedPrivilege.isValid(&errmsg));
+ ASSERT(parsedPrivilege.isResourceSet());
+ ASSERT(parsedPrivilege.getResource().isClusterSet());
+ ASSERT(parsedPrivilege.getResource().getCluster());
+ ASSERT_FALSE(parsedPrivilege.getResource().isDbSet());
+ ASSERT_FALSE(parsedPrivilege.getResource().isCollectionSet());
+ ASSERT(parsedPrivilege.isActionsSet());
+ ASSERT(actionsVector == parsedPrivilege.getActions());
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/auth/resource_pattern.cpp b/src/mongo/db/auth/resource_pattern.cpp
index 6bf23368327..fe62ced9ca9 100644
--- a/src/mongo/db/auth/resource_pattern.cpp
+++ b/src/mongo/db/auth/resource_pattern.cpp
@@ -38,8 +38,8 @@
namespace mongo {
- std::string ResourcePattern::toString() const {
- switch (_matchType) {
+std::string ResourcePattern::toString() const {
+ switch (_matchType) {
case matchNever:
return "<no resources>";
case matchClusterResource:
@@ -56,11 +56,11 @@ namespace mongo {
return "<all resources>";
default:
return "<unknown resource pattern type>";
- }
}
+}
- std::ostream& operator<<(std::ostream& os, const ResourcePattern& pattern) {
- return os << pattern.toString();
- }
+std::ostream& operator<<(std::ostream& os, const ResourcePattern& pattern) {
+ return os << pattern.toString();
+}
} // namespace mongo
diff --git a/src/mongo/db/auth/resource_pattern.h b/src/mongo/db/auth/resource_pattern.h
index c50876b74c5..2896c0aeb24 100644
--- a/src/mongo/db/auth/resource_pattern.h
+++ b/src/mongo/db/auth/resource_pattern.h
@@ -37,174 +37,181 @@
namespace mongo {
+/**
+ * Representation of names of various kinds of resources targetable by the access control
+ * system.
+ *
+ * Three of the types of name, "forDatabaseName", "forExactNamespace" and "forClusterResource",
+ * can represent concrete resources targeted for manipulation by database operations. All of
+ * the types also act as patterns, useful for matching against groups of concrete resources as
+ * part of the access control system. See buildResourceSearchList() in
+ * authorization_session.cpp for details.
+ */
+class ResourcePattern {
+public:
+ /**
+ * Returns a pattern that matches absolutely any resource.
+ */
+ static ResourcePattern forAnyResource() {
+ return ResourcePattern(matchAnyResource);
+ }
+
+ /**
+ * Returns a pattern that matches any database or collection resource except collections for
+ * which ns.isSystem().
+ */
+ static ResourcePattern forAnyNormalResource() {
+ return ResourcePattern(matchAnyNormalResource);
+ }
+
+ /**
+ * Returns a pattern that matches the "cluster" resource.
+ */
+ static ResourcePattern forClusterResource() {
+ return ResourcePattern(matchClusterResource);
+ }
+
+ /**
+ * Returns a pattern that matches the named database, and NamespaceStrings
+ * "ns" for which ns.isSystem() is false and ns.db() == dbname.
+ */
+ static ResourcePattern forDatabaseName(StringData dbName) {
+ return ResourcePattern(matchDatabaseName, NamespaceString(dbName, ""));
+ }
+
+ /**
+ * Returns a pattern that matches NamespaceStrings "ns" for which ns.coll() ==
+ * collectionName.
+ */
+ static ResourcePattern forCollectionName(StringData collectionName) {
+ return ResourcePattern(matchCollectionName, NamespaceString("", collectionName));
+ }
+
+ /**
+ * Returns a pattern that matches the given exact namespace string.
+ */
+ static ResourcePattern forExactNamespace(const NamespaceString& ns) {
+ return ResourcePattern(matchExactNamespace, ns);
+ }
+
+ /**
+ * Constructs a pattern that never matches.
+ */
+ ResourcePattern() : _matchType(matchNever) {}
+
+ /**
+ * Returns true if this pattern matches only exact namespaces.
+ */
+ bool isExactNamespacePattern() const {
+ return _matchType == matchExactNamespace;
+ }
+
/**
- * Representation of names of various kinds of resources targetable by the access control
- * system.
+ * Returns true if this pattern matches on the database name only.
+ */
+ bool isDatabasePattern() const {
+ return _matchType == matchDatabaseName;
+ }
+
+ /**
+ * Returns true if this pattern matches on the collection name only.
+ */
+ bool isCollectionPattern() const {
+ return _matchType == matchCollectionName;
+ }
+
+ /**
+ * Returns true if this pattern matches the cluster resource only.
+ */
+ bool isClusterResourcePattern() const {
+ return _matchType == matchClusterResource;
+ }
+
+ /**
+ * Returns true if this pattern matches only any normal resource.
+ */
+ bool isAnyNormalResourcePattern() const {
+ return _matchType == matchAnyNormalResource;
+ }
+
+ /**
+ * Returns true if this pattern matches any resource.
+ */
+ bool isAnyResourcePattern() const {
+ return _matchType == matchAnyResource;
+ }
+
+ /**
+ * Returns the namespace that this pattern matches.
*
- * Three of the types of name, "forDatabaseName", "forExactNamespace" and "forClusterResource",
- * can represent concrete resources targeted for manipulation by database operations. All of
- * the types also act as patterns, useful for matching against groups of concrete resources as
- * part of the access control system. See buildResourceSearchList() in
- * authorization_session.cpp for details.
- */
- class ResourcePattern {
- public:
- /**
- * Returns a pattern that matches absolutely any resource.
- */
- static ResourcePattern forAnyResource() {
- return ResourcePattern(matchAnyResource);
- }
-
- /**
- * Returns a pattern that matches any database or collection resource except collections for
- * which ns.isSystem().
- */
- static ResourcePattern forAnyNormalResource() {
- return ResourcePattern(matchAnyNormalResource);
- }
-
- /**
- * Returns a pattern that matches the "cluster" resource.
- */
- static ResourcePattern forClusterResource() {
- return ResourcePattern(matchClusterResource);
- }
-
- /**
- * Returns a pattern that matches the named database, and NamespaceStrings
- * "ns" for which ns.isSystem() is false and ns.db() == dbname.
- */
- static ResourcePattern forDatabaseName(StringData dbName) {
- return ResourcePattern(matchDatabaseName, NamespaceString(dbName, ""));
- }
-
- /**
- * Returns a pattern that matches NamespaceStrings "ns" for which ns.coll() ==
- * collectionName.
- */
- static ResourcePattern forCollectionName(StringData collectionName) {
- return ResourcePattern(matchCollectionName, NamespaceString("", collectionName));
- }
-
- /**
- * Returns a pattern that matches the given exact namespace string.
- */
- static ResourcePattern forExactNamespace(const NamespaceString& ns) {
- return ResourcePattern(matchExactNamespace, ns);
- }
-
- /**
- * Constructs a pattern that never matches.
- */
- ResourcePattern() : _matchType(matchNever) {}
-
- /**
- * Returns true if this pattern matches only exact namespaces.
- */
- bool isExactNamespacePattern() const {
- return _matchType == matchExactNamespace;
- }
-
- /**
- * Returns true if this pattern matches on the database name only.
- */
- bool isDatabasePattern() const {
- return _matchType == matchDatabaseName;
- }
-
- /**
- * Returns true if this pattern matches on the collection name only.
- */
- bool isCollectionPattern() const {
- return _matchType == matchCollectionName;
- }
-
- /**
- * Returns true if this pattern matches the cluster resource only.
- */
- bool isClusterResourcePattern() const {
- return _matchType == matchClusterResource;
- }
-
- /**
- * Returns true if this pattern matches only any normal resource.
- */
- bool isAnyNormalResourcePattern() const {
- return _matchType == matchAnyNormalResource;
- }
-
- /**
- * Returns true if this pattern matches any resource.
- */
- bool isAnyResourcePattern() const {
- return _matchType == matchAnyResource;
- }
-
- /**
- * Returns the namespace that this pattern matches.
- *
- * Behavior is undefined unless isExactNamespacePattern() is true.
- */
- const NamespaceString& ns() const { return _ns; }
-
- /**
- * Returns the database that this pattern matches.
- *
- * Behavior is undefined unless the pattern is of type matchDatabaseName or
- * matchExactNamespace
- */
- StringData databaseToMatch() const { return _ns.db(); }
-
- /**
- * Returns the collection that this pattern matches.
- *
- * Behavior is undefined unless the pattern is of type matchCollectionName or
- * matchExactNamespace
- */
- StringData collectionToMatch() const { return _ns.coll(); }
-
- std::string toString() const;
-
- inline size_t hash() const {
- // TODO: Choose a better hash function.
- return MONGO_HASH_NAMESPACE::hash<std::string>()(_ns.ns()) ^ _matchType;
- }
-
- bool operator==(const ResourcePattern& other) const {
- if (_matchType != other._matchType)
- return false;
- if (_ns != other._ns)
- return false;
- return true;
- }
-
- private:
- enum MatchType {
- matchNever = 0, /// Matches no resource.
- matchClusterResource = 1, /// Matches if the resource is the cluster resource.
- matchDatabaseName = 2, /// Matches if the resource's database name is _ns.db().
- matchCollectionName = 3, /// Matches if the resource's collection name is _ns.coll().
- matchExactNamespace = 4, /// Matches if the resource's namespace name is _ns.
- matchAnyNormalResource = 5, /// Matches all databases and non-system collections.
- matchAnyResource = 6 /// Matches absolutely anything.
- };
-
- explicit ResourcePattern(MatchType type) : _matchType(type) {}
- ResourcePattern(MatchType type, const NamespaceString& ns) : _matchType(type), _ns(ns) {}
-
- MatchType _matchType;
- NamespaceString _ns;
+ * Behavior is undefined unless isExactNamespacePattern() is true.
+ */
+ const NamespaceString& ns() const {
+ return _ns;
+ }
+
+ /**
+ * Returns the database that this pattern matches.
+ *
+ * Behavior is undefined unless the pattern is of type matchDatabaseName or
+ * matchExactNamespace
+ */
+ StringData databaseToMatch() const {
+ return _ns.db();
+ }
+
+ /**
+ * Returns the collection that this pattern matches.
+ *
+ * Behavior is undefined unless the pattern is of type matchCollectionName or
+ * matchExactNamespace
+ */
+ StringData collectionToMatch() const {
+ return _ns.coll();
+ }
+
+ std::string toString() const;
+
+ inline size_t hash() const {
+ // TODO: Choose a better hash function.
+ return MONGO_HASH_NAMESPACE::hash<std::string>()(_ns.ns()) ^ _matchType;
+ }
+
+ bool operator==(const ResourcePattern& other) const {
+ if (_matchType != other._matchType)
+ return false;
+ if (_ns != other._ns)
+ return false;
+ return true;
+ }
+
+private:
+ enum MatchType {
+ matchNever = 0, /// Matches no resource.
+ matchClusterResource = 1, /// Matches if the resource is the cluster resource.
+ matchDatabaseName = 2, /// Matches if the resource's database name is _ns.db().
+ matchCollectionName = 3, /// Matches if the resource's collection name is _ns.coll().
+ matchExactNamespace = 4, /// Matches if the resource's namespace name is _ns.
+ matchAnyNormalResource = 5, /// Matches all databases and non-system collections.
+ matchAnyResource = 6 /// Matches absolutely anything.
};
- std::ostream& operator<<(std::ostream& os, const ResourcePattern& pattern);
+ explicit ResourcePattern(MatchType type) : _matchType(type) {}
+ ResourcePattern(MatchType type, const NamespaceString& ns) : _matchType(type), _ns(ns) {}
+
+ MatchType _matchType;
+ NamespaceString _ns;
+};
+
+std::ostream& operator<<(std::ostream& os, const ResourcePattern& pattern);
} // namespace mongo
MONGO_HASH_NAMESPACE_START
- template <> struct hash<mongo::ResourcePattern> {
- size_t operator()(const mongo::ResourcePattern& resource) const {
- return resource.hash();
- }
- };
+template <>
+struct hash<mongo::ResourcePattern> {
+ size_t operator()(const mongo::ResourcePattern& resource) const {
+ return resource.hash();
+ }
+};
MONGO_HASH_NAMESPACE_END
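One more aside on the reformatted header above: the static factories and the is*Pattern() predicates are the only public way to build and inspect patterns, since the constructors taking a MatchType are private. A short sketch, assuming NamespaceString comes from its usual header:

    #include "mongo/db/auth/resource_pattern.h"
    #include "mongo/db/namespace_string.h"

    using namespace mongo;

    bool resourcePatternExamples() {
        // A pattern covering every non-system collection in the "test" database.
        ResourcePattern dbPattern = ResourcePattern::forDatabaseName("test");

        // A pattern naming exactly one namespace.
        ResourcePattern nsPattern =
            ResourcePattern::forExactNamespace(NamespaceString("test.foo"));

        // The is*Pattern() predicates report which factory produced a pattern, and the
        // *ToMatch() accessors expose the stored pieces; per the comments above, calling
        // an accessor on the wrong pattern type is undefined behaviour.
        return dbPattern.isDatabasePattern() && dbPattern.databaseToMatch() == "test" &&
            nsPattern.isExactNamespacePattern() && nsPattern.collectionToMatch() == "foo";
    }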
diff --git a/src/mongo/db/auth/role_graph.cpp b/src/mongo/db/auth/role_graph.cpp
index 98ea177cc43..a0861b98236 100644
--- a/src/mongo/db/auth/role_graph.cpp
+++ b/src/mongo/db/auth/role_graph.cpp
@@ -40,522 +40,520 @@
namespace mongo {
namespace {
- PrivilegeVector emptyPrivilegeVector;
-} // namespace
-
- RoleGraph::RoleGraph() {};
- RoleGraph::RoleGraph(const RoleGraph& other) : _roleToSubordinates(other._roleToSubordinates),
- _roleToIndirectSubordinates(other._roleToIndirectSubordinates),
- _roleToMembers(other._roleToMembers),
- _directPrivilegesForRole(other._directPrivilegesForRole),
- _allPrivilegesForRole(other._allPrivilegesForRole),
- _allRoles(other._allRoles) {}
- RoleGraph::~RoleGraph() {};
-
- void RoleGraph::swap(RoleGraph& other) {
- using std::swap;
- swap(this->_roleToSubordinates, other._roleToSubordinates);
- swap(this->_roleToIndirectSubordinates, other._roleToIndirectSubordinates);
- swap(this->_roleToMembers, other._roleToMembers);
- swap(this->_directPrivilegesForRole, other._directPrivilegesForRole);
- swap(this->_allPrivilegesForRole, other._allPrivilegesForRole);
- swap(this->_allRoles, other._allRoles);
- }
-
- void swap(RoleGraph& lhs, RoleGraph& rhs) {
- lhs.swap(rhs);
- }
-
- bool RoleGraph::roleExists(const RoleName& role) {
- _createBuiltinRoleIfNeeded(role);
- return _roleExistsDontCreateBuiltin(role);
- }
-
- bool RoleGraph::_roleExistsDontCreateBuiltin(const RoleName& role) {
- EdgeSet::const_iterator edgeIt = _roleToSubordinates.find(role);
- if (edgeIt == _roleToSubordinates.end())
- return false;
- edgeIt = _roleToMembers.find(role);
- fassert(16825, edgeIt != _roleToMembers.end());
-
- RolePrivilegeMap::const_iterator strIt = _directPrivilegesForRole.find(role);
- if (strIt == _directPrivilegesForRole.end())
- return false;
- strIt = _allPrivilegesForRole.find(role);
- fassert(16826, strIt != _allPrivilegesForRole.end());
- return true;
- }
-
- Status RoleGraph::createRole(const RoleName& role) {
- if (roleExists(role)) {
- return Status(ErrorCodes::DuplicateKey,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " already exists",
- 0);
- }
-
- _createRoleDontCheckIfRoleExists(role);
- return Status::OK();
+PrivilegeVector emptyPrivilegeVector;
+} // namespace
+
+RoleGraph::RoleGraph(){};
+RoleGraph::RoleGraph(const RoleGraph& other)
+ : _roleToSubordinates(other._roleToSubordinates),
+ _roleToIndirectSubordinates(other._roleToIndirectSubordinates),
+ _roleToMembers(other._roleToMembers),
+ _directPrivilegesForRole(other._directPrivilegesForRole),
+ _allPrivilegesForRole(other._allPrivilegesForRole),
+ _allRoles(other._allRoles) {}
+RoleGraph::~RoleGraph(){};
+
+void RoleGraph::swap(RoleGraph& other) {
+ using std::swap;
+ swap(this->_roleToSubordinates, other._roleToSubordinates);
+ swap(this->_roleToIndirectSubordinates, other._roleToIndirectSubordinates);
+ swap(this->_roleToMembers, other._roleToMembers);
+ swap(this->_directPrivilegesForRole, other._directPrivilegesForRole);
+ swap(this->_allPrivilegesForRole, other._allPrivilegesForRole);
+ swap(this->_allRoles, other._allRoles);
+}
+
+void swap(RoleGraph& lhs, RoleGraph& rhs) {
+ lhs.swap(rhs);
+}
+
+bool RoleGraph::roleExists(const RoleName& role) {
+ _createBuiltinRoleIfNeeded(role);
+ return _roleExistsDontCreateBuiltin(role);
+}
+
+bool RoleGraph::_roleExistsDontCreateBuiltin(const RoleName& role) {
+ EdgeSet::const_iterator edgeIt = _roleToSubordinates.find(role);
+ if (edgeIt == _roleToSubordinates.end())
+ return false;
+ edgeIt = _roleToMembers.find(role);
+ fassert(16825, edgeIt != _roleToMembers.end());
+
+ RolePrivilegeMap::const_iterator strIt = _directPrivilegesForRole.find(role);
+ if (strIt == _directPrivilegesForRole.end())
+ return false;
+ strIt = _allPrivilegesForRole.find(role);
+ fassert(16826, strIt != _allPrivilegesForRole.end());
+ return true;
+}
+
+Status RoleGraph::createRole(const RoleName& role) {
+ if (roleExists(role)) {
+ return Status(ErrorCodes::DuplicateKey,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " already exists",
+ 0);
}
- void RoleGraph::_createRoleDontCheckIfRoleExists(const RoleName& role) {
- // Just reference the role in all the maps so that an entry gets created with empty
- // containers for the value.
- _roleToSubordinates[role];
- _roleToIndirectSubordinates[role];
- _roleToMembers[role];
- _directPrivilegesForRole[role];
- _allPrivilegesForRole[role];
- _allRoles.insert(role);
+ _createRoleDontCheckIfRoleExists(role);
+ return Status::OK();
+}
+
+void RoleGraph::_createRoleDontCheckIfRoleExists(const RoleName& role) {
+ // Just reference the role in all the maps so that an entry gets created with empty
+ // containers for the value.
+ _roleToSubordinates[role];
+ _roleToIndirectSubordinates[role];
+ _roleToMembers[role];
+ _directPrivilegesForRole[role];
+ _allPrivilegesForRole[role];
+ _allRoles.insert(role);
+}
+
+Status RoleGraph::deleteRole(const RoleName& role) {
+ if (!roleExists(role)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not exist",
+ 0);
}
-
- Status RoleGraph::deleteRole(const RoleName& role) {
- if (!roleExists(role)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " does not exist",
- 0);
- }
- if (isBuiltinRole(role)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot delete built-in role: " <<
- role.getFullName(),
- 0);
- }
-
- for (std::vector<RoleName>::iterator it = _roleToSubordinates[role].begin();
- it != _roleToSubordinates[role].end(); ++it) {
- _roleToMembers[*it].erase(std::find(_roleToMembers[*it].begin(),
- _roleToMembers[*it].end(),
- role));
- }
- for (std::vector<RoleName>::iterator it = _roleToMembers[role].begin();
- it != _roleToMembers[role].end(); ++it) {
- _roleToSubordinates[*it].erase(std::find(_roleToSubordinates[*it].begin(),
- _roleToSubordinates[*it].end(),
- role));
- }
- _roleToSubordinates.erase(role);
- _roleToIndirectSubordinates.erase(role);
- _roleToMembers.erase(role);
- _directPrivilegesForRole.erase(role);
- _allPrivilegesForRole.erase(role);
- _allRoles.erase(role);
- return Status::OK();
+ if (isBuiltinRole(role)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+ << "Cannot delete built-in role: " << role.getFullName(),
+ 0);
}
- RoleNameIterator RoleGraph::getDirectSubordinates(const RoleName& role) {
- if (!roleExists(role))
- return RoleNameIterator(NULL);
- return makeRoleNameIteratorForContainer(_roleToSubordinates[role]);
+ for (std::vector<RoleName>::iterator it = _roleToSubordinates[role].begin();
+ it != _roleToSubordinates[role].end();
+ ++it) {
+ _roleToMembers[*it].erase(
+ std::find(_roleToMembers[*it].begin(), _roleToMembers[*it].end(), role));
}
-
- RoleNameIterator RoleGraph::getIndirectSubordinates(const RoleName& role) {
- if (!roleExists(role))
- return RoleNameIterator(NULL);
- return makeRoleNameIteratorForContainer(_roleToIndirectSubordinates[role]);
+ for (std::vector<RoleName>::iterator it = _roleToMembers[role].begin();
+ it != _roleToMembers[role].end();
+ ++it) {
+ _roleToSubordinates[*it].erase(
+ std::find(_roleToSubordinates[*it].begin(), _roleToSubordinates[*it].end(), role));
}
-
- RoleNameIterator RoleGraph::getDirectMembers(const RoleName& role) {
- if (!roleExists(role))
- return RoleNameIterator(NULL);
- return makeRoleNameIteratorForContainer(_roleToMembers[role]);
+ _roleToSubordinates.erase(role);
+ _roleToIndirectSubordinates.erase(role);
+ _roleToMembers.erase(role);
+ _directPrivilegesForRole.erase(role);
+ _allPrivilegesForRole.erase(role);
+ _allRoles.erase(role);
+ return Status::OK();
+}
+
+RoleNameIterator RoleGraph::getDirectSubordinates(const RoleName& role) {
+ if (!roleExists(role))
+ return RoleNameIterator(NULL);
+ return makeRoleNameIteratorForContainer(_roleToSubordinates[role]);
+}
+
+RoleNameIterator RoleGraph::getIndirectSubordinates(const RoleName& role) {
+ if (!roleExists(role))
+ return RoleNameIterator(NULL);
+ return makeRoleNameIteratorForContainer(_roleToIndirectSubordinates[role]);
+}
+
+RoleNameIterator RoleGraph::getDirectMembers(const RoleName& role) {
+ if (!roleExists(role))
+ return RoleNameIterator(NULL);
+ return makeRoleNameIteratorForContainer(_roleToMembers[role]);
+}
+
+const PrivilegeVector& RoleGraph::getDirectPrivileges(const RoleName& role) {
+ if (!roleExists(role))
+ return emptyPrivilegeVector;
+ return _directPrivilegesForRole.find(role)->second;
+}
+
+const PrivilegeVector& RoleGraph::getAllPrivileges(const RoleName& role) {
+ if (!roleExists(role))
+ return emptyPrivilegeVector;
+ return _allPrivilegesForRole.find(role)->second;
+}
+
+Status RoleGraph::addRoleToRole(const RoleName& recipient, const RoleName& role) {
+ if (!roleExists(recipient)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << recipient.getFullName()
+ << " does not exist");
}
-
- const PrivilegeVector& RoleGraph::getDirectPrivileges(const RoleName& role) {
- if (!roleExists(role))
- return emptyPrivilegeVector;
- return _directPrivilegesForRole.find(role)->second;
+ if (isBuiltinRole(recipient)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+                          << "Cannot grant roles to built-in role: " << recipient.getFullName());
}
-
- const PrivilegeVector& RoleGraph::getAllPrivileges(const RoleName& role) {
- if (!roleExists(role))
- return emptyPrivilegeVector;
- return _allPrivilegesForRole.find(role)->second;
+ if (!roleExists(role)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not exist");
}
- Status RoleGraph::addRoleToRole(const RoleName& recipient, const RoleName& role) {
- if (!roleExists(recipient)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << recipient.getFullName() <<
- " does not exist");
- }
- if (isBuiltinRole(recipient)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot grant roles to built-in role: " <<
- role.getFullName());
- }
- if (!roleExists(role)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " does not exist");
- }
-
- if (std::find(_roleToSubordinates[recipient].begin(),
- _roleToSubordinates[recipient].end(),
- role) ==
- _roleToSubordinates[recipient].end()) {
- // Only add role if it's not already present
- _roleToSubordinates[recipient].push_back(role);
- _roleToMembers[role].push_back(recipient);
- }
-
- return Status::OK();
+ if (std::find(_roleToSubordinates[recipient].begin(),
+ _roleToSubordinates[recipient].end(),
+ role) == _roleToSubordinates[recipient].end()) {
+ // Only add role if it's not already present
+ _roleToSubordinates[recipient].push_back(role);
+ _roleToMembers[role].push_back(recipient);
}
- Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName& role) {
- if (!roleExists(recipient)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << recipient.getFullName() <<
- " does not exist",
- 0);
- }
- if (isBuiltinRole(recipient)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot remove roles from built-in role: " <<
- role.getFullName(),
- 0);
- }
- if (!roleExists(role)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " does not exist",
- 0);
- }
+ return Status::OK();
+}
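+
+// A minimal sketch (hypothetical role names and database) of the symmetry that
+// addRoleToRole() establishes: "role" becomes a direct subordinate of "recipient"
+// and "recipient" becomes a direct member of "role".
+//
+//     RoleGraph graph;
+//     RoleName recipient("appAdmin", "test");
+//     RoleName role("appReader", "test");
+//     invariant(graph.createRole(recipient).isOK());
+//     invariant(graph.createRole(role).isOK());
+//     invariant(graph.addRoleToRole(recipient, role).isOK());
+//     // getDirectSubordinates(recipient) now yields appReader@test and
+//     // getDirectMembers(role) now yields appAdmin@test.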
- std::vector<RoleName>::iterator itToRm = std::find(_roleToMembers[role].begin(),
- _roleToMembers[role].end(),
- recipient);
- if (itToRm != _roleToMembers[role].end()) {
- _roleToMembers[role].erase(itToRm);
- } else {
- return Status(ErrorCodes::RolesNotRelated,
- mongoutils::str::stream() << recipient.getFullName() << " is not a member"
- " of " << role.getFullName(),
- 0);
- }
-
- itToRm = std::find(_roleToSubordinates[recipient].begin(),
- _roleToSubordinates[recipient].end(),
- role);
- fassert(16827, itToRm != _roleToSubordinates[recipient].end());
- _roleToSubordinates[recipient].erase(itToRm);
- return Status::OK();
+Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName& role) {
+ if (!roleExists(recipient)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << recipient.getFullName()
+ << " does not exist",
+ 0);
}
-
- Status RoleGraph::removeAllRolesFromRole(const RoleName& victim) {
- typedef std::vector<RoleName> RoleNameVector;
- if (!roleExists(victim)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << victim.getFullName() <<
- " does not exist",
- 0);
- }
- if (isBuiltinRole(victim)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot remove roles from built-in role: " <<
- victim.getFullName(),
- 0);
- }
-
- RoleNameVector& subordinatesOfVictim = _roleToSubordinates[victim];
- for (RoleNameVector::const_iterator subordinateRole = subordinatesOfVictim.begin(),
- end = subordinatesOfVictim.end();
- subordinateRole != end;
- ++subordinateRole) {
-
- RoleNameVector& membersOfSubordinate = _roleToMembers[*subordinateRole];
- RoleNameVector::iterator toErase = std::find(
- membersOfSubordinate.begin(), membersOfSubordinate.end(), victim);
- fassert(17173, toErase != membersOfSubordinate.end());
- membersOfSubordinate.erase(toErase);
- }
- subordinatesOfVictim.clear();
- return Status::OK();
+ if (isBuiltinRole(recipient)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+                          << "Cannot remove roles from built-in role: " << recipient.getFullName(),
+ 0);
}
-
- Status RoleGraph::addPrivilegeToRole(const RoleName& role, const Privilege& privilegeToAdd) {
- if (!roleExists(role)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " does not exist",
- 0);
- }
- if (isBuiltinRole(role)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot grant privileges to built-in role: "
- << role.getFullName(),
- 0);
- }
-
- _addPrivilegeToRoleNoChecks(role, privilegeToAdd);
- return Status::OK();
+ if (!roleExists(role)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not exist",
+ 0);
}
- void RoleGraph::_addPrivilegeToRoleNoChecks(const RoleName& role,
- const Privilege& privilegeToAdd) {
- Privilege::addPrivilegeToPrivilegeVector(&_directPrivilegesForRole[role], privilegeToAdd);
+ std::vector<RoleName>::iterator itToRm =
+ std::find(_roleToMembers[role].begin(), _roleToMembers[role].end(), recipient);
+ if (itToRm != _roleToMembers[role].end()) {
+ _roleToMembers[role].erase(itToRm);
+ } else {
+ return Status(ErrorCodes::RolesNotRelated,
+ mongoutils::str::stream() << recipient.getFullName() << " is not a member"
+ " of "
+ << role.getFullName(),
+ 0);
}
- // NOTE: Current runtime of this is O(n*m) where n is the size of the current PrivilegeVector
- // for the given role, and m is the size of the privilegesToAdd vector.
- // If this was a PrivilegeSet (sorted on resource) rather than a PrivilegeVector, we
- // could do this in O(n+m) instead.
- Status RoleGraph::addPrivilegesToRole(const RoleName& role,
- const PrivilegeVector& privilegesToAdd) {
- if (!roleExists(role)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " does not exist",
- 0);
- }
- if (isBuiltinRole(role)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot grant privileges to built-in role: "
- << role.getFullName(),
- 0);
- }
-
- for (PrivilegeVector::const_iterator it = privilegesToAdd.begin();
- it != privilegesToAdd.end(); ++it) {
- _addPrivilegeToRoleNoChecks(role, *it);
- }
- return Status::OK();
+ itToRm = std::find(
+ _roleToSubordinates[recipient].begin(), _roleToSubordinates[recipient].end(), role);
+ fassert(16827, itToRm != _roleToSubordinates[recipient].end());
+ _roleToSubordinates[recipient].erase(itToRm);
+ return Status::OK();
+}
+
+Status RoleGraph::removeAllRolesFromRole(const RoleName& victim) {
+ typedef std::vector<RoleName> RoleNameVector;
+ if (!roleExists(victim)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << victim.getFullName()
+ << " does not exist",
+ 0);
+ }
+ if (isBuiltinRole(victim)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+ << "Cannot remove roles from built-in role: " << victim.getFullName(),
+ 0);
}
- Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
- const Privilege& privilegeToRemove) {
- if (!roleExists(role)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " does not exist",
- 0);
- }
- if (isBuiltinRole(role)) {
- return Status(
- ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot remove privileges from built-in role: " <<
- role.getFullName());
- }
+ RoleNameVector& subordinatesOfVictim = _roleToSubordinates[victim];
+ for (RoleNameVector::const_iterator subordinateRole = subordinatesOfVictim.begin(),
+ end = subordinatesOfVictim.end();
+ subordinateRole != end;
+ ++subordinateRole) {
+ RoleNameVector& membersOfSubordinate = _roleToMembers[*subordinateRole];
+ RoleNameVector::iterator toErase =
+ std::find(membersOfSubordinate.begin(), membersOfSubordinate.end(), victim);
+ fassert(17173, toErase != membersOfSubordinate.end());
+ membersOfSubordinate.erase(toErase);
+ }
+ subordinatesOfVictim.clear();
+ return Status::OK();
+}
+
+Status RoleGraph::addPrivilegeToRole(const RoleName& role, const Privilege& privilegeToAdd) {
+ if (!roleExists(role)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not exist",
+ 0);
+ }
+ if (isBuiltinRole(role)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+ << "Cannot grant privileges to built-in role: " << role.getFullName(),
+ 0);
+ }
- PrivilegeVector& currentPrivileges = _directPrivilegesForRole[role];
- for (PrivilegeVector::iterator it = currentPrivileges.begin();
- it != currentPrivileges.end(); ++it) {
-
- Privilege& curPrivilege = *it;
- if (curPrivilege.getResourcePattern() == privilegeToRemove.getResourcePattern()) {
- ActionSet curActions = curPrivilege.getActions();
-
- if (!curActions.isSupersetOf(privilegeToRemove.getActions())) {
- // Didn't possess all the actions being removed.
- return Status(ErrorCodes::PrivilegeNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " does not contain a privilege on " <<
- privilegeToRemove.getResourcePattern().toString() <<
- " with actions: " <<
- privilegeToRemove.getActions().toString(),
- 0);
- }
+ _addPrivilegeToRoleNoChecks(role, privilegeToAdd);
+ return Status::OK();
+}
+
+void RoleGraph::_addPrivilegeToRoleNoChecks(const RoleName& role, const Privilege& privilegeToAdd) {
+ Privilege::addPrivilegeToPrivilegeVector(&_directPrivilegesForRole[role], privilegeToAdd);
+}
+
+// NOTE: The current runtime of this is O(n*m), where n is the size of the given role's
+// current PrivilegeVector and m is the size of the privilegesToAdd vector.
+// If this were a PrivilegeSet (sorted on resource) rather than a PrivilegeVector, we
+// could do this in O(n+m) instead. A short sketch of the underlying merge behavior
+// follows this function.
+Status RoleGraph::addPrivilegesToRole(const RoleName& role,
+ const PrivilegeVector& privilegesToAdd) {
+ if (!roleExists(role)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not exist",
+ 0);
+ }
+ if (isBuiltinRole(role)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+ << "Cannot grant privileges to built-in role: " << role.getFullName(),
+ 0);
+ }
- curPrivilege.removeActions(privilegeToRemove.getActions());
- if (curPrivilege.getActions().empty()) {
- currentPrivileges.erase(it);
- }
- return Status::OK();
- }
- }
- return Status(ErrorCodes::PrivilegeNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() << " does not "
- "contain any privileges on " <<
- privilegeToRemove.getResourcePattern().toString(),
+ for (PrivilegeVector::const_iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
+ ++it) {
+ _addPrivilegeToRoleNoChecks(role, *it);
+ }
+ return Status::OK();
+}
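+
+// A minimal sketch of the merge behavior behind the NOTE above, with a hypothetical
+// database name and actions: granting a privilege on a resource that already has an
+// entry extends that entry's ActionSet instead of appending a duplicate, which is why
+// each insertion scans the existing vector.
+//
+//     PrivilegeVector vec;
+//     ActionSet findActions, insertActions;
+//     findActions.addAction(ActionType::find);
+//     insertActions.addAction(ActionType::insert);
+//     Privilege::addPrivilegeToPrivilegeVector(
+//         &vec, Privilege(ResourcePattern::forDatabaseName("test"), findActions));
+//     Privilege::addPrivilegeToPrivilegeVector(
+//         &vec, Privilege(ResourcePattern::forDatabaseName("test"), insertActions));
+//     // vec.size() == 1; its single entry now allows both find and insert on "test".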
+
+Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
+ const Privilege& privilegeToRemove) {
+ if (!roleExists(role)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not exist",
0);
}
+ if (isBuiltinRole(role)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+ << "Cannot remove privileges from built-in role: " << role.getFullName());
+ }
- Status RoleGraph::removePrivilegesFromRole(const RoleName& role,
- const PrivilegeVector& privilegesToRemove) {
- for (PrivilegeVector::const_iterator it = privilegesToRemove.begin();
- it != privilegesToRemove.end(); ++it) {
- Status status = removePrivilegeFromRole(role, *it);
- if (!status.isOK()) {
- return status;
+ PrivilegeVector& currentPrivileges = _directPrivilegesForRole[role];
+ for (PrivilegeVector::iterator it = currentPrivileges.begin(); it != currentPrivileges.end();
+ ++it) {
+ Privilege& curPrivilege = *it;
+ if (curPrivilege.getResourcePattern() == privilegeToRemove.getResourcePattern()) {
+ ActionSet curActions = curPrivilege.getActions();
+
+ if (!curActions.isSupersetOf(privilegeToRemove.getActions())) {
+ // Didn't possess all the actions being removed.
+ return Status(ErrorCodes::PrivilegeNotFound,
+ mongoutils::str::stream()
+ << "Role: " << role.getFullName()
+ << " does not contain a privilege on "
+ << privilegeToRemove.getResourcePattern().toString()
+ << " with actions: " << privilegeToRemove.getActions().toString(),
+ 0);
}
- }
- return Status::OK();
- }
- Status RoleGraph::removeAllPrivilegesFromRole(const RoleName& role) {
- if (!roleExists(role)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << role.getFullName() <<
- " does not exist",
- 0);
- }
- if (isBuiltinRole(role)) {
- return Status(
- ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot remove privileges from built-in role: " <<
- role.getFullName());
+ curPrivilege.removeActions(privilegeToRemove.getActions());
+ if (curPrivilege.getActions().empty()) {
+ currentPrivileges.erase(it);
+ }
+ return Status::OK();
}
- _directPrivilegesForRole[role].clear();
- return Status::OK();
}
-
- Status RoleGraph::replaceRole(const RoleName& roleName,
- const std::vector<RoleName>& roles,
- const PrivilegeVector& privileges) {
- Status status = removeAllPrivilegesFromRole(roleName);
- if (status == ErrorCodes::RoleNotFound) {
- fassert(17168, createRole(roleName));
- }
- else if (!status.isOK()) {
+ return Status(ErrorCodes::PrivilegeNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not "
+ "contain any privileges on "
+ << privilegeToRemove.getResourcePattern().toString(),
+ 0);
+}
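+
+// A minimal sketch of the partial-removal semantics above, with hypothetical role,
+// resource, and actions: removing a subset of a privilege's actions shrinks that
+// privilege, while removing actions the role does not fully hold fails.
+//
+//     RoleGraph graph;
+//     RoleName someRole("reports", "test");
+//     invariant(graph.createRole(someRole).isOK());
+//     ActionSet readWrite, writeOnly;
+//     readWrite.addAction(ActionType::find);
+//     readWrite.addAction(ActionType::insert);
+//     writeOnly.addAction(ActionType::insert);
+//     invariant(graph.addPrivilegeToRole(
+//                        someRole,
+//                        Privilege(ResourcePattern::forDatabaseName("test"), readWrite))
+//                   .isOK());
+//     // Removing only the insert action leaves a find-only privilege behind.
+//     invariant(graph.removePrivilegeFromRole(
+//                        someRole,
+//                        Privilege(ResourcePattern::forDatabaseName("test"), writeOnly))
+//                   .isOK());
+//     // A second identical removal now fails with PrivilegeNotFound, since the role
+//     // no longer holds the insert action on that resource.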
+
+Status RoleGraph::removePrivilegesFromRole(const RoleName& role,
+ const PrivilegeVector& privilegesToRemove) {
+ for (PrivilegeVector::const_iterator it = privilegesToRemove.begin();
+ it != privilegesToRemove.end();
+ ++it) {
+ Status status = removePrivilegeFromRole(role, *it);
+ if (!status.isOK()) {
return status;
}
- fassert(17169, removeAllRolesFromRole(roleName));
- for (size_t i = 0; i < roles.size(); ++i) {
- const RoleName& grantedRole = roles[i];
- status = createRole(grantedRole);
- fassert(17170, status.isOK() || status == ErrorCodes::DuplicateKey);
- fassert(17171, addRoleToRole(roleName, grantedRole));
+ }
+ return Status::OK();
+}
+
+Status RoleGraph::removeAllPrivilegesFromRole(const RoleName& role) {
+ if (!roleExists(role)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not exist",
+ 0);
+ }
+ if (isBuiltinRole(role)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+ << "Cannot remove privileges from built-in role: " << role.getFullName());
+ }
+ _directPrivilegesForRole[role].clear();
+ return Status::OK();
+}
+
+Status RoleGraph::replaceRole(const RoleName& roleName,
+ const std::vector<RoleName>& roles,
+ const PrivilegeVector& privileges) {
+ Status status = removeAllPrivilegesFromRole(roleName);
+ if (status == ErrorCodes::RoleNotFound) {
+ fassert(17168, createRole(roleName));
+ } else if (!status.isOK()) {
+ return status;
+ }
+ fassert(17169, removeAllRolesFromRole(roleName));
+ for (size_t i = 0; i < roles.size(); ++i) {
+ const RoleName& grantedRole = roles[i];
+ status = createRole(grantedRole);
+ fassert(17170, status.isOK() || status == ErrorCodes::DuplicateKey);
+ fassert(17171, addRoleToRole(roleName, grantedRole));
+ }
+ fassert(17172, addPrivilegesToRole(roleName, privileges));
+ return Status::OK();
+}
+
+Status RoleGraph::recomputePrivilegeData() {
+ /*
+ * This method is used to recompute the "allPrivileges" vector for each node in the graph,
+ * as well as look for cycles. It is implemented by performing a depth-first traversal of
+ * the dependency graph, once for each node. "visitedRoles" tracks the set of role names
+ * ever visited, and it is used to prune each DFS. A node that has been visited once on any
+ * DFS is never visited again. Complexity of this implementation is O(n+m) where "n" is the
+ * number of nodes and "m" is the number of prerequisite edges. Space complexity is O(n),
+ * in both stack space and size of the "visitedRoles" set.
+ *
+ * "inProgressRoles" is used to detect and report cycles, as well as to keep track of roles
+ * we started visiting before realizing they had children that needed visiting first, so
+ * we can get back to them after visiting their children.
+ */
+
+ unordered_set<RoleName> visitedRoles;
+ for (EdgeSet::const_iterator it = _roleToSubordinates.begin(); it != _roleToSubordinates.end();
+ ++it) {
+ Status status = _recomputePrivilegeDataHelper(it->first, visitedRoles);
+ if (!status.isOK()) {
+ return status;
}
- fassert(17172, addPrivilegesToRole(roleName, privileges));
- return Status::OK();
}
+ return Status::OK();
+}
- Status RoleGraph::recomputePrivilegeData() {
- /*
- * This method is used to recompute the "allPrivileges" vector for each node in the graph,
- * as well as look for cycles. It is implemented by performing a depth-first traversal of
- * the dependency graph, once for each node. "visitedRoles" tracks the set of role names
- * ever visited, and it is used to prune each DFS. A node that has been visited once on any
- * DFS is never visited again. Complexity of this implementation is O(n+m) where "n" is the
- * number of nodes and "m" is the number of prerequisite edges. Space complexity is O(n),
- * in both stack space and size of the "visitedRoles" set.
- *
- * "inProgressRoles" is used to detect and report cycles, as well as to keep track of roles
- * we started visiting before realizing they had children that needed visiting first, so
- * we can get back to them after visiting their children.
- */
-
- unordered_set<RoleName> visitedRoles;
- for (EdgeSet::const_iterator it = _roleToSubordinates.begin();
- it != _roleToSubordinates.end(); ++it) {
- Status status = _recomputePrivilegeDataHelper(it->first, visitedRoles);
- if (!status.isOK()) {
- return status;
- }
- }
+Status RoleGraph::_recomputePrivilegeDataHelper(const RoleName& startingRole,
+ unordered_set<RoleName>& visitedRoles) {
+ if (visitedRoles.count(startingRole)) {
return Status::OK();
}
- Status RoleGraph::_recomputePrivilegeDataHelper(const RoleName& startingRole,
- unordered_set<RoleName>& visitedRoles) {
- if (visitedRoles.count(startingRole)) {
- return Status::OK();
- }
-
- std::vector<RoleName> inProgressRoles;
- inProgressRoles.push_back(startingRole);
- while (inProgressRoles.size()) {
- const RoleName currentRole = inProgressRoles.back();
- fassert(17277, !visitedRoles.count(currentRole));
+ std::vector<RoleName> inProgressRoles;
+ inProgressRoles.push_back(startingRole);
+ while (inProgressRoles.size()) {
+ const RoleName currentRole = inProgressRoles.back();
+ fassert(17277, !visitedRoles.count(currentRole));
- if (!roleExists(currentRole)) {
- return Status(ErrorCodes::RoleNotFound,
- mongoutils::str::stream() << "Role: " << currentRole.getFullName() <<
- " does not exist",
- 0);
- }
+ if (!roleExists(currentRole)) {
+ return Status(ErrorCodes::RoleNotFound,
+ mongoutils::str::stream() << "Role: " << currentRole.getFullName()
+ << " does not exist",
+ 0);
+ }
- // Check for cycles
- {
- const std::vector<RoleName>::const_iterator begin = inProgressRoles.begin();
- // The currentRole will always be last so don't look there.
- const std::vector<RoleName>::const_iterator end = --inProgressRoles.end();
- const std::vector<RoleName>::const_iterator firstOccurence =
- std::find(begin, end, currentRole);
- if (firstOccurence != end) {
- std::ostringstream os;
- os << "Cycle in dependency graph: ";
- for (std::vector<RoleName>::const_iterator it = firstOccurence;
- it != end; ++it) {
- os << it->getFullName() << " -> ";
- }
- os << currentRole.getFullName();
- return Status(ErrorCodes::GraphContainsCycle, os.str());
+ // Check for cycles
+ {
+ const std::vector<RoleName>::const_iterator begin = inProgressRoles.begin();
+ // The currentRole will always be last so don't look there.
+ const std::vector<RoleName>::const_iterator end = --inProgressRoles.end();
+ const std::vector<RoleName>::const_iterator firstOccurence =
+ std::find(begin, end, currentRole);
+ if (firstOccurence != end) {
+ std::ostringstream os;
+ os << "Cycle in dependency graph: ";
+ for (std::vector<RoleName>::const_iterator it = firstOccurence; it != end; ++it) {
+ os << it->getFullName() << " -> ";
}
+ os << currentRole.getFullName();
+ return Status(ErrorCodes::GraphContainsCycle, os.str());
}
+ }
- // Make sure we've already visited all subordinate roles before worrying about this one.
- const std::vector<RoleName>& currentRoleDirectRoles = _roleToSubordinates[currentRole];
- std::vector<RoleName>::const_iterator roleIt;
- for (roleIt = currentRoleDirectRoles.begin();
- roleIt != currentRoleDirectRoles.end(); ++roleIt) {
- const RoleName& childRole = *roleIt;
- if (!visitedRoles.count(childRole)) {
- inProgressRoles.push_back(childRole);
- break;
- }
- }
- // If roleIt didn't reach the end of currentRoleDirectRoles that means we found a child
- // of currentRole that we haven't visited yet.
- if (roleIt != currentRoleDirectRoles.end()) {
- continue;
+ // Make sure we've already visited all subordinate roles before worrying about this one.
+ const std::vector<RoleName>& currentRoleDirectRoles = _roleToSubordinates[currentRole];
+ std::vector<RoleName>::const_iterator roleIt;
+ for (roleIt = currentRoleDirectRoles.begin(); roleIt != currentRoleDirectRoles.end();
+ ++roleIt) {
+ const RoleName& childRole = *roleIt;
+ if (!visitedRoles.count(childRole)) {
+ inProgressRoles.push_back(childRole);
+ break;
}
- // At this point, we know that we've already visited all child roles of currentRole
- // and thus their "all privileges" sets are correct and can be added to currentRole's
- // "all privileges" set
-
- // Need to clear out the "all privileges" vector for the current role, and re-fill it
- // with just the direct privileges for this role.
- PrivilegeVector& currentRoleAllPrivileges = _allPrivilegesForRole[currentRole];
- currentRoleAllPrivileges = _directPrivilegesForRole[currentRole];
-
- // Need to do the same thing for the indirect roles
- unordered_set<RoleName>& currentRoleIndirectRoles =
- _roleToIndirectSubordinates[currentRole];
- currentRoleIndirectRoles.clear();
- for (std::vector<RoleName>::const_iterator it = currentRoleDirectRoles.begin();
- it != currentRoleDirectRoles.end(); ++it) {
- currentRoleIndirectRoles.insert(*it);
+ }
+    // If roleIt didn't reach the end of currentRoleDirectRoles, that means we found a child
+ // of currentRole that we haven't visited yet.
+ if (roleIt != currentRoleDirectRoles.end()) {
+ continue;
+ }
+ // At this point, we know that we've already visited all child roles of currentRole
+ // and thus their "all privileges" sets are correct and can be added to currentRole's
+    // "all privileges" set.
+
+ // Need to clear out the "all privileges" vector for the current role, and re-fill it
+ // with just the direct privileges for this role.
+ PrivilegeVector& currentRoleAllPrivileges = _allPrivilegesForRole[currentRole];
+ currentRoleAllPrivileges = _directPrivilegesForRole[currentRole];
+
+ // Need to do the same thing for the indirect roles
+ unordered_set<RoleName>& currentRoleIndirectRoles =
+ _roleToIndirectSubordinates[currentRole];
+ currentRoleIndirectRoles.clear();
+ for (std::vector<RoleName>::const_iterator it = currentRoleDirectRoles.begin();
+ it != currentRoleDirectRoles.end();
+ ++it) {
+ currentRoleIndirectRoles.insert(*it);
+ }
+
+ // Recursively add children's privileges to current role's "all privileges" vector, and
+        // children's roles to the current role's "indirect roles" vector.
+ for (std::vector<RoleName>::const_iterator roleIt = currentRoleDirectRoles.begin();
+ roleIt != currentRoleDirectRoles.end();
+ ++roleIt) {
+ // At this point, we already know that the "all privilege" set for the child is
+ // correct, so add those privileges to our "all privilege" set.
+ const RoleName& childRole = *roleIt;
+
+ const PrivilegeVector& childsPrivileges = _allPrivilegesForRole[childRole];
+ for (PrivilegeVector::const_iterator privIt = childsPrivileges.begin();
+ privIt != childsPrivileges.end();
+ ++privIt) {
+ Privilege::addPrivilegeToPrivilegeVector(&currentRoleAllPrivileges, *privIt);
}
- // Recursively add children's privileges to current role's "all privileges" vector, and
- // children's roles to current roles's "indirect roles" vector.
- for (std::vector<RoleName>::const_iterator roleIt = currentRoleDirectRoles.begin();
- roleIt != currentRoleDirectRoles.end(); ++roleIt) {
- // At this point, we already know that the "all privilege" set for the child is
- // correct, so add those privileges to our "all privilege" set.
- const RoleName& childRole = *roleIt;
-
- const PrivilegeVector& childsPrivileges = _allPrivilegesForRole[childRole];
- for (PrivilegeVector::const_iterator privIt = childsPrivileges.begin();
- privIt != childsPrivileges.end(); ++privIt) {
- Privilege::addPrivilegeToPrivilegeVector(&currentRoleAllPrivileges, *privIt);
- }
-
- // We also know that the "indirect roles" for the child is also correct, so we can
- // add those roles to our "indirect roles" set.
- const unordered_set<RoleName>& childsRoles = _roleToIndirectSubordinates[childRole];
- for (unordered_set<RoleName>::const_iterator childsRoleIt = childsRoles.begin();
- childsRoleIt != childsRoles.end(); ++childsRoleIt) {
- currentRoleIndirectRoles.insert(*childsRoleIt);
- }
+            // We also know that the "indirect roles" set for the child is correct, so we can
+ // add those roles to our "indirect roles" set.
+ const unordered_set<RoleName>& childsRoles = _roleToIndirectSubordinates[childRole];
+ for (unordered_set<RoleName>::const_iterator childsRoleIt = childsRoles.begin();
+ childsRoleIt != childsRoles.end();
+ ++childsRoleIt) {
+ currentRoleIndirectRoles.insert(*childsRoleIt);
}
-
- visitedRoles.insert(currentRole);
- inProgressRoles.pop_back();
}
- return Status::OK();
+
+ visitedRoles.insert(currentRole);
+ inProgressRoles.pop_back();
}
+ return Status::OK();
+}
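+
+// A minimal sketch of how the traversal above reports a cycle; the role names and
+// database are hypothetical, chosen only to exercise the error path.
+//
+//     RoleGraph graph;
+//     RoleName a("roleA", "test"), b("roleB", "test"), c("roleC", "test");
+//     invariant(graph.createRole(a).isOK());
+//     invariant(graph.createRole(b).isOK());
+//     invariant(graph.createRole(c).isOK());
+//     invariant(graph.addRoleToRole(a, b).isOK());  // a grants b
+//     invariant(graph.addRoleToRole(b, c).isOK());  // b grants c
+//     invariant(graph.addRoleToRole(c, a).isOK());  // c grants a, closing the cycle
+//     Status status = graph.recomputePrivilegeData();
+//     // status is ErrorCodes::GraphContainsCycle and status.reason() names the roles
+//     // along the cycle, e.g. "Cycle in dependency graph: roleA@test -> ... -> roleA@test".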
- RoleNameIterator RoleGraph::getRolesForDatabase(const std::string& dbname) {
- _createBuiltinRolesForDBIfNeeded(dbname);
+RoleNameIterator RoleGraph::getRolesForDatabase(const std::string& dbname) {
+ _createBuiltinRolesForDBIfNeeded(dbname);
- std::set<RoleName>::const_iterator lower = _allRoles.lower_bound(RoleName("", dbname));
- std::string afterDB = dbname;
- afterDB.push_back('\0');
- std::set<RoleName>::const_iterator upper = _allRoles.lower_bound(RoleName("", afterDB));
- return makeRoleNameIterator(lower, upper);
- }
+ std::set<RoleName>::const_iterator lower = _allRoles.lower_bound(RoleName("", dbname));
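+    // dbname + '\0' is the first string that sorts strictly after dbname, so the
+    // half-open range [lower, upper) covers exactly the roles defined on dbname.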
+ std::string afterDB = dbname;
+ afterDB.push_back('\0');
+ std::set<RoleName>::const_iterator upper = _allRoles.lower_bound(RoleName("", afterDB));
+ return makeRoleNameIterator(lower, upper);
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/role_graph.h b/src/mongo/db/auth/role_graph.h
index bec12b6d1e2..50a0c47a857 100644
--- a/src/mongo/db/auth/role_graph.h
+++ b/src/mongo/db/auth/role_graph.h
@@ -41,273 +41,271 @@
namespace mongo {
+/**
+ * A graph of role and privilege relationships.
+ *
+ * This structure is used to store an in-memory representation of the admin.system.roledata
+ * collection, specifically the graph of which roles are members of other roles and what
+ * privileges each role has, both directly and transitively through membership in other roles.
+ * There are some restrictions on calls to getAllPrivileges(), specifically, one must call
+ * recomputePrivilegeData() before calling getAllPrivileges() if any of the mutation methods
+ * have been called on the instance since the later of its construction or the last call to
+ * recomputePrivilegeData() on the object.
+ */
+class RoleGraph {
+public:
+ /**
+ * Adds to "privileges" the privileges associated with the named built-in role, and returns
+ * true. Returns false if "role" does not name a built-in role, and does not modify
+ * "privileges". Addition of new privileges is done as with
+ * Privilege::addPrivilegeToPrivilegeVector.
+ */
+ static bool addPrivilegesForBuiltinRole(const RoleName& role, PrivilegeVector* privileges);
+
+ RoleGraph();
+ RoleGraph(const RoleGraph& other);
+ ~RoleGraph();
+
+ // Built-in roles for backwards compatibility with 2.2 and prior
+ static const std::string BUILTIN_ROLE_V0_READ;
+ static const std::string BUILTIN_ROLE_V0_READ_WRITE;
+ static const std::string BUILTIN_ROLE_V0_ADMIN_READ;
+ static const std::string BUILTIN_ROLE_V0_ADMIN_READ_WRITE;
+
+ // Swaps the contents of this RoleGraph with those of "other"
+ void swap(RoleGraph& other);
+
+ /**
+ * Adds to "privileges" the necessary privileges to do absolutely anything on the system.
+ */
+ static void generateUniversalPrivileges(PrivilegeVector* privileges);
+
+ /**
+ * Returns an iterator over the RoleNames of the "members" of the given role.
+ * Members of a role are roles that have been granted this role directly (roles that are
+ * members transitively through another role are not included). These are the "parents" of
+ * this node in the graph.
+ */
+ RoleNameIterator getDirectMembers(const RoleName& role);
+
+ /**
+ * Returns an iterator over the RoleNames of the "subordinates" of the given role.
+ * Subordinate roles are the roles that this role has been granted directly (roles
+ * that have been granted transitively through another role are not included). These are
+ * the "children" of this node in the graph.
+ */
+ RoleNameIterator getDirectSubordinates(const RoleName& role);
+
+ /**
+ * Returns an iterator that can be used to get a full list of roles that this role inherits
+ * privileges from. This includes its direct subordinate roles as well as the subordinates
+ * of its subordinates, and so on.
+ */
+ RoleNameIterator getIndirectSubordinates(const RoleName& role);
+
+ /**
+ * Returns an iterator that can be used to get a full list of roles (in lexicographical
+ * order) that are defined on the given database.
+ */
+ RoleNameIterator getRolesForDatabase(const std::string& dbname);
+
+ /**
+ * Returns a vector of the privileges that the given role has been directly granted.
+ * Privileges that have been granted transitively through this role's subordinate roles are
+ * not included.
+ */
+ const PrivilegeVector& getDirectPrivileges(const RoleName& role);
+
/**
- * A graph of role and privilege relationships.
+ * Returns a vector of all privileges that the given role contains. This includes both the
+ * privileges that have been granted to this role directly, as well as any privileges
+ * inherited from the role's subordinate roles.
+ */
+ const PrivilegeVector& getAllPrivileges(const RoleName& role);
+
+ /**
+ * Returns whether or not the given role exists in the role graph. Will implicitly
+ * add the role to the graph if it is a built-in role and isn't already in the graph.
+ */
+ bool roleExists(const RoleName& role);
+
+ /**
+ * Returns whether the given role corresponds to a built-in role.
+ */
+ static bool isBuiltinRole(const RoleName& role);
+
+ // Mutation functions
+
+ /**
+ * Puts an entry into the RoleGraph for the given RoleName.
+ * Returns DuplicateKey if the role already exists.
+ */
+ Status createRole(const RoleName& role);
+
+ /**
+ * Deletes the given role by first removing it from the members/subordinates arrays for
+     * all other roles, and then by removing its own entries from the graph's internal maps.
+ * Returns RoleNotFound if the role doesn't exist.
+ * Returns InvalidRoleModification if "role" is a built-in role.
+ */
+ Status deleteRole(const RoleName& role);
+
+ /**
+ * Grants "role" to "recipient". This leaves "recipient" as a member of "role" and "role"
+ * as a subordinate of "recipient".
+ * Returns RoleNotFound if either of "role" or "recipient" doesn't exist in
+ * the RoleGraph.
+ * Returns InvalidRoleModification if "recipient" is a built-in role.
+ */
+ Status addRoleToRole(const RoleName& recipient, const RoleName& role);
+
+ /**
+ * Revokes "role" from "recipient".
+ * Returns RoleNotFound if either of "role" or "recipient" doesn't exist in
+ * the RoleGraph. Returns RolesNotRelated if "recipient" is not currently a
+ * member of "role".
+ * Returns InvalidRoleModification if "role" is a built-in role.
+ */
+ Status removeRoleFromRole(const RoleName& recipient, const RoleName& role);
+
+ /**
+ * Removes all roles held by "victim".
+ * Returns RoleNotFound if "victim" doesn't exist in the role graph.
+ * Returns InvalidRoleModification if "victim" is a built-in role.
+ */
+ Status removeAllRolesFromRole(const RoleName& victim);
+
+ /**
+ * Grants "privilegeToAdd" to "role".
+ * Returns RoleNotFound if "role" doesn't exist in the role graph.
+ * Returns InvalidRoleModification if "role" is a built-in role.
+ */
+ Status addPrivilegeToRole(const RoleName& role, const Privilege& privilegeToAdd);
+
+ /**
+ * Grants Privileges from "privilegesToAdd" to "role".
+ * Returns RoleNotFound if "role" doesn't exist in the role graph.
+ * Returns InvalidRoleModification if "role" is a built-in role.
+ */
+ Status addPrivilegesToRole(const RoleName& role, const PrivilegeVector& privilegesToAdd);
+
+ /**
+ * Removes "privilegeToRemove" from "role".
+ * Returns RoleNotFound if "role" doesn't exist in the role graph.
+ * Returns PrivilegeNotFound if "role" doesn't contain the full privilege being removed.
+ * Returns InvalidRoleModification if "role" is a built-in role.
+ */
+ Status removePrivilegeFromRole(const RoleName& role, const Privilege& privilegeToRemove);
+
+ /**
+ * Removes all privileges in the "privilegesToRemove" vector from "role".
+ * Returns RoleNotFound if "role" doesn't exist in the role graph.
+ * Returns InvalidRoleModification if "role" is a built-in role.
+ * Returns PrivilegeNotFound if "role" is missing any of the privileges being removed. If
+ * PrivilegeNotFound is returned then the graph may be in an inconsistent state and needs to
+ * be abandoned.
+ */
+ Status removePrivilegesFromRole(const RoleName& role,
+ const PrivilegeVector& privilegesToRemove);
+
+ /**
+ * Removes all privileges from "role".
+ * Returns RoleNotFound if "role" doesn't exist in the role graph.
+ * Returns InvalidRoleModification if "role" is a built-in role.
+ */
+ Status removeAllPrivilegesFromRole(const RoleName& role);
+
+ /**
+ * Updates the RoleGraph by adding the role named "roleName", with the given role
+ * memberships and privileges. If the name "roleName" already exists, it is replaced. Any
+ * subordinate roles mentioned in role.roles are created, if needed, with empty privilege
+ * and subordinate role lists.
+ *
+ * Should _only_ fail if the role to replace is a builtin role, in which
+ * case it will return ErrorCodes::InvalidRoleModification.
+ */
+ Status replaceRole(const RoleName& roleName,
+ const std::vector<RoleName>& roles,
+ const PrivilegeVector& privileges);
+
+ /**
+     * Adds the role described in "doc" to the role graph.
+ */
+ Status addRoleFromDocument(const BSONObj& doc);
+
+ /**
+ * Applies to the RoleGraph the oplog operation described by the parameters.
*
- * This structure is used to store an in-memory representation of the admin.system.roledata
- * collection, specifically the graph of which roles are members of other roles and what
- * privileges each role has, both directly and transitively through membership in other roles.
- * There are some restrictions on calls to getAllPrivileges(), specifically, one must call
- * recomputePrivilegeData() before calling getAllPrivileges() if any of the mutation methods
- * have been called on the instance since the later of its construction or the last call to
- * recomputePrivilegeData() on the object.
+ * Returns Status::OK() on success, ErrorCodes::OplogOperationUnsupported if the oplog
+ * operation is not supported, and other codes (typically BadValue) if the oplog operation
+ * is ill-described.
*/
- class RoleGraph {
- public:
- /**
- * Adds to "privileges" the privileges associated with the named built-in role, and returns
- * true. Returns false if "role" does not name a built-in role, and does not modify
- * "privileges". Addition of new privileges is done as with
- * Privilege::addPrivilegeToPrivilegeVector.
- */
- static bool addPrivilegesForBuiltinRole(const RoleName& role, PrivilegeVector* privileges);
-
- RoleGraph();
- RoleGraph(const RoleGraph& other);
- ~RoleGraph();
-
- // Built-in roles for backwards compatibility with 2.2 and prior
- static const std::string BUILTIN_ROLE_V0_READ;
- static const std::string BUILTIN_ROLE_V0_READ_WRITE;
- static const std::string BUILTIN_ROLE_V0_ADMIN_READ;
- static const std::string BUILTIN_ROLE_V0_ADMIN_READ_WRITE;
-
- // Swaps the contents of this RoleGraph with those of "other"
- void swap(RoleGraph& other);
-
- /**
- * Adds to "privileges" the necessary privileges to do absolutely anything on the system.
- */
- static void generateUniversalPrivileges(PrivilegeVector* privileges);
-
- /**
- * Returns an iterator over the RoleNames of the "members" of the given role.
- * Members of a role are roles that have been granted this role directly (roles that are
- * members transitively through another role are not included). These are the "parents" of
- * this node in the graph.
- */
- RoleNameIterator getDirectMembers(const RoleName& role);
-
- /**
- * Returns an iterator over the RoleNames of the "subordinates" of the given role.
- * Subordinate roles are the roles that this role has been granted directly (roles
- * that have been granted transitively through another role are not included). These are
- * the "children" of this node in the graph.
- */
- RoleNameIterator getDirectSubordinates(const RoleName& role);
-
- /**
- * Returns an iterator that can be used to get a full list of roles that this role inherits
- * privileges from. This includes its direct subordinate roles as well as the subordinates
- * of its subordinates, and so on.
- */
- RoleNameIterator getIndirectSubordinates(const RoleName& role);
-
- /**
- * Returns an iterator that can be used to get a full list of roles (in lexicographical
- * order) that are defined on the given database.
- */
- RoleNameIterator getRolesForDatabase(const std::string& dbname);
-
- /**
- * Returns a vector of the privileges that the given role has been directly granted.
- * Privileges that have been granted transitively through this role's subordinate roles are
- * not included.
- */
- const PrivilegeVector& getDirectPrivileges(const RoleName& role);
-
- /**
- * Returns a vector of all privileges that the given role contains. This includes both the
- * privileges that have been granted to this role directly, as well as any privileges
- * inherited from the role's subordinate roles.
- */
- const PrivilegeVector& getAllPrivileges(const RoleName& role);
-
- /**
- * Returns whether or not the given role exists in the role graph. Will implicitly
- * add the role to the graph if it is a built-in role and isn't already in the graph.
- */
- bool roleExists(const RoleName& role);
-
- /**
- * Returns whether the given role corresponds to a built-in role.
- */
- static bool isBuiltinRole(const RoleName& role);
-
- // Mutation functions
-
- /**
- * Puts an entry into the RoleGraph for the given RoleName.
- * Returns DuplicateKey if the role already exists.
- */
- Status createRole(const RoleName& role);
-
- /**
- * Deletes the given role by first removing it from the members/subordinates arrays for
- * all other roles, and then by removing its own entries in the 4 member maps.
- * Returns RoleNotFound if the role doesn't exist.
- * Returns InvalidRoleModification if "role" is a built-in role.
- */
- Status deleteRole(const RoleName& role);
-
- /**
- * Grants "role" to "recipient". This leaves "recipient" as a member of "role" and "role"
- * as a subordinate of "recipient".
- * Returns RoleNotFound if either of "role" or "recipient" doesn't exist in
- * the RoleGraph.
- * Returns InvalidRoleModification if "recipient" is a built-in role.
- */
- Status addRoleToRole(const RoleName& recipient, const RoleName& role);
-
- /**
- * Revokes "role" from "recipient".
- * Returns RoleNotFound if either of "role" or "recipient" doesn't exist in
- * the RoleGraph. Returns RolesNotRelated if "recipient" is not currently a
- * member of "role".
- * Returns InvalidRoleModification if "role" is a built-in role.
- */
- Status removeRoleFromRole(const RoleName& recipient, const RoleName& role);
-
- /**
- * Removes all roles held by "victim".
- * Returns RoleNotFound if "victim" doesn't exist in the role graph.
- * Returns InvalidRoleModification if "victim" is a built-in role.
- */
- Status removeAllRolesFromRole(const RoleName& victim);
-
- /**
- * Grants "privilegeToAdd" to "role".
- * Returns RoleNotFound if "role" doesn't exist in the role graph.
- * Returns InvalidRoleModification if "role" is a built-in role.
- */
- Status addPrivilegeToRole(const RoleName& role, const Privilege& privilegeToAdd);
-
- /**
- * Grants Privileges from "privilegesToAdd" to "role".
- * Returns RoleNotFound if "role" doesn't exist in the role graph.
- * Returns InvalidRoleModification if "role" is a built-in role.
- */
- Status addPrivilegesToRole(const RoleName& role, const PrivilegeVector& privilegesToAdd);
-
- /**
- * Removes "privilegeToRemove" from "role".
- * Returns RoleNotFound if "role" doesn't exist in the role graph.
- * Returns PrivilegeNotFound if "role" doesn't contain the full privilege being removed.
- * Returns InvalidRoleModification if "role" is a built-in role.
- */
- Status removePrivilegeFromRole(const RoleName& role,
- const Privilege& privilegeToRemove);
-
- /**
- * Removes all privileges in the "privilegesToRemove" vector from "role".
- * Returns RoleNotFound if "role" doesn't exist in the role graph.
- * Returns InvalidRoleModification if "role" is a built-in role.
- * Returns PrivilegeNotFound if "role" is missing any of the privileges being removed. If
- * PrivilegeNotFound is returned then the graph may be in an inconsistent state and needs to
- * be abandoned.
- */
- Status removePrivilegesFromRole(const RoleName& role,
- const PrivilegeVector& privilegesToRemove);
-
- /**
- * Removes all privileges from "role".
- * Returns RoleNotFound if "role" doesn't exist in the role graph.
- * Returns InvalidRoleModification if "role" is a built-in role.
- */
- Status removeAllPrivilegesFromRole(const RoleName& role);
-
- /**
- * Updates the RoleGraph by adding the role named "roleName", with the given role
- * memberships and privileges. If the name "roleName" already exists, it is replaced. Any
- * subordinate roles mentioned in role.roles are created, if needed, with empty privilege
- * and subordinate role lists.
- *
- * Should _only_ fail if the role to replace is a builtin role, in which
- * case it will return ErrorCodes::InvalidRoleModification.
- */
- Status replaceRole(const RoleName& roleName,
- const std::vector<RoleName>& roles,
- const PrivilegeVector& privileges);
-
- /**
- * Adds the role described in "doc" the role graph.
- */
- Status addRoleFromDocument(const BSONObj& doc);
-
- /**
- * Applies to the RoleGraph the oplog operation described by the parameters.
- *
- * Returns Status::OK() on success, ErrorCodes::OplogOperationUnsupported if the oplog
- * operation is not supported, and other codes (typically BadValue) if the oplog operation
- * is ill-described.
- */
- Status handleLogOp(
- const char* op,
- const NamespaceString& ns,
- const BSONObj& o,
- const BSONObj* o2);
-
- /**
- * Recomputes the indirect (getAllPrivileges) data for this graph.
- *
- * Must be called between calls to any of the mutation functions and calls
- * to getAllPrivileges().
- *
- * Returns Status::OK() on success. If a cycle is detected, returns
- * ErrorCodes::GraphContainsCycle, and the status message reveals the cycle.
- */
- Status recomputePrivilegeData();
-
- private:
- // Helper method doing a topological DFS to compute the indirect privilege
- // data and look for cycles
- Status _recomputePrivilegeDataHelper(const RoleName& currentRole,
- unordered_set<RoleName>& visitedRoles);
-
- /**
- * If the role name given is not a built-in role, or it is but it's already in the role
- * graph, then this does nothing. If it *is* a built-in role and this is the first time
- * this function has been called for this role, it will add the role into the role graph.
- */
- void _createBuiltinRoleIfNeeded(const RoleName& role);
-
- /**
- * Adds the built-in roles for the given database name to the role graph if they aren't
- * already present.
- */
- void _createBuiltinRolesForDBIfNeeded(const std::string& dbname);
-
- /**
- * Returns whether or not the given role exists strictly within the role graph.
- */
- bool _roleExistsDontCreateBuiltin(const RoleName& role);
-
- /**
- * Just creates the role in the role graph, without checking whether or not the role already
- * exists.
- */
- void _createRoleDontCheckIfRoleExists(const RoleName& role);
-
- /**
- * Grants "privilegeToAdd" to "role".
- * Doesn't do any checking as to whether the role exists or is a built-in role.
- */
- void _addPrivilegeToRoleNoChecks(const RoleName& role, const Privilege& privilegeToAdd);
-
-
- // Represents all the outgoing edges to other roles from any given role.
- typedef unordered_map<RoleName, std::vector<RoleName> > EdgeSet;
- // Maps a role name to a list of privileges associated with that role.
- typedef unordered_map<RoleName, PrivilegeVector> RolePrivilegeMap;
-
- EdgeSet _roleToSubordinates;
- unordered_map<RoleName, unordered_set<RoleName> > _roleToIndirectSubordinates;
- EdgeSet _roleToMembers;
- RolePrivilegeMap _directPrivilegesForRole;
- RolePrivilegeMap _allPrivilegesForRole;
- std::set<RoleName> _allRoles;
- };
-
- void swap(RoleGraph& lhs, RoleGraph& rhs);
+ Status handleLogOp(const char* op,
+ const NamespaceString& ns,
+ const BSONObj& o,
+ const BSONObj* o2);
+
+ /**
+ * Recomputes the indirect (getAllPrivileges) data for this graph.
+ *
+ * Must be called between calls to any of the mutation functions and calls
+ * to getAllPrivileges().
+ *
+ * Returns Status::OK() on success. If a cycle is detected, returns
+ * ErrorCodes::GraphContainsCycle, and the status message reveals the cycle.
+ */
+ Status recomputePrivilegeData();
+
+private:
+ // Helper method doing a topological DFS to compute the indirect privilege
+ // data and look for cycles
+ Status _recomputePrivilegeDataHelper(const RoleName& currentRole,
+ unordered_set<RoleName>& visitedRoles);
+
+ /**
+ * If the role name given is not a built-in role, or it is but it's already in the role
+ * graph, then this does nothing. If it *is* a built-in role and this is the first time
+ * this function has been called for this role, it will add the role into the role graph.
+ */
+ void _createBuiltinRoleIfNeeded(const RoleName& role);
+
+ /**
+ * Adds the built-in roles for the given database name to the role graph if they aren't
+ * already present.
+ */
+ void _createBuiltinRolesForDBIfNeeded(const std::string& dbname);
+
+ /**
+ * Returns whether or not the given role exists strictly within the role graph.
+ */
+ bool _roleExistsDontCreateBuiltin(const RoleName& role);
+
+ /**
+ * Just creates the role in the role graph, without checking whether or not the role already
+ * exists.
+ */
+ void _createRoleDontCheckIfRoleExists(const RoleName& role);
+
+ /**
+ * Grants "privilegeToAdd" to "role".
+ * Doesn't do any checking as to whether the role exists or is a built-in role.
+ */
+ void _addPrivilegeToRoleNoChecks(const RoleName& role, const Privilege& privilegeToAdd);
+
+
+ // Represents all the outgoing edges to other roles from any given role.
+ typedef unordered_map<RoleName, std::vector<RoleName>> EdgeSet;
+ // Maps a role name to a list of privileges associated with that role.
+ typedef unordered_map<RoleName, PrivilegeVector> RolePrivilegeMap;
+
+ EdgeSet _roleToSubordinates;
+ unordered_map<RoleName, unordered_set<RoleName>> _roleToIndirectSubordinates;
+ EdgeSet _roleToMembers;
+ RolePrivilegeMap _directPrivilegesForRole;
+ RolePrivilegeMap _allPrivilegesForRole;
+ std::set<RoleName> _allRoles;
+};
+
+void swap(RoleGraph& lhs, RoleGraph& rhs);
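+
+// A minimal usage sketch of the contract described in the class comment above: any
+// getAllPrivileges() call must be preceded by recomputePrivilegeData() once the graph
+// has been mutated. The role names, database, and action below are hypothetical.
+//
+//     RoleGraph graph;
+//     RoleName reporting("reporting", "test");
+//     invariant(graph.createRole(reporting).isOK());
+//     ActionSet findOnly;
+//     findOnly.addAction(ActionType::find);
+//     invariant(graph.addPrivilegeToRole(
+//                        reporting,
+//                        Privilege(ResourcePattern::forDatabaseName("test"), findOnly))
+//                   .isOK());
+//     invariant(graph.addRoleToRole(reporting, RoleName("read", "test")).isOK());
+//     invariant(graph.recomputePrivilegeData().isOK());  // required before the next call
+//     const PrivilegeVector& all = graph.getAllPrivileges(reporting);
+//     // "all" now holds the direct find privilege plus everything inherited from read@test.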
} // namespace mongo
diff --git a/src/mongo/db/auth/role_graph_builtin_roles.cpp b/src/mongo/db/auth/role_graph_builtin_roles.cpp
index 479cc0de9fa..6b8a1762bce 100644
--- a/src/mongo/db/auth/role_graph_builtin_roles.cpp
+++ b/src/mongo/db/auth/role_graph_builtin_roles.cpp
@@ -36,784 +36,648 @@
namespace mongo {
- const std::string RoleGraph::BUILTIN_ROLE_V0_READ = "read";
- const std::string RoleGraph::BUILTIN_ROLE_V0_READ_WRITE= "dbOwner";
- const std::string RoleGraph::BUILTIN_ROLE_V0_ADMIN_READ = "readAnyDatabase";
- const std::string RoleGraph::BUILTIN_ROLE_V0_ADMIN_READ_WRITE= "root";
+const std::string RoleGraph::BUILTIN_ROLE_V0_READ = "read";
+const std::string RoleGraph::BUILTIN_ROLE_V0_READ_WRITE = "dbOwner";
+const std::string RoleGraph::BUILTIN_ROLE_V0_ADMIN_READ = "readAnyDatabase";
+const std::string RoleGraph::BUILTIN_ROLE_V0_ADMIN_READ_WRITE = "root";
namespace {
- const std::string ADMIN_DBNAME = "admin";
-
- const std::string BUILTIN_ROLE_READ = "read";
- const std::string BUILTIN_ROLE_READ_WRITE = "readWrite";
- const std::string BUILTIN_ROLE_USER_ADMIN = "userAdmin";
- const std::string BUILTIN_ROLE_DB_ADMIN = "dbAdmin";
- const std::string BUILTIN_ROLE_CLUSTER_ADMIN = "clusterAdmin";
- const std::string BUILTIN_ROLE_READ_ANY_DB = "readAnyDatabase";
- const std::string BUILTIN_ROLE_READ_WRITE_ANY_DB = "readWriteAnyDatabase";
- const std::string BUILTIN_ROLE_USER_ADMIN_ANY_DB = "userAdminAnyDatabase";
- const std::string BUILTIN_ROLE_DB_ADMIN_ANY_DB = "dbAdminAnyDatabase";
- const std::string BUILTIN_ROLE_ROOT = "root";
- const std::string BUILTIN_ROLE_INTERNAL = "__system";
- const std::string BUILTIN_ROLE_DB_OWNER = "dbOwner";
- const std::string BUILTIN_ROLE_CLUSTER_MONITOR = "clusterMonitor";
- const std::string BUILTIN_ROLE_HOST_MANAGEMENT = "hostManager";
- const std::string BUILTIN_ROLE_CLUSTER_MANAGEMENT = "clusterManager";
- const std::string BUILTIN_ROLE_BACKUP = "backup";
- const std::string BUILTIN_ROLE_RESTORE = "restore";
-
- /// Actions that the "read" role may perform on a normal resources of a specific database, and
- /// that the "readAnyDatabase" role may perform on normal resources of any database.
- ActionSet readRoleActions;
-
- /// Actions that the "readWrite" role may perform on a normal resources of a specific database,
- /// and that the "readWriteAnyDatabase" role may perform on normal resources of any database.
- ActionSet readWriteRoleActions;
-
- /// Actions that the "userAdmin" role may perform on normal resources of a specific database,
- /// and that the "userAdminAnyDatabase" role may perform on normal resources of any database.
- ActionSet userAdminRoleActions;
-
- /// Actions that the "dbAdmin" role may perform on normal resources of a specific database,
- // and that the "dbAdminAnyDatabase" role may perform on normal resources of any database.
- ActionSet dbAdminRoleActions;
-
- /// Actions that the "clusterMonitor" role may perform on the cluster resource.
- ActionSet clusterMonitorRoleClusterActions;
-
- /// Actions that the "clusterMonitor" role may perform on any database.
- ActionSet clusterMonitorRoleDatabaseActions;
-
- /// Actions that the "hostManager" role may perform on the cluster resource.
- ActionSet hostManagerRoleClusterActions;
-
- /// Actions that the "hostManager" role may perform on any database.
- ActionSet hostManagerRoleDatabaseActions;
-
- /// Actions that the "clusterManager" role may perform on the cluster resource.
- ActionSet clusterManagerRoleClusterActions;
-
- /// Actions that the "clusterManager" role may perform on any database
- ActionSet clusterManagerRoleDatabaseActions;
-
- ActionSet& operator<<(ActionSet& target, ActionType source) {
- target.addAction(source);
- return target;
- }
-
- void operator+=(ActionSet& target, const ActionSet& source) {
- target.addAllActionsFromSet(source);
- }
-
- // This sets up the built-in role ActionSets. This is what determines what actions each role
- // is authorized to perform
- MONGO_INITIALIZER(AuthorizationBuiltinRoles)(InitializerContext* context) {
- // Read role
- readRoleActions
- << ActionType::collStats
- << ActionType::dbHash
- << ActionType::dbStats
- << ActionType::find
- << ActionType::killCursors
- << ActionType::listCollections
- << ActionType::listIndexes
- << ActionType::planCacheRead;
-
- // Read-write role
- readWriteRoleActions += readRoleActions;
- readWriteRoleActions
- << ActionType::convertToCapped // db admin gets this also
- << ActionType::createCollection // db admin gets this also
- << ActionType::dropCollection
- << ActionType::dropIndex
- << ActionType::emptycapped
- << ActionType::createIndex
- << ActionType::insert
- << ActionType::remove
- << ActionType::renameCollectionSameDB // db admin gets this also
- << ActionType::update;
-
- // User admin role
- userAdminRoleActions
- << ActionType::changeCustomData
- << ActionType::changePassword
- << ActionType::createUser
- << ActionType::createRole
- << ActionType::dropUser
- << ActionType::dropRole
- << ActionType::grantRole
- << ActionType::revokeRole
- << ActionType::viewUser
- << ActionType::viewRole;
-
-
- // DB admin role
- dbAdminRoleActions
- << ActionType::bypassDocumentValidation
- << ActionType::collMod
- << ActionType::collStats // clusterMonitor gets this also
- << ActionType::compact
- << ActionType::convertToCapped // read_write gets this also
- << ActionType::createCollection // read_write gets this also
- << ActionType::dbStats // clusterMonitor gets this also
- << ActionType::dropCollection
- << ActionType::dropDatabase // clusterAdmin gets this also TODO(spencer): should readWriteAnyDatabase?
- << ActionType::dropIndex
- << ActionType::createIndex
- << ActionType::indexStats
- << ActionType::enableProfiler
- << ActionType::listCollections
- << ActionType::listIndexes
- << ActionType::planCacheIndexFilter
- << ActionType::planCacheRead
- << ActionType::planCacheWrite
- << ActionType::reIndex
- << ActionType::renameCollectionSameDB // read_write gets this also
- << ActionType::repairDatabase
- << ActionType::storageDetails
- << ActionType::validate;
-
- // clusterMonitor role actions that target the cluster resource
- clusterMonitorRoleClusterActions
- << ActionType::connPoolStats
- << ActionType::getCmdLineOpts
- << ActionType::getLog
- << ActionType::getParameter
- << ActionType::getShardMap
- << ActionType::hostInfo
- << ActionType::listDatabases
- << ActionType::listShards // clusterManager gets this also
- << ActionType::netstat
- << ActionType::replSetGetConfig // clusterManager gets this also
- << ActionType::replSetGetStatus // clusterManager gets this also
- << ActionType::serverStatus
- << ActionType::top
- << ActionType::cursorInfo
- << ActionType::inprog
- << ActionType::shardingState;
-
- // clusterMonitor role actions that target a database (or collection) resource
- clusterMonitorRoleDatabaseActions
- << ActionType::collStats // dbAdmin gets this also
- << ActionType::dbStats // dbAdmin gets this also
- << ActionType::getShardVersion;
-
- // hostManager role actions that target the cluster resource
- hostManagerRoleClusterActions
- << ActionType::applicationMessage // clusterManager gets this also
- << ActionType::connPoolSync
- << ActionType::cpuProfiler
- << ActionType::logRotate
- << ActionType::setParameter
- << ActionType::shutdown
- << ActionType::touch
- << ActionType::unlock
- << ActionType::diagLogging
- << ActionType::flushRouterConfig // clusterManager gets this also
- << ActionType::fsync
- << ActionType::invalidateUserCache // userAdminAnyDatabase gets this also
- << ActionType::killop
- << ActionType::resync; // clusterManager gets this also
-
- // hostManager role actions that target the database resource
- hostManagerRoleDatabaseActions
- << ActionType::killCursors
- << ActionType::repairDatabase;
-
-
- // clusterManager role actions that target the cluster resource
- clusterManagerRoleClusterActions
- << ActionType::appendOplogNote // backup gets this also
- << ActionType::applicationMessage // hostManager gets this also
- << ActionType::replSetConfigure
- << ActionType::replSetGetConfig // clusterMonitor gets this also
- << ActionType::replSetGetStatus // clusterMonitor gets this also
- << ActionType::replSetStateChange
- << ActionType::resync // hostManager gets this also
- << ActionType::addShard
- << ActionType::removeShard
- << ActionType::listShards // clusterMonitor gets this also
- << ActionType::flushRouterConfig // hostManager gets this also
- << ActionType::cleanupOrphaned;
-
- clusterManagerRoleDatabaseActions
- << ActionType::splitChunk
- << ActionType::moveChunk
- << ActionType::enableSharding
- << ActionType::splitVector;
-
- return Status::OK();
- }
-
- void addReadOnlyDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges, Privilege(ResourcePattern::forDatabaseName(dbName), readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- NamespaceString(dbName, "system.indexes")),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.js")),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- NamespaceString(dbName, "system.namespaces")),
- readRoleActions));
- }
-
- void addReadWriteDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
- addReadOnlyDbPrivileges(privileges, dbName);
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forDatabaseName(dbName), readWriteRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.js")),
- readWriteRoleActions));
- }
-
- void addUserAdminDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
- privileges->push_back(
- Privilege(ResourcePattern::forDatabaseName(dbName), userAdminRoleActions));
- }
-
- void addDbAdminDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forDatabaseName(dbName), dbAdminRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- NamespaceString(dbName, "system.indexes")),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- NamespaceString(dbName, "system.namespaces")),
- readRoleActions));
-
- ActionSet profileActions = readRoleActions;
- profileActions.addAction(ActionType::convertToCapped);
- profileActions.addAction(ActionType::createCollection);
- profileActions.addAction(ActionType::dropCollection);
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- NamespaceString(dbName, "system.profile")),
- profileActions));
- }
-
- void addDbOwnerPrivileges(PrivilegeVector* privileges, StringData dbName) {
- addReadWriteDbPrivileges(privileges, dbName);
- addDbAdminDbPrivileges(privileges, dbName);
- addUserAdminDbPrivileges(privileges, dbName);
- }
-
-
- void addReadOnlyAnyDbPrivileges(PrivilegeVector* privileges) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(), readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.indexes"),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.js"),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.namespaces"),
- readRoleActions));
- }
-
- void addReadWriteAnyDbPrivileges(PrivilegeVector* privileges) {
- addReadOnlyAnyDbPrivileges(privileges);
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(), readWriteRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.js"), readWriteRoleActions));
- }
-
- void addUserAdminAnyDbPrivileges(PrivilegeVector* privileges) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(), userAdminRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forClusterResource(), ActionType::invalidateUserCache));
-
-
- ActionSet readRoleAndIndexActions;
- readRoleAndIndexActions += readRoleActions;
- readRoleAndIndexActions << ActionType::createIndex << ActionType::dropIndex;
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.users"),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::usersCollectionNamespace),
- readRoleAndIndexActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::rolesCollectionNamespace),
- readRoleAndIndexActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::versionCollectionNamespace),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::usersAltCollectionNamespace),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::usersBackupCollectionNamespace),
- readRoleActions));
- }
-
- void addDbAdminAnyDbPrivileges(PrivilegeVector* privileges) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(), dbAdminRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.indexes"),
- readRoleActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.namespaces"),
- readRoleActions));
- ActionSet profileActions = readRoleActions;
- profileActions.addAction(ActionType::convertToCapped);
- profileActions.addAction(ActionType::createCollection);
- profileActions.addAction(ActionType::dropCollection);
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.profile"),
- profileActions));
- }
-
- void addClusterMonitorPrivileges(PrivilegeVector* privileges) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forClusterResource(), clusterMonitorRoleClusterActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(),
- clusterMonitorRoleDatabaseActions));
- addReadOnlyDbPrivileges(privileges, "config");
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- NamespaceString("local.system.replset")),
- ActionType::find));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.profile"), ActionType::find));
- }
-
- void addHostManagerPrivileges(PrivilegeVector* privileges) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forClusterResource(), hostManagerRoleClusterActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(),
- hostManagerRoleDatabaseActions));
- }
-
- void addClusterManagerPrivileges(PrivilegeVector* privileges) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forClusterResource(), clusterManagerRoleClusterActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(),
- clusterManagerRoleDatabaseActions));
- addReadOnlyDbPrivileges(privileges, "config");
-
- ActionSet configSettingsActions;
- configSettingsActions << ActionType::insert << ActionType::update << ActionType::remove;
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString("config",
- "settings")),
- configSettingsActions));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString("local",
- "system.replset")),
- readRoleActions));
- }
+const std::string ADMIN_DBNAME = "admin";
+
+const std::string BUILTIN_ROLE_READ = "read";
+const std::string BUILTIN_ROLE_READ_WRITE = "readWrite";
+const std::string BUILTIN_ROLE_USER_ADMIN = "userAdmin";
+const std::string BUILTIN_ROLE_DB_ADMIN = "dbAdmin";
+const std::string BUILTIN_ROLE_CLUSTER_ADMIN = "clusterAdmin";
+const std::string BUILTIN_ROLE_READ_ANY_DB = "readAnyDatabase";
+const std::string BUILTIN_ROLE_READ_WRITE_ANY_DB = "readWriteAnyDatabase";
+const std::string BUILTIN_ROLE_USER_ADMIN_ANY_DB = "userAdminAnyDatabase";
+const std::string BUILTIN_ROLE_DB_ADMIN_ANY_DB = "dbAdminAnyDatabase";
+const std::string BUILTIN_ROLE_ROOT = "root";
+const std::string BUILTIN_ROLE_INTERNAL = "__system";
+const std::string BUILTIN_ROLE_DB_OWNER = "dbOwner";
+const std::string BUILTIN_ROLE_CLUSTER_MONITOR = "clusterMonitor";
+const std::string BUILTIN_ROLE_HOST_MANAGEMENT = "hostManager";
+const std::string BUILTIN_ROLE_CLUSTER_MANAGEMENT = "clusterManager";
+const std::string BUILTIN_ROLE_BACKUP = "backup";
+const std::string BUILTIN_ROLE_RESTORE = "restore";
+
+/// Actions that the "read" role may perform on a normal resources of a specific database, and
+/// that the "readAnyDatabase" role may perform on normal resources of any database.
+ActionSet readRoleActions;
+
+/// Actions that the "readWrite" role may perform on a normal resources of a specific database,
+/// and that the "readWriteAnyDatabase" role may perform on normal resources of any database.
+ActionSet readWriteRoleActions;
+
+/// Actions that the "userAdmin" role may perform on normal resources of a specific database,
+/// and that the "userAdminAnyDatabase" role may perform on normal resources of any database.
+ActionSet userAdminRoleActions;
+
+/// Actions that the "dbAdmin" role may perform on normal resources of a specific database,
+// and that the "dbAdminAnyDatabase" role may perform on normal resources of any database.
+ActionSet dbAdminRoleActions;
+
+/// Actions that the "clusterMonitor" role may perform on the cluster resource.
+ActionSet clusterMonitorRoleClusterActions;
+
+/// Actions that the "clusterMonitor" role may perform on any database.
+ActionSet clusterMonitorRoleDatabaseActions;
+
+/// Actions that the "hostManager" role may perform on the cluster resource.
+ActionSet hostManagerRoleClusterActions;
+
+/// Actions that the "hostManager" role may perform on any database.
+ActionSet hostManagerRoleDatabaseActions;
+
+/// Actions that the "clusterManager" role may perform on the cluster resource.
+ActionSet clusterManagerRoleClusterActions;
+
+/// Actions that the "clusterManager" role may perform on any database
+ActionSet clusterManagerRoleDatabaseActions;
+
+ActionSet& operator<<(ActionSet& target, ActionType source) {
+ target.addAction(source);
+ return target;
+}
+
+void operator+=(ActionSet& target, const ActionSet& source) {
+ target.addAllActionsFromSet(source);
+}
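The two operator overloads above are file-local conveniences: << forwards to ActionSet::addAction and += to ActionSet::addAllActionsFromSet. A minimal sketch of how they compose inside this translation unit (the variable names are illustrative, not part of the source):

    ActionSet base;
    base << ActionType::find << ActionType::killCursors;  // two addAction() calls

    ActionSet extended;
    extended += base;                 // copies every action already in base
    extended << ActionType::insert;   // then appends one more action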
+
+// This sets up the built-in role ActionSets. These sets determine which actions each role
+// is authorized to perform.
+MONGO_INITIALIZER(AuthorizationBuiltinRoles)(InitializerContext* context) {
+ // Read role
+ readRoleActions << ActionType::collStats << ActionType::dbHash << ActionType::dbStats
+ << ActionType::find << ActionType::killCursors << ActionType::listCollections
+ << ActionType::listIndexes << ActionType::planCacheRead;
+
+ // Read-write role
+ readWriteRoleActions += readRoleActions;
+ readWriteRoleActions << ActionType::convertToCapped // db admin gets this also
+ << ActionType::createCollection // db admin gets this also
+ << ActionType::dropCollection << ActionType::dropIndex
+ << ActionType::emptycapped << ActionType::createIndex << ActionType::insert
+ << ActionType::remove
+ << ActionType::renameCollectionSameDB // db admin gets this also
+ << ActionType::update;
+
+ // User admin role
+ userAdminRoleActions << ActionType::changeCustomData << ActionType::changePassword
+ << ActionType::createUser << ActionType::createRole << ActionType::dropUser
+ << ActionType::dropRole << ActionType::grantRole << ActionType::revokeRole
+ << ActionType::viewUser << ActionType::viewRole;
+
+
+ // DB admin role
+ dbAdminRoleActions
+ << ActionType::bypassDocumentValidation << ActionType::collMod
+ << ActionType::collStats // clusterMonitor gets this also
+ << ActionType::compact << ActionType::convertToCapped // read_write gets this also
+ << ActionType::createCollection // read_write gets this also
+ << ActionType::dbStats // clusterMonitor gets this also
+ << ActionType::dropCollection
+ << ActionType::
+ dropDatabase // clusterAdmin gets this also TODO(spencer): should readWriteAnyDatabase?
+ << ActionType::dropIndex << ActionType::createIndex << ActionType::indexStats
+ << ActionType::enableProfiler << ActionType::listCollections << ActionType::listIndexes
+ << ActionType::planCacheIndexFilter << ActionType::planCacheRead
+ << ActionType::planCacheWrite << ActionType::reIndex
+ << ActionType::renameCollectionSameDB // read_write gets this also
+ << ActionType::repairDatabase << ActionType::storageDetails << ActionType::validate;
+
+ // clusterMonitor role actions that target the cluster resource
+ clusterMonitorRoleClusterActions
+ << ActionType::connPoolStats << ActionType::getCmdLineOpts << ActionType::getLog
+ << ActionType::getParameter << ActionType::getShardMap << ActionType::hostInfo
+ << ActionType::listDatabases << ActionType::listShards // clusterManager gets this also
+ << ActionType::netstat << ActionType::replSetGetConfig // clusterManager gets this also
+ << ActionType::replSetGetStatus // clusterManager gets this also
+ << ActionType::serverStatus << ActionType::top << ActionType::cursorInfo
+ << ActionType::inprog << ActionType::shardingState;
+
+ // clusterMonitor role actions that target a database (or collection) resource
+ clusterMonitorRoleDatabaseActions << ActionType::collStats // dbAdmin gets this also
+ << ActionType::dbStats // dbAdmin gets this also
+ << ActionType::getShardVersion;
+
+ // hostManager role actions that target the cluster resource
+ hostManagerRoleClusterActions
+ << ActionType::applicationMessage // clusterManager gets this also
+ << ActionType::connPoolSync << ActionType::cpuProfiler << ActionType::logRotate
+ << ActionType::setParameter << ActionType::shutdown << ActionType::touch
+ << ActionType::unlock << ActionType::diagLogging
+ << ActionType::flushRouterConfig // clusterManager gets this also
+ << ActionType::fsync
+ << ActionType::invalidateUserCache // userAdminAnyDatabase gets this also
+ << ActionType::killop << ActionType::resync; // clusterManager gets this also
+
+ // hostManager role actions that target the database resource
+ hostManagerRoleDatabaseActions << ActionType::killCursors << ActionType::repairDatabase;
+
+
+ // clusterManager role actions that target the cluster resource
+ clusterManagerRoleClusterActions
+ << ActionType::appendOplogNote // backup gets this also
+ << ActionType::applicationMessage // hostManager gets this also
+ << ActionType::replSetConfigure
+ << ActionType::replSetGetConfig // clusterMonitor gets this also
+ << ActionType::replSetGetStatus // clusterMonitor gets this also
+ << ActionType::replSetStateChange << ActionType::resync // hostManager gets this also
+ << ActionType::addShard << ActionType::removeShard
+ << ActionType::listShards // clusterMonitor gets this also
+ << ActionType::flushRouterConfig // hostManager gets this also
+ << ActionType::cleanupOrphaned;
+
+ clusterManagerRoleDatabaseActions << ActionType::splitChunk << ActionType::moveChunk
+ << ActionType::enableSharding << ActionType::splitVector;
+
+ return Status::OK();
+}
+
+void addReadOnlyDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forDatabaseName(dbName), readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.indexes")),
+ readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.js")),
+ readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.namespaces")),
+ readRoleActions));
+}
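As a concrete illustration of what the helper above appends, a sketch for a hypothetical database name (the name "test" is not taken from this file):

    PrivilegeVector privileges;
    addReadOnlyDbPrivileges(&privileges, "test");
    // privileges now holds, roughly:
    //   database "test"               -> readRoleActions
    //   any resource                  -> listCollections
    //   "test.system.indexes"         -> readRoleActions
    //   "test.system.js"              -> readRoleActions
    //   "test.system.namespaces"      -> readRoleActions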
+
+void addReadWriteDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
+ addReadOnlyDbPrivileges(privileges, dbName);
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forDatabaseName(dbName), readWriteRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.js")),
+ readWriteRoleActions));
+}
+
+void addUserAdminDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
+ privileges->push_back(
+ Privilege(ResourcePattern::forDatabaseName(dbName), userAdminRoleActions));
+}
+
+void addDbAdminDbPrivileges(PrivilegeVector* privileges, StringData dbName) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forDatabaseName(dbName), dbAdminRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.indexes")),
+ readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.namespaces")),
+ readRoleActions));
+
+ ActionSet profileActions = readRoleActions;
+ profileActions.addAction(ActionType::convertToCapped);
+ profileActions.addAction(ActionType::createCollection);
+ profileActions.addAction(ActionType::dropCollection);
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbName, "system.profile")),
+ profileActions));
+}
+
+void addDbOwnerPrivileges(PrivilegeVector* privileges, StringData dbName) {
+ addReadWriteDbPrivileges(privileges, dbName);
+ addDbAdminDbPrivileges(privileges, dbName);
+ addUserAdminDbPrivileges(privileges, dbName);
+}
+
+
+void addReadOnlyAnyDbPrivileges(PrivilegeVector* privileges) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyNormalResource(), readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.indexes"), readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forCollectionName("system.js"), readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.namespaces"), readRoleActions));
+}
+
+void addReadWriteAnyDbPrivileges(PrivilegeVector* privileges) {
+ addReadOnlyAnyDbPrivileges(privileges);
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyNormalResource(), readWriteRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.js"), readWriteRoleActions));
+}
+
+void addUserAdminAnyDbPrivileges(PrivilegeVector* privileges) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyNormalResource(), userAdminRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forClusterResource(), ActionType::invalidateUserCache));
+
+
+ ActionSet readRoleAndIndexActions;
+ readRoleAndIndexActions += readRoleActions;
+ readRoleAndIndexActions << ActionType::createIndex << ActionType::dropIndex;
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forCollectionName("system.users"), readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::usersCollectionNamespace),
+ readRoleAndIndexActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::rolesCollectionNamespace),
+ readRoleAndIndexActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::versionCollectionNamespace),
+ readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::usersAltCollectionNamespace),
+ readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(
+ AuthorizationManager::usersBackupCollectionNamespace),
+ readRoleActions));
+}
+
+void addDbAdminAnyDbPrivileges(PrivilegeVector* privileges) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forClusterResource(), ActionType::listDatabases));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyNormalResource(), dbAdminRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.indexes"), readRoleActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.namespaces"), readRoleActions));
+ ActionSet profileActions = readRoleActions;
+ profileActions.addAction(ActionType::convertToCapped);
+ profileActions.addAction(ActionType::createCollection);
+ profileActions.addAction(ActionType::dropCollection);
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.profile"), profileActions));
+}
+
+void addClusterMonitorPrivileges(PrivilegeVector* privileges) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forClusterResource(), clusterMonitorRoleClusterActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forAnyNormalResource(), clusterMonitorRoleDatabaseActions));
+ addReadOnlyDbPrivileges(privileges, "config");
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString("local.system.replset")),
+ ActionType::find));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.profile"), ActionType::find));
+}
+
+void addHostManagerPrivileges(PrivilegeVector* privileges) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forClusterResource(), hostManagerRoleClusterActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forAnyNormalResource(), hostManagerRoleDatabaseActions));
+}
+
+void addClusterManagerPrivileges(PrivilegeVector* privileges) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forClusterResource(), clusterManagerRoleClusterActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forAnyNormalResource(), clusterManagerRoleDatabaseActions));
+ addReadOnlyDbPrivileges(privileges, "config");
+
+ ActionSet configSettingsActions;
+ configSettingsActions << ActionType::insert << ActionType::update << ActionType::remove;
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString("config", "settings")),
+ configSettingsActions));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString("local", "system.replset")),
+ readRoleActions));
+}
+
+void addClusterAdminPrivileges(PrivilegeVector* privileges) {
+ addClusterMonitorPrivileges(privileges);
+ addHostManagerPrivileges(privileges);
+ addClusterManagerPrivileges(privileges);
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyNormalResource(), ActionType::dropDatabase));
+}
+
+void addBackupPrivileges(PrivilegeVector* privileges) {
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::collStats));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyNormalResource(), ActionType::find));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listIndexes));
+
+ ActionSet clusterActions;
+ clusterActions << ActionType::getParameter // To check authSchemaVersion
+ << ActionType::listDatabases << ActionType::appendOplogNote; // For BRS
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forClusterResource(), clusterActions));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.indexes"), ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.namespaces"), ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forCollectionName("system.js"), ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.users"), ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::usersAltCollectionNamespace),
+ ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(
+ AuthorizationManager::usersBackupCollectionNamespace),
+ ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::rolesCollectionNamespace),
+ ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::versionCollectionNamespace),
+ ActionType::find));
+
+ ActionSet configSettingsActions;
+ configSettingsActions << ActionType::insert << ActionType::update << ActionType::find;
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString("config", "settings")),
+ configSettingsActions));
+}
+
+void addRestorePrivileges(PrivilegeVector* privileges) {
+ ActionSet actions;
+ actions << ActionType::bypassDocumentValidation << ActionType::collMod
+ << ActionType::createCollection << ActionType::createIndex << ActionType::dropCollection
+ << ActionType::insert;
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyNormalResource(), actions));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forCollectionName("system.js"), actions));
+
+ // Need to be able to query system.namespaces to check existing collection options.
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forCollectionName("system.namespaces"), ActionType::find));
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections));
+
+ // Privileges for user/role management
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forAnyNormalResource(), userAdminRoleActions));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(
+ AuthorizationManager::defaultTempUsersCollectionNamespace),
+ ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(
+ AuthorizationManager::defaultTempRolesCollectionNamespace),
+ ActionType::find));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::usersAltCollectionNamespace),
+ actions));
+
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(ResourcePattern::forExactNamespace(
+ AuthorizationManager::usersBackupCollectionNamespace),
+ actions));
+
+ actions << ActionType::find;
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::versionCollectionNamespace),
+ actions));
+
+ // Need additional actions on system.users.
+ actions << ActionType::update << ActionType::remove;
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forCollectionName("system.users"), actions));
+
+ // Need to be able to run getParameter to check authSchemaVersion
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges, Privilege(ResourcePattern::forClusterResource(), ActionType::getParameter));
+
+ // Need to be able to create an index on the system.roles collection.
+ Privilege::addPrivilegeToPrivilegeVector(
+ privileges,
+ Privilege(
+ ResourcePattern::forExactNamespace(AuthorizationManager::rolesCollectionNamespace),
+ ActionType::createIndex));
+}
+
+void addRootRolePrivileges(PrivilegeVector* privileges) {
+ addClusterAdminPrivileges(privileges);
+ addUserAdminAnyDbPrivileges(privileges);
+ addDbAdminAnyDbPrivileges(privileges);
+ addReadWriteAnyDbPrivileges(privileges);
+}
+
+void addInternalRolePrivileges(PrivilegeVector* privileges) {
+ RoleGraph::generateUniversalPrivileges(privileges);
+}
- void addClusterAdminPrivileges(PrivilegeVector* privileges) {
- addClusterMonitorPrivileges(privileges);
- addHostManagerPrivileges(privileges);
- addClusterManagerPrivileges(privileges);
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(),
- ActionType::dropDatabase));
- }
+} // namespace
- void addBackupPrivileges(PrivilegeVector* privileges) {
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyResource(), ActionType::collStats));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(), ActionType::find));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyResource(), ActionType::listIndexes));
-
- ActionSet clusterActions;
- clusterActions << ActionType::getParameter // To check authSchemaVersion
- << ActionType::listDatabases
- << ActionType::appendOplogNote; // For BRS
- Privilege::addPrivilegeToPrivilegeVector(
- privileges, Privilege(ResourcePattern::forClusterResource(), clusterActions));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.indexes"), ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.namespaces"),
- ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.js"), ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.users"), ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::usersAltCollectionNamespace),
- ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::usersBackupCollectionNamespace),
- ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::rolesCollectionNamespace),
- ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::versionCollectionNamespace),
- ActionType::find));
-
- ActionSet configSettingsActions;
- configSettingsActions << ActionType::insert << ActionType::update << ActionType::find;
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(NamespaceString("config",
- "settings")),
- configSettingsActions));
+bool RoleGraph::addPrivilegesForBuiltinRole(const RoleName& roleName, PrivilegeVector* result) {
+ const bool isAdminDB = (roleName.getDB() == ADMIN_DBNAME);
+
+ if (roleName.getRole() == BUILTIN_ROLE_READ) {
+ addReadOnlyDbPrivileges(result, roleName.getDB());
+ } else if (roleName.getRole() == BUILTIN_ROLE_READ_WRITE) {
+ addReadWriteDbPrivileges(result, roleName.getDB());
+ } else if (roleName.getRole() == BUILTIN_ROLE_USER_ADMIN) {
+ addUserAdminDbPrivileges(result, roleName.getDB());
+ } else if (roleName.getRole() == BUILTIN_ROLE_DB_ADMIN) {
+ addDbAdminDbPrivileges(result, roleName.getDB());
+ } else if (roleName.getRole() == BUILTIN_ROLE_DB_OWNER) {
+ addDbOwnerPrivileges(result, roleName.getDB());
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_READ_ANY_DB) {
+ addReadOnlyAnyDbPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_READ_WRITE_ANY_DB) {
+ addReadWriteAnyDbPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_USER_ADMIN_ANY_DB) {
+ addUserAdminAnyDbPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_DB_ADMIN_ANY_DB) {
+ addDbAdminAnyDbPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_CLUSTER_MONITOR) {
+ addClusterMonitorPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_HOST_MANAGEMENT) {
+ addHostManagerPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_CLUSTER_MANAGEMENT) {
+ addClusterManagerPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_CLUSTER_ADMIN) {
+ addClusterAdminPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_BACKUP) {
+ addBackupPrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_RESTORE) {
+ addRestorePrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_ROOT) {
+ addRootRolePrivileges(result);
+ } else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_INTERNAL) {
+ addInternalRolePrivileges(result);
+ } else {
+ return false;
}
+ return true;
+}
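For orientation, a sketch of how a caller might resolve a built-in role into its privilege list with the function above; it assumes the method is reachable from the call site (this file only shows it invoked from another RoleGraph member), and the role and database names are illustrative:

    RoleGraph graph;
    PrivilegeVector privileges;
    if (graph.addPrivilegesForBuiltinRole(RoleName("read", "test"), &privileges)) {
        // privileges now describes what "read" may do on database "test"
    } else {
        // not a recognized built-in role for that database
    }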
- void addRestorePrivileges(PrivilegeVector* privileges) {
- ActionSet actions;
- actions
- << ActionType::bypassDocumentValidation
- << ActionType::collMod
- << ActionType::createCollection
- << ActionType::createIndex
- << ActionType::dropCollection
- << ActionType::insert
- ;
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(), actions));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.js"), actions));
-
- // Need to be able to query system.namespaces to check existing collection options.
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.namespaces"),
- ActionType::find));
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyResource(), ActionType::listCollections));
-
- // Privileges for user/role management
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forAnyNormalResource(), userAdminRoleActions));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::defaultTempUsersCollectionNamespace),
- ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::defaultTempRolesCollectionNamespace),
- ActionType::find));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::usersAltCollectionNamespace),
- actions));
-
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::usersBackupCollectionNamespace),
- actions));
-
- actions << ActionType::find;
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(
- ResourcePattern::forExactNamespace(
- AuthorizationManager::versionCollectionNamespace),
- actions));
-
- // Need additional actions on system.users.
- actions << ActionType::update << ActionType::remove;
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forCollectionName("system.users"), actions));
-
- // Need to be able to run getParameter to check authSchemaVersion
- Privilege::addPrivilegeToPrivilegeVector(
- privileges, Privilege(ResourcePattern::forClusterResource(),
- ActionType::getParameter));
-
- // Need to be able to create an index on the system.roles collection.
- Privilege::addPrivilegeToPrivilegeVector(
- privileges,
- Privilege(ResourcePattern::forExactNamespace(
- AuthorizationManager::rolesCollectionNamespace),
- ActionType::createIndex));
- }
+void RoleGraph::generateUniversalPrivileges(PrivilegeVector* privileges) {
+ ActionSet allActions;
+ allActions.addAllActions();
+ privileges->push_back(Privilege(ResourcePattern::forAnyResource(), allActions));
+}
- void addRootRolePrivileges(PrivilegeVector* privileges) {
- addClusterAdminPrivileges(privileges);
- addUserAdminAnyDbPrivileges(privileges);
- addDbAdminAnyDbPrivileges(privileges);
- addReadWriteAnyDbPrivileges(privileges);
+bool RoleGraph::isBuiltinRole(const RoleName& role) {
+ if (!NamespaceString::validDBName(role.getDB()) || role.getDB() == "$external") {
+ return false;
}
- void addInternalRolePrivileges(PrivilegeVector* privileges) {
- RoleGraph::generateUniversalPrivileges(privileges);
- }
+ bool isAdminDB = role.getDB() == ADMIN_DBNAME;
-} // namespace
-
- bool RoleGraph::addPrivilegesForBuiltinRole(const RoleName& roleName,
- PrivilegeVector* result) {
- const bool isAdminDB = (roleName.getDB() == ADMIN_DBNAME);
-
- if (roleName.getRole() == BUILTIN_ROLE_READ) {
- addReadOnlyDbPrivileges(result, roleName.getDB());
- }
- else if (roleName.getRole() == BUILTIN_ROLE_READ_WRITE) {
- addReadWriteDbPrivileges(result, roleName.getDB());
- }
- else if (roleName.getRole() == BUILTIN_ROLE_USER_ADMIN) {
- addUserAdminDbPrivileges(result, roleName.getDB());
- }
- else if (roleName.getRole() == BUILTIN_ROLE_DB_ADMIN) {
- addDbAdminDbPrivileges(result, roleName.getDB());
- }
- else if (roleName.getRole() == BUILTIN_ROLE_DB_OWNER) {
- addDbOwnerPrivileges(result, roleName.getDB());
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_READ_ANY_DB) {
- addReadOnlyAnyDbPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_READ_WRITE_ANY_DB) {
- addReadWriteAnyDbPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_USER_ADMIN_ANY_DB) {
- addUserAdminAnyDbPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_DB_ADMIN_ANY_DB) {
- addDbAdminAnyDbPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_CLUSTER_MONITOR) {
- addClusterMonitorPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_HOST_MANAGEMENT) {
- addHostManagerPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_CLUSTER_MANAGEMENT) {
- addClusterManagerPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_CLUSTER_ADMIN) {
- addClusterAdminPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_BACKUP) {
- addBackupPrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_RESTORE) {
- addRestorePrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_ROOT) {
- addRootRolePrivileges(result);
- }
- else if (isAdminDB && roleName.getRole() == BUILTIN_ROLE_INTERNAL) {
- addInternalRolePrivileges(result);
- }
- else {
- return false;
- }
+ if (role.getRole() == BUILTIN_ROLE_READ) {
+ return true;
+ } else if (role.getRole() == BUILTIN_ROLE_READ_WRITE) {
+ return true;
+ } else if (role.getRole() == BUILTIN_ROLE_USER_ADMIN) {
+ return true;
+ } else if (role.getRole() == BUILTIN_ROLE_DB_ADMIN) {
+ return true;
+ } else if (role.getRole() == BUILTIN_ROLE_DB_OWNER) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_READ_ANY_DB) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_READ_WRITE_ANY_DB) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_USER_ADMIN_ANY_DB) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_DB_ADMIN_ANY_DB) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_CLUSTER_MONITOR) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_HOST_MANAGEMENT) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_CLUSTER_MANAGEMENT) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_CLUSTER_ADMIN) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_BACKUP) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_RESTORE) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_ROOT) {
+ return true;
+ } else if (isAdminDB && role.getRole() == BUILTIN_ROLE_INTERNAL) {
return true;
}
-
- void RoleGraph::generateUniversalPrivileges(PrivilegeVector* privileges) {
- ActionSet allActions;
- allActions.addAllActions();
- privileges->push_back(Privilege(ResourcePattern::forAnyResource(), allActions));
- }
-
- bool RoleGraph::isBuiltinRole(const RoleName& role) {
- if (!NamespaceString::validDBName(role.getDB()) || role.getDB() == "$external") {
- return false;
- }
-
- bool isAdminDB = role.getDB() == ADMIN_DBNAME;
-
- if (role.getRole() == BUILTIN_ROLE_READ) {
- return true;
- }
- else if (role.getRole() == BUILTIN_ROLE_READ_WRITE) {
- return true;
- }
- else if (role.getRole() == BUILTIN_ROLE_USER_ADMIN) {
- return true;
- }
- else if (role.getRole() == BUILTIN_ROLE_DB_ADMIN) {
- return true;
- }
- else if (role.getRole() == BUILTIN_ROLE_DB_OWNER) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_READ_ANY_DB) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_READ_WRITE_ANY_DB) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_USER_ADMIN_ANY_DB) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_DB_ADMIN_ANY_DB) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_CLUSTER_MONITOR) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_HOST_MANAGEMENT) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_CLUSTER_MANAGEMENT) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_CLUSTER_ADMIN) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_BACKUP) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_RESTORE) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_ROOT) {
- return true;
- }
- else if (isAdminDB && role.getRole() == BUILTIN_ROLE_INTERNAL) {
- return true;
- }
- return false;
+ return false;
+}
+
+void RoleGraph::_createBuiltinRolesForDBIfNeeded(const std::string& dbname) {
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_READ, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_READ_WRITE, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_USER_ADMIN, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_DB_ADMIN, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_DB_OWNER, dbname));
+
+ if (dbname == "admin") {
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_READ_ANY_DB, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_READ_WRITE_ANY_DB, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_USER_ADMIN_ANY_DB, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_DB_ADMIN_ANY_DB, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_CLUSTER_MONITOR, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_HOST_MANAGEMENT, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_CLUSTER_MANAGEMENT, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_CLUSTER_ADMIN, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_BACKUP, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_RESTORE, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_ROOT, dbname));
+ _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_INTERNAL, dbname));
}
+}
- void RoleGraph::_createBuiltinRolesForDBIfNeeded(const std::string& dbname) {
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_READ, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_READ_WRITE, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_USER_ADMIN, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_DB_ADMIN, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_DB_OWNER, dbname));
-
- if (dbname == "admin") {
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_READ_ANY_DB, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_READ_WRITE_ANY_DB, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_USER_ADMIN_ANY_DB, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_DB_ADMIN_ANY_DB, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_CLUSTER_MONITOR, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_HOST_MANAGEMENT, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_CLUSTER_MANAGEMENT, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_CLUSTER_ADMIN, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_BACKUP, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_RESTORE, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_ROOT, dbname));
- _createBuiltinRoleIfNeeded(RoleName(BUILTIN_ROLE_INTERNAL, dbname));
- }
+void RoleGraph::_createBuiltinRoleIfNeeded(const RoleName& role) {
+ if (!isBuiltinRole(role) || _roleExistsDontCreateBuiltin(role)) {
+ return;
}
- void RoleGraph::_createBuiltinRoleIfNeeded(const RoleName& role) {
- if (!isBuiltinRole(role) || _roleExistsDontCreateBuiltin(role)) {
- return;
- }
-
- _createRoleDontCheckIfRoleExists(role);
- PrivilegeVector privileges;
- fassert(17145, addPrivilegesForBuiltinRole(role, &privileges));
- for (size_t i = 0; i < privileges.size(); ++i) {
- _addPrivilegeToRoleNoChecks(role, privileges[i]);
- _allPrivilegesForRole[role].push_back(privileges[i]);
- }
+ _createRoleDontCheckIfRoleExists(role);
+ PrivilegeVector privileges;
+ fassert(17145, addPrivilegesForBuiltinRole(role, &privileges));
+ for (size_t i = 0; i < privileges.size(); ++i) {
+ _addPrivilegeToRoleNoChecks(role, privileges[i]);
+ _allPrivilegesForRole[role].push_back(privileges[i]);
}
+}
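The fassert(17145, ...) above encodes the invariant that any role name accepted by isBuiltinRole is also handled by addPrivilegesForBuiltinRole. A sketch of that invariant from a caller's point of view, assuming both methods are accessible as written (names illustrative):

    RoleGraph graph;
    RoleName monitor("clusterMonitor", "admin");
    if (graph.isBuiltinRole(monitor)) {
        PrivilegeVector privileges;
        bool added = graph.addPrivilegesForBuiltinRole(monitor, &privileges);
        // added is expected to be true whenever isBuiltinRole() accepted the name
    }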
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/role_graph_test.cpp b/src/mongo/db/auth/role_graph_test.cpp
index 4e99f584276..745207c4b6d 100644
--- a/src/mongo/db/auth/role_graph_test.cpp
+++ b/src/mongo/db/auth/role_graph_test.cpp
@@ -40,693 +40,696 @@
namespace mongo {
namespace {
- // Tests adding and removing roles from other roles, the RoleNameIterator, and the
- // getDirectMembers and getDirectSubordinates methods
- TEST(RoleGraphTest, AddRemoveRoles) {
- RoleName roleA("roleA", "dbA");
- RoleName roleB("roleB", "dbB");
- RoleName roleC("roleC", "dbC");
- RoleName roleD("readWrite", "dbD"); // built-in role
-
- RoleGraph graph;
- ASSERT_OK(graph.createRole(roleA));
- ASSERT_OK(graph.createRole(roleB));
- ASSERT_OK(graph.createRole(roleC));
-
- RoleNameIterator it;
- it = graph.getDirectSubordinates(roleA);
- ASSERT_FALSE(it.more());
- it = graph.getDirectMembers(roleA);
- ASSERT_FALSE(it.more());
-
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
-
- // A -> B
- it = graph.getDirectSubordinates(roleA);
- ASSERT_TRUE(it.more());
- // should not advance the iterator
- ASSERT_EQUALS(it.get().getFullName(), roleB.getFullName());
- ASSERT_EQUALS(it.get().getFullName(), roleB.getFullName());
+// Tests adding and removing roles from other roles, the RoleNameIterator, and the
+// getDirectMembers and getDirectSubordinates methods
+TEST(RoleGraphTest, AddRemoveRoles) {
+ RoleName roleA("roleA", "dbA");
+ RoleName roleB("roleB", "dbB");
+ RoleName roleC("roleC", "dbC");
+ RoleName roleD("readWrite", "dbD"); // built-in role
+
+ RoleGraph graph;
+ ASSERT_OK(graph.createRole(roleA));
+ ASSERT_OK(graph.createRole(roleB));
+ ASSERT_OK(graph.createRole(roleC));
+
+ RoleNameIterator it;
+ it = graph.getDirectSubordinates(roleA);
+ ASSERT_FALSE(it.more());
+ it = graph.getDirectMembers(roleA);
+ ASSERT_FALSE(it.more());
+
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+
+ // A -> B
+ it = graph.getDirectSubordinates(roleA);
+ ASSERT_TRUE(it.more());
+ // should not advance the iterator
+ ASSERT_EQUALS(it.get().getFullName(), roleB.getFullName());
+ ASSERT_EQUALS(it.get().getFullName(), roleB.getFullName());
+ ASSERT_EQUALS(it.next().getFullName(), roleB.getFullName());
+ ASSERT_FALSE(it.more());
+
+ it = graph.getDirectMembers(roleA);
+ ASSERT_FALSE(it.more());
+
+ it = graph.getDirectMembers(roleB);
+ ASSERT_EQUALS(it.next().getFullName(), roleA.getFullName());
+ ASSERT_FALSE(it.more());
+
+ it = graph.getDirectSubordinates(roleB);
+ ASSERT_FALSE(it.more());
+
+ ASSERT_OK(graph.addRoleToRole(roleA, roleC));
+ ASSERT_OK(graph.addRoleToRole(roleB, roleC));
+ ASSERT_OK(graph.addRoleToRole(roleB, roleD));
+ // Adding the same role twice should be a no-op, duplicate roles should be de-duped.
+ ASSERT_OK(graph.addRoleToRole(roleB, roleD));
+
+ /*
+ * Graph now looks like:
+ * A
+ * / \
+ * v v
+ * B -> C
+ * |
+ * v
+ * D
+ */
+
+
+ it = graph.getDirectSubordinates(roleA); // should be roleB and roleC, order doesn't matter
+ RoleName cur = it.next();
+ if (cur == roleB) {
+ ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
+ } else if (cur == roleC) {
ASSERT_EQUALS(it.next().getFullName(), roleB.getFullName());
- ASSERT_FALSE(it.more());
-
- it = graph.getDirectMembers(roleA);
- ASSERT_FALSE(it.more());
-
- it = graph.getDirectMembers(roleB);
- ASSERT_EQUALS(it.next().getFullName(), roleA.getFullName());
- ASSERT_FALSE(it.more());
-
- it = graph.getDirectSubordinates(roleB);
- ASSERT_FALSE(it.more());
-
- ASSERT_OK(graph.addRoleToRole(roleA, roleC));
- ASSERT_OK(graph.addRoleToRole(roleB, roleC));
- ASSERT_OK(graph.addRoleToRole(roleB, roleD));
- // Adding the same role twice should be a no-op, duplicate roles should be de-duped.
- ASSERT_OK(graph.addRoleToRole(roleB, roleD));
-
- /*
- * Graph now looks like:
- * A
- * / \
- * v v
- * B -> C
- * |
- * v
- * D
- */
-
-
- it = graph.getDirectSubordinates(roleA); // should be roleB and roleC, order doesn't matter
+ } else {
+ FAIL(mongoutils::str::stream() << "unexpected role returned: " << cur.getFullName());
+ }
+ ASSERT_FALSE(it.more());
+
+ ASSERT_OK(graph.recomputePrivilegeData());
+ it = graph.getIndirectSubordinates(roleA); // should have roleB, roleC and roleD
+ bool hasB = false;
+ bool hasC = false;
+ bool hasD = false;
+ int num = 0;
+ while (it.more()) {
+ ++num;
RoleName cur = it.next();
if (cur == roleB) {
- ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
+ hasB = true;
} else if (cur == roleC) {
- ASSERT_EQUALS(it.next().getFullName(), roleB.getFullName());
- } else {
- FAIL(mongoutils::str::stream() << "unexpected role returned: " << cur.getFullName());
- }
- ASSERT_FALSE(it.more());
-
- ASSERT_OK(graph.recomputePrivilegeData());
- it = graph.getIndirectSubordinates(roleA); // should have roleB, roleC and roleD
- bool hasB = false;
- bool hasC = false;
- bool hasD = false;
- int num = 0;
- while (it.more()) {
- ++num;
- RoleName cur = it.next();
- if (cur == roleB) {
- hasB = true;
- } else if (cur == roleC) {
- hasC = true;
- } else if (cur == roleD) {
- hasD = true;
- } else {
- FAIL(mongoutils::str::stream() << "unexpected role returned: " <<
- cur.getFullName());
- }
- }
- ASSERT_EQUALS(3, num);
- ASSERT(hasB);
- ASSERT(hasC);
- ASSERT(hasD);
-
- it = graph.getDirectSubordinates(roleB); // should be roleC and roleD, order doesn't matter
- cur = it.next();
- if (cur == roleC) {
- ASSERT_EQUALS(it.next().getFullName(), roleD.getFullName());
+ hasC = true;
} else if (cur == roleD) {
- ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
- } else {
- FAIL(mongoutils::str::stream() << "unexpected role returned: " << cur.getFullName());
- }
- ASSERT_FALSE(it.more());
-
- it = graph.getDirectSubordinates(roleC);
- ASSERT_FALSE(it.more());
-
- it = graph.getDirectMembers(roleA);
- ASSERT_FALSE(it.more());
-
- it = graph.getDirectMembers(roleB);
- ASSERT_EQUALS(it.next().getFullName(), roleA.getFullName());
- ASSERT_FALSE(it.more());
-
- it = graph.getDirectMembers(roleC); // should be role A and role B, order doesn't matter
- cur = it.next();
- if (cur == roleA) {
- ASSERT_EQUALS(it.next().getFullName(), roleB.getFullName());
- } else if (cur == roleB) {
- ASSERT_EQUALS(it.next().getFullName(), roleA.getFullName());
+ hasD = true;
} else {
FAIL(mongoutils::str::stream() << "unexpected role returned: " << cur.getFullName());
}
- ASSERT_FALSE(it.more());
-
- // Now remove roleD from roleB and make sure graph is update correctly
- ASSERT_OK(graph.removeRoleFromRole(roleB, roleD));
-
- /*
- * Graph now looks like:
- * A
- * / \
- * v v
- * B -> C
- */
- it = graph.getDirectSubordinates(roleB); // should be just roleC
- ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
- ASSERT_FALSE(it.more());
-
- it = graph.getDirectSubordinates(roleD); // should be empty
- ASSERT_FALSE(it.more());
-
-
- // Now delete roleB entirely and make sure that the other roles are updated properly
- ASSERT_OK(graph.deleteRole(roleB));
- ASSERT_NOT_OK(graph.deleteRole(roleB));
- it = graph.getDirectSubordinates(roleA);
- ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
- ASSERT_FALSE(it.more());
- it = graph.getDirectMembers(roleC);
- ASSERT_EQUALS(it.next().getFullName(), roleA.getFullName());
- ASSERT_FALSE(it.more());
}
-
- const ResourcePattern collectionAFooResource(ResourcePattern::forExactNamespace(
- NamespaceString("dbA.foo")));
- const ResourcePattern db1Resource(ResourcePattern::forDatabaseName("db1"));
- const ResourcePattern db2Resource(ResourcePattern::forDatabaseName("db2"));
- const ResourcePattern dbAResource(ResourcePattern::forDatabaseName("dbA"));
- const ResourcePattern dbBResource(ResourcePattern::forDatabaseName("dbB"));
- const ResourcePattern dbCResource(ResourcePattern::forDatabaseName("dbC"));
- const ResourcePattern dbDResource(ResourcePattern::forDatabaseName("dbD"));
- const ResourcePattern dbResource(ResourcePattern::forDatabaseName("db"));
-
- // Tests that adding multiple privileges on the same resource correctly collapses those to one
- // privilege
- TEST(RoleGraphTest, AddPrivileges) {
- RoleName roleA("roleA", "dbA");
-
- RoleGraph graph;
- ASSERT_OK(graph.createRole(roleA));
-
- // Test adding a single privilege
- ActionSet actions;
- actions.addAction(ActionType::find);
- ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(dbAResource, actions)));
-
- PrivilegeVector privileges = graph.getDirectPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(actions.toString(), privileges[0].getActions().toString());
-
- // Add a privilege on a different resource
- ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(collectionAFooResource, actions)));
- privileges = graph.getDirectPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(actions.toString(), privileges[0].getActions().toString());
- ASSERT_EQUALS(collectionAFooResource, privileges[1].getResourcePattern());
- ASSERT_EQUALS(actions.toString(), privileges[1].getActions().toString());
-
-
- // Add different privileges on an existing resource and make sure they get de-duped
- actions.removeAllActions();
- actions.addAction(ActionType::insert);
-
- PrivilegeVector privilegesToAdd;
- privilegesToAdd.push_back(Privilege(dbAResource, actions));
-
- actions.removeAllActions();
- actions.addAction(ActionType::update);
- privilegesToAdd.push_back(Privilege(dbAResource, actions));
-
- ASSERT_OK(graph.addPrivilegesToRole(roleA, privilegesToAdd));
-
- privileges = graph.getDirectPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- ASSERT_NOT_EQUALS(actions.toString(), privileges[0].getActions().toString());
- actions.addAction(ActionType::find);
- actions.addAction(ActionType::insert);
- ASSERT_EQUALS(actions.toString(), privileges[0].getActions().toString());
- actions.removeAction(ActionType::insert);
- actions.removeAction(ActionType::update);
- ASSERT_EQUALS(collectionAFooResource, privileges[1].getResourcePattern());
- ASSERT_EQUALS(actions.toString(), privileges[1].getActions().toString());
- }
-
- // Tests that recomputePrivilegeData correctly detects cycles in the graph.
- TEST(RoleGraphTest, DetectCycles) {
- RoleName roleA("roleA", "dbA");
- RoleName roleB("roleB", "dbB");
- RoleName roleC("roleC", "dbC");
- RoleName roleD("roleD", "dbD");
-
- RoleGraph graph;
- ASSERT_OK(graph.createRole(roleA));
- ASSERT_OK(graph.createRole(roleB));
- ASSERT_OK(graph.createRole(roleC));
- ASSERT_OK(graph.createRole(roleD));
-
- // Add a role to itself
- ASSERT_OK(graph.recomputePrivilegeData());
- ASSERT_OK(graph.addRoleToRole(roleA, roleA));
- ASSERT_NOT_OK(graph.recomputePrivilegeData());
- ASSERT_OK(graph.removeRoleFromRole(roleA, roleA));
- ASSERT_OK(graph.recomputePrivilegeData());
-
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
- ASSERT_OK(graph.recomputePrivilegeData());
- ASSERT_OK(graph.addRoleToRole(roleA, roleC));
- ASSERT_OK(graph.addRoleToRole(roleB, roleC));
- ASSERT_OK(graph.recomputePrivilegeData());
- /*
- * Graph now looks like:
- * A
- * / \
- * v v
- * B -> C
- */
- ASSERT_OK(graph.addRoleToRole(roleC, roleD));
- ASSERT_OK(graph.addRoleToRole(roleD, roleB)); // Add a cycle
- /*
- * Graph now looks like:
- * A
- * / \
- * v v
- * B -> C
- * ^ /
- * \ v
- * D
- */
- ASSERT_NOT_OK(graph.recomputePrivilegeData());
- ASSERT_OK(graph.removeRoleFromRole(roleD, roleB));
- ASSERT_OK(graph.recomputePrivilegeData());
- }
-
- // Tests that recomputePrivilegeData correctly updates transitive privilege data for all roles.
- TEST(RoleGraphTest, RecomputePrivilegeData) {
- // We create 4 roles and give each of them a unique privilege. After that the direct
- // privileges for all the roles are not touched. The only thing that is changed is the
- // role membership graph, and we test how that affects the set of all transitive privileges
- // for each role.
- RoleName roleA("roleA", "dbA");
- RoleName roleB("roleB", "dbB");
- RoleName roleC("roleC", "dbC");
- RoleName roleD("readWrite", "dbD"); // built-in role
-
- ActionSet actions;
- actions.addAllActions();
-
- RoleGraph graph;
- ASSERT_OK(graph.createRole(roleA));
- ASSERT_OK(graph.createRole(roleB));
- ASSERT_OK(graph.createRole(roleC));
-
- ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(dbAResource, actions)));
- ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbBResource, actions)));
- ASSERT_OK(graph.addPrivilegeToRole(roleC, Privilege(dbCResource, actions)));
-
- ASSERT_OK(graph.recomputePrivilegeData());
-
- PrivilegeVector privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
-
- // At this point we have all 4 roles set up, each with their own privilege, but no
- // roles have been granted to each other.
-
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
- // Role graph: A->B
- ASSERT_OK(graph.recomputePrivilegeData());
- privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
-
-        // Add roleC's privileges to roleB and make sure roleA gets them as well.
- ASSERT_OK(graph.addRoleToRole(roleB, roleC));
- // Role graph: A->B->C
- ASSERT_OK(graph.recomputePrivilegeData());
- privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(3), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
- ASSERT_EQUALS(dbCResource, privileges[2].getResourcePattern());
- privileges = graph.getAllPrivileges(roleB);
- ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
- ASSERT_EQUALS(dbBResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(dbCResource, privileges[1].getResourcePattern());
-
-        // Add roleD's privileges to roleC and make sure that roleA and roleB get them as well.
- ASSERT_OK(graph.addRoleToRole(roleC, roleD));
- // Role graph: A->B->C->D
- ASSERT_OK(graph.recomputePrivilegeData());
- privileges = graph.getAllPrivileges(roleA);
- const size_t readWriteRolePrivilegeCount = graph.getAllPrivileges(roleD).size();
- ASSERT_EQUALS(readWriteRolePrivilegeCount + 3, privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
- ASSERT_EQUALS(dbCResource, privileges[2].getResourcePattern());
- privileges = graph.getAllPrivileges(roleB);
- ASSERT_EQUALS(readWriteRolePrivilegeCount + 2, privileges.size());
- ASSERT_EQUALS(dbBResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(dbCResource, privileges[1].getResourcePattern());
- privileges = graph.getAllPrivileges(roleC);
- ASSERT_EQUALS(readWriteRolePrivilegeCount + 1, privileges.size());
- ASSERT_EQUALS(dbCResource, privileges[0].getResourcePattern());
-
- // Remove roleC from roleB, make sure that roleA then loses both roleC's and roleD's
- // privileges
- ASSERT_OK(graph.removeRoleFromRole(roleB, roleC));
- // Role graph: A->B C->D
- ASSERT_OK(graph.recomputePrivilegeData());
- privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
- privileges = graph.getAllPrivileges(roleB);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_EQUALS(dbBResource, privileges[0].getResourcePattern());
- privileges = graph.getAllPrivileges(roleC);
- ASSERT_EQUALS(readWriteRolePrivilegeCount + 1, privileges.size());
- ASSERT_EQUALS(dbCResource, privileges[0].getResourcePattern());
- privileges = graph.getAllPrivileges(roleD);
- ASSERT_EQUALS(readWriteRolePrivilegeCount, privileges.size());
-
- // Make sure direct privileges were untouched
- privileges = graph.getDirectPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- privileges = graph.getDirectPrivileges(roleB);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_EQUALS(dbBResource, privileges[0].getResourcePattern());
- privileges = graph.getDirectPrivileges(roleC);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_EQUALS(dbCResource, privileges[0].getResourcePattern());
- privileges = graph.getDirectPrivileges(roleD);
- ASSERT_EQUALS(readWriteRolePrivilegeCount, privileges.size());
- }
-
-    // Test that if you grant one role to another, then remove it and change its privileges, then
- // re-grant it, the receiving role sees the new privileges and not the old ones.
- TEST(RoleGraphTest, ReAddRole) {
- RoleName roleA("roleA", "dbA");
- RoleName roleB("roleB", "dbB");
- RoleName roleC("roleC", "dbC");
-
- ActionSet actionsA, actionsB, actionsC;
- actionsA.addAction(ActionType::find);
- actionsB.addAction(ActionType::insert);
- actionsC.addAction(ActionType::update);
-
- RoleGraph graph;
- ASSERT_OK(graph.createRole(roleA));
- ASSERT_OK(graph.createRole(roleB));
- ASSERT_OK(graph.createRole(roleC));
-
- ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(dbResource, actionsA)));
- ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbResource, actionsB)));
- ASSERT_OK(graph.addPrivilegeToRole(roleC, Privilege(dbResource, actionsC)));
-
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
- ASSERT_OK(graph.addRoleToRole(roleB, roleC)); // graph: A <- B <- C
-
- ASSERT_OK(graph.recomputePrivilegeData());
-
- // roleA should have privileges from roleB and roleC
- PrivilegeVector privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::insert));
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::update));
-
- // Now remove roleB from roleA. B still is a member of C, but A no longer should have
- // privileges from B or C.
- ASSERT_OK(graph.removeRoleFromRole(roleA, roleB));
- ASSERT_OK(graph.recomputePrivilegeData());
-
- // roleA should no longer have the privileges from roleB or roleC
- privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::insert));
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::update));
-
- // Change the privileges that roleB grants
- ASSERT_OK(graph.removeAllPrivilegesFromRole(roleB));
- ActionSet newActionsB;
- newActionsB.addAction(ActionType::remove);
- ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbResource, newActionsB)));
-
- // Grant roleB back to roleA, make sure roleA has roleB's new privilege but not its old one.
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
- ASSERT_OK(graph.recomputePrivilegeData());
-
- privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::update)); // should get roleC's actions again
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::remove)); // roleB should grant this to roleA
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::insert)); // no roles have this action anymore
-
- // Now delete roleB completely. A should once again lose the privileges from both B and C.
- ASSERT_OK(graph.deleteRole(roleB));
- ASSERT_OK(graph.recomputePrivilegeData());
-
- // roleA should no longer have the privileges from roleB or roleC
- privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::update));
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::remove));
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::insert));
-
- // Now re-create roleB and give it a new privilege, then grant it back to roleA.
- // RoleA should get its new privilege but not roleC's privilege this time nor either of
- // roleB's old privileges.
- ASSERT_OK(graph.createRole(roleB));
- actionsB.removeAllActions();
- actionsB.addAction(ActionType::shutdown);
- ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbResource, actionsB)));
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
- ASSERT_OK(graph.recomputePrivilegeData());
-
- privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
- ASSERT_TRUE(privileges[0].getActions().contains(ActionType::shutdown));
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::update));
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::remove));
- ASSERT_FALSE(privileges[0].getActions().contains(ActionType::insert));
- }
-
- // Tests copy constructor and swap functionality.
- TEST(RoleGraphTest, CopySwap) {
- RoleName roleA("roleA", "dbA");
- RoleName roleB("roleB", "dbB");
- RoleName roleC("roleC", "dbC");
-
- RoleGraph graph;
- ASSERT_OK(graph.createRole(roleA));
- ASSERT_OK(graph.createRole(roleB));
- ASSERT_OK(graph.createRole(roleC));
-
- ActionSet actions;
- actions.addAction(ActionType::find);
- ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(dbAResource, actions)));
- ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbBResource, actions)));
- ASSERT_OK(graph.addPrivilegeToRole(roleC, Privilege(dbCResource, actions)));
-
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
-
- // Make a copy of the graph to do further modifications on.
- RoleGraph tempGraph(graph);
- ASSERT_OK(tempGraph.addRoleToRole(roleB, roleC));
- tempGraph.recomputePrivilegeData();
-
- // Now swap the copy back with the original graph and make sure the original was updated
- // properly.
- swap(tempGraph, graph);
-
- RoleNameIterator it = graph.getDirectSubordinates(roleB);
- ASSERT_TRUE(it.more());
+ ASSERT_EQUALS(3, num);
+ ASSERT(hasB);
+ ASSERT(hasC);
+ ASSERT(hasD);
+
+ it = graph.getDirectSubordinates(roleB); // should be roleC and roleD, order doesn't matter
+ cur = it.next();
+ if (cur == roleC) {
+ ASSERT_EQUALS(it.next().getFullName(), roleD.getFullName());
+ } else if (cur == roleD) {
ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
- ASSERT_FALSE(it.more());
-
- graph.getAllPrivileges(roleA); // should have privileges from roleB *and* role C
- PrivilegeVector privileges = graph.getAllPrivileges(roleA);
- ASSERT_EQUALS(static_cast<size_t>(3), privileges.size());
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
- ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
- ASSERT_EQUALS(dbCResource, privileges[2].getResourcePattern());
+ } else {
+ FAIL(mongoutils::str::stream() << "unexpected role returned: " << cur.getFullName());
}
+ ASSERT_FALSE(it.more());
- // Tests error handling
- TEST(RoleGraphTest, ErrorHandling) {
- RoleName roleA("roleA", "dbA");
- RoleName roleB("roleB", "dbB");
- RoleName roleC("roleC", "dbC");
-
- ActionSet actions;
- actions.addAction(ActionType::find);
- Privilege privilege1(db1Resource, actions);
- Privilege privilege2(db2Resource, actions);
- PrivilegeVector privileges;
- privileges.push_back(privilege1);
- privileges.push_back(privilege2);
-
- RoleGraph graph;
- // None of the roles exist yet.
- ASSERT_NOT_OK(graph.addPrivilegeToRole(roleA, privilege1));
- ASSERT_NOT_OK(graph.addPrivilegesToRole(roleA, privileges));
- ASSERT_NOT_OK(graph.removePrivilegeFromRole(roleA, privilege1));
- ASSERT_NOT_OK(graph.removePrivilegesFromRole(roleA, privileges));
- ASSERT_NOT_OK(graph.removeAllPrivilegesFromRole(roleA));
- ASSERT_NOT_OK(graph.addRoleToRole(roleA, roleB));
- ASSERT_NOT_OK(graph.removeRoleFromRole(roleA, roleB));
-
- // One of the roles exists
- ASSERT_OK(graph.createRole(roleA));
- ASSERT_NOT_OK(graph.createRole(roleA)); // Can't create same role twice
- ASSERT_NOT_OK(graph.addRoleToRole(roleA, roleB));
- ASSERT_NOT_OK(graph.addRoleToRole(roleB, roleA));
- ASSERT_NOT_OK(graph.removeRoleFromRole(roleA, roleB));
- ASSERT_NOT_OK(graph.removeRoleFromRole(roleB, roleA));
-
- // Should work now that both exist.
- ASSERT_OK(graph.createRole(roleB));
- ASSERT_OK(graph.addRoleToRole(roleA, roleB));
- ASSERT_OK(graph.removeRoleFromRole(roleA, roleB));
- ASSERT_NOT_OK(graph.removeRoleFromRole(roleA, roleB)); // roleA isn't actually a member of roleB
-
- // Can't remove a privilege from a role that doesn't have it.
- ASSERT_NOT_OK(graph.removePrivilegeFromRole(roleA, privilege1));
- ASSERT_OK(graph.addPrivilegeToRole(roleA, privilege1));
- ASSERT_OK(graph.removePrivilegeFromRole(roleA, privilege1)); // now should work
-
- // Test that removing a vector of privileges fails if *any* of the privileges are missing.
- ASSERT_OK(graph.addPrivilegeToRole(roleA, privilege1));
- ASSERT_OK(graph.addPrivilegeToRole(roleA, privilege2));
- // Removing both privileges should work since it has both
- ASSERT_OK(graph.removePrivilegesFromRole(roleA, privileges));
- // Now add only 1 back and this time removing both should fail
- ASSERT_OK(graph.addPrivilegeToRole(roleA, privilege1));
- ASSERT_NOT_OK(graph.removePrivilegesFromRole(roleA, privileges));
- }
+ it = graph.getDirectSubordinates(roleC);
+ ASSERT_FALSE(it.more());
+ it = graph.getDirectMembers(roleA);
+ ASSERT_FALSE(it.more());
- TEST(RoleGraphTest, BuiltinRoles) {
- RoleName userRole("userDefined", "dbA");
- RoleName builtinRole("read", "dbA");
-
- ActionSet actions;
- actions.addAction(ActionType::insert);
- Privilege privilege(dbAResource, actions);
-
- RoleGraph graph;
-
- ASSERT(graph.roleExists(builtinRole));
- ASSERT_NOT_OK(graph.createRole(builtinRole));
- ASSERT_NOT_OK(graph.deleteRole(builtinRole));
- ASSERT(graph.roleExists(builtinRole));
- ASSERT(!graph.roleExists(userRole));
- ASSERT_OK(graph.createRole(userRole));
- ASSERT(graph.roleExists(userRole));
-
- ASSERT_NOT_OK(graph.addPrivilegeToRole(builtinRole, privilege));
- ASSERT_NOT_OK(graph.removePrivilegeFromRole(builtinRole, privilege));
- ASSERT_NOT_OK(graph.addRoleToRole(builtinRole, userRole));
- ASSERT_NOT_OK(graph.removeRoleFromRole(builtinRole, userRole));
-
- ASSERT_OK(graph.addPrivilegeToRole(userRole, privilege));
- ASSERT_OK(graph.addRoleToRole(userRole, builtinRole));
- ASSERT_OK(graph.recomputePrivilegeData());
-
- PrivilegeVector privileges = graph.getDirectPrivileges(userRole);
- ASSERT_EQUALS(1U, privileges.size());
- ASSERT(privileges[0].getActions().equals(actions));
- ASSERT(!privileges[0].getActions().contains(ActionType::find));
- ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
-
- privileges = graph.getAllPrivileges(userRole);
- size_t i;
- for (i = 0; i < privileges.size(); ++i) {
- if (dbAResource == privileges[i].getResourcePattern())
- break;
- }
- ASSERT_NOT_EQUALS(privileges.size(), i);
- ASSERT(privileges[i].getActions().isSupersetOf(actions));
- ASSERT(privileges[i].getActions().contains(ActionType::insert));
- ASSERT(privileges[i].getActions().contains(ActionType::find));
+ it = graph.getDirectMembers(roleB);
+ ASSERT_EQUALS(it.next().getFullName(), roleA.getFullName());
+ ASSERT_FALSE(it.more());
- ASSERT_OK(graph.deleteRole(userRole));
- ASSERT(!graph.roleExists(userRole));
- }
-
- TEST(RoleGraphTest, BuiltinRolesOnlyOnAppropriateDatabases) {
- RoleGraph graph;
- ASSERT(graph.roleExists(RoleName("read", "test")));
- ASSERT(graph.roleExists(RoleName("readWrite", "test")));
- ASSERT(graph.roleExists(RoleName("userAdmin", "test")));
- ASSERT(graph.roleExists(RoleName("dbAdmin", "test")));
- ASSERT(graph.roleExists(RoleName("dbOwner", "test")));
- ASSERT(!graph.roleExists(RoleName("readAnyDatabase", "test")));
- ASSERT(!graph.roleExists(RoleName("readWriteAnyDatabase", "test")));
- ASSERT(!graph.roleExists(RoleName("userAdminAnyDatabase", "test")));
- ASSERT(!graph.roleExists(RoleName("dbAdminAnyDatabase", "test")));
- ASSERT(!graph.roleExists(RoleName("clusterAdmin", "test")));
- ASSERT(!graph.roleExists(RoleName("root", "test")));
- ASSERT(!graph.roleExists(RoleName("__system", "test")));
- ASSERT(!graph.roleExists(RoleName("MyRole", "test")));
-
- ASSERT(graph.roleExists(RoleName("read", "admin")));
- ASSERT(graph.roleExists(RoleName("readWrite", "admin")));
- ASSERT(graph.roleExists(RoleName("userAdmin", "admin")));
- ASSERT(graph.roleExists(RoleName("dbAdmin", "admin")));
- ASSERT(graph.roleExists(RoleName("dbOwner", "admin")));
- ASSERT(graph.roleExists(RoleName("readAnyDatabase", "admin")));
- ASSERT(graph.roleExists(RoleName("readWriteAnyDatabase", "admin")));
- ASSERT(graph.roleExists(RoleName("userAdminAnyDatabase", "admin")));
- ASSERT(graph.roleExists(RoleName("dbAdminAnyDatabase", "admin")));
- ASSERT(graph.roleExists(RoleName("clusterAdmin", "admin")));
- ASSERT(graph.roleExists(RoleName("root", "admin")));
- ASSERT(graph.roleExists(RoleName("__system", "admin")));
- ASSERT(!graph.roleExists(RoleName("MyRole", "admin")));
+ it = graph.getDirectMembers(roleC); // should be role A and role B, order doesn't matter
+ cur = it.next();
+ if (cur == roleA) {
+ ASSERT_EQUALS(it.next().getFullName(), roleB.getFullName());
+ } else if (cur == roleB) {
+ ASSERT_EQUALS(it.next().getFullName(), roleA.getFullName());
+ } else {
+ FAIL(mongoutils::str::stream() << "unexpected role returned: " << cur.getFullName());
}
-
- TEST(RoleGraphTest, getRolesForDatabase) {
- RoleGraph graph;
- graph.createRole(RoleName("myRole", "test"));
- // Make sure that a role on "test2" doesn't show up in the roles list for "test"
- graph.createRole(RoleName("anotherRole", "test2"));
- graph.createRole(RoleName("myAdminRole", "admin"));
-
- // Non-admin DB with no user-defined roles
- RoleNameIterator it = graph.getRolesForDatabase("fakedb");
- ASSERT_EQUALS(RoleName("dbAdmin", "fakedb"), it.next());
- ASSERT_EQUALS(RoleName("dbOwner", "fakedb"), it.next());
- ASSERT_EQUALS(RoleName("read", "fakedb"), it.next());
- ASSERT_EQUALS(RoleName("readWrite", "fakedb"), it.next());
- ASSERT_EQUALS(RoleName("userAdmin", "fakedb"), it.next());
- ASSERT_FALSE(it.more());
-
- // Non-admin DB with a user-defined role
- it = graph.getRolesForDatabase("test");
- ASSERT_EQUALS(RoleName("dbAdmin", "test"), it.next());
- ASSERT_EQUALS(RoleName("dbOwner", "test"), it.next());
- ASSERT_EQUALS(RoleName("myRole", "test"), it.next());
- ASSERT_EQUALS(RoleName("read", "test"), it.next());
- ASSERT_EQUALS(RoleName("readWrite", "test"), it.next());
- ASSERT_EQUALS(RoleName("userAdmin", "test"), it.next());
- ASSERT_FALSE(it.more());
-
- // Admin DB
- it = graph.getRolesForDatabase("admin");
- ASSERT_EQUALS(RoleName("__system", "admin"), it.next());
- ASSERT_EQUALS(RoleName("backup", "admin"), it.next());
- ASSERT_EQUALS(RoleName("clusterAdmin", "admin"), it.next());
- ASSERT_EQUALS(RoleName("clusterManager", "admin"), it.next());
- ASSERT_EQUALS(RoleName("clusterMonitor", "admin"), it.next());
- ASSERT_EQUALS(RoleName("dbAdmin", "admin"), it.next());
- ASSERT_EQUALS(RoleName("dbAdminAnyDatabase", "admin"), it.next());
- ASSERT_EQUALS(RoleName("dbOwner", "admin"), it.next());
- ASSERT_EQUALS(RoleName("hostManager", "admin"), it.next());
- ASSERT_EQUALS(RoleName("myAdminRole", "admin"), it.next());
- ASSERT_EQUALS(RoleName("read", "admin"), it.next());
- ASSERT_EQUALS(RoleName("readAnyDatabase", "admin"), it.next());
- ASSERT_EQUALS(RoleName("readWrite", "admin"), it.next());
- ASSERT_EQUALS(RoleName("readWriteAnyDatabase", "admin"), it.next());
- ASSERT_EQUALS(RoleName("restore", "admin"), it.next());
- ASSERT_EQUALS(RoleName("root", "admin"), it.next());
- ASSERT_EQUALS(RoleName("userAdmin", "admin"), it.next());
- ASSERT_EQUALS(RoleName("userAdminAnyDatabase", "admin"), it.next());
- ASSERT_FALSE(it.more());
+ ASSERT_FALSE(it.more());
+
+    // Now remove roleD from roleB and make sure the graph is updated correctly
+ ASSERT_OK(graph.removeRoleFromRole(roleB, roleD));
+
+ /*
+ * Graph now looks like:
+ * A
+ * / \
+ * v v
+ * B -> C
+ */
+ it = graph.getDirectSubordinates(roleB); // should be just roleC
+ ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
+ ASSERT_FALSE(it.more());
+
+ it = graph.getDirectSubordinates(roleD); // should be empty
+ ASSERT_FALSE(it.more());
+
+
+ // Now delete roleB entirely and make sure that the other roles are updated properly
+ ASSERT_OK(graph.deleteRole(roleB));
+ ASSERT_NOT_OK(graph.deleteRole(roleB));
+ it = graph.getDirectSubordinates(roleA);
+ ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
+ ASSERT_FALSE(it.more());
+ it = graph.getDirectMembers(roleC);
+ ASSERT_EQUALS(it.next().getFullName(), roleA.getFullName());
+ ASSERT_FALSE(it.more());
+}
+
+const ResourcePattern collectionAFooResource(
+ ResourcePattern::forExactNamespace(NamespaceString("dbA.foo")));
+const ResourcePattern db1Resource(ResourcePattern::forDatabaseName("db1"));
+const ResourcePattern db2Resource(ResourcePattern::forDatabaseName("db2"));
+const ResourcePattern dbAResource(ResourcePattern::forDatabaseName("dbA"));
+const ResourcePattern dbBResource(ResourcePattern::forDatabaseName("dbB"));
+const ResourcePattern dbCResource(ResourcePattern::forDatabaseName("dbC"));
+const ResourcePattern dbDResource(ResourcePattern::forDatabaseName("dbD"));
+const ResourcePattern dbResource(ResourcePattern::forDatabaseName("db"));
+
+// Tests that adding multiple privileges on the same resource correctly collapses those to one
+// privilege
+TEST(RoleGraphTest, AddPrivileges) {
+ RoleName roleA("roleA", "dbA");
+
+ RoleGraph graph;
+ ASSERT_OK(graph.createRole(roleA));
+
+ // Test adding a single privilege
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(dbAResource, actions)));
+
+ PrivilegeVector privileges = graph.getDirectPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(actions.toString(), privileges[0].getActions().toString());
+
+ // Add a privilege on a different resource
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(collectionAFooResource, actions)));
+ privileges = graph.getDirectPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(actions.toString(), privileges[0].getActions().toString());
+ ASSERT_EQUALS(collectionAFooResource, privileges[1].getResourcePattern());
+ ASSERT_EQUALS(actions.toString(), privileges[1].getActions().toString());
+
+
+ // Add different privileges on an existing resource and make sure they get de-duped
+ actions.removeAllActions();
+ actions.addAction(ActionType::insert);
+
+ PrivilegeVector privilegesToAdd;
+ privilegesToAdd.push_back(Privilege(dbAResource, actions));
+
+ actions.removeAllActions();
+ actions.addAction(ActionType::update);
+ privilegesToAdd.push_back(Privilege(dbAResource, actions));
+
+ ASSERT_OK(graph.addPrivilegesToRole(roleA, privilegesToAdd));
+
+ privileges = graph.getDirectPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ ASSERT_NOT_EQUALS(actions.toString(), privileges[0].getActions().toString());
+ actions.addAction(ActionType::find);
+ actions.addAction(ActionType::insert);
+ ASSERT_EQUALS(actions.toString(), privileges[0].getActions().toString());
+ actions.removeAction(ActionType::insert);
+ actions.removeAction(ActionType::update);
+ ASSERT_EQUALS(collectionAFooResource, privileges[1].getResourcePattern());
+ ASSERT_EQUALS(actions.toString(), privileges[1].getActions().toString());
+}
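+
+// An illustrative sketch of the collapsing behaviour exercised above, using only calls
+// already present in this file (the role and database names are hypothetical):
+//
+//     RoleGraph g;
+//     RoleName r("someRole", "someDB");
+//     ASSERT_OK(g.createRole(r));
+//     ActionSet findOnly, insertOnly;
+//     findOnly.addAction(ActionType::find);
+//     insertOnly.addAction(ActionType::insert);
+//     const ResourcePattern someDB(ResourcePattern::forDatabaseName("someDB"));
+//     ASSERT_OK(g.addPrivilegeToRole(r, Privilege(someDB, findOnly)));
+//     ASSERT_OK(g.addPrivilegeToRole(r, Privilege(someDB, insertOnly)));
+//     // g.getDirectPrivileges(r) now returns a single Privilege on "someDB" whose
+//     // ActionSet contains both find and insert.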
+
+// Tests that recomputePrivilegeData correctly detects cycles in the graph.
+TEST(RoleGraphTest, DetectCycles) {
+ RoleName roleA("roleA", "dbA");
+ RoleName roleB("roleB", "dbB");
+ RoleName roleC("roleC", "dbC");
+ RoleName roleD("roleD", "dbD");
+
+ RoleGraph graph;
+ ASSERT_OK(graph.createRole(roleA));
+ ASSERT_OK(graph.createRole(roleB));
+ ASSERT_OK(graph.createRole(roleC));
+ ASSERT_OK(graph.createRole(roleD));
+
+ // Add a role to itself
+ ASSERT_OK(graph.recomputePrivilegeData());
+ ASSERT_OK(graph.addRoleToRole(roleA, roleA));
+ ASSERT_NOT_OK(graph.recomputePrivilegeData());
+ ASSERT_OK(graph.removeRoleFromRole(roleA, roleA));
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+ ASSERT_OK(graph.recomputePrivilegeData());
+ ASSERT_OK(graph.addRoleToRole(roleA, roleC));
+ ASSERT_OK(graph.addRoleToRole(roleB, roleC));
+ ASSERT_OK(graph.recomputePrivilegeData());
+ /*
+ * Graph now looks like:
+ * A
+ * / \
+ * v v
+ * B -> C
+ */
+ ASSERT_OK(graph.addRoleToRole(roleC, roleD));
+ ASSERT_OK(graph.addRoleToRole(roleD, roleB)); // Add a cycle
+ /*
+ * Graph now looks like:
+ * A
+ * / \
+ * v v
+ * B -> C
+ * ^ /
+ * \ v
+ * D
+ */
+ ASSERT_NOT_OK(graph.recomputePrivilegeData());
+ ASSERT_OK(graph.removeRoleFromRole(roleD, roleB));
+ ASSERT_OK(graph.recomputePrivilegeData());
+}
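+
+// A caller-side sketch of how the cycle detected above surfaces; the test only requires
+// a non-OK Status, so the specific error code named here is an assumption:
+//
+//     Status s = graph.recomputePrivilegeData();
+//     if (!s.isOK()) {
+//         // e.g. ErrorCodes::GraphContainsCycle; the cached privilege data stays stale
+//         // until the offending edge is removed and recomputation succeeds.
+//     }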
+
+// Tests that recomputePrivilegeData correctly updates transitive privilege data for all roles.
+TEST(RoleGraphTest, RecomputePrivilegeData) {
+ // We create 4 roles and give each of them a unique privilege. After that the direct
+ // privileges for all the roles are not touched. The only thing that is changed is the
+ // role membership graph, and we test how that affects the set of all transitive privileges
+ // for each role.
+ RoleName roleA("roleA", "dbA");
+ RoleName roleB("roleB", "dbB");
+ RoleName roleC("roleC", "dbC");
+ RoleName roleD("readWrite", "dbD"); // built-in role
+
+ ActionSet actions;
+ actions.addAllActions();
+
+ RoleGraph graph;
+ ASSERT_OK(graph.createRole(roleA));
+ ASSERT_OK(graph.createRole(roleB));
+ ASSERT_OK(graph.createRole(roleC));
+
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(dbAResource, actions)));
+ ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbBResource, actions)));
+ ASSERT_OK(graph.addPrivilegeToRole(roleC, Privilege(dbCResource, actions)));
+
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+ PrivilegeVector privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+
+ // At this point we have all 4 roles set up, each with their own privilege, but no
+ // roles have been granted to each other.
+
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+ // Role graph: A->B
+ ASSERT_OK(graph.recomputePrivilegeData());
+ privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
+
+    // Add roleC's privileges to roleB and make sure roleA gets them as well.
+ ASSERT_OK(graph.addRoleToRole(roleB, roleC));
+ // Role graph: A->B->C
+ ASSERT_OK(graph.recomputePrivilegeData());
+ privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(3), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
+ ASSERT_EQUALS(dbCResource, privileges[2].getResourcePattern());
+ privileges = graph.getAllPrivileges(roleB);
+ ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
+ ASSERT_EQUALS(dbBResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(dbCResource, privileges[1].getResourcePattern());
+
+    // Add roleD's privileges to roleC and make sure that roleA and roleB get them as well.
+ ASSERT_OK(graph.addRoleToRole(roleC, roleD));
+ // Role graph: A->B->C->D
+ ASSERT_OK(graph.recomputePrivilegeData());
+ privileges = graph.getAllPrivileges(roleA);
+ const size_t readWriteRolePrivilegeCount = graph.getAllPrivileges(roleD).size();
+ ASSERT_EQUALS(readWriteRolePrivilegeCount + 3, privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
+ ASSERT_EQUALS(dbCResource, privileges[2].getResourcePattern());
+ privileges = graph.getAllPrivileges(roleB);
+ ASSERT_EQUALS(readWriteRolePrivilegeCount + 2, privileges.size());
+ ASSERT_EQUALS(dbBResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(dbCResource, privileges[1].getResourcePattern());
+ privileges = graph.getAllPrivileges(roleC);
+ ASSERT_EQUALS(readWriteRolePrivilegeCount + 1, privileges.size());
+ ASSERT_EQUALS(dbCResource, privileges[0].getResourcePattern());
+
+ // Remove roleC from roleB, make sure that roleA then loses both roleC's and roleD's
+ // privileges
+ ASSERT_OK(graph.removeRoleFromRole(roleB, roleC));
+ // Role graph: A->B C->D
+ ASSERT_OK(graph.recomputePrivilegeData());
+ privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(2), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
+ privileges = graph.getAllPrivileges(roleB);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_EQUALS(dbBResource, privileges[0].getResourcePattern());
+ privileges = graph.getAllPrivileges(roleC);
+ ASSERT_EQUALS(readWriteRolePrivilegeCount + 1, privileges.size());
+ ASSERT_EQUALS(dbCResource, privileges[0].getResourcePattern());
+ privileges = graph.getAllPrivileges(roleD);
+ ASSERT_EQUALS(readWriteRolePrivilegeCount, privileges.size());
+
+ // Make sure direct privileges were untouched
+ privileges = graph.getDirectPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ privileges = graph.getDirectPrivileges(roleB);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_EQUALS(dbBResource, privileges[0].getResourcePattern());
+ privileges = graph.getDirectPrivileges(roleC);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_EQUALS(dbCResource, privileges[0].getResourcePattern());
+ privileges = graph.getDirectPrivileges(roleD);
+ ASSERT_EQUALS(readWriteRolePrivilegeCount, privileges.size());
+}
+
+// Test that if you grant one role to another, then remove it and change its privileges, then
+// re-grant it, the receiving role sees the new privileges and not the old ones.
+TEST(RoleGraphTest, ReAddRole) {
+ RoleName roleA("roleA", "dbA");
+ RoleName roleB("roleB", "dbB");
+ RoleName roleC("roleC", "dbC");
+
+ ActionSet actionsA, actionsB, actionsC;
+ actionsA.addAction(ActionType::find);
+ actionsB.addAction(ActionType::insert);
+ actionsC.addAction(ActionType::update);
+
+ RoleGraph graph;
+ ASSERT_OK(graph.createRole(roleA));
+ ASSERT_OK(graph.createRole(roleB));
+ ASSERT_OK(graph.createRole(roleC));
+
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(dbResource, actionsA)));
+ ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbResource, actionsB)));
+ ASSERT_OK(graph.addPrivilegeToRole(roleC, Privilege(dbResource, actionsC)));
+
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+ ASSERT_OK(graph.addRoleToRole(roleB, roleC)); // graph: A <- B <- C
+
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+ // roleA should have privileges from roleB and roleC
+ PrivilegeVector privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
+ ASSERT_TRUE(privileges[0].getActions().contains(ActionType::insert));
+ ASSERT_TRUE(privileges[0].getActions().contains(ActionType::update));
+
+    // Now remove roleB from roleA. B is still a member of C, but A should no longer have
+    // privileges from B or C.
+ ASSERT_OK(graph.removeRoleFromRole(roleA, roleB));
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+ // roleA should no longer have the privileges from roleB or roleC
+ privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
+ ASSERT_FALSE(privileges[0].getActions().contains(ActionType::insert));
+ ASSERT_FALSE(privileges[0].getActions().contains(ActionType::update));
+
+ // Change the privileges that roleB grants
+ ASSERT_OK(graph.removeAllPrivilegesFromRole(roleB));
+ ActionSet newActionsB;
+ newActionsB.addAction(ActionType::remove);
+ ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbResource, newActionsB)));
+
+ // Grant roleB back to roleA, make sure roleA has roleB's new privilege but not its old one.
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+ privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
+ ASSERT_TRUE(privileges[0].getActions().contains(
+ ActionType::update)); // should get roleC's actions again
+ ASSERT_TRUE(privileges[0].getActions().contains(
+ ActionType::remove)); // roleB should grant this to roleA
+ ASSERT_FALSE(privileges[0].getActions().contains(
+ ActionType::insert)); // no roles have this action anymore
+
+ // Now delete roleB completely. A should once again lose the privileges from both B and C.
+ ASSERT_OK(graph.deleteRole(roleB));
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+ // roleA should no longer have the privileges from roleB or roleC
+ privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
+ ASSERT_FALSE(privileges[0].getActions().contains(ActionType::update));
+ ASSERT_FALSE(privileges[0].getActions().contains(ActionType::remove));
+ ASSERT_FALSE(privileges[0].getActions().contains(ActionType::insert));
+
+ // Now re-create roleB and give it a new privilege, then grant it back to roleA.
+    // This time roleA should get the new privilege, but not roleC's privilege nor either of
+    // roleB's old privileges.
+ ASSERT_OK(graph.createRole(roleB));
+ actionsB.removeAllActions();
+ actionsB.addAction(ActionType::shutdown);
+ ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbResource, actionsB)));
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+ privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(1), privileges.size());
+ ASSERT_TRUE(privileges[0].getActions().contains(ActionType::find));
+ ASSERT_TRUE(privileges[0].getActions().contains(ActionType::shutdown));
+ ASSERT_FALSE(privileges[0].getActions().contains(ActionType::update));
+ ASSERT_FALSE(privileges[0].getActions().contains(ActionType::remove));
+ ASSERT_FALSE(privileges[0].getActions().contains(ActionType::insert));
+}
+
+// Tests copy constructor and swap functionality.
+TEST(RoleGraphTest, CopySwap) {
+ RoleName roleA("roleA", "dbA");
+ RoleName roleB("roleB", "dbB");
+ RoleName roleC("roleC", "dbC");
+
+ RoleGraph graph;
+ ASSERT_OK(graph.createRole(roleA));
+ ASSERT_OK(graph.createRole(roleB));
+ ASSERT_OK(graph.createRole(roleC));
+
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, Privilege(dbAResource, actions)));
+ ASSERT_OK(graph.addPrivilegeToRole(roleB, Privilege(dbBResource, actions)));
+ ASSERT_OK(graph.addPrivilegeToRole(roleC, Privilege(dbCResource, actions)));
+
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+
+ // Make a copy of the graph to do further modifications on.
+ RoleGraph tempGraph(graph);
+ ASSERT_OK(tempGraph.addRoleToRole(roleB, roleC));
+ tempGraph.recomputePrivilegeData();
+
+ // Now swap the copy back with the original graph and make sure the original was updated
+ // properly.
+ swap(tempGraph, graph);
+
+ RoleNameIterator it = graph.getDirectSubordinates(roleB);
+ ASSERT_TRUE(it.more());
+ ASSERT_EQUALS(it.next().getFullName(), roleC.getFullName());
+ ASSERT_FALSE(it.more());
+
+ graph.getAllPrivileges(roleA); // should have privileges from roleB *and* role C
+ PrivilegeVector privileges = graph.getAllPrivileges(roleA);
+ ASSERT_EQUALS(static_cast<size_t>(3), privileges.size());
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+ ASSERT_EQUALS(dbBResource, privileges[1].getResourcePattern());
+ ASSERT_EQUALS(dbCResource, privileges[2].getResourcePattern());
+}
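+
+// A sketch of the copy-modify-swap pattern exercised above, as a caller might use it to
+// stage changes without disturbing a graph that is in use ("liveGraph" is hypothetical):
+//
+//     RoleGraph staging(liveGraph);                    // copy the current graph
+//     ASSERT_OK(staging.addRoleToRole(roleB, roleC));  // mutate only the copy
+//     ASSERT_OK(staging.recomputePrivilegeData());
+//     swap(staging, liveGraph);                        // publish the rebuilt graph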
+
+// Tests error handling
+TEST(RoleGraphTest, ErrorHandling) {
+ RoleName roleA("roleA", "dbA");
+ RoleName roleB("roleB", "dbB");
+ RoleName roleC("roleC", "dbC");
+
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ Privilege privilege1(db1Resource, actions);
+ Privilege privilege2(db2Resource, actions);
+ PrivilegeVector privileges;
+ privileges.push_back(privilege1);
+ privileges.push_back(privilege2);
+
+ RoleGraph graph;
+ // None of the roles exist yet.
+ ASSERT_NOT_OK(graph.addPrivilegeToRole(roleA, privilege1));
+ ASSERT_NOT_OK(graph.addPrivilegesToRole(roleA, privileges));
+ ASSERT_NOT_OK(graph.removePrivilegeFromRole(roleA, privilege1));
+ ASSERT_NOT_OK(graph.removePrivilegesFromRole(roleA, privileges));
+ ASSERT_NOT_OK(graph.removeAllPrivilegesFromRole(roleA));
+ ASSERT_NOT_OK(graph.addRoleToRole(roleA, roleB));
+ ASSERT_NOT_OK(graph.removeRoleFromRole(roleA, roleB));
+
+ // One of the roles exists
+ ASSERT_OK(graph.createRole(roleA));
+ ASSERT_NOT_OK(graph.createRole(roleA)); // Can't create same role twice
+ ASSERT_NOT_OK(graph.addRoleToRole(roleA, roleB));
+ ASSERT_NOT_OK(graph.addRoleToRole(roleB, roleA));
+ ASSERT_NOT_OK(graph.removeRoleFromRole(roleA, roleB));
+ ASSERT_NOT_OK(graph.removeRoleFromRole(roleB, roleA));
+
+ // Should work now that both exist.
+ ASSERT_OK(graph.createRole(roleB));
+ ASSERT_OK(graph.addRoleToRole(roleA, roleB));
+ ASSERT_OK(graph.removeRoleFromRole(roleA, roleB));
+ ASSERT_NOT_OK(
+ graph.removeRoleFromRole(roleA, roleB)); // roleA isn't actually a member of roleB
+
+ // Can't remove a privilege from a role that doesn't have it.
+ ASSERT_NOT_OK(graph.removePrivilegeFromRole(roleA, privilege1));
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, privilege1));
+ ASSERT_OK(graph.removePrivilegeFromRole(roleA, privilege1)); // now should work
+
+ // Test that removing a vector of privileges fails if *any* of the privileges are missing.
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, privilege1));
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, privilege2));
+ // Removing both privileges should work since it has both
+ ASSERT_OK(graph.removePrivilegesFromRole(roleA, privileges));
+ // Now add only 1 back and this time removing both should fail
+ ASSERT_OK(graph.addPrivilegeToRole(roleA, privilege1));
+ ASSERT_NOT_OK(graph.removePrivilegesFromRole(roleA, privileges));
+}
+
+
+TEST(RoleGraphTest, BuiltinRoles) {
+ RoleName userRole("userDefined", "dbA");
+ RoleName builtinRole("read", "dbA");
+
+ ActionSet actions;
+ actions.addAction(ActionType::insert);
+ Privilege privilege(dbAResource, actions);
+
+ RoleGraph graph;
+
+ ASSERT(graph.roleExists(builtinRole));
+ ASSERT_NOT_OK(graph.createRole(builtinRole));
+ ASSERT_NOT_OK(graph.deleteRole(builtinRole));
+ ASSERT(graph.roleExists(builtinRole));
+ ASSERT(!graph.roleExists(userRole));
+ ASSERT_OK(graph.createRole(userRole));
+ ASSERT(graph.roleExists(userRole));
+
+ ASSERT_NOT_OK(graph.addPrivilegeToRole(builtinRole, privilege));
+ ASSERT_NOT_OK(graph.removePrivilegeFromRole(builtinRole, privilege));
+ ASSERT_NOT_OK(graph.addRoleToRole(builtinRole, userRole));
+ ASSERT_NOT_OK(graph.removeRoleFromRole(builtinRole, userRole));
+
+ ASSERT_OK(graph.addPrivilegeToRole(userRole, privilege));
+ ASSERT_OK(graph.addRoleToRole(userRole, builtinRole));
+ ASSERT_OK(graph.recomputePrivilegeData());
+
+ PrivilegeVector privileges = graph.getDirectPrivileges(userRole);
+ ASSERT_EQUALS(1U, privileges.size());
+ ASSERT(privileges[0].getActions().equals(actions));
+ ASSERT(!privileges[0].getActions().contains(ActionType::find));
+ ASSERT_EQUALS(dbAResource, privileges[0].getResourcePattern());
+
+ privileges = graph.getAllPrivileges(userRole);
+ size_t i;
+ for (i = 0; i < privileges.size(); ++i) {
+ if (dbAResource == privileges[i].getResourcePattern())
+ break;
}
+ ASSERT_NOT_EQUALS(privileges.size(), i);
+ ASSERT(privileges[i].getActions().isSupersetOf(actions));
+ ASSERT(privileges[i].getActions().contains(ActionType::insert));
+ ASSERT(privileges[i].getActions().contains(ActionType::find));
+
+ ASSERT_OK(graph.deleteRole(userRole));
+ ASSERT(!graph.roleExists(userRole));
+}
+
+TEST(RoleGraphTest, BuiltinRolesOnlyOnAppropriateDatabases) {
+ RoleGraph graph;
+ ASSERT(graph.roleExists(RoleName("read", "test")));
+ ASSERT(graph.roleExists(RoleName("readWrite", "test")));
+ ASSERT(graph.roleExists(RoleName("userAdmin", "test")));
+ ASSERT(graph.roleExists(RoleName("dbAdmin", "test")));
+ ASSERT(graph.roleExists(RoleName("dbOwner", "test")));
+ ASSERT(!graph.roleExists(RoleName("readAnyDatabase", "test")));
+ ASSERT(!graph.roleExists(RoleName("readWriteAnyDatabase", "test")));
+ ASSERT(!graph.roleExists(RoleName("userAdminAnyDatabase", "test")));
+ ASSERT(!graph.roleExists(RoleName("dbAdminAnyDatabase", "test")));
+ ASSERT(!graph.roleExists(RoleName("clusterAdmin", "test")));
+ ASSERT(!graph.roleExists(RoleName("root", "test")));
+ ASSERT(!graph.roleExists(RoleName("__system", "test")));
+ ASSERT(!graph.roleExists(RoleName("MyRole", "test")));
+
+ ASSERT(graph.roleExists(RoleName("read", "admin")));
+ ASSERT(graph.roleExists(RoleName("readWrite", "admin")));
+ ASSERT(graph.roleExists(RoleName("userAdmin", "admin")));
+ ASSERT(graph.roleExists(RoleName("dbAdmin", "admin")));
+ ASSERT(graph.roleExists(RoleName("dbOwner", "admin")));
+ ASSERT(graph.roleExists(RoleName("readAnyDatabase", "admin")));
+ ASSERT(graph.roleExists(RoleName("readWriteAnyDatabase", "admin")));
+ ASSERT(graph.roleExists(RoleName("userAdminAnyDatabase", "admin")));
+ ASSERT(graph.roleExists(RoleName("dbAdminAnyDatabase", "admin")));
+ ASSERT(graph.roleExists(RoleName("clusterAdmin", "admin")));
+ ASSERT(graph.roleExists(RoleName("root", "admin")));
+ ASSERT(graph.roleExists(RoleName("__system", "admin")));
+ ASSERT(!graph.roleExists(RoleName("MyRole", "admin")));
+}
+
+TEST(RoleGraphTest, getRolesForDatabase) {
+ RoleGraph graph;
+ graph.createRole(RoleName("myRole", "test"));
+ // Make sure that a role on "test2" doesn't show up in the roles list for "test"
+ graph.createRole(RoleName("anotherRole", "test2"));
+ graph.createRole(RoleName("myAdminRole", "admin"));
+
+ // Non-admin DB with no user-defined roles
+ RoleNameIterator it = graph.getRolesForDatabase("fakedb");
+ ASSERT_EQUALS(RoleName("dbAdmin", "fakedb"), it.next());
+ ASSERT_EQUALS(RoleName("dbOwner", "fakedb"), it.next());
+ ASSERT_EQUALS(RoleName("read", "fakedb"), it.next());
+ ASSERT_EQUALS(RoleName("readWrite", "fakedb"), it.next());
+ ASSERT_EQUALS(RoleName("userAdmin", "fakedb"), it.next());
+ ASSERT_FALSE(it.more());
+
+ // Non-admin DB with a user-defined role
+ it = graph.getRolesForDatabase("test");
+ ASSERT_EQUALS(RoleName("dbAdmin", "test"), it.next());
+ ASSERT_EQUALS(RoleName("dbOwner", "test"), it.next());
+ ASSERT_EQUALS(RoleName("myRole", "test"), it.next());
+ ASSERT_EQUALS(RoleName("read", "test"), it.next());
+ ASSERT_EQUALS(RoleName("readWrite", "test"), it.next());
+ ASSERT_EQUALS(RoleName("userAdmin", "test"), it.next());
+ ASSERT_FALSE(it.more());
+
+ // Admin DB
+ it = graph.getRolesForDatabase("admin");
+ ASSERT_EQUALS(RoleName("__system", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("backup", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("clusterAdmin", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("clusterManager", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("clusterMonitor", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("dbAdmin", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("dbAdminAnyDatabase", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("dbOwner", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("hostManager", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("myAdminRole", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("read", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("readAnyDatabase", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("readWrite", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("readWriteAnyDatabase", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("restore", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("root", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("userAdmin", "admin"), it.next());
+ ASSERT_EQUALS(RoleName("userAdminAnyDatabase", "admin"), it.next());
+ ASSERT_FALSE(it.more());
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index 71e9370ec61..62df4a33a4b 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -40,278 +40,266 @@ namespace mongo {
namespace {
- /**
- * Structure representing information parsed out of a role document.
- */
- struct RoleInfo {
- RoleName name;
- std::vector<RoleName> roles;
- PrivilegeVector privileges;
- };
+/**
+ * Structure representing information parsed out of a role document.
+ */
+struct RoleInfo {
+ RoleName name;
+ std::vector<RoleName> roles;
+ PrivilegeVector privileges;
+};
- /**
- * Parses the role name out of a BSON document.
- */
- Status parseRoleNameFromDocument(const BSONObj& doc, RoleName* name) {
- BSONElement nameElement;
- BSONElement sourceElement;
- Status status = bsonExtractTypedField(
- doc, AuthorizationManager::ROLE_NAME_FIELD_NAME, String, &nameElement);
- if (!status.isOK())
- return status;
- status = bsonExtractTypedField(
- doc, AuthorizationManager::ROLE_DB_FIELD_NAME, String, &sourceElement);
- if (!status.isOK())
- return status;
- *name = RoleName(nameElement.valueStringData(), sourceElement.valueStringData());
+/**
+ * Parses the role name out of a BSON document.
+ */
+Status parseRoleNameFromDocument(const BSONObj& doc, RoleName* name) {
+ BSONElement nameElement;
+ BSONElement sourceElement;
+ Status status = bsonExtractTypedField(
+ doc, AuthorizationManager::ROLE_NAME_FIELD_NAME, String, &nameElement);
+ if (!status.isOK())
return status;
- }
+ status = bsonExtractTypedField(
+ doc, AuthorizationManager::ROLE_DB_FIELD_NAME, String, &sourceElement);
+ if (!status.isOK())
+ return status;
+ *name = RoleName(nameElement.valueStringData(), sourceElement.valueStringData());
+ return status;
+}
- /**
- * Checks whether the given "roleName" corresponds with the given _id field.
- * In admin.system.roles, documents with role name "role@db" must have _id
- * "db.role".
- *
- * Returns Status::OK if the two values are compatible.
- */
- Status checkIdMatchesRoleName(const BSONElement& idElement, const RoleName& roleName) {
- if (idElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch,
- "Role document _id fields must be strings.");
- }
- StringData idField = idElement.valueStringData();
- size_t firstDot = idField.find('.');
- if (firstDot == std::string::npos ||
- idField.substr(0, firstDot) != roleName.getDB() ||
- idField.substr(firstDot + 1) != roleName.getRole()) {
- return Status(ErrorCodes::FailedToParse, mongoutils::str::stream() <<
- "Role document _id fields must be encoded as the string "
- "dbname.rolename. Found " << idField << " for " <<
- roleName.getFullName());
- }
- return Status::OK();
+/**
+ * Checks whether the given "roleName" corresponds with the given _id field.
+ * In admin.system.roles, documents with role name "role@db" must have _id
+ * "db.role".
+ *
+ * Returns Status::OK if the two values are compatible.
+ */
+Status checkIdMatchesRoleName(const BSONElement& idElement, const RoleName& roleName) {
+ if (idElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch, "Role document _id fields must be strings.");
+ }
+ StringData idField = idElement.valueStringData();
+ size_t firstDot = idField.find('.');
+ if (firstDot == std::string::npos || idField.substr(0, firstDot) != roleName.getDB() ||
+ idField.substr(firstDot + 1) != roleName.getRole()) {
+ return Status(ErrorCodes::FailedToParse,
+ mongoutils::str::stream()
+ << "Role document _id fields must be encoded as the string "
+ "dbname.rolename. Found " << idField << " for "
+ << roleName.getFullName());
}
+ return Status::OK();
+}
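+
+// Example of the convention checked above (illustrative values only): a role named
+// "reporting" defined on database "sales" must be stored with _id "sales.reporting";
+// an _id of "reporting.sales", "salesreporting", or any non-string value is rejected.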
- /**
- * Parses "idElement" to extract the role name, according to the "dbname.role" convention
- * used for admin.system.roles documents.
- */
- Status getRoleNameFromIdField(const BSONElement& idElement, RoleName* roleName) {
- if (idElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch,
- "Role document _id fields must be strings.");
- }
- StringData idField = idElement.valueStringData();
- size_t dotPos = idField.find('.');
- if (dotPos == std::string::npos) {
- return Status(ErrorCodes::BadValue,
- "Role document _id fields must have the form dbname.rolename");
- }
- *roleName = RoleName(idField.substr(dotPos + 1), idField.substr(0, dotPos));
- return Status::OK();
+/**
+ * Parses "idElement" to extract the role name, according to the "dbname.role" convention
+ * used for admin.system.roles documents.
+ */
+Status getRoleNameFromIdField(const BSONElement& idElement, RoleName* roleName) {
+ if (idElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch, "Role document _id fields must be strings.");
}
+ StringData idField = idElement.valueStringData();
+ size_t dotPos = idField.find('.');
+ if (dotPos == std::string::npos) {
+ return Status(ErrorCodes::BadValue,
+ "Role document _id fields must have the form dbname.rolename");
+ }
+ *roleName = RoleName(idField.substr(dotPos + 1), idField.substr(0, dotPos));
+ return Status::OK();
+}
- /**
- * Parses information about a role from a BSON document.
- */
- Status parseRoleFromDocument(const BSONObj& doc, RoleInfo* role) {
- BSONElement rolesElement;
- Status status = parseRoleNameFromDocument(doc, &role->name);
- if (!status.isOK())
- return status;
- status = checkIdMatchesRoleName(doc["_id"], role->name);
- if (!status.isOK())
- return status;
- status = bsonExtractTypedField(doc, "roles", Array, &rolesElement);
- if (!status.isOK())
- return status;
- BSONForEach(singleRoleElement, rolesElement.Obj()) {
- if (singleRoleElement.type() != Object) {
- return Status(ErrorCodes::TypeMismatch,
- "Elements of roles array must be objects.");
- }
- RoleName possessedRoleName;
- status = parseRoleNameFromDocument(singleRoleElement.Obj(), &possessedRoleName);
- if (!status.isOK())
- return status;
- role->roles.push_back(possessedRoleName);
+/**
+ * Parses information about a role from a BSON document.
+ */
+Status parseRoleFromDocument(const BSONObj& doc, RoleInfo* role) {
+ BSONElement rolesElement;
+ Status status = parseRoleNameFromDocument(doc, &role->name);
+ if (!status.isOK())
+ return status;
+ status = checkIdMatchesRoleName(doc["_id"], role->name);
+ if (!status.isOK())
+ return status;
+ status = bsonExtractTypedField(doc, "roles", Array, &rolesElement);
+ if (!status.isOK())
+ return status;
+ BSONForEach(singleRoleElement, rolesElement.Obj()) {
+ if (singleRoleElement.type() != Object) {
+ return Status(ErrorCodes::TypeMismatch, "Elements of roles array must be objects.");
}
-
- BSONElement privilegesElement;
- status = bsonExtractTypedField(doc, "privileges", Array, &privilegesElement);
+ RoleName possessedRoleName;
+ status = parseRoleNameFromDocument(singleRoleElement.Obj(), &possessedRoleName);
if (!status.isOK())
return status;
- status = auth::parseAndValidatePrivilegeArray(BSONArray(privilegesElement.Obj()),
- &role->privileges);
- return status;
+ role->roles.push_back(possessedRoleName);
}
- /**
- * Updates roleGraph for an insert-type oplog operation on admin.system.roles.
- */
- Status handleOplogInsert(RoleGraph* roleGraph, const BSONObj& insertedObj) {
- RoleInfo role;
- Status status = parseRoleFromDocument(insertedObj, &role);
- if (!status.isOK())
- return status;
- status = roleGraph->replaceRole(role.name, role.roles, role.privileges);
+ BSONElement privilegesElement;
+ status = bsonExtractTypedField(doc, "privileges", Array, &privilegesElement);
+ if (!status.isOK())
return status;
- }
+ status =
+ auth::parseAndValidatePrivilegeArray(BSONArray(privilegesElement.Obj()), &role->privileges);
+ return status;
+}
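+
+// Illustrative shape of a document this parser accepts. The literal field names "role"
+// and "db" are an assumption about ROLE_NAME_FIELD_NAME / ROLE_DB_FIELD_NAME, and the
+// privileges format is validated separately by parseAndValidatePrivilegeArray:
+//
+//     {
+//         _id: "dbA.myRole",
+//         role: "myRole",
+//         db: "dbA",
+//         roles: [ { role: "read", db: "dbA" } ],
+//         privileges: [ { resource: { db: "dbA", collection: "" }, actions: [ "find" ] } ]
+//     }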
- /**
- * Updates roleGraph for an update-type oplog operation on admin.system.roles.
- *
- * Treats all updates as upserts.
- */
- Status handleOplogUpdate(RoleGraph* roleGraph,
- const BSONObj& updatePattern,
- const BSONObj& queryPattern) {
- RoleName roleToUpdate;
- Status status = getRoleNameFromIdField(queryPattern["_id"], &roleToUpdate);
- if (!status.isOK())
- return status;
+/**
+ * Updates roleGraph for an insert-type oplog operation on admin.system.roles.
+ */
+Status handleOplogInsert(RoleGraph* roleGraph, const BSONObj& insertedObj) {
+ RoleInfo role;
+ Status status = parseRoleFromDocument(insertedObj, &role);
+ if (!status.isOK())
+ return status;
+ status = roleGraph->replaceRole(role.name, role.roles, role.privileges);
+ return status;
+}
- UpdateDriver::Options updateOptions;
- UpdateDriver driver(updateOptions);
- status = driver.parse(updatePattern);
- if (!status.isOK())
- return status;
+/**
+ * Updates roleGraph for an update-type oplog operation on admin.system.roles.
+ *
+ * Treats all updates as upserts.
+ */
+Status handleOplogUpdate(RoleGraph* roleGraph,
+ const BSONObj& updatePattern,
+ const BSONObj& queryPattern) {
+ RoleName roleToUpdate;
+ Status status = getRoleNameFromIdField(queryPattern["_id"], &roleToUpdate);
+ if (!status.isOK())
+ return status;
- mutablebson::Document roleDocument;
- status = AuthorizationManager::getBSONForRole(
- roleGraph, roleToUpdate, roleDocument.root());
- if (status == ErrorCodes::RoleNotFound) {
- // The query pattern will only contain _id, no other immutable fields are present
- status = driver.populateDocumentWithQueryFields(queryPattern, NULL, roleDocument);
- }
- if (!status.isOK())
- return status;
+ UpdateDriver::Options updateOptions;
+ UpdateDriver driver(updateOptions);
+ status = driver.parse(updatePattern);
+ if (!status.isOK())
+ return status;
- status = driver.update(StringData(), &roleDocument);
- if (!status.isOK())
- return status;
+ mutablebson::Document roleDocument;
+ status = AuthorizationManager::getBSONForRole(roleGraph, roleToUpdate, roleDocument.root());
+ if (status == ErrorCodes::RoleNotFound) {
+ // The query pattern will only contain _id; no other immutable fields are present
+ status = driver.populateDocumentWithQueryFields(queryPattern, NULL, roleDocument);
+ }
+ if (!status.isOK())
+ return status;
- // Now use the updated document to totally replace the role in the graph!
- RoleInfo role;
- status = parseRoleFromDocument(roleDocument.getObject(), &role);
- if (!status.isOK())
- return status;
- status = roleGraph->replaceRole(role.name, role.roles, role.privileges);
+ status = driver.update(StringData(), &roleDocument);
+ if (!status.isOK())
+ return status;
+ // Now use the updated document to totally replace the role in the graph!
+ RoleInfo role;
+ status = parseRoleFromDocument(roleDocument.getObject(), &role);
+ if (!status.isOK())
return status;
- }
+ status = roleGraph->replaceRole(role.name, role.roles, role.privileges);
- /**
- * Updates roleGraph for a delete-type oplog operation on admin.system.roles.
- */
- Status handleOplogDelete(
- RoleGraph* roleGraph,
- const BSONObj& deletePattern) {
+ return status;
+}
- RoleName roleToDelete;
- Status status = getRoleNameFromIdField(deletePattern["_id"], &roleToDelete);
- if (!status.isOK())
- return status;
- status = roleGraph->deleteRole(roleToDelete);
- if (ErrorCodes::RoleNotFound == status) {
- // Double-delete can happen in oplog application.
- status = Status::OK();
- }
+/**
+ * Updates roleGraph for a delete-type oplog operation on admin.system.roles.
+ */
+Status handleOplogDelete(RoleGraph* roleGraph, const BSONObj& deletePattern) {
+ RoleName roleToDelete;
+ Status status = getRoleNameFromIdField(deletePattern["_id"], &roleToDelete);
+ if (!status.isOK())
return status;
+ status = roleGraph->deleteRole(roleToDelete);
+ if (ErrorCodes::RoleNotFound == status) {
+ // Double-delete can happen in oplog application.
+ status = Status::OK();
}
+ return status;
+}
- /**
- * Updates roleGraph for command-type oplog operations on the admin database.
- */
- Status handleOplogCommand(RoleGraph* roleGraph, const BSONObj& cmdObj) {
- const NamespaceString& rolesCollectionNamespace =
- AuthorizationManager::rolesCollectionNamespace;
- const StringData cmdName(cmdObj.firstElement().fieldNameStringData());
- if (cmdName == "applyOps") {
- // Operations applied by applyOps will be passed into RoleGraph::handleOplog() by the
- // implementation of applyOps itself.
- return Status::OK();
- }
- if (cmdName == "create") {
- return Status::OK();
- }
- if (cmdName == "drop") {
- if (cmdObj.firstElement().str() == rolesCollectionNamespace.coll()) {
- *roleGraph = RoleGraph();
- }
- return Status::OK();
- }
- if (cmdName == "dropDatabase") {
+/**
+ * Updates roleGraph for command-type oplog operations on the admin database.
+ */
+Status handleOplogCommand(RoleGraph* roleGraph, const BSONObj& cmdObj) {
+ const NamespaceString& rolesCollectionNamespace =
+ AuthorizationManager::rolesCollectionNamespace;
+ const StringData cmdName(cmdObj.firstElement().fieldNameStringData());
+ if (cmdName == "applyOps") {
+ // Operations applied by applyOps will be passed into RoleGraph::handleLogOp() by the
+ // implementation of applyOps itself.
+ return Status::OK();
+ }
+ if (cmdName == "create") {
+ return Status::OK();
+ }
+ if (cmdName == "drop") {
+ if (cmdObj.firstElement().str() == rolesCollectionNamespace.coll()) {
*roleGraph = RoleGraph();
- return Status::OK();
}
- if (cmdName == "renameCollection") {
- if (cmdObj.firstElement().str() == rolesCollectionNamespace.ns()) {
- *roleGraph = RoleGraph();
- return Status::OK();
- }
- if (cmdObj["to"].str() == rolesCollectionNamespace.ns()) {
- *roleGraph = RoleGraph();
- return Status(ErrorCodes::OplogOperationUnsupported,
- "Renaming into admin.system.roles produces inconsistent state; "
- "must resynchronize role graph.");
- }
- return Status::OK();
- }
- if (cmdName == "dropIndexes" || cmdName == "deleteIndexes") {
+ return Status::OK();
+ }
+ if (cmdName == "dropDatabase") {
+ *roleGraph = RoleGraph();
+ return Status::OK();
+ }
+ if (cmdName == "renameCollection") {
+ if (cmdObj.firstElement().str() == rolesCollectionNamespace.ns()) {
+ *roleGraph = RoleGraph();
return Status::OK();
}
- if ((cmdName == "collMod" || cmdName == "emptycapped") &&
- cmdObj.firstElement().str() != rolesCollectionNamespace.coll()) {
-
- // We don't care about these if they're not on the roles collection.
- return Status::OK();
+ if (cmdObj["to"].str() == rolesCollectionNamespace.ns()) {
+ *roleGraph = RoleGraph();
+ return Status(ErrorCodes::OplogOperationUnsupported,
+ "Renaming into admin.system.roles produces inconsistent state; "
+ "must resynchronize role graph.");
}
- // No other commands expected. Warn.
- return Status(ErrorCodes::OplogOperationUnsupported, "Unsupported oplog operation");
+ return Status::OK();
}
+ if (cmdName == "dropIndexes" || cmdName == "deleteIndexes") {
+ return Status::OK();
+ }
+ if ((cmdName == "collMod" || cmdName == "emptycapped") &&
+ cmdObj.firstElement().str() != rolesCollectionNamespace.coll()) {
+ // We don't care about these if they're not on the roles collection.
+ return Status::OK();
+ }
+ // No other commands expected. Warn.
+ return Status(ErrorCodes::OplogOperationUnsupported, "Unsupported oplog operation");
+}
} // namespace
- Status RoleGraph::addRoleFromDocument(const BSONObj& doc) {
- RoleInfo role;
- Status status = parseRoleFromDocument(doc, &role);
- if (!status.isOK())
- return status;
- status = replaceRole(role.name, role.roles, role.privileges);
+Status RoleGraph::addRoleFromDocument(const BSONObj& doc) {
+ RoleInfo role;
+ Status status = parseRoleFromDocument(doc, &role);
+ if (!status.isOK())
return status;
- }
+ status = replaceRole(role.name, role.roles, role.privileges);
+ return status;
+}
- Status RoleGraph::handleLogOp(
- const char* op,
- const NamespaceString& ns,
- const BSONObj& o,
- const BSONObj* o2) {
-
- if (op == StringData("db", StringData::LiteralTag()))
- return Status::OK();
- if (op[0] == '\0' || op[1] != '\0') {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << "Unrecognized \"op\" field value \"" <<
- op << '"');
- }
+Status RoleGraph::handleLogOp(const char* op,
+ const NamespaceString& ns,
+ const BSONObj& o,
+ const BSONObj* o2) {
+ if (op == StringData("db", StringData::LiteralTag()))
+ return Status::OK();
+ if (op[0] == '\0' || op[1] != '\0') {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Unrecognized \"op\" field value \"" << op
+ << '"');
+ }
- if (ns.db() != AuthorizationManager::rolesCollectionNamespace.db())
- return Status::OK();
+ if (ns.db() != AuthorizationManager::rolesCollectionNamespace.db())
+ return Status::OK();
- if (ns.isCommand()) {
- if (*op == 'c') {
- return handleOplogCommand(this, o);
- }
- else {
- return Status(ErrorCodes::BadValue,
- "Non-command oplog entry on admin.$cmd namespace");
- }
+ if (ns.isCommand()) {
+ if (*op == 'c') {
+ return handleOplogCommand(this, o);
+ } else {
+ return Status(ErrorCodes::BadValue, "Non-command oplog entry on admin.$cmd namespace");
}
+ }
- if (ns.coll() != AuthorizationManager::rolesCollectionNamespace.coll())
- return Status::OK();
+ if (ns.coll() != AuthorizationManager::rolesCollectionNamespace.coll())
+ return Status::OK();
- switch (*op) {
+ switch (*op) {
case 'i':
return handleOplogInsert(this, o);
case 'u':
@@ -329,9 +317,9 @@ namespace {
"Namespace admin.system.roles is not a valid target for commands");
default:
return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << "Unrecognized \"op\" field value \"" <<
- op << '"');
- }
+ mongoutils::str::stream() << "Unrecognized \"op\" field value \"" << op
+ << '"');
}
+}
} // namespace mongo
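
For orientation, the functions reformatted above are ultimately driven through RoleGraph::handleLogOp(). Below is a minimal, hedged sketch of replaying an insert on admin.system.roles into an in-memory role graph; the role document is illustrative (it follows the system.roles schema of _id, role, db, roles, privileges), and the snippet assumes it runs inside namespace mongo with the usual bson and role_graph headers available.

    // Hedged sketch: feed one oplog-style insert into a RoleGraph.
    RoleGraph graph;
    BSONObj insertedRole = BSON("_id" << "test.appReader"
                                      << "role" << "appReader"
                                      << "db" << "test"
                                      << "roles" << BSONArray()         // no inherited roles
                                      << "privileges" << BSONArray());  // no direct privileges
    Status status = graph.handleLogOp("i",  // insert-type oplog entry
                                      NamespaceString("admin.system.roles"),
                                      insertedRole,
                                      nullptr /* o2 is only used for updates */);
    // On success the graph now contains the role RoleName("appReader", "test").
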
diff --git a/src/mongo/db/auth/role_name.cpp b/src/mongo/db/auth/role_name.cpp
index f38bcedeb59..d5f6aecfa48 100644
--- a/src/mongo/db/auth/role_name.cpp
+++ b/src/mongo/db/auth/role_name.cpp
@@ -36,20 +36,19 @@
namespace mongo {
- RoleName::RoleName(StringData role, StringData dbname) {
- _fullName.resize(role.size() + dbname.size() + 1);
- std::string::iterator iter = std::copy(role.rawData(),
- role.rawData() + role.size(),
- _fullName.begin());
- *iter = '@';
- ++iter;
- iter = std::copy(dbname.rawData(), dbname.rawData() + dbname.size(), iter);
- dassert(iter == _fullName.end());
- _splitPoint = role.size();
- }
+RoleName::RoleName(StringData role, StringData dbname) {
+ _fullName.resize(role.size() + dbname.size() + 1);
+ std::string::iterator iter =
+ std::copy(role.rawData(), role.rawData() + role.size(), _fullName.begin());
+ *iter = '@';
+ ++iter;
+ iter = std::copy(dbname.rawData(), dbname.rawData() + dbname.size(), iter);
+ dassert(iter == _fullName.end());
+ _splitPoint = role.size();
+}
- std::ostream& operator<<(std::ostream& os, const RoleName& name) {
- return os << name.getFullName();
- }
+std::ostream& operator<<(std::ostream& os, const RoleName& name) {
+ return os << name.getFullName();
+}
} // namespace mongo
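
The constructor above packs both parts into a single backing string; a quick illustration of the resulting layout (values are made up):

    RoleName r("readWrite", "test");
    // r.getFullName() == "readWrite@test"   (single string, '@' at _splitPoint)
    // r.getRole()     == "readWrite"        (substring before the '@')
    // r.getDB()       == "test"             (substring after the '@')
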
diff --git a/src/mongo/db/auth/role_name.h b/src/mongo/db/auth/role_name.h
index 05b5d30e80b..30ef4d50412 100644
--- a/src/mongo/db/auth/role_name.h
+++ b/src/mongo/db/auth/role_name.h
@@ -40,146 +40,176 @@
namespace mongo {
+/**
+ * Representation of a name of a role in a MongoDB system.
+ *
+ * Consists of a "role name" part and a "database name" part.
+ */
+class RoleName {
+public:
+ RoleName() : _splitPoint(0) {}
+ RoleName(StringData role, StringData dbname);
+
/**
- * Representation of a name of a role in a MongoDB system.
- *
- * Consists of a "role name" part and a "datbase name" part.
+ * Gets the name of the role excluding the "@dbname" component.
*/
- class RoleName {
- public:
- RoleName() : _splitPoint(0) {}
- RoleName(StringData role, StringData dbname);
+ StringData getRole() const {
+ return StringData(_fullName).substr(0, _splitPoint);
+ }
- /**
- * Gets the name of the role excluding the "@dbname" component.
- */
- StringData getRole() const { return StringData(_fullName).substr(0, _splitPoint); }
+ /**
+ * Gets the database name part of a role name.
+ */
+ StringData getDB() const {
+ return StringData(_fullName).substr(_splitPoint + 1);
+ }
- /**
- * Gets the database name part of a role name.
- */
- StringData getDB() const { return StringData(_fullName).substr(_splitPoint + 1); }
+ bool empty() const {
+ return _fullName.empty();
+ }
- bool empty() const { return _fullName.empty(); }
+ /**
+ * Gets the full name of a role as a string, formatted as "role@db".
+ *
+ * Allowed for keys in non-persistent data structures, such as std::map.
+ */
+ const std::string& getFullName() const {
+ return _fullName;
+ }
- /**
- * Gets the full name of a role as a string, formatted as "role@db".
- *
- * Allowed for keys in non-persistent data structures, such as std::map.
- */
- const std::string& getFullName() const { return _fullName; }
+ /**
+ * Stringifies the object, for logging/debugging.
+ */
+ const std::string& toString() const {
+ return getFullName();
+ }
- /**
- * Stringifies the object, for logging/debugging.
- */
- const std::string& toString() const { return getFullName(); }
+private:
+ std::string _fullName; // The full name, stored as a string. "role@db".
+ size_t _splitPoint; // The index of the "@" separating the role and db name parts.
+};
- private:
- std::string _fullName; // The full name, stored as a string. "role@db".
- size_t _splitPoint; // The index of the "@" separating the role and db name parts.
- };
+static inline bool operator==(const RoleName& lhs, const RoleName& rhs) {
+ return lhs.getFullName() == rhs.getFullName();
+}
- static inline bool operator==(const RoleName& lhs, const RoleName& rhs) {
- return lhs.getFullName() == rhs.getFullName();
- }
+static inline bool operator!=(const RoleName& lhs, const RoleName& rhs) {
+ return lhs.getFullName() != rhs.getFullName();
+}
- static inline bool operator!=(const RoleName& lhs, const RoleName& rhs) {
- return lhs.getFullName() != rhs.getFullName();
+static inline bool operator<(const RoleName& lhs, const RoleName& rhs) {
+ if (lhs.getDB() == rhs.getDB()) {
+ return lhs.getRole() < rhs.getRole();
}
+ return lhs.getDB() < rhs.getDB();
+}
- static inline bool operator<(const RoleName& lhs, const RoleName& rhs) {
- if (lhs.getDB() == rhs.getDB()) {
- return lhs.getRole() < rhs.getRole();
- }
- return lhs.getDB() < rhs.getDB();
- }
+std::ostream& operator<<(std::ostream& os, const RoleName& name);
- std::ostream& operator<<(std::ostream& os, const RoleName& name);
+/**
+ * Iterator over an unspecified container of RoleName objects.
+ */
+class RoleNameIterator {
+public:
+ class Impl {
+ MONGO_DISALLOW_COPYING(Impl);
- /**
- * Iterator over an unspecified container of RoleName objects.
- */
- class RoleNameIterator {
public:
- class Impl {
- MONGO_DISALLOW_COPYING(Impl);
- public:
- Impl() {};
- virtual ~Impl() {};
- static Impl* clone(Impl* orig) { return orig ? orig->doClone(): NULL; }
- virtual bool more() const = 0;
- virtual const RoleName& get() const = 0;
-
- virtual const RoleName& next() = 0;
-
- private:
- virtual Impl* doClone() const = 0;
- };
-
- RoleNameIterator() : _impl(nullptr) {}
- RoleNameIterator(const RoleNameIterator& other) : _impl(Impl::clone(other._impl.get())) {}
- explicit RoleNameIterator(Impl* impl) : _impl(impl) {}
-
- RoleNameIterator& operator=(const RoleNameIterator& other) {
- _impl.reset(Impl::clone(other._impl.get()));
- return *this;
+ Impl(){};
+ virtual ~Impl(){};
+ static Impl* clone(Impl* orig) {
+ return orig ? orig->doClone() : NULL;
}
+ virtual bool more() const = 0;
+ virtual const RoleName& get() const = 0;
- bool more() const { return _impl.get() && _impl->more(); }
- const RoleName& get() const { return _impl->get(); }
-
- const RoleName& next() { return _impl->next(); }
-
- const RoleName& operator*() const { return get(); }
- const RoleName* operator->() const { return &get(); }
+ virtual const RoleName& next() = 0;
private:
- std::unique_ptr<Impl> _impl;
+ virtual Impl* doClone() const = 0;
};
-} // namespace mongo
+ RoleNameIterator() : _impl(nullptr) {}
+ RoleNameIterator(const RoleNameIterator& other) : _impl(Impl::clone(other._impl.get())) {}
+ explicit RoleNameIterator(Impl* impl) : _impl(impl) {}
+
+ RoleNameIterator& operator=(const RoleNameIterator& other) {
+ _impl.reset(Impl::clone(other._impl.get()));
+ return *this;
+ }
+
+ bool more() const {
+ return _impl.get() && _impl->more();
+ }
+ const RoleName& get() const {
+ return _impl->get();
+ }
+
+ const RoleName& next() {
+ return _impl->next();
+ }
+
+ const RoleName& operator*() const {
+ return get();
+ }
+ const RoleName* operator->() const {
+ return &get();
+ }
+
+private:
+ std::unique_ptr<Impl> _impl;
+};
+
+} // namespace mongo
// Define hash function for RoleNames so they can be keys in std::unordered_map
MONGO_HASH_NAMESPACE_START
- template <> struct hash<mongo::RoleName> {
- size_t operator()(const mongo::RoleName& rname) const {
- return hash<std::string>()(rname.getFullName());
- }
- };
+template <>
+struct hash<mongo::RoleName> {
+ size_t operator()(const mongo::RoleName& rname) const {
+ return hash<std::string>()(rname.getFullName());
+ }
+};
MONGO_HASH_NAMESPACE_END
namespace mongo {
- template <typename ContainerIterator>
- class RoleNameContainerIteratorImpl : public RoleNameIterator::Impl {
- MONGO_DISALLOW_COPYING(RoleNameContainerIteratorImpl);
- public:
- RoleNameContainerIteratorImpl(const ContainerIterator& begin,
- const ContainerIterator& end) :
- _curr(begin), _end(end) {}
- virtual ~RoleNameContainerIteratorImpl() {}
- virtual bool more() const { return _curr != _end; }
- virtual const RoleName& next() { return *(_curr++); }
- virtual const RoleName& get() const { return *_curr; }
- virtual RoleNameIterator::Impl* doClone() const {
- return new RoleNameContainerIteratorImpl(_curr, _end);
- }
+template <typename ContainerIterator>
+class RoleNameContainerIteratorImpl : public RoleNameIterator::Impl {
+ MONGO_DISALLOW_COPYING(RoleNameContainerIteratorImpl);
- private:
- ContainerIterator _curr;
- ContainerIterator _end;
- };
-
- template <typename ContainerIterator>
- RoleNameIterator makeRoleNameIterator(const ContainerIterator& begin,
- const ContainerIterator& end) {
- return RoleNameIterator( new RoleNameContainerIteratorImpl<ContainerIterator>(begin, end));
+public:
+ RoleNameContainerIteratorImpl(const ContainerIterator& begin, const ContainerIterator& end)
+ : _curr(begin), _end(end) {}
+ virtual ~RoleNameContainerIteratorImpl() {}
+ virtual bool more() const {
+ return _curr != _end;
}
-
- template <typename Container>
- RoleNameIterator makeRoleNameIteratorForContainer(const Container& container) {
- return makeRoleNameIterator(container.begin(), container.end());
+ virtual const RoleName& next() {
+ return *(_curr++);
}
+ virtual const RoleName& get() const {
+ return *_curr;
+ }
+ virtual RoleNameIterator::Impl* doClone() const {
+ return new RoleNameContainerIteratorImpl(_curr, _end);
+ }
+
+private:
+ ContainerIterator _curr;
+ ContainerIterator _end;
+};
+
+template <typename ContainerIterator>
+RoleNameIterator makeRoleNameIterator(const ContainerIterator& begin,
+ const ContainerIterator& end) {
+ return RoleNameIterator(new RoleNameContainerIteratorImpl<ContainerIterator>(begin, end));
+}
+
+template <typename Container>
+RoleNameIterator makeRoleNameIteratorForContainer(const Container& container) {
+ return makeRoleNameIterator(container.begin(), container.end());
+}
} // namespace mongo
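
The makeRoleNameIterator()/makeRoleNameIteratorForContainer() helpers at the end of this header are normally consumed with the more()/next() loop sketched below; the container contents are illustrative only.

    #include <vector>
    #include "mongo/db/auth/role_name.h"

    std::vector<mongo::RoleName> roles;
    roles.push_back(mongo::RoleName("read", "test"));
    roles.push_back(mongo::RoleName("dbAdmin", "test"));

    mongo::RoleNameIterator it = mongo::makeRoleNameIteratorForContainer(roles);
    while (it.more()) {
        const mongo::RoleName& name = it.next();
        // name.getFullName() yields "read@test", then "dbAdmin@test".
    }
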
diff --git a/src/mongo/db/auth/sasl_authentication_session.cpp b/src/mongo/db/auth/sasl_authentication_session.cpp
index 62abdc8a284..c74bba6fadb 100644
--- a/src/mongo/db/auth/sasl_authentication_session.cpp
+++ b/src/mongo/db/auth/sasl_authentication_session.cpp
@@ -46,51 +46,47 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
- SaslAuthenticationSession::SaslAuthenticationSessionFactoryFn
- SaslAuthenticationSession::create;
+SaslAuthenticationSession::SaslAuthenticationSessionFactoryFn SaslAuthenticationSession::create;
- // Mechanism name constants.
- const char SaslAuthenticationSession::mechanismCRAMMD5[] = "CRAM-MD5";
- const char SaslAuthenticationSession::mechanismDIGESTMD5[] = "DIGEST-MD5";
- const char SaslAuthenticationSession::mechanismSCRAMSHA1[] = "SCRAM-SHA-1";
- const char SaslAuthenticationSession::mechanismGSSAPI[] = "GSSAPI";
- const char SaslAuthenticationSession::mechanismPLAIN[] = "PLAIN";
+// Mechanism name constants.
+const char SaslAuthenticationSession::mechanismCRAMMD5[] = "CRAM-MD5";
+const char SaslAuthenticationSession::mechanismDIGESTMD5[] = "DIGEST-MD5";
+const char SaslAuthenticationSession::mechanismSCRAMSHA1[] = "SCRAM-SHA-1";
+const char SaslAuthenticationSession::mechanismGSSAPI[] = "GSSAPI";
+const char SaslAuthenticationSession::mechanismPLAIN[] = "PLAIN";
- /**
- * Standard method in mongodb for determining if "authenticatedUser" may act as "requestedUser."
- *
- * The standard rule in MongoDB is simple. The authenticated user name must be the same as the
- * requested user name.
- */
- bool isAuthorizedCommon(SaslAuthenticationSession* session,
- StringData requestedUser,
- StringData authenticatedUser) {
-
- return requestedUser == authenticatedUser;
- }
+/**
+ * Standard method in MongoDB for determining if "authenticatedUser" may act as "requestedUser."
+ *
+ * The standard rule in MongoDB is simple. The authenticated user name must be the same as the
+ * requested user name.
+ */
+bool isAuthorizedCommon(SaslAuthenticationSession* session,
+ StringData requestedUser,
+ StringData authenticatedUser) {
+ return requestedUser == authenticatedUser;
+}
- SaslAuthenticationSession::SaslAuthenticationSession(AuthorizationSession* authzSession) :
- AuthenticationSession(AuthenticationSession::SESSION_TYPE_SASL),
- _authzSession(authzSession),
- _saslStep(0),
- _conversationId(0),
- _autoAuthorize(false),
- _done(false) {
- }
+SaslAuthenticationSession::SaslAuthenticationSession(AuthorizationSession* authzSession)
+ : AuthenticationSession(AuthenticationSession::SESSION_TYPE_SASL),
+ _authzSession(authzSession),
+ _saslStep(0),
+ _conversationId(0),
+ _autoAuthorize(false),
+ _done(false) {}
- SaslAuthenticationSession::~SaslAuthenticationSession() {};
+SaslAuthenticationSession::~SaslAuthenticationSession(){};
- StringData SaslAuthenticationSession::getAuthenticationDatabase() const {
- if (Command::testCommandsEnabled &&
- _authenticationDatabase == "admin" &&
- getPrincipalId() == internalSecurity.user->getName().getUser()) {
- // Allows authenticating as the internal user against the admin database. This is to
- // support the auth passthrough test framework on mongos (since you can't use the local
- // database on a mongos, so you can't auth as the internal user without this).
- return internalSecurity.user->getName().getDB();
- } else {
- return _authenticationDatabase;
- }
+StringData SaslAuthenticationSession::getAuthenticationDatabase() const {
+ if (Command::testCommandsEnabled && _authenticationDatabase == "admin" &&
+ getPrincipalId() == internalSecurity.user->getName().getUser()) {
+ // Allows authenticating as the internal user against the admin database. This is to
+ // support the auth passthrough test framework on mongos (since you can't use the local
+ // database on a mongos, so you can't auth as the internal user without this).
+ return internalSecurity.user->getName().getDB();
+ } else {
+ return _authenticationDatabase;
}
+}
} // namespace mongo
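
isAuthorizedCommon() above reduces the standard authorization rule to identity equality; a trivial illustration (the session argument is not consulted by this rule, so a null pointer suffices for the sketch, which is assumed to run inside namespace mongo):

    bool sameUser      = isAuthorizedCommon(nullptr, "alice", "alice");  // true
    bool differentUser = isAuthorizedCommon(nullptr, "alice", "bob");    // false
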
diff --git a/src/mongo/db/auth/sasl_authentication_session.h b/src/mongo/db/auth/sasl_authentication_session.h
index e1bc7cf7551..ac298dbede6 100644
--- a/src/mongo/db/auth/sasl_authentication_session.h
+++ b/src/mongo/db/auth/sasl_authentication_session.h
@@ -41,128 +41,140 @@
namespace mongo {
- class AuthorizationSession;
- class OperationContext;
+class AuthorizationSession;
+class OperationContext;
+
+/**
+ * Authentication session data for the server side of SASL authentication.
+ */
+class SaslAuthenticationSession : public AuthenticationSession {
+ MONGO_DISALLOW_COPYING(SaslAuthenticationSession);
+
+public:
+ typedef stdx::function<SaslAuthenticationSession*(AuthorizationSession*, const std::string&)>
+ SaslAuthenticationSessionFactoryFn;
+ static SaslAuthenticationSessionFactoryFn create;
+
+ // Mechanism name constants.
+ static const char mechanismCRAMMD5[];
+ static const char mechanismDIGESTMD5[];
+ static const char mechanismSCRAMSHA1[];
+ static const char mechanismGSSAPI[];
+ static const char mechanismPLAIN[];
+
+ explicit SaslAuthenticationSession(AuthorizationSession* authSession);
+ virtual ~SaslAuthenticationSession();
+
+ /**
+ * Start the server side of a SASL authentication.
+ *
+ * "authenticationDatabase" is the database against which the user is authenticating.
+ * "mechanism" is the SASL mechanism to use.
+ * "serviceName" is the SASL service name to use.
+ * "serviceHostname" is the FQDN of this server.
+ * "conversationId" is the conversation identifier to use for this session.
+ *
+ * If "autoAuthorize" is set to true, the server will automatically acquire all privileges
+ * for a successfully authenticated user. If it is false, the client will need to
+ * explicitly acquire privileges on resources it wishes to access.
+ *
+ * Must be called only once on an instance.
+ */
+ virtual Status start(StringData authenticationDatabase,
+ StringData mechanism,
+ StringData serviceName,
+ StringData serviceHostname,
+ int64_t conversationId,
+ bool autoAuthorize) = 0;
+
+ /**
+ * Perform one step of the server side of the authentication session,
+ * consuming "inputData" and producing "*outputData".
+ *
+ * A return of Status::OK() indicates successful progress towards authentication.
+ * Any other return code indicates that authentication has failed.
+ *
+ * Must not be called before start().
+ */
+ virtual Status step(StringData inputData, std::string* outputData) = 0;
+
+ /**
+ * Returns the operation context associated with the currently executing command.
+ * Authentication commands must set this on their associated
+ * SaslAuthenticationSession.
+ */
+ OperationContext* getOpCtxt() const {
+ return _txn;
+ }
+ void setOpCtxt(OperationContext* txn) {
+ _txn = txn;
+ }
+
+ /**
+ * Gets the name of the database against which this authentication conversation is running.
+ *
+ * Not meaningful before a successful call to start().
+ */
+ StringData getAuthenticationDatabase() const;
+
+ /**
+ * Get the conversation id for this authentication session.
+ *
+ * Must not be called before start().
+ */
+ int64_t getConversationId() const {
+ return _conversationId;
+ }
+
+ /**
+ * If the last call to step() returned Status::OK(), this method returns true if the
+ * authentication conversation has completed, from the server's perspective. If it returns
+ * false, the server expects more input from the client. If the last call to step() did not
+ * return Status::OK(), returns true.
+ *
+ * Behavior is undefined if step() has not been called.
+ */
+ bool isDone() const {
+ return _done;
+ }
+
+ /**
+ * Gets the string identifier of the principal being authenticated.
+ *
+ * Returns the empty string if the session does not yet know the identity being
+ * authenticated.
+ */
+ virtual std::string getPrincipalId() const = 0;
+
+ /**
+ * Gets the name of the SASL mechanism in use.
+ *
+ * Returns "" if start() has not been called or if start() did not return Status::OK().
+ */
+ virtual const char* getMechanism() const = 0;
/**
- * Authentication session data for the server side of SASL authentication.
+ * Returns true if automatic privilege acquisition should be used for this principal, after
+ * authentication. Not meaningful before a successful call to start().
*/
- class SaslAuthenticationSession : public AuthenticationSession {
- MONGO_DISALLOW_COPYING(SaslAuthenticationSession);
- public:
- typedef stdx::function<SaslAuthenticationSession* (AuthorizationSession*,
- const std::string&)>
- SaslAuthenticationSessionFactoryFn;
- static SaslAuthenticationSessionFactoryFn create;
-
- // Mechanism name constants.
- static const char mechanismCRAMMD5[];
- static const char mechanismDIGESTMD5[];
- static const char mechanismSCRAMSHA1[];
- static const char mechanismGSSAPI[];
- static const char mechanismPLAIN[];
-
- explicit SaslAuthenticationSession(AuthorizationSession* authSession);
- virtual ~SaslAuthenticationSession();
-
- /**
- * Start the server side of a SASL authentication.
- *
- * "authenticationDatabase" is the database against which the user is authenticating.
- * "mechanism" is the SASL mechanism to use.
- * "serviceName" is the SASL service name to use.
- * "serviceHostname" is the FQDN of this server.
- * "conversationId" is the conversation identifier to use for this session.
- *
- * If "autoAuthorize" is set to true, the server will automatically acquire all privileges
- * for a successfully authenticated user. If it is false, the client will need to
- * explicilty acquire privileges on resources it wishes to access.
- *
- * Must be called only once on an instance.
- */
- virtual Status start(StringData authenticationDatabase,
- StringData mechanism,
- StringData serviceName,
- StringData serviceHostname,
- int64_t conversationId,
- bool autoAuthorize) = 0;
-
- /**
- * Perform one step of the server side of the authentication session,
- * consuming "inputData" and producing "*outputData".
- *
- * A return of Status::OK() indiciates succesful progress towards authentication.
- * Any other return code indicates that authentication has failed.
- *
- * Must not be called before start().
- */
- virtual Status step(StringData inputData, std::string* outputData) = 0;
-
- /**
- * Returns the the operation context associated with the currently executing command.
- * Authentication commands must set this on their associated
- * SaslAuthenticationSession.
- */
- OperationContext* getOpCtxt() const { return _txn; }
- void setOpCtxt(OperationContext* txn) { _txn = txn; }
-
- /**
- * Gets the name of the database against which this authentication conversation is running.
- *
- * Not meaningful before a successful call to start().
- */
- StringData getAuthenticationDatabase() const;
-
- /**
- * Get the conversation id for this authentication session.
- *
- * Must not be called before start().
- */
- int64_t getConversationId() const { return _conversationId; }
-
- /**
- * If the last call to step() returned Status::OK(), this method returns true if the
- * authentication conversation has completed, from the server's perspective. If it returns
- * false, the server expects more input from the client. If the last call to step() did not
- * return Status::OK(), returns true.
- *
- * Behavior is undefined if step() has not been called.
- */
- bool isDone() const { return _done; }
-
- /**
- * Gets the string identifier of the principal being authenticated.
- *
- * Returns the empty string if the session does not yet know the identity being
- * authenticated.
- */
- virtual std::string getPrincipalId() const = 0;
-
- /**
- * Gets the name of the SASL mechanism in use.
- *
- * Returns "" if start() has not been called or if start() did not return Status::OK().
- */
- virtual const char* getMechanism() const = 0;
-
- /**
- * Returns true if automatic privilege acquisition should be used for this principal, after
- * authentication. Not meaningful before a successful call to start().
- */
- bool shouldAutoAuthorize() const { return _autoAuthorize; }
-
- AuthorizationSession* getAuthorizationSession() { return _authzSession; }
-
- protected:
- OperationContext* _txn;
- AuthorizationSession* _authzSession;
- std::string _authenticationDatabase;
- std::string _serviceName;
- std::string _serviceHostname;
- int _saslStep;
- int64_t _conversationId;
- bool _autoAuthorize;
- bool _done;
- };
+ bool shouldAutoAuthorize() const {
+ return _autoAuthorize;
+ }
+
+ AuthorizationSession* getAuthorizationSession() {
+ return _authzSession;
+ }
+
+protected:
+ OperationContext* _txn;
+ AuthorizationSession* _authzSession;
+ std::string _authenticationDatabase;
+ std::string _serviceName;
+ std::string _serviceHostname;
+ int _saslStep;
+ int64_t _conversationId;
+ bool _autoAuthorize;
+ bool _done;
+};
} // namespace mongo
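
The start()/step()/isDone() contract documented in this header is typically driven by a loop of the following shape. This is a simplified, hedged sketch: clientPayload() and sendToClient() are hypothetical stand-ins for the wire protocol and are not names from this header, and the argument values to start() are illustrative.

    // Hypothetical helpers standing in for the transport layer.
    std::string clientPayload();              // next payload received from the client
    void sendToClient(const std::string& p);  // reply payload sent back to the client

    Status runServerConversation(mongo::SaslAuthenticationSession* session) {
        Status status = session->start("admin",            // authenticationDatabase
                                       "SCRAM-SHA-1",      // mechanism
                                       "mongodb",          // serviceName
                                       "db1.example.net",  // serviceHostname (illustrative)
                                       1,                  // conversationId
                                       true);              // autoAuthorize
        while (status.isOK() && !session->isDone()) {
            std::string reply;
            status = session->step(clientPayload(), &reply);  // one challenge/response round
            if (status.isOK())
                sendToClient(reply);
        }
        return status;
    }
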
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index 6747a35771c..c05eec78539 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -56,327 +56,320 @@
namespace mongo {
namespace {
- using std::stringstream;
-
- const bool autoAuthorizeDefault = true;
-
- class CmdSaslStart : public Command {
- public:
- CmdSaslStart();
- virtual ~CmdSaslStart();
-
- virtual void addRequiredPrivileges(
- const std::string&, const BSONObj&, std::vector<Privilege>*) {}
-
- virtual bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& ignored,
- BSONObjBuilder& result);
-
- virtual void help(stringstream& help) const;
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool requiresAuth() { return false; }
-
- };
-
- class CmdSaslContinue : public Command {
- public:
- CmdSaslContinue();
- virtual ~CmdSaslContinue();
-
- virtual void addRequiredPrivileges(
- const std::string&, const BSONObj&, std::vector<Privilege>*) {}
-
- virtual bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& ignored,
- BSONObjBuilder& result);
-
- virtual void help(stringstream& help) const;
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool requiresAuth() { return false; }
- };
-
- CmdSaslStart cmdSaslStart;
- CmdSaslContinue cmdSaslContinue;
- Status buildResponse(const SaslAuthenticationSession* session,
- const std::string& responsePayload,
- BSONType responsePayloadType,
- BSONObjBuilder* result) {
- result->appendIntOrLL(saslCommandConversationIdFieldName, session->getConversationId());
- result->appendBool(saslCommandDoneFieldName, session->isDone());
-
- if (responsePayload.size() > size_t(std::numeric_limits<int>::max())) {
- return Status(ErrorCodes::InvalidLength, "Response payload too long");
- }
- if (responsePayloadType == BinData) {
- result->appendBinData(saslCommandPayloadFieldName,
- int(responsePayload.size()),
- BinDataGeneral,
- responsePayload.data());
- }
- else if (responsePayloadType == String) {
- result->append(saslCommandPayloadFieldName, base64::encode(responsePayload));
- }
- else {
- fassertFailed(4003);
- }
+using std::stringstream;
- return Status::OK();
- }
+const bool autoAuthorizeDefault = true;
- Status extractConversationId(const BSONObj& cmdObj, int64_t* conversationId) {
- BSONElement element;
- Status status = bsonExtractField(cmdObj, saslCommandConversationIdFieldName, &element);
- if (!status.isOK())
- return status;
+class CmdSaslStart : public Command {
+public:
+ CmdSaslStart();
+ virtual ~CmdSaslStart();
- if (!element.isNumber()) {
- return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Wrong type for field; expected number for " << element);
- }
- *conversationId = element.numberLong();
- return Status::OK();
- }
+ virtual void addRequiredPrivileges(const std::string&,
+ const BSONObj&,
+ std::vector<Privilege>*) {}
- Status extractMechanism(const BSONObj& cmdObj, std::string* mechanism) {
- return bsonExtractStringField(cmdObj, saslCommandMechanismFieldName, mechanism);
- }
+ virtual bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& ignored,
+ BSONObjBuilder& result);
- void addStatus(const Status& status, BSONObjBuilder* builder) {
- builder->append("ok", status.isOK() ? 1.0: 0.0);
- if (!status.isOK())
- builder->append(saslCommandCodeFieldName, status.code());
- if (!status.reason().empty())
- builder->append(saslCommandErrmsgFieldName, status.reason());
+ virtual void help(stringstream& help) const;
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
}
-
- Status doSaslStep(const ClientBasic* client,
- SaslAuthenticationSession* session,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
-
- std::string payload;
- BSONType type = EOO;
- Status status = saslExtractPayload(cmdObj, &payload, &type);
- if (!status.isOK())
- return status;
-
- std::string responsePayload;
- // Passing in a payload and extracting a responsePayload
- status = session->step(payload, &responsePayload);
-
- if (!status.isOK()) {
- const SockAddr clientAddr = client->port()->localAddr();
- log() << session->getMechanism() << " authentication failed for " <<
- session->getPrincipalId() << " on " <<
- session->getAuthenticationDatabase() << " from client " << clientAddr.getAddr() <<
- " ; " << status.toString() << std::endl;
- // All the client needs to know is that authentication has failed.
- return Status(ErrorCodes::AuthenticationFailed, "Authentication failed.");
- }
-
- status = buildResponse(session, responsePayload, type, result);
- if (!status.isOK())
- return status;
-
- if (session->isDone()) {
- UserName userName(session->getPrincipalId(), session->getAuthenticationDatabase());
- status = session->getAuthorizationSession()->addAndAuthorizeUser(
- session->getOpCtxt(), userName);
- if (!status.isOK()) {
- return status;
- }
-
- if (!serverGlobalParams.quiet) {
- log() << "Successfully authenticated as principal " << session->getPrincipalId()
- << " on " << session->getAuthenticationDatabase();
- }
- }
- return Status::OK();
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool requiresAuth() {
+ return false;
+ }
+};
+
+class CmdSaslContinue : public Command {
+public:
+ CmdSaslContinue();
+ virtual ~CmdSaslContinue();
+
+ virtual void addRequiredPrivileges(const std::string&,
+ const BSONObj&,
+ std::vector<Privilege>*) {}
+
+ virtual bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& ignored,
+ BSONObjBuilder& result);
+
+ virtual void help(stringstream& help) const;
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool requiresAuth() {
+ return false;
+ }
+};
+
+CmdSaslStart cmdSaslStart;
+CmdSaslContinue cmdSaslContinue;
+Status buildResponse(const SaslAuthenticationSession* session,
+ const std::string& responsePayload,
+ BSONType responsePayloadType,
+ BSONObjBuilder* result) {
+ result->appendIntOrLL(saslCommandConversationIdFieldName, session->getConversationId());
+ result->appendBool(saslCommandDoneFieldName, session->isDone());
+
+ if (responsePayload.size() > size_t(std::numeric_limits<int>::max())) {
+ return Status(ErrorCodes::InvalidLength, "Response payload too long");
+ }
+ if (responsePayloadType == BinData) {
+ result->appendBinData(saslCommandPayloadFieldName,
+ int(responsePayload.size()),
+ BinDataGeneral,
+ responsePayload.data());
+ } else if (responsePayloadType == String) {
+ result->append(saslCommandPayloadFieldName, base64::encode(responsePayload));
+ } else {
+ fassertFailed(4003);
}
- Status doSaslStart(const ClientBasic* client,
- SaslAuthenticationSession* session,
- const std::string& db,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
-
- bool autoAuthorize = false;
- Status status = bsonExtractBooleanFieldWithDefault(cmdObj,
- saslCommandAutoAuthorizeFieldName,
- autoAuthorizeDefault,
- &autoAuthorize);
- if (!status.isOK())
- return status;
-
- std::string mechanism;
- status = extractMechanism(cmdObj, &mechanism);
- if (!status.isOK())
- return status;
-
- if (!sequenceContains(saslGlobalParams.authenticationMechanisms, mechanism) &&
- mechanism != "SCRAM-SHA-1") {
- // Always allow SCRAM-SHA-1 to pass to the first sasl step since we need to
- // handle internal user authentication, SERVER-16534
- result->append(saslCommandMechanismListFieldName,
- saslGlobalParams.authenticationMechanisms);
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << "Unsupported mechanism " << mechanism);
- }
+ return Status::OK();
+}
- status = session->start(db,
- mechanism,
- saslGlobalParams.serviceName,
- saslGlobalParams.hostName,
- 1,
- autoAuthorize);
- if (!status.isOK())
- return status;
+Status extractConversationId(const BSONObj& cmdObj, int64_t* conversationId) {
+ BSONElement element;
+ Status status = bsonExtractField(cmdObj, saslCommandConversationIdFieldName, &element);
+ if (!status.isOK())
+ return status;
- return doSaslStep(client, session, cmdObj, result);
+ if (!element.isNumber()) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Wrong type for field; expected number for " << element);
+ }
+ *conversationId = element.numberLong();
+ return Status::OK();
+}
+
+Status extractMechanism(const BSONObj& cmdObj, std::string* mechanism) {
+ return bsonExtractStringField(cmdObj, saslCommandMechanismFieldName, mechanism);
+}
+
+void addStatus(const Status& status, BSONObjBuilder* builder) {
+ builder->append("ok", status.isOK() ? 1.0 : 0.0);
+ if (!status.isOK())
+ builder->append(saslCommandCodeFieldName, status.code());
+ if (!status.reason().empty())
+ builder->append(saslCommandErrmsgFieldName, status.reason());
+}
+
+Status doSaslStep(const ClientBasic* client,
+ SaslAuthenticationSession* session,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
+ std::string payload;
+ BSONType type = EOO;
+ Status status = saslExtractPayload(cmdObj, &payload, &type);
+ if (!status.isOK())
+ return status;
+
+ std::string responsePayload;
+ // Passing in a payload and extracting a responsePayload
+ status = session->step(payload, &responsePayload);
+
+ if (!status.isOK()) {
+ const SockAddr clientAddr = client->port()->localAddr();
+ log() << session->getMechanism() << " authentication failed for "
+ << session->getPrincipalId() << " on " << session->getAuthenticationDatabase()
+ << " from client " << clientAddr.getAddr() << " ; " << status.toString() << std::endl;
+ // All the client needs to know is that authentication has failed.
+ return Status(ErrorCodes::AuthenticationFailed, "Authentication failed.");
}
- Status doSaslContinue(const ClientBasic* client,
- SaslAuthenticationSession* session,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
+ status = buildResponse(session, responsePayload, type, result);
+ if (!status.isOK())
+ return status;
- int64_t conversationId = 0;
- Status status = extractConversationId(cmdObj, &conversationId);
- if (!status.isOK())
+ if (session->isDone()) {
+ UserName userName(session->getPrincipalId(), session->getAuthenticationDatabase());
+ status =
+ session->getAuthorizationSession()->addAndAuthorizeUser(session->getOpCtxt(), userName);
+ if (!status.isOK()) {
return status;
- if (conversationId != session->getConversationId())
- return Status(ErrorCodes::ProtocolError, "sasl: Mismatched conversation id");
+ }
- return doSaslStep(client, session, cmdObj, result);
+ if (!serverGlobalParams.quiet) {
+ log() << "Successfully authenticated as principal " << session->getPrincipalId()
+ << " on " << session->getAuthenticationDatabase();
+ }
}
-
- CmdSaslStart::CmdSaslStart() : Command(saslStartCommandName) {}
- CmdSaslStart::~CmdSaslStart() {}
-
- void CmdSaslStart::help(std::stringstream& os) const {
- os << "First step in a SASL authentication conversation.";
+ return Status::OK();
+}
+
+Status doSaslStart(const ClientBasic* client,
+ SaslAuthenticationSession* session,
+ const std::string& db,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
+ bool autoAuthorize = false;
+ Status status = bsonExtractBooleanFieldWithDefault(
+ cmdObj, saslCommandAutoAuthorizeFieldName, autoAuthorizeDefault, &autoAuthorize);
+ if (!status.isOK())
+ return status;
+
+ std::string mechanism;
+ status = extractMechanism(cmdObj, &mechanism);
+ if (!status.isOK())
+ return status;
+
+ if (!sequenceContains(saslGlobalParams.authenticationMechanisms, mechanism) &&
+ mechanism != "SCRAM-SHA-1") {
+ // Always allow SCRAM-SHA-1 to pass to the first sasl step since we need to
+ // handle internal user authentication, SERVER-16534
+ result->append(saslCommandMechanismListFieldName,
+ saslGlobalParams.authenticationMechanisms);
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Unsupported mechanism " << mechanism);
}
- bool CmdSaslStart::run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& ignored,
- BSONObjBuilder& result) {
+ status = session->start(
+ db, mechanism, saslGlobalParams.serviceName, saslGlobalParams.hostName, 1, autoAuthorize);
+ if (!status.isOK())
+ return status;
- ClientBasic* client = ClientBasic::getCurrent();
- AuthenticationSession::set(client, std::unique_ptr<AuthenticationSession>());
+ return doSaslStep(client, session, cmdObj, result);
+}
- std::string mechanism;
- if (!extractMechanism(cmdObj, &mechanism).isOK()) {
- return false;
- }
+Status doSaslContinue(const ClientBasic* client,
+ SaslAuthenticationSession* session,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
+ int64_t conversationId = 0;
+ Status status = extractConversationId(cmdObj, &conversationId);
+ if (!status.isOK())
+ return status;
+ if (conversationId != session->getConversationId())
+ return Status(ErrorCodes::ProtocolError, "sasl: Mismatched conversation id");
+
+ return doSaslStep(client, session, cmdObj, result);
+}
+
+CmdSaslStart::CmdSaslStart() : Command(saslStartCommandName) {}
+CmdSaslStart::~CmdSaslStart() {}
+
+void CmdSaslStart::help(std::stringstream& os) const {
+ os << "First step in a SASL authentication conversation.";
+}
+
+bool CmdSaslStart::run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& ignored,
+ BSONObjBuilder& result) {
+ ClientBasic* client = ClientBasic::getCurrent();
+ AuthenticationSession::set(client, std::unique_ptr<AuthenticationSession>());
+
+ std::string mechanism;
+ if (!extractMechanism(cmdObj, &mechanism).isOK()) {
+ return false;
+ }
- SaslAuthenticationSession* session =
- SaslAuthenticationSession::create(AuthorizationSession::get(client), mechanism);
+ SaslAuthenticationSession* session =
+ SaslAuthenticationSession::create(AuthorizationSession::get(client), mechanism);
- std::unique_ptr<AuthenticationSession> sessionGuard(session);
+ std::unique_ptr<AuthenticationSession> sessionGuard(session);
- session->setOpCtxt(txn);
+ session->setOpCtxt(txn);
- Status status = doSaslStart(client, session, db, cmdObj, &result);
- addStatus(status, &result);
+ Status status = doSaslStart(client, session, db, cmdObj, &result);
+ addStatus(status, &result);
- if (session->isDone()) {
- audit::logAuthentication(
- client,
- session->getMechanism(),
- UserName(session->getPrincipalId(), db),
- status.code());
- }
- else {
- AuthenticationSession::swap(client, sessionGuard);
- }
- return status.isOK();
+ if (session->isDone()) {
+ audit::logAuthentication(client,
+ session->getMechanism(),
+ UserName(session->getPrincipalId(), db),
+ status.code());
+ } else {
+ AuthenticationSession::swap(client, sessionGuard);
}
-
- CmdSaslContinue::CmdSaslContinue() : Command(saslContinueCommandName) {}
- CmdSaslContinue::~CmdSaslContinue() {}
-
- void CmdSaslContinue::help(std::stringstream& os) const {
- os << "Subsequent steps in a SASL authentication conversation.";
+ return status.isOK();
+}
+
+CmdSaslContinue::CmdSaslContinue() : Command(saslContinueCommandName) {}
+CmdSaslContinue::~CmdSaslContinue() {}
+
+void CmdSaslContinue::help(std::stringstream& os) const {
+ os << "Subsequent steps in a SASL authentication conversation.";
+}
+
+bool CmdSaslContinue::run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& ignored,
+ BSONObjBuilder& result) {
+ ClientBasic* client = ClientBasic::getCurrent();
+ std::unique_ptr<AuthenticationSession> sessionGuard;
+ AuthenticationSession::swap(client, sessionGuard);
+
+ if (!sessionGuard || sessionGuard->getType() != AuthenticationSession::SESSION_TYPE_SASL) {
+ addStatus(Status(ErrorCodes::ProtocolError, "No SASL session state found"), &result);
+ return false;
}
- bool CmdSaslContinue::run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& ignored,
- BSONObjBuilder& result) {
-
- ClientBasic* client = ClientBasic::getCurrent();
- std::unique_ptr<AuthenticationSession> sessionGuard;
- AuthenticationSession::swap(client, sessionGuard);
-
- if (!sessionGuard || sessionGuard->getType() != AuthenticationSession::SESSION_TYPE_SASL) {
- addStatus(Status(ErrorCodes::ProtocolError, "No SASL session state found"), &result);
- return false;
- }
-
- SaslAuthenticationSession* session =
- static_cast<SaslAuthenticationSession*>(sessionGuard.get());
+ SaslAuthenticationSession* session =
+ static_cast<SaslAuthenticationSession*>(sessionGuard.get());
- // Authenticating the __system@local user to the admin database on mongos is required
- // by the auth passthrough test suite.
- if (session->getAuthenticationDatabase() != db && !Command::testCommandsEnabled) {
- addStatus(Status(ErrorCodes::ProtocolError,
- "Attempt to switch database target during SASL authentication."),
- &result);
- return false;
- }
+ // Authenticating the __system@local user to the admin database on mongos is required
+ // by the auth passthrough test suite.
+ if (session->getAuthenticationDatabase() != db && !Command::testCommandsEnabled) {
+ addStatus(Status(ErrorCodes::ProtocolError,
+ "Attempt to switch database target during SASL authentication."),
+ &result);
+ return false;
+ }
- session->setOpCtxt(txn);
+ session->setOpCtxt(txn);
- Status status = doSaslContinue(client, session, cmdObj, &result);
- addStatus(status, &result);
+ Status status = doSaslContinue(client, session, cmdObj, &result);
+ addStatus(status, &result);
- if (session->isDone()) {
- audit::logAuthentication(
- client,
- session->getMechanism(),
- UserName(session->getPrincipalId(), db),
- status.code());
- }
- else {
- AuthenticationSession::swap(client, sessionGuard);
- }
-
- return status.isOK();
+ if (session->isDone()) {
+ audit::logAuthentication(client,
+ session->getMechanism(),
+ UserName(session->getPrincipalId(), db),
+ status.code());
+ } else {
+ AuthenticationSession::swap(client, sessionGuard);
}
- // The CyrusSaslCommands Enterprise initializer is dependent on PreSaslCommands
- MONGO_INITIALIZER_WITH_PREREQUISITES(PreSaslCommands,
- ("NativeSaslServerCore"))
- (InitializerContext*) {
+ return status.isOK();
+}
- if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "MONGODB-CR"))
- CmdAuthenticate::disableAuthMechanism("MONGODB-CR");
+// The CyrusSaslCommands Enterprise initializer is dependent on PreSaslCommands
+MONGO_INITIALIZER_WITH_PREREQUISITES(PreSaslCommands, ("NativeSaslServerCore"))
+(InitializerContext*) {
+ if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "MONGODB-CR"))
+ CmdAuthenticate::disableAuthMechanism("MONGODB-CR");
- if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "MONGODB-X509"))
- CmdAuthenticate::disableAuthMechanism("MONGODB-X509");
+ if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "MONGODB-X509"))
+ CmdAuthenticate::disableAuthMechanism("MONGODB-X509");
- // For backwards compatibility, in 3.0 we are letting MONGODB-CR imply general
- // challenge-response auth and hence SCRAM-SHA-1 is enabled by either specifying
- // SCRAM-SHA-1 or MONGODB-CR in the authenticationMechanism server parameter.
- if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "SCRAM-SHA-1") &&
- sequenceContains(saslGlobalParams.authenticationMechanisms, "MONGODB-CR"))
- saslGlobalParams.authenticationMechanisms.push_back("SCRAM-SHA-1");
+ // For backwards compatibility, in 3.0 we are letting MONGODB-CR imply general
+ // challenge-response auth and hence SCRAM-SHA-1 is enabled by either specifying
+ // SCRAM-SHA-1 or MONGODB-CR in the authenticationMechanism server parameter.
+ if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "SCRAM-SHA-1") &&
+ sequenceContains(saslGlobalParams.authenticationMechanisms, "MONGODB-CR"))
+ saslGlobalParams.authenticationMechanisms.push_back("SCRAM-SHA-1");
- return Status::OK();
- }
+ return Status::OK();
+}
} // namespace
} // namespace mongo
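
For orientation, the reply that buildResponse() and addStatus() above assemble has roughly the following shape. This is a hedged reconstruction: the literal field names are believed to match the saslCommand* constants used in this file, and serverChallenge stands in for whatever mechanism-specific payload the session produced.

    // Hypothetical reconstruction of a successful mid-conversation reply.
    const std::string serverChallenge = "<mechanism-specific bytes>";
    BSONObjBuilder reply;
    reply.appendIntOrLL("conversationId", 1);                  // saslCommandConversationIdFieldName
    reply.appendBool("done", false);                           // saslCommandDoneFieldName
    reply.append("payload", base64::encode(serverChallenge));  // String-typed payload branch
    reply.append("ok", 1.0);                                   // addStatus() on Status::OK()
    // On failure, addStatus() appends ok: 0.0 plus "code" and "errmsg" instead.
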
diff --git a/src/mongo/db/auth/sasl_options.cpp b/src/mongo/db/auth/sasl_options.cpp
index 7261d8b49f0..8ca9e38dd63 100644
--- a/src/mongo/db/auth/sasl_options.cpp
+++ b/src/mongo/db/auth/sasl_options.cpp
@@ -39,162 +39,160 @@
namespace mongo {
- SASLGlobalParams saslGlobalParams;
-
- const int defaultScramIterationCount = 10000;
- const int minimumScramIterationCount = 5000;
-
- SASLGlobalParams::SASLGlobalParams() {
- // Authentication mechanisms supported by default.
- authenticationMechanisms.push_back("MONGODB-CR");
- authenticationMechanisms.push_back("MONGODB-X509");
- authenticationMechanisms.push_back("SCRAM-SHA-1");
- // Default iteration count for SCRAM authentication.
- scramIterationCount = defaultScramIterationCount;
+SASLGlobalParams saslGlobalParams;
+
+const int defaultScramIterationCount = 10000;
+const int minimumScramIterationCount = 5000;
+
+SASLGlobalParams::SASLGlobalParams() {
+ // Authentication mechanisms supported by default.
+ authenticationMechanisms.push_back("MONGODB-CR");
+ authenticationMechanisms.push_back("MONGODB-X509");
+ authenticationMechanisms.push_back("SCRAM-SHA-1");
+ // Default iteration count for SCRAM authentication.
+ scramIterationCount = defaultScramIterationCount;
+}
+
+Status addSASLOptions(moe::OptionSection* options) {
+ moe::OptionSection saslOptions("SASL Options");
+
+ saslOptions.addOptionChaining("security.authenticationMechanisms",
+ "",
+ moe::StringVector,
+ "List of supported authentication mechanisms. "
+ "Default is MONGODB-CR, SCRAM-SHA-1 and MONGODB-X509.")
+ .setSources(moe::SourceYAMLConfig);
+
+ saslOptions.addOptionChaining(
+ "security.sasl.hostName", "", moe::String, "Fully qualified server domain name")
+ .setSources(moe::SourceYAMLConfig);
+
+ saslOptions.addOptionChaining("security.sasl.serviceName",
+ "",
+ moe::String,
+ "Registered name of the service using SASL")
+ .setSources(moe::SourceYAMLConfig);
+
+ saslOptions.addOptionChaining("security.sasl.saslauthdSocketPath",
+ "",
+ moe::String,
+ "Path to Unix domain socket file for saslauthd")
+ .setSources(moe::SourceYAMLConfig);
+
+ Status ret = options->addSection(saslOptions);
+ if (!ret.isOK()) {
+ log() << "Failed to add sasl option section: " << ret.toString();
+ return ret;
}
- Status addSASLOptions(moe::OptionSection* options) {
-
- moe::OptionSection saslOptions("SASL Options");
-
- saslOptions.addOptionChaining("security.authenticationMechanisms", "",
- moe::StringVector, "List of supported authentication mechanisms. "
- "Default is MONGODB-CR, SCRAM-SHA-1 and MONGODB-X509.")
- .setSources(moe::SourceYAMLConfig);
-
- saslOptions.addOptionChaining("security.sasl.hostName", "", moe::String,
- "Fully qualified server domain name")
- .setSources(moe::SourceYAMLConfig);
-
- saslOptions.addOptionChaining("security.sasl.serviceName", "", moe::String,
- "Registered name of the service using SASL")
- .setSources(moe::SourceYAMLConfig);
-
- saslOptions.addOptionChaining("security.sasl.saslauthdSocketPath", "", moe::String,
- "Path to Unix domain socket file for saslauthd")
- .setSources(moe::SourceYAMLConfig);
-
- Status ret = options->addSection(saslOptions);
- if (!ret.isOK()) {
- log() << "Failed to add sasl option section: " << ret.toString();
- return ret;
- }
-
- return Status::OK();
- }
-
- Status storeSASLOptions(const moe::Environment& params) {
-
- bool haveAuthenticationMechanisms = false;
- bool haveHostName = false;
- bool haveServiceName = false;
- bool haveAuthdPath = false;
- bool haveScramIterationCount = false;
-
- // Check our setParameter options first so that these values can be properly overridden via
- // the command line even though the options have different names.
- if (params.count("setParameter")) {
- std::map<std::string, std::string> parameters =
- params["setParameter"].as<std::map<std::string, std::string> >();
- for (std::map<std::string, std::string>::iterator parametersIt = parameters.begin();
- parametersIt != parameters.end(); parametersIt++) {
- if (parametersIt->first == "authenticationMechanisms") {
- haveAuthenticationMechanisms = true;
- }
- else if (parametersIt->first == "saslHostName") {
- haveHostName = true;
- }
- else if (parametersIt->first == "saslServiceName") {
- haveServiceName = true;
- }
- else if (parametersIt->first == "saslauthdPath") {
- haveAuthdPath = true;
- }
- else if (parametersIt->first == "scramIterationCount") {
- haveScramIterationCount = true;
- }
+ return Status::OK();
+}
+
+Status storeSASLOptions(const moe::Environment& params) {
+ bool haveAuthenticationMechanisms = false;
+ bool haveHostName = false;
+ bool haveServiceName = false;
+ bool haveAuthdPath = false;
+ bool haveScramIterationCount = false;
+
+ // Check our setParameter options first so that these values can be properly overridden via
+ // the command line even though the options have different names.
+ if (params.count("setParameter")) {
+ std::map<std::string, std::string> parameters =
+ params["setParameter"].as<std::map<std::string, std::string>>();
+ for (std::map<std::string, std::string>::iterator parametersIt = parameters.begin();
+ parametersIt != parameters.end();
+ parametersIt++) {
+ if (parametersIt->first == "authenticationMechanisms") {
+ haveAuthenticationMechanisms = true;
+ } else if (parametersIt->first == "saslHostName") {
+ haveHostName = true;
+ } else if (parametersIt->first == "saslServiceName") {
+ haveServiceName = true;
+ } else if (parametersIt->first == "saslauthdPath") {
+ haveAuthdPath = true;
+ } else if (parametersIt->first == "scramIterationCount") {
+ haveScramIterationCount = true;
}
}
-
- if (params.count("security.authenticationMechanisms") &&
- !haveAuthenticationMechanisms) {
- saslGlobalParams.authenticationMechanisms =
- params["security.authenticationMechanisms"].as<std::vector<std::string> >();
- }
- if (params.count("security.sasl.hostName") && !haveHostName) {
- saslGlobalParams.hostName =
- params["security.sasl.hostName"].as<std::string>();
- }
- if (params.count("security.sasl.serviceName") && !haveServiceName) {
- saslGlobalParams.serviceName =
- params["security.sasl.serviceName"].as<std::string>();
- }
- if (params.count("security.sasl.saslauthdSocketPath") && !haveAuthdPath) {
- saslGlobalParams.authdPath =
- params["security.sasl.saslauthdSocketPath"].as<std::string>();
- }
- if (params.count("security.sasl.scramIterationCount") && !haveScramIterationCount) {
- saslGlobalParams.scramIterationCount =
- params["security.sasl.scramIterationCount"].as<int>();
- }
-
- return Status::OK();
}
- MONGO_MODULE_STARTUP_OPTIONS_REGISTER(SASLOptions)(InitializerContext* context) {
- return addSASLOptions(&moe::startupOptions);
+ if (params.count("security.authenticationMechanisms") && !haveAuthenticationMechanisms) {
+ saslGlobalParams.authenticationMechanisms =
+ params["security.authenticationMechanisms"].as<std::vector<std::string>>();
}
-
- MONGO_STARTUP_OPTIONS_STORE(SASLOptions)(InitializerContext* context) {
- return storeSASLOptions(moe::startupOptionsParsed);
+ if (params.count("security.sasl.hostName") && !haveHostName) {
+ saslGlobalParams.hostName = params["security.sasl.hostName"].as<std::string>();
+ }
+ if (params.count("security.sasl.serviceName") && !haveServiceName) {
+ saslGlobalParams.serviceName = params["security.sasl.serviceName"].as<std::string>();
+ }
+ if (params.count("security.sasl.saslauthdSocketPath") && !haveAuthdPath) {
+ saslGlobalParams.authdPath = params["security.sasl.saslauthdSocketPath"].as<std::string>();
+ }
+ if (params.count("security.sasl.scramIterationCount") && !haveScramIterationCount) {
+ saslGlobalParams.scramIterationCount =
+ params["security.sasl.scramIterationCount"].as<int>();
}
- // SASL Startup Parameters, making them settable via setParameter on the command line or in the
- // legacy INI config file. None of these parameters are modifiable at runtime.
- ExportedServerParameter<std::vector<std::string> > SASLAuthenticationMechanismsSetting(
- ServerParameterSet::getGlobal(),
- "authenticationMechanisms",
- &saslGlobalParams.authenticationMechanisms,
- true, // Change at startup
- false); // Change at runtime
-
- ExportedServerParameter<std::string> SASLHostNameSetting(ServerParameterSet::getGlobal(),
- "saslHostName",
- &saslGlobalParams.hostName,
- true, // Change at startup
- false); // Change at runtime
-
- ExportedServerParameter<std::string> SASLServiceNameSetting(ServerParameterSet::getGlobal(),
- "saslServiceName",
- &saslGlobalParams.serviceName,
- true, // Change at startup
- false); // Change at runtime
-
- ExportedServerParameter<std::string> SASLAuthdPathSetting(ServerParameterSet::getGlobal(),
- "saslauthdPath",
- &saslGlobalParams.authdPath,
- true, // Change at startup
- false); // Change at runtime
-
- const std::string scramIterationCountServerParameter = "scramIterationCount";
- class ExportedScramIterationCountParameter : public ExportedServerParameter<int> {
- public:
- ExportedScramIterationCountParameter():
- ExportedServerParameter<int>(ServerParameterSet::getGlobal(),
- scramIterationCountServerParameter,
- &saslGlobalParams.scramIterationCount,
- true, // Change at startup
- true) {} // Change at runtime
-
- virtual Status validate(const int& newValue) {
- if (newValue < minimumScramIterationCount) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for SCRAM iteration count: " << newValue <<
- " is less than the minimum SCRAM iteration count, " <<
- minimumScramIterationCount);
- }
- return Status::OK();
+ return Status::OK();
+}
+
+MONGO_MODULE_STARTUP_OPTIONS_REGISTER(SASLOptions)(InitializerContext* context) {
+ return addSASLOptions(&moe::startupOptions);
+}
+
+MONGO_STARTUP_OPTIONS_STORE(SASLOptions)(InitializerContext* context) {
+ return storeSASLOptions(moe::startupOptionsParsed);
+}
+
+// SASL Startup Parameters, making them settable via setParameter on the command line or in the
+// legacy INI config file. None of these parameters are modifiable at runtime.
+ExportedServerParameter<std::vector<std::string>> SASLAuthenticationMechanismsSetting(
+ ServerParameterSet::getGlobal(),
+ "authenticationMechanisms",
+ &saslGlobalParams.authenticationMechanisms,
+ true, // Change at startup
+ false); // Change at runtime
+
+ExportedServerParameter<std::string> SASLHostNameSetting(ServerParameterSet::getGlobal(),
+ "saslHostName",
+ &saslGlobalParams.hostName,
+ true, // Change at startup
+ false); // Change at runtime
+
+ExportedServerParameter<std::string> SASLServiceNameSetting(ServerParameterSet::getGlobal(),
+ "saslServiceName",
+ &saslGlobalParams.serviceName,
+ true, // Change at startup
+ false); // Change at runtime
+
+ExportedServerParameter<std::string> SASLAuthdPathSetting(ServerParameterSet::getGlobal(),
+ "saslauthdPath",
+ &saslGlobalParams.authdPath,
+ true, // Change at startup
+ false); // Change at runtime
+
+const std::string scramIterationCountServerParameter = "scramIterationCount";
+class ExportedScramIterationCountParameter : public ExportedServerParameter<int> {
+public:
+ ExportedScramIterationCountParameter()
+ : ExportedServerParameter<int>(ServerParameterSet::getGlobal(),
+ scramIterationCountServerParameter,
+ &saslGlobalParams.scramIterationCount,
+ true, // Change at startup
+ true) {} // Change at runtime
+
+ virtual Status validate(const int& newValue) {
+ if (newValue < minimumScramIterationCount) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for SCRAM iteration count: " << newValue
+ << " is less than the minimum SCRAM iteration count, "
+ << minimumScramIterationCount);
}
- } scramIterationCountParam;
+ return Status::OK();
+ }
+} scramIterationCountParam;
-} // namespace mongo
+} // namespace mongo
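The storeSASLOptions() hunk above gives setParameter values precedence over the equivalent security.sasl.* YAML keys, since both can name the same setting. A minimal standalone sketch of that precedence rule (option names are illustrative only; this is not MongoDB code):

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        // Hypothetical parsed inputs: a --setParameter map and the equivalent YAML option.
        std::map<std::string, std::string> setParameters = {{"saslHostName", "db1.example.net"}};
        std::map<std::string, std::string> yamlConfig = {{"security.sasl.hostName", "other.example.net"}};

        std::string hostName;
        if (setParameters.count("saslHostName")) {
            hostName = setParameters["saslHostName"];         // setParameter wins
        } else if (yamlConfig.count("security.sasl.hostName")) {
            hostName = yamlConfig["security.sasl.hostName"];  // otherwise fall back to the config file
        }
        std::cout << "effective saslHostName: " << hostName << "\n";
        return 0;
    }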
diff --git a/src/mongo/db/auth/sasl_options.h b/src/mongo/db/auth/sasl_options.h
index cc649adeeba..f9a4bb15efd 100644
--- a/src/mongo/db/auth/sasl_options.h
+++ b/src/mongo/db/auth/sasl_options.h
@@ -36,27 +36,26 @@
namespace mongo {
namespace optionenvironment {
- class OptionSection;
- class Environment;
-} // namespace optionenvironment
+class OptionSection;
+class Environment;
+} // namespace optionenvironment
- namespace moe = optionenvironment;
+namespace moe = optionenvironment;
- struct SASLGlobalParams {
+struct SASLGlobalParams {
+ std::vector<std::string> authenticationMechanisms;
+ std::string hostName;
+ std::string serviceName;
+ std::string authdPath;
+ int scramIterationCount;
- std::vector<std::string> authenticationMechanisms;
- std::string hostName;
- std::string serviceName;
- std::string authdPath;
- int scramIterationCount;
+ SASLGlobalParams();
+};
- SASLGlobalParams();
- };
+extern SASLGlobalParams saslGlobalParams;
- extern SASLGlobalParams saslGlobalParams;
+Status addSASLOptions(moe::OptionSection* options);
- Status addSASLOptions(moe::OptionSection* options);
+Status storeSASLOptions(const moe::Environment& params);
- Status storeSASLOptions(const moe::Environment& params);
-
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.cpp b/src/mongo/db/auth/sasl_plain_server_conversation.cpp
index ef38762e3a5..b5f0b9e3c8f 100644
--- a/src/mongo/db/auth/sasl_plain_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_plain_server_conversation.cpp
@@ -36,73 +36,70 @@
namespace mongo {
- SaslPLAINServerConversation::SaslPLAINServerConversation(
- SaslAuthenticationSession* saslAuthSession) :
- SaslServerConversation(saslAuthSession) {
+SaslPLAINServerConversation::SaslPLAINServerConversation(SaslAuthenticationSession* saslAuthSession)
+ : SaslServerConversation(saslAuthSession) {}
+
+SaslPLAINServerConversation::~SaslPLAINServerConversation(){};
+
+StatusWith<bool> SaslPLAINServerConversation::step(StringData inputData, std::string* outputData) {
+    // Expecting user input in the form: user\0user\0pwd
+ std::string input = inputData.toString();
+ std::string pwd = "";
+
+ try {
+ _user = input.substr(0, inputData.find('\0'));
+ pwd = input.substr(inputData.find('\0', _user.size() + 1) + 1);
+ } catch (std::out_of_range& exception) {
+ return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
+ mongoutils::str::stream()
+ << "Incorrectly formatted PLAIN client message");
}
- SaslPLAINServerConversation::~SaslPLAINServerConversation() {};
+ User* userObj;
+ // The authentication database is also the source database for the user.
+ Status status =
+ _saslAuthSession->getAuthorizationSession()->getAuthorizationManager().acquireUser(
+ _saslAuthSession->getOpCtxt(),
+ UserName(_user, _saslAuthSession->getAuthenticationDatabase()),
+ &userObj);
- StatusWith<bool> SaslPLAINServerConversation::step(StringData inputData,
- std::string* outputData) {
- // Expecting user input on the form: user\0user\0pwd
- std::string input = inputData.toString();
- std::string pwd = "";
-
- try {
- _user = input.substr(0, inputData.find('\0'));
- pwd = input.substr(inputData.find('\0', _user.size()+1)+1);
- }
- catch (std::out_of_range& exception) {
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- mongoutils::str::stream() << "Incorrectly formatted PLAIN client message");
- }
-
- User* userObj;
- // The authentication database is also the source database for the user.
- Status status = _saslAuthSession->getAuthorizationSession()->getAuthorizationManager().
- acquireUser(_saslAuthSession->getOpCtxt(),
- UserName(_user, _saslAuthSession->getAuthenticationDatabase()),
- &userObj);
-
- if (!status.isOK()) {
- return StatusWith<bool>(status);
- }
+ if (!status.isOK()) {
+ return StatusWith<bool>(status);
+ }
- const User::CredentialData creds = userObj->getCredentials();
- _saslAuthSession->getAuthorizationSession()->getAuthorizationManager().
- releaseUser(userObj);
+ const User::CredentialData creds = userObj->getCredentials();
+ _saslAuthSession->getAuthorizationSession()->getAuthorizationManager().releaseUser(userObj);
- std::string authDigest = createPasswordDigest(_user, pwd);
+ std::string authDigest = createPasswordDigest(_user, pwd);
- if (!creds.password.empty()) {
- // Handle schemaVersion26Final (MONGODB-CR/SCRAM mixed mode)
- if (authDigest != creds.password) {
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- mongoutils::str::stream() << "Incorrect user name or password");
- }
+ if (!creds.password.empty()) {
+ // Handle schemaVersion26Final (MONGODB-CR/SCRAM mixed mode)
+ if (authDigest != creds.password) {
+ return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
+ mongoutils::str::stream() << "Incorrect user name or password");
}
- else {
- // Handle schemaVersion28SCRAM (SCRAM only mode)
- unsigned char storedKey[scram::hashSize];
- unsigned char serverKey[scram::hashSize];
-
- scram::generateSecrets(authDigest,
- reinterpret_cast<const unsigned char*>(base64::decode(creds.scram.salt).c_str()),
- 16,
- creds.scram.iterationCount,
- storedKey,
- serverKey);
- if (creds.scram.storedKey != base64::encode(reinterpret_cast<const char*>(storedKey),
- scram::hashSize)){
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- mongoutils::str::stream() << "Incorrect user name or password");
- }
+ } else {
+ // Handle schemaVersion28SCRAM (SCRAM only mode)
+ unsigned char storedKey[scram::hashSize];
+ unsigned char serverKey[scram::hashSize];
+
+ scram::generateSecrets(
+ authDigest,
+ reinterpret_cast<const unsigned char*>(base64::decode(creds.scram.salt).c_str()),
+ 16,
+ creds.scram.iterationCount,
+ storedKey,
+ serverKey);
+ if (creds.scram.storedKey !=
+ base64::encode(reinterpret_cast<const char*>(storedKey), scram::hashSize)) {
+ return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
+ mongoutils::str::stream() << "Incorrect user name or password");
}
+ }
- *outputData = "";
+ *outputData = "";
- return StatusWith<bool>(true);
- }
+ return StatusWith<bool>(true);
+}
} // namespace mongo
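The PLAIN conversation above expects the client message in the RFC 4616 layout authzid\0authcid\0password and splits it on the embedded NUL bytes. A minimal standalone sketch of that split (illustrative values only, not the code from this change):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    int main() {
        // Embedded NULs, so the length must be given explicitly.
        const std::string msg("admin\0admin\0secret", 18);

        const size_t firstNul = msg.find('\0');
        const size_t secondNul = msg.find('\0', firstNul + 1);
        if (firstNul == std::string::npos || secondNul == std::string::npos) {
            throw std::runtime_error("incorrectly formatted PLAIN client message");
        }

        const std::string authzid = msg.substr(0, firstNul);
        const std::string authcid = msg.substr(firstNul + 1, secondNul - firstNul - 1);
        const std::string password = msg.substr(secondNul + 1);
        std::cout << "authzid=" << authzid << " authcid=" << authcid
                  << " password length=" << password.size() << "\n";
        return 0;
    }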
diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.h b/src/mongo/db/auth/sasl_plain_server_conversation.h
index d8e33e99905..5d3b57ffa89 100644
--- a/src/mongo/db/auth/sasl_plain_server_conversation.h
+++ b/src/mongo/db/auth/sasl_plain_server_conversation.h
@@ -36,21 +36,22 @@
#include "mongo/db/auth/sasl_server_conversation.h"
namespace mongo {
+/**
+ * Server side authentication session for SASL PLAIN.
+ */
+class SaslPLAINServerConversation : public SaslServerConversation {
+ MONGO_DISALLOW_COPYING(SaslPLAINServerConversation);
+
+public:
/**
- * Server side authentication session for SASL PLAIN.
- */
- class SaslPLAINServerConversation : public SaslServerConversation {
- MONGO_DISALLOW_COPYING(SaslPLAINServerConversation);
- public:
- /**
- * Implements the server side of a SASL PLAIN mechanism session.
- *
- **/
- explicit SaslPLAINServerConversation(SaslAuthenticationSession* saslAuthSession);
-
- virtual ~SaslPLAINServerConversation();
-
- virtual StatusWith<bool> step(StringData inputData, std::string* outputData);
- };
+ * Implements the server side of a SASL PLAIN mechanism session.
+ *
+ **/
+ explicit SaslPLAINServerConversation(SaslAuthenticationSession* saslAuthSession);
+
+ virtual ~SaslPLAINServerConversation();
+
+ virtual StatusWith<bool> step(StringData inputData, std::string* outputData);
+};
} // namespace mongo
diff --git a/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp b/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
index 83137a8bd99..9fd8496b7bc 100644
--- a/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
@@ -48,292 +48,289 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- SaslSCRAMSHA1ServerConversation::SaslSCRAMSHA1ServerConversation(
- SaslAuthenticationSession* saslAuthSession) :
- SaslServerConversation(saslAuthSession),
- _step(0),
- _authMessage(""),
- _nonce("") {
+using std::unique_ptr;
+using std::string;
+
+SaslSCRAMSHA1ServerConversation::SaslSCRAMSHA1ServerConversation(
+ SaslAuthenticationSession* saslAuthSession)
+ : SaslServerConversation(saslAuthSession), _step(0), _authMessage(""), _nonce("") {}
+
+StatusWith<bool> SaslSCRAMSHA1ServerConversation::step(StringData inputData,
+ std::string* outputData) {
+ std::vector<std::string> input = StringSplitter::split(inputData.toString(), ",");
+ _step++;
+
+ if (_step > 3 || _step <= 0) {
+ return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
+ mongoutils::str::stream()
+ << "Invalid SCRAM-SHA-1 authentication step: " << _step);
}
-
- StatusWith<bool> SaslSCRAMSHA1ServerConversation::step(StringData inputData,
- std::string* outputData) {
-
- std::vector<std::string> input = StringSplitter::split(inputData.toString(), ",");
- _step++;
-
- if (_step > 3 || _step <= 0) {
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- mongoutils::str::stream() << "Invalid SCRAM-SHA-1 authentication step: " << _step);
- }
- if (_step == 1) {
- return _firstStep(input, outputData);
- }
- if (_step == 2) {
- return _secondStep(input, outputData);
- }
-
- *outputData = "";
-
- return StatusWith<bool>(true);
+ if (_step == 1) {
+ return _firstStep(input, outputData);
}
-
- /*
- * RFC 5802 specifies that in SCRAM user names characters ',' and '=' are encoded as
- * =2C and =3D respectively.
- */
- static void decodeSCRAMUsername(std::string& user) {
- boost::replace_all(user, "=2C", ",");
- boost::replace_all(user, "=3D", "=");
+ if (_step == 2) {
+ return _secondStep(input, outputData);
}
- /*
- * Parse client-first-message of the form:
- * n,a=authzid,n=encoded-username,r=client-nonce
- *
- * Generate server-first-message on the form:
- * r=client-nonce|server-nonce,s=user-salt,i=iteration-count
- *
- * NOTE: we are ignoring the authorization ID part of the message
- */
- StatusWith<bool> SaslSCRAMSHA1ServerConversation::_firstStep(std::vector<string>& input,
- std::string* outputData) {
- std::string authzId = "";
-
- if (input.size() == 4) {
- /* The second entry a=authzid is optional. If provided it will be
- * validated against the encoded username.
- *
- * The two allowed input forms are:
- * n,,n=encoded-username,r=client-nonce
- * n,a=authzid,n=encoded-username,r=client-nonce
- */
- if (!str::startsWith(input[1], "a=") || input[1].size() < 3) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect SCRAM-SHA-1 authzid: " << input[1]);
- }
- authzId = input[1].substr(2);
- input.erase(input.begin() + 1);
- }
-
- if (input.size() != 3) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect number of arguments for first SCRAM-SHA-1 client message, got " <<
- input.size() << " expected 4");
- }
- else if (input[0] != "n") {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect SCRAM-SHA-1 client message prefix: " << input[0]);
- }
- else if (!str::startsWith(input[1], "n=") || input[1].size() < 3) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect SCRAM-SHA-1 user name: " << input[1]);
- }
- else if(!str::startsWith(input[2], "r=") || input[2].size() < 6) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect SCRAM-SHA-1 client nonce: " << input[2]);
- }
+ *outputData = "";
- _user = input[1].substr(2);
- if (!authzId.empty() && _user != authzId) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "SCRAM-SHA-1 user name " << _user << " does not match authzid " << authzId);
- }
+ return StatusWith<bool>(true);
+}
- decodeSCRAMUsername(_user);
+/*
+ * RFC 5802 specifies that the characters ',' and '=' in SCRAM user names are encoded
+ * as =2C and =3D, respectively.
+ */
+static void decodeSCRAMUsername(std::string& user) {
+ boost::replace_all(user, "=2C", ",");
+ boost::replace_all(user, "=3D", "=");
+}
- // SERVER-16534, SCRAM-SHA-1 must be enabled for authenticating the internal user, so that
- // cluster members may communicate with each other. Hence ignore disabled auth mechanism
- // for the internal user.
- UserName user(_user, _saslAuthSession->getAuthenticationDatabase());
- if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "SCRAM-SHA-1") &&
- user != internalSecurity.user->getName()) {
+/*
+ * Parse client-first-message of the form:
+ * n,a=authzid,n=encoded-username,r=client-nonce
+ *
+ * Generate server-first-message of the form:
+ * r=client-nonce|server-nonce,s=user-salt,i=iteration-count
+ *
+ * NOTE: we are ignoring the authorization ID part of the message
+ */
+StatusWith<bool> SaslSCRAMSHA1ServerConversation::_firstStep(std::vector<string>& input,
+ std::string* outputData) {
+ std::string authzId = "";
+
+ if (input.size() == 4) {
+ /* The second entry a=authzid is optional. If provided it will be
+ * validated against the encoded username.
+ *
+ * The two allowed input forms are:
+ * n,,n=encoded-username,r=client-nonce
+ * n,a=authzid,n=encoded-username,r=client-nonce
+ */
+ if (!str::startsWith(input[1], "a=") || input[1].size() < 3) {
return StatusWith<bool>(ErrorCodes::BadValue,
- "SCRAM-SHA-1 authentication is disabled");
+ mongoutils::str::stream()
+ << "Incorrect SCRAM-SHA-1 authzid: " << input[1]);
}
+ authzId = input[1].substr(2);
+ input.erase(input.begin() + 1);
+ }
- // add client-first-message-bare to _authMessage
- _authMessage += input[1] + "," + input[2] + ",";
-
- std::string clientNonce = input[2].substr(2);
-
- // The authentication database is also the source database for the user.
- User* userObj;
- Status status = _saslAuthSession->getAuthorizationSession()->getAuthorizationManager().
- acquireUser(_saslAuthSession->getOpCtxt(),
- user,
- &userObj);
+ if (input.size() != 3) {
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Incorrect number of arguments for first SCRAM-SHA-1 client message, got "
+ << input.size() << " expected 4");
+ } else if (input[0] != "n") {
+ return StatusWith<bool>(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Incorrect SCRAM-SHA-1 client message prefix: " << input[0]);
+ } else if (!str::startsWith(input[1], "n=") || input[1].size() < 3) {
+ return StatusWith<bool>(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Incorrect SCRAM-SHA-1 user name: " << input[1]);
+ } else if (!str::startsWith(input[2], "r=") || input[2].size() < 6) {
+ return StatusWith<bool>(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Incorrect SCRAM-SHA-1 client nonce: " << input[2]);
+ }
- if (!status.isOK()) {
- return StatusWith<bool>(status);
- }
+ _user = input[1].substr(2);
+ if (!authzId.empty() && _user != authzId) {
+ return StatusWith<bool>(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "SCRAM-SHA-1 user name " << _user
+ << " does not match authzid " << authzId);
+ }
- _creds = userObj->getCredentials();
- UserName userName = userObj->getName();
+ decodeSCRAMUsername(_user);
- _saslAuthSession->getAuthorizationSession()->getAuthorizationManager().
- releaseUser(userObj);
+ // SERVER-16534, SCRAM-SHA-1 must be enabled for authenticating the internal user, so that
+ // cluster members may communicate with each other. Hence ignore disabled auth mechanism
+ // for the internal user.
+ UserName user(_user, _saslAuthSession->getAuthenticationDatabase());
+ if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "SCRAM-SHA-1") &&
+ user != internalSecurity.user->getName()) {
+ return StatusWith<bool>(ErrorCodes::BadValue, "SCRAM-SHA-1 authentication is disabled");
+ }
- // Check for authentication attempts of the __system user on
- // systems started without a keyfile.
- if (userName == internalSecurity.user->getName() &&
- _creds.scram.salt.empty()) {
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- "It is not possible to authenticate as the __system user "
- "on servers started without a --keyFile parameter");
- }
+ // add client-first-message-bare to _authMessage
+ _authMessage += input[1] + "," + input[2] + ",";
- // Generate SCRAM credentials on the fly for mixed MONGODB-CR/SCRAM mode.
- if (_creds.scram.salt.empty() && !_creds.password.empty()) {
- // Use a default value of 5000 for the scramIterationCount when in mixed mode,
- // overriding the default value (10000) used for SCRAM mode or the user-given value.
- const int mixedModeScramIterationCount = 5000;
- BSONObj scramCreds = scram::generateCredentials(_creds.password,
- mixedModeScramIterationCount);
- _creds.scram.iterationCount = scramCreds[scram::iterationCountFieldName].Int();
- _creds.scram.salt = scramCreds[scram::saltFieldName].String();
- _creds.scram.storedKey = scramCreds[scram::storedKeyFieldName].String();
- _creds.scram.serverKey = scramCreds[scram::serverKeyFieldName].String();
- }
+ std::string clientNonce = input[2].substr(2);
- // Generate server-first-message
- // Create text-based nonce as base64 encoding of a binary blob of length multiple of 3
- const int nonceLenQWords = 3;
- uint64_t binaryNonce[nonceLenQWords];
+ // The authentication database is also the source database for the user.
+ User* userObj;
+ Status status =
+ _saslAuthSession->getAuthorizationSession()->getAuthorizationManager().acquireUser(
+ _saslAuthSession->getOpCtxt(), user, &userObj);
- unique_ptr<SecureRandom> sr(SecureRandom::create());
+ if (!status.isOK()) {
+ return StatusWith<bool>(status);
+ }
- binaryNonce[0] = sr->nextInt64();
- binaryNonce[1] = sr->nextInt64();
- binaryNonce[2] = sr->nextInt64();
+ _creds = userObj->getCredentials();
+ UserName userName = userObj->getName();
- _nonce = clientNonce +
- base64::encode(reinterpret_cast<char*>(binaryNonce), sizeof(binaryNonce));
- StringBuilder sb;
- sb << "r=" << _nonce <<
- ",s=" << _creds.scram.salt <<
- ",i=" << _creds.scram.iterationCount;
- *outputData = sb.str();
+ _saslAuthSession->getAuthorizationSession()->getAuthorizationManager().releaseUser(userObj);
- // add server-first-message to authMessage
- _authMessage += *outputData + ",";
+ // Check for authentication attempts of the __system user on
+ // systems started without a keyfile.
+ if (userName == internalSecurity.user->getName() && _creds.scram.salt.empty()) {
+ return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
+ "It is not possible to authenticate as the __system user "
+ "on servers started without a --keyFile parameter");
+ }
- return StatusWith<bool>(false);
+ // Generate SCRAM credentials on the fly for mixed MONGODB-CR/SCRAM mode.
+ if (_creds.scram.salt.empty() && !_creds.password.empty()) {
+ // Use a default value of 5000 for the scramIterationCount when in mixed mode,
+ // overriding the default value (10000) used for SCRAM mode or the user-given value.
+ const int mixedModeScramIterationCount = 5000;
+ BSONObj scramCreds =
+ scram::generateCredentials(_creds.password, mixedModeScramIterationCount);
+ _creds.scram.iterationCount = scramCreds[scram::iterationCountFieldName].Int();
+ _creds.scram.salt = scramCreds[scram::saltFieldName].String();
+ _creds.scram.storedKey = scramCreds[scram::storedKeyFieldName].String();
+ _creds.scram.serverKey = scramCreds[scram::serverKeyFieldName].String();
}
- /**
- * Parse client-final-message of the form:
- * c=channel-binding(base64),r=client-nonce|server-nonce,p=ClientProof
- *
- * Generate successful authentication server-final-message on the form:
- * v=ServerSignature
- *
- * or failed authentication server-final-message on the form:
- * e=message
- *
- * NOTE: we are ignoring the channel binding part of the message
- **/
- StatusWith<bool> SaslSCRAMSHA1ServerConversation::_secondStep(const std::vector<string>& input,
- std::string* outputData) {
- if (input.size() != 3) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect number of arguments for second SCRAM-SHA-1 client message, got " <<
- input.size() << " expected 3");
- }
- else if (!str::startsWith(input[0], "c=") || input[0].size() < 3) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect SCRAM-SHA-1 channel binding: " << input[0]);
- }
- else if (!str::startsWith(input[1], "r=") || input[1].size() < 6) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect SCRAM-SHA-1 client|server nonce: " << input[1]);
- }
- else if(!str::startsWith(input[2], "p=") || input[2].size() < 3) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Incorrect SCRAM-SHA-1 ClientProof: " << input[2]);
- }
+ // Generate server-first-message
+ // Create text-based nonce as base64 encoding of a binary blob of length multiple of 3
+ const int nonceLenQWords = 3;
+ uint64_t binaryNonce[nonceLenQWords];
- // add client-final-message-without-proof to authMessage
- _authMessage += input[0] + "," + input[1];
+ unique_ptr<SecureRandom> sr(SecureRandom::create());
- // Concatenated nonce sent by client should equal the one in server-first-message
- std::string nonce = input[1].substr(2);
- if (nonce != _nonce) {
- return StatusWith<bool>(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Unmatched SCRAM-SHA-1 nonce received from client in second step, expected " <<
- _nonce << " but received " << nonce);
- }
+ binaryNonce[0] = sr->nextInt64();
+ binaryNonce[1] = sr->nextInt64();
+ binaryNonce[2] = sr->nextInt64();
- std::string clientProof = input[2].substr(2);
-
- // Do server side computations, compare storedKeys and generate client-final-message
- // AuthMessage := client-first-message-bare + "," +
- // server-first-message + "," +
- // client-final-message-without-proof
- // ClientSignature := HMAC(StoredKey, AuthMessage)
- // ClientKey := ClientSignature XOR ClientProof
- // ServerSignature := HMAC(ServerKey, AuthMessage)
-
- unsigned int hashLen = 0;
- unsigned char clientSignature[scram::hashSize];
-
- std::string decodedStoredKey = base64::decode(_creds.scram.storedKey);
- // ClientSignature := HMAC(StoredKey, AuthMessage)
- fassert(18662, crypto::hmacSha1(
- reinterpret_cast<const unsigned char*>(decodedStoredKey.c_str()),
- scram::hashSize,
- reinterpret_cast<const unsigned char*>(_authMessage.c_str()),
- _authMessage.size(),
- clientSignature,
- &hashLen));
-
- fassert(18658, hashLen == scram::hashSize);
-
- try {
- clientProof = base64::decode(clientProof);
- }
- catch (const DBException& ex) {
- return StatusWith<bool>(ex.toStatus());
- }
- const unsigned char *decodedClientProof =
- reinterpret_cast<const unsigned char*>(clientProof.c_str());
+ _nonce =
+ clientNonce + base64::encode(reinterpret_cast<char*>(binaryNonce), sizeof(binaryNonce));
+ StringBuilder sb;
+ sb << "r=" << _nonce << ",s=" << _creds.scram.salt << ",i=" << _creds.scram.iterationCount;
+ *outputData = sb.str();
- // ClientKey := ClientSignature XOR ClientProof
- unsigned char clientKey[scram::hashSize];
- for(size_t i=0; i<scram::hashSize; i++) {
- clientKey[i] = clientSignature[i]^decodedClientProof[i];
- }
+ // add server-first-message to authMessage
+ _authMessage += *outputData + ",";
- // StoredKey := H(ClientKey)
- unsigned char computedStoredKey[scram::hashSize];
- fassert(18659, crypto::sha1(clientKey, scram::hashSize, computedStoredKey));
+ return StatusWith<bool>(false);
+}
- if (memcmp(decodedStoredKey.c_str(), computedStoredKey, scram::hashSize) != 0) {
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- mongoutils::str::stream() <<
- "SCRAM-SHA-1 authentication failed, storedKey mismatch");
- }
+/**
+ * Parse client-final-message of the form:
+ * c=channel-binding(base64),r=client-nonce|server-nonce,p=ClientProof
+ *
+ * Generate successful authentication server-final-message of the form:
+ * v=ServerSignature
+ *
+ * or failed authentication server-final-message of the form:
+ * e=message
+ *
+ * NOTE: we are ignoring the channel binding part of the message
+**/
+StatusWith<bool> SaslSCRAMSHA1ServerConversation::_secondStep(const std::vector<string>& input,
+ std::string* outputData) {
+ if (input.size() != 3) {
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Incorrect number of arguments for second SCRAM-SHA-1 client message, got "
+ << input.size() << " expected 3");
+ } else if (!str::startsWith(input[0], "c=") || input[0].size() < 3) {
+ return StatusWith<bool>(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Incorrect SCRAM-SHA-1 channel binding: " << input[0]);
+ } else if (!str::startsWith(input[1], "r=") || input[1].size() < 6) {
+ return StatusWith<bool>(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Incorrect SCRAM-SHA-1 client|server nonce: " << input[1]);
+ } else if (!str::startsWith(input[2], "p=") || input[2].size() < 3) {
+ return StatusWith<bool>(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Incorrect SCRAM-SHA-1 ClientProof: " << input[2]);
+ }
+
+ // add client-final-message-without-proof to authMessage
+ _authMessage += input[0] + "," + input[1];
+
+ // Concatenated nonce sent by client should equal the one in server-first-message
+ std::string nonce = input[1].substr(2);
+ if (nonce != _nonce) {
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Unmatched SCRAM-SHA-1 nonce received from client in second step, expected "
+ << _nonce << " but received " << nonce);
+ }
- // ServerSignature := HMAC(ServerKey, AuthMessage)
- unsigned char serverSignature[scram::hashSize];
- std::string decodedServerKey = base64::decode(_creds.scram.serverKey);
- fassert(18660, crypto::hmacSha1(
- reinterpret_cast<const unsigned char*>(decodedServerKey.c_str()),
- scram::hashSize,
- reinterpret_cast<const unsigned char*>(_authMessage.c_str()),
- _authMessage.size(),
- serverSignature,
- &hashLen));
+ std::string clientProof = input[2].substr(2);
+
+ // Do server side computations, compare storedKeys and generate client-final-message
+ // AuthMessage := client-first-message-bare + "," +
+ // server-first-message + "," +
+ // client-final-message-without-proof
+ // ClientSignature := HMAC(StoredKey, AuthMessage)
+ // ClientKey := ClientSignature XOR ClientProof
+ // ServerSignature := HMAC(ServerKey, AuthMessage)
+
+ unsigned int hashLen = 0;
+ unsigned char clientSignature[scram::hashSize];
+
+ std::string decodedStoredKey = base64::decode(_creds.scram.storedKey);
+ // ClientSignature := HMAC(StoredKey, AuthMessage)
+ fassert(18662,
+ crypto::hmacSha1(reinterpret_cast<const unsigned char*>(decodedStoredKey.c_str()),
+ scram::hashSize,
+ reinterpret_cast<const unsigned char*>(_authMessage.c_str()),
+ _authMessage.size(),
+ clientSignature,
+ &hashLen));
+
+ fassert(18658, hashLen == scram::hashSize);
+
+ try {
+ clientProof = base64::decode(clientProof);
+ } catch (const DBException& ex) {
+ return StatusWith<bool>(ex.toStatus());
+ }
+ const unsigned char* decodedClientProof =
+ reinterpret_cast<const unsigned char*>(clientProof.c_str());
- fassert(18661, hashLen == scram::hashSize);
+ // ClientKey := ClientSignature XOR ClientProof
+ unsigned char clientKey[scram::hashSize];
+ for (size_t i = 0; i < scram::hashSize; i++) {
+ clientKey[i] = clientSignature[i] ^ decodedClientProof[i];
+ }
- StringBuilder sb;
- sb << "v=" << base64::encode(reinterpret_cast<char*>(serverSignature), scram::hashSize);
- *outputData = sb.str();
+ // StoredKey := H(ClientKey)
+ unsigned char computedStoredKey[scram::hashSize];
+ fassert(18659, crypto::sha1(clientKey, scram::hashSize, computedStoredKey));
- return StatusWith<bool>(false);
+ if (memcmp(decodedStoredKey.c_str(), computedStoredKey, scram::hashSize) != 0) {
+ return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
+ mongoutils::str::stream()
+ << "SCRAM-SHA-1 authentication failed, storedKey mismatch");
}
+
+ // ServerSignature := HMAC(ServerKey, AuthMessage)
+ unsigned char serverSignature[scram::hashSize];
+ std::string decodedServerKey = base64::decode(_creds.scram.serverKey);
+ fassert(18660,
+ crypto::hmacSha1(reinterpret_cast<const unsigned char*>(decodedServerKey.c_str()),
+ scram::hashSize,
+ reinterpret_cast<const unsigned char*>(_authMessage.c_str()),
+ _authMessage.size(),
+ serverSignature,
+ &hashLen));
+
+ fassert(18661, hashLen == scram::hashSize);
+
+ StringBuilder sb;
+ sb << "v=" << base64::encode(reinterpret_cast<char*>(serverSignature), scram::hashSize);
+ *outputData = sb.str();
+
+ return StatusWith<bool>(false);
+}
} // namespace mongo
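The _secondStep() hunk above verifies the client by recomputing ClientSignature = HMAC(StoredKey, AuthMessage), XORing it with the received ClientProof to recover ClientKey, and checking that H(ClientKey) matches the stored StoredKey. The standalone toy below illustrates only the XOR recovery step, with made-up byte values standing in for real HMAC/SHA-1 output:

    #include <array>
    #include <cstddef>
    #include <cstdio>

    int main() {
        constexpr std::size_t hashSize = 20;  // SHA-1 digest length
        std::array<unsigned char, hashSize> clientSignature{};  // stands in for HMAC(StoredKey, AuthMessage)
        std::array<unsigned char, hashSize> clientKey{};        // the secret the client derived from the password
        for (std::size_t i = 0; i < hashSize; ++i) {
            clientSignature[i] = static_cast<unsigned char>(i * 7);
            clientKey[i] = static_cast<unsigned char>(0xA5u ^ i);
        }

        // The client transmits ClientProof := ClientKey XOR ClientSignature.
        std::array<unsigned char, hashSize> clientProof{};
        for (std::size_t i = 0; i < hashSize; ++i) {
            clientProof[i] = clientKey[i] ^ clientSignature[i];
        }

        // The server recovers ClientKey by XORing the proof with its own ClientSignature;
        // the real code then hashes it and compares the result against StoredKey.
        std::array<unsigned char, hashSize> recovered{};
        for (std::size_t i = 0; i < hashSize; ++i) {
            recovered[i] = clientProof[i] ^ clientSignature[i];
        }

        std::printf("ClientKey recovered: %s\n", recovered == clientKey ? "yes" : "no");
        return 0;
    }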
diff --git a/src/mongo/db/auth/sasl_scramsha1_server_conversation.h b/src/mongo/db/auth/sasl_scramsha1_server_conversation.h
index 1a2e1ad8fbd..68b3e226168 100644
--- a/src/mongo/db/auth/sasl_scramsha1_server_conversation.h
+++ b/src/mongo/db/auth/sasl_scramsha1_server_conversation.h
@@ -37,45 +37,46 @@
#include "mongo/db/auth/sasl_server_conversation.h"
namespace mongo {
+/**
+ * Server side authentication session for SASL SCRAM-SHA-1.
+ */
+class SaslSCRAMSHA1ServerConversation : public SaslServerConversation {
+ MONGO_DISALLOW_COPYING(SaslSCRAMSHA1ServerConversation);
+
+public:
/**
- * Server side authentication session for SASL SCRAM-SHA-1.
- */
- class SaslSCRAMSHA1ServerConversation : public SaslServerConversation {
- MONGO_DISALLOW_COPYING(SaslSCRAMSHA1ServerConversation);
- public:
- /**
- * Implements the server side of a SASL SCRAM-SHA-1 mechanism session.
- **/
- explicit SaslSCRAMSHA1ServerConversation(SaslAuthenticationSession* saslAuthSession);
+ * Implements the server side of a SASL SCRAM-SHA-1 mechanism session.
+ **/
+ explicit SaslSCRAMSHA1ServerConversation(SaslAuthenticationSession* saslAuthSession);
- virtual ~SaslSCRAMSHA1ServerConversation() {};
+ virtual ~SaslSCRAMSHA1ServerConversation(){};
- /**
- * Take one step in a SCRAM-SHA-1 conversation.
- *
- * @return !Status::OK() if auth failed. The boolean part indicates if the
- * authentication conversation is finished or not.
- *
- **/
- virtual StatusWith<bool> step(StringData inputData, std::string* outputData);
+ /**
+ * Take one step in a SCRAM-SHA-1 conversation.
+ *
+ * @return !Status::OK() if auth failed. The boolean part indicates if the
+ * authentication conversation is finished or not.
+ *
+ **/
+ virtual StatusWith<bool> step(StringData inputData, std::string* outputData);
- private:
- /**
- * Parse client-first-message and generate server-first-message
- **/
- StatusWith<bool> _firstStep(std::vector<std::string>& input, std::string* outputData);
+private:
+ /**
+ * Parse client-first-message and generate server-first-message
+ **/
+ StatusWith<bool> _firstStep(std::vector<std::string>& input, std::string* outputData);
- /**
- * Parse client-final-message and generate server-final-message
- **/
- StatusWith<bool> _secondStep(const std::vector<std::string>& input, std::string* outputData);
+ /**
+ * Parse client-final-message and generate server-final-message
+ **/
+ StatusWith<bool> _secondStep(const std::vector<std::string>& input, std::string* outputData);
- int _step;
- std::string _authMessage;
- User::CredentialData _creds;
+ int _step;
+ std::string _authMessage;
+ User::CredentialData _creds;
- // client and server nonce concatenated
- std::string _nonce;
- };
+ // client and server nonce concatenated
+ std::string _nonce;
+};
} // namespace mongo
diff --git a/src/mongo/db/auth/sasl_server_conversation.cpp b/src/mongo/db/auth/sasl_server_conversation.cpp
index aa7f662535d..75f680e9a0d 100644
--- a/src/mongo/db/auth/sasl_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_server_conversation.cpp
@@ -31,11 +31,11 @@
#include <string>
namespace mongo {
-
- SaslServerConversation::~SaslServerConversation() {};
-
- std::string SaslServerConversation::getPrincipalId() {
- return _user;
- }
+
+SaslServerConversation::~SaslServerConversation(){};
+
+std::string SaslServerConversation::getPrincipalId() {
+ return _user;
+}
} // namespace mongo
diff --git a/src/mongo/db/auth/sasl_server_conversation.h b/src/mongo/db/auth/sasl_server_conversation.h
index 005e5fc67f1..5d0ce497523 100644
--- a/src/mongo/db/auth/sasl_server_conversation.h
+++ b/src/mongo/db/auth/sasl_server_conversation.h
@@ -37,52 +37,53 @@
#include "mongo/db/auth/user.h"
namespace mongo {
-
- class SaslAuthenticationSession;
- template <typename T> class StatusWith;
-
+
+class SaslAuthenticationSession;
+template <typename T>
+class StatusWith;
+
+/**
+ * Abstract class for implementing the server-side
+ * of a SASL mechanism conversation.
+ */
+class SaslServerConversation {
+ MONGO_DISALLOW_COPYING(SaslServerConversation);
+
+public:
/**
- * Abstract class for implementing the server-side
- * of a SASL mechanism conversation.
- */
- class SaslServerConversation {
- MONGO_DISALLOW_COPYING(SaslServerConversation);
- public:
- /**
- * Implements the server side of a SASL authentication mechanism.
- *
- * "saslAuthSession" is the corresponding SASLAuthenticationSession.
- * "saslAuthSession" must stay in scope until the SaslServerConversation's
- * destructor completes.
- *
- **/
- explicit SaslServerConversation(SaslAuthenticationSession* saslAuthSession) :
- _saslAuthSession(saslAuthSession),
- _user("") {}
+ * Implements the server side of a SASL authentication mechanism.
+ *
+ * "saslAuthSession" is the corresponding SASLAuthenticationSession.
+ * "saslAuthSession" must stay in scope until the SaslServerConversation's
+ * destructor completes.
+ *
+ **/
+ explicit SaslServerConversation(SaslAuthenticationSession* saslAuthSession)
+ : _saslAuthSession(saslAuthSession), _user("") {}
- virtual ~SaslServerConversation();
+ virtual ~SaslServerConversation();
- /**
- * Performs one step of the server side of the authentication session,
- * consuming "inputData" and producing "*outputData".
- *
- * A return of Status::OK() indicates successful progress towards authentication.
- * A return of !Status::OK() indicates failed authentication
- *
- * A return of true means that the authentication process has finished.
- * A return of false means that the authentication process has more steps.
- *
- */
- virtual StatusWith<bool> step(StringData inputData, std::string* outputData) = 0;
+ /**
+ * Performs one step of the server side of the authentication session,
+ * consuming "inputData" and producing "*outputData".
+ *
+ * A return of Status::OK() indicates successful progress towards authentication.
+ * A return of !Status::OK() indicates failed authentication
+ *
+ * A return of true means that the authentication process has finished.
+ * A return of false means that the authentication process has more steps.
+ *
+ */
+ virtual StatusWith<bool> step(StringData inputData, std::string* outputData) = 0;
+
+ /**
+ * Gets the SASL principal id (user name) for the conversation
+ **/
+ std::string getPrincipalId();
- /**
- * Gets the SASL principal id (user name) for the conversation
- **/
- std::string getPrincipalId();
-
- protected:
- SaslAuthenticationSession* _saslAuthSession;
- std::string _user;
- };
+protected:
+ SaslAuthenticationSession* _saslAuthSession;
+ std::string _user;
+};
} // namespace mongo
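The step() contract documented above returns a status plus a boolean: a non-OK status means authentication failed, false means more steps are expected, and true means the conversation is complete. A toy driver loop with stand-in types (not the real StatusWith or SaslServerConversation classes) shows how callers typically consume it:

    #include <iostream>
    #include <string>

    // Stand-in for StatusWith<bool>: 'ok' mirrors Status::OK(), 'done' is the boolean payload.
    struct StepResult {
        bool ok;
        bool done;
        std::string error;
    };

    // A fake two-step mechanism that finishes on the second call.
    StepResult fakeStep(int stepNumber, const std::string& input, std::string* output) {
        *output = "server-reply-" + std::to_string(stepNumber) + " (to: " + input + ")";
        return {true, stepNumber >= 2, ""};
    }

    int main() {
        std::string clientMessage = "client-first-message";
        std::string serverMessage;
        for (int step = 1;; ++step) {
            StepResult res = fakeStep(step, clientMessage, &serverMessage);
            if (!res.ok) {  // non-OK status: authentication failed
                std::cout << "authentication failed: " << res.error << "\n";
                return 1;
            }
            std::cout << "step " << step << " -> " << serverMessage << "\n";
            if (res.done) {  // true: conversation finished successfully
                break;
            }
            clientMessage = "client-reply-" + std::to_string(step);  // false: keep stepping
        }
        std::cout << "authentication conversation finished\n";
        return 0;
    }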
diff --git a/src/mongo/db/auth/sasl_test_crutch.cpp b/src/mongo/db/auth/sasl_test_crutch.cpp
index da6eae4d332..d64877492aa 100644
--- a/src/mongo/db/auth/sasl_test_crutch.cpp
+++ b/src/mongo/db/auth/sasl_test_crutch.cpp
@@ -30,5 +30,5 @@
#include "mongo/db/commands.h"
namespace mongo {
- int Command::testCommandsEnabled = 0;
+int Command::testCommandsEnabled = 0;
}
diff --git a/src/mongo/db/auth/security_key.cpp b/src/mongo/db/auth/security_key.cpp
index 8edbc6ef140..d7d7c96410e 100644
--- a/src/mongo/db/auth/security_key.cpp
+++ b/src/mongo/db/auth/security_key.cpp
@@ -51,97 +51,97 @@
namespace mongo {
- using std::endl;
- using std::string;
+using std::endl;
+using std::string;
- bool setUpSecurityKey(const string& filename) {
- struct stat stats;
+bool setUpSecurityKey(const string& filename) {
+ struct stat stats;
- // check obvious file errors
- if (stat(filename.c_str(), &stats) == -1) {
- log() << "error getting file " << filename << ": " << strerror(errno) << endl;
- return false;
- }
+ // check obvious file errors
+ if (stat(filename.c_str(), &stats) == -1) {
+ log() << "error getting file " << filename << ": " << strerror(errno) << endl;
+ return false;
+ }
#if !defined(_WIN32)
- // check permissions: must be X00, where X is >= 4
- if ((stats.st_mode & (S_IRWXG|S_IRWXO)) != 0) {
- log() << "permissions on " << filename << " are too open" << endl;
- return false;
- }
+ // check permissions: must be X00, where X is >= 4
+ if ((stats.st_mode & (S_IRWXG | S_IRWXO)) != 0) {
+ log() << "permissions on " << filename << " are too open" << endl;
+ return false;
+ }
#endif
- FILE* file = fopen( filename.c_str(), "rb" );
- if (!file) {
- log() << "error opening file: " << filename << ": " << strerror(errno) << endl;
+ FILE* file = fopen(filename.c_str(), "rb");
+ if (!file) {
+ log() << "error opening file: " << filename << ": " << strerror(errno) << endl;
+ return false;
+ }
+
+ string str = "";
+
+ // strip key file
+ const unsigned long long fileLength = stats.st_size;
+ unsigned long long read = 0;
+ while (read < fileLength) {
+ char buf;
+ int readLength = fread(&buf, 1, 1, file);
+ if (readLength < 1) {
+ log() << "error reading file " << filename << endl;
+ fclose(file);
return false;
}
+ read++;
- string str = "";
-
- // strip key file
- const unsigned long long fileLength = stats.st_size;
- unsigned long long read = 0;
- while (read < fileLength) {
- char buf;
- int readLength = fread(&buf, 1, 1, file);
- if (readLength < 1) {
- log() << "error reading file " << filename << endl;
- fclose( file );
- return false;
- }
- read++;
-
- // check for whitespace
- if ((buf >= '\x09' && buf <= '\x0D') || buf == ' ') {
- continue;
- }
-
- // check valid base64
- if ((buf < 'A' || buf > 'Z') && (buf < 'a' || buf > 'z') && (buf < '0' || buf > '9') && buf != '+' && buf != '/') {
- log() << "invalid char in key file " << filename << ": " << buf << endl;
- fclose( file );
- return false;
- }
-
- str += buf;
+ // check for whitespace
+ if ((buf >= '\x09' && buf <= '\x0D') || buf == ' ') {
+ continue;
}
- fclose( file );
-
- const unsigned long long keyLength = str.size();
- if (keyLength < 6 || keyLength > 1024) {
- log() << " security key in " << filename << " has length " << keyLength
- << ", must be between 6 and 1024 chars" << endl;
+ // check valid base64
+ if ((buf < 'A' || buf > 'Z') && (buf < 'a' || buf > 'z') && (buf < '0' || buf > '9') &&
+ buf != '+' && buf != '/') {
+ log() << "invalid char in key file " << filename << ": " << buf << endl;
+ fclose(file);
return false;
}
- // Generate MONGODB-CR and SCRAM credentials for the internal user based on the keyfile.
- User::CredentialData credentials;
- credentials.password = mongo::createPasswordDigest(
- internalSecurity.user->getName().getUser().toString(), str);
-
- BSONObj creds = scram::generateCredentials(credentials.password,
- saslGlobalParams.scramIterationCount);
- credentials.scram.iterationCount = creds[scram::iterationCountFieldName].Int();
- credentials.scram.salt = creds[scram::saltFieldName].String();
- credentials.scram.storedKey = creds[scram::storedKeyFieldName].String();
- credentials.scram.serverKey = creds[scram::serverKeyFieldName].String();
-
- internalSecurity.user->setCredentials(credentials);
-
- int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
- if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile ||
- clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
- setInternalUserAuthParams(
- BSON(saslCommandMechanismFieldName << "SCRAM-SHA-1" <<
- saslCommandUserDBFieldName <<
- internalSecurity.user->getName().getDB() <<
- saslCommandUserFieldName << internalSecurity.user->getName().getUser() <<
- saslCommandPasswordFieldName << credentials.password <<
- saslCommandDigestPasswordFieldName << false));
- }
- return true;
+ str += buf;
+ }
+
+ fclose(file);
+
+ const unsigned long long keyLength = str.size();
+ if (keyLength < 6 || keyLength > 1024) {
+ log() << " security key in " << filename << " has length " << keyLength
+ << ", must be between 6 and 1024 chars" << endl;
+ return false;
+ }
+
+ // Generate MONGODB-CR and SCRAM credentials for the internal user based on the keyfile.
+ User::CredentialData credentials;
+ credentials.password =
+ mongo::createPasswordDigest(internalSecurity.user->getName().getUser().toString(), str);
+
+ BSONObj creds =
+ scram::generateCredentials(credentials.password, saslGlobalParams.scramIterationCount);
+ credentials.scram.iterationCount = creds[scram::iterationCountFieldName].Int();
+ credentials.scram.salt = creds[scram::saltFieldName].String();
+ credentials.scram.storedKey = creds[scram::storedKeyFieldName].String();
+ credentials.scram.serverKey = creds[scram::serverKeyFieldName].String();
+
+ internalSecurity.user->setCredentials(credentials);
+
+ int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
+ if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile ||
+ clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
+ setInternalUserAuthParams(
+ BSON(saslCommandMechanismFieldName
+ << "SCRAM-SHA-1" << saslCommandUserDBFieldName
+ << internalSecurity.user->getName().getDB() << saslCommandUserFieldName
+ << internalSecurity.user->getName().getUser() << saslCommandPasswordFieldName
+ << credentials.password << saslCommandDigestPasswordFieldName << false));
}
+ return true;
+}
-} // namespace mongo
+} // namespace mongo
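setUpSecurityKey() above strips whitespace from the key file, rejects any character outside the base64 alphabet, and requires the stripped key to be between 6 and 1024 characters. A standalone sketch of just those sanitization rules (illustrative, not MongoDB code):

    #include <iostream>
    #include <string>

    // Strip whitespace, enforce the base64 character set, and enforce the 6-1024 length bound.
    bool stripAndValidateKey(const std::string& raw, std::string* key) {
        key->clear();
        for (char c : raw) {
            if ((c >= '\x09' && c <= '\x0D') || c == ' ') {
                continue;  // whitespace is ignored
            }
            const bool base64Char = (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
                (c >= '0' && c <= '9') || c == '+' || c == '/';
            if (!base64Char) {
                return false;  // invalid character in the key file
            }
            *key += c;
        }
        return key->size() >= 6 && key->size() <= 1024;
    }

    int main() {
        std::string key;
        std::cout << stripAndValidateKey("  c2VjcmV0a2V5\n", &key) << " "  // 1: accepted
                  << stripAndValidateKey("bad!key", &key) << "\n";         // 0: '!' rejected
        return 0;
    }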
diff --git a/src/mongo/db/auth/security_key.h b/src/mongo/db/auth/security_key.h
index a250b34a029..898efdf9e8a 100644
--- a/src/mongo/db/auth/security_key.h
+++ b/src/mongo/db/auth/security_key.h
@@ -31,13 +31,13 @@
#include <string>
namespace mongo {
- /**
- * This method checks the validity of filename as a security key, hashes its
- * contents, and stores it in the internalSecurity variable. Prints an
- * error message to the logs if there's an error.
- * @param filename the file containing the key
- * @return if the key was successfully stored
- */
- bool setUpSecurityKey(const std::string& filename);
+/**
+ * This method checks the validity of filename as a security key, hashes its
+ * contents, and stores it in the internalSecurity variable. Prints an
+ * error message to the logs if there's an error.
+ * @param filename the file containing the key
+ * @return if the key was successfully stored
+ */
+bool setUpSecurityKey(const std::string& filename);
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/auth/user.cpp b/src/mongo/db/auth/user.cpp
index a2afa97a4e3..dd4dd3196a9 100644
--- a/src/mongo/db/auth/user.cpp
+++ b/src/mongo/db/auth/user.cpp
@@ -41,123 +41,119 @@
namespace mongo {
- User::User(const UserName& name) :
- _name(name),
- _refCount(0),
- _isValid(1) {}
+User::User(const UserName& name) : _name(name), _refCount(0), _isValid(1) {}
- User::~User() {
- dassert(_refCount == 0);
- }
-
- const UserName& User::getName() const {
- return _name;
- }
-
- RoleNameIterator User::getRoles() const {
- return makeRoleNameIteratorForContainer(_roles);
- }
-
- RoleNameIterator User::getIndirectRoles() const {
- return makeRoleNameIteratorForContainer(_indirectRoles);
- }
-
- bool User::hasRole(const RoleName& roleName) const {
- return _roles.count(roleName);
- }
-
- const User::CredentialData& User::getCredentials() const {
- return _credentials;
- }
-
- bool User::isValid() const {
- return _isValid.loadRelaxed() == 1;
- }
-
- uint32_t User::getRefCount() const {
- return _refCount;
- }
-
- const ActionSet User::getActionsForResource(const ResourcePattern& resource) const {
- unordered_map<ResourcePattern, Privilege>::const_iterator it = _privileges.find(resource);
- if (it == _privileges.end()) {
- return ActionSet();
- }
- return it->second.getActions();
- }
-
- User* User::clone() const {
- std::unique_ptr<User> result(new User(_name));
- result->_privileges = _privileges;
- result->_roles = _roles;
- result->_credentials = _credentials;
- return result.release();
- }
+User::~User() {
+ dassert(_refCount == 0);
+}
- void User::setCredentials(const CredentialData& credentials) {
- _credentials = credentials;
- }
-
- void User::setRoles(RoleNameIterator roles) {
- _roles.clear();
- while (roles.more()) {
- _roles.insert(roles.next());
- }
- }
-
- void User::setIndirectRoles(RoleNameIterator indirectRoles) {
- _indirectRoles.clear();
- while (indirectRoles.more()) {
- _indirectRoles.push_back(indirectRoles.next());
- }
- }
+const UserName& User::getName() const {
+ return _name;
+}
- void User::setPrivileges(const PrivilegeVector& privileges) {
- _privileges.clear();
- for (size_t i = 0; i < privileges.size(); ++i) {
- const Privilege& privilege = privileges[i];
- _privileges[privilege.getResourcePattern()] = privilege;
- }
- }
-
- void User::addRole(const RoleName& roleName) {
- _roles.insert(roleName);
- }
-
- void User::addRoles(const std::vector<RoleName>& roles) {
- for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
- addRole(*it);
- }
- }
+RoleNameIterator User::getRoles() const {
+ return makeRoleNameIteratorForContainer(_roles);
+}
- void User::addPrivilege(const Privilege& privilegeToAdd) {
- ResourcePrivilegeMap::iterator it = _privileges.find(privilegeToAdd.getResourcePattern());
- if (it == _privileges.end()) {
- // No privilege exists yet for this resource
- _privileges.insert(std::make_pair(privilegeToAdd.getResourcePattern(), privilegeToAdd));
- } else {
- dassert(it->first == privilegeToAdd.getResourcePattern());
- it->second.addActions(privilegeToAdd.getActions());
- }
- }
-
- void User::addPrivileges(const PrivilegeVector& privileges) {
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- addPrivilege(*it);
- }
- }
-
- void User::invalidate() {
- _isValid.store(0);
- }
-
- void User::incrementRefCount() {
- ++_refCount;
- }
-
- void User::decrementRefCount() {
- dassert(_refCount > 0);
- --_refCount;
- }
-} // namespace mongo
+RoleNameIterator User::getIndirectRoles() const {
+ return makeRoleNameIteratorForContainer(_indirectRoles);
+}
+
+bool User::hasRole(const RoleName& roleName) const {
+ return _roles.count(roleName);
+}
+
+const User::CredentialData& User::getCredentials() const {
+ return _credentials;
+}
+
+bool User::isValid() const {
+ return _isValid.loadRelaxed() == 1;
+}
+
+uint32_t User::getRefCount() const {
+ return _refCount;
+}
+
+const ActionSet User::getActionsForResource(const ResourcePattern& resource) const {
+ unordered_map<ResourcePattern, Privilege>::const_iterator it = _privileges.find(resource);
+ if (it == _privileges.end()) {
+ return ActionSet();
+ }
+ return it->second.getActions();
+}
+
+User* User::clone() const {
+ std::unique_ptr<User> result(new User(_name));
+ result->_privileges = _privileges;
+ result->_roles = _roles;
+ result->_credentials = _credentials;
+ return result.release();
+}
+
+void User::setCredentials(const CredentialData& credentials) {
+ _credentials = credentials;
+}
+
+void User::setRoles(RoleNameIterator roles) {
+ _roles.clear();
+ while (roles.more()) {
+ _roles.insert(roles.next());
+ }
+}
+
+void User::setIndirectRoles(RoleNameIterator indirectRoles) {
+ _indirectRoles.clear();
+ while (indirectRoles.more()) {
+ _indirectRoles.push_back(indirectRoles.next());
+ }
+}
+
+void User::setPrivileges(const PrivilegeVector& privileges) {
+ _privileges.clear();
+ for (size_t i = 0; i < privileges.size(); ++i) {
+ const Privilege& privilege = privileges[i];
+ _privileges[privilege.getResourcePattern()] = privilege;
+ }
+}
+
+void User::addRole(const RoleName& roleName) {
+ _roles.insert(roleName);
+}
+
+void User::addRoles(const std::vector<RoleName>& roles) {
+ for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
+ addRole(*it);
+ }
+}
+
+void User::addPrivilege(const Privilege& privilegeToAdd) {
+ ResourcePrivilegeMap::iterator it = _privileges.find(privilegeToAdd.getResourcePattern());
+ if (it == _privileges.end()) {
+ // No privilege exists yet for this resource
+ _privileges.insert(std::make_pair(privilegeToAdd.getResourcePattern(), privilegeToAdd));
+ } else {
+ dassert(it->first == privilegeToAdd.getResourcePattern());
+ it->second.addActions(privilegeToAdd.getActions());
+ }
+}
+
+void User::addPrivileges(const PrivilegeVector& privileges) {
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ addPrivilege(*it);
+ }
+}
+
+void User::invalidate() {
+ _isValid.store(0);
+}
+
+void User::incrementRefCount() {
+ ++_refCount;
+}
+
+void User::decrementRefCount() {
+ dassert(_refCount > 0);
+ --_refCount;
+}
+} // namespace mongo
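The User methods above implement the ownership model described in user.h: the AuthorizationManager hands out ref-counted snapshots, invalidate() flips an atomic flag when the cached data goes stale, and consumers are expected to release and re-acquire rather than keep using a stale object. A small illustrative sketch of that pattern with stand-in types (not the real User/AuthorizationManager API):

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <iostream>

    class UserSnapshot {
    public:
        bool isValid() const { return _isValid.load(std::memory_order_relaxed) == 1; }
        void invalidate() { _isValid.store(0); }
        void incrementRefCount() { ++_refCount; }
        void decrementRefCount() { assert(_refCount > 0); --_refCount; }
        std::uint32_t refCount() const { return _refCount; }

    private:
        std::atomic<int> _isValid{1};  // flipped to 0 when the cached user data is stale
        std::uint32_t _refCount{0};    // maintained by the owning manager, not by consumers
    };

    int main() {
        UserSnapshot snapshot;
        snapshot.incrementRefCount();      // analogous to acquireUser()
        snapshot.invalidate();             // the user document changed; the cached copy is stale
        if (!snapshot.isValid()) {
            snapshot.decrementRefCount();  // analogous to releaseUser(); caller then re-fetches
        }
        std::cout << "refCount=" << snapshot.refCount() << "\n";
        return 0;
    }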
diff --git a/src/mongo/db/auth/user.h b/src/mongo/db/auth/user.h
index d920abdda9d..d4aea7e442b 100644
--- a/src/mongo/db/auth/user.h
+++ b/src/mongo/db/auth/user.h
@@ -41,193 +41,188 @@
namespace mongo {
+/**
+ * Represents a MongoDB user. Stores information about the user necessary for access control
+ * checks and authentications, such as what privileges this user has, as well as what roles
+ * the user belongs to.
+ *
+ * Every User object is owned by an AuthorizationManager. The AuthorizationManager is the only
+ * one that should construct, modify, or delete a User object. All other consumers of User must
+ * use only the const methods. The AuthorizationManager is responsible for maintaining the
+ * reference count on all User objects it gives out and must not mutate any User objects with
+ * a non-zero reference count (except to call invalidate()). Any consumer of a User object
+ * should check isValid() before using it, and if it is no longer valid, it should
+ * return the object to the AuthorizationManager and fetch a new User object instance for this
+ * user from the AuthorizationManager.
+ */
+class User {
+ MONGO_DISALLOW_COPYING(User);
+
+public:
+ struct SCRAMCredentials {
+ SCRAMCredentials() : iterationCount(0), salt(""), serverKey(""), storedKey("") {}
+
+ int iterationCount;
+ std::string salt;
+ std::string serverKey;
+ std::string storedKey;
+ };
+ struct CredentialData {
+ CredentialData() : password(""), scram(), isExternal(false) {}
+
+ std::string password;
+ SCRAMCredentials scram;
+ bool isExternal;
+ };
+
+ typedef unordered_map<ResourcePattern, Privilege> ResourcePrivilegeMap;
+
+ explicit User(const UserName& name);
+ ~User();
+
+ /**
+ * Returns the user name for this user.
+ */
+ const UserName& getName() const;
+
+ /**
+ * Returns an iterator over the names of the user's direct roles
+ */
+ RoleNameIterator getRoles() const;
+
+ /**
+ * Returns an iterator over the names of the user's indirect roles
+ */
+ RoleNameIterator getIndirectRoles() const;
+
+ /**
+ * Returns true if this user is a member of the given role.
+ */
+ bool hasRole(const RoleName& roleName) const;
+
+ /**
+ * Returns a reference to the information about the user's privileges.
+ */
+ const ResourcePrivilegeMap& getPrivileges() const {
+ return _privileges;
+ }
+
+ /**
+ * Returns the CredentialData for this user.
+ */
+ const CredentialData& getCredentials() const;
+
+ /**
+ * Gets the set of actions this user is allowed to perform on the given resource.
+ */
+ const ActionSet getActionsForResource(const ResourcePattern& resource) const;
+
+ /**
+ * Returns true if this copy of information about this user is still valid. If this returns
+ * false, this object should no longer be used and should be returned to the
+ * AuthorizationManager and a new User object for this user should be requested.
+ */
+ bool isValid() const;
+
+ /**
+ * This returns the reference count for this User. The AuthorizationManager should be the
+ * only caller of this.
+ */
+ uint32_t getRefCount() const;
+
+ /**
+ * Clones this user into a new, valid User object with refcount of 0.
+ */
+ User* clone() const;
+
+ // Mutators below. Mutation functions should *only* be called by the AuthorizationManager
+
/**
- * Represents a MongoDB user. Stores information about the user necessary for access control
- * checks and authentications, such as what privileges this user has, as well as what roles
- * the user belongs to.
+ * Sets this user's authentication credentials.
+ */
+ void setCredentials(const CredentialData& credentials);
+
+ /**
+ * Replaces any existing user role membership information with the roles from "roles".
+ */
+ void setRoles(RoleNameIterator roles);
+
+ /**
+ * Replaces any existing indirect user role membership information with the roles from
+ * "indirectRoles".
+ */
+ void setIndirectRoles(RoleNameIterator indirectRoles);
+
+ /**
+ * Replaces any existing user privilege information with "privileges".
+ */
+ void setPrivileges(const PrivilegeVector& privileges);
+
+ /**
+ * Adds the given role name to the list of roles of which this user is a member.
+ */
+ void addRole(const RoleName& role);
+
+ /**
+ * Adds the given role names to the list of roles that this user belongs to.
+ */
+ void addRoles(const std::vector<RoleName>& roles);
+
+ /**
+ * Adds the given privilege to the list of privileges this user is authorized for.
+ */
+ void addPrivilege(const Privilege& privilege);
+
+ /**
+ * Adds the given privileges to the list of privileges this user is authorized for.
+ */
+ void addPrivileges(const PrivilegeVector& privileges);
+
+ /**
+ * Marks this instance of the User object as invalid, most likely because information about
+ * the user has been updated and needs to be reloaded from the AuthorizationManager.
*
- * Every User object is owned by an AuthorizationManager. The AuthorizationManager is the only
- * one that should construct, modify, or delete a User object. All other consumers of User must
- * use only the const methods. The AuthorizationManager is responsible for maintaining the
- * reference count on all User objects it gives out and must not mutate any User objects with
- * a non-zero reference count (except to call invalidate()). Any consumer of a User object
- * should check isInvalidated() before using it, and if it has been invalidated, it should
- * return the object to the AuthorizationManager and fetch a new User object instance for this
- * user from the AuthorizationManager.
- */
- class User {
- MONGO_DISALLOW_COPYING(User);
- public:
- struct SCRAMCredentials {
- SCRAMCredentials() :
- iterationCount(0),
- salt(""),
- serverKey(""),
- storedKey("") {}
-
- int iterationCount;
- std::string salt;
- std::string serverKey;
- std::string storedKey;
- };
- struct CredentialData {
- CredentialData() :
- password(""),
- scram(),
- isExternal(false) {}
-
- std::string password;
- SCRAMCredentials scram;
- bool isExternal;
- };
-
- typedef unordered_map<ResourcePattern, Privilege> ResourcePrivilegeMap;
-
- explicit User(const UserName& name);
- ~User();
-
- /**
- * Returns the user name for this user.
- */
- const UserName& getName() const;
-
- /**
- * Returns an iterator over the names of the user's direct roles
- */
- RoleNameIterator getRoles() const;
-
- /**
- * Returns an iterator over the names of the user's indirect roles
- */
- RoleNameIterator getIndirectRoles() const;
-
- /**
- * Returns true if this user is a member of the given role.
- */
- bool hasRole(const RoleName& roleName) const;
-
- /**
- * Returns a reference to the information about the user's privileges.
- */
- const ResourcePrivilegeMap& getPrivileges() const { return _privileges; }
-
- /**
- * Returns the CredentialData for this user.
- */
- const CredentialData& getCredentials() const;
-
- /**
- * Gets the set of actions this user is allowed to perform on the given resource.
- */
- const ActionSet getActionsForResource(const ResourcePattern& resource) const;
-
- /**
- * Returns true if this copy of information about this user is still valid. If this returns
- * false, this object should no longer be used and should be returned to the
- * AuthorizationManager and a new User object for this user should be requested.
- */
- bool isValid() const;
-
- /**
- * This returns the reference count for this User. The AuthorizationManager should be the
- * only caller of this.
- */
- uint32_t getRefCount() const;
-
- /**
- * Clones this user into a new, valid User object with refcount of 0.
- */
- User* clone() const;
-
- // Mutators below. Mutation functions should *only* be called by the AuthorizationManager
-
- /**
- * Sets this user's authentication credentials.
- */
- void setCredentials(const CredentialData& credentials);
-
- /**
- * Replaces any existing user role membership information with the roles from "roles".
- */
- void setRoles(RoleNameIterator roles);
-
- /**
- * Replaces any existing indirect user role membership information with the roles from
- * "indirectRoles".
- */
- void setIndirectRoles(RoleNameIterator indirectRoles);
-
- /**
- * Replaces any existing user privilege information with "privileges".
- */
- void setPrivileges(const PrivilegeVector& privileges);
-
- /**
- * Adds the given role name to the list of roles of which this user is a member.
- */
- void addRole(const RoleName& role);
-
- /**
- * Adds the given role names to the list of roles that this user belongs to.
- */
- void addRoles(const std::vector<RoleName>& roles);
-
- /**
- * Adds the given privilege to the list of privileges this user is authorized for.
- */
- void addPrivilege(const Privilege& privilege);
-
- /**
- * Adds the given privileges to the list of privileges this user is authorized for.
- */
- void addPrivileges(const PrivilegeVector& privileges);
-
- /**
- * Marks this instance of the User object as invalid, most likely because information about
- * the user has been updated and needs to be reloaded from the AuthorizationManager.
- *
- * This method should *only* be called by the AuthorizationManager.
- */
- void invalidate();
-
- /**
- * Increments the reference count for this User object, which records how many threads have
- * a reference to it.
- *
- * This method should *only* be called by the AuthorizationManager.
- */
- void incrementRefCount();
-
- /**
- * Decrements the reference count for this User object, which records how many threads have
- * a reference to it. Once the reference count goes to zero, the AuthorizationManager is
- * allowed to destroy this instance.
- *
- * This method should *only* be called by the AuthorizationManager.
- */
- void decrementRefCount();
-
- private:
-
- UserName _name;
-
- // Maps resource name to privilege on that resource
- ResourcePrivilegeMap _privileges;
-
- // Roles the user has privileges from
- unordered_set<RoleName> _roles;
-
- // Roles that the user indirectly has privileges from, due to role inheritance.
- std::vector<RoleName> _indirectRoles;
-
- // Credential information.
- CredentialData _credentials;
-
- // _refCount and _isInvalidated are modified exclusively by the AuthorizationManager
- // _isInvalidated can be read by any consumer of User, but _refCount can only be
- // meaningfully read by the AuthorizationManager, as _refCount is guarded by the AM's _lock
- uint32_t _refCount;
- AtomicUInt32 _isValid; // Using as a boolean
- };
+ * This method should *only* be called by the AuthorizationManager.
+ */
+ void invalidate();
+
+ /**
+ * Increments the reference count for this User object, which records how many threads have
+ * a reference to it.
+ *
+ * This method should *only* be called by the AuthorizationManager.
+ */
+ void incrementRefCount();
+
+ /**
+ * Decrements the reference count for this User object, which records how many threads have
+ * a reference to it. Once the reference count goes to zero, the AuthorizationManager is
+ * allowed to destroy this instance.
+ *
+ * This method should *only* be called by the AuthorizationManager.
+ */
+ void decrementRefCount();
+
+private:
+ UserName _name;
+
+ // Maps resource name to privilege on that resource
+ ResourcePrivilegeMap _privileges;
+
+ // Roles the user has privileges from
+ unordered_set<RoleName> _roles;
+
+ // Roles that the user indirectly has privileges from, due to role inheritance.
+ std::vector<RoleName> _indirectRoles;
+
+ // Credential information.
+ CredentialData _credentials;
+
+ // _refCount and _isValid are modified exclusively by the AuthorizationManager
+ // _isValid can be read by any consumer of User, but _refCount can only be
+ // meaningfully read by the AuthorizationManager, as _refCount is guarded by the AM's _lock
+ uint32_t _refCount;
+ AtomicUInt32 _isValid; // Using as a boolean
+};
-} // namespace mongo
+} // namespace mongo
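
A consumer-side sketch of the lifecycle described in the class comment above: check isValid() before relying on a cached User and swap in a fresh object once it has been invalidated. The acquireUser()/releaseUser() calls are assumed AuthorizationManager entry points, used here only for illustration:

    #include "mongo/db/auth/authorization_manager.h"
    #include "mongo/db/auth/user.h"

    namespace mongo {
    // Hypothetical helper: returns a User that was valid at the time of the call.
    Status getFreshUser(AuthorizationManager* authzManager, const UserName& name, User** out) {
        Status status = authzManager->acquireUser(nullptr /* OperationContext, omitted */, name, out);
        if (!status.isOK())
            return status;
        if (!(*out)->isValid()) {             // cache entry was invalidated in the meantime
            authzManager->releaseUser(*out);  // hand it back so the manager may destroy it
            status = authzManager->acquireUser(nullptr, name, out);
        }
        return status;
    }
    }  // namespace mongo
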
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index 0faaaa84bc6..dd44b200f60 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -51,132 +51,127 @@
namespace mongo {
namespace {
- // How often to check with the config servers whether authorization information has changed.
- int userCacheInvalidationIntervalSecs = 30; // 30 second default
- stdx::mutex invalidationIntervalMutex;
- stdx::condition_variable invalidationIntervalChangedCondition;
- Date_t lastInvalidationTime;
-
- class ExportedInvalidationIntervalParameter : public ExportedServerParameter<int> {
- public:
- ExportedInvalidationIntervalParameter() :
- ExportedServerParameter<int>(ServerParameterSet::getGlobal(),
- "userCacheInvalidationIntervalSecs",
- &userCacheInvalidationIntervalSecs,
- true,
- true) {}
-
- virtual Status validate( const int& potentialNewValue )
- {
- if (potentialNewValue < 1 || potentialNewValue > 86400) {
- return Status(ErrorCodes::BadValue,
- "userCacheInvalidationIntervalSecs must be between 1 "
- "and 86400 (24 hours)");
- }
- return Status::OK();
+// How often to check with the config servers whether authorization information has changed.
+int userCacheInvalidationIntervalSecs = 30; // 30 second default
+stdx::mutex invalidationIntervalMutex;
+stdx::condition_variable invalidationIntervalChangedCondition;
+Date_t lastInvalidationTime;
+
+class ExportedInvalidationIntervalParameter : public ExportedServerParameter<int> {
+public:
+ ExportedInvalidationIntervalParameter()
+ : ExportedServerParameter<int>(ServerParameterSet::getGlobal(),
+ "userCacheInvalidationIntervalSecs",
+ &userCacheInvalidationIntervalSecs,
+ true,
+ true) {}
+
+ virtual Status validate(const int& potentialNewValue) {
+ if (potentialNewValue < 1 || potentialNewValue > 86400) {
+ return Status(ErrorCodes::BadValue,
+ "userCacheInvalidationIntervalSecs must be between 1 "
+ "and 86400 (24 hours)");
}
+ return Status::OK();
+ }
- // Without this the compiler complains that defining set(const int&)
- // hides set(const BSONElement&)
- using ExportedServerParameter<int>::set;
+ // Without this the compiler complains that defining set(const int&)
+ // hides set(const BSONElement&)
+ using ExportedServerParameter<int>::set;
- virtual Status set( const int& newValue ) {
- stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
- Status status = ExportedServerParameter<int>::set(newValue);
- invalidationIntervalChangedCondition.notify_all();
- return status;
- }
+ virtual Status set(const int& newValue) {
+ stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
+ Status status = ExportedServerParameter<int>::set(newValue);
+ invalidationIntervalChangedCondition.notify_all();
+ return status;
+ }
- } exportedIntervalParam;
-
- StatusWith<OID> getCurrentCacheGeneration() {
- try {
- BSONObjBuilder result;
- const bool ok = grid.catalogManager()->runUserManagementReadCommand(
- "admin",
- BSON("_getUserCacheGeneration" << 1),
- &result);
- if (!ok) {
- return Command::getStatusFromCommandResult(result.obj());
- }
- return result.obj()["cacheGeneration"].OID();
- } catch (const DBException& e) {
- return StatusWith<OID>(e.toStatus());
- } catch (const std::exception& e) {
- return StatusWith<OID>(ErrorCodes::UnknownError, e.what());
+} exportedIntervalParam;
+
+StatusWith<OID> getCurrentCacheGeneration() {
+ try {
+ BSONObjBuilder result;
+ const bool ok = grid.catalogManager()->runUserManagementReadCommand(
+ "admin", BSON("_getUserCacheGeneration" << 1), &result);
+ if (!ok) {
+ return Command::getStatusFromCommandResult(result.obj());
}
+ return result.obj()["cacheGeneration"].OID();
+ } catch (const DBException& e) {
+ return StatusWith<OID>(e.toStatus());
+ } catch (const std::exception& e) {
+ return StatusWith<OID>(ErrorCodes::UnknownError, e.what());
}
+}
-} // namespace
+} // namespace
- UserCacheInvalidator::UserCacheInvalidator(AuthorizationManager* authzManager) :
- _authzManager(authzManager) {
+UserCacheInvalidator::UserCacheInvalidator(AuthorizationManager* authzManager)
+ : _authzManager(authzManager) {
+ StatusWith<OID> currentGeneration = getCurrentCacheGeneration();
+ if (currentGeneration.isOK()) {
+ _previousCacheGeneration = currentGeneration.getValue();
+ return;
+ }
- StatusWith<OID> currentGeneration = getCurrentCacheGeneration();
- if (currentGeneration.isOK()) {
- _previousCacheGeneration = currentGeneration.getValue();
- return;
+ if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
+ warning() << "_getUserCacheGeneration command not found while fetching initial user "
+ "cache generation from the config server(s). This most likely means you are "
+ "running an outdated version of mongod on the config servers";
+ } else {
+ warning() << "An error occurred while fetching initial user cache generation from "
+ "config servers: " << currentGeneration.getStatus();
+ }
+ _previousCacheGeneration = OID();
+}
+
+void UserCacheInvalidator::run() {
+ Client::initThread("UserCacheInvalidator");
+ lastInvalidationTime = Date_t::now();
+
+ while (true) {
+ stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
+ Date_t sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
+ Date_t now = Date_t::now();
+ while (now < sleepUntil) {
+ invalidationIntervalChangedCondition.wait_for(lock, sleepUntil - now);
+ sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
+ now = Date_t::now();
}
+ lastInvalidationTime = now;
+ lock.unlock();
- if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
- warning() << "_getUserCacheGeneration command not found while fetching initial user "
- "cache generation from the config server(s). This most likely means you are "
- "running an outdated version of mongod on the config servers";
- } else {
- warning() << "An error occurred while fetching initial user cache generation from "
- "config servers: " << currentGeneration.getStatus();
+ if (inShutdown()) {
+ break;
}
- _previousCacheGeneration = OID();
- }
-
- void UserCacheInvalidator::run() {
- Client::initThread("UserCacheInvalidator");
- lastInvalidationTime = Date_t::now();
-
- while (true) {
- stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
- Date_t sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
- Date_t now = Date_t::now();
- while (now < sleepUntil) {
- invalidationIntervalChangedCondition.wait_for(lock, sleepUntil - now);
- sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
- now = Date_t::now();
- }
- lastInvalidationTime = now;
- lock.unlock();
-
- if (inShutdown()) {
- break;
- }
- StatusWith<OID> currentGeneration = getCurrentCacheGeneration();
- if (!currentGeneration.isOK()) {
- if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
- warning() << "_getUserCacheGeneration command not found on config server(s), "
- "this most likely means you are running an outdated version of mongod "
- "on the config servers" << std::endl;
- } else {
- warning() << "An error occurred while fetching current user cache generation "
- "to check if user cache needs invalidation: " <<
- currentGeneration.getStatus() << std::endl;
- }
- // When in doubt, invalidate the cache
- _authzManager->invalidateUserCache();
- continue;
+ StatusWith<OID> currentGeneration = getCurrentCacheGeneration();
+ if (!currentGeneration.isOK()) {
+ if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
+ warning() << "_getUserCacheGeneration command not found on config server(s), "
+ "this most likely means you are running an outdated version of mongod "
+ "on the config servers" << std::endl;
+ } else {
+ warning() << "An error occurred while fetching current user cache generation "
+ "to check if user cache needs invalidation: "
+ << currentGeneration.getStatus() << std::endl;
}
+ // When in doubt, invalidate the cache
+ _authzManager->invalidateUserCache();
+ continue;
+ }
- if (currentGeneration.getValue() != _previousCacheGeneration) {
- log() << "User cache generation changed from " << _previousCacheGeneration <<
- " to " << currentGeneration.getValue() << "; invalidating user cache" <<
- std::endl;
- _authzManager->invalidateUserCache();
- _previousCacheGeneration = currentGeneration.getValue();
- }
+ if (currentGeneration.getValue() != _previousCacheGeneration) {
+ log() << "User cache generation changed from " << _previousCacheGeneration << " to "
+ << currentGeneration.getValue() << "; invalidating user cache" << std::endl;
+ _authzManager->invalidateUserCache();
+ _previousCacheGeneration = currentGeneration.getValue();
}
}
+}
- std::string UserCacheInvalidator::name() const {
- return "UserCacheInvalidatorThread";
- }
+std::string UserCacheInvalidator::name() const {
+ return "UserCacheInvalidatorThread";
+}
-} // namespace mongo
+} // namespace mongo
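
The run() loop above recomputes its wake-up deadline inside the wait loop so that a change to userCacheInvalidationIntervalSecs, signalled through the condition variable, takes effect immediately. A self-contained sketch of that pattern using only the standard library, with placeholder names:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex intervalMutex;
    std::condition_variable intervalChanged;
    int intervalSecs = 30;  // another thread may change this, then notify

    // Sleep until lastWake + intervalSecs, re-reading the interval whenever notified.
    void sleepUntilNextCheck(std::chrono::steady_clock::time_point lastWake) {
        std::unique_lock<std::mutex> lock(intervalMutex);
        auto deadline = lastWake + std::chrono::seconds(intervalSecs);
        while (std::chrono::steady_clock::now() < deadline) {
            intervalChanged.wait_until(lock, deadline);
            deadline = lastWake + std::chrono::seconds(intervalSecs);  // interval may have changed
        }
    }
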
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.h b/src/mongo/db/auth/user_cache_invalidator_job.h
index 8742a28c79c..3eb173b0a56 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.h
+++ b/src/mongo/db/auth/user_cache_invalidator_job.h
@@ -32,25 +32,25 @@
namespace mongo {
- class AuthorizationManager;
-
- /**
- * Background job that runs only in mongos and periodically checks in with the config servers
- * to determine whether any authorization information has changed, and if so causes the
- * AuthorizationManager to throw out its in-memory cache of User objects (which contains the
- * users' credentials, roles, privileges, etc).
- */
- class UserCacheInvalidator : public BackgroundJob {
- public:
- explicit UserCacheInvalidator(AuthorizationManager* authzManager);
-
- protected:
- virtual std::string name() const;
- virtual void run();
-
- private:
- AuthorizationManager* _authzManager;
- OID _previousCacheGeneration;
- };
-
-} // namespace mongo
+class AuthorizationManager;
+
+/**
+ * Background job that runs only in mongos and periodically checks in with the config servers
+ * to determine whether any authorization information has changed, and if so causes the
+ * AuthorizationManager to throw out its in-memory cache of User objects (which contains the
+ * users' credentials, roles, privileges, etc).
+ */
+class UserCacheInvalidator : public BackgroundJob {
+public:
+ explicit UserCacheInvalidator(AuthorizationManager* authzManager);
+
+protected:
+ virtual std::string name() const;
+ virtual void run();
+
+private:
+ AuthorizationManager* _authzManager;
+ OID _previousCacheGeneration;
+};
+
+} // namespace mongo
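
A sketch of how mongos startup code might wire this job up, assuming BackgroundJob exposes the usual go() entry point and that getGlobalAuthorizationManager() is available; both are assumptions made for illustration, not something this change adds:

    #include "mongo/db/auth/authorization_manager_global.h"
    #include "mongo/db/auth/user_cache_invalidator_job.h"

    namespace mongo {
    void startUserCacheInvalidator() {
        // The job must outlive the thread it spawns; this sketch leaks it deliberately,
        // while real startup code would own it for the lifetime of the process.
        auto invalidator = new UserCacheInvalidator(getGlobalAuthorizationManager());
        invalidator->go();  // assumed BackgroundJob::go(): runs run() on its own thread
    }
    }  // namespace mongo
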
diff --git a/src/mongo/db/auth/user_document_parser.cpp b/src/mongo/db/auth/user_document_parser.cpp
index d4a095c661d..058829622f5 100644
--- a/src/mongo/db/auth/user_document_parser.cpp
+++ b/src/mongo/db/auth/user_document_parser.cpp
@@ -44,142 +44,135 @@
namespace mongo {
namespace {
- const std::string ADMIN_DBNAME = "admin";
-
- const std::string ROLES_FIELD_NAME = "roles";
- const std::string PRIVILEGES_FIELD_NAME = "inheritedPrivileges";
- const std::string INHERITED_ROLES_FIELD_NAME = "inheritedRoles";
- const std::string OTHER_DB_ROLES_FIELD_NAME = "otherDBRoles";
- const std::string READONLY_FIELD_NAME = "readOnly";
- const std::string CREDENTIALS_FIELD_NAME = "credentials";
- const std::string ROLE_NAME_FIELD_NAME = "role";
- const std::string ROLE_DB_FIELD_NAME = "db";
- const std::string MONGODB_CR_CREDENTIAL_FIELD_NAME = "MONGODB-CR";
- const std::string SCRAM_CREDENTIAL_FIELD_NAME = "SCRAM-SHA-1";
- const std::string MONGODB_EXTERNAL_CREDENTIAL_FIELD_NAME = "external";
-
- inline Status _badValue(const char* reason, int location) {
- return Status(ErrorCodes::BadValue, reason, location);
+const std::string ADMIN_DBNAME = "admin";
+
+const std::string ROLES_FIELD_NAME = "roles";
+const std::string PRIVILEGES_FIELD_NAME = "inheritedPrivileges";
+const std::string INHERITED_ROLES_FIELD_NAME = "inheritedRoles";
+const std::string OTHER_DB_ROLES_FIELD_NAME = "otherDBRoles";
+const std::string READONLY_FIELD_NAME = "readOnly";
+const std::string CREDENTIALS_FIELD_NAME = "credentials";
+const std::string ROLE_NAME_FIELD_NAME = "role";
+const std::string ROLE_DB_FIELD_NAME = "db";
+const std::string MONGODB_CR_CREDENTIAL_FIELD_NAME = "MONGODB-CR";
+const std::string SCRAM_CREDENTIAL_FIELD_NAME = "SCRAM-SHA-1";
+const std::string MONGODB_EXTERNAL_CREDENTIAL_FIELD_NAME = "external";
+
+inline Status _badValue(const char* reason, int location) {
+ return Status(ErrorCodes::BadValue, reason, location);
+}
+
+inline Status _badValue(const std::string& reason, int location) {
+ return Status(ErrorCodes::BadValue, reason, location);
+}
+
+Status _checkV1RolesArray(const BSONElement& rolesElement) {
+ if (rolesElement.type() != Array) {
+ return _badValue("Role fields must be an array when present in system.users entries", 0);
}
-
- inline Status _badValue(const std::string& reason, int location) {
- return Status(ErrorCodes::BadValue, reason, location);
- }
-
- Status _checkV1RolesArray(const BSONElement& rolesElement) {
- if (rolesElement.type() != Array) {
- return _badValue("Role fields must be an array when present in system.users entries",
- 0);
- }
- for (BSONObjIterator iter(rolesElement.embeddedObject()); iter.more(); iter.next()) {
- BSONElement element = *iter;
- if (element.type() != String || element.valueStringData().empty()) {
- return _badValue("Roles must be non-empty strings.", 0);
- }
+ for (BSONObjIterator iter(rolesElement.embeddedObject()); iter.more(); iter.next()) {
+ BSONElement element = *iter;
+ if (element.type() != String || element.valueStringData().empty()) {
+ return _badValue("Roles must be non-empty strings.", 0);
}
- return Status::OK();
}
+ return Status::OK();
+}
} // namespace
- std::string V1UserDocumentParser::extractUserNameFromUserDocument(
- const BSONObj& doc) const {
- return doc[AuthorizationManager::V1_USER_NAME_FIELD_NAME].str();
- }
-
- Status V1UserDocumentParser::initializeUserCredentialsFromUserDocument(
- User* user, const BSONObj& privDoc) const {
- User::CredentialData credentials;
- if (privDoc.hasField(AuthorizationManager::PASSWORD_FIELD_NAME)) {
- credentials.password = privDoc[AuthorizationManager::PASSWORD_FIELD_NAME].String();
- credentials.isExternal = false;
- }
- else if (privDoc.hasField(AuthorizationManager::V1_USER_SOURCE_FIELD_NAME)) {
- std::string userSource = privDoc[AuthorizationManager::V1_USER_SOURCE_FIELD_NAME].String();
- if (userSource != "$external") {
- return Status(ErrorCodes::UnsupportedFormat,
- "Cannot extract credentials from user documents without a password "
- "and with userSource != \"$external\"");
- } else {
- credentials.isExternal = true;
- }
- }
- else {
+std::string V1UserDocumentParser::extractUserNameFromUserDocument(const BSONObj& doc) const {
+ return doc[AuthorizationManager::V1_USER_NAME_FIELD_NAME].str();
+}
+
+Status V1UserDocumentParser::initializeUserCredentialsFromUserDocument(
+ User* user, const BSONObj& privDoc) const {
+ User::CredentialData credentials;
+ if (privDoc.hasField(AuthorizationManager::PASSWORD_FIELD_NAME)) {
+ credentials.password = privDoc[AuthorizationManager::PASSWORD_FIELD_NAME].String();
+ credentials.isExternal = false;
+ } else if (privDoc.hasField(AuthorizationManager::V1_USER_SOURCE_FIELD_NAME)) {
+ std::string userSource = privDoc[AuthorizationManager::V1_USER_SOURCE_FIELD_NAME].String();
+ if (userSource != "$external") {
return Status(ErrorCodes::UnsupportedFormat,
- "Invalid user document: must have one of \"pwd\" and \"userSource\"");
+ "Cannot extract credentials from user documents without a password "
+ "and with userSource != \"$external\"");
+ } else {
+ credentials.isExternal = true;
}
-
- user->setCredentials(credentials);
- return Status::OK();
+ } else {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "Invalid user document: must have one of \"pwd\" and \"userSource\"");
}
- static void _initializeUserRolesFromV0UserDocument(
- User* user, const BSONObj& privDoc, StringData dbname) {
- bool readOnly = privDoc["readOnly"].trueValue();
- if (dbname == "admin") {
- if (readOnly) {
- user->addRole(RoleName(RoleGraph::BUILTIN_ROLE_V0_ADMIN_READ, "admin"));
- } else {
- user->addRole(RoleName(RoleGraph::BUILTIN_ROLE_V0_ADMIN_READ_WRITE, "admin"));
- }
+ user->setCredentials(credentials);
+ return Status::OK();
+}
+
+static void _initializeUserRolesFromV0UserDocument(User* user,
+ const BSONObj& privDoc,
+ StringData dbname) {
+ bool readOnly = privDoc["readOnly"].trueValue();
+ if (dbname == "admin") {
+ if (readOnly) {
+ user->addRole(RoleName(RoleGraph::BUILTIN_ROLE_V0_ADMIN_READ, "admin"));
} else {
- if (readOnly) {
- user->addRole(RoleName(RoleGraph::BUILTIN_ROLE_V0_READ, dbname));
- } else {
- user->addRole(RoleName(RoleGraph::BUILTIN_ROLE_V0_READ_WRITE, dbname));
- }
+ user->addRole(RoleName(RoleGraph::BUILTIN_ROLE_V0_ADMIN_READ_WRITE, "admin"));
+ }
+ } else {
+ if (readOnly) {
+ user->addRole(RoleName(RoleGraph::BUILTIN_ROLE_V0_READ, dbname));
+ } else {
+ user->addRole(RoleName(RoleGraph::BUILTIN_ROLE_V0_READ_WRITE, dbname));
}
}
+}
- Status _initializeUserRolesFromV1RolesArray(User* user,
- const BSONElement& rolesElement,
- StringData dbname) {
- static const char privilegesTypeMismatchMessage[] =
- "Roles in V1 user documents must be enumerated in an array of strings.";
+Status _initializeUserRolesFromV1RolesArray(User* user,
+ const BSONElement& rolesElement,
+ StringData dbname) {
+ static const char privilegesTypeMismatchMessage[] =
+ "Roles in V1 user documents must be enumerated in an array of strings.";
- if (rolesElement.type() != Array)
- return Status(ErrorCodes::TypeMismatch, privilegesTypeMismatchMessage);
+ if (rolesElement.type() != Array)
+ return Status(ErrorCodes::TypeMismatch, privilegesTypeMismatchMessage);
- for (BSONObjIterator iter(rolesElement.embeddedObject()); iter.more(); iter.next()) {
- BSONElement roleElement = *iter;
- if (roleElement.type() != String)
- return Status(ErrorCodes::TypeMismatch, privilegesTypeMismatchMessage);
+ for (BSONObjIterator iter(rolesElement.embeddedObject()); iter.more(); iter.next()) {
+ BSONElement roleElement = *iter;
+ if (roleElement.type() != String)
+ return Status(ErrorCodes::TypeMismatch, privilegesTypeMismatchMessage);
- user->addRole(RoleName(roleElement.String(), dbname));
- }
- return Status::OK();
+ user->addRole(RoleName(roleElement.String(), dbname));
+ }
+ return Status::OK();
+}
+
+static Status _initializeUserRolesFromV1UserDocument(User* user,
+ const BSONObj& privDoc,
+ StringData dbname) {
+ if (!privDoc[READONLY_FIELD_NAME].eoo()) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "User documents may not contain both \"readonly\" and "
+ "\"roles\" fields");
}
- static Status _initializeUserRolesFromV1UserDocument(
- User* user, const BSONObj& privDoc, StringData dbname) {
-
- if (!privDoc[READONLY_FIELD_NAME].eoo()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "User documents may not contain both \"readonly\" and "
- "\"roles\" fields");
- }
-
- Status status = _initializeUserRolesFromV1RolesArray(user,
- privDoc[ROLES_FIELD_NAME],
- dbname);
- if (!status.isOK()) {
- return status;
- }
+ Status status = _initializeUserRolesFromV1RolesArray(user, privDoc[ROLES_FIELD_NAME], dbname);
+ if (!status.isOK()) {
+ return status;
+ }
- // If "dbname" is the admin database, handle the otherDBPrivileges field, which
- // grants privileges on databases other than "dbname".
- BSONElement otherDbPrivileges = privDoc[OTHER_DB_ROLES_FIELD_NAME];
- if (dbname == ADMIN_DBNAME) {
- switch (otherDbPrivileges.type()) {
+ // If "dbname" is the admin database, handle the otherDBPrivileges field, which
+ // grants privileges on databases other than "dbname".
+ BSONElement otherDbPrivileges = privDoc[OTHER_DB_ROLES_FIELD_NAME];
+ if (dbname == ADMIN_DBNAME) {
+ switch (otherDbPrivileges.type()) {
case EOO:
break;
case Object: {
- for (BSONObjIterator iter(otherDbPrivileges.embeddedObject());
- iter.more(); iter.next()) {
-
+ for (BSONObjIterator iter(otherDbPrivileges.embeddedObject()); iter.more();
+ iter.next()) {
BSONElement rolesElement = *iter;
- status = _initializeUserRolesFromV1RolesArray(user,
- rolesElement,
- rolesElement.fieldName());
+ status = _initializeUserRolesFromV1RolesArray(
+ user, rolesElement, rolesElement.fieldName());
if (!status.isOK())
return status;
}
@@ -188,359 +181,337 @@ namespace {
default:
return Status(ErrorCodes::TypeMismatch,
"Field \"otherDBRoles\" must be an object, if present.");
- }
- }
- else if (!otherDbPrivileges.eoo()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "Only the admin database may contain a field called \"otherDBRoles\"");
}
+ } else if (!otherDbPrivileges.eoo()) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "Only the admin database may contain a field called \"otherDBRoles\"");
+ }
- return Status::OK();
+ return Status::OK();
+}
+
+Status V1UserDocumentParser::initializeUserRolesFromUserDocument(User* user,
+ const BSONObj& privDoc,
+ StringData dbname) const {
+ if (!privDoc.hasField("roles")) {
+ _initializeUserRolesFromV0UserDocument(user, privDoc, dbname);
+ } else {
+ return _initializeUserRolesFromV1UserDocument(user, privDoc, dbname);
}
+ return Status::OK();
+}
- Status V1UserDocumentParser::initializeUserRolesFromUserDocument(
- User* user, const BSONObj& privDoc, StringData dbname) const {
- if (!privDoc.hasField("roles")) {
- _initializeUserRolesFromV0UserDocument(user, privDoc, dbname);
- } else {
- return _initializeUserRolesFromV1UserDocument(user, privDoc, dbname);
+
+Status _checkV2RolesArray(const BSONElement& rolesElement) {
+ if (rolesElement.eoo()) {
+ return _badValue("User document needs 'roles' field to be provided", 0);
+ }
+ if (rolesElement.type() != Array) {
+ return _badValue("'roles' field must be an array", 0);
+ }
+ for (BSONObjIterator iter(rolesElement.embeddedObject()); iter.more(); iter.next()) {
+ if ((*iter).type() != Object) {
+ return _badValue("Elements in 'roles' array must be objects", 0);
}
- return Status::OK();
+ Status status = V2UserDocumentParser::checkValidRoleObject((*iter).Obj());
+ if (!status.isOK())
+ return status;
+ }
+ return Status::OK();
+}
+
+Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
+ BSONElement userElement = doc[AuthorizationManager::USER_NAME_FIELD_NAME];
+ BSONElement userDBElement = doc[AuthorizationManager::USER_DB_FIELD_NAME];
+ BSONElement credentialsElement = doc[CREDENTIALS_FIELD_NAME];
+ BSONElement rolesElement = doc[ROLES_FIELD_NAME];
+
+ // Validate the "user" element.
+ if (userElement.type() != String)
+ return _badValue("User document needs 'user' field to be a string", 0);
+ if (userElement.valueStringData().empty())
+ return _badValue("User document needs 'user' field to be non-empty", 0);
+
+ // Validate the "db" element
+ if (userDBElement.type() != String || userDBElement.valueStringData().empty()) {
+ return _badValue("User document needs 'db' field to be a non-empty string", 0);
+ }
+ StringData userDBStr = userDBElement.valueStringData();
+ if (!NamespaceString::validDBName(userDBStr) && userDBStr != "$external") {
+ return _badValue(mongoutils::str::stream() << "'" << userDBStr
+ << "' is not a valid value for the db field.",
+ 0);
}
+ // Validate the "credentials" element
+ if (credentialsElement.eoo()) {
+ return _badValue("User document needs 'credentials' object", 0);
+ }
+ if (credentialsElement.type() != Object) {
+ return _badValue("User document needs 'credentials' field to be an object", 0);
+ }
- Status _checkV2RolesArray(const BSONElement& rolesElement) {
- if (rolesElement.eoo()) {
- return _badValue("User document needs 'roles' field to be provided", 0);
- }
- if (rolesElement.type() != Array) {
- return _badValue("'roles' field must be an array", 0);
+ BSONObj credentialsObj = credentialsElement.Obj();
+ if (credentialsObj.isEmpty()) {
+ return _badValue("User document needs 'credentials' field to be a non-empty object", 0);
+ }
+ if (userDBStr == "$external") {
+ BSONElement externalElement = credentialsObj[MONGODB_EXTERNAL_CREDENTIAL_FIELD_NAME];
+ if (externalElement.eoo() || externalElement.type() != Bool || !externalElement.Bool()) {
+ return _badValue(
+ "User documents for users defined on '$external' must have "
+ "'credentials' field set to {external: true}",
+ 0);
}
- for (BSONObjIterator iter(rolesElement.embeddedObject()); iter.more(); iter.next()) {
- if ((*iter).type() != Object) {
- return _badValue("Elements in 'roles' array must objects", 0);
+ } else {
+ BSONElement scramElement = credentialsObj[SCRAM_CREDENTIAL_FIELD_NAME];
+ BSONElement mongoCRElement = credentialsObj[MONGODB_CR_CREDENTIAL_FIELD_NAME];
+
+ if (!mongoCRElement.eoo()) {
+ if (mongoCRElement.type() != String || mongoCRElement.valueStringData().empty()) {
+ return _badValue(
+ "MONGODB-CR credential must be a non-empty string"
+ ", if present",
+ 0);
+ }
+ } else if (!scramElement.eoo()) {
+ if (scramElement.type() != Object) {
+ return _badValue("SCRAM credential must be an object, if present", 0);
}
- Status status = V2UserDocumentParser::checkValidRoleObject((*iter).Obj());
- if (!status.isOK())
- return status;
+ } else {
+ return _badValue(
+ "User document must provide credentials for all "
+ "non-external users",
+ 0);
}
- return Status::OK();
}
- Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
- BSONElement userElement = doc[AuthorizationManager::USER_NAME_FIELD_NAME];
- BSONElement userDBElement = doc[AuthorizationManager::USER_DB_FIELD_NAME];
- BSONElement credentialsElement = doc[CREDENTIALS_FIELD_NAME];
- BSONElement rolesElement = doc[ROLES_FIELD_NAME];
-
- // Validate the "user" element.
- if (userElement.type() != String)
- return _badValue("User document needs 'user' field to be a string", 0);
- if (userElement.valueStringData().empty())
- return _badValue("User document needs 'user' field to be non-empty", 0);
-
- // Validate the "db" element
- if (userDBElement.type() != String || userDBElement.valueStringData().empty()) {
- return _badValue("User document needs 'db' field to be a non-empty string", 0);
- }
- StringData userDBStr = userDBElement.valueStringData();
- if (!NamespaceString::validDBName(userDBStr) && userDBStr != "$external") {
- return _badValue(mongoutils::str::stream() << "'" << userDBStr <<
- "' is not a valid value for the db field.",
- 0);
- }
+ // Validate the "roles" element.
+ Status status = _checkV2RolesArray(rolesElement);
+ if (!status.isOK())
+ return status;
- // Validate the "credentials" element
- if (credentialsElement.eoo()) {
- return _badValue("User document needs 'credentials' object",
- 0);
- }
- if (credentialsElement.type() != Object) {
- return _badValue("User document needs 'credentials' field to be an object", 0);
- }
+ return Status::OK();
+}
+
+std::string V2UserDocumentParser::extractUserNameFromUserDocument(const BSONObj& doc) const {
+ return doc[AuthorizationManager::USER_NAME_FIELD_NAME].str();
+}
- BSONObj credentialsObj = credentialsElement.Obj();
- if (credentialsObj.isEmpty()) {
- return _badValue("User document needs 'credentials' field to be a non-empty object",
- 0);
+Status V2UserDocumentParser::initializeUserCredentialsFromUserDocument(
+ User* user, const BSONObj& privDoc) const {
+ User::CredentialData credentials;
+ std::string userDB = privDoc[AuthorizationManager::USER_DB_FIELD_NAME].String();
+ BSONElement credentialsElement = privDoc[CREDENTIALS_FIELD_NAME];
+ if (!credentialsElement.eoo()) {
+ if (credentialsElement.type() != Object) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "'credentials' field in user documents must be an object");
}
- if (userDBStr == "$external") {
- BSONElement externalElement = credentialsObj[MONGODB_EXTERNAL_CREDENTIAL_FIELD_NAME];
- if (externalElement.eoo() || externalElement.type() != Bool ||
- !externalElement.Bool()) {
- return _badValue("User documents for users defined on '$external' must have "
- "'credentials' field set to {external: true}", 0);
- }
- }
- else {
- BSONElement scramElement = credentialsObj[SCRAM_CREDENTIAL_FIELD_NAME];
- BSONElement mongoCRElement = credentialsObj[MONGODB_CR_CREDENTIAL_FIELD_NAME];
-
- if (!mongoCRElement.eoo()) {
- if (mongoCRElement.type() != String || mongoCRElement.valueStringData().empty()) {
- return _badValue("MONGODB-CR credential must to be a non-empty string"
- ", if present", 0);
- }
- }
- else if (!scramElement.eoo()) {
- if (scramElement.type() != Object) {
- return _badValue("SCRAM credential must be an object, if present", 0);
+ if (userDB == "$external") {
+ BSONElement externalCredentialElement =
+ credentialsElement.Obj()[MONGODB_EXTERNAL_CREDENTIAL_FIELD_NAME];
+ if (!externalCredentialElement.eoo()) {
+ if (externalCredentialElement.type() != Bool || !externalCredentialElement.Bool()) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "'external' field in credentials object must be set to true");
+ } else {
+ credentials.isExternal = true;
}
+ } else {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "User documents defined on '$external' must have the "
+ "credentials field set to {external:true}");
}
- else {
- return _badValue("User document must provide credentials for all "
- "non-external users", 0);
+ } else {
+ BSONElement scramElement = credentialsElement.Obj()[SCRAM_CREDENTIAL_FIELD_NAME];
+ BSONElement mongoCRCredentialElement =
+ credentialsElement.Obj()[MONGODB_CR_CREDENTIAL_FIELD_NAME];
+
+ if (scramElement.eoo() && mongoCRCredentialElement.eoo()) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "User documents must provide credentials for SCRAM-SHA-1 "
+ "or MONGODB-CR authentication");
}
- }
- // Validate the "roles" element.
- Status status = _checkV2RolesArray(rolesElement);
- if (!status.isOK())
- return status;
+ if (!scramElement.eoo()) {
+ // We are asserting rather than returning errors since these
+ // fields should have been prepopulated by the calling code.
+ credentials.scram.iterationCount = scramElement.Obj()["iterationCount"].numberInt();
+ uassert(17501,
+ "Invalid or missing SCRAM iteration count",
+ credentials.scram.iterationCount > 0);
- return Status::OK();
- }
+ credentials.scram.salt = scramElement.Obj()["salt"].str();
+ uassert(17502, "Missing SCRAM salt", !credentials.scram.salt.empty());
- std::string V2UserDocumentParser::extractUserNameFromUserDocument(
- const BSONObj& doc) const {
- return doc[AuthorizationManager::USER_NAME_FIELD_NAME].str();
- }
+ credentials.scram.serverKey = scramElement["serverKey"].str();
+ uassert(17503, "Missing SCRAM serverKey", !credentials.scram.serverKey.empty());
- Status V2UserDocumentParser::initializeUserCredentialsFromUserDocument(
- User* user, const BSONObj& privDoc) const {
- User::CredentialData credentials;
- std::string userDB = privDoc[AuthorizationManager::USER_DB_FIELD_NAME].String();
- BSONElement credentialsElement = privDoc[CREDENTIALS_FIELD_NAME];
- if (!credentialsElement.eoo()) {
- if (credentialsElement.type() != Object) {
- return Status(ErrorCodes::UnsupportedFormat,
- "'credentials' field in user documents must be an object");
+ credentials.scram.storedKey = scramElement["storedKey"].str();
+ uassert(17504, "Missing SCRAM storedKey", !credentials.scram.storedKey.empty());
}
- if (userDB == "$external") {
- BSONElement externalCredentialElement =
- credentialsElement.Obj()[MONGODB_EXTERNAL_CREDENTIAL_FIELD_NAME];
- if (!externalCredentialElement.eoo()) {
- if (externalCredentialElement.type() != Bool ||
- !externalCredentialElement.Bool()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "'external' field in credentials object must be set to true");
- } else {
- credentials.isExternal = true;
- }
- } else {
- return Status(ErrorCodes::UnsupportedFormat,
- "User documents defined on '$external' must provide set "
- "credentials to {external:true}");
- }
- } else {
-
- BSONElement scramElement =
- credentialsElement.Obj()[SCRAM_CREDENTIAL_FIELD_NAME];
- BSONElement mongoCRCredentialElement =
- credentialsElement.Obj()[MONGODB_CR_CREDENTIAL_FIELD_NAME];
-
- if (scramElement.eoo() && mongoCRCredentialElement.eoo()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "User documents must provide credentials for SCRAM-SHA-1 "
- "or MONGODB-CR authentication");
- }
- if (!scramElement.eoo()) {
- // We are asserting rather then returning errors since these
- // fields should have been prepopulated by the calling code.
- credentials.scram.iterationCount =
- scramElement.Obj()["iterationCount"].numberInt();
- uassert(17501, "Invalid or missing SCRAM iteration count",
- credentials.scram.iterationCount > 0);
-
- credentials.scram.salt =
- scramElement.Obj()["salt"].str();
- uassert(17502, "Missing SCRAM salt",
- !credentials.scram.salt.empty());
-
- credentials.scram.serverKey =
- scramElement["serverKey"].str();
- uassert(17503, "Missing SCRAM serverKey",
- !credentials.scram.serverKey.empty());
-
- credentials.scram.storedKey =
- scramElement["storedKey"].str();
- uassert(17504, "Missing SCRAM storedKey",
- !credentials.scram.storedKey.empty());
- }
-
- if (!mongoCRCredentialElement.eoo()) {
- if (mongoCRCredentialElement.type() != String ||
- mongoCRCredentialElement.valueStringData().empty()) {
+ if (!mongoCRCredentialElement.eoo()) {
+ if (mongoCRCredentialElement.type() != String ||
+ mongoCRCredentialElement.valueStringData().empty()) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "MONGODB-CR credentials must be non-empty strings");
+ } else {
+ credentials.password = mongoCRCredentialElement.String();
+ if (credentials.password.empty()) {
return Status(ErrorCodes::UnsupportedFormat,
- "MONGODB-CR credentials must be non-empty strings");
- } else {
- credentials.password = mongoCRCredentialElement.String();
- if (credentials.password.empty()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "User documents must provide authentication credentials");
- }
+ "User documents must provide authentication credentials");
}
}
- credentials.isExternal = false;
}
- } else {
- return Status(ErrorCodes::UnsupportedFormat,
- "Cannot extract credentials from user documents without a "
- "'credentials' field");
+ credentials.isExternal = false;
}
-
- user->setCredentials(credentials);
- return Status::OK();
+ } else {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "Cannot extract credentials from user documents without a "
+ "'credentials' field");
}
- static Status _extractRoleDocumentElements(
- const BSONObj& roleObject,
- BSONElement* roleNameElement,
- BSONElement* roleSourceElement) {
+ user->setCredentials(credentials);
+ return Status::OK();
+}
- *roleNameElement = roleObject[ROLE_NAME_FIELD_NAME];
- *roleSourceElement = roleObject[ROLE_DB_FIELD_NAME];
-
- if (roleNameElement->type() != String || roleNameElement->valueStringData().empty()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "Role names must be non-empty strings");
- }
- if (roleSourceElement->type() != String || roleSourceElement->valueStringData().empty()) {
- return Status(ErrorCodes::UnsupportedFormat, "Role db must be non-empty strings");
- }
+static Status _extractRoleDocumentElements(const BSONObj& roleObject,
+ BSONElement* roleNameElement,
+ BSONElement* roleSourceElement) {
+ *roleNameElement = roleObject[ROLE_NAME_FIELD_NAME];
+ *roleSourceElement = roleObject[ROLE_DB_FIELD_NAME];
- return Status::OK();
+ if (roleNameElement->type() != String || roleNameElement->valueStringData().empty()) {
+ return Status(ErrorCodes::UnsupportedFormat, "Role names must be non-empty strings");
}
-
- Status V2UserDocumentParser::checkValidRoleObject(const BSONObj& roleObject) {
- BSONElement roleNameElement;
- BSONElement roleSourceElement;
- return _extractRoleDocumentElements(
- roleObject,
- &roleNameElement,
- &roleSourceElement);
+ if (roleSourceElement->type() != String || roleSourceElement->valueStringData().empty()) {
+ return Status(ErrorCodes::UnsupportedFormat, "Role db must be non-empty strings");
}
- Status V2UserDocumentParser::parseRoleName(const BSONObj& roleObject, RoleName* result) {
- BSONElement roleNameElement;
- BSONElement roleSourceElement;
- Status status = _extractRoleDocumentElements(
- roleObject,
- &roleNameElement,
- &roleSourceElement);
- if (!status.isOK())
- return status;
- *result = RoleName(roleNameElement.str(), roleSourceElement.str());
- return status;
- }
+ return Status::OK();
+}
- Status V2UserDocumentParser::parseRoleVector(const BSONArray& rolesArray,
- std::vector<RoleName>* result) {
- std::vector<RoleName> roles;
- for (BSONObjIterator it(rolesArray); it.more(); it.next()) {
- if ((*it).type() != Object) {
- return Status(ErrorCodes::TypeMismatch, "Roles must be objects.");
- }
- RoleName role;
- Status status = parseRoleName((*it).Obj(), &role);
- if (!status.isOK())
- return status;
- roles.push_back(role);
+Status V2UserDocumentParser::checkValidRoleObject(const BSONObj& roleObject) {
+ BSONElement roleNameElement;
+ BSONElement roleSourceElement;
+ return _extractRoleDocumentElements(roleObject, &roleNameElement, &roleSourceElement);
+}
+
+Status V2UserDocumentParser::parseRoleName(const BSONObj& roleObject, RoleName* result) {
+ BSONElement roleNameElement;
+ BSONElement roleSourceElement;
+ Status status = _extractRoleDocumentElements(roleObject, &roleNameElement, &roleSourceElement);
+ if (!status.isOK())
+ return status;
+ *result = RoleName(roleNameElement.str(), roleSourceElement.str());
+ return status;
+}
+
+Status V2UserDocumentParser::parseRoleVector(const BSONArray& rolesArray,
+ std::vector<RoleName>* result) {
+ std::vector<RoleName> roles;
+ for (BSONObjIterator it(rolesArray); it.more(); it.next()) {
+ if ((*it).type() != Object) {
+ return Status(ErrorCodes::TypeMismatch, "Roles must be objects.");
}
- std::swap(*result, roles);
- return Status::OK();
+ RoleName role;
+ Status status = parseRoleName((*it).Obj(), &role);
+ if (!status.isOK())
+ return status;
+ roles.push_back(role);
}
+ std::swap(*result, roles);
+ return Status::OK();
+}
- Status V2UserDocumentParser::initializeUserRolesFromUserDocument(
- const BSONObj& privDoc, User* user) const {
+Status V2UserDocumentParser::initializeUserRolesFromUserDocument(const BSONObj& privDoc,
+ User* user) const {
+ BSONElement rolesElement = privDoc[ROLES_FIELD_NAME];
- BSONElement rolesElement = privDoc[ROLES_FIELD_NAME];
+ if (rolesElement.type() != Array) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "User document needs 'roles' field to be an array");
+ }
- if (rolesElement.type() != Array) {
+ std::vector<RoleName> roles;
+ for (BSONObjIterator it(rolesElement.Obj()); it.more(); it.next()) {
+ if ((*it).type() != Object) {
return Status(ErrorCodes::UnsupportedFormat,
- "User document needs 'roles' field to be an array");
+ "User document needs values in 'roles' array to be sub-documents");
}
+ BSONObj roleObject = (*it).Obj();
- std::vector<RoleName> roles;
- for (BSONObjIterator it(rolesElement.Obj()); it.more(); it.next()) {
- if ((*it).type() != Object) {
- return Status(ErrorCodes::UnsupportedFormat,
- "User document needs values in 'roles' array to be a sub-documents");
- }
- BSONObj roleObject = (*it).Obj();
-
- RoleName role;
- Status status = parseRoleName(roleObject, &role);
- if (!status.isOK()) {
- return status;
- }
- roles.push_back(role);
+ RoleName role;
+ Status status = parseRoleName(roleObject, &role);
+ if (!status.isOK()) {
+ return status;
}
- user->setRoles(makeRoleNameIteratorForContainer(roles));
- return Status::OK();
+ roles.push_back(role);
}
+ user->setRoles(makeRoleNameIteratorForContainer(roles));
+ return Status::OK();
+}
- Status V2UserDocumentParser::initializeUserIndirectRolesFromUserDocument(
- const BSONObj& privDoc, User* user) const {
+Status V2UserDocumentParser::initializeUserIndirectRolesFromUserDocument(const BSONObj& privDoc,
+ User* user) const {
+ BSONElement indirectRolesElement = privDoc[INHERITED_ROLES_FIELD_NAME];
- BSONElement indirectRolesElement = privDoc[INHERITED_ROLES_FIELD_NAME];
+ if (indirectRolesElement.type() != Array) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "User document needs 'inheritedRoles' field to be an array");
+ }
- if (indirectRolesElement.type() != Array) {
+ std::vector<RoleName> indirectRoles;
+ for (BSONObjIterator it(indirectRolesElement.Obj()); it.more(); it.next()) {
+ if ((*it).type() != Object) {
return Status(ErrorCodes::UnsupportedFormat,
- "User document needs 'inheritedRoles' field to be an array");
+ "User document needs values in 'inheritedRoles'"
+ " array to be sub-documents");
}
+ BSONObj indirectRoleObject = (*it).Obj();
- std::vector<RoleName> indirectRoles;
- for (BSONObjIterator it(indirectRolesElement.Obj()); it.more(); it.next()) {
- if ((*it).type() != Object) {
- return Status(ErrorCodes::UnsupportedFormat,
- "User document needs values in 'inheritedRoles'"
- " array to be a sub-documents");
- }
- BSONObj indirectRoleObject = (*it).Obj();
-
- RoleName indirectRole;
- Status status = parseRoleName(indirectRoleObject, &indirectRole);
- if (!status.isOK()) {
- return status;
- }
- indirectRoles.push_back(indirectRole);
+ RoleName indirectRole;
+ Status status = parseRoleName(indirectRoleObject, &indirectRole);
+ if (!status.isOK()) {
+ return status;
}
- user->setIndirectRoles(makeRoleNameIteratorForContainer(indirectRoles));
+ indirectRoles.push_back(indirectRole);
+ }
+ user->setIndirectRoles(makeRoleNameIteratorForContainer(indirectRoles));
+ return Status::OK();
+}
+
+Status V2UserDocumentParser::initializeUserPrivilegesFromUserDocument(const BSONObj& doc,
+ User* user) const {
+ BSONElement privilegesElement = doc[PRIVILEGES_FIELD_NAME];
+ if (privilegesElement.eoo())
return Status::OK();
+ if (privilegesElement.type() != Array) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "User document 'inheritedPrivileges' element must be Array if present.");
}
-
- Status V2UserDocumentParser::initializeUserPrivilegesFromUserDocument(const BSONObj& doc,
- User* user) const {
- BSONElement privilegesElement = doc[PRIVILEGES_FIELD_NAME];
- if (privilegesElement.eoo())
- return Status::OK();
- if (privilegesElement.type() != Array) {
- return Status(ErrorCodes::UnsupportedFormat,
- "User document 'inheritedPrivileges' element must be Array if present.");
+ PrivilegeVector privileges;
+ std::string errmsg;
+ for (BSONObjIterator it(privilegesElement.Obj()); it.more(); it.next()) {
+ if ((*it).type() != Object) {
+ warning() << "Wrong type of element in inheritedPrivileges array for "
+ << user->getName() << ": " << *it;
+ continue;
}
- PrivilegeVector privileges;
- std::string errmsg;
- for (BSONObjIterator it(privilegesElement.Obj()); it.more(); it.next()) {
- if ((*it).type() != Object) {
- warning() << "Wrong type of element in inheritedPrivileges array for " <<
- user->getName() << ": " << *it;
- continue;
- }
- Privilege privilege;
- ParsedPrivilege pp;
- if (!pp.parseBSON((*it).Obj(), &errmsg) ||
- !ParsedPrivilege::parsedPrivilegeToPrivilege(pp, &privilege, &errmsg)) {
-
- warning() << "Could not parse privilege element in user document for " <<
- user->getName() << ": " << errmsg;
- continue;
- }
- privileges.push_back(privilege);
+ Privilege privilege;
+ ParsedPrivilege pp;
+ if (!pp.parseBSON((*it).Obj(), &errmsg) ||
+ !ParsedPrivilege::parsedPrivilegeToPrivilege(pp, &privilege, &errmsg)) {
+ warning() << "Could not parse privilege element in user document for "
+ << user->getName() << ": " << errmsg;
+ continue;
}
- user->setPrivileges(privileges);
- return Status::OK();
+ privileges.push_back(privilege);
}
+ user->setPrivileges(privileges);
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
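
For reference, a sketch of a V2 user document that satisfies the checks in checkValidUserDocument() above, fed through the parser; the SCRAM field values and iteration count are placeholders, not real credentials:

    #include "mongo/db/auth/user.h"
    #include "mongo/db/auth/user_document_parser.h"
    #include "mongo/db/jsobj.h"

    namespace mongo {
    Status parseExampleUserDoc() {
        BSONObj doc = BSON("user" << "spencer" << "db" << "test"
                                  << "credentials"
                                  << BSON("SCRAM-SHA-1"
                                          << BSON("iterationCount" << 10000
                                                  << "salt" << "placeholderSalt"
                                                  << "storedKey" << "placeholderStoredKey"
                                                  << "serverKey" << "placeholderServerKey"))
                                  << "roles" << BSON_ARRAY(BSON("role" << "readWrite"
                                                                << "db" << "test")));
        V2UserDocumentParser parser;
        Status status = parser.checkValidUserDocument(doc);
        if (!status.isOK())
            return status;
        User user(UserName("spencer", "test"));
        status = parser.initializeUserCredentialsFromUserDocument(&user, doc);
        if (!status.isOK())
            return status;
        return parser.initializeUserRolesFromUserDocument(doc, &user);
    }
    }  // namespace mongo
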
diff --git a/src/mongo/db/auth/user_document_parser.h b/src/mongo/db/auth/user_document_parser.h
index 28b93ee5673..502a3f349ea 100644
--- a/src/mongo/db/auth/user_document_parser.h
+++ b/src/mongo/db/auth/user_document_parser.h
@@ -36,41 +36,43 @@
namespace mongo {
- class V1UserDocumentParser {
- MONGO_DISALLOW_COPYING(V1UserDocumentParser);
- public:
- V1UserDocumentParser() {}
- std::string extractUserNameFromUserDocument(const BSONObj& doc) const;
+class V1UserDocumentParser {
+ MONGO_DISALLOW_COPYING(V1UserDocumentParser);
- Status initializeUserCredentialsFromUserDocument(User* user,
- const BSONObj& privDoc) const;
+public:
+ V1UserDocumentParser() {}
+ std::string extractUserNameFromUserDocument(const BSONObj& doc) const;
- Status initializeUserRolesFromUserDocument(
- User* user, const BSONObj& privDoc, StringData dbname) const;
- };
+ Status initializeUserCredentialsFromUserDocument(User* user, const BSONObj& privDoc) const;
- class V2UserDocumentParser {
- MONGO_DISALLOW_COPYING(V2UserDocumentParser);
- public:
- V2UserDocumentParser() {}
- Status checkValidUserDocument(const BSONObj& doc) const;
+ Status initializeUserRolesFromUserDocument(User* user,
+ const BSONObj& privDoc,
+ StringData dbname) const;
+};
- /**
- * Returns Status::OK() iff the given BSONObj describes a valid element from a roles array.
- */
- static Status checkValidRoleObject(const BSONObj& roleObject);
+class V2UserDocumentParser {
+ MONGO_DISALLOW_COPYING(V2UserDocumentParser);
- static Status parseRoleName(const BSONObj& roleObject, RoleName* result);
+public:
+ V2UserDocumentParser() {}
+ Status checkValidUserDocument(const BSONObj& doc) const;
- static Status parseRoleVector(const BSONArray& rolesArray, std::vector<RoleName>* result);
+ /**
+ * Returns Status::OK() iff the given BSONObj describes a valid element from a roles array.
+ */
+ static Status checkValidRoleObject(const BSONObj& roleObject);
- std::string extractUserNameFromUserDocument(const BSONObj& doc) const;
+ static Status parseRoleName(const BSONObj& roleObject, RoleName* result);
- Status initializeUserCredentialsFromUserDocument(User* user, const BSONObj& privDoc) const;
+ static Status parseRoleVector(const BSONArray& rolesArray, std::vector<RoleName>* result);
- Status initializeUserRolesFromUserDocument(const BSONObj& doc, User* user) const;
- Status initializeUserIndirectRolesFromUserDocument(const BSONObj& doc, User* user) const;
- Status initializeUserPrivilegesFromUserDocument(const BSONObj& doc, User* user) const;
- };
+ std::string extractUserNameFromUserDocument(const BSONObj& doc) const;
-} // namespace mongo
+ Status initializeUserCredentialsFromUserDocument(User* user, const BSONObj& privDoc) const;
+
+ Status initializeUserRolesFromUserDocument(const BSONObj& doc, User* user) const;
+ Status initializeUserIndirectRolesFromUserDocument(const BSONObj& doc, User* user) const;
+ Status initializeUserPrivilegesFromUserDocument(const BSONObj& doc, User* user) const;
+};
+
+} // namespace mongo
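Editor's sketch (not part of the patch): how a caller might drive the V2UserDocumentParser declared above, stopping at the first failing step. It assumes the usual mongo User and Status headers are available; the helper name and include paths other than user_document_parser.h are assumptions. Note the asymmetric argument orders declared above: credentials take (user, doc) while roles and privileges take (doc, user).

#include "mongo/base/status.h"
#include "mongo/db/auth/user.h"
#include "mongo/db/auth/user_document_parser.h"

namespace {

// Hypothetical helper, not part of this patch: validate a v2 user document and then
// populate the User's credentials, roles, and privileges from it.
mongo::Status initializeUserFromV2Document(const mongo::BSONObj& userDoc, mongo::User* user) {
    mongo::V2UserDocumentParser parser;
    mongo::Status status = parser.checkValidUserDocument(userDoc);
    if (!status.isOK())
        return status;
    status = parser.initializeUserCredentialsFromUserDocument(user, userDoc);
    if (!status.isOK())
        return status;
    status = parser.initializeUserRolesFromUserDocument(userDoc, user);
    if (!status.isOK())
        return status;
    return parser.initializeUserPrivilegesFromUserDocument(userDoc, user);
}

}  // namespace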
diff --git a/src/mongo/db/auth/user_document_parser_test.cpp b/src/mongo/db/auth/user_document_parser_test.cpp
index d2dff197b12..ae6c566d109 100644
--- a/src/mongo/db/auth/user_document_parser_test.cpp
+++ b/src/mongo/db/auth/user_document_parser_test.cpp
@@ -44,360 +44,450 @@
namespace mongo {
namespace {
- using std::unique_ptr;
-
- class V1UserDocumentParsing : public ::mongo::unittest::Test {
- public:
- V1UserDocumentParsing() {}
-
- unique_ptr<User> user;
- unique_ptr<User> adminUser;
- V1UserDocumentParser v1parser;
-
- void setUp() {
- resetUsers();
- }
-
- void resetUsers() {
- user.reset(new User(UserName("spencer", "test")));
- adminUser.reset(new User(UserName("admin", "admin")));
- }
- };
-
- TEST_F(V1UserDocumentParsing, testParsingV0UserDocuments) {
- BSONObj readWrite = BSON("user" << "spencer" << "pwd" << "passwordHash");
- BSONObj readOnly = BSON("user" << "spencer" << "pwd" << "passwordHash" <<
- "readOnly" << true);
- BSONObj readWriteAdmin = BSON("user" << "admin" << "pwd" << "passwordHash");
- BSONObj readOnlyAdmin = BSON("user" << "admin" << "pwd" << "passwordHash" <<
- "readOnly" << true);
-
- ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
- user.get(), readOnly, "test"));
- RoleNameIterator roles = user->getRoles();
- ASSERT_EQUALS(RoleName("read", "test"), roles.next());
- ASSERT_FALSE(roles.more());
+using std::unique_ptr;
- resetUsers();
- ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
- user.get(), readWrite, "test"));
- roles = user->getRoles();
- ASSERT_EQUALS(RoleName("dbOwner", "test"), roles.next());
- ASSERT_FALSE(roles.more());
+class V1UserDocumentParsing : public ::mongo::unittest::Test {
+public:
+ V1UserDocumentParsing() {}
- resetUsers();
- ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
- adminUser.get(), readOnlyAdmin, "admin"));
- roles = adminUser->getRoles();
- ASSERT_EQUALS(RoleName("readAnyDatabase", "admin"), roles.next());
- ASSERT_FALSE(roles.more());
+ unique_ptr<User> user;
+ unique_ptr<User> adminUser;
+ V1UserDocumentParser v1parser;
+ void setUp() {
resetUsers();
- ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
- adminUser.get(), readWriteAdmin, "admin"));
- roles = adminUser->getRoles();
- ASSERT_EQUALS(RoleName("root", "admin"), roles.next());
- ASSERT_FALSE(roles.more());
- }
-
- TEST_F(V1UserDocumentParsing, VerifyRolesFieldMustBeAnArray) {
- ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(
- user.get(),
- BSON("user" << "spencer" << "pwd" << "" << "roles" << "read"),
- "test"));
- ASSERT_FALSE(user->getRoles().more());
- }
-
- TEST_F(V1UserDocumentParsing, VerifySemanticallyInvalidRolesStillParse) {
- ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "pwd" << "" <<
- "roles" << BSON_ARRAY("read" << "frim")),
- "test"));
- RoleNameIterator roles = user->getRoles();
- RoleName role = roles.next();
- if (role == RoleName("read", "test")) {
- ASSERT_EQUALS(RoleName("frim", "test"), roles.next());
- } else {
- ASSERT_EQUALS(RoleName("frim", "test"), role);
- ASSERT_EQUALS(RoleName("read", "test"), roles.next());
- }
- ASSERT_FALSE(roles.more());
- }
-
- TEST_F(V1UserDocumentParsing, VerifyOtherDBRolesMustBeAnObjectOfArraysOfStrings) {
- ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(
- adminUser.get(),
- BSON("user" << "admin" <<
- "pwd" << "" <<
- "roles" << BSON_ARRAY("read") <<
- "otherDBRoles" << BSON_ARRAY("read")),
- "admin"));
-
- ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(
- adminUser.get(),
- BSON("user" << "admin" <<
- "pwd" << "" <<
- "roles" << BSON_ARRAY("read") <<
- "otherDBRoles" << BSON("test2" << "read")),
- "admin"));
}
- TEST_F(V1UserDocumentParsing, VerifyCannotGrantPrivilegesOnOtherDatabasesNormally) {
- // Cannot grant roles on other databases, except from admin database.
- ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "pwd" << "" <<
- "roles" << BSONArrayBuilder().arr() <<
- "otherDBRoles" << BSON("test2" << BSON_ARRAY("read"))),
- "test"));
- ASSERT_FALSE(user->getRoles().more());
+ void resetUsers() {
+ user.reset(new User(UserName("spencer", "test")));
+ adminUser.reset(new User(UserName("admin", "admin")));
}
-
- TEST_F(V1UserDocumentParsing, GrantUserAdminOnTestViaAdmin) {
- // Grant userAdmin on test via admin.
- ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
- adminUser.get(),
- BSON("user" << "admin" <<
- "pwd" << "" <<
- "roles" << BSONArrayBuilder().arr() <<
- "otherDBRoles" << BSON("test" << BSON_ARRAY("userAdmin"))),
- "admin"));
- RoleNameIterator roles = adminUser->getRoles();
- ASSERT_EQUALS(RoleName("userAdmin", "test"), roles.next());
- ASSERT_FALSE(roles.more());
- }
-
- TEST_F(V1UserDocumentParsing, MixedV0V1UserDocumentsAreInvalid) {
- // Try to mix fields from V0 and V1 user documents and make sure it fails.
- ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "pwd" << "passwordHash" <<
- "readOnly" << false <<
- "roles" << BSON_ARRAY("read")),
- "test"));
- ASSERT_FALSE(user->getRoles().more());
- }
-
- class V2UserDocumentParsing : public ::mongo::unittest::Test {
- public:
- V2UserDocumentParsing() {}
-
- unique_ptr<User> user;
- unique_ptr<User> adminUser;
- V2UserDocumentParser v2parser;
-
- void setUp() {
- user.reset(new User(UserName("spencer", "test")));
- adminUser.reset(new User(UserName("admin", "admin")));
- }
- };
-
-
- TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
- BSONArray emptyArray = BSONArrayBuilder().arr();
-
- // V1 documents don't work
- ASSERT_NOT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" << "pwd" << "a" <<
- "roles" << BSON_ARRAY("read"))));
-
- // Need name field
- ASSERT_NOT_OK(v2parser.checkValidUserDocument(
- BSON("db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << emptyArray)));
-
- // Need source field
- ASSERT_NOT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << emptyArray)));
-
- // Need credentials field
- ASSERT_NOT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "roles" << emptyArray)));
-
- // Need roles field
- ASSERT_NOT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a"))));
-
- // Empty roles arrays are OK
- ASSERT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << emptyArray)));
-
- // Need credentials of {external: true} if user's db is $external
- ASSERT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "$external" <<
- "credentials" << BSON("external" << true) <<
- "roles" << emptyArray)));
-
- // Roles must be objects
- ASSERT_NOT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY("read"))));
-
- // Role needs name
- ASSERT_NOT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("db" << "dbA")))));
-
- // Role needs source
- ASSERT_NOT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "roleA")))));
-
-
- // Basic valid user document
- ASSERT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "roleA" <<
- "db" << "dbA")))));
-
- // Multiple roles OK
- ASSERT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "roles" << BSON_ARRAY(BSON("role" << "roleA" <<
- "db" << "dbA") <<
- BSON("role" << "roleB" <<
- "db" << "dbB")))));
-
- // Optional extraData field OK
- ASSERT_OK(v2parser.checkValidUserDocument(
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a") <<
- "extraData" << BSON("foo" << "bar") <<
- "roles" << BSON_ARRAY(BSON("role" << "roleA" <<
- "db" << "dbA")))));
+};
+
+TEST_F(V1UserDocumentParsing, testParsingV0UserDocuments) {
+ BSONObj readWrite = BSON("user"
+ << "spencer"
+ << "pwd"
+ << "passwordHash");
+ BSONObj readOnly = BSON("user"
+ << "spencer"
+ << "pwd"
+ << "passwordHash"
+ << "readOnly" << true);
+ BSONObj readWriteAdmin = BSON("user"
+ << "admin"
+ << "pwd"
+ << "passwordHash");
+ BSONObj readOnlyAdmin = BSON("user"
+ << "admin"
+ << "pwd"
+ << "passwordHash"
+ << "readOnly" << true);
+
+ ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(), readOnly, "test"));
+ RoleNameIterator roles = user->getRoles();
+ ASSERT_EQUALS(RoleName("read", "test"), roles.next());
+ ASSERT_FALSE(roles.more());
+
+ resetUsers();
+ ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(), readWrite, "test"));
+ roles = user->getRoles();
+ ASSERT_EQUALS(RoleName("dbOwner", "test"), roles.next());
+ ASSERT_FALSE(roles.more());
+
+ resetUsers();
+ ASSERT_OK(
+ v1parser.initializeUserRolesFromUserDocument(adminUser.get(), readOnlyAdmin, "admin"));
+ roles = adminUser->getRoles();
+ ASSERT_EQUALS(RoleName("readAnyDatabase", "admin"), roles.next());
+ ASSERT_FALSE(roles.more());
+
+ resetUsers();
+ ASSERT_OK(
+ v1parser.initializeUserRolesFromUserDocument(adminUser.get(), readWriteAdmin, "admin"));
+ roles = adminUser->getRoles();
+ ASSERT_EQUALS(RoleName("root", "admin"), roles.next());
+ ASSERT_FALSE(roles.more());
+}
+
+TEST_F(V1UserDocumentParsing, VerifyRolesFieldMustBeAnArray) {
+ ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "pwd"
+ << ""
+ << "roles"
+ << "read"),
+ "test"));
+ ASSERT_FALSE(user->getRoles().more());
+}
+
+TEST_F(V1UserDocumentParsing, VerifySemanticallyInvalidRolesStillParse) {
+ ASSERT_OK(
+ v1parser.initializeUserRolesFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "pwd"
+ << ""
+ << "roles" << BSON_ARRAY("read"
+ << "frim")),
+ "test"));
+ RoleNameIterator roles = user->getRoles();
+ RoleName role = roles.next();
+ if (role == RoleName("read", "test")) {
+ ASSERT_EQUALS(RoleName("frim", "test"), roles.next());
+ } else {
+ ASSERT_EQUALS(RoleName("frim", "test"), role);
+ ASSERT_EQUALS(RoleName("read", "test"), roles.next());
}
-
- TEST_F(V2UserDocumentParsing, V2CredentialExtraction) {
- // Old "pwd" field not valid
- ASSERT_NOT_OK(v2parser.initializeUserCredentialsFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "pwd" << "")));
-
- // Credentials must be provided
- ASSERT_NOT_OK(v2parser.initializeUserCredentialsFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "db" << "test")));
-
- // Credentials must be object
- ASSERT_NOT_OK(v2parser.initializeUserCredentialsFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << "a")));
-
- // Must specify credentials for MONGODB-CR
- ASSERT_NOT_OK(v2parser.initializeUserCredentialsFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("foo" << "bar"))));
-
- // Make sure extracting valid credentials works
- ASSERT_OK(v2parser.initializeUserCredentialsFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "db" << "test" <<
- "credentials" << BSON("MONGODB-CR" << "a"))));
- ASSERT(user->getCredentials().password == "a");
- ASSERT(!user->getCredentials().isExternal);
-
- // Credentials are {external:true if users's db is $external
- ASSERT_OK(v2parser.initializeUserCredentialsFromUserDocument(
- user.get(),
- BSON("user" << "spencer" <<
- "db" << "$external" <<
- "credentials" << BSON("external" << true))));
- ASSERT(user->getCredentials().password.empty());
- ASSERT(user->getCredentials().isExternal);
-
+ ASSERT_FALSE(roles.more());
+}
+
+TEST_F(V1UserDocumentParsing, VerifyOtherDBRolesMustBeAnObjectOfArraysOfStrings) {
+ ASSERT_NOT_OK(
+ v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles" << BSON_ARRAY("read")
+ << "otherDBRoles" << BSON_ARRAY("read")),
+ "admin"));
+
+ ASSERT_NOT_OK(
+ v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles" << BSON_ARRAY("read")
+ << "otherDBRoles" << BSON("test2"
+ << "read")),
+ "admin"));
+}
+
+TEST_F(V1UserDocumentParsing, VerifyCannotGrantPrivilegesOnOtherDatabasesNormally) {
+ // Cannot grant roles on other databases, except from admin database.
+ ASSERT_NOT_OK(
+ v1parser.initializeUserRolesFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "pwd"
+ << ""
+ << "roles" << BSONArrayBuilder().arr()
+ << "otherDBRoles"
+ << BSON("test2" << BSON_ARRAY("read"))),
+ "test"));
+ ASSERT_FALSE(user->getRoles().more());
+}
+
+TEST_F(V1UserDocumentParsing, GrantUserAdminOnTestViaAdmin) {
+ // Grant userAdmin on test via admin.
+ ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
+ adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles" << BSONArrayBuilder().arr() << "otherDBRoles"
+ << BSON("test" << BSON_ARRAY("userAdmin"))),
+ "admin"));
+ RoleNameIterator roles = adminUser->getRoles();
+ ASSERT_EQUALS(RoleName("userAdmin", "test"), roles.next());
+ ASSERT_FALSE(roles.more());
+}
+
+TEST_F(V1UserDocumentParsing, MixedV0V1UserDocumentsAreInvalid) {
+ // Try to mix fields from V0 and V1 user documents and make sure it fails.
+ ASSERT_NOT_OK(
+ v1parser.initializeUserRolesFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "pwd"
+ << "passwordHash"
+ << "readOnly" << false << "roles"
+ << BSON_ARRAY("read")),
+ "test"));
+ ASSERT_FALSE(user->getRoles().more());
+}
+
+class V2UserDocumentParsing : public ::mongo::unittest::Test {
+public:
+ V2UserDocumentParsing() {}
+
+ unique_ptr<User> user;
+ unique_ptr<User> adminUser;
+ V2UserDocumentParser v2parser;
+
+ void setUp() {
+ user.reset(new User(UserName("spencer", "test")));
+ adminUser.reset(new User(UserName("admin", "admin")));
}
-
- TEST_F(V2UserDocumentParsing, V2RoleExtraction) {
- // "roles" field must be provided
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(
- BSON("user" << "spencer"),
- user.get()));
-
- // V1-style roles arrays no longer work
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(
- BSON("user" << "spencer" <<
- "roles" << BSON_ARRAY("read")),
- user.get()));
-
- // Roles must have "db" field
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(
- BSON("user" << "spencer" <<
- "roles" << BSON_ARRAY(BSONObj())),
- user.get()));
-
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(
- BSON("user" << "spencer" <<
- "roles" << BSON_ARRAY(BSON("role" << "roleA"))),
- user.get()));
-
- ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(
- BSON("user" << "spencer" <<
- "roles" << BSON_ARRAY(BSON("user" << "roleA" <<
- "db" << "dbA"))),
- user.get()));
-
- // Valid role names are extracted successfully
- ASSERT_OK(v2parser.initializeUserRolesFromUserDocument(
- BSON("user" << "spencer" <<
- "roles" << BSON_ARRAY(BSON("role" << "roleA" <<
- "db" << "dbA"))),
- user.get()));
- RoleNameIterator roles = user->getRoles();
+};
+
+
+TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
+ BSONArray emptyArray = BSONArrayBuilder().arr();
+
+ // V1 documents don't work
+ ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "pwd"
+ << "a"
+ << "roles" << BSON_ARRAY("read"))));
+
+ // Need name field
+ ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << emptyArray)));
+
+ // Need source field
+ ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << emptyArray)));
+
+ // Need credentials field
+ ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "roles" << emptyArray)));
+
+ // Need roles field
+ ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a"))));
+
+ // Empty roles arrays are OK
+ ASSERT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << emptyArray)));
+
+ // Need credentials of {external: true} if user's db is $external
+ ASSERT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "$external"
+ << "credentials" << BSON("external" << true)
+ << "roles" << emptyArray)));
+
+ // Roles must be objects
+ ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY("read"))));
+
+ // Role needs name
+ ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("db"
+ << "dbA")))));
+
+ // Role needs source
+ ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA")))));
+
+
+ // Basic valid user document
+ ASSERT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA")))));
+
+ // Multiple roles OK
+ ASSERT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA")
+ << BSON("role"
+ << "roleB"
+ << "db"
+ << "dbB")))));
+
+ // Optional extraData field OK
+ ASSERT_OK(v2parser.checkValidUserDocument(BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials" << BSON("MONGODB-CR"
+ << "a") << "extraData"
+ << BSON("foo"
+ << "bar") << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA")))));
+}
+
+TEST_F(V2UserDocumentParsing, V2CredentialExtraction) {
+ // Old "pwd" field not valid
+ ASSERT_NOT_OK(v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "pwd"
+ << "")));
+
+ // Credentials must be provided
+ ASSERT_NOT_OK(v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test")));
+
+ // Credentials must be object
+ ASSERT_NOT_OK(v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials"
+ << "a")));
+
+ // Must specify credentials for MONGODB-CR
+ ASSERT_NOT_OK(v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials"
+ << BSON("foo"
+ << "bar"))));
+
+ // Make sure extracting valid credentials works
+ ASSERT_OK(v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "test"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a"))));
+ ASSERT(user->getCredentials().password == "a");
+ ASSERT(!user->getCredentials().isExternal);
+
+    // Credentials are {external: true} if user's db is $external
+ ASSERT_OK(
+ v2parser.initializeUserCredentialsFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "db"
+ << "$external"
+ << "credentials"
+ << BSON("external" << true))));
+ ASSERT(user->getCredentials().password.empty());
+ ASSERT(user->getCredentials().isExternal);
+}
+
+TEST_F(V2UserDocumentParsing, V2RoleExtraction) {
+ // "roles" field must be provided
+ ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"),
+ user.get()));
+
+ // V1-style roles arrays no longer work
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY("read")),
+ user.get()));
+
+ // Roles must have "db" field
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY(BSONObj())),
+ user.get()));
+
+ ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY(BSON(
+ "role"
+ << "roleA"))),
+ user.get()));
+
+ ASSERT_NOT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY(BSON("user"
+ << "roleA"
+ << "db"
+ << "dbA"))),
+ user.get()));
+
+ // Valid role names are extracted successfully
+ ASSERT_OK(
+ v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles" << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA"))),
+ user.get()));
+ RoleNameIterator roles = user->getRoles();
+ ASSERT_EQUALS(RoleName("roleA", "dbA"), roles.next());
+ ASSERT_FALSE(roles.more());
+
+ // Multiple roles OK
+ ASSERT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA")
+ << BSON("role"
+ << "roleB"
+ << "db"
+ << "dbB"))),
+ user.get()));
+ roles = user->getRoles();
+ RoleName role = roles.next();
+ if (role == RoleName("roleA", "dbA")) {
+ ASSERT_EQUALS(RoleName("roleB", "dbB"), roles.next());
+ } else {
+ ASSERT_EQUALS(RoleName("roleB", "dbB"), role);
ASSERT_EQUALS(RoleName("roleA", "dbA"), roles.next());
- ASSERT_FALSE(roles.more());
-
- // Multiple roles OK
- ASSERT_OK(v2parser.initializeUserRolesFromUserDocument(
- BSON("user" << "spencer" <<
- "roles" << BSON_ARRAY(BSON("role" << "roleA" <<
- "db" << "dbA") <<
- BSON("role" << "roleB" <<
- "db" << "dbB"))),
- user.get()));
- roles = user->getRoles();
- RoleName role = roles.next();
- if (role == RoleName("roleA", "dbA")) {
- ASSERT_EQUALS(RoleName("roleB", "dbB"), roles.next());
- } else {
- ASSERT_EQUALS(RoleName("roleB", "dbB"), role);
- ASSERT_EQUALS(RoleName("roleA", "dbA"), roles.next());
- }
- ASSERT_FALSE(roles.more());
}
+ ASSERT_FALSE(roles.more());
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp
index eecef9d9301..82130e2a530 100644
--- a/src/mongo/db/auth/user_management_commands_parser.cpp
+++ b/src/mongo/db/auth/user_management_commands_parser.cpp
@@ -47,652 +47,620 @@
namespace mongo {
namespace auth {
- using std::vector;
-
- /**
- * Writes into *writeConcern a BSONObj describing the parameters to getLastError to use for
- * the write confirmation.
- */
- Status _extractWriteConcern(const BSONObj& cmdObj, BSONObj* writeConcern) {
- BSONElement writeConcernElement;
- Status status = bsonExtractTypedField(cmdObj, "writeConcern", Object, &writeConcernElement);
- if (!status.isOK()) {
- if (status.code() == ErrorCodes::NoSuchKey) {
- *writeConcern = BSONObj();
- return Status::OK();
- }
- return status;
- }
- *writeConcern = writeConcernElement.Obj().getOwned();;
- return Status::OK();
- }
-
- Status _checkNoExtraFields(const BSONObj& cmdObj,
- StringData cmdName,
- const unordered_set<std::string>& validFieldNames) {
- // Iterate through all fields in command object and make sure there are no unexpected
- // ones.
- for (BSONObjIterator iter(cmdObj); iter.more(); iter.next()) {
- StringData fieldName = (*iter).fieldNameStringData();
- if (!validFieldNames.count(fieldName.toString())) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << "\"" << fieldName << "\" is not "
- "a valid argument to " << cmdName);
- }
- }
- return Status::OK();
- }
-
- // Extracts a UserName or RoleName object from a BSONElement.
- template <typename Name>
- Status _parseNameFromBSONElement(const BSONElement& element,
- StringData dbname,
- StringData nameFieldName,
- StringData sourceFieldName,
- Name* parsedName) {
- if (element.type() == String) {
- *parsedName = Name(element.String(), dbname);
- }
- else if (element.type() == Object) {
- BSONObj obj = element.Obj();
-
- std::string name;
- std::string source;
- Status status = bsonExtractStringField(obj, nameFieldName, &name);
- if (!status.isOK()) {
- return status;
- }
- status = bsonExtractStringField(obj, sourceFieldName, &source);
- if (!status.isOK()) {
- return status;
- }
-
- *parsedName = Name(name, source);
- }
- else {
+using std::vector;
+
+/**
+ * Writes into *writeConcern a BSONObj describing the parameters to getLastError to use for
+ * the write confirmation.
+ */
+Status _extractWriteConcern(const BSONObj& cmdObj, BSONObj* writeConcern) {
+ BSONElement writeConcernElement;
+ Status status = bsonExtractTypedField(cmdObj, "writeConcern", Object, &writeConcernElement);
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::NoSuchKey) {
+ *writeConcern = BSONObj();
+ return Status::OK();
+ }
+ return status;
+ }
+    *writeConcern = writeConcernElement.Obj().getOwned();
+ return Status::OK();
+}
+
+Status _checkNoExtraFields(const BSONObj& cmdObj,
+ StringData cmdName,
+ const unordered_set<std::string>& validFieldNames) {
+ // Iterate through all fields in command object and make sure there are no unexpected
+ // ones.
+ for (BSONObjIterator iter(cmdObj); iter.more(); iter.next()) {
+ StringData fieldName = (*iter).fieldNameStringData();
+ if (!validFieldNames.count(fieldName.toString())) {
return Status(ErrorCodes::BadValue,
- "User and role names must be either strings or objects");
- }
- return Status::OK();
- }
-
- // Extracts UserName or RoleName objects from a BSONArray of role/user names.
- template <typename Name>
- Status _parseNamesFromBSONArray(const BSONArray& array,
- StringData dbname,
- StringData nameFieldName,
- StringData sourceFieldName,
- std::vector<Name>* parsedNames) {
- for (BSONObjIterator it(array); it.more(); it.next()) {
- BSONElement element = *it;
- Name name;
- Status status = _parseNameFromBSONElement(element,
- dbname,
- nameFieldName,
- sourceFieldName,
- &name);
- if (!status.isOK()) {
- return status;
- }
- parsedNames->push_back(name);
- }
- return Status::OK();
- }
-
- Status parseUserNamesFromBSONArray(const BSONArray& usersArray,
- StringData dbname,
- std::vector<UserName>* parsedUserNames) {
- return _parseNamesFromBSONArray(usersArray,
- dbname,
- AuthorizationManager::USER_NAME_FIELD_NAME,
- AuthorizationManager::USER_DB_FIELD_NAME,
- parsedUserNames);
- }
-
- Status parseRoleNamesFromBSONArray(const BSONArray& rolesArray,
- StringData dbname,
- std::vector<RoleName>* parsedRoleNames) {
- return _parseNamesFromBSONArray(rolesArray,
- dbname,
- AuthorizationManager::ROLE_NAME_FIELD_NAME,
- AuthorizationManager::ROLE_DB_FIELD_NAME,
- parsedRoleNames);
- }
-
- Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj,
- StringData cmdName,
- const std::string& dbname,
- std::string* parsedName,
- vector<RoleName>* parsedRoleNames,
- BSONObj* parsedWriteConcern) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert(cmdName.toString());
- validFieldNames.insert("roles");
- validFieldNames.insert("writeConcern");
-
- Status status = _checkNoExtraFields(cmdObj, cmdName, validFieldNames);
- if (!status.isOK()) {
- return status;
+ mongoutils::str::stream() << "\"" << fieldName << "\" is not "
+ "a valid argument to "
+ << cmdName);
}
+ }
+ return Status::OK();
+}
- status = _extractWriteConcern(cmdObj, parsedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+// Extracts a UserName or RoleName object from a BSONElement.
+template <typename Name>
+Status _parseNameFromBSONElement(const BSONElement& element,
+ StringData dbname,
+ StringData nameFieldName,
+ StringData sourceFieldName,
+ Name* parsedName) {
+ if (element.type() == String) {
+ *parsedName = Name(element.String(), dbname);
+ } else if (element.type() == Object) {
+ BSONObj obj = element.Obj();
- status = bsonExtractStringField(cmdObj, cmdName, parsedName);
+ std::string name;
+ std::string source;
+ Status status = bsonExtractStringField(obj, nameFieldName, &name);
if (!status.isOK()) {
return status;
}
-
- BSONElement rolesElement;
- status = bsonExtractTypedField(cmdObj, "roles", Array, &rolesElement);
+ status = bsonExtractStringField(obj, sourceFieldName, &source);
if (!status.isOK()) {
return status;
}
- status = parseRoleNamesFromBSONArray(BSONArray(rolesElement.Obj()),
- dbname,
- parsedRoleNames);
- if (!status.isOK()) {
- return status;
- }
+ *parsedName = Name(name, source);
+ } else {
+ return Status(ErrorCodes::BadValue,
+ "User and role names must be either strings or objects");
+ }
+ return Status::OK();
+}
- if (!parsedRoleNames->size()) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << cmdName << " command requires a non-empty "
- "\"roles\" array");
- }
- return Status::OK();
- }
-
- Status parseCreateOrUpdateUserCommands(const BSONObj& cmdObj,
- StringData cmdName,
- const std::string& dbname,
- CreateOrUpdateUserArgs* parsedArgs) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert(cmdName.toString());
- validFieldNames.insert("customData");
- validFieldNames.insert("digestPassword");
- validFieldNames.insert("pwd");
- validFieldNames.insert("roles");
- validFieldNames.insert("writeConcern");
-
- Status status = _checkNoExtraFields(cmdObj, cmdName, validFieldNames);
+// Extracts UserName or RoleName objects from a BSONArray of role/user names.
+template <typename Name>
+Status _parseNamesFromBSONArray(const BSONArray& array,
+ StringData dbname,
+ StringData nameFieldName,
+ StringData sourceFieldName,
+ std::vector<Name>* parsedNames) {
+ for (BSONObjIterator it(array); it.more(); it.next()) {
+ BSONElement element = *it;
+ Name name;
+ Status status =
+ _parseNameFromBSONElement(element, dbname, nameFieldName, sourceFieldName, &name);
if (!status.isOK()) {
return status;
}
+ parsedNames->push_back(name);
+ }
+ return Status::OK();
+}
+
+Status parseUserNamesFromBSONArray(const BSONArray& usersArray,
+ StringData dbname,
+ std::vector<UserName>* parsedUserNames) {
+ return _parseNamesFromBSONArray(usersArray,
+ dbname,
+ AuthorizationManager::USER_NAME_FIELD_NAME,
+ AuthorizationManager::USER_DB_FIELD_NAME,
+ parsedUserNames);
+}
+
+Status parseRoleNamesFromBSONArray(const BSONArray& rolesArray,
+ StringData dbname,
+ std::vector<RoleName>* parsedRoleNames) {
+ return _parseNamesFromBSONArray(rolesArray,
+ dbname,
+ AuthorizationManager::ROLE_NAME_FIELD_NAME,
+ AuthorizationManager::ROLE_DB_FIELD_NAME,
+ parsedRoleNames);
+}
+
+Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj,
+ StringData cmdName,
+ const std::string& dbname,
+ std::string* parsedName,
+ vector<RoleName>* parsedRoleNames,
+ BSONObj* parsedWriteConcern) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert(cmdName.toString());
+ validFieldNames.insert("roles");
+ validFieldNames.insert("writeConcern");
+
+ Status status = _checkNoExtraFields(cmdObj, cmdName, validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
- status = _extractWriteConcern(cmdObj, &parsedArgs->writeConcern);
- if (!status.isOK()) {
- return status;
- }
+ status = _extractWriteConcern(cmdObj, parsedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- BSONObjBuilder userObjBuilder;
+ status = bsonExtractStringField(cmdObj, cmdName, parsedName);
+ if (!status.isOK()) {
+ return status;
+ }
- // Parse user name
- std::string userName;
- status = bsonExtractStringField(cmdObj, cmdName, &userName);
- if (!status.isOK()) {
- return status;
- }
+ BSONElement rolesElement;
+ status = bsonExtractTypedField(cmdObj, "roles", Array, &rolesElement);
+ if (!status.isOK()) {
+ return status;
+ }
- parsedArgs->userName = UserName(userName, dbname);
-
- // Parse password
- if (cmdObj.hasField("pwd")) {
- std::string password;
- status = bsonExtractStringField(cmdObj, "pwd", &password);
- if (!status.isOK()) {
- return status;
- }
- if (password.empty()) {
- return Status(ErrorCodes::BadValue, "User passwords must not be empty");
- }
-
- bool digestPassword; // True if the server should digest the password
- status = bsonExtractBooleanFieldWithDefault(cmdObj,
- "digestPassword",
- true,
- &digestPassword);
- if (!status.isOK()) {
- return status;
- }
-
- if (digestPassword) {
- parsedArgs->hashedPassword = mongo::createPasswordDigest(
- userName, password);
- } else {
- parsedArgs->hashedPassword = password;
- }
- parsedArgs->hasHashedPassword = true;
- }
+ status = parseRoleNamesFromBSONArray(BSONArray(rolesElement.Obj()), dbname, parsedRoleNames);
+ if (!status.isOK()) {
+ return status;
+ }
- // Parse custom data
- if (cmdObj.hasField("customData")) {
- BSONElement element;
- status = bsonExtractTypedField(cmdObj, "customData", Object, &element);
- if (!status.isOK()) {
- return status;
- }
- parsedArgs->customData = element.Obj();
- parsedArgs->hasCustomData = true;
- }
+ if (!parsedRoleNames->size()) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << cmdName << " command requires a non-empty "
+ "\"roles\" array");
+ }
+ return Status::OK();
+}
+
+Status parseCreateOrUpdateUserCommands(const BSONObj& cmdObj,
+ StringData cmdName,
+ const std::string& dbname,
+ CreateOrUpdateUserArgs* parsedArgs) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert(cmdName.toString());
+ validFieldNames.insert("customData");
+ validFieldNames.insert("digestPassword");
+ validFieldNames.insert("pwd");
+ validFieldNames.insert("roles");
+ validFieldNames.insert("writeConcern");
+
+ Status status = _checkNoExtraFields(cmdObj, cmdName, validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
- // Parse roles
- if (cmdObj.hasField("roles")) {
- BSONElement rolesElement;
- status = bsonExtractTypedField(cmdObj, "roles", Array, &rolesElement);
- if (!status.isOK()) {
- return status;
- }
- status = parseRoleNamesFromBSONArray(BSONArray(rolesElement.Obj()),
- dbname,
- &parsedArgs->roles);
- if (!status.isOK()) {
- return status;
- }
- parsedArgs->hasRoles = true;
- }
+ status = _extractWriteConcern(cmdObj, &parsedArgs->writeConcern);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ BSONObjBuilder userObjBuilder;
- return Status::OK();
+ // Parse user name
+ std::string userName;
+ status = bsonExtractStringField(cmdObj, cmdName, &userName);
+ if (!status.isOK()) {
+ return status;
}
- Status parseAndValidateDropUserCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- UserName* parsedUserName,
- BSONObj* parsedWriteConcern) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert("dropUser");
- validFieldNames.insert("writeConcern");
+ parsedArgs->userName = UserName(userName, dbname);
- Status status = _checkNoExtraFields(cmdObj, "dropUser", validFieldNames);
+ // Parse password
+ if (cmdObj.hasField("pwd")) {
+ std::string password;
+ status = bsonExtractStringField(cmdObj, "pwd", &password);
if (!status.isOK()) {
return status;
}
+ if (password.empty()) {
+ return Status(ErrorCodes::BadValue, "User passwords must not be empty");
+ }
- std::string user;
- status = bsonExtractStringField(cmdObj, "dropUser", &user);
+ bool digestPassword; // True if the server should digest the password
+ status =
+ bsonExtractBooleanFieldWithDefault(cmdObj, "digestPassword", true, &digestPassword);
if (!status.isOK()) {
return status;
}
- status = _extractWriteConcern(cmdObj, parsedWriteConcern);
+ if (digestPassword) {
+ parsedArgs->hashedPassword = mongo::createPasswordDigest(userName, password);
+ } else {
+ parsedArgs->hashedPassword = password;
+ }
+ parsedArgs->hasHashedPassword = true;
+ }
+
+ // Parse custom data
+ if (cmdObj.hasField("customData")) {
+ BSONElement element;
+ status = bsonExtractTypedField(cmdObj, "customData", Object, &element);
if (!status.isOK()) {
return status;
}
-
- *parsedUserName = UserName(user, dbname);
- return Status::OK();
+ parsedArgs->customData = element.Obj();
+ parsedArgs->hasCustomData = true;
}
- Status parseFromDatabaseCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- BSONObj* parsedWriteConcern,
- std::string command) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert(command);
- validFieldNames.insert("writeConcern");
-
- Status status = _checkNoExtraFields(cmdObj, command, validFieldNames);
+ // Parse roles
+ if (cmdObj.hasField("roles")) {
+ BSONElement rolesElement;
+ status = bsonExtractTypedField(cmdObj, "roles", Array, &rolesElement);
if (!status.isOK()) {
return status;
}
-
- status = _extractWriteConcern(cmdObj, parsedWriteConcern);
+ status =
+ parseRoleNamesFromBSONArray(BSONArray(rolesElement.Obj()), dbname, &parsedArgs->roles);
if (!status.isOK()) {
return status;
}
+ parsedArgs->hasRoles = true;
+ }
+
+ return Status::OK();
+}
+
+Status parseAndValidateDropUserCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ UserName* parsedUserName,
+ BSONObj* parsedWriteConcern) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert("dropUser");
+ validFieldNames.insert("writeConcern");
- return Status::OK();
+ Status status = _checkNoExtraFields(cmdObj, "dropUser", validFieldNames);
+ if (!status.isOK()) {
+ return status;
}
- Status parseAndValidateDropAllUsersFromDatabaseCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- BSONObj* parsedWriteConcern) {
- return parseFromDatabaseCommand(cmdObj, dbname, parsedWriteConcern, "dropAllUsersFromDatabase");
+
+ std::string user;
+ status = bsonExtractStringField(cmdObj, "dropUser", &user);
+ if (!status.isOK()) {
+ return status;
}
- Status parseUsersInfoCommand(const BSONObj& cmdObj,
- StringData dbname,
- UsersInfoArgs* parsedArgs) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert("usersInfo");
- validFieldNames.insert("showPrivileges");
- validFieldNames.insert("showCredentials");
+ status = _extractWriteConcern(cmdObj, parsedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- Status status = _checkNoExtraFields(cmdObj, "usersInfo", validFieldNames);
- if (!status.isOK()) {
- return status;
- }
+ *parsedUserName = UserName(user, dbname);
+ return Status::OK();
+}
- if (cmdObj["usersInfo"].numberInt() == 1) {
- parsedArgs->allForDB = true;
- } else if (cmdObj["usersInfo"].type() == Array) {
- status = parseUserNamesFromBSONArray(BSONArray(cmdObj["usersInfo"].Obj()),
- dbname,
- &parsedArgs->userNames);
- if (!status.isOK()) {
- return status;
- }
- } else {
- UserName name;
- status = _parseNameFromBSONElement(cmdObj["usersInfo"],
- dbname,
- AuthorizationManager::USER_NAME_FIELD_NAME,
- AuthorizationManager::USER_DB_FIELD_NAME,
- &name);
- if (!status.isOK()) {
- return status;
- }
- parsedArgs->userNames.push_back(name);
- }
+Status parseFromDatabaseCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ BSONObj* parsedWriteConcern,
+ std::string command) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert(command);
+ validFieldNames.insert("writeConcern");
+
+ Status status = _checkNoExtraFields(cmdObj, command, validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
- status = bsonExtractBooleanFieldWithDefault(cmdObj,
- "showPrivileges",
- false,
- &parsedArgs->showPrivileges);
+ status = _extractWriteConcern(cmdObj, parsedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ return Status::OK();
+}
+Status parseAndValidateDropAllUsersFromDatabaseCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ BSONObj* parsedWriteConcern) {
+ return parseFromDatabaseCommand(cmdObj, dbname, parsedWriteConcern, "dropAllUsersFromDatabase");
+}
+
+Status parseUsersInfoCommand(const BSONObj& cmdObj, StringData dbname, UsersInfoArgs* parsedArgs) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert("usersInfo");
+ validFieldNames.insert("showPrivileges");
+ validFieldNames.insert("showCredentials");
+
+ Status status = _checkNoExtraFields(cmdObj, "usersInfo", validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ if (cmdObj["usersInfo"].numberInt() == 1) {
+ parsedArgs->allForDB = true;
+ } else if (cmdObj["usersInfo"].type() == Array) {
+ status = parseUserNamesFromBSONArray(
+ BSONArray(cmdObj["usersInfo"].Obj()), dbname, &parsedArgs->userNames);
if (!status.isOK()) {
return status;
}
- status = bsonExtractBooleanFieldWithDefault(cmdObj,
- "showCredentials",
- false,
- &parsedArgs->showCredentials);
+ } else {
+ UserName name;
+ status = _parseNameFromBSONElement(cmdObj["usersInfo"],
+ dbname,
+ AuthorizationManager::USER_NAME_FIELD_NAME,
+ AuthorizationManager::USER_DB_FIELD_NAME,
+ &name);
if (!status.isOK()) {
return status;
}
+ parsedArgs->userNames.push_back(name);
+ }
- return Status::OK();
+ status = bsonExtractBooleanFieldWithDefault(
+ cmdObj, "showPrivileges", false, &parsedArgs->showPrivileges);
+ if (!status.isOK()) {
+ return status;
+ }
+ status = bsonExtractBooleanFieldWithDefault(
+ cmdObj, "showCredentials", false, &parsedArgs->showCredentials);
+ if (!status.isOK()) {
+ return status;
}
- Status parseRolesInfoCommand(const BSONObj& cmdObj,
- StringData dbname,
- RolesInfoArgs* parsedArgs) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert("rolesInfo");
- validFieldNames.insert("showPrivileges");
- validFieldNames.insert("showBuiltinRoles");
+ return Status::OK();
+}
- Status status = _checkNoExtraFields(cmdObj, "rolesInfo", validFieldNames);
- if (!status.isOK()) {
- return status;
- }
+Status parseRolesInfoCommand(const BSONObj& cmdObj, StringData dbname, RolesInfoArgs* parsedArgs) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert("rolesInfo");
+ validFieldNames.insert("showPrivileges");
+ validFieldNames.insert("showBuiltinRoles");
- if (cmdObj["rolesInfo"].numberInt() == 1) {
- parsedArgs->allForDB = true;
- } else if (cmdObj["rolesInfo"].type() == Array) {
- status = parseRoleNamesFromBSONArray(BSONArray(cmdObj["rolesInfo"].Obj()),
- dbname,
- &parsedArgs->roleNames);
- if (!status.isOK()) {
- return status;
- }
- } else {
- RoleName name;
- status = _parseNameFromBSONElement(cmdObj["rolesInfo"],
- dbname,
- AuthorizationManager::ROLE_NAME_FIELD_NAME,
- AuthorizationManager::ROLE_DB_FIELD_NAME,
- &name);
- if (!status.isOK()) {
- return status;
- }
- parsedArgs->roleNames.push_back(name);
- }
+ Status status = _checkNoExtraFields(cmdObj, "rolesInfo", validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
- status = bsonExtractBooleanFieldWithDefault(cmdObj,
- "showPrivileges",
- false,
- &parsedArgs->showPrivileges);
+ if (cmdObj["rolesInfo"].numberInt() == 1) {
+ parsedArgs->allForDB = true;
+ } else if (cmdObj["rolesInfo"].type() == Array) {
+ status = parseRoleNamesFromBSONArray(
+ BSONArray(cmdObj["rolesInfo"].Obj()), dbname, &parsedArgs->roleNames);
if (!status.isOK()) {
return status;
}
-
- status = bsonExtractBooleanFieldWithDefault(cmdObj,
- "showBuiltinRoles",
- false,
- &parsedArgs->showBuiltinRoles);
+ } else {
+ RoleName name;
+ status = _parseNameFromBSONElement(cmdObj["rolesInfo"],
+ dbname,
+ AuthorizationManager::ROLE_NAME_FIELD_NAME,
+ AuthorizationManager::ROLE_DB_FIELD_NAME,
+ &name);
if (!status.isOK()) {
return status;
}
+ parsedArgs->roleNames.push_back(name);
+ }
- return Status::OK();
- }
-
- /*
- * Validates that the given privilege BSONArray is valid.
- * If parsedPrivileges is not NULL, adds to it the privileges parsed out of the input BSONArray.
- */
- Status parseAndValidatePrivilegeArray(const BSONArray& privileges,
- PrivilegeVector* parsedPrivileges) {
- for (BSONObjIterator it(privileges); it.more(); it.next()) {
- BSONElement element = *it;
- if (element.type() != Object) {
- return Status(ErrorCodes::FailedToParse,
- "Elements in privilege arrays must be objects");
- }
-
- ParsedPrivilege parsedPrivilege;
- std::string errmsg;
- if (!parsedPrivilege.parseBSON(element.Obj(), &errmsg)) {
- return Status(ErrorCodes::FailedToParse, errmsg);
- }
- if (!parsedPrivilege.isValid(&errmsg)) {
- return Status(ErrorCodes::FailedToParse, errmsg);
- }
-
- Privilege privilege;
- if (!ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg)) {
- return Status(ErrorCodes::FailedToParse, errmsg);
- }
-
- parsedPrivileges->push_back(privilege);
- }
- return Status::OK();
+ status = bsonExtractBooleanFieldWithDefault(
+ cmdObj, "showPrivileges", false, &parsedArgs->showPrivileges);
+ if (!status.isOK()) {
+ return status;
}
- Status parseCreateOrUpdateRoleCommands(const BSONObj& cmdObj,
- StringData cmdName,
- const std::string& dbname,
- CreateOrUpdateRoleArgs* parsedArgs) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert(cmdName.toString());
- validFieldNames.insert("privileges");
- validFieldNames.insert("roles");
- validFieldNames.insert("writeConcern");
+ status = bsonExtractBooleanFieldWithDefault(
+ cmdObj, "showBuiltinRoles", false, &parsedArgs->showBuiltinRoles);
+ if (!status.isOK()) {
+ return status;
+ }
- Status status = _checkNoExtraFields(cmdObj, cmdName, validFieldNames);
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+}
- status = _extractWriteConcern(cmdObj, &parsedArgs->writeConcern);
- if (!status.isOK()) {
- return status;
+/*
+ * Validates the given privilege BSONArray.
+ * If parsedPrivileges is not NULL, adds to it the privileges parsed out of the input BSONArray.
+ */
+Status parseAndValidatePrivilegeArray(const BSONArray& privileges,
+ PrivilegeVector* parsedPrivileges) {
+ for (BSONObjIterator it(privileges); it.more(); it.next()) {
+ BSONElement element = *it;
+ if (element.type() != Object) {
+ return Status(ErrorCodes::FailedToParse,
+ "Elements in privilege arrays must be objects");
}
- std::string roleName;
- status = bsonExtractStringField(cmdObj, cmdName, &roleName);
- if (!status.isOK()) {
- return status;
+ ParsedPrivilege parsedPrivilege;
+ std::string errmsg;
+ if (!parsedPrivilege.parseBSON(element.Obj(), &errmsg)) {
+ return Status(ErrorCodes::FailedToParse, errmsg);
}
- parsedArgs->roleName = RoleName(roleName, dbname);
-
- // Parse privileges
- if (cmdObj.hasField("privileges")) {
- BSONElement privilegesElement;
- status = bsonExtractTypedField(cmdObj, "privileges", Array, &privilegesElement);
- if (!status.isOK()) {
- return status;
- }
- status = parseAndValidatePrivilegeArray(BSONArray(privilegesElement.Obj()),
- &parsedArgs->privileges);
- if (!status.isOK()) {
- return status;
- }
- parsedArgs->hasPrivileges = true;
+ if (!parsedPrivilege.isValid(&errmsg)) {
+ return Status(ErrorCodes::FailedToParse, errmsg);
}
- // Parse roles
- if (cmdObj.hasField("roles")) {
- BSONElement rolesElement;
- status = bsonExtractTypedField(cmdObj, "roles", Array, &rolesElement);
- if (!status.isOK()) {
- return status;
- }
- status = parseRoleNamesFromBSONArray(BSONArray(rolesElement.Obj()),
- dbname,
- &parsedArgs->roles);
- if (!status.isOK()) {
- return status;
- }
- parsedArgs->hasRoles = true;
+ Privilege privilege;
+ if (!ParsedPrivilege::parsedPrivilegeToPrivilege(parsedPrivilege, &privilege, &errmsg)) {
+ return Status(ErrorCodes::FailedToParse, errmsg);
}
- return Status::OK();
- }
-
- Status parseAndValidateRolePrivilegeManipulationCommands(const BSONObj& cmdObj,
- StringData cmdName,
- const std::string& dbname,
- RoleName* parsedRoleName,
- PrivilegeVector* parsedPrivileges,
- BSONObj* parsedWriteConcern) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert(cmdName.toString());
- validFieldNames.insert("privileges");
- validFieldNames.insert("writeConcern");
- Status status = _checkNoExtraFields(cmdObj, cmdName, validFieldNames);
- if (!status.isOK()) {
- return status;
- }
-
- status = _extractWriteConcern(cmdObj, parsedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ parsedPrivileges->push_back(privilege);
+ }
+ return Status::OK();
+}
+
+Status parseCreateOrUpdateRoleCommands(const BSONObj& cmdObj,
+ StringData cmdName,
+ const std::string& dbname,
+ CreateOrUpdateRoleArgs* parsedArgs) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert(cmdName.toString());
+ validFieldNames.insert("privileges");
+ validFieldNames.insert("roles");
+ validFieldNames.insert("writeConcern");
+
+ Status status = _checkNoExtraFields(cmdObj, cmdName, validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
- BSONObjBuilder roleObjBuilder;
+ status = _extractWriteConcern(cmdObj, &parsedArgs->writeConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- // Parse role name
- std::string roleName;
- status = bsonExtractStringField(cmdObj, cmdName, &roleName);
- if (!status.isOK()) {
- return status;
- }
- *parsedRoleName = RoleName(roleName, dbname);
+ std::string roleName;
+ status = bsonExtractStringField(cmdObj, cmdName, &roleName);
+ if (!status.isOK()) {
+ return status;
+ }
+ parsedArgs->roleName = RoleName(roleName, dbname);
- // Parse privileges
+ // Parse privileges
+ if (cmdObj.hasField("privileges")) {
BSONElement privilegesElement;
status = bsonExtractTypedField(cmdObj, "privileges", Array, &privilegesElement);
if (!status.isOK()) {
return status;
}
status = parseAndValidatePrivilegeArray(BSONArray(privilegesElement.Obj()),
- parsedPrivileges);
+ &parsedArgs->privileges);
if (!status.isOK()) {
return status;
}
- if (!parsedPrivileges->size()) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << cmdName << " command requires a non-empty "
- "\"privileges\" array");
- }
-
- return Status::OK();
+ parsedArgs->hasPrivileges = true;
}
- Status parseDropRoleCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- RoleName* parsedRoleName,
- BSONObj* parsedWriteConcern) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert("dropRole");
- validFieldNames.insert("writeConcern");
-
- Status status = _checkNoExtraFields(cmdObj, "dropRole", validFieldNames);
+ // Parse roles
+ if (cmdObj.hasField("roles")) {
+ BSONElement rolesElement;
+ status = bsonExtractTypedField(cmdObj, "roles", Array, &rolesElement);
if (!status.isOK()) {
return status;
}
-
- std::string user;
- status = bsonExtractStringField(cmdObj, "dropRole", &user);
+ status =
+ parseRoleNamesFromBSONArray(BSONArray(rolesElement.Obj()), dbname, &parsedArgs->roles);
if (!status.isOK()) {
return status;
}
+ parsedArgs->hasRoles = true;
+ }
+ return Status::OK();
+}
+
+Status parseAndValidateRolePrivilegeManipulationCommands(const BSONObj& cmdObj,
+ StringData cmdName,
+ const std::string& dbname,
+ RoleName* parsedRoleName,
+ PrivilegeVector* parsedPrivileges,
+ BSONObj* parsedWriteConcern) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert(cmdName.toString());
+ validFieldNames.insert("privileges");
+ validFieldNames.insert("writeConcern");
+
+ Status status = _checkNoExtraFields(cmdObj, cmdName, validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
- status = _extractWriteConcern(cmdObj, parsedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ status = _extractWriteConcern(cmdObj, parsedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ BSONObjBuilder roleObjBuilder;
- *parsedRoleName = RoleName(user, dbname);
- return Status::OK();
+ // Parse role name
+ std::string roleName;
+ status = bsonExtractStringField(cmdObj, cmdName, &roleName);
+ if (!status.isOK()) {
+ return status;
}
+ *parsedRoleName = RoleName(roleName, dbname);
- Status parseDropAllRolesFromDatabaseCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- BSONObj* parsedWriteConcern) {
- return parseFromDatabaseCommand(cmdObj, dbname, parsedWriteConcern, "dropAllRolesFromDatabase");
+ // Parse privileges
+ BSONElement privilegesElement;
+ status = bsonExtractTypedField(cmdObj, "privileges", Array, &privilegesElement);
+ if (!status.isOK()) {
+ return status;
+ }
+ status = parseAndValidatePrivilegeArray(BSONArray(privilegesElement.Obj()), parsedPrivileges);
+ if (!status.isOK()) {
+ return status;
+ }
+ if (!parsedPrivileges->size()) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << cmdName << " command requires a non-empty "
+ "\"privileges\" array");
}
- Status parseMergeAuthzCollectionsCommand(const BSONObj& cmdObj,
- MergeAuthzCollectionsArgs* parsedArgs) {
- unordered_set<std::string> validFieldNames;
- validFieldNames.insert("_mergeAuthzCollections");
- validFieldNames.insert("tempUsersCollection");
- validFieldNames.insert("tempRolesCollection");
- validFieldNames.insert("db");
- validFieldNames.insert("drop");
- validFieldNames.insert("writeConcern");
+ return Status::OK();
+}
- Status status = _checkNoExtraFields(cmdObj, "_mergeAuthzCollections", validFieldNames);
- if (!status.isOK()) {
- return status;
- }
+Status parseDropRoleCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ RoleName* parsedRoleName,
+ BSONObj* parsedWriteConcern) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert("dropRole");
+ validFieldNames.insert("writeConcern");
- status = _extractWriteConcern(cmdObj, &parsedArgs->writeConcern);
- if (!status.isOK()) {
- return status;
- }
+ Status status = _checkNoExtraFields(cmdObj, "dropRole", validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
- status = bsonExtractStringFieldWithDefault(cmdObj,
- "tempUsersCollection",
- "",
- &parsedArgs->usersCollName);
- if (!status.isOK()) {
- return status;
- }
+ std::string user;
+ status = bsonExtractStringField(cmdObj, "dropRole", &user);
+ if (!status.isOK()) {
+ return status;
+ }
- status = bsonExtractStringFieldWithDefault(cmdObj,
- "tempRolesCollection",
- "",
- &parsedArgs->rolesCollName);
- if (!status.isOK()) {
- return status;
- }
+ status = _extractWriteConcern(cmdObj, parsedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- status = bsonExtractStringField(cmdObj, "db", &parsedArgs->db);
- if (!status.isOK()) {
- if (status == ErrorCodes::NoSuchKey) {
- return Status(ErrorCodes::OutdatedClient,
- "Missing \"db\" field for _mergeAuthzCollections command. This is "
- "most likely due to running an outdated (pre-2.6.4) version of "
- "mongorestore.");
- }
- return status;
- }
+ *parsedRoleName = RoleName(user, dbname);
+ return Status::OK();
+}
+
+Status parseDropAllRolesFromDatabaseCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ BSONObj* parsedWriteConcern) {
+ return parseFromDatabaseCommand(cmdObj, dbname, parsedWriteConcern, "dropAllRolesFromDatabase");
+}
+
+Status parseMergeAuthzCollectionsCommand(const BSONObj& cmdObj,
+ MergeAuthzCollectionsArgs* parsedArgs) {
+ unordered_set<std::string> validFieldNames;
+ validFieldNames.insert("_mergeAuthzCollections");
+ validFieldNames.insert("tempUsersCollection");
+ validFieldNames.insert("tempRolesCollection");
+ validFieldNames.insert("db");
+ validFieldNames.insert("drop");
+ validFieldNames.insert("writeConcern");
+
+ Status status = _checkNoExtraFields(cmdObj, "_mergeAuthzCollections", validFieldNames);
+ if (!status.isOK()) {
+ return status;
+ }
- status = bsonExtractBooleanFieldWithDefault(cmdObj,
- "drop",
- false,
- &parsedArgs->drop);
- if (!status.isOK()) {
- return status;
+ status = _extractWriteConcern(cmdObj, &parsedArgs->writeConcern);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ status = bsonExtractStringFieldWithDefault(
+ cmdObj, "tempUsersCollection", "", &parsedArgs->usersCollName);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ status = bsonExtractStringFieldWithDefault(
+ cmdObj, "tempRolesCollection", "", &parsedArgs->rolesCollName);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ status = bsonExtractStringField(cmdObj, "db", &parsedArgs->db);
+ if (!status.isOK()) {
+ if (status == ErrorCodes::NoSuchKey) {
+ return Status(ErrorCodes::OutdatedClient,
+ "Missing \"db\" field for _mergeAuthzCollections command. This is "
+ "most likely due to running an outdated (pre-2.6.4) version of "
+ "mongorestore.");
}
+ return status;
+ }
- return Status::OK();
+ status = bsonExtractBooleanFieldWithDefault(cmdObj, "drop", false, &parsedArgs->drop);
+ if (!status.isOK()) {
+ return status;
}
-} // namespace auth
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace auth
+} // namespace mongo
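Editor's sketch (not part of the patch): a minimal call site for the reformatted parseUsersInfoCommand above, assuming the UsersInfoArgs struct declared in the companion header. The function name and include paths other than user_management_commands_parser.h are assumptions.

#include "mongo/db/auth/user_management_commands_parser.h"
#include "mongo/db/jsobj.h"

namespace {

// Hypothetical call site, not in the patch: parse a usersInfo invocation against db "test".
// With usersInfo:1 the parser sets allForDB instead of filling userNames; showPrivileges
// and showCredentials default to false when absent from the command object.
mongo::Status parseUsersInfoExample() {
    mongo::auth::UsersInfoArgs args;
    mongo::BSONObj cmd = BSON("usersInfo" << 1 << "showCredentials" << true);
    mongo::Status status = mongo::auth::parseUsersInfoCommand(cmd, "test", &args);
    // On success: args.allForDB is true, args.showCredentials is true,
    // and args.showPrivileges is false.
    return status;
}

}  // namespace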
diff --git a/src/mongo/db/auth/user_management_commands_parser.h b/src/mongo/db/auth/user_management_commands_parser.h
index 68ef26ce894..c55210e8978 100644
--- a/src/mongo/db/auth/user_management_commands_parser.h
+++ b/src/mongo/db/auth/user_management_commands_parser.h
@@ -43,190 +43,185 @@
namespace mongo {
namespace auth {
- struct CreateOrUpdateUserArgs {
- UserName userName;
- bool hasHashedPassword;
- std::string hashedPassword;
- bool hasCustomData;
- BSONObj customData;
- bool hasRoles;
- std::vector<RoleName> roles;
- BSONObj writeConcern;
-
- CreateOrUpdateUserArgs() :
- hasHashedPassword(false), hasCustomData(false), hasRoles(false) {}
- };
-
- /**
- * Takes a command object describing an invocation of the "createUser" or "updateUser" commands
- * (which command it is is specified in "cmdName") on the database "dbname", and parses out all
- * the arguments into the "parsedArgs" output param.
- */
- Status parseCreateOrUpdateUserCommands(const BSONObj& cmdObj,
- StringData cmdName,
- const std::string& dbname,
- CreateOrUpdateUserArgs* parsedArgs);
-
- /**
- * Takes a command object describing an invocation of one of "grantRolesToUser",
- * "revokeRolesFromUser", "grantDelegateRolesToUser", "revokeDelegateRolesFromUser",
- * "grantRolesToRole", and "revokeRolesFromRoles" (which command it is is specified in the
- * "cmdName" argument), and parses out (into the parsedName out param) the user/role name of
- * the user/roles being modified, the roles being granted or revoked, and the write concern to
- * use.
- */
- Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj,
- StringData cmdName,
- const std::string& dbname,
- std::string* parsedName,
- std::vector<RoleName>* parsedRoleNames,
- BSONObj* parsedWriteConcern);
-
- /**
- * Takes a command object describing an invocation of the "dropUser" command and parses out
- * the UserName of the user to be removed and the writeConcern.
- * Also validates the input and returns a non-ok Status if there is anything wrong.
- */
- Status parseAndValidateDropUserCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- UserName* parsedUserName,
- BSONObj* parsedWriteConcern);
-
- /**
- * Takes a command object describing an invocation of the "dropAllUsersFromDatabase" command and
- * parses out the write concern.
- * Also validates the input and returns a non-ok Status if there is anything wrong.
- */
- Status parseAndValidateDropAllUsersFromDatabaseCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- BSONObj* parsedWriteConcern);
-
- struct UsersInfoArgs {
- std::vector<UserName> userNames;
- bool allForDB;
- bool showPrivileges;
- bool showCredentials;
- UsersInfoArgs() : allForDB(false), showPrivileges(false), showCredentials(false) {}
- };
-
- /**
- * Takes a command object describing an invocation of the "usersInfo" command and parses out
- * all the arguments into the "parsedArgs" output param.
- */
- Status parseUsersInfoCommand(const BSONObj& cmdObj,
- StringData dbname,
- UsersInfoArgs* parsedArgs);
-
- struct RolesInfoArgs {
- std::vector<RoleName> roleNames;
- bool allForDB;
- bool showPrivileges;
- bool showBuiltinRoles;
- RolesInfoArgs() : allForDB(false), showPrivileges(false), showBuiltinRoles(false) {}
- };
-
- /**
- * Takes a command object describing an invocation of the "rolesInfo" command and parses out
- * the arguments into the "parsedArgs" output param.
- */
- Status parseRolesInfoCommand(const BSONObj& cmdObj,
- StringData dbname,
- RolesInfoArgs* parsedArgs);
-
- struct CreateOrUpdateRoleArgs {
- RoleName roleName;
- bool hasRoles;
- std::vector<RoleName> roles;
- bool hasPrivileges;
- PrivilegeVector privileges;
- BSONObj writeConcern;
- CreateOrUpdateRoleArgs() : hasRoles(false), hasPrivileges(false) {}
- };
-
- /**
- * Takes a command object describing an invocation of the "createRole" or "updateRole" commands
- * (which command it is is specified in "cmdName") on the database "dbname", and parses out all
- * the arguments into the "parsedArgs" output param.
- */
- Status parseCreateOrUpdateRoleCommands(const BSONObj& cmdObj,
- StringData cmdName,
- const std::string& dbname,
- CreateOrUpdateRoleArgs* parsedArgs);
-
- /**
- * Takes a command object describing an invocation of the "grantPrivilegesToRole" or
- * "revokePrivilegesFromRole" commands, and parses out the role name of the
- * role being modified, the privileges being granted or revoked, and the write concern to use.
- */
- Status parseAndValidateRolePrivilegeManipulationCommands(const BSONObj& cmdObj,
- StringData cmdName,
- const std::string& dbname,
- RoleName* parsedRoleName,
- PrivilegeVector* parsedPrivileges,
- BSONObj* parsedWriteConcern);
-
- /**
- * Takes a command object describing an invocation of the "dropRole" command and parses out
- * the RoleName of the role to be removed and the writeConcern.
- */
- Status parseDropRoleCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- RoleName* parsedRoleName,
- BSONObj* parsedWriteConcern);
-
- /**
- * Takes a command object describing an invocation of the "dropAllRolesFromDatabase" command and
- * parses out the write concern.
- */
- Status parseDropAllRolesFromDatabaseCommand(const BSONObj& cmdObj,
- const std::string& dbname,
- BSONObj* parsedWriteConcern);
-
- /**
- * Parses the privileges described in "privileges" into a vector of Privilege objects.
- * Returns Status::OK() upon successfully parsing all the elements of "privileges".
- */
- Status parseAndValidatePrivilegeArray(const BSONArray& privileges,
- PrivilegeVector* parsedPrivileges);
-
- /**
- * Takes a BSONArray of name,db pair documents, parses that array and returns (via the
- * output param parsedRoleNames) a list of the role names in the input array.
- * Performs syntactic validation of "rolesArray", only.
- */
- Status parseRoleNamesFromBSONArray(const BSONArray& rolesArray,
- StringData dbname,
- std::vector<RoleName>* parsedRoleNames);
-
- /**
- * Takes a BSONArray of name,db pair documents, parses that array and returns (via the
- * output param parsedUserNames) a list of the usernames in the input array.
- * Performs syntactic validation of "usersArray", only.
- */
- Status parseUserNamesFromBSONArray(const BSONArray& usersArray,
- StringData dbname,
- std::vector<UserName>* parsedUserNames);
-
- struct MergeAuthzCollectionsArgs {
- std::string usersCollName;
- std::string rolesCollName;
- std::string db;
- bool drop;
- BSONObj writeConcern;
- MergeAuthzCollectionsArgs() : drop(false) {}
- };
-
- /**
- * Takes a command object describing an invocation of the "_mergeAuthzCollections" command and
- * parses out the name of the temporary collections to use for user and role data, whether or
- * not to drop the existing users/roles, the database if this is a for a db-specific restore,
- * and the writeConcern.
- * Returns ErrorCodes::OutdatedClient if the "db" field is missing, as that likely indicates
- * the command was sent by an outdated (pre 2.6.4) version of mongorestore.
- * Returns other codes indicating missing or incorrectly typed fields.
- */
- Status parseMergeAuthzCollectionsCommand(const BSONObj& cmdObj,
- MergeAuthzCollectionsArgs* parsedArgs);
-
-} // namespace auth
-} // namespace mongo
+struct CreateOrUpdateUserArgs {
+ UserName userName;
+ bool hasHashedPassword;
+ std::string hashedPassword;
+ bool hasCustomData;
+ BSONObj customData;
+ bool hasRoles;
+ std::vector<RoleName> roles;
+ BSONObj writeConcern;
+
+ CreateOrUpdateUserArgs() : hasHashedPassword(false), hasCustomData(false), hasRoles(false) {}
+};
+
+/**
+ * Takes a command object describing an invocation of the "createUser" or "updateUser" commands
+ * (which command it is, is specified in "cmdName") on the database "dbname", and parses out all
+ * the arguments into the "parsedArgs" output param.
+ */
+Status parseCreateOrUpdateUserCommands(const BSONObj& cmdObj,
+ StringData cmdName,
+ const std::string& dbname,
+ CreateOrUpdateUserArgs* parsedArgs);
+
+/**
+ * Takes a command object describing an invocation of one of "grantRolesToUser",
+ * "revokeRolesFromUser", "grantDelegateRolesToUser", "revokeDelegateRolesFromUser",
+ * "grantRolesToRole", and "revokeRolesFromRoles" (which command it is is specified in the
+ * "cmdName" argument), and parses out (into the parsedName out param) the user/role name of
+ * the user/roles being modified, the roles being granted or revoked, and the write concern to
+ * use.
+ */
+Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj,
+ StringData cmdName,
+ const std::string& dbname,
+ std::string* parsedName,
+ std::vector<RoleName>* parsedRoleNames,
+ BSONObj* parsedWriteConcern);
+
+/**
+ * Takes a command object describing an invocation of the "dropUser" command and parses out
+ * the UserName of the user to be removed and the writeConcern.
+ * Also validates the input and returns a non-ok Status if there is anything wrong.
+ */
+Status parseAndValidateDropUserCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ UserName* parsedUserName,
+ BSONObj* parsedWriteConcern);
+
+/**
+ * Takes a command object describing an invocation of the "dropAllUsersFromDatabase" command and
+ * parses out the write concern.
+ * Also validates the input and returns a non-ok Status if there is anything wrong.
+ */
+Status parseAndValidateDropAllUsersFromDatabaseCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ BSONObj* parsedWriteConcern);
+
+struct UsersInfoArgs {
+ std::vector<UserName> userNames;
+ bool allForDB;
+ bool showPrivileges;
+ bool showCredentials;
+ UsersInfoArgs() : allForDB(false), showPrivileges(false), showCredentials(false) {}
+};
+
+/**
+ * Takes a command object describing an invocation of the "usersInfo" command and parses out
+ * all the arguments into the "parsedArgs" output param.
+ */
+Status parseUsersInfoCommand(const BSONObj& cmdObj, StringData dbname, UsersInfoArgs* parsedArgs);
+
+struct RolesInfoArgs {
+ std::vector<RoleName> roleNames;
+ bool allForDB;
+ bool showPrivileges;
+ bool showBuiltinRoles;
+ RolesInfoArgs() : allForDB(false), showPrivileges(false), showBuiltinRoles(false) {}
+};
+
+/**
+ * Takes a command object describing an invocation of the "rolesInfo" command and parses out
+ * the arguments into the "parsedArgs" output param.
+ */
+Status parseRolesInfoCommand(const BSONObj& cmdObj, StringData dbname, RolesInfoArgs* parsedArgs);
+
+struct CreateOrUpdateRoleArgs {
+ RoleName roleName;
+ bool hasRoles;
+ std::vector<RoleName> roles;
+ bool hasPrivileges;
+ PrivilegeVector privileges;
+ BSONObj writeConcern;
+ CreateOrUpdateRoleArgs() : hasRoles(false), hasPrivileges(false) {}
+};
+
+/**
+ * Takes a command object describing an invocation of the "createRole" or "updateRole" commands
+ * (which command it is, is specified in "cmdName") on the database "dbname", and parses out all
+ * the arguments into the "parsedArgs" output param.
+ */
+Status parseCreateOrUpdateRoleCommands(const BSONObj& cmdObj,
+ StringData cmdName,
+ const std::string& dbname,
+ CreateOrUpdateRoleArgs* parsedArgs);
+
+/**
+ * Takes a command object describing an invocation of the "grantPrivilegesToRole" or
+ * "revokePrivilegesFromRole" commands, and parses out the role name of the
+ * role being modified, the privileges being granted or revoked, and the write concern to use.
+ */
+Status parseAndValidateRolePrivilegeManipulationCommands(const BSONObj& cmdObj,
+ StringData cmdName,
+ const std::string& dbname,
+ RoleName* parsedRoleName,
+ PrivilegeVector* parsedPrivileges,
+ BSONObj* parsedWriteConcern);
+
+/**
+ * Takes a command object describing an invocation of the "dropRole" command and parses out
+ * the RoleName of the role to be removed and the writeConcern.
+ */
+Status parseDropRoleCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ RoleName* parsedRoleName,
+ BSONObj* parsedWriteConcern);
+
+/**
+ * Takes a command object describing an invocation of the "dropAllRolesFromDatabase" command and
+ * parses out the write concern.
+ */
+Status parseDropAllRolesFromDatabaseCommand(const BSONObj& cmdObj,
+ const std::string& dbname,
+ BSONObj* parsedWriteConcern);
+
+/**
+ * Parses the privileges described in "privileges" into a vector of Privilege objects.
+ * Returns Status::OK() upon successfully parsing all the elements of "privileges".
+ */
+Status parseAndValidatePrivilegeArray(const BSONArray& privileges,
+ PrivilegeVector* parsedPrivileges);
+
+/**
+ * Takes a BSONArray of name,db pair documents, parses that array and returns (via the
+ * output param parsedRoleNames) a list of the role names in the input array.
+ * Performs syntactic validation of "rolesArray", only.
+ */
+Status parseRoleNamesFromBSONArray(const BSONArray& rolesArray,
+ StringData dbname,
+ std::vector<RoleName>* parsedRoleNames);
+
+/**
+ * Takes a BSONArray of name,db pair documents, parses that array and returns (via the
+ * output param parsedUserNames) a list of the usernames in the input array.
+ * Performs syntactic validation of "usersArray", only.
+ */
+Status parseUserNamesFromBSONArray(const BSONArray& usersArray,
+ StringData dbname,
+ std::vector<UserName>* parsedUserNames);
+
+struct MergeAuthzCollectionsArgs {
+ std::string usersCollName;
+ std::string rolesCollName;
+ std::string db;
+ bool drop;
+ BSONObj writeConcern;
+ MergeAuthzCollectionsArgs() : drop(false) {}
+};
+
+/**
+ * Takes a command object describing an invocation of the "_mergeAuthzCollections" command and
+ * parses out the names of the temporary collections to use for user and role data, whether or
+ * not to drop the existing users/roles, the database if this is for a db-specific restore,
+ * and the writeConcern.
+ * Returns ErrorCodes::OutdatedClient if the "db" field is missing, as that likely indicates
+ * the command was sent by an outdated (pre 2.6.4) version of mongorestore.
+ * Returns other codes indicating missing or incorrectly typed fields.
+ */
+Status parseMergeAuthzCollectionsCommand(const BSONObj& cmdObj,
+ MergeAuthzCollectionsArgs* parsedArgs);
+
+} // namespace auth
+} // namespace mongo
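
The array helpers declared above are the building blocks the command parsers share. A small sketch of parseRoleNamesFromBSONArray (the role and database names are made up; the document-per-element form follows the doc comment, and BSONArrayBuilder/BSON are the standard mongo helpers assumed to be available via "mongo/db/jsobj.h"):

    #include <vector>
    #include "mongo/db/auth/user_management_commands_parser.h"
    #include "mongo/db/jsobj.h"

    namespace mongo {
    Status roleArraySketch(std::vector<RoleName>* parsedRoleNames) {
        BSONArrayBuilder roles;
        roles.append(BSON("role" << "readWrite" << "db" << "test"));
        roles.append(BSON("role" << "dbAdmin" << "db" << "test"));
        // Syntactic validation only: each element must name a role and a db.
        return auth::parseRoleNamesFromBSONArray(roles.arr(), "test", parsedRoleNames);
    }
    }  // namespace mongo
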
diff --git a/src/mongo/db/auth/user_name.cpp b/src/mongo/db/auth/user_name.cpp
index 422f415d478..c2e768edf95 100644
--- a/src/mongo/db/auth/user_name.cpp
+++ b/src/mongo/db/auth/user_name.cpp
@@ -35,20 +35,19 @@
namespace mongo {
- UserName::UserName(StringData user, StringData dbname) {
- _fullName.resize(user.size() + dbname.size() + 1);
- std::string::iterator iter = std::copy(user.rawData(),
- user.rawData() + user.size(),
- _fullName.begin());
- *iter = '@';
- ++iter;
- iter = std::copy(dbname.rawData(), dbname.rawData() + dbname.size(), iter);
- dassert(iter == _fullName.end());
- _splitPoint = user.size();
- }
+UserName::UserName(StringData user, StringData dbname) {
+ _fullName.resize(user.size() + dbname.size() + 1);
+ std::string::iterator iter =
+ std::copy(user.rawData(), user.rawData() + user.size(), _fullName.begin());
+ *iter = '@';
+ ++iter;
+ iter = std::copy(dbname.rawData(), dbname.rawData() + dbname.size(), iter);
+ dassert(iter == _fullName.end());
+ _splitPoint = user.size();
+}
- std::ostream& operator<<(std::ostream& os, const UserName& name) {
- return os << name.getFullName();
- }
+std::ostream& operator<<(std::ostream& os, const UserName& name) {
+ return os << name.getFullName();
+}
} // namespace mongo
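
The constructor above packs both parts into a single backing string, so the accessors in the header are just substring views over it. A tiny illustration of the resulting layout (explanatory only, not part of the patch):

    #include "mongo/db/auth/user_name.h"

    mongo::UserName n("alice", "admin");
    // n.getFullName() == "alice@admin"   (one string, '@' written at _splitPoint)
    // n.getUser()     == "alice"         (substr(0, _splitPoint))
    // n.getDB()       == "admin"         (substr(_splitPoint + 1))
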
diff --git a/src/mongo/db/auth/user_name.h b/src/mongo/db/auth/user_name.h
index 55220f33e10..03f39fa42d6 100644
--- a/src/mongo/db/auth/user_name.h
+++ b/src/mongo/db/auth/user_name.h
@@ -37,126 +37,153 @@
namespace mongo {
+/**
+ * Representation of a name of a principal (authenticatable user) in a MongoDB system.
+ *
+ * Consists of a "user name" part, and a "database name" part.
+ */
+class UserName {
+public:
+ UserName() : _splitPoint(0) {}
+ UserName(StringData user, StringData dbname);
+
/**
- * Representation of a name of a principal (authenticatable user) in a MongoDB system.
- *
- * Consists of a "user name" part, and a "database name" part.
+ * Gets the user part of a UserName.
*/
- class UserName {
- public:
- UserName() : _splitPoint(0) {}
- UserName(StringData user, StringData dbname);
-
- /**
- * Gets the user part of a UserName.
- */
- StringData getUser() const { return StringData(_fullName).substr(0, _splitPoint); }
-
- /**
- * Gets the database name part of a UserName.
- */
- StringData getDB() const { return StringData(_fullName).substr(_splitPoint + 1); }
-
- /**
- * Gets the full unique name of a user as a string, formatted as "user@db".
- */
- const std::string& getFullName() const { return _fullName; }
-
- /**
- * Stringifies the object, for logging/debugging.
- */
- std::string toString() const { return getFullName(); }
-
- private:
- std::string _fullName; // The full name, stored as a string. "user@db".
- size_t _splitPoint; // The index of the "@" separating the user and db name parts.
- };
-
- static inline bool operator==(const UserName& lhs, const UserName& rhs) {
- return lhs.getFullName() == rhs.getFullName();
+ StringData getUser() const {
+ return StringData(_fullName).substr(0, _splitPoint);
}
- static inline bool operator!=(const UserName& lhs, const UserName& rhs) {
- return lhs.getFullName() != rhs.getFullName();
+ /**
+ * Gets the database name part of a UserName.
+ */
+ StringData getDB() const {
+ return StringData(_fullName).substr(_splitPoint + 1);
}
- static inline bool operator<(const UserName& lhs, const UserName& rhs) {
- return lhs.getFullName() < rhs.getFullName();
+ /**
+ * Gets the full unique name of a user as a string, formatted as "user@db".
+ */
+ const std::string& getFullName() const {
+ return _fullName;
}
- std::ostream& operator<<(std::ostream& os, const UserName& name);
-
/**
- * Iterator over an unspecified container of UserName objects.
+ * Stringifies the object, for logging/debugging.
*/
- class UserNameIterator {
- public:
- class Impl {
- MONGO_DISALLOW_COPYING(Impl);
- public:
- Impl() {};
- virtual ~Impl() {};
- static Impl* clone(Impl* orig) { return orig ? orig->doClone(): NULL; }
- virtual bool more() const = 0;
- virtual const UserName& get() const = 0;
-
- virtual const UserName& next() = 0;
-
- private:
- virtual Impl* doClone() const = 0;
- };
-
- UserNameIterator() : _impl(nullptr) {}
- UserNameIterator(const UserNameIterator& other) : _impl(Impl::clone(other._impl.get())) {}
- explicit UserNameIterator(Impl* impl) : _impl(impl) {}
-
- UserNameIterator& operator=(const UserNameIterator& other) {
- _impl.reset(Impl::clone(other._impl.get()));
- return *this;
- }
+ std::string toString() const {
+ return getFullName();
+ }
- bool more() const { return _impl.get() && _impl->more(); }
- const UserName& get() const { return _impl->get(); }
+private:
+ std::string _fullName; // The full name, stored as a string. "user@db".
+ size_t _splitPoint; // The index of the "@" separating the user and db name parts.
+};
- const UserName& next() { return _impl->next(); }
+static inline bool operator==(const UserName& lhs, const UserName& rhs) {
+ return lhs.getFullName() == rhs.getFullName();
+}
- const UserName& operator*() const { return get(); }
- const UserName* operator->() const { return &get(); }
+static inline bool operator!=(const UserName& lhs, const UserName& rhs) {
+ return lhs.getFullName() != rhs.getFullName();
+}
- private:
- std::unique_ptr<Impl> _impl;
- };
+static inline bool operator<(const UserName& lhs, const UserName& rhs) {
+ return lhs.getFullName() < rhs.getFullName();
+}
+
+std::ostream& operator<<(std::ostream& os, const UserName& name);
+/**
+ * Iterator over an unspecified container of UserName objects.
+ */
+class UserNameIterator {
+public:
+ class Impl {
+ MONGO_DISALLOW_COPYING(Impl);
- template <typename ContainerIterator>
- class UserNameContainerIteratorImpl : public UserNameIterator::Impl {
- MONGO_DISALLOW_COPYING(UserNameContainerIteratorImpl);
public:
- UserNameContainerIteratorImpl(const ContainerIterator& begin,
- const ContainerIterator& end) :
- _curr(begin), _end(end) {}
- virtual ~UserNameContainerIteratorImpl() {}
- virtual bool more() const { return _curr != _end; }
- virtual const UserName& next() { return *(_curr++); }
- virtual const UserName& get() const { return *_curr; }
- virtual UserNameIterator::Impl* doClone() const {
- return new UserNameContainerIteratorImpl(_curr, _end);
+ Impl(){};
+ virtual ~Impl(){};
+ static Impl* clone(Impl* orig) {
+ return orig ? orig->doClone() : NULL;
}
+ virtual bool more() const = 0;
+ virtual const UserName& get() const = 0;
+
+ virtual const UserName& next() = 0;
private:
- ContainerIterator _curr;
- ContainerIterator _end;
+ virtual Impl* doClone() const = 0;
};
- template <typename ContainerIterator>
- UserNameIterator makeUserNameIterator(const ContainerIterator& begin,
- const ContainerIterator& end) {
- return UserNameIterator( new UserNameContainerIteratorImpl<ContainerIterator>(begin, end));
+ UserNameIterator() : _impl(nullptr) {}
+ UserNameIterator(const UserNameIterator& other) : _impl(Impl::clone(other._impl.get())) {}
+ explicit UserNameIterator(Impl* impl) : _impl(impl) {}
+
+ UserNameIterator& operator=(const UserNameIterator& other) {
+ _impl.reset(Impl::clone(other._impl.get()));
+ return *this;
+ }
+
+ bool more() const {
+ return _impl.get() && _impl->more();
+ }
+ const UserName& get() const {
+ return _impl->get();
+ }
+
+ const UserName& next() {
+ return _impl->next();
}
- template <typename Container>
- UserNameIterator makeUserNameIteratorForContainer(const Container& container) {
- return makeUserNameIterator(container.begin(), container.end());
+ const UserName& operator*() const {
+ return get();
}
+ const UserName* operator->() const {
+ return &get();
+ }
+
+private:
+ std::unique_ptr<Impl> _impl;
+};
+
+
+template <typename ContainerIterator>
+class UserNameContainerIteratorImpl : public UserNameIterator::Impl {
+ MONGO_DISALLOW_COPYING(UserNameContainerIteratorImpl);
+
+public:
+ UserNameContainerIteratorImpl(const ContainerIterator& begin, const ContainerIterator& end)
+ : _curr(begin), _end(end) {}
+ virtual ~UserNameContainerIteratorImpl() {}
+ virtual bool more() const {
+ return _curr != _end;
+ }
+ virtual const UserName& next() {
+ return *(_curr++);
+ }
+ virtual const UserName& get() const {
+ return *_curr;
+ }
+ virtual UserNameIterator::Impl* doClone() const {
+ return new UserNameContainerIteratorImpl(_curr, _end);
+ }
+
+private:
+ ContainerIterator _curr;
+ ContainerIterator _end;
+};
+
+template <typename ContainerIterator>
+UserNameIterator makeUserNameIterator(const ContainerIterator& begin,
+ const ContainerIterator& end) {
+ return UserNameIterator(new UserNameContainerIteratorImpl<ContainerIterator>(begin, end));
+}
+
+template <typename Container>
+UserNameIterator makeUserNameIteratorForContainer(const Container& container) {
+ return makeUserNameIterator(container.begin(), container.end());
+}
} // namespace mongo
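
makeUserNameIteratorForContainer, declared just above, is how callers hand a plain container of UserName objects to code that only understands the type-erased UserNameIterator. A minimal sketch (the vector contents are invented; operator<< is the stream operator declared in this header):

    #include <iostream>
    #include <vector>
    #include "mongo/db/auth/user_name.h"

    void printNames(const std::vector<mongo::UserName>& names) {
        mongo::UserNameIterator it = mongo::makeUserNameIteratorForContainer(names);
        while (it.more()) {
            std::cout << it.next() << '\n';  // next() returns the current name and advances
        }
    }
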
diff --git a/src/mongo/db/auth/user_name_hash.h b/src/mongo/db/auth/user_name_hash.h
index f60a897f590..6f38d9ccfda 100644
--- a/src/mongo/db/auth/user_name_hash.h
+++ b/src/mongo/db/auth/user_name_hash.h
@@ -35,9 +35,10 @@
// Define hash function for UserNames so they can be keys in std::unordered_map
MONGO_HASH_NAMESPACE_START
- template <> struct hash<mongo::UserName> {
- size_t operator()(const mongo::UserName& pname) const {
- return hash<std::string>()(pname.getFullName());
- }
- };
+template <>
+struct hash<mongo::UserName> {
+ size_t operator()(const mongo::UserName& pname) const {
+ return hash<std::string>()(pname.getFullName());
+ }
+};
MONGO_HASH_NAMESPACE_END
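
With the specialization above (plus the equality operator from user_name.h), UserName can serve directly as a hash-map key. A minimal sketch, assuming MONGO_HASH_NAMESPACE resolves to std on this toolchain:

    #include <unordered_map>
    #include "mongo/db/auth/user_name.h"
    #include "mongo/db/auth/user_name_hash.h"

    void countLogin(std::unordered_map<mongo::UserName, int>* counts) {
        // Hashes the full "user@db" name, e.g. "alice@admin".
        ++(*counts)[mongo::UserName("alice", "admin")];
    }
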
diff --git a/src/mongo/db/auth/user_set.cpp b/src/mongo/db/auth/user_set.cpp
index c81869a8ad5..2616b5cc697 100644
--- a/src/mongo/db/auth/user_set.cpp
+++ b/src/mongo/db/auth/user_set.cpp
@@ -36,94 +36,99 @@
namespace mongo {
namespace {
- class UserSetNameIteratorImpl : public UserNameIterator::Impl {
- MONGO_DISALLOW_COPYING(UserSetNameIteratorImpl);
- public:
- UserSetNameIteratorImpl(const UserSet::iterator& begin,
- const UserSet::iterator& end) :
- _curr(begin), _end(end) {}
- virtual ~UserSetNameIteratorImpl() {}
- virtual bool more() const { return _curr != _end; }
- virtual const UserName& next() { return (*(_curr++))->getName(); }
- virtual const UserName& get() const { return (*_curr)->getName(); }
- virtual UserNameIterator::Impl* doClone() const {
- return new UserSetNameIteratorImpl(_curr, _end);
- }
+class UserSetNameIteratorImpl : public UserNameIterator::Impl {
+ MONGO_DISALLOW_COPYING(UserSetNameIteratorImpl);
- private:
- UserSet::iterator _curr;
- UserSet::iterator _end;
- };
-} // namespace
+public:
+ UserSetNameIteratorImpl(const UserSet::iterator& begin, const UserSet::iterator& end)
+ : _curr(begin), _end(end) {}
+ virtual ~UserSetNameIteratorImpl() {}
+ virtual bool more() const {
+ return _curr != _end;
+ }
+ virtual const UserName& next() {
+ return (*(_curr++))->getName();
+ }
+ virtual const UserName& get() const {
+ return (*_curr)->getName();
+ }
+ virtual UserNameIterator::Impl* doClone() const {
+ return new UserSetNameIteratorImpl(_curr, _end);
+ }
- UserSet::UserSet() : _users(), _usersEnd(_users.end()) {}
- UserSet::~UserSet() {}
+private:
+ UserSet::iterator _curr;
+ UserSet::iterator _end;
+};
+} // namespace
- User* UserSet::add(User* user) {
- for (mutable_iterator it = mbegin(); it != mend(); ++it) {
- User* current = *it;
- if (current->getName().getDB() == user->getName().getDB()) {
- // There can be only one user per database.
- *it = user;
- return current;
- }
- }
- if (_usersEnd == _users.end()) {
- _users.push_back(user);
- _usersEnd = _users.end();
- }
- else {
- *_usersEnd = user;
- ++_usersEnd;
+UserSet::UserSet() : _users(), _usersEnd(_users.end()) {}
+UserSet::~UserSet() {}
+
+User* UserSet::add(User* user) {
+ for (mutable_iterator it = mbegin(); it != mend(); ++it) {
+ User* current = *it;
+ if (current->getName().getDB() == user->getName().getDB()) {
+ // There can be only one user per database.
+ *it = user;
+ return current;
}
- return NULL;
}
+ if (_usersEnd == _users.end()) {
+ _users.push_back(user);
+ _usersEnd = _users.end();
+ } else {
+ *_usersEnd = user;
+ ++_usersEnd;
+ }
+ return NULL;
+}
- User* UserSet::removeByDBName(StringData dbname) {
- for (iterator it = begin(); it != end(); ++it) {
- User* current = *it;
- if (current->getName().getDB() == dbname) {
- return removeAt(it);
- }
+User* UserSet::removeByDBName(StringData dbname) {
+ for (iterator it = begin(); it != end(); ++it) {
+ User* current = *it;
+ if (current->getName().getDB() == dbname) {
+ return removeAt(it);
}
- return NULL;
}
+ return NULL;
+}
- User* UserSet::replaceAt(iterator it, User* replacement) {
- size_t offset = it - begin();
- User* old = _users[offset];
- _users[offset] = replacement;
- return old;
- }
+User* UserSet::replaceAt(iterator it, User* replacement) {
+ size_t offset = it - begin();
+ User* old = _users[offset];
+ _users[offset] = replacement;
+ return old;
+}
- User* UserSet::removeAt(iterator it) {
- size_t offset = it - begin();
- User* old = _users[offset];
- --_usersEnd;
- _users[offset] = *_usersEnd;
- *_usersEnd = NULL;
- return old;
- }
+User* UserSet::removeAt(iterator it) {
+ size_t offset = it - begin();
+ User* old = _users[offset];
+ --_usersEnd;
+ _users[offset] = *_usersEnd;
+ *_usersEnd = NULL;
+ return old;
+}
- User* UserSet::lookup(const UserName& name) const {
- User* user = lookupByDBName(name.getDB());
- if (user && user->getName() == name) {
- return user;
- }
- return NULL;
+User* UserSet::lookup(const UserName& name) const {
+ User* user = lookupByDBName(name.getDB());
+ if (user && user->getName() == name) {
+ return user;
}
+ return NULL;
+}
- User* UserSet::lookupByDBName(StringData dbname) const {
- for (iterator it = begin(); it != end(); ++it) {
- User* current = *it;
- if (current->getName().getDB() == dbname) {
- return current;
- }
+User* UserSet::lookupByDBName(StringData dbname) const {
+ for (iterator it = begin(); it != end(); ++it) {
+ User* current = *it;
+ if (current->getName().getDB() == dbname) {
+ return current;
}
- return NULL;
}
+ return NULL;
+}
- UserNameIterator UserSet::getNames() const {
- return UserNameIterator(new UserSetNameIteratorImpl(begin(), end()));
- }
-} // namespace mongo
+UserNameIterator UserSet::getNames() const {
+ return UserNameIterator(new UserSetNameIteratorImpl(begin(), end()));
+}
+} // namespace mongo
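
One detail worth keeping in mind when reading the reformatted add/removeAt above: the set never owns the User pointers it holds, and removal compacts the active range by moving the last active user into the freed slot, so it is O(1) but iteration order is not stable. A small ownership sketch (the user names are invented; it mirrors the unit test further below):

    #include <memory>
    #include "mongo/db/auth/user.h"
    #include "mongo/db/auth/user_name.h"
    #include "mongo/db/auth/user_set.h"

    void userSetSketch() {
        std::unique_ptr<mongo::User> alice(new mongo::User(mongo::UserName("alice", "admin")));
        std::unique_ptr<mongo::User> bob(new mongo::User(mongo::UserName("bob", "test")));

        mongo::UserSet set;
        set.add(alice.get());  // the set does NOT take ownership
        set.add(bob.get());

        mongo::User* removed = set.removeByDBName("admin");  // returns alice.get()
        // The caller must release "removed"; here the unique_ptrs still own both users.
        (void)removed;
    }
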
diff --git a/src/mongo/db/auth/user_set.h b/src/mongo/db/auth/user_set.h
index 8d2bc630d2e..9ad0ab03202 100644
--- a/src/mongo/db/auth/user_set.h
+++ b/src/mongo/db/auth/user_set.h
@@ -39,81 +39,90 @@
namespace mongo {
+/**
+ * A collection of authenticated users.
+ * This class does not do any locking/synchronization, the consumer will be responsible for
+ * synchronizing access.
+ */
+class UserSet {
+ MONGO_DISALLOW_COPYING(UserSet);
+
+public:
+ typedef std::vector<User*>::const_iterator iterator;
+
+ UserSet();
+ ~UserSet();
+
+ /**
+ * Adds a User to the UserSet.
+ *
+ * The UserSet does not take ownership of the User.
+ *
+ * As there can only be one user per database in the UserSet, if a User already exists for
+ * the new User's database, the old user will be removed from the set and returned. It is
+ * the caller's responsibility to then release that user. If no user already exists for the
+ * new user's database, returns NULL.
+ *
+ * Invalidates any outstanding iterators or NameIterators.
+ */
+ User* add(User* user);
+
+ /**
+ * Replaces the user at "it" with "replacement." Does not take ownership of the User.
+ * Returns a pointer to the old user referenced by "it". Does _not_ invalidate "iterator"
+ * instances.
+ */
+ User* replaceAt(iterator it, User* replacement);
+
+ /**
+ * Removes the user at "it", and returns a pointer to it. After this call, "it" remains
+ * valid. It will either equal "end()", or refer to some user between the values of "it"
+ * and "end()" before this call was made.
+ */
+ User* removeAt(iterator it);
+
/**
- * A collection of authenticated users.
- * This class does not do any locking/synchronization, the consumer will be responsible for
- * synchronizing access.
+ * Removes the User whose authentication credentials came from dbname, and returns that
+ * user. It is the caller's responsibility to then release that user back to the
+ * authorizationManager. If no user exists for the given database, returns NULL.
*/
- class UserSet {
- MONGO_DISALLOW_COPYING(UserSet);
- public:
- typedef std::vector<User*>::const_iterator iterator;
-
- UserSet();
- ~UserSet();
-
- /**
- * Adds a User to the UserSet.
- *
- * The UserSet does not take ownership of the User.
- *
- * As there can only be one user per database in the UserSet, if a User already exists for
- * the new User's database, the old user will be removed from the set and returned. It is
- * the caller's responsibility to then release that user. If no user already exists for the
- * new user's database, returns NULL.
- *
- * Invalidates any outstanding iterators or NameIterators.
- */
- User* add(User* user);
-
- /**
- * Replaces the user at "it" with "replacement." Does not take ownership of the User.
- * Returns a pointer to the old user referenced by "it". Does _not_ invalidate "iterator"
- * instances.
- */
- User* replaceAt(iterator it, User* replacement);
-
- /**
- * Removes the user at "it", and returns a pointer to it. After this call, "it" remains
- * valid. It will either equal "end()", or refer to some user between the values of "it"
- * and "end()" before this call was made.
- */
- User* removeAt(iterator it);
-
- /**
- * Removes the User whose authentication credentials came from dbname, and returns that
- * user. It is the caller's responsibility to then release that user back to the
- * authorizationManger. If no user exists for the given database, returns NULL;
- */
- User* removeByDBName(StringData dbname);
-
- // Returns the User with the given name, or NULL if not found.
- // Ownership of the returned User remains with the UserSet. The pointer
- // returned is only guaranteed to remain valid until the next non-const method is called
- // on the UserSet.
- User* lookup(const UserName& name) const;
-
- // Gets the user whose authentication credentials came from dbname, or NULL if none
- // exist. There should be at most one such user.
- User* lookupByDBName(StringData dbname) const;
-
- // Gets an iterator over the names of the users stored in the set. The iterator is
- // valid until the next non-const method is called on the UserSet.
- UserNameIterator getNames() const;
-
- iterator begin() const { return _users.begin(); }
- iterator end() const { return _usersEnd; }
-
- private:
- typedef std::vector<User*>::iterator mutable_iterator;
-
- mutable_iterator mbegin() { return _users.begin(); }
- mutable_iterator mend() { return _usersEnd; }
-
- // The UserSet maintains ownership of the Users in it, and is responsible for
- // returning them to the AuthorizationManager when done with them.
- std::vector<User*> _users;
- std::vector<User*>::iterator _usersEnd;
- };
-
-} // namespace mongo
+ User* removeByDBName(StringData dbname);
+
+ // Returns the User with the given name, or NULL if not found.
+ // Ownership of the returned User remains with the UserSet. The pointer
+ // returned is only guaranteed to remain valid until the next non-const method is called
+ // on the UserSet.
+ User* lookup(const UserName& name) const;
+
+ // Gets the user whose authentication credentials came from dbname, or NULL if none
+ // exist. There should be at most one such user.
+ User* lookupByDBName(StringData dbname) const;
+
+ // Gets an iterator over the names of the users stored in the set. The iterator is
+ // valid until the next non-const method is called on the UserSet.
+ UserNameIterator getNames() const;
+
+ iterator begin() const {
+ return _users.begin();
+ }
+ iterator end() const {
+ return _usersEnd;
+ }
+
+private:
+ typedef std::vector<User*>::iterator mutable_iterator;
+
+ mutable_iterator mbegin() {
+ return _users.begin();
+ }
+ mutable_iterator mend() {
+ return _usersEnd;
+ }
+
+ // The UserSet maintains ownership of the Users in it, and is responsible for
+ // returning them to the AuthorizationManager when done with them.
+ std::vector<User*> _users;
+ std::vector<User*>::iterator _usersEnd;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/auth/user_set_test.cpp b/src/mongo/db/auth/user_set_test.cpp
index c1f036ae289..d8c4290d7c8 100644
--- a/src/mongo/db/auth/user_set_test.cpp
+++ b/src/mongo/db/auth/user_set_test.cpp
@@ -39,78 +39,78 @@
namespace mongo {
namespace {
- TEST(UserSetTest, BasicTest) {
- UserSet set;
-
- User* p1 = new User(UserName("Bob", "test"));
- User* p2 = new User(UserName("George", "test"));
- User* p3 = new User(UserName("Bob", "test2"));
-
- const std::unique_ptr<User> delp1(p1);
- const std::unique_ptr<User> delp2(p2);
- const std::unique_ptr<User> delp3(p3);
-
- ASSERT_NULL(set.lookup(UserName("Bob", "test")));
- ASSERT_NULL(set.lookup(UserName("George", "test")));
- ASSERT_NULL(set.lookup(UserName("Bob", "test2")));
- ASSERT_NULL(set.lookupByDBName("test"));
- ASSERT_NULL(set.lookupByDBName("test2"));
-
- ASSERT_NULL(set.add(p1));
-
- ASSERT_EQUALS(p1, set.lookup(UserName("Bob", "test")));
- ASSERT_EQUALS(p1, set.lookupByDBName("test"));
- ASSERT_NULL(set.lookup(UserName("George", "test")));
- ASSERT_NULL(set.lookup(UserName("Bob", "test2")));
- ASSERT_NULL(set.lookupByDBName("test2"));
-
- // This should not replace the existing user "Bob" because they are different databases
- ASSERT_NULL(set.add(p3));
-
- ASSERT_EQUALS(p1, set.lookup(UserName("Bob", "test")));
- ASSERT_EQUALS(p1, set.lookupByDBName("test"));
- ASSERT_NULL(set.lookup(UserName("George", "test")));
- ASSERT_EQUALS(p3, set.lookup(UserName("Bob", "test2")));
- ASSERT_EQUALS(p3, set.lookupByDBName("test2"));
-
- User* replaced = set.add(p2); // This should replace Bob since they're on the same database
-
- ASSERT_EQUALS(replaced, p1);
- ASSERT_NULL(set.lookup(UserName("Bob", "test")));
- ASSERT_EQUALS(p2, set.lookup(UserName("George", "test")));
- ASSERT_EQUALS(p2, set.lookupByDBName("test"));
- ASSERT_EQUALS(p3, set.lookup(UserName("Bob", "test2")));
- ASSERT_EQUALS(p3, set.lookupByDBName("test2"));
-
- User* removed = set.removeByDBName("test");
-
- ASSERT_EQUALS(removed, p2);
- ASSERT_NULL(set.lookup(UserName("Bob", "test")));
- ASSERT_NULL(set.lookup(UserName("George", "test")));
- ASSERT_NULL(set.lookupByDBName("test"));
- ASSERT_EQUALS(p3, set.lookup(UserName("Bob", "test2")));
- ASSERT_EQUALS(p3, set.lookupByDBName("test2"));
-
- UserNameIterator iter = set.getNames();
- ASSERT_TRUE(iter.more());
- ASSERT_EQUALS(iter.next(), UserName("Bob", "test2"));
- ASSERT_FALSE(iter.more());
- }
-
- TEST(UserSetTest, IterateNames) {
- UserSet pset;
- UserNameIterator iter = pset.getNames();
- ASSERT(!iter.more());
-
- std::unique_ptr<User> user(new User(UserName("bob", "test")));
- ASSERT_NULL(pset.add(user.get()));
-
- iter = pset.getNames();
- ASSERT(iter.more());
- ASSERT_EQUALS(*iter, UserName("bob", "test"));
- ASSERT_EQUALS(iter.next(), UserName("bob", "test"));
- ASSERT(!iter.more());
- }
+TEST(UserSetTest, BasicTest) {
+ UserSet set;
+
+ User* p1 = new User(UserName("Bob", "test"));
+ User* p2 = new User(UserName("George", "test"));
+ User* p3 = new User(UserName("Bob", "test2"));
+
+ const std::unique_ptr<User> delp1(p1);
+ const std::unique_ptr<User> delp2(p2);
+ const std::unique_ptr<User> delp3(p3);
+
+ ASSERT_NULL(set.lookup(UserName("Bob", "test")));
+ ASSERT_NULL(set.lookup(UserName("George", "test")));
+ ASSERT_NULL(set.lookup(UserName("Bob", "test2")));
+ ASSERT_NULL(set.lookupByDBName("test"));
+ ASSERT_NULL(set.lookupByDBName("test2"));
+
+ ASSERT_NULL(set.add(p1));
+
+ ASSERT_EQUALS(p1, set.lookup(UserName("Bob", "test")));
+ ASSERT_EQUALS(p1, set.lookupByDBName("test"));
+ ASSERT_NULL(set.lookup(UserName("George", "test")));
+ ASSERT_NULL(set.lookup(UserName("Bob", "test2")));
+ ASSERT_NULL(set.lookupByDBName("test2"));
+
+ // This should not replace the existing user "Bob" because they are different databases
+ ASSERT_NULL(set.add(p3));
+
+ ASSERT_EQUALS(p1, set.lookup(UserName("Bob", "test")));
+ ASSERT_EQUALS(p1, set.lookupByDBName("test"));
+ ASSERT_NULL(set.lookup(UserName("George", "test")));
+ ASSERT_EQUALS(p3, set.lookup(UserName("Bob", "test2")));
+ ASSERT_EQUALS(p3, set.lookupByDBName("test2"));
+
+ User* replaced = set.add(p2); // This should replace Bob since they're on the same database
+
+ ASSERT_EQUALS(replaced, p1);
+ ASSERT_NULL(set.lookup(UserName("Bob", "test")));
+ ASSERT_EQUALS(p2, set.lookup(UserName("George", "test")));
+ ASSERT_EQUALS(p2, set.lookupByDBName("test"));
+ ASSERT_EQUALS(p3, set.lookup(UserName("Bob", "test2")));
+ ASSERT_EQUALS(p3, set.lookupByDBName("test2"));
+
+ User* removed = set.removeByDBName("test");
+
+ ASSERT_EQUALS(removed, p2);
+ ASSERT_NULL(set.lookup(UserName("Bob", "test")));
+ ASSERT_NULL(set.lookup(UserName("George", "test")));
+ ASSERT_NULL(set.lookupByDBName("test"));
+ ASSERT_EQUALS(p3, set.lookup(UserName("Bob", "test2")));
+ ASSERT_EQUALS(p3, set.lookupByDBName("test2"));
+
+ UserNameIterator iter = set.getNames();
+ ASSERT_TRUE(iter.more());
+ ASSERT_EQUALS(iter.next(), UserName("Bob", "test2"));
+ ASSERT_FALSE(iter.more());
+}
+
+TEST(UserSetTest, IterateNames) {
+ UserSet pset;
+ UserNameIterator iter = pset.getNames();
+ ASSERT(!iter.more());
+
+ std::unique_ptr<User> user(new User(UserName("bob", "test")));
+ ASSERT_NULL(pset.add(user.get()));
+
+ iter = pset.getNames();
+ ASSERT(iter.more());
+ ASSERT_EQUALS(*iter, UserName("bob", "test"));
+ ASSERT_EQUALS(iter.next(), UserName("bob", "test"));
+ ASSERT(!iter.more());
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/background.cpp b/src/mongo/db/background.cpp
index fbf46736f98..0eb45815723 100644
--- a/src/mongo/db/background.cpp
+++ b/src/mongo/db/background.cpp
@@ -45,137 +45,136 @@
namespace mongo {
- using std::shared_ptr;
+using std::shared_ptr;
namespace {
- class BgInfo {
- MONGO_DISALLOW_COPYING(BgInfo);
- public:
- BgInfo() : _opsInProgCount(0) {}
+class BgInfo {
+ MONGO_DISALLOW_COPYING(BgInfo);
- void recordBegin();
- int recordEnd();
- void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk);
+public:
+ BgInfo() : _opsInProgCount(0) {}
- int getOpsInProgCount() const { return _opsInProgCount; }
+ void recordBegin();
+ int recordEnd();
+ void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk);
- private:
- int _opsInProgCount;
- stdx::condition_variable _noOpsInProg;
- };
-
- typedef StringMap<std::shared_ptr<BgInfo> > BgInfoMap;
- typedef BgInfoMap::const_iterator BgInfoMapIterator;
-
- stdx::mutex m;
- BgInfoMap dbsInProg;
- BgInfoMap nsInProg;
-
- void BgInfo::recordBegin() {
- ++_opsInProgCount;
- }
-
- int BgInfo::recordEnd() {
- dassert(_opsInProgCount > 0);
- --_opsInProgCount;
- if (0 == _opsInProgCount) {
- _noOpsInProg.notify_all();
- }
+ int getOpsInProgCount() const {
return _opsInProgCount;
}
- void BgInfo::awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk) {
- while (_opsInProgCount > 0)
- _noOpsInProg.wait(lk);
- }
-
- void recordBeginAndInsert(BgInfoMap* bgiMap, StringData key) {
- std::shared_ptr<BgInfo>& bgInfo = bgiMap->get(key);
- if (!bgInfo)
- bgInfo.reset(new BgInfo);
- bgInfo->recordBegin();
- }
-
- void recordEndAndRemove(BgInfoMap* bgiMap, StringData key) {
- BgInfoMapIterator iter = bgiMap->find(key);
- fassert(17431, iter != bgiMap->end());
- if (0 == iter->second->recordEnd()) {
- bgiMap->erase(iter);
- }
- }
-
- void awaitNoBgOps(
- stdx::unique_lock<stdx::mutex>& lk,
- BgInfoMap* bgiMap,
- StringData key) {
-
- std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(
- *bgiMap, key, std::shared_ptr<BgInfo>());
- if (!bgInfo)
- return;
- bgInfo->awaitNoBgOps(lk);
- }
-
-} // namespace
- bool BackgroundOperation::inProgForDb(StringData db) {
- stdx::lock_guard<stdx::mutex> lk(m);
- return dbsInProg.find(db) != dbsInProg.end();
- }
+private:
+ int _opsInProgCount;
+ stdx::condition_variable _noOpsInProg;
+};
- bool BackgroundOperation::inProgForNs(StringData ns) {
- stdx::lock_guard<stdx::mutex> lk(m);
- return nsInProg.find(ns) != nsInProg.end();
- }
+typedef StringMap<std::shared_ptr<BgInfo>> BgInfoMap;
+typedef BgInfoMap::const_iterator BgInfoMapIterator;
- void BackgroundOperation::assertNoBgOpInProgForDb(StringData db) {
- uassert(ErrorCodes::BackgroundOperationInProgressForDatabase, mongoutils::str::stream() <<
- "cannot perform operation: a background operation is currently running for "
- "database " << db,
- !inProgForDb(db));
- }
+stdx::mutex m;
+BgInfoMap dbsInProg;
+BgInfoMap nsInProg;
- void BackgroundOperation::assertNoBgOpInProgForNs(StringData ns) {
- uassert(ErrorCodes::BackgroundOperationInProgressForNamespace, mongoutils::str::stream() <<
- "cannot perform operation: a background operation is currently running for "
- "collection " << ns,
- !inProgForNs(ns));
- }
+void BgInfo::recordBegin() {
+ ++_opsInProgCount;
+}
- void BackgroundOperation::awaitNoBgOpInProgForDb(StringData db) {
- stdx::unique_lock<stdx::mutex> lk(m);
- awaitNoBgOps(lk, &dbsInProg, db);
+int BgInfo::recordEnd() {
+ dassert(_opsInProgCount > 0);
+ --_opsInProgCount;
+ if (0 == _opsInProgCount) {
+ _noOpsInProg.notify_all();
}
-
- void BackgroundOperation::awaitNoBgOpInProgForNs(StringData ns) {
- stdx::unique_lock<stdx::mutex> lk(m);
- awaitNoBgOps(lk, &nsInProg, ns);
+ return _opsInProgCount;
+}
+
+void BgInfo::awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk) {
+ while (_opsInProgCount > 0)
+ _noOpsInProg.wait(lk);
+}
+
+void recordBeginAndInsert(BgInfoMap* bgiMap, StringData key) {
+ std::shared_ptr<BgInfo>& bgInfo = bgiMap->get(key);
+ if (!bgInfo)
+ bgInfo.reset(new BgInfo);
+ bgInfo->recordBegin();
+}
+
+void recordEndAndRemove(BgInfoMap* bgiMap, StringData key) {
+ BgInfoMapIterator iter = bgiMap->find(key);
+ fassert(17431, iter != bgiMap->end());
+ if (0 == iter->second->recordEnd()) {
+ bgiMap->erase(iter);
}
+}
- BackgroundOperation::BackgroundOperation(StringData ns) : _ns(ns) {
- stdx::lock_guard<stdx::mutex> lk(m);
- recordBeginAndInsert(&dbsInProg, _ns.db());
- recordBeginAndInsert(&nsInProg, _ns.ns());
- }
+void awaitNoBgOps(stdx::unique_lock<stdx::mutex>& lk, BgInfoMap* bgiMap, StringData key) {
+ std::shared_ptr<BgInfo> bgInfo = mapFindWithDefault(*bgiMap, key, std::shared_ptr<BgInfo>());
+ if (!bgInfo)
+ return;
+ bgInfo->awaitNoBgOps(lk);
+}
- BackgroundOperation::~BackgroundOperation() {
- stdx::lock_guard<stdx::mutex> lk(m);
- recordEndAndRemove(&dbsInProg, _ns.db());
- recordEndAndRemove(&nsInProg, _ns.ns());
+} // namespace
+bool BackgroundOperation::inProgForDb(StringData db) {
+ stdx::lock_guard<stdx::mutex> lk(m);
+ return dbsInProg.find(db) != dbsInProg.end();
+}
+
+bool BackgroundOperation::inProgForNs(StringData ns) {
+ stdx::lock_guard<stdx::mutex> lk(m);
+ return nsInProg.find(ns) != nsInProg.end();
+}
+
+void BackgroundOperation::assertNoBgOpInProgForDb(StringData db) {
+ uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
+ mongoutils::str::stream()
+ << "cannot perform operation: a background operation is currently running for "
+ "database " << db,
+ !inProgForDb(db));
+}
+
+void BackgroundOperation::assertNoBgOpInProgForNs(StringData ns) {
+ uassert(ErrorCodes::BackgroundOperationInProgressForNamespace,
+ mongoutils::str::stream()
+ << "cannot perform operation: a background operation is currently running for "
+ "collection " << ns,
+ !inProgForNs(ns));
+}
+
+void BackgroundOperation::awaitNoBgOpInProgForDb(StringData db) {
+ stdx::unique_lock<stdx::mutex> lk(m);
+ awaitNoBgOps(lk, &dbsInProg, db);
+}
+
+void BackgroundOperation::awaitNoBgOpInProgForNs(StringData ns) {
+ stdx::unique_lock<stdx::mutex> lk(m);
+ awaitNoBgOps(lk, &nsInProg, ns);
+}
+
+BackgroundOperation::BackgroundOperation(StringData ns) : _ns(ns) {
+ stdx::lock_guard<stdx::mutex> lk(m);
+ recordBeginAndInsert(&dbsInProg, _ns.db());
+ recordBeginAndInsert(&nsInProg, _ns.ns());
+}
+
+BackgroundOperation::~BackgroundOperation() {
+ stdx::lock_guard<stdx::mutex> lk(m);
+ recordEndAndRemove(&dbsInProg, _ns.db());
+ recordEndAndRemove(&nsInProg, _ns.ns());
+}
+
+void BackgroundOperation::dump(std::ostream& ss) {
+ stdx::lock_guard<stdx::mutex> lk(m);
+ if (nsInProg.size()) {
+ ss << "\n<b>Background Jobs in Progress</b>\n";
+ for (BgInfoMapIterator i = nsInProg.begin(); i != nsInProg.end(); ++i)
+ ss << " " << i->first << '\n';
}
-
- void BackgroundOperation::dump(std::ostream& ss) {
- stdx::lock_guard<stdx::mutex> lk(m);
- if( nsInProg.size() ) {
- ss << "\n<b>Background Jobs in Progress</b>\n";
- for( BgInfoMapIterator i = nsInProg.begin(); i != nsInProg.end(); ++i )
- ss << " " << i->first << '\n';
- }
- for( BgInfoMapIterator i = dbsInProg.begin(); i != dbsInProg.end(); ++i ) {
- if( i->second->getOpsInProgCount() )
- ss << "database " << i->first << ": " << i->second->getOpsInProgCount() << '\n';
- }
+ for (BgInfoMapIterator i = dbsInProg.begin(); i != dbsInProg.end(); ++i) {
+ if (i->second->getOpsInProgCount())
+ ss << "database " << i->first << ": " << i->second->getOpsInProgCount() << '\n';
}
+}
-} // namespace mongo
-
+} // namespace mongo
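
BgInfo above is the classic counted-operations pattern: a counter guarded by the file-level mutex plus a condition variable signalled when the count reaches zero. The same idea reduced to a freestanding standard-library sketch (not MongoDB code, and it keeps its own mutex instead of relying on a caller-held lock):

    #include <condition_variable>
    #include <mutex>

    class OpGate {
    public:
        void recordBegin() {
            std::lock_guard<std::mutex> lk(_m);
            ++_inProgress;
        }
        void recordEnd() {
            std::lock_guard<std::mutex> lk(_m);
            if (--_inProgress == 0)
                _noOps.notify_all();  // wake everyone waiting for quiescence
        }
        void awaitNoOps() {
            std::unique_lock<std::mutex> lk(_m);
            _noOps.wait(lk, [this] { return _inProgress == 0; });
        }

    private:
        std::mutex _m;
        std::condition_variable _noOps;
        int _inProgress = 0;
    };
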
diff --git a/src/mongo/db/background.h b/src/mongo/db/background.h
index 7bfa113bd9e..f089e9f34fc 100644
--- a/src/mongo/db/background.h
+++ b/src/mongo/db/background.h
@@ -43,34 +43,34 @@
namespace mongo {
- /* these are administrative operations / jobs
- for a namespace running in the background, and that if in progress,
- you aren't allowed to do other NamespaceDetails major manipulations
- (such as dropping ns or db) even in the foreground and must
- instead uassert.
+/* these are administrative operations / jobs
+ for a namespace running in the background, and that if in progress,
+ you aren't allowed to do other NamespaceDetails major manipulations
+ (such as dropping ns or db) even in the foreground and must
+ instead uassert.
- It's assumed this is not for super-high RPS things, so we don't do
- anything special in the implementation here to be fast.
- */
- class BackgroundOperation {
- MONGO_DISALLOW_COPYING(BackgroundOperation);
- public:
- static bool inProgForDb(StringData db);
- static bool inProgForNs(StringData ns);
- static void assertNoBgOpInProgForDb(StringData db);
- static void assertNoBgOpInProgForNs(StringData ns);
- static void awaitNoBgOpInProgForDb(StringData db);
- static void awaitNoBgOpInProgForNs(StringData ns);
- static void dump(std::ostream&);
+ It's assumed this is not for super-high RPS things, so we don't do
+ anything special in the implementation here to be fast.
+*/
+class BackgroundOperation {
+ MONGO_DISALLOW_COPYING(BackgroundOperation);
- /* check for in progress before instantiating */
- BackgroundOperation(StringData ns);
+public:
+ static bool inProgForDb(StringData db);
+ static bool inProgForNs(StringData ns);
+ static void assertNoBgOpInProgForDb(StringData db);
+ static void assertNoBgOpInProgForNs(StringData ns);
+ static void awaitNoBgOpInProgForDb(StringData db);
+ static void awaitNoBgOpInProgForNs(StringData ns);
+ static void dump(std::ostream&);
- virtual ~BackgroundOperation();
+ /* check for in progress before instantiating */
+ BackgroundOperation(StringData ns);
- private:
- NamespaceString _ns;
- };
+ virtual ~BackgroundOperation();
-} // namespace mongo
+private:
+ NamespaceString _ns;
+};
+} // namespace mongo
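
A usage sketch for the interface above (the namespace string and the surrounding operations are invented): a long-running job registers itself for the lifetime of a BackgroundOperation object, and foreground code either asserts or waits on the same db/namespace keys.

    #include "mongo/db/background.h"

    namespace mongo {
    void backgroundJobSketch() {
        // Registers an op for both the "test" db and the "test.coll" namespace;
        // the destructor deregisters it and may wake waiters.
        BackgroundOperation bgOp("test.coll");
        // ... long-running work ...
    }

    void foregroundDropSketch() {
        // Refuse to proceed while any background op is registered for the collection.
        BackgroundOperation::assertNoBgOpInProgForNs("test.coll");
        // ... drop the collection ...
    }
    }  // namespace mongo
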
diff --git a/src/mongo/db/catalog/apply_ops.cpp b/src/mongo/db/catalog/apply_ops.cpp
index 9292fbcbf32..623adeaf9d5 100644
--- a/src/mongo/db/catalog/apply_ops.cpp
+++ b/src/mongo/db/catalog/apply_ops.cpp
@@ -47,162 +47,161 @@
#include "mongo/util/log.h"
namespace mongo {
- Status applyOps(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& applyOpCmd,
- BSONObjBuilder* result) {
- // SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
- // ns used so locking individually requires more analysis
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while applying ops to database " << dbName);
- }
+Status applyOps(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd,
+ BSONObjBuilder* result) {
+ // SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
+ // ns used so locking individually requires more analysis
+ ScopedTransaction scopedXact(txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while applying ops to database " << dbName);
+ }
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- BSONObj ops = applyOpCmd.firstElement().Obj();
- // Preconditions check reads the database state, so needs to be done locked
- if (applyOpCmd["preCondition"].type() == Array) {
- BSONObjIterator i(applyOpCmd["preCondition"].Obj());
- while (i.more()) {
- BSONObj f = i.next().Obj();
-
- DBDirectClient db(txn);
- BSONObj realres = db.findOne(f["ns"].String() , f["q"].Obj());
-
- // Apply-ops would never have a $where matcher, so use the default callback,
- // which will throw an error if $where is found.
- Matcher m(f["res"].Obj());
- if (! m.matches(realres)) {
- result->append("got" , realres);
- result->append("whatFailed" , f);
- txn->setReplicatedWrites(shouldReplicateWrites);
- return Status(ErrorCodes::BadValue, "pre-condition failed");
- }
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ BSONObj ops = applyOpCmd.firstElement().Obj();
+ // Preconditions check reads the database state, so needs to be done locked
+ if (applyOpCmd["preCondition"].type() == Array) {
+ BSONObjIterator i(applyOpCmd["preCondition"].Obj());
+ while (i.more()) {
+ BSONObj f = i.next().Obj();
+
+ DBDirectClient db(txn);
+ BSONObj realres = db.findOne(f["ns"].String(), f["q"].Obj());
+
+ // Apply-ops would never have a $where matcher, so use the default callback,
+ // which will throw an error if $where is found.
+ Matcher m(f["res"].Obj());
+ if (!m.matches(realres)) {
+ result->append("got", realres);
+ result->append("whatFailed", f);
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ return Status(ErrorCodes::BadValue, "pre-condition failed");
}
}
+ }
- // apply
- int num = 0;
- int errors = 0;
-
- BSONObjIterator i(ops);
- BSONArrayBuilder ab;
- const bool alwaysUpsert = applyOpCmd.hasField("alwaysUpsert") ?
- applyOpCmd["alwaysUpsert"].trueValue() : true;
-
- while (i.more()) {
- BSONElement e = i.next();
- const BSONObj& temp = e.Obj();
-
- // Ignore 'n' operations.
- const char *opType = temp["op"].valuestrsafe();
- if (*opType == 'n') continue;
-
- const std::string ns = temp["ns"].String();
-
- // Run operations under a nested lock as a hack to prevent yielding.
- //
- // The list of operations is supposed to be applied atomically; yielding
- // would break atomicity by allowing an interruption or a shutdown to occur
- // after only some operations are applied. We are already locked globally
- // at this point, so taking a DBLock on the namespace creates a nested lock,
- // and yields are disallowed for operations that hold a nested lock.
- //
- // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
- // commit to happen with a subset of ops applied.
- // TODO figure out what to do about this.
- Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());
-
- // Ensures that yielding will not happen (see the comment above).
- DEV {
- Locker::LockSnapshot lockSnapshot;
- invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
- };
-
- Status status(ErrorCodes::InternalError, "");
-
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (*opType == 'c') {
- status = repl::applyCommand_inlock(txn, temp);
- break;
- }
- else {
- OldClientContext ctx(txn, ns);
-
- status = repl::applyOperation_inlock(txn, ctx.db(), temp, alwaysUpsert);
- break;
- }
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "applyOps", ns);
-
- ab.append(status.isOK());
- if (!status.isOK()) {
- errors++;
+ // apply
+ int num = 0;
+ int errors = 0;
+
+ BSONObjIterator i(ops);
+ BSONArrayBuilder ab;
+ const bool alwaysUpsert =
+ applyOpCmd.hasField("alwaysUpsert") ? applyOpCmd["alwaysUpsert"].trueValue() : true;
+
+ while (i.more()) {
+ BSONElement e = i.next();
+ const BSONObj& temp = e.Obj();
+
+ // Ignore 'n' operations.
+ const char* opType = temp["op"].valuestrsafe();
+ if (*opType == 'n')
+ continue;
+
+ const std::string ns = temp["ns"].String();
+
+ // Run operations under a nested lock as a hack to prevent yielding.
+ //
+ // The list of operations is supposed to be applied atomically; yielding
+ // would break atomicity by allowing an interruption or a shutdown to occur
+ // after only some operations are applied. We are already locked globally
+ // at this point, so taking a DBLock on the namespace creates a nested lock,
+ // and yields are disallowed for operations that hold a nested lock.
+ //
+ // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
+ // commit to happen with a subset of ops applied.
+ // TODO figure out what to do about this.
+ Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());
+
+ // Ensures that yielding will not happen (see the comment above).
+ DEV {
+ Locker::LockSnapshot lockSnapshot;
+ invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
+ };
+
+ Status status(ErrorCodes::InternalError, "");
+
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ if (*opType == 'c') {
+ status = repl::applyCommand_inlock(txn, temp);
+ break;
+ } else {
+ OldClientContext ctx(txn, ns);
+
+ status = repl::applyOperation_inlock(txn, ctx.db(), temp, alwaysUpsert);
+ break;
}
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "applyOps", ns);
- num++;
-
- WriteUnitOfWork wuow(txn);
- logOpForDbHash(txn, ns.c_str());
- wuow.commit();
+ ab.append(status.isOK());
+ if (!status.isOK()) {
+ errors++;
}
- result->append("applied" , num);
- result->append("results" , ab.arr());
- txn->setReplicatedWrites(shouldReplicateWrites);
+ num++;
- if (txn->writesAreReplicated()) {
- // We want this applied atomically on slaves
- // so we re-wrap without the pre-condition for speed
+ WriteUnitOfWork wuow(txn);
+ logOpForDbHash(txn, ns.c_str());
+ wuow.commit();
+ }
- std::string tempNS = str::stream() << dbName << ".$cmd";
+ result->append("applied", num);
+ result->append("results", ab.arr());
+ txn->setReplicatedWrites(shouldReplicateWrites);
- // TODO: possibly use mutable BSON to remove preCondition field
- // once it is available
- BSONObjBuilder cmdBuilder;
+ if (txn->writesAreReplicated()) {
+ // We want this applied atomically on slaves
+ // so we re-wrap without the pre-condition for speed
- for (auto elem : applyOpCmd) {
- auto name = elem.fieldNameStringData();
- if (name == "preCondition") continue;
- if (name == "bypassDocumentValidation") continue;
- cmdBuilder.append(elem);
- }
+ std::string tempNS = str::stream() << dbName << ".$cmd";
- const BSONObj cmdRewritten = cmdBuilder.done();
-
- // We currently always logOp the command regardless of whether the individial ops
- // succeeded and rely on any failures to also happen on secondaries. This isn't
- // perfect, but it's what the command has always done and is part of its "correct"
- // behavior.
- while (true) {
- try {
- WriteUnitOfWork wunit(txn);
- getGlobalServiceContext()->getOpObserver()->onApplyOps(txn,
- tempNS,
- cmdRewritten);
- wunit.commit();
- break;
- }
- catch (const WriteConflictException& wce) {
- LOG(2) <<
- "WriteConflictException while logging applyOps command, retrying.";
- txn->recoveryUnit()->abandonSnapshot();
- continue;
- }
- }
+ // TODO: possibly use mutable BSON to remove preCondition field
+ // once it is available
+ BSONObjBuilder cmdBuilder;
+
+ for (auto elem : applyOpCmd) {
+ auto name = elem.fieldNameStringData();
+ if (name == "preCondition")
+ continue;
+ if (name == "bypassDocumentValidation")
+ continue;
+ cmdBuilder.append(elem);
}
- if (errors != 0) {
- return Status(ErrorCodes::UnknownError, "");
+ const BSONObj cmdRewritten = cmdBuilder.done();
+
+        // We currently always logOp the command regardless of whether the individual ops
+ // succeeded and rely on any failures to also happen on secondaries. This isn't
+ // perfect, but it's what the command has always done and is part of its "correct"
+ // behavior.
+ while (true) {
+ try {
+ WriteUnitOfWork wunit(txn);
+ getGlobalServiceContext()->getOpObserver()->onApplyOps(txn, tempNS, cmdRewritten);
+ wunit.commit();
+ break;
+ } catch (const WriteConflictException& wce) {
+ LOG(2) << "WriteConflictException while logging applyOps command, retrying.";
+ txn->recoveryUnit()->abandonSnapshot();
+ continue;
+ }
}
+ }
- return Status::OK();
+ if (errors != 0) {
+ return Status(ErrorCodes::UnknownError, "");
}
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace mongo
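
The applyOps changes above rely on one write-conflict retry idiom in two guises: the MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END macros around applying each op, and the hand-rolled while (true) loop around onApplyOps. In both, the write runs inside a WriteUnitOfWork and, if a WriteConflictException is thrown, the recovery unit's snapshot is abandoned before retrying so the next attempt sees the conflicting writer's committed state. The following is a minimal sketch of that control flow only; WriteConflict, FakeRecoveryUnit, and writeConflictRetry are stand-ins invented for this note, not the server's real classes.

    #include <functional>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Stand-in for WriteConflictException.
    struct WriteConflict : std::runtime_error {
        WriteConflict() : std::runtime_error("write conflict") {}
    };

    // Stand-in for the RecoveryUnit owned by the OperationContext.
    struct FakeRecoveryUnit {
        void abandonSnapshot() {
            std::cout << "snapshot abandoned\n";
        }
    };

    // Retries 'work' until it completes without throwing WriteConflict.
    void writeConflictRetry(FakeRecoveryUnit& ru,
                            const std::string& opName,
                            const std::function<void()>& work) {
        for (int attempt = 1;; ++attempt) {
            try {
                work();  // in the real code this runs inside a WriteUnitOfWork that is committed
                return;
            } catch (const WriteConflict&) {
                std::cout << opName << ": conflict on attempt " << attempt << ", retrying\n";
                ru.abandonSnapshot();  // drop the stale snapshot so the retry sees committed state
            }
        }
    }

    int main() {
        FakeRecoveryUnit ru;
        int failuresLeft = 2;  // simulate two conflicting attempts before success
        writeConflictRetry(ru, "applyOps", [&] {
            if (failuresLeft-- > 0)
                throw WriteConflict();
            std::cout << "applied\n";
        });
        return 0;
    }
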
diff --git a/src/mongo/db/catalog/apply_ops.h b/src/mongo/db/catalog/apply_ops.h
index 13639deb586..588d3bb370b 100644
--- a/src/mongo/db/catalog/apply_ops.h
+++ b/src/mongo/db/catalog/apply_ops.h
@@ -29,17 +29,17 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class OperationContext;
+class BSONObj;
+class BSONObjBuilder;
+class OperationContext;
- /**
- * Applies ops contained in "applyOpCmd" and populates fields in "result" to be returned to the
- * user.
- */
- Status applyOps(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& applyOpCmd,
- BSONObjBuilder* result);
+/**
+ * Applies ops contained in "applyOpCmd" and populates fields in "result" to be returned to the
+ * user.
+ */
+Status applyOps(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd,
+ BSONObjBuilder* result);
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 8e981bd2491..b1adb472565 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -51,118 +51,111 @@
#include "mongo/util/scopeguard.h"
namespace mongo {
- Status emptyCapped(OperationContext* txn,
- const NamespaceString& collectionName) {
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
- collectionName);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while truncating collection "
- << collectionName.ns());
- }
-
- Database* db = autoDb.getDb();
- massert(13429, "no such database", db);
+Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName) {
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
- Collection* collection = db->getCollection(collectionName);
- massert(28584, "no such collection", collection);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
- BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while truncating collection "
+ << collectionName.ns());
+ }
- WriteUnitOfWork wuow(txn);
+ Database* db = autoDb.getDb();
+ massert(13429, "no such database", db);
- Status status = collection->truncate(txn);
- if (!status.isOK()) {
- return status;
- }
+ Collection* collection = db->getCollection(collectionName);
+ massert(28584, "no such collection", collection);
- getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());
+ BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- wuow.commit();
+ WriteUnitOfWork wuow(txn);
- return Status::OK();
+ Status status = collection->truncate(txn);
+ if (!status.isOK()) {
+ return status;
}
- Status cloneCollectionAsCapped(OperationContext* txn,
- Database* db,
- const std::string& shortFrom,
- const std::string& shortTo,
- double size,
- bool temp) {
-
- std::string fromNs = db->name() + "." + shortFrom;
- std::string toNs = db->name() + "." + shortTo;
-
- Collection* fromCollection = db->getCollection(fromNs);
- if (!fromCollection)
- return Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "source collection " << fromNs << " does not exist");
-
- if (db->getCollection(toNs))
- return Status(ErrorCodes::NamespaceExists, "to collection already exists");
-
- // create new collection
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- const auto fromOptions = fromCollection->getCatalogEntry()
- ->getCollectionOptions(txn)
- .toBSON();
- OldClientContext ctx(txn, toNs);
- BSONObjBuilder spec;
- spec.appendBool("capped", true);
- spec.append("size", size);
- if (temp)
- spec.appendBool("temp", true);
- spec.appendElementsUnique(fromOptions);
-
- WriteUnitOfWork wunit(txn);
- Status status = userCreateNS(txn, ctx.db(), toNs, spec.done());
- if (!status.isOK())
- return status;
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloneCollectionAsCapped", fromNs);
+ getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());
+
+ wuow.commit();
+
+ return Status::OK();
+}
+
+Status cloneCollectionAsCapped(OperationContext* txn,
+ Database* db,
+ const std::string& shortFrom,
+ const std::string& shortTo,
+ double size,
+ bool temp) {
+ std::string fromNs = db->name() + "." + shortFrom;
+ std::string toNs = db->name() + "." + shortTo;
+
+ Collection* fromCollection = db->getCollection(fromNs);
+ if (!fromCollection)
+ return Status(ErrorCodes::NamespaceNotFound,
+ str::stream() << "source collection " << fromNs << " does not exist");
+
+ if (db->getCollection(toNs))
+ return Status(ErrorCodes::NamespaceExists, "to collection already exists");
+
+ // create new collection
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ const auto fromOptions =
+ fromCollection->getCatalogEntry()->getCollectionOptions(txn).toBSON();
+ OldClientContext ctx(txn, toNs);
+ BSONObjBuilder spec;
+ spec.appendBool("capped", true);
+ spec.append("size", size);
+ if (temp)
+ spec.appendBool("temp", true);
+ spec.appendElementsUnique(fromOptions);
+
+ WriteUnitOfWork wunit(txn);
+ Status status = userCreateNS(txn, ctx.db(), toNs, spec.done());
+ if (!status.isOK())
+ return status;
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloneCollectionAsCapped", fromNs);
- Collection* toCollection = db->getCollection(toNs);
- invariant(toCollection); // we created above
+ Collection* toCollection = db->getCollection(toNs);
+ invariant(toCollection); // we created above
- // how much data to ignore because it won't fit anyway
- // datasize and extentSize can't be compared exactly, so add some padding to 'size'
+ // how much data to ignore because it won't fit anyway
+ // datasize and extentSize can't be compared exactly, so add some padding to 'size'
- long long allocatedSpaceGuess =
- std::max(static_cast<long long>(size * 2),
- static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
+ long long allocatedSpaceGuess =
+ std::max(static_cast<long long>(size * 2),
+ static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
- long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
+ long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
- std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn,
- fromNs,
- fromCollection,
- InternalPlanner::FORWARD));
+ std::unique_ptr<PlanExecutor> exec(
+ InternalPlanner::collectionScan(txn, fromNs, fromCollection, InternalPlanner::FORWARD));
- exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY);
+ exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY);
- Snapshotted<BSONObj> objToClone;
- RecordId loc;
- PlanExecutor::ExecState state = PlanExecutor::FAILURE; // suppress uninitialized warnings
+ Snapshotted<BSONObj> objToClone;
+ RecordId loc;
+ PlanExecutor::ExecState state = PlanExecutor::FAILURE; // suppress uninitialized warnings
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(txn);
- int retries = 0; // non-zero when retrying our last document.
- while (true) {
- if (!retries) {
- state = exec->getNextSnapshotted(&objToClone, &loc);
- }
+ int retries = 0; // non-zero when retrying our last document.
+ while (true) {
+ if (!retries) {
+ state = exec->getNextSnapshotted(&objToClone, &loc);
+ }
- switch(state) {
+ switch (state) {
case PlanExecutor::IS_EOF:
return Status::OK();
- case PlanExecutor::ADVANCED:
- {
+ case PlanExecutor::ADVANCED: {
if (excessSize > 0) {
// 4x is for padding, power of 2, etc...
excessSize -= (4 * objToClone.value().objsize());
@@ -178,116 +171,103 @@ namespace mongo {
// CollectionScan PlanStage does not have a FAILURE scenario.
// 3) All other PlanExecutor states are handled above
invariant(false);
- }
-
- try {
- // Make sure we are working with the latest version of the document.
- if (objToClone.snapshotId() != txn->recoveryUnit()->getSnapshotId()
- && !fromCollection->findDoc(txn, loc, &objToClone)) {
- // doc was deleted so don't clone it.
- retries = 0;
- continue;
- }
-
- WriteUnitOfWork wunit(txn);
- toCollection->insertDocument(txn,
- objToClone.value(),
- true,
- txn->writesAreReplicated());
- wunit.commit();
+ }
- // Go to the next document
+ try {
+ // Make sure we are working with the latest version of the document.
+ if (objToClone.snapshotId() != txn->recoveryUnit()->getSnapshotId() &&
+ !fromCollection->findDoc(txn, loc, &objToClone)) {
+ // doc was deleted so don't clone it.
retries = 0;
+ continue;
}
- catch (const WriteConflictException& wce) {
- CurOp::get(txn)->debug().writeConflicts++;
- retries++; // logAndBackoff expects this to be 1 on first call.
- wce.logAndBackoff(retries, "cloneCollectionAsCapped", fromNs);
-
- // Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
- // around call to abandonSnapshot.
- exec->saveState();
- txn->recoveryUnit()->abandonSnapshot();
- exec->restoreState(txn); // Handles any WCEs internally.
- }
- }
- invariant(false); // unreachable
+ WriteUnitOfWork wunit(txn);
+ toCollection->insertDocument(txn, objToClone.value(), true, txn->writesAreReplicated());
+ wunit.commit();
+
+ // Go to the next document
+ retries = 0;
+ } catch (const WriteConflictException& wce) {
+ CurOp::get(txn)->debug().writeConflicts++;
+ retries++; // logAndBackoff expects this to be 1 on first call.
+ wce.logAndBackoff(retries, "cloneCollectionAsCapped", fromNs);
+
+ // Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
+ // around call to abandonSnapshot.
+ exec->saveState();
+ txn->recoveryUnit()->abandonSnapshot();
+ exec->restoreState(txn); // Handles any WCEs internally.
+ }
}
- Status convertToCapped(OperationContext* txn,
- const NamespaceString& collectionName,
- double size) {
+ invariant(false); // unreachable
+}
- StringData dbname = collectionName.db();
- StringData shortSource = collectionName.coll();
+Status convertToCapped(OperationContext* txn, const NamespaceString& collectionName, double size) {
+ StringData dbname = collectionName.db();
+ StringData shortSource = collectionName.coll();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while converting "
- << collectionName.ns() << " to a capped collection");
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while converting " << collectionName.ns()
+ << " to a capped collection");
+ }
- Database* const db = autoDb.getDb();
- if (!db) {
- return Status(ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbname << " not found");
- }
+ Database* const db = autoDb.getDb();
+ if (!db) {
+ return Status(ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbname << " not found");
+ }
- BackgroundOperation::assertNoBgOpInProgForDb(dbname);
+ BackgroundOperation::assertNoBgOpInProgForDb(dbname);
- std::string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
- std::string longTmpName = str::stream() << dbname << "." << shortTmpName;
+ std::string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
+ std::string longTmpName = str::stream() << dbname << "." << shortTmpName;
- if (db->getCollection(longTmpName)) {
- WriteUnitOfWork wunit(txn);
- Status status = db->dropCollection(txn, longTmpName);
- if (!status.isOK())
- return status;
- }
+ if (db->getCollection(longTmpName)) {
+ WriteUnitOfWork wunit(txn);
+ Status status = db->dropCollection(txn, longTmpName);
+ if (!status.isOK())
+ return status;
+ }
- const bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- Status status = cloneCollectionAsCapped(txn,
- db,
- shortSource.toString(),
- shortTmpName,
- size,
- true);
+ const bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+ Status status =
+ cloneCollectionAsCapped(txn, db, shortSource.toString(), shortTmpName, size, true);
- if (!status.isOK()) {
- return status;
- }
+ if (!status.isOK()) {
+ return status;
+ }
- verify(db->getCollection(longTmpName));
+ verify(db->getCollection(longTmpName));
- {
- WriteUnitOfWork wunit(txn);
- status = db->dropCollection(txn, collectionName.ns());
- txn->setReplicatedWrites(shouldReplicateWrites);
- if (!status.isOK())
- return status;
+ {
+ WriteUnitOfWork wunit(txn);
+ status = db->dropCollection(txn, collectionName.ns());
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ if (!status.isOK())
+ return status;
- status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
- if (!status.isOK())
- return status;
+ status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
+ if (!status.isOK())
+ return status;
- getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
- txn,
- NamespaceString(collectionName),
- size);
+ getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
+ txn, NamespaceString(collectionName), size);
- wunit.commit();
- }
- return Status::OK();
+ wunit.commit();
}
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
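
cloneCollectionAsCapped above deliberately skips roughly the data that cannot fit in the target anyway: it guesses the allocated space as max(2 * size, 2 * storageSize), subtracts that from the source's dataSize to get excessSize, and while excessSize stays positive it drops leading documents, charging 4x each object's size for padding. Below is a small sketch of just that arithmetic, with made-up sizes and plain integers standing in for the Collection/RecordStore calls.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        // Hypothetical numbers: a 1 MB source collection cloned into a 256 KB capped target.
        const int64_t requestedSize = 256 * 1024;    // 'size' argument to cloneCollectionAsCapped
        const int64_t targetStorage = 300 * 1024;    // toCollection's record store storageSize
        const int64_t sourceDataSize = 1024 * 1024;  // fromCollection's dataSize

        const int64_t allocatedSpaceGuess = std::max(requestedSize * 2, targetStorage * 2);
        int64_t excessSize = sourceDataSize - allocatedSpaceGuess;

        // Documents come back in natural (oldest-first) order; skip until the excess is consumed.
        std::vector<int64_t> docSizes(256, 4 * 1024);  // pretend every document is 4 KB
        std::size_t skipped = 0;
        for (int64_t objSize : docSizes) {
            if (excessSize <= 0)
                break;
            excessSize -= 4 * objSize;  // 4x is for padding, power of 2, etc.
            ++skipped;
        }
        std::cout << "would skip the first " << skipped << " documents\n";
        return 0;
    }
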
diff --git a/src/mongo/db/catalog/capped_utils.h b/src/mongo/db/catalog/capped_utils.h
index 05104230fcf..f7be6dc427e 100644
--- a/src/mongo/db/catalog/capped_utils.h
+++ b/src/mongo/db/catalog/capped_utils.h
@@ -29,30 +29,27 @@
#include "mongo/base/status.h"
namespace mongo {
- class Database;
- class NamespaceString;
- class OperationContext;
+class Database;
+class NamespaceString;
+class OperationContext;
- /**
- * Drops all documents contained in the capped collection, "collectionName".
- */
- Status emptyCapped(OperationContext* txn,
- const NamespaceString& collectionName);
+/**
+ * Drops all documents contained in the capped collection, "collectionName".
+ */
+Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName);
- /**
- * Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size".
- */
- Status cloneCollectionAsCapped(OperationContext* txn,
- Database* db,
- const std::string& shortFrom,
- const std::string& shortTo,
- double size,
- bool temp);
+/**
+ * Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size".
+ */
+Status cloneCollectionAsCapped(OperationContext* txn,
+ Database* db,
+ const std::string& shortFrom,
+ const std::string& shortTo,
+ double size,
+ bool temp);
- /**
- * Converts the collection "collectionName" to a capped collection with a size of "size".
- */
- Status convertToCapped(OperationContext* txn,
- const NamespaceString& collectionName,
- double size);
-} // namespace mongo
+/**
+ * Converts the collection "collectionName" to a capped collection with a size of "size".
+ */
+Status convertToCapped(OperationContext* txn, const NamespaceString& collectionName, double size);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 768ee713b0a..00a3a643a5b 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -40,148 +40,140 @@
#include "mongo/db/service_context.h"
namespace mongo {
- Status collMod(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
- StringData dbName = ns.db();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_X);
- Database* const db = autoDb.getDb();
- Collection* coll = db ? db->getCollection(ns) : NULL;
-
- // If db/collection does not exist, short circuit and return.
- if (!db || !coll) {
- return Status(ErrorCodes::NamespaceNotFound, "ns does not exist");
- }
+Status collMod(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
+ StringData dbName = ns.db();
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbName, MODE_X);
+ Database* const db = autoDb.getDb();
+ Collection* coll = db ? db->getCollection(ns) : NULL;
+
+ // If db/collection does not exist, short circuit and return.
+ if (!db || !coll) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns does not exist");
+ }
- OldClientContext ctx(txn, ns);
+ OldClientContext ctx(txn, ns);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while setting collection options on "
- << ns.toString());
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while setting collection options on "
+ << ns.toString());
+ }
+
+ WriteUnitOfWork wunit(txn);
- WriteUnitOfWork wunit(txn);
+ Status errorStatus = Status::OK();
- Status errorStatus = Status::OK();
+ BSONForEach(e, cmdObj) {
+ if (str::equals("collMod", e.fieldName())) {
+ // no-op
+ } else if (str::startsWith(e.fieldName(), "$")) {
+            // no-op: ignore top-level fields prefixed with $. They are for the command processor
+ } else if (LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
+ // no-op
+ } else if (str::equals("index", e.fieldName())) {
+ BSONObj indexObj = e.Obj();
+ BSONObj keyPattern = indexObj.getObjectField("keyPattern");
- BSONForEach(e, cmdObj) {
- if (str::equals("collMod", e.fieldName())) {
- // no-op
+ if (keyPattern.isEmpty()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions, "no keyPattern specified");
+ continue;
}
- else if (str::startsWith(e.fieldName(), "$")) {
- // no-op ignore top-level fields prefixed with $. They are for the command processor
+
+ BSONElement newExpireSecs = indexObj["expireAfterSeconds"];
+ if (newExpireSecs.eoo()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions, "no expireAfterSeconds field");
+ continue;
}
- else if (LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
- // no-op
+ if (!newExpireSecs.isNumber()) {
+ errorStatus =
+ Status(ErrorCodes::InvalidOptions, "expireAfterSeconds field must be a number");
+ continue;
}
- else if (str::equals("index", e.fieldName())) {
- BSONObj indexObj = e.Obj();
- BSONObj keyPattern = indexObj.getObjectField("keyPattern");
-
- if (keyPattern.isEmpty()){
- errorStatus = Status(ErrorCodes::InvalidOptions, "no keyPattern specified");
- continue;
- }
-
- BSONElement newExpireSecs = indexObj["expireAfterSeconds"];
- if (newExpireSecs.eoo()) {
- errorStatus = Status(ErrorCodes::InvalidOptions, "no expireAfterSeconds field");
- continue;
- }
- if (! newExpireSecs.isNumber()) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- "expireAfterSeconds field must be a number");
- continue;
- }
-
- const IndexDescriptor* idx = coll->getIndexCatalog()
- ->findIndexByKeyPattern(txn, keyPattern);
- if (idx == NULL) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- str::stream() << "cannot find index " << keyPattern
- << " for ns " << ns.toString());
- continue;
- }
- BSONElement oldExpireSecs = idx->infoObj().getField("expireAfterSeconds");
- if (oldExpireSecs.eoo()){
- errorStatus = Status(ErrorCodes::InvalidOptions,
- "no expireAfterSeconds field to update");
- continue;
- }
- if (! oldExpireSecs.isNumber()) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- "existing expireAfterSeconds field is not a number");
- continue;
- }
-
- if (oldExpireSecs != newExpireSecs) {
- result->appendAs(oldExpireSecs, "expireAfterSeconds_old");
- // Change the value of "expireAfterSeconds" on disk.
- coll->getCatalogEntry()->updateTTLSetting(txn,
- idx->indexName(),
- newExpireSecs.numberLong());
- // Notify the index catalog that the definition of this index changed.
- idx = coll->getIndexCatalog()->refreshEntry(txn, idx);
- result->appendAs(newExpireSecs , "expireAfterSeconds_new");
- }
+
+ const IndexDescriptor* idx =
+ coll->getIndexCatalog()->findIndexByKeyPattern(txn, keyPattern);
+ if (idx == NULL) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ str::stream() << "cannot find index " << keyPattern
+ << " for ns " << ns.toString());
+ continue;
}
- else if (str::equals("validator", e.fieldName())) {
- auto status = coll->setValidator(txn, e.Obj());
- if (!status.isOK())
- errorStatus = std::move(status);
+ BSONElement oldExpireSecs = idx->infoObj().getField("expireAfterSeconds");
+ if (oldExpireSecs.eoo()) {
+ errorStatus =
+ Status(ErrorCodes::InvalidOptions, "no expireAfterSeconds field to update");
+ continue;
}
- else {
- // As of SERVER-17312 we only support these two options. When SERVER-17320 is
- // resolved this will need to be enhanced to handle other options.
- typedef CollectionOptions CO;
- const StringData name = e.fieldNameStringData();
- const int flag = (name == "usePowerOf2Sizes") ? CO::Flag_UsePowerOf2Sizes :
- (name == "noPadding") ? CO::Flag_NoPadding :
- 0;
- if (!flag) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- str::stream() << "unknown option to collMod: " << name);
- continue;
- }
-
- CollectionCatalogEntry* cce = coll->getCatalogEntry();
-
- const int oldFlags = cce->getCollectionOptions(txn).flags;
- const bool oldSetting = oldFlags & flag;
- const bool newSetting = e.trueValue();
-
- result->appendBool(name.toString() + "_old", oldSetting);
- result->appendBool(name.toString() + "_new", newSetting);
-
- const int newFlags = newSetting
- ? (oldFlags | flag) // set flag
- : (oldFlags & ~flag); // clear flag
-
- // NOTE we do this unconditionally to ensure that we note that the user has
- // explicitly set flags, even if they are just setting the default.
- cce->updateFlags(txn, newFlags);
-
- const CollectionOptions newOptions = cce->getCollectionOptions(txn);
- invariant(newOptions.flags == newFlags);
- invariant(newOptions.flagsSet);
+ if (!oldExpireSecs.isNumber()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ "existing expireAfterSeconds field is not a number");
+ continue;
}
- }
- if (!errorStatus.isOK()) {
- return errorStatus;
- }
+ if (oldExpireSecs != newExpireSecs) {
+ result->appendAs(oldExpireSecs, "expireAfterSeconds_old");
+ // Change the value of "expireAfterSeconds" on disk.
+ coll->getCatalogEntry()->updateTTLSetting(
+ txn, idx->indexName(), newExpireSecs.numberLong());
+ // Notify the index catalog that the definition of this index changed.
+ idx = coll->getIndexCatalog()->refreshEntry(txn, idx);
+ result->appendAs(newExpireSecs, "expireAfterSeconds_new");
+ }
+ } else if (str::equals("validator", e.fieldName())) {
+ auto status = coll->setValidator(txn, e.Obj());
+ if (!status.isOK())
+ errorStatus = std::move(status);
+ } else {
+ // As of SERVER-17312 we only support these two options. When SERVER-17320 is
+ // resolved this will need to be enhanced to handle other options.
+ typedef CollectionOptions CO;
+ const StringData name = e.fieldNameStringData();
+ const int flag = (name == "usePowerOf2Sizes")
+ ? CO::Flag_UsePowerOf2Sizes
+ : (name == "noPadding") ? CO::Flag_NoPadding : 0;
+ if (!flag) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ str::stream() << "unknown option to collMod: " << name);
+ continue;
+ }
+
+ CollectionCatalogEntry* cce = coll->getCatalogEntry();
+
+ const int oldFlags = cce->getCollectionOptions(txn).flags;
+ const bool oldSetting = oldFlags & flag;
+ const bool newSetting = e.trueValue();
+
+ result->appendBool(name.toString() + "_old", oldSetting);
+ result->appendBool(name.toString() + "_new", newSetting);
+
+ const int newFlags = newSetting ? (oldFlags | flag) // set flag
+ : (oldFlags & ~flag); // clear flag
- getGlobalServiceContext()->getOpObserver()->onCollMod(txn,
- (dbName.toString() + ".$cmd").c_str(),
- cmdObj);
+ // NOTE we do this unconditionally to ensure that we note that the user has
+ // explicitly set flags, even if they are just setting the default.
+ cce->updateFlags(txn, newFlags);
- wunit.commit();
- return Status::OK();
+ const CollectionOptions newOptions = cce->getCollectionOptions(txn);
+ invariant(newOptions.flags == newFlags);
+ invariant(newOptions.flagsSet);
+ }
+ }
+
+ if (!errorStatus.isOK()) {
+ return errorStatus;
}
-} // namespace mongo
+
+ getGlobalServiceContext()->getOpObserver()->onCollMod(
+ txn, (dbName.toString() + ".$cmd").c_str(), cmdObj);
+
+ wunit.commit();
+ return Status::OK();
+}
+} // namespace mongo
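
For the usePowerOf2Sizes and noPadding options, the collMod path above boils down to flipping one bit in the collection's stored flags (oldFlags | flag to set, oldFlags & ~flag to clear) and writing the result back unconditionally so that flagsSet records an explicit user choice even when the value is unchanged. A tiny illustration of that bit arithmetic follows; the flag values are chosen here for the example rather than taken from the real CollectionOptions constants.

    #include <iostream>

    int main() {
        // Illustrative flag values; the real ones live in CollectionOptions.
        const int Flag_UsePowerOf2Sizes = 1 << 0;
        const int Flag_NoPadding = 1 << 1;

        int oldFlags = Flag_UsePowerOf2Sizes;  // collection currently has powerOf2 sizing on

        // Equivalent of: collMod with {noPadding: true}
        const bool newSetting = true;
        const int flag = Flag_NoPadding;
        const int newFlags = newSetting ? (oldFlags | flag)    // set the bit
                                        : (oldFlags & ~flag);  // clear the bit

        std::cout << "old=" << oldFlags << " new=" << newFlags << "\n";  // old=1 new=3
        return 0;
    }
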
diff --git a/src/mongo/db/catalog/coll_mod.h b/src/mongo/db/catalog/coll_mod.h
index 1f511f45145..eb8644b74d1 100644
--- a/src/mongo/db/catalog/coll_mod.h
+++ b/src/mongo/db/catalog/coll_mod.h
@@ -29,17 +29,16 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class NamespaceString;
- class OperationContext;
-
- /**
- * Performs the collection modification described in "cmdObj" on the collection "ns".
- */
- Status collMod(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* result);
-} // namespace mongo
+class BSONObj;
+class BSONObjBuilder;
+class NamespaceString;
+class OperationContext;
+/**
+ * Performs the collection modification described in "cmdObj" on the collection "ns".
+ */
+Status collMod(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 4be5fe65529..64ef00330d6 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -56,49 +56,45 @@
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage/record_fetcher.h"
-#include "mongo/db/auth/user_document_parser.h" // XXX-ANDY
+#include "mongo/db/auth/user_document_parser.h" // XXX-ANDY
#include "mongo/util/log.h"
namespace mongo {
namespace {
- const auto bannedExpressionsInValidators = std::set<StringData>{
- "$geoNear",
- "$near",
- "$nearSphere",
- "$text",
- "$where",
- };
- Status checkValidatorForBannedExpressions(const BSONObj& validator) {
- for (auto field : validator) {
- const auto name = field.fieldNameStringData();
- if (name[0] == '$' && bannedExpressionsInValidators.count(name)) {
- return {ErrorCodes::InvalidOptions,
- str::stream() << name << " is not allowed in collection validators"};
- }
-
- if (field.type() == Object || field.type() == Array) {
- auto status = checkValidatorForBannedExpressions(field.Obj());
- if (!status.isOK())
- return status;
- }
+const auto bannedExpressionsInValidators = std::set<StringData>{
+ "$geoNear", "$near", "$nearSphere", "$text", "$where",
+};
+Status checkValidatorForBannedExpressions(const BSONObj& validator) {
+ for (auto field : validator) {
+ const auto name = field.fieldNameStringData();
+ if (name[0] == '$' && bannedExpressionsInValidators.count(name)) {
+ return {ErrorCodes::InvalidOptions,
+ str::stream() << name << " is not allowed in collection validators"};
}
- return Status::OK();
+ if (field.type() == Object || field.type() == Array) {
+ auto status = checkValidatorForBannedExpressions(field.Obj());
+ if (!status.isOK())
+ return status;
+ }
}
+
+ return Status::OK();
+}
}
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::vector;
- using logger::LogComponent;
+using logger::LogComponent;
- std::string CompactOptions::toString() const {
- std::stringstream ss;
- ss << "paddingMode: ";
- switch ( paddingMode ) {
+std::string CompactOptions::toString() const {
+ std::stringstream ss;
+ ss << "paddingMode: ";
+ switch (paddingMode) {
case NONE:
ss << "NONE";
break;
@@ -106,752 +102,721 @@ namespace {
ss << "PRESERVE";
break;
case MANUAL:
- ss << "MANUAL (" << paddingBytes << " + ( doc * " << paddingFactor <<") )";
- }
+ ss << "MANUAL (" << paddingBytes << " + ( doc * " << paddingFactor << ") )";
+ }
- ss << " validateDocuments: " << validateDocuments;
+ ss << " validateDocuments: " << validateDocuments;
- return ss.str();
- }
+ return ss.str();
+}
- //
- // CappedInsertNotifier
- //
+//
+// CappedInsertNotifier
+//
- CappedInsertNotifier::CappedInsertNotifier()
- : _cappedInsertCount(0) {
- }
+CappedInsertNotifier::CappedInsertNotifier() : _cappedInsertCount(0) {}
- void CappedInsertNotifier::notifyOfInsert() {
- stdx::lock_guard<stdx::mutex> lk(_cappedNewDataMutex);
- _cappedInsertCount++;
- _cappedNewDataNotifier.notify_all();
- }
+void CappedInsertNotifier::notifyOfInsert() {
+ stdx::lock_guard<stdx::mutex> lk(_cappedNewDataMutex);
+ _cappedInsertCount++;
+ _cappedNewDataNotifier.notify_all();
+}
- uint64_t CappedInsertNotifier::getCount() const {
- stdx::lock_guard<stdx::mutex> lk(_cappedNewDataMutex);
- return _cappedInsertCount;
- }
+uint64_t CappedInsertNotifier::getCount() const {
+ stdx::lock_guard<stdx::mutex> lk(_cappedNewDataMutex);
+ return _cappedInsertCount;
+}
- void CappedInsertNotifier::waitForInsert(uint64_t referenceCount, Microseconds timeout) const {
- stdx::unique_lock<stdx::mutex> lk(_cappedNewDataMutex);
+void CappedInsertNotifier::waitForInsert(uint64_t referenceCount, Microseconds timeout) const {
+ stdx::unique_lock<stdx::mutex> lk(_cappedNewDataMutex);
- while (referenceCount == _cappedInsertCount) {
- if (stdx::cv_status::timeout == _cappedNewDataNotifier.wait_for(lk, timeout)) {
- return;
- }
+ while (referenceCount == _cappedInsertCount) {
+ if (stdx::cv_status::timeout == _cappedNewDataNotifier.wait_for(lk, timeout)) {
+ return;
}
}
+}
- // ----
-
- Collection::Collection( OperationContext* txn,
- StringData fullNS,
- CollectionCatalogEntry* details,
- RecordStore* recordStore,
- DatabaseCatalogEntry* dbce )
- : _ns( fullNS ),
- _details( details ),
- _recordStore( recordStore ),
- _dbce( dbce ),
- _infoCache( this ),
- _indexCatalog( this ),
- _validatorDoc(_details->getCollectionOptions(txn).validator.getOwned()),
- _validator(uassertStatusOK(parseValidator(_validatorDoc))),
- _cursorManager(fullNS),
- _cappedNotifier(_recordStore->isCapped() ? new CappedInsertNotifier() : nullptr) {
- _magic = 1357924;
- _indexCatalog.init(txn);
- if ( isCapped() )
- _recordStore->setCappedDeleteCallback( this );
- _infoCache.reset(txn);
- }
+// ----
+
+Collection::Collection(OperationContext* txn,
+ StringData fullNS,
+ CollectionCatalogEntry* details,
+ RecordStore* recordStore,
+ DatabaseCatalogEntry* dbce)
+ : _ns(fullNS),
+ _details(details),
+ _recordStore(recordStore),
+ _dbce(dbce),
+ _infoCache(this),
+ _indexCatalog(this),
+ _validatorDoc(_details->getCollectionOptions(txn).validator.getOwned()),
+ _validator(uassertStatusOK(parseValidator(_validatorDoc))),
+ _cursorManager(fullNS),
+ _cappedNotifier(_recordStore->isCapped() ? new CappedInsertNotifier() : nullptr) {
+ _magic = 1357924;
+ _indexCatalog.init(txn);
+ if (isCapped())
+ _recordStore->setCappedDeleteCallback(this);
+ _infoCache.reset(txn);
+}
- Collection::~Collection() {
- verify( ok() );
- _magic = 0;
- }
+Collection::~Collection() {
+ verify(ok());
+ _magic = 0;
+}
- bool Collection::requiresIdIndex() const {
+bool Collection::requiresIdIndex() const {
+ if (_ns.ns().find('$') != string::npos) {
+ // no indexes on indexes
+ return false;
+ }
- if ( _ns.ns().find( '$' ) != string::npos ) {
- // no indexes on indexes
+ if (_ns.isSystem()) {
+ StringData shortName = _ns.coll().substr(_ns.coll().find('.') + 1);
+ if (shortName == "indexes" || shortName == "namespaces" || shortName == "profile") {
return false;
}
-
- if ( _ns.isSystem() ) {
- StringData shortName = _ns.coll().substr( _ns.coll().find( '.' ) + 1 );
- if ( shortName == "indexes" ||
- shortName == "namespaces" ||
- shortName == "profile" ) {
- return false;
- }
- }
-
- if ( _ns.db() == "local" ) {
- if ( _ns.coll().startsWith( "oplog." ) )
- return false;
- }
-
- if ( !_ns.isSystem() ) {
- // non system collections definitely have an _id index
- return true;
- }
-
-
- return true;
- }
-
- std::unique_ptr<RecordCursor> Collection::getCursor(OperationContext* txn, bool forward) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- invariant( ok() );
-
- return _recordStore->getCursor(txn, forward);
}
- vector<std::unique_ptr<RecordCursor>> Collection::getManyCursors(OperationContext* txn) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
-
- return _recordStore->getManyCursors(txn);
+ if (_ns.db() == "local") {
+ if (_ns.coll().startsWith("oplog."))
+ return false;
}
- Snapshotted<BSONObj> Collection::docFor(OperationContext* txn, const RecordId& loc) const {
- return Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(),
- _recordStore->dataFor( txn, loc ).releaseToBson());
+ if (!_ns.isSystem()) {
+ // non system collections definitely have an _id index
+ return true;
}
- bool Collection::findDoc(OperationContext* txn,
- const RecordId& loc,
- Snapshotted<BSONObj>* out) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- RecordData rd;
- if ( !_recordStore->findRecord( txn, loc, &rd ) )
- return false;
- *out = Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
- return true;
- }
+ return true;
+}
- Status Collection::checkValidation(OperationContext* txn, const BSONObj& document) const {
- if (!_validator)
- return Status::OK();
+std::unique_ptr<RecordCursor> Collection::getCursor(OperationContext* txn, bool forward) const {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ invariant(ok());
- if (documentValidationDisabled(txn))
- return Status::OK();
+ return _recordStore->getCursor(txn, forward);
+}
- if (_validator->matchesBSON(document))
- return Status::OK();
+vector<std::unique_ptr<RecordCursor>> Collection::getManyCursors(OperationContext* txn) const {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- return {ErrorCodes::DocumentValidationFailure, "Document failed validation"};
- }
+ return _recordStore->getManyCursors(txn);
+}
- StatusWith<std::unique_ptr<MatchExpression>> Collection::parseValidator(
- const BSONObj& validator) const {
- if (validator.isEmpty())
- return {nullptr};
+Snapshotted<BSONObj> Collection::docFor(OperationContext* txn, const RecordId& loc) const {
+ return Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(),
+ _recordStore->dataFor(txn, loc).releaseToBson());
+}
- if (ns().isSystem()) {
- return {ErrorCodes::InvalidOptions,
- "Document validators not allowed on system collections."};
- }
+bool Collection::findDoc(OperationContext* txn,
+ const RecordId& loc,
+ Snapshotted<BSONObj>* out) const {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- if (ns().isOnInternalDb()) {
- return {ErrorCodes::InvalidOptions,
- str::stream() << "Document validators are not allowed on collections in"
- << " the " << ns().db() << " database"};
- }
+ RecordData rd;
+ if (!_recordStore->findRecord(txn, loc, &rd))
+ return false;
+ *out = Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
+ return true;
+}
- {
- auto status = checkValidatorForBannedExpressions(validator);
- if (!status.isOK())
- return status;
- }
+Status Collection::checkValidation(OperationContext* txn, const BSONObj& document) const {
+ if (!_validator)
+ return Status::OK();
- auto statusWithRawPtr = MatchExpressionParser::parse(validator);
- if (!statusWithRawPtr.isOK())
- return statusWithRawPtr.getStatus();
+ if (documentValidationDisabled(txn))
+ return Status::OK();
- return {std::unique_ptr<MatchExpression>(statusWithRawPtr.getValue())};
- }
+ if (_validator->matchesBSON(document))
+ return Status::OK();
- StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota) {
- invariant(!_validator || documentValidationDisabled(txn));
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant( !_indexCatalog.haveAnyIndexes() ); // eventually can implement, just not done
+ return {ErrorCodes::DocumentValidationFailure, "Document failed validation"};
+}
- StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
- doc,
- _enforceQuota( enforceQuota ) );
- if ( !loc.isOK() )
- return loc;
+StatusWith<std::unique_ptr<MatchExpression>> Collection::parseValidator(
+ const BSONObj& validator) const {
+ if (validator.isEmpty())
+ return {nullptr};
- // we cannot call into the OpObserver here because the document being written is not present
- // fortunately, this is currently only used for adding entries to the oplog.
+ if (ns().isSystem()) {
+ return {ErrorCodes::InvalidOptions,
+ "Document validators not allowed on system collections."};
+ }
- return StatusWith<RecordId>( loc );
+ if (ns().isOnInternalDb()) {
+ return {ErrorCodes::InvalidOptions,
+ str::stream() << "Document validators are not allowed on collections in"
+ << " the " << ns().db() << " database"};
}
- StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
- const BSONObj& docToInsert,
- bool enforceQuota,
- bool fromMigrate) {
- {
- auto status = checkValidation(txn, docToInsert);
- if (!status.isOK())
- return status;
- }
+ {
+ auto status = checkValidatorForBannedExpressions(validator);
+ if (!status.isOK())
+ return status;
+ }
- const SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
+ auto statusWithRawPtr = MatchExpressionParser::parse(validator);
+ if (!statusWithRawPtr.isOK())
+ return statusWithRawPtr.getStatus();
- if ( _indexCatalog.findIdIndex( txn ) ) {
- if ( docToInsert["_id"].eoo() ) {
- return StatusWith<RecordId>( ErrorCodes::InternalError,
- str::stream() << "Collection::insertDocument got "
- "document without _id for ns:" << _ns.ns() );
- }
- }
+ return {std::unique_ptr<MatchExpression>(statusWithRawPtr.getValue())};
+}
- StatusWith<RecordId> res = _insertDocument( txn, docToInsert, enforceQuota );
- invariant( sid == txn->recoveryUnit()->getSnapshotId() );
- if (res.isOK()) {
- getGlobalServiceContext()->getOpObserver()->onInsert(txn,
- ns(),
- docToInsert,
- fromMigrate);
-
- // If there is a notifier object and another thread is waiting on it, then we notify
- // waiters of this document insert. Waiters keep a shared_ptr to '_cappedNotifier', so
- // there are waiters if this Collection's shared_ptr is not unique.
- if (_cappedNotifier && !_cappedNotifier.unique()) {
- _cappedNotifier->notifyOfInsert();
- }
- }
+StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota) {
+ invariant(!_validator || documentValidationDisabled(txn));
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(!_indexCatalog.haveAnyIndexes()); // eventually can implement, just not done
- return res;
- }
+ StatusWith<RecordId> loc = _recordStore->insertRecord(txn, doc, _enforceQuota(enforceQuota));
+ if (!loc.isOK())
+ return loc;
- StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
- const BSONObj& doc,
- MultiIndexBlock* indexBlock,
- bool enforceQuota) {
- {
- auto status = checkValidation(txn, doc);
- if (!status.isOK())
- return status;
- }
+ // we cannot call into the OpObserver here because the document being written is not present
+ // fortunately, this is currently only used for adding entries to the oplog.
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ return StatusWith<RecordId>(loc);
+}
- StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
- doc.objdata(),
- doc.objsize(),
- _enforceQuota(enforceQuota) );
+StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
+ const BSONObj& docToInsert,
+ bool enforceQuota,
+ bool fromMigrate) {
+ {
+ auto status = checkValidation(txn, docToInsert);
+ if (!status.isOK())
+ return status;
+ }
- if ( !loc.isOK() )
- return loc;
+ const SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
- Status status = indexBlock->insert( doc, loc.getValue() );
- if ( !status.isOK() )
- return StatusWith<RecordId>( status );
+ if (_indexCatalog.findIdIndex(txn)) {
+ if (docToInsert["_id"].eoo()) {
+ return StatusWith<RecordId>(ErrorCodes::InternalError,
+ str::stream()
+ << "Collection::insertDocument got "
+ "document without _id for ns:" << _ns.ns());
+ }
+ }
- getGlobalServiceContext()->getOpObserver()->onInsert(txn, ns(), doc);
+ StatusWith<RecordId> res = _insertDocument(txn, docToInsert, enforceQuota);
+ invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ if (res.isOK()) {
+ getGlobalServiceContext()->getOpObserver()->onInsert(txn, ns(), docToInsert, fromMigrate);
- // If there is a notifier object and another thread is waiting on it, then we notify waiters
- // of this document insert. Waiters keep a shared_ptr to '_cappedNotifier', so there are
- // waiters if this Collection's shared_ptr is not unique.
+ // If there is a notifier object and another thread is waiting on it, then we notify
+ // waiters of this document insert. Waiters keep a shared_ptr to '_cappedNotifier', so
+ // there are waiters if this Collection's shared_ptr is not unique.
if (_cappedNotifier && !_cappedNotifier.unique()) {
_cappedNotifier->notifyOfInsert();
}
-
- return loc;
}
- StatusWith<RecordId> Collection::_insertDocument( OperationContext* txn,
- const BSONObj& docToInsert,
- bool enforceQuota ) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ return res;
+}
- // TODO: for now, capped logic lives inside NamespaceDetails, which is hidden
- // under the RecordStore, this feels broken since that should be a
- // collection access method probably
+StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
+ const BSONObj& doc,
+ MultiIndexBlock* indexBlock,
+ bool enforceQuota) {
+ {
+ auto status = checkValidation(txn, doc);
+ if (!status.isOK())
+ return status;
+ }
- StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
- docToInsert.objdata(),
- docToInsert.objsize(),
- _enforceQuota( enforceQuota ) );
- if ( !loc.isOK() )
- return loc;
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant( RecordId::min() < loc.getValue() );
- invariant( loc.getValue() < RecordId::max() );
+ StatusWith<RecordId> loc =
+ _recordStore->insertRecord(txn, doc.objdata(), doc.objsize(), _enforceQuota(enforceQuota));
- _infoCache.notifyOfWriteOp();
+ if (!loc.isOK())
+ return loc;
- Status s = _indexCatalog.indexRecord(txn, docToInsert, loc.getValue());
- if (!s.isOK())
- return StatusWith<RecordId>(s);
+ Status status = indexBlock->insert(doc, loc.getValue());
+ if (!status.isOK())
+ return StatusWith<RecordId>(status);
- return loc;
+ getGlobalServiceContext()->getOpObserver()->onInsert(txn, ns(), doc);
+
+ // If there is a notifier object and another thread is waiting on it, then we notify waiters
+ // of this document insert. Waiters keep a shared_ptr to '_cappedNotifier', so there are
+ // waiters if this Collection's shared_ptr is not unique.
+ if (_cappedNotifier && !_cappedNotifier.unique()) {
+ _cappedNotifier->notifyOfInsert();
}
- Status Collection::aboutToDeleteCapped( OperationContext* txn,
- const RecordId& loc,
- RecordData data ) {
+ return loc;
+}
- /* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
+StatusWith<RecordId> Collection::_insertDocument(OperationContext* txn,
+ const BSONObj& docToInsert,
+ bool enforceQuota) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- BSONObj doc = data.releaseToBson();
- _indexCatalog.unindexRecord(txn, doc, loc, false);
+ // TODO: for now, capped logic lives inside NamespaceDetails, which is hidden
+ // under the RecordStore, this feels broken since that should be a
+ // collection access method probably
- return Status::OK();
- }
+ StatusWith<RecordId> loc = _recordStore->insertRecord(
+ txn, docToInsert.objdata(), docToInsert.objsize(), _enforceQuota(enforceQuota));
+ if (!loc.isOK())
+ return loc;
- void Collection::deleteDocument(OperationContext* txn,
- const RecordId& loc,
- bool cappedOK,
- bool noWarn,
- BSONObj* deletedId) {
- if ( isCapped() && !cappedOK ) {
- log() << "failing remove on a capped ns " << _ns << endl;
- uasserted( 10089, "cannot remove from a capped collection" );
- return;
- }
+ invariant(RecordId::min() < loc.getValue());
+ invariant(loc.getValue() < RecordId::max());
- Snapshotted<BSONObj> doc = docFor(txn, loc);
+ _infoCache.notifyOfWriteOp();
- BSONElement e = doc.value()["_id"];
- BSONObj id;
- if (e.type()) {
- id = e.wrap();
- if (deletedId) {
- *deletedId = e.wrap();
- }
- }
+ Status s = _indexCatalog.indexRecord(txn, docToInsert, loc.getValue());
+ if (!s.isOK())
+ return StatusWith<RecordId>(s);
- /* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
+ return loc;
+}
- _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn);
+Status Collection::aboutToDeleteCapped(OperationContext* txn,
+ const RecordId& loc,
+ RecordData data) {
+ /* check if any cursors point to us. if so, advance them. */
+ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
- _recordStore->deleteRecord(txn, loc);
+ BSONObj doc = data.releaseToBson();
+ _indexCatalog.unindexRecord(txn, doc, loc, false);
- _infoCache.notifyOfWriteOp();
+ return Status::OK();
+}
- if (!id.isEmpty()) {
- getGlobalServiceContext()->getOpObserver()->onDelete(txn, ns().ns(), id);
- }
+void Collection::deleteDocument(
+ OperationContext* txn, const RecordId& loc, bool cappedOK, bool noWarn, BSONObj* deletedId) {
+ if (isCapped() && !cappedOK) {
+ log() << "failing remove on a capped ns " << _ns << endl;
+ uasserted(10089, "cannot remove from a capped collection");
+ return;
}
- Counter64 moveCounter;
- ServerStatusMetricField<Counter64> moveCounterDisplay( "record.moves", &moveCounter );
-
- StatusWith<RecordId> Collection::updateDocument( OperationContext* txn,
- const RecordId& oldLocation,
- const Snapshotted<BSONObj>& oldDoc,
- const BSONObj& newDoc,
- bool enforceQuota,
- bool indexesAffected,
- OpDebug* debug,
- oplogUpdateEntryArgs& args) {
- {
- auto status = checkValidation(txn, newDoc);
- if (!status.isOK())
- return status;
- }
+ Snapshotted<BSONObj> doc = docFor(txn, loc);
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
-
- SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
-
- BSONElement oldId = oldDoc.value()["_id"];
- if ( !oldId.eoo() && ( oldId != newDoc["_id"] ) )
- return StatusWith<RecordId>( ErrorCodes::InternalError,
- "in Collection::updateDocument _id mismatch",
- 13596 );
-
- // At the end of this step, we will have a map of UpdateTickets, one per index, which
- // represent the index updates needed to be done, based on the changes between oldDoc and
- // newDoc.
- OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
- if ( indexesAffected ) {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
- while ( ii.more() ) {
- IndexDescriptor* descriptor = ii.next();
- IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
- IndexAccessMethod* iam = ii.accessMethod( descriptor );
-
- InsertDeleteOptions options;
- options.logIfError = false;
- options.dupsAllowed =
- !(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
- || repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
- UpdateTicket* updateTicket = new UpdateTicket();
- updateTickets.mutableMap()[descriptor] = updateTicket;
- Status ret = iam->validateUpdate(txn,
- oldDoc.value(),
- newDoc,
- oldLocation,
- options,
- updateTicket,
- entry->getFilterExpression());
- if ( !ret.isOK() ) {
- return StatusWith<RecordId>( ret );
- }
- }
+ BSONElement e = doc.value()["_id"];
+ BSONObj id;
+ if (e.type()) {
+ id = e.wrap();
+ if (deletedId) {
+ *deletedId = e.wrap();
}
+ }
- // This can call back into Collection::recordStoreGoingToMove. If that happens, the old
- // object is removed from all indexes.
- StatusWith<RecordId> newLocation = _recordStore->updateRecord( txn,
- oldLocation,
- newDoc.objdata(),
- newDoc.objsize(),
- _enforceQuota( enforceQuota ),
- this );
-
- if ( !newLocation.isOK() ) {
- return newLocation;
- }
+ /* check if any cursors point to us. if so, advance them. */
+ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
- // At this point, the old object may or may not still be indexed, depending on if it was
- // moved.
+ _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn);
- _infoCache.notifyOfWriteOp();
+ _recordStore->deleteRecord(txn, loc);
- // If the object did move, we need to add the new location to all indexes.
- if ( newLocation.getValue() != oldLocation ) {
+ _infoCache.notifyOfWriteOp();
- if ( debug ) {
- if (debug->nmoved == -1) // default of -1 rather than 0
- debug->nmoved = 1;
- else
- debug->nmoved += 1;
- }
+ if (!id.isEmpty()) {
+ getGlobalServiceContext()->getOpObserver()->onDelete(txn, ns().ns(), id);
+ }
+}
+
+Counter64 moveCounter;
+ServerStatusMetricField<Counter64> moveCounterDisplay("record.moves", &moveCounter);
+
+StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
+ const RecordId& oldLocation,
+ const Snapshotted<BSONObj>& oldDoc,
+ const BSONObj& newDoc,
+ bool enforceQuota,
+ bool indexesAffected,
+ OpDebug* debug,
+ oplogUpdateEntryArgs& args) {
+ {
+ auto status = checkValidation(txn, newDoc);
+ if (!status.isOK())
+ return status;
+ }
+
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
- Status s = _indexCatalog.indexRecord(txn, newDoc, newLocation.getValue());
- if (!s.isOK())
- return StatusWith<RecordId>(s);
- invariant( sid == txn->recoveryUnit()->getSnapshotId() );
- args.ns = ns().ns();
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
+ SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
- return newLocation;
+ BSONElement oldId = oldDoc.value()["_id"];
+ if (!oldId.eoo() && (oldId != newDoc["_id"]))
+ return StatusWith<RecordId>(
+ ErrorCodes::InternalError, "in Collection::updateDocument _id mismatch", 13596);
+
+ // At the end of this step, we will have a map of UpdateTickets, one per index, which
+ // represent the index updates needed to be done, based on the changes between oldDoc and
+ // newDoc.
+ OwnedPointerMap<IndexDescriptor*, UpdateTicket> updateTickets;
+ if (indexesAffected) {
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
+ while (ii.more()) {
+ IndexDescriptor* descriptor = ii.next();
+ IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
+ IndexAccessMethod* iam = ii.accessMethod(descriptor);
+
+ InsertDeleteOptions options;
+ options.logIfError = false;
+ options.dupsAllowed =
+ !(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique()) ||
+ repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
+ UpdateTicket* updateTicket = new UpdateTicket();
+ updateTickets.mutableMap()[descriptor] = updateTicket;
+ Status ret = iam->validateUpdate(txn,
+ oldDoc.value(),
+ newDoc,
+ oldLocation,
+ options,
+ updateTicket,
+ entry->getFilterExpression());
+ if (!ret.isOK()) {
+ return StatusWith<RecordId>(ret);
+ }
}
+ }
- // Object did not move. We update each index with each respective UpdateTicket.
+ // This can call back into Collection::recordStoreGoingToMove. If that happens, the old
+ // object is removed from all indexes.
+ StatusWith<RecordId> newLocation = _recordStore->updateRecord(
+ txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
- if ( debug )
- debug->keyUpdates = 0;
+ if (!newLocation.isOK()) {
+ return newLocation;
+ }
- if ( indexesAffected ) {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
- while ( ii.more() ) {
- IndexDescriptor* descriptor = ii.next();
- IndexAccessMethod* iam = ii.accessMethod(descriptor);
+ // At this point, the old object may or may not still be indexed, depending on if it was
+ // moved.
- int64_t updatedKeys;
- Status ret = iam->update(
- txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
- if ( !ret.isOK() )
- return StatusWith<RecordId>( ret );
- if ( debug )
- debug->keyUpdates += updatedKeys;
- }
+ _infoCache.notifyOfWriteOp();
+
+ // If the object did move, we need to add the new location to all indexes.
+ if (newLocation.getValue() != oldLocation) {
+ if (debug) {
+ if (debug->nmoved == -1) // default of -1 rather than 0
+ debug->nmoved = 1;
+ else
+ debug->nmoved += 1;
}
- invariant( sid == txn->recoveryUnit()->getSnapshotId() );
+ Status s = _indexCatalog.indexRecord(txn, newDoc, newLocation.getValue());
+ if (!s.isOK())
+ return StatusWith<RecordId>(s);
+ invariant(sid == txn->recoveryUnit()->getSnapshotId());
args.ns = ns().ns();
getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
return newLocation;
}
- Status Collection::recordStoreGoingToMove( OperationContext* txn,
- const RecordId& oldLocation,
- const char* oldBuffer,
- size_t oldSize ) {
- moveCounter.increment();
- _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION);
- _indexCatalog.unindexRecord(txn, BSONObj(oldBuffer), oldLocation, true);
- return Status::OK();
- }
+ // Object did not move. We update each index with each respective UpdateTicket.
- Status Collection::recordStoreGoingToUpdateInPlace( OperationContext* txn,
- const RecordId& loc ) {
- // Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
- return Status::OK();
+ if (debug)
+ debug->keyUpdates = 0;
+
+ if (indexesAffected) {
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
+ while (ii.more()) {
+ IndexDescriptor* descriptor = ii.next();
+ IndexAccessMethod* iam = ii.accessMethod(descriptor);
+
+ int64_t updatedKeys;
+ Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
+ if (!ret.isOK())
+ return StatusWith<RecordId>(ret);
+ if (debug)
+ debug->keyUpdates += updatedKeys;
+ }
}
+ invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ args.ns = ns().ns();
+ getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
- bool Collection::updateWithDamagesSupported() const {
- if (_validator)
- return false;
+ return newLocation;
+}
- return _recordStore->updateWithDamagesSupported();
- }
+Status Collection::recordStoreGoingToMove(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* oldBuffer,
+ size_t oldSize) {
+ moveCounter.increment();
+ _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION);
+ _indexCatalog.unindexRecord(txn, BSONObj(oldBuffer), oldLocation, true);
+ return Status::OK();
+}
- Status Collection::updateDocumentWithDamages( OperationContext* txn,
- const RecordId& loc,
- const Snapshotted<RecordData>& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages,
- oplogUpdateEntryArgs& args) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(oldRec.snapshotId() == txn->recoveryUnit()->getSnapshotId());
- invariant(updateWithDamagesSupported());
-
- // Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
-
- Status status =
- _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
-
- if (status.isOK()) {
- args.ns = ns().ns();
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
- }
- return status;
- }
+Status Collection::recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
+ // Broadcast the mutation so that query results stay correct.
+ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
+ return Status::OK();
+}
- bool Collection::_enforceQuota( bool userEnforeQuota ) const {
- if ( !userEnforeQuota )
- return false;
- if ( !mmapv1GlobalOptions.quota )
- return false;
+bool Collection::updateWithDamagesSupported() const {
+ if (_validator)
+ return false;
- if ( _ns.db() == "local" )
- return false;
+ return _recordStore->updateWithDamagesSupported();
+}
- if ( _ns.isSpecial() )
- return false;
+Status Collection::updateDocumentWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const Snapshotted<RecordData>& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages,
+ oplogUpdateEntryArgs& args) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldRec.snapshotId() == txn->recoveryUnit()->getSnapshotId());
+ invariant(updateWithDamagesSupported());
- return true;
- }
+ // Broadcast the mutation so that query results stay correct.
+ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
- bool Collection::isCapped() const {
- return _cappedNotifier.get();
- }
+ Status status =
+ _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
- std::shared_ptr<CappedInsertNotifier> Collection::getCappedInsertNotifier() const {
- invariant(isCapped());
- return _cappedNotifier;
+ if (status.isOK()) {
+ args.ns = ns().ns();
+ getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
}
+ return status;
+}
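
As a rough standalone illustration of what the damage-vector path above does (the real mutablebson::DamageVector layout is not shown in this diff, so the Damage struct here is hypothetical): rather than rewriting the whole record, a list of small byte-range patches is copied into it in place.

    #include <cstring>
    #include <iostream>
    #include <vector>

    struct Damage {
        size_t sourceOffset;  // where the replacement bytes start in damageSource
        size_t targetOffset;  // where they go in the stored record
        size_t size;          // how many bytes to copy
    };

    int main() {
        char record[] = "name:alice age:030";
        const char* damageSource = "042";
        std::vector<Damage> damages = {{0, 15, 3}};  // overwrite "030" with "042"

        for (const Damage& d : damages)
            std::memcpy(record + d.targetOffset, damageSource + d.sourceOffset, d.size);

        std::cout << record << "\n";  // name:alice age:042
        return 0;
    }
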
- uint64_t Collection::numRecords( OperationContext* txn ) const {
- return _recordStore->numRecords( txn );
- }
+bool Collection::_enforceQuota(bool userEnforeQuota) const {
+ if (!userEnforeQuota)
+ return false;
- uint64_t Collection::dataSize( OperationContext* txn ) const {
- return _recordStore->dataSize( txn );
- }
+ if (!mmapv1GlobalOptions.quota)
+ return false;
+
+ if (_ns.db() == "local")
+ return false;
- uint64_t Collection::getIndexSize(OperationContext* opCtx,
- BSONObjBuilder* details,
- int scale) {
+ if (_ns.isSpecial())
+ return false;
- IndexCatalog* idxCatalog = getIndexCatalog();
+ return true;
+}
- IndexCatalog::IndexIterator ii = idxCatalog->getIndexIterator(opCtx, true);
+bool Collection::isCapped() const {
+ return _cappedNotifier.get();
+}
- uint64_t totalSize = 0;
+std::shared_ptr<CappedInsertNotifier> Collection::getCappedInsertNotifier() const {
+ invariant(isCapped());
+ return _cappedNotifier;
+}
- while (ii.more()) {
- IndexDescriptor* d = ii.next();
- IndexAccessMethod* iam = idxCatalog->getIndex(d);
+uint64_t Collection::numRecords(OperationContext* txn) const {
+ return _recordStore->numRecords(txn);
+}
- long long ds = iam->getSpaceUsedBytes(opCtx);
+uint64_t Collection::dataSize(OperationContext* txn) const {
+ return _recordStore->dataSize(txn);
+}
- totalSize += ds;
- if (details) {
- details->appendNumber(d->indexName(), ds / scale);
- }
- }
+uint64_t Collection::getIndexSize(OperationContext* opCtx, BSONObjBuilder* details, int scale) {
+ IndexCatalog* idxCatalog = getIndexCatalog();
- return totalSize;
- }
+ IndexCatalog::IndexIterator ii = idxCatalog->getIndexIterator(opCtx, true);
- /**
- * order will be:
- * 1) store index specs
- * 2) drop indexes
- * 3) truncate record store
- * 4) re-write indexes
- */
- Status Collection::truncate(OperationContext* txn) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- massert( 17445, "index build in progress", _indexCatalog.numIndexesInProgress( txn ) == 0 );
-
- // 1) store index specs
- vector<BSONObj> indexSpecs;
- {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, false );
- while ( ii.more() ) {
- const IndexDescriptor* idx = ii.next();
- indexSpecs.push_back( idx->infoObj().getOwned() );
- }
- }
+ uint64_t totalSize = 0;
- // 2) drop indexes
- Status status = _indexCatalog.dropAllIndexes(txn, true);
- if ( !status.isOK() )
- return status;
- _cursorManager.invalidateAll(false, "collection truncated");
- _infoCache.reset( txn );
+ while (ii.more()) {
+ IndexDescriptor* d = ii.next();
+ IndexAccessMethod* iam = idxCatalog->getIndex(d);
- // 3) truncate record store
- status = _recordStore->truncate(txn);
- if ( !status.isOK() )
- return status;
+ long long ds = iam->getSpaceUsedBytes(opCtx);
- // 4) re-create indexes
- for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
- status = _indexCatalog.createIndexOnEmptyCollection(txn, indexSpecs[i]);
- if ( !status.isOK() )
- return status;
+ totalSize += ds;
+ if (details) {
+ details->appendNumber(d->indexName(), ds / scale);
}
+ }
- return Status::OK();
+ return totalSize;
+}
+
+/**
+ * order will be:
+ * 1) store index specs
+ * 2) drop indexes
+ * 3) truncate record store
+ * 4) re-write indexes
+ */
+Status Collection::truncate(OperationContext* txn) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ massert(17445, "index build in progress", _indexCatalog.numIndexesInProgress(txn) == 0);
+
+ // 1) store index specs
+ vector<BSONObj> indexSpecs;
+ {
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, false);
+ while (ii.more()) {
+ const IndexDescriptor* idx = ii.next();
+ indexSpecs.push_back(idx->infoObj().getOwned());
+ }
}
- void Collection::temp_cappedTruncateAfter(OperationContext* txn,
- RecordId end,
- bool inclusive) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant( isCapped() );
+ // 2) drop indexes
+ Status status = _indexCatalog.dropAllIndexes(txn, true);
+ if (!status.isOK())
+ return status;
+ _cursorManager.invalidateAll(false, "collection truncated");
+ _infoCache.reset(txn);
- _cursorManager.invalidateAll(false, "capped collection truncated");
- _recordStore->temp_cappedTruncateAfter( txn, end, inclusive );
+ // 3) truncate record store
+ status = _recordStore->truncate(txn);
+ if (!status.isOK())
+ return status;
+
+ // 4) re-create indexes
+ for (size_t i = 0; i < indexSpecs.size(); i++) {
+ status = _indexCatalog.createIndexOnEmptyCollection(txn, indexSpecs[i]);
+ if (!status.isOK())
+ return status;
}
- Status Collection::setValidator(OperationContext* txn, BSONObj validatorDoc) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ return Status::OK();
+}
- // Make owned early so that the parsed match expression refers to the owned object.
- if (!validatorDoc.isOwned()) validatorDoc = validatorDoc.getOwned();
+void Collection::temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(isCapped());
- auto statusWithMatcher = parseValidator(validatorDoc);
- if (!statusWithMatcher.isOK())
- return statusWithMatcher.getStatus();
+ _cursorManager.invalidateAll(false, "capped collection truncated");
+ _recordStore->temp_cappedTruncateAfter(txn, end, inclusive);
+}
- _details->updateValidator(txn, validatorDoc);
+Status Collection::setValidator(OperationContext* txn, BSONObj validatorDoc) {
+ invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- _validator = std::move(statusWithMatcher.getValue());
- _validatorDoc = std::move(validatorDoc);
- return Status::OK();
- }
+ // Make owned early so that the parsed match expression refers to the owned object.
+ if (!validatorDoc.isOwned())
+ validatorDoc = validatorDoc.getOwned();
- namespace {
- class MyValidateAdaptor : public ValidateAdaptor {
- public:
- virtual ~MyValidateAdaptor(){}
-
- virtual Status validate( const RecordData& record, size_t* dataSize ) {
- BSONObj obj = record.toBson();
- const Status status = validateBSON(obj.objdata(), obj.objsize());
- if ( status.isOK() )
- *dataSize = obj.objsize();
- return Status::OK();
- }
+ auto statusWithMatcher = parseValidator(validatorDoc);
+ if (!statusWithMatcher.isOK())
+ return statusWithMatcher.getStatus();
- };
+ _details->updateValidator(txn, validatorDoc);
+
+ _validator = std::move(statusWithMatcher.getValue());
+ _validatorDoc = std::move(validatorDoc);
+ return Status::OK();
+}
+
+namespace {
+class MyValidateAdaptor : public ValidateAdaptor {
+public:
+ virtual ~MyValidateAdaptor() {}
+
+ virtual Status validate(const RecordData& record, size_t* dataSize) {
+ BSONObj obj = record.toBson();
+ const Status status = validateBSON(obj.objdata(), obj.objsize());
+ if (status.isOK())
+ *dataSize = obj.objsize();
+ return Status::OK();
}
+};
+}
- Status Collection::validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateResults* results, BSONObjBuilder* output ){
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+Status Collection::validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateResults* results,
+ BSONObjBuilder* output) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- MyValidateAdaptor adaptor;
- Status status = _recordStore->validate( txn, full, scanData, &adaptor, results, output );
- if ( !status.isOK() )
- return status;
+ MyValidateAdaptor adaptor;
+ Status status = _recordStore->validate(txn, full, scanData, &adaptor, results, output);
+ if (!status.isOK())
+ return status;
- { // indexes
- output->append("nIndexes", _indexCatalog.numIndexesReady( txn ) );
- int idxn = 0;
- try {
- // Only applicable when 'full' validation is requested.
- std::unique_ptr<BSONObjBuilder> indexDetails(full ? new BSONObjBuilder() : NULL);
- BSONObjBuilder indexes; // not using subObjStart to be exception safe
-
- IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
- while( i.more() ) {
- const IndexDescriptor* descriptor = i.next();
- log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace() << endl;
- IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
- invariant( iam );
-
- std::unique_ptr<BSONObjBuilder> bob(
- indexDetails.get() ? new BSONObjBuilder(
- indexDetails->subobjStart(descriptor->indexNamespace())) :
- NULL);
-
- int64_t keys;
- iam->validate(txn, full, &keys, bob.get());
- indexes.appendNumber(descriptor->indexNamespace(),
- static_cast<long long>(keys));
-
- if (bob) {
- BSONObj obj = bob->done();
- BSONElement valid = obj["valid"];
- if (valid.ok() && !valid.trueValue()) {
- results->valid = false;
- }
+ { // indexes
+ output->append("nIndexes", _indexCatalog.numIndexesReady(txn));
+ int idxn = 0;
+ try {
+ // Only applicable when 'full' validation is requested.
+ std::unique_ptr<BSONObjBuilder> indexDetails(full ? new BSONObjBuilder() : NULL);
+ BSONObjBuilder indexes; // not using subObjStart to be exception safe
+
+ IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
+ while (i.more()) {
+ const IndexDescriptor* descriptor = i.next();
+ log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace()
+ << endl;
+ IndexAccessMethod* iam = _indexCatalog.getIndex(descriptor);
+ invariant(iam);
+
+ std::unique_ptr<BSONObjBuilder> bob(
+ indexDetails.get() ? new BSONObjBuilder(indexDetails->subobjStart(
+ descriptor->indexNamespace()))
+ : NULL);
+
+ int64_t keys;
+ iam->validate(txn, full, &keys, bob.get());
+ indexes.appendNumber(descriptor->indexNamespace(), static_cast<long long>(keys));
+
+ if (bob) {
+ BSONObj obj = bob->done();
+ BSONElement valid = obj["valid"];
+ if (valid.ok() && !valid.trueValue()) {
+ results->valid = false;
}
- idxn++;
- }
-
- output->append("keysPerIndex", indexes.done());
- if (indexDetails.get()) {
- output->append("indexDetails", indexDetails->done());
}
+ idxn++;
}
- catch ( DBException& exc ) {
- string err = str::stream() <<
- "exception during index validate idxn "<<
- BSONObjBuilder::numStr(idxn) <<
- ": " << exc.toString();
- results->errors.push_back( err );
- results->valid = false;
+
+ output->append("keysPerIndex", indexes.done());
+ if (indexDetails.get()) {
+ output->append("indexDetails", indexDetails->done());
}
+ } catch (DBException& exc) {
+ string err = str::stream() << "exception during index validate idxn "
+ << BSONObjBuilder::numStr(idxn) << ": " << exc.toString();
+ results->errors.push_back(err);
+ results->valid = false;
}
-
- return Status::OK();
}
- Status Collection::touch( OperationContext* txn,
- bool touchData, bool touchIndexes,
- BSONObjBuilder* output ) const {
- if ( touchData ) {
- BSONObjBuilder b;
- Status status = _recordStore->touch( txn, &b );
- if ( !status.isOK() )
- return status;
- output->append( "data", b.obj() );
- }
+ return Status::OK();
+}
- if ( touchIndexes ) {
- Timer t;
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, false );
- while ( ii.more() ) {
- const IndexDescriptor* desc = ii.next();
- const IndexAccessMethod* iam = _indexCatalog.getIndex( desc );
- Status status = iam->touch( txn );
- if ( !status.isOK() )
- return status;
- }
+Status Collection::touch(OperationContext* txn,
+ bool touchData,
+ bool touchIndexes,
+ BSONObjBuilder* output) const {
+ if (touchData) {
+ BSONObjBuilder b;
+ Status status = _recordStore->touch(txn, &b);
+ if (!status.isOK())
+ return status;
+ output->append("data", b.obj());
+ }
- output->append( "indexes", BSON( "num" << _indexCatalog.numIndexesTotal( txn ) <<
- "millis" << t.millis() ) );
+ if (touchIndexes) {
+ Timer t;
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, false);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
+ const IndexAccessMethod* iam = _indexCatalog.getIndex(desc);
+ Status status = iam->touch(txn);
+ if (!status.isOK())
+ return status;
}
- return Status::OK();
+ output->append("indexes",
+ BSON("num" << _indexCatalog.numIndexesTotal(txn) << "millis" << t.millis()));
}
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 5f5609bd437..972979378cf 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -53,343 +53,360 @@
namespace mongo {
- class CollectionCatalogEntry;
- class DatabaseCatalogEntry;
- class ExtentManager;
- class IndexCatalog;
- class MatchExpression;
- class MultiIndexBlock;
- class OpDebug;
- class OperationContext;
- class RecordCursor;
- class RecordFetcher;
- class UpdateDriver;
- class UpdateRequest;
-
- struct CompactOptions {
-
- CompactOptions() {
- paddingMode = NONE;
- validateDocuments = true;
- paddingFactor = 1;
- paddingBytes = 0;
- }
-
- // padding
- enum PaddingMode {
- PRESERVE, NONE, MANUAL
- } paddingMode;
-
- // only used if _paddingMode == MANUAL
- double paddingFactor; // what to multiple document size by
- int paddingBytes; // what to add to ducment size after multiplication
- unsigned computeRecordSize( unsigned recordSize ) const {
- recordSize = static_cast<unsigned>( paddingFactor * recordSize );
- recordSize += paddingBytes;
- return recordSize;
- }
-
- // other
- bool validateDocuments;
-
- std::string toString() const;
- };
-
- struct CompactStats {
- CompactStats() {
- corruptDocuments = 0;
- }
-
- long long corruptDocuments;
- };
+class CollectionCatalogEntry;
+class DatabaseCatalogEntry;
+class ExtentManager;
+class IndexCatalog;
+class MatchExpression;
+class MultiIndexBlock;
+class OpDebug;
+class OperationContext;
+class RecordCursor;
+class RecordFetcher;
+class UpdateDriver;
+class UpdateRequest;
+
+struct CompactOptions {
+ CompactOptions() {
+ paddingMode = NONE;
+ validateDocuments = true;
+ paddingFactor = 1;
+ paddingBytes = 0;
+ }
+
+ // padding
+ enum PaddingMode { PRESERVE, NONE, MANUAL } paddingMode;
+
+ // only used if _paddingMode == MANUAL
+    double paddingFactor;  // what to multiply the document size by
+    int paddingBytes;      // what to add to the document size after multiplication
+ unsigned computeRecordSize(unsigned recordSize) const {
+ recordSize = static_cast<unsigned>(paddingFactor * recordSize);
+ recordSize += paddingBytes;
+ return recordSize;
+ }
+
+ // other
+ bool validateDocuments;
+
+ std::string toString() const;
+};
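
A quick worked check of computeRecordSize() above, with arbitrarily chosen MANUAL-mode values (these numbers are not from the diff):

    #include <iostream>

    int main() {
        double paddingFactor = 1.5;  // multiply the document size by this
        int paddingBytes = 128;      // then add this many bytes
        unsigned recordSize = 1000;
        recordSize = static_cast<unsigned>(paddingFactor * recordSize);  // 1500
        recordSize += paddingBytes;                                      // 1628
        std::cout << recordSize << std::endl;  // prints 1628
        return 0;
    }
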
+
+struct CompactStats {
+ CompactStats() {
+ corruptDocuments = 0;
+ }
+
+ long long corruptDocuments;
+};
+
+/**
+ * Queries with the awaitData option use this notifier object to wait for more data to be
+ * inserted into the capped collection.
+ */
+class CappedInsertNotifier {
+public:
+ CappedInsertNotifier();
+
+ /**
+ * Wakes up threads waiting on this object for the arrival of new data.
+ */
+ void notifyOfInsert();
+
+ /**
+ * Get a counter value which is incremented on every insert into a capped collection.
+     * The return value should be used as a reference value to pass into waitForInsert().
+ */
+ uint64_t getCount() const;
+
+ /**
+ * Waits for 'timeout' microseconds, or until notifyAll() is called to indicate that new
+ * data is available in the capped collection.
+ */
+ void waitForInsert(uint64_t referenceCount, Microseconds timeout) const;
+
+private:
+ // Signalled when a successful insert is made into a capped collection.
+ mutable stdx::condition_variable _cappedNewDataNotifier;
+
+ // Mutex used with '_cappedNewDataNotifier'. Protects access to '_cappedInsertCount'.
+ mutable stdx::mutex _cappedNewDataMutex;
+
+ // A counter, incremented on insertion of new data into the capped collection.
+ //
+ // The condition which '_cappedNewDataNotifier' is being notified of is an increment of this
+ // counter. Access to this counter is synchronized with '_cappedNewDataMutex'.
+ uint64_t _cappedInsertCount;
+};
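
The counter-plus-condition-variable design documented in the private section above can be sketched with standard library types only (a simplified standalone analogue, not MongoDB code): the waiter records the counter first, then blocks until the counter has advanced or the timeout expires, so a notification that arrives between the two steps is not lost.

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex mtx;
    std::condition_variable cv;
    uint64_t insertCount = 0;

    void notifyOfInsert() {
        std::lock_guard<std::mutex> lk(mtx);
        ++insertCount;
        cv.notify_all();
    }

    void waitForInsert(uint64_t referenceCount, std::chrono::microseconds timeout) {
        std::unique_lock<std::mutex> lk(mtx);
        cv.wait_for(lk, timeout, [&] { return insertCount != referenceCount; });
    }

    int main() {
        uint64_t seen = insertCount;                   // getCount() equivalent
        std::thread writer([] { notifyOfInsert(); });  // capped insert happens elsewhere
        waitForInsert(seen, std::chrono::milliseconds(50));
        writer.join();
        std::cout << "insertCount=" << insertCount << "\n";
        return 0;
    }
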
+
+/**
+ * this is NOT safe through a yield right now
+ * not sure if it will be, or what yet
+ */
+class Collection : CappedDocumentDeleteCallback, UpdateNotifier {
+public:
+ Collection(OperationContext* txn,
+ StringData fullNS,
+ CollectionCatalogEntry* details, // does not own
+ RecordStore* recordStore, // does not own
+ DatabaseCatalogEntry* dbce); // does not own
+
+ ~Collection();
+
+ bool ok() const {
+ return _magic == 1357924;
+ }
+
+ CollectionCatalogEntry* getCatalogEntry() {
+ return _details;
+ }
+ const CollectionCatalogEntry* getCatalogEntry() const {
+ return _details;
+ }
+
+ CollectionInfoCache* infoCache() {
+ return &_infoCache;
+ }
+ const CollectionInfoCache* infoCache() const {
+ return &_infoCache;
+ }
+
+ const NamespaceString& ns() const {
+ return _ns;
+ }
+
+ const IndexCatalog* getIndexCatalog() const {
+ return &_indexCatalog;
+ }
+ IndexCatalog* getIndexCatalog() {
+ return &_indexCatalog;
+ }
+
+ const RecordStore* getRecordStore() const {
+ return _recordStore;
+ }
+ RecordStore* getRecordStore() {
+ return _recordStore;
+ }
+
+ CursorManager* getCursorManager() const {
+ return &_cursorManager;
+ }
+
+ bool requiresIdIndex() const;
+
+ Snapshotted<BSONObj> docFor(OperationContext* txn, const RecordId& loc) const;
+
+ /**
+ * @param out - contents set to the right docs if exists, or nothing.
+ * @return true iff loc exists
+ */
+ bool findDoc(OperationContext* txn, const RecordId& loc, Snapshotted<BSONObj>* out) const;
+
+ std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward = true) const;
+
+ /**
+ * Returns many cursors that partition the Collection into many disjoint sets. Iterating
+ * all returned cursors is equivalent to iterating the full collection.
+ */
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const;
+
+ void deleteDocument(OperationContext* txn,
+ const RecordId& loc,
+ bool cappedOK = false,
+ bool noWarn = false,
+ BSONObj* deletedId = 0);
+
+ /**
+ * this does NOT modify the doc before inserting
+ * i.e. will not add an _id field for documents that are missing it
+ *
+ * If enforceQuota is false, quotas will be ignored.
+ */
+ StatusWith<RecordId> insertDocument(OperationContext* txn,
+ const BSONObj& doc,
+ bool enforceQuota,
+ bool fromMigrate = false);
/**
- * Queries with the awaitData option use this notifier object to wait for more data to be
- * inserted into the capped collection.
+ * Callers must ensure no document validation is performed for this collection when calling
+ * this method.
*/
- class CappedInsertNotifier {
- public:
- CappedInsertNotifier();
-
- /**
- * Wakes up threads waiting on this object for the arrival of new data.
- */
- void notifyOfInsert();
-
- /**
- * Get a counter value which is incremented on every insert into a capped collection.
- * The return value should be used as a reference value to pass into waitForCappedInsert().
- */
- uint64_t getCount() const;
-
- /**
- * Waits for 'timeout' microseconds, or until notifyAll() is called to indicate that new
- * data is available in the capped collection.
- */
- void waitForInsert(uint64_t referenceCount, Microseconds timeout) const;
-
- private:
- // Signalled when a successful insert is made into a capped collection.
- mutable stdx::condition_variable _cappedNewDataNotifier;
-
- // Mutex used with '_cappedNewDataNotifier'. Protects access to '_cappedInsertCount'.
- mutable stdx::mutex _cappedNewDataMutex;
-
- // A counter, incremented on insertion of new data into the capped collection.
- //
- // The condition which '_cappedNewDataNotifier' is being notified of is an increment of this
- // counter. Access to this counter is synchronized with '_cappedNewDataMutex'.
- uint64_t _cappedInsertCount;
- };
+ StatusWith<RecordId> insertDocument(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota);
+
+ StatusWith<RecordId> insertDocument(OperationContext* txn,
+ const BSONObj& doc,
+ MultiIndexBlock* indexBlock,
+ bool enforceQuota);
+
+ /**
+ * updates the document @ oldLocation with newDoc
+ * if the document fits in the old space, it is put there
+ * if not, it is moved
+ * @return the post update location of the doc (may or may not be the same as oldLocation)
+ */
+ StatusWith<RecordId> updateDocument(OperationContext* txn,
+ const RecordId& oldLocation,
+ const Snapshotted<BSONObj>& oldDoc,
+ const BSONObj& newDoc,
+ bool enforceQuota,
+ bool indexesAffected,
+ OpDebug* debug,
+ oplogUpdateEntryArgs& args);
+
+ bool updateWithDamagesSupported() const;
/**
- * this is NOT safe through a yield right now
- * not sure if it will be, or what yet
+ * Not allowed to modify indexes.
+ * Illegal to call if updateWithDamagesSupported() returns false.
*/
- class Collection : CappedDocumentDeleteCallback, UpdateNotifier {
- public:
- Collection( OperationContext* txn,
- StringData fullNS,
- CollectionCatalogEntry* details, // does not own
- RecordStore* recordStore, // does not own
- DatabaseCatalogEntry* dbce ); // does not own
-
- ~Collection();
-
- bool ok() const { return _magic == 1357924; }
-
- CollectionCatalogEntry* getCatalogEntry() { return _details; }
- const CollectionCatalogEntry* getCatalogEntry() const { return _details; }
-
- CollectionInfoCache* infoCache() { return &_infoCache; }
- const CollectionInfoCache* infoCache() const { return &_infoCache; }
-
- const NamespaceString& ns() const { return _ns; }
-
- const IndexCatalog* getIndexCatalog() const { return &_indexCatalog; }
- IndexCatalog* getIndexCatalog() { return &_indexCatalog; }
-
- const RecordStore* getRecordStore() const { return _recordStore; }
- RecordStore* getRecordStore() { return _recordStore; }
-
- CursorManager* getCursorManager() const { return &_cursorManager; }
-
- bool requiresIdIndex() const;
-
- Snapshotted<BSONObj> docFor(OperationContext* txn, const RecordId& loc) const;
-
- /**
- * @param out - contents set to the right docs if exists, or nothing.
- * @return true iff loc exists
- */
- bool findDoc(OperationContext* txn, const RecordId& loc, Snapshotted<BSONObj>* out) const;
-
- std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward = true) const;
-
- /**
- * Returns many cursors that partition the Collection into many disjoint sets. Iterating
- * all returned cursors is equivalent to iterating the full collection.
- */
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const;
-
- void deleteDocument( OperationContext* txn,
- const RecordId& loc,
- bool cappedOK = false,
- bool noWarn = false,
- BSONObj* deletedId = 0 );
-
- /**
- * this does NOT modify the doc before inserting
- * i.e. will not add an _id field for documents that are missing it
- *
- * If enforceQuota is false, quotas will be ignored.
- */
- StatusWith<RecordId> insertDocument( OperationContext* txn,
- const BSONObj& doc,
- bool enforceQuota,
- bool fromMigrate = false);
-
- /**
- * Callers must ensure no document validation is performed for this collection when calling
- * this method.
- */
- StatusWith<RecordId> insertDocument( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota );
-
- StatusWith<RecordId> insertDocument( OperationContext* txn,
- const BSONObj& doc,
- MultiIndexBlock* indexBlock,
- bool enforceQuota );
-
- /**
- * updates the document @ oldLocation with newDoc
- * if the document fits in the old space, it is put there
- * if not, it is moved
- * @return the post update location of the doc (may or may not be the same as oldLocation)
- */
- StatusWith<RecordId> updateDocument(OperationContext* txn,
- const RecordId& oldLocation,
- const Snapshotted<BSONObj>& oldDoc,
- const BSONObj& newDoc,
- bool enforceQuota,
- bool indexesAffected,
- OpDebug* debug,
- oplogUpdateEntryArgs& args);
-
- bool updateWithDamagesSupported() const;
-
- /**
- * Not allowed to modify indexes.
- * Illegal to call if updateWithDamagesSupported() returns false.
- */
- Status updateDocumentWithDamages(OperationContext* txn,
- const RecordId& loc,
- const Snapshotted<RecordData>& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages,
- oplogUpdateEntryArgs& args);
-
- // -----------
-
- StatusWith<CompactStats> compact(OperationContext* txn, const CompactOptions* options);
-
- /**
- * removes all documents as fast as possible
- * indexes before and after will be the same
- * as will other characteristics
- */
- Status truncate(OperationContext* txn);
-
- /**
- * @param full - does more checks
- * @param scanData - scans each document
- * @return OK if the validate run successfully
- * OK will be returned even if corruption is found
- * deatils will be in result
- */
- Status validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateResults* results, BSONObjBuilder* output );
-
- /**
- * forces data into cache
- */
- Status touch( OperationContext* txn,
- bool touchData, bool touchIndexes,
- BSONObjBuilder* output ) const;
-
- /**
- * Truncate documents newer than the document at 'end' from the capped
- * collection. The collection cannot be completely emptied using this
- * function. An assertion will be thrown if that is attempted.
- * @param inclusive - Truncate 'end' as well iff true
- * XXX: this will go away soon, just needed to move for now
- */
- void temp_cappedTruncateAfter( OperationContext* txn, RecordId end, bool inclusive );
-
- /**
- * Sets the validator for this collection.
- *
- * An empty validator removes all validation.
- * Requires an exclusive lock on the collection.
- */
- Status setValidator(OperationContext* txn, BSONObj validator);
-
- // -----------
-
- //
- // Stats
- //
-
- bool isCapped() const;
-
- /**
- * Get a pointer to a capped insert notifier object. The caller can wait on this object
- * until it is notified of a new insert into the capped collection.
- *
- * It is invalid to call this method unless the collection is capped.
- */
- std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const;
-
- uint64_t numRecords( OperationContext* txn ) const;
-
- uint64_t dataSize( OperationContext* txn ) const;
-
- int averageObjectSize( OperationContext* txn ) const {
- uint64_t n = numRecords( txn );
- if ( n == 0 )
- return 5;
- return static_cast<int>( dataSize( txn ) / n );
- }
-
- uint64_t getIndexSize(OperationContext* opCtx,
- BSONObjBuilder* details = NULL,
- int scale = 1);
-
- // --- end suspect things
-
- private:
-
- /**
- * Returns a non-ok Status if document does not pass this collection's validator.
- */
- Status checkValidation(OperationContext* txn, const BSONObj& document) const;
-
- /**
- * Returns a non-ok Status if validator is not legal for this collection.
- */
- StatusWith<std::unique_ptr<MatchExpression>> parseValidator(const BSONObj& validator) const;
-
- Status recordStoreGoingToMove( OperationContext* txn,
- const RecordId& oldLocation,
- const char* oldBuffer,
- size_t oldSize );
-
- Status recordStoreGoingToUpdateInPlace( OperationContext* txn,
- const RecordId& loc );
-
- Status aboutToDeleteCapped( OperationContext* txn, const RecordId& loc, RecordData data );
-
- /**
- * same semantics as insertDocument, but doesn't do:
- * - some user error checks
- * - adjust padding
- */
- StatusWith<RecordId> _insertDocument( OperationContext* txn,
- const BSONObj& doc,
- bool enforceQuota );
-
- bool _enforceQuota( bool userEnforeQuota ) const;
-
- int _magic;
-
- NamespaceString _ns;
- CollectionCatalogEntry* _details;
- RecordStore* _recordStore;
- DatabaseCatalogEntry* _dbce;
- CollectionInfoCache _infoCache;
- IndexCatalog _indexCatalog;
-
- // Empty means no filter.
- BSONObj _validatorDoc;
- // Points into _validatorDoc. Null means no filter.
- std::unique_ptr<MatchExpression> _validator;
-
- // this is mutable because read only users of the Collection class
- // use it keep state. This seems valid as const correctness of Collection
- // should be about the data.
- mutable CursorManager _cursorManager;
-
- // Notifier object for awaitData. Threads polling a capped collection for new data can wait
- // on this object until notified of the arrival of new data.
- //
- // This is non-null if and only if the collection is a capped collection.
- std::shared_ptr<CappedInsertNotifier> _cappedNotifier;
-
- friend class Database;
- friend class IndexCatalog;
- friend class NamespaceDetails;
- };
+ Status updateDocumentWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const Snapshotted<RecordData>& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages,
+ oplogUpdateEntryArgs& args);
+ // -----------
+
+ StatusWith<CompactStats> compact(OperationContext* txn, const CompactOptions* options);
+
+ /**
+ * removes all documents as fast as possible
+ * indexes before and after will be the same
+ * as will other characteristics
+ */
+ Status truncate(OperationContext* txn);
+
+ /**
+ * @param full - does more checks
+ * @param scanData - scans each document
+     * @return OK if the validate ran successfully
+ * OK will be returned even if corruption is found
+     *         details will be in the result
+ */
+ Status validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateResults* results,
+ BSONObjBuilder* output);
+
+ /**
+ * forces data into cache
+ */
+ Status touch(OperationContext* txn,
+ bool touchData,
+ bool touchIndexes,
+ BSONObjBuilder* output) const;
+
+ /**
+ * Truncate documents newer than the document at 'end' from the capped
+ * collection. The collection cannot be completely emptied using this
+ * function. An assertion will be thrown if that is attempted.
+ * @param inclusive - Truncate 'end' as well iff true
+ * XXX: this will go away soon, just needed to move for now
+ */
+ void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+
+ /**
+ * Sets the validator for this collection.
+ *
+ * An empty validator removes all validation.
+ * Requires an exclusive lock on the collection.
+ */
+ Status setValidator(OperationContext* txn, BSONObj validator);
+
+ // -----------
+
+ //
+ // Stats
+ //
+
+ bool isCapped() const;
+
+ /**
+ * Get a pointer to a capped insert notifier object. The caller can wait on this object
+ * until it is notified of a new insert into the capped collection.
+ *
+ * It is invalid to call this method unless the collection is capped.
+ */
+ std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const;
+
+ uint64_t numRecords(OperationContext* txn) const;
+
+ uint64_t dataSize(OperationContext* txn) const;
+
+ int averageObjectSize(OperationContext* txn) const {
+ uint64_t n = numRecords(txn);
+ if (n == 0)
+ return 5;
+ return static_cast<int>(dataSize(txn) / n);
+ }
+
+ uint64_t getIndexSize(OperationContext* opCtx, BSONObjBuilder* details = NULL, int scale = 1);
+
+ // --- end suspect things
+
+private:
+ /**
+ * Returns a non-ok Status if document does not pass this collection's validator.
+ */
+ Status checkValidation(OperationContext* txn, const BSONObj& document) const;
+
+ /**
+ * Returns a non-ok Status if validator is not legal for this collection.
+ */
+ StatusWith<std::unique_ptr<MatchExpression>> parseValidator(const BSONObj& validator) const;
+
+ Status recordStoreGoingToMove(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* oldBuffer,
+ size_t oldSize);
+
+ Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc);
+
+ Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data);
+
+ /**
+ * same semantics as insertDocument, but doesn't do:
+ * - some user error checks
+ * - adjust padding
+ */
+ StatusWith<RecordId> _insertDocument(OperationContext* txn,
+ const BSONObj& doc,
+ bool enforceQuota);
+
+ bool _enforceQuota(bool userEnforeQuota) const;
+
+ int _magic;
+
+ NamespaceString _ns;
+ CollectionCatalogEntry* _details;
+ RecordStore* _recordStore;
+ DatabaseCatalogEntry* _dbce;
+ CollectionInfoCache _infoCache;
+ IndexCatalog _indexCatalog;
+
+ // Empty means no filter.
+ BSONObj _validatorDoc;
+ // Points into _validatorDoc. Null means no filter.
+ std::unique_ptr<MatchExpression> _validator;
+
+ // this is mutable because read only users of the Collection class
+ // use it keep state. This seems valid as const correctness of Collection
+    // use it to keep state. This seems valid as const correctness of Collection
+ mutable CursorManager _cursorManager;
+
+ // Notifier object for awaitData. Threads polling a capped collection for new data can wait
+ // on this object until notified of the arrival of new data.
+ //
+ // This is non-null if and only if the collection is a capped collection.
+ std::shared_ptr<CappedInsertNotifier> _cappedNotifier;
+
+ friend class Database;
+ friend class IndexCatalog;
+ friend class NamespaceDetails;
+};
}
diff --git a/src/mongo/db/catalog/collection_catalog_entry.h b/src/mongo/db/catalog/collection_catalog_entry.h
index 876d393d733..c5f4278c3f1 100644
--- a/src/mongo/db/catalog/collection_catalog_entry.h
+++ b/src/mongo/db/catalog/collection_catalog_entry.h
@@ -37,83 +37,74 @@
namespace mongo {
- class IndexDescriptor;
- class OperationContext;
+class IndexDescriptor;
+class OperationContext;
- class CollectionCatalogEntry {
- public:
- CollectionCatalogEntry( StringData ns )
- : _ns( ns ){
- }
- virtual ~CollectionCatalogEntry(){}
+class CollectionCatalogEntry {
+public:
+ CollectionCatalogEntry(StringData ns) : _ns(ns) {}
+ virtual ~CollectionCatalogEntry() {}
- const NamespaceString& ns() const { return _ns; }
+ const NamespaceString& ns() const {
+ return _ns;
+ }
- // ------- indexes ----------
+ // ------- indexes ----------
- virtual CollectionOptions getCollectionOptions( OperationContext* txn ) const = 0;
+ virtual CollectionOptions getCollectionOptions(OperationContext* txn) const = 0;
- virtual int getTotalIndexCount( OperationContext* txn ) const = 0;
+ virtual int getTotalIndexCount(OperationContext* txn) const = 0;
- virtual int getCompletedIndexCount( OperationContext* txn ) const = 0;
+ virtual int getCompletedIndexCount(OperationContext* txn) const = 0;
- virtual int getMaxAllowedIndexes() const = 0;
+ virtual int getMaxAllowedIndexes() const = 0;
- virtual void getAllIndexes( OperationContext* txn,
- std::vector<std::string>* names ) const = 0;
+ virtual void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const = 0;
- virtual BSONObj getIndexSpec( OperationContext* txn,
- StringData idxName ) const = 0;
+ virtual BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const = 0;
- virtual bool isIndexMultikey( OperationContext* txn,
- StringData indexName) const = 0;
+ virtual bool isIndexMultikey(OperationContext* txn, StringData indexName) const = 0;
- virtual bool setIndexIsMultikey(OperationContext* txn,
- StringData indexName,
- bool multikey = true) = 0;
+ virtual bool setIndexIsMultikey(OperationContext* txn,
+ StringData indexName,
+ bool multikey = true) = 0;
- virtual RecordId getIndexHead( OperationContext* txn,
- StringData indexName ) const = 0;
+ virtual RecordId getIndexHead(OperationContext* txn, StringData indexName) const = 0;
- virtual void setIndexHead( OperationContext* txn,
- StringData indexName,
- const RecordId& newHead ) = 0;
+ virtual void setIndexHead(OperationContext* txn,
+ StringData indexName,
+ const RecordId& newHead) = 0;
- virtual bool isIndexReady( OperationContext* txn,
- StringData indexName ) const = 0;
+ virtual bool isIndexReady(OperationContext* txn, StringData indexName) const = 0;
- virtual Status removeIndex( OperationContext* txn,
- StringData indexName ) = 0;
+ virtual Status removeIndex(OperationContext* txn, StringData indexName) = 0;
- virtual Status prepareForIndexBuild( OperationContext* txn,
- const IndexDescriptor* spec ) = 0;
+ virtual Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) = 0;
- virtual void indexBuildSuccess( OperationContext* txn,
- StringData indexName ) = 0;
+ virtual void indexBuildSuccess(OperationContext* txn, StringData indexName) = 0;
- /* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
- * The specified index must already contain an expireAfterSeconds field, and the value in
- * that field and newExpireSecs must both be numeric.
- */
- virtual void updateTTLSetting( OperationContext* txn,
- StringData idxName,
- long long newExpireSeconds ) = 0;
+ /* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
+ * The specified index must already contain an expireAfterSeconds field, and the value in
+ * that field and newExpireSecs must both be numeric.
+ */
+ virtual void updateTTLSetting(OperationContext* txn,
+ StringData idxName,
+ long long newExpireSeconds) = 0;
- /**
- * Sets the flags field of CollectionOptions to newValue.
- * Subsequent calls to getCollectionOptions should have flags==newValue and flagsSet==true.
- */
- virtual void updateFlags(OperationContext* txn, int newValue) = 0;
+ /**
+ * Sets the flags field of CollectionOptions to newValue.
+ * Subsequent calls to getCollectionOptions should have flags==newValue and flagsSet==true.
+ */
+ virtual void updateFlags(OperationContext* txn, int newValue) = 0;
- /**
- * Updates the validator for this collection.
- *
- * An empty validator removes all validation.
- */
- virtual void updateValidator(OperationContext* txn, const BSONObj& validator) = 0;
-
- private:
- NamespaceString _ns;
- };
+ /**
+ * Updates the validator for this collection.
+ *
+ * An empty validator removes all validation.
+ */
+ virtual void updateValidator(OperationContext* txn, const BSONObj& validator) = 0;
+private:
+ NamespaceString _ns;
+};
}
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index a6f4f98041b..e3f0623006f 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -48,154 +48,150 @@
namespace mongo {
- using std::endl;
- using std::vector;
-
- namespace {
- BSONObj _compactAdjustIndexSpec( const BSONObj& oldSpec ) {
- BSONObjBuilder b;
- BSONObj::iterator i( oldSpec );
- while( i.more() ) {
- BSONElement e = i.next();
- if ( str::equals( e.fieldName(), "v" ) ) {
- // Drop any preexisting index version spec. The default index version will
- // be used instead for the new index.
- continue;
- }
- if ( str::equals( e.fieldName(), "background" ) ) {
- // Create the new index in the foreground.
- continue;
- }
- // Pass the element through to the new index spec.
- b.append(e);
- }
- return b.obj();
+using std::endl;
+using std::vector;
+
+namespace {
+BSONObj _compactAdjustIndexSpec(const BSONObj& oldSpec) {
+ BSONObjBuilder b;
+ BSONObj::iterator i(oldSpec);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals(e.fieldName(), "v")) {
+ // Drop any preexisting index version spec. The default index version will
+ // be used instead for the new index.
+ continue;
+ }
+ if (str::equals(e.fieldName(), "background")) {
+ // Create the new index in the foreground.
+ continue;
}
+ // Pass the element through to the new index spec.
+ b.append(e);
+ }
+ return b.obj();
+}
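
The effect of the loop above is easiest to see on a concrete spec (the field values here are hypothetical, and a plain std::map stands in for the BSON object): everything is copied through except "v" and "background".

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        std::map<std::string, std::string> oldSpec = {
            {"v", "1"}, {"background", "true"}, {"key", "{ a: 1 }"}, {"name", "a_1"}};

        std::map<std::string, std::string> newSpec;
        for (const auto& field : oldSpec) {
            if (field.first == "v" || field.first == "background")
                continue;  // dropped: rebuilt index uses the default version, foreground build
            newSpec.insert(field);
        }

        for (const auto& field : newSpec)
            std::cout << field.first << ": " << field.second << "\n";  // key and name only
        return 0;
    }
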
- class MyCompactAdaptor : public RecordStoreCompactAdaptor {
- public:
- MyCompactAdaptor(Collection* collection,
- MultiIndexBlock* indexBlock)
+class MyCompactAdaptor : public RecordStoreCompactAdaptor {
+public:
+ MyCompactAdaptor(Collection* collection, MultiIndexBlock* indexBlock)
- : _collection( collection ),
- _multiIndexBlock(indexBlock) {
- }
+ : _collection(collection), _multiIndexBlock(indexBlock) {}
- virtual bool isDataValid( const RecordData& recData ) {
- return recData.toBson().valid();
- }
-
- virtual size_t dataSize( const RecordData& recData ) {
- return recData.toBson().objsize();
- }
+ virtual bool isDataValid(const RecordData& recData) {
+ return recData.toBson().valid();
+ }
- virtual void inserted( const RecordData& recData, const RecordId& newLocation ) {
- _multiIndexBlock->insert( recData.toBson(), newLocation );
- }
+ virtual size_t dataSize(const RecordData& recData) {
+ return recData.toBson().objsize();
+ }
- private:
- Collection* _collection;
+ virtual void inserted(const RecordData& recData, const RecordId& newLocation) {
+ _multiIndexBlock->insert(recData.toBson(), newLocation);
+ }
- MultiIndexBlock* _multiIndexBlock;
- };
+private:
+ Collection* _collection;
- }
+ MultiIndexBlock* _multiIndexBlock;
+};
+}
- StatusWith<CompactStats> Collection::compact( OperationContext* txn,
- const CompactOptions* compactOptions ) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+StatusWith<CompactStats> Collection::compact(OperationContext* txn,
+ const CompactOptions* compactOptions) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(txn);
- if ( !_recordStore->compactSupported() )
- return StatusWith<CompactStats>( ErrorCodes::CommandNotSupported,
- str::stream() <<
- "cannot compact collection with record store: " <<
- _recordStore->name() );
+ if (!_recordStore->compactSupported())
+ return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
+ str::stream()
+ << "cannot compact collection with record store: "
+ << _recordStore->name());
- if (_recordStore->compactsInPlace()) {
- // Since we are compacting in-place, we don't need to touch the indexes.
- // TODO SERVER-16856 compact indexes
- CompactStats stats;
- Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
- if (!status.isOK())
- return StatusWith<CompactStats>(status);
+ if (_recordStore->compactsInPlace()) {
+ // Since we are compacting in-place, we don't need to touch the indexes.
+ // TODO SERVER-16856 compact indexes
+ CompactStats stats;
+ Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
+ if (!status.isOK())
+ return StatusWith<CompactStats>(status);
- return StatusWith<CompactStats>(stats);
- }
+ return StatusWith<CompactStats>(stats);
+ }
- if ( _indexCatalog.numIndexesInProgress( txn ) )
- return StatusWith<CompactStats>( ErrorCodes::BadValue,
- "cannot compact when indexes in progress" );
-
-
- // same data, but might perform a little different after compact?
- _infoCache.reset( txn );
-
- vector<BSONObj> indexSpecs;
- {
- IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( txn, false ) );
- while ( ii.more() ) {
- IndexDescriptor* descriptor = ii.next();
-
- const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
- const BSONObj key = spec.getObjectField("key");
- const Status keyStatus = validateKeyPattern(key);
- if (!keyStatus.isOK()) {
- return StatusWith<CompactStats>(
- ErrorCodes::CannotCreateIndex,
- str::stream() << "Cannot compact collection due to invalid index "
- << spec << ": " << keyStatus.reason() << " For more info see"
- << " http://dochub.mongodb.org/core/index-validation");
- }
- indexSpecs.push_back(spec);
+ if (_indexCatalog.numIndexesInProgress(txn))
+ return StatusWith<CompactStats>(ErrorCodes::BadValue,
+ "cannot compact when indexes in progress");
+
+
+ // same data, but might perform a little different after compact?
+ _infoCache.reset(txn);
+
+ vector<BSONObj> indexSpecs;
+ {
+ IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
+ while (ii.more()) {
+ IndexDescriptor* descriptor = ii.next();
+
+ const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
+ const BSONObj key = spec.getObjectField("key");
+ const Status keyStatus = validateKeyPattern(key);
+ if (!keyStatus.isOK()) {
+ return StatusWith<CompactStats>(
+ ErrorCodes::CannotCreateIndex,
+ str::stream() << "Cannot compact collection due to invalid index " << spec
+ << ": " << keyStatus.reason() << " For more info see"
+ << " http://dochub.mongodb.org/core/index-validation");
}
+ indexSpecs.push_back(spec);
}
+ }
- // Give a chance to be interrupted *before* we drop all indexes.
- txn->checkForInterrupt();
-
- {
- // note that the drop indexes call also invalidates all clientcursors for the namespace,
- // which is important and wanted here
- WriteUnitOfWork wunit(txn);
- log() << "compact dropping indexes" << endl;
- Status status = _indexCatalog.dropAllIndexes(txn, true);
- if ( !status.isOK() ) {
- return StatusWith<CompactStats>( status );
- }
- wunit.commit();
- }
+ // Give a chance to be interrupted *before* we drop all indexes.
+ txn->checkForInterrupt();
- CompactStats stats;
+ {
+ // note that the drop indexes call also invalidates all clientcursors for the namespace,
+ // which is important and wanted here
+ WriteUnitOfWork wunit(txn);
+ log() << "compact dropping indexes" << endl;
+ Status status = _indexCatalog.dropAllIndexes(txn, true);
+ if (!status.isOK()) {
+ return StatusWith<CompactStats>(status);
+ }
+ wunit.commit();
+ }
- MultiIndexBlock indexer(txn, this);
- indexer.allowInterruption();
- indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
+ CompactStats stats;
- Status status = indexer.init( indexSpecs );
- if ( !status.isOK() )
- return StatusWith<CompactStats>( status );
+ MultiIndexBlock indexer(txn, this);
+ indexer.allowInterruption();
+ indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
- MyCompactAdaptor adaptor(this, &indexer);
+ Status status = indexer.init(indexSpecs);
+ if (!status.isOK())
+ return StatusWith<CompactStats>(status);
- status = _recordStore->compact( txn, &adaptor, compactOptions, &stats);
- if (!status.isOK())
- return StatusWith<CompactStats>(status);
+ MyCompactAdaptor adaptor(this, &indexer);
- log() << "starting index commits";
- status = indexer.doneInserting();
- if ( !status.isOK() )
- return StatusWith<CompactStats>( status );
+ status = _recordStore->compact(txn, &adaptor, compactOptions, &stats);
+ if (!status.isOK())
+ return StatusWith<CompactStats>(status);
- {
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
- }
+ log() << "starting index commits";
+ status = indexer.doneInserting();
+ if (!status.isOK())
+ return StatusWith<CompactStats>(status);
- return StatusWith<CompactStats>( stats );
+ {
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
}
+ return StatusWith<CompactStats>(stats);
+}
+
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp
index a111a9a6684..994dc9945c5 100644
--- a/src/mongo/db/catalog/collection_info_cache.cpp
+++ b/src/mongo/db/catalog/collection_info_cache.cpp
@@ -46,128 +46,124 @@
namespace mongo {
- CollectionInfoCache::CollectionInfoCache( Collection* collection )
- : _collection( collection ),
- _keysComputed( false ),
- _planCache(new PlanCache(collection->ns().ns())),
- _querySettings(new QuerySettings()) { }
-
- void CollectionInfoCache::reset( OperationContext* txn ) {
- LOG(1) << _collection->ns().ns() << ": clearing plan cache - collection info cache reset";
- clearQueryCache();
- _keysComputed = false;
- computeIndexKeys( txn );
- updatePlanCacheIndexEntries( txn );
- // query settings is not affected by info cache reset.
- // index filters should persist throughout life of collection
- }
+CollectionInfoCache::CollectionInfoCache(Collection* collection)
+ : _collection(collection),
+ _keysComputed(false),
+ _planCache(new PlanCache(collection->ns().ns())),
+ _querySettings(new QuerySettings()) {}
+
+void CollectionInfoCache::reset(OperationContext* txn) {
+ LOG(1) << _collection->ns().ns() << ": clearing plan cache - collection info cache reset";
+ clearQueryCache();
+ _keysComputed = false;
+ computeIndexKeys(txn);
+ updatePlanCacheIndexEntries(txn);
+ // query settings is not affected by info cache reset.
+ // index filters should persist throughout life of collection
+}
- const UpdateIndexData& CollectionInfoCache::indexKeys( OperationContext* txn ) const {
- // This requires "some" lock, and MODE_IS is an expression for that, for now.
- dassert(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
- invariant(_keysComputed);
- return _indexedPaths;
- }
+const UpdateIndexData& CollectionInfoCache::indexKeys(OperationContext* txn) const {
+ // This requires "some" lock, and MODE_IS is an expression for that, for now.
+ dassert(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
+ invariant(_keysComputed);
+ return _indexedPaths;
+}
- void CollectionInfoCache::computeIndexKeys( OperationContext* txn ) {
- // This function modified objects attached to the Collection so we need a write lock
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
- _indexedPaths.clear();
-
- IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(txn, true);
- while (i.more()) {
- IndexDescriptor* descriptor = i.next();
-
- if (descriptor->getAccessMethodName() != IndexNames::TEXT) {
- BSONObj key = descriptor->keyPattern();
- BSONObjIterator j(key);
- while (j.more()) {
- BSONElement e = j.next();
- _indexedPaths.addPath(e.fieldName());
- }
+void CollectionInfoCache::computeIndexKeys(OperationContext* txn) {
+    // This function modifies objects attached to the Collection, so we need a write lock
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ _indexedPaths.clear();
+
+ IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(txn, true);
+ while (i.more()) {
+ IndexDescriptor* descriptor = i.next();
+
+ if (descriptor->getAccessMethodName() != IndexNames::TEXT) {
+ BSONObj key = descriptor->keyPattern();
+ BSONObjIterator j(key);
+ while (j.more()) {
+ BSONElement e = j.next();
+ _indexedPaths.addPath(e.fieldName());
}
- else {
- fts::FTSSpec ftsSpec(descriptor->infoObj());
-
- if (ftsSpec.wildcard()) {
- _indexedPaths.allPathsIndexed();
+ } else {
+ fts::FTSSpec ftsSpec(descriptor->infoObj());
+
+ if (ftsSpec.wildcard()) {
+ _indexedPaths.allPathsIndexed();
+ } else {
+ for (size_t i = 0; i < ftsSpec.numExtraBefore(); ++i) {
+ _indexedPaths.addPath(ftsSpec.extraBefore(i));
}
- else {
- for (size_t i = 0; i < ftsSpec.numExtraBefore(); ++i) {
- _indexedPaths.addPath(ftsSpec.extraBefore(i));
- }
- for (fts::Weights::const_iterator it = ftsSpec.weights().begin();
- it != ftsSpec.weights().end();
- ++it) {
- _indexedPaths.addPath(it->first);
- }
- for (size_t i = 0; i < ftsSpec.numExtraAfter(); ++i) {
- _indexedPaths.addPath(ftsSpec.extraAfter(i));
- }
- // Any update to a path containing "language" as a component could change the
- // language of a subdocument. Add the override field as a path component.
- _indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
+ for (fts::Weights::const_iterator it = ftsSpec.weights().begin();
+ it != ftsSpec.weights().end();
+ ++it) {
+ _indexedPaths.addPath(it->first);
}
- }
-
- // handle partial indexes
- const IndexCatalogEntry* entry = i.catalogEntry(descriptor);
- const MatchExpression* filter = entry->getFilterExpression();
- if (filter) {
- unordered_set<std::string> paths;
- QueryPlannerIXSelect::getFields(filter, "", &paths);
- for (auto it = paths.begin(); it != paths.end(); ++it) {
- _indexedPaths.addPath(*it);
+ for (size_t i = 0; i < ftsSpec.numExtraAfter(); ++i) {
+ _indexedPaths.addPath(ftsSpec.extraAfter(i));
}
+ // Any update to a path containing "language" as a component could change the
+ // language of a subdocument. Add the override field as a path component.
+ _indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
}
}
- _keysComputed = true;
-
- }
-
- void CollectionInfoCache::notifyOfWriteOp() {
- if (NULL != _planCache.get()) {
- _planCache->notifyOfWriteOp();
+ // handle partial indexes
+ const IndexCatalogEntry* entry = i.catalogEntry(descriptor);
+ const MatchExpression* filter = entry->getFilterExpression();
+ if (filter) {
+ unordered_set<std::string> paths;
+ QueryPlannerIXSelect::getFields(filter, "", &paths);
+ for (auto it = paths.begin(); it != paths.end(); ++it) {
+ _indexedPaths.addPath(*it);
+ }
}
}
- void CollectionInfoCache::clearQueryCache() {
- if (NULL != _planCache.get()) {
- _planCache->clear();
- }
- }
+ _keysComputed = true;
+}
- PlanCache* CollectionInfoCache::getPlanCache() const {
- return _planCache.get();
+void CollectionInfoCache::notifyOfWriteOp() {
+ if (NULL != _planCache.get()) {
+ _planCache->notifyOfWriteOp();
}
+}
- QuerySettings* CollectionInfoCache::getQuerySettings() const {
- return _querySettings.get();
+void CollectionInfoCache::clearQueryCache() {
+ if (NULL != _planCache.get()) {
+ _planCache->clear();
}
+}
- void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
- std::vector<IndexEntry> indexEntries;
-
- // TODO We shouldn't need to include unfinished indexes, but we must here because the index
- // catalog may be in an inconsistent state. SERVER-18346.
- const bool includeUnfinishedIndexes = true;
- IndexCatalog::IndexIterator ii =
- _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
- while (ii.more()) {
- const IndexDescriptor* desc = ii.next();
- const IndexCatalogEntry* ice = ii.catalogEntry(desc);
- indexEntries.emplace_back(desc->keyPattern(),
- desc->getAccessMethodName(),
- desc->isMultikey(txn),
- desc->isSparse(),
- desc->unique(),
- desc->indexName(),
- ice->getFilterExpression(),
- desc->infoObj());
- }
+PlanCache* CollectionInfoCache::getPlanCache() const {
+ return _planCache.get();
+}
- _planCache->notifyOfIndexEntries(indexEntries);
+QuerySettings* CollectionInfoCache::getQuerySettings() const {
+ return _querySettings.get();
+}
+
+void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
+ std::vector<IndexEntry> indexEntries;
+
+ // TODO We shouldn't need to include unfinished indexes, but we must here because the index
+ // catalog may be in an inconsistent state. SERVER-18346.
+ const bool includeUnfinishedIndexes = true;
+ IndexCatalog::IndexIterator ii =
+ _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
+ const IndexCatalogEntry* ice = ii.catalogEntry(desc);
+ indexEntries.emplace_back(desc->keyPattern(),
+ desc->getAccessMethodName(),
+ desc->isMultikey(txn),
+ desc->isSparse(),
+ desc->unique(),
+ desc->indexName(),
+ ice->getFilterExpression(),
+ desc->infoObj());
}
+ _planCache->notifyOfIndexEntries(indexEntries);
+}
}
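
The computeIndexKeys() body above collects every dotted path covered by an index (including text-index weights, the extra before/after fields, the language override component, and partial-filter fields) into _indexedPaths, so later writes can cheaply test whether they might touch an index. Below is a small, dependency-free sketch of that bookkeeping pattern; IndexedPathSet and its methods are illustrative stand-ins, not the real UpdateIndexData API.

// Minimal standalone sketch of the indexed-path bookkeeping idea: remember which
// dotted paths and path components are covered by indexes, then answer "could a
// write to this path affect an index?" All names here are hypothetical.
#include <iostream>
#include <set>
#include <sstream>
#include <string>
#include <vector>

class IndexedPathSet {
public:
    void addPath(const std::string& path) { _paths.insert(path); }
    void addPathComponent(const std::string& component) { _components.insert(component); }
    void allPathsIndexed() { _all = true; }

    // True if an update to 'path' could touch an indexed field: either the path is
    // related by prefix to an indexed path, or it contains a tracked component such
    // as the text-index language override field.
    bool mightBeIndexed(const std::string& path) const {
        if (_all)
            return true;
        for (const std::string& p : _paths) {
            if (isPrefixOf(p, path) || isPrefixOf(path, p))
                return true;
        }
        for (const std::string& c : split(path)) {
            if (_components.count(c))
                return true;
        }
        return false;
    }

private:
    static std::vector<std::string> split(const std::string& path) {
        std::vector<std::string> out;
        std::stringstream ss(path);
        std::string part;
        while (std::getline(ss, part, '.'))
            out.push_back(part);
        return out;
    }

    // "a.b" is a prefix of "a.b.c" but not of "a.bc".
    static bool isPrefixOf(const std::string& pre, const std::string& full) {
        if (pre.size() > full.size())
            return false;
        if (full.compare(0, pre.size(), pre) != 0)
            return false;
        return full.size() == pre.size() || full[pre.size()] == '.';
    }

    bool _all = false;
    std::set<std::string> _paths;
    std::set<std::string> _components;
};

int main() {
    IndexedPathSet paths;
    paths.addPath("a.b");                // e.g. from an {"a.b": 1} index key
    paths.addPathComponent("language");  // text-index language override component

    std::cout << paths.mightBeIndexed("a.b.c") << "\n";        // 1: below an indexed path
    std::cout << paths.mightBeIndexed("a") << "\n";            // 1: prefix of an indexed path
    std::cout << paths.mightBeIndexed("x.language.y") << "\n"; // 1: contains a tracked component
    std::cout << paths.mightBeIndexed("z") << "\n";            // 0
}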
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index 294f371eef0..7e418b4e123 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -37,76 +37,76 @@
namespace mongo {
- class Collection;
+class Collection;
- /**
- * this is for storing things that you want to cache about a single collection
- * life cycle is managed for you from inside Collection
+/**
+ * this is for storing things that you want to cache about a single collection
+ * life cycle is managed for you from inside Collection
+ */
+class CollectionInfoCache {
+public:
+ CollectionInfoCache(Collection* collection);
+
+ /*
+ * Resets entire cache state. Must be called under exclusive DB lock.
*/
- class CollectionInfoCache {
- public:
-
- CollectionInfoCache( Collection* collection );
-
- /*
- * Resets entire cache state. Must be called under exclusive DB lock.
- */
- void reset( OperationContext* txn );
+ void reset(OperationContext* txn);
- //
- // New Query Execution
- //
+ //
+ // New Query Execution
+ //
- /**
- * Get the PlanCache for this collection.
- */
- PlanCache* getPlanCache() const;
-
- /**
- * Get the QuerySettings for this collection.
- */
- QuerySettings* getQuerySettings() const;
+ /**
+ * Get the PlanCache for this collection.
+ */
+ PlanCache* getPlanCache() const;
- // -------------------
+ /**
+ * Get the QuerySettings for this collection.
+ */
+ QuerySettings* getQuerySettings() const;
- /* get set of index keys for this namespace. handy to quickly check if a given
- field is indexed (Note it might be a secondary component of a compound index.)
- */
- const UpdateIndexData& indexKeys( OperationContext* txn ) const;
+ // -------------------
- // ---------------------
+ /* get set of index keys for this namespace. handy to quickly check if a given
+ field is indexed (Note it might be a secondary component of a compound index.)
+ */
+ const UpdateIndexData& indexKeys(OperationContext* txn) const;
- /**
- * Called when an index is added to this collection.
- */
- void addedIndex( OperationContext* txn ) { reset( txn ); }
+ // ---------------------
- void clearQueryCache();
+ /**
+ * Called when an index is added to this collection.
+ */
+ void addedIndex(OperationContext* txn) {
+ reset(txn);
+ }
- /* you must notify the cache if you are doing writes, as query plan utility will change */
- void notifyOfWriteOp();
+ void clearQueryCache();
- private:
+ /* you must notify the cache if you are doing writes, as query plan utility will change */
+ void notifyOfWriteOp();
- Collection* _collection; // not owned
+private:
+ Collection* _collection; // not owned
- // --- index keys cache
- bool _keysComputed;
- UpdateIndexData _indexedPaths;
+ // --- index keys cache
+ bool _keysComputed;
+ UpdateIndexData _indexedPaths;
- // A cache for query plans.
- std::unique_ptr<PlanCache> _planCache;
+ // A cache for query plans.
+ std::unique_ptr<PlanCache> _planCache;
- // Query settings.
- // Includes index filters.
- std::unique_ptr<QuerySettings> _querySettings;
+ // Query settings.
+ // Includes index filters.
+ std::unique_ptr<QuerySettings> _querySettings;
- /**
- * Must be called under exclusive DB lock.
- */
- void computeIndexKeys( OperationContext* txn );
+ /**
+ * Must be called under exclusive DB lock.
+ */
+ void computeIndexKeys(OperationContext* txn);
- void updatePlanCacheIndexEntries( OperationContext* txn );
- };
+ void updatePlanCacheIndexEntries(OperationContext* txn);
+};
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index 90dd997fce9..3aaaaac6893 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -35,180 +35,170 @@
namespace mongo {
- // static
- bool CollectionOptions::validMaxCappedDocs( long long* max ) {
- if ( *max <= 0 ||
- *max == std::numeric_limits<long long>::max() ) {
- *max = 0x7fffffff;
- return true;
- }
-
- if ( *max < ( 0x1LL << 31 ) ) {
- return true;
- }
-
- return false;
+// static
+bool CollectionOptions::validMaxCappedDocs(long long* max) {
+ if (*max <= 0 || *max == std::numeric_limits<long long>::max()) {
+ *max = 0x7fffffff;
+ return true;
}
- void CollectionOptions::reset() {
- capped = false;
- cappedSize = 0;
- cappedMaxDocs = 0;
- initialNumExtents = 0;
- initialExtentSizes.clear();
- autoIndexId = DEFAULT;
- // For compatibility with previous versions if the user sets no flags,
- // we set Flag_UsePowerOf2Sizes in case the user downgrades.
- flags = Flag_UsePowerOf2Sizes;
- flagsSet = false;
- temp = false;
- storageEngine = BSONObj();
- validator = BSONObj();
+ if (*max < (0x1LL << 31)) {
+ return true;
}
- bool CollectionOptions::isValid() const {
- return validate().isOK();
- }
+ return false;
+}
- Status CollectionOptions::validate() const {
- return CollectionOptions().parse(toBSON());
- }
+void CollectionOptions::reset() {
+ capped = false;
+ cappedSize = 0;
+ cappedMaxDocs = 0;
+ initialNumExtents = 0;
+ initialExtentSizes.clear();
+ autoIndexId = DEFAULT;
+ // For compatibility with previous versions if the user sets no flags,
+ // we set Flag_UsePowerOf2Sizes in case the user downgrades.
+ flags = Flag_UsePowerOf2Sizes;
+ flagsSet = false;
+ temp = false;
+ storageEngine = BSONObj();
+ validator = BSONObj();
+}
- Status CollectionOptions::parse(const BSONObj& options) {
- reset();
+bool CollectionOptions::isValid() const {
+ return validate().isOK();
+}
- // During parsing, ignore some validation errors in order to accept options objects that
- // were valid in previous versions of the server. SERVER-13737.
- BSONObjIterator i( options );
- while ( i.more() ) {
- BSONElement e = i.next();
- StringData fieldName = e.fieldName();
+Status CollectionOptions::validate() const {
+ return CollectionOptions().parse(toBSON());
+}
- if ( fieldName == "capped" ) {
- capped = e.trueValue();
+Status CollectionOptions::parse(const BSONObj& options) {
+ reset();
+
+ // During parsing, ignore some validation errors in order to accept options objects that
+ // were valid in previous versions of the server. SERVER-13737.
+ BSONObjIterator i(options);
+ while (i.more()) {
+ BSONElement e = i.next();
+ StringData fieldName = e.fieldName();
+
+ if (fieldName == "capped") {
+ capped = e.trueValue();
+ } else if (fieldName == "size") {
+ if (!e.isNumber()) {
+ // Ignoring for backwards compatibility.
+ continue;
}
- else if ( fieldName == "size" ) {
- if ( !e.isNumber() ) {
- // Ignoring for backwards compatibility.
- continue;
- }
- cappedSize = e.numberLong();
- if ( cappedSize < 0 )
- return Status( ErrorCodes::BadValue, "size has to be >= 0" );
- cappedSize += 0xff;
- cappedSize &= 0xffffffffffffff00LL;
+ cappedSize = e.numberLong();
+ if (cappedSize < 0)
+ return Status(ErrorCodes::BadValue, "size has to be >= 0");
+ cappedSize += 0xff;
+ cappedSize &= 0xffffffffffffff00LL;
+ } else if (fieldName == "max") {
+ if (!options["capped"].trueValue() || !e.isNumber()) {
+ // Ignoring for backwards compatibility.
+ continue;
}
- else if ( fieldName == "max" ) {
- if ( !options["capped"].trueValue() || !e.isNumber() ) {
- // Ignoring for backwards compatibility.
- continue;
- }
- cappedMaxDocs = e.numberLong();
- if ( !validMaxCappedDocs( &cappedMaxDocs ) )
- return Status( ErrorCodes::BadValue,
- "max in a capped collection has to be < 2^31 or not set" );
- }
- else if ( fieldName == "$nExtents" ) {
- if ( e.type() == Array ) {
- BSONObjIterator j( e.Obj() );
- while ( j.more() ) {
- BSONElement inner = j.next();
- initialExtentSizes.push_back( inner.numberInt() );
- }
- }
- else {
- initialNumExtents = e.numberLong();
+ cappedMaxDocs = e.numberLong();
+ if (!validMaxCappedDocs(&cappedMaxDocs))
+ return Status(ErrorCodes::BadValue,
+ "max in a capped collection has to be < 2^31 or not set");
+ } else if (fieldName == "$nExtents") {
+ if (e.type() == Array) {
+ BSONObjIterator j(e.Obj());
+ while (j.more()) {
+ BSONElement inner = j.next();
+ initialExtentSizes.push_back(inner.numberInt());
}
+ } else {
+ initialNumExtents = e.numberLong();
}
- else if ( fieldName == "autoIndexId" ) {
- if ( e.trueValue() )
- autoIndexId = YES;
- else
- autoIndexId = NO;
+ } else if (fieldName == "autoIndexId") {
+ if (e.trueValue())
+ autoIndexId = YES;
+ else
+ autoIndexId = NO;
+ } else if (fieldName == "flags") {
+ flags = e.numberInt();
+ flagsSet = true;
+ } else if (fieldName == "temp") {
+ temp = e.trueValue();
+ } else if (fieldName == "storageEngine") {
+ // Storage engine-specific collection options.
+ // "storageEngine" field must be of type "document".
+ // Every field inside "storageEngine" has to be a document.
+ // Format:
+ // {
+ // ...
+ // storageEngine: {
+ // storageEngine1: {
+ // ...
+ // },
+ // storageEngine2: {
+ // ...
+ // }
+ // },
+ // ...
+ // }
+ if (e.type() != mongo::Object) {
+ return Status(ErrorCodes::BadValue, "'storageEngine' has to be a document.");
}
- else if ( fieldName == "flags" ) {
- flags = e.numberInt();
- flagsSet = true;
- }
- else if ( fieldName == "temp" ) {
- temp = e.trueValue();
- }
- else if (fieldName == "storageEngine") {
- // Storage engine-specific collection options.
- // "storageEngine" field must be of type "document".
- // Every field inside "storageEngine" has to be a document.
- // Format:
- // {
- // ...
- // storageEngine: {
- // storageEngine1: {
- // ...
- // },
- // storageEngine2: {
- // ...
- // }
- // },
- // ...
- // }
- if (e.type() != mongo::Object) {
- return Status(ErrorCodes::BadValue, "'storageEngine' has to be a document.");
- }
- BSONForEach(storageEngineElement, e.Obj()) {
- StringData storageEngineName = storageEngineElement.fieldNameStringData();
- if (storageEngineElement.type() != mongo::Object) {
- return Status(ErrorCodes::BadValue, str::stream() << "'storageEngine." <<
- storageEngineName << "' has to be an embedded document.");
- }
+ BSONForEach(storageEngineElement, e.Obj()) {
+ StringData storageEngineName = storageEngineElement.fieldNameStringData();
+ if (storageEngineElement.type() != mongo::Object) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "'storageEngine." << storageEngineName
+ << "' has to be an embedded document.");
}
-
- storageEngine = e.Obj().getOwned();
}
- else if (fieldName == "validator") {
- if (e.type() != mongo::Object) {
- return Status(ErrorCodes::BadValue, "'validator' has to be a document.");
- }
- validator = e.Obj().getOwned();
+ storageEngine = e.Obj().getOwned();
+ } else if (fieldName == "validator") {
+ if (e.type() != mongo::Object) {
+ return Status(ErrorCodes::BadValue, "'validator' has to be a document.");
}
- }
- return Status::OK();
+ validator = e.Obj().getOwned();
+ }
}
- BSONObj CollectionOptions::toBSON() const {
- BSONObjBuilder b;
- if ( capped ) {
- b.appendBool( "capped", true );
- b.appendNumber( "size", cappedSize );
+ return Status::OK();
+}
- if ( cappedMaxDocs )
- b.appendNumber( "max", cappedMaxDocs );
- }
+BSONObj CollectionOptions::toBSON() const {
+ BSONObjBuilder b;
+ if (capped) {
+ b.appendBool("capped", true);
+ b.appendNumber("size", cappedSize);
- if ( initialNumExtents )
- b.appendNumber( "$nExtents", initialNumExtents );
- if ( !initialExtentSizes.empty() )
- b.append( "$nExtents", initialExtentSizes );
+ if (cappedMaxDocs)
+ b.appendNumber("max", cappedMaxDocs);
+ }
- if ( autoIndexId != DEFAULT )
- b.appendBool( "autoIndexId", autoIndexId == YES );
+ if (initialNumExtents)
+ b.appendNumber("$nExtents", initialNumExtents);
+ if (!initialExtentSizes.empty())
+ b.append("$nExtents", initialExtentSizes);
- if ( flagsSet )
- b.append( "flags", flags );
+ if (autoIndexId != DEFAULT)
+ b.appendBool("autoIndexId", autoIndexId == YES);
- if ( temp )
- b.appendBool( "temp", true );
+ if (flagsSet)
+ b.append("flags", flags);
- if (!storageEngine.isEmpty()) {
- b.append("storageEngine", storageEngine);
- }
+ if (temp)
+ b.appendBool("temp", true);
- if (!validator.isEmpty()) {
- b.append("validator", validator);
- }
+ if (!storageEngine.isEmpty()) {
+ b.append("storageEngine", storageEngine);
+ }
- return b.obj();
+ if (!validator.isEmpty()) {
+ b.append("validator", validator);
}
+ return b.obj();
+}
}
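
Two bits of arithmetic in the parse() above are easy to miss: the requested capped "size" is rounded up to the next 256-byte boundary, and validMaxCappedDocs() treats a non-positive or LLONG_MAX "max" as unset (defaulting it to 2^31 - 1) while rejecting anything at or above 2^31. A standalone sketch of the same arithmetic, runnable on its own:

// Illustration of the capped-collection arithmetic used by CollectionOptions::parse.
#include <cstdint>
#include <iostream>
#include <limits>

long long roundCappedSize(long long size) {
    size += 0xff;
    size &= 0xffffffffffffff00LL;  // clear the low 8 bits: round up to a 256-byte multiple
    return size;
}

bool validMaxCappedDocs(long long* max) {
    if (*max <= 0 || *max == std::numeric_limits<long long>::max()) {
        *max = 0x7fffffff;  // "unset" becomes the default document cap
        return true;
    }
    return *max < (1LL << 31);
}

int main() {
    std::cout << roundCappedSize(1000) << "\n";  // 1024
    std::cout << roundCappedSize(1024) << "\n";  // 1024 (already aligned)

    long long max = -5;
    std::cout << validMaxCappedDocs(&max) << " " << max << "\n";  // 1 2147483647

    max = (1LL << 31);  // exactly 2^31 is rejected
    std::cout << validMaxCappedDocs(&max) << "\n";                // 0
}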
diff --git a/src/mongo/db/catalog/collection_options.h b/src/mongo/db/catalog/collection_options.h
index 356d4f2c3e7..7520917b1d2 100644
--- a/src/mongo/db/catalog/collection_options.h
+++ b/src/mongo/db/catalog/collection_options.h
@@ -35,69 +35,70 @@
namespace mongo {
- struct CollectionOptions {
- CollectionOptions() {
- reset();
- }
-
- void reset();
-
- /**
- * Returns true if collection options validates successfully.
- */
- bool isValid() const;
-
- /**
- * Confirms that collection options can be converted to BSON and back without errors.
- */
- Status validate() const;
-
- /**
- * Parses the "options" subfield of the collection info object.
- */
- Status parse( const BSONObj& obj );
-
- BSONObj toBSON() const;
-
- /**
- * @param max in and out, will be adjusted
- * @return if the value is valid at all
- */
- static bool validMaxCappedDocs( long long* max );
-
- // ----
-
- bool capped;
- long long cappedSize;
- long long cappedMaxDocs;
-
- // following 2 are mutually exclusive, can only have one set
- long long initialNumExtents;
- std::vector<long long> initialExtentSizes;
-
- // behavior of _id index creation when collection created
- void setNoIdIndex() { autoIndexId = NO; }
- enum {
- DEFAULT, // currently yes for most collections, NO for some system ones
- YES, // create _id index
- NO // do not create _id index
- } autoIndexId;
-
- // user flags
- enum UserFlags {
- Flag_UsePowerOf2Sizes = 1 << 0,
- Flag_NoPadding = 1 << 1,
- };
- int flags; // a bitvector of UserFlags
- bool flagsSet;
-
- bool temp;
-
- // Storage engine collection options. Always owned or empty.
- BSONObj storageEngine;
-
- // Always owned or empty.
- BSONObj validator;
+struct CollectionOptions {
+ CollectionOptions() {
+ reset();
+ }
+
+ void reset();
+
+ /**
+ * Returns true if collection options validates successfully.
+ */
+ bool isValid() const;
+
+ /**
+ * Confirms that collection options can be converted to BSON and back without errors.
+ */
+ Status validate() const;
+
+ /**
+ * Parses the "options" subfield of the collection info object.
+ */
+ Status parse(const BSONObj& obj);
+
+ BSONObj toBSON() const;
+
+ /**
+ * @param max in and out, will be adjusted
+ * @return if the value is valid at all
+ */
+ static bool validMaxCappedDocs(long long* max);
+
+ // ----
+
+ bool capped;
+ long long cappedSize;
+ long long cappedMaxDocs;
+
+ // following 2 are mutually exclusive, can only have one set
+ long long initialNumExtents;
+ std::vector<long long> initialExtentSizes;
+
+ // behavior of _id index creation when collection created
+ void setNoIdIndex() {
+ autoIndexId = NO;
+ }
+ enum {
+ DEFAULT, // currently yes for most collections, NO for some system ones
+ YES, // create _id index
+ NO // do not create _id index
+ } autoIndexId;
+
+ // user flags
+ enum UserFlags {
+ Flag_UsePowerOf2Sizes = 1 << 0,
+ Flag_NoPadding = 1 << 1,
};
+ int flags; // a bitvector of UserFlags
+ bool flagsSet;
+ bool temp;
+
+ // Storage engine collection options. Always owned or empty.
+ BSONObj storageEngine;
+
+ // Always owned or empty.
+ BSONObj validator;
+};
}
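
The UserFlags enum above is a bitvector: each flag is a distinct power of two, so several can be OR-ed into the 'flags' field and tested independently. A minimal standalone illustration (the enum values mirror the header; everything else is scaffolding):

#include <iostream>

enum UserFlags {
    Flag_UsePowerOf2Sizes = 1 << 0,
    Flag_NoPadding = 1 << 1,
};

int main() {
    int flags = Flag_UsePowerOf2Sizes;  // the default set by CollectionOptions::reset()
    flags |= Flag_NoPadding;            // turn on a second flag

    std::cout << (flags & Flag_UsePowerOf2Sizes ? "pow2 " : "")
              << (flags & Flag_NoPadding ? "noPadding" : "") << "\n";  // pow2 noPadding
    std::cout << flags << "\n";                                        // 3
}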
diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp
index 165f377a8d7..b56b883e7a2 100644
--- a/src/mongo/db/catalog/collection_options_test.cpp
+++ b/src/mongo/db/catalog/collection_options_test.cpp
@@ -35,156 +35,156 @@
namespace mongo {
- void checkRoundTrip( const CollectionOptions& options1 ) {
- CollectionOptions options2;
- options2.parse( options1.toBSON() );
- ASSERT_EQUALS( options1.toBSON(), options2.toBSON() );
- }
-
- TEST( CollectionOptions, SimpleRoundTrip ) {
- CollectionOptions options;
- checkRoundTrip( options );
-
- options.capped = true;
- options.cappedSize = 10240;
- options.cappedMaxDocs = 1111;
- checkRoundTrip( options );
-
- options.setNoIdIndex();
- options.flags = 5;
- checkRoundTrip( options );
- }
-
- TEST(CollectionOptions, IsValid) {
- CollectionOptions options;
- ASSERT_TRUE(options.isValid());
-
- options.storageEngine = fromjson("{storageEngine1: 1}");
- ASSERT_FALSE(options.isValid());
- }
-
- TEST(CollectionOptions, Validate) {
- CollectionOptions options;
- ASSERT_OK(options.validate());
-
- options.storageEngine = fromjson("{storageEngine1: 1}");
- ASSERT_NOT_OK(options.validate());
- }
-
- TEST(CollectionOptions, Validator) {
- CollectionOptions options;
-
- ASSERT_NOT_OK(options.parse(fromjson("{validator: 'notAnObject'}")));
-
- ASSERT_OK(options.parse(fromjson("{validator: {a: 1}}")));
- ASSERT_EQ(options.validator, fromjson("{a: 1}"));
-
- options.validator = fromjson("{b: 1}");
- ASSERT_EQ(options.toBSON()["validator"].Obj(), fromjson("{b: 1}"));
-
- options.reset();
- ASSERT_EQ(options.validator, BSONObj());
- ASSERT(!options.toBSON()["validator"]);
- }
-
- TEST( CollectionOptions, ErrorBadSize ) {
- ASSERT_NOT_OK( CollectionOptions().parse( fromjson( "{capped: true, size: -1}" ) ) );
- ASSERT_NOT_OK( CollectionOptions().parse( fromjson( "{capped: false, size: -1}" ) ) );
- }
-
- TEST( CollectionOptions, ErrorBadMax ) {
- ASSERT_NOT_OK( CollectionOptions().parse( BSON( "capped" << true << "max"
- << ( 1LL << 31 ) ) ) );
- }
-
- TEST( CollectionOptions, IgnoreSizeWrongType ) {
- CollectionOptions options;
- ASSERT_OK( options.parse( fromjson( "{size: undefined, capped: undefined}" ) ) );
- ASSERT_EQUALS( options.capped, false );
- ASSERT_EQUALS( options.cappedSize, 0 );
- }
-
- TEST( CollectionOptions, IgnoreMaxWrongType ) {
- CollectionOptions options;
- ASSERT_OK( options.parse( fromjson( "{capped: true, size: 1024, max: ''}" ) ) );
- ASSERT_EQUALS( options.capped, true );
- ASSERT_EQUALS( options.cappedSize, 1024 );
- ASSERT_EQUALS( options.cappedMaxDocs, 0 );
- }
-
- TEST( CollectionOptions, IgnoreUnregisteredFields ) {
- ASSERT_OK( CollectionOptions().parse( BSON( "create" << "c" ) ) );
- ASSERT_OK( CollectionOptions().parse( BSON( "foo" << "bar" ) ) );
- }
-
- TEST(CollectionOptions, InvalidStorageEngineField) {
- // "storageEngine" field has to be an object if present.
- ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: 1}")));
-
- // Every field under "storageEngine" has to be an object.
- ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: {storageEngine1: 1}}")));
-
- // Empty "storageEngine" not allowed
- ASSERT_OK(CollectionOptions().parse(fromjson("{storageEngine: {}}")));
- }
-
- TEST(CollectionOptions, ParseEngineField) {
- CollectionOptions opts;
- ASSERT_OK(opts.parse(fromjson("{unknownField: 1, "
- "storageEngine: {storageEngine1: {x: 1, y: 2}, storageEngine2: {a: 1, b:2}}}")));
- checkRoundTrip(opts);
-
- // Unrecognized field should not be present in BSON representation.
- BSONObj obj = opts.toBSON();
- ASSERT_FALSE(obj.hasField("unknownField"));
-
- // Check "storageEngine" field.
- ASSERT_TRUE(obj.hasField("storageEngine"));
- ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
- BSONObj storageEngine = obj.getObjectField("storageEngine");
-
- // Check individual storage storageEngine fields.
- ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
- BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
- ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
- ASSERT_EQUALS(2, storageEngine1.getIntField("y"));
-
- ASSERT_TRUE(storageEngine.getField("storageEngine2").isABSONObj());
- BSONObj storageEngine2 = storageEngine.getObjectField("storageEngine2");
- ASSERT_EQUALS(1, storageEngine2.getIntField("a"));
- ASSERT_EQUALS(2, storageEngine2.getIntField("b"));
-
- }
-
- TEST(CollectionOptions, ResetStorageEngineField) {
- CollectionOptions opts;
- ASSERT_OK(opts.parse(fromjson(
- "{storageEngine: {storageEngine1: {x: 1}}}")));
- checkRoundTrip(opts);
-
- opts.reset();
-
- ASSERT_TRUE(opts.storageEngine.isEmpty());
- }
-
- TEST(CollectionOptions, ModifyStorageEngineField) {
- CollectionOptions opts;
-
- // Directly modify storageEngine field in collection options.
- opts.storageEngine = fromjson("{storageEngine1: {x: 1}}");
-
- // Unrecognized field should not be present in BSON representation.
- BSONObj obj = opts.toBSON();
- ASSERT_FALSE(obj.hasField("unknownField"));
-
- // Check "storageEngine" field.
- ASSERT_TRUE(obj.hasField("storageEngine"));
- ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
- BSONObj storageEngine = obj.getObjectField("storageEngine");
-
- // Check individual storage storageEngine fields.
- ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
- BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
- ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
- }
+void checkRoundTrip(const CollectionOptions& options1) {
+ CollectionOptions options2;
+ options2.parse(options1.toBSON());
+ ASSERT_EQUALS(options1.toBSON(), options2.toBSON());
+}
+
+TEST(CollectionOptions, SimpleRoundTrip) {
+ CollectionOptions options;
+ checkRoundTrip(options);
+
+ options.capped = true;
+ options.cappedSize = 10240;
+ options.cappedMaxDocs = 1111;
+ checkRoundTrip(options);
+
+ options.setNoIdIndex();
+ options.flags = 5;
+ checkRoundTrip(options);
+}
+
+TEST(CollectionOptions, IsValid) {
+ CollectionOptions options;
+ ASSERT_TRUE(options.isValid());
+
+ options.storageEngine = fromjson("{storageEngine1: 1}");
+ ASSERT_FALSE(options.isValid());
+}
+
+TEST(CollectionOptions, Validate) {
+ CollectionOptions options;
+ ASSERT_OK(options.validate());
+
+ options.storageEngine = fromjson("{storageEngine1: 1}");
+ ASSERT_NOT_OK(options.validate());
+}
+
+TEST(CollectionOptions, Validator) {
+ CollectionOptions options;
+
+ ASSERT_NOT_OK(options.parse(fromjson("{validator: 'notAnObject'}")));
+
+ ASSERT_OK(options.parse(fromjson("{validator: {a: 1}}")));
+ ASSERT_EQ(options.validator, fromjson("{a: 1}"));
+
+ options.validator = fromjson("{b: 1}");
+ ASSERT_EQ(options.toBSON()["validator"].Obj(), fromjson("{b: 1}"));
+
+ options.reset();
+ ASSERT_EQ(options.validator, BSONObj());
+ ASSERT(!options.toBSON()["validator"]);
+}
+
+TEST(CollectionOptions, ErrorBadSize) {
+ ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{capped: true, size: -1}")));
+ ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{capped: false, size: -1}")));
+}
+
+TEST(CollectionOptions, ErrorBadMax) {
+ ASSERT_NOT_OK(CollectionOptions().parse(BSON("capped" << true << "max" << (1LL << 31))));
+}
+
+TEST(CollectionOptions, IgnoreSizeWrongType) {
+ CollectionOptions options;
+ ASSERT_OK(options.parse(fromjson("{size: undefined, capped: undefined}")));
+ ASSERT_EQUALS(options.capped, false);
+ ASSERT_EQUALS(options.cappedSize, 0);
+}
+
+TEST(CollectionOptions, IgnoreMaxWrongType) {
+ CollectionOptions options;
+ ASSERT_OK(options.parse(fromjson("{capped: true, size: 1024, max: ''}")));
+ ASSERT_EQUALS(options.capped, true);
+ ASSERT_EQUALS(options.cappedSize, 1024);
+ ASSERT_EQUALS(options.cappedMaxDocs, 0);
+}
+
+TEST(CollectionOptions, IgnoreUnregisteredFields) {
+ ASSERT_OK(CollectionOptions().parse(BSON("create"
+ << "c")));
+ ASSERT_OK(CollectionOptions().parse(BSON("foo"
+ << "bar")));
+}
+
+TEST(CollectionOptions, InvalidStorageEngineField) {
+ // "storageEngine" field has to be an object if present.
+ ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: 1}")));
+
+ // Every field under "storageEngine" has to be an object.
+ ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: {storageEngine1: 1}}")));
+
+ // Empty "storageEngine" not allowed
+ ASSERT_OK(CollectionOptions().parse(fromjson("{storageEngine: {}}")));
+}
+
+TEST(CollectionOptions, ParseEngineField) {
+ CollectionOptions opts;
+ ASSERT_OK(opts.parse(fromjson(
+ "{unknownField: 1, "
+ "storageEngine: {storageEngine1: {x: 1, y: 2}, storageEngine2: {a: 1, b:2}}}")));
+ checkRoundTrip(opts);
+
+ // Unrecognized field should not be present in BSON representation.
+ BSONObj obj = opts.toBSON();
+ ASSERT_FALSE(obj.hasField("unknownField"));
+
+ // Check "storageEngine" field.
+ ASSERT_TRUE(obj.hasField("storageEngine"));
+ ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
+ BSONObj storageEngine = obj.getObjectField("storageEngine");
+
+ // Check individual storage storageEngine fields.
+ ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
+ BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
+ ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
+ ASSERT_EQUALS(2, storageEngine1.getIntField("y"));
+
+ ASSERT_TRUE(storageEngine.getField("storageEngine2").isABSONObj());
+ BSONObj storageEngine2 = storageEngine.getObjectField("storageEngine2");
+ ASSERT_EQUALS(1, storageEngine2.getIntField("a"));
+ ASSERT_EQUALS(2, storageEngine2.getIntField("b"));
+}
+
+TEST(CollectionOptions, ResetStorageEngineField) {
+ CollectionOptions opts;
+ ASSERT_OK(opts.parse(fromjson("{storageEngine: {storageEngine1: {x: 1}}}")));
+ checkRoundTrip(opts);
+
+ opts.reset();
+
+ ASSERT_TRUE(opts.storageEngine.isEmpty());
+}
+
+TEST(CollectionOptions, ModifyStorageEngineField) {
+ CollectionOptions opts;
+
+ // Directly modify storageEngine field in collection options.
+ opts.storageEngine = fromjson("{storageEngine1: {x: 1}}");
+
+ // Unrecognized field should not be present in BSON representation.
+ BSONObj obj = opts.toBSON();
+ ASSERT_FALSE(obj.hasField("unknownField"));
+
+ // Check "storageEngine" field.
+ ASSERT_TRUE(obj.hasField("storageEngine"));
+ ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
+ BSONObj storageEngine = obj.getObjectField("storageEngine");
+
+ // Check individual storage storageEngine fields.
+ ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
+ BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
+ ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
+}
}
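
The tests above lean on checkRoundTrip(): serialize the options, parse them back into a fresh object, and require the two serializations to be identical. A dependency-free sketch of the same pattern with a toy Options struct and an invented text format (both illustrative only, not the BSON machinery the real tests use):

#include <cassert>
#include <sstream>
#include <string>

struct Options {
    bool capped = false;
    long long cappedSize = 0;

    std::string toText() const {
        std::ostringstream os;
        os << "capped=" << capped << ";size=" << cappedSize << ";";
        return os.str();
    }

    void parse(const std::string& text) {
        std::istringstream is(text);
        std::string item;
        while (std::getline(is, item, ';')) {
            auto eq = item.find('=');
            if (eq == std::string::npos)
                continue;  // ignore malformed entries, mirroring the lenient parser above
            std::string key = item.substr(0, eq);
            long long value = std::stoll(item.substr(eq + 1));
            if (key == "capped")
                capped = value != 0;
            else if (key == "size")
                cappedSize = value;
        }
    }
};

void checkRoundTrip(const Options& options1) {
    Options options2;
    options2.parse(options1.toText());
    assert(options1.toText() == options2.toText());  // round trip must be lossless
}

int main() {
    Options options;
    checkRoundTrip(options);

    options.capped = true;
    options.cappedSize = 10240;
    checkRoundTrip(options);
}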
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index aaff88c3121..afa5204fcbf 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -40,56 +40,53 @@
#include "mongo/db/repl/replication_coordinator_global.h"
namespace mongo {
- Status createCollection(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& cmdObj) {
- BSONObjIterator it(cmdObj);
+Status createCollection(OperationContext* txn, const std::string& dbName, const BSONObj& cmdObj) {
+ BSONObjIterator it(cmdObj);
- // Extract ns from first cmdObj element.
- BSONElement firstElt = it.next();
- uassert(15888,
- "must pass name of collection to create",
- firstElt.valuestrsafe()[0] != '\0');
+ // Extract ns from first cmdObj element.
+ BSONElement firstElt = it.next();
+ uassert(15888, "must pass name of collection to create", firstElt.valuestrsafe()[0] != '\0');
- Status status = userAllowedCreateNS(dbName, firstElt.valuestr());
- if (!status.isOK()) {
- return status;
- }
+ Status status = userAllowedCreateNS(dbName, firstElt.valuestr());
+ if (!status.isOK()) {
+ return status;
+ }
- NamespaceString nss(dbName, firstElt.valuestrsafe());
+ NamespaceString nss(dbName, firstElt.valuestrsafe());
- // Build options object from remaining cmdObj elements.
- BSONObjBuilder optionsBuilder;
- while (it.more()) {
- optionsBuilder.append(it.next());
- }
+ // Build options object from remaining cmdObj elements.
+ BSONObjBuilder optionsBuilder;
+ while (it.more()) {
+ optionsBuilder.append(it.next());
+ }
- BSONObj options = optionsBuilder.obj();
- uassert(14832,
- "specify size:<n> when capped is true",
- !options["capped"].trueValue() || options["size"].isNumber() ||
- options.hasField("$nExtents"));
+ BSONObj options = optionsBuilder.obj();
+ uassert(14832,
+ "specify size:<n> when capped is true",
+ !options["capped"].trueValue() || options["size"].isNumber() ||
+ options.hasField("$nExtents"));
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbName, MODE_X);
- OldClientContext ctx(txn, nss.ns());
- if (txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- return Status(ErrorCodes::NotMaster, str::stream() <<
- "Not primary while creating collection " << nss.ns());
- }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), dbName, MODE_X);
+ OldClientContext ctx(txn, nss.ns());
+ if (txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating collection " << nss.ns());
+ }
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(txn);
- // Create collection.
- status = userCreateNS(txn, ctx.db(), nss.ns(), options);
- if (!status.isOK()) {
- return status;
- }
+ // Create collection.
+ status = userCreateNS(txn, ctx.db(), nss.ns(), options);
+ if (!status.isOK()) {
+ return status;
+ }
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "create", nss.ns());
- return Status::OK();
+ wunit.commit();
}
-} // namespace mongo
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "create", nss.ns());
+ return Status::OK();
+}
+} // namespace mongo
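
createCollection() above runs its locked body inside MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END, which retries the whole unit of work when a write conflict is detected. A rough standalone sketch of that retry shape, assuming a hypothetical WriteConflictException and a plain callable body, and ignoring locking and backoff details:

#include <iostream>
#include <stdexcept>
#include <string>

struct WriteConflictException : std::runtime_error {
    WriteConflictException() : std::runtime_error("write conflict") {}
};

template <typename Body>
void writeConflictRetry(const std::string& opName, Body body) {
    for (int attempt = 0;; ++attempt) {
        try {
            body();  // e.g. take locks, create the collection, commit the WUOW
            return;
        } catch (const WriteConflictException&) {
            std::cout << "retrying " << opName << ", attempt " << attempt + 1 << "\n";
            // a real implementation would also yield/back off here
        }
    }
}

int main() {
    int failuresLeft = 2;
    writeConflictRetry("create", [&] {
        if (failuresLeft-- > 0)
            throw WriteConflictException();  // simulate two conflicts, then succeed
        std::cout << "created collection\n";
    });
}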
diff --git a/src/mongo/db/catalog/create_collection.h b/src/mongo/db/catalog/create_collection.h
index e96a7d799f9..84a042dae12 100644
--- a/src/mongo/db/catalog/create_collection.h
+++ b/src/mongo/db/catalog/create_collection.h
@@ -31,13 +31,11 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
- class OperationContext;
+class BSONObj;
+class OperationContext;
- /**
- * Creates a collection as described in "cmdObj" on the database "dbName".
- */
- Status createCollection(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& cmdObj);
-} // namespace mongo
+/**
+ * Creates a collection as described in "cmdObj" on the database "dbName".
+ */
+Status createCollection(OperationContext* txn, const std::string& dbName, const BSONObj& cmdObj);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/cursor_manager.cpp b/src/mongo/db/catalog/cursor_manager.cpp
index b5bf0f49f73..97426228ca9 100644
--- a/src/mongo/db/catalog/cursor_manager.cpp
+++ b/src/mongo/db/catalog/cursor_manager.cpp
@@ -48,514 +48,479 @@
namespace mongo {
- using std::string;
- using std::vector;
-
- namespace {
- unsigned idFromCursorId( CursorId id ) {
- uint64_t x = static_cast<uint64_t>(id);
- x = x >> 32;
- return static_cast<unsigned>( x );
- }
-
- CursorId cursorIdFromParts( unsigned collection,
- unsigned cursor ) {
- CursorId x = static_cast<CursorId>( collection ) << 32;
- x |= cursor;
- return x;
- }
-
- class IdWorkTest : public StartupTest {
- public:
- void _run( unsigned a, unsigned b) {
- CursorId x = cursorIdFromParts( a, b );
- invariant( a == idFromCursorId( x ) );
- CursorId y = cursorIdFromParts( a, b + 1 );
- invariant( x != y );
- }
+using std::string;
+using std::vector;
+
+namespace {
+unsigned idFromCursorId(CursorId id) {
+ uint64_t x = static_cast<uint64_t>(id);
+ x = x >> 32;
+ return static_cast<unsigned>(x);
+}
- void run() {
- _run( 123, 456 );
- _run( 0xdeadbeef, 0xcafecafe );
- _run( 0, 0 );
- _run( 99999999, 999 );
- _run( 0xFFFFFFFF, 1 );
- _run( 0xFFFFFFFF, 0 );
- _run( 0xFFFFFFFF, 0xFFFFFFFF );
- }
- } idWorkTest;
- }
+CursorId cursorIdFromParts(unsigned collection, unsigned cursor) {
+ CursorId x = static_cast<CursorId>(collection) << 32;
+ x |= cursor;
+ return x;
+}
- class GlobalCursorIdCache {
- public:
+class IdWorkTest : public StartupTest {
+public:
+ void _run(unsigned a, unsigned b) {
+ CursorId x = cursorIdFromParts(a, b);
+ invariant(a == idFromCursorId(x));
+ CursorId y = cursorIdFromParts(a, b + 1);
+ invariant(x != y);
+ }
+
+ void run() {
+ _run(123, 456);
+ _run(0xdeadbeef, 0xcafecafe);
+ _run(0, 0);
+ _run(99999999, 999);
+ _run(0xFFFFFFFF, 1);
+ _run(0xFFFFFFFF, 0);
+ _run(0xFFFFFFFF, 0xFFFFFFFF);
+ }
+} idWorkTest;
+}
- GlobalCursorIdCache();
- ~GlobalCursorIdCache();
+class GlobalCursorIdCache {
+public:
+ GlobalCursorIdCache();
+ ~GlobalCursorIdCache();
- /**
- * this gets called when a CursorManager gets created
- * @return the id the CursorManager should use when generating
- * cursor ids
- */
- unsigned created( const std::string& ns );
+ /**
+ * this gets called when a CursorManager gets created
+ * @return the id the CursorManager should use when generating
+ * cursor ids
+ */
+ unsigned created(const std::string& ns);
- /**
- * called by CursorManager when its going away
- */
- void destroyed( unsigned id, const std::string& ns );
+ /**
+ * called by CursorManager when its going away
+ */
+ void destroyed(unsigned id, const std::string& ns);
- /**
- * works globally
- */
- bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
+ /**
+ * works globally
+ */
+ bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
- void appendStats( BSONObjBuilder& builder );
+ void appendStats(BSONObjBuilder& builder);
- std::size_t timeoutCursors(OperationContext* txn, int millisSinceLastCall);
+ std::size_t timeoutCursors(OperationContext* txn, int millisSinceLastCall);
- int64_t nextSeed();
- private:
- SimpleMutex _mutex;
+ int64_t nextSeed();
- typedef unordered_map<unsigned,string> Map;
- Map _idToNS;
- unsigned _nextId;
+private:
+ SimpleMutex _mutex;
- std::unique_ptr<SecureRandom> _secureRandom;
- };
+ typedef unordered_map<unsigned, string> Map;
+ Map _idToNS;
+ unsigned _nextId;
- // Note that "globalCursorIdCache" must be declared before "globalCursorManager", as the latter
- // calls into the former during destruction.
- std::unique_ptr<GlobalCursorIdCache> globalCursorIdCache;
- std::unique_ptr<CursorManager> globalCursorManager;
+ std::unique_ptr<SecureRandom> _secureRandom;
+};
- MONGO_INITIALIZER(GlobalCursorIdCache)(InitializerContext* context) {
- globalCursorIdCache.reset(new GlobalCursorIdCache());
- return Status::OK();
- }
+// Note that "globalCursorIdCache" must be declared before "globalCursorManager", as the latter
+// calls into the former during destruction.
+std::unique_ptr<GlobalCursorIdCache> globalCursorIdCache;
+std::unique_ptr<CursorManager> globalCursorManager;
- MONGO_INITIALIZER_WITH_PREREQUISITES(GlobalCursorManager, ("GlobalCursorIdCache"))
- (InitializerContext* context) {
- globalCursorManager.reset(new CursorManager(""));
- return Status::OK();
- }
+MONGO_INITIALIZER(GlobalCursorIdCache)(InitializerContext* context) {
+ globalCursorIdCache.reset(new GlobalCursorIdCache());
+ return Status::OK();
+}
- GlobalCursorIdCache::GlobalCursorIdCache()
- : _nextId( 0 ),
- _secureRandom() {
- }
+MONGO_INITIALIZER_WITH_PREREQUISITES(GlobalCursorManager, ("GlobalCursorIdCache"))
+(InitializerContext* context) {
+ globalCursorManager.reset(new CursorManager(""));
+ return Status::OK();
+}
- GlobalCursorIdCache::~GlobalCursorIdCache() {
- }
+GlobalCursorIdCache::GlobalCursorIdCache() : _nextId(0), _secureRandom() {}
- int64_t GlobalCursorIdCache::nextSeed() {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- if ( !_secureRandom )
- _secureRandom.reset(SecureRandom::create());
- return _secureRandom->nextInt64();
- }
+GlobalCursorIdCache::~GlobalCursorIdCache() {}
- unsigned GlobalCursorIdCache::created( const std::string& ns ) {
- static const unsigned MAX_IDS = 1000 * 1000 * 1000;
+int64_t GlobalCursorIdCache::nextSeed() {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ if (!_secureRandom)
+ _secureRandom.reset(SecureRandom::create());
+ return _secureRandom->nextInt64();
+}
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+unsigned GlobalCursorIdCache::created(const std::string& ns) {
+ static const unsigned MAX_IDS = 1000 * 1000 * 1000;
- fassert( 17359, _idToNS.size() < MAX_IDS );
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( unsigned i = 0; i <= MAX_IDS; i++ ) {
- unsigned id = ++_nextId;
- if ( id == 0 )
- continue;
- if ( _idToNS.count( id ) > 0 )
- continue;
- _idToNS[id] = ns;
- return id;
- }
+ fassert(17359, _idToNS.size() < MAX_IDS);
- invariant( false );
+ for (unsigned i = 0; i <= MAX_IDS; i++) {
+ unsigned id = ++_nextId;
+ if (id == 0)
+ continue;
+ if (_idToNS.count(id) > 0)
+ continue;
+ _idToNS[id] = ns;
+ return id;
}
- void GlobalCursorIdCache::destroyed( unsigned id, const std::string& ns ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- invariant( ns == _idToNS[id] );
- _idToNS.erase( id );
- }
+ invariant(false);
+}
- bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
- // Figure out what the namespace of this cursor is.
- std::string ns;
- if (globalCursorManager->ownsCursorId(id)) {
- ClientCursorPin pin(globalCursorManager.get(), id);
- if (!pin.c()) {
- // No such cursor. TODO: Consider writing to audit log here (even though we don't
- // have a namespace).
- return false;
- }
- ns = pin.c()->ns();
- }
- else {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- unsigned nsid = idFromCursorId(id);
- Map::const_iterator it = _idToNS.find(nsid);
- if (it == _idToNS.end()) {
- // No namespace corresponding to this cursor id prefix. TODO: Consider writing to
- // audit log here (even though we don't have a namespace).
- return false;
- }
- ns = it->second;
- }
- const NamespaceString nss(ns);
- invariant(nss.isValid());
-
- // Check if we are authorized to erase this cursor.
- if (checkAuth) {
- AuthorizationSession* as = AuthorizationSession::get(txn->getClient());
- Status authorizationStatus = as->checkAuthForKillCursors(nss, id);
- if (!authorizationStatus.isOK()) {
- audit::logKillCursorsAuthzCheck(txn->getClient(),
- nss,
- id,
- ErrorCodes::Unauthorized);
- return false;
- }
- }
+void GlobalCursorIdCache::destroyed(unsigned id, const std::string& ns) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ invariant(ns == _idToNS[id]);
+ _idToNS.erase(id);
+}
- // If this cursor is owned by the global cursor manager, ask it to erase the cursor for us.
- if (globalCursorManager->ownsCursorId(id)) {
- return globalCursorManager->eraseCursor(txn, id, checkAuth);
+bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
+ // Figure out what the namespace of this cursor is.
+ std::string ns;
+ if (globalCursorManager->ownsCursorId(id)) {
+ ClientCursorPin pin(globalCursorManager.get(), id);
+ if (!pin.c()) {
+ // No such cursor. TODO: Consider writing to audit log here (even though we don't
+ // have a namespace).
+ return false;
}
-
- // If not, then the cursor must be owned by a collection. Erase the cursor under the
- // collection lock (to prevent the collection from going away during the erase).
- AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = ctx.getCollection();
- if (!collection) {
- if (checkAuth)
- audit::logKillCursorsAuthzCheck(txn->getClient(),
- nss,
- id,
- ErrorCodes::CursorNotFound);
+ ns = pin.c()->ns();
+ } else {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ unsigned nsid = idFromCursorId(id);
+ Map::const_iterator it = _idToNS.find(nsid);
+ if (it == _idToNS.end()) {
+ // No namespace corresponding to this cursor id prefix. TODO: Consider writing to
+ // audit log here (even though we don't have a namespace).
return false;
}
- return collection->getCursorManager()->eraseCursor(txn, id, checkAuth);
+ ns = it->second;
}
+ const NamespaceString nss(ns);
+ invariant(nss.isValid());
- std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int millisSinceLastCall) {
- size_t totalTimedOut = 0;
-
- // Time out the cursors from the global cursor manager.
- totalTimedOut += globalCursorManager->timeoutCursors( millisSinceLastCall );
-
- // Compute the set of collection names that we have to time out cursors for.
- vector<string> todo;
- {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- for ( Map::const_iterator i = _idToNS.begin(); i != _idToNS.end(); ++i ) {
- if (globalCursorManager->ownsCursorId(cursorIdFromParts(i->first, 0))) {
- // Skip the global cursor manager, since we handle it above (and it's not
- // associated with a collection).
- continue;
- }
- todo.push_back( i->second );
- }
+ // Check if we are authorized to erase this cursor.
+ if (checkAuth) {
+ AuthorizationSession* as = AuthorizationSession::get(txn->getClient());
+ Status authorizationStatus = as->checkAuthForKillCursors(nss, id);
+ if (!authorizationStatus.isOK()) {
+ audit::logKillCursorsAuthzCheck(txn->getClient(), nss, id, ErrorCodes::Unauthorized);
+ return false;
}
+ }
+
+ // If this cursor is owned by the global cursor manager, ask it to erase the cursor for us.
+ if (globalCursorManager->ownsCursorId(id)) {
+ return globalCursorManager->eraseCursor(txn, id, checkAuth);
+ }
- // For each collection, time out its cursors under the collection lock (to prevent the
- // collection from going away during the erase).
- for ( unsigned i = 0; i < todo.size(); i++ ) {
- const std::string& ns = todo[i];
+ // If not, then the cursor must be owned by a collection. Erase the cursor under the
+ // collection lock (to prevent the collection from going away during the erase).
+ AutoGetCollectionForRead ctx(txn, nss);
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ if (checkAuth)
+ audit::logKillCursorsAuthzCheck(txn->getClient(), nss, id, ErrorCodes::CursorNotFound);
+ return false;
+ }
+ return collection->getCursorManager()->eraseCursor(txn, id, checkAuth);
+}
- AutoGetCollectionForRead ctx(txn, ns);
- if (!ctx.getDb()) {
- continue;
- }
+std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int millisSinceLastCall) {
+ size_t totalTimedOut = 0;
+
+ // Time out the cursors from the global cursor manager.
+ totalTimedOut += globalCursorManager->timeoutCursors(millisSinceLastCall);
- Collection* collection = ctx.getCollection();
- if ( collection == NULL ) {
+ // Compute the set of collection names that we have to time out cursors for.
+ vector<string> todo;
+ {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ for (Map::const_iterator i = _idToNS.begin(); i != _idToNS.end(); ++i) {
+ if (globalCursorManager->ownsCursorId(cursorIdFromParts(i->first, 0))) {
+ // Skip the global cursor manager, since we handle it above (and it's not
+ // associated with a collection).
continue;
}
+ todo.push_back(i->second);
+ }
+ }
+
+ // For each collection, time out its cursors under the collection lock (to prevent the
+ // collection from going away during the erase).
+ for (unsigned i = 0; i < todo.size(); i++) {
+ const std::string& ns = todo[i];
+
+ AutoGetCollectionForRead ctx(txn, ns);
+ if (!ctx.getDb()) {
+ continue;
+ }
- totalTimedOut += collection->getCursorManager()->timeoutCursors( millisSinceLastCall );
+ Collection* collection = ctx.getCollection();
+ if (collection == NULL) {
+ continue;
}
- return totalTimedOut;
+ totalTimedOut += collection->getCursorManager()->timeoutCursors(millisSinceLastCall);
}
- // ---
+ return totalTimedOut;
+}
- CursorManager* CursorManager::getGlobalCursorManager() {
- return globalCursorManager.get();
- }
+// ---
- std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* txn,
- int millisSinceLastCall) {
- return globalCursorIdCache->timeoutCursors(txn, millisSinceLastCall);
- }
+CursorManager* CursorManager::getGlobalCursorManager() {
+ return globalCursorManager.get();
+}
- int CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, int n,
- const char* _ids) {
- ConstDataCursor ids(_ids);
- int numDeleted = 0;
- for ( int i = 0; i < n; i++ ) {
- if ( eraseCursorGlobalIfAuthorized(txn, ids.readAndAdvance<LittleEndian<int64_t>>()))
- numDeleted++;
- if ( inShutdown() )
- break;
- }
- return numDeleted;
- }
- bool CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id) {
- return globalCursorIdCache->eraseCursor(txn, id, true);
- }
- bool CursorManager::eraseCursorGlobal(OperationContext* txn, CursorId id) {
- return globalCursorIdCache->eraseCursor(txn, id, false );
+std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall) {
+ return globalCursorIdCache->timeoutCursors(txn, millisSinceLastCall);
+}
+
+int CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, const char* _ids) {
+ ConstDataCursor ids(_ids);
+ int numDeleted = 0;
+ for (int i = 0; i < n; i++) {
+ if (eraseCursorGlobalIfAuthorized(txn, ids.readAndAdvance<LittleEndian<int64_t>>()))
+ numDeleted++;
+ if (inShutdown())
+ break;
}
+ return numDeleted;
+}
+bool CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id) {
+ return globalCursorIdCache->eraseCursor(txn, id, true);
+}
+bool CursorManager::eraseCursorGlobal(OperationContext* txn, CursorId id) {
+ return globalCursorIdCache->eraseCursor(txn, id, false);
+}
- // --------------------------
+// --------------------------
- CursorManager::CursorManager( StringData ns )
- : _nss( ns ) {
- _collectionCacheRuntimeId = globalCursorIdCache->created( _nss.ns() );
- _random.reset( new PseudoRandom( globalCursorIdCache->nextSeed() ) );
- }
+CursorManager::CursorManager(StringData ns) : _nss(ns) {
+ _collectionCacheRuntimeId = globalCursorIdCache->created(_nss.ns());
+ _random.reset(new PseudoRandom(globalCursorIdCache->nextSeed()));
+}
- CursorManager::~CursorManager() {
- invalidateAll(true, "collection going away");
- globalCursorIdCache->destroyed( _collectionCacheRuntimeId, _nss.ns() );
- }
+CursorManager::~CursorManager() {
+ invalidateAll(true, "collection going away");
+ globalCursorIdCache->destroyed(_collectionCacheRuntimeId, _nss.ns());
+}
- void CursorManager::invalidateAll(bool collectionGoingAway,
- const std::string& reason) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+void CursorManager::invalidateAll(bool collectionGoingAway, const std::string& reason) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( ExecSet::iterator it = _nonCachedExecutors.begin();
- it != _nonCachedExecutors.end();
- ++it ) {
+ for (ExecSet::iterator it = _nonCachedExecutors.begin(); it != _nonCachedExecutors.end();
+ ++it) {
+ // we kill the executor, but it deletes itself
+ PlanExecutor* exec = *it;
+ exec->kill(reason);
+ invariant(exec->collection() == NULL);
+ }
+ _nonCachedExecutors.clear();
- // we kill the executor, but it deletes itself
- PlanExecutor* exec = *it;
- exec->kill(reason);
- invariant( exec->collection() == NULL );
- }
- _nonCachedExecutors.clear();
+ if (collectionGoingAway) {
+ // we're going to wipe out the world
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ ClientCursor* cc = i->second;
- if ( collectionGoingAway ) {
- // we're going to wipe out the world
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- ClientCursor* cc = i->second;
+ cc->kill();
- cc->kill();
+ invariant(cc->getExecutor() == NULL || cc->getExecutor()->collection() == NULL);
- invariant( cc->getExecutor() == NULL || cc->getExecutor()->collection() == NULL );
-
- // If the CC is pinned, somebody is actively using it and we do not delete it.
- // Instead we notify the holder that we killed it. The holder will then delete the
- // CC.
- //
- // If the CC is not pinned, there is nobody actively holding it. We can safely
- // delete it.
- if (!cc->isPinned()) {
- delete cc;
- }
+ // If the CC is pinned, somebody is actively using it and we do not delete it.
+ // Instead we notify the holder that we killed it. The holder will then delete the
+ // CC.
+ //
+ // If the CC is not pinned, there is nobody actively holding it. We can safely
+ // delete it.
+ if (!cc->isPinned()) {
+ delete cc;
}
}
- else {
- CursorMap newMap;
-
- // collection will still be around, just all PlanExecutors are invalid
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- ClientCursor* cc = i->second;
-
- // Note that a valid ClientCursor state is "no cursor no executor." This is because
- // the set of active cursor IDs in ClientCursor is used as representation of query
- // state. See sharding_block.h. TODO(greg,hk): Move this out.
- if (NULL == cc->getExecutor() ) {
- newMap.insert( *i );
- continue;
- }
-
- if (cc->isPinned() || cc->isAggCursor()) {
- // Pinned cursors need to stay alive, so we leave them around. Aggregation
- // cursors also can stay alive (since they don't have their lifetime bound to
- // the underlying collection). However, if they have an associated executor, we
- // need to kill it, because it's now invalid.
- if ( cc->getExecutor() )
- cc->getExecutor()->kill(reason);
- newMap.insert( *i );
- }
- else {
- cc->kill();
- delete cc;
- }
+ } else {
+ CursorMap newMap;
+
+ // collection will still be around, just all PlanExecutors are invalid
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ ClientCursor* cc = i->second;
+ // Note that a valid ClientCursor state is "no cursor no executor." This is because
+ // the set of active cursor IDs in ClientCursor is used as representation of query
+ // state. See sharding_block.h. TODO(greg,hk): Move this out.
+ if (NULL == cc->getExecutor()) {
+ newMap.insert(*i);
+ continue;
}
- _cursors = newMap;
+ if (cc->isPinned() || cc->isAggCursor()) {
+ // Pinned cursors need to stay alive, so we leave them around. Aggregation
+ // cursors also can stay alive (since they don't have their lifetime bound to
+ // the underlying collection). However, if they have an associated executor, we
+ // need to kill it, because it's now invalid.
+ if (cc->getExecutor())
+ cc->getExecutor()->kill(reason);
+ newMap.insert(*i);
+ } else {
+ cc->kill();
+ delete cc;
+ }
}
+
+ _cursors = newMap;
}
+}
- void CursorManager::invalidateDocument( OperationContext* txn,
- const RecordId& dl,
- InvalidationType type ) {
- if ( supportsDocLocking() ) {
- // If a storage engine supports doc locking, then we do not need to invalidate.
- // The transactional boundaries of the operation protect us.
- return;
- }
+void CursorManager::invalidateDocument(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ if (supportsDocLocking()) {
+ // If a storage engine supports doc locking, then we do not need to invalidate.
+ // The transactional boundaries of the operation protect us.
+ return;
+ }
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( ExecSet::iterator it = _nonCachedExecutors.begin();
- it != _nonCachedExecutors.end();
- ++it ) {
+ for (ExecSet::iterator it = _nonCachedExecutors.begin(); it != _nonCachedExecutors.end();
+ ++it) {
+ PlanExecutor* exec = *it;
+ exec->invalidate(txn, dl, type);
+ }
- PlanExecutor* exec = *it;
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ PlanExecutor* exec = i->second->getExecutor();
+ if (exec) {
exec->invalidate(txn, dl, type);
}
-
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- PlanExecutor* exec = i->second->getExecutor();
- if ( exec ) {
- exec->invalidate(txn, dl, type);
- }
- }
}
+}
- std::size_t CursorManager::timeoutCursors( int millisSinceLastCall ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
-
- vector<ClientCursor*> toDelete;
-
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- ClientCursor* cc = i->second;
- if ( cc->shouldTimeout( millisSinceLastCall ) )
- toDelete.push_back( cc );
- }
+std::size_t CursorManager::timeoutCursors(int millisSinceLastCall) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( vector<ClientCursor*>::const_iterator i = toDelete.begin();
- i != toDelete.end(); ++i ) {
- ClientCursor* cc = *i;
- _deregisterCursor_inlock( cc );
- cc->kill();
- delete cc;
- }
+ vector<ClientCursor*> toDelete;
- return toDelete.size();
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ ClientCursor* cc = i->second;
+ if (cc->shouldTimeout(millisSinceLastCall))
+ toDelete.push_back(cc);
}
- void CursorManager::registerExecutor( PlanExecutor* exec ) {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- const std::pair<ExecSet::iterator, bool> result = _nonCachedExecutors.insert(exec);
- invariant(result.second); // make sure this was inserted
+ for (vector<ClientCursor*>::const_iterator i = toDelete.begin(); i != toDelete.end(); ++i) {
+ ClientCursor* cc = *i;
+ _deregisterCursor_inlock(cc);
+ cc->kill();
+ delete cc;
}
- void CursorManager::deregisterExecutor( PlanExecutor* exec ) {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- _nonCachedExecutors.erase(exec);
- }
+ return toDelete.size();
+}
- ClientCursor* CursorManager::find( CursorId id, bool pin ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- CursorMap::const_iterator it = _cursors.find( id );
- if ( it == _cursors.end() )
- return NULL;
-
- ClientCursor* cursor = it->second;
- if ( pin ) {
- uassert( 12051,
- "clientcursor already in use? driver problem?",
- !cursor->isPinned() );
- cursor->setPinned();
- }
+void CursorManager::registerExecutor(PlanExecutor* exec) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ const std::pair<ExecSet::iterator, bool> result = _nonCachedExecutors.insert(exec);
+ invariant(result.second); // make sure this was inserted
+}
- return cursor;
- }
+void CursorManager::deregisterExecutor(PlanExecutor* exec) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ _nonCachedExecutors.erase(exec);
+}
- void CursorManager::unpin( ClientCursor* cursor ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+ClientCursor* CursorManager::find(CursorId id, bool pin) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ CursorMap::const_iterator it = _cursors.find(id);
+ if (it == _cursors.end())
+ return NULL;
- invariant( cursor->isPinned() );
- cursor->unsetPinned();
+ ClientCursor* cursor = it->second;
+ if (pin) {
+ uassert(12051, "clientcursor already in use? driver problem?", !cursor->isPinned());
+ cursor->setPinned();
}
- bool CursorManager::ownsCursorId( CursorId cursorId ) const {
- return _collectionCacheRuntimeId == idFromCursorId( cursorId );
- }
+ return cursor;
+}
- void CursorManager::getCursorIds( std::set<CursorId>* openCursors ) const {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+void CursorManager::unpin(ClientCursor* cursor) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- ClientCursor* cc = i->second;
- openCursors->insert( cc->cursorid() );
- }
- }
+ invariant(cursor->isPinned());
+ cursor->unsetPinned();
+}
- size_t CursorManager::numCursors() const {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- return _cursors.size();
- }
+bool CursorManager::ownsCursorId(CursorId cursorId) const {
+ return _collectionCacheRuntimeId == idFromCursorId(cursorId);
+}
- CursorId CursorManager::_allocateCursorId_inlock() {
- for ( int i = 0; i < 10000; i++ ) {
- unsigned mypart = static_cast<unsigned>( _random->nextInt32() );
- CursorId id = cursorIdFromParts( _collectionCacheRuntimeId, mypart );
- if ( _cursors.count( id ) == 0 )
- return id;
- }
- fassertFailed( 17360 );
- }
+void CursorManager::getCursorIds(std::set<CursorId>* openCursors) const {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- CursorId CursorManager::registerCursor( ClientCursor* cc ) {
- invariant( cc );
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- CursorId id = _allocateCursorId_inlock();
- _cursors[id] = cc;
- return id;
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ ClientCursor* cc = i->second;
+ openCursors->insert(cc->cursorid());
}
+}
+
+size_t CursorManager::numCursors() const {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ return _cursors.size();
+}
- void CursorManager::deregisterCursor( ClientCursor* cc ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- _deregisterCursor_inlock( cc );
+CursorId CursorManager::_allocateCursorId_inlock() {
+ for (int i = 0; i < 10000; i++) {
+ unsigned mypart = static_cast<unsigned>(_random->nextInt32());
+ CursorId id = cursorIdFromParts(_collectionCacheRuntimeId, mypart);
+ if (_cursors.count(id) == 0)
+ return id;
}
+ fassertFailed(17360);
+}
- bool CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+CursorId CursorManager::registerCursor(ClientCursor* cc) {
+ invariant(cc);
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ CursorId id = _allocateCursorId_inlock();
+ _cursors[id] = cc;
+ return id;
+}
- CursorMap::iterator it = _cursors.find( id );
- if ( it == _cursors.end() ) {
- if ( checkAuth )
- audit::logKillCursorsAuthzCheck( txn->getClient(),
- _nss,
- id,
- ErrorCodes::CursorNotFound );
- return false;
- }
+void CursorManager::deregisterCursor(ClientCursor* cc) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ _deregisterCursor_inlock(cc);
+}
- ClientCursor* cursor = it->second;
+bool CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- if ( checkAuth )
- audit::logKillCursorsAuthzCheck( txn->getClient(),
- _nss,
- id,
- ErrorCodes::OK );
+ CursorMap::iterator it = _cursors.find(id);
+ if (it == _cursors.end()) {
+ if (checkAuth)
+ audit::logKillCursorsAuthzCheck(txn->getClient(), _nss, id, ErrorCodes::CursorNotFound);
+ return false;
+ }
- massert( 16089,
- str::stream() << "Cannot kill active cursor " << id,
- !cursor->isPinned() );
+ ClientCursor* cursor = it->second;
- cursor->kill();
- _deregisterCursor_inlock( cursor );
- delete cursor;
- return true;
- }
+ if (checkAuth)
+ audit::logKillCursorsAuthzCheck(txn->getClient(), _nss, id, ErrorCodes::OK);
- void CursorManager::_deregisterCursor_inlock( ClientCursor* cc ) {
- invariant( cc );
- CursorId id = cc->cursorid();
- _cursors.erase( id );
- }
+ massert(16089, str::stream() << "Cannot kill active cursor " << id, !cursor->isPinned());
+
+ cursor->kill();
+ _deregisterCursor_inlock(cursor);
+ delete cursor;
+ return true;
+}
+void CursorManager::_deregisterCursor_inlock(ClientCursor* cc) {
+ invariant(cc);
+ CursorId id = cc->cursorid();
+ _cursors.erase(id);
+}
}
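
The id-allocation and ownership logic above relies on cursorIdFromParts() and idFromCursorId(), which are not part of this diff. A minimal standalone sketch of the idea, assuming the collection's cache runtime id occupies the high 32 bits and the random per-cursor part the low 32 bits (the exact layout is an assumption here, not taken from this change), shows why ownsCursorId() can answer without consulting the cursor map:

    #include <cassert>
    #include <cstdint>

    typedef long long CursorId;

    // Hypothetical layout: collection runtime id in the high 32 bits,
    // random per-cursor part in the low 32 bits.
    CursorId cursorIdFromParts(unsigned collectionId, unsigned cursorPart) {
        return (static_cast<CursorId>(collectionId) << 32) | cursorPart;
    }

    unsigned idFromCursorId(CursorId id) {
        return static_cast<unsigned>(static_cast<uint64_t>(id) >> 32);
    }

    int main() {
        const unsigned collectionCacheRuntimeId = 42;
        CursorId id = cursorIdFromParts(collectionCacheRuntimeId, 0xDEADBEEFu);
        // ownsCursorId() then reduces to comparing the high 32 bits
        // against the manager's _collectionCacheRuntimeId.
        assert(idFromCursorId(id) == collectionCacheRuntimeId);
        return 0;
    }
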
diff --git a/src/mongo/db/catalog/cursor_manager.h b/src/mongo/db/catalog/cursor_manager.h
index 65fd204c5e3..81b9dd0107c 100644
--- a/src/mongo/db/catalog/cursor_manager.h
+++ b/src/mongo/db/catalog/cursor_manager.h
@@ -40,118 +40,114 @@
namespace mongo {
- class OperationContext;
- class PseudoRandom;
- class PlanExecutor;
-
- class CursorManager {
- public:
- CursorManager( StringData ns );
-
- /**
- * will kill() all PlanExecutor instances it has
- */
- ~CursorManager();
-
- // -----------------
-
- /**
- * @param collectionGoingAway Pass as true if the Collection instance is going away.
- * This could be because the db is being closed, or the
- * collection/db is being dropped.
- * @param reason The motivation for invalidating all cursors. Will be used
- * for error reporting and logging when an operation finds that
- * the cursor it was operating on has been killed.
- */
- void invalidateAll(bool collectionGoingAway, const std::string& reason);
-
- /**
- * Broadcast a document invalidation to all relevant PlanExecutor(s). invalidateDocument
- * must called *before* the provided RecordId is about to be deleted or mutated.
- */
- void invalidateDocument( OperationContext* txn,
- const RecordId& dl,
- InvalidationType type );
-
- /*
- * timesout cursors that have been idle for too long
- * note: must have a readlock on the collection
- * @return number timed out
- */
- std::size_t timeoutCursors( int millisSinceLastCall );
-
- // -----------------
-
- /**
- * Register an executor so that it can be notified of deletion/invalidation during yields.
- * Must be called before an executor yields. If an executor is cached (inside a
- * ClientCursor) it MUST NOT be registered; the two are mutually exclusive.
- */
- void registerExecutor(PlanExecutor* exec);
-
- /**
- * Remove an executor from the registry.
- */
- void deregisterExecutor(PlanExecutor* exec);
-
- // -----------------
-
- CursorId registerCursor( ClientCursor* cc );
- void deregisterCursor( ClientCursor* cc );
-
- bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth );
-
- /**
- * Returns true if the space of cursor ids that cursor manager is responsible for includes
- * the given cursor id. Otherwise, returns false.
- *
- * The return value of this method does not indicate any information about whether or not a
- * cursor actually exists with the given cursor id. Use the find() method for that purpose.
- */
- bool ownsCursorId( CursorId cursorId ) const;
-
- void getCursorIds( std::set<CursorId>* openCursors ) const;
- std::size_t numCursors() const;
-
- /**
- * @param pin - if true, will try to pin cursor
- * if pinned already, will assert
- * otherwise will pin
- */
- ClientCursor* find( CursorId id, bool pin );
-
- void unpin( ClientCursor* cursor );
-
- // ----------------------
-
- static CursorManager* getGlobalCursorManager();
-
- static int eraseCursorGlobalIfAuthorized(OperationContext* txn, int n,
- const char* ids);
- static bool eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id);
-
- static bool eraseCursorGlobal(OperationContext* txn, CursorId id);
-
- /**
- * @return number timed out
- */
- static std::size_t timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall);
-
- private:
- CursorId _allocateCursorId_inlock();
- void _deregisterCursor_inlock( ClientCursor* cc );
-
- NamespaceString _nss;
- unsigned _collectionCacheRuntimeId;
- std::unique_ptr<PseudoRandom> _random;
-
- mutable SimpleMutex _mutex;
-
- typedef unordered_set<PlanExecutor*> ExecSet;
- ExecSet _nonCachedExecutors;
-
- typedef std::map<CursorId,ClientCursor*> CursorMap;
- CursorMap _cursors;
- };
+class OperationContext;
+class PseudoRandom;
+class PlanExecutor;
+
+class CursorManager {
+public:
+ CursorManager(StringData ns);
+
+ /**
+ * will kill() all PlanExecutor instances it has
+ */
+ ~CursorManager();
+
+ // -----------------
+
+ /**
+ * @param collectionGoingAway Pass as true if the Collection instance is going away.
+ * This could be because the db is being closed, or the
+ * collection/db is being dropped.
+ * @param reason The motivation for invalidating all cursors. Will be used
+ * for error reporting and logging when an operation finds that
+ * the cursor it was operating on has been killed.
+ */
+ void invalidateAll(bool collectionGoingAway, const std::string& reason);
+
+ /**
+ * Broadcast a document invalidation to all relevant PlanExecutor(s). invalidateDocument
+ * must be called *before* the provided RecordId is about to be deleted or mutated.
+ */
+ void invalidateDocument(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ /*
+ * times out cursors that have been idle for too long
+ * note: must have a readlock on the collection
+ * @return number timed out
+ */
+ std::size_t timeoutCursors(int millisSinceLastCall);
+
+ // -----------------
+
+ /**
+ * Register an executor so that it can be notified of deletion/invalidation during yields.
+ * Must be called before an executor yields. If an executor is cached (inside a
+ * ClientCursor) it MUST NOT be registered; the two are mutually exclusive.
+ */
+ void registerExecutor(PlanExecutor* exec);
+
+ /**
+ * Remove an executor from the registry.
+ */
+ void deregisterExecutor(PlanExecutor* exec);
+
+ // -----------------
+
+ CursorId registerCursor(ClientCursor* cc);
+ void deregisterCursor(ClientCursor* cc);
+
+ bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
+
+ /**
+ * Returns true if the space of cursor ids that this cursor manager is responsible for includes
+ * the given cursor id. Otherwise, returns false.
+ *
+ * The return value of this method does not indicate any information about whether or not a
+ * cursor actually exists with the given cursor id. Use the find() method for that purpose.
+ */
+ bool ownsCursorId(CursorId cursorId) const;
+
+ void getCursorIds(std::set<CursorId>* openCursors) const;
+ std::size_t numCursors() const;
+
+ /**
+ * @param pin - if true, will try to pin cursor
+ * if pinned already, will assert
+ * otherwise will pin
+ */
+ ClientCursor* find(CursorId id, bool pin);
+
+ void unpin(ClientCursor* cursor);
+
+ // ----------------------
+
+ static CursorManager* getGlobalCursorManager();
+
+ static int eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, const char* ids);
+ static bool eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id);
+
+ static bool eraseCursorGlobal(OperationContext* txn, CursorId id);
+
+ /**
+ * @return number timed out
+ */
+ static std::size_t timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall);
+
+private:
+ CursorId _allocateCursorId_inlock();
+ void _deregisterCursor_inlock(ClientCursor* cc);
+
+ NamespaceString _nss;
+ unsigned _collectionCacheRuntimeId;
+ std::unique_ptr<PseudoRandom> _random;
+
+ mutable SimpleMutex _mutex;
+ typedef unordered_set<PlanExecutor*> ExecSet;
+ ExecSet _nonCachedExecutors;
+
+ typedef std::map<CursorId, ClientCursor*> CursorMap;
+ CursorMap _cursors;
+};
}
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 7753235fa8f..d5b7b372c40 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -65,570 +65,544 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::list;
- using std::set;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- void massertNamespaceNotIndex( StringData ns, StringData caller ) {
- massert( 17320,
- str::stream() << "cannot do " << caller
- << " on namespace with a $ in it: " << ns,
- NamespaceString::normal( ns ) );
- }
+using std::unique_ptr;
+using std::endl;
+using std::list;
+using std::set;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+void massertNamespaceNotIndex(StringData ns, StringData caller) {
+ massert(17320,
+ str::stream() << "cannot do " << caller << " on namespace with a $ in it: " << ns,
+ NamespaceString::normal(ns));
+}
+
+class Database::AddCollectionChange : public RecoveryUnit::Change {
+public:
+ AddCollectionChange(Database* db, StringData ns) : _db(db), _ns(ns.toString()) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ CollectionMap::const_iterator it = _db->_collections.find(_ns);
+ if (it == _db->_collections.end())
+ return;
- class Database::AddCollectionChange : public RecoveryUnit::Change {
- public:
- AddCollectionChange(Database* db, StringData ns)
- : _db(db)
- , _ns(ns.toString())
- {}
-
- virtual void commit() {}
- virtual void rollback() {
- CollectionMap::const_iterator it = _db->_collections.find(_ns);
- if ( it == _db->_collections.end() )
- return;
-
- delete it->second;
- _db->_collections.erase( it );
- }
+ delete it->second;
+ _db->_collections.erase(it);
+ }
- Database* const _db;
- const std::string _ns;
- };
+ Database* const _db;
+ const std::string _ns;
+};
- class Database::RemoveCollectionChange : public RecoveryUnit::Change {
- public:
- // Takes ownership of coll (but not db).
- RemoveCollectionChange(Database* db, Collection* coll)
- : _db(db)
- , _coll(coll)
- {}
+class Database::RemoveCollectionChange : public RecoveryUnit::Change {
+public:
+ // Takes ownership of coll (but not db).
+ RemoveCollectionChange(Database* db, Collection* coll) : _db(db), _coll(coll) {}
- virtual void commit() {
- delete _coll;
- }
+ virtual void commit() {
+ delete _coll;
+ }
- virtual void rollback() {
- Collection*& inMap = _db->_collections[_coll->ns().ns()];
- invariant(!inMap);
- inMap = _coll;
- }
+ virtual void rollback() {
+ Collection*& inMap = _db->_collections[_coll->ns().ns()];
+ invariant(!inMap);
+ inMap = _coll;
+ }
- Database* const _db;
- Collection* const _coll;
- };
+ Database* const _db;
+ Collection* const _coll;
+};
- Database::~Database() {
- for (CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i)
- delete i->second;
- }
+Database::~Database() {
+ for (CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i)
+ delete i->second;
+}
- void Database::close(OperationContext* txn ) {
- // XXX? - Do we need to close database under global lock or just DB-lock is sufficient ?
- invariant(txn->lockState()->isW());
- // oplog caches some things, dirty its caches
- repl::oplogCheckCloseDatabase(txn, this);
+void Database::close(OperationContext* txn) {
+ // XXX? - Do we need to close the database under the global lock, or is a DB-lock sufficient?
+ invariant(txn->lockState()->isW());
+ // oplog caches some things, dirty its caches
+ repl::oplogCheckCloseDatabase(txn, this);
- if ( BackgroundOperation::inProgForDb( _name ) ) {
- log() << "warning: bg op in prog during close db? " << _name << endl;
- }
+ if (BackgroundOperation::inProgForDb(_name)) {
+ log() << "warning: bg op in prog during close db? " << _name << endl;
}
+}
- Status Database::validateDBName( StringData dbname ) {
+Status Database::validateDBName(StringData dbname) {
+ if (dbname.size() <= 0)
+ return Status(ErrorCodes::BadValue, "db name is empty");
- if ( dbname.size() <= 0 )
- return Status( ErrorCodes::BadValue, "db name is empty" );
+ if (dbname.size() >= 64)
+ return Status(ErrorCodes::BadValue, "db name is too long");
- if ( dbname.size() >= 64 )
- return Status( ErrorCodes::BadValue, "db name is too long" );
+ if (dbname.find('.') != string::npos)
+ return Status(ErrorCodes::BadValue, "db name cannot contain a .");
- if ( dbname.find( '.' ) != string::npos )
- return Status( ErrorCodes::BadValue, "db name cannot contain a ." );
-
- if ( dbname.find( ' ' ) != string::npos )
- return Status( ErrorCodes::BadValue, "db name cannot contain a space" );
+ if (dbname.find(' ') != string::npos)
+ return Status(ErrorCodes::BadValue, "db name cannot contain a space");
#ifdef _WIN32
- static const char* windowsReservedNames[] = {
- "con", "prn", "aux", "nul",
- "com1", "com2", "com3", "com4", "com5", "com6", "com7", "com8", "com9",
- "lpt1", "lpt2", "lpt3", "lpt4", "lpt5", "lpt6", "lpt7", "lpt8", "lpt9"
- };
-
- string lower( dbname.toString() );
- std::transform( lower.begin(), lower.end(), lower.begin(), ::tolower );
- for ( size_t i = 0; i < (sizeof(windowsReservedNames) / sizeof(char*)); ++i ) {
- if ( lower == windowsReservedNames[i] ) {
- stringstream errorString;
- errorString << "db name \"" << dbname.toString() << "\" is a reserved name";
- return Status( ErrorCodes::BadValue, errorString.str() );
- }
+ static const char* windowsReservedNames[] = {
+ "con", "prn", "aux", "nul", "com1", "com2", "com3", "com4", "com5", "com6", "com7",
+ "com8", "com9", "lpt1", "lpt2", "lpt3", "lpt4", "lpt5", "lpt6", "lpt7", "lpt8", "lpt9"};
+
+ string lower(dbname.toString());
+ std::transform(lower.begin(), lower.end(), lower.begin(), ::tolower);
+ for (size_t i = 0; i < (sizeof(windowsReservedNames) / sizeof(char*)); ++i) {
+ if (lower == windowsReservedNames[i]) {
+ stringstream errorString;
+ errorString << "db name \"" << dbname.toString() << "\" is a reserved name";
+ return Status(ErrorCodes::BadValue, errorString.str());
}
+ }
#endif
- return Status::OK();
- }
+ return Status::OK();
+}
- Collection* Database::_getOrCreateCollectionInstance(OperationContext* txn,
- StringData fullns) {
- Collection* collection = getCollection( fullns );
- if (collection) {
- return collection;
- }
+Collection* Database::_getOrCreateCollectionInstance(OperationContext* txn, StringData fullns) {
+ Collection* collection = getCollection(fullns);
+ if (collection) {
+ return collection;
+ }
- unique_ptr<CollectionCatalogEntry> cce( _dbEntry->getCollectionCatalogEntry( fullns ) );
- invariant( cce.get() );
+ unique_ptr<CollectionCatalogEntry> cce(_dbEntry->getCollectionCatalogEntry(fullns));
+ invariant(cce.get());
+
+ unique_ptr<RecordStore> rs(_dbEntry->getRecordStore(fullns));
+ invariant(rs.get()); // if cce exists, so should this
+
+ // Not registering AddCollectionChange since this is for collections that already exist.
+ Collection* c = new Collection(txn, fullns, cce.release(), rs.release(), _dbEntry);
+ return c;
+}
+
+Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry)
+ : _name(name.toString()),
+ _dbEntry(dbEntry),
+ _profileName(_name + ".system.profile"),
+ _indexesName(_name + ".system.indexes") {
+ Status status = validateDBName(_name);
+ if (!status.isOK()) {
+ warning() << "tried to open invalid db: " << _name << endl;
+ uasserted(10028, status.toString());
+ }
- unique_ptr<RecordStore> rs( _dbEntry->getRecordStore( fullns ) );
- invariant( rs.get() ); // if cce exists, so should this
+ _profile = serverGlobalParams.defaultProfile;
- // Not registering AddCollectionChange since this is for collections that already exist.
- Collection* c = new Collection( txn, fullns, cce.release(), rs.release(), _dbEntry );
- return c;
+ list<string> collections;
+ _dbEntry->getCollectionNamespaces(&collections);
+ for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
+ const string ns = *it;
+ _collections[ns] = _getOrCreateCollectionInstance(txn, ns);
}
+}
- Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry)
- : _name(name.toString()),
- _dbEntry( dbEntry ),
- _profileName(_name + ".system.profile"),
- _indexesName(_name + ".system.indexes")
- {
- Status status = validateDBName( _name );
- if ( !status.isOK() ) {
- warning() << "tried to open invalid db: " << _name << endl;
- uasserted( 10028, status.toString() );
- }
-
- _profile = serverGlobalParams.defaultProfile;
- list<string> collections;
- _dbEntry->getCollectionNamespaces( &collections );
- for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
- const string ns = *it;
- _collections[ns] = _getOrCreateCollectionInstance(txn, ns);
- }
+/*static*/
+string Database::duplicateUncasedName(const string& name, set<string>* duplicates) {
+ if (duplicates) {
+ duplicates->clear();
}
+ set<string> allShortNames;
+ dbHolder().getAllShortNames(allShortNames);
- /*static*/
- string Database::duplicateUncasedName(const string &name, set< string > *duplicates) {
- if ( duplicates ) {
- duplicates->clear();
- }
-
- set<string> allShortNames;
- dbHolder().getAllShortNames(allShortNames);
+ for (const auto& dbname : allShortNames) {
+ if (strcasecmp(dbname.c_str(), name.c_str()))
+ continue;
- for (const auto& dbname : allShortNames) {
- if (strcasecmp(dbname.c_str(), name.c_str()))
- continue;
+ if (strcmp(dbname.c_str(), name.c_str()) == 0)
+ continue;
- if (strcmp(dbname.c_str(), name.c_str()) == 0)
- continue;
-
- if ( duplicates ) {
- duplicates->insert(dbname);
- } else {
- return dbname;
- }
+ if (duplicates) {
+ duplicates->insert(dbname);
+ } else {
+ return dbname;
}
- if ( duplicates ) {
- return duplicates->empty() ? "" : *duplicates->begin();
- }
- return "";
}
+ if (duplicates) {
+ return duplicates->empty() ? "" : *duplicates->begin();
+ }
+ return "";
+}
- void Database::clearTmpCollections(OperationContext* txn) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+void Database::clearTmpCollections(OperationContext* txn) {
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- list<string> collections;
- _dbEntry->getCollectionNamespaces( &collections );
+ list<string> collections;
+ _dbEntry->getCollectionNamespaces(&collections);
- for ( list<string>::iterator i = collections.begin(); i != collections.end(); ++i ) {
- string ns = *i;
- invariant( NamespaceString::normal( ns ) );
+ for (list<string>::iterator i = collections.begin(); i != collections.end(); ++i) {
+ string ns = *i;
+ invariant(NamespaceString::normal(ns));
- CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry( ns );
+ CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry(ns);
- CollectionOptions options = coll->getCollectionOptions( txn );
- if ( !options.temp )
+ CollectionOptions options = coll->getCollectionOptions(txn);
+ if (!options.temp)
+ continue;
+ try {
+ WriteUnitOfWork wunit(txn);
+ Status status = dropCollection(txn, ns);
+ if (!status.isOK()) {
+ warning() << "could not drop temp collection '" << ns << "': " << status;
continue;
- try {
- WriteUnitOfWork wunit(txn);
- Status status = dropCollection( txn, ns );
- if ( !status.isOK() ) {
- warning() << "could not drop temp collection '" << ns << "': " << status;
- continue;
- }
-
- wunit.commit();
- }
- catch (const WriteConflictException& exp) {
- warning() << "could not drop temp collection '" << ns << "' due to "
- "WriteConflictException";
- txn->recoveryUnit()->abandonSnapshot();
}
- }
- }
-
- Status Database::setProfilingLevel(OperationContext* txn, int newLevel) {
- if (_profile == newLevel) {
- return Status::OK();
- }
- if (newLevel == 0) {
- _profile = 0;
- return Status::OK();
+ wunit.commit();
+ } catch (const WriteConflictException& exp) {
+ warning() << "could not drop temp collection '" << ns << "' due to "
+ "WriteConflictException";
+ txn->recoveryUnit()->abandonSnapshot();
}
+ }
+}
- if (newLevel < 0 || newLevel > 2) {
- return Status(ErrorCodes::BadValue, "profiling level has to be >=0 and <= 2");
- }
+Status Database::setProfilingLevel(OperationContext* txn, int newLevel) {
+ if (_profile == newLevel) {
+ return Status::OK();
+ }
- Status status = createProfileCollection(txn, this);
- if (!status.isOK()) {
- return status;
- }
+ if (newLevel == 0) {
+ _profile = 0;
+ return Status::OK();
+ }
- _profile = newLevel;
+ if (newLevel < 0 || newLevel > 2) {
+ return Status(ErrorCodes::BadValue, "profiling level has to be >=0 and <= 2");
+ }
- return Status::OK();
+ Status status = createProfileCollection(txn, this);
+ if (!status.isOK()) {
+ return status;
}
- void Database::getStats( OperationContext* opCtx, BSONObjBuilder* output, double scale ) {
- list<string> collections;
- _dbEntry->getCollectionNamespaces( &collections );
+ _profile = newLevel;
- long long ncollections = 0;
- long long objects = 0;
- long long size = 0;
- long long storageSize = 0;
- long long numExtents = 0;
- long long indexes = 0;
- long long indexSize = 0;
+ return Status::OK();
+}
- for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
- const string ns = *it;
+void Database::getStats(OperationContext* opCtx, BSONObjBuilder* output, double scale) {
+ list<string> collections;
+ _dbEntry->getCollectionNamespaces(&collections);
- Collection* collection = getCollection( ns );
- if ( !collection )
- continue;
+ long long ncollections = 0;
+ long long objects = 0;
+ long long size = 0;
+ long long storageSize = 0;
+ long long numExtents = 0;
+ long long indexes = 0;
+ long long indexSize = 0;
- ncollections += 1;
- objects += collection->numRecords(opCtx);
- size += collection->dataSize(opCtx);
+ for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
+ const string ns = *it;
- BSONObjBuilder temp;
- storageSize += collection->getRecordStore()->storageSize( opCtx, &temp );
- numExtents += temp.obj()["numExtents"].numberInt(); // XXX
+ Collection* collection = getCollection(ns);
+ if (!collection)
+ continue;
- indexes += collection->getIndexCatalog()->numIndexesTotal( opCtx );
- indexSize += collection->getIndexSize(opCtx);
- }
+ ncollections += 1;
+ objects += collection->numRecords(opCtx);
+ size += collection->dataSize(opCtx);
- output->appendNumber( "collections" , ncollections );
- output->appendNumber( "objects" , objects );
- output->append ( "avgObjSize" , objects == 0 ? 0 : double(size) / double(objects) );
- output->appendNumber( "dataSize" , size / scale );
- output->appendNumber( "storageSize" , storageSize / scale);
- output->appendNumber( "numExtents" , numExtents );
- output->appendNumber( "indexes" , indexes );
- output->appendNumber( "indexSize" , indexSize / scale );
+ BSONObjBuilder temp;
+ storageSize += collection->getRecordStore()->storageSize(opCtx, &temp);
+ numExtents += temp.obj()["numExtents"].numberInt(); // XXX
- _dbEntry->appendExtraStats( opCtx, output, scale );
+ indexes += collection->getIndexCatalog()->numIndexesTotal(opCtx);
+ indexSize += collection->getIndexSize(opCtx);
}
- Status Database::dropCollection(OperationContext* txn, StringData fullns) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ output->appendNumber("collections", ncollections);
+ output->appendNumber("objects", objects);
+ output->append("avgObjSize", objects == 0 ? 0 : double(size) / double(objects));
+ output->appendNumber("dataSize", size / scale);
+ output->appendNumber("storageSize", storageSize / scale);
+ output->appendNumber("numExtents", numExtents);
+ output->appendNumber("indexes", indexes);
+ output->appendNumber("indexSize", indexSize / scale);
- LOG(1) << "dropCollection: " << fullns << endl;
- massertNamespaceNotIndex( fullns, "dropCollection" );
+ _dbEntry->appendExtraStats(opCtx, output, scale);
+}
- Collection* collection = getCollection( fullns );
- if ( !collection ) {
- // collection doesn't exist
- return Status::OK();
- }
+Status Database::dropCollection(OperationContext* txn, StringData fullns) {
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- NamespaceString nss(fullns);
- {
- verify(nss.db() == _name);
+ LOG(1) << "dropCollection: " << fullns << endl;
+ massertNamespaceNotIndex(fullns, "dropCollection");
- if (nss.isSystem()) {
- if (nss.isSystemDotProfile()) {
- if ( _profile != 0 )
- return Status(
- ErrorCodes::IllegalOperation,
- "turn off profiling before dropping system.profile collection");
- }
- else {
- return Status( ErrorCodes::IllegalOperation, "can't drop system ns" );
- }
- }
- }
-
- BackgroundOperation::assertNoBgOpInProgForNs( fullns );
+ Collection* collection = getCollection(fullns);
+ if (!collection) {
+ // collection doesn't exist
+ return Status::OK();
+ }
- audit::logDropCollection( &cc(), fullns );
+ NamespaceString nss(fullns);
+ {
+ verify(nss.db() == _name);
- Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
- if ( !s.isOK() ) {
- warning() << "could not drop collection, trying to drop indexes"
- << fullns << " because of " << s.toString();
- return s;
+ if (nss.isSystem()) {
+ if (nss.isSystemDotProfile()) {
+ if (_profile != 0)
+ return Status(ErrorCodes::IllegalOperation,
+ "turn off profiling before dropping system.profile collection");
+ } else {
+ return Status(ErrorCodes::IllegalOperation, "can't drop system ns");
+ }
}
+ }
- verify( collection->_details->getTotalIndexCount( txn ) == 0 );
- LOG(1) << "\t dropIndexes done" << endl;
-
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns);
-
- s = _dbEntry->dropCollection( txn, fullns );
+ BackgroundOperation::assertNoBgOpInProgForNs(fullns);
- // we want to do this always
- _clearCollectionCache(txn, fullns, "collection dropped");
+ audit::logDropCollection(&cc(), fullns);
- if ( !s.isOK() )
- return s;
+ Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
+ if (!s.isOK()) {
+ warning() << "could not drop collection, trying to drop indexes " << fullns << " because of "
+ << s.toString();
+ return s;
+ }
- DEV {
- // check all index collection entries are gone
- string nstocheck = fullns.toString() + ".$";
- for ( CollectionMap::const_iterator i = _collections.begin();
- i != _collections.end();
- ++i ) {
- string temp = i->first;
- if ( temp.find( nstocheck ) != 0 )
- continue;
- log() << "after drop, bad cache entries for: "
- << fullns << " have " << temp;
- verify(0);
- }
- }
+ verify(collection->_details->getTotalIndexCount(txn) == 0);
+ LOG(1) << "\t dropIndexes done" << endl;
- getGlobalServiceContext()->getOpObserver()->onDropCollection(txn, nss);
- return Status::OK();
- }
+ Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns);
- void Database::_clearCollectionCache(OperationContext* txn,
- StringData fullns,
- const std::string& reason) {
- verify( _name == nsToDatabaseSubstring( fullns ) );
- CollectionMap::const_iterator it = _collections.find( fullns.toString() );
- if ( it == _collections.end() )
- return;
+ s = _dbEntry->dropCollection(txn, fullns);
- // Takes ownership of the collection
- txn->recoveryUnit()->registerChange(new RemoveCollectionChange(this, it->second));
+ // we want to do this always
+ _clearCollectionCache(txn, fullns, "collection dropped");
- it->second->_cursorManager.invalidateAll(false, reason);
- _collections.erase( it );
- }
+ if (!s.isOK())
+ return s;
- Collection* Database::getCollection( StringData ns ) const {
- invariant( _name == nsToDatabaseSubstring( ns ) );
- CollectionMap::const_iterator it = _collections.find( ns );
- if ( it != _collections.end() && it->second ) {
- return it->second;
+ DEV {
+ // check all index collection entries are gone
+ string nstocheck = fullns.toString() + ".$";
+ for (CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i) {
+ string temp = i->first;
+ if (temp.find(nstocheck) != 0)
+ continue;
+ log() << "after drop, bad cache entries for: " << fullns << " have " << temp;
+ verify(0);
}
-
- return NULL;
}
+ getGlobalServiceContext()->getOpObserver()->onDropCollection(txn, nss);
+ return Status::OK();
+}
+
+void Database::_clearCollectionCache(OperationContext* txn,
+ StringData fullns,
+ const std::string& reason) {
+ verify(_name == nsToDatabaseSubstring(fullns));
+ CollectionMap::const_iterator it = _collections.find(fullns.toString());
+ if (it == _collections.end())
+ return;
+
+ // Takes ownership of the collection
+ txn->recoveryUnit()->registerChange(new RemoveCollectionChange(this, it->second));
+
+ it->second->_cursorManager.invalidateAll(false, reason);
+ _collections.erase(it);
+}
+
+Collection* Database::getCollection(StringData ns) const {
+ invariant(_name == nsToDatabaseSubstring(ns));
+ CollectionMap::const_iterator it = _collections.find(ns);
+ if (it != _collections.end() && it->second) {
+ return it->second;
+ }
+ return NULL;
+}
- Status Database::renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp ) {
-
- audit::logRenameCollection( &cc(), fromNS, toNS );
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
-
- { // remove anything cached
- Collection* coll = getCollection( fromNS );
- if ( !coll )
- return Status(ErrorCodes::NamespaceNotFound, "collection not found to rename");
- string clearCacheReason = str::stream() << "renamed collection '" << fromNS
- << "' to '" << toNS << "'";
- IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator(txn, true);
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- _clearCollectionCache(txn, desc->indexNamespace(), clearCacheReason);
- }
+Status Database::renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp) {
+ audit::logRenameCollection(&cc(), fromNS, toNS);
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- _clearCollectionCache(txn, fromNS, clearCacheReason);
- _clearCollectionCache(txn, toNS, clearCacheReason);
+ { // remove anything cached
+ Collection* coll = getCollection(fromNS);
+ if (!coll)
+ return Status(ErrorCodes::NamespaceNotFound, "collection not found to rename");
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fromNS.toString());
+ string clearCacheReason = str::stream() << "renamed collection '" << fromNS << "' to '"
+ << toNS << "'";
+ IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator(txn, true);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ _clearCollectionCache(txn, desc->indexNamespace(), clearCacheReason);
}
- txn->recoveryUnit()->registerChange( new AddCollectionChange(this, toNS) );
- Status s = _dbEntry->renameCollection( txn, fromNS, toNS, stayTemp );
- _collections[toNS] = _getOrCreateCollectionInstance(txn, toNS);
- return s;
- }
+ _clearCollectionCache(txn, fromNS, clearCacheReason);
+ _clearCollectionCache(txn, toNS, clearCacheReason);
- Collection* Database::getOrCreateCollection(OperationContext* txn, StringData ns) {
- Collection* c = getCollection( ns );
- if ( !c ) {
- c = createCollection( txn, ns );
- }
- return c;
+ Top::get(txn->getClient()->getServiceContext()).collectionDropped(fromNS.toString());
}
- Collection* Database::createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options,
- bool createIdIndex ) {
- massert( 17399, "collection already exists", getCollection( ns ) == NULL );
- massertNamespaceNotIndex( ns, "createCollection" );
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
-
- if ( serverGlobalParams.configsvr &&
- !( ns.startsWith( "config." ) ||
- ns.startsWith( "local." ) ||
- ns.startsWith( "admin." ) ) ) {
- uasserted(14037, "can't create user databases on a --configsvr instance");
- }
+ txn->recoveryUnit()->registerChange(new AddCollectionChange(this, toNS));
+ Status s = _dbEntry->renameCollection(txn, fromNS, toNS, stayTemp);
+ _collections[toNS] = _getOrCreateCollectionInstance(txn, toNS);
+ return s;
+}
- if (NamespaceString::normal(ns)) {
- // This check only applies for actual collections, not indexes or other types of ns.
- uassert(17381, str::stream() << "fully qualified namespace " << ns << " is too long "
- << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)",
- ns.size() <= NamespaceString::MaxNsCollectionLen);
- }
+Collection* Database::getOrCreateCollection(OperationContext* txn, StringData ns) {
+ Collection* c = getCollection(ns);
+ if (!c) {
+ c = createCollection(txn, ns);
+ }
+ return c;
+}
+
+Collection* Database::createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options,
+ bool createIdIndex) {
+ massert(17399, "collection already exists", getCollection(ns) == NULL);
+ massertNamespaceNotIndex(ns, "createCollection");
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+
+ if (serverGlobalParams.configsvr &&
+ !(ns.startsWith("config.") || ns.startsWith("local.") || ns.startsWith("admin."))) {
+ uasserted(14037, "can't create user databases on a --configsvr instance");
+ }
- NamespaceString nss( ns );
- uassert( 17316, "cannot create a blank collection", nss.coll() > 0 );
+ if (NamespaceString::normal(ns)) {
+ // This check only applies for actual collections, not indexes or other types of ns.
+ uassert(17381,
+ str::stream() << "fully qualified namespace " << ns << " is too long "
+ << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)",
+ ns.size() <= NamespaceString::MaxNsCollectionLen);
+ }
- audit::logCreateCollection( &cc(), ns );
+ NamespaceString nss(ns);
+ uassert(17316, "cannot create a blank collection", nss.coll() > 0);
- txn->recoveryUnit()->registerChange( new AddCollectionChange(this, ns) );
+ audit::logCreateCollection(&cc(), ns);
- Status status = _dbEntry->createCollection(txn, ns, options, true /*allocateDefaultSpace*/);
- massertNoTraceStatusOK(status);
+ txn->recoveryUnit()->registerChange(new AddCollectionChange(this, ns));
+ Status status = _dbEntry->createCollection(txn, ns, options, true /*allocateDefaultSpace*/);
+ massertNoTraceStatusOK(status);
- Collection* collection = _getOrCreateCollectionInstance(txn, ns);
- invariant(collection);
- _collections[ns] = collection;
- if ( createIdIndex ) {
- if ( collection->requiresIdIndex() ) {
- if ( options.autoIndexId == CollectionOptions::YES ||
- options.autoIndexId == CollectionOptions::DEFAULT ) {
- IndexCatalog* ic = collection->getIndexCatalog();
- uassertStatusOK(
- ic->createIndexOnEmptyCollection(txn, ic->getDefaultIdIndexSpec()));
- }
- }
+ Collection* collection = _getOrCreateCollectionInstance(txn, ns);
+ invariant(collection);
+ _collections[ns] = collection;
- if ( nss.isSystem() ) {
- authindex::createSystemIndexes( txn, collection );
+ if (createIdIndex) {
+ if (collection->requiresIdIndex()) {
+ if (options.autoIndexId == CollectionOptions::YES ||
+ options.autoIndexId == CollectionOptions::DEFAULT) {
+ IndexCatalog* ic = collection->getIndexCatalog();
+ uassertStatusOK(ic->createIndexOnEmptyCollection(txn, ic->getDefaultIdIndexSpec()));
}
-
}
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(txn, nss, options);
-
- return collection;
- }
-
- const DatabaseCatalogEntry* Database::getDatabaseCatalogEntry() const {
- return _dbEntry;
+ if (nss.isSystem()) {
+ authindex::createSystemIndexes(txn, collection);
+ }
}
- void dropAllDatabasesExceptLocal(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
-
- vector<string> n;
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases(&n);
-
- if( n.size() == 0 ) return;
- log() << "dropAllDatabasesExceptLocal " << n.size() << endl;
-
- for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
- if (*i != "local") {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- Database* db = dbHolder().get(txn, *i);
- // This is needed since dropDatabase can't be rolled back.
- // This is safe be replaced by "invariant(db);dropDatabase(txn, db);" once fixed
- if (db == nullptr) {
- log() << "database disappeared after listDatabases but before drop: " << *i;
- } else {
- dropDatabase(txn, db);
- }
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
- "dropAllDatabasesExceptLocal",
- *i);
+ getGlobalServiceContext()->getOpObserver()->onCreateCollection(txn, nss, options);
+
+ return collection;
+}
+
+const DatabaseCatalogEntry* Database::getDatabaseCatalogEntry() const {
+ return _dbEntry;
+}
+
+void dropAllDatabasesExceptLocal(OperationContext* txn) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+
+ vector<string> n;
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&n);
+
+ if (n.size() == 0)
+ return;
+ log() << "dropAllDatabasesExceptLocal " << n.size() << endl;
+
+ for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
+ if (*i != "local") {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ Database* db = dbHolder().get(txn, *i);
+ // This is needed since dropDatabase can't be rolled back.
+ // This can safely be replaced by "invariant(db);dropDatabase(txn, db);" once fixed
+ if (db == nullptr) {
+ log() << "database disappeared after listDatabases but before drop: " << *i;
+ } else {
+ dropDatabase(txn, db);
+ }
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropAllDatabasesExceptLocal", *i);
}
}
+}
- void dropDatabase(OperationContext* txn, Database* db ) {
- invariant( db );
-
- // Store the name so we have if for after the db object is deleted
- const string name = db->name();
- LOG(1) << "dropDatabase " << name << endl;
+void dropDatabase(OperationContext* txn, Database* db) {
+ invariant(db);
- invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
+ // Store the name so we have it for after the db object is deleted
+ const string name = db->name();
+ LOG(1) << "dropDatabase " << name << endl;
- BackgroundOperation::assertNoBgOpInProgForDb(name.c_str());
+ invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
- audit::logDropDatabase( &cc(), name );
+ BackgroundOperation::assertNoBgOpInProgForDb(name.c_str());
- dbHolder().close( txn, name );
- db = NULL; // d is now deleted
+ audit::logDropDatabase(&cc(), name);
- getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase( txn, name );
- }
+ dbHolder().close(txn, name);
+ db = NULL; // db is now deleted
- /** { ..., capped: true, size: ..., max: ... }
- * @param createDefaultIndexes - if false, defers id (and other) index creation.
- * @return true if successful
- */
- Status userCreateNS( OperationContext* txn,
- Database* db,
- StringData ns,
- BSONObj options,
- bool createDefaultIndexes ) {
+ getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase(txn, name);
+}
- invariant( db );
+/** { ..., capped: true, size: ..., max: ... }
+ * @param createDefaultIndexes - if false, defers id (and other) index creation.
+ * @return Status::OK() if successful
+*/
+Status userCreateNS(OperationContext* txn,
+ Database* db,
+ StringData ns,
+ BSONObj options,
+ bool createDefaultIndexes) {
+ invariant(db);
- LOG(1) << "create collection " << ns << ' ' << options;
+ LOG(1) << "create collection " << ns << ' ' << options;
- if ( !NamespaceString::validCollectionComponent(ns) )
- return Status( ErrorCodes::InvalidNamespace,
- str::stream() << "invalid ns: " << ns );
+ if (!NamespaceString::validCollectionComponent(ns))
+ return Status(ErrorCodes::InvalidNamespace, str::stream() << "invalid ns: " << ns);
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection(ns);
- if ( collection )
- return Status( ErrorCodes::NamespaceExists,
- "collection already exists" );
+ if (collection)
+ return Status(ErrorCodes::NamespaceExists, "collection already exists");
- CollectionOptions collectionOptions;
- Status status = collectionOptions.parse(options);
- if ( !status.isOK() )
- return status;
+ CollectionOptions collectionOptions;
+ Status status = collectionOptions.parse(options);
+ if (!status.isOK())
+ return status;
- status = validateStorageOptions(collectionOptions.storageEngine,
- &StorageEngine::Factory::validateCollectionStorageOptions);
- if ( !status.isOK() )
- return status;
+ status = validateStorageOptions(collectionOptions.storageEngine,
+ &StorageEngine::Factory::validateCollectionStorageOptions);
+ if (!status.isOK())
+ return status;
- invariant(db->createCollection(txn, ns, collectionOptions, createDefaultIndexes));
+ invariant(db->createCollection(txn, ns, collectionOptions, createDefaultIndexes));
- return Status::OK();
- }
-} // namespace mongo
+ return Status::OK();
+}
+} // namespace mongo
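
Database::duplicateUncasedName() above, together with DatabaseHolder::openDb() later in this diff, refuses to create a database whose name differs from an existing one only by case (DatabaseDifferCase). A minimal standalone restatement of that check, assuming only the POSIX strcasecmp()/strcmp() pair already used above:

    #include <cassert>
    #include <cstring>
    #include <set>
    #include <string>
    #include <strings.h>  // strcasecmp (POSIX)

    // A name collides when it matches an existing database name
    // case-insensitively but is not byte-for-byte identical to it.
    std::string duplicateUncasedName(const std::set<std::string>& existing,
                                     const std::string& name) {
        for (const auto& dbname : existing) {
            if (strcasecmp(dbname.c_str(), name.c_str()) != 0)
                continue;  // not even a case-insensitive match
            if (strcmp(dbname.c_str(), name.c_str()) == 0)
                continue;  // exact same name, not a "different case" duplicate
            return dbname;
        }
        return "";
    }

    int main() {
        const std::set<std::string> dbs = {"Accounts", "local"};
        assert(duplicateUncasedName(dbs, "accounts") == "Accounts");  // case-only difference
        assert(duplicateUncasedName(dbs, "Accounts").empty());        // exact match is allowed
        assert(duplicateUncasedName(dbs, "inventory").empty());       // no collision
        return 0;
    }
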
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index d1f2465ef60..7f07371ceff 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -40,133 +40,139 @@
namespace mongo {
- class Collection;
- class DataFile;
- class DatabaseCatalogEntry;
- class ExtentManager;
- class IndexCatalog;
- class NamespaceDetails;
- class OperationContext;
+class Collection;
+class DataFile;
+class DatabaseCatalogEntry;
+class ExtentManager;
+class IndexCatalog;
+class NamespaceDetails;
+class OperationContext;
- /**
- * Database represents a database database
- * Each database database has its own set of files -- dbname.ns, dbname.0, dbname.1, ...
- * NOT memory mapped
- */
- class Database {
- public:
- Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry);
-
- // must call close first
- ~Database();
+/**
+ * Database represents a database.
+ * Each database has its own set of files -- dbname.ns, dbname.0, dbname.1, ...
+ * NOT memory mapped
+*/
+class Database {
+public:
+ Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry);
- // closes files and other cleanup see below.
- void close( OperationContext* txn );
+ // must call close first
+ ~Database();
- const std::string& name() const { return _name; }
+ // closes files and other cleanup see below.
+ void close(OperationContext* txn);
- void clearTmpCollections(OperationContext* txn);
+ const std::string& name() const {
+ return _name;
+ }
- /**
- * Sets a new profiling level for the database and returns the outcome.
- *
- * @param txn Operation context which to use for creating the profiling collection.
- * @param newLevel New profiling level to use.
- */
- Status setProfilingLevel(OperationContext* txn, int newLevel);
+ void clearTmpCollections(OperationContext* txn);
- int getProfilingLevel() const { return _profile; }
- const char* getProfilingNS() const { return _profileName.c_str(); }
+ /**
+ * Sets a new profiling level for the database and returns the outcome.
+ *
+ * @param txn Operation context to use for creating the profiling collection.
+ * @param newLevel New profiling level to use.
+ */
+ Status setProfilingLevel(OperationContext* txn, int newLevel);
- void getStats( OperationContext* opCtx, BSONObjBuilder* output, double scale = 1 );
+ int getProfilingLevel() const {
+ return _profile;
+ }
+ const char* getProfilingNS() const {
+ return _profileName.c_str();
+ }
- const DatabaseCatalogEntry* getDatabaseCatalogEntry() const;
+ void getStats(OperationContext* opCtx, BSONObjBuilder* output, double scale = 1);
- Status dropCollection(OperationContext* txn, StringData fullns);
+ const DatabaseCatalogEntry* getDatabaseCatalogEntry() const;
- Collection* createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options = CollectionOptions(),
- bool createDefaultIndexes = true );
+ Status dropCollection(OperationContext* txn, StringData fullns);
- /**
- * @param ns - this is fully qualified, which is maybe not ideal ???
- */
- Collection* getCollection( StringData ns ) const ;
+ Collection* createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options = CollectionOptions(),
+ bool createDefaultIndexes = true);
- Collection* getCollection( const NamespaceString& ns ) const {
- return getCollection( ns.ns() );
- }
+ /**
+ * @param ns - this is fully qualified, which is maybe not ideal ???
+ */
+ Collection* getCollection(StringData ns) const;
- Collection* getOrCreateCollection( OperationContext* txn, StringData ns );
+ Collection* getCollection(const NamespaceString& ns) const {
+ return getCollection(ns.ns());
+ }
- Status renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp );
+ Collection* getOrCreateCollection(OperationContext* txn, StringData ns);
- /**
- * @return name of an existing database with same text name but different
- * casing, if one exists. Otherwise the empty std::string is returned. If
- * 'duplicates' is specified, it is filled with all duplicate names.
- // TODO move???
- */
- static std::string duplicateUncasedName( const std::string &name,
- std::set< std::string > *duplicates = 0 );
+ Status renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp);
- static Status validateDBName( StringData dbname );
+ /**
+ * @return name of an existing database with same text name but different
+ * casing, if one exists. Otherwise the empty std::string is returned. If
+ * 'duplicates' is specified, it is filled with all duplicate names.
+ // TODO move???
+ */
+ static std::string duplicateUncasedName(const std::string& name,
+ std::set<std::string>* duplicates = 0);
+
+ static Status validateDBName(StringData dbname);
- const std::string& getSystemIndexesName() const { return _indexesName; }
- private:
+ const std::string& getSystemIndexesName() const {
+ return _indexesName;
+ }
- /**
- * Gets or creates collection instance from existing metadata,
- * Returns NULL if invalid
- *
- * Note: This does not add the collection to _collections map, that must be done
- * by the caller, who takes onership of the Collection*
- */
- Collection* _getOrCreateCollectionInstance(OperationContext* txn, StringData fullns);
+private:
+ /**
+ * Gets or creates collection instance from existing metadata,
+ * Returns NULL if invalid
+ *
+ * Note: This does not add the collection to _collections map, that must be done
+ * by the caller, who takes ownership of the Collection*
+ */
+ Collection* _getOrCreateCollectionInstance(OperationContext* txn, StringData fullns);
- /**
- * Deregisters and invalidates all cursors on collection 'fullns'. Callers must specify
- * 'reason' for why the cache is being cleared.
- */
- void _clearCollectionCache(OperationContext* txn,
- StringData fullns,
- const std::string& reason);
+ /**
+ * Deregisters and invalidates all cursors on collection 'fullns'. Callers must specify
+ * 'reason' for why the cache is being cleared.
+ */
+ void _clearCollectionCache(OperationContext* txn, StringData fullns, const std::string& reason);
- class AddCollectionChange;
- class RemoveCollectionChange;
+ class AddCollectionChange;
+ class RemoveCollectionChange;
- const std::string _name; // "alleyinsider"
+ const std::string _name; // "alleyinsider"
- DatabaseCatalogEntry* _dbEntry; // not owned here
+ DatabaseCatalogEntry* _dbEntry; // not owned here
- const std::string _profileName; // "alleyinsider.system.profile"
- const std::string _indexesName; // "alleyinsider.system.indexes"
+ const std::string _profileName; // "alleyinsider.system.profile"
+ const std::string _indexesName; // "alleyinsider.system.indexes"
- int _profile; // 0=off.
+ int _profile; // 0=off.
- // TODO: make sure deletes go through
- // this in some ways is a dupe of _namespaceIndex
- // but it points to a much more useful data structure
- typedef StringMap< Collection* > CollectionMap;
- CollectionMap _collections;
+ // TODO: make sure deletes go through
+ // this in some ways is a dupe of _namespaceIndex
+ // but it points to a much more useful data structure
+ typedef StringMap<Collection*> CollectionMap;
+ CollectionMap _collections;
- friend class Collection;
- friend class NamespaceDetails;
- friend class IndexCatalog;
- };
+ friend class Collection;
+ friend class NamespaceDetails;
+ friend class IndexCatalog;
+};
- void dropDatabase(OperationContext* txn, Database* db );
+void dropDatabase(OperationContext* txn, Database* db);
- void dropAllDatabasesExceptLocal(OperationContext* txn);
+void dropAllDatabasesExceptLocal(OperationContext* txn);
- Status userCreateNS( OperationContext* txn,
- Database* db,
- StringData ns,
- BSONObj options,
- bool createDefaultIndexes = true );
+Status userCreateNS(OperationContext* txn,
+ Database* db,
+ StringData ns,
+ BSONObj options,
+ bool createDefaultIndexes = true);
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/catalog/database_catalog_entry.h b/src/mongo/db/catalog/database_catalog_entry.h
index 4c981582cc6..6ad95405fce 100644
--- a/src/mongo/db/catalog/database_catalog_entry.h
+++ b/src/mongo/db/catalog/database_catalog_entry.h
@@ -38,74 +38,73 @@
namespace mongo {
- class BSONObjBuilder;
- class CollectionCatalogEntry;
- class IndexAccessMethod;
- class IndexCatalogEntry;
- class OperationContext;
- class RecordStore;
+class BSONObjBuilder;
+class CollectionCatalogEntry;
+class IndexAccessMethod;
+class IndexCatalogEntry;
+class OperationContext;
+class RecordStore;
- struct CollectionOptions;
+struct CollectionOptions;
- class DatabaseCatalogEntry {
- public:
- DatabaseCatalogEntry( StringData name )
- : _name( name.toString() ) {
- }
+class DatabaseCatalogEntry {
+public:
+ DatabaseCatalogEntry(StringData name) : _name(name.toString()) {}
- virtual ~DatabaseCatalogEntry(){ }
+ virtual ~DatabaseCatalogEntry() {}
- const std::string& name() const { return _name; }
+ const std::string& name() const {
+ return _name;
+ }
- virtual bool exists() const = 0;
- virtual bool isEmpty() const = 0;
- virtual bool hasUserData() const = 0;
+ virtual bool exists() const = 0;
+ virtual bool isEmpty() const = 0;
+ virtual bool hasUserData() const = 0;
- virtual int64_t sizeOnDisk( OperationContext* opCtx ) const = 0;
+ virtual int64_t sizeOnDisk(OperationContext* opCtx) const = 0;
- virtual void appendExtraStats( OperationContext* opCtx,
- BSONObjBuilder* out,
- double scale ) const = 0;
+ virtual void appendExtraStats(OperationContext* opCtx,
+ BSONObjBuilder* out,
+ double scale) const = 0;
- // these are hacks :(
- virtual bool isOlderThan24( OperationContext* opCtx ) const = 0;
- virtual void markIndexSafe24AndUp( OperationContext* opCtx ) = 0;
+ // these are hacks :(
+ virtual bool isOlderThan24(OperationContext* opCtx) const = 0;
+ virtual void markIndexSafe24AndUp(OperationContext* opCtx) = 0;
- /**
- * @return true if current files on disk are compatibile with the current version.
- * if we return false, then an upgrade will be required
- */
- virtual bool currentFilesCompatible( OperationContext* opCtx ) const = 0;
+ /**
+ * @return true if current files on disk are compatible with the current version.
+ * if we return false, then an upgrade will be required
+ */
+ virtual bool currentFilesCompatible(OperationContext* opCtx) const = 0;
- // ----
+ // ----
- virtual void getCollectionNamespaces( std::list<std::string>* out ) const = 0;
+ virtual void getCollectionNamespaces(std::list<std::string>* out) const = 0;
- // The DatabaseCatalogEntry owns this, do not delete
- virtual CollectionCatalogEntry* getCollectionCatalogEntry( StringData ns ) const = 0;
+ // The DatabaseCatalogEntry owns this, do not delete
+ virtual CollectionCatalogEntry* getCollectionCatalogEntry(StringData ns) const = 0;
- // The DatabaseCatalogEntry owns this, do not delete
- virtual RecordStore* getRecordStore( StringData ns ) const = 0;
+ // The DatabaseCatalogEntry owns this, do not delete
+ virtual RecordStore* getRecordStore(StringData ns) const = 0;
- // Ownership passes to caller
- virtual IndexAccessMethod* getIndex( OperationContext* txn,
- const CollectionCatalogEntry* collection,
- IndexCatalogEntry* index ) = 0;
+ // Ownership passes to caller
+ virtual IndexAccessMethod* getIndex(OperationContext* txn,
+ const CollectionCatalogEntry* collection,
+ IndexCatalogEntry* index) = 0;
- virtual Status createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options,
- bool allocateDefaultSpace ) = 0;
+ virtual Status createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options,
+ bool allocateDefaultSpace) = 0;
- virtual Status renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp ) = 0;
+ virtual Status renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp) = 0;
- virtual Status dropCollection( OperationContext* opCtx,
- StringData ns ) = 0;
+ virtual Status dropCollection(OperationContext* opCtx, StringData ns) = 0;
- private:
- std::string _name;
- };
+private:
+ std::string _name;
+};
}
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index bf1238db95f..f4a2cf62970 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -46,169 +46,159 @@
namespace mongo {
- using std::set;
- using std::string;
- using std::stringstream;
+using std::set;
+using std::string;
+using std::stringstream;
namespace {
- StringData _todb(StringData ns) {
- size_t i = ns.find('.');
- if (i == std::string::npos) {
- uassert(13074, "db name can't be empty", ns.size());
- return ns;
- }
+StringData _todb(StringData ns) {
+ size_t i = ns.find('.');
+ if (i == std::string::npos) {
+ uassert(13074, "db name can't be empty", ns.size());
+ return ns;
+ }
- uassert(13075, "db name can't be empty", i > 0);
+ uassert(13075, "db name can't be empty", i > 0);
- const StringData d = ns.substr(0, i);
- uassert(13280, "invalid db name: " + ns.toString(), NamespaceString::validDBName(d));
+ const StringData d = ns.substr(0, i);
+ uassert(13280, "invalid db name: " + ns.toString(), NamespaceString::validDBName(d));
- return d;
- }
+ return d;
+}
- DatabaseHolder _dbHolder;
+DatabaseHolder _dbHolder;
-} // namespace
+} // namespace
- DatabaseHolder& dbHolder() {
- return _dbHolder;
- }
+DatabaseHolder& dbHolder() {
+ return _dbHolder;
+}
+
+Database* DatabaseHolder::get(OperationContext* txn, StringData ns) const {
+ const StringData db = _todb(ns);
+ invariant(txn->lockState()->isDbLockedForMode(db, MODE_IS));
- Database* DatabaseHolder::get(OperationContext* txn,
- StringData ns) const {
+ stdx::lock_guard<SimpleMutex> lk(_m);
+ DBs::const_iterator it = _dbs.find(db);
+ if (it != _dbs.end()) {
+ return it->second;
+ }
- const StringData db = _todb(ns);
- invariant(txn->lockState()->isDbLockedForMode(db, MODE_IS));
+ return NULL;
+}
- stdx::lock_guard<SimpleMutex> lk(_m);
- DBs::const_iterator it = _dbs.find(db);
- if (it != _dbs.end()) {
- return it->second;
+Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* justCreated) {
+ const StringData dbname = _todb(ns);
+ invariant(txn->lockState()->isDbLockedForMode(dbname, MODE_X));
+
+ Database* db = get(txn, ns);
+ if (db) {
+ if (justCreated) {
+ *justCreated = false;
}
- return NULL;
+ return db;
}
- Database* DatabaseHolder::openDb(OperationContext* txn,
- StringData ns,
- bool* justCreated) {
+ // Check casing
+ const string duplicate = Database::duplicateUncasedName(dbname.toString());
+ if (!duplicate.empty()) {
+ stringstream ss;
+ ss << "db already exists with different case already have: [" << duplicate
+ << "] trying to create [" << dbname.toString() << "]";
+ uasserted(ErrorCodes::DatabaseDifferCase, ss.str());
+ }
- const StringData dbname = _todb(ns);
- invariant(txn->lockState()->isDbLockedForMode(dbname, MODE_X));
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ invariant(storageEngine);
- Database* db = get(txn, ns);
- if (db) {
- if (justCreated) {
- *justCreated = false;
- }
+ DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(txn, dbname);
+ invariant(entry);
+ const bool exists = entry->exists();
+ if (!exists) {
+ audit::logCreateDatabase(&cc(), dbname);
+ }
- return db;
- }
+ if (justCreated) {
+ *justCreated = !exists;
+ }
- // Check casing
- const string duplicate = Database::duplicateUncasedName(dbname.toString());
- if (!duplicate.empty()) {
- stringstream ss;
- ss << "db already exists with different case already have: ["
- << duplicate
- << "] trying to create ["
- << dbname.toString()
- << "]";
- uasserted(ErrorCodes::DatabaseDifferCase, ss.str());
- }
+ // Do this outside of the scoped lock, because database creation does transactional
+ // operations which may block. Only one thread can be inside this method for the same DB
+ // name, because of the requirement for X-lock on the database when we enter. So there is
+ // no way we can insert two different databases for the same name.
+ db = new Database(txn, dbname, entry);
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- invariant(storageEngine);
+ stdx::lock_guard<SimpleMutex> lk(_m);
+ _dbs[dbname] = db;
- DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(txn, dbname);
- invariant(entry);
- const bool exists = entry->exists();
- if (!exists) {
- audit::logCreateDatabase(&cc(), dbname);
- }
+ return db;
+}
- if (justCreated) {
- *justCreated = !exists;
- }
+void DatabaseHolder::close(OperationContext* txn, StringData ns) {
+ // TODO: This should be fine with only a DB X-lock
+ invariant(txn->lockState()->isW());
- // Do this outside of the scoped lock, because database creation does transactional
- // operations which may block. Only one thread can be inside this method for the same DB
- // name, because of the requirement for X-lock on the database when we enter. So there is
- // no way we can insert two different databases for the same name.
- db = new Database(txn, dbname, entry);
+ const StringData dbName = _todb(ns);
- stdx::lock_guard<SimpleMutex> lk(_m);
- _dbs[dbname] = db;
+ stdx::lock_guard<SimpleMutex> lk(_m);
- return db;
+ DBs::const_iterator it = _dbs.find(dbName);
+ if (it == _dbs.end()) {
+ return;
}
- void DatabaseHolder::close(OperationContext* txn,
- StringData ns) {
- // TODO: This should be fine if only a DB X-lock
- invariant(txn->lockState()->isW());
+ it->second->close(txn);
+ delete it->second;
+ _dbs.erase(it);
- const StringData dbName = _todb(ns);
-
- stdx::lock_guard<SimpleMutex> lk(_m);
+ getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, dbName.toString());
+}
- DBs::const_iterator it = _dbs.find(dbName);
- if (it == _dbs.end()) {
- return;
- }
+bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, bool force) {
+ invariant(txn->lockState()->isW());
- it->second->close( txn );
- delete it->second;
- _dbs.erase(it);
+ stdx::lock_guard<SimpleMutex> lk(_m);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, dbName.toString());
+ set<string> dbs;
+ for (DBs::const_iterator i = _dbs.begin(); i != _dbs.end(); ++i) {
+ dbs.insert(i->first);
}
- bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, bool force) {
- invariant(txn->lockState()->isW());
+ BSONArrayBuilder bb(result.subarrayStart("dbs"));
+ int nNotClosed = 0;
+ for (set<string>::iterator i = dbs.begin(); i != dbs.end(); ++i) {
+ string name = *i;
- stdx::lock_guard<SimpleMutex> lk(_m);
+ LOG(2) << "DatabaseHolder::closeAll name:" << name;
- set< string > dbs;
- for ( DBs::const_iterator i = _dbs.begin(); i != _dbs.end(); ++i ) {
- dbs.insert( i->first );
+ if (!force && BackgroundOperation::inProgForDb(name)) {
+ log() << "WARNING: can't close database " << name
+ << " because a bg job is in progress - try killOp command";
+ nNotClosed++;
+ continue;
}
- BSONArrayBuilder bb( result.subarrayStart( "dbs" ) );
- int nNotClosed = 0;
- for( set< string >::iterator i = dbs.begin(); i != dbs.end(); ++i ) {
- string name = *i;
-
- LOG(2) << "DatabaseHolder::closeAll name:" << name;
+ Database* db = _dbs[name];
+ db->close(txn);
+ delete db;
- if( !force && BackgroundOperation::inProgForDb(name) ) {
- log() << "WARNING: can't close database "
- << name
- << " because a bg job is in progress - try killOp command";
- nNotClosed++;
- continue;
- }
+ _dbs.erase(name);
- Database* db = _dbs[name];
- db->close( txn );
- delete db;
+ getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, name);
- _dbs.erase( name );
-
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase( txn, name );
-
- bb.append( name );
- }
-
- bb.done();
- if( nNotClosed ) {
- result.append("nNotClosed", nNotClosed);
- }
+ bb.append(name);
+ }
- return true;
+ bb.done();
+ if (nNotClosed) {
+ result.append("nNotClosed", nNotClosed);
}
+
+ return true;
+}
}
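For reference, the _todb() helper above maps a full namespace string to its database prefix. A few illustrative results (the uassert codes are the ones visible in the hunk; the inputs are assumptions, not part of this change):

    _todb("test.foo")  -> "test"
    _todb("test")      -> "test"          // no '.', so the whole string is the db name
    _todb(".foo")      -> uassert(13075)  // empty db name before the '.'
    _todb("a/b.foo")   -> uassert(13280)  // if NamespaceString::validDBName() rejects "a/b"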
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index 3238b886f01..ce99747937b 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -37,62 +37,62 @@
namespace mongo {
- class Database;
- class OperationContext;
+class Database;
+class OperationContext;
+
+/**
+ * Registry of opened databases.
+ */
+class DatabaseHolder {
+public:
+ DatabaseHolder() = default;
/**
- * Registry of opened databases.
+ * Retrieves an already opened database or returns NULL. Must be called with the database
+ * locked in at least IS-mode.
*/
- class DatabaseHolder {
- public:
- DatabaseHolder() = default;
+ Database* get(OperationContext* txn, StringData ns) const;
- /**
- * Retrieves an already opened database or returns NULL. Must be called with the database
- * locked in at least IS-mode.
- */
- Database* get(OperationContext* txn, StringData ns) const;
-
- /**
- * Retrieves a database reference if it is already opened, or opens it if it hasn't been
- * opened/created yet. Must be called with the database locked in X-mode.
- *
- * @param justCreated Returns whether the database was newly created (true) or it already
- * existed (false). Can be NULL if this information is not necessary.
- */
- Database* openDb(OperationContext* txn, StringData ns, bool* justCreated = NULL);
+ /**
+ * Retrieves a database reference if it is already opened, or opens it if it hasn't been
+ * opened/created yet. Must be called with the database locked in X-mode.
+ *
+ * @param justCreated Returns whether the database was newly created (true) or it already
+ * existed (false). Can be NULL if this information is not necessary.
+ */
+ Database* openDb(OperationContext* txn, StringData ns, bool* justCreated = NULL);
- /**
- * Closes the specified database. Must be called with the database locked in X-mode.
- */
- void close(OperationContext* txn, StringData ns);
+ /**
+ * Closes the specified database. Must be called with the database locked in X-mode.
+ */
+ void close(OperationContext* txn, StringData ns);
- /**
- * Closes all opened databases. Must be called with the global lock acquired in X-mode.
- *
- * @param result Populated with the names of the databases, which were closed.
- * @param force Force close even if something underway - use at shutdown
- */
- bool closeAll(OperationContext* txn, BSONObjBuilder& result, bool force);
+ /**
+ * Closes all opened databases. Must be called with the global lock acquired in X-mode.
+ *
+ * @param result Populated with the names of the databases, which were closed.
+ * @param force Force close even if something is underway - use at shutdown
+ */
+ bool closeAll(OperationContext* txn, BSONObjBuilder& result, bool force);
- /**
- * Retrieves the names of all currently opened databases. Does not require locking, but it
- * is not guaranteed that the returned set of names will be still valid unless a global
- * lock is held, which would prevent database from disappearing or being created.
- */
- void getAllShortNames( std::set<std::string>& all ) const {
- stdx::lock_guard<SimpleMutex> lk(_m);
- for( DBs::const_iterator j=_dbs.begin(); j!=_dbs.end(); ++j ) {
- all.insert( j->first );
- }
+ /**
+ * Retrieves the names of all currently opened databases. Does not require locking, but it
+ * is not guaranteed that the returned set of names will still be valid unless a global
+ * lock is held, which would prevent databases from disappearing or being created.
+ */
+ void getAllShortNames(std::set<std::string>& all) const {
+ stdx::lock_guard<SimpleMutex> lk(_m);
+ for (DBs::const_iterator j = _dbs.begin(); j != _dbs.end(); ++j) {
+ all.insert(j->first);
}
+ }
- private:
- typedef StringMap<Database*> DBs;
+private:
+ typedef StringMap<Database*> DBs;
- mutable SimpleMutex _m;
- DBs _dbs;
- };
+ mutable SimpleMutex _m;
+ DBs _dbs;
+};
- DatabaseHolder& dbHolder();
+DatabaseHolder& dbHolder();
}
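A minimal usage sketch of the DatabaseHolder registry declared above. The namespace name, the lock helpers, and the calling context are illustrative assumptions, not part of this change:

    #include "mongo/db/catalog/database_holder.h"

    namespace mongo {
    // Sketch only: openDb() requires the database X-lock, get() at least IS.
    void exampleOpenAndLookup(OperationContext* txn) {
        ScopedTransaction transaction(txn, MODE_IX);            // assumed helper
        Lock::DBLock dbLock(txn->lockState(), "test", MODE_X);  // assumed helper

        bool justCreated = false;
        Database* db = dbHolder().openDb(txn, "test.coll", &justCreated);
        invariant(db);

        // While the lock is held, get() returns the same Database* (or NULL if not open).
        invariant(dbHolder().get(txn, "test.coll") == db);
    }
    }  // namespace mongo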
diff --git a/src/mongo/db/catalog/document_validation.cpp b/src/mongo/db/catalog/document_validation.cpp
index 73a6540a391..c0377028e3f 100644
--- a/src/mongo/db/catalog/document_validation.cpp
+++ b/src/mongo/db/catalog/document_validation.cpp
@@ -31,6 +31,6 @@
#include "mongo/db/catalog/document_validation.h"
namespace mongo {
- const OperationContext::Decoration<bool> documentValidationDisabled =
- OperationContext::declareDecoration<bool>();
+const OperationContext::Decoration<bool> documentValidationDisabled =
+ OperationContext::declareDecoration<bool>();
}
diff --git a/src/mongo/db/catalog/document_validation.h b/src/mongo/db/catalog/document_validation.h
index d03b1627a56..2bc8f8b4787 100644
--- a/src/mongo/db/catalog/document_validation.h
+++ b/src/mongo/db/catalog/document_validation.h
@@ -33,40 +33,40 @@
#include "mongo/db/operation_context.h"
namespace mongo {
- /**
- * If true, Collection should do no validation of writes from this OperationContext.
- *
- * Note that Decorations are value-constructed so this defaults to false.
- */
- extern const OperationContext::Decoration<bool> documentValidationDisabled;
+/**
+ * If true, Collection should do no validation of writes from this OperationContext.
+ *
+ * Note that Decorations are value-constructed so this defaults to false.
+ */
+extern const OperationContext::Decoration<bool> documentValidationDisabled;
- inline StringData bypassDocumentValidationCommandOption() {
- return "bypassDocumentValidation";
- }
+inline StringData bypassDocumentValidationCommandOption() {
+ return "bypassDocumentValidation";
+}
- inline bool shouldBypassDocumentValidationForCommand(const BSONObj& cmdObj) {
- return cmdObj[bypassDocumentValidationCommandOption()].trueValue();
- }
+inline bool shouldBypassDocumentValidationForCommand(const BSONObj& cmdObj) {
+ return cmdObj[bypassDocumentValidationCommandOption()].trueValue();
+}
- /**
- * Disables document validation on a single OperationContext while in scope.
- * Resets to original value when leaving scope so they are safe to nest.
- */
- class DisableDocumentValidation {
- MONGO_DISALLOW_COPYING(DisableDocumentValidation);
- public:
- DisableDocumentValidation(OperationContext* txn)
- : _txn(txn)
- , _initialState(documentValidationDisabled(_txn)) {
- documentValidationDisabled(_txn) = true;
- }
+/**
+ * Disables document validation on a single OperationContext while in scope.
+ * Resets to original value when leaving scope so they are safe to nest.
+ */
+class DisableDocumentValidation {
+ MONGO_DISALLOW_COPYING(DisableDocumentValidation);
- ~DisableDocumentValidation() {
- documentValidationDisabled(_txn) = _initialState;
- }
+public:
+ DisableDocumentValidation(OperationContext* txn)
+ : _txn(txn), _initialState(documentValidationDisabled(_txn)) {
+ documentValidationDisabled(_txn) = true;
+ }
+
+ ~DisableDocumentValidation() {
+ documentValidationDisabled(_txn) = _initialState;
+ }
- private:
- OperationContext* const _txn;
- const bool _initialState;
- };
+private:
+ OperationContext* const _txn;
+ const bool _initialState;
+};
}
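A sketch of how the decoration and RAII guard above are meant to be used; the command object and the elided write path are illustrative:

    #include "mongo/db/catalog/document_validation.h"

    namespace mongo {
    void exampleBypassValidation(OperationContext* txn, const BSONObj& cmdObj) {
        if (shouldBypassDocumentValidationForCommand(cmdObj)) {
            // Disabled only while the guard is in scope; guards nest safely.
            DisableDocumentValidation validationDisabler(txn);
            invariant(documentValidationDisabled(txn));
            // ... perform writes without collection-level validation ...
        }
        // Here the previous value has been restored by the guard's destructor.
    }
    }  // namespace mongo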
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index cb932432dab..67bfb70ae45 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -48,54 +48,55 @@
#include "mongo/util/log.h"
namespace mongo {
- Status dropCollection(OperationContext* txn,
- const NamespaceString& collectionName,
- BSONObjBuilder& result) {
- if (!serverGlobalParams.quiet) {
- log() << "CMD: drop " << collectionName;
- }
+Status dropCollection(OperationContext* txn,
+ const NamespaceString& collectionName,
+ BSONObjBuilder& result) {
+ if (!serverGlobalParams.quiet) {
+ log() << "CMD: drop " << collectionName;
+ }
- std::string dbname = collectionName.db().toString();
+ std::string dbname = collectionName.db().toString();
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
- Database* const db = autoDb.getDb();
- Collection* coll = db ? db->getCollection(collectionName) : nullptr;
+ AutoGetDb autoDb(txn, dbname, MODE_X);
+ Database* const db = autoDb.getDb();
+ Collection* coll = db ? db->getCollection(collectionName) : nullptr;
- // If db/collection does not exist, short circuit and return.
- if ( !db || !coll ) {
- return Status(ErrorCodes::NamespaceNotFound, "ns not found");
- }
- OldClientContext context(txn, collectionName);
+ // If db/collection does not exist, short circuit and return.
+ if (!db || !coll) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns not found");
+ }
+ OldClientContext context(txn, collectionName);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while dropping collection "
- << collectionName.ns());
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping collection "
+ << collectionName.ns());
+ }
- int numIndexes = coll->getIndexCatalog()->numIndexesTotal(txn);
+ int numIndexes = coll->getIndexCatalog()->numIndexesTotal(txn);
- BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
+ BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- WriteUnitOfWork wunit(txn);
- Status s = db->dropCollection(txn, collectionName.ns());
+ WriteUnitOfWork wunit(txn);
+ Status s = db->dropCollection(txn, collectionName.ns());
- result.append("ns", collectionName);
+ result.append("ns", collectionName);
- if ( !s.isOK() ) {
- return s;
- }
+ if (!s.isOK()) {
+ return s;
+ }
- result.append("nIndexesWas", numIndexes);
+ result.append("nIndexesWas", numIndexes);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "drop", collectionName.ns());
- return Status::OK();
+ wunit.commit();
}
-} // namespace mongo
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "drop", collectionName.ns());
+ return Status::OK();
+}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_collection.h b/src/mongo/db/catalog/drop_collection.h
index a12f5e8419c..c62b2c376c5 100644
--- a/src/mongo/db/catalog/drop_collection.h
+++ b/src/mongo/db/catalog/drop_collection.h
@@ -29,15 +29,15 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObjBuilder;
- class NamespaceString;
- class OperationContext;
+class BSONObjBuilder;
+class NamespaceString;
+class OperationContext;
- /**
- * Drops the collection "collectionName" and populates "result" with statistics about what
- * was removed.
- */
- Status dropCollection(OperationContext* txn,
- const NamespaceString& collectionName,
- BSONObjBuilder& result);
-} // namespace mongo
+/**
+ * Drops the collection "collectionName" and populates "result" with statistics about what
+ * was removed.
+ */
+Status dropCollection(OperationContext* txn,
+ const NamespaceString& collectionName,
+ BSONObjBuilder& result);
+} // namespace mongo
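An illustrative caller of the dropCollection() helper declared above; the namespace name is an assumption:

    #include "mongo/db/catalog/drop_collection.h"

    namespace mongo {
    Status exampleDropFoo(OperationContext* txn) {
        BSONObjBuilder result;
        Status status = dropCollection(txn, NamespaceString("test.foo"), result);
        if (!status.isOK()) {
            return status;  // e.g. NamespaceNotFound or NotMaster
        }
        // result now holds "ns" and "nIndexesWas", as appended in drop_collection.cpp above.
        return Status::OK();
    }
    }  // namespace mongo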
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index aa274ad2b28..7996c7c2eee 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -48,43 +48,43 @@
#include "mongo/util/log.h"
namespace mongo {
- Status dropDatabase(OperationContext* txn, const std::string& dbName) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- AutoGetDb autoDB(txn, dbName, MODE_X);
- Database* const db = autoDB.getDb();
- if (!db) {
- return Status(ErrorCodes::DatabaseNotFound,
- str::stream() << "Could not drop database " << dbName
- << " because it does not exist");
+Status dropDatabase(OperationContext* txn, const std::string& dbName) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ AutoGetDb autoDB(txn, dbName, MODE_X);
+ Database* const db = autoDB.getDb();
+ if (!db) {
+ return Status(ErrorCodes::DatabaseNotFound,
+ str::stream() << "Could not drop database " << dbName
+ << " because it does not exist");
+ }
+ OldClientContext context(txn, dbName);
- }
- OldClientContext context(txn, dbName);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping database " << dbName);
+ }
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while dropping database " << dbName);
- }
+ log() << "dropDatabase " << dbName << " starting";
- log() << "dropDatabase " << dbName << " starting";
+ BackgroundOperation::assertNoBgOpInProgForDb(dbName);
+ mongo::dropDatabase(txn, db);
- BackgroundOperation::assertNoBgOpInProgForDb(dbName);
- mongo::dropDatabase(txn, db);
+ log() << "dropDatabase " << dbName << " finished";
- log() << "dropDatabase " << dbName << " finished";
+ WriteUnitOfWork wunit(txn);
- WriteUnitOfWork wunit(txn);
+ getGlobalServiceContext()->getOpObserver()->onDropDatabase(txn, dbName + ".$cmd");
- getGlobalServiceContext()->getOpObserver()->onDropDatabase(txn, dbName + ".$cmd");
-
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", dbName);
-
- return Status::OK();
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", dbName);
+
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.h b/src/mongo/db/catalog/drop_database.h
index 184d66d5bf2..b60e817be27 100644
--- a/src/mongo/db/catalog/drop_database.h
+++ b/src/mongo/db/catalog/drop_database.h
@@ -29,10 +29,10 @@
#include "mongo/base/status.h"
namespace mongo {
- class OperationContext;
+class OperationContext;
- /**
- * Drops the database "dbName".
- */
- Status dropDatabase(OperationContext* txn, const std::string& dbName);
-} // namespace mongo
+/**
+ * Drops the database "dbName".
+ */
+Status dropDatabase(OperationContext* txn, const std::string& dbName);
+} // namespace mongo
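An illustrative caller of dropDatabase(); the database name is an assumption:

    #include "mongo/db/catalog/drop_database.h"

    namespace mongo {
    Status exampleDropTestDb(OperationContext* txn) {
        Status status = dropDatabase(txn, "test");
        if (status.code() == ErrorCodes::DatabaseNotFound) {
            // A missing database is reported explicitly rather than treated as success.
            return Status::OK();
        }
        return status;
    }
    }  // namespace mongo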
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 344b247290a..1f338303b69 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -48,117 +48,114 @@
namespace mongo {
namespace {
- Status wrappedRun(OperationContext* txn,
- const StringData& dbname,
- const std::string& toDeleteNs,
- Database* const db,
- const BSONObj& jsobj,
- BSONObjBuilder* anObjBuilder) {
- if (!serverGlobalParams.quiet) {
- LOG(0) << "CMD: dropIndexes " << toDeleteNs;
- }
- Collection* collection = db ? db->getCollection(toDeleteNs) : nullptr;
-
- // If db/collection does not exist, short circuit and return.
- if (!db || !collection) {
- return Status(ErrorCodes::NamespaceNotFound, "ns not found");
- }
-
- OldClientContext ctx(txn, toDeleteNs);
- BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs);
-
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
- anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn));
-
+Status wrappedRun(OperationContext* txn,
+ const StringData& dbname,
+ const std::string& toDeleteNs,
+ Database* const db,
+ const BSONObj& jsobj,
+ BSONObjBuilder* anObjBuilder) {
+ if (!serverGlobalParams.quiet) {
+ LOG(0) << "CMD: dropIndexes " << toDeleteNs;
+ }
+ Collection* collection = db ? db->getCollection(toDeleteNs) : nullptr;
- BSONElement f = jsobj.getField("index");
- if (f.type() == String) {
+ // If db/collection does not exist, short circuit and return.
+ if (!db || !collection) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns not found");
+ }
- std::string indexToDelete = f.valuestr();
+ OldClientContext ctx(txn, toDeleteNs);
+ BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs);
- if (indexToDelete == "*") {
- Status s = indexCatalog->dropAllIndexes(txn, false);
- if (!s.isOK()) {
- return s;
- }
- anObjBuilder->append("msg", "non-_id indexes dropped for collection");
- return Status::OK();
- }
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn));
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn,
- indexToDelete);
- if (desc == NULL) {
- return Status(ErrorCodes::IndexNotFound,
- str::stream() << "index not found with name ["
- << indexToDelete << "]");
- }
- if (desc->isIdIndex()) {
- return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
- }
+ BSONElement f = jsobj.getField("index");
+ if (f.type() == String) {
+ std::string indexToDelete = f.valuestr();
- Status s = indexCatalog->dropIndex(txn, desc);
+ if (indexToDelete == "*") {
+ Status s = indexCatalog->dropAllIndexes(txn, false);
if (!s.isOK()) {
return s;
}
-
+ anObjBuilder->append("msg", "non-_id indexes dropped for collection");
return Status::OK();
}
- if (f.type() == Object) {
- IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByKeyPattern(txn, f.embeddedObject());
- if (desc == NULL) {
- return Status(ErrorCodes::InvalidOptions,
- str::stream() << "can't find index with key: "
- << f.embeddedObject().toString());
- }
-
- if (desc->isIdIndex()) {
- return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
- }
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn, indexToDelete);
+ if (desc == NULL) {
+ return Status(ErrorCodes::IndexNotFound,
+ str::stream() << "index not found with name [" << indexToDelete << "]");
+ }
- Status s = indexCatalog->dropIndex(txn, desc);
- if (!s.isOK()) {
- return s;
- }
+ if (desc->isIdIndex()) {
+ return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
+ }
- return Status::OK();
+ Status s = indexCatalog->dropIndex(txn, desc);
+ if (!s.isOK()) {
+ return s;
}
- return Status(ErrorCodes::IndexNotFound, "invalid index name spec");
+ return Status::OK();
}
-} // namespace
-
- Status dropIndexes(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& idxDescriptor,
- BSONObjBuilder* result) {
- StringData dbName = ns.db();
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_X);
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while dropping indexes in "
- << ns.toString());
- }
- WriteUnitOfWork wunit(txn);
- Status status = wrappedRun(txn, dbName, ns, autoDb.getDb(), idxDescriptor, result);
- if (!status.isOK()) {
- return status;
- }
- getGlobalServiceContext()->getOpObserver()->onDropIndex(txn,
- dbName.toString() + ".$cmd",
- idxDescriptor);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbName);
+ if (f.type() == Object) {
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, f.embeddedObject());
+ if (desc == NULL) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream()
+ << "can't find index with key: " << f.embeddedObject().toString());
+ }
+
+ if (desc->isIdIndex()) {
+ return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
+ }
+
+ Status s = indexCatalog->dropIndex(txn, desc);
+ if (!s.isOK()) {
+ return s;
+ }
+
return Status::OK();
}
-} // namespace mongo
+ return Status(ErrorCodes::IndexNotFound, "invalid index name spec");
+}
+} // namespace
+
+Status dropIndexes(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& idxDescriptor,
+ BSONObjBuilder* result) {
+ StringData dbName = ns.db();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbName, MODE_X);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping indexes in "
+ << ns.toString());
+ }
+
+ WriteUnitOfWork wunit(txn);
+ Status status = wrappedRun(txn, dbName, ns, autoDb.getDb(), idxDescriptor, result);
+ if (!status.isOK()) {
+ return status;
+ }
+ getGlobalServiceContext()->getOpObserver()->onDropIndex(
+ txn, dbName.toString() + ".$cmd", idxDescriptor);
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbName);
+ return Status::OK();
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_indexes.h b/src/mongo/db/catalog/drop_indexes.h
index ba07687098e..931fa348019 100644
--- a/src/mongo/db/catalog/drop_indexes.h
+++ b/src/mongo/db/catalog/drop_indexes.h
@@ -29,18 +29,18 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class NamespaceString;
- class OperationContext;
+class BSONObj;
+class BSONObjBuilder;
+class NamespaceString;
+class OperationContext;
- /**
- * Drops the index from collection "ns" that matches the "idxDescriptor" and populates
- * "result" with some statistics about the dropped index.
- */
- Status dropIndexes(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& idxDescriptor,
- BSONObjBuilder* result);
+/**
+ * Drops the index from collection "ns" that matches the "idxDescriptor" and populates
+ * "result" with some statistics about the dropped index.
+ */
+Status dropIndexes(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& idxDescriptor,
+ BSONObjBuilder* result);
-} // namespace mongo
+} // namespace mongo
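A sketch of the two "index" specifier shapes accepted by wrappedRun() in drop_indexes.cpp above (an index name or a key pattern; "*" drops all non-_id indexes). The namespace and index names are illustrative:

    #include "mongo/db/catalog/drop_indexes.h"

    namespace mongo {
    Status exampleDropAgeIndex(OperationContext* txn, BSONObjBuilder* result) {
        const NamespaceString nss("test.people");

        BSONObj byName = BSON("index" << "age_1");          // drop by index name
        BSONObj byKey = BSON("index" << BSON("age" << 1));  // drop by key pattern
        (void)byKey;  // either specifier shape could be passed below

        Status status = dropIndexes(txn, nss, byName, result);
        // On success, wrappedRun() has already appended "nIndexesWas" to *result.
        return status;
    }
    }  // namespace mongo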
diff --git a/src/mongo/db/catalog/head_manager.h b/src/mongo/db/catalog/head_manager.h
index f3b198b276e..7a671ccf69f 100644
--- a/src/mongo/db/catalog/head_manager.h
+++ b/src/mongo/db/catalog/head_manager.h
@@ -32,19 +32,19 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
- /**
- * An abstraction for setting and getting data about the 'head' of an index. This is the data
- * that lives in the catalog to identify where an index lives.
- */
- class HeadManager {
- public:
- virtual ~HeadManager() { }
+/**
+ * An abstraction for setting and getting data about the 'head' of an index. This is the data
+ * that lives in the catalog to identify where an index lives.
+ */
+class HeadManager {
+public:
+ virtual ~HeadManager() {}
- virtual const RecordId getHead(OperationContext* txn) const = 0;
+ virtual const RecordId getHead(OperationContext* txn) const = 0;
- virtual void setHead(OperationContext* txn, const RecordId newHead) = 0;
- };
+ virtual void setHead(OperationContext* txn, const RecordId newHead) = 0;
+};
} // namespace mongo
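A hypothetical in-memory HeadManager, purely to illustrate the interface above; real implementations persist the head through the collection catalog entry:

    #include "mongo/db/catalog/head_manager.h"

    namespace mongo {
    class InMemoryHeadManager final : public HeadManager {
    public:
        const RecordId getHead(OperationContext* txn) const override {
            return _head;
        }
        void setHead(OperationContext* txn, const RecordId newHead) override {
            _head = newHead;
        }

    private:
        RecordId _head;  // not durable; a real HeadManager writes through the catalog
    };
    }  // namespace mongo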
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 734c6c6e412..85a1b8f401f 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -65,1215 +65,1140 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::vector;
- static const int INDEX_CATALOG_INIT = 283711;
- static const int INDEX_CATALOG_UNINIT = 654321;
+static const int INDEX_CATALOG_INIT = 283711;
+static const int INDEX_CATALOG_UNINIT = 654321;
- // What's the default version of our indices?
- const int DefaultIndexVersionNumber = 1;
+// What's the default version of our indices?
+const int DefaultIndexVersionNumber = 1;
- const BSONObj IndexCatalog::_idObj = BSON( "_id" << 1 );
+const BSONObj IndexCatalog::_idObj = BSON("_id" << 1);
- // -------------
+// -------------
- IndexCatalog::IndexCatalog( Collection* collection )
- : _magic(INDEX_CATALOG_UNINIT),
- _collection( collection ),
- _maxNumIndexesAllowed(_collection->getCatalogEntry()->getMaxAllowedIndexes()) {
- }
+IndexCatalog::IndexCatalog(Collection* collection)
+ : _magic(INDEX_CATALOG_UNINIT),
+ _collection(collection),
+ _maxNumIndexesAllowed(_collection->getCatalogEntry()->getMaxAllowedIndexes()) {}
- IndexCatalog::~IndexCatalog() {
- if ( _magic != INDEX_CATALOG_UNINIT ) {
- // only do this check if we haven't been initialized
- _checkMagic();
- }
- _magic = 123456;
+IndexCatalog::~IndexCatalog() {
+ if (_magic != INDEX_CATALOG_UNINIT) {
+ // only verify the magic value if init() has been called
+ _checkMagic();
}
+ _magic = 123456;
+}
- Status IndexCatalog::init(OperationContext* txn) {
- vector<string> indexNames;
- _collection->getCatalogEntry()->getAllIndexes( txn, &indexNames );
-
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- const string& indexName = indexNames[i];
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec( txn,
- indexName ).getOwned();
-
- if ( !_collection->getCatalogEntry()->isIndexReady( txn, indexName ) ) {
- _unfinishedIndexes.push_back( spec );
- continue;
- }
+Status IndexCatalog::init(OperationContext* txn) {
+ vector<string> indexNames;
+ _collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
- BSONObj keyPattern = spec.getObjectField("key");
- IndexDescriptor* descriptor = new IndexDescriptor( _collection,
- _getAccessMethodName(txn,
- keyPattern),
- spec );
- const bool initFromDisk = true;
- IndexCatalogEntry* entry = _setupInMemoryStructures( txn, descriptor, initFromDisk );
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ const string& indexName = indexNames[i];
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(txn, indexName).getOwned();
- fassert( 17340, entry->isReady( txn ) );
+ if (!_collection->getCatalogEntry()->isIndexReady(txn, indexName)) {
+ _unfinishedIndexes.push_back(spec);
+ continue;
}
- if ( _unfinishedIndexes.size() ) {
- // if there are left over indexes, we don't let anyone add/drop indexes
- // until someone goes and fixes them
- log() << "found " << _unfinishedIndexes.size()
- << " index(es) that wasn't finished before shutdown";
- }
+ BSONObj keyPattern = spec.getObjectField("key");
+ IndexDescriptor* descriptor =
+ new IndexDescriptor(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ const bool initFromDisk = true;
+ IndexCatalogEntry* entry = _setupInMemoryStructures(txn, descriptor, initFromDisk);
- _magic = INDEX_CATALOG_INIT;
- return Status::OK();
+ fassert(17340, entry->isReady(txn));
}
-namespace {
- class IndexCleanupOnRollback : public RecoveryUnit::Change {
- public:
- /**
- * None of these pointers are owned by this class.
- */
- IndexCleanupOnRollback(OperationContext* txn,
- Collection* collection,
- IndexCatalogEntryContainer* entries,
- const IndexDescriptor* desc)
- : _txn(txn),
- _collection(collection),
- _entries(entries),
- _desc(desc) {
- }
-
- virtual void commit() {}
-
- virtual void rollback() {
- _entries->remove(_desc);
- _collection->infoCache()->reset(_txn);
- }
-
- private:
- OperationContext* _txn;
- Collection* _collection;
- IndexCatalogEntryContainer* _entries;
- const IndexDescriptor* _desc;
- };
-} // namespace
-
- IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* txn,
- IndexDescriptor* descriptor,
- bool initFromDisk) {
- unique_ptr<IndexDescriptor> descriptorCleanup( descriptor );
-
- unique_ptr<IndexCatalogEntry> entry( new IndexCatalogEntry( _collection->ns().ns(),
- _collection->getCatalogEntry(),
- descriptorCleanup.release(),
- _collection->infoCache() ) );
-
- entry->init( txn, _collection->_dbce->getIndex( txn,
- _collection->getCatalogEntry(),
- entry.get() ) );
-
- IndexCatalogEntry* save = entry.get();
- _entries.add( entry.release() );
-
- if (!initFromDisk) {
- txn->recoveryUnit()->registerChange(new IndexCleanupOnRollback(txn,
- _collection,
- &_entries,
- descriptor));
- }
+ if (_unfinishedIndexes.size()) {
+ // if there are left over indexes, we don't let anyone add/drop indexes
+ // until someone goes and fixes them
+ log() << "found " << _unfinishedIndexes.size()
+ << " index(es) that wasn't finished before shutdown";
+ }
- invariant( save == _entries.find( descriptor ) );
- invariant( save == _entries.find( descriptor->indexName() ) );
+ _magic = INDEX_CATALOG_INIT;
+ return Status::OK();
+}
- return save;
+namespace {
+class IndexCleanupOnRollback : public RecoveryUnit::Change {
+public:
+ /**
+ * None of these pointers are owned by this class.
+ */
+ IndexCleanupOnRollback(OperationContext* txn,
+ Collection* collection,
+ IndexCatalogEntryContainer* entries,
+ const IndexDescriptor* desc)
+ : _txn(txn), _collection(collection), _entries(entries), _desc(desc) {}
+
+ virtual void commit() {}
+
+ virtual void rollback() {
+ _entries->remove(_desc);
+ _collection->infoCache()->reset(_txn);
}
- bool IndexCatalog::ok() const {
- return ( _magic == INDEX_CATALOG_INIT );
+private:
+ OperationContext* _txn;
+ Collection* _collection;
+ IndexCatalogEntryContainer* _entries;
+ const IndexDescriptor* _desc;
+};
+} // namespace
+
+IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* txn,
+ IndexDescriptor* descriptor,
+ bool initFromDisk) {
+ unique_ptr<IndexDescriptor> descriptorCleanup(descriptor);
+
+ unique_ptr<IndexCatalogEntry> entry(new IndexCatalogEntry(_collection->ns().ns(),
+ _collection->getCatalogEntry(),
+ descriptorCleanup.release(),
+ _collection->infoCache()));
+
+ entry->init(txn,
+ _collection->_dbce->getIndex(txn, _collection->getCatalogEntry(), entry.get()));
+
+ IndexCatalogEntry* save = entry.get();
+ _entries.add(entry.release());
+
+ if (!initFromDisk) {
+ txn->recoveryUnit()->registerChange(
+ new IndexCleanupOnRollback(txn, _collection, &_entries, descriptor));
}
- void IndexCatalog::_checkMagic() const {
- if ( ok() ) {
- return;
- }
- log() << "IndexCatalog::_magic wrong, is : " << _magic;
- fassertFailed(17198);
- }
+ invariant(save == _entries.find(descriptor));
+ invariant(save == _entries.find(descriptor->indexName()));
- Status IndexCatalog::checkUnfinished() const {
- if ( _unfinishedIndexes.size() == 0 )
- return Status::OK();
+ return save;
+}
- return Status( ErrorCodes::InternalError,
- str::stream()
- << "IndexCatalog has left over indexes that must be cleared"
- << " ns: " << _collection->ns().ns() );
- }
+bool IndexCatalog::ok() const {
+ return (_magic == INDEX_CATALOG_INIT);
+}
- bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn,
- const BSONObj& keyPattern) const {
- string pluginName = IndexNames::findPluginName(keyPattern);
- bool known = IndexNames::isKnownName(pluginName);
-
- if ( !_collection->_dbce->isOlderThan24( txn ) ) {
- // RulesFor24+
- // This assert will be triggered when downgrading from a future version that
- // supports an index plugin unsupported by this version.
- uassert(17197, str::stream() << "Invalid index type '" << pluginName << "' "
- << "in index " << keyPattern,
- known);
- return false;
- }
+void IndexCatalog::_checkMagic() const {
+ if (ok()) {
+ return;
+ }
+ log() << "IndexCatalog::_magic wrong, is : " << _magic;
+ fassertFailed(17198);
+}
- // RulesFor22
- if (!known) {
- log() << "warning: can't find plugin [" << pluginName << "]" << endl;
- return true;
- }
+Status IndexCatalog::checkUnfinished() const {
+ if (_unfinishedIndexes.size() == 0)
+ return Status::OK();
- if (!IndexNames::existedBefore24(pluginName)) {
- warning() << "Treating index " << keyPattern << " as ascending since "
- << "it was created before 2.4 and '" << pluginName << "' "
- << "was not a valid type at that time."
- << endl;
- return true;
- }
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "IndexCatalog has left over indexes that must be cleared"
+ << " ns: " << _collection->ns().ns());
+}
+bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const {
+ string pluginName = IndexNames::findPluginName(keyPattern);
+ bool known = IndexNames::isKnownName(pluginName);
+
+ if (!_collection->_dbce->isOlderThan24(txn)) {
+ // RulesFor24+
+ // This assert will be triggered when downgrading from a future version that
+ // supports an index plugin unsupported by this version.
+ uassert(17197,
+ str::stream() << "Invalid index type '" << pluginName << "' "
+ << "in index " << keyPattern,
+ known);
return false;
}
- string IndexCatalog::_getAccessMethodName(OperationContext* txn,
- const BSONObj& keyPattern) const {
- if ( _shouldOverridePlugin(txn, keyPattern) ) {
- return "";
- }
-
- return IndexNames::findPluginName(keyPattern);
+ // RulesFor22
+ if (!known) {
+ log() << "warning: can't find plugin [" << pluginName << "]" << endl;
+ return true;
}
+ if (!IndexNames::existedBefore24(pluginName)) {
+ warning() << "Treating index " << keyPattern << " as ascending since "
+ << "it was created before 2.4 and '" << pluginName << "' "
+ << "was not a valid type at that time." << endl;
+ return true;
+ }
- // ---------------------------
-
- Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded( OperationContext* txn,
- const string& newPluginName ) {
-
- // first check if requested index requires pdfile minor version to be bumped
- if ( IndexNames::existedBefore24(newPluginName) ) {
- return Status::OK();
- }
-
- DatabaseCatalogEntry* dbce = _collection->_dbce;
-
- if ( !dbce->isOlderThan24( txn ) ) {
- return Status::OK(); // these checks have already been done
- }
+ return false;
+}
- // Everything below is MMAPv1 specific since it was the only storage engine that existed
- // before 2.4. We look at all indexes in this database to make sure that none of them use
- // plugins that didn't exist before 2.4. If that holds, we mark the database as "2.4-clean"
- // which allows creation of indexes using new plugins.
-
- RecordStore* indexes = dbce->getRecordStore(dbce->name() + ".system.indexes");
- auto cursor = indexes->getCursor(txn);
- while (auto record = cursor->next()) {
- const BSONObj index = record->data.releaseToBson();
- const BSONObj key = index.getObjectField("key");
- const string plugin = IndexNames::findPluginName(key);
- if ( IndexNames::existedBefore24(plugin) )
- continue;
+string IndexCatalog::_getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const {
+ if (_shouldOverridePlugin(txn, keyPattern)) {
+ return "";
+ }
- const string errmsg = str::stream()
- << "Found pre-existing index " << index << " with invalid type '" << plugin << "'. "
- << "Disallowing creation of new index type '" << newPluginName << "'. See "
- << "http://dochub.mongodb.org/core/index-type-changes"
- ;
+ return IndexNames::findPluginName(keyPattern);
+}
- return Status( ErrorCodes::CannotCreateIndex, errmsg );
- }
- dbce->markIndexSafe24AndUp( txn );
+// ---------------------------
+Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
+ const string& newPluginName) {
+ // first check if requested index requires pdfile minor version to be bumped
+ if (IndexNames::existedBefore24(newPluginName)) {
return Status::OK();
}
- StatusWith<BSONObj> IndexCatalog::prepareSpecForCreate( OperationContext* txn,
- const BSONObj& original ) const {
- Status status = _isSpecOk( original );
- if ( !status.isOK() )
- return StatusWith<BSONObj>( status );
-
- BSONObj fixed = _fixIndexSpec( original );
+ DatabaseCatalogEntry* dbce = _collection->_dbce;
- // we double check with new index spec
- status = _isSpecOk( fixed );
- if ( !status.isOK() )
- return StatusWith<BSONObj>( status );
-
- status = _doesSpecConflictWithExisting( txn, fixed );
- if ( !status.isOK() )
- return StatusWith<BSONObj>( status );
-
- return StatusWith<BSONObj>( fixed );
+ if (!dbce->isOlderThan24(txn)) {
+ return Status::OK(); // these checks have already been done
}
- Status IndexCatalog::createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(),
- MODE_X));
- invariant(_collection->numRecords(txn) == 0);
+ // Everything below is MMAPv1 specific since it was the only storage engine that existed
+ // before 2.4. We look at all indexes in this database to make sure that none of them use
+ // plugins that didn't exist before 2.4. If that holds, we mark the database as "2.4-clean"
+ // which allows creation of indexes using new plugins.
+
+ RecordStore* indexes = dbce->getRecordStore(dbce->name() + ".system.indexes");
+ auto cursor = indexes->getCursor(txn);
+ while (auto record = cursor->next()) {
+ const BSONObj index = record->data.releaseToBson();
+ const BSONObj key = index.getObjectField("key");
+ const string plugin = IndexNames::findPluginName(key);
+ if (IndexNames::existedBefore24(plugin))
+ continue;
+
+ const string errmsg = str::stream()
+ << "Found pre-existing index " << index << " with invalid type '" << plugin << "'. "
+ << "Disallowing creation of new index type '" << newPluginName << "'. See "
+ << "http://dochub.mongodb.org/core/index-type-changes";
+
+ return Status(ErrorCodes::CannotCreateIndex, errmsg);
+ }
- _checkMagic();
- Status status = checkUnfinished();
- if ( !status.isOK() )
- return status;
+ dbce->markIndexSafe24AndUp(txn);
- StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate( txn, spec );
- status = statusWithSpec.getStatus();
- if ( !status.isOK() )
- return status;
- spec = statusWithSpec.getValue();
+ return Status::OK();
+}
- string pluginName = IndexNames::findPluginName( spec["key"].Obj() );
- if ( pluginName.size() ) {
- Status s = _upgradeDatabaseMinorVersionIfNeeded( txn, pluginName );
- if ( !s.isOK() )
- return s;
- }
+StatusWith<BSONObj> IndexCatalog::prepareSpecForCreate(OperationContext* txn,
+ const BSONObj& original) const {
+ Status status = _isSpecOk(original);
+ if (!status.isOK())
+ return StatusWith<BSONObj>(status);
- // now going to touch disk
- IndexBuildBlock indexBuildBlock(txn, _collection, spec);
- status = indexBuildBlock.init();
- if ( !status.isOK() )
- return status;
+ BSONObj fixed = _fixIndexSpec(original);
- // sanity checks, etc...
- IndexCatalogEntry* entry = indexBuildBlock.getEntry();
- invariant( entry );
- IndexDescriptor* descriptor = entry->descriptor();
- invariant( descriptor );
- invariant( entry == _entries.find( descriptor ) );
+ // we double check with new index spec
+ status = _isSpecOk(fixed);
+ if (!status.isOK())
+ return StatusWith<BSONObj>(status);
- status = entry->accessMethod()->initializeAsEmpty(txn);
- if (!status.isOK())
- return status;
- indexBuildBlock.success();
+ status = _doesSpecConflictWithExisting(txn, fixed);
+ if (!status.isOK())
+ return StatusWith<BSONObj>(status);
- // sanity check
- invariant(_collection->getCatalogEntry()->isIndexReady(txn, descriptor->indexName()));
+ return StatusWith<BSONObj>(fixed);
+}
- return Status::OK();
+Status IndexCatalog::createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec) {
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ invariant(_collection->numRecords(txn) == 0);
+
+ _checkMagic();
+ Status status = checkUnfinished();
+ if (!status.isOK())
+ return status;
+
+ StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate(txn, spec);
+ status = statusWithSpec.getStatus();
+ if (!status.isOK())
+ return status;
+ spec = statusWithSpec.getValue();
+
+ string pluginName = IndexNames::findPluginName(spec["key"].Obj());
+ if (pluginName.size()) {
+ Status s = _upgradeDatabaseMinorVersionIfNeeded(txn, pluginName);
+ if (!s.isOK())
+ return s;
}
- IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* txn,
- Collection* collection,
- const BSONObj& spec )
- : _collection( collection ),
- _catalog( collection->getIndexCatalog() ),
- _ns( _catalog->_collection->ns().ns() ),
- _spec( spec.getOwned() ),
- _entry( NULL ),
- _txn(txn) {
-
- invariant( collection );
- }
+ // now going to touch disk
+ IndexBuildBlock indexBuildBlock(txn, _collection, spec);
+ status = indexBuildBlock.init();
+ if (!status.isOK())
+ return status;
- Status IndexCatalog::IndexBuildBlock::init() {
- // need this first for names, etc...
- BSONObj keyPattern = _spec.getObjectField("key");
- IndexDescriptor* descriptor = new IndexDescriptor( _collection,
- IndexNames::findPluginName(keyPattern),
- _spec );
- unique_ptr<IndexDescriptor> descriptorCleaner( descriptor );
+ // sanity checks, etc...
+ IndexCatalogEntry* entry = indexBuildBlock.getEntry();
+ invariant(entry);
+ IndexDescriptor* descriptor = entry->descriptor();
+ invariant(descriptor);
+ invariant(entry == _entries.find(descriptor));
- _indexName = descriptor->indexName();
- _indexNamespace = descriptor->indexNamespace();
+ status = entry->accessMethod()->initializeAsEmpty(txn);
+ if (!status.isOK())
+ return status;
+ indexBuildBlock.success();
- /// ---------- setup on disk structures ----------------
+ // sanity check
+ invariant(_collection->getCatalogEntry()->isIndexReady(txn, descriptor->indexName()));
- Status status = _collection->getCatalogEntry()->prepareForIndexBuild( _txn, descriptor );
- if ( !status.isOK() )
- return status;
+ return Status::OK();
+}
- /// ---------- setup in memory structures ----------------
- const bool initFromDisk = false;
- _entry = _catalog->_setupInMemoryStructures(_txn,
- descriptorCleaner.release(),
- initFromDisk);
+IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& spec)
+ : _collection(collection),
+ _catalog(collection->getIndexCatalog()),
+ _ns(_catalog->_collection->ns().ns()),
+ _spec(spec.getOwned()),
+ _entry(NULL),
+ _txn(txn) {
+ invariant(collection);
+}
- return Status::OK();
- }
+Status IndexCatalog::IndexBuildBlock::init() {
+ // need this first for names, etc...
+ BSONObj keyPattern = _spec.getObjectField("key");
+ IndexDescriptor* descriptor =
+ new IndexDescriptor(_collection, IndexNames::findPluginName(keyPattern), _spec);
+ unique_ptr<IndexDescriptor> descriptorCleaner(descriptor);
- IndexCatalog::IndexBuildBlock::~IndexBuildBlock() {
- // Don't need to call fail() here, as rollback will clean everything up for us.
- }
+ _indexName = descriptor->indexName();
+ _indexNamespace = descriptor->indexNamespace();
- void IndexCatalog::IndexBuildBlock::fail() {
- fassert( 17204, _catalog->_collection->ok() ); // defensive
+ /// ---------- setup on disk structures ----------------
- IndexCatalogEntry* entry = _catalog->_entries.find( _indexName );
- invariant( entry == _entry );
+ Status status = _collection->getCatalogEntry()->prepareForIndexBuild(_txn, descriptor);
+ if (!status.isOK())
+ return status;
- if ( entry ) {
- _catalog->_dropIndex(_txn, entry);
- }
- else {
- _catalog->_deleteIndexFromDisk( _txn,
- _indexName,
- _indexNamespace );
- }
- }
+ /// ---------- setup in memory structures ----------------
+ const bool initFromDisk = false;
+ _entry = _catalog->_setupInMemoryStructures(_txn, descriptorCleaner.release(), initFromDisk);
- void IndexCatalog::IndexBuildBlock::success() {
- fassert( 17207, _catalog->_collection->ok() );
+ return Status::OK();
+}
- _catalog->_collection->getCatalogEntry()->indexBuildSuccess( _txn, _indexName );
+IndexCatalog::IndexBuildBlock::~IndexBuildBlock() {
+ // Don't need to call fail() here, as rollback will clean everything up for us.
+}
- IndexDescriptor* desc = _catalog->findIndexByName( _txn, _indexName, true );
- fassert( 17330, desc );
- IndexCatalogEntry* entry = _catalog->_entries.find( desc );
- fassert( 17331, entry && entry == _entry );
+void IndexCatalog::IndexBuildBlock::fail() {
+ fassert(17204, _catalog->_collection->ok()); // defensive
- entry->setIsReady( true );
+ IndexCatalogEntry* entry = _catalog->_entries.find(_indexName);
+ invariant(entry == _entry);
- _catalog->_collection->infoCache()->addedIndex( _txn );
+ if (entry) {
+ _catalog->_dropIndex(_txn, entry);
+ } else {
+ _catalog->_deleteIndexFromDisk(_txn, _indexName, _indexNamespace);
}
+}
- namespace {
- // While technically recursive, only current possible with 2 levels.
- Status _checkValidFilterExpressions(MatchExpression* expression, int level = 0) {
- if (!expression)
- return Status::OK();
-
- switch(expression->matchType()) {
- case MatchExpression::AND:
- if (level > 0)
- return Status(ErrorCodes::CannotCreateIndex,
- "$and only supported in partialFilterExpression at top level");
- for (size_t i = 0; i < expression->numChildren(); i++) {
- Status status = _checkValidFilterExpressions(expression->getChild(i),
- level + 1 );
- if (!status.isOK())
- return status;
- }
- return Status::OK();
- case MatchExpression::EQ:
- case MatchExpression::LT:
- case MatchExpression::LTE:
- case MatchExpression::GT:
- case MatchExpression::GTE:
- case MatchExpression::EXISTS:
- case MatchExpression::TYPE_OPERATOR:
- return Status::OK();
- default:
- return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "unsupported expression in partial index: "
- << expression->toString());
- }
- }
- }
+void IndexCatalog::IndexBuildBlock::success() {
+ fassert(17207, _catalog->_collection->ok());
- Status IndexCatalog::_isSpecOk( const BSONObj& spec ) const {
+ _catalog->_collection->getCatalogEntry()->indexBuildSuccess(_txn, _indexName);
- const NamespaceString& nss = _collection->ns();
+ IndexDescriptor* desc = _catalog->findIndexByName(_txn, _indexName, true);
+ fassert(17330, desc);
+ IndexCatalogEntry* entry = _catalog->_entries.find(desc);
+ fassert(17331, entry && entry == _entry);
- BSONElement vElt = spec["v"];
- if( !vElt.eoo() ) {
- if ( !vElt.isNumber() ) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "non-numeric value for \"v\" field:" << vElt );
- }
- double v = vElt.Number();
+ entry->setIsReady(true);
- // SERVER-16893 Forbid use of v0 indexes with non-mmapv1 engines
- if (v == 0 && !getGlobalServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "use of v0 indexes is only allowed with the "
- << "mmapv1 storage engine");
- }
+ _catalog->_collection->infoCache()->addedIndex(_txn);
+}
- // note (one day) we may be able to fresh build less versions than we can use
- // isASupportedIndexVersionNumber() is what we can use
- if ( v != 0 && v != 1 ) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "this version of mongod cannot build new indexes "
- << "of version number " << v );
- }
- }
+namespace {
+// While technically recursive, only currently possible with 2 levels.
+Status _checkValidFilterExpressions(MatchExpression* expression, int level = 0) {
+ if (!expression)
+ return Status::OK();
- if ( nss.isSystemDotIndexes() )
- return Status( ErrorCodes::CannotCreateIndex,
- "cannot create indexes on the system.indexes collection" );
+ switch (expression->matchType()) {
+ case MatchExpression::AND:
+ if (level > 0)
+ return Status(ErrorCodes::CannotCreateIndex,
+ "$and only supported in partialFilterExpression at top level");
+ for (size_t i = 0; i < expression->numChildren(); i++) {
+ Status status = _checkValidFilterExpressions(expression->getChild(i), level + 1);
+ if (!status.isOK())
+ return status;
+ }
+ return Status::OK();
+ case MatchExpression::EQ:
+ case MatchExpression::LT:
+ case MatchExpression::LTE:
+ case MatchExpression::GT:
+ case MatchExpression::GTE:
+ case MatchExpression::EXISTS:
+ case MatchExpression::TYPE_OPERATOR:
+ return Status::OK();
+ default:
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "unsupported expression in partial index: "
+ << expression->toString());
+ }
+}
+}
- if ( nss.isOplog() )
- return Status( ErrorCodes::CannotCreateIndex,
- "cannot create indexes on the oplog" );
+Status IndexCatalog::_isSpecOk(const BSONObj& spec) const {
+ const NamespaceString& nss = _collection->ns();
- if ( nss.coll() == "$freelist" ) {
- // this isn't really proper, but we never want it and its not an error per se
- return Status( ErrorCodes::IndexAlreadyExists, "cannot index freelist" );
+ BSONElement vElt = spec["v"];
+ if (!vElt.eoo()) {
+ if (!vElt.isNumber()) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "non-numeric value for \"v\" field:" << vElt);
}
+ double v = vElt.Number();
- const BSONElement specNamespace = spec["ns"];
- if ( specNamespace.type() != String )
- return Status( ErrorCodes::CannotCreateIndex,
- "the index spec needs a 'ns' string field" );
-
- if ( nss.ns() != specNamespace.valueStringData())
- return Status( ErrorCodes::CannotCreateIndex,
- "the index spec ns does not match" );
+ // SERVER-16893 Forbid use of v0 indexes with non-mmapv1 engines
+ if (v == 0 && !getGlobalServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "use of v0 indexes is only allowed with the "
+ << "mmapv1 storage engine");
+ }
- // logical name of the index
- const BSONElement nameElem = spec["name"];
- if (nameElem.type() != String)
+ // note: one day we may be able to freshly build fewer index versions than we can use;
+ // isASupportedIndexVersionNumber() is what we can use
+ if (v != 0 && v != 1) {
return Status(ErrorCodes::CannotCreateIndex,
- "index name must be specified as a string");
-
- const StringData name = nameElem.valueStringData();
- if (name.find('\0') != std::string::npos)
- return Status(ErrorCodes::CannotCreateIndex, "index names cannot contain NUL bytes");
-
- if (name.empty())
- return Status(ErrorCodes::CannotCreateIndex, "index names cannot be empty");
-
- const std::string indexNamespace = IndexDescriptor::makeIndexNamespace( nss.ns(), name );
- if ( indexNamespace.length() > NamespaceString::MaxNsLen )
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "namespace name generated from index name \"" <<
- indexNamespace << "\" is too long (127 byte max)" );
-
- const BSONObj key = spec.getObjectField("key");
- const Status keyStatus = validateKeyPattern(key);
- if (!keyStatus.isOK()) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "bad index key pattern " << key << ": "
- << keyStatus.reason() );
+ str::stream() << "this version of mongod cannot build new indexes "
+ << "of version number " << v);
}
+ }
- const bool isSparse = spec["sparse"].trueValue();
+ if (nss.isSystemDotIndexes())
+ return Status(ErrorCodes::CannotCreateIndex,
+ "cannot create indexes on the system.indexes collection");
- // Ensure if there is a filter, its valid.
- BSONElement filterElement = spec.getField("partialFilterExpression");
- if ( filterElement ) {
- if ( isSparse ) {
- return Status( ErrorCodes::CannotCreateIndex,
- "cannot mix \"partialFilterExpression\" and \"sparse\" options" );
- }
+ if (nss.isOplog())
+ return Status(ErrorCodes::CannotCreateIndex, "cannot create indexes on the oplog");
- if ( filterElement.type() != Object ) {
- return Status(ErrorCodes::CannotCreateIndex,
- "'partialFilterExpression' for an index has to be a document");
- }
- StatusWithMatchExpression res = MatchExpressionParser::parse( filterElement.Obj() );
- if ( !res.isOK() ) {
- return res.getStatus();
- }
- const std::unique_ptr<MatchExpression> filterExpr( res.getValue() );
+ if (nss.coll() == "$freelist") {
+ // this isn't really proper, but we never want it and it's not an error per se
+ return Status(ErrorCodes::IndexAlreadyExists, "cannot index freelist");
+ }
- Status status = _checkValidFilterExpressions( filterExpr.get() );
- if (!status.isOK()) {
- return status;
- }
- }
+ const BSONElement specNamespace = spec["ns"];
+ if (specNamespace.type() != String)
+ return Status(ErrorCodes::CannotCreateIndex, "the index spec needs a 'ns' string field");
+
+ if (nss.ns() != specNamespace.valueStringData())
+ return Status(ErrorCodes::CannotCreateIndex, "the index spec ns does not match");
+
+ // logical name of the index
+ const BSONElement nameElem = spec["name"];
+ if (nameElem.type() != String)
+ return Status(ErrorCodes::CannotCreateIndex, "index name must be specified as a string");
+
+ const StringData name = nameElem.valueStringData();
+ if (name.find('\0') != std::string::npos)
+ return Status(ErrorCodes::CannotCreateIndex, "index names cannot contain NUL bytes");
+
+ if (name.empty())
+ return Status(ErrorCodes::CannotCreateIndex, "index names cannot be empty");
+
+ const std::string indexNamespace = IndexDescriptor::makeIndexNamespace(nss.ns(), name);
+ if (indexNamespace.length() > NamespaceString::MaxNsLen)
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "namespace name generated from index name \""
+ << indexNamespace << "\" is too long (127 byte max)");
+
+ const BSONObj key = spec.getObjectField("key");
+ const Status keyStatus = validateKeyPattern(key);
+ if (!keyStatus.isOK()) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "bad index key pattern " << key << ": "
+ << keyStatus.reason());
+ }
- if ( IndexDescriptor::isIdIndexPattern( key ) ) {
- BSONElement uniqueElt = spec["unique"];
- if ( uniqueElt && !uniqueElt.trueValue() ) {
- return Status( ErrorCodes::CannotCreateIndex, "_id index cannot be non-unique" );
- }
+ const bool isSparse = spec["sparse"].trueValue();
- if ( filterElement ) {
- return Status( ErrorCodes::CannotCreateIndex, "_id index cannot be partial" );
- }
+    // Ensure that if there is a filter, it is valid.
+ BSONElement filterElement = spec.getField("partialFilterExpression");
+ if (filterElement) {
+ if (isSparse) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ "cannot mix \"partialFilterExpression\" and \"sparse\" options");
+ }
- if ( isSparse ) {
- return Status( ErrorCodes::CannotCreateIndex, "_id index cannot be sparse" );
- }
+ if (filterElement.type() != Object) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ "'partialFilterExpression' for an index has to be a document");
}
- else {
- // for non _id indexes, we check to see if replication has turned off all indexes
- // we _always_ created _id index
- if (!repl::getGlobalReplicationCoordinator()->buildsIndexes()) {
- // this is not exactly the right error code, but I think will make the most sense
- return Status( ErrorCodes::IndexAlreadyExists, "no indexes per repl" );
- }
+ StatusWithMatchExpression res = MatchExpressionParser::parse(filterElement.Obj());
+ if (!res.isOK()) {
+ return res.getStatus();
}
+ const std::unique_ptr<MatchExpression> filterExpr(res.getValue());
- // --- only storage engine checks allowed below this ----
+ Status status = _checkValidFilterExpressions(filterExpr.get());
+ if (!status.isOK()) {
+ return status;
+ }
+ }
- BSONElement storageEngineElement = spec.getField("storageEngine");
- if (storageEngineElement.eoo()) {
- return Status::OK();
+ if (IndexDescriptor::isIdIndexPattern(key)) {
+ BSONElement uniqueElt = spec["unique"];
+ if (uniqueElt && !uniqueElt.trueValue()) {
+ return Status(ErrorCodes::CannotCreateIndex, "_id index cannot be non-unique");
}
- if (storageEngineElement.type() != mongo::Object) {
- return Status(ErrorCodes::CannotCreateIndex, "'storageEngine' has to be a document.");
+
+ if (filterElement) {
+ return Status(ErrorCodes::CannotCreateIndex, "_id index cannot be partial");
}
- BSONObj storageEngineOptions = storageEngineElement.Obj();
- if (storageEngineOptions.isEmpty()) {
- return Status(ErrorCodes::CannotCreateIndex,
- "Empty 'storageEngine' options are invalid. "
- "Please remove, or include valid options.");
+ if (isSparse) {
+ return Status(ErrorCodes::CannotCreateIndex, "_id index cannot be sparse");
}
- Status storageEngineStatus = validateStorageOptions(storageEngineOptions,
- &StorageEngine::Factory::validateIndexStorageOptions);
- if (!storageEngineStatus.isOK()) {
- return storageEngineStatus;
+ } else {
+        // for non-_id indexes, we check to see if replication has turned off all index builds;
+        // we _always_ create the _id index
+ if (!repl::getGlobalReplicationCoordinator()->buildsIndexes()) {
+            // this is not exactly the right error code, but I think it will make the most sense
+ return Status(ErrorCodes::IndexAlreadyExists, "no indexes per repl");
}
+ }
+
+ // --- only storage engine checks allowed below this ----
+ BSONElement storageEngineElement = spec.getField("storageEngine");
+ if (storageEngineElement.eoo()) {
return Status::OK();
}
+ if (storageEngineElement.type() != mongo::Object) {
+ return Status(ErrorCodes::CannotCreateIndex, "'storageEngine' has to be a document.");
+ }
+ BSONObj storageEngineOptions = storageEngineElement.Obj();
+ if (storageEngineOptions.isEmpty()) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ "Empty 'storageEngine' options are invalid. "
+ "Please remove, or include valid options.");
+ }
+ Status storageEngineStatus = validateStorageOptions(
+ storageEngineOptions, &StorageEngine::Factory::validateIndexStorageOptions);
+ if (!storageEngineStatus.isOK()) {
+ return storageEngineStatus;
+ }
- Status IndexCatalog::_doesSpecConflictWithExisting( OperationContext* txn,
- const BSONObj& spec ) const {
- const char *name = spec.getStringField("name");
- invariant( name[0] );
-
- const BSONObj key = spec.getObjectField("key");
-
- {
- // Check both existing and in-progress indexes (2nd param = true)
- const IndexDescriptor* desc = findIndexByName( txn, name, true );
- if ( desc ) {
- // index already exists with same name
-
- if ( !desc->keyPattern().equal( key ) )
- return Status( ErrorCodes::IndexKeySpecsConflict,
- str::stream() << "Trying to create an index "
- << "with same name " << name
- << " with different key spec " << key
- << " vs existing spec " << desc->keyPattern() );
-
- IndexDescriptor temp( _collection,
- _getAccessMethodName( txn, key ),
- spec );
- if ( !desc->areIndexOptionsEquivalent( &temp ) )
- return Status( ErrorCodes::IndexOptionsConflict,
- str::stream() << "Index with name: " << name
- << " already exists with different options" );
-
- // Index already exists with the same options, so no need to build a new
- // one (not an error). Most likely requested by a client using ensureIndex.
- return Status( ErrorCodes::IndexAlreadyExists, str::stream() <<
- "index already exists: " << name );
- }
- }
+ return Status::OK();
+}
- {
- // Check both existing and in-progress indexes (2nd param = true)
- const IndexDescriptor* desc = findIndexByKeyPattern(txn, key, true);
- if (desc) {
- LOG(2) << "index already exists with diff name " << name
- << ' ' << key << endl;
-
- IndexDescriptor temp( _collection,
- _getAccessMethodName( txn, key ),
- spec );
- if ( !desc->areIndexOptionsEquivalent( &temp ) )
- return Status( ErrorCodes::IndexOptionsConflict,
- str::stream() << "Index with pattern: " << key
- << " already exists with different options" );
-
- return Status( ErrorCodes::IndexAlreadyExists, str::stream() <<
- "index already exists: " << name );
- }
+Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
+ const BSONObj& spec) const {
+ const char* name = spec.getStringField("name");
+ invariant(name[0]);
+
+ const BSONObj key = spec.getObjectField("key");
+
+ {
+ // Check both existing and in-progress indexes (2nd param = true)
+ const IndexDescriptor* desc = findIndexByName(txn, name, true);
+ if (desc) {
+ // index already exists with same name
+
+ if (!desc->keyPattern().equal(key))
+ return Status(ErrorCodes::IndexKeySpecsConflict,
+ str::stream() << "Trying to create an index "
+ << "with same name " << name
+ << " with different key spec " << key
+ << " vs existing spec " << desc->keyPattern());
+
+ IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
+ if (!desc->areIndexOptionsEquivalent(&temp))
+ return Status(ErrorCodes::IndexOptionsConflict,
+ str::stream() << "Index with name: " << name
+ << " already exists with different options");
+
+ // Index already exists with the same options, so no need to build a new
+ // one (not an error). Most likely requested by a client using ensureIndex.
+ return Status(ErrorCodes::IndexAlreadyExists,
+ str::stream() << "index already exists: " << name);
}
+ }
- if ( numIndexesTotal(txn) >= _maxNumIndexesAllowed ) {
- string s = str::stream() << "add index fails, too many indexes for "
- << _collection->ns().ns() << " key:" << key.toString();
- log() << s;
- return Status( ErrorCodes::CannotCreateIndex, s );
- }
+ {
+ // Check both existing and in-progress indexes (2nd param = true)
+ const IndexDescriptor* desc = findIndexByKeyPattern(txn, key, true);
+ if (desc) {
+ LOG(2) << "index already exists with diff name " << name << ' ' << key << endl;
- // Refuse to build text index if another text index exists or is in progress.
- // Collections should only have one text index.
- string pluginName = IndexNames::findPluginName( key );
- if ( pluginName == IndexNames::TEXT ) {
- vector<IndexDescriptor*> textIndexes;
- const bool includeUnfinishedIndexes = true;
- findIndexByType( txn, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes );
- if ( textIndexes.size() > 0 ) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "only one text index per collection allowed, "
- << "found existing text index \"" << textIndexes[0]->indexName()
- << "\"" );
- }
+ IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
+ if (!desc->areIndexOptionsEquivalent(&temp))
+ return Status(ErrorCodes::IndexOptionsConflict,
+ str::stream() << "Index with pattern: " << key
+ << " already exists with different options");
+
+ return Status(ErrorCodes::IndexAlreadyExists,
+ str::stream() << "index already exists: " << name);
}
- return Status::OK();
}
- BSONObj IndexCatalog::getDefaultIdIndexSpec() const {
- dassert( _idObj["_id"].type() == NumberInt );
-
- BSONObjBuilder b;
- b.append( "name", "_id_" );
- b.append( "ns", _collection->ns().ns() );
- b.append( "key", _idObj );
- return b.obj();
+ if (numIndexesTotal(txn) >= _maxNumIndexesAllowed) {
+ string s = str::stream() << "add index fails, too many indexes for "
+ << _collection->ns().ns() << " key:" << key.toString();
+ log() << s;
+ return Status(ErrorCodes::CannotCreateIndex, s);
}
- Status IndexCatalog::dropAllIndexes(OperationContext* txn,
- bool includingIdIndex) {
-
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(),
- MODE_X));
-
- BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
-
- // there may be pointers pointing at keys in the btree(s). kill them.
- // TODO: can this can only clear cursors on this index?
- _collection->getCursorManager()->invalidateAll(false, "all indexes on collection dropped");
-
- // make sure nothing in progress
- massert( 17348,
- "cannot dropAllIndexes when index builds in progress",
- numIndexesTotal(txn) == numIndexesReady(txn) );
-
- bool haveIdIndex = false;
-
- vector<string> indexNamesToDrop;
- {
- int seen = 0;
- IndexIterator ii = getIndexIterator( txn, true );
- while ( ii.more() ) {
- seen++;
- IndexDescriptor* desc = ii.next();
- if ( desc->isIdIndex() && includingIdIndex == false ) {
- haveIdIndex = true;
- continue;
- }
- indexNamesToDrop.push_back( desc->indexName() );
- }
- invariant( seen == numIndexesTotal(txn) );
+ // Refuse to build text index if another text index exists or is in progress.
+ // Collections should only have one text index.
+ string pluginName = IndexNames::findPluginName(key);
+ if (pluginName == IndexNames::TEXT) {
+ vector<IndexDescriptor*> textIndexes;
+ const bool includeUnfinishedIndexes = true;
+ findIndexByType(txn, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes);
+ if (textIndexes.size() > 0) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "only one text index per collection allowed, "
+ << "found existing text index \""
+ << textIndexes[0]->indexName() << "\"");
}
+ }
+ return Status::OK();
+}
- for ( size_t i = 0; i < indexNamesToDrop.size(); i++ ) {
- string indexName = indexNamesToDrop[i];
- IndexDescriptor* desc = findIndexByName( txn, indexName, true );
- invariant( desc );
- LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
- IndexCatalogEntry* entry = _entries.find( desc );
- invariant( entry );
- _dropIndex(txn, entry);
- }
+BSONObj IndexCatalog::getDefaultIdIndexSpec() const {
+ dassert(_idObj["_id"].type() == NumberInt);
- // verify state is sane post cleaning
+ BSONObjBuilder b;
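+    // Builds the canonical _id index spec, roughly { name: "_id_", ns: "<db>.<coll>", key: { _id: 1 } }
+    // (assuming _idObj is { _id : 1 }, as noted in the header).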
+ b.append("name", "_id_");
+ b.append("ns", _collection->ns().ns());
+ b.append("key", _idObj);
+ return b.obj();
+}
- long long numIndexesInCollectionCatalogEntry =
- _collection->getCatalogEntry()->getTotalIndexCount( txn );
+Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex) {
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
- if ( haveIdIndex ) {
- fassert( 17324, numIndexesTotal(txn) == 1 );
- fassert( 17325, numIndexesReady(txn) == 1 );
- fassert( 17326, numIndexesInCollectionCatalogEntry == 1 );
- fassert( 17336, _entries.size() == 1 );
- }
- else {
- if ( numIndexesTotal(txn) || numIndexesInCollectionCatalogEntry || _entries.size() ) {
- error() << "About to fassert - "
- << " numIndexesTotal(): " << numIndexesTotal(txn)
- << " numSystemIndexesEntries: " << numIndexesInCollectionCatalogEntry
- << " _entries.size(): " << _entries.size()
- << " indexNamesToDrop: " << indexNamesToDrop.size()
- << " haveIdIndex: " << haveIdIndex;
+ BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
+
+ // there may be pointers pointing at keys in the btree(s). kill them.
+    // TODO: can this only clear cursors on this index?
+ _collection->getCursorManager()->invalidateAll(false, "all indexes on collection dropped");
+
+ // make sure nothing in progress
+ massert(17348,
+ "cannot dropAllIndexes when index builds in progress",
+ numIndexesTotal(txn) == numIndexesReady(txn));
+
+ bool haveIdIndex = false;
+
+ vector<string> indexNamesToDrop;
+ {
+ int seen = 0;
+ IndexIterator ii = getIndexIterator(txn, true);
+ while (ii.more()) {
+ seen++;
+ IndexDescriptor* desc = ii.next();
+ if (desc->isIdIndex() && includingIdIndex == false) {
+ haveIdIndex = true;
+ continue;
}
- fassert( 17327, numIndexesTotal(txn) == 0 );
- fassert( 17328, numIndexesInCollectionCatalogEntry == 0 );
- fassert( 17337, _entries.size() == 0 );
+ indexNamesToDrop.push_back(desc->indexName());
}
-
- return Status::OK();
+ invariant(seen == numIndexesTotal(txn));
}
- Status IndexCatalog::dropIndex(OperationContext* txn,
- IndexDescriptor* desc ) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(),
- MODE_X));
- IndexCatalogEntry* entry = _entries.find( desc );
+ for (size_t i = 0; i < indexNamesToDrop.size(); i++) {
+ string indexName = indexNamesToDrop[i];
+ IndexDescriptor* desc = findIndexByName(txn, indexName, true);
+ invariant(desc);
+ LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
+ IndexCatalogEntry* entry = _entries.find(desc);
+ invariant(entry);
+ _dropIndex(txn, entry);
+ }
- if ( !entry )
- return Status( ErrorCodes::InternalError, "cannot find index to delete" );
+ // verify state is sane post cleaning
+
+ long long numIndexesInCollectionCatalogEntry =
+ _collection->getCatalogEntry()->getTotalIndexCount(txn);
+
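+    // Sanity check: after dropping, either only the _id index remains (when it was excluded)
+    // or the catalog must be completely empty; anything else is a fatal inconsistency.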
+ if (haveIdIndex) {
+ fassert(17324, numIndexesTotal(txn) == 1);
+ fassert(17325, numIndexesReady(txn) == 1);
+ fassert(17326, numIndexesInCollectionCatalogEntry == 1);
+ fassert(17336, _entries.size() == 1);
+ } else {
+ if (numIndexesTotal(txn) || numIndexesInCollectionCatalogEntry || _entries.size()) {
+ error() << "About to fassert - "
+ << " numIndexesTotal(): " << numIndexesTotal(txn)
+ << " numSystemIndexesEntries: " << numIndexesInCollectionCatalogEntry
+ << " _entries.size(): " << _entries.size()
+ << " indexNamesToDrop: " << indexNamesToDrop.size()
+ << " haveIdIndex: " << haveIdIndex;
+ }
+ fassert(17327, numIndexesTotal(txn) == 0);
+ fassert(17328, numIndexesInCollectionCatalogEntry == 0);
+ fassert(17337, _entries.size() == 0);
+ }
- if ( !entry->isReady( txn ) )
- return Status( ErrorCodes::InternalError, "cannot delete not ready index" );
+ return Status::OK();
+}
- BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
+Status IndexCatalog::dropIndex(OperationContext* txn, IndexDescriptor* desc) {
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ IndexCatalogEntry* entry = _entries.find(desc);
- return _dropIndex(txn, entry);
- }
+ if (!entry)
+ return Status(ErrorCodes::InternalError, "cannot find index to delete");
-namespace {
- class IndexRemoveChange : public RecoveryUnit::Change {
- public:
- IndexRemoveChange(OperationContext* txn,
- Collection* collection,
- IndexCatalogEntryContainer* entries,
- IndexCatalogEntry* entry)
- : _txn(txn),
- _collection(collection),
- _entries(entries),
- _entry(entry) {
- }
+ if (!entry->isReady(txn))
+ return Status(ErrorCodes::InternalError, "cannot delete not ready index");
- virtual void commit() {
- delete _entry;
- }
+ BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
- virtual void rollback() {
- _entries->add(_entry);
- _collection->infoCache()->reset(_txn);
- }
+ return _dropIndex(txn, entry);
+}
- private:
- OperationContext* _txn;
- Collection* _collection;
- IndexCatalogEntryContainer* _entries;
- IndexCatalogEntry* _entry;
- };
-} // namespace
-
- Status IndexCatalog::_dropIndex(OperationContext* txn,
- IndexCatalogEntry* entry ) {
- /**
- * IndexState in order
- * <db>.system.indexes
- * NamespaceDetails
- * <db>.system.ns
- */
-
- // ----- SANITY CHECKS -------------
- if ( !entry )
- return Status( ErrorCodes::BadValue, "IndexCatalog::_dropIndex passed NULL" );
+namespace {
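+// Two-phase cleanup for a dropped index: the entry is released from the catalog up front, then
+// this change deletes it on commit, or re-registers it and resets the collection info cache if
+// the drop is rolled back.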
+class IndexRemoveChange : public RecoveryUnit::Change {
+public:
+ IndexRemoveChange(OperationContext* txn,
+ Collection* collection,
+ IndexCatalogEntryContainer* entries,
+ IndexCatalogEntry* entry)
+ : _txn(txn), _collection(collection), _entries(entries), _entry(entry) {}
+
+ virtual void commit() {
+ delete _entry;
+ }
- _checkMagic();
- Status status = checkUnfinished();
- if ( !status.isOK() )
- return status;
+ virtual void rollback() {
+ _entries->add(_entry);
+ _collection->infoCache()->reset(_txn);
+ }
- // Pulling indexName/indexNamespace out as they are needed post descriptor release.
- string indexName = entry->descriptor()->indexName();
- string indexNamespace = entry->descriptor()->indexNamespace();
+private:
+ OperationContext* _txn;
+ Collection* _collection;
+ IndexCatalogEntryContainer* _entries;
+ IndexCatalogEntry* _entry;
+};
+} // namespace
- // there may be pointers pointing at keys in the btree(s). kill them.
- // TODO: can this can only clear cursors on this index?
- _collection->getCursorManager()->invalidateAll(false, str::stream() << "index '"
- << indexName
- << "' dropped");
+Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry) {
+ /**
+ * IndexState in order
+ * <db>.system.indexes
+ * NamespaceDetails
+ * <db>.system.ns
+ */
- // --------- START REAL WORK ----------
+ // ----- SANITY CHECKS -------------
+ if (!entry)
+ return Status(ErrorCodes::BadValue, "IndexCatalog::_dropIndex passed NULL");
- audit::logDropIndex( &cc(), indexName, _collection->ns().ns() );
+ _checkMagic();
+ Status status = checkUnfinished();
+ if (!status.isOK())
+ return status;
- invariant(_entries.release(entry->descriptor()) == entry);
- txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn, _collection,
- &_entries, entry));
- entry = NULL;
+ // Pulling indexName/indexNamespace out as they are needed post descriptor release.
+ string indexName = entry->descriptor()->indexName();
+ string indexNamespace = entry->descriptor()->indexNamespace();
- _deleteIndexFromDisk(txn, indexName, indexNamespace);
+ // there may be pointers pointing at keys in the btree(s). kill them.
+    // TODO: can this only clear cursors on this index?
+ _collection->getCursorManager()->invalidateAll(
+ false, str::stream() << "index '" << indexName << "' dropped");
- _checkMagic();
+ // --------- START REAL WORK ----------
- // Now that we've dropped the index, ask the info cache to rebuild its cached view of
- // collection state.
- _collection->infoCache()->reset(txn);
+ audit::logDropIndex(&cc(), indexName, _collection->ns().ns());
- return Status::OK();
- }
+ invariant(_entries.release(entry->descriptor()) == entry);
+ txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn, _collection, &_entries, entry));
+ entry = NULL;
- void IndexCatalog::_deleteIndexFromDisk( OperationContext* txn,
- const string& indexName,
- const string& indexNamespace ) {
- Status status = _collection->getCatalogEntry()->removeIndex( txn, indexName );
- if ( status.code() == ErrorCodes::NamespaceNotFound ) {
- // this is ok, as we may be partially through index creation
- }
- else if ( !status.isOK() ) {
- warning() << "couldn't drop index " << indexName
- << " on collection: " << _collection->ns()
- << " because of " << status.toString();
- }
- }
+ _deleteIndexFromDisk(txn, indexName, indexNamespace);
- vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes(OperationContext* txn) {
- vector<BSONObj> toReturn = _unfinishedIndexes;
- _unfinishedIndexes.clear();
- for ( size_t i = 0; i < toReturn.size(); i++ ) {
- BSONObj spec = toReturn[i];
+ _checkMagic();
- BSONObj keyPattern = spec.getObjectField("key");
- IndexDescriptor desc( _collection, _getAccessMethodName(txn, keyPattern), spec );
+ // Now that we've dropped the index, ask the info cache to rebuild its cached view of
+ // collection state.
+ _collection->infoCache()->reset(txn);
- _deleteIndexFromDisk( txn,
- desc.indexName(),
- desc.indexNamespace() );
- }
- return toReturn;
- }
+ return Status::OK();
+}
- bool IndexCatalog::isMultikey( OperationContext* txn, const IndexDescriptor* idx ) {
- IndexCatalogEntry* entry = _entries.find( idx );
- invariant( entry );
- return entry->isMultikey();
+void IndexCatalog::_deleteIndexFromDisk(OperationContext* txn,
+ const string& indexName,
+ const string& indexNamespace) {
+ Status status = _collection->getCatalogEntry()->removeIndex(txn, indexName);
+ if (status.code() == ErrorCodes::NamespaceNotFound) {
+ // this is ok, as we may be partially through index creation
+ } else if (!status.isOK()) {
+ warning() << "couldn't drop index " << indexName << " on collection: " << _collection->ns()
+ << " because of " << status.toString();
}
+}
+vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes(OperationContext* txn) {
+ vector<BSONObj> toReturn = _unfinishedIndexes;
+ _unfinishedIndexes.clear();
+ for (size_t i = 0; i < toReturn.size(); i++) {
+ BSONObj spec = toReturn[i];
- // ---------------------------
+ BSONObj keyPattern = spec.getObjectField("key");
+ IndexDescriptor desc(_collection, _getAccessMethodName(txn, keyPattern), spec);
- bool IndexCatalog::haveAnyIndexes() const {
- return _entries.size() != 0;
+ _deleteIndexFromDisk(txn, desc.indexName(), desc.indexNamespace());
}
+ return toReturn;
+}
- int IndexCatalog::numIndexesTotal( OperationContext* txn ) const {
- int count = _entries.size() + _unfinishedIndexes.size();
- dassert(_collection->getCatalogEntry()->getTotalIndexCount(txn) == count);
- return count;
- }
+bool IndexCatalog::isMultikey(OperationContext* txn, const IndexDescriptor* idx) {
+ IndexCatalogEntry* entry = _entries.find(idx);
+ invariant(entry);
+ return entry->isMultikey();
+}
- int IndexCatalog::numIndexesReady( OperationContext* txn ) const {
- int count = 0;
- IndexIterator ii = getIndexIterator(txn, /*includeUnfinished*/false);
- while (ii.more()) {
- ii.next();
- count++;
- }
- dassert(_collection->getCatalogEntry()->getCompletedIndexCount(txn) == count);
- return count;
- }
- bool IndexCatalog::haveIdIndex( OperationContext* txn ) const {
- return findIdIndex( txn ) != NULL;
- }
+// ---------------------------
- IndexCatalog::IndexIterator::IndexIterator( OperationContext* txn,
- const IndexCatalog* cat,
- bool includeUnfinishedIndexes )
- : _includeUnfinishedIndexes( includeUnfinishedIndexes ),
- _txn( txn ),
- _catalog( cat ),
- _iterator( cat->_entries.begin() ),
- _start( true ),
- _prev( NULL ),
- _next( NULL ) {
- }
+bool IndexCatalog::haveAnyIndexes() const {
+ return _entries.size() != 0;
+}
- bool IndexCatalog::IndexIterator::more() {
- if ( _start ) {
- _advance();
- _start = false;
- }
- return _next != NULL;
- }
+int IndexCatalog::numIndexesTotal(OperationContext* txn) const {
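+    // Total = in-memory entries plus specs of unfinished ("leftover") builds; the dassert below
+    // cross-checks this against the count stored in the collection catalog entry.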
+ int count = _entries.size() + _unfinishedIndexes.size();
+ dassert(_collection->getCatalogEntry()->getTotalIndexCount(txn) == count);
+ return count;
+}
- IndexDescriptor* IndexCatalog::IndexIterator::next() {
- if ( !more() )
- return NULL;
- _prev = _next;
- _advance();
- return _prev->descriptor();
+int IndexCatalog::numIndexesReady(OperationContext* txn) const {
+ int count = 0;
+ IndexIterator ii = getIndexIterator(txn, /*includeUnfinished*/ false);
+ while (ii.more()) {
+ ii.next();
+ count++;
}
+ dassert(_collection->getCatalogEntry()->getCompletedIndexCount(txn) == count);
+ return count;
+}
- IndexAccessMethod* IndexCatalog::IndexIterator::accessMethod( const IndexDescriptor* desc ) {
- invariant( desc == _prev->descriptor() );
- return _prev->accessMethod();
- }
+bool IndexCatalog::haveIdIndex(OperationContext* txn) const {
+ return findIdIndex(txn) != NULL;
+}
- IndexCatalogEntry* IndexCatalog::IndexIterator::catalogEntry( const IndexDescriptor* desc ) {
- invariant( desc == _prev->descriptor() );
- return _prev;
+IndexCatalog::IndexIterator::IndexIterator(OperationContext* txn,
+ const IndexCatalog* cat,
+ bool includeUnfinishedIndexes)
+ : _includeUnfinishedIndexes(includeUnfinishedIndexes),
+ _txn(txn),
+ _catalog(cat),
+ _iterator(cat->_entries.begin()),
+ _start(true),
+ _prev(NULL),
+ _next(NULL) {}
+
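+// The iterator advances lazily: _next is only populated once more() or next() is first called,
+// and unfinished index entries are skipped unless _includeUnfinishedIndexes is set.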
+bool IndexCatalog::IndexIterator::more() {
+ if (_start) {
+ _advance();
+ _start = false;
}
+ return _next != NULL;
+}
- void IndexCatalog::IndexIterator::_advance() {
- _next = NULL;
+IndexDescriptor* IndexCatalog::IndexIterator::next() {
+ if (!more())
+ return NULL;
+ _prev = _next;
+ _advance();
+ return _prev->descriptor();
+}
- while ( _iterator != _catalog->_entries.end() ) {
- IndexCatalogEntry* entry = *_iterator;
- ++_iterator;
+IndexAccessMethod* IndexCatalog::IndexIterator::accessMethod(const IndexDescriptor* desc) {
+ invariant(desc == _prev->descriptor());
+ return _prev->accessMethod();
+}
- if ( _includeUnfinishedIndexes ||
- entry->isReady(_txn) ) {
- _next = entry;
- return;
- }
- }
+IndexCatalogEntry* IndexCatalog::IndexIterator::catalogEntry(const IndexDescriptor* desc) {
+ invariant(desc == _prev->descriptor());
+ return _prev;
+}
- }
+void IndexCatalog::IndexIterator::_advance() {
+ _next = NULL;
+ while (_iterator != _catalog->_entries.end()) {
+ IndexCatalogEntry* entry = *_iterator;
+ ++_iterator;
- IndexDescriptor* IndexCatalog::findIdIndex( OperationContext* txn ) const {
- IndexIterator ii = getIndexIterator( txn, false );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- if ( desc->isIdIndex() )
- return desc;
+ if (_includeUnfinishedIndexes || entry->isReady(_txn)) {
+ _next = entry;
+ return;
}
- return NULL;
}
+}
- IndexDescriptor* IndexCatalog::findIndexByName( OperationContext* txn,
- StringData name,
- bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- if ( desc->indexName() == name )
- return desc;
- }
- return NULL;
+
+IndexDescriptor* IndexCatalog::findIdIndex(OperationContext* txn) const {
+ IndexIterator ii = getIndexIterator(txn, false);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ if (desc->isIdIndex())
+ return desc;
}
+ return NULL;
+}
- IndexDescriptor* IndexCatalog::findIndexByKeyPattern( OperationContext* txn,
- const BSONObj& key,
- bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- if ( desc->keyPattern() == key )
- return desc;
- }
- return NULL;
+IndexDescriptor* IndexCatalog::findIndexByName(OperationContext* txn,
+ StringData name,
+ bool includeUnfinishedIndexes) const {
+ IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ if (desc->indexName() == name)
+ return desc;
}
+ return NULL;
+}
- IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex( OperationContext* txn,
- const BSONObj& shardKey,
- bool requireSingleKey ) const {
- IndexDescriptor* best = NULL;
+IndexDescriptor* IndexCatalog::findIndexByKeyPattern(OperationContext* txn,
+ const BSONObj& key,
+ bool includeUnfinishedIndexes) const {
+ IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ if (desc->keyPattern() == key)
+ return desc;
+ }
+ return NULL;
+}
- IndexIterator ii = getIndexIterator( txn, false );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
+IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* txn,
+ const BSONObj& shardKey,
+ bool requireSingleKey) const {
+ IndexDescriptor* best = NULL;
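+    // Prefer an index that is prefixed by the shard key, not partial, and not multikey; when
+    // requireSingleKey is false, fall back to the last multikey candidate seen.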
- if ( desc->isPartial() )
- continue;
+ IndexIterator ii = getIndexIterator(txn, false);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
- if ( !shardKey.isPrefixOf( desc->keyPattern() ) )
- continue;
+ if (desc->isPartial())
+ continue;
- if( !desc->isMultikey( txn ) )
- return desc;
+ if (!shardKey.isPrefixOf(desc->keyPattern()))
+ continue;
- if ( !requireSingleKey )
- best = desc;
- }
+ if (!desc->isMultikey(txn))
+ return desc;
- return best;
+ if (!requireSingleKey)
+ best = desc;
}
- void IndexCatalog::findIndexByType( OperationContext* txn,
- const string& type, vector<IndexDescriptor*>& matches,
- bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- if ( IndexNames::findPluginName( desc->keyPattern() ) == type ) {
- matches.push_back( desc );
- }
+ return best;
+}
+
+void IndexCatalog::findIndexByType(OperationContext* txn,
+ const string& type,
+ vector<IndexDescriptor*>& matches,
+ bool includeUnfinishedIndexes) const {
+ IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ if (IndexNames::findPluginName(desc->keyPattern()) == type) {
+ matches.push_back(desc);
}
}
+}
- IndexAccessMethod* IndexCatalog::getIndex( const IndexDescriptor* desc ) {
- IndexCatalogEntry* entry = _entries.find( desc );
- massert( 17334, "cannot find index entry", entry );
- return entry->accessMethod();
- }
+IndexAccessMethod* IndexCatalog::getIndex(const IndexDescriptor* desc) {
+ IndexCatalogEntry* entry = _entries.find(desc);
+ massert(17334, "cannot find index entry", entry);
+ return entry->accessMethod();
+}
- const IndexAccessMethod* IndexCatalog::getIndex( const IndexDescriptor* desc ) const {
- return getEntry( desc )->accessMethod();
- }
+const IndexAccessMethod* IndexCatalog::getIndex(const IndexDescriptor* desc) const {
+ return getEntry(desc)->accessMethod();
+}
- const IndexCatalogEntry* IndexCatalog::getEntry( const IndexDescriptor* desc ) const {
- const IndexCatalogEntry* entry = _entries.find( desc );
- massert( 17357, "cannot find index entry", entry );
- return entry;
- }
+const IndexCatalogEntry* IndexCatalog::getEntry(const IndexDescriptor* desc) const {
+ const IndexCatalogEntry* entry = _entries.find(desc);
+ massert(17357, "cannot find index entry", entry);
+ return entry;
+}
- const IndexDescriptor* IndexCatalog::refreshEntry( OperationContext* txn,
- const IndexDescriptor* oldDesc ) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(),
- MODE_X));
-
- const std::string indexName = oldDesc->indexName();
- invariant( _collection->getCatalogEntry()->isIndexReady( txn, indexName ) );
-
- // Notify other users of the IndexCatalog that we're about to invalidate 'oldDesc'.
- const bool collectionGoingAway = false;
- _collection->getCursorManager()->invalidateAll(collectionGoingAway,
- str::stream() << "definition of index '"
- << indexName << "' changed");
-
- // Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is
- // invalid and should not be dereferenced.
- IndexCatalogEntry* oldEntry = _entries.release(oldDesc);
- txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn,
- _collection,
- &_entries,
- oldEntry));
-
- // Ask the CollectionCatalogEntry for the new index spec.
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec( txn, indexName ).getOwned();
- BSONObj keyPattern = spec.getObjectField( "key" );
-
- // Re-register this index in the index catalog with the new spec.
- IndexDescriptor* newDesc = new IndexDescriptor( _collection,
- _getAccessMethodName( txn, keyPattern ),
- spec );
- const bool initFromDisk = false;
- const IndexCatalogEntry* newEntry = _setupInMemoryStructures( txn, newDesc, initFromDisk );
- invariant( newEntry->isReady( txn ) );
-
- // Return the new descriptor.
- return newEntry->descriptor();
- }
+const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,
+ const IndexDescriptor* oldDesc) {
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
- // ---------------------------
+ const std::string indexName = oldDesc->indexName();
+ invariant(_collection->getCatalogEntry()->isIndexReady(txn, indexName));
- namespace {
- bool isDupsAllowed( IndexDescriptor* desc ) {
- bool isUnique = desc->unique() || KeyPattern::isIdKeyPattern(desc->keyPattern());
- if ( !isUnique )
- return true;
+ // Notify other users of the IndexCatalog that we're about to invalidate 'oldDesc'.
+ const bool collectionGoingAway = false;
+ _collection->getCursorManager()->invalidateAll(
+ collectionGoingAway, str::stream() << "definition of index '" << indexName << "' changed");
- return repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(desc);
- }
+ // Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is
+ // invalid and should not be dereferenced.
+ IndexCatalogEntry* oldEntry = _entries.release(oldDesc);
+ txn->recoveryUnit()->registerChange(
+ new IndexRemoveChange(txn, _collection, &_entries, oldEntry));
- }
+ // Ask the CollectionCatalogEntry for the new index spec.
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(txn, indexName).getOwned();
+ BSONObj keyPattern = spec.getObjectField("key");
- Status IndexCatalog::_indexRecord(OperationContext* txn,
- IndexCatalogEntry* index,
- const BSONObj& obj,
- const RecordId &loc ) {
- const MatchExpression* filter = index->getFilterExpression();
- if ( filter && !filter->matchesBSON( obj ) ) {
- return Status::OK();
- }
+ // Re-register this index in the index catalog with the new spec.
+ IndexDescriptor* newDesc =
+ new IndexDescriptor(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ const bool initFromDisk = false;
+ const IndexCatalogEntry* newEntry = _setupInMemoryStructures(txn, newDesc, initFromDisk);
+ invariant(newEntry->isReady(txn));
- InsertDeleteOptions options;
- options.logIfError = false;
- options.dupsAllowed = isDupsAllowed( index->descriptor() );
+ // Return the new descriptor.
+ return newEntry->descriptor();
+}
- int64_t inserted;
- return index->accessMethod()->insert(txn, obj, loc, options, &inserted);
- }
+// ---------------------------
- Status IndexCatalog::_unindexRecord(OperationContext* txn,
- IndexCatalogEntry* index,
- const BSONObj& obj,
- const RecordId &loc,
- bool logIfError) {
- InsertDeleteOptions options;
- options.logIfError = logIfError;
- options.dupsAllowed = isDupsAllowed( index->descriptor() );
-
- // For unindex operations, dupsAllowed=false really means that it is safe to delete anything
- // that matches the key, without checking the RecordID, since dups are impossible. We need
- // to disable this behavior for in-progress indexes. See SERVER-17487 for more details.
- options.dupsAllowed = options.dupsAllowed || !index->isReady(txn);
-
- int64_t removed;
- Status status = index->accessMethod()->remove(txn, obj, loc, options, &removed);
-
- if ( !status.isOK() ) {
- log() << "Couldn't unindex record " << obj.toString()
- << " from collection " << _collection->ns()
- << ". Status: " << status.toString();
- }
+namespace {
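+// Duplicate keys are always allowed on non-unique indexes; for unique indexes (the _id pattern
+// counts as unique) the replication coordinator decides whether the constraint may be ignored.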
+bool isDupsAllowed(IndexDescriptor* desc) {
+ bool isUnique = desc->unique() || KeyPattern::isIdKeyPattern(desc->keyPattern());
+ if (!isUnique)
+ return true;
+
+ return repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(desc);
+}
+}
+Status IndexCatalog::_indexRecord(OperationContext* txn,
+ IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const RecordId& loc) {
+ const MatchExpression* filter = index->getFilterExpression();
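+    // Documents that do not match a partial index's filter are simply not indexed; this is not
+    // an error.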
+ if (filter && !filter->matchesBSON(obj)) {
return Status::OK();
}
+ InsertDeleteOptions options;
+ options.logIfError = false;
+ options.dupsAllowed = isDupsAllowed(index->descriptor());
- Status IndexCatalog::indexRecord(OperationContext* txn,
- const BSONObj& obj,
- const RecordId &loc ) {
-
- for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
- i != _entries.end();
- ++i ) {
- Status s = _indexRecord(txn, *i, obj, loc);
- if (!s.isOK())
- return s;
- }
+ int64_t inserted;
+ return index->accessMethod()->insert(txn, obj, loc, options, &inserted);
+}
- return Status::OK();
+Status IndexCatalog::_unindexRecord(OperationContext* txn,
+ IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const RecordId& loc,
+ bool logIfError) {
+ InsertDeleteOptions options;
+ options.logIfError = logIfError;
+ options.dupsAllowed = isDupsAllowed(index->descriptor());
+
+ // For unindex operations, dupsAllowed=false really means that it is safe to delete anything
+ // that matches the key, without checking the RecordID, since dups are impossible. We need
+ // to disable this behavior for in-progress indexes. See SERVER-17487 for more details.
+ options.dupsAllowed = options.dupsAllowed || !index->isReady(txn);
+
+ int64_t removed;
+ Status status = index->accessMethod()->remove(txn, obj, loc, options, &removed);
+
+ if (!status.isOK()) {
+ log() << "Couldn't unindex record " << obj.toString() << " from collection "
+ << _collection->ns() << ". Status: " << status.toString();
}
- void IndexCatalog::unindexRecord(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- bool noWarn) {
+ return Status::OK();
+}
- for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
- i != _entries.end();
- ++i ) {
- IndexCatalogEntry* entry = *i;
+Status IndexCatalog::indexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc) {
+ for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
+ ++i) {
+ Status s = _indexRecord(txn, *i, obj, loc);
+ if (!s.isOK())
+ return s;
+ }
- // If it's a background index, we DO NOT want to log anything.
- bool logIfError = entry->isReady(txn) ? !noWarn : false;
- _unindexRecord(txn, entry, obj, loc, logIfError);
- }
+ return Status::OK();
+}
+
+void IndexCatalog::unindexRecord(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ bool noWarn) {
+ for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
+ ++i) {
+ IndexCatalogEntry* entry = *i;
+
+ // If it's a background index, we DO NOT want to log anything.
+ bool logIfError = entry->isReady(txn) ? !noWarn : false;
+ _unindexRecord(txn, entry, obj, loc, logIfError);
}
+}
- BSONObj IndexCatalog::fixIndexKey( const BSONObj& key ) {
- if ( IndexDescriptor::isIdIndexPattern( key ) ) {
- return _idObj;
- }
- if ( key["_id"].type() == Bool && key.nFields() == 1 ) {
- return _idObj;
- }
- return key;
+BSONObj IndexCatalog::fixIndexKey(const BSONObj& key) {
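+    // Normalize any single-field _id pattern (e.g. { _id : true }) to the canonical { _id : 1 }.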
+ if (IndexDescriptor::isIdIndexPattern(key)) {
+ return _idObj;
}
+ if (key["_id"].type() == Bool && key.nFields() == 1) {
+ return _idObj;
+ }
+ return key;
+}
- BSONObj IndexCatalog::_fixIndexSpec( const BSONObj& spec ) {
- BSONObj o = IndexLegacy::adjustIndexSpecObject( spec );
+BSONObj IndexCatalog::_fixIndexSpec(const BSONObj& spec) {
+ BSONObj o = IndexLegacy::adjustIndexSpecObject(spec);
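+    // Rebuild the spec with a normalized field order (v, unique, key, name first), dropping
+    // "_id" and the obsolete "dropDups" option, and copying every other field through as-is.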
- BSONObjBuilder b;
+ BSONObjBuilder b;
- int v = DefaultIndexVersionNumber;
- if( !o["v"].eoo() ) {
- v = o["v"].numberInt();
- }
+ int v = DefaultIndexVersionNumber;
+ if (!o["v"].eoo()) {
+ v = o["v"].numberInt();
+ }
- // idea is to put things we use a lot earlier
- b.append("v", v);
+    // the idea is to put the fields we use most often earlier
+ b.append("v", v);
- if( o["unique"].trueValue() )
- b.appendBool("unique", true); // normalize to bool true in case was int 1 or something...
+ if (o["unique"].trueValue())
+        b.appendBool("unique", true);  // normalize to bool true in case it was int 1 or similar
- BSONObj key = fixIndexKey( o["key"].Obj() );
- b.append( "key", key );
+ BSONObj key = fixIndexKey(o["key"].Obj());
+ b.append("key", key);
- string name = o["name"].String();
- if ( IndexDescriptor::isIdIndexPattern( key ) ) {
- name = "_id_";
- }
- b.append( "name", name );
-
- {
- BSONObjIterator i(o);
- while ( i.more() ) {
- BSONElement e = i.next();
- string s = e.fieldName();
-
- if ( s == "_id" ) {
- // skip
- }
- else if ( s == "dropDups" ) {
- // dropDups is silently ignored and removed from the spec as of SERVER-14710.
- }
- else if ( s == "v" || s == "unique" ||
- s == "key" || s == "name" ) {
- // covered above
- }
- else {
- b.append(e);
- }
+ string name = o["name"].String();
+ if (IndexDescriptor::isIdIndexPattern(key)) {
+ name = "_id_";
+ }
+ b.append("name", name);
+
+ {
+ BSONObjIterator i(o);
+ while (i.more()) {
+ BSONElement e = i.next();
+ string s = e.fieldName();
+
+ if (s == "_id") {
+ // skip
+ } else if (s == "dropDups") {
+ // dropDups is silently ignored and removed from the spec as of SERVER-14710.
+ } else if (s == "v" || s == "unique" || s == "key" || s == "name") {
+ // covered above
+ } else {
+ b.append(e);
}
}
-
- return b.obj();
}
+ return b.obj();
+}
}
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 6b746fe6fd5..9cb9186a25e 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -40,318 +40,309 @@
namespace mongo {
- class Client;
- class Collection;
+class Client;
+class Collection;
- class IndexDescriptor;
- class IndexAccessMethod;
+class IndexDescriptor;
+class IndexAccessMethod;
- /**
- * how many: 1 per Collection
- * lifecycle: attached to a Collection
- */
- class IndexCatalog {
- public:
- IndexCatalog( Collection* collection );
- ~IndexCatalog();
-
- // must be called before used
- Status init(OperationContext* txn);
-
- bool ok() const;
-
- // ---- accessors -----
-
- bool haveAnyIndexes() const;
- int numIndexesTotal( OperationContext* txn ) const;
- int numIndexesReady( OperationContext* txn ) const;
- int numIndexesInProgress( OperationContext* txn ) const {
- return numIndexesTotal(txn) - numIndexesReady(txn);
- }
-
- /**
- * this is in "alive" until the Collection goes away
- * in which case everything from this tree has to go away
- */
-
- bool haveIdIndex( OperationContext* txn ) const;
+/**
+ * how many: 1 per Collection
+ * lifecycle: attached to a Collection
+ */
+class IndexCatalog {
+public:
+ IndexCatalog(Collection* collection);
+ ~IndexCatalog();
- /**
- * Returns the spec for the id index to create by default for this collection.
- */
- BSONObj getDefaultIdIndexSpec() const;
+ // must be called before used
+ Status init(OperationContext* txn);
- IndexDescriptor* findIdIndex( OperationContext* txn ) const;
+ bool ok() const;
- /**
- * @return null if cannot find
- */
- IndexDescriptor* findIndexByName( OperationContext* txn,
- StringData name,
- bool includeUnfinishedIndexes = false ) const;
+ // ---- accessors -----
- /**
- * @return null if cannot find
- */
- IndexDescriptor* findIndexByKeyPattern( OperationContext* txn,
- const BSONObj& key,
- bool includeUnfinishedIndexes = false ) const;
-
- /**
- * Returns an index suitable for shard key range scans.
- *
- * This index:
- * - must be prefixed by 'shardKey', and
- * - must not be a partial index.
- *
- * If the parameter 'requireSingleKey' is true, then this index additionally must not be
- * multi-key.
- *
- * If no such index exists, returns NULL.
- */
- IndexDescriptor* findShardKeyPrefixedIndex( OperationContext* txn,
- const BSONObj& shardKey,
- bool requireSingleKey ) const;
-
- void findIndexByType( OperationContext* txn,
- const std::string& type,
- std::vector<IndexDescriptor*>& matches,
- bool includeUnfinishedIndexes = false ) const;
+ bool haveAnyIndexes() const;
+ int numIndexesTotal(OperationContext* txn) const;
+ int numIndexesReady(OperationContext* txn) const;
+ int numIndexesInProgress(OperationContext* txn) const {
+ return numIndexesTotal(txn) - numIndexesReady(txn);
+ }
+ /**
+     * this is "alive" until the Collection goes away,
+     * in which case everything from this tree has to go away
+ */
- /**
- * Reload the index definition for 'oldDesc' from the CollectionCatalogEntry. 'oldDesc'
- * must be a ready index that is already registered with the index catalog. Returns an
- * unowned pointer to the descriptor for the new index definition.
- *
- * Use this method to notify the IndexCatalog that the spec for this index has changed.
- *
- * It is invalid to dereference 'oldDesc' after calling this method. This method broadcasts
- * an invalidateAll() on the cursor manager to notify other users of the IndexCatalog that
- * this descriptor is now invalid.
- */
- const IndexDescriptor* refreshEntry( OperationContext* txn,
- const IndexDescriptor* oldDesc );
+ bool haveIdIndex(OperationContext* txn) const;
- // never returns NULL
- const IndexCatalogEntry* getEntry( const IndexDescriptor* desc ) const;
+ /**
+ * Returns the spec for the id index to create by default for this collection.
+ */
+ BSONObj getDefaultIdIndexSpec() const;
- IndexAccessMethod* getIndex( const IndexDescriptor* desc );
- const IndexAccessMethod* getIndex( const IndexDescriptor* desc ) const;
+ IndexDescriptor* findIdIndex(OperationContext* txn) const;
- /**
- * Returns a not-ok Status if there are any unfinished index builds. No new indexes should
- * be built when in this state.
- */
- Status checkUnfinished() const;
-
- class IndexIterator {
- public:
- bool more();
- IndexDescriptor* next();
+ /**
+ * @return null if cannot find
+ */
+ IndexDescriptor* findIndexByName(OperationContext* txn,
+ StringData name,
+ bool includeUnfinishedIndexes = false) const;
- // returns the access method for the last return IndexDescriptor
- IndexAccessMethod* accessMethod( const IndexDescriptor* desc );
+ /**
+ * @return null if cannot find
+ */
+ IndexDescriptor* findIndexByKeyPattern(OperationContext* txn,
+ const BSONObj& key,
+ bool includeUnfinishedIndexes = false) const;
- // returns the IndexCatalogEntry for the last return IndexDescriptor
- IndexCatalogEntry* catalogEntry( const IndexDescriptor* desc );
+ /**
+ * Returns an index suitable for shard key range scans.
+ *
+ * This index:
+ * - must be prefixed by 'shardKey', and
+ * - must not be a partial index.
+ *
+ * If the parameter 'requireSingleKey' is true, then this index additionally must not be
+ * multi-key.
+ *
+ * If no such index exists, returns NULL.
+ */
+ IndexDescriptor* findShardKeyPrefixedIndex(OperationContext* txn,
+ const BSONObj& shardKey,
+ bool requireSingleKey) const;
- private:
- IndexIterator( OperationContext* txn,
- const IndexCatalog* cat,
- bool includeUnfinishedIndexes );
+ void findIndexByType(OperationContext* txn,
+ const std::string& type,
+ std::vector<IndexDescriptor*>& matches,
+ bool includeUnfinishedIndexes = false) const;
- void _advance();
- bool _includeUnfinishedIndexes;
+ /**
+ * Reload the index definition for 'oldDesc' from the CollectionCatalogEntry. 'oldDesc'
+ * must be a ready index that is already registered with the index catalog. Returns an
+ * unowned pointer to the descriptor for the new index definition.
+ *
+ * Use this method to notify the IndexCatalog that the spec for this index has changed.
+ *
+ * It is invalid to dereference 'oldDesc' after calling this method. This method broadcasts
+ * an invalidateAll() on the cursor manager to notify other users of the IndexCatalog that
+ * this descriptor is now invalid.
+ */
+ const IndexDescriptor* refreshEntry(OperationContext* txn, const IndexDescriptor* oldDesc);
- OperationContext* const _txn;
- const IndexCatalog* _catalog;
- IndexCatalogEntryContainer::const_iterator _iterator;
+ // never returns NULL
+ const IndexCatalogEntry* getEntry(const IndexDescriptor* desc) const;
- bool _start; // only true before we've called next() or more()
+ IndexAccessMethod* getIndex(const IndexDescriptor* desc);
+ const IndexAccessMethod* getIndex(const IndexDescriptor* desc) const;
- IndexCatalogEntry* _prev;
- IndexCatalogEntry* _next;
+ /**
+ * Returns a not-ok Status if there are any unfinished index builds. No new indexes should
+ * be built when in this state.
+ */
+ Status checkUnfinished() const;
- friend class IndexCatalog;
- };
+ class IndexIterator {
+ public:
+ bool more();
+ IndexDescriptor* next();
- IndexIterator getIndexIterator( OperationContext* txn,
- bool includeUnfinishedIndexes ) const {
- return IndexIterator( txn, this, includeUnfinishedIndexes );
- };
+ // returns the access method for the last return IndexDescriptor
+ IndexAccessMethod* accessMethod(const IndexDescriptor* desc);
- // ---- index set modifiers ------
+ // returns the IndexCatalogEntry for the last return IndexDescriptor
+ IndexCatalogEntry* catalogEntry(const IndexDescriptor* desc);
- /**
- * Call this only on an empty collection from inside a WriteUnitOfWork. Index creation on an
- * empty collection can be rolled back as part of a larger WUOW.
- */
- Status createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec);
+ private:
+ IndexIterator(OperationContext* txn,
+ const IndexCatalog* cat,
+ bool includeUnfinishedIndexes);
- StatusWith<BSONObj> prepareSpecForCreate( OperationContext* txn,
- const BSONObj& original ) const;
+ void _advance();
- Status dropAllIndexes(OperationContext* txn,
- bool includingIdIndex );
+ bool _includeUnfinishedIndexes;
- Status dropIndex(OperationContext* txn,
- IndexDescriptor* desc );
+ OperationContext* const _txn;
+ const IndexCatalog* _catalog;
+ IndexCatalogEntryContainer::const_iterator _iterator;
- /**
- * will drop all incompleted indexes and return specs
- * after this, the indexes can be rebuilt
- */
- std::vector<BSONObj> getAndClearUnfinishedIndexes(OperationContext* txn);
+ bool _start; // only true before we've called next() or more()
+ IndexCatalogEntry* _prev;
+ IndexCatalogEntry* _next;
- struct IndexKillCriteria {
- std::string ns;
- std::string name;
- BSONObj key;
- };
+ friend class IndexCatalog;
+ };
- // ---- modify single index
+ IndexIterator getIndexIterator(OperationContext* txn, bool includeUnfinishedIndexes) const {
+ return IndexIterator(txn, this, includeUnfinishedIndexes);
+ };
- bool isMultikey( OperationContext* txn, const IndexDescriptor* idex );
+ // ---- index set modifiers ------
- // --- these probably become private?
+ /**
+ * Call this only on an empty collection from inside a WriteUnitOfWork. Index creation on an
+ * empty collection can be rolled back as part of a larger WUOW.
+ */
+ Status createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec);
+ StatusWith<BSONObj> prepareSpecForCreate(OperationContext* txn, const BSONObj& original) const;
- /**
- * disk creation order
- * 1) system.indexes entry
- * 2) collection's NamespaceDetails
- * a) info + head
- * b) _indexBuildsInProgress++
- * 3) indexes entry in .ns file
- * 4) system.namespaces entry for index ns
- */
- class IndexBuildBlock {
- MONGO_DISALLOW_COPYING(IndexBuildBlock);
- public:
- IndexBuildBlock(OperationContext* txn,
- Collection* collection,
- const BSONObj& spec );
+ Status dropAllIndexes(OperationContext* txn, bool includingIdIndex);
- ~IndexBuildBlock();
+ Status dropIndex(OperationContext* txn, IndexDescriptor* desc);
- Status init();
+ /**
+     * will drop all incomplete indexes and return their specs;
+     * after this, the indexes can be rebuilt
+ */
+ std::vector<BSONObj> getAndClearUnfinishedIndexes(OperationContext* txn);
- void success();
- /**
- * index build failed, clean up meta data
- */
- void fail();
+ struct IndexKillCriteria {
+ std::string ns;
+ std::string name;
+ BSONObj key;
+ };
- IndexCatalogEntry* getEntry() { return _entry; }
+ // ---- modify single index
- private:
- Collection* const _collection;
- IndexCatalog* const _catalog;
- const std::string _ns;
+ bool isMultikey(OperationContext* txn, const IndexDescriptor* idex);
- BSONObj _spec;
+ // --- these probably become private?
- std::string _indexName;
- std::string _indexNamespace;
- IndexCatalogEntry* _entry;
- bool _inProgress;
+ /**
+ * disk creation order
+ * 1) system.indexes entry
+ * 2) collection's NamespaceDetails
+ * a) info + head
+ * b) _indexBuildsInProgress++
+ * 3) indexes entry in .ns file
+ * 4) system.namespaces entry for index ns
+ */
+ class IndexBuildBlock {
+ MONGO_DISALLOW_COPYING(IndexBuildBlock);
- OperationContext* _txn;
- };
+ public:
+ IndexBuildBlock(OperationContext* txn, Collection* collection, const BSONObj& spec);
- // ----- data modifiers ------
+ ~IndexBuildBlock();
- // this throws for now
- Status indexRecord(OperationContext* txn, const BSONObj& obj, const RecordId &loc);
+ Status init();
- void unindexRecord(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- bool noWarn);
+ void success();
- // ------- temp internal -------
+ /**
+     * the index build failed; clean up metadata
+ */
+ void fail();
- std::string getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) {
- return _getAccessMethodName( txn, keyPattern );
+ IndexCatalogEntry* getEntry() {
+ return _entry;
}
- Status _upgradeDatabaseMinorVersionIfNeeded( OperationContext* txn,
- const std::string& newPluginName );
+ private:
+ Collection* const _collection;
+ IndexCatalog* const _catalog;
+ const std::string _ns;
- // public static helpers
+ BSONObj _spec;
- static BSONObj fixIndexKey( const BSONObj& key );
+ std::string _indexName;
+ std::string _indexNamespace;
- private:
- static const BSONObj _idObj; // { _id : 1 }
+ IndexCatalogEntry* _entry;
+ bool _inProgress;
- bool _shouldOverridePlugin( OperationContext* txn, const BSONObj& keyPattern ) const;
+ OperationContext* _txn;
+ };
- /**
- * This differs from IndexNames::findPluginName in that returns the plugin name we *should*
- * use, not the plugin name inside of the provided key pattern. To understand when these
- * differ, see shouldOverridePlugin.
- */
- std::string _getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const;
+ // ----- data modifiers ------
- void _checkMagic() const;
+ // this throws for now
+ Status indexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc);
- Status _indexRecord(OperationContext* txn,
- IndexCatalogEntry* index,
- const BSONObj& obj,
- const RecordId &loc );
+ void unindexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc, bool noWarn);
- Status _unindexRecord(OperationContext* txn,
- IndexCatalogEntry* index,
- const BSONObj& obj,
- const RecordId &loc,
- bool logIfError);
+ // ------- temp internal -------
- /**
- * this does no sanity checks
- */
- Status _dropIndex(OperationContext* txn,
- IndexCatalogEntry* entry );
+ std::string getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) {
+ return _getAccessMethodName(txn, keyPattern);
+ }
- // just does disk hanges
- // doesn't change memory state, etc...
- void _deleteIndexFromDisk( OperationContext* txn,
- const std::string& indexName,
- const std::string& indexNamespace );
+ Status _upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
+ const std::string& newPluginName);
- // descriptor ownership passes to _setupInMemoryStructures
- // initFromDisk: Avoids registering a change to undo this operation when set to true.
- // You must set this flag if calling this function outside of a UnitOfWork.
- IndexCatalogEntry* _setupInMemoryStructures(OperationContext* txn,
- IndexDescriptor* descriptor,
- bool initFromDisk);
+ // public static helpers
- // Apply a set of transformations to the user-provided index object 'spec' to make it
- // conform to the standard for insertion. This function adds the 'v' field if it didn't
- // exist, removes the '_id' field if it exists, applies plugin-level transformations if
- // appropriate, etc.
- static BSONObj _fixIndexSpec( const BSONObj& spec );
+ static BSONObj fixIndexKey(const BSONObj& key);
- Status _isSpecOk( const BSONObj& spec ) const;
+private:
+ static const BSONObj _idObj; // { _id : 1 }
- Status _doesSpecConflictWithExisting( OperationContext* txn, const BSONObj& spec ) const;
+ bool _shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const;
- int _magic;
- Collection* const _collection;
- const int _maxNumIndexesAllowed;
+ /**
+ * This differs from IndexNames::findPluginName in that it returns the plugin name we *should*
+ * use, not the plugin name inside of the provided key pattern. To understand when these
+ * differ, see shouldOverridePlugin.
+ */
+ std::string _getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const;
- IndexCatalogEntryContainer _entries;
+ void _checkMagic() const;
- // These are the index specs of indexes that were "leftover".
- // "Leftover" means they were unfinished when a mongod shut down.
- // Certain operations are prohibited until someone fixes.
- // Retrieve by calling getAndClearUnfinishedIndexes().
- std::vector<BSONObj> _unfinishedIndexes;
- };
+ Status _indexRecord(OperationContext* txn,
+ IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const RecordId& loc);
+
+ Status _unindexRecord(OperationContext* txn,
+ IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const RecordId& loc,
+ bool logIfError);
+ /**
+ * this does no sanity checks
+ */
+ Status _dropIndex(OperationContext* txn, IndexCatalogEntry* entry);
+
+ // just does disk changes
+ // doesn't change memory state, etc...
+ void _deleteIndexFromDisk(OperationContext* txn,
+ const std::string& indexName,
+ const std::string& indexNamespace);
+
+ // descriptor ownership passes to _setupInMemoryStructures
+ // initFromDisk: Avoids registering a change to undo this operation when set to true.
+ // You must set this flag if calling this function outside of a UnitOfWork.
+ IndexCatalogEntry* _setupInMemoryStructures(OperationContext* txn,
+ IndexDescriptor* descriptor,
+ bool initFromDisk);
+
+ // Apply a set of transformations to the user-provided index object 'spec' to make it
+ // conform to the standard for insertion. This function adds the 'v' field if it didn't
+ // exist, removes the '_id' field if it exists, applies plugin-level transformations if
+ // appropriate, etc.
+ static BSONObj _fixIndexSpec(const BSONObj& spec);
+
+ Status _isSpecOk(const BSONObj& spec) const;
+
+ Status _doesSpecConflictWithExisting(OperationContext* txn, const BSONObj& spec) const;
+
+ int _magic;
+ Collection* const _collection;
+ const int _maxNumIndexesAllowed;
+
+ IndexCatalogEntryContainer _entries;
+
+ // These are the index specs of indexes that were "leftover".
+ // "Leftover" means they were unfinished when a mongod shut down.
+ // Certain operations are prohibited until someone fixes them.
+ // Retrieve by calling getAndClearUnfinishedIndexes().
+ std::vector<BSONObj> _unfinishedIndexes;
+};
}
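For orientation, here is a minimal sketch of how the IndexBuildBlock declared above is driven; the call sequence mirrors what MultiIndexBlock does in index_create.cpp later in this diff. buildOneIndex is a hypothetical caller, and locking plus the key-insertion step are elided:

    // Hypothetical caller, for illustration only; not part of this change.
    Status buildOneIndex(OperationContext* txn, Collection* collection, const BSONObj& spec) {
        WriteUnitOfWork wunit(txn);
        IndexCatalog::IndexBuildBlock block(txn, collection, spec);
        Status s = block.init();  // performs the on-disk creation steps listed in the comment above
        if (!s.isOK())
            return s;             // rolling back the WriteUnitOfWork undoes init()
        // ... insert keys through block.getEntry()->accessMethod() ...
        block.success();          // marks the index ready; fail() is the cleanup path for a failed build
        wunit.commit();
        return Status::OK();
    }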
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 41ad093e8cc..55f5511680b 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -48,246 +48,240 @@
namespace mongo {
- using std::string;
+using std::string;
- class HeadManagerImpl : public HeadManager {
- public:
- HeadManagerImpl(IndexCatalogEntry* ice) : _catalogEntry(ice) { }
- virtual ~HeadManagerImpl() { }
+class HeadManagerImpl : public HeadManager {
+public:
+ HeadManagerImpl(IndexCatalogEntry* ice) : _catalogEntry(ice) {}
+ virtual ~HeadManagerImpl() {}
- const RecordId getHead(OperationContext* txn) const {
- return _catalogEntry->head(txn);
- }
-
- void setHead(OperationContext* txn, const RecordId newHead) {
- _catalogEntry->setHead(txn, newHead);
- }
-
- private:
- // Not owned here.
- IndexCatalogEntry* _catalogEntry;
- };
-
- IndexCatalogEntry::IndexCatalogEntry( StringData ns,
- CollectionCatalogEntry* collection,
- IndexDescriptor* descriptor,
- CollectionInfoCache* infoCache )
- : _ns( ns.toString() ),
- _collection( collection ),
- _descriptor( descriptor ),
- _infoCache( infoCache ),
- _accessMethod( NULL ),
- _headManager(new HeadManagerImpl(this)),
- _ordering( Ordering::make( descriptor->keyPattern() ) ),
- _isReady( false ) {
-
- _descriptor->_cachedEntry = this;
+ const RecordId getHead(OperationContext* txn) const {
+ return _catalogEntry->head(txn);
}
- IndexCatalogEntry::~IndexCatalogEntry() {
- _descriptor->_cachedEntry = NULL; // defensive
-
- delete _headManager;
- delete _accessMethod;
- delete _descriptor;
+ void setHead(OperationContext* txn, const RecordId newHead) {
+ _catalogEntry->setHead(txn, newHead);
}
- void IndexCatalogEntry::init( OperationContext* txn,
- IndexAccessMethod* accessMethod ) {
- verify( _accessMethod == NULL );
- _accessMethod = accessMethod;
-
- _isReady = _catalogIsReady( txn );
- _head = _catalogHead( txn );
- _isMultikey = _catalogIsMultikey( txn );
-
- BSONElement filterElement = _descriptor->getInfoElement("partialFilterExpression");
- if ( filterElement.type() ) {
- invariant( filterElement.isABSONObj() );
- BSONObj filter = filterElement.Obj();
- StatusWithMatchExpression res = MatchExpressionParser::parse( filter );
- // this should be checked in create, so can blow up here
- invariantOK( res.getStatus() );
- _filterExpression.reset( res.getValue() );
- LOG(2) << "have filter expression for "
- << _ns << " " << _descriptor->indexName()
- << " " << filter;
- }
+private:
+ // Not owned here.
+ IndexCatalogEntry* _catalogEntry;
+};
+
+IndexCatalogEntry::IndexCatalogEntry(StringData ns,
+ CollectionCatalogEntry* collection,
+ IndexDescriptor* descriptor,
+ CollectionInfoCache* infoCache)
+ : _ns(ns.toString()),
+ _collection(collection),
+ _descriptor(descriptor),
+ _infoCache(infoCache),
+ _accessMethod(NULL),
+ _headManager(new HeadManagerImpl(this)),
+ _ordering(Ordering::make(descriptor->keyPattern())),
+ _isReady(false) {
+ _descriptor->_cachedEntry = this;
+}
+
+IndexCatalogEntry::~IndexCatalogEntry() {
+ _descriptor->_cachedEntry = NULL; // defensive
+
+ delete _headManager;
+ delete _accessMethod;
+ delete _descriptor;
+}
+
+void IndexCatalogEntry::init(OperationContext* txn, IndexAccessMethod* accessMethod) {
+ verify(_accessMethod == NULL);
+ _accessMethod = accessMethod;
+
+ _isReady = _catalogIsReady(txn);
+ _head = _catalogHead(txn);
+ _isMultikey = _catalogIsMultikey(txn);
+
+ BSONElement filterElement = _descriptor->getInfoElement("partialFilterExpression");
+ if (filterElement.type()) {
+ invariant(filterElement.isABSONObj());
+ BSONObj filter = filterElement.Obj();
+ StatusWithMatchExpression res = MatchExpressionParser::parse(filter);
+ // this should be checked in create, so can blow up here
+ invariantOK(res.getStatus());
+ _filterExpression.reset(res.getValue());
+ LOG(2) << "have filter expression for " << _ns << " " << _descriptor->indexName() << " "
+ << filter;
}
+}
- const RecordId& IndexCatalogEntry::head( OperationContext* txn ) const {
- DEV invariant( _head == _catalogHead( txn ) );
- return _head;
- }
+const RecordId& IndexCatalogEntry::head(OperationContext* txn) const {
+ DEV invariant(_head == _catalogHead(txn));
+ return _head;
+}
- bool IndexCatalogEntry::isReady( OperationContext* txn ) const {
- DEV invariant( _isReady == _catalogIsReady( txn ) );
- return _isReady;
- }
+bool IndexCatalogEntry::isReady(OperationContext* txn) const {
+ DEV invariant(_isReady == _catalogIsReady(txn));
+ return _isReady;
+}
- bool IndexCatalogEntry::isMultikey() const {
- return _isMultikey;
- }
+bool IndexCatalogEntry::isMultikey() const {
+ return _isMultikey;
+}
+
+// ---
- // ---
+void IndexCatalogEntry::setIsReady(bool newIsReady) {
+ _isReady = newIsReady;
+}
- void IndexCatalogEntry::setIsReady( bool newIsReady ) {
- _isReady = newIsReady;
+class IndexCatalogEntry::SetHeadChange : public RecoveryUnit::Change {
+public:
+ SetHeadChange(IndexCatalogEntry* ice, RecordId oldHead) : _ice(ice), _oldHead(oldHead) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ _ice->_head = _oldHead;
}
- class IndexCatalogEntry::SetHeadChange : public RecoveryUnit::Change {
- public:
- SetHeadChange(IndexCatalogEntry* ice, RecordId oldHead) :_ice(ice), _oldHead(oldHead) {
- }
+ IndexCatalogEntry* _ice;
+ const RecordId _oldHead;
+};
- virtual void commit() {}
- virtual void rollback() { _ice->_head = _oldHead; }
+void IndexCatalogEntry::setHead(OperationContext* txn, RecordId newHead) {
+ _collection->setIndexHead(txn, _descriptor->indexName(), newHead);
- IndexCatalogEntry* _ice;
- const RecordId _oldHead;
- };
+ txn->recoveryUnit()->registerChange(new SetHeadChange(this, _head));
+ _head = newHead;
+}
- void IndexCatalogEntry::setHead( OperationContext* txn, RecordId newHead ) {
- _collection->setIndexHead( txn,
- _descriptor->indexName(),
- newHead );
- txn->recoveryUnit()->registerChange(new SetHeadChange(this, _head));
- _head = newHead;
+/**
+ * RAII class, which associates a new RecoveryUnit with an OperationContext for the purposes
+ * of simulating a sub-transaction. Takes ownership of the new recovery unit and frees it at
+ * destruction time.
+ */
+class RecoveryUnitSwap {
+public:
+ RecoveryUnitSwap(OperationContext* txn, RecoveryUnit* newRecoveryUnit)
+ : _txn(txn),
+ _oldRecoveryUnit(_txn->releaseRecoveryUnit()),
+ _oldRecoveryUnitState(
+ _txn->setRecoveryUnit(newRecoveryUnit, OperationContext::kNotInUnitOfWork)),
+ _newRecoveryUnit(newRecoveryUnit) {}
+
+ ~RecoveryUnitSwap() {
+ _txn->releaseRecoveryUnit();
+ _txn->setRecoveryUnit(_oldRecoveryUnit, _oldRecoveryUnitState);
}
+private:
+ // Not owned
+ OperationContext* const _txn;
- /**
- * RAII class, which associates a new RecoveryUnit with an OperationContext for the purposes
- * of simulating a sub-transaction. Takes ownership of the new recovery unit and frees it at
- * destruction time.
- */
- class RecoveryUnitSwap {
- public:
- RecoveryUnitSwap(OperationContext* txn, RecoveryUnit* newRecoveryUnit)
- : _txn(txn),
- _oldRecoveryUnit(_txn->releaseRecoveryUnit()),
- _oldRecoveryUnitState(_txn->setRecoveryUnit(newRecoveryUnit,
- OperationContext::kNotInUnitOfWork)),
- _newRecoveryUnit(newRecoveryUnit) { }
-
- ~RecoveryUnitSwap() {
- _txn->releaseRecoveryUnit();
- _txn->setRecoveryUnit(_oldRecoveryUnit, _oldRecoveryUnitState);
- }
+ // Owned, but life-time is not controlled
+ RecoveryUnit* const _oldRecoveryUnit;
+ OperationContext::RecoveryUnitState const _oldRecoveryUnitState;
- private:
- // Not owned
- OperationContext* const _txn;
+ // Owned and life-time is controlled
+ const std::unique_ptr<RecoveryUnit> _newRecoveryUnit;
+};
- // Owned, but life-time is not controlled
- RecoveryUnit* const _oldRecoveryUnit;
- OperationContext::RecoveryUnitState const _oldRecoveryUnitState;
+void IndexCatalogEntry::setMultikey(OperationContext* txn) {
+ if (isMultikey()) {
+ return;
+ }
- // Owned and life-time is controlled
- const std::unique_ptr<RecoveryUnit> _newRecoveryUnit;
- };
+ // Only one thread should set the multi-key value per collection, because the metadata for
+ // a collection is one large document.
+ Lock::ResourceLock collMDLock(txn->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X);
- void IndexCatalogEntry::setMultikey(OperationContext* txn) {
- if (isMultikey()) {
- return;
- }
+ // Check again in case we blocked on the MD lock and another thread beat us to setting the
+ // multiKey metadata for this index.
+ if (isMultikey()) {
+ return;
+ }
- // Only one thread should set the multi-key value per collection, because the metadata for
- // a collection is one large document.
- Lock::ResourceLock collMDLock(txn->lockState(),
- ResourceId(RESOURCE_METADATA, _ns),
- MODE_X);
+ // This effectively emulates a sub-transaction off the main transaction, which invoked
+ // setMultikey. The reason we need this is to avoid artificial WriteConflicts, which happen
+ // with snapshot isolation.
+ {
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ RecoveryUnitSwap ruSwap(txn, storageEngine->newRecoveryUnit());
- // Check again in case we blocked on the MD lock and another thread beat us to setting the
- // multiKey metadata for this index.
- if (isMultikey()) {
- return;
- }
+ WriteUnitOfWork wuow(txn);
- // This effectively emulates a sub-transaction off the main transaction, which invoked
- // setMultikey. The reason we need is to avoid artificial WriteConflicts, which happen
- // with snapshot isolation.
- {
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- RecoveryUnitSwap ruSwap(txn, storageEngine->newRecoveryUnit());
-
- WriteUnitOfWork wuow(txn);
-
- if (_collection->setIndexIsMultikey(txn, _descriptor->indexName())) {
- if (_infoCache) {
- LOG(1) << _ns << ": clearing plan cache - index "
- << _descriptor->keyPattern() << " set to multi key.";
- _infoCache->clearQueryCache();
- }
+ if (_collection->setIndexIsMultikey(txn, _descriptor->indexName())) {
+ if (_infoCache) {
+ LOG(1) << _ns << ": clearing plan cache - index " << _descriptor->keyPattern()
+ << " set to multi key.";
+ _infoCache->clearQueryCache();
}
-
- wuow.commit();
}
- _isMultikey = true;
+ wuow.commit();
}
- // ----
-
- bool IndexCatalogEntry::_catalogIsReady( OperationContext* txn ) const {
- return _collection->isIndexReady( txn, _descriptor->indexName() );
- }
+ _isMultikey = true;
+}
- RecordId IndexCatalogEntry::_catalogHead( OperationContext* txn ) const {
- return _collection->getIndexHead( txn, _descriptor->indexName() );
- }
+// ----
- bool IndexCatalogEntry::_catalogIsMultikey( OperationContext* txn ) const {
- return _collection->isIndexMultikey( txn, _descriptor->indexName() );
- }
+bool IndexCatalogEntry::_catalogIsReady(OperationContext* txn) const {
+ return _collection->isIndexReady(txn, _descriptor->indexName());
+}
- // ------------------
+RecordId IndexCatalogEntry::_catalogHead(OperationContext* txn) const {
+ return _collection->getIndexHead(txn, _descriptor->indexName());
+}
- const IndexCatalogEntry* IndexCatalogEntryContainer::find( const IndexDescriptor* desc ) const {
- if ( desc->_cachedEntry )
- return desc->_cachedEntry;
+bool IndexCatalogEntry::_catalogIsMultikey(OperationContext* txn) const {
+ return _collection->isIndexMultikey(txn, _descriptor->indexName());
+}
- for ( const_iterator i = begin(); i != end(); ++i ) {
- const IndexCatalogEntry* e = *i;
- if ( e->descriptor() == desc )
- return e;
- }
- return NULL;
- }
+// ------------------
- IndexCatalogEntry* IndexCatalogEntryContainer::find( const IndexDescriptor* desc ) {
- if ( desc->_cachedEntry )
- return desc->_cachedEntry;
+const IndexCatalogEntry* IndexCatalogEntryContainer::find(const IndexDescriptor* desc) const {
+ if (desc->_cachedEntry)
+ return desc->_cachedEntry;
- for ( iterator i = begin(); i != end(); ++i ) {
- IndexCatalogEntry* e = *i;
- if ( e->descriptor() == desc )
- return e;
- }
- return NULL;
+ for (const_iterator i = begin(); i != end(); ++i) {
+ const IndexCatalogEntry* e = *i;
+ if (e->descriptor() == desc)
+ return e;
}
+ return NULL;
+}
- IndexCatalogEntry* IndexCatalogEntryContainer::find( const string& name ) {
- for ( iterator i = begin(); i != end(); ++i ) {
- IndexCatalogEntry* e = *i;
- if ( e->descriptor()->indexName() == name )
- return e;
- }
- return NULL;
+IndexCatalogEntry* IndexCatalogEntryContainer::find(const IndexDescriptor* desc) {
+ if (desc->_cachedEntry)
+ return desc->_cachedEntry;
+
+ for (iterator i = begin(); i != end(); ++i) {
+ IndexCatalogEntry* e = *i;
+ if (e->descriptor() == desc)
+ return e;
}
+ return NULL;
+}
- IndexCatalogEntry* IndexCatalogEntryContainer::release( const IndexDescriptor* desc ) {
- for ( std::vector<IndexCatalogEntry*>::iterator i = _entries.mutableVector().begin();
- i != _entries.mutableVector().end();
- ++i ) {
- IndexCatalogEntry* e = *i;
- if ( e->descriptor() != desc )
- continue;
- _entries.mutableVector().erase( i );
+IndexCatalogEntry* IndexCatalogEntryContainer::find(const string& name) {
+ for (iterator i = begin(); i != end(); ++i) {
+ IndexCatalogEntry* e = *i;
+ if (e->descriptor()->indexName() == name)
return e;
- }
- return NULL;
}
+ return NULL;
+}
+
+IndexCatalogEntry* IndexCatalogEntryContainer::release(const IndexDescriptor* desc) {
+ for (std::vector<IndexCatalogEntry*>::iterator i = _entries.mutableVector().begin();
+ i != _entries.mutableVector().end();
+ ++i) {
+ IndexCatalogEntry* e = *i;
+ if (e->descriptor() != desc)
+ continue;
+ _entries.mutableVector().erase(i);
+ return e;
+ }
+ return NULL;
+}
} // namespace mongo
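The RecoveryUnitSwap helper above is the piece worth calling out: it parks the operation's current recovery unit and installs a fresh one so a metadata write can commit independently of the caller's snapshot. A minimal sketch of the shape of setMultikey's use of it, with the catalog write itself stubbed out:

    // Illustration only, mirroring IndexCatalogEntry::setMultikey above.
    void writeMetadataInSubTransaction(OperationContext* txn) {
        StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
        RecoveryUnitSwap ruSwap(txn, storageEngine->newRecoveryUnit());  // old recovery unit parked

        WriteUnitOfWork wuow(txn);
        // ... perform the catalog update against the fresh recovery unit ...
        wuow.commit();
    }  // ~RecoveryUnitSwap restores the original recovery unit and frees the new one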
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index ad919b2bca5..d7ab07f37ec 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -38,130 +38,155 @@
namespace mongo {
- class CollectionCatalogEntry;
- class CollectionInfoCache;
- class HeadManager;
- class IndexAccessMethod;
- class IndexDescriptor;
- class MatchExpression;
- class OperationContext;
+class CollectionCatalogEntry;
+class CollectionInfoCache;
+class HeadManager;
+class IndexAccessMethod;
+class IndexDescriptor;
+class MatchExpression;
+class OperationContext;
- class IndexCatalogEntry {
- MONGO_DISALLOW_COPYING( IndexCatalogEntry );
- public:
- IndexCatalogEntry( StringData ns,
- CollectionCatalogEntry* collection, // not owned
- IndexDescriptor* descriptor, // ownership passes to me
- CollectionInfoCache* infoCache ); // not owned, optional
+class IndexCatalogEntry {
+ MONGO_DISALLOW_COPYING(IndexCatalogEntry);
- ~IndexCatalogEntry();
+public:
+ IndexCatalogEntry(StringData ns,
+ CollectionCatalogEntry* collection, // not owned
+ IndexDescriptor* descriptor, // ownership passes to me
+ CollectionInfoCache* infoCache); // not owned, optional
- const std::string& ns() const { return _ns; }
+ ~IndexCatalogEntry();
- void init( OperationContext* txn,
- IndexAccessMethod* accessMethod );
+ const std::string& ns() const {
+ return _ns;
+ }
- IndexDescriptor* descriptor() { return _descriptor; }
- const IndexDescriptor* descriptor() const { return _descriptor; }
+ void init(OperationContext* txn, IndexAccessMethod* accessMethod);
- IndexAccessMethod* accessMethod() { return _accessMethod; }
- const IndexAccessMethod* accessMethod() const { return _accessMethod; }
+ IndexDescriptor* descriptor() {
+ return _descriptor;
+ }
+ const IndexDescriptor* descriptor() const {
+ return _descriptor;
+ }
- const Ordering& ordering() const { return _ordering; }
+ IndexAccessMethod* accessMethod() {
+ return _accessMethod;
+ }
+ const IndexAccessMethod* accessMethod() const {
+ return _accessMethod;
+ }
- const MatchExpression* getFilterExpression() const { return _filterExpression.get(); }
+ const Ordering& ordering() const {
+ return _ordering;
+ }
- /// ---------------------
+ const MatchExpression* getFilterExpression() const {
+ return _filterExpression.get();
+ }
- const RecordId& head( OperationContext* txn ) const;
+ /// ---------------------
- void setHead( OperationContext* txn, RecordId newHead );
+ const RecordId& head(OperationContext* txn) const;
- void setIsReady( bool newIsReady );
+ void setHead(OperationContext* txn, RecordId newHead);
- HeadManager* headManager() const { return _headManager; }
+ void setIsReady(bool newIsReady);
- // --
+ HeadManager* headManager() const {
+ return _headManager;
+ }
- bool isMultikey() const;
+ // --
- void setMultikey( OperationContext* txn );
+ bool isMultikey() const;
- // if this ready is ready for queries
- bool isReady( OperationContext* txn ) const;
+ void setMultikey(OperationContext* txn);
- private:
+ // if this index is ready for queries
+ bool isReady(OperationContext* txn) const;
- class SetMultikeyChange;
- class SetHeadChange;
+private:
+ class SetMultikeyChange;
+ class SetHeadChange;
- bool _catalogIsReady( OperationContext* txn ) const;
- RecordId _catalogHead( OperationContext* txn ) const;
- bool _catalogIsMultikey( OperationContext* txn ) const;
+ bool _catalogIsReady(OperationContext* txn) const;
+ RecordId _catalogHead(OperationContext* txn) const;
+ bool _catalogIsMultikey(OperationContext* txn) const;
- // -----
+ // -----
- std::string _ns;
+ std::string _ns;
- CollectionCatalogEntry* _collection; // not owned here
+ CollectionCatalogEntry* _collection; // not owned here
- IndexDescriptor* _descriptor; // owned here
+ IndexDescriptor* _descriptor; // owned here
- CollectionInfoCache* _infoCache; // not owned here
+ CollectionInfoCache* _infoCache; // not owned here
- IndexAccessMethod* _accessMethod; // owned here
+ IndexAccessMethod* _accessMethod; // owned here
- // Owned here.
- HeadManager* _headManager;
- std::unique_ptr<MatchExpression> _filterExpression;
+ // Owned here.
+ HeadManager* _headManager;
+ std::unique_ptr<MatchExpression> _filterExpression;
- // cached stuff
+ // cached stuff
- Ordering _ordering; // TODO: this might be b-tree specific
- bool _isReady; // cache of NamespaceDetails info
- RecordId _head; // cache of IndexDetails
- bool _isMultikey; // cache of NamespaceDetails info
- };
+ Ordering _ordering; // TODO: this might be b-tree specific
+ bool _isReady; // cache of NamespaceDetails info
+ RecordId _head; // cache of IndexDetails
+ bool _isMultikey; // cache of NamespaceDetails info
+};
- class IndexCatalogEntryContainer {
- public:
+class IndexCatalogEntryContainer {
+public:
+ typedef std::vector<IndexCatalogEntry*>::const_iterator const_iterator;
+ typedef std::vector<IndexCatalogEntry*>::const_iterator iterator;
- typedef std::vector<IndexCatalogEntry*>::const_iterator const_iterator;
- typedef std::vector<IndexCatalogEntry*>::const_iterator iterator;
+ const_iterator begin() const {
+ return _entries.vector().begin();
+ }
+ const_iterator end() const {
+ return _entries.vector().end();
+ }
- const_iterator begin() const { return _entries.vector().begin(); }
- const_iterator end() const { return _entries.vector().end(); }
+ iterator begin() {
+ return _entries.vector().begin();
+ }
+ iterator end() {
+ return _entries.vector().end();
+ }
- iterator begin() { return _entries.vector().begin(); }
- iterator end() { return _entries.vector().end(); }
+ // TODO: these have to be SUPER SUPER FAST
+ // maybe even some pointer trickery is in order
+ const IndexCatalogEntry* find(const IndexDescriptor* desc) const;
+ IndexCatalogEntry* find(const IndexDescriptor* desc);
- // TODO: these have to be SUPER SUPER FAST
- // maybe even some pointer trickery is in order
- const IndexCatalogEntry* find( const IndexDescriptor* desc ) const;
- IndexCatalogEntry* find( const IndexDescriptor* desc );
+ IndexCatalogEntry* find(const std::string& name);
- IndexCatalogEntry* find( const std::string& name );
+ unsigned size() const {
+ return _entries.size();
+ }
+ // -----------------
- unsigned size() const { return _entries.size(); }
- // -----------------
+ /**
+ * Removes from _entries and returns the matching entry or NULL if none matches.
+ */
+ IndexCatalogEntry* release(const IndexDescriptor* desc);
- /**
- * Removes from _entries and returns the matching entry or NULL if none matches.
- */
- IndexCatalogEntry* release( const IndexDescriptor* desc );
+ bool remove(const IndexDescriptor* desc) {
+ IndexCatalogEntry* entry = release(desc);
+ delete entry;
+ return entry;
+ }
- bool remove( const IndexDescriptor* desc ) {
- IndexCatalogEntry* entry = release(desc);
- delete entry;
- return entry;
- }
-
- // pass ownership to EntryContainer
- void add( IndexCatalogEntry* entry ) { _entries.mutableVector().push_back( entry ); }
-
- private:
- OwnedPointerVector<IndexCatalogEntry> _entries;
- };
+ // pass ownership to EntryContainer
+ void add(IndexCatalogEntry* entry) {
+ _entries.mutableVector().push_back(entry);
+ }
+private:
+ OwnedPointerVector<IndexCatalogEntry> _entries;
+};
}
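A quick note on the container's ownership rules, since they are easy to lose in the reformat: add() transfers ownership to the OwnedPointerVector, find() only borrows, release() hands ownership back to the caller, and remove() deletes in place. A hypothetical sequence (makeEntry() and the "a_1" index name are placeholders, not part of this change):

    IndexCatalogEntryContainer entries;
    entries.add(makeEntry());                               // container now owns the entry
    IndexCatalogEntry* borrowed = entries.find("a_1");      // still owned by the container
    IndexCatalogEntry* taken = entries.release(borrowed->descriptor());
    delete taken;                                           // caller owns it after release()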
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index b624c0151f0..c642fcb83a5 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -54,334 +54,318 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::endl;
-
- /**
- * On rollback sets MultiIndexBlock::_needToCleanup to true.
- */
- class MultiIndexBlock::SetNeedToCleanupOnRollback : public RecoveryUnit::Change {
- public:
- explicit SetNeedToCleanupOnRollback(MultiIndexBlock* indexer) : _indexer(indexer) {}
-
- virtual void commit() {}
- virtual void rollback() { _indexer->_needToCleanup = true; }
-
- private:
- MultiIndexBlock* const _indexer;
- };
-
- /**
- * On rollback in init(), cleans up _indexes so that ~MultiIndexBlock doesn't try to clean
- * up _indexes manually (since the changes were already rolled back).
- * Due to this, it is thus legal to call init() again after it fails.
- */
- class MultiIndexBlock::CleanupIndexesVectorOnRollback : public RecoveryUnit::Change {
- public:
- explicit CleanupIndexesVectorOnRollback(MultiIndexBlock* indexer) : _indexer(indexer) {}
-
- virtual void commit() {}
- virtual void rollback() { _indexer->_indexes.clear(); }
-
- private:
- MultiIndexBlock* const _indexer;
- };
-
- MultiIndexBlock::MultiIndexBlock(OperationContext* txn, Collection* collection)
- : _collection(collection),
- _txn(txn),
- _buildInBackground(false),
- _allowInterruption(false),
- _ignoreUnique(false),
- _needToCleanup(true) {
+using std::unique_ptr;
+using std::string;
+using std::endl;
+
+/**
+ * On rollback sets MultiIndexBlock::_needToCleanup to true.
+ */
+class MultiIndexBlock::SetNeedToCleanupOnRollback : public RecoveryUnit::Change {
+public:
+ explicit SetNeedToCleanupOnRollback(MultiIndexBlock* indexer) : _indexer(indexer) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ _indexer->_needToCleanup = true;
}
- MultiIndexBlock::~MultiIndexBlock() {
- if (!_needToCleanup || _indexes.empty())
- return;
- while (true) {
- try {
- WriteUnitOfWork wunit(_txn);
- // This cleans up all index builds.
- // Because that may need to write, it is done inside
- // of a WUOW. Nothing inside this block can fail, and it is made fatal if it does.
- for (size_t i = 0; i < _indexes.size(); i++) {
- _indexes[i].block->fail();
- }
- wunit.commit();
- return;
- }
- catch (const WriteConflictException& e) {
- continue;
- }
- catch (const std::exception& e) {
- error() << "Caught exception while cleaning up partially built indexes: "
- << e.what();
- }
- catch (...) {
- error() << "Caught unknown exception while cleaning up partially built indexes.";
- }
- fassertFailed(18644);
- }
+private:
+ MultiIndexBlock* const _indexer;
+};
+
+/**
+ * On rollback in init(), cleans up _indexes so that ~MultiIndexBlock doesn't try to clean
+ * up _indexes manually (since the changes were already rolled back).
+ * Due to this, it is thus legal to call init() again after it fails.
+ */
+class MultiIndexBlock::CleanupIndexesVectorOnRollback : public RecoveryUnit::Change {
+public:
+ explicit CleanupIndexesVectorOnRollback(MultiIndexBlock* indexer) : _indexer(indexer) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ _indexer->_indexes.clear();
}
- void MultiIndexBlock::removeExistingIndexes(std::vector<BSONObj>* specs) const {
- for (size_t i = 0; i < specs->size(); i++) {
- Status status =
- _collection->getIndexCatalog()->prepareSpecForCreate(_txn, (*specs)[i]).getStatus();
- if (status.code() == ErrorCodes::IndexAlreadyExists) {
- specs->erase(specs->begin() + i);
- i--;
+private:
+ MultiIndexBlock* const _indexer;
+};
+
+MultiIndexBlock::MultiIndexBlock(OperationContext* txn, Collection* collection)
+ : _collection(collection),
+ _txn(txn),
+ _buildInBackground(false),
+ _allowInterruption(false),
+ _ignoreUnique(false),
+ _needToCleanup(true) {}
+
+MultiIndexBlock::~MultiIndexBlock() {
+ if (!_needToCleanup || _indexes.empty())
+ return;
+ while (true) {
+ try {
+ WriteUnitOfWork wunit(_txn);
+ // This cleans up all index builds.
+ // Because that may need to write, it is done inside
+ // of a WUOW. Nothing inside this block can fail, and it is made fatal if it does.
+ for (size_t i = 0; i < _indexes.size(); i++) {
+ _indexes[i].block->fail();
}
- // intentionally ignoring other error codes
+ wunit.commit();
+ return;
+ } catch (const WriteConflictException& e) {
+ continue;
+ } catch (const std::exception& e) {
+ error() << "Caught exception while cleaning up partially built indexes: " << e.what();
+ } catch (...) {
+ error() << "Caught unknown exception while cleaning up partially built indexes.";
}
+ fassertFailed(18644);
}
+}
+
+void MultiIndexBlock::removeExistingIndexes(std::vector<BSONObj>* specs) const {
+ for (size_t i = 0; i < specs->size(); i++) {
+ Status status =
+ _collection->getIndexCatalog()->prepareSpecForCreate(_txn, (*specs)[i]).getStatus();
+ if (status.code() == ErrorCodes::IndexAlreadyExists) {
+ specs->erase(specs->begin() + i);
+ i--;
+ }
+ // intentionally ignoring other error codes
+ }
+}
- Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
- WriteUnitOfWork wunit(_txn);
-
- invariant(_indexes.empty());
- _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
+Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
+ WriteUnitOfWork wunit(_txn);
- const string& ns = _collection->ns().ns();
+ invariant(_indexes.empty());
+ _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
- Status status = _collection->getIndexCatalog()->checkUnfinished();
- if ( !status.isOK() )
- return status;
+ const string& ns = _collection->ns().ns();
- for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
- BSONObj info = indexSpecs[i];
+ Status status = _collection->getIndexCatalog()->checkUnfinished();
+ if (!status.isOK())
+ return status;
- string pluginName = IndexNames::findPluginName( info["key"].Obj() );
- if ( pluginName.size() ) {
- Status s =
- _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(_txn, pluginName);
- if ( !s.isOK() )
- return s;
- }
+ for (size_t i = 0; i < indexSpecs.size(); i++) {
+ BSONObj info = indexSpecs[i];
- // Any foreground indexes make all indexes be built in the foreground.
- _buildInBackground = (_buildInBackground && info["background"].trueValue());
+ string pluginName = IndexNames::findPluginName(info["key"].Obj());
+ if (pluginName.size()) {
+ Status s = _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(
+ _txn, pluginName);
+ if (!s.isOK())
+ return s;
}
- for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
- BSONObj info = indexSpecs[i];
- StatusWith<BSONObj> statusWithInfo =
- _collection->getIndexCatalog()->prepareSpecForCreate( _txn, info );
- Status status = statusWithInfo.getStatus();
- if ( !status.isOK() )
- return status;
- info = statusWithInfo.getValue();
-
- IndexToBuild index;
- index.block.reset(new IndexCatalog::IndexBuildBlock(_txn, _collection, info));
- status = index.block->init();
- if ( !status.isOK() )
- return status;
-
- index.real = index.block->getEntry()->accessMethod();
- status = index.real->initializeAsEmpty(_txn);
- if ( !status.isOK() )
- return status;
-
- if (!_buildInBackground) {
- // Bulk build process requires foreground building as it assumes nothing is changing
- // under it.
- index.bulk = index.real->initiateBulk();
- }
+ // Any foreground indexes make all indexes be built in the foreground.
+ _buildInBackground = (_buildInBackground && info["background"].trueValue());
+ }
+
+ for (size_t i = 0; i < indexSpecs.size(); i++) {
+ BSONObj info = indexSpecs[i];
+ StatusWith<BSONObj> statusWithInfo =
+ _collection->getIndexCatalog()->prepareSpecForCreate(_txn, info);
+ Status status = statusWithInfo.getStatus();
+ if (!status.isOK())
+ return status;
+ info = statusWithInfo.getValue();
- const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
+ IndexToBuild index;
+ index.block.reset(new IndexCatalog::IndexBuildBlock(_txn, _collection, info));
+ status = index.block->init();
+ if (!status.isOK())
+ return status;
- index.options.logIfError = false; // logging happens elsewhere if needed.
- index.options.dupsAllowed = !descriptor->unique()
- || _ignoreUnique
- || repl::getGlobalReplicationCoordinator()
- ->shouldIgnoreUniqueIndex(descriptor);
+ index.real = index.block->getEntry()->accessMethod();
+ status = index.real->initializeAsEmpty(_txn);
+ if (!status.isOK())
+ return status;
- log() << "build index on: " << ns << " properties: " << descriptor->toString();
- if (index.bulk)
- log() << "\t building index using bulk method";
+ if (!_buildInBackground) {
+ // Bulk build process requires foreground building as it assumes nothing is changing
+ // under it.
+ index.bulk = index.real->initiateBulk();
+ }
- index.filterExpression = index.block->getEntry()->getFilterExpression();
+ const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
- // TODO SERVER-14888 Suppress this in cases we don't want to audit.
- audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);
+ index.options.logIfError = false; // logging happens elsewhere if needed.
+ index.options.dupsAllowed = !descriptor->unique() || _ignoreUnique ||
+ repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
- _indexes.push_back(std::move(index));
- }
+ log() << "build index on: " << ns << " properties: " << descriptor->toString();
+ if (index.bulk)
+ log() << "\t building index using bulk method";
- // this is so that operations examining the list of indexes know there are more keys to look
- // at when doing things like in place updates, etc...
- _collection->infoCache()->addedIndex(_txn);
+ index.filterExpression = index.block->getEntry()->getFilterExpression();
- if (_buildInBackground)
- _backgroundOperation.reset(new BackgroundOperation(ns));
+ // TODO SERVER-14888 Suppress this in cases we don't want to audit.
+ audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);
- wunit.commit();
- return Status::OK();
+ _indexes.push_back(std::move(index));
}
- Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsOut) {
- const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build";
- const auto numRecords = _collection->numRecords(_txn);
- stdx::unique_lock<Client> lk(*_txn->getClient());
- ProgressMeterHolder progress(*_txn->setMessage_inlock(curopMessage,
- curopMessage,
- numRecords));
- lk.unlock();
-
- Timer t;
-
- unsigned long long n = 0;
-
- unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(_txn,
- _collection->ns().ns(),
- _collection));
- if (_buildInBackground) {
- invariant(_allowInterruption);
- exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
- }
- else {
- exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY);
- }
+ // this is so that operations examining the list of indexes know there are more keys to look
+ // at when doing things like in place updates, etc...
+ _collection->infoCache()->addedIndex(_txn);
- Snapshotted<BSONObj> objToIndex;
- RecordId loc;
- PlanExecutor::ExecState state;
- int retries = 0; // non-zero when retrying our last document.
- while (retries
- || (PlanExecutor::ADVANCED == (state = exec->getNextSnapshotted(&objToIndex,
- &loc)))) {
- try {
- if (_allowInterruption)
- _txn->checkForInterrupt();
-
- // Make sure we are working with the latest version of the document.
- if (objToIndex.snapshotId() != _txn->recoveryUnit()->getSnapshotId()
- && !_collection->findDoc(_txn, loc, &objToIndex)) {
- // doc was deleted so don't index it.
- retries = 0;
- continue;
- }
-
- // Done before insert so we can retry document if it WCEs.
- progress->setTotalWhileRunning( _collection->numRecords(_txn) );
-
- WriteUnitOfWork wunit(_txn);
- Status ret = insert(objToIndex.value(), loc);
- if (ret.isOK()) {
- wunit.commit();
- }
- else if (dupsOut && ret.code() == ErrorCodes::DuplicateKey) {
- // If dupsOut is non-null, we should only fail the specific insert that
- // led to a DuplicateKey rather than the whole index build.
- dupsOut->insert(loc);
- }
- else {
- // Fail the index build hard.
- return ret;
- }
-
- // Go to the next document
- progress->hit();
- n++;
- retries = 0;
- }
- catch (const WriteConflictException& wce) {
- CurOp::get(_txn)->debug().writeConflicts++;
- retries++; // logAndBackoff expects this to be 1 on first call.
- wce.logAndBackoff(retries, "index creation", _collection->ns().ns());
-
- // Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
- // around call to abandonSnapshot.
- exec->saveState();
- _txn->recoveryUnit()->abandonSnapshot();
- exec->restoreState(_txn); // Handles any WCEs internally.
- }
- }
+ if (_buildInBackground)
+ _backgroundOperation.reset(new BackgroundOperation(ns));
- if (state != PlanExecutor::IS_EOF) {
- // If the plan executor was killed, this means the DB/collection was dropped and so it
- // is not safe to cleanup the in-progress indexes.
- if (state == PlanExecutor::DEAD) {
- abortWithoutCleanup();
- }
+ wunit.commit();
+ return Status::OK();
+}
- uasserted(28550,
- "Unable to complete index build as the collection is no longer readable");
- }
-
- progress->finished();
+Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsOut) {
+ const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build";
+ const auto numRecords = _collection->numRecords(_txn);
+ stdx::unique_lock<Client> lk(*_txn->getClient());
+ ProgressMeterHolder progress(*_txn->setMessage_inlock(curopMessage, curopMessage, numRecords));
+ lk.unlock();
- Status ret = doneInserting(dupsOut);
- if (!ret.isOK())
- return ret;
+ Timer t;
- log() << "build index done. scanned " << n << " total records. "
- << t.seconds() << " secs" << endl;
+ unsigned long long n = 0;
- return Status::OK();
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::collectionScan(_txn, _collection->ns().ns(), _collection));
+ if (_buildInBackground) {
+ invariant(_allowInterruption);
+ exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ } else {
+ exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY);
}
- Status MultiIndexBlock::insert(const BSONObj& doc, const RecordId& loc) {
- for ( size_t i = 0; i < _indexes.size(); i++ ) {
-
- if ( _indexes[i].filterExpression &&
- !_indexes[i].filterExpression->matchesBSON(doc) ) {
+ Snapshotted<BSONObj> objToIndex;
+ RecordId loc;
+ PlanExecutor::ExecState state;
+ int retries = 0; // non-zero when retrying our last document.
+ while (retries ||
+ (PlanExecutor::ADVANCED == (state = exec->getNextSnapshotted(&objToIndex, &loc)))) {
+ try {
+ if (_allowInterruption)
+ _txn->checkForInterrupt();
+
+ // Make sure we are working with the latest version of the document.
+ if (objToIndex.snapshotId() != _txn->recoveryUnit()->getSnapshotId() &&
+ !_collection->findDoc(_txn, loc, &objToIndex)) {
+ // doc was deleted so don't index it.
+ retries = 0;
continue;
}
- int64_t unused;
- Status idxStatus(ErrorCodes::InternalError, "");
- if (_indexes[i].bulk) {
- idxStatus = _indexes[i].bulk->insert(_txn, doc, loc, _indexes[i].options, &unused);
- }
- else {
- idxStatus = _indexes[i].real->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ // Done before insert so we can retry document if it WCEs.
+ progress->setTotalWhileRunning(_collection->numRecords(_txn));
+
+ WriteUnitOfWork wunit(_txn);
+ Status ret = insert(objToIndex.value(), loc);
+ if (ret.isOK()) {
+ wunit.commit();
+ } else if (dupsOut && ret.code() == ErrorCodes::DuplicateKey) {
+ // If dupsOut is non-null, we should only fail the specific insert that
+ // led to a DuplicateKey rather than the whole index build.
+ dupsOut->insert(loc);
+ } else {
+ // Fail the index build hard.
+ return ret;
}
- if ( !idxStatus.isOK() )
- return idxStatus;
+ // Go to the next document
+ progress->hit();
+ n++;
+ retries = 0;
+ } catch (const WriteConflictException& wce) {
+ CurOp::get(_txn)->debug().writeConflicts++;
+ retries++; // logAndBackoff expects this to be 1 on first call.
+ wce.logAndBackoff(retries, "index creation", _collection->ns().ns());
+
+ // Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
+ // around call to abandonSnapshot.
+ exec->saveState();
+ _txn->recoveryUnit()->abandonSnapshot();
+ exec->restoreState(_txn); // Handles any WCEs internally.
}
- return Status::OK();
}
- Status MultiIndexBlock::doneInserting(std::set<RecordId>* dupsOut) {
- for ( size_t i = 0; i < _indexes.size(); i++ ) {
- if ( _indexes[i].bulk == NULL )
- continue;
- LOG(1) << "\t bulk commit starting for index: "
- << _indexes[i].block->getEntry()->descriptor()->indexName();
- Status status = _indexes[i].real->commitBulk( _txn,
- std::move(_indexes[i].bulk),
- _allowInterruption,
- _indexes[i].options.dupsAllowed,
- dupsOut );
- if ( !status.isOK() ) {
- return status;
- }
+ if (state != PlanExecutor::IS_EOF) {
+ // If the plan executor was killed, this means the DB/collection was dropped and so it
+ // is not safe to cleanup the in-progress indexes.
+ if (state == PlanExecutor::DEAD) {
+ abortWithoutCleanup();
}
- return Status::OK();
+ uasserted(28550, "Unable to complete index build as the collection is no longer readable");
}
- void MultiIndexBlock::abortWithoutCleanup() {
- _indexes.clear();
- _needToCleanup = false;
- }
+ progress->finished();
- void MultiIndexBlock::commit() {
- for ( size_t i = 0; i < _indexes.size(); i++ ) {
- _indexes[i].block->success();
+ Status ret = doneInserting(dupsOut);
+ if (!ret.isOK())
+ return ret;
+
+ log() << "build index done. scanned " << n << " total records. " << t.seconds() << " secs"
+ << endl;
+
+ return Status::OK();
+}
+
+Status MultiIndexBlock::insert(const BSONObj& doc, const RecordId& loc) {
+ for (size_t i = 0; i < _indexes.size(); i++) {
+ if (_indexes[i].filterExpression && !_indexes[i].filterExpression->matchesBSON(doc)) {
+ continue;
+ }
+
+ int64_t unused;
+ Status idxStatus(ErrorCodes::InternalError, "");
+ if (_indexes[i].bulk) {
+ idxStatus = _indexes[i].bulk->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ } else {
+ idxStatus = _indexes[i].real->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ }
+
+ if (!idxStatus.isOK())
+ return idxStatus;
+ }
+ return Status::OK();
+}
+
+Status MultiIndexBlock::doneInserting(std::set<RecordId>* dupsOut) {
+ for (size_t i = 0; i < _indexes.size(); i++) {
+ if (_indexes[i].bulk == NULL)
+ continue;
+ LOG(1) << "\t bulk commit starting for index: "
+ << _indexes[i].block->getEntry()->descriptor()->indexName();
+ Status status = _indexes[i].real->commitBulk(_txn,
+ std::move(_indexes[i].bulk),
+ _allowInterruption,
+ _indexes[i].options.dupsAllowed,
+ dupsOut);
+ if (!status.isOK()) {
+ return status;
}
+ }
+
+ return Status::OK();
+}
- // this one is so operations examining the list of indexes know that the index is finished
- _collection->infoCache()->addedIndex(_txn);
+void MultiIndexBlock::abortWithoutCleanup() {
+ _indexes.clear();
+ _needToCleanup = false;
+}
- _txn->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
- _needToCleanup = false;
+void MultiIndexBlock::commit() {
+ for (size_t i = 0; i < _indexes.size(); i++) {
+ _indexes[i].block->success();
}
-} // namespace mongo
+ // this one is so operations examining the list of indexes know that the index is finished
+ _collection->infoCache()->addedIndex(_txn);
+
+ _txn->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
+ _needToCleanup = false;
+}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/index_create.h b/src/mongo/db/catalog/index_create.h
index f4be38710cd..d3dccb20b46 100644
--- a/src/mongo/db/catalog/index_create.h
+++ b/src/mongo/db/catalog/index_create.h
@@ -42,189 +42,197 @@
namespace mongo {
- class BackgroundOperation;
- class BSONObj;
- class Collection;
- class OperationContext;
+class BackgroundOperation;
+class BSONObj;
+class Collection;
+class OperationContext;
+
+/**
+ * Builds one or more indexes.
+ *
+ * If any method other than insert() returns a not-ok Status, this MultiIndexBlock should be
+ * considered failed and must be destroyed.
+ *
+ * If a MultiIndexBlock is destroyed before commit() or if commit() is rolled back, it will
+ * clean up all traces of the indexes being constructed. MultiIndexBlocks should not be
+ * destructed from inside of a WriteUnitOfWork as any cleanup needed should never be rolled back
+ * (as it is itself essentially a form of rollback, you don't want to "rollback the rollback").
+ */
+class MultiIndexBlock {
+ MONGO_DISALLOW_COPYING(MultiIndexBlock);
+
+public:
+ /**
+ * Neither pointer is owned.
+ */
+ MultiIndexBlock(OperationContext* txn, Collection* collection);
+ ~MultiIndexBlock();
+
+ /**
+ * By default we ignore the 'background' flag in specs when building an index. If this is
+ * called before init(), we will build the indexes in the background as long as *all* specs
+ * call for background indexing. If any spec calls for foreground indexing all indexes will
+ * be built in the foreground, as there is no concurrency benefit to building a subset of
+ * indexes in the background, but there is a performance benefit to building all in the
+ * foreground.
+ */
+ void allowBackgroundBuilding() {
+ _buildInBackground = true;
+ }
/**
- * Builds one or more indexes.
+ * Call this before init() to allow the index build to be interrupted.
+ * This only affects builds using the insertAllDocumentsInCollection helper.
+ */
+ void allowInterruption() {
+ _allowInterruption = true;
+ }
+
+ /**
+ * By default we enforce the 'unique' flag in specs when building an index, failing on duplicates.
+ * If this is called before init(), we will ignore unique violations. This has no effect if
+ * no specs are unique.
*
- * If any method other than insert() returns a not-ok Status, this MultiIndexBlock should be
- * considered failed and must be destroyed.
+ * If this is called, any dupsOut sets passed in will never be filled.
+ */
+ void ignoreUniqueConstraint() {
+ _ignoreUnique = true;
+ }
+
+ /**
+ * Removes pre-existing indexes from 'specs'. If this isn't done, init() may fail with
+ * IndexAlreadyExists.
+ */
+ void removeExistingIndexes(std::vector<BSONObj>* specs) const;
+
+ /**
+ * Prepares the index(es) for building.
*
- * If a MultiIndexBlock is destroyed before commit() or if commit() is rolled back, it will
- * clean up all traces of the indexes being constructed. MultiIndexBlocks should not be
- * destructed from inside of a WriteUnitOfWork as any cleanup needed should never be rolled back
- * (as it is itself essentially a form of rollback, you don't want to "rollback the rollback").
+ * Does not need to be called inside of a WriteUnitOfWork (but can be due to nesting).
+ *
+ * Requires holding an exclusive database lock.
*/
- class MultiIndexBlock {
- MONGO_DISALLOW_COPYING( MultiIndexBlock );
- public:
- /**
- * Neither pointer is owned.
- */
- MultiIndexBlock(OperationContext* txn, Collection* collection);
- ~MultiIndexBlock();
-
- /**
- * By default we ignore the 'background' flag in specs when building an index. If this is
- * called before init(), we will build the indexes in the background as long as *all* specs
- * call for background indexing. If any spec calls for foreground indexing all indexes will
- * be built in the foreground, as there is no concurrency benefit to building a subset of
- * indexes in the background, but there is a performance benefit to building all in the
- * foreground.
- */
- void allowBackgroundBuilding() { _buildInBackground = true; }
-
- /**
- * Call this before init() to allow the index build to be interrupted.
- * This only affects builds using the insertAllDocumentsInCollection helper.
- */
- void allowInterruption() { _allowInterruption = true; }
-
- /**
- * By default we enforce the 'unique' flag in specs when building an index by failing.
- * If this is called before init(), we will ignore unique violations. This has no effect if
- * no specs are unique.
- *
- * If this is called, any dupsOut sets passed in will never be filled.
- */
- void ignoreUniqueConstraint() { _ignoreUnique = true; }
-
- /**
- * Removes pre-existing indexes from 'specs'. If this isn't done, init() may fail with
- * IndexAlreadyExists.
- */
- void removeExistingIndexes(std::vector<BSONObj>* specs) const;
-
- /**
- * Prepares the index(es) for building.
- *
- * Does not need to be called inside of a WriteUnitOfWork (but can be due to nesting).
- *
- * Requires holding an exclusive database lock.
- */
- Status init(const std::vector<BSONObj>& specs);
- Status init(const BSONObj& spec) {
- return init(std::vector<BSONObj>(1, spec));
- }
+ Status init(const std::vector<BSONObj>& specs);
+ Status init(const BSONObj& spec) {
+ return init(std::vector<BSONObj>(1, spec));
+ }
+
+ /**
+ * Inserts all documents in the Collection into the indexes and logs with timing info.
+ *
+ * This is a simplified replacement for insert and doneInserting. Do not call this if you
+ * are calling either of them.
+ *
+ * If dupsOut is passed as non-NULL, violators of uniqueness constraints will be added to
+ * the set rather than failing the build. Documents added to this set are not indexed, so
+ * callers MUST either fail this index build or delete the documents from the collection.
+ *
+ * Can throw an exception if interrupted.
+ *
+ * Should not be called inside of a WriteUnitOfWork.
+ */
+ Status insertAllDocumentsInCollection(std::set<RecordId>* dupsOut = NULL);
- /**
- * Inserts all documents in the Collection into the indexes and logs with timing info.
- *
- * This is a simplified replacement for insert and doneInserting. Do not call this if you
- * are calling either of them.
- *
- * If dupsOut is passed as non-NULL, violators of uniqueness constraints will be added to
- * the set rather than failing the build. Documents added to this set are not indexed, so
- * callers MUST either fail this index build or delete the documents from the collection.
- *
- * Can throw an exception if interrupted.
- *
- * Should not be called inside of a WriteUnitOfWork.
- */
- Status insertAllDocumentsInCollection(std::set<RecordId>* dupsOut = NULL);
-
- /**
- * Call this after init() for each document in the collection.
- *
- * Do not call if you called insertAllDocumentsInCollection();
- *
- * Should be called inside of a WriteUnitOfWork.
- */
- Status insert(const BSONObj& wholeDocument, const RecordId& loc);
-
- /**
- * Call this after the last insert(). This gives the index builder a chance to do any
- * long-running operations in separate units of work from commit().
- *
- * Do not call if you called insertAllDocumentsInCollection();
- *
- * If dupsOut is passed as non-NULL, violators of uniqueness constraints will be added to
- * the set. Documents added to this set are not indexed, so callers MUST either fail this
- * index build or delete the documents from the collection.
- *
- * Should not be called inside of a WriteUnitOfWork.
- */
- Status doneInserting(std::set<RecordId>* dupsOut = NULL);
-
- /**
- * Marks the index ready for use. Should only be called as the last method after
- * doneInserting() or insertAllDocumentsInCollection() return success.
- *
- * Should be called inside of a WriteUnitOfWork. If the index building is to be logOp'd,
- * logOp() should be called from the same unit of work as commit().
- *
- * Requires holding an exclusive database lock.
- */
- void commit();
-
- /**
- * May be called at any time after construction but before a successful commit(). Suppresses
- * the default behavior on destruction of removing all traces of uncommitted index builds.
- *
- * The most common use of this is if the indexes were already dropped via some other
- * mechanism such as the whole collection being dropped. In that case, it would be invalid
- * to try to remove the indexes again. Also, replication uses this to ensure that indexes
- * that are being built on shutdown are resumed on startup.
- *
- * Do not use this unless you are really sure you need to.
- *
- * Does not matter whether it is called inside of a WriteUnitOfWork. Will not be rolled
- * back.
- */
- void abortWithoutCleanup();
-
- bool getBuildInBackground() const { return _buildInBackground; }
-
- private:
- class SetNeedToCleanupOnRollback;
- class CleanupIndexesVectorOnRollback;
-
- struct IndexToBuild {
-#if defined(_MSC_VER) && _MSC_VER < 1900 // MVSC++ <= 2013 can't generate default move operations
- IndexToBuild() = default;
- IndexToBuild(IndexToBuild&& other)
- : block(std::move(other.block))
- , real(std::move(other.real))
- , bulk(std::move(other.bulk))
- , options(std::move(other.options))
- , filterExpression(std::move(other.filterExpression))
- {}
-
- IndexToBuild& operator= (IndexToBuild&& other) {
- block = std::move(other.block);
- real = std::move(other.real);
- filterExpression = std::move(other.filterExpression);
- bulk = std::move(other.bulk);
- options = std::move(other.options);
- return *this;
- }
+ /**
+ * Call this after init() for each document in the collection.
+ *
+ * Do not call if you called insertAllDocumentsInCollection();
+ *
+ * Should be called inside of a WriteUnitOfWork.
+ */
+ Status insert(const BSONObj& wholeDocument, const RecordId& loc);
+
+ /**
+ * Call this after the last insert(). This gives the index builder a chance to do any
+ * long-running operations in separate units of work from commit().
+ *
+ * Do not call if you called insertAllDocumentsInCollection();
+ *
+ * If dupsOut is passed as non-NULL, violators of uniqueness constraints will be added to
+ * the set. Documents added to this set are not indexed, so callers MUST either fail this
+ * index build or delete the documents from the collection.
+ *
+ * Should not be called inside of a WriteUnitOfWork.
+ */
+ Status doneInserting(std::set<RecordId>* dupsOut = NULL);
+
+ /**
+ * Marks the index ready for use. Should only be called as the last method after
+ * doneInserting() or insertAllDocumentsInCollection() return success.
+ *
+ * Should be called inside of a WriteUnitOfWork. If the index building is to be logOp'd,
+ * logOp() should be called from the same unit of work as commit().
+ *
+ * Requires holding an exclusive database lock.
+ */
+ void commit();
+
+ /**
+ * May be called at any time after construction but before a successful commit(). Suppresses
+ * the default behavior on destruction of removing all traces of uncommitted index builds.
+ *
+ * The most common use of this is if the indexes were already dropped via some other
+ * mechanism such as the whole collection being dropped. In that case, it would be invalid
+ * to try to remove the indexes again. Also, replication uses this to ensure that indexes
+ * that are being built on shutdown are resumed on startup.
+ *
+ * Do not use this unless you are really sure you need to.
+ *
+ * Does not matter whether it is called inside of a WriteUnitOfWork. Will not be rolled
+ * back.
+ */
+ void abortWithoutCleanup();
+
+ bool getBuildInBackground() const {
+ return _buildInBackground;
+ }
+
+private:
+ class SetNeedToCleanupOnRollback;
+ class CleanupIndexesVectorOnRollback;
+
+ struct IndexToBuild {
+#if defined(_MSC_VER) && _MSC_VER < 1900  // MSVC++ <= 2013 can't generate default move operations
+ IndexToBuild() = default;
+ IndexToBuild(IndexToBuild&& other)
+ : block(std::move(other.block)),
+ real(std::move(other.real)),
+ bulk(std::move(other.bulk)),
+ options(std::move(other.options)),
+ filterExpression(std::move(other.filterExpression)) {}
+
+ IndexToBuild& operator=(IndexToBuild&& other) {
+ block = std::move(other.block);
+ real = std::move(other.real);
+ filterExpression = std::move(other.filterExpression);
+ bulk = std::move(other.bulk);
+ options = std::move(other.options);
+ return *this;
+ }
#endif
- std::unique_ptr<IndexCatalog::IndexBuildBlock> block;
+ std::unique_ptr<IndexCatalog::IndexBuildBlock> block;
- IndexAccessMethod* real = NULL; // owned elsewhere
- const MatchExpression* filterExpression; // might be NULL, owned elsewhere
- std::unique_ptr<IndexAccessMethod::BulkBuilder> bulk;
+ IndexAccessMethod* real = NULL; // owned elsewhere
+ const MatchExpression* filterExpression; // might be NULL, owned elsewhere
+ std::unique_ptr<IndexAccessMethod::BulkBuilder> bulk;
- InsertDeleteOptions options;
- };
+ InsertDeleteOptions options;
+ };
- std::vector<IndexToBuild> _indexes;
+ std::vector<IndexToBuild> _indexes;
- std::unique_ptr<BackgroundOperation> _backgroundOperation;
+ std::unique_ptr<BackgroundOperation> _backgroundOperation;
- // Pointers not owned here and must outlive 'this'
- Collection* _collection;
- OperationContext* _txn;
+ // Pointers not owned here and must outlive 'this'
+ Collection* _collection;
+ OperationContext* _txn;
- bool _buildInBackground;
- bool _allowInterruption;
- bool _ignoreUnique;
+ bool _buildInBackground;
+ bool _allowInterruption;
+ bool _ignoreUnique;
- bool _needToCleanup;
- };
+ bool _needToCleanup;
+};
-} // namespace mongo
+} // namespace mongo
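
The ordering rules spelled out in the comments above are easier to see end to end. A minimal sketch, assuming a Collection* 'coll', a std::vector<BSONObj> of index specs 'specs', and a hypothetical 'docsToIndex' sequence of {doc, loc} pairs; error handling is reduced to uassertStatusOK:

    MultiIndexBlock indexer(txn, coll);
    indexer.allowInterruption();
    uassertStatusOK(indexer.init(specs));        // declare which indexes to build

    for (const auto& entry : docsToIndex) {
        WriteUnitOfWork wunit(txn);              // insert() must run inside a WriteUnitOfWork
        uassertStatusOK(indexer.insert(entry.doc, entry.loc));
        wunit.commit();
    }

    uassertStatusOK(indexer.doneInserting());    // outside of any WriteUnitOfWork

    {
        WriteUnitOfWork wunit(txn);              // commit() (plus any logOp) in one unit of work
        indexer.commit();
        wunit.commit();
    }
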
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index c42c2a1921f..d29c4d93d92 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -35,80 +35,78 @@
namespace mongo {
- using std::string;
+using std::string;
- Status validateKeyPattern(const BSONObj& key) {
- const ErrorCodes::Error code = ErrorCodes::CannotCreateIndex;
+Status validateKeyPattern(const BSONObj& key) {
+ const ErrorCodes::Error code = ErrorCodes::CannotCreateIndex;
- if ( key.objsize() > 2048 )
- return Status(code, "Index key pattern too large.");
+ if (key.objsize() > 2048)
+ return Status(code, "Index key pattern too large.");
- if ( key.isEmpty() )
- return Status(code, "Index keys cannot be empty.");
+ if (key.isEmpty())
+ return Status(code, "Index keys cannot be empty.");
- string pluginName = IndexNames::findPluginName( key );
- if ( pluginName.size() ) {
- if ( !IndexNames::isKnownName( pluginName ) )
- return Status(code,
- mongoutils::str::stream() << "Unknown index plugin '"
- << pluginName << '\'');
- }
+ string pluginName = IndexNames::findPluginName(key);
+ if (pluginName.size()) {
+ if (!IndexNames::isKnownName(pluginName))
+ return Status(
+ code, mongoutils::str::stream() << "Unknown index plugin '" << pluginName << '\'');
+ }
- BSONObjIterator it( key );
- while ( it.more() ) {
- BSONElement keyElement = it.next();
+ BSONObjIterator it(key);
+ while (it.more()) {
+ BSONElement keyElement = it.next();
- if( keyElement.type() == Object || keyElement.type() == Array )
- return Status(code, "Index keys cannot be Objects or Arrays.");
+ if (keyElement.type() == Object || keyElement.type() == Array)
+ return Status(code, "Index keys cannot be Objects or Arrays.");
- if ( keyElement.type() == String && pluginName != keyElement.str() ) {
- return Status(code, "Can't use more than one index plugin for a single index.");
- }
+ if (keyElement.type() == String && pluginName != keyElement.str()) {
+ return Status(code, "Can't use more than one index plugin for a single index.");
+ }
- // Ensure that the fields on which we are building the index are valid: a field must not
- // begin with a '$' unless it is part of a DBRef or text index, and a field path cannot
- // contain an empty field. If a field cannot be created or updated, it should not be
- // indexable.
+ // Ensure that the fields on which we are building the index are valid: a field must not
+ // begin with a '$' unless it is part of a DBRef or text index, and a field path cannot
+ // contain an empty field. If a field cannot be created or updated, it should not be
+ // indexable.
- FieldRef keyField( keyElement.fieldName() );
+ FieldRef keyField(keyElement.fieldName());
- const size_t numParts = keyField.numParts();
- if ( numParts == 0 ) {
- return Status(code, "Index keys cannot be an empty field.");
- }
+ const size_t numParts = keyField.numParts();
+ if (numParts == 0) {
+ return Status(code, "Index keys cannot be an empty field.");
+ }
- // "$**" is acceptable for a text index.
- if ( mongoutils::str::equals( keyElement.fieldName(), "$**" ) &&
- keyElement.valuestrsafe() == IndexNames::TEXT )
- continue;
+ // "$**" is acceptable for a text index.
+ if (mongoutils::str::equals(keyElement.fieldName(), "$**") &&
+ keyElement.valuestrsafe() == IndexNames::TEXT)
+ continue;
- for ( size_t i = 0; i != numParts; ++i ) {
- const StringData part = keyField.getPart(i);
+ for (size_t i = 0; i != numParts; ++i) {
+ const StringData part = keyField.getPart(i);
- // Check if the index key path contains an empty field.
- if ( part.empty() ) {
- return Status(code, "Index keys cannot contain an empty field.");
- }
+ // Check if the index key path contains an empty field.
+ if (part.empty()) {
+ return Status(code, "Index keys cannot contain an empty field.");
+ }
- if ( part[0] != '$' )
- continue;
+ if (part[0] != '$')
+ continue;
- // Check if the '$'-prefixed field is part of a DBRef: since we don't have the
- // necessary context to validate whether this is a proper DBRef, we allow index
- // creation on '$'-prefixed names that match those used in a DBRef.
- const bool mightBePartOfDbRef = (i != 0) &&
- (part == "$db" ||
- part == "$id" ||
- part == "$ref");
+ // Check if the '$'-prefixed field is part of a DBRef: since we don't have the
+ // necessary context to validate whether this is a proper DBRef, we allow index
+ // creation on '$'-prefixed names that match those used in a DBRef.
+ const bool mightBePartOfDbRef =
+ (i != 0) && (part == "$db" || part == "$id" || part == "$ref");
- if ( !mightBePartOfDbRef ) {
- return Status(code, "Index key contains an illegal field name: "
- "field name starts with '$'.");
- }
+ if (!mightBePartOfDbRef) {
+ return Status(code,
+ "Index key contains an illegal field name: "
+ "field name starts with '$'.");
}
}
-
- return Status::OK();
}
-} // namespace mongo
+
+ return Status::OK();
+}
+} // namespace mongo
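
A few illustrative inputs for validateKeyPattern(), mirroring the checks above (sketch assertions, not tests from the tree):

    invariant(validateKeyPattern(BSON("a" << 1)).isOK());                // plain ascending key
    invariant(validateKeyPattern(BSON("a.$id" << 1)).isOK());            // '$'-prefixed part accepted as a DBRef field
    invariant(validateKeyPattern(BSON("$**" << "text")).isOK());         // "$**" is only valid for the text plugin
    invariant(!validateKeyPattern(BSONObj()).isOK());                    // empty key pattern rejected
    invariant(!validateKeyPattern(BSON("a" << BSON("b" << 1))).isOK());  // Object values rejected
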
diff --git a/src/mongo/db/catalog/index_key_validate.h b/src/mongo/db/catalog/index_key_validate.h
index 4392e7e01a8..b03722ebc0c 100644
--- a/src/mongo/db/catalog/index_key_validate.h
+++ b/src/mongo/db/catalog/index_key_validate.h
@@ -31,10 +31,10 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
+class BSONObj;
- /**
- * Checks if the key is valid for building an index.
- */
- Status validateKeyPattern(const BSONObj& key);
-} // namespace mongo
+/**
+ * Checks if the key is valid for building an index.
+ */
+Status validateKeyPattern(const BSONObj& key);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 9c73d968e37..a077a452fb8 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -51,208 +51,200 @@
namespace mongo {
namespace {
- static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn);
- if (db->dropCollection(txn, collName).isOK()) {
- // ignoring failure case
- wunit.commit();
- }
+static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
+ WriteUnitOfWork wunit(txn);
+ if (db->dropCollection(txn, collName).isOK()) {
+ // ignoring failure case
+ wunit.commit();
+ }
+}
+} // namespace
+
+Status renameCollection(OperationContext* txn,
+ const NamespaceString& source,
+ const NamespaceString& target,
+ bool dropTarget,
+ bool stayTemp) {
+ DisableDocumentValidation validationDisabler(txn);
+
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
+ // We stay in source context the whole time. This is mostly to set the CurOp namespace.
+ OldClientContext ctx(txn, source);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(source);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while renaming collection " << source.ns()
+ << " to " << target.ns());
}
-} // namespace
-
- Status renameCollection(OperationContext* txn,
- const NamespaceString& source,
- const NamespaceString& target,
- bool dropTarget,
- bool stayTemp) {
- DisableDocumentValidation validationDisabler(txn);
-
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
- // We stay in source context the whole time. This is mostly to set the CurOp namespace.
- OldClientContext ctx(txn, source);
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(source);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while renaming collection " << source.ns()
- << " to " << target.ns());
- }
- Database* const sourceDB = dbHolder().get(txn, source.db());
- Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
- if (!sourceColl) {
- return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
- }
+ Database* const sourceDB = dbHolder().get(txn, source.db());
+ Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
+ if (!sourceColl) {
+ return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
+ }
- {
- // Ensure that collection name does not exceed maximum length.
- // Ensure that index names do not push the length over the max.
- // Iterator includes unfinished indexes.
- IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
- int longestIndexNameLength = 0;
- while (sourceIndIt.more()) {
- int thisLength = sourceIndIt.next()->indexName().length();
- if (thisLength > longestIndexNameLength)
- longestIndexNameLength = thisLength;
- }
+ {
+ // Ensure that collection name does not exceed maximum length.
+ // Ensure that index names do not push the length over the max.
+ // Iterator includes unfinished indexes.
+ IndexCatalog::IndexIterator sourceIndIt =
+ sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+ int longestIndexNameLength = 0;
+ while (sourceIndIt.more()) {
+ int thisLength = sourceIndIt.next()->indexName().length();
+ if (thisLength > longestIndexNameLength)
+ longestIndexNameLength = thisLength;
+ }
- unsigned int longestAllowed =
- std::min(int(NamespaceString::MaxNsCollectionLen),
- int(NamespaceString::MaxNsLen) - 2/*strlen(".$")*/ - longestIndexNameLength);
- if (target.size() > longestAllowed) {
- StringBuilder sb;
- sb << "collection name length of " << target.size()
- << " exceeds maximum length of " << longestAllowed
- << ", allowing for index names";
- return Status(ErrorCodes::InvalidLength, sb.str());
- }
+ unsigned int longestAllowed =
+ std::min(int(NamespaceString::MaxNsCollectionLen),
+ int(NamespaceString::MaxNsLen) - 2 /*strlen(".$")*/ - longestIndexNameLength);
+ if (target.size() > longestAllowed) {
+ StringBuilder sb;
+ sb << "collection name length of " << target.size() << " exceeds maximum length of "
+ << longestAllowed << ", allowing for index names";
+ return Status(ErrorCodes::InvalidLength, sb.str());
}
+ }
- BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
+ BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
- Database* const targetDB = dbHolder().openDb(txn, target.db());
+ Database* const targetDB = dbHolder().openDb(txn, target.db());
- {
- WriteUnitOfWork wunit(txn);
+ {
+ WriteUnitOfWork wunit(txn);
- // Check if the target namespace exists and if dropTarget is true.
- // If target exists and dropTarget is not true, return false.
- if (targetDB->getCollection(target)) {
- if (!dropTarget) {
- printStackTrace();
- return Status(ErrorCodes::NamespaceExists, "target namespace exists");
- }
-
- Status s = targetDB->dropCollection(txn, target.ns());
- if (!s.isOK()) {
- return s;
- }
+ // Check if the target namespace exists and if dropTarget is true.
+ // If target exists and dropTarget is not true, return false.
+ if (targetDB->getCollection(target)) {
+ if (!dropTarget) {
+ printStackTrace();
+ return Status(ErrorCodes::NamespaceExists, "target namespace exists");
}
- // If we are renaming in the same database, just
- // rename the namespace and we're done.
- if (sourceDB == targetDB) {
- Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
- if (!s.isOK()) {
- return s;
- }
-
- getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn,
- NamespaceString(source),
- NamespaceString(target),
- dropTarget,
- stayTemp);
-
- wunit.commit();
- return Status::OK();
+ Status s = targetDB->dropCollection(txn, target.ns());
+ if (!s.isOK()) {
+ return s;
+ }
+ }
+
+ // If we are renaming in the same database, just
+ // rename the namespace and we're done.
+ if (sourceDB == targetDB) {
+ Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
+ if (!s.isOK()) {
+ return s;
}
+ getGlobalServiceContext()->getOpObserver()->onRenameCollection(
+ txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
+
wunit.commit();
+ return Status::OK();
}
- // If we get here, we are renaming across databases, so we must copy all the data and
- // indexes, then remove the source collection.
+ wunit.commit();
+ }
- // Create the target collection. It will be removed if we fail to copy the collection.
- // TODO use a temp collection and unset the temp flag on success.
- Collection* targetColl = nullptr;
- {
- CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);
+ // If we get here, we are renaming across databases, so we must copy all the data and
+ // indexes, then remove the source collection.
- WriteUnitOfWork wunit(txn);
+ // Create the target collection. It will be removed if we fail to copy the collection.
+ // TODO use a temp collection and unset the temp flag on success.
+ Collection* targetColl = nullptr;
+ {
+ CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);
- // No logOp necessary because the entire renameCollection command is one logOp.
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- targetColl = targetDB->createCollection(txn, target.ns(), options,
- false); // _id index build with others later.
- txn->setReplicatedWrites(shouldReplicateWrites);
- if (!targetColl) {
- return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
- }
+ WriteUnitOfWork wunit(txn);
- wunit.commit();
+ // No logOp necessary because the entire renameCollection command is one logOp.
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ targetColl = targetDB->createCollection(txn,
+ target.ns(),
+ options,
+ false); // _id index build with others later.
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ if (!targetColl) {
+ return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
}
- // Dismissed on success
- ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());
-
- MultiIndexBlock indexer(txn, targetColl);
- indexer.allowInterruption();
-
- // Copy the index descriptions from the source collection, adjusting the ns field.
- {
- std::vector<BSONObj> indexesToCopy;
- IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
- while (sourceIndIt.more()) {
- const BSONObj currIndex = sourceIndIt.next()->infoObj();
-
- // Process the source index.
- BSONObjBuilder newIndex;
- newIndex.append("ns", target);
- newIndex.appendElementsUnique(currIndex);
- indexesToCopy.push_back(newIndex.obj());
- }
- indexer.init(indexesToCopy);
- }
+ wunit.commit();
+ }
- {
- // Copy over all the data from source collection to target collection.
- auto cursor = sourceColl->getCursor(txn);
- while (auto record = cursor->next()) {
- txn->checkForInterrupt();
-
- const auto obj = record->data.releaseToBson();
-
- WriteUnitOfWork wunit(txn);
- // No logOp necessary because the entire renameCollection command is one logOp.
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- Status status =
- targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
- txn->setReplicatedWrites(shouldReplicateWrites);
- if (!status.isOK())
- return status;
- wunit.commit();
- }
+ // Dismissed on success
+ ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());
+
+ MultiIndexBlock indexer(txn, targetColl);
+ indexer.allowInterruption();
+
+ // Copy the index descriptions from the source collection, adjusting the ns field.
+ {
+ std::vector<BSONObj> indexesToCopy;
+ IndexCatalog::IndexIterator sourceIndIt =
+ sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+ while (sourceIndIt.more()) {
+ const BSONObj currIndex = sourceIndIt.next()->infoObj();
+
+ // Process the source index.
+ BSONObjBuilder newIndex;
+ newIndex.append("ns", target);
+ newIndex.appendElementsUnique(currIndex);
+ indexesToCopy.push_back(newIndex.obj());
}
+ indexer.init(indexesToCopy);
+ }
- Status status = indexer.doneInserting();
- if (!status.isOK())
- return status;
+ {
+ // Copy over all the data from source collection to target collection.
+ auto cursor = sourceColl->getCursor(txn);
+ while (auto record = cursor->next()) {
+ txn->checkForInterrupt();
- {
- // Getting here means we successfully built the target copy. We now remove the
- // source collection and finalize the rename.
- WriteUnitOfWork wunit(txn);
+ const auto obj = record->data.releaseToBson();
+ WriteUnitOfWork wunit(txn);
+ // No logOp necessary because the entire renameCollection command is one logOp.
bool shouldReplicateWrites = txn->writesAreReplicated();
txn->setReplicatedWrites(false);
- Status status = sourceDB->dropCollection(txn, source.ns());
+ Status status = targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
txn->setReplicatedWrites(shouldReplicateWrites);
if (!status.isOK())
return status;
+ wunit.commit();
+ }
+ }
- indexer.commit();
+ Status status = indexer.doneInserting();
+ if (!status.isOK())
+ return status;
- getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn,
- NamespaceString(source),
- NamespaceString(target),
- dropTarget,
- stayTemp);
+ {
+ // Getting here means we successfully built the target copy. We now remove the
+ // source collection and finalize the rename.
+ WriteUnitOfWork wunit(txn);
- wunit.commit();
- }
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ Status status = sourceDB->dropCollection(txn, source.ns());
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ if (!status.isOK())
+ return status;
- targetCollectionDropper.Dismiss();
- return Status::OK();
+ indexer.commit();
+
+ getGlobalServiceContext()->getOpObserver()->onRenameCollection(
+ txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
+
+ wunit.commit();
}
-} // namespace mongo
+ targetCollectionDropper.Dismiss();
+ return Status::OK();
+}
+
+} // namespace mongo
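
The cross-database path above repeatedly suppresses replicated writes so that only the single renameCollection entry is logOp'd. The idiom, pulled out as a sketch (doLocalOnlyWrite is a hypothetical stand-in for the guarded call):

    bool shouldReplicateWrites = txn->writesAreReplicated();
    txn->setReplicatedWrites(false);                  // no per-document oplog entries
    Status status = doLocalOnlyWrite(txn);            // hypothetical helper for the guarded write
    txn->setReplicatedWrites(shouldReplicateWrites);  // restore the previous setting
    if (!status.isOK())
        return status;
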
diff --git a/src/mongo/db/catalog/rename_collection.h b/src/mongo/db/catalog/rename_collection.h
index 1ec0b754779..fb1aa7b5387 100644
--- a/src/mongo/db/catalog/rename_collection.h
+++ b/src/mongo/db/catalog/rename_collection.h
@@ -29,18 +29,18 @@
#include "mongo/base/status.h"
namespace mongo {
- class NamespaceString;
- class OperationContext;
+class NamespaceString;
+class OperationContext;
- /**
- * Renames the collection "source" to "target" and drops the existing collection named "target"
- * iff "dropTarget" is true. "stayTemp" indicates whether a collection should maintain its
- * temporariness.
- */
- Status renameCollection(OperationContext* txn,
- const NamespaceString& source,
- const NamespaceString& target,
- bool dropTarget,
- bool stayTemp);
+/**
+ * Renames the collection "source" to "target" and drops the existing collection named "target"
+ * iff "dropTarget" is true. "stayTemp" indicates whether a collection should maintain its
+ * temporariness.
+ */
+Status renameCollection(OperationContext* txn,
+ const NamespaceString& source,
+ const NamespaceString& target,
+ bool dropTarget,
+ bool stayTemp);
-} // namespace mongo
+} // namespace mongo
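
A hypothetical call site for the declaration above; the namespaces are invented, and only the flag semantics come from the comment:

    NamespaceString source("test.oldColl");
    NamespaceString target("test.newColl");
    Status s = renameCollection(txn,
                                source,
                                target,
                                false,   // dropTarget: fail with NamespaceExists if target already exists
                                false);  // stayTemp: a temporary source becomes permanent under the new name
    if (!s.isOK()) {
        // possible codes seen above: NamespaceNotFound, NamespaceExists, NotMaster, InvalidLength
    }
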
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 8ab9a7995f3..5da6d24c835 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -49,106 +49,104 @@
namespace mongo {
- using logger::LogComponent;
+using logger::LogComponent;
- TSP_DECLARE(ServiceContext::UniqueClient, currentClient)
- TSP_DEFINE(ServiceContext::UniqueClient, currentClient)
+TSP_DECLARE(ServiceContext::UniqueClient, currentClient)
+TSP_DEFINE(ServiceContext::UniqueClient, currentClient)
- void Client::initThreadIfNotAlready(const char* desc) {
- if (currentClient.getMake()->get())
- return;
- initThread(desc);
- }
+void Client::initThreadIfNotAlready(const char* desc) {
+ if (currentClient.getMake()->get())
+ return;
+ initThread(desc);
+}
- void Client::initThreadIfNotAlready() {
- initThreadIfNotAlready(getThreadName().c_str());
- }
+void Client::initThreadIfNotAlready() {
+ initThreadIfNotAlready(getThreadName().c_str());
+}
- void Client::initThread(const char *desc, AbstractMessagingPort *mp) {
- initThread(desc, getGlobalServiceContext(), mp);
- }
+void Client::initThread(const char* desc, AbstractMessagingPort* mp) {
+ initThread(desc, getGlobalServiceContext(), mp);
+}
- /**
- * This must be called whenever a new thread is started, so that active threads can be tracked
- * so each thread has a Client object in TLS.
- */
- void Client::initThread(const char *desc, ServiceContext* service, AbstractMessagingPort *mp) {
- invariant(currentClient.getMake()->get() == nullptr);
-
- std::string fullDesc;
- if (mp != NULL) {
- fullDesc = str::stream() << desc << mp->connectionId();
- }
- else {
- fullDesc = desc;
- }
-
- setThreadName(fullDesc.c_str());
-
- // Create the client obj, attach to thread
- *currentClient.get() = service->makeClient(fullDesc, mp);
+/**
+ * This must be called whenever a new thread is started, so that active threads can be tracked
+ * so each thread has a Client object in TLS.
+ */
+void Client::initThread(const char* desc, ServiceContext* service, AbstractMessagingPort* mp) {
+ invariant(currentClient.getMake()->get() == nullptr);
+
+ std::string fullDesc;
+ if (mp != NULL) {
+ fullDesc = str::stream() << desc << mp->connectionId();
+ } else {
+ fullDesc = desc;
}
- Client::Client(std::string desc,
- ServiceContext* serviceContext,
- AbstractMessagingPort *p)
- : ClientBasic(serviceContext, p),
- _desc(std::move(desc)),
- _threadId(stdx::this_thread::get_id()),
- _connectionId(p ? p->connectionId() : 0) {
- }
+ setThreadName(fullDesc.c_str());
- void Client::reportState(BSONObjBuilder& builder) {
- builder.append("desc", desc());
+ // Create the client obj, attach to thread
+ *currentClient.get() = service->makeClient(fullDesc, mp);
+}
- std::stringstream ss;
- ss << _threadId;
- builder.append("threadId", ss.str());
+Client::Client(std::string desc, ServiceContext* serviceContext, AbstractMessagingPort* p)
+ : ClientBasic(serviceContext, p),
+ _desc(std::move(desc)),
+ _threadId(stdx::this_thread::get_id()),
+ _connectionId(p ? p->connectionId() : 0) {}
- if (_connectionId) {
- builder.appendNumber("connectionId", _connectionId);
- }
+void Client::reportState(BSONObjBuilder& builder) {
+ builder.append("desc", desc());
- if (hasRemote()) {
- builder.append("client", getRemote().toString());
- }
- }
+ std::stringstream ss;
+ ss << _threadId;
+ builder.append("threadId", ss.str());
- ServiceContext::UniqueOperationContext Client::makeOperationContext() {
- return getServiceContext()->makeOperationContext(this);
+ if (_connectionId) {
+ builder.appendNumber("connectionId", _connectionId);
}
- void Client::setOperationContext(OperationContext* txn) {
- // We can only set the OperationContext once before resetting it.
- invariant(txn != NULL && _txn == NULL);
- _txn = txn;
+ if (hasRemote()) {
+ builder.append("client", getRemote().toString());
}
-
- void Client::resetOperationContext() {
- invariant(_txn != NULL);
- _txn = NULL;
+}
+
+ServiceContext::UniqueOperationContext Client::makeOperationContext() {
+ return getServiceContext()->makeOperationContext(this);
+}
+
+void Client::setOperationContext(OperationContext* txn) {
+ // We can only set the OperationContext once before resetting it.
+ invariant(txn != NULL && _txn == NULL);
+ _txn = txn;
+}
+
+void Client::resetOperationContext() {
+ invariant(_txn != NULL);
+ _txn = NULL;
+}
+
+std::string Client::clientAddress(bool includePort) const {
+ if (!hasRemote()) {
+ return "";
}
-
- std::string Client::clientAddress(bool includePort) const {
- if (!hasRemote()) {
- return "";
- }
- if (includePort) {
- return getRemote().toString();
- }
- return getRemote().host();
+ if (includePort) {
+ return getRemote().toString();
}
+ return getRemote().host();
+}
- ClientBasic* ClientBasic::getCurrent() {
- return currentClient.getMake()->get();
- }
+ClientBasic* ClientBasic::getCurrent() {
+ return currentClient.getMake()->get();
+}
- Client& cc() {
- Client* c = currentClient.getMake()->get();
- verify(c);
- return *c;
- }
+Client& cc() {
+ Client* c = currentClient.getMake()->get();
+ verify(c);
+ return *c;
+}
- bool haveClient() { return currentClient.getMake()->get(); }
+bool haveClient() {
+ return currentClient.getMake()->get();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
index 4f27c639b0a..bff49a38ef3 100644
--- a/src/mongo/db/client.h
+++ b/src/mongo/db/client.h
@@ -47,109 +47,122 @@
namespace mongo {
- class Collection;
- class AbstractMessagingPort;
-
- typedef long long ConnectionId;
-
- /** the database's concept of an outside "client" */
- class Client : public ClientBasic {
- public:
- /** each thread which does db operations has a Client object in TLS.
- * call this when your thread starts.
- */
- static void initThread(const char *desc, AbstractMessagingPort *mp = 0);
- static void initThread(const char* desc,
- ServiceContext* serviceContext,
- AbstractMessagingPort* mp);
-
- /**
- * Inits a thread if that thread has not already been init'd, setting the thread name to
- * "desc".
- */
- static void initThreadIfNotAlready(const char* desc);
-
- /**
- * Inits a thread if that thread has not already been init'd, using the existing thread name
- */
- static void initThreadIfNotAlready();
-
- std::string clientAddress(bool includePort = false) const;
- const std::string& desc() const { return _desc; }
-
- void reportState(BSONObjBuilder& builder);
-
- // Ensures stability of the client's OperationContext. When the client is locked,
- // the OperationContext will not disappear.
- void lock() { _lock.lock(); }
- void unlock() { _lock.unlock(); }
-
- /**
- * Makes a new operation context representing an operation on this client. At most
- * one operation context may be in scope on a client at a time.
- */
- ServiceContext::UniqueOperationContext makeOperationContext();
-
- /**
- * Sets the active operation context on this client to "txn", which must be non-NULL.
- *
- * It is an error to call this method if there is already an operation context on Client.
- * It is an error to call this on an unlocked client.
- */
- void setOperationContext(OperationContext* txn);
-
- /**
- * Clears the active operation context on this client.
- *
- * There must already be such a context set on this client.
- * It is an error to call this on an unlocked client.
- */
- void resetOperationContext();
-
- /**
- * Gets the operation context active on this client, or nullptr if there is no such context.
- *
- * It is an error to call this method on an unlocked client, or to use the value returned
- * by this method while the client is not locked.
- */
- OperationContext* getOperationContext() { return _txn; }
-
- // TODO(spencer): SERVER-10228 SERVER-14779 Remove this/move it fully into OperationContext.
- bool isInDirectClient() const { return _inDirectClient; }
- void setInDirectClient(bool newVal) { _inDirectClient = newVal; }
-
- ConnectionId getConnectionId() const { return _connectionId; }
- bool isFromUserConnection() const { return _connectionId > 0; }
-
- private:
- friend class ServiceContext;
- Client(std::string desc,
- ServiceContext* serviceContext,
- AbstractMessagingPort *p = 0);
-
-
- // Description for the client (e.g. conn8)
- const std::string _desc;
-
- // OS id of the thread, which owns this client
- const stdx::thread::id _threadId;
-
- // > 0 for things "conn", 0 otherwise
- const ConnectionId _connectionId;
-
- // Protects the contents of the Client (such as changing the OperationContext, etc)
- mutable SpinLock _lock;
-
- // Whether this client is running as DBDirectClient
- bool _inDirectClient = false;
-
- // If != NULL, then contains the currently active OperationContext
- OperationContext* _txn = nullptr;
- };
-
- /** get the Client object for this thread. */
- Client& cc();
-
- bool haveClient();
+class Collection;
+class AbstractMessagingPort;
+
+typedef long long ConnectionId;
+
+/** the database's concept of an outside "client" */
+class Client : public ClientBasic {
+public:
+ /** each thread which does db operations has a Client object in TLS.
+ * call this when your thread starts.
+ */
+ static void initThread(const char* desc, AbstractMessagingPort* mp = 0);
+ static void initThread(const char* desc,
+ ServiceContext* serviceContext,
+ AbstractMessagingPort* mp);
+
+ /**
+ * Inits a thread if that thread has not already been init'd, setting the thread name to
+ * "desc".
+ */
+ static void initThreadIfNotAlready(const char* desc);
+
+ /**
+ * Inits a thread if that thread has not already been init'd, using the existing thread name
+ */
+ static void initThreadIfNotAlready();
+
+ std::string clientAddress(bool includePort = false) const;
+ const std::string& desc() const {
+ return _desc;
+ }
+
+ void reportState(BSONObjBuilder& builder);
+
+ // Ensures stability of the client's OperationContext. When the client is locked,
+ // the OperationContext will not disappear.
+ void lock() {
+ _lock.lock();
+ }
+ void unlock() {
+ _lock.unlock();
+ }
+
+ /**
+ * Makes a new operation context representing an operation on this client. At most
+ * one operation context may be in scope on a client at a time.
+ */
+ ServiceContext::UniqueOperationContext makeOperationContext();
+
+ /**
+ * Sets the active operation context on this client to "txn", which must be non-NULL.
+ *
+ * It is an error to call this method if there is already an operation context on Client.
+ * It is an error to call this on an unlocked client.
+ */
+ void setOperationContext(OperationContext* txn);
+
+ /**
+ * Clears the active operation context on this client.
+ *
+ * There must already be such a context set on this client.
+ * It is an error to call this on an unlocked client.
+ */
+ void resetOperationContext();
+
+ /**
+ * Gets the operation context active on this client, or nullptr if there is no such context.
+ *
+ * It is an error to call this method on an unlocked client, or to use the value returned
+ * by this method while the client is not locked.
+ */
+ OperationContext* getOperationContext() {
+ return _txn;
+ }
+
+ // TODO(spencer): SERVER-10228 SERVER-14779 Remove this/move it fully into OperationContext.
+ bool isInDirectClient() const {
+ return _inDirectClient;
+ }
+ void setInDirectClient(bool newVal) {
+ _inDirectClient = newVal;
+ }
+
+ ConnectionId getConnectionId() const {
+ return _connectionId;
+ }
+ bool isFromUserConnection() const {
+ return _connectionId > 0;
+ }
+
+private:
+ friend class ServiceContext;
+ Client(std::string desc, ServiceContext* serviceContext, AbstractMessagingPort* p = 0);
+
+
+ // Description for the client (e.g. conn8)
+ const std::string _desc;
+
+ // OS id of the thread, which owns this client
+ const stdx::thread::id _threadId;
+
+ // > 0 for things "conn", 0 otherwise
+ const ConnectionId _connectionId;
+
+ // Protects the contents of the Client (such as changing the OperationContext, etc)
+ mutable SpinLock _lock;
+
+ // Whether this client is running as DBDirectClient
+ bool _inDirectClient = false;
+
+ // If != NULL, then contains the currently active OperationContext
+ OperationContext* _txn = nullptr;
+};
+
+/** get the Client object for this thread. */
+Client& cc();
+bool haveClient();
};
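
A minimal sketch of the per-thread setup these declarations describe, for a background thread with no messaging port (the thread name is illustrative):

    Client::initThreadIfNotAlready("myBackgroundTask");  // creates the TLS Client if this thread lacks one
    Client& client = cc();                                // this thread's Client
    auto txnPtr = client.makeOperationContext();          // at most one may be in scope at a time
    OperationContext* txn = txnPtr.get();
    // ... perform work with txn ...
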
diff --git a/src/mongo/db/client_basic.cpp b/src/mongo/db/client_basic.cpp
index 0f2a9492460..06e619597c3 100644
--- a/src/mongo/db/client_basic.cpp
+++ b/src/mongo/db/client_basic.cpp
@@ -32,9 +32,9 @@
namespace mongo {
- ClientBasic::ClientBasic(ServiceContext* serviceContext, AbstractMessagingPort* messagingPort) :
- _serviceContext(serviceContext), _messagingPort(messagingPort) {}
+ClientBasic::ClientBasic(ServiceContext* serviceContext, AbstractMessagingPort* messagingPort)
+ : _serviceContext(serviceContext), _messagingPort(messagingPort) {}
- ClientBasic::~ClientBasic() = default;
+ClientBasic::~ClientBasic() = default;
} // namespace mongo
diff --git a/src/mongo/db/client_basic.h b/src/mongo/db/client_basic.h
index b28bf0f4c3c..6587227245b 100644
--- a/src/mongo/db/client_basic.h
+++ b/src/mongo/db/client_basic.h
@@ -37,49 +37,56 @@
namespace mongo {
- class ServiceContext;
+class ServiceContext;
- /**
- * this is the base class for Client and ClientInfo
- * Client is for mongod
- * ClientInfo is for mongos
- * They should converge slowly
- * The idea is this has the basic api so that not all code has to be duplicated
- */
- class ClientBasic : public Decorable<ClientBasic> {
- MONGO_DISALLOW_COPYING(ClientBasic);
- public:
- bool getIsLocalHostConnection() {
- if (!hasRemote()) {
- return false;
- }
- return getRemote().isLocalHost();
- }
+/**
+ * this is the base class for Client and ClientInfo
+ * Client is for mongod
+ * ClientInfo is for mongos
+ * They should converge slowly
+ * The idea is this has the basic api so that not all code has to be duplicated
+ */
+class ClientBasic : public Decorable<ClientBasic> {
+ MONGO_DISALLOW_COPYING(ClientBasic);
- bool hasRemote() const { return _messagingPort; }
- HostAndPort getRemote() const {
- verify( _messagingPort );
- return _messagingPort->remote();
+public:
+ bool getIsLocalHostConnection() {
+ if (!hasRemote()) {
+ return false;
}
+ return getRemote().isLocalHost();
+ }
- /**
- * Returns the ServiceContext that owns this client session context.
- */
- ServiceContext* getServiceContext() const { return _serviceContext; }
+ bool hasRemote() const {
+ return _messagingPort;
+ }
+ HostAndPort getRemote() const {
+ verify(_messagingPort);
+ return _messagingPort->remote();
+ }
- /**
- * Returns the AbstractMessagePort to which this client session is bound, if any.
- */
- AbstractMessagingPort * port() const { return _messagingPort; }
+ /**
+ * Returns the ServiceContext that owns this client session context.
+ */
+ ServiceContext* getServiceContext() const {
+ return _serviceContext;
+ }
+
+ /**
+ * Returns the AbstractMessagePort to which this client session is bound, if any.
+ */
+ AbstractMessagingPort* port() const {
+ return _messagingPort;
+ }
- static ClientBasic* getCurrent();
+ static ClientBasic* getCurrent();
- protected:
- ClientBasic(ServiceContext* serviceContext, AbstractMessagingPort* messagingPort);
- ~ClientBasic();
+protected:
+ ClientBasic(ServiceContext* serviceContext, AbstractMessagingPort* messagingPort);
+ ~ClientBasic();
- private:
- ServiceContext* const _serviceContext;
- AbstractMessagingPort* const _messagingPort;
- };
+private:
+ ServiceContext* const _serviceContext;
+ AbstractMessagingPort* const _messagingPort;
+};
}
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index c35c75dd88d..c0b61f99404 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -53,319 +53,322 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- static Counter64 cursorStatsOpen; // gauge
- static Counter64 cursorStatsOpenPinned; // gauge
- static Counter64 cursorStatsOpenNoTimeout; // gauge
- static Counter64 cursorStatsTimedOut;
-
- static ServerStatusMetricField<Counter64> dCursorStatsOpen( "cursor.open.total",
- &cursorStatsOpen );
- static ServerStatusMetricField<Counter64> dCursorStatsOpenPinned( "cursor.open.pinned",
- &cursorStatsOpenPinned );
- static ServerStatusMetricField<Counter64> dCursorStatsOpenNoTimeout( "cursor.open.noTimeout",
- &cursorStatsOpenNoTimeout );
- static ServerStatusMetricField<Counter64> dCursorStatusTimedout( "cursor.timedOut",
- &cursorStatsTimedOut );
-
- MONGO_EXPORT_SERVER_PARAMETER(cursorTimeoutMillis, int, 10 * 60 * 1000 /* 10 minutes */);
-
- long long ClientCursor::totalOpen() {
- return cursorStatsOpen.get();
+using std::string;
+using std::stringstream;
+
+static Counter64 cursorStatsOpen; // gauge
+static Counter64 cursorStatsOpenPinned; // gauge
+static Counter64 cursorStatsOpenNoTimeout; // gauge
+static Counter64 cursorStatsTimedOut;
+
+static ServerStatusMetricField<Counter64> dCursorStatsOpen("cursor.open.total", &cursorStatsOpen);
+static ServerStatusMetricField<Counter64> dCursorStatsOpenPinned("cursor.open.pinned",
+ &cursorStatsOpenPinned);
+static ServerStatusMetricField<Counter64> dCursorStatsOpenNoTimeout("cursor.open.noTimeout",
+ &cursorStatsOpenNoTimeout);
+static ServerStatusMetricField<Counter64> dCursorStatusTimedout("cursor.timedOut",
+ &cursorStatsTimedOut);
+
+MONGO_EXPORT_SERVER_PARAMETER(cursorTimeoutMillis, int, 10 * 60 * 1000 /* 10 minutes */);
+
+long long ClientCursor::totalOpen() {
+ return cursorStatsOpen.get();
+}
+
+ClientCursor::ClientCursor(CursorManager* cursorManager,
+ PlanExecutor* exec,
+ const std::string& ns,
+ int qopts,
+ const BSONObj query,
+ bool isAggCursor)
+ : _ns(ns),
+ _cursorManager(cursorManager),
+ _countedYet(false),
+ _isAggCursor(isAggCursor),
+ _unownedRU(NULL) {
+ _exec.reset(exec);
+ _query = query;
+ _queryOptions = qopts;
+ if (exec->collection()) {
+ invariant(cursorManager == exec->collection()->getCursorManager());
}
-
- ClientCursor::ClientCursor(CursorManager* cursorManager,
- PlanExecutor* exec,
- const std::string& ns,
- int qopts,
- const BSONObj query,
- bool isAggCursor)
- : _ns(ns),
- _cursorManager(cursorManager),
- _countedYet(false),
- _isAggCursor(isAggCursor),
- _unownedRU(NULL) {
-
- _exec.reset(exec);
- _query = query;
- _queryOptions = qopts;
- if (exec->collection()) {
- invariant(cursorManager == exec->collection()->getCursorManager());
- }
- init();
- }
-
- ClientCursor::ClientCursor(const Collection* collection)
- : _ns(collection->ns().ns()),
- _cursorManager(collection->getCursorManager()),
- _countedYet(false),
- _queryOptions(QueryOption_NoCursorTimeout),
- _isAggCursor(false),
- _unownedRU(NULL) {
- init();
+ init();
+}
+
+ClientCursor::ClientCursor(const Collection* collection)
+ : _ns(collection->ns().ns()),
+ _cursorManager(collection->getCursorManager()),
+ _countedYet(false),
+ _queryOptions(QueryOption_NoCursorTimeout),
+ _isAggCursor(false),
+ _unownedRU(NULL) {
+ init();
+}
+
+void ClientCursor::init() {
+ invariant(_cursorManager);
+
+ _isPinned = false;
+ _isNoTimeout = false;
+
+ _idleAgeMillis = 0;
+ _leftoverMaxTimeMicros = 0;
+ _pos = 0;
+
+ if (_queryOptions & QueryOption_NoCursorTimeout) {
+ // cursors normally timeout after an inactivity period to prevent excess memory use
+ // setting this prevents timeout of the cursor in question.
+ _isNoTimeout = true;
+ cursorStatsOpenNoTimeout.increment();
}
- void ClientCursor::init() {
- invariant( _cursorManager );
-
- _isPinned = false;
- _isNoTimeout = false;
-
- _idleAgeMillis = 0;
- _leftoverMaxTimeMicros = 0;
- _pos = 0;
+ _cursorid = _cursorManager->registerCursor(this);
- if (_queryOptions & QueryOption_NoCursorTimeout) {
- // cursors normally timeout after an inactivity period to prevent excess memory use
- // setting this prevents timeout of the cursor in question.
- _isNoTimeout = true;
- cursorStatsOpenNoTimeout.increment();
- }
-
- _cursorid = _cursorManager->registerCursor( this );
+ cursorStatsOpen.increment();
+ _countedYet = true;
+}
- cursorStatsOpen.increment();
- _countedYet = true;
+ClientCursor::~ClientCursor() {
+ if (_pos == -2) {
+ // defensive: destructor called twice
+ wassert(false);
+ return;
}
- ClientCursor::~ClientCursor() {
- if( _pos == -2 ) {
- // defensive: destructor called twice
- wassert(false);
- return;
- }
-
- invariant( !_isPinned ); // Must call unsetPinned() before invoking destructor.
-
- if ( _countedYet ) {
- _countedYet = false;
- cursorStatsOpen.decrement();
- if ( _isNoTimeout )
- cursorStatsOpenNoTimeout.decrement();
- }
-
- if ( _cursorManager ) {
- // this could be null if kill() was killed
- _cursorManager->deregisterCursor( this );
- }
+ invariant(!_isPinned); // Must call unsetPinned() before invoking destructor.
- // defensive:
- _cursorManager = NULL;
- _cursorid = INVALID_CURSOR_ID;
- _pos = -2;
- _isNoTimeout = false;
+ if (_countedYet) {
+ _countedYet = false;
+ cursorStatsOpen.decrement();
+ if (_isNoTimeout)
+ cursorStatsOpenNoTimeout.decrement();
}
- void ClientCursor::kill() {
- if ( _exec.get() )
- _exec->kill("cursor killed");
-
- _cursorManager = NULL;
+ if (_cursorManager) {
+ // this could be null if kill() was killed
+ _cursorManager->deregisterCursor(this);
}
- //
- // Timing and timeouts
- //
-
- bool ClientCursor::shouldTimeout(int millis) {
- _idleAgeMillis += millis;
- if (_isNoTimeout || _isPinned) {
- return false;
- }
- return _idleAgeMillis > cursorTimeoutMillis;
- }
+ // defensive:
+ _cursorManager = NULL;
+ _cursorid = INVALID_CURSOR_ID;
+ _pos = -2;
+ _isNoTimeout = false;
+}
- void ClientCursor::setIdleTime( int millis ) {
- _idleAgeMillis = millis;
- }
+void ClientCursor::kill() {
+ if (_exec.get())
+ _exec->kill("cursor killed");
- void ClientCursor::updateSlaveLocation(OperationContext* txn) {
- if (_slaveReadTill.isNull())
- return;
+ _cursorManager = NULL;
+}
- verify(str::startsWith(_ns.c_str(), "local.oplog."));
+//
+// Timing and timeouts
+//
- Client* c = txn->getClient();
- verify(c);
- OID rid = repl::ReplClientInfo::forClient(c).getRemoteID();
- if (!rid.isSet())
- return;
+bool ClientCursor::shouldTimeout(int millis) {
+ _idleAgeMillis += millis;
+ if (_isNoTimeout || _isPinned) {
+ return false;
+ }
+ return _idleAgeMillis > cursorTimeoutMillis;
+}
+
+void ClientCursor::setIdleTime(int millis) {
+ _idleAgeMillis = millis;
+}
+
+void ClientCursor::updateSlaveLocation(OperationContext* txn) {
+ if (_slaveReadTill.isNull())
+ return;
+
+ verify(str::startsWith(_ns.c_str(), "local.oplog."));
+
+ Client* c = txn->getClient();
+ verify(c);
+ OID rid = repl::ReplClientInfo::forClient(c).getRemoteID();
+ if (!rid.isSet())
+ return;
+
+ repl::getGlobalReplicationCoordinator()->setLastOptimeForSlave(rid, _slaveReadTill);
+}
+
+//
+// Storage engine state for getMore.
+//
+
+void ClientCursor::setUnownedRecoveryUnit(RecoveryUnit* ru) {
+ invariant(!_unownedRU);
+ invariant(!_ownedRU.get());
+ _unownedRU = ru;
+}
+
+RecoveryUnit* ClientCursor::getUnownedRecoveryUnit() const {
+ return _unownedRU;
+}
+
+void ClientCursor::setOwnedRecoveryUnit(RecoveryUnit* ru) {
+ invariant(!_unownedRU);
+ invariant(!_ownedRU.get());
+ _ownedRU.reset(ru);
+}
+
+RecoveryUnit* ClientCursor::releaseOwnedRecoveryUnit() {
+ return _ownedRU.release();
+}
+
+//
+// Pin methods
+//
+
+ClientCursorPin::ClientCursorPin(CursorManager* cursorManager, long long cursorid) : _cursor(NULL) {
+ cursorStatsOpenPinned.increment();
+ _cursor = cursorManager->find(cursorid, true);
+}
+
+ClientCursorPin::~ClientCursorPin() {
+ cursorStatsOpenPinned.decrement();
+ release();
+}
+
+void ClientCursorPin::release() {
+ if (!_cursor)
+ return;
+
+ invariant(_cursor->isPinned());
+
+ if (_cursor->cursorManager() == NULL) {
+ // The ClientCursor was killed while we had it. Therefore, it is our responsibility to
+ // kill it.
+ deleteUnderlying();
+ } else {
+ // Unpin the cursor under the collection cursor manager lock.
+ _cursor->cursorManager()->unpin(_cursor);
+ }
- repl::getGlobalReplicationCoordinator()->setLastOptimeForSlave(rid, _slaveReadTill);
+ _cursor = NULL;
+}
+
+void ClientCursorPin::deleteUnderlying() {
+ invariant(_cursor);
+ invariant(_cursor->isPinned());
+ // Note the following subtleties of this method's implementation:
+ // - We must unpin the cursor before destruction, since it is an error to destroy a pinned
+ // cursor.
+ // - In addition, we must deregister the cursor before unpinning, since it is an
+ // error to unpin a registered cursor without holding the cursor manager lock (note that
+ // we can't simply unpin with the cursor manager lock here, since we need to guarantee
+ // exclusive ownership of the cursor when we are deleting it).
+ if (_cursor->cursorManager()) {
+ _cursor->cursorManager()->deregisterCursor(_cursor);
+ _cursor->kill();
}
+ _cursor->unsetPinned();
+ delete _cursor;
+ _cursor = NULL;
+}
- //
- // Storage engine state for getMore.
- //
+ClientCursor* ClientCursorPin::c() const {
+ return _cursor;
+}
- void ClientCursor::setUnownedRecoveryUnit(RecoveryUnit* ru) {
- invariant(!_unownedRU);
- invariant(!_ownedRU.get());
- _unownedRU = ru;
- }
+//
+// ClientCursorMonitor
+//
- RecoveryUnit* ClientCursor::getUnownedRecoveryUnit() const {
- return _unownedRU;
+/**
+ * Thread for timing out old cursors
+ */
+class ClientCursorMonitor : public BackgroundJob {
+public:
+ std::string name() const {
+ return "ClientCursorMonitor";
}
- void ClientCursor::setOwnedRecoveryUnit(RecoveryUnit* ru) {
- invariant(!_unownedRU);
- invariant(!_ownedRU.get());
- _ownedRU.reset(ru);
+ void run() {
+ Client::initThread("clientcursormon");
+ Timer t;
+ const int Secs = 4;
+ while (!inShutdown()) {
+ {
+ OperationContextImpl txn;
+ cursorStatsTimedOut.increment(
+ CursorManager::timeoutCursorsGlobal(&txn, t.millisReset()));
+ }
+ sleepsecs(Secs);
+ }
}
-
- RecoveryUnit* ClientCursor::releaseOwnedRecoveryUnit() {
- return _ownedRU.release();
+};
+
+namespace {
+// Only one instance of the ClientCursorMonitor exists
+ClientCursorMonitor clientCursorMonitor;
+
+void _appendCursorStats(BSONObjBuilder& b) {
+ b.append("note", "deprecated, use server status metrics");
+ b.appendNumber("clientCursors_size", cursorStatsOpen.get());
+ b.appendNumber("totalOpen", cursorStatsOpen.get());
+ b.appendNumber("pinned", cursorStatsOpenPinned.get());
+ b.appendNumber("totalNoTimeout", cursorStatsOpenNoTimeout.get());
+ b.appendNumber("timedOut", cursorStatsTimedOut.get());
+}
+}
+
+void startClientCursorMonitor() {
+ clientCursorMonitor.go();
+}
+
+// QUESTION: Restrict to the namespace from which this command was issued?
+// Alternatively, make this command admin-only?
+// TODO: remove this for 3.0
+class CmdCursorInfo : public Command {
+public:
+ CmdCursorInfo() : Command("cursorInfo") {}
+ virtual bool slaveOk() const {
+ return true;
}
-
- //
- // Pin methods
- //
-
- ClientCursorPin::ClientCursorPin( CursorManager* cursorManager, long long cursorid )
- : _cursor( NULL ) {
- cursorStatsOpenPinned.increment();
- _cursor = cursorManager->find( cursorid, true );
+ virtual void help(stringstream& help) const {
+ help << " example: { cursorInfo : 1 }, deprecated";
}
-
- ClientCursorPin::~ClientCursorPin() {
- cursorStatsOpenPinned.decrement();
- release();
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
}
-
- void ClientCursorPin::release() {
- if ( !_cursor )
- return;
-
- invariant( _cursor->isPinned() );
-
- if ( _cursor->cursorManager() == NULL ) {
- // The ClientCursor was killed while we had it. Therefore, it is our responsibility to
- // kill it.
- deleteUnderlying();
- }
- else {
- // Unpin the cursor under the collection cursor manager lock.
- _cursor->cursorManager()->unpin( _cursor );
- }
-
- _cursor = NULL;
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::cursorInfo);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
-
- void ClientCursorPin::deleteUnderlying() {
- invariant( _cursor );
- invariant( _cursor->isPinned() );
- // Note the following subtleties of this method's implementation:
- // - We must unpin the cursor before destruction, since it is an error to destroy a pinned
- // cursor.
- // - In addition, we must deregister the cursor before unpinning, since it is an
- // error to unpin a registered cursor without holding the cursor manager lock (note that
- // we can't simply unpin with the cursor manager lock here, since we need to guarantee
- // exclusive ownership of the cursor when we are deleting it).
- if ( _cursor->cursorManager() ) {
- _cursor->cursorManager()->deregisterCursor( _cursor );
- _cursor->kill();
- }
- _cursor->unsetPinned();
- delete _cursor;
- _cursor = NULL;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ _appendCursorStats(result);
+ return true;
}
+} cmdCursorInfo;
- ClientCursor* ClientCursorPin::c() const {
- return _cursor;
- }
+//
+// cursors stats.
+//
- //
- // ClientCursorMonitor
- //
-
- /**
- * Thread for timing out old cursors
- */
- class ClientCursorMonitor : public BackgroundJob {
- public:
- std::string name() const { return "ClientCursorMonitor"; }
-
- void run() {
- Client::initThread("clientcursormon");
- Timer t;
- const int Secs = 4;
- while (!inShutdown()) {
- {
- OperationContextImpl txn;
- cursorStatsTimedOut.increment(
- CursorManager::timeoutCursorsGlobal(&txn, t.millisReset()));
- }
- sleepsecs(Secs);
- }
- }
- };
-
- namespace {
- // Only one instance of the ClientCursorMonitor exists
- ClientCursorMonitor clientCursorMonitor;
-
- void _appendCursorStats( BSONObjBuilder& b ) {
- b.append( "note" , "deprecated, use server status metrics" );
- b.appendNumber("clientCursors_size", cursorStatsOpen.get() );
- b.appendNumber("totalOpen", cursorStatsOpen.get() );
- b.appendNumber("pinned", cursorStatsOpenPinned.get() );
- b.appendNumber("totalNoTimeout", cursorStatsOpenNoTimeout.get() );
- b.appendNumber("timedOut" , cursorStatsTimedOut.get());
- }
+class CursorServerStats : public ServerStatusSection {
+public:
+ CursorServerStats() : ServerStatusSection("cursors") {}
+ virtual bool includeByDefault() const {
+ return true;
}
- void startClientCursorMonitor() {
- clientCursorMonitor.go();
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder b;
+ _appendCursorStats(b);
+ return b.obj();
}
- // QUESTION: Restrict to the namespace from which this command was issued?
- // Alternatively, make this command admin-only?
- // TODO: remove this for 3.0
- class CmdCursorInfo : public Command {
- public:
- CmdCursorInfo() : Command( "cursorInfo" ) {}
- virtual bool slaveOk() const { return true; }
- virtual void help( stringstream& help ) const {
- help << " example: { cursorInfo : 1 }, deprecated";
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::cursorInfo);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- _appendCursorStats( result );
- return true;
- }
- } cmdCursorInfo;
-
- //
- // cursors stats.
- //
-
- class CursorServerStats : public ServerStatusSection {
- public:
- CursorServerStats() : ServerStatusSection( "cursors" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
- BSONObjBuilder b;
- _appendCursorStats( b );
- return b.obj();
- }
-
- } cursorServerStats;
+} cursorServerStats;
-} // namespace mongo
+} // namespace mongo
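
A sketch of the pin lifecycle implemented above, assuming 'collection' and 'cursorid' come from an earlier operation:

    {
        ClientCursorPin pin(collection->getCursorManager(), cursorid);
        if (ClientCursor* cursor = pin.c()) {   // NULL if the id was not found
            // drive cursor->getExecutor() to produce the next batch ...
        }
        // ~ClientCursorPin() calls release(): the cursor is unpinned, or deleted
        // if it was killed while pinned.
    }
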
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index d2482ffbfa9..a145802e985 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -36,306 +36,344 @@
namespace mongo {
- class ClientCursor;
- class Collection;
- class CursorManager;
- class RecoveryUnit;
+class ClientCursor;
+class Collection;
+class CursorManager;
+class RecoveryUnit;
- typedef long long CursorId; /* passed to the client so it can send back on getMore */
- static const CursorId INVALID_CURSOR_ID = -1; // But see SERVER-5726.
+typedef long long CursorId; /* passed to the client so it can send back on getMore */
+static const CursorId INVALID_CURSOR_ID = -1; // But see SERVER-5726.
+/**
+ * ClientCursor is a wrapper that represents a cursorid from our database application's
+ * perspective.
+ */
+class ClientCursor {
+ MONGO_DISALLOW_COPYING(ClientCursor);
+
+public:
/**
- * ClientCursor is a wrapper that represents a cursorid from our database application's
- * perspective.
+ * This ClientCursor constructor creates a cursorid that can be used with getMore and
+ * killCursors. "cursorManager" is the object that will manage the lifetime of this
+ * cursor, and "ns" is the namespace string that should be associated with this cursor (e.g.
+ * "test.foo", "test.$cmd.listCollections", etc).
*/
- class ClientCursor {
- MONGO_DISALLOW_COPYING(ClientCursor);
- public:
- /**
- * This ClientCursor constructor creates a cursorid that can be used with getMore and
- * killCursors. "cursorManager" is the object that will manage the lifetime of this
- * cursor, and "ns" is the namespace string that should be associated with this cursor (e.g.
- * "test.foo", "test.$cmd.listCollections", etc).
- */
- ClientCursor(CursorManager* cursorManager,
- PlanExecutor* exec,
- const std::string& ns,
- int qopts = 0,
- const BSONObj query = BSONObj(),
- bool isAggCursor = false);
-
- /**
- * This ClientCursor is used to track sharding state for the given collection.
- */
- explicit ClientCursor(const Collection* collection);
-
- //
- // Basic accessors
- //
-
- CursorId cursorid() const { return _cursorid; }
- std::string ns() const { return _ns; }
- CursorManager* cursorManager() const { return _cursorManager; }
- bool isAggCursor() const { return _isAggCursor; }
-
- //
- // Pinning functionality.
- //
-
- /**
- * Marks this ClientCursor as in use. unsetPinned() must be called before the destructor of
- * this ClientCursor is invoked.
- */
- void setPinned() { _isPinned = true; }
-
- /**
- * Marks this ClientCursor as no longer in use.
- */
- void unsetPinned() { _isPinned = false; }
-
- bool isPinned() const { return _isPinned; }
-
- /**
- * This is called when someone is dropping a collection or something else that
- * goes through killing cursors.
-         * It removes the responsibility of de-registering from ClientCursor.
- * Responsibility for deleting the ClientCursor doesn't change from this call
- * see PlanExecutor::kill.
- */
- void kill();
-
- //
- // Timing and timeouts
- //
-
- /**
- * @param millis amount of idle passed time since last call
- * note called outside of locks (other than ccmutex) so care must be exercised
- */
- bool shouldTimeout( int millis );
- void setIdleTime( int millis );
- int idleTime() const { return _idleAgeMillis; }
-
- uint64_t getLeftoverMaxTimeMicros() const { return _leftoverMaxTimeMicros; }
- void setLeftoverMaxTimeMicros( uint64_t leftoverMaxTimeMicros ) {
- _leftoverMaxTimeMicros = leftoverMaxTimeMicros;
- }
-
- //
- // Replication-related stuff. TODO: Document and clean.
- //
-
- // Used to report replication position only in master-slave,
- // so we keep them as TimeStamp rather than OpTime.
- void updateSlaveLocation(OperationContext* txn);
- void slaveReadTill( const Timestamp& t ) { _slaveReadTill = t; }
- /** Just for testing. */
- Timestamp getSlaveReadTill() const { return _slaveReadTill; }
-
- //
- // Query-specific functionality that may be adapted for the PlanExecutor.
- //
-
- PlanExecutor* getExecutor() const { return _exec.get(); }
- int queryOptions() const { return _queryOptions; }
- const BSONObj& getQuery() const { return _query; }
-
- // Used by ops/query.cpp to stash how many results have been returned by a query.
- int pos() const { return _pos; }
- void incPos(int n) { _pos += n; }
- void setPos(int n) { _pos = n; }
-
- static long long totalOpen();
-
- //
- // Storage engine state for getMore.
- //
-
- bool hasRecoveryUnit() const { return _ownedRU.get() || _unownedRU; }
-
- /**
- *
- * If a ClientCursor is created via DBDirectClient, it uses the same storage engine
- * context as the DBDirectClient caller. We store this context in _unownedRU. We use
- * this to verify that all further callers use the same RecoveryUnit.
- *
- * Once a ClientCursor has an unowned RecoveryUnit, it will always have one.
- *
- * Sets the unowned RecoveryUnit to 'ru'. Does NOT take ownership of the pointer.
- */
- void setUnownedRecoveryUnit(RecoveryUnit* ru);
-
- /**
- * Return the unowned RecoveryUnit. 'this' does not own pointer and therefore cannot
- * transfer ownership.
- */
- RecoveryUnit* getUnownedRecoveryUnit() const;
-
- /**
- * If a ClientCursor is created via a client request, we bind its lifetime to the
-         * ClientCursor's by storing it in _ownedRU. In order to execute the query over repeated
- * network requests, we have to keep the execution state around.
- */
-
- /**
- * Set the owned recovery unit to 'ru'. Takes ownership of it. If there is a previous
- * owned recovery unit, it is deleted.
- */
- void setOwnedRecoveryUnit(RecoveryUnit* ru);
-
- /**
- * Returns the owned recovery unit. Ownership is transferred to the caller.
- */
- RecoveryUnit* releaseOwnedRecoveryUnit();
-
- private:
- friend class CursorManager;
- friend class ClientCursorPin;
-
- /**
- * Only friends are allowed to destroy ClientCursor objects.
- */
- ~ClientCursor();
-
- /**
- * Initialization common between both constructors for the ClientCursor. The database must
- * be stable when this is called, because cursors hang off the collection.
- */
- void init();
-
- //
- // ClientCursor-specific data, independent of the underlying execution type.
- //
-
- // The ID of the ClientCursor.
- CursorId _cursorid;
-
- // The namespace we're operating on.
- std::string _ns;
-
- CursorManager* _cursorManager;
-
- // if we've added it to the total open counter yet
- bool _countedYet;
-
- // How many objects have been returned by the find() so far?
- int _pos;
-
- // If this cursor was created by a find operation, '_query' holds the query predicate for
- // the find. If this cursor was created by a command (e.g. the aggregate command), then
- // '_query' holds the command specification received from the client.
- BSONObj _query;
-
- // See the QueryOptions enum in dbclient.h
- int _queryOptions;
-
- // Is this ClientCursor backed by an aggregation pipeline? Defaults to false.
- //
- // Agg executors differ from others in that they manage their own locking internally and
- // should not be killed or destroyed when the underlying collection is deleted.
- //
- // Note: This should *not* be set for the internal cursor used as input to an aggregation.
- bool _isAggCursor;
-
- // Is this cursor in use? Defaults to false.
- bool _isPinned;
-
- // Is the "no timeout" flag set on this cursor? If false, this cursor may be targeted for
- // deletion after an interval of inactivity. Defaults to false.
- bool _isNoTimeout;
-
- // The replication position only used in master-slave.
- Timestamp _slaveReadTill;
-
- // How long has the cursor been idle?
- int _idleAgeMillis;
-
- // TODO: Document.
- uint64_t _leftoverMaxTimeMicros;
-
- // For chunks that are being migrated, there is a period of time when that chunks data is in
- // two shards, the donor and the receiver one. That data is picked up by a cursor on the
- // receiver side, even before the migration was decided. The CollectionMetadata allow one
- // to inquiry if any given document of the collection belongs indeed to this shard or if it
- // is coming from (or a vestige of) an ongoing migration.
- CollectionMetadataPtr _collMetadata;
-
- // Only one of these is not-NULL.
- RecoveryUnit* _unownedRU;
- std::unique_ptr<RecoveryUnit> _ownedRU;
- // NOTE: _ownedRU must come before _exec, because _ownedRU must outlive _exec.
- // The storage engine can have resources in the PlanExecutor that rely on
- // the RecoveryUnit being alive.
-
- //
- // The underlying execution machinery.
- //
- std::unique_ptr<PlanExecutor> _exec;
- };
+ ClientCursor(CursorManager* cursorManager,
+ PlanExecutor* exec,
+ const std::string& ns,
+ int qopts = 0,
+ const BSONObj query = BSONObj(),
+ bool isAggCursor = false);
+
+ /**
+ * This ClientCursor is used to track sharding state for the given collection.
+ */
+ explicit ClientCursor(const Collection* collection);
+
+ //
+ // Basic accessors
+ //
+
+ CursorId cursorid() const {
+ return _cursorid;
+ }
+ std::string ns() const {
+ return _ns;
+ }
+ CursorManager* cursorManager() const {
+ return _cursorManager;
+ }
+ bool isAggCursor() const {
+ return _isAggCursor;
+ }
+
+ //
+ // Pinning functionality.
+ //
+
+ /**
+ * Marks this ClientCursor as in use. unsetPinned() must be called before the destructor of
+ * this ClientCursor is invoked.
+ */
+ void setPinned() {
+ _isPinned = true;
+ }
+
+ /**
+ * Marks this ClientCursor as no longer in use.
+ */
+ void unsetPinned() {
+ _isPinned = false;
+ }
+
+ bool isPinned() const {
+ return _isPinned;
+ }
+
+ /**
+     * This is called when someone is dropping a collection or doing something else that
+     * goes through killing cursors.
+     * It removes the responsibility of de-registering from the ClientCursor.
+     * Responsibility for deleting the ClientCursor doesn't change as a result of this call;
+     * see PlanExecutor::kill.
+ */
+ void kill();
+
+ //
+ // Timing and timeouts
+ //
+
+ /**
+     * @param millis the amount of idle time that has passed since the last call
+     * Note: called outside of locks (other than ccmutex), so care must be exercised.
+ */
+ bool shouldTimeout(int millis);
+ void setIdleTime(int millis);
+ int idleTime() const {
+ return _idleAgeMillis;
+ }
+
+ uint64_t getLeftoverMaxTimeMicros() const {
+ return _leftoverMaxTimeMicros;
+ }
+ void setLeftoverMaxTimeMicros(uint64_t leftoverMaxTimeMicros) {
+ _leftoverMaxTimeMicros = leftoverMaxTimeMicros;
+ }
+
+ //
+ // Replication-related stuff. TODO: Document and clean.
+ //
+
+    // Used to report the replication position only in master-slave,
+    // so we keep it as a Timestamp rather than an OpTime.
+ void updateSlaveLocation(OperationContext* txn);
+ void slaveReadTill(const Timestamp& t) {
+ _slaveReadTill = t;
+ }
+ /** Just for testing. */
+ Timestamp getSlaveReadTill() const {
+ return _slaveReadTill;
+ }
+
+ //
+ // Query-specific functionality that may be adapted for the PlanExecutor.
+ //
+
+ PlanExecutor* getExecutor() const {
+ return _exec.get();
+ }
+ int queryOptions() const {
+ return _queryOptions;
+ }
+ const BSONObj& getQuery() const {
+ return _query;
+ }
+
+ // Used by ops/query.cpp to stash how many results have been returned by a query.
+ int pos() const {
+ return _pos;
+ }
+ void incPos(int n) {
+ _pos += n;
+ }
+ void setPos(int n) {
+ _pos = n;
+ }
+
+ static long long totalOpen();
+
+ //
+ // Storage engine state for getMore.
+ //
+
+ bool hasRecoveryUnit() const {
+ return _ownedRU.get() || _unownedRU;
+ }
/**
- * ClientCursorPin is an RAII class that manages the pinned state of a ClientCursor.
- * ClientCursorPin objects pin the given cursor upon construction, and release the pin upon
- * destruction.
- *
- * A pin extends the lifetime of a ClientCursor object until the pin's release. Pinned
- * ClientCursor objects cannot not be killed due to inactivity, and cannot be killed by user
- * kill requests. When a CursorManager is destroyed (e.g. by a collection drop), ownership of
- * any still-pinned ClientCursor objects is transferred to their managing ClientCursorPin
- * objects.
*
- * Example usage:
- * {
- * ClientCursorPin pin(cursorManager, cursorid);
- * ClientCursor* cursor = pin.c();
- * if (cursor) {
- * // Use cursor.
- * }
- * // Pin automatically released on block exit.
- * }
+ * If a ClientCursor is created via DBDirectClient, it uses the same storage engine
+ * context as the DBDirectClient caller. We store this context in _unownedRU. We use
+ * this to verify that all further callers use the same RecoveryUnit.
*
- * Clients that wish to access ClientCursor objects owned by collection cursor managers must
- * hold the collection lock during pin acquisition and pin release. This guards from a
- * collection drop (which requires an exclusive lock on the collection) occurring concurrently
- * with the pin request or unpin request.
+ * Once a ClientCursor has an unowned RecoveryUnit, it will always have one.
*
- * Clients that wish to access ClientCursor objects owned by the global cursor manager need not
- * hold any locks; the global cursor manager can only be destroyed by a process exit.
+ * Sets the unowned RecoveryUnit to 'ru'. Does NOT take ownership of the pointer.
+ */
+ void setUnownedRecoveryUnit(RecoveryUnit* ru);
+
+ /**
+     * Returns the unowned RecoveryUnit. 'this' does not own the pointer and therefore cannot
+     * transfer ownership.
+ */
+ RecoveryUnit* getUnownedRecoveryUnit() const;
+
+ /**
+     * If a ClientCursor is created via a client request, we bind the RecoveryUnit's lifetime
+     * to the ClientCursor's by storing it in _ownedRU. In order to execute the query over
+     * repeated network requests, we have to keep the execution state around.
+ */
+
+ /**
+ * Set the owned recovery unit to 'ru'. Takes ownership of it. If there is a previous
+ * owned recovery unit, it is deleted.
+ */
+ void setOwnedRecoveryUnit(RecoveryUnit* ru);
+
+ /**
+ * Returns the owned recovery unit. Ownership is transferred to the caller.
+ */
+ RecoveryUnit* releaseOwnedRecoveryUnit();
+
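+    // A hedged usage sketch (not from the original source; 'runGetMore' is an illustrative
+    // helper, not a real API). For the owned case, a getMore-style caller takes the stashed
+    // RecoveryUnit back, runs the plan, and then stashes it on the cursor again:
+    //
+    //     RecoveryUnit* ru = cursor->releaseOwnedRecoveryUnit();  // caller now owns 'ru'
+    //     runGetMore(ru);
+    //     cursor->setOwnedRecoveryUnit(ru);  // ownership passes back to the cursor
+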
+private:
+ friend class CursorManager;
+ friend class ClientCursorPin;
+
+ /**
+ * Only friends are allowed to destroy ClientCursor objects.
+ */
+ ~ClientCursor();
+
+ /**
+ * Initialization common between both constructors for the ClientCursor. The database must
+ * be stable when this is called, because cursors hang off the collection.
*/
- class ClientCursorPin {
- MONGO_DISALLOW_COPYING(ClientCursorPin);
- public:
- /**
- * Asks "cursorManager" to set a pin on the ClientCursor associated with "cursorid". If no
- * such cursor exists, does nothing. If the cursor is already pinned, throws a
- * UserException.
- */
- ClientCursorPin( CursorManager* cursorManager, long long cursorid );
-
- /**
- * Calls release().
- */
- ~ClientCursorPin();
-
- /**
- * Releases the pin. It does not delete the underlying cursor unless ownership has passed
- * to us after kill. Turns into a no-op if release() or deleteUnderlying() have already
- * been called on this pin.
- */
- void release();
-
- /**
- * Deletes the underlying cursor. Cannot be called if release() or deleteUnderlying() have
- * already been called on this pin.
- */
- void deleteUnderlying();
-
- ClientCursor *c() const;
-
- private:
- ClientCursor* _cursor;
- };
-
- void startClientCursorMonitor();
-
-} // namespace mongo
+ void init();
+
+ //
+ // ClientCursor-specific data, independent of the underlying execution type.
+ //
+
+ // The ID of the ClientCursor.
+ CursorId _cursorid;
+
+ // The namespace we're operating on.
+ std::string _ns;
+
+ CursorManager* _cursorManager;
+
+    // Whether we've added this cursor to the total open counter yet.
+ bool _countedYet;
+
+ // How many objects have been returned by the find() so far?
+ int _pos;
+
+ // If this cursor was created by a find operation, '_query' holds the query predicate for
+ // the find. If this cursor was created by a command (e.g. the aggregate command), then
+ // '_query' holds the command specification received from the client.
+ BSONObj _query;
+
+ // See the QueryOptions enum in dbclient.h
+ int _queryOptions;
+
+ // Is this ClientCursor backed by an aggregation pipeline? Defaults to false.
+ //
+ // Agg executors differ from others in that they manage their own locking internally and
+ // should not be killed or destroyed when the underlying collection is deleted.
+ //
+ // Note: This should *not* be set for the internal cursor used as input to an aggregation.
+ bool _isAggCursor;
+
+ // Is this cursor in use? Defaults to false.
+ bool _isPinned;
+
+ // Is the "no timeout" flag set on this cursor? If false, this cursor may be targeted for
+ // deletion after an interval of inactivity. Defaults to false.
+ bool _isNoTimeout;
+
+ // The replication position only used in master-slave.
+ Timestamp _slaveReadTill;
+
+ // How long has the cursor been idle?
+ int _idleAgeMillis;
+
+ // TODO: Document.
+ uint64_t _leftoverMaxTimeMicros;
+
+    // For chunks that are being migrated, there is a period of time when that chunk's data is
+    // in two shards: the donor and the receiver. That data is picked up by a cursor on the
+    // receiver side, even before the migration has been decided. The CollectionMetadata allows
+    // one to check whether any given document of the collection actually belongs to this shard
+    // or whether it is coming from (or is a vestige of) an ongoing migration.
+ CollectionMetadataPtr _collMetadata;
+
+    // Only one of these is non-NULL.
+ RecoveryUnit* _unownedRU;
+ std::unique_ptr<RecoveryUnit> _ownedRU;
+ // NOTE: _ownedRU must come before _exec, because _ownedRU must outlive _exec.
+ // The storage engine can have resources in the PlanExecutor that rely on
+ // the RecoveryUnit being alive.
+
+ //
+ // The underlying execution machinery.
+ //
+ std::unique_ptr<PlanExecutor> _exec;
+};
+
+/**
+ * ClientCursorPin is an RAII class that manages the pinned state of a ClientCursor.
+ * ClientCursorPin objects pin the given cursor upon construction, and release the pin upon
+ * destruction.
+ *
+ * A pin extends the lifetime of a ClientCursor object until the pin's release. Pinned
+ * ClientCursor objects cannot be killed due to inactivity, and cannot be killed by user
+ * kill requests. When a CursorManager is destroyed (e.g. by a collection drop), ownership of
+ * any still-pinned ClientCursor objects is transferred to their managing ClientCursorPin
+ * objects.
+ *
+ * Example usage:
+ * {
+ * ClientCursorPin pin(cursorManager, cursorid);
+ * ClientCursor* cursor = pin.c();
+ * if (cursor) {
+ * // Use cursor.
+ * }
+ * // Pin automatically released on block exit.
+ * }
+ *
+ * Clients that wish to access ClientCursor objects owned by collection cursor managers must
+ * hold the collection lock during pin acquisition and pin release. This guards against a
+ * collection drop (which requires an exclusive lock on the collection) occurring concurrently
+ * with the pin request or unpin request.
+ *
+ * Clients that wish to access ClientCursor objects owned by the global cursor manager need not
+ * hold any locks; the global cursor manager can only be destroyed by a process exit.
+ */
+class ClientCursorPin {
+ MONGO_DISALLOW_COPYING(ClientCursorPin);
+
+public:
+ /**
+ * Asks "cursorManager" to set a pin on the ClientCursor associated with "cursorid". If no
+ * such cursor exists, does nothing. If the cursor is already pinned, throws a
+ * UserException.
+ */
+ ClientCursorPin(CursorManager* cursorManager, long long cursorid);
+
+ /**
+ * Calls release().
+ */
+ ~ClientCursorPin();
+
+ /**
+ * Releases the pin. It does not delete the underlying cursor unless ownership has passed
+ * to us after kill. Turns into a no-op if release() or deleteUnderlying() have already
+ * been called on this pin.
+ */
+ void release();
+
+ /**
+ * Deletes the underlying cursor. Cannot be called if release() or deleteUnderlying() have
+ * already been called on this pin.
+ */
+ void deleteUnderlying();
+
+ ClientCursor* c() const;
+
+private:
+ ClientCursor* _cursor;
+};
+
+void startClientCursorMonitor();
+
+} // namespace mongo
diff --git a/src/mongo/db/clientlistplugin.cpp b/src/mongo/db/clientlistplugin.cpp
index aaffdd83492..62676395328 100644
--- a/src/mongo/db/clientlistplugin.cpp
+++ b/src/mongo/db/clientlistplugin.cpp
@@ -46,205 +46,198 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
namespace {
- class ClientListPlugin : public WebStatusPlugin {
- public:
- ClientListPlugin() : WebStatusPlugin( "clients" , 20 ) {}
- virtual void init() {}
-
- virtual void run(OperationContext* txn, std::stringstream& ss ) {
- using namespace html;
-
- ss << "\n<table border=1 cellpadding=2 cellspacing=0>";
- ss << "<tr align='left'>"
- << th( a("", "Connections to the database, both internal and external.", "Client") )
- << th( a("http://dochub.mongodb.org/core/viewingandterminatingcurrentoperation", "", "OpId") )
- << "<th>Locking</th>"
- << "<th>Waiting</th>"
- << "<th>SecsRunning</th>"
- << "<th>Op</th>"
- << th( a("http://dochub.mongodb.org/core/whatisanamespace", "", "Namespace") )
- << "<th>Query</th>"
- << "<th>client</th>"
- << "<th>msg</th>"
- << "<th>progress</th>"
-
- << "</tr>\n";
-
- _processAllClients(txn->getClient()->getServiceContext(), ss);
-
- ss << "</table>\n";
- }
+class ClientListPlugin : public WebStatusPlugin {
+public:
+ ClientListPlugin() : WebStatusPlugin("clients", 20) {}
+ virtual void init() {}
- private:
+ virtual void run(OperationContext* txn, std::stringstream& ss) {
+ using namespace html;
- static void _processAllClients(ServiceContext* service, std::stringstream& ss) {
- using namespace html;
+ ss << "\n<table border=1 cellpadding=2 cellspacing=0>";
+ ss << "<tr align='left'>"
+ << th(a("", "Connections to the database, both internal and external.", "Client"))
+ << th(a("http://dochub.mongodb.org/core/viewingandterminatingcurrentoperation",
+ "",
+ "OpId")) << "<th>Locking</th>"
+ << "<th>Waiting</th>"
+ << "<th>SecsRunning</th>"
+ << "<th>Op</th>"
+ << th(a("http://dochub.mongodb.org/core/whatisanamespace", "", "Namespace"))
+ << "<th>Query</th>"
+ << "<th>client</th>"
+ << "<th>msg</th>"
+ << "<th>progress</th>"
- for (ServiceContext::LockedClientsCursor cursor(service);
- Client* client = cursor.next();) {
+ << "</tr>\n";
- invariant(client);
+ _processAllClients(txn->getClient()->getServiceContext(), ss);
- // Make the client stable
- stdx::lock_guard<Client> lk(*client);
- const OperationContext* txn = client->getOperationContext();
- if (!txn) continue;
+ ss << "</table>\n";
+ }
- CurOp* curOp = CurOp::get(txn);
- if (!curOp) continue;
+private:
+ static void _processAllClients(ServiceContext* service, std::stringstream& ss) {
+ using namespace html;
- ss << "<tr><td>" << client->desc() << "</td>";
+ for (ServiceContext::LockedClientsCursor cursor(service); Client* client = cursor.next();) {
+ invariant(client);
- tablecell(ss, txn->getOpID());
- tablecell(ss, true);
+ // Make the client stable
+ stdx::lock_guard<Client> lk(*client);
+ const OperationContext* txn = client->getOperationContext();
+ if (!txn)
+ continue;
- // LockState
- {
- Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
+ CurOp* curOp = CurOp::get(txn);
+ if (!curOp)
+ continue;
- BSONObjBuilder lockerInfoBuilder;
- fillLockerInfo(lockerInfo, lockerInfoBuilder);
+ ss << "<tr><td>" << client->desc() << "</td>";
- tablecell(ss, lockerInfoBuilder.obj());
- }
+ tablecell(ss, txn->getOpID());
+ tablecell(ss, true);
- tablecell(ss, curOp->elapsedSeconds());
+ // LockState
+ {
+ Locker::LockerInfo lockerInfo;
+ txn->lockState()->getLockerInfo(&lockerInfo);
- tablecell(ss, curOp->getOp());
- tablecell(ss, html::escape(curOp->getNS()));
+ BSONObjBuilder lockerInfoBuilder;
+ fillLockerInfo(lockerInfo, lockerInfoBuilder);
- if (curOp->haveQuery()) {
- tablecell(ss, html::escape(curOp->query().toString()));
- }
- else {
- tablecell(ss, "");
- }
+ tablecell(ss, lockerInfoBuilder.obj());
+ }
- tablecell(ss, client->clientAddress(true /*includePort*/));
+ tablecell(ss, curOp->elapsedSeconds());
- tablecell(ss, curOp->getMessage());
- tablecell(ss, curOp->getProgressMeter().toString());
+ tablecell(ss, curOp->getOp());
+ tablecell(ss, html::escape(curOp->getNS()));
- ss << "</tr>\n";
+ if (curOp->haveQuery()) {
+ tablecell(ss, html::escape(curOp->query().toString()));
+ } else {
+ tablecell(ss, "");
}
- }
- } clientListPlugin;
+ tablecell(ss, client->clientAddress(true /*includePort*/));
+ tablecell(ss, curOp->getMessage());
+ tablecell(ss, curOp->getProgressMeter().toString());
- class CurrentOpContexts : public Command {
- public:
- CurrentOpContexts()
- : Command( "currentOpCtx" ) {
+ ss << "</tr>\n";
}
+ }
- virtual bool isWriteCommandForConfigServer() const { return false; }
+} clientListPlugin;
- virtual bool slaveOk() const { return true; }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if ( AuthorizationSession::get(client)
- ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
- ActionType::inprog) ) {
- return Status::OK();
- }
+class CurrentOpContexts : public Command {
+public:
+ CurrentOpContexts() : Command("currentOpCtx") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::inprog)) {
+ return Status::OK();
}
- bool run( OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- unique_ptr<MatchExpression> filter;
- if ( cmdObj["filter"].isABSONObj() ) {
- StatusWithMatchExpression res =
- MatchExpressionParser::parse( cmdObj["filter"].Obj() );
- if ( !res.isOK() ) {
- return appendCommandStatus( result, res.getStatus() );
- }
- filter.reset( res.getValue() );
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ unique_ptr<MatchExpression> filter;
+ if (cmdObj["filter"].isABSONObj()) {
+ StatusWithMatchExpression res = MatchExpressionParser::parse(cmdObj["filter"].Obj());
+ if (!res.isOK()) {
+ return appendCommandStatus(result, res.getStatus());
}
-
- result.appendArray(
- "operations",
- _processAllClients(txn->getClient()->getServiceContext(), filter.get()));
-
- return true;
+ filter.reset(res.getValue());
}
+ result.appendArray("operations",
+ _processAllClients(txn->getClient()->getServiceContext(), filter.get()));
- private:
+ return true;
+ }
- static BSONArray _processAllClients(ServiceContext* service, MatchExpression* matcher) {
- BSONArrayBuilder array;
- for (ServiceContext::LockedClientsCursor cursor(service);
- Client* client = cursor.next();) {
+private:
+ static BSONArray _processAllClients(ServiceContext* service, MatchExpression* matcher) {
+ BSONArrayBuilder array;
- invariant(client);
+ for (ServiceContext::LockedClientsCursor cursor(service); Client* client = cursor.next();) {
+ invariant(client);
- BSONObjBuilder b;
+ BSONObjBuilder b;
- // Make the client stable
- stdx::lock_guard<Client> lk(*client);
+ // Make the client stable
+ stdx::lock_guard<Client> lk(*client);
- client->reportState(b);
+ client->reportState(b);
- const OperationContext* txn = client->getOperationContext();
- b.appendBool("active", static_cast<bool>(txn));
- if (txn) {
- b.append("opid", txn->getOpID());
- if (txn->isKillPending()) {
- b.append("killPending", true);
- }
-
- CurOp::get(txn)->reportState(&b);
+ const OperationContext* txn = client->getOperationContext();
+ b.appendBool("active", static_cast<bool>(txn));
+ if (txn) {
+ b.append("opid", txn->getOpID());
+ if (txn->isKillPending()) {
+ b.append("killPending", true);
+ }
- // LockState
- if (txn->lockState()) {
- StringBuilder ss;
- ss << txn->lockState();
- b.append("lockStatePointer", ss.str());
+ CurOp::get(txn)->reportState(&b);
- Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
+ // LockState
+ if (txn->lockState()) {
+ StringBuilder ss;
+ ss << txn->lockState();
+ b.append("lockStatePointer", ss.str());
- BSONObjBuilder lockerInfoBuilder;
- fillLockerInfo(lockerInfo, lockerInfoBuilder);
+ Locker::LockerInfo lockerInfo;
+ txn->lockState()->getLockerInfo(&lockerInfo);
- b.append("lockState", lockerInfoBuilder.obj());
- }
+ BSONObjBuilder lockerInfoBuilder;
+ fillLockerInfo(lockerInfo, lockerInfoBuilder);
- // RecoveryUnit
- if (txn->recoveryUnit()) {
- txn->recoveryUnit()->reportState(&b);
- }
+ b.append("lockState", lockerInfoBuilder.obj());
}
- const BSONObj obj = b.obj();
-
- if (!matcher || matcher->matchesBSON(obj)) {
- array.append(obj);
+ // RecoveryUnit
+ if (txn->recoveryUnit()) {
+ txn->recoveryUnit()->reportState(&b);
}
}
- return array.arr();
+ const BSONObj obj = b.obj();
+
+ if (!matcher || matcher->matchesBSON(obj)) {
+ array.append(obj);
+ }
}
- } currentOpContexts;
+ return array.arr();
+ }
+
+} currentOpContexts;
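+
+// A hedged usage example (illustrative only, not from the original source): the command above
+// can be invoked as db.runCommand({currentOpCtx: 1, filter: {active: true}}); client contexts
+// whose reported state matches the optional "filter" expression are returned in the
+// "operations" array of the result.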
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 0837db844c0..4ce9a81eef1 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -66,612 +66,601 @@
namespace mongo {
- using std::endl;
- using std::list;
- using std::set;
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- MONGO_EXPORT_SERVER_PARAMETER(skipCorruptDocumentsWhenCloning, bool, false);
-
- BSONElement getErrField(const BSONObj& o);
-
- /* for index info object:
- { "name" : "name_1" , "ns" : "foo.index3" , "key" : { "name" : 1.0 } }
- we need to fix up the value in the "ns" parameter so that the name prefix is correct on a
- copy to a new name.
- */
- BSONObj fixindex(const string& newDbName, BSONObj o) {
- BSONObjBuilder b;
- BSONObjIterator i(o);
- while ( i.moreWithEOO() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
-
- // for now, skip the "v" field so that v:0 indexes will be upgraded to v:1
- if ( string("v") == e.fieldName() ) {
- continue;
- }
+using std::endl;
+using std::list;
+using std::set;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- if ( string("ns") == e.fieldName() ) {
- uassert( 10024 , "bad ns field for index during dbcopy", e.type() == String);
- const char *p = strchr(e.valuestr(), '.');
- uassert( 10025 , "bad ns field for index during dbcopy [2]", p);
- string newname = newDbName + p;
- b.append("ns", newname);
- }
- else {
- b.append(e);
- }
- }
+MONGO_EXPORT_SERVER_PARAMETER(skipCorruptDocumentsWhenCloning, bool, false);
- BSONObj res= b.obj();
+BSONElement getErrField(const BSONObj& o);
+
+/* For an index info object such as:
+   { "name" : "name_1" , "ns" : "foo.index3" , "key" : { "name" : 1.0 } }
+   we need to fix up the value of the "ns" field so that the database-name prefix is correct
+   when copying to a new database name.
+*/
+BSONObj fixindex(const string& newDbName, BSONObj o) {
+ BSONObjBuilder b;
+ BSONObjIterator i(o);
+ while (i.moreWithEOO()) {
+ BSONElement e = i.next();
+ if (e.eoo())
+ break;
+
+ // for now, skip the "v" field so that v:0 indexes will be upgraded to v:1
+ if (string("v") == e.fieldName()) {
+ continue;
+ }
- return res;
+ if (string("ns") == e.fieldName()) {
+ uassert(10024, "bad ns field for index during dbcopy", e.type() == String);
+ const char* p = strchr(e.valuestr(), '.');
+ uassert(10025, "bad ns field for index during dbcopy [2]", p);
+ string newname = newDbName + p;
+ b.append("ns", newname);
+ } else {
+ b.append(e);
+ }
}
- Cloner::Cloner() { }
-
- struct Cloner::Fun {
- Fun(OperationContext* txn, const string& dbName)
- :lastLog(0),
- txn(txn),
- _dbName(dbName)
- {}
-
- void operator()( DBClientCursorBatchIterator &i ) {
- invariant(from_collection.coll() != "system.indexes");
-
- // XXX: can probably take dblock instead
- unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(txn, MODE_X));
- unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(txn->lockState()));
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to " << to_collection.ns(),
- !txn->writesAreReplicated() ||
+ BSONObj res = b.obj();
+
+ return res;
+}
+
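+// Illustrative example (not part of the original source): with newDbName = "bar", an index
+// spec such as { "name" : "name_1", "ns" : "foo.index3", "key" : { "name" : 1.0 } } comes back
+// from fixindex() with "ns" rewritten to "bar.index3". Any "v" field is dropped so that v:0
+// indexes are upgraded to v:1 on the copy.
+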
+Cloner::Cloner() {}
+
+struct Cloner::Fun {
+ Fun(OperationContext* txn, const string& dbName) : lastLog(0), txn(txn), _dbName(dbName) {}
+
+ void operator()(DBClientCursorBatchIterator& i) {
+ invariant(from_collection.coll() != "system.indexes");
+
+ // XXX: can probably take dblock instead
+ unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(txn, MODE_X));
+ unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(txn->lockState()));
+ uassert(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while cloning collection " << from_collection.ns()
+ << " to " << to_collection.ns(),
+ !txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
- // Make sure database still exists after we resume from the temp release
- Database* db = dbHolder().openDb(txn, _dbName);
+ // Make sure database still exists after we resume from the temp release
+ Database* db = dbHolder().openDb(txn, _dbName);
- bool createdCollection = false;
- Collection* collection = NULL;
+ bool createdCollection = false;
+ Collection* collection = NULL;
- collection = db->getCollection( to_collection );
- if ( !collection ) {
- massert( 17321,
- str::stream()
- << "collection dropped during clone ["
- << to_collection.ns() << "]",
- !createdCollection );
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
- verify(collection);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", to_collection.ns());
+ collection = db->getCollection(to_collection);
+ if (!collection) {
+ massert(17321,
+ str::stream() << "collection dropped during clone [" << to_collection.ns()
+ << "]",
+ !createdCollection);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
+ verify(collection);
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", to_collection.ns());
+ }
- while( i.moreInCurrentBatch() ) {
- if ( numSeen % 128 == 127 ) {
- time_t now = time(0);
- if( now - lastLog >= 60 ) {
- // report progress
- if( lastLog )
- log() << "clone " << to_collection << ' ' << numSeen << endl;
- lastLog = now;
- }
+ while (i.moreInCurrentBatch()) {
+ if (numSeen % 128 == 127) {
+ time_t now = time(0);
+ if (now - lastLog >= 60) {
+ // report progress
+ if (lastLog)
+ log() << "clone " << to_collection << ' ' << numSeen << endl;
+ lastLog = now;
+ }
- if (_mayBeInterrupted) {
- txn->checkForInterrupt();
- }
+ if (_mayBeInterrupted) {
+ txn->checkForInterrupt();
+ }
+
+ if (_mayYield) {
+ scopedXact.reset();
+ globalWriteLock.reset();
+
+ CurOp::get(txn)->yielded();
- if (_mayYield) {
- scopedXact.reset();
- globalWriteLock.reset();
-
- CurOp::get(txn)->yielded();
-
- scopedXact.reset(new ScopedTransaction(txn, MODE_X));
- globalWriteLock.reset(new Lock::GlobalWrite(txn->lockState()));
-
- // Check if everything is still all right.
- if (txn->writesAreReplicated()) {
- uassert(28592,
- str::stream() << "Cannot write to ns: " << to_collection.ns()
- << " after yielding",
- repl::getGlobalReplicationCoordinator()->
- canAcceptWritesFor(to_collection));
- }
-
- // TODO: SERVER-16598 abort if original db or collection is gone.
- db = dbHolder().get(txn, _dbName);
- uassert(28593,
- str::stream() << "Database " << _dbName
- << " dropped while cloning",
- db != NULL);
-
- collection = db->getCollection(to_collection);
- uassert(28594,
- str::stream() << "Collection " << to_collection.ns()
- << " dropped while cloning",
- collection != NULL);
+ scopedXact.reset(new ScopedTransaction(txn, MODE_X));
+ globalWriteLock.reset(new Lock::GlobalWrite(txn->lockState()));
+
+ // Check if everything is still all right.
+ if (txn->writesAreReplicated()) {
+ uassert(28592,
+ str::stream() << "Cannot write to ns: " << to_collection.ns()
+ << " after yielding",
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
+ to_collection));
}
+
+ // TODO: SERVER-16598 abort if original db or collection is gone.
+ db = dbHolder().get(txn, _dbName);
+ uassert(28593,
+ str::stream() << "Database " << _dbName << " dropped while cloning",
+ db != NULL);
+
+ collection = db->getCollection(to_collection);
+ uassert(28594,
+ str::stream() << "Collection " << to_collection.ns()
+ << " dropped while cloning",
+ collection != NULL);
}
+ }
- BSONObj tmp = i.nextSafe();
+ BSONObj tmp = i.nextSafe();
- /* assure object is valid. note this will slow us down a little. */
- const Status status = validateBSON(tmp.objdata(), tmp.objsize());
- if (!status.isOK()) {
- str::stream ss;
- ss << "Cloner: found corrupt document in " << from_collection.toString()
- << ": " << status.reason();
- if (skipCorruptDocumentsWhenCloning) {
- warning() << ss.ss.str() << "; skipping";
- continue;
- }
- msgasserted(28531, ss);
+ /* assure object is valid. note this will slow us down a little. */
+ const Status status = validateBSON(tmp.objdata(), tmp.objsize());
+ if (!status.isOK()) {
+ str::stream ss;
+ ss << "Cloner: found corrupt document in " << from_collection.toString() << ": "
+ << status.reason();
+ if (skipCorruptDocumentsWhenCloning) {
+ warning() << ss.ss.str() << "; skipping";
+ continue;
}
+ msgasserted(28531, ss);
+ }
- ++numSeen;
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ ++numSeen;
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
- BSONObj doc = tmp;
- StatusWith<RecordId> loc = collection->insertDocument( txn, doc, true );
- if ( !loc.isOK() ) {
- error() << "error: exception cloning object in " << from_collection
- << ' ' << loc.getStatus() << " obj:" << doc;
- }
- uassertStatusOK( loc.getStatus() );
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloner insert", to_collection.ns());
- RARELY if ( time( 0 ) - saveLast > 60 ) {
- log() << numSeen << " objects cloned so far from collection " << from_collection;
- saveLast = time( 0 );
+ BSONObj doc = tmp;
+ StatusWith<RecordId> loc = collection->insertDocument(txn, doc, true);
+ if (!loc.isOK()) {
+ error() << "error: exception cloning object in " << from_collection << ' '
+ << loc.getStatus() << " obj:" << doc;
}
+ uassertStatusOK(loc.getStatus());
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloner insert", to_collection.ns());
+ RARELY if (time(0) - saveLast > 60) {
+ log() << numSeen << " objects cloned so far from collection " << from_collection;
+ saveLast = time(0);
}
}
+ }
- time_t lastLog;
- OperationContext* txn;
- const string _dbName;
-
- int64_t numSeen;
- NamespaceString from_collection;
- NamespaceString to_collection;
- time_t saveLast;
- bool _mayYield;
- bool _mayBeInterrupted;
- };
-
- /* copy the specified collection
- */
- void Cloner::copy(OperationContext* txn,
- const string& toDBName,
- const NamespaceString& from_collection,
- const NamespaceString& to_collection,
- bool masterSameProcess,
- bool slaveOk,
- bool mayYield,
- bool mayBeInterrupted,
- Query query) {
- LOG(2) << "\t\tcloning collection " << from_collection << " to " << to_collection << " on " << _conn->getServerAddress() << " with filter " << query.toString() << endl;
-
- Fun f(txn, toDBName);
- f.numSeen = 0;
- f.from_collection = from_collection;
- f.to_collection = to_collection;
- f.saveLast = time( 0 );
- f._mayYield = mayYield;
- f._mayBeInterrupted = mayBeInterrupted;
-
- int options = QueryOption_NoCursorTimeout | ( slaveOk ? QueryOption_SlaveOk : 0 );
- {
- Lock::TempRelease tempRelease(txn->lockState());
- _conn->query(stdx::function<void(DBClientCursorBatchIterator &)>(f), from_collection,
- query, 0, options);
- }
+ time_t lastLog;
+ OperationContext* txn;
+ const string _dbName;
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to " << to_collection.ns() << " with filter "
- << query.toString(),
- !txn->writesAreReplicated() ||
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
+ int64_t numSeen;
+ NamespaceString from_collection;
+ NamespaceString to_collection;
+ time_t saveLast;
+ bool _mayYield;
+ bool _mayBeInterrupted;
+};
+
+/* Copies the specified collection. */
+void Cloner::copy(OperationContext* txn,
+ const string& toDBName,
+ const NamespaceString& from_collection,
+ const NamespaceString& to_collection,
+ bool masterSameProcess,
+ bool slaveOk,
+ bool mayYield,
+ bool mayBeInterrupted,
+ Query query) {
+ LOG(2) << "\t\tcloning collection " << from_collection << " to " << to_collection << " on "
+ << _conn->getServerAddress() << " with filter " << query.toString() << endl;
+
+ Fun f(txn, toDBName);
+ f.numSeen = 0;
+ f.from_collection = from_collection;
+ f.to_collection = to_collection;
+ f.saveLast = time(0);
+ f._mayYield = mayYield;
+ f._mayBeInterrupted = mayBeInterrupted;
+
+ int options = QueryOption_NoCursorTimeout | (slaveOk ? QueryOption_SlaveOk : 0);
+ {
+ Lock::TempRelease tempRelease(txn->lockState());
+ _conn->query(stdx::function<void(DBClientCursorBatchIterator&)>(f),
+ from_collection,
+ query,
+ 0,
+ options);
}
- void Cloner::copyIndexes(OperationContext* txn,
- const string& toDBName,
- const NamespaceString& from_collection,
- const NamespaceString& to_collection,
- bool masterSameProcess,
- bool slaveOk,
- bool mayYield,
- bool mayBeInterrupted) {
-
- LOG(2) << "\t\t copyIndexes " << from_collection << " to " << to_collection
- << " on " << _conn->getServerAddress();
-
- vector<BSONObj> indexesToBuild;
-
- {
- Lock::TempRelease tempRelease(txn->lockState());
- list<BSONObj> sourceIndexes = _conn->getIndexSpecs( from_collection,
- slaveOk ? QueryOption_SlaveOk : 0 );
- for (list<BSONObj>::const_iterator it = sourceIndexes.begin();
- it != sourceIndexes.end(); ++it) {
- indexesToBuild.push_back(fixindex(to_collection.db().toString(), *it));
- }
+ uassert(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while cloning collection " << from_collection.ns()
+ << " to " << to_collection.ns() << " with filter " << query.toString(),
+ !txn->writesAreReplicated() ||
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
+}
+
+void Cloner::copyIndexes(OperationContext* txn,
+ const string& toDBName,
+ const NamespaceString& from_collection,
+ const NamespaceString& to_collection,
+ bool masterSameProcess,
+ bool slaveOk,
+ bool mayYield,
+ bool mayBeInterrupted) {
+ LOG(2) << "\t\t copyIndexes " << from_collection << " to " << to_collection << " on "
+ << _conn->getServerAddress();
+
+ vector<BSONObj> indexesToBuild;
+
+ {
+ Lock::TempRelease tempRelease(txn->lockState());
+ list<BSONObj> sourceIndexes =
+ _conn->getIndexSpecs(from_collection, slaveOk ? QueryOption_SlaveOk : 0);
+ for (list<BSONObj>::const_iterator it = sourceIndexes.begin(); it != sourceIndexes.end();
+ ++it) {
+ indexesToBuild.push_back(fixindex(to_collection.db().toString(), *it));
}
+ }
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while copying indexes from " << from_collection.ns()
- << " to " << to_collection.ns() << " (Cloner)",
- !txn->writesAreReplicated() ||
+ uassert(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while copying indexes from " << from_collection.ns()
+ << " to " << to_collection.ns() << " (Cloner)",
+ !txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
- if (indexesToBuild.empty())
- return;
+ if (indexesToBuild.empty())
+ return;
- // We are under lock here again, so reload the database in case it may have disappeared
- // during the temp release
- Database* db = dbHolder().openDb(txn, toDBName);
+    // We are under lock here again, so reload the database in case it has disappeared
+    // during the temp release
+ Database* db = dbHolder().openDb(txn, toDBName);
- Collection* collection = db->getCollection( to_collection );
- if ( !collection ) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
- invariant(collection);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", to_collection.ns());
+ Collection* collection = db->getCollection(to_collection);
+ if (!collection) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
+ invariant(collection);
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", to_collection.ns());
+ }
- // TODO pass the MultiIndexBlock when inserting into the collection rather than building the
- // indexes after the fact. This depends on holding a lock on the collection the whole time
+    // from creation to completion without yielding to ensure the index and the collection
+    // match. It also wouldn't work on non-empty collections, so we would need both
- // implementations anyway as long as that is supported.
- MultiIndexBlock indexer(txn, collection);
- if (mayBeInterrupted)
- indexer.allowInterruption();
-
- indexer.removeExistingIndexes(&indexesToBuild);
- if (indexesToBuild.empty())
- return;
-
- uassertStatusOK(indexer.init(indexesToBuild));
- uassertStatusOK(indexer.insertAllDocumentsInCollection());
-
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- if (txn->writesAreReplicated()) {
- const string targetSystemIndexesCollectionName =
- to_collection.getSystemIndexesCollection();
- const char* createIndexNs = targetSystemIndexesCollectionName.c_str();
- for (vector<BSONObj>::const_iterator it = indexesToBuild.begin();
- it != indexesToBuild.end(); ++it) {
- getGlobalServiceContext()->getOpObserver()->onCreateIndex(txn, createIndexNs, *it);
- }
+ // TODO pass the MultiIndexBlock when inserting into the collection rather than building the
+ // indexes after the fact. This depends on holding a lock on the collection the whole time
+ // from creation to completion without yielding to ensure the index and the collection
+ // matches. It also wouldn't work on non-empty collections so we would need both
+ // implementations anyway as long as that is supported.
+ MultiIndexBlock indexer(txn, collection);
+ if (mayBeInterrupted)
+ indexer.allowInterruption();
+
+ indexer.removeExistingIndexes(&indexesToBuild);
+ if (indexesToBuild.empty())
+ return;
+
+ uassertStatusOK(indexer.init(indexesToBuild));
+ uassertStatusOK(indexer.insertAllDocumentsInCollection());
+
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ if (txn->writesAreReplicated()) {
+ const string targetSystemIndexesCollectionName = to_collection.getSystemIndexesCollection();
+ const char* createIndexNs = targetSystemIndexesCollectionName.c_str();
+ for (vector<BSONObj>::const_iterator it = indexesToBuild.begin();
+ it != indexesToBuild.end();
+ ++it) {
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(txn, createIndexNs, *it);
}
- wunit.commit();
}
-
- bool Cloner::copyCollection(OperationContext* txn,
- const string& ns,
- const BSONObj& query,
- string& errmsg,
- bool mayYield,
- bool mayBeInterrupted,
- bool shouldCopyIndexes) {
-
- const NamespaceString nss(ns);
- const string dbname = nss.db().toString();
-
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWrite(txn->lockState(), dbname, MODE_X);
-
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while copying collection " << ns << " (Cloner)",
- !txn->writesAreReplicated() ||
+ wunit.commit();
+}
+
+bool Cloner::copyCollection(OperationContext* txn,
+ const string& ns,
+ const BSONObj& query,
+ string& errmsg,
+ bool mayYield,
+ bool mayBeInterrupted,
+ bool shouldCopyIndexes) {
+ const NamespaceString nss(ns);
+ const string dbname = nss.db().toString();
+
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbWrite(txn->lockState(), dbname, MODE_X);
+
+ uassert(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while copying collection " << ns << " (Cloner)",
+ !txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss));
- Database* db = dbHolder().openDb(txn, dbname);
+ Database* db = dbHolder().openDb(txn, dbname);
- // config
- BSONObj filter = BSON("name" << nss.coll().toString());
- list<BSONObj> collList = _conn->getCollectionInfos( dbname, filter);
- if (!collList.empty()) {
- invariant(collList.size() <= 1);
- BSONObj col = collList.front();
- if (col["options"].isABSONObj()) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- Status status = userCreateNS(txn, db, ns, col["options"].Obj(), 0);
- if ( !status.isOK() ) {
- errmsg = status.toString();
- return false;
- }
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createUser", ns);
+ // config
+ BSONObj filter = BSON("name" << nss.coll().toString());
+ list<BSONObj> collList = _conn->getCollectionInfos(dbname, filter);
+ if (!collList.empty()) {
+ invariant(collList.size() <= 1);
+ BSONObj col = collList.front();
+ if (col["options"].isABSONObj()) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ Status status = userCreateNS(txn, db, ns, col["options"].Obj(), 0);
+ if (!status.isOK()) {
+ errmsg = status.toString();
+ return false;
+ }
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createUser", ns);
}
-
- // main data
- copy(txn, dbname,
- nss, nss,
- false, true, mayYield, mayBeInterrupted,
- Query(query).snapshot());
-
- /* TODO : copyIndexes bool does not seem to be implemented! */
- if(!shouldCopyIndexes) {
- log() << "ERROR copy collection shouldCopyIndexes not implemented? " << ns << endl;
- }
-
- // indexes
- copyIndexes(txn, dbname,
- NamespaceString(ns), NamespaceString(ns),
- false, true, mayYield,
- mayBeInterrupted);
-
- return true;
}
- Status Cloner::copyDb(OperationContext* txn,
- const std::string& toDBName,
- const string& masterHost,
- const CloneOptions& opts,
- set<string>* clonedColls) {
+ // main data
+ copy(txn, dbname, nss, nss, false, true, mayYield, mayBeInterrupted, Query(query).snapshot());
- massert(10289,
- "useReplAuth is not written to replication log",
- !opts.useReplAuth || !txn->writesAreReplicated());
+ /* TODO : copyIndexes bool does not seem to be implemented! */
+ if (!shouldCopyIndexes) {
+ log() << "ERROR copy collection shouldCopyIndexes not implemented? " << ns << endl;
+ }
- auto statusWithMasterHost = ConnectionString::parse(masterHost);
- if (!statusWithMasterHost.isOK()) {
- return statusWithMasterHost.getStatus();
- }
+ // indexes
+ copyIndexes(txn,
+ dbname,
+ NamespaceString(ns),
+ NamespaceString(ns),
+ false,
+ true,
+ mayYield,
+ mayBeInterrupted);
+
+ return true;
+}
+
+Status Cloner::copyDb(OperationContext* txn,
+ const std::string& toDBName,
+ const string& masterHost,
+ const CloneOptions& opts,
+ set<string>* clonedColls) {
+ massert(10289,
+ "useReplAuth is not written to replication log",
+ !opts.useReplAuth || !txn->writesAreReplicated());
+
+ auto statusWithMasterHost = ConnectionString::parse(masterHost);
+ if (!statusWithMasterHost.isOK()) {
+ return statusWithMasterHost.getStatus();
+ }
- const ConnectionString cs(statusWithMasterHost.getValue());
+ const ConnectionString cs(statusWithMasterHost.getValue());
- bool masterSameProcess = false;
- std::vector<HostAndPort> csServers = cs.getServers();
- for (std::vector<HostAndPort>::const_iterator iter = csServers.begin();
- iter != csServers.end(); ++iter) {
+ bool masterSameProcess = false;
+ std::vector<HostAndPort> csServers = cs.getServers();
+ for (std::vector<HostAndPort>::const_iterator iter = csServers.begin(); iter != csServers.end();
+ ++iter) {
+ if (!repl::isSelf(*iter))
+ continue;
- if (!repl::isSelf(*iter))
- continue;
+ masterSameProcess = true;
+ break;
+ }
- masterSameProcess = true;
- break;
+ if (masterSameProcess) {
+ if (opts.fromDB == toDBName) {
+ // Guard against re-entrance
+ return Status(ErrorCodes::IllegalOperation, "can't clone from self (localhost)");
}
+ }
- if (masterSameProcess) {
- if (opts.fromDB == toDBName) {
- // Guard against re-entrance
- return Status(ErrorCodes::IllegalOperation, "can't clone from self (localhost)");
+ {
+ // setup connection
+ if (_conn.get()) {
+ // nothing to do
+ } else if (!masterSameProcess) {
+ std::string errmsg;
+ unique_ptr<DBClientBase> con(cs.connect(errmsg));
+ if (!con.get()) {
+ return Status(ErrorCodes::HostUnreachable, errmsg);
}
- }
- {
- // setup connection
- if (_conn.get()) {
- // nothing to do
+ if (getGlobalAuthorizationManager()->isAuthEnabled() &&
+ !authenticateInternalUser(con.get())) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "Unable to authenticate as internal user");
}
- else if ( !masterSameProcess ) {
- std::string errmsg;
- unique_ptr<DBClientBase> con( cs.connect( errmsg ));
- if (!con.get()) {
- return Status(ErrorCodes::HostUnreachable, errmsg);
- }
-
- if (getGlobalAuthorizationManager()->isAuthEnabled() &&
- !authenticateInternalUser(con.get())) {
- return Status(ErrorCodes::AuthenticationFailed,
- "Unable to authenticate as internal user");
- }
-
- _conn = std::move(con);
- }
- else {
- _conn.reset(new DBDirectClient(txn));
- }
+ _conn = std::move(con);
+ } else {
+ _conn.reset(new DBDirectClient(txn));
}
+ }
- // Gather the list of collections to clone
- list<BSONObj> toClone;
- if (clonedColls) {
- clonedColls->clear();
- }
+ // Gather the list of collections to clone
+ list<BSONObj> toClone;
+ if (clonedColls) {
+ clonedColls->clear();
+ }
- {
- // getCollectionInfos may make a remote call, which may block indefinitely, so release
- // the global lock that we are entering with.
- Lock::TempRelease tempRelease(txn->lockState());
+ {
+ // getCollectionInfos may make a remote call, which may block indefinitely, so release
+ // the global lock that we are entering with.
+ Lock::TempRelease tempRelease(txn->lockState());
- list<BSONObj> raw = _conn->getCollectionInfos( opts.fromDB );
- for ( list<BSONObj>::iterator it = raw.begin(); it != raw.end(); ++it ) {
- BSONObj collection = *it;
+ list<BSONObj> raw = _conn->getCollectionInfos(opts.fromDB);
+ for (list<BSONObj>::iterator it = raw.begin(); it != raw.end(); ++it) {
+ BSONObj collection = *it;
- LOG(2) << "\t cloner got " << collection << endl;
+ LOG(2) << "\t cloner got " << collection << endl;
- BSONElement collectionOptions = collection["options"];
- if ( collectionOptions.isABSONObj() ) {
- Status parseOptionsStatus = CollectionOptions().parse(collectionOptions.Obj());
- if (!parseOptionsStatus.isOK()) {
- return parseOptionsStatus;
- }
+ BSONElement collectionOptions = collection["options"];
+ if (collectionOptions.isABSONObj()) {
+ Status parseOptionsStatus = CollectionOptions().parse(collectionOptions.Obj());
+ if (!parseOptionsStatus.isOK()) {
+ return parseOptionsStatus;
}
+ }
- BSONElement e = collection.getField("name");
- if ( e.eoo() ) {
- string s = "bad collection object " + collection.toString();
- massert( 10290 , s.c_str(), false);
- }
- verify( !e.eoo() );
- verify( e.type() == String );
+ BSONElement e = collection.getField("name");
+ if (e.eoo()) {
+ string s = "bad collection object " + collection.toString();
+ massert(10290, s.c_str(), false);
+ }
+ verify(!e.eoo());
+ verify(e.type() == String);
- const NamespaceString ns(opts.fromDB, e.valuestr());
+ const NamespaceString ns(opts.fromDB, e.valuestr());
- if( ns.isSystem() ) {
- /* system.users and s.js is cloned -- but nothing else from system.
- * system.indexes is handled specially at the end*/
- if( legalClientSystemNS( ns.ns() , true ) == 0 ) {
- LOG(2) << "\t\t not cloning because system collection" << endl;
- continue;
- }
- }
- if( !ns.isNormal() ) {
- LOG(2) << "\t\t not cloning because has $ ";
+ if (ns.isSystem()) {
+ /* system.users and s.js is cloned -- but nothing else from system.
+ * system.indexes is handled specially at the end*/
+ if (legalClientSystemNS(ns.ns(), true) == 0) {
+ LOG(2) << "\t\t not cloning because system collection" << endl;
continue;
}
+ }
+ if (!ns.isNormal()) {
+ LOG(2) << "\t\t not cloning because has $ ";
+ continue;
+ }
- if( opts.collsToIgnore.find( ns.ns() ) != opts.collsToIgnore.end() ){
- LOG(2) << "\t\t ignoring collection " << ns;
- continue;
- }
- else {
- LOG(2) << "\t\t not ignoring collection " << ns;
- }
-
- if (clonedColls) {
- clonedColls->insert(ns.ns());
- }
+ if (opts.collsToIgnore.find(ns.ns()) != opts.collsToIgnore.end()) {
+ LOG(2) << "\t\t ignoring collection " << ns;
+ continue;
+ } else {
+ LOG(2) << "\t\t not ignoring collection " << ns;
+ }
- toClone.push_back( collection.getOwned() );
+ if (clonedColls) {
+ clonedColls->insert(ns.ns());
}
+
+ toClone.push_back(collection.getOwned());
}
+ }
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while cloning database " << opts.fromDB
- << " (after getting list of collections to clone)",
- !txn->writesAreReplicated() ||
+ uassert(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while cloning database " << opts.fromDB
+ << " (after getting list of collections to clone)",
+ !txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(toDBName));
- if ( opts.syncData ) {
- for ( list<BSONObj>::iterator i=toClone.begin(); i != toClone.end(); i++ ) {
- BSONObj collection = *i;
- LOG(2) << " really will clone: " << collection << endl;
- const char* collectionName = collection["name"].valuestr();
- BSONObj options = collection.getObjectField("options");
+ if (opts.syncData) {
+ for (list<BSONObj>::iterator i = toClone.begin(); i != toClone.end(); i++) {
+ BSONObj collection = *i;
+ LOG(2) << " really will clone: " << collection << endl;
+ const char* collectionName = collection["name"].valuestr();
+ BSONObj options = collection.getObjectField("options");
- const NamespaceString from_name(opts.fromDB, collectionName);
- const NamespaceString to_name(toDBName, collectionName);
+ const NamespaceString from_name(opts.fromDB, collectionName);
+ const NamespaceString to_name(toDBName, collectionName);
- Database* db = dbHolder().openDb(txn, toDBName);
+ Database* db = dbHolder().openDb(txn, toDBName);
- {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
-
- // we defer building id index for performance - building it in batch is much
- // faster
- Status createStatus = userCreateNS(txn, db, to_name.ns(), options, false);
- if (!createStatus.isOK()) {
- return createStatus;
- }
-
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createUser", to_name.ns());
- }
+ {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
- LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
- Query q;
- if( opts.snapshot )
- q.snapshot();
-
- copy(txn,
- toDBName,
- from_name,
- to_name,
- masterSameProcess,
- opts.slaveOk,
- opts.mayYield,
- opts.mayBeInterrupted,
- q);
-
- // Copy releases the lock, so we need to re-load the database. This should
- // probably throw if the database has changed in between, but for now preserve
- // the existing behaviour.
- db = dbHolder().get(txn, toDBName);
- uassert(18645,
- str::stream() << "database " << toDBName << " dropped during clone",
- db);
-
- Collection* c = db->getCollection( to_name );
- if ( c && !c->getIndexCatalog()->haveIdIndex( txn ) ) {
- // We need to drop objects with duplicate _ids because we didn't do a true
- // snapshot and this is before applying oplog operations that occur during the
- // initial sync.
- set<RecordId> dups;
-
- MultiIndexBlock indexer(txn, c);
- if (opts.mayBeInterrupted)
- indexer.allowInterruption();
-
- uassertStatusOK(indexer.init(c->getIndexCatalog()->getDefaultIdIndexSpec()));
- uassertStatusOK(indexer.insertAllDocumentsInCollection(&dups));
-
- // This must be done before we commit the indexer. See the comment about
- // dupsAllowed in IndexCatalog::_unindexRecord and SERVER-17487.
- for (set<RecordId>::const_iterator it = dups.begin(); it != dups.end(); ++it) {
- WriteUnitOfWork wunit(txn);
- BSONObj id;
-
- c->deleteDocument(txn,
- *it,
- true,
- true,
- txn->writesAreReplicated() ? &id : nullptr);
- wunit.commit();
+                    // we defer building the id index for performance - building it in a batch
+                    // is much faster
+ Status createStatus = userCreateNS(txn, db, to_name.ns(), options, false);
+ if (!createStatus.isOK()) {
+ return createStatus;
}
- if (!dups.empty()) {
- log() << "index build dropped: " << dups.size() << " dups";
- }
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createUser", to_name.ns());
+ }
+ LOG(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
+ Query q;
+ if (opts.snapshot)
+ q.snapshot();
+
+ copy(txn,
+ toDBName,
+ from_name,
+ to_name,
+ masterSameProcess,
+ opts.slaveOk,
+ opts.mayYield,
+ opts.mayBeInterrupted,
+ q);
+
+ // Copy releases the lock, so we need to re-load the database. This should
+ // probably throw if the database has changed in between, but for now preserve
+ // the existing behaviour.
+ db = dbHolder().get(txn, toDBName);
+ uassert(18645, str::stream() << "database " << toDBName << " dropped during clone", db);
+
+ Collection* c = db->getCollection(to_name);
+ if (c && !c->getIndexCatalog()->haveIdIndex(txn)) {
+ // We need to drop objects with duplicate _ids because we didn't do a true
+ // snapshot and this is before applying oplog operations that occur during the
+ // initial sync.
+ set<RecordId> dups;
+
+ MultiIndexBlock indexer(txn, c);
+ if (opts.mayBeInterrupted)
+ indexer.allowInterruption();
+
+ uassertStatusOK(indexer.init(c->getIndexCatalog()->getDefaultIdIndexSpec()));
+ uassertStatusOK(indexer.insertAllDocumentsInCollection(&dups));
+
+ // This must be done before we commit the indexer. See the comment about
+ // dupsAllowed in IndexCatalog::_unindexRecord and SERVER-17487.
+ for (set<RecordId>::const_iterator it = dups.begin(); it != dups.end(); ++it) {
WriteUnitOfWork wunit(txn);
- indexer.commit();
- if (txn->writesAreReplicated()) {
- getGlobalServiceContext()->getOpObserver()->onCreateIndex(
- txn,
- c->ns().getSystemIndexesCollection().c_str(),
- c->getIndexCatalog()->getDefaultIdIndexSpec());
- }
+ BSONObj id;
+
+ c->deleteDocument(
+ txn, *it, true, true, txn->writesAreReplicated() ? &id : nullptr);
wunit.commit();
}
- }
- }
- // now build the secondary indexes
- if ( opts.syncIndexes ) {
- for ( list<BSONObj>::iterator i=toClone.begin(); i != toClone.end(); i++ ) {
- BSONObj collection = *i;
- log() << "copying indexes for: " << collection;
-
- const char* collectionName = collection["name"].valuestr();
-
- NamespaceString from_name( opts.fromDB, collectionName );
- NamespaceString to_name( toDBName, collectionName );
-
- copyIndexes(txn,
- toDBName,
- from_name,
- to_name,
- masterSameProcess,
- opts.slaveOk,
- opts.mayYield,
- opts.mayBeInterrupted );
+ if (!dups.empty()) {
+ log() << "index build dropped: " << dups.size() << " dups";
+ }
+
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ if (txn->writesAreReplicated()) {
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(
+ txn,
+ c->ns().getSystemIndexesCollection().c_str(),
+ c->getIndexCatalog()->getDefaultIdIndexSpec());
+ }
+ wunit.commit();
}
}
+ }
- return Status::OK();
+ // now build the secondary indexes
+ if (opts.syncIndexes) {
+ for (list<BSONObj>::iterator i = toClone.begin(); i != toClone.end(); i++) {
+ BSONObj collection = *i;
+ log() << "copying indexes for: " << collection;
+
+ const char* collectionName = collection["name"].valuestr();
+
+ NamespaceString from_name(opts.fromDB, collectionName);
+ NamespaceString to_name(toDBName, collectionName);
+
+ copyIndexes(txn,
+ toDBName,
+ from_name,
+ to_name,
+ masterSameProcess,
+ opts.slaveOk,
+ opts.mayYield,
+ opts.mayBeInterrupted);
+ }
}
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace mongo
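
The cloner.cpp hunk above is a pure re-indentation, but the collection-create it touches relies on the server's write-conflict retry idiom: a MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/_END pair wrapping a WriteUnitOfWork so the write can be replayed if it loses a storage-engine write conflict. Below is a minimal sketch of that idiom in isolation, assuming the same server headers the file above already includes; createTargetCollection is a hypothetical stand-in for the userCreateNS call in the real code.

    // Sketch only: mirrors the retry structure used by the cloner code above.
    Status createCollectionWithRetry(OperationContext* txn,
                                     Database* db,
                                     const NamespaceString& to_name) {
        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            WriteUnitOfWork wunit(txn);
            // Defer the _id index, as the cloner does, so it can be built in batch later.
            Status status = createTargetCollection(txn, db, to_name.ns());  // hypothetical helper
            if (!status.isOK()) {
                return status;
            }
            wunit.commit();  // nothing becomes durable until the unit of work commits
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollectionWithRetry", to_name.ns());
        return Status::OK();
    }
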
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index 2009d971869..3d49cf01a05 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -35,92 +35,93 @@
namespace mongo {
- struct CloneOptions;
- class DBClientBase;
- class NamespaceString;
- class OperationContext;
-
-
- class Cloner {
- MONGO_DISALLOW_COPYING(Cloner);
- public:
- Cloner();
-
- void setConnection(DBClientBase* c) {
- _conn.reset(c);
- }
-
- /**
- * Copies an entire database from the specified host.
- */
- Status copyDb(OperationContext* txn,
- const std::string& toDBName,
- const std::string& masterHost,
- const CloneOptions& opts,
- std::set<std::string>* clonedColls);
-
- bool copyCollection(OperationContext* txn,
- const std::string& ns,
- const BSONObj& query,
- std::string& errmsg,
- bool mayYield,
- bool mayBeInterrupted,
- bool copyIndexes = true);
-
- private:
- void copy(OperationContext* txn,
- const std::string& toDBName,
- const NamespaceString& from_ns,
- const NamespaceString& to_ns,
- bool masterSameProcess,
- bool slaveOk,
- bool mayYield,
- bool mayBeInterrupted,
- Query q);
-
- void copyIndexes(OperationContext* txn,
- const std::string& toDBName,
- const NamespaceString& from_ns,
- const NamespaceString& to_ns,
- bool masterSameProcess,
- bool slaveOk,
- bool mayYield,
- bool mayBeInterrupted);
-
- struct Fun;
- std::unique_ptr<DBClientBase> _conn;
- };
+struct CloneOptions;
+class DBClientBase;
+class NamespaceString;
+class OperationContext;
+
+
+class Cloner {
+ MONGO_DISALLOW_COPYING(Cloner);
+
+public:
+ Cloner();
+
+ void setConnection(DBClientBase* c) {
+ _conn.reset(c);
+ }
/**
- * slaveOk - if true it is ok if the source of the data is !ismaster.
- * useReplAuth - use the credentials we normally use as a replication slave for the cloning
- * snapshot - use $snapshot mode for copying collections. note this should not be used
- * when it isn't required, as it will be slower. for example,
- * repairDatabase need not use it.
+ * Copies an entire database from the specified host.
*/
- struct CloneOptions {
- CloneOptions() {
- slaveOk = false;
- useReplAuth = false;
- snapshot = true;
- mayYield = true;
- mayBeInterrupted = false;
-
- syncData = true;
- syncIndexes = true;
- }
-
- std::string fromDB;
- std::set<std::string> collsToIgnore;
-
- bool slaveOk;
- bool useReplAuth;
- bool snapshot;
- bool mayYield;
- bool mayBeInterrupted;
-
- bool syncData;
- bool syncIndexes;
- };
-
-} // namespace mongo
+ Status copyDb(OperationContext* txn,
+ const std::string& toDBName,
+ const std::string& masterHost,
+ const CloneOptions& opts,
+ std::set<std::string>* clonedColls);
+
+ bool copyCollection(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& query,
+ std::string& errmsg,
+ bool mayYield,
+ bool mayBeInterrupted,
+ bool copyIndexes = true);
+
+private:
+ void copy(OperationContext* txn,
+ const std::string& toDBName,
+ const NamespaceString& from_ns,
+ const NamespaceString& to_ns,
+ bool masterSameProcess,
+ bool slaveOk,
+ bool mayYield,
+ bool mayBeInterrupted,
+ Query q);
+
+ void copyIndexes(OperationContext* txn,
+ const std::string& toDBName,
+ const NamespaceString& from_ns,
+ const NamespaceString& to_ns,
+ bool masterSameProcess,
+ bool slaveOk,
+ bool mayYield,
+ bool mayBeInterrupted);
+
+ struct Fun;
+ std::unique_ptr<DBClientBase> _conn;
+};
+
+/**
+ * slaveOk - if true it is ok if the source of the data is !ismaster.
+ * useReplAuth - use the credentials we normally use as a replication slave for the cloning
+ * snapshot - use $snapshot mode for copying collections. note this should not be used
+ * when it isn't required, as it will be slower. for example,
+ * repairDatabase need not use it.
+ */
+struct CloneOptions {
+ CloneOptions() {
+ slaveOk = false;
+ useReplAuth = false;
+ snapshot = true;
+ mayYield = true;
+ mayBeInterrupted = false;
+
+ syncData = true;
+ syncIndexes = true;
+ }
+
+ std::string fromDB;
+ std::set<std::string> collsToIgnore;
+
+ bool slaveOk;
+ bool useReplAuth;
+ bool snapshot;
+ bool mayYield;
+ bool mayBeInterrupted;
+
+ bool syncData;
+ bool syncIndexes;
+};
+
+} // namespace mongo
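
The reformatted cloner.h keeps the Cloner/CloneOptions interface unchanged, so a caller still fills in a CloneOptions and hands it to Cloner::copyDb. The following usage sketch is based only on the declarations shown above; the database names and host string are placeholders, not values from the server code.

    // Sketch of driving the API declared above; all concrete values are illustrative.
    Status cloneDatabaseExample(OperationContext* txn) {
        CloneOptions opts;                 // defaults: snapshot, mayYield, syncData, syncIndexes all true
        opts.fromDB = "sourceDB";          // placeholder source database name
        opts.slaveOk = true;               // accept a source that is not master

        Cloner cloner;
        std::set<std::string> clonedColls; // receives the names of the cloned collections
        return cloner.copyDb(txn,
                             "targetDB",                  // placeholder destination database
                             "source.example.net:27017",  // placeholder masterHost
                             opts,
                             &clonedColls);
    }
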
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 3fe012598ca..e8d12ae5d9f 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -62,519 +62,508 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::endl;
+using std::string;
+using std::stringstream;
+using std::endl;
- using logger::LogComponent;
+using logger::LogComponent;
- Command::CommandMap* Command::_commandsByBestName;
- Command::CommandMap* Command::_webCommands;
- Command::CommandMap* Command::_commands;
+Command::CommandMap* Command::_commandsByBestName;
+Command::CommandMap* Command::_webCommands;
+Command::CommandMap* Command::_commands;
- int Command::testCommandsEnabled = 0;
+int Command::testCommandsEnabled = 0;
- Counter64 Command::unknownCommands;
- static ServerStatusMetricField<Counter64> displayUnknownCommands( "commands.<UNKNOWN>",
- &Command::unknownCommands );
+Counter64 Command::unknownCommands;
+static ServerStatusMetricField<Counter64> displayUnknownCommands("commands.<UNKNOWN>",
+ &Command::unknownCommands);
- namespace {
- ExportedServerParameter<int> testCommandsParameter(ServerParameterSet::getGlobal(),
- "enableTestCommands",
- &Command::testCommandsEnabled,
- true,
- false);
- }
-
- string Command::parseNsFullyQualified(const string& dbname, const BSONObj& cmdObj) const {
- BSONElement first = cmdObj.firstElement();
- uassert(17005,
- mongoutils::str::stream() << "Main argument to " << first.fieldNameStringData() <<
- " must be a fully qualified namespace string. Found: " <<
- first.toString(false),
- first.type() == mongo::String &&
+namespace {
+ExportedServerParameter<int> testCommandsParameter(ServerParameterSet::getGlobal(),
+ "enableTestCommands",
+ &Command::testCommandsEnabled,
+ true,
+ false);
+}
+
+string Command::parseNsFullyQualified(const string& dbname, const BSONObj& cmdObj) const {
+ BSONElement first = cmdObj.firstElement();
+ uassert(17005,
+ mongoutils::str::stream()
+ << "Main argument to " << first.fieldNameStringData()
+ << " must be a fully qualified namespace string. Found: " << first.toString(false),
+ first.type() == mongo::String &&
NamespaceString::validCollectionComponent(first.valuestr()));
- return first.String();
- }
-
- string Command::parseNsCollectionRequired(const string& dbname, const BSONObj& cmdObj) const {
- // Accepts both BSON String and Symbol for collection name per SERVER-16260
- // TODO(kangas) remove Symbol support in MongoDB 3.0 after Ruby driver audit
- BSONElement first = cmdObj.firstElement();
- uassert(17009,
- "no collection name specified",
- first.canonicalType() == canonicalizeBSONType(mongo::String)
- && first.valuestrsize() > 0);
- std::string coll = first.valuestr();
- return dbname + '.' + coll;
- }
-
- /*virtual*/ string Command::parseNs(const string& dbname, const BSONObj& cmdObj) const {
- BSONElement first = cmdObj.firstElement();
- if (first.type() != mongo::String)
- return dbname;
-
- string coll = cmdObj.firstElement().valuestr();
+ return first.String();
+}
+
+string Command::parseNsCollectionRequired(const string& dbname, const BSONObj& cmdObj) const {
+ // Accepts both BSON String and Symbol for collection name per SERVER-16260
+ // TODO(kangas) remove Symbol support in MongoDB 3.0 after Ruby driver audit
+ BSONElement first = cmdObj.firstElement();
+ uassert(17009,
+ "no collection name specified",
+ first.canonicalType() == canonicalizeBSONType(mongo::String) &&
+ first.valuestrsize() > 0);
+ std::string coll = first.valuestr();
+ return dbname + '.' + coll;
+}
+
+/*virtual*/ string Command::parseNs(const string& dbname, const BSONObj& cmdObj) const {
+ BSONElement first = cmdObj.firstElement();
+ if (first.type() != mongo::String)
+ return dbname;
+
+ string coll = cmdObj.firstElement().valuestr();
#if defined(CLC)
- DEV if( mongoutils::str::startsWith(coll, dbname+'.') ) {
- log() << "DEBUG parseNs Command's collection name looks like it includes the db name\n"
- << dbname << '\n'
- << coll << '\n'
- << cmdObj.toString() << endl;
- dassert(false);
- }
+ DEV if (mongoutils::str::startsWith(coll, dbname + '.')) {
+ log() << "DEBUG parseNs Command's collection name looks like it includes the db name\n"
+ << dbname << '\n' << coll << '\n' << cmdObj.toString() << endl;
+ dassert(false);
+ }
#endif
- return dbname + '.' + coll;
+ return dbname + '.' + coll;
+}
+
+ResourcePattern Command::parseResourcePattern(const std::string& dbname,
+ const BSONObj& cmdObj) const {
+ std::string ns = parseNs(dbname, cmdObj);
+ if (ns.find('.') == std::string::npos) {
+ return ResourcePattern::forDatabaseName(ns);
}
-
- ResourcePattern Command::parseResourcePattern(const std::string& dbname,
- const BSONObj& cmdObj) const {
- std::string ns = parseNs(dbname, cmdObj);
- if (ns.find('.') == std::string::npos) {
- return ResourcePattern::forDatabaseName(ns);
- }
- return ResourcePattern::forExactNamespace(NamespaceString(ns));
+ return ResourcePattern::forExactNamespace(NamespaceString(ns));
+}
+
+void Command::htmlHelp(stringstream& ss) const {
+ string helpStr;
+ {
+ stringstream h;
+ help(h);
+ helpStr = h.str();
}
-
- void Command::htmlHelp(stringstream& ss) const {
- string helpStr;
- {
- stringstream h;
- help(h);
- helpStr = h.str();
- }
- ss << "\n<tr><td>";
- bool web = _webCommands->find(name) != _webCommands->end();
- if( web ) ss << "<a href=\"/" << name << "?text=1\">";
- ss << name;
- if( web ) ss << "</a>";
- ss << "</td>\n";
- ss << "<td>";
- if (isWriteCommandForConfigServer()) {
- ss << "W ";
- }
- else {
- ss << "R ";
- }
- if( slaveOk() )
- ss << "S ";
- if( adminOnly() )
- ss << "A";
- ss << "</td>";
- ss << "<td>";
- if( helpStr != "no help defined" ) {
- const char *p = helpStr.c_str();
- while( *p ) {
- if( *p == '<' ) {
- ss << "&lt;";
- p++; continue;
- }
- else if( *p == '{' )
- ss << "<code>";
- else if( *p == '}' ) {
- ss << "}</code>";
- p++;
- continue;
- }
- if( strncmp(p, "http:", 5) == 0 ) {
- ss << "<a href=\"";
- const char *q = p;
- while( *q && *q != ' ' && *q != '\n' )
- ss << *q++;
- ss << "\">";
- q = p;
- if( str::startsWith(q, "http://www.mongodb.org/display/") )
- q += 31;
- while( *q && *q != ' ' && *q != '\n' ) {
- ss << (*q == '+' ? ' ' : *q);
- q++;
- if( *q == '#' )
- while( *q && *q != ' ' && *q != '\n' ) q++;
- }
- ss << "</a>";
- p = q;
- continue;
- }
- if( *p == '\n' ) ss << "<br>";
- else ss << *p;
+ ss << "\n<tr><td>";
+ bool web = _webCommands->find(name) != _webCommands->end();
+ if (web)
+ ss << "<a href=\"/" << name << "?text=1\">";
+ ss << name;
+ if (web)
+ ss << "</a>";
+ ss << "</td>\n";
+ ss << "<td>";
+ if (isWriteCommandForConfigServer()) {
+ ss << "W ";
+ } else {
+ ss << "R ";
+ }
+ if (slaveOk())
+ ss << "S ";
+ if (adminOnly())
+ ss << "A";
+ ss << "</td>";
+ ss << "<td>";
+ if (helpStr != "no help defined") {
+ const char* p = helpStr.c_str();
+ while (*p) {
+ if (*p == '<') {
+ ss << "&lt;";
+ p++;
+ continue;
+ } else if (*p == '{')
+ ss << "<code>";
+ else if (*p == '}') {
+ ss << "}</code>";
p++;
+ continue;
}
+ if (strncmp(p, "http:", 5) == 0) {
+ ss << "<a href=\"";
+ const char* q = p;
+ while (*q && *q != ' ' && *q != '\n')
+ ss << *q++;
+ ss << "\">";
+ q = p;
+ if (str::startsWith(q, "http://www.mongodb.org/display/"))
+ q += 31;
+ while (*q && *q != ' ' && *q != '\n') {
+ ss << (*q == '+' ? ' ' : *q);
+ q++;
+ if (*q == '#')
+ while (*q && *q != ' ' && *q != '\n')
+ q++;
+ }
+ ss << "</a>";
+ p = q;
+ continue;
+ }
+ if (*p == '\n')
+ ss << "<br>";
+ else
+ ss << *p;
+ p++;
}
- ss << "</td>";
- ss << "</tr>\n";
- }
-
- Command::Command(StringData _name, bool web, StringData oldName) :
- name(_name.toString()),
- _commandsExecutedMetric("commands."+ _name.toString()+".total", &_commandsExecuted),
- _commandsFailedMetric("commands."+ _name.toString()+".failed", &_commandsFailed) {
- // register ourself.
- if ( _commands == 0 )
- _commands = new CommandMap();
- if( _commandsByBestName == 0 )
- _commandsByBestName = new CommandMap();
- Command*& c = (*_commands)[name];
- if ( c )
- log() << "warning: 2 commands with name: " << _name << endl;
- c = this;
- (*_commandsByBestName)[name] = this;
-
- if( web ) {
- if( _webCommands == 0 )
- _webCommands = new CommandMap();
- (*_webCommands)[name] = this;
- }
-
- if( !oldName.empty() )
- (*_commands)[oldName.toString()] = this;
- }
-
- void Command::help( stringstream& help ) const {
- help << "no help defined";
}
-
- Command* Command::findCommand( StringData name ) {
- CommandMap::const_iterator i = _commands->find( name );
- if ( i == _commands->end() )
- return 0;
- return i->second;
+ ss << "</td>";
+ ss << "</tr>\n";
+}
+
+Command::Command(StringData _name, bool web, StringData oldName)
+ : name(_name.toString()),
+ _commandsExecutedMetric("commands." + _name.toString() + ".total", &_commandsExecuted),
+ _commandsFailedMetric("commands." + _name.toString() + ".failed", &_commandsFailed) {
+ // register ourself.
+ if (_commands == 0)
+ _commands = new CommandMap();
+ if (_commandsByBestName == 0)
+ _commandsByBestName = new CommandMap();
+ Command*& c = (*_commands)[name];
+ if (c)
+ log() << "warning: 2 commands with name: " << _name << endl;
+ c = this;
+ (*_commandsByBestName)[name] = this;
+
+ if (web) {
+ if (_webCommands == 0)
+ _webCommands = new CommandMap();
+ (*_webCommands)[name] = this;
}
- bool Command::appendCommandStatus(BSONObjBuilder& result, const Status& status) {
- appendCommandStatus(result, status.isOK(), status.reason());
- BSONObj tmp = result.asTempObj();
- if (!status.isOK() && !tmp.hasField("code")) {
- result.append("code", status.code());
- }
- return status.isOK();
+ if (!oldName.empty())
+ (*_commands)[oldName.toString()] = this;
+}
+
+void Command::help(stringstream& help) const {
+ help << "no help defined";
+}
+
+Command* Command::findCommand(StringData name) {
+ CommandMap::const_iterator i = _commands->find(name);
+ if (i == _commands->end())
+ return 0;
+ return i->second;
+}
+
+bool Command::appendCommandStatus(BSONObjBuilder& result, const Status& status) {
+ appendCommandStatus(result, status.isOK(), status.reason());
+ BSONObj tmp = result.asTempObj();
+ if (!status.isOK() && !tmp.hasField("code")) {
+ result.append("code", status.code());
}
+ return status.isOK();
+}
- void Command::appendCommandStatus(BSONObjBuilder& result, bool ok, const std::string& errmsg) {
- BSONObj tmp = result.asTempObj();
- bool have_ok = tmp.hasField("ok");
- bool have_errmsg = tmp.hasField("errmsg");
+void Command::appendCommandStatus(BSONObjBuilder& result, bool ok, const std::string& errmsg) {
+ BSONObj tmp = result.asTempObj();
+ bool have_ok = tmp.hasField("ok");
+ bool have_errmsg = tmp.hasField("errmsg");
- if (!have_ok)
- result.append( "ok" , ok ? 1.0 : 0.0 );
+ if (!have_ok)
+ result.append("ok", ok ? 1.0 : 0.0);
- if (!ok && !have_errmsg) {
- result.append("errmsg", errmsg);
- }
+ if (!ok && !have_errmsg) {
+ result.append("errmsg", errmsg);
}
-
- void Command::appendCommandWCStatus(BSONObjBuilder& result, const Status& status) {
- if (!status.isOK()) {
- WCErrorDetail wcError;
- wcError.setErrCode(status.code());
- wcError.setErrMessage(status.reason());
- result.append("writeConcernError", wcError.toBSON());
- }
+}
+
+void Command::appendCommandWCStatus(BSONObjBuilder& result, const Status& status) {
+ if (!status.isOK()) {
+ WCErrorDetail wcError;
+ wcError.setErrCode(status.code());
+ wcError.setErrMessage(status.reason());
+ result.append("writeConcernError", wcError.toBSON());
}
+}
- Status Command::getStatusFromCommandResult(const BSONObj& result) {
- return mongo::getStatusFromCommandResult(result);
- }
+Status Command::getStatusFromCommandResult(const BSONObj& result) {
+ return mongo::getStatusFromCommandResult(result);
+}
- Status Command::parseCommandCursorOptions(const BSONObj& cmdObj,
- long long defaultBatchSize,
- long long* batchSize) {
- invariant(batchSize);
- *batchSize = defaultBatchSize;
+Status Command::parseCommandCursorOptions(const BSONObj& cmdObj,
+ long long defaultBatchSize,
+ long long* batchSize) {
+ invariant(batchSize);
+ *batchSize = defaultBatchSize;
- BSONElement cursorElem = cmdObj["cursor"];
- if (cursorElem.eoo()) {
- return Status::OK();
- }
-
- if (cursorElem.type() != mongo::Object) {
- return Status(ErrorCodes::TypeMismatch, "cursor field must be missing or an object");
- }
-
- BSONObj cursor = cursorElem.embeddedObject();
- BSONElement batchSizeElem = cursor["batchSize"];
-
- const int expectedNumberOfCursorFields = batchSizeElem.eoo() ? 0 : 1;
- if (cursor.nFields() != expectedNumberOfCursorFields) {
- return Status(ErrorCodes::BadValue,
- "cursor object can't contain fields other than batchSize");
- }
+ BSONElement cursorElem = cmdObj["cursor"];
+ if (cursorElem.eoo()) {
+ return Status::OK();
+ }
- if (batchSizeElem.eoo()) {
- return Status::OK();
- }
+ if (cursorElem.type() != mongo::Object) {
+ return Status(ErrorCodes::TypeMismatch, "cursor field must be missing or an object");
+ }
- if (!batchSizeElem.isNumber()) {
- return Status(ErrorCodes::TypeMismatch, "cursor.batchSize must be a number");
- }
+ BSONObj cursor = cursorElem.embeddedObject();
+ BSONElement batchSizeElem = cursor["batchSize"];
- // This can change in the future, but for now all negatives are reserved.
- if (batchSizeElem.numberLong() < 0) {
- return Status(ErrorCodes::BadValue, "cursor.batchSize must not be negative");
- }
-
- *batchSize = batchSizeElem.numberLong();
+ const int expectedNumberOfCursorFields = batchSizeElem.eoo() ? 0 : 1;
+ if (cursor.nFields() != expectedNumberOfCursorFields) {
+ return Status(ErrorCodes::BadValue,
+ "cursor object can't contain fields other than batchSize");
+ }
+ if (batchSizeElem.eoo()) {
return Status::OK();
}
- Status Command::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- std::vector<Privilege> privileges;
- this->addRequiredPrivileges(dbname, cmdObj, &privileges);
- if (AuthorizationSession::get(client)->isAuthorizedForPrivileges(privileges))
- return Status::OK();
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ if (!batchSizeElem.isNumber()) {
+ return Status(ErrorCodes::TypeMismatch, "cursor.batchSize must be a number");
}
- void Command::redactForLogging(mutablebson::Document* cmdObj) {}
-
- BSONObj Command::getRedactedCopyForLogging(const BSONObj& cmdObj) {
- namespace mmb = mutablebson;
- mmb::Document cmdToLog(cmdObj, mmb::Document::kInPlaceDisabled);
- redactForLogging(&cmdToLog);
- BSONObjBuilder bob;
- cmdToLog.writeTo(&bob);
- return bob.obj();
+ // This can change in the future, but for now all negatives are reserved.
+ if (batchSizeElem.numberLong() < 0) {
+ return Status(ErrorCodes::BadValue, "cursor.batchSize must not be negative");
}
- void Command::logIfSlow( const Timer& timer, const string& msg ) {
- int ms = timer.millis();
- if (ms > serverGlobalParams.slowMS) {
- log() << msg << " took " << ms << " ms." << endl;
- }
- }
+ *batchSize = batchSizeElem.numberLong();
- static Status _checkAuthorizationImpl(Command* c,
- ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- namespace mmb = mutablebson;
- if ( c->adminOnly() && dbname != "admin" ) {
- return Status(ErrorCodes::Unauthorized, str::stream() << c->name <<
- " may only be run against the admin database.");
- }
- if (AuthorizationSession::get(client)->getAuthorizationManager().isAuthEnabled()) {
- Status status = c->checkAuthForCommand(client, dbname, cmdObj);
- if (status == ErrorCodes::Unauthorized) {
- mmb::Document cmdToLog(cmdObj, mmb::Document::kInPlaceDisabled);
- c->redactForLogging(&cmdToLog);
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized on " << dbname <<
- " to execute command " << cmdToLog.toString());
- }
- if (!status.isOK()) {
- return status;
- }
- }
- else if (c->adminOnly() &&
- c->localHostOnlyIfNoAuth(cmdObj) &&
- !client->getIsLocalHostConnection()) {
+ return Status::OK();
+}
- return Status(ErrorCodes::Unauthorized, str::stream() << c->name <<
- " must run from localhost when running db without auth");
- }
+Status Command::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ std::vector<Privilege> privileges;
+ this->addRequiredPrivileges(dbname, cmdObj, &privileges);
+ if (AuthorizationSession::get(client)->isAuthorizedForPrivileges(privileges))
return Status::OK();
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+}
+
+void Command::redactForLogging(mutablebson::Document* cmdObj) {}
+
+BSONObj Command::getRedactedCopyForLogging(const BSONObj& cmdObj) {
+ namespace mmb = mutablebson;
+ mmb::Document cmdToLog(cmdObj, mmb::Document::kInPlaceDisabled);
+ redactForLogging(&cmdToLog);
+ BSONObjBuilder bob;
+ cmdToLog.writeTo(&bob);
+ return bob.obj();
+}
+
+void Command::logIfSlow(const Timer& timer, const string& msg) {
+ int ms = timer.millis();
+ if (ms > serverGlobalParams.slowMS) {
+ log() << msg << " took " << ms << " ms." << endl;
}
-
- Status Command::_checkAuthorization(Command* c,
- ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- namespace mmb = mutablebson;
- Status status = _checkAuthorizationImpl(c, client, dbname, cmdObj);
+}
+
+static Status _checkAuthorizationImpl(Command* c,
+ ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ namespace mmb = mutablebson;
+ if (c->adminOnly() && dbname != "admin") {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << c->name << " may only be run against the admin database.");
+ }
+ if (AuthorizationSession::get(client)->getAuthorizationManager().isAuthEnabled()) {
+ Status status = c->checkAuthForCommand(client, dbname, cmdObj);
+ if (status == ErrorCodes::Unauthorized) {
+ mmb::Document cmdToLog(cmdObj, mmb::Document::kInPlaceDisabled);
+ c->redactForLogging(&cmdToLog);
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized on " << dbname << " to execute command "
+ << cmdToLog.toString());
+ }
if (!status.isOK()) {
- log(LogComponent::kAccessControl) << status << std::endl;
+ return status;
}
- audit::logCommandAuthzCheck(client,
- dbname,
- cmdObj,
- c,
- status.code());
- return status;
- }
-
- bool Command::isHelpRequest(const rpc::RequestInterface& request) {
- return request.getCommandArgs()["help"].trueValue();
+ } else if (c->adminOnly() && c->localHostOnlyIfNoAuth(cmdObj) &&
+ !client->getIsLocalHostConnection()) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << c->name
+ << " must run from localhost when running db without auth");
}
-
- void Command::generateHelpResponse(OperationContext* txn,
- const rpc::RequestInterface& request,
- rpc::ReplyBuilderInterface* replyBuilder,
- const Command& command) {
- std::stringstream ss;
- BSONObjBuilder helpBuilder;
- ss << "help for: " << command.name << " ";
- command.help(ss);
- helpBuilder.append("help", ss.str());
- helpBuilder.append("lockType", command.isWriteCommandForConfigServer() ? 1 : 0);
-
- replyBuilder->setMetadata(rpc::makeEmptyMetadata());
- replyBuilder->setCommandReply(helpBuilder.done());
+ return Status::OK();
+}
+
+Status Command::_checkAuthorization(Command* c,
+ ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ namespace mmb = mutablebson;
+ Status status = _checkAuthorizationImpl(c, client, dbname, cmdObj);
+ if (!status.isOK()) {
+ log(LogComponent::kAccessControl) << status << std::endl;
}
+ audit::logCommandAuthzCheck(client, dbname, cmdObj, c, status.code());
+ return status;
+}
+
+bool Command::isHelpRequest(const rpc::RequestInterface& request) {
+ return request.getCommandArgs()["help"].trueValue();
+}
+
+void Command::generateHelpResponse(OperationContext* txn,
+ const rpc::RequestInterface& request,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const Command& command) {
+ std::stringstream ss;
+ BSONObjBuilder helpBuilder;
+ ss << "help for: " << command.name << " ";
+ command.help(ss);
+ helpBuilder.append("help", ss.str());
+ helpBuilder.append("lockType", command.isWriteCommandForConfigServer() ? 1 : 0);
+
+ replyBuilder->setMetadata(rpc::makeEmptyMetadata());
+ replyBuilder->setCommandReply(helpBuilder.done());
+}
namespace {
- void _generateErrorResponse(OperationContext* txn,
- rpc::ReplyBuilderInterface* replyBuilder,
- const DBException& exception) {
-
- Command::registerError(txn, exception);
-
- // We could have thrown an exception after setting fields in the builder,
- // so we need to reset it to a clean state just to be sure.
- replyBuilder->reset();
-
- // No metadata is needed for an error reply.
- replyBuilder->setMetadata(rpc::makeEmptyMetadata());
-
- // We need to include some extra information for SendStaleConfig.
- if (exception.getCode() == ErrorCodes::SendStaleConfig) {
- const SendStaleConfigException& scex =
- static_cast<const SendStaleConfigException&>(exception);
- replyBuilder->setCommandReply(scex.toStatus(),
- BSON("ns" << scex.getns() <<
- "vReceived" << scex.getVersionReceived().toBSON() <<
- "vWanted" << scex.getVersionWanted().toBSON()));
- }
- else {
- replyBuilder->setCommandReply(exception.toStatus());
- }
+void _generateErrorResponse(OperationContext* txn,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const DBException& exception) {
+ Command::registerError(txn, exception);
+
+ // We could have thrown an exception after setting fields in the builder,
+ // so we need to reset it to a clean state just to be sure.
+ replyBuilder->reset();
+
+ // No metadata is needed for an error reply.
+ replyBuilder->setMetadata(rpc::makeEmptyMetadata());
+
+ // We need to include some extra information for SendStaleConfig.
+ if (exception.getCode() == ErrorCodes::SendStaleConfig) {
+ const SendStaleConfigException& scex =
+ static_cast<const SendStaleConfigException&>(exception);
+ replyBuilder->setCommandReply(scex.toStatus(),
+ BSON("ns" << scex.getns() << "vReceived"
+ << scex.getVersionReceived().toBSON() << "vWanted"
+ << scex.getVersionWanted().toBSON()));
+ } else {
+ replyBuilder->setCommandReply(exception.toStatus());
}
+}
} // namespace
- void Command::generateErrorResponse(OperationContext* txn,
- rpc::ReplyBuilderInterface* replyBuilder,
- const DBException& exception,
- const rpc::RequestInterface& request,
- Command* command) {
-
- LOG(1) << "assertion while executing command '"
- << request.getCommandName() << "' "
- << "on database '"
- << request.getDatabase() << "' "
- << "with arguments '"
- << command->getRedactedCopyForLogging(request.getCommandArgs()) << "' "
- << "and metadata '"
- << request.getMetadata() << "': "
- << exception.toString();
-
- _generateErrorResponse(txn, replyBuilder, exception);
- }
-
- void Command::generateErrorResponse(OperationContext* txn,
- rpc::ReplyBuilderInterface* replyBuilder,
- const DBException& exception,
- const rpc::RequestInterface& request) {
-
- LOG(1) << "assertion while executing command '"
- << request.getCommandName() << "' "
- << "on database '"
- << request.getDatabase() << "': "
- << exception.toString();
-
- _generateErrorResponse(txn, replyBuilder, exception);
- }
-
- void Command::generateErrorResponse(OperationContext* txn,
- rpc::ReplyBuilderInterface* replyBuilder,
- const DBException& exception) {
- LOG(1) << "assertion while executing command: " << exception.toString();
- _generateErrorResponse(txn, replyBuilder, exception);
- }
-
- void runCommands(OperationContext* txn,
- const rpc::RequestInterface& request,
- rpc::ReplyBuilderInterface* replyBuilder) {
-
- try {
-
- dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kMetadata);
-
- Command* c = nullptr;
- // In the absence of a Command object, no redaction is possible. Therefore
- // to avoid displaying potentially sensitive information in the logs,
- // we restrict the log message to the name of the unrecognized command.
- // However, the complete command object will still be echoed to the client.
- if (!(c = Command::findCommand(request.getCommandName()))) {
- Command::unknownCommands.increment();
- std::string msg = str::stream() << "no such command: '"
- << request.getCommandName() << "'";
- LOG(2) << msg;
- uasserted(ErrorCodes::CommandNotFound,
- str::stream() << msg << ", bad cmd: '"
- << request.getCommandArgs() << "'");
- }
+void Command::generateErrorResponse(OperationContext* txn,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const DBException& exception,
+ const rpc::RequestInterface& request,
+ Command* command) {
+ LOG(1) << "assertion while executing command '" << request.getCommandName() << "' "
+ << "on database '" << request.getDatabase() << "' "
+ << "with arguments '" << command->getRedactedCopyForLogging(request.getCommandArgs())
+ << "' "
+ << "and metadata '" << request.getMetadata() << "': " << exception.toString();
+
+ _generateErrorResponse(txn, replyBuilder, exception);
+}
+
+void Command::generateErrorResponse(OperationContext* txn,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const DBException& exception,
+ const rpc::RequestInterface& request) {
+ LOG(1) << "assertion while executing command '" << request.getCommandName() << "' "
+ << "on database '" << request.getDatabase() << "': " << exception.toString();
+
+ _generateErrorResponse(txn, replyBuilder, exception);
+}
+
+void Command::generateErrorResponse(OperationContext* txn,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const DBException& exception) {
+ LOG(1) << "assertion while executing command: " << exception.toString();
+ _generateErrorResponse(txn, replyBuilder, exception);
+}
+
+void runCommands(OperationContext* txn,
+ const rpc::RequestInterface& request,
+ rpc::ReplyBuilderInterface* replyBuilder) {
+ try {
+ dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kMetadata);
+
+ Command* c = nullptr;
+ // In the absence of a Command object, no redaction is possible. Therefore
+ // to avoid displaying potentially sensitive information in the logs,
+ // we restrict the log message to the name of the unrecognized command.
+ // However, the complete command object will still be echoed to the client.
+ if (!(c = Command::findCommand(request.getCommandName()))) {
+ Command::unknownCommands.increment();
+ std::string msg = str::stream() << "no such command: '" << request.getCommandName()
+ << "'";
+ LOG(2) << msg;
+ uasserted(ErrorCodes::CommandNotFound,
+ str::stream() << msg << ", bad cmd: '" << request.getCommandArgs() << "'");
+ }
- LOG(2) << "run command " << request.getDatabase() << ".$cmd" << ' '
- << c->getRedactedCopyForLogging(request.getCommandArgs());
+ LOG(2) << "run command " << request.getDatabase() << ".$cmd" << ' '
+ << c->getRedactedCopyForLogging(request.getCommandArgs());
- Command::execCommand(txn, c, request, replyBuilder);
- }
+ Command::execCommand(txn, c, request, replyBuilder);
+ }
- catch (const DBException& ex) {
- Command::generateErrorResponse(txn, replyBuilder, ex, request);
- }
+ catch (const DBException& ex) {
+ Command::generateErrorResponse(txn, replyBuilder, ex, request);
}
+}
- class PoolFlushCmd : public Command {
- public:
- PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ) {}
- virtual void help( stringstream &help ) const { help<<"internal"; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::connPoolSync);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
+class PoolFlushCmd : public Command {
+public:
+ PoolFlushCmd() : Command("connPoolSync", false, "connpoolsync") {}
+ virtual void help(stringstream& help) const {
+ help << "internal";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::connPoolSync);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
- virtual bool run(OperationContext* txn,
- const string&,
- mongo::BSONObj&,
- int,
- std::string&,
- mongo::BSONObjBuilder& result) {
- shardConnectionPool.flush();
- globalConnPool.flush();
- return true;
- }
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool run(OperationContext* txn,
+ const string&,
+ mongo::BSONObj&,
+ int,
+ std::string&,
+ mongo::BSONObjBuilder& result) {
+ shardConnectionPool.flush();
+ globalConnPool.flush();
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
- } poolFlushCmd;
-
- class PoolStats : public Command {
- public:
- PoolStats() : Command( "connPoolStats" ) {}
- virtual void help( stringstream &help ) const { help<<"stats about connection pool"; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::connPoolStats);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- virtual bool run(OperationContext* txn,
- const string&,
- mongo::BSONObj&,
- int,
- std::string&,
- mongo::BSONObjBuilder& result) {
-
- globalConnPool.appendInfo(result);
- result.append( "numDBClientConnection" , DBClientConnection::getNumConnections() );
- result.append( "numAScopedConnection" , AScopedConnection::getNumConnections() );
- return true;
- }
- virtual bool slaveOk() const {
- return true;
- }
+} poolFlushCmd;
+
+class PoolStats : public Command {
+public:
+ PoolStats() : Command("connPoolStats") {}
+ virtual void help(stringstream& help) const {
+ help << "stats about connection pool";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::connPoolStats);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual bool run(OperationContext* txn,
+ const string&,
+ mongo::BSONObj&,
+ int,
+ std::string&,
+ mongo::BSONObjBuilder& result) {
+ globalConnPool.appendInfo(result);
+ result.append("numDBClientConnection", DBClientConnection::getNumConnections());
+ result.append("numAScopedConnection", AScopedConnection::getNumConnections());
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
- } poolStatsCmd;
+} poolStatsCmd;
-} // namespace mongo
+} // namespace mongo
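
PoolFlushCmd and PoolStats above show the minimum a Command subclass provides after the reformat: a registered name, help(), isWriteCommandForConfigServer(), slaveOk(), an authorization hook, and run(). Here is a hypothetical read-only command following the same shape; the class name, help text, and the reuse of ActionType::connPoolStats for the privilege check are illustrative only.

    // Illustrative sketch modeled on PoolStats above, not an actual server command.
    class ExampleStatsCmd : public Command {
    public:
        ExampleStatsCmd() : Command("exampleStats") {}  // hypothetical command name
        virtual void help(stringstream& help) const {
            help << "illustrative diagnostic command";
        }
        virtual bool isWriteCommandForConfigServer() const {
            return false;  // read-only
        }
        virtual bool slaveOk() const {
            return true;  // may run on secondaries
        }
        virtual void addRequiredPrivileges(const std::string& dbname,
                                           const BSONObj& cmdObj,
                                           std::vector<Privilege>* out) {
            ActionSet actions;
            actions.addAction(ActionType::connPoolStats);  // reusing an existing action for the sketch
            out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
        }
        virtual bool run(OperationContext* txn,
                         const string&,
                         mongo::BSONObj&,
                         int,
                         std::string&,
                         mongo::BSONObjBuilder& result) {
            result.append("exampleField", 1);  // trivial reply payload
            return true;
        }
    } exampleStatsCmd;  // registration happens through this global instance, as with poolStatsCmd
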
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 2c1a9edf76a..e6b7982e91c 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -47,357 +47,365 @@
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class Client;
- class CurOp;
- class Database;
- class OperationContext;
- class Timer;
+class BSONObj;
+class BSONObjBuilder;
+class Client;
+class CurOp;
+class Database;
+class OperationContext;
+class Timer;
namespace mutablebson {
- class Document;
+class Document;
} // namespace mutablebson
- /** mongodb "commands" (sent via db.$cmd.findOne(...))
- subclass to make a command. define a singleton object for it.
- */
- class Command {
- protected:
- // The type of the first field in 'cmdObj' must be mongo::String. The first field is
- // interpreted as a collection name.
- std::string parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj) const;
-
- // The type of the first field in 'cmdObj' must be mongo::String or Symbol.
- // The first field is interpreted as a collection name.
- std::string parseNsCollectionRequired(const std::string& dbname,
- const BSONObj& cmdObj) const;
- public:
-
- typedef StringMap<Command*> CommandMap;
-
- // Return the namespace for the command. If the first field in 'cmdObj' is of type
- // mongo::String, then that field is interpreted as the collection name, and is
- // appended to 'dbname' after a '.' character. If the first field is not of type
- // mongo::String, then 'dbname' is returned unmodified.
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const;
-
- // Utility that returns a ResourcePattern for the namespace returned from
- // parseNs(dbname, cmdObj). This will be either an exact namespace resource pattern
- // or a database resource pattern, depending on whether parseNs returns a fully qualifed
- // collection name or just a database name.
- ResourcePattern parseResourcePattern(const std::string& dbname,
- const BSONObj& cmdObj) const;
-
- const std::string name;
-
- /* run the given command
- implement this...
-
- return value is true if succeeded. if false, set errmsg text.
- */
- virtual bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) = 0;
-
- /**
- * Translation point between the new request/response types and the legacy types.
- *
- * Then we won't need to mutate the command object. At that point we can also make
- * this method virtual so commands can override it directly.
- */
- /*virtual*/ bool run(OperationContext* txn,
- const rpc::RequestInterface& request,
- rpc::ReplyBuilderInterface* replyBuilder);
-
-
- /**
- * This designation for the command is only used by the 'help' call and has nothing to do
- * with lock acquisition. The reason we need to have it there is because
- * SyncClusterConnection uses this to determine whether the command is update and needs to
- * be sent to all three servers or just one.
- *
- * Eventually when SyncClusterConnection is refactored out, we can get rid of it.
- */
- virtual bool isWriteCommandForConfigServer() const = 0;
-
- /* Return true if only the admin ns has privileges to run this command. */
- virtual bool adminOnly() const {
- return false;
- }
-
- void htmlHelp(std::stringstream&) const;
-
- /* Like adminOnly, but even stricter: we must either be authenticated for admin db,
- or, if running without auth, on the local interface. Used for things which
- are so major that remote invocation may not make sense (e.g., shutdownServer).
-
- When localHostOnlyIfNoAuth() is true, adminOnly() must also be true.
- */
- virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) { return false; }
-
- /* Return true if slaves are allowed to execute the command
- */
- virtual bool slaveOk() const = 0;
-
- /* Return true if the client force a command to be run on a slave by
- turning on the 'slaveOk' option in the command query.
- */
- virtual bool slaveOverrideOk() const {
- return false;
- }
-
- /**
- * Override and return fales if the command opcounters should not be incremented on
- * behalf of this command.
- */
- virtual bool shouldAffectCommandCounter() const { return true; }
-
- virtual void help( std::stringstream& help ) const;
-
- /**
- * Commands which can be explained override this method. Any operation which has a query
- * part and executes as a tree of execution stages can be explained. A command should
- * implement explain by:
- *
- * 1) Calling its custom parse function in order to parse the command. The output of
- * this function should be a CanonicalQuery (representing the query part of the
- * operation), and a PlanExecutor which wraps the tree of execution stages.
- *
- * 2) Calling Explain::explainStages(...) on the PlanExecutor. This is the function
- * which knows how to convert an execution stage tree into explain output.
- */
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
- return Status(ErrorCodes::IllegalOperation, "Cannot explain cmd: " + name);
- }
-
- /**
- * Checks if the given client is authorized to run this command on database "dbname"
- * with the invocation described by "cmdObj".
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
- /**
- * Redacts "cmdObj" in-place to a form suitable for writing to logs.
- *
- * The default implementation does nothing.
- */
- virtual void redactForLogging(mutablebson::Document* cmdObj);
-
- /**
- * Returns a copy of "cmdObj" in a form suitable for writing to logs.
- * Uses redactForLogging() to transform "cmdObj".
- */
- BSONObj getRedactedCopyForLogging(const BSONObj& cmdObj);
-
- /* Return true if a replica set secondary should go into "recovering"
- (unreadable) state while running this command.
- */
- virtual bool maintenanceMode() const { return false; }
-
- /* Return true if command should be permitted when a replica set secondary is in "recovering"
- (unreadable) state.
- */
- virtual bool maintenanceOk() const { return true; /* assumed true prior to commit */ }
-
- /** @param webUI expose the command in the web ui as localhost:28017/<name>
- @param oldName an optional old, deprecated name for the command
- */
- Command(StringData _name, bool webUI = false, StringData oldName = StringData());
-
- virtual ~Command() {}
-
- protected:
- /**
- * Appends to "*out" the privileges required to run this command on database "dbname" with
- * the invocation described by "cmdObj". New commands shouldn't implement this, they should
- * implement checkAuthForCommand instead.
- */
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- // The default implementation of addRequiredPrivileges should never be hit.
- fassertFailed(16940);
- }
-
- BSONObj getQuery( const BSONObj& cmdObj ) {
- if ( cmdObj["query"].type() == Object )
- return cmdObj["query"].embeddedObject();
- if ( cmdObj["q"].type() == Object )
- return cmdObj["q"].embeddedObject();
- return BSONObj();
- }
-
- static void logIfSlow( const Timer& cmdTimer, const std::string& msg);
-
- static CommandMap* _commands;
- static CommandMap* _commandsByBestName;
- static CommandMap* _webCommands;
-
- // Counters for how many times this command has been executed and failed
- Counter64 _commandsExecuted;
- Counter64 _commandsFailed;
-
- // Pointers to hold the metrics tree references
- ServerStatusMetricField<Counter64> _commandsExecutedMetric;
- ServerStatusMetricField<Counter64> _commandsFailedMetric;
-
- public:
-
- static const CommandMap* commandsByBestName() { return _commandsByBestName; }
- static const CommandMap* webCommands() { return _webCommands; }
-
- // Counter for unknown commands
- static Counter64 unknownCommands;
-
- /** @return if command was found */
- static void runAgainstRegistered(const char *ns,
- BSONObj& jsobj,
- BSONObjBuilder& anObjBuilder,
- int queryOptions = 0);
- static Command* findCommand( StringData name );
-
- /**
- * Executes a command after stripping metadata, performing authorization checks,
- * handling audit impersonation, and (potentially) setting maintenance mode. This method
- * also checks that the command is permissible to run on the node given its current
- * replication state. All the logic here is independent of any particular command; any
- * functionality relevant to a specific command should be confined to its run() method.
- *
- * This is currently used by mongod and dbwebserver.
- */
- static void execCommand(OperationContext* txn,
- Command* command,
- const rpc::RequestInterface& request,
- rpc::ReplyBuilderInterface* replyBuilder);
-
- // For mongos
- // TODO: remove this entirely now that all instances of ClientBasic are instances
- // of Client. This will happen as part of SERVER-18292
- static void execCommandClientBasic(OperationContext* txn,
- Command* c,
- ClientBasic& client,
- int queryOptions,
- const char *ns,
- BSONObj& cmdObj,
- BSONObjBuilder& result);
-
- // Helper for setting errmsg and ok field in command result object.
- static void appendCommandStatus(BSONObjBuilder& result, bool ok, const std::string& errmsg);
-
- // @return s.isOK()
- static bool appendCommandStatus(BSONObjBuilder& result, const Status& status);
-
- // Converts "result" into a Status object. The input is expected to be the object returned
- // by running a command. Returns ErrorCodes::CommandResultSchemaViolation if "result" does
- // not look like the result of a command.
- static Status getStatusFromCommandResult(const BSONObj& result);
-
- /**
- * Parses cursor options from the command request object "cmdObj". Used by commands that
- * take cursor options. The only cursor option currently supported is "cursor.batchSize".
- *
- * If a valid batch size was specified, returns Status::OK() and fills in "batchSize" with
- * the specified value. If no batch size was specified, returns Status::OK() and fills in
- * "batchSize" with the provided default value.
- *
- * If an error occurred while parsing, returns an error Status. If this is the case, the
- * value pointed to by "batchSize" is unspecified.
- */
- static Status parseCommandCursorOptions(const BSONObj& cmdObj,
- long long defaultBatchSize,
- long long* batchSize);
-
- /**
- * Helper for setting a writeConcernError field in the command result object if
- * a writeConcern error occurs.
- */
- static void appendCommandWCStatus(BSONObjBuilder& result, const Status& status);
-
- // Set by command line. Controls whether or not testing-only commands should be available.
- static int testCommandsEnabled;
-
- /**
- * Returns true if this a request for the 'help' information associated with the command.
- */
- static bool isHelpRequest(const rpc::RequestInterface& request);
-
- /**
- * Generates a reply from the 'help' information associated with a command. The state of
- * the passed ReplyBuilder will be in kOutputDocs after calling this method.
- */
- static void generateHelpResponse(OperationContext* txn,
- const rpc::RequestInterface& request,
- rpc::ReplyBuilderInterface* replyBuilder,
- const Command& command);
-
- /**
- * When an assertion is hit during command execution, this method is used to fill the fields
- * of the command reply with the information from the error. In addition, information about
- * the command is logged. This function does not return anything, because there is typically
- * already an active exception when this function is called, so there
- * is little that can be done if it fails.
- */
- static void generateErrorResponse(OperationContext* txn,
- rpc::ReplyBuilderInterface* replyBuilder,
- const DBException& exception,
- const rpc::RequestInterface& request,
- Command* command);
-
- /**
- * Generates a command error response. This overload of generateErrorResponse is intended
- * to be called if the command is successfully parsed, but there is an error before we have
- * a handle to the actual Command object. This can happen, for example, when the command
- * is not found.
- */
- static void generateErrorResponse(OperationContext* txn,
- rpc::ReplyBuilderInterface* replyBuilder,
- const DBException& exception,
- const rpc::RequestInterface& request);
-
- /**
- * Generates a command error response. Similar to other overloads of generateErrorResponse,
- * but doesn't print any information about the specific command being executed. This is
- * neccessary, for example, if there is
- * an assertion hit while parsing the command.
- */
- static void generateErrorResponse(OperationContext* txn,
- rpc::ReplyBuilderInterface* replyBuilder,
- const DBException& exception);
-
- /**
- * Records the error on to the OperationContext. This hook is needed because mongos
- * does not have CurOp linked in to it.
- */
- static void registerError(OperationContext* txn, const DBException& exception);
-
- private:
-
- /**
- * Checks to see if the client is authorized to run the given command with the given
- * parameters on the given named database.
- *
- * Returns Status::OK() if the command is authorized. Most likely returns
- * ErrorCodes::Unauthorized otherwise, but any return other than Status::OK implies not
- * authorized.
- */
- static Status _checkAuthorization(Command* c,
- ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
- };
-
- void runCommands(OperationContext* txn,
- const rpc::RequestInterface& request,
- rpc::ReplyBuilderInterface* replyBuilder);
-
-} // namespace mongo
+/** mongodb "commands" (sent via db.$cmd.findOne(...))
+ subclass to make a command. define a singleton object for it.
+ */
+class Command {
+protected:
+ // The type of the first field in 'cmdObj' must be mongo::String. The first field is
+ // interpreted as a collection name.
+ std::string parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj) const;
+
+ // The type of the first field in 'cmdObj' must be mongo::String or Symbol.
+ // The first field is interpreted as a collection name.
+ std::string parseNsCollectionRequired(const std::string& dbname, const BSONObj& cmdObj) const;
+
+public:
+ typedef StringMap<Command*> CommandMap;
+
+ // Return the namespace for the command. If the first field in 'cmdObj' is of type
+ // mongo::String, then that field is interpreted as the collection name, and is
+ // appended to 'dbname' after a '.' character. If the first field is not of type
+ // mongo::String, then 'dbname' is returned unmodified.
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const;
+
+ // Utility that returns a ResourcePattern for the namespace returned from
+ // parseNs(dbname, cmdObj). This will be either an exact namespace resource pattern
+    // or a database resource pattern, depending on whether parseNs returns a fully qualified
+ // collection name or just a database name.
+ ResourcePattern parseResourcePattern(const std::string& dbname, const BSONObj& cmdObj) const;
+
+ const std::string name;
+
+ /* run the given command
+ implement this...
+
+ return value is true if succeeded. if false, set errmsg text.
+ */
+ virtual bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) = 0;
+
+ /**
+ * Translation point between the new request/response types and the legacy types.
+ *
+ * Then we won't need to mutate the command object. At that point we can also make
+ * this method virtual so commands can override it directly.
+ */
+ /*virtual*/ bool run(OperationContext* txn,
+ const rpc::RequestInterface& request,
+ rpc::ReplyBuilderInterface* replyBuilder);
+
+
+ /**
+ * This designation for the command is only used by the 'help' call and has nothing to do
+ * with lock acquisition. The reason we need to have it there is because
+ * SyncClusterConnection uses this to determine whether the command is update and needs to
+ * be sent to all three servers or just one.
+ *
+ * Eventually when SyncClusterConnection is refactored out, we can get rid of it.
+ */
+ virtual bool isWriteCommandForConfigServer() const = 0;
+
+ /* Return true if only the admin ns has privileges to run this command. */
+ virtual bool adminOnly() const {
+ return false;
+ }
+
+ void htmlHelp(std::stringstream&) const;
+
+ /* Like adminOnly, but even stricter: we must either be authenticated for admin db,
+ or, if running without auth, on the local interface. Used for things which
+ are so major that remote invocation may not make sense (e.g., shutdownServer).
+
+ When localHostOnlyIfNoAuth() is true, adminOnly() must also be true.
+ */
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ return false;
+ }
+
+ /* Return true if slaves are allowed to execute the command
+ */
+ virtual bool slaveOk() const = 0;
+
+    /* Return true if the client can force a command to be run on a slave by
+ turning on the 'slaveOk' option in the command query.
+ */
+ virtual bool slaveOverrideOk() const {
+ return false;
+ }
+
+ /**
+     * Override and return false if the command opcounters should not be incremented on
+ * behalf of this command.
+ */
+ virtual bool shouldAffectCommandCounter() const {
+ return true;
+ }
+
+ virtual void help(std::stringstream& help) const;
+
+ /**
+ * Commands which can be explained override this method. Any operation which has a query
+ * part and executes as a tree of execution stages can be explained. A command should
+ * implement explain by:
+ *
+ * 1) Calling its custom parse function in order to parse the command. The output of
+ * this function should be a CanonicalQuery (representing the query part of the
+ * operation), and a PlanExecutor which wraps the tree of execution stages.
+ *
+ * 2) Calling Explain::explainStages(...) on the PlanExecutor. This is the function
+ * which knows how to convert an execution stage tree into explain output.
+ */
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ return Status(ErrorCodes::IllegalOperation, "Cannot explain cmd: " + name);
+ }
+
+ /**
+ * Checks if the given client is authorized to run this command on database "dbname"
+ * with the invocation described by "cmdObj".
+ */
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ /**
+ * Redacts "cmdObj" in-place to a form suitable for writing to logs.
+ *
+ * The default implementation does nothing.
+ */
+ virtual void redactForLogging(mutablebson::Document* cmdObj);
+
+ /**
+ * Returns a copy of "cmdObj" in a form suitable for writing to logs.
+ * Uses redactForLogging() to transform "cmdObj".
+ */
+ BSONObj getRedactedCopyForLogging(const BSONObj& cmdObj);
+
+ /* Return true if a replica set secondary should go into "recovering"
+ (unreadable) state while running this command.
+ */
+ virtual bool maintenanceMode() const {
+ return false;
+ }
+
+ /* Return true if command should be permitted when a replica set secondary is in "recovering"
+ (unreadable) state.
+ */
+ virtual bool maintenanceOk() const {
+ return true; /* assumed true prior to commit */
+ }
+
+ /** @param webUI expose the command in the web ui as localhost:28017/<name>
+ @param oldName an optional old, deprecated name for the command
+ */
+ Command(StringData _name, bool webUI = false, StringData oldName = StringData());
+
+ virtual ~Command() {}
+
+protected:
+ /**
+ * Appends to "*out" the privileges required to run this command on database "dbname" with
+ * the invocation described by "cmdObj". New commands shouldn't implement this, they should
+ * implement checkAuthForCommand instead.
+ */
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // The default implementation of addRequiredPrivileges should never be hit.
+ fassertFailed(16940);
+ }
+
+ BSONObj getQuery(const BSONObj& cmdObj) {
+ if (cmdObj["query"].type() == Object)
+ return cmdObj["query"].embeddedObject();
+ if (cmdObj["q"].type() == Object)
+ return cmdObj["q"].embeddedObject();
+ return BSONObj();
+ }
+
+ static void logIfSlow(const Timer& cmdTimer, const std::string& msg);
+
+ static CommandMap* _commands;
+ static CommandMap* _commandsByBestName;
+ static CommandMap* _webCommands;
+
+ // Counters for how many times this command has been executed and failed
+ Counter64 _commandsExecuted;
+ Counter64 _commandsFailed;
+
+ // Pointers to hold the metrics tree references
+ ServerStatusMetricField<Counter64> _commandsExecutedMetric;
+ ServerStatusMetricField<Counter64> _commandsFailedMetric;
+
+public:
+ static const CommandMap* commandsByBestName() {
+ return _commandsByBestName;
+ }
+ static const CommandMap* webCommands() {
+ return _webCommands;
+ }
+
+ // Counter for unknown commands
+ static Counter64 unknownCommands;
+
+ /** Runs the named command if it is registered; reports an unknown command otherwise. */
+ static void runAgainstRegistered(const char* ns,
+ BSONObj& jsobj,
+ BSONObjBuilder& anObjBuilder,
+ int queryOptions = 0);
+ static Command* findCommand(StringData name);
+
+ /**
+ * Executes a command after stripping metadata, performing authorization checks,
+ * handling audit impersonation, and (potentially) setting maintenance mode. This method
+ * also checks that the command is permissible to run on the node given its current
+ * replication state. All the logic here is independent of any particular command; any
+ * functionality relevant to a specific command should be confined to its run() method.
+ *
+ * This is currently used by mongod and dbwebserver.
+ */
+ static void execCommand(OperationContext* txn,
+ Command* command,
+ const rpc::RequestInterface& request,
+ rpc::ReplyBuilderInterface* replyBuilder);
+
+ // For mongos
+ // TODO: remove this entirely now that all instances of ClientBasic are instances
+ // of Client. This will happen as part of SERVER-18292
+ static void execCommandClientBasic(OperationContext* txn,
+ Command* c,
+ ClientBasic& client,
+ int queryOptions,
+ const char* ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder& result);
+
+ // Helper for setting errmsg and ok field in command result object.
+ static void appendCommandStatus(BSONObjBuilder& result, bool ok, const std::string& errmsg);
+
+ // @return status.isOK()
+ static bool appendCommandStatus(BSONObjBuilder& result, const Status& status);
+
+ // Converts "result" into a Status object. The input is expected to be the object returned
+ // by running a command. Returns ErrorCodes::CommandResultSchemaViolation if "result" does
+ // not look like the result of a command.
+ static Status getStatusFromCommandResult(const BSONObj& result);
+
+ /**
+ * Parses cursor options from the command request object "cmdObj". Used by commands that
+ * take cursor options. The only cursor option currently supported is "cursor.batchSize".
+ *
+ * If a valid batch size was specified, returns Status::OK() and fills in "batchSize" with
+ * the specified value. If no batch size was specified, returns Status::OK() and fills in
+ * "batchSize" with the provided default value.
+ *
+ * If an error occurred while parsing, returns an error Status. If this is the case, the
+ * value pointed to by "batchSize" is unspecified.
+ */
+ static Status parseCommandCursorOptions(const BSONObj& cmdObj,
+ long long defaultBatchSize,
+ long long* batchSize);
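A sketch of the call pattern this helper is meant for, inside a command's run(); the default of 101 documents is illustrative only, not a value taken from this header.

    long long batchSize;
    Status status = Command::parseCommandCursorOptions(cmdObj, 101 /* default */, &batchSize);
    if (!status.isOK()) {
        // On error, batchSize is unspecified; surface the parse failure to the client.
        return appendCommandStatus(result, status);
    }
    // batchSize now holds either cmdObj's cursor.batchSize or the default.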
+
+ /**
+ * Helper for setting a writeConcernError field in the command result object if
+ * a writeConcern error occurs.
+ */
+ static void appendCommandWCStatus(BSONObjBuilder& result, const Status& status);
+
+ // Set by command line. Controls whether or not testing-only commands should be available.
+ static int testCommandsEnabled;
+
+ /**
+ * Returns true if this is a request for the 'help' information associated with the command.
+ */
+ static bool isHelpRequest(const rpc::RequestInterface& request);
+
+ /**
+ * Generates a reply from the 'help' information associated with a command. The state of
+ * the passed ReplyBuilder will be in kOutputDocs after calling this method.
+ */
+ static void generateHelpResponse(OperationContext* txn,
+ const rpc::RequestInterface& request,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const Command& command);
+
+ /**
+ * When an assertion is hit during command execution, this method is used to fill the fields
+ * of the command reply with the information from the error. In addition, information about
+ * the command is logged. This function does not return anything, because there is typically
+ * already an active exception when this function is called, so there
+ * is little that can be done if it fails.
+ */
+ static void generateErrorResponse(OperationContext* txn,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const DBException& exception,
+ const rpc::RequestInterface& request,
+ Command* command);
+
+ /**
+ * Generates a command error response. This overload of generateErrorResponse is intended
+ * to be called if the command is successfully parsed, but there is an error before we have
+ * a handle to the actual Command object. This can happen, for example, when the command
+ * is not found.
+ */
+ static void generateErrorResponse(OperationContext* txn,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const DBException& exception,
+ const rpc::RequestInterface& request);
+
+ /**
+ * Generates a command error response. Similar to other overloads of generateErrorResponse,
+ * but doesn't print any information about the specific command being executed. This is
+ * necessary, for example, if an assertion is hit while parsing the command.
+ */
+ static void generateErrorResponse(OperationContext* txn,
+ rpc::ReplyBuilderInterface* replyBuilder,
+ const DBException& exception);
+
+ /**
+ * Records the error on to the OperationContext. This hook is needed because mongos
+ * does not have CurOp linked in to it.
+ */
+ static void registerError(OperationContext* txn, const DBException& exception);
+
+private:
+ /**
+ * Checks to see if the client is authorized to run the given command with the given
+ * parameters on the given named database.
+ *
+ * Returns Status::OK() if the command is authorized. Most likely returns
+ * ErrorCodes::Unauthorized otherwise, but any return other than Status::OK implies not
+ * authorized.
+ */
+ static Status _checkAuthorization(Command* c,
+ ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+};
+
+void runCommands(OperationContext* txn,
+ const rpc::RequestInterface& request,
+ rpc::ReplyBuilderInterface* replyBuilder);
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index 7ca79ba57e4..7381d61dffc 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -58,103 +58,106 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- class ApplyOpsCmd : public Command {
- public:
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+class ApplyOpsCmd : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- ApplyOpsCmd() : Command( "applyOps" ) {}
- virtual void help( stringstream &help ) const {
- help << "internal (sharding)\n{ applyOps : [ ] , preCondition : [ { ns : ... , q : ... , res : ... } ] }";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- // applyOps can do pretty much anything, so require all privileges.
- RoleGraph::generateUniversalPrivileges(out);
- }
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ ApplyOpsCmd() : Command("applyOps") {}
+ virtual void help(stringstream& help) const {
+ help << "internal (sharding)\n{ applyOps : [ ] , preCondition : [ { ns : ... , q : ... , "
+ "res : ... } ] }";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // applyOps can do pretty much anything, so require all privileges.
+ RoleGraph::generateUniversalPrivileges(out);
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
- if ( cmdObj.firstElement().type() != Array ) {
- errmsg = "ops has to be an array";
- return false;
- }
+ if (cmdObj.firstElement().type() != Array) {
+ errmsg = "ops has to be an array";
+ return false;
+ }
- BSONObj ops = cmdObj.firstElement().Obj();
+ BSONObj ops = cmdObj.firstElement().Obj();
- {
- // check input
- BSONObjIterator i( ops );
- while ( i.more() ) {
- BSONElement e = i.next();
- if (!_checkOperation(e, errmsg)) {
- return false;
- }
+ {
+ // check input
+ BSONObjIterator i(ops);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (!_checkOperation(e, errmsg)) {
+ return false;
}
}
-
- return appendCommandStatus(result, applyOps(txn, dbname, cmdObj, &result));
}
- private:
- /**
- * Returns true if 'e' contains a valid operation.
- */
- bool _checkOperation(const BSONElement& e, string& errmsg) {
- if (e.type() != Object) {
- errmsg = str::stream() << "op not an object: " << e.fieldName();
- return false;
- }
- BSONObj obj = e.Obj();
- // op - operation type
- BSONElement opElement = obj.getField("op");
- if (opElement.eoo()) {
- errmsg = str::stream() << "op does not contain required \"op\" field: "
- << e.fieldName();
- return false;
- }
- if (opElement.type() != mongo::String) {
- errmsg = str::stream() << "\"op\" field is not a string: " << e.fieldName();
- return false;
- }
- // operation type -- see logOp() comments for types
- const char *opType = opElement.valuestrsafe();
- if (*opType == '\0') {
- errmsg = str::stream() << "\"op\" field value cannot be empty: " << e.fieldName();
- return false;
- }
+ return appendCommandStatus(result, applyOps(txn, dbname, cmdObj, &result));
+ }
- // ns - namespace
- // Only operations of type 'n' are allowed to have an empty namespace.
- BSONElement nsElement = obj.getField("ns");
- if (nsElement.eoo()) {
- errmsg = str::stream() << "op does not contain required \"ns\" field: "
- << e.fieldName();
- return false;
- }
- if (nsElement.type() != mongo::String) {
- errmsg = str::stream() << "\"ns\" field is not a string: " << e.fieldName();
- return false;
- }
- if (*opType != 'n' && nsElement.String().empty()) {
- errmsg = str::stream()
- << "\"ns\" field value cannot be empty when op type is not 'n': "
- << e.fieldName();
- return false;
- }
- return true;
+private:
+ /**
+ * Returns true if 'e' contains a valid operation.
+ */
+ bool _checkOperation(const BSONElement& e, string& errmsg) {
+ if (e.type() != Object) {
+ errmsg = str::stream() << "op not an object: " << e.fieldName();
+ return false;
+ }
+ BSONObj obj = e.Obj();
+ // op - operation type
+ BSONElement opElement = obj.getField("op");
+ if (opElement.eoo()) {
+ errmsg = str::stream()
+ << "op does not contain required \"op\" field: " << e.fieldName();
+ return false;
+ }
+ if (opElement.type() != mongo::String) {
+ errmsg = str::stream() << "\"op\" field is not a string: " << e.fieldName();
+ return false;
+ }
+ // operation type -- see logOp() comments for types
+ const char* opType = opElement.valuestrsafe();
+ if (*opType == '\0') {
+ errmsg = str::stream() << "\"op\" field value cannot be empty: " << e.fieldName();
+ return false;
}
- } applyOpsCmd;
+ // ns - namespace
+ // Only operations of type 'n' are allowed to have an empty namespace.
+ BSONElement nsElement = obj.getField("ns");
+ if (nsElement.eoo()) {
+ errmsg = str::stream()
+ << "op does not contain required \"ns\" field: " << e.fieldName();
+ return false;
+ }
+ if (nsElement.type() != mongo::String) {
+ errmsg = str::stream() << "\"ns\" field is not a string: " << e.fieldName();
+ return false;
+ }
+ if (*opType != 'n' && nsElement.String().empty()) {
+ errmsg = str::stream()
+ << "\"ns\" field value cannot be empty when op type is not 'n': " << e.fieldName();
+ return false;
+ }
+ return true;
+ }
+} applyOpsCmd;
}
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 698eb2fd406..1b499f1d31d 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -64,370 +64,362 @@
namespace mongo {
- using std::hex;
- using std::string;
- using std::stringstream;
-
- static bool _isCRAuthDisabled;
- static bool _isX509AuthDisabled;
- static const char _nonceAuthenticationDisabledMessage[] =
- "Challenge-response authentication using getnonce and authenticate commands is disabled.";
- static const char _x509AuthenticationDisabledMessage[] =
- "x.509 authentication is disabled.";
-
- void CmdAuthenticate::disableAuthMechanism(std::string authMechanism) {
- if (authMechanism == "MONGODB-CR") {
- _isCRAuthDisabled = true;
- }
- if (authMechanism == "MONGODB-X509") {
- _isX509AuthDisabled = true;
- }
+using std::hex;
+using std::string;
+using std::stringstream;
+
+static bool _isCRAuthDisabled;
+static bool _isX509AuthDisabled;
+static const char _nonceAuthenticationDisabledMessage[] =
+ "Challenge-response authentication using getnonce and authenticate commands is disabled.";
+static const char _x509AuthenticationDisabledMessage[] = "x.509 authentication is disabled.";
+
+void CmdAuthenticate::disableAuthMechanism(std::string authMechanism) {
+ if (authMechanism == "MONGODB-CR") {
+ _isCRAuthDisabled = true;
}
+ if (authMechanism == "MONGODB-X509") {
+ _isX509AuthDisabled = true;
+ }
+}
- /* authentication
-
- system.users contains
- { user : <username>, pwd : <pwd_digest>, ... }
+/* authentication
- getnonce sends nonce to client
+ system.users contains
+ { user : <username>, pwd : <pwd_digest>, ... }
- client then sends { authenticate:1, nonce64:<nonce_str>, user:<username>, key:<key> }
+ getnonce sends nonce to client
- where <key> is md5(<nonce_str><username><pwd_digest_str>) as a string
- */
+ client then sends { authenticate:1, nonce64:<nonce_str>, user:<username>, key:<key> }
- class CmdGetNonce : public Command {
- public:
- CmdGetNonce() :
- Command("getnonce"),
- _random(SecureRandom::create()) {
- }
+ where <key> is md5(<nonce_str><username><pwd_digest_str>) as a string
+*/
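The <key> computation spelled out above is the same digest that _authenticateCR recomputes further down in this file; a standalone sketch of it, reusing the md5 helpers that code uses (computeKey is a hypothetical name, not a function in the tree):

    std::string computeKey(const std::string& nonceStr,
                           const std::string& username,
                           const std::string& pwdDigest) {
        const std::string buf = nonceStr + username + pwdDigest;
        md5digest d;
        md5_state_t st;
        md5_init(&st);
        md5_append(&st, (const md5_byte_t*)buf.c_str(), buf.size());
        md5_finish(&st, d);
        return digestToString(d);  // hex string sent as "key"
    }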
- virtual bool slaveOk() const {
- return true;
- }
- void help(stringstream& h) const { h << "internal"; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- nonce64 n = getNextNonce();
- stringstream ss;
- ss << hex << n;
- result.append("nonce", ss.str() );
- AuthenticationSession::set(
- ClientBasic::getCurrent(),
- stdx::make_unique<MongoAuthenticationSession>(n));
- return true;
- }
+class CmdGetNonce : public Command {
+public:
+ CmdGetNonce() : Command("getnonce"), _random(SecureRandom::create()) {}
- private:
- nonce64 getNextNonce() {
- stdx::lock_guard<SimpleMutex> lk(_randMutex);
- return _random->nextInt64();
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ void help(stringstream& h) const {
+ h << "internal";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ nonce64 n = getNextNonce();
+ stringstream ss;
+ ss << hex << n;
+ result.append("nonce", ss.str());
+ AuthenticationSession::set(ClientBasic::getCurrent(),
+ stdx::make_unique<MongoAuthenticationSession>(n));
+ return true;
+ }
- SimpleMutex _randMutex; // Synchronizes accesses to _random.
- std::unique_ptr<SecureRandom> _random;
- } cmdGetNonce;
+private:
+ nonce64 getNextNonce() {
+ stdx::lock_guard<SimpleMutex> lk(_randMutex);
+ return _random->nextInt64();
+ }
- void CmdAuthenticate::redactForLogging(mutablebson::Document* cmdObj) {
- namespace mmb = mutablebson;
- static const int numRedactedFields = 2;
- static const char* redactedFields[numRedactedFields] = { "key", "nonce" };
- for (int i = 0; i < numRedactedFields; ++i) {
- for (mmb::Element element = mmb::findFirstChildNamed(cmdObj->root(), redactedFields[i]);
- element.ok();
- element = mmb::findElementNamed(element.rightSibling(), redactedFields[i])) {
+ SimpleMutex _randMutex; // Synchronizes accesses to _random.
+ std::unique_ptr<SecureRandom> _random;
+} cmdGetNonce;
- element.setValueString("xxx");
- }
+void CmdAuthenticate::redactForLogging(mutablebson::Document* cmdObj) {
+ namespace mmb = mutablebson;
+ static const int numRedactedFields = 2;
+ static const char* redactedFields[numRedactedFields] = {"key", "nonce"};
+ for (int i = 0; i < numRedactedFields; ++i) {
+ for (mmb::Element element = mmb::findFirstChildNamed(cmdObj->root(), redactedFields[i]);
+ element.ok();
+ element = mmb::findElementNamed(element.rightSibling(), redactedFields[i])) {
+ element.setValueString("xxx");
}
}
+}
- bool CmdAuthenticate::run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- if (!serverGlobalParams.quiet) {
- mutablebson::Document cmdToLog(cmdObj, mutablebson::Document::kInPlaceDisabled);
- redactForLogging(&cmdToLog);
- log() << " authenticate db: " << dbname << " " << cmdToLog;
- }
+bool CmdAuthenticate::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (!serverGlobalParams.quiet) {
+ mutablebson::Document cmdToLog(cmdObj, mutablebson::Document::kInPlaceDisabled);
+ redactForLogging(&cmdToLog);
+ log() << " authenticate db: " << dbname << " " << cmdToLog;
+ }
- UserName user(cmdObj.getStringField("user"), dbname);
- if (Command::testCommandsEnabled &&
- user.getDB() == "admin" &&
- user.getUser() == internalSecurity.user->getName().getUser()) {
- // Allows authenticating as the internal user against the admin database. This is to
- // support the auth passthrough test framework on mongos (since you can't use the local
- // database on a mongos, so you can't auth as the internal user without this).
- user = internalSecurity.user->getName();
- }
+ UserName user(cmdObj.getStringField("user"), dbname);
+ if (Command::testCommandsEnabled && user.getDB() == "admin" &&
+ user.getUser() == internalSecurity.user->getName().getUser()) {
+ // Allows authenticating as the internal user against the admin database. This is to
+ // support the auth passthrough test framework on mongos (since you can't use the local
+ // database on a mongos, so you can't auth as the internal user without this).
+ user = internalSecurity.user->getName();
+ }
- std::string mechanism = cmdObj.getStringField("mechanism");
- if (mechanism.empty()) {
- mechanism = "MONGODB-CR";
+ std::string mechanism = cmdObj.getStringField("mechanism");
+ if (mechanism.empty()) {
+ mechanism = "MONGODB-CR";
+ }
+ Status status = _authenticate(txn, mechanism, user, cmdObj);
+ audit::logAuthentication(ClientBasic::getCurrent(), mechanism, user, status.code());
+ if (!status.isOK()) {
+ if (!serverGlobalParams.quiet) {
+ log() << "Failed to authenticate " << user << " with mechanism " << mechanism << ": "
+ << status;
}
- Status status = _authenticate(txn, mechanism, user, cmdObj);
- audit::logAuthentication(ClientBasic::getCurrent(),
- mechanism,
- user,
- status.code());
- if (!status.isOK()) {
- if (!serverGlobalParams.quiet) {
- log() << "Failed to authenticate " << user << " with mechanism " << mechanism
- << ": " << status;
- }
- if (status.code() == ErrorCodes::AuthenticationFailed) {
- // Statuses with code AuthenticationFailed may contain messages we do not wish to
- // reveal to the user, so we return a status with the message "auth failed".
- appendCommandStatus(result,
- Status(ErrorCodes::AuthenticationFailed, "auth failed"));
- }
- else {
- appendCommandStatus(result, status);
- }
- return false;
+ if (status.code() == ErrorCodes::AuthenticationFailed) {
+ // Statuses with code AuthenticationFailed may contain messages we do not wish to
+ // reveal to the user, so we return a status with the message "auth failed".
+ appendCommandStatus(result, Status(ErrorCodes::AuthenticationFailed, "auth failed"));
+ } else {
+ appendCommandStatus(result, status);
}
- result.append("dbname", user.getDB());
- result.append("user", user.getUser());
- return true;
+ return false;
}
+ result.append("dbname", user.getDB());
+ result.append("user", user.getUser());
+ return true;
+}
- Status CmdAuthenticate::_authenticate(OperationContext* txn,
- const std::string& mechanism,
- const UserName& user,
- const BSONObj& cmdObj) {
-
- if (mechanism == "MONGODB-CR") {
- return _authenticateCR(txn, user, cmdObj);
- }
+Status CmdAuthenticate::_authenticate(OperationContext* txn,
+ const std::string& mechanism,
+ const UserName& user,
+ const BSONObj& cmdObj) {
+ if (mechanism == "MONGODB-CR") {
+ return _authenticateCR(txn, user, cmdObj);
+ }
#ifdef MONGO_CONFIG_SSL
- if (mechanism == "MONGODB-X509") {
- return _authenticateX509(txn, user, cmdObj);
- }
-#endif
- return Status(ErrorCodes::BadValue, "Unsupported mechanism: " + mechanism);
+ if (mechanism == "MONGODB-X509") {
+ return _authenticateX509(txn, user, cmdObj);
}
+#endif
+ return Status(ErrorCodes::BadValue, "Unsupported mechanism: " + mechanism);
+}
- Status CmdAuthenticate::_authenticateCR(
- OperationContext* txn, const UserName& user, const BSONObj& cmdObj) {
-
- if (user == internalSecurity.user->getName() &&
- serverGlobalParams.clusterAuthMode.load() ==
- ServerGlobalParams::ClusterAuthMode_x509) {
- return Status(ErrorCodes::AuthenticationFailed,
- "Mechanism x509 is required for internal cluster authentication");
- }
+Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
+ const UserName& user,
+ const BSONObj& cmdObj) {
+ if (user == internalSecurity.user->getName() &&
+ serverGlobalParams.clusterAuthMode.load() == ServerGlobalParams::ClusterAuthMode_x509) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "Mechanism x509 is required for internal cluster authentication");
+ }
- if (_isCRAuthDisabled) {
- // SERVER-8461, MONGODB-CR must be enabled for authenticating the internal user, so that
- // cluster members may communicate with each other.
- if (user != internalSecurity.user->getName()) {
- return Status(ErrorCodes::BadValue, _nonceAuthenticationDisabledMessage);
- }
+ if (_isCRAuthDisabled) {
+ // SERVER-8461, MONGODB-CR must be enabled for authenticating the internal user, so that
+ // cluster members may communicate with each other.
+ if (user != internalSecurity.user->getName()) {
+ return Status(ErrorCodes::BadValue, _nonceAuthenticationDisabledMessage);
}
+ }
- string key = cmdObj.getStringField("key");
- string received_nonce = cmdObj.getStringField("nonce");
-
- if( user.getUser().empty() || key.empty() || received_nonce.empty() ) {
- sleepmillis(10);
- return Status(ErrorCodes::ProtocolError,
- "field missing/wrong type in received authenticate command");
- }
+ string key = cmdObj.getStringField("key");
+ string received_nonce = cmdObj.getStringField("nonce");
- stringstream digestBuilder;
+ if (user.getUser().empty() || key.empty() || received_nonce.empty()) {
+ sleepmillis(10);
+ return Status(ErrorCodes::ProtocolError,
+ "field missing/wrong type in received authenticate command");
+ }
- {
- ClientBasic *client = ClientBasic::getCurrent();
- std::unique_ptr<AuthenticationSession> session;
- AuthenticationSession::swap(client, session);
- if (!session || session->getType() != AuthenticationSession::SESSION_TYPE_MONGO) {
+ stringstream digestBuilder;
+
+ {
+ ClientBasic* client = ClientBasic::getCurrent();
+ std::unique_ptr<AuthenticationSession> session;
+ AuthenticationSession::swap(client, session);
+ if (!session || session->getType() != AuthenticationSession::SESSION_TYPE_MONGO) {
+ sleepmillis(30);
+ return Status(ErrorCodes::ProtocolError, "No pending nonce");
+ } else {
+ nonce64 nonce = static_cast<MongoAuthenticationSession*>(session.get())->getNonce();
+ digestBuilder << hex << nonce;
+ if (digestBuilder.str() != received_nonce) {
sleepmillis(30);
- return Status(ErrorCodes::ProtocolError, "No pending nonce");
- }
- else {
- nonce64 nonce = static_cast<MongoAuthenticationSession*>(session.get())->getNonce();
- digestBuilder << hex << nonce;
- if (digestBuilder.str() != received_nonce) {
- sleepmillis(30);
- return Status(ErrorCodes::AuthenticationFailed, "Received wrong nonce.");
- }
+ return Status(ErrorCodes::AuthenticationFailed, "Received wrong nonce.");
}
}
+ }
- User* userObj;
- Status status = getGlobalAuthorizationManager()->acquireUser(txn, user, &userObj);
- if (!status.isOK()) {
- // Failure to find the privilege document indicates no-such-user, a fact that we do not
- // wish to reveal to the client. So, we return AuthenticationFailed rather than passing
- // through the returned status.
- return Status(ErrorCodes::AuthenticationFailed, status.toString());
- }
- string pwd = userObj->getCredentials().password;
- getGlobalAuthorizationManager()->releaseUser(userObj);
-
- if (pwd.empty()) {
- return Status(ErrorCodes::AuthenticationFailed,
- "MONGODB-CR credentials missing in the user document");
- }
+ User* userObj;
+ Status status = getGlobalAuthorizationManager()->acquireUser(txn, user, &userObj);
+ if (!status.isOK()) {
+ // Failure to find the privilege document indicates no-such-user, a fact that we do not
+ // wish to reveal to the client. So, we return AuthenticationFailed rather than passing
+ // through the returned status.
+ return Status(ErrorCodes::AuthenticationFailed, status.toString());
+ }
+ string pwd = userObj->getCredentials().password;
+ getGlobalAuthorizationManager()->releaseUser(userObj);
- md5digest d;
- {
- digestBuilder << user.getUser() << pwd;
- string done = digestBuilder.str();
+ if (pwd.empty()) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "MONGODB-CR credentials missing in the user document");
+ }
- md5_state_t st;
- md5_init(&st);
- md5_append(&st, (const md5_byte_t *) done.c_str(), done.size());
- md5_finish(&st, d);
- }
+ md5digest d;
+ {
+ digestBuilder << user.getUser() << pwd;
+ string done = digestBuilder.str();
- string computed = digestToString( d );
+ md5_state_t st;
+ md5_init(&st);
+ md5_append(&st, (const md5_byte_t*)done.c_str(), done.size());
+ md5_finish(&st, d);
+ }
- if ( key != computed ) {
- return Status(ErrorCodes::AuthenticationFailed, "key mismatch");
- }
+ string computed = digestToString(d);
- AuthorizationSession* authorizationSession =
- AuthorizationSession::get(ClientBasic::getCurrent());
- status = authorizationSession->addAndAuthorizeUser(txn, user);
- if (!status.isOK()) {
- return status;
- }
+ if (key != computed) {
+ return Status(ErrorCodes::AuthenticationFailed, "key mismatch");
+ }
- return Status::OK();
+ AuthorizationSession* authorizationSession =
+ AuthorizationSession::get(ClientBasic::getCurrent());
+ status = authorizationSession->addAndAuthorizeUser(txn, user);
+ if (!status.isOK()) {
+ return status;
}
+ return Status::OK();
+}
+
#ifdef MONGO_CONFIG_SSL
- void canonicalizeClusterDN(std::vector<std::string>* dn) {
- // remove all RDNs we don't care about
- for (size_t i=0; i<dn->size(); i++) {
- std::string& comp = dn->at(i);
- boost::algorithm::trim(comp);
- if (!mongoutils::str::startsWith(comp.c_str(), "DC=") &&
- !mongoutils::str::startsWith(comp.c_str(), "O=") &&
- !mongoutils::str::startsWith(comp.c_str(), "OU=")) {
- dn->erase(dn->begin()+i);
- i--;
- }
+void canonicalizeClusterDN(std::vector<std::string>* dn) {
+ // remove all RDNs we don't care about
+ for (size_t i = 0; i < dn->size(); i++) {
+ std::string& comp = dn->at(i);
+ boost::algorithm::trim(comp);
+ if (!mongoutils::str::startsWith(comp.c_str(), "DC=") &&
+ !mongoutils::str::startsWith(comp.c_str(), "O=") &&
+ !mongoutils::str::startsWith(comp.c_str(), "OU=")) {
+ dn->erase(dn->begin() + i);
+ i--;
}
- std::stable_sort(dn->begin(), dn->end());
}
+ std::stable_sort(dn->begin(), dn->end());
+}
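A worked example of the canonicalization (the DN values are made up): given the subject name

    CN=server1,OU=Kernel,O=MongoDB,DC=example,DC=com

splitting on "," and calling canonicalizeClusterDN() drops the CN= component (it is neither DC=, O=, nor OU=) and stable-sorts the rest, yielding

    { "DC=com", "DC=example", "O=MongoDB", "OU=Kernel" }

so that two cluster-member certificates compare equal in _clusterIdMatch() when they differ only in their CN.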
- bool CmdAuthenticate::_clusterIdMatch(const std::string& subjectName,
- const std::string& srvSubjectName) {
- std::vector<string> clientRDN = StringSplitter::split(subjectName, ",");
- std::vector<string> serverRDN = StringSplitter::split(srvSubjectName, ",");
+bool CmdAuthenticate::_clusterIdMatch(const std::string& subjectName,
+ const std::string& srvSubjectName) {
+ std::vector<string> clientRDN = StringSplitter::split(subjectName, ",");
+ std::vector<string> serverRDN = StringSplitter::split(srvSubjectName, ",");
- canonicalizeClusterDN(&clientRDN);
- canonicalizeClusterDN(&serverRDN);
+ canonicalizeClusterDN(&clientRDN);
+ canonicalizeClusterDN(&serverRDN);
- if (clientRDN.size() == 0 || clientRDN.size() != serverRDN.size()) {
- return false;
- }
+ if (clientRDN.size() == 0 || clientRDN.size() != serverRDN.size()) {
+ return false;
+ }
- for (size_t i=0; i < serverRDN.size(); i++) {
- if(clientRDN[i] != serverRDN[i]) {
- return false;
- }
+ for (size_t i = 0; i < serverRDN.size(); i++) {
+ if (clientRDN[i] != serverRDN[i]) {
+ return false;
}
- return true;
}
-
- Status CmdAuthenticate::_authenticateX509(
- OperationContext* txn, const UserName& user, const BSONObj& cmdObj) {
- if (!getSSLManager()) {
- return Status(ErrorCodes::ProtocolError,
- "SSL support is required for the MONGODB-X509 mechanism.");
- }
- if(user.getDB() != "$external") {
- return Status(ErrorCodes::ProtocolError,
- "X.509 authentication must always use the $external database.");
- }
+ return true;
+}
- ClientBasic *client = ClientBasic::getCurrent();
- AuthorizationSession* authorizationSession = AuthorizationSession::get(client);
- std::string subjectName = client->port()->getX509SubjectName();
+Status CmdAuthenticate::_authenticateX509(OperationContext* txn,
+ const UserName& user,
+ const BSONObj& cmdObj) {
+ if (!getSSLManager()) {
+ return Status(ErrorCodes::ProtocolError,
+ "SSL support is required for the MONGODB-X509 mechanism.");
+ }
+ if (user.getDB() != "$external") {
+ return Status(ErrorCodes::ProtocolError,
+ "X.509 authentication must always use the $external database.");
+ }
- if (!getSSLManager()->getSSLConfiguration().hasCA) {
- return Status(ErrorCodes::AuthenticationFailed,
- "Unable to verify x.509 certificate, as no CA has been provided.");
- }
- else if (user.getUser() != subjectName) {
- return Status(ErrorCodes::AuthenticationFailed,
- "There is no x.509 client certificate matching the user.");
+ ClientBasic* client = ClientBasic::getCurrent();
+ AuthorizationSession* authorizationSession = AuthorizationSession::get(client);
+ std::string subjectName = client->port()->getX509SubjectName();
+
+ if (!getSSLManager()->getSSLConfiguration().hasCA) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "Unable to verify x.509 certificate, as no CA has been provided.");
+ } else if (user.getUser() != subjectName) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "There is no x.509 client certificate matching the user.");
+ } else {
+ std::string srvSubjectName = getSSLManager()->getSSLConfiguration().serverSubjectName;
+
+ // Handle internal cluster member auth, only applies to server-server connections
+ if (_clusterIdMatch(subjectName, srvSubjectName)) {
+ int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
+ if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_undefined ||
+ clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "The provided certificate "
+ "can only be used for cluster authentication, not client "
+ "authentication. The current configuration does not allow "
+ "x.509 cluster authentication, check the --clusterAuthMode flag");
+ }
+ authorizationSession->grantInternalAuthorization();
}
+ // Handle normal client authentication, only applies to client-server connections
else {
- std::string srvSubjectName = getSSLManager()->getSSLConfiguration().serverSubjectName;
-
- // Handle internal cluster member auth, only applies to server-server connections
- if (_clusterIdMatch(subjectName, srvSubjectName)) {
- int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
- if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_undefined ||
- clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile) {
- return Status(ErrorCodes::AuthenticationFailed, "The provided certificate "
- "can only be used for cluster authentication, not client "
- "authentication. The current configuration does not allow "
- "x.509 cluster authentication, check the --clusterAuthMode flag");
- }
- authorizationSession->grantInternalAuthorization();
+ if (_isX509AuthDisabled) {
+ return Status(ErrorCodes::BadValue, _x509AuthenticationDisabledMessage);
}
- // Handle normal client authentication, only applies to client-server connections
- else {
- if (_isX509AuthDisabled) {
- return Status(ErrorCodes::BadValue,
- _x509AuthenticationDisabledMessage);
- }
- Status status = authorizationSession->addAndAuthorizeUser(txn, user);
- if (!status.isOK()) {
- return status;
- }
+ Status status = authorizationSession->addAndAuthorizeUser(txn, user);
+ if (!status.isOK()) {
+ return status;
}
- return Status::OK();
}
+ return Status::OK();
}
+}
#endif
- CmdAuthenticate cmdAuthenticate;
+CmdAuthenticate cmdAuthenticate;
- class CmdLogout : public Command {
- public:
- virtual bool slaveOk() const {
- return true;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- void help(stringstream& h) const { h << "de-authenticate"; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- CmdLogout() : Command("logout") {}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- AuthorizationSession* authSession =
- AuthorizationSession::get(ClientBasic::getCurrent());
- authSession->logoutDatabase(dbname);
- if (Command::testCommandsEnabled && dbname == "admin") {
- // Allows logging out as the internal user against the admin database, however
- // this actually logs out of the local database as well. This is to
- // support the auth passthrough test framework on mongos (since you can't use the
- // local database on a mongos, so you can't logout as the internal user
- // without this).
- authSession->logoutDatabase("local");
- }
- return true;
+class CmdLogout : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ void help(stringstream& h) const {
+ h << "de-authenticate";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ CmdLogout() : Command("logout") {}
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationSession* authSession = AuthorizationSession::get(ClientBasic::getCurrent());
+ authSession->logoutDatabase(dbname);
+ if (Command::testCommandsEnabled && dbname == "admin") {
+ // Allows logging out as the internal user against the admin database, however
+ // this actually logs out of the local database as well. This is to
+ // support the auth passthrough test framework on mongos (since you can't use the
+ // local database on a mongos, so you can't logout as the internal user
+ // without this).
+ authSession->logoutDatabase("local");
}
- } cmdLogout;
+ return true;
+ }
+} cmdLogout;
}
diff --git a/src/mongo/db/commands/authentication_commands.h b/src/mongo/db/commands/authentication_commands.h
index e22711454e2..67a41c18401 100644
--- a/src/mongo/db/commands/authentication_commands.h
+++ b/src/mongo/db/commands/authentication_commands.h
@@ -36,52 +36,53 @@
namespace mongo {
- class CmdAuthenticate : public Command {
- public:
- static void disableAuthMechanism(std::string authMechanism);
+class CmdAuthenticate : public Command {
+public:
+ static void disableAuthMechanism(std::string authMechanism);
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help(std::stringstream& ss) const { ss << "internal"; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual void redactForLogging(mutablebson::Document* cmdObj);
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(std::stringstream& ss) const {
+ ss << "internal";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual void redactForLogging(mutablebson::Document* cmdObj);
- CmdAuthenticate() : Command("authenticate") {}
- bool run(OperationContext* txn, const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
+ CmdAuthenticate() : Command("authenticate") {}
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
- private:
- /**
- * Completes the authentication of "user" using "mechanism" and parameters from "cmdObj".
- *
- * Returns Status::OK() on success. All other statuses indicate failed authentication. The
- * entire status returned here may always be used for logging. However, if the code is
- * AuthenticationFailed, the "reason" field of the return status may contain information
- * that should not be revealed to the connected client.
- *
- * Other than AuthenticationFailed, common returns are BadValue, indicating unsupported
- * mechanism, and ProtocolError, indicating an error in the use of the authentication
- * protocol.
- */
- Status _authenticate(OperationContext* txn,
- const std::string& mechanism,
- const UserName& user,
- const BSONObj& cmdObj);
- Status _authenticateCR(
- OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
- Status _authenticateX509(
- OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
- bool _clusterIdMatch(const std::string& subjectName, const std::string& srvSubjectName);
- };
+private:
+ /**
+ * Completes the authentication of "user" using "mechanism" and parameters from "cmdObj".
+ *
+ * Returns Status::OK() on success. All other statuses indicate failed authentication. The
+ * entire status returned here may always be used for logging. However, if the code is
+ * AuthenticationFailed, the "reason" field of the return status may contain information
+ * that should not be revealed to the connected client.
+ *
+ * Other than AuthenticationFailed, common returns are BadValue, indicating unsupported
+ * mechanism, and ProtocolError, indicating an error in the use of the authentication
+ * protocol.
+ */
+ Status _authenticate(OperationContext* txn,
+ const std::string& mechanism,
+ const UserName& user,
+ const BSONObj& cmdObj);
+ Status _authenticateCR(OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
+ Status _authenticateX509(OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
+ bool _clusterIdMatch(const std::string& subjectName, const std::string& srvSubjectName);
+};
- extern CmdAuthenticate cmdAuthenticate;
+extern CmdAuthenticate cmdAuthenticate;
}
-
-
diff --git a/src/mongo/db/commands/cleanup_orphaned_cmd.cpp b/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
index a6ff2b90a6d..50666033aa6 100644
--- a/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
@@ -50,277 +50,258 @@
#include "mongo/util/log.h"
namespace {
- using mongo::WriteConcernOptions;
+using mongo::WriteConcernOptions;
- const int kDefaultWTimeoutMs = 60 * 1000;
- const WriteConcernOptions DefaultWriteConcern(WriteConcernOptions::kMajority,
- WriteConcernOptions::NONE,
- kDefaultWTimeoutMs);
+const int kDefaultWTimeoutMs = 60 * 1000;
+const WriteConcernOptions DefaultWriteConcern(WriteConcernOptions::kMajority,
+ WriteConcernOptions::NONE,
+ kDefaultWTimeoutMs);
}
namespace mongo {
- using std::endl;
- using std::string;
-
- using mongoutils::str::stream;
-
- enum CleanupResult {
- CleanupResult_Done, CleanupResult_Continue, CleanupResult_Error
- };
-
- /**
- * Cleans up one range of orphaned data starting from a range that overlaps or starts at
- * 'startingFromKey'. If empty, startingFromKey is the minimum key of the sharded range.
- *
- * @return CleanupResult_Continue and 'stoppedAtKey' if orphaned range was found and cleaned
- * @return CleanupResult_Done if no orphaned ranges remain
- * @return CleanupResult_Error and 'errMsg' if an error occurred
- *
- * If the collection is not sharded, returns CleanupResult_Done.
- */
- CleanupResult cleanupOrphanedData( OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& startingFromKeyConst,
- const WriteConcernOptions& secondaryThrottle,
- BSONObj* stoppedAtKey,
- string* errMsg ) {
-
- BSONObj startingFromKey = startingFromKeyConst;
-
- CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns.toString() );
- if ( !metadata || metadata->getKeyPattern().isEmpty() ) {
-
- warning() << "skipping orphaned data cleanup for " << ns.toString()
- << ", collection is not sharded" << endl;
-
- return CleanupResult_Done;
- }
+using std::endl;
+using std::string;
- BSONObj keyPattern = metadata->getKeyPattern();
- if ( !startingFromKey.isEmpty() ) {
- if ( !metadata->isValidKey( startingFromKey ) ) {
+using mongoutils::str::stream;
- *errMsg = stream() << "could not cleanup orphaned data, start key "
- << startingFromKey
- << " does not match shard key pattern " << keyPattern;
+enum CleanupResult { CleanupResult_Done, CleanupResult_Continue, CleanupResult_Error };
- warning() << *errMsg << endl;
- return CleanupResult_Error;
- }
- }
- else {
- startingFromKey = metadata->getMinKey();
- }
-
- KeyRange orphanRange;
- if ( !metadata->getNextOrphanRange( startingFromKey, &orphanRange ) ) {
+/**
+ * Cleans up one range of orphaned data starting from a range that overlaps or starts at
+ * 'startingFromKey'. If empty, startingFromKey is the minimum key of the sharded range.
+ *
+ * @return CleanupResult_Continue and 'stoppedAtKey' if orphaned range was found and cleaned
+ * @return CleanupResult_Done if no orphaned ranges remain
+ * @return CleanupResult_Error and 'errMsg' if an error occurred
+ *
+ * If the collection is not sharded, returns CleanupResult_Done.
+ */
+CleanupResult cleanupOrphanedData(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& startingFromKeyConst,
+ const WriteConcernOptions& secondaryThrottle,
+ BSONObj* stoppedAtKey,
+ string* errMsg) {
+ BSONObj startingFromKey = startingFromKeyConst;
+
+ CollectionMetadataPtr metadata = shardingState.getCollectionMetadata(ns.toString());
+ if (!metadata || metadata->getKeyPattern().isEmpty()) {
+ warning() << "skipping orphaned data cleanup for " << ns.toString()
+ << ", collection is not sharded" << endl;
+
+ return CleanupResult_Done;
+ }
- LOG( 1 ) << "orphaned data cleanup requested for " << ns.toString()
- << " starting from " << startingFromKey
- << ", no orphan ranges remain" << endl;
+ BSONObj keyPattern = metadata->getKeyPattern();
+ if (!startingFromKey.isEmpty()) {
+ if (!metadata->isValidKey(startingFromKey)) {
+ *errMsg = stream() << "could not cleanup orphaned data, start key " << startingFromKey
+ << " does not match shard key pattern " << keyPattern;
- return CleanupResult_Done;
- }
- orphanRange.ns = ns;
- *stoppedAtKey = orphanRange.maxKey;
-
- // We're done with this metadata now, no matter what happens
- metadata.reset();
-
- LOG( 1 ) << "orphaned data cleanup requested for " << ns.toString()
- << " starting from " << startingFromKey
- << ", removing next orphan range"
- << " [" << orphanRange.minKey << "," << orphanRange.maxKey << ")"
- << endl;
-
- // Metadata snapshot may be stale now, but deleter checks metadata again in write lock
- // before delete.
- RangeDeleterOptions deleterOptions(orphanRange);
- deleterOptions.writeConcern = secondaryThrottle;
- deleterOptions.onlyRemoveOrphanedDocs = true;
- deleterOptions.fromMigrate = true;
- // Must wait for cursors since there can be existing cursors with an older
- // CollectionMetadata.
- deleterOptions.waitForOpenCursors = true;
- deleterOptions.removeSaverReason = "cleanup-cmd";
-
- if (!getDeleter()->deleteNow(txn, deleterOptions, errMsg)) {
warning() << *errMsg << endl;
return CleanupResult_Error;
}
-
- return CleanupResult_Continue;
+ } else {
+ startingFromKey = metadata->getMinKey();
}
- /**
- * Cleanup orphaned data command. Called on a particular namespace, and if the collection
- * is sharded will clean up a single orphaned data range which overlaps or starts after a
- * passed-in 'startingFromKey'. Returns true and a 'stoppedAtKey' (which will start a
- * search for the next orphaned range if the command is called again) or no key if there
- * are no more orphaned ranges in the collection.
- *
- * If the collection is not sharded, returns true but no 'stoppedAtKey'.
- * On failure, returns false and an error message.
- *
- * Calling this command repeatedly until no 'stoppedAtKey' is returned ensures that the
- * full collection range is searched for orphaned documents, but since sharding state may
- * change between calls there is no guarantee that all orphaned documents were found unless
- * the balancer is off.
- *
- * Safe to call with the balancer on.
- *
- * Format:
- *
- * {
- * cleanupOrphaned: <ns>,
- * // optional parameters:
- * startingAtKey: { <shardKeyValue> }, // defaults to lowest value
- * secondaryThrottle: <bool>, // defaults to true
- * // defaults to { w: "majority", wtimeout: 60000 }. Applies to individual writes.
- * writeConcern: { <writeConcern options> }
- * }
- */
- class CleanupOrphanedCommand : public Command {
- public:
- CleanupOrphanedCommand() :
- Command( "cleanupOrphaned" ) {}
-
- virtual bool slaveOk() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual bool localHostOnlyIfNoAuth( const BSONObj& cmdObj ) { return false; }
-
- virtual Status checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj ) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::cleanupOrphaned)) {
- return Status(ErrorCodes::Unauthorized,
- "Not authorized for cleanupOrphaned command.");
- }
- return Status::OK();
- }
+ KeyRange orphanRange;
+ if (!metadata->getNextOrphanRange(startingFromKey, &orphanRange)) {
+ LOG(1) << "orphaned data cleanup requested for " << ns.toString() << " starting from "
+ << startingFromKey << ", no orphan ranges remain" << endl;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ return CleanupResult_Done;
+ }
+ orphanRange.ns = ns;
+ *stoppedAtKey = orphanRange.maxKey;
+
+ // We're done with this metadata now, no matter what happens
+ metadata.reset();
+
+ LOG(1) << "orphaned data cleanup requested for " << ns.toString() << " starting from "
+ << startingFromKey << ", removing next orphan range"
+ << " [" << orphanRange.minKey << "," << orphanRange.maxKey << ")" << endl;
+
+ // Metadata snapshot may be stale now, but deleter checks metadata again in write lock
+ // before delete.
+ RangeDeleterOptions deleterOptions(orphanRange);
+ deleterOptions.writeConcern = secondaryThrottle;
+ deleterOptions.onlyRemoveOrphanedDocs = true;
+ deleterOptions.fromMigrate = true;
+ // Must wait for cursors since there can be existing cursors with an older
+ // CollectionMetadata.
+ deleterOptions.waitForOpenCursors = true;
+ deleterOptions.removeSaverReason = "cleanup-cmd";
+
+ if (!getDeleter()->deleteNow(txn, deleterOptions, errMsg)) {
+ warning() << *errMsg << endl;
+ return CleanupResult_Error;
+ }
- // Input
- static BSONField<string> nsField;
- static BSONField<BSONObj> startingFromKeyField;
+ return CleanupResult_Continue;
+}
- // Output
- static BSONField<BSONObj> stoppedAtKeyField;
+/**
+ * Cleanup orphaned data command. Called on a particular namespace, and if the collection
+ * is sharded will clean up a single orphaned data range which overlaps or starts after a
+ * passed-in 'startingFromKey'. Returns true and a 'stoppedAtKey' (which will start a
+ * search for the next orphaned range if the command is called again) or no key if there
+ * are no more orphaned ranges in the collection.
+ *
+ * If the collection is not sharded, returns true but no 'stoppedAtKey'.
+ * On failure, returns false and an error message.
+ *
+ * Calling this command repeatedly until no 'stoppedAtKey' is returned ensures that the
+ * full collection range is searched for orphaned documents, but since sharding state may
+ * change between calls there is no guarantee that all orphaned documents were found unless
+ * the balancer is off.
+ *
+ * Safe to call with the balancer on.
+ *
+ * Format:
+ *
+ * {
+ * cleanupOrphaned: <ns>,
+ * // optional parameters:
+ * startingFromKey: { <shardKeyValue> }, // defaults to lowest value
+ * secondaryThrottle: <bool>, // defaults to true
+ * // defaults to { w: "majority", wtimeout: 60000 }. Applies to individual writes.
+ * writeConcern: { <writeConcern options> }
+ * }
+ */
+class CleanupOrphanedCommand : public Command {
+public:
+ CleanupOrphanedCommand() : Command("cleanupOrphaned") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ return false;
+ }
- bool run( OperationContext* txn,
- string const &db,
- BSONObj &cmdObj,
- int,
- string &errmsg,
- BSONObjBuilder &result) {
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::cleanupOrphaned)) {
+ return Status(ErrorCodes::Unauthorized, "Not authorized for cleanupOrphaned command.");
+ }
+ return Status::OK();
+ }
- string ns;
- if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- if ( ns == "" ) {
- errmsg = "no collection name specified";
- return false;
- }
+ // Input
+ static BSONField<string> nsField;
+ static BSONField<BSONObj> startingFromKeyField;
+
+ // Output
+ static BSONField<BSONObj> stoppedAtKeyField;
+
+ bool run(OperationContext* txn,
+ string const& db,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns;
+ if (!FieldParser::extract(cmdObj, nsField, &ns, &errmsg)) {
+ return false;
+ }
- BSONObj startingFromKey;
- if ( !FieldParser::extract( cmdObj,
- startingFromKeyField,
- &startingFromKey,
- &errmsg ) ) {
- return false;
- }
+ if (ns == "") {
+ errmsg = "no collection name specified";
+ return false;
+ }
- WriteConcernOptions writeConcern;
- Status status = writeConcern.parseSecondaryThrottle(cmdObj, NULL);
+ BSONObj startingFromKey;
+ if (!FieldParser::extract(cmdObj, startingFromKeyField, &startingFromKey, &errmsg)) {
+ return false;
+ }
- if (!status.isOK()){
- if (status.code() != ErrorCodes::WriteConcernNotDefined) {
- return appendCommandStatus(result, status);
- }
+ WriteConcernOptions writeConcern;
+ Status status = writeConcern.parseSecondaryThrottle(cmdObj, NULL);
- writeConcern = DefaultWriteConcern;
- }
- else {
- repl::ReplicationCoordinator* replCoordinator =
- repl::getGlobalReplicationCoordinator();
- Status status = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcern);
-
- if (replCoordinator->getReplicationMode() ==
- repl::ReplicationCoordinator::modeMasterSlave &&
- writeConcern.shouldWaitForOtherNodes()) {
- warning() << "cleanupOrphaned cannot check if write concern setting "
- << writeConcern.toBSON()
- << " can be enforced in a master slave configuration";
- }
-
- if (!status.isOK() && status != ErrorCodes::NoReplicationEnabled) {
- return appendCommandStatus(result, status);
- }
+ if (!status.isOK()) {
+ if (status.code() != ErrorCodes::WriteConcernNotDefined) {
+ return appendCommandStatus(result, status);
}
- if (writeConcern.shouldWaitForOtherNodes() &&
- writeConcern.wTimeout == WriteConcernOptions::kNoTimeout) {
- // Don't allow no timeout.
- writeConcern.wTimeout = kDefaultWTimeoutMs;
+ writeConcern = DefaultWriteConcern;
+ } else {
+ repl::ReplicationCoordinator* replCoordinator = repl::getGlobalReplicationCoordinator();
+ Status status = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcern);
+
+ if (replCoordinator->getReplicationMode() ==
+ repl::ReplicationCoordinator::modeMasterSlave &&
+ writeConcern.shouldWaitForOtherNodes()) {
+ warning() << "cleanupOrphaned cannot check if write concern setting "
+ << writeConcern.toBSON()
+ << " can be enforced in a master slave configuration";
}
- if (!shardingState.enabled()) {
- errmsg = str::stream() << "server is not part of a sharded cluster or "
- << "the sharding metadata is not yet initialized.";
- return false;
+ if (!status.isOK() && status != ErrorCodes::NoReplicationEnabled) {
+ return appendCommandStatus(result, status);
}
+ }
- ChunkVersion shardVersion;
- status = shardingState.refreshMetadataNow(txn, ns, &shardVersion);
- if ( !status.isOK() ) {
- if ( status.code() == ErrorCodes::RemoteChangeDetected ) {
- warning() << "Shard version in transition detected while refreshing "
- << "metadata for " << ns << " at version " << shardVersion << endl;
- }
- else {
- errmsg = str::stream() << "failed to refresh shard metadata: "
- << status.reason();
- return false;
- }
- }
+ if (writeConcern.shouldWaitForOtherNodes() &&
+ writeConcern.wTimeout == WriteConcernOptions::kNoTimeout) {
+ // Don't allow no timeout.
+ writeConcern.wTimeout = kDefaultWTimeoutMs;
+ }
- BSONObj stoppedAtKey;
- CleanupResult cleanupResult = cleanupOrphanedData( txn,
- NamespaceString( ns ),
- startingFromKey,
- writeConcern,
- &stoppedAtKey,
- &errmsg );
+ if (!shardingState.enabled()) {
+ errmsg = str::stream() << "server is not part of a sharded cluster or "
+ << "the sharding metadata is not yet initialized.";
+ return false;
+ }
- if ( cleanupResult == CleanupResult_Error ) {
+ ChunkVersion shardVersion;
+ status = shardingState.refreshMetadataNow(txn, ns, &shardVersion);
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::RemoteChangeDetected) {
+ warning() << "Shard version in transition detected while refreshing "
+ << "metadata for " << ns << " at version " << shardVersion << endl;
+ } else {
+ errmsg = str::stream() << "failed to refresh shard metadata: " << status.reason();
return false;
}
+ }
- if ( cleanupResult == CleanupResult_Continue ) {
- result.append( stoppedAtKeyField(), stoppedAtKey );
- }
- else {
- dassert( cleanupResult == CleanupResult_Done );
- }
+ BSONObj stoppedAtKey;
+ CleanupResult cleanupResult = cleanupOrphanedData(
+ txn, NamespaceString(ns), startingFromKey, writeConcern, &stoppedAtKey, &errmsg);
- return true;
+ if (cleanupResult == CleanupResult_Error) {
+ return false;
}
- };
- BSONField<string> CleanupOrphanedCommand::nsField( "cleanupOrphaned" );
- BSONField<BSONObj> CleanupOrphanedCommand::startingFromKeyField( "startingFromKey" );
- BSONField<BSONObj> CleanupOrphanedCommand::stoppedAtKeyField( "stoppedAtKey" );
+ if (cleanupResult == CleanupResult_Continue) {
+ result.append(stoppedAtKeyField(), stoppedAtKey);
+ } else {
+ dassert(cleanupResult == CleanupResult_Done);
+ }
- MONGO_INITIALIZER(RegisterCleanupOrphanedCommand)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CleanupOrphanedCommand();
- return Status::OK();
+ return true;
}
+};
+
+BSONField<string> CleanupOrphanedCommand::nsField("cleanupOrphaned");
+BSONField<BSONObj> CleanupOrphanedCommand::startingFromKeyField("startingFromKey");
+BSONField<BSONObj> CleanupOrphanedCommand::stoppedAtKeyField("stoppedAtKey");
-} // namespace mongo
+MONGO_INITIALIZER(RegisterCleanupOrphanedCommand)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CleanupOrphanedCommand();
+ return Status::OK();
+}
+} // namespace mongo
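The reworked cleanupOrphaned handler above appends stoppedAtKey only in the CleanupResult_Continue case, so a caller is expected to re-issue the command, feeding stoppedAtKey back in as startingFromKey, until the field is absent. A minimal client-side sketch of that loop, not part of the patch, assuming the 3.x C++ client headers and a hypothetical shard host and namespace:

    // Minimal sketch (not part of the patch), assuming the 3.x C++ client headers.
    // The shard host and namespace are hypothetical; error handling is elided.
    #include "mongo/client/dbclientinterface.h"

    void cleanupAllOrphans() {
        using namespace mongo;
        std::string errmsg;
        DBClientConnection conn;
        if (!conn.connect(HostAndPort("shard01.example.net:27018"), errmsg))
            return;  // could not reach the shard primary

        BSONObj startingFromKey;  // empty: start at the beginning of the key space
        for (;;) {
            BSONObj res;
            if (!conn.runCommand("admin",
                                 BSON("cleanupOrphaned" << "test.user"
                                                        << "startingFromKey" << startingFromKey),
                                 res))
                break;  // CleanupResult_Error (or not a shard): stop retrying
            if (!res.hasField("stoppedAtKey"))
                break;  // CleanupResult_Done: every orphaned range has been visited
            startingFromKey = res["stoppedAtKey"].Obj().getOwned();
        }
    }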
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index f2fd0d8928f..0e6c7fbf1e7 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -39,97 +39,98 @@
namespace {
- using namespace mongo;
-
- using std::set;
- using std::string;
- using std::stringstream;
-
- /* Usage:
- mydb.$cmd.findOne( { clone: "fromhost" } );
- Note: doesn't work with authentication enabled, except as internal operation or for
- old-style users for backwards compatibility.
- */
- class CmdClone : public Command {
- public:
- CmdClone() : Command("clone") { }
-
- virtual bool slaveOk() const {
- return false;
- }
+using namespace mongo;
- virtual bool isWriteCommandForConfigServer() const { return true; }
+using std::set;
+using std::string;
+using std::stringstream;
- virtual void help( stringstream &help ) const {
- help << "clone this database from an instance of the db on another host\n";
- help << "{clone: \"host13\"[, slaveOk: <bool>]}";
+/* Usage:
+ mydb.$cmd.findOne( { clone: "fromhost" } );
+ Note: doesn't work with authentication enabled, except as internal operation or for
+ old-style users for backwards compatibility.
+*/
+class CmdClone : public Command {
+public:
+ CmdClone() : Command("clone") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "clone this database from an instance of the db on another host\n";
+ help << "{clone: \"host13\"[, slaveOk: <bool>]}";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::createIndex);
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actions.addAction(ActionType::bypassDocumentValidation);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::insert);
- actions.addAction(ActionType::createIndex);
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
-
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(dbname), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ maybeDisableValidation.emplace(txn);
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- maybeDisableValidation.emplace(txn);
- }
-
- string from = cmdObj.getStringField("clone");
- if ( from.empty() )
- return false;
+ string from = cmdObj.getStringField("clone");
+ if (from.empty())
+ return false;
- CloneOptions opts;
- opts.fromDB = dbname;
- opts.slaveOk = cmdObj["slaveOk"].trueValue();
+ CloneOptions opts;
+ opts.fromDB = dbname;
+ opts.slaveOk = cmdObj["slaveOk"].trueValue();
- // See if there's any collections we should ignore
- if( cmdObj["collsToIgnore"].type() == Array ){
- BSONObjIterator it( cmdObj["collsToIgnore"].Obj() );
+ // See if there's any collections we should ignore
+ if (cmdObj["collsToIgnore"].type() == Array) {
+ BSONObjIterator it(cmdObj["collsToIgnore"].Obj());
- while( it.more() ){
- BSONElement e = it.next();
- if( e.type() == String ){
- opts.collsToIgnore.insert( e.String() );
- }
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (e.type() == String) {
+ opts.collsToIgnore.insert(e.String());
}
}
+ }
- set<string> clonedColls;
+ set<string> clonedColls;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- Cloner cloner;
- Status status = cloner.copyDb(txn, dbname, from, opts, &clonedColls);
+ Cloner cloner;
+ Status status = cloner.copyDb(txn, dbname, from, opts, &clonedColls);
- BSONArrayBuilder barr;
- barr.append( clonedColls );
+ BSONArrayBuilder barr;
+ barr.append(clonedColls);
- result.append("clonedColls", barr.arr());
+ result.append("clonedColls", barr.arr());
- return appendCommandStatus(result, status);
- }
+ return appendCommandStatus(result, status);
+ }
- } cmdClone;
+} cmdClone;
-} // namespace
+} // namespace
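For reference, the fields parsed by CmdClone::run() above (clone, slaveOk, collsToIgnore) compose into a command document like the following fragment. The source host and ignored collection names are hypothetical, and the fragment assumes an existing DBClientConnection named conn:

    // Fragment, illustrative only: command document for the clone command above.
    BSONObj cmd = BSON("clone" << "host13.example.net:27017"
                               << "slaveOk" << true
                               << "collsToIgnore" << BSON_ARRAY("tmp.scratch" << "tmp.cache"));
    // BSONObj res;
    // conn.runCommand("mydb", cmd, res);  // clones "mydb" from the remote host into this node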
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 2ef62f8b090..f8bda90a8da 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -57,105 +57,106 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
- using std::endl;
-
- class CmdCloneCollection : public Command {
- public:
- CmdCloneCollection() : Command("cloneCollection") { }
-
- virtual bool slaveOk() const {
- return false;
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+using std::endl;
+
+class CmdCloneCollection : public Command {
+public:
+ CmdCloneCollection() : Command("cloneCollection") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ std::string ns = parseNs(dbname, cmdObj);
+
+ ActionSet actions;
+ actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::createIndex); // SERVER-11418
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actions.addAction(ActionType::bypassDocumentValidation);
}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(ns)), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
-
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ return Status::OK();
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "{ cloneCollection: <collection>, from: <host> [,query: <query_filter>] "
+ "[,copyIndexes:<bool>] }"
+ "\nCopies a collection from one server to another. Do not use on a single server "
+ "as the destination "
+ "is placed at the same db.collection (namespace) as the source.\n";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
+
+ string fromhost = cmdObj.getStringField("from");
+ if (fromhost.empty()) {
+ errmsg = "missing 'from' parameter";
+ return false;
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- std::string ns = parseNs(dbname, cmdObj);
-
- ActionSet actions;
- actions.addAction(ActionType::insert);
- actions.addAction(ActionType::createIndex); // SERVER-11418
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
-
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(ns)), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ {
+ HostAndPort h(fromhost);
+ if (repl::isSelf(h)) {
+ errmsg = "can't cloneCollection from self";
+ return false;
}
- return Status::OK();
}
- virtual void help( stringstream &help ) const {
- help << "{ cloneCollection: <collection>, from: <host> [,query: <query_filter>] [,copyIndexes:<bool>] }"
- "\nCopies a collection from one server to another. Do not use on a single server as the destination "
- "is placed at the same db.collection (namespace) as the source.\n"
- ;
+ string collection = parseNs(dbname, cmdObj);
+ Status allowedWriteStatus = userAllowedWriteNS(dbname, collection);
+ if (!allowedWriteStatus.isOK()) {
+ return appendCommandStatus(result, allowedWriteStatus);
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ BSONObj query = cmdObj.getObjectField("query");
+ if (query.isEmpty())
+ query = BSONObj();
- string fromhost = cmdObj.getStringField("from");
- if ( fromhost.empty() ) {
- errmsg = "missing 'from' parameter";
- return false;
- }
-
- {
- HostAndPort h(fromhost);
- if (repl::isSelf(h)) {
- errmsg = "can't cloneCollection from self";
- return false;
- }
- }
-
- string collection = parseNs(dbname, cmdObj);
- Status allowedWriteStatus = userAllowedWriteNS(dbname, collection);
- if (!allowedWriteStatus.isOK()) {
- return appendCommandStatus(result, allowedWriteStatus);
- }
+ BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
+ bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
- BSONObj query = cmdObj.getObjectField("query");
- if ( query.isEmpty() )
- query = BSONObj();
+ log() << "cloneCollection. db:" << dbname << " collection:" << collection
+ << " from: " << fromhost << " query: " << query << " "
+ << (copyIndexes ? "" : ", not copying indexes") << endl;
- BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
- bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
-
- log() << "cloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost
- << " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
-
- Cloner cloner;
- unique_ptr<DBClientConnection> myconn;
- myconn.reset( new DBClientConnection() );
- if ( ! myconn->connect( HostAndPort(fromhost) , errmsg ) )
- return false;
+ Cloner cloner;
+ unique_ptr<DBClientConnection> myconn;
+ myconn.reset(new DBClientConnection());
+ if (!myconn->connect(HostAndPort(fromhost), errmsg))
+ return false;
- cloner.setConnection( myconn.release() );
+ cloner.setConnection(myconn.release());
- return cloner.copyCollection(txn, collection, query, errmsg, true, false, copyIndexes);
- }
+ return cloner.copyCollection(txn, collection, query, errmsg, true, false, copyIndexes);
+ }
- } cmdCloneCollection;
+} cmdCloneCollection;
-} // namespace mongo
+} // namespace mongo
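One detail worth noting in the block above: the help text advertises copyIndexes, while run() reads the lower-case field copyindexes. A hypothetical command document matching what run() actually parses (parseNsFullyQualified() means the command value is the full namespace); hosts and names are illustrative and conn is assumed to exist:

    // Fragment, illustrative only; the copyindexes spelling follows run() above.
    BSONObj cmd = BSON("cloneCollection" << "test.events"
                                         << "from" << "source.example.net:27017"
                                         << "query" << BSON("archived" << false)
                                         << "copyindexes" << false);
    // BSONObj res;
    // conn.runCommand("test", cmd, res);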
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index c9d3816a2b7..4f53833e975 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -46,117 +46,123 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
-
- class CmdCloneCollectionAsCapped : public Command {
- public:
- CmdCloneCollectionAsCapped() : Command( "cloneCollectionAsCapped" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream &help ) const {
- help << "{ cloneCollectionAsCapped:<fromName>, toCollection:<toName>, size:<sizeInBytes> }";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet sourceActions;
- sourceActions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), sourceActions));
-
- ActionSet targetActions;
- targetActions.addAction(ActionType::insert);
- targetActions.addAction(ActionType::createIndex);
- targetActions.addAction(ActionType::convertToCapped);
- std::string collection = cmdObj.getStringField("toCollection");
- uassert(16708, "bad 'toCollection' value", !collection.empty());
-
- out->push_back(Privilege(ResourcePattern::forExactNamespace(
- NamespaceString(dbname, collection)),
- targetActions));
- }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string from = jsobj.getStringField( "cloneCollectionAsCapped" );
- string to = jsobj.getStringField( "toCollection" );
- double size = jsobj.getField( "size" ).number();
- bool temp = jsobj.getField( "temp" ).trueValue();
-
- if ( from.empty() || to.empty() || size == 0 ) {
- errmsg = "invalid command spec";
- return false;
- }
-
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
-
- NamespaceString nss(dbname, to);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while cloning collection " << from << " to " << to
- << " (as capped)"));
- }
-
- Database* const db = autoDb.getDb();
- if (!db) {
- return appendCommandStatus(result,
- Status(ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbname
- << " not found"));
- }
-
- Status status = cloneCollectionAsCapped(txn, db, from, to, size, temp);
- return appendCommandStatus( result, status );
- }
- } cmdCloneCollectionAsCapped;
-
- /* jan2010:
- Converts the given collection to a capped collection w/ the specified size.
- This command is not highly used, and is not currently supported with sharded
- environments.
- */
- class CmdConvertToCapped : public Command {
- public:
- CmdConvertToCapped() : Command( "convertToCapped" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream &help ) const {
- help << "{ convertToCapped:<fromCollectionName>, size:<sizeInBytes> }";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::convertToCapped);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+
+class CmdCloneCollectionAsCapped : public Command {
+public:
+ CmdCloneCollectionAsCapped() : Command("cloneCollectionAsCapped") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ cloneCollectionAsCapped:<fromName>, toCollection:<toName>, size:<sizeInBytes> }";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet sourceActions;
+ sourceActions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), sourceActions));
+
+ ActionSet targetActions;
+ targetActions.addAction(ActionType::insert);
+ targetActions.addAction(ActionType::createIndex);
+ targetActions.addAction(ActionType::convertToCapped);
+ std::string collection = cmdObj.getStringField("toCollection");
+ uassert(16708, "bad 'toCollection' value", !collection.empty());
+
+ out->push_back(
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbname, collection)),
+ targetActions));
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string from = jsobj.getStringField("cloneCollectionAsCapped");
+ string to = jsobj.getStringField("toCollection");
+ double size = jsobj.getField("size").number();
+ bool temp = jsobj.getField("temp").trueValue();
+
+ if (from.empty() || to.empty() || size == 0) {
+ errmsg = "invalid command spec";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbname, MODE_X);
- string shortSource = jsobj.getStringField( "convertToCapped" );
- double size = jsobj.getField( "size" ).number();
+ NamespaceString nss(dbname, to);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NotMaster,
+ str::stream()
+ << "Not primary while cloning collection " << from
+ << " to " << to << " (as capped)"));
+ }
- if (shortSource.empty() || size == 0) {
- errmsg = "invalid command spec";
- return false;
- }
+ Database* const db = autoDb.getDb();
+ if (!db) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbname << " not found"));
+ }
- return appendCommandStatus(result,
- convertToCapped(txn,
- NamespaceString(dbname, shortSource),
- size));
+ Status status = cloneCollectionAsCapped(txn, db, from, to, size, temp);
+ return appendCommandStatus(result, status);
+ }
+} cmdCloneCollectionAsCapped;
+
+/* jan2010:
+ Converts the given collection to a capped collection w/ the specified size.
+ This command is not highly used, and is not currently supported with sharded
+ environments.
+ */
+class CmdConvertToCapped : public Command {
+public:
+ CmdConvertToCapped() : Command("convertToCapped") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ convertToCapped:<fromCollectionName>, size:<sizeInBytes> }";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::convertToCapped);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string shortSource = jsobj.getStringField("convertToCapped");
+ double size = jsobj.getField("size").number();
+
+ if (shortSource.empty() || size == 0) {
+ errmsg = "invalid command spec";
+ return false;
}
- } cmdConvertToCapped;
+ return appendCommandStatus(
+ result, convertToCapped(txn, NamespaceString(dbname, shortSource), size));
+ }
+} cmdConvertToCapped;
}
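Both commands above reject an empty collection name or a zero size as "invalid command spec". Hypothetical command shapes that pass those checks (sizes are in bytes and are read as numbers):

    // Fragments, illustrative only.
    BSONObj asCapped = BSON("cloneCollectionAsCapped" << "events"
                                                      << "toCollection" << "events_capped"
                                                      << "size" << 100 * 1024 * 1024);
    BSONObj toCapped = BSON("convertToCapped" << "events"
                                              << "size" << 100 * 1024 * 1024);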
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index e0c935a3f19..a71357cb53a 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -51,126 +51,134 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- class CompactCmd : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool maintenanceMode() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::compact);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- virtual void help( stringstream& help ) const {
- help << "compact collection\n"
- "warning: this operation locks the database and is slow. you can cancel with killOp()\n"
+using std::string;
+using std::stringstream;
+
+class CompactCmd : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool maintenanceMode() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::compact);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+ virtual void help(stringstream& help) const {
+ help << "compact collection\n"
+ "warning: this operation locks the database and is slow. you can cancel with "
+ "killOp()\n"
"{ compact : <collection_name>, [force:<bool>], [validate:<bool>],\n"
" [paddingFactor:<num>], [paddingBytes:<num>] }\n"
" force - allows to run on a replica set primary\n"
- " validate - check records are noncorrupt before adding to newly compacting extents. slower but safer (defaults to true in this version)\n";
+ " validate - check records are noncorrupt before adding to newly compacting "
+ "extents. slower but safer (defaults to true in this version)\n";
+ }
+ CompactCmd() : Command("compact") {}
+
+ virtual bool run(OperationContext* txn,
+ const string& db,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string nsToCompact = parseNsCollectionRequired(db, cmdObj);
+
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ if (replCoord->getMemberState().primary() && !cmdObj["force"].trueValue()) {
+ errmsg =
+ "will not run compact on an active replica set primary as this is a slow blocking "
+ "operation. use force:true to force";
+ return false;
}
- CompactCmd() : Command("compact") { }
-
- virtual bool run(OperationContext* txn,
- const string& db,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string nsToCompact = parseNsCollectionRequired(db, cmdObj);
-
- repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- if (replCoord->getMemberState().primary() && !cmdObj["force"].trueValue()) {
- errmsg = "will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force";
- return false;
- }
- NamespaceString ns(nsToCompact);
- if ( !ns.isNormal() ) {
- errmsg = "bad namespace name";
- return false;
- }
+ NamespaceString ns(nsToCompact);
+ if (!ns.isNormal()) {
+ errmsg = "bad namespace name";
+ return false;
+ }
- if ( ns.isSystem() ) {
- // items in system.* cannot be moved as there might be pointers to them
- // i.e. system.indexes entries are pointed to from NamespaceDetails
- errmsg = "can't compact a system namespace";
- return false;
- }
+ if (ns.isSystem()) {
+ // items in system.* cannot be moved as there might be pointers to them
+ // i.e. system.indexes entries are pointed to from NamespaceDetails
+ errmsg = "can't compact a system namespace";
+ return false;
+ }
- CompactOptions compactOptions;
+ CompactOptions compactOptions;
- if ( cmdObj["preservePadding"].trueValue() ) {
- compactOptions.paddingMode = CompactOptions::PRESERVE;
- if ( cmdObj.hasElement( "paddingFactor" ) ||
- cmdObj.hasElement( "paddingBytes" ) ) {
- errmsg = "cannot mix preservePadding and paddingFactor|paddingBytes";
+ if (cmdObj["preservePadding"].trueValue()) {
+ compactOptions.paddingMode = CompactOptions::PRESERVE;
+ if (cmdObj.hasElement("paddingFactor") || cmdObj.hasElement("paddingBytes")) {
+ errmsg = "cannot mix preservePadding and paddingFactor|paddingBytes";
+ return false;
+ }
+ } else if (cmdObj.hasElement("paddingFactor") || cmdObj.hasElement("paddingBytes")) {
+ compactOptions.paddingMode = CompactOptions::MANUAL;
+ if (cmdObj.hasElement("paddingFactor")) {
+ compactOptions.paddingFactor = cmdObj["paddingFactor"].Number();
+ if (compactOptions.paddingFactor < 1 || compactOptions.paddingFactor > 4) {
+ errmsg = "invalid padding factor";
return false;
}
}
- else if ( cmdObj.hasElement( "paddingFactor" ) || cmdObj.hasElement( "paddingBytes" ) ) {
- compactOptions.paddingMode = CompactOptions::MANUAL;
- if ( cmdObj.hasElement("paddingFactor") ) {
- compactOptions.paddingFactor = cmdObj["paddingFactor"].Number();
- if ( compactOptions.paddingFactor < 1 ||
- compactOptions.paddingFactor > 4 ){
- errmsg = "invalid padding factor";
- return false;
- }
- }
- if ( cmdObj.hasElement("paddingBytes") ) {
- compactOptions.paddingBytes = cmdObj["paddingBytes"].numberInt();
- if ( compactOptions.paddingBytes < 0 ||
- compactOptions.paddingBytes > ( 1024 * 1024 ) ) {
- errmsg = "invalid padding bytes";
- return false;
- }
+ if (cmdObj.hasElement("paddingBytes")) {
+ compactOptions.paddingBytes = cmdObj["paddingBytes"].numberInt();
+ if (compactOptions.paddingBytes < 0 ||
+ compactOptions.paddingBytes > (1024 * 1024)) {
+ errmsg = "invalid padding bytes";
+ return false;
}
}
+ }
- if ( cmdObj.hasElement("validate") )
- compactOptions.validateDocuments = cmdObj["validate"].trueValue();
-
+ if (cmdObj.hasElement("validate"))
+ compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, db, MODE_X);
- Database* const collDB = autoDb.getDb();
- Collection* collection = collDB ? collDB->getCollection(ns) : NULL;
- // If db/collection does not exist, short circuit and return.
- if ( !collDB || !collection ) {
- errmsg = "namespace does not exist";
- return false;
- }
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, db, MODE_X);
+ Database* const collDB = autoDb.getDb();
+ Collection* collection = collDB ? collDB->getCollection(ns) : NULL;
- OldClientContext ctx(txn, ns);
- BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
+ // If db/collection does not exist, short circuit and return.
+ if (!collDB || !collection) {
+ errmsg = "namespace does not exist";
+ return false;
+ }
- if ( collection->isCapped() ) {
- errmsg = "cannot compact a capped collection";
- return false;
- }
+ OldClientContext ctx(txn, ns);
+ BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
- log() << "compact " << ns << " begin, options: " << compactOptions.toString();
+ if (collection->isCapped()) {
+ errmsg = "cannot compact a capped collection";
+ return false;
+ }
- StatusWith<CompactStats> status = collection->compact( txn, &compactOptions );
- if ( !status.isOK() )
- return appendCommandStatus( result, status.getStatus() );
+ log() << "compact " << ns << " begin, options: " << compactOptions.toString();
- if ( status.getValue().corruptDocuments > 0 )
- result.append("invalidObjects", status.getValue().corruptDocuments );
+ StatusWith<CompactStats> status = collection->compact(txn, &compactOptions);
+ if (!status.isOK())
+ return appendCommandStatus(result, status.getStatus());
- log() << "compact " << ns << " end";
+ if (status.getValue().corruptDocuments > 0)
+ result.append("invalidObjects", status.getValue().corruptDocuments);
- return true;
- }
- };
- static CompactCmd compactCmd;
+ log() << "compact " << ns << " end";
+ return true;
+ }
+};
+static CompactCmd compactCmd;
}
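The option parsing above enforces that preservePadding is mutually exclusive with paddingFactor/paddingBytes, that paddingFactor stays within [1, 4] and paddingBytes within [0, 1 MB], and that force:true is required to run on an active replica set primary. A hypothetical invocation satisfying those constraints, assuming an existing connection conn:

    // Fragment, illustrative only: compact with manual padding inside the accepted
    // ranges and force:true so the primary check above passes.
    BSONObj cmd = BSON("compact" << "events"
                                 << "force" << true
                                 << "validate" << true
                                 << "paddingFactor" << 1.1
                                 << "paddingBytes" << 8192);
    // BSONObj res;
    // conn.runCommand("test", cmd, res);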
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index 06a4367c9ed..843b8b1728a 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -35,100 +35,99 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- class CmdConnectionStatus : public Command {
- public:
- CmdConnectionStatus() : Command("connectionStatus") {}
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
-
- void help(stringstream& h) const {
- h << "Returns connection-specific information such as logged-in users and their roles";
+using std::string;
+using std::stringstream;
+
+class CmdConnectionStatus : public Command {
+public:
+ CmdConnectionStatus() : Command("connectionStatus") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+
+ void help(stringstream& h) const {
+ h << "Returns connection-specific information such as logged-in users and their roles";
+ }
+
+ bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationSession* authSession = AuthorizationSession::get(ClientBasic::getCurrent());
+
+ bool showPrivileges;
+ Status status =
+ bsonExtractBooleanFieldWithDefault(cmdObj, "showPrivileges", false, &showPrivileges);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string&, BSONObj& cmdObj, int, string& errmsg,
- BSONObjBuilder& result) {
- AuthorizationSession* authSession =
- AuthorizationSession::get(ClientBasic::getCurrent());
-
- bool showPrivileges;
- Status status = bsonExtractBooleanFieldWithDefault(cmdObj,
- "showPrivileges",
- false,
- &showPrivileges);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ BSONObjBuilder authInfo(result.subobjStart("authInfo"));
+ {
+ BSONArrayBuilder authenticatedUsers(authInfo.subarrayStart("authenticatedUsers"));
+ UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
- BSONObjBuilder authInfo(result.subobjStart("authInfo"));
- {
- BSONArrayBuilder authenticatedUsers(authInfo.subarrayStart("authenticatedUsers"));
- UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
-
- for ( ; nameIter.more(); nameIter.next()) {
- BSONObjBuilder userInfoBuilder(authenticatedUsers.subobjStart());
- userInfoBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME,
- nameIter->getUser());
- userInfoBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME,
- nameIter->getDB());
- }
+ for (; nameIter.more(); nameIter.next()) {
+ BSONObjBuilder userInfoBuilder(authenticatedUsers.subobjStart());
+ userInfoBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME,
+ nameIter->getUser());
+ userInfoBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, nameIter->getDB());
}
- {
- BSONArrayBuilder authenticatedRoles(
- authInfo.subarrayStart("authenticatedUserRoles"));
- RoleNameIterator roleIter = authSession->getAuthenticatedRoleNames();
-
- for ( ; roleIter.more(); roleIter.next()) {
- BSONObjBuilder roleInfoBuilder(authenticatedRoles.subobjStart());
- roleInfoBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME,
- roleIter->getRole());
- roleInfoBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME,
- roleIter->getDB());
- }
+ }
+ {
+ BSONArrayBuilder authenticatedRoles(authInfo.subarrayStart("authenticatedUserRoles"));
+ RoleNameIterator roleIter = authSession->getAuthenticatedRoleNames();
+
+ for (; roleIter.more(); roleIter.next()) {
+ BSONObjBuilder roleInfoBuilder(authenticatedRoles.subobjStart());
+ roleInfoBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME,
+ roleIter->getRole());
+ roleInfoBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME, roleIter->getDB());
}
- if (showPrivileges) {
- BSONArrayBuilder authenticatedPrivileges(
- authInfo.subarrayStart("authenticatedUserPrivileges"));
-
- // Create a unified map of resources to privileges, to avoid duplicate
- // entries in the connection status output.
- User::ResourcePrivilegeMap unifiedResourcePrivilegeMap;
- UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
-
- for ( ; nameIter.more(); nameIter.next()) {
- User* authUser = authSession->lookupUser(*nameIter);
- const User::ResourcePrivilegeMap& resourcePrivilegeMap =
- authUser->getPrivileges();
- for (User::ResourcePrivilegeMap::const_iterator it =
- resourcePrivilegeMap.begin();
- it != resourcePrivilegeMap.end();
- ++it) {
- if (unifiedResourcePrivilegeMap.find(it->first) ==
- unifiedResourcePrivilegeMap.end()) {
- unifiedResourcePrivilegeMap[it->first] = it->second;
- } else {
- unifiedResourcePrivilegeMap[it->first].addActions(
- it->second.getActions());
- }
+ }
+ if (showPrivileges) {
+ BSONArrayBuilder authenticatedPrivileges(
+ authInfo.subarrayStart("authenticatedUserPrivileges"));
+
+ // Create a unified map of resources to privileges, to avoid duplicate
+ // entries in the connection status output.
+ User::ResourcePrivilegeMap unifiedResourcePrivilegeMap;
+ UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
+
+ for (; nameIter.more(); nameIter.next()) {
+ User* authUser = authSession->lookupUser(*nameIter);
+ const User::ResourcePrivilegeMap& resourcePrivilegeMap = authUser->getPrivileges();
+ for (User::ResourcePrivilegeMap::const_iterator it = resourcePrivilegeMap.begin();
+ it != resourcePrivilegeMap.end();
+ ++it) {
+ if (unifiedResourcePrivilegeMap.find(it->first) ==
+ unifiedResourcePrivilegeMap.end()) {
+ unifiedResourcePrivilegeMap[it->first] = it->second;
+ } else {
+ unifiedResourcePrivilegeMap[it->first].addActions(it->second.getActions());
}
}
+ }
- for (User::ResourcePrivilegeMap::const_iterator it =
- unifiedResourcePrivilegeMap.begin();
- it != unifiedResourcePrivilegeMap.end();
- ++it) {
- authenticatedPrivileges << it->second.toBSON();
- }
+ for (User::ResourcePrivilegeMap::const_iterator it =
+ unifiedResourcePrivilegeMap.begin();
+ it != unifiedResourcePrivilegeMap.end();
+ ++it) {
+ authenticatedPrivileges << it->second.toBSON();
}
+ }
- authInfo.doneFast();
+ authInfo.doneFast();
- return true;
- }
- } cmdConnectionStatus;
+ return true;
+ }
+} cmdConnectionStatus;
}
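A fragment showing the showPrivileges flag read via bsonExtractBooleanFieldWithDefault above; with it set, the authInfo sub-document additionally carries authenticatedUserPrivileges. It assumes an existing, authenticated connection named conn:

    // Fragment, illustrative only.
    BSONObj res;
    conn.runCommand("admin", BSON("connectionStatus" << 1 << "showPrivileges" << true), res);
    // res["authInfo"] then contains authenticatedUsers, authenticatedUserRoles and,
    // because showPrivileges is true, authenticatedUserPrivileges.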
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 328c24111f8..d41c3f50657 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -43,190 +43,190 @@
namespace {
- using namespace mongo;
-
- using std::string;
- using std::stringstream;
-
- /* Usage:
- * admindb.$cmd.findOne( { copydb: 1, fromhost: <connection string>, fromdb: <db>,
- * todb: <db>[, username: <username>, nonce: <nonce>, key: <key>] } );
- *
- * The "copydb" command is used to copy a database. Note that this is a very broad definition.
- * This means that the "copydb" command can be used in the following ways:
- *
- * 1. To copy a database within a single node
- * 2. To copy a database within a sharded cluster, possibly to another shard
- * 3. To copy a database from one cluster to another
- *
- * Note that in all cases both the target and source database must be unsharded.
- *
- * The "copydb" command gets sent by the client or the mongos to the destination of the copy
- * operation. The node, cluster, or shard that recieves the "copydb" command must then query
- * the source of the database to be copied for all the contents and metadata of the database.
- *
- *
- *
- * When used with auth, there are two different considerations.
- *
- * The first is authentication with the target. The only entity that needs to authenticate with
- * the target node is the client, so authentication works there the same as it would with any
- * other command.
- *
- * The second is the authentication of the target with the source, which is needed because the
- * target must query the source directly for the contents of the database. To do this, the
- * client must use the "copydbgetnonce" command, in which the target will get a nonce from the
- * source and send it back to the client. The client can then hash its password with the nonce,
- * send it to the target when it runs the "copydb" command, which can then use that information
- * to authenticate with the source.
- *
- * NOTE: mongos doesn't know how to call or handle the "copydbgetnonce" command. See
- * SERVER-6427.
- *
- * NOTE: Since internal cluster auth works differently, "copydb" currently doesn't work between
- * shards in a cluster when auth is enabled. See SERVER-13080.
- */
- class CmdCopyDb : public Command {
- public:
- CmdCopyDb() : Command("copydb") { }
-
- virtual bool adminOnly() const {
- return true;
+using namespace mongo;
+
+using std::string;
+using std::stringstream;
+
+/* Usage:
+ * admindb.$cmd.findOne( { copydb: 1, fromhost: <connection string>, fromdb: <db>,
+ * todb: <db>[, username: <username>, nonce: <nonce>, key: <key>] } );
+ *
+ * The "copydb" command is used to copy a database. Note that this is a very broad definition.
+ * This means that the "copydb" command can be used in the following ways:
+ *
+ * 1. To copy a database within a single node
+ * 2. To copy a database within a sharded cluster, possibly to another shard
+ * 3. To copy a database from one cluster to another
+ *
+ * Note that in all cases both the target and source database must be unsharded.
+ *
+ * The "copydb" command gets sent by the client or the mongos to the destination of the copy
+ * operation. The node, cluster, or shard that recieves the "copydb" command must then query
+ * the source of the database to be copied for all the contents and metadata of the database.
+ *
+ *
+ *
+ * When used with auth, there are two different considerations.
+ *
+ * The first is authentication with the target. The only entity that needs to authenticate with
+ * the target node is the client, so authentication works there the same as it would with any
+ * other command.
+ *
+ * The second is the authentication of the target with the source, which is needed because the
+ * target must query the source directly for the contents of the database. To do this, the
+ * client must use the "copydbgetnonce" command, in which the target will get a nonce from the
+ * source and send it back to the client. The client can then hash its password with the nonce,
+ * send it to the target when it runs the "copydb" command, which can then use that information
+ * to authenticate with the source.
+ *
+ * NOTE: mongos doesn't know how to call or handle the "copydbgetnonce" command. See
+ * SERVER-6427.
+ *
+ * NOTE: Since internal cluster auth works differently, "copydb" currently doesn't work between
+ * shards in a cluster when auth is enabled. See SERVER-13080.
+ */
+class CmdCopyDb : public Command {
+public:
+ CmdCopyDb() : Command("copydb") {}
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return copydb::checkAuthForCopydbCommand(client, dbname, cmdObj);
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "copy a database from another host to this host\n";
+ help << "usage: {copydb: 1, fromhost: <connection string>, fromdb: <db>, todb: <db>"
+ << "[, slaveOk: <bool>, username: <username>, nonce: <nonce>, key: <key>]}";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
+
+ string fromhost = cmdObj.getStringField("fromhost");
+ bool fromSelf = fromhost.empty();
+ if (fromSelf) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << serverGlobalParams.port;
+ fromhost = ss.str();
}
- virtual bool slaveOk() const {
+ CloneOptions cloneOptions;
+ cloneOptions.fromDB = cmdObj.getStringField("fromdb");
+ cloneOptions.slaveOk = cmdObj["slaveOk"].trueValue();
+ cloneOptions.useReplAuth = false;
+ cloneOptions.snapshot = true;
+ cloneOptions.mayYield = true;
+ cloneOptions.mayBeInterrupted = false;
+
+ string todb = cmdObj.getStringField("todb");
+ if (fromhost.empty() || todb.empty() || cloneOptions.fromDB.empty()) {
+ errmsg =
+ "params missing - {copydb: 1, fromhost: <connection string>, "
+ "fromdb: <db>, todb: <db>}";
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return copydb::checkAuthForCopydbCommand(client, dbname, cmdObj);
+ if (!NamespaceString::validDBName(todb)) {
+ errmsg = "invalid todb name: " + todb;
+ return false;
}
- virtual void help( stringstream &help ) const {
- help << "copy a database from another host to this host\n";
- help << "usage: {copydb: 1, fromhost: <connection string>, fromdb: <db>, todb: <db>"
- << "[, slaveOk: <bool>, username: <username>, nonce: <nonce>, key: <key>]}";
- }
+ Cloner cloner;
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
-
- string fromhost = cmdObj.getStringField("fromhost");
- bool fromSelf = fromhost.empty();
- if ( fromSelf ) {
- /* copy from self */
- stringstream ss;
- ss << "localhost:" << serverGlobalParams.port;
- fromhost = ss.str();
- }
+ // Get MONGODB-CR parameters
+ string username = cmdObj.getStringField("username");
+ string nonce = cmdObj.getStringField("nonce");
+ string key = cmdObj.getStringField("key");
- CloneOptions cloneOptions;
- cloneOptions.fromDB = cmdObj.getStringField("fromdb");
- cloneOptions.slaveOk = cmdObj["slaveOk"].trueValue();
- cloneOptions.useReplAuth = false;
- cloneOptions.snapshot = true;
- cloneOptions.mayYield = true;
- cloneOptions.mayBeInterrupted = false;
-
- string todb = cmdObj.getStringField("todb");
- if ( fromhost.empty() || todb.empty() || cloneOptions.fromDB.empty() ) {
- errmsg = "params missing - {copydb: 1, fromhost: <connection string>, "
- "fromdb: <db>, todb: <db>}";
- return false;
- }
+ auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
- if ( !NamespaceString::validDBName( todb ) ) {
- errmsg = "invalid todb name: " + todb;
- return false;
- }
-
- Cloner cloner;
-
- // Get MONGODB-CR parameters
- string username = cmdObj.getStringField( "username" );
- string nonce = cmdObj.getStringField( "nonce" );
- string key = cmdObj.getStringField( "key" );
-
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
-
- if ( !username.empty() && !nonce.empty() && !key.empty() ) {
- uassert( 13008, "must call copydbgetnonce first", authConn.get() );
- BSONObj ret;
- {
- if ( !authConn->runCommand( cloneOptions.fromDB,
- BSON( "authenticate" << 1 << "user" << username
- << "nonce" << nonce << "key" << key ), ret ) ) {
- errmsg = "unable to login " + ret.toString();
- authConn.reset();
- return false;
- }
- }
- cloner.setConnection( authConn.release() );
- }
- else if (cmdObj.hasField(saslCommandConversationIdFieldName) &&
- cmdObj.hasField(saslCommandPayloadFieldName)) {
- uassert( 25487, "must call copydbsaslstart first", authConn.get() );
- BSONObj ret;
- if ( !authConn->runCommand( cloneOptions.fromDB,
- BSON( "saslContinue" << 1 <<
- cmdObj[saslCommandConversationIdFieldName] <<
- cmdObj[saslCommandPayloadFieldName] ),
- ret ) ) {
+ if (!username.empty() && !nonce.empty() && !key.empty()) {
+ uassert(13008, "must call copydbgetnonce first", authConn.get());
+ BSONObj ret;
+ {
+ if (!authConn->runCommand(cloneOptions.fromDB,
+ BSON("authenticate" << 1 << "user" << username << "nonce"
+ << nonce << "key" << key),
+ ret)) {
errmsg = "unable to login " + ret.toString();
authConn.reset();
return false;
}
-
- if (!ret["done"].Bool()) {
- result.appendElements( ret );
- return true;
- }
-
- result.append("done", true);
- cloner.setConnection( authConn.release() );
}
- else if (!fromSelf) {
- // If fromSelf leave the cloner's conn empty, it will use a DBDirectClient instead.
- const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
+ cloner.setConnection(authConn.release());
+ } else if (cmdObj.hasField(saslCommandConversationIdFieldName) &&
+ cmdObj.hasField(saslCommandPayloadFieldName)) {
+ uassert(25487, "must call copydbsaslstart first", authConn.get());
+ BSONObj ret;
+ if (!authConn->runCommand(cloneOptions.fromDB,
+ BSON("saslContinue"
+ << 1 << cmdObj[saslCommandConversationIdFieldName]
+ << cmdObj[saslCommandPayloadFieldName]),
+ ret)) {
+ errmsg = "unable to login " + ret.toString();
+ authConn.reset();
+ return false;
+ }
- DBClientBase* conn = cs.connect(errmsg);
- if (!conn) {
- return false;
- }
- cloner.setConnection(conn);
+ if (!ret["done"].Bool()) {
+ result.appendElements(ret);
+ return true;
}
- // Either we didn't need the authConn (if we even had one), or we already moved it
- // into the cloner so just make sure we don't keep it around if we don't need it.
- authConn.reset();
+ result.append("done", true);
+ cloner.setConnection(authConn.release());
+ } else if (!fromSelf) {
+ // If fromSelf leave the cloner's conn empty, it will use a DBDirectClient instead.
+ const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
- if (fromSelf) {
- // SERVER-4328 todo lock just the two db's not everything for the fromself case
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
- }
- else {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), todb, MODE_X);
- uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
+ DBClientBase* conn = cs.connect(errmsg);
+ if (!conn) {
+ return false;
}
+ cloner.setConnection(conn);
+ }
- return true;
+ // Either we didn't need the authConn (if we even had one), or we already moved it
+ // into the cloner so just make sure we don't keep it around if we don't need it.
+ authConn.reset();
+
+ if (fromSelf) {
+ // SERVER-4328 todo lock just the two db's not everything for the fromself case
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
+ } else {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), todb, MODE_X);
+ uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
}
- } cmdCopyDB;
+ return true;
+ }
+
+} cmdCopyDB;
-} // namespace
+} // namespace
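The comment block above describes a three-step flow when auth is enabled: the client asks the target for a nonce via copydbgetnonce (the target fetches it from the source), derives the MONGODB-CR key from that nonce, and then issues copydb with username/nonce/key so the target can authenticate against the source. A minimal sketch of the client side under those assumptions, not part of the patch; the key derivation is injected rather than implemented here, and host/db names would come from the caller:

    // Minimal sketch (not part of the patch) of the copydbgetnonce -> copydb flow.
    #include <functional>
    #include <string>
    #include "mongo/client/dbclientinterface.h"

    // makeKey computes the MONGODB-CR key from the nonce returned by copydbgetnonce;
    // the derivation itself is outside the scope of this file, so it is injected.
    bool copyDatabaseWithAuth(mongo::DBClientConnection& target,  // destination mongod
                              const std::string& fromHost,
                              const std::string& fromDb,
                              const std::string& toDb,
                              const std::string& username,
                              const std::function<std::string(const std::string&)>& makeKey) {
        using namespace mongo;

        // Step 1: have the target fetch a nonce from the source.
        BSONObj nonceRes;
        if (!target.runCommand(
                "admin", BSON("copydbgetnonce" << 1 << "fromhost" << fromHost), nonceRes))
            return false;
        const std::string nonce = nonceRes.getStringField("nonce");

        // Step 2: derive the MONGODB-CR key from the nonce on the client side.
        const std::string key = makeKey(nonce);

        // Step 3: run copydb on the target with username/nonce/key so it can
        // authenticate against the source before cloning the database.
        BSONObj res;
        return target.runCommand("admin",
                                 BSON("copydb" << 1 << "fromhost" << fromHost << "fromdb" << fromDb
                                               << "todb" << toDb << "username" << username
                                               << "nonce" << nonce << "key" << key),
                                 res);
    }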
diff --git a/src/mongo/db/commands/copydb.h b/src/mongo/db/commands/copydb.h
index f7b2adfbe6d..3da70ccd01a 100644
--- a/src/mongo/db/commands/copydb.h
+++ b/src/mongo/db/commands/copydb.h
@@ -36,15 +36,13 @@
namespace mongo {
- class ClientBasic;
+class ClientBasic;
namespace copydb {
- Status checkAuthForCopydbCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
-} // namespace copydb
-} // namespace mongo
-
+Status checkAuthForCopydbCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+} // namespace copydb
+} // namespace mongo
diff --git a/src/mongo/db/commands/copydb_common.cpp b/src/mongo/db/commands/copydb_common.cpp
index b72a91a9310..5f033aede73 100644
--- a/src/mongo/db/commands/copydb_common.cpp
+++ b/src/mongo/db/commands/copydb_common.cpp
@@ -43,63 +43,63 @@
namespace mongo {
namespace copydb {
- Status checkAuthForCopydbCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- bool fromSelf = StringData(cmdObj.getStringField("fromhost")).empty();
- StringData fromdb = cmdObj.getStringField("fromdb");
- StringData todb = cmdObj.getStringField("todb");
+Status checkAuthForCopydbCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ bool fromSelf = StringData(cmdObj.getStringField("fromhost")).empty();
+ StringData fromdb = cmdObj.getStringField("fromdb");
+ StringData todb = cmdObj.getStringField("todb");
- // get system collections
- std::vector<std::string> legalClientSystemCollections;
- legalClientSystemCollections.push_back("system.js");
- if (fromdb == "admin") {
- legalClientSystemCollections.push_back("system.users");
- legalClientSystemCollections.push_back("system.roles");
- legalClientSystemCollections.push_back("system.version");
- } else if (fromdb == "local") { // TODO(spencer): shouldn't be possible. See SERVER-11383
- legalClientSystemCollections.push_back("system.replset");
- }
+ // get system collections
+ std::vector<std::string> legalClientSystemCollections;
+ legalClientSystemCollections.push_back("system.js");
+ if (fromdb == "admin") {
+ legalClientSystemCollections.push_back("system.users");
+ legalClientSystemCollections.push_back("system.roles");
+ legalClientSystemCollections.push_back("system.version");
+ } else if (fromdb == "local") { // TODO(spencer): shouldn't be possible. See SERVER-11383
+ legalClientSystemCollections.push_back("system.replset");
+ }
- // Check authorization on destination db
- ActionSet actions;
- actions.addAction(ActionType::insert);
- actions.addAction(ActionType::createIndex);
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
+ // Check authorization on destination db
+ ActionSet actions;
+ actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::createIndex);
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actions.addAction(ActionType::bypassDocumentValidation);
+ }
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(todb), actions)) {
+ if (!AuthorizationSession::get(client)
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(todb), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ actions.removeAllActions();
+ actions.addAction(ActionType::insert);
+ for (size_t i = 0; i < legalClientSystemCollections.size(); ++i) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
+ NamespaceString(todb, legalClientSystemCollections[i]), actions)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
+ }
+ if (fromSelf) {
+ // If copying from self, also require privileges on source db
actions.removeAllActions();
- actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::find);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(fromdb), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
for (size_t i = 0; i < legalClientSystemCollections.size(); ++i) {
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
- NamespaceString(todb, legalClientSystemCollections[i]), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- }
-
- if (fromSelf) {
- // If copying from self, also require privileges on source db
- actions.removeAllActions();
- actions.addAction(ActionType::find);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(fromdb), actions)) {
+ NamespaceString(fromdb, legalClientSystemCollections[i]), actions)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- for (size_t i = 0; i < legalClientSystemCollections.size(); ++i) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
- NamespaceString(fromdb, legalClientSystemCollections[i]), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- }
}
- return Status::OK();
}
+ return Status::OK();
+}
-} // namespace copydb
-} // namespace mongo
+} // namespace copydb
+} // namespace mongo
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 70434e11f1b..078ddca6039 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -48,178 +48,172 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
+
+namespace {
+const auto authConnection = Client::declareDecoration<std::unique_ptr<DBClientBase>>();
+} // namespace
+
+std::unique_ptr<DBClientBase>& CopyDbAuthConnection::forClient(Client* client) {
+ return authConnection(client);
+}
+
+/* Usage:
+ * admindb.$cmd.findOne( { copydbgetnonce: 1, fromhost: <connection string> } );
+ *
+ * Run against the mongod that is the intended target for the "copydb" command. Used to get a
+ * nonce from the source of a "copydb" operation for authentication purposes. See the
+ * description of the "copydb" command below.
+ */
+class CmdCopyDbGetNonce : public Command {
+public:
+ CmdCopyDbGetNonce() : Command("copydbgetnonce") {}
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
- namespace {
- const auto authConnection =
- Client::declareDecoration<std::unique_ptr<DBClientBase>>();
- } // namespace
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- std::unique_ptr<DBClientBase>& CopyDbAuthConnection::forClient(Client* client) {
- return authConnection(client);
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // No auth required
}
- /* Usage:
- * admindb.$cmd.findOne( { copydbgetnonce: 1, fromhost: <connection string> } );
- *
- * Run against the mongod that is the intended target for the "copydb" command. Used to get a
- * nonce from the source of a "copydb" operation for authentication purposes. See the
- * description of the "copydb" command below.
- */
- class CmdCopyDbGetNonce : public Command {
- public:
- CmdCopyDbGetNonce() : Command("copydbgetnonce") { }
-
- virtual bool adminOnly() const {
- return true;
+ virtual void help(stringstream& help) const {
+ help << "get a nonce for subsequent copy db request from secure server\n";
+ help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string fromhost = cmdObj.getStringField("fromhost");
+ if (fromhost.empty()) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << serverGlobalParams.port;
+ fromhost = ss.str();
}
- virtual bool slaveOk() const {
+ const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
+
+ auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ authConn.reset(cs.connect(errmsg));
+ if (!authConn) {
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ BSONObj ret;
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- // No auth required
+ if (!authConn->runCommand("admin", BSON("getnonce" << 1), ret)) {
+ errmsg = "couldn't get nonce " + ret.toString();
+ authConn.reset();
+ return false;
}
- virtual void help( stringstream &help ) const {
- help << "get a nonce for subsequent copy db request from secure server\n";
- help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
- }
+ result.appendElements(ret);
+ return true;
+ }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- string fromhost = cmdObj.getStringField("fromhost");
- if ( fromhost.empty() ) {
- /* copy from self */
- stringstream ss;
- ss << "localhost:" << serverGlobalParams.port;
- fromhost = ss.str();
- }
-
- const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
-
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
- authConn.reset(cs.connect(errmsg));
- if (!authConn) {
- return false;
- }
-
- BSONObj ret;
-
- if( !authConn->runCommand( "admin", BSON( "getnonce" << 1 ), ret ) ) {
- errmsg = "couldn't get nonce " + ret.toString();
- authConn.reset();
- return false;
- }
-
- result.appendElements( ret );
- return true;
+} cmdCopyDBGetNonce;
+
+/* Usage:
+ * admindb.$cmd.findOne( { copydbsaslstart: 1,
+ * fromhost: <connection string>,
+ * mechanism: <String>,
+ * payload: <BinaryOrString> } );
+ *
+ * Run against the mongod that is the intended target for the "copydb" command. Used to
+ * initialize a SASL auth session for a "copydb" operation for authentication purposes.
+ */
+class CmdCopyDbSaslStart : public Command {
+public:
+ CmdCopyDbSaslStart() : Command("copydbsaslstart") {}
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ // No auth required
+ return Status::OK();
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "Initialize a SASL auth session for subsequent copy db request "
+ "from secure server\n";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fromDb = cmdObj.getStringField("fromdb");
+
+ string fromHost = cmdObj.getStringField("fromhost");
+ if (fromHost.empty()) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << serverGlobalParams.port;
+ fromHost = ss.str();
}
- } cmdCopyDBGetNonce;
-
- /* Usage:
- * admindb.$cmd.findOne( { copydbsaslstart: 1,
- * fromhost: <connection string>,
- * mechanism: <String>,
- * payload: <BinaryOrString> } );
- *
- * Run against the mongod that is the intended target for the "copydb" command. Used to
- * initialize a SASL auth session for a "copydb" operation for authentication purposes.
- */
- class CmdCopyDbSaslStart : public Command {
- public:
- CmdCopyDbSaslStart() : Command("copydbsaslstart") { }
-
- virtual bool adminOnly() const {
- return true;
+ const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromHost)));
+
+ BSONElement mechanismElement;
+ Status status = bsonExtractField(cmdObj, saslCommandMechanismFieldName, &mechanismElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool slaveOk() const {
+ BSONElement payloadElement;
+ status = bsonExtractField(cmdObj, saslCommandPayloadFieldName, &payloadElement);
+ if (!status.isOK()) {
+ log() << "Failed to extract payload: " << status;
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- // No auth required
- return Status::OK();
+ auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ authConn.reset(cs.connect(errmsg));
+ if (!authConn.get()) {
+ return false;
}
- virtual void help( stringstream &help ) const {
- help << "Initialize a SASL auth session for subsequent copy db request "
- "from secure server\n";
+ BSONObj ret;
+ if (!authConn->runCommand(
+ fromDb, BSON("saslStart" << 1 << mechanismElement << payloadElement), ret)) {
+ authConn.reset();
+ return appendCommandStatus(result, Command::getStatusFromCommandResult(ret));
}
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- const string fromDb = cmdObj.getStringField("fromdb");
-
- string fromHost = cmdObj.getStringField("fromhost");
- if ( fromHost.empty() ) {
- /* copy from self */
- stringstream ss;
- ss << "localhost:" << serverGlobalParams.port;
- fromHost = ss.str();
- }
-
- const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromHost)));
-
- BSONElement mechanismElement;
- Status status = bsonExtractField(cmdObj,
- saslCommandMechanismFieldName,
- &mechanismElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- BSONElement payloadElement;
- status = bsonExtractField(cmdObj, saslCommandPayloadFieldName, &payloadElement);
- if (!status.isOK()) {
- log() << "Failed to extract payload: " << status;
- return false;
- }
-
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
- authConn.reset(cs.connect(errmsg));
- if (!authConn.get()) {
- return false;
- }
-
- BSONObj ret;
- if( !authConn->runCommand( fromDb,
- BSON( "saslStart" << 1 <<
- mechanismElement <<
- payloadElement),
- ret ) ) {
- authConn.reset();
- return appendCommandStatus(result,
- Command::getStatusFromCommandResult(ret));
-
- }
-
- result.appendElements( ret );
- return true;
- }
+ result.appendElements(ret);
+ return true;
+ }
- } cmdCopyDBSaslStart;
+} cmdCopyDBSaslStart;
-} // namespace mongo
+} // namespace mongo
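For context (not part of this patch): the two commands above form the target-side half of the legacy copydb authentication handshake; a caller first obtains a nonce (or a SASL session) relayed from the source host, then issues the actual "copydb" command with the resulting credentials. A minimal caller-side sketch, assuming an already connected DBClientBase* named conn and a hypothetical source host; the final "copydb" invocation with the hashed credentials is omitted:

    // Illustrative fragment; the source host name below is an assumption.
    BSONObj nonceRes;
    bool ok = conn->runCommand("admin",
                               BSON("copydbgetnonce" << 1 << "fromhost"
                                                     << "source.example.net:27017"),
                               nonceRes);
    if (!ok) {
        // nonceRes carries the error document returned by the target.
    }
    // nonceRes now holds the nonce fetched from the source; a subsequent
    // "copydb" command would echo it back together with the user's key.
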
diff --git a/src/mongo/db/commands/copydb_start_commands.h b/src/mongo/db/commands/copydb_start_commands.h
index c4c80e1dbb4..61f3313f918 100644
--- a/src/mongo/db/commands/copydb_start_commands.h
+++ b/src/mongo/db/commands/copydb_start_commands.h
@@ -32,10 +32,10 @@
namespace mongo {
- class Client;
+class Client;
- struct CopyDbAuthConnection {
- static std::unique_ptr<DBClientBase>& forClient(Client* client);
- };
+struct CopyDbAuthConnection {
+ static std::unique_ptr<DBClientBase>& forClient(Client* client);
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 7f4e0bf3a65..91838325936 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -44,121 +44,130 @@
namespace mongo {
namespace {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
-
- /**
- * Implements the MongoD side of the count command.
- */
- class CmdCount : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- CmdCount() : Command("count") { }
- virtual bool slaveOk() const {
- // ok on --slave setups
- return repl::getGlobalReplicationCoordinator()->getSettings().slave == repl::SimpleSlave;
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+
+/**
+ * Implements the MongoD side of the count command.
+ */
+class CmdCount : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ CmdCount() : Command("count") {}
+ virtual bool slaveOk() const {
+ // ok on --slave setups
+ return repl::getGlobalReplicationCoordinator()->getSettings().slave == repl::SimpleSlave;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool maintenanceOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "count objects in collection";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ auto request = CountRequest::parseFromBSON(dbname, cmdObj);
+ if (!request.isOK()) {
+ return request.getStatus();
}
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool maintenanceOk() const { return false; }
- virtual bool adminOnly() const { return false; }
- virtual void help( stringstream& help ) const { help << "count objects in collection"; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+
+ // Acquire the db read lock.
+ AutoGetCollectionForRead ctx(txn, request.getValue().getNs());
+ Collection* collection = ctx.getCollection();
+
+ // Prevent chunks from being cleaned up during yields - this allows us to only check the
+ // version on initial entry into count.
+ RangePreserver preserver(collection);
+
+ PlanExecutor* rawExec;
+ Status getExecStatus = getExecutorCount(txn,
+ collection,
+ request.getValue(),
+ true, // explain
+ PlanExecutor::YIELD_AUTO,
+ &rawExec);
+ if (!getExecStatus.isOK()) {
+ return getExecStatus;
}
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
-
- auto request = CountRequest::parseFromBSON(dbname, cmdObj);
- if (!request.isOK()) {
- return request.getStatus();
- }
-
- // Acquire the db read lock.
- AutoGetCollectionForRead ctx(txn, request.getValue().getNs());
- Collection* collection = ctx.getCollection();
-
- // Prevent chunks from being cleaned up during yields - this allows us to only check the
- // version on initial entry into count.
- RangePreserver preserver(collection);
-
- PlanExecutor* rawExec;
- Status getExecStatus = getExecutorCount(txn,
- collection,
- request.getValue(),
- true, // explain
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!getExecStatus.isOK()) {
- return getExecStatus;
- }
-
- unique_ptr<PlanExecutor> exec(rawExec);
-
- Explain::explainStages(exec.get(), verbosity, out);
- return Status::OK();
+ unique_ptr<PlanExecutor> exec(rawExec);
+
+ Explain::explainStages(exec.get(), verbosity, out);
+ return Status::OK();
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auto request = CountRequest::parseFromBSON(dbname, cmdObj);
+ if (!request.isOK()) {
+ return appendCommandStatus(result, request.getStatus());
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int, string& errmsg,
- BSONObjBuilder& result) {
-
- auto request = CountRequest::parseFromBSON(dbname, cmdObj);
- if (!request.isOK()) {
- return appendCommandStatus(result, request.getStatus());
- }
-
- AutoGetCollectionForRead ctx(txn, request.getValue().getNs());
- Collection* collection = ctx.getCollection();
-
- // Prevent chunks from being cleaned up during yields - this allows us to only check the
- // version on initial entry into count.
- RangePreserver preserver(collection);
-
- PlanExecutor* rawExec;
- Status getExecStatus = getExecutorCount(txn,
- collection,
- request.getValue(),
- false, // !explain
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!getExecStatus.isOK()) {
- return appendCommandStatus(result, getExecStatus);
- }
-
- unique_ptr<PlanExecutor> exec(rawExec);
-
- // Store the plan summary string in CurOp.
- if (NULL != CurOp::get(txn)) {
- CurOp::get(txn)->debug().planSummary = Explain::getPlanSummary(exec.get());
- }
-
- Status execPlanStatus = exec->executePlan();
- if (!execPlanStatus.isOK()) {
- return appendCommandStatus(result, execPlanStatus);
- }
-
- // Plan is done executing. We just need to pull the count out of the root stage.
- invariant(STAGE_COUNT == exec->getRootStage()->stageType());
- CountStage* countStage = static_cast<CountStage*>(exec->getRootStage());
- const CountStats* countStats =
- static_cast<const CountStats*>(countStage->getSpecificStats());
-
- result.appendNumber("n", countStats->nCounted);
- return true;
+ AutoGetCollectionForRead ctx(txn, request.getValue().getNs());
+ Collection* collection = ctx.getCollection();
+
+ // Prevent chunks from being cleaned up during yields - this allows us to only check the
+ // version on initial entry into count.
+ RangePreserver preserver(collection);
+
+ PlanExecutor* rawExec;
+ Status getExecStatus = getExecutorCount(txn,
+ collection,
+ request.getValue(),
+ false, // !explain
+ PlanExecutor::YIELD_AUTO,
+ &rawExec);
+ if (!getExecStatus.isOK()) {
+ return appendCommandStatus(result, getExecStatus);
}
- } cmdCount;
+ unique_ptr<PlanExecutor> exec(rawExec);
+
+ // Store the plan summary string in CurOp.
+ if (NULL != CurOp::get(txn)) {
+ CurOp::get(txn)->debug().planSummary = Explain::getPlanSummary(exec.get());
+ }
+
+ Status execPlanStatus = exec->executePlan();
+ if (!execPlanStatus.isOK()) {
+ return appendCommandStatus(result, execPlanStatus);
+ }
+
+ // Plan is done executing. We just need to pull the count out of the root stage.
+ invariant(STAGE_COUNT == exec->getRootStage()->stageType());
+ CountStage* countStage = static_cast<CountStage*>(exec->getRootStage());
+ const CountStats* countStats =
+ static_cast<const CountStats*>(countStage->getSpecificStats());
+
+ result.appendNumber("n", countStats->nCounted);
+ return true;
+ }
+
+} cmdCount;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
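For context, a caller-side sketch of the command this file implements; the connection, database, collection, and query below are assumptions used only for illustration:

    // Illustrative fragment, assuming a connected DBClientBase* conn.
    BSONObj res;
    conn->runCommand("test",
                     BSON("count" << "bar"
                                  << "query" << BSON("x" << BSON("$gt" << 5))),
                     res);
    // On success, res["n"] holds the number of matching documents, which
    // CmdCount::run() pulls out of the root CountStage's statistics.
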
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index b50dd0233a5..4273666a70f 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -42,9 +42,9 @@
* The commands defined here, and profiling, are only available when enabled at
* build-time with the "--use-cpu-profiler" argument to scons.
*
- * Example SCons command line:
+ * Example SCons command line:
*
- * scons --release --use-cpu-profiler
+ * scons --release --use-cpu-profiler
*/
#include "gperftools/profiler.h"
@@ -63,102 +63,109 @@
namespace mongo {
- namespace {
-
- /**
- * Common code for the implementation of cpu profiler commands.
- */
- class CpuProfilerCommand : public Command {
- public:
- CpuProfilerCommand( char const *name ) : Command( name ) {}
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool localHostOnlyIfNoAuth( const BSONObj& cmdObj ) { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::cpuProfiler);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- // This is an abuse of the global dbmutex. We only really need to
- // ensure that only one cpuprofiler command runs at once; it would
- // be fine for it to run concurrently with other operations.
- virtual bool isWriteCommandForConfigServer() const { return true; }
- };
-
- /**
- * Class providing implementation of the _cpuProfilerStart command.
- */
- class CpuProfilerStartCommand : public CpuProfilerCommand {
- public:
- CpuProfilerStartCommand() : CpuProfilerCommand( commandName ) {}
-
- virtual bool run( OperationContext* txn,
- std::string const &db,
- BSONObj &cmdObj,
- int options,
- std::string &errmsg,
- BSONObjBuilder &result);
-
- static char const *const commandName;
- } cpuProfilerStartCommandInstance;
-
- /**
- * Class providing implementation of the _cpuProfilerStop command.
- */
- class CpuProfilerStopCommand : public CpuProfilerCommand {
- public:
- CpuProfilerStopCommand() : CpuProfilerCommand( commandName ) {}
-
- virtual bool run( OperationContext* txn,
- std::string const &db,
- BSONObj &cmdObj,
- int options,
- std::string &errmsg,
- BSONObjBuilder &result);
-
- static char const *const commandName;
- } cpuProfilerStopCommandInstance;
-
- char const *const CpuProfilerStartCommand::commandName = "_cpuProfilerStart";
- char const *const CpuProfilerStopCommand::commandName = "_cpuProfilerStop";
-
- bool CpuProfilerStartCommand::run( OperationContext* txn,
- std::string const &db,
- BSONObj &cmdObj,
- int options,
- std::string &errmsg,
- BSONObjBuilder &result) {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
- // The lock here is just to prevent concurrency, nothing will write.
- OldClientContext ctx(txn, db);
-
- std::string profileFilename = cmdObj[commandName]["profileFilename"].String();
- if ( ! ::ProfilerStart( profileFilename.c_str() ) ) {
- errmsg = "Failed to start profiler";
- return false;
- }
- return true;
- }
-
- bool CpuProfilerStopCommand::run( OperationContext* txn,
- std::string const &db,
- BSONObj &cmdObj,
- int options,
- std::string &errmsg,
- BSONObjBuilder &result) {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
- OldClientContext ctx(txn, db);
-
- ::ProfilerStop();
- return true;
- }
-
- } // namespace
+namespace {
-} // namespace mongo
+/**
+ * Common code for the implementation of cpu profiler commands.
+ */
+class CpuProfilerCommand : public Command {
+public:
+ CpuProfilerCommand(char const* name) : Command(name) {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::cpuProfiler);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ // This is an abuse of the global dbmutex. We only really need to
+ // ensure that only one cpuprofiler command runs at once; it would
+ // be fine for it to run concurrently with other operations.
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+};
+
+/**
+ * Class providing implementation of the _cpuProfilerStart command.
+ */
+class CpuProfilerStartCommand : public CpuProfilerCommand {
+public:
+ CpuProfilerStartCommand() : CpuProfilerCommand(commandName) {}
+ virtual bool run(OperationContext* txn,
+ std::string const& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ static char const* const commandName;
+} cpuProfilerStartCommandInstance;
+
+/**
+ * Class providing implementation of the _cpuProfilerStop command.
+ */
+class CpuProfilerStopCommand : public CpuProfilerCommand {
+public:
+ CpuProfilerStopCommand() : CpuProfilerCommand(commandName) {}
+
+ virtual bool run(OperationContext* txn,
+ std::string const& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ static char const* const commandName;
+} cpuProfilerStopCommandInstance;
+
+char const* const CpuProfilerStartCommand::commandName = "_cpuProfilerStart";
+char const* const CpuProfilerStopCommand::commandName = "_cpuProfilerStop";
+
+bool CpuProfilerStartCommand::run(OperationContext* txn,
+ std::string const& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
+ // The lock here is just to prevent concurrency, nothing will write.
+ OldClientContext ctx(txn, db);
+
+ std::string profileFilename = cmdObj[commandName]["profileFilename"].String();
+ if (!::ProfilerStart(profileFilename.c_str())) {
+ errmsg = "Failed to start profiler";
+ return false;
+ }
+ return true;
+}
+
+bool CpuProfilerStopCommand::run(OperationContext* txn,
+ std::string const& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
+ OldClientContext ctx(txn, db);
+
+ ::ProfilerStop();
+ return true;
+}
+
+} // namespace
+
+} // namespace mongo
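For context, the profiler commands above are only registered in builds made with the scons flag shown in the header comment. A caller-side sketch, assuming such a build, a connected DBClientBase* conn with cluster-admin rights, and a hypothetical output path:

    // Illustrative fragment; the output file path is an assumption.
    BSONObj res;
    conn->runCommand("admin",
                     BSON("_cpuProfilerStart"
                          << BSON("profileFilename" << "/tmp/mongod.prof")),
                     res);
    // ... run the workload to be profiled ...
    conn->runCommand("admin", BSON("_cpuProfilerStop" << 1), res);
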
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 83dc5e72177..c18c33b7ce4 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -52,251 +52,261 @@
namespace mongo {
- using std::string;
-
- /**
- * { createIndexes : "bar", indexes : [ { ns : "test.bar", key : { x : 1 }, name: "x_1" } ] }
- */
- class CmdCreateIndex : public Command {
- public:
- CmdCreateIndex() : Command( "createIndexes" ){}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return false; } // TODO: this could be made true...
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::createIndex);
- Privilege p(parseResourcePattern(dbname, cmdObj), actions);
- if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
- return Status::OK();
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
+using std::string;
- BSONObj _addNsToSpec( const NamespaceString& ns, const BSONObj& obj ) {
- BSONObjBuilder b;
- b.append( "ns", ns );
- b.appendElements( obj );
- return b.obj();
+/**
+ * { createIndexes : "bar", indexes : [ { ns : "test.bar", key : { x : 1 }, name: "x_1" } ] }
+ */
+class CmdCreateIndex : public Command {
+public:
+ CmdCreateIndex() : Command("createIndexes") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ } // TODO: this could be made true...
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::createIndex);
+ Privilege p(parseResourcePattern(dbname, cmdObj), actions);
+ if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
+ return Status::OK();
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+
+ BSONObj _addNsToSpec(const NamespaceString& ns, const BSONObj& obj) {
+ BSONObjBuilder b;
+ b.append("ns", ns);
+ b.appendElements(obj);
+ return b.obj();
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ // --- parse
+
+ NamespaceString ns(dbname, cmdObj[name].String());
+ Status status = userAllowedWriteNS(ns);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
+
+ if (cmdObj["indexes"].type() != Array) {
+ errmsg = "indexes has to be an array";
+ result.append("cmdObj", cmdObj);
+ return false;
}
- virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int options,
- string& errmsg, BSONObjBuilder& result) {
- // --- parse
-
- NamespaceString ns( dbname, cmdObj[name].String() );
- Status status = userAllowedWriteNS( ns );
- if ( !status.isOK() )
- return appendCommandStatus( result, status );
-
- if ( cmdObj["indexes"].type() != Array ) {
- errmsg = "indexes has to be an array";
- result.append( "cmdObj", cmdObj );
- return false;
+ std::vector<BSONObj> specs;
+ {
+ BSONObjIterator i(cmdObj["indexes"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() != Object) {
+ errmsg = "everything in indexes has to be an Object";
+ result.append("cmdObj", cmdObj);
+ return false;
+ }
+ specs.push_back(e.Obj());
}
+ }
- std::vector<BSONObj> specs;
- {
- BSONObjIterator i( cmdObj["indexes"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.type() != Object ) {
- errmsg = "everything in indexes has to be an Object";
- result.append( "cmdObj", cmdObj );
- return false;
- }
- specs.push_back( e.Obj() );
- }
+ if (specs.size() == 0) {
+ errmsg = "no indexes to add";
+ return false;
+ }
+
+ // check specs
+ for (size_t i = 0; i < specs.size(); i++) {
+ BSONObj spec = specs[i];
+ if (spec["ns"].eoo()) {
+ spec = _addNsToSpec(ns, spec);
+ specs[i] = spec;
}
- if ( specs.size() == 0 ) {
- errmsg = "no indexes to add";
+ if (spec["ns"].type() != String) {
+ errmsg = "spec has no ns";
+ result.append("spec", spec);
return false;
}
-
- // check specs
- for ( size_t i = 0; i < specs.size(); i++ ) {
- BSONObj spec = specs[i];
- if ( spec["ns"].eoo() ) {
- spec = _addNsToSpec( ns, spec );
- specs[i] = spec;
- }
-
- if ( spec["ns"].type() != String ) {
- errmsg = "spec has no ns";
- result.append( "spec", spec );
- return false;
- }
- if ( ns != spec["ns"].String() ) {
- errmsg = "namespace mismatch";
- result.append( "spec", spec );
- return false;
- }
+ if (ns != spec["ns"].String()) {
+ errmsg = "namespace mismatch";
+ result.append("spec", spec);
+ return false;
}
+ }
- // now we know we have to create index(es)
- // Note: createIndexes command does not currently respect shard versioning.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while creating indexes in " << ns.ns()));
- }
+ // now we know we have to create index(es)
+ // Note: createIndexes command does not currently respect shard versioning.
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating indexes in " << ns.ns()));
+ }
- Database* db = dbHolder().get(txn, ns.db());
- if (!db) {
- db = dbHolder().openDb(txn, ns.db());
- }
+ Database* db = dbHolder().get(txn, ns.db());
+ if (!db) {
+ db = dbHolder().openDb(txn, ns.db());
+ }
- Collection* collection = db->getCollection( ns.ns() );
- result.appendBool( "createdCollectionAutomatically", collection == NULL );
- if ( !collection ) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, ns.ns(), CollectionOptions());
- invariant( collection );
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
+ Collection* collection = db->getCollection(ns.ns());
+ result.appendBool("createdCollectionAutomatically", collection == NULL);
+ if (!collection) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ collection = db->createCollection(txn, ns.ns(), CollectionOptions());
+ invariant(collection);
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
+ }
- const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(txn);
- result.append("numIndexesBefore", numIndexesBefore);
+ const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(txn);
+ result.append("numIndexesBefore", numIndexesBefore);
- MultiIndexBlock indexer(txn, collection);
- indexer.allowBackgroundBuilding();
- indexer.allowInterruption();
+ MultiIndexBlock indexer(txn, collection);
+ indexer.allowBackgroundBuilding();
+ indexer.allowInterruption();
- const size_t origSpecsSize = specs.size();
- indexer.removeExistingIndexes(&specs);
+ const size_t origSpecsSize = specs.size();
+ indexer.removeExistingIndexes(&specs);
- if (specs.size() == 0) {
- result.append("numIndexesAfter", numIndexesBefore);
- result.append( "note", "all indexes already exist" );
- return true;
- }
+ if (specs.size() == 0) {
+ result.append("numIndexesAfter", numIndexesBefore);
+ result.append("note", "all indexes already exist");
+ return true;
+ }
- if (specs.size() != origSpecsSize) {
- result.append( "note", "index already exists" );
- }
+ if (specs.size() != origSpecsSize) {
+ result.append("note", "index already exists");
+ }
- for ( size_t i = 0; i < specs.size(); i++ ) {
- const BSONObj& spec = specs[i];
- if ( spec["unique"].trueValue() ) {
- status = checkUniqueIndexConstraints(txn, ns.ns(), spec["key"].Obj());
+ for (size_t i = 0; i < specs.size(); i++) {
+ const BSONObj& spec = specs[i];
+ if (spec["unique"].trueValue()) {
+ status = checkUniqueIndexConstraints(txn, ns.ns(), spec["key"].Obj());
- if ( !status.isOK() ) {
- appendCommandStatus( result, status );
- return false;
- }
+ if (!status.isOK()) {
+ appendCommandStatus(result, status);
+ return false;
}
}
+ }
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- uassertStatusOK(indexer.init(specs));
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ uassertStatusOK(indexer.init(specs));
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
- // If we're a background index, replace exclusive db lock with an intent lock, so that
- // other readers and writers can proceed during this phase.
- if (indexer.getBuildInBackground()) {
- txn->recoveryUnit()->abandonSnapshot();
- dbLock.relockWithMode(MODE_IX);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while creating background indexes in " << ns.ns()));
- }
+ // If we're a background index, replace exclusive db lock with an intent lock, so that
+ // other readers and writers can proceed during this phase.
+ if (indexer.getBuildInBackground()) {
+ txn->recoveryUnit()->abandonSnapshot();
+ dbLock.relockWithMode(MODE_IX);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating background indexes in "
+ << ns.ns()));
}
+ }
- try {
- Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
- uassertStatusOK(indexer.insertAllDocumentsInCollection());
- }
- catch (const DBException& e) {
- invariant(e.getCode() != ErrorCodes::WriteConflict);
- // Must have exclusive DB lock before we clean up the index build via the
- // destructor of 'indexer'.
- if (indexer.getBuildInBackground()) {
- try {
- // This function cannot throw today, but we will preemptively prepare for
- // that day, to avoid data corruption due to lack of index cleanup.
- txn->recoveryUnit()->abandonSnapshot();
- dbLock.relockWithMode(MODE_X);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while creating background indexes in "
- << ns.ns() << ": cleaning up index build failure due to "
- << e.toString()));
- }
- }
- catch (...) {
- std::terminate();
+ try {
+ Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
+ uassertStatusOK(indexer.insertAllDocumentsInCollection());
+ } catch (const DBException& e) {
+ invariant(e.getCode() != ErrorCodes::WriteConflict);
+ // Must have exclusive DB lock before we clean up the index build via the
+ // destructor of 'indexer'.
+ if (indexer.getBuildInBackground()) {
+ try {
+ // This function cannot throw today, but we will preemptively prepare for
+ // that day, to avoid data corruption due to lack of index cleanup.
+ txn->recoveryUnit()->abandonSnapshot();
+ dbLock.relockWithMode(MODE_X);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NotMaster,
+ str::stream()
+ << "Not primary while creating background indexes in "
+ << ns.ns() << ": cleaning up index build failure due to "
+ << e.toString()));
}
+ } catch (...) {
+ std::terminate();
}
- throw;
}
- // Need to return db lock back to exclusive, to complete the index build.
- if (indexer.getBuildInBackground()) {
- txn->recoveryUnit()->abandonSnapshot();
- dbLock.relockWithMode(MODE_X);
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while completing index build in " << dbname,
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns));
-
- Database* db = dbHolder().get(txn, ns.db());
- uassert(28551, "database dropped during index build", db);
- uassert(28552, "collection dropped during index build",
- db->getCollection(ns.ns()));
- }
-
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ throw;
+ }
+ // Need to return db lock back to exclusive, to complete the index build.
+ if (indexer.getBuildInBackground()) {
+ txn->recoveryUnit()->abandonSnapshot();
+ dbLock.relockWithMode(MODE_X);
+ uassert(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while completing index build in " << dbname,
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns));
- indexer.commit();
+ Database* db = dbHolder().get(txn, ns.db());
+ uassert(28551, "database dropped during index build", db);
+ uassert(28552, "collection dropped during index build", db->getCollection(ns.ns()));
+ }
- for ( size_t i = 0; i < specs.size(); i++ ) {
- std::string systemIndexes = ns.getSystemIndexesCollection();
- getGlobalServiceContext()->getOpObserver()->onCreateIndex(txn,
- systemIndexes,
- specs[i]);
- }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
+ indexer.commit();
- result.append( "numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(txn) );
+ for (size_t i = 0; i < specs.size(); i++) {
+ std::string systemIndexes = ns.getSystemIndexesCollection();
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(
+ txn, systemIndexes, specs[i]);
+ }
- return true;
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
- private:
- static Status checkUniqueIndexConstraints(OperationContext* txn,
- StringData ns,
- const BSONObj& newIdxKey) {
+ result.append("numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(txn));
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ return true;
+ }
- if ( shardingState.enabled() ) {
- CollectionMetadataPtr metadata(
- shardingState.getCollectionMetadata( ns.toString() ));
+private:
+ static Status checkUniqueIndexConstraints(OperationContext* txn,
+ StringData ns,
+ const BSONObj& newIdxKey) {
+ invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- if ( metadata ) {
- ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
- if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
- return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON());
- }
+ if (shardingState.enabled()) {
+ CollectionMetadataPtr metadata(shardingState.getCollectionMetadata(ns.toString()));
+
+ if (metadata) {
+ ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
+ if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "cannot create unique index over " << newIdxKey
+ << " with shard key pattern "
+ << shardKeyPattern.toBSON());
}
}
-
- return Status::OK();
}
- } cmdCreateIndex;
+ return Status::OK();
+ }
+} cmdCreateIndex;
}
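For context, a caller-side sketch matching the example in the class comment; the database, collection, and key below are assumptions. CmdCreateIndex::_addNsToSpec() fills in the "ns" field when a spec omits it:

    // Illustrative fragment, assuming a connected DBClientBase* conn.
    BSONObj res;
    conn->runCommand("test",
                     BSON("createIndexes"
                          << "bar"
                          << "indexes"
                          << BSON_ARRAY(BSON("key" << BSON("x" << 1) << "name"
                                                   << "x_1"))),
                     res);
    // res reports numIndexesBefore/numIndexesAfter and whether the
    // collection had to be created automatically.
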
diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp
index d28cb91874e..5107a43d5c5 100644
--- a/src/mongo/db/commands/current_op.cpp
+++ b/src/mongo/db/commands/current_op.cpp
@@ -48,114 +48,115 @@
namespace mongo {
- class CurrentOpCommand : public Command {
- public:
-
- CurrentOpCommand() : Command("currentOp") {}
+class CurrentOpCommand : public Command {
+public:
+ CurrentOpCommand() : Command("currentOp") {}
+
+ bool isWriteCommandForConfigServer() const final {
+ return false;
+ }
+
+ bool slaveOk() const final {
+ return true;
+ }
+
+ bool adminOnly() const final {
+ return true;
+ }
+
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) final {
+ bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::inprog);
+ return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) final {
+ const bool includeAll = cmdObj["$all"].trueValue();
+
+ // Filter the output
+ BSONObj filter;
+ {
+ BSONObjBuilder b;
+ BSONObjIterator i(cmdObj);
+ invariant(i.more());
+ i.next(); // skip {currentOp: 1} which is required to be the first element
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals("$all", e.fieldName())) {
+ continue;
+ }
- bool isWriteCommandForConfigServer() const final { return false; }
+ b.append(e);
+ }
+ filter = b.obj();
+ }
- bool slaveOk() const final { return true; }
+ const WhereCallbackReal whereCallback(txn, db);
+ const Matcher matcher(filter, whereCallback);
- bool adminOnly() const final { return true; }
+ BSONArrayBuilder inprogBuilder(result.subarrayStart("inprog"));
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) final {
+ for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
+ Client* client = cursor.next();) {
+ invariant(client);
- bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- ActionType::inprog);
- return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ stdx::lock_guard<Client> lk(*client);
+ const OperationContext* opCtx = client->getOperationContext();
- bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) final {
-
- const bool includeAll = cmdObj["$all"].trueValue();
-
- // Filter the output
- BSONObj filter;
- {
- BSONObjBuilder b;
- BSONObjIterator i(cmdObj);
- invariant(i.more());
- i.next(); // skip {currentOp: 1} which is required to be the first element
- while (i.more()) {
- BSONElement e = i.next();
- if (str::equals("$all", e.fieldName())) {
- continue;
- }
-
- b.append(e);
- }
- filter = b.obj();
+ if (!includeAll) {
+ // Skip over inactive connections.
+ if (!opCtx)
+ continue;
}
- const WhereCallbackReal whereCallback(txn, db);
- const Matcher matcher(filter, whereCallback);
-
- BSONArrayBuilder inprogBuilder(result.subarrayStart("inprog"));
-
- for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
- Client* client = cursor.next();) {
+ BSONObjBuilder infoBuilder;
- invariant(client);
+ // The client information
+ client->reportState(infoBuilder);
- stdx::lock_guard<Client> lk(*client);
- const OperationContext* opCtx = client->getOperationContext();
-
- if (!includeAll) {
- // Skip over inactive connections.
- if (!opCtx)
- continue;
+ // Operation context specific information
+ infoBuilder.appendBool("active", static_cast<bool>(opCtx));
+ if (opCtx) {
+ infoBuilder.append("opid", opCtx->getOpID());
+ if (opCtx->isKillPending()) {
+ infoBuilder.append("killPending", true);
}
- BSONObjBuilder infoBuilder;
-
- // The client information
- client->reportState(infoBuilder);
+ CurOp::get(opCtx)->reportState(&infoBuilder);
- // Operation context specific information
- infoBuilder.appendBool("active", static_cast<bool>(opCtx));
- if (opCtx) {
- infoBuilder.append("opid", opCtx->getOpID());
- if (opCtx->isKillPending()) {
- infoBuilder.append("killPending", true);
- }
-
- CurOp::get(opCtx)->reportState(&infoBuilder);
-
- // LockState
- Locker::LockerInfo lockerInfo;
- opCtx->lockState()->getLockerInfo(&lockerInfo);
- fillLockerInfo(lockerInfo, infoBuilder);
- }
+ // LockState
+ Locker::LockerInfo lockerInfo;
+ opCtx->lockState()->getLockerInfo(&lockerInfo);
+ fillLockerInfo(lockerInfo, infoBuilder);
+ }
- infoBuilder.done();
+ infoBuilder.done();
- const BSONObj info = infoBuilder.obj();
+ const BSONObj info = infoBuilder.obj();
- if (includeAll || matcher.matches(info)) {
- inprogBuilder.append(info);
- }
+ if (includeAll || matcher.matches(info)) {
+ inprogBuilder.append(info);
}
+ }
- inprogBuilder.done();
-
- if (lockedForWriting()) {
- result.append("fsyncLock", true);
- result.append("info",
- "use db.fsyncUnlock() to terminate the fsync write/snapshot lock");
- }
+ inprogBuilder.done();
- return true;
+ if (lockedForWriting()) {
+ result.append("fsyncLock", true);
+ result.append("info",
+ "use db.fsyncUnlock() to terminate the fsync write/snapshot lock");
}
- } currentOpCommand;
+ return true;
+ }
+
+} currentOpCommand;
} // namespace mongo
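For context, a caller-side sketch of the command added above; every element after the leading {currentOp: 1}, except "$all", is treated as a filter matched against each operation's info document:

    // Illustrative fragment, assuming a connected DBClientBase* conn
    // authorized for the "inprog" action.
    BSONObj res;
    conn->runCommand("admin", BSON("currentOp" << 1 << "active" << true), res);
    // res["inprog"] holds the matching operation documents; passing
    // "$all": true returns every client and bypasses the filter, and an
    // extra "fsyncLock" field is reported while an fsync lock is held.
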
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 14a4ab955d5..dd9db449300 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -46,210 +46,197 @@
namespace mongo {
- using std::endl;
- using std::list;
- using std::set;
- using std::string;
- using std::unique_ptr;
- using std::vector;
+using std::endl;
+using std::list;
+using std::set;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- DBHashCmd dbhashCmd;
+DBHashCmd dbhashCmd;
- void logOpForDbHash(OperationContext* txn, const char* ns) {
- dbhashCmd.wipeCacheForCollection(txn, ns);
- }
+void logOpForDbHash(OperationContext* txn, const char* ns) {
+ dbhashCmd.wipeCacheForCollection(txn, ns);
+}
- // ----
+// ----
- DBHashCmd::DBHashCmd() : Command("dbHash", false, "dbhash") {
- }
-
- void DBHashCmd::addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dbHash);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
+DBHashCmd::DBHashCmd() : Command("dbHash", false, "dbhash") {}
- std::string DBHashCmd::hashCollection(OperationContext* opCtx,
- Database* db,
- const std::string& fullCollectionName,
- bool* fromCache) {
- stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
-
- if ( isCachable( fullCollectionName ) ) {
- cachedHashedLock.lock();
- string hash = _cachedHashed[fullCollectionName];
- if ( hash.size() > 0 ) {
- *fromCache = true;
- return hash;
- }
- }
+void DBHashCmd::addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dbHash);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+}
- *fromCache = false;
- Collection* collection = db->getCollection( fullCollectionName );
- if ( !collection )
- return "";
-
- IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex( opCtx );
-
- unique_ptr<PlanExecutor> exec;
- if ( desc ) {
- exec.reset(InternalPlanner::indexScan(opCtx,
- collection,
- desc,
- BSONObj(),
- BSONObj(),
- false,
- InternalPlanner::FORWARD,
- InternalPlanner::IXSCAN_FETCH));
- }
- else if ( collection->isCapped() ) {
- exec.reset(InternalPlanner::collectionScan(opCtx,
- fullCollectionName,
- collection));
- }
- else {
- log() << "can't find _id index for: " << fullCollectionName << endl;
- return "no _id _index";
+std::string DBHashCmd::hashCollection(OperationContext* opCtx,
+ Database* db,
+ const std::string& fullCollectionName,
+ bool* fromCache) {
+ stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
+
+ if (isCachable(fullCollectionName)) {
+ cachedHashedLock.lock();
+ string hash = _cachedHashed[fullCollectionName];
+ if (hash.size() > 0) {
+ *fromCache = true;
+ return hash;
}
+ }
- md5_state_t st;
- md5_init(&st);
+ *fromCache = false;
+ Collection* collection = db->getCollection(fullCollectionName);
+ if (!collection)
+ return "";
+
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(opCtx);
+
+ unique_ptr<PlanExecutor> exec;
+ if (desc) {
+ exec.reset(InternalPlanner::indexScan(opCtx,
+ collection,
+ desc,
+ BSONObj(),
+ BSONObj(),
+ false,
+ InternalPlanner::FORWARD,
+ InternalPlanner::IXSCAN_FETCH));
+ } else if (collection->isCapped()) {
+ exec.reset(InternalPlanner::collectionScan(opCtx, fullCollectionName, collection));
+ } else {
+ log() << "can't find _id index for: " << fullCollectionName << endl;
+ return "no _id _index";
+ }
- long long n = 0;
- PlanExecutor::ExecState state;
- BSONObj c;
- verify(NULL != exec.get());
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
- md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
- n++;
- }
- if (PlanExecutor::IS_EOF != state) {
- warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
- }
- md5digest d;
- md5_finish(&st, d);
- string hash = digestToString( d );
+ md5_state_t st;
+ md5_init(&st);
- if (cachedHashedLock.owns_lock()) {
- _cachedHashed[fullCollectionName] = hash;
- }
+ long long n = 0;
+ PlanExecutor::ExecState state;
+ BSONObj c;
+ verify(NULL != exec.get());
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
+ md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
+ n++;
+ }
+ if (PlanExecutor::IS_EOF != state) {
+ warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
+ }
+ md5digest d;
+ md5_finish(&st, d);
+ string hash = digestToString(d);
- return hash;
+ if (cachedHashedLock.owns_lock()) {
+ _cachedHashed[fullCollectionName] = hash;
}
- bool DBHashCmd::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Timer timer;
-
- set<string> desiredCollections;
- if ( cmdObj["collections"].type() == Array ) {
- BSONObjIterator i( cmdObj["collections"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.type() != String ) {
- errmsg = "collections entries have to be strings";
- return false;
- }
- desiredCollections.insert( e.String() );
- }
- }
+ return hash;
+}
- list<string> colls;
- const string ns = parseNs(dbname, cmdObj);
-
- // We lock the entire database in S-mode in order to ensure that the contents will not
- // change for the snapshot.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
- Database* db = autoDb.getDb();
- if (db) {
- db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
- colls.sort();
+bool DBHashCmd::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer timer;
+
+ set<string> desiredCollections;
+ if (cmdObj["collections"].type() == Array) {
+ BSONObjIterator i(cmdObj["collections"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() != String) {
+ errmsg = "collections entries have to be strings";
+ return false;
+ }
+ desiredCollections.insert(e.String());
}
+ }
- result.appendNumber( "numCollections" , (long long)colls.size() );
- result.append( "host" , prettyHostName() );
+ list<string> colls;
+ const string ns = parseNs(dbname, cmdObj);
+
+ // We lock the entire database in S-mode in order to ensure that the contents will not
+ // change for the snapshot.
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, ns, MODE_S);
+ Database* db = autoDb.getDb();
+ if (db) {
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
+ colls.sort();
+ }
- md5_state_t globalState;
- md5_init(&globalState);
+ result.appendNumber("numCollections", (long long)colls.size());
+ result.append("host", prettyHostName());
- vector<string> cached;
+ md5_state_t globalState;
+ md5_init(&globalState);
- BSONObjBuilder bb( result.subobjStart( "collections" ) );
- for ( list<string>::iterator i=colls.begin(); i != colls.end(); i++ ) {
- string fullCollectionName = *i;
- if ( fullCollectionName.size() -1 <= dbname.size() ) {
- errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
- return false;
- }
- string shortCollectionName = fullCollectionName.substr( dbname.size() + 1 );
+ vector<string> cached;
- if ( shortCollectionName.find( "system." ) == 0 )
- continue;
+ BSONObjBuilder bb(result.subobjStart("collections"));
+ for (list<string>::iterator i = colls.begin(); i != colls.end(); i++) {
+ string fullCollectionName = *i;
+ if (fullCollectionName.size() - 1 <= dbname.size()) {
+ errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
+ return false;
+ }
+ string shortCollectionName = fullCollectionName.substr(dbname.size() + 1);
- if ( desiredCollections.size() > 0 &&
- desiredCollections.count( shortCollectionName ) == 0 )
- continue;
+ if (shortCollectionName.find("system.") == 0)
+ continue;
- bool fromCache = false;
- string hash = hashCollection( txn, db, fullCollectionName, &fromCache );
+ if (desiredCollections.size() > 0 && desiredCollections.count(shortCollectionName) == 0)
+ continue;
- bb.append( shortCollectionName, hash );
+ bool fromCache = false;
+ string hash = hashCollection(txn, db, fullCollectionName, &fromCache);
- md5_append( &globalState , (const md5_byte_t*)hash.c_str() , hash.size() );
- if ( fromCache )
- cached.push_back( fullCollectionName );
- }
- bb.done();
+ bb.append(shortCollectionName, hash);
- md5digest d;
- md5_finish(&globalState, d);
- string hash = digestToString( d );
+ md5_append(&globalState, (const md5_byte_t*)hash.c_str(), hash.size());
+ if (fromCache)
+ cached.push_back(fullCollectionName);
+ }
+ bb.done();
- result.append( "md5" , hash );
- result.appendNumber( "timeMillis", timer.millis() );
+ md5digest d;
+ md5_finish(&globalState, d);
+ string hash = digestToString(d);
- result.append( "fromCache", cached );
+ result.append("md5", hash);
+ result.appendNumber("timeMillis", timer.millis());
- return 1;
- }
+ result.append("fromCache", cached);
- class DBHashCmd::DBHashLogOpHandler : public RecoveryUnit::Change {
- public:
- DBHashLogOpHandler(DBHashCmd* dCmd,
- StringData ns):
- _dCmd(dCmd),
- _ns(ns.toString()) {
+ return 1;
+}
- }
- void commit() {
- stdx::lock_guard<stdx::mutex> lk( _dCmd->_cachedHashedMutex );
- _dCmd->_cachedHashed.erase(_ns);
- }
- void rollback() { }
-
- private:
- DBHashCmd *_dCmd;
- const std::string _ns;
- };
-
- void DBHashCmd::wipeCacheForCollection(OperationContext* txn,
- StringData ns) {
- if ( !isCachable( ns ) )
- return;
- txn->recoveryUnit()->registerChange(new DBHashLogOpHandler(this, ns));
+class DBHashCmd::DBHashLogOpHandler : public RecoveryUnit::Change {
+public:
+ DBHashLogOpHandler(DBHashCmd* dCmd, StringData ns) : _dCmd(dCmd), _ns(ns.toString()) {}
+ void commit() {
+ stdx::lock_guard<stdx::mutex> lk(_dCmd->_cachedHashedMutex);
+ _dCmd->_cachedHashed.erase(_ns);
}
+ void rollback() {}
- bool DBHashCmd::isCachable( StringData ns ) const {
- return ns.startsWith( "config." );
- }
+private:
+ DBHashCmd* _dCmd;
+ const std::string _ns;
+};
+
+void DBHashCmd::wipeCacheForCollection(OperationContext* txn, StringData ns) {
+ if (!isCachable(ns))
+ return;
+ txn->recoveryUnit()->registerChange(new DBHashLogOpHandler(this, ns));
+}
+bool DBHashCmd::isCachable(StringData ns) const {
+ return ns.startsWith("config.");
+}
}
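For context, a caller-side sketch of the dbHash command; the database and collection names are assumptions. Each per-collection hash is an md5 over the documents in _id-index order, and hashes for config.* collections may be served from the cache that logOpForDbHash() invalidates:

    // Illustrative fragment, assuming a connected DBClientBase* conn.
    BSONObj res;
    conn->runCommand("config",
                     BSON("dbHash" << 1 << "collections"
                                   << BSON_ARRAY("shards" << "chunks")),
                     res);
    // res["collections"] maps each collection to its hash, res["md5"] is the
    // digest over those hashes, and res["fromCache"] lists collections whose
    // hash was reused from the cache.
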
diff --git a/src/mongo/db/commands/dbhash.h b/src/mongo/db/commands/dbhash.h
index aa9a396b080..8b566f98327 100644
--- a/src/mongo/db/commands/dbhash.h
+++ b/src/mongo/db/commands/dbhash.h
@@ -35,41 +35,45 @@
namespace mongo {
- void logOpForDbHash( OperationContext* txn, const char* ns );
-
- class DBHashCmd : public Command {
- public:
- DBHashCmd();
-
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out);
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- void wipeCacheForCollection(OperationContext* txn, StringData ns);
-
- private:
-
- /**
- * RecoveryUnit::Change subclass used to commit work for dbhash logOp listener
- */
- class DBHashLogOpHandler;
-
- bool isCachable( StringData ns ) const;
-
- std::string hashCollection( OperationContext* opCtx, Database* db, const std::string& fullCollectionName, bool* fromCache );
-
- std::map<std::string,std::string> _cachedHashed;
- stdx::mutex _cachedHashedMutex;
-
- };
-
+void logOpForDbHash(OperationContext* txn, const char* ns);
+
+class DBHashCmd : public Command {
+public:
+ DBHashCmd();
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out);
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ void wipeCacheForCollection(OperationContext* txn, StringData ns);
+
+private:
+ /**
+ * RecoveryUnit::Change subclass used to commit work for dbhash logOp listener
+ */
+ class DBHashLogOpHandler;
+
+ bool isCachable(StringData ns) const;
+
+ std::string hashCollection(OperationContext* opCtx,
+ Database* db,
+ const std::string& fullCollectionName,
+ bool* fromCache);
+
+ std::map<std::string, std::string> _cachedHashed;
+ stdx::mutex _cachedHashedMutex;
+};
}
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index ceae947fc52..2c281140c63 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -47,137 +47,142 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
-
- class DistinctCommand : public Command {
- public:
- DistinctCommand() : Command("distinct") {}
-
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+
+class DistinctCommand : public Command {
+public:
+ DistinctCommand() : Command("distinct") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer t;
+
+ // ensure that the key is a string
+ uassert(18510,
+ mongoutils::str::stream() << "The first argument to the distinct command "
+ << "must be a string but was a "
+ << typeName(cmdObj["key"].type()),
+ cmdObj["key"].type() == mongo::String);
+
+ // ensure that the where clause is a document
+ if (cmdObj["query"].isNull() == false && cmdObj["query"].eoo() == false) {
+ uassert(18511,
+ mongoutils::str::stream() << "The query for the distinct command must be a "
+ << "document but was a "
+ << typeName(cmdObj["query"].type()),
+ cmdObj["query"].type() == mongo::Object);
}
- virtual void help( stringstream &help ) const {
- help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
- }
+ string key = cmdObj["key"].valuestrsafe();
+ BSONObj keyPattern = BSON(key << 1);
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- Timer t;
-
- // ensure that the key is a string
- uassert(18510,
- mongoutils::str::stream() << "The first argument to the distinct command " <<
- "must be a string but was a " << typeName(cmdObj["key"].type()),
- cmdObj["key"].type() == mongo::String);
-
- // ensure that the where clause is a document
- if( cmdObj["query"].isNull() == false && cmdObj["query"].eoo() == false ){
- uassert(18511,
- mongoutils::str::stream() << "The query for the distinct command must be a " <<
- "document but was a " << typeName(cmdObj["query"].type()),
- cmdObj["query"].type() == mongo::Object);
- }
+ BSONObj query = getQuery(cmdObj);
- string key = cmdObj["key"].valuestrsafe();
- BSONObj keyPattern = BSON( key << 1 );
+ int bufSize = BSONObjMaxUserSize - 4096;
+ BufBuilder bb(bufSize);
+ char* start = bb.buf();
- BSONObj query = getQuery( cmdObj );
+ BSONArrayBuilder arr(bb);
+ BSONElementSet values;
- int bufSize = BSONObjMaxUserSize - 4096;
- BufBuilder bb( bufSize );
- char * start = bb.buf();
+ const string ns = parseNs(dbname, cmdObj);
+ AutoGetCollectionForRead ctx(txn, ns);
- BSONArrayBuilder arr( bb );
- BSONElementSet values;
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ result.appendArray("values", BSONObj());
+ result.append("stats", BSON("n" << 0 << "nscanned" << 0 << "nscannedObjects" << 0));
+ return true;
+ }
- const string ns = parseNs(dbname, cmdObj);
- AutoGetCollectionForRead ctx(txn, ns);
+ PlanExecutor* rawExec;
+ Status status =
+ getExecutorDistinct(txn, collection, query, key, PlanExecutor::YIELD_AUTO, &rawExec);
+ if (!status.isOK()) {
+ uasserted(17216,
+ mongoutils::str::stream() << "Can't get executor for query " << query << ": "
+ << status.toString());
+ return 0;
+ }
- Collection* collection = ctx.getCollection();
- if (!collection) {
- result.appendArray( "values" , BSONObj() );
- result.append("stats", BSON("n" << 0 <<
- "nscanned" << 0 <<
- "nscannedObjects" << 0));
- return true;
- }
+ unique_ptr<PlanExecutor> exec(rawExec);
+
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ // Distinct expands arrays.
+ //
+ // If our query is covered, each value of the key should be in the index key and
+ // available to us without this. If a collection scan is providing the data, we may
+ // have to expand an array.
+ BSONElementSet elts;
+ obj.getFieldsDotted(key, elts);
+
+ for (BSONElementSet::iterator it = elts.begin(); it != elts.end(); ++it) {
+ BSONElement elt = *it;
+ if (values.count(elt)) {
+ continue;
+ }
+ int currentBufPos = bb.len();
- PlanExecutor* rawExec;
- Status status = getExecutorDistinct(txn,
- collection,
- query,
- key,
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!status.isOK()) {
- uasserted(17216, mongoutils::str::stream() << "Can't get executor for query "
- << query << ": " << status.toString());
- return 0;
- }
+ uassert(17217,
+ "distinct too big, 16mb cap",
+ (currentBufPos + elt.size() + 1024) < bufSize);
- unique_ptr<PlanExecutor> exec(rawExec);
-
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // Distinct expands arrays.
- //
- // If our query is covered, each value of the key should be in the index key and
- // available to us without this. If a collection scan is providing the data, we may
- // have to expand an array.
- BSONElementSet elts;
- obj.getFieldsDotted(key, elts);
-
- for (BSONElementSet::iterator it = elts.begin(); it != elts.end(); ++it) {
- BSONElement elt = *it;
- if (values.count(elt)) { continue; }
- int currentBufPos = bb.len();
-
- uassert(17217, "distinct too big, 16mb cap",
- (currentBufPos + elt.size() + 1024) < bufSize);
-
- arr.append(elt);
- BSONElement x(start + currentBufPos);
- values.insert(x);
- }
+ arr.append(elt);
+ BSONElement x(start + currentBufPos);
+ values.insert(x);
}
+ }
- // Get summary information about the plan.
- PlanSummaryStats stats;
- Explain::getSummaryStats(exec.get(), &stats);
-
- verify( start == bb.buf() );
+ // Get summary information about the plan.
+ PlanSummaryStats stats;
+ Explain::getSummaryStats(exec.get(), &stats);
- result.appendArray( "values" , arr.done() );
+ verify(start == bb.buf());
- {
- BSONObjBuilder b;
- b.appendNumber( "n" , stats.nReturned );
- b.appendNumber( "nscanned" , stats.totalKeysExamined );
- b.appendNumber( "nscannedObjects" , stats.totalDocsExamined );
- b.appendNumber( "timems" , t.millis() );
- b.append( "planSummary" , Explain::getPlanSummary(exec.get()) );
- result.append( "stats" , b.obj() );
- }
+ result.appendArray("values", arr.done());
- return true;
+ {
+ BSONObjBuilder b;
+ b.appendNumber("n", stats.nReturned);
+ b.appendNumber("nscanned", stats.totalKeysExamined);
+ b.appendNumber("nscannedObjects", stats.totalDocsExamined);
+ b.appendNumber("timems", t.millis());
+ b.append("planSummary", Explain::getPlanSummary(exec.get()));
+ result.append("stats", b.obj());
}
- } distinctCmd;
+
+ return true;
+ }
+} distinctCmd;
} // namespace mongo
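
For reference, the reformatted run() above expects a command document whose "key" element is a string and whose optional "query" element is a document, and it replies with a deduplicated "values" array plus a "stats" subdocument (n, nscanned, nscannedObjects, timems, planSummary). The sketch below only illustrates those request/reply shapes with the same in-tree BSON builders used by the command; the collection name "widgets" and key "a.b" are illustrative and not part of the patch.

    // Sketch only: request and reply shapes for the distinct command above.
    #include <vector>
    #include "mongo/db/jsobj.h"  // BSON macro, BSONObj, BSONObjBuilder (in-tree header)

    namespace example {

    mongo::BSONObj makeDistinctRequest() {
        // {distinct: "widgets", key: "a.b", query: {}} -- "key" must be a string
        // (uassert 18510) and "query", when present, a document (uassert 18511).
        return BSON("distinct" << "widgets"
                               << "key" << "a.b"
                               << "query" << mongo::BSONObj());
    }

    void readDistinctReply(const mongo::BSONObj& reply) {
        // "values" holds the array-expanded, deduplicated key values; "stats"
        // carries n, nscanned, nscannedObjects, timems and planSummary.
        std::vector<mongo::BSONElement> values = reply["values"].Array();
        long long keysExamined = reply["stats"].Obj()["nscanned"].numberLong();
        (void)values;
        (void)keysExamined;
    }

    }  // namespace example
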
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 52dfba38bf2..d0e6ac633a5 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -59,142 +59,142 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- /* "dropIndexes" is now the preferred form - "deleteIndexes" deprecated */
- class CmdDropIndexes : public Command {
- public:
- virtual bool slaveOk() const {
+using std::endl;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+/* "dropIndexes" is now the preferred form - "deleteIndexes" deprecated */
+class CmdDropIndexes : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "drop indexes for a collection";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dropIndex);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") {}
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNsCollectionRequired(dbname, jsobj);
+ return appendCommandStatus(result, dropIndexes(txn, NamespaceString(ns), jsobj, &result));
+ }
+
+} cmdDropIndexes;
+
+class CmdReIndex : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ } // can reindex on a secondary
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "re-index a collection";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::reIndex);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+ CmdReIndex() : Command("reIndex") {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ DBDirectClient db(txn);
+
+ const std::string toDeleteNs = parseNsCollectionRequired(dbname, jsobj);
+
+ LOG(0) << "CMD: reIndex " << toDeleteNs << endl;
+
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ OldClientContext ctx(txn, toDeleteNs);
+
+ Collection* collection = ctx.db()->getCollection(toDeleteNs);
+
+ if (!collection) {
+ errmsg = "ns not found";
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "drop indexes for a collection";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dropIndex);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
-
- CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNsCollectionRequired(dbname, jsobj);
- return appendCommandStatus(result,
- dropIndexes(txn,
- NamespaceString(ns),
- jsobj,
- &result));
- }
-
- } cmdDropIndexes;
-
- class CmdReIndex : public Command {
- public:
- virtual bool slaveOk() const { return true; } // can reindex on a secondary
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "re-index a collection";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::reIndex);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- CmdReIndex() : Command("reIndex") { }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- DBDirectClient db(txn);
-
- const std::string toDeleteNs = parseNsCollectionRequired(dbname, jsobj);
-
- LOG(0) << "CMD: reIndex " << toDeleteNs << endl;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, toDeleteNs);
-
- Collection* collection = ctx.db()->getCollection( toDeleteNs );
-
- if ( !collection ) {
- errmsg = "ns not found";
- return false;
- }
-
- BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );
-
- vector<BSONObj> all;
- {
- vector<string> indexNames;
- collection->getCatalogEntry()->getAllIndexes( txn, &indexNames );
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- const string& name = indexNames[i];
- BSONObj spec = collection->getCatalogEntry()->getIndexSpec( txn, name );
- all.push_back(spec.removeField("v").getOwned());
-
- const BSONObj key = spec.getObjectField("key");
- const Status keyStatus = validateKeyPattern(key);
- if (!keyStatus.isOK()) {
- errmsg = str::stream()
- << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
- << " For more info see http://dochub.mongodb.org/core/index-validation";
- return false;
- }
+ BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs);
+
+ vector<BSONObj> all;
+ {
+ vector<string> indexNames;
+ collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ const string& name = indexNames[i];
+ BSONObj spec = collection->getCatalogEntry()->getIndexSpec(txn, name);
+ all.push_back(spec.removeField("v").getOwned());
+
+ const BSONObj key = spec.getObjectField("key");
+ const Status keyStatus = validateKeyPattern(key);
+ if (!keyStatus.isOK()) {
+ errmsg = str::stream()
+ << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
+ << " For more info see http://dochub.mongodb.org/core/index-validation";
+ return false;
}
}
+ }
- result.appendNumber( "nIndexesWas", all.size() );
+ result.appendNumber("nIndexesWas", all.size());
- {
- WriteUnitOfWork wunit(txn);
- Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
- if ( !s.isOK() ) {
- errmsg = "dropIndexes failed";
- return appendCommandStatus( result, s );
- }
- wunit.commit();
+ {
+ WriteUnitOfWork wunit(txn);
+ Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
+ if (!s.isOK()) {
+ errmsg = "dropIndexes failed";
+ return appendCommandStatus(result, s);
}
+ wunit.commit();
+ }
- MultiIndexBlock indexer(txn, collection);
- // do not want interruption as that will leave us without indexes.
-
- Status status = indexer.init(all);
- if (!status.isOK())
- return appendCommandStatus( result, status );
-
- status = indexer.insertAllDocumentsInCollection();
- if (!status.isOK())
- return appendCommandStatus( result, status );
+ MultiIndexBlock indexer(txn, collection);
+ // do not want interruption as that will leave us without indexes.
- {
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
- }
+ Status status = indexer.init(all);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- result.append( "nIndexes", (int)all.size() );
- result.append( "indexes", all );
+ status = indexer.insertAllDocumentsInCollection();
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- return true;
+ {
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
}
- } cmdReIndex;
+ result.append("nIndexes", (int)all.size());
+ result.append("indexes", all);
+ return true;
+ }
+} cmdReIndex;
}
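
As a companion to the two commands above, their request and reply shapes look roughly as follows. This is a hedged sketch built with the in-tree BSON builders; the collection name "widgets" and index name "a_1" are illustrative and do not come from the patch.

    // Sketch only: command shapes for dropIndexes and reIndex.
    #include "mongo/db/jsobj.h"

    namespace example {

    // {dropIndexes: "widgets", index: "a_1"} -- "deleteIndexes" is the
    // deprecated alias registered in the CmdDropIndexes constructor above.
    mongo::BSONObj makeDropIndexesRequest() {
        return BSON("dropIndexes" << "widgets" << "index" << "a_1");
    }

    // {reIndex: "widgets"} -- on success the reply carries "nIndexesWas",
    // "nIndexes" and the rebuilt specs (with their "v" field stripped) under
    // "indexes", as appended at the end of CmdReIndex::run().
    mongo::BSONObj makeReIndexRequest() {
        return BSON("reIndex" << "widgets");
    }

    }  // namespace example
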
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 9560a41f9fc..9a3abcc6176 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -43,84 +43,82 @@
namespace mongo {
- using std::string;
+using std::string;
- static CmdExplain cmdExplain;
+static CmdExplain cmdExplain;
- Status CmdExplain::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (Object != cmdObj.firstElement().type()) {
- return Status(ErrorCodes::BadValue, "explain command requires a nested object");
- }
+Status CmdExplain::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (Object != cmdObj.firstElement().type()) {
+ return Status(ErrorCodes::BadValue, "explain command requires a nested object");
+ }
+
+ BSONObj explainObj = cmdObj.firstElement().Obj();
+
+ Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ if (NULL == commToExplain) {
+ mongoutils::str::stream ss;
+ ss << "unknown command: " << explainObj.firstElementFieldName();
+ return Status(ErrorCodes::CommandNotFound, ss);
+ }
- BSONObj explainObj = cmdObj.firstElement().Obj();
+ return commToExplain->checkAuthForCommand(client, dbname, explainObj);
+}
+
+bool CmdExplain::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ExplainCommon::Verbosity verbosity;
+ Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
+ if (!parseStatus.isOK()) {
+ return appendCommandStatus(result, parseStatus);
+ }
+
+ // This is the nested command which we are explaining.
+ BSONObj explainObj = cmdObj.firstElement().Obj();
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
- if (NULL == commToExplain) {
- mongoutils::str::stream ss;
- ss << "unknown command: " << explainObj.firstElementFieldName();
- return Status(ErrorCodes::CommandNotFound, ss);
- }
+ Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ if (NULL == commToExplain) {
+ mongoutils::str::stream ss;
+ ss << "Explain failed due to unknown command: " << explainObj.firstElementFieldName();
+ Status explainStatus(ErrorCodes::CommandNotFound, ss);
+ return appendCommandStatus(result, explainStatus);
+ }
- return commToExplain->checkAuthForCommand(client, dbname, explainObj);
+ // Check whether the child command is allowed to run here. TODO: this logic is
+ // copied from Command::execCommand and should be abstracted. Until then, make
+ // sure to keep it up to date.
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ bool iAmPrimary = replCoord->canAcceptWritesForDatabase(dbname);
+ bool commandCanRunOnSecondary = commToExplain->slaveOk();
+
+ bool commandIsOverriddenToRunOnSecondary = commToExplain->slaveOverrideOk() &&
+ (rpc::ServerSelectionMetadata::get(txn).isSecondaryOk() ||
+ rpc::ServerSelectionMetadata::get(txn).getReadPreference() != boost::none);
+ bool iAmStandalone = !txn->writesAreReplicated();
+
+ const bool canRunHere = iAmPrimary || commandCanRunOnSecondary ||
+ commandIsOverriddenToRunOnSecondary || iAmStandalone;
+
+ if (!canRunHere) {
+ mongoutils::str::stream ss;
+ ss << "Explain's child command cannot run on this node. "
+ << "Are you explaining a write command on a secondary?";
+ appendCommandStatus(result, false, ss);
+ return false;
}
- bool CmdExplain::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj, int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- ExplainCommon::Verbosity verbosity;
- Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus);
- }
-
- // This is the nested command which we are explaining.
- BSONObj explainObj = cmdObj.firstElement().Obj();
-
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
- if (NULL == commToExplain) {
- mongoutils::str::stream ss;
- ss << "Explain failed due to unknown command: " << explainObj.firstElementFieldName();
- Status explainStatus(ErrorCodes::CommandNotFound, ss);
- return appendCommandStatus(result, explainStatus);
- }
-
- // Check whether the child command is allowed to run here. TODO: this logic is
- // copied from Command::execCommand and should be abstracted. Until then, make
- // sure to keep it up to date.
- repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- bool iAmPrimary = replCoord->canAcceptWritesForDatabase(dbname);
- bool commandCanRunOnSecondary = commToExplain->slaveOk();
-
- bool commandIsOverriddenToRunOnSecondary = commToExplain->slaveOverrideOk() &&
- (rpc::ServerSelectionMetadata::get(txn).isSecondaryOk() ||
- rpc::ServerSelectionMetadata::get(txn).getReadPreference() != boost::none);
- bool iAmStandalone = !txn->writesAreReplicated();
-
- const bool canRunHere = iAmPrimary ||
- commandCanRunOnSecondary ||
- commandIsOverriddenToRunOnSecondary ||
- iAmStandalone;
-
- if (!canRunHere) {
- mongoutils::str::stream ss;
- ss << "Explain's child command cannot run on this node. "
- << "Are you explaining a write command on a secondary?";
- appendCommandStatus(result, false, ss);
- return false;
- }
-
- // Actually call the nested command's explain(...) method.
- Status explainStatus = commToExplain->explain(txn, dbname, explainObj, verbosity, &result);
- if (!explainStatus.isOK()) {
- return appendCommandStatus(result, explainStatus);
- }
-
- return true;
+ // Actually call the nested command's explain(...) method.
+ Status explainStatus = commToExplain->explain(txn, dbname, explainObj, verbosity, &result);
+ if (!explainStatus.isOK()) {
+ return appendCommandStatus(result, explainStatus);
}
-} // namespace mongo
+ return true;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/explain_cmd.h b/src/mongo/db/commands/explain_cmd.h
index 24edec205ca..7d77bd3af36 100644
--- a/src/mongo/db/commands/explain_cmd.h
+++ b/src/mongo/db/commands/explain_cmd.h
@@ -33,57 +33,63 @@
namespace mongo {
- /**
- * The explain command is used to generate explain output for any read or write
- * operation which has a query component (e.g. find, count, update, remove, distinct, etc.).
- *
- * The explain command takes as its argument a nested object which specifies the command to
- * explain, and a verbosity indicator. For example:
- *
- * {explain: {count: "coll", query: {foo: "bar"}}, verbosity: "executionStats"}
- *
- * This command like a dispatcher: it just retrieves a pointer to the nested command and
- * invokes its explain() implementation.
- */
- class CmdExplain : public Command {
- public:
- CmdExplain() : Command("explain") { }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
+/**
+ * The explain command is used to generate explain output for any read or write
+ * operation which has a query component (e.g. find, count, update, remove, distinct, etc.).
+ *
+ * The explain command takes as its argument a nested object which specifies the command to
+ * explain, and a verbosity indicator. For example:
+ *
+ * {explain: {count: "coll", query: {foo: "bar"}}, verbosity: "executionStats"}
+ *
+ * This command works like a dispatcher: it just retrieves a pointer to the nested command and
+ * invokes its explain() implementation.
+ */
+class CmdExplain : public Command {
+public:
+ CmdExplain() : Command("explain") {}
- /**
- * Running an explain on a secondary requires explicitly setting slaveOk.
- */
- virtual bool slaveOk() const {
- return false;
- }
- virtual bool slaveOverrideOk() const {
- return true;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool maintenanceOk() const { return false; }
+ /**
+ * Running an explain on a secondary requires explicitly setting slaveOk.
+ */
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- virtual bool adminOnly() const { return false; }
+ virtual bool maintenanceOk() const {
+ return false;
+ }
- virtual void help( std::stringstream& help ) const {
- help << "explain database reads and writes";
- }
+ virtual bool adminOnly() const {
+ return false;
+ }
- /**
- * You are authorized to run an explain if you are authorized to run
- * the command that you are explaining. The auth check is performed recursively
- * on the nested command.
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+ virtual void help(std::stringstream& help) const {
+ help << "explain database reads and writes";
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj, int options,
- std::string& errmsg,
- BSONObjBuilder& result);
+ /**
+ * You are authorized to run an explain if you are authorized to run
+ * the command that you are explaining. The auth check is performed recursively
+ * on the nested command.
+ */
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- };
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+};
} // namespace mongo
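
The doc comment on CmdExplain above already gives the wire shape of an explain invocation; for completeness, here is that same example built with the in-tree BSON builders. The collection name "coll" and the query come straight from the comment; treat this as a sketch rather than canonical usage.

    // Sketch only: {explain: {count: "coll", query: {foo: "bar"}},
    //               verbosity: "executionStats"}
    #include "mongo/db/jsobj.h"

    namespace example {

    mongo::BSONObj makeExplainRequest() {
        return BSON("explain" << BSON("count" << "coll"
                                              << "query" << BSON("foo" << "bar"))
                              << "verbosity" << "executionStats");
    }

    }  // namespace example
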
diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp
index 944d3ca491c..2b6e5cff0fc 100644
--- a/src/mongo/db/commands/fail_point_cmd.cpp
+++ b/src/mongo/db/commands/fail_point_cmd.cpp
@@ -37,140 +37,136 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- /**
- * Command for modifying installed fail points.
- *
- * Format
- * {
- * configureFailPoint: <string>, // name of the fail point.
- * mode: <string|Object>, // the new mode to set. Can have one of the
- * following format:
- *
- * 1. 'off' - disable fail point.
- * 2. 'alwaysOn' - fail point is always active.
- * 3. { activationProbability: <n> } - n should be a double between 0 and 1,
- * representing the probability that the fail point will fire. 0 means never,
- * 1 means (nearly) always.
- * 4. { times: <n> } - n should be positive and within the range of a 32 bit
- * signed integer and this is the number of passes on the fail point will
- * remain activated.
- *
- * data: <Object> // optional arbitrary object to store.
- * }
- */
- class FaultInjectCmd: public Command {
- public:
- FaultInjectCmd(): Command("configureFailPoint") {}
-
- virtual bool slaveOk() const {
- return true;
- }
+using std::string;
+using std::stringstream;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+/**
+ * Command for modifying installed fail points.
+ *
+ * Format
+ * {
+ * configureFailPoint: <string>, // name of the fail point.
+ * mode: <string|Object>, // the new mode to set. Can have one of the
+ *         following formats:
+ *
+ * 1. 'off' - disable fail point.
+ * 2. 'alwaysOn' - fail point is always active.
+ * 3. { activationProbability: <n> } - n should be a double between 0 and 1,
+ * representing the probability that the fail point will fire. 0 means never,
+ * 1 means (nearly) always.
+ *     4. { times: <n> } - n should be positive and within the range of a 32 bit
+ *        signed integer; it is the number of passes for which the fail point
+ *        will remain activated.
+ *
+ * data: <Object> // optional arbitrary object to store.
+ * }
+ */
+class FaultInjectCmd : public Command {
+public:
+ FaultInjectCmd() : Command("configureFailPoint") {}
- virtual bool adminOnly() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual void help(stringstream& h) const {
- h << "modifies the settings of a fail point";
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+
+ virtual void help(stringstream& h) const {
+ h << "modifies the settings of a fail point";
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string failPointName(cmdObj.firstElement().str());
+ FailPointRegistry* registry = getGlobalFailPointRegistry();
+ FailPoint* failPoint = registry->getFailPoint(failPointName);
+
+ if (failPoint == NULL) {
+ errmsg = failPointName + " not found";
+ return false;
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const string failPointName(cmdObj.firstElement().str());
- FailPointRegistry* registry = getGlobalFailPointRegistry();
- FailPoint* failPoint = registry->getFailPoint(failPointName);
-
- if (failPoint == NULL) {
- errmsg = failPointName + " not found";
+ FailPoint::Mode mode = FailPoint::alwaysOn;
+ FailPoint::ValType val = 0;
+
+ const BSONElement modeElem(cmdObj["mode"]);
+ if (modeElem.eoo()) {
+ result.appendElements(failPoint->toBSON());
+ return true;
+ } else if (modeElem.type() == String) {
+ const string modeStr(modeElem.valuestr());
+
+ if (modeStr == "off") {
+ mode = FailPoint::off;
+ } else if (modeStr == "alwaysOn") {
+ mode = FailPoint::alwaysOn;
+ } else {
+ errmsg = "unknown mode: " + modeStr;
return false;
}
+ } else if (modeElem.type() == Object) {
+ const BSONObj modeObj(modeElem.Obj());
- FailPoint::Mode mode = FailPoint::alwaysOn;
- FailPoint::ValType val = 0;
+ if (modeObj.hasField("times")) {
+ mode = FailPoint::nTimes;
+ const int intVal = modeObj["times"].numberInt();
- const BSONElement modeElem(cmdObj["mode"]);
- if (modeElem.eoo()) {
- result.appendElements(failPoint->toBSON());
- return true;
- }
- else if (modeElem.type() == String) {
- const string modeStr(modeElem.valuestr());
-
- if (modeStr == "off") {
- mode = FailPoint::off;
- }
- else if (modeStr == "alwaysOn") {
- mode = FailPoint::alwaysOn;
- }
- else {
- errmsg = "unknown mode: " + modeStr;
+ if (intVal < 0) {
+ errmsg = "times should be positive";
return false;
}
- }
- else if (modeElem.type() == Object) {
- const BSONObj modeObj(modeElem.Obj());
-
- if (modeObj.hasField("times")) {
- mode = FailPoint::nTimes;
- const int intVal = modeObj["times"].numberInt();
- if (intVal < 0) {
- errmsg = "times should be positive";
- return false;
- }
-
- val = intVal;
- }
- else if (modeObj.hasField("activationProbability")) {
- mode = FailPoint::random;
- const double activationProbability =
- modeObj["activationProbability"].numberDouble();
- if (activationProbability < 0 || activationProbability > 1) {
- errmsg = str::stream() <<
- "activationProbability must be between 0.0 and 1.0; found " <<
- activationProbability;
- return false;
- }
- val = static_cast<int32_t>(
- std::numeric_limits<int32_t>::max() * activationProbability);
- }
- else {
- errmsg = "invalid mode object";
+ val = intVal;
+ } else if (modeObj.hasField("activationProbability")) {
+ mode = FailPoint::random;
+ const double activationProbability =
+ modeObj["activationProbability"].numberDouble();
+ if (activationProbability < 0 || activationProbability > 1) {
+ errmsg = str::stream()
+ << "activationProbability must be between 0.0 and 1.0; found "
+ << activationProbability;
return false;
}
- }
- else {
- errmsg = "invalid mode format";
+ val = static_cast<int32_t>(std::numeric_limits<int32_t>::max() *
+ activationProbability);
+ } else {
+ errmsg = "invalid mode object";
return false;
}
-
- BSONObj dataObj;
- if (cmdObj.hasField("data")) {
- dataObj = cmdObj["data"].Obj();
- }
-
- failPoint->setMode(mode, val, dataObj);
- return true;
+ } else {
+ errmsg = "invalid mode format";
+ return false;
}
- };
- MONGO_INITIALIZER(RegisterFaultInjectCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new FaultInjectCmd();
+
+ BSONObj dataObj;
+ if (cmdObj.hasField("data")) {
+ dataObj = cmdObj["data"].Obj();
}
- return Status::OK();
+
+ failPoint->setMode(mode, val, dataObj);
+ return true;
}
+};
+MONGO_INITIALIZER(RegisterFaultInjectCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new FaultInjectCmd();
+ }
+ return Status::OK();
+}
}
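
The mode formats documented on FaultInjectCmd above translate into the following request shapes. This is a sketch using the in-tree BSON builders; the fail point name "hypotheticalFailPoint" is illustrative, and the optional "data" document is arbitrary, as the comment notes.

    // Sketch only: configureFailPoint request shapes for the documented modes.
    #include "mongo/db/jsobj.h"

    namespace example {

    mongo::BSONObj disableFailPoint() {
        // mode: "off"
        return BSON("configureFailPoint" << "hypotheticalFailPoint"
                                         << "mode" << "off");
    }

    mongo::BSONObj enableFailPointNTimes(int n) {
        // mode: {times: n} -- n must not be negative (checked in run() above).
        return BSON("configureFailPoint" << "hypotheticalFailPoint"
                                         << "mode" << BSON("times" << n)
                                         << "data" << BSON("note" << "arbitrary"));
    }

    mongo::BSONObj enableFailPointRandomly(double p) {
        // mode: {activationProbability: p} with 0.0 <= p <= 1.0.
        return BSON("configureFailPoint" << "hypotheticalFailPoint"
                                         << "mode" << BSON("activationProbability" << p));
    }

    }  // namespace example
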
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 609b29673c2..ee7c2544ac3 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -67,423 +67,414 @@ namespace mongo {
namespace {
- const UpdateStats* getUpdateStats(const PlanStageStats* stats) {
- // The stats may refer to an update stage, or a projection stage wrapping an update stage.
- if (StageType::STAGE_PROJECTION == stats->stageType) {
- invariant(stats->children.size() == 1);
- stats = stats->children[0];
- }
+const UpdateStats* getUpdateStats(const PlanStageStats* stats) {
+ // The stats may refer to an update stage, or a projection stage wrapping an update stage.
+ if (StageType::STAGE_PROJECTION == stats->stageType) {
+ invariant(stats->children.size() == 1);
+ stats = stats->children[0];
+ }
- invariant(StageType::STAGE_UPDATE == stats->stageType);
- return static_cast<UpdateStats*>(stats->specific.get());
+ invariant(StageType::STAGE_UPDATE == stats->stageType);
+ return static_cast<UpdateStats*>(stats->specific.get());
+}
+
+const DeleteStats* getDeleteStats(const PlanStageStats* stats) {
+ // The stats may refer to a delete stage, or a projection stage wrapping a delete stage.
+ if (StageType::STAGE_PROJECTION == stats->stageType) {
+ invariant(stats->children.size() == 1);
+ stats = stats->children[0];
}
- const DeleteStats* getDeleteStats(const PlanStageStats* stats) {
- // The stats may refer to a delete stage, or a projection stage wrapping a delete stage.
- if (StageType::STAGE_PROJECTION == stats->stageType) {
- invariant(stats->children.size() == 1);
- stats = stats->children[0];
- }
+ invariant(StageType::STAGE_DELETE == stats->stageType);
+ return static_cast<DeleteStats*>(stats->specific.get());
+}
- invariant(StageType::STAGE_DELETE == stats->stageType);
- return static_cast<DeleteStats*>(stats->specific.get());
+/**
+ * If the operation succeeded, then Status::OK() is returned, possibly with a document value
+ * to return to the client. If no matching document to update or remove was found, then none
+ * is returned. Otherwise, the updated or deleted document is returned.
+ *
+ * If the operation failed, then an error Status is returned.
+ */
+StatusWith<boost::optional<BSONObj>> advanceExecutor(PlanExecutor* exec, bool isRemove) {
+ BSONObj value;
+ PlanExecutor::ExecState state = exec->getNext(&value, nullptr);
+ if (PlanExecutor::ADVANCED == state) {
+ return boost::optional<BSONObj>(std::move(value));
}
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "Plan executor error during findAndModify: " << PlanExecutor::statestr(state)
+ << ", stats: " << Explain::statsToBSON(*stats);
- /**
- * If the operation succeeded, then Status::OK() is returned, possibly with a document value
- * to return to the client. If no matching document to update or remove was found, then none
- * is returned. Otherwise, the updated or deleted document is returned.
- *
- * If the operation failed, then an error Status is returned.
- */
- StatusWith<boost::optional<BSONObj>> advanceExecutor(PlanExecutor* exec, bool isRemove) {
- BSONObj value;
- PlanExecutor::ExecState state = exec->getNext(&value, nullptr);
- if (PlanExecutor::ADVANCED == state) {
- return boost::optional<BSONObj>(std::move(value));
+ if (WorkingSetCommon::isValidStatusMemberObject(value)) {
+ const Status errorStatus = WorkingSetCommon::getMemberObjectStatus(value);
+ invariant(!errorStatus.isOK());
+ return {errorStatus.code(), errorStatus.reason()};
}
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "Plan executor error during findAndModify: "
- << PlanExecutor::statestr(state)
- << ", stats: " << Explain::statsToBSON(*stats);
-
- if (WorkingSetCommon::isValidStatusMemberObject(value)) {
- const Status errorStatus =
- WorkingSetCommon::getMemberObjectStatus(value);
- invariant(!errorStatus.isOK());
- return {errorStatus.code(), errorStatus.reason()};
- }
- const std::string opstr = isRemove ? "delete" : "update";
- return {ErrorCodes::OperationFailed, str::stream()
- << "executor returned " << PlanExecutor::statestr(state)
- << " while executing " << opstr};
+ const std::string opstr = isRemove ? "delete" : "update";
+ return {ErrorCodes::OperationFailed,
+ str::stream() << "executor returned " << PlanExecutor::statestr(state)
+ << " while executing " << opstr};
+ }
+ invariant(state == PlanExecutor::IS_EOF);
+ return boost::optional<BSONObj>(boost::none);
+}
+
+void makeUpdateRequest(const FindAndModifyRequest& args,
+ bool explain,
+ UpdateLifecycleImpl* updateLifecycle,
+ UpdateRequest* requestOut) {
+ requestOut->setQuery(args.getQuery());
+ requestOut->setProj(args.getFields());
+ requestOut->setUpdates(args.getUpdateObj());
+ requestOut->setSort(args.getSort());
+ requestOut->setUpsert(args.isUpsert());
+ requestOut->setReturnDocs(args.shouldReturnNew() ? UpdateRequest::RETURN_NEW
+ : UpdateRequest::RETURN_OLD);
+ requestOut->setMulti(false);
+ requestOut->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ requestOut->setExplain(explain);
+ requestOut->setLifecycle(updateLifecycle);
+}
+
+void makeDeleteRequest(const FindAndModifyRequest& args, bool explain, DeleteRequest* requestOut) {
+ requestOut->setQuery(args.getQuery());
+ requestOut->setProj(args.getFields());
+ requestOut->setSort(args.getSort());
+ requestOut->setMulti(false);
+ requestOut->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ requestOut->setReturnDeleted(true); // Always return the old value.
+ requestOut->setExplain(explain);
+}
+
+void appendCommandResponse(PlanExecutor* exec,
+ bool isRemove,
+ const boost::optional<BSONObj>& value,
+ BSONObjBuilder& result) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ BSONObjBuilder lastErrorObjBuilder(result.subobjStart("lastErrorObject"));
+
+ if (isRemove) {
+ lastErrorObjBuilder.appendNumber("n", getDeleteStats(stats.get())->docsDeleted);
+ } else {
+ const UpdateStats* updateStats = getUpdateStats(stats.get());
+ lastErrorObjBuilder.appendBool("updatedExisting", updateStats->nMatched > 0);
+ lastErrorObjBuilder.appendNumber("n", updateStats->inserted ? 1 : updateStats->nMatched);
+ // Note we have to use the objInserted from the stats here, rather than 'value'
+ // because the _id field could have been excluded by a projection.
+ if (!updateStats->objInserted.isEmpty()) {
+ lastErrorObjBuilder.appendAs(updateStats->objInserted["_id"], kUpsertedFieldName);
}
- invariant(state == PlanExecutor::IS_EOF);
- return boost::optional<BSONObj>(boost::none);
}
+ lastErrorObjBuilder.done();
- void makeUpdateRequest(const FindAndModifyRequest& args,
- bool explain,
- UpdateLifecycleImpl* updateLifecycle,
- UpdateRequest* requestOut) {
- requestOut->setQuery(args.getQuery());
- requestOut->setProj(args.getFields());
- requestOut->setUpdates(args.getUpdateObj());
- requestOut->setSort(args.getSort());
- requestOut->setUpsert(args.isUpsert());
- requestOut->setReturnDocs(args.shouldReturnNew()
- ? UpdateRequest::RETURN_NEW
- : UpdateRequest::RETURN_OLD);
- requestOut->setMulti(false);
- requestOut->setYieldPolicy(PlanExecutor::YIELD_AUTO);
- requestOut->setExplain(explain);
- requestOut->setLifecycle(updateLifecycle);
+ if (value) {
+ result.append("value", *value);
+ } else {
+ result.appendNull("value");
}
+}
+
+Status checkCanAcceptWritesForDatabase(const NamespaceString& nsString) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString)) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream()
+ << "Not primary while running findAndModify command on collection "
+ << nsString.ns());
+ }
+ return Status::OK();
+}
+
+} // namespace
- void makeDeleteRequest(const FindAndModifyRequest& args,
- bool explain,
- DeleteRequest* requestOut) {
- requestOut->setQuery(args.getQuery());
- requestOut->setProj(args.getFields());
- requestOut->setSort(args.getSort());
- requestOut->setMulti(false);
- requestOut->setYieldPolicy(PlanExecutor::YIELD_AUTO);
- requestOut->setReturnDeleted(true); // Always return the old value.
- requestOut->setExplain(explain);
+/* Find and Modify an object returning either the old (default) or new value*/
+class CmdFindAndModify : public Command {
+public:
+ void help(std::stringstream& help) const override {
+ help << "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: "
+ "{processed:true}}, new: true}\n"
+ "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: "
+ "{priority:-1}}\n"
+ "Either update or remove is required, all other fields have default values.\n"
+ "Output is in the \"value\" field\n";
}
- void appendCommandResponse(PlanExecutor* exec,
- bool isRemove,
- const boost::optional<BSONObj>& value,
- BSONObjBuilder& result) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- BSONObjBuilder lastErrorObjBuilder(result.subobjStart("lastErrorObject"));
+ CmdFindAndModify() : Command("findAndModify", false, "findandmodify") {}
+ bool slaveOk() const override {
+ return false;
+ }
+ bool isWriteCommandForConfigServer() const override {
+ return true;
+ }
+ void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) override {
+ find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out);
+ }
- if (isRemove) {
- lastErrorObjBuilder.appendNumber("n", getDeleteStats(stats.get())->docsDeleted);
+ Status explain(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const override {
+ const std::string fullNs = parseNsCollectionRequired(dbName, cmdObj);
+ Status allowedWriteStatus = userAllowedWriteNS(fullNs);
+ if (!allowedWriteStatus.isOK()) {
+ return allowedWriteStatus;
}
- else {
- const UpdateStats* updateStats = getUpdateStats(stats.get());
- lastErrorObjBuilder.appendBool("updatedExisting", updateStats->nMatched > 0);
- lastErrorObjBuilder.appendNumber("n", updateStats->inserted ? 1
- : updateStats->nMatched);
- // Note we have to use the objInserted from the stats here, rather than 'value'
- // because the _id field could have been excluded by a projection.
- if (!updateStats->objInserted.isEmpty()) {
- lastErrorObjBuilder.appendAs(updateStats->objInserted["_id"], kUpsertedFieldName);
- }
- }
- lastErrorObjBuilder.done();
- if (value) {
- result.append("value", *value);
- }
- else {
- result.appendNull("value");
+ StatusWith<FindAndModifyRequest> parseStatus =
+ FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs), cmdObj);
+ if (!parseStatus.isOK()) {
+ return parseStatus.getStatus();
}
- }
- Status checkCanAcceptWritesForDatabase(const NamespaceString& nsString) {
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString)) {
- return Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while running findAndModify command on collection "
- << nsString.ns());
- }
- return Status::OK();
- }
+ const FindAndModifyRequest& args = parseStatus.getValue();
+ const NamespaceString& nsString = args.getNamespaceString();
-} // namespace
+ auto client = txn->getClient();
- /* Find and Modify an object returning either the old (default) or new value*/
- class CmdFindAndModify : public Command {
- public:
- void help(std::stringstream& help) const override {
- help <<
- "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: {processed:true}}, new: true}\n"
- "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: {priority:-1}}\n"
- "Either update or remove is required, all other fields have default values.\n"
- "Output is in the \"value\" field\n";
- }
+ if (args.isRemove()) {
+ DeleteRequest request(nsString);
+ const bool isExplain = true;
+ makeDeleteRequest(args, isExplain, &request);
- CmdFindAndModify() : Command("findAndModify", false, "findandmodify") { }
- bool slaveOk() const override { return false; }
- bool isWriteCommandForConfigServer() const override { return true; }
- void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) override {
- find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out);
- }
+ ParsedDelete parsedDelete(txn, &request);
+ Status parsedDeleteStatus = parsedDelete.parseRequest();
+ if (!parsedDeleteStatus.isOK()) {
+ return parsedDeleteStatus;
+ }
+
+ // Explain calls of the findAndModify command are read-only, but we take write
+ // locks so that the timing information is more accurate.
+ AutoGetDb autoDb(txn, dbName, MODE_IX);
+ Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+
+ ensureShardVersionOKOrThrow(client, nsString.ns());
- Status explain(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const override {
- const std::string fullNs = parseNsCollectionRequired(dbName, cmdObj);
- Status allowedWriteStatus = userAllowedWriteNS(fullNs);
- if (!allowedWriteStatus.isOK()) {
- return allowedWriteStatus;
+ Collection* collection = nullptr;
+ if (autoDb.getDb()) {
+ collection = autoDb.getDb()->getCollection(nsString.ns());
+ } else {
+ return {ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbName << " does not exist."};
}
- StatusWith<FindAndModifyRequest> parseStatus =
- FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs), cmdObj);
- if (!parseStatus.isOK()) {
- return parseStatus.getStatus();
+ PlanExecutor* rawExec;
+ Status execStatus = getExecutorDelete(txn, collection, &parsedDelete, &rawExec);
+ if (!execStatus.isOK()) {
+ return execStatus;
}
+ const std::unique_ptr<PlanExecutor> exec(rawExec);
+ Explain::explainStages(exec.get(), verbosity, out);
+ } else {
+ UpdateRequest request(nsString);
+ const bool ignoreVersion = false;
+ UpdateLifecycleImpl updateLifecycle(ignoreVersion, nsString);
+ const bool isExplain = true;
+ makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
+
+ ParsedUpdate parsedUpdate(txn, &request);
+ Status parsedUpdateStatus = parsedUpdate.parseRequest();
+ if (!parsedUpdateStatus.isOK()) {
+ return parsedUpdateStatus;
+ }
+
+ OpDebug* opDebug = &CurOp::get(txn)->debug();
- const FindAndModifyRequest& args = parseStatus.getValue();
- const NamespaceString& nsString = args.getNamespaceString();
+ // Explain calls of the findAndModify command are read-only, but we take write
+ // locks so that the timing information is more accurate.
+ AutoGetDb autoDb(txn, dbName, MODE_IX);
+ Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
- auto client = txn->getClient();
+ ensureShardVersionOKOrThrow(client, nsString.ns());
+ Collection* collection = nullptr;
+ if (autoDb.getDb()) {
+ collection = autoDb.getDb()->getCollection(nsString.ns());
+ } else {
+ return {ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbName << " does not exist."};
+ }
+
+ PlanExecutor* rawExec;
+ Status execStatus =
+ getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec);
+ if (!execStatus.isOK()) {
+ return execStatus;
+ }
+ const std::unique_ptr<PlanExecutor> exec(rawExec);
+ Explain::explainStages(exec.get(), verbosity, out);
+ }
+
+ return Status::OK();
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ // findAndModify command is not replicated directly.
+ invariant(txn->writesAreReplicated());
+ const std::string fullNs = parseNsCollectionRequired(dbName, cmdObj);
+ Status allowedWriteStatus = userAllowedWriteNS(fullNs);
+ if (!allowedWriteStatus.isOK()) {
+ return appendCommandStatus(result, allowedWriteStatus);
+ }
+
+ StatusWith<FindAndModifyRequest> parseStatus =
+ FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs), cmdObj);
+ if (!parseStatus.isOK()) {
+ return appendCommandStatus(result, parseStatus.getStatus());
+ }
+
+ const FindAndModifyRequest& args = parseStatus.getValue();
+ const NamespaceString& nsString = args.getNamespaceString();
+
+ StatusWith<WriteConcernOptions> wcResult = extractWriteConcern(cmdObj);
+ if (!wcResult.isOK()) {
+ return appendCommandStatus(result, wcResult.getStatus());
+ }
+ txn->setWriteConcern(wcResult.getValue());
+ setupSynchronousCommit(txn);
+
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
+
+ auto client = txn->getClient();
+
+ // We may encounter a WriteConflictException when creating a collection during an
+ // upsert, even when holding the exclusive lock on the database (due to other load on
+ // the system). The query framework should handle all other WriteConflictExceptions,
+ // but we defensively wrap the operation in the retry loop anyway.
+ //
+ // SERVER-17579 getExecutorUpdate() and getExecutorDelete() can throw a
+ // WriteConflictException when checking whether an index is ready or not.
+ // (on debug builds only)
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
if (args.isRemove()) {
DeleteRequest request(nsString);
- const bool isExplain = true;
+ const bool isExplain = false;
makeDeleteRequest(args, isExplain, &request);
ParsedDelete parsedDelete(txn, &request);
Status parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
- return parsedDeleteStatus;
+ return appendCommandStatus(result, parsedDeleteStatus);
}
- // Explain calls of the findAndModify command are read-only, but we take write
- // locks so that the timing information is more accurate.
- AutoGetDb autoDb(txn, dbName, MODE_IX);
+ AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+ Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
ensureShardVersionOKOrThrow(client, nsString.ns());
- Collection* collection = nullptr;
- if (autoDb.getDb()) {
- collection = autoDb.getDb()->getCollection(nsString.ns());
- }
- else {
- return {ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbName << " does not exist."};
+ Status isPrimary = checkCanAcceptWritesForDatabase(nsString);
+ if (!isPrimary.isOK()) {
+ return appendCommandStatus(result, isPrimary);
}
PlanExecutor* rawExec;
Status execStatus = getExecutorDelete(txn, collection, &parsedDelete, &rawExec);
if (!execStatus.isOK()) {
- return execStatus;
+ return appendCommandStatus(result, execStatus);
}
const std::unique_ptr<PlanExecutor> exec(rawExec);
- Explain::explainStages(exec.get(), verbosity, out);
- }
- else {
+
+ StatusWith<boost::optional<BSONObj>> advanceStatus =
+ advanceExecutor(exec.get(), args.isRemove());
+ if (!advanceStatus.isOK()) {
+ return appendCommandStatus(result, advanceStatus.getStatus());
+ }
+
+ boost::optional<BSONObj> value = advanceStatus.getValue();
+ appendCommandResponse(exec.get(), args.isRemove(), value, result);
+ } else {
UpdateRequest request(nsString);
const bool ignoreVersion = false;
UpdateLifecycleImpl updateLifecycle(ignoreVersion, nsString);
- const bool isExplain = true;
+ const bool isExplain = false;
makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
ParsedUpdate parsedUpdate(txn, &request);
Status parsedUpdateStatus = parsedUpdate.parseRequest();
if (!parsedUpdateStatus.isOK()) {
- return parsedUpdateStatus;
+ return appendCommandStatus(result, parsedUpdateStatus);
}
OpDebug* opDebug = &CurOp::get(txn)->debug();
- // Explain calls of the findAndModify command are read-only, but we take write
- // locks so that the timing information is more accurate.
- AutoGetDb autoDb(txn, dbName, MODE_IX);
+ AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+ Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
ensureShardVersionOKOrThrow(client, nsString.ns());
- Collection* collection = nullptr;
- if (autoDb.getDb()) {
- collection = autoDb.getDb()->getCollection(nsString.ns());
- }
- else {
- return {ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbName << " does not exist."};
- }
-
- PlanExecutor* rawExec;
- Status execStatus = getExecutorUpdate(txn, collection, &parsedUpdate, opDebug,
- &rawExec);
- if (!execStatus.isOK()) {
- return execStatus;
+ Status isPrimary = checkCanAcceptWritesForDatabase(nsString);
+ if (!isPrimary.isOK()) {
+ return appendCommandStatus(result, isPrimary);
}
- const std::unique_ptr<PlanExecutor> exec(rawExec);
- Explain::explainStages(exec.get(), verbosity, out);
- }
-
- return Status::OK();
- }
-
- bool run(OperationContext* txn,
- const std::string& dbName,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- // findAndModify command is not replicated directly.
- invariant(txn->writesAreReplicated());
- const std::string fullNs = parseNsCollectionRequired(dbName, cmdObj);
- Status allowedWriteStatus = userAllowedWriteNS(fullNs);
- if (!allowedWriteStatus.isOK()) {
- return appendCommandStatus(result, allowedWriteStatus);
- }
- StatusWith<FindAndModifyRequest> parseStatus =
- FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs), cmdObj);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus.getStatus());
- }
-
- const FindAndModifyRequest& args = parseStatus.getValue();
- const NamespaceString& nsString = args.getNamespaceString();
-
- StatusWith<WriteConcernOptions> wcResult = extractWriteConcern(cmdObj);
- if (!wcResult.isOK()) {
- return appendCommandStatus(result, wcResult.getStatus());
- }
- txn->setWriteConcern(wcResult.getValue());
- setupSynchronousCommit(txn);
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
-
- auto client = txn->getClient();
-
- // We may encounter a WriteConflictException when creating a collection during an
- // upsert, even when holding the exclusive lock on the database (due to other load on
- // the system). The query framework should handle all other WriteConflictExceptions,
- // but we defensively wrap the operation in the retry loop anyway.
- //
- // SERVER-17579 getExecutorUpdate() and getExecutorDelete() can throw a
- // WriteConflictException when checking whether an index is ready or not.
- // (on debug builds only)
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (args.isRemove()) {
- DeleteRequest request(nsString);
- const bool isExplain = false;
- makeDeleteRequest(args, isExplain, &request);
-
- ParsedDelete parsedDelete(txn, &request);
- Status parsedDeleteStatus = parsedDelete.parseRequest();
- if (!parsedDeleteStatus.isOK()) {
- return appendCommandStatus(result, parsedDeleteStatus);
- }
-
- AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
- Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
-
- ensureShardVersionOKOrThrow(client, nsString.ns());
-
- Status isPrimary = checkCanAcceptWritesForDatabase(nsString);
- if (!isPrimary.isOK()) {
- return appendCommandStatus(result, isPrimary);
- }
-
- PlanExecutor* rawExec;
- Status execStatus = getExecutorDelete(txn, collection, &parsedDelete, &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
- }
- const std::unique_ptr<PlanExecutor> exec(rawExec);
-
- StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(exec.get(), args.isRemove());
- if (!advanceStatus.isOK()) {
- return appendCommandStatus(result, advanceStatus.getStatus());
- }
-
- boost::optional<BSONObj> value = advanceStatus.getValue();
- appendCommandResponse(exec.get(), args.isRemove(), value, result);
- }
- else {
- UpdateRequest request(nsString);
- const bool ignoreVersion = false;
- UpdateLifecycleImpl updateLifecycle(ignoreVersion, nsString);
- const bool isExplain = false;
- makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
-
- ParsedUpdate parsedUpdate(txn, &request);
- Status parsedUpdateStatus = parsedUpdate.parseRequest();
- if (!parsedUpdateStatus.isOK()) {
- return appendCommandStatus(result, parsedUpdateStatus);
- }
-
- OpDebug* opDebug = &CurOp::get(txn)->debug();
-
- AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
- Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
-
- ensureShardVersionOKOrThrow(client, nsString.ns());
-
- Status isPrimary = checkCanAcceptWritesForDatabase(nsString);
- if (!isPrimary.isOK()) {
- return appendCommandStatus(result, isPrimary);
+ // Create the collection if it does not exist when performing an upsert
+ // because the update stage does not create its own collection.
+ if (!collection && args.isUpsert()) {
+ // Release the collection lock and reacquire a lock on the database
+ // in exclusive mode in order to create the collection.
+ collLock.relockAsDatabaseExclusive(autoDb.lock());
+ collection = autoDb.getDb()->getCollection(nsString.ns());
+ Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(nsString);
+ if (!isPrimaryAfterRelock.isOK()) {
+ return appendCommandStatus(result, isPrimaryAfterRelock);
}
- // Create the collection if it does not exist when performing an upsert
- // because the update stage does not create its own collection.
- if (!collection && args.isUpsert()) {
- // Release the collection lock and reacquire a lock on the database
- // in exclusive mode in order to create the collection.
- collLock.relockAsDatabaseExclusive(autoDb.lock());
- collection = autoDb.getDb()->getCollection(nsString.ns());
- Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(nsString);
- if (!isPrimaryAfterRelock.isOK()) {
- return appendCommandStatus(result, isPrimaryAfterRelock);
- }
-
- if (collection) {
- // Someone else beat us to creating the collection, do nothing.
- }
- else {
- WriteUnitOfWork wuow(txn);
- Status createCollStatus = userCreateNS(txn, autoDb.getDb(),
- nsString.ns(), BSONObj());
- if (!createCollStatus.isOK()) {
- return appendCommandStatus(result, createCollStatus);
- }
- wuow.commit();
-
- collection = autoDb.getDb()->getCollection(nsString.ns());
- invariant(collection);
+ if (collection) {
+ // Someone else beat us to creating the collection, do nothing.
+ } else {
+ WriteUnitOfWork wuow(txn);
+ Status createCollStatus =
+ userCreateNS(txn, autoDb.getDb(), nsString.ns(), BSONObj());
+ if (!createCollStatus.isOK()) {
+ return appendCommandStatus(result, createCollStatus);
}
- }
+ wuow.commit();
- PlanExecutor* rawExec;
- Status execStatus = getExecutorUpdate(txn, collection, &parsedUpdate, opDebug,
- &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
- }
- const std::unique_ptr<PlanExecutor> exec(rawExec);
-
- StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(exec.get(), args.isRemove());
- if (!advanceStatus.isOK()) {
- return appendCommandStatus(result, advanceStatus.getStatus());
+ collection = autoDb.getDb()->getCollection(nsString.ns());
+ invariant(collection);
}
+ }
- boost::optional<BSONObj> value = advanceStatus.getValue();
- appendCommandResponse(exec.get(), args.isRemove(), value, result);
+ PlanExecutor* rawExec;
+ Status execStatus =
+ getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec);
+ if (!execStatus.isOK()) {
+ return appendCommandStatus(result, execStatus);
}
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "findAndModify", nsString.ns());
+ const std::unique_ptr<PlanExecutor> exec(rawExec);
- WriteConcernResult res;
- auto waitForWCStatus = waitForWriteConcern(
- txn,
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
- &res
- );
- appendCommandWCStatus(result, waitForWCStatus);
+ StatusWith<boost::optional<BSONObj>> advanceStatus =
+ advanceExecutor(exec.get(), args.isRemove());
+ if (!advanceStatus.isOK()) {
+ return appendCommandStatus(result, advanceStatus.getStatus());
+ }
- return true;
+ boost::optional<BSONObj> value = advanceStatus.getValue();
+ appendCommandResponse(exec.get(), args.isRemove(), value, result);
+ }
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "findAndModify", nsString.ns());
+
+ WriteConcernResult res;
+ auto waitForWCStatus = waitForWriteConcern(
+ txn, repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(), &res);
+ appendCommandWCStatus(result, waitForWCStatus);
+
+ return true;
+ }
- } cmdFindAndModify;
+} cmdFindAndModify;
} // namespace mongo
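
The help() text in CmdFindAndModify above shows the two supported invocation shapes; the sketch below just builds those same documents with the in-tree BSON builders. The collection and field names come from the help text itself and carry no special meaning.

    // Sketch only: the two findAndModify shapes from the help text.
    #include "mongo/db/jsobj.h"

    namespace example {

    // {findAndModify: "collection", query: {processed: false},
    //  update: {$set: {processed: true}}, new: true}
    mongo::BSONObj makeFindAndModifyUpdate() {
        return BSON("findAndModify" << "collection"
                                    << "query" << BSON("processed" << false)
                                    << "update" << BSON("$set" << BSON("processed" << true))
                                    << "new" << true);
    }

    // {findAndModify: "collection", query: {processed: false}, remove: true,
    //  sort: {priority: -1}}
    mongo::BSONObj makeFindAndModifyRemove() {
        return BSON("findAndModify" << "collection"
                                    << "query" << BSON("processed" << false)
                                    << "remove" << true
                                    << "sort" << BSON("priority" << -1));
    }

    }  // namespace example
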
diff --git a/src/mongo/db/commands/find_and_modify.h b/src/mongo/db/commands/find_and_modify.h
index ab1a6eff18c..cd6c08e7c25 100644
--- a/src/mongo/db/commands/find_and_modify.h
+++ b/src/mongo/db/commands/find_and_modify.h
@@ -36,16 +36,14 @@
namespace mongo {
- class Command;
+class Command;
namespace find_and_modify {
- void addPrivilegesRequiredForFindAndModify(Command* commandTemplate,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out);
-
-} // namespace find_and_modify
-} // namespace mongo
-
+void addPrivilegesRequiredForFindAndModify(Command* commandTemplate,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out);
+} // namespace find_and_modify
+} // namespace mongo
diff --git a/src/mongo/db/commands/find_and_modify_common.cpp b/src/mongo/db/commands/find_and_modify_common.cpp
index abe8bdab8f2..8179ea72406 100644
--- a/src/mongo/db/commands/find_and_modify_common.cpp
+++ b/src/mongo/db/commands/find_and_modify_common.cpp
@@ -44,34 +44,35 @@
namespace mongo {
namespace find_and_modify {
- void addPrivilegesRequiredForFindAndModify(Command* commandTemplate,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- bool update = cmdObj["update"].trueValue();
- bool upsert = cmdObj["upsert"].trueValue();
- bool remove = cmdObj["remove"].trueValue();
- bool bypassDocumentValidation = shouldBypassDocumentValidationForCommand(cmdObj);
+void addPrivilegesRequiredForFindAndModify(Command* commandTemplate,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ bool update = cmdObj["update"].trueValue();
+ bool upsert = cmdObj["upsert"].trueValue();
+ bool remove = cmdObj["remove"].trueValue();
+ bool bypassDocumentValidation = shouldBypassDocumentValidationForCommand(cmdObj);
- ActionSet actions;
- actions.addAction(ActionType::find);
- if (update) {
- actions.addAction(ActionType::update);
- }
- if (upsert) {
- actions.addAction(ActionType::insert);
- }
- if (remove) {
- actions.addAction(ActionType::remove);
- }
- if (bypassDocumentValidation) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
- ResourcePattern resource(commandTemplate->parseResourcePattern(dbname, cmdObj));
- uassert(17137, "Invalid target namespace " + resource.toString(),
- resource.isExactNamespacePattern());
- out->push_back(Privilege(resource, actions));
- }
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ if (update) {
+ actions.addAction(ActionType::update);
+ }
+ if (upsert) {
+ actions.addAction(ActionType::insert);
+ }
+ if (remove) {
+ actions.addAction(ActionType::remove);
+ }
+ if (bypassDocumentValidation) {
+ actions.addAction(ActionType::bypassDocumentValidation);
+ }
+ ResourcePattern resource(commandTemplate->parseResourcePattern(dbname, cmdObj));
+ uassert(17137,
+ "Invalid target namespace " + resource.toString(),
+ resource.isExactNamespacePattern());
+ out->push_back(Privilege(resource, actions));
+}
-} // namespace find_and_modify
-} // namespace mongo
+} // namespace find_and_modify
+} // namespace mongo
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 0bc9589bbef..d40ff0f766e 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -55,313 +55,312 @@
namespace mongo {
- /**
- * A command for running .find() queries.
- */
- class FindCmd : public Command {
- MONGO_DISALLOW_COPYING(FindCmd);
- public:
- FindCmd() : Command("find") { }
+/**
+ * A command for running .find() queries.
+ */
+class FindCmd : public Command {
+ MONGO_DISALLOW_COPYING(FindCmd);
- bool isWriteCommandForConfigServer() const override { return false; }
+public:
+ FindCmd() : Command("find") {}
- bool slaveOk() const override { return false; }
+ bool isWriteCommandForConfigServer() const override {
+ return false;
+ }
- bool slaveOverrideOk() const override { return true; }
+ bool slaveOk() const override {
+ return false;
+ }
- bool maintenanceOk() const override { return false; }
+ bool slaveOverrideOk() const override {
+ return true;
+ }
- bool adminOnly() const override { return false; }
+ bool maintenanceOk() const override {
+ return false;
+ }
- void help(std::stringstream& help) const override {
- help << "query for documents";
- }
+ bool adminOnly() const override {
+ return false;
+ }
- /**
- * A find command does not increment the command counter, but rather increments the
- * query counter.
- */
- bool shouldAffectCommandCounter() const override { return false; }
+ void help(std::stringstream& help) const override {
+ help << "query for documents";
+ }
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) override {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+ /**
+ * A find command does not increment the command counter, but rather increments the
+ * query counter.
+ */
+ bool shouldAffectCommandCounter() const override {
+ return false;
+ }
- if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::find)) {
- return Status::OK();
- }
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::find)) {
+ return Status::OK();
}
- Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const override {
- const std::string fullns = parseNs(dbname, cmdObj);
- const NamespaceString nss(fullns);
- if (!nss.isValid()) {
- return {ErrorCodes::InvalidNamespace,
- str::stream() << "Invalid collection name: " << nss.ns()};
- }
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
+
+ Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const override {
+ const std::string fullns = parseNs(dbname, cmdObj);
+ const NamespaceString nss(fullns);
+ if (!nss.isValid()) {
+ return {ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid collection name: " << nss.ns()};
+ }
- // Parse the command BSON to a LiteParsedQuery.
- const bool isExplain = true;
- auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
+ // Parse the command BSON to a LiteParsedQuery.
+ const bool isExplain = true;
+ auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
+ }
- // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq;
- {
- CanonicalQuery* rawCq;
- WhereCallbackReal whereCallback(txn, nss.db());
- Status canonStatus = CanonicalQuery::canonicalize(lpqStatus.getValue().release(),
- &rawCq,
- whereCallback);
- if (!canonStatus.isOK()) {
- return canonStatus;
- }
- cq.reset(rawCq);
+ // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq;
+ {
+ CanonicalQuery* rawCq;
+ WhereCallbackReal whereCallback(txn, nss.db());
+ Status canonStatus =
+ CanonicalQuery::canonicalize(lpqStatus.getValue().release(), &rawCq, whereCallback);
+ if (!canonStatus.isOK()) {
+ return canonStatus;
}
+ cq.reset(rawCq);
+ }
- AutoGetCollectionForRead ctx(txn, nss);
- // The collection may be NULL. If so, getExecutor() should handle it by returning
- // an execution tree with an EOFStage.
- Collection* collection = ctx.getCollection();
-
- // We have a parsed query. Time to get the execution plan for it.
- std::unique_ptr<PlanExecutor> exec;
- {
- PlanExecutor* rawExec;
- Status execStatus = getExecutorFind(txn,
- collection,
- nss,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!execStatus.isOK()) {
- return execStatus;
- }
- exec.reset(rawExec);
+ AutoGetCollectionForRead ctx(txn, nss);
+ // The collection may be NULL. If so, getExecutor() should handle it by returning
+ // an execution tree with an EOFStage.
+ Collection* collection = ctx.getCollection();
+
+ // We have a parsed query. Time to get the execution plan for it.
+ std::unique_ptr<PlanExecutor> exec;
+ {
+ PlanExecutor* rawExec;
+ Status execStatus = getExecutorFind(
+ txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
+ if (!execStatus.isOK()) {
+ return execStatus;
}
+ exec.reset(rawExec);
+ }
- // Got the execution tree. Explain it.
- Explain::explainStages(exec.get(), verbosity, out);
- return Status::OK();
+ // Got the execution tree. Explain it.
+ Explain::explainStages(exec.get(), verbosity, out);
+ return Status::OK();
+ }
+
+ /**
+ * Runs a query using the following steps:
+ * 1) Parsing.
+ * 2) Acquire locks.
+ * 3) Plan query, obtaining an executor that can run it.
+ * 4) Setup a cursor for the query, which may be used on subsequent getMores.
+ * 5) Generate the first batch.
+ * 6) Save state for getMore.
+ * 7) Generate response to send to the client.
+ *
+ * TODO: Rather than using the sharding version available in thread-local storage
+ * (i.e. call to shardingState.needCollectionMetadata() below), shard version
+ * information should be passed as part of the command parameter.
+ */
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ const std::string fullns = parseNs(dbname, cmdObj);
+ const NamespaceString nss(fullns);
+ if (!nss.isValid()) {
+ return appendCommandStatus(result,
+ {ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid collection name: " << nss.ns()});
}
- /**
- * Runs a query using the following steps:
- * 1) Parsing.
- * 2) Acquire locks.
- * 3) Plan query, obtaining an executor that can run it.
- * 4) Setup a cursor for the query, which may be used on subsequent getMores.
- * 5) Generate the first batch.
- * 6) Save state for getMore.
- * 7) Generate response to send to the client.
- *
- * TODO: Rather than using the sharding version available in thread-local storage
- * (i.e. call to shardingState.needCollectionMetadata() below), shard version
- * information should be passed as part of the command parameter.
- */
- bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- const std::string fullns = parseNs(dbname, cmdObj);
- const NamespaceString nss(fullns);
- if (!nss.isValid()) {
- return appendCommandStatus(result, {ErrorCodes::InvalidNamespace,
- str::stream() << "Invalid collection name: "
- << nss.ns()});
- }
+ // Although it is a command, a find command gets counted as a query.
+ globalOpCounters.gotQuery();
- // Although it is a command, a find command gets counted as a query.
- globalOpCounters.gotQuery();
+ if (txn->getClient()->isInDirectClient()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation, "Cannot run find command from eval()"));
+ }
- if (txn->getClient()->isInDirectClient()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot run find command from eval()"));
- }
+ // 1a) Parse the command BSON to a LiteParsedQuery.
+ const bool isExplain = false;
+ auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ if (!lpqStatus.isOK()) {
+ return appendCommandStatus(result, lpqStatus.getStatus());
+ }
- // 1a) Parse the command BSON to a LiteParsedQuery.
- const bool isExplain = false;
- auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!lpqStatus.isOK()) {
- return appendCommandStatus(result, lpqStatus.getStatus());
- }
+ auto& lpq = lpqStatus.getValue();
- auto& lpq = lpqStatus.getValue();
-
- // Fill out curop information.
- int ntoreturn = lpq->getBatchSize().value_or(0);
- beginQueryOp(txn, nss, cmdObj, ntoreturn, lpq->getSkip());
-
- // 1b) Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq;
- {
- CanonicalQuery* rawCq;
- WhereCallbackReal whereCallback(txn, nss.db());
- Status canonStatus = CanonicalQuery::canonicalize(lpq.release(),
- &rawCq,
- whereCallback);
- if (!canonStatus.isOK()) {
- return appendCommandStatus(result, canonStatus);
- }
- cq.reset(rawCq);
- }
+ // Fill out curop information.
+ int ntoreturn = lpq->getBatchSize().value_or(0);
+ beginQueryOp(txn, nss, cmdObj, ntoreturn, lpq->getSkip());
- // 2) Acquire locks.
- AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = ctx.getCollection();
-
- const int dbProfilingLevel = ctx.getDb() ? ctx.getDb()->getProfilingLevel() :
- serverGlobalParams.defaultProfile;
-
- // It is possible that the sharding version will change during yield while we are
- // retrieving a plan executor. If this happens we will throw an error and mongos will
- // retry.
- const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
-
- // 3) Get the execution plan for the query.
- std::unique_ptr<PlanExecutor> execHolder;
- {
- PlanExecutor* rawExec;
- Status execStatus = getExecutorFind(txn,
- collection,
- nss,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
- }
- execHolder.reset(rawExec);
+ // 1b) Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq;
+ {
+ CanonicalQuery* rawCq;
+ WhereCallbackReal whereCallback(txn, nss.db());
+ Status canonStatus = CanonicalQuery::canonicalize(lpq.release(), &rawCq, whereCallback);
+ if (!canonStatus.isOK()) {
+ return appendCommandStatus(result, canonStatus);
}
+ cq.reset(rawCq);
+ }
- // TODO: Currently, chunk ranges are kept around until all ClientCursors created while
- // the chunk belonged on this node are gone. Separating chunk lifetime management from
- // ClientCursor should allow this check to go away.
- if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
- // Version changed while retrieving a PlanExecutor. Terminate the operation,
- // signaling that mongos should retry.
- throw SendStaleConfigException(nss.ns(),
- "version changed during find command",
- shardingVersionAtStart,
- shardingState.getVersion(nss.ns()));
+ // 2) Acquire locks.
+ AutoGetCollectionForRead ctx(txn, nss);
+ Collection* collection = ctx.getCollection();
+
+ const int dbProfilingLevel =
+ ctx.getDb() ? ctx.getDb()->getProfilingLevel() : serverGlobalParams.defaultProfile;
+
+ // It is possible that the sharding version will change during yield while we are
+ // retrieving a plan executor. If this happens we will throw an error and mongos will
+ // retry.
+ const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
+
+ // 3) Get the execution plan for the query.
+ std::unique_ptr<PlanExecutor> execHolder;
+ {
+ PlanExecutor* rawExec;
+ Status execStatus = getExecutorFind(
+ txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
+ if (!execStatus.isOK()) {
+ return appendCommandStatus(result, execStatus);
}
+ execHolder.reset(rawExec);
+ }
- if (!collection) {
- // No collection. Just fill out curop indicating that there were zero results and
- // there is no ClientCursor id, and then return.
- const int numResults = 0;
- const CursorId cursorId = 0;
- endQueryOp(txn, execHolder.get(), dbProfilingLevel, numResults, cursorId);
- appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
- return true;
- }
+ // TODO: Currently, chunk ranges are kept around until all ClientCursors created while
+ // the chunk belonged on this node are gone. Separating chunk lifetime management from
+ // ClientCursor should allow this check to go away.
+ if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
+ // Version changed while retrieving a PlanExecutor. Terminate the operation,
+ // signaling that mongos should retry.
+ throw SendStaleConfigException(nss.ns(),
+ "version changed during find command",
+ shardingVersionAtStart,
+ shardingState.getVersion(nss.ns()));
+ }
- const LiteParsedQuery& pq = execHolder->getCanonicalQuery()->getParsed();
-
- // 4) If possible, register the execution plan inside a ClientCursor, and pin that
- // cursor. In this case, ownership of the PlanExecutor is transferred to the
- // ClientCursor, and 'exec' becomes null.
- //
- // First unregister the PlanExecutor so it can be re-registered with ClientCursor.
- execHolder->deregisterExec();
-
- // Create a ClientCursor containing this plan executor. We don't have to worry
- // about leaking it as it's inserted into a global map by its ctor.
- ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
- execHolder.release(),
- nss.ns(),
- pq.getOptions(),
- pq.getFilter());
- CursorId cursorId = cursor->cursorid();
- ClientCursorPin ccPin(collection->getCursorManager(), cursorId);
-
-            // On early return, get rid of the cursor.
- ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, ccPin);
-
- invariant(!execHolder);
- PlanExecutor* exec = cursor->getExecutor();
-
- // 5) Stream query results, adding them to a BSONArray as we go.
- BSONArrayBuilder firstBatch;
- BSONObj obj;
- PlanExecutor::ExecState state;
- int numResults = 0;
- while (!enoughForFirstBatch(pq, numResults, firstBatch.len())
- && PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // If adding this object will cause us to exceed the BSON size limit, then we stash
- // it for later.
- if (firstBatch.len() + obj.objsize() > BSONObjMaxUserSize && numResults > 0) {
- exec->enqueue(obj);
- break;
- }
-
- // Add result to output buffer.
- firstBatch.append(obj);
- numResults++;
- }
+ if (!collection) {
+ // No collection. Just fill out curop indicating that there were zero results and
+ // there is no ClientCursor id, and then return.
+ const int numResults = 0;
+ const CursorId cursorId = 0;
+ endQueryOp(txn, execHolder.get(), dbProfilingLevel, numResults, cursorId);
+ appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
+ return true;
+ }
- // Throw an assertion if query execution fails for any reason.
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "Plan executor error during find command: "
- << PlanExecutor::statestr(state)
- << ", stats: " << Explain::statsToBSON(*stats);
-
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during find command: "
- << WorkingSetCommon::toStatusString(obj)));
+ const LiteParsedQuery& pq = execHolder->getCanonicalQuery()->getParsed();
+
+ // 4) If possible, register the execution plan inside a ClientCursor, and pin that
+ // cursor. In this case, ownership of the PlanExecutor is transferred to the
+ // ClientCursor, and 'exec' becomes null.
+ //
+ // First unregister the PlanExecutor so it can be re-registered with ClientCursor.
+ execHolder->deregisterExec();
+
+ // Create a ClientCursor containing this plan executor. We don't have to worry
+ // about leaking it as it's inserted into a global map by its ctor.
+ ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
+ execHolder.release(),
+ nss.ns(),
+ pq.getOptions(),
+ pq.getFilter());
+ CursorId cursorId = cursor->cursorid();
+ ClientCursorPin ccPin(collection->getCursorManager(), cursorId);
+
+        // On early return, get rid of the cursor.
+ ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, ccPin);
+
+ invariant(!execHolder);
+ PlanExecutor* exec = cursor->getExecutor();
+
+ // 5) Stream query results, adding them to a BSONArray as we go.
+ BSONArrayBuilder firstBatch;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ int numResults = 0;
+ while (!enoughForFirstBatch(pq, numResults, firstBatch.len()) &&
+ PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ // If adding this object will cause us to exceed the BSON size limit, then we stash
+ // it for later.
+ if (firstBatch.len() + obj.objsize() > BSONObjMaxUserSize && numResults > 0) {
+ exec->enqueue(obj);
+ break;
}
- // 6) Set up the cursor for getMore.
- if (shouldSaveCursor(txn, collection, state, exec)) {
- // State will be restored on getMore.
- exec->saveState();
-
- cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
- cursor->setPos(numResults);
-
- // Don't stash the RU for tailable cursors at EOF, let them get a new RU on their
- // next getMore.
- if (!(pq.isTailable() && state == PlanExecutor::IS_EOF)) {
- // We stash away the RecoveryUnit in the ClientCursor. It's used for
- // subsequent getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
- txn->recoveryUnit()->abandonSnapshot();
- cursor->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
- StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
- txn->setRecoveryUnit(engine->newRecoveryUnit(),
- OperationContext::kNotInUnitOfWork);
- }
- }
- else {
- cursorId = 0;
- }
+ // Add result to output buffer.
+ firstBatch.append(obj);
+ numResults++;
+ }
- // Fill out curop based on the results.
- endQueryOp(txn, exec, dbProfilingLevel, numResults, cursorId);
+ // Throw an assertion if query execution fails for any reason.
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
+ << ", stats: " << Explain::statsToBSON(*stats);
+
+ return appendCommandStatus(result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream()
+ << "Executor error during find command: "
+ << WorkingSetCommon::toStatusString(obj)));
+ }
- // 7) Generate the response object to send to the client.
- appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
- if (cursorId) {
- cursorFreer.Dismiss();
+ // 6) Set up the cursor for getMore.
+ if (shouldSaveCursor(txn, collection, state, exec)) {
+ // State will be restored on getMore.
+ exec->saveState();
+
+ cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
+ cursor->setPos(numResults);
+
+ // Don't stash the RU for tailable cursors at EOF, let them get a new RU on their
+ // next getMore.
+ if (!(pq.isTailable() && state == PlanExecutor::IS_EOF)) {
+ // We stash away the RecoveryUnit in the ClientCursor. It's used for
+ // subsequent getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
+ txn->recoveryUnit()->abandonSnapshot();
+ cursor->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
+ StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
+ txn->setRecoveryUnit(engine->newRecoveryUnit(), OperationContext::kNotInUnitOfWork);
}
- return true;
+ } else {
+ cursorId = 0;
+ }
+
+ // Fill out curop based on the results.
+ endQueryOp(txn, exec, dbProfilingLevel, numResults, cursorId);
+
+ // 7) Generate the response object to send to the client.
+ appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
+ if (cursorId) {
+ cursorFreer.Dismiss();
}
+ return true;
+ }
- } findCmd;
+} findCmd;
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index aa3b8c9855a..348637d062a 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -57,232 +57,245 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
-
- class FSyncLockThread : public BackgroundJob {
- void doRealWork();
- public:
- FSyncLockThread() : BackgroundJob( true ) {}
- virtual ~FSyncLockThread(){}
- virtual string name() const { return "FSyncLockThread"; }
- virtual void run() {
- Client::initThread( "fsyncLockWorker" );
- try {
- doRealWork();
- }
- catch ( std::exception& e ) {
- error() << "FSyncLockThread exception: " << e.what() << endl;
- }
+using std::endl;
+using std::string;
+using std::stringstream;
+
+class FSyncLockThread : public BackgroundJob {
+ void doRealWork();
+
+public:
+ FSyncLockThread() : BackgroundJob(true) {}
+ virtual ~FSyncLockThread() {}
+ virtual string name() const {
+ return "FSyncLockThread";
+ }
+ virtual void run() {
+ Client::initThread("fsyncLockWorker");
+ try {
+ doRealWork();
+ } catch (std::exception& e) {
+ error() << "FSyncLockThread exception: " << e.what() << endl;
}
- };
-
- /* see unlockFsync() for unlocking:
- db.$cmd.sys.unlock.findOne()
- */
- class FSyncCommand : public Command {
- public:
- static const char* url() { return "http://dochub.mongodb.org/core/fsynccommand"; }
- bool locked;
- bool pendingUnlock;
- SimpleMutex m; // protects locked var above
- string err;
-
- stdx::condition_variable_any _threadSync;
- stdx::condition_variable_any _unlockSync;
-
- FSyncCommand() : Command( "fsync" ) { locked=false; pendingUnlock=false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual void help(stringstream& h) const { h << url(); }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::fsync);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+};
+
+/* see unlockFsync() for unlocking:
+ db.$cmd.sys.unlock.findOne()
+*/
+class FSyncCommand : public Command {
+public:
+ static const char* url() {
+ return "http://dochub.mongodb.org/core/fsynccommand";
+ }
+ bool locked;
+ bool pendingUnlock;
+ SimpleMutex m; // protects locked var above
+ string err;
+
+ stdx::condition_variable_any _threadSync;
+ stdx::condition_variable_any _unlockSync;
+
+ FSyncCommand() : Command("fsync") {
+ locked = false;
+ pendingUnlock = false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual void help(stringstream& h) const {
+ h << url();
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::fsync);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (txn->lockState()->isLocked()) {
+ errmsg = "fsync: Cannot execute fsync command from contexts that hold a data lock";
+ return false;
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- if (txn->lockState()->isLocked()) {
- errmsg = "fsync: Cannot execute fsync command from contexts that hold a data lock";
+
+ bool sync =
+ !cmdObj["async"].trueValue(); // async means do an fsync, but return immediately
+ bool lock = cmdObj["lock"].trueValue();
+ log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
+ if (lock) {
+ if (!sync) {
+ errmsg = "fsync: sync option must be true when using lock";
return false;
}
- bool sync = !cmdObj["async"].trueValue(); // async means do an fsync, but return immediately
- bool lock = cmdObj["lock"].trueValue();
- log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
- if( lock ) {
- if ( ! sync ) {
- errmsg = "fsync: sync option must be true when using lock";
- return false;
- }
-
- stdx::lock_guard<SimpleMutex> lk(m);
- err = "";
-
- (new FSyncLockThread())->go();
- while ( ! locked && err.size() == 0 ) {
- _threadSync.wait( m );
- }
-
- if ( err.size() ){
- errmsg = err;
- return false;
- }
-
- log() << "db is now locked, no writes allowed. db.fsyncUnlock() to unlock" << endl;
- log() << " For more info see " << FSyncCommand::url() << endl;
- result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock");
- result.append("seeAlso", FSyncCommand::url());
+ stdx::lock_guard<SimpleMutex> lk(m);
+ err = "";
+ (new FSyncLockThread())->go();
+ while (!locked && err.size() == 0) {
+ _threadSync.wait(m);
}
- else {
- // the simple fsync command case
- if (sync) {
- // can this be GlobalRead? and if it can, it should be nongreedy.
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite w(txn->lockState());
- getDur().commitNow(txn);
-
- // No WriteUnitOfWork needed, as this does no writes of its own.
- }
-
-                // Take a global IS lock to ensure the storage engine is not shut down
- Lock::GlobalLock global(txn->lockState(), MODE_IS, UINT_MAX);
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- result.append( "numFiles" , storageEngine->flushAllFiles( sync ) );
- }
- return 1;
- }
- } fsyncCmd;
- namespace {
- bool unlockFsync();
- } // namespace
+ if (err.size()) {
+ errmsg = err;
+ return false;
+ }
- class FSyncUnlockCommand : public Command {
- public:
+ log() << "db is now locked, no writes allowed. db.fsyncUnlock() to unlock" << endl;
+ log() << " For more info see " << FSyncCommand::url() << endl;
+ result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock");
+ result.append("seeAlso", FSyncCommand::url());
- FSyncUnlockCommand() : Command("fsyncUnlock") {}
+ } else {
+ // the simple fsync command case
+ if (sync) {
+ // can this be GlobalRead? and if it can, it should be nongreedy.
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite w(txn->lockState());
+ getDur().commitNow(txn);
- bool isWriteCommandForConfigServer() const override { return false; }
+ // No WriteUnitOfWork needed, as this does no writes of its own.
+ }
- bool slaveOk() const override { return true; }
+        // Take a global IS lock to ensure the storage engine is not shut down
+ Lock::GlobalLock global(txn->lockState(), MODE_IS, UINT_MAX);
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ result.append("numFiles", storageEngine->flushAllFiles(sync));
+ }
+ return 1;
+ }
+} fsyncCmd;
- bool adminOnly() const override { return true; }
+namespace {
+bool unlockFsync();
+} // namespace
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) override {
+class FSyncUnlockCommand : public Command {
+public:
+ FSyncUnlockCommand() : Command("fsyncUnlock") {}
- bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- ActionType::unlock);
+ bool isWriteCommandForConfigServer() const override {
+ return false;
+ }
- return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ bool slaveOk() const override {
+ return true;
+ }
- bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) override {
+ bool adminOnly() const override {
+ return true;
+ }
- log() << "command: unlock requested";
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
+ bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::unlock);
- if (unlockFsync()) {
- result.append("info", "unlock completed");
- return true;
- }
- else {
- errmsg = "not locked";
- return false;
- }
- }
+ return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
- } unlockFsyncCmd;
+ bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ log() << "command: unlock requested";
- SimpleMutex filesLockedFsync;
+ if (unlockFsync()) {
+ result.append("info", "unlock completed");
+ return true;
+ } else {
+ errmsg = "not locked";
+ return false;
+ }
+ }
- void FSyncLockThread::doRealWork() {
- stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
+} unlockFsyncCmd;
- OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite global(txn.lockState()); // No WriteUnitOfWork needed
+SimpleMutex filesLockedFsync;
- stdx::lock_guard<SimpleMutex> lk(fsyncCmd.m);
+void FSyncLockThread::doRealWork() {
+ stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
- invariant(!fsyncCmd.locked); // impossible to get here if locked is true
- try {
- getDur().syncDataAndTruncateJournal(&txn);
- }
- catch( std::exception& e ) {
- error() << "error doing syncDataAndTruncateJournal: " << e.what() << endl;
- fsyncCmd.err = e.what();
- fsyncCmd._threadSync.notify_one();
- fsyncCmd.locked = false;
- return;
- }
+ OperationContextImpl txn;
+ ScopedTransaction transaction(&txn, MODE_X);
+ Lock::GlobalWrite global(txn.lockState()); // No WriteUnitOfWork needed
- txn.lockState()->downgradeGlobalXtoSForMMAPV1();
+ stdx::lock_guard<SimpleMutex> lk(fsyncCmd.m);
- try {
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->flushAllFiles(true);
- }
- catch( std::exception& e ) {
- error() << "error doing flushAll: " << e.what() << endl;
- fsyncCmd.err = e.what();
- fsyncCmd._threadSync.notify_one();
- fsyncCmd.locked = false;
- return;
- }
+ invariant(!fsyncCmd.locked); // impossible to get here if locked is true
+ try {
+ getDur().syncDataAndTruncateJournal(&txn);
+ } catch (std::exception& e) {
+ error() << "error doing syncDataAndTruncateJournal: " << e.what() << endl;
+ fsyncCmd.err = e.what();
+ fsyncCmd._threadSync.notify_one();
+ fsyncCmd.locked = false;
+ return;
+ }
- invariant(!fsyncCmd.locked);
- fsyncCmd.locked = true;
+ txn.lockState()->downgradeGlobalXtoSForMMAPV1();
+ try {
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->flushAllFiles(true);
+ } catch (std::exception& e) {
+ error() << "error doing flushAll: " << e.what() << endl;
+ fsyncCmd.err = e.what();
fsyncCmd._threadSync.notify_one();
+ fsyncCmd.locked = false;
+ return;
+ }
- while ( ! fsyncCmd.pendingUnlock ) {
- fsyncCmd._unlockSync.wait(fsyncCmd.m);
- }
- fsyncCmd.pendingUnlock = false;
+ invariant(!fsyncCmd.locked);
+ fsyncCmd.locked = true;
- fsyncCmd.locked = false;
- fsyncCmd.err = "unlocked";
+ fsyncCmd._threadSync.notify_one();
- fsyncCmd._unlockSync.notify_one();
+ while (!fsyncCmd.pendingUnlock) {
+ fsyncCmd._unlockSync.wait(fsyncCmd.m);
}
+ fsyncCmd.pendingUnlock = false;
+
+ fsyncCmd.locked = false;
+ fsyncCmd.err = "unlocked";
+
+ fsyncCmd._unlockSync.notify_one();
+}
+
+bool lockedForWriting() {
+ return fsyncCmd.locked;
+}
- bool lockedForWriting() {
- return fsyncCmd.locked;
+namespace {
+// @return true if unlocked
+bool unlockFsync() {
+ stdx::lock_guard<SimpleMutex> lk(fsyncCmd.m);
+ if (!fsyncCmd.locked) {
+ return false;
}
-
- namespace {
- // @return true if unlocked
- bool unlockFsync() {
- stdx::lock_guard<SimpleMutex> lk( fsyncCmd.m );
- if( !fsyncCmd.locked ) {
- return false;
- }
- fsyncCmd.pendingUnlock = true;
- fsyncCmd._unlockSync.notify_one();
- fsyncCmd._threadSync.notify_one();
+ fsyncCmd.pendingUnlock = true;
+ fsyncCmd._unlockSync.notify_one();
+ fsyncCmd._threadSync.notify_one();
- while ( fsyncCmd.locked ) {
- fsyncCmd._unlockSync.wait( fsyncCmd.m );
- }
- return true;
- }
- } // namespace
+ while (fsyncCmd.locked) {
+ fsyncCmd._unlockSync.wait(fsyncCmd.m);
+ }
+ return true;
+}
+} // namespace
}
diff --git a/src/mongo/db/commands/fsync.h b/src/mongo/db/commands/fsync.h
index 4072a1f6e50..1442a627d00 100644
--- a/src/mongo/db/commands/fsync.h
+++ b/src/mongo/db/commands/fsync.h
@@ -31,7 +31,7 @@
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
- // Use this for blocking during an fsync-and-lock
- extern SimpleMutex filesLockedFsync;
- bool lockedForWriting();
+// Use this for blocking during an fsync-and-lock
+extern SimpleMutex filesLockedFsync;
+bool lockedForWriting();
} // namespace mongo
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 6f44749925d..5be50f433f6 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -53,271 +53,276 @@
namespace mongo {
- using std::unique_ptr;
- using std::stringstream;
-
- class Geo2dFindNearCmd : public Command {
- public:
- Geo2dFindNearCmd() : Command("geoNear") {}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- bool slaveOk() const { return true; }
- bool slaveOverrideOk() const { return true; }
-
- void help(stringstream& h) const {
- h << "http://dochub.mongodb.org/core/geo#GeospatialIndexing-geoNearCommand";
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+using std::unique_ptr;
+using std::stringstream;
+
+class Geo2dFindNearCmd : public Command {
+public:
+ Geo2dFindNearCmd() : Command("geoNear") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ bool slaveOk() const {
+ return true;
+ }
+ bool slaveOverrideOk() const {
+ return true;
+ }
+
+ void help(stringstream& h) const {
+ h << "http://dochub.mongodb.org/core/geo#GeospatialIndexing-geoNearCommand";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (!cmdObj["start"].eoo()) {
+ errmsg = "using deprecated 'start' argument to geoNear";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- if (!cmdObj["start"].eoo()) {
- errmsg = "using deprecated 'start' argument to geoNear";
- return false;
- }
-
- const NamespaceString nss(parseNs(dbname, cmdObj));
- AutoGetCollectionForRead ctx(txn, nss);
+ const NamespaceString nss(parseNs(dbname, cmdObj));
+ AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = ctx.getCollection();
- if ( !collection ) {
- errmsg = "can't find ns";
- return false;
- }
-
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
-
- // cout << "raw cmd " << cmdObj.toString() << endl;
-
- // We seek to populate this.
- string nearFieldName;
- bool using2DIndex = false;
- if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
- return false;
- }
-
- PointWithCRS point;
- uassert(17304, "'near' field must be point",
- GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());
-
- bool isSpherical = cmdObj["spherical"].trueValue();
- if (!using2DIndex) {
- uassert(17301, "2dsphere index must have spherical: true", isSpherical);
- }
-
- // Build the $near expression for the query.
- BSONObjBuilder nearBob;
- if (isSpherical) {
- nearBob.append("$nearSphere", cmdObj["near"].Obj());
- }
- else {
- nearBob.append("$near", cmdObj["near"].Obj());
- }
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ errmsg = "can't find ns";
+ return false;
+ }
- if (!cmdObj["maxDistance"].eoo()) {
- uassert(17299, "maxDistance must be a number",cmdObj["maxDistance"].isNumber());
- nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
- }
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
- if (!cmdObj["minDistance"].eoo()) {
- uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
- uassert(17300, "minDistance must be a number",cmdObj["minDistance"].isNumber());
- nearBob.append("$minDistance", cmdObj["minDistance"].number());
- }
+ // cout << "raw cmd " << cmdObj.toString() << endl;
- if (!cmdObj["uniqueDocs"].eoo()) {
- warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
- }
+ // We seek to populate this.
+ string nearFieldName;
+ bool using2DIndex = false;
+ if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
+ return false;
+ }
- // And, build the full query expression.
- BSONObjBuilder queryBob;
- queryBob.append(nearFieldName, nearBob.obj());
- if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
- queryBob.appendElements(cmdObj["query"].Obj());
- }
- BSONObj rewritten = queryBob.obj();
+ PointWithCRS point;
+ uassert(17304,
+ "'near' field must be point",
+ GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());
- // cout << "rewritten query: " << rewritten.toString() << endl;
+ bool isSpherical = cmdObj["spherical"].trueValue();
+ if (!using2DIndex) {
+ uassert(17301, "2dsphere index must have spherical: true", isSpherical);
+ }
- long long numWanted = 100;
- const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
- BSONElement eNumWanted = cmdObj[limitName];
- if (!eNumWanted.eoo()) {
- uassert(17303, "limit must be number", eNumWanted.isNumber());
- numWanted = eNumWanted.safeNumberLong();
- uassert(17302, "limit must be >=0", numWanted >= 0);
- }
+ // Build the $near expression for the query.
+ BSONObjBuilder nearBob;
+ if (isSpherical) {
+ nearBob.append("$nearSphere", cmdObj["near"].Obj());
+ } else {
+ nearBob.append("$near", cmdObj["near"].Obj());
+ }
- bool includeLocs = false;
- if (!cmdObj["includeLocs"].eoo()) {
- includeLocs = cmdObj["includeLocs"].trueValue();
- }
+ if (!cmdObj["maxDistance"].eoo()) {
+ uassert(17299, "maxDistance must be a number", cmdObj["maxDistance"].isNumber());
+ nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
+ }
- double distanceMultiplier = 1.0;
- BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
- if (!eDistanceMultiplier.eoo()) {
- uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
- distanceMultiplier = eDistanceMultiplier.number();
- uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
- }
+ if (!cmdObj["minDistance"].eoo()) {
+ uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
+ uassert(17300, "minDistance must be a number", cmdObj["minDistance"].isNumber());
+ nearBob.append("$minDistance", cmdObj["minDistance"].number());
+ }
- BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
- "$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));
-
- CanonicalQuery* cq;
- const WhereCallbackReal whereCallback(txn, nss.db());
-
- if (!CanonicalQuery::canonicalize(nss,
- rewritten,
- BSONObj(),
- projObj,
- 0,
- numWanted,
- BSONObj(),
- &cq,
- whereCallback).isOK()) {
- errmsg = "Can't parse filter / create query";
- return false;
- }
+ if (!cmdObj["uniqueDocs"].eoo()) {
+ warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
+ }
- // Prevent chunks from being cleaned up during yields - this allows us to only check the
- // version on initial entry into geoNear.
- RangePreserver preserver(collection);
+ // And, build the full query expression.
+ BSONObjBuilder queryBob;
+ queryBob.append(nearFieldName, nearBob.obj());
+ if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
+ queryBob.appendElements(cmdObj["query"].Obj());
+ }
+ BSONObj rewritten = queryBob.obj();
- PlanExecutor* rawExec;
- if (!getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, 0).isOK()) {
- errmsg = "can't get query executor";
- return false;
- }
+ // cout << "rewritten query: " << rewritten.toString() << endl;
- unique_ptr<PlanExecutor> exec(rawExec);
-
- double totalDistance = 0;
- BSONObjBuilder resultBuilder(result.subarrayStart("results"));
- double farthestDist = 0;
-
- BSONObj currObj;
- long long results = 0;
- while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
-
- // Come up with the correct distance.
- double dist = currObj["$dis"].number() * distanceMultiplier;
- totalDistance += dist;
- if (dist > farthestDist) { farthestDist = dist; }
-
- // Strip out '$dis' and '$pt' from the result obj. The rest gets added as 'obj'
- // in the command result.
- BSONObjIterator resIt(currObj);
- BSONObjBuilder resBob;
- while (resIt.more()) {
- BSONElement elt = resIt.next();
- if (!mongoutils::str::equals("$pt", elt.fieldName())
- && !mongoutils::str::equals("$dis", elt.fieldName())) {
- resBob.append(elt);
- }
- }
- BSONObj resObj = resBob.obj();
+ long long numWanted = 100;
+ const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
+ BSONElement eNumWanted = cmdObj[limitName];
+ if (!eNumWanted.eoo()) {
+ uassert(17303, "limit must be number", eNumWanted.isNumber());
+ numWanted = eNumWanted.safeNumberLong();
+ uassert(17302, "limit must be >=0", numWanted >= 0);
+ }
- // Don't make a too-big result object.
- if (resultBuilder.len() + resObj.objsize()> BSONObjMaxUserSize) {
- warning() << "Too many geoNear results for query " << rewritten.toString()
- << ", truncating output.";
- break;
- }
+ bool includeLocs = false;
+ if (!cmdObj["includeLocs"].eoo()) {
+ includeLocs = cmdObj["includeLocs"].trueValue();
+ }
- // Add the next result to the result builder.
- BSONObjBuilder oneResultBuilder(
- resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
- oneResultBuilder.append("dis", dist);
- if (includeLocs) {
- oneResultBuilder.appendAs(currObj["$pt"], "loc");
- }
- oneResultBuilder.append("obj", resObj);
- oneResultBuilder.done();
- ++results;
- }
+ double distanceMultiplier = 1.0;
+ BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
+ if (!eDistanceMultiplier.eoo()) {
+ uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
+ distanceMultiplier = eDistanceMultiplier.number();
+ uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
+ }
- resultBuilder.done();
+ BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) << "$dis"
+ << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));
- // Fill out the stats subobj.
- BSONObjBuilder stats(result.subobjStart("stats"));
+ CanonicalQuery* cq;
+ const WhereCallbackReal whereCallback(txn, nss.db());
- // Fill in nscanned from the explain.
- PlanSummaryStats summary;
- Explain::getSummaryStats(exec.get(), &summary);
- stats.appendNumber("nscanned", summary.totalKeysExamined);
- stats.appendNumber("objectsLoaded", summary.totalDocsExamined);
+ if (!CanonicalQuery::canonicalize(
+ nss, rewritten, BSONObj(), projObj, 0, numWanted, BSONObj(), &cq, whereCallback)
+ .isOK()) {
+ errmsg = "Can't parse filter / create query";
+ return false;
+ }
- stats.append("avgDistance", totalDistance / results);
- stats.append("maxDistance", farthestDist);
- stats.append("time", CurOp::get(txn)->elapsedMillis());
- stats.done();
+ // Prevent chunks from being cleaned up during yields - this allows us to only check the
+ // version on initial entry into geoNear.
+ RangePreserver preserver(collection);
- return true;
+ PlanExecutor* rawExec;
+ if (!getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, 0).isOK()) {
+ errmsg = "can't get query executor";
+ return false;
}
- private:
- bool getFieldName(OperationContext* txn, Collection* collection, IndexCatalog* indexCatalog,
- string* fieldOut, string* errOut, bool *isFrom2D) {
- vector<IndexDescriptor*> idxs;
+ unique_ptr<PlanExecutor> exec(rawExec);
- // First, try 2d.
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2D, idxs);
- if (idxs.size() > 1) {
- *errOut = "more than one 2d index, not sure which to run geoNear on";
- return false;
+ double totalDistance = 0;
+ BSONObjBuilder resultBuilder(result.subarrayStart("results"));
+ double farthestDist = 0;
+
+ BSONObj currObj;
+ long long results = 0;
+ while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
+ // Come up with the correct distance.
+ double dist = currObj["$dis"].number() * distanceMultiplier;
+ totalDistance += dist;
+ if (dist > farthestDist) {
+ farthestDist = dist;
}
- if (1 == idxs.size()) {
- BSONObj indexKp = idxs[0]->keyPattern();
- BSONObjIterator kpIt(indexKp);
- while (kpIt.more()) {
- BSONElement elt = kpIt.next();
- if (String == elt.type() && IndexNames::GEO_2D == elt.valuestr()) {
- *fieldOut = elt.fieldName();
- *isFrom2D = true;
- return true;
- }
+ // Strip out '$dis' and '$pt' from the result obj. The rest gets added as 'obj'
+ // in the command result.
+ BSONObjIterator resIt(currObj);
+ BSONObjBuilder resBob;
+ while (resIt.more()) {
+ BSONElement elt = resIt.next();
+ if (!mongoutils::str::equals("$pt", elt.fieldName()) &&
+ !mongoutils::str::equals("$dis", elt.fieldName())) {
+ resBob.append(elt);
}
}
+ BSONObj resObj = resBob.obj();
- // Next, 2dsphere.
- idxs.clear();
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2DSPHERE, idxs);
- if (0 == idxs.size()) {
- *errOut = "no geo indices for geoNear";
- return false;
+ // Don't make a too-big result object.
+ if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
+ warning() << "Too many geoNear results for query " << rewritten.toString()
+ << ", truncating output.";
+ break;
}
- if (idxs.size() > 1) {
- *errOut = "more than one 2dsphere index, not sure which to run geoNear on";
- return false;
+ // Add the next result to the result builder.
+ BSONObjBuilder oneResultBuilder(
+ resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
+ oneResultBuilder.append("dis", dist);
+ if (includeLocs) {
+ oneResultBuilder.appendAs(currObj["$pt"], "loc");
}
+ oneResultBuilder.append("obj", resObj);
+ oneResultBuilder.done();
+ ++results;
+ }
- // 1 == idx.size()
+ resultBuilder.done();
+
+ // Fill out the stats subobj.
+ BSONObjBuilder stats(result.subobjStart("stats"));
+
+ // Fill in nscanned from the explain.
+ PlanSummaryStats summary;
+ Explain::getSummaryStats(exec.get(), &summary);
+ stats.appendNumber("nscanned", summary.totalKeysExamined);
+ stats.appendNumber("objectsLoaded", summary.totalDocsExamined);
+
+ stats.append("avgDistance", totalDistance / results);
+ stats.append("maxDistance", farthestDist);
+ stats.append("time", CurOp::get(txn)->elapsedMillis());
+ stats.done();
+
+ return true;
+ }
+
+private:
+ bool getFieldName(OperationContext* txn,
+ Collection* collection,
+ IndexCatalog* indexCatalog,
+ string* fieldOut,
+ string* errOut,
+ bool* isFrom2D) {
+ vector<IndexDescriptor*> idxs;
+
+ // First, try 2d.
+ collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2D, idxs);
+ if (idxs.size() > 1) {
+ *errOut = "more than one 2d index, not sure which to run geoNear on";
+ return false;
+ }
+
+ if (1 == idxs.size()) {
BSONObj indexKp = idxs[0]->keyPattern();
BSONObjIterator kpIt(indexKp);
while (kpIt.more()) {
BSONElement elt = kpIt.next();
- if (String == elt.type() && IndexNames::GEO_2DSPHERE == elt.valuestr()) {
+ if (String == elt.type() && IndexNames::GEO_2D == elt.valuestr()) {
*fieldOut = elt.fieldName();
- *isFrom2D = false;
+ *isFrom2D = true;
return true;
}
}
+ }
+ // Next, 2dsphere.
+ idxs.clear();
+ collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2DSPHERE, idxs);
+ if (0 == idxs.size()) {
+ *errOut = "no geo indices for geoNear";
return false;
}
- } geo2dFindNearCmd;
+
+ if (idxs.size() > 1) {
+ *errOut = "more than one 2dsphere index, not sure which to run geoNear on";
+ return false;
+ }
+
+ // 1 == idx.size()
+ BSONObj indexKp = idxs[0]->keyPattern();
+ BSONObjIterator kpIt(indexKp);
+ while (kpIt.more()) {
+ BSONElement elt = kpIt.next();
+ if (String == elt.type() && IndexNames::GEO_2DSPHERE == elt.valuestr()) {
+ *fieldOut = elt.fieldName();
+ *isFrom2D = false;
+ return true;
+ }
+ }
+
+ return false;
+ }
+} geo2dFindNearCmd;
} // namespace mongo
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index ac80e1de823..2e5cd625086 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -44,264 +44,267 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- /* reset any errors so that getlasterror comes back clean.
-
- useful before performing a long series of operations where we want to
- see if any of the operations triggered an error, but don't want to check
-       after each op as that would be a client/server turnaround.
- */
- class CmdResetError : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const {
- return true;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual void help( stringstream& help ) const {
- help << "reset error state (used with getpreverror)";
+using std::string;
+using std::stringstream;
+
+/* reset any errors so that getlasterror comes back clean.
+
+ useful before performing a long series of operations where we want to
+ see if any of the operations triggered an error, but don't want to check
+   after each op as that would be a client/server turnaround.
+*/
+class CmdResetError : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual void help(stringstream& help) const {
+ help << "reset error state (used with getpreverror)";
+ }
+ CmdResetError() : Command("resetError", false, "reseterror") {}
+ bool run(OperationContext* txn,
+ const string& db,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ LastError::get(txn->getClient()).reset();
+ return true;
+ }
+} cmdResetError;
+
+class CmdGetLastError : public Command {
+public:
+ CmdGetLastError() : Command("getLastError", false, "getlasterror") {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual void help(stringstream& help) const {
+ LastError::get(cc()).disable(); // SERVER-11492
+ help << "return error status of the last operation on this connection\n"
+ << "options:\n"
+ << " { fsync:true } - fsync before returning, or wait for journal commit if running "
+ "with --journal\n"
+ << " { j:true } - wait for journal commit if running with --journal\n"
+ << " { w:n } - await replication to n servers (including self) before returning\n"
+ << " { w:'majority' } - await replication to majority of set\n"
+ << " { wtimeout:m} - timeout for w in m milliseconds";
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ //
+ // Correct behavior here is very finicky.
+ //
+ // 1. The first step is to append the error that occurred on the previous operation.
+ // This adds an "err" field to the command, which is *not* the command failing.
+ //
+ // 2. Next we parse and validate write concern options. If these options are invalid
+ // the command fails no matter what, even if we actually had an error earlier. The
+ // reason for checking here is to match legacy behavior on these kind of failures -
+        //    reason for checking here is to match legacy behavior on these kinds of failures -
+ //
+ // 3. If we had an error on the previous operation, we then return immediately.
+ //
+ // 4. Finally, we actually enforce the write concern. All errors *except* timeout are
+ // reported with ok : 0.0, to match legacy behavior.
+ //
+ // There is a special case when "wOpTime" and "wElectionId" are explicitly provided by
+ // the client (mongos) - in this case we *only* enforce the write concern if it is
+ // valid.
+ //
+ // We always need to either report "err" (if ok : 1) or "errmsg" (if ok : 0), even if
+ // err is null.
+ //
+
+ LastError* le = &LastError::get(txn->getClient());
+ le->disable();
+
+ // Always append lastOp and connectionId
+ Client& c = *txn->getClient();
+ if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
+ repl::ReplicationCoordinator::modeReplSet) {
+ const repl::OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
+ if (!lastOp.isNull()) {
+ result.append("lastOp", lastOp.getTimestamp());
+ // TODO(siyuan) Add "lastOpTerm"
+ }
}
- CmdResetError() : Command("resetError", false, "reseterror") {}
- bool run(OperationContext* txn,
- const string& db,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- LastError::get(txn->getClient()).reset();
- return true;
+
+ // for sharding; also useful in general for debugging
+ result.appendNumber("connectionId", c.getConnectionId());
+
+ Timestamp lastTimestamp;
+ BSONField<Timestamp> wOpTimeField("wOpTime");
+ FieldParser::FieldState extracted =
+ FieldParser::extract(cmdObj, wOpTimeField, &lastTimestamp, &errmsg);
+ if (!extracted) {
+ result.append("badGLE", cmdObj);
+ appendCommandStatus(result, false, errmsg);
+ return false;
}
- } cmdResetError;
-
- class CmdGetLastError : public Command {
- public:
- CmdGetLastError() : Command("getLastError", false, "getlasterror") { }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual void help( stringstream& help ) const {
- LastError::get(cc()).disable(); // SERVER-11492
- help << "return error status of the last operation on this connection\n"
- << "options:\n"
- << " { fsync:true } - fsync before returning, or wait for journal commit if running with --journal\n"
- << " { j:true } - wait for journal commit if running with --journal\n"
- << " { w:n } - await replication to n servers (including self) before returning\n"
- << " { w:'majority' } - await replication to majority of set\n"
- << " { wtimeout:m} - timeout for w in m milliseconds";
+
+ repl::OpTime lastOpTime;
+ bool lastOpTimePresent = extracted != FieldParser::FIELD_NONE;
+ if (!lastOpTimePresent) {
+ // Use the client opTime if no wOpTime is specified
+ lastOpTime = repl::ReplClientInfo::forClient(c).getLastOp();
+ // TODO(siyuan) Fix mongos to supply wOpTimeTerm, then parse out that value here
+ } else {
+ // TODO(siyuan) Don't use the default term after fixing mongos.
+ lastOpTime = repl::OpTime(lastTimestamp, repl::OpTime::kDefaultTerm);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- //
- // Correct behavior here is very finicky.
- //
- // 1. The first step is to append the error that occurred on the previous operation.
- // This adds an "err" field to the command, which is *not* the command failing.
- //
- // 2. Next we parse and validate write concern options. If these options are invalid
- // the command fails no matter what, even if we actually had an error earlier. The
- // reason for checking here is to match legacy behavior on these kind of failures -
- // we'll still get an "err" field for the write error.
- //
- // 3. If we had an error on the previous operation, we then return immediately.
- //
- // 4. Finally, we actually enforce the write concern. All errors *except* timeout are
- // reported with ok : 0.0, to match legacy behavior.
- //
- // There is a special case when "wOpTime" and "wElectionId" are explicitly provided by
- // the client (mongos) - in this case we *only* enforce the write concern if it is
- // valid.
- //
- // We always need to either report "err" (if ok : 1) or "errmsg" (if ok : 0), even if
- // err is null.
- //
-
- LastError *le = &LastError::get(txn->getClient());
- le->disable();
-
- // Always append lastOp and connectionId
- Client& c = *txn->getClient();
- if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
- repl::ReplicationCoordinator::modeReplSet) {
- const repl::OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
- if (!lastOp.isNull()) {
- result.append("lastOp", lastOp.getTimestamp());
- // TODO(siyuan) Add "lastOpTerm"
- }
- }
+ OID electionId;
+ BSONField<OID> wElectionIdField("wElectionId");
+ extracted = FieldParser::extract(cmdObj, wElectionIdField, &electionId, &errmsg);
+ if (!extracted) {
+ result.append("badGLE", cmdObj);
+ appendCommandStatus(result, false, errmsg);
+ return false;
+ }
- // for sharding; also useful in general for debugging
- result.appendNumber( "connectionId" , c.getConnectionId() );
-
- Timestamp lastTimestamp;
- BSONField<Timestamp> wOpTimeField("wOpTime");
- FieldParser::FieldState extracted = FieldParser::extract(cmdObj, wOpTimeField,
- &lastTimestamp, &errmsg);
- if (!extracted) {
- result.append("badGLE", cmdObj);
- appendCommandStatus(result, false, errmsg);
- return false;
- }
+ bool electionIdPresent = extracted != FieldParser::FIELD_NONE;
+ bool errorOccurred = false;
- repl::OpTime lastOpTime;
- bool lastOpTimePresent = extracted != FieldParser::FIELD_NONE;
- if (!lastOpTimePresent) {
- // Use the client opTime if no wOpTime is specified
- lastOpTime = repl::ReplClientInfo::forClient(c).getLastOp();
- // TODO(siyuan) Fix mongos to supply wOpTimeTerm, then parse out that value here
+ // Errors aren't reported when wOpTime is used
+ if (!lastOpTimePresent) {
+ if (le->getNPrev() != 1) {
+ errorOccurred = LastError::noError.appendSelf(result, false);
} else {
- // TODO(siyuan) Don't use the default term after fixing mongos.
- lastOpTime = repl::OpTime(lastTimestamp, repl::OpTime::kDefaultTerm);
- }
-
- OID electionId;
- BSONField<OID> wElectionIdField("wElectionId");
- extracted = FieldParser::extract(cmdObj, wElectionIdField,
- &electionId, &errmsg);
- if (!extracted) {
- result.append("badGLE", cmdObj);
- appendCommandStatus(result, false, errmsg);
- return false;
+ errorOccurred = le->appendSelf(result, false);
}
+ }
- bool electionIdPresent = extracted != FieldParser::FIELD_NONE;
- bool errorOccurred = false;
-
- // Errors aren't reported when wOpTime is used
- if ( !lastOpTimePresent ) {
- if ( le->getNPrev() != 1 ) {
- errorOccurred = LastError::noError.appendSelf( result, false );
- }
- else {
- errorOccurred = le->appendSelf( result, false );
- }
- }
+ BSONObj writeConcernDoc = cmdObj;
+ // Use the default options if we have no gle options aside from wOpTime/wElectionId
+ const int nFields = cmdObj.nFields();
+ bool useDefaultGLEOptions = (nFields == 1) || (nFields == 2 && lastOpTimePresent) ||
+ (nFields == 3 && lastOpTimePresent && electionIdPresent);
- BSONObj writeConcernDoc = cmdObj;
- // Use the default options if we have no gle options aside from wOpTime/wElectionId
- const int nFields = cmdObj.nFields();
- bool useDefaultGLEOptions = (nFields == 1) ||
- (nFields == 2 && lastOpTimePresent) ||
- (nFields == 3 && lastOpTimePresent && electionIdPresent);
+ WriteConcernOptions writeConcern;
- WriteConcernOptions writeConcern;
+ if (useDefaultGLEOptions) {
+ writeConcern = repl::getGlobalReplicationCoordinator()->getGetLastErrorDefault();
+ }
- if (useDefaultGLEOptions) {
- writeConcern = repl::getGlobalReplicationCoordinator()->getGetLastErrorDefault();
- }
+ Status status = writeConcern.parse(writeConcernDoc);
- Status status = writeConcern.parse( writeConcernDoc );
+ //
+ // Validate write concern no matter what, this matches 2.4 behavior
+ //
- //
- // Validate write concern no matter what, this matches 2.4 behavior
- //
+ if (status.isOK()) {
+ // Ensure options are valid for this host
+ status = validateWriteConcern(writeConcern);
+ }
- if ( status.isOK() ) {
- // Ensure options are valid for this host
- status = validateWriteConcern( writeConcern );
- }
+ if (!status.isOK()) {
+ result.append("badGLE", writeConcernDoc);
+ return appendCommandStatus(result, status);
+ }
- if ( !status.isOK() ) {
- result.append( "badGLE", writeConcernDoc );
- return appendCommandStatus( result, status );
- }
+ // Don't wait for replication if there was an error reported - this matches 2.4 behavior
+ if (errorOccurred) {
+ dassert(!lastOpTimePresent);
+ return true;
+ }
- // Don't wait for replication if there was an error reported - this matches 2.4 behavior
- if ( errorOccurred ) {
- dassert( !lastOpTimePresent );
- return true;
- }
+ // No error occurred, so we won't duplicate these fields with write concern errors
+ dassert(result.asTempObj()["err"].eoo());
+ dassert(result.asTempObj()["code"].eoo());
- // No error occurred, so we won't duplicate these fields with write concern errors
- dassert( result.asTempObj()["err"].eoo() );
- dassert( result.asTempObj()["code"].eoo() );
-
- // If we got an electionId, make sure it matches
- if (electionIdPresent) {
- if (repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeReplSet) {
- // Ignore electionIds of 0 from mongos.
- if (electionId != OID()) {
- errmsg = "wElectionId passed but no replication active";
- result.append("code", ErrorCodes::BadValue);
- return false;
- }
- }
- else {
- if (electionId != repl::getGlobalReplicationCoordinator()->getElectionId()) {
- LOG(3) << "oid passed in is " << electionId
- << ", but our id is "
- << repl::getGlobalReplicationCoordinator()->getElectionId();
- errmsg = "election occurred after write";
- result.append("code", ErrorCodes::WriteConcernFailed);
- return false;
- }
+ // If we got an electionId, make sure it matches
+ if (electionIdPresent) {
+ if (repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeReplSet) {
+ // Ignore electionIds of 0 from mongos.
+ if (electionId != OID()) {
+ errmsg = "wElectionId passed but no replication active";
+ result.append("code", ErrorCodes::BadValue);
+ return false;
+ }
+ } else {
+ if (electionId != repl::getGlobalReplicationCoordinator()->getElectionId()) {
+ LOG(3) << "oid passed in is " << electionId << ", but our id is "
+ << repl::getGlobalReplicationCoordinator()->getElectionId();
+ errmsg = "election occurred after write";
+ result.append("code", ErrorCodes::WriteConcernFailed);
+ return false;
}
}
+ }
- txn->setWriteConcern(writeConcern);
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- txn->setMessage_inlock( "waiting for write concern" );
- }
-
- WriteConcernResult wcResult;
- status = waitForWriteConcern( txn, lastOpTime, &wcResult );
- wcResult.appendTo( writeConcern, &result );
-
- // For backward compatibility with 2.4, wtimeout returns ok : 1.0
- if ( wcResult.wTimedOut ) {
- dassert( !wcResult.err.empty() ); // so we always report err
- dassert( !status.isOK() );
- result.append( "errmsg", "timed out waiting for slaves" );
- result.append( "code", status.code() );
- return true;
- }
-
- return appendCommandStatus( result, status );
+ txn->setWriteConcern(writeConcern);
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ txn->setMessage_inlock("waiting for write concern");
}
- } cmdGetLastError;
+ WriteConcernResult wcResult;
+ status = waitForWriteConcern(txn, lastOpTime, &wcResult);
+ wcResult.appendTo(writeConcern, &result);
- class CmdGetPrevError : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream& help ) const {
- help << "check for errors since last reseterror commandcal";
- }
- virtual bool slaveOk() const {
- return true;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- CmdGetPrevError() : Command("getPrevError", false, "getpreverror") {}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- LastError *le = &LastError::get(txn->getClient());
- le->disable();
- le->appendSelf(result, true);
- if (le->isValid())
- result.append("nPrev", le->getNPrev());
- else
- result.append("nPrev", -1);
+ // For backward compatibility with 2.4, wtimeout returns ok : 1.0
+ if (wcResult.wTimedOut) {
+ dassert(!wcResult.err.empty()); // so we always report err
+ dassert(!status.isOK());
+ result.append("errmsg", "timed out waiting for slaves");
+ result.append("code", status.code());
return true;
}
- } cmdGetPrevError;
+ return appendCommandStatus(result, status);
+ }
+
+} cmdGetLastError;
+
+class CmdGetPrevError : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+        help << "check for errors since last resetError command";
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ CmdGetPrevError() : Command("getPrevError", false, "getpreverror") {}
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ LastError* le = &LastError::get(txn->getClient());
+ le->disable();
+ le->appendSelf(result, true);
+ if (le->isValid())
+ result.append("nPrev", le->getNPrev());
+ else
+ result.append("nPrev", -1);
+ return true;
+ }
+} cmdGetPrevError;
}
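
Aside for readers of the hunk above: the options listed in the help text (w, wtimeout, j, fsync) are what getLastError/getPrevError accept on the wire. Below is a minimal, illustrative sketch of driving them from the legacy C++ client; the header path, connection string, and database name are assumptions for the example, not anything taken from this change.

    // Illustrative only: issue getLastError with a write concern, then getPrevError.
    #include <iostream>
    #include "mongo/client/dbclient.h"  // assumed legacy C++ driver header

    int main() {
        mongo::DBClientConnection conn;
        std::string err;
        if (!conn.connect("localhost:27017", err)) {  // assumed host
            std::cout << "connect failed: " << err << std::endl;
            return 1;
        }

        // Options mirror the help text above: { w: ..., wtimeout: ..., j: ..., fsync: ... }.
        mongo::BSONObj info;
        conn.runCommand("test",
                        BSON("getLastError" << 1 << "w"
                                            << "majority"
                                            << "wtimeout" << 5000),
                        info);
        std::cout << "getLastError: " << info.jsonString() << std::endl;

        // getPrevError reports errors seen since the last resetError on this connection.
        conn.runCommand("test", BSON("getPrevError" << 1), info);
        std::cout << "getPrevError: " << info.jsonString() << std::endl;
        return 0;
    }
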
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index e075fbd047e..23805bf1123 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -57,351 +57,363 @@
namespace mongo {
- /**
- * A command for running getMore() against an existing cursor registered with a CursorManager.
- * Used to generate the next batch of results for a ClientCursor.
- *
- * Can be used in combination with any cursor-generating command (e.g. find, aggregate,
- * listIndexes).
- */
- class GetMoreCmd : public Command {
- MONGO_DISALLOW_COPYING(GetMoreCmd);
- public:
- GetMoreCmd() : Command("getMore") { }
+/**
+ * A command for running getMore() against an existing cursor registered with a CursorManager.
+ * Used to generate the next batch of results for a ClientCursor.
+ *
+ * Can be used in combination with any cursor-generating command (e.g. find, aggregate,
+ * listIndexes).
+ */
+class GetMoreCmd : public Command {
+ MONGO_DISALLOW_COPYING(GetMoreCmd);
- bool isWriteCommandForConfigServer() const override { return false; }
+public:
+ GetMoreCmd() : Command("getMore") {}
- bool slaveOk() const override { return false; }
+ bool isWriteCommandForConfigServer() const override {
+ return false;
+ }
- bool slaveOverrideOk() const override { return true; }
+ bool slaveOk() const override {
+ return false;
+ }
- bool maintenanceOk() const override { return false; }
+ bool slaveOverrideOk() const override {
+ return true;
+ }
- bool adminOnly() const override { return false; }
+ bool maintenanceOk() const override {
+ return false;
+ }
- void help(std::stringstream& help) const override {
- help << "retrieve more results from an existing cursor";
- }
+ bool adminOnly() const override {
+ return false;
+ }
- /**
- * A getMore command increments the getMore counter, not the command counter.
- */
- bool shouldAffectCommandCounter() const override { return false; }
+ void help(std::stringstream& help) const override {
+ help << "retrieve more results from an existing cursor";
+ }
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return GetMoreRequest::parseNs(dbname, cmdObj);
+ /**
+ * A getMore command increments the getMore counter, not the command counter.
+ */
+ bool shouldAffectCommandCounter() const override {
+ return false;
+ }
+
+ std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
+ return GetMoreRequest::parseNs(dbname, cmdObj);
+ }
+
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
+ StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
+ if (!parseStatus.isOK()) {
+ return parseStatus.getStatus();
+ }
+ const GetMoreRequest& request = parseStatus.getValue();
+
+ return AuthorizationSession::get(client)
+ ->checkAuthForGetMore(request.nss, request.cursorid);
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ // Counted as a getMore, not as a command.
+ globalOpCounters.gotGetMore();
+
+ if (txn->getClient()->isInDirectClient()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation, "Cannot run getMore command from eval()"));
}
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) override {
- StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
- if (!parseStatus.isOK()) {
- return parseStatus.getStatus();
+ StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
+ if (!parseStatus.isOK()) {
+ return appendCommandStatus(result, parseStatus.getStatus());
+ }
+ const GetMoreRequest& request = parseStatus.getValue();
+
+ // Depending on the type of cursor being operated on, we hold locks for the whole
+ // getMore, or none of the getMore, or part of the getMore. The three cases in detail:
+ //
+ // 1) Normal cursor: we lock with "ctx" and hold it for the whole getMore.
+ // 2) Cursor owned by global cursor manager: we don't lock anything. These cursors
+ // don't own any collection state.
+ // 3) Agg cursor: we lock with "ctx", then release, then relock with "unpinDBLock" and
+ // "unpinCollLock". This is because agg cursors handle locking internally (hence the
+ // release), but the pin and unpin of the cursor must occur under the collection
+ // lock. We don't use our AutoGetCollectionForRead "ctx" to relock, because
+ // AutoGetCollectionForRead checks the sharding version (and we want the relock for
+ // the unpin to succeed even if the sharding version has changed).
+ //
+ // Note that we declare our locks before our ClientCursorPin, in order to ensure that
+ // the pin's destructor is called before the lock destructors (so that the unpin occurs
+ // under the lock).
+ std::unique_ptr<AutoGetCollectionForRead> ctx;
+ std::unique_ptr<Lock::DBLock> unpinDBLock;
+ std::unique_ptr<Lock::CollectionLock> unpinCollLock;
+
+ CursorManager* cursorManager;
+ CursorManager* globalCursorManager = CursorManager::getGlobalCursorManager();
+ if (globalCursorManager->ownsCursorId(request.cursorid)) {
+ cursorManager = globalCursorManager;
+ } else {
+ ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
+ Collection* collection = ctx->getCollection();
+ if (!collection) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::OperationFailed,
+ "collection dropped between getMore calls"));
}
- const GetMoreRequest& request = parseStatus.getValue();
+ cursorManager = collection->getCursorManager();
+ }
- return AuthorizationSession::get(client)->checkAuthForGetMore(request.nss,
- request.cursorid);
+ ClientCursorPin ccPin(cursorManager, request.cursorid);
+ ClientCursor* cursor = ccPin.c();
+ if (!cursor) {
+ // We didn't find the cursor.
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::CursorNotFound,
+ str::stream() << "Cursor not found, cursor id: " << request.cursorid));
}
- bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- // Counted as a getMore, not as a command.
- globalOpCounters.gotGetMore();
+ if (request.nss.ns() != cursor->ns()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::Unauthorized,
+ str::stream() << "Requested getMore on namespace '" << request.nss.ns()
+ << "', but cursor belongs to a different namespace"));
+ }
- if (txn->getClient()->isInDirectClient()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot run getMore command from eval()"));
- }
+ const bool hasOwnMaxTime = CurOp::get(txn)->isMaxTimeSet();
- StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus.getStatus());
- }
- const GetMoreRequest& request = parseStatus.getValue();
-
- // Depending on the type of cursor being operated on, we hold locks for the whole
- // getMore, or none of the getMore, or part of the getMore. The three cases in detail:
- //
- // 1) Normal cursor: we lock with "ctx" and hold it for the whole getMore.
- // 2) Cursor owned by global cursor manager: we don't lock anything. These cursors
- // don't own any collection state.
- // 3) Agg cursor: we lock with "ctx", then release, then relock with "unpinDBLock" and
- // "unpinCollLock". This is because agg cursors handle locking internally (hence the
- // release), but the pin and unpin of the cursor must occur under the collection
- // lock. We don't use our AutoGetCollectionForRead "ctx" to relock, because
- // AutoGetCollectionForRead checks the sharding version (and we want the relock for
- // the unpin to succeed even if the sharding version has changed).
- //
- // Note that we declare our locks before our ClientCursorPin, in order to ensure that
- // the pin's destructor is called before the lock destructors (so that the unpin occurs
- // under the lock).
- std::unique_ptr<AutoGetCollectionForRead> ctx;
- std::unique_ptr<Lock::DBLock> unpinDBLock;
- std::unique_ptr<Lock::CollectionLock> unpinCollLock;
-
- CursorManager* cursorManager;
- CursorManager* globalCursorManager = CursorManager::getGlobalCursorManager();
- if (globalCursorManager->ownsCursorId(request.cursorid)) {
- cursorManager = globalCursorManager;
- }
- else {
- ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
- Collection* collection = ctx->getCollection();
- if (!collection) {
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- "collection dropped between getMore calls"));
- }
- cursorManager = collection->getCursorManager();
- }
+ // Validation related to awaitData.
+ if (isCursorAwaitData(cursor)) {
+ invariant(isCursorTailable(cursor));
- ClientCursorPin ccPin(cursorManager, request.cursorid);
- ClientCursor* cursor = ccPin.c();
- if (!cursor) {
- // We didn't find the cursor.
- return appendCommandStatus(result, Status(ErrorCodes::CursorNotFound, str::stream()
- << "Cursor not found, cursor id: " << request.cursorid));
+ if (!hasOwnMaxTime) {
+ Status status(ErrorCodes::BadValue,
+ str::stream() << "Must set maxTimeMS on a getMore if the initial "
+ << "query had 'awaitData' set: " << cmdObj);
+ return appendCommandStatus(result, status);
}
- if (request.nss.ns() != cursor->ns()) {
- return appendCommandStatus(result, Status(ErrorCodes::Unauthorized, str::stream()
- << "Requested getMore on namespace '" << request.nss.ns()
- << "', but cursor belongs to a different namespace"));
+ if (cursor->isAggCursor()) {
+ Status status(ErrorCodes::BadValue,
+ "awaitData cannot be set on an aggregation cursor");
+ return appendCommandStatus(result, status);
}
+ }
- const bool hasOwnMaxTime = CurOp::get(txn)->isMaxTimeSet();
-
- // Validation related to awaitData.
- if (isCursorAwaitData(cursor)) {
- invariant(isCursorTailable(cursor));
+ // On early return, get rid of the cursor.
+ ScopeGuard cursorFreer = MakeGuard(&GetMoreCmd::cleanupCursor, txn, &ccPin, request);
- if (!hasOwnMaxTime) {
- Status status(ErrorCodes::BadValue,
- str::stream() << "Must set maxTimeMS on a getMore if the initial "
- << "query had 'awaitData' set: " << cmdObj);
- return appendCommandStatus(result, status);
- }
+ if (!cursor->hasRecoveryUnit()) {
+ // Start using a new RecoveryUnit.
+ cursor->setOwnedRecoveryUnit(
+ getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
+ }
- if (cursor->isAggCursor()) {
- Status status(ErrorCodes::BadValue,
- "awaitData cannot be set on an aggregation cursor");
- return appendCommandStatus(result, status);
- }
- }
+ // Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
+ ScopedRecoveryUnitSwapper ruSwapper(cursor, txn);
- // On early return, get rid of the cursor.
- ScopeGuard cursorFreer = MakeGuard(&GetMoreCmd::cleanupCursor, txn, &ccPin, request);
+ // Reset timeout timer on the cursor since the cursor is still in use.
+ cursor->setIdleTime(0);
- if (!cursor->hasRecoveryUnit()) {
- // Start using a new RecoveryUnit.
- cursor->setOwnedRecoveryUnit(
- getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
- }
+ // If there is no time limit set directly on this getMore command, but the operation
+ // that spawned this cursor had a time limit set, then we have to apply any leftover
+ // time to this getMore.
+ if (!hasOwnMaxTime) {
+ CurOp::get(txn)->setMaxTimeMicros(cursor->getLeftoverMaxTimeMicros());
+ }
+ txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
- // Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
- ScopedRecoveryUnitSwapper ruSwapper(cursor, txn);
+ if (cursor->isAggCursor()) {
+ // Agg cursors handle their own locking internally.
+ ctx.reset(); // unlocks
+ }
- // Reset timeout timer on the cursor since the cursor is still in use.
- cursor->setIdleTime(0);
+ PlanExecutor* exec = cursor->getExecutor();
+ exec->restoreState(txn);
- // If there is no time limit set directly on this getMore command, but the operation
- // that spawned this cursor had a time limit set, then we have to apply any leftover
- // time to this getMore.
- if (!hasOwnMaxTime) {
- CurOp::get(txn)->setMaxTimeMicros(cursor->getLeftoverMaxTimeMicros());
- }
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ // If we're tailing a capped collection, retrieve a monotonically increasing insert
+ // counter.
+ uint64_t lastInsertCount = 0;
+ if (isCursorAwaitData(cursor)) {
+ invariant(ctx->getCollection()->isCapped());
+ lastInsertCount = ctx->getCollection()->getCappedInsertNotifier()->getCount();
+ }
- if (cursor->isAggCursor()) {
- // Agg cursors handle their own locking internally.
- ctx.reset(); // unlocks
- }
+ CursorId respondWithId = 0;
+ BSONArrayBuilder nextBatch;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ int numResults = 0;
+ Status batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
+ if (!batchStatus.isOK()) {
+ return appendCommandStatus(result, batchStatus);
+ }
- PlanExecutor* exec = cursor->getExecutor();
+ // If this is an await data cursor, and we hit EOF without generating any results, then
+ // we block waiting for new oplog data to arrive.
+ if (isCursorAwaitData(cursor) && state == PlanExecutor::IS_EOF && numResults == 0) {
+ // Retrieve the notifier which we will wait on until new data arrives. We make sure
+ // to do this in the lock because once we drop the lock it is possible for the
+ // collection to become invalid. The notifier itself will outlive the collection if
+ // the collection is dropped, as we keep a shared_ptr to it.
+ auto notifier = ctx->getCollection()->getCappedInsertNotifier();
+
+ // Save the PlanExecutor and drop our locks.
+ exec->saveState();
+ ctx.reset();
+
+ // Block waiting for data.
+ Microseconds timeout(CurOp::get(txn)->getRemainingMaxTimeMicros());
+ notifier->waitForInsert(lastInsertCount, timeout);
+ notifier.reset();
+
+ ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
exec->restoreState(txn);
- // If we're tailing a capped collection, retrieve a monotonically increasing insert
- // counter.
- uint64_t lastInsertCount = 0;
- if (isCursorAwaitData(cursor)) {
- invariant(ctx->getCollection()->isCapped());
- lastInsertCount = ctx->getCollection()->getCappedInsertNotifier()->getCount();
- }
-
- CursorId respondWithId = 0;
- BSONArrayBuilder nextBatch;
- BSONObj obj;
- PlanExecutor::ExecState state;
- int numResults = 0;
- Status batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
+ // We woke up because either the timed_wait expired, or there was more data. Either
+ // way, attempt to generate another batch of results.
+ batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
if (!batchStatus.isOK()) {
return appendCommandStatus(result, batchStatus);
}
+ }
- // If this is an await data cursor, and we hit EOF without generating any results, then
- // we block waiting for new oplog data to arrive.
- if (isCursorAwaitData(cursor) && state == PlanExecutor::IS_EOF && numResults == 0) {
- // Retrieve the notifier which we will wait on until new data arrives. We make sure
- // to do this in the lock because once we drop the lock it is possible for the
- // collection to become invalid. The notifier itself will outlive the collection if
- // the collection is dropped, as we keep a shared_ptr to it.
- auto notifier = ctx->getCollection()->getCappedInsertNotifier();
-
- // Save the PlanExecutor and drop our locks.
- exec->saveState();
- ctx.reset();
-
- // Block waiting for data.
- Microseconds timeout(CurOp::get(txn)->getRemainingMaxTimeMicros());
- notifier->waitForInsert(lastInsertCount, timeout);
- notifier.reset();
-
- ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
- exec->restoreState(txn);
-
- // We woke up because either the timed_wait expired, or there was more data. Either
- // way, attempt to generate another batch of results.
- batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
- if (!batchStatus.isOK()) {
- return appendCommandStatus(result, batchStatus);
- }
- }
-
- if (shouldSaveCursorGetMore(state, exec, isCursorTailable(cursor))) {
- respondWithId = request.cursorid;
+ if (shouldSaveCursorGetMore(state, exec, isCursorTailable(cursor))) {
+ respondWithId = request.cursorid;
- exec->saveState();
+ exec->saveState();
- // If maxTimeMS was set directly on the getMore rather than being rolled over
- // from a previous find, then don't roll remaining micros over to the next
- // getMore.
- if (!hasOwnMaxTime) {
- cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
- }
+ // If maxTimeMS was set directly on the getMore rather than being rolled over
+ // from a previous find, then don't roll remaining micros over to the next
+ // getMore.
+ if (!hasOwnMaxTime) {
+ cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
+ }
- cursor->incPos(numResults);
+ cursor->incPos(numResults);
- if (isCursorTailable(cursor) && state == PlanExecutor::IS_EOF) {
- // Rather than swapping their existing RU into the client cursor, tailable
- // cursors should get a new recovery unit.
- ruSwapper.dismiss();
- }
- }
- else {
- CurOp::get(txn)->debug().cursorExhausted = true;
+ if (isCursorTailable(cursor) && state == PlanExecutor::IS_EOF) {
+ // Rather than swapping their existing RU into the client cursor, tailable
+ // cursors should get a new recovery unit.
+ ruSwapper.dismiss();
}
+ } else {
+ CurOp::get(txn)->debug().cursorExhausted = true;
+ }
- appendGetMoreResponseObject(respondWithId, request.nss.ns(), nextBatch.arr(), &result);
+ appendGetMoreResponseObject(respondWithId, request.nss.ns(), nextBatch.arr(), &result);
- if (respondWithId) {
- cursorFreer.Dismiss();
+ if (respondWithId) {
+ cursorFreer.Dismiss();
- // If we are operating on an aggregation cursor, then we dropped our collection lock
- // earlier and need to reacquire it in order to clean up our ClientCursorPin.
- if (cursor->isAggCursor()) {
- invariant(NULL == ctx.get());
- unpinDBLock.reset(
- new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
- unpinCollLock.reset(
- new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
- }
+ // If we are operating on an aggregation cursor, then we dropped our collection lock
+ // earlier and need to reacquire it in order to clean up our ClientCursorPin.
+ if (cursor->isAggCursor()) {
+ invariant(NULL == ctx.get());
+ unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
+ unpinCollLock.reset(
+ new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
}
-
- return true;
}
- /**
- * Uses 'cursor' and 'request' to fill out 'nextBatch' with the batch of result documents to
- * be returned by this getMore.
- *
- * Returns the number of documents in the batch in *numResults, which must be initialized to
- * zero by the caller. Returns the final ExecState returned by the cursor in *state.
- *
- * Returns an OK status if the batch was successfully generated, and a non-OK status if the
- * PlanExecutor encounters a failure.
- */
- Status generateBatch(ClientCursor* cursor,
- const GetMoreRequest& request,
- BSONArrayBuilder* nextBatch,
- PlanExecutor::ExecState* state,
- int* numResults) {
- PlanExecutor* exec = cursor->getExecutor();
- const bool isAwaitData = isCursorAwaitData(cursor);
-
- // If an awaitData getMore is killed during this process due to our max time expiring at
- // an interrupt point, we just continue as normal and return rather than reporting a
- // timeout to the user.
- BSONObj obj;
- try {
- while (PlanExecutor::ADVANCED == (*state = exec->getNext(&obj, NULL))) {
- // If adding this object will cause us to exceed the BSON size limit, then we
- // stash it for later.
- if (nextBatch->len() + obj.objsize() > BSONObjMaxUserSize && *numResults > 0) {
- exec->enqueue(obj);
- break;
- }
-
- // Add result to output buffer.
- nextBatch->append(obj);
- (*numResults)++;
-
- if (enoughForGetMore(request.batchSize.value_or(0),
- *numResults, nextBatch->len())) {
- break;
- }
- }
- }
- catch (const UserException& except) {
- if (isAwaitData && except.getCode() == ErrorCodes::ExceededTimeLimit) {
- // We ignore exceptions from interrupt points due to max time expiry for
- // awaitData cursors.
- }
- else {
- throw;
+ return true;
+ }
+
+ /**
+ * Uses 'cursor' and 'request' to fill out 'nextBatch' with the batch of result documents to
+ * be returned by this getMore.
+ *
+ * Returns the number of documents in the batch in *numResults, which must be initialized to
+ * zero by the caller. Returns the final ExecState returned by the cursor in *state.
+ *
+ * Returns an OK status if the batch was successfully generated, and a non-OK status if the
+ * PlanExecutor encounters a failure.
+ */
+ Status generateBatch(ClientCursor* cursor,
+ const GetMoreRequest& request,
+ BSONArrayBuilder* nextBatch,
+ PlanExecutor::ExecState* state,
+ int* numResults) {
+ PlanExecutor* exec = cursor->getExecutor();
+ const bool isAwaitData = isCursorAwaitData(cursor);
+
+ // If an awaitData getMore is killed during this process due to our max time expiring at
+ // an interrupt point, we just continue as normal and return rather than reporting a
+ // timeout to the user.
+ BSONObj obj;
+ try {
+ while (PlanExecutor::ADVANCED == (*state = exec->getNext(&obj, NULL))) {
+ // If adding this object will cause us to exceed the BSON size limit, then we
+ // stash it for later.
+ if (nextBatch->len() + obj.objsize() > BSONObjMaxUserSize && *numResults > 0) {
+ exec->enqueue(obj);
+ break;
}
- }
- if (PlanExecutor::FAILURE == *state || PlanExecutor::DEAD == *state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "GetMore command executor error: " << PlanExecutor::statestr(*state)
- << ", stats: " << Explain::statsToBSON(*stats);
+ // Add result to output buffer.
+ nextBatch->append(obj);
+ (*numResults)++;
- return Status(ErrorCodes::OperationFailed,
- str::stream() << "GetMore command executor error: "
- << WorkingSetCommon::toStatusString(obj));
+ if (enoughForGetMore(
+ request.batchSize.value_or(0), *numResults, nextBatch->len())) {
+ break;
+ }
+ }
+ } catch (const UserException& except) {
+ if (isAwaitData && except.getCode() == ErrorCodes::ExceededTimeLimit) {
+ // We ignore exceptions from interrupt points due to max time expiry for
+ // awaitData cursors.
+ } else {
+ throw;
}
-
- return Status::OK();
}
- /**
- * Called via a ScopeGuard on early return in order to ensure that the ClientCursor gets
- * cleaned up properly.
- */
- static void cleanupCursor(OperationContext* txn,
- ClientCursorPin* ccPin,
- const GetMoreRequest& request) {
- ClientCursor* cursor = ccPin->c();
+ if (PlanExecutor::FAILURE == *state || PlanExecutor::DEAD == *state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "GetMore command executor error: " << PlanExecutor::statestr(*state)
+ << ", stats: " << Explain::statsToBSON(*stats);
- std::unique_ptr<Lock::DBLock> unpinDBLock;
- std::unique_ptr<Lock::CollectionLock> unpinCollLock;
+ return Status(ErrorCodes::OperationFailed,
+ str::stream() << "GetMore command executor error: "
+ << WorkingSetCommon::toStatusString(obj));
+ }
- if (cursor->isAggCursor()) {
- unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
- unpinCollLock.reset(
- new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
- }
+ return Status::OK();
+ }
- ccPin->deleteUnderlying();
+ /**
+ * Called via a ScopeGuard on early return in order to ensure that the ClientCursor gets
+ * cleaned up properly.
+ */
+ static void cleanupCursor(OperationContext* txn,
+ ClientCursorPin* ccPin,
+ const GetMoreRequest& request) {
+ ClientCursor* cursor = ccPin->c();
+
+ std::unique_ptr<Lock::DBLock> unpinDBLock;
+ std::unique_ptr<Lock::CollectionLock> unpinCollLock;
+
+ if (cursor->isAggCursor()) {
+ unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
+ unpinCollLock.reset(
+ new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
}
- } getMoreCmd;
+ ccPin->deleteUnderlying();
+ }
+
+} getMoreCmd;
-} // namespace mongo
+} // namespace mongo
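
For orientation, the command object that GetMoreRequest::parseFromBSON consumes above has the documented getMore shape. A minimal sketch of building one follows; the cursor id, namespace, and the jsobj.h include path are assumptions for illustration:

    // Illustrative only: construct a getMore command document.
    #include <iostream>
    #include "mongo/db/jsobj.h"  // assumed include providing BSON()/BSONObj

    int main() {
        const long long cursorId = 123456789LL;  // would come from a prior find/aggregate reply
        // "collection" is relative to the database the command runs against,
        // matching GetMoreRequest::parseNs(dbname, cmdObj).
        mongo::BSONObj getMoreCmd = BSON("getMore" << cursorId << "collection"
                                                   << "coll"
                                                   << "batchSize" << 2
                                                   << "maxTimeMS" << 1000);
        // maxTimeMS is mandatory here if the original query set 'awaitData' (see run() above).
        std::cout << getMoreCmd.jsonString() << std::endl;
        return 0;
    }
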
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index 31a03fa2543..cdfb9ca0d5a 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -44,176 +44,164 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
- static GroupCommand cmdGroup;
+static GroupCommand cmdGroup;
- GroupCommand::GroupCommand() : Command("group") {}
+GroupCommand::GroupCommand() : Command("group") {}
- Status GroupCommand::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- std::string ns = parseNs(dbname, cmdObj);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
- NamespaceString(ns), ActionType::find)) {
- return Status(ErrorCodes::Unauthorized, "unauthorized");
- }
- return Status::OK();
+Status GroupCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ std::string ns = parseNs(dbname, cmdObj);
+ if (!AuthorizationSession::get(client)
+ ->isAuthorizedForActionsOnNamespace(NamespaceString(ns), ActionType::find)) {
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
}
-
- std::string GroupCommand::parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();
- uassert(17211, "ns has to be set", p["ns"].type() == String);
- return dbname + "." + p["ns"].String();
+ return Status::OK();
+}
+
+std::string GroupCommand::parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();
+ uassert(17211, "ns has to be set", p["ns"].type() == String);
+ return dbname + "." + p["ns"].String();
+}
+
+Status GroupCommand::parseRequest(const string& dbname,
+ const BSONObj& cmdObj,
+ GroupRequest* request) const {
+ request->ns = parseNs(dbname, cmdObj);
+
+ // By default, group requests are regular group not explain of group.
+ request->explain = false;
+
+ const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();
+
+ if (p["cond"].type() == Object) {
+ request->query = p["cond"].embeddedObject().getOwned();
+ } else if (p["condition"].type() == Object) {
+ request->query = p["condition"].embeddedObject().getOwned();
+ } else if (p["query"].type() == Object) {
+ request->query = p["query"].embeddedObject().getOwned();
+ } else if (p["q"].type() == Object) {
+ request->query = p["q"].embeddedObject().getOwned();
}
- Status GroupCommand::parseRequest(const string& dbname,
- const BSONObj& cmdObj,
- GroupRequest* request) const {
- request->ns = parseNs(dbname, cmdObj);
-
- // By default, group requests are regular group not explain of group.
- request->explain = false;
-
- const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();
-
- if (p["cond"].type() == Object) {
- request->query = p["cond"].embeddedObject().getOwned();
- }
- else if (p["condition"].type() == Object) {
- request->query = p["condition"].embeddedObject().getOwned();
- }
- else if (p["query"].type() == Object) {
- request->query = p["query"].embeddedObject().getOwned();
- }
- else if (p["q"].type() == Object) {
- request->query = p["q"].embeddedObject().getOwned();
- }
-
- if (p["key"].type() == Object) {
- request->keyPattern = p["key"].embeddedObjectUserCheck().getOwned();
- if (!p["$keyf"].eoo()) {
- return Status(ErrorCodes::BadValue, "can't have key and $keyf");
- }
- }
- else if (!p["$keyf"].eoo()) {
- request->keyFunctionCode = p["$keyf"]._asCode();
- }
- else {
- // No key specified. Use the entire object as the key.
- }
-
- BSONElement reduce = p["$reduce"];
- if (reduce.eoo()) {
- return Status(ErrorCodes::BadValue, "$reduce has to be set");
- }
- request->reduceCode = reduce._asCode();
-
- if (reduce.type() == CodeWScope) {
- request->reduceScope = reduce.codeWScopeObject().getOwned();
+ if (p["key"].type() == Object) {
+ request->keyPattern = p["key"].embeddedObjectUserCheck().getOwned();
+ if (!p["$keyf"].eoo()) {
+ return Status(ErrorCodes::BadValue, "can't have key and $keyf");
}
+ } else if (!p["$keyf"].eoo()) {
+ request->keyFunctionCode = p["$keyf"]._asCode();
+ } else {
+ // No key specified. Use the entire object as the key.
+ }
- if (p["initial"].type() != Object) {
- return Status(ErrorCodes::BadValue, "initial has to be an object");
- }
- request->initial = p["initial"].embeddedObject().getOwned();
+ BSONElement reduce = p["$reduce"];
+ if (reduce.eoo()) {
+ return Status(ErrorCodes::BadValue, "$reduce has to be set");
+ }
+ request->reduceCode = reduce._asCode();
- if (!p["finalize"].eoo()) {
- request->finalize = p["finalize"]._asCode();
- }
+ if (reduce.type() == CodeWScope) {
+ request->reduceScope = reduce.codeWScopeObject().getOwned();
+ }
- return Status::OK();
+ if (p["initial"].type() != Object) {
+ return Status(ErrorCodes::BadValue, "initial has to be an object");
}
+ request->initial = p["initial"].embeddedObject().getOwned();
- bool GroupCommand::run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int,
- std::string& errmsg,
- BSONObjBuilder& out) {
- GroupRequest groupRequest;
- Status parseRequestStatus = parseRequest(dbname, cmdObj, &groupRequest);
- if (!parseRequestStatus.isOK()) {
- return appendCommandStatus(out, parseRequestStatus);
- }
+ if (!p["finalize"].eoo()) {
+ request->finalize = p["finalize"]._asCode();
+ }
- AutoGetCollectionForRead ctx(txn, groupRequest.ns);
- Collection* coll = ctx.getCollection();
-
- PlanExecutor *rawPlanExecutor;
- Status getExecStatus = getExecutorGroup(txn,
- coll,
- groupRequest,
- PlanExecutor::YIELD_AUTO,
- &rawPlanExecutor);
- if (!getExecStatus.isOK()) {
- return appendCommandStatus(out, getExecStatus);
- }
+ return Status::OK();
+}
+
+bool GroupCommand::run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int,
+ std::string& errmsg,
+ BSONObjBuilder& out) {
+ GroupRequest groupRequest;
+ Status parseRequestStatus = parseRequest(dbname, cmdObj, &groupRequest);
+ if (!parseRequestStatus.isOK()) {
+ return appendCommandStatus(out, parseRequestStatus);
+ }
- unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
-
- // Group executors return ADVANCED exactly once, with the entire group result.
- BSONObj retval;
- PlanExecutor::ExecState state = planExecutor->getNext(&retval, NULL);
- if (PlanExecutor::ADVANCED != state) {
- invariant(PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state);
-
- if (WorkingSetCommon::isValidStatusMemberObject(retval)) {
- return appendCommandStatus(out, WorkingSetCommon::getMemberObjectStatus(retval));
- }
- return appendCommandStatus(out,
- Status(ErrorCodes::BadValue,
- str::stream() << "error encountered during group "
- << "operation, executor returned "
- << PlanExecutor::statestr(state)));
- }
+ AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ Collection* coll = ctx.getCollection();
- invariant(planExecutor->isEOF());
+ PlanExecutor* rawPlanExecutor;
+ Status getExecStatus =
+ getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO, &rawPlanExecutor);
+ if (!getExecStatus.isOK()) {
+ return appendCommandStatus(out, getExecStatus);
+ }
- invariant(STAGE_GROUP == planExecutor->getRootStage()->stageType());
- GroupStage* groupStage = static_cast<GroupStage*>(planExecutor->getRootStage());
- const GroupStats* groupStats =
- static_cast<const GroupStats*>(groupStage->getSpecificStats());
- const CommonStats* groupChildStats = groupStage->getChildren()[0]->getCommonStats();
+ unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
- out.appendArray("retval", retval);
- out.append("count", static_cast<long long>(groupChildStats->advanced));
- out.append("keys", static_cast<long long>(groupStats->nGroups));
+ // Group executors return ADVANCED exactly once, with the entire group result.
+ BSONObj retval;
+ PlanExecutor::ExecState state = planExecutor->getNext(&retval, NULL);
+ if (PlanExecutor::ADVANCED != state) {
+ invariant(PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state);
- return true;
+ if (WorkingSetCommon::isValidStatusMemberObject(retval)) {
+ return appendCommandStatus(out, WorkingSetCommon::getMemberObjectStatus(retval));
+ }
+ return appendCommandStatus(out,
+ Status(ErrorCodes::BadValue,
+ str::stream() << "error encountered during group "
+ << "operation, executor returned "
+ << PlanExecutor::statestr(state)));
}
- Status GroupCommand::explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
- GroupRequest groupRequest;
- Status parseRequestStatus = parseRequest(dbname, cmdObj, &groupRequest);
- if (!parseRequestStatus.isOK()) {
- return parseRequestStatus;
- }
+ invariant(planExecutor->isEOF());
+
+ invariant(STAGE_GROUP == planExecutor->getRootStage()->stageType());
+ GroupStage* groupStage = static_cast<GroupStage*>(planExecutor->getRootStage());
+ const GroupStats* groupStats = static_cast<const GroupStats*>(groupStage->getSpecificStats());
+ const CommonStats* groupChildStats = groupStage->getChildren()[0]->getCommonStats();
+
+ out.appendArray("retval", retval);
+ out.append("count", static_cast<long long>(groupChildStats->advanced));
+ out.append("keys", static_cast<long long>(groupStats->nGroups));
+
+ return true;
+}
+
+Status GroupCommand::explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ GroupRequest groupRequest;
+ Status parseRequestStatus = parseRequest(dbname, cmdObj, &groupRequest);
+ if (!parseRequestStatus.isOK()) {
+ return parseRequestStatus;
+ }
- groupRequest.explain = true;
+ groupRequest.explain = true;
- AutoGetCollectionForRead ctx(txn, groupRequest.ns);
- Collection* coll = ctx.getCollection();
+ AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ Collection* coll = ctx.getCollection();
- PlanExecutor *rawPlanExecutor;
- Status getExecStatus = getExecutorGroup(txn,
- coll,
- groupRequest,
- PlanExecutor::YIELD_AUTO,
- &rawPlanExecutor);
- if (!getExecStatus.isOK()) {
- return getExecStatus;
- }
+ PlanExecutor* rawPlanExecutor;
+ Status getExecStatus =
+ getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO, &rawPlanExecutor);
+ if (!getExecStatus.isOK()) {
+ return getExecStatus;
+ }
- unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
+ unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
- Explain::explainStages(planExecutor.get(), verbosity, out);
- return Status::OK();
- }
+ Explain::explainStages(planExecutor.get(), verbosity, out);
+ return Status::OK();
+}
} // namespace mongo
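
The fields accepted by GroupCommand::parseRequest above (ns, key/$keyf, cond/condition/query/q, $reduce, initial, finalize) map directly onto the group command document. A minimal sketch follows, with the driver header, connection string, and collection/field names all assumed for illustration:

    // Illustrative only: count documents per "status" value with the group command.
    #include <iostream>
    #include "mongo/client/dbclient.h"  // assumed legacy C++ driver header

    int main() {
        mongo::DBClientConnection conn;
        std::string err;
        if (!conn.connect("localhost:27017", err)) {  // assumed host
            std::cout << "connect failed: " << err << std::endl;
            return 1;
        }

        mongo::BSONObj groupCmd = BSON(
            "group" << BSON("ns" << "coll"                                  // collection, relative to the db
                                 << "key" << BSON("status" << 1)            // group key pattern
                                 << "cond" << BSON("qty" << BSON("$gt" << 0))
                                 << "$reduce" << "function(doc, acc) { acc.count += 1; }"
                                 << "initial" << BSON("count" << 0)));      // required initial accumulator

        mongo::BSONObj out;
        conn.runCommand("test", groupCmd, out);
        // On success the reply carries "retval", "count", and "keys", as appended in run() above.
        std::cout << out.jsonString() << std::endl;
        return 0;
    }
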
diff --git a/src/mongo/db/commands/group.h b/src/mongo/db/commands/group.h
index d6486a8d978..eab554dd1f8 100644
--- a/src/mongo/db/commands/group.h
+++ b/src/mongo/db/commands/group.h
@@ -32,60 +32,68 @@
namespace mongo {
- class ClientBasic;
- class Database;
- class OperationContext;
- class PlanExecutor;
- class Scope;
-
- struct GroupRequest;
-
- class GroupCommand : public Command {
- public:
- GroupCommand();
-
- private:
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual bool maintenanceOk() const { return false; }
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool slaveOverrideOk() const { return true; }
-
- virtual void help(std::stringstream& help) const {
- help << "http://dochub.mongodb.org/core/aggregation";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const;
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& jsobj,
- int,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const;
-
- /**
- * Parse a group command object.
- *
- * If 'cmdObj' is well-formed, returns Status::OK() and fills in out-argument 'request'.
- *
- * If a parsing error is encountered, returns an error Status.
- */
- Status parseRequest(const std::string& dbname,
- const BSONObj& cmdObj,
- GroupRequest* request) const;
- };
+class ClientBasic;
+class Database;
+class OperationContext;
+class PlanExecutor;
+class Scope;
+
+struct GroupRequest;
+
+class GroupCommand : public Command {
+public:
+ GroupCommand();
+
+private:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual bool maintenanceOk() const {
+ return false;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "http://dochub.mongodb.org/core/aggregation";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const;
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& jsobj,
+ int,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const;
+
+ /**
+ * Parse a group command object.
+ *
+ * If 'cmdObj' is well-formed, returns Status::OK() and fills in out-argument 'request'.
+ *
+ * If a parsing error is encountered, returns an error Status.
+ */
+ Status parseRequest(const std::string& dbname,
+ const BSONObj& cmdObj,
+ GroupRequest* request) const;
+};
} // namespace mongo
diff --git a/src/mongo/db/commands/hashcmd.cpp b/src/mongo/db/commands/hashcmd.cpp
index 21fc475469d..a4f7c437630 100644
--- a/src/mongo/db/commands/hashcmd.cpp
+++ b/src/mongo/db/commands/hashcmd.cpp
@@ -46,60 +46,66 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- // Testing only, enabled via command-line.
- class CmdHashElt : public Command {
- public:
- CmdHashElt() : Command("_hashBSONElement") {};
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- virtual void help( stringstream& help ) const {
- help << "returns the hash of the first BSONElement val in a BSONObj";
- }
+// Testing only, enabled via command-line.
+class CmdHashElt : public Command {
+public:
+ CmdHashElt() : Command("_hashBSONElement"){};
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ virtual void help(stringstream& help) const {
+ help << "returns the hash of the first BSONElement val in a BSONObj";
+ }
- /* CmdObj has the form {"hash" : <thingToHash>}
- * or {"hash" : <thingToHash>, "seed" : <number> }
- * Result has the form
- * {"key" : <thingTohash>, "seed" : <int>, "out": NumberLong(<hash>)}
- *
- * Example use in the shell:
- *> db.runCommand({hash: "hashthis", seed: 1})
- *> {"key" : "hashthis",
- *> "seed" : 1,
- *> "out" : NumberLong(6271151123721111923),
- *> "ok" : 1 }
- **/
- bool run(OperationContext* txn, const string& db,
- BSONObj& cmdObj,
- int options, string& errmsg,
- BSONObjBuilder& result){
- result.appendAs(cmdObj.firstElement(),"key");
+ /* CmdObj has the form {"hash" : <thingToHash>}
+ * or {"hash" : <thingToHash>, "seed" : <number> }
+ * Result has the form
+ * {"key" : <thingTohash>, "seed" : <int>, "out": NumberLong(<hash>)}
+ *
+ * Example use in the shell:
+     *> db.runCommand({_hashBSONElement: "hashthis", seed: 1})
+ *> {"key" : "hashthis",
+ *> "seed" : 1,
+ *> "out" : NumberLong(6271151123721111923),
+ *> "ok" : 1 }
+ **/
+ bool run(OperationContext* txn,
+ const string& db,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ result.appendAs(cmdObj.firstElement(), "key");
- int seed = 0;
- if (cmdObj.hasField("seed")){
- if (! cmdObj["seed"].isNumber()) {
- errmsg += "seed must be a number";
- return false;
- }
- seed = cmdObj["seed"].numberInt();
+ int seed = 0;
+ if (cmdObj.hasField("seed")) {
+ if (!cmdObj["seed"].isNumber()) {
+ errmsg += "seed must be a number";
+ return false;
}
- result.append( "seed" , seed );
-
- result.append( "out" , BSONElementHasher::hash64( cmdObj.firstElement() , seed ) );
- return true;
+ seed = cmdObj["seed"].numberInt();
}
- };
- MONGO_INITIALIZER(RegisterHashEltCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CmdHashElt();
- }
- return Status::OK();
+ result.append("seed", seed);
+
+ result.append("out", BSONElementHasher::hash64(cmdObj.firstElement(), seed));
+ return true;
}
+};
+MONGO_INITIALIZER(RegisterHashEltCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CmdHashElt();
+ }
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index ac85733d3b5..20783c5b244 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -47,355 +47,357 @@
namespace {
- using std::string;
- using std::vector;
- using namespace mongo;
-
- /**
- * Utility function to extract error code and message from status
- * and append to BSON results.
- */
- void addStatus(const Status& status, BSONObjBuilder& builder) {
- builder.append("ok", status.isOK() ? 1.0 : 0.0);
- if (!status.isOK()) {
- builder.append("code", status.code());
- }
- if (!status.reason().empty()) {
- builder.append("errmsg", status.reason());
- }
- }
+using std::string;
+using std::vector;
+using namespace mongo;
- /**
- * Retrieves a collection's query settings and plan cache from the database.
- */
- static Status getQuerySettingsAndPlanCache(OperationContext* txn,
- Collection* collection,
- const string& ns,
- QuerySettings** querySettingsOut,
- PlanCache** planCacheOut) {
-
- *querySettingsOut = NULL;
- *planCacheOut = NULL;
- if (NULL == collection) {
- return Status(ErrorCodes::BadValue, "no such collection");
- }
+/**
+ * Utility function to extract error code and message from status
+ * and append to BSON results.
+ */
+void addStatus(const Status& status, BSONObjBuilder& builder) {
+ builder.append("ok", status.isOK() ? 1.0 : 0.0);
+ if (!status.isOK()) {
+ builder.append("code", status.code());
+ }
+ if (!status.reason().empty()) {
+ builder.append("errmsg", status.reason());
+ }
+}
- CollectionInfoCache* infoCache = collection->infoCache();
- invariant(infoCache);
+/**
+ * Retrieves a collection's query settings and plan cache from the database.
+ */
+static Status getQuerySettingsAndPlanCache(OperationContext* txn,
+ Collection* collection,
+ const string& ns,
+ QuerySettings** querySettingsOut,
+ PlanCache** planCacheOut) {
+ *querySettingsOut = NULL;
+ *planCacheOut = NULL;
+ if (NULL == collection) {
+ return Status(ErrorCodes::BadValue, "no such collection");
+ }
- QuerySettings* querySettings = infoCache->getQuerySettings();
- invariant(querySettings);
+ CollectionInfoCache* infoCache = collection->infoCache();
+ invariant(infoCache);
- *querySettingsOut = querySettings;
+ QuerySettings* querySettings = infoCache->getQuerySettings();
+ invariant(querySettings);
- PlanCache* planCache = infoCache->getPlanCache();
- invariant(planCache);
+ *querySettingsOut = querySettings;
- *planCacheOut = planCache;
+ PlanCache* planCache = infoCache->getPlanCache();
+ invariant(planCache);
- return Status::OK();
- }
+ *planCacheOut = planCache;
- //
- // Command instances.
- // Registers commands with the command system and make commands
- // available to the client.
- //
+ return Status::OK();
+}
- MONGO_INITIALIZER_WITH_PREREQUISITES(SetupIndexFilterCommands, MONGO_NO_PREREQUISITES)(
- InitializerContext* context) {
+//
+// Command instances.
+// Registers commands with the command system and make commands
+// available to the client.
+//
- new ListFilters();
- new ClearFilters();
- new SetFilter();
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupIndexFilterCommands,
+ MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+ new ListFilters();
+ new ClearFilters();
+ new SetFilter();
- return Status::OK();
- }
+ return Status::OK();
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
- using std::unique_ptr;
-
- IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
- : Command(name),
- helpText(helpText) { }
-
- bool IndexFilterCommand::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- string ns = parseNs(dbname, cmdObj);
+using std::string;
+using std::stringstream;
+using std::vector;
+using std::unique_ptr;
- Status status = runIndexFilterCommand(txn, ns, cmdObj, &result);
+IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
+ : Command(name), helpText(helpText) {}
- if (!status.isOK()) {
- addStatus(status, result);
- return false;
- }
+bool IndexFilterCommand::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns = parseNs(dbname, cmdObj);
- return true;
- }
+ Status status = runIndexFilterCommand(txn, ns, cmdObj, &result);
- bool IndexFilterCommand::isWriteCommandForConfigServer() const { return false; }
-
- bool IndexFilterCommand::slaveOk() const {
+ if (!status.isOK()) {
+ addStatus(status, result);
return false;
}
- bool IndexFilterCommand::slaveOverrideOk() const {
- return true;
- }
+ return true;
+}
- void IndexFilterCommand::help(stringstream& ss) const {
- ss << helpText;
- }
-
- Status IndexFilterCommand::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+bool IndexFilterCommand::isWriteCommandForConfigServer() const {
+ return false;
+}
- if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::planCacheIndexFilter)) {
- return Status::OK();
- }
+bool IndexFilterCommand::slaveOk() const {
+ return false;
+}
- return Status(ErrorCodes::Unauthorized, "unauthorized");
- }
+bool IndexFilterCommand::slaveOverrideOk() const {
+ return true;
+}
- ListFilters::ListFilters() : IndexFilterCommand("planCacheListFilters",
- "Displays index filters for all query shapes in a collection.") { }
+void IndexFilterCommand::help(stringstream& ss) const {
+ ss << helpText;
+}
- Status ListFilters::runIndexFilterCommand(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(txn, ns);
+Status IndexFilterCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- QuerySettings* querySettings;
- PlanCache* unused;
- Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &unused);
- if (!status.isOK()) {
- // No collection - return empty array of filters.
- BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
- hintsBuilder.doneFast();
- return Status::OK();
- }
- return list(*querySettings, bob);
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::planCacheIndexFilter)) {
+ return Status::OK();
}
- // static
- Status ListFilters::list(const QuerySettings& querySettings, BSONObjBuilder* bob) {
- invariant(bob);
-
- // Format of BSON result:
- //
- // {
- // hints: [
- // {
- // query: <query>,
- // sort: <sort>,
- // projection: <projection>,
- // indexes: [<index1>, <index2>, <index3>, ...]
- // }
- // }
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+}
+
+ListFilters::ListFilters()
+ : IndexFilterCommand("planCacheListFilters",
+ "Displays index filters for all query shapes in a collection.") {}
+
+Status ListFilters::runIndexFilterCommand(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ // This is a read lock. The query settings is owned by the collection.
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ QuerySettings* querySettings;
+ PlanCache* unused;
+ Status status =
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &unused);
+ if (!status.isOK()) {
+ // No collection - return empty array of filters.
BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
- OwnedPointerVector<AllowedIndexEntry> entries;
- entries.mutableVector() = querySettings.getAllAllowedIndices();
- for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin();
- i != entries.end(); ++i) {
- AllowedIndexEntry* entry = *i;
- invariant(entry);
-
- BSONObjBuilder hintBob(hintsBuilder.subobjStart());
- hintBob.append("query", entry->query);
- hintBob.append("sort", entry->sort);
- hintBob.append("projection", entry->projection);
- BSONArrayBuilder indexesBuilder(hintBob.subarrayStart("indexes"));
- for (vector<BSONObj>::const_iterator j = entry->indexKeyPatterns.begin();
- j != entry->indexKeyPatterns.end(); ++j) {
- const BSONObj& index = *j;
- indexesBuilder.append(index);
- }
- indexesBuilder.doneFast();
- }
hintsBuilder.doneFast();
return Status::OK();
}
+ return list(*querySettings, bob);
+}
- ClearFilters::ClearFilters() : IndexFilterCommand("planCacheClearFilters",
- "Clears index filter for a single query shape or, "
- "if the query shape is omitted, all filters for the collection.") { }
-
- Status ClearFilters::runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(txn, ns);
-
- QuerySettings* querySettings;
- PlanCache* planCache;
- Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
- if (!status.isOK()) {
- // No collection - do nothing.
- return Status::OK();
- }
- return clear(txn, querySettings, planCache, ns, cmdObj);
- }
-
- // static
- Status ClearFilters::clear(OperationContext* txn,
- QuerySettings* querySettings,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj) {
- invariant(querySettings);
-
- // According to the specification, the planCacheClearFilters command runs in two modes:
- // - clear all hints; or
- // - clear hints for single query shape when a query shape is described in the
- // command arguments.
- if (cmdObj.hasField("query")) {
- CanonicalQuery* cqRaw;
- Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
- if (!status.isOK()) {
- return status;
- }
-
- unique_ptr<CanonicalQuery> cq(cqRaw);
- querySettings->removeAllowedIndices(planCache->computeKey(*cq));
-
- // Remove entry from plan cache
- planCache->remove(*cq);
- return Status::OK();
- }
-
- // If query is not provided, make sure sort and projection are not in arguments.
- // We do not want to clear the entire cache inadvertently when the user
- // forgot to provide a value for "query".
- if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
- return Status(ErrorCodes::BadValue, "sort or projection provided without query");
- }
+// static
+Status ListFilters::list(const QuerySettings& querySettings, BSONObjBuilder* bob) {
+ invariant(bob);
- // Get entries from query settings. We need to remove corresponding entries from the plan
- // cache shortly.
- OwnedPointerVector<AllowedIndexEntry> entries;
- entries.mutableVector() = querySettings->getAllAllowedIndices();
-
- // OK to proceed with clearing entire cache.
- querySettings->clearAllowedIndices();
-
- const NamespaceString nss(ns);
- const WhereCallbackReal whereCallback(txn, nss.db());
-
- // Remove corresponding entries from plan cache.
- // Admin hints affect the planning process directly. If there were
- // plans generated as a result of applying index filter, these need to be
- // invalidated. This allows the planner to re-populate the plan cache with
- // non-filtered indexed solutions next time the query is run.
- // Resolve plan cache key from (query, sort, projection) in query settings entry.
- // Concurrency note: There's no harm in removing plan cache entries one at at time.
- // Only way that PlanCache::remove() can fail is when the query shape has been removed from
- // the cache by some other means (re-index, collection info reset, ...). This is OK since
- // that's the intended effect of calling the remove() function with the key from the hint entry.
- for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin();
- i != entries.end(); ++i) {
- AllowedIndexEntry* entry = *i;
- invariant(entry);
-
- // Create canonical query.
- CanonicalQuery* cqRaw;
- Status result = CanonicalQuery::canonicalize(
- ns, entry->query, entry->sort, entry->projection, &cqRaw, whereCallback);
- invariant(result.isOK());
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Remove plan cache entry.
- planCache->remove(*cq);
+ // Format of BSON result:
+ //
+ // {
+    //     filters: [
+    //         {
+    //           query: <query>,
+    //           sort: <sort>,
+    //           projection: <projection>,
+    //           indexes: [<index1>, <index2>, <index3>, ...]
+    //         }
+    //     ] }
+ BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
+ OwnedPointerVector<AllowedIndexEntry> entries;
+ entries.mutableVector() = querySettings.getAllAllowedIndices();
+ for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
+ AllowedIndexEntry* entry = *i;
+ invariant(entry);
+
+ BSONObjBuilder hintBob(hintsBuilder.subobjStart());
+ hintBob.append("query", entry->query);
+ hintBob.append("sort", entry->sort);
+ hintBob.append("projection", entry->projection);
+ BSONArrayBuilder indexesBuilder(hintBob.subarrayStart("indexes"));
+ for (vector<BSONObj>::const_iterator j = entry->indexKeyPatterns.begin();
+ j != entry->indexKeyPatterns.end();
+ ++j) {
+ const BSONObj& index = *j;
+ indexesBuilder.append(index);
}
-
+ indexesBuilder.doneFast();
+ }
+ hintsBuilder.doneFast();
+ return Status::OK();
+}
+
+ClearFilters::ClearFilters()
+ : IndexFilterCommand("planCacheClearFilters",
+ "Clears index filter for a single query shape or, "
+ "if the query shape is omitted, all filters for the collection.") {}
+
+Status ClearFilters::runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ // This is a read lock. The query settings is owned by the collection.
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ QuerySettings* querySettings;
+ PlanCache* planCache;
+ Status status =
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ if (!status.isOK()) {
+ // No collection - do nothing.
return Status::OK();
}
-
- SetFilter::SetFilter() : IndexFilterCommand("planCacheSetFilter",
- "Sets index filter for a query shape. Overrides existing filter.") { }
-
- Status SetFilter::runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query settings is owned by the collection.
- const NamespaceString nss(ns);
- AutoGetCollectionForRead ctx(txn, nss);
-
- QuerySettings* querySettings;
- PlanCache* planCache;
- Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ return clear(txn, querySettings, planCache, ns, cmdObj);
+}
+
+// static
+Status ClearFilters::clear(OperationContext* txn,
+ QuerySettings* querySettings,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj) {
+ invariant(querySettings);
+
+ // According to the specification, the planCacheClearFilters command runs in two modes:
+ // - clear all hints; or
+ // - clear hints for single query shape when a query shape is described in the
+ // command arguments.
+ if (cmdObj.hasField("query")) {
+ CanonicalQuery* cqRaw;
+ Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
if (!status.isOK()) {
return status;
}
- return set(txn, querySettings, planCache, ns, cmdObj);
+
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+ querySettings->removeAllowedIndices(planCache->computeKey(*cq));
+
+ // Remove entry from plan cache
+ planCache->remove(*cq);
+ return Status::OK();
}
- // static
- Status SetFilter::set(OperationContext* txn,
- QuerySettings* querySettings,
- PlanCache* planCache,
- const string& ns,
- const BSONObj& cmdObj) {
- // indexes - required
- BSONElement indexesElt = cmdObj.getField("indexes");
- if (indexesElt.eoo()) {
- return Status(ErrorCodes::BadValue, "required field indexes missing");
- }
- if (indexesElt.type() != mongo::Array) {
- return Status(ErrorCodes::BadValue, "required field indexes must be an array");
- }
- vector<BSONElement> indexesEltArray = indexesElt.Array();
- if (indexesEltArray.empty()) {
- return Status(ErrorCodes::BadValue,
- "required field indexes must contain at least one index");
- }
- vector<BSONObj> indexes;
- for (vector<BSONElement>::const_iterator i = indexesEltArray.begin();
- i != indexesEltArray.end(); ++i) {
- const BSONElement& elt = *i;
- if (!elt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "each item in indexes must be an object");
- }
- BSONObj obj = elt.Obj();
- if (obj.isEmpty()) {
- return Status(ErrorCodes::BadValue, "index specification cannot be empty");
- }
- indexes.push_back(obj.getOwned());
- }
+ // If query is not provided, make sure sort and projection are not in arguments.
+ // We do not want to clear the entire cache inadvertently when the user
+ // forgot to provide a value for "query".
+ if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
+ return Status(ErrorCodes::BadValue, "sort or projection provided without query");
+ }
+ // Get entries from query settings. We need to remove corresponding entries from the plan
+ // cache shortly.
+ OwnedPointerVector<AllowedIndexEntry> entries;
+ entries.mutableVector() = querySettings->getAllAllowedIndices();
+
+ // OK to proceed with clearing entire cache.
+ querySettings->clearAllowedIndices();
+
+ const NamespaceString nss(ns);
+ const WhereCallbackReal whereCallback(txn, nss.db());
+
+ // Remove corresponding entries from plan cache.
+ // Admin hints affect the planning process directly. If there were
+ // plans generated as a result of applying index filter, these need to be
+ // invalidated. This allows the planner to re-populate the plan cache with
+ // non-filtered indexed solutions next time the query is run.
+ // Resolve plan cache key from (query, sort, projection) in query settings entry.
+    // Concurrency note: There's no harm in removing plan cache entries one at a time.
+ // Only way that PlanCache::remove() can fail is when the query shape has been removed from
+ // the cache by some other means (re-index, collection info reset, ...). This is OK since
+ // that's the intended effect of calling the remove() function with the key from the hint entry.
+ for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
+ AllowedIndexEntry* entry = *i;
+ invariant(entry);
+
+ // Create canonical query.
CanonicalQuery* cqRaw;
- Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
- if (!status.isOK()) {
- return status;
- }
+ Status result = CanonicalQuery::canonicalize(
+ ns, entry->query, entry->sort, entry->projection, &cqRaw, whereCallback);
+ invariant(result.isOK());
unique_ptr<CanonicalQuery> cq(cqRaw);
- // Add allowed indices to query settings, overriding any previous entries.
- querySettings->setAllowedIndices(*cq, planCache->computeKey(*cq), indexes);
-
- // Remove entry from plan cache.
+ // Remove plan cache entry.
planCache->remove(*cq);
+ }
- return Status::OK();
+ return Status::OK();
+}
+
+SetFilter::SetFilter()
+ : IndexFilterCommand("planCacheSetFilter",
+ "Sets index filter for a query shape. Overrides existing filter.") {}
+
+Status SetFilter::runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ // This is a read lock. The query settings is owned by the collection.
+ const NamespaceString nss(ns);
+ AutoGetCollectionForRead ctx(txn, nss);
+
+ QuerySettings* querySettings;
+ PlanCache* planCache;
+ Status status =
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ if (!status.isOK()) {
+ return status;
+ }
+ return set(txn, querySettings, planCache, ns, cmdObj);
+}
+
+// static
+Status SetFilter::set(OperationContext* txn,
+ QuerySettings* querySettings,
+ PlanCache* planCache,
+ const string& ns,
+ const BSONObj& cmdObj) {
+ // indexes - required
+ BSONElement indexesElt = cmdObj.getField("indexes");
+ if (indexesElt.eoo()) {
+ return Status(ErrorCodes::BadValue, "required field indexes missing");
+ }
+ if (indexesElt.type() != mongo::Array) {
+ return Status(ErrorCodes::BadValue, "required field indexes must be an array");
}
+ vector<BSONElement> indexesEltArray = indexesElt.Array();
+ if (indexesEltArray.empty()) {
+ return Status(ErrorCodes::BadValue,
+ "required field indexes must contain at least one index");
+ }
+ vector<BSONObj> indexes;
+ for (vector<BSONElement>::const_iterator i = indexesEltArray.begin();
+ i != indexesEltArray.end();
+ ++i) {
+ const BSONElement& elt = *i;
+ if (!elt.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "each item in indexes must be an object");
+ }
+ BSONObj obj = elt.Obj();
+ if (obj.isEmpty()) {
+ return Status(ErrorCodes::BadValue, "index specification cannot be empty");
+ }
+ indexes.push_back(obj.getOwned());
+ }
+
+ CanonicalQuery* cqRaw;
+ Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
+ if (!status.isOK()) {
+ return status;
+ }
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Add allowed indices to query settings, overriding any previous entries.
+ querySettings->setAllowedIndices(*cq, planCache->computeKey(*cq), indexes);
+
+ // Remove entry from plan cache.
+ planCache->remove(*cq);
+
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
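The addStatus()/run() pair reformatted above is the shared error path for every index filter command: run() delegates to runIndexFilterCommand() and, when the returned Status is not OK, addStatus() folds it into the reply as ok/code/errmsg. A minimal self-contained sketch of that translation, using stand-in types (FakeStatus and a string map) rather than the real mongo Status and BSONObjBuilder:

    #include <iostream>
    #include <map>
    #include <string>

    // Stand-in for mongo::Status (illustrative only).
    struct FakeStatus {
        int code;  // 0 means OK
        std::string reason;
        bool isOK() const {
            return code == 0;
        }
    };

    // Mirrors addStatus(): append ok/code/errmsg to the command reply.
    void appendStatus(const FakeStatus& status, std::map<std::string, std::string>* reply) {
        (*reply)["ok"] = status.isOK() ? "1.0" : "0.0";
        if (!status.isOK()) {
            (*reply)["code"] = std::to_string(status.code);
        }
        if (!status.reason.empty()) {
            (*reply)["errmsg"] = status.reason;
        }
    }

    int main() {
        std::map<std::string, std::string> reply;
        appendStatus(FakeStatus{2, "no such collection"}, &reply);
        for (const auto& kv : reply) {
            std::cout << kv.first << ": " << kv.second << "\n";
        }
        return 0;
    }

Because the translation lives in one place, ListFilters, ClearFilters and SetFilter only ever return a Status and never touch the BSON error fields themselves.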
diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h
index 1ed6fa506c0..9b2815dba70 100644
--- a/src/mongo/db/commands/index_filter_commands.h
+++ b/src/mongo/db/commands/index_filter_commands.h
@@ -34,153 +34,153 @@
namespace mongo {
+/**
+ * DB commands for index filters.
+ * Index filter commands work on a different data structure in the collection
+ * info cache from the plan cache.
+ * The user still thinks of index filter commands as part of the plan cache functionality
+ * so the command name prefix is still "planCache".
+ *
+ * These are in a header to facilitate unit testing. See index_filter_commands_test.cpp.
+ */
+
+/**
+ * IndexFilterCommand
+ * Defines common attributes for all index filter related commands
+ * such as slaveOk.
+ */
+class IndexFilterCommand : public Command {
+public:
+ IndexFilterCommand(const std::string& name, const std::string& helpText);
+
/**
- * DB commands for index filters.
- * Index filter commands work on a different data structure in the collection
- * info cache from the plan cache.
- * The user still thinks of index filter commands as part of the plan cache functionality
- * so the command name prefix is still "planCache".
+ * Entry point from command subsystem.
+ * Implementation provides standardization of error handling
+ * such as adding error code and message to BSON result.
*
- * These are in a header to facilitate unit testing. See index_filter_commands_test.cpp.
+ * Do not override in derived classes.
+     * Override runIndexFilterCommand() instead to
+     * implement index filter command functionality.
*/
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ virtual bool isWriteCommandForConfigServer() const;
+
+ virtual bool slaveOk() const;
+
+ virtual bool slaveOverrideOk() const;
+
+ virtual void help(std::stringstream& ss) const;
+
/**
- * IndexFilterCommand
- * Defines common attributes for all index filter related commands
- * such as slaveOk.
+ * One action type defined for index filter commands:
+ * - planCacheIndexFilter
*/
- class IndexFilterCommand : public Command {
- public:
- IndexFilterCommand(const std::string& name, const std::string& helpText);
-
- /**
- * Entry point from command subsystem.
- * Implementation provides standardization of error handling
- * such as adding error code and message to BSON result.
- *
- * Do not override in derived classes.
- * Override runPlanCacheCommands instead to
- * implement plan cache command functionality.
- */
-
- bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- virtual bool isWriteCommandForConfigServer() const;
-
- virtual bool slaveOk() const;
-
- virtual bool slaveOverrideOk() const;
-
- virtual void help(std::stringstream& ss) const;
-
- /**
- * One action type defined for index filter commands:
- * - planCacheIndexFilter
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
- /**
- * Subset of command arguments used by index filter commands
- * Override to provide command functionality.
- * Should contain just enough logic to invoke run*Command() function
- * in query_settings.h
- */
- virtual Status runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) = 0;
-
- private:
- std::string helpText;
- };
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
/**
- * ListFilters
- *
- * { planCacheListFilters: <collection> }
- *
+ * Subset of command arguments used by index filter commands
+ * Override to provide command functionality.
+ * Should contain just enough logic to invoke run*Command() function
+ * in query_settings.h
+ */
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) = 0;
+
+private:
+ std::string helpText;
+};
+
+/**
+ * ListFilters
+ *
+ * { planCacheListFilters: <collection> }
+ *
+ */
+class ListFilters : public IndexFilterCommand {
+public:
+ ListFilters();
+
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
+
+ /**
+ * Looks up index filters from collection's query settings.
+ * Inserts index filters into BSON builder.
*/
- class ListFilters : public IndexFilterCommand {
- public:
- ListFilters();
-
- virtual Status runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Looks up index filters from collection's query settings.
- * Inserts index filters into BSON builder.
- */
- static Status list(const QuerySettings& querySettings, BSONObjBuilder* bob);
- };
+ static Status list(const QuerySettings& querySettings, BSONObjBuilder* bob);
+};
+
+/**
+ * ClearFilters
+ *
+ * { planCacheClearFilters: <collection>, query: <query>, sort: <sort>, projection: <projection> }
+ *
+ */
+class ClearFilters : public IndexFilterCommand {
+public:
+ ClearFilters();
+
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
- * ClearFilters
- *
- * { planCacheClearFilters: <collection>, query: <query>, sort: <sort>, projection: <projection> }
- *
+ * If query shape is provided, clears index filter for a query.
+ * Otherwise, clears collection's filters.
+ * Namespace argument ns is ignored if we are clearing the entire cache.
+ * Removes corresponding entries from plan cache.
*/
- class ClearFilters : public IndexFilterCommand {
- public:
- ClearFilters();
-
- virtual Status runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * If query shape is provided, clears index filter for a query.
- * Otherwise, clears collection's filters.
- * Namespace argument ns is ignored if we are clearing the entire cache.
- * Removes corresponding entries from plan cache.
- */
- static Status clear(OperationContext* txn,
- QuerySettings* querySettings,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj);
- };
+ static Status clear(OperationContext* txn,
+ QuerySettings* querySettings,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj);
+};
+
+/**
+ * SetFilter
+ *
+ * {
+ * planCacheSetFilter: <collection>,
+ * query: <query>,
+ * sort: <sort>,
+ * projection: <projection>,
+ * indexes: [ <index1>, <index2>, <index3>, ... ]
+ * }
+ *
+ */
+class SetFilter : public IndexFilterCommand {
+public:
+ SetFilter();
+
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
- * SetFilter
- *
- * {
- * planCacheSetFilter: <collection>,
- * query: <query>,
- * sort: <sort>,
- * projection: <projection>,
- * indexes: [ <index1>, <index2>, <index3>, ... ]
- * }
- *
+ * Sets index filter for a query shape.
+ * Removes entry for query shape from plan cache.
*/
- class SetFilter : public IndexFilterCommand {
- public:
- SetFilter();
-
- virtual Status runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Sets index filter for a query shape.
- * Removes entry for query shape from plan cache.
- */
- static Status set(OperationContext* txn,
- QuerySettings* querySettings,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj);
- };
+ static Status set(OperationContext* txn,
+ QuerySettings* querySettings,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj);
+};
} // namespace mongo
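The header above makes the template-method split explicit: run() is the fixed entry point that standardizes error reporting, and derived commands override only runIndexFilterCommand(). A self-contained sketch of the same shape; the names (FilterCommandSketch, Result, ListFiltersSketch) are illustrative stand-ins, not the real mongo types:

    #include <iostream>
    #include <string>

    // Stand-in for mongo::Status (illustrative only).
    struct Result {
        bool ok;
        std::string errmsg;
    };

    // Fixed entry point plus customizable core, mirroring IndexFilterCommand.
    class FilterCommandSketch {
    public:
        virtual ~FilterCommandSketch() = default;

        // Not meant to be overridden: one place that formats errors for all subclasses.
        bool run(const std::string& ns, std::string* reply) {
            Result r = runFilterCommand(ns);
            if (!r.ok) {
                *reply = "{ok: 0, errmsg: \"" + r.errmsg + "\"}";
                return false;
            }
            *reply = "{ok: 1}";
            return true;
        }

    protected:
        // Override point, analogous to runIndexFilterCommand().
        virtual Result runFilterCommand(const std::string& ns) = 0;
    };

    class ListFiltersSketch : public FilterCommandSketch {
    protected:
        Result runFilterCommand(const std::string& ns) override {
            return ns.empty() ? Result{false, "no such collection"} : Result{true, ""};
        }
    };

    int main() {
        ListFiltersSketch cmd;
        std::string reply;
        cmd.run("test.t", &reply);
        std::cout << reply << "\n";
        return 0;
    }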
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 108c7d85bbb..b23b3d34ed8 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -43,287 +43,313 @@ using namespace mongo;
namespace {
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- static const char* ns = "test.t";
-
- /**
- * Utility function to get list of index filters from the query settings.
- */
- vector<BSONObj> getFilters(const QuerySettings& querySettings) {
- BSONObjBuilder bob;
- ASSERT_OK(ListFilters::list(querySettings, &bob));
- BSONObj resultObj = bob.obj();
- BSONElement filtersElt = resultObj.getField("filters");
- ASSERT_EQUALS(filtersElt.type(), mongo::Array);
- vector<BSONElement> filtersEltArray = filtersElt.Array();
- vector<BSONObj> filters;
- for (vector<BSONElement>::const_iterator i = filtersEltArray.begin();
- i != filtersEltArray.end(); ++i) {
- const BSONElement& elt = *i;
-
- ASSERT_TRUE(elt.isABSONObj());
- BSONObj obj = elt.Obj();
-
- // Check required fields.
- // query
- BSONElement queryElt = obj.getField("query");
- ASSERT_TRUE(queryElt.isABSONObj());
-
- // sort
- BSONElement sortElt = obj.getField("sort");
- ASSERT_TRUE(sortElt.isABSONObj());
-
- // projection
- BSONElement projectionElt = obj.getField("projection");
- ASSERT_TRUE(projectionElt.isABSONObj());
-
- // indexes
- BSONElement indexesElt = obj.getField("indexes");
- ASSERT_EQUALS(indexesElt.type(), mongo::Array);
-
- // All fields OK. Append to vector.
- filters.push_back(obj.getOwned());
- }
+using std::string;
+using std::unique_ptr;
+using std::vector;
- return filters;
- }
+static const char* ns = "test.t";
- /**
- * Utility function to create a PlanRankingDecision
- */
- PlanRankingDecision* createDecision(size_t numPlans) {
- unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
- for (size_t i = 0; i < numPlans; ++i) {
- CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
- stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
- why->scores.push_back(0U);
- why->candidateOrder.push_back(i);
- }
- return why.release();
+/**
+ * Utility function to get list of index filters from the query settings.
+ */
+vector<BSONObj> getFilters(const QuerySettings& querySettings) {
+ BSONObjBuilder bob;
+ ASSERT_OK(ListFilters::list(querySettings, &bob));
+ BSONObj resultObj = bob.obj();
+ BSONElement filtersElt = resultObj.getField("filters");
+ ASSERT_EQUALS(filtersElt.type(), mongo::Array);
+ vector<BSONElement> filtersEltArray = filtersElt.Array();
+ vector<BSONObj> filters;
+ for (vector<BSONElement>::const_iterator i = filtersEltArray.begin();
+ i != filtersEltArray.end();
+ ++i) {
+ const BSONElement& elt = *i;
+
+ ASSERT_TRUE(elt.isABSONObj());
+ BSONObj obj = elt.Obj();
+
+ // Check required fields.
+ // query
+ BSONElement queryElt = obj.getField("query");
+ ASSERT_TRUE(queryElt.isABSONObj());
+
+ // sort
+ BSONElement sortElt = obj.getField("sort");
+ ASSERT_TRUE(sortElt.isABSONObj());
+
+ // projection
+ BSONElement projectionElt = obj.getField("projection");
+ ASSERT_TRUE(projectionElt.isABSONObj());
+
+ // indexes
+ BSONElement indexesElt = obj.getField("indexes");
+ ASSERT_EQUALS(indexesElt.type(), mongo::Array);
+
+ // All fields OK. Append to vector.
+ filters.push_back(obj.getOwned());
}
- /**
- * Injects an entry into plan cache for query shape.
- */
- void addQueryShapeToPlanCache(PlanCache* planCache, const char* queryStr, const char* sortStr,
- const char* projectionStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projectionObj = fromjson(projectionStr);
-
- // Create canonical query.
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache->add(*cq, solns, createDecision(1U)));
+ return filters;
+}
+
+/**
+ * Utility function to create a PlanRankingDecision
+ */
+PlanRankingDecision* createDecision(size_t numPlans) {
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ for (size_t i = 0; i < numPlans; ++i) {
+ CommonStats common("COLLSCAN");
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ stats->specific.reset(new CollectionScanStats());
+ why->stats.mutableVector().push_back(stats.release());
+ why->scores.push_back(0U);
+ why->candidateOrder.push_back(i);
}
+ return why.release();
+}
+
+/**
+ * Injects an entry into plan cache for query shape.
+ */
+void addQueryShapeToPlanCache(PlanCache* planCache,
+ const char* queryStr,
+ const char* sortStr,
+ const char* projectionStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projectionObj = fromjson(projectionStr);
+
+ // Create canonical query.
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ QuerySolution qs;
+ qs.cacheData.reset(new SolutionCacheData());
+ qs.cacheData->tree.reset(new PlanCacheIndexTree());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ ASSERT_OK(planCache->add(*cq, solns, createDecision(1U)));
+}
- /**
- * Checks if plan cache contains query shape.
- */
- bool planCacheContains(const PlanCache& planCache, const char* queryStr, const char* sortStr,
- const char* projectionStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projectionObj = fromjson(projectionStr);
-
- // Create canonical query.
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Retrieve cache entries from plan cache.
- vector<PlanCacheEntry*> entries = planCache.getAllEntries();
-
- // Search keys.
- bool found = false;
- for (vector<PlanCacheEntry*>::const_iterator i = entries.begin(); i != entries.end(); i++) {
- PlanCacheEntry* entry = *i;
-
- // Canonicalizing query shape in cache entry to get cache key.
- // Alternatively, we could add key to PlanCacheEntry but that would be used in one place only.
- ASSERT_OK(CanonicalQuery::canonicalize(ns, entry->query, entry->sort,
- entry->projection, &cqRaw));
- unique_ptr<CanonicalQuery> currentQuery(cqRaw);
-
- if (planCache.computeKey(*currentQuery) == planCache.computeKey(*cq)) {
- found = true;
- }
- // Release resources for cache entry after extracting key.
- delete entry;
+/**
+ * Checks if plan cache contains query shape.
+ */
+bool planCacheContains(const PlanCache& planCache,
+ const char* queryStr,
+ const char* sortStr,
+ const char* projectionStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projectionObj = fromjson(projectionStr);
+
+ // Create canonical query.
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Retrieve cache entries from plan cache.
+ vector<PlanCacheEntry*> entries = planCache.getAllEntries();
+
+ // Search keys.
+ bool found = false;
+ for (vector<PlanCacheEntry*>::const_iterator i = entries.begin(); i != entries.end(); i++) {
+ PlanCacheEntry* entry = *i;
+
+ // Canonicalizing query shape in cache entry to get cache key.
+ // Alternatively, we could add key to PlanCacheEntry but that would be used in one place only.
+ ASSERT_OK(
+ CanonicalQuery::canonicalize(ns, entry->query, entry->sort, entry->projection, &cqRaw));
+ unique_ptr<CanonicalQuery> currentQuery(cqRaw);
+
+ if (planCache.computeKey(*currentQuery) == planCache.computeKey(*cq)) {
+ found = true;
}
- return found;
+ // Release resources for cache entry after extracting key.
+ delete entry;
}
+ return found;
+}
- /**
- * Tests for ListFilters
- */
+/**
+ * Tests for ListFilters
+ */
- TEST(IndexFilterCommandsTest, ListFiltersEmpty) {
- QuerySettings empty;
- vector<BSONObj> filters = getFilters(empty);
- ASSERT_TRUE(filters.empty());
- }
+TEST(IndexFilterCommandsTest, ListFiltersEmpty) {
+ QuerySettings empty;
+ vector<BSONObj> filters = getFilters(empty);
+ ASSERT_TRUE(filters.empty());
+}
- /**
- * Tests for ClearFilters
- */
-
- TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
- QuerySettings empty;
- PlanCache planCache;
- OperationContextNoop txn;
-
- // If present, query has to be an object.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{query: 1234}")));
- // If present, sort must be an object.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, sort: 1234}")));
- // If present, projection must be an object.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, projection: 1234}")));
- // Query must pass canonicalization.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: {$no_such_op: 1}}}")));
- // Sort present without query is an error.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{sort: {a: 1}}")));
- // Projection present without query is an error.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns,
- fromjson("{projection: {_id: 0, a: 1}}")));
- }
+/**
+ * Tests for ClearFilters
+ */
- TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
- QuerySettings querySettings;
- PlanCache planCache;
- OperationContextNoop txn;
-
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
- vector<BSONObj> filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
-
- // Clear nonexistent hint.
- // Command should succeed and cache should remain unchanged.
- ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{query: {b: 1}}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
- }
+TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
+ QuerySettings empty;
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ // If present, query has to be an object.
+ ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{query: 1234}")));
+ // If present, sort must be an object.
+ ASSERT_NOT_OK(
+ ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, sort: 1234}")));
+ // If present, projection must be an object.
+ ASSERT_NOT_OK(ClearFilters::clear(
+ &txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, projection: 1234}")));
+ // Query must pass canonicalization.
+ ASSERT_NOT_OK(ClearFilters::clear(
+ &txn, &empty, &planCache, ns, fromjson("{query: {a: {$no_such_op: 1}}}")));
+ // Sort present without query is an error.
+ ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{sort: {a: 1}}")));
+ // Projection present without query is an error.
+ ASSERT_NOT_OK(ClearFilters::clear(
+ &txn, &empty, &planCache, ns, fromjson("{projection: {_id: 0, a: 1}}")));
+}
+
+TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
+ QuerySettings querySettings;
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ ASSERT_OK(SetFilter::set(
+ &txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
+ vector<BSONObj> filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 1U);
+
+ // Clear nonexistent hint.
+ // Command should succeed and cache should remain unchanged.
+ ASSERT_OK(
+ ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{query: {b: 1}}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 1U);
+}
- /**
- * Tests for SetFilter
- */
-
- TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
- QuerySettings empty;
- PlanCache planCache;
- OperationContextNoop txn;
-
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{}")));
- // Missing required query field.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{indexes: [{a: 1}]}")));
- // Missing required indexes field.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}}")));
- // Query has to be an object.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
- // Indexes field has to be an array.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: 1234}")));
- // Array indexes field cannot empty.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: []}")));
- // Elements in indexes have to be objects.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
- // Objects in indexes cannot be empty.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
- // If present, sort must be an object.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, sort: 1234, indexes: [{a: 1}, {b: 1}]}")));
- // If present, projection must be an object.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, projection: 1234, indexes: [{a: 1}, {b: 1}]}")));
- // Query must pass canonicalization.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: {$no_such_op: 1}}, indexes: [{a: 1}, {b: 1}]}")));
- }
+/**
+ * Tests for SetFilter
+ */
- TEST(IndexFilterCommandsTest, SetAndClearFilters) {
- QuerySettings querySettings;
- PlanCache planCache;
- OperationContextNoop txn;
-
- // Inject query shape into plan cache.
- addQueryShapeToPlanCache(&planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}");
- ASSERT_TRUE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
-
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
- "indexes: [{a: 1}]}")));
- vector<BSONObj> filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
-
- // Query shape should not exist in plan cache after hint is updated.
- ASSERT_FALSE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
-
- // Fields in filter should match criteria in most recent query settings update.
- ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 1, b: 1}"));
- ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{a: -1}"));
- ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{_id: 0, a: 1}"));
-
- // Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
- // share same shape) should not change the query settings size.
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
- "indexes: [{a: 1, b: 1}]}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
-
- // Add hint for different query shape.
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {b: 1}, indexes: [{b: 1}]}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 2U);
-
- // Add hint for 3rd query shape. This is to prepare for ClearHint tests.
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 3U);
-
- // Add 2 entries to plan cache and check plan cache after clearing one/all filters.
- addQueryShapeToPlanCache(&planCache, "{a: 1}", "{}", "{}");
- addQueryShapeToPlanCache(&planCache, "{b: 1}", "{}", "{}");
-
- // Clear single hint.
- ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {a: 1}}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 2U);
-
- // Query shape should not exist in plan cache after cleaing 1 hint.
- ASSERT_FALSE(planCacheContains(planCache, "{a: 1}", "{}", "{}"));
- ASSERT_TRUE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
-
- // Clear all filters
- ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{}")));
- filters = getFilters(querySettings);
- ASSERT_TRUE(filters.empty());
-
- // {b: 1} should be gone from plan cache after flushing query settings.
- ASSERT_FALSE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
- }
+TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
+ QuerySettings empty;
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{}")));
+ // Missing required query field.
+ ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{indexes: [{a: 1}]}")));
+ // Missing required indexes field.
+ ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}}")));
+ // Query has to be an object.
+ ASSERT_NOT_OK(SetFilter::set(
+ &txn, &empty, &planCache, ns, fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ // Indexes field has to be an array.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, indexes: 1234}")));
+    // Array indexes field cannot be empty.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, indexes: []}")));
+ // Elements in indexes have to be objects.
+ ASSERT_NOT_OK(SetFilter::set(
+ &txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
+ // Objects in indexes cannot be empty.
+ ASSERT_NOT_OK(SetFilter::set(
+ &txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
+ // If present, sort must be an object.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn,
+ &empty,
+ &planCache,
+ ns,
+ fromjson("{query: {a: 1}, sort: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ // If present, projection must be an object.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn,
+ &empty,
+ &planCache,
+ ns,
+ fromjson("{query: {a: 1}, projection: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ // Query must pass canonicalization.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn,
+ &empty,
+ &planCache,
+ ns,
+ fromjson("{query: {a: {$no_such_op: 1}}, indexes: [{a: 1}, {b: 1}]}")));
+}
+
+TEST(IndexFilterCommandsTest, SetAndClearFilters) {
+ QuerySettings querySettings;
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ // Inject query shape into plan cache.
+ addQueryShapeToPlanCache(&planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}");
+ ASSERT_TRUE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
+
+ ASSERT_OK(SetFilter::set(&txn,
+ &querySettings,
+ &planCache,
+ ns,
+ fromjson(
+ "{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1}]}")));
+ vector<BSONObj> filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 1U);
+
+ // Query shape should not exist in plan cache after hint is updated.
+ ASSERT_FALSE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
+
+ // Fields in filter should match criteria in most recent query settings update.
+ ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 1, b: 1}"));
+ ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{a: -1}"));
+ ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{_id: 0, a: 1}"));
+
+ // Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
+ // share same shape) should not change the query settings size.
+ ASSERT_OK(SetFilter::set(&txn,
+ &querySettings,
+ &planCache,
+ ns,
+ fromjson(
+ "{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1, b: 1}]}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 1U);
+
+ // Add hint for different query shape.
+ ASSERT_OK(SetFilter::set(
+ &txn, &querySettings, &planCache, ns, fromjson("{query: {b: 1}, indexes: [{b: 1}]}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 2U);
+
+ // Add hint for 3rd query shape. This is to prepare for ClearHint tests.
+ ASSERT_OK(SetFilter::set(
+ &txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 3U);
+
+ // Add 2 entries to plan cache and check plan cache after clearing one/all filters.
+ addQueryShapeToPlanCache(&planCache, "{a: 1}", "{}", "{}");
+ addQueryShapeToPlanCache(&planCache, "{b: 1}", "{}", "{}");
+
+ // Clear single hint.
+ ASSERT_OK(
+ ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 2U);
+
+    // Query shape should not exist in plan cache after clearing 1 hint.
+ ASSERT_FALSE(planCacheContains(planCache, "{a: 1}", "{}", "{}"));
+ ASSERT_TRUE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
+
+ // Clear all filters
+ ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{}")));
+ filters = getFilters(querySettings);
+ ASSERT_TRUE(filters.empty());
+
+ // {b: 1} should be gone from plan cache after flushing query settings.
+ ASSERT_FALSE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
+}
} // namespace
diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp
index ebec8ae4fdb..91522b4ddb7 100644
--- a/src/mongo/db/commands/isself.cpp
+++ b/src/mongo/db/commands/isself.cpp
@@ -36,36 +36,40 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- class IsSelfCommand : public Command {
- public:
- IsSelfCommand() : Command("_isSelf") {}
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help << "{ _isSelf : 1 } INTERNAL ONLY";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- result.append( "id" , repl::instanceId );
- return true;
- }
- };
-
- MONGO_INITIALIZER_WITH_PREREQUISITES(RegisterIsSelfCommand, ("GenerateInstanceId"))
- (InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed
- new IsSelfCommand();
- return Status::OK();
+class IsSelfCommand : public Command {
+public:
+ IsSelfCommand() : Command("_isSelf") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ _isSelf : 1 } INTERNAL ONLY";
}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ result.append("id", repl::instanceId);
+ return true;
+ }
+};
+
+MONGO_INITIALIZER_WITH_PREREQUISITES(RegisterIsSelfCommand, ("GenerateInstanceId"))
+(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed
+ new IsSelfCommand();
+ return Status::OK();
+}
} // namespace mongo
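The "leaked intentionally" comment above captures the registration idiom this commit reformats throughout: constructing a Command adds it to a process-wide registry, so an initializer only has to new the object and never frees it. A self-contained sketch of the idiom with illustrative names (CommandSketch, commandRegistry), not the real Command machinery:

    #include <iostream>
    #include <map>
    #include <string>

    class CommandSketch;  // forward declaration for the registry type

    // Illustrative stand-in for the process-wide command registry.
    std::map<std::string, CommandSketch*>& commandRegistry() {
        static std::map<std::string, CommandSketch*> registry;
        return registry;
    }

    class CommandSketch {
    public:
        explicit CommandSketch(const std::string& name) : _name(name) {
            // Registering in the constructor is what makes "new IsSelfCommand();" inside an
            // initializer sufficient; the object must outlive the process, hence the leak.
            commandRegistry()[_name] = this;
        }
        virtual ~CommandSketch() = default;

        const std::string& name() const {
            return _name;
        }

    private:
        std::string _name;
    };

    int main() {
        new CommandSketch("_isSelf");  // leaked intentionally, mirroring the initializer
        std::cout << "registered commands: " << commandRegistry().size() << "\n";
        return 0;
    }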
diff --git a/src/mongo/db/commands/kill_op.cpp b/src/mongo/db/commands/kill_op.cpp
index efa43986d0d..bd0555b9364 100644
--- a/src/mongo/db/commands/kill_op.cpp
+++ b/src/mongo/db/commands/kill_op.cpp
@@ -45,46 +45,49 @@
namespace mongo {
- class KillOpCommand : public Command {
- public:
-
- KillOpCommand() : Command("killOp") {}
-
- bool isWriteCommandForConfigServer() const final { return false; }
-
- bool slaveOk() const final { return true; }
-
- bool adminOnly() const final { return true; }
-
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) final {
-
- bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- ActionType::killop);
- return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) final {
-
- long long op;
- uassertStatusOK(bsonExtractIntegerField(cmdObj, "op", &op));
-
- log() << "going to kill op: " << op;
- result.append("info", "attempting to kill op");
-
- uassert(26823, str::stream() << "invalid op : " << op,
- (op >= 0) && (op <= std::numeric_limits<unsigned int>::max()));
-
- getGlobalServiceContext()->killOperation(static_cast<unsigned int>(op));
- return true;
- }
- } killOpCmd;
+class KillOpCommand : public Command {
+public:
+ KillOpCommand() : Command("killOp") {}
+
+ bool isWriteCommandForConfigServer() const final {
+ return false;
+ }
+
+ bool slaveOk() const final {
+ return true;
+ }
+
+ bool adminOnly() const final {
+ return true;
+ }
+
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) final {
+ bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::killop);
+ return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) final {
+ long long op;
+ uassertStatusOK(bsonExtractIntegerField(cmdObj, "op", &op));
+
+ log() << "going to kill op: " << op;
+ result.append("info", "attempting to kill op");
+
+ uassert(26823,
+ str::stream() << "invalid op : " << op,
+ (op >= 0) && (op <= std::numeric_limits<unsigned int>::max()));
+
+ getGlobalServiceContext()->killOperation(static_cast<unsigned int>(op));
+ return true;
+ }
+} killOpCmd;
} // namespace mongo
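killOp reads the op id as a signed 64-bit value and narrows it to unsigned int only after the uassert range check, so negative or oversized ids are rejected rather than silently truncated. A small self-contained sketch of the same check, with a plain exception standing in for uassert(26823, ...):

    #include <iostream>
    #include <limits>
    #include <stdexcept>
    #include <string>

    // Validate a 64-bit op id before narrowing it, mirroring the check in KillOpCommand::run().
    unsigned int checkedOpId(long long op) {
        if (op < 0 || op > std::numeric_limits<unsigned int>::max()) {
            throw std::out_of_range("invalid op : " + std::to_string(op));
        }
        return static_cast<unsigned int>(op);
    }

    int main() {
        std::cout << checkedOpId(12345) << "\n";  // accepted
        try {
            checkedOpId(-1);  // rejected, like the uassert in the command
        } catch (const std::out_of_range& e) {
            std::cout << "rejected: " << e.what() << "\n";
        }
        return 0;
    }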
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 9c4789e3cd3..b48e98598ac 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -48,165 +48,165 @@
namespace mongo {
- using std::unique_ptr;
- using std::list;
- using std::string;
- using std::stringstream;
-
- class CmdListCollections : public Command {
- public:
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool adminOnly() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help( stringstream& help ) const { help << "list collections for this db"; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
-
- // Check for the listCollections ActionType on the database
- // or find on system.namespaces for pre 3.0 systems.
- if (authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname),
- ActionType::listCollections) ||
- authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(dbname, "system.namespaces")),
- ActionType::find)) {
- return Status::OK();
+using std::unique_ptr;
+using std::list;
+using std::string;
+using std::stringstream;
+
+class CmdListCollections : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "list collections for this db";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+
+ // Check for the listCollections ActionType on the database
+ // or find on system.namespaces for pre 3.0 systems.
+ if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
+ ActionType::listCollections) ||
+ authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(dbname, "system.namespaces")),
+ ActionType::find)) {
+ return Status::OK();
+ }
+
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to create users on db: " << dbname);
+ }
+
+ CmdListCollections() : Command("listCollections") {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ std::unique_ptr<MatchExpression> matcher;
+ BSONElement filterElt = jsobj["filter"];
+ if (!filterElt.eoo()) {
+ if (filterElt.type() != mongo::Object) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::BadValue, "\"filter\" must be an object"));
+ }
+ StatusWithMatchExpression statusWithMatcher =
+ MatchExpressionParser::parse(filterElt.Obj());
+ if (!statusWithMatcher.isOK()) {
+ return appendCommandStatus(result, statusWithMatcher.getStatus());
}
+ matcher.reset(statusWithMatcher.getValue());
+ }
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create users on db: " <<
- dbname);
+ const long long defaultBatchSize = std::numeric_limits<long long>::max();
+ long long batchSize;
+ Status parseCursorStatus = parseCommandCursorOptions(jsobj, defaultBatchSize, &batchSize);
+ if (!parseCursorStatus.isOK()) {
+ return appendCommandStatus(result, parseCursorStatus);
}
- CmdListCollections() : Command( "listCollections" ) {}
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- std::unique_ptr<MatchExpression> matcher;
- BSONElement filterElt = jsobj["filter"];
- if (!filterElt.eoo()) {
- if (filterElt.type() != mongo::Object) {
- return appendCommandStatus(result, Status(ErrorCodes::BadValue,
- "\"filter\" must be an object"));
- }
- StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(filterElt.Obj());
- if (!statusWithMatcher.isOK()) {
- return appendCommandStatus(result, statusWithMatcher.getStatus());
- }
- matcher.reset(statusWithMatcher.getValue());
- }
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, dbname, MODE_S);
- const long long defaultBatchSize = std::numeric_limits<long long>::max();
- long long batchSize;
- Status parseCursorStatus = parseCommandCursorOptions(jsobj,
- defaultBatchSize,
- &batchSize);
- if (!parseCursorStatus.isOK()) {
- return appendCommandStatus(result, parseCursorStatus);
- }
+ const Database* d = autoDb.getDb();
+ const DatabaseCatalogEntry* dbEntry = NULL;
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, dbname, MODE_S);
+ list<string> names;
+ if (d) {
+ dbEntry = d->getDatabaseCatalogEntry();
+ dbEntry->getCollectionNamespaces(&names);
+ names.sort();
+ }
- const Database* d = autoDb.getDb();
- const DatabaseCatalogEntry* dbEntry = NULL;
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
- list<string> names;
- if ( d ) {
- dbEntry = d->getDatabaseCatalogEntry();
- dbEntry->getCollectionNamespaces( &names );
- names.sort();
- }
+ for (std::list<std::string>::const_iterator i = names.begin(); i != names.end(); ++i) {
+ const std::string& ns = *i;
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
-
- for (std::list<std::string>::const_iterator i = names.begin();
- i != names.end();
- ++i) {
- const std::string& ns = *i;
-
- StringData collection = nsToCollectionSubstring( ns );
- if ( collection == "system.namespaces" ) {
- continue;
- }
-
- BSONObjBuilder b;
- b.append( "name", collection );
-
- CollectionOptions options =
- dbEntry->getCollectionCatalogEntry( ns )->getCollectionOptions(txn);
- b.append( "options", options.toBSON() );
-
- BSONObj maybe = b.obj();
- if ( matcher && !matcher->matchesBSON( maybe ) ) {
- continue;
- }
-
- WorkingSetMember member;
- member.state = WorkingSetMember::OWNED_OBJ;
- member.keyData.clear();
- member.loc = RecordId();
- member.obj = Snapshotted<BSONObj>(SnapshotId(), maybe);
- root->pushBack(member);
+ StringData collection = nsToCollectionSubstring(ns);
+ if (collection == "system.namespaces") {
+ continue;
}
- std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name;
- dassert(NamespaceString(cursorNamespace).isValid());
- dassert(NamespaceString(cursorNamespace).isListCollectionsGetMore());
-
- PlanExecutor* rawExec;
- Status makeStatus = PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- cursorNamespace,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- std::unique_ptr<PlanExecutor> exec(rawExec);
- if (!makeStatus.isOK()) {
- return appendCommandStatus( result, makeStatus );
- }
+ BSONObjBuilder b;
+ b.append("name", collection);
- BSONArrayBuilder firstBatch;
-
- const int byteLimit = MaxBytesToReturnToClientAtOnce;
- for (long long objCount = 0;
- objCount < batchSize && firstBatch.len() < byteLimit;
- objCount++) {
- BSONObj next;
- PlanExecutor::ExecState state = exec->getNext(&next, NULL);
- if ( state == PlanExecutor::IS_EOF ) {
- break;
- }
- invariant( state == PlanExecutor::ADVANCED );
- firstBatch.append(next);
- }
+ CollectionOptions options =
+ dbEntry->getCollectionCatalogEntry(ns)->getCollectionOptions(txn);
+ b.append("options", options.toBSON());
- CursorId cursorId = 0LL;
- if ( !exec->isEOF() ) {
- exec->saveState();
- ClientCursor* cursor = new ClientCursor(CursorManager::getGlobalCursorManager(),
- exec.release(),
- cursorNamespace);
- cursorId = cursor->cursorid();
+ BSONObj maybe = b.obj();
+ if (matcher && !matcher->matchesBSON(maybe)) {
+ continue;
}
- appendCursorResponseObject( cursorId, cursorNamespace, firstBatch.arr(), &result );
+ WorkingSetMember member;
+ member.state = WorkingSetMember::OWNED_OBJ;
+ member.keyData.clear();
+ member.loc = RecordId();
+ member.obj = Snapshotted<BSONObj>(SnapshotId(), maybe);
+ root->pushBack(member);
+ }
- return true;
+ std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name;
+ dassert(NamespaceString(cursorNamespace).isValid());
+ dassert(NamespaceString(cursorNamespace).isListCollectionsGetMore());
+
+ PlanExecutor* rawExec;
+ Status makeStatus = PlanExecutor::make(txn,
+ ws.release(),
+ root.release(),
+ cursorNamespace,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+ if (!makeStatus.isOK()) {
+ return appendCommandStatus(result, makeStatus);
}
- } cmdListCollections;
+ BSONArrayBuilder firstBatch;
+
+ const int byteLimit = MaxBytesToReturnToClientAtOnce;
+ for (long long objCount = 0; objCount < batchSize && firstBatch.len() < byteLimit;
+ objCount++) {
+ BSONObj next;
+ PlanExecutor::ExecState state = exec->getNext(&next, NULL);
+ if (state == PlanExecutor::IS_EOF) {
+ break;
+ }
+ invariant(state == PlanExecutor::ADVANCED);
+ firstBatch.append(next);
+ }
+
+ CursorId cursorId = 0LL;
+ if (!exec->isEOF()) {
+ exec->saveState();
+ ClientCursor* cursor = new ClientCursor(
+ CursorManager::getGlobalCursorManager(), exec.release(), cursorNamespace);
+ cursorId = cursor->cursorid();
+ }
+
+ appendCursorResponseObject(cursorId, cursorNamespace, firstBatch.arr(), &result);
+
+ return true;
+ }
+} cmdListCollections;
}
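For orientation, the command document that CmdListCollections::run() above parses has this shape: "filter" must be a BSON object (matched against each {name, options} entry) and the first-batch size is read by parseCommandCursorOptions(). The following is only a hedged sketch, not part of the patch; it assumes the BSON() macro from "mongo/db/jsobj.h" and a hypothetical helper name:

#include "mongo/db/jsobj.h"

namespace {
// Builds a listCollections command of the shape run() accepts: "filter" is
// matched against each {name, options} document, and "cursor.batchSize"
// bounds the first batch appended to the cursor response.
mongo::BSONObj makeListCollectionsCmd() {
    using namespace mongo;
    return BSON("listCollections" << 1 << "filter" << BSON("name"
                                                           << "system.profile")
                                  << "cursor" << BSON("batchSize" << 2));
}
}  // namespace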
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 81779a83a76..886c035d076 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -38,85 +38,87 @@
namespace mongo {
- using std::set;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- // XXX: remove and put into storage api
- intmax_t dbSize( const string& database );
-
- class CmdListDatabases : public Command {
- public:
- virtual bool slaveOk() const {
- return false;
- }
- virtual bool slaveOverrideOk() const {
- return true;
- }
- virtual bool adminOnly() const {
- return true;
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream& help ) const { help << "list databases on this server"; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::listDatabases);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- CmdListDatabases() : Command("listDatabases" , true ) {}
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- vector< string > dbNames;
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases( &dbNames );
-
- vector< BSONObj > dbInfos;
-
- set<string> seen;
- intmax_t totalSize = 0;
- for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
- const string& dbname = *i;
-
- BSONObjBuilder b;
- b.append( "name", dbname );
-
- {
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::DBLock dbLock(txn->lockState(), dbname, MODE_IS);
-
- Database* db = dbHolder().get( txn, dbname );
- if ( !db )
- continue;
-
- const DatabaseCatalogEntry* entry = db->getDatabaseCatalogEntry();
- invariant( entry );
-
- int64_t size = entry->sizeOnDisk( txn );
- b.append( "sizeOnDisk", static_cast<double>( size ) );
- totalSize += size;
-
- b.appendBool("empty", size == 0);
- }
-
- dbInfos.push_back( b.obj() );
-
- seen.insert( i->c_str() );
+using std::set;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+// XXX: remove and put into storage api
+intmax_t dbSize(const string& database);
+
+class CmdListDatabases : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "list databases on this server";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::listDatabases);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ CmdListDatabases() : Command("listDatabases", true) {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ vector<string> dbNames;
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&dbNames);
+
+ vector<BSONObj> dbInfos;
+
+ set<string> seen;
+ intmax_t totalSize = 0;
+ for (vector<string>::iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
+ const string& dbname = *i;
+
+ BSONObjBuilder b;
+ b.append("name", dbname);
+
+ {
+ ScopedTransaction transaction(txn, MODE_IS);
+ Lock::DBLock dbLock(txn->lockState(), dbname, MODE_IS);
+
+ Database* db = dbHolder().get(txn, dbname);
+ if (!db)
+ continue;
+
+ const DatabaseCatalogEntry* entry = db->getDatabaseCatalogEntry();
+ invariant(entry);
+
+ int64_t size = entry->sizeOnDisk(txn);
+ b.append("sizeOnDisk", static_cast<double>(size));
+ totalSize += size;
+
+ b.appendBool("empty", size == 0);
}
- result.append( "databases", dbInfos );
- result.append( "totalSize", double( totalSize ) );
- return true;
+ dbInfos.push_back(b.obj());
+
+ seen.insert(i->c_str());
}
- } cmdListDatabases;
+ result.append("databases", dbInfos);
+ result.append("totalSize", double(totalSize));
+ return true;
+ }
+} cmdListDatabases;
}
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index f020f87c43d..ce0394156e3 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -48,159 +48,163 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
-
- /**
- * Lists the indexes for a given collection.
- *
- * Format:
- * {
- * listIndexes: <collection name>
- * }
- *
- * Return format:
- * {
- * indexes: [
- * ...
- * ]
- * }
- */
- class CmdListIndexes : public Command {
- public:
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool adminOnly() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help( stringstream& help ) const { help << "list indexes for a collection"; }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::listIndexes);
- out->push_back(Privilege(parseResourcePattern( dbname, cmdObj ), actions));
- }
-
- CmdListIndexes() : Command( "listIndexes" ) {}
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
+using std::string;
+using std::stringstream;
+using std::vector;
- BSONElement first = cmdObj.firstElement();
- uassert(
- 28528,
+/**
+ * Lists the indexes for a given collection.
+ *
+ * Format:
+ * {
+ * listIndexes: <collection name>
+ * }
+ *
+ * Return format:
+ * {
+ * indexes: [
+ * ...
+ * ]
+ * }
+ */
+class CmdListIndexes : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "list indexes for a collection";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::listIndexes);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ CmdListIndexes() : Command("listIndexes") {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONElement first = cmdObj.firstElement();
+ uassert(28528,
str::stream() << "Argument to listIndexes must be of type String, not "
<< typeName(first.type()),
first.type() == String);
- StringData collectionName = first.valueStringData();
- uassert(
- 28529,
+ StringData collectionName = first.valueStringData();
+ uassert(28529,
str::stream() << "Argument to listIndexes must be a collection name, "
<< "not the empty string",
!collectionName.empty());
- const NamespaceString ns(dbname, collectionName);
-
- const long long defaultBatchSize = std::numeric_limits<long long>::max();
- long long batchSize;
- Status parseCursorStatus = parseCommandCursorOptions(cmdObj,
- defaultBatchSize,
- &batchSize);
- if (!parseCursorStatus.isOK()) {
- return appendCommandStatus(result, parseCursorStatus);
- }
+ const NamespaceString ns(dbname, collectionName);
- AutoGetCollectionForRead autoColl(txn, ns);
- if (!autoColl.getDb()) {
- return appendCommandStatus( result, Status( ErrorCodes::NamespaceNotFound,
- "no database" ) );
- }
+ const long long defaultBatchSize = std::numeric_limits<long long>::max();
+ long long batchSize;
+ Status parseCursorStatus = parseCommandCursorOptions(cmdObj, defaultBatchSize, &batchSize);
+ if (!parseCursorStatus.isOK()) {
+ return appendCommandStatus(result, parseCursorStatus);
+ }
- const Collection* collection = autoColl.getCollection();
- if (!collection) {
- return appendCommandStatus( result, Status( ErrorCodes::NamespaceNotFound,
- "no collection" ) );
- }
+ AutoGetCollectionForRead autoColl(txn, ns);
+ if (!autoColl.getDb()) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NamespaceNotFound, "no database"));
+ }
+
+ const Collection* collection = autoColl.getCollection();
+ if (!collection) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NamespaceNotFound, "no collection"));
+ }
- const CollectionCatalogEntry* cce = collection->getCatalogEntry();
- invariant(cce);
+ const CollectionCatalogEntry* cce = collection->getCatalogEntry();
+ invariant(cce);
- vector<string> indexNames;
+ vector<string> indexNames;
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ indexNames.clear();
+ cce->getAllIndexes(txn, &indexNames);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
+
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
+
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ BSONObj indexSpec;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- indexNames.clear();
- cce->getAllIndexes( txn, &indexNames );
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
-
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
-
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- BSONObj indexSpec;
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- indexSpec = cce->getIndexSpec( txn, indexNames[i] );
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
-
- WorkingSetMember member;
- member.state = WorkingSetMember::OWNED_OBJ;
- member.keyData.clear();
- member.loc = RecordId();
- member.obj = Snapshotted<BSONObj>(SnapshotId(), indexSpec.getOwned());
- root->pushBack(member);
+ indexSpec = cce->getIndexSpec(txn, indexNames[i]);
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
+
+ WorkingSetMember member;
+ member.state = WorkingSetMember::OWNED_OBJ;
+ member.keyData.clear();
+ member.loc = RecordId();
+ member.obj = Snapshotted<BSONObj>(SnapshotId(), indexSpec.getOwned());
+ root->pushBack(member);
+ }
- std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name << "."
- << ns.coll();
- dassert(NamespaceString(cursorNamespace).isValid());
- dassert(NamespaceString(cursorNamespace).isListIndexesGetMore());
- dassert(ns == NamespaceString(cursorNamespace).getTargetNSForListIndexesGetMore());
-
- PlanExecutor* rawExec;
- Status makeStatus = PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- cursorNamespace,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- std::unique_ptr<PlanExecutor> exec(rawExec);
- if (!makeStatus.isOK()) {
- return appendCommandStatus( result, makeStatus );
- }
+ std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name << "."
+ << ns.coll();
+ dassert(NamespaceString(cursorNamespace).isValid());
+ dassert(NamespaceString(cursorNamespace).isListIndexesGetMore());
+ dassert(ns == NamespaceString(cursorNamespace).getTargetNSForListIndexesGetMore());
+
+ PlanExecutor* rawExec;
+ Status makeStatus = PlanExecutor::make(txn,
+ ws.release(),
+ root.release(),
+ cursorNamespace,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+ if (!makeStatus.isOK()) {
+ return appendCommandStatus(result, makeStatus);
+ }
- BSONArrayBuilder firstBatch;
-
- const int byteLimit = MaxBytesToReturnToClientAtOnce;
- for (long long objCount = 0;
- objCount < batchSize && firstBatch.len() < byteLimit;
- objCount++) {
- BSONObj next;
- PlanExecutor::ExecState state = exec->getNext(&next, NULL);
- if ( state == PlanExecutor::IS_EOF ) {
- break;
- }
- invariant( state == PlanExecutor::ADVANCED );
- firstBatch.append(next);
- }
+ BSONArrayBuilder firstBatch;
- CursorId cursorId = 0LL;
- if ( !exec->isEOF() ) {
- exec->saveState();
- ClientCursor* cursor = new ClientCursor(CursorManager::getGlobalCursorManager(),
- exec.release(),
- cursorNamespace);
- cursorId = cursor->cursorid();
+ const int byteLimit = MaxBytesToReturnToClientAtOnce;
+ for (long long objCount = 0; objCount < batchSize && firstBatch.len() < byteLimit;
+ objCount++) {
+ BSONObj next;
+ PlanExecutor::ExecState state = exec->getNext(&next, NULL);
+ if (state == PlanExecutor::IS_EOF) {
+ break;
}
+ invariant(state == PlanExecutor::ADVANCED);
+ firstBatch.append(next);
+ }
- appendCursorResponseObject( cursorId, cursorNamespace, firstBatch.arr(), &result );
-
- return true;
+ CursorId cursorId = 0LL;
+ if (!exec->isEOF()) {
+ exec->saveState();
+ ClientCursor* cursor = new ClientCursor(
+ CursorManager::getGlobalCursorManager(), exec.release(), cursorNamespace);
+ cursorId = cursor->cursorid();
}
- } cmdListIndexes;
+ appendCursorResponseObject(cursorId, cursorNamespace, firstBatch.arr(), &result);
+
+ return true;
+ }
+} cmdListIndexes;
}
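The doc comment above gives the listIndexes wire format; as a hedged sketch (not part of the patch), a matching command document can be built with the same BSON() macro. The collection name "mycoll" and the helper name are hypothetical; the first element must be a non-empty string per uasserts 28528 and 28529:

#include "mongo/db/jsobj.h"

namespace {
// A listIndexes invocation of the documented shape; run() above replies with
// the collection's index specs as a cursor-style first batch.
mongo::BSONObj makeListIndexesCmd() {
    using namespace mongo;
    return BSON("listIndexes"
                << "mycoll"
                << "cursor" << BSON("batchSize" << 2));
}
}  // namespace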
diff --git a/src/mongo/db/commands/merge_chunks_cmd.cpp b/src/mongo/db/commands/merge_chunks_cmd.cpp
index 87721b11469..1ee9d397dd7 100644
--- a/src/mongo/db/commands/merge_chunks_cmd.cpp
+++ b/src/mongo/db/commands/merge_chunks_cmd.cpp
@@ -38,145 +38,151 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
-
- /**
- * Mongod-side command for merging chunks.
- */
- class MergeChunksCommand : public Command {
- public:
- MergeChunksCommand() : Command("mergeChunks") {}
-
- virtual void help(stringstream& h) const {
- h << "Merge Chunks command\n"
- << "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ],"
- << " (opt) epoch : <epoch>, (opt) config : <configdb string>,"
- << " (opt) shardName : <shard name> }";
- }
+using std::string;
+using std::stringstream;
+using std::vector;
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
- ActionType::splitChunk)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
+/**
+ * Mongod-side command for merging chunks.
+ */
+class MergeChunksCommand : public Command {
+public:
+ MergeChunksCommand() : Command("mergeChunks") {}
+
+ virtual void help(stringstream& h) const {
+ h << "Merge Chunks command\n"
+ << "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ],"
+ << " (opt) epoch : <epoch>, (opt) config : <configdb string>,"
+ << " (opt) shardName : <shard name> }";
+ }
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
+ ActionType::splitChunk)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
+ return Status::OK();
+ }
- virtual bool adminOnly() const { return true; }
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- // Required
- static BSONField<string> nsField;
- static BSONField<vector<BSONObj> > boundsField;
- // Optional, if the merge is only valid for a particular epoch
- static BSONField<OID> epochField;
- // Optional, if our sharding state has not previously been initializeed
- static BSONField<string> shardNameField;
- static BSONField<string> configField;
-
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- string ns = parseNs(dbname, cmdObj);
-
- if ( ns.size() == 0 ) {
- errmsg = "no namespace specified";
- return false;
- }
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
- vector<BSONObj> bounds;
- if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) {
- return false;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- if ( bounds.size() == 0 ) {
- errmsg = "no bounds were specified";
- return false;
- }
+ // Required
+ static BSONField<string> nsField;
+ static BSONField<vector<BSONObj>> boundsField;
+ // Optional, if the merge is only valid for a particular epoch
+ static BSONField<OID> epochField;
+    // Optional, if our sharding state has not previously been initialized
+ static BSONField<string> shardNameField;
+ static BSONField<string> configField;
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns = parseNs(dbname, cmdObj);
+
+ if (ns.size() == 0) {
+ errmsg = "no namespace specified";
+ return false;
+ }
- if ( bounds.size() != 2 ) {
- errmsg = "only a min and max bound may be specified";
- return false;
- }
+ vector<BSONObj> bounds;
+ if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
+ return false;
+ }
- BSONObj minKey = bounds[0];
- BSONObj maxKey = bounds[1];
+ if (bounds.size() == 0) {
+ errmsg = "no bounds were specified";
+ return false;
+ }
- if ( minKey.isEmpty() ) {
- errmsg = "no min key specified";
- return false;
- }
+ if (bounds.size() != 2) {
+ errmsg = "only a min and max bound may be specified";
+ return false;
+ }
- if ( maxKey.isEmpty() ) {
- errmsg = "no max key specified";
- return false;
- }
+ BSONObj minKey = bounds[0];
+ BSONObj maxKey = bounds[1];
- //
- // This might be the first call from mongos, so we may need to pass the config and shard
- // information to initialize the shardingState.
- //
-
- string config;
- FieldParser::FieldState extracted = FieldParser::extract( cmdObj,
- configField,
- &config,
- &errmsg );
- if (!shardingState.enabled()) {
- if (!extracted || extracted == FieldParser::FIELD_NONE) {
- errmsg = "sharding state must be enabled or "
- "config server specified to merge chunks";
- return false;
- }
-
- ShardingState::initialize(config);
- }
+ if (minKey.isEmpty()) {
+ errmsg = "no min key specified";
+ return false;
+ }
- // ShardName is optional, but might not be set yet
- string shardName;
- extracted = FieldParser::extract( cmdObj, shardNameField, &shardName, &errmsg );
+ if (maxKey.isEmpty()) {
+ errmsg = "no max key specified";
+ return false;
+ }
- if ( !extracted ) return false;
- if ( extracted != FieldParser::FIELD_NONE ) {
- shardingState.gotShardName( shardName );
+ //
+ // This might be the first call from mongos, so we may need to pass the config and shard
+ // information to initialize the shardingState.
+ //
+
+ string config;
+ FieldParser::FieldState extracted =
+ FieldParser::extract(cmdObj, configField, &config, &errmsg);
+ if (!shardingState.enabled()) {
+ if (!extracted || extracted == FieldParser::FIELD_NONE) {
+ errmsg =
+ "sharding state must be enabled or "
+ "config server specified to merge chunks";
+ return false;
}
- //
- // Epoch is optional, and if not set indicates we should use the latest epoch
- //
+ ShardingState::initialize(config);
+ }
- OID epoch;
- if ( !FieldParser::extract( cmdObj, epochField, &epoch, &errmsg ) ) {
- return false;
- }
+ // ShardName is optional, but might not be set yet
+ string shardName;
+ extracted = FieldParser::extract(cmdObj, shardNameField, &shardName, &errmsg);
- return mergeChunks( txn, NamespaceString( ns ), minKey, maxKey, epoch, &errmsg );
+ if (!extracted)
+ return false;
+ if (extracted != FieldParser::FIELD_NONE) {
+ shardingState.gotShardName(shardName);
}
- };
- BSONField<string> MergeChunksCommand::nsField( "mergeChunks" );
- BSONField<vector<BSONObj> > MergeChunksCommand::boundsField( "bounds" );
+ //
+ // Epoch is optional, and if not set indicates we should use the latest epoch
+ //
- BSONField<string> MergeChunksCommand::configField( "config" );
- BSONField<string> MergeChunksCommand::shardNameField( "shardName" );
- BSONField<OID> MergeChunksCommand::epochField( "epoch" );
+ OID epoch;
+ if (!FieldParser::extract(cmdObj, epochField, &epoch, &errmsg)) {
+ return false;
+ }
- MONGO_INITIALIZER(InitMergeChunksCommand)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new MergeChunksCommand();
- return Status::OK();
+ return mergeChunks(txn, NamespaceString(ns), minKey, maxKey, epoch, &errmsg);
}
+};
+
+BSONField<string> MergeChunksCommand::nsField("mergeChunks");
+BSONField<vector<BSONObj>> MergeChunksCommand::boundsField("bounds");
+
+BSONField<string> MergeChunksCommand::configField("config");
+BSONField<string> MergeChunksCommand::shardNameField("shardName");
+BSONField<OID> MergeChunksCommand::epochField("epoch");
+
+MONGO_INITIALIZER(InitMergeChunksCommand)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new MergeChunksCommand();
+ return Status::OK();
+}
}
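The help text above lists the mergeChunks arguments; the sketch below (not part of the patch) shows a command document with just the required fields, using the BSON()/BSON_ARRAY builders from this codebase. The namespace and shard-key bounds are hypothetical; epoch, config, and shardName stay optional, as the field comments note:

#include "mongo/db/jsobj.h"

namespace {
// Required fields only: the fully qualified namespace (parseNsFullyQualified)
// and a [min, max] bounds array, matching nsField and boundsField above.
mongo::BSONObj makeMergeChunksCmd() {
    using namespace mongo;
    return BSON("mergeChunks"
                << "test.users"
                << "bounds" << BSON_ARRAY(BSON("x" << 0) << BSON("x" << 100)));
}
}  // namespace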
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index d88f05f733f..59eca8ae4c4 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -71,882 +71,848 @@
namespace mongo {
- using std::endl;
- using std::set;
- using std::shared_ptr;
- using std::string;
- using std::stringstream;
- using std::unique_ptr;
- using std::vector;
+using std::endl;
+using std::set;
+using std::shared_ptr;
+using std::string;
+using std::stringstream;
+using std::unique_ptr;
+using std::vector;
- namespace mr {
+namespace mr {
- AtomicUInt32 Config::JOB_NUMBER;
+AtomicUInt32 Config::JOB_NUMBER;
- JSFunction::JSFunction( const std::string& type , const BSONElement& e ) {
- _type = type;
- _code = e._asCode();
+JSFunction::JSFunction(const std::string& type, const BSONElement& e) {
+ _type = type;
+ _code = e._asCode();
- if ( e.type() == CodeWScope )
- _wantedScope = e.codeWScopeObject();
- }
+ if (e.type() == CodeWScope)
+ _wantedScope = e.codeWScopeObject();
+}
- void JSFunction::init( State * state ) {
- _scope = state->scope();
- verify( _scope );
- _scope->init( &_wantedScope );
+void JSFunction::init(State* state) {
+ _scope = state->scope();
+ verify(_scope);
+ _scope->init(&_wantedScope);
- _func = _scope->createFunction( _code.c_str() );
- uassert( 13598 , str::stream() << "couldn't compile code for: " << _type , _func );
+ _func = _scope->createFunction(_code.c_str());
+ uassert(13598, str::stream() << "couldn't compile code for: " << _type, _func);
- // install in JS scope so that it can be called in JS mode
- _scope->setFunction(_type.c_str(), _code.c_str());
- }
+ // install in JS scope so that it can be called in JS mode
+ _scope->setFunction(_type.c_str(), _code.c_str());
+}
- void JSMapper::init( State * state ) {
- _func.init( state );
- _params = state->config().mapParams;
- }
+void JSMapper::init(State* state) {
+ _func.init(state);
+ _params = state->config().mapParams;
+}
- /**
- * Applies the map function to an object, which should internally call emit()
- */
- void JSMapper::map( const BSONObj& o ) {
- Scope * s = _func.scope();
- verify( s );
- if (s->invoke(_func.func(), &_params, &o, 0, true))
- uasserted(9014, str::stream() << "map invoke failed: " << s->getError());
- }
+/**
+ * Applies the map function to an object, which should internally call emit()
+ */
+void JSMapper::map(const BSONObj& o) {
+ Scope* s = _func.scope();
+ verify(s);
+ if (s->invoke(_func.func(), &_params, &o, 0, true))
+ uasserted(9014, str::stream() << "map invoke failed: " << s->getError());
+}
- /**
- * Applies the finalize function to a tuple obj (key, val)
- * Returns tuple obj {_id: key, value: newval}
- */
- BSONObj JSFinalizer::finalize( const BSONObj& o ) {
- Scope * s = _func.scope();
-
- Scope::NoDBAccess no = s->disableDBAccess( "can't access db inside finalize" );
- s->invokeSafe( _func.func() , &o, 0 );
-
- // don't want to use o.objsize() to size b
- // since there are many cases where the point of finalize
- // is converting many fields to 1
- BSONObjBuilder b;
- b.append( o.firstElement() );
- s->append( b , "value" , "__returnValue" );
- return b.obj();
- }
+/**
+ * Applies the finalize function to a tuple obj (key, val)
+ * Returns tuple obj {_id: key, value: newval}
+ */
+BSONObj JSFinalizer::finalize(const BSONObj& o) {
+ Scope* s = _func.scope();
+
+ Scope::NoDBAccess no = s->disableDBAccess("can't access db inside finalize");
+ s->invokeSafe(_func.func(), &o, 0);
+
+ // don't want to use o.objsize() to size b
+ // since there are many cases where the point of finalize
+ // is converting many fields to 1
+ BSONObjBuilder b;
+ b.append(o.firstElement());
+ s->append(b, "value", "__returnValue");
+ return b.obj();
+}
- void JSReducer::init( State * state ) {
- _func.init( state );
- }
+void JSReducer::init(State* state) {
+ _func.init(state);
+}
- /**
- * Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value}
- */
- BSONObj JSReducer::reduce( const BSONList& tuples ) {
- if (tuples.size() <= 1)
- return tuples[0];
- BSONObj key;
- int endSizeEstimate = 16;
- _reduce( tuples , key , endSizeEstimate );
-
- BSONObjBuilder b(endSizeEstimate);
- b.appendAs( key.firstElement() , "0" );
- _func.scope()->append( b , "1" , "__returnValue" );
- return b.obj();
- }
+/**
+ * Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value}
+ */
+BSONObj JSReducer::reduce(const BSONList& tuples) {
+ if (tuples.size() <= 1)
+ return tuples[0];
+ BSONObj key;
+ int endSizeEstimate = 16;
+ _reduce(tuples, key, endSizeEstimate);
+
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs(key.firstElement(), "0");
+ _func.scope()->append(b, "1", "__returnValue");
+ return b.obj();
+}
- /**
- * Reduces a list of tuple object (key, value) to a single tuple {_id: key, value: val}
- * Also applies a finalizer method if present.
- */
- BSONObj JSReducer::finalReduce( const BSONList& tuples , Finalizer * finalizer ) {
+/**
+ * Reduces a list of tuple object (key, value) to a single tuple {_id: key, value: val}
+ * Also applies a finalizer method if present.
+ */
+BSONObj JSReducer::finalReduce(const BSONList& tuples, Finalizer* finalizer) {
+ BSONObj res;
+ BSONObj key;
+
+ if (tuples.size() == 1) {
+ // 1 obj, just use it
+ key = tuples[0];
+ BSONObjBuilder b(key.objsize());
+ BSONObjIterator it(key);
+ b.appendAs(it.next(), "_id");
+ b.appendAs(it.next(), "value");
+ res = b.obj();
+ } else {
+ // need to reduce
+ int endSizeEstimate = 16;
+ _reduce(tuples, key, endSizeEstimate);
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs(key.firstElement(), "_id");
+ _func.scope()->append(b, "value", "__returnValue");
+ res = b.obj();
+ }
- BSONObj res;
- BSONObj key;
-
- if (tuples.size() == 1) {
- // 1 obj, just use it
- key = tuples[0];
- BSONObjBuilder b(key.objsize());
- BSONObjIterator it(key);
- b.appendAs( it.next() , "_id" );
- b.appendAs( it.next() , "value" );
- res = b.obj();
- }
- else {
- // need to reduce
- int endSizeEstimate = 16;
- _reduce( tuples , key , endSizeEstimate );
- BSONObjBuilder b(endSizeEstimate);
- b.appendAs( key.firstElement() , "_id" );
- _func.scope()->append( b , "value" , "__returnValue" );
- res = b.obj();
- }
+ if (finalizer) {
+ res = finalizer->finalize(res);
+ }
- if ( finalizer ) {
- res = finalizer->finalize( res );
- }
+ return res;
+}
- return res;
+/**
+ * actually applies a reduce, to a list of tuples (key, value).
+ * After the call, tuples will hold a single tuple {"0": key, "1": value}
+ */
+void JSReducer::_reduce(const BSONList& tuples, BSONObj& key, int& endSizeEstimate) {
+ uassert(10074, "need values", tuples.size());
+
+ int sizeEstimate = (tuples.size() * tuples.begin()->getField("value").size()) + 128;
+
+ // need to build the reduce args: ( key, [values] )
+ BSONObjBuilder reduceArgs(sizeEstimate);
+ std::unique_ptr<BSONArrayBuilder> valueBuilder;
+ unsigned n = 0;
+ for (; n < tuples.size(); n++) {
+ BSONObjIterator j(tuples[n]);
+ BSONElement keyE = j.next();
+ if (n == 0) {
+ reduceArgs.append(keyE);
+ key = keyE.wrap();
+ valueBuilder.reset(new BSONArrayBuilder(reduceArgs.subarrayStart("tuples")));
}
- /**
- * actually applies a reduce, to a list of tuples (key, value).
- * After the call, tuples will hold a single tuple {"0": key, "1": value}
- */
- void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) {
- uassert( 10074 , "need values" , tuples.size() );
-
- int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128;
-
- // need to build the reduce args: ( key, [values] )
- BSONObjBuilder reduceArgs( sizeEstimate );
- std::unique_ptr<BSONArrayBuilder> valueBuilder;
- unsigned n = 0;
- for ( ; n<tuples.size(); n++ ) {
- BSONObjIterator j(tuples[n]);
- BSONElement keyE = j.next();
- if ( n == 0 ) {
- reduceArgs.append( keyE );
- key = keyE.wrap();
- valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) ));
- }
-
- BSONElement ee = j.next();
-
- uassert( 13070 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
+ BSONElement ee = j.next();
- // If adding this element to the array would cause it to be too large, break. The
- // remainder of the tuples will be processed recursively at the end of this
- // function.
- if ( valueBuilder->len() + ee.size() > BSONObjMaxUserSize ) {
- verify( n > 1 ); // if not, inf. loop
- break;
- }
+ uassert(13070, "value too large to reduce", ee.size() < (BSONObjMaxUserSize / 2));
- valueBuilder->append( ee );
- }
- verify(valueBuilder);
- valueBuilder->done();
- BSONObj args = reduceArgs.obj();
+ // If adding this element to the array would cause it to be too large, break. The
+ // remainder of the tuples will be processed recursively at the end of this
+ // function.
+ if (valueBuilder->len() + ee.size() > BSONObjMaxUserSize) {
+ verify(n > 1); // if not, inf. loop
+ break;
+ }
- Scope * s = _func.scope();
+ valueBuilder->append(ee);
+ }
+ verify(valueBuilder);
+ valueBuilder->done();
+ BSONObj args = reduceArgs.obj();
- s->invokeSafe(_func.func(), &args, 0);
- ++numReduces;
+ Scope* s = _func.scope();
- if ( s->type( "__returnValue" ) == Array ) {
- uasserted( 10075 , "reduce -> multiple not supported yet");
- return;
- }
+ s->invokeSafe(_func.func(), &args, 0);
+ ++numReduces;
- endSizeEstimate = key.objsize() + ( args.objsize() / tuples.size() );
+ if (s->type("__returnValue") == Array) {
+ uasserted(10075, "reduce -> multiple not supported yet");
+ return;
+ }
- if ( n == tuples.size() )
- return;
+ endSizeEstimate = key.objsize() + (args.objsize() / tuples.size());
- // the input list was too large, add the rest of elmts to new tuples and reduce again
- // note: would be better to use loop instead of recursion to avoid stack overflow
- BSONList x;
- for ( ; n < tuples.size(); n++ ) {
- x.push_back( tuples[n] );
- }
- BSONObjBuilder temp( endSizeEstimate );
- temp.append( key.firstElement() );
- s->append( temp , "1" , "__returnValue" );
- x.push_back( temp.obj() );
- _reduce( x , key , endSizeEstimate );
- }
+ if (n == tuples.size())
+ return;
- Config::Config( const string& _dbname , const BSONObj& cmdObj )
- {
- dbname = _dbname;
- ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
+    // the input list was too large; add the rest of the elements to new tuples and reduce again
+    // note: it would be better to use a loop instead of recursion to avoid stack overflow
+ BSONList x;
+ for (; n < tuples.size(); n++) {
+ x.push_back(tuples[n]);
+ }
+ BSONObjBuilder temp(endSizeEstimate);
+ temp.append(key.firstElement());
+ s->append(temp, "1", "__returnValue");
+ x.push_back(temp.obj());
+ _reduce(x, key, endSizeEstimate);
+}
- verbose = cmdObj["verbose"].trueValue();
- jsMode = cmdObj["jsMode"].trueValue();
- splitInfo = 0;
+Config::Config(const string& _dbname, const BSONObj& cmdObj) {
+ dbname = _dbname;
+ ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
- if (cmdObj.hasField("splitInfo")) {
- splitInfo = cmdObj["splitInfo"].Int();
- }
+ verbose = cmdObj["verbose"].trueValue();
+ jsMode = cmdObj["jsMode"].trueValue();
+ splitInfo = 0;
- jsMaxKeys = 500000;
- reduceTriggerRatio = 10.0;
- maxInMemSize = 500 * 1024;
+ if (cmdObj.hasField("splitInfo")) {
+ splitInfo = cmdObj["splitInfo"].Int();
+ }
- uassert( 13602 , "outType is no longer a valid option" , cmdObj["outType"].eoo() );
+ jsMaxKeys = 500000;
+ reduceTriggerRatio = 10.0;
+ maxInMemSize = 500 * 1024;
- outputOptions = parseOutputOptions(dbname, cmdObj);
+ uassert(13602, "outType is no longer a valid option", cmdObj["outType"].eoo());
- shardedFirstPass = false;
- if (cmdObj.hasField("shardedFirstPass") && cmdObj["shardedFirstPass"].trueValue()){
- massert(16054,
- "shardedFirstPass should only use replace outType",
- outputOptions.outType == REPLACE);
- shardedFirstPass = true;
- }
+ outputOptions = parseOutputOptions(dbname, cmdObj);
- if ( outputOptions.outType != INMEMORY ) { // setup temp collection name
- tempNamespace = str::stream()
- << (outputOptions.outDB.empty() ? dbname : outputOptions.outDB)
- << ".tmp.mr."
- << cmdObj.firstElement().String()
- << "_"
- << JOB_NUMBER.fetchAndAdd(1);
- incLong = tempNamespace + "_inc";
- }
+ shardedFirstPass = false;
+ if (cmdObj.hasField("shardedFirstPass") && cmdObj["shardedFirstPass"].trueValue()) {
+ massert(16054,
+ "shardedFirstPass should only use replace outType",
+ outputOptions.outType == REPLACE);
+ shardedFirstPass = true;
+ }
- {
- // scope and code
+ if (outputOptions.outType != INMEMORY) { // setup temp collection name
+ tempNamespace = str::stream()
+ << (outputOptions.outDB.empty() ? dbname : outputOptions.outDB) << ".tmp.mr."
+ << cmdObj.firstElement().String() << "_" << JOB_NUMBER.fetchAndAdd(1);
+ incLong = tempNamespace + "_inc";
+ }
- if ( cmdObj["scope"].type() == Object )
- scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();
+ {
+ // scope and code
- mapper.reset( new JSMapper( cmdObj["map"] ) );
- reducer.reset( new JSReducer( cmdObj["reduce"] ) );
- if ( cmdObj["finalize"].type() && cmdObj["finalize"].trueValue() )
- finalizer.reset( new JSFinalizer( cmdObj["finalize"] ) );
+ if (cmdObj["scope"].type() == Object)
+ scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();
- if ( cmdObj["mapparams"].type() == Array ) {
- mapParams = cmdObj["mapparams"].embeddedObjectUserCheck();
- }
+ mapper.reset(new JSMapper(cmdObj["map"]));
+ reducer.reset(new JSReducer(cmdObj["reduce"]));
+ if (cmdObj["finalize"].type() && cmdObj["finalize"].trueValue())
+ finalizer.reset(new JSFinalizer(cmdObj["finalize"]));
- }
+ if (cmdObj["mapparams"].type() == Array) {
+ mapParams = cmdObj["mapparams"].embeddedObjectUserCheck();
+ }
+ }
- {
- // query options
- BSONElement q = cmdObj["query"];
- if ( q.type() == Object )
- filter = q.embeddedObjectUserCheck();
- else
- uassert( 13608 , "query has to be blank or an Object" , ! q.trueValue() );
+ {
+ // query options
+ BSONElement q = cmdObj["query"];
+ if (q.type() == Object)
+ filter = q.embeddedObjectUserCheck();
+ else
+ uassert(13608, "query has to be blank or an Object", !q.trueValue());
+
+
+ BSONElement s = cmdObj["sort"];
+ if (s.type() == Object)
+ sort = s.embeddedObjectUserCheck();
+ else
+ uassert(13609, "sort has to be blank or an Object", !s.trueValue());
+
+ if (cmdObj["limit"].isNumber())
+ limit = cmdObj["limit"].numberLong();
+ else
+ limit = 0;
+ }
+}
+/**
+ * Clean up the temporary and incremental collections
+ */
+void State::dropTempCollections() {
+ _db.dropCollection(_config.tempNamespace);
+ // Always forget about temporary namespaces, so we don't cache lots of them
+ ShardConnection::forgetNS(_config.tempNamespace);
+ if (_useIncremental) {
+ // We don't want to log the deletion of incLong as it isn't replicated. While
+ // harmless, this would lead to a scary looking warning on the secondaries.
+ bool shouldReplicateWrites = _txn->writesAreReplicated();
+ _txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+
+ ScopedTransaction scopedXact(_txn, MODE_IX);
+ Lock::DBLock lk(_txn->lockState(), nsToDatabaseSubstring(_config.incLong), MODE_X);
+ if (Database* db = dbHolder().get(_txn, _config.incLong)) {
+ WriteUnitOfWork wunit(_txn);
+ db->dropCollection(_txn, _config.incLong);
+ wunit.commit();
+ }
- BSONElement s = cmdObj["sort"];
- if ( s.type() == Object )
- sort = s.embeddedObjectUserCheck();
- else
- uassert( 13609 , "sort has to be blank or an Object" , ! s.trueValue() );
+ ShardConnection::forgetNS(_config.incLong);
+ }
+}
- if ( cmdObj["limit"].isNumber() )
- limit = cmdObj["limit"].numberLong();
- else
- limit = 0;
- }
+/**
+ * Create temporary collection, set up indexes
+ */
+void State::prepTempCollection() {
+ if (!_onDisk)
+ return;
+
+ dropTempCollections();
+ if (_useIncremental) {
+ // Create the inc collection and make sure we have index on "0" key.
+ // Intentionally not replicating the inc collection to secondaries.
+ bool shouldReplicateWrites = _txn->writesAreReplicated();
+ _txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+
+ OldClientWriteContext incCtx(_txn, _config.incLong);
+ WriteUnitOfWork wuow(_txn);
+ Collection* incColl = incCtx.getCollection();
+ invariant(!incColl);
+
+ CollectionOptions options;
+ options.setNoIdIndex();
+ options.temp = true;
+ incColl = incCtx.db()->createCollection(_txn, _config.incLong, options);
+ invariant(incColl);
+
+ BSONObj indexSpec = BSON("key" << BSON("0" << 1) << "ns" << _config.incLong << "name"
+ << "_temp_0");
+ Status status = incColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn, indexSpec);
+ if (!status.isOK()) {
+ uasserted(17305,
+ str::stream() << "createIndex failed for mr incLong ns: " << _config.incLong
+ << " err: " << status.code());
}
+ wuow.commit();
+ }
- /**
- * Clean up the temporary and incremental collections
- */
- void State::dropTempCollections() {
- _db.dropCollection(_config.tempNamespace);
- // Always forget about temporary namespaces, so we don't cache lots of them
- ShardConnection::forgetNS( _config.tempNamespace );
- if (_useIncremental) {
- // We don't want to log the deletion of incLong as it isn't replicated. While
- // harmless, this would lead to a scary looking warning on the secondaries.
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
-
- ScopedTransaction scopedXact(_txn, MODE_IX);
- Lock::DBLock lk(_txn->lockState(),
- nsToDatabaseSubstring(_config.incLong),
- MODE_X);
- if (Database* db = dbHolder().get(_txn, _config.incLong)) {
- WriteUnitOfWork wunit(_txn);
- db->dropCollection(_txn, _config.incLong);
- wunit.commit();
+ CollectionOptions finalOptions;
+ vector<BSONObj> indexesToInsert;
+
+ {
+ // copy indexes and collection options into temporary storage
+ OldClientWriteContext finalCtx(_txn, _config.outputOptions.finalNamespace);
+ Collection* const finalColl = finalCtx.getCollection();
+ if (finalColl) {
+ finalOptions = finalColl->getCatalogEntry()->getCollectionOptions(_txn);
+
+ IndexCatalog::IndexIterator ii =
+ finalColl->getIndexCatalog()->getIndexIterator(_txn, true);
+ // Iterate over finalColl's indexes.
+ while (ii.more()) {
+ IndexDescriptor* currIndex = ii.next();
+ BSONObjBuilder b;
+ b.append("ns", _config.tempNamespace);
+
+ // Copy over contents of the index descriptor's infoObj.
+ BSONObjIterator j(currIndex->infoObj());
+ while (j.more()) {
+ BSONElement e = j.next();
+ if (str::equals(e.fieldName(), "_id") || str::equals(e.fieldName(), "ns"))
+ continue;
+ b.append(e);
}
-
- ShardConnection::forgetNS( _config.incLong );
+ indexesToInsert.push_back(b.obj());
}
-
}
+ }
- /**
- * Create temporary collection, set up indexes
- */
- void State::prepTempCollection() {
- if ( ! _onDisk )
- return;
-
- dropTempCollections();
- if (_useIncremental) {
- // Create the inc collection and make sure we have index on "0" key.
- // Intentionally not replicating the inc collection to secondaries.
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
-
- OldClientWriteContext incCtx(_txn, _config.incLong);
- WriteUnitOfWork wuow(_txn);
- Collection* incColl = incCtx.getCollection();
- invariant(!incColl);
-
- CollectionOptions options;
- options.setNoIdIndex();
- options.temp = true;
- incColl = incCtx.db()->createCollection(_txn, _config.incLong, options);
- invariant(incColl);
-
- BSONObj indexSpec = BSON( "key" << BSON( "0" << 1 ) << "ns" << _config.incLong
- << "name" << "_temp_0" );
- Status status = incColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn,
- indexSpec);
- if ( !status.isOK() ) {
- uasserted( 17305 , str::stream() << "createIndex failed for mr incLong ns: " <<
- _config.incLong << " err: " << status.code() );
+ {
+ // create temp collection and insert the indexes from temporary storage
+ OldClientWriteContext tempCtx(_txn, _config.tempNamespace);
+ WriteUnitOfWork wuow(_txn);
+ NamespaceString tempNss(_config.tempNamespace);
+ uassert(ErrorCodes::NotMaster,
+ "no longer master",
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(tempNss));
+ Collection* tempColl = tempCtx.getCollection();
+ invariant(!tempColl);
+
+ CollectionOptions options = finalOptions;
+ options.temp = true;
+ tempColl = tempCtx.db()->createCollection(_txn, _config.tempNamespace, options);
+
+ for (vector<BSONObj>::iterator it = indexesToInsert.begin(); it != indexesToInsert.end();
+ ++it) {
+ Status status = tempColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn, *it);
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::IndexAlreadyExists) {
+ continue;
}
- wuow.commit();
+ uassertStatusOK(status);
}
+ // Log the createIndex operation.
+ string logNs = nsToDatabase(_config.tempNamespace) + ".system.indexes";
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(_txn, logNs, *it);
+ }
+ wuow.commit();
+ }
+}
- CollectionOptions finalOptions;
- vector<BSONObj> indexesToInsert;
-
- {
- // copy indexes and collection options into temporary storage
- OldClientWriteContext finalCtx(_txn, _config.outputOptions.finalNamespace);
- Collection* const finalColl = finalCtx.getCollection();
- if ( finalColl ) {
- finalOptions = finalColl->getCatalogEntry()->getCollectionOptions(_txn);
-
- IndexCatalog::IndexIterator ii =
- finalColl->getIndexCatalog()->getIndexIterator( _txn, true );
- // Iterate over finalColl's indexes.
- while ( ii.more() ) {
- IndexDescriptor* currIndex = ii.next();
- BSONObjBuilder b;
- b.append( "ns" , _config.tempNamespace );
-
- // Copy over contents of the index descriptor's infoObj.
- BSONObjIterator j( currIndex->infoObj() );
- while ( j.more() ) {
- BSONElement e = j.next();
- if ( str::equals( e.fieldName() , "_id" ) ||
- str::equals( e.fieldName() , "ns" ) )
- continue;
- b.append( e );
- }
- indexesToInsert.push_back( b.obj() );
- }
- }
- }
+/**
+ * For inline mode, appends results to output object.
+ * Makes sure (key, value) tuple is formatted as {_id: key, value: val}
+ */
+void State::appendResults(BSONObjBuilder& final) {
+ if (_onDisk) {
+ if (!_config.outputOptions.outDB.empty()) {
+ BSONObjBuilder loc;
+ if (!_config.outputOptions.outDB.empty())
+ loc.append("db", _config.outputOptions.outDB);
+ if (!_config.outputOptions.collectionName.empty())
+ loc.append("collection", _config.outputOptions.collectionName);
+ final.append("result", loc.obj());
+ } else {
+ if (!_config.outputOptions.collectionName.empty())
+ final.append("result", _config.outputOptions.collectionName);
+ }
- {
- // create temp collection and insert the indexes from temporary storage
- OldClientWriteContext tempCtx(_txn, _config.tempNamespace);
- WriteUnitOfWork wuow(_txn);
- NamespaceString tempNss(_config.tempNamespace);
- uassert(ErrorCodes::NotMaster, "no longer master",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(tempNss));
- Collection* tempColl = tempCtx.getCollection();
- invariant(!tempColl);
-
- CollectionOptions options = finalOptions;
- options.temp = true;
- tempColl = tempCtx.db()->createCollection(_txn, _config.tempNamespace, options);
-
- for ( vector<BSONObj>::iterator it = indexesToInsert.begin();
- it != indexesToInsert.end(); ++it ) {
- Status status =
- tempColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn, *it);
- if (!status.isOK()) {
- if (status.code() == ErrorCodes::IndexAlreadyExists) {
- continue;
- }
- uassertStatusOK(status);
- }
- // Log the createIndex operation.
- string logNs = nsToDatabase( _config.tempNamespace ) + ".system.indexes";
- getGlobalServiceContext()->getOpObserver()->onCreateIndex(_txn, logNs, *it);
- }
- wuow.commit();
+ if (_config.splitInfo > 0) {
+ // add split points, used for shard
+ BSONObj res;
+ BSONObj idKey = BSON("_id" << 1);
+ if (!_db.runCommand("admin",
+ BSON("splitVector" << _config.outputOptions.finalNamespace
+ << "keyPattern" << idKey << "maxChunkSizeBytes"
+ << _config.splitInfo),
+ res)) {
+ uasserted(15921, str::stream() << "splitVector failed: " << res);
}
-
+ if (res.hasField("splitKeys"))
+ final.append(res.getField("splitKeys"));
}
+ return;
+ }
- /**
- * For inline mode, appends results to output object.
- * Makes sure (key, value) tuple is formatted as {_id: key, value: val}
- */
- void State::appendResults( BSONObjBuilder& final ) {
- if ( _onDisk ) {
- if (!_config.outputOptions.outDB.empty()) {
- BSONObjBuilder loc;
- if ( !_config.outputOptions.outDB.empty())
- loc.append( "db" , _config.outputOptions.outDB );
- if ( !_config.outputOptions.collectionName.empty() )
- loc.append( "collection" , _config.outputOptions.collectionName );
- final.append("result", loc.obj());
- }
- else {
- if ( !_config.outputOptions.collectionName.empty() )
- final.append( "result" , _config.outputOptions.collectionName );
- }
+ if (_jsMode) {
+ ScriptingFunction getResult = _scope->createFunction(
+ "var map = _mrMap;"
+ "var result = [];"
+ "for (key in map) {"
+ " result.push({_id: key, value: map[key]});"
+ "}"
+ "return result;");
+ _scope->invoke(getResult, 0, 0, 0, false);
+ BSONObj obj = _scope->getObject("__returnValue");
+ final.append("results", BSONArray(obj));
+ return;
+ }
- if ( _config.splitInfo > 0 ) {
- // add split points, used for shard
- BSONObj res;
- BSONObj idKey = BSON( "_id" << 1 );
- if (!_db.runCommand("admin",
- BSON("splitVector" << _config.outputOptions.finalNamespace
- << "keyPattern" << idKey
- << "maxChunkSizeBytes" << _config.splitInfo),
- res)) {
- uasserted( 15921 , str::stream() << "splitVector failed: " << res );
- }
- if ( res.hasField( "splitKeys" ) )
- final.append( res.getField( "splitKeys" ) );
- }
- return;
- }
+ uassert(13604, "too much data for in memory map/reduce", _size < BSONObjMaxUserSize);
- if (_jsMode) {
- ScriptingFunction getResult = _scope->createFunction(
- "var map = _mrMap;"
- "var result = [];"
- "for (key in map) {"
- " result.push({_id: key, value: map[key]});"
- "}"
- "return result;");
- _scope->invoke(getResult, 0, 0, 0, false);
- BSONObj obj = _scope->getObject("__returnValue");
- final.append("results", BSONArray(obj));
- return;
- }
+ BSONArrayBuilder b((int)(_size * 1.2)); // _size is data size, doesn't count overhead and keys
- uassert( 13604 , "too much data for in memory map/reduce" , _size < BSONObjMaxUserSize );
+ for (InMemory::iterator i = _temp->begin(); i != _temp->end(); ++i) {
+ BSONObj key = i->first;
+ BSONList& all = i->second;
- BSONArrayBuilder b( (int)(_size * 1.2) ); // _size is data size, doesn't count overhead and keys
+ verify(all.size() == 1);
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
- BSONObj key = i->first;
- BSONList& all = i->second;
+ BSONObjIterator vi(all[0]);
+ vi.next();
- verify( all.size() == 1 );
+ BSONObjBuilder temp(b.subobjStart());
+ temp.appendAs(key.firstElement(), "_id");
+ temp.appendAs(vi.next(), "value");
+ temp.done();
+ }
- BSONObjIterator vi( all[0] );
- vi.next();
+ BSONArray res = b.arr();
+ final.append("results", res);
+}
- BSONObjBuilder temp( b.subobjStart() );
- temp.appendAs( key.firstElement() , "_id" );
- temp.appendAs( vi.next() , "value" );
- temp.done();
- }
+/**
+ * Does post processing on output collection.
+ * This may involve replacing, merging or reducing.
+ */
+long long State::postProcessCollection(OperationContext* txn, CurOp* op, ProgressMeterHolder& pm) {
+ if (_onDisk == false || _config.outputOptions.outType == Config::INMEMORY)
+ return numInMemKeys();
- BSONArray res = b.arr();
- final.append( "results" , res );
- }
+ if (_config.outputOptions.outNonAtomic)
+ return postProcessCollectionNonAtomic(txn, op, pm);
- /**
- * Does post processing on output collection.
- * This may involve replacing, merging or reducing.
- */
- long long State::postProcessCollection(
- OperationContext* txn, CurOp* op, ProgressMeterHolder& pm) {
+ invariant(!txn->lockState()->isLocked());
- if ( _onDisk == false || _config.outputOptions.outType == Config::INMEMORY )
- return numInMemKeys();
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lock(
+ txn->lockState()); // TODO(erh): this is how it was, but seems it doesn't need to be global
+ return postProcessCollectionNonAtomic(txn, op, pm);
+}
- if (_config.outputOptions.outNonAtomic)
- return postProcessCollectionNonAtomic(txn, op, pm);
+//
+// For SERVER-6116 - can't handle version errors in count currently
+//
- invariant( !txn->lockState()->isLocked() );
+/**
+ * Runs count and disables version errors.
+ *
+ * TODO: make count work with versioning
+ */
+unsigned long long _safeCount(Client* client,
+ // Can't be const b/c count isn't
+ /* const */ DBDirectClient& db,
+ const string& ns,
+ const BSONObj& query = BSONObj(),
+ int options = 0,
+ int limit = 0,
+ int skip = 0) {
+ ShardForceVersionOkModeBlock ignoreVersion(client); // ignore versioning here
+ return db.count(ns, query, options, limit, skip);
+}
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lock(txn->lockState()); // TODO(erh): this is how it was, but seems it doesn't need to be global
- return postProcessCollectionNonAtomic(txn, op, pm);
+//
+// End SERVER-6116
+//
+
+long long State::postProcessCollectionNonAtomic(OperationContext* txn,
+ CurOp* op,
+ ProgressMeterHolder& pm) {
+ auto client = txn->getClient();
+
+ if (_config.outputOptions.finalNamespace == _config.tempNamespace)
+ return _safeCount(client, _db, _config.outputOptions.finalNamespace);
+
+ if (_config.outputOptions.outType == Config::REPLACE ||
+ _safeCount(client, _db, _config.outputOptions.finalNamespace) == 0) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lock(txn->lockState()); // TODO(erh): why global???
+ // replace: just rename from temp to final collection name, dropping previous collection
+ _db.dropCollection(_config.outputOptions.finalNamespace);
+ BSONObj info;
+
+ if (!_db.runCommand("admin",
+ BSON("renameCollection" << _config.tempNamespace << "to"
+ << _config.outputOptions.finalNamespace
+ << "stayTemp" << _config.shardedFirstPass),
+ info)) {
+ uasserted(10076, str::stream() << "rename failed: " << info);
}
- //
- // For SERVER-6116 - can't handle version errors in count currently
- //
-
- /**
- * Runs count and disables version errors.
- *
- * TODO: make count work with versioning
- */
- unsigned long long _safeCount( Client* client,
- // Can't be const b/c count isn't
- /* const */ DBDirectClient& db,
- const string &ns,
- const BSONObj& query = BSONObj(),
- int options = 0,
- int limit = 0,
- int skip = 0 )
+ _db.dropCollection(_config.tempNamespace);
+ } else if (_config.outputOptions.outType == Config::MERGE) {
+ // merge: upsert new docs into old collection
{
- ShardForceVersionOkModeBlock ignoreVersion(client); // ignore versioning here
- return db.count( ns, query, options, limit, skip );
+ const auto count = _safeCount(client, _db, _config.tempNamespace, BSONObj());
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op->setMessage_inlock(
+ "m/r: merge post processing", "M/R Merge Post Processing Progress", count);
}
+ unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace, BSONObj());
+ while (cursor->more()) {
+ ScopedTransaction scopedXact(_txn, MODE_IX);
+ Lock::DBLock lock(_txn->lockState(),
+ nsToDatabaseSubstring(_config.outputOptions.finalNamespace),
+ MODE_X);
+ BSONObj o = cursor->nextSafe();
+ Helpers::upsert(_txn, _config.outputOptions.finalNamespace, o);
+ pm.hit();
+ }
+ _db.dropCollection(_config.tempNamespace);
+ pm.finished();
+ } else if (_config.outputOptions.outType == Config::REDUCE) {
+ // reduce: apply reduce op on new result and existing one
+ BSONList values;
- //
- // End SERVER-6116
- //
-
- long long State::postProcessCollectionNonAtomic(
- OperationContext* txn, CurOp* op, ProgressMeterHolder& pm) {
-
- auto client = txn->getClient();
-
- if ( _config.outputOptions.finalNamespace == _config.tempNamespace )
- return _safeCount( client, _db, _config.outputOptions.finalNamespace );
-
- if (_config.outputOptions.outType == Config::REPLACE ||
- _safeCount(client, _db, _config.outputOptions.finalNamespace) == 0) {
-
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lock(txn->lockState()); // TODO(erh): why global???
- // replace: just rename from temp to final collection name, dropping previous collection
- _db.dropCollection( _config.outputOptions.finalNamespace );
- BSONObj info;
+ {
+ const auto count = _safeCount(client, _db, _config.tempNamespace, BSONObj());
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op->setMessage_inlock(
+ "m/r: reduce post processing", "M/R Reduce Post Processing Progress", count);
+ }
+ unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace, BSONObj());
+ while (cursor->more()) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lock(txn->lockState()); // TODO(erh) why global?
+ BSONObj temp = cursor->nextSafe();
+ BSONObj old;
- if ( ! _db.runCommand( "admin"
- , BSON( "renameCollection" << _config.tempNamespace <<
- "to" << _config.outputOptions.finalNamespace <<
- "stayTemp" << _config.shardedFirstPass )
- , info ) ) {
- uasserted( 10076 , str::stream() << "rename failed: " << info );
- }
-
- _db.dropCollection( _config.tempNamespace );
- }
- else if ( _config.outputOptions.outType == Config::MERGE ) {
- // merge: upsert new docs into old collection
- {
- const auto count = _safeCount(client, _db, _config.tempNamespace, BSONObj());
- stdx::lock_guard<Client> lk(*txn->getClient());
- op->setMessage_inlock("m/r: merge post processing",
- "M/R Merge Post Processing Progress",
- count);
- }
- unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace , BSONObj());
- while (cursor->more()) {
- ScopedTransaction scopedXact(_txn, MODE_IX);
- Lock::DBLock lock(_txn->lockState(),
- nsToDatabaseSubstring(_config.outputOptions.finalNamespace),
- MODE_X);
- BSONObj o = cursor->nextSafe();
- Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o );
- pm.hit();
- }
- _db.dropCollection( _config.tempNamespace );
- pm.finished();
+ bool found;
+ {
+ const std::string& finalNamespace = _config.outputOptions.finalNamespace;
+ OldClientContext tx(txn, finalNamespace);
+ Collection* coll = getCollectionOrUassert(tx.db(), finalNamespace);
+ found = Helpers::findOne(_txn, coll, temp["_id"].wrap(), old, true);
}
- else if ( _config.outputOptions.outType == Config::REDUCE ) {
- // reduce: apply reduce op on new result and existing one
- BSONList values;
-
- {
- const auto count = _safeCount(client, _db, _config.tempNamespace, BSONObj());
- stdx::lock_guard<Client> lk(*txn->getClient());
- op->setMessage_inlock("m/r: reduce post processing",
- "M/R Reduce Post Processing Progress",
- count);
- }
- unique_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
- while ( cursor->more() ) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lock(txn->lockState()); // TODO(erh) why global?
- BSONObj temp = cursor->nextSafe();
- BSONObj old;
-
- bool found;
- {
- const std::string& finalNamespace = _config.outputOptions.finalNamespace;
- OldClientContext tx(txn, finalNamespace);
- Collection* coll = getCollectionOrUassert(tx.db(), finalNamespace);
- found = Helpers::findOne(_txn,
- coll,
- temp["_id"].wrap(),
- old,
- true);
- }
- if ( found ) {
- // need to reduce
- values.clear();
- values.push_back( temp );
- values.push_back( old );
- Helpers::upsert(_txn,
- _config.outputOptions.finalNamespace,
- _config.reducer->finalReduce(values,
- _config.finalizer.get()));
- }
- else {
- Helpers::upsert( _txn, _config.outputOptions.finalNamespace , temp );
- }
- pm.hit();
- }
- pm.finished();
+ if (found) {
+ // need to reduce
+ values.clear();
+ values.push_back(temp);
+ values.push_back(old);
+ Helpers::upsert(_txn,
+ _config.outputOptions.finalNamespace,
+ _config.reducer->finalReduce(values, _config.finalizer.get()));
+ } else {
+ Helpers::upsert(_txn, _config.outputOptions.finalNamespace, temp);
}
-
- return _safeCount( txn->getClient(), _db, _config.outputOptions.finalNamespace );
+ pm.hit();
}
+ pm.finished();
+ }
- /**
- * Insert doc in collection. This should be replicated.
- */
- void State::insert( const string& ns , const BSONObj& o ) {
- verify( _onDisk );
+ return _safeCount(txn->getClient(), _db, _config.outputOptions.finalNamespace);
+}
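// A minimal standalone sketch of the three output modes handled above
// (REPLACE, MERGE, REDUCE), using std::map as a stand-in for the temp and
// final collections; every name below is illustrative, none of it is a
// MongoDB API.
#include <functional>
#include <map>
#include <string>
#include <utility>

namespace mr_sketch {

using Doc = std::string;                  // stand-in for a BSON value
using Coll = std::map<std::string, Doc>;  // _id -> value

enum class OutMode { kReplace, kMerge, kReduce };

void postProcess(Coll& finalColl,
                 Coll& tempColl,
                 OutMode mode,
                 const std::function<Doc(const Doc&, const Doc&)>& reduce) {
    switch (mode) {
        case OutMode::kReplace:
            // analogous to dropping the final collection and renaming temp onto it
            finalColl = std::move(tempColl);
            break;
        case OutMode::kMerge:
            // analogous to upserting every temp doc over the existing one
            for (const auto& kv : tempColl)
                finalColl[kv.first] = kv.second;
            break;
        case OutMode::kReduce:
            // analogous to re-reducing wherever a doc with the same _id already exists
            for (const auto& kv : tempColl) {
                auto it = finalColl.find(kv.first);
                finalColl[kv.first] =
                    (it == finalColl.end()) ? kv.second : reduce(it->second, kv.second);
            }
            break;
    }
    tempColl.clear();  // analogous to dropping the temp collection
}

}  // namespace mr_sketch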
+/**
+ * Insert doc in collection. This should be replicated.
+ */
+void State::insert(const string& ns, const BSONObj& o) {
+ verify(_onDisk);
- OldClientWriteContext ctx(_txn, ns );
- WriteUnitOfWork wuow(_txn);
- NamespaceString nss(ns);
- uassert(ErrorCodes::NotMaster, "no longer master",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss));
- Collection* coll = getCollectionOrUassert(ctx.db(), ns);
- BSONObjBuilder b;
- if ( !o.hasField( "_id" ) ) {
- b.appendOID( "_id", NULL, true );
- }
- b.appendElements(o);
- BSONObj bo = b.obj();
+ OldClientWriteContext ctx(_txn, ns);
+ WriteUnitOfWork wuow(_txn);
+ NamespaceString nss(ns);
+ uassert(ErrorCodes::NotMaster,
+ "no longer master",
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss));
+ Collection* coll = getCollectionOrUassert(ctx.db(), ns);
- uassertStatusOK( coll->insertDocument( _txn, bo, true ).getStatus() );
- wuow.commit();
- }
+ BSONObjBuilder b;
+ if (!o.hasField("_id")) {
+ b.appendOID("_id", NULL, true);
+ }
+ b.appendElements(o);
+ BSONObj bo = b.obj();
- /**
- * Insert doc into the inc collection. This should not be replicated.
- */
- void State::_insertToInc( BSONObj& o ) {
- verify( _onDisk );
-
- OldClientWriteContext ctx(_txn, _config.incLong );
- WriteUnitOfWork wuow(_txn);
- Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
- uassertStatusOK(coll->insertDocument(_txn, o, true, false).getStatus());
- wuow.commit();
- }
+ uassertStatusOK(coll->insertDocument(_txn, bo, true).getStatus());
+ wuow.commit();
+}
- State::State(OperationContext* txn, const Config& c) :
- _config(c),
- _db(txn),
- _useIncremental(true),
- _txn(txn),
- _size(0),
- _dupCount(0),
- _numEmits(0) {
- _temp.reset( new InMemory() );
- _onDisk = _config.outputOptions.outType != Config::INMEMORY;
- }
+/**
+ * Insert doc into the inc collection. This should not be replicated.
+ */
+void State::_insertToInc(BSONObj& o) {
+ verify(_onDisk);
+
+ OldClientWriteContext ctx(_txn, _config.incLong);
+ WriteUnitOfWork wuow(_txn);
+ Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
+ bool shouldReplicateWrites = _txn->writesAreReplicated();
+ _txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ uassertStatusOK(coll->insertDocument(_txn, o, true, false).getStatus());
+ wuow.commit();
+}
- bool State::sourceExists() {
- return _db.exists( _config.ns );
- }
+State::State(OperationContext* txn, const Config& c)
+ : _config(c), _db(txn), _useIncremental(true), _txn(txn), _size(0), _dupCount(0), _numEmits(0) {
+ _temp.reset(new InMemory());
+ _onDisk = _config.outputOptions.outType != Config::INMEMORY;
+}
- long long State::incomingDocuments() {
- return _safeCount( _txn->getClient(), _db, _config.ns , _config.filter , QueryOption_SlaveOk , (unsigned) _config.limit );
- }
+bool State::sourceExists() {
+ return _db.exists(_config.ns);
+}
- State::~State() {
- if ( _onDisk ) {
- try {
- dropTempCollections();
- }
- catch ( std::exception& e ) {
- error() << "couldn't cleanup after map reduce: " << e.what() << endl;
- }
- }
- if (_scope && !_scope->isKillPending() && _scope->getError().empty()) {
- // cleanup js objects
- try {
- ScriptingFunction cleanup =
- _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;");
- _scope->invoke(cleanup, 0, 0, 0, true);
- }
- catch (const DBException &) {
- // not important because properties will be reset if scope is reused
- LOG(1) << "MapReduce terminated during state destruction" << endl;
- }
- }
- }
+long long State::incomingDocuments() {
+ return _safeCount(_txn->getClient(),
+ _db,
+ _config.ns,
+ _config.filter,
+ QueryOption_SlaveOk,
+ (unsigned)_config.limit);
+}
- /**
- * Initialize the mapreduce operation, creating the inc collection
- */
- void State::init() {
- // setup js
- const string userToken = AuthorizationSession::get(ClientBasic::getCurrent())
- ->getAuthenticatedUserNamesToken();
- _scope.reset(globalScriptEngine->getPooledScope(
- _txn, _config.dbname, "mapreduce" + userToken).release());
-
- if ( ! _config.scopeSetup.isEmpty() )
- _scope->init( &_config.scopeSetup );
-
- _config.mapper->init( this );
- _config.reducer->init( this );
- if ( _config.finalizer )
- _config.finalizer->init( this );
- _scope->setBoolean("_doFinal", _config.finalizer.get() != 0);
-
- switchMode(_config.jsMode); // set up js-mode based on Config
-
- // global JS map/reduce hashmap
- // we use a standard JS object which means keys are only simple types
- // we could also add a real hashmap from a library and object comparison methods
- // for increased performance, we may want to look at v8 Harmony Map support
- // _scope->setObject("_mrMap", BSONObj(), false);
- ScriptingFunction init = _scope->createFunction(
- "_emitCt = 0;"
- "_keyCt = 0;"
- "_dupCt = 0;"
- "_redCt = 0;"
- "if (typeof(_mrMap) === 'undefined') {"
- " _mrMap = {};"
- "}");
- _scope->invoke(init, 0, 0, 0, true);
-
- // js function to run reduce on all keys
- // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
- _reduceAll = _scope->createFunction(
- "var map = _mrMap;"
- "var list, ret;"
- "for (var key in map) {"
- " list = map[key];"
- " if (list.length != 1) {"
- " ret = _reduce(key, list);"
- " map[key] = [ret];"
- " ++_redCt;"
- " }"
- "}"
- "_dupCt = 0;");
- massert(16717, "error initializing JavaScript reduceAll function",
- _reduceAll != 0);
-
- _reduceAndEmit = _scope->createFunction(
- "var map = _mrMap;"
- "var list, ret;"
- "for (var key in map) {"
- " list = map[key];"
- " if (list.length == 1)"
- " ret = list[0];"
- " else {"
- " ret = _reduce(key, list);"
- " ++_redCt;"
- " }"
- " emit(key, ret);"
- "}"
- "delete _mrMap;");
- massert(16718, "error initializing JavaScript reduce/emit function",
- _reduceAndEmit != 0);
-
- _reduceAndFinalize = _scope->createFunction(
- "var map = _mrMap;"
- "var list, ret;"
- "for (var key in map) {"
- " list = map[key];"
- " if (list.length == 1) {"
- " if (!_doFinal) { continue; }"
- " ret = list[0];"
- " }"
- " else {"
- " ret = _reduce(key, list);"
- " ++_redCt;"
- " }"
- " if (_doFinal)"
- " ret = _finalize(key, ret);"
- " map[key] = ret;"
- "}");
- massert(16719, "error creating JavaScript reduce/finalize function",
- _reduceAndFinalize != 0);
-
- _reduceAndFinalizeAndInsert = _scope->createFunction(
- "var map = _mrMap;"
- "var list, ret;"
- "for (var key in map) {"
- " list = map[key];"
- " if (list.length == 1)"
- " ret = list[0];"
- " else {"
- " ret = _reduce(key, list);"
- " ++_redCt;"
- " }"
- " if (_doFinal)"
- " ret = _finalize(key, ret);"
- " _nativeToTemp({_id: key, value: ret});"
- "}");
- massert(16720, "error initializing JavaScript functions",
- _reduceAndFinalizeAndInsert != 0);
+State::~State() {
+ if (_onDisk) {
+ try {
+ dropTempCollections();
+ } catch (std::exception& e) {
+ error() << "couldn't cleanup after map reduce: " << e.what() << endl;
}
-
- void State::switchMode(bool jsMode) {
- _jsMode = jsMode;
- if (jsMode) {
- // emit function that stays in JS
- _scope->setFunction("emit",
- "function(key, value) {"
- " if (typeof(key) === 'object') {"
- " _bailFromJS(key, value);"
- " return;"
- " }"
- " ++_emitCt;"
- " var map = _mrMap;"
- " var list = map[key];"
- " if (!list) {"
- " ++_keyCt;"
- " list = [];"
- " map[key] = list;"
- " }"
- " else"
- " ++_dupCt;"
- " list.push(value);"
- "}");
- _scope->injectNative("_bailFromJS", _bailFromJS, this);
- }
- else {
- // emit now populates C++ map
- _scope->injectNative( "emit" , fast_emit, this );
- }
+ }
+ if (_scope && !_scope->isKillPending() && _scope->getError().empty()) {
+ // cleanup js objects
+ try {
+ ScriptingFunction cleanup =
+ _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;");
+ _scope->invoke(cleanup, 0, 0, 0, true);
+ } catch (const DBException&) {
+ // not important because properties will be reset if scope is reused
+ LOG(1) << "MapReduce terminated during state destruction" << endl;
}
+ }
+}
- void State::bailFromJS() {
- LOG(1) << "M/R: Switching from JS mode to mixed mode" << endl;
+/**
+ * Initialize the mapreduce operation, creating the inc collection
+ */
+void State::init() {
+ // setup js
+ const string userToken =
+ AuthorizationSession::get(ClientBasic::getCurrent())->getAuthenticatedUserNamesToken();
+ _scope.reset(globalScriptEngine->getPooledScope(_txn, _config.dbname, "mapreduce" + userToken)
+ .release());
+
+ if (!_config.scopeSetup.isEmpty())
+ _scope->init(&_config.scopeSetup);
+
+ _config.mapper->init(this);
+ _config.reducer->init(this);
+ if (_config.finalizer)
+ _config.finalizer->init(this);
+ _scope->setBoolean("_doFinal", _config.finalizer.get() != 0);
+
+ switchMode(_config.jsMode); // set up js-mode based on Config
+
+ // global JS map/reduce hashmap
+ // we use a standard JS object which means keys are only simple types
+ // we could also add a real hashmap from a library and object comparison methods
+ // for increased performance, we may want to look at v8 Harmony Map support
+ // _scope->setObject("_mrMap", BSONObj(), false);
+ ScriptingFunction init = _scope->createFunction(
+ "_emitCt = 0;"
+ "_keyCt = 0;"
+ "_dupCt = 0;"
+ "_redCt = 0;"
+ "if (typeof(_mrMap) === 'undefined') {"
+ " _mrMap = {};"
+ "}");
+ _scope->invoke(init, 0, 0, 0, true);
+
+ // js function to run reduce on all keys
+ // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
+ _reduceAll = _scope->createFunction(
+ "var map = _mrMap;"
+ "var list, ret;"
+ "for (var key in map) {"
+ " list = map[key];"
+ " if (list.length != 1) {"
+ " ret = _reduce(key, list);"
+ " map[key] = [ret];"
+ " ++_redCt;"
+ " }"
+ "}"
+ "_dupCt = 0;");
+ massert(16717, "error initializing JavaScript reduceAll function", _reduceAll != 0);
+
+ _reduceAndEmit = _scope->createFunction(
+ "var map = _mrMap;"
+ "var list, ret;"
+ "for (var key in map) {"
+ " list = map[key];"
+ " if (list.length == 1)"
+ " ret = list[0];"
+ " else {"
+ " ret = _reduce(key, list);"
+ " ++_redCt;"
+ " }"
+ " emit(key, ret);"
+ "}"
+ "delete _mrMap;");
+ massert(16718, "error initializing JavaScript reduce/emit function", _reduceAndEmit != 0);
+
+ _reduceAndFinalize = _scope->createFunction(
+ "var map = _mrMap;"
+ "var list, ret;"
+ "for (var key in map) {"
+ " list = map[key];"
+ " if (list.length == 1) {"
+ " if (!_doFinal) { continue; }"
+ " ret = list[0];"
+ " }"
+ " else {"
+ " ret = _reduce(key, list);"
+ " ++_redCt;"
+ " }"
+ " if (_doFinal)"
+ " ret = _finalize(key, ret);"
+ " map[key] = ret;"
+ "}");
+ massert(16719, "error creating JavaScript reduce/finalize function", _reduceAndFinalize != 0);
+
+ _reduceAndFinalizeAndInsert = _scope->createFunction(
+ "var map = _mrMap;"
+ "var list, ret;"
+ "for (var key in map) {"
+ " list = map[key];"
+ " if (list.length == 1)"
+ " ret = list[0];"
+ " else {"
+ " ret = _reduce(key, list);"
+ " ++_redCt;"
+ " }"
+ " if (_doFinal)"
+ " ret = _finalize(key, ret);"
+ " _nativeToTemp({_id: key, value: ret});"
+ "}");
+ massert(16720, "error initializing JavaScript functions", _reduceAndFinalizeAndInsert != 0);
+}
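// The four helpers compiled above cover every combination the later phases need:
// _reduceAll re-reduces the in-JS map in place, _reduceAndEmit feeds reduced
// results back through emit() when bailing out of js mode, _reduceAndFinalize
// leaves finalized results in the map for inline output, and
// _reduceAndFinalizeAndInsert streams them to the temp collection via _nativeToTemp.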
- // reduce and reemit into c++
- switchMode(false);
- _scope->invoke(_reduceAndEmit, 0, 0, 0, true);
- // need to get the real number emitted so far
- _numEmits = _scope->getNumberInt("_emitCt");
- _config.reducer->numReduces = _scope->getNumberInt("_redCt");
- }
+void State::switchMode(bool jsMode) {
+ _jsMode = jsMode;
+ if (jsMode) {
+ // emit function that stays in JS
+ _scope->setFunction("emit",
+ "function(key, value) {"
+ " if (typeof(key) === 'object') {"
+ " _bailFromJS(key, value);"
+ " return;"
+ " }"
+ " ++_emitCt;"
+ " var map = _mrMap;"
+ " var list = map[key];"
+ " if (!list) {"
+ " ++_keyCt;"
+ " list = [];"
+ " map[key] = list;"
+ " }"
+ " else"
+ " ++_dupCt;"
+ " list.push(value);"
+ "}");
+ _scope->injectNative("_bailFromJS", _bailFromJS, this);
+ } else {
+ // emit now populates C++ map
+ _scope->injectNative("emit", fast_emit, this);
+ }
+}
- Collection* State::getCollectionOrUassert(Database* db, StringData ns) {
- Collection* out = db ? db->getCollection(ns) : NULL;
- uassert(18697, "Collection unexpectedly disappeared: " + ns.toString(),
- out);
- return out;
- }
+void State::bailFromJS() {
+ LOG(1) << "M/R: Switching from JS mode to mixed mode" << endl;
- /**
- * Applies last reduce and finalize on a list of tuples (key, val)
- * Inserts single result {_id: key, value: val} into temp collection
- */
- void State::finalReduce( BSONList& values ) {
- if ( !_onDisk || values.size() == 0 )
- return;
+ // reduce and reemit into c++
+ switchMode(false);
+ _scope->invoke(_reduceAndEmit, 0, 0, 0, true);
+ // need to get the real number emitted so far
+ _numEmits = _scope->getNumberInt("_emitCt");
+ _config.reducer->numReduces = _scope->getNumberInt("_redCt");
+}
- BSONObj res = _config.reducer->finalReduce( values , _config.finalizer.get() );
- insert( _config.tempNamespace , res );
- }
+Collection* State::getCollectionOrUassert(Database* db, StringData ns) {
+ Collection* out = db ? db->getCollection(ns) : NULL;
+ uassert(18697, "Collection unexpectedly disappeared: " + ns.toString(), out);
+ return out;
+}
- BSONObj _nativeToTemp( const BSONObj& args, void* data ) {
- State* state = (State*) data;
- BSONObjIterator it(args);
- state->insert(state->_config.tempNamespace, it.next().Obj());
- return BSONObj();
- }
+/**
+ * Applies last reduce and finalize on a list of tuples (key, val)
+ * Inserts single result {_id: key, value: val} into temp collection
+ */
+void State::finalReduce(BSONList& values) {
+ if (!_onDisk || values.size() == 0)
+ return;
+
+ BSONObj res = _config.reducer->finalReduce(values, _config.finalizer.get());
+ insert(_config.tempNamespace, res);
+}
+
+BSONObj _nativeToTemp(const BSONObj& args, void* data) {
+ State* state = (State*)data;
+ BSONObjIterator it(args);
+ state->insert(state->_config.tempNamespace, it.next().Obj());
+ return BSONObj();
+}
// BSONObj _nativeToInc( const BSONObj& args, void* data ) {
// State* state = (State*) data;
@@ -956,807 +922,791 @@ namespace mongo {
// return BSONObj();
// }
- /**
- * Applies last reduce and finalize.
- * After calling this method, the temp collection will be completed.
- * If inline, the results will be in the in memory map
- */
- void State::finalReduce(CurOp * op , ProgressMeterHolder& pm ) {
-
- if (_jsMode) {
- // apply the reduce within JS
- if (_onDisk) {
- _scope->injectNative("_nativeToTemp", _nativeToTemp, this);
- _scope->invoke(_reduceAndFinalizeAndInsert, 0, 0, 0, true);
- return;
- }
- else {
- _scope->invoke(_reduceAndFinalize, 0, 0, 0, true);
- return;
- }
- }
+/**
+ * Applies last reduce and finalize.
+ * After calling this method, the temp collection will be completed.
+ * If inline, the results will be in the in memory map
+ */
+void State::finalReduce(CurOp* op, ProgressMeterHolder& pm) {
+ if (_jsMode) {
+ // apply the reduce within JS
+ if (_onDisk) {
+ _scope->injectNative("_nativeToTemp", _nativeToTemp, this);
+ _scope->invoke(_reduceAndFinalizeAndInsert, 0, 0, 0, true);
+ return;
+ } else {
+ _scope->invoke(_reduceAndFinalize, 0, 0, 0, true);
+ return;
+ }
+ }
- if ( ! _onDisk ) {
- // all data has already been reduced, just finalize
- if ( _config.finalizer ) {
- long size = 0;
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
- BSONObj key = i->first;
- BSONList& all = i->second;
+ if (!_onDisk) {
+ // all data has already been reduced, just finalize
+ if (_config.finalizer) {
+ long size = 0;
+ for (InMemory::iterator i = _temp->begin(); i != _temp->end(); ++i) {
+ BSONObj key = i->first;
+ BSONList& all = i->second;
- verify( all.size() == 1 );
+ verify(all.size() == 1);
- BSONObj res = _config.finalizer->finalize( all[0] );
+ BSONObj res = _config.finalizer->finalize(all[0]);
- all.clear();
- all.push_back( res );
- size += res.objsize();
- }
- _size = size;
- }
- return;
+ all.clear();
+ all.push_back(res);
+ size += res.objsize();
}
+ _size = size;
+ }
+ return;
+ }
- // use index on "0" to pull sorted data
- verify( _temp->size() == 0 );
- BSONObj sortKey = BSON( "0" << 1 );
+ // use index on "0" to pull sorted data
+ verify(_temp->size() == 0);
+ BSONObj sortKey = BSON("0" << 1);
+
+ {
+ OldClientWriteContext incCtx(_txn, _config.incLong);
+ WriteUnitOfWork wuow(_txn);
+ Collection* incColl = getCollectionOrUassert(incCtx.db(), _config.incLong);
+
+ bool foundIndex = false;
+ IndexCatalog::IndexIterator ii = incColl->getIndexCatalog()->getIndexIterator(_txn, true);
+ // Iterate over incColl's indexes.
+ while (ii.more()) {
+ IndexDescriptor* currIndex = ii.next();
+ BSONObj x = currIndex->infoObj();
+ if (sortKey.woCompare(x["key"].embeddedObject()) == 0) {
+ foundIndex = true;
+ break;
+ }
+ }
- {
- OldClientWriteContext incCtx(_txn, _config.incLong );
- WriteUnitOfWork wuow(_txn);
- Collection* incColl = getCollectionOrUassert(incCtx.db(), _config.incLong );
-
- bool foundIndex = false;
- IndexCatalog::IndexIterator ii =
- incColl->getIndexCatalog()->getIndexIterator( _txn, true );
- // Iterate over incColl's indexes.
- while ( ii.more() ) {
- IndexDescriptor* currIndex = ii.next();
- BSONObj x = currIndex->infoObj();
- if ( sortKey.woCompare( x["key"].embeddedObject() ) == 0 ) {
- foundIndex = true;
- break;
- }
- }
+ verify(foundIndex);
+ wuow.commit();
+ }
- verify( foundIndex );
- wuow.commit();
- }
+ unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_txn, _config.incLong));
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_txn, _config.incLong));
+ BSONObj prev;
+ BSONList all;
- BSONObj prev;
- BSONList all;
+ {
+ const auto count = _db.count(_config.incLong, BSONObj(), QueryOption_SlaveOk);
+ stdx::lock_guard<Client> lk(*_txn->getClient());
+ verify(pm ==
+ op->setMessage_inlock("m/r: (3/3) final reduce to collection",
+ "M/R: (3/3) Final Reduce Progress",
+ count));
+ }
- {
- const auto count = _db.count(_config.incLong, BSONObj(), QueryOption_SlaveOk);
- stdx::lock_guard<Client> lk(*_txn->getClient());
- verify(pm == op->setMessage_inlock("m/r: (3/3) final reduce to collection",
- "M/R: (3/3) Final Reduce Progress",
- count));
+ const NamespaceString nss(_config.incLong);
+ const WhereCallbackReal whereCallback(_txn, nss.db());
+
+ CanonicalQuery* cqRaw;
+ verify(CanonicalQuery::canonicalize(
+ _config.incLong, BSONObj(), sortKey, BSONObj(), &cqRaw, whereCallback).isOK());
+ std::unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ Collection* coll = getCollectionOrUassert(ctx->getDb(), _config.incLong);
+ invariant(coll);
+
+ PlanExecutor* rawExec;
+ verify(getExecutor(_txn,
+ coll,
+ cq.release(),
+ PlanExecutor::YIELD_AUTO,
+ &rawExec,
+ QueryPlannerParams::NO_TABLE_SCAN).isOK());
+
+ unique_ptr<PlanExecutor> exec(rawExec);
+
+ // iterate over all sorted objects
+ BSONObj o;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&o, NULL))) {
+ o = o.getOwned(); // we will be accessing outside of the lock
+ pm.hit();
+
+ if (o.woSortOrder(prev, sortKey) == 0) {
+ // object is same as previous, add to array
+ all.push_back(o);
+ if (pm->hits() % 100 == 0) {
+ _txn->checkForInterrupt();
}
+ continue;
+ }
- const NamespaceString nss(_config.incLong);
- const WhereCallbackReal whereCallback(_txn, nss.db());
-
- CanonicalQuery* cqRaw;
- verify(CanonicalQuery::canonicalize(_config.incLong,
- BSONObj(),
- sortKey,
- BSONObj(),
- &cqRaw,
- whereCallback).isOK());
- std::unique_ptr<CanonicalQuery> cq(cqRaw);
-
- Collection* coll = getCollectionOrUassert(ctx->getDb(), _config.incLong);
- invariant(coll);
-
- PlanExecutor* rawExec;
- verify(getExecutor(_txn,
- coll,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec,
- QueryPlannerParams::NO_TABLE_SCAN).isOK());
-
- unique_ptr<PlanExecutor> exec(rawExec);
-
- // iterate over all sorted objects
- BSONObj o;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&o, NULL))) {
- o = o.getOwned(); // we will be accessing outside of the lock
- pm.hit();
-
- if ( o.woSortOrder( prev , sortKey ) == 0 ) {
- // object is same as previous, add to array
- all.push_back( o );
- if ( pm->hits() % 100 == 0 ) {
- _txn->checkForInterrupt();
- }
- continue;
- }
+ exec->saveState();
- exec->saveState();
+ ctx.reset();
- ctx.reset();
+        // reduce and finalize the array
+ finalReduce(all);
- // reduce a finalize array
- finalReduce( all );
+ ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
- ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
+ all.clear();
+ prev = o;
+ all.push_back(o);
- all.clear();
- prev = o;
- all.push_back( o );
+ if (!exec->restoreState(_txn)) {
+ break;
+ }
- if (!exec->restoreState(_txn)) {
- break;
- }
+ _txn->checkForInterrupt();
+ }
- _txn->checkForInterrupt();
- }
+ ctx.reset();
+ // reduce and finalize last array
+ finalReduce(all);
+ ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
- ctx.reset();
- // reduce and finalize last array
- finalReduce( all );
- ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
+ pm.finished();
+}
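// A standalone sketch of the sorted-scan grouping done above: the inc collection
// is read back ordered by key, adjacent equal keys are batched, and each batch is
// reduced when the key changes. Plain STL only; the types are illustrative.
#include <string>
#include <utility>
#include <vector>

namespace mr_sketch {

using KV = std::pair<std::string, int>;

template <typename FinalReduceFn>  // e.g. void(const std::string&, const std::vector<int>&)
void finalReduceSorted(const std::vector<KV>& sortedDocs, FinalReduceFn finalReduce) {
    std::vector<int> batch;
    std::string prevKey;
    for (const KV& kv : sortedDocs) {
        if (!batch.empty() && kv.first != prevKey) {
            finalReduce(prevKey, batch);  // key changed: flush the previous group
            batch.clear();
        }
        prevKey = kv.first;
        batch.push_back(kv.second);
    }
    if (!batch.empty())
        finalReduce(prevKey, batch);  // flush the last group
}

}  // namespace mr_sketch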
- pm.finished();
- }
+/**
+ * Attempts to reduce objects in the memory map.
+ * A new memory map will be created to hold the results.
+ * If applicable, objects with a unique key may be dumped to the inc collection.
+ * Input and output objects are both {"0": key, "1": val}
+ */
+void State::reduceInMemory() {
+ if (_jsMode) {
+ // in js mode the reduce is applied when writing to collection
+ return;
+ }
- /**
- * Attempts to reduce objects in the memory map.
- * A new memory map will be created to hold the results.
- * If applicable, objects with unique key may be dumped to inc collection.
- * Input and output objects are both {"0": key, "1": val}
- */
- void State::reduceInMemory() {
-
- if (_jsMode) {
- // in js mode the reduce is applied when writing to collection
- return;
+ unique_ptr<InMemory> n(new InMemory()); // for new data
+ long nSize = 0;
+ _dupCount = 0;
+
+ for (InMemory::iterator i = _temp->begin(); i != _temp->end(); ++i) {
+ BSONList& all = i->second;
+
+ if (all.size() == 1) {
+ // only 1 value for this key
+ if (_onDisk) {
+ // this key has low cardinality, so just write to collection
+ _insertToInc(*(all.begin()));
+ } else {
+ // add to new map
+ nSize += _add(n.get(), all[0]);
}
+ } else if (all.size() > 1) {
+ // several values, reduce and add to map
+ BSONObj res = _config.reducer->reduce(all);
+ nSize += _add(n.get(), res);
+ }
+ }
- unique_ptr<InMemory> n( new InMemory() ); // for new data
- long nSize = 0;
- _dupCount = 0;
+ // swap maps
+ _temp.reset(n.release());
+ _size = nSize;
+}
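// A simplified model of reduceInMemory() above: keys holding a single value are
// either spilled to the inc collection (when output is on disk) or carried over
// unchanged, and keys holding several values are collapsed through the reduce
// function. Plain STL; nothing here is a MongoDB type.
#include <functional>
#include <map>
#include <string>
#include <vector>

namespace mr_sketch {

using InMem = std::map<std::string, std::vector<int>>;

InMem reduceInMemoryModel(const InMem& temp,
                          bool onDisk,
                          const std::function<int(const std::vector<int>&)>& reduce,
                          const std::function<void(int)>& spillToInc) {
    InMem out;
    for (const auto& kv : temp) {
        if (kv.second.size() == 1) {
            if (onDisk)
                spillToInc(kv.second.front());  // unique key: write straight to disk
            else
                out[kv.first] = kv.second;      // keep as-is in the new map
        } else if (kv.second.size() > 1) {
            out[kv.first] = {reduce(kv.second)};  // several values: reduce to one
        }
    }
    return out;
}

}  // namespace mr_sketch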
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
- BSONList& all = i->second;
+/**
+ * Dumps the entire in memory map to the inc collection.
+ */
+void State::dumpToInc() {
+ if (!_onDisk)
+ return;
- if ( all.size() == 1 ) {
- // only 1 value for this key
- if ( _onDisk ) {
- // this key has low cardinality, so just write to collection
- _insertToInc( *(all.begin()) );
- }
- else {
- // add to new map
- nSize += _add(n.get(), all[0]);
- }
- }
- else if ( all.size() > 1 ) {
- // several values, reduce and add to map
- BSONObj res = _config.reducer->reduce( all );
- nSize += _add(n.get(), res);
- }
- }
+ for (InMemory::iterator i = _temp->begin(); i != _temp->end(); i++) {
+ BSONList& all = i->second;
+ if (all.size() < 1)
+ continue;
- // swap maps
- _temp.reset( n.release() );
- _size = nSize;
- }
+ for (BSONList::iterator j = all.begin(); j != all.end(); j++)
+ _insertToInc(*j);
+ }
+ _temp->clear();
+ _size = 0;
+}
- /**
- * Dumps the entire in memory map to the inc collection.
- */
- void State::dumpToInc() {
- if ( ! _onDisk )
- return;
+/**
+ * Adds object to in memory map
+ */
+void State::emit(const BSONObj& a) {
+ _numEmits++;
+ _size += _add(_temp.get(), a);
+}
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
- BSONList& all = i->second;
- if ( all.size() < 1 )
- continue;
+int State::_add(InMemory* im, const BSONObj& a) {
+ BSONList& all = (*im)[a];
+ all.push_back(a);
+ if (all.size() > 1) {
+ ++_dupCount;
+ }
- for ( BSONList::iterator j=all.begin(); j!=all.end(); j++ )
- _insertToInc( *j );
- }
- _temp->clear();
- _size = 0;
+ return a.objsize() + 16;
+}
+
+void State::reduceAndSpillInMemoryStateIfNeeded() {
+ // Make sure no DB locks are held, because this method manages its own locking and
+ // write units of work.
+ invariant(!_txn->lockState()->isLocked());
+
+ if (_jsMode) {
+ // try to reduce if it is beneficial
+ int dupCt = _scope->getNumberInt("_dupCt");
+ int keyCt = _scope->getNumberInt("_keyCt");
+
+ if (keyCt > _config.jsMaxKeys) {
+ // too many keys for JS, switch to mixed
+ _bailFromJS(BSONObj(), this);
+ // then fall through to check map size
+ } else if (dupCt > (keyCt * _config.reduceTriggerRatio)) {
+ // reduce now to lower mem usage
+ Timer t;
+ _scope->invoke(_reduceAll, 0, 0, 0, true);
+ LOG(3) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt
+ << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis()
+ << "ms" << endl;
+ return;
}
+ }
- /**
- * Adds object to in memory map
- */
- void State::emit( const BSONObj& a ) {
- _numEmits++;
- _size += _add(_temp.get(), a);
+ if (_jsMode)
+ return;
+
+ if (_size > _config.maxInMemSize || _dupCount > (_temp->size() * _config.reduceTriggerRatio)) {
+ // attempt to reduce in memory map, if memory is too high or we have many duplicates
+ long oldSize = _size;
+ Timer t;
+ reduceInMemory();
+ LOG(3) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount
+ << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
+
+ // if size is still high, or values are not reducing well, dump
+ if (_onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2)) {
+ dumpToInc();
+ LOG(3) << " MR - dumping to db" << endl;
}
+ }
+}
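// The two thresholds used above, written out as a standalone sketch. The default
// numbers below are hypothetical placeholders, not the actual values of
// Config::maxInMemSize or Config::reduceTriggerRatio.
namespace mr_sketch {

// Reduce the in-memory map when it is too large or dominated by duplicate keys.
inline bool shouldReduceInMemory(long size, long dupCount, long keyCount,
                                 long maxInMemSize = 64 * 1024 * 1024,
                                 double reduceTriggerRatio = 10.0) {
    return size > maxInMemSize || dupCount > keyCount * reduceTriggerRatio;
}

// After reducing, dump to the inc collection if the map is still too large or
// barely shrank (values are not reducing well). Only meaningful when on disk.
inline bool shouldDumpToInc(bool onDisk, long newSize, long oldSize,
                            long maxInMemSize = 64 * 1024 * 1024) {
    return onDisk && (newSize > maxInMemSize || newSize > oldSize / 2);
}

}  // namespace mr_sketch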
- int State::_add(InMemory* im, const BSONObj& a) {
- BSONList& all = (*im)[a];
- all.push_back( a );
- if (all.size() > 1) {
- ++_dupCount;
- }
+/**
+ * The native emit function that is called from the js map function
+ */
+BSONObj fast_emit(const BSONObj& args, void* data) {
+ uassert(10077, "fast_emit takes 2 args", args.nFields() == 2);
+ uassert(13069,
+ "an emit can't be more than half max bson size",
+ args.objsize() < (BSONObjMaxUserSize / 2));
+
+ State* state = (State*)data;
+ if (args.firstElement().type() == Undefined) {
+ BSONObjBuilder b(args.objsize());
+ b.appendNull("");
+ BSONObjIterator i(args);
+ i.next();
+ b.append(i.next());
+ state->emit(b.obj());
+ } else {
+ state->emit(args);
+ }
+ return BSONObj();
+}
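// fast_emit() above enforces the two-argument shape and the half-of-max-BSON-size
// limit, then normalizes an Undefined key to null before handing the pair to
// State::emit(); any other key type is stored exactly as emitted.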
- return a.objsize() + 16;
- }
+/**
+ * This function is called when we realize we can't use js mode for m/r on the 1st key
+ */
+BSONObj _bailFromJS(const BSONObj& args, void* data) {
+ State* state = (State*)data;
+ state->bailFromJS();
- void State::reduceAndSpillInMemoryStateIfNeeded() {
- // Make sure no DB locks are held, because this method manages its own locking and
- // write units of work.
- invariant(!_txn->lockState()->isLocked());
+ // emit this particular key if there is one
+ if (!args.isEmpty()) {
+ fast_emit(args, data);
+ }
+ return BSONObj();
+}
- if (_jsMode) {
- // try to reduce if it is beneficial
- int dupCt = _scope->getNumberInt("_dupCt");
- int keyCt = _scope->getNumberInt("_keyCt");
+/**
+ * This class represents a map/reduce command executed on a single server
+ */
+class MapReduceCommand : public Command {
+public:
+ MapReduceCommand() : Command("mapReduce", false, "mapreduce") {}
- if (keyCt > _config.jsMaxKeys) {
- // too many keys for JS, switch to mixed
- _bailFromJS(BSONObj(), this);
- // then fall through to check map size
- }
- else if (dupCt > (keyCt * _config.reduceTriggerRatio)) {
- // reduce now to lower mem usage
- Timer t;
- _scope->invoke(_reduceAll, 0, 0, 0, true);
- LOG(3) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt
- << " newKeys=" << _scope->getNumberInt("_keyCt") << " time="
- << t.millis() << "ms" << endl;
- return;
- }
- }
+ virtual bool slaveOk() const {
+ return repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeReplSet;
+ }
- if (_jsMode)
- return;
-
- if (_size > _config.maxInMemSize || _dupCount > (_temp->size() * _config.reduceTriggerRatio)) {
- // attempt to reduce in memory map, if memory is too high or we have many duplicates
- long oldSize = _size;
- Timer t;
- reduceInMemory();
- LOG(3) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount
- << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
-
- // if size is still high, or values are not reducing well, dump
- if ( _onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2) ) {
- dumpToInc();
- LOG(3) << " MR - dumping to db" << endl;
- }
- }
- }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- /**
- * emit that will be called by js function
- */
- BSONObj fast_emit( const BSONObj& args, void* data ) {
- uassert( 10077 , "fast_emit takes 2 args" , args.nFields() == 2 );
- uassert( 13069 , "an emit can't be more than half max bson size" , args.objsize() < ( BSONObjMaxUserSize / 2 ) );
-
- State* state = (State*) data;
- if ( args.firstElement().type() == Undefined ) {
- BSONObjBuilder b( args.objsize() );
- b.appendNull( "" );
- BSONObjIterator i( args );
- i.next();
- b.append( i.next() );
- state->emit( b.obj() );
- }
- else {
- state->emit( args );
- }
- return BSONObj();
- }
+ virtual void help(stringstream& help) const {
+ help << "Run a map/reduce operation on the server.\n";
+ help << "Note this is used for aggregation, not querying, in MongoDB.\n";
+ help << "http://dochub.mongodb.org/core/mapreduce";
+ }
- /**
- * function is called when we realize we cant use js mode for m/r on the 1st key
- */
- BSONObj _bailFromJS( const BSONObj& args, void* data ) {
- State* state = (State*) data;
- state->bailFromJS();
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- // emit this particular key if there is one
- if (!args.isEmpty()) {
- fast_emit(args, data);
- }
- return BSONObj();
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmd,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer t;
+
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmd))
+ maybeDisableValidation.emplace(txn);
+
+ auto client = txn->getClient();
+
+ if (client->isInDirectClient()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation, "Cannot run mapReduce command from eval()"));
}
- /**
- * This class represents a map/reduce command executed on a single server
- */
- class MapReduceCommand : public Command {
- public:
- MapReduceCommand() : Command("mapReduce", false, "mapreduce") {}
+ CurOp* op = CurOp::get(txn);
- virtual bool slaveOk() const {
- return repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeReplSet;
- }
+ Config config(dbname, cmd);
- virtual bool slaveOverrideOk() const { return true; }
+ LOG(1) << "mr ns: " << config.ns << endl;
- virtual void help( stringstream &help ) const {
- help << "Run a map/reduce operation on the server.\n";
- help << "Note this is used for aggregation, not querying, in MongoDB.\n";
- help << "http://dochub.mongodb.org/core/mapreduce";
- }
+ uassert(16149, "cannot run map reduce without the js engine", globalScriptEngine);
+
+ CollectionMetadataPtr collMetadata;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ // Prevent sharding state from changing during the MR.
+ unique_ptr<RangePreserver> rangePreserver;
+ {
+ AutoGetCollectionForRead ctx(txn, config.ns);
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
+ Collection* collection = ctx.getCollection();
+ if (collection) {
+ rangePreserver.reset(new RangePreserver(collection));
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmd,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Timer t;
+ // Get metadata before we check our version, to make sure it doesn't increment
+ // in the meantime. Need to do this in the same lock scope as the block.
+ if (shardingState.needCollectionMetadata(client, config.ns)) {
+ collMetadata = shardingState.getCollectionMetadata(config.ns);
+ }
+ }
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmd))
- maybeDisableValidation.emplace(txn);
+ bool shouldHaveData = false;
- auto client = txn->getClient();
+ BSONObjBuilder countsBuilder;
+ BSONObjBuilder timingBuilder;
+ State state(txn, config);
+ if (!state.sourceExists()) {
+ errmsg = "ns doesn't exist";
+ return false;
+ }
+ if (state.isOnDisk()) {
+ // this means that it will be doing a write operation, make sure we are on Master
+ // ideally this check should be in slaveOk(), but at that point config is not known
+ NamespaceString nss(config.ns);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
+ errmsg = "not master";
+ return false;
+ }
+ }
- if (client->isInDirectClient()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot run mapReduce command from eval()"));
- }
+ try {
+ state.init();
+ state.prepTempCollection();
+ ON_BLOCK_EXIT_OBJ(state, &State::dropTempCollections);
+
+ int progressTotal = 0;
+ bool showTotal = true;
+ if (state.config().filter.isEmpty()) {
+ progressTotal = state.incomingDocuments();
+ } else {
+ showTotal = false;
+ // Set an arbitrary total > 0 so the meter will be activated.
+ progressTotal = 1;
+ }
- CurOp* op = CurOp::get(txn);
+ stdx::unique_lock<Client> lk(*txn->getClient());
+ ProgressMeter& progress(op->setMessage_inlock(
+ "m/r: (1/3) emit phase", "M/R: (1/3) Emit Progress", progressTotal));
+ lk.unlock();
+ progress.showTotal(showTotal);
+ ProgressMeterHolder pm(progress);
- Config config( dbname , cmd );
+ // See cast on next line to 32 bit unsigned
+ wassert(config.limit < 0x4000000);
- LOG(1) << "mr ns: " << config.ns << endl;
+ long long mapTime = 0;
+ long long reduceTime = 0;
+ long long numInputs = 0;
- uassert( 16149 , "cannot run map reduce without the js engine", globalScriptEngine );
+ {
+ // We've got a cursor preventing migrations off, now re-establish our
+ // useful cursor.
- CollectionMetadataPtr collMetadata;
+ const NamespaceString nss(config.ns);
- // Prevent sharding state from changing during the MR.
- unique_ptr<RangePreserver> rangePreserver;
- {
- AutoGetCollectionForRead ctx(txn, config.ns);
+ // Need lock and context to use it
+ unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(txn, MODE_IS));
+ unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(txn, nss.db(), MODE_S));
- Collection* collection = ctx.getCollection();
- if (collection) {
- rangePreserver.reset(new RangePreserver(collection));
- }
+ const WhereCallbackReal whereCallback(txn, nss.db());
- // Get metadata before we check our version, to make sure it doesn't increment
- // in the meantime. Need to do this in the same lock scope as the block.
- if (shardingState.needCollectionMetadata(client, config.ns)) {
- collMetadata = shardingState.getCollectionMetadata( config.ns );
- }
+ CanonicalQuery* cqRaw;
+ if (!CanonicalQuery::canonicalize(
+ config.ns, config.filter, config.sort, BSONObj(), &cqRaw, whereCallback)
+ .isOK()) {
+ uasserted(17238, "Can't canonicalize query " + config.filter.toString());
+ return 0;
}
+ std::unique_ptr<CanonicalQuery> cq(cqRaw);
- bool shouldHaveData = false;
+ Database* db = scopedAutoDb->getDb();
+ Collection* coll = state.getCollectionOrUassert(db, config.ns);
+ invariant(coll);
- BSONObjBuilder countsBuilder;
- BSONObjBuilder timingBuilder;
- State state( txn, config );
- if ( ! state.sourceExists() ) {
- errmsg = "ns doesn't exist";
- return false;
- }
- if (state.isOnDisk()) {
- // this means that it will be doing a write operation, make sure we are on Master
- // ideally this check should be in slaveOk(), but at that point config is not known
- NamespaceString nss(config.ns);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- errmsg = "not master";
- return false;
- }
+ PlanExecutor* rawExec;
+ if (!getExecutor(txn, coll, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec)
+ .isOK()) {
+ uasserted(17239, "Can't get executor for query " + config.filter.toString());
+ return 0;
}
- try {
- state.init();
- state.prepTempCollection();
- ON_BLOCK_EXIT_OBJ(state, &State::dropTempCollections);
+ unique_ptr<PlanExecutor> exec(rawExec);
- int progressTotal = 0;
- bool showTotal = true;
- if ( state.config().filter.isEmpty() ) {
- progressTotal = state.incomingDocuments();
- }
- else {
- showTotal = false;
- // Set an arbitrary total > 0 so the meter will be activated.
- progressTotal = 1;
- }
+ Timer mt;
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeter& progress( op->setMessage_inlock("m/r: (1/3) emit phase",
- "M/R: (1/3) Emit Progress",
- progressTotal ));
- lk.unlock();
- progress.showTotal(showTotal);
- ProgressMeterHolder pm(progress);
-
- // See cast on next line to 32 bit unsigned
- wassert(config.limit < 0x4000000);
-
- long long mapTime = 0;
- long long reduceTime = 0;
- long long numInputs = 0;
-
- {
- // We've got a cursor preventing migrations off, now re-establish our
- // useful cursor.
-
- const NamespaceString nss(config.ns);
-
- // Need lock and context to use it
- unique_ptr<ScopedTransaction> scopedXact(
- new ScopedTransaction(txn, MODE_IS));
- unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(txn, nss.db(), MODE_S));
-
- const WhereCallbackReal whereCallback(txn, nss.db());
-
- CanonicalQuery* cqRaw;
- if (!CanonicalQuery::canonicalize(config.ns,
- config.filter,
- config.sort,
- BSONObj(),
- &cqRaw,
- whereCallback).isOK()) {
- uasserted(17238, "Can't canonicalize query " + config.filter.toString());
- return 0;
- }
- std::unique_ptr<CanonicalQuery> cq(cqRaw);
-
- Database* db = scopedAutoDb->getDb();
- Collection* coll = state.getCollectionOrUassert(db, config.ns);
- invariant(coll);
-
- PlanExecutor* rawExec;
- if (!getExecutor(txn,
- coll,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec).isOK()) {
- uasserted(17239, "Can't get executor for query "
- + config.filter.toString());
- return 0;
+ // go through each doc
+ BSONObj o;
+ while (PlanExecutor::ADVANCED == exec->getNext(&o, NULL)) {
+ // check to see if this is a new object we don't own yet
+ // because of a chunk migration
+ if (collMetadata) {
+ ShardKeyPattern kp(collMetadata->getKeyPattern());
+ if (!collMetadata->keyBelongsToMe(kp.extractShardKeyFromDoc(o))) {
+ continue;
}
+ }
- unique_ptr<PlanExecutor> exec(rawExec);
-
- Timer mt;
-
- // go through each doc
- BSONObj o;
- while (PlanExecutor::ADVANCED == exec->getNext(&o, NULL)) {
- // check to see if this is a new object we don't own yet
- // because of a chunk migration
- if ( collMetadata ) {
- ShardKeyPattern kp( collMetadata->getKeyPattern() );
- if (!collMetadata->keyBelongsToMe(kp.extractShardKeyFromDoc(o))) {
- continue;
- }
- }
-
- // do map
- if ( config.verbose ) mt.reset();
- config.mapper->map( o );
- if ( config.verbose ) mapTime += mt.micros();
-
- // Check if the state accumulated so far needs to be written to a
- // collection. This may yield the DB lock temporarily and then
- // acquire it again.
- //
- numInputs++;
- if (numInputs % 100 == 0) {
- Timer t;
-
- // TODO: As an optimization, we might want to do the save/restore
- // state and yield inside the reduceAndSpillInMemoryState method, so
- // it only happens if necessary.
- exec->saveState();
-
- scopedAutoDb.reset();
- scopedXact.reset();
-
- state.reduceAndSpillInMemoryStateIfNeeded();
-
- scopedXact.reset(new ScopedTransaction(txn, MODE_IS));
- scopedAutoDb.reset(new AutoGetDb(txn, nss.db(), MODE_S));
-
- exec->restoreState(txn);
-
- // Need to reload the database, in case it was dropped after we
- // released the lock
- db = scopedAutoDb->getDb();
- if (db == NULL) {
- // Database was deleted after we freed the lock
- StringBuilder sb;
- sb << "Database "
- << nss.db()
- << " was deleted in the middle of the reduce job.";
- uasserted(28523, sb.str());
- }
-
- reduceTime += t.micros();
-
- txn->checkForInterrupt();
- }
-
- pm.hit();
-
- if (config.limit && numInputs >= config.limit)
- break;
+ // do map
+ if (config.verbose)
+ mt.reset();
+ config.mapper->map(o);
+ if (config.verbose)
+ mapTime += mt.micros();
+
+ // Check if the state accumulated so far needs to be written to a
+ // collection. This may yield the DB lock temporarily and then
+ // acquire it again.
+ //
+ numInputs++;
+ if (numInputs % 100 == 0) {
+ Timer t;
+
+ // TODO: As an optimization, we might want to do the save/restore
+ // state and yield inside the reduceAndSpillInMemoryState method, so
+ // it only happens if necessary.
+ exec->saveState();
+
+ scopedAutoDb.reset();
+ scopedXact.reset();
+
+ state.reduceAndSpillInMemoryStateIfNeeded();
+
+ scopedXact.reset(new ScopedTransaction(txn, MODE_IS));
+ scopedAutoDb.reset(new AutoGetDb(txn, nss.db(), MODE_S));
+
+ exec->restoreState(txn);
+
+ // Need to reload the database, in case it was dropped after we
+ // released the lock
+ db = scopedAutoDb->getDb();
+ if (db == NULL) {
+ // Database was deleted after we freed the lock
+ StringBuilder sb;
+ sb << "Database " << nss.db()
+ << " was deleted in the middle of the reduce job.";
+ uasserted(28523, sb.str());
}
- }
- pm.finished();
- txn->checkForInterrupt();
+ reduceTime += t.micros();
- // update counters
- countsBuilder.appendNumber("input", numInputs);
- countsBuilder.appendNumber( "emit" , state.numEmits() );
- if ( state.numEmits() )
- shouldHaveData = true;
+ txn->checkForInterrupt();
+ }
- timingBuilder.appendNumber( "mapTime" , mapTime / 1000 );
- timingBuilder.append( "emitLoop" , t.millis() );
+ pm.hit();
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- op->setMessage_inlock("m/r: (2/3) final reduce in memory",
- "M/R: (2/3) Final In-Memory Reduce Progress");
- }
- Timer rt;
- // do reduce in memory
- // this will be the last reduce needed for inline mode
- state.reduceInMemory();
- // if not inline: dump the in memory map to inc collection, all data is on disk
- state.dumpToInc();
- // final reduce
- state.finalReduce(op , pm );
- reduceTime += rt.micros();
- countsBuilder.appendNumber( "reduce" , state.numReduces() );
- timingBuilder.appendNumber("reduceTime", reduceTime / 1000);
- timingBuilder.append( "mode" , state.jsMode() ? "js" : "mixed" );
-
- long long finalCount = state.postProcessCollection(txn, op, pm);
- state.appendResults( result );
-
- timingBuilder.appendNumber( "total" , t.millis() );
- result.appendNumber( "timeMillis" , t.millis() );
- countsBuilder.appendNumber( "output" , finalCount );
- if ( config.verbose ) result.append( "timing" , timingBuilder.obj() );
- result.append( "counts" , countsBuilder.obj() );
-
- if ( finalCount == 0 && shouldHaveData ) {
- result.append( "cmd" , cmd );
- errmsg = "there were emits but no data!";
- return false;
- }
- }
- catch( SendStaleConfigException& e ){
- log() << "mr detected stale config, should retry" << causedBy(e) << endl;
- throw e;
- }
- // TODO: The error handling code for queries is v. fragile,
- // *requires* rethrow AssertionExceptions - should probably fix.
- catch ( AssertionException& e ){
- log() << "mr failed, removing collection" << causedBy(e) << endl;
- throw e;
- }
- catch ( std::exception& e ){
- log() << "mr failed, removing collection" << causedBy(e) << endl;
- throw e;
- }
- catch ( ... ) {
- log() << "mr failed for unknown reason, removing collection" << endl;
- throw;
+ if (config.limit && numInputs >= config.limit)
+ break;
}
-
- return true;
}
+ pm.finished();
+
+ txn->checkForInterrupt();
+
+ // update counters
+ countsBuilder.appendNumber("input", numInputs);
+ countsBuilder.appendNumber("emit", state.numEmits());
+ if (state.numEmits())
+ shouldHaveData = true;
- } mapReduceCommand;
-
- /**
- * This class represents a map/reduce command executed on the output server of a sharded env
- */
- class MapReduceFinishCommand : public Command {
- public:
- void help(stringstream& h) const { h << "internal"; }
- MapReduceFinishCommand() : Command( "mapreduce.shardedfinish" ) {}
- virtual bool slaveOk() const {
- return repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeReplSet;
+ timingBuilder.appendNumber("mapTime", mapTime / 1000);
+ timingBuilder.append("emitLoop", t.millis());
+
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op->setMessage_inlock("m/r: (2/3) final reduce in memory",
+ "M/R: (2/3) Final In-Memory Reduce Progress");
}
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::internal);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ Timer rt;
+ // do reduce in memory
+ // this will be the last reduce needed for inline mode
+ state.reduceInMemory();
+ // if not inline: dump the in memory map to inc collection, all data is on disk
+ state.dumpToInc();
+ // final reduce
+ state.finalReduce(op, pm);
+ reduceTime += rt.micros();
+ countsBuilder.appendNumber("reduce", state.numReduces());
+ timingBuilder.appendNumber("reduceTime", reduceTime / 1000);
+ timingBuilder.append("mode", state.jsMode() ? "js" : "mixed");
+
+ long long finalCount = state.postProcessCollection(txn, op, pm);
+ state.appendResults(result);
+
+ timingBuilder.appendNumber("total", t.millis());
+ result.appendNumber("timeMillis", t.millis());
+ countsBuilder.appendNumber("output", finalCount);
+ if (config.verbose)
+ result.append("timing", timingBuilder.obj());
+ result.append("counts", countsBuilder.obj());
+
+ if (finalCount == 0 && shouldHaveData) {
+ result.append("cmd", cmd);
+ errmsg = "there were emits but no data!";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
-
- ShardedConnectionInfo::addHook();
- // legacy name
- string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
- verify( shardedOutputCollection.size() > 0 );
- string inputNS;
- if ( cmdObj["inputDB"].type() == String ) {
- inputNS = cmdObj["inputDB"].String() + "." + shardedOutputCollection;
- }
- else {
- inputNS = dbname + "." + shardedOutputCollection;
- }
+ } catch (SendStaleConfigException& e) {
+ log() << "mr detected stale config, should retry" << causedBy(e) << endl;
+ throw e;
+ }
+ // TODO: The error handling code for queries is v. fragile,
+ // *requires* rethrow AssertionExceptions - should probably fix.
+ catch (AssertionException& e) {
+ log() << "mr failed, removing collection" << causedBy(e) << endl;
+ throw e;
+ } catch (std::exception& e) {
+ log() << "mr failed, removing collection" << causedBy(e) << endl;
+ throw e;
+ } catch (...) {
+ log() << "mr failed for unknown reason, removing collection" << endl;
+ throw;
+ }
- CurOp * op = CurOp::get(txn);
+ return true;
+ }
- Config config( dbname , cmdObj.firstElement().embeddedObjectUserCheck() );
- State state(txn, config);
- state.init();
+} mapReduceCommand;
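// Reply fields assembled by MapReduceCommand::run() above, beyond whatever
// state.appendResults() contributes:
//   timeMillis: total elapsed time
//   timing:     {mapTime, emitLoop, reduceTime, mode, total}   (verbose only)
//   counts:     {input, emit, reduce, output}
//   cmd:        echoed back only in the "there were emits but no data!" error case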
- // no need for incremental collection because records are already sorted
- state._useIncremental = false;
- config.incLong = config.tempNamespace;
+/**
+ * This class represents a map/reduce command executed on the output server of a sharded env
+ */
+class MapReduceFinishCommand : public Command {
+public:
+ void help(stringstream& h) const {
+ h << "internal";
+ }
+ MapReduceFinishCommand() : Command("mapreduce.shardedfinish") {}
+ virtual bool slaveOk() const {
+ return repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeReplSet;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::internal);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
+
+ ShardedConnectionInfo::addHook();
+ // legacy name
+ string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
+ verify(shardedOutputCollection.size() > 0);
+ string inputNS;
+ if (cmdObj["inputDB"].type() == String) {
+ inputNS = cmdObj["inputDB"].String() + "." + shardedOutputCollection;
+ } else {
+ inputNS = dbname + "." + shardedOutputCollection;
+ }
- BSONObj shardCounts = cmdObj["shardCounts"].embeddedObjectUserCheck();
- BSONObj counts = cmdObj["counts"].embeddedObjectUserCheck();
+ CurOp* op = CurOp::get(txn);
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeterHolder pm(op->setMessage_inlock("m/r: merge sort and reduce",
- "M/R Merge Sort and Reduce Progress"));
- lk.unlock();
- set<string> servers;
+ Config config(dbname, cmdObj.firstElement().embeddedObjectUserCheck());
+ State state(txn, config);
+ state.init();
- {
- // parse per shard results
- BSONObjIterator i(shardCounts);
- while (i.more()) {
- BSONElement e = i.next();
- servers.insert(e.fieldName());
- }
- }
+ // no need for incremental collection because records are already sorted
+ state._useIncremental = false;
+ config.incLong = config.tempNamespace;
- state.prepTempCollection();
- ON_BLOCK_EXIT_OBJ(state, &State::dropTempCollections);
-
- BSONList values;
- if (!config.outputOptions.outDB.empty()) {
- BSONObjBuilder loc;
- if ( !config.outputOptions.outDB.empty())
- loc.append( "db" , config.outputOptions.outDB );
- if ( !config.outputOptions.collectionName.empty() )
- loc.append( "collection" , config.outputOptions.collectionName );
- result.append("result", loc.obj());
- }
- else {
- if ( !config.outputOptions.collectionName.empty() )
- result.append( "result" , config.outputOptions.collectionName );
- }
+ BSONObj shardCounts = cmdObj["shardCounts"].embeddedObjectUserCheck();
+ BSONObj counts = cmdObj["counts"].embeddedObjectUserCheck();
- auto status = grid.catalogCache()->getDatabase(dbname);
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
+ stdx::unique_lock<Client> lk(*txn->getClient());
+ ProgressMeterHolder pm(op->setMessage_inlock("m/r: merge sort and reduce",
+ "M/R Merge Sort and Reduce Progress"));
+ lk.unlock();
+ set<string> servers;
+
+ {
+ // parse per shard results
+ BSONObjIterator i(shardCounts);
+ while (i.more()) {
+ BSONElement e = i.next();
+ servers.insert(e.fieldName());
+ }
+ }
- shared_ptr<DBConfig> confOut = status.getValue();
+ state.prepTempCollection();
+ ON_BLOCK_EXIT_OBJ(state, &State::dropTempCollections);
+
+ BSONList values;
+ if (!config.outputOptions.outDB.empty()) {
+ BSONObjBuilder loc;
+ if (!config.outputOptions.outDB.empty())
+ loc.append("db", config.outputOptions.outDB);
+ if (!config.outputOptions.collectionName.empty())
+ loc.append("collection", config.outputOptions.collectionName);
+ result.append("result", loc.obj());
+ } else {
+ if (!config.outputOptions.collectionName.empty())
+ result.append("result", config.outputOptions.collectionName);
+ }
- vector<ChunkPtr> chunks;
- if ( confOut->isSharded(config.outputOptions.finalNamespace) ) {
- ChunkManagerPtr cm = confOut->getChunkManager(
- config.outputOptions.finalNamespace);
+ auto status = grid.catalogCache()->getDatabase(dbname);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
+ }
- // Fetch result from other shards 1 chunk at a time. It would be better to do
- // just one big $or query, but then the sorting would not be efficient.
- const string shardName = shardingState.getShardName();
- const ChunkMap& chunkMap = cm->getChunkMap();
+ shared_ptr<DBConfig> confOut = status.getValue();
- for ( ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it ) {
- ChunkPtr chunk = it->second;
- if (chunk->getShardId() == shardName) {
- chunks.push_back(chunk);
- }
- }
- }
+ vector<ChunkPtr> chunks;
+ if (confOut->isSharded(config.outputOptions.finalNamespace)) {
+ ChunkManagerPtr cm = confOut->getChunkManager(config.outputOptions.finalNamespace);
- long long inputCount = 0;
- unsigned int index = 0;
- BSONObj query;
- BSONArrayBuilder chunkSizes;
- while (true) {
- ChunkPtr chunk;
- if (chunks.size() > 0) {
- chunk = chunks[index];
- BSONObjBuilder b;
- b.appendAs(chunk->getMin().firstElement(), "$gte");
- b.appendAs(chunk->getMax().firstElement(), "$lt");
- query = BSON("_id" << b.obj());
-// chunkSizes.append(min);
- }
+ // Fetch result from other shards 1 chunk at a time. It would be better to do
+ // just one big $or query, but then the sorting would not be efficient.
+ const string shardName = shardingState.getShardName();
+ const ChunkMap& chunkMap = cm->getChunkMap();
- // reduce from each shard for a chunk
- BSONObj sortKey = BSON( "_id" << 1 );
- ParallelSortClusteredCursor cursor(servers, inputNS,
- Query(query).sort(sortKey), QueryOption_NoCursorTimeout);
- cursor.init();
- int chunkSize = 0;
-
- while ( cursor.more() || !values.empty() ) {
- BSONObj t;
- if (cursor.more()) {
- t = cursor.next().getOwned();
- ++inputCount;
-
- if ( values.size() == 0 ) {
- values.push_back( t );
- continue;
- }
-
- if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
- values.push_back( t );
- continue;
- }
- }
+ for (ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it) {
+ ChunkPtr chunk = it->second;
+ if (chunk->getShardId() == shardName) {
+ chunks.push_back(chunk);
+ }
+ }
+ }
- BSONObj res = config.reducer->finalReduce( values , config.finalizer.get());
- chunkSize += res.objsize();
- if (state.isOnDisk())
- state.insert( config.tempNamespace , res );
- else
- state.emit(res);
- values.clear();
- if (!t.isEmpty())
- values.push_back( t );
+ long long inputCount = 0;
+ unsigned int index = 0;
+ BSONObj query;
+ BSONArrayBuilder chunkSizes;
+ while (true) {
+ ChunkPtr chunk;
+ if (chunks.size() > 0) {
+ chunk = chunks[index];
+ BSONObjBuilder b;
+ b.appendAs(chunk->getMin().firstElement(), "$gte");
+ b.appendAs(chunk->getMax().firstElement(), "$lt");
+ query = BSON("_id" << b.obj());
+ // chunkSizes.append(min);
+ }
+
+ // reduce from each shard for a chunk
+ BSONObj sortKey = BSON("_id" << 1);
+ ParallelSortClusteredCursor cursor(
+ servers, inputNS, Query(query).sort(sortKey), QueryOption_NoCursorTimeout);
+ cursor.init();
+ int chunkSize = 0;
+
+ while (cursor.more() || !values.empty()) {
+ BSONObj t;
+ if (cursor.more()) {
+ t = cursor.next().getOwned();
+ ++inputCount;
+
+ if (values.size() == 0) {
+ values.push_back(t);
+ continue;
}
- if (chunk) {
- chunkSizes.append(chunk->getMin());
- chunkSizes.append(chunkSize);
+ if (t.woSortOrder(*(values.begin()), sortKey) == 0) {
+ values.push_back(t);
+ continue;
}
- if (++index >= chunks.size())
- break;
}
- // Forget temporary input collection, if output is sharded collection
- ShardConnection::forgetNS( inputNS );
+ BSONObj res = config.reducer->finalReduce(values, config.finalizer.get());
+ chunkSize += res.objsize();
+ if (state.isOnDisk())
+ state.insert(config.tempNamespace, res);
+ else
+ state.emit(res);
+ values.clear();
+ if (!t.isEmpty())
+ values.push_back(t);
+ }
- result.append( "chunkSizes" , chunkSizes.arr() );
+ if (chunk) {
+ chunkSizes.append(chunk->getMin());
+ chunkSizes.append(chunkSize);
+ }
+ if (++index >= chunks.size())
+ break;
+ }
- long long outputCount = state.postProcessCollection(txn, op, pm);
- state.appendResults( result );
+        // Forget the temporary input collection if the output is a sharded collection
+ ShardConnection::forgetNS(inputNS);
- BSONObjBuilder countsB(32);
- countsB.append("input", inputCount);
- countsB.append("reduce", state.numReduces());
- countsB.append("output", outputCount);
- result.append( "counts" , countsB.obj() );
+ result.append("chunkSizes", chunkSizes.arr());
- return 1;
- }
- } mapReduceFinishCommand;
+ long long outputCount = state.postProcessCollection(txn, op, pm);
+ state.appendResults(result);
- }
+ BSONObjBuilder countsB(32);
+ countsB.append("input", inputCount);
+ countsB.append("reduce", state.numReduces());
+ countsB.append("output", outputCount);
+ result.append("counts", countsB.obj());
+        return true;
+ }
+} mapReduceFinishCommand;
+}
}
-
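
The shardedfinish merge loop above depends on every per-shard cursor being sorted by _id: documents with the same key arrive contiguously, so a single pass can buffer a run of equal keys and call finalReduce() when the key changes. Below is a minimal standalone sketch of that grouping pattern, using std::pair and a toy sum reducer in place of BSONObj and the real JavaScript reducer (illustrative assumptions only, not MongoDB APIs):

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Toy stand-ins for BSON documents: (key, value) pairs already merge-sorted by key.
using Doc = std::pair<std::string, int>;

// Toy final reducer: collapses a run of equal-key docs into a single output doc.
static Doc finalReduce(const std::vector<Doc>& values) {
    int sum = 0;
    for (const Doc& d : values)
        sum += d.second;
    return {values.front().first, sum};
}

int main() {
    // Pretend this is the sorted stream coming back from the per-shard cursors.
    const std::vector<Doc> stream = {{"a", 1}, {"a", 2}, {"b", 5}, {"c", 3}, {"c", 4}};

    std::vector<Doc> values;  // current run of equal keys, like 'values' in the command
    for (const Doc& d : stream) {
        if (!values.empty() && d.first != values.front().first) {
            Doc out = finalReduce(values);  // key changed: reduce the buffered run
            std::cout << out.first << " -> " << out.second << "\n";
            values.clear();
        }
        values.push_back(d);
    }
    if (!values.empty()) {  // flush the final run
        Doc out = finalReduce(values);
        std::cout << out.first << " -> " << out.second << "\n";
    }
    return 0;
}
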
diff --git a/src/mongo/db/commands/mr.h b/src/mongo/db/commands/mr.h
index 083165ebe27..8bc4264794e 100644
--- a/src/mongo/db/commands/mr.h
+++ b/src/mongo/db/commands/mr.h
@@ -42,347 +42,374 @@
namespace mongo {
- class Collection;
- class Database;
- class OperationContext;
-
- namespace mr {
-
- typedef std::vector<BSONObj> BSONList;
-
- class State;
-
- // ------------ function interfaces -----------
-
- class Mapper {
- MONGO_DISALLOW_COPYING(Mapper);
- public:
- virtual ~Mapper() {}
- virtual void init( State * state ) = 0;
-
- virtual void map( const BSONObj& o ) = 0;
- protected:
- Mapper() = default;
- };
-
- class Finalizer {
- MONGO_DISALLOW_COPYING(Finalizer);
- public:
- virtual ~Finalizer() {}
- virtual void init( State * state ) = 0;
-
- /**
- * this takes a tuple and returns a tuple
- */
- virtual BSONObj finalize( const BSONObj& tuple ) = 0;
-
- protected:
- Finalizer() = default;
- };
-
- class Reducer {
- MONGO_DISALLOW_COPYING(Reducer);
- public:
- Reducer() : numReduces(0) {}
- virtual ~Reducer() {}
- virtual void init( State * state ) = 0;
-
- virtual BSONObj reduce( const BSONList& tuples ) = 0;
- /** this means its a final reduce, even if there is no finalizer */
- virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer ) = 0;
-
- long long numReduces;
- };
-
- // ------------ js function implementations -----------
-
- /**
- * used as a holder for Scope and ScriptingFunction
- * visitor like pattern as Scope is gotten from first access
- */
- class JSFunction {
- MONGO_DISALLOW_COPYING(JSFunction);
- public:
- /**
- * @param type (map|reduce|finalize)
- */
- JSFunction( const std::string& type , const BSONElement& e );
- virtual ~JSFunction() {}
-
- virtual void init( State * state );
-
- Scope * scope() const { return _scope; }
- ScriptingFunction func() const { return _func; }
-
- private:
- std::string _type;
- std::string _code; // actual javascript code
- BSONObj _wantedScope; // this is for CodeWScope
-
- Scope * _scope; // this is not owned by us, and might be shared
- ScriptingFunction _func;
- };
-
- class JSMapper : public Mapper {
- public:
- JSMapper( const BSONElement & code ) : _func( "_map" , code ) {}
- virtual void map( const BSONObj& o );
- virtual void init( State * state );
-
- private:
- JSFunction _func;
- BSONObj _params;
- };
-
- class JSReducer : public Reducer {
- public:
- JSReducer( const BSONElement& code ) : _func( "_reduce" , code ) {}
- virtual void init( State * state );
-
- virtual BSONObj reduce( const BSONList& tuples );
- virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer );
-
- private:
-
- /**
- * result in "__returnValue"
- * @param key OUT
- * @param endSizeEstimate OUT
- */
- void _reduce( const BSONList& values , BSONObj& key , int& endSizeEstimate );
-
- JSFunction _func;
- };
-
- class JSFinalizer : public Finalizer {
- public:
- JSFinalizer( const BSONElement& code ) : _func( "_finalize" , code ) {}
- virtual BSONObj finalize( const BSONObj& o );
- virtual void init( State * state ) { _func.init( state ); }
- private:
- JSFunction _func;
-
- };
-
- // -----------------
-
-
- class TupleKeyCmp {
- public:
- TupleKeyCmp() {}
- bool operator()( const BSONObj &l, const BSONObj &r ) const {
- return l.firstElement().woCompare( r.firstElement() ) < 0;
- }
- };
-
- typedef std::map< BSONObj,BSONList,TupleKeyCmp > InMemory; // from key to list of tuples
-
- /**
- * holds map/reduce config information
- */
- class Config {
- public:
- Config( const std::string& _dbname , const BSONObj& cmdObj );
-
- std::string dbname;
- std::string ns;
-
- // options
- bool verbose;
- bool jsMode;
- int splitInfo;
-
- // query options
-
- BSONObj filter;
- BSONObj sort;
- long long limit;
-
- // functions
-
- std::unique_ptr<Mapper> mapper;
- std::unique_ptr<Reducer> reducer;
- std::unique_ptr<Finalizer> finalizer;
-
- BSONObj mapParams;
- BSONObj scopeSetup;
-
- // output tables
- std::string incLong;
- std::string tempNamespace;
-
- enum OutputType {
- REPLACE , // atomically replace the collection
- MERGE , // merge keys, override dups
- REDUCE , // merge keys, reduce dups
- INMEMORY // only store in memory, limited in size
- };
- struct OutputOptions {
- std::string outDB;
- std::string collectionName;
- std::string finalNamespace;
- // if true, no lock during output operation
- bool outNonAtomic;
- OutputType outType;
- } outputOptions;
-
- static OutputOptions parseOutputOptions(const std::string& dbname, const BSONObj& cmdObj);
-
- // max number of keys allowed in JS map before switching mode
- long jsMaxKeys;
- // ratio of duplicates vs unique keys before reduce is triggered in js mode
- float reduceTriggerRatio;
- // maximum size of map before it gets dumped to disk
- long maxInMemSize;
-
- // true when called from mongos to do phase-1 of M/R
- bool shardedFirstPass;
-
- static AtomicUInt32 JOB_NUMBER;
- }; // end MRsetup
-
- /**
- * stores information about intermediate map reduce state
- * controls flow of data from map->reduce->finalize->output
- */
- class State {
- public:
- /**
- * txn must outlive this State.
- */
- State( OperationContext* txn, const Config& c );
- ~State();
-
- void init();
-
- // ---- prep -----
- bool sourceExists();
-
- long long incomingDocuments();
-
- // ---- map stage ----
+class Collection;
+class Database;
+class OperationContext;
- /**
- * stages on in in-memory storage
- */
- void emit( const BSONObj& a );
-
- /**
- * Checks the size of the transient in-memory results accumulated so far and potentially
- * runs reduce in order to compact them. If the data is still too large, it will be
- * spilled to the output collection.
- *
- * NOTE: Make sure that no DB locks are held, when calling this function, because it may
- * try to acquire write DB lock for the write to the output collection.
- */
- void reduceAndSpillInMemoryStateIfNeeded();
-
- /**
- * run reduce on _temp
- */
- void reduceInMemory();
-
- /**
- * transfers in memory storage to temp collection
- */
- void dumpToInc();
- void insertToInc( BSONObj& o );
- void _insertToInc( BSONObj& o );
-
- // ------ reduce stage -----------
-
- void prepTempCollection();
-
- void finalReduce( BSONList& values );
-
- void finalReduce( CurOp * op , ProgressMeterHolder& pm );
-
- // ------- cleanup/data positioning ----------
-
- /**
- * Clean up the temporary and incremental collections
- */
- void dropTempCollections();
-
- /**
- @return number objects in collection
- */
- long long postProcessCollection(
- OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
- long long postProcessCollectionNonAtomic(
- OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
-
- /**
- * if INMEMORY will append
- * may also append stats or anything else it likes
- */
- void appendResults( BSONObjBuilder& b );
-
- // -------- util ------------
-
- /**
- * inserts with correct replication semantics
- */
- void insert( const std::string& ns , const BSONObj& o );
+namespace mr {
- // ------ simple accessors -----
-
- /** State maintains ownership, do no use past State lifetime */
- Scope* scope() { return _scope.get(); }
-
- const Config& config() { return _config; }
-
- bool isOnDisk() { return _onDisk; }
+typedef std::vector<BSONObj> BSONList;
- long long numEmits() const { if (_jsMode) return _scope->getNumberLongLong("_emitCt"); return _numEmits; }
- long long numReduces() const { if (_jsMode) return _scope->getNumberLongLong("_redCt"); return _config.reducer->numReduces; }
- long long numInMemKeys() const { if (_jsMode) return _scope->getNumberLongLong("_keyCt"); return _temp->size(); }
+class State;
- bool jsMode() {return _jsMode;}
- void switchMode(bool jsMode);
- void bailFromJS();
-
- Collection* getCollectionOrUassert(Database* db, StringData ns);
-
- const Config& _config;
- DBDirectClient _db;
- bool _useIncremental; // use an incremental collection
+// ------------ function interfaces -----------
- protected:
+class Mapper {
+ MONGO_DISALLOW_COPYING(Mapper);
- /**
- * Appends a new document to the in-memory list of tuples, which are under that
- * document's key.
- *
- * @return estimated in-memory size occupied by the newly added document.
- */
- int _add(InMemory* im , const BSONObj& a);
+public:
+ virtual ~Mapper() {}
+ virtual void init(State* state) = 0;
- OperationContext* _txn;
- std::unique_ptr<Scope> _scope;
- bool _onDisk; // if the end result of this map reduce is disk or not
+ virtual void map(const BSONObj& o) = 0;
- std::unique_ptr<InMemory> _temp;
- long _size; // bytes in _temp
- long _dupCount; // number of duplicate key entries
+protected:
+ Mapper() = default;
+};
- long long _numEmits;
-
- bool _jsMode;
- ScriptingFunction _reduceAll;
- ScriptingFunction _reduceAndEmit;
- ScriptingFunction _reduceAndFinalize;
- ScriptingFunction _reduceAndFinalizeAndInsert;
- };
+class Finalizer {
+ MONGO_DISALLOW_COPYING(Finalizer);
- BSONObj fast_emit( const BSONObj& args, void* data );
- BSONObj _bailFromJS( const BSONObj& args, void* data );
+public:
+ virtual ~Finalizer() {}
+ virtual void init(State* state) = 0;
- void addPrivilegesRequiredForMapReduce(Command* commandTemplate,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out);
- } // end mr namespace
-}
+ /**
+ * this takes a tuple and returns a tuple
+ */
+ virtual BSONObj finalize(const BSONObj& tuple) = 0;
+
+protected:
+ Finalizer() = default;
+};
+
+class Reducer {
+ MONGO_DISALLOW_COPYING(Reducer);
+
+public:
+ Reducer() : numReduces(0) {}
+ virtual ~Reducer() {}
+ virtual void init(State* state) = 0;
+ virtual BSONObj reduce(const BSONList& tuples) = 0;
+    /** this means it's a final reduce, even if there is no finalizer */
+ virtual BSONObj finalReduce(const BSONList& tuples, Finalizer* finalizer) = 0;
+ long long numReduces;
+};
+
+// ------------ js function implementations -----------
+
+/**
+ * used as a holder for Scope and ScriptingFunction
+ * visitor-like pattern, as the Scope is obtained on first access
+ */
+class JSFunction {
+ MONGO_DISALLOW_COPYING(JSFunction);
+
+public:
+ /**
+ * @param type (map|reduce|finalize)
+ */
+ JSFunction(const std::string& type, const BSONElement& e);
+ virtual ~JSFunction() {}
+
+ virtual void init(State* state);
+
+ Scope* scope() const {
+ return _scope;
+ }
+ ScriptingFunction func() const {
+ return _func;
+ }
+
+private:
+ std::string _type;
+ std::string _code; // actual javascript code
+ BSONObj _wantedScope; // this is for CodeWScope
+
+ Scope* _scope; // this is not owned by us, and might be shared
+ ScriptingFunction _func;
+};
+
+class JSMapper : public Mapper {
+public:
+ JSMapper(const BSONElement& code) : _func("_map", code) {}
+ virtual void map(const BSONObj& o);
+ virtual void init(State* state);
+
+private:
+ JSFunction _func;
+ BSONObj _params;
+};
+
+class JSReducer : public Reducer {
+public:
+ JSReducer(const BSONElement& code) : _func("_reduce", code) {}
+ virtual void init(State* state);
+
+ virtual BSONObj reduce(const BSONList& tuples);
+ virtual BSONObj finalReduce(const BSONList& tuples, Finalizer* finalizer);
+
+private:
+ /**
+     * result is stored in "__returnValue"
+ * @param key OUT
+ * @param endSizeEstimate OUT
+ */
+ void _reduce(const BSONList& values, BSONObj& key, int& endSizeEstimate);
+
+ JSFunction _func;
+};
+
+class JSFinalizer : public Finalizer {
+public:
+ JSFinalizer(const BSONElement& code) : _func("_finalize", code) {}
+ virtual BSONObj finalize(const BSONObj& o);
+ virtual void init(State* state) {
+ _func.init(state);
+ }
+
+private:
+ JSFunction _func;
+};
+
+// -----------------
+
+
+class TupleKeyCmp {
+public:
+ TupleKeyCmp() {}
+ bool operator()(const BSONObj& l, const BSONObj& r) const {
+ return l.firstElement().woCompare(r.firstElement()) < 0;
+ }
+};
+
+typedef std::map<BSONObj, BSONList, TupleKeyCmp> InMemory; // from key to list of tuples
+
+/**
+ * holds map/reduce config information
+ */
+class Config {
+public:
+ Config(const std::string& _dbname, const BSONObj& cmdObj);
+
+ std::string dbname;
+ std::string ns;
+
+ // options
+ bool verbose;
+ bool jsMode;
+ int splitInfo;
+
+ // query options
+
+ BSONObj filter;
+ BSONObj sort;
+ long long limit;
+
+ // functions
+
+ std::unique_ptr<Mapper> mapper;
+ std::unique_ptr<Reducer> reducer;
+ std::unique_ptr<Finalizer> finalizer;
+
+ BSONObj mapParams;
+ BSONObj scopeSetup;
+
+ // output tables
+ std::string incLong;
+ std::string tempNamespace;
+
+ enum OutputType {
+ REPLACE, // atomically replace the collection
+ MERGE, // merge keys, override dups
+ REDUCE, // merge keys, reduce dups
+ INMEMORY // only store in memory, limited in size
+ };
+ struct OutputOptions {
+ std::string outDB;
+ std::string collectionName;
+ std::string finalNamespace;
+ // if true, no lock during output operation
+ bool outNonAtomic;
+ OutputType outType;
+ } outputOptions;
+
+ static OutputOptions parseOutputOptions(const std::string& dbname, const BSONObj& cmdObj);
+
+ // max number of keys allowed in JS map before switching mode
+ long jsMaxKeys;
+ // ratio of duplicates vs unique keys before reduce is triggered in js mode
+ float reduceTriggerRatio;
+ // maximum size of map before it gets dumped to disk
+ long maxInMemSize;
+
+ // true when called from mongos to do phase-1 of M/R
+ bool shardedFirstPass;
+
+ static AtomicUInt32 JOB_NUMBER;
+};  // end Config
+
+/**
+ * stores information about intermediate map reduce state
+ * controls flow of data from map->reduce->finalize->output
+ */
+class State {
+public:
+ /**
+ * txn must outlive this State.
+ */
+ State(OperationContext* txn, const Config& c);
+ ~State();
+
+ void init();
+
+ // ---- prep -----
+ bool sourceExists();
+
+ long long incomingDocuments();
+
+ // ---- map stage ----
+
+ /**
+     * stages emitted documents in in-memory storage
+ */
+ void emit(const BSONObj& a);
+
+ /**
+ * Checks the size of the transient in-memory results accumulated so far and potentially
+ * runs reduce in order to compact them. If the data is still too large, it will be
+ * spilled to the output collection.
+ *
+     * NOTE: Make sure that no DB locks are held when calling this function, because it may
+     * try to acquire a write DB lock for the write to the output collection.
+ */
+ void reduceAndSpillInMemoryStateIfNeeded();
+
+ /**
+ * run reduce on _temp
+ */
+ void reduceInMemory();
+
+ /**
+ * transfers in memory storage to temp collection
+ */
+ void dumpToInc();
+ void insertToInc(BSONObj& o);
+ void _insertToInc(BSONObj& o);
+
+ // ------ reduce stage -----------
+
+ void prepTempCollection();
+
+ void finalReduce(BSONList& values);
+
+ void finalReduce(CurOp* op, ProgressMeterHolder& pm);
+
+ // ------- cleanup/data positioning ----------
+
+ /**
+ * Clean up the temporary and incremental collections
+ */
+ void dropTempCollections();
+
+ /**
+     @return number of objects in the collection
+ */
+ long long postProcessCollection(OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
+ long long postProcessCollectionNonAtomic(OperationContext* txn,
+ CurOp* op,
+ ProgressMeterHolder& pm);
+
+ /**
+     * if output type is INMEMORY, appends the in-memory results;
+     * may also append stats or anything else it likes
+ */
+ void appendResults(BSONObjBuilder& b);
+
+ // -------- util ------------
+
+ /**
+ * inserts with correct replication semantics
+ */
+ void insert(const std::string& ns, const BSONObj& o);
+
+ // ------ simple accessors -----
+
+    /** State maintains ownership, do not use past State lifetime */
+ Scope* scope() {
+ return _scope.get();
+ }
+
+ const Config& config() {
+ return _config;
+ }
+
+ bool isOnDisk() {
+ return _onDisk;
+ }
+
+ long long numEmits() const {
+ if (_jsMode)
+ return _scope->getNumberLongLong("_emitCt");
+ return _numEmits;
+ }
+ long long numReduces() const {
+ if (_jsMode)
+ return _scope->getNumberLongLong("_redCt");
+ return _config.reducer->numReduces;
+ }
+ long long numInMemKeys() const {
+ if (_jsMode)
+ return _scope->getNumberLongLong("_keyCt");
+ return _temp->size();
+ }
+
+ bool jsMode() {
+ return _jsMode;
+ }
+ void switchMode(bool jsMode);
+ void bailFromJS();
+
+ Collection* getCollectionOrUassert(Database* db, StringData ns);
+
+ const Config& _config;
+ DBDirectClient _db;
+ bool _useIncremental; // use an incremental collection
+
+protected:
+ /**
+ * Appends a new document to the in-memory list of tuples, which are under that
+ * document's key.
+ *
+ * @return estimated in-memory size occupied by the newly added document.
+ */
+ int _add(InMemory* im, const BSONObj& a);
+
+ OperationContext* _txn;
+ std::unique_ptr<Scope> _scope;
+    bool _onDisk;  // whether the end result of this map/reduce is stored on disk
+
+ std::unique_ptr<InMemory> _temp;
+ long _size; // bytes in _temp
+ long _dupCount; // number of duplicate key entries
+
+ long long _numEmits;
+
+ bool _jsMode;
+ ScriptingFunction _reduceAll;
+ ScriptingFunction _reduceAndEmit;
+ ScriptingFunction _reduceAndFinalize;
+ ScriptingFunction _reduceAndFinalizeAndInsert;
+};
+
+BSONObj fast_emit(const BSONObj& args, void* data);
+BSONObj _bailFromJS(const BSONObj& args, void* data);
+
+void addPrivilegesRequiredForMapReduce(Command* commandTemplate,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out);
+} // end mr namespace
+}
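
State::emit() above stages documents in the InMemory map (key -> list of tuples) and _add() keeps a rough byte count; reduceAndSpillInMemoryStateIfNeeded() compacts duplicate keys and spills to the incremental collection once the estimate grows too large. A rough standalone sketch of that accumulate-then-spill loop, with std::string keys and int payloads standing in for BSON and an arbitrary threshold (names and numbers below are illustrative, not the real defaults):

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
    std::map<std::string, std::vector<int>> inMemory;  // plays the role of mr::InMemory
    long size = 0;                                     // rough bytes held, like State::_size
    const long maxInMemSize = 64;                      // illustrative spill threshold

    auto emit = [&](const std::string& key, int value) {
        inMemory[key].push_back(value);
        size += static_cast<long>(key.size() + sizeof(value));  // crude estimate, like _add()
        if (size > maxInMemSize) {
            // The real State would reduceInMemory() first and then dumpToInc()
            // if the reduced result is still too big to keep in memory.
            std::cout << "would reduce/spill " << inMemory.size() << " keys\n";
            inMemory.clear();
            size = 0;
        }
    };

    for (int i = 0; i < 20; ++i)
        emit(i % 2 ? "odd" : "even", i);
    return 0;
}
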
diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp
index 64e65f3e501..4d11661fd2d 100644
--- a/src/mongo/db/commands/mr_common.cpp
+++ b/src/mongo/db/commands/mr_common.cpp
@@ -41,107 +41,96 @@
namespace mongo {
- namespace mr {
- Config::OutputOptions Config::parseOutputOptions(const std::string& dbname,
- const BSONObj& cmdObj) {
- Config::OutputOptions outputOptions;
-
- outputOptions.outNonAtomic = false;
- if (cmdObj["out"].type() == String) {
- outputOptions.collectionName = cmdObj["out"].String();
- outputOptions.outType = REPLACE;
- }
- else if (cmdObj["out"].type() == Object) {
- BSONObj o = cmdObj["out"].embeddedObject();
-
- if (o.hasElement("normal")) {
- outputOptions.outType = REPLACE;
- outputOptions.collectionName = o["normal"].String();
- }
- else if (o.hasElement("replace")) {
- outputOptions.outType = REPLACE;
- outputOptions.collectionName = o["replace"].String();
- }
- else if (o.hasElement("merge")) {
- outputOptions.outType = MERGE;
- outputOptions.collectionName = o["merge"].String();
- }
- else if (o.hasElement("reduce")) {
- outputOptions.outType = REDUCE;
- outputOptions.collectionName = o["reduce"].String();
- }
- else if (o.hasElement("inline")) {
- outputOptions.outType = INMEMORY;
- }
- else {
- uasserted(13522,
- str::stream() << "please specify one of "
- << "[replace|merge|reduce|inline] in 'out' object");
- }
-
- if (o.hasElement("db")) {
- outputOptions.outDB = o["db"].String();
- }
-
- if (o.hasElement("nonAtomic")) {
- outputOptions.outNonAtomic = o["nonAtomic"].Bool();
- if (outputOptions.outNonAtomic)
- uassert(15895,
- "nonAtomic option cannot be used with this output type",
- (outputOptions.outType == REDUCE ||
- outputOptions.outType == MERGE));
- }
- }
- else {
- uasserted(13606 , "'out' has to be a string or an object");
- }
-
- if (outputOptions.outType != INMEMORY) {
- outputOptions.finalNamespace = mongoutils::str::stream()
- << (outputOptions.outDB.empty() ? dbname : outputOptions.outDB)
- << "." << outputOptions.collectionName;
- }
-
- return outputOptions;
+namespace mr {
+Config::OutputOptions Config::parseOutputOptions(const std::string& dbname, const BSONObj& cmdObj) {
+ Config::OutputOptions outputOptions;
+
+ outputOptions.outNonAtomic = false;
+ if (cmdObj["out"].type() == String) {
+ outputOptions.collectionName = cmdObj["out"].String();
+ outputOptions.outType = REPLACE;
+ } else if (cmdObj["out"].type() == Object) {
+ BSONObj o = cmdObj["out"].embeddedObject();
+
+ if (o.hasElement("normal")) {
+ outputOptions.outType = REPLACE;
+ outputOptions.collectionName = o["normal"].String();
+ } else if (o.hasElement("replace")) {
+ outputOptions.outType = REPLACE;
+ outputOptions.collectionName = o["replace"].String();
+ } else if (o.hasElement("merge")) {
+ outputOptions.outType = MERGE;
+ outputOptions.collectionName = o["merge"].String();
+ } else if (o.hasElement("reduce")) {
+ outputOptions.outType = REDUCE;
+ outputOptions.collectionName = o["reduce"].String();
+ } else if (o.hasElement("inline")) {
+ outputOptions.outType = INMEMORY;
+ } else {
+ uasserted(13522,
+ str::stream() << "please specify one of "
+ << "[replace|merge|reduce|inline] in 'out' object");
}
- void addPrivilegesRequiredForMapReduce(Command* commandTemplate,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- Config::OutputOptions outputOptions = Config::parseOutputOptions(dbname, cmdObj);
-
- ResourcePattern inputResource(commandTemplate->parseResourcePattern(dbname, cmdObj));
- uassert(17142, mongoutils::str::stream() <<
- "Invalid input resource " << inputResource.toString(),
- inputResource.isExactNamespacePattern());
- out->push_back(Privilege(inputResource, ActionType::find));
-
- if (outputOptions.outType != Config::INMEMORY) {
- ActionSet outputActions;
- outputActions.addAction(ActionType::insert);
- if (outputOptions.outType == Config::REPLACE) {
- outputActions.addAction(ActionType::remove);
- }
- else {
- outputActions.addAction(ActionType::update);
- }
-
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- outputActions.addAction(ActionType::bypassDocumentValidation);
- }
-
- ResourcePattern outputResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(outputOptions.finalNamespace)));
- uassert(17143, mongoutils::str::stream() << "Invalid target namespace " <<
- outputResource.ns().ns(),
- outputResource.ns().isValid());
-
- // TODO: check if outputNs exists and add createCollection privilege if not
- out->push_back(Privilege(outputResource, outputActions));
- }
+ if (o.hasElement("db")) {
+ outputOptions.outDB = o["db"].String();
}
+
+ if (o.hasElement("nonAtomic")) {
+ outputOptions.outNonAtomic = o["nonAtomic"].Bool();
+ if (outputOptions.outNonAtomic)
+ uassert(15895,
+ "nonAtomic option cannot be used with this output type",
+ (outputOptions.outType == REDUCE || outputOptions.outType == MERGE));
+ }
+ } else {
+ uasserted(13606, "'out' has to be a string or an object");
+ }
+
+ if (outputOptions.outType != INMEMORY) {
+ outputOptions.finalNamespace = mongoutils::str::stream()
+ << (outputOptions.outDB.empty() ? dbname : outputOptions.outDB) << "."
+ << outputOptions.collectionName;
}
+ return outputOptions;
+}
+
+void addPrivilegesRequiredForMapReduce(Command* commandTemplate,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ Config::OutputOptions outputOptions = Config::parseOutputOptions(dbname, cmdObj);
+
+ ResourcePattern inputResource(commandTemplate->parseResourcePattern(dbname, cmdObj));
+ uassert(17142,
+ mongoutils::str::stream() << "Invalid input resource " << inputResource.toString(),
+ inputResource.isExactNamespacePattern());
+ out->push_back(Privilege(inputResource, ActionType::find));
+
+ if (outputOptions.outType != Config::INMEMORY) {
+ ActionSet outputActions;
+ outputActions.addAction(ActionType::insert);
+ if (outputOptions.outType == Config::REPLACE) {
+ outputActions.addAction(ActionType::remove);
+ } else {
+ outputActions.addAction(ActionType::update);
+ }
+
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ outputActions.addAction(ActionType::bypassDocumentValidation);
+ }
+
+ ResourcePattern outputResource(
+ ResourcePattern::forExactNamespace(NamespaceString(outputOptions.finalNamespace)));
+ uassert(17143,
+ mongoutils::str::stream() << "Invalid target namespace "
+ << outputResource.ns().ns(),
+ outputResource.ns().isValid());
+
+ // TODO: check if outputNs exists and add createCollection privilege if not
+ out->push_back(Privilege(outputResource, outputActions));
+ }
+}
+}
}
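
parseOutputOptions() above reduces to a small decision table: a string 'out' means REPLACE into that collection, an object picks the type from its first recognized key (normal/replace -> REPLACE, merge -> MERGE, reduce -> REDUCE, inline -> INMEMORY), nonAtomic is only legal with merge or reduce, and finalNamespace is assembled as '<out.db or dbname>.<collection>'. A condensed standalone sketch of the object-form branching, with a std::map of strings standing in for the BSON 'out' object (the 'normal' alias is omitted; everything here is illustrative):

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

enum class OutType { REPLACE, MERGE, REDUCE, INMEMORY };

struct Out {
    std::string db, coll, finalNs;
    bool nonAtomic = false;
    OutType type = OutType::REPLACE;
};

// Mirrors the branching in Config::parseOutputOptions() for the object form of 'out'.
static Out parse(const std::string& dbname, const std::map<std::string, std::string>& out) {
    Out o;
    if (out.count("replace")) {
        o.type = OutType::REPLACE;
        o.coll = out.at("replace");
    } else if (out.count("merge")) {
        o.type = OutType::MERGE;
        o.coll = out.at("merge");
    } else if (out.count("reduce")) {
        o.type = OutType::REDUCE;
        o.coll = out.at("reduce");
    } else if (out.count("inline")) {
        o.type = OutType::INMEMORY;
    } else {
        throw std::invalid_argument("specify one of replace|merge|reduce|inline");
    }

    if (out.count("db"))
        o.db = out.at("db");
    o.nonAtomic = out.count("nonAtomic") != 0;  // presence treated as true in this sketch
    if (o.nonAtomic && o.type != OutType::MERGE && o.type != OutType::REDUCE)
        throw std::invalid_argument("nonAtomic only valid with merge or reduce");

    if (o.type != OutType::INMEMORY)
        o.finalNs = (o.db.empty() ? dbname : o.db) + "." + o.coll;
    return o;
}

int main() {
    Out o = parse("mydb", {{"merge", "mycoll"}, {"nonAtomic", "true"}});
    std::cout << o.finalNs << "\n";  // prints "mydb.mycoll"
    return 0;
}
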
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index 72c28154c9d..93e90851257 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -41,121 +41,171 @@ using namespace mongo;
namespace {
- /**
- * Tests for mr::Config
- */
+/**
+ * Tests for mr::Config
+ */
- /**
- * Helper function to verify field of mr::Config::OutputOptions.
- */
- template <typename T> void _compareOutputOptionField(const std::string& dbname,
- const std::string& cmdObjStr,
- const std::string& fieldName,
- const T& actual, const T& expected) {
- if (actual == expected) return;
- FAIL(str::stream() << "parseOutputOptions(\"" << dbname << ", " << cmdObjStr << "): "
- << fieldName << ": Expected: " << expected << ". Actual: " << actual);
- }
+/**
+ * Helper function to verify field of mr::Config::OutputOptions.
+ */
+template <typename T>
+void _compareOutputOptionField(const std::string& dbname,
+ const std::string& cmdObjStr,
+ const std::string& fieldName,
+ const T& actual,
+ const T& expected) {
+ if (actual == expected)
+ return;
+    FAIL(str::stream() << "parseOutputOptions(\"" << dbname << "\", " << cmdObjStr << "): "
+ << fieldName << ": Expected: " << expected << ". Actual: " << actual);
+}
- /**
- * Returns string representation of mr::Config::OutputType
- */
- std::string _getOutTypeString(mr::Config::OutputType outType) {
- switch (outType) {
- case mr::Config::REPLACE: return "REPLACE";
- case mr::Config::MERGE: return "MERGE";
- case mr::Config::REDUCE: return "REDUCE";
- case mr::Config::INMEMORY: return "INMEMORY";
- }
- invariant(0);
+/**
+ * Returns string representation of mr::Config::OutputType
+ */
+std::string _getOutTypeString(mr::Config::OutputType outType) {
+ switch (outType) {
+ case mr::Config::REPLACE:
+ return "REPLACE";
+ case mr::Config::MERGE:
+ return "MERGE";
+ case mr::Config::REDUCE:
+ return "REDUCE";
+ case mr::Config::INMEMORY:
+ return "INMEMORY";
}
+ invariant(0);
+}
- /**
- * Test helper function to check expected result of parseOutputOptions.
- */
- void _testConfigParseOutputOptions(const std::string& dbname, const std::string& cmdObjStr,
- const std::string& expectedOutDb,
- const std::string& expectedCollectionName,
- const std::string& expectedFinalNamespace,
- bool expectedOutNonAtomic,
- mr::Config::OutputType expectedOutType) {
- const BSONObj cmdObj = fromjson(cmdObjStr);
- mr::Config::OutputOptions outputOptions = mr::Config::parseOutputOptions(dbname, cmdObj);
- _compareOutputOptionField(dbname, cmdObjStr, "outDb", outputOptions.outDB, expectedOutDb);
- _compareOutputOptionField(dbname, cmdObjStr, "collectionName",
- outputOptions.collectionName, expectedCollectionName);
- _compareOutputOptionField(dbname, cmdObjStr, "finalNamespace",
- outputOptions.finalNamespace, expectedFinalNamespace);
- _compareOutputOptionField(dbname, cmdObjStr, "outNonAtomic", outputOptions.outNonAtomic,
- expectedOutNonAtomic);
- _compareOutputOptionField(dbname, cmdObjStr, "outType",
- _getOutTypeString(outputOptions.outType),
- _getOutTypeString(expectedOutType));
- }
+/**
+ * Test helper function to check expected result of parseOutputOptions.
+ */
+void _testConfigParseOutputOptions(const std::string& dbname,
+ const std::string& cmdObjStr,
+ const std::string& expectedOutDb,
+ const std::string& expectedCollectionName,
+ const std::string& expectedFinalNamespace,
+ bool expectedOutNonAtomic,
+ mr::Config::OutputType expectedOutType) {
+ const BSONObj cmdObj = fromjson(cmdObjStr);
+ mr::Config::OutputOptions outputOptions = mr::Config::parseOutputOptions(dbname, cmdObj);
+ _compareOutputOptionField(dbname, cmdObjStr, "outDb", outputOptions.outDB, expectedOutDb);
+ _compareOutputOptionField(
+ dbname, cmdObjStr, "collectionName", outputOptions.collectionName, expectedCollectionName);
+ _compareOutputOptionField(
+ dbname, cmdObjStr, "finalNamespace", outputOptions.finalNamespace, expectedFinalNamespace);
+ _compareOutputOptionField(
+ dbname, cmdObjStr, "outNonAtomic", outputOptions.outNonAtomic, expectedOutNonAtomic);
+ _compareOutputOptionField(dbname,
+ cmdObjStr,
+ "outType",
+ _getOutTypeString(outputOptions.outType),
+ _getOutTypeString(expectedOutType));
+}
- /**
- * Tests for mr::Config::parseOutputOptions.
- */
- TEST(ConfigOutputOptionsTest, parseOutputOptions) {
- // Missing 'out' field.
- ASSERT_THROWS(mr::Config::parseOutputOptions("mydb", fromjson("{}")), UserException);
- // 'out' must be either string or object.
- ASSERT_THROWS(mr::Config::parseOutputOptions("mydb", fromjson("{out: 99}")),
- UserException);
- // 'out.nonAtomic' is not supported with normal, replace or inline.
- ASSERT_THROWS(mr::Config::parseOutputOptions(
- "mydb",
- fromjson("{out: {normal: 'mycoll', nonAtomic: true}}")),
- UserException);
- ASSERT_THROWS(mr::Config::parseOutputOptions(
- "mydb",
- fromjson("{out: {replace: 'mycoll', nonAtomic: true}}")),
- UserException);
- ASSERT_THROWS(mr::Config::parseOutputOptions(
- "mydb",
- fromjson("{out: {inline: 'mycoll', nonAtomic: true}}")),
- UserException);
- // Unknown output specifer.
- ASSERT_THROWS(mr::Config::parseOutputOptions(
- "mydb",
- fromjson("{out: {no_such_out_type: 'mycoll'}}")),
- UserException);
+/**
+ * Tests for mr::Config::parseOutputOptions.
+ */
+TEST(ConfigOutputOptionsTest, parseOutputOptions) {
+ // Missing 'out' field.
+ ASSERT_THROWS(mr::Config::parseOutputOptions("mydb", fromjson("{}")), UserException);
+ // 'out' must be either string or object.
+ ASSERT_THROWS(mr::Config::parseOutputOptions("mydb", fromjson("{out: 99}")), UserException);
+ // 'out.nonAtomic' is not supported with normal, replace or inline.
+ ASSERT_THROWS(mr::Config::parseOutputOptions(
+ "mydb", fromjson("{out: {normal: 'mycoll', nonAtomic: true}}")),
+ UserException);
+ ASSERT_THROWS(mr::Config::parseOutputOptions(
+ "mydb", fromjson("{out: {replace: 'mycoll', nonAtomic: true}}")),
+ UserException);
+ ASSERT_THROWS(mr::Config::parseOutputOptions(
+ "mydb", fromjson("{out: {inline: 'mycoll', nonAtomic: true}}")),
+ UserException);
+    // Unknown output specifier.
+ ASSERT_THROWS(
+ mr::Config::parseOutputOptions("mydb", fromjson("{out: {no_such_out_type: 'mycoll'}}")),
+ UserException);
- // 'out' is string.
- _testConfigParseOutputOptions("mydb", "{out: 'mycoll'}",
- "", "mycoll", "mydb.mycoll", false, mr::Config::REPLACE);
- // 'out' is object.
- _testConfigParseOutputOptions("mydb", "{out: {normal: 'mycoll'}}",
- "", "mycoll", "mydb.mycoll", false, mr::Config::REPLACE);
- // 'out.db' overrides dbname parameter
- _testConfigParseOutputOptions("mydb1", "{out: {replace: 'mycoll', db: 'mydb2'}}",
- "mydb2", "mycoll", "mydb2.mycoll", false,
- mr::Config::REPLACE);
- // 'out.nonAtomic' is supported with merge and reduce.
- _testConfigParseOutputOptions("mydb", "{out: {merge: 'mycoll', nonAtomic: true}}",
- "", "mycoll", "mydb.mycoll", true, mr::Config::MERGE);
- _testConfigParseOutputOptions("mydb", "{out: {reduce: 'mycoll', nonAtomic: true}}",
- "", "mycoll", "mydb.mycoll", true, mr::Config::REDUCE);
- // inline
- _testConfigParseOutputOptions("mydb1", "{out: {inline: 'mycoll', db: 'mydb2'}}",
- "mydb2", "", "", false, mr::Config::INMEMORY);
+ // 'out' is string.
+ _testConfigParseOutputOptions(
+ "mydb", "{out: 'mycoll'}", "", "mycoll", "mydb.mycoll", false, mr::Config::REPLACE);
+ // 'out' is object.
+ _testConfigParseOutputOptions("mydb",
+ "{out: {normal: 'mycoll'}}",
+ "",
+ "mycoll",
+ "mydb.mycoll",
+ false,
+ mr::Config::REPLACE);
+ // 'out.db' overrides dbname parameter
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {replace: 'mycoll', db: 'mydb2'}}",
+ "mydb2",
+ "mycoll",
+ "mydb2.mycoll",
+ false,
+ mr::Config::REPLACE);
+ // 'out.nonAtomic' is supported with merge and reduce.
+ _testConfigParseOutputOptions("mydb",
+ "{out: {merge: 'mycoll', nonAtomic: true}}",
+ "",
+ "mycoll",
+ "mydb.mycoll",
+ true,
+ mr::Config::MERGE);
+ _testConfigParseOutputOptions("mydb",
+ "{out: {reduce: 'mycoll', nonAtomic: true}}",
+ "",
+ "mycoll",
+ "mydb.mycoll",
+ true,
+ mr::Config::REDUCE);
+ // inline
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {inline: 'mycoll', db: 'mydb2'}}",
+ "mydb2",
+ "",
+ "",
+ false,
+ mr::Config::INMEMORY);
- // Order should not matter in fields of 'out' object.
- _testConfigParseOutputOptions("mydb1", "{out: {db: 'mydb2', normal: 'mycoll'}}",
- "mydb2", "mycoll", "mydb2.mycoll", false,
- mr::Config::REPLACE);
- _testConfigParseOutputOptions("mydb1", "{out: {db: 'mydb2', replace: 'mycoll'}}",
- "mydb2", "mycoll", "mydb2.mycoll", false,
- mr::Config::REPLACE);
- _testConfigParseOutputOptions("mydb1", "{out: {nonAtomic: true, merge: 'mycoll'}}",
- "", "mycoll", "mydb1.mycoll", true,
- mr::Config::MERGE);
- _testConfigParseOutputOptions("mydb1", "{out: {nonAtomic: true, reduce: 'mycoll'}}",
- "", "mycoll", "mydb1.mycoll", true,
- mr::Config::REDUCE);
- _testConfigParseOutputOptions("mydb1", "{out: {db: 'mydb2', inline: 'mycoll'}}",
- "mydb2", "", "", false, mr::Config::INMEMORY);
- }
+ // Order should not matter in fields of 'out' object.
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {db: 'mydb2', normal: 'mycoll'}}",
+ "mydb2",
+ "mycoll",
+ "mydb2.mycoll",
+ false,
+ mr::Config::REPLACE);
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {db: 'mydb2', replace: 'mycoll'}}",
+ "mydb2",
+ "mycoll",
+ "mydb2.mycoll",
+ false,
+ mr::Config::REPLACE);
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {nonAtomic: true, merge: 'mycoll'}}",
+ "",
+ "mycoll",
+ "mydb1.mycoll",
+ true,
+ mr::Config::MERGE);
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {nonAtomic: true, reduce: 'mycoll'}}",
+ "",
+ "mycoll",
+ "mydb1.mycoll",
+ true,
+ mr::Config::REDUCE);
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {db: 'mydb2', inline: 'mycoll'}}",
+ "mydb2",
+ "",
+ "",
+ false,
+ mr::Config::INMEMORY);
+}
} // namespace
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index c512c2a8c8b..a9f1ad7e619 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -42,52 +42,60 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- class AppendOplogNoteCmd : public Command {
- public:
- AppendOplogNoteCmd() : Command( "appendOplogNote" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help << "Adds a no-op entry to the oplog";
+class AppendOplogNoteCmd : public Command {
+public:
+ AppendOplogNoteCmd() : Command("appendOplogNote") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "Adds a no-op entry to the oplog";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::appendOplogNote)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::appendOplogNote)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
+ return Status::OK();
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (!repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NoReplicationEnabled,
+ "Must have replication set up to run \"appendOplogNote\""));
+ }
+ BSONElement dataElement;
+ Status status = bsonExtractTypedField(cmdObj, "data", Object, &dataElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- if (!repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
- return appendCommandStatus(result, Status(
- ErrorCodes::NoReplicationEnabled,
- "Must have replication set up to run \"appendOplogNote\""));
- }
- BSONElement dataElement;
- Status status = bsonExtractTypedField(cmdObj, "data", Object, &dataElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
+ ScopedTransaction scopedXact(txn, MODE_X);
+ Lock::GlobalWrite globalWrite(txn->lockState());
- WriteUnitOfWork wuow(txn);
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, dataElement.Obj());
- wuow.commit();
- return true;
- }
+ WriteUnitOfWork wuow(txn);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, dataElement.Obj());
+ wuow.commit();
+ return true;
+ }
- } appendOplogNoteCmd;
+} appendOplogNoteCmd;
-} // namespace mongo
+} // namespace mongo
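
appendOplogNote simply validates the 'data' object field, takes the global write lock, and writes a no-op message entry to the oplog. A hedged sketch of invoking it from a client, assuming the standalone legacy C++ driver is available; the header path, host, and payload are illustrative assumptions, not part of this change:

#include <iostream>
#include "mongo/client/dbclient.h"  // legacy C++ driver header (assumption)

int main() {
    mongo::client::initialize();      // legacy driver bootstrap (assumption)
    mongo::DBClientConnection conn;
    conn.connect("localhost:27017");  // must be a replica-set member; command is admin-only

    // appendOplogNote requires a 'data' object payload.
    mongo::BSONObj cmd =
        BSON("appendOplogNote" << 1 << "data" << BSON("msg" << "manual checkpoint"));
    mongo::BSONObj info;
    bool ok = conn.runCommand("admin", cmd, info);
    std::cout << (ok ? "note appended" : info.toString()) << std::endl;
    return 0;
}
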
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 36acc5d9bb8..33e24b6648e 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -41,128 +41,121 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- class ParallelCollectionScanCmd : public Command {
- public:
-
- struct ExtentInfo {
- ExtentInfo( RecordId dl, size_t s )
- : diskLoc(dl), size(s) {
- }
- RecordId diskLoc;
- size_t size;
- };
-
- // ------------------------------------------------
-
- ParallelCollectionScanCmd() : Command( "parallelCollectionScan" ){}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- Privilege p(parseResourcePattern(dbname, cmdObj), actions);
- if ( AuthorizationSession::get(client)->isAuthorizedForPrivilege(p) )
- return Status::OK();
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+using std::unique_ptr;
+using std::string;
+
+class ParallelCollectionScanCmd : public Command {
+public:
+ struct ExtentInfo {
+ ExtentInfo(RecordId dl, size_t s) : diskLoc(dl), size(s) {}
+ RecordId diskLoc;
+ size_t size;
+ };
+
+ // ------------------------------------------------
+
+ ParallelCollectionScanCmd() : Command("parallelCollectionScan") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ Privilege p(parseResourcePattern(dbname, cmdObj), actions);
+ if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
+ return Status::OK();
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ NamespaceString ns(dbname, cmdObj[name].String());
+
+ AutoGetCollectionForRead ctx(txn, ns.ns());
+
+ Collection* collection = ctx.getCollection();
+ if (!collection)
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NamespaceNotFound,
+ str::stream() << "ns does not exist: " << ns.ns()));
+
+ size_t numCursors = static_cast<size_t>(cmdObj["numCursors"].numberInt());
+
+ if (numCursors == 0 || numCursors > 10000)
+ return appendCommandStatus(result,
+ Status(ErrorCodes::BadValue,
+ str::stream()
+ << "numCursors has to be between 1 and 10000"
+ << " was: " << numCursors));
+
+ auto iterators = collection->getManyCursors(txn);
+ if (iterators.size() < numCursors) {
+ numCursors = iterators.size();
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- NamespaceString ns( dbname, cmdObj[name].String() );
+ OwnedPointerVector<PlanExecutor> execs;
+ for (size_t i = 0; i < numCursors; i++) {
+ WorkingSet* ws = new WorkingSet();
+ MultiIteratorStage* mis = new MultiIteratorStage(txn, ws, collection);
- AutoGetCollectionForRead ctx(txn, ns.ns());
+ PlanExecutor* rawExec;
+ // Takes ownership of 'ws' and 'mis'.
+ Status execStatus =
+ PlanExecutor::make(txn, ws, mis, collection, PlanExecutor::YIELD_AUTO, &rawExec);
+ invariant(execStatus.isOK());
+ unique_ptr<PlanExecutor> curExec(rawExec);
- Collection* collection = ctx.getCollection();
- if ( !collection )
- return appendCommandStatus( result,
- Status( ErrorCodes::NamespaceNotFound,
- str::stream() <<
- "ns does not exist: " << ns.ns() ) );
-
- size_t numCursors = static_cast<size_t>( cmdObj["numCursors"].numberInt() );
-
- if ( numCursors == 0 || numCursors > 10000 )
- return appendCommandStatus( result,
- Status( ErrorCodes::BadValue,
- str::stream() <<
- "numCursors has to be between 1 and 10000" <<
- " was: " << numCursors ) );
-
- auto iterators = collection->getManyCursors(txn);
- if (iterators.size() < numCursors) {
- numCursors = iterators.size();
- }
+ // The PlanExecutor was registered on construction due to the YIELD_AUTO policy.
+ // We have to deregister it, as it will be registered with ClientCursor.
+ curExec->deregisterExec();
- OwnedPointerVector<PlanExecutor> execs;
- for ( size_t i = 0; i < numCursors; i++ ) {
- WorkingSet* ws = new WorkingSet();
- MultiIteratorStage* mis = new MultiIteratorStage(txn, ws, collection);
+ // Need to save state while yielding locks between now and getMore().
+ curExec->saveState();
- PlanExecutor* rawExec;
- // Takes ownership of 'ws' and 'mis'.
- Status execStatus = PlanExecutor::make(txn, ws, mis, collection,
- PlanExecutor::YIELD_AUTO, &rawExec);
- invariant(execStatus.isOK());
- unique_ptr<PlanExecutor> curExec(rawExec);
+ execs.push_back(curExec.release());
+ }
- // The PlanExecutor was registered on construction due to the YIELD_AUTO policy.
- // We have to deregister it, as it will be registered with ClientCursor.
- curExec->deregisterExec();
+ // transfer iterators to executors using a round-robin distribution.
+ // TODO consider using a common work queue once invalidation issues go away.
+ for (size_t i = 0; i < iterators.size(); i++) {
+ PlanExecutor* theExec = execs[i % execs.size()];
+ MultiIteratorStage* mis = static_cast<MultiIteratorStage*>(theExec->getRootStage());
- // Need to save state while yielding locks between now and getMore().
- curExec->saveState();
+ // This wasn't called above as they weren't assigned yet
+ iterators[i]->savePositioned();
- execs.push_back(curExec.release());
- }
+ mis->addIterator(std::move(iterators[i]));
+ }
- // transfer iterators to executors using a round-robin distribution.
- // TODO consider using a common work queue once invalidation issues go away.
- for (size_t i = 0; i < iterators.size(); i++) {
- PlanExecutor* theExec = execs[i % execs.size()];
- MultiIteratorStage* mis = static_cast<MultiIteratorStage*>(theExec->getRootStage());
+ {
+ BSONArrayBuilder bucketsBuilder;
+ for (size_t i = 0; i < execs.size(); i++) {
+ // transfer ownership of an executor to the ClientCursor (which manages its own
+ // lifetime).
+ ClientCursor* cc =
+ new ClientCursor(collection->getCursorManager(), execs.releaseAt(i), ns.ns());
- // This wasn't called above as they weren't assigned yet
- iterators[i]->savePositioned();
+ BSONObjBuilder threadResult;
+ appendCursorResponseObject(cc->cursorid(), ns.ns(), BSONArray(), &threadResult);
+ threadResult.appendBool("ok", 1);
- mis->addIterator(std::move(iterators[i]));
+ bucketsBuilder.append(threadResult.obj());
}
-
- {
- BSONArrayBuilder bucketsBuilder;
- for (size_t i = 0; i < execs.size(); i++) {
- // transfer ownership of an executor to the ClientCursor (which manages its own
- // lifetime).
- ClientCursor* cc = new ClientCursor( collection->getCursorManager(),
- execs.releaseAt(i),
- ns.ns() );
-
- BSONObjBuilder threadResult;
- appendCursorResponseObject( cc->cursorid(),
- ns.ns(),
- BSONArray(),
- &threadResult );
- threadResult.appendBool( "ok", 1 );
-
- bucketsBuilder.append( threadResult.obj() );
- }
- result.appendArray( "cursors", bucketsBuilder.obj() );
- }
-
- return true;
-
+ result.appendArray("cursors", bucketsBuilder.obj());
}
- } parallelCollectionScanCmd;
+ return true;
+ }
+} parallelCollectionScanCmd;
}
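
parallelCollectionScan caps numCursors at the number of record iterators the storage engine returns, then deals the iterators out round-robin, one executor per requested cursor. A tiny standalone sketch of that distribution step, with plain ints standing in for record iterators and executors (purely illustrative):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    std::vector<int> iterators = {0, 1, 2, 3, 4, 5, 6};  // what getManyCursors() returned
    size_t numCursors = 3;                               // what the client asked for
    if (numCursors > iterators.size())
        numCursors = iterators.size();                   // never more cursors than iterators

    // One bucket per returned cursor; iterator i goes to bucket i % numCursors.
    std::vector<std::vector<int>> buckets(numCursors);
    for (size_t i = 0; i < iterators.size(); ++i)
        buckets[i % numCursors].push_back(iterators[i]);

    for (size_t b = 0; b < buckets.size(); ++b) {
        std::cout << "cursor " << b << ":";
        for (int it : buckets[b])
            std::cout << ' ' << it;
        std::cout << '\n';
    }
    return 0;
}
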
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index c9048ee54c2..7ff531bb8a1 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -51,556 +51,549 @@ using std::stringstream;
namespace mongo {
- namespace {
- void appendParameterNames( stringstream& help ) {
- help << "supported:\n";
- const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
- for ( ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i ) {
- help << " " << i->first << "\n";
- }
- }
+namespace {
+void appendParameterNames(stringstream& help) {
+ help << "supported:\n";
+ const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
+ for (ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i) {
+ help << " " << i->first << "\n";
}
+}
+}
- class CmdGet : public Command {
- public:
- CmdGet() : Command( "getParameter" ) { }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::getParameter);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+class CmdGet : public Command {
+public:
+ CmdGet() : Command("getParameter") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::getParameter);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual void help(stringstream& help) const {
+ help << "get administrative option(s)\nexample:\n";
+ help << "{ getParameter:1, notablescan:1 }\n";
+ appendParameterNames(help);
+ help << "{ getParameter:'*' } to get everything\n";
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ bool all = *cmdObj.firstElement().valuestrsafe() == '*';
+
+ int before = result.len();
+
+ const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
+ for (ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i) {
+ if (all || cmdObj.hasElement(i->first.c_str())) {
+ i->second->append(txn, result, i->second->name());
+ }
}
- virtual void help( stringstream &help ) const {
- help << "get administrative option(s)\nexample:\n";
- help << "{ getParameter:1, notablescan:1 }\n";
- appendParameterNames( help );
- help << "{ getParameter:'*' } to get everything\n";
+
+ if (before == result.len()) {
+ errmsg = "no option found to get";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- bool all = *cmdObj.firstElement().valuestrsafe() == '*';
-
- int before = result.len();
-
- const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
- for ( ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i ) {
- if ( all || cmdObj.hasElement( i->first.c_str() ) ) {
- i->second->append(txn, result, i->second->name() );
- }
- }
+ return true;
+ }
+} cmdGet;
- if ( before == result.len() ) {
- errmsg = "no option found to get";
+class CmdSet : public Command {
+public:
+ CmdSet() : Command("setParameter") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::setParameter);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual void help(stringstream& help) const {
+ help << "set administrative option(s)\n";
+ help << "{ setParameter:1, <param>:<value> }\n";
+ appendParameterNames(help);
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ int numSet = 0;
+ bool found = false;
+
+ const ServerParameter::Map& parameterMap = ServerParameterSet::getGlobal()->getMap();
+
+ // First check that we aren't setting the same parameter twice and that we actually are
+ // setting parameters that we have registered and can change at runtime
+ BSONObjIterator parameterCheckIterator(cmdObj);
+
+ // We already know that "setParameter" will be the first element in this object, so skip
+ // past that
+ parameterCheckIterator.next();
+
+ // Set of all the parameters the user is attempting to change
+ std::map<std::string, BSONElement> parametersToSet;
+
+ // Iterate all parameters the user passed in to do the initial validation checks,
+ // including verifying that we are not setting the same parameter twice.
+ while (parameterCheckIterator.more()) {
+ BSONElement parameter = parameterCheckIterator.next();
+ std::string parameterName = parameter.fieldName();
+
+ ServerParameter::Map::const_iterator foundParameter = parameterMap.find(parameterName);
+
+ // Check to see if this is actually a valid parameter
+ if (foundParameter == parameterMap.end()) {
+ errmsg = str::stream() << "attempted to set unrecognized parameter ["
+ << parameterName << "], use help:true to see options ";
return false;
}
- return true;
- }
- } cmdGet;
-
- class CmdSet : public Command {
- public:
- CmdSet() : Command( "setParameter" ) { }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::setParameter);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- virtual void help( stringstream &help ) const {
- help << "set administrative option(s)\n";
- help << "{ setParameter:1, <param>:<value> }\n";
- appendParameterNames( help );
- }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- int numSet = 0;
- bool found = false;
-
- const ServerParameter::Map& parameterMap = ServerParameterSet::getGlobal()->getMap();
-
- // First check that we aren't setting the same parameter twice and that we actually are
- // setting parameters that we have registered and can change at runtime
- BSONObjIterator parameterCheckIterator(cmdObj);
-
- // We already know that "setParameter" will be the first element in this object, so skip
- // past that
- parameterCheckIterator.next();
-
- // Set of all the parameters the user is attempting to change
- std::map<std::string, BSONElement> parametersToSet;
-
- // Iterate all parameters the user passed in to do the initial validation checks,
- // including verifying that we are not setting the same parameter twice.
- while (parameterCheckIterator.more()) {
- BSONElement parameter = parameterCheckIterator.next();
- std::string parameterName = parameter.fieldName();
-
- ServerParameter::Map::const_iterator foundParameter =
- parameterMap.find(parameterName);
-
- // Check to see if this is actually a valid parameter
- if (foundParameter == parameterMap.end()) {
- errmsg = str::stream() << "attempted to set unrecognized parameter ["
- << parameterName
- << "], use help:true to see options ";
- return false;
- }
-
- // Make sure we are allowed to change this parameter
- if (!foundParameter->second->allowedToChangeAtRuntime()) {
- errmsg = str::stream() << "not allowed to change [" << parameterName
- << "] at runtime";
- return false;
- }
-
- // Make sure we are only setting this parameter once
- if (parametersToSet.count(parameterName)) {
- errmsg = str::stream() << "attempted to set parameter ["
- << parameterName
- << "] twice in the same setParameter command, "
- << "once to value: ["
- << parametersToSet[parameterName].toString(false)
- << "], and once to value: [" << parameter.toString(false)
- << "]";
- return false;
- }
-
- parametersToSet[parameterName] = parameter;
- }
- // Iterate the parameters that we have confirmed we are setting and actually set them.
- // Not that if setting any one parameter fails, the command will fail, but the user
- // won't see what has been set and what hasn't. See SERVER-8552.
- for (std::map<std::string, BSONElement>::iterator it = parametersToSet.begin();
- it != parametersToSet.end(); ++it) {
- BSONElement parameter = it->second;
- std::string parameterName = it->first;
-
- ServerParameter::Map::const_iterator foundParameter =
- parameterMap.find(parameterName);
-
- if (foundParameter == parameterMap.end()) {
- errmsg = str::stream() << "Parameter: " << parameterName << " that was "
- << "avaliable during our first lookup in the registered "
- << "parameters map is no longer available.";
- return false;
- }
-
- if (numSet == 0) {
- foundParameter->second->append(txn, result, "was");
- }
-
- Status status = foundParameter->second->set(parameter);
- if (status.isOK()) {
- numSet++;
- continue;
- }
-
- errmsg = status.reason();
- result.append("code", status.code());
+ // Make sure we are allowed to change this parameter
+ if (!foundParameter->second->allowedToChangeAtRuntime()) {
+ errmsg = str::stream() << "not allowed to change [" << parameterName
+ << "] at runtime";
return false;
}
- if (numSet == 0 && !found) {
- errmsg = "no option found to set, use help:true to see options ";
+ // Make sure we are only setting this parameter once
+ if (parametersToSet.count(parameterName)) {
+ errmsg = str::stream()
+ << "attempted to set parameter [" << parameterName
+ << "] twice in the same setParameter command, "
+ << "once to value: [" << parametersToSet[parameterName].toString(false)
+ << "], and once to value: [" << parameter.toString(false) << "]";
return false;
}
- return true;
+ parametersToSet[parameterName] = parameter;
}
- } cmdSet;
- namespace {
- using logger::globalLogDomain;
- using logger::LogComponent;
- using logger::LogComponentSetting;
- using logger::LogSeverity;
- using logger::parseLogComponentSettings;
-
- class LogLevelSetting : public ServerParameter {
- public:
- LogLevelSetting() : ServerParameter(ServerParameterSet::getGlobal(), "logLevel") {}
-
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b << name << globalLogDomain()->getMinimumLogSeverity().toInt();
+ // Iterate the parameters that we have confirmed we are setting and actually set them.
+ // Note that if setting any one parameter fails, the command will fail, but the user
+ // won't see what has been set and what hasn't. See SERVER-8552.
+ for (std::map<std::string, BSONElement>::iterator it = parametersToSet.begin();
+ it != parametersToSet.end();
+ ++it) {
+ BSONElement parameter = it->second;
+ std::string parameterName = it->first;
+
+ ServerParameter::Map::const_iterator foundParameter = parameterMap.find(parameterName);
+
+ if (foundParameter == parameterMap.end()) {
+ errmsg = str::stream() << "Parameter: " << parameterName << " that was "
+ << "available during our first lookup in the registered "
+ << "parameters map is no longer available.";
+ return false;
}
- virtual Status set(const BSONElement& newValueElement) {
- int newValue;
- if (!newValueElement.coerce(&newValue) || newValue < 0)
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for logLevel: " << newValueElement);
- LogSeverity newSeverity = (newValue > 0) ? LogSeverity::Debug(newValue) :
- LogSeverity::Log();
- globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
- return Status::OK();
+ if (numSet == 0) {
+ foundParameter->second->append(txn, result, "was");
}
- virtual Status setFromString(const std::string& str) {
- int newValue;
- Status status = parseNumberFromString(str, &newValue);
- if (!status.isOK())
- return status;
- if (newValue < 0)
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for logLevel: " << newValue);
- LogSeverity newSeverity = (newValue > 0) ? LogSeverity::Debug(newValue) :
- LogSeverity::Log();
- globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
- return Status::OK();
- }
- } logLevelSetting;
-
- /**
- * Log component verbosity.
- * Log levels of log component hierarchy.
- * Negative value for a log component means the default log level will be used.
- */
- class LogComponentVerbositySetting : public ServerParameter {
- MONGO_DISALLOW_COPYING(LogComponentVerbositySetting);
- public:
- LogComponentVerbositySetting()
- : ServerParameter(ServerParameterSet::getGlobal(), "logComponentVerbosity") {}
-
- virtual void append(OperationContext* txn, BSONObjBuilder& b,
- const std::string& name) {
- BSONObj currentSettings;
- _get(&currentSettings);
- b << name << currentSettings;
+ Status status = foundParameter->second->set(parameter);
+ if (status.isOK()) {
+ numSet++;
+ continue;
}
- virtual Status set(const BSONElement& newValueElement) {
- if (!newValueElement.isABSONObj()) {
- return Status(ErrorCodes::TypeMismatch, mongoutils::str::stream() <<
- "log component verbosity is not a BSON object: " <<
- newValueElement);
- }
- return _set(newValueElement.Obj());
- }
+ errmsg = status.reason();
+ result.append("code", status.code());
+ return false;
+ }
- virtual Status setFromString(const std::string& str) {
- try {
- return _set(mongo::fromjson(str));
- }
- catch (const DBException& ex) {
- return ex.toStatus();
- }
- }
+ if (numSet == 0 && !found) {
+ errmsg = "no option found to set, use help:true to see options ";
+ return false;
+ }
- private:
- /**
- * Returns current settings as a BSON document.
- * The "default" log component is an implementation detail. Don't expose this to users.
- */
- void _get(BSONObj* output) const {
- static const string defaultLogComponentName =
- LogComponent(LogComponent::kDefault).getShortName();
-
- mutablebson::Document doc;
-
- for (int i = 0; i < int(LogComponent::kNumLogComponents); ++i) {
- LogComponent component = static_cast<LogComponent::Value>(i);
-
- int severity = -1;
- if (globalLogDomain()->hasMinimumLogSeverity(component)) {
- severity = globalLogDomain()->getMinimumLogSeverity(component).toInt();
- }
-
- // Save LogComponent::kDefault LogSeverity at root
- if (component == LogComponent::kDefault) {
- doc.root().appendInt("verbosity", severity);
- continue;
- }
-
- mutablebson::Element element = doc.makeElementObject(component.getShortName());
- element.appendInt("verbosity", severity);
-
- mutablebson::Element parentElement = _getParentElement(doc, component);
- parentElement.pushBack(element);
- }
-
- BSONObj result = doc.getObject();
- output->swap(result);
- invariant(!output->hasField(defaultLogComponentName));
- }
+ return true;
+ }
+} cmdSet;
- /**
- * Updates component hierarchy log levels.
- *
- * BSON Format:
- * {
- * verbosity: 4, <-- maps to 'default' log component.
- * componentA: {
- * verbosity: 2, <-- sets componentA's log level to 2.
- * componentB: {
- * verbosity: 1, <-- sets componentA.componentB's log level to 1.
- * }
- * componentC: {
- * verbosity: -1, <-- clears componentA.componentC's log level so that
- * its final loglevel will be inherited from componentA.
- * }
- * },
- * componentD : 3 <-- sets componentD's log level to 3 (alternative to
- * subdocument with 'verbosity' field).
- * }
- *
- * For the default component, the log level is read from the top-level
- * "verbosity" field.
- * For non-default components, we look up the element using the component's
- * dotted name. If the "<dotted component name>" field is a number, the log
- * level will be read from the field's value.
- * Otherwise, we assume that the "<dotted component name>" field is an
- * object with a "verbosity" field that holds the log level for the component.
- * The more verbose format with the "verbosity" field is intended to support
- * setting of log levels of both parent and child log components in the same
- * BSON document.
- *
- * Ignore elements in BSON object that do not map to a log component's dotted
- * name.
- */
- Status _set(const BSONObj& bsonSettings) const {
- StatusWith< std::vector<LogComponentSetting> > parseStatus =
- parseLogComponentSettings(bsonSettings);
-
- if (!parseStatus.isOK()) {
- return parseStatus.getStatus();
- }
-
- std::vector<LogComponentSetting> settings = parseStatus.getValue();
- std::vector<LogComponentSetting>::iterator it = settings.begin();
- for (; it < settings.end(); ++it) {
- LogComponentSetting newSetting = *it;
-
- // Negative value means to clear log level of component.
- if (newSetting.level < 0) {
- globalLogDomain()->clearMinimumLoggedSeverity(newSetting.component);
- continue;
- }
- // Convert non-negative value to Log()/Debug(N).
- LogSeverity newSeverity = (newSetting.level > 0) ?
- LogSeverity::Debug(newSetting.level) : LogSeverity::Log();
- globalLogDomain()->setMinimumLoggedSeverity(newSetting.component,
- newSeverity);
- }
-
- return Status::OK();
- }
+namespace {
+using logger::globalLogDomain;
+using logger::LogComponent;
+using logger::LogComponentSetting;
+using logger::LogSeverity;
+using logger::parseLogComponentSettings;
- /**
- * Search document for element corresponding to log component's parent.
- */
- static mutablebson::Element _getParentElement(mutablebson::Document& doc,
- LogComponent component) {
- // Hide LogComponent::kDefault
- if (component == LogComponent::kDefault) {
- return doc.end();
- }
- LogComponent parentComponent = component.parent();
-
- // Attach LogComponent::kDefault children to root
- if (parentComponent == LogComponent::kDefault) {
- return doc.root();
- }
- mutablebson::Element grandParentElement = _getParentElement(doc, parentComponent);
- return grandParentElement.findFirstChildNamed(parentComponent.getShortName());
- }
- } logComponentVerbositySetting;
-
- } // namespace
-
- namespace {
- class SSLModeSetting : public ServerParameter {
- public:
- SSLModeSetting() : ServerParameter(ServerParameterSet::getGlobal(), "sslMode",
- false, // allowedToChangeAtStartup
- true // allowedToChangeAtRuntime
- ) {}
-
- std::string sslModeStr() {
- switch (sslGlobalParams.sslMode.load()) {
- case SSLParams::SSLMode_disabled:
- return "disabled";
- case SSLParams::SSLMode_allowSSL:
- return "allowSSL";
- case SSLParams::SSLMode_preferSSL:
- return "preferSSL";
- case SSLParams::SSLMode_requireSSL:
- return "requireSSL";
- default:
- return "undefined";
- }
- }
+class LogLevelSetting : public ServerParameter {
+public:
+ LogLevelSetting() : ServerParameter(ServerParameterSet::getGlobal(), "logLevel") {}
- virtual void append(
- OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b << name << sslModeStr();
- }
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b << name << globalLogDomain()->getMinimumLogSeverity().toInt();
+ }
- virtual Status set(const BSONElement& newValueElement) {
- try {
- return setFromString(newValueElement.String());
- }
- catch (MsgAssertionException msg) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for sslMode via setParameter command: "
- << newValueElement);
- }
+ virtual Status set(const BSONElement& newValueElement) {
+ int newValue;
+ if (!newValueElement.coerce(&newValue) || newValue < 0)
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for logLevel: " << newValueElement);
+ LogSeverity newSeverity =
+ (newValue > 0) ? LogSeverity::Debug(newValue) : LogSeverity::Log();
+ globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
+ return Status::OK();
+ }
- }
+ virtual Status setFromString(const std::string& str) {
+ int newValue;
+ Status status = parseNumberFromString(str, &newValue);
+ if (!status.isOK())
+ return status;
+ if (newValue < 0)
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Invalid value for logLevel: " << newValue);
+ LogSeverity newSeverity =
+ (newValue > 0) ? LogSeverity::Debug(newValue) : LogSeverity::Log();
+ globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
+ return Status::OK();
+ }
+} logLevelSetting;
- virtual Status setFromString(const std::string& str) {
-#ifndef MONGO_CONFIG_SSL
- return Status(ErrorCodes::IllegalOperation, mongoutils::str::stream() <<
- "Unable to set sslMode, SSL support is not compiled into server");
-#endif
- if (str != "disabled" && str != "allowSSL" &&
- str != "preferSSL" && str != "requireSSL") {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for sslMode via setParameter command: "
- << str);
- }
-
- int oldMode = sslGlobalParams.sslMode.load();
- if (str == "preferSSL" && oldMode == SSLParams::SSLMode_allowSSL) {
- sslGlobalParams.sslMode.store(SSLParams::SSLMode_preferSSL);
- }
- else if (str == "requireSSL" && oldMode == SSLParams::SSLMode_preferSSL) {
- sslGlobalParams.sslMode.store(SSLParams::SSLMode_requireSSL);
- }
- else {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Illegal state transition for sslMode, attempt to change from "
- << sslModeStr() << " to " << str);
- }
- return Status::OK();
- }
- } sslModeSetting;
-
- class ClusterAuthModeSetting : public ServerParameter {
- public:
- ClusterAuthModeSetting() :
- ServerParameter(ServerParameterSet::getGlobal(), "clusterAuthMode",
- false, // allowedToChangeAtStartup
- true // allowedToChangeAtRuntime
- ) {}
-
- std::string clusterAuthModeStr() {
- switch (serverGlobalParams.clusterAuthMode.load()) {
- case ServerGlobalParams::ClusterAuthMode_keyFile:
- return "keyFile";
- case ServerGlobalParams::ClusterAuthMode_sendKeyFile:
- return "sendKeyFile";
- case ServerGlobalParams::ClusterAuthMode_sendX509:
- return "sendX509";
- case ServerGlobalParams::ClusterAuthMode_x509:
- return "x509";
- default:
- return "undefined";
- }
+/**
+ * Log component verbosity.
+ * Log levels of log component hierarchy.
+ * Negative value for a log component means the default log level will be used.
+ */
+class LogComponentVerbositySetting : public ServerParameter {
+ MONGO_DISALLOW_COPYING(LogComponentVerbositySetting);
+
+public:
+ LogComponentVerbositySetting()
+ : ServerParameter(ServerParameterSet::getGlobal(), "logComponentVerbosity") {}
+
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ BSONObj currentSettings;
+ _get(&currentSettings);
+ b << name << currentSettings;
+ }
+
+ virtual Status set(const BSONElement& newValueElement) {
+ if (!newValueElement.isABSONObj()) {
+ return Status(ErrorCodes::TypeMismatch,
+ mongoutils::str::stream()
+ << "log component verbosity is not a BSON object: "
+ << newValueElement);
+ }
+ return _set(newValueElement.Obj());
+ }
+
+ virtual Status setFromString(const std::string& str) {
+ try {
+ return _set(mongo::fromjson(str));
+ } catch (const DBException& ex) {
+ return ex.toStatus();
+ }
+ }
+
+private:
+ /**
+ * Returns current settings as a BSON document.
+ * The "default" log component is an implementation detail. Don't expose this to users.
+ */
+ void _get(BSONObj* output) const {
+ static const string defaultLogComponentName =
+ LogComponent(LogComponent::kDefault).getShortName();
+
+ mutablebson::Document doc;
+
+ for (int i = 0; i < int(LogComponent::kNumLogComponents); ++i) {
+ LogComponent component = static_cast<LogComponent::Value>(i);
+
+ int severity = -1;
+ if (globalLogDomain()->hasMinimumLogSeverity(component)) {
+ severity = globalLogDomain()->getMinimumLogSeverity(component).toInt();
}
- virtual void append(
- OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b << name << clusterAuthModeStr();
+ // Save LogComponent::kDefault LogSeverity at root
+ if (component == LogComponent::kDefault) {
+ doc.root().appendInt("verbosity", severity);
+ continue;
}
- virtual Status set(const BSONElement& newValueElement) {
- try {
- return setFromString(newValueElement.String());
- }
- catch (MsgAssertionException msg) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for clusterAuthMode via setParameter command: "
- << newValueElement);
- }
+ mutablebson::Element element = doc.makeElementObject(component.getShortName());
+ element.appendInt("verbosity", severity);
+
+ mutablebson::Element parentElement = _getParentElement(doc, component);
+ parentElement.pushBack(element);
+ }
+
+ BSONObj result = doc.getObject();
+ output->swap(result);
+ invariant(!output->hasField(defaultLogComponentName));
+ }
+
+ /**
+ * Updates component hierarchy log levels.
+ *
+ * BSON Format:
+ * {
+ * verbosity: 4, <-- maps to 'default' log component.
+ * componentA: {
+ * verbosity: 2, <-- sets componentA's log level to 2.
+ * componentB: {
+ * verbosity: 1, <-- sets componentA.componentB's log level to 1.
+ * }
+ * componentC: {
+ * verbosity: -1, <-- clears componentA.componentC's log level so that
+ * its final log level will be inherited from componentA.
+ * }
+ * },
+ * componentD : 3 <-- sets componentD's log level to 3 (alternative to
+ * subdocument with 'verbosity' field).
+ * }
+ *
+ * For the default component, the log level is read from the top-level
+ * "verbosity" field.
+ * For non-default components, we look up the element using the component's
+ * dotted name. If the "<dotted component name>" field is a number, the log
+ * level will be read from the field's value.
+ * Otherwise, we assume that the "<dotted component name>" field is an
+ * object with a "verbosity" field that holds the log level for the component.
+ * The more verbose format with the "verbosity" field is intended to support
+ * setting of log levels of both parent and child log components in the same
+ * BSON document.
+ *
+ * Ignore elements in BSON object that do not map to a log component's dotted
+ * name.
+ */
+ Status _set(const BSONObj& bsonSettings) const {
+ StatusWith<std::vector<LogComponentSetting>> parseStatus =
+ parseLogComponentSettings(bsonSettings);
+
+ if (!parseStatus.isOK()) {
+ return parseStatus.getStatus();
+ }
+
+ std::vector<LogComponentSetting> settings = parseStatus.getValue();
+ std::vector<LogComponentSetting>::iterator it = settings.begin();
+ for (; it < settings.end(); ++it) {
+ LogComponentSetting newSetting = *it;
+ // Negative value means to clear log level of component.
+ if (newSetting.level < 0) {
+ globalLogDomain()->clearMinimumLoggedSeverity(newSetting.component);
+ continue;
}
+ // Convert non-negative value to Log()/Debug(N).
+ LogSeverity newSeverity =
+ (newSetting.level > 0) ? LogSeverity::Debug(newSetting.level) : LogSeverity::Log();
+ globalLogDomain()->setMinimumLoggedSeverity(newSetting.component, newSeverity);
+ }
+
+ return Status::OK();
+ }
+
+ /**
+ * Search document for element corresponding to log component's parent.
+ */
+ static mutablebson::Element _getParentElement(mutablebson::Document& doc,
+ LogComponent component) {
+ // Hide LogComponent::kDefault
+ if (component == LogComponent::kDefault) {
+ return doc.end();
+ }
+ LogComponent parentComponent = component.parent();
+
+ // Attach LogComponent::kDefault children to root
+ if (parentComponent == LogComponent::kDefault) {
+ return doc.root();
+ }
+ mutablebson::Element grandParentElement = _getParentElement(doc, parentComponent);
+ return grandParentElement.findFirstChildNamed(parentComponent.getShortName());
+ }
+} logComponentVerbositySetting;
+
+} // namespace
+
+namespace {
+class SSLModeSetting : public ServerParameter {
+public:
+ SSLModeSetting()
+ : ServerParameter(ServerParameterSet::getGlobal(),
+ "sslMode",
+ false, // allowedToChangeAtStartup
+ true // allowedToChangeAtRuntime
+ ) {}
+
+ std::string sslModeStr() {
+ switch (sslGlobalParams.sslMode.load()) {
+ case SSLParams::SSLMode_disabled:
+ return "disabled";
+ case SSLParams::SSLMode_allowSSL:
+ return "allowSSL";
+ case SSLParams::SSLMode_preferSSL:
+ return "preferSSL";
+ case SSLParams::SSLMode_requireSSL:
+ return "requireSSL";
+ default:
+ return "undefined";
+ }
+ }
+
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b << name << sslModeStr();
+ }
+
+ virtual Status set(const BSONElement& newValueElement) {
+ try {
+ return setFromString(newValueElement.String());
+ } catch (MsgAssertionException msg) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for sslMode via setParameter command: "
+ << newValueElement);
+ }
+ }
- virtual Status setFromString(const std::string& str) {
+ virtual Status setFromString(const std::string& str) {
#ifndef MONGO_CONFIG_SSL
- return Status(ErrorCodes::IllegalOperation, mongoutils::str::stream() <<
- "Unable to set clusterAuthMode, " <<
- "SSL support is not compiled into server");
+ return Status(ErrorCodes::IllegalOperation,
+ mongoutils::str::stream()
+ << "Unable to set sslMode, SSL support is not compiled into server");
#endif
- if (str != "keyFile" && str != "sendKeyFile" &&
- str != "sendX509" && str != "x509") {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for clusterAuthMode via setParameter command: "
- << str);
- }
-
- int oldMode = serverGlobalParams.clusterAuthMode.load();
- int sslMode = sslGlobalParams.sslMode.load();
- if (str == "sendX509" &&
- oldMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
- if (sslMode == SSLParams::SSLMode_disabled ||
- sslMode == SSLParams::SSLMode_allowSSL) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Illegal state transition for clusterAuthMode, " <<
- "need to enable SSL for outgoing connections");
- }
- serverGlobalParams.clusterAuthMode.store
- (ServerGlobalParams::ClusterAuthMode_sendX509);
-#ifdef MONGO_CONFIG_SSL
- setInternalUserAuthParams(BSON(saslCommandMechanismFieldName <<
- "MONGODB-X509" <<
- saslCommandUserDBFieldName << "$external" <<
- saslCommandUserFieldName <<
- getSSLManager()->getSSLConfiguration()
- .clientSubjectName));
-#endif
- }
- else if (str == "x509" &&
- oldMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
- serverGlobalParams.clusterAuthMode.store
- (ServerGlobalParams::ClusterAuthMode_x509);
- }
- else {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Illegal state transition for clusterAuthMode, change from "
- << clusterAuthModeStr() << " to " << str);
- }
- return Status::OK();
- }
- } clusterAuthModeSetting;
+ if (str != "disabled" && str != "allowSSL" && str != "preferSSL" && str != "requireSSL") {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for sslMode via setParameter command: " << str);
+ }
- ExportedServerParameter<bool> QuietSetting( ServerParameterSet::getGlobal(),
- "quiet",
- &serverGlobalParams.quiet,
- true,
- true );
+ int oldMode = sslGlobalParams.sslMode.load();
+ if (str == "preferSSL" && oldMode == SSLParams::SSLMode_allowSSL) {
+ sslGlobalParams.sslMode.store(SSLParams::SSLMode_preferSSL);
+ } else if (str == "requireSSL" && oldMode == SSLParams::SSLMode_preferSSL) {
+ sslGlobalParams.sslMode.store(SSLParams::SSLMode_requireSSL);
+ } else {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Illegal state transition for sslMode, attempt to change from "
+ << sslModeStr() << " to " << str);
+ }
+ return Status::OK();
+ }
+} sslModeSetting;
+
+class ClusterAuthModeSetting : public ServerParameter {
+public:
+ ClusterAuthModeSetting()
+ : ServerParameter(ServerParameterSet::getGlobal(),
+ "clusterAuthMode",
+ false, // allowedToChangeAtStartup
+ true // allowedToChangeAtRuntime
+ ) {}
+
+ std::string clusterAuthModeStr() {
+ switch (serverGlobalParams.clusterAuthMode.load()) {
+ case ServerGlobalParams::ClusterAuthMode_keyFile:
+ return "keyFile";
+ case ServerGlobalParams::ClusterAuthMode_sendKeyFile:
+ return "sendKeyFile";
+ case ServerGlobalParams::ClusterAuthMode_sendX509:
+ return "sendX509";
+ case ServerGlobalParams::ClusterAuthMode_x509:
+ return "x509";
+ default:
+ return "undefined";
+ }
+ }
- ExportedServerParameter<int> MaxConsecutiveFailedChecksSetting(
- ServerParameterSet::getGlobal(),
- "replMonitorMaxFailedChecks",
- &ReplicaSetMonitor::maxConsecutiveFailedChecks,
- false, // allowedToChangeAtStartup
- true); // allowedToChangeAtRuntime
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b << name << clusterAuthModeStr();
+ }
- ExportedServerParameter<bool> TraceExceptionsSetting(ServerParameterSet::getGlobal(),
- "traceExceptions",
- &DBException::traceExceptions,
- false, // allowedToChangeAtStartup
- true); // allowedToChangeAtRuntime
+ virtual Status set(const BSONElement& newValueElement) {
+ try {
+ return setFromString(newValueElement.String());
+ } catch (MsgAssertionException msg) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for clusterAuthMode via setParameter command: "
+ << newValueElement);
+ }
+ }
+ virtual Status setFromString(const std::string& str) {
+#ifndef MONGO_CONFIG_SSL
+ return Status(ErrorCodes::IllegalOperation,
+ mongoutils::str::stream() << "Unable to set clusterAuthMode, "
+ << "SSL support is not compiled into server");
+#endif
+ if (str != "keyFile" && str != "sendKeyFile" && str != "sendX509" && str != "x509") {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for clusterAuthMode via setParameter command: "
+ << str);
+ }
+ int oldMode = serverGlobalParams.clusterAuthMode.load();
+ int sslMode = sslGlobalParams.sslMode.load();
+ if (str == "sendX509" && oldMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
+ if (sslMode == SSLParams::SSLMode_disabled || sslMode == SSLParams::SSLMode_allowSSL) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Illegal state transition for clusterAuthMode, "
+ << "need to enable SSL for outgoing connections");
+ }
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_sendX509);
+#ifdef MONGO_CONFIG_SSL
+ setInternalUserAuthParams(
+ BSON(saslCommandMechanismFieldName
+ << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
+ << saslCommandUserFieldName
+ << getSSLManager()->getSSLConfiguration().clientSubjectName));
+#endif
+ } else if (str == "x509" && oldMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_x509);
+ } else {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Illegal state transition for clusterAuthMode, change from "
+ << clusterAuthModeStr() << " to " << str);
+ }
+ return Status::OK();
}
-
+} clusterAuthModeSetting;
+
+ExportedServerParameter<bool> QuietSetting(
+ ServerParameterSet::getGlobal(), "quiet", &serverGlobalParams.quiet, true, true);
+
+ExportedServerParameter<int> MaxConsecutiveFailedChecksSetting(
+ ServerParameterSet::getGlobal(),
+ "replMonitorMaxFailedChecks",
+ &ReplicaSetMonitor::maxConsecutiveFailedChecks,
+ false, // allowedToChangeAtStartup
+ true); // allowedToChangeAtRuntime
+
+ExportedServerParameter<bool> TraceExceptionsSetting(ServerParameterSet::getGlobal(),
+ "traceExceptions",
+ &DBException::traceExceptions,
+ false, // allowedToChangeAtStartup
+ true); // allowedToChangeAtRuntime
+}
}
-
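For illustration only (not part of this diff), a minimal sketch of registering another runtime-settable parameter with the same ExportedServerParameter pattern shown above; the parameter name, backing variable, and header path are hypothetical/assumed:

#include "mongo/db/server_parameters.h"

namespace mongo {
namespace {
// Hypothetical backing storage for the illustrative parameter.
int exampleThreshold = 100;

// Same constructor shape as QuietSetting/TraceExceptionsSetting above:
// (parameter set, name, pointer to value, allowedToChangeAtStartup, allowedToChangeAtRuntime).
ExportedServerParameter<int> ExampleThresholdSetting(ServerParameterSet::getGlobal(),
                                                     "exampleThreshold",
                                                     &exampleThreshold,
                                                     true,   // allowedToChangeAtStartup
                                                     true);  // allowedToChangeAtRuntime
}  // namespace
}  // namespace mongo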
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 4f9274dc6b7..95423cf2e7b 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -54,295 +54,284 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::endl;
- using std::shared_ptr;
- using std::string;
- using std::stringstream;
- using std::unique_ptr;
-
- /**
- * Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
- * requests). Otherwise, returns false.
- */
- static bool handleCursorCommand(OperationContext* txn,
- const string& ns,
- ClientCursorPin* pin,
- PlanExecutor* exec,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) {
-
- ClientCursor* cursor = pin ? pin->c() : NULL;
- if (pin) {
- invariant(cursor);
- invariant(cursor->getExecutor() == exec);
- invariant(cursor->isAggCursor());
- }
+using boost::intrusive_ptr;
+using std::endl;
+using std::shared_ptr;
+using std::string;
+using std::stringstream;
+using std::unique_ptr;
- const long long defaultBatchSize = 101; // Same as query.
- long long batchSize;
- uassertStatusOK(Command::parseCommandCursorOptions(cmdObj, defaultBatchSize, &batchSize));
-
- // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
- BSONArrayBuilder resultsArray;
- const int byteLimit = MaxBytesToReturnToClientAtOnce;
- BSONObj next;
- for (int objCount = 0; objCount < batchSize; objCount++) {
- // The initial getNext() on a PipelineProxyStage may be very expensive so we don't
- // do it when batchSize is 0 since that indicates a desire for a fast return.
- if (exec->getNext(&next, NULL) != PlanExecutor::ADVANCED) {
- // make it an obvious error to use cursor or executor after this point
- cursor = NULL;
- exec = NULL;
- break;
- }
+/**
+ * Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
+ * requests). Otherwise, returns false.
+ */
+static bool handleCursorCommand(OperationContext* txn,
+ const string& ns,
+ ClientCursorPin* pin,
+ PlanExecutor* exec,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) {
+ ClientCursor* cursor = pin ? pin->c() : NULL;
+ if (pin) {
+ invariant(cursor);
+ invariant(cursor->getExecutor() == exec);
+ invariant(cursor->isAggCursor());
+ }
- if (resultsArray.len() + next.objsize() > byteLimit) {
- // Get the pipeline proxy stage wrapped by this PlanExecutor.
- PipelineProxyStage* proxy = static_cast<PipelineProxyStage*>(exec->getRootStage());
- // too big. next will be the first doc in the second batch
- proxy->pushBack(next);
- break;
- }
+ const long long defaultBatchSize = 101; // Same as query.
+ long long batchSize;
+ uassertStatusOK(Command::parseCommandCursorOptions(cmdObj, defaultBatchSize, &batchSize));
+
+ // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
+ BSONArrayBuilder resultsArray;
+ const int byteLimit = MaxBytesToReturnToClientAtOnce;
+ BSONObj next;
+ for (int objCount = 0; objCount < batchSize; objCount++) {
+ // The initial getNext() on a PipelineProxyStage may be very expensive so we don't
+ // do it when batchSize is 0 since that indicates a desire for a fast return.
+ if (exec->getNext(&next, NULL) != PlanExecutor::ADVANCED) {
+ // make it an obvious error to use cursor or executor after this point
+ cursor = NULL;
+ exec = NULL;
+ break;
+ }
- resultsArray.append(next);
+ if (resultsArray.len() + next.objsize() > byteLimit) {
+ // Get the pipeline proxy stage wrapped by this PlanExecutor.
+ PipelineProxyStage* proxy = static_cast<PipelineProxyStage*>(exec->getRootStage());
+ // too big. next will be the first doc in the second batch
+ proxy->pushBack(next);
+ break;
}
- // NOTE: exec->isEOF() can have side effects such as writing by $out. However, it should
- // be relatively quick since if there was no pin then the input is empty. Also, this
- // violates the contract for batchSize==0. Sharding requires a cursor to be returned in that
- // case. This is ok for now however, since you can't have a sharded collection that doesn't
- // exist.
- const bool canReturnMoreBatches = pin;
- if (!canReturnMoreBatches && exec && !exec->isEOF()) {
- // msgasserting since this shouldn't be possible to trigger from today's aggregation
- // language. The wording assumes that the only reason pin would be null is if the
- // collection doesn't exist.
- msgasserted(17391, str::stream()
- << "Aggregation has more results than fit in initial batch, but can't "
- << "create cursor since collection " << ns << " doesn't exist");
+ resultsArray.append(next);
+ }
+
+ // NOTE: exec->isEOF() can have side effects such as writing by $out. However, it should
+ // be relatively quick since if there was no pin then the input is empty. Also, this
+ // violates the contract for batchSize==0. Sharding requires a cursor to be returned in that
+ // case. This is ok for now however, since you can't have a sharded collection that doesn't
+ // exist.
+ const bool canReturnMoreBatches = pin;
+ if (!canReturnMoreBatches && exec && !exec->isEOF()) {
+ // msgasserting since this shouldn't be possible to trigger from today's aggregation
+ // language. The wording assumes that the only reason pin would be null is if the
+ // collection doesn't exist.
+ msgasserted(
+ 17391,
+ str::stream() << "Aggregation has more results than fit in initial batch, but can't "
+ << "create cursor since collection " << ns << " doesn't exist");
+ }
+
+ if (cursor) {
+ // If a time limit was set on the pipeline, remaining time is "rolled over" to the
+ // cursor (for use by future getmore ops).
+ cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
+
+ CurOp::get(txn)->debug().cursorid = cursor->cursorid();
+
+ if (txn->getClient()->isInDirectClient()) {
+ cursor->setUnownedRecoveryUnit(txn->recoveryUnit());
+ } else {
+ // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
+ // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
+ txn->recoveryUnit()->abandonSnapshot();
+ cursor->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
+ OperationContext::kNotInUnitOfWork) ==
+ OperationContext::kNotInUnitOfWork);
}
- if (cursor) {
- // If a time limit was set on the pipeline, remaining time is "rolled over" to the
- // cursor (for use by future getmore ops).
- cursor->setLeftoverMaxTimeMicros( CurOp::get(txn)->getRemainingMaxTimeMicros() );
+ // Cursor needs to be in a saved state while we yield locks for getmore. State
+ // will be restored in getMore().
+ exec->saveState();
+ }
- CurOp::get(txn)->debug().cursorid = cursor->cursorid();
+ const long long cursorId = cursor ? cursor->cursorid() : 0LL;
+ appendCursorResponseObject(cursorId, ns, resultsArray.arr(), &result);
- if (txn->getClient()->isInDirectClient()) {
- cursor->setUnownedRecoveryUnit(txn->recoveryUnit());
- }
- else {
- // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
- // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
- txn->recoveryUnit()->abandonSnapshot();
- cursor->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
- OperationContext::kNotInUnitOfWork)
- == OperationContext::kNotInUnitOfWork);
- }
+ return static_cast<bool>(cursor);
+}
- // Cursor needs to be in a saved state while we yield locks for getmore. State
- // will be restored in getMore().
- exec->saveState();
- }
- const long long cursorId = cursor ? cursor->cursorid() : 0LL;
- appendCursorResponseObject(cursorId, ns, resultsArray.arr(), &result);
+class PipelineCommand : public Command {
+public:
+ PipelineCommand() : Command(Pipeline::commandName) {} // command is called "aggregate"
- return static_cast<bool>(cursor);
+ // Locks are managed manually, in particular by DocumentSourceCursor.
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ pipeline: [ { $operator: {...}}, ... ]"
+ << ", explain: <bool>"
+ << ", allowDiskUse: <bool>"
+ << ", cursor: {batchSize: <number>}"
+ << " }" << endl
+ << "See http://dochub.mongodb.org/core/aggregation for more details.";
}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ Pipeline::addRequiredPrivileges(this, dbname, cmdObj, out);
+ }
- class PipelineCommand :
- public Command {
- public:
- PipelineCommand() :Command(Pipeline::commandName) {} // command is called "aggregate"
-
- // Locks are managed manually, in particular by DocumentSourceCursor.
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
- virtual void help(stringstream &help) const {
- help << "{ pipeline: [ { $operator: {...}}, ... ]"
- << ", explain: <bool>"
- << ", allowDiskUse: <bool>"
- << ", cursor: {batchSize: <number>}"
- << " }"
- << endl
- << "See http://dochub.mongodb.org/core/aggregation for more details."
- ;
+ virtual bool run(OperationContext* txn,
+ const string& db,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNs(db, cmdObj);
+ if (nsToCollectionSubstring(ns).empty()) {
+ errmsg = "missing collection name";
+ return false;
}
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- Pipeline::addRequiredPrivileges(this, dbname, cmdObj, out);
+ NamespaceString nss(ns);
+
+ intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(txn, nss);
+ pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
+
+ /* try to parse the command; if this fails, then we didn't run */
+ intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
+ if (!pPipeline.get())
+ return false;
+
+ // This is outside of the if block to keep the object alive until the pipeline is finished.
+ BSONObj parsed;
+ if (kDebugBuild && !pPipeline->isExplain() && !pCtx->inShard) {
+ // Make sure all operations round-trip through Pipeline::toBson() correctly by
+ // reparsing every command in debug builds. This is important because sharded
+ // aggregations rely on this ability. Skipping when inShard because this has
+ // already been through the transformation (and this unsets pCtx->inShard).
+ parsed = pPipeline->serialize().toBson();
+ pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
+ verify(pPipeline);
}
- virtual bool run(OperationContext* txn,
- const string &db,
- BSONObj &cmdObj,
- int options,
- string &errmsg,
- BSONObjBuilder &result) {
- const std::string ns = parseNs(db, cmdObj);
- if (nsToCollectionSubstring(ns).empty()) {
- errmsg = "missing collection name";
- return false;
+ PlanExecutor* exec = NULL;
+ unique_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
+ unique_ptr<PlanExecutor> execHolder;
+ {
+ // This will throw if the sharding version for this connection is out of date. The
+ // lock must be held continuously from now until we have created both the output
+ // ClientCursor and the input executor. This ensures that both are using the same
+ // sharding version that we synchronize on here. This is also why we always need to
+ // create a ClientCursor even when we aren't outputting to a cursor. See the comment
+ // on ShardFilterStage for more details.
+ AutoGetCollectionForRead ctx(txn, nss.ns());
+
+ Collection* collection = ctx.getCollection();
+
+ // This does mongod-specific stuff like creating the input PlanExecutor and adding
+ // it to the front of the pipeline if needed.
+ std::shared_ptr<PlanExecutor> input =
+ PipelineD::prepareCursorSource(txn, collection, pPipeline, pCtx);
+ pPipeline->stitch();
+
+ // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
+ // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
+ // PlanExecutor.
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ unique_ptr<PipelineProxyStage> proxy(
+ new PipelineProxyStage(pPipeline, input, ws.get()));
+ Status execStatus = Status::OK();
+ if (NULL == collection) {
+ execStatus = PlanExecutor::make(txn,
+ ws.release(),
+ proxy.release(),
+ nss.ns(),
+ PlanExecutor::YIELD_MANUAL,
+ &exec);
+ } else {
+ execStatus = PlanExecutor::make(txn,
+ ws.release(),
+ proxy.release(),
+ collection,
+ PlanExecutor::YIELD_MANUAL,
+ &exec);
}
- NamespaceString nss(ns);
-
- intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(txn, nss);
- pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
-
- /* try to parse the command; if this fails, then we didn't run */
- intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
- if (!pPipeline.get())
- return false;
-
- // This is outside of the if block to keep the object alive until the pipeline is finished.
- BSONObj parsed;
- if (kDebugBuild && !pPipeline->isExplain() && !pCtx->inShard) {
- // Make sure all operations round-trip through Pipeline::toBson() correctly by
- // reparsing every command in debug builds. This is important because sharded
- // aggregations rely on this ability. Skipping when inShard because this has
- // already been through the transformation (and this unsets pCtx->inShard).
- parsed = pPipeline->serialize().toBson();
- pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
- verify(pPipeline);
+ invariant(execStatus.isOK());
+ execHolder.reset(exec);
+
+ if (!collection && input) {
+ // If we don't have a collection, we won't be able to register any executors, so
+ // make sure that the input PlanExecutor (likely wrapping an EOFStage) doesn't
+ // need to be registered.
+ invariant(!input->collection());
}
- PlanExecutor* exec = NULL;
- unique_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
- unique_ptr<PlanExecutor> execHolder;
- {
- // This will throw if the sharding version for this connection is out of date. The
- // lock must be held continuously from now until we have we created both the output
- // ClientCursor and the input executor. This ensures that both are using the same
- // sharding version that we synchronize on here. This is also why we always need to
- // create a ClientCursor even when we aren't outputting to a cursor. See the comment
- // on ShardFilterStage for more details.
- AutoGetCollectionForRead ctx(txn, nss.ns());
-
- Collection* collection = ctx.getCollection();
-
- // This does mongod-specific stuff like creating the input PlanExecutor and adding
- // it to the front of the pipeline if needed.
- std::shared_ptr<PlanExecutor> input = PipelineD::prepareCursorSource(txn,
- collection,
- pPipeline,
- pCtx);
- pPipeline->stitch();
-
- // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
- // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
- // PlanExecutor.
- unique_ptr<WorkingSet> ws(new WorkingSet());
- unique_ptr<PipelineProxyStage> proxy(
- new PipelineProxyStage(pPipeline, input, ws.get()));
- Status execStatus = Status::OK();
- if (NULL == collection) {
- execStatus = PlanExecutor::make(txn,
- ws.release(),
- proxy.release(),
- nss.ns(),
- PlanExecutor::YIELD_MANUAL,
- &exec);
- }
- else {
- execStatus = PlanExecutor::make(txn,
- ws.release(),
- proxy.release(),
- collection,
- PlanExecutor::YIELD_MANUAL,
- &exec);
- }
- invariant(execStatus.isOK());
- execHolder.reset(exec);
-
- if (!collection && input) {
- // If we don't have a collection, we won't be able to register any executors, so
- // make sure that the input PlanExecutor (likely wrapping an EOFStage) doesn't
- // need to be registered.
- invariant(!input->collection());
- }
-
- if (collection) {
- const bool isAggCursor = true; // enable special locking behavior
- ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
- execHolder.release(),
- nss.ns(),
- 0,
- cmdObj.getOwned(),
- isAggCursor);
- pin.reset(new ClientCursorPin(collection->getCursorManager(),
- cursor->cursorid()));
- // Don't add any code between here and the start of the try block.
- }
-
- // At this point, it is safe to release the collection lock.
- // - In the case where we have a collection: we will need to reacquire the
- // collection lock later when cleaning up our ClientCursorPin.
- // - In the case where we don't have a collection: our PlanExecutor won't be
- // registered, so it will be safe to clean it up outside the lock.
- invariant(NULL == execHolder.get() || NULL == execHolder->collection());
+ if (collection) {
+ const bool isAggCursor = true; // enable special locking behavior
+ ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
+ execHolder.release(),
+ nss.ns(),
+ 0,
+ cmdObj.getOwned(),
+ isAggCursor);
+ pin.reset(new ClientCursorPin(collection->getCursorManager(), cursor->cursorid()));
+ // Don't add any code between here and the start of the try block.
}
- try {
- // Unless set to true, the ClientCursor created above will be deleted on block exit.
- bool keepCursor = false;
+ // At this point, it is safe to release the collection lock.
+ // - In the case where we have a collection: we will need to reacquire the
+ // collection lock later when cleaning up our ClientCursorPin.
+ // - In the case where we don't have a collection: our PlanExecutor won't be
+ // registered, so it will be safe to clean it up outside the lock.
+ invariant(NULL == execHolder.get() || NULL == execHolder->collection());
+ }
- const bool isCursorCommand = !cmdObj["cursor"].eoo();
+ try {
+ // Unless set to true, the ClientCursor created above will be deleted on block exit.
+ bool keepCursor = false;
- // If both explain and cursor are specified, explain wins.
- if (pPipeline->isExplain()) {
- result << "stages" << Value(pPipeline->writeExplainOps());
- }
- else if (isCursorCommand) {
- keepCursor = handleCursorCommand(txn,
- nss.ns(),
- pin.get(),
- exec,
- cmdObj,
- result);
- }
- else {
- pPipeline->run(result);
- }
+ const bool isCursorCommand = !cmdObj["cursor"].eoo();
- // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock
- // in order to do so.
- if (pin) {
- // We acquire locks here with DBLock and CollectionLock instead of using
- // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
- // sharding version is out of date, and we don't care if the sharding version
- // has changed.
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
- if (keepCursor) {
- pin->release();
- }
- else {
- pin->deleteUnderlying();
- }
- }
+ // If both explain and cursor are specified, explain wins.
+ if (pPipeline->isExplain()) {
+ result << "stages" << Value(pPipeline->writeExplainOps());
+ } else if (isCursorCommand) {
+ keepCursor = handleCursorCommand(txn, nss.ns(), pin.get(), exec, cmdObj, result);
+ } else {
+ pPipeline->run(result);
}
- catch (...) {
- // On our way out of scope, we clean up our ClientCursorPin if needed.
- if (pin) {
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+
+ // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock
+ // in order to do so.
+ if (pin) {
+ // We acquire locks here with DBLock and CollectionLock instead of using
+ // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
+ // sharding version is out of date, and we don't care if the sharding version
+ // has changed.
+ Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ if (keepCursor) {
+ pin->release();
+ } else {
pin->deleteUnderlying();
}
- throw;
}
- // Any code that needs the cursor pinned must be inside the try block, above.
-
- return true;
+ } catch (...) {
+ // On our way out of scope, we clean up our ClientCursorPin if needed.
+ if (pin) {
+ Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ pin->deleteUnderlying();
+ }
+ throw;
}
- } cmdPipeline;
+ // Any code that needs the cursor pinned must be inside the try block, above.
+
+ return true;
+ }
+} cmdPipeline;
-} // namespace mongo
+} // namespace mongo
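For illustration only (not part of this diff), a minimal sketch of the command document that PipelineCommand::run() above parses, matching the shape described in its help() text; the collection name and $match expression are hypothetical:

#include "mongo/db/jsobj.h"

namespace {
// Builds { aggregate: <collection>, pipeline: [ { $match: ... } ], cursor: { batchSize: <n> } }.
mongo::BSONObj makeExampleAggregateCommand() {
    using namespace mongo;
    return BSON("aggregate" << "mycoll"  // hypothetical collection
                            << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("x" << 1)))
                            << "cursor" << BSON("batchSize" << 101));  // same default batch size as above
}
}  // namespace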
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 3fc5d8f313e..1fc0b40493c 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -48,399 +48,401 @@
namespace {
- using std::string;
- using std::unique_ptr;
- using namespace mongo;
-
- /**
- * Utility function to extract error code and message from status
- * and append to BSON results.
- */
- void addStatus(const Status& status, BSONObjBuilder& builder) {
- builder.append("ok", status.isOK() ? 1.0 : 0.0);
- if (!status.isOK()) {
- builder.append("code", status.code());
- }
- if (!status.reason().empty()) {
- builder.append("errmsg", status.reason());
- }
- }
+using std::string;
+using std::unique_ptr;
+using namespace mongo;
- /**
- * Retrieves a collection's plan cache from the database.
- */
- static Status getPlanCache(OperationContext* txn,
- Collection* collection,
- const string& ns,
- PlanCache** planCacheOut) {
- *planCacheOut = NULL;
-
- if (NULL == collection) {
- return Status(ErrorCodes::BadValue, "no such collection");
- }
+/**
+ * Utility function to extract error code and message from status
+ * and append to BSON results.
+ */
+void addStatus(const Status& status, BSONObjBuilder& builder) {
+ builder.append("ok", status.isOK() ? 1.0 : 0.0);
+ if (!status.isOK()) {
+ builder.append("code", status.code());
+ }
+ if (!status.reason().empty()) {
+ builder.append("errmsg", status.reason());
+ }
+}
- CollectionInfoCache* infoCache = collection->infoCache();
- invariant(infoCache);
+/**
+ * Retrieves a collection's plan cache from the database.
+ */
+static Status getPlanCache(OperationContext* txn,
+ Collection* collection,
+ const string& ns,
+ PlanCache** planCacheOut) {
+ *planCacheOut = NULL;
+
+ if (NULL == collection) {
+ return Status(ErrorCodes::BadValue, "no such collection");
+ }
- PlanCache* planCache = infoCache->getPlanCache();
- invariant(planCache);
+ CollectionInfoCache* infoCache = collection->infoCache();
+ invariant(infoCache);
- *planCacheOut = planCache;
- return Status::OK();
- }
+ PlanCache* planCache = infoCache->getPlanCache();
+ invariant(planCache);
- //
- // Command instances.
- // Registers commands with the command system and make commands
- // available to the client.
- //
+ *planCacheOut = planCache;
+ return Status::OK();
+}
- MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands, MONGO_NO_PREREQUISITES)(
- InitializerContext* context) {
+//
+// Command instances.
+// Registers commands with the command system and makes commands
+// available to the client.
+//
- // PlanCacheCommand constructors refer to static ActionType instances.
- // Registering commands in a mongo static initializer ensures that
- // the ActionType construction will be completed first.
- new PlanCacheListQueryShapes();
- new PlanCacheClear();
- new PlanCacheListPlans();
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands,
+ MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+ // PlanCacheCommand constructors refer to static ActionType instances.
+ // Registering commands in a mongo static initializer ensures that
+ // the ActionType construction will be completed first.
+ new PlanCacheListQueryShapes();
+ new PlanCacheClear();
+ new PlanCacheListPlans();
- return Status::OK();
- }
+ return Status::OK();
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
- using std::unique_ptr;
+using std::string;
+using std::stringstream;
+using std::vector;
+using std::unique_ptr;
- PlanCacheCommand::PlanCacheCommand(const string& name, const string& helpText,
- ActionType actionType)
- : Command(name),
- helpText(helpText),
- actionType(actionType) { }
+PlanCacheCommand::PlanCacheCommand(const string& name,
+ const string& helpText,
+ ActionType actionType)
+ : Command(name), helpText(helpText), actionType(actionType) {}
- bool PlanCacheCommand::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- string ns = parseNs(dbname, cmdObj);
+bool PlanCacheCommand::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns = parseNs(dbname, cmdObj);
- Status status = runPlanCacheCommand(txn, ns, cmdObj, &result);
+ Status status = runPlanCacheCommand(txn, ns, cmdObj, &result);
- if (!status.isOK()) {
- addStatus(status, result);
- return false;
- }
-
- return true;
+ if (!status.isOK()) {
+ addStatus(status, result);
+ return false;
}
- bool PlanCacheCommand::isWriteCommandForConfigServer() const { return false; }
+ return true;
+}
- bool PlanCacheCommand::slaveOk() const {
- return false;
- }
+bool PlanCacheCommand::isWriteCommandForConfigServer() const {
+ return false;
+}
- bool PlanCacheCommand::slaveOverrideOk() const {
- return true;
- }
+bool PlanCacheCommand::slaveOk() const {
+ return false;
+}
- void PlanCacheCommand::help(stringstream& ss) const {
- ss << helpText;
- }
+bool PlanCacheCommand::slaveOverrideOk() const {
+ return true;
+}
- Status PlanCacheCommand::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+void PlanCacheCommand::help(stringstream& ss) const {
+ ss << helpText;
+}
- if (authzSession->isAuthorizedForActionsOnResource(pattern, actionType)) {
- return Status::OK();
- }
+Status PlanCacheCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, actionType)) {
+ return Status::OK();
}
- // static
- Status PlanCacheCommand::canonicalize(OperationContext* txn,
- const string& ns,
- const BSONObj& cmdObj,
- CanonicalQuery** canonicalQueryOut) {
- // query - required
- BSONElement queryElt = cmdObj.getField("query");
- if (queryElt.eoo()) {
- return Status(ErrorCodes::BadValue, "required field query missing");
- }
- if (!queryElt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "required field query must be an object");
- }
- if (queryElt.eoo()) {
- return Status(ErrorCodes::BadValue, "required field query missing");
- }
- BSONObj queryObj = queryElt.Obj();
-
- // sort - optional
- BSONElement sortElt = cmdObj.getField("sort");
- BSONObj sortObj;
- if (!sortElt.eoo()) {
- if (!sortElt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "optional field sort must be an object");
- }
- sortObj = sortElt.Obj();
- }
-
- // projection - optional
- BSONElement projElt = cmdObj.getField("projection");
- BSONObj projObj;
- if (!projElt.eoo()) {
- if (!projElt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "optional field projection must be an object");
- }
- projObj = projElt.Obj();
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+}
+
+// static
+Status PlanCacheCommand::canonicalize(OperationContext* txn,
+ const string& ns,
+ const BSONObj& cmdObj,
+ CanonicalQuery** canonicalQueryOut) {
+ // query - required
+ BSONElement queryElt = cmdObj.getField("query");
+ if (queryElt.eoo()) {
+ return Status(ErrorCodes::BadValue, "required field query missing");
+ }
+ if (!queryElt.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "required field query must be an object");
+ }
+ BSONObj queryObj = queryElt.Obj();
+
+ // sort - optional
+ BSONElement sortElt = cmdObj.getField("sort");
+ BSONObj sortObj;
+ if (!sortElt.eoo()) {
+ if (!sortElt.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "optional field sort must be an object");
}
+ sortObj = sortElt.Obj();
+ }
- // Create canonical query
- CanonicalQuery* cqRaw;
-
- const NamespaceString nss(ns);
- const WhereCallbackReal whereCallback(txn, nss.db());
-
- Status result = CanonicalQuery::canonicalize(
- ns, queryObj, sortObj, projObj, &cqRaw, whereCallback);
- if (!result.isOK()) {
- return result;
+ // projection - optional
+ BSONElement projElt = cmdObj.getField("projection");
+ BSONObj projObj;
+ if (!projElt.eoo()) {
+ if (!projElt.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "optional field projection must be an object");
}
-
- *canonicalQueryOut = cqRaw;
- return Status::OK();
+ projObj = projElt.Obj();
}
- PlanCacheListQueryShapes::PlanCacheListQueryShapes() : PlanCacheCommand("planCacheListQueryShapes",
- "Displays all query shapes in a collection.",
- ActionType::planCacheRead) { }
+ // Create canonical query
+ CanonicalQuery* cqRaw;
- Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(txn, ns);
+ const NamespaceString nss(ns);
+ const WhereCallbackReal whereCallback(txn, nss.db());
- PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
- if (!status.isOK()) {
- // No collection - return results with empty shapes array.
- BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
- arrayBuilder.doneFast();
- return Status::OK();
- }
- return list(*planCache, bob);
+ Status result =
+ CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cqRaw, whereCallback);
+ if (!result.isOK()) {
+ return result;
}
- // static
- Status PlanCacheListQueryShapes::list(const PlanCache& planCache, BSONObjBuilder* bob) {
- invariant(bob);
-
- // Fetch all cached solutions from plan cache.
- vector<PlanCacheEntry*> solutions = planCache.getAllEntries();
-
+ *canonicalQueryOut = cqRaw;
+ return Status::OK();
+}
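For reference, a minimal sketch of a caller that satisfies the validation above: "query" is required and must be an object, while "sort" and "projection" are optional objects. The namespace string and query shape are illustrative and mirror the unit tests further down.

    OperationContextNoop txn;
    CanonicalQuery* cqRaw;
    Status s = PlanCacheCommand::canonicalize(
        &txn, "test.t", fromjson("{query: {a: 1}, sort: {a: 1}, projection: {_id: 0, a: 1}}"), &cqRaw);
    if (s.isOK()) {
        std::unique_ptr<CanonicalQuery> cq(cqRaw);  // the caller takes ownership on success
    }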
+
+PlanCacheListQueryShapes::PlanCacheListQueryShapes()
+ : PlanCacheCommand("planCacheListQueryShapes",
+ "Displays all query shapes in a collection.",
+ ActionType::planCacheRead) {}
+
+Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ // This is a read lock. The query cache is owned by the collection.
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ PlanCache* planCache;
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ if (!status.isOK()) {
+ // No collection - return results with empty shapes array.
BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
- for (vector<PlanCacheEntry*>::const_iterator i = solutions.begin(); i != solutions.end(); i++) {
- PlanCacheEntry* entry = *i;
- invariant(entry);
-
- BSONObjBuilder shapeBuilder(arrayBuilder.subobjStart());
- shapeBuilder.append("query", entry->query);
- shapeBuilder.append("sort", entry->sort);
- shapeBuilder.append("projection", entry->projection);
- shapeBuilder.doneFast();
-
- // Release resources for cached solution after extracting query shape.
- delete entry;
- }
arrayBuilder.doneFast();
-
return Status::OK();
}
+ return list(*planCache, bob);
+}
- PlanCacheClear::PlanCacheClear() : PlanCacheCommand("planCacheClear",
- "Drops one or all cached queries in a collection.",
- ActionType::planCacheWrite) { }
-
- Status PlanCacheClear::runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(txn, ns);
+// static
+Status PlanCacheListQueryShapes::list(const PlanCache& planCache, BSONObjBuilder* bob) {
+ invariant(bob);
- PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
- if (!status.isOK()) {
- // No collection - nothing to do. Return OK status.
- return Status::OK();
- }
- return clear(txn, planCache, ns, cmdObj);
- }
+ // Fetch all cached solutions from plan cache.
+ vector<PlanCacheEntry*> solutions = planCache.getAllEntries();
- // static
- Status PlanCacheClear::clear(OperationContext* txn,
- PlanCache* planCache,
- const string& ns,
- const BSONObj& cmdObj) {
- invariant(planCache);
-
- // According to the specification, the planCacheClear command runs in two modes:
- // - clear all query shapes; or
- // - clear plans for single query shape when a query shape is described in the
- // command arguments.
- if (cmdObj.hasField("query")) {
- CanonicalQuery* cqRaw;
- Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
- if (!status.isOK()) {
- return status;
- }
+ BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
+ for (vector<PlanCacheEntry*>::const_iterator i = solutions.begin(); i != solutions.end(); i++) {
+ PlanCacheEntry* entry = *i;
+ invariant(entry);
- unique_ptr<CanonicalQuery> cq(cqRaw);
+ BSONObjBuilder shapeBuilder(arrayBuilder.subobjStart());
+ shapeBuilder.append("query", entry->query);
+ shapeBuilder.append("sort", entry->sort);
+ shapeBuilder.append("projection", entry->projection);
+ shapeBuilder.doneFast();
- if (!planCache->contains(*cq)) {
- // Log if asked to clear non-existent query shape.
- LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
- << cq->getQueryObj().toString()
- << "(sort: " << cq->getParsed().getSort()
- << "; projection: " << cq->getParsed().getProj() << ")";
- return Status::OK();
- }
+ // Release resources for cached solution after extracting query shape.
+ delete entry;
+ }
+ arrayBuilder.doneFast();
+
+ return Status::OK();
+}
+
+PlanCacheClear::PlanCacheClear()
+ : PlanCacheCommand("planCacheClear",
+ "Drops one or all cached queries in a collection.",
+ ActionType::planCacheWrite) {}
+
+Status PlanCacheClear::runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ // This is a read lock. The query cache is owned by the collection.
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ PlanCache* planCache;
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ if (!status.isOK()) {
+ // No collection - nothing to do. Return OK status.
+ return Status::OK();
+ }
+ return clear(txn, planCache, ns, cmdObj);
+}
+
+// static
+Status PlanCacheClear::clear(OperationContext* txn,
+ PlanCache* planCache,
+ const string& ns,
+ const BSONObj& cmdObj) {
+ invariant(planCache);
+
+ // According to the specification, the planCacheClear command runs in two modes:
+ // - clear all query shapes; or
+ // - clear plans for single query shape when a query shape is described in the
+ // command arguments.
+ if (cmdObj.hasField("query")) {
+ CanonicalQuery* cqRaw;
+ Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
+ if (!status.isOK()) {
+ return status;
+ }
- Status result = planCache->remove(*cq);
- if (!result.isOK()) {
- return result;
- }
+ unique_ptr<CanonicalQuery> cq(cqRaw);
- LOG(1) << ns << ": removed plan cache entry - " << cq->getQueryObj().toString()
- << "(sort: " << cq->getParsed().getSort()
+ if (!planCache->contains(*cq)) {
+ // Log if asked to clear non-existent query shape.
+ LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
+ << cq->getQueryObj().toString() << "(sort: " << cq->getParsed().getSort()
<< "; projection: " << cq->getParsed().getProj() << ")";
-
return Status::OK();
}
- // If query is not provided, make sure sort and projection are not in arguments.
- // We do not want to clear the entire cache inadvertently when the user
- // forgets to provide a value for "query".
- if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
- return Status(ErrorCodes::BadValue, "sort or projection provided without query");
+ Status result = planCache->remove(*cq);
+ if (!result.isOK()) {
+ return result;
}
- planCache->clear();
-
- LOG(1) << ns << ": cleared plan cache";
+ LOG(1) << ns << ": removed plan cache entry - " << cq->getQueryObj().toString()
+ << "(sort: " << cq->getParsed().getSort()
+ << "; projection: " << cq->getParsed().getProj() << ")";
return Status::OK();
}
- PlanCacheListPlans::PlanCacheListPlans() : PlanCacheCommand("planCacheListPlans",
- "Displays the cached plans for a query shape.",
- ActionType::planCacheRead) { }
+ // If query is not provided, make sure sort and projection are not in arguments.
+ // We do not want to clear the entire cache inadvertently when the user
+ // forgets to provide a value for "query".
+ if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
+ return Status(ErrorCodes::BadValue, "sort or projection provided without query");
+ }
- Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- AutoGetCollectionForRead ctx(txn, ns);
+ planCache->clear();
- PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
- if (!status.isOK()) {
- // No collection - return empty plans array.
- BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
- plansBuilder.doneFast();
- return Status::OK();
- }
- return list(txn, *planCache, ns, cmdObj, bob);
- }
+ LOG(1) << ns << ": cleared plan cache";
- // static
- Status PlanCacheListPlans::list(OperationContext* txn,
- const PlanCache& planCache,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- CanonicalQuery* cqRaw;
- Status status = canonicalize(txn, ns, cmdObj, &cqRaw);
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+}
- unique_ptr<CanonicalQuery> cq(cqRaw);
+PlanCacheListPlans::PlanCacheListPlans()
+ : PlanCacheCommand("planCacheListPlans",
+ "Displays the cached plans for a query shape.",
+ ActionType::planCacheRead) {}
- if (!planCache.contains(*cq)) {
- // Return empty plans in results if query shape does not
- // exist in plan cache.
- BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
- plansBuilder.doneFast();
- return Status::OK();
- }
+Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ AutoGetCollectionForRead ctx(txn, ns);
- PlanCacheEntry* entryRaw;
- Status result = planCache.getEntry(*cq, &entryRaw);
- if (!result.isOK()) {
- return result;
- }
- unique_ptr<PlanCacheEntry> entry(entryRaw);
+ PlanCache* planCache;
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ if (!status.isOK()) {
+ // No collection - return empty plans array.
+ BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
+ plansBuilder.doneFast();
+ return Status::OK();
+ }
+ return list(txn, *planCache, ns, cmdObj, bob);
+}
+
+// static
+Status PlanCacheListPlans::list(OperationContext* txn,
+ const PlanCache& planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ CanonicalQuery* cqRaw;
+ Status status = canonicalize(txn, ns, cmdObj, &cqRaw);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+ if (!planCache.contains(*cq)) {
+ // Return empty plans in results if query shape does not
+ // exist in plan cache.
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
- size_t numPlans = entry->plannerData.size();
- invariant(numPlans == entry->decision->stats.size());
- invariant(numPlans == entry->decision->scores.size());
- for (size_t i = 0; i < numPlans; ++i) {
- BSONObjBuilder planBob(plansBuilder.subobjStart());
-
- // Create plan details field.
- // Currently, simple string representationg of
- // SolutionCacheData. Need to revisit format when we
- // need to parse user-provided plan details for planCacheAddPlan.
- SolutionCacheData* scd = entry->plannerData[i];
- BSONObjBuilder detailsBob(planBob.subobjStart("details"));
- detailsBob.append("solution", scd->toString());
- detailsBob.doneFast();
-
- // reason is comprised of score and initial stats provided by
- // multi plan runner.
- BSONObjBuilder reasonBob(planBob.subobjStart("reason"));
- reasonBob.append("score", entry->decision->scores[i]);
- BSONObjBuilder statsBob(reasonBob.subobjStart("stats"));
- PlanStageStats* stats = entry->decision->stats.vector()[i];
- if (stats) {
- Explain::statsToBSON(*stats, &statsBob);
- }
- statsBob.doneFast();
- reasonBob.doneFast();
-
- // BSON object for 'feedback' field shows scores from historical executions of the plan.
- BSONObjBuilder feedbackBob(planBob.subobjStart("feedback"));
- if (i == 0U) {
- feedbackBob.append("nfeedback", int(entry->feedback.size()));
- BSONArrayBuilder scoresBob(feedbackBob.subarrayStart("scores"));
- for (size_t i = 0; i < entry->feedback.size(); ++i) {
- BSONObjBuilder scoreBob(scoresBob.subobjStart());
- scoreBob.append("score", entry->feedback[i]->score);
- }
- scoresBob.doneFast();
- }
- feedbackBob.doneFast();
+ plansBuilder.doneFast();
+ return Status::OK();
+ }
- planBob.append("filterSet", scd->indexFilterApplied);
+ PlanCacheEntry* entryRaw;
+ Status result = planCache.getEntry(*cq, &entryRaw);
+ if (!result.isOK()) {
+ return result;
+ }
+ unique_ptr<PlanCacheEntry> entry(entryRaw);
+
+ BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
+ size_t numPlans = entry->plannerData.size();
+ invariant(numPlans == entry->decision->stats.size());
+ invariant(numPlans == entry->decision->scores.size());
+ for (size_t i = 0; i < numPlans; ++i) {
+ BSONObjBuilder planBob(plansBuilder.subobjStart());
+
+ // Create plan details field.
+        // Currently, a simple string representation of
+ // SolutionCacheData. Need to revisit format when we
+ // need to parse user-provided plan details for planCacheAddPlan.
+ SolutionCacheData* scd = entry->plannerData[i];
+ BSONObjBuilder detailsBob(planBob.subobjStart("details"));
+ detailsBob.append("solution", scd->toString());
+ detailsBob.doneFast();
+
+        // reason is composed of score and initial stats provided by
+ // multi plan runner.
+ BSONObjBuilder reasonBob(planBob.subobjStart("reason"));
+ reasonBob.append("score", entry->decision->scores[i]);
+ BSONObjBuilder statsBob(reasonBob.subobjStart("stats"));
+ PlanStageStats* stats = entry->decision->stats.vector()[i];
+ if (stats) {
+ Explain::statsToBSON(*stats, &statsBob);
}
- plansBuilder.doneFast();
+ statsBob.doneFast();
+ reasonBob.doneFast();
+
+ // BSON object for 'feedback' field shows scores from historical executions of the plan.
+ BSONObjBuilder feedbackBob(planBob.subobjStart("feedback"));
+ if (i == 0U) {
+ feedbackBob.append("nfeedback", int(entry->feedback.size()));
+ BSONArrayBuilder scoresBob(feedbackBob.subarrayStart("scores"));
+ for (size_t i = 0; i < entry->feedback.size(); ++i) {
+ BSONObjBuilder scoreBob(scoresBob.subobjStart());
+ scoreBob.append("score", entry->feedback[i]->score);
+ }
+ scoresBob.doneFast();
+ }
+ feedbackBob.doneFast();
- return Status::OK();
+ planBob.append("filterSet", scd->indexFilterApplied);
}
+ plansBuilder.doneFast();
+
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
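Taken together, the builders in this file produce result documents of roughly the following shape; the field names come from the code above, the values are placeholders, and (per the loop in PlanCacheListPlans::list) only the first plan's feedback object carries nfeedback and scores.

    // planCacheListQueryShapes
    { shapes: [ { query: <query>, sort: <sort>, projection: <projection> }, ... ] }

    // planCacheListPlans
    { plans: [ { details: { solution: <string> },
                 reason: { score: <number>, stats: <plan stage stats> },
                 feedback: { nfeedback: <int>, scores: [ { score: <number> }, ... ] },
                 filterSet: <bool> },
               ... ] }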
diff --git a/src/mongo/db/commands/plan_cache_commands.h b/src/mongo/db/commands/plan_cache_commands.h
index 05b7c5969a8..3858704dbde 100644
--- a/src/mongo/db/commands/plan_cache_commands.h
+++ b/src/mongo/db/commands/plan_cache_commands.h
@@ -33,155 +33,154 @@
namespace mongo {
+/**
+ * DB commands for plan cache.
+ * These are in a header to facilitate unit testing. See plan_cache_commands_test.cpp.
+ */
+
+/**
+ * PlanCacheCommand
+ * Defines common attributes for all plan cache related commands
+ * such as slaveOk.
+ */
+class PlanCacheCommand : public Command {
+public:
+ PlanCacheCommand(const std::string& name, const std::string& helpText, ActionType actionType);
+
/**
- * DB commands for plan cache.
- * These are in a header to facilitate unit testing. See plan_cache_commands_test.cpp.
+ * Entry point from command subsystem.
+ * Implementation provides standardization of error handling
+ * such as adding error code and message to BSON result.
+ *
+ * Do not override in derived classes.
+ * Override runPlanCacheCommands instead to
+ * implement plan cache command functionality.
*/
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ virtual bool isWriteCommandForConfigServer() const;
+
+ virtual bool slaveOk() const;
+
+ virtual bool slaveOverrideOk() const;
+
+ virtual void help(std::stringstream& ss) const;
+
/**
- * PlanCacheCommand
- * Defines common attributes for all plan cache related commands
- * such as slaveOk.
+ * Two action types defined for plan cache commands:
+ * - planCacheRead
+ * - planCacheWrite
*/
- class PlanCacheCommand : public Command {
- public:
- PlanCacheCommand(const std::string& name, const std::string& helpText,
- ActionType actionType);
-
- /**
- * Entry point from command subsystem.
- * Implementation provides standardization of error handling
- * such as adding error code and message to BSON result.
- *
- * Do not override in derived classes.
- * Override runPlanCacheCommands instead to
- * implement plan cache command functionality.
- */
-
- bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- virtual bool isWriteCommandForConfigServer() const;
-
- virtual bool slaveOk() const;
-
- virtual bool slaveOverrideOk() const;
-
- virtual void help(std::stringstream& ss) const;
-
- /**
- * Two action types defined for plan cache commands:
- * - planCacheRead
- * - planCacheWrite
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
- /**
- * Subset of command arguments used by plan cache commands
- * Override to provide command functionality.
- * Should contain just enough logic to invoke run*Command() function
- * in plan_cache.h
- */
- virtual Status runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) = 0;
-
- /**
- * Validatess query shape from command object and returns canonical query.
- */
- static Status canonicalize(OperationContext* txn,
- const std::string& ns,
- const BSONObj& cmdObj,
- CanonicalQuery** canonicalQueryOut);
-
- private:
- std::string helpText;
- ActionType actionType;
- };
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+ /**
+ * Subset of command arguments used by plan cache commands
+ * Override to provide command functionality.
+ * Should contain just enough logic to invoke run*Command() function
+ * in plan_cache.h
+ */
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) = 0;
/**
- * planCacheListQueryShapes
- *
- * { planCacheListQueryShapes: <collection> }
- *
+ * Validates query shape from command object and returns canonical query.
*/
- class PlanCacheListQueryShapes : public PlanCacheCommand {
- public:
- PlanCacheListQueryShapes();
- virtual Status runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Looks up cache keys for collection's plan cache.
- * Inserts keys for query into BSON builder.
- */
- static Status list(const PlanCache& planCache, BSONObjBuilder* bob);
- };
+ static Status canonicalize(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& cmdObj,
+ CanonicalQuery** canonicalQueryOut);
+
+private:
+ std::string helpText;
+ ActionType actionType;
+};
+
+/**
+ * planCacheListQueryShapes
+ *
+ * { planCacheListQueryShapes: <collection> }
+ *
+ */
+class PlanCacheListQueryShapes : public PlanCacheCommand {
+public:
+ PlanCacheListQueryShapes();
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
- * planCacheClear
- *
- * {
- * planCacheClear: <collection>,
- * query: <query>,
- * sort: <sort>,
- * projection: <projection>
- * }
- *
+ * Looks up cache keys for collection's plan cache.
+ * Inserts keys for query into BSON builder.
*/
- class PlanCacheClear : public PlanCacheCommand {
- public:
- PlanCacheClear();
- virtual Status runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Clears collection's plan cache.
- * If query shape is provided, clears plans for that single query shape only.
- */
- static Status clear(OperationContext* txn,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj);
- };
+ static Status list(const PlanCache& planCache, BSONObjBuilder* bob);
+};
+
+/**
+ * planCacheClear
+ *
+ * {
+ * planCacheClear: <collection>,
+ * query: <query>,
+ * sort: <sort>,
+ * projection: <projection>
+ * }
+ *
+ */
+class PlanCacheClear : public PlanCacheCommand {
+public:
+ PlanCacheClear();
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
- * planCacheListPlans
- *
- * {
- * planCacheListPlans: <collection>,
- * query: <query>,
- * sort: <sort>,
- * projection: <projection>
- * }
- *
+ * Clears collection's plan cache.
+ * If query shape is provided, clears plans for that single query shape only.
+ */
+ static Status clear(OperationContext* txn,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj);
+};
+
+/**
+ * planCacheListPlans
+ *
+ * {
+ * planCacheListPlans: <collection>,
+ * query: <query>,
+ * sort: <sort>,
+ * projection: <projection>
+ * }
+ *
+ */
+class PlanCacheListPlans : public PlanCacheCommand {
+public:
+ PlanCacheListPlans();
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
+
+ /**
+ * Displays the cached plans for a query shape.
*/
- class PlanCacheListPlans : public PlanCacheCommand {
- public:
- PlanCacheListPlans();
- virtual Status runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Displays the cached plans for a query shape.
- */
- static Status list(OperationContext* txn,
- const PlanCache& planCache,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob);
- };
+ static Status list(OperationContext* txn,
+ const PlanCache& planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* bob);
+};
} // namespace mongo
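As a usage sketch, command objects matching the layouts documented above can be assembled with the BSON macro in the same way the unit tests below do; the collection name "t" and the query shape are illustrative.

    BSONObj listShapesCmd = BSON("planCacheListQueryShapes" << "t");
    BSONObj clearAllCmd = BSON("planCacheClear" << "t");  // no query shape: clears every entry
    BSONObj clearOneCmd = BSON("planCacheClear" << "t" << "query" << BSON("a" << 1));
    BSONObj listPlansCmd = BSON("planCacheListPlans" << "t" << "query" << BSON("a" << 1)
                                                     << "sort" << BSON("a" << 1)
                                                     << "projection" << BSON("_id" << 0 << "a" << 1));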
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 86eecdbda7e..8a7eee783d8 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -45,371 +45,374 @@ using namespace mongo;
namespace {
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- static const char* ns = "test.t";
-
- /**
- * Tests for planCacheListQueryShapes
- */
-
- /**
- * Utility function to get list of keys in the cache.
- */
- std::vector<BSONObj> getShapes(const PlanCache& planCache) {
- BSONObjBuilder bob;
- ASSERT_OK(PlanCacheListQueryShapes::list(planCache, &bob));
- BSONObj resultObj = bob.obj();
- BSONElement shapesElt = resultObj.getField("shapes");
- ASSERT_EQUALS(shapesElt.type(), mongo::Array);
- vector<BSONElement> shapesEltArray = shapesElt.Array();
- vector<BSONObj> shapes;
- for (vector<BSONElement>::const_iterator i = shapesEltArray.begin();
- i != shapesEltArray.end(); ++i) {
- const BSONElement& elt = *i;
-
- ASSERT_TRUE(elt.isABSONObj());
- BSONObj obj = elt.Obj();
-
- // Check required fields.
- // query
- BSONElement queryElt = obj.getField("query");
- ASSERT_TRUE(queryElt.isABSONObj());
-
- // sort
- BSONElement sortElt = obj.getField("sort");
- ASSERT_TRUE(sortElt.isABSONObj());
-
- // projection
- BSONElement projectionElt = obj.getField("projection");
- ASSERT_TRUE(projectionElt.isABSONObj());
-
- // All fields OK. Append to vector.
- shapes.push_back(obj.getOwned());
- }
- return shapes;
- }
+using std::string;
+using std::unique_ptr;
+using std::vector;
- /**
- * Utility function to create a SolutionCacheData
- */
- SolutionCacheData* createSolutionCacheData() {
- unique_ptr<SolutionCacheData> scd(new SolutionCacheData());
- scd->tree.reset(new PlanCacheIndexTree());
- return scd.release();
- }
+static const char* ns = "test.t";
- /**
- * Utility function to create a PlanRankingDecision
- */
- PlanRankingDecision* createDecision(size_t numPlans) {
- unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
- for (size_t i = 0; i < numPlans; ++i) {
- CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
- stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
- why->scores.push_back(0U);
- why->candidateOrder.push_back(i);
- }
- return why.release();
- }
+/**
+ * Tests for planCacheListQueryShapes
+ */
- TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
- PlanCache empty;
- vector<BSONObj> shapes = getShapes(empty);
- ASSERT_TRUE(shapes.empty());
- }
+/**
+ * Utility function to get list of keys in the cache.
+ */
+std::vector<BSONObj> getShapes(const PlanCache& planCache) {
+ BSONObjBuilder bob;
+ ASSERT_OK(PlanCacheListQueryShapes::list(planCache, &bob));
+ BSONObj resultObj = bob.obj();
+ BSONElement shapesElt = resultObj.getField("shapes");
+ ASSERT_EQUALS(shapesElt.type(), mongo::Array);
+ vector<BSONElement> shapesEltArray = shapesElt.Array();
+ vector<BSONObj> shapes;
+ for (vector<BSONElement>::const_iterator i = shapesEltArray.begin(); i != shapesEltArray.end();
+ ++i) {
+ const BSONElement& elt = *i;
- TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
- // Create a canonical query
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
-
- vector<BSONObj> shapes = getShapes(planCache);
- ASSERT_EQUALS(shapes.size(), 1U);
- ASSERT_EQUALS(shapes[0].getObjectField("query"), cq->getQueryObj());
- ASSERT_EQUALS(shapes[0].getObjectField("sort"), cq->getParsed().getSort());
- ASSERT_EQUALS(shapes[0].getObjectField("projection"), cq->getParsed().getProj());
- }
+ ASSERT_TRUE(elt.isABSONObj());
+ BSONObj obj = elt.Obj();
- /**
- * Tests for planCacheClear
- */
-
- TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
- // Create a canonical query
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- OperationContextNoop txn;
-
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
- ASSERT_EQUALS(getShapes(planCache).size(), 1U);
-
- // Clear cache and confirm number of keys afterwards.
- ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, BSONObj()));
- ASSERT_EQUALS(getShapes(planCache).size(), 0U);
- }
+ // Check required fields.
+ // query
+ BSONElement queryElt = obj.getField("query");
+ ASSERT_TRUE(queryElt.isABSONObj());
- /**
- * Tests for PlanCacheCommand::makeCacheKey
- * Mostly validation on the input parameters
- */
-
- TEST(PlanCacheCommandsTest, Canonicalize) {
- // Invalid parameters
- PlanCache planCache;
- CanonicalQuery* cqRaw;
- OperationContextNoop txn;
-
- // Missing query field
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{}"), &cqRaw));
- // Query needs to be an object
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: 1}"), &cqRaw));
- // Sort needs to be an object
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {}, sort: 1}"),
- &cqRaw));
- // Bad query (invalid sort order)
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {}, sort: {a: 0}}"),
- &cqRaw));
-
- // Valid parameters
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {a: 1, b: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> query(cqRaw);
-
-
- // Equivalent query should generate same key.
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {b: 1, a: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> equivQuery(cqRaw);
- ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
-
- // Sort query should generate different key from unsorted query.
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns,
- fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> sortQuery1(cqRaw);
- ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery1));
-
- // Confirm sort arguments are properly delimited (SERVER-17158)
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns,
- fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> sortQuery2(cqRaw);
- ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
-
- // Changing order and/or value of predicates should not change key
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns,
- fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> sortQuery3(cqRaw);
- ASSERT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery3));
-
- // Projected query should generate different key from unprojected query.
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns,
- fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> projectionQuery(cqRaw);
- ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
- }
+ // sort
+ BSONElement sortElt = obj.getField("sort");
+ ASSERT_TRUE(sortElt.isABSONObj());
+
+ // projection
+ BSONElement projectionElt = obj.getField("projection");
+ ASSERT_TRUE(projectionElt.isABSONObj());
- /**
- * Tests for planCacheClear (single query shape)
- */
-
- TEST(PlanCacheCommandsTest, planCacheClearInvalidParameter) {
- PlanCache planCache;
- OperationContextNoop txn;
-
- // Query field type must be BSON object.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: 12345}")));
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: /keyisnotregex/}")));
- // Query must pass canonicalization.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns,
- fromjson("{query: {a: {$no_such_op: 1}}}")));
- // Sort present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{sort: {a: 1}}")));
- // Projection present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns,
- fromjson("{projection: {_id: 0, a: 1}}")));
+ // All fields OK. Append to vector.
+ shapes.push_back(obj.getOwned());
}
+ return shapes;
+}
- TEST(PlanCacheCommandsTest, planCacheClearUnknownKey) {
- PlanCache planCache;
- OperationContextNoop txn;
+/**
+ * Utility function to create a SolutionCacheData
+ */
+SolutionCacheData* createSolutionCacheData() {
+ unique_ptr<SolutionCacheData> scd(new SolutionCacheData());
+ scd->tree.reset(new PlanCacheIndexTree());
+ return scd.release();
+}
- ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: {a: 1}}")));
+/**
+ * Utility function to create a PlanRankingDecision
+ */
+PlanRankingDecision* createDecision(size_t numPlans) {
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ for (size_t i = 0; i < numPlans; ++i) {
+ CommonStats common("COLLSCAN");
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ stats->specific.reset(new CollectionScanStats());
+ why->stats.mutableVector().push_back(stats.release());
+ why->scores.push_back(0U);
+ why->candidateOrder.push_back(i);
}
+ return why.release();
+}
+
+TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
+ PlanCache empty;
+ vector<BSONObj> shapes = getShapes(empty);
+ ASSERT_TRUE(shapes.empty());
+}
+
+TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
+ // Create a canonical query
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Plan cache with one entry
+ PlanCache planCache;
+ QuerySolution qs;
+ qs.cacheData.reset(createSolutionCacheData());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ planCache.add(*cq, solns, createDecision(1U));
+
+ vector<BSONObj> shapes = getShapes(planCache);
+ ASSERT_EQUALS(shapes.size(), 1U);
+ ASSERT_EQUALS(shapes[0].getObjectField("query"), cq->getQueryObj());
+ ASSERT_EQUALS(shapes[0].getObjectField("sort"), cq->getParsed().getSort());
+ ASSERT_EQUALS(shapes[0].getObjectField("projection"), cq->getParsed().getProj());
+}
- TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
- // Create 2 canonical queries.
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cqA(cqRaw);
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{b: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cqB(cqRaw);
-
- // Create plan cache with 2 entries.
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cqA, solns, createDecision(1U));
- planCache.add(*cqB, solns, createDecision(1U));
-
- // Check keys in cache before dropping {b: 1}
- vector<BSONObj> shapesBefore = getShapes(planCache);
- ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA = BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort()
- << "projection" << cqA->getParsed().getProj());
- BSONObj shapeB = BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort()
- << "projection" << cqB->getParsed().getProj());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeA) != shapesBefore.end());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeB) != shapesBefore.end());
-
- // Drop {b: 1} from cache. Make sure {a: 1} is still in cache afterwards.
- BSONObjBuilder bob;
- OperationContextNoop txn;
-
- ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, BSON("query" << cqB->getQueryObj())));
- vector<BSONObj> shapesAfter = getShapes(planCache);
- ASSERT_EQUALS(shapesAfter.size(), 1U);
- ASSERT_EQUALS(shapesAfter[0], shapeA);
- }
+/**
+ * Tests for planCacheClear
+ */
- /**
- * Tests for planCacheListPlans
- */
-
- /**
- * Function to extract plan ID from BSON element.
- * Validates planID during extraction.
- * Each BSON element contains an embedded BSON object with the following layout:
- * {
- * plan: <plan_id>,
- * details: <plan_details>,
- * reason: <ranking_stats>,
- * feedback: <execution_stats>,
- * source: <source>
- * }
- * Compilation note: GCC 4.4 has issues with getPlan() declared as a function object.
- */
- BSONObj getPlan(const BSONElement& elt) {
- ASSERT_TRUE(elt.isABSONObj());
- BSONObj obj = elt.Obj();
+TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
+ // Create a canonical query
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
- // Check required fields.
- // details
- BSONElement detailsElt = obj.getField("details");
- ASSERT_TRUE(detailsElt.isABSONObj());
+ // Plan cache with one entry
+ PlanCache planCache;
+ QuerySolution qs;
+ OperationContextNoop txn;
- // reason
- BSONElement reasonElt = obj.getField("reason");
- ASSERT_TRUE(reasonElt.isABSONObj());
+ qs.cacheData.reset(createSolutionCacheData());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ planCache.add(*cq, solns, createDecision(1U));
+ ASSERT_EQUALS(getShapes(planCache).size(), 1U);
- // feedback
- BSONElement feedbackElt = obj.getField("feedback");
- ASSERT_TRUE(feedbackElt.isABSONObj());
+ // Clear cache and confirm number of keys afterwards.
+ ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, BSONObj()));
+ ASSERT_EQUALS(getShapes(planCache).size(), 0U);
+}
- return obj.getOwned();
- }
+/**
+ * Tests for PlanCacheCommand::makeCacheKey
+ * Mostly validation on the input parameters
+ */
- /**
- * Utility function to get list of plan IDs for a query in the cache.
- */
- vector<BSONObj> getPlans(const PlanCache& planCache, const BSONObj& query,
- const BSONObj& sort, const BSONObj& projection) {
- OperationContextNoop txn;
-
- BSONObjBuilder bob;
- BSONObj cmdObj = BSON("query" << query << "sort" << sort << "projection" << projection);
- ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, cmdObj, &bob));
- BSONObj resultObj = bob.obj();
- BSONElement plansElt = resultObj.getField("plans");
- ASSERT_EQUALS(plansElt.type(), mongo::Array);
- vector<BSONElement> planEltArray = plansElt.Array();
- ASSERT_FALSE(planEltArray.empty());
- vector<BSONObj> plans(planEltArray.size());
- std::transform(planEltArray.begin(), planEltArray.end(), plans.begin(), getPlan);
- return plans;
- }
+TEST(PlanCacheCommandsTest, Canonicalize) {
+ // Invalid parameters
+ PlanCache planCache;
+ CanonicalQuery* cqRaw;
+ OperationContextNoop txn;
+
+ // Missing query field
+ ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{}"), &cqRaw));
+ // Query needs to be an object
+ ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: 1}"), &cqRaw));
+ // Sort needs to be an object
+ ASSERT_NOT_OK(
+ PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {}, sort: 1}"), &cqRaw));
+ // Bad query (invalid sort order)
+ ASSERT_NOT_OK(
+ PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {}, sort: {a: 0}}"), &cqRaw));
+
+ // Valid parameters
+ ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {a: 1, b: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> query(cqRaw);
+
+
+ // Equivalent query should generate same key.
+ ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {b: 1, a: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> equivQuery(cqRaw);
+ ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
+
+ // Sort query should generate different key from unsorted query.
+ ASSERT_OK(PlanCacheCommand::canonicalize(
+ &txn, ns, fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> sortQuery1(cqRaw);
+ ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery1));
+
+ // Confirm sort arguments are properly delimited (SERVER-17158)
+ ASSERT_OK(PlanCacheCommand::canonicalize(
+ &txn, ns, fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> sortQuery2(cqRaw);
+ ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
+
+ // Changing order and/or value of predicates should not change key
+ ASSERT_OK(PlanCacheCommand::canonicalize(
+ &txn, ns, fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> sortQuery3(cqRaw);
+ ASSERT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery3));
+
+ // Projected query should generate different key from unprojected query.
+ ASSERT_OK(PlanCacheCommand::canonicalize(
+ &txn, ns, fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> projectionQuery(cqRaw);
+ ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
+}
- TEST(PlanCacheCommandsTest, planCacheListPlansInvalidParameter) {
- PlanCache planCache;
- BSONObjBuilder ignored;
- OperationContextNoop txn;
-
- // Missing query field is not ok.
- ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, ns, BSONObj(), &ignored));
- // Query field type must be BSON object.
- ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: 12345}"),
- &ignored));
- ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: /keyisnotregex/}"),
- &ignored));
- }
+/**
+ * Tests for planCacheClear (single query shape)
+ */
- TEST(PlanCacheCommandsTest, planCacheListPlansUnknownKey) {
- // Leave the plan cache empty.
- PlanCache planCache;
- OperationContextNoop txn;
+TEST(PlanCacheCommandsTest, planCacheClearInvalidParameter) {
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ // Query field type must be BSON object.
+ ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: 12345}")));
+ ASSERT_NOT_OK(
+ PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: /keyisnotregex/}")));
+ // Query must pass canonicalization.
+ ASSERT_NOT_OK(
+ PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: {a: {$no_such_op: 1}}}")));
+ // Sort present without query is an error.
+ ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{sort: {a: 1}}")));
+ // Projection present without query is an error.
+ ASSERT_NOT_OK(
+ PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{projection: {_id: 0, a: 1}}")));
+}
+
+TEST(PlanCacheCommandsTest, planCacheClearUnknownKey) {
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: {a: 1}}")));
+}
+
+TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
+ // Create 2 canonical queries.
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cqA(cqRaw);
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{b: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cqB(cqRaw);
+
+ // Create plan cache with 2 entries.
+ PlanCache planCache;
+ QuerySolution qs;
+ qs.cacheData.reset(createSolutionCacheData());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ planCache.add(*cqA, solns, createDecision(1U));
+ planCache.add(*cqB, solns, createDecision(1U));
+
+ // Check keys in cache before dropping {b: 1}
+ vector<BSONObj> shapesBefore = getShapes(planCache);
+ ASSERT_EQUALS(shapesBefore.size(), 2U);
+ BSONObj shapeA = BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort()
+ << "projection" << cqA->getParsed().getProj());
+ BSONObj shapeB = BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort()
+ << "projection" << cqB->getParsed().getProj());
+ ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeA) != shapesBefore.end());
+ ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeB) != shapesBefore.end());
+
+ // Drop {b: 1} from cache. Make sure {a: 1} is still in cache afterwards.
+ BSONObjBuilder bob;
+ OperationContextNoop txn;
+
+ ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, BSON("query" << cqB->getQueryObj())));
+ vector<BSONObj> shapesAfter = getShapes(planCache);
+ ASSERT_EQUALS(shapesAfter.size(), 1U);
+ ASSERT_EQUALS(shapesAfter[0], shapeA);
+}
- BSONObjBuilder ignored;
- ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: {a: 1}}"), &ignored));
- }
+/**
+ * Tests for planCacheListPlans
+ */
- TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
- // Create a canonical query
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
-
- vector<BSONObj> plans = getPlans(planCache, cq->getQueryObj(),
- cq->getParsed().getSort(), cq->getParsed().getProj());
- ASSERT_EQUALS(plans.size(), 1U);
- }
+/**
+ * Function to extract plan ID from BSON element.
+ * Validates planID during extraction.
+ * Each BSON element contains an embedded BSON object with the following layout:
+ * {
+ * plan: <plan_id>,
+ * details: <plan_details>,
+ * reason: <ranking_stats>,
+ * feedback: <execution_stats>,
+ * source: <source>
+ * }
+ * Compilation note: GCC 4.4 has issues with getPlan() declared as a function object.
+ */
+BSONObj getPlan(const BSONElement& elt) {
+ ASSERT_TRUE(elt.isABSONObj());
+ BSONObj obj = elt.Obj();
- TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
- // Create a canonical query
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- // Add cache entry with 2 solutions.
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(2U));
-
- vector<BSONObj> plans = getPlans(planCache, cq->getQueryObj(),
- cq->getParsed().getSort(), cq->getParsed().getProj());
- ASSERT_EQUALS(plans.size(), 2U);
- }
+ // Check required fields.
+ // details
+ BSONElement detailsElt = obj.getField("details");
+ ASSERT_TRUE(detailsElt.isABSONObj());
+
+ // reason
+ BSONElement reasonElt = obj.getField("reason");
+ ASSERT_TRUE(reasonElt.isABSONObj());
+
+ // feedback
+ BSONElement feedbackElt = obj.getField("feedback");
+ ASSERT_TRUE(feedbackElt.isABSONObj());
+
+ return obj.getOwned();
+}
+
+/**
+ * Utility function to get list of plan IDs for a query in the cache.
+ */
+vector<BSONObj> getPlans(const PlanCache& planCache,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection) {
+ OperationContextNoop txn;
+
+ BSONObjBuilder bob;
+ BSONObj cmdObj = BSON("query" << query << "sort" << sort << "projection" << projection);
+ ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, cmdObj, &bob));
+ BSONObj resultObj = bob.obj();
+ BSONElement plansElt = resultObj.getField("plans");
+ ASSERT_EQUALS(plansElt.type(), mongo::Array);
+ vector<BSONElement> planEltArray = plansElt.Array();
+ ASSERT_FALSE(planEltArray.empty());
+ vector<BSONObj> plans(planEltArray.size());
+ std::transform(planEltArray.begin(), planEltArray.end(), plans.begin(), getPlan);
+ return plans;
+}
+
+TEST(PlanCacheCommandsTest, planCacheListPlansInvalidParameter) {
+ PlanCache planCache;
+ BSONObjBuilder ignored;
+ OperationContextNoop txn;
+
+ // Missing query field is not ok.
+ ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, ns, BSONObj(), &ignored));
+ // Query field type must be BSON object.
+ ASSERT_NOT_OK(
+ PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: 12345}"), &ignored));
+ ASSERT_NOT_OK(PlanCacheListPlans::list(
+ &txn, planCache, ns, fromjson("{query: /keyisnotregex/}"), &ignored));
+}
+
+TEST(PlanCacheCommandsTest, planCacheListPlansUnknownKey) {
+ // Leave the plan cache empty.
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ BSONObjBuilder ignored;
+ ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: {a: 1}}"), &ignored));
+}
+
+TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
+ // Create a canonical query
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Plan cache with one entry
+ PlanCache planCache;
+ QuerySolution qs;
+ qs.cacheData.reset(createSolutionCacheData());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ planCache.add(*cq, solns, createDecision(1U));
+
+ vector<BSONObj> plans = getPlans(
+ planCache, cq->getQueryObj(), cq->getParsed().getSort(), cq->getParsed().getProj());
+ ASSERT_EQUALS(plans.size(), 1U);
+}
+
+TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
+ // Create a canonical query
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Plan cache with one entry
+ PlanCache planCache;
+ QuerySolution qs;
+ qs.cacheData.reset(createSolutionCacheData());
+ // Add cache entry with 2 solutions.
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ solns.push_back(&qs);
+ planCache.add(*cq, solns, createDecision(2U));
+
+ vector<BSONObj> plans = getPlans(
+ planCache, cq->getQueryObj(), cq->getParsed().getSort(), cq->getParsed().getProj());
+ ASSERT_EQUALS(plans.size(), 2U);
+}
} // namespace
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index 5479ee92a40..68c47676527 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -52,98 +52,99 @@
namespace mongo {
- using std::min;
- using std::string;
- using std::stringstream;
+using std::min;
+using std::string;
+using std::stringstream;
- class CmdRenameCollection : public Command {
- public:
- CmdRenameCollection() : Command( "renameCollection" ) {}
- virtual bool adminOnly() const {
- return true;
+class CmdRenameCollection : public Command {
+public:
+ CmdRenameCollection() : Command("renameCollection") {}
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return rename_collection::checkAuthForRenameCollectionCommand(client, dbname, cmdObj);
+ }
+ virtual void help(stringstream& help) const {
+ help << " example: { renameCollection: foo.a, to: bar.b }";
+ }
+
+ static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
+ WriteUnitOfWork wunit(txn);
+ if (db->dropCollection(txn, collName).isOK()) {
+ // ignoring failure case
+ wunit.commit();
}
- virtual bool slaveOk() const {
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string source = cmdObj.getStringField(name.c_str());
+ string target = cmdObj.getStringField("to");
+
+ if (!NamespaceString::validCollectionComponent(target.c_str())) {
+ errmsg = "invalid collection name: " + target;
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return rename_collection::checkAuthForRenameCollectionCommand(client, dbname, cmdObj);
- }
- virtual void help( stringstream &help ) const {
- help << " example: { renameCollection: foo.a, to: bar.b }";
- }
-
- static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn);
- if (db->dropCollection(txn, collName).isOK()) {
- // ignoring failure case
- wunit.commit();
- }
+ if (source.empty() || target.empty()) {
+ errmsg = "invalid command syntax";
+ return false;
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string source = cmdObj.getStringField( name.c_str() );
- string target = cmdObj.getStringField( "to" );
-
- if ( !NamespaceString::validCollectionComponent(target.c_str()) ) {
- errmsg = "invalid collection name: " + target;
+ if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeNone)) {
+ if (NamespaceString(source).isOplog()) {
+ errmsg = "can't rename live oplog while replicating";
return false;
}
- if ( source.empty() || target.empty() ) {
- errmsg = "invalid command syntax";
+ if (NamespaceString(target).isOplog()) {
+ errmsg = "can't rename to live oplog while replicating";
return false;
}
+ }
- if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeNone)) {
- if (NamespaceString(source).isOplog()) {
- errmsg = "can't rename live oplog while replicating";
- return false;
- }
- if (NamespaceString(target).isOplog()) {
- errmsg = "can't rename to live oplog while replicating";
- return false;
- }
- }
-
- if (NamespaceString::oplog(source) != NamespaceString::oplog(target)) {
- errmsg =
- "If either the source or target of a rename is an oplog name, both must be";
- return false;
- }
-
- Status sourceStatus = userAllowedWriteNS(source);
- if (!sourceStatus.isOK()) {
- errmsg = "error with source namespace: " + sourceStatus.reason();
- return false;
- }
+ if (NamespaceString::oplog(source) != NamespaceString::oplog(target)) {
+ errmsg = "If either the source or target of a rename is an oplog name, both must be";
+ return false;
+ }
- Status targetStatus = userAllowedWriteNS(target);
- if (!targetStatus.isOK()) {
- errmsg = "error with target namespace: " + targetStatus.reason();
- return false;
- }
+ Status sourceStatus = userAllowedWriteNS(source);
+ if (!sourceStatus.isOK()) {
+ errmsg = "error with source namespace: " + sourceStatus.reason();
+ return false;
+ }
- if (NamespaceString(source).coll() == "system.indexes"
- || NamespaceString(target).coll() == "system.indexes") {
- errmsg = "renaming system.indexes is not allowed";
- return false;
- }
+ Status targetStatus = userAllowedWriteNS(target);
+ if (!targetStatus.isOK()) {
+ errmsg = "error with target namespace: " + targetStatus.reason();
+ return false;
+ }
- return appendCommandStatus(result,
- renameCollection(txn,
- NamespaceString(source),
- NamespaceString(target),
- cmdObj["dropTarget"].trueValue(),
- cmdObj["stayTemp"].trueValue()));
+ if (NamespaceString(source).coll() == "system.indexes" ||
+ NamespaceString(target).coll() == "system.indexes") {
+ errmsg = "renaming system.indexes is not allowed";
+ return false;
}
- } cmdrenamecollection;
-} // namespace mongo
+ return appendCommandStatus(result,
+ renameCollection(txn,
+ NamespaceString(source),
+ NamespaceString(target),
+ cmdObj["dropTarget"].trueValue(),
+ cmdObj["stayTemp"].trueValue()));
+ }
+} cmdrenamecollection;
+
+} // namespace mongo
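For reference, the complete argument set read by run() above, in the same notation as the help string; dropTarget and stayTemp are optional booleans and are treated as false when absent.

    { renameCollection: <source namespace>, to: <target namespace>,
      dropTarget: <bool>, stayTemp: <bool> }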
diff --git a/src/mongo/db/commands/rename_collection.h b/src/mongo/db/commands/rename_collection.h
index f8651bccd4c..a7e3c6beed4 100644
--- a/src/mongo/db/commands/rename_collection.h
+++ b/src/mongo/db/commands/rename_collection.h
@@ -36,15 +36,13 @@
namespace mongo {
- class ClientBasic;
+class ClientBasic;
namespace rename_collection {
- Status checkAuthForRenameCollectionCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
-} // namespace rename_collection
-} // namespace mongo
-
+Status checkAuthForRenameCollectionCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+} // namespace rename_collection
+} // namespace mongo
diff --git a/src/mongo/db/commands/rename_collection_common.cpp b/src/mongo/db/commands/rename_collection_common.cpp
index feec6f4f135..d9818962cc3 100644
--- a/src/mongo/db/commands/rename_collection_common.cpp
+++ b/src/mongo/db/commands/rename_collection_common.cpp
@@ -42,63 +42,61 @@
namespace mongo {
namespace rename_collection {
- Status checkAuthForRenameCollectionCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- NamespaceString sourceNS = NamespaceString(cmdObj.getStringField("renameCollection"));
- NamespaceString targetNS = NamespaceString(cmdObj.getStringField("to"));
- bool dropTarget = cmdObj["dropTarget"].trueValue();
+Status checkAuthForRenameCollectionCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ NamespaceString sourceNS = NamespaceString(cmdObj.getStringField("renameCollection"));
+ NamespaceString targetNS = NamespaceString(cmdObj.getStringField("to"));
+ bool dropTarget = cmdObj["dropTarget"].trueValue();
- if (sourceNS.db() == targetNS.db() && !sourceNS.isSystem() && !targetNS.isSystem()) {
- // If renaming within the same database, then if you have renameCollectionSameDB and
- // either can read both of source and dest collections or *can't* read either of source
- // or dest collection, then you get can do the rename, even without insert on the
- // destination collection.
- bool canRename = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(sourceNS.db()),
- ActionType::renameCollectionSameDB);
+ if (sourceNS.db() == targetNS.db() && !sourceNS.isSystem() && !targetNS.isSystem()) {
+ // If renaming within the same database, then if you have renameCollectionSameDB and
+ // either can read both of source and dest collections or *can't* read either of source
+        // or dest collection, then you can do the rename, even without insert on the
+ // destination collection.
+ bool canRename = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(sourceNS.db()), ActionType::renameCollectionSameDB);
- bool canDropTargetIfNeeded = true;
- if (dropTarget) {
- canDropTargetIfNeeded =
- AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(targetNS),
- ActionType::dropCollection);
- }
-
- bool canReadSrc = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(sourceNS), ActionType::find);
- bool canReadDest = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(targetNS), ActionType::find);
-
- if (canRename && canDropTargetIfNeeded && (canReadSrc || !canReadDest)) {
- return Status::OK();
- }
+ bool canDropTargetIfNeeded = true;
+ if (dropTarget) {
+ canDropTargetIfNeeded =
+ AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(targetNS), ActionType::dropCollection);
}
- // Check privileges on source collection
- ActionSet actions;
- actions.addAction(ActionType::find);
- actions.addAction(ActionType::dropCollection);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(sourceNS), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ bool canReadSrc = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(sourceNS), ActionType::find);
+ bool canReadDest = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(targetNS), ActionType::find);
- // Check privileges on dest collection
- actions.removeAllActions();
- actions.addAction(ActionType::insert);
- actions.addAction(ActionType::createIndex);
- if (dropTarget) {
- actions.addAction(ActionType::dropCollection);
- }
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(targetNS), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ if (canRename && canDropTargetIfNeeded && (canReadSrc || !canReadDest)) {
+ return Status::OK();
}
+ }
+
+ // Check privileges on source collection
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ actions.addAction(ActionType::dropCollection);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(sourceNS), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
- return Status::OK();
+ // Check privileges on dest collection
+ actions.removeAllActions();
+ actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::createIndex);
+ if (dropTarget) {
+ actions.addAction(ActionType::dropCollection);
+ }
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(targetNS), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
-} // namespace rename_collection
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace rename_collection
+} // namespace mongo
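
The same-database fast path above combines three independent checks. Restated over
plain bools (an illustrative helper, not actual server code; the function name is
invented):

    bool sameDbRenameAllowed(bool canRename,      // renameCollectionSameDB on the db
                             bool dropTarget,     // command requests dropping the target
                             bool canDropTarget,  // dropCollection on the target ns
                             bool canReadSrc,     // find on the source ns
                             bool canReadDest) {  // find on the target ns
        bool canDropTargetIfNeeded = !dropTarget || canDropTarget;
        // (canReadSrc || !canReadDest) refuses the rename only when the caller cannot
        // read the source but can read the destination, i.e. when the rename would
        // expose otherwise unreadable data under a readable name.
        return canRename && canDropTargetIfNeeded && (canReadSrc || !canReadDest);
    }
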
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 0598b67b9c3..5cf096fc511 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -42,83 +42,77 @@
namespace mongo {
- using std::string;
-
- class RepairCursorCmd : public Command {
- public:
- RepairCursorCmd() : Command("repairCursor") {}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- Privilege p(parseResourcePattern(dbname, cmdObj), actions);
- if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
- return Status::OK();
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+using std::string;
+
+class RepairCursorCmd : public Command {
+public:
+ RepairCursorCmd() : Command("repairCursor") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ Privilege p(parseResourcePattern(dbname, cmdObj), actions);
+ if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
+ return Status::OK();
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ NamespaceString ns(parseNs(dbname, cmdObj));
+
+ AutoGetCollectionForRead ctx(txn, ns.ns());
+
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::NamespaceNotFound, "ns does not exist: " + ns.ns()));
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- NamespaceString ns(parseNs(dbname, cmdObj));
-
- AutoGetCollectionForRead ctx(txn, ns.ns());
-
- Collection* collection = ctx.getCollection();
- if (!collection) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NamespaceNotFound,
- "ns does not exist: " + ns.ns()));
- }
-
- auto cursor = collection->getRecordStore()->getCursorForRepair(txn);
- if (!cursor) {
- return appendCommandStatus(result,
- Status(ErrorCodes::CommandNotSupported,
- "repair iterator not supported"));
- }
-
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<MultiIteratorStage> stage(new MultiIteratorStage(txn, ws.get(),
- collection));
- stage->addIterator(std::move(cursor));
-
- PlanExecutor* rawExec;
- Status execStatus = PlanExecutor::make(txn,
- ws.release(),
- stage.release(),
- collection,
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- invariant(execStatus.isOK());
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // 'exec' will be used in getMore(). It was automatically registered on construction
- // due to the auto yield policy, so it could yield during plan selection. We deregister
- // it now so that it can be registed with ClientCursor.
- exec->deregisterExec();
- exec->saveState();
-
- // ClientCursors' constructor inserts them into a global map that manages their
- // lifetimes. That is why the next line isn't leaky.
- ClientCursor* cc = new ClientCursor(collection->getCursorManager(),
- exec.release(),
- ns.ns());
-
- appendCursorResponseObject(cc->cursorid(), ns.ns(), BSONArray(), &result);
-
- return true;
-
+ auto cursor = collection->getRecordStore()->getCursorForRepair(txn);
+ if (!cursor) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::CommandNotSupported, "repair iterator not supported"));
}
- } repairCursorCmd;
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<MultiIteratorStage> stage(
+ new MultiIteratorStage(txn, ws.get(), collection));
+ stage->addIterator(std::move(cursor));
+
+ PlanExecutor* rawExec;
+ Status execStatus = PlanExecutor::make(
+ txn, ws.release(), stage.release(), collection, PlanExecutor::YIELD_AUTO, &rawExec);
+ invariant(execStatus.isOK());
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // 'exec' will be used in getMore(). It was automatically registered on construction
+ // due to the auto yield policy, so it could yield during plan selection. We deregister
+        // it now so that it can be registered with ClientCursor.
+ exec->deregisterExec();
+ exec->saveState();
+
+ // ClientCursors' constructor inserts them into a global map that manages their
+ // lifetimes. That is why the next line isn't leaky.
+ ClientCursor* cc =
+ new ClientCursor(collection->getCursorManager(), exec.release(), ns.ns());
+
+ appendCursorResponseObject(cc->cursorid(), ns.ns(), BSONArray(), &result);
+
+ return true;
+ }
+} repairCursorCmd;
}
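
The "isn't leaky" comment above leans on registration-on-construction: the new object
is immediately reachable through a global structure that manages its lifetime. The same
idea reduced to standard C++ (an illustrative sketch, unrelated to the real
ClientCursor/CursorManager types):

    #include <map>

    struct SelfRegistering {
        static std::map<long long, SelfRegistering*>& registry() {
            static std::map<long long, SelfRegistering*> r;
            return r;
        }
        long long id;
        explicit SelfRegistering(long long i) : id(i) {
            registry()[id] = this;  // reachable, and deletable, via the registry
        }
    };

    void makeOne() {
        new SelfRegistering(7);  // not lost: registry()[7] still refers to it
    }
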
diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp
index d2bf917f6c4..d7b3324efe6 100644
--- a/src/mongo/db/commands/server_status.cpp
+++ b/src/mongo/db/commands/server_status.cpp
@@ -54,273 +54,269 @@
namespace mongo {
- using std::endl;
- using std::map;
- using std::string;
- using std::stringstream;
-
- class CmdServerStatus : public Command {
- public:
-
- CmdServerStatus()
- : Command("serverStatus", true),
- _started( curTimeMillis64() ),
- _runCalled( false ) {
- }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
+using std::endl;
+using std::map;
+using std::string;
+using std::stringstream;
+
+class CmdServerStatus : public Command {
+public:
+ CmdServerStatus()
+ : Command("serverStatus", true), _started(curTimeMillis64()), _runCalled(false) {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual void help( stringstream& help ) const {
- help << "returns lots of administrative server statistics";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::serverStatus);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- _runCalled = true;
-
- long long start = Listener::getElapsedTimeMillis();
- BSONObjBuilder timeBuilder(256);
-
- const auto authSession = AuthorizationSession::get(ClientBasic::getCurrent());
-
- // --- basic fields that are global
-
- result.append("host", prettyHostName() );
- result.append("version", versionString);
- result.append("process", serverGlobalParams.binaryName);
- result.append("pid", ProcessId::getCurrent().asLongLong());
- result.append("uptime", (double) (time(0) - serverGlobalParams.started));
- result.append("uptimeMillis", (long long)(curTimeMillis64()-_started));
- result.append("uptimeEstimate",(double) (start/1000));
- result.appendDate( "localTime" , jsTime() );
-
- timeBuilder.appendNumber( "after basic" , Listener::getElapsedTimeMillis() - start );
-
- // --- all sections
-
- for ( SectionMap::const_iterator i = _sections->begin(); i != _sections->end(); ++i ) {
- ServerStatusSection* section = i->second;
-
- std::vector<Privilege> requiredPrivileges;
- section->addRequiredPrivileges(&requiredPrivileges);
- if (!authSession->isAuthorizedForPrivileges(requiredPrivileges))
- continue;
-
- bool include = section->includeByDefault();
-
- BSONElement e = cmdObj[section->getSectionName()];
- if ( e.type() ) {
- include = e.trueValue();
- }
-
- if ( ! include )
- continue;
-
- BSONObj data = section->generateSection(txn, e);
- if ( data.isEmpty() )
- continue;
-
- result.append( section->getSectionName(), data );
- timeBuilder.appendNumber( static_cast<string>(str::stream() << "after " << section->getSectionName()),
- Listener::getElapsedTimeMillis() - start );
- }
+ virtual void help(stringstream& help) const {
+ help << "returns lots of administrative server statistics";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::serverStatus);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ _runCalled = true;
- // --- counters
- bool includeMetricTree = MetricTree::theMetricTree != NULL;
- if ( cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue() )
- includeMetricTree = false;
+ long long start = Listener::getElapsedTimeMillis();
+ BSONObjBuilder timeBuilder(256);
- if ( includeMetricTree ) {
- MetricTree::theMetricTree->appendTo( result );
- }
+ const auto authSession = AuthorizationSession::get(ClientBasic::getCurrent());
- // --- some hard coded global things hard to pull out
+ // --- basic fields that are global
- {
- RamLog::LineIterator rl(RamLog::get("warnings"));
- if (rl.lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes
- BSONArrayBuilder arr(result.subarrayStart("warnings"));
- while (rl.more()) {
- arr.append(rl.next());
- }
- arr.done();
- }
- }
+ result.append("host", prettyHostName());
+ result.append("version", versionString);
+ result.append("process", serverGlobalParams.binaryName);
+ result.append("pid", ProcessId::getCurrent().asLongLong());
+ result.append("uptime", (double)(time(0) - serverGlobalParams.started));
+ result.append("uptimeMillis", (long long)(curTimeMillis64() - _started));
+ result.append("uptimeEstimate", (double)(start / 1000));
+ result.appendDate("localTime", jsTime());
+
+ timeBuilder.appendNumber("after basic", Listener::getElapsedTimeMillis() - start);
+
+ // --- all sections
+
+ for (SectionMap::const_iterator i = _sections->begin(); i != _sections->end(); ++i) {
+ ServerStatusSection* section = i->second;
+
+ std::vector<Privilege> requiredPrivileges;
+ section->addRequiredPrivileges(&requiredPrivileges);
+ if (!authSession->isAuthorizedForPrivileges(requiredPrivileges))
+ continue;
+
+ bool include = section->includeByDefault();
- timeBuilder.appendNumber( "at end" , Listener::getElapsedTimeMillis() - start );
- if ( Listener::getElapsedTimeMillis() - start > 1000 ) {
- BSONObj t = timeBuilder.obj();
- log() << "serverStatus was very slow: " << t << endl;
- result.append( "timing" , t );
+ BSONElement e = cmdObj[section->getSectionName()];
+ if (e.type()) {
+ include = e.trueValue();
}
- return true;
+ if (!include)
+ continue;
+
+ BSONObj data = section->generateSection(txn, e);
+ if (data.isEmpty())
+ continue;
+
+ result.append(section->getSectionName(), data);
+ timeBuilder.appendNumber(
+ static_cast<string>(str::stream() << "after " << section->getSectionName()),
+ Listener::getElapsedTimeMillis() - start);
+ }
+
+ // --- counters
+ bool includeMetricTree = MetricTree::theMetricTree != NULL;
+ if (cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue())
+ includeMetricTree = false;
+
+ if (includeMetricTree) {
+ MetricTree::theMetricTree->appendTo(result);
}
- void addSection( ServerStatusSection* section ) {
- verify( ! _runCalled );
- if ( _sections == 0 ) {
- _sections = new SectionMap();
+ // --- some hard coded global things hard to pull out
+
+ {
+ RamLog::LineIterator rl(RamLog::get("warnings"));
+ if (rl.lastWrite() >= time(0) - (10 * 60)) { // only show warnings from last 10 minutes
+ BSONArrayBuilder arr(result.subarrayStart("warnings"));
+ while (rl.more()) {
+ arr.append(rl.next());
+ }
+ arr.done();
}
- (*_sections)[section->getSectionName()] = section;
}
- private:
- const unsigned long long _started;
- bool _runCalled;
+ timeBuilder.appendNumber("at end", Listener::getElapsedTimeMillis() - start);
+ if (Listener::getElapsedTimeMillis() - start > 1000) {
+ BSONObj t = timeBuilder.obj();
+ log() << "serverStatus was very slow: " << t << endl;
+ result.append("timing", t);
+ }
+
+ return true;
+ }
+
+ void addSection(ServerStatusSection* section) {
+ verify(!_runCalled);
+ if (_sections == 0) {
+ _sections = new SectionMap();
+ }
+ (*_sections)[section->getSectionName()] = section;
+ }
+
+private:
+ const unsigned long long _started;
+ bool _runCalled;
+
+ typedef map<string, ServerStatusSection*> SectionMap;
+ static SectionMap* _sections;
+} cmdServerStatus;
+
+
+CmdServerStatus::SectionMap* CmdServerStatus::_sections = 0;
- typedef map< string , ServerStatusSection* > SectionMap;
- static SectionMap* _sections;
- } cmdServerStatus;
+ServerStatusSection::ServerStatusSection(const string& sectionName) : _sectionName(sectionName) {
+ cmdServerStatus.addSection(this);
+}
+
+OpCounterServerStatusSection::OpCounterServerStatusSection(const string& sectionName,
+ OpCounters* counters)
+ : ServerStatusSection(sectionName), _counters(counters) {}
+
+BSONObj OpCounterServerStatusSection::generateSection(OperationContext* txn,
+ const BSONElement& configElement) const {
+ return _counters->getObj();
+}
+
+OpCounterServerStatusSection globalOpCounterServerStatusSection("opcounters", &globalOpCounters);
- CmdServerStatus::SectionMap* CmdServerStatus::_sections = 0;
+namespace {
- ServerStatusSection::ServerStatusSection( const string& sectionName )
- : _sectionName( sectionName ) {
- cmdServerStatus.addSection( this );
+// some universal sections
+
+class Connections : public ServerStatusSection {
+public:
+ Connections() : ServerStatusSection("connections") {}
+ virtual bool includeByDefault() const {
+ return true;
}
- OpCounterServerStatusSection::OpCounterServerStatusSection( const string& sectionName, OpCounters* counters )
- : ServerStatusSection( sectionName ), _counters( counters ){
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder bb;
+ bb.append("current", Listener::globalTicketHolder.used());
+ bb.append("available", Listener::globalTicketHolder.available());
+ bb.append("totalCreated", Listener::globalConnectionNumber.load());
+ return bb.obj();
}
- BSONObj OpCounterServerStatusSection::generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
- return _counters->getObj();
+} connections;
+
+class ExtraInfo : public ServerStatusSection {
+public:
+ ExtraInfo() : ServerStatusSection("extra_info") {}
+ virtual bool includeByDefault() const {
+ return true;
}
-
- OpCounterServerStatusSection globalOpCounterServerStatusSection( "opcounters", &globalOpCounters );
-
-
- namespace {
-
- // some universal sections
-
- class Connections : public ServerStatusSection {
- public:
- Connections() : ServerStatusSection( "connections" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
-
- BSONObjBuilder bb;
- bb.append( "current" , Listener::globalTicketHolder.used() );
- bb.append( "available" , Listener::globalTicketHolder.available() );
- bb.append( "totalCreated" , Listener::globalConnectionNumber.load() );
- return bb.obj();
- }
- } connections;
-
- class ExtraInfo : public ServerStatusSection {
- public:
- ExtraInfo() : ServerStatusSection( "extra_info" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
-
- BSONObjBuilder bb;
-
- bb.append("note", "fields vary by platform");
- ProcessInfo p;
- p.getExtraInfo(bb);
-
- return bb.obj();
- }
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder bb;
- } extraInfo;
+ bb.append("note", "fields vary by platform");
+ ProcessInfo p;
+ p.getExtraInfo(bb);
+ return bb.obj();
+ }
- class Asserts : public ServerStatusSection {
- public:
- Asserts() : ServerStatusSection( "asserts" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
+} extraInfo;
- BSONObjBuilder asserts;
- asserts.append( "regular" , assertionCount.regular );
- asserts.append( "warning" , assertionCount.warning );
- asserts.append( "msg" , assertionCount.msg );
- asserts.append( "user" , assertionCount.user );
- asserts.append( "rollovers" , assertionCount.rollovers );
- return asserts.obj();
- }
-
- } asserts;
+class Asserts : public ServerStatusSection {
+public:
+ Asserts() : ServerStatusSection("asserts") {}
+ virtual bool includeByDefault() const {
+ return true;
+ }
- class Network : public ServerStatusSection {
- public:
- Network() : ServerStatusSection( "network" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder asserts;
+ asserts.append("regular", assertionCount.regular);
+ asserts.append("warning", assertionCount.warning);
+ asserts.append("msg", assertionCount.msg);
+ asserts.append("user", assertionCount.user);
+ asserts.append("rollovers", assertionCount.rollovers);
+ return asserts.obj();
+ }
- BSONObjBuilder b;
- networkCounter.append( b );
- return b.obj();
- }
-
- } network;
+} asserts;
-#ifdef MONGO_CONFIG_SSL
- class Security : public ServerStatusSection {
- public:
- Security() : ServerStatusSection( "security" ) {}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
- BSONObj result;
- if (getSSLManager()) {
- result = getSSLManager()->getSSLConfiguration().getServerStatusBSON();
- }
- return result;
- }
- } security;
-#endif
+class Network : public ServerStatusSection {
+public:
+ Network() : ServerStatusSection("network") {}
+ virtual bool includeByDefault() const {
+ return true;
+ }
- class MemBase : public ServerStatusMetric {
- public:
- MemBase() : ServerStatusMetric(".mem.bits") {}
- virtual void appendAtLeaf( BSONObjBuilder& b ) const {
- b.append( "bits", sizeof(int*) == 4 ? 32 : 64 );
-
- ProcessInfo p;
- int v = 0;
- if ( p.supported() ) {
- b.appendNumber( "resident" , p.getResidentSize() );
- v = p.getVirtualMemorySize();
- b.appendNumber( "virtual" , v );
- b.appendBool( "supported" , true );
- }
- else {
- b.append( "note" , "not all mem info support on this platform" );
- b.appendBool( "supported" , false );
- }
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder b;
+ networkCounter.append(b);
+ return b.obj();
+ }
- }
- } memBase;
+} network;
+
+#ifdef MONGO_CONFIG_SSL
+class Security : public ServerStatusSection {
+public:
+ Security() : ServerStatusSection("security") {}
+ virtual bool includeByDefault() const {
+ return true;
}
-}
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj result;
+ if (getSSLManager()) {
+ result = getSSLManager()->getSSLConfiguration().getServerStatusBSON();
+ }
+
+ return result;
+ }
+} security;
+#endif
+class MemBase : public ServerStatusMetric {
+public:
+ MemBase() : ServerStatusMetric(".mem.bits") {}
+ virtual void appendAtLeaf(BSONObjBuilder& b) const {
+ b.append("bits", sizeof(int*) == 4 ? 32 : 64);
+
+ ProcessInfo p;
+ int v = 0;
+ if (p.supported()) {
+ b.appendNumber("resident", p.getResidentSize());
+ v = p.getVirtualMemorySize();
+ b.appendNumber("virtual", v);
+ b.appendBool("supported", true);
+ } else {
+ b.append("note", "not all mem info support on this platform");
+ b.appendBool("supported", false);
+ }
+ }
+} memBase;
+}
+}
diff --git a/src/mongo/db/commands/server_status.h b/src/mongo/db/commands/server_status.h
index 695fac9b8e1..1ebe57280d7 100644
--- a/src/mongo/db/commands/server_status.h
+++ b/src/mongo/db/commands/server_status.h
@@ -38,60 +38,61 @@
namespace mongo {
- class ServerStatusSection {
- public:
- ServerStatusSection( const std::string& sectionName );
- virtual ~ServerStatusSection(){}
+class ServerStatusSection {
+public:
+ ServerStatusSection(const std::string& sectionName);
+ virtual ~ServerStatusSection() {}
- const std::string& getSectionName() const { return _sectionName; }
+ const std::string& getSectionName() const {
+ return _sectionName;
+ }
- /**
- * if this returns true, if the user doesn't mention this section
- * it will be included in the result
- * if they do : 1, it will be included
- * if they do : 0, it will not
- *
- * examples (section 'foo')
- * includeByDefault returning true
- * foo : 0 = not included
- * foo : 1 = included
- * foo missing = included
- * includeByDefault returning false
- * foo : 0 = not included
- * foo : 1 = included
- * foo missing = false
- */
- virtual bool includeByDefault() const = 0;
-
- /**
- * Adds the privileges that are required to view this section
- * TODO: Remove this empty default implementation and implement for every section.
- */
- virtual void addRequiredPrivileges(std::vector<Privilege>* out) {};
+ /**
+     * if this returns true and the user doesn't mention this section,
+     * it will be included in the result;
+     * if they pass 1, it will be included;
+     * if they pass 0, it will not
+ *
+ * examples (section 'foo')
+ * includeByDefault returning true
+ * foo : 0 = not included
+ * foo : 1 = included
+ * foo missing = included
+ * includeByDefault returning false
+ * foo : 0 = not included
+ * foo : 1 = included
+     *      foo missing = not included
+ */
+ virtual bool includeByDefault() const = 0;
- /**
- * actually generate the result
- * @param configElement the element from the actual command related to this section
- * so if the section is 'foo', this is cmdObj['foo']
- */
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const = 0;
+ /**
+ * Adds the privileges that are required to view this section
+ * TODO: Remove this empty default implementation and implement for every section.
+ */
+ virtual void addRequiredPrivileges(std::vector<Privilege>* out){};
- private:
- const std::string _sectionName;
- };
+ /**
+ * actually generate the result
+ * @param configElement the element from the actual command related to this section
+ * so if the section is 'foo', this is cmdObj['foo']
+ */
+ virtual BSONObj generateSection(OperationContext* txn,
+ const BSONElement& configElement) const = 0;
- class OpCounterServerStatusSection : public ServerStatusSection {
- public:
- OpCounterServerStatusSection( const std::string& sectionName, OpCounters* counters );
- virtual bool includeByDefault() const { return true; }
-
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const;
+private:
+ const std::string _sectionName;
+};
- private:
- const OpCounters* _counters;
- };
+class OpCounterServerStatusSection : public ServerStatusSection {
+public:
+ OpCounterServerStatusSection(const std::string& sectionName, OpCounters* counters);
+ virtual bool includeByDefault() const {
+ return true;
+ }
-}
+ virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+private:
+ const OpCounters* _counters;
+};
+}
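
The header above is the whole extension point: a new section subclasses
ServerStatusSection, and the base-class constructor (defined in server_status.cpp,
where it calls cmdServerStatus.addSection(this)) registers the instance with the
serverStatus command. A hypothetical section following that pattern (the name and
contents are invented for illustration):

    class ExampleSection : public ServerStatusSection {
    public:
        ExampleSection() : ServerStatusSection("exampleSection") {}

        // Only reported when the client asks for {exampleSection: 1}.
        virtual bool includeByDefault() const {
            return false;
        }

        virtual BSONObj generateSection(OperationContext* txn,
                                        const BSONElement& configElement) const {
            BSONObjBuilder bb;
            bb.append("note", "illustration only");
            return bb.obj();
        }
    } exampleSection;
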
diff --git a/src/mongo/db/commands/server_status_internal.cpp b/src/mongo/db/commands/server_status_internal.cpp
index e329a3724a4..bb564a9bcb6 100644
--- a/src/mongo/db/commands/server_status_internal.cpp
+++ b/src/mongo/db/commands/server_status_internal.cpp
@@ -37,53 +37,53 @@
namespace mongo {
- using std::cerr;
- using std::endl;
- using std::map;
- using std::string;
+using std::cerr;
+using std::endl;
+using std::map;
+using std::string;
- using namespace mongoutils;
+using namespace mongoutils;
- MetricTree* MetricTree::theMetricTree = NULL;
+MetricTree* MetricTree::theMetricTree = NULL;
- void MetricTree::add( ServerStatusMetric* metric ) {
- string name = metric->getMetricName();
- if ( name[0] == '.' )
- _add( name.substr(1), metric );
- else
- _add( str::stream() << "metrics." << name, metric );
- }
-
- void MetricTree::_add( const string& path, ServerStatusMetric* metric ) {
- size_t idx = path.find( "." );
- if ( idx == string::npos ) {
- _metrics[path] = metric;
- return;
- }
+void MetricTree::add(ServerStatusMetric* metric) {
+ string name = metric->getMetricName();
+ if (name[0] == '.')
+ _add(name.substr(1), metric);
+ else
+ _add(str::stream() << "metrics." << name, metric);
+}
- string myLevel = path.substr( 0, idx );
- if ( _metrics.count( myLevel ) > 0 ) {
- cerr << "metric conflict on: " << myLevel << endl;
- fassertFailed( 16461 );
- }
+void MetricTree::_add(const string& path, ServerStatusMetric* metric) {
+ size_t idx = path.find(".");
+ if (idx == string::npos) {
+ _metrics[path] = metric;
+ return;
+ }
- MetricTree*& sub = _subtrees[myLevel];
- if ( ! sub )
- sub = new MetricTree();
- sub->_add( path.substr( idx + 1 ), metric );
+ string myLevel = path.substr(0, idx);
+ if (_metrics.count(myLevel) > 0) {
+ cerr << "metric conflict on: " << myLevel << endl;
+ fassertFailed(16461);
}
- void MetricTree::appendTo( BSONObjBuilder& b ) const {
- for ( map<string,ServerStatusMetric*>::const_iterator i = _metrics.begin(); i != _metrics.end(); ++i ) {
- i->second->appendAtLeaf( b );
- }
+ MetricTree*& sub = _subtrees[myLevel];
+ if (!sub)
+ sub = new MetricTree();
+ sub->_add(path.substr(idx + 1), metric);
+}
- for ( map<string,MetricTree*>::const_iterator i = _subtrees.begin(); i != _subtrees.end(); ++i ) {
- BSONObjBuilder bb( b.subobjStart( i->first ) );
- i->second->appendTo( bb );
- bb.done();
- }
+void MetricTree::appendTo(BSONObjBuilder& b) const {
+ for (map<string, ServerStatusMetric*>::const_iterator i = _metrics.begin(); i != _metrics.end();
+ ++i) {
+ i->second->appendAtLeaf(b);
}
+ for (map<string, MetricTree*>::const_iterator i = _subtrees.begin(); i != _subtrees.end();
+ ++i) {
+ BSONObjBuilder bb(b.subobjStart(i->first));
+ i->second->appendTo(bb);
+ bb.done();
+ }
+}
}
-
diff --git a/src/mongo/db/commands/server_status_internal.h b/src/mongo/db/commands/server_status_internal.h
index 37e3bbf3439..6f5e15d5d33 100644
--- a/src/mongo/db/commands/server_status_internal.h
+++ b/src/mongo/db/commands/server_status_internal.h
@@ -37,21 +37,20 @@
namespace mongo {
- class ServerStatusMetric;
+class ServerStatusMetric;
- class MetricTree {
- public:
- void add( ServerStatusMetric* metric );
+class MetricTree {
+public:
+ void add(ServerStatusMetric* metric);
- void appendTo( BSONObjBuilder& b ) const;
+ void appendTo(BSONObjBuilder& b) const;
- static MetricTree* theMetricTree;
- private:
+ static MetricTree* theMetricTree;
- void _add( const std::string& path, ServerStatusMetric* metric );
-
- std::map<std::string, MetricTree*> _subtrees;
- std::map<std::string, ServerStatusMetric*> _metrics;
- };
+private:
+ void _add(const std::string& path, ServerStatusMetric* metric);
+ std::map<std::string, MetricTree*> _subtrees;
+ std::map<std::string, ServerStatusMetric*> _metrics;
+};
}
diff --git a/src/mongo/db/commands/server_status_metric.cpp b/src/mongo/db/commands/server_status_metric.cpp
index 1e999635751..999205b9704 100644
--- a/src/mongo/db/commands/server_status_metric.cpp
+++ b/src/mongo/db/commands/server_status_metric.cpp
@@ -34,25 +34,20 @@
namespace mongo {
- using std::string;
+using std::string;
- ServerStatusMetric::ServerStatusMetric(const string& nameIn)
- : _name( nameIn ),
- _leafName( _parseLeafName( nameIn ) ) {
-
- if ( MetricTree::theMetricTree == 0 )
- MetricTree::theMetricTree = new MetricTree();
- MetricTree::theMetricTree->add( this );
- }
-
- string ServerStatusMetric::_parseLeafName( const string& name ) {
- size_t idx = name.rfind( "." );
- if ( idx == string::npos )
- return name;
-
- return name.substr( idx + 1 );
- }
+ServerStatusMetric::ServerStatusMetric(const string& nameIn)
+ : _name(nameIn), _leafName(_parseLeafName(nameIn)) {
+ if (MetricTree::theMetricTree == 0)
+ MetricTree::theMetricTree = new MetricTree();
+ MetricTree::theMetricTree->add(this);
+}
+string ServerStatusMetric::_parseLeafName(const string& name) {
+ size_t idx = name.rfind(".");
+ if (idx == string::npos)
+ return name;
+ return name.substr(idx + 1);
+}
}
-
diff --git a/src/mongo/db/commands/server_status_metric.h b/src/mongo/db/commands/server_status_metric.h
index 239c66fa96b..83fc5eff452 100644
--- a/src/mongo/db/commands/server_status_metric.h
+++ b/src/mongo/db/commands/server_status_metric.h
@@ -36,56 +36,57 @@
namespace mongo {
- class ServerStatusMetric {
- public:
- /**
- * @param name is a dotted path of a counter name
- * if name starts with . its treated as a path from the serverStatus root
- * otherwise it will live under the "counters" namespace
- * so foo.bar would be serverStatus().counters.foo.bar
- */
- ServerStatusMetric(const std::string& name);
- virtual ~ServerStatusMetric(){}
-
- std::string getMetricName() const { return _name; }
-
- virtual void appendAtLeaf( BSONObjBuilder& b ) const = 0;
-
- protected:
- static std::string _parseLeafName( const std::string& name );
-
- const std::string _name;
- const std::string _leafName;
- };
-
+class ServerStatusMetric {
+public:
/**
- * usage
- *
- * declared once
- * Counter counter;
- * ServerStatusMetricField myAwesomeCounterDisplay( "path.to.counter", &counter );
- *
- * call
- * counter.hit();
- *
- * will show up in db.serverStatus().metrics.path.to.counter
+ * @param name is a dotted path of a counter name
+     * if name starts with . it's treated as a path from the serverStatus root
+     * otherwise it will live under the "metrics" namespace
+     * so foo.bar would be serverStatus().metrics.foo.bar
*/
- template< typename T >
- class ServerStatusMetricField : public ServerStatusMetric {
- public:
- ServerStatusMetricField( const std::string& name, const T* t )
- : ServerStatusMetric(name), _t(t) {
- }
+ ServerStatusMetric(const std::string& name);
+ virtual ~ServerStatusMetric() {}
- const T* get() { return _t; }
+ std::string getMetricName() const {
+ return _name;
+ }
- virtual void appendAtLeaf( BSONObjBuilder& b ) const {
- b.append( _leafName, *_t );
- }
+ virtual void appendAtLeaf(BSONObjBuilder& b) const = 0;
- private:
- const T* _t;
- };
+protected:
+ static std::string _parseLeafName(const std::string& name);
-}
+ const std::string _name;
+ const std::string _leafName;
+};
+/**
+ * usage
+ *
+ * declared once
+ * Counter counter;
+ * ServerStatusMetricField myAwesomeCounterDisplay( "path.to.counter", &counter );
+ *
+ * call
+ * counter.hit();
+ *
+ * will show up in db.serverStatus().metrics.path.to.counter
+ */
+template <typename T>
+class ServerStatusMetricField : public ServerStatusMetric {
+public:
+ ServerStatusMetricField(const std::string& name, const T* t)
+ : ServerStatusMetric(name), _t(t) {}
+
+ const T* get() {
+ return _t;
+ }
+
+ virtual void appendAtLeaf(BSONObjBuilder& b) const {
+ b.append(_leafName, *_t);
+ }
+
+private:
+ const T* _t;
+};
+}
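
Concretely, the usage comment above boils down to declaring a value and a
ServerStatusMetricField that points at it; because the name below does not start
with '.', MetricTree::add (see server_status_internal.cpp) files it under the
"metrics" subtree. A minimal sketch — the counter is a plain long long rather than a
dedicated atomic counter type, and the metric name is invented:

    long long exampleOps = 0;
    ServerStatusMetricField<long long> exampleOpsDisplay("commands.example.ops", &exampleOps);

    // elsewhere, on each operation:
    ++exampleOps;

    // appears as db.serverStatus().metrics.commands.example.ops
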
diff --git a/src/mongo/db/commands/shutdown.h b/src/mongo/db/commands/shutdown.h
index 0adef08311d..c184c22aa4f 100644
--- a/src/mongo/db/commands/shutdown.h
+++ b/src/mongo/db/commands/shutdown.h
@@ -35,24 +35,31 @@
namespace mongo {
- class CmdShutdown : public Command {
- public:
- CmdShutdown() : Command("shutdown") { }
+class CmdShutdown : public Command {
+public:
+ CmdShutdown() : Command("shutdown") {}
- virtual bool requiresAuth() { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) { return true; }
- virtual bool slaveOk() const {
- return true;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out);
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ virtual bool requiresAuth() {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out);
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- protected:
- static void shutdownHelper();
- };
+protected:
+ static void shutdownHelper();
+};
} // namespace mongo
-
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 1c4c9d7a508..733b3b19cee 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -49,193 +49,205 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
-
- /* For testing only, not for general use. Enabled via command-line */
- class GodInsert : public Command {
- public:
- GodInsert() : Command( "godinsert" ) { }
- virtual bool adminOnly() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- virtual void help( stringstream &help ) const {
- help << "internal. for testing only.";
- }
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string coll = cmdObj[ "godinsert" ].valuestrsafe();
- log() << "test only command godinsert invoked coll:" << coll << endl;
- uassert( 13049, "godinsert must specify a collection", !coll.empty() );
- string ns = dbname + "." + coll;
- BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
-
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, ns );
- Database* db = ctx.db();
-
- WriteUnitOfWork wunit(txn);
- txn->setReplicatedWrites(false);
- Collection* collection = db->getCollection( ns );
- if ( !collection ) {
- collection = db->createCollection( txn, ns );
- if ( !collection ) {
- errmsg = "could not create collection";
- return false;
- }
- }
- StatusWith<RecordId> res = collection->insertDocument( txn, obj, false );
- Status status = res.getStatus();
- if (status.isOK()) {
- wunit.commit();
+using std::endl;
+using std::string;
+using std::stringstream;
+
+/* For testing only, not for general use. Enabled via command-line */
+class GodInsert : public Command {
+public:
+ GodInsert() : Command("godinsert") {}
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ virtual void help(stringstream& help) const {
+ help << "internal. for testing only.";
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string coll = cmdObj["godinsert"].valuestrsafe();
+ log() << "test only command godinsert invoked coll:" << coll << endl;
+ uassert(13049, "godinsert must specify a collection", !coll.empty());
+ string ns = dbname + "." + coll;
+ BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
+
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
+ OldClientContext ctx(txn, ns);
+ Database* db = ctx.db();
+
+ WriteUnitOfWork wunit(txn);
+ txn->setReplicatedWrites(false);
+ Collection* collection = db->getCollection(ns);
+ if (!collection) {
+ collection = db->createCollection(txn, ns);
+ if (!collection) {
+ errmsg = "could not create collection";
+ return false;
}
- return appendCommandStatus( result, res.getStatus() );
}
- };
-
- /* for diagnostic / testing purposes. Enabled via command line. */
- class CmdSleep : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual bool slaveOk() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
- help << "w:true write lock. secs:<seconds>";
+ StatusWith<RecordId> res = collection->insertDocument(txn, obj, false);
+ Status status = res.getStatus();
+ if (status.isOK()) {
+ wunit.commit();
}
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- CmdSleep() : Command("sleep") { }
- bool run(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- log() << "test only command sleep invoked" << endl;
- long long millis = 10 * 1000;
-
- if (cmdObj["secs"].isNumber() && cmdObj["millis"].isNumber()) {
- millis = cmdObj["secs"].numberLong() * 1000 + cmdObj["millis"].numberLong();
- }
- else if (cmdObj["secs"].isNumber()) {
- millis = cmdObj["secs"].numberLong() * 1000;
- }
- else if (cmdObj["millis"].isNumber()) {
- millis = cmdObj["millis"].numberLong();
- }
-
- if(cmdObj.getBoolField("w")) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- sleepmillis(millis);
- }
- else {
- ScopedTransaction transaction(txn, MODE_S);
- Lock::GlobalRead lk(txn->lockState());
- sleepmillis(millis);
- }
-
- // Interrupt point for testing (e.g. maxTimeMS).
- txn->checkForInterrupt();
+ return appendCommandStatus(result, res.getStatus());
+ }
+};
- return true;
- }
- };
-
- // Testing only, enabled via command-line.
- class CapTrunc : public Command {
- public:
- CapTrunc() : Command( "captrunc" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string coll = cmdObj[ "captrunc" ].valuestrsafe();
- uassert( 13416, "captrunc must specify a collection", !coll.empty() );
- NamespaceString nss( dbname, coll );
- int n = cmdObj.getIntField( "n" );
- bool inc = cmdObj.getBoolField( "inc" ); // inclusive range?
-
- OldClientWriteContext ctx(txn, nss.ns() );
- Collection* collection = ctx.getCollection();
- massert( 13417, "captrunc collection not found or empty", collection);
-
- RecordId end;
- {
- std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn,
- nss.ns(),
- collection,
- InternalPlanner::BACKWARD));
- // We remove 'n' elements so the start is one past that
- for( int i = 0; i < n + 1; ++i ) {
- PlanExecutor::ExecState state = exec->getNext(NULL, &end);
- massert( 13418, "captrunc invalid n", PlanExecutor::ADVANCED == state);
- }
- }
- WriteUnitOfWork wuow(txn);
- collection->temp_cappedTruncateAfter( txn, end, inc );
- wuow.commit();
- return true;
- }
- };
-
- // Testing-only, enabled via command line.
- class EmptyCapped : public Command {
- public:
- EmptyCapped() : Command( "emptycapped" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
-
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
-
- return appendCommandStatus(result, emptyCapped(txn, NamespaceString(ns)));
+/* for diagnostic / testing purposes. Enabled via command line. */
+class CmdSleep : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
+ help << "w:true write lock. secs:<seconds>";
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ CmdSleep() : Command("sleep") {}
+ bool run(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ log() << "test only command sleep invoked" << endl;
+ long long millis = 10 * 1000;
+
+ if (cmdObj["secs"].isNumber() && cmdObj["millis"].isNumber()) {
+ millis = cmdObj["secs"].numberLong() * 1000 + cmdObj["millis"].numberLong();
+ } else if (cmdObj["secs"].isNumber()) {
+ millis = cmdObj["secs"].numberLong() * 1000;
+ } else if (cmdObj["millis"].isNumber()) {
+ millis = cmdObj["millis"].numberLong();
}
- };
+ if (cmdObj.getBoolField("w")) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ sleepmillis(millis);
+ } else {
+ ScopedTransaction transaction(txn, MODE_S);
+ Lock::GlobalRead lk(txn->lockState());
+ sleepmillis(millis);
+ }
- // ----------------------------
+ // Interrupt point for testing (e.g. maxTimeMS).
+ txn->checkForInterrupt();
- MONGO_INITIALIZER(RegisterEmptyCappedCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CapTrunc();
- new CmdSleep();
- new EmptyCapped();
- new GodInsert();
+ return true;
+ }
+};
+
+// Testing only, enabled via command-line.
+class CapTrunc : public Command {
+public:
+ CapTrunc() : Command("captrunc") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string coll = cmdObj["captrunc"].valuestrsafe();
+ uassert(13416, "captrunc must specify a collection", !coll.empty());
+ NamespaceString nss(dbname, coll);
+ int n = cmdObj.getIntField("n");
+ bool inc = cmdObj.getBoolField("inc"); // inclusive range?
+
+ OldClientWriteContext ctx(txn, nss.ns());
+ Collection* collection = ctx.getCollection();
+ massert(13417, "captrunc collection not found or empty", collection);
+
+ RecordId end;
+ {
+ std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
+ txn, nss.ns(), collection, InternalPlanner::BACKWARD));
+ // We remove 'n' elements so the start is one past that
+ for (int i = 0; i < n + 1; ++i) {
+ PlanExecutor::ExecState state = exec->getNext(NULL, &end);
+ massert(13418, "captrunc invalid n", PlanExecutor::ADVANCED == state);
+ }
}
- return Status::OK();
+ WriteUnitOfWork wuow(txn);
+ collection->temp_cappedTruncateAfter(txn, end, inc);
+ wuow.commit();
+ return true;
+ }
+};
+
+// Testing-only, enabled via command line.
+class EmptyCapped : public Command {
+public:
+ EmptyCapped() : Command("emptycapped") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
}
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
+
+ return appendCommandStatus(result, emptyCapped(txn, NamespaceString(ns)));
+ }
+};
+// ----------------------------
+MONGO_INITIALIZER(RegisterEmptyCappedCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CapTrunc();
+ new CmdSleep();
+ new EmptyCapped();
+ new GodInsert();
+ }
+ return Status::OK();
+}
}
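
All four commands above register themselves only when test commands are enabled
(Command::testCommandsEnabled), so they are invisible in a normal deployment. For
reference, the shapes of two of the command documents they parse, built with the BSON
macro (values and collection names are placeholders, illustration only):

    BSONObj sleepCmd = BSON("sleep" << 1 << "w" << true << "secs" << 5);
    BSONObj captruncCmd = BSON("captrunc" << "coll" << "n" << 10 << "inc" << false);
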
diff --git a/src/mongo/db/commands/top_command.cpp b/src/mongo/db/commands/top_command.cpp
index 3328c286fd0..b716457f311 100644
--- a/src/mongo/db/commands/top_command.cpp
+++ b/src/mongo/db/commands/top_command.cpp
@@ -40,52 +40,56 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- class TopCommand : public Command {
- public:
- TopCommand() : Command("top", true) {}
+class TopCommand : public Command {
+public:
+ TopCommand() : Command("top", true) {}
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help(std::stringstream& help) const {
- help << "usage by collection, in micros ";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::top);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- virtual bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
- {
- BSONObjBuilder b( result.subobjStart( "totals" ) );
- b.append( "note", "all times in microseconds" );
- Top::get(txn->getClient()->getServiceContext()).append(b);
- b.done();
- }
- return true;
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(std::stringstream& help) const {
+ help << "usage by collection, in micros ";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::top);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ {
+ BSONObjBuilder b(result.subobjStart("totals"));
+ b.append("note", "all times in microseconds");
+ Top::get(txn->getClient()->getServiceContext()).append(b);
+ b.done();
}
+ return true;
+ }
+};
- };
-
- //
- // Command instance.
- // Registers command with the command system and make command
- // available to the client.
- //
-
- MONGO_INITIALIZER(RegisterTopCommand)(InitializerContext* context) {
+//
+// Command instance.
+// Registers the command with the command system and makes the command
+// available to the client.
+//
- new TopCommand();
+MONGO_INITIALIZER(RegisterTopCommand)(InitializerContext* context) {
+ new TopCommand();
- return Status::OK();
- }
-} // namespace
+ return Status::OK();
+}
+} // namespace
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index 1b71e55ab53..9beace559f2 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -52,66 +52,71 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- class TouchCmd : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool maintenanceMode() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "touch collection\n"
+class TouchCmd : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool maintenanceMode() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "touch collection\n"
"Page in all pages of memory containing every extent for the given collection\n"
"{ touch : <collection_name>, [data : true] , [index : true] }\n"
" at least one of data or index must be true; default is both are false\n";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::touch);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- TouchCmd() : Command("touch") { }
-
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::touch);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ TouchCmd() : Command("touch") {}
- const NamespaceString nss(ns);
- if ( ! nss.isNormal() ) {
- errmsg = "bad namespace name";
- return false;
- }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
- bool touch_indexes( cmdObj["index"].trueValue() );
- bool touch_data( cmdObj["data"].trueValue() );
+ const NamespaceString nss(ns);
+ if (!nss.isNormal()) {
+ errmsg = "bad namespace name";
+ return false;
+ }
- if ( ! (touch_indexes || touch_data) ) {
- errmsg = "must specify at least one of (data:true, index:true)";
- return false;
- }
+ bool touch_indexes(cmdObj["index"].trueValue());
+ bool touch_data(cmdObj["data"].trueValue());
- AutoGetCollectionForRead context(txn, nss);
+ if (!(touch_indexes || touch_data)) {
+ errmsg = "must specify at least one of (data:true, index:true)";
+ return false;
+ }
- Collection* collection = context.getCollection();
- if ( !collection ) {
- errmsg = "collection not found";
- return false;
- }
+ AutoGetCollectionForRead context(txn, nss);
- return appendCommandStatus( result,
- collection->touch( txn,
- touch_data, touch_indexes,
- &result ) );
+ Collection* collection = context.getCollection();
+ if (!collection) {
+ errmsg = "collection not found";
+ return false;
}
- };
- static TouchCmd touchCmd;
+ return appendCommandStatus(result,
+ collection->touch(txn, touch_data, touch_indexes, &result));
+ }
+};
+static TouchCmd touchCmd;
}
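
TouchCmd is registered through the static instance just above rather than through a
MONGO_INITIALIZER, since it is always available. The command document it expects, per
the help text in the class (placeholder collection name, illustration only):

    BSONObj touchCmd = BSON("touch" << "example_collection"
                                    << "data" << true
                                    << "index" << true);
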
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 709325a5410..efdf7929bdb 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -72,2872 +72,2724 @@
namespace mongo {
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- using std::endl;
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::endl;
+using std::string;
+using std::stringstream;
+using std::vector;
namespace {
- // Used to obtain mutex that guards modifications to persistent authorization data
- const auto getAuthzDataMutex = ServiceContext::declareDecoration<stdx::timed_mutex>();
+// Used to obtain mutex that guards modifications to persistent authorization data
+const auto getAuthzDataMutex = ServiceContext::declareDecoration<stdx::timed_mutex>();
- const Seconds authzDataMutexAcquisitionTimeout{5};
+const Seconds authzDataMutexAcquisitionTimeout{5};
- BSONArray roleSetToBSONArray(const unordered_set<RoleName>& roles) {
- BSONArrayBuilder rolesArrayBuilder;
- for (unordered_set<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
- const RoleName& role = *it;
- rolesArrayBuilder.append(
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << role.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()));
- }
- return rolesArrayBuilder.arr();
+BSONArray roleSetToBSONArray(const unordered_set<RoleName>& roles) {
+ BSONArrayBuilder rolesArrayBuilder;
+ for (unordered_set<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
+ const RoleName& role = *it;
+ rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getDB()));
}
+ return rolesArrayBuilder.arr();
+}
+
+BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
+ BSONArrayBuilder rolesArrayBuilder;
+ for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
+ const RoleName& role = *it;
+ rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getDB()));
+ }
+ return rolesArrayBuilder.arr();
+}
- BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
- BSONArrayBuilder rolesArrayBuilder;
- for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
- const RoleName& role = *it;
- rolesArrayBuilder.append(
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << role.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()));
- }
- return rolesArrayBuilder.arr();
- }
-
- Status privilegeVectorToBSONArray(const PrivilegeVector& privileges, BSONArray* result) {
- BSONArrayBuilder arrBuilder;
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- const Privilege& privilege = *it;
-
- ParsedPrivilege parsedPrivilege;
- std::string errmsg;
- if (!ParsedPrivilege::privilegeToParsedPrivilege(privilege,
- &parsedPrivilege,
- &errmsg)) {
- return Status(ErrorCodes::FailedToParse, errmsg);
- }
- if (!parsedPrivilege.isValid(&errmsg)) {
- return Status(ErrorCodes::FailedToParse, errmsg);
- }
- arrBuilder.append(parsedPrivilege.toBSON());
+Status privilegeVectorToBSONArray(const PrivilegeVector& privileges, BSONArray* result) {
+ BSONArrayBuilder arrBuilder;
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ const Privilege& privilege = *it;
+
+ ParsedPrivilege parsedPrivilege;
+ std::string errmsg;
+ if (!ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)) {
+ return Status(ErrorCodes::FailedToParse, errmsg);
}
- *result = arrBuilder.arr();
- return Status::OK();
+ if (!parsedPrivilege.isValid(&errmsg)) {
+ return Status(ErrorCodes::FailedToParse, errmsg);
+ }
+ arrBuilder.append(parsedPrivilege.toBSON());
}
+ *result = arrBuilder.arr();
+ return Status::OK();
+}
- /**
- * Used to get all current roles of the user identified by 'userName'.
- */
- Status getCurrentUserRoles(OperationContext* txn,
- AuthorizationManager* authzManager,
- const UserName& userName,
- unordered_set<RoleName>* roles) {
- User* user;
- authzManager->invalidateUserByName(userName); // Need to make sure cache entry is up to date
- Status status = authzManager->acquireUser(txn, userName, &user);
+/**
+ * Used to get all current roles of the user identified by 'userName'.
+ */
+Status getCurrentUserRoles(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ const UserName& userName,
+ unordered_set<RoleName>* roles) {
+ User* user;
+ authzManager->invalidateUserByName(userName); // Need to make sure cache entry is up to date
+ Status status = authzManager->acquireUser(txn, userName, &user);
+ if (!status.isOK()) {
+ return status;
+ }
+ RoleNameIterator rolesIt = user->getRoles();
+ while (rolesIt.more()) {
+ roles->insert(rolesIt.next());
+ }
+ authzManager->releaseUser(user);
+ return Status::OK();
+}
+
+/**
+ * Checks that every role in "rolesToAdd" exists, that adding each of those roles to "role"
+ * will not result in a cycle in the role graph, and that every role being added comes from the
+ * same database as the role it is being added to (or that the role being added to is from the
+ * "admin" database).
+ */
+Status checkOkayToGrantRolesToRole(const RoleName& role,
+ const std::vector<RoleName> rolesToAdd,
+ AuthorizationManager* authzManager) {
+ for (std::vector<RoleName>::const_iterator it = rolesToAdd.begin(); it != rolesToAdd.end();
+ ++it) {
+ const RoleName& roleToAdd = *it;
+ if (roleToAdd == role) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream() << "Cannot grant role " << role.getFullName()
+ << " to itself.");
+ }
+
+ if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ str::stream()
+ << "Roles on the \'" << role.getDB()
+ << "\' database cannot be granted roles from other databases");
+ }
+
+ BSONObj roleToAddDoc;
+ Status status = authzManager->getRoleDescription(roleToAdd, false, &roleToAddDoc);
+ if (status == ErrorCodes::RoleNotFound) {
+ return Status(ErrorCodes::RoleNotFound,
+ "Cannot grant nonexistent role " + roleToAdd.toString());
+ }
if (!status.isOK()) {
return status;
}
- RoleNameIterator rolesIt = user->getRoles();
- while (rolesIt.more()) {
- roles->insert(rolesIt.next());
+ std::vector<RoleName> indirectRoles;
+ status = auth::parseRoleNamesFromBSONArray(
+ BSONArray(roleToAddDoc["inheritedRoles"].Obj()), role.getDB(), &indirectRoles);
+ if (!status.isOK()) {
+ return status;
}
- authzManager->releaseUser(user);
+
+ if (sequenceContains(indirectRoles, role)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+ << "Granting " << roleToAdd.getFullName() << " to "
+ << role.getFullName()
+ << " would introduce a cycle in the role graph.");
+ }
+ }
+ return Status::OK();
+}
+
+/**
+ * Checks that every privilege being granted targets just the database the role is from, or that
+ * the role is from the "admin" db.
+ */
+Status checkOkayToGrantPrivilegesToRole(const RoleName& role, const PrivilegeVector& privileges) {
+ if (role.getDB() == "admin") {
return Status::OK();
}
- /**
- * Checks that every role in "rolesToAdd" exists, that adding each of those roles to "role"
- * will not result in a cycle to the role graph, and that every role being added comes from the
- * same database as the role it is being added to (or that the role being added to is from the
- * "admin" database.
- */
- Status checkOkayToGrantRolesToRole(const RoleName& role,
- const std::vector<RoleName> rolesToAdd,
- AuthorizationManager* authzManager) {
- for (std::vector<RoleName>::const_iterator it = rolesToAdd.begin();
- it != rolesToAdd.end(); ++it) {
- const RoleName& roleToAdd = *it;
- if (roleToAdd == role) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot grant role " <<
- role.getFullName() << " to itself.");
- }
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ const ResourcePattern& resource = (*it).getResourcePattern();
+ if ((resource.isDatabasePattern() || resource.isExactNamespacePattern()) &&
+ (resource.databaseToMatch() == role.getDB())) {
+ continue;
+ }
- if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
- return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Roles on the \'" << role.getDB() <<
- "\' database cannot be granted roles from other databases");
- }
+ return Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << "Roles on the \'" << role.getDB()
+ << "\' database cannot be granted privileges that target other "
+ "databases or the cluster");
+ }
- BSONObj roleToAddDoc;
- Status status = authzManager->getRoleDescription(roleToAdd, false, &roleToAddDoc);
- if (status == ErrorCodes::RoleNotFound) {
- return Status(ErrorCodes::RoleNotFound,
- "Cannot grant nonexistent role " + roleToAdd.toString());
- }
- if (!status.isOK()) {
- return status;
- }
- std::vector<RoleName> indirectRoles;
- status = auth::parseRoleNamesFromBSONArray(
- BSONArray(roleToAddDoc["inheritedRoles"].Obj()),
- role.getDB(),
- &indirectRoles);
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+}
- if (sequenceContains(indirectRoles, role)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Granting " <<
- roleToAdd.getFullName() << " to " << role.getFullName()
- << " would introduce a cycle in the role graph.");
- }
- }
+void appendBSONObjToBSONArrayBuilder(BSONArrayBuilder* array, const BSONObj& obj) {
+ array->append(obj);
+}
+
+/**
+ * Finds all documents matching "query" in "collectionName". For each document returned,
+ * calls the function resultProcessor on it.
+ * Should only be called on collections with authorization documents in them
+ * (ie admin.system.users and admin.system.roles).
+ */
+Status queryAuthzDocument(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& projection,
+ const stdx::function<void(const BSONObj&)>& resultProcessor) {
+ try {
+ DBDirectClient client(txn);
+ client.query(resultProcessor, collectionName.ns(), query, &projection);
return Status::OK();
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
- /**
- * Checks that every privilege being granted targets just the database the role is from, or that
- * the role is from the "admin" db.
- */
- Status checkOkayToGrantPrivilegesToRole(const RoleName& role,
- const PrivilegeVector& privileges) {
- if (role.getDB() == "admin") {
+/**
+ * Inserts "document" into "collectionName".
+ * If there is a duplicate key error, returns a Status with code DuplicateKey.
+ *
+ * Should only be called on collections with authorization documents in them
+ * (ie admin.system.users and admin.system.roles).
+ */
+Status insertAuthzDocument(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& document,
+ const BSONObj& writeConcern) {
+ try {
+ DBDirectClient client(txn);
+ client.insert(collectionName, document);
+
+ // Handle write concern
+ BSONObjBuilder gleBuilder;
+ gleBuilder.append("getLastError", 1);
+ gleBuilder.appendElements(writeConcern);
+ BSONObj res;
+ client.runCommand("admin", gleBuilder.done(), res);
+ string errstr = client.getLastErrorString(res);
+ if (errstr.empty()) {
return Status::OK();
}
-
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- const ResourcePattern& resource = (*it).getResourcePattern();
- if ((resource.isDatabasePattern() || resource.isExactNamespacePattern()) &&
- (resource.databaseToMatch() == role.getDB())) {
- continue;
- }
-
- return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Roles on the \'" << role.getDB() <<
- "\' database cannot be granted privileges that target other "
- "databases or the cluster");
+ if (res.hasField("code") && res["code"].Int() == ASSERT_ID_DUPKEY) {
+ return Status(ErrorCodes::DuplicateKey, errstr);
}
-
- return Status::OK();
+ return Status(ErrorCodes::UnknownError, errstr);
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
- void appendBSONObjToBSONArrayBuilder(BSONArrayBuilder* array, const BSONObj& obj) {
- array->append(obj);
+/**
+ * Updates documents matching "query" according to "updatePattern" in "collectionName".
+ *
+ * Should only be called on collections with authorization documents in them
+ * (ie admin.system.users and admin.system.roles).
+ */
+Status updateAuthzDocuments(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& updatePattern,
+ bool upsert,
+ bool multi,
+ const BSONObj& writeConcern,
+ int* nMatched) {
+ try {
+ DBDirectClient client(txn);
+ client.update(collectionName, query, updatePattern, upsert, multi);
+
+ // Handle write concern
+ BSONObjBuilder gleBuilder;
+ gleBuilder.append("getLastError", 1);
+ gleBuilder.appendElements(writeConcern);
+ BSONObj res;
+ client.runCommand("admin", gleBuilder.done(), res);
+ string errstr = client.getLastErrorString(res);
+ if (errstr.empty()) {
+ *nMatched = res["n"].numberInt();
+ return Status::OK();
+ }
+ return Status(ErrorCodes::UnknownError, errstr);
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
- /**
- * Finds all documents matching "query" in "collectionName". For each document returned,
- * calls the function resultProcessor on it.
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status queryAuthzDocument(OperationContext* txn,
+/**
+ * Updates one document matching "query" according to "updatePattern" in "collectionName".
+ *
+ * If "upsert" is true and no document matches "query", inserts one using "query" as a
+ * template.
+ * If "upsert" is false and no document matches "query", return a Status with the code
+ * NoMatchingDocument. The Status message in that case is not very descriptive and should
+ * not be displayed to the end user.
+ *
+ * Should only be called on collections with authorization documents in them
+ * (ie admin.system.users and admin.system.roles).
+ */
+Status updateOneAuthzDocument(OperationContext* txn,
const NamespaceString& collectionName,
const BSONObj& query,
- const BSONObj& projection,
- const stdx::function<void(const BSONObj&)>& resultProcessor) {
- try {
- DBDirectClient client(txn);
- client.query(resultProcessor, collectionName.ns(), query, &projection);
- return Status::OK();
- } catch (const DBException& e) {
- return e.toStatus();
- }
+ const BSONObj& updatePattern,
+ bool upsert,
+ const BSONObj& writeConcern) {
+ int nMatched;
+ Status status = updateAuthzDocuments(
+ txn, collectionName, query, updatePattern, upsert, false, writeConcern, &nMatched);
+ if (!status.isOK()) {
+ return status;
}
+ dassert(nMatched == 1 || nMatched == 0);
+ if (nMatched == 0) {
+ return Status(ErrorCodes::NoMatchingDocument, "No document found");
+ }
+ return Status::OK();
+}
- /**
- * Inserts "document" into "collectionName".
- * If there is a duplicate key error, returns a Status with code DuplicateKey.
- *
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status insertAuthzDocument(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& document,
- const BSONObj& writeConcern) {
- try {
- DBDirectClient client(txn);
- client.insert(collectionName, document);
-
- // Handle write concern
- BSONObjBuilder gleBuilder;
- gleBuilder.append("getLastError", 1);
- gleBuilder.appendElements(writeConcern);
- BSONObj res;
- client.runCommand("admin", gleBuilder.done(), res);
- string errstr = client.getLastErrorString(res);
- if (errstr.empty()) {
- return Status::OK();
- }
- if (res.hasField("code") && res["code"].Int() == ASSERT_ID_DUPKEY) {
- return Status(ErrorCodes::DuplicateKey, errstr);
- }
- return Status(ErrorCodes::UnknownError, errstr);
- } catch (const DBException& e) {
- return e.toStatus();
+/**
+ * Removes all documents matching "query" from "collectionName".
+ *
+ * Should only be called on collections with authorization documents in them
+ * (ie admin.system.users and admin.system.roles).
+ */
+Status removeAuthzDocuments(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& writeConcern,
+ int* numRemoved) {
+ try {
+ DBDirectClient client(txn);
+ client.remove(collectionName, query);
+
+ // Handle write concern
+ BSONObjBuilder gleBuilder;
+ gleBuilder.append("getLastError", 1);
+ gleBuilder.appendElements(writeConcern);
+ BSONObj res;
+ client.runCommand("admin", gleBuilder.done(), res);
+ string errstr = client.getLastErrorString(res);
+ if (errstr.empty()) {
+ *numRemoved = res["n"].numberInt();
+ return Status::OK();
}
+ return Status(ErrorCodes::UnknownError, errstr);
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
- /**
- * Updates documents matching "query" according to "updatePattern" in "collectionName".
- *
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status updateAuthzDocuments(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& updatePattern,
- bool upsert,
- bool multi,
- const BSONObj& writeConcern,
- int* nMatched) {
- try {
- DBDirectClient client(txn);
- client.update(collectionName, query, updatePattern, upsert, multi);
-
- // Handle write concern
- BSONObjBuilder gleBuilder;
- gleBuilder.append("getLastError", 1);
- gleBuilder.appendElements(writeConcern);
- BSONObj res;
- client.runCommand("admin", gleBuilder.done(), res);
- string errstr = client.getLastErrorString(res);
- if (errstr.empty()) {
- *nMatched = res["n"].numberInt();
- return Status::OK();
- }
- return Status(ErrorCodes::UnknownError, errstr);
- } catch (const DBException& e) {
- return e.toStatus();
- }
+/**
+ * Creates the given role object in the given database.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status insertRoleDocument(OperationContext* txn,
+ const BSONObj& roleObj,
+ const BSONObj& writeConcern) {
+ Status status = insertAuthzDocument(
+ txn, AuthorizationManager::rolesCollectionNamespace, roleObj, writeConcern);
+ if (status.isOK()) {
+ return status;
}
+ if (status.code() == ErrorCodes::DuplicateKey) {
+ std::string name = roleObj[AuthorizationManager::ROLE_NAME_FIELD_NAME].String();
+ std::string source = roleObj[AuthorizationManager::ROLE_DB_FIELD_NAME].String();
+ return Status(ErrorCodes::DuplicateKey,
+ str::stream() << "Role \"" << name << "@" << source << "\" already exists");
+ }
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::RoleModificationFailed, status.reason());
+ }
+ return status;
+}
- /**
- * Update one document matching "query" according to "updatePattern" in "collectionName".
- *
- * If "upsert" is true and no document matches "query", inserts one using "query" as a
- * template.
- * If "upsert" is false and no document matches "query", return a Status with the code
- * NoMatchingDocument. The Status message in that case is not very descriptive and should
- * not be displayed to the end user.
- *
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status updateOneAuthzDocument(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& updatePattern,
- bool upsert,
- const BSONObj& writeConcern) {
- int nMatched;
- Status status = updateAuthzDocuments(txn,
- collectionName,
- query,
- updatePattern,
- upsert,
- false,
- writeConcern,
- &nMatched);
- if (!status.isOK()) {
- return status;
- }
- dassert(nMatched == 1 || nMatched == 0);
- if (nMatched == 0) {
- return Status(ErrorCodes::NoMatchingDocument, "No document found");
- }
- return Status::OK();
+/**
+ * Updates the given role object with the given update modifier.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status updateRoleDocument(OperationContext* txn,
+ const RoleName& role,
+ const BSONObj& updateObj,
+ const BSONObj& writeConcern) {
+ Status status = updateOneAuthzDocument(
+ txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
+ updateObj,
+ false,
+ writeConcern);
+ if (status.isOK()) {
+ return status;
+ }
+ if (status.code() == ErrorCodes::NoMatchingDocument) {
+ return Status(ErrorCodes::RoleNotFound,
+ str::stream() << "Role " << role.getFullName() << " not found");
}
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::RoleModificationFailed, status.reason());
+ }
+ return status;
+}
- /**
- * Removes all documents matching "query" from "collectionName".
- *
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status removeAuthzDocuments(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& writeConcern,
- int* numRemoved) {
- try {
- DBDirectClient client(txn);
- client.remove(collectionName, query);
-
- // Handle write concern
- BSONObjBuilder gleBuilder;
- gleBuilder.append("getLastError", 1);
- gleBuilder.appendElements(writeConcern);
- BSONObj res;
- client.runCommand("admin", gleBuilder.done(), res);
- string errstr = client.getLastErrorString(res);
- if (errstr.empty()) {
- *numRemoved = res["n"].numberInt();
- return Status::OK();
- }
- return Status(ErrorCodes::UnknownError, errstr);
- } catch (const DBException& e) {
- return e.toStatus();
- }
+/**
+ * Removes roles matching the given query.
+ * Writes into *numRemoved the number of role documents that were modified.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status removeRoleDocuments(OperationContext* txn,
+ const BSONObj& query,
+ const BSONObj& writeConcern,
+ int* numRemoved) {
+ Status status = removeAuthzDocuments(
+ txn, AuthorizationManager::rolesCollectionNamespace, query, writeConcern, numRemoved);
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::RoleModificationFailed, status.reason());
}
+ return status;
+}
- /**
- * Creates the given role object in the given database.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status insertRoleDocument(OperationContext* txn,
- const BSONObj& roleObj,
- const BSONObj& writeConcern) {
- Status status = insertAuthzDocument(txn,
- AuthorizationManager::rolesCollectionNamespace,
- roleObj,
- writeConcern);
- if (status.isOK()) {
- return status;
- }
- if (status.code() == ErrorCodes::DuplicateKey) {
- std::string name = roleObj[AuthorizationManager::ROLE_NAME_FIELD_NAME].String();
- std::string source = roleObj[AuthorizationManager::ROLE_DB_FIELD_NAME].String();
- return Status(ErrorCodes::DuplicateKey,
- str::stream() << "Role \"" << name << "@" << source
- << "\" already exists");
- }
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::RoleModificationFailed, status.reason());
- }
+/**
+ * Creates the given user object in the given database.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status insertPrivilegeDocument(OperationContext* txn,
+ const BSONObj& userObj,
+ const BSONObj& writeConcern) {
+ Status status = insertAuthzDocument(
+ txn, AuthorizationManager::usersCollectionNamespace, userObj, writeConcern);
+ if (status.isOK()) {
return status;
}
+ if (status.code() == ErrorCodes::DuplicateKey) {
+ std::string name = userObj[AuthorizationManager::USER_NAME_FIELD_NAME].String();
+ std::string source = userObj[AuthorizationManager::USER_DB_FIELD_NAME].String();
+ return Status(ErrorCodes::DuplicateKey,
+ str::stream() << "User \"" << name << "@" << source << "\" already exists");
+ }
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::UserModificationFailed, status.reason());
+ }
+ return status;
+}
- /**
- * Updates the given role object with the given update modifier.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status updateRoleDocument(OperationContext* txn,
- const RoleName& role,
+/**
+ * Updates the given user object with the given update modifier.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status updatePrivilegeDocument(OperationContext* txn,
+ const UserName& user,
const BSONObj& updateObj,
const BSONObj& writeConcern) {
- Status status = updateOneAuthzDocument(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << role.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
- updateObj,
- false,
- writeConcern);
- if (status.isOK()) {
- return status;
- }
- if (status.code() == ErrorCodes::NoMatchingDocument) {
- return Status(ErrorCodes::RoleNotFound,
- str::stream() << "Role " << role.getFullName()
- << " not found");
- }
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::RoleModificationFailed, status.reason());
- }
+ Status status = updateOneAuthzDocument(
+ txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << user.getUser() << AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
+ updateObj,
+ false,
+ writeConcern);
+ if (status.isOK()) {
return status;
}
-
- /**
- * Removes roles matching the given query.
- * Writes into *numRemoved the number of role documents that were modified.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status removeRoleDocuments(OperationContext* txn,
- const BSONObj& query,
- const BSONObj& writeConcern,
- int* numRemoved) {
- Status status = removeAuthzDocuments(txn,
- AuthorizationManager::rolesCollectionNamespace,
- query,
- writeConcern,
- numRemoved);
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::RoleModificationFailed, status.reason());
- }
- return status;
+ if (status.code() == ErrorCodes::NoMatchingDocument) {
+ return Status(ErrorCodes::UserNotFound,
+ str::stream() << "User " << user.getFullName() << " not found");
}
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::UserModificationFailed, status.reason());
+ }
+ return status;
+}
- /**
- * Creates the given user object in the given database.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status insertPrivilegeDocument(OperationContext* txn,
- const BSONObj& userObj,
- const BSONObj& writeConcern) {
- Status status = insertAuthzDocument(txn,
- AuthorizationManager::usersCollectionNamespace,
- userObj,
- writeConcern);
- if (status.isOK()) {
- return status;
- }
- if (status.code() == ErrorCodes::DuplicateKey) {
- std::string name = userObj[AuthorizationManager::USER_NAME_FIELD_NAME].String();
- std::string source = userObj[AuthorizationManager::USER_DB_FIELD_NAME].String();
- return Status(ErrorCodes::DuplicateKey,
- str::stream() << "User \"" << name << "@" << source
- << "\" already exists");
- }
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::UserModificationFailed, status.reason());
- }
- return status;
+/**
+ * Removes users for the given database matching the given query.
+ * Writes into *numRemoved the number of user documents that were modified.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status removePrivilegeDocuments(OperationContext* txn,
+ const BSONObj& query,
+ const BSONObj& writeConcern,
+ int* numRemoved) {
+ Status status = removeAuthzDocuments(
+ txn, AuthorizationManager::usersCollectionNamespace, query, writeConcern, numRemoved);
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::UserModificationFailed, status.reason());
}
+ return status;
+}
- /**
- * Updates the given user object with the given update modifier.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status updatePrivilegeDocument(OperationContext* txn,
- const UserName& user,
- const BSONObj& updateObj,
- const BSONObj& writeConcern) {
- Status status = updateOneAuthzDocument(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME << user.getUser() <<
- AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
- updateObj,
- false,
- writeConcern);
- if (status.isOK()) {
- return status;
- }
- if (status.code() == ErrorCodes::NoMatchingDocument) {
- return Status(ErrorCodes::UserNotFound,
- str::stream() << "User " << user.getFullName()
- << " not found");
- }
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::UserModificationFailed, status.reason());
- }
- return status;
+/**
+ * Updates the auth schema version document to reflect the current state of the system.
+ * 'foundSchemaVersion' is the authSchemaVersion to update with.
+ */
+Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ int foundSchemaVersion) {
+ Status status = updateOneAuthzDocument(
+ txn,
+ AuthorizationManager::versionCollectionNamespace,
+ AuthorizationManager::versionDocumentQuery,
+ BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName << foundSchemaVersion)),
+ true, // upsert
+ BSONObj()); // write concern
+ if (status == ErrorCodes::NoMatchingDocument) { // SERVER-11492
+ status = Status::OK();
}
- /**
- * Removes users for the given database matching the given query.
- * Writes into *numRemoved the number of user documents that were modified.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status removePrivilegeDocuments(OperationContext* txn,
- const BSONObj& query,
- const BSONObj& writeConcern,
- int* numRemoved) {
- Status status = removeAuthzDocuments(txn,
- AuthorizationManager::usersCollectionNamespace,
- query,
- writeConcern,
- numRemoved);
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::UserModificationFailed, status.reason());
- }
+ return status;
+}
+
+/**
+ * Returns Status::OK() if the current Auth schema version is at least the auth schema version
+ * for the MongoDB 2.6 and 3.0 MongoDB-CR/SCRAM mixed auth mode.
+ * Returns an error otherwise.
+ */
+Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManager* authzManager) {
+ int foundSchemaVersion;
+ Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
+ if (!status.isOK()) {
return status;
}
- /**
- * Updates the auth schema version document to reflect the current state of the system.
- * 'foundSchemaVersion' is the authSchemaVersion to update with.
- */
- Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
- AuthorizationManager* authzManager,
- int foundSchemaVersion) {
- Status status = updateOneAuthzDocument(txn,
- AuthorizationManager::versionCollectionNamespace,
- AuthorizationManager::versionDocumentQuery,
- BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName
- << foundSchemaVersion)),
- true, // upsert
- BSONObj()); // write concern
- if (status == ErrorCodes::NoMatchingDocument) { // SERVER-11492
- status = Status::OK();
- }
+ if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
+ return Status(ErrorCodes::AuthSchemaIncompatible,
+ str::stream()
+ << "User and role management commands require auth data to have "
+ << "at least schema version "
+ << AuthorizationManager::schemaVersion26Final << " but found "
+ << foundSchemaVersion);
+ }
+ return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
+}
+/**
+ * Returns Status::OK() if the current Auth schema version is at least the auth schema version
+ * for MongoDB 2.6 during the upgrade process.
+ * Returns an error otherwise.
+ */
+Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
+ AuthorizationManager* authzManager) {
+ int foundSchemaVersion;
+ Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
+ if (!status.isOK()) {
return status;
}
- /**
- * Returns Status::OK() if the current Auth schema version is at least the auth schema version
- * for the MongoDB 2.6 and 3.0 MongoDB-CR/SCRAM mixed auth mode.
- * Returns an error otherwise.
- */
- Status requireAuthSchemaVersion26Final(OperationContext* txn,
- AuthorizationManager* authzManager) {
- int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
- if (!status.isOK()) {
- return status;
- }
-
- if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
- return Status(
- ErrorCodes::AuthSchemaIncompatible,
- str::stream() << "User and role management commands require auth data to have "
- << "at least schema version "
- << AuthorizationManager::schemaVersion26Final
- << " but found " << foundSchemaVersion);
- }
- return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
+ if (foundSchemaVersion < AuthorizationManager::schemaVersion26Upgrade) {
+ return Status(ErrorCodes::AuthSchemaIncompatible,
+ str::stream() << "The usersInfo and rolesInfo commands require auth data to "
+ << "have at least schema version "
+ << AuthorizationManager::schemaVersion26Upgrade << " but found "
+ << foundSchemaVersion);
}
+ return Status::OK();
+}
- /**
- * Returns Status::OK() if the current Auth schema version is at least the auth schema version
- * for MongoDB 2.6 during the upgrade process.
- * Returns an error otherwise.
- */
- Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
- AuthorizationManager* authzManager) {
- int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
- if (!status.isOK()) {
- return status;
- }
+} // namespace
- if (foundSchemaVersion < AuthorizationManager::schemaVersion26Upgrade) {
- return Status(
- ErrorCodes::AuthSchemaIncompatible,
- str::stream() << "The usersInfo and rolesInfo commands require auth data to "
- << "have at least schema version "
- << AuthorizationManager::schemaVersion26Upgrade
- << " but found " << foundSchemaVersion);
- }
- return Status::OK();
- }
-} // namespace
+class CmdCreateUser : public Command {
+public:
+ CmdCreateUser() : Command("createUser") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- class CmdCreateUser : public Command {
- public:
+ virtual void help(stringstream& ss) const {
+ ss << "Adds a user to the system" << endl;
+ }
- CmdCreateUser() : Command("createUser") {}
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
+ }
- virtual bool slaveOk() const {
- return false;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "createUser", dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ if (args.userName.getDB() == "local") {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::BadValue, "Cannot create users in the local database"));
+ }
- virtual void help(stringstream& ss) const {
- ss << "Adds a user to the system" << endl;
+ if (!args.hasHashedPassword && args.userName.getDB() != "$external") {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Must provide a 'pwd' field for all user documents, except those"
+ " with '$external' as the user's source db"));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
+ if ((args.hasHashedPassword) && args.userName.getDB() == "$external") {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Cannot set the password for users defined on the '$external' "
+ "database"));
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- "createUser",
- dbname,
- &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ if (!args.hasRoles) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue, "\"createUser\" command requires a \"roles\" array"));
+ }
- if (args.userName.getDB() == "local") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "Cannot create users in the local database"));
- }
+#ifdef MONGO_CONFIG_SSL
+ if (args.userName.getDB() == "$external" && getSSLManager() &&
+ getSSLManager()->getSSLConfiguration().serverSubjectName == args.userName.getUser()) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::BadValue,
+ "Cannot create an x.509 user with the same "
+ "subjectname as the server"));
+ }
+#endif
- if (!args.hasHashedPassword && args.userName.getDB() != "$external") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must provide a 'pwd' field for all user documents, except those"
- " with '$external' as the user's source db"));
- }
+ BSONObjBuilder userObjBuilder;
+ userObjBuilder.append(
+ "_id", str::stream() << args.userName.getDB() << "." << args.userName.getUser());
+ userObjBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME, args.userName.getUser());
+ userObjBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, args.userName.getDB());
- if ((args.hasHashedPassword) &&
- args.userName.getDB() == "$external") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot set the password for users defined on the '$external' "
- "database"));
- }
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ int authzVersion;
+ status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- if (!args.hasRoles) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "\"createUser\" command requires a \"roles\" array"));
+ BSONObjBuilder credentialsBuilder(userObjBuilder.subobjStart("credentials"));
+ if (!args.hasHashedPassword) {
+ // Must be an external user
+ credentialsBuilder.append("external", true);
+ } else {
+ // Add SCRAM credentials for appropriate authSchemaVersions.
+ if (authzVersion > AuthorizationManager::schemaVersion26Final) {
+ BSONObj scramCred = scram::generateCredentials(
+ args.hashedPassword, saslGlobalParams.scramIterationCount);
+ credentialsBuilder.append("SCRAM-SHA-1", scramCred);
+ } else { // Otherwise default to MONGODB-CR.
+ credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
}
+ }
+ credentialsBuilder.done();
-#ifdef MONGO_CONFIG_SSL
- if (args.userName.getDB() == "$external" &&
- getSSLManager() &&
- getSSLManager()->getSSLConfiguration()
- .serverSubjectName == args.userName.getUser()) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot create an x.509 user with the same "
- "subjectname as the server"));
- }
-#endif
+ if (args.hasCustomData) {
+ userObjBuilder.append("customData", args.customData);
+ }
+ userObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
- BSONObjBuilder userObjBuilder;
- userObjBuilder.append("_id",
- str::stream() << args.userName.getDB() << "." <<
- args.userName.getUser());
- userObjBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME,
- args.userName.getUser());
- userObjBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME,
- args.userName.getDB());
-
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- int authzVersion;
- status = authzManager->getAuthorizationVersion(txn, &authzVersion);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ BSONObj userObj = userObjBuilder.obj();
+ V2UserDocumentParser parser;
+ status = parser.checkValidUserDocument(userObj);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- BSONObjBuilder credentialsBuilder(userObjBuilder.subobjStart("credentials"));
- if (!args.hasHashedPassword) {
- // Must be an external user
- credentialsBuilder.append("external", true);
- }
- else {
- // Add SCRAM credentials for appropriate authSchemaVersions.
- if (authzVersion > AuthorizationManager::schemaVersion26Final) {
- BSONObj scramCred = scram::generateCredentials(
- args.hashedPassword,
- saslGlobalParams.scramIterationCount);
- credentialsBuilder.append("SCRAM-SHA-1", scramCred);
- }
- else { // Otherwise default to MONGODB-CR.
- credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
- }
- }
- credentialsBuilder.done();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- if (args.hasCustomData) {
- userObjBuilder.append("customData", args.customData);
- }
- userObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- BSONObj userObj = userObjBuilder.obj();
- V2UserDocumentParser parser;
- status = parser.checkValidUserDocument(userObj);
+ // Role existence has to be checked after acquiring the update lock
+ for (size_t i = 0; i < args.roles.size(); ++i) {
+ BSONObj ignored;
+ status = authzManager->getRoleDescription(args.roles[i], false, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ }
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ audit::logCreateUser(ClientBasic::getCurrent(),
+ args.userName,
+ args.hasHashedPassword,
+ args.hasCustomData ? &args.customData : NULL,
+ args.roles);
+ status = insertPrivilegeDocument(txn, userObj, args.writeConcern);
+ return appendCommandStatus(result, status);
+ }
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual void redactForLogging(mutablebson::Document* cmdObj) {
+ auth::redactPasswordData(cmdObj->root());
+ }
- // Role existence has to be checked after acquiring the update lock
- for (size_t i = 0; i < args.roles.size(); ++i) {
- BSONObj ignored;
- status = authzManager->getRoleDescription(args.roles[i], false, &ignored);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
+} cmdCreateUser;
- audit::logCreateUser(ClientBasic::getCurrent(),
- args.userName,
- args.hasHashedPassword,
- args.hasCustomData? &args.customData : NULL,
- args.roles);
- status = insertPrivilegeDocument(txn,
- userObj,
- args.writeConcern);
- return appendCommandStatus(result, status);
- }
+class CmdUpdateUser : public Command {
+public:
+ CmdUpdateUser() : Command("updateUser") {}
- virtual void redactForLogging(mutablebson::Document* cmdObj) {
- auth::redactPasswordData(cmdObj->root());
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- } cmdCreateUser;
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- class CmdUpdateUser : public Command {
- public:
+ virtual void help(stringstream& ss) const {
+ ss << "Used to update a user, for example to change its password" << endl;
+ }
- CmdUpdateUser() : Command("updateUser") {}
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
+ }
- virtual bool slaveOk() const {
- return false;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "updateUser", dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual void help(stringstream& ss) const {
- ss << "Used to update a user, for example to change its password" << endl;
+ if (!args.hasHashedPassword && !args.hasCustomData && !args.hasRoles) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Must specify at least one field to update in updateUser"));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
+ if (args.hasHashedPassword && args.userName.getDB() == "$external") {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Cannot set the password for users defined on the '$external' "
+ "database"));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- "updateUser",
- dbname,
- &args);
+ BSONObjBuilder updateSetBuilder;
+ if (args.hasHashedPassword) {
+ BSONObjBuilder credentialsBuilder(updateSetBuilder.subobjStart("credentials"));
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ int authzVersion;
+ Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- if (!args.hasHashedPassword && !args.hasCustomData && !args.hasRoles) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must specify at least one field to update in updateUser"));
+ // Add SCRAM credentials for appropriate authSchemaVersions
+ if (authzVersion > AuthorizationManager::schemaVersion26Final) {
+ BSONObj scramCred = scram::generateCredentials(
+ args.hashedPassword, saslGlobalParams.scramIterationCount);
+ credentialsBuilder.append("SCRAM-SHA-1", scramCred);
+ } else { // Otherwise default to MONGODB-CR
+ credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
}
+ credentialsBuilder.done();
+ }
+ if (args.hasCustomData) {
+ updateSetBuilder.append("customData", args.customData);
+ }
+ if (args.hasRoles) {
+ updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
+ }
- if (args.hasHashedPassword && args.userName.getDB() == "$external") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot set the password for users defined on the '$external' "
- "database"));
- }
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- BSONObjBuilder updateSetBuilder;
- if (args.hasHashedPassword) {
- BSONObjBuilder credentialsBuilder(updateSetBuilder.subobjStart("credentials"));
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- int authzVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+
+ // Role existence has to be checked after acquiring the update lock
+ if (args.hasRoles) {
+ for (size_t i = 0; i < args.roles.size(); ++i) {
+ BSONObj ignored;
+ status = authzManager->getRoleDescription(args.roles[i], false, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
-
- // Add SCRAM credentials for appropriate authSchemaVersions
- if (authzVersion > AuthorizationManager::schemaVersion26Final) {
- BSONObj scramCred = scram::generateCredentials(
- args.hashedPassword,
- saslGlobalParams.scramIterationCount);
- credentialsBuilder.append("SCRAM-SHA-1",scramCred);
- }
- else { // Otherwise default to MONGODB-CR
- credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
- }
- credentialsBuilder.done();
- }
- if (args.hasCustomData) {
- updateSetBuilder.append("customData", args.customData);
- }
- if (args.hasRoles) {
- updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
}
+ }
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ audit::logUpdateUser(ClientBasic::getCurrent(),
+ args.userName,
+ args.hasHashedPassword,
+ args.hasCustomData ? &args.customData : NULL,
+ args.hasRoles ? &args.roles : NULL);
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ status = updatePrivilegeDocument(
+ txn, args.userName, BSON("$set" << updateSetBuilder.done()), args.writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserByName(args.userName);
+ return appendCommandStatus(result, status);
+ }
+ virtual void redactForLogging(mutablebson::Document* cmdObj) {
+ auth::redactPasswordData(cmdObj->root());
+ }
- // Role existence has to be checked after acquiring the update lock
- if (args.hasRoles) {
- for (size_t i = 0; i < args.roles.size(); ++i) {
- BSONObj ignored;
- status = authzManager->getRoleDescription(args.roles[i], false, &ignored);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
- }
+} cmdUpdateUser;
- audit::logUpdateUser(ClientBasic::getCurrent(),
- args.userName,
- args.hasHashedPassword,
- args.hasCustomData? &args.customData : NULL,
- args.hasRoles? &args.roles : NULL);
-
- status = updatePrivilegeDocument(txn,
- args.userName,
- BSON("$set" << updateSetBuilder.done()),
- args.writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserByName(args.userName);
- return appendCommandStatus(result, status);
- }
+class CmdDropUser : public Command {
+public:
+ CmdDropUser() : Command("dropUser") {}
- virtual void redactForLogging(mutablebson::Document* cmdObj) {
- auth::redactPasswordData(cmdObj->root());
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- } cmdUpdateUser;
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- class CmdDropUser : public Command {
- public:
+ virtual void help(stringstream& ss) const {
+ ss << "Drops a single user." << endl;
+ }
- CmdDropUser() : Command("dropUser") {}
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
+ }
- virtual bool slaveOk() const {
- return false;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help(stringstream& ss) const {
- ss << "Drops a single user." << endl;
+ UserName userName;
+ BSONObj writeConcern;
+ status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
+ int nMatched;
+
+ audit::logDropUser(ClientBasic::getCurrent(), userName);
+
+ status = removePrivilegeDocuments(txn,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB()),
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserByName(userName);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ if (nMatched == 0) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::UserNotFound,
+ str::stream() << "User '" << userName.getFullName() << "' not found"));
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ return true;
+ }
+} cmdDropUser;
- UserName userName;
- BSONObj writeConcern;
- status = auth::parseAndValidateDropUserCommand(cmdObj,
- dbname,
- &userName,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdDropAllUsersFromDatabase : public Command {
+public:
+ CmdDropAllUsersFromDatabase() : Command("dropAllUsersFromDatabase") {}
- int nMatched;
+ virtual bool slaveOk() const {
+ return false;
+ }
- audit::logDropUser(ClientBasic::getCurrent(), userName);
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- status = removePrivilegeDocuments(
- txn,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME << userName.getUser() <<
- AuthorizationManager::USER_DB_FIELD_NAME << userName.getDB()),
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserByName(userName);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Drops all users for a single database." << endl;
+ }
- if (nMatched == 0) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::UserNotFound,
- str::stream() << "User '" << userName.getFullName() <<
- "' not found"));
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
+ }
- return true;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- } cmdDropUser;
+ BSONObj writeConcern;
+ status =
+ auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj, dbname, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- class CmdDropAllUsersFromDatabase : public Command {
- public:
+ int numRemoved;
- CmdDropAllUsersFromDatabase() : Command("dropAllUsersFromDatabase") {}
+ audit::logDropAllUsersFromDatabase(ClientBasic::getCurrent(), dbname);
- virtual bool slaveOk() const {
- return false;
+ status = removePrivilegeDocuments(txn,
+ BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname),
+ writeConcern,
+ &numRemoved);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUsersFromDB(dbname);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ result.append("n", numRemoved);
+ return true;
+ }
- virtual void help(stringstream& ss) const {
- ss << "Drops all users for a single database." << endl;
- }
+} cmdDropAllUsersFromDatabase;
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
- }
+class CmdGrantRolesToUser : public Command {
+public:
+ CmdGrantRolesToUser() : Command("grantRolesToUser") {}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- BSONObj writeConcern;
- status = auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj,
- dbname,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Grants roles to a user." << endl;
+ }
- int numRemoved;
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
+ }
- audit::logDropAllUsersFromDatabase(ClientBasic::getCurrent(), dbname);
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
+ std::string userNameString;
+ std::vector<RoleName> roles;
+ BSONObj writeConcern;
+ status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "grantRolesToUser", dbname, &userNameString, &roles, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- status = removePrivilegeDocuments(
- txn,
- BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname),
- writeConcern,
- &numRemoved);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUsersFromDB(dbname);
+ UserName userName(userNameString, dbname);
+ unordered_set<RoleName> userRoles;
+ status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
+ for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
+ RoleName& roleName = *it;
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- result.append("n", numRemoved);
- return true;
+ userRoles.insert(roleName);
}
- } cmdDropAllUsersFromDatabase;
+ audit::logGrantRolesToUser(ClientBasic::getCurrent(), userName, roles);
+ BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
+ status = updatePrivilegeDocument(
+ txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserByName(userName);
+ return appendCommandStatus(result, status);
+ }
- class CmdGrantRolesToUser: public Command {
- public:
+} cmdGrantRolesToUser;
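
grantRolesToUser and revokeRolesFromUser share a read-merge-write shape: load the user's current roles into an unordered_set<RoleName>, insert or erase the requested roles, then write the whole "roles" array back with $set and invalidate that user. A sketch of the write-back step, not part of the patch, using only helpers that appear in this file (roleSetToBSONArray, updatePrivilegeDocument):

    // Illustrative sketch: the $set write-back shared by grantRolesToUser and
    // revokeRolesFromUser once the in-memory role set has been merged.
    Status setUserRoles(OperationContext* txn,
                        AuthorizationManager* authzManager,
                        const UserName& userName,
                        const unordered_set<RoleName>& userRoles,
                        const BSONObj& writeConcern) {
        BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
        Status status = updatePrivilegeDocument(
            txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
        authzManager->invalidateUserByName(userName);  // invalidate even on failure, as above
        return status;
    }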
- CmdGrantRolesToUser() : Command("grantRolesToUser") {}
+class CmdRevokeRolesFromUser : public Command {
+public:
+ CmdRevokeRolesFromUser() : Command("revokeRolesFromUser") {}
- virtual bool slaveOk() const {
- return false;
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes roles from a user." << endl;
+ }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Grants roles to a user." << endl;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
+ std::string userNameString;
+ std::vector<RoleName> roles;
+ BSONObj writeConcern;
+ status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "revokeRolesFromUser", dbname, &userNameString, &roles, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ UserName userName(userNameString, dbname);
+ unordered_set<RoleName> userRoles;
+ status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
+ RoleName& roleName = *it;
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- std::string userNameString;
- std::vector<RoleName> roles;
- BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "grantRolesToUser",
- dbname,
- &userNameString,
- &roles,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ userRoles.erase(roleName);
+ }
- UserName userName(userNameString, dbname);
- unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ audit::logRevokeRolesFromUser(ClientBasic::getCurrent(), userName, roles);
+ BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
+ status = updatePrivilegeDocument(
+ txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserByName(userName);
+ return appendCommandStatus(result, status);
+ }
- for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
- RoleName& roleName = *it;
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdRevokeRolesFromUser;
- userRoles.insert(roleName);
- }
+class CmdUsersInfo : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
- audit::logGrantRolesToUser(ClientBasic::getCurrent(),
- userName,
- roles);
- BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
- status = updatePrivilegeDocument(
- txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserByName(userName);
- return appendCommandStatus(result, status);
- }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- } cmdGrantRolesToUser;
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- class CmdRevokeRolesFromUser: public Command {
- public:
+ CmdUsersInfo() : Command("usersInfo") {}
- CmdRevokeRolesFromUser() : Command("revokeRolesFromUser") {}
+ virtual void help(stringstream& ss) const {
+ ss << "Returns information about users." << endl;
+ }
- virtual bool slaveOk() const {
- return false;
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
+ }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::UsersInfoArgs args;
+ Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Revokes roles from a user." << endl;
+ status = requireAuthSchemaVersion26UpgradeOrFinal(txn, getGlobalAuthorizationManager());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
+ if (args.allForDB && args.showPrivileges) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ "Can only get privilege details on exact-match usersInfo "
+ "queries."));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ BSONArrayBuilder usersArrayBuilder;
+ if (args.showPrivileges) {
+ // If you want privileges you need to call getUserDescription on each user.
+ for (size_t i = 0; i < args.userNames.size(); ++i) {
+ BSONObj userDetails;
+ status = getGlobalAuthorizationManager()->getUserDescription(
+ txn, args.userNames[i], &userDetails);
+ if (status.code() == ErrorCodes::UserNotFound) {
+ continue;
+ }
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ if (!args.showCredentials) {
+ // getUserDescription always includes credentials, need to strip it out
+ BSONObjBuilder userWithoutCredentials(usersArrayBuilder.subobjStart());
+ for (BSONObjIterator it(userDetails); it.more();) {
+ BSONElement e = it.next();
+ if (e.fieldNameStringData() != "credentials")
+ userWithoutCredentials.append(e);
+ }
+ userWithoutCredentials.doneFast();
+ } else {
+ usersArrayBuilder.append(userDetails);
+ }
}
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ } else {
+ // If you don't need privileges, you can just do a regular query on system.users
+ BSONObjBuilder queryBuilder;
+ if (args.allForDB) {
+ queryBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, dbname);
+ } else {
+ BSONArrayBuilder usersMatchArray;
+ for (size_t i = 0; i < args.userNames.size(); ++i) {
+ usersMatchArray.append(BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << args.userNames[i].getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << args.userNames[i].getDB()));
+ }
+ queryBuilder.append("$or", usersMatchArray.arr());
}
- std::string userNameString;
- std::vector<RoleName> roles;
- BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "revokeRolesFromUser",
- dbname,
- &userNameString,
- &roles,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ BSONObjBuilder projection;
+ if (!args.showCredentials) {
+ projection.append("credentials", 0);
}
+ const stdx::function<void(const BSONObj&)> function = stdx::bind(
+ appendBSONObjToBSONArrayBuilder, &usersArrayBuilder, stdx::placeholders::_1);
+ queryAuthzDocument(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ queryBuilder.done(),
+ projection.done(),
+ function);
+ }
+ result.append("users", usersArrayBuilder.arr());
+ return true;
+ }
- UserName userName(userNameString, dbname);
- unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdUsersInfo;
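
When showPrivileges is not requested, usersInfo answers with a plain query on admin.system.users: either every user of the target database, or an $or of exact {user, db} matches, with the credentials field projected out unless showCredentials is set. A sketch of how that filter is assembled, not part of the patch, using only the builders shown above:

    // Illustrative sketch: the exact-match filter used by the non-privilege usersInfo path.
    BSONObj buildUsersInfoQuery(const std::string& dbname,
                                const std::vector<UserName>& userNames,
                                bool allForDB) {
        BSONObjBuilder queryBuilder;
        if (allForDB) {
            queryBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, dbname);
        } else {
            BSONArrayBuilder usersMatchArray;
            for (size_t i = 0; i < userNames.size(); ++i) {
                usersMatchArray.append(BSON(AuthorizationManager::USER_NAME_FIELD_NAME
                                            << userNames[i].getUser()
                                            << AuthorizationManager::USER_DB_FIELD_NAME
                                            << userNames[i].getDB()));
            }
            queryBuilder.append("$or", usersMatchArray.arr());
        }
        return queryBuilder.obj();
    }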
- for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
- RoleName& roleName = *it;
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdCreateRole : public Command {
+public:
+ CmdCreateRole() : Command("createRole") {}
- userRoles.erase(roleName);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- audit::logRevokeRolesFromUser(ClientBasic::getCurrent(),
- userName,
- roles);
- BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
- status = updatePrivilegeDocument(
- txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserByName(userName);
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- } cmdRevokeRolesFromUser;
+ virtual void help(stringstream& ss) const {
+ ss << "Adds a role to the system" << endl;
+ }
- class CmdUsersInfo: public Command {
- public:
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
+ }
- virtual bool slaveOk() const {
- return false;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateRoleArgs args;
+ Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "createRole", dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool slaveOverrideOk() const {
- return true;
+ if (args.roleName.getRole().empty()) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::BadValue, "Role name must be non-empty"));
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ if (args.roleName.getDB() == "local") {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::BadValue, "Cannot create roles in the local database"));
+ }
- CmdUsersInfo() : Command("usersInfo") {}
+ if (args.roleName.getDB() == "$external") {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue, "Cannot create roles in the $external database"));
+ }
- virtual void help(stringstream& ss) const {
- ss << "Returns information about users." << endl;
+ if (!args.hasRoles) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue, "\"createRole\" command requires a \"roles\" array"));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
+ if (!args.hasPrivileges) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "\"createRole\" command requires a \"privileges\" array"));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+ BSONObjBuilder roleObjBuilder;
- auth::UsersInfoArgs args;
- Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ roleObjBuilder.append(
+ "_id", str::stream() << args.roleName.getDB() << "." << args.roleName.getRole());
+ roleObjBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME, args.roleName.getRole());
+ roleObjBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME, args.roleName.getDB());
- status = requireAuthSchemaVersion26UpgradeOrFinal(txn,
- getGlobalAuthorizationManager());
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- if (args.allForDB && args.showPrivileges) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "Can only get privilege details on exact-match usersInfo "
- "queries."));
- }
-
- BSONArrayBuilder usersArrayBuilder;
- if (args.showPrivileges) {
- // If you want privileges you need to call getUserDescription on each user.
- for (size_t i = 0; i < args.userNames.size(); ++i) {
- BSONObj userDetails;
- status = getGlobalAuthorizationManager()->getUserDescription(
- txn, args.userNames[i], &userDetails);
- if (status.code() == ErrorCodes::UserNotFound) {
- continue;
- }
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- if (!args.showCredentials) {
- // getUserDescription always includes credentials, need to strip it out
- BSONObjBuilder userWithoutCredentials(usersArrayBuilder.subobjStart());
- for (BSONObjIterator it(userDetails); it.more(); ) {
- BSONElement e = it.next();
- if (e.fieldNameStringData() != "credentials")
- userWithoutCredentials.append(e);
- }
- userWithoutCredentials.doneFast();
- } else {
- usersArrayBuilder.append(userDetails);
- }
- }
- } else {
- // If you don't need privileges, you can just do a regular query on system.users
- BSONObjBuilder queryBuilder;
- if (args.allForDB) {
- queryBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, dbname);
- } else {
- BSONArrayBuilder usersMatchArray;
- for (size_t i = 0; i < args.userNames.size(); ++i) {
- usersMatchArray.append(BSON(AuthorizationManager::USER_NAME_FIELD_NAME <<
- args.userNames[i].getUser() <<
- AuthorizationManager::USER_DB_FIELD_NAME <<
- args.userNames[i].getDB()));
- }
- queryBuilder.append("$or", usersMatchArray.arr());
+ BSONArray privileges;
+ status = privilegeVectorToBSONArray(args.privileges, &privileges);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ roleObjBuilder.append("privileges", privileges);
- }
+ roleObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
- BSONObjBuilder projection;
- if (!args.showCredentials) {
- projection.append("credentials", 0);
- }
- const stdx::function<void(const BSONObj&)> function = stdx::bind(
- appendBSONObjToBSONArrayBuilder,
- &usersArrayBuilder,
- stdx::placeholders::_1);
- queryAuthzDocument(txn,
- AuthorizationManager::usersCollectionNamespace,
- queryBuilder.done(),
- projection.done(),
- function);
- }
- result.append("users", usersArrayBuilder.arr());
- return true;
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- } cmdUsersInfo;
-
- class CmdCreateRole: public Command {
- public:
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- CmdCreateRole() : Command("createRole") {}
+ // Role existence has to be checked after acquiring the update lock
+ status = checkOkayToGrantRolesToRole(args.roleName, args.roles, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual bool slaveOk() const {
- return false;
+ status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ audit::logCreateRole(ClientBasic::getCurrent(), args.roleName, args.roles, args.privileges);
- virtual void help(stringstream& ss) const {
- ss << "Adds a role to the system" << endl;
- }
+ status = insertRoleDocument(txn, roleObjBuilder.done(), args.writeConcern);
+ return appendCommandStatus(result, status);
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
- }
+} cmdCreateRole;
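
The document createRole inserts into admin.system.roles is keyed by an _id of the form "<db>.<role>" and carries the role name, its database, its privileges, and the roles it inherits from. A sketch of that shape, not part of the patch, assuming the privileges and roles arrays were already serialized as above:

    // Illustrative sketch: the role document shape built by createRole.
    BSONObj buildRoleDocument(const RoleName& roleName,
                              const BSONArray& privileges,
                              const BSONArray& roles) {
        BSONObjBuilder roleObjBuilder;
        roleObjBuilder.append("_id",
                              str::stream() << roleName.getDB() << "." << roleName.getRole());
        roleObjBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME, roleName.getRole());
        roleObjBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME, roleName.getDB());
        roleObjBuilder.append("privileges", privileges);
        roleObjBuilder.append("roles", roles);
        return roleObjBuilder.obj();
    }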
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateRoleArgs args;
- Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj,
- "createRole",
- dbname,
- &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdUpdateRole : public Command {
+public:
+ CmdUpdateRole() : Command("updateRole") {}
- if (args.roleName.getRole().empty()) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "Role name must be non-empty"));
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- if (args.roleName.getDB() == "local") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "Cannot create roles in the local database"));
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- if (args.roleName.getDB() == "$external") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot create roles in the $external database"));
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Used to update a role" << endl;
+ }
- if (!args.hasRoles) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "\"createRole\" command requires a \"roles\" array"));
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
+ }
- if (!args.hasPrivileges) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "\"createRole\" command requires a \"privileges\" array"));
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateRoleArgs args;
+ Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "updateRole", dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- BSONObjBuilder roleObjBuilder;
+ if (!args.hasPrivileges && !args.hasRoles) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Must specify at least one field to update in updateRole"));
+ }
- roleObjBuilder.append("_id", str::stream() << args.roleName.getDB() << "." <<
- args.roleName.getRole());
- roleObjBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME,
- args.roleName.getRole());
- roleObjBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME,
- args.roleName.getDB());
+ BSONObjBuilder updateSetBuilder;
+ if (args.hasPrivileges) {
BSONArray privileges;
status = privilegeVectorToBSONArray(args.privileges, &privileges);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- roleObjBuilder.append("privileges", privileges);
+ updateSetBuilder.append("privileges", privileges);
+ }
- roleObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
+ if (args.hasRoles) {
+ updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
+ }
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
+ // Role existence has to be checked after acquiring the update lock
+ BSONObj ignored;
+ status = authzManager->getRoleDescription(args.roleName, false, &ignored);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- // Role existence has to be checked after acquiring the update lock
+ if (args.hasRoles) {
status = checkOkayToGrantRolesToRole(args.roleName, args.roles, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ }
+ if (args.hasPrivileges) {
status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ }
- audit::logCreateRole(ClientBasic::getCurrent(),
- args.roleName,
- args.roles,
- args.privileges);
+ audit::logUpdateRole(ClientBasic::getCurrent(),
+ args.roleName,
+ args.hasRoles ? &args.roles : NULL,
+ args.hasPrivileges ? &args.privileges : NULL);
- status = insertRoleDocument(txn, roleObjBuilder.done(), args.writeConcern);
+ status = updateRoleDocument(
+ txn, args.roleName, BSON("$set" << updateSetBuilder.done()), args.writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
+} cmdUpdateRole;
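
Unlike createRole, updateRole builds its $set from only the fields the caller supplied, so an update that omits "roles" or "privileges" leaves that part of the role untouched. A sketch of the modifier it produces, not part of the patch; it assumes the privileges array was already serialized via privilegeVectorToBSONArray:

    // Illustrative sketch: updateRole only $sets the supplied fields.
    BSONObj buildUpdateRoleModifier(const auth::CreateOrUpdateRoleArgs& args,
                                    const BSONArray& privileges) {
        BSONObjBuilder updateSetBuilder;
        if (args.hasPrivileges) {
            updateSetBuilder.append("privileges", privileges);
        }
        if (args.hasRoles) {
            updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
        }
        return BSON("$set" << updateSetBuilder.obj());
    }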
+
+class CmdGrantPrivilegesToRole : public Command {
+public:
+ CmdGrantPrivilegesToRole() : Command("grantPrivilegesToRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Grants privileges to a role" << endl;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdCreateRole;
+ RoleName roleName;
+ PrivilegeVector privilegesToAdd;
+ BSONObj writeConcern;
+ status = auth::parseAndValidateRolePrivilegeManipulationCommands(
+ cmdObj, "grantPrivilegesToRole", dbname, &roleName, &privilegesToAdd, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- class CmdUpdateRole: public Command {
- public:
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
+ }
- CmdUpdateRole() : Command("updateRole") {}
+ status = checkOkayToGrantPrivilegesToRole(roleName, privilegesToAdd);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual bool slaveOk() const {
- return false;
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, true, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ PrivilegeVector privileges;
+ status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
+ &privileges);
- virtual void help(stringstream& ss) const {
- ss << "Used to update a role" << endl;
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
+ for (PrivilegeVector::iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
+ ++it) {
+ Privilege::addPrivilegeToPrivilegeVector(&privileges, *it);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateRoleArgs args;
- Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj,
- "updateRole",
- dbname,
- &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ // Build up update modifier object to $set privileges.
+ mutablebson::Document updateObj;
+ mutablebson::Element setElement = updateObj.makeElementObject("$set");
+ status = updateObj.root().pushBack(setElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
+ status = setElement.pushBack(privilegesElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- if (!args.hasPrivileges && !args.hasRoles) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must specify at least one field to update in updateRole"));
- }
+ BSONObjBuilder updateBSONBuilder;
+ updateObj.writeTo(&updateBSONBuilder);
- BSONObjBuilder updateSetBuilder;
+ audit::logGrantPrivilegesToRole(ClientBasic::getCurrent(), roleName, privilegesToAdd);
- if (args.hasPrivileges) {
- BSONArray privileges;
- status = privilegeVectorToBSONArray(args.privileges, &privileges);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- updateSetBuilder.append("privileges", privileges);
- }
+ status = updateRoleDocument(txn, roleName, updateBSONBuilder.done(), writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
- if (args.hasRoles) {
- updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
- }
+} cmdGrantPrivilegesToRole;
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+class CmdRevokePrivilegesFromRole : public Command {
+public:
+ CmdRevokePrivilegesFromRole() : Command("revokePrivilegesFromRole") {}
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- // Role existence has to be checked after acquiring the update lock
- BSONObj ignored;
- status = authzManager->getRoleDescription(args.roleName, false, &ignored);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- if (args.hasRoles) {
- status = checkOkayToGrantRolesToRole(args.roleName, args.roles, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes privileges from a role" << endl;
+ }
- if (args.hasPrivileges) {
- status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
+ }
- audit::logUpdateRole(ClientBasic::getCurrent(),
- args.roleName,
- args.hasRoles? &args.roles : NULL,
- args.hasPrivileges? &args.privileges : NULL);
-
- status = updateRoleDocument(txn,
- args.roleName,
- BSON("$set" << updateSetBuilder.done()),
- args.writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdUpdateRole;
-
- class CmdGrantPrivilegesToRole: public Command {
- public:
- CmdGrantPrivilegesToRole() : Command("grantPrivilegesToRole") {}
-
- virtual bool slaveOk() const {
- return false;
+ RoleName roleName;
+ PrivilegeVector privilegesToRemove;
+ BSONObj writeConcern;
+ status = auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
+ "revokePrivilegesFromRole",
+ dbname,
+ &roleName,
+ &privilegesToRemove,
+ &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual void help(stringstream& ss) const {
- ss << "Grants privileges to a role" << endl;
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, true, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ PrivilegeVector privileges;
+ status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
+ &privileges);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- RoleName roleName;
- PrivilegeVector privilegesToAdd;
- BSONObj writeConcern;
- status = auth::parseAndValidateRolePrivilegeManipulationCommands(
- cmdObj,
- "grantPrivilegesToRole",
- dbname,
- &roleName,
- &privilegesToAdd,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ for (PrivilegeVector::iterator itToRm = privilegesToRemove.begin();
+ itToRm != privilegesToRemove.end();
+ ++itToRm) {
+ for (PrivilegeVector::iterator curIt = privileges.begin(); curIt != privileges.end();
+ ++curIt) {
+ if (curIt->getResourcePattern() == itToRm->getResourcePattern()) {
+ curIt->removeActions(itToRm->getActions());
+ if (curIt->getActions().empty()) {
+ privileges.erase(curIt);
+ }
+ break;
+ }
}
+ }
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
+ // Build up update modifier object to $set privileges.
+ mutablebson::Document updateObj;
+ mutablebson::Element setElement = updateObj.makeElementObject("$set");
+ status = updateObj.root().pushBack(setElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
+ status = setElement.pushBack(privilegesElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- status = checkOkayToGrantPrivilegesToRole(roleName, privilegesToAdd);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ audit::logRevokePrivilegesFromRole(ClientBasic::getCurrent(), roleName, privilegesToRemove);
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, true, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ BSONObjBuilder updateBSONBuilder;
+ updateObj.writeTo(&updateBSONBuilder);
+ status = updateRoleDocument(txn, roleName, updateBSONBuilder.done(), writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
- PrivilegeVector privileges;
- status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
- &privileges);
+} cmdRevokePrivilegesFromRole;
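
grantPrivilegesToRole and revokePrivilegesFromRole both rewrite the role's entire "privileges" array through a mutablebson $set; on revocation the requested actions are removed from the privilege with the matching resource pattern, and a privilege is dropped once its action set is empty. A sketch of that removal step, not part of the patch, mirroring the loop above:

    // Illustrative sketch: removing revoked actions from an existing privilege list.
    void removePrivileges(PrivilegeVector* privileges, const PrivilegeVector& toRemove) {
        for (PrivilegeVector::const_iterator itToRm = toRemove.begin();
             itToRm != toRemove.end();
             ++itToRm) {
            for (PrivilegeVector::iterator curIt = privileges->begin();
                 curIt != privileges->end();
                 ++curIt) {
                if (curIt->getResourcePattern() == itToRm->getResourcePattern()) {
                    curIt->removeActions(itToRm->getActions());
                    if (curIt->getActions().empty()) {
                        privileges->erase(curIt);
                    }
                    break;
                }
            }
        }
    }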
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdGrantRolesToRole : public Command {
+public:
+ CmdGrantRolesToRole() : Command("grantRolesToRole") {}
- for (PrivilegeVector::iterator it = privilegesToAdd.begin();
- it != privilegesToAdd.end(); ++it) {
- Privilege::addPrivilegeToPrivilegeVector(&privileges, *it);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- // Build up update modifier object to $set privileges.
- mutablebson::Document updateObj;
- mutablebson::Element setElement = updateObj.makeElementObject("$set");
- status = updateObj.root().pushBack(setElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
- status = setElement.pushBack(privilegesElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- BSONObjBuilder updateBSONBuilder;
- updateObj.writeTo(&updateBSONBuilder);
+ virtual void help(stringstream& ss) const {
+ ss << "Grants roles to another role." << endl;
+ }
- audit::logGrantPrivilegesToRole(ClientBasic::getCurrent(),
- roleName,
- privilegesToAdd);
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
+ }
- status = updateRoleDocument(
- txn,
- roleName,
- updateBSONBuilder.done(),
- writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ std::string roleNameString;
+ std::vector<RoleName> rolesToAdd;
+ BSONObj writeConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "grantRolesToRole", dbname, &roleNameString, &rolesToAdd, &writeConcern);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdGrantPrivilegesToRole;
-
- class CmdRevokePrivilegesFromRole: public Command {
- public:
+ RoleName roleName(roleNameString, dbname);
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
+ }
- CmdRevokePrivilegesFromRole() : Command("revokePrivilegesFromRole") {}
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- virtual bool slaveOk() const {
- return false;
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ // Role existence has to be checked after acquiring the update lock
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Revokes privileges from a role" << endl;
+ // Check for cycles
+ status = checkOkayToGrantRolesToRole(roleName, rolesToAdd, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
+ // Add new roles to existing roles
+ std::vector<RoleName> directRoles;
+ status = auth::parseRoleNamesFromBSONArray(
+ BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &directRoles);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ for (vector<RoleName>::iterator it = rolesToAdd.begin(); it != rolesToAdd.end(); ++it) {
+ const RoleName& roleToAdd = *it;
+ if (!sequenceContains(directRoles, roleToAdd)) // Don't double-add role
+ directRoles.push_back(*it);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ audit::logGrantRolesToRole(ClientBasic::getCurrent(), roleName, rolesToAdd);
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ status =
+ updateRoleDocument(txn,
+ roleName,
+ BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))),
+ writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
- RoleName roleName;
- PrivilegeVector privilegesToRemove;
- BSONObj writeConcern;
- status = auth::parseAndValidateRolePrivilegeManipulationCommands(
- cmdObj,
- "revokePrivilegesFromRole",
- dbname,
- &roleName,
- &privilegesToRemove,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdGrantRolesToRole;
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
+class CmdRevokeRolesFromRole : public Command {
+public:
+ CmdRevokeRolesFromRole() : Command("revokeRolesFromRole") {}
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, true, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- PrivilegeVector privileges;
- status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
- &privileges);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- for (PrivilegeVector::iterator itToRm = privilegesToRemove.begin();
- itToRm != privilegesToRemove.end(); ++itToRm) {
- for (PrivilegeVector::iterator curIt = privileges.begin();
- curIt != privileges.end(); ++curIt) {
- if (curIt->getResourcePattern() == itToRm->getResourcePattern()) {
- curIt->removeActions(itToRm->getActions());
- if (curIt->getActions().empty()) {
- privileges.erase(curIt);
- }
- break;
- }
- }
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes roles from another role." << endl;
+ }
- // Build up update modifier object to $set privileges.
- mutablebson::Document updateObj;
- mutablebson::Element setElement = updateObj.makeElementObject("$set");
- status = updateObj.root().pushBack(setElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
- status = setElement.pushBack(privilegesElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
+ }
- audit::logRevokePrivilegesFromRole(ClientBasic::getCurrent(),
- roleName,
- privilegesToRemove);
-
- BSONObjBuilder updateBSONBuilder;
- updateObj.writeTo(&updateBSONBuilder);
- status = updateRoleDocument(
- txn,
- roleName,
- updateBSONBuilder.done(),
- writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdRevokePrivilegesFromRole;
-
- class CmdGrantRolesToRole: public Command {
- public:
-
- CmdGrantRolesToRole() : Command("grantRolesToRole") {}
-
- virtual bool slaveOk() const {
- return false;
+ std::string roleNameString;
+ std::vector<RoleName> rolesToRemove;
+ BSONObj writeConcern;
+ status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "revokeRolesFromRole", dbname, &roleNameString, &rolesToRemove, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ RoleName roleName(roleNameString, dbname);
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
+ }
- virtual void help(stringstream& ss) const {
- ss << "Grants roles to another role." << endl;
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
+ std::vector<RoleName> roles;
+ status = auth::parseRoleNamesFromBSONArray(
+ BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &roles);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- std::string roleNameString;
- std::vector<RoleName> rolesToAdd;
- BSONObj writeConcern;
- Status status = auth::parseRolePossessionManipulationCommands(
- cmdObj,
- "grantRolesToRole",
- dbname,
- &roleNameString,
- &rolesToAdd,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ for (vector<RoleName>::const_iterator it = rolesToRemove.begin(); it != rolesToRemove.end();
+ ++it) {
+ vector<RoleName>::iterator itToRm = std::find(roles.begin(), roles.end(), *it);
+ if (itToRm != roles.end()) {
+ roles.erase(itToRm);
}
+ }
- RoleName roleName(roleNameString, dbname);
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
+ audit::logRevokeRolesFromRole(ClientBasic::getCurrent(), roleName, rolesToRemove);
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ status = updateRoleDocument(txn,
+ roleName,
+ BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))),
+ writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdRevokeRolesFromRole;
- // Role existence has to be checked after acquiring the update lock
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdDropRole : public Command {
+public:
+ CmdDropRole() : Command("dropRole") {}
- // Check for cycles
- status = checkOkayToGrantRolesToRole(roleName, rolesToAdd, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- // Add new roles to existing roles
- std::vector<RoleName> directRoles;
- status = auth::parseRoleNamesFromBSONArray(BSONArray(roleDoc["roles"].Obj()),
- roleName.getDB(),
- &directRoles);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- for (vector<RoleName>::iterator it = rolesToAdd.begin(); it != rolesToAdd.end(); ++it) {
- const RoleName& roleToAdd = *it;
- if (!sequenceContains(directRoles, roleToAdd)) // Don't double-add role
- directRoles.push_back(*it);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- audit::logGrantRolesToRole(ClientBasic::getCurrent(),
- roleName,
- rolesToAdd);
-
- status = updateRoleDocument(
- txn,
- roleName,
- BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))),
- writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Drops a single role. Before deleting the role completely it must remove it "
+ "from any users or roles that reference it. If any errors occur in the middle "
+ "of that process it's possible to be left in a state where the role has been "
+ "removed from some user/roles but otherwise still exists." << endl;
+ }
- } cmdGrantRolesToRole;
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
+ }
- class CmdRevokeRolesFromRole: public Command {
- public:
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- CmdRevokeRolesFromRole() : Command("revokeRolesFromRole") {}
+ RoleName roleName;
+ BSONObj writeConcern;
+ status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual bool slaveOk() const {
- return false;
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Revokes roles from another role." << endl;
+ // Remove this role from all users
+ int nMatched;
+ status = updateAuthzDocuments(
+ txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))),
+ BSON("$pull" << BSON("roles" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))),
+ false,
+ true,
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
+ ? ErrorCodes::UserModificationFailed
+ : status.code();
+ return appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Failed to remove role " << roleName.getFullName()
+ << " from all users: " << status.reason()));
+ }
+
+ // Remove this role from all other roles
+ status = updateAuthzDocuments(
+ txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))),
+ BSON("$pull" << BSON("roles" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))),
+ false,
+ true,
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
+ ? ErrorCodes::RoleModificationFailed
+ : status.code();
+ return appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Removed role " << roleName.getFullName()
+ << " from all users but failed to remove from all roles: "
+ << status.reason()));
+ }
+
+ audit::logDropRole(ClientBasic::getCurrent(), roleName);
+ // Finally, remove the actual role document
+ status = removeRoleDocuments(txn,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()),
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ return appendCommandStatus(
+ result,
+ Status(status.code(),
+ str::stream() << "Removed role " << roleName.getFullName()
+ << " from all users and roles but failed to actually delete"
+ " the role itself: " << status.reason()));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
+ dassert(nMatched == 0 || nMatched == 1);
+ if (nMatched == 0) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::RoleNotFound,
+ str::stream() << "Role '" << roleName.getFullName() << "' not found"));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ return true;
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdDropRole;
- std::string roleNameString;
- std::vector<RoleName> rolesToRemove;
- BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "revokeRolesFromRole",
- dbname,
- &roleNameString,
- &rolesToRemove,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdDropAllRolesFromDatabase : public Command {
+public:
+ CmdDropAllRolesFromDatabase() : Command("dropAllRolesFromDatabase") {}
- RoleName roleName(roleNameString, dbname);
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- std::vector<RoleName> roles;
- status = auth::parseRoleNamesFromBSONArray(BSONArray(roleDoc["roles"].Obj()),
- roleName.getDB(),
- &roles);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Drops all roles from the given database. Before deleting the roles completely "
+ "it must remove them from any users or other roles that reference them. If any "
+ "errors occur in the middle of that process it's possible to be left in a state "
+ "where the roles have been removed from some user/roles but otherwise still "
+ "exist." << endl;
+ }
- for (vector<RoleName>::const_iterator it = rolesToRemove.begin();
- it != rolesToRemove.end(); ++it) {
- vector<RoleName>::iterator itToRm = std::find(roles.begin(), roles.end(), *it);
- if (itToRm != roles.end()) {
- roles.erase(itToRm);
- }
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
+ }
- audit::logRevokeRolesFromRole(ClientBasic::getCurrent(),
- roleName,
- rolesToRemove);
-
- status = updateRoleDocument(
- txn,
- roleName,
- BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))),
- writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONObj writeConcern;
+ Status status = auth::parseDropAllRolesFromDatabaseCommand(cmdObj, dbname, &writeConcern);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdRevokeRolesFromRole;
-
- class CmdDropRole: public Command {
- public:
-
- CmdDropRole() : Command("dropRole") {}
-
- virtual bool slaveOk() const {
- return false;
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual void help(stringstream& ss) const {
- ss << "Drops a single role. Before deleting the role completely it must remove it "
- "from any users or roles that reference it. If any errors occur in the middle "
- "of that process it's possible to be left in a state where the role has been "
- "removed from some user/roles but otherwise still exists."<< endl;
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
+ // Remove these roles from all users
+ int nMatched;
+ status = updateAuthzDocuments(
+ txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON("roles" << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname)),
+ BSON("$pull" << BSON("roles"
+ << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname))),
+ false,
+ true,
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
+ ? ErrorCodes::UserModificationFailed
+ : status.code();
+ return appendCommandStatus(result,
+ Status(code,
+ str::stream()
+ << "Failed to remove roles from \"" << dbname
+ << "\" db from all users: " << status.reason()));
+ }
+
+ // Remove these roles from all other roles
+ std::string sourceFieldName = str::stream() << "roles."
+ << AuthorizationManager::ROLE_DB_FIELD_NAME;
+ status = updateAuthzDocuments(
+ txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(sourceFieldName << dbname),
+ BSON("$pull" << BSON("roles"
+ << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname))),
+ false,
+ true,
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
+ ? ErrorCodes::RoleModificationFailed
+ : status.code();
+ return appendCommandStatus(result,
+ Status(code,
+ str::stream()
+ << "Failed to remove roles from \"" << dbname
+ << "\" db from all roles: " << status.reason()));
+ }
+
+ audit::logDropAllRolesFromDatabase(ClientBasic::getCurrent(), dbname);
+ // Finally, remove the actual role documents
+ status = removeRoleDocuments(
+ txn, BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname), writeConcern, &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ return appendCommandStatus(
+ result,
+ Status(status.code(),
+ str::stream() << "Removed roles from \"" << dbname
+ << "\" db "
+ " from all users and roles but failed to actually delete"
+ " those roles themselves: " << status.reason()));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ result.append("n", nMatched);
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- RoleName roleName;
- BSONObj writeConcern;
- status = auth::parseDropRoleCommand(cmdObj,
- dbname,
- &roleName,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
-
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- // Remove this role from all users
- int nMatched;
- status = updateAuthzDocuments(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON("roles" << BSON("$elemMatch" <<
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB()))),
- BSON("$pull" << BSON("roles" <<
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB()))),
- false,
- true,
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError ?
- ErrorCodes::UserModificationFailed : status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Failed to remove role " << roleName.getFullName()
- << " from all users: " << status.reason()));
- }
-
- // Remove this role from all other roles
- status = updateAuthzDocuments(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSON("roles" << BSON("$elemMatch" <<
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB()))),
- BSON("$pull" << BSON("roles" <<
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB()))),
- false,
- true,
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError ?
- ErrorCodes::RoleModificationFailed : status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Removed role " << roleName.getFullName() <<
- " from all users but failed to remove from all roles: " <<
- status.reason()));
- }
-
- audit::logDropRole(ClientBasic::getCurrent(),
- roleName);
- // Finally, remove the actual role document
- status = removeRoleDocuments(
- txn,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << roleName.getDB()),
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- return appendCommandStatus(
- result,
- Status(status.code(),
- str::stream() << "Removed role " << roleName.getFullName() <<
- " from all users and roles but failed to actually delete"
- " the role itself: " << status.reason()));
- }
+ return true;
+ }
- dassert(nMatched == 0 || nMatched == 1);
- if (nMatched == 0) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::RoleNotFound,
- str::stream() << "Role '" << roleName.getFullName() <<
- "' not found"));
- }
+} cmdDropAllRolesFromDatabase;
- return true;
- }
+class CmdRolesInfo : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
- } cmdDropRole;
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- class CmdDropAllRolesFromDatabase: public Command {
- public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- CmdDropAllRolesFromDatabase() : Command("dropAllRolesFromDatabase") {}
+ CmdRolesInfo() : Command("rolesInfo") {}
- virtual bool slaveOk() const {
- return false;
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Returns information about roles." << endl;
+ }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Drops all roles from the given database. Before deleting the roles completely "
- "it must remove them from any users or other roles that reference them. If any "
- "errors occur in the middle of that process it's possible to be left in a state "
- "where the roles have been removed from some user/roles but otherwise still "
- "exist." << endl;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::RolesInfoArgs args;
+ Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
+ status = requireAuthSchemaVersion26UpgradeOrFinal(txn, getGlobalAuthorizationManager());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- BSONObj writeConcern;
- Status status = auth::parseDropAllRolesFromDatabaseCommand(cmdObj,
- dbname,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ BSONArrayBuilder rolesArrayBuilder;
+ if (args.allForDB) {
+ std::vector<BSONObj> rolesDocs;
+ status = getGlobalAuthorizationManager()->getRoleDescriptionsForDB(
+ dbname, args.showPrivileges, args.showBuiltinRoles, &rolesDocs);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- // Remove these roles from all users
- int nMatched;
- status = updateAuthzDocuments(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON("roles" << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname)),
- BSON("$pull" << BSON("roles" <<
- BSON(AuthorizationManager::ROLE_DB_FIELD_NAME <<
- dbname))),
- false,
- true,
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError ?
- ErrorCodes::UserModificationFailed : status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Failed to remove roles from \"" << dbname
- << "\" db from all users: " << status.reason()));
+ for (size_t i = 0; i < rolesDocs.size(); ++i) {
+ rolesArrayBuilder.append(rolesDocs[i]);
}
-
- // Remove these roles from all other roles
- std::string sourceFieldName =
- str::stream() << "roles." << AuthorizationManager::ROLE_DB_FIELD_NAME;
- status = updateAuthzDocuments(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(sourceFieldName << dbname),
- BSON("$pull" << BSON("roles" <<
- BSON(AuthorizationManager::ROLE_DB_FIELD_NAME <<
- dbname))),
- false,
- true,
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError ?
- ErrorCodes::RoleModificationFailed : status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Failed to remove roles from \"" << dbname
- << "\" db from all roles: " << status.reason()));
- }
-
- audit::logDropAllRolesFromDatabase(ClientBasic::getCurrent(), dbname);
- // Finally, remove the actual role documents
- status = removeRoleDocuments(
- txn,
- BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname),
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- return appendCommandStatus(
- result,
- Status(status.code(),
- str::stream() << "Removed roles from \"" << dbname << "\" db "
- " from all users and roles but failed to actually delete"
- " those roles themselves: " << status.reason()));
+ } else {
+ for (size_t i = 0; i < args.roleNames.size(); ++i) {
+ BSONObj roleDetails;
+ status = getGlobalAuthorizationManager()->getRoleDescription(
+ args.roleNames[i], args.showPrivileges, &roleDetails);
+ if (status.code() == ErrorCodes::RoleNotFound) {
+ continue;
+ }
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ rolesArrayBuilder.append(roleDetails);
}
-
- result.append("n", nMatched);
-
- return true;
}
+ result.append("roles", rolesArrayBuilder.arr());
+ return true;
+ }
- } cmdDropAllRolesFromDatabase;
+} cmdRolesInfo;
- class CmdRolesInfo: public Command {
- public:
+class CmdInvalidateUserCache : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual bool slaveOk() const {
- return false;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual bool slaveOverrideOk() const {
- return true;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ CmdInvalidateUserCache() : Command("invalidateUserCache") {}
- CmdRolesInfo() : Command("rolesInfo") {}
+ virtual void help(stringstream& ss) const {
+ ss << "Invalidates the in-memory cache of user information" << endl;
+ }
- virtual void help(stringstream& ss) const {
- ss << "Returns information about roles." << endl;
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForInvalidateUserCacheCommand(client);
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ authzManager->invalidateUserCache();
+ return true;
+ }
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+} cmdInvalidateUserCache;
- auth::RolesInfoArgs args;
- Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdGetCacheGeneration : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
- status = requireAuthSchemaVersion26UpgradeOrFinal(txn,
- getGlobalAuthorizationManager());
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- BSONArrayBuilder rolesArrayBuilder;
- if (args.allForDB) {
- std::vector<BSONObj> rolesDocs;
- status = getGlobalAuthorizationManager()->getRoleDescriptionsForDB(
- dbname, args.showPrivileges, args.showBuiltinRoles, &rolesDocs);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- for (size_t i = 0; i < rolesDocs.size(); ++i) {
- rolesArrayBuilder.append(rolesDocs[i]);
- }
- } else {
- for (size_t i = 0; i < args.roleNames.size(); ++i) {
- BSONObj roleDetails;
- status = getGlobalAuthorizationManager()->getRoleDescription(
- args.roleNames[i], args.showPrivileges, &roleDetails);
- if (status.code() == ErrorCodes::RoleNotFound) {
- continue;
- }
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- rolesArrayBuilder.append(roleDetails);
- }
- }
- result.append("roles", rolesArrayBuilder.arr());
- return true;
- }
+ CmdGetCacheGeneration() : Command("_getUserCacheGeneration") {}
- } cmdRolesInfo;
+ virtual void help(stringstream& ss) const {
+ ss << "internal" << endl;
+ }
- class CmdInvalidateUserCache: public Command {
- public:
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGetUserCacheGenerationCommand(client);
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ result.append("cacheGeneration", authzManager->getCacheGeneration());
+ return true;
+ }
- virtual bool adminOnly() const {
- return true;
- }
+} CmdGetCacheGeneration;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+/**
+ * This command is used only by mongorestore to handle restoring users/roles. We do this so
+ * that mongorestore doesn't do direct inserts into the admin.system.users and
+ * admin.system.roles, which would bypass the authzUpdateLock and allow multiple concurrent
+ * modifications to users/roles. What mongorestore now does instead is it inserts all user/role
+ * definitions it wants to restore into temporary collections, then this command moves those
+ * user/role definitions into their proper place in admin.system.users and admin.system.roles.
+ * It either adds the users/roles to the existing ones or replaces the existing ones, depending
+ * on whether the "drop" argument is true or false.
+ */
+class CmdMergeAuthzCollections : public Command {
+public:
+ CmdMergeAuthzCollections() : Command("_mergeAuthzCollections") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
- CmdInvalidateUserCache() : Command("invalidateUserCache") {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- virtual void help(stringstream& ss) const {
- ss << "Invalidates the in-memory cache of user information" << endl;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForInvalidateUserCacheCommand(client);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Internal command used by mongorestore for updating user/role data" << endl;
+ }
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForMergeAuthzCollectionsCommand(client, cmdObj);
+ }
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- authzManager->invalidateUserCache();
- return true;
- }
+ static UserName extractUserNameFromBSON(const BSONObj& userObj) {
+ std::string name;
+ std::string db;
+ Status status =
+ bsonExtractStringField(userObj, AuthorizationManager::USER_NAME_FIELD_NAME, &name);
+ uassertStatusOK(status);
+ status = bsonExtractStringField(userObj, AuthorizationManager::USER_DB_FIELD_NAME, &db);
+ uassertStatusOK(status);
+ return UserName(name, db);
+ }
- } cmdInvalidateUserCache;
+ /**
+     * Extracts the UserName from the user document and adds it to the set of existing users.
+     * This function is written so it can be used with stdx::bind over the result set of a query
+ * on admin.system.users to add the user names of all existing users to the "usersToDrop"
+ * set used in the command body.
+ */
+ static void extractAndInsertUserName(unordered_set<UserName>* existingUsers,
+ const BSONObj& userObj) {
+ UserName userName = extractUserNameFromBSON(userObj);
+ existingUsers->insert(userName);
+ }
- class CmdGetCacheGeneration: public Command {
- public:
+ static RoleName extractRoleNameFromBSON(const BSONObj& roleObj) {
+ std::string name;
+ std::string db;
+ Status status =
+ bsonExtractStringField(roleObj, AuthorizationManager::ROLE_NAME_FIELD_NAME, &name);
+ uassertStatusOK(status);
+ status = bsonExtractStringField(roleObj, AuthorizationManager::ROLE_DB_FIELD_NAME, &db);
+ uassertStatusOK(status);
+ return RoleName(name, db);
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ /**
+     * Extracts the RoleName from the role document and adds it to the set of existing roles.
+     * This function is written so it can be used with stdx::bind over the result set of a query
+ * on admin.system.roles to add the role names of all existing roles to the "rolesToDrop"
+ * set used in the command body.
+ */
+ static void extractAndInsertRoleName(unordered_set<RoleName>* existingRoles,
+ const BSONObj& roleObj) {
+ RoleName roleName = extractRoleNameFromBSON(roleObj);
+ existingRoles->insert(roleName);
+ }
- virtual bool adminOnly() const {
- return true;
+ /**
+ * Audits the fact that we are creating or updating the user described by userObj.
+ */
+ static void auditCreateOrUpdateUser(const BSONObj& userObj, bool create) {
+ UserName userName = extractUserNameFromBSON(userObj);
+ std::vector<RoleName> roles;
+ uassertStatusOK(auth::parseRoleNamesFromBSONArray(
+ BSONArray(userObj["roles"].Obj()), userName.getDB(), &roles));
+ BSONObj customData;
+ if (userObj.hasField("customData")) {
+ customData = userObj["customData"].Obj();
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- CmdGetCacheGeneration() : Command("_getUserCacheGeneration") {}
-
- virtual void help(stringstream& ss) const {
- ss << "internal" << endl;
+ if (create) {
+ audit::logCreateUser(ClientBasic::getCurrent(),
+ userName,
+ userObj["credentials"].Obj().hasField("MONGODB-CR"),
+ userObj.hasField("customData") ? &customData : NULL,
+ roles);
+ } else {
+ audit::logUpdateUser(ClientBasic::getCurrent(),
+ userName,
+ userObj["credentials"].Obj().hasField("MONGODB-CR"),
+ userObj.hasField("customData") ? &customData : NULL,
+ &roles);
}
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGetUserCacheGenerationCommand(client);
+ /**
+ * Audits the fact that we are creating or updating the role described by roleObj.
+ */
+ static void auditCreateOrUpdateRole(const BSONObj& roleObj, bool create) {
+ RoleName roleName = extractRoleNameFromBSON(roleObj);
+ std::vector<RoleName> roles;
+ std::vector<Privilege> privileges;
+ uassertStatusOK(auth::parseRoleNamesFromBSONArray(
+ BSONArray(roleObj["roles"].Obj()), roleName.getDB(), &roles));
+ uassertStatusOK(auth::parseAndValidatePrivilegeArray(BSONArray(roleObj["privileges"].Obj()),
+ &privileges));
+ if (create) {
+ audit::logCreateRole(ClientBasic::getCurrent(), roleName, roles, privileges);
+ } else {
+ audit::logUpdateRole(ClientBasic::getCurrent(), roleName, &roles, &privileges);
}
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- result.append("cacheGeneration", authzManager->getCacheGeneration());
- return true;
+ /**
+ * Designed to be used with stdx::bind to be called on every user object in the result
+ * set of a query over the tempUsersCollection provided to the command. For each user
+ * in the temp collection that is defined on the given db, adds that user to the actual
+ * admin.system.users collection.
+ * Also removes any users it encounters from the usersToDrop set.
+ */
+ static void addUser(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ StringData db,
+ bool update,
+ const BSONObj& writeConcern,
+ unordered_set<UserName>* usersToDrop,
+ const BSONObj& userObj) {
+ UserName userName = extractUserNameFromBSON(userObj);
+ if (!db.empty() && userName.getDB() != db) {
+ return;
}
- } CmdGetCacheGeneration;
+ if (update && usersToDrop->count(userName)) {
+ auditCreateOrUpdateUser(userObj, false);
+ Status status = updatePrivilegeDocument(txn, userName, userObj, writeConcern);
+ if (!status.isOK()) {
+ // Match the behavior of mongorestore to continue on failure
+ warning() << "Could not update user " << userName
+ << " in _mergeAuthzCollections command: " << status << endl;
+ }
+ } else {
+ auditCreateOrUpdateUser(userObj, true);
+ Status status = insertPrivilegeDocument(txn, userObj, writeConcern);
+ if (!status.isOK()) {
+ // Match the behavior of mongorestore to continue on failure
+ warning() << "Could not insert user " << userName
+ << " in _mergeAuthzCollections command: " << status << endl;
+ }
+ }
+ usersToDrop->erase(userName);
+ }
/**
- * This command is used only by mongorestore to handle restoring users/roles. We do this so
- * that mongorestore doesn't do direct inserts into the admin.system.users and
- * admin.system.roles, which would bypass the authzUpdateLock and allow multiple concurrent
- * modifications to users/roles. What mongorestore now does instead is it inserts all user/role
- * definitions it wants to restore into temporary collections, then this command moves those
- * user/role definitions into their proper place in admin.system.users and admin.system.roles.
- * It either adds the users/roles to the existing ones or replaces the existing ones, depending
- * on whether the "drop" argument is true or false.
+ * Designed to be used with stdx::bind to be called on every role object in the result
+ * set of a query over the tempRolesCollection provided to the command. For each role
+ * in the temp collection that is defined on the given db, adds that role to the actual
+ * admin.system.roles collection.
+ * Also removes any roles it encounters from the rolesToDrop set.
*/
- class CmdMergeAuthzCollections : public Command {
- public:
-
- CmdMergeAuthzCollections() : Command("_mergeAuthzCollections") {}
-
- virtual bool slaveOk() const {
- return false;
- }
-
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual void help(stringstream& ss) const {
- ss << "Internal command used by mongorestore for updating user/role data" << endl;
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForMergeAuthzCollectionsCommand(client, cmdObj);
- }
-
- static UserName extractUserNameFromBSON(const BSONObj& userObj) {
- std::string name;
- std::string db;
- Status status = bsonExtractStringField(userObj,
- AuthorizationManager::USER_NAME_FIELD_NAME,
- &name);
- uassertStatusOK(status);
- status = bsonExtractStringField(userObj,
- AuthorizationManager::USER_DB_FIELD_NAME,
- &db);
- uassertStatusOK(status);
- return UserName(name, db);
- }
-
- /**
- * Extracts the UserName from the user document and adds it to set of existing users.
- * This function is written so it can used with stdx::bind over the result set of a query
- * on admin.system.users to add the user names of all existing users to the "usersToDrop"
- * set used in the command body.
- */
- static void extractAndInsertUserName(unordered_set<UserName>* existingUsers,
- const BSONObj& userObj) {
-
- UserName userName = extractUserNameFromBSON(userObj);
- existingUsers->insert(userName);
- }
-
- static RoleName extractRoleNameFromBSON(const BSONObj& roleObj) {
- std::string name;
- std::string db;
- Status status = bsonExtractStringField(roleObj,
- AuthorizationManager::ROLE_NAME_FIELD_NAME,
- &name);
- uassertStatusOK(status);
- status = bsonExtractStringField(roleObj,
- AuthorizationManager::ROLE_DB_FIELD_NAME,
- &db);
- uassertStatusOK(status);
- return RoleName(name, db);
- }
-
- /**
- * Extracts the RoleName from the role document and adds it to set of existing roles.
- * This function is written so it can used with stdx::bind over the result set of a query
- * on admin.system.roles to add the role names of all existing roles to the "rolesToDrop"
- * set used in the command body.
- */
- static void extractAndInsertRoleName(unordered_set<RoleName>* existingRoles,
- const BSONObj& roleObj) {
- RoleName roleName = extractRoleNameFromBSON(roleObj);
- existingRoles->insert(roleName);
- }
-
- /**
- * Audits the fact that we are creating or updating the user described by userObj.
- */
- static void auditCreateOrUpdateUser(const BSONObj& userObj, bool create) {
- UserName userName = extractUserNameFromBSON(userObj);
- std::vector<RoleName> roles;
- uassertStatusOK(auth::parseRoleNamesFromBSONArray(BSONArray(userObj["roles"].Obj()),
- userName.getDB(),
- &roles));
- BSONObj customData;
- if (userObj.hasField("customData")) {
- customData = userObj["customData"].Obj();
- }
-
- if (create) {
- audit::logCreateUser(ClientBasic::getCurrent(),
- userName,
- userObj["credentials"].Obj().hasField("MONGODB-CR"),
- userObj.hasField("customData") ? &customData : NULL,
- roles);
- } else {
- audit::logUpdateUser(ClientBasic::getCurrent(),
- userName,
- userObj["credentials"].Obj().hasField("MONGODB-CR"),
- userObj.hasField("customData") ? &customData : NULL,
- &roles);
-
- }
+ static void addRole(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ StringData db,
+ bool update,
+ const BSONObj& writeConcern,
+ unordered_set<RoleName>* rolesToDrop,
+ const BSONObj roleObj) {
+ RoleName roleName = extractRoleNameFromBSON(roleObj);
+ if (!db.empty() && roleName.getDB() != db) {
+ return;
}
- /**
- * Audits the fact that we are creating or updating the role described by roleObj.
- */
- static void auditCreateOrUpdateRole(const BSONObj& roleObj, bool create) {
- RoleName roleName = extractRoleNameFromBSON(roleObj);
- std::vector<RoleName> roles;
- std::vector<Privilege> privileges;
- uassertStatusOK(auth::parseRoleNamesFromBSONArray(BSONArray(roleObj["roles"].Obj()),
- roleName.getDB(),
- &roles));
- uassertStatusOK(auth::parseAndValidatePrivilegeArray(
- BSONArray(roleObj["privileges"].Obj()), &privileges));
- if (create) {
- audit::logCreateRole(ClientBasic::getCurrent(), roleName, roles, privileges);
- } else {
- audit::logUpdateRole(ClientBasic::getCurrent(), roleName, &roles, &privileges);
+ if (update && rolesToDrop->count(roleName)) {
+ auditCreateOrUpdateRole(roleObj, false);
+ Status status = updateRoleDocument(txn, roleName, roleObj, writeConcern);
+ if (!status.isOK()) {
+ // Match the behavior of mongorestore to continue on failure
+ warning() << "Could not update role " << roleName
+ << " in _mergeAuthzCollections command: " << status << endl;
+ }
+ } else {
+ auditCreateOrUpdateRole(roleObj, true);
+ Status status = insertRoleDocument(txn, roleObj, writeConcern);
+ if (!status.isOK()) {
+ // Match the behavior of mongorestore to continue on failure
+ warning() << "Could not insert role " << roleName
+ << " in _mergeAuthzCollections command: " << status << endl;
}
}
+ rolesToDrop->erase(roleName);
+ }
- /**
- * Designed to be used with stdx::bind to be called on every user object in the result
- * set of a query over the tempUsersCollection provided to the command. For each user
- * in the temp collection that is defined on the given db, adds that user to the actual
- * admin.system.users collection.
- * Also removes any users it encounters from the usersToDrop set.
- */
- static void addUser(OperationContext* txn,
- AuthorizationManager* authzManager,
- StringData db,
- bool update,
- const BSONObj& writeConcern,
- unordered_set<UserName>* usersToDrop,
- const BSONObj& userObj) {
- UserName userName = extractUserNameFromBSON(userObj);
- if (!db.empty() && userName.getDB() != db) {
- return;
+ /**
+ * Moves all user objects from usersCollName into admin.system.users. If drop is true,
+ * removes any users that were in admin.system.users but not in usersCollName.
+ */
+ Status processUsers(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ StringData usersCollName,
+ StringData db,
+ bool drop,
+ const BSONObj& writeConcern) {
+ // When the "drop" argument has been provided, we use this set to store the users
+ // that are currently in the system, and remove from it as we encounter
+ // same-named users in the collection we are restoring from. Once we've fully
+ // moved over the temp users collection into its final location, we drop
+ // any users that previously existed there but weren't in the temp collection.
+ // This is so that we can completely replace the system.users
+ // collection with the users from the temp collection, without removing all
+ // users at the beginning and thus potentially locking ourselves out by having
+ // no users in the whole system for a time.
+ unordered_set<UserName> usersToDrop;
+
+ if (drop) {
+ // Create map of the users currently in the DB
+ BSONObj query =
+ db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
+ BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
+
+ Status status =
+ queryAuthzDocument(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ query,
+ fields,
+ stdx::bind(&CmdMergeAuthzCollections::extractAndInsertUserName,
+ &usersToDrop,
+ stdx::placeholders::_1));
+ if (!status.isOK()) {
+ return status;
}
+ }
- if (update && usersToDrop->count(userName)) {
- auditCreateOrUpdateUser(userObj, false);
- Status status = updatePrivilegeDocument(txn,
- userName,
- userObj,
- writeConcern);
- if (!status.isOK()) {
- // Match the behavior of mongorestore to continue on failure
- warning() << "Could not update user " << userName <<
- " in _mergeAuthzCollections command: " << status << endl;
- }
- } else {
- auditCreateOrUpdateUser(userObj, true);
- Status status = insertPrivilegeDocument(txn,
- userObj,
- writeConcern);
- if (!status.isOK()) {
- // Match the behavior of mongorestore to continue on failure
- warning() << "Could not insert user " << userName <<
- " in _mergeAuthzCollections command: " << status << endl;
- }
- }
- usersToDrop->erase(userName);
- }
-
- /**
- * Designed to be used with stdx::bind to be called on every role object in the result
- * set of a query over the tempRolesCollection provided to the command. For each role
- * in the temp collection that is defined on the given db, adds that role to the actual
- * admin.system.roles collection.
- * Also removes any roles it encounters from the rolesToDrop set.
- */
- static void addRole(OperationContext* txn,
- AuthorizationManager* authzManager,
- StringData db,
- bool update,
- const BSONObj& writeConcern,
- unordered_set<RoleName>* rolesToDrop,
- const BSONObj roleObj) {
- RoleName roleName = extractRoleNameFromBSON(roleObj);
- if (!db.empty() && roleName.getDB() != db) {
- return;
- }
+ Status status = queryAuthzDocument(
+ txn,
+ NamespaceString(usersCollName),
+ db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db),
+ BSONObj(),
+ stdx::bind(&CmdMergeAuthzCollections::addUser,
+ txn,
+ authzManager,
+ db,
+ drop,
+ writeConcern,
+ &usersToDrop,
+ stdx::placeholders::_1));
+ if (!status.isOK()) {
+ return status;
+ }
- if (update && rolesToDrop->count(roleName)) {
- auditCreateOrUpdateRole(roleObj, false);
- Status status = updateRoleDocument(txn,
- roleName,
- roleObj,
- writeConcern);
- if (!status.isOK()) {
- // Match the behavior of mongorestore to continue on failure
- warning() << "Could not update role " << roleName <<
- " in _mergeAuthzCollections command: " << status << endl;
- }
- } else {
- auditCreateOrUpdateRole(roleObj, true);
- Status status = insertRoleDocument(txn, roleObj, writeConcern);
- if (!status.isOK()) {
- // Match the behavior of mongorestore to continue on failure
- warning() << "Could not insert role " << roleName <<
- " in _mergeAuthzCollections command: " << status << endl;
- }
- }
- rolesToDrop->erase(roleName);
- }
-
- /**
- * Moves all user objects from usersCollName into admin.system.users. If drop is true,
- * removes any users that were in admin.system.users but not in usersCollName.
- */
- Status processUsers(OperationContext* txn,
- AuthorizationManager* authzManager,
- StringData usersCollName,
- StringData db,
- bool drop,
- const BSONObj& writeConcern) {
- // When the "drop" argument has been provided, we use this set to store the users
- // that are currently in the system, and remove from it as we encounter
- // same-named users in the collection we are restoring from. Once we've fully
- // moved over the temp users collection into its final location, we drop
- // any users that previously existed there but weren't in the temp collection.
- // This is so that we can completely replace the system.users
- // collection with the users from the temp collection, without removing all
- // users at the beginning and thus potentially locking ourselves out by having
- // no users in the whole system for a time.
- unordered_set<UserName> usersToDrop;
-
- if (drop) {
- // Create map of the users currently in the DB
- BSONObj query = db.empty() ?
- BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
- BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME << 1 <<
- AuthorizationManager::USER_DB_FIELD_NAME << 1);
-
- Status status = queryAuthzDocument(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- query,
- fields,
- stdx::bind(&CmdMergeAuthzCollections::extractAndInsertUserName,
- &usersToDrop,
- stdx::placeholders::_1));
+ if (drop) {
+ int numRemoved;
+ for (unordered_set<UserName>::iterator it = usersToDrop.begin();
+ it != usersToDrop.end();
+ ++it) {
+ const UserName& userName = *it;
+ audit::logDropUser(ClientBasic::getCurrent(), userName);
+ status = removePrivilegeDocuments(txn,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser().toString()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB().toString()),
+ writeConcern,
+ &numRemoved);
if (!status.isOK()) {
return status;
}
+ dassert(numRemoved == 1);
}
+ }
- Status status = queryAuthzDocument(
- txn,
- NamespaceString(usersCollName),
- db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db),
- BSONObj(),
- stdx::bind(&CmdMergeAuthzCollections::addUser,
- txn,
- authzManager,
- db,
- drop,
- writeConcern,
- &usersToDrop,
- stdx::placeholders::_1));
+ return Status::OK();
+ }
+
+ /**
+     * Moves all role objects from rolesCollName into admin.system.roles. If drop is true,
+     * removes any roles that were in admin.system.roles but not in rolesCollName.
+ */
+ Status processRoles(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ StringData rolesCollName,
+ StringData db,
+ bool drop,
+ const BSONObj& writeConcern) {
+ // When the "drop" argument has been provided, we use this set to store the roles
+ // that are currently in the system, and remove from it as we encounter
+ // same-named roles in the collection we are restoring from. Once we've fully
+ // moved over the temp roles collection into its final location, we drop
+ // any roles that previously existed there but weren't in the temp collection.
+ // This is so that we can completely replace the system.roles
+ // collection with the roles from the temp collection, without removing all
+ // roles at the beginning and thus potentially locking ourselves out.
+ unordered_set<RoleName> rolesToDrop;
+
+ if (drop) {
+ // Create map of the roles currently in the DB
+ BSONObj query =
+ db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
+ BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
+
+ Status status =
+ queryAuthzDocument(txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ query,
+ fields,
+ stdx::bind(&CmdMergeAuthzCollections::extractAndInsertRoleName,
+ &rolesToDrop,
+ stdx::placeholders::_1));
if (!status.isOK()) {
return status;
}
+ }
- if (drop) {
- int numRemoved;
- for (unordered_set<UserName>::iterator it = usersToDrop.begin();
- it != usersToDrop.end(); ++it) {
- const UserName& userName = *it;
- audit::logDropUser(ClientBasic::getCurrent(), userName);
- status = removePrivilegeDocuments(
- txn,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME <<
- userName.getUser().toString() <<
- AuthorizationManager::USER_DB_FIELD_NAME <<
- userName.getDB().toString()
- ),
- writeConcern,
- &numRemoved);
- if (!status.isOK()) {
- return status;
- }
- dassert(numRemoved == 1);
- }
- }
-
- return Status::OK();
+ Status status = queryAuthzDocument(
+ txn,
+ NamespaceString(rolesCollName),
+ db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db),
+ BSONObj(),
+ stdx::bind(&CmdMergeAuthzCollections::addRole,
+ txn,
+ authzManager,
+ db,
+ drop,
+ writeConcern,
+ &rolesToDrop,
+ stdx::placeholders::_1));
+ if (!status.isOK()) {
+ return status;
}
- /**
- * Moves all user objects from usersCollName into admin.system.users. If drop is true,
- * removes any users that were in admin.system.users but not in usersCollName.
- */
- Status processRoles(OperationContext* txn,
- AuthorizationManager* authzManager,
- StringData rolesCollName,
- StringData db,
- bool drop,
- const BSONObj& writeConcern) {
- // When the "drop" argument has been provided, we use this set to store the roles
- // that are currently in the system, and remove from it as we encounter
- // same-named roles in the collection we are restoring from. Once we've fully
- // moved over the temp roles collection into its final location, we drop
- // any roles that previously existed there but weren't in the temp collection.
- // This is so that we can completely replace the system.roles
- // collection with the roles from the temp collection, without removing all
- // roles at the beginning and thus potentially locking ourselves out.
- unordered_set<RoleName> rolesToDrop;
-
- if (drop) {
- // Create map of the roles currently in the DB
- BSONObj query = db.empty() ?
- BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
- BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
-
- Status status = queryAuthzDocument(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- query,
- fields,
- stdx::bind(&CmdMergeAuthzCollections::extractAndInsertRoleName,
- &rolesToDrop,
- stdx::placeholders::_1));
+ if (drop) {
+ int numRemoved;
+ for (unordered_set<RoleName>::iterator it = rolesToDrop.begin();
+ it != rolesToDrop.end();
+ ++it) {
+ const RoleName& roleName = *it;
+ audit::logDropRole(ClientBasic::getCurrent(), roleName);
+ status = removeRoleDocuments(txn,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole().toString()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB().toString()),
+ writeConcern,
+ &numRemoved);
if (!status.isOK()) {
return status;
}
+ dassert(numRemoved == 1);
}
+ }
- Status status = queryAuthzDocument(
- txn,
- NamespaceString(rolesCollName),
- db.empty() ?
- BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db),
- BSONObj(),
- stdx::bind(&CmdMergeAuthzCollections::addRole,
- txn,
- authzManager,
- db,
- drop,
- writeConcern,
- &rolesToDrop,
- stdx::placeholders::_1));
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+ }
- if (drop) {
- int numRemoved;
- for (unordered_set<RoleName>::iterator it = rolesToDrop.begin();
- it != rolesToDrop.end(); ++it) {
- const RoleName& roleName = *it;
- audit::logDropRole(ClientBasic::getCurrent(), roleName);
- status = removeRoleDocuments(
- txn,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole().toString() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB().toString()
- ),
- writeConcern,
- &numRemoved);
- if (!status.isOK()) {
- return status;
- }
- dassert(numRemoved == 1);
- }
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::MergeAuthzCollectionsArgs args;
+ Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- return Status::OK();
+ if (args.usersCollName.empty() && args.rolesCollName.empty()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Must provide at least one of \"tempUsersCollection\" and "
+                   "\"tempRolesCollection\""));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- auth::MergeAuthzCollectionsArgs args;
- Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
+ if (!args.usersCollName.empty()) {
+ Status status = processUsers(
+ txn, authzManager, args.usersCollName, args.db, args.drop, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ }
- if (args.usersCollName.empty() && args.rolesCollName.empty()) {
- return appendCommandStatus(
- result, Status(ErrorCodes::BadValue,
- "Must provide at least one of \"tempUsersCollection\" and "
- "\"tempRolescollection\""));
- }
-
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!args.rolesCollName.empty()) {
+ Status status = processRoles(
+ txn, authzManager, args.rolesCollName, args.db, args.drop, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
-
- if (!args.usersCollName.empty()) {
- Status status = processUsers(txn,
- authzManager,
- args.usersCollName,
- args.db,
- args.drop,
- args.writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
-
- if (!args.rolesCollName.empty()) {
- Status status = processRoles(txn,
- authzManager,
- args.rolesCollName,
- args.db,
- args.drop,
- args.writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
-
- return true;
}
- } cmdMergeAuthzCollections;
+ return true;
+ }
+
+} cmdMergeAuthzCollections;
-} // namespace mongo
+} // namespace mongo
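
The CmdMergeAuthzCollections comment above describes the restore protocol only in prose, so a concrete invocation may help. The sketch below is illustrative and not part of this patch: the "tempUsersCollection"/"tempRolesCollection" field names and the "drop" argument come from the command's own error message and comment, while the temporary collection names, the "db" field spelling, and the write concern shown are assumptions based on what auth::parseMergeAuthzCollectionsCommand consumes.

    #include "mongo/db/jsobj.h"  // BSONObj and the BSON() builder macro

    // Hypothetical command document a restore tool could send to the admin database,
    // assuming it has already bulk-loaded the dumped user/role definitions into the
    // two temporary collections named here.
    mongo::BSONObj mergeCmd =
        BSON("_mergeAuthzCollections" << 1
             << "tempUsersCollection" << "admin.tempusers"   // assumed temp collection name
             << "tempRolesCollection" << "admin.temproles"   // assumed temp collection name
             << "db" << ""          // assumed spelling; empty string: merge for every database
             << "drop" << true      // replace existing users/roles rather than merging into them
             << "writeConcern" << BSON("w" << "majority"));

With "drop" set to true the command also removes any pre-existing users/roles that are absent from the temporary collections, which is how a restore can fully replace admin.system.users and admin.system.roles without ever leaving the system with no users at all.
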
diff --git a/src/mongo/db/commands/user_management_commands.h b/src/mongo/db/commands/user_management_commands.h
index 6b67b8f7891..63c7a7d8d6c 100644
--- a/src/mongo/db/commands/user_management_commands.h
+++ b/src/mongo/db/commands/user_management_commands.h
@@ -38,112 +38,109 @@
namespace mongo {
- class AuthorizationManager;
- class AuthorizationSession;
- struct BSONArray;
- class BSONObj;
- class ClientBasic;
- class OperationContext;
+class AuthorizationManager;
+class AuthorizationSession;
+struct BSONArray;
+class BSONObj;
+class ClientBasic;
+class OperationContext;
namespace auth {
- /**
- * Looks for a field name "pwd" in the given BSONObj and if found replaces its contents with the
- * string "xxx" so that password data on the command object used in executing a user management
- * command isn't exposed in the logs.
- */
- void redactPasswordData(mutablebson::Element parent);
+/**
+ * Looks for a field name "pwd" in the given BSONObj and if found replaces its contents with the
+ * string "xxx" so that password data on the command object used in executing a user management
+ * command isn't exposed in the logs.
+ */
+void redactPasswordData(mutablebson::Element parent);
- //
- // checkAuthorizedTo* methods
- //
+//
+// checkAuthorizedTo* methods
+//
- Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
- const std::vector<RoleName>& roles);
+Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
+ const std::vector<RoleName>& roles);
- Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession,
- const PrivilegeVector& privileges);
+Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession,
+ const PrivilegeVector& privileges);
- Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
- const std::vector<RoleName>& roles);
+Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
+ const std::vector<RoleName>& roles);
- Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession,
- const PrivilegeVector& privileges);
+Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession,
+ const PrivilegeVector& privileges);
- //
- // checkAuthFor*Command methods
- //
+//
+// checkAuthFor*Command methods
+//
- Status checkAuthForCreateUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForCreateUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForUpdateUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForUpdateUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForGrantRolesToUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForGrantRolesToUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForCreateRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForCreateRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForUpdateRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForUpdateRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForGrantRolesToRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForGrantRolesToRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForGrantPrivilegesToRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForGrantPrivilegesToRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForDropAllUsersFromDatabaseCommand(ClientBasic* client,
- const std::string& dbname);
+Status checkAuthForDropAllUsersFromDatabaseCommand(ClientBasic* client, const std::string& dbname);
- Status checkAuthForRevokeRolesFromUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForRevokeRolesFromUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForRevokeRolesFromRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForRevokeRolesFromRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForDropUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForDropUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForDropRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForDropRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForUsersInfoCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForUsersInfoCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForRevokePrivilegesFromRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForRevokePrivilegesFromRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForDropAllRolesFromDatabaseCommand(ClientBasic* client,
- const std::string& dbname);
+Status checkAuthForDropAllRolesFromDatabaseCommand(ClientBasic* client, const std::string& dbname);
- Status checkAuthForRolesInfoCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForRolesInfoCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForInvalidateUserCacheCommand(ClientBasic* client);
+Status checkAuthForInvalidateUserCacheCommand(ClientBasic* client);
- Status checkAuthForGetUserCacheGenerationCommand(ClientBasic* client);
+Status checkAuthForGetUserCacheGenerationCommand(ClientBasic* client);
- Status checkAuthForMergeAuthzCollectionsCommand(ClientBasic* client,
- const BSONObj& cmdObj);
+Status checkAuthForMergeAuthzCollectionsCommand(ClientBasic* client, const BSONObj& cmdObj);
-} // namespace auth
-} // namespace mongo
+} // namespace auth
+} // namespace mongo
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 9e2210a92d5..64aee9eca51 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -52,525 +52,476 @@
namespace mongo {
namespace auth {
- void redactPasswordData(mutablebson::Element parent) {
- namespace mmb = mutablebson;
- const StringData pwdFieldName("pwd", StringData::LiteralTag());
- for (mmb::Element pwdElement = mmb::findFirstChildNamed(parent, pwdFieldName);
- pwdElement.ok();
- pwdElement = mmb::findElementNamed(pwdElement.rightSibling(), pwdFieldName)) {
-
- pwdElement.setValueString("xxx");
- }
- }
-
- Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
- const std::vector<RoleName>& roles) {
- for (size_t i = 0; i < roles.size(); ++i) {
- if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to grant role: "
- << roles[i].getFullName());
- }
- }
-
- return Status::OK();
+void redactPasswordData(mutablebson::Element parent) {
+ namespace mmb = mutablebson;
+ const StringData pwdFieldName("pwd", StringData::LiteralTag());
+ for (mmb::Element pwdElement = mmb::findFirstChildNamed(parent, pwdFieldName); pwdElement.ok();
+ pwdElement = mmb::findElementNamed(pwdElement.rightSibling(), pwdFieldName)) {
+ pwdElement.setValueString("xxx");
}
+}
- Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession,
- const PrivilegeVector& privileges) {
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- Status status = authzSession->checkAuthorizedToGrantPrivilege(*it);
- if (!status.isOK()) {
- return status;
- }
- }
-
- return Status::OK();
- }
-
- Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
- const std::vector<RoleName>& roles) {
- for (size_t i = 0; i < roles.size(); ++i) {
- if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to revoke role: " <<
- roles[i].getFullName());
- }
+Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
+ const std::vector<RoleName>& roles) {
+ for (size_t i = 0; i < roles.size(); ++i) {
+ if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream()
+ << "Not authorized to grant role: " << roles[i].getFullName());
}
- return Status::OK();
}
- Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession,
- const PrivilegeVector& privileges) {
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- Status status = authzSession->checkAuthorizedToRevokePrivilege(*it);
- if (!status.isOK()) {
- return status;
- }
- }
-
- return Status::OK();
- }
+ return Status::OK();
+}
- Status checkAuthForCreateUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- "createUser",
- dbname,
- &args);
+Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession,
+ const PrivilegeVector& privileges) {
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ Status status = authzSession->checkAuthorizedToGrantPrivilege(*it);
if (!status.isOK()) {
return status;
}
+ }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.userName.getDB()),
- ActionType::createUser)) {
+ return Status::OK();
+}
+
+Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
+ const std::vector<RoleName>& roles) {
+ for (size_t i = 0; i < roles.size(); ++i) {
+ if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create users on db: "
- << args.userName.getDB());
+ str::stream()
+ << "Not authorized to revoke role: " << roles[i].getFullName());
}
-
- return checkAuthorizedToGrantRoles(authzSession, args.roles);
}
+ return Status::OK();
+}
- Status checkAuthForUpdateUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- "updateUser",
- dbname,
- &args);
+Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession,
+ const PrivilegeVector& privileges) {
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ Status status = authzSession->checkAuthorizedToRevokePrivilege(*it);
if (!status.isOK()) {
return status;
}
+ }
- if (args.hasHashedPassword) {
- if (!authzSession->isAuthorizedToChangeOwnPasswordAsUser(args.userName) &&
- !authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.userName.getDB()),
- ActionType::changePassword)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to change password of user: "
- << args.userName.getFullName());
- }
- }
-
- if (args.hasCustomData) {
- if (!authzSession->isAuthorizedToChangeOwnCustomDataAsUser(args.userName) &&
- !authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.userName.getDB()),
- ActionType::changeCustomData)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to change customData of user: "
- << args.userName.getFullName());
- }
- }
+ return Status::OK();
+}
+
+Status checkAuthForCreateUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "createUser", dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- if (args.hasRoles) {
- // You don't know what roles you might be revoking, so require the ability to
- // revoke any role in the system.
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forAnyNormalResource(), ActionType::revokeRole)) {
- return Status(ErrorCodes::Unauthorized,
- "In order to use updateUser to set roles array, must be "
- "authorized to revoke any role in the system");
- }
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(args.userName.getDB()), ActionType::createUser)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream()
+ << "Not authorized to create users on db: " << args.userName.getDB());
+ }
- return checkAuthorizedToGrantRoles(authzSession, args.roles);
- }
+ return checkAuthorizedToGrantRoles(authzSession, args.roles);
+}
+
+Status checkAuthForUpdateUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "updateUser", dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- return Status::OK();
- }
-
- Status checkAuthForGrantRolesToUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- std::vector<RoleName> roles;
- std::string unusedUserNameString;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "grantRolesToUser",
- dbname,
- &unusedUserNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
+ if (args.hasHashedPassword) {
+ if (!authzSession->isAuthorizedToChangeOwnPasswordAsUser(args.userName) &&
+ !authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(args.userName.getDB()),
+ ActionType::changePassword)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to change password of user: "
+ << args.userName.getFullName());
}
-
- return checkAuthorizedToGrantRoles(authzSession, roles);
}
- Status checkAuthForCreateRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::CreateOrUpdateRoleArgs args;
- Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj,
- "createRole",
- dbname,
- &args);
- if (!status.isOK()) {
- return status;
+ if (args.hasCustomData) {
+ if (!authzSession->isAuthorizedToChangeOwnCustomDataAsUser(args.userName) &&
+ !authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(args.userName.getDB()),
+ ActionType::changeCustomData)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to change customData of user: "
+ << args.userName.getFullName());
}
+ }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.roleName.getDB()),
- ActionType::createRole)) {
+ if (args.hasRoles) {
+ // You don't know what roles you might be revoking, so require the ability to
+ // revoke any role in the system.
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(),
+ ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create roles on db: "
- << args.roleName.getDB());
+ "In order to use updateUser to set roles array, must be "
+ "authorized to revoke any role in the system");
}
- status = checkAuthorizedToGrantRoles(authzSession, args.roles);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantRoles(authzSession, args.roles);
+ }
- return checkAuthorizedToGrantPrivileges(authzSession, args.privileges);
+ return Status::OK();
+}
+
+Status checkAuthForGrantRolesToUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ std::vector<RoleName> roles;
+ std::string unusedUserNameString;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "grantRolesToUser", dbname, &unusedUserNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForUpdateRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::CreateOrUpdateRoleArgs args;
- Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj,
- "updateRole",
- dbname,
- &args);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantRoles(authzSession, roles);
+}
+
+Status checkAuthForCreateRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::CreateOrUpdateRoleArgs args;
+ Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "createRole", dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- // You don't know what roles or privileges you might be revoking, so require the ability
- // to revoke any role (or privilege) in the system.
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forAnyNormalResource(), ActionType::revokeRole)) {
- return Status(ErrorCodes::Unauthorized,
- "updateRole command required the ability to revoke any role in the "
- "system");
- }
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(args.roleName.getDB()), ActionType::createRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream()
+ << "Not authorized to create roles on db: " << args.roleName.getDB());
+ }
- status = checkAuthorizedToGrantRoles(authzSession, args.roles);
- if (!status.isOK()) {
- return status;
- }
+ status = checkAuthorizedToGrantRoles(authzSession, args.roles);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToGrantPrivileges(authzSession, args.privileges);
- }
-
- Status checkAuthForGrantRolesToRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- std::vector<RoleName> roles;
- std::string unusedUserNameString;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "grantRolesToRole",
- dbname,
- &unusedUserNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantPrivileges(authzSession, args.privileges);
+}
+
+Status checkAuthForUpdateRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::CreateOrUpdateRoleArgs args;
+ Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "updateRole", dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToGrantRoles(authzSession, roles);
- }
-
- Status checkAuthForGrantPrivilegesToRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- PrivilegeVector privileges;
- RoleName unusedRoleName;
- BSONObj unusedWriteConcern;
- Status status =
- auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
- "grantPrivilegesToRole",
- dbname,
- &unusedRoleName,
- &privileges,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ // You don't know what roles or privileges you might be revoking, so require the ability
+ // to revoke any role (or privilege) in the system.
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(),
+ ActionType::revokeRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ "updateRole command required the ability to revoke any role in the "
+ "system");
+ }
- return checkAuthorizedToGrantPrivileges(authzSession, privileges);
+ status = checkAuthorizedToGrantRoles(authzSession, args.roles);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForDropUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- UserName userName;
- BSONObj unusedWriteConcern;
- Status status = auth::parseAndValidateDropUserCommand(cmdObj,
- dbname,
- &userName,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantPrivileges(authzSession, args.privileges);
+}
+
+Status checkAuthForGrantRolesToRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ std::vector<RoleName> roles;
+ std::string unusedUserNameString;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "grantRolesToRole", dbname, &unusedUserNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(userName.getDB()), ActionType::dropUser)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop users from the "
- << userName.getDB() << " database");
- }
- return Status::OK();
- }
-
- Status checkAuthForDropRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- RoleName roleName;
- BSONObj unusedWriteConcern;
- Status status = auth::parseDropRoleCommand(cmdObj,
- dbname,
- &roleName,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantRoles(authzSession, roles);
+}
+
+Status checkAuthForGrantPrivilegesToRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ PrivilegeVector privileges;
+ RoleName unusedRoleName;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
+ cmdObj, "grantPrivilegesToRole", dbname, &unusedRoleName, &privileges, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(roleName.getDB()), ActionType::dropRole)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop roles from the "
- << roleName.getDB() << " database");
- }
- return Status::OK();
+ return checkAuthorizedToGrantPrivileges(authzSession, privileges);
+}
+
+Status checkAuthForDropUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ UserName userName;
+ BSONObj unusedWriteConcern;
+ Status status =
+ auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForDropAllUsersFromDatabaseCommand(ClientBasic* client,
- const std::string& dbname) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::dropUser)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop users from the "
- << dbname << " database");
- }
- return Status::OK();
- }
-
- Status checkAuthForRevokeRolesFromUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- std::vector<RoleName> roles;
- std::string unusedUserNameString;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "revokeRolesFromUser",
- dbname,
- &unusedUserNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(userName.getDB()), ActionType::dropUser)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to drop users from the " << userName.getDB()
+ << " database");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForDropRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ RoleName roleName;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToRevokeRoles(authzSession, roles);
- }
-
- Status checkAuthForRevokeRolesFromRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- std::vector<RoleName> roles;
- std::string unusedUserNameString;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "revokeRolesFromRole",
- dbname,
- &unusedUserNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(roleName.getDB()), ActionType::dropRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to drop roles from the " << roleName.getDB()
+ << " database");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForDropAllUsersFromDatabaseCommand(ClientBasic* client, const std::string& dbname) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
+ ActionType::dropUser)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to drop users from the " << dbname
+ << " database");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForRevokeRolesFromUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ std::vector<RoleName> roles;
+ std::string unusedUserNameString;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "revokeRolesFromUser", dbname, &unusedUserNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToRevokeRoles(authzSession, roles);
+ return checkAuthorizedToRevokeRoles(authzSession, roles);
+}
+
+Status checkAuthForRevokeRolesFromRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ std::vector<RoleName> roles;
+ std::string unusedUserNameString;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "revokeRolesFromRole", dbname, &unusedUserNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForUsersInfoCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::UsersInfoArgs args;
- Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToRevokeRoles(authzSession, roles);
+}
+
+Status checkAuthForUsersInfoCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::UsersInfoArgs args;
+ Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- if (args.allForDB) {
+ if (args.allForDB) {
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(dbname), ActionType::viewUser)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to view users from the " << dbname
+ << " database");
+ }
+ } else {
+ for (size_t i = 0; i < args.userNames.size(); ++i) {
+ if (authzSession->lookupUser(args.userNames[i])) {
+ continue; // Can always view users you are logged in as
+ }
if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::viewUser)) {
+ ResourcePattern::forDatabaseName(args.userNames[i].getDB()),
+ ActionType::viewUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view users from the "
- << dbname << " database");
- }
- } else {
- for (size_t i = 0; i < args.userNames.size(); ++i) {
- if (authzSession->lookupUser(args.userNames[i])) {
- continue; // Can always view users you are logged in as
- }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.userNames[i].getDB()),
- ActionType::viewUser)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view users from the "
- << dbname << " database");
- }
+ str::stream() << "Not authorized to view users from the " << dbname
+ << " database");
}
}
- return Status::OK();
- }
-
- Status checkAuthForRevokePrivilegesFromRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- PrivilegeVector privileges;
- RoleName unusedRoleName;
- BSONObj unusedWriteConcern;
- Status status =
- auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
- "revokePrivilegesFromRole",
- dbname,
- &unusedRoleName,
- &privileges,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ }
+ return Status::OK();
+}
+
+Status checkAuthForRevokePrivilegesFromRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ PrivilegeVector privileges;
+ RoleName unusedRoleName;
+ BSONObj unusedWriteConcern;
+ Status status =
+ auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
+ "revokePrivilegesFromRole",
+ dbname,
+ &unusedRoleName,
+ &privileges,
+ &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToRevokePrivileges(authzSession, privileges);
+ return checkAuthorizedToRevokePrivileges(authzSession, privileges);
+}
+
+Status checkAuthForDropAllRolesFromDatabaseCommand(ClientBasic* client, const std::string& dbname) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
+ ActionType::dropRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to drop roles from the " << dbname
+ << " database");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForRolesInfoCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::RolesInfoArgs args;
+ Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForDropAllRolesFromDatabaseCommand(ClientBasic* client,
- const std::string& dbname) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (args.allForDB) {
if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::dropRole)) {
+ ResourcePattern::forDatabaseName(dbname), ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop roles from the "
- << dbname << " database");
- }
- return Status::OK();
- }
-
- Status checkAuthForRolesInfoCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::RolesInfoArgs args;
- Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return status;
+ str::stream() << "Not authorized to view roles from the " << dbname
+ << " database");
}
+ } else {
+ for (size_t i = 0; i < args.roleNames.size(); ++i) {
+ if (authzSession->isAuthenticatedAsUserWithRole(args.roleNames[i])) {
+ continue; // Can always see roles that you are a member of
+ }
- if (args.allForDB) {
if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::viewRole)) {
+ ResourcePattern::forDatabaseName(args.roleNames[i].getDB()),
+ ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to view roles from the "
- << dbname << " database");
- }
- } else {
- for (size_t i = 0; i < args.roleNames.size(); ++i) {
- if (authzSession->isAuthenticatedAsUserWithRole(args.roleNames[i])) {
- continue; // Can always see roles that you are a member of
- }
-
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.roleNames[i].getDB()),
- ActionType::viewRole)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view roles from the "
- << args.roleNames[i].getDB() << " database");
- }
+ << args.roleNames[i].getDB() << " database");
}
}
-
- return Status::OK();
}
- Status checkAuthForInvalidateUserCacheCommand(ClientBasic* client) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::invalidateUserCache)) {
- return Status(ErrorCodes::Unauthorized, "Not authorized to invalidate user cache");
- }
- return Status::OK();
- }
+ return Status::OK();
+}
- Status checkAuthForGetUserCacheGenerationCommand(ClientBasic* client) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::internal)) {
- return Status(ErrorCodes::Unauthorized, "Not authorized to get cache generation");
- }
- return Status::OK();
+Status checkAuthForInvalidateUserCacheCommand(ClientBasic* client) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::invalidateUserCache)) {
+ return Status(ErrorCodes::Unauthorized, "Not authorized to invalidate user cache");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForGetUserCacheGenerationCommand(ClientBasic* client) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::internal)) {
+ return Status(ErrorCodes::Unauthorized, "Not authorized to get cache generation");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForMergeAuthzCollectionsCommand(ClientBasic* client, const BSONObj& cmdObj) {
+ auth::MergeAuthzCollectionsArgs args;
+ Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForMergeAuthzCollectionsCommand(ClientBasic* client,
- const BSONObj& cmdObj) {
- auth::MergeAuthzCollectionsArgs args;
- Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
- if (!status.isOK()) {
- return status;
- }
-
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ActionSet actions;
- actions.addAction(ActionType::createUser);
- actions.addAction(ActionType::createRole);
- actions.addAction(ActionType::grantRole);
- actions.addAction(ActionType::revokeRole);
- if (args.drop) {
- actions.addAction(ActionType::dropUser);
- actions.addAction(ActionType::dropRole);
- }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forAnyNormalResource(), actions)) {
- return Status(ErrorCodes::Unauthorized,
- "Not authorized to update user/role data using _mergeAuthzCollections"
- " command");
- }
- if (!args.usersCollName.empty() &&
- !authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(args.usersCollName)),
- ActionType::find)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to read "
- << args.usersCollName);
- }
- if (!args.rolesCollName.empty() &&
- !authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(args.rolesCollName)),
- ActionType::find)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to read "
- << args.rolesCollName);
- }
- return Status::OK();
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ActionSet actions;
+ actions.addAction(ActionType::createUser);
+ actions.addAction(ActionType::createRole);
+ actions.addAction(ActionType::grantRole);
+ actions.addAction(ActionType::revokeRole);
+ if (args.drop) {
+ actions.addAction(ActionType::dropUser);
+ actions.addAction(ActionType::dropRole);
+ }
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(),
+ actions)) {
+ return Status(ErrorCodes::Unauthorized,
+ "Not authorized to update user/role data using _mergeAuthzCollections"
+ " command");
+ }
+ if (!args.usersCollName.empty() &&
+ !authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(args.usersCollName)),
+ ActionType::find)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to read " << args.usersCollName);
+ }
+ if (!args.rolesCollName.empty() &&
+ !authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(args.rolesCollName)),
+ ActionType::find)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to read " << args.rolesCollName);
}
+ return Status::OK();
+}
-} // namespace auth
-} // namespace mongo
+} // namespace auth
+} // namespace mongo
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 5fc7a871de0..41d5b4afdb8 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -41,81 +41,87 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
+using std::endl;
+using std::string;
+using std::stringstream;
+
+class ValidateCmd : public Command {
+public:
+ ValidateCmd() : Command("validate") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual void help(stringstream& h) const {
+ h << "Validate contents of a namespace by scanning its data structures for correctness. "
+ "Slow.\n"
+ "Add full:true option to do a more thorough check";
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::validate);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+    //{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] [, full: <bool>] }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
+
+ NamespaceString ns_string(ns);
+ const bool full = cmdObj["full"].trueValue();
+ const bool scanData = full || cmdObj["scandata"].trueValue();
+
+ if (!ns_string.isNormal() && full) {
+ errmsg = "Can only run full validate on a regular collection";
+ return false;
+ }
- class ValidateCmd : public Command {
- public:
- ValidateCmd() : Command( "validate" ) {}
+ if (!serverGlobalParams.quiet) {
+ LOG(0) << "CMD: validate " << ns << endl;
+ }
- virtual bool slaveOk() const {
- return true;
+ AutoGetDb ctx(txn, ns_string.db(), MODE_IX);
+ Lock::CollectionLock collLk(txn->lockState(), ns_string.ns(), MODE_X);
+ Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(ns_string) : NULL;
+ if (!collection) {
+ errmsg = "ns not found";
+ return false;
}
- virtual void help(stringstream& h) const { h << "Validate contents of a namespace by scanning its data structures for correctness. Slow.\n"
- "Add full:true option to do a more thorough check"; }
+ result.append("ns", ns);
+
+ ValidateResults results;
+ Status status = collection->validate(txn, full, scanData, &results, &result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::validate);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ result.appendBool("valid", results.valid);
+ result.append("errors", results.errors);
+
+ if (!full) {
+ result.append(
+ "warning",
+ "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
}
- //{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] [, full: <bool> } */
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
-
- NamespaceString ns_string(ns);
- const bool full = cmdObj["full"].trueValue();
- const bool scanData = full || cmdObj["scandata"].trueValue();
-
- if ( !ns_string.isNormal() && full ) {
- errmsg = "Can only run full validate on a regular collection";
- return false;
- }
-
- if (!serverGlobalParams.quiet) {
- LOG(0) << "CMD: validate " << ns << endl;
- }
-
- AutoGetDb ctx(txn, ns_string.db(), MODE_IX);
- Lock::CollectionLock collLk(txn->lockState(), ns_string.ns(), MODE_X);
- Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(ns_string) : NULL;
- if ( !collection ) {
- errmsg = "ns not found";
- return false;
- }
-
- result.append( "ns", ns );
-
- ValidateResults results;
- Status status = collection->validate( txn, full, scanData, &results, &result );
- if ( !status.isOK() )
- return appendCommandStatus( result, status );
-
- result.appendBool("valid", results.valid);
- result.append("errors", results.errors);
-
- if ( !full ){
- result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
- }
-
- if ( !results.valid ) {
- result.append("advice", "ns corrupt. See http://dochub.mongodb.org/core/data-recovery");
- }
-
- return true;
+
+ if (!results.valid) {
+ result.append("advice", "ns corrupt. See http://dochub.mongodb.org/core/data-recovery");
}
- } validateCmd;
+ return true;
+ }
+} validateCmd;
}
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 0b277ddfa56..2087b5bd2f4 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -83,303 +83,290 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- namespace {
-
- /**
- * Data structure to safely hold and clean up results of single write operations.
- */
- class WriteOpResult {
- MONGO_DISALLOW_COPYING(WriteOpResult);
- public:
- WriteOpResult() {}
-
- WriteOpStats& getStats() { return _stats; }
-
- WriteErrorDetail* getError() { return _error.get(); }
- WriteErrorDetail* releaseError() { return _error.release(); }
- void setError(WriteErrorDetail* error) { _error.reset(error); }
-
- private:
- WriteOpStats _stats;
- std::unique_ptr<WriteErrorDetail> _error;
- };
-
- } // namespace
-
- // TODO: Determine queueing behavior we want here
- MONGO_EXPORT_SERVER_PARAMETER( queueForMigrationCommit, bool, true );
-
- using mongoutils::str::stream;
-
- WriteBatchExecutor::WriteBatchExecutor( OperationContext* txn,
- OpCounters* opCounters,
- LastError* le ) :
- _txn(txn),
- _opCounters( opCounters ),
- _le( le ),
- _stats( new WriteBatchStats ) {
- }
+using std::endl;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- static WCErrorDetail* toWriteConcernError( const Status& wcStatus,
- const WriteConcernResult& wcResult ) {
+namespace {
- WCErrorDetail* wcError = new WCErrorDetail;
+/**
+ * Data structure to safely hold and clean up results of single write operations.
+ */
+class WriteOpResult {
+ MONGO_DISALLOW_COPYING(WriteOpResult);
- wcError->setErrCode( wcStatus.code() );
- wcError->setErrMessage( wcStatus.reason() );
- if ( wcResult.wTimedOut )
- wcError->setErrInfo( BSON( "wtimeout" << true ) );
+public:
+ WriteOpResult() {}
- return wcError;
+ WriteOpStats& getStats() {
+ return _stats;
}
- static WriteErrorDetail* toWriteError( const Status& status ) {
-
- WriteErrorDetail* error = new WriteErrorDetail;
-
- // TODO: Complex transform here?
- error->setErrCode( status.code() );
- error->setErrMessage( status.reason() );
-
- return error;
+ WriteErrorDetail* getError() {
+ return _error.get();
}
-
- static void toBatchError( const Status& status, BatchedCommandResponse* response ) {
- response->clear();
- response->setErrCode( status.code() );
- response->setErrMessage( status.reason() );
- response->setOk( false );
- dassert( response->isValid(NULL) );
+ WriteErrorDetail* releaseError() {
+ return _error.release();
}
-
- static void noteInCriticalSection( WriteErrorDetail* staleError ) {
- BSONObjBuilder builder;
- if ( staleError->isErrInfoSet() )
- builder.appendElements( staleError->getErrInfo() );
- builder.append( "inCriticalSection", true );
- staleError->setErrInfo( builder.obj() );
+ void setError(WriteErrorDetail* error) {
+ _error.reset(error);
}
- // static
- Status WriteBatchExecutor::validateBatch( const BatchedCommandRequest& request ) {
-
- // Validate namespace
- const NamespaceString& nss = request.getNSS();
- if ( !nss.isValid() ) {
- return Status( ErrorCodes::InvalidNamespace,
- nss.ns() + " is not a valid namespace" );
- }
-
- // Make sure we can write to the namespace
- Status allowedStatus = userAllowedWriteNS( nss );
- if ( !allowedStatus.isOK() ) {
- return allowedStatus;
- }
-
- // Validate insert index requests
- // TODO: Push insert index requests through createIndex once all upgrade paths support it
- string errMsg;
- if ( request.isInsertIndexRequest() && !request.isValidIndexRequest( &errMsg ) ) {
- return Status( ErrorCodes::InvalidOptions, errMsg );
- }
-
- return Status::OK();
+private:
+ WriteOpStats _stats;
+ std::unique_ptr<WriteErrorDetail> _error;
+};
+
+} // namespace
+
+// TODO: Determine queueing behavior we want here
+MONGO_EXPORT_SERVER_PARAMETER(queueForMigrationCommit, bool, true);
+
+using mongoutils::str::stream;
+
+WriteBatchExecutor::WriteBatchExecutor(OperationContext* txn, OpCounters* opCounters, LastError* le)
+ : _txn(txn), _opCounters(opCounters), _le(le), _stats(new WriteBatchStats) {}
+
+static WCErrorDetail* toWriteConcernError(const Status& wcStatus,
+ const WriteConcernResult& wcResult) {
+ WCErrorDetail* wcError = new WCErrorDetail;
+
+ wcError->setErrCode(wcStatus.code());
+ wcError->setErrMessage(wcStatus.reason());
+ if (wcResult.wTimedOut)
+ wcError->setErrInfo(BSON("wtimeout" << true));
+
+ return wcError;
+}
+
+static WriteErrorDetail* toWriteError(const Status& status) {
+ WriteErrorDetail* error = new WriteErrorDetail;
+
+ // TODO: Complex transform here?
+ error->setErrCode(status.code());
+ error->setErrMessage(status.reason());
+
+ return error;
+}
+
+static void toBatchError(const Status& status, BatchedCommandResponse* response) {
+ response->clear();
+ response->setErrCode(status.code());
+ response->setErrMessage(status.reason());
+ response->setOk(false);
+ dassert(response->isValid(NULL));
+}
+
+static void noteInCriticalSection(WriteErrorDetail* staleError) {
+ BSONObjBuilder builder;
+ if (staleError->isErrInfoSet())
+ builder.appendElements(staleError->getErrInfo());
+ builder.append("inCriticalSection", true);
+ staleError->setErrInfo(builder.obj());
+}
+
+// static
+Status WriteBatchExecutor::validateBatch(const BatchedCommandRequest& request) {
+ // Validate namespace
+ const NamespaceString& nss = request.getNSS();
+ if (!nss.isValid()) {
+ return Status(ErrorCodes::InvalidNamespace, nss.ns() + " is not a valid namespace");
}
- void WriteBatchExecutor::executeBatch( const BatchedCommandRequest& request,
- BatchedCommandResponse* response ) {
+ // Make sure we can write to the namespace
+ Status allowedStatus = userAllowedWriteNS(nss);
+ if (!allowedStatus.isOK()) {
+ return allowedStatus;
+ }
- // Validate namespace
- Status isValid = validateBatch(request);
- if (!isValid.isOK()) {
- toBatchError( isValid, response );
- return;
- }
+ // Validate insert index requests
+ // TODO: Push insert index requests through createIndex once all upgrade paths support it
+ string errMsg;
+ if (request.isInsertIndexRequest() && !request.isValidIndexRequest(&errMsg)) {
+ return Status(ErrorCodes::InvalidOptions, errMsg);
+ }
- if ( request.sizeWriteOps() == 0u ) {
- toBatchError( Status( ErrorCodes::InvalidLength,
- "no write ops were included in the batch" ),
- response );
- return;
- }
+ return Status::OK();
+}
- // Validate batch size
- if ( request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize ) {
- toBatchError( Status( ErrorCodes::InvalidLength,
- stream() << "exceeded maximum write batch size of "
- << BatchedCommandRequest::kMaxWriteBatchSize ),
- response );
- return;
- }
+void WriteBatchExecutor::executeBatch(const BatchedCommandRequest& request,
+ BatchedCommandResponse* response) {
+ // Validate namespace
+ Status isValid = validateBatch(request);
+ if (!isValid.isOK()) {
+ toBatchError(isValid, response);
+ return;
+ }
- //
- // End validation
- //
+ if (request.sizeWriteOps() == 0u) {
+ toBatchError(Status(ErrorCodes::InvalidLength, "no write ops were included in the batch"),
+ response);
+ return;
+ }
- const WriteConcernOptions& writeConcern = _txn->getWriteConcern();
- bool silentWC = writeConcern.wMode.empty() && writeConcern.wNumNodes == 0
- && writeConcern.syncMode == WriteConcernOptions::NONE;
+ // Validate batch size
+ if (request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize) {
+ toBatchError(Status(ErrorCodes::InvalidLength,
+ stream() << "exceeded maximum write batch size of "
+ << BatchedCommandRequest::kMaxWriteBatchSize),
+ response);
+ return;
+ }
- Timer commandTimer;
+ //
+ // End validation
+ //
- OwnedPointerVector<WriteErrorDetail> writeErrorsOwned;
- vector<WriteErrorDetail*>& writeErrors = writeErrorsOwned.mutableVector();
+ const WriteConcernOptions& writeConcern = _txn->getWriteConcern();
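+    // A write concern with no w mode, zero nodes, and no sync requirement is "silent": the
+    // response built below omits per-item details such as upserted ids and write errors.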
+ bool silentWC = writeConcern.wMode.empty() && writeConcern.wNumNodes == 0 &&
+ writeConcern.syncMode == WriteConcernOptions::NONE;
- OwnedPointerVector<BatchedUpsertDetail> upsertedOwned;
- vector<BatchedUpsertDetail*>& upserted = upsertedOwned.mutableVector();
+ Timer commandTimer;
- //
- // Apply each batch item, possibly bulking some items together in the write lock.
- // Stops on error if batch is ordered.
- //
+ OwnedPointerVector<WriteErrorDetail> writeErrorsOwned;
+ vector<WriteErrorDetail*>& writeErrors = writeErrorsOwned.mutableVector();
- bulkExecute( request, &upserted, &writeErrors );
+ OwnedPointerVector<BatchedUpsertDetail> upsertedOwned;
+ vector<BatchedUpsertDetail*>& upserted = upsertedOwned.mutableVector();
- //
- // Try to enforce the write concern if everything succeeded (unordered or ordered)
- // OR if something succeeded and we're unordered.
- //
+ //
+ // Apply each batch item, possibly bulking some items together in the write lock.
+ // Stops on error if batch is ordered.
+ //
- unique_ptr<WCErrorDetail> wcError;
- bool needToEnforceWC = writeErrors.empty()
- || ( !request.getOrdered()
- && writeErrors.size() < request.sizeWriteOps() );
+ bulkExecute(request, &upserted, &writeErrors);
- if ( needToEnforceWC ) {
- {
- stdx::lock_guard<Client> lk(*_txn->getClient());
- CurOp::get(_txn)->setMessage_inlock( "waiting for write concern" );
- }
+ //
+ // Try to enforce the write concern if everything succeeded (unordered or ordered)
+ // OR if something succeeded and we're unordered.
+ //
- WriteConcernResult res;
- Status status = waitForWriteConcern(
- _txn,
- repl::ReplClientInfo::forClient(_txn->getClient()).getLastOp(),
- &res);
+ unique_ptr<WCErrorDetail> wcError;
+ bool needToEnforceWC = writeErrors.empty() ||
+ (!request.getOrdered() && writeErrors.size() < request.sizeWriteOps());
- if ( !status.isOK() ) {
- wcError.reset( toWriteConcernError( status, res ) );
- }
+ if (needToEnforceWC) {
+ {
+ stdx::lock_guard<Client> lk(*_txn->getClient());
+ CurOp::get(_txn)->setMessage_inlock("waiting for write concern");
}
- //
- // Refresh metadata if needed
- //
+ WriteConcernResult res;
+ Status status = waitForWriteConcern(
+ _txn, repl::ReplClientInfo::forClient(_txn->getClient()).getLastOp(), &res);
- bool staleBatch = !writeErrors.empty()
- && writeErrors.back()->getErrCode() == ErrorCodes::StaleShardVersion;
-
- if ( staleBatch ) {
+ if (!status.isOK()) {
+ wcError.reset(toWriteConcernError(status, res));
+ }
+ }
- const BatchedRequestMetadata* requestMetadata = request.getMetadata();
- dassert( requestMetadata );
+ //
+ // Refresh metadata if needed
+ //
- // Make sure our shard name is set or is the same as what was set previously
- if ( shardingState.setShardName( requestMetadata->getShardName() ) ) {
+ bool staleBatch =
+ !writeErrors.empty() && writeErrors.back()->getErrCode() == ErrorCodes::StaleShardVersion;
+
+ if (staleBatch) {
+ const BatchedRequestMetadata* requestMetadata = request.getMetadata();
+ dassert(requestMetadata);
+
+ // Make sure our shard name is set or is the same as what was set previously
+ if (shardingState.setShardName(requestMetadata->getShardName())) {
+ //
+ // First, we refresh metadata if we need to based on the requested version.
+ //
+
+ ChunkVersion latestShardVersion;
+ shardingState.refreshMetadataIfNeeded(_txn,
+ request.getTargetingNS(),
+ requestMetadata->getShardVersion(),
+ &latestShardVersion);
+
+ // Report if we're still changing our metadata
+ // TODO: Better reporting per-collection
+ if (shardingState.inCriticalMigrateSection()) {
+ noteInCriticalSection(writeErrors.back());
+ }
+ if (queueForMigrationCommit) {
//
- // First, we refresh metadata if we need to based on the requested version.
+ // Queue up for migration to end - this allows us to be sure that clients will
+ // not repeatedly try to refresh metadata that is not yet written to the config
+ // server. Not necessary for correctness.
+ // Exposed as optional parameter to allow testing of queuing behavior with
+ // different network timings.
//
- ChunkVersion latestShardVersion;
- shardingState.refreshMetadataIfNeeded( _txn,
- request.getTargetingNS(),
- requestMetadata->getShardVersion(),
- &latestShardVersion );
-
- // Report if we're still changing our metadata
- // TODO: Better reporting per-collection
- if ( shardingState.inCriticalMigrateSection() ) {
- noteInCriticalSection( writeErrors.back() );
- }
-
- if ( queueForMigrationCommit ) {
-
- //
- // Queue up for migration to end - this allows us to be sure that clients will
- // not repeatedly try to refresh metadata that is not yet written to the config
- // server. Not necessary for correctness.
- // Exposed as optional parameter to allow testing of queuing behavior with
- // different network timings.
- //
-
- const ChunkVersion& requestShardVersion = requestMetadata->getShardVersion();
+ const ChunkVersion& requestShardVersion = requestMetadata->getShardVersion();
- //
- // Only wait if we're an older version (in the current collection epoch) and
- // we're not write compatible, implying that the current migration is affecting
- // writes.
- //
-
- if ( requestShardVersion.isOlderThan( latestShardVersion ) &&
- !requestShardVersion.isWriteCompatibleWith( latestShardVersion ) ) {
-
- while ( shardingState.inCriticalMigrateSection() ) {
+ //
+ // Only wait if we're an older version (in the current collection epoch) and
+ // we're not write compatible, implying that the current migration is affecting
+ // writes.
+ //
- log() << "write request to old shard version "
- << requestMetadata->getShardVersion().toString()
- << " waiting for migration commit" << endl;
+ if (requestShardVersion.isOlderThan(latestShardVersion) &&
+ !requestShardVersion.isWriteCompatibleWith(latestShardVersion)) {
+ while (shardingState.inCriticalMigrateSection()) {
+ log() << "write request to old shard version "
+ << requestMetadata->getShardVersion().toString()
+ << " waiting for migration commit" << endl;
- shardingState.waitTillNotInCriticalSection( 10 /* secs */);
- }
+ shardingState.waitTillNotInCriticalSection(10 /* secs */);
}
}
}
- else {
- // If our shard name is stale, our version must have been stale as well
- dassert( writeErrors.size() == request.sizeWriteOps() );
- }
+ } else {
+ // If our shard name is stale, our version must have been stale as well
+ dassert(writeErrors.size() == request.sizeWriteOps());
}
+ }
- //
- // Construct response
- //
-
- response->setOk( true );
+ //
+ // Construct response
+ //
- if ( !silentWC ) {
+ response->setOk(true);
- if ( upserted.size() ) {
- response->setUpsertDetails( upserted );
- }
+ if (!silentWC) {
+ if (upserted.size()) {
+ response->setUpsertDetails(upserted);
+ }
- if ( writeErrors.size() ) {
- response->setErrDetails( writeErrors );
- }
+ if (writeErrors.size()) {
+ response->setErrDetails(writeErrors);
+ }
- if ( wcError.get() ) {
- response->setWriteConcernError( wcError.release() );
- }
+ if (wcError.get()) {
+ response->setWriteConcernError(wcError.release());
+ }
- repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- const repl::ReplicationCoordinator::Mode replMode = replCoord->getReplicationMode();
- if (replMode != repl::ReplicationCoordinator::modeNone) {
- response->setLastOp(repl::ReplClientInfo::forClient(_txn->getClient()).getLastOp()
- .getTimestamp());
- if (replMode == repl::ReplicationCoordinator::modeReplSet) {
- response->setElectionId(replCoord->getElectionId());
- }
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ const repl::ReplicationCoordinator::Mode replMode = replCoord->getReplicationMode();
+ if (replMode != repl::ReplicationCoordinator::modeNone) {
+ response->setLastOp(
+ repl::ReplClientInfo::forClient(_txn->getClient()).getLastOp().getTimestamp());
+ if (replMode == repl::ReplicationCoordinator::modeReplSet) {
+ response->setElectionId(replCoord->getElectionId());
}
-
- // Set the stats for the response
- response->setN( _stats->numInserted + _stats->numUpserted + _stats->numMatched
- + _stats->numDeleted );
- if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update )
- response->setNModified( _stats->numModified );
}
- dassert( response->isValid( NULL ) );
+ // Set the stats for the response
+ response->setN(_stats->numInserted + _stats->numUpserted + _stats->numMatched +
+ _stats->numDeleted);
+ if (request.getBatchType() == BatchedCommandRequest::BatchType_Update)
+ response->setNModified(_stats->numModified);
}
- // Translates write item type to wire protocol op code.
- // Helper for WriteBatchExecutor::applyWriteItem().
- static int getOpCode(const BatchItemRef& currWrite) {
- switch (currWrite.getRequest()->getBatchType()) {
+ dassert(response->isValid(NULL));
+}
+
+// Translates write item type to wire protocol op code.
+// Helper for WriteBatchExecutor::applyWriteItem().
+static int getOpCode(const BatchItemRef& currWrite) {
+ switch (currWrite.getRequest()->getBatchType()) {
case BatchedCommandRequest::BatchType_Insert:
return dbInsert;
case BatchedCommandRequest::BatchType_Update:
@@ -388,1068 +375,990 @@ namespace mongo {
return dbDelete;
default:
MONGO_UNREACHABLE;
+ }
+}
+
+static void buildStaleError(const ChunkVersion& shardVersionRecvd,
+ const ChunkVersion& shardVersionWanted,
+ WriteErrorDetail* error) {
+ // Write stale error to results
+ error->setErrCode(ErrorCodes::StaleShardVersion);
+
+ BSONObjBuilder infoB;
+ shardVersionWanted.addToBSON(infoB, "vWanted");
+ error->setErrInfo(infoB.obj());
+
+ string errMsg = stream() << "stale shard version detected before write, received "
+ << shardVersionRecvd.toString() << " but local version is "
+ << shardVersionWanted.toString();
+ error->setErrMessage(errMsg);
+}
+
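// [Editorial note, not part of this change] The error that buildStaleError()
// assembles looks roughly like the following, with the fields coming from the
// three setters above (values are illustrative):
//
//     {
//         code:    ErrorCodes::StaleShardVersion,
//         errInfo: { vWanted: <shardVersionWanted> },
//         errmsg:  "stale shard version detected before write, received <recvd> "
//                  "but local version is <wanted>"
//     }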
+static bool checkShardVersion(OperationContext* txn,
+ ShardingState* shardingState,
+ const BatchedCommandRequest& request,
+ WriteOpResult* result) {
+ const NamespaceString& nss = request.getTargetingNSS();
+ dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
+
+ ChunkVersion requestShardVersion =
+ request.isMetadataSet() && request.getMetadata()->isShardVersionSet()
+ ? request.getMetadata()->getShardVersion()
+ : ChunkVersion::IGNORED();
+
+ if (shardingState->enabled()) {
+ CollectionMetadataPtr metadata = shardingState->getCollectionMetadata(nss.ns());
+
+ if (!ChunkVersion::isIgnoredVersion(requestShardVersion)) {
+ ChunkVersion shardVersion =
+ metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
+
+ if (!requestShardVersion.isWriteCompatibleWith(shardVersion)) {
+ result->setError(new WriteErrorDetail);
+ buildStaleError(requestShardVersion, shardVersion, result->getError());
+ return false;
+ }
}
}
- static void buildStaleError( const ChunkVersion& shardVersionRecvd,
- const ChunkVersion& shardVersionWanted,
- WriteErrorDetail* error ) {
-
- // Write stale error to results
- error->setErrCode( ErrorCodes::StaleShardVersion );
+ return true;
+}
- BSONObjBuilder infoB;
- shardVersionWanted.addToBSON( infoB, "vWanted" );
- error->setErrInfo( infoB.obj() );
-
- string errMsg = stream() << "stale shard version detected before write, received "
- << shardVersionRecvd.toString() << " but local version is "
- << shardVersionWanted.toString();
- error->setErrMessage( errMsg );
+static bool checkIsMasterForDatabase(const NamespaceString& ns, WriteOpResult* result) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
+ WriteErrorDetail* errorDetail = new WriteErrorDetail;
+ result->setError(errorDetail);
+ errorDetail->setErrCode(ErrorCodes::NotMaster);
+ errorDetail->setErrMessage("Not primary while writing to " + ns.toString());
+ return false;
}
-
- static bool checkShardVersion(OperationContext* txn,
+ return true;
+}
+
+static void buildUniqueIndexError(const BSONObj& keyPattern,
+ const BSONObj& indexPattern,
+ WriteErrorDetail* error) {
+ error->setErrCode(ErrorCodes::CannotCreateIndex);
+ string errMsg = stream() << "cannot create unique index over " << indexPattern
+ << " with shard key pattern " << keyPattern;
+ error->setErrMessage(errMsg);
+}
+
+static bool checkIndexConstraints(OperationContext* txn,
ShardingState* shardingState,
const BatchedCommandRequest& request,
WriteOpResult* result) {
+ const NamespaceString& nss = request.getTargetingNSS();
+ dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
- const NamespaceString& nss = request.getTargetingNSS();
- dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
-
- ChunkVersion requestShardVersion =
- request.isMetadataSet() && request.getMetadata()->isShardVersionSet() ?
- request.getMetadata()->getShardVersion() : ChunkVersion::IGNORED();
-
- if ( shardingState->enabled() ) {
-
- CollectionMetadataPtr metadata = shardingState->getCollectionMetadata( nss.ns() );
+ if (!request.isUniqueIndexRequest())
+ return true;
- if ( !ChunkVersion::isIgnoredVersion( requestShardVersion ) ) {
+ if (shardingState->enabled()) {
+ CollectionMetadataPtr metadata = shardingState->getCollectionMetadata(nss.ns());
- ChunkVersion shardVersion =
- metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
+ if (metadata) {
+ ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
+ if (!shardKeyPattern.isUniqueIndexCompatible(request.getIndexKeyPattern())) {
+ result->setError(new WriteErrorDetail);
+ buildUniqueIndexError(
+ metadata->getKeyPattern(), request.getIndexKeyPattern(), result->getError());
- if ( !requestShardVersion.isWriteCompatibleWith( shardVersion ) ) {
- result->setError(new WriteErrorDetail);
- buildStaleError(requestShardVersion, shardVersion, result->getError());
- return false;
- }
+ return false;
}
}
-
- return true;
- }
-
- static bool checkIsMasterForDatabase(const NamespaceString& ns, WriteOpResult* result) {
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
- WriteErrorDetail* errorDetail = new WriteErrorDetail;
- result->setError(errorDetail);
- errorDetail->setErrCode(ErrorCodes::NotMaster);
- errorDetail->setErrMessage("Not primary while writing to " + ns.toString());
- return false;
- }
- return true;
}
- static void buildUniqueIndexError( const BSONObj& keyPattern,
- const BSONObj& indexPattern,
- WriteErrorDetail* error ) {
- error->setErrCode( ErrorCodes::CannotCreateIndex );
- string errMsg = stream() << "cannot create unique index over " << indexPattern
- << " with shard key pattern " << keyPattern;
- error->setErrMessage( errMsg );
+ return true;
+}
+
+//
+// HELPERS FOR CUROP MANAGEMENT AND GLOBAL STATS
+//
+
+static void beginCurrentOp(OperationContext* txn, const BatchItemRef& currWrite) {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ CurOp* const currentOp = CurOp::get(txn);
+ currentOp->setOp_inlock(getOpCode(currWrite));
+ currentOp->ensureStarted();
+ currentOp->setNS_inlock(currWrite.getRequest()->getNS());
+
+ currentOp->debug().ns = currentOp->getNS();
+ currentOp->debug().op = currentOp->getOp();
+
+ if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert) {
+ currentOp->setQuery_inlock(currWrite.getDocument());
+ currentOp->debug().query = currWrite.getDocument();
+ currentOp->debug().ninserted = 0;
+ } else if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Update) {
+ currentOp->setQuery_inlock(currWrite.getUpdate()->getQuery());
+ currentOp->debug().query = currWrite.getUpdate()->getQuery();
+ currentOp->debug().updateobj = currWrite.getUpdate()->getUpdateExpr();
+ // Note: debug().nMatched, nModified and nmoved are set internally in update
+ } else {
+ dassert(currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete);
+ currentOp->setQuery_inlock(currWrite.getDelete()->getQuery());
+ currentOp->debug().query = currWrite.getDelete()->getQuery();
+ currentOp->debug().ndeleted = 0;
}
-
- static bool checkIndexConstraints(OperationContext* txn,
- ShardingState* shardingState,
- const BatchedCommandRequest& request,
- WriteOpResult* result) {
-
- const NamespaceString& nss = request.getTargetingNSS();
- dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
-
- if ( !request.isUniqueIndexRequest() )
- return true;
-
- if ( shardingState->enabled() ) {
-
- CollectionMetadataPtr metadata = shardingState->getCollectionMetadata( nss.ns() );
-
- if ( metadata ) {
- ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
- if (!shardKeyPattern.isUniqueIndexCompatible(request.getIndexKeyPattern())) {
-
- result->setError(new WriteErrorDetail);
- buildUniqueIndexError(metadata->getKeyPattern(),
- request.getIndexKeyPattern(),
- result->getError());
-
- return false;
- }
- }
- }
-
- return true;
+}
+
+void WriteBatchExecutor::incOpStats(const BatchItemRef& currWrite) {
+ if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert) {
+ _opCounters->gotInsert();
+ } else if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Update) {
+ _opCounters->gotUpdate();
+ } else {
+ dassert(currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete);
+ _opCounters->gotDelete();
}
-
- //
- // HELPERS FOR CUROP MANAGEMENT AND GLOBAL STATS
- //
-
- static void beginCurrentOp(OperationContext* txn, const BatchItemRef& currWrite) {
-
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp* const currentOp = CurOp::get(txn);
- currentOp->setOp_inlock(getOpCode(currWrite));
- currentOp->ensureStarted();
- currentOp->setNS_inlock( currWrite.getRequest()->getNS() );
-
- currentOp->debug().ns = currentOp->getNS();
- currentOp->debug().op = currentOp->getOp();
-
- if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert ) {
- currentOp->setQuery_inlock( currWrite.getDocument() );
- currentOp->debug().query = currWrite.getDocument();
- currentOp->debug().ninserted = 0;
+}
+
+void WriteBatchExecutor::incWriteStats(const BatchItemRef& currWrite,
+ const WriteOpStats& stats,
+ const WriteErrorDetail* error,
+ CurOp* currentOp) {
+ if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert) {
+ _stats->numInserted += stats.n;
+ currentOp->debug().ninserted += stats.n;
+ if (!error) {
+ _le->recordInsert(stats.n);
}
- else if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Update ) {
- currentOp->setQuery_inlock( currWrite.getUpdate()->getQuery() );
- currentOp->debug().query = currWrite.getUpdate()->getQuery();
- currentOp->debug().updateobj = currWrite.getUpdate()->getUpdateExpr();
- // Note: debug().nMatched, nModified and nmoved are set internally in update
+ } else if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Update) {
+ if (stats.upsertedID.isEmpty()) {
+ _stats->numMatched += stats.n;
+ _stats->numModified += stats.nModified;
+ } else {
+ ++_stats->numUpserted;
}
- else {
- dassert( currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete );
- currentOp->setQuery_inlock( currWrite.getDelete()->getQuery() );
- currentOp->debug().query = currWrite.getDelete()->getQuery();
- currentOp->debug().ndeleted = 0;
- }
-
- }
- void WriteBatchExecutor::incOpStats( const BatchItemRef& currWrite ) {
-
- if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert ) {
- _opCounters->gotInsert();
- }
- else if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Update ) {
- _opCounters->gotUpdate();
+ if (!error) {
+ _le->recordUpdate(stats.upsertedID.isEmpty() && stats.n > 0, stats.n, stats.upsertedID);
}
- else {
- dassert( currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete );
- _opCounters->gotDelete();
+ } else {
+ dassert(currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete);
+ _stats->numDeleted += stats.n;
+ if (!error) {
+ _le->recordDelete(stats.n);
}
+ currentOp->debug().ndeleted += stats.n;
}
- void WriteBatchExecutor::incWriteStats( const BatchItemRef& currWrite,
- const WriteOpStats& stats,
- const WriteErrorDetail* error,
- CurOp* currentOp ) {
-
- if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert ) {
- _stats->numInserted += stats.n;
- currentOp->debug().ninserted += stats.n;
- if (!error) {
- _le->recordInsert(stats.n);
- }
- }
- else if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Update ) {
- if ( stats.upsertedID.isEmpty() ) {
- _stats->numMatched += stats.n;
- _stats->numModified += stats.nModified;
- }
- else {
- ++_stats->numUpserted;
- }
-
- if (!error) {
- _le->recordUpdate( stats.upsertedID.isEmpty() && stats.n > 0,
- stats.n,
- stats.upsertedID );
- }
- }
- else {
- dassert( currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete );
- _stats->numDeleted += stats.n;
- if ( !error ) {
- _le->recordDelete( stats.n );
- }
- currentOp->debug().ndeleted += stats.n;
- }
-
- if (error) {
- _le->setLastError(error->getErrCode(), error->getErrMessage().c_str());
- }
+ if (error) {
+ _le->setLastError(error->getErrCode(), error->getErrMessage().c_str());
}
-
- static void finishCurrentOp(OperationContext* txn, WriteErrorDetail* opError) {
-
- CurOp* currentOp = CurOp::get(txn);
- currentOp->done();
- int executionTime = currentOp->debug().executionTime = currentOp->totalTimeMillis();
- recordCurOpMetrics(txn);
- Top::get(txn->getClient()->getServiceContext()).record(
- currentOp->getNS(),
+}
+
+static void finishCurrentOp(OperationContext* txn, WriteErrorDetail* opError) {
+ CurOp* currentOp = CurOp::get(txn);
+ currentOp->done();
+ int executionTime = currentOp->debug().executionTime = currentOp->totalTimeMillis();
+ recordCurOpMetrics(txn);
+ Top::get(txn->getClient()->getServiceContext())
+ .record(currentOp->getNS(),
currentOp->getOp(),
- 1, // "write locked"
+ 1, // "write locked"
currentOp->totalTimeMicros(),
currentOp->isCommand());
- if ( opError ) {
- currentOp->debug().exceptionInfo = ExceptionInfo( opError->getErrMessage(),
- opError->getErrCode() );
+ if (opError) {
+ currentOp->debug().exceptionInfo =
+ ExceptionInfo(opError->getErrMessage(), opError->getErrCode());
- LOG(3) << " Caught Assertion in " << opToString( currentOp->getOp() )
- << ", continuing " << causedBy( opError->getErrMessage() ) << endl;
- }
+ LOG(3) << " Caught Assertion in " << opToString(currentOp->getOp()) << ", continuing "
+ << causedBy(opError->getErrMessage()) << endl;
+ }
- bool logAll = logger::globalLogDomain()->shouldLog(logger::LogComponent::kWrite,
- logger::LogSeverity::Debug(1));
- bool logSlow = executionTime
- > ( serverGlobalParams.slowMS + currentOp->getExpectedLatencyMs() );
+ bool logAll = logger::globalLogDomain()->shouldLog(logger::LogComponent::kWrite,
+ logger::LogSeverity::Debug(1));
+ bool logSlow = executionTime > (serverGlobalParams.slowMS + currentOp->getExpectedLatencyMs());
- if ( logAll || logSlow ) {
- Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
+ if (logAll || logSlow) {
+ Locker::LockerInfo lockerInfo;
+ txn->lockState()->getLockerInfo(&lockerInfo);
- LOG(0) << currentOp->debug().report(*currentOp, lockerInfo.stats);
- }
+ LOG(0) << currentOp->debug().report(*currentOp, lockerInfo.stats);
+ }
- if (currentOp->shouldDBProfile(executionTime)) {
- profile(txn, CurOp::get(txn)->getOp());
- }
+ if (currentOp->shouldDBProfile(executionTime)) {
+ profile(txn, CurOp::get(txn)->getOp());
}
+}
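// [Editorial note, not part of this change] finishCurrentOp() reports an op at
// LOG(0) either when kWrite logging is at Debug(1) or when the op ran longer
// than serverGlobalParams.slowMS plus the op's expected latency; with the
// stock slowMS default of 100 ms and zero expected latency, any write taking
// more than 100 ms is logged, and shouldDBProfile() may additionally profile it.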
- // END HELPERS
+// END HELPERS
- //
- // CORE WRITE OPERATIONS (declaration)
- // These functions write to the database and return stats and zero or one of:
- // - page fault
- // - error
- //
+//
+// CORE WRITE OPERATIONS (declaration)
+// These functions write to the database and return stats and zero or one of:
+// - page fault
+// - error
+//
- static void singleInsert( OperationContext* txn,
- const BSONObj& docToInsert,
- Collection* collection,
- WriteOpResult* result );
+static void singleInsert(OperationContext* txn,
+ const BSONObj& docToInsert,
+ Collection* collection,
+ WriteOpResult* result);
- static void singleCreateIndex( OperationContext* txn,
- const BSONObj& indexDesc,
- WriteOpResult* result );
+static void singleCreateIndex(OperationContext* txn,
+ const BSONObj& indexDesc,
+ WriteOpResult* result);
- static void multiUpdate( OperationContext* txn,
- const BatchItemRef& updateItem,
- WriteOpResult* result );
+static void multiUpdate(OperationContext* txn,
+ const BatchItemRef& updateItem,
+ WriteOpResult* result);
- static void multiRemove( OperationContext* txn,
- const BatchItemRef& removeItem,
- WriteOpResult* result );
+static void multiRemove(OperationContext* txn,
+ const BatchItemRef& removeItem,
+ WriteOpResult* result);
- //
- // WRITE EXECUTION
- // In general, the exec* operations manage db lock state and stats before dispatching to the
- // core write operations, which are *only* responsible for performing a write and reporting
- // success or failure.
- //
+//
+// WRITE EXECUTION
+// In general, the exec* operations manage db lock state and stats before dispatching to the
+// core write operations, which are *only* responsible for performing a write and reporting
+// success or failure.
+//
+
+/**
+ * Representation of the execution state of execInserts. Used by a single
+ * execution of execInserts in a single thread.
+ */
+class WriteBatchExecutor::ExecInsertsState {
+ MONGO_DISALLOW_COPYING(ExecInsertsState);
+public:
/**
- * Representation of the execution state of execInserts. Used by a single
- * execution of execInserts in a single thread.
+ * Constructs a new instance, for performing inserts described in "aRequest".
*/
- class WriteBatchExecutor::ExecInsertsState {
- MONGO_DISALLOW_COPYING(ExecInsertsState);
- public:
- /**
- * Constructs a new instance, for performing inserts described in "aRequest".
- */
- explicit ExecInsertsState(OperationContext* txn,
- const BatchedCommandRequest* aRequest);
-
- /**
- * Acquires the write lock and client context needed to perform the current write operation.
- * Returns true on success, after which it is safe to use the "context" and "collection"
- * members. It is safe to call this function if this instance already holds the write lock.
- *
- * On failure, writeLock, context and collection will be NULL/clear.
- */
- bool lockAndCheck(WriteOpResult* result);
-
- /**
- * Releases the client context and write lock acquired by lockAndCheck. Safe to call
- * regardless of whether or not this state object currently owns the lock.
- */
- void unlock();
-
- /**
- * Returns true if this executor has the lock on the target database.
- */
- bool hasLock() { return _dbLock.get(); }
-
- /**
- * Gets the target collection for the batch operation. Value is undefined
- * unless hasLock() is true.
- */
- Collection* getCollection() { return _collection; }
-
- OperationContext* txn;
-
- // Request object describing the inserts.
- const BatchedCommandRequest* request;
-
- // Index of the current insert operation to perform.
- size_t currIndex = 0;
-
- // Translation of insert documents in "request" into insert-ready forms. This vector has a
- // correspondence with elements of the "request", and "currIndex" is used to
- // index both.
- std::vector<StatusWith<BSONObj> > normalizedInserts;
-
- private:
- bool _lockAndCheckImpl(WriteOpResult* result, bool intentLock);
-
- ScopedTransaction _transaction;
- // Guard object for the write lock on the target database.
- std::unique_ptr<Lock::DBLock> _dbLock;
- std::unique_ptr<Lock::CollectionLock> _collLock;
-
- Database* _database = nullptr;
- Collection* _collection = nullptr;
- };
-
- void WriteBatchExecutor::bulkExecute( const BatchedCommandRequest& request,
- std::vector<BatchedUpsertDetail*>* upsertedIds,
- std::vector<WriteErrorDetail*>* errors ) {
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (request.shouldBypassValidation()) {
- maybeDisableValidation.emplace(_txn);
- }
+ explicit ExecInsertsState(OperationContext* txn, const BatchedCommandRequest* aRequest);
- if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert ) {
- execInserts( request, errors );
- }
- else if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update ) {
- for ( size_t i = 0; i < request.sizeWriteOps(); i++ ) {
+ /**
+ * Acquires the write lock and client context needed to perform the current write operation.
+ * Returns true on success, after which it is safe to use the "context" and "collection"
+ * members. It is safe to call this function if this instance already holds the write lock.
+ *
+ * On failure, writeLock, context and collection will be NULL/clear.
+ */
+ bool lockAndCheck(WriteOpResult* result);
- if ( i + 1 == request.sizeWriteOps() ) {
- setupSynchronousCommit( _txn );
- }
+ /**
+ * Releases the client context and write lock acquired by lockAndCheck. Safe to call
+ * regardless of whether or not this state object currently owns the lock.
+ */
+ void unlock();
- WriteErrorDetail* error = NULL;
- BSONObj upsertedId;
- execUpdate( BatchItemRef( &request, i ), &upsertedId, &error );
+ /**
+ * Returns true if this executor has the lock on the target database.
+ */
+ bool hasLock() {
+ return _dbLock.get();
+ }
- if ( !upsertedId.isEmpty() ) {
- BatchedUpsertDetail* batchUpsertedId = new BatchedUpsertDetail;
- batchUpsertedId->setIndex( i );
- batchUpsertedId->setUpsertedID( upsertedId );
- upsertedIds->push_back( batchUpsertedId );
- }
+ /**
+ * Gets the target collection for the batch operation. Value is undefined
+ * unless hasLock() is true.
+ */
+ Collection* getCollection() {
+ return _collection;
+ }
- if ( error ) {
- errors->push_back( error );
- if ( request.getOrdered() )
- break;
- }
- }
- }
- else {
- dassert( request.getBatchType() == BatchedCommandRequest::BatchType_Delete );
- for ( size_t i = 0; i < request.sizeWriteOps(); i++ ) {
+ OperationContext* txn;
- if ( i + 1 == request.sizeWriteOps() ) {
- setupSynchronousCommit( _txn );
- }
+ // Request object describing the inserts.
+ const BatchedCommandRequest* request;
- WriteErrorDetail* error = NULL;
- execRemove( BatchItemRef( &request, i ), &error );
+ // Index of the current insert operation to perform.
+ size_t currIndex = 0;
- if ( error ) {
- errors->push_back( error );
- if ( request.getOrdered() )
- break;
- }
- }
- }
-
- // Fill in stale version errors for unordered batches (update/delete can't do this on own)
- if ( !errors->empty() && !request.getOrdered() ) {
+ // Translation of insert documents in "request" into insert-ready forms. This vector has a
+ // correspondence with elements of the "request", and "currIndex" is used to
+ // index both.
+ std::vector<StatusWith<BSONObj>> normalizedInserts;
- const WriteErrorDetail* finalError = errors->back();
+private:
+ bool _lockAndCheckImpl(WriteOpResult* result, bool intentLock);
- if ( finalError->getErrCode() == ErrorCodes::StaleShardVersion ) {
- for ( size_t i = finalError->getIndex() + 1; i < request.sizeWriteOps(); i++ ) {
- WriteErrorDetail* dupStaleError = new WriteErrorDetail;
- finalError->cloneTo( dupStaleError );
- errors->push_back( dupStaleError );
- }
- }
- }
- }
+ ScopedTransaction _transaction;
+ // Guard object for the write lock on the target database.
+ std::unique_ptr<Lock::DBLock> _dbLock;
+ std::unique_ptr<Lock::CollectionLock> _collLock;
- // Goes over the request and preprocesses normalized versions of all the inserts in the request
- static void normalizeInserts( const BatchedCommandRequest& request,
- vector<StatusWith<BSONObj> >* normalizedInserts ) {
+ Database* _database = nullptr;
+ Collection* _collection = nullptr;
+};
- normalizedInserts->reserve(request.sizeWriteOps());
- for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) {
- BSONObj insertDoc = request.getInsertRequest()->getDocumentsAt( i );
- StatusWith<BSONObj> normalInsert = fixDocumentForInsert( insertDoc );
- normalizedInserts->push_back( normalInsert );
- if ( request.getOrdered() && !normalInsert.isOK() )
- break;
- }
+void WriteBatchExecutor::bulkExecute(const BatchedCommandRequest& request,
+ std::vector<BatchedUpsertDetail*>* upsertedIds,
+ std::vector<WriteErrorDetail*>* errors) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (request.shouldBypassValidation()) {
+ maybeDisableValidation.emplace(_txn);
}
- void WriteBatchExecutor::execInserts( const BatchedCommandRequest& request,
- std::vector<WriteErrorDetail*>* errors ) {
-
- // Theory of operation:
- //
- // Instantiates an ExecInsertsState, which represents all of the state involved in the batch
- // insert execution algorithm. Most importantly, encapsulates the lock state.
- //
- // Every iteration of the loop in execInserts() processes one document insertion, by calling
- // insertOne() exactly once for a given value of state.currIndex.
- //
- // If the ExecInsertsState indicates that the requisite write locks are not held, insertOne
- // acquires them and performs lock-acquisition-time checks. However, on non-error
- // execution, it does not release the locks. Therefore, the yielding logic in the while
- // loop in execInserts() is solely responsible for lock release in the non-error case.
- //
- // Internally, insertOne loops performing the single insert until it completes without a
- // PageFaultException, or until it fails with some kind of error. Errors are mostly
- // propagated via the request->error field, but DBExceptions or std::exceptions may escape,
- // particularly on operation interruption. These kinds of errors necessarily prevent
- // further insertOne calls, and stop the batch. As a result, the only expected source of
- // such exceptions are interruptions.
- ExecInsertsState state(_txn, &request);
- normalizeInserts(request, &state.normalizedInserts);
-
- ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
- if (info) {
- if (request.isMetadataSet() && request.getMetadata()->isShardVersionSet()) {
- info->setVersion(request.getTargetingNS(),
- request.getMetadata()->getShardVersion());
- }
- else {
- info->setVersion(request.getTargetingNS(), ChunkVersion::IGNORED());
- }
- }
-
- // Yield frequency is based on the same constants used by PlanYieldPolicy.
- ElapsedTracker elapsedTracker(internalQueryExecYieldIterations,
- internalQueryExecYieldPeriodMS);
-
- for (state.currIndex = 0;
- state.currIndex < state.request->sizeWriteOps();
- ++state.currIndex) {
-
- if (state.currIndex + 1 == state.request->sizeWriteOps()) {
+ if (request.getBatchType() == BatchedCommandRequest::BatchType_Insert) {
+ execInserts(request, errors);
+ } else if (request.getBatchType() == BatchedCommandRequest::BatchType_Update) {
+ for (size_t i = 0; i < request.sizeWriteOps(); i++) {
+ if (i + 1 == request.sizeWriteOps()) {
setupSynchronousCommit(_txn);
}
- if (elapsedTracker.intervalHasElapsed()) {
- // Yield between inserts.
- if (state.hasLock()) {
- // Release our locks. They get reacquired when insertOne() calls
- // ExecInsertsState::lockAndCheck(). Since the lock manager guarantees FIFO
- // queues waiting on locks, there is no need to explicitly sleep or give up
- // control of the processor here.
- state.unlock();
-
- // This releases any storage engine held locks/snapshots.
- _txn->recoveryUnit()->abandonSnapshot();
- }
+ WriteErrorDetail* error = NULL;
+ BSONObj upsertedId;
+ execUpdate(BatchItemRef(&request, i), &upsertedId, &error);
+
+ if (!upsertedId.isEmpty()) {
+ BatchedUpsertDetail* batchUpsertedId = new BatchedUpsertDetail;
+ batchUpsertedId->setIndex(i);
+ batchUpsertedId->setUpsertedID(upsertedId);
+ upsertedIds->push_back(batchUpsertedId);
+ }
- _txn->checkForInterrupt();
- elapsedTracker.resetLastTime();
+ if (error) {
+ errors->push_back(error);
+ if (request.getOrdered())
+ break;
+ }
+ }
+ } else {
+ dassert(request.getBatchType() == BatchedCommandRequest::BatchType_Delete);
+ for (size_t i = 0; i < request.sizeWriteOps(); i++) {
+ if (i + 1 == request.sizeWriteOps()) {
+ setupSynchronousCommit(_txn);
}
WriteErrorDetail* error = NULL;
- execOneInsert(&state, &error);
+ execRemove(BatchItemRef(&request, i), &error);
+
if (error) {
errors->push_back(error);
- error->setIndex(state.currIndex);
if (request.getOrdered())
- return;
+ break;
}
}
}
- void WriteBatchExecutor::execUpdate( const BatchItemRef& updateItem,
- BSONObj* upsertedId,
- WriteErrorDetail** error ) {
-
- // BEGIN CURRENT OP
- CurOp currentOp(_txn);
- beginCurrentOp(_txn, updateItem);
- incOpStats( updateItem );
-
- ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
- if (info) {
- auto rootRequest = updateItem.getRequest();
- if (!updateItem.getUpdate()->getMulti() &&
- rootRequest->isMetadataSet() &&
- rootRequest->getMetadata()->isShardVersionSet()) {
- info->setVersion(rootRequest->getTargetingNS(),
- rootRequest->getMetadata()->getShardVersion());
- }
- else {
- info->setVersion(rootRequest->getTargetingNS(), ChunkVersion::IGNORED());
+    // Fill in stale version errors for unordered batches (update/delete can't do this on their own)
+ if (!errors->empty() && !request.getOrdered()) {
+ const WriteErrorDetail* finalError = errors->back();
+
+ if (finalError->getErrCode() == ErrorCodes::StaleShardVersion) {
+ for (size_t i = finalError->getIndex() + 1; i < request.sizeWriteOps(); i++) {
+ WriteErrorDetail* dupStaleError = new WriteErrorDetail;
+ finalError->cloneTo(dupStaleError);
+ errors->push_back(dupStaleError);
}
}
+ }
+}
+
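// [Editorial note, not part of this change] The back-fill loop above works as
// follows: if the last recorded error of an unordered batch is a
// StaleShardVersion on item k, the same error is cloned onto items k+1 .. n-1,
// so the router sees a consistent retargeting signal for the remainder of the
// batch.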
+// Goes over the request and preprocesses normalized versions of all the inserts in the request
+static void normalizeInserts(const BatchedCommandRequest& request,
+ vector<StatusWith<BSONObj>>* normalizedInserts) {
+ normalizedInserts->reserve(request.sizeWriteOps());
+ for (size_t i = 0; i < request.sizeWriteOps(); ++i) {
+ BSONObj insertDoc = request.getInsertRequest()->getDocumentsAt(i);
+ StatusWith<BSONObj> normalInsert = fixDocumentForInsert(insertDoc);
+ normalizedInserts->push_back(normalInsert);
+ if (request.getOrdered() && !normalInsert.isOK())
+ break;
+ }
+}
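// [Editorial note, not part of this change] fixDocumentForInsert() may return
// an empty BSONObj to signal that the original document needs no rewriting;
// insertOne() below relies on this via its isEmpty() check when choosing
// between the normalized and the original document.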
- WriteOpResult result;
-
- multiUpdate( _txn, updateItem, &result );
-
- if ( !result.getStats().upsertedID.isEmpty() ) {
- *upsertedId = result.getStats().upsertedID;
+void WriteBatchExecutor::execInserts(const BatchedCommandRequest& request,
+ std::vector<WriteErrorDetail*>* errors) {
+ // Theory of operation:
+ //
+ // Instantiates an ExecInsertsState, which represents all of the state involved in the batch
+ // insert execution algorithm. Most importantly, encapsulates the lock state.
+ //
+ // Every iteration of the loop in execInserts() processes one document insertion, by calling
+ // insertOne() exactly once for a given value of state.currIndex.
+ //
+ // If the ExecInsertsState indicates that the requisite write locks are not held, insertOne
+ // acquires them and performs lock-acquisition-time checks. However, on non-error
+ // execution, it does not release the locks. Therefore, the yielding logic in the while
+ // loop in execInserts() is solely responsible for lock release in the non-error case.
+ //
+ // Internally, insertOne loops performing the single insert until it completes without a
+ // PageFaultException, or until it fails with some kind of error. Errors are mostly
+ // propagated via the request->error field, but DBExceptions or std::exceptions may escape,
+ // particularly on operation interruption. These kinds of errors necessarily prevent
+    // further insertOne calls, and stop the batch. As a result, the only expected sources of
+    // such exceptions are interruptions.
+ ExecInsertsState state(_txn, &request);
+ normalizeInserts(request, &state.normalizedInserts);
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
+ if (info) {
+ if (request.isMetadataSet() && request.getMetadata()->isShardVersionSet()) {
+ info->setVersion(request.getTargetingNS(), request.getMetadata()->getShardVersion());
+ } else {
+ info->setVersion(request.getTargetingNS(), ChunkVersion::IGNORED());
}
- // END CURRENT OP
- incWriteStats( updateItem, result.getStats(), result.getError(), &currentOp );
- finishCurrentOp(_txn, result.getError());
+ }
- // End current transaction and release snapshot.
- _txn->recoveryUnit()->abandonSnapshot();
+ // Yield frequency is based on the same constants used by PlanYieldPolicy.
+ ElapsedTracker elapsedTracker(internalQueryExecYieldIterations, internalQueryExecYieldPeriodMS);
- if ( result.getError() ) {
- result.getError()->setIndex( updateItem.getItemIndex() );
- *error = result.releaseError();
+ for (state.currIndex = 0; state.currIndex < state.request->sizeWriteOps(); ++state.currIndex) {
+ if (state.currIndex + 1 == state.request->sizeWriteOps()) {
+ setupSynchronousCommit(_txn);
}
- }
-
- void WriteBatchExecutor::execRemove( const BatchItemRef& removeItem,
- WriteErrorDetail** error ) {
- // Removes are similar to updates, but page faults are handled externally
+ if (elapsedTracker.intervalHasElapsed()) {
+ // Yield between inserts.
+ if (state.hasLock()) {
+ // Release our locks. They get reacquired when insertOne() calls
+ // ExecInsertsState::lockAndCheck(). Since the lock manager guarantees FIFO
+ // queues waiting on locks, there is no need to explicitly sleep or give up
+ // control of the processor here.
+ state.unlock();
+
+ // This releases any storage engine held locks/snapshots.
+ _txn->recoveryUnit()->abandonSnapshot();
+ }
- // BEGIN CURRENT OP
- CurOp currentOp(_txn);
- beginCurrentOp(_txn, removeItem);
- incOpStats( removeItem );
+ _txn->checkForInterrupt();
+ elapsedTracker.resetLastTime();
+ }
- ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
- if (info) {
- auto rootRequest = removeItem.getRequest();
- if (removeItem.getDelete()->getLimit() == 1 &&
- rootRequest->isMetadataSet() &&
- rootRequest->getMetadata()->isShardVersionSet()) {
- info->setVersion(rootRequest->getTargetingNS(),
- rootRequest->getMetadata()->getShardVersion());
- }
- else {
- info->setVersion(rootRequest->getTargetingNS(), ChunkVersion::IGNORED());
- }
+ WriteErrorDetail* error = NULL;
+ execOneInsert(&state, &error);
+ if (error) {
+ errors->push_back(error);
+ error->setIndex(state.currIndex);
+ if (request.getOrdered())
+ return;
+ }
+ }
+}
+
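// [Editorial sketch, not part of this change] A generic, self-contained
// illustration of the yield cadence used in execInserts() above: give up
// resources either every N work items or every P milliseconds, whichever comes
// first. The real ElapsedTracker is assumed to behave similarly; the names
// below are invented for the illustration.

#include <chrono>
#include <cstdint>

class YieldCadence {
public:
    YieldCadence(int32_t itemsBetweenChecks, int64_t msBetweenChecks)
        : _itemsBetweenChecks(itemsBetweenChecks),
          _msBetweenChecks(msBetweenChecks),
          _last(std::chrono::steady_clock::now()) {}

    // True once either the item budget or the time budget is exhausted.
    bool intervalHasElapsed() {
        if (++_items >= _itemsBetweenChecks)
            return true;
        const auto elapsed = std::chrono::steady_clock::now() - _last;
        return std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count() >=
            _msBetweenChecks;
    }

    // Called after yielding, to start a fresh interval.
    void resetLastTime() {
        _items = 0;
        _last = std::chrono::steady_clock::now();
    }

private:
    const int32_t _itemsBetweenChecks;
    const int64_t _msBetweenChecks;
    int32_t _items = 0;
    std::chrono::steady_clock::time_point _last;
};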
+void WriteBatchExecutor::execUpdate(const BatchItemRef& updateItem,
+ BSONObj* upsertedId,
+ WriteErrorDetail** error) {
+ // BEGIN CURRENT OP
+ CurOp currentOp(_txn);
+ beginCurrentOp(_txn, updateItem);
+ incOpStats(updateItem);
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
+ if (info) {
+ auto rootRequest = updateItem.getRequest();
+ if (!updateItem.getUpdate()->getMulti() && rootRequest->isMetadataSet() &&
+ rootRequest->getMetadata()->isShardVersionSet()) {
+ info->setVersion(rootRequest->getTargetingNS(),
+ rootRequest->getMetadata()->getShardVersion());
+ } else {
+ info->setVersion(rootRequest->getTargetingNS(), ChunkVersion::IGNORED());
}
+ }
- WriteOpResult result;
+ WriteOpResult result;
- multiRemove( _txn, removeItem, &result );
+ multiUpdate(_txn, updateItem, &result);
- // END CURRENT OP
- incWriteStats( removeItem, result.getStats(), result.getError(), &currentOp );
- finishCurrentOp(_txn, result.getError());
+ if (!result.getStats().upsertedID.isEmpty()) {
+ *upsertedId = result.getStats().upsertedID;
+ }
+ // END CURRENT OP
+ incWriteStats(updateItem, result.getStats(), result.getError(), &currentOp);
+ finishCurrentOp(_txn, result.getError());
- // End current transaction and release snapshot.
- _txn->recoveryUnit()->abandonSnapshot();
+ // End current transaction and release snapshot.
+ _txn->recoveryUnit()->abandonSnapshot();
- if ( result.getError() ) {
- result.getError()->setIndex( removeItem.getItemIndex() );
- *error = result.releaseError();
+ if (result.getError()) {
+ result.getError()->setIndex(updateItem.getItemIndex());
+ *error = result.releaseError();
+ }
+}
+
+void WriteBatchExecutor::execRemove(const BatchItemRef& removeItem, WriteErrorDetail** error) {
+ // Removes are similar to updates, but page faults are handled externally
+
+ // BEGIN CURRENT OP
+ CurOp currentOp(_txn);
+ beginCurrentOp(_txn, removeItem);
+ incOpStats(removeItem);
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
+ if (info) {
+ auto rootRequest = removeItem.getRequest();
+ if (removeItem.getDelete()->getLimit() == 1 && rootRequest->isMetadataSet() &&
+ rootRequest->getMetadata()->isShardVersionSet()) {
+ info->setVersion(rootRequest->getTargetingNS(),
+ rootRequest->getMetadata()->getShardVersion());
+ } else {
+ info->setVersion(rootRequest->getTargetingNS(), ChunkVersion::IGNORED());
}
}
- //
- // IN-DB-LOCK CORE OPERATIONS
- //
+ WriteOpResult result;
+
+ multiRemove(_txn, removeItem, &result);
+
+ // END CURRENT OP
+ incWriteStats(removeItem, result.getStats(), result.getError(), &currentOp);
+ finishCurrentOp(_txn, result.getError());
+
+ // End current transaction and release snapshot.
+ _txn->recoveryUnit()->abandonSnapshot();
- WriteBatchExecutor::ExecInsertsState::ExecInsertsState(OperationContext* txn,
- const BatchedCommandRequest* aRequest) :
- txn(txn),
- request(aRequest),
- _transaction(txn, MODE_IX) {
+ if (result.getError()) {
+ result.getError()->setIndex(removeItem.getItemIndex());
+ *error = result.releaseError();
}
+}
- bool WriteBatchExecutor::ExecInsertsState::_lockAndCheckImpl(WriteOpResult* result,
- bool intentLock) {
- if (hasLock()) {
- CurOp::get(txn)->raiseDbProfileLevel(_database->getProfilingLevel());
- return true;
- }
+//
+// IN-DB-LOCK CORE OPERATIONS
+//
- if (request->isInsertIndexRequest())
- intentLock = false; // can't build indexes in intent mode
-
- const NamespaceString& nss = request->getNSS();
- invariant(!_collLock);
- invariant(!_dbLock);
- _dbLock = stdx::make_unique<Lock::DBLock>(txn->lockState(),
- nss.db(),
- intentLock ? MODE_IX : MODE_X);
- _database = dbHolder().get(txn, nss.ns());
- if (intentLock && !_database) {
- // Ensure exclusive lock in case the database doesn't yet exist
- _dbLock.reset();
- _dbLock = stdx::make_unique<Lock::DBLock>(txn->lockState(), nss.db(), MODE_X);
- intentLock = false;
- }
- _collLock = stdx::make_unique<Lock::CollectionLock>(txn->lockState(),
- nss.ns(),
- intentLock ? MODE_IX : MODE_X);
- if (!checkIsMasterForDatabase(nss, result)) {
- return false;
- }
- if (!checkShardVersion(txn, &shardingState, *request, result)) {
- return false;
- }
- if (!checkIndexConstraints(txn, &shardingState, *request, result)) {
- return false;
- }
+WriteBatchExecutor::ExecInsertsState::ExecInsertsState(OperationContext* txn,
+ const BatchedCommandRequest* aRequest)
+ : txn(txn), request(aRequest), _transaction(txn, MODE_IX) {}
- if (!_database) {
- invariant(!intentLock);
- _database = dbHolder().openDb(txn, nss.ns());
- }
+bool WriteBatchExecutor::ExecInsertsState::_lockAndCheckImpl(WriteOpResult* result,
+ bool intentLock) {
+ if (hasLock()) {
CurOp::get(txn)->raiseDbProfileLevel(_database->getProfilingLevel());
- _collection = _database->getCollection(request->getTargetingNS());
- if (!_collection) {
- if (intentLock) {
- // try again with full X lock.
- unlock();
- return _lockAndCheckImpl(result, false);
- }
-
- WriteUnitOfWork wunit (txn);
- // Implicitly create if it doesn't exist
- _collection = _database->createCollection(txn, request->getTargetingNS());
- if (!_collection) {
- result->setError(
- toWriteError(Status(ErrorCodes::InternalError,
- "could not create collection " +
- request->getTargetingNS())));
- return false;
- }
- wunit.commit();
- }
return true;
}
- bool WriteBatchExecutor::ExecInsertsState::lockAndCheck(WriteOpResult* result) {
- if (_lockAndCheckImpl(result, true))
- return true;
- unlock();
+ if (request->isInsertIndexRequest())
+ intentLock = false; // can't build indexes in intent mode
+
+ const NamespaceString& nss = request->getNSS();
+ invariant(!_collLock);
+ invariant(!_dbLock);
+ _dbLock =
+ stdx::make_unique<Lock::DBLock>(txn->lockState(), nss.db(), intentLock ? MODE_IX : MODE_X);
+ _database = dbHolder().get(txn, nss.ns());
+ if (intentLock && !_database) {
+ // Ensure exclusive lock in case the database doesn't yet exist
+ _dbLock.reset();
+ _dbLock = stdx::make_unique<Lock::DBLock>(txn->lockState(), nss.db(), MODE_X);
+ intentLock = false;
+ }
+ _collLock = stdx::make_unique<Lock::CollectionLock>(
+ txn->lockState(), nss.ns(), intentLock ? MODE_IX : MODE_X);
+ if (!checkIsMasterForDatabase(nss, result)) {
return false;
}
-
- void WriteBatchExecutor::ExecInsertsState::unlock() {
- _collection = nullptr;
- _database = nullptr;
- _collLock.reset();
- _dbLock.reset();
+ if (!checkShardVersion(txn, &shardingState, *request, result)) {
+ return false;
+ }
+ if (!checkIndexConstraints(txn, &shardingState, *request, result)) {
+ return false;
}
- static void insertOne(WriteBatchExecutor::ExecInsertsState* state, WriteOpResult* result) {
- // we have to be top level so we can retry
- invariant(!state->txn->lockState()->inAWriteUnitOfWork() );
- invariant(state->currIndex < state->normalizedInserts.size());
-
- const StatusWith<BSONObj>& normalizedInsert(state->normalizedInserts[state->currIndex]);
+ if (!_database) {
+ invariant(!intentLock);
+ _database = dbHolder().openDb(txn, nss.ns());
+ }
+ CurOp::get(txn)->raiseDbProfileLevel(_database->getProfilingLevel());
+ _collection = _database->getCollection(request->getTargetingNS());
+ if (!_collection) {
+ if (intentLock) {
+ // try again with full X lock.
+ unlock();
+ return _lockAndCheckImpl(result, false);
+ }
- if (!normalizedInsert.isOK()) {
- result->setError(toWriteError(normalizedInsert.getStatus()));
- return;
+ WriteUnitOfWork wunit(txn);
+ // Implicitly create if it doesn't exist
+ _collection = _database->createCollection(txn, request->getTargetingNS());
+ if (!_collection) {
+ result->setError(
+ toWriteError(Status(ErrorCodes::InternalError,
+ "could not create collection " + request->getTargetingNS())));
+ return false;
}
+ wunit.commit();
+ }
+ return true;
+}
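// [Editorial note, not part of this change] Lock acquisition above starts in
// intent mode (MODE_IX on database and collection) and escalates to MODE_X in
// three cases: the request is an index build, the database does not exist yet,
// or the collection is missing and must be implicitly created (the retry via
// _lockAndCheckImpl(result, false)). Master status, shard version and
// unique-index constraints are checked after each acquisition.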
- const BSONObj& insertDoc = normalizedInsert.getValue().isEmpty() ?
- state->request->getInsertRequest()->getDocumentsAt( state->currIndex ) :
- normalizedInsert.getValue();
+bool WriteBatchExecutor::ExecInsertsState::lockAndCheck(WriteOpResult* result) {
+ if (_lockAndCheckImpl(result, true))
+ return true;
+ unlock();
+ return false;
+}
+
+void WriteBatchExecutor::ExecInsertsState::unlock() {
+ _collection = nullptr;
+ _database = nullptr;
+ _collLock.reset();
+ _dbLock.reset();
+}
+
+static void insertOne(WriteBatchExecutor::ExecInsertsState* state, WriteOpResult* result) {
+ // we have to be top level so we can retry
+ invariant(!state->txn->lockState()->inAWriteUnitOfWork());
+ invariant(state->currIndex < state->normalizedInserts.size());
+
+ const StatusWith<BSONObj>& normalizedInsert(state->normalizedInserts[state->currIndex]);
+
+ if (!normalizedInsert.isOK()) {
+ result->setError(toWriteError(normalizedInsert.getStatus()));
+ return;
+ }
- int attempt = 0;
- while (true) {
- try {
- if (!state->request->isInsertIndexRequest()) {
- if (state->lockAndCheck(result)) {
- singleInsert(state->txn, insertDoc, state->getCollection(), result);
- }
- }
- else {
- singleCreateIndex(state->txn, insertDoc, result);
+ const BSONObj& insertDoc = normalizedInsert.getValue().isEmpty()
+ ? state->request->getInsertRequest()->getDocumentsAt(state->currIndex)
+ : normalizedInsert.getValue();
+
+ int attempt = 0;
+ while (true) {
+ try {
+ if (!state->request->isInsertIndexRequest()) {
+ if (state->lockAndCheck(result)) {
+ singleInsert(state->txn, insertDoc, state->getCollection(), result);
}
- break;
- }
- catch ( const WriteConflictException& wce ) {
- state->unlock();
- CurOp::get(state->txn)->debug().writeConflicts++;
- state->txn->recoveryUnit()->abandonSnapshot();
- WriteConflictException::logAndBackoff( attempt++,
- "insert",
- state->getCollection() ?
- state->getCollection()->ns().ns() :
- "index" );
- }
- catch (const StaleConfigException& staleExcep) {
- result->setError(new WriteErrorDetail);
- result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
- buildStaleError(staleExcep.getVersionReceived(),
- staleExcep.getVersionWanted(),
- result->getError());
- break;
+ } else {
+ singleCreateIndex(state->txn, insertDoc, result);
}
- catch (const DBException& ex) {
- Status status(ex.toStatus());
- if (ErrorCodes::isInterruption(status.code()))
- throw;
- result->setError(toWriteError(status));
- break;
- }
- }
-
- // Errors release the write lock, as a matter of policy.
- if (result->getError()) {
- state->txn->recoveryUnit()->abandonSnapshot();
+ break;
+ } catch (const WriteConflictException& wce) {
state->unlock();
+ CurOp::get(state->txn)->debug().writeConflicts++;
+ state->txn->recoveryUnit()->abandonSnapshot();
+ WriteConflictException::logAndBackoff(
+ attempt++,
+ "insert",
+ state->getCollection() ? state->getCollection()->ns().ns() : "index");
+ } catch (const StaleConfigException& staleExcep) {
+ result->setError(new WriteErrorDetail);
+ result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
+ buildStaleError(
+ staleExcep.getVersionReceived(), staleExcep.getVersionWanted(), result->getError());
+ break;
+ } catch (const DBException& ex) {
+ Status status(ex.toStatus());
+ if (ErrorCodes::isInterruption(status.code()))
+ throw;
+ result->setError(toWriteError(status));
+ break;
}
}
- void WriteBatchExecutor::execOneInsert(ExecInsertsState* state, WriteErrorDetail** error) {
- BatchItemRef currInsertItem(state->request, state->currIndex);
- CurOp currentOp(_txn);
- beginCurrentOp(_txn, currInsertItem);
- incOpStats(currInsertItem);
+ // Errors release the write lock, as a matter of policy.
+ if (result->getError()) {
+ state->txn->recoveryUnit()->abandonSnapshot();
+ state->unlock();
+ }
+}
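// [Editorial sketch, not part of this change] The insertOne() retry loop above
// follows the usual write-conflict pattern: release whatever the attempt held,
// back off briefly, then retry from the top. A generic, self-contained
// illustration (names invented; the real loop also distinguishes stale-config,
// interruption and other errors):

#include <chrono>
#include <thread>

template <typename Attempt, typename Release>
void retryOnConflict(Attempt attempt, Release release) {
    for (int tries = 0;; ++tries) {
        try {
            attempt();
            return;
        } catch (const std::exception&) {  // stand-in for WriteConflictException
            release();  // drop locks / abandon the snapshot before retrying
            std::this_thread::sleep_for(std::chrono::milliseconds(tries));  // crude backoff
        }
    }
}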
- WriteOpResult result;
- insertOne(state, &result);
+void WriteBatchExecutor::execOneInsert(ExecInsertsState* state, WriteErrorDetail** error) {
+ BatchItemRef currInsertItem(state->request, state->currIndex);
+ CurOp currentOp(_txn);
+ beginCurrentOp(_txn, currInsertItem);
+ incOpStats(currInsertItem);
- incWriteStats(currInsertItem,
- result.getStats(),
- result.getError(),
- &currentOp);
- finishCurrentOp(_txn, result.getError());
+ WriteOpResult result;
+ insertOne(state, &result);
- if (result.getError()) {
- *error = result.releaseError();
- }
- }
+ incWriteStats(currInsertItem, result.getStats(), result.getError(), &currentOp);
+ finishCurrentOp(_txn, result.getError());
- /**
- * Perform a single insert into a collection. Requires the insert be preprocessed and the
- * collection already has been created.
- *
- * Might fault or error, otherwise populates the result.
- */
- static void singleInsert( OperationContext* txn,
- const BSONObj& docToInsert,
- Collection* collection,
- WriteOpResult* result ) {
-
- const string& insertNS = collection->ns().ns();
- invariant(txn->lockState()->isCollectionLockedForMode(insertNS, MODE_IX));
+ if (result.getError()) {
+ *error = result.releaseError();
+ }
+}
- WriteUnitOfWork wunit(txn);
- StatusWith<RecordId> status = collection->insertDocument( txn, docToInsert, true );
+/**
+ * Perform a single insert into a collection. Requires the insert be preprocessed and the
+ * collection already has been created.
+ *
+ * Might fault or error, otherwise populates the result.
+ */
+static void singleInsert(OperationContext* txn,
+ const BSONObj& docToInsert,
+ Collection* collection,
+ WriteOpResult* result) {
+ const string& insertNS = collection->ns().ns();
+ invariant(txn->lockState()->isCollectionLockedForMode(insertNS, MODE_IX));
+
+ WriteUnitOfWork wunit(txn);
+ StatusWith<RecordId> status = collection->insertDocument(txn, docToInsert, true);
+
+ if (!status.isOK()) {
+ result->setError(toWriteError(status.getStatus()));
+ } else {
+ result->getStats().n = 1;
+ wunit.commit();
+ }
+}
- if ( !status.isOK() ) {
- result->setError(toWriteError(status.getStatus()));
- }
- else {
- result->getStats().n = 1;
- wunit.commit();
+/**
+ * Perform a single index creation on a collection. Requires the index descriptor be
+ * preprocessed.
+ *
+ * Might fault or error, otherwise populates the result.
+ */
+static void singleCreateIndex(OperationContext* txn,
+ const BSONObj& indexDesc,
+ WriteOpResult* result) {
+ BSONElement nsElement = indexDesc["ns"];
+ uassert(ErrorCodes::NoSuchKey, "Missing \"ns\" field in index description", !nsElement.eoo());
+ uassert(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"ns\" field of index description to be a "
+ "string, "
+ "but found a " << typeName(nsElement.type()),
+ nsElement.type() == String);
+ const NamespaceString ns(nsElement.valueStringData());
+ BSONObjBuilder cmdBuilder;
+ cmdBuilder << "createIndexes" << ns.coll();
+ cmdBuilder << "indexes" << BSON_ARRAY(indexDesc);
+ BSONObj cmd = cmdBuilder.done();
+ Command* createIndexesCmd = Command::findCommand("createIndexes");
+ invariant(createIndexesCmd);
+ std::string errmsg;
+ BSONObjBuilder resultBuilder;
+ const bool success =
+ createIndexesCmd->run(txn, ns.db().toString(), cmd, 0, errmsg, resultBuilder);
+ Command::appendCommandStatus(resultBuilder, success, errmsg);
+ BSONObj cmdResult = resultBuilder.done();
+ uassertStatusOK(Command::getStatusFromCommandResult(cmdResult));
+ result->getStats().n =
+ cmdResult["numIndexesAfter"].numberInt() - cmdResult["numIndexesBefore"].numberInt();
+}
+
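// [Editorial note, not part of this change] Illustrative translation performed
// by singleCreateIndex(): a legacy index insert such as
//
//     { ns: "test.coll", key: { a: 1 }, name: "a_1" }
//
// is rewritten into the command
//
//     { createIndexes: "coll", indexes: [ { ns: "test.coll", key: { a: 1 }, name: "a_1" } ] }
//
// and n is reported as numIndexesAfter - numIndexesBefore from the command
// reply, i.e. 0 when the index already existed.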
+static void multiUpdate(OperationContext* txn,
+ const BatchItemRef& updateItem,
+ WriteOpResult* result) {
+ const NamespaceString nsString(updateItem.getRequest()->getNS());
+ const bool isMulti = updateItem.getUpdate()->getMulti();
+ UpdateRequest request(nsString);
+ request.setQuery(updateItem.getUpdate()->getQuery());
+ request.setUpdates(updateItem.getUpdate()->getUpdateExpr());
+ request.setMulti(isMulti);
+ request.setUpsert(updateItem.getUpdate()->getUpsert());
+ UpdateLifecycleImpl updateLifecycle(true, request.getNamespaceString());
+ request.setLifecycle(&updateLifecycle);
+
+ // Updates from the write commands path can yield.
+ request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ int attempt = 0;
+ bool createCollection = false;
+ for (int fakeLoop = 0; fakeLoop < 1; fakeLoop++) {
+ ParsedUpdate parsedUpdate(txn, &request);
+ Status status = parsedUpdate.parseRequest();
+ if (!status.isOK()) {
+ result->setError(toWriteError(status));
+ return;
}
- }
- /**
- * Perform a single index creation on a collection. Requires the index descriptor be
- * preprocessed.
- *
- * Might fault or error, otherwise populates the result.
- */
- static void singleCreateIndex(OperationContext* txn,
- const BSONObj& indexDesc,
- WriteOpResult* result) {
+ if (createCollection) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ const AutoGetOrCreateDb adb{txn, nsString.db(), MODE_X};
- BSONElement nsElement = indexDesc["ns"];
- uassert(ErrorCodes::NoSuchKey,
- "Missing \"ns\" field in index description",
- !nsElement.eoo());
- uassert(ErrorCodes::TypeMismatch,
- str::stream() << "Expected \"ns\" field of index description to be a " "string, "
- "but found a " << typeName(nsElement.type()),
- nsElement.type() == String);
- const NamespaceString ns(nsElement.valueStringData());
- BSONObjBuilder cmdBuilder;
- cmdBuilder << "createIndexes" << ns.coll();
- cmdBuilder << "indexes" << BSON_ARRAY(indexDesc);
- BSONObj cmd = cmdBuilder.done();
- Command* createIndexesCmd = Command::findCommand("createIndexes");
- invariant(createIndexesCmd);
- std::string errmsg;
- BSONObjBuilder resultBuilder;
- const bool success = createIndexesCmd->run(
- txn,
- ns.db().toString(),
- cmd,
- 0,
- errmsg,
- resultBuilder);
- Command::appendCommandStatus(resultBuilder, success, errmsg);
- BSONObj cmdResult = resultBuilder.done();
- uassertStatusOK(Command::getStatusFromCommandResult(cmdResult));
- result->getStats().n =
- cmdResult["numIndexesAfter"].numberInt() - cmdResult["numIndexesBefore"].numberInt();
- }
+ if (!checkIsMasterForDatabase(nsString, result)) {
+ return;
+ }
- static void multiUpdate( OperationContext* txn,
- const BatchItemRef& updateItem,
- WriteOpResult* result ) {
-
- const NamespaceString nsString(updateItem.getRequest()->getNS());
- const bool isMulti = updateItem.getUpdate()->getMulti();
- UpdateRequest request(nsString);
- request.setQuery(updateItem.getUpdate()->getQuery());
- request.setUpdates(updateItem.getUpdate()->getUpdateExpr());
- request.setMulti(isMulti);
- request.setUpsert(updateItem.getUpdate()->getUpsert());
- UpdateLifecycleImpl updateLifecycle(true, request.getNamespaceString());
- request.setLifecycle(&updateLifecycle);
-
- // Updates from the write commands path can yield.
- request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- int attempt = 0;
- bool createCollection = false;
- for ( int fakeLoop = 0; fakeLoop < 1; fakeLoop++ ) {
-
- ParsedUpdate parsedUpdate(txn, &request);
- Status status = parsedUpdate.parseRequest();
- if (!status.isOK()) {
- result->setError(toWriteError(status));
- return;
+ Database* const db = adb.getDb();
+ if (db->getCollection(nsString.ns())) {
+ // someone else beat us to it
+ } else {
+ WriteUnitOfWork wuow(txn);
+ uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj()));
+ wuow.commit();
+ }
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "update", nsString.ns());
+ }
- if ( createCollection ) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- const AutoGetOrCreateDb adb{txn, nsString.db(), MODE_X};
+ ///////////////////////////////////////////
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_IX);
+ Lock::CollectionLock colLock(
+ txn->lockState(), nsString.ns(), parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
+ ///////////////////////////////////////////
- if (!checkIsMasterForDatabase(nsString, result)) {
- return;
- }
+ if (!checkIsMasterForDatabase(nsString, result)) {
+ return;
+ }
- Database* const db = adb.getDb();
- if ( db->getCollection( nsString.ns() ) ) {
- // someone else beat us to it
- }
- else {
- WriteUnitOfWork wuow(txn);
- uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj()));
- wuow.commit();
- }
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "update", nsString.ns());
- }
+ if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result))
+ return;
- ///////////////////////////////////////////
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_IX);
- Lock::CollectionLock colLock(txn->lockState(),
- nsString.ns(),
- parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
- ///////////////////////////////////////////
+ Database* const db = dbHolder().get(txn, nsString.db());
- if (!checkIsMasterForDatabase(nsString, result)) {
+ if (db == NULL) {
+ if (createCollection) {
+            // we raced with someone, accept defeat
+ result->getStats().nModified = 0;
+ result->getStats().n = 0;
return;
}
- if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result))
+ // Database not yet created
+ if (!request.isUpsert()) {
+ // not an upsert, no database, nothing to do
+ result->getStats().nModified = 0;
+ result->getStats().n = 0;
return;
-
- Database* const db = dbHolder().get(txn, nsString.db());
-
- if (db == NULL) {
- if (createCollection) {
- // we raced with some, accept defeat
- result->getStats().nModified = 0;
- result->getStats().n = 0;
- return;
- }
-
- // Database not yet created
- if (!request.isUpsert()) {
- // not an upsert, no database, nothing to do
- result->getStats().nModified = 0;
- result->getStats().n = 0;
- return;
- }
-
- // upsert, don't try to get a context as no MODE_X lock is held
- fakeLoop = -1;
- createCollection = true;
- continue;
}
- CurOp::get(txn)->raiseDbProfileLevel(db->getProfilingLevel());
- Collection* collection = db->getCollection(nsString.ns());
-
- if ( collection == NULL ) {
- if ( createCollection ) {
- // we raced with some, accept defeat
- result->getStats().nModified = 0;
- result->getStats().n = 0;
- return;
- }
+ // upsert, don't try to get a context as no MODE_X lock is held
+ fakeLoop = -1;
+ createCollection = true;
+ continue;
+ }
- if ( !request.isUpsert() ) {
- // not an upsert, no collection, nothing to do
- result->getStats().nModified = 0;
- result->getStats().n = 0;
- return;
- }
+ CurOp::get(txn)->raiseDbProfileLevel(db->getProfilingLevel());
+ Collection* collection = db->getCollection(nsString.ns());
- // upsert, mark that we should create collection
- fakeLoop = -1;
- createCollection = true;
- continue;
+ if (collection == NULL) {
+ if (createCollection) {
+            // we raced with someone, accept defeat
+ result->getStats().nModified = 0;
+ result->getStats().n = 0;
+ return;
}
- OpDebug* debug = &CurOp::get(txn)->debug();
+ if (!request.isUpsert()) {
+ // not an upsert, no collection, nothing to do
+ result->getStats().nModified = 0;
+ result->getStats().n = 0;
+ return;
+ }
- try {
- invariant(collection);
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ // upsert, mark that we should create collection
+ fakeLoop = -1;
+ createCollection = true;
+ continue;
+ }
- uassertStatusOK(exec->executePlan());
- UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), debug);
+ OpDebug* debug = &CurOp::get(txn)->debug();
- const long long numDocsModified = res.numDocsModified;
- const long long numMatched = res.numMatched;
- const BSONObj resUpsertedID = res.upserted;
+ try {
+ invariant(collection);
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- // We have an _id from an insert
- const bool didInsert = !resUpsertedID.isEmpty();
+ uassertStatusOK(exec->executePlan());
+ UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), debug);
- result->getStats().nModified = didInsert ? 0 : numDocsModified;
- result->getStats().n = didInsert ? 1 : numMatched;
- result->getStats().upsertedID = resUpsertedID;
- }
- catch ( const WriteConflictException& dle ) {
- debug->writeConflicts++;
- if ( isMulti ) {
- log() << "Had WriteConflict during multi update, aborting";
- throw;
- }
+ const long long numDocsModified = res.numDocsModified;
+ const long long numMatched = res.numMatched;
+ const BSONObj resUpsertedID = res.upserted;
- createCollection = false;
- // RESTART LOOP
- fakeLoop = -1;
- txn->recoveryUnit()->abandonSnapshot();
+ // We have an _id from an insert
+ const bool didInsert = !resUpsertedID.isEmpty();
- WriteConflictException::logAndBackoff( attempt++, "update", nsString.ns() );
- }
- catch (const StaleConfigException& staleExcep) {
- result->setError(new WriteErrorDetail);
- result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
- buildStaleError(staleExcep.getVersionReceived(),
- staleExcep.getVersionWanted(),
- result->getError());
+ result->getStats().nModified = didInsert ? 0 : numDocsModified;
+ result->getStats().n = didInsert ? 1 : numMatched;
+ result->getStats().upsertedID = resUpsertedID;
+ } catch (const WriteConflictException& dle) {
+ debug->writeConflicts++;
+ if (isMulti) {
+ log() << "Had WriteConflict during multi update, aborting";
+ throw;
}
- catch (const DBException& ex) {
- Status status = ex.toStatus();
- if (ErrorCodes::isInterruption(status.code())) {
- throw;
- }
- result->setError(toWriteError(status));
+
+ createCollection = false;
+ // RESTART LOOP
+ fakeLoop = -1;
+ txn->recoveryUnit()->abandonSnapshot();
+
+ WriteConflictException::logAndBackoff(attempt++, "update", nsString.ns());
+ } catch (const StaleConfigException& staleExcep) {
+ result->setError(new WriteErrorDetail);
+ result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
+ buildStaleError(
+ staleExcep.getVersionReceived(), staleExcep.getVersionWanted(), result->getError());
+ } catch (const DBException& ex) {
+ Status status = ex.toStatus();
+ if (ErrorCodes::isInterruption(status.code())) {
+ throw;
}
+ result->setError(toWriteError(status));
}
}
+}
- /**
- * Perform a remove operation, which might remove multiple documents. Dispatches to remove code
- * currently to do most of this.
- *
- * Might fault or error, otherwise populates the result.
- */
- static void multiRemove( OperationContext* txn,
- const BatchItemRef& removeItem,
- WriteOpResult* result ) {
-
- const NamespaceString& nss = removeItem.getRequest()->getNSS();
- DeleteRequest request(nss);
- request.setQuery( removeItem.getDelete()->getQuery() );
- request.setMulti( removeItem.getDelete()->getLimit() != 1 );
- request.setGod( false );
-
- // Deletes running through the write commands path can yield.
- request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- int attempt = 1;
- while ( 1 ) {
- try {
-
- ParsedDelete parsedDelete(txn, &request);
- Status status = parsedDelete.parseRequest();
- if (!status.isOK()) {
- result->setError(toWriteError(status));
- return;
- }
-
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, nss.db(), MODE_IX);
- if (!autoDb.getDb()) {
- break;
- }
-
- CurOp::get(txn)->raiseDbProfileLevel(autoDb.getDb()->getProfilingLevel());
- Lock::CollectionLock collLock(txn->lockState(),
- nss.ns(),
- parsedDelete.isIsolated() ? MODE_X : MODE_IX);
-
- // getExecutorDelete() also checks if writes are allowed.
- if (!checkIsMasterForDatabase(nss, result)) {
- return;
- }
- // Check version once we're locked
-
- if (!checkShardVersion(txn, &shardingState, *removeItem.getRequest(), result)) {
- // Version error
- return;
- }
-
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(txn,
- autoDb.getDb()->getCollection(nss),
- &parsedDelete,
- &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // Execute the delete and retrieve the number deleted.
- uassertStatusOK(exec->executePlan());
- result->getStats().n = DeleteStage::getNumDeleted(exec.get());
+/**
+ * Perform a remove operation, which might remove multiple documents. Currently dispatches to
+ * the remove code to do most of the work.
+ *
+ * Might fault or error, otherwise populates the result.
+ */
+static void multiRemove(OperationContext* txn,
+ const BatchItemRef& removeItem,
+ WriteOpResult* result) {
+ const NamespaceString& nss = removeItem.getRequest()->getNSS();
+ DeleteRequest request(nss);
+ request.setQuery(removeItem.getDelete()->getQuery());
+ request.setMulti(removeItem.getDelete()->getLimit() != 1);
+ request.setGod(false);
+
+ // Deletes running through the write commands path can yield.
+ request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ int attempt = 1;
+ while (1) {
+ try {
+ ParsedDelete parsedDelete(txn, &request);
+ Status status = parsedDelete.parseRequest();
+ if (!status.isOK()) {
+ result->setError(toWriteError(status));
+ return;
+ }
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetDb autoDb(txn, nss.db(), MODE_IX);
+ if (!autoDb.getDb()) {
break;
}
- catch ( const WriteConflictException& dle ) {
- CurOp::get(txn)->debug().writeConflicts++;
- WriteConflictException::logAndBackoff( attempt++, "delete", nss.ns() );
- }
- catch (const StaleConfigException& staleExcep) {
- result->setError(new WriteErrorDetail);
- result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
- buildStaleError(staleExcep.getVersionReceived(),
- staleExcep.getVersionWanted(),
- result->getError());
+
+ CurOp::get(txn)->raiseDbProfileLevel(autoDb.getDb()->getProfilingLevel());
+ Lock::CollectionLock collLock(
+ txn->lockState(), nss.ns(), parsedDelete.isIsolated() ? MODE_X : MODE_IX);
+
+ // getExecutorDelete() also checks if writes are allowed.
+ if (!checkIsMasterForDatabase(nss, result)) {
return;
}
- catch ( const DBException& ex ) {
- Status status = ex.toStatus();
- if (ErrorCodes::isInterruption(status.code())) {
- throw;
- }
- result->setError(toWriteError(status));
+ // Check version once we're locked
+
+ if (!checkShardVersion(txn, &shardingState, *removeItem.getRequest(), result)) {
+ // Version error
return;
}
+
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorDelete(
+ txn, autoDb.getDb()->getCollection(nss), &parsedDelete, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // Execute the delete and retrieve the number deleted.
+ uassertStatusOK(exec->executePlan());
+ result->getStats().n = DeleteStage::getNumDeleted(exec.get());
+
+ break;
+ } catch (const WriteConflictException& dle) {
+ CurOp::get(txn)->debug().writeConflicts++;
+ WriteConflictException::logAndBackoff(attempt++, "delete", nss.ns());
+ } catch (const StaleConfigException& staleExcep) {
+ result->setError(new WriteErrorDetail);
+ result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
+ buildStaleError(
+ staleExcep.getVersionReceived(), staleExcep.getVersionWanted(), result->getError());
+ return;
+ } catch (const DBException& ex) {
+ Status status = ex.toStatus();
+ if (ErrorCodes::isInterruption(status.code())) {
+ throw;
+ }
+ result->setError(toWriteError(status));
+ return;
}
}
+}
-} // namespace mongo
+} // namespace mongo
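
Both the update retry loop and multiRemove above rely on the same write-conflict handling
pattern: attempt the write, and on WriteConflictException abandon the current storage
snapshot, back off, and try again. A minimal sketch of that pattern as a standalone helper
(the helper name and template are illustrative only; the real code uses the
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END macros and the hand-rolled loops shown above):

    // Illustrative sketch only: condenses the retry behaviour of the update/remove paths.
    // Assumes the usual OperationContext / WriteConflictException headers are available.
    template <typename AttemptFn>
    void retryOnWriteConflict(OperationContext* txn,
                              const char* opName,
                              const std::string& ns,
                              AttemptFn attempt) {
        int attemptCount = 0;
        while (true) {
            try {
                attempt();  // the guarded write
                return;     // success, leave the loop
            } catch (const WriteConflictException&) {
                // Another operation touched the same documents: drop our snapshot,
                // back off, and retry against fresh data.
                txn->recoveryUnit()->abandonSnapshot();
                WriteConflictException::logAndBackoff(attemptCount++, opName, ns);
            }
        }
    }

The one deliberate exception is the multi-update case above, where a conflict is logged
("Had WriteConflict during multi update, aborting") and rethrown instead of retried.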
diff --git a/src/mongo/db/commands/write_commands/batch_executor.h b/src/mongo/db/commands/write_commands/batch_executor.h
index 0bab41d3ff8..0dd1d71848a 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.h
+++ b/src/mongo/db/commands/write_commands/batch_executor.h
@@ -40,158 +40,148 @@
namespace mongo {
- class BSONObjBuilder;
- class CurOp;
- class LastError;
- class OpCounters;
- class OperationContext;
- class WriteBatchStats;
- struct WriteOpStats;
+class BSONObjBuilder;
+class CurOp;
+class LastError;
+class OpCounters;
+class OperationContext;
+class WriteBatchStats;
+struct WriteOpStats;
+
+/**
+ * An instance of WriteBatchExecutor is an object capable of issuing a write batch.
+ */
+class WriteBatchExecutor {
+ MONGO_DISALLOW_COPYING(WriteBatchExecutor);
+
+public:
+ // State object used by private execInserts. TODO: Do not expose this type.
+ class ExecInsertsState;
+
+ WriteBatchExecutor(OperationContext* txn, OpCounters* opCounters, LastError* le);
/**
- * An instance of WriteBatchExecutor is an object capable of issuing a write batch.
+ * Issues writes with requested write concern. Fills response with errors if problems
+ * occur.
*/
- class WriteBatchExecutor {
- MONGO_DISALLOW_COPYING(WriteBatchExecutor);
- public:
-
- // State object used by private execInserts. TODO: Do not expose this type.
- class ExecInsertsState;
-
- WriteBatchExecutor( OperationContext* txn,
- OpCounters* opCounters,
- LastError* le );
-
- /**
- * Issues writes with requested write concern. Fills response with errors if problems
- * occur.
- */
- void executeBatch( const BatchedCommandRequest& request, BatchedCommandResponse* response );
-
- const WriteBatchStats& getStats() const;
-
- /**
- * Does basic validation of the batch request. Returns a non-OK status if
- * any problems with the batch are found.
- */
- static Status validateBatch( const BatchedCommandRequest& request );
-
- private:
- /**
- * Executes the writes in the batch and returns upserted _ids and write errors.
- * Dispatches to one of the three functions below for DBLock, CurOp, and stats management.
- */
- void bulkExecute( const BatchedCommandRequest& request,
- std::vector<BatchedUpsertDetail*>* upsertedIds,
- std::vector<WriteErrorDetail*>* errors );
-
- /**
- * Executes the inserts of an insert batch and returns the write errors.
- *
- * Internally uses the DBLock of the request namespace.
- * May execute multiple inserts inside the same DBLock, and/or take the DBLock multiple
- * times.
- */
- void execInserts( const BatchedCommandRequest& request,
- std::vector<WriteErrorDetail*>* errors );
-
- /**
- * Executes a single insert from a batch, described in the opaque "state" object.
- */
- void execOneInsert( ExecInsertsState* state, WriteErrorDetail** error );
-
- /**
- * Executes an update item (which may update many documents or upsert), and returns the
- * upserted _id on upsert or error on failure.
- *
- * Internally uses the DBLock of the update namespace.
- * May take the DBLock multiple times.
- */
- void execUpdate( const BatchItemRef& updateItem,
- BSONObj* upsertedId,
- WriteErrorDetail** error );
-
- /**
- * Executes a delete item (which may remove many documents) and returns an error on failure.
- *
- * Internally uses the DBLock of the delete namespace.
- * May take the DBLock multiple times.
- */
- void execRemove( const BatchItemRef& removeItem, WriteErrorDetail** error );
-
- /**
- * Helper for incrementing stats on the next CurOp.
- *
- * No lock requirements.
- */
- void incOpStats( const BatchItemRef& currWrite );
-
- /**
- * Helper for incrementing stats after each individual write op.
- *
- * No lock requirements (though usually done inside write lock to make stats update look
- * atomic).
- */
- void incWriteStats( const BatchItemRef& currWrite,
- const WriteOpStats& stats,
- const WriteErrorDetail* error,
- CurOp* currentOp );
-
- OperationContext* _txn;
-
- // OpCounters object to update - needed for stats reporting
- // Not owned here.
- OpCounters* _opCounters;
-
- // LastError object to use for preparing write results - needed for stats reporting
- // Not owned here.
- LastError* _le;
-
- // Stats
- std::unique_ptr<WriteBatchStats> _stats;
- };
+ void executeBatch(const BatchedCommandRequest& request, BatchedCommandResponse* response);
+
+ const WriteBatchStats& getStats() const;
/**
- * Holds information about the result of a single write operation.
+ * Does basic validation of the batch request. Returns a non-OK status if
+ * any problems with the batch are found.
*/
- struct WriteOpStats {
+ static Status validateBatch(const BatchedCommandRequest& request);
- WriteOpStats() :
- n( 0 ), nModified( 0 ) {
- }
+private:
+ /**
+ * Executes the writes in the batch and returns upserted _ids and write errors.
+ * Dispatches to one of the three functions below for DBLock, CurOp, and stats management.
+ */
+ void bulkExecute(const BatchedCommandRequest& request,
+ std::vector<BatchedUpsertDetail*>* upsertedIds,
+ std::vector<WriteErrorDetail*>* errors);
- void reset() {
- n = 0;
- nModified = 0;
- upsertedID = BSONObj();
- }
+ /**
+ * Executes the inserts of an insert batch and returns the write errors.
+ *
+ * Internally uses the DBLock of the request namespace.
+ * May execute multiple inserts inside the same DBLock, and/or take the DBLock multiple
+ * times.
+ */
+ void execInserts(const BatchedCommandRequest& request, std::vector<WriteErrorDetail*>* errors);
- // Num docs logically affected by this operation.
- int n;
+ /**
+ * Executes a single insert from a batch, described in the opaque "state" object.
+ */
+ void execOneInsert(ExecInsertsState* state, WriteErrorDetail** error);
+
+ /**
+ * Executes an update item (which may update many documents or upsert), and returns the
+ * upserted _id on upsert or error on failure.
+ *
+ * Internally uses the DBLock of the update namespace.
+ * May take the DBLock multiple times.
+ */
+ void execUpdate(const BatchItemRef& updateItem, BSONObj* upsertedId, WriteErrorDetail** error);
- // Num docs actually modified by this operation, if applicable (update)
- int nModified;
+ /**
+ * Executes a delete item (which may remove many documents) and returns an error on failure.
+ *
+ * Internally uses the DBLock of the delete namespace.
+ * May take the DBLock multiple times.
+ */
+ void execRemove(const BatchItemRef& removeItem, WriteErrorDetail** error);
- // _id of newly upserted document, if applicable (update)
- BSONObj upsertedID;
- };
+ /**
+ * Helper for incrementing stats on the next CurOp.
+ *
+ * No lock requirements.
+ */
+ void incOpStats(const BatchItemRef& currWrite);
/**
- * Full stats accumulated by a write batch execution. Note that these stats do not directly
- * correspond to the stats accumulated in opCounters and LastError.
+ * Helper for incrementing stats after each individual write op.
+ *
+ * No lock requirements (though usually done inside write lock to make stats update look
+ * atomic).
*/
- class WriteBatchStats {
- public:
+ void incWriteStats(const BatchItemRef& currWrite,
+ const WriteOpStats& stats,
+ const WriteErrorDetail* error,
+ CurOp* currentOp);
- WriteBatchStats() :
- numInserted( 0 ), numUpserted( 0 ), numMatched( 0 ), numModified( 0 ), numDeleted( 0 ) {
- }
+ OperationContext* _txn;
- int numInserted;
- int numUpserted;
- int numMatched;
- int numModified;
- int numDeleted;
- };
+ // OpCounters object to update - needed for stats reporting
+ // Not owned here.
+ OpCounters* _opCounters;
-} // namespace mongo
+ // LastError object to use for preparing write results - needed for stats reporting
+ // Not owned here.
+ LastError* _le;
+
+ // Stats
+ std::unique_ptr<WriteBatchStats> _stats;
+};
+
+/**
+ * Holds information about the result of a single write operation.
+ */
+struct WriteOpStats {
+ WriteOpStats() : n(0), nModified(0) {}
+
+ void reset() {
+ n = 0;
+ nModified = 0;
+ upsertedID = BSONObj();
+ }
+
+ // Num docs logically affected by this operation.
+ int n;
+
+ // Num docs actually modified by this operation, if applicable (update)
+ int nModified;
+
+ // _id of newly upserted document, if applicable (update)
+ BSONObj upsertedID;
+};
+
+/**
+ * Full stats accumulated by a write batch execution. Note that these stats do not directly
+ * correspond to the stats accumulated in opCounters and LastError.
+ */
+class WriteBatchStats {
+public:
+ WriteBatchStats()
+ : numInserted(0), numUpserted(0), numMatched(0), numModified(0), numDeleted(0) {}
+
+ int numInserted;
+ int numUpserted;
+ int numMatched;
+ int numModified;
+ int numDeleted;
+};
+
+} // namespace mongo
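
The classes declared in this header are driven from WriteCmd::run() in write_commands.cpp
(next hunk): the command handler parses the incoming BSON into a BatchedCommandRequest,
rewrites the namespace, and hands the request to a WriteBatchExecutor. A condensed sketch of
that call sequence, with parsing, validation and write-concern handling elided:

    // Condensed from WriteCmd::run() below; placeholders, not a complete implementation.
    BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
    BatchedCommandResponse response;
    // ... request.parseBSON(cmdObj, &errMsg), request.setNSS(nss), write concern setup ...
    WriteBatchExecutor executor(txn, &globalOpCounters, &LastError::get(txn->getClient()));
    executor.executeBatch(request, &response);  // fills response with per-item results/errors
    result.appendElements(response.toBSON());

Per-batch totals accumulate in WriteBatchStats; each individual write reports through a
WriteOpStats (n, nModified, upsertedID).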
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 4bf374778fb..fe6bb9a2ff9 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -53,270 +53,265 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- namespace {
+namespace {
- MONGO_INITIALIZER(RegisterWriteCommands)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CmdInsert();
- new CmdUpdate();
- new CmdDelete();
- return Status::OK();
- }
-
- } // namespace
+MONGO_INITIALIZER(RegisterWriteCommands)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CmdInsert();
+ new CmdUpdate();
+ new CmdDelete();
+ return Status::OK();
+}
- WriteCmd::WriteCmd( StringData name, BatchedCommandRequest::BatchType writeType ) :
- Command( name ), _writeType( writeType ) {
- }
+} // namespace
- void WriteCmd::redactTooLongLog( mutablebson::Document* cmdObj, StringData fieldName ) {
- namespace mmb = mutablebson;
- mmb::Element root = cmdObj->root();
- mmb::Element field = root.findFirstChildNamed( fieldName );
+WriteCmd::WriteCmd(StringData name, BatchedCommandRequest::BatchType writeType)
+ : Command(name), _writeType(writeType) {}
- // If the cmdObj is too large, it will be a "too big" message given by CachedBSONObj.get()
- if ( !field.ok() ) {
- return;
- }
+void WriteCmd::redactTooLongLog(mutablebson::Document* cmdObj, StringData fieldName) {
+ namespace mmb = mutablebson;
+ mmb::Element root = cmdObj->root();
+ mmb::Element field = root.findFirstChildNamed(fieldName);
- // Redact the log if there are more than one documents or operations.
- if ( field.countChildren() > 1 ) {
- field.setValueInt( field.countChildren() );
- }
+ // If the cmdObj is too large, it will be a "too big" message given by CachedBSONObj.get()
+ if (!field.ok()) {
+ return;
}
- // Slaves can't perform writes.
- bool WriteCmd::slaveOk() const { return false; }
+    // Redact the log if there is more than one document or operation.
+ if (field.countChildren() > 1) {
+ field.setValueInt(field.countChildren());
+ }
+}
+
+// Slaves can't perform writes.
+bool WriteCmd::slaveOk() const {
+ return false;
+}
+
+bool WriteCmd::isWriteCommandForConfigServer() const {
+ return false;
+}
+
+Status WriteCmd::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ Status status(auth::checkAuthForWriteCommand(AuthorizationSession::get(client),
+ _writeType,
+ NamespaceString(parseNs(dbname, cmdObj)),
+ cmdObj));
+
+ // TODO: Remove this when we standardize GLE reporting from commands
+ if (!status.isOK()) {
+ LastError::get(client).setLastError(status.code(), status.reason());
+ }
- bool WriteCmd::isWriteCommandForConfigServer() const { return false; }
+ return status;
+}
+
+// Write commands are counted towards their corresponding opcounters, not command opcounters.
+bool WriteCmd::shouldAffectCommandCounter() const {
+ return false;
+}
+
+bool WriteCmd::run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ string& errMsg,
+ BSONObjBuilder& result) {
+ // Can't be run on secondaries.
+ dassert(txn->writesAreReplicated());
+ BatchedCommandRequest request(_writeType);
+ BatchedCommandResponse response;
+
+ if (!request.parseBSON(cmdObj, &errMsg) || !request.isValid(&errMsg)) {
+ return appendCommandStatus(result, Status(ErrorCodes::FailedToParse, errMsg));
+ }
- Status WriteCmd::checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj ) {
+    // Note that this is a runCommand, and therefore, the database and the collection name
+    // are in different parts of the grammar for the command. But it's more convenient to
+    // work with a NamespaceString. We build it here and replace it in the parsed command.
+    // Internally, everything works with the namespace string as opposed to just the
+    // collection name.
+ NamespaceString nss(dbName, request.getNS());
+ request.setNSS(nss);
- Status status( auth::checkAuthForWriteCommand( AuthorizationSession::get(client),
- _writeType,
- NamespaceString( parseNs( dbname, cmdObj ) ),
- cmdObj ));
+ StatusWith<WriteConcernOptions> wcStatus = extractWriteConcern(cmdObj);
- // TODO: Remove this when we standardize GLE reporting from commands
- if ( !status.isOK() ) {
- LastError::get(client).setLastError(status.code(), status.reason());
- }
+ if (!wcStatus.isOK()) {
+ return appendCommandStatus(result, wcStatus.getStatus());
+ }
+ txn->setWriteConcern(wcStatus.getValue());
+
+ WriteBatchExecutor writeBatchExecutor(
+ txn, &globalOpCounters, &LastError::get(txn->getClient()));
+
+ writeBatchExecutor.executeBatch(request, &response);
+
+ result.appendElements(response.toBSON());
+ return response.getOk();
+}
+
+Status WriteCmd::explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ // For now we only explain update and delete write commands.
+ if (BatchedCommandRequest::BatchType_Update != _writeType &&
+ BatchedCommandRequest::BatchType_Delete != _writeType) {
+ return Status(ErrorCodes::IllegalOperation,
+ "Only update and delete write ops can be explained");
+ }
- return status;
+ // Parse the batch request.
+ BatchedCommandRequest request(_writeType);
+ std::string errMsg;
+ if (!request.parseBSON(cmdObj, &errMsg) || !request.isValid(&errMsg)) {
+ return Status(ErrorCodes::FailedToParse, errMsg);
}
- // Write commands are counted towards their corresponding opcounters, not command opcounters.
- bool WriteCmd::shouldAffectCommandCounter() const { return false; }
-
- bool WriteCmd::run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int options,
- string& errMsg,
- BSONObjBuilder& result) {
- // Can't be run on secondaries.
- dassert(txn->writesAreReplicated());
- BatchedCommandRequest request( _writeType );
- BatchedCommandResponse response;
-
- if ( !request.parseBSON( cmdObj, &errMsg ) || !request.isValid( &errMsg ) ) {
- return appendCommandStatus( result, Status( ErrorCodes::FailedToParse, errMsg ) );
- }
+    // Note that this is a runCommand, and therefore, the database and the collection name
+    // are in different parts of the grammar for the command. But it's more convenient to
+    // work with a NamespaceString. We build it here and replace it in the parsed command.
+    // Internally, everything works with the namespace string as opposed to just the
+    // collection name.
+ NamespaceString nsString(dbname, request.getNS());
+ request.setNSS(nsString);
+
+ // Do the validation of the batch that is shared with non-explained write batches.
+ Status isValid = WriteBatchExecutor::validateBatch(request);
+ if (!isValid.isOK()) {
+ return isValid;
+ }
- // Note that this is a runCommmand, and therefore, the database and the collection name
- // are in different parts of the grammar for the command. But it's more convenient to
- // work with a NamespaceString. We built it here and replace it in the parsed command.
- // Internally, everything work with the namespace string as opposed to just the
- // collection name.
- NamespaceString nss(dbName, request.getNS());
- request.setNSS(nss);
+ // Explain must do one additional piece of validation: For now we only explain
+ // singleton batches.
+ if (request.sizeWriteOps() != 1u) {
+ return Status(ErrorCodes::InvalidLength, "explained write batches must be of size 1");
+ }
- StatusWith<WriteConcernOptions> wcStatus = extractWriteConcern(cmdObj);
+ ScopedTransaction scopedXact(txn, MODE_IX);
- if (!wcStatus.isOK()) {
- return appendCommandStatus(result, wcStatus.getStatus());
- }
- txn->setWriteConcern(wcStatus.getValue());
+ // Get a reference to the singleton batch item (it's the 0th item in the batch).
+ BatchItemRef batchItem(&request, 0);
- WriteBatchExecutor writeBatchExecutor(txn,
- &globalOpCounters,
- &LastError::get(txn->getClient()));
+ if (BatchedCommandRequest::BatchType_Update == _writeType) {
+ // Create the update request.
+ UpdateRequest updateRequest(nsString);
+ updateRequest.setQuery(batchItem.getUpdate()->getQuery());
+ updateRequest.setUpdates(batchItem.getUpdate()->getUpdateExpr());
+ updateRequest.setMulti(batchItem.getUpdate()->getMulti());
+ updateRequest.setUpsert(batchItem.getUpdate()->getUpsert());
+ UpdateLifecycleImpl updateLifecycle(true, updateRequest.getNamespaceString());
+ updateRequest.setLifecycle(&updateLifecycle);
+ updateRequest.setExplain();
- writeBatchExecutor.executeBatch( request, &response );
+ // Explained updates can yield.
+ updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
- result.appendElements( response.toBSON() );
- return response.getOk();
- }
+ OpDebug* debug = &CurOp::get(txn)->debug();
- Status WriteCmd::explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
- // For now we only explain update and delete write commands.
- if ( BatchedCommandRequest::BatchType_Update != _writeType &&
- BatchedCommandRequest::BatchType_Delete != _writeType ) {
- return Status( ErrorCodes::IllegalOperation,
- "Only update and delete write ops can be explained" );
+ ParsedUpdate parsedUpdate(txn, &updateRequest);
+ Status parseStatus = parsedUpdate.parseRequest();
+ if (!parseStatus.isOK()) {
+ return parseStatus;
}
- // Parse the batch request.
- BatchedCommandRequest request( _writeType );
- std::string errMsg;
- if ( !request.parseBSON( cmdObj, &errMsg ) || !request.isValid( &errMsg ) ) {
- return Status( ErrorCodes::FailedToParse, errMsg );
- }
+ // Explains of write commands are read-only, but we take write locks so
+ // that timing info is more accurate.
+ AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
+ Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);
- // Note that this is a runCommmand, and therefore, the database and the collection name
- // are in different parts of the grammar for the command. But it's more convenient to
- // work with a NamespaceString. We built it here and replace it in the parsed command.
- // Internally, everything work with the namespace string as opposed to just the
- // collection name.
- NamespaceString nsString(dbname, request.getNS());
- request.setNSS(nsString);
-
- // Do the validation of the batch that is shared with non-explained write batches.
- Status isValid = WriteBatchExecutor::validateBatch( request );
- if (!isValid.isOK()) {
- return isValid;
- }
+ ensureShardVersionOKOrThrow(txn->getClient(), nsString.ns());
- // Explain must do one additional piece of validation: For now we only explain
- // singleton batches.
- if ( request.sizeWriteOps() != 1u ) {
- return Status( ErrorCodes::InvalidLength,
- "explained write batches must be of size 1" );
+ // Get a pointer to the (possibly NULL) collection.
+ Collection* collection = NULL;
+ if (autoDb.getDb()) {
+ collection = autoDb.getDb()->getCollection(nsString.ns());
}
- ScopedTransaction scopedXact(txn, MODE_IX);
-
- // Get a reference to the singleton batch item (it's the 0th item in the batch).
- BatchItemRef batchItem( &request, 0 );
-
- if ( BatchedCommandRequest::BatchType_Update == _writeType ) {
- // Create the update request.
- UpdateRequest updateRequest( nsString );
- updateRequest.setQuery( batchItem.getUpdate()->getQuery() );
- updateRequest.setUpdates( batchItem.getUpdate()->getUpdateExpr() );
- updateRequest.setMulti( batchItem.getUpdate()->getMulti() );
- updateRequest.setUpsert( batchItem.getUpdate()->getUpsert() );
- UpdateLifecycleImpl updateLifecycle( true, updateRequest.getNamespaceString() );
- updateRequest.setLifecycle( &updateLifecycle );
- updateRequest.setExplain();
-
- // Explained updates can yield.
- updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- OpDebug* debug = &CurOp::get(txn)->debug();
-
- ParsedUpdate parsedUpdate( txn, &updateRequest );
- Status parseStatus = parsedUpdate.parseRequest();
- if ( !parseStatus.isOK() ) {
- return parseStatus;
- }
-
- // Explains of write commands are read-only, but we take write locks so
- // that timing info is more accurate.
- AutoGetDb autoDb( txn, nsString.db(), MODE_IX );
- Lock::CollectionLock colLock( txn->lockState(), nsString.ns(), MODE_IX );
-
- ensureShardVersionOKOrThrow( txn->getClient(), nsString.ns() );
-
- // Get a pointer to the (possibly NULL) collection.
- Collection* collection = NULL;
- if ( autoDb.getDb() ) {
- collection = autoDb.getDb()->getCollection( nsString.ns() );
- }
-
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // Explain the plan tree.
- Explain::explainStages( exec.get(), verbosity, out );
- return Status::OK();
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // Explain the plan tree.
+ Explain::explainStages(exec.get(), verbosity, out);
+ return Status::OK();
+ } else {
+ invariant(BatchedCommandRequest::BatchType_Delete == _writeType);
+
+ // Create the delete request.
+ DeleteRequest deleteRequest(nsString);
+ deleteRequest.setQuery(batchItem.getDelete()->getQuery());
+ deleteRequest.setMulti(batchItem.getDelete()->getLimit() != 1);
+ deleteRequest.setGod(false);
+ deleteRequest.setExplain();
+
+ // Explained deletes can yield.
+ deleteRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ ParsedDelete parsedDelete(txn, &deleteRequest);
+ Status parseStatus = parsedDelete.parseRequest();
+ if (!parseStatus.isOK()) {
+ return parseStatus;
}
- else {
- invariant( BatchedCommandRequest::BatchType_Delete == _writeType );
-
- // Create the delete request.
- DeleteRequest deleteRequest( nsString );
- deleteRequest.setQuery( batchItem.getDelete()->getQuery() );
- deleteRequest.setMulti( batchItem.getDelete()->getLimit() != 1 );
- deleteRequest.setGod( false );
- deleteRequest.setExplain();
-
- // Explained deletes can yield.
- deleteRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- ParsedDelete parsedDelete(txn, &deleteRequest);
- Status parseStatus = parsedDelete.parseRequest();
- if (!parseStatus.isOK()) {
- return parseStatus;
- }
-
- // Explains of write commands are read-only, but we take write locks so that timing
- // info is more accurate.
- AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
- Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);
-
- ensureShardVersionOKOrThrow( txn->getClient(), nsString.ns() );
-
- // Get a pointer to the (possibly NULL) collection.
- Collection* collection = NULL;
- if (autoDb.getDb()) {
- collection = autoDb.getDb()->getCollection(nsString.ns());
- }
-
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // Explain the plan tree.
- Explain::explainStages(exec.get(), verbosity, out);
- return Status::OK();
+
+ // Explains of write commands are read-only, but we take write locks so that timing
+ // info is more accurate.
+ AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
+ Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);
+
+ ensureShardVersionOKOrThrow(txn->getClient(), nsString.ns());
+
+ // Get a pointer to the (possibly NULL) collection.
+ Collection* collection = NULL;
+ if (autoDb.getDb()) {
+ collection = autoDb.getDb()->getCollection(nsString.ns());
}
- }
- CmdInsert::CmdInsert() :
- WriteCmd( "insert", BatchedCommandRequest::BatchType_Insert ) {
- }
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- void CmdInsert::redactForLogging( mutablebson::Document* cmdObj ) {
- redactTooLongLog( cmdObj, StringData( "documents", StringData::LiteralTag() ) );
+ // Explain the plan tree.
+ Explain::explainStages(exec.get(), verbosity, out);
+ return Status::OK();
}
+}
- void CmdInsert::help( stringstream& help ) const {
- help << "insert documents";
- }
+CmdInsert::CmdInsert() : WriteCmd("insert", BatchedCommandRequest::BatchType_Insert) {}
- CmdUpdate::CmdUpdate() :
- WriteCmd( "update", BatchedCommandRequest::BatchType_Update ) {
- }
+void CmdInsert::redactForLogging(mutablebson::Document* cmdObj) {
+ redactTooLongLog(cmdObj, StringData("documents", StringData::LiteralTag()));
+}
- void CmdUpdate::redactForLogging( mutablebson::Document* cmdObj ) {
- redactTooLongLog( cmdObj, StringData( "updates", StringData::LiteralTag() ) );
- }
+void CmdInsert::help(stringstream& help) const {
+ help << "insert documents";
+}
- void CmdUpdate::help( stringstream& help ) const {
- help << "update documents";
- }
+CmdUpdate::CmdUpdate() : WriteCmd("update", BatchedCommandRequest::BatchType_Update) {}
- CmdDelete::CmdDelete() :
- WriteCmd( "delete", BatchedCommandRequest::BatchType_Delete ) {
- }
+void CmdUpdate::redactForLogging(mutablebson::Document* cmdObj) {
+ redactTooLongLog(cmdObj, StringData("updates", StringData::LiteralTag()));
+}
- void CmdDelete::redactForLogging( mutablebson::Document* cmdObj ) {
- redactTooLongLog( cmdObj, StringData( "deletes", StringData::LiteralTag() ) );
- }
+void CmdUpdate::help(stringstream& help) const {
+ help << "update documents";
+}
- void CmdDelete::help( stringstream& help ) const {
- help << "delete documents";
- }
+CmdDelete::CmdDelete() : WriteCmd("delete", BatchedCommandRequest::BatchType_Delete) {}
+
+void CmdDelete::redactForLogging(mutablebson::Document* cmdObj) {
+ redactTooLongLog(cmdObj, StringData("deletes", StringData::LiteralTag()));
+}
+
+void CmdDelete::help(stringstream& help) const {
+ help << "delete documents";
+}
-} // namespace mongo
+} // namespace mongo
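
The update and delete explain branches above share one skeleton: build the corresponding
request object with setExplain(), parse it, take MODE_IX database and collection locks,
obtain a plan executor, and hand it to Explain::explainStages(). Shown here for the update
case only, stripped of the surrounding validation (query and updateExpr stand in for the
fields pulled from the singleton batch item):

    // Condensed from WriteCmd::explain() above; placeholder values, not runnable as-is.
    UpdateRequest updateRequest(nsString);
    updateRequest.setQuery(query);         // batchItem.getUpdate()->getQuery()
    updateRequest.setUpdates(updateExpr);  // batchItem.getUpdate()->getUpdateExpr()
    updateRequest.setExplain();            // plan only, nothing is written

    ParsedUpdate parsedUpdate(txn, &updateRequest);
    uassertStatusOK(parsedUpdate.parseRequest());

    PlanExecutor* rawExec;
    uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
    std::unique_ptr<PlanExecutor> exec(rawExec);
    Explain::explainStages(exec.get(), verbosity, out);  // appends the plan tree to 'out'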
diff --git a/src/mongo/db/commands/write_commands/write_commands.h b/src/mongo/db/commands/write_commands/write_commands.h
index cbb2db6cac6..fcdda1b56fd 100644
--- a/src/mongo/db/commands/write_commands/write_commands.h
+++ b/src/mongo/db/commands/write_commands/write_commands.h
@@ -36,89 +36,91 @@
namespace mongo {
+/**
+ * Base class for write commands. Write commands support batch writes and write concern,
+ * and return per-item error information. All write commands use the (non-virtual) entry
+ * point WriteCmd::run().
+ *
+ * Command parsing is performed by the WriteBatch class (command syntax documented there),
+ * and command execution is performed by the WriteBatchExecutor class.
+ */
+class WriteCmd : public Command {
+ MONGO_DISALLOW_COPYING(WriteCmd);
+
+public:
+ virtual ~WriteCmd() {}
+
+protected:
/**
- * Base class for write commands. Write commands support batch writes and write concern,
- * and return per-item error information. All write commands use the (non-virtual) entry
- * point WriteCmd::run().
- *
- * Command parsing is performed by the WriteBatch class (command syntax documented there),
- * and command execution is performed by the WriteBatchExecutor class.
+ * Instantiates a command that can be invoked by "name", which will be capable of issuing
+     * write batches of type "writeType".
*/
- class WriteCmd : public Command {
- MONGO_DISALLOW_COPYING(WriteCmd);
- public:
- virtual ~WriteCmd() {}
-
- protected:
-
- /**
- * Instantiates a command that can be invoked by "name", which will be capable of issuing
- * write batches of type "writeType", and will require privilege "action" to run.
- */
- WriteCmd( StringData name, BatchedCommandRequest::BatchType writeType );
-
- // Full log of write command can be quite large.
- static void redactTooLongLog( mutablebson::Document* cmdObj, StringData fieldName );
-
- private:
- virtual bool slaveOk() const;
-
- virtual bool isWriteCommandForConfigServer() const;
-
- virtual Status checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj );
-
- virtual bool shouldAffectCommandCounter() const;
-
- // Write command entry point.
- virtual bool run(
- OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- // Write commands can be explained.
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const;
-
- // Type of batch (e.g. insert).
- BatchedCommandRequest::BatchType _writeType;
- };
-
- class CmdInsert : public WriteCmd {
- MONGO_DISALLOW_COPYING(CmdInsert);
- public:
- CmdInsert();
- void redactForLogging(mutablebson::Document* cmdObj);
-
- private:
- virtual void help(std::stringstream& help) const;
- };
-
- class CmdUpdate : public WriteCmd {
- MONGO_DISALLOW_COPYING(CmdUpdate);
- public:
- CmdUpdate();
- void redactForLogging(mutablebson::Document* cmdObj);
-
- private:
- virtual void help(std::stringstream& help) const;
- };
-
- class CmdDelete : public WriteCmd {
- MONGO_DISALLOW_COPYING(CmdDelete);
- public:
- CmdDelete();
- void redactForLogging(mutablebson::Document* cmdObj);
-
- private:
- virtual void help(std::stringstream& help) const;
- };
-
-} // namespace mongo
+ WriteCmd(StringData name, BatchedCommandRequest::BatchType writeType);
+
+ // Full log of write command can be quite large.
+ static void redactTooLongLog(mutablebson::Document* cmdObj, StringData fieldName);
+
+private:
+ virtual bool slaveOk() const;
+
+ virtual bool isWriteCommandForConfigServer() const;
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ virtual bool shouldAffectCommandCounter() const;
+
+ // Write command entry point.
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ // Write commands can be explained.
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const;
+
+ // Type of batch (e.g. insert).
+ BatchedCommandRequest::BatchType _writeType;
+};
+
+class CmdInsert : public WriteCmd {
+ MONGO_DISALLOW_COPYING(CmdInsert);
+
+public:
+ CmdInsert();
+ void redactForLogging(mutablebson::Document* cmdObj);
+
+private:
+ virtual void help(std::stringstream& help) const;
+};
+
+class CmdUpdate : public WriteCmd {
+ MONGO_DISALLOW_COPYING(CmdUpdate);
+
+public:
+ CmdUpdate();
+ void redactForLogging(mutablebson::Document* cmdObj);
+
+private:
+ virtual void help(std::stringstream& help) const;
+};
+
+class CmdDelete : public WriteCmd {
+ MONGO_DISALLOW_COPYING(CmdDelete);
+
+public:
+ CmdDelete();
+ void redactForLogging(mutablebson::Document* cmdObj);
+
+private:
+ virtual void help(std::stringstream& help) const;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.cpp b/src/mongo/db/commands/write_commands/write_commands_common.cpp
index 69ca1014140..82f3ab4db67 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands_common.cpp
@@ -42,62 +42,55 @@
namespace mongo {
namespace auth {
- using std::string;
- using std::vector;
-
- Status checkAuthForWriteCommand( AuthorizationSession* authzSession,
- BatchedCommandRequest::BatchType cmdType,
- const NamespaceString& cmdNSS,
- const BSONObj& cmdObj ) {
-
- vector<Privilege> privileges;
- ActionSet actionsOnCommandNSS;
-
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actionsOnCommandNSS.addAction(ActionType::bypassDocumentValidation);
- }
-
- if ( cmdType == BatchedCommandRequest::BatchType_Insert ) {
+using std::string;
+using std::vector;
+
+Status checkAuthForWriteCommand(AuthorizationSession* authzSession,
+ BatchedCommandRequest::BatchType cmdType,
+ const NamespaceString& cmdNSS,
+ const BSONObj& cmdObj) {
+ vector<Privilege> privileges;
+ ActionSet actionsOnCommandNSS;
+
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actionsOnCommandNSS.addAction(ActionType::bypassDocumentValidation);
+ }
- if ( !cmdNSS.isSystemDotIndexes() ) {
- actionsOnCommandNSS.addAction(ActionType::insert);
+ if (cmdType == BatchedCommandRequest::BatchType_Insert) {
+ if (!cmdNSS.isSystemDotIndexes()) {
+ actionsOnCommandNSS.addAction(ActionType::insert);
+ } else {
+ // Special-case indexes until we have a command
+ string nsToIndex, errMsg;
+ if (!BatchedCommandRequest::getIndexedNS(cmdObj, &nsToIndex, &errMsg)) {
+ return Status(ErrorCodes::FailedToParse, errMsg);
}
- else {
- // Special-case indexes until we have a command
- string nsToIndex, errMsg;
- if ( !BatchedCommandRequest::getIndexedNS( cmdObj, &nsToIndex, &errMsg ) ) {
- return Status( ErrorCodes::FailedToParse, errMsg );
- }
- NamespaceString nssToIndex( nsToIndex );
- privileges.push_back( Privilege( ResourcePattern::forExactNamespace( nssToIndex ),
- ActionType::createIndex ) );
- }
+ NamespaceString nssToIndex(nsToIndex);
+ privileges.push_back(
+ Privilege(ResourcePattern::forExactNamespace(nssToIndex), ActionType::createIndex));
}
- else if ( cmdType == BatchedCommandRequest::BatchType_Update ) {
- actionsOnCommandNSS.addAction(ActionType::update);
+ } else if (cmdType == BatchedCommandRequest::BatchType_Update) {
+ actionsOnCommandNSS.addAction(ActionType::update);
- // Upsert also requires insert privs
- if ( BatchedCommandRequest::containsUpserts( cmdObj ) ) {
- actionsOnCommandNSS.addAction(ActionType::insert);
- }
- }
- else {
- fassert( 17251, cmdType == BatchedCommandRequest::BatchType_Delete );
- actionsOnCommandNSS.addAction(ActionType::remove);
- }
-
-
- if (!actionsOnCommandNSS.empty()) {
- privileges.emplace_back(ResourcePattern::forExactNamespace(cmdNSS),
- actionsOnCommandNSS);
+ // Upsert also requires insert privs
+ if (BatchedCommandRequest::containsUpserts(cmdObj)) {
+ actionsOnCommandNSS.addAction(ActionType::insert);
}
+ } else {
+ fassert(17251, cmdType == BatchedCommandRequest::BatchType_Delete);
+ actionsOnCommandNSS.addAction(ActionType::remove);
+ }
- if ( authzSession->isAuthorizedForPrivileges( privileges ) )
- return Status::OK();
- return Status( ErrorCodes::Unauthorized, "unauthorized" );
+ if (!actionsOnCommandNSS.empty()) {
+ privileges.emplace_back(ResourcePattern::forExactNamespace(cmdNSS), actionsOnCommandNSS);
}
+ if (authzSession->isAuthorizedForPrivileges(privileges))
+ return Status::OK();
+
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+}
}
}
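
Concretely, the logic above means an update batch that contains upserts resolves to a single
Privilege carrying both the update and insert actions on the target namespace. A sketch of
the shape of that check (cmdNSS and authzSession are placeholders for the caller's values):

    // Sketch only: the privileges an upserting update batch requires under
    // checkAuthForWriteCommand above.
    ActionSet actions;
    actions.addAction(ActionType::update);
    actions.addAction(ActionType::insert);  // upserts also need insert
    std::vector<Privilege> privileges;
    privileges.emplace_back(ResourcePattern::forExactNamespace(cmdNSS), actions);
    if (!authzSession->isAuthorizedForPrivileges(privileges)) {
        // the caller maps this to ErrorCodes::Unauthorized
    }

Inserts into system.indexes remain the special case: they are granted via createIndex on the
namespace being indexed rather than insert on system.indexes itself.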
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.h b/src/mongo/db/commands/write_commands/write_commands_common.h
index a1fe6bc9772..cf47bdc02b1 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.h
+++ b/src/mongo/db/commands/write_commands/write_commands_common.h
@@ -40,10 +40,9 @@
namespace mongo {
namespace auth {
- Status checkAuthForWriteCommand( AuthorizationSession* authzSession,
- BatchedCommandRequest::BatchType cmdType,
- const NamespaceString& cmdNSS,
- const BSONObj& cmdObj );
-
+Status checkAuthForWriteCommand(AuthorizationSession* authzSession,
+ BatchedCommandRequest::BatchType cmdType,
+ const NamespaceString& cmdNSS,
+ const BSONObj& cmdObj);
}
}
diff --git a/src/mongo/db/commands/writeback_compatibility_shim.cpp b/src/mongo/db/commands/writeback_compatibility_shim.cpp
index 99feccfad58..b03cf3b21dc 100644
--- a/src/mongo/db/commands/writeback_compatibility_shim.cpp
+++ b/src/mongo/db/commands/writeback_compatibility_shim.cpp
@@ -43,85 +43,85 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- using mongoutils::str::stream;
-
- /**
- * This command is required in v3.0 mongod to prevent v2.6 mongos from entering a tight loop and
- * spamming the server with invalid writebacklisten requests. This command reports an error
- * and pauses, which is safe because the original v2.6 WBL command was a long-poll (30s).
- */
- class WriteBackCommand : public Command {
- public:
- WriteBackCommand() : Command("writebacklisten") {}
-
- void help(stringstream& helpOut) const {
- helpOut << "v3.0 disallowed internal command, present for compatibility only";
- }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- //
- // Same as v2.6 settings
- //
-
- virtual bool adminOnly() const { return true; }
- virtual bool slaveOk() const { return true; }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::internal);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- virtual bool run(OperationContext* opCtx,
- const string&,
- BSONObj&,
- int,
- string&,
- BSONObjBuilder& result) {
-
- string errMsg = stream()
- << "Writeback functionality is no longer present in v3.0 mongod, "
- << "a v2.6 mongos may be running in the v3.0 cluster at "
- << opCtx->getClient()->clientAddress(false);
-
- error() << errMsg;
-
- // Prevent v2.6 mongos from spamming writebacklisten retries
- const int kSleepSecsBeforeMessage = 5;
- sleepsecs(kSleepSecsBeforeMessage);
-
- return appendCommandStatus(result, Status(ErrorCodes::CommandNotFound, errMsg));
- }
- };
-
- /**
- * The "writeBacksQueued" field is required in ServerStatus output to avoid v2.6 mongos crashing
- * confusingly when upgrading a cluster.
- */
- class WriteBacksQueuedSSM : public ServerStatusMetric {
- public:
- WriteBacksQueuedSSM() : ServerStatusMetric(".writeBacksQueued") {}
-
- virtual void appendAtLeaf(BSONObjBuilder& b) const {
- // always append false, we don't queue writebacks
- b.appendBool(_leafName, false);
- }
- };
-
- namespace {
- MONGO_INITIALIZER(RegisterWriteBackShim)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new WriteBackCommand();
- // Leaked intentionally: a SSM registers itself when constructed.
- new WriteBacksQueuedSSM();
- return Status::OK();
- }
+using std::string;
+using std::stringstream;
+
+using mongoutils::str::stream;
+
+/**
+ * This command is required in v3.0 mongod to prevent v2.6 mongos from entering a tight loop and
+ * spamming the server with invalid writebacklisten requests. This command reports an error
+ * and pauses, which is safe because the original v2.6 WBL command was a long-poll (30s).
+ */
+class WriteBackCommand : public Command {
+public:
+ WriteBackCommand() : Command("writebacklisten") {}
+
+ void help(stringstream& helpOut) const {
+ helpOut << "v3.0 disallowed internal command, present for compatibility only";
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ //
+ // Same as v2.6 settings
+ //
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
}
-} // namespace
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::internal);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ virtual bool run(
+ OperationContext* opCtx, const string&, BSONObj&, int, string&, BSONObjBuilder& result) {
+ string errMsg = stream() << "Writeback functionality is no longer present in v3.0 mongod, "
+ << "a v2.6 mongos may be running in the v3.0 cluster at "
+ << opCtx->getClient()->clientAddress(false);
+
+ error() << errMsg;
+
+ // Prevent v2.6 mongos from spamming writebacklisten retries
+ const int kSleepSecsBeforeMessage = 5;
+ sleepsecs(kSleepSecsBeforeMessage);
+
+ return appendCommandStatus(result, Status(ErrorCodes::CommandNotFound, errMsg));
+ }
+};
+
+/**
+ * The "writeBacksQueued" field is required in ServerStatus output to avoid v2.6 mongos crashing
+ * confusingly when upgrading a cluster.
+ */
+class WriteBacksQueuedSSM : public ServerStatusMetric {
+public:
+ WriteBacksQueuedSSM() : ServerStatusMetric(".writeBacksQueued") {}
+
+ virtual void appendAtLeaf(BSONObjBuilder& b) const {
+ // always append false, we don't queue writebacks
+ b.appendBool(_leafName, false);
+ }
+};
+
+namespace {
+MONGO_INITIALIZER(RegisterWriteBackShim)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new WriteBackCommand();
+ // Leaked intentionally: a SSM registers itself when constructed.
+ new WriteBacksQueuedSSM();
+ return Status::OK();
+}
+}
+
+} // namespace
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 1851e84dcc2..ae0526bb535 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -42,184 +42,168 @@
namespace mongo {
namespace {
- // SERVER-14668: Remove or invert sense once MMAPv1 CLL can be default
- MONGO_EXPORT_STARTUP_SERVER_PARAMETER(enableCollectionLocking, bool, true);
-} // namespace
+// SERVER-14668: Remove or invert sense once MMAPv1 CLL can be default
+MONGO_EXPORT_STARTUP_SERVER_PARAMETER(enableCollectionLocking, bool, true);
+} // namespace
- Lock::TempRelease::TempRelease(Locker* lockState)
- : _lockState(lockState),
- _lockSnapshot(),
- _locksReleased(_lockState->saveLockStateAndUnlock(&_lockSnapshot)) {
+Lock::TempRelease::TempRelease(Locker* lockState)
+ : _lockState(lockState),
+ _lockSnapshot(),
+ _locksReleased(_lockState->saveLockStateAndUnlock(&_lockSnapshot)) {}
+Lock::TempRelease::~TempRelease() {
+ if (_locksReleased) {
+ invariant(!_lockState->isLocked());
+ _lockState->restoreLockState(_lockSnapshot);
}
+}
- Lock::TempRelease::~TempRelease() {
- if (_locksReleased) {
- invariant(!_lockState->isLocked());
- _lockState->restoreLockState(_lockSnapshot);
- }
- }
-
- Lock::GlobalLock::GlobalLock(Locker* locker)
- : _locker(locker),
- _result(LOCK_INVALID),
- _pbwm(locker, resourceIdParallelBatchWriterMode) { }
-
- Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs)
- : _locker(locker),
- _result(LOCK_INVALID),
- _pbwm(locker, resourceIdParallelBatchWriterMode) {
- _lock(lockMode, timeoutMs);
- }
-
+Lock::GlobalLock::GlobalLock(Locker* locker)
+ : _locker(locker), _result(LOCK_INVALID), _pbwm(locker, resourceIdParallelBatchWriterMode) {}
+Lock::GlobalLock::GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs)
+ : _locker(locker), _result(LOCK_INVALID), _pbwm(locker, resourceIdParallelBatchWriterMode) {
+ _lock(lockMode, timeoutMs);
+}
- void Lock::GlobalLock::_lock(LockMode lockMode, unsigned timeoutMs) {
- if (!_locker->isBatchWriter()) {
- _pbwm.lock(MODE_IS);
- }
- _result = _locker->lockGlobalBegin(lockMode);
- if (_result == LOCK_WAITING) {
- _result = _locker->lockGlobalComplete(timeoutMs);
- }
-
- if (_result != LOCK_OK && !_locker->isBatchWriter()) {
- _pbwm.unlock();
- }
+void Lock::GlobalLock::_lock(LockMode lockMode, unsigned timeoutMs) {
+ if (!_locker->isBatchWriter()) {
+ _pbwm.lock(MODE_IS);
}
- void Lock::GlobalLock::_unlock() {
- if (isLocked()) {
- _locker->unlockAll();
- _result = LOCK_INVALID;
- }
+ _result = _locker->lockGlobalBegin(lockMode);
+ if (_result == LOCK_WAITING) {
+ _result = _locker->lockGlobalComplete(timeoutMs);
}
+ if (_result != LOCK_OK && !_locker->isBatchWriter()) {
+ _pbwm.unlock();
+ }
+}
- Lock::DBLock::DBLock(Locker* locker, StringData db, LockMode mode)
- : _id(RESOURCE_DATABASE, db),
- _locker(locker),
- _mode(mode),
- _globalLock(locker, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
+void Lock::GlobalLock::_unlock() {
+ if (isLocked()) {
+ _locker->unlockAll();
+ _result = LOCK_INVALID;
+ }
+}
- massert(28539, "need a valid database name", !db.empty() && nsIsDbOnly(db));
- // Need to acquire the flush lock
- _locker->lockMMAPV1Flush();
+Lock::DBLock::DBLock(Locker* locker, StringData db, LockMode mode)
+ : _id(RESOURCE_DATABASE, db),
+ _locker(locker),
+ _mode(mode),
+ _globalLock(locker, isSharedLockMode(_mode) ? MODE_IS : MODE_IX, UINT_MAX) {
+ massert(28539, "need a valid database name", !db.empty() && nsIsDbOnly(db));
- if (supportsDocLocking() || enableCollectionLocking) {
- // The check for the admin db is to ensure direct writes to auth collections
- // are serialized (see SERVER-16092).
- if ((_id == resourceIdAdminDB) && !isSharedLockMode(_mode)) {
- _mode = MODE_X;
- }
+ // Need to acquire the flush lock
+ _locker->lockMMAPV1Flush();
- invariant(LOCK_OK == _locker->lock(_id, _mode));
- }
- else {
- invariant(LOCK_OK == _locker->lock(_id, isSharedLockMode(_mode) ? MODE_S : MODE_X));
+ if (supportsDocLocking() || enableCollectionLocking) {
+ // The check for the admin db is to ensure direct writes to auth collections
+ // are serialized (see SERVER-16092).
+ if ((_id == resourceIdAdminDB) && !isSharedLockMode(_mode)) {
+ _mode = MODE_X;
}
- }
- Lock::DBLock::~DBLock() {
- _locker->unlock(_id);
+ invariant(LOCK_OK == _locker->lock(_id, _mode));
+ } else {
+ invariant(LOCK_OK == _locker->lock(_id, isSharedLockMode(_mode) ? MODE_S : MODE_X));
}
+}
- void Lock::DBLock::relockWithMode(LockMode newMode) {
- // 2PL would delay the unlocking
- invariant(!_locker->inAWriteUnitOfWork());
+Lock::DBLock::~DBLock() {
+ _locker->unlock(_id);
+}
- // Not allowed to change global intent
- invariant(!isSharedLockMode(_mode) || isSharedLockMode(newMode));
+void Lock::DBLock::relockWithMode(LockMode newMode) {
+ // 2PL would delay the unlocking
+ invariant(!_locker->inAWriteUnitOfWork());
- _locker->unlock(_id);
- _mode = newMode;
+ // Not allowed to change global intent
+ invariant(!isSharedLockMode(_mode) || isSharedLockMode(newMode));
- if (supportsDocLocking() || enableCollectionLocking) {
- invariant(LOCK_OK == _locker->lock(_id, _mode));
- }
- else {
- invariant(LOCK_OK == _locker->lock(_id, isSharedLockMode(_mode) ? MODE_S : MODE_X));
- }
- }
+ _locker->unlock(_id);
+ _mode = newMode;
+ if (supportsDocLocking() || enableCollectionLocking) {
+ invariant(LOCK_OK == _locker->lock(_id, _mode));
+ } else {
+ invariant(LOCK_OK == _locker->lock(_id, isSharedLockMode(_mode) ? MODE_S : MODE_X));
+ }
+}
- Lock::CollectionLock::CollectionLock(Locker* lockState,
- StringData ns,
- LockMode mode)
- : _id(RESOURCE_COLLECTION, ns),
- _lockState(lockState) {
- massert(28538, "need a non-empty collection name", nsIsFull(ns));
+Lock::CollectionLock::CollectionLock(Locker* lockState, StringData ns, LockMode mode)
+ : _id(RESOURCE_COLLECTION, ns), _lockState(lockState) {
+ massert(28538, "need a non-empty collection name", nsIsFull(ns));
- dassert(_lockState->isDbLockedForMode(nsToDatabaseSubstring(ns),
- isSharedLockMode(mode) ? MODE_IS : MODE_IX));
- if (supportsDocLocking()) {
- _lockState->lock(_id, mode);
- }
- else if (enableCollectionLocking) {
- _lockState->lock(_id, isSharedLockMode(mode) ? MODE_S : MODE_X);
- }
+ dassert(_lockState->isDbLockedForMode(nsToDatabaseSubstring(ns),
+ isSharedLockMode(mode) ? MODE_IS : MODE_IX));
+ if (supportsDocLocking()) {
+ _lockState->lock(_id, mode);
+ } else if (enableCollectionLocking) {
+ _lockState->lock(_id, isSharedLockMode(mode) ? MODE_S : MODE_X);
}
+}
- Lock::CollectionLock::~CollectionLock() {
- if (supportsDocLocking() || enableCollectionLocking) {
- _lockState->unlock(_id);
- }
+Lock::CollectionLock::~CollectionLock() {
+ if (supportsDocLocking() || enableCollectionLocking) {
+ _lockState->unlock(_id);
}
+}
- void Lock::CollectionLock::relockAsDatabaseExclusive(Lock::DBLock& dbLock) {
- if (supportsDocLocking() || enableCollectionLocking) {
- _lockState->unlock(_id);
- }
+void Lock::CollectionLock::relockAsDatabaseExclusive(Lock::DBLock& dbLock) {
+ if (supportsDocLocking() || enableCollectionLocking) {
+ _lockState->unlock(_id);
+ }
- dbLock.relockWithMode(MODE_X);
+ dbLock.relockWithMode(MODE_X);
- if (supportsDocLocking() || enableCollectionLocking) {
- // don't need the lock, but need something to unlock in the destructor
- _lockState->lock(_id, MODE_IX);
- }
+ if (supportsDocLocking() || enableCollectionLocking) {
+ // don't need the lock, but need something to unlock in the destructor
+ _lockState->lock(_id, MODE_IX);
}
+}
namespace {
- stdx::mutex oplogSerialization; // for OplogIntentWriteLock
-} // namespace
+stdx::mutex oplogSerialization; // for OplogIntentWriteLock
+} // namespace
- Lock::OplogIntentWriteLock::OplogIntentWriteLock(Locker* lockState)
- : _lockState(lockState),
- _serialized(false) {
- _lockState->lock(resourceIdOplog, MODE_IX);
- }
+Lock::OplogIntentWriteLock::OplogIntentWriteLock(Locker* lockState)
+ : _lockState(lockState), _serialized(false) {
+ _lockState->lock(resourceIdOplog, MODE_IX);
+}
- Lock::OplogIntentWriteLock::~OplogIntentWriteLock() {
- if (_serialized) {
- oplogSerialization.unlock();
- }
- _lockState->unlock(resourceIdOplog);
+Lock::OplogIntentWriteLock::~OplogIntentWriteLock() {
+ if (_serialized) {
+ oplogSerialization.unlock();
}
+ _lockState->unlock(resourceIdOplog);
+}
- void Lock::OplogIntentWriteLock::serializeIfNeeded() {
- if (!supportsDocLocking() && !_serialized) {
- oplogSerialization.lock();
- _serialized = true;
- }
+void Lock::OplogIntentWriteLock::serializeIfNeeded() {
+ if (!supportsDocLocking() && !_serialized) {
+ oplogSerialization.lock();
+ _serialized = true;
}
+}
- Lock::ParallelBatchWriterMode::ParallelBatchWriterMode(Locker* lockState)
- : _pbwm(lockState, resourceIdParallelBatchWriterMode, MODE_X) { }
+Lock::ParallelBatchWriterMode::ParallelBatchWriterMode(Locker* lockState)
+ : _pbwm(lockState, resourceIdParallelBatchWriterMode, MODE_X) {}
- void Lock::ResourceLock::lock(LockMode mode) {
- invariant(_result == LOCK_INVALID);
- _result = _locker->lock(_rid, mode);
- invariant(_result == LOCK_OK);
- }
+void Lock::ResourceLock::lock(LockMode mode) {
+ invariant(_result == LOCK_INVALID);
+ _result = _locker->lock(_rid, mode);
+ invariant(_result == LOCK_OK);
+}
- void Lock::ResourceLock::unlock() {
- if (_result == LOCK_OK) {
- _locker->unlock(_rid);
- _result = LOCK_INVALID;
- }
+void Lock::ResourceLock::unlock() {
+ if (_result == LOCK_OK) {
+ _locker->unlock(_rid);
+ _result = LOCK_INVALID;
}
+}
-} // namespace mongo
+} // namespace mongo
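A minimal usage sketch of the admin-database upgrade enforced by the DBLock constructor above, assuming the DefaultLockerImpl that the tests further down in this changeset also use; the code is illustrative only:

    DefaultLockerImpl locker;
    // A non-shared request against "admin" is upgraded to MODE_X so that direct
    // writes to the auth collections are serialized (see SERVER-16092 above).
    Lock::DBLock adminLock(&locker, "admin", MODE_IX);
    invariant(locker.getLockMode(resourceIdAdminDB) == MODE_X);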
diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h
index f21d356994e..b95b7b46fec 100644
--- a/src/mongo/db/concurrency/d_concurrency.h
+++ b/src/mongo/db/concurrency/d_concurrency.h
@@ -28,257 +28,256 @@
#pragma once
-#include <climits> // For UINT_MAX
+#include <climits> // For UINT_MAX
#include "mongo/db/concurrency/locker.h"
#include "mongo/util/timer.h"
namespace mongo {
- class StringData;
+class StringData;
+
+class Lock {
+public:
+ /**
+ * NOTE: DO NOT add any new usages of TempRelease. It is being deprecated/removed.
+ */
+ class TempRelease {
+ MONGO_DISALLOW_COPYING(TempRelease);
- class Lock {
public:
+ explicit TempRelease(Locker* lockState);
+ ~TempRelease();
- /**
- * NOTE: DO NOT add any new usages of TempRelease. It is being deprecated/removed.
- */
- class TempRelease {
- MONGO_DISALLOW_COPYING(TempRelease);
- public:
- explicit TempRelease(Locker* lockState);
- ~TempRelease();
+ private:
+ // Not owned
+ Locker* const _lockState;
- private:
- // Not owned
- Locker* const _lockState;
+ // If _locksReleased is true, this stores the persisted lock information to be restored
+ // in the destructor. Otherwise it is empty.
+ Locker::LockSnapshot _lockSnapshot;
- // If _locksReleased is true, this stores the persisted lock information to be restored
- // in the destructor. Otherwise it is empty.
- Locker::LockSnapshot _lockSnapshot;
+ // False if locks could not be released because of recursive locking
+ const bool _locksReleased;
+ };
- // False if locks could not be released because of recursive locking
- const bool _locksReleased;
- };
+ /**
+ * General purpose RAII wrapper for a resource managed by the lock manager
+ *
+     * See LockMode for the supported modes. Unlike DBLock/CollectionLock, this will not do
+ * any additional checks/upgrades or global locking. Use ResourceLock for locking
+ * resources other than RESOURCE_GLOBAL, RESOURCE_DATABASE and RESOURCE_COLLECTION.
+ */
+ class ResourceLock {
+ MONGO_DISALLOW_COPYING(ResourceLock);
- /**
- * General purpose RAII wrapper for a resource managed by the lock manager
- *
- * See LockMode for the supported modes. Unlike DBLock/Collection lock, this will not do
- * any additional checks/upgrades or global locking. Use ResourceLock for locking
- * resources other than RESOURCE_GLOBAL, RESOURCE_DATABASE and RESOURCE_COLLECTION.
- */
- class ResourceLock {
- MONGO_DISALLOW_COPYING(ResourceLock);
-
- public:
- ResourceLock(Locker* locker, ResourceId rid)
- : _rid(rid),
- _locker(locker),
- _result(LOCK_INVALID) {
- }
-
- ResourceLock(Locker* locker, ResourceId rid, LockMode mode)
- : _rid(rid),
- _locker(locker),
- _result(LOCK_INVALID) {
- lock(mode);
- }
+ public:
+ ResourceLock(Locker* locker, ResourceId rid)
+ : _rid(rid), _locker(locker), _result(LOCK_INVALID) {}
- ~ResourceLock() {
- unlock();
- }
+ ResourceLock(Locker* locker, ResourceId rid, LockMode mode)
+ : _rid(rid), _locker(locker), _result(LOCK_INVALID) {
+ lock(mode);
+ }
- void lock(LockMode mode);
- void unlock();
+ ~ResourceLock() {
+ unlock();
+ }
- bool isLocked() const { return _result == LOCK_OK; }
+ void lock(LockMode mode);
+ void unlock();
- private:
- const ResourceId _rid;
- Locker* const _locker;
+ bool isLocked() const {
+ return _result == LOCK_OK;
+ }
- LockResult _result;
- };
+ private:
+ const ResourceId _rid;
+ Locker* const _locker;
+ LockResult _result;
+ };
- /**
- * Global lock.
- *
- * Grabs global resource lock. Allows further (recursive) acquisition of the global lock
- * in any mode, see LockMode.
- * NOTE: Does not acquire flush lock.
- */
- class GlobalLock {
- public:
- explicit GlobalLock(Locker* locker);
- GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs);
- ~GlobalLock() {
- _unlock();
- }
+ /**
+ * Global lock.
+ *
+ * Grabs global resource lock. Allows further (recursive) acquisition of the global lock
+ * in any mode, see LockMode.
+ * NOTE: Does not acquire flush lock.
+ */
+ class GlobalLock {
+ public:
+ explicit GlobalLock(Locker* locker);
+ GlobalLock(Locker* locker, LockMode lockMode, unsigned timeoutMs);
- bool isLocked() const { return _result == LOCK_OK; }
+ ~GlobalLock() {
+ _unlock();
+ }
- private:
+ bool isLocked() const {
+ return _result == LOCK_OK;
+ }
- void _lock(LockMode lockMode, unsigned timeoutMs);
- void _unlock();
+ private:
+ void _lock(LockMode lockMode, unsigned timeoutMs);
+ void _unlock();
- Locker* const _locker;
- LockResult _result;
- ResourceLock _pbwm;
- };
+ Locker* const _locker;
+ LockResult _result;
+ ResourceLock _pbwm;
+ };
- /**
- * Global exclusive lock
- *
- * Allows exclusive write access to all databases and collections, blocking all other
- * access. Allows further (recursive) acquisition of the global lock in any mode,
- * see LockMode.
- */
- class GlobalWrite : public GlobalLock {
- public:
- explicit GlobalWrite(Locker* locker, unsigned timeoutMs = UINT_MAX)
- : GlobalLock(locker, MODE_X, timeoutMs) {
-
- if (isLocked()) {
- locker->lockMMAPV1Flush();
- }
+ /**
+ * Global exclusive lock
+ *
+ * Allows exclusive write access to all databases and collections, blocking all other
+ * access. Allows further (recursive) acquisition of the global lock in any mode,
+ * see LockMode.
+ */
+ class GlobalWrite : public GlobalLock {
+ public:
+ explicit GlobalWrite(Locker* locker, unsigned timeoutMs = UINT_MAX)
+ : GlobalLock(locker, MODE_X, timeoutMs) {
+ if (isLocked()) {
+ locker->lockMMAPV1Flush();
}
- };
+ }
+ };
- /**
- * Global shared lock
- *
- * Allows concurrent read access to all databases and collections, blocking any writers.
- * Allows further (recursive) acquisition of the global lock in shared (S) or intent-shared
- * (IS) mode, see LockMode.
- */
- class GlobalRead : public GlobalLock {
- public:
- explicit GlobalRead(Locker* locker, unsigned timeoutMs = UINT_MAX)
- : GlobalLock(locker, MODE_S, timeoutMs) {
-
- if (isLocked()) {
- locker->lockMMAPV1Flush();
- }
+ /**
+ * Global shared lock
+ *
+ * Allows concurrent read access to all databases and collections, blocking any writers.
+ * Allows further (recursive) acquisition of the global lock in shared (S) or intent-shared
+ * (IS) mode, see LockMode.
+ */
+ class GlobalRead : public GlobalLock {
+ public:
+ explicit GlobalRead(Locker* locker, unsigned timeoutMs = UINT_MAX)
+ : GlobalLock(locker, MODE_S, timeoutMs) {
+ if (isLocked()) {
+ locker->lockMMAPV1Flush();
}
- };
+ }
+ };
+
+ /**
+ * Database lock with support for collection- and document-level locking
+ *
+     * This lock supports four modes (see LockMode):
+ * MODE_IS: concurrent database access, requiring further collection read locks
+ * MODE_IX: concurrent database access, requiring further collection read or write locks
+ * MODE_S: shared read access to the database, blocking any writers
+ * MODE_X: exclusive access to the database, blocking all other readers and writers
+ *
+ * For MODE_IS or MODE_S also acquires global lock in intent-shared (IS) mode, and
+ * for MODE_IX or MODE_X also acquires global lock in intent-exclusive (IX) mode.
+ * For storage engines that do not support collection-level locking, MODE_IS will be
+ * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ */
+ class DBLock {
+ public:
+ DBLock(Locker* locker, StringData db, LockMode mode);
+ ~DBLock();
/**
- * Database lock with support for collection- and document-level locking
- *
- * This lock supports four modes (see Lock_Mode):
- * MODE_IS: concurrent database access, requiring further collection read locks
- * MODE_IX: concurrent database access, requiring further collection read or write locks
- * MODE_S: shared read access to the database, blocking any writers
- * MODE_X: exclusive access to the database, blocking all other readers and writers
- *
- * For MODE_IS or MODE_S also acquires global lock in intent-shared (IS) mode, and
- * for MODE_IX or MODE_X also acquires global lock in intent-exclusive (IX) mode.
- * For storage engines that do not support collection-level locking, MODE_IS will be
- * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ * Releases the DBLock and reacquires it with the new mode. The global intent
+ * lock is retained (so the database can't disappear). Relocking from MODE_IS or
+ * MODE_S to MODE_IX or MODE_X is not allowed to avoid violating the global intent.
+ * Use relockWithMode() instead of upgrading to avoid deadlock.
*/
- class DBLock {
- public:
- DBLock(Locker* locker, StringData db, LockMode mode);
- ~DBLock();
+ void relockWithMode(LockMode newMode);
- /**
- * Releases the DBLock and reacquires it with the new mode. The global intent
- * lock is retained (so the database can't disappear). Relocking from MODE_IS or
- * MODE_S to MODE_IX or MODE_X is not allowed to avoid violating the global intent.
- * Use relockWithMode() instead of upgrading to avoid deadlock.
- */
- void relockWithMode(LockMode newMode);
+ private:
+ const ResourceId _id;
+ Locker* const _locker;
- private:
- const ResourceId _id;
- Locker* const _locker;
+ // May be changed through relockWithMode. The global lock mode won't change though,
+ // because we never change from IS/S to IX/X or vice versa, just convert locks from
+ // IX -> X.
+ LockMode _mode;
- // May be changed through relockWithMode. The global lock mode won't change though,
- // because we never change from IS/S to IX/X or vice versa, just convert locks from
- // IX -> X.
- LockMode _mode;
+ // Acquires the global lock on our behalf.
+ GlobalLock _globalLock;
+ };
- // Acquires the global lock on our behalf.
- GlobalLock _globalLock;
- };
+ /**
+ * Collection lock with support for document-level locking
+ *
+     * This lock supports four modes (see LockMode):
+     *   MODE_IS: concurrent collection access, requiring document-level read locks
+     *   MODE_IX: concurrent collection access, requiring document-level read or write locks
+ * MODE_S: shared read access to the collection, blocking any writers
+ * MODE_X: exclusive access to the collection, blocking all other readers and writers
+ *
+ * An appropriate DBLock must already be held before locking a collection: it is an error,
+ * checked with a dassert(), to not have a suitable database lock before locking the
+ * collection. For storage engines that do not support document-level locking, MODE_IS
+ * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
+ */
+ class CollectionLock {
+ MONGO_DISALLOW_COPYING(CollectionLock);
- /**
- * Collection lock with support for document-level locking
- *
- * This lock supports four modes (see Lock_Mode):
- * MODE_IS: concurrent collection access, requiring document level locking read locks
- * MODE_IX: concurrent collection access, requiring document level read or write locks
- * MODE_S: shared read access to the collection, blocking any writers
- * MODE_X: exclusive access to the collection, blocking all other readers and writers
- *
- * An appropriate DBLock must already be held before locking a collection: it is an error,
- * checked with a dassert(), to not have a suitable database lock before locking the
- * collection. For storage engines that do not support document-level locking, MODE_IS
- * will be upgraded to MODE_S and MODE_IX will be upgraded to MODE_X.
- */
- class CollectionLock {
- MONGO_DISALLOW_COPYING(CollectionLock);
- public:
- CollectionLock(Locker* lockState, StringData ns, LockMode mode);
- ~CollectionLock();
-
- /**
- * When holding the collection in MODE_IX or MODE_X, calling this will release the
- * collection and database locks, and relocks the database in MODE_X. This is typically
- * used if the collection still needs to be created. Upgrading would not be safe as
- * it could lead to deadlock, similarly for relocking the database without releasing
- * the collection lock. The collection lock will also be reacquired even though it is
- * not really needed, as it simplifies invariant checking: the CollectionLock class
- * has as invariant that a collection lock is being held.
- */
- void relockAsDatabaseExclusive(Lock::DBLock& dbLock);
-
- private:
- const ResourceId _id;
- Locker* const _lockState;
- };
+ public:
+ CollectionLock(Locker* lockState, StringData ns, LockMode mode);
+ ~CollectionLock();
/**
- * Like the CollectionLock, but optimized for the local oplog. Always locks in MODE_IX,
- * must call serializeIfNeeded() before doing any concurrent operations in order to
- * support storage engines without document level locking. It is an error, checked with a
- * dassert(), to not have a suitable database lock when taking this lock.
+ * When holding the collection in MODE_IX or MODE_X, calling this will release the
+     * collection and database locks and relock the database in MODE_X. This is typically
+ * used if the collection still needs to be created. Upgrading would not be safe as
+ * it could lead to deadlock, similarly for relocking the database without releasing
+ * the collection lock. The collection lock will also be reacquired even though it is
+ * not really needed, as it simplifies invariant checking: the CollectionLock class
+     * maintains the invariant that a collection lock is being held.
*/
- class OplogIntentWriteLock {
- MONGO_DISALLOW_COPYING(OplogIntentWriteLock);
- public:
- explicit OplogIntentWriteLock(Locker* lockState);
- ~OplogIntentWriteLock();
- void serializeIfNeeded();
- private:
- Locker* const _lockState;
- bool _serialized;
- };
+ void relockAsDatabaseExclusive(Lock::DBLock& dbLock);
+ private:
+ const ResourceId _id;
+ Locker* const _lockState;
+ };
- /**
- * Turn on "parallel batch writer mode" by locking the global ParallelBatchWriterMode
- * resource in exclusive mode. This mode is off by default.
- * Note that only one thread creates a ParallelBatchWriterMode object; the other batch
- * writers just call setIsBatchWriter().
- */
- class ParallelBatchWriterMode {
- MONGO_DISALLOW_COPYING(ParallelBatchWriterMode);
+ /**
+ * Like the CollectionLock, but optimized for the local oplog. Always locks in MODE_IX,
+ * must call serializeIfNeeded() before doing any concurrent operations in order to
+ * support storage engines without document level locking. It is an error, checked with a
+ * dassert(), to not have a suitable database lock when taking this lock.
+ */
+ class OplogIntentWriteLock {
+ MONGO_DISALLOW_COPYING(OplogIntentWriteLock);
+
+ public:
+ explicit OplogIntentWriteLock(Locker* lockState);
+ ~OplogIntentWriteLock();
+ void serializeIfNeeded();
- public:
- explicit ParallelBatchWriterMode(Locker* lockState);
+ private:
+ Locker* const _lockState;
+ bool _serialized;
+ };
+
+
+ /**
+ * Turn on "parallel batch writer mode" by locking the global ParallelBatchWriterMode
+ * resource in exclusive mode. This mode is off by default.
+ * Note that only one thread creates a ParallelBatchWriterMode object; the other batch
+ * writers just call setIsBatchWriter().
+ */
+ class ParallelBatchWriterMode {
+ MONGO_DISALLOW_COPYING(ParallelBatchWriterMode);
+
+ public:
+ explicit ParallelBatchWriterMode(Locker* lockState);
- private:
- ResourceLock _pbwm;
- };
+ private:
+ ResourceLock _pbwm;
};
+};
}
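A minimal usage sketch of the layering declared above, assuming the MMAPV1LockerImpl used by the tests in this changeset; the names "db1" and "db1.coll" are illustrative:

    MMAPV1LockerImpl locker;
    // DBLock also acquires the global lock in MODE_IX on our behalf.
    Lock::DBLock dbLock(&locker, "db1", MODE_IX);
    // CollectionLock requires the database to already be held in at least MODE_IX.
    Lock::CollectionLock collLock(&locker, "db1.coll", MODE_IX);
    invariant(locker.isDbLockedForMode("db1", MODE_IX));
    invariant(locker.isCollectionLockedForMode("db1.coll", MODE_IX));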
diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp
index 245424cdf39..855a17d99c6 100644
--- a/src/mongo/db/concurrency/d_concurrency_test.cpp
+++ b/src/mongo/db/concurrency/d_concurrency_test.cpp
@@ -36,274 +36,274 @@
namespace mongo {
- using std::string;
+using std::string;
- TEST(DConcurrency, GlobalRead) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
- ASSERT(ls.isR());
- }
-
- TEST(DConcurrency, GlobalWrite) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
- ASSERT(ls.isW());
- }
+TEST(DConcurrency, GlobalRead) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalRead globalRead(&ls);
+ ASSERT(ls.isR());
+}
- TEST(DConcurrency, GlobalWriteAndGlobalRead) {
- MMAPV1LockerImpl ls;
+TEST(DConcurrency, GlobalWrite) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
+ ASSERT(ls.isW());
+}
- Lock::GlobalWrite globalWrite(&ls);
- ASSERT(ls.isW());
+TEST(DConcurrency, GlobalWriteAndGlobalRead) {
+ MMAPV1LockerImpl ls;
- {
- Lock::GlobalRead globalRead(&ls);
- ASSERT(ls.isW());
- }
+ Lock::GlobalWrite globalWrite(&ls);
+ ASSERT(ls.isW());
+ {
+ Lock::GlobalRead globalRead(&ls);
ASSERT(ls.isW());
}
- TEST(DConcurrency, GlobalLockS_Timeout) {
- MMAPV1LockerImpl ls;
- Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
- ASSERT(globalWrite.isLocked());
-
- {
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
- ASSERT(!globalReadTry.isLocked());
- }
- }
-
- TEST(DConcurrency, GlobalLockX_Timeout) {
- MMAPV1LockerImpl ls;
- Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
- ASSERT(globalWrite.isLocked());
-
- {
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
- ASSERT(!globalWriteTry.isLocked());
- }
- }
+ ASSERT(ls.isW());
+}
- TEST(DConcurrency, GlobalLockS_NoTimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
+TEST(DConcurrency, GlobalLockS_Timeout) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
+ ASSERT(globalWrite.isLocked());
+ {
MMAPV1LockerImpl lsTry;
Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
-
- ASSERT(globalReadTry.isLocked());
+ ASSERT(!globalReadTry.isLocked());
}
+}
- TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockS) {
- MMAPV1LockerImpl ls;
- Lock::GlobalRead globalRead(&ls);
+TEST(DConcurrency, GlobalLockX_Timeout) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalLock globalWrite(&ls, MODE_X, 0);
+ ASSERT(globalWrite.isLocked());
+ {
MMAPV1LockerImpl lsTry;
Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
-
ASSERT(!globalWriteTry.isLocked());
}
+}
- TEST(DConcurrency, GlobalLockS_TimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST(DConcurrency, GlobalLockS_NoTimeoutDueToGlobalLockS) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalRead globalRead(&ls);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
+ MMAPV1LockerImpl lsTry;
+ Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
- ASSERT(!globalReadTry.isLocked());
- }
+ ASSERT(globalReadTry.isLocked());
+}
- TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockX) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockS) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalRead globalRead(&ls);
- MMAPV1LockerImpl lsTry;
- Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
+ MMAPV1LockerImpl lsTry;
+ Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
- ASSERT(!globalWriteTry.isLocked());
- }
+ ASSERT(!globalWriteTry.isLocked());
+}
- TEST(DConcurrency, TempReleaseGlobalWrite) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
+TEST(DConcurrency, GlobalLockS_TimeoutDueToGlobalLockX) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
- {
- Lock::TempRelease tempRelease(&ls);
- ASSERT(!ls.isLocked());
- }
+ MMAPV1LockerImpl lsTry;
+ Lock::GlobalLock globalReadTry(&lsTry, MODE_S, 1);
- ASSERT(ls.isW());
+ ASSERT(!globalReadTry.isLocked());
+}
+
+TEST(DConcurrency, GlobalLockX_TimeoutDueToGlobalLockX) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
+
+ MMAPV1LockerImpl lsTry;
+ Lock::GlobalLock globalWriteTry(&lsTry, MODE_X, 1);
+
+ ASSERT(!globalWriteTry.isLocked());
+}
+
+TEST(DConcurrency, TempReleaseGlobalWrite) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
+
+ {
+ Lock::TempRelease tempRelease(&ls);
+ ASSERT(!ls.isLocked());
}
- TEST(DConcurrency, TempReleaseRecursive) {
- MMAPV1LockerImpl ls;
- Lock::GlobalWrite globalWrite(&ls);
- Lock::DBLock lk(&ls, "SomeDBName", MODE_X);
+ ASSERT(ls.isW());
+}
- {
- Lock::TempRelease tempRelease(&ls);
- ASSERT(ls.isW());
- ASSERT(ls.isDbLockedForMode("SomeDBName", MODE_X));
- }
+TEST(DConcurrency, TempReleaseRecursive) {
+ MMAPV1LockerImpl ls;
+ Lock::GlobalWrite globalWrite(&ls);
+ Lock::DBLock lk(&ls, "SomeDBName", MODE_X);
+ {
+ Lock::TempRelease tempRelease(&ls);
ASSERT(ls.isW());
+ ASSERT(ls.isDbLockedForMode("SomeDBName", MODE_X));
}
- TEST(DConcurrency, DBLockTakesS) {
- MMAPV1LockerImpl ls;
+ ASSERT(ls.isW());
+}
- Lock::DBLock dbRead(&ls, "db", MODE_S);
+TEST(DConcurrency, DBLockTakesS) {
+ MMAPV1LockerImpl ls;
- const ResourceId resIdDb(RESOURCE_DATABASE, string("db"));
- ASSERT(ls.getLockMode(resIdDb) == MODE_S);
- }
+ Lock::DBLock dbRead(&ls, "db", MODE_S);
- TEST(DConcurrency, DBLockTakesX) {
- MMAPV1LockerImpl ls;
+ const ResourceId resIdDb(RESOURCE_DATABASE, string("db"));
+ ASSERT(ls.getLockMode(resIdDb) == MODE_S);
+}
- Lock::DBLock dbWrite(&ls, "db", MODE_X);
+TEST(DConcurrency, DBLockTakesX) {
+ MMAPV1LockerImpl ls;
- const ResourceId resIdDb(RESOURCE_DATABASE, string("db"));
- ASSERT(ls.getLockMode(resIdDb) == MODE_X);
- }
+ Lock::DBLock dbWrite(&ls, "db", MODE_X);
- TEST(DConcurrency, DBLockTakesISForAdminIS) {
- DefaultLockerImpl ls;
+ const ResourceId resIdDb(RESOURCE_DATABASE, string("db"));
+ ASSERT(ls.getLockMode(resIdDb) == MODE_X);
+}
- Lock::DBLock dbRead(&ls, "admin", MODE_IS);
+TEST(DConcurrency, DBLockTakesISForAdminIS) {
+ DefaultLockerImpl ls;
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_IS);
- }
+ Lock::DBLock dbRead(&ls, "admin", MODE_IS);
- TEST(DConcurrency, DBLockTakesSForAdminS) {
- DefaultLockerImpl ls;
+ ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_IS);
+}
- Lock::DBLock dbRead(&ls, "admin", MODE_S);
+TEST(DConcurrency, DBLockTakesSForAdminS) {
+ DefaultLockerImpl ls;
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_S);
- }
+ Lock::DBLock dbRead(&ls, "admin", MODE_S);
- TEST(DConcurrency, DBLockTakesXForAdminIX) {
- DefaultLockerImpl ls;
+ ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_S);
+}
- Lock::DBLock dbWrite(&ls, "admin", MODE_IX);
+TEST(DConcurrency, DBLockTakesXForAdminIX) {
+ DefaultLockerImpl ls;
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
- }
+ Lock::DBLock dbWrite(&ls, "admin", MODE_IX);
- TEST(DConcurrency, DBLockTakesXForAdminX) {
- DefaultLockerImpl ls;
+ ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
+}
- Lock::DBLock dbWrite(&ls, "admin", MODE_X);
+TEST(DConcurrency, DBLockTakesXForAdminX) {
+ DefaultLockerImpl ls;
- ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
- }
+ Lock::DBLock dbWrite(&ls, "admin", MODE_X);
- TEST(DConcurrency, MultipleWriteDBLocksOnSameThread) {
- MMAPV1LockerImpl ls;
+ ASSERT(ls.getLockMode(resourceIdAdminDB) == MODE_X);
+}
- Lock::DBLock r1(&ls, "db1", MODE_X);
- Lock::DBLock r2(&ls, "db1", MODE_X);
+TEST(DConcurrency, MultipleWriteDBLocksOnSameThread) {
+ MMAPV1LockerImpl ls;
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
- }
+ Lock::DBLock r1(&ls, "db1", MODE_X);
+ Lock::DBLock r2(&ls, "db1", MODE_X);
- TEST(DConcurrency, MultipleConflictingDBLocksOnSameThread) {
- MMAPV1LockerImpl ls;
+ ASSERT(ls.isDbLockedForMode("db1", MODE_X));
+}
- Lock::DBLock r1(&ls, "db1", MODE_X);
- Lock::DBLock r2(&ls, "db1", MODE_S);
+TEST(DConcurrency, MultipleConflictingDBLocksOnSameThread) {
+ MMAPV1LockerImpl ls;
- ASSERT(ls.isDbLockedForMode("db1", MODE_X));
- ASSERT(ls.isDbLockedForMode("db1", MODE_S));
- }
+ Lock::DBLock r1(&ls, "db1", MODE_X);
+ Lock::DBLock r2(&ls, "db1", MODE_S);
- TEST(DConcurrency, IsDbLockedForSMode) {
- const std::string dbName("db");
+ ASSERT(ls.isDbLockedForMode("db1", MODE_X));
+ ASSERT(ls.isDbLockedForMode("db1", MODE_S));
+}
- MMAPV1LockerImpl ls;
+TEST(DConcurrency, IsDbLockedForSMode) {
+ const std::string dbName("db");
- Lock::DBLock dbLock(&ls, dbName, MODE_S);
+ MMAPV1LockerImpl ls;
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
- ASSERT(!ls.isDbLockedForMode(dbName, MODE_IX));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
- ASSERT(!ls.isDbLockedForMode(dbName, MODE_X));
- }
+ Lock::DBLock dbLock(&ls, dbName, MODE_S);
- TEST(DConcurrency, IsDbLockedForXMode) {
- const std::string dbName("db");
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
+ ASSERT(!ls.isDbLockedForMode(dbName, MODE_IX));
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
+ ASSERT(!ls.isDbLockedForMode(dbName, MODE_X));
+}
- MMAPV1LockerImpl ls;
+TEST(DConcurrency, IsDbLockedForXMode) {
+ const std::string dbName("db");
- Lock::DBLock dbLock(&ls, dbName, MODE_X);
+ MMAPV1LockerImpl ls;
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_IX));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
- ASSERT(ls.isDbLockedForMode(dbName, MODE_X));
- }
+ Lock::DBLock dbLock(&ls, dbName, MODE_X);
- TEST(DConcurrency, IsCollectionLocked_DB_Locked_IS) {
- const std::string ns("db1.coll");
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_IS));
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_IX));
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_S));
+ ASSERT(ls.isDbLockedForMode(dbName, MODE_X));
+}
- MMAPV1LockerImpl ls;
+TEST(DConcurrency, IsCollectionLocked_DB_Locked_IS) {
+ const std::string ns("db1.coll");
- Lock::DBLock dbLock(&ls, "db1", MODE_IS);
+ MMAPV1LockerImpl ls;
- {
- Lock::CollectionLock collLock(&ls, ns, MODE_IS);
+ Lock::DBLock dbLock(&ls, "db1", MODE_IS);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
+ {
+ Lock::CollectionLock collLock(&ls, ns, MODE_IS);
- // TODO: This is TRUE because Lock::CollectionLock converts IS lock to S
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
- }
+ // TODO: This is TRUE because Lock::CollectionLock converts IS lock to S
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- {
- Lock::CollectionLock collLock(&ls, ns, MODE_S);
+ ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
+ }
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
- }
+ {
+ Lock::CollectionLock collLock(&ls, ns, MODE_S);
+
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(!ls.isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(!ls.isCollectionLockedForMode(ns, MODE_X));
}
+}
- TEST(DConcurrency, IsCollectionLocked_DB_Locked_IX) {
- const std::string ns("db1.coll");
+TEST(DConcurrency, IsCollectionLocked_DB_Locked_IX) {
+ const std::string ns("db1.coll");
- MMAPV1LockerImpl ls;
+ MMAPV1LockerImpl ls;
- Lock::DBLock dbLock(&ls, "db1", MODE_IX);
+ Lock::DBLock dbLock(&ls, "db1", MODE_IX);
- {
- Lock::CollectionLock collLock(&ls, ns, MODE_IX);
+ {
+ Lock::CollectionLock collLock(&ls, ns, MODE_IX);
- // TODO: This is TRUE because Lock::CollectionLock converts IX lock to X
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ // TODO: This is TRUE because Lock::CollectionLock converts IX lock to X
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
- }
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
+ }
- {
- Lock::CollectionLock collLock(&ls, ns, MODE_X);
+ {
+ Lock::CollectionLock collLock(&ls, ns, MODE_X);
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
- ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
- }
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IS));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_IX));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_S));
+ ASSERT(ls.isCollectionLockedForMode(ns, MODE_X));
}
+}
-} // namespace mongo
+} // namespace mongo
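The tests above do not exercise CollectionLock::relockAsDatabaseExclusive(); a minimal sketch of its contract, based on the header comments earlier in this diff, would look roughly like:

    MMAPV1LockerImpl ls;
    Lock::DBLock dbLock(&ls, "db1", MODE_IX);
    Lock::CollectionLock collLock(&ls, "db1.coll", MODE_IX);
    // Releases the collection and database locks, relocks the database in MODE_X and
    // reacquires the collection lock so the destructor still has something to unlock.
    collLock.relockAsDatabaseExclusive(dbLock);
    invariant(ls.isDbLockedForMode("db1", MODE_X));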
diff --git a/src/mongo/db/concurrency/deadlock_detection_test.cpp b/src/mongo/db/concurrency/deadlock_detection_test.cpp
index 87274255635..ce29fd37b01 100644
--- a/src/mongo/db/concurrency/deadlock_detection_test.cpp
+++ b/src/mongo/db/concurrency/deadlock_detection_test.cpp
@@ -31,161 +31,161 @@
namespace mongo {
- TEST(Deadlock, NoDeadlock) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
+TEST(Deadlock, NoDeadlock) {
+ const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
- LockerForTests locker1(MODE_IS);
- LockerForTests locker2(MODE_IS);
+ LockerForTests locker1(MODE_IS);
+ LockerForTests locker2(MODE_IS);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_S));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_S));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_S));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_S));
- DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
- ASSERT(!wfg1.check().hasCycle());
+ DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
+ ASSERT(!wfg1.check().hasCycle());
- DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
- ASSERT(!wfg2.check().hasCycle());
- }
+ DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
+ ASSERT(!wfg2.check().hasCycle());
+}
- TEST(Deadlock, Simple) {
- const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
- const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
+TEST(Deadlock, Simple) {
+ const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
+ const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
- LockerForTests locker1(MODE_IX);
- LockerForTests locker2(MODE_IX);
+ LockerForTests locker1(MODE_IX);
+ LockerForTests locker2(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
- // 1 -> 2
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
+ // 1 -> 2
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
- // 2 -> 1
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
+ // 2 -> 1
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
- DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
- ASSERT(wfg1.check().hasCycle());
+ DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
+ ASSERT(wfg1.check().hasCycle());
- DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
- ASSERT(wfg2.check().hasCycle());
+ DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
+ ASSERT(wfg2.check().hasCycle());
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- locker1.unlock(resIdB);
- locker2.unlock(resIdA);
- }
+ // Cleanup, so that LockerImpl doesn't complain about leaked locks
+ locker1.unlock(resIdB);
+ locker2.unlock(resIdA);
+}
- TEST(Deadlock, SimpleUpgrade) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
+TEST(Deadlock, SimpleUpgrade) {
+ const ResourceId resId(RESOURCE_DATABASE, std::string("A"));
- LockerForTests locker1(MODE_IX);
- LockerForTests locker2(MODE_IX);
+ LockerForTests locker1(MODE_IX);
+ LockerForTests locker2(MODE_IX);
- // Both acquire lock in intent mode
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_IX));
+ // Both acquire lock in intent mode
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resId, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resId, MODE_IX));
- // Both try to upgrade
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resId, MODE_X));
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resId, MODE_X));
+ // Both try to upgrade
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resId, MODE_X));
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resId, MODE_X));
- DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
- ASSERT(wfg1.check().hasCycle());
+ DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
+ ASSERT(wfg1.check().hasCycle());
- DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
- ASSERT(wfg2.check().hasCycle());
+ DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
+ ASSERT(wfg2.check().hasCycle());
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- locker1.unlock(resId);
- locker2.unlock(resId);
- }
+ // Cleanup, so that LockerImpl doesn't complain about leaked locks
+ locker1.unlock(resId);
+ locker2.unlock(resId);
+}
- TEST(Deadlock, Indirect) {
- const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
- const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
+TEST(Deadlock, Indirect) {
+ const ResourceId resIdA(RESOURCE_DATABASE, std::string("A"));
+ const ResourceId resIdB(RESOURCE_DATABASE, std::string("B"));
- LockerForTests locker1(MODE_IX);
- LockerForTests locker2(MODE_IX);
- LockerForTests lockerIndirect(MODE_IX);
+ LockerForTests locker1(MODE_IX);
+ LockerForTests locker2(MODE_IX);
+ LockerForTests lockerIndirect(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
- ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker1.lockBegin(resIdA, MODE_X));
+ ASSERT_EQUALS(LOCK_OK, locker2.lockBegin(resIdB, MODE_X));
- // 1 -> 2
- ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
+ // 1 -> 2
+ ASSERT_EQUALS(LOCK_WAITING, locker1.lockBegin(resIdB, MODE_X));
- // 2 -> 1
- ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
+ // 2 -> 1
+ ASSERT_EQUALS(LOCK_WAITING, locker2.lockBegin(resIdA, MODE_X));
- // 3 -> 2
- ASSERT_EQUALS(LOCK_WAITING, lockerIndirect.lockBegin(resIdA, MODE_X));
+ // 3 -> 2
+ ASSERT_EQUALS(LOCK_WAITING, lockerIndirect.lockBegin(resIdA, MODE_X));
- DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
- ASSERT(wfg1.check().hasCycle());
+ DeadlockDetector wfg1(*getGlobalLockManager(), &locker1);
+ ASSERT(wfg1.check().hasCycle());
- DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
- ASSERT(wfg2.check().hasCycle());
+ DeadlockDetector wfg2(*getGlobalLockManager(), &locker2);
+ ASSERT(wfg2.check().hasCycle());
- // Indirect locker should not report the cycle since it does not participate in it
- DeadlockDetector wfgIndirect(*getGlobalLockManager(), &lockerIndirect);
- ASSERT(!wfgIndirect.check().hasCycle());
+ // Indirect locker should not report the cycle since it does not participate in it
+ DeadlockDetector wfgIndirect(*getGlobalLockManager(), &lockerIndirect);
+ ASSERT(!wfgIndirect.check().hasCycle());
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- locker1.unlock(resIdB);
- locker2.unlock(resIdA);
- }
+ // Cleanup, so that LockerImpl doesn't complain about leaked locks
+ locker1.unlock(resIdB);
+ locker2.unlock(resIdA);
+}
- TEST(Deadlock, IndirectWithUpgrade) {
- const ResourceId resIdFlush(RESOURCE_MMAPV1_FLUSH, 1);
- const ResourceId resIdDb(RESOURCE_DATABASE, 2);
+TEST(Deadlock, IndirectWithUpgrade) {
+ const ResourceId resIdFlush(RESOURCE_MMAPV1_FLUSH, 1);
+ const ResourceId resIdDb(RESOURCE_DATABASE, 2);
- LockerForTests flush(MODE_IX);
- LockerForTests reader(MODE_IS);
- LockerForTests writer(MODE_IX);
+ LockerForTests flush(MODE_IX);
+ LockerForTests reader(MODE_IS);
+ LockerForTests writer(MODE_IX);
- // This sequence simulates the deadlock which occurs during flush
- ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdFlush, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdDb, MODE_X));
+ // This sequence simulates the deadlock which occurs during flush
+ ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdFlush, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, writer.lockBegin(resIdDb, MODE_X));
- ASSERT_EQUALS(LOCK_OK, reader.lockBegin(resIdFlush, MODE_IS));
+ ASSERT_EQUALS(LOCK_OK, reader.lockBegin(resIdFlush, MODE_IS));
- // R -> W
- ASSERT_EQUALS(LOCK_WAITING, reader.lockBegin(resIdDb, MODE_S));
+ // R -> W
+ ASSERT_EQUALS(LOCK_WAITING, reader.lockBegin(resIdDb, MODE_S));
- // R -> W
- // F -> W
- ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_S));
+ // R -> W
+ // F -> W
+ ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_S));
- // W yields its flush lock, so now f is granted in mode S
- //
- // R -> W
- writer.unlock(resIdFlush);
+ // W yields its flush lock, so now f is granted in mode S
+ //
+ // R -> W
+ writer.unlock(resIdFlush);
- // Flush thread upgrades S -> X in order to do the remap
- //
- // R -> W
- // F -> R
- ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_X));
+ // Flush thread upgrades S -> X in order to do the remap
+ //
+ // R -> W
+ // F -> R
+ ASSERT_EQUALS(LOCK_WAITING, flush.lockBegin(resIdFlush, MODE_X));
- // W comes back from the commit and tries to re-acquire the flush lock
- //
- // R -> W
- // F -> R
- // W -> F
- ASSERT_EQUALS(LOCK_WAITING, writer.lockBegin(resIdFlush, MODE_IX));
+ // W comes back from the commit and tries to re-acquire the flush lock
+ //
+ // R -> W
+ // F -> R
+ // W -> F
+ ASSERT_EQUALS(LOCK_WAITING, writer.lockBegin(resIdFlush, MODE_IX));
- // Run deadlock detection from the point of view of each of the involved lockers
- DeadlockDetector wfgF(*getGlobalLockManager(), &flush);
- ASSERT(wfgF.check().hasCycle());
+ // Run deadlock detection from the point of view of each of the involved lockers
+ DeadlockDetector wfgF(*getGlobalLockManager(), &flush);
+ ASSERT(wfgF.check().hasCycle());
- DeadlockDetector wfgR(*getGlobalLockManager(), &reader);
- ASSERT(wfgR.check().hasCycle());
+ DeadlockDetector wfgR(*getGlobalLockManager(), &reader);
+ ASSERT(wfgR.check().hasCycle());
- DeadlockDetector wfgW(*getGlobalLockManager(), &writer);
- ASSERT(wfgW.check().hasCycle());
+ DeadlockDetector wfgW(*getGlobalLockManager(), &writer);
+ ASSERT(wfgW.check().hasCycle());
- // Cleanup, so that LockerImpl doesn't complain about leaked locks
- flush.unlock(resIdFlush);
- writer.unlock(resIdFlush);
- }
+ // Cleanup, so that LockerImpl doesn't complain about leaked locks
+ flush.unlock(resIdFlush);
+ writer.unlock(resIdFlush);
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/fast_map_noalloc.h b/src/mongo/db/concurrency/fast_map_noalloc.h
index e7077a56222..cc2ccdb64ae 100644
--- a/src/mongo/db/concurrency/fast_map_noalloc.h
+++ b/src/mongo/db/concurrency/fast_map_noalloc.h
@@ -32,244 +32,229 @@
#include "mongo/util/assert_util.h"
namespace mongo {
-
+
+/**
+ * NOTE: This structure should not be used for anything other than the Lock Manager.
+ *
+ * This is a simple implementation of an unordered associative array with minimal
+ * functionality, used by the lock manager. It keeps a small number of memory entries to store
+ * values, in order to avoid memory allocations, which dominate the cost of the lock manager
+ * calls by a wide margin.
+ *
+ * This class is not thread-safe.
+ */
+template <class KeyType, class ValueType, int PreallocCount>
+class FastMapNoAlloc {
+public:
/**
- * NOTE: This structure should not be used for anything other than the Lock Manager.
- *
- * This is a simple implementation of an unordered associative array with minimal
- * functionality, used by the lock manager. It keeps a small number of memory entries to store
- * values, in order to avoid memory allocations, which dominate the cost of the lock manager
- * calls by a wide margin.
- *
- * This class is not thread-safe.
+ * Forward-only iterator. Does not synchronize with the underlying collection in any way.
+ * In other words, do not modify the collection while there is an open iterator on it.
*/
- template <class KeyType, class ValueType, int PreallocCount>
- class FastMapNoAlloc {
+ template <class MapType, class IteratorValueType>
+ class IteratorImpl {
public:
+ IteratorImpl(const IteratorImpl& other) : _map(other._map), _idx(other._idx) {}
- /**
- * Forward-only iterator. Does not synchronize with the underlying collection in any way.
- * In other words, do not modify the collection while there is an open iterator on it.
- */
- template<class MapType, class IteratorValueType>
- class IteratorImpl {
- public:
-
- IteratorImpl(const IteratorImpl& other)
- : _map(other._map),
- _idx(other._idx) {
-
- }
+ //
+ // Operators
+ //
- //
- // Operators
- //
+ bool operator!() const {
+ return finished();
+ }
- bool operator!() const {
- return finished();
- }
+ IteratorValueType& operator*() const {
+ return *objAddr();
+ }
- IteratorValueType& operator*() const {
- return *objAddr();
- }
+ IteratorValueType* operator->() const {
+ return objAddr();
+ }
- IteratorValueType* operator->() const {
- return objAddr();
- }
+ //
+ // Other methods
+ //
- //
- // Other methods
- //
+ /**
+ * Returns whether the iterator has been exhausted through calls to next. This value
+ * can be used to determine whether a previous call to find has found something.
+ */
+ bool finished() const {
+ return (MONGO_unlikely(_idx == PreallocCount));
+ }
- /**
- * Returns whether the iterator has been exhausted through calls to next. This value
- * can be used to determine whether a previous call to find has found something.
- */
- bool finished() const {
- return (MONGO_unlikely(_idx == PreallocCount));
- }
+ /**
+ * Returns the address of the object at the current position. Cannot be called with an
+ * uninitialized iterator, or iterator which has reached the end.
+ */
+ IteratorValueType* objAddr() const {
+ invariant(!finished());
- /**
- * Returns the address of the object at the current position. Cannot be called with an
- * uninitialized iterator, or iterator which has reached the end.
- */
- IteratorValueType* objAddr() const {
- invariant(!finished());
+ return &_map._fastAccess[_idx].value;
+ }
- return &_map._fastAccess[_idx].value;
- }
+ /**
+ * Returns the key of the value at the current position. Cannot be called with an
+ * uninitialized iterator or iterator which has reached the end.
+ */
+ const KeyType& key() const {
+ invariant(!finished());
- /**
- * Returns the key of the value at the current position. Cannot be called with an
- * uninitialized iterator or iterator which has reached the end.
- */
- const KeyType& key() const {
- invariant(!finished());
+ return _map._fastAccess[_idx].key;
+ }
- return _map._fastAccess[_idx].key;
- }
+ /**
+ * Advances the iterator to the next entry. No particular order of iteration is
+ * guaranteed.
+ */
+ void next() {
+ invariant(!finished());
- /**
- * Advances the iterator to the next entry. No particular order of iteration is
- * guaranteed.
- */
- void next() {
- invariant(!finished());
-
- while (++_idx < PreallocCount) {
- if (_map._fastAccess[_idx].inUse) {
- return;
- }
+ while (++_idx < PreallocCount) {
+ if (_map._fastAccess[_idx].inUse) {
+ return;
}
}
+ }
- /**
- * Removes the element at the current position and moves the iterator to the next,
- * which might be the last entry on the map.
- */
- void remove() {
- invariant(!finished());
- invariant(_map._fastAccess[_idx].inUse);
-
- _map._fastAccess[_idx].inUse = false;
- _map._fastAccessUsedSize--;
-
- next();
- }
-
-
- private:
-
- friend class FastMapNoAlloc<KeyType, ValueType, PreallocCount>;
+ /**
+ * Removes the element at the current position and moves the iterator to the next,
+ * which might be the last entry on the map.
+ */
+ void remove() {
+ invariant(!finished());
+ invariant(_map._fastAccess[_idx].inUse);
- // Used for iteration of the complete map
- IteratorImpl(MapType& map)
- : _map(map),
- _idx(-1) {
+ _map._fastAccess[_idx].inUse = false;
+ _map._fastAccessUsedSize--;
- next();
- }
+ next();
+ }
- // Used for iterator starting at a position
- IteratorImpl(MapType& map, int idx)
- : _map(map),
- _idx(idx) {
- invariant(_idx >= 0);
- }
+ private:
+ friend class FastMapNoAlloc<KeyType, ValueType, PreallocCount>;
- // Used for iteration starting at a particular key
- IteratorImpl(MapType& map, const KeyType& key)
- : _map(map),
- _idx(0) {
+ // Used for iteration of the complete map
+ IteratorImpl(MapType& map) : _map(map), _idx(-1) {
+ next();
+ }
- while (_idx < PreallocCount) {
- if (_map._fastAccess[_idx].inUse && (_map._fastAccess[_idx].key == key)) {
- return;
- }
+ // Used for iterator starting at a position
+ IteratorImpl(MapType& map, int idx) : _map(map), _idx(idx) {
+ invariant(_idx >= 0);
+ }
- ++_idx;
+ // Used for iteration starting at a particular key
+ IteratorImpl(MapType& map, const KeyType& key) : _map(map), _idx(0) {
+ while (_idx < PreallocCount) {
+ if (_map._fastAccess[_idx].inUse && (_map._fastAccess[_idx].key == key)) {
+ return;
}
- }
+ ++_idx;
+ }
+ }
- // The map being iterated on
- MapType& _map;
-
- // Index to the current entry being iterated
- int _idx;
- };
+ // The map being iterated on
+ MapType& _map;
- typedef IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- ValueType> Iterator;
+ // Index to the current entry being iterated
+ int _idx;
+ };
- typedef IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- const ValueType> ConstIterator;
+ typedef IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>, ValueType> Iterator;
- FastMapNoAlloc() : _fastAccess(),
- _fastAccessUsedSize(0) { }
+ typedef IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>, const ValueType>
+ ConstIterator;
- /**
- * Inserts the specified entry in the map and returns a reference to the memory for the
- * entry just inserted.
- */
- Iterator insert(const KeyType& key) {
- // Find the first unused slot. This could probably be even further optimized by adding
- // a field pointing to the first unused location.
- int idx = 0;
- for (; _fastAccess[idx].inUse; idx++);
- invariant(idx < PreallocCount);
+ FastMapNoAlloc() : _fastAccess(), _fastAccessUsedSize(0) {}
- _fastAccess[idx].inUse = true;
- _fastAccess[idx].key = key;
- _fastAccessUsedSize++;
+ /**
+ * Inserts the specified entry in the map and returns a reference to the memory for the
+ * entry just inserted.
+ */
+ Iterator insert(const KeyType& key) {
+ // Find the first unused slot. This could probably be even further optimized by adding
+ // a field pointing to the first unused location.
+ int idx = 0;
+ for (; _fastAccess[idx].inUse; idx++)
+ ;
- return Iterator(*this, idx);
- }
+ invariant(idx < PreallocCount);
- /**
- * Returns an iterator to the first element in the map.
- */
- Iterator begin() {
- return Iterator(*this);
- }
+ _fastAccess[idx].inUse = true;
+ _fastAccess[idx].key = key;
+ _fastAccessUsedSize++;
- ConstIterator begin() const {
- return ConstIterator(*this);
- }
+ return Iterator(*this, idx);
+ }
- /**
- * Returns an iterator pointing to the first position, which has entry with the specified
- * key. Before dereferencing the returned iterator, it should be checked for validity using
- * the finished() method or the ! operator. If no element was found, finished() will return
- * false.
- *
- * While it is allowed to call next() on the returned iterator, this is not very useful,
- * because the container is not ordered.
- */
- Iterator find(const KeyType& key) {
- return Iterator(*this, key);
- }
+ /**
+ * Returns an iterator to the first element in the map.
+ */
+ Iterator begin() {
+ return Iterator(*this);
+ }
- ConstIterator find(const KeyType& key) const {
- return ConstIterator(*this, key);
- }
+ ConstIterator begin() const {
+ return ConstIterator(*this);
+ }
- int size() const { return _fastAccessUsedSize; }
- bool empty() const { return (_fastAccessUsedSize == 0); }
+ /**
+ * Returns an iterator pointing to the first position, which has entry with the specified
+ * key. Before dereferencing the returned iterator, it should be checked for validity using
+     * the finished() method or the ! operator. If no element was found, finished() will return
+     * true.
+ *
+ * While it is allowed to call next() on the returned iterator, this is not very useful,
+ * because the container is not ordered.
+ */
+ Iterator find(const KeyType& key) {
+ return Iterator(*this, key);
+ }
- private:
+ ConstIterator find(const KeyType& key) const {
+ return ConstIterator(*this, key);
+ }
- // Empty and very large maps do not make sense since there will be no performance gain, so
- // disallow them.
- BOOST_STATIC_ASSERT(PreallocCount > 0);
- BOOST_STATIC_ASSERT(PreallocCount < 32);
+ int size() const {
+ return _fastAccessUsedSize;
+ }
+ bool empty() const {
+ return (_fastAccessUsedSize == 0);
+ }
- // Iterator accesses the map directly
- friend class IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- ValueType>;
+private:
+ // Empty and very large maps do not make sense since there will be no performance gain, so
+ // disallow them.
+ BOOST_STATIC_ASSERT(PreallocCount > 0);
+ BOOST_STATIC_ASSERT(PreallocCount < 32);
- friend class IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
- const ValueType>;
+ // Iterator accesses the map directly
+ friend class IteratorImpl<FastMapNoAlloc<KeyType, ValueType, PreallocCount>, ValueType>;
+ friend class IteratorImpl<const FastMapNoAlloc<KeyType, ValueType, PreallocCount>,
+ const ValueType>;
- struct PreallocEntry {
- PreallocEntry() : inUse(false) { }
- bool inUse;
+ struct PreallocEntry {
+ PreallocEntry() : inUse(false) {}
- KeyType key;
- ValueType value;
- };
+ bool inUse;
- // Pre-allocated memory for entries
- PreallocEntry _fastAccess[PreallocCount];
- int _fastAccessUsedSize;
+ KeyType key;
+ ValueType value;
};
-} // namespace mongo
+ // Pre-allocated memory for entries
+ PreallocEntry _fastAccess[PreallocCount];
+ int _fastAccessUsedSize;
+};
+
+} // namespace mongo
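A minimal usage sketch of the insert/find/remove contract documented above; the int value type and the key are illustrative only:

    FastMapNoAlloc<ResourceId, int, 8> map;  // PreallocCount must be > 0 and < 32
    const ResourceId key(RESOURCE_DATABASE, std::string("db"));
    *map.insert(key) = 1;                    // insert() returns an iterator to the new slot
    invariant(!map.find(key).finished());    // found, so finished() is false
    map.find(key).remove();                  // remove via the iterator returned by find()
    invariant(map.empty());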
diff --git a/src/mongo/db/concurrency/fast_map_noalloc_test.cpp b/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
index 70810cb6b5a..e9a013427df 100644
--- a/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
+++ b/src/mongo/db/concurrency/fast_map_noalloc_test.cpp
@@ -36,127 +36,125 @@
namespace mongo {
- struct TestStruct {
-
- void initNew(int newId, const std::string& newValue) {
- id = newId;
- value = newValue;
- }
-
- int id;
- std::string value;
- };
+struct TestStruct {
+ void initNew(int newId, const std::string& newValue) {
+ id = newId;
+ value = newValue;
+ }
- typedef class FastMapNoAlloc<ResourceId, TestStruct, 6> TestFastMapNoAlloc;
+ int id;
+ std::string value;
+};
+typedef class FastMapNoAlloc<ResourceId, TestStruct, 6> TestFastMapNoAlloc;
- TEST(FastMapNoAlloc, Empty) {
- TestFastMapNoAlloc map;
- ASSERT(map.empty());
- TestFastMapNoAlloc::Iterator it = map.begin();
- ASSERT(it.finished());
- }
+TEST(FastMapNoAlloc, Empty) {
+ TestFastMapNoAlloc map;
+ ASSERT(map.empty());
- TEST(FastMapNoAlloc, NotEmpty) {
- TestFastMapNoAlloc map;
+ TestFastMapNoAlloc::Iterator it = map.begin();
+ ASSERT(it.finished());
+}
- map.insert(ResourceId(RESOURCE_COLLECTION, 1))->initNew(101, "Item101");
- map.insert(ResourceId(RESOURCE_COLLECTION, 2))->initNew(102, "Item102");
- ASSERT(!map.empty());
+TEST(FastMapNoAlloc, NotEmpty) {
+ TestFastMapNoAlloc map;
- TestFastMapNoAlloc::Iterator it = map.begin();
- ASSERT(!it.finished());
- ASSERT(!!it);
+ map.insert(ResourceId(RESOURCE_COLLECTION, 1))->initNew(101, "Item101");
+ map.insert(ResourceId(RESOURCE_COLLECTION, 2))->initNew(102, "Item102");
+ ASSERT(!map.empty());
- ASSERT(it->id == 101);
- ASSERT(it->value == "Item101");
+ TestFastMapNoAlloc::Iterator it = map.begin();
+ ASSERT(!it.finished());
+ ASSERT(!!it);
- it.next();
- ASSERT(!it.finished());
- ASSERT(!!it);
+ ASSERT(it->id == 101);
+ ASSERT(it->value == "Item101");
- ASSERT(it->id == 102);
- ASSERT(it->value == "Item102");
+ it.next();
+ ASSERT(!it.finished());
+ ASSERT(!!it);
- // We are at the last element
- it.next();
- ASSERT(it.finished());
- ASSERT(!it);
- }
+ ASSERT(it->id == 102);
+ ASSERT(it->value == "Item102");
- TEST(FastMapNoAlloc, FindNonExisting) {
- TestFastMapNoAlloc map;
+ // We are at the last element
+ it.next();
+ ASSERT(it.finished());
+ ASSERT(!it);
+}
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 0)));
- }
+TEST(FastMapNoAlloc, FindNonExisting) {
+ TestFastMapNoAlloc map;
- TEST(FastMapNoAlloc, FindAndRemove) {
- TestFastMapNoAlloc map;
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 0)));
+}
- for (int i = 0; i < 6; i++) {
- map.insert(ResourceId(RESOURCE_COLLECTION, i))->initNew(
- i, "Item" + boost::lexical_cast<std::string>(i));
- }
+TEST(FastMapNoAlloc, FindAndRemove) {
+ TestFastMapNoAlloc map;
- for (int i = 0; i < 6; i++) {
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, i)).finished());
+ for (int i = 0; i < 6; i++) {
+ map.insert(ResourceId(RESOURCE_COLLECTION, i))
+ ->initNew(i, "Item" + boost::lexical_cast<std::string>(i));
+ }
- ASSERT_EQUALS(i, map.find(ResourceId(RESOURCE_COLLECTION, i))->id);
+ for (int i = 0; i < 6; i++) {
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, i)).finished());
- ASSERT_EQUALS("Item" + boost::lexical_cast<std::string>(i),
- map.find(ResourceId(RESOURCE_COLLECTION, i))->value);
- }
+ ASSERT_EQUALS(i, map.find(ResourceId(RESOURCE_COLLECTION, i))->id);
- // Remove a middle entry
- map.find(ResourceId(RESOURCE_COLLECTION, 2)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 2)));
+ ASSERT_EQUALS("Item" + boost::lexical_cast<std::string>(i),
+ map.find(ResourceId(RESOURCE_COLLECTION, i))->value);
+ }
- // Remove entry after first
- map.find(ResourceId(RESOURCE_COLLECTION, 1)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 1)));
+ // Remove a middle entry
+ map.find(ResourceId(RESOURCE_COLLECTION, 2)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 2)));
- // Remove entry before last
- map.find(ResourceId(RESOURCE_COLLECTION, 4)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 4)));
+ // Remove entry after first
+ map.find(ResourceId(RESOURCE_COLLECTION, 1)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 1)));
- // Remove first entry
- map.find(ResourceId(RESOURCE_COLLECTION, 0)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 0)));
+ // Remove entry before last
+ map.find(ResourceId(RESOURCE_COLLECTION, 4)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 4)));
- // Remove last entry
- map.find(ResourceId(RESOURCE_COLLECTION, 5)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 5)));
+ // Remove first entry
+ map.find(ResourceId(RESOURCE_COLLECTION, 0)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 0)));
- // Remove final entry
- map.find(ResourceId(RESOURCE_COLLECTION, 3)).remove();
- ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 3)));
- }
+ // Remove last entry
+ map.find(ResourceId(RESOURCE_COLLECTION, 5)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 5)));
- TEST(FastMapNoAlloc, RemoveAll) {
- TestFastMapNoAlloc map;
- unordered_map<ResourceId, TestStruct> checkMap;
+ // Remove final entry
+ map.find(ResourceId(RESOURCE_COLLECTION, 3)).remove();
+ ASSERT(!map.find(ResourceId(RESOURCE_COLLECTION, 3)));
+}
- for (int i = 1; i <= 6; i++) {
- map.insert(ResourceId(RESOURCE_COLLECTION, i))->initNew(
- i, "Item" + boost::lexical_cast<std::string>(i));
+TEST(FastMapNoAlloc, RemoveAll) {
+ TestFastMapNoAlloc map;
+ unordered_map<ResourceId, TestStruct> checkMap;
- checkMap[ResourceId(RESOURCE_COLLECTION, i)].initNew(
- i, "Item" + boost::lexical_cast<std::string>(i));
- }
+ for (int i = 1; i <= 6; i++) {
+ map.insert(ResourceId(RESOURCE_COLLECTION, i))
+ ->initNew(i, "Item" + boost::lexical_cast<std::string>(i));
- TestFastMapNoAlloc::Iterator it = map.begin();
- while (!it.finished()) {
- ASSERT_EQUALS(it->id, checkMap[it.key()].id);
- ASSERT_EQUALS(
- "Item" + boost::lexical_cast<std::string>(it->id), checkMap[it.key()].value);
+ checkMap[ResourceId(RESOURCE_COLLECTION, i)].initNew(
+ i, "Item" + boost::lexical_cast<std::string>(i));
+ }
- checkMap.erase(it.key());
- it.remove();
- }
+ TestFastMapNoAlloc::Iterator it = map.begin();
+ while (!it.finished()) {
+ ASSERT_EQUALS(it->id, checkMap[it.key()].id);
+ ASSERT_EQUALS("Item" + boost::lexical_cast<std::string>(it->id), checkMap[it.key()].value);
- ASSERT(map.empty());
- ASSERT(checkMap.empty());
+ checkMap.erase(it.key());
+ it.remove();
}
-} // namespace mongo
+ ASSERT(map.empty());
+ ASSERT(checkMap.empty());
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp
index 7a0e094ea30..c82e2d0ee5c 100644
--- a/src/mongo/db/concurrency/lock_manager.cpp
+++ b/src/mongo/db/concurrency/lock_manager.cpp
@@ -43,997 +43,933 @@
namespace mongo {
- using std::string;
+using std::string;
namespace {
- /**
- * Map of conflicts. 'LockConflictsTable[newMode] & existingMode != 0' means that a new request
- * with the given 'newMode' conflicts with an existing request with mode 'existingMode'.
- */
- static const int LockConflictsTable[] = {
- // MODE_NONE
- 0,
+/**
+ * Map of conflicts. 'LockConflictsTable[newMode] & existingMode != 0' means that a new request
+ * with the given 'newMode' conflicts with an existing request with mode 'existingMode'.
+ */
+static const int LockConflictsTable[] = {
+ // MODE_NONE
+ 0,
- // MODE_IS
- (1 << MODE_X),
+ // MODE_IS
+ (1 << MODE_X),
- // MODE_IX
- (1 << MODE_S) | (1 << MODE_X),
+ // MODE_IX
+ (1 << MODE_S) | (1 << MODE_X),
- // MODE_S
- (1 << MODE_IX) | (1 << MODE_X),
+ // MODE_S
+ (1 << MODE_IX) | (1 << MODE_X),
- // MODE_X
- (1 << MODE_S) | (1 << MODE_X) | (1 << MODE_IS) | (1 << MODE_IX),
- };
+ // MODE_X
+ (1 << MODE_S) | (1 << MODE_X) | (1 << MODE_IS) | (1 << MODE_IX),
+};
- // Mask of modes
- const uint64_t intentModes = (1 << MODE_IS) | (1<< MODE_IX);
+// Mask of modes
+const uint64_t intentModes = (1 << MODE_IS) | (1 << MODE_IX);
- // Ensure we do not add new modes without updating the conflicts table
- BOOST_STATIC_ASSERT(
- (sizeof(LockConflictsTable) / sizeof(LockConflictsTable[0])) == LockModesCount);
+// Ensure we do not add new modes without updating the conflicts table
+BOOST_STATIC_ASSERT((sizeof(LockConflictsTable) / sizeof(LockConflictsTable[0])) == LockModesCount);
- /**
- * Maps the mode id to a string.
- */
- static const char* LockModeNames[] = {
- "NONE", "IS", "IX", "S", "X"
- };
+/**
+ * Maps the mode id to a string.
+ */
+static const char* LockModeNames[] = {"NONE", "IS", "IX", "S", "X"};
- static const char* LegacyLockModeNames[] = {
- "", "r", "w", "R", "W"
- };
+static const char* LegacyLockModeNames[] = {"", "r", "w", "R", "W"};
- // Ensure we do not add new modes without updating the names array
- BOOST_STATIC_ASSERT((sizeof(LockModeNames) / sizeof(LockModeNames[0])) == LockModesCount);
- BOOST_STATIC_ASSERT(
- (sizeof(LegacyLockModeNames) / sizeof(LegacyLockModeNames[0])) == LockModesCount);
+// Ensure we do not add new modes without updating the names array
+BOOST_STATIC_ASSERT((sizeof(LockModeNames) / sizeof(LockModeNames[0])) == LockModesCount);
+BOOST_STATIC_ASSERT((sizeof(LegacyLockModeNames) / sizeof(LegacyLockModeNames[0])) ==
+ LockModesCount);
- // Helper functions for the lock modes
- bool conflicts(LockMode newMode, uint32_t existingModesMask) {
- return (LockConflictsTable[newMode] & existingModesMask) != 0;
- }
+// Helper functions for the lock modes
+bool conflicts(LockMode newMode, uint32_t existingModesMask) {
+ return (LockConflictsTable[newMode] & existingModesMask) != 0;
+}
- uint32_t modeMask(LockMode mode) {
- return 1 << mode;
- }
+uint32_t modeMask(LockMode mode) {
+ return 1 << mode;
+}
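For orientation, a minimal sketch of how these helpers combine; each outcome follows directly from LockConflictsTable above:

    uint32_t granted = modeMask(MODE_IS) | modeMask(MODE_IX);
    invariant(!conflicts(MODE_IS, granted));          // intent modes are mutually compatible
    invariant(conflicts(MODE_X, granted));            // X conflicts with any existing holder
    invariant(conflicts(MODE_S, modeMask(MODE_IX)));  // S conflicts with IX, but not with IS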
- /**
- * Maps the resource id to a human-readable string.
- */
- static const char* ResourceTypeNames[] = {
- "Invalid",
- "Global",
- "MMAPV1Journal",
- "Database",
- "Collection",
- "Metadata",
- };
+/**
+ * Maps the resource id to a human-readable string.
+ */
+static const char* ResourceTypeNames[] = {
+ "Invalid", "Global", "MMAPV1Journal", "Database", "Collection", "Metadata",
+};
- // Ensure we do not add new types without updating the names array
- BOOST_STATIC_ASSERT(
- (sizeof(ResourceTypeNames) / sizeof(ResourceTypeNames[0])) == ResourceTypesCount);
+// Ensure we do not add new types without updating the names array
+BOOST_STATIC_ASSERT((sizeof(ResourceTypeNames) / sizeof(ResourceTypeNames[0])) ==
+ ResourceTypesCount);
- /**
- * Maps the LockRequest status to a human-readable string.
- */
- static const char* LockRequestStatusNames[] = {
- "new",
- "granted",
- "waiting",
- "converting",
- };
+/**
+ * Maps the LockRequest status to a human-readable string.
+ */
+static const char* LockRequestStatusNames[] = {
+ "new", "granted", "waiting", "converting",
+};
- // Ensure we do not add new status types without updating the names array
- BOOST_STATIC_ASSERT(
- (sizeof(LockRequestStatusNames) / sizeof(LockRequestStatusNames[0]))
- == LockRequest::StatusCount);
+// Ensure we do not add new status types without updating the names array
+BOOST_STATIC_ASSERT((sizeof(LockRequestStatusNames) / sizeof(LockRequestStatusNames[0])) ==
+ LockRequest::StatusCount);
-} // namespace
+} // namespace
+/**
+ * There is one of these objects for each resource that has a lock request. Empty objects
+ * (i.e. LockHead with no requests) are allowed to exist on the lock manager's hash table.
+ *
+ * The memory and lifetime is controlled entirely by the LockManager class.
+ *
+ * Not thread-safe and should only be accessed under the LockManager's bucket lock.
+ * Must be locked before locking a partition, not after.
+ */
+struct LockHead {
/**
- * There is one of these objects for each resource that has a lock request. Empty objects
- * (i.e. LockHead with no requests) are allowed to exist on the lock manager's hash table.
- *
- * The memory and lifetime is controlled entirely by the LockManager class.
- *
- * Not thread-safe and should only be accessed under the LockManager's bucket lock.
- * Must be locked before locking a partition, not after.
+ * Used for initialization of a LockHead, which might have been retrieved from cache and
+ * also in order to keep the LockHead structure a POD.
*/
- struct LockHead {
+ void initNew(ResourceId resId) {
+ resourceId = resId;
- /**
- * Used for initialization of a LockHead, which might have been retrieved from cache and
- * also in order to keep the LockHead structure a POD.
- */
- void initNew(ResourceId resId) {
- resourceId = resId;
+ grantedList.reset();
+ memset(grantedCounts, 0, sizeof(grantedCounts));
+ grantedModes = 0;
- grantedList.reset();
- memset(grantedCounts, 0, sizeof(grantedCounts));
- grantedModes = 0;
+ conflictList.reset();
+ memset(conflictCounts, 0, sizeof(conflictCounts));
+ conflictModes = 0;
- conflictList.reset();
- memset(conflictCounts, 0, sizeof(conflictCounts));
- conflictModes = 0;
+ conversionsCount = 0;
+ compatibleFirstCount = 0;
+ }
- conversionsCount = 0;
- compatibleFirstCount = 0;
- }
+ /**
+ * True iff there may be partitions with granted requests for this
+ * resource.
+ */
+ bool partitioned() const {
+ return !partitions.empty();
+ }
- /**
- * True iff there may be partitions with granted requests for this
- * resource.
- */
- bool partitioned() const {
- return !partitions.empty();
+ /**
+ * Locates the request corresponding to the particular locker or returns NULL. Must be
+ * called with the bucket holding this lock head locked.
+ */
+ LockRequest* findRequest(LockerId lockerId) const {
+ // Check the granted queue first
+ for (LockRequest* it = grantedList._front; it != NULL; it = it->next) {
+ if (it->locker->getId() == lockerId) {
+ return it;
+ }
}
- /**
- * Locates the request corresponding to the particular locker or returns NULL. Must be
- * called with the bucket holding this lock head locked.
- */
- LockRequest* findRequest(LockerId lockerId) const {
- // Check the granted queue first
- for (LockRequest* it = grantedList._front; it != NULL; it = it->next) {
- if (it->locker->getId() == lockerId) {
- return it;
- }
+ // Check the conflict queue second
+ for (LockRequest* it = conflictList._front; it != NULL; it = it->next) {
+ if (it->locker->getId() == lockerId) {
+ return it;
}
+ }
- // Check the conflict queue second
- for (LockRequest* it = conflictList._front; it != NULL; it = it->next) {
- if (it->locker->getId() == lockerId) {
- return it;
- }
- }
+ return NULL;
+ }
- return NULL;
+ /**
+ * Finish creation of request and put it on the lockhead's conflict or granted queues.
+ * Returns LOCK_WAITING for conflict case and LOCK_OK otherwise.
+ */
+ LockResult newRequest(LockRequest* request, LockMode mode) {
+ request->mode = mode;
+ request->lock = this;
+ request->partitionedLock = NULL;
+ if (!partitioned()) {
+ request->recursiveCount = 1;
}
-
- /**
- * Finish creation of request and put it on the lockhead's conflict or granted queues.
- * Returns LOCK_WAITING for conflict case and LOCK_OK otherwise.
- */
- LockResult newRequest(LockRequest* request, LockMode mode) {
- request->mode = mode;
- request->lock = this;
- request->partitionedLock = NULL;
- if (!partitioned()) {
- request->recursiveCount = 1;
+ // request->partitioned cannot be set to false, as this might be a migration, in
+ // which case access to that field is not protected. The 'partitioned' member instead
+ // indicates if a request was initially partitioned.
+
+ // New lock request. Queue after all granted modes and after any already requested
+ // conflicting modes.
+ if (conflicts(mode, grantedModes) ||
+ (!compatibleFirstCount && conflicts(mode, conflictModes))) {
+ request->status = LockRequest::STATUS_WAITING;
+
+ // Put it on the conflict queue. Conflicts are granted front to back.
+ if (request->enqueueAtFront) {
+ conflictList.push_front(request);
+ } else {
+ conflictList.push_back(request);
}
- // request->partitioned cannot be set to false, as this might be a migration, in
- // which case access to that field is not protected. The 'partitioned' member instead
- // indicates if a request was initially partitioned.
-
- // New lock request. Queue after all granted modes and after any already requested
- // conflicting modes.
- if (conflicts(mode, grantedModes) ||
- (!compatibleFirstCount && conflicts(mode, conflictModes))) {
- request->status = LockRequest::STATUS_WAITING;
-
- // Put it on the conflict queue. Conflicts are granted front to back.
- if (request->enqueueAtFront) {
- conflictList.push_front(request);
- }
- else {
- conflictList.push_back(request);
- }
- incConflictModeCount(mode);
+ incConflictModeCount(mode);
- return LOCK_WAITING;
- }
-
- // No conflict, new request
- request->status = LockRequest::STATUS_GRANTED;
+ return LOCK_WAITING;
+ }
- grantedList.push_back(request);
- incGrantedModeCount(mode);
+ // No conflict, new request
+ request->status = LockRequest::STATUS_GRANTED;
- if (request->compatibleFirst) {
- compatibleFirstCount++;
- }
+ grantedList.push_back(request);
+ incGrantedModeCount(mode);
- return LOCK_OK;
+ if (request->compatibleFirst) {
+ compatibleFirstCount++;
}
- /**
- * Lock each partitioned LockHead in turn, and move any (granted) intent mode requests for
- * lock->resourceId to lock, which must itself already be locked.
- */
- void migratePartitionedLockHeads();
-
- // Methods to maintain the granted queue
- void incGrantedModeCount(LockMode mode) {
- invariant(grantedCounts[mode] >= 0);
- if (++grantedCounts[mode] == 1) {
- invariant((grantedModes & modeMask(mode)) == 0);
- grantedModes |= modeMask(mode);
- }
- }
+ return LOCK_OK;
+ }
- void decGrantedModeCount(LockMode mode) {
- invariant(grantedCounts[mode] >= 1);
- if (--grantedCounts[mode] == 0) {
- invariant((grantedModes & modeMask(mode)) == modeMask(mode));
- grantedModes &= ~modeMask(mode);
- }
+ /**
+ * Lock each partitioned LockHead in turn, and move any (granted) intent mode requests for
+ * lock->resourceId to lock, which must itself already be locked.
+ */
+ void migratePartitionedLockHeads();
+
+ // Methods to maintain the granted queue
+ void incGrantedModeCount(LockMode mode) {
+ invariant(grantedCounts[mode] >= 0);
+ if (++grantedCounts[mode] == 1) {
+ invariant((grantedModes & modeMask(mode)) == 0);
+ grantedModes |= modeMask(mode);
}
+ }
- // Methods to maintain the conflict queue
- void incConflictModeCount(LockMode mode) {
- invariant(conflictCounts[mode] >= 0);
- if (++conflictCounts[mode] == 1) {
- invariant((conflictModes & modeMask(mode)) == 0);
- conflictModes |= modeMask(mode);
- }
+ void decGrantedModeCount(LockMode mode) {
+ invariant(grantedCounts[mode] >= 1);
+ if (--grantedCounts[mode] == 0) {
+ invariant((grantedModes & modeMask(mode)) == modeMask(mode));
+ grantedModes &= ~modeMask(mode);
}
+ }
- void decConflictModeCount(LockMode mode) {
- invariant(conflictCounts[mode] >= 1);
- if (--conflictCounts[mode] == 0) {
- invariant((conflictModes & modeMask(mode)) == modeMask(mode));
- conflictModes &= ~modeMask(mode);
- }
+ // Methods to maintain the conflict queue
+ void incConflictModeCount(LockMode mode) {
+ invariant(conflictCounts[mode] >= 0);
+ if (++conflictCounts[mode] == 1) {
+ invariant((conflictModes & modeMask(mode)) == 0);
+ conflictModes |= modeMask(mode);
}
+ }
+ void decConflictModeCount(LockMode mode) {
+ invariant(conflictCounts[mode] >= 1);
+ if (--conflictCounts[mode] == 0) {
+ invariant((conflictModes & modeMask(mode)) == modeMask(mode));
+ conflictModes &= ~modeMask(mode);
+ }
+ }
- // Id of the resource which this lock protects
- ResourceId resourceId;
-
- //
- // Granted queue
- //
-
- // Doubly-linked list of requests, which have been granted. Newly granted requests go to
- // the end of the queue. Conversion requests are granted from the beginning forward.
- LockRequestList grantedList;
-
- // Counts the grants and coversion counts for each of the supported lock modes. These
- // counts should exactly match the aggregated modes on the granted list.
- uint32_t grantedCounts[LockModesCount];
-
- // Bit-mask of the granted + converting modes on the granted queue. Maintained in lock-step
- // with the grantedCounts array.
- uint32_t grantedModes;
-
-
- //
- // Conflict queue
- //
- // Doubly-linked list of requests, which have not been granted yet because they conflict
- // with the set of granted modes. Requests are queued at the end of the queue and are
- // granted from the beginning forward, which gives these locks FIFO ordering. Exceptions
- // are high-priorty locks, such as the MMAP V1 flush lock.
- LockRequestList conflictList;
+ // Id of the resource which this lock protects
+ ResourceId resourceId;
- // Counts the conflicting requests for each of the lock modes. These counts should exactly
- // match the aggregated modes on the conflicts list.
- uint32_t conflictCounts[LockModesCount];
+ //
+ // Granted queue
+ //
- // Bit-mask of the conflict modes on the conflict queue. Maintained in lock-step with the
- // conflictCounts array.
- uint32_t conflictModes;
+ // Doubly-linked list of requests, which have been granted. Newly granted requests go to
+ // the end of the queue. Conversion requests are granted from the beginning forward.
+ LockRequestList grantedList;
- // References partitions that may have PartitionedLockHeads for this LockHead.
- // Non-empty implies the lock has no conflicts and only has intent modes as grantedModes.
- // TODO: Remove this vector and make LockHead a POD
- std::vector<LockManager::Partition *> partitions;
+ // Counts the grants and conversions for each of the supported lock modes. These
+ // counts should exactly match the aggregated modes on the granted list.
+ uint32_t grantedCounts[LockModesCount];
- //
- // Conversion
- //
+ // Bit-mask of the granted + converting modes on the granted queue. Maintained in lock-step
+ // with the grantedCounts array.
+ uint32_t grantedModes;
- // Counts the number of requests on the granted queue, which have requested any kind of
- // conflicting conversion and are blocked (i.e. all requests which are currently
- // STATUS_CONVERTING). This is an optimization for unlocking in that we do not need to
- // check the granted queue for requests in STATUS_CONVERTING if this count is zero. This
- // saves cycles in the regular case and only burdens the less-frequent lock upgrade case.
- uint32_t conversionsCount;
- // Counts the number of requests on the granted queue, which have requested that the policy
- // be switched to compatible-first. As long as this value is > 0, the policy will stay
- // compatible-first.
- uint32_t compatibleFirstCount;
- };
+ //
+ // Conflict queue
+ //
- /**
- * The PartitionedLockHead allows optimizing the case where requests overwhelmingly use
- * the intent lock modes MODE_IS and MODE_IX, which are compatible with each other.
- * Having to use a single LockHead causes contention where none would be needed.
- * So, each Locker is associated with a specific partition containing a mapping
- * of resourceId to PartitionedLockHead.
- *
- * As long as all lock requests for a resource have an intent mode, as opposed to a conflicting
- * mode, its LockHead may reference ParitionedLockHeads. A partitioned LockHead will not have
- * any conflicts. The total set of granted requests (with intent mode) is the union of
- * its grantedList and all grantedLists in PartitionedLockHeads.
- *
- * The existence of a PartitionedLockHead for a resource implies that its LockHead is
- * partitioned. If a conflicting request is made on a LockHead, all requests from
- * PartitionedLockHeads are migrated to that LockHead and the LockHead no longer partitioned.
- *
- * Not thread-safe, must be accessed under its partition lock.
- * May not lock a LockManager bucket while holding a partition lock.
- */
- struct PartitionedLockHead {
- void initNew(ResourceId resId) {
- grantedList.reset();
- }
+ // Doubly-linked list of requests, which have not been granted yet because they conflict
+ // with the set of granted modes. Requests are queued at the end of the queue and are
+ // granted from the beginning forward, which gives these locks FIFO ordering. Exceptions
+ // are high-priority locks, such as the MMAP V1 flush lock.
+ LockRequestList conflictList;
- void newRequest(LockRequest* request, LockMode mode) {
- request->lock = NULL;
- request->partitionedLock = this;
- request->recursiveCount = 1;
- request->status = LockRequest::STATUS_GRANTED;
- request->partitioned = true;
- request->mode = mode;
+ // Counts the conflicting requests for each of the lock modes. These counts should exactly
+ // match the aggregated modes on the conflicts list.
+ uint32_t conflictCounts[LockModesCount];
- grantedList.push_back(request);
- }
+ // Bit-mask of the conflict modes on the conflict queue. Maintained in lock-step with the
+ // conflictCounts array.
+ uint32_t conflictModes;
- //
- // Granted queue
- //
-
- // Doubly-linked list of requests, which have been granted. Newly granted requests go to
- // the end of the queue. The PartitionedLockHead never contains anything but granted
- // requests with intent modes.
- LockRequestList grantedList;
- };
-
- void LockHead::migratePartitionedLockHeads() {
- invariant(partitioned());
- // There can't be non-intent modes or conflicts when the lock is partitioned
- invariant(!(grantedModes & ~intentModes) && !conflictModes);
-
- // Migration time: lock each partition in turn and transfer its requests, if any
- while(partitioned()) {
- LockManager::Partition* partition = partitions.back();
- stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
-
- LockManager::Partition::Map::iterator it = partition->data.find(resourceId);
- if (it != partition->data.end()) {
- PartitionedLockHead* partitionedLock = it->second;
-
- while (!partitionedLock->grantedList.empty()) {
- LockRequest* request = partitionedLock->grantedList._front;
- partitionedLock->grantedList.remove(request);
- // Ordering is important here, as the next/prev fields are shared.
- // Note that newRequest() will preserve the recursiveCount in this case
- LockResult res = newRequest(request, request->mode);
- invariant(res == LOCK_OK); // Lock must still be granted
- }
- partition->data.erase(it);
- delete partitionedLock;
- }
- // Don't pop-back to early as otherwise the lock will be considered not partioned in
- // newRequest().
- partitions.pop_back();
- }
- }
+ // References partitions that may have PartitionedLockHeads for this LockHead.
+ // Non-empty implies the lock has no conflicts and only has intent modes as grantedModes.
+ // TODO: Remove this vector and make LockHead a POD
+ std::vector<LockManager::Partition*> partitions;
//
- // LockManager
+ // Conversion
//
- // Have more buckets than CPUs to reduce contention on lock and caches
- const unsigned LockManager::_numLockBuckets(128);
+ // Counts the number of requests on the granted queue, which have requested any kind of
+ // conflicting conversion and are blocked (i.e. all requests which are currently
+ // STATUS_CONVERTING). This is an optimization for unlocking in that we do not need to
+ // check the granted queue for requests in STATUS_CONVERTING if this count is zero. This
+ // saves cycles in the regular case and only burdens the less-frequent lock upgrade case.
+ uint32_t conversionsCount;
- // Balance scalability of intent locks against potential added cost of conflicting locks.
- // The exact value doesn't appear very important, but should be power of two
- const unsigned LockManager::_numPartitions = 32;
+ // Counts the number of requests on the granted queue, which have requested that the policy
+ // be switched to compatible-first. As long as this value is > 0, the policy will stay
+ // compatible-first.
+ uint32_t compatibleFirstCount;
+};
- LockManager::LockManager() {
- _lockBuckets = new LockBucket[_numLockBuckets];
- _partitions = new Partition[_numPartitions];
+/**
+ * The PartitionedLockHead allows optimizing the case where requests overwhelmingly use
+ * the intent lock modes MODE_IS and MODE_IX, which are compatible with each other.
+ * Having to use a single LockHead causes contention where none would be needed.
+ * So, each Locker is associated with a specific partition containing a mapping
+ * of resourceId to PartitionedLockHead.
+ *
+ * As long as all lock requests for a resource have an intent mode, as opposed to a conflicting
+ * mode, its LockHead may reference PartitionedLockHeads. A partitioned LockHead will not have
+ * any conflicts. The total set of granted requests (with intent mode) is the union of
+ * its grantedList and all grantedLists in PartitionedLockHeads.
+ *
+ * The existence of a PartitionedLockHead for a resource implies that its LockHead is
+ * partitioned. If a conflicting request is made on a LockHead, all requests from
+ * PartitionedLockHeads are migrated to that LockHead and the LockHead is no longer partitioned.
+ *
+ * Not thread-safe, must be accessed under its partition lock.
+ * May not lock a LockManager bucket while holding a partition lock.
+ */
+struct PartitionedLockHead {
+ void initNew(ResourceId resId) {
+ grantedList.reset();
}
- LockManager::~LockManager() {
- cleanupUnusedLocks();
-
- for (unsigned i = 0; i < _numLockBuckets; i++) {
- // TODO: dump more information about the non-empty bucket to see what locks were leaked
- invariant(_lockBuckets[i].data.empty());
- }
+ void newRequest(LockRequest* request, LockMode mode) {
+ request->lock = NULL;
+ request->partitionedLock = this;
+ request->recursiveCount = 1;
+ request->status = LockRequest::STATUS_GRANTED;
+ request->partitioned = true;
+ request->mode = mode;
- delete[] _lockBuckets;
- delete[] _partitions;
+ grantedList.push_back(request);
}
- LockResult LockManager::lock(ResourceId resId, LockRequest* request, LockMode mode) {
- // Sanity check that requests are not being reused without proper cleanup
- invariant(request->status == LockRequest::STATUS_NEW);
-
- request->partitioned = (mode == MODE_IX || mode == MODE_IS);
-
- // For intent modes, try the PartitionedLockHead
- if (request->partitioned) {
- Partition* partition = _getPartition(request);
- stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
-
- // Fast path for intent locks
- PartitionedLockHead* partitionedLock = partition->find(resId);
+ //
+ // Granted queue
+ //
- if (partitionedLock) {
- partitionedLock->newRequest(request, mode);
- return LOCK_OK;
+ // Doubly-linked list of requests, which have been granted. Newly granted requests go to
+ // the end of the queue. The PartitionedLockHead never contains anything but granted
+ // requests with intent modes.
+ LockRequestList grantedList;
+};
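A rough sketch of the routing this enables; partitionFor and numPartitions are hypothetical stand-ins for the private _getPartition()/_numPartitions defined further down:

    // Intent requests are keyed by the owning locker's id, so two lockers taking
    // MODE_IX on the same resource normally land on different PartitionedLockHeads
    // instead of contending on a single LockHead.
    const unsigned numPartitions = 32;  // mirrors LockManager::_numPartitions
    unsigned partitionFor(uint64_t lockerId) {
        return lockerId % numPartitions;
    }
    // e.g. partitionFor(42) == 10 while partitionFor(43) == 11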
+
+void LockHead::migratePartitionedLockHeads() {
+ invariant(partitioned());
+ // There can't be non-intent modes or conflicts when the lock is partitioned
+ invariant(!(grantedModes & ~intentModes) && !conflictModes);
+
+ // Migration time: lock each partition in turn and transfer its requests, if any
+ while (partitioned()) {
+ LockManager::Partition* partition = partitions.back();
+ stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
+
+ LockManager::Partition::Map::iterator it = partition->data.find(resourceId);
+ if (it != partition->data.end()) {
+ PartitionedLockHead* partitionedLock = it->second;
+
+ while (!partitionedLock->grantedList.empty()) {
+ LockRequest* request = partitionedLock->grantedList._front;
+ partitionedLock->grantedList.remove(request);
+ // Ordering is important here, as the next/prev fields are shared.
+ // Note that newRequest() will preserve the recursiveCount in this case
+ LockResult res = newRequest(request, request->mode);
+ invariant(res == LOCK_OK); // Lock must still be granted
}
- // Unsuccessful: there was no PartitionedLockHead yet, so use regular LockHead.
- // Must not hold any locks. It is OK for requests with intent modes to be on
- // both a PartitionedLockHead and a regular LockHead, so the race here is benign.
- }
-
- // Use regular LockHead, maybe start partitioning
- LockBucket* bucket = _getBucket(resId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
-
- LockHead* lock = bucket->findOrInsert(resId);
-
- // Start a partitioned lock if possible
- if (request->partitioned && !(lock->grantedModes & (~intentModes))
- && !lock->conflictModes) {
- Partition* partition = _getPartition(request);
- stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
- PartitionedLockHead* partitionedLock = partition->findOrInsert(resId);
- invariant(partitionedLock);
- lock->partitions.push_back(partition);
- partitionedLock->newRequest(request, mode);
- return LOCK_OK;
+ partition->data.erase(it);
+ delete partitionedLock;
}
-
- // For the first lock with a non-intent mode, migrate requests from partitioned lock heads
- if (lock->partitioned()) {
- lock->migratePartitionedLockHeads();
- }
-
- request->partitioned = false;
- return lock->newRequest(request, mode);
+ // Don't pop_back too early, as otherwise the lock will be considered not partitioned in
+ // newRequest().
+ partitions.pop_back();
}
+}
- LockResult LockManager::convert(ResourceId resId, LockRequest* request, LockMode newMode) {
- // If we are here, we already hold the lock in some mode. In order to keep it simple, we do
- // not allow requesting a conversion while a lock is already waiting or pending conversion.
- invariant(request->status == LockRequest::STATUS_GRANTED);
- invariant(request->recursiveCount > 0);
+//
+// LockManager
+//
- request->recursiveCount++;
+// Have more buckets than CPUs to reduce contention on lock and caches
+const unsigned LockManager::_numLockBuckets(128);
- // Fast path for acquiring the same lock multiple times in modes, which are already covered
- // by the current mode. It is safe to do this without locking, because 1) all calls for the
- // same lock request must be done on the same thread and 2) if there are lock requests
- // hanging off a given LockHead, then this lock will never disappear.
- if ((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
- LockConflictsTable[request->mode]) {
- return LOCK_OK;
- }
+// Balance scalability of intent locks against potential added cost of conflicting locks.
+// The exact value doesn't appear very important, but should be a power of two
+const unsigned LockManager::_numPartitions = 32;
- // TODO: For the time being we do not need conversions between unrelated lock modes (i.e.,
- // modes which both add and remove to the conflicts set), so these are not implemented yet
- // (e.g., S -> IX).
- invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
- LockConflictsTable[newMode]);
+LockManager::LockManager() {
+ _lockBuckets = new LockBucket[_numLockBuckets];
+ _partitions = new Partition[_numPartitions];
+}
- LockBucket* bucket = _getBucket(resId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+LockManager::~LockManager() {
+ cleanupUnusedLocks();
- LockBucket::Map::iterator it = bucket->data.find(resId);
- invariant(it != bucket->data.end());
+ for (unsigned i = 0; i < _numLockBuckets; i++) {
+ // TODO: dump more information about the non-empty bucket to see what locks were leaked
+ invariant(_lockBuckets[i].data.empty());
+ }
- LockHead* const lock = it->second;
+ delete[] _lockBuckets;
+ delete[] _partitions;
+}
- if (lock->partitioned()) {
- lock->migratePartitionedLockHeads();
- }
+LockResult LockManager::lock(ResourceId resId, LockRequest* request, LockMode mode) {
+ // Sanity check that requests are not being reused without proper cleanup
+ invariant(request->status == LockRequest::STATUS_NEW);
- // Construct granted mask without our current mode, so that it is not counted as
- // conflicting
- uint32_t grantedModesWithoutCurrentRequest = 0;
+ request->partitioned = (mode == MODE_IX || mode == MODE_IS);
- // We start the counting at 1 below, because LockModesCount also includes MODE_NONE
- // at position 0, which can never be acquired/granted.
- for (uint32_t i = 1; i < LockModesCount; i++) {
- const uint32_t currentRequestHolds =
- (request->mode == static_cast<LockMode>(i) ? 1 : 0);
+ // For intent modes, try the PartitionedLockHead
+ if (request->partitioned) {
+ Partition* partition = _getPartition(request);
+ stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
- if (lock->grantedCounts[i] > currentRequestHolds) {
- grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
- }
- }
-
- // This check favours conversion requests over pending requests. For example:
- //
- // T1 requests lock L in IS
- // T2 requests lock L in X
- // T1 then upgrades L from IS -> S
- //
- // Because the check does not look into the conflict modes bitmap, it will grant L to
- // T1 in S mode, instead of block, which would otherwise cause deadlock.
- if (conflicts(newMode, grantedModesWithoutCurrentRequest)) {
- request->status = LockRequest::STATUS_CONVERTING;
- invariant(request->recursiveCount > 1);
- request->convertMode = newMode;
-
- lock->conversionsCount++;
- lock->incGrantedModeCount(request->convertMode);
-
- return LOCK_WAITING;
- }
- else { // No conflict, existing request
- lock->incGrantedModeCount(newMode);
- lock->decGrantedModeCount(request->mode);
- request->mode = newMode;
+ // Fast path for intent locks
+ PartitionedLockHead* partitionedLock = partition->find(resId);
+ if (partitionedLock) {
+ partitionedLock->newRequest(request, mode);
return LOCK_OK;
}
+ // Unsuccessful: there was no PartitionedLockHead yet, so use regular LockHead.
+ // Must not hold any locks. It is OK for requests with intent modes to be on
+ // both a PartitionedLockHead and a regular LockHead, so the race here is benign.
}
- bool LockManager::unlock(LockRequest* request) {
- // Fast path for decrementing multiple references of the same lock. It is safe to do this
- // without locking, because 1) all calls for the same lock request must be done on the same
- // thread and 2) if there are lock requests hanging of a given LockHead, then this lock
- // will never disappear.
- invariant(request->recursiveCount > 0);
- request->recursiveCount--;
- if ((request->status == LockRequest::STATUS_GRANTED) && (request->recursiveCount > 0)) {
- return false;
- }
-
- if (request->partitioned) {
- // Unlocking a lock that was acquired as partitioned. The lock request may since have
- // moved to the lock head, but there is no safe way to find out without synchronizing
- // thorough the partition mutex. Migrations are expected to be rare.
- invariant(request->status == LockRequest::STATUS_GRANTED
- || request->status == LockRequest::STATUS_CONVERTING);
- Partition* partition = _getPartition(request);
- stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
- // Fast path: still partitioned.
- if (request->partitionedLock) {
- request->partitionedLock->grantedList.remove(request);
- return true;
- }
-
- // not partitioned anymore, fall through to regular case
- }
- invariant(request->lock);
+ // Use regular LockHead, maybe start partitioning
+ LockBucket* bucket = _getBucket(resId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+
+ LockHead* lock = bucket->findOrInsert(resId);
+
+ // Start a partitioned lock if possible
+ if (request->partitioned && !(lock->grantedModes & (~intentModes)) && !lock->conflictModes) {
+ Partition* partition = _getPartition(request);
+ stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
+ PartitionedLockHead* partitionedLock = partition->findOrInsert(resId);
+ invariant(partitionedLock);
+ lock->partitions.push_back(partition);
+ partitionedLock->newRequest(request, mode);
+ return LOCK_OK;
+ }
- LockHead* lock = request->lock;
- LockBucket* bucket = _getBucket(lock->resourceId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+ // For the first lock with a non-intent mode, migrate requests from partitioned lock heads
+ if (lock->partitioned()) {
+ lock->migratePartitionedLockHeads();
+ }
- if (request->status == LockRequest::STATUS_GRANTED) {
- // This releases a currently held lock and is the most common path, so it should be
- // as efficient as possible. The fast path for decrementing multiple references did
- // already ensure request->recursiveCount == 0.
+ request->partitioned = false;
+ return lock->newRequest(request, mode);
+}
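A minimal usage sketch, assuming lockManager is a LockManager and request is a LockRequest that has already been initialised for some Locker and notifier (so it starts out as STATUS_NEW):

    const ResourceId resId(RESOURCE_COLLECTION, 1);
    LockResult result = lockManager.lock(resId, &request, MODE_IX);
    if (result == LOCK_WAITING) {
        // queued on the conflict list; the request's notify target is signalled
        // with LOCK_OK once the conflicting holders drain
    }
    // ... once granted, operate under the intent lock ...
    if (lockManager.unlock(&request)) {
        // the last recursive reference was released and the request is unregistered
    }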
+
+LockResult LockManager::convert(ResourceId resId, LockRequest* request, LockMode newMode) {
+ // If we are here, we already hold the lock in some mode. In order to keep it simple, we do
+ // not allow requesting a conversion while a lock is already waiting or pending conversion.
+ invariant(request->status == LockRequest::STATUS_GRANTED);
+ invariant(request->recursiveCount > 0);
+
+ request->recursiveCount++;
+
+ // Fast path for acquiring the same lock multiple times in modes, which are already covered
+ // by the current mode. It is safe to do this without locking, because 1) all calls for the
+ // same lock request must be done on the same thread and 2) if there are lock requests
+ // hanging off a given LockHead, then this lock will never disappear.
+ if ((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
+ LockConflictsTable[request->mode]) {
+ return LOCK_OK;
+ }
- // Remove from the granted list
- lock->grantedList.remove(request);
- lock->decGrantedModeCount(request->mode);
+ // TODO: For the time being we do not need conversions between unrelated lock modes (i.e.,
+ // modes which both add to and remove from the conflicts set), so these are not implemented yet
+ // (e.g., S -> IX).
+ invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
+ LockConflictsTable[newMode]);
- if (request->compatibleFirst) {
- lock->compatibleFirstCount--;
- }
+ LockBucket* bucket = _getBucket(resId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- _onLockModeChanged(lock, lock->grantedCounts[request->mode] == 0);
- }
- else if (request->status == LockRequest::STATUS_WAITING) {
- // This cancels a pending lock request
- invariant(request->recursiveCount == 0);
+ LockBucket::Map::iterator it = bucket->data.find(resId);
+ invariant(it != bucket->data.end());
- lock->conflictList.remove(request);
- lock->decConflictModeCount(request->mode);
- }
- else if (request->status == LockRequest::STATUS_CONVERTING) {
- // This cancels a pending convert request
- invariant(request->recursiveCount > 0);
+ LockHead* const lock = it->second;
- // Lock only goes from GRANTED to CONVERTING, so cancelling the conversion request
- // brings it back to the previous granted mode.
- request->status = LockRequest::STATUS_GRANTED;
+ if (lock->partitioned()) {
+ lock->migratePartitionedLockHeads();
+ }
- lock->conversionsCount--;
- lock->decGrantedModeCount(request->convertMode);
+ // Construct granted mask without our current mode, so that it is not counted as
+ // conflicting
+ uint32_t grantedModesWithoutCurrentRequest = 0;
- request->convertMode = MODE_NONE;
+ // We start the counting at 1 below, because LockModesCount also includes MODE_NONE
+ // at position 0, which can never be acquired/granted.
+ for (uint32_t i = 1; i < LockModesCount; i++) {
+ const uint32_t currentRequestHolds = (request->mode == static_cast<LockMode>(i) ? 1 : 0);
- _onLockModeChanged(lock, lock->grantedCounts[request->convertMode] == 0);
- }
- else {
- // Invalid request status
- invariant(false);
+ if (lock->grantedCounts[i] > currentRequestHolds) {
+ grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
}
-
- return (request->recursiveCount == 0);
}
- void LockManager::downgrade(LockRequest* request, LockMode newMode) {
- invariant(request->lock);
- invariant(request->status == LockRequest::STATUS_GRANTED);
- invariant(request->recursiveCount > 0);
-
- // The conflict set of the newMode should be a subset of the conflict set of the old mode.
- // Can't downgrade from S -> IX for example.
- invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode])
- == LockConflictsTable[request->mode]);
-
- LockHead* lock = request->lock;
-
- LockBucket* bucket = _getBucket(lock->resourceId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
-
+ // This check favours conversion requests over pending requests. For example:
+ //
+ // T1 requests lock L in IS
+ // T2 requests lock L in X
+ // T1 then upgrades L from IS -> S
+ //
+ // Because the check does not look into the conflict modes bitmap, it will grant L to
+ // T1 in S mode, instead of blocking, which would otherwise cause a deadlock.
+ if (conflicts(newMode, grantedModesWithoutCurrentRequest)) {
+ request->status = LockRequest::STATUS_CONVERTING;
+ invariant(request->recursiveCount > 1);
+ request->convertMode = newMode;
+
+ lock->conversionsCount++;
+ lock->incGrantedModeCount(request->convertMode);
+
+ return LOCK_WAITING;
+ } else { // No conflict, existing request
lock->incGrantedModeCount(newMode);
lock->decGrantedModeCount(request->mode);
request->mode = newMode;
- _onLockModeChanged(lock, true);
+ return LOCK_OK;
+ }
+}
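A worked instance of the check above, under the assumed state from the comment (T1 alone holds MODE_IS, T2 has a pending MODE_X, and T1 converts IS to S):

    uint32_t grantedCounts[LockModesCount] = {0, 1, 0, 0, 0};  // assumed: only T1's MODE_IS granted
    uint32_t grantedModesWithoutCurrentRequest = 0;
    for (uint32_t i = 1; i < LockModesCount; i++) {
        // T1's own hold is subtracted, exactly as in convert() above
        const uint32_t currentRequestHolds = (MODE_IS == static_cast<LockMode>(i) ? 1 : 0);
        if (grantedCounts[i] > currentRequestHolds) {
            grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
        }
    }
    // The mask comes out empty, so the upgrade to S is granted immediately rather
    // than queueing behind T2's pending X, which would deadlock.
    invariant(!conflicts(MODE_S, grantedModesWithoutCurrentRequest));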
+
+bool LockManager::unlock(LockRequest* request) {
+ // Fast path for decrementing multiple references of the same lock. It is safe to do this
+ // without locking, because 1) all calls for the same lock request must be done on the same
+// thread and 2) if there are lock requests hanging off a given LockHead, then this lock
+ // will never disappear.
+ invariant(request->recursiveCount > 0);
+ request->recursiveCount--;
+ if ((request->status == LockRequest::STATUS_GRANTED) && (request->recursiveCount > 0)) {
+ return false;
}
- void LockManager::cleanupUnusedLocks() {
- size_t deletedLockHeads = 0;
- for (unsigned i = 0; i < _numLockBuckets; i++) {
- LockBucket* bucket = &_lockBuckets[i];
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
-
- LockBucket::Map::iterator it = bucket->data.begin();
- while (it != bucket->data.end()) {
- LockHead* lock = it->second;
- if (lock->partitioned()) {
- lock->migratePartitionedLockHeads();
- }
- if (lock->grantedModes == 0) {
- invariant(lock->grantedModes == 0);
- invariant(lock->grantedList._front == NULL);
- invariant(lock->grantedList._back == NULL);
- invariant(lock->conflictModes == 0);
- invariant(lock->conflictList._front == NULL);
- invariant(lock->conflictList._back == NULL);
- invariant(lock->conversionsCount == 0);
- invariant(lock->compatibleFirstCount == 0);
-
- bucket->data.erase(it++);
- deletedLockHeads++;
- delete lock;
- }
- else {
- it++;
- }
- }
+ if (request->partitioned) {
+ // Unlocking a lock that was acquired as partitioned. The lock request may since have
+ // moved to the lock head, but there is no safe way to find out without synchronizing
+ // through the partition mutex. Migrations are expected to be rare.
+ invariant(request->status == LockRequest::STATUS_GRANTED ||
+ request->status == LockRequest::STATUS_CONVERTING);
+ Partition* partition = _getPartition(request);
+ stdx::lock_guard<SimpleMutex> scopedLock(partition->mutex);
+ // Fast path: still partitioned.
+ if (request->partitionedLock) {
+ request->partitionedLock->grantedList.remove(request);
+ return true;
}
+
+ // not partitioned anymore, fall through to regular case
}
+ invariant(request->lock);
- void LockManager::_onLockModeChanged(LockHead* lock, bool checkConflictQueue) {
- // Unblock any converting requests (because conversions are still counted as granted and
- // are on the granted queue).
- for (LockRequest* iter = lock->grantedList._front;
- (iter != NULL) && (lock->conversionsCount > 0);
- iter = iter->next) {
+ LockHead* lock = request->lock;
+ LockBucket* bucket = _getBucket(lock->resourceId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- // Conversion requests are going in a separate queue
- if (iter->status == LockRequest::STATUS_CONVERTING) {
- invariant(iter->convertMode != 0);
+ if (request->status == LockRequest::STATUS_GRANTED) {
+ // This releases a currently held lock and is the most common path, so it should be
+ // as efficient as possible. The fast path for decrementing multiple references did
+ // already ensure request->recursiveCount == 0.
- // Construct granted mask without our current mode, so that it is not accounted as
- // a conflict
- uint32_t grantedModesWithoutCurrentRequest = 0;
+ // Remove from the granted list
+ lock->grantedList.remove(request);
+ lock->decGrantedModeCount(request->mode);
- // We start the counting at 1 below, because LockModesCount also includes
- // MODE_NONE at position 0, which can never be acquired/granted.
- for (uint32_t i = 1; i < LockModesCount; i++) {
- const uint32_t currentRequestHolds =
- (iter->mode == static_cast<LockMode>(i) ? 1 : 0);
+ if (request->compatibleFirst) {
+ lock->compatibleFirstCount--;
+ }
- const uint32_t currentRequestWaits =
- (iter->convertMode == static_cast<LockMode>(i) ? 1 : 0);
+ _onLockModeChanged(lock, lock->grantedCounts[request->mode] == 0);
+ } else if (request->status == LockRequest::STATUS_WAITING) {
+ // This cancels a pending lock request
+ invariant(request->recursiveCount == 0);
- // We cannot both hold and wait on the same lock mode
- invariant(currentRequestHolds + currentRequestWaits <= 1);
+ lock->conflictList.remove(request);
+ lock->decConflictModeCount(request->mode);
+ } else if (request->status == LockRequest::STATUS_CONVERTING) {
+ // This cancels a pending convert request
+ invariant(request->recursiveCount > 0);
- if (lock->grantedCounts[i] > (currentRequestHolds + currentRequestWaits)) {
- grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
- }
- }
+ // Lock only goes from GRANTED to CONVERTING, so cancelling the conversion request
+ // brings it back to the previous granted mode.
+ request->status = LockRequest::STATUS_GRANTED;
- if (!conflicts(iter->convertMode, grantedModesWithoutCurrentRequest)) {
- lock->conversionsCount--;
- lock->decGrantedModeCount(iter->mode);
- iter->status = LockRequest::STATUS_GRANTED;
- iter->mode = iter->convertMode;
- iter->convertMode = MODE_NONE;
+ lock->conversionsCount--;
+ lock->decGrantedModeCount(request->convertMode);
- iter->notify->notify(lock->resourceId, LOCK_OK);
- }
- }
- }
+ request->convertMode = MODE_NONE;
- // Grant any conflicting requests, which might now be unblocked. Note that the loop below
- // slightly violates fairness in that it will grant *all* compatible requests on the line
- // even though there might be conflicting ones interspersed between them. For example,
- // consider an X lock was just freed and the conflict queue looked like this:
- //
- // IS -> IS -> X -> X -> S -> IS
- //
- // In strict FIFO, we should grant the first two IS modes and then stop when we reach the
- // first X mode (the third request on the queue). However, the loop below would actually
- // grant all IS + S modes and once they all drain it will grant X.
+ _onLockModeChanged(lock, lock->grantedCounts[request->convertMode] == 0);
+ } else {
+ // Invalid request status
+ invariant(false);
+ }
- LockRequest* iterNext = NULL;
+ return (request->recursiveCount == 0);
+}
- for (LockRequest* iter = lock->conflictList._front;
- (iter != NULL) && checkConflictQueue;
- iter = iterNext) {
+void LockManager::downgrade(LockRequest* request, LockMode newMode) {
+ invariant(request->lock);
+ invariant(request->status == LockRequest::STATUS_GRANTED);
+ invariant(request->recursiveCount > 0);
- invariant(iter->status == LockRequest::STATUS_WAITING);
+ // The conflict set of the newMode should be a subset of the conflict set of the old mode.
+ // Can't downgrade from S -> IX for example.
+ invariant((LockConflictsTable[request->mode] | LockConflictsTable[newMode]) ==
+ LockConflictsTable[request->mode]);
- // Store the actual next pointer, because we muck with the iter below and move it to
- // the granted queue.
- iterNext = iter->next;
+ LockHead* lock = request->lock;
- if (conflicts(iter->mode, lock->grantedModes)) {
- continue;
- }
+ LockBucket* bucket = _getBucket(lock->resourceId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- iter->status = LockRequest::STATUS_GRANTED;
+ lock->incGrantedModeCount(newMode);
+ lock->decGrantedModeCount(request->mode);
+ request->mode = newMode;
- lock->conflictList.remove(iter);
- lock->grantedList.push_back(iter);
+ _onLockModeChanged(lock, true);
+}
- lock->incGrantedModeCount(iter->mode);
- lock->decConflictModeCount(iter->mode);
+void LockManager::cleanupUnusedLocks() {
+ size_t deletedLockHeads = 0;
+ for (unsigned i = 0; i < _numLockBuckets; i++) {
+ LockBucket* bucket = &_lockBuckets[i];
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- if (iter->compatibleFirst) {
- lock->compatibleFirstCount++;
+ LockBucket::Map::iterator it = bucket->data.begin();
+ while (it != bucket->data.end()) {
+ LockHead* lock = it->second;
+ if (lock->partitioned()) {
+ lock->migratePartitionedLockHeads();
}
-
- iter->notify->notify(lock->resourceId, LOCK_OK);
-
- // Small optimization - nothing is compatible with MODE_X, so no point in looking
- // further in the conflict queue.
- if (iter->mode == MODE_X) {
- break;
+ if (lock->grantedModes == 0) {
+ invariant(lock->grantedModes == 0);
+ invariant(lock->grantedList._front == NULL);
+ invariant(lock->grantedList._back == NULL);
+ invariant(lock->conflictModes == 0);
+ invariant(lock->conflictList._front == NULL);
+ invariant(lock->conflictList._back == NULL);
+ invariant(lock->conversionsCount == 0);
+ invariant(lock->compatibleFirstCount == 0);
+
+ bucket->data.erase(it++);
+ deletedLockHeads++;
+ delete lock;
+ } else {
+ it++;
}
}
-
- // This is a convenient place to check that the state of the two request queues is in sync
- // with the bitmask on the modes.
- invariant((lock->grantedModes == 0) ^ (lock->grantedList._front != NULL));
- invariant((lock->conflictModes == 0) ^ (lock->conflictList._front != NULL));
}
+}
+
+void LockManager::_onLockModeChanged(LockHead* lock, bool checkConflictQueue) {
+ // Unblock any converting requests (because conversions are still counted as granted and
+ // are on the granted queue).
+ for (LockRequest* iter = lock->grantedList._front;
+ (iter != NULL) && (lock->conversionsCount > 0);
+ iter = iter->next) {
+ // Conversion requests are going in a separate queue
+ if (iter->status == LockRequest::STATUS_CONVERTING) {
+ invariant(iter->convertMode != 0);
+
+ // Construct granted mask without our current mode, so that it is not accounted as
+ // a conflict
+ uint32_t grantedModesWithoutCurrentRequest = 0;
+
+ // We start the counting at 1 below, because LockModesCount also includes
+ // MODE_NONE at position 0, which can never be acquired/granted.
+ for (uint32_t i = 1; i < LockModesCount; i++) {
+ const uint32_t currentRequestHolds =
+ (iter->mode == static_cast<LockMode>(i) ? 1 : 0);
+
+ const uint32_t currentRequestWaits =
+ (iter->convertMode == static_cast<LockMode>(i) ? 1 : 0);
+
+ // We cannot both hold and wait on the same lock mode
+ invariant(currentRequestHolds + currentRequestWaits <= 1);
+
+ if (lock->grantedCounts[i] > (currentRequestHolds + currentRequestWaits)) {
+ grantedModesWithoutCurrentRequest |= modeMask(static_cast<LockMode>(i));
+ }
+ }
- LockManager::LockBucket* LockManager::_getBucket(ResourceId resId) const {
- return &_lockBuckets[resId % _numLockBuckets];
- }
-
- LockManager::Partition* LockManager::_getPartition(LockRequest* request) const {
- return &_partitions[request->locker->getId() % _numPartitions];
- }
-
- void LockManager::dump() const {
- log() << "Dumping LockManager @ " << static_cast<const void*>(this) << '\n';
-
- for (unsigned i = 0; i < _numLockBuckets; i++) {
- LockBucket* bucket = &_lockBuckets[i];
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+ if (!conflicts(iter->convertMode, grantedModesWithoutCurrentRequest)) {
+ lock->conversionsCount--;
+ lock->decGrantedModeCount(iter->mode);
+ iter->status = LockRequest::STATUS_GRANTED;
+ iter->mode = iter->convertMode;
+ iter->convertMode = MODE_NONE;
- if (!bucket->data.empty()) {
- _dumpBucket(bucket);
+ iter->notify->notify(lock->resourceId, LOCK_OK);
}
}
}
- void LockManager::_dumpBucket(const LockBucket* bucket) const {
- for (LockBucket::Map::const_iterator it = bucket->data.begin();
- it != bucket->data.end();
- it++) {
+ // Grant any conflicting requests, which might now be unblocked. Note that the loop below
+ // slightly violates fairness in that it will grant *all* compatible requests on the line
+ // even though there might be conflicting ones interspersed between them. For example,
+ // consider an X lock was just freed and the conflict queue looked like this:
+ //
+ // IS -> IS -> X -> X -> S -> IS
+ //
+ // In strict FIFO, we should grant the first two IS modes and then stop when we reach the
+ // first X mode (the third request on the queue). However, the loop below would actually
+ // grant all IS + S modes and once they all drain it will grant X.
- const LockHead* lock = it->second;
+ LockRequest* iterNext = NULL;
- if (lock->grantedList.empty()) {
- // If there are no granted requests, this lock is empty, so no need to print it
- continue;
- }
+ for (LockRequest* iter = lock->conflictList._front; (iter != NULL) && checkConflictQueue;
+ iter = iterNext) {
+ invariant(iter->status == LockRequest::STATUS_WAITING);
- StringBuilder sb;
- sb << "Lock @ " << lock << ": " << lock->resourceId.toString() << '\n';
-
- sb << "GRANTED:\n";
- for (const LockRequest* iter = lock->grantedList._front;
- iter != NULL;
- iter = iter->next) {
-
- sb << '\t'
- << "LockRequest " << iter->locker->getId() << " @ " << iter->locker << ": "
- << "Mode = " << modeName(iter->mode) << "; "
- << "ConvertMode = " << modeName(iter->convertMode) << "; "
- << "EnqueueAtFront = " << iter->enqueueAtFront << "; "
- << "CompatibleFirst = " << iter->compatibleFirst << "; "
- << '\n';
- }
+ // Store the actual next pointer, because we muck with the iter below and move it to
+ // the granted queue.
+ iterNext = iter->next;
- sb << '\n';
+ if (conflicts(iter->mode, lock->grantedModes)) {
+ continue;
+ }
- sb << "PENDING:\n";
- for (const LockRequest* iter = lock->conflictList._front;
- iter != NULL;
- iter = iter->next) {
+ iter->status = LockRequest::STATUS_GRANTED;
- sb << '\t'
- << "LockRequest " << iter->locker->getId() << " @ " << iter->locker << ": "
- << "Mode = " << modeName(iter->mode) << "; "
- << "ConvertMode = " << modeName(iter->convertMode) << "; "
- << "EnqueueAtFront = " << iter->enqueueAtFront << "; "
- << "CompatibleFirst = " << iter->compatibleFirst << "; "
- << '\n';
- }
+ lock->conflictList.remove(iter);
+ lock->grantedList.push_back(iter);
- log() << sb.str();
- }
- }
+ lock->incGrantedModeCount(iter->mode);
+ lock->decConflictModeCount(iter->mode);
- PartitionedLockHead* LockManager::Partition::find(ResourceId resId) {
- Map::iterator it = data.find(resId);
- return it == data.end() ? NULL : it->second;
- }
+ if (iter->compatibleFirst) {
+ lock->compatibleFirstCount++;
+ }
- PartitionedLockHead* LockManager::Partition::findOrInsert(ResourceId resId) {
- PartitionedLockHead* lock;
- Map::iterator it = data.find(resId);
- if (it == data.end()) {
- lock = new PartitionedLockHead();
- lock->initNew(resId);
+ iter->notify->notify(lock->resourceId, LOCK_OK);
- data.insert(Map::value_type(resId, lock));
+ // Small optimization - nothing is compatible with MODE_X, so no point in looking
+ // further in the conflict queue.
+ if (iter->mode == MODE_X) {
+ break;
}
- else {
- lock = it->second;
- }
- return lock;
}
- LockHead* LockManager::LockBucket::findOrInsert(ResourceId resId) {
- LockHead* lock;
- Map::iterator it = data.find(resId);
- if (it == data.end()) {
- lock = new LockHead();
- lock->initNew(resId);
+ // This is a convenient place to check that the state of the two request queues is in sync
+ // with the bitmask on the modes.
+ invariant((lock->grantedModes == 0) ^ (lock->grantedList._front != NULL));
+ invariant((lock->conflictModes == 0) ^ (lock->conflictList._front != NULL));
+}
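A short trace of the relaxed-FIFO behaviour described in the comment above, assuming an X holder just released and the conflict queue is IS, IS, X, X, S, IS:

    // granted so far: {}       IS -> compatible -> granted
    // granted so far: {IS}     IS -> compatible -> granted
    // granted so far: {IS}     X  -> conflicts  -> left on the queue
    // granted so far: {IS}     X  -> conflicts  -> left on the queue
    // granted so far: {IS}     S  -> compatible -> granted
    // granted so far: {IS, S}  IS -> compatible -> granted
    // The first X is granted only after every IS/S holder has drained.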
- data.insert(Map::value_type(resId, lock));
- }
- else {
- lock = it->second;
- }
- return lock;
- }
+LockManager::LockBucket* LockManager::_getBucket(ResourceId resId) const {
+ return &_lockBuckets[resId % _numLockBuckets];
+}
- //
- // DeadlockDetector
- //
+LockManager::Partition* LockManager::_getPartition(LockRequest* request) const {
+ return &_partitions[request->locker->getId() % _numPartitions];
+}
- DeadlockDetector::DeadlockDetector(const LockManager& lockMgr, const Locker* initialLocker)
- : _lockMgr(lockMgr),
- _initialLockerId(initialLocker->getId()),
- _foundCycle(false) {
+void LockManager::dump() const {
+ log() << "Dumping LockManager @ " << static_cast<const void*>(this) << '\n';
- const ResourceId resId = initialLocker->getWaitingResource();
+ for (unsigned i = 0; i < _numLockBuckets; i++) {
+ LockBucket* bucket = &_lockBuckets[i];
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- // If there is no resource waiting there is nothing to do
- if (resId.isValid()) {
- _queue.push_front(UnprocessedNode(_initialLockerId, resId));
+ if (!bucket->data.empty()) {
+ _dumpBucket(bucket);
}
}
+}
- bool DeadlockDetector::next() {
- if (_queue.empty()) return false;
+void LockManager::_dumpBucket(const LockBucket* bucket) const {
+ for (LockBucket::Map::const_iterator it = bucket->data.begin(); it != bucket->data.end();
+ it++) {
+ const LockHead* lock = it->second;
- UnprocessedNode front = _queue.front();
- _queue.pop_front();
+ if (lock->grantedList.empty()) {
+ // If there are no granted requests, this lock is empty, so no need to print it
+ continue;
+ }
- _processNextNode(front);
+ StringBuilder sb;
+ sb << "Lock @ " << lock << ": " << lock->resourceId.toString() << '\n';
+
+ sb << "GRANTED:\n";
+ for (const LockRequest* iter = lock->grantedList._front; iter != NULL; iter = iter->next) {
+ sb << '\t' << "LockRequest " << iter->locker->getId() << " @ " << iter->locker << ": "
+ << "Mode = " << modeName(iter->mode) << "; "
+ << "ConvertMode = " << modeName(iter->convertMode) << "; "
+ << "EnqueueAtFront = " << iter->enqueueAtFront << "; "
+ << "CompatibleFirst = " << iter->compatibleFirst << "; " << '\n';
+ }
- return !_queue.empty();
- }
+ sb << '\n';
- bool DeadlockDetector::hasCycle() const {
- invariant(_queue.empty());
+ sb << "PENDING:\n";
+ for (const LockRequest* iter = lock->conflictList._front; iter != NULL; iter = iter->next) {
+ sb << '\t' << "LockRequest " << iter->locker->getId() << " @ " << iter->locker << ": "
+ << "Mode = " << modeName(iter->mode) << "; "
+ << "ConvertMode = " << modeName(iter->convertMode) << "; "
+ << "EnqueueAtFront = " << iter->enqueueAtFront << "; "
+ << "CompatibleFirst = " << iter->compatibleFirst << "; " << '\n';
+ }
- return _foundCycle;
+ log() << sb.str();
+ }
+}
+
+PartitionedLockHead* LockManager::Partition::find(ResourceId resId) {
+ Map::iterator it = data.find(resId);
+ return it == data.end() ? NULL : it->second;
+}
+
+PartitionedLockHead* LockManager::Partition::findOrInsert(ResourceId resId) {
+ PartitionedLockHead* lock;
+ Map::iterator it = data.find(resId);
+ if (it == data.end()) {
+ lock = new PartitionedLockHead();
+ lock->initNew(resId);
+
+ data.insert(Map::value_type(resId, lock));
+ } else {
+ lock = it->second;
}
+ return lock;
+}
+
+LockHead* LockManager::LockBucket::findOrInsert(ResourceId resId) {
+ LockHead* lock;
+ Map::iterator it = data.find(resId);
+ if (it == data.end()) {
+ lock = new LockHead();
+ lock->initNew(resId);
+
+ data.insert(Map::value_type(resId, lock));
+ } else {
+ lock = it->second;
+ }
+ return lock;
+}
- string DeadlockDetector::toString() const {
- StringBuilder sb;
+//
+// DeadlockDetector
+//
- for (WaitForGraph::const_iterator it = _graph.begin(); it != _graph.end(); it++) {
- sb << "Locker " << it->first << " waits for resource " << it->second.resId.toString()
- << " held by [";
+DeadlockDetector::DeadlockDetector(const LockManager& lockMgr, const Locker* initialLocker)
+ : _lockMgr(lockMgr), _initialLockerId(initialLocker->getId()), _foundCycle(false) {
+ const ResourceId resId = initialLocker->getWaitingResource();
- const ConflictingOwnersList owners = it->second.owners;
- for (ConflictingOwnersList::const_iterator itW = owners.begin();
- itW != owners.end();
- itW++) {
+ // If there is no resource waiting there is nothing to do
+ if (resId.isValid()) {
+ _queue.push_front(UnprocessedNode(_initialLockerId, resId));
+ }
+}
- sb << *itW << ", ";
- }
+bool DeadlockDetector::next() {
+ if (_queue.empty())
+ return false;
- sb << "]\n";
- }
+ UnprocessedNode front = _queue.front();
+ _queue.pop_front();
- return sb.str();
- }
+ _processNextNode(front);
- void DeadlockDetector::_processNextNode(const UnprocessedNode& node) {
- // Locate the request
- LockManager::LockBucket* bucket = _lockMgr._getBucket(node.resId);
- stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
+ return !_queue.empty();
+}
- LockManager::LockBucket::Map::const_iterator iter = bucket->data.find(node.resId);
- if (iter == bucket->data.end()) {
- return;
- }
+bool DeadlockDetector::hasCycle() const {
+ invariant(_queue.empty());
+
+ return _foundCycle;
+}
- const LockHead* lock = iter->second;
+string DeadlockDetector::toString() const {
+ StringBuilder sb;
- LockRequest* request = lock->findRequest(node.lockerId);
+ for (WaitForGraph::const_iterator it = _graph.begin(); it != _graph.end(); it++) {
+ sb << "Locker " << it->first << " waits for resource " << it->second.resId.toString()
+ << " held by [";
- // It is possible that a request which was thought to be waiting suddenly became
- // granted, so check that before proceeding
- if (!request || (request->status == LockRequest::STATUS_GRANTED)) {
- return;
+ const ConflictingOwnersList owners = it->second.owners;
+ for (ConflictingOwnersList::const_iterator itW = owners.begin(); itW != owners.end();
+ itW++) {
+ sb << *itW << ", ";
}
- std::pair<WaitForGraph::iterator, bool> val =
- _graph.insert(WaitForGraphPair(node.lockerId, Edges(node.resId)));
- if (!val.second) {
- // We already saw this locker id, which means we have a cycle.
- if (!_foundCycle) {
- _foundCycle = (node.lockerId == _initialLockerId);
- }
+ sb << "]\n";
+ }
- return;
- }
+ return sb.str();
+}
- Edges& edges = val.first->second;
+void DeadlockDetector::_processNextNode(const UnprocessedNode& node) {
+ // Locate the request
+ LockManager::LockBucket* bucket = _lockMgr._getBucket(node.resId);
+ stdx::lock_guard<SimpleMutex> scopedLock(bucket->mutex);
- bool seen = false;
- for (LockRequest* it = lock->grantedList._back; it != NULL; it = it->prev) {
- // We can't conflict with ourselves
- if (it == request) {
- seen = true;
- continue;
- }
+ LockManager::LockBucket::Map::const_iterator iter = bucket->data.find(node.resId);
+ if (iter == bucket->data.end()) {
+ return;
+ }
- // If we are a regular conflicting request, both granted and conversion modes need to
- // be checked for conflict, since conversions will be granted first.
- if (request->status == LockRequest::STATUS_WAITING) {
- if (conflicts(request->mode, modeMask(it->mode)) ||
- conflicts(request->mode, modeMask(it->convertMode))) {
+ const LockHead* lock = iter->second;
- const LockerId lockerId = it->locker->getId();
- const ResourceId waitResId = it->locker->getWaitingResource();
+ LockRequest* request = lock->findRequest(node.lockerId);
- if (waitResId.isValid()) {
- _queue.push_front(UnprocessedNode(lockerId, waitResId));
- edges.owners.push_back(lockerId);
- }
- }
+ // It is possible that a request which was thought to be waiting suddenly became
+ // granted, so check that before proceeding
+ if (!request || (request->status == LockRequest::STATUS_GRANTED)) {
+ return;
+ }
- continue;
- }
+ std::pair<WaitForGraph::iterator, bool> val =
+ _graph.insert(WaitForGraphPair(node.lockerId, Edges(node.resId)));
+ if (!val.second) {
+ // We already saw this locker id, which means we have a cycle.
+ if (!_foundCycle) {
+ _foundCycle = (node.lockerId == _initialLockerId);
+ }
- // If we are a conversion request, only requests, which are before us need to be
- // accounted for.
- invariant(request->status == LockRequest::STATUS_CONVERTING);
+ return;
+ }
- if (conflicts(request->convertMode, modeMask(it->mode)) ||
- (seen && conflicts(request->convertMode, modeMask(it->convertMode)))) {
+ Edges& edges = val.first->second;
+ bool seen = false;
+ for (LockRequest* it = lock->grantedList._back; it != NULL; it = it->prev) {
+ // We can't conflict with ourselves
+ if (it == request) {
+ seen = true;
+ continue;
+ }
+
+ // If we are a regular conflicting request, both granted and conversion modes need to
+ // be checked for conflict, since conversions will be granted first.
+ if (request->status == LockRequest::STATUS_WAITING) {
+ if (conflicts(request->mode, modeMask(it->mode)) ||
+ conflicts(request->mode, modeMask(it->convertMode))) {
const LockerId lockerId = it->locker->getId();
const ResourceId waitResId = it->locker->getWaitingResource();
@@ -1042,117 +978,132 @@ namespace {
edges.owners.push_back(lockerId);
}
}
+
+ continue;
}
- // All conflicting waits, which would be granted before us
- for (LockRequest* it = request->prev;
- (request->status == LockRequest::STATUS_WAITING) && (it != NULL);
- it = it->prev) {
+ // If we are a conversion request, only requests, which are before us need to be
+ // accounted for.
+ invariant(request->status == LockRequest::STATUS_CONVERTING);
- // We started from the previous element, so we should never see ourselves
- invariant(it != request);
+ if (conflicts(request->convertMode, modeMask(it->mode)) ||
+ (seen && conflicts(request->convertMode, modeMask(it->convertMode)))) {
+ const LockerId lockerId = it->locker->getId();
+ const ResourceId waitResId = it->locker->getWaitingResource();
- if (conflicts(request->mode, modeMask(it->mode))) {
- const LockerId lockerId = it->locker->getId();
- const ResourceId waitResId = it->locker->getWaitingResource();
+ if (waitResId.isValid()) {
+ _queue.push_front(UnprocessedNode(lockerId, waitResId));
+ edges.owners.push_back(lockerId);
+ }
+ }
+ }
- if (waitResId.isValid()) {
- _queue.push_front(UnprocessedNode(lockerId, waitResId));
- edges.owners.push_back(lockerId);
- }
+ // All conflicting waits, which would be granted before us
+ for (LockRequest* it = request->prev;
+ (request->status == LockRequest::STATUS_WAITING) && (it != NULL);
+ it = it->prev) {
+ // We started from the previous element, so we should never see ourselves
+ invariant(it != request);
+
+ if (conflicts(request->mode, modeMask(it->mode))) {
+ const LockerId lockerId = it->locker->getId();
+ const ResourceId waitResId = it->locker->getWaitingResource();
+
+ if (waitResId.isValid()) {
+ _queue.push_front(UnprocessedNode(lockerId, waitResId));
+ edges.owners.push_back(lockerId);
}
}
}
+}
- //
- // ResourceId
- //
+//
+// ResourceId
+//
- static const StringData::Hasher stringDataHashFunction = StringData::Hasher();
+static const StringData::Hasher stringDataHashFunction = StringData::Hasher();
- uint64_t ResourceId::fullHash(ResourceType type, uint64_t hashId) {
- return (static_cast<uint64_t>(type) << (64 - resourceTypeBits))
- + (hashId & (std::numeric_limits<uint64_t>::max() >> resourceTypeBits));
- }
+uint64_t ResourceId::fullHash(ResourceType type, uint64_t hashId) {
+ return (static_cast<uint64_t>(type) << (64 - resourceTypeBits)) +
+ (hashId & (std::numeric_limits<uint64_t>::max() >> resourceTypeBits));
+}
- ResourceId::ResourceId(ResourceType type, StringData ns)
- : _fullHash(fullHash(type, stringDataHashFunction(ns))) {
+ResourceId::ResourceId(ResourceType type, StringData ns)
+ : _fullHash(fullHash(type, stringDataHashFunction(ns))) {
#ifdef MONGO_CONFIG_DEBUG_BUILD
- _nsCopy = ns.toString();
+ _nsCopy = ns.toString();
#endif
- }
+}
- ResourceId::ResourceId(ResourceType type, const string& ns)
- : _fullHash(fullHash(type, stringDataHashFunction(ns))) {
+ResourceId::ResourceId(ResourceType type, const string& ns)
+ : _fullHash(fullHash(type, stringDataHashFunction(ns))) {
#ifdef MONGO_CONFIG_DEBUG_BUILD
- _nsCopy = ns;
+ _nsCopy = ns;
#endif
- }
+}
- ResourceId::ResourceId(ResourceType type, uint64_t hashId)
- : _fullHash(fullHash(type, hashId)) { }
+ResourceId::ResourceId(ResourceType type, uint64_t hashId) : _fullHash(fullHash(type, hashId)) {}
- string ResourceId::toString() const {
- StringBuilder ss;
- ss << "{" << _fullHash << ": " << resourceTypeName(getType())
- << ", " << getHashId();
+string ResourceId::toString() const {
+ StringBuilder ss;
+ ss << "{" << _fullHash << ": " << resourceTypeName(getType()) << ", " << getHashId();
#ifdef MONGO_CONFIG_DEBUG_BUILD
- ss << ", " << _nsCopy;
+ ss << ", " << _nsCopy;
#endif
- ss << "}";
+ ss << "}";
- return ss.str();
- }
+ return ss.str();
+}
- //
- // LockRequest
- //
+//
+// LockRequest
+//
- void LockRequest::initNew(Locker* locker, LockGrantNotification* notify) {
- this->locker = locker;
- this->notify = notify;
-
- enqueueAtFront = false;
- compatibleFirst = false;
- recursiveCount = 0;
-
- lock = NULL;
- prev = NULL;
- next = NULL;
- status = STATUS_NEW;
- partitioned = false;
- mode = MODE_NONE;
- convertMode = MODE_NONE;
- }
+void LockRequest::initNew(Locker* locker, LockGrantNotification* notify) {
+ this->locker = locker;
+ this->notify = notify;
+ enqueueAtFront = false;
+ compatibleFirst = false;
+ recursiveCount = 0;
- //
- // Helper calls
- //
+ lock = NULL;
+ prev = NULL;
+ next = NULL;
+ status = STATUS_NEW;
+ partitioned = false;
+ mode = MODE_NONE;
+ convertMode = MODE_NONE;
+}
- const char* modeName(LockMode mode) {
- return LockModeNames[mode];
- }
- const char* legacyModeName(LockMode mode) {
- return LegacyLockModeNames[mode];
- }
+//
+// Helper calls
+//
- bool isModeCovered(LockMode mode, LockMode coveringMode) {
- return (LockConflictsTable[coveringMode] | LockConflictsTable[mode]) ==
- LockConflictsTable[coveringMode];
- }
+const char* modeName(LockMode mode) {
+ return LockModeNames[mode];
+}
- const char* resourceTypeName(ResourceType resourceType) {
- return ResourceTypeNames[resourceType];
- }
+const char* legacyModeName(LockMode mode) {
+ return LegacyLockModeNames[mode];
+}
- const char* lockRequestStatusName(LockRequest::Status status) {
- return LockRequestStatusNames[status];
- }
+bool isModeCovered(LockMode mode, LockMode coveringMode) {
+ return (LockConflictsTable[coveringMode] | LockConflictsTable[mode]) ==
+ LockConflictsTable[coveringMode];
+}
+
+const char* resourceTypeName(ResourceType resourceType) {
+ return ResourceTypeNames[resourceType];
+}
+
+const char* lockRequestStatusName(LockRequest::Status status) {
+ return LockRequestStatusNames[status];
+}
-} // namespace mongo
+} // namespace mongo
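
As a quick illustration of the ResourceId::fullHash packing implemented above, the following is a minimal standalone sketch of the same bit layout: the top 3 bits carry the resource type and the remaining 61 bits carry the hash. The kResourceTypeBits constant and the pack/unpack helper names are illustrative only and are not part of this patch.

    // Sketch of the ResourceId bit layout: type in the top 3 bits, hash in the low 61 bits.
    #include <cstdint>
    #include <iostream>
    #include <limits>

    static const int kResourceTypeBits = 3;  // mirrors resourceTypeBits in the patch

    uint64_t packResource(uint64_t type, uint64_t hashId) {
        return (type << (64 - kResourceTypeBits)) +
            (hashId & (std::numeric_limits<uint64_t>::max() >> kResourceTypeBits));
    }

    uint64_t typeOf(uint64_t fullHash) {
        return fullHash >> (64 - kResourceTypeBits);
    }

    uint64_t hashOf(uint64_t fullHash) {
        return fullHash & (std::numeric_limits<uint64_t>::max() >> kResourceTypeBits);
    }

    int main() {
        const uint64_t full = packResource(4 /* e.g. RESOURCE_COLLECTION */, 324334234);
        std::cout << typeOf(full) << " " << hashOf(full) << "\n";  // prints: 4 324334234
        return 0;
    }

The defs header later in this diff static-asserts that sizeof(ResourceId) == sizeof(uint64_t) in release builds, so the whole id is exactly this one packed word.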
diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h
index badad082214..991768f54c4 100644
--- a/src/mongo/db/concurrency/lock_manager.h
+++ b/src/mongo/db/concurrency/lock_manager.h
@@ -43,247 +43,241 @@
namespace mongo {
+/**
+ * Entry point for the lock manager scheduling functionality. Don't use it directly, but
+ * instead go through the Locker interface.
+ */
+class LockManager {
+ MONGO_DISALLOW_COPYING(LockManager);
+
+public:
+ LockManager();
+ ~LockManager();
+
/**
- * Entry point for the lock manager scheduling functionality. Don't use it directly, but
- * instead go through the Locker interface.
- */
- class LockManager {
- MONGO_DISALLOW_COPYING(LockManager);
- public:
- LockManager();
- ~LockManager();
-
- /**
- * Acquires lock on the specified resource in the specified mode and returns the outcome
- * of the operation. See the details for LockResult for more information on what the
- * different results mean.
- *
- * Locking the same resource twice increments the reference count of the lock so each call
- * to lock must be matched with a call to unlock with the same resource.
- *
- * @param resId Id of the resource to be locked.
- * @param request LockRequest structure on which the state of the request will be tracked.
- * This value cannot be NULL and the notify value must be set. If the
- * return value is not LOCK_WAITING, this pointer can be freed and will
- * not be used any more.
- *
- * If the return value is LOCK_WAITING, the notification method will be
- * called at some point into the future, when the lock either becomes
- * granted or a deadlock is discovered. If unlock is called before the
- * lock becomes granted, the notification will not be invoked.
- *
- * If the return value is LOCK_WAITING, the notification object *must*
- * live at least until the notfy method has been invoked or unlock has
- * been called for the resource it was assigned to. Failure to do so will
- * cause the lock manager to call into an invalid memory location.
- * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
- *
- * @return See comments for LockResult.
- */
- LockResult lock(ResourceId resId, LockRequest* request, LockMode mode);
- LockResult convert(ResourceId resId, LockRequest* request, LockMode newMode);
-
- /**
- * Decrements the reference count of a previously locked request and if the reference count
- * becomes zero, removes the request and proceeds to granting any conflicts.
- *
- * This method always succeeds and never blocks.
- *
- * @param request A previously locked request. Calling unlock more times than lock was
- * called for the same LockRequest is an error.
- *
- * @return true if this is the last reference for the request; false otherwise
- */
- bool unlock(LockRequest* request);
-
- /**
- * Downgrades the mode in which an already granted request is held, without changing the
- * reference count of the lock request. This call never blocks, will always succeed and may
- * potentially allow other blocked lock requests to proceed.
- *
- * @param request Request, already in granted mode through a previous call to lock.
- * @param newMode Mode, which is less-restrictive than the mode in which the request is
- * already held. I.e., the conflict set of newMode must be a sub-set of
- * the conflict set of the request's current mode.
- */
- void downgrade(LockRequest* request, LockMode newMode);
-
- /**
- * Iterates through all buckets and deletes all locks, which have no requests on them. This
- * call is kind of expensive and should only be used for reducing the memory footprint of
- * the lock manager.
- */
- void cleanupUnusedLocks();
-
- /**
- * Dumps the contents of all locks to the log.
- */
- void dump() const;
-
- private:
- // The deadlock detector needs to access the buckets and locks directly
- friend class DeadlockDetector;
-
- // The lockheads need access to the partitions
- friend struct LockHead;
-
- // These types describe the locks hash table
-
- struct LockBucket {
- SimpleMutex mutex;
- typedef unordered_map<ResourceId, LockHead*> Map;
- Map data;
- LockHead* findOrInsert(ResourceId resId);
- };
-
- // Each locker maps to a partition that is used for resources acquired in intent modes
- // modes and potentially other modes that don't conflict with themselves. This avoids
- // contention on the regular LockHead in the lock manager.
- struct Partition {
- PartitionedLockHead* find(ResourceId resId);
- PartitionedLockHead* findOrInsert(ResourceId resId);
- typedef unordered_map<ResourceId, PartitionedLockHead*> Map;
- SimpleMutex mutex;
- Map data;
- };
-
- /**
- * Retrieves the bucket in which the particular resource must reside. There is no need to
- * hold a lock when calling this function.
- */
- LockBucket* _getBucket(ResourceId resId) const;
-
-
- /**
- * Retrieves the Partition that a particular LockRequest should use for intent locking.
- */
- Partition* _getPartition(LockRequest* request) const;
-
- /**
- * Prints the contents of a bucket to the log.
- */
- void _dumpBucket(const LockBucket* bucket) const;
-
- /**
- * Should be invoked when the state of a lock changes in a way, which could potentially
- * allow other blocked requests to proceed.
- *
- * MUST be called under the lock bucket's mutex.
- *
- * @param lock Lock whose grant state should be recalculated.
- * @param checkConflictQueue Whether to go through the conflict queue. This is an
- * optimisation in that we only need to check the conflict queue if one of the
- * granted modes, which was conflicting before became zero.
- */
- void _onLockModeChanged(LockHead* lock, bool checkConflictQueue);
-
- static const unsigned _numLockBuckets;
- LockBucket* _lockBuckets;
-
- static const unsigned _numPartitions;
- Partition* _partitions;
- };
+ * Acquires lock on the specified resource in the specified mode and returns the outcome
+ * of the operation. See the details for LockResult for more information on what the
+ * different results mean.
+ *
+ * Locking the same resource twice increments the reference count of the lock so each call
+ * to lock must be matched with a call to unlock with the same resource.
+ *
+ * @param resId Id of the resource to be locked.
+ * @param request LockRequest structure on which the state of the request will be tracked.
+ * This value cannot be NULL and the notify value must be set. If the
+ * return value is not LOCK_WAITING, this pointer can be freed and will
+ * not be used any more.
+ *
+ * If the return value is LOCK_WAITING, the notification method will be
+ *                     called at some point in the future, when the lock either becomes
+ * granted or a deadlock is discovered. If unlock is called before the
+ * lock becomes granted, the notification will not be invoked.
+ *
+ * If the return value is LOCK_WAITING, the notification object *must*
+ *                     live at least until the notify method has been invoked or unlock has
+ * been called for the resource it was assigned to. Failure to do so will
+ * cause the lock manager to call into an invalid memory location.
+ * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
+ *
+ * @return See comments for LockResult.
+ */
+ LockResult lock(ResourceId resId, LockRequest* request, LockMode mode);
+ LockResult convert(ResourceId resId, LockRequest* request, LockMode newMode);
+ /**
+ * Decrements the reference count of a previously locked request and if the reference count
+ * becomes zero, removes the request and proceeds to granting any conflicts.
+ *
+ * This method always succeeds and never blocks.
+ *
+ * @param request A previously locked request. Calling unlock more times than lock was
+ * called for the same LockRequest is an error.
+ *
+ * @return true if this is the last reference for the request; false otherwise
+ */
+ bool unlock(LockRequest* request);
/**
- * Iteratively builds the wait-for graph, starting from a given blocked Locker and stops either
- * when all reachable nodes have been checked or if a cycle is detected. This class is
- * thread-safe. Because locks may come and go in parallel with deadlock detection, it may
- * report false positives, but if there is a stable cycle it will be discovered.
+ * Downgrades the mode in which an already granted request is held, without changing the
+ * reference count of the lock request. This call never blocks, will always succeed and may
+ * potentially allow other blocked lock requests to proceed.
*
- * Implemented as a separate class in order to facilitate diagnostics and also unit-testing for
- * cases where locks come and go in parallel with deadlock detection.
+ * @param request Request, already in granted mode through a previous call to lock.
+ * @param newMode Mode, which is less-restrictive than the mode in which the request is
+ * already held. I.e., the conflict set of newMode must be a sub-set of
+ * the conflict set of the request's current mode.
*/
- class DeadlockDetector {
- public:
+ void downgrade(LockRequest* request, LockMode newMode);
- /**
- * Initializes the wait-for graph builder with the LM to operate on and a locker object
- * from which to start the search. Deadlock will only be reported if there is a wait cycle
- * in which the initial locker participates.
- */
- DeadlockDetector(const LockManager& lockMgr, const Locker* initialLocker);
+ /**
+ * Iterates through all buckets and deletes all locks, which have no requests on them. This
+ * call is kind of expensive and should only be used for reducing the memory footprint of
+ * the lock manager.
+ */
+ void cleanupUnusedLocks();
- DeadlockDetector& check() {
- while (next()) {
+ /**
+ * Dumps the contents of all locks to the log.
+ */
+ void dump() const;
- }
+private:
+ // The deadlock detector needs to access the buckets and locks directly
+ friend class DeadlockDetector;
- return *this;
- }
+ // The lockheads need access to the partitions
+ friend struct LockHead;
- /**
- * Processes the next wait for node and queues up its set of owners to the unprocessed
- * queue.
- *
- * @return true if there are more unprocessed nodes and no cycle has been discovered yet;
- * false if either all reachable nodes have been processed or
- */
- bool next();
+ // These types describe the locks hash table
- /**
- * Checks whether a cycle exists in the wait-for graph, which has been built so far. It's
- * only useful to call this after next() has returned false.
- */
- bool hasCycle() const;
+ struct LockBucket {
+ SimpleMutex mutex;
+ typedef unordered_map<ResourceId, LockHead*> Map;
+ Map data;
+ LockHead* findOrInsert(ResourceId resId);
+ };
+
+    // Each locker maps to a partition that is used for resources acquired in intent modes
+    // and potentially other modes that don't conflict with themselves. This avoids
+ // contention on the regular LockHead in the lock manager.
+ struct Partition {
+ PartitionedLockHead* find(ResourceId resId);
+ PartitionedLockHead* findOrInsert(ResourceId resId);
+ typedef unordered_map<ResourceId, PartitionedLockHead*> Map;
+ SimpleMutex mutex;
+ Map data;
+ };
- /**
- * Produces a string containing the wait-for graph that has been built so far.
- */
- std::string toString() const;
+ /**
+ * Retrieves the bucket in which the particular resource must reside. There is no need to
+ * hold a lock when calling this function.
+ */
+ LockBucket* _getBucket(ResourceId resId) const;
- private:
- // An entry in the owners list below means that some locker L is blocked on some resource
- // resId, which is currently held by the given set of owners. The reason to store it in
- // such form is in order to avoid storing pointers to the lockers or to have to look them
- // up by id, both of which require some form of synchronization other than locking the
- // bucket for the resource. Instead, given the resId, we can lock the bucket for the lock
- // and find the respective LockRequests and continue our scan forward.
- typedef std::vector<LockerId> ConflictingOwnersList;
+ /**
+ * Retrieves the Partition that a particular LockRequest should use for intent locking.
+ */
+ Partition* _getPartition(LockRequest* request) const;
- struct Edges {
- explicit Edges(ResourceId resId) : resId(resId) { }
+ /**
+ * Prints the contents of a bucket to the log.
+ */
+ void _dumpBucket(const LockBucket* bucket) const;
- // Resource id indicating the lock node
- ResourceId resId;
+ /**
+ * Should be invoked when the state of a lock changes in a way, which could potentially
+ * allow other blocked requests to proceed.
+ *
+ * MUST be called under the lock bucket's mutex.
+ *
+ * @param lock Lock whose grant state should be recalculated.
+ * @param checkConflictQueue Whether to go through the conflict queue. This is an
+     *        optimisation: the conflict queue only needs to be re-checked if one of the
+     *        previously conflicting granted modes has dropped to zero.
+ */
+ void _onLockModeChanged(LockHead* lock, bool checkConflictQueue);
- // List of lock owners/pariticipants with which the initial locker conflicts for
- // obtaining the lock
- ConflictingOwnersList owners;
- };
+ static const unsigned _numLockBuckets;
+ LockBucket* _lockBuckets;
- typedef std::map<LockerId, Edges> WaitForGraph;
- typedef WaitForGraph::value_type WaitForGraphPair;
+ static const unsigned _numPartitions;
+ Partition* _partitions;
+};
- // We don't want to hold locks between iteration cycles, so just store the resourceId and
- // the lockerId so we can directly find them from the lock manager.
- struct UnprocessedNode {
- UnprocessedNode(LockerId lockerId, ResourceId resId)
- : lockerId(lockerId),
- resId(resId) {
+/**
+ * Iteratively builds the wait-for graph, starting from a given blocked Locker and stops either
+ * when all reachable nodes have been checked or if a cycle is detected. This class is
+ * thread-safe. Because locks may come and go in parallel with deadlock detection, it may
+ * report false positives, but if there is a stable cycle it will be discovered.
+ *
+ * Implemented as a separate class in order to facilitate diagnostics and also unit-testing for
+ * cases where locks come and go in parallel with deadlock detection.
+ */
+class DeadlockDetector {
+public:
+ /**
+ * Initializes the wait-for graph builder with the LM to operate on and a locker object
+ * from which to start the search. Deadlock will only be reported if there is a wait cycle
+ * in which the initial locker participates.
+ */
+ DeadlockDetector(const LockManager& lockMgr, const Locker* initialLocker);
- }
+ DeadlockDetector& check() {
+ while (next()) {
+ }
- LockerId lockerId;
- ResourceId resId;
- };
+ return *this;
+ }
- typedef std::deque<UnprocessedNode> UnprocessedNodesQueue;
+ /**
+ * Processes the next wait for node and queues up its set of owners to the unprocessed
+ * queue.
+ *
+ * @return true if there are more unprocessed nodes and no cycle has been discovered yet;
+     *         false if either all reachable nodes have been processed or a cycle has been
+     *         discovered.
+ */
+ bool next();
+ /**
+ * Checks whether a cycle exists in the wait-for graph, which has been built so far. It's
+ * only useful to call this after next() has returned false.
+ */
+ bool hasCycle() const;
- void _processNextNode(const UnprocessedNode& node);
+ /**
+ * Produces a string containing the wait-for graph that has been built so far.
+ */
+ std::string toString() const;
+
+private:
+ // An entry in the owners list below means that some locker L is blocked on some resource
+ // resId, which is currently held by the given set of owners. The reason to store it in
+ // such form is in order to avoid storing pointers to the lockers or to have to look them
+ // up by id, both of which require some form of synchronization other than locking the
+ // bucket for the resource. Instead, given the resId, we can lock the bucket for the lock
+ // and find the respective LockRequests and continue our scan forward.
+ typedef std::vector<LockerId> ConflictingOwnersList;
+
+ struct Edges {
+ explicit Edges(ResourceId resId) : resId(resId) {}
+
+ // Resource id indicating the lock node
+ ResourceId resId;
+
+        // List of lock owners/participants with which the initial locker conflicts for
+ // obtaining the lock
+ ConflictingOwnersList owners;
+ };
+ typedef std::map<LockerId, Edges> WaitForGraph;
+ typedef WaitForGraph::value_type WaitForGraphPair;
- // Not owned. Lifetime must be longer than that of the graph builder.
- const LockManager& _lockMgr;
- const LockerId _initialLockerId;
- UnprocessedNodesQueue _queue;
- WaitForGraph _graph;
+ // We don't want to hold locks between iteration cycles, so just store the resourceId and
+ // the lockerId so we can directly find them from the lock manager.
+ struct UnprocessedNode {
+ UnprocessedNode(LockerId lockerId, ResourceId resId) : lockerId(lockerId), resId(resId) {}
- bool _foundCycle;
+ LockerId lockerId;
+ ResourceId resId;
};
-} // namespace mongo
+ typedef std::deque<UnprocessedNode> UnprocessedNodesQueue;
+
+
+ void _processNextNode(const UnprocessedNode& node);
+
+
+ // Not owned. Lifetime must be longer than that of the graph builder.
+ const LockManager& _lockMgr;
+ const LockerId _initialLockerId;
+
+ UnprocessedNodesQueue _queue;
+ WaitForGraph _graph;
+
+ bool _foundCycle;
+};
+
+} // namespace mongo
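
To see how the classes declared in this header fit together, here is a usage sketch based on the comments above and the unit tests later in this diff. It assumes the MongoDB build environment (this lock_manager.h plus a concrete Locker and LockGrantNotification supplied by the caller), so it is not a standalone program; the function name lockUnlockSketch is illustrative.

    // Usage sketch: acquire, optionally inspect the wait-for graph, then release.
    #include <string>

    #include "mongo/db/concurrency/lock_manager.h"

    void lockUnlockSketch(mongo::LockManager& lockMgr,
                          mongo::Locker* locker,
                          mongo::LockGrantNotification* notify) {
        const mongo::ResourceId resId(mongo::RESOURCE_COLLECTION,
                                      std::string("TestDB.collection"));

        mongo::LockRequest request;
        request.initNew(locker, notify);

        // LOCK_OK means the request is already on the granted list; LOCK_WAITING means it was
        // queued and notify->notify() fires once it is granted or a deadlock is discovered.
        if (lockMgr.lock(resId, &request, mongo::MODE_S) == mongo::LOCK_WAITING) {
            // While blocked, the wait-for graph can be built and checked for a cycle
            // involving this locker.
            mongo::DeadlockDetector wfg(lockMgr, locker);
            if (wfg.check().hasCycle()) {
                // Up to the caller: e.g. release some locks and retry, or fail the operation.
            }
        }

        // Every successful lock() must be paired with an unlock(); the last unlock removes
        // the request (and cancels it if it was still waiting).
        lockMgr.unlock(&request);
    }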
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index 33cc0279ea9..2f93a4ca017 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -39,369 +39,371 @@
namespace mongo {
- class Locker;
+class Locker;
- struct LockHead;
- struct PartitionedLockHead;
+struct LockHead;
+struct PartitionedLockHead;
+
+/**
+ * Lock modes.
+ *
+ * Compatibility Matrix
+ * Granted mode
+ * ---------------.--------------------------------------------------------.
+ * Requested Mode | MODE_NONE MODE_IS MODE_IX MODE_S MODE_X |
+ * MODE_IS | + + + + - |
+ * MODE_IX | + + + - - |
+ * MODE_S | + + - + - |
+ * MODE_X | + - - - - |
+ */
+enum LockMode {
+ MODE_NONE = 0,
+ MODE_IS = 1,
+ MODE_IX = 2,
+ MODE_S = 3,
+ MODE_X = 4,
+
+ // Counts the lock modes. Used for array size allocations, etc. Always insert new lock
+ // modes above this entry.
+ LockModesCount
+};
+
+/**
+ * Returns a human-readable name for the specified lock mode.
+ */
+const char* modeName(LockMode mode);
+
+/**
+ * Legacy lock mode names in parity for 2.6 reports.
+ */
+const char* legacyModeName(LockMode mode);
+
+/**
+ * Mode A is covered by mode B if the set of conflicts for mode A is a subset of the set of
+ * conflicts for mode B. For example S is covered by X. IS is covered by S. However, IX is not
+ * covered by S or IS.
+ */
+bool isModeCovered(LockMode mode, LockMode coveringMode);
+
+/**
+ * Returns whether the passed in mode is S or IS. Used for validation checks.
+ */
+inline bool isSharedLockMode(LockMode mode) {
+ return (mode == MODE_IS || mode == MODE_S);
+}
+
+
+/**
+ * Return values for the locking functions of the lock manager.
+ */
+enum LockResult {
/**
- * Lock modes.
- *
- * Compatibility Matrix
- * Granted mode
- * ---------------.--------------------------------------------------------.
- * Requested Mode | MODE_NONE MODE_IS MODE_IX MODE_S MODE_X |
- * MODE_IS | + + + + - |
- * MODE_IX | + + + - - |
- * MODE_S | + + - + - |
- * MODE_X | + - - - - |
+ * The lock request was granted and is now on the granted list for the specified resource.
*/
- enum LockMode {
- MODE_NONE = 0,
- MODE_IS = 1,
- MODE_IX = 2,
- MODE_S = 3,
- MODE_X = 4,
-
- // Counts the lock modes. Used for array size allocations, etc. Always insert new lock
- // modes above this entry.
- LockModesCount
- };
+ LOCK_OK,
/**
- * Returns a human-readable name for the specified lock mode.
+ * The lock request was not granted because of conflict. If this value is returned, the
+ * request was placed on the conflict queue of the specified resource and a call to the
+ * LockGrantNotification::notify callback should be expected with the resource whose lock
+ * was requested.
*/
- const char* modeName(LockMode mode);
+ LOCK_WAITING,
/**
- * Legacy lock mode names in parity for 2.6 reports.
+ * The lock request waited, but timed out before it could be granted. This value is never
+ * returned by the LockManager methods here, but by the Locker class, which offers
+     * the capability to block while waiting for locks.
*/
- const char* legacyModeName(LockMode mode);
+ LOCK_TIMEOUT,
/**
- * Mode A is covered by mode B if the set of conflicts for mode A is a subset of the set of
- * conflicts for mode B. For example S is covered by X. IS is covered by S. However, IX is not
- * covered by S or IS.
+ * The lock request was not granted because it would result in a deadlock. No changes to
+ * the state of the Locker would be made if this value is returned (i.e., it will not be
+ * killed due to deadlock). It is up to the caller to decide how to recover from this
+ * return value - could be either release some locks and try again, or just bail with an
+ * error and have some upper code handle it.
*/
- bool isModeCovered(LockMode mode, LockMode coveringMode);
+ LOCK_DEADLOCK,
/**
- * Returns whether the passed in mode is S or IS. Used for validation checks.
+ * This is used as an initialiser value. Should never be returned.
*/
- inline bool isSharedLockMode(LockMode mode) {
- return (mode == MODE_IS || mode == MODE_S);
- }
+ LOCK_INVALID
+};
- /**
- * Return values for the locking functions of the lock manager.
- */
- enum LockResult {
-
- /**
- * The lock request was granted and is now on the granted list for the specified resource.
- */
- LOCK_OK,
-
- /**
- * The lock request was not granted because of conflict. If this value is returned, the
- * request was placed on the conflict queue of the specified resource and a call to the
- * LockGrantNotification::notify callback should be expected with the resource whose lock
- * was requested.
- */
- LOCK_WAITING,
-
- /**
- * The lock request waited, but timed out before it could be granted. This value is never
- * returned by the LockManager methods here, but by the Locker class, which offers
- * capability to block while waiting for locks.
- */
- LOCK_TIMEOUT,
-
- /**
- * The lock request was not granted because it would result in a deadlock. No changes to
- * the state of the Locker would be made if this value is returned (i.e., it will not be
- * killed due to deadlock). It is up to the caller to decide how to recover from this
- * return value - could be either release some locks and try again, or just bail with an
- * error and have some upper code handle it.
- */
- LOCK_DEADLOCK,
-
- /**
- * This is used as an initialiser value. Should never be returned.
- */
- LOCK_INVALID
- };
+/**
+ * Hierarchy of resource types. The lock manager knows nothing about this hierarchy, it is
+ * purely logical. Resources of different types will never conflict with each other.
+ *
+ * While the lock manager does not know or care about ordering, the general policy is that
+ * resources are acquired in the order below. For example, one might first acquire a
+ * RESOURCE_GLOBAL and then the desired RESOURCE_DATABASE, both using intent modes, and
+ * finally a RESOURCE_COLLECTION in exclusive mode. When locking multiple resources of the
+ * same type, the canonical order is by resourceId order.
+ *
+ * It is OK to lock resources out of order, but it is the user's responsibility to ensure
+ * ordering is consistent so deadlock cannot occur.
+ */
+enum ResourceType {
+ // Types used for special resources, use with a hash id from ResourceId::SingletonHashIds.
+ RESOURCE_INVALID = 0,
+ RESOURCE_GLOBAL, // Used for mode changes or global exclusive operations
+ RESOURCE_MMAPV1_FLUSH, // Necessary only for the MMAPv1 engine
+
+ // Generic resources
+ RESOURCE_DATABASE,
+ RESOURCE_COLLECTION,
+ RESOURCE_METADATA,
+
+ // Counts the rest. Always insert new resource types above this entry.
+ ResourceTypesCount
+};
+
+/**
+ * Returns a human-readable name for the specified resource type.
+ */
+const char* resourceTypeName(ResourceType resourceType);
+/**
+ * Uniquely identifies a lockable resource.
+ */
+class ResourceId {
+ // We only use 3 bits for the resource type in the ResourceId hash
+ enum { resourceTypeBits = 3 };
+ BOOST_STATIC_ASSERT(ResourceTypesCount <= (1 << resourceTypeBits));
+public:
/**
- * Hierarchy of resource types. The lock manager knows nothing about this hierarchy, it is
- * purely logical. Resources of different types will never conflict with each other.
- *
- * While the lock manager does not know or care about ordering, the general policy is that
- * resources are acquired in the order below. For example, one might first acquire a
- * RESOURCE_GLOBAL and then the desired RESOURCE_DATABASE, both using intent modes, and
- * finally a RESOURCE_COLLECTION in exclusive mode. When locking multiple resources of the
- * same type, the canonical order is by resourceId order.
- *
- * It is OK to lock resources out of order, but it is the users responsibility to ensure
- * ordering is consistent so deadlock cannot occur.
+ * Assign hash ids for special resources to avoid accidental reuse of ids. For ids used
+ * with the same ResourceType, the order here must be the same as the locking order.
*/
- enum ResourceType {
- // Types used for special resources, use with a hash id from ResourceId::SingletonHashIds.
- RESOURCE_INVALID = 0,
- RESOURCE_GLOBAL, // Used for mode changes or global exclusive operations
- RESOURCE_MMAPV1_FLUSH, // Necessary only for the MMAPv1 engine
-
- // Generic resources
- RESOURCE_DATABASE,
- RESOURCE_COLLECTION,
- RESOURCE_METADATA,
-
- // Counts the rest. Always insert new resource types above this entry.
- ResourceTypesCount
+ enum SingletonHashIds {
+ SINGLETON_INVALID = 0,
+ SINGLETON_PARALLEL_BATCH_WRITER_MODE,
+ SINGLETON_GLOBAL,
+ SINGLETON_MMAPV1_FLUSH
};
- /**
- * Returns a human-readable name for the specified resource type.
- */
- const char* resourceTypeName(ResourceType resourceType);
+ ResourceId() : _fullHash(0) {}
+ ResourceId(ResourceType type, StringData ns);
+ ResourceId(ResourceType type, const std::string& ns);
+ ResourceId(ResourceType type, uint64_t hashId);
+
+ bool isValid() const {
+ return getType() != RESOURCE_INVALID;
+ }
+
+ operator uint64_t() const {
+ return _fullHash;
+ }
+ // This defines the canonical locking order, first by type and then hash id
+ bool operator<(const ResourceId& rhs) const {
+ return _fullHash < rhs._fullHash;
+ }
+
+ ResourceType getType() const {
+ return static_cast<ResourceType>(_fullHash >> (64 - resourceTypeBits));
+ }
+
+ uint64_t getHashId() const {
+ return _fullHash & (std::numeric_limits<uint64_t>::max() >> resourceTypeBits);
+ }
+
+ std::string toString() const;
+
+private:
/**
- * Uniquely identifies a lockable resource.
+ * The top 'resourceTypeBits' bits of '_fullHash' represent the resource type,
+ * while the remaining bits contain the bottom bits of the hashId. This avoids false
+ * conflicts between resources of different types, which is necessary to prevent deadlocks.
*/
- class ResourceId {
- // We only use 3 bits for the resource type in the ResourceId hash
- enum {resourceTypeBits = 3};
- BOOST_STATIC_ASSERT(ResourceTypesCount <= (1 << resourceTypeBits));
-
- public:
- /**
- * Assign hash ids for special resources to avoid accidental reuse of ids. For ids used
- * with the same ResourceType, the order here must be the same as the locking order.
- */
- enum SingletonHashIds {
- SINGLETON_INVALID = 0,
- SINGLETON_PARALLEL_BATCH_WRITER_MODE,
- SINGLETON_GLOBAL,
- SINGLETON_MMAPV1_FLUSH
- };
-
- ResourceId() : _fullHash(0) { }
- ResourceId(ResourceType type, StringData ns);
- ResourceId(ResourceType type, const std::string& ns);
- ResourceId(ResourceType type, uint64_t hashId);
-
- bool isValid() const { return getType() != RESOURCE_INVALID; }
-
- operator uint64_t() const {
- return _fullHash;
- }
-
- // This defines the canonical locking order, first by type and then hash id
- bool operator<(const ResourceId& rhs) const {
- return _fullHash < rhs._fullHash;
- }
-
- ResourceType getType() const {
- return static_cast<ResourceType>(_fullHash >> (64 - resourceTypeBits));
- }
-
- uint64_t getHashId() const {
- return _fullHash & (std::numeric_limits<uint64_t>::max() >> resourceTypeBits);
- }
-
- std::string toString() const;
-
- private:
- /**
- * The top 'resourceTypeBits' bits of '_fullHash' represent the resource type,
- * while the remaining bits contain the bottom bits of the hashId. This avoids false
- * conflicts between resources of different types, which is necessary to prevent deadlocks.
- */
- uint64_t _fullHash;
-
- static uint64_t fullHash(ResourceType type, uint64_t hashId);
+ uint64_t _fullHash;
+
+ static uint64_t fullHash(ResourceType type, uint64_t hashId);
#ifdef MONGO_CONFIG_DEBUG_BUILD
- // Keep the complete namespace name for debugging purposes (TODO: this will be
- // removed once we are confident in the robustness of the lock manager).
- std::string _nsCopy;
+ // Keep the complete namespace name for debugging purposes (TODO: this will be
+ // removed once we are confident in the robustness of the lock manager).
+ std::string _nsCopy;
#endif
- };
+};
#ifndef MONGO_CONFIG_DEBUG_BUILD
- // Treat the resource ids as 64-bit integers in release mode in order to ensure we do
- // not spend too much time doing comparisons for hashing.
- BOOST_STATIC_ASSERT(sizeof(ResourceId) == sizeof(uint64_t));
+// Treat the resource ids as 64-bit integers in release mode in order to ensure we do
+// not spend too much time doing comparisons for hashing.
+BOOST_STATIC_ASSERT(sizeof(ResourceId) == sizeof(uint64_t));
#endif
- // Type to uniquely identify a given locker object
- typedef uint64_t LockerId;
+// Type to uniquely identify a given locker object
+typedef uint64_t LockerId;
- // Hardcoded resource id for the oplog collection, which is special-cased both for resource
- // acquisition purposes and for statistics reporting.
- extern const ResourceId resourceIdLocalDB;
- extern const ResourceId resourceIdOplog;
+// Hardcoded resource id for the oplog collection, which is special-cased both for resource
+// acquisition purposes and for statistics reporting.
+extern const ResourceId resourceIdLocalDB;
+extern const ResourceId resourceIdOplog;
- // Hardcoded resource id for admin db. This is to ensure direct writes to auth collections
- // are serialized (see SERVER-16092)
- extern const ResourceId resourceIdAdminDB;
+// Hardcoded resource id for admin db. This is to ensure direct writes to auth collections
+// are serialized (see SERVER-16092)
+extern const ResourceId resourceIdAdminDB;
- // Hardcoded resource id for ParallelBatchWriterMode. We use the same resource type
- // as resourceIdGlobal. This will also ensure the waits are reported as global, which
- // is appropriate. The lock will never be contended unless the parallel batch writers
- // must stop all other accesses globally. This resource must be locked before all other
- // resources (including resourceIdGlobal). Replication applier threads don't take this
- // lock.
- // TODO: Merge this with resourceIdGlobal
- extern const ResourceId resourceIdParallelBatchWriterMode;
+// Hardcoded resource id for ParallelBatchWriterMode. We use the same resource type
+// as resourceIdGlobal. This will also ensure the waits are reported as global, which
+// is appropriate. The lock will never be contended unless the parallel batch writers
+// must stop all other accesses globally. This resource must be locked before all other
+// resources (including resourceIdGlobal). Replication applier threads don't take this
+// lock.
+// TODO: Merge this with resourceIdGlobal
+extern const ResourceId resourceIdParallelBatchWriterMode;
+
+/**
+ * Interface on which granted lock requests will be notified. See the contract for the notify
+ * method for more information and also the LockManager::lock call.
+ *
+ * The default implementation of this method would simply block on an event until notify has
+ * been invoked (see CondVarLockGrantNotification).
+ *
+ * Test implementations could just count the number of notifications and their outcome so that
+ * they can validate locks are granted as desired and drive the test execution.
+ */
+class LockGrantNotification {
+public:
+ virtual ~LockGrantNotification() {}
/**
- * Interface on which granted lock requests will be notified. See the contract for the notify
- * method for more information and also the LockManager::lock call.
+ * This method is invoked at most once for each lock request and indicates the outcome of
+ * the lock acquisition for the specified resource id.
+ *
+     * It will not be called if a lock acquisition (be it in waiting or converting
+     * state) is cancelled through a call to unlock.
*
- * The default implementation of this method would simply block on an event until notify has
- * been invoked (see CondVarLockGrantNotification).
+ * IMPORTANT: This callback runs under a spinlock for the lock manager, so the work done
+ * inside must be kept to a minimum and no locks or operations which may block
+ * should be run. Also, no methods which call back into the lock manager should
+     *            be invoked from within this method (LockManager is not reentrant).
*
- * Test implementations could just count the number of notifications and their outcome so that
- * they can validate locks are granted as desired and drive the test execution.
+ * @resId ResourceId for which a lock operation was previously called.
+ * @result Outcome of the lock operation.
*/
- class LockGrantNotification {
- public:
- virtual ~LockGrantNotification() {}
-
- /**
- * This method is invoked at most once for each lock request and indicates the outcome of
- * the lock acquisition for the specified resource id.
- *
- * Cases where it won't be called are if a lock acquisition (be it in waiting or converting
- * state) is cancelled through a call to unlock.
- *
- * IMPORTANT: This callback runs under a spinlock for the lock manager, so the work done
- * inside must be kept to a minimum and no locks or operations which may block
- * should be run. Also, no methods which call back into the lock manager should
- * be invoked from within this methods (LockManager is not reentrant).
- *
- * @resId ResourceId for which a lock operation was previously called.
- * @result Outcome of the lock operation.
- */
- virtual void notify(ResourceId resId, LockResult result) = 0;
- };
+ virtual void notify(ResourceId resId, LockResult result) = 0;
+};
- /**
- * There is one of those entries per each request for a lock. They hang on a linked list off
- * the LockHead or off a PartitionedLockHead and also are in a map for each Locker. This
- * structure is not thread-safe.
- *
- * LockRequest are owned by the Locker class and it controls their lifetime. They should not
- * be deleted while on the LockManager though (see the contract for the lock/unlock methods).
- */
- struct LockRequest {
-
- enum Status {
- STATUS_NEW,
- STATUS_GRANTED,
- STATUS_WAITING,
- STATUS_CONVERTING,
-
- // Counts the rest. Always insert new status types above this entry.
- StatusCount
- };
-
- /**
- * Used for initialization of a LockRequest, which might have been retrieved from cache.
- */
- void initNew(Locker* locker, LockGrantNotification* notify);
-
-
- //
- // These fields are maintained by the Locker class
- //
-
- // This is the Locker, which created this LockRequest. Pointer is not owned, just
- // referenced. Must outlive the LockRequest.
- Locker* locker;
-
- // Not owned, just referenced. If a request is in the WAITING or CONVERTING state, must
- // live at least until LockManager::unlock is cancelled or the notification has been
- // invoked.
- LockGrantNotification* notify;
-
-
- //
- // These fields are maintained by both the LockManager and Locker class
- //
-
- // If the request cannot be granted right away, whether to put it at the front or at the
- // end of the queue. By default, requests are put at the back. If a request is requested
- // to be put at the front, this effectively bypasses fairness. Default is FALSE.
- bool enqueueAtFront;
-
- // When this request is granted and as long as it is on the granted queue, the particular
- // resource's policy will be changed to "compatibleFirst". This means that even if there
- // are pending requests on the conflict queue, if a compatible request comes in it will be
- // granted immediately. This effectively turns off fairness.
- bool compatibleFirst;
-
- // When set, an attempt is made to execute this request using partitioned lockheads.
- // This speeds up the common case where all requested locking modes are compatible with
- // each other, at the cost of extra overhead for conflicting modes.
- bool partitioned;
-
- // How many times has LockManager::lock been called for this request. Locks are released
- // when their recursive count drops to zero.
- unsigned recursiveCount;
-
- //
- // These fields are owned and maintained by the LockManager class exclusively
- //
-
-
- // Pointer to the lock to which this request belongs, or null if this request has not yet
- // been assigned to a lock or if it belongs to the PartitionedLockHead for locker. The
- // LockHead should be alive as long as there are LockRequests on it, so it is safe to have
- // this pointer hanging around.
- LockHead* lock;
-
- // Pointer to the partitioned lock to which this request belongs, or null if it is not
- // partitioned. Only one of 'lock' and 'partitionedLock' is non-NULL, and a request can
- // only transition from 'partitionedLock' to 'lock', never the other way around.
- PartitionedLockHead* partitionedLock;
-
- // The reason intrusive linked list is used instead of the std::list class is to allow
- // for entries to be removed from the middle of the list in O(1) time, if they are known
- // instead of having to search for them and we cannot persist iterators, because the list
- // can be modified while an iterator is held.
- LockRequest* prev;
- LockRequest* next;
-
- // Current status of this request.
- Status status;
-
- // If not granted, the mode which has been requested for this lock. If granted, the mode
- // in which it is currently granted.
- LockMode mode;
-
- // This value is different from MODE_NONE only if a conversion is requested for a lock and
- // that conversion cannot be immediately granted.
- LockMode convertMode;
+/**
+ * There is one of these entries for each lock request. They hang on a linked list off
+ * the LockHead or off a PartitionedLockHead and also are in a map for each Locker. This
+ * structure is not thread-safe.
+ *
+ * LockRequests are owned by the Locker class, which controls their lifetime. They should not
+ * be deleted while on the LockManager though (see the contract for the lock/unlock methods).
+ */
+struct LockRequest {
+ enum Status {
+ STATUS_NEW,
+ STATUS_GRANTED,
+ STATUS_WAITING,
+ STATUS_CONVERTING,
+
+ // Counts the rest. Always insert new status types above this entry.
+ StatusCount
};
/**
- * Returns a human readable status name for the specified LockRequest status.
+ * Used for initialization of a LockRequest, which might have been retrieved from cache.
*/
- const char* lockRequestStatusName(LockRequest::Status status);
+ void initNew(Locker* locker, LockGrantNotification* notify);
+
-} // namespace mongo
+ //
+ // These fields are maintained by the Locker class
+ //
+
+ // This is the Locker, which created this LockRequest. Pointer is not owned, just
+ // referenced. Must outlive the LockRequest.
+ Locker* locker;
+
+ // Not owned, just referenced. If a request is in the WAITING or CONVERTING state, must
+ // live at least until LockManager::unlock is cancelled or the notification has been
+ // invoked.
+ LockGrantNotification* notify;
+
+
+ //
+ // These fields are maintained by both the LockManager and Locker class
+ //
+
+ // If the request cannot be granted right away, whether to put it at the front or at the
+ // end of the queue. By default, requests are put at the back. If a request is requested
+ // to be put at the front, this effectively bypasses fairness. Default is FALSE.
+ bool enqueueAtFront;
+
+ // When this request is granted and as long as it is on the granted queue, the particular
+ // resource's policy will be changed to "compatibleFirst". This means that even if there
+ // are pending requests on the conflict queue, if a compatible request comes in it will be
+ // granted immediately. This effectively turns off fairness.
+ bool compatibleFirst;
+
+ // When set, an attempt is made to execute this request using partitioned lockheads.
+ // This speeds up the common case where all requested locking modes are compatible with
+ // each other, at the cost of extra overhead for conflicting modes.
+ bool partitioned;
+
+ // How many times has LockManager::lock been called for this request. Locks are released
+ // when their recursive count drops to zero.
+ unsigned recursiveCount;
+
+ //
+ // These fields are owned and maintained by the LockManager class exclusively
+ //
+
+
+ // Pointer to the lock to which this request belongs, or null if this request has not yet
+    // been assigned to a lock or if it belongs to the PartitionedLockHead for the locker. The
+ // LockHead should be alive as long as there are LockRequests on it, so it is safe to have
+ // this pointer hanging around.
+ LockHead* lock;
+
+ // Pointer to the partitioned lock to which this request belongs, or null if it is not
+ // partitioned. Only one of 'lock' and 'partitionedLock' is non-NULL, and a request can
+ // only transition from 'partitionedLock' to 'lock', never the other way around.
+ PartitionedLockHead* partitionedLock;
+
+    // An intrusive linked list is used instead of std::list so that known entries can be
+    // removed from the middle of the list in O(1) time without searching for them. Iterators
+    // cannot be persisted, because the list can be modified while an iterator is held.
+ LockRequest* prev;
+ LockRequest* next;
+
+ // Current status of this request.
+ Status status;
+
+ // If not granted, the mode which has been requested for this lock. If granted, the mode
+ // in which it is currently granted.
+ LockMode mode;
+
+ // This value is different from MODE_NONE only if a conversion is requested for a lock and
+ // that conversion cannot be immediately granted.
+ LockMode convertMode;
+};
+
+/**
+ * Returns a human readable status name for the specified LockRequest status.
+ */
+const char* lockRequestStatusName(LockRequest::Status status);
+
+} // namespace mongo
MONGO_HASH_NAMESPACE_START
- template <> struct hash<mongo::ResourceId> {
- size_t operator()(const mongo::ResourceId& resource) const {
- return resource;
- }
- };
+template <>
+struct hash<mongo::ResourceId> {
+ size_t operator()(const mongo::ResourceId& resource) const {
+ return resource;
+ }
+};
MONGO_HASH_NAMESPACE_END
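
The compatibility matrix documented in this header is what the bitmask table in lock_manager.cpp (LockConflictsTable, modeMask, conflicts) encodes. Below is a minimal standalone sketch of that encoding, assuming the matrix above; the kConflictsTable name is illustrative and not the actual table from the patch.

    // Standalone sketch of the MODE_* compatibility matrix as a conflict bitmask table.
    #include <cassert>
    #include <cstdint>

    enum LockMode { MODE_NONE = 0, MODE_IS, MODE_IX, MODE_S, MODE_X, LockModesCount };

    inline uint32_t modeMask(LockMode mode) {
        return 1U << mode;
    }

    // For each requested mode, the set of granted modes it conflicts with (per the matrix).
    static const uint32_t kConflictsTable[LockModesCount] = {
        0,                                                                     // MODE_NONE
        (1U << MODE_X),                                                        // MODE_IS
        (1U << MODE_S) | (1U << MODE_X),                                       // MODE_IX
        (1U << MODE_IX) | (1U << MODE_X),                                      // MODE_S
        (1U << MODE_IS) | (1U << MODE_IX) | (1U << MODE_S) | (1U << MODE_X),   // MODE_X
    };

    inline bool conflicts(LockMode requested, uint32_t grantedMask) {
        return (kConflictsTable[requested] & grantedMask) != 0;
    }

    int main() {
        assert(!conflicts(MODE_IS, modeMask(MODE_S)));  // IS is compatible with S
        assert(conflicts(MODE_IX, modeMask(MODE_S)));   // IX conflicts with S
        assert(conflicts(MODE_X, modeMask(MODE_IS)));   // X conflicts with any granted mode
        assert(!conflicts(MODE_S, modeMask(MODE_IS)));  // S is compatible with IS
        return 0;
    }

isModeCovered in the patch falls out of the same table: mode A is covered by mode B when A's conflict set is a subset of B's, i.e. (table[B] | table[A]) == table[B].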
diff --git a/src/mongo/db/concurrency/lock_manager_test.cpp b/src/mongo/db/concurrency/lock_manager_test.cpp
index 50fc9826a9b..ce722b6f572 100644
--- a/src/mongo/db/concurrency/lock_manager_test.cpp
+++ b/src/mongo/db/concurrency/lock_manager_test.cpp
@@ -31,792 +31,789 @@
namespace mongo {
- TEST(ResourceId, Semantics) {
- ResourceId resIdDb(RESOURCE_DATABASE, 324334234);
- ASSERT(resIdDb.getType() == RESOURCE_DATABASE);
- ASSERT(resIdDb.getHashId() == 324334234);
-
- ResourceId resIdColl(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- ASSERT(resIdColl.getType() == RESOURCE_COLLECTION);
-
- // Comparison functions
-
- // Make sure the operator < is defined.
- ASSERT(resIdDb < resIdColl || resIdColl < resIdDb);
-
- ResourceId resId(RESOURCE_DATABASE, 324334234);
- ASSERT_EQUALS(resIdDb, resId);
-
- // Assignment functions
- resId = resIdColl;
- ASSERT_EQUALS(resId, resIdColl);
- }
-
- TEST(ResourceId, Constructors) {
- ResourceId resIdString(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- ResourceId resIdStringData(RESOURCE_COLLECTION, StringData("TestDB.collection"));
-
- ASSERT_EQUALS(resIdString, resIdStringData);
- }
-
- TEST(ResourceId, Masking) {
- const ResourceType maxRes = static_cast<ResourceType>(ResourceTypesCount - 1);
- const uint64_t maxHash = (1ULL<<61) - 1; // Only 61 bits usable for hash
- ResourceType resources[3] = { maxRes, RESOURCE_GLOBAL, RESOURCE_METADATA };
- uint64_t hashes[3] = {maxHash, maxHash / 3, maxHash / 3 * 2};
-
- // The test below verifies that types/hashes are stored/retrieved unchanged
- for (int h = 0; h < 3; h++) {
- for (int r = 0; r < 3; r++) {
- ResourceId id(resources[r], hashes[h]);
- ASSERT_EQUALS(id.getHashId(), hashes[h]);
- ASSERT_EQUALS(id.getType(), resources[r]);
- }
+TEST(ResourceId, Semantics) {
+ ResourceId resIdDb(RESOURCE_DATABASE, 324334234);
+ ASSERT(resIdDb.getType() == RESOURCE_DATABASE);
+ ASSERT(resIdDb.getHashId() == 324334234);
+
+ ResourceId resIdColl(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ASSERT(resIdColl.getType() == RESOURCE_COLLECTION);
+
+ // Comparison functions
+
+ // Make sure the operator < is defined.
+ ASSERT(resIdDb < resIdColl || resIdColl < resIdDb);
+
+ ResourceId resId(RESOURCE_DATABASE, 324334234);
+ ASSERT_EQUALS(resIdDb, resId);
+
+ // Assignment functions
+ resId = resIdColl;
+ ASSERT_EQUALS(resId, resIdColl);
+}
+
+TEST(ResourceId, Constructors) {
+ ResourceId resIdString(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ResourceId resIdStringData(RESOURCE_COLLECTION, StringData("TestDB.collection"));
+
+ ASSERT_EQUALS(resIdString, resIdStringData);
+}
+
+TEST(ResourceId, Masking) {
+ const ResourceType maxRes = static_cast<ResourceType>(ResourceTypesCount - 1);
+ const uint64_t maxHash = (1ULL << 61) - 1; // Only 61 bits usable for hash
+ ResourceType resources[3] = {maxRes, RESOURCE_GLOBAL, RESOURCE_METADATA};
+ uint64_t hashes[3] = {maxHash, maxHash / 3, maxHash / 3 * 2};
+
+ // The test below verifies that types/hashes are stored/retrieved unchanged
+ for (int h = 0; h < 3; h++) {
+ for (int r = 0; r < 3; r++) {
+ ResourceId id(resources[r], hashes[h]);
+ ASSERT_EQUALS(id.getHashId(), hashes[h]);
+ ASSERT_EQUALS(id.getType(), resources[r]);
}
}
+}
- //
- // LockManager
- //
+//
+// LockManager
+//
- TEST(LockManager, Grant) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockManager, Grant) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
- LockRequest request;
- request.initNew(&locker, &notify);
+ LockRequest request;
+ request.initNew(&locker, &notify);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
- ASSERT(notify.numNotifies == 0);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(notify.numNotifies == 0);
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
- }
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
- TEST(LockManager, GrantMultipleNoConflict) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockManager, GrantMultipleNoConflict) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
- LockRequest request[6];
- for (int i = 0; i < 6; i++) {
- request[i].initNew(&locker, &notify);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request[i], MODE_S));
+ LockRequest request[6];
+ for (int i = 0; i < 6; i++) {
+ request[i].initNew(&locker, &notify);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request[i], MODE_S));
- ASSERT(request[i].mode == MODE_S);
- ASSERT(request[i].recursiveCount == 1);
- }
-
- ASSERT(notify.numNotifies == 0);
-
- // Free the first
- lockMgr.unlock(&request[0]);
-
- // Free the last
- lockMgr.unlock(&request[5]);
-
- // Free one in the middle
- lockMgr.unlock(&request[3]);
-
- // Free the remaining so the LockMgr does not compain about leaked locks
- lockMgr.unlock(&request[1]);
- lockMgr.unlock(&request[2]);
- lockMgr.unlock(&request[4]);
+ ASSERT(request[i].mode == MODE_S);
+ ASSERT(request[i].recursiveCount == 1);
}
- TEST(LockManager, GrantMultipleFIFOOrder) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ASSERT(notify.numNotifies == 0);
- std::unique_ptr<MMAPV1LockerImpl> locker[6];
- for (int i = 0; i < 6; i++) {
- locker[i].reset(new MMAPV1LockerImpl());
- }
+ // Free the first
+ lockMgr.unlock(&request[0]);
- TrackingLockGrantNotification notify[6];
+ // Free the last
+ lockMgr.unlock(&request[5]);
- LockRequest request[6];
- for (int i = 0; i < 6; i++) {
- request[i].initNew(locker[i].get(), &notify[i]);
- lockMgr.lock(resId, &request[i], MODE_X);
+ // Free one in the middle
+ lockMgr.unlock(&request[3]);
- ASSERT(request[i].mode == MODE_X);
- ASSERT(request[i].recursiveCount == 1);
- }
+    // Free the remaining so the LockMgr does not complain about leaked locks
+ lockMgr.unlock(&request[1]);
+ lockMgr.unlock(&request[2]);
+ lockMgr.unlock(&request[4]);
+}
- // Release the last held lock and ensure the next one, based on time is granted
- for (int i = 0; i < 5; i++) {
- lockMgr.unlock(&request[i]);
-
- ASSERT(notify[i + 1].numNotifies == 1);
- ASSERT(notify[i + 1].lastResId == resId);
- ASSERT(notify[i + 1].lastResult == LOCK_OK);
- }
+TEST(LockManager, GrantMultipleFIFOOrder) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Release the last one
- lockMgr.unlock(&request[5]);
+ std::unique_ptr<MMAPV1LockerImpl> locker[6];
+ for (int i = 0; i < 6; i++) {
+ locker[i].reset(new MMAPV1LockerImpl());
}
- TEST(LockManager, GrantRecursive) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ TrackingLockGrantNotification notify[6];
- MMAPV1LockerImpl locker;
- LockRequestCombo request(&locker);
+ LockRequest request[6];
+ for (int i = 0; i < 6; i++) {
+ request[i].initNew(locker[i].get(), &notify[i]);
+ lockMgr.lock(resId, &request[i], MODE_X);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
- ASSERT(request.numNotifies == 0);
-
- // Acquire again, in the same mode
- ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 2);
- ASSERT(request.numNotifies == 0);
-
- // Release first acquire
- lockMgr.unlock(&request);
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
-
- // Release second acquire
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
+ ASSERT(request[i].mode == MODE_X);
+ ASSERT(request[i].recursiveCount == 1);
}
- TEST(LockManager, GrantRecursiveCompatibleConvertUp) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker;
- LockRequestCombo request(&locker);
-
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_IS));
- ASSERT(request.mode == MODE_IS);
- ASSERT(request.recursiveCount == 1);
- ASSERT(request.numNotifies == 0);
+    // Release each held lock in turn and ensure the next request, in FIFO order, is granted
+ for (int i = 0; i < 5; i++) {
+ lockMgr.unlock(&request[i]);
- // Acquire again, in *compatible*, but stricter mode
- ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 2);
- ASSERT(request.numNotifies == 0);
-
- // Release the first acquire
- lockMgr.unlock(&request);
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
-
- // Release the second acquire
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
+ ASSERT(notify[i + 1].numNotifies == 1);
+ ASSERT(notify[i + 1].lastResId == resId);
+ ASSERT(notify[i + 1].lastResult == LOCK_OK);
}
- TEST(LockManager, GrantRecursiveNonCompatibleConvertUp) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker;
- LockRequestCombo request(&locker);
-
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_S);
- ASSERT(request.recursiveCount == 1);
- ASSERT(request.numNotifies == 0);
-
- // Acquire again, in *non-compatible*, but stricter mode
- ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_X));
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 2);
- ASSERT(request.numNotifies == 0);
-
- // Release first acquire
- lockMgr.unlock(&request);
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 1);
+ // Release the last one
+ lockMgr.unlock(&request[5]);
+}
+
+TEST(LockManager, GrantRecursive) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ LockRequestCombo request(&locker);
+
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(request.numNotifies == 0);
+
+ // Acquire again, in the same mode
+ ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 2);
+ ASSERT(request.numNotifies == 0);
+
+ // Release first acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+
+ // Release second acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
+
+TEST(LockManager, GrantRecursiveCompatibleConvertUp) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ LockRequestCombo request(&locker);
+
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_IS));
+ ASSERT(request.mode == MODE_IS);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(request.numNotifies == 0);
+
+ // Acquire again, in *compatible*, but stricter mode
+ ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 2);
+ ASSERT(request.numNotifies == 0);
+
+ // Release the first acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+
+ // Release the second acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
+
+TEST(LockManager, GrantRecursiveNonCompatibleConvertUp) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ LockRequestCombo request(&locker);
+
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_S);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(request.numNotifies == 0);
+
+ // Acquire again, in *non-compatible*, but stricter mode
+ ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_X));
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 2);
+ ASSERT(request.numNotifies == 0);
+
+ // Release first acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 1);
+
+ // Release second acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
+
+TEST(LockManager, GrantRecursiveNonCompatibleConvertDown) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ LockRequestCombo request(&locker);
+
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_X));
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 1);
+ ASSERT(request.numNotifies == 0);
+
+ // Acquire again, in *non-compatible*, but less strict mode
+ ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 2);
+ ASSERT(request.numNotifies == 0);
+
+ // Release first acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.mode == MODE_X);
+ ASSERT(request.recursiveCount == 1);
+
+ // Release second acquire
+ lockMgr.unlock(&request);
+ ASSERT(request.recursiveCount == 0);
+}
+
+TEST(LockManager, Conflict) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
+
+ LockRequestCombo request1(&locker1);
+ LockRequestCombo request2(&locker2);
+
+ // First request granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+ ASSERT(request1.recursiveCount == 1);
+ ASSERT(request1.numNotifies == 0);
+
+ // Second request must block
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_X));
+ ASSERT(request2.mode == MODE_X);
+ ASSERT(request2.recursiveCount == 1);
+ ASSERT(request2.numNotifies == 0);
+
+ // Release first request
+ lockMgr.unlock(&request1);
+ ASSERT(request1.recursiveCount == 0);
+ ASSERT(request1.numNotifies == 0);
+
+ ASSERT(request2.mode == MODE_X);
+ ASSERT(request2.recursiveCount == 1);
+ ASSERT(request2.numNotifies == 1);
+ ASSERT(request2.lastResult == LOCK_OK);
+
+ // Release second acquire
+ lockMgr.unlock(&request2);
+ ASSERT(request2.recursiveCount == 0);
+
+ ASSERT(request1.numNotifies == 0);
+ ASSERT(request2.numNotifies == 1);
+}
+
+TEST(LockManager, MultipleConflict) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
+
+ LockRequest request[6];
+ for (int i = 0; i < 6; i++) {
+ request[i].initNew(&locker, &notify);
+
+ if (i == 0) {
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request[i], MODE_X));
+ } else {
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request[i], MODE_X));
+ }
- // Release second acquire
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
+ ASSERT(request[i].mode == MODE_X);
+ ASSERT(request[i].recursiveCount == 1);
}
- TEST(LockManager, GrantRecursiveNonCompatibleConvertDown) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker;
- LockRequestCombo request(&locker);
-
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request, MODE_X));
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 1);
- ASSERT(request.numNotifies == 0);
-
- // Acquire again, in *non-compatible*, but less strict mode
- ASSERT(LOCK_OK == lockMgr.convert(resId, &request, MODE_S));
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 2);
- ASSERT(request.numNotifies == 0);
+ ASSERT(notify.numNotifies == 0);
- // Release first acquire
- lockMgr.unlock(&request);
- ASSERT(request.mode == MODE_X);
- ASSERT(request.recursiveCount == 1);
+ // Free them one by one and make sure they get granted in the correct order
+ for (int i = 0; i < 6; i++) {
+ lockMgr.unlock(&request[i]);
- // Release second acquire
- lockMgr.unlock(&request);
- ASSERT(request.recursiveCount == 0);
+ if (i < 5) {
+ ASSERT(notify.numNotifies == i + 1);
+ }
}
+}
- TEST(LockManager, Conflict) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
-
- LockRequestCombo request1(&locker1);
- LockRequestCombo request2(&locker2);
+TEST(LockManager, ConflictCancelWaiting) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // First request granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- ASSERT(request1.recursiveCount == 1);
- ASSERT(request1.numNotifies == 0);
+ MMAPV1LockerImpl locker1;
+ TrackingLockGrantNotification notify1;
- // Second request must block
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_X));
- ASSERT(request2.mode == MODE_X);
- ASSERT(request2.recursiveCount == 1);
- ASSERT(request2.numNotifies == 0);
+ MMAPV1LockerImpl locker2;
+ TrackingLockGrantNotification notify2;
- // Release first request
- lockMgr.unlock(&request1);
- ASSERT(request1.recursiveCount == 0);
- ASSERT(request1.numNotifies == 0);
+ LockRequest request1;
+ request1.initNew(&locker1, &notify1);
- ASSERT(request2.mode == MODE_X);
- ASSERT(request2.recursiveCount == 1);
- ASSERT(request2.numNotifies == 1);
- ASSERT(request2.lastResult == LOCK_OK);
+ LockRequest request2;
+ request2.initNew(&locker2, &notify2);
- // Release second acquire
- lockMgr.unlock(&request2);
- ASSERT(request2.recursiveCount == 0);
-
- ASSERT(request1.numNotifies == 0);
- ASSERT(request2.numNotifies == 1);
- }
+ // First request granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+ ASSERT(notify1.numNotifies == 0);
- TEST(LockManager, MultipleConflict) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_X));
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ // Release second request (which is still in the WAITING mode)
+ lockMgr.unlock(&request2);
+ ASSERT(notify2.numNotifies == 0);
- LockRequest request[6];
- for (int i = 0; i < 6; i++) {
- request[i].initNew(&locker, &notify);
+ ASSERT(request1.mode == MODE_S);
+ ASSERT(request1.recursiveCount == 1);
- if (i == 0) {
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request[i], MODE_X));
- }
- else {
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request[i], MODE_X));
- }
+    // Release the remaining (first) request
+ lockMgr.unlock(&request1);
+}
- ASSERT(request[i].mode == MODE_X);
- ASSERT(request[i].recursiveCount == 1);
- }
+TEST(LockManager, ConflictCancelMultipleWaiting) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- ASSERT(notify.numNotifies == 0);
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
- // Free them one by one and make sure they get granted in the correct order
- for (int i = 0; i < 6; i++) {
- lockMgr.unlock(&request[i]);
+ LockRequest request[6];
+ for (int i = 0; i < 6; i++) {
+ request[i].initNew(&locker, &notify);
+ lockMgr.lock(resId, &request[i], MODE_X);
- if (i < 5) {
- ASSERT(notify.numNotifies == i + 1);
- }
- }
+ ASSERT(request[i].mode == MODE_X);
+ ASSERT(request[i].recursiveCount == 1);
}
- TEST(LockManager, ConflictCancelWaiting) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
-
- MMAPV1LockerImpl locker1;
- TrackingLockGrantNotification notify1;
+ ASSERT(notify.numNotifies == 0);
- MMAPV1LockerImpl locker2;
- TrackingLockGrantNotification notify2;
+ // Free the second (waiting)
+ lockMgr.unlock(&request[1]);
- LockRequest request1;
- request1.initNew(&locker1, &notify1);
+ // Free the last
+ lockMgr.unlock(&request[5]);
- LockRequest request2;
- request2.initNew(&locker2, &notify2);
+ // Free one in the middle
+ lockMgr.unlock(&request[3]);
- // First request granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- ASSERT(notify1.numNotifies == 0);
+    // Free the remaining so the LockMgr does not complain about leaked locks
+ lockMgr.unlock(&request[2]);
+ lockMgr.unlock(&request[4]);
+ lockMgr.unlock(&request[0]);
+}
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_X));
+TEST(LockManager, ConflictCancelWaitingConversion) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Release second request (which is still in the WAITING mode)
- lockMgr.unlock(&request2);
- ASSERT(notify2.numNotifies == 0);
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
- ASSERT(request1.mode == MODE_S);
- ASSERT(request1.recursiveCount == 1);
+ LockRequestCombo request1(&locker1);
+ LockRequestCombo request2(&locker2);
- // Release second acquire
- lockMgr.unlock(&request1);
- }
+ // First request granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+ ASSERT(request1.numNotifies == 0);
- TEST(LockManager, ConflictCancelMultipleWaiting) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // Second request is granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
+ ASSERT(request2.numNotifies == 0);
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ // Convert second request to conflicting
+ ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request2, MODE_X));
+ ASSERT(request2.mode == MODE_S);
+ ASSERT(request2.convertMode == MODE_X);
+ ASSERT(request2.numNotifies == 0);
- LockRequest request[6];
- for (int i = 0; i < 6; i++) {
- request[i].initNew(&locker, &notify);
- lockMgr.lock(resId, &request[i], MODE_X);
+ // Cancel the conflicting upgrade
+ lockMgr.unlock(&request2);
+ ASSERT(request2.mode == MODE_S);
+ ASSERT(request2.convertMode == MODE_NONE);
+ ASSERT(request2.numNotifies == 0);
- ASSERT(request[i].mode == MODE_X);
- ASSERT(request[i].recursiveCount == 1);
- }
+ // Free the remaining locks so the LockManager destructor does not complain
+ lockMgr.unlock(&request1);
+ lockMgr.unlock(&request2);
+}
- ASSERT(notify.numNotifies == 0);
+TEST(LockManager, ConflictingConversion) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Free the second (waiting)
- lockMgr.unlock(&request[1]);
+ MMAPV1LockerImpl locker1;
+ MMAPV1LockerImpl locker2;
- // Free the last
- lockMgr.unlock(&request[5]);
+ LockRequestCombo request1(&locker1);
+ LockRequestCombo request2(&locker2);
- // Free one in the middle
- lockMgr.unlock(&request[3]);
+ // The S requests are granted right away
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+ ASSERT(request1.numNotifies == 0);
- // Free the remaining so the LockMgr does not compain about leaked locks
- lockMgr.unlock(&request[2]);
- lockMgr.unlock(&request[4]);
- lockMgr.unlock(&request[0]);
- }
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
+ ASSERT(request2.numNotifies == 0);
- TEST(LockManager, ConflictCancelWaitingConversion) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // Convert first request to conflicting
+ ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request1, MODE_X));
+ ASSERT(request1.numNotifies == 0);
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
+ // Free the second lock and make sure the first is granted
+ lockMgr.unlock(&request2);
+ ASSERT(request1.mode == MODE_X);
+ ASSERT(request1.numNotifies == 1);
+ ASSERT(request2.numNotifies == 0);
- LockRequestCombo request1(&locker1);
- LockRequestCombo request2(&locker2);
+ // Frees the first reference, mode remains X
+ lockMgr.unlock(&request1);
+ ASSERT(request1.mode == MODE_X);
+ ASSERT(request1.recursiveCount == 1);
- // First request granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- ASSERT(request1.numNotifies == 0);
+ lockMgr.unlock(&request1);
+}
- // Second request is granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
- ASSERT(request2.numNotifies == 0);
+TEST(LockManager, ConflictingConversionInTheMiddle) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Convert second request to conflicting
- ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request2, MODE_X));
- ASSERT(request2.mode == MODE_S);
- ASSERT(request2.convertMode == MODE_X);
- ASSERT(request2.numNotifies == 0);
+ MMAPV1LockerImpl locker;
+ TrackingLockGrantNotification notify;
- // Cancel the conflicting upgrade
- lockMgr.unlock(&request2);
- ASSERT(request2.mode == MODE_S);
- ASSERT(request2.convertMode == MODE_NONE);
- ASSERT(request2.numNotifies == 0);
-
- // Free the remaining locks so the LockManager destructor does not complain
- lockMgr.unlock(&request1);
- lockMgr.unlock(&request2);
+ LockRequest request[3];
+ for (int i = 0; i < 3; i++) {
+ request[i].initNew(&locker, &notify);
+ lockMgr.lock(resId, &request[i], MODE_S);
}
- TEST(LockManager, ConflictingConversion) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // Upgrade the one in the middle (not the first one)
+ ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request[1], MODE_X));
- MMAPV1LockerImpl locker1;
- MMAPV1LockerImpl locker2;
+ ASSERT(notify.numNotifies == 0);
- LockRequestCombo request1(&locker1);
- LockRequestCombo request2(&locker2);
+ // Release the two shared modes
+ lockMgr.unlock(&request[0]);
+ ASSERT(notify.numNotifies == 0);
- // The S requests are granted right away
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- ASSERT(request1.numNotifies == 0);
+ lockMgr.unlock(&request[2]);
+ ASSERT(notify.numNotifies == 1);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
- ASSERT(request2.numNotifies == 0);
+ ASSERT(request[1].mode == MODE_X);
- // Convert first request to conflicting
- ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request1, MODE_X));
- ASSERT(request1.numNotifies == 0);
+    // request[1] was locked and then converted, so it must be unlocked twice
+ lockMgr.unlock(&request[1]);
+ lockMgr.unlock(&request[1]);
+}
- // Free the second lock and make sure the first is granted
- lockMgr.unlock(&request2);
- ASSERT(request1.mode == MODE_X);
- ASSERT(request1.numNotifies == 1);
- ASSERT(request2.numNotifies == 0);
+TEST(LockManager, ConvertUpgrade) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Frees the first reference, mode remains X
- lockMgr.unlock(&request1);
- ASSERT(request1.mode == MODE_X);
- ASSERT(request1.recursiveCount == 1);
+ MMAPV1LockerImpl locker1;
+ LockRequestCombo request1(&locker1);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
- lockMgr.unlock(&request1);
- }
+ MMAPV1LockerImpl locker2;
+ LockRequestCombo request2(&locker2);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
- TEST(LockManager, ConflictingConversionInTheMiddle) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // Upgrade the S lock to X
+ ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request1, MODE_X));
- MMAPV1LockerImpl locker;
- TrackingLockGrantNotification notify;
+ ASSERT(!lockMgr.unlock(&request1));
+ ASSERT(lockMgr.unlock(&request1));
- LockRequest request[3];
- for (int i = 0; i < 3; i++) {
- request[i].initNew(&locker, &notify);
- lockMgr.lock(resId, &request[i], MODE_S);
- }
+ ASSERT(lockMgr.unlock(&request2));
+}
- // Upgrade the one in the middle (not the first one)
- ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request[1], MODE_X));
+TEST(LockManager, Downgrade) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- ASSERT(notify.numNotifies == 0);
+ MMAPV1LockerImpl locker1;
+ LockRequestCombo request1(&locker1);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_X));
- // Release the two shared modes
- lockMgr.unlock(&request[0]);
- ASSERT(notify.numNotifies == 0);
+ MMAPV1LockerImpl locker2;
+ LockRequestCombo request2(&locker2);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_S));
- lockMgr.unlock(&request[2]);
- ASSERT(notify.numNotifies == 1);
+ // Downgrade the X request to S
+ lockMgr.downgrade(&request1, MODE_S);
- ASSERT(request[1].mode == MODE_X);
+ ASSERT(request2.numNotifies == 1);
+ ASSERT(request2.lastResult == LOCK_OK);
+ ASSERT(request2.recursiveCount == 1);
- // Request 1 should be unlocked twice
- lockMgr.unlock(&request[1]);
- lockMgr.unlock(&request[1]);
- }
+ ASSERT(lockMgr.unlock(&request1));
+ ASSERT(lockMgr.unlock(&request2));
+}
- TEST(LockManager, ConvertUpgrade) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker1;
- LockRequestCombo request1(&locker1);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_S));
+// Lock conflict matrix tests
+static void checkConflict(LockMode existingMode, LockMode newMode, bool hasConflict) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker2;
- LockRequestCombo request2(&locker2);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_S));
+ MMAPV1LockerImpl lockerExisting;
+ TrackingLockGrantNotification notifyExisting;
+ LockRequest requestExisting;
+ requestExisting.initNew(&lockerExisting, &notifyExisting);
- // Upgrade the S lock to X
- ASSERT(LOCK_WAITING == lockMgr.convert(resId, &request1, MODE_X));
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestExisting, existingMode));
- ASSERT(!lockMgr.unlock(&request1));
- ASSERT(lockMgr.unlock(&request1));
+ MMAPV1LockerImpl lockerNew;
+ TrackingLockGrantNotification notifyNew;
+ LockRequest requestNew;
+ requestNew.initNew(&lockerNew, &notifyNew);
- ASSERT(lockMgr.unlock(&request2));
+ LockResult result = lockMgr.lock(resId, &requestNew, newMode);
+ if (hasConflict) {
+ ASSERT_EQUALS(LOCK_WAITING, result);
+ } else {
+ ASSERT_EQUALS(LOCK_OK, result);
}
- TEST(LockManager, Downgrade) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ lockMgr.unlock(&requestNew);
+ lockMgr.unlock(&requestExisting);
+}
- MMAPV1LockerImpl locker1;
- LockRequestCombo request1(&locker1);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_X));
+TEST(LockManager, ValidateConflictMatrix) {
+ checkConflict(MODE_IS, MODE_IS, false);
+ checkConflict(MODE_IS, MODE_IX, false);
+ checkConflict(MODE_IS, MODE_S, false);
+ checkConflict(MODE_IS, MODE_X, true);
- MMAPV1LockerImpl locker2;
- LockRequestCombo request2(&locker2);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_S));
+ checkConflict(MODE_IX, MODE_IS, false);
+ checkConflict(MODE_IX, MODE_IX, false);
+ checkConflict(MODE_IX, MODE_S, true);
+ checkConflict(MODE_IX, MODE_X, true);
- // Downgrade the X request to S
- lockMgr.downgrade(&request1, MODE_S);
+ checkConflict(MODE_S, MODE_IS, false);
+ checkConflict(MODE_S, MODE_IX, true);
+ checkConflict(MODE_S, MODE_S, false);
+ checkConflict(MODE_S, MODE_X, true);
- ASSERT(request2.numNotifies == 1);
- ASSERT(request2.lastResult == LOCK_OK);
- ASSERT(request2.recursiveCount == 1);
+ checkConflict(MODE_X, MODE_IS, true);
+ checkConflict(MODE_X, MODE_IX, true);
+ checkConflict(MODE_X, MODE_S, true);
+ checkConflict(MODE_X, MODE_X, true);
+}
- ASSERT(lockMgr.unlock(&request1));
- ASSERT(lockMgr.unlock(&request2));
- }
+TEST(LockManager, EnqueueAtFront) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ MMAPV1LockerImpl lockerX;
+ LockRequestCombo requestX(&lockerX);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestX, MODE_X));
- // Lock conflict matrix tests
- static void checkConflict(LockMode existingMode, LockMode newMode, bool hasConflict) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ // The subsequent request will block
+ MMAPV1LockerImpl lockerLow;
+ LockRequestCombo requestLow(&lockerLow);
- MMAPV1LockerImpl lockerExisting;
- TrackingLockGrantNotification notifyExisting;
- LockRequest requestExisting;
- requestExisting.initNew(&lockerExisting, &notifyExisting);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestLow, MODE_X));
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestExisting, existingMode));
+    // This is a "queue jumping" request, which will go ahead of requestLow above
+ MMAPV1LockerImpl lockerHi;
+ LockRequestCombo requestHi(&lockerHi);
+ requestHi.enqueueAtFront = true;
- MMAPV1LockerImpl lockerNew;
- TrackingLockGrantNotification notifyNew;
- LockRequest requestNew;
- requestNew.initNew(&lockerNew, &notifyNew);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestHi, MODE_X));
- LockResult result = lockMgr.lock(resId, &requestNew, newMode);
- if (hasConflict) {
- ASSERT_EQUALS(LOCK_WAITING, result);
- }
- else {
- ASSERT_EQUALS(LOCK_OK, result);
- }
-
- lockMgr.unlock(&requestNew);
- lockMgr.unlock(&requestExisting);
- }
-
- TEST(LockManager, ValidateConflictMatrix) {
- checkConflict(MODE_IS, MODE_IS, false);
- checkConflict(MODE_IS, MODE_IX, false);
- checkConflict(MODE_IS, MODE_S, false);
- checkConflict(MODE_IS, MODE_X, true);
-
- checkConflict(MODE_IX, MODE_IS, false);
- checkConflict(MODE_IX, MODE_IX, false);
- checkConflict(MODE_IX, MODE_S, true);
- checkConflict(MODE_IX, MODE_X, true);
-
- checkConflict(MODE_S, MODE_IS, false);
- checkConflict(MODE_S, MODE_IX, true);
- checkConflict(MODE_S, MODE_S, false);
- checkConflict(MODE_S, MODE_X, true);
-
- checkConflict(MODE_X, MODE_IS, true);
- checkConflict(MODE_X, MODE_IX, true);
- checkConflict(MODE_X, MODE_S, true);
- checkConflict(MODE_X, MODE_X, true);
- }
+ // Once the X request is gone, lockerHi should be granted, because it's queue jumping
+ ASSERT(lockMgr.unlock(&requestX));
- TEST(LockManager, EnqueueAtFront) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ ASSERT(requestHi.lastResId == resId);
+ ASSERT(requestHi.lastResult == LOCK_OK);
- MMAPV1LockerImpl lockerX;
- LockRequestCombo requestX(&lockerX);
+ // Finally lockerLow should be granted
+ ASSERT(lockMgr.unlock(&requestHi));
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestX, MODE_X));
+ ASSERT(requestLow.lastResId == resId);
+ ASSERT(requestLow.lastResult == LOCK_OK);
- // The subsequent request will block
- MMAPV1LockerImpl lockerLow;
- LockRequestCombo requestLow(&lockerLow);
+ // This avoids the lock manager asserting on leaked locks
+ ASSERT(lockMgr.unlock(&requestLow));
+}
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestLow, MODE_X));
+TEST(LockManager, CompatibleFirstImmediateGrant) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_GLOBAL, 0);
- // This is a "queue jumping request", which will go before locker 2 above
- MMAPV1LockerImpl lockerHi;
- LockRequestCombo requestHi(&lockerHi);
- requestHi.enqueueAtFront = true;
+ MMAPV1LockerImpl locker1;
+ LockRequestCombo request1(&locker1);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestHi, MODE_X));
+ MMAPV1LockerImpl locker2;
+ LockRequestCombo request2(&locker2);
+ request2.compatibleFirst = true;
- // Once the X request is gone, lockerHi should be granted, because it's queue jumping
- ASSERT(lockMgr.unlock(&requestX));
+ MMAPV1LockerImpl locker3;
+ LockRequestCombo request3(&locker3);
- ASSERT(requestHi.lastResId == resId);
- ASSERT(requestHi.lastResult == LOCK_OK);
+ // Lock all in IS mode
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_IS));
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_IS));
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &request3, MODE_IS));
- // Finally lockerLow should be granted
- ASSERT(lockMgr.unlock(&requestHi));
+ // Now an exclusive mode comes, which would block
+ MMAPV1LockerImpl lockerX;
+ LockRequestCombo requestX(&lockerX);
- ASSERT(requestLow.lastResId == resId);
- ASSERT(requestLow.lastResult == LOCK_OK);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
- // This avoids the lock manager asserting on leaked locks
- ASSERT(lockMgr.unlock(&requestLow));
+ // If an S comes, it should be granted, because of request2
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
}
- TEST(LockManager, CompatibleFirstImmediateGrant) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_GLOBAL, 0);
-
- MMAPV1LockerImpl locker1;
- LockRequestCombo request1(&locker1);
-
- MMAPV1LockerImpl locker2;
- LockRequestCombo request2(&locker2);
- request2.compatibleFirst = true;
-
- MMAPV1LockerImpl locker3;
- LockRequestCombo request3(&locker3);
-
- // Lock all in IS mode
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request1, MODE_IS));
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request2, MODE_IS));
- ASSERT(LOCK_OK == lockMgr.lock(resId, &request3, MODE_IS));
+ // If request1 goes away, the policy should still be compatible-first, because of request2
+ ASSERT(lockMgr.unlock(&request1));
- // Now an exclusive mode comes, which would block
- MMAPV1LockerImpl lockerX;
- LockRequestCombo requestX(&lockerX);
-
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
-
- // If an S comes, it should be granted, because of request2
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
-
- // If request1 goes away, the policy should still be compatible-first, because of request2
- ASSERT(lockMgr.unlock(&request1));
-
- // If S comes again, it should be granted, because of request2 still there
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
+ // If S comes again, it should be granted, because of request2 still there
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
+ }
- // With request2 gone the policy should go back to FIFO, even though request3 is active
- ASSERT(lockMgr.unlock(&request2));
+ // With request2 gone the policy should go back to FIFO, even though request3 is active
+ ASSERT(lockMgr.unlock(&request2));
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
-
- // Unlock request3 to keep the lock mgr not assert for leaked locks
- ASSERT(lockMgr.unlock(&request3));
- ASSERT(lockMgr.unlock(&requestX));
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
}
- TEST(LockManager, CompatibleFirstDelayedGrant) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_GLOBAL, 0);
-
- MMAPV1LockerImpl lockerXInitial;
- LockRequestCombo requestXInitial(&lockerXInitial);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestXInitial, MODE_X));
-
- MMAPV1LockerImpl locker1;
- LockRequestCombo request1(&locker1);
-
- MMAPV1LockerImpl locker2;
- LockRequestCombo request2(&locker2);
- request2.compatibleFirst = true;
-
- MMAPV1LockerImpl locker3;
- LockRequestCombo request3(&locker3);
-
- // Lock all in IS mode (should block behind the global lock)
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request1, MODE_IS));
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_IS));
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request3, MODE_IS));
-
- // Now an exclusive mode comes, which would block behind the IS modes
- MMAPV1LockerImpl lockerX;
- LockRequestCombo requestX(&lockerX);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
-
- // Free the first X lock so all IS modes are granted
- ASSERT(lockMgr.unlock(&requestXInitial));
- ASSERT(request1.lastResult == LOCK_OK);
- ASSERT(request2.lastResult == LOCK_OK);
- ASSERT(request3.lastResult == LOCK_OK);
-
- // If an S comes, it should be granted, because of request2
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
-
- // If request1 goes away, the policy should still be compatible-first, because of request2
- ASSERT(lockMgr.unlock(&request1));
+    // Unlock request3 so the lock mgr does not assert for leaked locks
+ ASSERT(lockMgr.unlock(&request3));
+ ASSERT(lockMgr.unlock(&requestX));
+}
+
+TEST(LockManager, CompatibleFirstDelayedGrant) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_GLOBAL, 0);
+
+ MMAPV1LockerImpl lockerXInitial;
+ LockRequestCombo requestXInitial(&lockerXInitial);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestXInitial, MODE_X));
+
+ MMAPV1LockerImpl locker1;
+ LockRequestCombo request1(&locker1);
+
+ MMAPV1LockerImpl locker2;
+ LockRequestCombo request2(&locker2);
+ request2.compatibleFirst = true;
+
+ MMAPV1LockerImpl locker3;
+ LockRequestCombo request3(&locker3);
+
+ // Lock all in IS mode (should block behind the global lock)
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request1, MODE_IS));
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request2, MODE_IS));
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &request3, MODE_IS));
+
+ // Now an exclusive mode comes, which would block behind the IS modes
+ MMAPV1LockerImpl lockerX;
+ LockRequestCombo requestX(&lockerX);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
+
+ // Free the first X lock so all IS modes are granted
+ ASSERT(lockMgr.unlock(&requestXInitial));
+ ASSERT(request1.lastResult == LOCK_OK);
+ ASSERT(request2.lastResult == LOCK_OK);
+ ASSERT(request3.lastResult == LOCK_OK);
+
+ // If an S comes, it should be granted, because of request2
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
+ }
- // If S comes again, it should be granted, because of request2 still there
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
+ // If request1 goes away, the policy should still be compatible-first, because of request2
+ ASSERT(lockMgr.unlock(&request1));
- // With request2 gone the policy should go back to FIFO, even though request3 is active
- ASSERT(lockMgr.unlock(&request2));
+ // If S comes again, it should be granted, because of request2 still there
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
+ }
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
+ // With request2 gone the policy should go back to FIFO, even though request3 is active
+ ASSERT(lockMgr.unlock(&request2));
- // Unlock request3 to keep the lock mgr not assert for leaked locks
- ASSERT(lockMgr.unlock(&request3));
- ASSERT(lockMgr.unlock(&requestX));
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
}
- TEST(LockManager, CompatibleFirstCancelWaiting) {
- LockManager lockMgr;
- const ResourceId resId(RESOURCE_GLOBAL, 0);
-
- MMAPV1LockerImpl lockerSInitial;
- LockRequestCombo requestSInitial(&lockerSInitial);
- ASSERT(LOCK_OK == lockMgr.lock(resId, &requestSInitial, MODE_S));
-
- MMAPV1LockerImpl lockerX;
- LockRequestCombo requestX(&lockerX);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
-
- MMAPV1LockerImpl lockerPending;
- LockRequestCombo requestPending(&lockerPending);
- requestPending.compatibleFirst = true;
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestPending, MODE_S));
-
- // S1 is not granted yet, so the policy should still be FIFO
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
+    // Unlock request3 so the lock mgr does not assert for leaked locks
+ ASSERT(lockMgr.unlock(&request3));
+ ASSERT(lockMgr.unlock(&requestX));
+}
+
+TEST(LockManager, CompatibleFirstCancelWaiting) {
+ LockManager lockMgr;
+ const ResourceId resId(RESOURCE_GLOBAL, 0);
+
+ MMAPV1LockerImpl lockerSInitial;
+ LockRequestCombo requestSInitial(&lockerSInitial);
+ ASSERT(LOCK_OK == lockMgr.lock(resId, &requestSInitial, MODE_S));
+
+ MMAPV1LockerImpl lockerX;
+ LockRequestCombo requestX(&lockerX);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestX, MODE_X));
+
+ MMAPV1LockerImpl lockerPending;
+ LockRequestCombo requestPending(&lockerPending);
+ requestPending.compatibleFirst = true;
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestPending, MODE_S));
+
+    // The pending compatibleFirst request is not granted yet, so the policy should still be FIFO
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
+ }
- // Unlock S1, the policy should still be FIFO
- ASSERT(lockMgr.unlock(&requestPending));
+    // Unlock the pending request; the policy should still be FIFO
+ ASSERT(lockMgr.unlock(&requestPending));
- {
- MMAPV1LockerImpl lockerS;
- LockRequestCombo requestS(&lockerS);
- ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
- ASSERT(lockMgr.unlock(&requestS));
- }
-
- // Unlock remaining locks to keep the leak detection logic happy
- ASSERT(lockMgr.unlock(&requestSInitial));
- ASSERT(lockMgr.unlock(&requestX));
+ {
+ MMAPV1LockerImpl lockerS;
+ LockRequestCombo requestS(&lockerS);
+ ASSERT(LOCK_WAITING == lockMgr.lock(resId, &requestS, MODE_S));
+ ASSERT(lockMgr.unlock(&requestS));
}
-} // namespace mongo
+ // Unlock remaining locks to keep the leak detection logic happy
+ ASSERT(lockMgr.unlock(&requestSInitial));
+ ASSERT(lockMgr.unlock(&requestX));
+}
+
+} // namespace mongo
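ValidateConflictMatrix above spells out the standard multi-granularity compatibility rules one
pair at a time. The same information can be read off a small lookup table; the self-contained
sketch below is only an illustration of that matrix (the names are local to the sketch and it
is not the LockManager's internal representation):

    // Illustrative encoding of the compatibility matrix exercised by ValidateConflictMatrix.
    // The indices are local to this sketch and are not MongoDB's LockMode enum values.
    enum SketchMode { kIS, kIX, kS, kX };

    // conflictTable[existing][requested] == true means the new request must wait.
    const bool conflictTable[4][4] = {
        //             IS     IX     S      X
        /* IS */     {false, false, false, true},
        /* IX */     {false, false, true,  true},
        /* S  */     {false, true,  false, true},
        /* X  */     {true,  true,  true,  true},
    };

    inline bool conflicts(SketchMode existing, SketchMode requested) {
        return conflictTable[existing][requested];
    }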
diff --git a/src/mongo/db/concurrency/lock_manager_test_help.h b/src/mongo/db/concurrency/lock_manager_test_help.h
index 1650ca0806a..9344ee67a32 100644
--- a/src/mongo/db/concurrency/lock_manager_test_help.h
+++ b/src/mongo/db/concurrency/lock_manager_test_help.h
@@ -33,43 +33,41 @@
namespace mongo {
- class LockerForTests : public LockerImpl<false> {
- public:
- explicit LockerForTests(LockMode globalLockMode) {
- lockGlobal(globalLockMode);
- }
+class LockerForTests : public LockerImpl<false> {
+public:
+ explicit LockerForTests(LockMode globalLockMode) {
+ lockGlobal(globalLockMode);
+ }
- ~LockerForTests() {
- unlockAll();
- }
- };
+ ~LockerForTests() {
+ unlockAll();
+ }
+};
- class TrackingLockGrantNotification : public LockGrantNotification {
- public:
- TrackingLockGrantNotification() : numNotifies(0), lastResult(LOCK_INVALID) {
+class TrackingLockGrantNotification : public LockGrantNotification {
+public:
+ TrackingLockGrantNotification() : numNotifies(0), lastResult(LOCK_INVALID) {}
- }
+ virtual void notify(ResourceId resId, LockResult result) {
+ numNotifies++;
+ lastResId = resId;
+ lastResult = result;
+ }
- virtual void notify(ResourceId resId, LockResult result) {
- numNotifies++;
- lastResId = resId;
- lastResult = result;
- }
+public:
+ int numNotifies;
- public:
- int numNotifies;
+ ResourceId lastResId;
+ LockResult lastResult;
+};
- ResourceId lastResId;
- LockResult lastResult;
- };
+struct LockRequestCombo : public LockRequest, TrackingLockGrantNotification {
+public:
+ explicit LockRequestCombo(Locker* locker) {
+ initNew(locker, this);
+ }
+};
- struct LockRequestCombo : public LockRequest, TrackingLockGrantNotification {
- public:
- explicit LockRequestCombo (Locker* locker) {
- initNew(locker, this);
- }
- };
-
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/lock_request_list.h b/src/mongo/db/concurrency/lock_request_list.h
index 066a09dcd98..3501504ac4f 100644
--- a/src/mongo/db/concurrency/lock_request_list.h
+++ b/src/mongo/db/concurrency/lock_request_list.h
@@ -33,81 +33,76 @@
namespace mongo {
- /**
- * Simple intrusive list implementation for the lock's granted and conflicting lists. Does not
- * own its contents, just uses the intrusive pointers on the LockRequest structure to link them
- * together. Therefore requests must outlive this list.
- *
- * Intentionally implemented as a POD in order to avoid constructor/destructor invocations.
- *
- * NOTE: This class should not be used for generic purposes and should not be used outside of
- * the Lock Manager library.
- */
- class LockRequestList {
- public:
-
- void push_front(LockRequest* request) {
- // Sanity check that we do not reuse entries without cleaning them up
- invariant(request->next == NULL);
- invariant(request->prev == NULL);
-
- if (_front == NULL) {
- _front = _back = request;
- }
- else {
- request->next = _front;
-
- _front->prev = request;
- _front = request;
- }
+/**
+ * Simple intrusive list implementation for the lock's granted and conflicting lists. Does not
+ * own its contents, just uses the intrusive pointers on the LockRequest structure to link them
+ * together. Therefore requests must outlive this list.
+ *
+ * Intentionally implemented as a POD in order to avoid constructor/destructor invocations.
+ *
+ * NOTE: This class should not be used for generic purposes and should not be used outside of
+ * the Lock Manager library.
+ */
+class LockRequestList {
+public:
+ void push_front(LockRequest* request) {
+ // Sanity check that we do not reuse entries without cleaning them up
+ invariant(request->next == NULL);
+ invariant(request->prev == NULL);
+
+ if (_front == NULL) {
+ _front = _back = request;
+ } else {
+ request->next = _front;
+
+ _front->prev = request;
+ _front = request;
}
+ }
- void push_back(LockRequest* request) {
- // Sanity check that we do not reuse entries without cleaning them up
- invariant(request->next == NULL);
- invariant(request->prev == NULL);
+ void push_back(LockRequest* request) {
+ // Sanity check that we do not reuse entries without cleaning them up
+ invariant(request->next == NULL);
+ invariant(request->prev == NULL);
- if (_front == NULL) {
- _front = _back = request;
- }
- else {
- request->prev = _back;
+ if (_front == NULL) {
+ _front = _back = request;
+ } else {
+ request->prev = _back;
- _back->next = request;
- _back = request;
- }
+ _back->next = request;
+ _back = request;
}
+ }
- void remove(LockRequest* request) {
- if (request->prev != NULL) {
- request->prev->next = request->next;
- }
- else {
- _front = request->next;
- }
-
- if (request->next != NULL) {
- request->next->prev = request->prev;
- }
- else {
- _back = request->prev;
- }
-
- request->prev = NULL;
- request->next = NULL;
+ void remove(LockRequest* request) {
+ if (request->prev != NULL) {
+ request->prev->next = request->next;
+ } else {
+ _front = request->next;
}
- void reset() {
- _front = _back = NULL;
+ if (request->next != NULL) {
+ request->next->prev = request->prev;
+ } else {
+ _back = request->prev;
}
- bool empty() const {
- return _front == NULL;
- }
+ request->prev = NULL;
+ request->next = NULL;
+ }
+
+ void reset() {
+ _front = _back = NULL;
+ }
+
+ bool empty() const {
+ return _front == NULL;
+ }
- // Pointers to the beginning and the end of the list
- LockRequest* _front;
- LockRequest* _back;
- };
+ // Pointers to the beginning and the end of the list
+ LockRequest* _front;
+ LockRequest* _back;
+};
-} // namespace mongo
+} // namespace mongo
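Because the links live inside LockRequest itself, a known request can be unlinked from the
middle of a LockRequestList in O(1) with no traversal. A minimal sketch of that behaviour,
assuming lock_manager.h, lock_request_list.h and lock_manager_test_help.h are included and that
LockRequest::initNew() leaves prev/next null, as the push_* invariants require; the header
notes this class is internal to the lock manager, so this is purely illustrative:

    void lockRequestListSketch() {
        mongo::MMAPV1LockerImpl locker;
        mongo::TrackingLockGrantNotification notify;

        mongo::LockRequest a, b, c;
        a.initNew(&locker, &notify);
        b.initNew(&locker, &notify);
        c.initNew(&locker, &notify);

        mongo::LockRequestList list;
        list.reset();  // POD: _front/_back must be explicitly initialized before first use

        list.push_back(&a);
        list.push_back(&b);
        list.push_back(&c);

        // Removing the middle entry only re-links its neighbours; there is no traversal and
        // no iterator that could be invalidated by concurrent modification of the list.
        list.remove(&b);

        list.remove(&a);
        list.remove(&c);
    }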
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index 6c12a8ae1b1..37ca3c7f611 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -46,104 +46,102 @@
namespace mongo {
namespace {
- /**
- * Partitioned global lock statistics, so we don't hit the same bucket.
- */
- class PartitionedInstanceWideLockStats {
- MONGO_DISALLOW_COPYING(PartitionedInstanceWideLockStats);
- public:
+/**
+ * Partitioned global lock statistics, so we don't hit the same bucket.
+ */
+class PartitionedInstanceWideLockStats {
+ MONGO_DISALLOW_COPYING(PartitionedInstanceWideLockStats);
- PartitionedInstanceWideLockStats() { }
+public:
+ PartitionedInstanceWideLockStats() {}
- void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) {
- _get(id).recordAcquisition(resId, mode);
- }
+ void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) {
+ _get(id).recordAcquisition(resId, mode);
+ }
- void recordWait(LockerId id, ResourceId resId, LockMode mode) {
- _get(id).recordWait(resId, mode);
- }
+ void recordWait(LockerId id, ResourceId resId, LockMode mode) {
+ _get(id).recordWait(resId, mode);
+ }
- void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) {
- _get(id).recordWaitTime(resId, mode, waitMicros);
- }
+ void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) {
+ _get(id).recordWaitTime(resId, mode, waitMicros);
+ }
- void recordDeadlock(ResourceId resId, LockMode mode) {
- _get(resId).recordDeadlock(resId, mode);
- }
+ void recordDeadlock(ResourceId resId, LockMode mode) {
+ _get(resId).recordDeadlock(resId, mode);
+ }
- void report(SingleThreadedLockStats* outStats) const {
- for (int i = 0; i < NumPartitions; i++) {
- outStats->append(_partitions[i].stats);
- }
+ void report(SingleThreadedLockStats* outStats) const {
+ for (int i = 0; i < NumPartitions; i++) {
+ outStats->append(_partitions[i].stats);
}
+ }
- void reset() {
- for (int i = 0; i < NumPartitions; i++) {
- _partitions[i].stats.reset();
- }
+ void reset() {
+ for (int i = 0; i < NumPartitions; i++) {
+ _partitions[i].stats.reset();
}
+ }
- private:
-
- // This alignment is a best effort approach to ensure that each partition falls on a
- // separate page/cache line in order to avoid false sharing.
- struct MONGO_COMPILER_ALIGN_TYPE(128) AlignedLockStats {
- AtomicLockStats stats;
- };
+private:
+ // This alignment is a best effort approach to ensure that each partition falls on a
+ // separate page/cache line in order to avoid false sharing.
+ struct MONGO_COMPILER_ALIGN_TYPE(128) AlignedLockStats {
+ AtomicLockStats stats;
+ };
- enum { NumPartitions = 8 };
+ enum { NumPartitions = 8 };
- AtomicLockStats& _get(LockerId id) {
- return _partitions[id % NumPartitions].stats;
- }
+ AtomicLockStats& _get(LockerId id) {
+ return _partitions[id % NumPartitions].stats;
+ }
- AlignedLockStats _partitions[NumPartitions];
- };
+ AlignedLockStats _partitions[NumPartitions];
+};
- // Global lock manager instance.
- LockManager globalLockManager;
+// Global lock manager instance.
+LockManager globalLockManager;
- // Global lock. Every server operation, which uses the Locker must acquire this lock at least
- // once. See comments in the header file (begin/endTransaction) for more information.
- const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL,
- ResourceId::SINGLETON_GLOBAL);
+// Global lock. Every server operation that uses the Locker must acquire this lock at least
+// once. See comments in the header file (begin/endTransaction) for more information.
+const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_GLOBAL);
- // Flush lock. This is only used for the MMAP V1 storage engine and synchronizes journal writes
- // to the shared view and remaps. See the comments in the header for information on how MMAP V1
- // concurrency control works.
- const ResourceId resourceIdMMAPV1Flush = ResourceId(RESOURCE_MMAPV1_FLUSH,
- ResourceId::SINGLETON_MMAPV1_FLUSH);
+// Flush lock. This is only used for the MMAP V1 storage engine and synchronizes journal writes
+// to the shared view and remaps. See the comments in the header for information on how MMAP V1
+// concurrency control works.
+const ResourceId resourceIdMMAPV1Flush =
+ ResourceId(RESOURCE_MMAPV1_FLUSH, ResourceId::SINGLETON_MMAPV1_FLUSH);
- // How often (in millis) to check for deadlock if a lock has not been granted for some time
- const unsigned DeadlockTimeoutMs = 500;
+// How often (in millis) to check for deadlock if a lock has not been granted for some time
+const unsigned DeadlockTimeoutMs = 500;
- // Dispenses unique LockerId identifiers
- AtomicUInt64 idCounter(0);
+// Dispenses unique LockerId identifiers
+AtomicUInt64 idCounter(0);
- // Partitioned global lock statistics, so we don't hit the same bucket
- PartitionedInstanceWideLockStats globalStats;
+// Partitioned global lock statistics, so we don't hit the same bucket
+PartitionedInstanceWideLockStats globalStats;
- /**
- * Whether the particular lock's release should be held until the end of the operation. We
- * delay release of exclusive locks (locks that are for write operations) in order to ensure
- * that the data they protect is committed successfully.
- */
- bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
- // Global and flush lock are not used to protect transactional resources and as such, they
- // need to be acquired and released when requested.
- if (resId.getType() == RESOURCE_GLOBAL) {
- return false;
- }
+/**
+ * Whether the particular lock's release should be held until the end of the operation. We
+ * delay release of exclusive locks (locks that are for write operations) in order to ensure
+ * that the data they protect is committed successfully.
+ */
+bool shouldDelayUnlock(ResourceId resId, LockMode mode) {
+    // The global and flush locks are not used to protect transactional resources and, as such,
+    // they need to be acquired and released when requested.
+ if (resId.getType() == RESOURCE_GLOBAL) {
+ return false;
+ }
- if (resId == resourceIdMMAPV1Flush) {
- return false;
- }
+ if (resId == resourceIdMMAPV1Flush) {
+ return false;
+ }
- switch (mode) {
+ switch (mode) {
case MODE_X:
case MODE_IX:
return true;
@@ -154,614 +152,612 @@ namespace {
default:
invariant(false);
- }
}
+}
-} // namespace
+} // namespace
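
To make the release policy above concrete, here is a minimal illustrative sketch of the observable behaviour through the public LockerImpl API (not part of this change; the "test.coll" namespace is made up): exclusive locks taken inside a write unit of work are queued rather than released, and are only dropped when the outermost unit of work ends.

    // Hedged sketch only; uses just the APIs defined in this file.
    DefaultLockerImpl locker;
    invariant(LOCK_OK == locker.lockGlobal(MODE_IX));

    locker.beginWriteUnitOfWork();
    const ResourceId collId(RESOURCE_COLLECTION, StringData("test.coll"));
    invariant(LOCK_OK == locker.lock(collId, MODE_X));

    // Inside the WUOW, unlock() defers the release (shouldDelayUnlock returns
    // true for MODE_X) and reports false to the caller.
    invariant(!locker.unlock(collId));
    invariant(locker.getLockMode(collId) == MODE_X);

    // Leaving the outermost unit of work performs the deferred release.
    locker.endWriteUnitOfWork();
    invariant(locker.getLockMode(collId) == MODE_NONE);

    locker.unlockAll();
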
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isW() const {
- return getLockMode(resourceIdGlobal) == MODE_X;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isW() const {
+ return getLockMode(resourceIdGlobal) == MODE_X;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isR() const {
- return getLockMode(resourceIdGlobal) == MODE_S;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isR() const {
+ return getLockMode(resourceIdGlobal) == MODE_S;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isLocked() const {
- return getLockMode(resourceIdGlobal) != MODE_NONE;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isLocked() const {
+ return getLockMode(resourceIdGlobal) != MODE_NONE;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isWriteLocked() const {
- return isLockHeldForMode(resourceIdGlobal, MODE_IX);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isWriteLocked() const {
+ return isLockHeldForMode(resourceIdGlobal, MODE_IX);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isReadLocked() const {
- return isLockHeldForMode(resourceIdGlobal, MODE_IS);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isReadLocked() const {
+ return isLockHeldForMode(resourceIdGlobal, MODE_IS);
+}
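
As a quick illustration of the queries above (a sketch, not part of this change): a global IX lock is reported as a write lock and a global IS lock as a read lock, while isW()/isR() fire only for the full exclusive/shared global modes.

    DefaultLockerImpl locker;

    invariant(LOCK_OK == locker.lockGlobal(MODE_IX));
    invariant(locker.isLocked());
    invariant(locker.isWriteLocked() && !locker.isW());
    invariant(locker.unlockAll());

    invariant(LOCK_OK == locker.lockGlobal(MODE_IS));
    invariant(locker.isReadLocked() && !locker.isR());
    invariant(locker.unlockAll());
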
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::assertEmptyAndReset() {
- invariant(!inAWriteUnitOfWork());
- invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
- invariant(_requests.empty());
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::assertEmptyAndReset() {
+ invariant(!inAWriteUnitOfWork());
+ invariant(_resourcesToUnlockAtEndOfUnitOfWork.empty());
+ invariant(_requests.empty());
- // Reset the locking statistics so the object can be reused
- _stats.reset();
- }
+ // Reset the locking statistics so the object can be reused
+ _stats.reset();
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::dump() const {
- StringBuilder ss;
- ss << "Locker id " << _id << " status: ";
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::dump() const {
+ StringBuilder ss;
+ ss << "Locker id " << _id << " status: ";
- _lock.lock();
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- ss << it.key().toString() << " "
- << lockRequestStatusName(it->status) << " in "
- << modeName(it->mode) << "; ";
- it.next();
- }
- _lock.unlock();
-
- log() << ss.str() << std::endl;
+ _lock.lock();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ ss << it.key().toString() << " " << lockRequestStatusName(it->status) << " in "
+ << modeName(it->mode) << "; ";
+ it.next();
}
+ _lock.unlock();
+ log() << ss.str() << std::endl;
+}
- //
- // CondVarLockGrantNotification
- //
- CondVarLockGrantNotification::CondVarLockGrantNotification() {
- clear();
- }
+//
+// CondVarLockGrantNotification
+//
- void CondVarLockGrantNotification::clear() {
- _result = LOCK_INVALID;
- }
+CondVarLockGrantNotification::CondVarLockGrantNotification() {
+ clear();
+}
- LockResult CondVarLockGrantNotification::wait(unsigned timeoutMs) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- while (_result == LOCK_INVALID) {
- if (boost::cv_status::timeout == _cond.wait_for(lock, Milliseconds(timeoutMs))) {
- // Timeout
- return LOCK_TIMEOUT;
- }
- }
+void CondVarLockGrantNotification::clear() {
+ _result = LOCK_INVALID;
+}
- return _result;
+LockResult CondVarLockGrantNotification::wait(unsigned timeoutMs) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ while (_result == LOCK_INVALID) {
+ if (boost::cv_status::timeout == _cond.wait_for(lock, Milliseconds(timeoutMs))) {
+ // Timeout
+ return LOCK_TIMEOUT;
+ }
}
- void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- invariant(_result == LOCK_INVALID);
- _result = result;
+ return _result;
+}
- _cond.notify_all();
- }
+void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ invariant(_result == LOCK_INVALID);
+ _result = result;
+ _cond.notify_all();
+}
- //
- // Locker
- //
- template<bool IsForMMAPV1>
- LockerImpl<IsForMMAPV1>::LockerImpl()
- : _id(idCounter.addAndFetch(1)),
- _requestStartTime(0),
- _wuowNestingLevel(0),
- _batchWriter(false) {
- }
+//
+// Locker
+//
- template<bool IsForMMAPV1>
- LockerImpl<IsForMMAPV1>::~LockerImpl() {
- // Cannot delete the Locker while there are still outstanding requests, because the
- // LockManager may attempt to access deleted memory. Besides it is probably incorrect
- // to delete with unaccounted locks anyways.
- assertEmptyAndReset();
- }
+template <bool IsForMMAPV1>
+LockerImpl<IsForMMAPV1>::LockerImpl()
+ : _id(idCounter.addAndFetch(1)),
+ _requestStartTime(0),
+ _wuowNestingLevel(0),
+ _batchWriter(false) {}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobal(LockMode mode, unsigned timeoutMs) {
- LockResult result = lockGlobalBegin(mode);
- if (result == LOCK_WAITING) {
- result = lockGlobalComplete(timeoutMs);
- }
+template <bool IsForMMAPV1>
+LockerImpl<IsForMMAPV1>::~LockerImpl() {
+ // Cannot delete the Locker while there are still outstanding requests, because the
+ // LockManager may attempt to access deleted memory. Besides it is probably incorrect
+ // to delete with unaccounted locks anyways.
+ assertEmptyAndReset();
+}
- if (result == LOCK_OK) {
- lockMMAPV1Flush();
- }
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobal(LockMode mode, unsigned timeoutMs) {
+ LockResult result = lockGlobalBegin(mode);
+ if (result == LOCK_WAITING) {
+ result = lockGlobalComplete(timeoutMs);
+ }
- return result;
+ if (result == LOCK_OK) {
+ lockMMAPV1Flush();
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobalBegin(LockMode mode) {
- const LockResult result = lockBegin(resourceIdGlobal, mode);
- if (result == LOCK_OK) return LOCK_OK;
+ return result;
+}
- // Currently, deadlock detection does not happen inline with lock acquisition so the only
- // unsuccessful result that the lock manager would return is LOCK_WAITING.
- invariant(result == LOCK_WAITING);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobalBegin(LockMode mode) {
+ const LockResult result = lockBegin(resourceIdGlobal, mode);
+ if (result == LOCK_OK)
+ return LOCK_OK;
- return result;
- }
+ // Currently, deadlock detection does not happen inline with lock acquisition so the only
+ // unsuccessful result that the lock manager would return is LOCK_WAITING.
+ invariant(result == LOCK_WAITING);
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockGlobalComplete(unsigned timeoutMs) {
- return lockComplete(resourceIdGlobal, getLockMode(resourceIdGlobal), timeoutMs, false);
- }
+ return result;
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::lockMMAPV1Flush() {
- if (!IsForMMAPV1) return;
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockGlobalComplete(unsigned timeoutMs) {
+ return lockComplete(resourceIdGlobal, getLockMode(resourceIdGlobal), timeoutMs, false);
+}
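
The split begin/complete path above mirrors what lockGlobal() does internally. A hedged sketch of how a caller might drive it directly (the 500 ms timeout is an arbitrary choice for the example; for the MMAP V1 locker, lockGlobal() additionally acquires the flush lock via lockMMAPV1Flush(), which this sketch omits):

    DefaultLockerImpl locker;

    LockResult result = locker.lockGlobalBegin(MODE_IX);
    if (result == LOCK_WAITING) {
        // Block for up to 500 ms waiting for the grant.
        result = locker.lockGlobalComplete(500);
    }

    if (result == LOCK_OK) {
        // ... perform work under the global IX lock ...
        locker.unlockAll();
    }
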
- // The flush lock always has a reference count of 1, because it is dropped at the end of
- // each write unit of work in order to allow the flush thread to run. See the comments in
- // the header for information on how the MMAP V1 journaling system works.
- LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
- if (globalLockRequest->recursiveCount == 1) {
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::lockMMAPV1Flush() {
+ if (!IsForMMAPV1)
+ return;
- dassert(getLockMode(resourceIdMMAPV1Flush) == _getModeForMMAPV1FlushLock());
+ // The flush lock always has a reference count of 1, because it is dropped at the end of
+ // each write unit of work in order to allow the flush thread to run. See the comments in
+ // the header for information on how the MMAP V1 journaling system works.
+ LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
+ if (globalLockRequest->recursiveCount == 1) {
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
- invariant(!inAWriteUnitOfWork());
+ dassert(getLockMode(resourceIdMMAPV1Flush) == _getModeForMMAPV1FlushLock());
+}
- LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
- invariant(globalLockRequest->mode == MODE_X);
- invariant(globalLockRequest->recursiveCount == 1);
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::downgradeGlobalXtoSForMMAPV1() {
+ invariant(!inAWriteUnitOfWork());
- // Making this call here will record lock downgrades as acquisitions, which is acceptable
- globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
- _stats.recordAcquisition(resourceIdGlobal, MODE_S);
+ LockRequest* globalLockRequest = _requests.find(resourceIdGlobal).objAddr();
+ invariant(globalLockRequest->mode == MODE_X);
+ invariant(globalLockRequest->recursiveCount == 1);
- globalLockManager.downgrade(globalLockRequest, MODE_S);
+ // Making this call here will record lock downgrades as acquisitions, which is acceptable
+ globalStats.recordAcquisition(_id, resourceIdGlobal, MODE_S);
+ _stats.recordAcquisition(resourceIdGlobal, MODE_S);
- if (IsForMMAPV1) {
- invariant(unlock(resourceIdMMAPV1Flush));
- }
+ globalLockManager.downgrade(globalLockRequest, MODE_S);
+
+ if (IsForMMAPV1) {
+ invariant(unlock(resourceIdMMAPV1Flush));
}
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::unlockAll() {
- if (!unlock(resourceIdGlobal)) {
- return false;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::unlockAll() {
+ if (!unlock(resourceIdGlobal)) {
+ return false;
+ }
- LockRequestsMap::Iterator it = _requests.begin();
- while (!it.finished()) {
- // If we're here we should only have one reference to any lock. It is a programming
- // error for any lock to have more references than the global lock, because every
- // scope starts by calling lockGlobal.
- if (it.key().getType() == RESOURCE_GLOBAL) {
- it.next();
- }
- else {
- invariant(_unlockImpl(it));
- }
+ LockRequestsMap::Iterator it = _requests.begin();
+ while (!it.finished()) {
+ // If we're here we should only have one reference to any lock. It is a programming
+ // error for any lock to have more references than the global lock, because every
+ // scope starts by calling lockGlobal.
+ if (it.key().getType() == RESOURCE_GLOBAL) {
+ it.next();
+ } else {
+ invariant(_unlockImpl(it));
}
-
- return true;
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::beginWriteUnitOfWork() {
- // Sanity check that write transactions under MMAP V1 have acquired the flush lock, so we
- // don't allow partial changes to be written.
- dassert(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush, MODE_IX));
+ return true;
+}
- _wuowNestingLevel++;
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::beginWriteUnitOfWork() {
+ // Sanity check that write transactions under MMAP V1 have acquired the flush lock, so we
+ // don't allow partial changes to be written.
+ dassert(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush, MODE_IX));
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
- invariant(_wuowNestingLevel > 0);
+ _wuowNestingLevel++;
+}
- if (--_wuowNestingLevel > 0) {
- // Don't do anything unless leaving outermost WUOW.
- return;
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::endWriteUnitOfWork() {
+ invariant(_wuowNestingLevel > 0);
- while (!_resourcesToUnlockAtEndOfUnitOfWork.empty()) {
- unlock(_resourcesToUnlockAtEndOfUnitOfWork.front());
- _resourcesToUnlockAtEndOfUnitOfWork.pop();
- }
+ if (--_wuowNestingLevel > 0) {
+ // Don't do anything unless leaving outermost WUOW.
+ return;
+ }
- // For MMAP V1, we need to yield the flush lock so that the flush thread can run
- if (IsForMMAPV1) {
- invariant(unlock(resourceIdMMAPV1Flush));
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
- }
+ while (!_resourcesToUnlockAtEndOfUnitOfWork.empty()) {
+ unlock(_resourcesToUnlockAtEndOfUnitOfWork.front());
+ _resourcesToUnlockAtEndOfUnitOfWork.pop();
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock) {
+ // For MMAP V1, we need to yield the flush lock so that the flush thread can run
+ if (IsForMMAPV1) {
+ invariant(unlock(resourceIdMMAPV1Flush));
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ }
+}
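
A small sketch of the nesting behaviour (illustrative only): inner begin/end pairs just adjust the nesting counter, and only the outermost endWriteUnitOfWork() performs the deferred releases and, for MMAP V1, yields the flush lock.

    DefaultLockerImpl locker;
    invariant(LOCK_OK == locker.lockGlobal(MODE_IX));

    locker.beginWriteUnitOfWork();
    locker.beginWriteUnitOfWork();  // nested unit of work
    locker.endWriteUnitOfWork();    // still inside the outer WUOW
    invariant(locker.inAWriteUnitOfWork());

    locker.endWriteUnitOfWork();    // outermost end: deferred unlocks happen here
    invariant(!locker.inAWriteUnitOfWork());

    locker.unlockAll();
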
- const LockResult result = lockBegin(resId, mode);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock) {
+ const LockResult result = lockBegin(resId, mode);
- // Fast, uncontended path
- if (result == LOCK_OK) return LOCK_OK;
+ // Fast, uncontended path
+ if (result == LOCK_OK)
+ return LOCK_OK;
- // Currently, deadlock detection does not happen inline with lock acquisition so the only
- // unsuccessful result that the lock manager would return is LOCK_WAITING.
- invariant(result == LOCK_WAITING);
+ // Currently, deadlock detection does not happen inline with lock acquisition so the only
+ // unsuccessful result that the lock manager would return is LOCK_WAITING.
+ invariant(result == LOCK_WAITING);
- return lockComplete(resId, mode, timeoutMs, checkDeadlock);
- }
+ return lockComplete(resId, mode, timeoutMs, checkDeadlock);
+}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::downgrade(ResourceId resId, LockMode newMode) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- globalLockManager.downgrade(it.objAddr(), newMode);
- }
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::downgrade(ResourceId resId, LockMode newMode) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ globalLockManager.downgrade(it.objAddr(), newMode);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- return _unlockImpl(it);
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::unlock(ResourceId resId) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ return _unlockImpl(it);
+}
- template<bool IsForMMAPV1>
- LockMode LockerImpl<IsForMMAPV1>::getLockMode(ResourceId resId) const {
- scoped_spinlock scopedLock(_lock);
+template <bool IsForMMAPV1>
+LockMode LockerImpl<IsForMMAPV1>::getLockMode(ResourceId resId) const {
+ scoped_spinlock scopedLock(_lock);
- const LockRequestsMap::ConstIterator it = _requests.find(resId);
- if (!it) return MODE_NONE;
+ const LockRequestsMap::ConstIterator it = _requests.find(resId);
+ if (!it)
+ return MODE_NONE;
- return it->mode;
- }
+ return it->mode;
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isLockHeldForMode(ResourceId resId, LockMode mode) const {
- return isModeCovered(mode, getLockMode(resId));
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isLockHeldForMode(ResourceId resId, LockMode mode) const {
+ return isModeCovered(mode, getLockMode(resId));
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isDbLockedForMode(StringData dbName,
- LockMode mode) const {
- invariant(nsIsDbOnly(dbName));
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isDbLockedForMode(StringData dbName, LockMode mode) const {
+ invariant(nsIsDbOnly(dbName));
- if (isW()) return true;
- if (isR() && isSharedLockMode(mode)) return true;
+ if (isW())
+ return true;
+ if (isR() && isSharedLockMode(mode))
+ return true;
- const ResourceId resIdDb(RESOURCE_DATABASE, dbName);
- return isLockHeldForMode(resIdDb, mode);
- }
+ const ResourceId resIdDb(RESOURCE_DATABASE, dbName);
+ return isLockHeldForMode(resIdDb, mode);
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::isCollectionLockedForMode(StringData ns,
- LockMode mode) const {
- invariant(nsIsFull(ns));
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::isCollectionLockedForMode(StringData ns, LockMode mode) const {
+ invariant(nsIsFull(ns));
- if (isW()) return true;
- if (isR() && isSharedLockMode(mode)) return true;
+ if (isW())
+ return true;
+ if (isR() && isSharedLockMode(mode))
+ return true;
- const NamespaceString nss(ns);
- const ResourceId resIdDb(RESOURCE_DATABASE, nss.db());
+ const NamespaceString nss(ns);
+ const ResourceId resIdDb(RESOURCE_DATABASE, nss.db());
- LockMode dbMode = getLockMode(resIdDb);
+ LockMode dbMode = getLockMode(resIdDb);
- switch (dbMode) {
- case MODE_NONE: return false;
- case MODE_X: return true;
- case MODE_S: return isSharedLockMode(mode);
+ switch (dbMode) {
+ case MODE_NONE:
+ return false;
+ case MODE_X:
+ return true;
+ case MODE_S:
+ return isSharedLockMode(mode);
case MODE_IX:
- case MODE_IS:
- {
- const ResourceId resIdColl(RESOURCE_COLLECTION, ns);
- return isLockHeldForMode(resIdColl, mode);
- }
- break;
+ case MODE_IS: {
+ const ResourceId resIdColl(RESOURCE_COLLECTION, ns);
+ return isLockHeldForMode(resIdColl, mode);
+ } break;
case LockModesCount:
break;
- }
-
- invariant(false);
- return false;
}
- template<bool IsForMMAPV1>
- ResourceId LockerImpl<IsForMMAPV1>::getWaitingResource() const {
- scoped_spinlock scopedLock(_lock);
+ invariant(false);
+ return false;
+}
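
To illustrate the coverage rules implemented by the two predicates above (a sketch; "test" and "test.coll" are hypothetical names): a database-level intent lock alone does not satisfy a collection-level check, whereas also holding the collection lock does.

    DefaultLockerImpl locker;
    invariant(LOCK_OK == locker.lockGlobal(MODE_IX));
    invariant(LOCK_OK ==
              locker.lock(ResourceId(RESOURCE_DATABASE, StringData("test")), MODE_IX));

    invariant(locker.isDbLockedForMode("test", MODE_IX));
    invariant(!locker.isCollectionLockedForMode("test.coll", MODE_IX));

    invariant(LOCK_OK ==
              locker.lock(ResourceId(RESOURCE_COLLECTION, StringData("test.coll")), MODE_IX));
    invariant(locker.isCollectionLockedForMode("test.coll", MODE_IX));

    locker.unlockAll();
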
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- if (it->status != LockRequest::STATUS_GRANTED) {
- return it.key();
- }
+template <bool IsForMMAPV1>
+ResourceId LockerImpl<IsForMMAPV1>::getWaitingResource() const {
+ scoped_spinlock scopedLock(_lock);
- it.next();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ if (it->status != LockRequest::STATUS_GRANTED) {
+ return it.key();
}
- return ResourceId();
+ it.next();
}
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::getLockerInfo(LockerInfo* lockerInfo) const {
- invariant(lockerInfo);
+ return ResourceId();
+}
- // Zero-out the contents
- lockerInfo->locks.clear();
- lockerInfo->waitingResource = ResourceId();
- lockerInfo->stats.reset();
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::getLockerInfo(LockerInfo* lockerInfo) const {
+ invariant(lockerInfo);
- _lock.lock();
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- OneLock info;
- info.resourceId = it.key();
- info.mode = it->mode;
+ // Zero-out the contents
+ lockerInfo->locks.clear();
+ lockerInfo->waitingResource = ResourceId();
+ lockerInfo->stats.reset();
- lockerInfo->locks.push_back(info);
- it.next();
- }
- _lock.unlock();
-
- std::sort(lockerInfo->locks.begin(), lockerInfo->locks.end());
+ _lock.lock();
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ OneLock info;
+ info.resourceId = it.key();
+ info.mode = it->mode;
- lockerInfo->waitingResource = getWaitingResource();
- lockerInfo->stats.append(_stats);
+ lockerInfo->locks.push_back(info);
+ it.next();
}
+ _lock.unlock();
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
- // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
- invariant(!inAWriteUnitOfWork());
-
- // Clear out whatever is in stateOut.
- stateOut->locks.clear();
- stateOut->globalMode = MODE_NONE;
-
- // First, we look at the global lock. There is special handling for this (as the flush
- // lock goes along with it) so we store it separately from the more pedestrian locks.
- LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
- if (!globalRequest) {
- // If there's no global lock there isn't really anything to do.
- invariant(_requests.empty());
- return false;
- }
-
- // If the global lock has been acquired more than once, we're probably somewhere in a
- // DBDirectClient call. It's not safe to release and reacquire locks -- the context using
- // the DBDirectClient is probably not prepared for lock release.
- if (globalRequest->recursiveCount > 1) {
- return false;
- }
+ std::sort(lockerInfo->locks.begin(), lockerInfo->locks.end());
- // The global lock must have been acquired just once
- stateOut->globalMode = globalRequest->mode;
- invariant(unlock(resourceIdGlobal));
+ lockerInfo->waitingResource = getWaitingResource();
+ lockerInfo->stats.append(_stats);
+}
- // Next, the non-global locks.
- for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
- const ResourceId resId = it.key();
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) {
+ // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
+ invariant(!inAWriteUnitOfWork());
- // We should never have to save and restore metadata locks.
- invariant((IsForMMAPV1 && (resourceIdMMAPV1Flush == resId)) ||
- RESOURCE_DATABASE == resId.getType() ||
- RESOURCE_COLLECTION == resId.getType() ||
- (RESOURCE_GLOBAL == resId.getType() && isSharedLockMode(it->mode)));
+ // Clear out whatever is in stateOut.
+ stateOut->locks.clear();
+ stateOut->globalMode = MODE_NONE;
- // And, stuff the info into the out parameter.
- OneLock info;
- info.resourceId = resId;
- info.mode = it->mode;
+ // First, we look at the global lock. There is special handling for this (as the flush
+ // lock goes along with it) so we store it separately from the more pedestrian locks.
+ LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal);
+ if (!globalRequest) {
+ // If there's no global lock there isn't really anything to do.
+ invariant(_requests.empty());
+ return false;
+ }
- stateOut->locks.push_back(info);
+ // If the global lock has been acquired more than once, we're probably somewhere in a
+ // DBDirectClient call. It's not safe to release and reacquire locks -- the context using
+ // the DBDirectClient is probably not prepared for lock release.
+ if (globalRequest->recursiveCount > 1) {
+ return false;
+ }
- invariant(unlock(resId));
- }
+ // The global lock must have been acquired just once
+ stateOut->globalMode = globalRequest->mode;
+ invariant(unlock(resourceIdGlobal));
- // Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
- std::sort(stateOut->locks.begin(), stateOut->locks.end());
+ // Next, the non-global locks.
+ for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) {
+ const ResourceId resId = it.key();
- return true;
- }
+ // We should never have to save and restore metadata locks.
+ invariant((IsForMMAPV1 && (resourceIdMMAPV1Flush == resId)) ||
+ RESOURCE_DATABASE == resId.getType() || RESOURCE_COLLECTION == resId.getType() ||
+ (RESOURCE_GLOBAL == resId.getType() && isSharedLockMode(it->mode)));
- template<bool IsForMMAPV1>
- void LockerImpl<IsForMMAPV1>::restoreLockState(const Locker::LockSnapshot& state) {
- // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
- invariant(!inAWriteUnitOfWork());
+ // And, stuff the info into the out parameter.
+ OneLock info;
+ info.resourceId = resId;
+ info.mode = it->mode;
- std::vector<OneLock>::const_iterator it = state.locks.begin();
- // If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
- if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
- invariant(LOCK_OK == lock(it->resourceId, it->mode));
- it++;
- }
+ stateOut->locks.push_back(info);
- invariant(LOCK_OK == lockGlobal(state.globalMode));
- for (; it != state.locks.end(); it++) {
- // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
- // expected mode.
- if (IsForMMAPV1 && (it->resourceId == resourceIdMMAPV1Flush)) {
- invariant(it->mode == _getModeForMMAPV1FlushLock());
- }
- else {
- invariant(LOCK_OK == lock(it->resourceId, it->mode));
- }
- }
+ invariant(unlock(resId));
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
- dassert(!getWaitingResource().isValid());
+ // Sort locks by ResourceId. They'll later be acquired in this canonical locking order.
+ std::sort(stateOut->locks.begin(), stateOut->locks.end());
- LockRequest* request;
- bool isNew = true;
+ return true;
+}
- LockRequestsMap::Iterator it = _requests.find(resId);
- if (!it) {
- scoped_spinlock scopedLock(_lock);
- LockRequestsMap::Iterator itNew = _requests.insert(resId);
- itNew->initNew(this, &_notify);
+template <bool IsForMMAPV1>
+void LockerImpl<IsForMMAPV1>::restoreLockState(const Locker::LockSnapshot& state) {
+ // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork.
+ invariant(!inAWriteUnitOfWork());
- request = itNew.objAddr();
- }
- else {
- request = it.objAddr();
- isNew = false;
- }
+ std::vector<OneLock>::const_iterator it = state.locks.begin();
+ // If we locked the PBWM, it must be locked before the resourceIdGlobal resource.
+ if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) {
+ invariant(LOCK_OK == lock(it->resourceId, it->mode));
+ it++;
+ }
- // Making this call here will record lock re-acquisitions and conversions as well.
- globalStats.recordAcquisition(_id, resId, mode);
- _stats.recordAcquisition(resId, mode);
-
- // Give priority to the full modes for global, parallel batch writer mode,
- // and flush lock so we don't stall global operations such as shutdown or flush.
- const ResourceType resType = resId.getType();
- if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
- if (mode == MODE_S || mode == MODE_X) {
- request->enqueueAtFront = true;
- request->compatibleFirst = true;
- }
- }
- else {
- // This is all sanity checks that the global and flush locks are always be acquired
- // before any other lock has been acquired and they must be in sync with the nesting.
- DEV {
- const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
- invariant(itGlobal->recursiveCount > 0);
- invariant(itGlobal->mode != MODE_NONE);
-
- // Check the MMAP V1 flush lock is held in the appropriate mode
- invariant(!IsForMMAPV1 || isLockHeldForMode(resourceIdMMAPV1Flush,
- _getModeForMMAPV1FlushLock()));
- };
+ invariant(LOCK_OK == lockGlobal(state.globalMode));
+ for (; it != state.locks.end(); it++) {
+ // This is a sanity check that lockGlobal restored the MMAP V1 flush lock in the
+ // expected mode.
+ if (IsForMMAPV1 && (it->resourceId == resourceIdMMAPV1Flush)) {
+ invariant(it->mode == _getModeForMMAPV1FlushLock());
+ } else {
+ invariant(LOCK_OK == lock(it->resourceId, it->mode));
}
+ }
+}
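
A minimal yield/restore sketch of the pair above (the database name is hypothetical): the snapshot captures the global mode and the per-resource locks, everything is dropped at the yield point, and restoreLockState() later re-acquires the locks in the canonical sorted order.

    DefaultLockerImpl locker;
    invariant(LOCK_OK == locker.lockGlobal(MODE_IS));
    invariant(LOCK_OK ==
              locker.lock(ResourceId(RESOURCE_DATABASE, StringData("test")), MODE_IS));

    Locker::LockSnapshot snapshot;
    if (locker.saveLockStateAndUnlock(&snapshot)) {
        invariant(!locker.isLocked());

        // ... yield point: other operations may run here ...

        locker.restoreLockState(snapshot);
        invariant(locker.isDbLockedForMode("test", MODE_IS));
    }

    locker.unlockAll();
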
- // The notification object must be cleared before we invoke the lock manager, because
- // otherwise we might reset state if the lock becomes granted very fast.
- _notify.clear();
-
- LockResult result = isNew ? globalLockManager.lock(resId, request, mode) :
- globalLockManager.convert(resId, request, mode);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockBegin(ResourceId resId, LockMode mode) {
+ dassert(!getWaitingResource().isValid());
- if (result == LOCK_WAITING) {
- // Start counting the wait time so that lockComplete can update that metric
- _requestStartTime = curTimeMicros64();
- globalStats.recordWait(_id, resId, mode);
- _stats.recordWait(resId, mode);
- }
+ LockRequest* request;
+ bool isNew = true;
- return result;
+ LockRequestsMap::Iterator it = _requests.find(resId);
+ if (!it) {
+ scoped_spinlock scopedLock(_lock);
+ LockRequestsMap::Iterator itNew = _requests.insert(resId);
+ itNew->initNew(this, &_notify);
+
+ request = itNew.objAddr();
+ } else {
+ request = it.objAddr();
+ isNew = false;
+ }
+
+ // Making this call here will record lock re-acquisitions and conversions as well.
+ globalStats.recordAcquisition(_id, resId, mode);
+ _stats.recordAcquisition(resId, mode);
+
+ // Give priority to the full modes for global, parallel batch writer mode,
+ // and flush lock so we don't stall global operations such as shutdown or flush.
+ const ResourceType resType = resId.getType();
+ if (resType == RESOURCE_GLOBAL || (IsForMMAPV1 && resId == resourceIdMMAPV1Flush)) {
+ if (mode == MODE_S || mode == MODE_X) {
+ request->enqueueAtFront = true;
+ request->compatibleFirst = true;
+ }
+ } else {
+        // These are sanity checks that the global and flush locks are always acquired before
+        // any other lock and that they stay in sync with the nesting.
+ DEV {
+ const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal);
+ invariant(itGlobal->recursiveCount > 0);
+ invariant(itGlobal->mode != MODE_NONE);
+
+            // Check that the MMAP V1 flush lock is held in the appropriate mode
+ invariant(!IsForMMAPV1 ||
+ isLockHeldForMode(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ };
}
- template<bool IsForMMAPV1>
- LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock) {
+ // The notification object must be cleared before we invoke the lock manager, because
+ // otherwise we might reset state if the lock becomes granted very fast.
+ _notify.clear();
- // Under MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on
- // DB lock, while holding the flush lock, so it has to be released. This is only
- // correct to do if not in a write unit of work.
- const bool yieldFlushLock = IsForMMAPV1 &&
- !inAWriteUnitOfWork() &&
- resId.getType() != RESOURCE_GLOBAL &&
- resId != resourceIdMMAPV1Flush;
- if (yieldFlushLock) {
- invariant(unlock(resourceIdMMAPV1Flush));
- }
+ LockResult result = isNew ? globalLockManager.lock(resId, request, mode)
+ : globalLockManager.convert(resId, request, mode);
- LockResult result;
+ if (result == LOCK_WAITING) {
+ // Start counting the wait time so that lockComplete can update that metric
+ _requestStartTime = curTimeMicros64();
+ globalStats.recordWait(_id, resId, mode);
+ _stats.recordWait(resId, mode);
+ }
- // Don't go sleeping without bound in order to be able to report long waits or wake up for
- // deadlock detection.
- unsigned waitTimeMs = std::min(timeoutMs, DeadlockTimeoutMs);
- while (true) {
- // It is OK if this call wakes up spuriously, because we re-evaluate the remaining
- // wait time anyways.
- result = _notify.wait(waitTimeMs);
+ return result;
+}
- // Account for the time spent waiting on the notification object
- const uint64_t elapsedTimeMicros = curTimeMicros64() - _requestStartTime;
- globalStats.recordWaitTime(_id, resId, mode, elapsedTimeMicros);
- _stats.recordWaitTime(resId, mode, elapsedTimeMicros);
+template <bool IsForMMAPV1>
+LockResult LockerImpl<IsForMMAPV1>::lockComplete(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock) {
+    // Under the MMAP V1 engine a deadlock can occur if a thread goes to sleep waiting on a
+    // DB lock while holding the flush lock, so the flush lock has to be released. This is
+    // only correct to do when not in a write unit of work.
+ const bool yieldFlushLock = IsForMMAPV1 && !inAWriteUnitOfWork() &&
+ resId.getType() != RESOURCE_GLOBAL && resId != resourceIdMMAPV1Flush;
+ if (yieldFlushLock) {
+ invariant(unlock(resourceIdMMAPV1Flush));
+ }
- if (result == LOCK_OK) break;
+ LockResult result;
- if (checkDeadlock) {
- DeadlockDetector wfg(globalLockManager, this);
- if (wfg.check().hasCycle()) {
- warning() << "Deadlock found: " << wfg.toString();
+    // Don't sleep without a bound, so that we are able to report long waits and to wake up
+    // for deadlock detection.
+ unsigned waitTimeMs = std::min(timeoutMs, DeadlockTimeoutMs);
+ while (true) {
+ // It is OK if this call wakes up spuriously, because we re-evaluate the remaining
+        // wait time anyway.
+ result = _notify.wait(waitTimeMs);
- globalStats.recordDeadlock(resId, mode);
- _stats.recordDeadlock(resId, mode);
+ // Account for the time spent waiting on the notification object
+ const uint64_t elapsedTimeMicros = curTimeMicros64() - _requestStartTime;
+ globalStats.recordWaitTime(_id, resId, mode, elapsedTimeMicros);
+ _stats.recordWaitTime(resId, mode, elapsedTimeMicros);
- result = LOCK_DEADLOCK;
- break;
- }
- }
+ if (result == LOCK_OK)
+ break;
- // If infinite timeout was requested, just keep waiting
- if (timeoutMs == UINT_MAX) {
- continue;
- }
+ if (checkDeadlock) {
+ DeadlockDetector wfg(globalLockManager, this);
+ if (wfg.check().hasCycle()) {
+ warning() << "Deadlock found: " << wfg.toString();
- const unsigned elapsedTimeMs = elapsedTimeMicros / 1000;
- waitTimeMs = (elapsedTimeMs < timeoutMs) ?
- std::min(timeoutMs - elapsedTimeMs, DeadlockTimeoutMs) : 0;
+ globalStats.recordDeadlock(resId, mode);
+ _stats.recordDeadlock(resId, mode);
- if (waitTimeMs == 0) {
+ result = LOCK_DEADLOCK;
break;
}
}
- // Cleanup the state, since this is an unused lock now
- if (result != LOCK_OK) {
- LockRequestsMap::Iterator it = _requests.find(resId);
- if (globalLockManager.unlock(it.objAddr())) {
- scoped_spinlock scopedLock(_lock);
- it.remove();
- }
- }
-
- if (yieldFlushLock) {
- // We cannot obey the timeout here, because it is not correct to return from the lock
- // request with the flush lock released.
- invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ // If infinite timeout was requested, just keep waiting
+ if (timeoutMs == UINT_MAX) {
+ continue;
}
- return result;
- }
+ const unsigned elapsedTimeMs = elapsedTimeMicros / 1000;
+ waitTimeMs = (elapsedTimeMs < timeoutMs)
+ ? std::min(timeoutMs - elapsedTimeMs, DeadlockTimeoutMs)
+ : 0;
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator& it) {
- if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), it->mode)) {
- _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
- return false;
+ if (waitTimeMs == 0) {
+ break;
}
+ }
+    // Clean up the state, since this is an unused lock now
+ if (result != LOCK_OK) {
+ LockRequestsMap::Iterator it = _requests.find(resId);
if (globalLockManager.unlock(it.objAddr())) {
scoped_spinlock scopedLock(_lock);
it.remove();
-
- return true;
}
+ }
+ if (yieldFlushLock) {
+ // We cannot obey the timeout here, because it is not correct to return from the lock
+ // request with the flush lock released.
+ invariant(LOCK_OK == lock(resourceIdMMAPV1Flush, _getModeForMMAPV1FlushLock()));
+ }
+
+ return result;
+}
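
Putting lockBegin() and lockComplete() together, here is a hedged sketch of the two-phase protocol as a caller would drive it (the resource and the 1000 ms timeout are arbitrary for the example). Note that on LOCK_TIMEOUT or LOCK_DEADLOCK, lockComplete() has already removed the pending request, so no additional unlock of the resource is needed.

    const ResourceId resId(RESOURCE_DATABASE, StringData("test"));

    DefaultLockerImpl locker;
    invariant(LOCK_OK == locker.lockGlobal(MODE_IX));

    LockResult result = locker.lockBegin(resId, MODE_X);
    if (result == LOCK_WAITING) {
        // Wait up to 1000 ms; the last argument enables deadlock detection while waiting.
        result = locker.lockComplete(resId, MODE_X, 1000, true);
    }

    if (result == LOCK_OK) {
        // ... protected work ...
        invariant(locker.unlock(resId));
    }

    locker.unlockAll();
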
+
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::_unlockImpl(LockRequestsMap::Iterator& it) {
+ if (inAWriteUnitOfWork() && shouldDelayUnlock(it.key(), it->mode)) {
+ _resourcesToUnlockAtEndOfUnitOfWork.push(it.key());
return false;
}
- template<bool IsForMMAPV1>
- LockMode LockerImpl<IsForMMAPV1>::_getModeForMMAPV1FlushLock() const {
- invariant(IsForMMAPV1);
+ if (globalLockManager.unlock(it.objAddr())) {
+ scoped_spinlock scopedLock(_lock);
+ it.remove();
+
+ return true;
+ }
- LockMode mode = getLockMode(resourceIdGlobal);
- switch (mode) {
+ return false;
+}
+
+template <bool IsForMMAPV1>
+LockMode LockerImpl<IsForMMAPV1>::_getModeForMMAPV1FlushLock() const {
+ invariant(IsForMMAPV1);
+
+ LockMode mode = getLockMode(resourceIdGlobal);
+ switch (mode) {
case MODE_X:
case MODE_IX:
return MODE_IX;
@@ -771,153 +767,150 @@ namespace {
default:
invariant(false);
return MODE_NONE;
- }
}
+}
- template<bool IsForMMAPV1>
- bool LockerImpl<IsForMMAPV1>::hasStrongLocks() const {
- if (!isLocked()) return false;
-
- stdx::lock_guard<SpinLock> lk(_lock);
- LockRequestsMap::ConstIterator it = _requests.begin();
- while (!it.finished()) {
- if (it->mode == MODE_X || it->mode == MODE_S) {
- return true;
- }
+template <bool IsForMMAPV1>
+bool LockerImpl<IsForMMAPV1>::hasStrongLocks() const {
+ if (!isLocked())
+ return false;
- it.next();
+ stdx::lock_guard<SpinLock> lk(_lock);
+ LockRequestsMap::ConstIterator it = _requests.begin();
+ while (!it.finished()) {
+ if (it->mode == MODE_X || it->mode == MODE_S) {
+ return true;
}
- return false;
+ it.next();
}
+ return false;
+}
- //
- // Auto classes
- //
- AutoYieldFlushLockForMMAPV1Commit::AutoYieldFlushLockForMMAPV1Commit(Locker* locker)
- : _locker(static_cast<MMAPV1LockerImpl*>(locker)) {
+//
+// Auto classes
+//
- // Explicit yielding of the flush lock should happen only at global synchronization points
- // such as database drop. There should not be any active writes at these points.
- invariant(!_locker->inAWriteUnitOfWork());
+AutoYieldFlushLockForMMAPV1Commit::AutoYieldFlushLockForMMAPV1Commit(Locker* locker)
+ : _locker(static_cast<MMAPV1LockerImpl*>(locker)) {
+ // Explicit yielding of the flush lock should happen only at global synchronization points
+ // such as database drop. There should not be any active writes at these points.
+ invariant(!_locker->inAWriteUnitOfWork());
- if (isMMAPV1()) {
- invariant(_locker->unlock(resourceIdMMAPV1Flush));
- }
+ if (isMMAPV1()) {
+ invariant(_locker->unlock(resourceIdMMAPV1Flush));
}
+}
- AutoYieldFlushLockForMMAPV1Commit::~AutoYieldFlushLockForMMAPV1Commit() {
- if (isMMAPV1()) {
- invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush,
- _locker->_getModeForMMAPV1FlushLock()));
- }
+AutoYieldFlushLockForMMAPV1Commit::~AutoYieldFlushLockForMMAPV1Commit() {
+ if (isMMAPV1()) {
+ invariant(LOCK_OK ==
+ _locker->lock(resourceIdMMAPV1Flush, _locker->_getModeForMMAPV1FlushLock()));
}
+}
- AutoAcquireFlushLockForMMAPV1Commit::AutoAcquireFlushLockForMMAPV1Commit(Locker* locker)
- : _locker(locker),
- _released(false) {
-
- // The journal thread acquiring the journal lock in S-mode opens opportunity for deadlock
- // involving operations which do not acquire and release the Oplog collection's X lock
- // inside a WUOW (see SERVER-17416 for the sequence of events), therefore acquire it with
- // check for deadlock and back-off if one is encountered.
- //
- // This exposes theoretical chance that we might starve the journaling system, but given
- // that these deadlocks happen extremely rarely and are usually due to incorrect locking
- // policy, and we have the deadlock counters as part of the locking statistics, this is a
- // reasonable handling.
- //
- // In the worst case, if we are to starve the journaling system, the server will shut down
- // due to too much uncommitted in-memory journal, but won't have corruption.
+AutoAcquireFlushLockForMMAPV1Commit::AutoAcquireFlushLockForMMAPV1Commit(Locker* locker)
+ : _locker(locker), _released(false) {
+    // The journal thread acquiring the journal lock in S-mode opens the opportunity for a
+    // deadlock involving operations which do not acquire and release the Oplog collection's X
+    // lock inside a WUOW (see SERVER-17416 for the sequence of events), so we acquire it with
+    // deadlock checking and back off if a deadlock is encountered.
+    //
+    // This exposes a theoretical chance that we might starve the journaling system, but given
+    // that these deadlocks happen extremely rarely, are usually due to an incorrect locking
+    // policy, and the deadlock counters are part of the locking statistics, this is reasonable
+    // handling.
+    //
+    // In the worst case, if we do starve the journaling system, the server will shut down due
+    // to too much uncommitted in-memory journal, but there won't be corruption.
- while (true) {
- LockResult result = _locker->lock(resourceIdMMAPV1Flush, MODE_S, UINT_MAX, true);
- if (result == LOCK_OK) {
- break;
- }
+ while (true) {
+ LockResult result = _locker->lock(resourceIdMMAPV1Flush, MODE_S, UINT_MAX, true);
+ if (result == LOCK_OK) {
+ break;
+ }
- invariant(result == LOCK_DEADLOCK);
+ invariant(result == LOCK_DEADLOCK);
- warning() << "Delayed journaling in order to avoid deadlock during MMAP V1 journal " <<
- "lock acquisition. See the previous messages for information on the " <<
- "involved threads.";
- }
+ warning() << "Delayed journaling in order to avoid deadlock during MMAP V1 journal "
+ << "lock acquisition. See the previous messages for information on the "
+ << "involved threads.";
}
+}
- void AutoAcquireFlushLockForMMAPV1Commit::upgradeFlushLockToExclusive() {
- // This should not be able to deadlock, since we already hold the S journal lock, which
- // means all writers are kicked out. Readers always yield the journal lock if they block
- // waiting on any other lock.
- invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush, MODE_X, UINT_MAX, false));
+void AutoAcquireFlushLockForMMAPV1Commit::upgradeFlushLockToExclusive() {
+ // This should not be able to deadlock, since we already hold the S journal lock, which
+ // means all writers are kicked out. Readers always yield the journal lock if they block
+ // waiting on any other lock.
+ invariant(LOCK_OK == _locker->lock(resourceIdMMAPV1Flush, MODE_X, UINT_MAX, false));
- // Lock bumps the recursive count. Drop it back down so that the destructor doesn't
- // complain.
- invariant(!_locker->unlock(resourceIdMMAPV1Flush));
- }
+ // Lock bumps the recursive count. Drop it back down so that the destructor doesn't
+ // complain.
+ invariant(!_locker->unlock(resourceIdMMAPV1Flush));
+}
- void AutoAcquireFlushLockForMMAPV1Commit::release() {
- if (!_released) {
- invariant(_locker->unlock(resourceIdMMAPV1Flush));
- _released = true;
- }
+void AutoAcquireFlushLockForMMAPV1Commit::release() {
+ if (!_released) {
+ invariant(_locker->unlock(resourceIdMMAPV1Flush));
+ _released = true;
}
+}
- AutoAcquireFlushLockForMMAPV1Commit::~AutoAcquireFlushLockForMMAPV1Commit() {
- release();
- }
+AutoAcquireFlushLockForMMAPV1Commit::~AutoAcquireFlushLockForMMAPV1Commit() {
+ release();
+}
namespace {
- /**
- * Periodically purges unused lock buckets. The first time the lock is used again after
- * cleanup it needs to be allocated, and similarly, every first use by a client for an intent
- * mode may need to create a partitioned lock head. Cleanup is done roughtly once a minute.
- */
- class UnusedLockCleaner : PeriodicTask {
- public:
- std::string taskName() const {
- return "UnusedLockCleaner";
- }
+/**
+ * Periodically purges unused lock buckets. The first time a lock is used again after
+ * cleanup it needs to be allocated, and similarly, every first use by a client for an intent
+ * mode may need to create a partitioned lock head. Cleanup is done roughly once a minute.
+ */
+class UnusedLockCleaner : PeriodicTask {
+public:
+ std::string taskName() const {
+ return "UnusedLockCleaner";
+ }
- void taskDoWork() {
- LOG(2) << "cleaning up unused lock buckets of the global lock manager";
- getGlobalLockManager()->cleanupUnusedLocks();
- }
- } unusedLockCleaner;
-} // namespace
+ void taskDoWork() {
+ LOG(2) << "cleaning up unused lock buckets of the global lock manager";
+ getGlobalLockManager()->cleanupUnusedLocks();
+ }
+} unusedLockCleaner;
+} // namespace
- //
- // Standalone functions
- //
+//
+// Standalone functions
+//
- LockManager* getGlobalLockManager() {
- return &globalLockManager;
- }
+LockManager* getGlobalLockManager() {
+ return &globalLockManager;
+}
- void reportGlobalLockingStats(SingleThreadedLockStats* outStats) {
- globalStats.report(outStats);
- }
+void reportGlobalLockingStats(SingleThreadedLockStats* outStats) {
+ globalStats.report(outStats);
+}
+
+void resetGlobalLockStats() {
+ globalStats.reset();
+}
- void resetGlobalLockStats() {
- globalStats.reset();
- }
-
- // Ensures that there are two instances compiled for LockerImpl for the two values of the
- // template argument.
- template class LockerImpl<true>;
- template class LockerImpl<false>;
+// Ensures that there are two instances compiled for LockerImpl for the two values of the
+// template argument.
+template class LockerImpl<true>;
+template class LockerImpl<false>;
- // Definition for the hardcoded localdb and oplog collection info
- const ResourceId resourceIdLocalDB = ResourceId(RESOURCE_DATABASE, StringData("local"));
- const ResourceId resourceIdOplog =
- ResourceId(RESOURCE_COLLECTION, StringData("local.oplog.rs"));
- const ResourceId resourceIdAdminDB = ResourceId(RESOURCE_DATABASE, StringData("admin"));
- const ResourceId resourceIdParallelBatchWriterMode =
- ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_PARALLEL_BATCH_WRITER_MODE);
+// Definition for the hardcoded localdb and oplog collection info
+const ResourceId resourceIdLocalDB = ResourceId(RESOURCE_DATABASE, StringData("local"));
+const ResourceId resourceIdOplog = ResourceId(RESOURCE_COLLECTION, StringData("local.oplog.rs"));
+const ResourceId resourceIdAdminDB = ResourceId(RESOURCE_DATABASE, StringData("admin"));
+const ResourceId resourceIdParallelBatchWriterMode =
+ ResourceId(RESOURCE_GLOBAL, ResourceId::SINGLETON_PARALLEL_BATCH_WRITER_MODE);
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h
index ca16767eb49..f8c696b786c 100644
--- a/src/mongo/db/concurrency/lock_state.h
+++ b/src/mongo/db/concurrency/lock_state.h
@@ -36,286 +36,293 @@
namespace mongo {
+/**
+ * Notification callback, which stores the last notification result and signals a condition
+ * variable, which can be waited on.
+ */
+class CondVarLockGrantNotification : public LockGrantNotification {
+ MONGO_DISALLOW_COPYING(CondVarLockGrantNotification);
+
+public:
+ CondVarLockGrantNotification();
+
+ /**
+ * Clears the object so it can be reused.
+ */
+ void clear();
+
+ /**
+ * Uninterruptible blocking method, which waits for the notification to fire.
+ *
+ * @param timeoutMs How many milliseconds to wait before returning LOCK_TIMEOUT.
+ */
+ LockResult wait(unsigned timeoutMs);
+
+private:
+ virtual void notify(ResourceId resId, LockResult result);
+
+    // These two go together to implement the condition variable pattern.
+ stdx::mutex _mutex;
+ stdx::condition_variable _cond;
+
+ // Result from the last call to notify
+ LockResult _result;
+};
+
+
+/**
+ * Interface for acquiring locks. One of these objects must be instantiated for each
+ * request (transaction).
+ *
+ * Lock/unlock methods must always be called from a single thread.
+ *
+ * All instances reference a single global lock manager.
+ *
+ * @param IsForMMAPV1 Whether to compile-in the flush lock functionality, which is specific to
+ * the way the MMAP V1 (legacy) storage engine does commit concurrency control.
+ */
+template <bool IsForMMAPV1>
+class LockerImpl : public Locker {
+public:
/**
- * Notfication callback, which stores the last notification result and signals a condition
- * variable, which can be waited on.
+     * Instantiates a new locker. Each locker carries a unique identifier for disambiguation.
+     * Lockers having the same identifier will not conflict on lock acquisition.
*/
- class CondVarLockGrantNotification : public LockGrantNotification {
- MONGO_DISALLOW_COPYING(CondVarLockGrantNotification);
- public:
- CondVarLockGrantNotification();
+ LockerImpl();
+
+ virtual ~LockerImpl();
+
+ virtual LockerId getId() const {
+ return _id;
+ }
+
+ virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs = UINT_MAX);
+ virtual LockResult lockGlobalBegin(LockMode mode);
+ virtual LockResult lockGlobalComplete(unsigned timeoutMs);
+ virtual void lockMMAPV1Flush();
+
+ virtual void downgradeGlobalXtoSForMMAPV1();
+ virtual bool unlockAll();
- /**
- * Clears the object so it can be reused.
- */
- void clear();
+ virtual void beginWriteUnitOfWork();
+ virtual void endWriteUnitOfWork();
- /**
- * Uninterruptible blocking method, which waits for the notification to fire.
- *
- * @param timeoutMs How many milliseconds to wait before returning LOCK_TIMEOUT.
- */
- LockResult wait(unsigned timeoutMs);
+ virtual bool inAWriteUnitOfWork() const {
+ return _wuowNestingLevel > 0;
+ }
- private:
+ virtual LockResult lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs = UINT_MAX,
+ bool checkDeadlock = false);
- virtual void notify(ResourceId resId, LockResult result);
+ virtual void downgrade(ResourceId resId, LockMode newMode);
- // These two go together to implement the conditional variable pattern.
- stdx::mutex _mutex;
- stdx::condition_variable _cond;
+ virtual bool unlock(ResourceId resId);
- // Result from the last call to notify
- LockResult _result;
- };
+ virtual LockMode getLockMode(ResourceId resId) const;
+ virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const;
+ virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const;
+ virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const;
+ virtual ResourceId getWaitingResource() const;
+
+ virtual void getLockerInfo(LockerInfo* lockerInfo) const;
+
+ virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut);
+
+ virtual void restoreLockState(const LockSnapshot& stateToRestore);
/**
- * Interface for acquiring locks. One of those objects will have to be instantiated for each
- * request (transaction).
+     * Allows lock requests to be made in a non-blocking way. There can be only one
+ * outstanding pending lock request per locker object.
+ *
+ * lockBegin posts a request to the lock manager for the specified lock to be acquired,
+ * which either immediately grants the lock, or puts the requestor on the conflict queue
+ * and returns immediately with the result of the acquisition. The result can be one of:
*
- * Lock/unlock methods must always be called from a single thread.
+ * LOCK_OK - Nothing more needs to be done. The lock is granted.
+ * LOCK_WAITING - The request has been queued up and will be granted as soon as the lock
+ * is free. If this result is returned, typically lockComplete needs to be called in
+ * order to wait for the actual grant to occur. If the caller no longer needs to wait
+ * for the grant to happen, unlock needs to be called with the same resource passed
+ * to lockBegin.
*
- * All instances reference a single global lock manager.
+     * In other words, for each call to lockBegin that does not return LOCK_OK, there needs to
+ * be a corresponding call to either lockComplete or unlock.
*
- * @param IsForMMAPV1 Whether to compile-in the flush lock functionality, which is specific to
- * the way the MMAP V1 (legacy) storag engine does commit concurrency control.
+     * NOTE: These methods are not part of the public Locker interface and should only be used
+     * by the class implementation and by unit tests; they should not be called directly.
*/
- template<bool IsForMMAPV1>
- class LockerImpl : public Locker {
- public:
-
- /**
- * Instantiates new locker. Must be given a unique identifier for disambiguation. Lockers
- * having the same identifier will not conflict on lock acquisition.
- */
- LockerImpl();
-
- virtual ~LockerImpl();
-
- virtual LockerId getId() const { return _id; }
-
- virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs = UINT_MAX);
- virtual LockResult lockGlobalBegin(LockMode mode);
- virtual LockResult lockGlobalComplete(unsigned timeoutMs);
- virtual void lockMMAPV1Flush();
-
- virtual void downgradeGlobalXtoSForMMAPV1();
- virtual bool unlockAll();
-
- virtual void beginWriteUnitOfWork();
- virtual void endWriteUnitOfWork();
-
- virtual bool inAWriteUnitOfWork() const { return _wuowNestingLevel > 0; }
-
- virtual LockResult lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs = UINT_MAX,
- bool checkDeadlock = false);
-
- virtual void downgrade(ResourceId resId, LockMode newMode);
-
- virtual bool unlock(ResourceId resId);
-
- virtual LockMode getLockMode(ResourceId resId) const;
- virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const;
- virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const;
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const;
-
- virtual ResourceId getWaitingResource() const;
-
- virtual void getLockerInfo(LockerInfo* lockerInfo) const;
-
- virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut);
-
- virtual void restoreLockState(const LockSnapshot& stateToRestore);
-
- /**
- * Allows for lock requests to be requested in a non-blocking way. There can be only one
- * outstanding pending lock request per locker object.
- *
- * lockBegin posts a request to the lock manager for the specified lock to be acquired,
- * which either immediately grants the lock, or puts the requestor on the conflict queue
- * and returns immediately with the result of the acquisition. The result can be one of:
- *
- * LOCK_OK - Nothing more needs to be done. The lock is granted.
- * LOCK_WAITING - The request has been queued up and will be granted as soon as the lock
- * is free. If this result is returned, typically lockComplete needs to be called in
- * order to wait for the actual grant to occur. If the caller no longer needs to wait
- * for the grant to happen, unlock needs to be called with the same resource passed
- * to lockBegin.
- *
- * In other words for each call to lockBegin, which does not return LOCK_OK, there needs to
- * be a corresponding call to either lockComplete or unlock.
- *
- * NOTE: These methods are not public and should only be used inside the class
- * implementation and for unit-tests and not called directly.
- */
- LockResult lockBegin(ResourceId resId, LockMode mode);
+ LockResult lockBegin(ResourceId resId, LockMode mode);
- /**
- * Waits for the completion of a lock, previously requested through lockBegin or
- * lockGlobalBegin. Must only be called, if lockBegin returned LOCK_WAITING.
- *
- * @param resId Resource id which was passed to an earlier lockBegin call. Must match.
- * @param mode Mode which was passed to an earlier lockBegin call. Must match.
- * @param timeoutMs How long to wait for the lock acquisition to complete.
- * @param checkDeadlock whether to perform deadlock detection while waiting.
- */
- LockResult lockComplete(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock);
-
- private:
+ /**
+ * Waits for the completion of a lock, previously requested through lockBegin or
+     * lockGlobalBegin. Must only be called if lockBegin returned LOCK_WAITING.
+ *
+ * @param resId Resource id which was passed to an earlier lockBegin call. Must match.
+ * @param mode Mode which was passed to an earlier lockBegin call. Must match.
+ * @param timeoutMs How long to wait for the lock acquisition to complete.
+ * @param checkDeadlock whether to perform deadlock detection while waiting.
+ */
+ LockResult lockComplete(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock);
- friend class AutoYieldFlushLockForMMAPV1Commit;
+private:
+ friend class AutoYieldFlushLockForMMAPV1Commit;
- typedef FastMapNoAlloc<ResourceId, LockRequest, 16> LockRequestsMap;
+ typedef FastMapNoAlloc<ResourceId, LockRequest, 16> LockRequestsMap;
- /**
- * The main functionality of the unlock method, except accepts iterator in order to avoid
- * additional lookups during unlockAll.
- */
- bool _unlockImpl(LockRequestsMap::Iterator& it);
+ /**
+     * The main functionality of the unlock method, except that it accepts an iterator in order
+     * to avoid additional lookups during unlockAll.
+ */
+ bool _unlockImpl(LockRequestsMap::Iterator& it);
- /**
- * MMAP V1 locking code yields and re-acquires the flush lock occasionally in order to
- * allow the flush thread proceed. This call returns in what mode the flush lock should be
- * acquired. It is based on the type of the operation (IS for readers, IX for writers).
- */
- LockMode _getModeForMMAPV1FlushLock() const;
+ /**
+     * MMAP V1 locking code yields and re-acquires the flush lock occasionally in order to
+     * allow the flush thread to proceed. This call returns the mode in which the flush lock
+     * should be acquired. It is based on the type of the operation (IS for readers, IX for
+     * writers).
+ */
+ LockMode _getModeForMMAPV1FlushLock() const;
- // Used to disambiguate different lockers
- const LockerId _id;
+ // Used to disambiguate different lockers
+ const LockerId _id;
- // The only reason we have this spin lock here is for the diagnostic tools, which could
- // iterate through the LockRequestsMap on a separate thread and need it to be stable.
- // Apart from that, all accesses to the LockerImpl are always from a single thread.
- //
- // This has to be locked inside const methods, hence the mutable.
- mutable SpinLock _lock;
- LockRequestsMap _requests;
+ // The only reason we have this spin lock here is for the diagnostic tools, which could
+ // iterate through the LockRequestsMap on a separate thread and need it to be stable.
+ // Apart from that, all accesses to the LockerImpl are always from a single thread.
+ //
+ // This has to be locked inside const methods, hence the mutable.
+ mutable SpinLock _lock;
+ LockRequestsMap _requests;
- // Reuse the notification object across requests so we don't have to create a new mutex
- // and condition variable every time.
- CondVarLockGrantNotification _notify;
+ // Reuse the notification object across requests so we don't have to create a new mutex
+ // and condition variable every time.
+ CondVarLockGrantNotification _notify;
- // Timer for measuring duration and timeouts. This value is set when lock acquisition is
- // about to wait and is sampled at grant time.
- uint64_t _requestStartTime;
+ // Timer for measuring duration and timeouts. This value is set when lock acquisition is
+ // about to wait and is sampled at grant time.
+ uint64_t _requestStartTime;
- // Per-locker locking statistics. Reported in the slow-query log message and through
- // db.currentOp. Complementary to the per-instance locking statistics.
- SingleThreadedLockStats _stats;
+ // Per-locker locking statistics. Reported in the slow-query log message and through
+ // db.currentOp. Complementary to the per-instance locking statistics.
+ SingleThreadedLockStats _stats;
- // Delays release of exclusive/intent-exclusive locked resources until the write unit of
- // work completes. Value of 0 means we are not inside a write unit of work.
- int _wuowNestingLevel;
- std::queue<ResourceId> _resourcesToUnlockAtEndOfUnitOfWork;
+ // Delays release of exclusive/intent-exclusive locked resources until the write unit of
+ // work completes. Value of 0 means we are not inside a write unit of work.
+ int _wuowNestingLevel;
+ std::queue<ResourceId> _resourcesToUnlockAtEndOfUnitOfWork;
- //////////////////////////////////////////////////////////////////////////////////////////
- //
- // Methods merged from LockState, which should eventually be removed or changed to methods
- // on the LockerImpl interface.
- //
+ //////////////////////////////////////////////////////////////////////////////////////////
+ //
+ // Methods merged from LockState, which should eventually be removed or changed to methods
+ // on the LockerImpl interface.
+ //
- public:
+public:
+ virtual void dump() const;
- virtual void dump() const;
-
- virtual bool isW() const;
- virtual bool isR() const;
-
- virtual bool isLocked() const;
- virtual bool isWriteLocked() const;
- virtual bool isReadLocked() const;
-
- virtual void assertEmptyAndReset();
-
- virtual bool hasLockPending() const { return getWaitingResource().isValid(); }
+ virtual bool isW() const;
+ virtual bool isR() const;
- virtual void setIsBatchWriter(bool newValue) { _batchWriter = newValue; }
- virtual bool isBatchWriter() const { return _batchWriter; }
+ virtual bool isLocked() const;
+ virtual bool isWriteLocked() const;
+ virtual bool isReadLocked() const;
- virtual bool hasStrongLocks() const;
+ virtual void assertEmptyAndReset();
- private:
- bool _batchWriter;
- };
+ virtual bool hasLockPending() const {
+ return getWaitingResource().isValid();
+ }
- typedef LockerImpl<false> DefaultLockerImpl;
- typedef LockerImpl<true> MMAPV1LockerImpl;
+ virtual void setIsBatchWriter(bool newValue) {
+ _batchWriter = newValue;
+ }
+ virtual bool isBatchWriter() const {
+ return _batchWriter;
+ }
+ virtual bool hasStrongLocks() const;
- /**
- * At global synchronization points, such as drop database we are running under a global
- * exclusive lock and without an active write unit of work, doing changes which require global
- * commit. This utility allows the flush lock to be temporarily dropped so the flush thread
- * could run in such circumstances. Should not be used where write units of work are used,
- * because these have different mechanism of yielding the flush lock.
- */
- class AutoYieldFlushLockForMMAPV1Commit {
- public:
- AutoYieldFlushLockForMMAPV1Commit(Locker* locker);
- ~AutoYieldFlushLockForMMAPV1Commit();
+private:
+ bool _batchWriter;
+};
- private:
- MMAPV1LockerImpl* const _locker;
- };
+typedef LockerImpl<false> DefaultLockerImpl;
+typedef LockerImpl<true> MMAPV1LockerImpl;
- /**
- * This explains how the MMAP V1 durability system is implemented.
- *
- * Every server operation (OperationContext), must call Locker::lockGlobal as the first lock
- * action (it is illegal to acquire any other locks without calling this first). This action
- * acquires the global and flush locks in the appropriate modes (IS for read operations, IX
- * for write operations). Having the flush lock in one of these modes indicates to the flush
- * thread that there is an active reader or writer.
- *
- * Whenever the flush thread(dur.cpp) activates, it goes through the following steps :
- *
- * Acquire the flush lock in S mode using AutoAcquireFlushLockForMMAPV1Commit. This waits until
- * all current write activity on the system completes and does not allow any new operations to
- * start.
- *
- * Once the S lock is granted, the flush thread writes the journal entries to disk (it is
- * guaranteed that there will not be any modifications) and applies them to the shared view.
- *
- * After that, it upgrades the S lock to X and remaps the private view.
- *
- * NOTE: There should be only one usage of this class and this should be in dur.cpp
- */
- class AutoAcquireFlushLockForMMAPV1Commit {
- public:
- AutoAcquireFlushLockForMMAPV1Commit(Locker* locker);
- ~AutoAcquireFlushLockForMMAPV1Commit();
+/**
+ * At global synchronization points, such as drop database, we run under a global exclusive
+ * lock and without an active write unit of work, making changes which require global commit.
+ * This utility allows the flush lock to be temporarily dropped so the flush thread can run in
+ * such circumstances. Should not be used where write units of work are used, because those
+ * have a different mechanism for yielding the flush lock.
+ */
+class AutoYieldFlushLockForMMAPV1Commit {
+public:
+ AutoYieldFlushLockForMMAPV1Commit(Locker* locker);
+ ~AutoYieldFlushLockForMMAPV1Commit();
- /**
- * We need the exclusive lock in order to do the shared view remap.
- */
- void upgradeFlushLockToExclusive();
+private:
+ MMAPV1LockerImpl* const _locker;
+};
- /**
- * Allows the acquired flush lock to be prematurely released. This is helpful for the case
- * where we know that we won't be doing a remap after gathering the write intents, so the
- * rest can be done outside of flush lock.
- */
- void release();
- private:
- Locker* const _locker;
- bool _released;
- };
+/**
+ * This explains how the MMAP V1 durability system is implemented.
+ *
+ * Every server operation (OperationContext) must call Locker::lockGlobal as the first lock
+ * action (it is illegal to acquire any other locks without calling this first). This action
+ * acquires the global and flush locks in the appropriate modes (IS for read operations, IX
+ * for write operations). Having the flush lock in one of these modes indicates to the flush
+ * thread that there is an active reader or writer.
+ *
+ * Whenever the flush thread (dur.cpp) activates, it goes through the following steps:
+ *
+ * Acquire the flush lock in S mode using AutoAcquireFlushLockForMMAPV1Commit. This waits until
+ * all current write activity on the system completes and does not allow any new operations to
+ * start.
+ *
+ * Once the S lock is granted, the flush thread writes the journal entries to disk (it is
+ * guaranteed that there will not be any modifications) and applies them to the shared view.
+ *
+ * After that, it upgrades the S lock to X and remaps the private view.
+ *
+ * NOTE: There should be only one usage of this class and this should be in dur.cpp
+ */
+class AutoAcquireFlushLockForMMAPV1Commit {
+public:
+ AutoAcquireFlushLockForMMAPV1Commit(Locker* locker);
+ ~AutoAcquireFlushLockForMMAPV1Commit();
+ /**
+ * We need the exclusive lock in order to do the shared view remap.
+ */
+ void upgradeFlushLockToExclusive();
/**
- * Retrieves the global lock manager instance.
+ * Allows the acquired flush lock to be prematurely released. This is helpful for the case
+ * where we know that we won't be doing a remap after gathering the write intents, so the
+     * rest can be done outside of the flush lock.
*/
- LockManager* getGlobalLockManager();
+ void release();
+
+private:
+ Locker* const _locker;
+ bool _released;
+};
+
+
+/**
+ * Retrieves the global lock manager instance.
+ */
+LockManager* getGlobalLockManager();
-} // namespace mongo
+} // namespace mongo
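
The lockBegin/lockComplete pair declared above splits a blocking acquisition into a non-blocking
request followed by a bounded wait (the same split the LockStats Wait test below relies on). A
minimal sketch of the pattern, assuming the calling thread owns the locker, has already acquired
the global lock, and picks an arbitrary 500 ms wait budget:

    DefaultLockerImpl locker;
    locker.lockGlobal(MODE_IX);

    const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
    LockResult result = locker.lockBegin(resId, MODE_S);
    if (result == LOCK_WAITING) {
        // Bounded wait; deadlock detection disabled for this acquisition.
        result = locker.lockComplete(resId, MODE_S, 500, false);
    }
    if (result == LOCK_OK) {
        // ... use the collection ...
        locker.unlock(resId);
    }
    locker.unlockAll();
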
diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp
index 54f305f20df..cb3d87aa1ad 100644
--- a/src/mongo/db/concurrency/lock_state_test.cpp
+++ b/src/mongo/db/concurrency/lock_state_test.cpp
@@ -40,280 +40,278 @@
namespace mongo {
namespace {
- const int NUM_PERF_ITERS = 1000*1000; // numeber of iterations to use for lock perf
+const int NUM_PERF_ITERS = 1000 * 1000;  // number of iterations to use for lock perf
}
- TEST(LockerImpl, LockNoConflict) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockerImpl, LockNoConflict) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ MMAPV1LockerImpl locker;
+ locker.lockGlobal(MODE_IX);
- ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
+ ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
- ASSERT(locker.isLockHeldForMode(resId, MODE_X));
- ASSERT(locker.isLockHeldForMode(resId, MODE_S));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_X));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_S));
- ASSERT(locker.unlock(resId));
+ ASSERT(locker.unlock(resId));
- ASSERT(locker.isLockHeldForMode(resId, MODE_NONE));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_NONE));
- locker.unlockAll();
- }
+ locker.unlockAll();
+}
- TEST(LockerImpl, ReLockNoConflict) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockerImpl, ReLockNoConflict) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- MMAPV1LockerImpl locker;
- locker.lockGlobal(MODE_IX);
+ MMAPV1LockerImpl locker;
+ locker.lockGlobal(MODE_IX);
- ASSERT(LOCK_OK == locker.lock(resId, MODE_S));
- ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
+ ASSERT(LOCK_OK == locker.lock(resId, MODE_S));
+ ASSERT(LOCK_OK == locker.lock(resId, MODE_X));
- ASSERT(!locker.unlock(resId));
- ASSERT(locker.isLockHeldForMode(resId, MODE_X));
+ ASSERT(!locker.unlock(resId));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_X));
- ASSERT(locker.unlock(resId));
- ASSERT(locker.isLockHeldForMode(resId, MODE_NONE));
+ ASSERT(locker.unlock(resId));
+ ASSERT(locker.isLockHeldForMode(resId, MODE_NONE));
- ASSERT(locker.unlockAll());
- }
+ ASSERT(locker.unlockAll());
+}
- TEST(LockerImpl, ConflictWithTimeout) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockerImpl, ConflictWithTimeout) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- DefaultLockerImpl locker1;
- ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
- ASSERT(LOCK_OK == locker1.lock(resId, MODE_X));
+ DefaultLockerImpl locker1;
+ ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IX));
+ ASSERT(LOCK_OK == locker1.lock(resId, MODE_X));
- DefaultLockerImpl locker2;
- ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
- ASSERT(LOCK_TIMEOUT == locker2.lock(resId, MODE_S, 0));
+ DefaultLockerImpl locker2;
+ ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IX));
+ ASSERT(LOCK_TIMEOUT == locker2.lock(resId, MODE_S, 0));
- ASSERT(locker2.getLockMode(resId) == MODE_NONE);
+ ASSERT(locker2.getLockMode(resId) == MODE_NONE);
- ASSERT(locker1.unlock(resId));
+ ASSERT(locker1.unlock(resId));
- ASSERT(locker1.unlockAll());
- ASSERT(locker2.unlockAll());
- }
+ ASSERT(locker1.unlockAll());
+ ASSERT(locker2.unlockAll());
+}
- TEST(LockerImpl, ConflictUpgradeWithTimeout) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+TEST(LockerImpl, ConflictUpgradeWithTimeout) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- DefaultLockerImpl locker1;
- ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IS));
- ASSERT(LOCK_OK == locker1.lock(resId, MODE_S));
+ DefaultLockerImpl locker1;
+ ASSERT(LOCK_OK == locker1.lockGlobal(MODE_IS));
+ ASSERT(LOCK_OK == locker1.lock(resId, MODE_S));
- DefaultLockerImpl locker2;
- ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IS));
- ASSERT(LOCK_OK == locker2.lock(resId, MODE_S));
+ DefaultLockerImpl locker2;
+ ASSERT(LOCK_OK == locker2.lockGlobal(MODE_IS));
+ ASSERT(LOCK_OK == locker2.lock(resId, MODE_S));
- // Try upgrading locker 1, which should block and timeout
- ASSERT(LOCK_TIMEOUT == locker1.lock(resId, MODE_X, 1));
+ // Try upgrading locker 1, which should block and timeout
+ ASSERT(LOCK_TIMEOUT == locker1.lock(resId, MODE_X, 1));
- locker1.unlockAll();
- locker2.unlockAll();
- }
+ locker1.unlockAll();
+ locker2.unlockAll();
+}
- TEST(LockerImpl, ReadTransaction) {
- DefaultLockerImpl locker;
+TEST(LockerImpl, ReadTransaction) {
+ DefaultLockerImpl locker;
- locker.lockGlobal(MODE_IS);
- locker.unlockAll();
+ locker.lockGlobal(MODE_IS);
+ locker.unlockAll();
- locker.lockGlobal(MODE_IX);
- locker.unlockAll();
+ locker.lockGlobal(MODE_IX);
+ locker.unlockAll();
- locker.lockGlobal(MODE_IX);
- locker.lockGlobal(MODE_IS);
- locker.unlockAll();
- locker.unlockAll();
- }
+ locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(MODE_IS);
+ locker.unlockAll();
+ locker.unlockAll();
+}
- /**
- * Test that saveMMAPV1LockerImpl works by examining the output.
- */
- TEST(LockerImpl, saveAndRestoreGlobal) {
- Locker::LockSnapshot lockInfo;
+/**
+ * Test that saveMMAPV1LockerImpl works by examining the output.
+ */
+TEST(LockerImpl, saveAndRestoreGlobal) {
+ Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ DefaultLockerImpl locker;
- // No lock requests made, no locks held.
- locker.saveLockStateAndUnlock(&lockInfo);
- ASSERT_EQUALS(0U, lockInfo.locks.size());
+ // No lock requests made, no locks held.
+ locker.saveLockStateAndUnlock(&lockInfo);
+ ASSERT_EQUALS(0U, lockInfo.locks.size());
- // Lock the global lock, but just once.
- locker.lockGlobal(MODE_IX);
+ // Lock the global lock, but just once.
+ locker.lockGlobal(MODE_IX);
- // We've locked the global lock. This should be reflected in the lockInfo.
- locker.saveLockStateAndUnlock(&lockInfo);
- ASSERT(!locker.isLocked());
- ASSERT_EQUALS(MODE_IX, lockInfo.globalMode);
+ // We've locked the global lock. This should be reflected in the lockInfo.
+ locker.saveLockStateAndUnlock(&lockInfo);
+ ASSERT(!locker.isLocked());
+ ASSERT_EQUALS(MODE_IX, lockInfo.globalMode);
- // Restore the lock(s) we had.
- locker.restoreLockState(lockInfo);
+ // Restore the lock(s) we had.
+ locker.restoreLockState(lockInfo);
- ASSERT(locker.isLocked());
- ASSERT(locker.unlockAll());
- }
+ ASSERT(locker.isLocked());
+ ASSERT(locker.unlockAll());
+}
- /**
- * Test that we don't unlock when we have the global lock more than once.
- */
- TEST(LockerImpl, saveAndRestoreGlobalAcquiredTwice) {
- Locker::LockSnapshot lockInfo;
+/**
+ * Test that we don't unlock when we have the global lock more than once.
+ */
+TEST(LockerImpl, saveAndRestoreGlobalAcquiredTwice) {
+ Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ DefaultLockerImpl locker;
- // No lock requests made, no locks held.
- locker.saveLockStateAndUnlock(&lockInfo);
- ASSERT_EQUALS(0U, lockInfo.locks.size());
+ // No lock requests made, no locks held.
+ locker.saveLockStateAndUnlock(&lockInfo);
+ ASSERT_EQUALS(0U, lockInfo.locks.size());
- // Lock the global lock.
- locker.lockGlobal(MODE_IX);
- locker.lockGlobal(MODE_IX);
+ // Lock the global lock.
+ locker.lockGlobal(MODE_IX);
+ locker.lockGlobal(MODE_IX);
- // This shouldn't actually unlock as we're in a nested scope.
- ASSERT(!locker.saveLockStateAndUnlock(&lockInfo));
+ // This shouldn't actually unlock as we're in a nested scope.
+ ASSERT(!locker.saveLockStateAndUnlock(&lockInfo));
- ASSERT(locker.isLocked());
+ ASSERT(locker.isLocked());
- // We must unlockAll twice.
- ASSERT(!locker.unlockAll());
- ASSERT(locker.unlockAll());
- }
+ // We must unlockAll twice.
+ ASSERT(!locker.unlockAll());
+ ASSERT(locker.unlockAll());
+}
- /**
- * Tests that restoreMMAPV1LockerImpl works by locking a db and collection and saving + restoring.
- */
- TEST(LockerImpl, saveAndRestoreDBAndCollection) {
- Locker::LockSnapshot lockInfo;
+/**
+ * Tests that restoreMMAPV1LockerImpl works by locking a db and collection and saving + restoring.
+ */
+TEST(LockerImpl, saveAndRestoreDBAndCollection) {
+ Locker::LockSnapshot lockInfo;
- DefaultLockerImpl locker;
+ DefaultLockerImpl locker;
- const ResourceId resIdDatabase(RESOURCE_DATABASE, std::string("TestDB"));
- const ResourceId resIdCollection(RESOURCE_COLLECTION, std::string("TestDB.collection"));
+ const ResourceId resIdDatabase(RESOURCE_DATABASE, std::string("TestDB"));
+ const ResourceId resIdCollection(RESOURCE_COLLECTION, std::string("TestDB.collection"));
- // Lock some stuff.
- locker.lockGlobal(MODE_IX);
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resIdCollection, MODE_X));
- locker.saveLockStateAndUnlock(&lockInfo);
+ // Lock some stuff.
+ locker.lockGlobal(MODE_IX);
+ ASSERT_EQUALS(LOCK_OK, locker.lock(resIdDatabase, MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker.lock(resIdCollection, MODE_X));
+ locker.saveLockStateAndUnlock(&lockInfo);
- // Things shouldn't be locked anymore.
- ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase));
- ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection));
+ // Things shouldn't be locked anymore.
+ ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase));
+ ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection));
- // Restore lock state.
- locker.restoreLockState(lockInfo);
+ // Restore lock state.
+ locker.restoreLockState(lockInfo);
- // Make sure things were re-locked.
- ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase));
- ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection));
+ // Make sure things were re-locked.
+ ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase));
+ ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection));
- ASSERT(locker.unlockAll());
- }
+ ASSERT(locker.unlockAll());
+}
- TEST(LockerImpl, DefaultLocker) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
+TEST(LockerImpl, DefaultLocker) {
+ const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
- DefaultLockerImpl locker;
- ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
-
- // Make sure the flush lock IS NOT held
- Locker::LockerInfo info;
- locker.getLockerInfo(&info);
- ASSERT(!info.waitingResource.isValid());
- ASSERT_EQUALS(2U, info.locks.size());
- ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
- ASSERT_EQUALS(resId, info.locks[1].resourceId);
-
- ASSERT(locker.unlockAll());
- }
+ DefaultLockerImpl locker;
+ ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
- TEST(LockerImpl, MMAPV1Locker) {
- const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
+ // Make sure the flush lock IS NOT held
+ Locker::LockerInfo info;
+ locker.getLockerInfo(&info);
+ ASSERT(!info.waitingResource.isValid());
+ ASSERT_EQUALS(2U, info.locks.size());
+ ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
+ ASSERT_EQUALS(resId, info.locks[1].resourceId);
- MMAPV1LockerImpl locker;
- ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
- ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
+ ASSERT(locker.unlockAll());
+}
- // Make sure the flush lock IS held
- Locker::LockerInfo info;
- locker.getLockerInfo(&info);
- ASSERT(!info.waitingResource.isValid());
- ASSERT_EQUALS(3U, info.locks.size());
- ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
- ASSERT_EQUALS(RESOURCE_MMAPV1_FLUSH, info.locks[1].resourceId.getType());
- ASSERT_EQUALS(resId, info.locks[2].resourceId);
+TEST(LockerImpl, MMAPV1Locker) {
+ const ResourceId resId(RESOURCE_DATABASE, std::string("TestDB"));
- ASSERT(locker.unlockAll());
- }
+ MMAPV1LockerImpl locker;
+ ASSERT_EQUALS(LOCK_OK, locker.lockGlobal(MODE_IX));
+ ASSERT_EQUALS(LOCK_OK, locker.lock(resId, MODE_X));
+ // Make sure the flush lock IS held
+ Locker::LockerInfo info;
+ locker.getLockerInfo(&info);
+ ASSERT(!info.waitingResource.isValid());
+ ASSERT_EQUALS(3U, info.locks.size());
+ ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType());
+ ASSERT_EQUALS(RESOURCE_MMAPV1_FLUSH, info.locks[1].resourceId.getType());
+ ASSERT_EQUALS(resId, info.locks[2].resourceId);
- // These two tests exercise single-threaded performance of uncontended lock acquisition. It
- // is not practical to run them on debug builds.
+ ASSERT(locker.unlockAll());
+}
+
+
+// These two tests exercise single-threaded performance of uncontended lock acquisition. It
+// is not practical to run them on debug builds.
#ifndef MONGO_CONFIG_DEBUG_BUILD
- TEST(Locker, PerformanceBoostSharedMutex) {
- for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
- stdx::mutex mtx;
-
- // Do some warm-up loops
- for (int i = 0; i < 1000; i++) {
- mtx.lock();
- mtx.unlock();
- }
-
- // Measure the number of loops
- //
- Timer t;
-
- for (int i = 0; i < NUM_PERF_ITERS; i++) {
- mtx.lock();
- mtx.unlock();
- }
-
- log() << numLockers
- << " locks took: "
- << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
- << " ns";
+TEST(Locker, PerformanceBoostSharedMutex) {
+ for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
+ stdx::mutex mtx;
+
+ // Do some warm-up loops
+ for (int i = 0; i < 1000; i++) {
+ mtx.lock();
+ mtx.unlock();
+ }
+
+ // Measure the number of loops
+ //
+ Timer t;
+
+ for (int i = 0; i < NUM_PERF_ITERS; i++) {
+ mtx.lock();
+ mtx.unlock();
}
+
+ log() << numLockers << " locks took: "
+ << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
+ << " ns";
}
+}
+
+TEST(Locker, PerformanceLocker) {
+ for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
+ std::vector<std::shared_ptr<LockerForTests>> lockers(numLockers);
+ for (int i = 0; i < numLockers; i++) {
+ lockers[i].reset(new LockerForTests(MODE_S));
+ }
+
+ DefaultLockerImpl locker;
+
+ // Do some warm-up loops
+ for (int i = 0; i < 1000; i++) {
+ locker.lockGlobal(MODE_IS);
+ locker.unlockAll();
+ }
- TEST(Locker, PerformanceLocker) {
- for (int numLockers = 1; numLockers <= 64; numLockers = numLockers * 2) {
- std::vector<std::shared_ptr<LockerForTests> > lockers(numLockers);
- for (int i = 0; i < numLockers; i++) {
- lockers[i].reset(new LockerForTests(MODE_S));
- }
-
- DefaultLockerImpl locker;
-
- // Do some warm-up loops
- for (int i = 0; i < 1000; i++) {
- locker.lockGlobal(MODE_IS);
- locker.unlockAll();
- }
-
- // Measure the number of loops
- Timer t;
-
- for (int i = 0; i < NUM_PERF_ITERS; i++) {
- locker.lockGlobal(MODE_IS);
- locker.unlockAll();
- }
-
- log() << numLockers
- << " locks took: "
- << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
- << " ns";
+ // Measure the number of loops
+ Timer t;
+
+ for (int i = 0; i < NUM_PERF_ITERS; i++) {
+ locker.lockGlobal(MODE_IS);
+ locker.unlockAll();
}
+
+ log() << numLockers << " locks took: "
+ << static_cast<double>(t.micros()) * 1000.0 / static_cast<double>(NUM_PERF_ITERS)
+ << " ns";
}
+}
#endif // MONGO_CONFIG_DEBUG_BUILD
-} // namespace mongo
+} // namespace mongo
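
The saveAndRestore* tests above exercise the cooperative yield path described in locker.h. A
minimal sketch of that yield, assuming a Locker* named locker for the current operation and that
yielding is safe at this point:

    Locker::LockSnapshot snapshot;
    if (locker->saveLockStateAndUnlock(&snapshot)) {
        // Everything is unlocked here; other operations may interleave.
        locker->restoreLockState(snapshot);
    }
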
diff --git a/src/mongo/db/concurrency/lock_stats.cpp b/src/mongo/db/concurrency/lock_stats.cpp
index c115244f369..809799299a7 100644
--- a/src/mongo/db/concurrency/lock_stats.cpp
+++ b/src/mongo/db/concurrency/lock_stats.cpp
@@ -34,125 +34,121 @@
namespace mongo {
- template<typename CounterType>
- LockStats<CounterType>::LockStats() {
- reset();
+template <typename CounterType>
+LockStats<CounterType>::LockStats() {
+ reset();
+}
+
+template <typename CounterType>
+void LockStats<CounterType>::report(BSONObjBuilder* builder) const {
+ // All indexing below starts from offset 1, because we do not want to report/account
+ // position 0, which is a sentinel value for invalid resource/no lock.
+ for (int i = 1; i < ResourceTypesCount; i++) {
+ _report(builder, resourceTypeName(static_cast<ResourceType>(i)), _stats[i]);
}
- template<typename CounterType>
- void LockStats<CounterType>::report(BSONObjBuilder* builder) const {
- // All indexing below starts from offset 1, because we do not want to report/account
- // position 0, which is a sentinel value for invalid resource/no lock.
- for (int i = 1; i < ResourceTypesCount; i++) {
- _report(builder, resourceTypeName(static_cast<ResourceType>(i)), _stats[i]);
- }
-
- _report(builder, "oplog", _oplogStats);
- }
-
- template<typename CounterType>
- void LockStats<CounterType>::_report(BSONObjBuilder* builder,
- const char* sectionName,
- const PerModeLockStatCounters& stat) const {
-
- std::unique_ptr<BSONObjBuilder> section;
-
- // All indexing below starts from offset 1, because we do not want to report/account
- // position 0, which is a sentinel value for invalid resource/no lock.
-
- // Num acquires
- {
- std::unique_ptr<BSONObjBuilder> numAcquires;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].numAcquisitions);
- if (value > 0) {
- if (!numAcquires) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- numAcquires.reset(
- new BSONObjBuilder(section->subobjStart("acquireCount")));
+ _report(builder, "oplog", _oplogStats);
+}
+
+template <typename CounterType>
+void LockStats<CounterType>::_report(BSONObjBuilder* builder,
+ const char* sectionName,
+ const PerModeLockStatCounters& stat) const {
+ std::unique_ptr<BSONObjBuilder> section;
+
+ // All indexing below starts from offset 1, because we do not want to report/account
+ // position 0, which is a sentinel value for invalid resource/no lock.
+
+ // Num acquires
+ {
+ std::unique_ptr<BSONObjBuilder> numAcquires;
+ for (int mode = 1; mode < LockModesCount; mode++) {
+ const long long value = CounterOps::get(stat.modeStats[mode].numAcquisitions);
+ if (value > 0) {
+ if (!numAcquires) {
+ if (!section) {
+ section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
}
- numAcquires->append(legacyModeName(static_cast<LockMode>(mode)), value);
+
+ numAcquires.reset(new BSONObjBuilder(section->subobjStart("acquireCount")));
}
+ numAcquires->append(legacyModeName(static_cast<LockMode>(mode)), value);
}
}
+ }
- // Num waits
- {
- std::unique_ptr<BSONObjBuilder> numWaits;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].numWaits);
- if (value > 0) {
- if (!numWaits) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- numWaits.reset(
- new BSONObjBuilder(section->subobjStart("acquireWaitCount")));
+ // Num waits
+ {
+ std::unique_ptr<BSONObjBuilder> numWaits;
+ for (int mode = 1; mode < LockModesCount; mode++) {
+ const long long value = CounterOps::get(stat.modeStats[mode].numWaits);
+ if (value > 0) {
+ if (!numWaits) {
+ if (!section) {
+ section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
}
- numWaits->append(legacyModeName(static_cast<LockMode>(mode)), value);
+
+ numWaits.reset(new BSONObjBuilder(section->subobjStart("acquireWaitCount")));
}
+ numWaits->append(legacyModeName(static_cast<LockMode>(mode)), value);
}
}
+ }
- // Total time waiting
- {
- std::unique_ptr<BSONObjBuilder> timeAcquiring;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].combinedWaitTimeMicros);
- if (value > 0) {
- if (!timeAcquiring) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- timeAcquiring.reset(
- new BSONObjBuilder(section->subobjStart("timeAcquiringMicros")));
+ // Total time waiting
+ {
+ std::unique_ptr<BSONObjBuilder> timeAcquiring;
+ for (int mode = 1; mode < LockModesCount; mode++) {
+ const long long value = CounterOps::get(stat.modeStats[mode].combinedWaitTimeMicros);
+ if (value > 0) {
+ if (!timeAcquiring) {
+ if (!section) {
+ section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
}
- timeAcquiring->append(legacyModeName(static_cast<LockMode>(mode)), value);
+
+ timeAcquiring.reset(
+ new BSONObjBuilder(section->subobjStart("timeAcquiringMicros")));
}
+ timeAcquiring->append(legacyModeName(static_cast<LockMode>(mode)), value);
}
}
+ }
- // Deadlocks
- {
- std::unique_ptr<BSONObjBuilder> deadlockCount;
- for (int mode = 1; mode < LockModesCount; mode++) {
- const long long value = CounterOps::get(stat.modeStats[mode].numDeadlocks);
- if (value > 0) {
- if (!deadlockCount) {
- if (!section) {
- section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
- }
-
- deadlockCount.reset(
- new BSONObjBuilder(section->subobjStart("deadlockCount")));
+ // Deadlocks
+ {
+ std::unique_ptr<BSONObjBuilder> deadlockCount;
+ for (int mode = 1; mode < LockModesCount; mode++) {
+ const long long value = CounterOps::get(stat.modeStats[mode].numDeadlocks);
+ if (value > 0) {
+ if (!deadlockCount) {
+ if (!section) {
+ section.reset(new BSONObjBuilder(builder->subobjStart(sectionName)));
}
- deadlockCount->append(legacyModeName(static_cast<LockMode>(mode)), value);
+
+ deadlockCount.reset(new BSONObjBuilder(section->subobjStart("deadlockCount")));
}
+ deadlockCount->append(legacyModeName(static_cast<LockMode>(mode)), value);
}
}
}
+}
- template<typename CounterType>
- void LockStats<CounterType>::reset() {
- for (int i = 0; i < ResourceTypesCount; i++) {
- for (int mode = 0; mode < LockModesCount; mode++) {
- _stats[i].modeStats[mode].reset();
- }
- }
-
+template <typename CounterType>
+void LockStats<CounterType>::reset() {
+ for (int i = 0; i < ResourceTypesCount; i++) {
for (int mode = 0; mode < LockModesCount; mode++) {
- _oplogStats.modeStats[mode].reset();
+ _stats[i].modeStats[mode].reset();
}
}
+ for (int mode = 0; mode < LockModesCount; mode++) {
+ _oplogStats.modeStats[mode].reset();
+ }
+}
+
- // Ensures that there are instances compiled for LockStats for AtomicInt64 and int64_t
- template class LockStats<int64_t>;
- template class LockStats<AtomicInt64>;
+// Ensures that there are instances compiled for LockStats for AtomicInt64 and int64_t
+template class LockStats<int64_t>;
+template class LockStats<AtomicInt64>;
-} // namespace mongo
+} // namespace mongo
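
For each resource-type section (plus the special-cased "oplog" section), _report above emits
optional acquireCount, acquireWaitCount, timeAcquiringMicros and deadlockCount sub-objects keyed
by legacy lock-mode name. A minimal sketch of producing such a document from the instance-wide
statistics, assuming the free functions declared in lock_stats.h:

    SingleThreadedLockStats stats;
    reportGlobalLockingStats(&stats);

    BSONObjBuilder builder;
    stats.report(&builder);
    const BSONObj locksSection = builder.obj();  // can then be logged or embedded in a status document
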
diff --git a/src/mongo/db/concurrency/lock_stats.h b/src/mongo/db/concurrency/lock_stats.h
index 2994fb99e3e..86f4a26d273 100644
--- a/src/mongo/db/concurrency/lock_stats.h
+++ b/src/mongo/db/concurrency/lock_stats.h
@@ -33,172 +33,171 @@
namespace mongo {
- class BSONObjBuilder;
+class BSONObjBuilder;
- /**
- * Operations for manipulating the lock statistics abstracting whether they are atomic or not.
- */
- struct CounterOps {
- static int64_t get(const int64_t& counter) {
- return counter;
- }
+/**
+ * Operations for manipulating the lock statistics, abstracting whether they are atomic or not.
+ */
+struct CounterOps {
+ static int64_t get(const int64_t& counter) {
+ return counter;
+ }
- static int64_t get(const AtomicInt64& counter) {
- return counter.load();
- }
+ static int64_t get(const AtomicInt64& counter) {
+ return counter.load();
+ }
- static void set(int64_t& counter, int64_t value) {
- counter = value;
- }
+ static void set(int64_t& counter, int64_t value) {
+ counter = value;
+ }
- static void set(AtomicInt64& counter, int64_t value) {
- counter.store(value);
- }
+ static void set(AtomicInt64& counter, int64_t value) {
+ counter.store(value);
+ }
- static void add(int64_t& counter, int64_t value) {
- counter += value;
- }
+ static void add(int64_t& counter, int64_t value) {
+ counter += value;
+ }
- static void add(int64_t& counter, const AtomicInt64& value) {
- counter += value.load();
- }
+ static void add(int64_t& counter, const AtomicInt64& value) {
+ counter += value.load();
+ }
- static void add(AtomicInt64& counter, int64_t value) {
- counter.addAndFetch(value);
- }
- };
-
-
- /**
- * Bundle of locking statistics values.
- */
- template<typename CounterType>
- struct LockStatCounters {
-
- template<typename OtherType>
- void append(const LockStatCounters<OtherType>& other) {
- CounterOps::add(numAcquisitions, other.numAcquisitions);
- CounterOps::add(numWaits, other.numWaits);
- CounterOps::add(combinedWaitTimeMicros, other.combinedWaitTimeMicros);
- CounterOps::add(numDeadlocks, other.numDeadlocks);
- }
+ static void add(AtomicInt64& counter, int64_t value) {
+ counter.addAndFetch(value);
+ }
+};
- void reset() {
- CounterOps::set(numAcquisitions, 0);
- CounterOps::set(numWaits, 0);
- CounterOps::set(combinedWaitTimeMicros, 0);
- CounterOps::set(numDeadlocks, 0);
- }
+/**
+ * Bundle of locking statistics values.
+ */
+template <typename CounterType>
+struct LockStatCounters {
+ template <typename OtherType>
+ void append(const LockStatCounters<OtherType>& other) {
+ CounterOps::add(numAcquisitions, other.numAcquisitions);
+ CounterOps::add(numWaits, other.numWaits);
+ CounterOps::add(combinedWaitTimeMicros, other.combinedWaitTimeMicros);
+ CounterOps::add(numDeadlocks, other.numDeadlocks);
+ }
+
+ void reset() {
+ CounterOps::set(numAcquisitions, 0);
+ CounterOps::set(numWaits, 0);
+ CounterOps::set(combinedWaitTimeMicros, 0);
+ CounterOps::set(numDeadlocks, 0);
+ }
+
+
+ CounterType numAcquisitions;
+ CounterType numWaits;
+ CounterType combinedWaitTimeMicros;
+ CounterType numDeadlocks;
+};
- CounterType numAcquisitions;
- CounterType numWaits;
- CounterType combinedWaitTimeMicros;
- CounterType numDeadlocks;
- };
+/**
+ * Templatized lock statistics management class, which can be specialized with atomic integers
+ * for the global stats and with regular integers for the per-locker stats.
+ */
+template <typename CounterType>
+class LockStats {
+public:
+ // Declare the type for the lock counters bundle
+ typedef LockStatCounters<CounterType> LockStatCountersType;
/**
- * Templatized lock statistics management class, which can be specialized with atomic integers
- * for the global stats and with regular integers for the per-locker stats.
+ * Initializes the locking statistics with zeroes (calls reset).
*/
- template<typename CounterType>
- class LockStats {
- public:
- // Declare the type for the lock counters bundle
- typedef LockStatCounters<CounterType> LockStatCountersType;
-
- /**
- * Initializes the locking statistics with zeroes (calls reset).
- */
- LockStats();
-
- void recordAcquisition(ResourceId resId, LockMode mode) {
- CounterOps::add(get(resId, mode).numAcquisitions, 1);
- }
+ LockStats();
- void recordWait(ResourceId resId, LockMode mode) {
- CounterOps::add(get(resId, mode).numWaits, 1);
- }
+ void recordAcquisition(ResourceId resId, LockMode mode) {
+ CounterOps::add(get(resId, mode).numAcquisitions, 1);
+ }
- void recordWaitTime(ResourceId resId, LockMode mode, int64_t waitMicros) {
- CounterOps::add(get(resId, mode).combinedWaitTimeMicros, waitMicros);
- }
+ void recordWait(ResourceId resId, LockMode mode) {
+ CounterOps::add(get(resId, mode).numWaits, 1);
+ }
- void recordDeadlock(ResourceId resId, LockMode mode) {
- CounterOps::add(get(resId, mode).numDeadlocks, 1);
- }
+ void recordWaitTime(ResourceId resId, LockMode mode, int64_t waitMicros) {
+ CounterOps::add(get(resId, mode).combinedWaitTimeMicros, waitMicros);
+ }
- LockStatCountersType& get(ResourceId resId, LockMode mode) {
- if (resId == resourceIdOplog) {
- return _oplogStats.modeStats[mode];
- }
+ void recordDeadlock(ResourceId resId, LockMode mode) {
+ CounterOps::add(get(resId, mode).numDeadlocks, 1);
+ }
- return _stats[resId.getType()].modeStats[mode];
+ LockStatCountersType& get(ResourceId resId, LockMode mode) {
+ if (resId == resourceIdOplog) {
+ return _oplogStats.modeStats[mode];
}
- template<typename OtherType>
- void append(const LockStats<OtherType>& other) {
- typedef LockStatCounters<OtherType> OtherLockStatCountersType;
-
- // Append all lock stats
- for (int i = 0; i < ResourceTypesCount; i++) {
- for (int mode = 0; mode < LockModesCount; mode++) {
- const OtherLockStatCountersType& otherStats = other._stats[i].modeStats[mode];
- LockStatCountersType& thisStats = _stats[i].modeStats[mode];
- thisStats.append(otherStats);
- }
- }
+ return _stats[resId.getType()].modeStats[mode];
+ }
+
+ template <typename OtherType>
+ void append(const LockStats<OtherType>& other) {
+ typedef LockStatCounters<OtherType> OtherLockStatCountersType;
- // Append the oplog stats
+ // Append all lock stats
+ for (int i = 0; i < ResourceTypesCount; i++) {
for (int mode = 0; mode < LockModesCount; mode++) {
- const OtherLockStatCountersType& otherStats = other._oplogStats.modeStats[mode];
- LockStatCountersType& thisStats = _oplogStats.modeStats[mode];
+ const OtherLockStatCountersType& otherStats = other._stats[i].modeStats[mode];
+ LockStatCountersType& thisStats = _stats[i].modeStats[mode];
thisStats.append(otherStats);
}
}
- void report(BSONObjBuilder* builder) const;
- void reset();
+ // Append the oplog stats
+ for (int mode = 0; mode < LockModesCount; mode++) {
+ const OtherLockStatCountersType& otherStats = other._oplogStats.modeStats[mode];
+ LockStatCountersType& thisStats = _oplogStats.modeStats[mode];
+ thisStats.append(otherStats);
+ }
+ }
- private:
- // Necessary for the append call, which accepts argument of type different than our
- // template parameter.
- template<typename T>
- friend class LockStats;
+ void report(BSONObjBuilder* builder) const;
+ void reset();
+private:
+    // Necessary for the append call, which accepts an argument of a type different from our
+    // template parameter.
+ template <typename T>
+ friend class LockStats;
- // Keep the per-mode lock stats next to each other in case we want to do fancy operations
- // such as atomic operations on 128-bit values.
- struct PerModeLockStatCounters {
- LockStatCountersType modeStats[LockModesCount];
- };
+ // Keep the per-mode lock stats next to each other in case we want to do fancy operations
+ // such as atomic operations on 128-bit values.
+ struct PerModeLockStatCounters {
+ LockStatCountersType modeStats[LockModesCount];
+ };
- void _report(BSONObjBuilder* builder,
- const char* sectionName,
- const PerModeLockStatCounters& stat) const;
+ void _report(BSONObjBuilder* builder,
+ const char* sectionName,
+ const PerModeLockStatCounters& stat) const;
- // Split the lock stats per resource type and special-case the oplog so we can collect
- // more detailed stats for it.
- PerModeLockStatCounters _stats[ResourceTypesCount];
- PerModeLockStatCounters _oplogStats;
- };
- typedef LockStats<int64_t> SingleThreadedLockStats;
- typedef LockStats<AtomicInt64> AtomicLockStats;
+ // Split the lock stats per resource type and special-case the oplog so we can collect
+ // more detailed stats for it.
+ PerModeLockStatCounters _stats[ResourceTypesCount];
+ PerModeLockStatCounters _oplogStats;
+};
+typedef LockStats<int64_t> SingleThreadedLockStats;
+typedef LockStats<AtomicInt64> AtomicLockStats;
- /**
- * Reports instance-wide locking statistics, which can then be converted to BSON or logged.
- */
- void reportGlobalLockingStats(SingleThreadedLockStats* outStats);
- /**
- * Currently used for testing only.
- */
- void resetGlobalLockStats();
+/**
+ * Reports instance-wide locking statistics, which can then be converted to BSON or logged.
+ */
+void reportGlobalLockingStats(SingleThreadedLockStats* outStats);
+
+/**
+ * Currently used for testing only.
+ */
+void resetGlobalLockStats();
-} // namespace mongo
+} // namespace mongo
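
Because LockStats is templatized on the counter type, per-locker counters (plain int64_t) can be
folded into atomic process-wide counters via the cross-type append. A minimal sketch, assuming an
illustrative standalone AtomicLockStats instance (the real instance-wide statistics are reached
through reportGlobalLockingStats) and an arbitrary 1500-microsecond wait:

    SingleThreadedLockStats perOpStats;
    perOpStats.recordAcquisition(resourceIdOplog, MODE_IX);
    perOpStats.recordWait(resourceIdOplog, MODE_IX);
    perOpStats.recordWaitTime(resourceIdOplog, MODE_IX, 1500);  // microseconds

    AtomicLockStats globalStats;  // illustrative stand-in for the real global instance
    globalStats.append(perOpStats);
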
diff --git a/src/mongo/db/concurrency/lock_stats_test.cpp b/src/mongo/db/concurrency/lock_stats_test.cpp
index 9b148e606bd..8ae6eb3c010 100644
--- a/src/mongo/db/concurrency/lock_stats_test.cpp
+++ b/src/mongo/db/concurrency/lock_stats_test.cpp
@@ -34,69 +34,69 @@
namespace mongo {
- TEST(LockStats, NoWait) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.NoWait"));
+TEST(LockStats, NoWait) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.NoWait"));
- resetGlobalLockStats();
+ resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
- locker.lock(resId, MODE_X);
- locker.unlock(resId);
+ LockerForTests locker(MODE_IX);
+ locker.lock(resId, MODE_X);
+ locker.unlock(resId);
- // Make sure that the waits/blocks are zero
- SingleThreadedLockStats stats;
- reportGlobalLockingStats(&stats);
+ // Make sure that the waits/blocks are zero
+ SingleThreadedLockStats stats;
+ reportGlobalLockingStats(&stats);
- ASSERT_EQUALS(1, stats.get(resId, MODE_X).numAcquisitions);
- ASSERT_EQUALS(0, stats.get(resId, MODE_X).numWaits);
- ASSERT_EQUALS(0, stats.get(resId, MODE_X).combinedWaitTimeMicros);
- }
+ ASSERT_EQUALS(1, stats.get(resId, MODE_X).numAcquisitions);
+ ASSERT_EQUALS(0, stats.get(resId, MODE_X).numWaits);
+ ASSERT_EQUALS(0, stats.get(resId, MODE_X).combinedWaitTimeMicros);
+}
- TEST(LockStats, Wait) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.Wait"));
+TEST(LockStats, Wait) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.Wait"));
- resetGlobalLockStats();
+ resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
- locker.lock(resId, MODE_X);
+ LockerForTests locker(MODE_IX);
+ locker.lock(resId, MODE_X);
- {
- // This will block
- LockerForTests lockerConflict(MODE_IX);
- ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(resId, MODE_S));
+ {
+ // This will block
+ LockerForTests lockerConflict(MODE_IX);
+ ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBegin(resId, MODE_S));
- // Sleep 1 millisecond so the wait time passes
- ASSERT_EQUALS(LOCK_TIMEOUT, lockerConflict.lockComplete(resId, MODE_S, 1, false));
- }
+ // Sleep 1 millisecond so the wait time passes
+ ASSERT_EQUALS(LOCK_TIMEOUT, lockerConflict.lockComplete(resId, MODE_S, 1, false));
+ }
- // Make sure that the waits/blocks are non-zero
- SingleThreadedLockStats stats;
- reportGlobalLockingStats(&stats);
+ // Make sure that the waits/blocks are non-zero
+ SingleThreadedLockStats stats;
+ reportGlobalLockingStats(&stats);
- ASSERT_EQUALS(1, stats.get(resId, MODE_X).numAcquisitions);
- ASSERT_EQUALS(0, stats.get(resId, MODE_X).numWaits);
- ASSERT_EQUALS(0, stats.get(resId, MODE_X).combinedWaitTimeMicros);
+ ASSERT_EQUALS(1, stats.get(resId, MODE_X).numAcquisitions);
+ ASSERT_EQUALS(0, stats.get(resId, MODE_X).numWaits);
+ ASSERT_EQUALS(0, stats.get(resId, MODE_X).combinedWaitTimeMicros);
- ASSERT_EQUALS(1, stats.get(resId, MODE_S).numAcquisitions);
- ASSERT_EQUALS(1, stats.get(resId, MODE_S).numWaits);
- ASSERT_GREATER_THAN(stats.get(resId, MODE_S).combinedWaitTimeMicros, 0);
- }
+ ASSERT_EQUALS(1, stats.get(resId, MODE_S).numAcquisitions);
+ ASSERT_EQUALS(1, stats.get(resId, MODE_S).numWaits);
+ ASSERT_GREATER_THAN(stats.get(resId, MODE_S).combinedWaitTimeMicros, 0);
+}
- TEST(LockStats, Reporting) {
- const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.Reporting"));
+TEST(LockStats, Reporting) {
+ const ResourceId resId(RESOURCE_COLLECTION, std::string("LockStats.Reporting"));
- resetGlobalLockStats();
+ resetGlobalLockStats();
- LockerForTests locker(MODE_IX);
- locker.lock(resId, MODE_X);
- locker.unlock(resId);
+ LockerForTests locker(MODE_IX);
+ locker.lock(resId, MODE_X);
+ locker.unlock(resId);
- // Make sure that the waits/blocks are zero
- SingleThreadedLockStats stats;
- reportGlobalLockingStats(&stats);
+ // Make sure that the waits/blocks are zero
+ SingleThreadedLockStats stats;
+ reportGlobalLockingStats(&stats);
- BSONObjBuilder builder;
- stats.report(&builder);
- }
+ BSONObjBuilder builder;
+ stats.report(&builder);
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h
index 304b92bea9c..7c19f421e5c 100644
--- a/src/mongo/db/concurrency/locker.h
+++ b/src/mongo/db/concurrency/locker.h
@@ -28,269 +28,270 @@
#pragma once
-#include <climits> // For UINT_MAX
+#include <climits> // For UINT_MAX
#include <vector>
#include "mongo/db/concurrency/lock_manager.h"
#include "mongo/db/concurrency/lock_stats.h"
namespace mongo {
-
+
+/**
+ * Interface for acquiring locks. One of those objects will have to be instantiated for each
+ * request (transaction).
+ *
+ * Lock/unlock methods must always be called from a single thread.
+ */
+class Locker {
+ MONGO_DISALLOW_COPYING(Locker);
+
+public:
+ virtual ~Locker() {}
+
+ virtual LockerId getId() const = 0;
+
+ /**
+ * This should be the first method invoked for a particular Locker object. It acquires the
+ * Global lock in the specified mode and effectively indicates the mode of the operation.
+ * This is what the lock modes on the global lock mean:
+ *
+ * IX - Regular write operation
+ * IS - Regular read operation
+ * S - Stops all *write* activity. Used for administrative operations (repl, etc).
+ * X - Stops all activity. Used for administrative operations (repl state changes,
+ * shutdown, etc).
+ *
+ * This method can be called recursively, but each call to lockGlobal must be accompanied
+ * by a call to unlockAll.
+ *
+ * @param mode Mode in which the global lock should be acquired. Also indicates the intent
+ * of the operation.
+ * @param timeoutMs How long to wait for the global lock (and the flush lock, for the MMAP
+ * V1 engine) to be acquired.
+ *
+ * @return LOCK_OK, if the global lock (and the flush lock, for the MMAP V1 engine) were
+ * acquired within the specified time bound. Otherwise, the respective failure
+ * code and neither lock will be acquired.
+ */
+ virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs = UINT_MAX) = 0;
+
+ /**
+ * Requests the global lock to be acquired in the specified mode.
+ *
+ * See the comments for lockBegin/Complete for more information on the semantics.
+ */
+ virtual LockResult lockGlobalBegin(LockMode mode) = 0;
+ virtual LockResult lockGlobalComplete(unsigned timeoutMs) = 0;
+
+ /**
+ * This method is used only in the MMAP V1 storage engine, otherwise it is a no-op. See the
+ * comments in the implementation for more details on how MMAP V1 journaling works.
+ */
+ virtual void lockMMAPV1Flush() = 0;
+
+ /**
+ * Decrements the reference count on the global lock. If the reference count on the
+ * global lock hits zero, the transaction is over, and unlockAll unlocks all other locks.
+ *
+ * @return true if this is the last endTransaction call (i.e., the global lock was
+ * released); false if there are still references on the global lock. This value
+ * should not be relied on and is only used for assertion purposes.
+ *
+ * @return false if the global lock is still held.
+ */
+ virtual bool unlockAll() = 0;
+
+ /**
+ * This is only necessary for the MMAP V1 engine and in particular, the fsyncLock command
+ * which needs to first acquire the global lock in X-mode for truncating the journal and
+ * then downgrade to S before it blocks.
+ *
+ * The downgrade is necessary in order to be nice and not block readers while under
+ * fsyncLock.
+ */
+ virtual void downgradeGlobalXtoSForMMAPV1() = 0;
+
+ /**
+ * beginWriteUnitOfWork/endWriteUnitOfWork must only be called by WriteUnitOfWork. See
+ * comments there for the semantics of units of work.
+ */
+ virtual void beginWriteUnitOfWork() = 0;
+ virtual void endWriteUnitOfWork() = 0;
+
+ virtual bool inAWriteUnitOfWork() const = 0;
+
+ /**
+ * Acquires lock on the specified resource in the specified mode and returns the outcome
+ * of the operation. See the details for LockResult for more information on what the
+ * different results mean.
+ *
+ * Each successful acquisition of a lock on a given resource increments the reference count
+ * of the lock. Therefore, each call, which returns LOCK_OK must be matched with a
+ * corresponding call to unlock.
+ *
+ * @param resId Id of the resource to be locked.
+ * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
+     * @param timeoutMs How many milliseconds to wait for the lock to be granted before
+     *              returning LOCK_TIMEOUT. This parameter defaults to UINT_MAX, which means
+     *              wait indefinitely. If 0 is passed, the request returns immediately if it
+     *              cannot be granted right away.
+     * @param checkDeadlock Whether to enable deadlock detection for this acquisition. This
+     *              parameter is in place until we can handle deadlocks at all places which
+     *              acquire locks.
+ *
+ * @return All LockResults except for LOCK_WAITING, because it blocks.
+ */
+ virtual LockResult lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs = UINT_MAX,
+ bool checkDeadlock = false) = 0;
+
+ /**
+ * Downgrades the specified resource's lock mode without changing the reference count.
+ */
+ virtual void downgrade(ResourceId resId, LockMode newMode) = 0;
+
+ /**
+     * Releases a lock previously acquired through a lock call. It is an error to try to
+     * release a lock which has not been previously acquired (invariant violation).
+ *
+ * @return true if the lock was actually released; false if only the reference count was
+ * decremented, but the lock is still held.
+ */
+ virtual bool unlock(ResourceId resId) = 0;
+
/**
- * Interface for acquiring locks. One of those objects will have to be instantiated for each
- * request (transaction).
+ * Retrieves the mode in which a lock is held or checks whether the lock held for a
+ * particular resource covers the specified mode.
*
- * Lock/unlock methods must always be called from a single thread.
+ * For example isLockHeldForMode will return true for MODE_S, if MODE_X is already held,
+ * because MODE_X covers MODE_S.
+ */
+ virtual LockMode getLockMode(ResourceId resId) const = 0;
+ virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const = 0;
+
+ // These are shortcut methods for the above calls. They however check that the entire
+ // hierarchy is properly locked and because of this they are very expensive to call.
+ // Do not use them in performance critical code paths.
+ virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const = 0;
+ virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const = 0;
+
+ /**
+ * Returns the resource that this locker is waiting/blocked on (if any). If the locker is
+ * not waiting for a resource the returned value will be invalid (isValid() == false).
+ */
+ virtual ResourceId getWaitingResource() const = 0;
+
+ /**
+ * Describes a single lock acquisition for reporting/serialization purposes.
*/
- class Locker {
- MONGO_DISALLOW_COPYING(Locker);
- public:
- virtual ~Locker() {}
-
- virtual LockerId getId() const = 0;
-
- /**
- * This should be the first method invoked for a particular Locker object. It acquires the
- * Global lock in the specified mode and effectively indicates the mode of the operation.
- * This is what the lock modes on the global lock mean:
- *
- * IX - Regular write operation
- * IS - Regular read operation
- * S - Stops all *write* activity. Used for administrative operations (repl, etc).
- * X - Stops all activity. Used for administrative operations (repl state changes,
- * shutdown, etc).
- *
- * This method can be called recursively, but each call to lockGlobal must be accompanied
- * by a call to unlockAll.
- *
- * @param mode Mode in which the global lock should be acquired. Also indicates the intent
- * of the operation.
- * @param timeoutMs How long to wait for the global lock (and the flush lock, for the MMAP
- * V1 engine) to be acquired.
- *
- * @return LOCK_OK, if the global lock (and the flush lock, for the MMAP V1 engine) were
- * acquired within the specified time bound. Otherwise, the respective failure
- * code and neither lock will be acquired.
- */
- virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs = UINT_MAX) = 0;
-
- /**
- * Requests the global lock to be acquired in the specified mode.
- *
- * See the comments for lockBegin/Complete for more information on the semantics.
- */
- virtual LockResult lockGlobalBegin(LockMode mode) = 0;
- virtual LockResult lockGlobalComplete(unsigned timeoutMs) = 0;
-
- /**
- * This method is used only in the MMAP V1 storage engine, otherwise it is a no-op. See the
- * comments in the implementation for more details on how MMAP V1 journaling works.
- */
- virtual void lockMMAPV1Flush() = 0;
-
- /**
- * Decrements the reference count on the global lock. If the reference count on the
- * global lock hits zero, the transaction is over, and unlockAll unlocks all other locks.
- *
- * @return true if this is the last endTransaction call (i.e., the global lock was
- * released); false if there are still references on the global lock. This value
- * should not be relied on and is only used for assertion purposes.
- *
- * @return false if the global lock is still held.
- */
- virtual bool unlockAll() = 0;
-
- /**
- * This is only necessary for the MMAP V1 engine and in particular, the fsyncLock command
- * which needs to first acquire the global lock in X-mode for truncating the journal and
- * then downgrade to S before it blocks.
- *
- * The downgrade is necessary in order to be nice and not block readers while under
- * fsyncLock.
- */
- virtual void downgradeGlobalXtoSForMMAPV1() = 0;
-
- /**
- * beginWriteUnitOfWork/endWriteUnitOfWork must only be called by WriteUnitOfWork. See
- * comments there for the semantics of units of work.
- */
- virtual void beginWriteUnitOfWork() = 0;
- virtual void endWriteUnitOfWork() = 0;
-
- virtual bool inAWriteUnitOfWork() const = 0;
-
- /**
- * Acquires lock on the specified resource in the specified mode and returns the outcome
- * of the operation. See the details for LockResult for more information on what the
- * different results mean.
- *
- * Each successful acquisition of a lock on a given resource increments the reference count
- * of the lock. Therefore, each call, which returns LOCK_OK must be matched with a
- * corresponding call to unlock.
- *
- * @param resId Id of the resource to be locked.
- * @param mode Mode in which the resource should be locked. Lock upgrades are allowed.
- * @param timeoutMs How many milliseconds to wait for the lock to be granted, before
- * returning LOCK_TIMEOUT. This parameter defaults to UINT_MAX, which means
- * wait infinitely. If 0 is passed, the request will return immediately, if
- * the request could not be granted right away.
- * @param checkDeadlock Whether to enable deadlock detection for this acquisition. This
- * parameter is put in place until we can handle deadlocks at all places,
- * which acquire locks.
- *
- * @return All LockResults except for LOCK_WAITING, because it blocks.
- */
- virtual LockResult lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs = UINT_MAX,
- bool checkDeadlock = false) = 0;
-
- /**
- * Downgrades the specified resource's lock mode without changing the reference count.
- */
- virtual void downgrade(ResourceId resId, LockMode newMode) = 0;
-
- /**
- * Releases a lock previously acquired through a lock call. It is an error to try to
- * release lock which has not been previously acquired (invariant violation).
- *
- * @return true if the lock was actually released; false if only the reference count was
- * decremented, but the lock is still held.
- */
- virtual bool unlock(ResourceId resId) = 0;
-
- /**
- * Retrieves the mode in which a lock is held or checks whether the lock held for a
- * particular resource covers the specified mode.
- *
- * For example isLockHeldForMode will return true for MODE_S, if MODE_X is already held,
- * because MODE_X covers MODE_S.
- */
- virtual LockMode getLockMode(ResourceId resId) const = 0;
- virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const = 0;
-
- // These are shortcut methods for the above calls. They however check that the entire
- // hierarchy is properly locked and because of this they are very expensive to call.
- // Do not use them in performance critical code paths.
- virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const = 0;
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const = 0;
-
- /**
- * Returns the resource that this locker is waiting/blocked on (if any). If the locker is
- * not waiting for a resource the returned value will be invalid (isValid() == false).
- */
- virtual ResourceId getWaitingResource() const = 0;
-
- /**
- * Describes a single lock acquisition for reporting/serialization purposes.
- */
- struct OneLock {
- // What lock resource is held?
- ResourceId resourceId;
-
- // In what mode is it held?
- LockMode mode;
-
- // Reporting/serialization order is by resourceId, which is the canonical locking order
- bool operator<(const OneLock& rhs) const {
- return resourceId < rhs.resourceId;
- }
- };
-
- /**
- * Returns information and locking statistics for this instance of the locker. Used to
- * support the db.currentOp view. This structure is not thread-safe and ideally should
- * be used only for obtaining the necessary information and then discarded instead of
- * reused.
- */
- struct LockerInfo {
- // List of high-level locks held by this locker, sorted by ResourceId
- std::vector<OneLock> locks;
-
- // If isValid(), then what lock this particular locker is sleeping on
- ResourceId waitingResource;
-
- // Lock timing statistics
- SingleThreadedLockStats stats;
- };
-
- virtual void getLockerInfo(LockerInfo* lockerInfo) const = 0;
-
- /**
- * LockSnapshot captures the state of all resources that are locked, what modes they're
- * locked in, and how many times they've been locked in that mode.
- */
- struct LockSnapshot {
- // The global lock is handled differently from all other locks.
- LockMode globalMode;
-
- // The non-global non-flush locks held, sorted by granularity. That is, locks[i] is
- // coarser or as coarse as locks[i + 1].
- std::vector<OneLock> locks;
- };
-
- /**
- * Retrieves all locks held by this transaction, and what mode they're held in.
- * Stores these locks in 'stateOut', destroying any previous state. Unlocks all locks
- * held by this transaction. This functionality is used for yielding in the MMAPV1
- * storage engine. MMAPV1 uses voluntary/cooperative lock release and reacquisition
- * in order to allow for interleaving of otherwise conflicting long-running operations.
- *
- * This functionality is also used for releasing locks on databases and collections
- * when cursors are dormant and waiting for a getMore request.
- *
- * Returns true if locks are released. It is expected that restoreLockerImpl will be called
- * in the future.
- *
- * Returns false if locks are not released. restoreLockState(...) does not need to be
- * called in this case.
- */
- virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) = 0;
-
- /**
- * Re-locks all locks whose state was stored in 'stateToRestore'.
- */
- virtual void restoreLockState(const LockSnapshot& stateToRestore) = 0;
-
- //
- // These methods are legacy from LockerImpl and will eventually go away or be converted to
- // calls into the Locker methods
- //
-
- virtual void dump() const = 0;
-
- virtual bool isW() const = 0;
- virtual bool isR() const = 0;
-
- virtual bool isLocked() const = 0;
- virtual bool isWriteLocked() const = 0;
- virtual bool isReadLocked() const = 0;
-
- /**
- * Asserts that the Locker is effectively not in use and resets the locking statistics.
- * This means, there should be no locks on it, no WUOW, etc, so it would be safe to call
- * the destructor or reuse the Locker.
- */
- virtual void assertEmptyAndReset() = 0;
-
- /**
- * Pending means we are currently trying to get a lock (could be the parallel batch writer
- * lock).
- */
- virtual bool hasLockPending() const = 0;
-
- // Used for the replication parallel log op application threads
- virtual void setIsBatchWriter(bool newValue) = 0;
- virtual bool isBatchWriter() const = 0;
-
- /**
- * A string lock is MODE_X or MODE_S.
- * These are incompatible with other locks and therefore are strong.
- */
- virtual bool hasStrongLocks() const = 0;
-
- protected:
- Locker() { }
+ struct OneLock {
+ // What lock resource is held?
+ ResourceId resourceId;
+
+ // In what mode is it held?
+ LockMode mode;
+
+ // Reporting/serialization order is by resourceId, which is the canonical locking order
+ bool operator<(const OneLock& rhs) const {
+ return resourceId < rhs.resourceId;
+ }
};
-} // namespace mongo
+ /**
+ * Returns information and locking statistics for this instance of the locker. Used to
+ * support the db.currentOp view. This structure is not thread-safe and ideally should
+ * be used only for obtaining the necessary information and then discarded instead of
+ * reused.
+ */
+ struct LockerInfo {
+ // List of high-level locks held by this locker, sorted by ResourceId
+ std::vector<OneLock> locks;
+
+ // If isValid(), then what lock this particular locker is sleeping on
+ ResourceId waitingResource;
+
+ // Lock timing statistics
+ SingleThreadedLockStats stats;
+ };
+
+ virtual void getLockerInfo(LockerInfo* lockerInfo) const = 0;
+
+ /**
+ * LockSnapshot captures the state of all resources that are locked, what modes they're
+ * locked in, and how many times they've been locked in that mode.
+ */
+ struct LockSnapshot {
+ // The global lock is handled differently from all other locks.
+ LockMode globalMode;
+
+ // The non-global non-flush locks held, sorted by granularity. That is, locks[i] is
+ // coarser or as coarse as locks[i + 1].
+ std::vector<OneLock> locks;
+ };
+
+ /**
+ * Retrieves all locks held by this transaction, and what mode they're held in.
+ * Stores these locks in 'stateOut', destroying any previous state. Unlocks all locks
+ * held by this transaction. This functionality is used for yielding in the MMAPV1
+ * storage engine. MMAPV1 uses voluntary/cooperative lock release and reacquisition
+ * in order to allow for interleaving of otherwise conflicting long-running operations.
+ *
+ * This functionality is also used for releasing locks on databases and collections
+ * when cursors are dormant and waiting for a getMore request.
+ *
+ * Returns true if locks are released. It is expected that restoreLockerImpl will be called
+ * in the future.
+ *
+ * Returns false if locks are not released. restoreLockState(...) does not need to be
+ * called in this case.
+ */
+ virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) = 0;
+
+ /**
+ * Re-locks all locks whose state was stored in 'stateToRestore'.
+ */
+ virtual void restoreLockState(const LockSnapshot& stateToRestore) = 0;
+
+ //
+ // These methods are legacy from LockerImpl and will eventually go away or be converted to
+ // calls into the Locker methods
+ //
+
+ virtual void dump() const = 0;
+
+ virtual bool isW() const = 0;
+ virtual bool isR() const = 0;
+
+ virtual bool isLocked() const = 0;
+ virtual bool isWriteLocked() const = 0;
+ virtual bool isReadLocked() const = 0;
+
+ /**
+ * Asserts that the Locker is effectively not in use and resets the locking statistics.
+ * This means, there should be no locks on it, no WUOW, etc, so it would be safe to call
+ * the destructor or reuse the Locker.
+ */
+ virtual void assertEmptyAndReset() = 0;
+
+ /**
+ * Pending means we are currently trying to get a lock (could be the parallel batch writer
+ * lock).
+ */
+ virtual bool hasLockPending() const = 0;
+
+ // Used for the replication parallel log op application threads
+ virtual void setIsBatchWriter(bool newValue) = 0;
+ virtual bool isBatchWriter() const = 0;
+
+ /**
+ * A strong lock is MODE_X or MODE_S.
+ * These are incompatible with other locks and therefore are strong.
+ */
+ virtual bool hasStrongLocks() const = 0;
+
+protected:
+ Locker() {}
+};
+
+} // namespace mongo
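
For orientation, the lock()/unlock() contract documented in this header is reference
counted: acquiring the same resource twice only bumps the count, and unlock() reports
whether the lock was actually released or merely decremented. A minimal sketch of that
behaviour (not part of the patch; the txn->lockState() accessor and the database-level
ResourceId are assumptions, and error handling is elided):

    // Each LOCK_OK acquisition must eventually be matched by an unlock().
    Locker* locker = txn->lockState();                   // assumed accessor on OperationContext
    const ResourceId resId(RESOURCE_DATABASE, StringData("test"));
    invariant(locker->lock(resId, MODE_IX) == LOCK_OK);  // reference count 1
    invariant(locker->lock(resId, MODE_IX) == LOCK_OK);  // same resource, count 2
    invariant(!locker->unlock(resId));                   // count 1: still held
    invariant(locker->unlock(resId));                    // count 0: actually released
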
diff --git a/src/mongo/db/concurrency/locker_noop.h b/src/mongo/db/concurrency/locker_noop.h
index 9bfcbc93227..e62c87c862e 100644
--- a/src/mongo/db/concurrency/locker_noop.h
+++ b/src/mongo/db/concurrency/locker_noop.h
@@ -31,144 +31,142 @@
#include "mongo/db/concurrency/locker.h"
namespace mongo {
-
- /**
- * Locker, which cannot be used to lock/unlock resources and just returns true for checks for
- * whether a particular resource is locked. Do not use it for cases where actual locking
- * behaviour is expected or locking is performed.
- */
- class LockerNoop : public Locker {
- public:
- LockerNoop() { }
- virtual LockerId getId() const { invariant(false); }
-
- virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs) {
- invariant(false);
- }
+/**
+ * Locker, which cannot be used to lock/unlock resources and just returns true for checks for
+ * whether a particular resource is locked. Do not use it for cases where actual locking
+ * behaviour is expected or locking is performed.
+ */
+class LockerNoop : public Locker {
+public:
+ LockerNoop() {}
- virtual LockResult lockGlobalBegin(LockMode mode) {
- invariant(false);
- }
+ virtual LockerId getId() const {
+ invariant(false);
+ }
- virtual LockResult lockGlobalComplete(unsigned timeoutMs) {
- invariant(false);
- }
+ virtual LockResult lockGlobal(LockMode mode, unsigned timeoutMs) {
+ invariant(false);
+ }
- virtual void lockMMAPV1Flush() {
- invariant(false);
- }
+ virtual LockResult lockGlobalBegin(LockMode mode) {
+ invariant(false);
+ }
- virtual bool unlockAll() {
- invariant(false);
- }
+ virtual LockResult lockGlobalComplete(unsigned timeoutMs) {
+ invariant(false);
+ }
- virtual void downgradeGlobalXtoSForMMAPV1() {
- invariant(false);
- }
+ virtual void lockMMAPV1Flush() {
+ invariant(false);
+ }
- virtual void beginWriteUnitOfWork() {
+ virtual bool unlockAll() {
+ invariant(false);
+ }
- }
+ virtual void downgradeGlobalXtoSForMMAPV1() {
+ invariant(false);
+ }
- virtual void endWriteUnitOfWork() {
+ virtual void beginWriteUnitOfWork() {}
- }
+ virtual void endWriteUnitOfWork() {}
- virtual bool inAWriteUnitOfWork() const {
- invariant(false);
- }
+ virtual bool inAWriteUnitOfWork() const {
+ invariant(false);
+ }
- virtual LockResult lock(ResourceId resId,
- LockMode mode,
- unsigned timeoutMs,
- bool checkDeadlock) {
- invariant(false);
- }
+ virtual LockResult lock(ResourceId resId,
+ LockMode mode,
+ unsigned timeoutMs,
+ bool checkDeadlock) {
+ invariant(false);
+ }
- virtual void downgrade(ResourceId resId, LockMode newMode) {
- invariant(false);
- }
+ virtual void downgrade(ResourceId resId, LockMode newMode) {
+ invariant(false);
+ }
- virtual bool unlock(ResourceId resId) {
- invariant(false);
- }
+ virtual bool unlock(ResourceId resId) {
+ invariant(false);
+ }
- virtual LockMode getLockMode(ResourceId resId) const {
- invariant(false);
- }
+ virtual LockMode getLockMode(ResourceId resId) const {
+ invariant(false);
+ }
- virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const {
- return true;
- }
+ virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const {
+ return true;
+ }
- virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const {
- return true;
- }
+ virtual bool isDbLockedForMode(StringData dbName, LockMode mode) const {
+ return true;
+ }
- virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const {
- return true;
- }
+ virtual bool isCollectionLockedForMode(StringData ns, LockMode mode) const {
+ return true;
+ }
- virtual ResourceId getWaitingResource() const {
- invariant(false);
- }
+ virtual ResourceId getWaitingResource() const {
+ invariant(false);
+ }
- virtual void getLockerInfo(LockerInfo* lockerInfo) const {
- invariant(false);
- }
+ virtual void getLockerInfo(LockerInfo* lockerInfo) const {
+ invariant(false);
+ }
- virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) {
- invariant(false);
- }
+ virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) {
+ invariant(false);
+ }
- virtual void restoreLockState(const LockSnapshot& stateToRestore) {
- invariant(false);
- }
+ virtual void restoreLockState(const LockSnapshot& stateToRestore) {
+ invariant(false);
+ }
- virtual void dump() const {
- invariant(false);
- }
+ virtual void dump() const {
+ invariant(false);
+ }
- virtual bool isW() const {
- invariant(false);
- }
+ virtual bool isW() const {
+ invariant(false);
+ }
- virtual bool isR() const {
- invariant(false);
- }
+ virtual bool isR() const {
+ invariant(false);
+ }
- virtual bool isLocked() const {
- invariant(false);
- }
+ virtual bool isLocked() const {
+ invariant(false);
+ }
- virtual bool isWriteLocked() const {
- return false;
- }
+ virtual bool isWriteLocked() const {
+ return false;
+ }
- virtual bool isReadLocked() const {
- invariant(false);
- }
+ virtual bool isReadLocked() const {
+ invariant(false);
+ }
- virtual void assertEmptyAndReset() {
- invariant(false);
- }
+ virtual void assertEmptyAndReset() {
+ invariant(false);
+ }
- virtual bool hasLockPending() const {
- invariant(false);
- }
+ virtual bool hasLockPending() const {
+ invariant(false);
+ }
- virtual void setIsBatchWriter(bool newValue) {
- invariant(false);
- }
+ virtual void setIsBatchWriter(bool newValue) {
+ invariant(false);
+ }
- virtual bool isBatchWriter() const {
- invariant(false);
- }
+ virtual bool isBatchWriter() const {
+ invariant(false);
+ }
- virtual bool hasStrongLocks() const {
- return false;
- }
- };
+ virtual bool hasStrongLocks() const {
+ return false;
+ }
+};
-} // namespace mongo
+} // namespace mongo
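
As the class comment warns, LockerNoop only satisfies "is this locked?" style checks;
anything that would actually acquire or release a lock hits invariant(false). A rough
sketch of that behaviour, on the assumption that the class is intended for code paths
which only consult the lock-held predicates:

    LockerNoop locker;
    invariant(locker.isDbLockedForMode("test", MODE_IX));               // always true
    invariant(locker.isCollectionLockedForMode("test.coll", MODE_IX));  // always true
    // locker.lock(resId, MODE_X) would trip invariant(false); do not use LockerNoop
    // where real concurrency control is expected.
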
diff --git a/src/mongo/db/concurrency/write_conflict_exception.cpp b/src/mongo/db/concurrency/write_conflict_exception.cpp
index 1bd6859e0c2..a0547976100 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.cpp
+++ b/src/mongo/db/concurrency/write_conflict_exception.cpp
@@ -37,48 +37,37 @@
namespace mongo {
- bool WriteConflictException::trace = false;
-
- WriteConflictException::WriteConflictException()
- : DBException( "WriteConflict", ErrorCodes::WriteConflict ) {
-
- if ( trace ) {
- printStackTrace();
- }
+bool WriteConflictException::trace = false;
+WriteConflictException::WriteConflictException()
+ : DBException("WriteConflict", ErrorCodes::WriteConflict) {
+ if (trace) {
+ printStackTrace();
}
+}
- void WriteConflictException::logAndBackoff(int attempt,
- StringData operation,
- StringData ns) {
-
- LOG(1) << "Caught WriteConflictException doing " << operation
- << " on " << ns
- << ", attempt: " << attempt << " retrying";
-
- // All numbers below chosen by guess and check against a few random benchmarks.
- if (attempt < 4) {
- // no-op
- }
- else if (attempt < 10) {
- sleepmillis(1);
- }
- else if (attempt < 100) {
- sleepmillis(5);
- }
- else {
- sleepmillis(10);
- }
-
- }
+void WriteConflictException::logAndBackoff(int attempt, StringData operation, StringData ns) {
+ LOG(1) << "Caught WriteConflictException doing " << operation << " on " << ns
+ << ", attempt: " << attempt << " retrying";
- namespace {
- // for WriteConflictException
- ExportedServerParameter<bool> TraceWCExceptionsSetting(ServerParameterSet::getGlobal(),
- "traceWriteConflictExceptions",
- &WriteConflictException::trace,
- true, // allowedToChangeAtStartup
- true); // allowedToChangeAtRuntime
+ // All numbers below chosen by guess and check against a few random benchmarks.
+ if (attempt < 4) {
+ // no-op
+ } else if (attempt < 10) {
+ sleepmillis(1);
+ } else if (attempt < 100) {
+ sleepmillis(5);
+ } else {
+ sleepmillis(10);
}
+}
+namespace {
+// for WriteConflictException
+ExportedServerParameter<bool> TraceWCExceptionsSetting(ServerParameterSet::getGlobal(),
+ "traceWriteConflictExceptions",
+ &WriteConflictException::trace,
+ true, // allowedToChangeAtStartup
+ true); // allowedToChangeAtRuntime
+}
}
diff --git a/src/mongo/db/concurrency/write_conflict_exception.h b/src/mongo/db/concurrency/write_conflict_exception.h
index e94eab741d7..5c7bcd5e87c 100644
--- a/src/mongo/db/concurrency/write_conflict_exception.h
+++ b/src/mongo/db/concurrency/write_conflict_exception.h
@@ -34,45 +34,51 @@
#include "mongo/util/assert_util.h"
-#define MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN do { int wcr__Attempts = 0; do { try
-#define MONGO_WRITE_CONFLICT_RETRY_LOOP_END(PTXN, OPSTR, NSSTR) \
- catch (const ::mongo::WriteConflictException &wce) { \
- const OperationContext* ptxn = (PTXN); \
- ++CurOp::get(ptxn)->debug().writeConflicts; \
- wce.logAndBackoff(wcr__Attempts, (OPSTR), (NSSTR)); \
- ++wcr__Attempts; \
- ptxn->recoveryUnit()->abandonSnapshot(); \
- continue; \
- } \
- break; \
- } while (true); } while (false);
+#define MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN \
+ do { \
+ int wcr__Attempts = 0; \
+ do { \
+ try
+#define MONGO_WRITE_CONFLICT_RETRY_LOOP_END(PTXN, OPSTR, NSSTR) \
+ catch (const ::mongo::WriteConflictException& wce) { \
+ const OperationContext* ptxn = (PTXN); \
+ ++CurOp::get(ptxn)->debug().writeConflicts; \
+ wce.logAndBackoff(wcr__Attempts, (OPSTR), (NSSTR)); \
+ ++wcr__Attempts; \
+ ptxn->recoveryUnit()->abandonSnapshot(); \
+ continue; \
+ } \
+ break; \
+ } \
+ while (true) \
+ ; \
+ } \
+ while (false) \
+ ;
namespace mongo {
+/**
+ * This is thrown if during a write, two or more operations conflict with each other.
+ * For example if two operations get the same version of a document, and then both try to
+ * modify that document, this exception will get thrown by one of them.
+ */
+class WriteConflictException : public DBException {
+public:
+ WriteConflictException();
+
/**
- * This is thrown if during a write, two or more operations conflict with each other.
- * For example if two operations get the same version of a document, and then both try to
- * modify that document, this exception will get thrown by one of them.
+ * Will log a message if sensible and will do an exponential backoff to make sure
+ * we don't hammer the same doc over and over.
+ * @param attempt - what attempt is this, 1 based
+ * @param operation - e.g. "update"
*/
- class WriteConflictException : public DBException {
- public:
- WriteConflictException();
-
- /**
- * Will log a message if sensible and will do an exponential backoff to make sure
- * we don't hammer the same doc over and over.
- * @param attempt - what attempt is this, 1 based
- * @param operation - e.g. "update"
- */
- static void logAndBackoff(int attempt,
- StringData operation,
- StringData ns);
-
- /**
- * If true, will call printStackTrace on every WriteConflictException created.
- * Can be set via setParameter named traceWriteConflictExceptions.
- */
- static bool trace;
- };
+ static void logAndBackoff(int attempt, StringData operation, StringData ns);
+ /**
+ * If true, will call printStackTrace on every WriteConflictException created.
+ * Can be set via setParameter named traceWriteConflictExceptions.
+ */
+ static bool trace;
+};
}
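
Reading the two macros together: BEGIN opens a nested do/while pair with a try block, and
END supplies the catch that counts the conflict on the current CurOp, backs off via
logAndBackoff(), abandons the recovery unit's snapshot and retries the body. A schematic
caller (the doUpdate() helper and the txn/nss names are placeholders, not taken from this
patch) looks like:

    // doUpdate() stands in for any write that may throw WriteConflictException;
    // txn is an OperationContext*, nss is the namespace string used for logging.
    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        WriteUnitOfWork wuow(txn);
        doUpdate(txn);
        wuow.commit();
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "update", nss);

Each retry sleeps according to the stepped backoff in logAndBackoff() before the body runs
again, so a hot document is not hammered in a tight loop.
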
diff --git a/src/mongo/db/conn_pool_options.cpp b/src/mongo/db/conn_pool_options.cpp
index 5fd4c1ffb9d..71cb840cd7e 100644
--- a/src/mongo/db/conn_pool_options.cpp
+++ b/src/mongo/db/conn_pool_options.cpp
@@ -38,41 +38,39 @@
namespace mongo {
- int ConnPoolOptions::maxConnsPerHost(200);
- int ConnPoolOptions::maxShardedConnsPerHost(200);
+int ConnPoolOptions::maxConnsPerHost(200);
+int ConnPoolOptions::maxShardedConnsPerHost(200);
- namespace {
+namespace {
- ExportedServerParameter<int> //
- maxConnsPerHostParameter(ServerParameterSet::getGlobal(),
- "connPoolMaxConnsPerHost",
- &ConnPoolOptions::maxConnsPerHost,
- true,
- false /* can't change at runtime */);
+ExportedServerParameter<int> //
+ maxConnsPerHostParameter(ServerParameterSet::getGlobal(),
+ "connPoolMaxConnsPerHost",
+ &ConnPoolOptions::maxConnsPerHost,
+ true,
+ false /* can't change at runtime */);
- ExportedServerParameter<int> //
- maxShardedConnsPerHostParameter(ServerParameterSet::getGlobal(),
- "connPoolMaxShardedConnsPerHost",
- &ConnPoolOptions::maxShardedConnsPerHost,
- true,
- false /* can't change at runtime */);
+ExportedServerParameter<int> //
+ maxShardedConnsPerHostParameter(ServerParameterSet::getGlobal(),
+ "connPoolMaxShardedConnsPerHost",
+ &ConnPoolOptions::maxShardedConnsPerHost,
+ true,
+ false /* can't change at runtime */);
- MONGO_INITIALIZER(InitializeConnectionPools)(InitializerContext* context) {
+MONGO_INITIALIZER(InitializeConnectionPools)(InitializerContext* context) {
+ // Initialize the sharded and unsharded outgoing connection pools
+ // NOTES:
+ // - All mongods and mongoses have both pools
+ // - The connection hooks for sharding are added on startup (mongos) or on first sharded
+ // operation (mongod)
- // Initialize the sharded and unsharded outgoing connection pools
- // NOTES:
- // - All mongods and mongoses have both pools
- // - The connection hooks for sharding are added on startup (mongos) or on first sharded
- // operation (mongod)
+ globalConnPool.setName("connection pool");
+ globalConnPool.setMaxPoolSize(ConnPoolOptions::maxConnsPerHost);
- globalConnPool.setName("connection pool");
- globalConnPool.setMaxPoolSize(ConnPoolOptions::maxConnsPerHost);
-
- shardConnectionPool.setName("sharded connection pool");
- shardConnectionPool.setMaxPoolSize(ConnPoolOptions::maxShardedConnsPerHost);
-
- return Status::OK();
- }
- }
+ shardConnectionPool.setName("sharded connection pool");
+ shardConnectionPool.setMaxPoolSize(ConnPoolOptions::maxShardedConnsPerHost);
+ return Status::OK();
+}
+}
}
diff --git a/src/mongo/db/conn_pool_options.h b/src/mongo/db/conn_pool_options.h
index a5402c6a367..faf10c0842c 100644
--- a/src/mongo/db/conn_pool_options.h
+++ b/src/mongo/db/conn_pool_options.h
@@ -30,25 +30,23 @@
namespace mongo {
- // NOTE:
- // The connection pools themselves are placed in different files and are currently hard to move
- // due to spaghetti dependencies.
- // TODO: Extract conn pools from driver files and shardconnection.cpp
+// NOTE:
+// The connection pools themselves are placed in different files and are currently hard to move
+// due to spaghetti dependencies.
+// TODO: Extract conn pools from driver files and shardconnection.cpp
+/**
+ * Struct namespace for connection pool options on mongos and mongod
+ */
+struct ConnPoolOptions {
/**
- * Struct namespace for connection pool options on mongos and mongod
+ * Maximum connections per host the connection pool should use
*/
- struct ConnPoolOptions {
-
- /**
- * Maximum connections per host the connection pool should use
- */
- static int maxConnsPerHost;
-
- /**
- * Maximum connections per host the sharded conn pool should use
- */
- static int maxShardedConnsPerHost;
- };
+ static int maxConnsPerHost;
+ /**
+ * Maximum connections per host the sharded conn pool should use
+ */
+ static int maxShardedConnsPerHost;
+};
}
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 3da1f2427e4..8ae58cf8d3b 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -43,559 +43,561 @@
namespace mongo {
- using std::string;
+using std::string;
+
+/**
+ * This type decorates a Client object with a stack of active CurOp objects.
+ *
+ * It encapsulates the nesting logic for curops attached to a Client, along with
+ * the notion that there is always a root CurOp attached to a Client.
+ *
+ * The stack itself is represented in the _parent pointers of the CurOp class.
+ */
+class CurOp::CurOpStack {
+ MONGO_DISALLOW_COPYING(CurOpStack);
+
+public:
+ CurOpStack() : _base(nullptr, this) {}
/**
- * This type decorates a Client object with a stack of active CurOp objects.
- *
- * It encapsulates the nesting logic for curops attached to a Client, along with
- * the notion that there is always a root CurOp attached to a Client.
- *
- * The stack itself is represented in the _parent pointers of the CurOp class.
+ * Returns the top of the CurOp stack.
*/
- class CurOp::CurOpStack {
- MONGO_DISALLOW_COPYING(CurOpStack);
- public:
- CurOpStack() : _base(nullptr, this) {}
-
- /**
- * Returns the top of the CurOp stack.
- */
- CurOp* top() const { return _top; }
-
- /**
- * Adds "curOp" to the top of the CurOp stack for a client. Called by CurOp's constructor.
- */
- void push(OperationContext* opCtx, CurOp* curOp) {
- invariant(opCtx);
- if (_opCtx) {
- invariant(_opCtx == opCtx);
- }
- else {
- _opCtx = opCtx;
- }
- stdx::lock_guard<Client> lk(*_opCtx->getClient());
- push_nolock(curOp);
- }
+ CurOp* top() const {
+ return _top;
+ }
- void push_nolock(CurOp* curOp) {
- invariant(!curOp->_parent);
- curOp->_parent = _top;
- _top = curOp;
- }
+ /**
+ * Adds "curOp" to the top of the CurOp stack for a client. Called by CurOp's constructor.
+ */
+ void push(OperationContext* opCtx, CurOp* curOp) {
+ invariant(opCtx);
+ if (_opCtx) {
+ invariant(_opCtx == opCtx);
+ } else {
+ _opCtx = opCtx;
+ }
+ stdx::lock_guard<Client> lk(*_opCtx->getClient());
+ push_nolock(curOp);
+ }
- /**
- * Pops the top off the CurOp stack for a Client. Called by CurOp's destructor.
- */
- CurOp* pop() {
- // It is not necessary to lock when popping the final item off of the curop stack. This
- // is because the item at the base of the stack is owned by the stack itself, and is not
- // popped until the stack is being destroyed. By the time the stack is being destroyed,
- // no other threads can be observing the Client that owns the stack, because it has been
- // removed from its ServiceContext's set of owned clients. Further, because the last
- // item is popped in the destructor of the stack, and that destructor runs during
- // destruction of the owning client, it is not safe to access other member variables of
- // the client during the final pop.
- const bool shouldLock = _top->_parent;
- if (shouldLock) {
- invariant(_opCtx);
- _opCtx->getClient()->lock();
- }
- invariant(_top);
- CurOp* retval = _top;
- _top = _top->_parent;
- if (shouldLock) {
- _opCtx->getClient()->unlock();
- }
- return retval;
- }
+ void push_nolock(CurOp* curOp) {
+ invariant(!curOp->_parent);
+ curOp->_parent = _top;
+ _top = curOp;
+ }
- private:
- OperationContext* _opCtx = nullptr;
+ /**
+ * Pops the top off the CurOp stack for a Client. Called by CurOp's destructor.
+ */
+ CurOp* pop() {
+ // It is not necessary to lock when popping the final item off of the curop stack. This
+ // is because the item at the base of the stack is owned by the stack itself, and is not
+ // popped until the stack is being destroyed. By the time the stack is being destroyed,
+ // no other threads can be observing the Client that owns the stack, because it has been
+ // removed from its ServiceContext's set of owned clients. Further, because the last
+ // item is popped in the destructor of the stack, and that destructor runs during
+ // destruction of the owning client, it is not safe to access other member variables of
+ // the client during the final pop.
+ const bool shouldLock = _top->_parent;
+ if (shouldLock) {
+ invariant(_opCtx);
+ _opCtx->getClient()->lock();
+ }
+ invariant(_top);
+ CurOp* retval = _top;
+ _top = _top->_parent;
+ if (shouldLock) {
+ _opCtx->getClient()->unlock();
+ }
+ return retval;
+ }
- // Top of the stack of CurOps for a Client.
- CurOp* _top = nullptr;
+private:
+ OperationContext* _opCtx = nullptr;
- // The bottom-most CurOp for a client.
- const CurOp _base;
- };
+ // Top of the stack of CurOps for a Client.
+ CurOp* _top = nullptr;
- const OperationContext::Decoration<CurOp::CurOpStack> CurOp::_curopStack =
- OperationContext::declareDecoration<CurOp::CurOpStack>();
+ // The bottom-most CurOp for a client.
+ const CurOp _base;
+};
- // Enabling the maxTimeAlwaysTimeOut fail point will cause any query or command run with a
- // valid non-zero max time to fail immediately. Any getmore operation on a cursor already
- // created with a valid non-zero max time will also fail immediately.
- //
- // This fail point cannot be used with the maxTimeNeverTimeOut fail point.
- MONGO_FP_DECLARE(maxTimeAlwaysTimeOut);
+const OperationContext::Decoration<CurOp::CurOpStack> CurOp::_curopStack =
+ OperationContext::declareDecoration<CurOp::CurOpStack>();
- // Enabling the maxTimeNeverTimeOut fail point will cause the server to never time out any
- // query, command, or getmore operation, regardless of whether a max time is set.
- //
- // This fail point cannot be used with the maxTimeAlwaysTimeOut fail point.
- MONGO_FP_DECLARE(maxTimeNeverTimeOut);
+// Enabling the maxTimeAlwaysTimeOut fail point will cause any query or command run with a
+// valid non-zero max time to fail immediately. Any getmore operation on a cursor already
+// created with a valid non-zero max time will also fail immediately.
+//
+// This fail point cannot be used with the maxTimeNeverTimeOut fail point.
+MONGO_FP_DECLARE(maxTimeAlwaysTimeOut);
+// Enabling the maxTimeNeverTimeOut fail point will cause the server to never time out any
+// query, command, or getmore operation, regardless of whether a max time is set.
+//
+// This fail point cannot be used with the maxTimeAlwaysTimeOut fail point.
+MONGO_FP_DECLARE(maxTimeNeverTimeOut);
- BSONObj CachedBSONObjBase::_tooBig =
- fromjson("{\"$msg\":\"query not recording (too large)\"}");
+BSONObj CachedBSONObjBase::_tooBig = fromjson("{\"$msg\":\"query not recording (too large)\"}");
- CurOp* CurOp::get(const OperationContext* opCtx) { return get(*opCtx); }
- CurOp* CurOp::get(const OperationContext& opCtx) { return _curopStack(opCtx).top(); }
+CurOp* CurOp::get(const OperationContext* opCtx) {
+ return get(*opCtx);
+}
- CurOp::CurOp(OperationContext* opCtx) : CurOp(opCtx, &_curopStack(opCtx)) {}
+CurOp* CurOp::get(const OperationContext& opCtx) {
+ return _curopStack(opCtx).top();
+}
- CurOp::CurOp(OperationContext* opCtx, CurOpStack* stack) : _stack(stack) {
- if (opCtx) {
- _stack->push(opCtx, this);
- }
- else {
- _stack->push_nolock(this);
- }
- _start = 0;
- _isCommand = false;
- _dbprofile = 0;
- _end = 0;
- _maxTimeMicros = 0;
- _maxTimeTracker.reset();
- _message = "";
- _progressMeter.finished();
- _numYields = 0;
- _expectedLatencyMs = 0;
- _op = 0;
- _command = NULL;
- }
+CurOp::CurOp(OperationContext* opCtx) : CurOp(opCtx, &_curopStack(opCtx)) {}
- void CurOp::setOp_inlock(int op) {
- _op = op;
+CurOp::CurOp(OperationContext* opCtx, CurOpStack* stack) : _stack(stack) {
+ if (opCtx) {
+ _stack->push(opCtx, this);
+ } else {
+ _stack->push_nolock(this);
}
-
- ProgressMeter& CurOp::setMessage_inlock(const char * msg,
- std::string name,
- unsigned long long progressMeterTotal,
- int secondsBetween) {
- if ( progressMeterTotal ) {
- if ( _progressMeter.isActive() ) {
- error() << "old _message: " << _message << " new message:" << msg;
- verify( ! _progressMeter.isActive() );
- }
- _progressMeter.reset( progressMeterTotal , secondsBetween );
- _progressMeter.setName(name);
- }
- else {
- _progressMeter.finished();
- }
- _message = msg;
- return _progressMeter;
+ _start = 0;
+ _isCommand = false;
+ _dbprofile = 0;
+ _end = 0;
+ _maxTimeMicros = 0;
+ _maxTimeTracker.reset();
+ _message = "";
+ _progressMeter.finished();
+ _numYields = 0;
+ _expectedLatencyMs = 0;
+ _op = 0;
+ _command = NULL;
+}
+
+void CurOp::setOp_inlock(int op) {
+ _op = op;
+}
+
+ProgressMeter& CurOp::setMessage_inlock(const char* msg,
+ std::string name,
+ unsigned long long progressMeterTotal,
+ int secondsBetween) {
+ if (progressMeterTotal) {
+ if (_progressMeter.isActive()) {
+ error() << "old _message: " << _message << " new message:" << msg;
+ verify(!_progressMeter.isActive());
+ }
+ _progressMeter.reset(progressMeterTotal, secondsBetween);
+ _progressMeter.setName(name);
+ } else {
+ _progressMeter.finished();
}
+ _message = msg;
+ return _progressMeter;
+}
- CurOp::~CurOp() {
- invariant(this == _stack->pop());
- }
+CurOp::~CurOp() {
+ invariant(this == _stack->pop());
+}
- void CurOp::setNS_inlock(StringData ns) {
- _ns = ns.toString();
- }
+void CurOp::setNS_inlock(StringData ns) {
+ _ns = ns.toString();
+}
- void CurOp::ensureStarted() {
- if ( _start == 0 ) {
- _start = curTimeMicros64();
+void CurOp::ensureStarted() {
+ if (_start == 0) {
+ _start = curTimeMicros64();
- // If ensureStarted() is invoked after setMaxTimeMicros(), then time limit tracking will
- // start here. This is because time limit tracking can only commence after the
- // operation is assigned a start time.
- if (_maxTimeMicros > 0) {
- _maxTimeTracker.setTimeLimit(_start, _maxTimeMicros);
- }
+ // If ensureStarted() is invoked after setMaxTimeMicros(), then time limit tracking will
+ // start here. This is because time limit tracking can only commence after the
+ // operation is assigned a start time.
+ if (_maxTimeMicros > 0) {
+ _maxTimeTracker.setTimeLimit(_start, _maxTimeMicros);
}
}
-
- void CurOp::enter_inlock(const char* ns, int dbProfileLevel) {
- ensureStarted();
- _ns = ns;
- raiseDbProfileLevel(dbProfileLevel);
- }
-
- void CurOp::raiseDbProfileLevel(int dbProfileLevel) {
- _dbprofile = std::max(dbProfileLevel, _dbprofile);
+}
+
+void CurOp::enter_inlock(const char* ns, int dbProfileLevel) {
+ ensureStarted();
+ _ns = ns;
+ raiseDbProfileLevel(dbProfileLevel);
+}
+
+void CurOp::raiseDbProfileLevel(int dbProfileLevel) {
+ _dbprofile = std::max(dbProfileLevel, _dbprofile);
+}
+
+void CurOp::reportState(BSONObjBuilder* builder) {
+ if (_start) {
+ builder->append("secs_running", elapsedSeconds());
+ builder->append("microsecs_running", static_cast<long long int>(elapsedMicros()));
}
- void CurOp::reportState(BSONObjBuilder* builder) {
+ builder->append("op", opToString(_op));
- if (_start) {
- builder->append("secs_running", elapsedSeconds() );
- builder->append("microsecs_running", static_cast<long long int>(elapsedMicros()) );
- }
+ // Fill out "ns" from our namespace member (and if it's not available, fall back to the
+ // OpDebug namespace member). We prefer our ns when set because it changes to match each
+ // accessed namespace, while _debug.ns is set once at the start of the operation. However,
+ // sometimes _ns is not yet set.
+ builder->append("ns", !_ns.empty() ? _ns : _debug.ns);
- builder->append("op", opToString(_op));
+ if (_op == dbInsert) {
+ _query.append(*builder, "insert");
+ } else {
+ _query.append(*builder, "query");
+ }
- // Fill out "ns" from our namespace member (and if it's not available, fall back to the
- // OpDebug namespace member). We prefer our ns when set because it changes to match each
- // accessed namespace, while _debug.ns is set once at the start of the operation. However,
- // sometimes _ns is not yet set.
- builder->append("ns", !_ns.empty() ? _ns : _debug.ns);
+ if (!debug().planSummary.empty()) {
+ builder->append("planSummary", debug().planSummary.toString());
+ }
- if (_op == dbInsert) {
- _query.append(*builder, "insert");
- }
- else {
- _query.append(*builder, "query");
+ if (!_message.empty()) {
+ if (_progressMeter.isActive()) {
+ StringBuilder buf;
+ buf << _message << " " << _progressMeter.toString();
+ builder->append("msg", buf.str());
+ BSONObjBuilder sub(builder->subobjStart("progress"));
+ sub.appendNumber("done", (long long)_progressMeter.done());
+ sub.appendNumber("total", (long long)_progressMeter.total());
+ sub.done();
+ } else {
+ builder->append("msg", _message);
}
+ }
- if ( !debug().planSummary.empty() ) {
- builder->append( "planSummary" , debug().planSummary.toString() );
- }
+ builder->append("numYields", _numYields);
+}
- if ( ! _message.empty() ) {
- if ( _progressMeter.isActive() ) {
- StringBuilder buf;
- buf << _message << " " << _progressMeter.toString();
- builder->append( "msg" , buf.str() );
- BSONObjBuilder sub( builder->subobjStart( "progress" ) );
- sub.appendNumber( "done" , (long long)_progressMeter.done() );
- sub.appendNumber( "total" , (long long)_progressMeter.total() );
- sub.done();
- }
- else {
- builder->append("msg" , _message);
- }
- }
+void CurOp::setMaxTimeMicros(uint64_t maxTimeMicros) {
+ _maxTimeMicros = maxTimeMicros;
- builder->append( "numYields" , _numYields );
+ if (_maxTimeMicros == 0) {
+ // 0 is "allow to run indefinitely".
+ return;
}
- void CurOp::setMaxTimeMicros(uint64_t maxTimeMicros) {
- _maxTimeMicros = maxTimeMicros;
+ // If the operation has a start time, then enable the tracker.
+ //
+ // If the operation has no start time yet, then ensureStarted() will take responsibility for
+ // enabling the tracker.
+ if (isStarted()) {
+ _maxTimeTracker.setTimeLimit(startTime(), _maxTimeMicros);
+ }
+}
- if (_maxTimeMicros == 0) {
- // 0 is "allow to run indefinitely".
- return;
- }
+bool CurOp::isMaxTimeSet() const {
+ return _maxTimeMicros != 0;
+}
- // If the operation has a start time, then enable the tracker.
- //
- // If the operation has no start time yet, then ensureStarted() will take responsibility for
- // enabling the tracker.
- if (isStarted()) {
- _maxTimeTracker.setTimeLimit(startTime(), _maxTimeMicros);
- }
+bool CurOp::maxTimeHasExpired() {
+ if (MONGO_FAIL_POINT(maxTimeNeverTimeOut)) {
+ return false;
}
-
- bool CurOp::isMaxTimeSet() const {
- return _maxTimeMicros != 0;
+ if (_maxTimeMicros > 0 && MONGO_FAIL_POINT(maxTimeAlwaysTimeOut)) {
+ return true;
}
+ return _maxTimeTracker.checkTimeLimit();
+}
- bool CurOp::maxTimeHasExpired() {
- if (MONGO_FAIL_POINT(maxTimeNeverTimeOut)) {
- return false;
- }
- if (_maxTimeMicros > 0 && MONGO_FAIL_POINT(maxTimeAlwaysTimeOut)) {
- return true;
- }
- return _maxTimeTracker.checkTimeLimit();
- }
+uint64_t CurOp::getRemainingMaxTimeMicros() const {
+ return _maxTimeTracker.getRemainingMicros();
+}
- uint64_t CurOp::getRemainingMaxTimeMicros() const {
- return _maxTimeTracker.getRemainingMicros();
- }
+CurOp::MaxTimeTracker::MaxTimeTracker() {
+ reset();
+}
- CurOp::MaxTimeTracker::MaxTimeTracker() {
- reset();
- }
+void CurOp::MaxTimeTracker::reset() {
+ _enabled = false;
+ _targetEpochMicros = 0;
+ _approxTargetServerMillis = 0;
+}
- void CurOp::MaxTimeTracker::reset() {
- _enabled = false;
- _targetEpochMicros = 0;
- _approxTargetServerMillis = 0;
- }
+void CurOp::MaxTimeTracker::setTimeLimit(uint64_t startEpochMicros, uint64_t durationMicros) {
+ dassert(durationMicros != 0);
- void CurOp::MaxTimeTracker::setTimeLimit(uint64_t startEpochMicros, uint64_t durationMicros) {
- dassert(durationMicros != 0);
+ _enabled = true;
- _enabled = true;
+ _targetEpochMicros = startEpochMicros + durationMicros;
- _targetEpochMicros = startEpochMicros + durationMicros;
+ uint64_t now = curTimeMicros64();
+ // If our accurate time source thinks time is not up yet, calculate the next target for
+ // our approximate time source.
+ if (_targetEpochMicros > now) {
+ _approxTargetServerMillis = Listener::getElapsedTimeMillis() +
+ static_cast<int64_t>((_targetEpochMicros - now) / 1000);
+ }
+ // Otherwise, set our approximate time source target such that it thinks time is already
+ // up.
+ else {
+ _approxTargetServerMillis = Listener::getElapsedTimeMillis();
+ }
+}
- uint64_t now = curTimeMicros64();
- // If our accurate time source thinks time is not up yet, calculate the next target for
- // our approximate time source.
- if (_targetEpochMicros > now) {
- _approxTargetServerMillis = Listener::getElapsedTimeMillis() +
- static_cast<int64_t>((_targetEpochMicros - now) / 1000);
- }
- // Otherwise, set our approximate time source target such that it thinks time is already
- // up.
- else {
- _approxTargetServerMillis = Listener::getElapsedTimeMillis();
- }
+bool CurOp::MaxTimeTracker::checkTimeLimit() {
+ if (!_enabled) {
+ return false;
}
- bool CurOp::MaxTimeTracker::checkTimeLimit() {
- if (!_enabled) {
- return false;
- }
+ // Does our approximate time source think time is not up yet? If so, return early.
+ if (_approxTargetServerMillis > Listener::getElapsedTimeMillis()) {
+ return false;
+ }
- // Does our approximate time source think time is not up yet? If so, return early.
- if (_approxTargetServerMillis > Listener::getElapsedTimeMillis()) {
- return false;
- }
+ uint64_t now = curTimeMicros64();
+ // Does our accurate time source think time is not up yet? If so, readjust the target for
+ // our approximate time source and return early.
+ if (_targetEpochMicros > now) {
+ _approxTargetServerMillis = Listener::getElapsedTimeMillis() +
+ static_cast<int64_t>((_targetEpochMicros - now) / 1000);
+ return false;
+ }
- uint64_t now = curTimeMicros64();
- // Does our accurate time source think time is not up yet? If so, readjust the target for
- // our approximate time source and return early.
- if (_targetEpochMicros > now) {
- _approxTargetServerMillis = Listener::getElapsedTimeMillis() +
- static_cast<int64_t>((_targetEpochMicros - now) / 1000);
- return false;
- }
+ // Otherwise, time is up.
+ return true;
+}
- // Otherwise, time is up.
- return true;
+uint64_t CurOp::MaxTimeTracker::getRemainingMicros() const {
+ if (!_enabled) {
+ // 0 is "allow to run indefinitely".
+ return 0;
}
- uint64_t CurOp::MaxTimeTracker::getRemainingMicros() const {
- if (!_enabled) {
- // 0 is "allow to run indefinitely".
- return 0;
- }
-
- // Does our accurate time source think time is up? If so, claim there is 1 microsecond
- // left for this operation.
- uint64_t now = curTimeMicros64();
- if (_targetEpochMicros <= now) {
- return 1;
- }
+ // Does our accurate time source think time is up? If so, claim there is 1 microsecond
+ // left for this operation.
+ uint64_t now = curTimeMicros64();
+ if (_targetEpochMicros <= now) {
+ return 1;
+ }
- // Otherwise, calculate remaining time.
- return _targetEpochMicros - now;
- }
-
- void OpDebug::reset() {
- extra.reset();
-
- op = 0;
- iscommand = false;
- ns = "";
- query = BSONObj();
- updateobj = BSONObj();
-
- cursorid = -1;
- ntoreturn = -1;
- ntoskip = -1;
- exhaust = false;
-
- nscanned = -1;
- nscannedObjects = -1;
- idhack = false;
- scanAndOrder = false;
- nMatched = -1;
- nModified = -1;
- ninserted = -1;
- ndeleted = -1;
- nmoved = -1;
- fastmod = false;
- fastmodinsert = false;
- upsert = false;
- cursorExhausted = false;
- keyUpdates = 0; // unsigned, so -1 not possible
- writeConflicts = 0;
- planSummary = "";
- execStats.reset();
-
- exceptionInfo.reset();
-
- executionTime = 0;
- nreturned = -1;
- responseLength = -1;
- }
-
-
-#define OPDEBUG_TOSTRING_HELP(x) if( x >= 0 ) s << " " #x ":" << (x)
-#define OPDEBUG_TOSTRING_HELP_BOOL(x) if( x ) s << " " #x ":" << (x)
- string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockStats) const {
- StringBuilder s;
- if ( iscommand )
- s << "command ";
- else
- s << opToString( op ) << ' ';
- s << ns;
-
- if ( ! query.isEmpty() ) {
- if ( iscommand ) {
- s << " command: ";
-
- Command* curCommand = curop.getCommand();
- if (curCommand) {
- mutablebson::Document cmdToLog(query, mutablebson::Document::kInPlaceDisabled);
- curCommand->redactForLogging(&cmdToLog);
- s << curCommand->name << " ";
- s << cmdToLog.toString();
- }
- else { // Should not happen but we need to handle curCommand == NULL gracefully
- s << query.toString();
- }
- }
- else {
- s << " query: ";
+ // Otherwise, calculate remaining time.
+ return _targetEpochMicros - now;
+}
+
+void OpDebug::reset() {
+ extra.reset();
+
+ op = 0;
+ iscommand = false;
+ ns = "";
+ query = BSONObj();
+ updateobj = BSONObj();
+
+ cursorid = -1;
+ ntoreturn = -1;
+ ntoskip = -1;
+ exhaust = false;
+
+ nscanned = -1;
+ nscannedObjects = -1;
+ idhack = false;
+ scanAndOrder = false;
+ nMatched = -1;
+ nModified = -1;
+ ninserted = -1;
+ ndeleted = -1;
+ nmoved = -1;
+ fastmod = false;
+ fastmodinsert = false;
+ upsert = false;
+ cursorExhausted = false;
+ keyUpdates = 0; // unsigned, so -1 not possible
+ writeConflicts = 0;
+ planSummary = "";
+ execStats.reset();
+
+ exceptionInfo.reset();
+
+ executionTime = 0;
+ nreturned = -1;
+ responseLength = -1;
+}
+
+
+#define OPDEBUG_TOSTRING_HELP(x) \
+ if (x >= 0) \
+ s << " " #x ":" << (x)
+#define OPDEBUG_TOSTRING_HELP_BOOL(x) \
+ if (x) \
+ s << " " #x ":" << (x)
+string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockStats) const {
+ StringBuilder s;
+ if (iscommand)
+ s << "command ";
+ else
+ s << opToString(op) << ' ';
+ s << ns;
+
+ if (!query.isEmpty()) {
+ if (iscommand) {
+ s << " command: ";
+
+ Command* curCommand = curop.getCommand();
+ if (curCommand) {
+ mutablebson::Document cmdToLog(query, mutablebson::Document::kInPlaceDisabled);
+ curCommand->redactForLogging(&cmdToLog);
+ s << curCommand->name << " ";
+ s << cmdToLog.toString();
+ } else { // Should not happen but we need to handle curCommand == NULL gracefully
s << query.toString();
}
+ } else {
+ s << " query: ";
+ s << query.toString();
}
+ }
- if (!planSummary.empty()) {
- s << " planSummary: " << planSummary.toString();
- }
-
- if ( ! updateobj.isEmpty() ) {
- s << " update: ";
- updateobj.toString( s );
- }
-
- OPDEBUG_TOSTRING_HELP( cursorid );
- OPDEBUG_TOSTRING_HELP( ntoreturn );
- OPDEBUG_TOSTRING_HELP( ntoskip );
- OPDEBUG_TOSTRING_HELP_BOOL( exhaust );
-
- OPDEBUG_TOSTRING_HELP( nscanned );
- OPDEBUG_TOSTRING_HELP( nscannedObjects );
- OPDEBUG_TOSTRING_HELP_BOOL( idhack );
- OPDEBUG_TOSTRING_HELP_BOOL( scanAndOrder );
- OPDEBUG_TOSTRING_HELP( nmoved );
- OPDEBUG_TOSTRING_HELP( nMatched );
- OPDEBUG_TOSTRING_HELP( nModified );
- OPDEBUG_TOSTRING_HELP( ninserted );
- OPDEBUG_TOSTRING_HELP( ndeleted );
- OPDEBUG_TOSTRING_HELP_BOOL( fastmod );
- OPDEBUG_TOSTRING_HELP_BOOL( fastmodinsert );
- OPDEBUG_TOSTRING_HELP_BOOL( upsert );
- OPDEBUG_TOSTRING_HELP_BOOL( cursorExhausted );
- OPDEBUG_TOSTRING_HELP( keyUpdates );
- OPDEBUG_TOSTRING_HELP( writeConflicts );
-
- if ( extra.len() )
- s << " " << extra.str();
-
- if ( ! exceptionInfo.empty() ) {
- s << " exception: " << exceptionInfo.msg;
- if ( exceptionInfo.code )
- s << " code:" << exceptionInfo.code;
- }
-
- s << " numYields:" << curop.numYields();
+ if (!planSummary.empty()) {
+ s << " planSummary: " << planSummary.toString();
+ }
- OPDEBUG_TOSTRING_HELP( nreturned );
- if (responseLength > 0) {
- s << " reslen:" << responseLength;
- }
+ if (!updateobj.isEmpty()) {
+ s << " update: ";
+ updateobj.toString(s);
+ }
- {
- BSONObjBuilder locks;
- lockStats.report(&locks);
- s << " locks:" << locks.obj().toString();
- }
+ OPDEBUG_TOSTRING_HELP(cursorid);
+ OPDEBUG_TOSTRING_HELP(ntoreturn);
+ OPDEBUG_TOSTRING_HELP(ntoskip);
+ OPDEBUG_TOSTRING_HELP_BOOL(exhaust);
+
+ OPDEBUG_TOSTRING_HELP(nscanned);
+ OPDEBUG_TOSTRING_HELP(nscannedObjects);
+ OPDEBUG_TOSTRING_HELP_BOOL(idhack);
+ OPDEBUG_TOSTRING_HELP_BOOL(scanAndOrder);
+ OPDEBUG_TOSTRING_HELP(nmoved);
+ OPDEBUG_TOSTRING_HELP(nMatched);
+ OPDEBUG_TOSTRING_HELP(nModified);
+ OPDEBUG_TOSTRING_HELP(ninserted);
+ OPDEBUG_TOSTRING_HELP(ndeleted);
+ OPDEBUG_TOSTRING_HELP_BOOL(fastmod);
+ OPDEBUG_TOSTRING_HELP_BOOL(fastmodinsert);
+ OPDEBUG_TOSTRING_HELP_BOOL(upsert);
+ OPDEBUG_TOSTRING_HELP_BOOL(cursorExhausted);
+ OPDEBUG_TOSTRING_HELP(keyUpdates);
+ OPDEBUG_TOSTRING_HELP(writeConflicts);
+
+ if (extra.len())
+ s << " " << extra.str();
+
+ if (!exceptionInfo.empty()) {
+ s << " exception: " << exceptionInfo.msg;
+ if (exceptionInfo.code)
+ s << " code:" << exceptionInfo.code;
+ }
- s << " " << executionTime << "ms";
+ s << " numYields:" << curop.numYields();
- return s.str();
+ OPDEBUG_TOSTRING_HELP(nreturned);
+ if (responseLength > 0) {
+ s << " reslen:" << responseLength;
}
- namespace {
- /**
- * Appends {name: obj} to the provided builder. If obj is greater than maxSize, appends a
- * string summary of obj instead of the object itself.
- */
- void appendAsObjOrString(StringData name,
- const BSONObj& obj,
- size_t maxSize,
- BSONObjBuilder* builder) {
- if (static_cast<size_t>(obj.objsize()) <= maxSize) {
- builder->append(name, obj);
- }
- else {
- // Generate an abbreviated serialization for the object, by passing false as the
- // "full" argument to obj.toString().
- const bool isArray = false;
- const bool full = false;
- std::string objToString = obj.toString(isArray, full);
- if (objToString.size() <= maxSize) {
- builder->append(name, objToString);
- }
- else {
- // objToString is still too long, so we append to the builder a truncated form
- // of objToString concatenated with "...". Instead of creating a new string
- // temporary, mutate objToString to do this (we know that we can mutate
- // characters in objToString up to and including objToString[maxSize]).
- objToString[maxSize - 3] = '.';
- objToString[maxSize - 2] = '.';
- objToString[maxSize - 1] = '.';
- builder->append(name, StringData(objToString).substr(0, maxSize));
- }
- }
- }
- } // namespace
-
-#define OPDEBUG_APPEND_NUMBER(x) if( x != -1 ) b.appendNumber( #x , (x) )
-#define OPDEBUG_APPEND_BOOL(x) if( x ) b.appendBool( #x , (x) )
- void OpDebug::append(const CurOp& curop,
- const SingleThreadedLockStats& lockStats,
- BSONObjBuilder& b) const {
+ {
+ BSONObjBuilder locks;
+ lockStats.report(&locks);
+ s << " locks:" << locks.obj().toString();
+ }
- const size_t maxElementSize = 50 * 1024;
+ s << " " << executionTime << "ms";
- b.append( "op" , iscommand ? "command" : opToString( op ) );
- b.append( "ns" , ns );
+ return s.str();
+}
- if (!query.isEmpty()) {
- appendAsObjOrString(iscommand ? "command" : "query", query, maxElementSize, &b);
- }
- else if (!iscommand && curop.haveQuery()) {
- appendAsObjOrString("query", curop.query(), maxElementSize, &b);
+namespace {
+/**
+ * Appends {name: obj} to the provided builder. If obj is greater than maxSize, appends a
+ * string summary of obj instead of the object itself.
+ */
+void appendAsObjOrString(StringData name,
+ const BSONObj& obj,
+ size_t maxSize,
+ BSONObjBuilder* builder) {
+ if (static_cast<size_t>(obj.objsize()) <= maxSize) {
+ builder->append(name, obj);
+ } else {
+ // Generate an abbreviated serialization for the object, by passing false as the
+ // "full" argument to obj.toString().
+ const bool isArray = false;
+ const bool full = false;
+ std::string objToString = obj.toString(isArray, full);
+ if (objToString.size() <= maxSize) {
+ builder->append(name, objToString);
+ } else {
+ // objToString is still too long, so we append to the builder a truncated form
+ // of objToString concatenated with "...". Instead of creating a new string
+ // temporary, mutate objToString to do this (we know that we can mutate
+ // characters in objToString up to and including objToString[maxSize]).
+ objToString[maxSize - 3] = '.';
+ objToString[maxSize - 2] = '.';
+ objToString[maxSize - 1] = '.';
+ builder->append(name, StringData(objToString).substr(0, maxSize));
}
+ }
+}
+} // namespace
+
+#define OPDEBUG_APPEND_NUMBER(x) \
+ if (x != -1) \
+ b.appendNumber(#x, (x))
+#define OPDEBUG_APPEND_BOOL(x) \
+ if (x) \
+ b.appendBool(#x, (x))
+void OpDebug::append(const CurOp& curop,
+ const SingleThreadedLockStats& lockStats,
+ BSONObjBuilder& b) const {
+ const size_t maxElementSize = 50 * 1024;
+
+ b.append("op", iscommand ? "command" : opToString(op));
+ b.append("ns", ns);
+
+ if (!query.isEmpty()) {
+ appendAsObjOrString(iscommand ? "command" : "query", query, maxElementSize, &b);
+ } else if (!iscommand && curop.haveQuery()) {
+ appendAsObjOrString("query", curop.query(), maxElementSize, &b);
+ }
- if (!updateobj.isEmpty()) {
- appendAsObjOrString("updateobj", updateobj, maxElementSize, &b);
- }
+ if (!updateobj.isEmpty()) {
+ appendAsObjOrString("updateobj", updateobj, maxElementSize, &b);
+ }
- const bool moved = (nmoved >= 1);
-
- OPDEBUG_APPEND_NUMBER( cursorid );
- OPDEBUG_APPEND_NUMBER( ntoreturn );
- OPDEBUG_APPEND_NUMBER( ntoskip );
- OPDEBUG_APPEND_BOOL( exhaust );
-
- OPDEBUG_APPEND_NUMBER( nscanned );
- OPDEBUG_APPEND_NUMBER( nscannedObjects );
- OPDEBUG_APPEND_BOOL( idhack );
- OPDEBUG_APPEND_BOOL( scanAndOrder );
- OPDEBUG_APPEND_BOOL( moved );
- OPDEBUG_APPEND_NUMBER( nmoved );
- OPDEBUG_APPEND_NUMBER( nMatched );
- OPDEBUG_APPEND_NUMBER( nModified );
- OPDEBUG_APPEND_NUMBER( ninserted );
- OPDEBUG_APPEND_NUMBER( ndeleted );
- OPDEBUG_APPEND_BOOL( fastmod );
- OPDEBUG_APPEND_BOOL( fastmodinsert );
- OPDEBUG_APPEND_BOOL( upsert );
- OPDEBUG_APPEND_BOOL( cursorExhausted );
- OPDEBUG_APPEND_NUMBER( keyUpdates );
- OPDEBUG_APPEND_NUMBER( writeConflicts );
- b.appendNumber("numYield", curop.numYields());
-
- {
- BSONObjBuilder locks(b.subobjStart("locks"));
- lockStats.report(&locks);
- }
+ const bool moved = (nmoved >= 1);
+
+ OPDEBUG_APPEND_NUMBER(cursorid);
+ OPDEBUG_APPEND_NUMBER(ntoreturn);
+ OPDEBUG_APPEND_NUMBER(ntoskip);
+ OPDEBUG_APPEND_BOOL(exhaust);
+
+ OPDEBUG_APPEND_NUMBER(nscanned);
+ OPDEBUG_APPEND_NUMBER(nscannedObjects);
+ OPDEBUG_APPEND_BOOL(idhack);
+ OPDEBUG_APPEND_BOOL(scanAndOrder);
+ OPDEBUG_APPEND_BOOL(moved);
+ OPDEBUG_APPEND_NUMBER(nmoved);
+ OPDEBUG_APPEND_NUMBER(nMatched);
+ OPDEBUG_APPEND_NUMBER(nModified);
+ OPDEBUG_APPEND_NUMBER(ninserted);
+ OPDEBUG_APPEND_NUMBER(ndeleted);
+ OPDEBUG_APPEND_BOOL(fastmod);
+ OPDEBUG_APPEND_BOOL(fastmodinsert);
+ OPDEBUG_APPEND_BOOL(upsert);
+ OPDEBUG_APPEND_BOOL(cursorExhausted);
+ OPDEBUG_APPEND_NUMBER(keyUpdates);
+ OPDEBUG_APPEND_NUMBER(writeConflicts);
+ b.appendNumber("numYield", curop.numYields());
+
+ {
+ BSONObjBuilder locks(b.subobjStart("locks"));
+ lockStats.report(&locks);
+ }
- if (!exceptionInfo.empty()) {
- exceptionInfo.append(b, "exception", "exceptionCode");
- }
+ if (!exceptionInfo.empty()) {
+ exceptionInfo.append(b, "exception", "exceptionCode");
+ }
- OPDEBUG_APPEND_NUMBER( nreturned );
- OPDEBUG_APPEND_NUMBER( responseLength );
- b.append( "millis" , executionTime );
+ OPDEBUG_APPEND_NUMBER(nreturned);
+ OPDEBUG_APPEND_NUMBER(responseLength);
+ b.append("millis", executionTime);
- execStats.append(b, "execStats");
- }
+ execStats.append(b, "execStats");
+}
} // namespace mongo
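
The CurOpStack above is what makes nested operations work: constructing a CurOp on an
OperationContext pushes it onto the owning Client's stack, and its destructor pops it so
the parent becomes CurOp::get()'s answer again. A minimal sketch of a sub-operation (the
function name and namespace string are illustrative only):

    void runSubOperation(OperationContext* txn) {
        CurOp subOp(txn);  // constructor pushes onto the Client's CurOp stack
        {
            stdx::lock_guard<Client> lk(*txn->getClient());
            subOp.setOp_inlock(dbQuery);
            subOp.setNS_inlock("test.coll");
        }
        invariant(CurOp::get(txn) == &subOp);  // sub-op is now the reported operation
        // ... perform the work attributed to this sub-operation ...
    }  // destructor pops subOp; the parent CurOp is current again
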
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index e8467f507b8..e223d888386 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -42,417 +42,472 @@
namespace mongo {
- class Client;
- class Command;
- class CurOp;
- class OperationContext;
+class Client;
+class Command;
+class CurOp;
+class OperationContext;
+
+/**
+ * stores a copy of a bson obj in a fixed size buffer
+ * if it's too big for the buffer, says "too big"
+ * useful for keeping a copy around indefinitely without wasting a lot of space or doing malloc
+ */
+class CachedBSONObjBase {
+public:
+ static BSONObj _tooBig; // { $msg : "query not recording (too large)" }
+};
+
+template <size_t BUFFER_SIZE>
+class CachedBSONObj : public CachedBSONObjBase {
+public:
+ enum { TOO_BIG_SENTINEL = 1 };
+
+ CachedBSONObj() {
+ _size = (int*)_buf;
+ reset();
+ }
+
+ void reset(int sz = 0) {
+ _lock.lock();
+ _reset(sz);
+ _lock.unlock();
+ }
+
+ void set(const BSONObj& o) {
+ scoped_spinlock lk(_lock);
+ size_t sz = o.objsize();
+ if (sz > sizeof(_buf)) {
+ _reset(TOO_BIG_SENTINEL);
+ } else {
+ memcpy(_buf, o.objdata(), sz);
+ }
+ }
+
+ int size() const {
+ return *_size;
+ }
+ bool have() const {
+ return size() > 0;
+ }
+ bool tooBig() const {
+ return size() == TOO_BIG_SENTINEL;
+ }
+
+ BSONObj get() const {
+ scoped_spinlock lk(_lock);
+ return _get();
+ }
+
+ void append(BSONObjBuilder& b, StringData name) const {
+ scoped_spinlock lk(_lock);
+ BSONObj temp = _get();
+ b.append(name, temp);
+ }
+
+private:
+ /** you have to be locked when you call this */
+ BSONObj _get() const {
+ int sz = size();
+ if (sz == 0)
+ return BSONObj();
+ if (sz == TOO_BIG_SENTINEL)
+ return _tooBig;
+ return BSONObj(_buf).copy();
+ }
+
+ /** you have to be locked when you call this */
+ void _reset(int sz) {
+ _size[0] = sz;
+ }
+
+ mutable SpinLock _lock;
+ int* _size;
+ char _buf[BUFFER_SIZE];
+};
+
+/* lifespan is different than CurOp because of recursives with DBDirectClient */
+class OpDebug {
+public:
+ OpDebug() : planSummary(2048) {
+ reset();
+ }
+
+ void reset();
+
+ std::string report(const CurOp& curop, const SingleThreadedLockStats& lockStats) const;
/**
- * stores a copy of a bson obj in a fixed size buffer
- * if its too big for the buffer, says "too big"
- * useful for keeping a copy around indefinitely without wasting a lot of space or doing malloc
+ * Appends information about the current operation to "builder"
+ *
+ * @param curop reference to the CurOp that owns this OpDebug
+ * @param lockStats lockStats object containing locking information about the operation
*/
- class CachedBSONObjBase {
- public:
- static BSONObj _tooBig; // { $msg : "query not recording (too large)" }
- };
-
- template <size_t BUFFER_SIZE>
- class CachedBSONObj : public CachedBSONObjBase {
- public:
- enum { TOO_BIG_SENTINEL = 1 } ;
-
- CachedBSONObj() {
- _size = (int*)_buf;
- reset();
- }
-
- void reset( int sz = 0 ) {
- _lock.lock();
- _reset( sz );
- _lock.unlock();
- }
-
- void set( const BSONObj& o ) {
- scoped_spinlock lk(_lock);
- size_t sz = o.objsize();
- if ( sz > sizeof(_buf) ) {
- _reset(TOO_BIG_SENTINEL);
- }
- else {
- memcpy(_buf, o.objdata(), sz );
- }
- }
+ void append(const CurOp& curop,
+ const SingleThreadedLockStats& lockStats,
+ BSONObjBuilder& builder) const;
+
+ // -------------------
+
+ StringBuilder extra; // weird things we need to fix later
+
+ // basic options
+ int op;
+ bool iscommand;
+ std::string ns;
+ BSONObj query;
+ BSONObj updateobj;
+
+ // detailed options
+ long long cursorid;
+ int ntoreturn;
+ int ntoskip;
+ bool exhaust;
+
+ // debugging/profile info
+ long long nscanned;
+ long long nscannedObjects;
+ bool idhack; // indicates short circuited code path on an update to make the update faster
+ bool scanAndOrder; // scanandorder query plan aspect was used
+ long long nMatched; // number of records that match the query
+ long long nModified; // number of records written (no no-ops)
+ long long nmoved; // updates resulted in a move (moves are expensive)
+ long long ninserted;
+ long long ndeleted;
+ bool fastmod;
+ bool fastmodinsert; // upsert of an $operation. builds a default object
+ bool upsert; // true if the update actually did an insert
+ bool cursorExhausted; // true if the cursor has been closed at the end of a find/getMore operation
+ int keyUpdates;
+ long long writeConflicts;
+ ThreadSafeString planSummary; // a brief std::string describing the query solution
+
+ // New Query Framework debugging/profiling info
+ // TODO: should this really be an opaque BSONObj? Not sure.
+ CachedBSONObj<4096> execStats;
+
+ // error handling
+ ExceptionInfo exceptionInfo;
+
+ // response info
+ int executionTime;
+ int nreturned;
+ int responseLength;
+};
+
+/**
+ * Container for data used to report information about an OperationContext.
+ *
+ * Every OperationContext in a server with CurOp support has a stack of CurOp
+ * objects. The entry at the top of the stack is used to record timing and
+ * resource statistics for the executing operation or suboperation.
+ *
+ * All of the accessor methods on CurOp may be called by the thread executing
+ * the associated OperationContext at any time, or by other threads that have
+ * locked the context's owning Client object.
+ *
+ * The mutator methods on CurOp whose names end _inlock may only be called by the thread
+ * executing the associated OperationContext and Client, and only when that thread has also
+ * locked the Client object. All other mutators may only be called by the thread executing
+ * CurOp, but do not require holding the Client lock. The exception to this is the kill()
+ * method, which is self-synchronizing.
+ *
+ * The OpDebug member of a CurOp, accessed via the debug() accessor should *only* be accessed
+ * from the thread executing an operation, and as a result its fields may be accessed without
+ * any synchronization.
+ */
+class CurOp {
+ MONGO_DISALLOW_COPYING(CurOp);
- int size() const { return *_size; }
- bool have() const { return size() > 0; }
- bool tooBig() const { return size() == TOO_BIG_SENTINEL; }
+public:
+ static CurOp* get(const OperationContext* opCtx);
+ static CurOp* get(const OperationContext& opCtx);
- BSONObj get() const {
- scoped_spinlock lk(_lock);
- return _get();
- }
+ /**
+ * Constructs a nested CurOp at the top of the given "opCtx"'s CurOp stack.
+ */
+ explicit CurOp(OperationContext* opCtx);
+ ~CurOp();
- void append( BSONObjBuilder& b , StringData name ) const {
- scoped_spinlock lk(_lock);
- BSONObj temp = _get();
- b.append( name , temp );
- }
+ bool haveQuery() const {
+ return _query.have();
+ }
+ BSONObj query() const {
+ return _query.get();
+ }
+ void appendQuery(BSONObjBuilder& b, StringData name) const {
+ _query.append(b, name);
+ }
- private:
- /** you have to be locked when you call this */
- BSONObj _get() const {
- int sz = size();
- if ( sz == 0 )
- return BSONObj();
- if ( sz == TOO_BIG_SENTINEL )
- return _tooBig;
- return BSONObj( _buf ).copy();
- }
+ void enter_inlock(const char* ns, int dbProfileLevel);
- /** you have to be locked when you call this */
- void _reset( int sz ) { _size[0] = sz; }
+ /**
+ * Sets the type of the current operation to "op".
+ */
+ void setOp_inlock(int op);
- mutable SpinLock _lock;
- int * _size;
- char _buf[BUFFER_SIZE];
- };
+ /**
+ * Marks the current operation as being a command.
+ */
+ void markCommand_inlock() {
+ _isCommand = true;
+ }
- /* lifespan is different than CurOp because of recursives with DBDirectClient */
- class OpDebug {
- public:
- OpDebug() : planSummary(2048) { reset(); }
+ /**
+ * Returns a structure containing data used for profiling, accessed only by a thread
+ * currently executing the operation context associated with this CurOp.
+ */
+ OpDebug& debug() {
+ return _debug;
+ }
- void reset();
+ /**
+ * Gets the name of the namespace on which the current operation operates.
+ */
+ std::string getNS() const {
+ return _ns;
+ }
- std::string report(const CurOp& curop, const SingleThreadedLockStats& lockStats) const;
+ bool shouldDBProfile(int ms) const {
+ if (_dbprofile <= 0)
+ return false;
- /**
- * Appends information about the current operation to "builder"
- *
- * @param curop reference to the CurOp that owns this OpDebug
- * @param lockStats lockStats object containing locking information about the operation
- */
- void append(const CurOp& curop,
- const SingleThreadedLockStats& lockStats,
- BSONObjBuilder& builder) const;
-
- // -------------------
-
- StringBuilder extra; // weird things we need to fix later
-
- // basic options
- int op;
- bool iscommand;
- std::string ns;
- BSONObj query;
- BSONObj updateobj;
-
- // detailed options
- long long cursorid;
- int ntoreturn;
- int ntoskip;
- bool exhaust;
-
- // debugging/profile info
- long long nscanned;
- long long nscannedObjects;
- bool idhack; // indicates short circuited code path on an update to make the update faster
- bool scanAndOrder; // scanandorder query plan aspect was used
- long long nMatched; // number of records that match the query
- long long nModified; // number of records written (no no-ops)
- long long nmoved; // updates resulted in a move (moves are expensive)
- long long ninserted;
- long long ndeleted;
- bool fastmod;
- bool fastmodinsert; // upsert of an $operation. builds a default object
- bool upsert; // true if the update actually did an insert
- bool cursorExhausted; // true if the cursor has been closed at end a find/getMore operation
- int keyUpdates;
- long long writeConflicts;
- ThreadSafeString planSummary; // a brief std::string describing the query solution
-
- // New Query Framework debugging/profiling info
- // TODO: should this really be an opaque BSONObj? Not sure.
- CachedBSONObj<4096> execStats;
-
- // error handling
- ExceptionInfo exceptionInfo;
-
- // response info
- int executionTime;
- int nreturned;
- int responseLength;
- };
+ return _dbprofile >= 2 || ms >= serverGlobalParams.slowMS;
+ }
/**
- * Container for data used to report information about an OperationContext.
- *
- * Every OperationContext in a server with CurOp support has a stack of CurOp
- * objects. The entry at the top of the stack is used to record timing and
- * resource statistics for the executing operation or suboperation.
+ * Raises the profiling level for this operation to "dbProfileLevel" if it was previously
+ * less than "dbProfileLevel".
*
- * All of the accessor methods on CurOp may be called by the thread executing
- * the associated OperationContext at any time, or by other threads that have
- * locked the context's owning Client object.
- *
- * The mutator methods on CurOp whose names end _inlock may only be called by the thread
- * executing the associated OperationContext and Client, and only when that thread has also
- * locked the Client object. All other mutators may only be called by the thread executing
- * CurOp, but do not require holding the Client lock. The exception to this is the kill()
- * method, which is self-synchronizing.
- *
- * The OpDebug member of a CurOp, accessed via the debug() accessor should *only* be accessed
- * from the thread executing an operation, and as a result its fields may be accessed without
- * any synchronization.
+ * This belongs on OpDebug, and so does not have the _inlock suffix.
*/
- class CurOp {
- MONGO_DISALLOW_COPYING(CurOp);
- public:
- static CurOp* get(const OperationContext* opCtx);
- static CurOp* get(const OperationContext& opCtx);
-
- /**
- * Constructs a nested CurOp at the top of the given "opCtx"'s CurOp stack.
- */
- explicit CurOp(OperationContext* opCtx);
- ~CurOp();
-
- bool haveQuery() const { return _query.have(); }
- BSONObj query() const { return _query.get(); }
- void appendQuery( BSONObjBuilder& b , StringData name ) const { _query.append( b , name ); }
+ void raiseDbProfileLevel(int dbProfileLevel);
- void enter_inlock(const char* ns, int dbProfileLevel);
-
- /**
- * Sets the type of the current operation to "op".
- */
- void setOp_inlock(int op);
-
- /**
- * Marks the current operation as being a command.
- */
- void markCommand_inlock() { _isCommand = true; }
+ /**
+ * Gets the type of the current operation.
+ */
+ int getOp() const {
+ return _op;
+ }
- /**
- * Returns a structure containing data used for profiling, accessed only by a thread
- * currently executing the operation context associated with this CurOp.
- */
- OpDebug& debug() { return _debug; }
+ /**
+ * Returns true if the current operation is known to be a command.
+ */
+ bool isCommand() const {
+ return _isCommand;
+ }
- /**
- * Gets the name of the namespace on which the current operation operates.
- */
- std::string getNS() const { return _ns; }
+ //
+ // Methods for controlling CurOp "max time".
+ //
- bool shouldDBProfile( int ms ) const {
- if ( _dbprofile <= 0 )
- return false;
+ /**
+     * Sets the amount of time this operation should be allowed to run, in microseconds.
+     * The special value 0 means the operation may run indefinitely.
+ */
+ void setMaxTimeMicros(uint64_t maxTimeMicros);
- return _dbprofile >= 2 || ms >= serverGlobalParams.slowMS;
- }
+ /**
+ * Returns true if a time limit has been set on this operation, and false otherwise.
+ */
+ bool isMaxTimeSet() const;
- /**
- * Raises the profiling level for this operation to "dbProfileLevel" if it was previously
- * less than "dbProfileLevel".
- *
- * This belongs on OpDebug, and so does not have the _inlock suffix.
- */
- void raiseDbProfileLevel(int dbProfileLevel);
+ /**
+ * Checks whether this operation has been running longer than its time limit. Returns
+ * false if not, or if the operation has no time limit.
+ */
+ bool maxTimeHasExpired();
- /**
- * Gets the type of the current operation.
- */
- int getOp() const { return _op; }
+ /**
+ * Returns the number of microseconds remaining for this operation's time limit, or the
+ * special value 0 if the operation has no time limit.
+ *
+ * Calling this method is more expensive than calling its sibling "maxTimeHasExpired()",
+ * since an accurate measure of remaining time needs to be calculated.
+ */
+ uint64_t getRemainingMaxTimeMicros() const;
+
+ //
+ // Methods for getting/setting elapsed time.
+ //
+
+ void ensureStarted();
+ bool isStarted() const {
+ return _start > 0;
+ }
+ long long startTime() { // micros
+ ensureStarted();
+ return _start;
+ }
+ void done() {
+ _end = curTimeMicros64();
+ }
+
+ long long totalTimeMicros() {
+ massert(12601, "CurOp not marked done yet", _end);
+ return _end - startTime();
+ }
+ int totalTimeMillis() {
+ return (int)(totalTimeMicros() / 1000);
+ }
+ long long elapsedMicros() {
+ return curTimeMicros64() - startTime();
+ }
+ int elapsedMillis() {
+ return (int)(elapsedMicros() / 1000);
+ }
+ int elapsedSeconds() {
+ return elapsedMillis() / 1000;
+ }
+
+ void setQuery_inlock(const BSONObj& query) {
+ _query.set(query);
+ }
+
+ Command* getCommand() const {
+ return _command;
+ }
+ void setCommand_inlock(Command* command) {
+ _command = command;
+ }
- /**
- * Returns true if the current operation is known to be a command.
- */
- bool isCommand() const { return _isCommand; }
+ /**
+ * Appends information about this CurOp to "builder".
+ *
+ * If called from a thread other than the one executing the operation associated with this
+ * CurOp, it is necessary to lock the associated Client object before executing this method.
+ */
+ void reportState(BSONObjBuilder* builder);
- //
- // Methods for controlling CurOp "max time".
- //
+ /**
+ * Sets the message and the progress meter for this CurOp.
+ *
+ * While it is necessary to hold the lock while this method executes, the
+ * "hit" and "finished" methods of ProgressMeter may be called safely from
+ * the thread executing the operation without locking the Client.
+ */
+ ProgressMeter& setMessage_inlock(const char* msg,
+ std::string name = "Progress",
+ unsigned long long progressMeterTotal = 0,
+ int secondsBetween = 3);
- /**
- * Sets the amount of time operation this should be allowed to run, units of microseconds.
- * The special value 0 is "allow to run indefinitely".
- */
- void setMaxTimeMicros(uint64_t maxTimeMicros);
+ /**
+ * Gets the message for this CurOp.
+ */
+ const std::string& getMessage() const {
+ return _message;
+ }
+ const ProgressMeter& getProgressMeter() {
+ return _progressMeter;
+ }
+ CurOp* parent() const {
+ return _parent;
+ }
+ void yielded() {
+ _numYields++;
+ } // Should be _inlock()?
- /**
- * Returns true if a time limit has been set on this operation, and false otherwise.
- */
- bool isMaxTimeSet() const;
+ /**
+ * Returns the number of times yielded() was called. Callers on threads other
+ * than the one executing the operation must lock the client.
+ */
+ int numYields() const {
+ return _numYields;
+ }
- /**
- * Checks whether this operation has been running longer than its time limit. Returns
- * false if not, or if the operation has no time limit.
- */
- bool maxTimeHasExpired();
+ long long getExpectedLatencyMs() const {
+ return _expectedLatencyMs;
+ }
+ void setExpectedLatencyMs(long long latency) {
+ _expectedLatencyMs = latency;
+ }
- /**
- * Returns the number of microseconds remaining for this operation's time limit, or the
- * special value 0 if the operation has no time limit.
- *
- * Calling this method is more expensive than calling its sibling "maxTimeHasExpired()",
- * since an accurate measure of remaining time needs to be calculated.
- */
- uint64_t getRemainingMaxTimeMicros() const;
+ /**
+     * This should be used very sparingly; generally the Context sets this up,
+     * but sometimes you want to do it ahead of time.
+ */
+ void setNS_inlock(StringData ns);
+
+private:
+ class CurOpStack;
+
+ static const OperationContext::Decoration<CurOpStack> _curopStack;
+
+ CurOp(OperationContext*, CurOpStack*);
+
+ CurOpStack* _stack;
+ CurOp* _parent = nullptr;
+ Command* _command;
+ long long _start;
+ long long _end;
+ int _op;
+ bool _isCommand;
+ int _dbprofile; // 0=off, 1=slow, 2=all
+ std::string _ns;
+ CachedBSONObj<512> _query; // CachedBSONObj is thread safe
+ OpDebug _debug;
+ std::string _message;
+ ProgressMeter _progressMeter;
+ int _numYields;
+
+    // How much "extra" time a query might take: a writebacklisten, for example,
+    // blocks for 30s, so this should be 30000 in that case.
+ long long _expectedLatencyMs;
+
+ // Time limit for this operation. 0 if the operation has no time limit.
+ uint64_t _maxTimeMicros;
+
+ /** Nested class that implements tracking of a time limit for a CurOp object. */
+ class MaxTimeTracker {
+ MONGO_DISALLOW_COPYING(MaxTimeTracker);
- //
- // Methods for getting/setting elapsed time.
- //
+ public:
+ /** Newly-constructed MaxTimeTracker objects have the time limit disabled. */
+ MaxTimeTracker();
- void ensureStarted();
- bool isStarted() const { return _start > 0; }
- long long startTime() { // micros
- ensureStarted();
- return _start;
- }
- void done() {
- _end = curTimeMicros64();
- }
+ /** Disables the time tracker. */
+ void reset();
- long long totalTimeMicros() {
- massert( 12601 , "CurOp not marked done yet" , _end );
- return _end - startTime();
+ /** Returns whether or not time tracking is enabled. */
+ bool isEnabled() const {
+ return _enabled;
}
- int totalTimeMillis() { return (int) (totalTimeMicros() / 1000); }
- long long elapsedMicros() {
- return curTimeMicros64() - startTime();
- }
- int elapsedMillis() {
- return (int) (elapsedMicros() / 1000);
- }
- int elapsedSeconds() { return elapsedMillis() / 1000; }
-
- void setQuery_inlock(const BSONObj& query) { _query.set( query ); }
-
- Command * getCommand() const { return _command; }
- void setCommand_inlock(Command* command) { _command = command; }
-
- /**
- * Appends information about this CurOp to "builder".
- *
- * If called from a thread other than the one executing the operation associated with this
- * CurOp, it is necessary to lock the associated Client object before executing this method.
- */
- void reportState(BSONObjBuilder* builder);
/**
- * Sets the message and the progress meter for this CurOp.
+ * Enables time tracking. The time limit is set to be "durationMicros" microseconds
+ * from "startEpochMicros" (units of microseconds since the epoch).
*
- * While it is necessary to hold the lock while this method executes, the
- * "hit" and "finished" methods of ProgressMeter may be called safely from
- * the thread executing the operation without locking the Client.
+ * "durationMicros" must be nonzero.
*/
- ProgressMeter& setMessage_inlock(const char * msg,
- std::string name = "Progress",
- unsigned long long progressMeterTotal = 0,
- int secondsBetween = 3);
+ void setTimeLimit(uint64_t startEpochMicros, uint64_t durationMicros);
/**
- * Gets the message for this CurOp.
+ * Checks whether the time limit has been hit. Returns false if not, or if time
+ * tracking is disabled.
*/
- const std::string& getMessage() const { return _message; }
- const ProgressMeter& getProgressMeter() { return _progressMeter; }
- CurOp *parent() const { return _parent; }
- void yielded() { _numYields++; } // Should be _inlock()?
+ bool checkTimeLimit();
/**
- * Returns the number of times yielded() was called. Callers on threads other
- * than the one executing the operation must lock the client.
- */
- int numYields() const { return _numYields; }
-
- long long getExpectedLatencyMs() const { return _expectedLatencyMs; }
- void setExpectedLatencyMs( long long latency ) { _expectedLatencyMs = latency; }
-
- /**
- * this should be used very sparingly
- * generally the Context should set this up
- * but sometimes you want to do it ahead of time
+ * Returns the number of microseconds remaining for the time limit, or the special
+ * value 0 if time tracking is disabled.
+ *
+     * Calling this method is more expensive than calling its sibling "checkTimeLimit()",
+ * since an accurate measure of remaining time needs to be calculated.
*/
- void setNS_inlock( StringData ns );
+ uint64_t getRemainingMicros() const;
private:
- class CurOpStack;
-
- static const OperationContext::Decoration<CurOpStack> _curopStack;
-
- CurOp(OperationContext*, CurOpStack*);
-
- CurOpStack* _stack;
- CurOp* _parent = nullptr;
- Command * _command;
- long long _start;
- long long _end;
- int _op;
- bool _isCommand;
- int _dbprofile; // 0=off, 1=slow, 2=all
- std::string _ns;
- CachedBSONObj<512> _query; // CachedBSONObj is thread safe
- OpDebug _debug;
- std::string _message;
- ProgressMeter _progressMeter;
- int _numYields;
-
- // this is how much "extra" time a query might take
- // a writebacklisten for example will block for 30s
- // so this should be 30000 in that case
- long long _expectedLatencyMs;
-
- // Time limit for this operation. 0 if the operation has no time limit.
- uint64_t _maxTimeMicros;
-
- /** Nested class that implements tracking of a time limit for a CurOp object. */
- class MaxTimeTracker {
- MONGO_DISALLOW_COPYING(MaxTimeTracker);
- public:
- /** Newly-constructed MaxTimeTracker objects have the time limit disabled. */
- MaxTimeTracker();
-
- /** Disables the time tracker. */
- void reset();
-
- /** Returns whether or not time tracking is enabled. */
- bool isEnabled() const { return _enabled; }
-
- /**
- * Enables time tracking. The time limit is set to be "durationMicros" microseconds
- * from "startEpochMicros" (units of microseconds since the epoch).
- *
- * "durationMicros" must be nonzero.
- */
- void setTimeLimit(uint64_t startEpochMicros, uint64_t durationMicros);
-
- /**
- * Checks whether the time limit has been hit. Returns false if not, or if time
- * tracking is disabled.
- */
- bool checkTimeLimit();
-
- /**
- * Returns the number of microseconds remaining for the time limit, or the special
- * value 0 if time tracking is disabled.
- *
- * Calling this method is more expensive than calling its sibling "checkInterval()",
- * since an accurate measure of remaining time needs to be calculated.
- */
- uint64_t getRemainingMicros() const;
- private:
- // Whether or not time tracking is enabled for this operation.
- bool _enabled;
-
- // Point in time at which the time limit is hit. Units of microseconds since the
- // epoch.
- uint64_t _targetEpochMicros;
-
- // Approximate point in time at which the time limit is hit. Units of milliseconds
- // since the server process was started.
- int64_t _approxTargetServerMillis;
- } _maxTimeTracker;
-
- };
+ // Whether or not time tracking is enabled for this operation.
+ bool _enabled;
+
+ // Point in time at which the time limit is hit. Units of microseconds since the
+ // epoch.
+ uint64_t _targetEpochMicros;
+
+ // Approximate point in time at which the time limit is hit. Units of milliseconds
+ // since the server process was started.
+ int64_t _approxTargetServerMillis;
+ } _maxTimeTracker;
+};
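setMaxTimeMicros(), maxTimeHasExpired() and getRemainingMaxTimeMicros() are backed by the nested MaxTimeTracker, which turns a duration into a fixed deadline and keeps the frequent expiry check cheap by comparing against a coarse millisecond target, only computing an accurate remainder when asked. A rough standalone analogue using std::chrono::steady_clock; in this sketch both checks cost about the same, so the coarse-versus-accurate split is exactly the part it does not reproduce, and the class name is made up:

#include <chrono>
#include <cstdint>

// Illustrative deadline tracker: a duration of 0 means "no limit", mirroring
// the convention documented in the header above.
class DeadlineTracker {
public:
    void setTimeLimit(uint64_t durationMicros) {
        _enabled = (durationMicros != 0);
        _deadline = std::chrono::steady_clock::now() +
            std::chrono::duration_cast<std::chrono::steady_clock::duration>(
                std::chrono::microseconds(durationMicros));
    }

    void reset() {
        _enabled = false;
    }
    bool isEnabled() const {
        return _enabled;
    }

    // The frequent, cheap question: has the deadline passed? False when disabled.
    bool expired() const {
        return _enabled && std::chrono::steady_clock::now() >= _deadline;
    }

    // The occasional, more expensive question: microseconds remaining (0 when disabled).
    uint64_t remainingMicros() const {
        if (!_enabled)
            return 0;
        auto left = std::chrono::duration_cast<std::chrono::microseconds>(
            _deadline - std::chrono::steady_clock::now());
        return left.count() > 0 ? static_cast<uint64_t>(left.count()) : 0;
    }

private:
    bool _enabled = false;
    std::chrono::steady_clock::time_point _deadline{};
};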
}
diff --git a/src/mongo/db/curop_metrics.cpp b/src/mongo/db/curop_metrics.cpp
index d0cde25d225..1319c481ede 100644
--- a/src/mongo/db/curop_metrics.cpp
+++ b/src/mongo/db/curop_metrics.cpp
@@ -33,58 +33,58 @@
namespace mongo {
namespace {
- Counter64 returnedCounter;
- Counter64 insertedCounter;
- Counter64 updatedCounter;
- Counter64 deletedCounter;
- Counter64 scannedCounter;
- Counter64 scannedObjectCounter;
+Counter64 returnedCounter;
+Counter64 insertedCounter;
+Counter64 updatedCounter;
+Counter64 deletedCounter;
+Counter64 scannedCounter;
+Counter64 scannedObjectCounter;
- ServerStatusMetricField<Counter64> displayReturned("document.returned", &returnedCounter);
- ServerStatusMetricField<Counter64> displayUpdated("document.updated", &updatedCounter);
- ServerStatusMetricField<Counter64> displayInserted("document.inserted", &insertedCounter);
- ServerStatusMetricField<Counter64> displayDeleted("document.deleted", &deletedCounter);
- ServerStatusMetricField<Counter64> displayScanned("queryExecutor.scanned", &scannedCounter);
- ServerStatusMetricField<Counter64> displayScannedObjects("queryExecutor.scannedObjects",
- &scannedObjectCounter);
+ServerStatusMetricField<Counter64> displayReturned("document.returned", &returnedCounter);
+ServerStatusMetricField<Counter64> displayUpdated("document.updated", &updatedCounter);
+ServerStatusMetricField<Counter64> displayInserted("document.inserted", &insertedCounter);
+ServerStatusMetricField<Counter64> displayDeleted("document.deleted", &deletedCounter);
+ServerStatusMetricField<Counter64> displayScanned("queryExecutor.scanned", &scannedCounter);
+ServerStatusMetricField<Counter64> displayScannedObjects("queryExecutor.scannedObjects",
+ &scannedObjectCounter);
- Counter64 idhackCounter;
- Counter64 scanAndOrderCounter;
- Counter64 fastmodCounter;
- Counter64 writeConflictsCounter;
+Counter64 idhackCounter;
+Counter64 scanAndOrderCounter;
+Counter64 fastmodCounter;
+Counter64 writeConflictsCounter;
- ServerStatusMetricField<Counter64> displayIdhack("operation.idhack", &idhackCounter);
- ServerStatusMetricField<Counter64> displayScanAndOrder("operation.scanAndOrder",
- &scanAndOrderCounter);
- ServerStatusMetricField<Counter64> displayFastMod("operation.fastmod", &fastmodCounter);
- ServerStatusMetricField<Counter64> displayWriteConflicts("operation.writeConflicts",
- &writeConflictsCounter);
+ServerStatusMetricField<Counter64> displayIdhack("operation.idhack", &idhackCounter);
+ServerStatusMetricField<Counter64> displayScanAndOrder("operation.scanAndOrder",
+ &scanAndOrderCounter);
+ServerStatusMetricField<Counter64> displayFastMod("operation.fastmod", &fastmodCounter);
+ServerStatusMetricField<Counter64> displayWriteConflicts("operation.writeConflicts",
+ &writeConflictsCounter);
} // namespace
- void recordCurOpMetrics(OperationContext* opCtx) {
- const OpDebug& debug = CurOp::get(opCtx)->debug();
- if (debug.nreturned > 0)
- returnedCounter.increment(debug.nreturned);
- if (debug.ninserted > 0)
- insertedCounter.increment(debug.ninserted);
- if (debug.nMatched > 0)
- updatedCounter.increment(debug.nMatched);
- if (debug.ndeleted > 0)
- deletedCounter.increment(debug.ndeleted);
- if (debug.nscanned > 0)
- scannedCounter.increment(debug.nscanned);
- if (debug.nscannedObjects > 0)
- scannedObjectCounter.increment(debug.nscannedObjects);
+void recordCurOpMetrics(OperationContext* opCtx) {
+ const OpDebug& debug = CurOp::get(opCtx)->debug();
+ if (debug.nreturned > 0)
+ returnedCounter.increment(debug.nreturned);
+ if (debug.ninserted > 0)
+ insertedCounter.increment(debug.ninserted);
+ if (debug.nMatched > 0)
+ updatedCounter.increment(debug.nMatched);
+ if (debug.ndeleted > 0)
+ deletedCounter.increment(debug.ndeleted);
+ if (debug.nscanned > 0)
+ scannedCounter.increment(debug.nscanned);
+ if (debug.nscannedObjects > 0)
+ scannedObjectCounter.increment(debug.nscannedObjects);
- if (debug.idhack)
- idhackCounter.increment();
- if (debug.scanAndOrder)
- scanAndOrderCounter.increment();
- if (debug.fastmod)
- fastmodCounter.increment();
- if (debug.writeConflicts)
- writeConflictsCounter.increment(debug.writeConflicts);
- }
+ if (debug.idhack)
+ idhackCounter.increment();
+ if (debug.scanAndOrder)
+ scanAndOrderCounter.increment();
+ if (debug.fastmod)
+ fastmodCounter.increment();
+ if (debug.writeConflicts)
+ writeConflictsCounter.increment(debug.writeConflicts);
+}
} // namespace mongo
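recordCurOpMetrics() above folds the per-operation OpDebug fields into process-wide counters, and it touches a counter only when the operation actually used it, so cheap operations stay cheap. A self-contained sketch of that aggregation step with std::atomic counters; it stands in for the Counter64/ServerStatusMetricField machinery rather than using that API, and the fields shown are a small illustrative subset:

#include <atomic>
#include <cstdint>

namespace {
std::atomic<int64_t> returnedCounter{0};
std::atomic<int64_t> scannedCounter{0};
std::atomic<int64_t> writeConflictsCounter{0};
}  // namespace

// Per-operation debug snapshot, a tiny stand-in for OpDebug.
struct OpStats {
    int64_t nreturned = 0;
    int64_t nscanned = 0;
    int64_t writeConflicts = 0;
};

// Fold one finished operation into the global counters; skip untouched fields
// so operations that did nothing in a category cost nothing here.
void recordOpStats(const OpStats& s) {
    if (s.nreturned > 0)
        returnedCounter.fetch_add(s.nreturned, std::memory_order_relaxed);
    if (s.nscanned > 0)
        scannedCounter.fetch_add(s.nscanned, std::memory_order_relaxed);
    if (s.writeConflicts > 0)
        writeConflictsCounter.fetch_add(s.writeConflicts, std::memory_order_relaxed);
}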
diff --git a/src/mongo/db/curop_metrics.h b/src/mongo/db/curop_metrics.h
index 42429f981be..b95a39f8a17 100644
--- a/src/mongo/db/curop_metrics.h
+++ b/src/mongo/db/curop_metrics.h
@@ -30,8 +30,8 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
- void recordCurOpMetrics(OperationContext* opCtx);
+void recordCurOpMetrics(OperationContext* opCtx);
} // namespace mongo
diff --git a/src/mongo/db/curop_test.cpp b/src/mongo/db/curop_test.cpp
index 0edbde43d31..098ead0674f 100644
--- a/src/mongo/db/curop_test.cpp
+++ b/src/mongo/db/curop_test.cpp
@@ -40,63 +40,63 @@
namespace mongo {
- namespace {
+namespace {
- const long long intervalLong = 2000 * 1000; // 2s in micros
- const long long intervalShort = 10 * 1000; // 10ms in micros
+const long long intervalLong = 2000 * 1000; // 2s in micros
+const long long intervalShort = 10 * 1000; // 10ms in micros
- //
- // Before executing the TimeHasExpired suite, spawn a dummy listener thread to be the
- // process time tracker (the tests rely on Listener::_timeTracker being available).
- //
+//
+// Before executing the TimeHasExpired suite, spawn a dummy listener thread to be the
+// process time tracker (the tests rely on Listener::_timeTracker being available).
+//
- class TestListener : public Listener {
- public:
- TestListener() : Listener("test", "", 0) {} // port 0 => any available high port
- virtual void acceptedMP(MessagingPort *mp) {}
- };
+class TestListener : public Listener {
+public:
+ TestListener() : Listener("test", "", 0) {} // port 0 => any available high port
+ virtual void acceptedMP(MessagingPort* mp) {}
+};
- void timeTrackerSetup() {
- TestListener listener;
- listener.setAsTimeTracker();
- listener.setupSockets();
- listener.initAndListen();
- }
+void timeTrackerSetup() {
+ TestListener listener;
+ listener.setAsTimeTracker();
+ listener.setupSockets();
+ listener.initAndListen();
+}
- MONGO_INITIALIZER(CurOpTest)(InitializerContext* context) {
- stdx::thread t(timeTrackerSetup);
+MONGO_INITIALIZER(CurOpTest)(InitializerContext* context) {
+ stdx::thread t(timeTrackerSetup);
- // Wait for listener thread to start tracking time.
- while (Listener::getElapsedTimeMillis() == 0) {
- sleepmillis(10);
- }
- return Status::OK();
- }
+ // Wait for listener thread to start tracking time.
+ while (Listener::getElapsedTimeMillis() == 0) {
+ sleepmillis(10);
+ }
+ return Status::OK();
+}
- // Long operation + short timeout => time should expire.
- TEST(TimeHasExpired, PosSimple) {
- auto service = stdx::make_unique<ServiceContextNoop>();
- auto client = service->makeClient("CurOpTest");
- OperationContextNoop txn(client.get(), 100);
- CurOp curOp(&txn);
- curOp.setMaxTimeMicros(intervalShort);
- curOp.ensureStarted();
- sleepmicros(intervalLong);
- ASSERT_TRUE(curOp.maxTimeHasExpired());
- }
+// Long operation + short timeout => time should expire.
+TEST(TimeHasExpired, PosSimple) {
+ auto service = stdx::make_unique<ServiceContextNoop>();
+ auto client = service->makeClient("CurOpTest");
+ OperationContextNoop txn(client.get(), 100);
+ CurOp curOp(&txn);
+ curOp.setMaxTimeMicros(intervalShort);
+ curOp.ensureStarted();
+ sleepmicros(intervalLong);
+ ASSERT_TRUE(curOp.maxTimeHasExpired());
+}
- // Short operation + long timeout => time should not expire.
- TEST(TimeHasExpired, NegSimple) {
- auto service = stdx::make_unique<ServiceContextNoop>();
- auto client = service->makeClient("CurOpTest");
- OperationContextNoop txn(client.get(), 100);
- CurOp curOp(&txn);
- curOp.setMaxTimeMicros(intervalLong);
- curOp.ensureStarted();
- sleepmicros(intervalShort);
- ASSERT_FALSE(curOp.maxTimeHasExpired());
- }
+// Short operation + long timeout => time should not expire.
+TEST(TimeHasExpired, NegSimple) {
+ auto service = stdx::make_unique<ServiceContextNoop>();
+ auto client = service->makeClient("CurOpTest");
+ OperationContextNoop txn(client.get(), 100);
+ CurOp curOp(&txn);
+ curOp.setMaxTimeMicros(intervalLong);
+ curOp.ensureStarted();
+ sleepmicros(intervalShort);
+ ASSERT_FALSE(curOp.maxTimeHasExpired());
+}
- } // namespace
+} // namespace
-} // namespace mongo
+} // namespace mongo
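The MONGO_INITIALIZER above spawns a dummy Listener because the max-time checks read a coarse, process-wide elapsed-time counter that the listener thread keeps current. A self-contained analogue of that pattern, one background thread publishing elapsed milliseconds through an atomic; CoarseTimeTracker and its methods are hypothetical stand-ins, not the Listener API:

#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

// Coarse elapsed-time tracker: one background thread updates the counter so
// hot paths can ask "roughly how long has the process been running?" without
// touching the clock themselves. Call stop() before the object is destroyed.
class CoarseTimeTracker {
public:
    void start() {
        _begin = std::chrono::steady_clock::now();
        _worker = std::thread([this] {
            while (!_stop.load()) {
                auto now = std::chrono::steady_clock::now();
                _elapsedMillis.store(
                    std::chrono::duration_cast<std::chrono::milliseconds>(now - _begin).count());
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
            }
        });
    }

    int64_t elapsedMillis() const {
        return _elapsedMillis.load();
    }

    void stop() {
        _stop.store(true);
        if (_worker.joinable())
            _worker.join();
    }

private:
    std::chrono::steady_clock::time_point _begin;
    std::atomic<int64_t> _elapsedMillis{0};
    std::atomic<bool> _stop{false};
    std::thread _worker;
};

A test would spin until elapsedMillis() is nonzero before exercising a timeout, mirroring the wait loop in the initializer above, and call stop() before the tracker is destroyed.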
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index b16c9c8ba8b..f5e87da95e6 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -119,525 +119,505 @@
#include "mongo/util/version.h"
#if !defined(_WIN32)
-# include <sys/file.h>
+#include <sys/file.h>
#endif
namespace mongo {
- using std::unique_ptr;
- using std::cout;
- using std::cerr;
- using std::endl;
- using std::list;
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::unique_ptr;
+using std::cout;
+using std::cerr;
+using std::endl;
+using std::list;
+using std::string;
+using std::stringstream;
+using std::vector;
- using logger::LogComponent;
+using logger::LogComponent;
- void (*snmpInit)() = NULL;
+void (*snmpInit)() = NULL;
- extern int diagLogging;
+extern int diagLogging;
#ifdef _WIN32
- ntservice::NtServiceDefaultStrings defaultServiceStrings = {
- L"MongoDB",
- L"MongoDB",
- L"MongoDB Server"
- };
+ntservice::NtServiceDefaultStrings defaultServiceStrings = {
+ L"MongoDB", L"MongoDB", L"MongoDB Server"};
#endif
- Timer startupSrandTimer;
+Timer startupSrandTimer;
- QueryResult::View emptyMoreResult(long long);
+QueryResult::View emptyMoreResult(long long);
- class MyMessageHandler : public MessageHandler {
- public:
- virtual void connected( AbstractMessagingPort* p ) {
- Client::initThread("conn", p);
- }
+class MyMessageHandler : public MessageHandler {
+public:
+ virtual void connected(AbstractMessagingPort* p) {
+ Client::initThread("conn", p);
+ }
- virtual void process(Message& m , AbstractMessagingPort* port) {
- while ( true ) {
- if ( inShutdown() ) {
- log() << "got request after shutdown()" << endl;
- break;
- }
+ virtual void process(Message& m, AbstractMessagingPort* port) {
+ while (true) {
+ if (inShutdown()) {
+ log() << "got request after shutdown()" << endl;
+ break;
+ }
- DbResponse dbresponse;
- {
- OperationContextImpl txn;
- assembleResponse(&txn, m, dbresponse, port->remote());
- // txn must go out of scope here so that the operation cannot show up in
- // currentOp results after the response reaches the client.
- }
+ DbResponse dbresponse;
+ {
+ OperationContextImpl txn;
+ assembleResponse(&txn, m, dbresponse, port->remote());
+ // txn must go out of scope here so that the operation cannot show up in
+ // currentOp results after the response reaches the client.
+ }
- if ( dbresponse.response ) {
- port->reply(m, *dbresponse.response, dbresponse.responseTo);
- if( dbresponse.exhaustNS.size() > 0 ) {
- MsgData::View header = dbresponse.response->header();
- QueryResult::View qr = header.view2ptr();
- long long cursorid = qr.getCursorId();
- if( cursorid ) {
- verify( dbresponse.exhaustNS.size() && dbresponse.exhaustNS[0] );
- string ns = dbresponse.exhaustNS; // before reset() free's it...
- m.reset();
- BufBuilder b(512);
- b.appendNum((int) 0 /*size set later in appendData()*/);
- b.appendNum(header.getId());
- b.appendNum(header.getResponseTo());
- b.appendNum((int) dbGetMore);
- b.appendNum((int) 0);
- b.appendStr(ns);
- b.appendNum((int) 0); // ntoreturn
- b.appendNum(cursorid);
- m.appendData(b.buf(), b.len());
- b.decouple();
- DEV log() << "exhaust=true sending more" << endl;
- continue; // this goes back to top loop
- }
+ if (dbresponse.response) {
+ port->reply(m, *dbresponse.response, dbresponse.responseTo);
+ if (dbresponse.exhaustNS.size() > 0) {
+ MsgData::View header = dbresponse.response->header();
+ QueryResult::View qr = header.view2ptr();
+ long long cursorid = qr.getCursorId();
+ if (cursorid) {
+ verify(dbresponse.exhaustNS.size() && dbresponse.exhaustNS[0]);
+                    string ns = dbresponse.exhaustNS;  // before reset() frees it...
+ m.reset();
+ BufBuilder b(512);
+ b.appendNum((int)0 /*size set later in appendData()*/);
+ b.appendNum(header.getId());
+ b.appendNum(header.getResponseTo());
+ b.appendNum((int)dbGetMore);
+ b.appendNum((int)0);
+ b.appendStr(ns);
+ b.appendNum((int)0); // ntoreturn
+ b.appendNum(cursorid);
+ m.appendData(b.buf(), b.len());
+ b.decouple();
+ DEV log() << "exhaust=true sending more" << endl;
+ continue; // this goes back to top loop
}
}
- break;
}
+ break;
}
- };
-
- static void logStartup() {
- BSONObjBuilder toLog;
- stringstream id;
- id << getHostNameCached() << "-" << jsTime().asInt64();
- toLog.append( "_id", id.str() );
- toLog.append( "hostname", getHostNameCached() );
-
- toLog.appendTimeT( "startTime", time(0) );
- toLog.append( "startTimeLocal", dateToCtimeString(Date_t::now()) );
-
- toLog.append("cmdLine", serverGlobalParams.parsedOpts);
- toLog.append( "pid", ProcessId::getCurrent().asLongLong() );
-
-
- BSONObjBuilder buildinfo( toLog.subobjStart("buildinfo"));
- appendBuildInfo(buildinfo);
- appendStorageEngineList(&buildinfo);
- buildinfo.doneFast();
-
- BSONObj o = toLog.obj();
-
- OperationContextImpl txn;
-
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite lk(txn.lockState());
- AutoGetOrCreateDb autoDb(&txn, "local", mongo::MODE_X);
- Database* db = autoDb.getDb();
- const std::string ns = "local.startup_log";
- Collection* collection = db->getCollection(ns);
- WriteUnitOfWork wunit(&txn);
- if (!collection) {
- BSONObj options = BSON("capped" << true << "size" << 10 * 1024 * 1024);
- bool shouldReplicateWrites = txn.writesAreReplicated();
- txn.setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, &txn, shouldReplicateWrites);
- uassertStatusOK(userCreateNS(&txn, db, ns, options));
- collection = db->getCollection(ns);
- }
- invariant(collection);
- uassertStatusOK(collection->insertDocument(&txn, o, false).getStatus());
- wunit.commit();
}
+};
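The exhaust branch in process() above fabricates the client's next getMore on the server side so the loop can keep streaming batches without another round trip. The bytes it lays out follow the legacy OP_GET_MORE wire message (opcode 2005): the standard message header, a reserved int32, the NUL-terminated full collection name, numberToReturn, then the 64-bit cursor id, with the length prefix patched in last. A standalone sketch of that layout; it assumes a little-endian host, and buildGetMore/appendLE are illustrative helpers, not mongo APIs:

#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

// Append an integer to the buffer in host byte order (little-endian assumed).
template <typename T>
void appendLE(std::vector<uint8_t>& buf, T value) {
    uint8_t bytes[sizeof(T)];
    std::memcpy(bytes, &value, sizeof(T));
    buf.insert(buf.end(), bytes, bytes + sizeof(T));
}

// Build a legacy OP_GET_MORE (opcode 2005) message for the given namespace and
// cursor id; the length prefix is patched in at the end, like the BufBuilder
// code above sets its size later.
std::vector<uint8_t> buildGetMore(int32_t requestId,
                                  int32_t responseTo,
                                  const std::string& ns,
                                  int64_t cursorId) {
    std::vector<uint8_t> msg;
    appendLE<int32_t>(msg, 0);         // messageLength, patched below
    appendLE<int32_t>(msg, requestId);
    appendLE<int32_t>(msg, responseTo);
    appendLE<int32_t>(msg, 2005);      // dbGetMore
    appendLE<int32_t>(msg, 0);         // reserved
    msg.insert(msg.end(), ns.begin(), ns.end());
    msg.push_back('\0');               // full collection name, NUL-terminated
    appendLE<int32_t>(msg, 0);         // numberToReturn: 0 = server default batch
    appendLE<int64_t>(msg, cursorId);

    int32_t total = static_cast<int32_t>(msg.size());
    std::memcpy(msg.data(), &total, sizeof(total));  // patch messageLength
    return msg;
}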
+
+static void logStartup() {
+ BSONObjBuilder toLog;
+ stringstream id;
+ id << getHostNameCached() << "-" << jsTime().asInt64();
+ toLog.append("_id", id.str());
+ toLog.append("hostname", getHostNameCached());
+
+ toLog.appendTimeT("startTime", time(0));
+ toLog.append("startTimeLocal", dateToCtimeString(Date_t::now()));
+
+ toLog.append("cmdLine", serverGlobalParams.parsedOpts);
+ toLog.append("pid", ProcessId::getCurrent().asLongLong());
+
+
+ BSONObjBuilder buildinfo(toLog.subobjStart("buildinfo"));
+ appendBuildInfo(buildinfo);
+ appendStorageEngineList(&buildinfo);
+ buildinfo.doneFast();
+
+ BSONObj o = toLog.obj();
+
+ OperationContextImpl txn;
+
+ ScopedTransaction transaction(&txn, MODE_X);
+ Lock::GlobalWrite lk(txn.lockState());
+ AutoGetOrCreateDb autoDb(&txn, "local", mongo::MODE_X);
+ Database* db = autoDb.getDb();
+ const std::string ns = "local.startup_log";
+ Collection* collection = db->getCollection(ns);
+ WriteUnitOfWork wunit(&txn);
+ if (!collection) {
+ BSONObj options = BSON("capped" << true << "size" << 10 * 1024 * 1024);
+ bool shouldReplicateWrites = txn.writesAreReplicated();
+ txn.setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, &txn, shouldReplicateWrites);
+ uassertStatusOK(userCreateNS(&txn, db, ns, options));
+ collection = db->getCollection(ns);
+ }
+ invariant(collection);
+ uassertStatusOK(collection->insertDocument(&txn, o, false).getStatus());
+ wunit.commit();
+}
- static void checkForIdIndexes(OperationContext* txn, Database* db) {
- if ( db->name() == "local") {
- // we do not need an _id index on anything in the local database
- return;
- }
+static void checkForIdIndexes(OperationContext* txn, Database* db) {
+ if (db->name() == "local") {
+ // we do not need an _id index on anything in the local database
+ return;
+ }
- list<string> collections;
- db->getDatabaseCatalogEntry()->getCollectionNamespaces( &collections );
+ list<string> collections;
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collections);
- // for each collection, ensure there is a $_id_ index
- for (list<string>::iterator i = collections.begin(); i != collections.end(); ++i) {
- const string& collectionName = *i;
- NamespaceString ns( collectionName );
- if ( ns.isSystem() )
- continue;
+ // for each collection, ensure there is a $_id_ index
+ for (list<string>::iterator i = collections.begin(); i != collections.end(); ++i) {
+ const string& collectionName = *i;
+ NamespaceString ns(collectionName);
+ if (ns.isSystem())
+ continue;
- Collection* coll = db->getCollection( collectionName );
- if ( !coll )
- continue;
+ Collection* coll = db->getCollection(collectionName);
+ if (!coll)
+ continue;
- if ( coll->getIndexCatalog()->findIdIndex( txn ) )
- continue;
+ if (coll->getIndexCatalog()->findIdIndex(txn))
+ continue;
- log() << "WARNING: the collection '" << *i
- << "' lacks a unique index on _id."
- << " This index is needed for replication to function properly"
- << startupWarningsLog;
- log() << "\t To fix this, you need to create a unique index on _id."
- << " See http://dochub.mongodb.org/core/build-replica-set-indexes"
- << startupWarningsLog;
- }
+ log() << "WARNING: the collection '" << *i << "' lacks a unique index on _id."
+ << " This index is needed for replication to function properly" << startupWarningsLog;
+ log() << "\t To fix this, you need to create a unique index on _id."
+ << " See http://dochub.mongodb.org/core/build-replica-set-indexes"
+ << startupWarningsLog;
}
+}
- /**
- * Checks if this server was started without --replset but has a config in local.system.replset
- * (meaning that this is probably a replica set member started in stand-alone mode).
- *
- * @returns the number of documents in local.system.replset or 0 if this was started with
- * --replset.
- */
- static unsigned long long checkIfReplMissingFromCommandLine(OperationContext* txn) {
- // This is helpful for the query below to work as you can't open files when readlocked
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- if (!repl::getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
- DBDirectClient c(txn);
- return c.count("local.system.replset");
- }
- return 0;
+/**
+ * Checks if this server was started without --replset but has a config in local.system.replset
+ * (meaning that this is probably a replica set member started in stand-alone mode).
+ *
+ * @returns the number of documents in local.system.replset or 0 if this was started with
+ * --replset.
+ */
+static unsigned long long checkIfReplMissingFromCommandLine(OperationContext* txn) {
+ // This is helpful for the query below to work as you can't open files when readlocked
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ if (!repl::getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
+ DBDirectClient c(txn);
+ return c.count("local.system.replset");
}
+ return 0;
+}
- static void repairDatabasesAndCheckVersion() {
- LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;
-
- OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite lk(txn.lockState());
-
- vector<string> dbNames;
-
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases( &dbNames );
-
- // Repair all databases first, so that we do not try to open them if they are in bad shape
- if (storageGlobalParams.repair) {
- for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
- const string dbName = *i;
- LOG(1) << " Repairing database: " << dbName << endl;
+static void repairDatabasesAndCheckVersion() {
+ LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;
- fassert(18506, repairDatabase(&txn, storageEngine, dbName));
- }
- }
+ OperationContextImpl txn;
+ ScopedTransaction transaction(&txn, MODE_X);
+ Lock::GlobalWrite lk(txn.lockState());
- const repl::ReplSettings& replSettings =
- repl::getGlobalReplicationCoordinator()->getSettings();
+ vector<string> dbNames;
- // On replica set members we only clear temp collections on DBs other than "local" during
- // promotion to primary. On pure slaves, they are only cleared when the oplog tells them
- // to. The local DB is special because it is not replicated. See SERVER-10927 for more
- // details.
- const bool shouldClearNonLocalTmpCollections = !(checkIfReplMissingFromCommandLine(&txn)
- || replSettings.usingReplSets()
- || replSettings.slave == repl::SimpleSlave);
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&dbNames);
+ // Repair all databases first, so that we do not try to open them if they are in bad shape
+ if (storageGlobalParams.repair) {
for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
const string dbName = *i;
- LOG(1) << " Recovering database: " << dbName << endl;
-
- Database* db = dbHolder().openDb(&txn, dbName);
- invariant(db);
-
- // First thing after opening the database is to check for file compatibility,
- // otherwise we might crash if this is a deprecated format.
- if (!db->getDatabaseCatalogEntry()->currentFilesCompatible(&txn)) {
- log() << "****";
- log() << "cannot do this upgrade without an upgrade in the middle";
- log() << "please do a --repair with 2.6 and then start this version";
- dbexit(EXIT_NEED_UPGRADE);
- return;
- }
+ LOG(1) << " Repairing database: " << dbName << endl;
+
+ fassert(18506, repairDatabase(&txn, storageEngine, dbName));
+ }
+ }
- // Major versions match, check indexes
- const string systemIndexes = db->name() + ".system.indexes";
+ const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();
+
+ // On replica set members we only clear temp collections on DBs other than "local" during
+ // promotion to primary. On pure slaves, they are only cleared when the oplog tells them
+ // to. The local DB is special because it is not replicated. See SERVER-10927 for more
+ // details.
+ const bool shouldClearNonLocalTmpCollections =
+ !(checkIfReplMissingFromCommandLine(&txn) || replSettings.usingReplSets() ||
+ replSettings.slave == repl::SimpleSlave);
+
+ for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
+ const string dbName = *i;
+ LOG(1) << " Recovering database: " << dbName << endl;
+
+ Database* db = dbHolder().openDb(&txn, dbName);
+ invariant(db);
+
+ // First thing after opening the database is to check for file compatibility,
+ // otherwise we might crash if this is a deprecated format.
+ if (!db->getDatabaseCatalogEntry()->currentFilesCompatible(&txn)) {
+ log() << "****";
+ log() << "cannot do this upgrade without an upgrade in the middle";
+ log() << "please do a --repair with 2.6 and then start this version";
+ dbexit(EXIT_NEED_UPGRADE);
+ return;
+ }
- Collection* coll = db->getCollection( systemIndexes );
- unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(&txn, systemIndexes, coll));
+ // Major versions match, check indexes
+ const string systemIndexes = db->name() + ".system.indexes";
- BSONObj index;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
- const BSONObj key = index.getObjectField("key");
- const string plugin = IndexNames::findPluginName(key);
+ Collection* coll = db->getCollection(systemIndexes);
+ unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(&txn, systemIndexes, coll));
- if (db->getDatabaseCatalogEntry()->isOlderThan24(&txn)) {
- if (IndexNames::existedBefore24(plugin)) {
- continue;
- }
+ BSONObj index;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
+ const BSONObj key = index.getObjectField("key");
+ const string plugin = IndexNames::findPluginName(key);
- log() << "Index " << index << " claims to be of type '" << plugin << "', "
- << "which is either invalid or did not exist before v2.4. "
- << "See the upgrade section: "
- << "http://dochub.mongodb.org/core/upgrade-2.4"
- << startupWarningsLog;
+ if (db->getDatabaseCatalogEntry()->isOlderThan24(&txn)) {
+ if (IndexNames::existedBefore24(plugin)) {
+ continue;
}
- const Status keyStatus = validateKeyPattern(key);
- if (!keyStatus.isOK()) {
- log() << "Problem with index " << index << ": " << keyStatus.reason()
- << " This index can still be used however it cannot be rebuilt."
- << " For more info see"
- << " http://dochub.mongodb.org/core/index-validation"
- << startupWarningsLog;
- }
+ log() << "Index " << index << " claims to be of type '" << plugin << "', "
+ << "which is either invalid or did not exist before v2.4. "
+ << "See the upgrade section: "
+ << "http://dochub.mongodb.org/core/upgrade-2.4" << startupWarningsLog;
}
- if (PlanExecutor::IS_EOF != state) {
- warning() << "Internal error while reading collection " << systemIndexes;
+ const Status keyStatus = validateKeyPattern(key);
+ if (!keyStatus.isOK()) {
+ log() << "Problem with index " << index << ": " << keyStatus.reason()
+ << " This index can still be used however it cannot be rebuilt."
+ << " For more info see"
+ << " http://dochub.mongodb.org/core/index-validation" << startupWarningsLog;
}
+ }
- if (replSettings.usingReplSets()) {
- // We only care about the _id index if we are in a replset
- checkForIdIndexes(&txn, db);
- }
+ if (PlanExecutor::IS_EOF != state) {
+ warning() << "Internal error while reading collection " << systemIndexes;
+ }
- if (shouldClearNonLocalTmpCollections || dbName == "local") {
- db->clearTmpCollections(&txn);
- }
+ if (replSettings.usingReplSets()) {
+ // We only care about the _id index if we are in a replset
+ checkForIdIndexes(&txn, db);
}
- LOG(1) << "done repairDatabases" << endl;
+ if (shouldClearNonLocalTmpCollections || dbName == "local") {
+ db->clearTmpCollections(&txn);
+ }
}
- static void _initAndListen(int listenPort ) {
- Client::initThread("initandlisten");
+ LOG(1) << "done repairDatabases" << endl;
+}
- // Due to SERVER-15389, we must setupSockets first thing at startup in order to avoid
- // obtaining too high a file descriptor for our calls to select().
- MessageServer::Options options;
- options.port = listenPort;
- options.ipList = serverGlobalParams.bind_ip;
+static void _initAndListen(int listenPort) {
+ Client::initThread("initandlisten");
- MessageServer* server = createServer(options, new MyMessageHandler());
- server->setAsTimeTracker();
+ // Due to SERVER-15389, we must setupSockets first thing at startup in order to avoid
+ // obtaining too high a file descriptor for our calls to select().
+ MessageServer::Options options;
+ options.port = listenPort;
+ options.ipList = serverGlobalParams.bind_ip;
- // This is what actually creates the sockets, but does not yet listen on them because we
- // do not want connections to just hang if recovery takes a very long time.
- server->setupSockets();
+ MessageServer* server = createServer(options, new MyMessageHandler());
+ server->setAsTimeTracker();
- std::shared_ptr<DbWebServer> dbWebServer;
- if (serverGlobalParams.isHttpInterfaceEnabled) {
- dbWebServer.reset(new DbWebServer(serverGlobalParams.bind_ip,
- serverGlobalParams.port + 1000,
- new RestAdminAccess()));
- dbWebServer->setupSockets();
- }
+ // This is what actually creates the sockets, but does not yet listen on them because we
+ // do not want connections to just hang if recovery takes a very long time.
+ server->setupSockets();
- getGlobalServiceContext()->initializeGlobalStorageEngine();
-
- // Warn if we detect configurations for multiple registered storage engines in
- // the same configuration file/environment.
- if (serverGlobalParams.parsedOpts.hasField("storage")) {
- BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
- invariant(storageElement.isABSONObj());
- BSONObj storageParamsObj = storageElement.Obj();
- BSONObjIterator i = storageParamsObj.begin();
- while (i.more()) {
- BSONElement e = i.next();
- // Ignore if field name under "storage" matches current storage engine.
- if (storageGlobalParams.engine == e.fieldName()) {
- continue;
- }
+ std::shared_ptr<DbWebServer> dbWebServer;
+ if (serverGlobalParams.isHttpInterfaceEnabled) {
+ dbWebServer.reset(new DbWebServer(
+ serverGlobalParams.bind_ip, serverGlobalParams.port + 1000, new RestAdminAccess()));
+ dbWebServer->setupSockets();
+ }
- // Warn if field name matches non-active registered storage engine.
- if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
- warning() << "Detected configuration for non-active storage engine "
- << e.fieldName()
- << " when current storage engine is "
- << storageGlobalParams.engine;
- }
+ getGlobalServiceContext()->initializeGlobalStorageEngine();
+
+ // Warn if we detect configurations for multiple registered storage engines in
+ // the same configuration file/environment.
+ if (serverGlobalParams.parsedOpts.hasField("storage")) {
+ BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
+ invariant(storageElement.isABSONObj());
+ BSONObj storageParamsObj = storageElement.Obj();
+ BSONObjIterator i = storageParamsObj.begin();
+ while (i.more()) {
+ BSONElement e = i.next();
+ // Ignore if field name under "storage" matches current storage engine.
+ if (storageGlobalParams.engine == e.fieldName()) {
+ continue;
}
- }
- getGlobalServiceContext()->setOpObserver(stdx::make_unique<OpObserver>());
+ // Warn if field name matches non-active registered storage engine.
+ if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
+ warning() << "Detected configuration for non-active storage engine "
+ << e.fieldName() << " when current storage engine is "
+ << storageGlobalParams.engine;
+ }
+ }
+ }
- const repl::ReplSettings& replSettings =
- repl::getGlobalReplicationCoordinator()->getSettings();
+ getGlobalServiceContext()->setOpObserver(stdx::make_unique<OpObserver>());
- {
- ProcessId pid = ProcessId::getCurrent();
- LogstreamBuilder l = log(LogComponent::kControl);
- l << "MongoDB starting : pid=" << pid
- << " port=" << serverGlobalParams.port
- << " dbpath=" << storageGlobalParams.dbpath;
- if( replSettings.master ) l << " master=" << replSettings.master;
- if( replSettings.slave ) l << " slave=" << (int) replSettings.slave;
+ const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();
- const bool is32bit = sizeof(int*) == 4;
- l << ( is32bit ? " 32" : " 64" ) << "-bit host=" << getHostNameCached() << endl;
- }
+ {
+ ProcessId pid = ProcessId::getCurrent();
+ LogstreamBuilder l = log(LogComponent::kControl);
+ l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
+ << " dbpath=" << storageGlobalParams.dbpath;
+ if (replSettings.master)
+ l << " master=" << replSettings.master;
+ if (replSettings.slave)
+ l << " slave=" << (int)replSettings.slave;
+
+ const bool is32bit = sizeof(int*) == 4;
+ l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
+ }
- DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;
- logMongodStartupWarnings(storageGlobalParams);
+ DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;
+ logMongodStartupWarnings(storageGlobalParams);
#if defined(_WIN32)
- printTargetMinOS();
+ printTargetMinOS();
#endif
- logProcessDetails();
-
- {
- stringstream ss;
- ss << endl;
- ss << "*********************************************************************" << endl;
- ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
- ss << " Create this directory or give existing directory in --dbpath." << endl;
- ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
- ss << "*********************************************************************" << endl;
- uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
- }
+ logProcessDetails();
- {
- stringstream ss;
- ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
- uassert(12590,
- ss.str().c_str(),
- boost::filesystem::exists(storageGlobalParams.repairpath));
- }
+ {
+ stringstream ss;
+ ss << endl;
+ ss << "*********************************************************************" << endl;
+ ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
+ ss << " Create this directory or give existing directory in --dbpath." << endl;
+ ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
+ ss << "*********************************************************************" << endl;
+ uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
+ }
- // TODO: This should go into a MONGO_INITIALIZER once we have figured out the correct
- // dependencies.
- if (snmpInit) {
- snmpInit();
- }
+ {
+ stringstream ss;
+ ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
+ uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
+ }
- boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
+ // TODO: This should go into a MONGO_INITIALIZER once we have figured out the correct
+ // dependencies.
+ if (snmpInit) {
+ snmpInit();
+ }
- if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalRecoverOnly)
- return;
+ boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
- if (mongodGlobalParams.scriptingEnabled) {
- ScriptEngine::setup();
- }
+ if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalRecoverOnly)
+ return;
- repairDatabasesAndCheckVersion();
+ if (mongodGlobalParams.scriptingEnabled) {
+ ScriptEngine::setup();
+ }
- if (storageGlobalParams.upgrade) {
- log() << "finished checking dbs" << endl;
- exitCleanly(EXIT_CLEAN);
- }
+ repairDatabasesAndCheckVersion();
- {
- OperationContextImpl txn;
- uassertStatusOK(getGlobalAuthorizationManager()->initialize(&txn));
- }
+ if (storageGlobalParams.upgrade) {
+ log() << "finished checking dbs" << endl;
+ exitCleanly(EXIT_CLEAN);
+ }
- /* this is for security on certain platforms (nonce generation) */
- srand((unsigned) (curTimeMicros64() ^ startupSrandTimer.micros()));
+ {
+ OperationContextImpl txn;
+ uassertStatusOK(getGlobalAuthorizationManager()->initialize(&txn));
+ }
- // The snapshot thread provides historical collection level and lock statistics for use
- // by the web interface. Only needed when HTTP is enabled.
- if (serverGlobalParams.isHttpInterfaceEnabled) {
- snapshotThread.go();
+ /* this is for security on certain platforms (nonce generation) */
+ srand((unsigned)(curTimeMicros64() ^ startupSrandTimer.micros()));
- invariant(dbWebServer);
- stdx::thread web(stdx::bind(&webServerListenThread, dbWebServer));
- web.detach();
- }
+ // The snapshot thread provides historical collection level and lock statistics for use
+ // by the web interface. Only needed when HTTP is enabled.
+ if (serverGlobalParams.isHttpInterfaceEnabled) {
+ snapshotThread.go();
- {
- OperationContextImpl txn;
+ invariant(dbWebServer);
+ stdx::thread web(stdx::bind(&webServerListenThread, dbWebServer));
+ web.detach();
+ }
+
+ {
+ OperationContextImpl txn;
#ifndef _WIN32
- mongo::signalForkSuccess();
+ mongo::signalForkSuccess();
#endif
- Status status = authindex::verifySystemIndexes(&txn);
- if (!status.isOK()) {
- log() << status.reason();
- exitCleanly(EXIT_NEED_UPGRADE);
- }
-
- // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
- int foundSchemaVersion;
- status = getGlobalAuthorizationManager()->getAuthorizationVersion(
- &txn, &foundSchemaVersion);
- if (!status.isOK()) {
- log() << "Auth schema version is incompatible: "
- << "User and role management commands require auth data to have "
- << "at least schema version " << AuthorizationManager::schemaVersion26Final
- << " but startup could not verify schema version: " << status.toString()
- << endl;
- exitCleanly(EXIT_NEED_UPGRADE);
- }
- if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
- log() << "Auth schema version is incompatible: "
- << "User and role management commands require auth data to have "
- << "at least schema version " << AuthorizationManager::schemaVersion26Final
- << " but found " << foundSchemaVersion << ". In order to upgrade "
- << "the auth schema, first downgrade MongoDB binaries to version "
- << "2.6 and then run the authSchemaUpgrade command." << endl;
- exitCleanly(EXIT_NEED_UPGRADE);
- }
-
- getDeleter()->startWorkers();
-
- restartInProgressIndexesFromLastShutdown(&txn);
-
- repl::getGlobalReplicationCoordinator()->startReplication(&txn);
-
- const unsigned long long missingRepl = checkIfReplMissingFromCommandLine(&txn);
- if (missingRepl) {
- log() << startupWarningsLog;
- log() << "** WARNING: mongod started without --replSet yet " << missingRepl
- << " documents are present in local.system.replset" << startupWarningsLog;
- log() << "** Restart with --replSet unless you are doing maintenance and "
- << " no other clients are connected." << startupWarningsLog;
- log() << "** The TTL collection monitor will not start because of this."
- << startupWarningsLog;
- log() << "** ";
- log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
- log() << startupWarningsLog;
- }
- else {
- startTTLBackgroundJob();
- }
-
+ Status status = authindex::verifySystemIndexes(&txn);
+ if (!status.isOK()) {
+ log() << status.reason();
+ exitCleanly(EXIT_NEED_UPGRADE);
+ }
+
+ // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
+ int foundSchemaVersion;
+ status =
+ getGlobalAuthorizationManager()->getAuthorizationVersion(&txn, &foundSchemaVersion);
+ if (!status.isOK()) {
+ log() << "Auth schema version is incompatible: "
+ << "User and role management commands require auth data to have "
+ << "at least schema version " << AuthorizationManager::schemaVersion26Final
+ << " but startup could not verify schema version: " << status.toString() << endl;
+ exitCleanly(EXIT_NEED_UPGRADE);
+ }
+ if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
+ log() << "Auth schema version is incompatible: "
+ << "User and role management commands require auth data to have "
+ << "at least schema version " << AuthorizationManager::schemaVersion26Final
+ << " but found " << foundSchemaVersion << ". In order to upgrade "
+ << "the auth schema, first downgrade MongoDB binaries to version "
+ << "2.6 and then run the authSchemaUpgrade command." << endl;
+ exitCleanly(EXIT_NEED_UPGRADE);
+ }
+
+ getDeleter()->startWorkers();
+
+ restartInProgressIndexesFromLastShutdown(&txn);
+
+ repl::getGlobalReplicationCoordinator()->startReplication(&txn);
+
+ const unsigned long long missingRepl = checkIfReplMissingFromCommandLine(&txn);
+ if (missingRepl) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: mongod started without --replSet yet " << missingRepl
+ << " documents are present in local.system.replset" << startupWarningsLog;
+ log() << "** Restart with --replSet unless you are doing maintenance and "
+ << " no other clients are connected." << startupWarningsLog;
+ log() << "** The TTL collection monitor will not start because of this."
+ << startupWarningsLog;
+ log() << "** ";
+ log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
+ log() << startupWarningsLog;
+ } else {
+ startTTLBackgroundJob();
}
+ }
- startClientCursorMonitor();
-
- PeriodicTask::startRunningPeriodicTasks();
+ startClientCursorMonitor();
- logStartup();
+ PeriodicTask::startRunningPeriodicTasks();
- // MessageServer::run will return when exit code closes its socket
- server->run();
- }
+ logStartup();
- ExitCode initAndListen(int listenPort) {
- try {
- _initAndListen(listenPort);
+ // MessageServer::run will return when exit code closes its socket
+ server->run();
+}
- return inShutdown() ? EXIT_CLEAN : EXIT_NET_ERROR;
- }
- catch ( DBException &e ) {
- log() << "exception in initAndListen: " << e.toString() << ", terminating" << endl;
- return EXIT_UNCAUGHT;
- }
- catch ( std::exception &e ) {
- log() << "exception in initAndListen std::exception: " << e.what() << ", terminating";
- return EXIT_UNCAUGHT;
- }
- catch ( int& n ) {
- log() << "exception in initAndListen int: " << n << ", terminating" << endl;
- return EXIT_UNCAUGHT;
- }
- catch(...) {
- log() << "exception in initAndListen, terminating" << endl;
- return EXIT_UNCAUGHT;
- }
+ExitCode initAndListen(int listenPort) {
+ try {
+ _initAndListen(listenPort);
+
+ return inShutdown() ? EXIT_CLEAN : EXIT_NET_ERROR;
+ } catch (DBException& e) {
+ log() << "exception in initAndListen: " << e.toString() << ", terminating" << endl;
+ return EXIT_UNCAUGHT;
+ } catch (std::exception& e) {
+ log() << "exception in initAndListen std::exception: " << e.what() << ", terminating";
+ return EXIT_UNCAUGHT;
+ } catch (int& n) {
+ log() << "exception in initAndListen int: " << n << ", terminating" << endl;
+ return EXIT_UNCAUGHT;
+ } catch (...) {
+ log() << "exception in initAndListen, terminating" << endl;
+ return EXIT_UNCAUGHT;
}
+}
#if defined(_WIN32)
- ExitCode initService() {
- ntservice::reportStatus( SERVICE_RUNNING );
- log() << "Service running" << endl;
- return initAndListen(serverGlobalParams.port);
- }
+ExitCode initService() {
+ ntservice::reportStatus(SERVICE_RUNNING);
+ log() << "Service running" << endl;
+ return initAndListen(serverGlobalParams.port);
+}
#endif
-} // namespace mongo
+} // namespace mongo
using namespace mongo;
@@ -677,7 +657,7 @@ static void startupConfigActions(const std::vector<std::string>& args) {
// and "dbppath" command. The "run" command is the same as just running mongod, so just
// falls through.
if (moe::startupOptionsParsed.count("command")) {
- vector<string> command = moe::startupOptionsParsed["command"].as< vector<string> >();
+ vector<string> command = moe::startupOptionsParsed["command"].as<vector<string>>();
if (command[0].compare("dbpath") == 0) {
cout << storageGlobalParams.dbpath << endl;
@@ -699,10 +679,10 @@ static void startupConfigActions(const std::vector<std::string>& args) {
#ifdef _WIN32
ntservice::configureService(initService,
- moe::startupOptionsParsed,
- defaultServiceStrings,
- std::vector<std::string>(),
- args);
+ moe::startupOptionsParsed,
+ defaultServiceStrings,
+ std::vector<std::string>(),
+ args);
#endif // _WIN32
#ifdef __linux__
@@ -710,21 +690,21 @@ static void startupConfigActions(const std::vector<std::string>& args) {
moe::startupOptionsParsed["shutdown"].as<bool>() == true) {
bool failed = false;
- string name = (boost::filesystem::path(storageGlobalParams.dbpath) / "mongod.lock").string();
- if ( !boost::filesystem::exists( name ) || boost::filesystem::file_size( name ) == 0 )
+ string name =
+ (boost::filesystem::path(storageGlobalParams.dbpath) / "mongod.lock").string();
+ if (!boost::filesystem::exists(name) || boost::filesystem::file_size(name) == 0)
failed = true;
pid_t pid;
string procPath;
- if (!failed){
+ if (!failed) {
try {
- std::ifstream f (name.c_str());
+ std::ifstream f(name.c_str());
f >> pid;
procPath = (str::stream() << "/proc/" << pid);
if (!boost::filesystem::exists(procPath))
failed = true;
- }
- catch (const std::exception& e){
+ } catch (const std::exception& e) {
cerr << "Error reading pid from lock file [" << name << "]: " << e.what() << endl;
failed = true;
}
@@ -754,7 +734,7 @@ static void startupConfigActions(const std::vector<std::string>& args) {
}
MONGO_INITIALIZER_WITH_PREREQUISITES(CreateReplicationManager, ("SetGlobalEnvironment"))
- (InitializerContext* context) {
+(InitializerContext* context) {
auto replCoord = stdx::make_unique<repl::ReplicationCoordinatorImpl>(
getGlobalReplSettings(),
new repl::ReplicationCoordinatorExternalStateImpl,
@@ -780,27 +760,21 @@ MONGO_INITIALIZER_GENERAL(setSSLManagerType,
#if defined(_WIN32)
namespace mongo {
- // the hook for mongoAbort
- extern void (*reportEventToSystem)(const char *msg);
- static void reportEventToSystemImpl(const char *msg) {
- static ::HANDLE hEventLog = RegisterEventSource( NULL, TEXT("mongod") );
- if( hEventLog ) {
- std::wstring s = toNativeString(msg);
- LPCTSTR txt = s.c_str();
- BOOL ok = ReportEvent(
- hEventLog, EVENTLOG_ERROR_TYPE,
- 0, 0, NULL,
- 1,
- 0,
- &txt,
- 0);
- wassert(ok);
- }
+// the hook for mongoAbort
+extern void (*reportEventToSystem)(const char* msg);
+static void reportEventToSystemImpl(const char* msg) {
+ static ::HANDLE hEventLog = RegisterEventSource(NULL, TEXT("mongod"));
+ if (hEventLog) {
+ std::wstring s = toNativeString(msg);
+ LPCTSTR txt = s.c_str();
+ BOOL ok = ReportEvent(hEventLog, EVENTLOG_ERROR_TYPE, 0, 0, NULL, 1, 0, &txt, 0);
+ wassert(ok);
}
-} // namespace mongo
+}
+} // namespace mongo
#endif // if defined(_WIN32)
-static int mongoDbMain(int argc, char* argv[], char **envp) {
+static int mongoDbMain(int argc, char* argv[], char** envp) {
static StaticObserver staticObserver;
#if defined(_WIN32)
@@ -815,8 +789,8 @@ static int mongoDbMain(int argc, char* argv[], char **envp) {
{
unsigned x = 0x12345678;
- unsigned char& b = (unsigned char&) x;
- if ( b != 0x78 ) {
+ unsigned char& b = (unsigned char&)x;
+ if (b != 0x78) {
mongo::log(LogComponent::kControl) << "big endian cpus not yet supported" << endl;
return 33;
}
diff --git a/src/mongo/db/db.h b/src/mongo/db/db.h
index 20385c1386a..5a9da9cafb6 100644
--- a/src/mongo/db/db.h
+++ b/src/mongo/db/db.h
@@ -38,9 +38,9 @@
namespace mongo {
namespace repl {
- class ReplSettings;
-} // namespace repl
+class ReplSettings;
+} // namespace repl
- extern void (*snmpInit)();
+extern void (*snmpInit)();
-} // namespace mongo
+} // namespace mongo
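
The snmpInit hook declared above is the one consumed earlier in this diff by _initAndListen (the "if (snmpInit) snmpInit();" call). A minimal, hypothetical sketch of how a module could install that hook follows; the function and struct names are illustrative assumptions, not code from this change:

    #include "mongo/db/db.h"

    namespace {
    // Hypothetical SNMP bootstrap; a real module would start agent threads,
    // register counters, etc.
    void snmpInitImplSketch() {}

    // Installs the hook so it is set before mongod's startup path reaches
    // _initAndListen.
    struct InstallSnmpHook {
        InstallSnmpHook() {
            mongo::snmpInit = snmpInitImplSketch;
        }
    } installSnmpHook;
    }  // namespace
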
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index 8c190b96f38..f2b065532f1 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -38,177 +38,157 @@
namespace mongo {
- AutoGetDb::AutoGetDb(OperationContext* txn, StringData ns, LockMode mode)
- : _dbLock(txn->lockState(), ns, mode),
- _db(dbHolder().get(txn, ns)) {
-
- }
-
- AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* txn,
- StringData ns,
- LockMode mode)
- : _transaction(txn, MODE_IX),
- _dbLock(txn->lockState(), ns, mode),
- _db(dbHolder().get(txn, ns)) {
- invariant(mode == MODE_IX || mode == MODE_X);
- _justCreated = false;
- // If the database didn't exist, relock in MODE_X
- if (_db == NULL) {
- if (mode != MODE_X) {
- _dbLock.relockWithMode(MODE_X);
- }
- _db = dbHolder().openDb(txn, ns);
- _justCreated = true;
+AutoGetDb::AutoGetDb(OperationContext* txn, StringData ns, LockMode mode)
+ : _dbLock(txn->lockState(), ns, mode), _db(dbHolder().get(txn, ns)) {}
+
+AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockMode mode)
+ : _transaction(txn, MODE_IX),
+ _dbLock(txn->lockState(), ns, mode),
+ _db(dbHolder().get(txn, ns)) {
+ invariant(mode == MODE_IX || mode == MODE_X);
+ _justCreated = false;
+ // If the database didn't exist, relock in MODE_X
+ if (_db == NULL) {
+ if (mode != MODE_X) {
+ _dbLock.relockWithMode(MODE_X);
}
+ _db = dbHolder().openDb(txn, ns);
+ _justCreated = true;
}
-
- AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
- const std::string& ns)
- : _txn(txn),
- _transaction(txn, MODE_IS),
- _db(_txn, nsToDatabaseSubstring(ns), MODE_IS),
- _collLock(_txn->lockState(), ns, MODE_IS),
- _coll(NULL) {
-
- _init(ns, nsToCollectionSubstring(ns));
- }
-
- AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
- const NamespaceString& nss)
- : _txn(txn),
- _transaction(txn, MODE_IS),
- _db(_txn, nss.db(), MODE_IS),
- _collLock(_txn->lockState(), nss.toString(), MODE_IS),
- _coll(NULL) {
-
- _init(nss.toString(), nss.coll());
- }
-
- void AutoGetCollectionForRead::_init(const std::string& ns, StringData coll) {
- massert(28535, "need a non-empty collection name", !coll.empty());
-
- // We have both the DB and collection locked, which the prerequisite to do a stable shard
- // version check.
- ensureShardVersionOKOrThrow(_txn->getClient(), ns);
-
- auto curOp = CurOp::get(_txn);
- stdx::lock_guard<Client> lk(*_txn->getClient());
+}
+
+AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn, const std::string& ns)
+ : _txn(txn),
+ _transaction(txn, MODE_IS),
+ _db(_txn, nsToDatabaseSubstring(ns), MODE_IS),
+ _collLock(_txn->lockState(), ns, MODE_IS),
+ _coll(NULL) {
+ _init(ns, nsToCollectionSubstring(ns));
+}
+
+AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
+ const NamespaceString& nss)
+ : _txn(txn),
+ _transaction(txn, MODE_IS),
+ _db(_txn, nss.db(), MODE_IS),
+ _collLock(_txn->lockState(), nss.toString(), MODE_IS),
+ _coll(NULL) {
+ _init(nss.toString(), nss.coll());
+}
+
+void AutoGetCollectionForRead::_init(const std::string& ns, StringData coll) {
+ massert(28535, "need a non-empty collection name", !coll.empty());
+
+ // We have both the DB and collection locked, which is the prerequisite to do a stable shard
+ // version check.
+ ensureShardVersionOKOrThrow(_txn->getClient(), ns);
+
+ auto curOp = CurOp::get(_txn);
+ stdx::lock_guard<Client> lk(*_txn->getClient());
+ // TODO: OldClientContext legacy, needs to be removed
+ curOp->ensureStarted();
+ curOp->setNS_inlock(ns);
+
+ // At this point, we are locked in shared mode for the database by the DB lock in the
+ // constructor, so it is safe to load the DB pointer.
+ if (_db.getDb()) {
// TODO: OldClientContext legacy, needs to be removed
- curOp->ensureStarted();
- curOp->setNS_inlock(ns);
+ curOp->enter_inlock(ns.c_str(), _db.getDb()->getProfilingLevel());
- // At this point, we are locked in shared mode for the database by the DB lock in the
- // constructor, so it is safe to load the DB pointer.
- if (_db.getDb()) {
- // TODO: OldClientContext legacy, needs to be removed
- curOp->enter_inlock(ns.c_str(), _db.getDb()->getProfilingLevel());
-
- _coll = _db.getDb()->getCollection(ns);
- }
+ _coll = _db.getDb()->getCollection(ns);
}
+}
- AutoGetCollectionForRead::~AutoGetCollectionForRead() {
- // Report time spent in read lock
- auto currentOp = CurOp::get(_txn);
- Top::get(_txn->getClient()->getServiceContext()).record(
- currentOp->getNS(),
+AutoGetCollectionForRead::~AutoGetCollectionForRead() {
+ // Report time spent in read lock
+ auto currentOp = CurOp::get(_txn);
+ Top::get(_txn->getClient()->getServiceContext())
+ .record(currentOp->getNS(),
currentOp->getOp(),
-1, // "read locked"
_timer.micros(),
currentOp->isCommand());
+}
+
+
+OldClientContext::OldClientContext(OperationContext* txn, const std::string& ns, Database* db)
+ : _justCreated(false), _doVersion(true), _ns(ns), _db(db), _txn(txn) {}
+
+OldClientContext::OldClientContext(OperationContext* txn,
+ const std::string& ns,
+ Database* db,
+ bool justCreated)
+ : _justCreated(justCreated), _doVersion(true), _ns(ns), _db(db), _txn(txn) {
+ _finishInit();
+}
+
+OldClientContext::OldClientContext(OperationContext* txn,
+ const std::string& ns,
+ bool doVersion)
+ : _justCreated(false), // set for real in finishInit
+ _doVersion(doVersion),
+ _ns(ns),
+ _db(NULL),
+ _txn(txn) {
+ _finishInit();
+}
+
+void OldClientContext::_finishInit() {
+ _db = dbHolder().get(_txn, _ns);
+ if (_db) {
+ _justCreated = false;
+ } else {
+ invariant(_txn->lockState()->isDbLockedForMode(nsToDatabaseSubstring(_ns), MODE_X));
+ _db = dbHolder().openDb(_txn, _ns, &_justCreated);
+ invariant(_db);
}
-
- OldClientContext::OldClientContext(OperationContext* txn, const std::string& ns, Database * db)
- : _justCreated(false),
- _doVersion(true),
- _ns(ns),
- _db(db),
- _txn(txn) {
- }
-
- OldClientContext::OldClientContext(OperationContext* txn,
- const std::string& ns,
- Database* db,
- bool justCreated)
- : _justCreated(justCreated),
- _doVersion(true),
- _ns(ns),
- _db(db),
- _txn(txn) {
- _finishInit();
- }
-
- OldClientContext::OldClientContext(OperationContext* txn,
- const std::string& ns,
- bool doVersion)
- : _justCreated(false), // set for real in finishInit
- _doVersion(doVersion),
- _ns(ns),
- _db(NULL),
- _txn(txn) {
-
- _finishInit();
+ if (_doVersion) {
+ _checkNotStale();
}
- void OldClientContext::_finishInit() {
- _db = dbHolder().get(_txn, _ns);
- if (_db) {
- _justCreated = false;
- }
- else {
- invariant(_txn->lockState()->isDbLockedForMode(nsToDatabaseSubstring(_ns), MODE_X));
- _db = dbHolder().openDb(_txn, _ns, &_justCreated);
- invariant(_db);
- }
-
- if (_doVersion) {
- _checkNotStale();
- }
+ stdx::lock_guard<Client> lk(*_txn->getClient());
+ CurOp::get(_txn)->enter_inlock(_ns.c_str(), _db->getProfilingLevel());
+}
- stdx::lock_guard<Client> lk(*_txn->getClient());
- CurOp::get(_txn)->enter_inlock(_ns.c_str(), _db->getProfilingLevel());
- }
-
- void OldClientContext::_checkNotStale() const {
- switch (CurOp::get(_txn)->getOp()) {
- case dbGetMore: // getMore is special and should be handled elsewhere.
- case dbUpdate: // update & delete check shard version in instance.cpp, so don't check
- case dbDelete: // here as well.
+void OldClientContext::_checkNotStale() const {
+ switch (CurOp::get(_txn)->getOp()) {
+ case dbGetMore: // getMore is special and should be handled elsewhere.
+ case dbUpdate: // update & delete check shard version in instance.cpp, so don't check
+ case dbDelete: // here as well.
break;
default:
ensureShardVersionOKOrThrow(_txn->getClient(), _ns);
- }
}
+}
- OldClientContext::~OldClientContext() {
- // Lock must still be held
- invariant(_txn->lockState()->isLocked());
+OldClientContext::~OldClientContext() {
+ // Lock must still be held
+ invariant(_txn->lockState()->isLocked());
- auto currentOp = CurOp::get(_txn);
- Top::get(_txn->getClient()->getServiceContext()).record(
- currentOp->getNS(),
+ auto currentOp = CurOp::get(_txn);
+ Top::get(_txn->getClient()->getServiceContext())
+ .record(currentOp->getNS(),
currentOp->getOp(),
_txn->lockState()->isWriteLocked() ? 1 : -1,
_timer.micros(),
currentOp->isCommand());
+}
+
+
+OldClientWriteContext::OldClientWriteContext(OperationContext* opCtx, const std::string& ns)
+ : _txn(opCtx),
+ _nss(ns),
+ _autodb(opCtx, _nss.db(), MODE_IX),
+ _collk(opCtx->lockState(), ns, MODE_IX),
+ _c(opCtx, ns, _autodb.getDb(), _autodb.justCreated()) {
+ _collection = _c.db()->getCollection(ns);
+ if (!_collection && !_autodb.justCreated()) {
+ // relock database in MODE_X to allow collection creation
+ _collk.relockAsDatabaseExclusive(_autodb.lock());
+ Database* db = dbHolder().get(_txn, ns);
+ invariant(db == _c.db());
}
-
-
-
- OldClientWriteContext::OldClientWriteContext(OperationContext* opCtx, const std::string& ns)
- : _txn(opCtx),
- _nss(ns),
- _autodb(opCtx, _nss.db(), MODE_IX),
- _collk(opCtx->lockState(), ns, MODE_IX),
- _c(opCtx, ns, _autodb.getDb(), _autodb.justCreated()) {
- _collection = _c.db()->getCollection(ns);
- if (!_collection && !_autodb.justCreated()) {
- // relock database in MODE_X to allow collection creation
- _collk.relockAsDatabaseExclusive(_autodb.lock());
- Database* db = dbHolder().get(_txn, ns);
- invariant(db == _c.db());
- }
- }
+}
} // namespace mongo
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index 580389e9b1b..b37112cb267 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -39,164 +39,175 @@
namespace mongo {
- class Collection;
+class Collection;
- /**
- * RAII-style class, which acquires a lock on the specified database in the requested mode and
- * obtains a reference to the database. Used as a shortcut for calls to dbHolder().get().
- *
- * It is guaranteed that locks will be released when this object goes out of scope, therefore
- * the database reference returned by this class should not be retained.
- */
- class AutoGetDb {
- MONGO_DISALLOW_COPYING(AutoGetDb);
- public:
- AutoGetDb(OperationContext* txn, StringData ns, LockMode mode);
+/**
+ * RAII-style class, which acquires a lock on the specified database in the requested mode and
+ * obtains a reference to the database. Used as a shortcut for calls to dbHolder().get().
+ *
+ * It is guaranteed that locks will be released when this object goes out of scope, therefore
+ * the database reference returned by this class should not be retained.
+ */
+class AutoGetDb {
+ MONGO_DISALLOW_COPYING(AutoGetDb);
- Database* getDb() const {
- return _db;
- }
+public:
+ AutoGetDb(OperationContext* txn, StringData ns, LockMode mode);
- private:
- const Lock::DBLock _dbLock;
- Database* const _db;
- };
+ Database* getDb() const {
+ return _db;
+ }
- /**
- * RAII-style class, which acquires a lock on the specified database in the requested mode and
- * obtains a reference to the database, creating it was non-existing. Used as a shortcut for
- * calls to dbHolder().openDb(), taking care of locking details. The requested mode must be
- * MODE_IX or MODE_X. If the database needs to be created, the lock will automatically be
- * reacquired as MODE_X.
- *
- * It is guaranteed that locks will be released when this object goes out of scope, therefore
- * the database reference returned by this class should not be retained.
- */
- class AutoGetOrCreateDb {
- MONGO_DISALLOW_COPYING(AutoGetOrCreateDb);
- public:
- AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockMode mode);
+private:
+ const Lock::DBLock _dbLock;
+ Database* const _db;
+};
+
+/**
+ * RAII-style class, which acquires a lock on the specified database in the requested mode and
+ * obtains a reference to the database, creating it if it was non-existing. Used as a shortcut for
+ * calls to dbHolder().openDb(), taking care of locking details. The requested mode must be
+ * MODE_IX or MODE_X. If the database needs to be created, the lock will automatically be
+ * reacquired as MODE_X.
+ *
+ * It is guaranteed that locks will be released when this object goes out of scope, therefore
+ * the database reference returned by this class should not be retained.
+ */
+class AutoGetOrCreateDb {
+ MONGO_DISALLOW_COPYING(AutoGetOrCreateDb);
+
+public:
+ AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockMode mode);
+
+ Database* getDb() const {
+ return _db;
+ }
+
+ bool justCreated() const {
+ return _justCreated;
+ }
+
+ Lock::DBLock& lock() {
+ return _dbLock;
+ }
+
+private:
+ ScopedTransaction _transaction;
+ Lock::DBLock _dbLock; // not const, as we may need to relock for implicit create
+ Database* _db;
+ bool _justCreated;
+};
+
+/**
+ * RAII-style class, which would acquire the appropriate hierarchy of locks for obtaining
+ * a particular collection and would retrieve a reference to the collection.
+ *
+ * It is guaranteed that locks will be released when this object goes out of scope, therefore
+ * database and collection references returned by this class should not be retained.
+ */
+class AutoGetCollectionForRead {
+ MONGO_DISALLOW_COPYING(AutoGetCollectionForRead);
- Database* getDb() const {
- return _db;
- }
+public:
+ AutoGetCollectionForRead(OperationContext* txn, const std::string& ns);
+ AutoGetCollectionForRead(OperationContext* txn, const NamespaceString& nss);
+ ~AutoGetCollectionForRead();
- bool justCreated() const {
- return _justCreated;
- }
+ Database* getDb() const {
+ return _db.getDb();
+ }
- Lock::DBLock& lock() { return _dbLock; }
+ Collection* getCollection() const {
+ return _coll;
+ }
- private:
- ScopedTransaction _transaction;
- Lock::DBLock _dbLock; // not const, as we may need to relock for implicit create
- Database* _db;
- bool _justCreated;
- };
+private:
+ void _init(const std::string& ns, StringData coll);
+
+ const Timer _timer;
+ OperationContext* const _txn;
+ const ScopedTransaction _transaction;
+ const AutoGetDb _db;
+ const Lock::CollectionLock _collLock;
+
+ Collection* _coll;
+};
+
+/**
+ * Opens the database that we want to use and sets the appropriate namespace on the
+ * current operation.
+ */
+class OldClientContext {
+ MONGO_DISALLOW_COPYING(OldClientContext);
+
+public:
+ /** this is probably what you want */
+ OldClientContext(OperationContext* txn, const std::string& ns, bool doVersion = true);
/**
- * RAII-style class, which would acquire the appropritate hierarchy of locks for obtaining
- * a particular collection and would retrieve a reference to the collection.
- *
- * It is guaranteed that locks will be released when this object goes out of scope, therefore
- * database and collection references returned by this class should not be retained.
+ * Below still calls _finishInit, but assumes database has already been acquired
+ * or just created.
*/
- class AutoGetCollectionForRead {
- MONGO_DISALLOW_COPYING(AutoGetCollectionForRead);
- public:
- AutoGetCollectionForRead(OperationContext* txn, const std::string& ns);
- AutoGetCollectionForRead(OperationContext* txn, const NamespaceString& nss);
- ~AutoGetCollectionForRead();
-
- Database* getDb() const {
- return _db.getDb();
- }
-
- Collection* getCollection() const {
- return _coll;
- }
-
- private:
- void _init(const std::string& ns,
- StringData coll);
-
- const Timer _timer;
- OperationContext* const _txn;
- const ScopedTransaction _transaction;
- const AutoGetDb _db;
- const Lock::CollectionLock _collLock;
-
- Collection* _coll;
- };
+ OldClientContext(OperationContext* txn, const std::string& ns, Database* db, bool justCreated);
/**
- * Opens the database that we want to use and sets the appropriate namespace on the
- * current operation.
+ * note: this does not call _finishInit -- i.e., does not call
+ * ensureShardVersionOKOrThrow for example.
+ * see also: reset().
*/
- class OldClientContext {
- MONGO_DISALLOW_COPYING(OldClientContext);
- public:
- /** this is probably what you want */
- OldClientContext(OperationContext* txn, const std::string& ns, bool doVersion = true);
-
- /**
- * Below still calls _finishInit, but assumes database has already been acquired
- * or just created.
- */
- OldClientContext(OperationContext* txn,
- const std::string& ns,
- Database* db,
- bool justCreated);
-
- /**
- * note: this does not call _finishInit -- i.e., does not call
- * ensureShardVersionOKOrThrow for example.
- * see also: reset().
- */
- OldClientContext(OperationContext* txn, const std::string& ns, Database * db);
-
- ~OldClientContext();
-
- Database* db() const { return _db; }
- const char* ns() const { return _ns.c_str(); }
-
- /** @return if the db was created by this OldClientContext */
- bool justCreated() const { return _justCreated; }
-
- private:
- friend class CurOp;
- void _finishInit();
- void _checkNotStale() const;
-
- bool _justCreated;
- bool _doVersion;
- const std::string _ns;
- Database * _db;
- OperationContext* _txn;
-
- Timer _timer;
- };
-
-
- class OldClientWriteContext {
- MONGO_DISALLOW_COPYING(OldClientWriteContext);
- public:
- OldClientWriteContext(OperationContext* opCtx, const std::string& ns);
-
- Database* db() const { return _c.db(); }
-
- Collection* getCollection() const {
- return _c.db()->getCollection(_nss.ns());
- }
-
- private:
- OperationContext* const _txn;
- const NamespaceString _nss;
-
- AutoGetOrCreateDb _autodb;
- Lock::CollectionLock _collk;
- OldClientContext _c;
- Collection* _collection;
- };
+ OldClientContext(OperationContext* txn, const std::string& ns, Database* db);
+
+ ~OldClientContext();
+
+ Database* db() const {
+ return _db;
+ }
+ const char* ns() const {
+ return _ns.c_str();
+ }
+
+ /** @return if the db was created by this OldClientContext */
+ bool justCreated() const {
+ return _justCreated;
+ }
+
+private:
+ friend class CurOp;
+ void _finishInit();
+ void _checkNotStale() const;
+
+ bool _justCreated;
+ bool _doVersion;
+ const std::string _ns;
+ Database* _db;
+ OperationContext* _txn;
+
+ Timer _timer;
+};
+
+
+class OldClientWriteContext {
+ MONGO_DISALLOW_COPYING(OldClientWriteContext);
+
+public:
+ OldClientWriteContext(OperationContext* opCtx, const std::string& ns);
+
+ Database* db() const {
+ return _c.db();
+ }
+
+ Collection* getCollection() const {
+ return _c.db()->getCollection(_nss.ns());
+ }
+
+private:
+ OperationContext* const _txn;
+ const NamespaceString _nss;
+
+ AutoGetOrCreateDb _autodb;
+ Lock::CollectionLock _collk;
+ OldClientContext _c;
+ Collection* _collection;
+};
} // namespace mongo
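
For orientation, a minimal usage sketch of the RAII helpers declared above. The helper function names are hypothetical, the snippet assumes an OperationContext* supplied by the command dispatcher, and it only exercises members visible in this header; it is illustrative, not part of this change:

    #include "mongo/db/db_raii.h"

    namespace mongo {
    namespace {

    // Read path: AutoGetCollectionForRead takes the IS locks and resolves the Collection*.
    void readSketch(OperationContext* txn, const std::string& ns) {
        AutoGetCollectionForRead ctx(txn, ns);
        Collection* coll = ctx.getCollection();  // NULL if the collection does not exist
        if (coll) {
            // ... run a query plan against 'coll' while the locks are held ...
        }
        // IS locks are released when 'ctx' goes out of scope.
    }

    // Write path: OldClientWriteContext takes IX locks and creates the database if needed.
    void writeSketch(OperationContext* txn, const std::string& ns) {
        OldClientWriteContext ctx(txn, ns);
        Collection* coll = ctx.getCollection();  // NULL until the collection is created
        // ... perform writes through 'coll' or create it via the catalog ...
    }

    }  // namespace
    }  // namespace mongo
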
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 8369de72ec8..935b4ec41f6 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -110,1224 +110,1219 @@
namespace mongo {
- using std::endl;
- using std::ostringstream;
- using std::string;
- using std::stringstream;
- using std::unique_ptr;
-
- class CmdShutdownMongoD : public CmdShutdown {
- public:
- virtual void help(stringstream& help) const {
- help << "shutdown the database. must be ran against admin db and "
- << "either (1) ran from localhost or (2) authenticated. If "
- << "this is a primary in a replica set and there is no member "
- << "within 10 seconds of its optime, it will not shutdown "
- << "without force : true. You can also specify timeoutSecs : "
- << "N to wait N seconds for other members to catch up.";
- }
+using std::endl;
+using std::ostringstream;
+using std::string;
+using std::stringstream;
+using std::unique_ptr;
+
+class CmdShutdownMongoD : public CmdShutdown {
+public:
+ virtual void help(stringstream& help) const {
+ help << "shutdown the database. must be ran against admin db and "
+ << "either (1) ran from localhost or (2) authenticated. If "
+ << "this is a primary in a replica set and there is no member "
+ << "within 10 seconds of its optime, it will not shutdown "
+ << "without force : true. You can also specify timeoutSecs : "
+ << "N to wait N seconds for other members to catch up.";
+ }
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
-
- long long timeoutSecs = 0;
- if (cmdObj.hasField("timeoutSecs")) {
- timeoutSecs = cmdObj["timeoutSecs"].numberLong();
- }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
- Status status = repl::getGlobalReplicationCoordinator()->stepDown(
- txn,
- force,
- Seconds(timeoutSecs),
- Seconds(120));
- if (!status.isOK() && status.code() != ErrorCodes::NotMaster) { // ignore not master
- return appendCommandStatus(result, status);
- }
+ long long timeoutSecs = 0;
+ if (cmdObj.hasField("timeoutSecs")) {
+ timeoutSecs = cmdObj["timeoutSecs"].numberLong();
+ }
- // Never returns
- shutdownHelper();
- return true;
+ Status status = repl::getGlobalReplicationCoordinator()->stepDown(
+ txn, force, Seconds(timeoutSecs), Seconds(120));
+ if (!status.isOK() && status.code() != ErrorCodes::NotMaster) { // ignore not master
+ return appendCommandStatus(result, status);
}
- } cmdShutdownMongoD;
+ // Never returns
+ shutdownHelper();
+ return true;
+ }
- class CmdDropDatabase : public Command {
- public:
- virtual void help( stringstream& help ) const {
- help << "drop (delete) this database";
- }
- virtual bool slaveOk() const {
- return false;
- }
+} cmdShutdownMongoD;
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dropDatabase);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
+class CmdDropDatabase : public Command {
+public:
+ virtual void help(stringstream& help) const {
+ help << "drop (delete) this database";
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- CmdDropDatabase() : Command("dropDatabase") {}
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- // disallow dropping the config database
- if (serverGlobalParams.configsvr && (dbname == "config")) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot drop 'config' database if mongod started "
- "with --configsvr"));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dropDatabase);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+ }
- if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeNone) &&
- (dbname == "local")) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot drop 'local' database while replication "
- "is active"));
- }
- BSONElement e = cmdObj.firstElement();
- int p = (int) e.number();
- if ( p != 1 ) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "have to pass 1 as db parameter"));
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- Status status = dropDatabase(txn, dbname);
- if (status == ErrorCodes::DatabaseNotFound) {
- return appendCommandStatus(result, Status::OK());
- }
- if (status.isOK()) {
- result.append( "dropped" , dbname );
- }
- return appendCommandStatus(result, status);
- }
+ CmdDropDatabase() : Command("dropDatabase") {}
- } cmdDropDatabase;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ // disallow dropping the config database
+ if (serverGlobalParams.configsvr && (dbname == "config")) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::IllegalOperation,
+ "Cannot drop 'config' database if mongod started "
+ "with --configsvr"));
+ }
- class CmdRepairDatabase : public Command {
- public:
- virtual bool slaveOk() const {
- return true;
+ if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeNone) &&
+ (dbname == "local")) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::IllegalOperation,
+ "Cannot drop 'local' database while replication "
+ "is active"));
}
- virtual bool maintenanceMode() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "repair database. also compacts. note: slow.";
+ BSONElement e = cmdObj.firstElement();
+ int p = (int)e.number();
+ if (p != 1) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::IllegalOperation, "have to pass 1 as db parameter"));
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::repairDatabase);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+ Status status = dropDatabase(txn, dbname);
+ if (status == ErrorCodes::DatabaseNotFound) {
+ return appendCommandStatus(result, Status::OK());
+ }
+ if (status.isOK()) {
+ result.append("dropped", dbname);
}
+ return appendCommandStatus(result, status);
+ }
- CmdRepairDatabase() : Command("repairDatabase") {
+} cmdDropDatabase;
- }
+class CmdRepairDatabase : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool maintenanceMode() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "repair database. also compacts. note: slow.";
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- BSONElement e = cmdObj.firstElement();
- if ( e.numberInt() != 1 ) {
- errmsg = "bad option";
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- // TODO: SERVER-4328 Don't lock globally
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- OldClientContext context(txn, dbname);
-
- log() << "repairDatabase " << dbname;
- BackgroundOperation::assertNoBgOpInProgForDb(dbname);
-
- e = cmdObj.getField( "preserveClonedFilesOnFailure" );
- bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
- e = cmdObj.getField( "backupOriginalFiles" );
- bool backupOriginalFiles = e.isBoolean() && e.boolean();
-
- StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- Status status = repairDatabase(txn, engine, dbname, preserveClonedFilesOnFailure,
- backupOriginalFiles );
-
- // Open database before returning
- dbHolder().openDb(txn, dbname);
- return appendCommandStatus( result, status );
- }
- } cmdRepairDatabase;
-
- /* set db profiling level
- todo: how do we handle profiling information put in the db with replication?
- sensibly or not?
- */
- class CmdProfile : public Command {
- public:
- virtual bool slaveOk() const {
- return true;
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::repairDatabase);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+ }
- virtual void help( stringstream& help ) const {
- help << "enable or disable performance profiling\n";
- help << "{ profile : <n> }\n";
- help << "0=off 1=log slow ops 2=log all\n";
- help << "-1 to get current values\n";
- help << "http://docs.mongodb.org/manual/reference/command/profile/#dbcmd.profile";
+ CmdRepairDatabase() : Command("repairDatabase") {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONElement e = cmdObj.firstElement();
+ if (e.numberInt() != 1) {
+ errmsg = "bad option";
+ return false;
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
-
- if (cmdObj.firstElement().numberInt() == -1 && !cmdObj.hasField("slowms")) {
- // If you just want to get the current profiling level you can do so with just
- // read access to system.profile, even if you can't change the profiling level.
- if (authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(dbname,
- "system.profile")),
- ActionType::find)) {
- return Status::OK();
- }
- }
+ // TODO: SERVER-4328 Don't lock globally
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ OldClientContext context(txn, dbname);
- if (authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::enableProfiler)) {
- return Status::OK();
- }
+ log() << "repairDatabase " << dbname;
+ BackgroundOperation::assertNoBgOpInProgForDb(dbname);
- return Status(ErrorCodes::Unauthorized, "unauthorized");
- }
+ e = cmdObj.getField("preserveClonedFilesOnFailure");
+ bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
+ e = cmdObj.getField("backupOriginalFiles");
+ bool backupOriginalFiles = e.isBoolean() && e.boolean();
- CmdProfile() : Command("profile") {
+ StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+ Status status =
+ repairDatabase(txn, engine, dbname, preserveClonedFilesOnFailure, backupOriginalFiles);
- }
+ // Open database before returning
+ dbHolder().openDb(txn, dbname);
+ return appendCommandStatus(result, status);
+ }
+} cmdRepairDatabase;
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- // Needs to be locked exclusively, because creates the system.profile collection
- // in the local database.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, dbname);
-
- BSONElement e = cmdObj.firstElement();
- result.append("was", ctx.db()->getProfilingLevel());
- result.append("slowms", serverGlobalParams.slowMS);
-
- int p = (int) e.number();
- Status status = Status::OK();
-
- if (p == -1)
- status = Status::OK();
- else if ( p >= 0 && p <= 2 ) {
- status = ctx.db()->setProfilingLevel(txn, p);
- }
+/* set db profiling level
+ todo: how do we handle profiling information put in the db with replication?
+ sensibly or not?
+*/
+class CmdProfile : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
- const BSONElement slow = cmdObj["slowms"];
- if (slow.isNumber()) {
- serverGlobalParams.slowMS = slow.numberInt();
- }
+ virtual void help(stringstream& help) const {
+ help << "enable or disable performance profiling\n";
+ help << "{ profile : <n> }\n";
+ help << "0=off 1=log slow ops 2=log all\n";
+ help << "-1 to get current values\n";
+ help << "http://docs.mongodb.org/manual/reference/command/profile/#dbcmd.profile";
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- if (!status.isOK()) {
- errmsg = status.reason();
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+
+ if (cmdObj.firstElement().numberInt() == -1 && !cmdObj.hasField("slowms")) {
+ // If you just want to get the current profiling level you can do so with just
+ // read access to system.profile, even if you can't change the profiling level.
+ if (authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(dbname, "system.profile")),
+ ActionType::find)) {
+ return Status::OK();
}
+ }
- return status.isOK();
+ if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
+ ActionType::enableProfiler)) {
+ return Status::OK();
}
- } cmdProfile;
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
- class CmdDiagLogging : public Command {
- public:
- virtual bool slaveOk() const {
- return true;
- }
- CmdDiagLogging() : Command("diagLogging") { }
- bool adminOnly() const {
- return true;
- }
+ CmdProfile() : Command("profile") {}
- void help(stringstream& h) const { h << "http://dochub.mongodb.org/core/monitoring#MonitoringandDiagnostics-DatabaseRecord%2FReplay%28diagLoggingcommand%29"; }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ // Needs to be locked exclusively, because creates the system.profile collection
+ // in the local database.
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ OldClientContext ctx(txn, dbname);
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ BSONElement e = cmdObj.firstElement();
+ result.append("was", ctx.db()->getProfilingLevel());
+ result.append("slowms", serverGlobalParams.slowMS);
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::diagLogging);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
+ int p = (int)e.number();
+ Status status = Status::OK();
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const char* deprecationWarning =
- "CMD diagLogging is deprecated and will be removed in a future release";
- warning() << deprecationWarning << startupWarningsLog;
-
- // This doesn't look like it requires exclusive DB lock, because it uses its own diag
- // locking, but originally the lock was set to be WRITE, so preserving the behaviour.
- //
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, dbname);
-
- int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
- _diaglog.flush();
- if (!serverGlobalParams.quiet) {
- LOG(0) << "CMD: diagLogging set to " << _diaglog.getLevel() << " from: " << was << endl;
- }
- result.append( "was" , was );
- result.append( "note", deprecationWarning );
- return true;
+ if (p == -1)
+ status = Status::OK();
+ else if (p >= 0 && p <= 2) {
+ status = ctx.db()->setProfilingLevel(txn, p);
}
- } cmddiaglogging;
- /* drop collection */
- class CmdDrop : public Command {
- public:
- CmdDrop() : Command("drop") { }
- virtual bool slaveOk() const {
- return false;
+ const BSONElement slow = cmdObj["slowms"];
+ if (slow.isNumber()) {
+ serverGlobalParams.slowMS = slow.numberInt();
}
- virtual bool adminOnly() const {
- return false;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dropCollection);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+
+ if (!status.isOK()) {
+ errmsg = status.reason();
}
- virtual void help( stringstream& help ) const { help << "drop a collection\n{drop : <collectionName>}"; }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ return status.isOK();
+ }
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string nsToDrop = parseNsCollectionRequired(dbname, cmdObj);
+} cmdProfile;
- if (nsToDrop.find('$') != string::npos) {
- errmsg = "can't drop collection with reserved $ character in name";
- return false;
- }
+class CmdDiagLogging : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ CmdDiagLogging() : Command("diagLogging") {}
+ bool adminOnly() const {
+ return true;
+ }
- if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeNone) &&
- NamespaceString(nsToDrop).isOplog()) {
- errmsg = "can't drop live oplog while replicating";
- return false;
- }
+ void help(stringstream& h) const {
+ h << "http://dochub.mongodb.org/core/"
+ "monitoring#MonitoringandDiagnostics-DatabaseRecord%2FReplay%28diagLoggingcommand%29";
+ }
- return appendCommandStatus(result,
- dropCollection(txn, NamespaceString(nsToDrop), result));
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::diagLogging);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const char* deprecationWarning =
+ "CMD diagLogging is deprecated and will be removed in a future release";
+ warning() << deprecationWarning << startupWarningsLog;
+
+ // This doesn't look like it requires exclusive DB lock, because it uses its own diag
+ // locking, but originally the lock was set to be WRITE, so preserving the behaviour.
+ //
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ OldClientContext ctx(txn, dbname);
+
+ int was = _diaglog.setLevel(cmdObj.firstElement().numberInt());
+ _diaglog.flush();
+ if (!serverGlobalParams.quiet) {
+ LOG(0) << "CMD: diagLogging set to " << _diaglog.getLevel() << " from: " << was << endl;
+ }
+ result.append("was", was);
+ result.append("note", deprecationWarning);
+ return true;
+ }
+} cmddiaglogging;
+
+/* drop collection */
+class CmdDrop : public Command {
+public:
+ CmdDrop() : Command("drop") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dropCollection);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+ virtual void help(stringstream& help) const {
+ help << "drop a collection\n{drop : <collectionName>}";
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- } cmdDrop;
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string nsToDrop = parseNsCollectionRequired(dbname, cmdObj);
- /* create collection */
- class CmdCreate : public Command {
- public:
- CmdCreate() : Command("create") { }
- virtual bool slaveOk() const {
+ if (nsToDrop.find('$') != string::npos) {
+ errmsg = "can't drop collection with reserved $ character in name";
return false;
}
- virtual bool adminOnly() const {
+
+ if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeNone) &&
+ NamespaceString(nsToDrop).isOplog()) {
+ errmsg = "can't drop live oplog while replicating";
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ return appendCommandStatus(result, dropCollection(txn, NamespaceString(nsToDrop), result));
+ }
- virtual void help( stringstream& help ) const {
- help << "create a collection explicitly\n"
- "{ create: <ns>[, capped: <bool>, size: <collSizeInBytes>, max: <nDocs>] }";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- if (cmdObj["capped"].trueValue()) {
- if (!authzSession->isAuthorizedForActionsOnResource(
- parseResourcePattern(dbname, cmdObj), ActionType::convertToCapped)) {
- return Status(ErrorCodes::Unauthorized, "unauthorized");
- }
- }
+} cmdDrop;
- // ActionType::createCollection or ActionType::insert are both acceptable
- if (authzSession->isAuthorizedForActionsOnResource(
- parseResourcePattern(dbname, cmdObj), ActionType::createCollection) ||
- authzSession->isAuthorizedForActionsOnResource(
- parseResourcePattern(dbname, cmdObj), ActionType::insert)) {
- return Status::OK();
- }
+/* create collection */
+class CmdCreate : public Command {
+public:
+ CmdCreate() : Command("create") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ virtual void help(stringstream& help) const {
+ help << "create a collection explicitly\n"
+ "{ create: <ns>[, capped: <bool>, size: <collSizeInBytes>, max: <nDocs>] }";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (cmdObj["capped"].trueValue()) {
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ parseResourcePattern(dbname, cmdObj), ActionType::convertToCapped)) {
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- return appendCommandStatus(result,
- createCollection(txn, dbname, cmdObj));
+
+ // ActionType::createCollection or ActionType::insert are both acceptable
+ if (authzSession->isAuthorizedForActionsOnResource(parseResourcePattern(dbname, cmdObj),
+ ActionType::createCollection) ||
+ authzSession->isAuthorizedForActionsOnResource(parseResourcePattern(dbname, cmdObj),
+ ActionType::insert)) {
+ return Status::OK();
}
- } cmdCreate;
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ return appendCommandStatus(result, createCollection(txn, dbname, cmdObj));
+ }
+} cmdCreate;
- class CmdFileMD5 : public Command {
- public:
- CmdFileMD5() : Command( "filemd5" ) {
- }
+class CmdFileMD5 : public Command {
+public:
+ CmdFileMD5() : Command("filemd5") {}
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual void help( stringstream& help ) const {
- help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
- }
+ virtual void help(stringstream& help) const {
+ help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
+ }
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- std::string collectionName = cmdObj.getStringField("root");
- if (collectionName.empty())
- collectionName = "fs";
- collectionName += ".chunks";
- return NamespaceString(dbname, collectionName).ns();
- }
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ std::string collectionName = cmdObj.getStringField("root");
+ if (collectionName.empty())
+ collectionName = "fs";
+ collectionName += ".chunks";
+ return NamespaceString(dbname, collectionName).ns();
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), ActionType::find));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), ActionType::find));
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNs(dbname, jsobj);
-
- md5digest d;
- md5_state_t st;
- md5_init(&st);
-
- int n = 0;
-
- bool partialOk = jsobj["partialOk"].trueValue();
- if (partialOk) {
- // WARNING: This code depends on the binary layout of md5_state. It will not be
- // compatible with different md5 libraries or work correctly in an environment with
- // mongod's of different endians. It is ok for mongos to be a different endian since
- // it just passes the buffer through to another mongod.
- BSONElement stateElem = jsobj["md5state"];
- if (!stateElem.eoo()){
- int len;
- const char* data = stateElem.binDataClean(len);
- massert(16247, "md5 state not correct size", len == sizeof(st));
- memcpy(&st, data, sizeof(st));
- }
- n = jsobj["startAt"].numberInt();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNs(dbname, jsobj);
+
+ md5digest d;
+ md5_state_t st;
+ md5_init(&st);
+
+ int n = 0;
+
+ bool partialOk = jsobj["partialOk"].trueValue();
+ if (partialOk) {
+ // WARNING: This code depends on the binary layout of md5_state. It will not be
+ // compatible with different md5 libraries or work correctly in an environment with
+ // mongods of different endianness. It is ok for mongos to have a different endianness since
+ // it just passes the buffer through to another mongod.
+ BSONElement stateElem = jsobj["md5state"];
+ if (!stateElem.eoo()) {
+ int len;
+ const char* data = stateElem.binDataClean(len);
+ massert(16247, "md5 state not correct size", len == sizeof(st));
+ memcpy(&st, data, sizeof(st));
}
+ n = jsobj["startAt"].numberInt();
+ }
- BSONObj query = BSON( "files_id" << jsobj["filemd5"] << "n" << GTE << n );
- BSONObj sort = BSON( "files_id" << 1 << "n" << 1 );
+ BSONObj query = BSON("files_id" << jsobj["filemd5"] << "n" << GTE << n);
+ BSONObj sort = BSON("files_id" << 1 << "n" << 1);
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- CanonicalQuery* cq;
- if (!CanonicalQuery::canonicalize(ns, query, sort, BSONObj(), &cq).isOK()) {
- uasserted(17240, "Can't canonicalize query " + query.toString());
- return 0;
- }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ CanonicalQuery* cq;
+ if (!CanonicalQuery::canonicalize(ns, query, sort, BSONObj(), &cq).isOK()) {
+ uasserted(17240, "Can't canonicalize query " + query.toString());
+ return 0;
+ }
- // Check shard version at startup.
- // This will throw before we've done any work if shard version is outdated
- // We drop and re-acquire these locks every document because md5'ing is expensive
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(txn, ns));
- Collection* coll = ctx->getCollection();
-
- PlanExecutor* rawExec;
- if (!getExecutor(txn, coll, cq, PlanExecutor::YIELD_MANUAL, &rawExec,
- QueryPlannerParams::NO_TABLE_SCAN).isOK()) {
- uasserted(17241, "Can't get executor for query " + query.toString());
- return 0;
- }
+ // Check shard version at startup.
+ // This will throw before we've done any work if shard version is outdated
+ // We drop and re-acquire these locks every document because md5'ing is expensive
+ unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(txn, ns));
+ Collection* coll = ctx->getCollection();
+
+ PlanExecutor* rawExec;
+ if (!getExecutor(txn,
+ coll,
+ cq,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec,
+ QueryPlannerParams::NO_TABLE_SCAN).isOK()) {
+ uasserted(17241, "Can't get executor for query " + query.toString());
+ return 0;
+ }
- unique_ptr<PlanExecutor> exec(rawExec);
- // Process notifications when the lock is released/reacquired in the loop below
- exec->registerExec();
-
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- BSONElement ne = obj["n"];
- verify(ne.isNumber());
- int myn = ne.numberInt();
- if ( n != myn ) {
- if (partialOk) {
- break; // skipped chunk is probably on another shard
- }
- log() << "should have chunk: " << n << " have:" << myn << endl;
- dumpChunks(txn, ns, query, sort);
- uassert( 10040 , "chunks out of order" , n == myn );
- }
+ unique_ptr<PlanExecutor> exec(rawExec);
+ // Process notifications when the lock is released/reacquired in the loop below
+ exec->registerExec();
- // make a copy of obj since we access data in it while yielding locks
- BSONObj owned = obj.getOwned();
- exec->saveState();
- // UNLOCKED
- ctx.reset();
-
- int len;
- const char * data = owned["data"].binDataClean( len );
- // This is potentially an expensive operation, so do it out of the lock
- md5_append( &st , (const md5_byte_t*)(data) , len );
- n++;
-
- try {
- // RELOCKED
- ctx.reset(new AutoGetCollectionForRead(txn, ns));
- }
- catch (const SendStaleConfigException& ex) {
- LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
- break;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ BSONElement ne = obj["n"];
+ verify(ne.isNumber());
+ int myn = ne.numberInt();
+ if (n != myn) {
+ if (partialOk) {
+ break; // skipped chunk is probably on another shard
}
+ log() << "should have chunk: " << n << " have:" << myn << endl;
+ dumpChunks(txn, ns, query, sort);
+ uassert(10040, "chunks out of order", n == myn);
+ }
+
+ // make a copy of obj since we access data in it while yielding locks
+ BSONObj owned = obj.getOwned();
+ exec->saveState();
+ // UNLOCKED
+ ctx.reset();
+
+ int len;
+ const char* data = owned["data"].binDataClean(len);
+ // This is potentially an expensive operation, so do it out of the lock
+ md5_append(&st, (const md5_byte_t*)(data), len);
+ n++;
+
+ try {
+ // RELOCKED
+ ctx.reset(new AutoGetCollectionForRead(txn, ns));
+ } catch (const SendStaleConfigException& ex) {
+ LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
+ break;
+ }
- // Have the lock again. See if we were killed.
- if (!exec->restoreState(txn)) {
- if (!partialOk) {
- uasserted(13281, "File deleted during filemd5 command");
- }
+ // Have the lock again. See if we were killed.
+ if (!exec->restoreState(txn)) {
+ if (!partialOk) {
+ uasserted(13281, "File deleted during filemd5 command");
}
}
+ }
- if (partialOk)
- result.appendBinData("md5state", sizeof(st), BinDataGeneral, &st);
+ if (partialOk)
+ result.appendBinData("md5state", sizeof(st), BinDataGeneral, &st);
- // This must be *after* the capture of md5state since it mutates st
- md5_finish(&st, d);
+ // This must be *after* the capture of md5state since it mutates st
+ md5_finish(&st, d);
- result.append( "numChunks" , n );
- result.append( "md5" , digestToString( d ) );
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "filemd5", dbname);
- return true;
+ result.append("numChunks", n);
+ result.append("md5", digestToString(d));
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "filemd5", dbname);
+ return true;
+ }
- void dumpChunks(OperationContext* txn,
- const string& ns,
- const BSONObj& query,
- const BSONObj& sort) {
- DBDirectClient client(txn);
- Query q(query);
- q.sort(sort);
- unique_ptr<DBClientCursor> c = client.query(ns, q);
- while(c->more())
- PRINT(c->nextSafe());
- }
- } cmdFileMD5;
+ void dumpChunks(OperationContext* txn,
+ const string& ns,
+ const BSONObj& query,
+ const BSONObj& sort) {
+ DBDirectClient client(txn);
+ Query q(query);
+ q.sort(sort);
+ unique_ptr<DBClientCursor> c = client.query(ns, q);
+ while (c->more())
+ PRINT(c->nextSafe());
+ }
+} cmdFileMD5;
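
The partialOk/md5state handling above checkpoints the raw md5_state_t bytes so a sharded filemd5 can be resumed on the next call, which is exactly why the WARNING restricts it to identically built, same-endian mongod binaries. Below is a minimal stand-alone sketch of the same checkpoint/resume idea, using a toy digest instead of the real md5 library; ToyDigestState, toyAppend, checkpoint, and resume are illustrative names only, not MongoDB or md5 APIs.

// Illustrative only: a stand-in incremental "digest" showing how an
// intermediate state can be handed back as an opaque byte blob and resumed
// later, the way filemd5 round-trips "md5state" when partialOk is set.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

struct ToyDigestState {  // stand-in for md5_state_t
    uint64_t sum = 0;
    uint64_t count = 0;
};

void toyAppend(ToyDigestState* st, const char* data, std::size_t len) {
    for (std::size_t i = 0; i < len; ++i) {
        st->sum = st->sum * 131 + static_cast<unsigned char>(data[i]);
        ++st->count;
    }
}

// Checkpoint: serialize the state as raw bytes, as the command does with memcpy.
std::vector<char> checkpoint(const ToyDigestState& st) {
    std::vector<char> out(sizeof(st));
    std::memcpy(out.data(), &st, sizeof(st));
    return out;
}

// Resume from a previously returned blob (size-checked, like the massert above).
ToyDigestState resume(const std::vector<char>& blob) {
    ToyDigestState st;
    if (blob.size() == sizeof(st))
        std::memcpy(&st, blob.data(), sizeof(st));
    return st;
}

int main() {
    ToyDigestState st;
    toyAppend(&st, "chunk-0", 7);             // first call hashes its chunks...
    std::vector<char> blob = checkpoint(st);  // ...and hands the state back

    ToyDigestState resumed = resume(blob);    // next call picks up where it left off
    toyAppend(&resumed, "chunk-1", 7);
    std::cout << resumed.count << " bytes digested, sum=" << resumed.sum << "\n";
    return 0;
}
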
- class CmdDatasize : public Command {
- virtual string parseNs(const string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
- }
- public:
- CmdDatasize() : Command( "dataSize", false, "datasize" ) {
+class CmdDatasize : public Command {
+ virtual string parseNs(const string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
- }
+public:
+ CmdDatasize() : Command("dataSize", false, "datasize") {}
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help <<
- "determine data size for a set of data in a certain range"
- "\nexample: { dataSize:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }"
- "\nmin and max parameters are optional. They must either both be included or both omitted"
- "\nkeyPattern is an optional parameter indicating an index pattern that would be useful"
- "for iterating over the min/max bounds. If keyPattern is omitted, it is inferred from "
- "the structure of min. "
- "\nnote: This command may take a while to run";
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "determine data size for a set of data in a certain range"
+ "\nexample: { dataSize:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }"
+ "\nmin and max parameters are optional. They must either both be included or both "
+ "omitted"
+ "\nkeyPattern is an optional parameter indicating an index pattern that would be "
+             "useful "
+ "for iterating over the min/max bounds. If keyPattern is omitted, it is inferred "
+ "from "
+ "the structure of min. "
+ "\nnote: This command may take a while to run";
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Timer timer;
-
- string ns = jsobj.firstElement().String();
- BSONObj min = jsobj.getObjectField( "min" );
- BSONObj max = jsobj.getObjectField( "max" );
- BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
- bool estimate = jsobj["estimate"].trueValue();
-
- AutoGetCollectionForRead ctx(txn, ns);
-
- Collection* collection = ctx.getCollection();
-
- if ( !collection || collection->numRecords(txn) == 0 ) {
- result.appendNumber( "size" , 0 );
- result.appendNumber( "numObjects" , 0 );
- result.append( "millis" , timer.millis() );
- return true;
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer timer;
- result.appendBool( "estimate" , estimate );
+ string ns = jsobj.firstElement().String();
+ BSONObj min = jsobj.getObjectField("min");
+ BSONObj max = jsobj.getObjectField("max");
+ BSONObj keyPattern = jsobj.getObjectField("keyPattern");
+ bool estimate = jsobj["estimate"].trueValue();
- unique_ptr<PlanExecutor> exec;
- if ( min.isEmpty() && max.isEmpty() ) {
- if ( estimate ) {
- result.appendNumber( "size" , static_cast<long long>(collection->dataSize(txn)) );
- result.appendNumber( "numObjects",
- static_cast<long long>( collection->numRecords(txn) ) );
- result.append( "millis" , timer.millis() );
- return 1;
- }
- exec.reset(InternalPlanner::collectionScan(txn, ns,collection));
- }
- else if ( min.isEmpty() || max.isEmpty() ) {
- errmsg = "only one of min or max specified";
- return false;
- }
- else {
+ AutoGetCollectionForRead ctx(txn, ns);
- if ( keyPattern.isEmpty() ){
- // if keyPattern not provided, try to infer it from the fields in 'min'
- keyPattern = Helpers::inferKeyPattern( min );
- }
+ Collection* collection = ctx.getCollection();
- IndexDescriptor* idx = collection->getIndexCatalog()->findShardKeyPrefixedIndex(
- txn,
- keyPattern,
- true ); // requireSingleKey
+ if (!collection || collection->numRecords(txn) == 0) {
+ result.appendNumber("size", 0);
+ result.appendNumber("numObjects", 0);
+ result.append("millis", timer.millis());
+ return true;
+ }
- if ( idx == NULL ) {
- errmsg = "couldn't find valid index containing key pattern";
- return false;
- }
- // If both min and max non-empty, append MinKey's to make them fit chosen index
- KeyPattern kp( idx->keyPattern() );
- min = Helpers::toKeyFormat( kp.extendRangeBound( min, false ) );
- max = Helpers::toKeyFormat( kp.extendRangeBound( max, false ) );
+ result.appendBool("estimate", estimate);
- exec.reset(InternalPlanner::indexScan(txn, collection, idx, min, max, false));
+ unique_ptr<PlanExecutor> exec;
+ if (min.isEmpty() && max.isEmpty()) {
+ if (estimate) {
+ result.appendNumber("size", static_cast<long long>(collection->dataSize(txn)));
+ result.appendNumber("numObjects",
+ static_cast<long long>(collection->numRecords(txn)));
+ result.append("millis", timer.millis());
+ return 1;
+ }
+ exec.reset(InternalPlanner::collectionScan(txn, ns, collection));
+ } else if (min.isEmpty() || max.isEmpty()) {
+ errmsg = "only one of min or max specified";
+ return false;
+ } else {
+ if (keyPattern.isEmpty()) {
+ // if keyPattern not provided, try to infer it from the fields in 'min'
+ keyPattern = Helpers::inferKeyPattern(min);
}
- long long avgObjSize = collection->dataSize(txn) / collection->numRecords(txn);
+ IndexDescriptor* idx =
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
+ keyPattern,
+ true); // requireSingleKey
- long long maxSize = jsobj["maxSize"].numberLong();
- long long maxObjects = jsobj["maxObjects"].numberLong();
+ if (idx == NULL) {
+ errmsg = "couldn't find valid index containing key pattern";
+ return false;
+ }
+ // If both min and max non-empty, append MinKey's to make them fit chosen index
+ KeyPattern kp(idx->keyPattern());
+ min = Helpers::toKeyFormat(kp.extendRangeBound(min, false));
+ max = Helpers::toKeyFormat(kp.extendRangeBound(max, false));
- long long size = 0;
- long long numObjects = 0;
+ exec.reset(InternalPlanner::indexScan(txn, collection, idx, min, max, false));
+ }
- RecordId loc;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
- if ( estimate )
- size += avgObjSize;
- else
- size += collection->getRecordStore()->dataFor(txn, loc).size();
+ long long avgObjSize = collection->dataSize(txn) / collection->numRecords(txn);
- numObjects++;
+ long long maxSize = jsobj["maxSize"].numberLong();
+ long long maxObjects = jsobj["maxObjects"].numberLong();
- if ( ( maxSize && size > maxSize ) ||
- ( maxObjects && numObjects > maxObjects ) ) {
- result.appendBool( "maxReached" , true );
- break;
- }
- }
+ long long size = 0;
+ long long numObjects = 0;
- if (PlanExecutor::IS_EOF != state) {
- warning() << "Internal error while reading " << ns << endl;
- }
+ RecordId loc;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ if (estimate)
+ size += avgObjSize;
+ else
+ size += collection->getRecordStore()->dataFor(txn, loc).size();
- ostringstream os;
- os << "Finding size for ns: " << ns;
- if ( ! min.isEmpty() ) {
- os << " between " << min << " and " << max;
- }
- logIfSlow( timer , os.str() );
+ numObjects++;
- result.appendNumber( "size", size );
- result.appendNumber( "numObjects" , numObjects );
- result.append( "millis" , timer.millis() );
- return true;
+ if ((maxSize && size > maxSize) || (maxObjects && numObjects > maxObjects)) {
+ result.appendBool("maxReached", true);
+ break;
+ }
}
- } cmdDatasize;
-
- class CollectionStats : public Command {
- public:
- CollectionStats() : Command( "collStats", false, "collstats" ) {
-
+ if (PlanExecutor::IS_EOF != state) {
+ warning() << "Internal error while reading " << ns << endl;
}
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help << "{ collStats:\"blog.posts\" , scale : 1 } scale divides sizes e.g. for KB use 1024\n"
- " avgObjSize - in bytes";
+ ostringstream os;
+ os << "Finding size for ns: " << ns;
+ if (!min.isEmpty()) {
+ os << " between " << min << " and " << max;
}
+ logIfSlow(timer, os.str());
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::collStats);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
+ result.appendNumber("size", size);
+ result.appendNumber("numObjects", numObjects);
+ result.append("millis", timer.millis());
+ return true;
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- int scale = 1;
- if ( jsobj["scale"].isNumber() ) {
- scale = jsobj["scale"].numberInt();
- if ( scale <= 0 ) {
- errmsg = "scale has to be >= 1";
- return false;
- }
- }
- else if ( jsobj["scale"].trueValue() ) {
- errmsg = "scale has to be a number >= 1";
- return false;
- }
+} cmdDatasize;
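
A compact restatement of the accumulation loop in CmdDatasize::run above: for each record it adds either the collection's average object size (estimate:true) or the record's actual size, and it stops early once the optional maxSize/maxObjects limits are exceeded. This is a plain-C++ sketch with invented record sizes, not MongoDB code.

// Sketch of the dataSize accumulation loop; accumulate() and the sample
// record sizes are illustrative, the limit semantics (0 = "no limit") match
// the command above.
#include <cstdint>
#include <iostream>
#include <vector>

struct ScanResult {
    int64_t size = 0;
    int64_t numObjects = 0;
    bool maxReached = false;
};

ScanResult accumulate(const std::vector<int64_t>& recordSizes,
                      bool estimate,
                      int64_t avgObjSize,
                      int64_t maxSize,       // 0 means no limit, as in the command
                      int64_t maxObjects) {  // 0 means no limit
    ScanResult r;
    for (int64_t recSize : recordSizes) {
        r.size += estimate ? avgObjSize : recSize;  // estimated vs. exact size
        ++r.numObjects;
        if ((maxSize && r.size > maxSize) || (maxObjects && r.numObjects > maxObjects)) {
            r.maxReached = true;  // corresponds to result.appendBool("maxReached", true)
            break;
        }
    }
    return r;
}

int main() {
    std::vector<int64_t> sizes = {120, 90, 300, 45};
    ScanResult exact = accumulate(sizes, false, 0, 0, 0);
    ScanResult capped = accumulate(sizes, true, 100, 250, 0);
    std::cout << exact.size << " bytes exact; capped scan stopped early: "
              << std::boolalpha << capped.maxReached << "\n";
    return 0;
}
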
- bool verbose = jsobj["verbose"].trueValue();
+class CollectionStats : public Command {
+public:
+ CollectionStats() : Command("collStats", false, "collstats") {}
- const NamespaceString nss(parseNs(dbname, jsobj));
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help
+ << "{ collStats:\"blog.posts\" , scale : 1 } scale divides sizes e.g. for KB use 1024\n"
+ " avgObjSize - in bytes";
+ }
- if (nss.coll().empty()) {
- errmsg = "No collection name specified";
- return false;
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::collStats);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
- AutoGetCollectionForRead ctx(txn, nss);
- if (!ctx.getDb()) {
- errmsg = "Database [" + nss.db().toString() + "] not found.";
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ int scale = 1;
+ if (jsobj["scale"].isNumber()) {
+ scale = jsobj["scale"].numberInt();
+ if (scale <= 0) {
+ errmsg = "scale has to be >= 1";
return false;
}
+ } else if (jsobj["scale"].trueValue()) {
+ errmsg = "scale has to be a number >= 1";
+ return false;
+ }
- Collection* collection = ctx.getCollection();
- if (!collection) {
- errmsg = "Collection [" + nss.toString() + "] not found.";
- return false;
- }
+ bool verbose = jsobj["verbose"].trueValue();
- result.append( "ns" , nss );
+ const NamespaceString nss(parseNs(dbname, jsobj));
- long long size = collection->dataSize(txn) / scale;
- long long numRecords = collection->numRecords(txn);
- result.appendNumber( "count" , numRecords );
- result.appendNumber( "size" , size );
- if( numRecords )
- result.append( "avgObjSize" , collection->averageObjectSize(txn) );
+ if (nss.coll().empty()) {
+ errmsg = "No collection name specified";
+ return false;
+ }
- result.appendNumber("storageSize",
- static_cast<long long>(collection->getRecordStore()
- ->storageSize(txn,
- &result,
- verbose ? 1 : 0)) / scale);
+ AutoGetCollectionForRead ctx(txn, nss);
+ if (!ctx.getDb()) {
+ errmsg = "Database [" + nss.db().toString() + "] not found.";
+ return false;
+ }
- collection->getRecordStore()->appendCustomStats( txn, &result, scale );
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ errmsg = "Collection [" + nss.toString() + "] not found.";
+ return false;
+ }
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
- result.append( "nindexes" , indexCatalog->numIndexesReady( txn ) );
+ result.append("ns", nss);
- // indexes
- BSONObjBuilder indexDetails;
+ long long size = collection->dataSize(txn) / scale;
+ long long numRecords = collection->numRecords(txn);
+ result.appendNumber("count", numRecords);
+ result.appendNumber("size", size);
+ if (numRecords)
+ result.append("avgObjSize", collection->averageObjectSize(txn));
- IndexCatalog::IndexIterator i = indexCatalog->getIndexIterator(txn, false);
- while (i.more()) {
- const IndexDescriptor* descriptor = i.next();
- IndexAccessMethod* iam = indexCatalog->getIndex(descriptor);
- invariant(iam);
+ result.appendNumber("storageSize",
+ static_cast<long long>(collection->getRecordStore()->storageSize(
+ txn, &result, verbose ? 1 : 0)) /
+ scale);
- BSONObjBuilder bob;
- if (iam->appendCustomStats(txn, &bob, scale)) {
- indexDetails.append(descriptor->indexName(), bob.obj());
- }
- }
+ collection->getRecordStore()->appendCustomStats(txn, &result, scale);
- result.append("indexDetails", indexDetails.done());
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ result.append("nindexes", indexCatalog->numIndexesReady(txn));
- BSONObjBuilder indexSizes;
- long long indexSize = collection->getIndexSize(txn, &indexSizes, scale);
+ // indexes
+ BSONObjBuilder indexDetails;
- result.appendNumber("totalIndexSize", indexSize / scale);
- result.append("indexSizes", indexSizes.obj());
+ IndexCatalog::IndexIterator i = indexCatalog->getIndexIterator(txn, false);
+ while (i.more()) {
+ const IndexDescriptor* descriptor = i.next();
+ IndexAccessMethod* iam = indexCatalog->getIndex(descriptor);
+ invariant(iam);
- return true;
+ BSONObjBuilder bob;
+ if (iam->appendCustomStats(txn, &bob, scale)) {
+ indexDetails.append(descriptor->indexName(), bob.obj());
+ }
}
- } cmdCollectionStats;
+ result.append("indexDetails", indexDetails.done());
- class CollectionModCommand : public Command {
- public:
- CollectionModCommand() : Command( "collMod" ) {
+ BSONObjBuilder indexSizes;
+ long long indexSize = collection->getIndexSize(txn, &indexSizes, scale);
- }
+ result.appendNumber("totalIndexSize", indexSize / scale);
+ result.append("indexSizes", indexSizes.obj());
+
+ return true;
+ }
+
+} cmdCollectionStats;
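
The scale argument validated above simply divides every size collStats reports, so scale:1024 yields kilobytes and scale:1048576 yields megabytes. A tiny stand-alone sketch of that check and division follows; scaleSizes and the byte counts are made up for illustration.

// Sketch of collStats' scale handling: reject scale < 1, then divide the
// reported sizes by it.
#include <iostream>
#include <string>

bool scaleSizes(long long dataSizeBytes, long long indexSizeBytes, int scale, std::string* errmsg) {
    if (scale <= 0) {  // same check as CollectionStats::run
        *errmsg = "scale has to be >= 1";
        return false;
    }
    std::cout << "size: " << dataSizeBytes / scale
              << ", totalIndexSize: " << indexSizeBytes / scale << "\n";
    return true;
}

int main() {
    std::string errmsg;
    scaleSizes(5 * 1024 * 1024, 512 * 1024, 1024, &errmsg);  // report in KB
    return 0;
}
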
+
+class CollectionModCommand : public Command {
+public:
+ CollectionModCommand() : Command("collMod") {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream &help ) const {
- help <<
- "Sets collection options.\n"
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "Sets collection options.\n"
"Example: { collMod: 'foo', usePowerOf2Sizes:true }\n"
"Example: { collMod: 'foo', index: {keyPattern: {a: 1}, expireAfterSeconds: 600} }";
- }
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::collMod);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::collMod);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNsCollectionRequired(dbname, jsobj);
- return appendCommandStatus(result,
- collMod(txn, NamespaceString(ns), jsobj, &result));
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNsCollectionRequired(dbname, jsobj);
+ return appendCommandStatus(result, collMod(txn, NamespaceString(ns), jsobj, &result));
+ }
- } collectionModCommand;
+} collectionModCommand;
- class DBStats : public Command {
- public:
- DBStats() : Command( "dbStats", false, "dbstats" ) {
+class DBStats : public Command {
+public:
+ DBStats() : Command("dbStats", false, "dbstats") {}
- }
-
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help <<
- "Get stats on a database. Not instantaneous. Slower for databases with large "
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "Get stats on a database. Not instantaneous. Slower for databases with large "
".ns files.\n"
"Example: { dbStats:1, scale:1 }";
- }
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dbStats);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dbStats);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- int scale = 1;
- if ( jsobj["scale"].isNumber() ) {
- scale = jsobj["scale"].numberInt();
- if ( scale <= 0 ) {
- errmsg = "scale has to be > 0";
- return false;
- }
- }
- else if ( jsobj["scale"].trueValue() ) {
- errmsg = "scale has to be a number > 0";
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ int scale = 1;
+ if (jsobj["scale"].isNumber()) {
+ scale = jsobj["scale"].numberInt();
+ if (scale <= 0) {
+ errmsg = "scale has to be > 0";
return false;
}
+ } else if (jsobj["scale"].trueValue()) {
+ errmsg = "scale has to be a number > 0";
+ return false;
+ }
- const string ns = parseNs(dbname, jsobj);
+ const string ns = parseNs(dbname, jsobj);
- // TODO: OldClientContext legacy, needs to be removed
- CurOp::get(txn)->ensureStarted();
+ // TODO: OldClientContext legacy, needs to be removed
+ CurOp::get(txn)->ensureStarted();
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ CurOp::get(txn)->setNS_inlock(dbname);
+ }
+
+ // We lock the entire database in S-mode in order to ensure that the contents will not
+ // change for the stats snapshot. This might be unnecessary and if it becomes a
+ // performance issue, we can take IS lock and then lock collection-by-collection.
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, ns, MODE_S);
+
+ result.append("db", ns);
+
+ Database* db = autoDb.getDb();
+ if (!db) {
+ // TODO: This preserves old behaviour where we used to create an empty database
+ // metadata even when the database is accessed for read. Without this several
+ // unit-tests will fail, which are fairly easy to fix. If backwards compatibility
+ // is not needed for the missing DB case, we can just do the same that's done in
+ // CollectionStats.
+ result.appendNumber("collections", 0);
+ result.appendNumber("objects", 0);
+ result.append("avgObjSize", 0);
+ result.appendNumber("dataSize", 0);
+ result.appendNumber("storageSize", 0);
+ result.appendNumber("numExtents", 0);
+ result.appendNumber("indexes", 0);
+ result.appendNumber("indexSize", 0);
+ result.appendNumber("fileSize", 0);
+ } else {
{
stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setNS_inlock(dbname);
- }
-
- // We lock the entire database in S-mode in order to ensure that the contents will not
- // change for the stats snapshot. This might be unnecessary and if it becomes a
- // performance issue, we can take IS lock and then lock collection-by-collection.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
-
- result.append("db", ns);
-
- Database* db = autoDb.getDb();
- if (!db) {
- // TODO: This preserves old behaviour where we used to create an empty database
- // metadata even when the database is accessed for read. Without this several
- // unit-tests will fail, which are fairly easy to fix. If backwards compatibility
- // is not needed for the missing DB case, we can just do the same that's done in
- // CollectionStats.
- result.appendNumber("collections", 0);
- result.appendNumber("objects", 0);
- result.append("avgObjSize", 0);
- result.appendNumber("dataSize", 0);
- result.appendNumber("storageSize", 0);
- result.appendNumber("numExtents", 0);
- result.appendNumber("indexes", 0);
- result.appendNumber("indexSize", 0);
- result.appendNumber("fileSize", 0);
+ // TODO: OldClientContext legacy, needs to be removed
+ CurOp::get(txn)->enter_inlock(dbname.c_str(), db->getProfilingLevel());
}
- else {
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- // TODO: OldClientContext legacy, needs to be removed
- CurOp::get(txn)->enter_inlock(dbname.c_str(), db->getProfilingLevel());
- }
- db->getStats(txn, &result, scale);
- }
-
- return true;
+ db->getStats(txn, &result, scale);
}
- } cmdDBStats;
-
- /* Returns client's uri */
- class CmdWhatsMyUri : public Command {
- public:
- CmdWhatsMyUri() : Command("whatsmyuri") { }
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help << "{whatsmyuri:1}";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- result << "you" << txn->getClient()->clientAddress(true /*includePort*/);
- return true;
- }
- } cmdWhatsMyUri;
+ return true;
+ }
- class AvailableQueryOptions: public Command {
- public:
- AvailableQueryOptions(): Command("availableQueryOptions",
- false,
- "availablequeryoptions") {
- }
+} cmdDBStats;
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return Status::OK();
- }
+/* Returns client's uri */
+class CmdWhatsMyUri : public Command {
+public:
+ CmdWhatsMyUri() : Command("whatsmyuri") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{whatsmyuri:1}";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ result << "you" << txn->getClient()->clientAddress(true /*includePort*/);
+ return true;
+ }
+} cmdWhatsMyUri;
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- result << "options" << QueryOption_AllSupported;
- return true;
- }
- } availableQueryOptionsCmd;
-
- /**
- * Guard object for making a good-faith effort to enter maintenance mode and leave it when it
- * goes out of scope.
- *
- * Sometimes we cannot set maintenance mode, in which case the call to setMaintenanceMode will
- * return a non-OK status. This class does not treat that case as an error which means that
- * anybody using it is assuming it is ok to continue execution without maintenance mode.
- *
- * TODO: This assumption needs to be audited and documented, or this behavior should be moved
- * elsewhere.
- */
- class MaintenanceModeSetter {
- public:
- MaintenanceModeSetter() :
- maintenanceModeSet(
- repl::getGlobalReplicationCoordinator()->setMaintenanceMode(true).isOK())
- {}
- ~MaintenanceModeSetter() {
- if (maintenanceModeSet)
- repl::getGlobalReplicationCoordinator()->setMaintenanceMode(false);
- }
- private:
- bool maintenanceModeSet;
- };
-
- /**
- * this handles
- - auth
- - maintenance mode
- - opcounters
- - locking
- - context
- then calls run()
- */
- void Command::execCommand(OperationContext* txn,
- Command* command,
- const rpc::RequestInterface& request,
- rpc::ReplyBuilderInterface* replyBuilder) {
-
- try {
+class AvailableQueryOptions : public Command {
+public:
+ AvailableQueryOptions() : Command("availableQueryOptions", false, "availablequeryoptions") {}
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setCommand_inlock(command);
- }
- // TODO: move this back to runCommands when mongos supports OperationContext
- // see SERVER-18515 for details.
- uassertStatusOK(rpc::readRequestMetadata(txn, request.getMetadata()));
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return Status::OK();
+ }
- dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kMetadata);
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ result << "options" << QueryOption_AllSupported;
+ return true;
+ }
+} availableQueryOptionsCmd;
- std::string dbname = request.getDatabase().toString();
- unique_ptr<MaintenanceModeSetter> mmSetter;
+/**
+ * Guard object for making a good-faith effort to enter maintenance mode and leave it when it
+ * goes out of scope.
+ *
+ * Sometimes we cannot set maintenance mode, in which case the call to setMaintenanceMode will
+ * return a non-OK status. This class does not treat that case as an error which means that
+ * anybody using it is assuming it is ok to continue execution without maintenance mode.
+ *
+ * TODO: This assumption needs to be audited and documented, or this behavior should be moved
+ * elsewhere.
+ */
+class MaintenanceModeSetter {
+public:
+ MaintenanceModeSetter()
+ : maintenanceModeSet(
+ repl::getGlobalReplicationCoordinator()->setMaintenanceMode(true).isOK()) {}
+ ~MaintenanceModeSetter() {
+ if (maintenanceModeSet)
+ repl::getGlobalReplicationCoordinator()->setMaintenanceMode(false);
+ }
- if (isHelpRequest(request)) {
- CurOp::get(txn)->ensureStarted();
- generateHelpResponse(txn, request, replyBuilder, *command);
- return;
- }
+private:
+ bool maintenanceModeSet;
+};
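
MaintenanceModeSetter above is a good-faith RAII guard: it records whether entering maintenance mode succeeded and only leaves maintenance mode on destruction if it did. The same pattern is sketched below against a hypothetical Coordinator type standing in for the replication coordinator; nothing here is a MongoDB API.

// Generic "good-faith RAII guard" sketch: try to flip a mode on construction,
// remember whether that succeeded, and undo it in the destructor only if it did.
#include <iostream>

class Coordinator {
public:
    bool setMaintenanceMode(bool on) {
        // Pretend the request can fail; the real coordinator returns a Status.
        active = on;
        return true;
    }
    bool active = false;
};

class MaintenanceGuard {
public:
    explicit MaintenanceGuard(Coordinator* c)
        : _coord(c), _set(c->setMaintenanceMode(true)) {}
    ~MaintenanceGuard() {
        if (_set)  // only undo what we actually did
            _coord->setMaintenanceMode(false);
    }

private:
    Coordinator* _coord;
    bool _set;
};

int main() {
    Coordinator coord;
    {
        MaintenanceGuard guard(&coord);  // best effort; failure to set is tolerated
        std::cout << "in maintenance: " << coord.active << "\n";
    }
    std::cout << "after scope: " << coord.active << "\n";
    return 0;
}
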
- ImpersonationSessionGuard guard(txn);
+/**
+ * this handles
+ - auth
+ - maintenance mode
+ - opcounters
+ - locking
+ - context
+ then calls run()
+*/
+void Command::execCommand(OperationContext* txn,
+ Command* command,
+ const rpc::RequestInterface& request,
+ rpc::ReplyBuilderInterface* replyBuilder) {
+ try {
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ CurOp::get(txn)->setCommand_inlock(command);
+ }
+ // TODO: move this back to runCommands when mongos supports OperationContext
+ // see SERVER-18515 for details.
+ uassertStatusOK(rpc::readRequestMetadata(txn, request.getMetadata()));
- uassertStatusOK(
- _checkAuthorization(command,
- txn->getClient(),
- dbname,
- request.getCommandArgs())
- );
+ dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kMetadata);
- {
- repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ std::string dbname = request.getDatabase().toString();
+ unique_ptr<MaintenanceModeSetter> mmSetter;
- bool iAmPrimary = replCoord->canAcceptWritesForDatabase(dbname);
- bool commandCanRunOnSecondary = command->slaveOk();
+ if (isHelpRequest(request)) {
+ CurOp::get(txn)->ensureStarted();
+ generateHelpResponse(txn, request, replyBuilder, *command);
+ return;
+ }
- bool commandIsOverriddenToRunOnSecondary = command->slaveOverrideOk() &&
+ ImpersonationSessionGuard guard(txn);
- // The $secondaryOk option is set.
- (rpc::ServerSelectionMetadata::get(txn).isSecondaryOk() ||
+ uassertStatusOK(
+ _checkAuthorization(command, txn->getClient(), dbname, request.getCommandArgs()));
- // Or the command has a read preference (may be incorrect, see SERVER-18194).
- (rpc::ServerSelectionMetadata::get(txn).getReadPreference() != boost::none));
+ {
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- bool iAmStandalone = !txn->writesAreReplicated();
- bool canRunHere = iAmPrimary ||
- commandCanRunOnSecondary ||
- commandIsOverriddenToRunOnSecondary ||
- iAmStandalone;
+ bool iAmPrimary = replCoord->canAcceptWritesForDatabase(dbname);
+ bool commandCanRunOnSecondary = command->slaveOk();
- // This logic is clearer if we don't have to invert it.
- if (!canRunHere && command->slaveOverrideOk()) {
- uasserted(ErrorCodes::NotMasterNoSlaveOkCode,
- "not master and slaveOk=false");
- }
+ bool commandIsOverriddenToRunOnSecondary = command->slaveOverrideOk() &&
- uassert(ErrorCodes::NotMaster,
- "not master",
- canRunHere);
+ // The $secondaryOk option is set.
+ (rpc::ServerSelectionMetadata::get(txn).isSecondaryOk() ||
- if (!command->maintenanceOk()
- && replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet
- && !replCoord->canAcceptWritesForDatabase(dbname)
- && !replCoord->getMemberState().secondary()) {
+ // Or the command has a read preference (may be incorrect, see SERVER-18194).
+ (rpc::ServerSelectionMetadata::get(txn).getReadPreference() != boost::none));
- uasserted(ErrorCodes::NotMasterOrSecondaryCode,
- "node is recovering");
- }
- }
+ bool iAmStandalone = !txn->writesAreReplicated();
+ bool canRunHere = iAmPrimary || commandCanRunOnSecondary ||
+ commandIsOverriddenToRunOnSecondary || iAmStandalone;
- if (command->adminOnly()) {
- LOG(2) << "command: " << request.getCommandName();
+ // This logic is clearer if we don't have to invert it.
+ if (!canRunHere && command->slaveOverrideOk()) {
+ uasserted(ErrorCodes::NotMasterNoSlaveOkCode, "not master and slaveOk=false");
}
- if (command->maintenanceMode()) {
- mmSetter.reset(new MaintenanceModeSetter);
- }
+ uassert(ErrorCodes::NotMaster, "not master", canRunHere);
- if (command->shouldAffectCommandCounter()) {
- OpCounters* opCounters = &globalOpCounters;
- opCounters->gotCommand();
+ if (!command->maintenanceOk() &&
+ replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet &&
+ !replCoord->canAcceptWritesForDatabase(dbname) &&
+ !replCoord->getMemberState().secondary()) {
+ uasserted(ErrorCodes::NotMasterOrSecondaryCode, "node is recovering");
}
+ }
- // Handle command option maxTimeMS.
- int maxTimeMS = uassertStatusOK(
- LiteParsedQuery::parseMaxTimeMSCommand(request.getCommandArgs())
- );
-
- uassert(ErrorCodes::InvalidOptions,
- "no such command option $maxTimeMs; use maxTimeMS instead",
- !request.getCommandArgs().hasField("$maxTimeMS"));
+ if (command->adminOnly()) {
+ LOG(2) << "command: " << request.getCommandName();
+ }
- CurOp::get(txn)->setMaxTimeMicros(static_cast<unsigned long long>(maxTimeMS)
- * 1000);
+ if (command->maintenanceMode()) {
+ mmSetter.reset(new MaintenanceModeSetter);
+ }
- // Can throw
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ if (command->shouldAffectCommandCounter()) {
+ OpCounters* opCounters = &globalOpCounters;
+ opCounters->gotCommand();
+ }
- bool retval = false;
+ // Handle command option maxTimeMS.
+ int maxTimeMS =
+ uassertStatusOK(LiteParsedQuery::parseMaxTimeMSCommand(request.getCommandArgs()));
- CurOp::get(txn)->ensureStarted();
+ uassert(ErrorCodes::InvalidOptions,
+ "no such command option $maxTimeMs; use maxTimeMS instead",
+ !request.getCommandArgs().hasField("$maxTimeMS"));
- command->_commandsExecuted.increment();
+ CurOp::get(txn)->setMaxTimeMicros(static_cast<unsigned long long>(maxTimeMS) * 1000);
- retval = command->run(txn, request, replyBuilder);
+ // Can throw
+ txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
- dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kOutputDocs);
+ bool retval = false;
- if (!retval) {
- command->_commandsFailed.increment();
- }
- }
- catch (const DBException& exception) {
- Command::generateErrorResponse(txn, replyBuilder, exception, request, command);
- }
- }
+ CurOp::get(txn)->ensureStarted();
- // This really belongs in commands.cpp, but we need to move it here so we can
- // use shardingState and the repl coordinator without changing our entire library
- // structure.
- // It will be moved back as part of SERVER-18236.
- bool Command::run(OperationContext* txn,
- const rpc::RequestInterface& request,
- rpc::ReplyBuilderInterface* replyBuilder) {
+ command->_commandsExecuted.increment();
- BSONObjBuilder replyBuilderBob;
+ retval = command->run(txn, request, replyBuilder);
- repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- {
- // Handle read after opTime.
- repl::ReadAfterOpTimeArgs readAfterOptimeSettings;
- auto readAfterParseStatus = readAfterOptimeSettings.initialize(request.getCommandArgs());
- if (!readAfterParseStatus.isOK()) {
- replyBuilder
- ->setMetadata(rpc::makeEmptyMetadata())
- .setCommandReply(readAfterParseStatus);
- return false;
- }
+ dassert(replyBuilder->getState() == rpc::ReplyBuilderInterface::State::kOutputDocs);
- auto readAfterResult = replCoord->waitUntilOpTime(txn, readAfterOptimeSettings);
- readAfterResult.appendInfo(&replyBuilderBob);
- if (!readAfterResult.getStatus().isOK()) {
- replyBuilder
- ->setMetadata(rpc::makeEmptyMetadata())
- .setCommandReply(readAfterResult.getStatus(), replyBuilderBob.done());
- return false;
- }
+ if (!retval) {
+ command->_commandsFailed.increment();
+ }
+ } catch (const DBException& exception) {
+ Command::generateErrorResponse(txn, replyBuilder, exception, request, command);
+ }
+}
+
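
The secondary-eligibility check inside execCommand above can be read as a single predicate: the node may run the command if it is primary, or the command is slaveOk, or the command is slaveOverrideOk and the client opted in via $secondaryOk or a read preference, or the node is a standalone. Below is a stand-alone restatement with plain bools; canRunHere here is a sketch, not the real helper, and the inputs replace the replication coordinator and metadata lookups.

// Restatement of execCommand's eligibility predicate with the same short-circuit
// structure as the code above.
#include <iostream>

bool canRunHere(bool iAmPrimary,
                bool commandCanRunOnSecondary,     // command->slaveOk()
                bool slaveOverrideOk,              // command->slaveOverrideOk()
                bool secondaryOkOrReadPreference,  // $secondaryOk set, or a read preference present
                bool iAmStandalone) {              // !txn->writesAreReplicated()
    bool overriddenToRunOnSecondary = slaveOverrideOk && secondaryOkOrReadPreference;
    return iAmPrimary || commandCanRunOnSecondary || overriddenToRunOnSecondary || iAmStandalone;
}

int main() {
    // On a secondary, a non-slaveOk command is rejected unless the client opted in
    // via $secondaryOk / a read preference (or the node is a standalone).
    std::cout << canRunHere(false, false, true, false, false) << "\n";  // 0 -> NotMasterNoSlaveOk path
    std::cout << canRunHere(false, false, true, true, false) << "\n";   // 1 -> allowed to run
    return 0;
}
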
+// This really belongs in commands.cpp, but we need to move it here so we can
+// use shardingState and the repl coordinator without changing our entire library
+// structure.
+// It will be moved back as part of SERVER-18236.
+bool Command::run(OperationContext* txn,
+ const rpc::RequestInterface& request,
+ rpc::ReplyBuilderInterface* replyBuilder) {
+ BSONObjBuilder replyBuilderBob;
+
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ {
+ // Handle read after opTime.
+ repl::ReadAfterOpTimeArgs readAfterOptimeSettings;
+ auto readAfterParseStatus = readAfterOptimeSettings.initialize(request.getCommandArgs());
+ if (!readAfterParseStatus.isOK()) {
+ replyBuilder->setMetadata(rpc::makeEmptyMetadata())
+ .setCommandReply(readAfterParseStatus);
+ return false;
}
- // run expects non-const bsonobj
- BSONObj cmd = request.getCommandArgs();
- // Implementation just forwards to the old method signature for now.
- std::string errmsg;
+ auto readAfterResult = replCoord->waitUntilOpTime(txn, readAfterOptimeSettings);
+ readAfterResult.appendInfo(&replyBuilderBob);
+ if (!readAfterResult.getStatus().isOK()) {
+ replyBuilder->setMetadata(rpc::makeEmptyMetadata())
+ .setCommandReply(readAfterResult.getStatus(), replyBuilderBob.done());
+ return false;
+ }
+ }
- // run expects const db std::string (can't bind to temporary)
- const std::string db = request.getDatabase().toString();
+ // run expects non-const bsonobj
+ BSONObj cmd = request.getCommandArgs();
+ // Implementation just forwards to the old method signature for now.
+ std::string errmsg;
- // TODO: remove queryOptions parameter from command's run method.
- bool result = this->run(txn, db, cmd, 0, errmsg, replyBuilderBob);
+ // run expects const db std::string (can't bind to temporary)
+ const std::string db = request.getDatabase().toString();
- BSONObjBuilder metadataBob;
+ // TODO: remove queryOptions parameter from command's run method.
+ bool result = this->run(txn, db, cmd, 0, errmsg, replyBuilderBob);
- // For commands from mongos, append some info to help getLastError(w) work.
- // TODO: refactor out of here as part of SERVER-18326
- if (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet &&
- shardingState.enabled()) {
- rpc::ShardingMetadata(
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp().getTimestamp(),
- replCoord->getElectionId()
- ).writeToMetadata(&metadataBob);
- }
+ BSONObjBuilder metadataBob;
- auto cmdResponse = replyBuilderBob.done();
- replyBuilder->setMetadata(metadataBob.done());
+ // For commands from mongos, append some info to help getLastError(w) work.
+ // TODO: refactor out of here as part of SERVER-18326
+ if (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet &&
+ shardingState.enabled()) {
+ rpc::ShardingMetadata(
+ repl::ReplClientInfo::forClient(txn->getClient()).getLastOp().getTimestamp(),
+ replCoord->getElectionId()).writeToMetadata(&metadataBob);
+ }
- if (result) {
- replyBuilder->setCommandReply(std::move(cmdResponse));
- }
- else {
- // maintain existing behavior of returning all data appended to builder
- // even if command returned false
- replyBuilder->setCommandReply(Status(ErrorCodes::CommandFailed, errmsg),
- std::move(cmdResponse));
- }
+ auto cmdResponse = replyBuilderBob.done();
+ replyBuilder->setMetadata(metadataBob.done());
- return result;
+ if (result) {
+ replyBuilder->setCommandReply(std::move(cmdResponse));
+ } else {
+ // maintain existing behavior of returning all data appended to builder
+ // even if command returned false
+ replyBuilder->setCommandReply(Status(ErrorCodes::CommandFailed, errmsg),
+ std::move(cmdResponse));
}
- void Command::registerError(OperationContext* txn, const DBException& exception) {
- CurOp::get(txn)->debug().exceptionInfo = exception.getInfo();
- }
+ return result;
+}
+
+void Command::registerError(OperationContext* txn, const DBException& exception) {
+ CurOp::get(txn)->debug().exceptionInfo = exception.getInfo();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/dbcommands_generic.cpp b/src/mongo/db/dbcommands_generic.cpp
index 94c584f6292..5e377e07a17 100644
--- a/src/mongo/db/dbcommands_generic.cpp
+++ b/src/mongo/db/dbcommands_generic.cpp
@@ -66,365 +66,417 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- class CmdBuildInfo : public Command {
- public:
- CmdBuildInfo() : Command( "buildInfo", true, "buildinfo" ) {}
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual void help( stringstream &help ) const {
- help << "get version #, etc.\n";
- help << "{ buildinfo:1 }";
- }
+using std::endl;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+class CmdBuildInfo : public Command {
+public:
+ CmdBuildInfo() : Command("buildInfo", true, "buildinfo") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual void help(stringstream& help) const {
+ help << "get version #, etc.\n";
+ help << "{ buildinfo:1 }";
+ }
- bool run(OperationContext* txn, const std::string& dbname,
- BSONObj& jsobj,
- int, // options
- std::string& errmsg,
- BSONObjBuilder& result) {
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& jsobj,
+ int, // options
+ std::string& errmsg,
+ BSONObjBuilder& result) {
appendBuildInfo(result);
appendStorageEngineList(&result);
return true;
+ }
- }
-
- } cmdBuildInfo;
-
-
- class PingCommand : public Command {
- public:
- PingCommand() : Command( "ping" ) {}
- virtual bool slaveOk() const { return true; }
- virtual void help( stringstream &help ) const { help << "a way to check that the server is alive. responds immediately even if server is in a db lock."; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
- const string& badns,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- // IMPORTANT: Don't put anything in here that might lock db - including authentication
- return true;
- }
- } pingCmd;
-
- class FeaturesCmd : public Command {
- public:
- FeaturesCmd() : Command( "features", true ) {}
- void help(stringstream& h) const { h << "return build level feature settings"; }
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- if ( globalScriptEngine ) {
- BSONObjBuilder bb( result.subobjStart( "js" ) );
- result.append( "utf8" , globalScriptEngine->utf8Ok() );
- bb.done();
- }
- if ( cmdObj["oidReset"].trueValue() ) {
- result.append( "oidMachineOld" , OID::getMachineId() );
- OID::regenMachineId();
- }
- result.append( "oidMachine" , OID::getMachineId() );
- return true;
- }
+} cmdBuildInfo;
- } featuresCmd;
- class HostInfoCmd : public Command {
- public:
- HostInfoCmd() : Command("hostInfo", true) {}
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
+class PingCommand : public Command {
+public:
+ PingCommand() : Command("ping") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "a way to check that the server is alive. responds immediately even if server is "
+ "in a db lock.";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual bool run(OperationContext* txn,
+ const string& badns,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ // IMPORTANT: Don't put anything in here that might lock db - including authentication
+ return true;
+ }
+} pingCmd;
- virtual void help( stringstream& help ) const {
- help << "returns information about the daemon's host";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::hostInfo);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+class FeaturesCmd : public Command {
+public:
+ FeaturesCmd() : Command("features", true) {}
+ void help(stringstream& h) const {
+ h << "return build level feature settings";
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual bool run(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (globalScriptEngine) {
+ BSONObjBuilder bb(result.subobjStart("js"));
+ result.append("utf8", globalScriptEngine->utf8Ok());
+ bb.done();
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- ProcessInfo p;
- BSONObjBuilder bSys, bOs;
-
- bSys.appendDate( "currentTime" , jsTime() );
- bSys.append( "hostname" , prettyHostName() );
- bSys.append( "cpuAddrSize", p.getAddrSize() );
- bSys.append( "memSizeMB", static_cast <unsigned>( p.getMemSizeMB() ) );
- bSys.append( "numCores", p.getNumCores() );
- bSys.append( "cpuArch", p.getArch() );
- bSys.append( "numaEnabled", p.hasNumaEnabled() );
- bOs.append( "type", p.getOsType() );
- bOs.append( "name", p.getOsName() );
- bOs.append( "version", p.getOsVersion() );
-
- result.append( StringData( "system" ), bSys.obj() );
- result.append( StringData( "os" ), bOs.obj() );
- p.appendSystemDetails( result );
-
- return true;
+ if (cmdObj["oidReset"].trueValue()) {
+ result.append("oidMachineOld", OID::getMachineId());
+ OID::regenMachineId();
}
+ result.append("oidMachine", OID::getMachineId());
+ return true;
+ }
- } hostInfoCmd;
-
- class LogRotateCmd : public Command {
- public:
- LogRotateCmd() : Command( "logRotate" ) {}
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::logRotate);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- virtual bool run(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- bool didRotate = rotateLogs(serverGlobalParams.logRenameOnRotate);
- if (didRotate)
- logProcessDetailsForLogRotate();
- return didRotate;
- }
+} featuresCmd;
- } logRotateCmd;
-
- class ListCommandsCmd : public Command {
- public:
- virtual void help( stringstream &help ) const { help << "get a list of all db commands"; }
- ListCommandsCmd() : Command( "listCommands", false ) {}
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- BSONObjBuilder b( result.subobjStart( "commands" ) );
- for ( CommandMap::const_iterator i=_commands->begin(); i!=_commands->end(); ++i ) {
- Command * c = i->second;
-
- // don't show oldnames
- if (i->first != c->name)
- continue;
-
- BSONObjBuilder temp( b.subobjStart( c->name ) );
-
- {
- stringstream help;
- c->help( help );
- temp.append( "help" , help.str() );
- }
- temp.append( "slaveOk" , c->slaveOk() );
- temp.append( "adminOnly" , c->adminOnly() );
- //optionally indicates that the command can be forced to run on a slave/secondary
- if ( c->slaveOverrideOk() ) temp.append( "slaveOverrideOk" , c->slaveOverrideOk() );
- temp.done();
- }
- b.done();
+class HostInfoCmd : public Command {
+public:
+ HostInfoCmd() : Command("hostInfo", true) {}
+ virtual bool slaveOk() const {
+ return true;
+ }
- return 1;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- } listCommandsCmd;
+ virtual void help(stringstream& help) const {
+ help << "returns information about the daemon's host";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::hostInfo);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ProcessInfo p;
+ BSONObjBuilder bSys, bOs;
+
+ bSys.appendDate("currentTime", jsTime());
+ bSys.append("hostname", prettyHostName());
+ bSys.append("cpuAddrSize", p.getAddrSize());
+ bSys.append("memSizeMB", static_cast<unsigned>(p.getMemSizeMB()));
+ bSys.append("numCores", p.getNumCores());
+ bSys.append("cpuArch", p.getArch());
+ bSys.append("numaEnabled", p.hasNumaEnabled());
+ bOs.append("type", p.getOsType());
+ bOs.append("name", p.getOsName());
+ bOs.append("version", p.getOsVersion());
+
+ result.append(StringData("system"), bSys.obj());
+ result.append(StringData("os"), bOs.obj());
+ p.appendSystemDetails(result);
- namespace {
- MONGO_FP_DECLARE(crashOnShutdown);
+ return true;
+ }
- int* volatile illegalAddress;
- } // namespace
+} hostInfoCmd;
- void CmdShutdown::addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+class LogRotateCmd : public Command {
+public:
+ LogRotateCmd() : Command("logRotate") {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
ActionSet actions;
- actions.addAction(ActionType::shutdown);
+ actions.addAction(ActionType::logRotate);
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
+ virtual bool run(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ bool didRotate = rotateLogs(serverGlobalParams.logRenameOnRotate);
+ if (didRotate)
+ logProcessDetailsForLogRotate();
+ return didRotate;
+ }
- void CmdShutdown::shutdownHelper() {
- MONGO_FAIL_POINT_BLOCK(crashOnShutdown, crashBlock) {
- const std::string crashHow = crashBlock.getData()["how"].str();
- if (crashHow == "fault") {
- ++*illegalAddress;
+} logRotateCmd;
+
+class ListCommandsCmd : public Command {
+public:
+ virtual void help(stringstream& help) const {
+ help << "get a list of all db commands";
+ }
+ ListCommandsCmd() : Command("listCommands", false) {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual bool run(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONObjBuilder b(result.subobjStart("commands"));
+ for (CommandMap::const_iterator i = _commands->begin(); i != _commands->end(); ++i) {
+ Command* c = i->second;
+
+ // don't show oldnames
+ if (i->first != c->name)
+ continue;
+
+ BSONObjBuilder temp(b.subobjStart(c->name));
+
+ {
+ stringstream help;
+ c->help(help);
+ temp.append("help", help.str());
}
- ::abort();
+ temp.append("slaveOk", c->slaveOk());
+ temp.append("adminOnly", c->adminOnly());
+ // optionally indicates that the command can be forced to run on a slave/secondary
+ if (c->slaveOverrideOk())
+ temp.append("slaveOverrideOk", c->slaveOverrideOk());
+ temp.done();
}
+ b.done();
- log() << "terminating, shutdown command received";
+ return 1;
+ }
-#if defined(_WIN32)
- // Signal the ServiceMain thread to shutdown.
- if(ntservice::shouldStartService()) {
- signalShutdown();
-
- // Client expects us to abruptly close the socket as part of exiting
- // so this function is not allowed to return.
- // The ServiceMain thread will quit for us so just sleep until it does.
- while (true)
- sleepsecs(60); // Loop forever
+} listCommandsCmd;
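
ListCommandsCmd::run above walks the command registry, skips alias (old name) entries, and emits one sub-document per canonical command. The same traversal is sketched below with a plain std::map standing in for the Command registry and std::cout in place of the BSON builders; CmdInfo and the sample entries are illustrative only.

// Sketch of the listCommands traversal: the registry maps both canonical names
// and old aliases to the same command, and aliases are skipped when reporting.
#include <iostream>
#include <map>
#include <string>

struct CmdInfo {
    std::string name;  // canonical name
    bool slaveOk;
    bool adminOnly;
};

int main() {
    std::map<std::string, CmdInfo> registry = {
        {"buildInfo", {"buildInfo", true, false}},
        {"buildinfo", {"buildInfo", true, false}},  // alias: skipped below
        {"logRotate", {"logRotate", true, true}},
    };

    for (const auto& entry : registry) {
        if (entry.first != entry.second.name)
            continue;  // don't show oldnames, as in ListCommandsCmd::run
        std::cout << entry.second.name << ": slaveOk=" << entry.second.slaveOk
                  << " adminOnly=" << entry.second.adminOnly << "\n";
    }
    return 0;
}
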
+
+namespace {
+MONGO_FP_DECLARE(crashOnShutdown);
+
+int* volatile illegalAddress;
+} // namespace
+
+void CmdShutdown::addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::shutdown);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+}
+
+void CmdShutdown::shutdownHelper() {
+ MONGO_FAIL_POINT_BLOCK(crashOnShutdown, crashBlock) {
+ const std::string crashHow = crashBlock.getData()["how"].str();
+ if (crashHow == "fault") {
+ ++*illegalAddress;
}
- else
+ ::abort();
+ }
+
+ log() << "terminating, shutdown command received";
+
+#if defined(_WIN32)
+ // Signal the ServiceMain thread to shutdown.
+ if (ntservice::shouldStartService()) {
+ signalShutdown();
+
+ // Client expects us to abruptly close the socket as part of exiting
+ // so this function is not allowed to return.
+ // The ServiceMain thread will quit for us so just sleep until it does.
+ while (true)
+ sleepsecs(60); // Loop forever
+ } else
#endif
- {
- exitCleanly(EXIT_CLEAN); // this never returns
- invariant(false);
- }
+ {
+ exitCleanly(EXIT_CLEAN); // this never returns
+ invariant(false);
}
+}
- /* for testing purposes only */
- class CmdForceError : public Command {
- public:
- virtual void help( stringstream& help ) const {
- help << "for testing purposes only. forces a user assertion exception";
- }
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- CmdForceError() : Command("forceerror") {}
- bool run(OperationContext* txn,
- const string& dbnamne,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- LastError::get(cc()).setLastError(10038, "forced error");
- return false;
- }
- } cmdForceError;
-
- class GetLogCmd : public Command {
- public:
- GetLogCmd() : Command( "getLog" ){}
-
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::getLog);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- virtual void help( stringstream& help ) const {
- help << "{ getLog : '*' } OR { getLog : 'global' }";
+/* for testing purposes only */
+class CmdForceError : public Command {
+public:
+ virtual void help(stringstream& help) const {
+ help << "for testing purposes only. forces a user assertion exception";
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ CmdForceError() : Command("forceerror") {}
+ bool run(OperationContext* txn,
+             const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ LastError::get(cc()).setLastError(10038, "forced error");
+ return false;
+ }
+} cmdForceError;
+
+class GetLogCmd : public Command {
+public:
+ GetLogCmd() : Command("getLog") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::getLog);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ getLog : '*' } OR { getLog : 'global' }";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONElement val = cmdObj.firstElement();
+ if (val.type() != String) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Argument to getLog must be of type String; found "
+ << val.toString(false) << " of type "
+ << typeName(val.type())));
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- BSONElement val = cmdObj.firstElement();
- if (val.type() != String) {
- return appendCommandStatus(result, Status(ErrorCodes::TypeMismatch, str::stream()
- << "Argument to getLog must be of type String; found "
- << val.toString(false) << " of type " << typeName(val.type())));
- }
+ string p = val.String();
+ if (p == "*") {
+ vector<string> names;
+ RamLog::getNames(names);
- string p = val.String();
- if ( p == "*" ) {
- vector<string> names;
- RamLog::getNames( names );
-
- BSONArrayBuilder arr;
- for ( unsigned i=0; i<names.size(); i++ ) {
- arr.append( names[i] );
- }
-
- result.appendArray( "names" , arr.arr() );
+ BSONArrayBuilder arr;
+ for (unsigned i = 0; i < names.size(); i++) {
+ arr.append(names[i]);
}
- else {
- RamLog* ramlog = RamLog::getIfExists(p);
- if ( ! ramlog ) {
- errmsg = str::stream() << "no RamLog named: " << p;
- return false;
- }
- RamLog::LineIterator rl(ramlog);
-
- result.appendNumber( "totalLinesWritten", rl.getTotalLinesWritten() );
-
- BSONArrayBuilder arr( result.subarrayStart( "log" ) );
- while (rl.more())
- arr.append(rl.next());
- arr.done();
+
+ result.appendArray("names", arr.arr());
+ } else {
+ RamLog* ramlog = RamLog::getIfExists(p);
+ if (!ramlog) {
+ errmsg = str::stream() << "no RamLog named: " << p;
+ return false;
}
- return true;
- }
+ RamLog::LineIterator rl(ramlog);
- } getLogCmd;
-
- class CmdGetCmdLineOpts : Command {
- public:
- CmdGetCmdLineOpts(): Command("getCmdLineOpts") {}
- void help(stringstream& h) const { h << "get argv"; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual bool slaveOk() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::getCmdLineOpts);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- result.append("argv", serverGlobalParams.argvArray);
- result.append("parsed", serverGlobalParams.parsedOpts);
- return true;
+ result.appendNumber("totalLinesWritten", rl.getTotalLinesWritten());
+
+ BSONArrayBuilder arr(result.subarrayStart("log"));
+ while (rl.more())
+ arr.append(rl.next());
+ arr.done();
}
+ return true;
+ }
+
+} getLogCmd;
- } cmdGetCmdLineOpts;
+class CmdGetCmdLineOpts : Command {
+public:
+ CmdGetCmdLineOpts() : Command("getCmdLineOpts") {}
+ void help(stringstream& h) const {
+ h << "get argv";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::getCmdLineOpts);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ result.append("argv", serverGlobalParams.argvArray);
+ result.append("parsed", serverGlobalParams.parsedOpts);
+ return true;
+ }
+} cmdGetCmdLineOpts;
}
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index 6f297520379..15ab03eaf80 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -39,163 +39,152 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
+using std::unique_ptr;
+using std::endl;
+using std::string;
- // Called from scripting/engine.cpp and scripting/v8_db.cpp.
- DBClientBase* createDirectClient(OperationContext* txn) {
- return new DBDirectClient(txn);
- }
+// Called from scripting/engine.cpp and scripting/v8_db.cpp.
+DBClientBase* createDirectClient(OperationContext* txn) {
+ return new DBDirectClient(txn);
+}
- namespace {
+namespace {
- class DirectClientScope {
- MONGO_DISALLOW_COPYING(DirectClientScope);
- public:
- explicit DirectClientScope(OperationContext* txn)
- : _txn(txn), _prev(_txn->getClient()->isInDirectClient()) {
- _txn->getClient()->setInDirectClient(true);
- }
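+// RAII guard: marks the Client as being inside a DBDirectClient call and restores the previous flag on destruction.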
+class DirectClientScope {
+ MONGO_DISALLOW_COPYING(DirectClientScope);
- ~DirectClientScope() {
- _txn->getClient()->setInDirectClient(_prev);
- }
+public:
+ explicit DirectClientScope(OperationContext* txn)
+ : _txn(txn), _prev(_txn->getClient()->isInDirectClient()) {
+ _txn->getClient()->setInDirectClient(true);
+ }
- private:
- OperationContext* const _txn;
- const bool _prev;
- };
+ ~DirectClientScope() {
+ _txn->getClient()->setInDirectClient(_prev);
+ }
- } // namespace
+private:
+ OperationContext* const _txn;
+ const bool _prev;
+};
+} // namespace
- DBDirectClient::DBDirectClient(OperationContext* txn) : _txn(txn) { }
- bool DBDirectClient::isFailed() const {
- return false;
- }
+DBDirectClient::DBDirectClient(OperationContext* txn) : _txn(txn) {}
- bool DBDirectClient::isStillConnected() {
- return true;
- }
+bool DBDirectClient::isFailed() const {
+ return false;
+}
- std::string DBDirectClient::toString() const {
- return "DBDirectClient";
- }
+bool DBDirectClient::isStillConnected() {
+ return true;
+}
- std::string DBDirectClient::getServerAddress() const {
- return "localhost"; // TODO: should this have the port?
- }
+std::string DBDirectClient::toString() const {
+ return "DBDirectClient";
+}
- void DBDirectClient::sayPiggyBack(Message& toSend) {
- // don't need to piggy back when connected locally
- return say(toSend);
- }
+std::string DBDirectClient::getServerAddress() const {
+ return "localhost"; // TODO: should this have the port?
+}
- bool DBDirectClient::callRead(Message& toSend, Message& response) {
- return call(toSend, response);
- }
+void DBDirectClient::sayPiggyBack(Message& toSend) {
+ // don't need to piggy back when connected locally
+ return say(toSend);
+}
- ConnectionString::ConnectionType DBDirectClient::type() const {
- return ConnectionString::MASTER;
- }
+bool DBDirectClient::callRead(Message& toSend, Message& response) {
+ return call(toSend, response);
+}
- double DBDirectClient::getSoTimeout() const {
- return 0;
- }
+ConnectionString::ConnectionType DBDirectClient::type() const {
+ return ConnectionString::MASTER;
+}
- bool DBDirectClient::lazySupported() const {
- return true;
- }
+double DBDirectClient::getSoTimeout() const {
+ return 0;
+}
- void DBDirectClient::setOpCtx(OperationContext* txn) {
- _txn = txn;
- }
+bool DBDirectClient::lazySupported() const {
+ return true;
+}
- QueryOptions DBDirectClient::_lookupAvailableOptions() {
- // Exhaust mode is not available in DBDirectClient.
- return QueryOptions(DBClientBase::_lookupAvailableOptions() & ~QueryOption_Exhaust);
- }
+void DBDirectClient::setOpCtx(OperationContext* txn) {
+ _txn = txn;
+}
- bool DBDirectClient::call(Message& toSend,
- Message& response,
- bool assertOk,
- string* actualServer) {
- DirectClientScope directClientScope(_txn);
- LastError::get(_txn->getClient()).startRequest();
+QueryOptions DBDirectClient::_lookupAvailableOptions() {
+ // Exhaust mode is not available in DBDirectClient.
+ return QueryOptions(DBClientBase::_lookupAvailableOptions() & ~QueryOption_Exhaust);
+}
- DbResponse dbResponse;
- CurOp curOp(_txn);
- assembleResponse(_txn, toSend, dbResponse, dummyHost);
- verify(dbResponse.response);
+bool DBDirectClient::call(Message& toSend, Message& response, bool assertOk, string* actualServer) {
+ DirectClientScope directClientScope(_txn);
+ LastError::get(_txn->getClient()).startRequest();
- // can get rid of this if we make response handling smarter
- dbResponse.response->concat();
- response = *dbResponse.response;
+ DbResponse dbResponse;
+ CurOp curOp(_txn);
+ assembleResponse(_txn, toSend, dbResponse, dummyHost);
+ verify(dbResponse.response);
- return true;
- }
+ // can get rid of this if we make response handling smarter
+ dbResponse.response->concat();
+ response = *dbResponse.response;
- void DBDirectClient::say(Message& toSend, bool isRetry, string* actualServer) {
- DirectClientScope directClientScope(_txn);
- LastError::get(_txn->getClient()).startRequest();
+ return true;
+}
- DbResponse dbResponse;
- CurOp curOp(_txn);
- assembleResponse(_txn, toSend, dbResponse, dummyHost);
- }
+void DBDirectClient::say(Message& toSend, bool isRetry, string* actualServer) {
+ DirectClientScope directClientScope(_txn);
+ LastError::get(_txn->getClient()).startRequest();
- unique_ptr<DBClientCursor> DBDirectClient::query(const string& ns,
- Query query,
- int nToReturn,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize) {
-
- return DBClientBase::query(ns,
- query,
- nToReturn,
- nToSkip,
- fieldsToReturn,
- queryOptions,
- batchSize);
- }
+ DbResponse dbResponse;
+ CurOp curOp(_txn);
+ assembleResponse(_txn, toSend, dbResponse, dummyHost);
+}
- void DBDirectClient::killCursor(long long id) {
- // The killCursor command on the DB client is only used by sharding,
- // so no need to have it for MongoD.
- verify(!"killCursor should not be used in MongoD");
- }
+unique_ptr<DBClientCursor> DBDirectClient::query(const string& ns,
+ Query query,
+ int nToReturn,
+ int nToSkip,
+ const BSONObj* fieldsToReturn,
+ int queryOptions,
+ int batchSize) {
+ return DBClientBase::query(
+ ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions, batchSize);
+}
- const HostAndPort DBDirectClient::dummyHost("0.0.0.0", 0);
-
- unsigned long long DBDirectClient::count(const string& ns,
- const BSONObj& query,
- int options,
- int limit,
- int skip) {
- BSONObj cmdObj = _countCmd(ns, query, options, limit, skip);
-
- NamespaceString nsString(ns);
- std::string dbname = nsString.db().toString();
-
- Command* countCmd = Command::findCommand("count");
- invariant(countCmd);
-
- std::string errmsg;
- BSONObjBuilder result;
- bool runRetval = countCmd->run(_txn, dbname, cmdObj, options, errmsg, result);
- if (!runRetval) {
- Command::appendCommandStatus(result, runRetval, errmsg);
- Status commandStatus = Command::getStatusFromCommandResult(result.obj());
- invariant(!commandStatus.isOK());
- uassertStatusOK(commandStatus);
- }
-
- BSONObj resultObj = result.obj();
- return static_cast<unsigned long long>(resultObj["n"].numberLong());
- }
+void DBDirectClient::killCursor(long long id) {
+ // The killCursor command on the DB client is only used by sharding,
+ // so no need to have it for MongoD.
+ verify(!"killCursor should not be used in MongoD");
+}
+
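+// Placeholder address for locally executed operations; DBDirectClient never opens a real network connection.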
+const HostAndPort DBDirectClient::dummyHost("0.0.0.0", 0);
+
+unsigned long long DBDirectClient::count(
+ const string& ns, const BSONObj& query, int options, int limit, int skip) {
+ BSONObj cmdObj = _countCmd(ns, query, options, limit, skip);
+
+ NamespaceString nsString(ns);
+ std::string dbname = nsString.db().toString();
+
+ Command* countCmd = Command::findCommand("count");
+ invariant(countCmd);
+
+ std::string errmsg;
+ BSONObjBuilder result;
+ bool runRetval = countCmd->run(_txn, dbname, cmdObj, options, errmsg, result);
+ if (!runRetval) {
+ Command::appendCommandStatus(result, runRetval, errmsg);
+ Status commandStatus = Command::getStatusFromCommandResult(result.obj());
+ invariant(!commandStatus.isOK());
+ uassertStatusOK(commandStatus);
+ }
+
+ BSONObj resultObj = result.obj();
+ return static_cast<unsigned long long>(resultObj["n"].numberLong());
+}
} // namespace mongo
diff --git a/src/mongo/db/dbdirectclient.h b/src/mongo/db/dbdirectclient.h
index 835b63eab46..d34f1d2379c 100644
--- a/src/mongo/db/dbdirectclient.h
+++ b/src/mongo/db/dbdirectclient.h
@@ -34,77 +34,75 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
- /**
- * Embedded calls to the local server using the DBClientBase API without going over the network.
- *
- * Caller does not need to lock, that is handled within.
- *
- * All operations are performed within the scope of a passed-in OperationContext (except when
- * using the deprecated constructor). You must ensure that the OperationContext is valid when
- * calling into any function. If you ever need to change the OperationContext, that can be done
- * without the overhead of creating a new DBDirectClient by calling setOpCtx(), after which all
- * operations will use the new OperationContext.
- */
- class DBDirectClient : public DBClientBase {
- public:
- static const HostAndPort dummyHost;
+/**
+ * Embedded calls to the local server using the DBClientBase API without going over the network.
+ *
+ * Caller does not need to lock, that is handled within.
+ *
+ * All operations are performed within the scope of a passed-in OperationContext (except when
+ * using the deprecated constructor). You must ensure that the OperationContext is valid when
+ * calling into any function. If you ever need to change the OperationContext, that can be done
+ * without the overhead of creating a new DBDirectClient by calling setOpCtx(), after which all
+ * operations will use the new OperationContext.
+ */
+class DBDirectClient : public DBClientBase {
+public:
+ static const HostAndPort dummyHost;
+
+ DBDirectClient(OperationContext* txn);
+
+ using DBClientBase::query;
- DBDirectClient(OperationContext* txn);
+ // XXX: is this valid or useful?
+ void setOpCtx(OperationContext* txn);
- using DBClientBase::query;
+ virtual std::unique_ptr<DBClientCursor> query(const std::string& ns,
+ Query query,
+ int nToReturn = 0,
+ int nToSkip = 0,
+ const BSONObj* fieldsToReturn = 0,
+ int queryOptions = 0,
+ int batchSize = 0);
- // XXX: is this valid or useful?
- void setOpCtx(OperationContext* txn);
+ virtual bool isFailed() const;
- virtual std::unique_ptr<DBClientCursor> query(const std::string &ns,
- Query query,
- int nToReturn = 0,
- int nToSkip = 0,
- const BSONObj* fieldsToReturn = 0,
- int queryOptions = 0,
- int batchSize = 0);
+ virtual bool isStillConnected();
- virtual bool isFailed() const;
+ virtual std::string toString() const;
- virtual bool isStillConnected();
+ virtual std::string getServerAddress() const;
- virtual std::string toString() const;
+ virtual bool call(Message& toSend,
+ Message& response,
+ bool assertOk = true,
+ std::string* actualServer = 0);
- virtual std::string getServerAddress() const;
+ virtual void say(Message& toSend, bool isRetry = false, std::string* actualServer = 0);
- virtual bool call(Message& toSend,
- Message& response,
- bool assertOk = true,
- std::string* actualServer = 0);
+ virtual void sayPiggyBack(Message& toSend);
- virtual void say(Message& toSend,
- bool isRetry = false,
- std::string* actualServer = 0);
+ virtual void killCursor(long long cursorID);
- virtual void sayPiggyBack(Message& toSend);
+ virtual bool callRead(Message& toSend, Message& response);
- virtual void killCursor(long long cursorID);
+ virtual unsigned long long count(const std::string& ns,
+ const BSONObj& query = BSONObj(),
+ int options = 0,
+ int limit = 0,
+ int skip = 0);
- virtual bool callRead(Message& toSend, Message& response);
-
- virtual unsigned long long count(const std::string &ns,
- const BSONObj& query = BSONObj(),
- int options = 0,
- int limit = 0,
- int skip = 0);
-
- virtual ConnectionString::ConnectionType type() const;
+ virtual ConnectionString::ConnectionType type() const;
- double getSoTimeout() const;
+ double getSoTimeout() const;
- virtual bool lazySupported() const;
+ virtual bool lazySupported() const;
- virtual QueryOptions _lookupAvailableOptions();
+ virtual QueryOptions _lookupAvailableOptions();
- private:
- OperationContext* _txn;
- };
+private:
+ OperationContext* _txn;
+};
} // namespace mongo
diff --git a/src/mongo/db/dbeval.cpp b/src/mongo/db/dbeval.cpp
index ae42d2a349d..ada1495be36 100644
--- a/src/mongo/db/dbeval.cpp
+++ b/src/mongo/db/dbeval.cpp
@@ -48,33 +48,31 @@
namespace mongo {
- using std::unique_ptr;
- using std::dec;
- using std::endl;
- using std::string;
- using std::stringstream;
+using std::unique_ptr;
+using std::dec;
+using std::endl;
+using std::string;
+using std::stringstream;
namespace {
- const int edebug=0;
+const int edebug = 0;
- bool dbEval(OperationContext* txn,
- const string& dbName,
- const BSONObj& cmd,
- BSONObjBuilder& result,
- string& errmsg) {
-
- RARELY {
- warning() << "the eval command is deprecated" << startupWarningsLog;
- }
+bool dbEval(OperationContext* txn,
+ const string& dbName,
+ const BSONObj& cmd,
+ BSONObjBuilder& result,
+ string& errmsg) {
+ RARELY {
+ warning() << "the eval command is deprecated" << startupWarningsLog;
+ }
- const BSONElement e = cmd.firstElement();
- uassert(10046,
- "eval needs Code",
- e.type() == Code || e.type() == CodeWScope || e.type() == String);
+ const BSONElement e = cmd.firstElement();
+ uassert(
+ 10046, "eval needs Code", e.type() == Code || e.type() == CodeWScope || e.type() == String);
- const char *code = 0;
- switch ( e.type() ) {
+ const char* code = 0;
+ switch (e.type()) {
case String:
case Code:
code = e.valuestr();
@@ -84,112 +82,114 @@ namespace {
break;
default:
verify(0);
- }
+ }
- verify(code);
+ verify(code);
- if (!globalScriptEngine) {
- errmsg = "db side execution is disabled";
- return false;
- }
+ if (!globalScriptEngine) {
+ errmsg = "db side execution is disabled";
+ return false;
+ }
- unique_ptr<Scope> s(globalScriptEngine->newScope());
- s->registerOperation(txn);
+ unique_ptr<Scope> s(globalScriptEngine->newScope());
+ s->registerOperation(txn);
- ScriptingFunction f = s->createFunction(code);
- if (f == 0) {
- errmsg = string("compile failed: ") + s->getError();
- return false;
- }
-
- s->localConnectForDbEval(txn, dbName.c_str());
+ ScriptingFunction f = s->createFunction(code);
+ if (f == 0) {
+ errmsg = string("compile failed: ") + s->getError();
+ return false;
+ }
- if (e.type() == CodeWScope) {
- s->init(e.codeWScopeScopeDataUnsafe());
- }
+ s->localConnectForDbEval(txn, dbName.c_str());
- BSONObj args;
- {
- BSONElement argsElement = cmd.getField("args");
- if ( argsElement.type() == Array ) {
- args = argsElement.embeddedObject();
- if ( edebug ) {
- log() << "args:" << args.toString() << endl;
- log() << "code:\n" << code << endl;
- }
- }
- }
+ if (e.type() == CodeWScope) {
+ s->init(e.codeWScopeScopeDataUnsafe());
+ }
- int res;
- {
- Timer t;
- res = s->invoke(f, &args, 0, 0);
- int m = t.millis();
- if (m > serverGlobalParams.slowMS) {
- log() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
- if ( m >= 1000 ) log() << code << endl;
- else OCCASIONALLY log() << code << endl;
+ BSONObj args;
+ {
+ BSONElement argsElement = cmd.getField("args");
+ if (argsElement.type() == Array) {
+ args = argsElement.embeddedObject();
+ if (edebug) {
+ log() << "args:" << args.toString() << endl;
+ log() << "code:\n" << code << endl;
}
}
+ }
- if (res || s->isLastRetNativeCode()) {
- result.append("errno", (double) res);
- errmsg = "invoke failed: ";
- if (s->isLastRetNativeCode())
- errmsg += "cannot return native function";
+ int res;
+ {
+ Timer t;
+ res = s->invoke(f, &args, 0, 0);
+ int m = t.millis();
+ if (m > serverGlobalParams.slowMS) {
+ log() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
+ if (m >= 1000)
+ log() << code << endl;
else
- errmsg += s->getError();
-
- return false;
+ OCCASIONALLY log() << code << endl;
}
+ }
- s->append(result, "retval", "__returnValue");
+ if (res || s->isLastRetNativeCode()) {
+ result.append("errno", (double)res);
+ errmsg = "invoke failed: ";
+ if (s->isLastRetNativeCode())
+ errmsg += "cannot return native function";
+ else
+ errmsg += s->getError();
- return true;
+ return false;
}
+ s->append(result, "retval", "__returnValue");
- class CmdEval : public Command {
- public:
- virtual bool slaveOk() const {
- return false;
- }
+ return true;
+}
- virtual void help(stringstream &help) const {
- help << "DEPRECATED\n"
- << "Evaluate javascript at the server.\n"
- << "http://dochub.mongodb.org/core/serversidecodeexecution";
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- RoleGraph::generateUniversalPrivileges(out);
- }
+class CmdEval : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
- CmdEval() : Command("eval", false, "$eval") { }
+ virtual void help(stringstream& help) const {
+ help << "DEPRECATED\n"
+ << "Evaluate javascript at the server.\n"
+ << "http://dochub.mongodb.org/core/serversidecodeexecution";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ RoleGraph::generateUniversalPrivileges(out);
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+ CmdEval() : Command("eval", false, "$eval") {}
- if (cmdObj["nolock"].trueValue()) {
- return dbEval(txn, dbname, cmdObj, result, errmsg);
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (cmdObj["nolock"].trueValue()) {
+ return dbEval(txn, dbname, cmdObj, result, errmsg);
+ }
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
- OldClientContext ctx(txn, dbname);
+ OldClientContext ctx(txn, dbname);
- return dbEval(txn, dbname, cmdObj, result, errmsg);
- }
+ return dbEval(txn, dbname, cmdObj, result, errmsg);
+ }
- } cmdeval;
+} cmdeval;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index ae5444b8c1a..e5e198d7ff0 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -68,558 +68,535 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::ios_base;
- using std::ofstream;
- using std::set;
- using std::string;
- using std::stringstream;
-
- using logger::LogComponent;
-
- void Helpers::ensureIndex(OperationContext* txn,
- Collection* collection,
- BSONObj keyPattern,
- bool unique,
- const char *name) {
- BSONObjBuilder b;
- b.append("name", name);
- b.append("ns", collection->ns());
- b.append("key", keyPattern);
- b.appendBool("unique", unique);
- BSONObj o = b.done();
-
- MultiIndexBlock indexer(txn, collection);
-
- Status status = indexer.init(o);
- if ( status.code() == ErrorCodes::IndexAlreadyExists )
- return;
- uassertStatusOK( status );
-
- uassertStatusOK(indexer.insertAllDocumentsInCollection());
-
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
- }
+using std::unique_ptr;
+using std::endl;
+using std::ios_base;
+using std::ofstream;
+using std::set;
+using std::string;
+using std::stringstream;
+
+using logger::LogComponent;
+
+void Helpers::ensureIndex(OperationContext* txn,
+ Collection* collection,
+ BSONObj keyPattern,
+ bool unique,
+ const char* name) {
+ BSONObjBuilder b;
+ b.append("name", name);
+ b.append("ns", collection->ns());
+ b.append("key", keyPattern);
+ b.appendBool("unique", unique);
+ BSONObj o = b.done();
+
+ MultiIndexBlock indexer(txn, collection);
+
+ Status status = indexer.init(o);
+ if (status.code() == ErrorCodes::IndexAlreadyExists)
+ return;
+ uassertStatusOK(status);
+
+ uassertStatusOK(indexer.insertAllDocumentsInCollection());
+
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
+}
+
+/* fetch a single object from collection ns that matches query
+ set your db SavedContext first
+*/
+bool Helpers::findOne(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& query,
+ BSONObj& result,
+ bool requireIndex) {
+ RecordId loc = findOne(txn, collection, query, requireIndex);
+ if (loc.isNull())
+ return false;
+ result = collection->docFor(txn, loc).value();
+ return true;
+}
- /* fetch a single object from collection ns that matches query
- set your db SavedContext first
- */
- bool Helpers::findOne(OperationContext* txn,
- Collection* collection,
- const BSONObj &query,
- BSONObj& result,
+/* fetch a single object from collection ns that matches query
+ set your db SavedContext first
+*/
+RecordId Helpers::findOne(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& query,
bool requireIndex) {
- RecordId loc = findOne( txn, collection, query, requireIndex );
- if ( loc.isNull() )
- return false;
- result = collection->docFor(txn, loc).value();
- return true;
- }
-
- /* fetch a single object from collection ns that matches query
- set your db SavedContext first
- */
- RecordId Helpers::findOne(OperationContext* txn,
- Collection* collection,
- const BSONObj &query,
- bool requireIndex) {
- if ( !collection )
- return RecordId();
+ if (!collection)
+ return RecordId();
- CanonicalQuery* cq;
- const WhereCallbackReal whereCallback(txn, collection->ns().db());
+ CanonicalQuery* cq;
+ const WhereCallbackReal whereCallback(txn, collection->ns().db());
- massert(17244, "Could not canonicalize " + query.toString(),
+ massert(17244,
+ "Could not canonicalize " + query.toString(),
CanonicalQuery::canonicalize(collection->ns(), query, &cq, whereCallback).isOK());
- PlanExecutor* rawExec;
- size_t options = requireIndex ? QueryPlannerParams::NO_TABLE_SCAN : QueryPlannerParams::DEFAULT;
- massert(17245, "Could not get executor for query " + query.toString(),
- getExecutor(txn,
- collection,
- cq,
- PlanExecutor::YIELD_MANUAL,
- &rawExec,
- options).isOK());
-
- unique_ptr<PlanExecutor> exec(rawExec);
- PlanExecutor::ExecState state;
- RecordId loc;
- if (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
- return loc;
- }
- return RecordId();
+ PlanExecutor* rawExec;
+ size_t options = requireIndex ? QueryPlannerParams::NO_TABLE_SCAN : QueryPlannerParams::DEFAULT;
+ massert(17245,
+ "Could not get executor for query " + query.toString(),
+ getExecutor(txn, collection, cq, PlanExecutor::YIELD_MANUAL, &rawExec, options).isOK());
+
+ unique_ptr<PlanExecutor> exec(rawExec);
+ PlanExecutor::ExecState state;
+ RecordId loc;
+ if (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ return loc;
+ }
+ return RecordId();
+}
+
+bool Helpers::findById(OperationContext* txn,
+ Database* database,
+ const char* ns,
+ BSONObj query,
+ BSONObj& result,
+ bool* nsFound,
+ bool* indexFound) {
+ invariant(database);
+
+ Collection* collection = database->getCollection(ns);
+ if (!collection) {
+ return false;
}
- bool Helpers::findById(OperationContext* txn,
- Database* database,
- const char *ns,
- BSONObj query,
- BSONObj& result,
- bool* nsFound,
- bool* indexFound) {
-
- invariant(database);
-
- Collection* collection = database->getCollection( ns );
- if ( !collection ) {
- return false;
- }
-
- if ( nsFound )
- *nsFound = true;
+ if (nsFound)
+ *nsFound = true;
- IndexCatalog* catalog = collection->getIndexCatalog();
- const IndexDescriptor* desc = catalog->findIdIndex( txn );
+ IndexCatalog* catalog = collection->getIndexCatalog();
+ const IndexDescriptor* desc = catalog->findIdIndex(txn);
- if ( !desc )
- return false;
+ if (!desc)
+ return false;
- if ( indexFound )
- *indexFound = 1;
+ if (indexFound)
+ *indexFound = 1;
- RecordId loc = catalog->getIndex(desc)->findSingle( txn, query["_id"].wrap() );
- if ( loc.isNull() )
- return false;
- result = collection->docFor(txn, loc).value();
+ RecordId loc = catalog->getIndex(desc)->findSingle(txn, query["_id"].wrap());
+ if (loc.isNull())
+ return false;
+ result = collection->docFor(txn, loc).value();
+ return true;
+}
+
+RecordId Helpers::findById(OperationContext* txn, Collection* collection, const BSONObj& idquery) {
+ verify(collection);
+ IndexCatalog* catalog = collection->getIndexCatalog();
+ const IndexDescriptor* desc = catalog->findIdIndex(txn);
+ uassert(13430, "no _id index", desc);
+ return catalog->getIndex(desc)->findSingle(txn, idquery["_id"].wrap());
+}
+
+bool Helpers::getSingleton(OperationContext* txn, const char* ns, BSONObj& result) {
+ AutoGetCollectionForRead ctx(txn, ns);
+ unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn, ns, ctx.getCollection()));
+ PlanExecutor::ExecState state = exec->getNext(&result, NULL);
+
+ CurOp::get(txn)->done();
+
+ if (PlanExecutor::ADVANCED == state) {
+ result = result.getOwned();
return true;
}
+ return false;
+}
- RecordId Helpers::findById(OperationContext* txn,
- Collection* collection,
- const BSONObj& idquery) {
- verify(collection);
- IndexCatalog* catalog = collection->getIndexCatalog();
- const IndexDescriptor* desc = catalog->findIdIndex( txn );
- uassert(13430, "no _id index", desc);
- return catalog->getIndex(desc)->findSingle( txn, idquery["_id"].wrap() );
- }
-
- bool Helpers::getSingleton(OperationContext* txn, const char *ns, BSONObj& result) {
- AutoGetCollectionForRead ctx(txn, ns);
- unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn, ns, ctx.getCollection()));
- PlanExecutor::ExecState state = exec->getNext(&result, NULL);
-
- CurOp::get(txn)->done();
+bool Helpers::getLast(OperationContext* txn, const char* ns, BSONObj& result) {
+ AutoGetCollectionForRead autoColl(txn, ns);
+ unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
+ txn, ns, autoColl.getCollection(), InternalPlanner::BACKWARD));
+ PlanExecutor::ExecState state = exec->getNext(&result, NULL);
- if (PlanExecutor::ADVANCED == state) {
- result = result.getOwned();
- return true;
- }
- return false;
+ if (PlanExecutor::ADVANCED == state) {
+ result = result.getOwned();
+ return true;
}
+ return false;
+}
- bool Helpers::getLast(OperationContext* txn, const char *ns, BSONObj& result) {
- AutoGetCollectionForRead autoColl(txn, ns);
- unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn,
- ns,
- autoColl.getCollection(),
- InternalPlanner::BACKWARD));
- PlanExecutor::ExecState state = exec->getNext(&result, NULL);
-
- if (PlanExecutor::ADVANCED == state) {
- result = result.getOwned();
- return true;
- }
- return false;
- }
+void Helpers::upsert(OperationContext* txn, const string& ns, const BSONObj& o, bool fromMigrate) {
+ BSONElement e = o["_id"];
+ verify(e.type());
+ BSONObj id = e.wrap();
- void Helpers::upsert( OperationContext* txn,
- const string& ns,
- const BSONObj& o,
- bool fromMigrate ) {
- BSONElement e = o["_id"];
- verify( e.type() );
- BSONObj id = e.wrap();
+ OpDebug debug;
+ OldClientContext context(txn, ns);
- OpDebug debug;
- OldClientContext context(txn, ns);
+ const NamespaceString requestNs(ns);
+ UpdateRequest request(requestNs);
- const NamespaceString requestNs(ns);
- UpdateRequest request(requestNs);
+ request.setQuery(id);
+ request.setUpdates(o);
+ request.setUpsert();
+ request.setFromMigration(fromMigrate);
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
- request.setQuery(id);
- request.setUpdates(o);
- request.setUpsert();
- request.setFromMigration(fromMigrate);
- UpdateLifecycleImpl updateLifecycle(true, requestNs);
- request.setLifecycle(&updateLifecycle);
+ update(txn, context.db(), request, &debug);
+}
- update(txn, context.db(), request, &debug);
- }
+void Helpers::putSingleton(OperationContext* txn, const char* ns, BSONObj obj) {
+ OpDebug debug;
+ OldClientContext context(txn, ns);
- void Helpers::putSingleton(OperationContext* txn, const char *ns, BSONObj obj) {
- OpDebug debug;
- OldClientContext context(txn, ns);
+ const NamespaceString requestNs(ns);
+ UpdateRequest request(requestNs);
- const NamespaceString requestNs(ns);
- UpdateRequest request(requestNs);
+ request.setUpdates(obj);
+ request.setUpsert();
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
- request.setUpdates(obj);
- request.setUpsert();
- UpdateLifecycleImpl updateLifecycle(true, requestNs);
- request.setLifecycle(&updateLifecycle);
+ update(txn, context.db(), request, &debug);
- update(txn, context.db(), request, &debug);
+ CurOp::get(txn)->done();
+}
- CurOp::get(txn)->done();
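+// Rewrite an object into index-key form by clearing its field names, e.g. {a: 1, b: "x"} -> {"": 1, "": "x"}.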
+BSONObj Helpers::toKeyFormat(const BSONObj& o) {
+ BSONObjBuilder keyObj(o.objsize());
+ BSONForEach(e, o) {
+ keyObj.appendAs(e, "");
}
+ return keyObj.obj();
+}
- BSONObj Helpers::toKeyFormat( const BSONObj& o ) {
- BSONObjBuilder keyObj( o.objsize() );
- BSONForEach( e , o ) {
- keyObj.appendAs( e , "" );
- }
- return keyObj.obj();
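+// Derive an ascending key pattern from a sample document, e.g. {a: 1, b: "x"} -> {a: 1, b: 1}.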
+BSONObj Helpers::inferKeyPattern(const BSONObj& o) {
+ BSONObjBuilder kpBuilder;
+ BSONForEach(e, o) {
+ kpBuilder.append(e.fieldName(), 1);
+ }
+ return kpBuilder.obj();
+}
+
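+// Resolve the full key pattern of the index that is prefixed by the shard key, if such an index exists.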
+static bool findShardKeyIndexPattern(OperationContext* txn,
+ const string& ns,
+ const BSONObj& shardKeyPattern,
+ BSONObj* indexPattern) {
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ return false;
}
- BSONObj Helpers::inferKeyPattern( const BSONObj& o ) {
- BSONObjBuilder kpBuilder;
- BSONForEach( e , o ) {
- kpBuilder.append( e.fieldName() , 1 );
- }
- return kpBuilder.obj();
+ // Allow multiKey based on the invariant that shard keys must be single-valued.
+ // Therefore, any multi-key index prefixed by shard key cannot be multikey over
+ // the shard key fields.
+ const IndexDescriptor* idx =
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
+ shardKeyPattern,
+ false); // requireSingleKey
+
+ if (idx == NULL)
+ return false;
+ *indexPattern = idx->keyPattern().getOwned();
+ return true;
+}
+
+long long Helpers::removeRange(OperationContext* txn,
+ const KeyRange& range,
+ bool maxInclusive,
+ const WriteConcernOptions& writeConcern,
+ RemoveSaver* callback,
+ bool fromMigrate,
+ bool onlyRemoveOrphanedDocs) {
+ Timer rangeRemoveTimer;
+ const string& ns = range.ns;
+
+ // The IndexChunk has a keyPattern that may apply to more than one index - we need to
+ // select the index and get the full index keyPattern here.
+ BSONObj indexKeyPatternDoc;
+ if (!findShardKeyIndexPattern(txn, ns, range.keyPattern, &indexKeyPatternDoc)) {
+ warning(LogComponent::kSharding) << "no index found to clean data over range of type "
+ << range.keyPattern << " in " << ns << endl;
+ return -1;
}
- static bool findShardKeyIndexPattern(OperationContext* txn,
- const string& ns,
- const BSONObj& shardKeyPattern,
- BSONObj* indexPattern ) {
+ KeyPattern indexKeyPattern(indexKeyPatternDoc);
- AutoGetCollectionForRead ctx(txn, ns);
- Collection* collection = ctx.getCollection();
- if (!collection) {
- return false;
- }
+ // Extend bounds to match the index we found
- // Allow multiKey based on the invariant that shard keys must be single-valued.
- // Therefore, any multi-key index prefixed by shard key cannot be multikey over
- // the shard key fields.
- const IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn,
- shardKeyPattern,
- false); // requireSingleKey
-
- if ( idx == NULL )
- return false;
- *indexPattern = idx->keyPattern().getOwned();
- return true;
- }
+ // Extend min to get (min, MinKey, MinKey, ....)
+ const BSONObj& min =
+ Helpers::toKeyFormat(indexKeyPattern.extendRangeBound(range.minKey, false));
+ // If upper bound is included, extend max to get (max, MaxKey, MaxKey, ...)
+ // If not included, extend max to get (max, MinKey, MinKey, ....)
+ const BSONObj& max =
+ Helpers::toKeyFormat(indexKeyPattern.extendRangeBound(range.maxKey, maxInclusive));
+
+ MONGO_LOG_COMPONENT(1, LogComponent::kSharding)
+ << "begin removal of " << min << " to " << max << " in " << ns
+ << " with write concern: " << writeConcern.toBSON() << endl;
+
+ long long numDeleted = 0;
- long long Helpers::removeRange( OperationContext* txn,
- const KeyRange& range,
- bool maxInclusive,
- const WriteConcernOptions& writeConcern,
- RemoveSaver* callback,
- bool fromMigrate,
- bool onlyRemoveOrphanedDocs )
- {
- Timer rangeRemoveTimer;
- const string& ns = range.ns;
-
- // The IndexChunk has a keyPattern that may apply to more than one index - we need to
- // select the index and get the full index keyPattern here.
- BSONObj indexKeyPatternDoc;
- if ( !findShardKeyIndexPattern( txn,
- ns,
- range.keyPattern,
- &indexKeyPatternDoc ) )
+ Milliseconds millisWaitingForReplication{0};
+
+ while (1) {
+ // Scoping for write lock.
{
- warning(LogComponent::kSharding) << "no index found to clean data over range of type "
- << range.keyPattern << " in " << ns << endl;
- return -1;
- }
+ OldClientWriteContext ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
+ if (!collection)
+ break;
+
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, indexKeyPattern.toBSON());
+
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::indexScan(txn,
+ collection,
+ desc,
+ min,
+ max,
+ maxInclusive,
+ InternalPlanner::FORWARD,
+ InternalPlanner::IXSCAN_FETCH));
+ exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ RecordId rloc;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ // This may yield so we cannot touch nsd after this.
+ state = exec->getNext(&obj, &rloc);
+ exec.reset();
+ if (PlanExecutor::IS_EOF == state) {
+ break;
+ }
- KeyPattern indexKeyPattern( indexKeyPatternDoc );
-
- // Extend bounds to match the index we found
-
- // Extend min to get (min, MinKey, MinKey, ....)
- const BSONObj& min =
- Helpers::toKeyFormat(indexKeyPattern.extendRangeBound(range.minKey,
- false));
- // If upper bound is included, extend max to get (max, MaxKey, MaxKey, ...)
- // If not included, extend max to get (max, MinKey, MinKey, ....)
- const BSONObj& max =
- Helpers::toKeyFormat( indexKeyPattern.extendRangeBound(range.maxKey,maxInclusive));
-
- MONGO_LOG_COMPONENT(1, LogComponent::kSharding)
- << "begin removal of " << min << " to " << max << " in " << ns
- << " with write concern: " << writeConcern.toBSON() << endl;
-
- long long numDeleted = 0;
-
- Milliseconds millisWaitingForReplication{0};
-
- while ( 1 ) {
- // Scoping for write lock.
- {
- OldClientWriteContext ctx(txn, ns);
- Collection* collection = ctx.getCollection();
- if ( !collection )
- break;
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ warning(LogComponent::kSharding)
+ << PlanExecutor::statestr(state) << " - cursor error while trying to delete "
+ << min << " to " << max << " in " << ns << ": "
+ << WorkingSetCommon::toStatusString(obj)
+ << ", stats: " << Explain::statsToBSON(*stats) << endl;
+ break;
+ }
- IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByKeyPattern( txn,
- indexKeyPattern.toBSON() );
-
- unique_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn, collection, desc,
- min, max,
- maxInclusive,
- InternalPlanner::FORWARD,
- InternalPlanner::IXSCAN_FETCH));
- exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- RecordId rloc;
- BSONObj obj;
- PlanExecutor::ExecState state;
- // This may yield so we cannot touch nsd after this.
- state = exec->getNext(&obj, &rloc);
- exec.reset();
- if (PlanExecutor::IS_EOF == state) { break; }
-
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- warning(LogComponent::kSharding) << PlanExecutor::statestr(state)
- << " - cursor error while trying to delete "
- << min << " to " << max
- << " in " << ns << ": "
- << WorkingSetCommon::toStatusString(obj) << ", stats: "
- << Explain::statsToBSON(*stats) << endl;
- break;
- }
+ verify(PlanExecutor::ADVANCED == state);
- verify(PlanExecutor::ADVANCED == state);
-
- WriteUnitOfWork wuow(txn);
-
- if ( onlyRemoveOrphanedDocs ) {
- // Do a final check in the write lock to make absolutely sure that our
- // collection hasn't been modified in a way that invalidates our migration
- // cleanup.
-
- // We should never be able to turn off the sharding state once enabled, but
- // in the future we might want to.
- verify(shardingState.enabled());
-
- // In write lock, so will be the most up-to-date version
- CollectionMetadataPtr metadataNow = shardingState.getCollectionMetadata( ns );
-
- bool docIsOrphan;
- if ( metadataNow ) {
- ShardKeyPattern kp( metadataNow->getKeyPattern() );
- BSONObj key = kp.extractShardKeyFromDoc(obj);
- docIsOrphan = !metadataNow->keyBelongsToMe( key )
- && !metadataNow->keyIsPending( key );
- }
- else {
- docIsOrphan = false;
- }
-
- if ( !docIsOrphan ) {
- warning(LogComponent::kSharding)
- << "aborting migration cleanup for chunk " << min << " to " << max
- << ( metadataNow ? (string) " at document " + obj.toString() : "" )
- << ", collection " << ns << " has changed " << endl;
- break;
- }
- }
+ WriteUnitOfWork wuow(txn);
- NamespaceString nss(ns);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- warning() << "stepped down from primary while deleting chunk; "
- << "orphaning data in " << ns
- << " in range [" << min << ", " << max << ")";
- return numDeleted;
- }
+ if (onlyRemoveOrphanedDocs) {
+ // Do a final check in the write lock to make absolutely sure that our
+ // collection hasn't been modified in a way that invalidates our migration
+ // cleanup.
- if ( callback )
- callback->goingToDelete( obj );
+ // We should never be able to turn off the sharding state once enabled, but
+ // in the future we might want to.
+ verify(shardingState.enabled());
- BSONObj deletedId;
- collection->deleteDocument( txn, rloc, false, false, &deletedId );
- wuow.commit();
- numDeleted++;
- }
+ // In write lock, so will be the most up-to-date version
+ CollectionMetadataPtr metadataNow = shardingState.getCollectionMetadata(ns);
- // TODO remove once the yielding below that references this timer has been removed
- Timer secondaryThrottleTime;
+ bool docIsOrphan;
+ if (metadataNow) {
+ ShardKeyPattern kp(metadataNow->getKeyPattern());
+ BSONObj key = kp.extractShardKeyFromDoc(obj);
+ docIsOrphan =
+ !metadataNow->keyBelongsToMe(key) && !metadataNow->keyIsPending(key);
+ } else {
+ docIsOrphan = false;
+ }
- if (writeConcern.shouldWaitForOtherNodes() && numDeleted > 0) {
- repl::ReplicationCoordinator::StatusAndDuration replStatus =
- repl::getGlobalReplicationCoordinator()->awaitReplication(
- txn,
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
- writeConcern);
- if (replStatus.status.code() == ErrorCodes::ExceededTimeLimit) {
+ if (!docIsOrphan) {
warning(LogComponent::kSharding)
- << "replication to secondaries for removeRange at "
- "least 60 seconds behind";
- }
- else {
- massertStatusOK(replStatus.status);
+ << "aborting migration cleanup for chunk " << min << " to " << max
+ << (metadataNow ? (string) " at document " + obj.toString() : "")
+ << ", collection " << ns << " has changed " << endl;
+ break;
}
- millisWaitingForReplication += replStatus.duration;
}
- }
-
- if (writeConcern.shouldWaitForOtherNodes())
- log(LogComponent::kSharding)
- << "Helpers::removeRangeUnlocked time spent waiting for replication: "
- << durationCount<Milliseconds>(millisWaitingForReplication) << "ms" << endl;
-
- MONGO_LOG_COMPONENT(1, LogComponent::kSharding)
- << "end removal of " << min << " to " << max << " in " << ns
- << " (took " << rangeRemoveTimer.millis() << "ms)" << endl;
-
- return numDeleted;
- }
- const long long Helpers::kMaxDocsPerChunk( 250000 );
-
- // Used by migration clone step
- // TODO: Cannot hook up quite yet due to _trackerLocks in shared migration code.
- // TODO: This function is not used outside of tests
- Status Helpers::getLocsInRange( OperationContext* txn,
- const KeyRange& range,
- long long maxChunkSizeBytes,
- set<RecordId>* locs,
- long long* numDocs,
- long long* estChunkSizeBytes )
- {
- const string ns = range.ns;
- *estChunkSizeBytes = 0;
- *numDocs = 0;
-
- AutoGetCollectionForRead ctx(txn, ns);
-
- Collection* collection = ctx.getCollection();
- if (!collection) {
- return Status(ErrorCodes::NamespaceNotFound, ns);
- }
+ NamespaceString nss(ns);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
+ warning() << "stepped down from primary while deleting chunk; "
+ << "orphaning data in " << ns << " in range [" << min << ", " << max
+ << ")";
+ return numDeleted;
+ }
- // Require single key
- IndexDescriptor *idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex( txn, range.keyPattern, true );
+ if (callback)
+ callback->goingToDelete(obj);
- if ( idx == NULL ) {
- return Status( ErrorCodes::IndexNotFound, range.keyPattern.toString() );
+ BSONObj deletedId;
+ collection->deleteDocument(txn, rloc, false, false, &deletedId);
+ wuow.commit();
+ numDeleted++;
}
- // use the average object size to estimate how many objects a full chunk would carry
- // do that while traversing the chunk's range using the sharding index, below
- // there's a fair amount of slack before we determine a chunk is too large because object
- // sizes will vary
- long long avgDocsWhenFull;
- long long avgDocSizeBytes;
- const long long totalDocsInNS = collection->numRecords( txn );
- if ( totalDocsInNS > 0 ) {
- // TODO: Figure out what's up here
- avgDocSizeBytes = collection->dataSize( txn ) / totalDocsInNS;
- avgDocsWhenFull = maxChunkSizeBytes / avgDocSizeBytes;
- avgDocsWhenFull = std::min( kMaxDocsPerChunk + 1,
- 130 * avgDocsWhenFull / 100 /* slack */);
- }
- else {
- avgDocSizeBytes = 0;
- avgDocsWhenFull = kMaxDocsPerChunk + 1;
+ // TODO remove once the yielding below that references this timer has been removed
+ Timer secondaryThrottleTime;
+
+ if (writeConcern.shouldWaitForOtherNodes() && numDeleted > 0) {
+ repl::ReplicationCoordinator::StatusAndDuration replStatus =
+ repl::getGlobalReplicationCoordinator()->awaitReplication(
+ txn,
+ repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
+ writeConcern);
+ if (replStatus.status.code() == ErrorCodes::ExceededTimeLimit) {
+ warning(LogComponent::kSharding) << "replication to secondaries for removeRange at "
+ "least 60 seconds behind";
+ } else {
+ massertStatusOK(replStatus.status);
+ }
+ millisWaitingForReplication += replStatus.duration;
}
+ }
- // Assume both min and max non-empty, append MinKey's to make them fit chosen index
- KeyPattern idxKeyPattern( idx->keyPattern() );
- BSONObj min = Helpers::toKeyFormat( idxKeyPattern.extendRangeBound( range.minKey, false ) );
- BSONObj max = Helpers::toKeyFormat( idxKeyPattern.extendRangeBound( range.maxKey, false ) );
+ if (writeConcern.shouldWaitForOtherNodes())
+ log(LogComponent::kSharding)
+ << "Helpers::removeRangeUnlocked time spent waiting for replication: "
+ << durationCount<Milliseconds>(millisWaitingForReplication) << "ms" << endl;
+
+ MONGO_LOG_COMPONENT(1, LogComponent::kSharding) << "end removal of " << min << " to " << max
+ << " in " << ns << " (took "
+ << rangeRemoveTimer.millis() << "ms)" << endl;
+
+ return numDeleted;
+}
+
+const long long Helpers::kMaxDocsPerChunk(250000);
+
+// Used by migration clone step
+// TODO: Cannot hook up quite yet due to _trackerLocks in shared migration code.
+// TODO: This function is not used outside of tests
+Status Helpers::getLocsInRange(OperationContext* txn,
+ const KeyRange& range,
+ long long maxChunkSizeBytes,
+ set<RecordId>* locs,
+ long long* numDocs,
+ long long* estChunkSizeBytes) {
+ const string ns = range.ns;
+ *estChunkSizeBytes = 0;
+ *numDocs = 0;
+
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ return Status(ErrorCodes::NamespaceNotFound, ns);
+ }
+ // Require single key
+ IndexDescriptor* idx =
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn, range.keyPattern, true);
- // do a full traversal of the chunk and don't stop even if we think it is a large chunk
- // we want the number of records to better report, in that case
- bool isLargeChunk = false;
- long long docCount = 0;
+ if (idx == NULL) {
+ return Status(ErrorCodes::IndexNotFound, range.keyPattern.toString());
+ }
- unique_ptr<PlanExecutor> exec(
- InternalPlanner::indexScan(txn, collection, idx, min, max, false));
- // we can afford to yield here because any change to the base data that we might miss is
- // already being queued and will be migrated in the 'transferMods' stage
- exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ // use the average object size to estimate how many objects a full chunk would carry
+ // do that while traversing the chunk's range using the sharding index, below
+ // there's a fair amount of slack before we determine a chunk is too large because object
+ // sizes will vary
+ long long avgDocsWhenFull;
+ long long avgDocSizeBytes;
+ const long long totalDocsInNS = collection->numRecords(txn);
+ if (totalDocsInNS > 0) {
+ // TODO: Figure out what's up here
+ avgDocSizeBytes = collection->dataSize(txn) / totalDocsInNS;
+ avgDocsWhenFull = maxChunkSizeBytes / avgDocSizeBytes;
+ avgDocsWhenFull = std::min(kMaxDocsPerChunk + 1, 130 * avgDocsWhenFull / 100 /* slack */);
+ } else {
+ avgDocSizeBytes = 0;
+ avgDocsWhenFull = kMaxDocsPerChunk + 1;
+ }
- RecordId loc;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
- if ( !isLargeChunk ) {
- locs->insert( loc );
- }
+ // Assume both min and max non-empty, append MinKey's to make them fit chosen index
+ KeyPattern idxKeyPattern(idx->keyPattern());
+ BSONObj min = Helpers::toKeyFormat(idxKeyPattern.extendRangeBound(range.minKey, false));
+ BSONObj max = Helpers::toKeyFormat(idxKeyPattern.extendRangeBound(range.maxKey, false));
- if ( ++docCount > avgDocsWhenFull ) {
- isLargeChunk = true;
- }
- }
- *numDocs = docCount;
- *estChunkSizeBytes = docCount * avgDocSizeBytes;
+ // do a full traversal of the chunk and don't stop even if we think it is a large chunk
+ // we want the number of records to better report, in that case
+ bool isLargeChunk = false;
+ long long docCount = 0;
- if ( isLargeChunk ) {
- stringstream ss;
- ss << estChunkSizeBytes;
- return Status( ErrorCodes::InvalidLength, ss.str() );
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::indexScan(txn, collection, idx, min, max, false));
+ // we can afford to yield here because any change to the base data that we might miss is
+ // already being queued and will be migrated in the 'transferMods' stage
+ exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ RecordId loc;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ if (!isLargeChunk) {
+ locs->insert(loc);
}
- return Status::OK();
+ if (++docCount > avgDocsWhenFull) {
+ isLargeChunk = true;
+ }
}
+ *numDocs = docCount;
+    *estChunkSizeBytes = docCount * avgDocSizeBytes;
- void Helpers::emptyCollection(OperationContext* txn, const char *ns) {
- OldClientContext context(txn, ns);
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- deleteObjects(txn, context.db(), ns, BSONObj(), PlanExecutor::YIELD_MANUAL, false);
+ if (isLargeChunk) {
+ stringstream ss;
+ ss << estChunkSizeBytes;
+ return Status(ErrorCodes::InvalidLength, ss.str());
}
- Helpers::RemoveSaver::RemoveSaver( const string& a , const string& b , const string& why)
- : _out(0) {
- static int NUM = 0;
+ return Status::OK();
+}
- _root = storageGlobalParams.dbpath;
- if ( a.size() )
- _root /= a;
- if ( b.size() )
- _root /= b;
- verify( a.size() || b.size() );
- _file = _root;
+void Helpers::emptyCollection(OperationContext* txn, const char* ns) {
+ OldClientContext context(txn, ns);
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+ deleteObjects(txn, context.db(), ns, BSONObj(), PlanExecutor::YIELD_MANUAL, false);
+}
- stringstream ss;
- ss << why << "." << terseCurrentTime(false) << "." << NUM++ << ".bson";
- _file /= ss.str();
- }
+Helpers::RemoveSaver::RemoveSaver(const string& a, const string& b, const string& why) : _out(0) {
+ static int NUM = 0;
- Helpers::RemoveSaver::~RemoveSaver() {
- if ( _out ) {
- _out->close();
- delete _out;
- _out = 0;
- }
- }
+ _root = storageGlobalParams.dbpath;
+ if (a.size())
+ _root /= a;
+ if (b.size())
+ _root /= b;
+ verify(a.size() || b.size());
- void Helpers::RemoveSaver::goingToDelete( const BSONObj& o ) {
- if ( ! _out ) {
- boost::filesystem::create_directories( _root );
- _out = new ofstream();
- _out->open( _file.string().c_str() , ios_base::out | ios_base::binary );
- if ( ! _out->good() ) {
- error() << "couldn't create file: " << _file.string() <<
- " for remove saving" << endl;
- delete _out;
- _out = 0;
- return;
- }
+ _file = _root;
+ stringstream ss;
+ ss << why << "." << terseCurrentTime(false) << "." << NUM++ << ".bson";
+ _file /= ss.str();
+}
+
+Helpers::RemoveSaver::~RemoveSaver() {
+ if (_out) {
+ _out->close();
+ delete _out;
+ _out = 0;
+ }
+}
+
+void Helpers::RemoveSaver::goingToDelete(const BSONObj& o) {
+ if (!_out) {
+ boost::filesystem::create_directories(_root);
+ _out = new ofstream();
+ _out->open(_file.string().c_str(), ios_base::out | ios_base::binary);
+ if (!_out->good()) {
+ error() << "couldn't create file: " << _file.string() << " for remove saving" << endl;
+ delete _out;
+ _out = 0;
+ return;
}
- _out->write( o.objdata() , o.objsize() );
}
+ _out->write(o.objdata(), o.objsize());
+}
-} // namespace mongo
+} // namespace mongo
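The reformatted getLocsInRange() above preserves the estimation logic: the index scan keeps recording RecordIds until the running document count passes avgDocsWhenFull, after which it only counts, and the final estimate is docCount * avgDocSizeBytes. A minimal standalone sketch of that arithmetic; the 64MB limit and 512-byte average are illustrative values, not taken from the patch (the real inputs come from collection statistics and the caller's maxChunkSizeBytes):

#include <iostream>

int main() {
    // Illustrative inputs; in dbhelpers.cpp these are derived from collection
    // stats and the maxChunkSizeBytes passed by the caller.
    const long long maxChunkSizeBytes = 64LL * 1024 * 1024;
    const long long avgDocSizeBytes = 512;
    const long long avgDocsWhenFull = maxChunkSizeBytes / avgDocSizeBytes;

    const long long docCount = 150000;  // documents seen by the index scan
    const bool isLargeChunk = docCount > avgDocsWhenFull;
    const long long estChunkSizeBytes = docCount * avgDocSizeBytes;

    std::cout << "estimated " << estChunkSizeBytes << " bytes, large chunk: "
              << std::boolalpha << isLargeChunk << '\n';
    return 0;
}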
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index a0ca0f0bc3d..146943fcfc3 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -35,194 +35,196 @@
namespace mongo {
- class Collection;
- class Cursor;
- class OperationContext;
- struct KeyRange;
- struct WriteConcernOptions;
+class Collection;
+class Cursor;
+class OperationContext;
+struct KeyRange;
+struct WriteConcernOptions;
+
+/**
+ * db helpers are helper functions and classes that let us easily manipulate the local
+ * database instance in-proc.
+ *
+ * all helpers assume locking is handled above them
+ */
+struct Helpers {
+ class RemoveSaver;
+
+ /* ensure the specified index exists.
+
+ @param keyPattern key pattern, e.g., { ts : 1 }
+ @param name index name, e.g., "name_1"
+
+ This method can be a little (not much) cpu-slow, so you may wish to use
+ OCCASIONALLY ensureIndex(...);
+
+ Note: does nothing if collection does not yet exist.
+ */
+ static void ensureIndex(OperationContext* txn,
+ Collection* collection,
+ BSONObj keyPattern,
+ bool unique,
+ const char* name);
+
+ /* fetch a single object from collection ns that matches query.
+ set your db SavedContext first.
+
+ @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
+ won't work.
+
+ @param requireIndex if true, assert if no index for the query. a way to guard against
+ writing a slow query.
+
+ @return true if object found
+ */
+ static bool findOne(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& query,
+ BSONObj& result,
+ bool requireIndex = false);
+
+ static RecordId findOne(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& query,
+ bool requireIndex);
/**
- * db helpers are helper functions and classes that let us easily manipulate the local
- * database instance in-proc.
+ * @param indexFound if passed in will be set to true if ns and index found
+ * @return true if object found
+ */
+ static bool findById(OperationContext* txn,
+ Database* db,
+ const char* ns,
+ BSONObj query,
+ BSONObj& result,
+ bool* nsFound = 0,
+ bool* indexFound = 0);
+
+ /* TODO: should this move into Collection?
+ * uasserts if no _id index.
+ * @return null loc if not found */
+ static RecordId findById(OperationContext* txn, Collection* collection, const BSONObj& query);
+
+ /**
+ * Get the first object generated from a forward natural-order scan on "ns". Callers do not
+ * have to lock "ns".
+ *
+ * Returns true if there is such an object. An owned copy of the object is placed into the
+ * out-argument "result".
*
- * all helpers assume locking is handled above them
+ * Returns false if there is no such object.
+ */
+ static bool getSingleton(OperationContext* txn, const char* ns, BSONObj& result);
+
+ /**
+ * Same as getSingleton, but with a reverse natural-order scan on "ns".
*/
- struct Helpers {
+ static bool getLast(OperationContext* txn, const char* ns, BSONObj& result);
- class RemoveSaver;
+ /**
+ * Performs an upsert of "obj" into the collection "ns", with an empty update predicate.
+ * Callers must have "ns" locked.
+ */
+ static void putSingleton(OperationContext* txn, const char* ns, BSONObj obj);
+
+ /**
+ * you have to lock
+ * you do not have to have Context set
+ * o has to have an _id field or will assert
+ */
+ static void upsert(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& o,
+ bool fromMigrate = false);
+
+ // TODO: this should be somewhere else probably
+ /* Takes object o, and returns a new object with the
+ * same field elements but the names stripped out.
+ * Example:
+ * o = {a : 5 , b : 6} --> {"" : 5, "" : 6}
+ */
+ static BSONObj toKeyFormat(const BSONObj& o);
- /* ensure the specified index exists.
+ /* Takes object o, and infers an ascending keyPattern with the same fields as o
+ * Example:
+ * o = {a : 5 , b : 6} --> {a : 1 , b : 1 }
+ */
+ static BSONObj inferKeyPattern(const BSONObj& o);
+
+ /**
+ * Takes a namespace range, specified by a min and max and qualified by an index pattern,
+ * and removes all the documents in that range found by iterating
+ * over the given index. Caller is responsible for ensuring that min/max are
+ * compatible with the given keyPattern (e.g min={a:100} is compatible with
+ * keyPattern={a:1,b:1} since it can be extended to {a:100,b:minKey}, but
+ * min={b:100} is not compatible).
+ *
+ * Caller must hold a write lock on 'ns'
+ *
+ * Returns -1 when no usable index exists
+ *
+ * Does oplog the individual document deletions.
+ * // TODO: Refactor this mechanism, it is growing too large
+ */
+ static long long removeRange(OperationContext* txn,
+ const KeyRange& range,
+ bool maxInclusive,
+ const WriteConcernOptions& secondaryThrottle,
+ RemoveSaver* callback = NULL,
+ bool fromMigrate = false,
+ bool onlyRemoveOrphanedDocs = false);
- @param keyPattern key pattern, e.g., { ts : 1 }
- @param name index name, e.g., "name_1"
- This method can be a little (not much) cpu-slow, so you may wish to use
- OCCASIONALLY ensureIndex(...);
+ // TODO: This will supersede Chunk::MaxObjectsPerChunk
+ static const long long kMaxDocsPerChunk;
- Note: does nothing if collection does not yet exist.
- */
- static void ensureIndex(OperationContext* txn,
- Collection* collection,
- BSONObj keyPattern,
- bool unique,
- const char *name);
+ /**
+ * Get sorted disklocs that belong to a range of a namespace defined over an index
+ * key pattern (KeyRange).
+ *
+ * @param chunk range of a namespace over an index key pattern.
+ * @param maxChunkSizeBytes max number of bytes that we will retrieve locs for, if the
+ * range is estimated larger (from avg doc stats) we will stop recording locs.
+ * @param locs set to record locs in
+ * @param estChunkSizeBytes chunk size estimated from doc count and avg doc size
+ * @param numDocs set to the number of documents found in the range
+ *
+ * @return NamespaceNotFound if the namespace doesn't exist
+ * @return IndexNotFound if the index pattern doesn't match any indexes
+ * @return InvalidLength if the estimated size exceeds maxChunkSizeBytes
+ */
+ static Status getLocsInRange(OperationContext* txn,
+ const KeyRange& range,
+ long long maxChunkSizeBytes,
+ std::set<RecordId>* locs,
+ long long* numDocs,
+ long long* estChunkSizeBytes);
- /* fetch a single object from collection ns that matches query.
- set your db SavedContext first.
+ /**
+ * Remove all documents from a collection.
+ * You do not need to set the database before calling.
+ * Does not oplog the operation.
+ */
+ static void emptyCollection(OperationContext* txn, const char* ns);
- @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
- won't work.
+ /**
+ * for saving deleted bson objects to a flat file
+ */
+ class RemoveSaver {
+ MONGO_DISALLOW_COPYING(RemoveSaver);
- @param requireIndex if true, assert if no index for the query. a way to guard against
- writing a slow query.
+ public:
+ RemoveSaver(const std::string& type, const std::string& ns, const std::string& why);
+ ~RemoveSaver();
- @return true if object found
- */
- static bool findOne(OperationContext* txn,
- Collection* collection,
- const BSONObj &query,
- BSONObj& result,
- bool requireIndex = false);
-
- static RecordId findOne(OperationContext* txn,
- Collection* collection,
- const BSONObj &query,
- bool requireIndex);
-
- /**
- * @param foundIndex if passed in will be set to 1 if ns and index found
- * @return true if object found
- */
- static bool findById(OperationContext* txn,
- Database* db, const char *ns, BSONObj query, BSONObj& result,
- bool* nsFound = 0, bool* indexFound = 0 );
-
- /* TODO: should this move into Collection?
- * uasserts if no _id index.
- * @return null loc if not found */
- static RecordId findById(OperationContext* txn,
- Collection* collection, const BSONObj& query);
-
- /**
- * Get the first object generated from a forward natural-order scan on "ns". Callers do not
- * have to lock "ns".
- *
- * Returns true if there is such an object. An owned copy of the object is placed into the
- * out-argument "result".
- *
- * Returns false if there is no such object.
- */
- static bool getSingleton(OperationContext* txn, const char *ns, BSONObj& result);
-
- /**
- * Same as getSingleton, but with a reverse natural-order scan on "ns".
- */
- static bool getLast(OperationContext* txn, const char *ns, BSONObj& result);
-
- /**
- * Performs an upsert of "obj" into the collection "ns", with an empty update predicate.
- * Callers must have "ns" locked.
- */
- static void putSingleton(OperationContext* txn, const char *ns, BSONObj obj);
-
- /**
- * you have to lock
- * you do not have to have Context set
- * o has to have an _id field or will assert
- */
- static void upsert( OperationContext* txn,
- const std::string& ns,
- const BSONObj& o,
- bool fromMigrate = false );
-
- // TODO: this should be somewhere else probably
- /* Takes object o, and returns a new object with the
- * same field elements but the names stripped out.
- * Example:
- * o = {a : 5 , b : 6} --> {"" : 5, "" : 6}
- */
- static BSONObj toKeyFormat( const BSONObj& o );
-
- /* Takes object o, and infers an ascending keyPattern with the same fields as o
- * Example:
- * o = {a : 5 , b : 6} --> {a : 1 , b : 1 }
- */
- static BSONObj inferKeyPattern( const BSONObj& o );
-
- /**
- * Takes a namespace range, specified by a min and max and qualified by an index pattern,
- * and removes all the documents in that range found by iterating
- * over the given index. Caller is responsible for insuring that min/max are
- * compatible with the given keyPattern (e.g min={a:100} is compatible with
- * keyPattern={a:1,b:1} since it can be extended to {a:100,b:minKey}, but
- * min={b:100} is not compatible).
- *
- * Caller must hold a write lock on 'ns'
- *
- * Returns -1 when no usable index exists
- *
- * Does oplog the individual document deletions.
- * // TODO: Refactor this mechanism, it is growing too large
- */
- static long long removeRange( OperationContext* txn,
- const KeyRange& range,
- bool maxInclusive,
- const WriteConcernOptions& secondaryThrottle,
- RemoveSaver* callback = NULL,
- bool fromMigrate = false,
- bool onlyRemoveOrphanedDocs = false );
-
-
- // TODO: This will supersede Chunk::MaxObjectsPerChunk
- static const long long kMaxDocsPerChunk;
-
- /**
- * Get sorted disklocs that belong to a range of a namespace defined over an index
- * key pattern (KeyRange).
- *
- * @param chunk range of a namespace over an index key pattern.
- * @param maxChunkSizeBytes max number of bytes that we will retrieve locs for, if the
- * range is estimated larger (from avg doc stats) we will stop recording locs.
- * @param locs set to record locs in
- * @param estChunkSizeBytes chunk size estimated from doc count and avg doc size
- * @param chunkTooBig whether the chunk was estimated larger than our maxChunkSizeBytes
- * @param errmsg filled with textual description of error if this call return false
- *
- * @return NamespaceNotFound if the namespace doesn't exist
- * @return IndexNotFound if the index pattern doesn't match any indexes
- * @return InvalidLength if the estimated size exceeds maxChunkSizeBytes
- */
- static Status getLocsInRange( OperationContext* txn,
- const KeyRange& range,
- long long maxChunkSizeBytes,
- std::set<RecordId>* locs,
- long long* numDocs,
- long long* estChunkSizeBytes );
-
- /**
- * Remove all documents from a collection.
- * You do not need to set the database before calling.
- * Does not oplog the operation.
- */
- static void emptyCollection(OperationContext* txn, const char *ns);
-
- /**
- * for saving deleted bson objects to a flat file
- */
- class RemoveSaver {
- MONGO_DISALLOW_COPYING(RemoveSaver);
- public:
- RemoveSaver(const std::string& type, const std::string& ns, const std::string& why);
- ~RemoveSaver();
-
- void goingToDelete( const BSONObj& o );
-
- private:
- boost::filesystem::path _root;
- boost::filesystem::path _file;
- std::ofstream* _out;
- };
+ void goingToDelete(const BSONObj& o);
+ private:
+ boost::filesystem::path _root;
+ boost::filesystem::path _file;
+ std::ofstream* _out;
};
+};
-} // namespace mongo
+} // namespace mongo
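The header reformatting above is whitespace-only; the Helpers API it documents is unchanged. As a hedged usage sketch of two of those declarations, findOne() and RemoveSaver, assuming the mongo internal headers and a caller that already holds the required locks (the namespace and file-name strings are illustrative):

#include "mongo/db/dbhelpers.h"

namespace mongo {
// Illustrative call sites only; not part of the patch.
void helpersUsageSketch(OperationContext* txn, Collection* coll) {
    BSONObj result;
    // Low-level single-document fetch; requireIndex=true asserts instead of
    // allowing an unindexed (slow) query, per the comment above.
    if (Helpers::findOne(txn, coll, BSON("_id" << 1), result, /*requireIndex=*/true)) {
        // Archive the document to a flat .bson file before deleting it; this is
        // the same RemoveSaver that removeRange() accepts as its callback.
        Helpers::RemoveSaver saver("example", "test.coll", "cleanup");
        saver.goingToDelete(result);
    }
}
}  // namespace mongo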
diff --git a/src/mongo/db/dbmessage.cpp b/src/mongo/db/dbmessage.cpp
index 2dbbce7704a..a91178afa16 100644
--- a/src/mongo/db/dbmessage.cpp
+++ b/src/mongo/db/dbmessage.cpp
@@ -33,16 +33,16 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- string Message::toString() const {
- stringstream ss;
- ss << "op: " << opToString( operation() ) << " len: " << size();
- if ( operation() >= 2000 && operation() < 2100 ) {
- DbMessage d(*this);
- ss << " ns: " << d.getns();
- switch ( operation() ) {
+using std::string;
+using std::stringstream;
+
+string Message::toString() const {
+ stringstream ss;
+ ss << "op: " << opToString(operation()) << " len: " << size();
+ if (operation() >= 2000 && operation() < 2100) {
+ DbMessage d(*this);
+ ss << " ns: " << d.getns();
+ switch (operation()) {
case dbUpdate: {
int flags = d.pullInt();
BSONObj q = d.nextJsObj();
@@ -61,168 +61,167 @@ namespace mongo {
}
default:
ss << " CANNOT HANDLE YET";
- }
-
-
}
- return ss.str();
}
+ return ss.str();
+}
- DbMessage::DbMessage(const Message& msg) : _msg(msg), _nsStart(NULL), _mark(NULL), _nsLen(0) {
- // for received messages, Message has only one buffer
- _theEnd = _msg.singleData().data() + _msg.singleData().dataLen();
- _nextjsobj = _msg.singleData().data();
-
- _reserved = readAndAdvance<int>();
+DbMessage::DbMessage(const Message& msg) : _msg(msg), _nsStart(NULL), _mark(NULL), _nsLen(0) {
+ // for received messages, Message has only one buffer
+ _theEnd = _msg.singleData().data() + _msg.singleData().dataLen();
+ _nextjsobj = _msg.singleData().data();
- // Read packet for NS
- if (messageShouldHaveNs()) {
+ _reserved = readAndAdvance<int>();
- // Limit = buffer size of message -
- // (first int4 in message which is either flags or a zero constant)
- size_t limit = _msg.singleData().dataLen() - sizeof(int);
+ // Read packet for NS
+ if (messageShouldHaveNs()) {
+ // Limit = buffer size of message -
+ // (first int4 in message which is either flags or a zero constant)
+ size_t limit = _msg.singleData().dataLen() - sizeof(int);
- _nsStart = _nextjsobj;
- _nsLen = strnlen(_nsStart, limit);
+ _nsStart = _nextjsobj;
+ _nsLen = strnlen(_nsStart, limit);
- // Validate there is room for a null byte in the buffer
- // Strings can be zero length
- uassert(18633, "Failed to parse ns string", _nsLen < limit);
+ // Validate there is room for a null byte in the buffer
+ // Strings can be zero length
+ uassert(18633, "Failed to parse ns string", _nsLen < limit);
- _nextjsobj += _nsLen + 1; // skip namespace + null
- }
+ _nextjsobj += _nsLen + 1; // skip namespace + null
}
+}
- const char * DbMessage::getns() const {
- verify(messageShouldHaveNs());
- return _nsStart;
- }
+const char* DbMessage::getns() const {
+ verify(messageShouldHaveNs());
+ return _nsStart;
+}
- int DbMessage::getQueryNToReturn() const {
- verify(messageShouldHaveNs());
- const char* p = _nsStart + _nsLen + 1;
- checkRead<int>(p, 2);
+int DbMessage::getQueryNToReturn() const {
+ verify(messageShouldHaveNs());
+ const char* p = _nsStart + _nsLen + 1;
+ checkRead<int>(p, 2);
- return ConstDataView(p).read<LittleEndian<int32_t>>(sizeof(int32_t));
- }
+ return ConstDataView(p).read<LittleEndian<int32_t>>(sizeof(int32_t));
+}
- int DbMessage::pullInt() {
- return readAndAdvance<int32_t>();
- }
+int DbMessage::pullInt() {
+ return readAndAdvance<int32_t>();
+}
- long long DbMessage::pullInt64() {
- return readAndAdvance<int64_t>();
- }
+long long DbMessage::pullInt64() {
+ return readAndAdvance<int64_t>();
+}
- const char* DbMessage::getArray(size_t count) const {
- checkRead<long long>(_nextjsobj, count);
- return _nextjsobj;
- }
+const char* DbMessage::getArray(size_t count) const {
+ checkRead<long long>(_nextjsobj, count);
+ return _nextjsobj;
+}
- BSONObj DbMessage::nextJsObj() {
- massert(10304,
+BSONObj DbMessage::nextJsObj() {
+ massert(10304,
"Client Error: Remaining data too small for BSON object",
_nextjsobj != NULL && _theEnd - _nextjsobj >= 5);
- if (serverGlobalParams.objcheck) {
- Status status = validateBSON(_nextjsobj, _theEnd - _nextjsobj);
- massert(10307,
+ if (serverGlobalParams.objcheck) {
+ Status status = validateBSON(_nextjsobj, _theEnd - _nextjsobj);
+ massert(10307,
str::stream() << "Client Error: bad object in message: " << status.reason(),
status.isOK());
- }
-
- BSONObj js(_nextjsobj);
- verify(js.objsize() >= 5);
- verify(js.objsize() <= (_theEnd - _nextjsobj));
-
- _nextjsobj += js.objsize();
- if (_nextjsobj >= _theEnd)
- _nextjsobj = NULL;
- return js;
}
- void DbMessage::markReset(const char * toMark = NULL) {
- if (toMark == NULL) {
- toMark = _mark;
- }
+ BSONObj js(_nextjsobj);
+ verify(js.objsize() >= 5);
+ verify(js.objsize() <= (_theEnd - _nextjsobj));
- verify(toMark);
- _nextjsobj = toMark;
- }
+ _nextjsobj += js.objsize();
+ if (_nextjsobj >= _theEnd)
+ _nextjsobj = NULL;
+ return js;
+}
- template<typename T>
- void DbMessage::checkRead(const char* start, size_t count) const {
- if ((_theEnd - start) < static_cast<int>(sizeof(T) * count)) {
- uassert(18634, "Not enough data to read", false);
- }
+void DbMessage::markReset(const char* toMark = NULL) {
+ if (toMark == NULL) {
+ toMark = _mark;
}
- template<typename T>
- T DbMessage::read() const {
- checkRead<T>(_nextjsobj, 1);
+ verify(toMark);
+ _nextjsobj = toMark;
+}
- return ConstDataView(_nextjsobj).read<LittleEndian<T>>();
+template <typename T>
+void DbMessage::checkRead(const char* start, size_t count) const {
+ if ((_theEnd - start) < static_cast<int>(sizeof(T) * count)) {
+ uassert(18634, "Not enough data to read", false);
}
+}
- template<typename T> T DbMessage::readAndAdvance() {
- T t = read<T>();
- _nextjsobj += sizeof(T);
- return t;
- }
+template <typename T>
+T DbMessage::read() const {
+ checkRead<T>(_nextjsobj, 1);
- void replyToQuery(int queryResultFlags,
- AbstractMessagingPort* p, Message& requestMsg,
- void *data, int size,
- int nReturned, int startingFrom,
- long long cursorId
- ) {
- BufBuilder b(32768);
- b.skip(sizeof(QueryResult::Value));
- b.appendBuf(data, size);
- QueryResult::View qr = b.buf();
- qr.setResultFlags(queryResultFlags);
- qr.msgdata().setLen(b.len());
- qr.msgdata().setOperation(opReply);
- qr.setCursorId(cursorId);
- qr.setStartingFrom(startingFrom);
- qr.setNReturned(nReturned);
- b.decouple();
- Message resp(qr.view2ptr(), true);
- p->reply(requestMsg, resp, requestMsg.header().getId());
- }
+ return ConstDataView(_nextjsobj).read<LittleEndian<T>>();
+}
- void replyToQuery(int queryResultFlags,
- AbstractMessagingPort* p, Message& requestMsg,
- const BSONObj& responseObj) {
- replyToQuery(queryResultFlags,
- p, requestMsg,
- (void *) responseObj.objdata(), responseObj.objsize(), 1);
- }
+template <typename T>
+T DbMessage::readAndAdvance() {
+ T t = read<T>();
+ _nextjsobj += sizeof(T);
+ return t;
+}
- void replyToQuery( int queryResultFlags, Message &m, DbResponse &dbresponse, BSONObj obj ) {
- Message *resp = new Message();
- replyToQuery( queryResultFlags, *resp, obj );
- dbresponse.response = resp;
- dbresponse.responseTo = m.header().getId();
- }
+void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p,
+ Message& requestMsg,
+ void* data,
+ int size,
+ int nReturned,
+ int startingFrom,
+ long long cursorId) {
+ BufBuilder b(32768);
+ b.skip(sizeof(QueryResult::Value));
+ b.appendBuf(data, size);
+ QueryResult::View qr = b.buf();
+ qr.setResultFlags(queryResultFlags);
+ qr.msgdata().setLen(b.len());
+ qr.msgdata().setOperation(opReply);
+ qr.setCursorId(cursorId);
+ qr.setStartingFrom(startingFrom);
+ qr.setNReturned(nReturned);
+ b.decouple();
+ Message resp(qr.view2ptr(), true);
+ p->reply(requestMsg, resp, requestMsg.header().getId());
+}
- void replyToQuery( int queryResultFlags, Message& response, const BSONObj& resultObj ) {
- BufBuilder bufBuilder;
- bufBuilder.skip( sizeof( QueryResult::Value ));
- bufBuilder.appendBuf( reinterpret_cast< void *>(
- const_cast< char* >( resultObj.objdata() )), resultObj.objsize() );
+void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p,
+ Message& requestMsg,
+ const BSONObj& responseObj) {
+ replyToQuery(
+ queryResultFlags, p, requestMsg, (void*)responseObj.objdata(), responseObj.objsize(), 1);
+}
- QueryResult::View queryResult = bufBuilder.buf();
- bufBuilder.decouple();
+void replyToQuery(int queryResultFlags, Message& m, DbResponse& dbresponse, BSONObj obj) {
+ Message* resp = new Message();
+ replyToQuery(queryResultFlags, *resp, obj);
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.header().getId();
+}
- queryResult.setResultFlags(queryResultFlags);
- queryResult.msgdata().setLen(bufBuilder.len());
- queryResult.msgdata().setOperation( opReply );
- queryResult.setCursorId(0);
- queryResult.setStartingFrom(0);
- queryResult.setNReturned(1);
+void replyToQuery(int queryResultFlags, Message& response, const BSONObj& resultObj) {
+ BufBuilder bufBuilder;
+ bufBuilder.skip(sizeof(QueryResult::Value));
+ bufBuilder.appendBuf(reinterpret_cast<void*>(const_cast<char*>(resultObj.objdata())),
+ resultObj.objsize());
- response.setData( queryResult.view2ptr(), true ); // transport will free
- }
+ QueryResult::View queryResult = bufBuilder.buf();
+ bufBuilder.decouple();
+ queryResult.setResultFlags(queryResultFlags);
+ queryResult.msgdata().setLen(bufBuilder.len());
+ queryResult.msgdata().setOperation(opReply);
+ queryResult.setCursorId(0);
+ queryResult.setStartingFrom(0);
+ queryResult.setNReturned(1);
+
+ response.setData(queryResult.view2ptr(), true); // transport will free
+}
}
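The DbMessage parsing flow above is unchanged by the reformatting: the constructor consumes the leading reserved int32 and, for namespaced ops, the NUL-terminated ns string; pullInt()/nextJsObj() then walk the rest of the payload with bounds checks. A minimal consumer sketch, assuming 'received' is a well-formed insert message and the mongo internal headers are available:

#include "mongo/db/dbmessage.h"

namespace mongo {
// Illustrative only; mirrors how the insert path consumes its message.
void parseInsertSketch(const Message& received) {
    DbMessage d(received);            // reads the reserved int32 and the ns string
    const char* ns = d.getns();       // valid because dbInsert carries a namespace
    while (d.moreJSObjs()) {
        BSONObj doc = d.nextJsObj();  // bounds-checked, and BSON-validated when
                                      // objcheck is enabled
        // ... handle 'doc' for namespace 'ns' ...
    }
}
}  // namespace mongo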
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index 78da93956c2..10f57a17e09 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -39,15 +39,15 @@
namespace mongo {
- /* db response format
+/* db response format
- Query or GetMore: // see struct QueryResult
- int resultFlags;
- int64 cursorID;
- int startingFrom;
- int nReturned;
- list of marshalled JSObjects;
- */
+ Query or GetMore: // see struct QueryResult
+ int resultFlags;
+ int64 cursorID;
+ int startingFrom;
+ int nReturned;
+ list of marshalled JSObjects;
+*/
/* db request message format
@@ -91,254 +91,264 @@ namespace mongo {
Note that the update field layout is very similar layout to Query.
*/
- namespace QueryResult {
+namespace QueryResult {
#pragma pack(1)
- /* see http://dochub.mongodb.org/core/mongowireprotocol
- */
- struct Layout {
- MsgData::Layout msgdata;
- int64_t cursorId;
- int32_t startingFrom;
- int32_t nReturned;
- };
+/* see http://dochub.mongodb.org/core/mongowireprotocol
+*/
+struct Layout {
+ MsgData::Layout msgdata;
+ int64_t cursorId;
+ int32_t startingFrom;
+ int32_t nReturned;
+};
#pragma pack()
- class ConstView {
- public:
- ConstView(const char* storage) : _storage(storage) { }
+class ConstView {
+public:
+ ConstView(const char* storage) : _storage(storage) {}
- const char* view2ptr() const {
- return storage().view();
- }
+ const char* view2ptr() const {
+ return storage().view();
+ }
- MsgData::ConstView msgdata() const {
- return storage().view(offsetof(Layout, msgdata));
- }
+ MsgData::ConstView msgdata() const {
+ return storage().view(offsetof(Layout, msgdata));
+ }
- int64_t getCursorId() const {
- return storage().read<LittleEndian<int64_t>>(offsetof(Layout, cursorId));
- }
+ int64_t getCursorId() const {
+ return storage().read<LittleEndian<int64_t>>(offsetof(Layout, cursorId));
+ }
- int32_t getStartingFrom() const {
- return storage().read<LittleEndian<int32_t>>(offsetof(Layout, startingFrom));
- }
+ int32_t getStartingFrom() const {
+ return storage().read<LittleEndian<int32_t>>(offsetof(Layout, startingFrom));
+ }
- int32_t getNReturned() const {
- return storage().read<LittleEndian<int32_t>>(offsetof(Layout, nReturned));
- }
+ int32_t getNReturned() const {
+ return storage().read<LittleEndian<int32_t>>(offsetof(Layout, nReturned));
+ }
- const char* data() const {
- return storage().view(sizeof(Layout));
- }
+ const char* data() const {
+ return storage().view(sizeof(Layout));
+ }
- protected:
- const ConstDataView& storage() const {
- return _storage;
- }
+protected:
+ const ConstDataView& storage() const {
+ return _storage;
+ }
- private:
- ConstDataView _storage;
- };
+private:
+ ConstDataView _storage;
+};
- class View : public ConstView {
- public:
- View(char* data) : ConstView(data) {}
+class View : public ConstView {
+public:
+ View(char* data) : ConstView(data) {}
- using ConstView::view2ptr;
- char* view2ptr() {
- return storage().view();
- }
+ using ConstView::view2ptr;
+ char* view2ptr() {
+ return storage().view();
+ }
- using ConstView::msgdata;
- MsgData::View msgdata() {
- return storage().view(offsetof(Layout, msgdata));
- }
+ using ConstView::msgdata;
+ MsgData::View msgdata() {
+ return storage().view(offsetof(Layout, msgdata));
+ }
- void setCursorId(int64_t value) {
- storage().write(tagLittleEndian(value), offsetof(Layout, cursorId));
- }
+ void setCursorId(int64_t value) {
+ storage().write(tagLittleEndian(value), offsetof(Layout, cursorId));
+ }
- void setStartingFrom(int32_t value) {
- storage().write(tagLittleEndian(value), offsetof(Layout, startingFrom));
- }
+ void setStartingFrom(int32_t value) {
+ storage().write(tagLittleEndian(value), offsetof(Layout, startingFrom));
+ }
- void setNReturned(int32_t value) {
- storage().write(tagLittleEndian(value), offsetof(Layout, nReturned));
- }
+ void setNReturned(int32_t value) {
+ storage().write(tagLittleEndian(value), offsetof(Layout, nReturned));
+ }
- int32_t getResultFlags() {
- return DataView(msgdata().data()).read<LittleEndian<int32_t>>();
- }
+ int32_t getResultFlags() {
+ return DataView(msgdata().data()).read<LittleEndian<int32_t>>();
+ }
- void setResultFlags(int32_t value) {
- DataView(msgdata().data()).write(tagLittleEndian(value));
- }
+ void setResultFlags(int32_t value) {
+ DataView(msgdata().data()).write(tagLittleEndian(value));
+ }
- void setResultFlagsToOk() {
- setResultFlags(ResultFlag_AwaitCapable);
- }
+ void setResultFlagsToOk() {
+ setResultFlags(ResultFlag_AwaitCapable);
+ }
- void initializeResultFlags() {
- setResultFlags(0);
- }
+ void initializeResultFlags() {
+ setResultFlags(0);
+ }
- private:
- DataView storage() const {
- return const_cast<char*>(ConstView::view2ptr());
- }
- };
+private:
+ DataView storage() const {
+ return const_cast<char*>(ConstView::view2ptr());
+ }
+};
- class Value : public EncodedValueStorage<Layout, ConstView, View> {
- public:
- Value() {
- BOOST_STATIC_ASSERT(sizeof(Value) == sizeof(Layout));
- }
+class Value : public EncodedValueStorage<Layout, ConstView, View> {
+public:
+ Value() {
+ BOOST_STATIC_ASSERT(sizeof(Value) == sizeof(Layout));
+ }
- Value(ZeroInitTag_t zit) : EncodedValueStorage<Layout, ConstView, View>(zit) {}
- };
+ Value(ZeroInitTag_t zit) : EncodedValueStorage<Layout, ConstView, View>(zit) {}
+};
- } // namespace QueryResult
+} // namespace QueryResult
- /* For the database/server protocol, these objects and functions encapsulate
- the various messages transmitted over the connection.
+/* For the database/server protocol, these objects and functions encapsulate
+ the various messages transmitted over the connection.
- See http://dochub.mongodb.org/core/mongowireprotocol
- */
- class DbMessage {
+ See http://dochub.mongodb.org/core/mongowireprotocol
+*/
+class DbMessage {
// Assume sizeof(int) == 4 bytes
BOOST_STATIC_ASSERT(sizeof(int) == 4);
- public:
- // Note: DbMessage constructor reads the first 4 bytes and stores it in reserved
- DbMessage(const Message& msg);
+public:
+ // Note: DbMessage constructor reads the first 4 bytes and stores it in reserved
+ DbMessage(const Message& msg);
- // Indicates whether this message is expected to have a ns
- // or in the case of dbMsg, a string in the same place as ns
- bool messageShouldHaveNs() const {
- return (_msg.operation() >= dbMsg) & (_msg.operation() <= dbDelete);
- }
+ // Indicates whether this message is expected to have a ns
+ // or in the case of dbMsg, a string in the same place as ns
+ bool messageShouldHaveNs() const {
+ return (_msg.operation() >= dbMsg) & (_msg.operation() <= dbDelete);
+ }
- /** the 32 bit field before the ns
- * track all bit usage here as its cross op
- * 0: InsertOption_ContinueOnError
- * 1: fromWriteback
- */
- int reservedField() const { return _reserved; }
+ /** the 32 bit field before the ns
+ * track all bit usage here as its cross op
+ * 0: InsertOption_ContinueOnError
+ * 1: fromWriteback
+ */
+ int reservedField() const {
+ return _reserved;
+ }
- const char * getns() const;
- int getQueryNToReturn() const;
+ const char* getns() const;
+ int getQueryNToReturn() const;
- int pullInt();
- long long pullInt64();
- const char* getArray(size_t count) const;
+ int pullInt();
+ long long pullInt64();
+ const char* getArray(size_t count) const;
- /* for insert and update msgs */
- bool moreJSObjs() const {
- return _nextjsobj != 0;
- }
+ /* for insert and update msgs */
+ bool moreJSObjs() const {
+ return _nextjsobj != 0;
+ }
- BSONObj nextJsObj();
+ BSONObj nextJsObj();
- const Message& msg() const { return _msg; }
+ const Message& msg() const {
+ return _msg;
+ }
- const char * markGet() const {
- return _nextjsobj;
- }
+ const char* markGet() const {
+ return _nextjsobj;
+ }
- void markSet() {
- _mark = _nextjsobj;
- }
+ void markSet() {
+ _mark = _nextjsobj;
+ }
- void markReset(const char * toMark);
-
- private:
- // Check if we have enough data to read
- template<typename T>
- void checkRead(const char* start, size_t count = 0) const;
-
- // Read some type without advancing our position
- template<typename T>
- T read() const;
-
- // Read some type, and advance our position
- template<typename T> T readAndAdvance();
-
- const Message& _msg;
- int _reserved; // flags or zero depending on packet, starts the packet
-
- const char* _nsStart; // start of namespace string, +4 from message start
- const char* _nextjsobj; // current position reading packet
- const char* _theEnd; // end of packet
-
- const char* _mark;
-
- unsigned int _nsLen;
- };
-
-
- /* a request to run a query, received from the database */
- class QueryMessage {
- public:
- const char *ns;
- int ntoskip;
- int ntoreturn;
- int queryOptions;
- BSONObj query;
- BSONObj fields;
-
- /**
- * parses the message into the above fields
- * Warning: constructor mutates DbMessage.
- */
- QueryMessage(DbMessage& d) {
- ns = d.getns();
- ntoskip = d.pullInt();
- ntoreturn = d.pullInt();
- query = d.nextJsObj();
- if ( d.moreJSObjs() ) {
- fields = d.nextJsObj();
- }
- queryOptions = DataView(d.msg().header().data()).read<LittleEndian<int32_t>>();
- }
- };
+ void markReset(const char* toMark);
- /**
- * A response to a DbMessage.
- */
- struct DbResponse {
- Message *response;
- MSGID responseTo;
- std::string exhaustNS; /* points to ns if exhaust mode. 0=normal mode*/
- DbResponse(Message *r, MSGID rt) : response(r), responseTo(rt){ }
- DbResponse() {
- response = 0;
- }
- ~DbResponse() { delete response; }
- };
+private:
+ // Check if we have enough data to read
+ template <typename T>
+ void checkRead(const char* start, size_t count = 0) const;
+
+ // Read some type without advancing our position
+ template <typename T>
+ T read() const;
+
+ // Read some type, and advance our position
+ template <typename T>
+ T readAndAdvance();
+
+ const Message& _msg;
+ int _reserved; // flags or zero depending on packet, starts the packet
+
+ const char* _nsStart; // start of namespace string, +4 from message start
+ const char* _nextjsobj; // current position reading packet
+ const char* _theEnd; // end of packet
- void replyToQuery(int queryResultFlags,
- AbstractMessagingPort* p, Message& requestMsg,
- void *data, int size,
- int nReturned, int startingFrom = 0,
- long long cursorId = 0
- );
+ const char* _mark;
+ unsigned int _nsLen;
+};
- /* object reply helper. */
- void replyToQuery(int queryResultFlags,
- AbstractMessagingPort* p, Message& requestMsg,
- const BSONObj& responseObj);
- /* helper to do a reply using a DbResponse object */
- void replyToQuery( int queryResultFlags, Message& m, DbResponse& dbresponse, BSONObj obj );
+/* a request to run a query, received from the database */
+class QueryMessage {
+public:
+ const char* ns;
+ int ntoskip;
+ int ntoreturn;
+ int queryOptions;
+ BSONObj query;
+ BSONObj fields;
/**
- * Helper method for setting up a response object.
- *
- * @param queryResultFlags The flags to set to the response object.
- * @param response The object to be used for building the response. The internal buffer of
- * this object will contain the raw data from resultObj after a successful call.
- * @param resultObj The bson object that contains the reply data.
+ * parses the message into the above fields
+ * Warning: constructor mutates DbMessage.
*/
- void replyToQuery( int queryResultFlags, Message& response, const BSONObj& resultObj );
-} // namespace mongo
+ QueryMessage(DbMessage& d) {
+ ns = d.getns();
+ ntoskip = d.pullInt();
+ ntoreturn = d.pullInt();
+ query = d.nextJsObj();
+ if (d.moreJSObjs()) {
+ fields = d.nextJsObj();
+ }
+ queryOptions = DataView(d.msg().header().data()).read<LittleEndian<int32_t>>();
+ }
+};
+
+/**
+ * A response to a DbMessage.
+ */
+struct DbResponse {
+ Message* response;
+ MSGID responseTo;
+ std::string exhaustNS; /* points to ns if exhaust mode. 0=normal mode*/
+ DbResponse(Message* r, MSGID rt) : response(r), responseTo(rt) {}
+ DbResponse() {
+ response = 0;
+ }
+ ~DbResponse() {
+ delete response;
+ }
+};
+
+void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p,
+ Message& requestMsg,
+ void* data,
+ int size,
+ int nReturned,
+ int startingFrom = 0,
+ long long cursorId = 0);
+
+
+/* object reply helper. */
+void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p,
+ Message& requestMsg,
+ const BSONObj& responseObj);
+
+/* helper to do a reply using a DbResponse object */
+void replyToQuery(int queryResultFlags, Message& m, DbResponse& dbresponse, BSONObj obj);
+
+/**
+ * Helper method for setting up a response object.
+ *
+ * @param queryResultFlags The flags to set to the response object.
+ * @param response The object to be used for building the response. The internal buffer of
+ * this object will contain the raw data from resultObj after a successful call.
+ * @param resultObj The bson object that contains the reply data.
+ */
+void replyToQuery(int queryResultFlags, Message& response, const BSONObj& resultObj);
+} // namespace mongo
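On the reply side, a short sketch of the DbResponse-based replyToQuery() overload declared above; the function name is illustrative. It relies on two facts visible in this patch: the overload sets responseTo from the request header, and DbResponse deletes the Message it ends up owning:

#include "mongo/db/dbmessage.h"

namespace mongo {
// Illustrative only: answer a request with a single BSON document.
void replySketch(Message& requestMsg, DbResponse& dbresponse) {
    // Builds an OP_REPLY carrying one object (nReturned == 1); the Message it
    // allocates is owned, and later deleted, by 'dbresponse'.
    replyToQuery(0 /* queryResultFlags */, requestMsg, dbresponse, BSON("ok" << 1));
}
}  // namespace mongo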
diff --git a/src/mongo/db/dbmessage_test.cpp b/src/mongo/db/dbmessage_test.cpp
index 867a52d9885..c0d40f49bde 100644
--- a/src/mongo/db/dbmessage_test.cpp
+++ b/src/mongo/db/dbmessage_test.cpp
@@ -33,111 +33,110 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- using std::string;
+using std::string;
- // Test if the reserved field is short of 4 bytes
- TEST(DBMessage1, ShortFlags) {
- BufBuilder b;
- string ns("test");
+// Test if the reserved field is short of 4 bytes
+TEST(DBMessage1, ShortFlags) {
+ BufBuilder b;
+ string ns("test");
- b.appendChar( 1 );
+ b.appendChar(1);
- Message toSend;
- toSend.setData( dbDelete , b.buf() , b.len() );
+ Message toSend;
+ toSend.setData(dbDelete, b.buf(), b.len());
- ASSERT_THROWS(DbMessage d1(toSend), UserException);
- }
+ ASSERT_THROWS(DbMessage d1(toSend), UserException);
+}
- // Test a short NS missing a trailing null
- TEST(DBMessage1, BadNS) {
- BufBuilder b;
+// Test a short NS missing a trailing null
+TEST(DBMessage1, BadNS) {
+ BufBuilder b;
- b.appendNum( static_cast<int>(1) );
- b.appendChar( 'b' );
- b.appendChar( 'a' );
- b.appendChar( 'd' );
- // Forget to append \0
+ b.appendNum(static_cast<int>(1));
+ b.appendChar('b');
+ b.appendChar('a');
+ b.appendChar('d');
+ // Forget to append \0
- Message toSend;
- toSend.setData( dbDelete , b.buf() , b.len() );
+ Message toSend;
+ toSend.setData(dbDelete, b.buf(), b.len());
- ASSERT_THROWS(DbMessage d1(toSend), UserException);
- }
+ ASSERT_THROWS(DbMessage d1(toSend), UserException);
+}
- // Test a valid kill message and try an extra pull
- TEST(DBMessage1, GoodKill) {
- BufBuilder b;
+// Test a valid kill message and try an extra pull
+TEST(DBMessage1, GoodKill) {
+ BufBuilder b;
- b.appendNum( static_cast<int>(1) );
- b.appendNum( static_cast<int>(3) );
+ b.appendNum(static_cast<int>(1));
+ b.appendNum(static_cast<int>(3));
- Message toSend;
- toSend.setData( dbKillCursors , b.buf() , b.len() );
+ Message toSend;
+ toSend.setData(dbKillCursors, b.buf(), b.len());
- DbMessage d1(toSend);
- ASSERT_EQUALS(3, d1.pullInt());
+ DbMessage d1(toSend);
+ ASSERT_EQUALS(3, d1.pullInt());
- ASSERT_THROWS(d1.pullInt(), UserException);
- }
+ ASSERT_THROWS(d1.pullInt(), UserException);
+}
- // Try a bad read of a type too large
- TEST(DBMessage1, GoodKill2) {
- BufBuilder b;
+// Try a bad read of a type too large
+TEST(DBMessage1, GoodKill2) {
+ BufBuilder b;
- b.appendNum( static_cast<int>(1) );
- b.appendNum( static_cast<int>(3) );
+ b.appendNum(static_cast<int>(1));
+ b.appendNum(static_cast<int>(3));
- Message toSend;
- toSend.setData( dbKillCursors , b.buf() , b.len() );
+ Message toSend;
+ toSend.setData(dbKillCursors, b.buf(), b.len());
- DbMessage d1(toSend);
- ASSERT_THROWS(d1.pullInt64(), UserException);
- }
+ DbMessage d1(toSend);
+ ASSERT_THROWS(d1.pullInt64(), UserException);
+}
- // Test a basic good insert, and an extra read
- TEST(DBMessage1, GoodInsert) {
- BufBuilder b;
- string ns("test");
+// Test a basic good insert, and an extra read
+TEST(DBMessage1, GoodInsert) {
+ BufBuilder b;
+ string ns("test");
- b.appendNum( static_cast<int>(1) );
- b.appendStr(ns);
- b.appendNum( static_cast<int>(3) );
- b.appendNum( static_cast<int>(39) );
+ b.appendNum(static_cast<int>(1));
+ b.appendStr(ns);
+ b.appendNum(static_cast<int>(3));
+ b.appendNum(static_cast<int>(39));
- Message toSend;
- toSend.setData( dbInsert , b.buf() , b.len() );
+ Message toSend;
+ toSend.setData(dbInsert, b.buf(), b.len());
- DbMessage d1(toSend);
- ASSERT_EQUALS(3, d1.pullInt());
- ASSERT_EQUALS(39, d1.pullInt());
- ASSERT_THROWS(d1.pullInt(), UserException);
- }
+ DbMessage d1(toSend);
+ ASSERT_EQUALS(3, d1.pullInt());
+ ASSERT_EQUALS(39, d1.pullInt());
+ ASSERT_THROWS(d1.pullInt(), UserException);
+}
- // Test a basic good insert, and an extra read
- TEST(DBMessage1, GoodInsert2) {
- BufBuilder b;
- string ns("test");
+// Test a basic good insert, and an extra read
+TEST(DBMessage1, GoodInsert2) {
+ BufBuilder b;
+ string ns("test");
- b.appendNum( static_cast<int>(1) );
- b.appendStr(ns);
- b.appendNum( static_cast<int>(3) );
- b.appendNum( static_cast<int>(39) );
+ b.appendNum(static_cast<int>(1));
+ b.appendStr(ns);
+ b.appendNum(static_cast<int>(3));
+ b.appendNum(static_cast<int>(39));
- BSONObj bo = BSON( "ts" << 0 );
- bo.appendSelfToBufBuilder( b );
+ BSONObj bo = BSON("ts" << 0);
+ bo.appendSelfToBufBuilder(b);
- Message toSend;
- toSend.setData( dbInsert , b.buf() , b.len() );
+ Message toSend;
+ toSend.setData(dbInsert, b.buf(), b.len());
- DbMessage d1(toSend);
- ASSERT_EQUALS(3, d1.pullInt());
+ DbMessage d1(toSend);
+ ASSERT_EQUALS(3, d1.pullInt());
- ASSERT_EQUALS(39, d1.pullInt());
- BSONObj bo2 = d1.nextJsObj();
- ASSERT_THROWS(d1.nextJsObj(), MsgAssertionException);
- }
+ ASSERT_EQUALS(39, d1.pullInt());
+ BSONObj bo2 = d1.nextJsObj();
+ ASSERT_THROWS(d1.nextJsObj(), MsgAssertionException);
+}
-
-} // mongo namespace
+} // mongo namespace
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index 73662327766..1ad6e54a236 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -66,548 +66,546 @@
namespace mongo {
- using std::map;
- using std::stringstream;
- using std::vector;
+using std::map;
+using std::stringstream;
+using std::vector;
- using namespace html;
+using namespace html;
namespace {
- void doUnlockedStuff(stringstream& ss) {
- // This is in the header already ss << "port: " << port << '\n'
- ss << "<pre>";
- ss << mongodVersion() << '\n';
- ss << "git hash: " << gitVersion() << '\n';
- ss << openSSLVersion("OpenSSL version: ", "\n");
- ss << "uptime: " << time(0) - serverGlobalParams.started << " seconds\n";
- ss << "</pre>";
- }
-
+void doUnlockedStuff(stringstream& ss) {
+ // This is in the header already ss << "port: " << port << '\n'
+ ss << "<pre>";
+ ss << mongodVersion() << '\n';
+ ss << "git hash: " << gitVersion() << '\n';
+ ss << openSSLVersion("OpenSSL version: ", "\n");
+ ss << "uptime: " << time(0) - serverGlobalParams.started << " seconds\n";
+ ss << "</pre>";
+}
- bool prisort(const Prioritizable* a, const Prioritizable* b) {
- return a->priority() < b->priority();
- }
-
- struct Timing {
- Timing() {
- start = timeLocked = 0;
- }
- unsigned long long start, timeLocked;
- };
+bool prisort(const Prioritizable* a, const Prioritizable* b) {
+ return a->priority() < b->priority();
+}
- class LogPlugin : public WebStatusPlugin {
- public:
- LogPlugin() : WebStatusPlugin("Log", 100), _log(0) {
- _log = RamLog::get("global");
- }
-
- virtual void init() {}
-
- virtual void run(OperationContext* txn, stringstream& ss) {
- _log->toHTML(ss);
- }
- RamLog * _log;
- };
+struct Timing {
+ Timing() {
+ start = timeLocked = 0;
+ }
+ unsigned long long start, timeLocked;
+};
- class FavIconHandler : public DbWebHandler {
- public:
- FavIconHandler() : DbWebHandler("favicon.ico", 0, false) {}
+class LogPlugin : public WebStatusPlugin {
+public:
+ LogPlugin() : WebStatusPlugin("Log", 100), _log(0) {
+ _log = RamLog::get("global");
+ }
- virtual void handle(OperationContext* txn,
- const char *rq, const std::string& url, BSONObj params,
- string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from) {
- responseCode = 404;
- headers.push_back("Content-Type: text/plain;charset=utf-8");
- responseMsg = "no favicon\n";
- }
+ virtual void init() {}
- } faviconHandler;
+ virtual void run(OperationContext* txn, stringstream& ss) {
+ _log->toHTML(ss);
+ }
+ RamLog* _log;
+};
+
+
+class FavIconHandler : public DbWebHandler {
+public:
+ FavIconHandler() : DbWebHandler("favicon.ico", 0, false) {}
+
+ virtual void handle(OperationContext* txn,
+ const char* rq,
+ const std::string& url,
+ BSONObj params,
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers,
+ const SockAddr& from) {
+ responseCode = 404;
+ headers.push_back("Content-Type: text/plain;charset=utf-8");
+ responseMsg = "no favicon\n";
+ }
+} faviconHandler;
- class StatusHandler : public DbWebHandler {
- public:
- StatusHandler() : DbWebHandler("_status", 1, false) {}
- virtual void handle(OperationContext* txn,
- const char *rq, const std::string& url, BSONObj params,
- string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from) {
- headers.push_back("Content-Type: application/json;charset=utf-8");
- responseCode = 200;
+class StatusHandler : public DbWebHandler {
+public:
+ StatusHandler() : DbWebHandler("_status", 1, false) {}
- static vector<string> commands;
- if (commands.size() == 0) {
- commands.push_back("serverStatus");
- commands.push_back("buildinfo");
- }
+ virtual void handle(OperationContext* txn,
+ const char* rq,
+ const std::string& url,
+ BSONObj params,
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers,
+ const SockAddr& from) {
+ headers.push_back("Content-Type: application/json;charset=utf-8");
+ responseCode = 200;
- BSONObjBuilder buf(1024);
+ static vector<string> commands;
+ if (commands.size() == 0) {
+ commands.push_back("serverStatus");
+ commands.push_back("buildinfo");
+ }
- for (unsigned i = 0; i<commands.size(); i++) {
- string cmd = commands[i];
+ BSONObjBuilder buf(1024);
- Command * c = Command::findCommand(cmd);
- verify(c);
+ for (unsigned i = 0; i < commands.size(); i++) {
+ string cmd = commands[i];
- BSONObj co;
- {
- BSONObjBuilder b;
- b.append(cmd, 1);
+ Command* c = Command::findCommand(cmd);
+ verify(c);
- if (cmd == "serverStatus" && params["repl"].type()) {
- b.append("repl", atoi(params["repl"].valuestr()));
- }
+ BSONObj co;
+ {
+ BSONObjBuilder b;
+ b.append(cmd, 1);
- co = b.obj();
+ if (cmd == "serverStatus" && params["repl"].type()) {
+ b.append("repl", atoi(params["repl"].valuestr()));
}
- string errmsg;
-
- BSONObjBuilder sub;
- if (!c->run(txn, "admin.$cmd", co, 0, errmsg, sub))
- buf.append(cmd, errmsg);
- else
- buf.append(cmd, sub.obj());
+ co = b.obj();
}
- responseMsg = buf.obj().jsonString();
- }
+ string errmsg;
- } statusHandler;
+ BSONObjBuilder sub;
+ if (!c->run(txn, "admin.$cmd", co, 0, errmsg, sub))
+ buf.append(cmd, errmsg);
+ else
+ buf.append(cmd, sub.obj());
+ }
+ responseMsg = buf.obj().jsonString();
+ }
- class CommandListHandler : public DbWebHandler {
- public:
- CommandListHandler() : DbWebHandler("_commands", 1, true) {}
+} statusHandler;
- virtual void handle(OperationContext* txn,
- const char *rq, const std::string& url, BSONObj params,
- string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from) {
- headers.push_back("Content-Type: text/html;charset=utf-8");
- responseCode = 200;
- stringstream ss;
- ss << start("Commands List");
- ss << p(a("/", "back", "Home"));
- ss << p("<b>MongoDB List of "
- "<a href=\"http://dochub.mongodb.org/core/commands\">Commands</a>"
- "</b>\n");
+class CommandListHandler : public DbWebHandler {
+public:
+ CommandListHandler() : DbWebHandler("_commands", 1, true) {}
- const Command::CommandMap* m = Command::commandsByBestName();
- ss << "S:slave-ok R:read-lock W:write-lock A:admin-only<br>\n";
- ss << table();
- ss << "<tr><th>Command</th><th>Attributes</th><th>Help</th></tr>\n";
- for (Command::CommandMap::const_iterator i = m->begin(); i != m->end(); ++i) {
- i->second->htmlHelp(ss);
- }
- ss << _table() << _end();
+ virtual void handle(OperationContext* txn,
+ const char* rq,
+ const std::string& url,
+ BSONObj params,
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers,
+ const SockAddr& from) {
+ headers.push_back("Content-Type: text/html;charset=utf-8");
+ responseCode = 200;
- responseMsg = ss.str();
+ stringstream ss;
+ ss << start("Commands List");
+ ss << p(a("/", "back", "Home"));
+ ss << p(
+ "<b>MongoDB List of "
+ "<a href=\"http://dochub.mongodb.org/core/commands\">Commands</a>"
+ "</b>\n");
+
+ const Command::CommandMap* m = Command::commandsByBestName();
+ ss << "S:slave-ok R:read-lock W:write-lock A:admin-only<br>\n";
+ ss << table();
+ ss << "<tr><th>Command</th><th>Attributes</th><th>Help</th></tr>\n";
+ for (Command::CommandMap::const_iterator i = m->begin(); i != m->end(); ++i) {
+ i->second->htmlHelp(ss);
}
- } commandListHandler;
+ ss << _table() << _end();
+ responseMsg = ss.str();
+ }
+} commandListHandler;
- class CommandsHandler : public DbWebHandler {
- public:
- CommandsHandler() : DbWebHandler("DUMMY COMMANDS", 2, true) {}
- bool _cmd(const string& url, string& cmd, bool& text, bo params) const {
- cmd = str::after(url, '/');
- text = params["text"].boolean();
- return true;
- }
+class CommandsHandler : public DbWebHandler {
+public:
+ CommandsHandler() : DbWebHandler("DUMMY COMMANDS", 2, true) {}
- Command * _cmd(const string& cmd) const {
- const Command::CommandMap* m = Command::webCommands();
- if (!m)
- return 0;
+ bool _cmd(const string& url, string& cmd, bool& text, bo params) const {
+ cmd = str::after(url, '/');
+ text = params["text"].boolean();
+ return true;
+ }
- Command::CommandMap::const_iterator i = m->find(cmd);
- if (i == m->end())
- return 0;
+ Command* _cmd(const string& cmd) const {
+ const Command::CommandMap* m = Command::webCommands();
+ if (!m)
+ return 0;
- return i->second;
- }
+ Command::CommandMap::const_iterator i = m->find(cmd);
+ if (i == m->end())
+ return 0;
- virtual bool handles(const string& url) const {
- string cmd;
- bool text;
- if (!_cmd(url, cmd, text, bo()))
- return false;
- return _cmd(cmd) != 0;
- }
+ return i->second;
+ }
- virtual void handle(OperationContext* txn,
- const char *rq, const std::string& url, BSONObj params,
- string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from) {
- string cmd;
- bool text = false;
- verify(_cmd(url, cmd, text, params));
- Command * c = _cmd(cmd);
- verify(c);
+ virtual bool handles(const string& url) const {
+ string cmd;
+ bool text;
+ if (!_cmd(url, cmd, text, bo()))
+ return false;
+ return _cmd(cmd) != 0;
+ }
- BSONObj cmdObj = BSON(cmd << 1);
+ virtual void handle(OperationContext* txn,
+ const char* rq,
+ const std::string& url,
+ BSONObj params,
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers,
+ const SockAddr& from) {
+ string cmd;
+ bool text = false;
+ verify(_cmd(url, cmd, text, params));
+ Command* c = _cmd(cmd);
+ verify(c);
- rpc::CommandRequestBuilder requestBuilder{};
+ BSONObj cmdObj = BSON(cmd << 1);
- requestBuilder.setDatabase("admin")
- .setCommandName(cmd)
- .setMetadata(rpc::makeEmptyMetadata())
- .setCommandArgs(cmdObj);
+ rpc::CommandRequestBuilder requestBuilder{};
- auto cmdRequestMsg = requestBuilder.done();
- rpc::CommandRequest cmdRequest{cmdRequestMsg.get()};
- rpc::CommandReplyBuilder cmdReplyBuilder{};
+ requestBuilder.setDatabase("admin")
+ .setCommandName(cmd)
+ .setMetadata(rpc::makeEmptyMetadata())
+ .setCommandArgs(cmdObj);
- Command::execCommand(txn, c, cmdRequest, &cmdReplyBuilder);
+ auto cmdRequestMsg = requestBuilder.done();
+ rpc::CommandRequest cmdRequest{cmdRequestMsg.get()};
+ rpc::CommandReplyBuilder cmdReplyBuilder{};
- auto cmdReplyMsg = cmdReplyBuilder.done();
- rpc::CommandReply cmdReply{cmdReplyMsg.get()};
+ Command::execCommand(txn, c, cmdRequest, &cmdReplyBuilder);
- responseCode = 200;
+ auto cmdReplyMsg = cmdReplyBuilder.done();
+ rpc::CommandReply cmdReply{cmdReplyMsg.get()};
- string j = cmdReply.getCommandReply().jsonString(Strict, text);
- responseMsg = j;
+ responseCode = 200;
- if (text) {
- headers.push_back("Content-Type: text/plain;charset=utf-8");
- responseMsg += '\n';
- }
- else {
- headers.push_back("Content-Type: application/json;charset=utf-8");
- }
+ string j = cmdReply.getCommandReply().jsonString(Strict, text);
+ responseMsg = j;
+ if (text) {
+ headers.push_back("Content-Type: text/plain;charset=utf-8");
+ responseMsg += '\n';
+ } else {
+ headers.push_back("Content-Type: application/json;charset=utf-8");
}
+ }
- } commandsHandler;
+} commandsHandler;
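The handlers above all follow one shape: construct the DbWebHandler base with a path, a priority, and a REST-required flag, then override handle() to fill in headers, response code, and body. A hedged sketch of another handler in that style; the "_ping" path and its body are illustrative, and it assumes (as faviconHandler and statusHandler suggest) that a namespace-scope instance is what lets DbWebHandler::findHandler() route requests to it:

// Illustrative only; follows the pattern of the handlers above.
class PingHandler : public DbWebHandler {
public:
    PingHandler() : DbWebHandler("_ping", 1, false) {}  // path, priority, requiresREST

    virtual void handle(OperationContext* txn,
                        const char* rq,
                        const std::string& url,
                        BSONObj params,
                        string& responseMsg,
                        int& responseCode,
                        vector<string>& headers,
                        const SockAddr& from) {
        headers.push_back("Content-Type: text/plain;charset=utf-8");
        responseCode = 200;
        responseMsg = "pong\n";
    }
} pingHandler;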
- MONGO_INITIALIZER(WebStatusLogPlugin)(InitializerContext*) {
- if (serverGlobalParams.isHttpInterfaceEnabled) {
- new LogPlugin;
- }
- return Status::OK();
+MONGO_INITIALIZER(WebStatusLogPlugin)(InitializerContext*) {
+ if (serverGlobalParams.isHttpInterfaceEnabled) {
+ new LogPlugin;
}
+ return Status::OK();
+}
-} // namespace
+} // namespace
- DbWebServer::DbWebServer(const string& ip, int port, AdminAccess* webUsers)
- : MiniWebServer("admin web console", ip, port),
- _webUsers(webUsers) {
+DbWebServer::DbWebServer(const string& ip, int port, AdminAccess* webUsers)
+ : MiniWebServer("admin web console", ip, port), _webUsers(webUsers) {
+ WebStatusPlugin::initAll();
+}
- WebStatusPlugin::initAll();
- }
+void DbWebServer::doRequest(const char* rq,
+ string url,
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers,
+ const SockAddr& from) {
+ Client* client = &cc();
+ auto txn = client->makeOperationContext();
- void DbWebServer::doRequest(const char *rq,
- string url,
- string& responseMsg,
- int& responseCode,
- vector<string>& headers,
- const SockAddr &from) {
+ if (url.size() > 1) {
+ if (!_allowed(txn.get(), rq, headers, from)) {
+ responseCode = 401;
+ headers.push_back("Content-Type: text/plain;charset=utf-8");
+ responseMsg = "not allowed\n";
+ return;
+ }
- Client* client = &cc();
- auto txn = client->makeOperationContext();
+ {
+ BSONObj params;
+ const size_t pos = url.find("?");
+ if (pos != string::npos) {
+ MiniWebServer::parseParams(params, url.substr(pos + 1));
+ url = url.substr(0, pos);
+ }
- if (url.size() > 1) {
+ DbWebHandler* handler = DbWebHandler::findHandler(url);
+ if (handler) {
+ if (handler->requiresREST(url) && !serverGlobalParams.rest) {
+ _rejectREST(responseMsg, responseCode, headers);
+ } else {
+ const string callback = params.getStringField("jsonp");
- if (!_allowed(txn.get(), rq, headers, from)) {
- responseCode = 401;
- headers.push_back("Content-Type: text/plain;charset=utf-8");
- responseMsg = "not allowed\n";
- return;
- }
+ uassert(13453,
+ "server not started with --jsonp",
+ callback.empty() || serverGlobalParams.jsonp);
- {
- BSONObj params;
- const size_t pos = url.find("?");
- if (pos != string::npos) {
- MiniWebServer::parseParams(params, url.substr(pos + 1));
- url = url.substr(0, pos);
- }
+ handler->handle(
+ txn.get(), rq, url, params, responseMsg, responseCode, headers, from);
- DbWebHandler * handler = DbWebHandler::findHandler(url);
- if (handler) {
- if (handler->requiresREST(url) && !serverGlobalParams.rest) {
- _rejectREST(responseMsg, responseCode, headers);
- }
- else {
- const string callback = params.getStringField("jsonp");
-
- uassert(13453,
- "server not started with --jsonp",
- callback.empty() || serverGlobalParams.jsonp);
-
- handler->handle(txn.get(),
- rq,
- url,
- params,
- responseMsg,
- responseCode,
- headers,
- from);
-
- if (responseCode == 200 && !callback.empty()) {
- responseMsg = callback + '(' + responseMsg + ')';
- }
+ if (responseCode == 200 && !callback.empty()) {
+ responseMsg = callback + '(' + responseMsg + ')';
}
-
- return;
}
- }
- if (!serverGlobalParams.rest) {
- _rejectREST(responseMsg, responseCode, headers);
return;
}
+ }
- responseCode = 404;
- headers.push_back("Content-Type: text/html;charset=utf-8");
- responseMsg = "<html><body>unknown url</body></html>\n";
+ if (!serverGlobalParams.rest) {
+ _rejectREST(responseMsg, responseCode, headers);
return;
}
- // generate home page
+ responseCode = 404;
+ headers.push_back("Content-Type: text/html;charset=utf-8");
+ responseMsg = "<html><body>unknown url</body></html>\n";
+ return;
+ }
- if (!_allowed(txn.get(), rq, headers, from)) {
- responseCode = 401;
- headers.push_back("Content-Type: text/plain;charset=utf-8");
- responseMsg = "not allowed\n";
- return;
- }
+ // generate home page
- responseCode = 200;
- stringstream ss;
- string dbname;
- {
- stringstream z;
- z << serverGlobalParams.binaryName << ' ' << prettyHostName();
- dbname = z.str();
- }
+ if (!_allowed(txn.get(), rq, headers, from)) {
+ responseCode = 401;
+ headers.push_back("Content-Type: text/plain;charset=utf-8");
+ responseMsg = "not allowed\n";
+ return;
+ }
- ss << start(dbname) << h2(dbname);
- ss << "<p><a href=\"/_commands\">List all commands</a> | \n";
- ss << "<a href=\"/_replSet\">Replica set status</a></p>\n";
+ responseCode = 200;
+ stringstream ss;
+ string dbname;
+ {
+ stringstream z;
+ z << serverGlobalParams.binaryName << ' ' << prettyHostName();
+ dbname = z.str();
+ }
- {
- const Command::CommandMap* m = Command::webCommands();
- if (m) {
- ss << a("",
- "These read-only context-less commands can be executed from the web "
- "interface. Results are json format, unless ?text=1 is appended in which "
- "case the result is output as text for easier human viewing",
- "Commands")
- << ": ";
-
- for (Command::CommandMap::const_iterator i = m->begin(); i != m->end(); ++i) {
- stringstream h;
- i->second->help(h);
-
- const string help = h.str();
- ss << "<a href=\"/" << i->first << "?text=1\"";
- if (help != "no help defined") {
- ss << " title=\"" << help << '"';
- }
+ ss << start(dbname) << h2(dbname);
+ ss << "<p><a href=\"/_commands\">List all commands</a> | \n";
+ ss << "<a href=\"/_replSet\">Replica set status</a></p>\n";
+
+ {
+ const Command::CommandMap* m = Command::webCommands();
+ if (m) {
+ ss << a("",
+ "These read-only context-less commands can be executed from the web "
+ "interface. Results are json format, unless ?text=1 is appended in which "
+ "case the result is output as text for easier human viewing",
+ "Commands") << ": ";
- ss << ">" << i->first << "</a> ";
+ for (Command::CommandMap::const_iterator i = m->begin(); i != m->end(); ++i) {
+ stringstream h;
+ i->second->help(h);
+
+ const string help = h.str();
+ ss << "<a href=\"/" << i->first << "?text=1\"";
+ if (help != "no help defined") {
+ ss << " title=\"" << help << '"';
}
- ss << '\n';
+
+ ss << ">" << i->first << "</a> ";
}
+ ss << '\n';
}
+ }
- ss << '\n';
+ ss << '\n';
- doUnlockedStuff(ss);
+ doUnlockedStuff(ss);
- WebStatusPlugin::runAll(txn.get(), ss);
+ WebStatusPlugin::runAll(txn.get(), ss);
- ss << "</body></html>\n";
- responseMsg = ss.str();
- headers.push_back("Content-Type: text/html;charset=utf-8");
- }
+ ss << "</body></html>\n";
+ responseMsg = ss.str();
+ headers.push_back("Content-Type: text/html;charset=utf-8");
+}
- bool DbWebServer::_allowed(OperationContext* txn,
- const char * rq,
- vector<string>& headers,
- const SockAddr &from) {
+bool DbWebServer::_allowed(OperationContext* txn,
+ const char* rq,
+ vector<string>& headers,
+ const SockAddr& from) {
+ AuthorizationSession* authSess = AuthorizationSession::get(txn->getClient());
+ if (!authSess->getAuthorizationManager().isAuthEnabled()) {
+ return true;
+ }
- AuthorizationSession* authSess = AuthorizationSession::get(txn->getClient());
- if (!authSess->getAuthorizationManager().isAuthEnabled()) {
- return true;
- }
+ if (from.isLocalHost() && !_webUsers->haveAdminUsers(txn)) {
+ authSess->grantInternalAuthorization();
+ return true;
+ }
- if (from.isLocalHost() && !_webUsers->haveAdminUsers(txn)) {
- authSess->grantInternalAuthorization();
- return true;
- }
+ string auth = getHeader(rq, "Authorization");
- string auth = getHeader(rq, "Authorization");
+ if (auth.size() > 0 && auth.find("Digest ") == 0) {
+ auth = auth.substr(7) + ", ";
- if (auth.size() > 0 && auth.find("Digest ") == 0) {
- auth = auth.substr(7) + ", ";
+ map<string, string> parms;
+ pcrecpp::StringPiece input(auth);
- map<string, string> parms;
- pcrecpp::StringPiece input(auth);
+ string name, val;
+ pcrecpp::RE re("(\\w+)=\"?(.*?)\"?,\\s*");
+ while (re.Consume(&input, &name, &val)) {
+ parms[name] = val;
+ }
- string name, val;
- pcrecpp::RE re("(\\w+)=\"?(.*?)\"?,\\s*");
- while (re.Consume(&input, &name, &val)) {
- parms[name] = val;
+        // Only users in the admin DB are visible to the webserver
+ UserName userName(parms["username"], "admin");
+ User* user;
+ AuthorizationManager& authzManager = authSess->getAuthorizationManager();
+ Status status = authzManager.acquireUser(txn, userName, &user);
+ if (!status.isOK()) {
+ if (status.code() != ErrorCodes::UserNotFound) {
+ uasserted(17051, status.reason());
}
+ } else {
+ uassert(
+ 17090, "External users don't have a password", !user->getCredentials().isExternal);
- // Only users in the admin DB are visible by the webserver
- UserName userName(parms["username"], "admin");
- User* user;
- AuthorizationManager& authzManager = authSess->getAuthorizationManager();
- Status status = authzManager.acquireUser(txn, userName, &user);
- if (!status.isOK()) {
- if (status.code() != ErrorCodes::UserNotFound) {
- uasserted(17051, status.reason());
- }
+ string ha1 = user->getCredentials().password;
+ authzManager.releaseUser(user);
+ if (ha1.empty()) {
+ return false;
}
- else {
- uassert(17090,
- "External users don't have a password",
- !user->getCredentials().isExternal);
-
- string ha1 = user->getCredentials().password;
- authzManager.releaseUser(user);
- if (ha1.empty()) {
- return false;
- }
- const string ha2 = md5simpledigest((string)"GET" + ":" + parms["uri"]);
-
- stringstream r;
- r << ha1 << ':' << parms["nonce"];
- if (parms["nc"].size() && parms["cnonce"].size() && parms["qop"].size()) {
- r << ':';
- r << parms["nc"];
- r << ':';
- r << parms["cnonce"];
- r << ':';
- r << parms["qop"];
- }
+ const string ha2 = md5simpledigest((string) "GET" + ":" + parms["uri"]);
+
+ stringstream r;
+ r << ha1 << ':' << parms["nonce"];
+ if (parms["nc"].size() && parms["cnonce"].size() && parms["qop"].size()) {
+ r << ':';
+ r << parms["nc"];
r << ':';
- r << ha2;
+ r << parms["cnonce"];
+ r << ':';
+ r << parms["qop"];
+ }
+ r << ':';
+ r << ha2;
- const string r1 = md5simpledigest(r.str());
+ const string r1 = md5simpledigest(r.str());
- if (r1 == parms["response"]) {
- Status status = authSess->addAndAuthorizeUser(txn, userName);
- uassertStatusOK(status);
- return true;
- }
+ if (r1 == parms["response"]) {
+ Status status = authSess->addAndAuthorizeUser(txn, userName);
+ uassertStatusOK(status);
+ return true;
}
}
-
- stringstream authHeader;
- authHeader << "WWW-Authenticate: "
- << "Digest realm=\"mongo\", "
- << "nonce=\"abc\", "
- << "algorithm=MD5, qop=\"auth\" ";
-
- headers.push_back(authHeader.str());
- return 0;
}
- void DbWebServer::_rejectREST(string& responseMsg, int& responseCode, vector<string>& headers) {
- responseCode = 403;
- stringstream ss;
- ss << "REST is not enabled. use --rest to turn on.\n";
- ss << "check that port " << _port << " is secured for the network too.\n";
- responseMsg = ss.str();
- headers.push_back("Content-Type: text/plain;charset=utf-8");
- }
+ stringstream authHeader;
+ authHeader << "WWW-Authenticate: "
+ << "Digest realm=\"mongo\", "
+ << "nonce=\"abc\", "
+ << "algorithm=MD5, qop=\"auth\" ";
+ headers.push_back(authHeader.str());
+    return false;
+}
- // -- status framework ---
- WebStatusPlugin::WebStatusPlugin( const string& secionName , double priority , const string& subheader )
- : Prioritizable(priority), _name( secionName ) , _subHeading( subheader ) {
- if ( ! _plugins )
- _plugins = new vector<WebStatusPlugin*>();
- _plugins->push_back( this );
- }
+void DbWebServer::_rejectREST(string& responseMsg, int& responseCode, vector<string>& headers) {
+ responseCode = 403;
+ stringstream ss;
+ ss << "REST is not enabled. use --rest to turn on.\n";
+ ss << "check that port " << _port << " is secured for the network too.\n";
+ responseMsg = ss.str();
+ headers.push_back("Content-Type: text/plain;charset=utf-8");
+}
- void WebStatusPlugin::initAll() {
- if ( ! _plugins )
- return;
- sort( _plugins->begin(), _plugins->end() , prisort );
+// -- status framework ---
+WebStatusPlugin::WebStatusPlugin(const string& sectionName, double priority, const string& subheader)
+    : Prioritizable(priority), _name(sectionName), _subHeading(subheader) {
+ if (!_plugins)
+ _plugins = new vector<WebStatusPlugin*>();
+ _plugins->push_back(this);
+}
- for ( unsigned i=0; i<_plugins->size(); i++ )
- (*_plugins)[i]->init();
- }
+void WebStatusPlugin::initAll() {
+ if (!_plugins)
+ return;
- void WebStatusPlugin::runAll(OperationContext* txn, stringstream& ss) {
- if ( ! _plugins )
- return;
+ sort(_plugins->begin(), _plugins->end(), prisort);
- for ( unsigned i=0; i<_plugins->size(); i++ ) {
- WebStatusPlugin * p = (*_plugins)[i];
- ss << "<hr>\n"
- << "<b>" << p->_name << "</b>";
+ for (unsigned i = 0; i < _plugins->size(); i++)
+ (*_plugins)[i]->init();
+}
- ss << " " << p->_subHeading;
+void WebStatusPlugin::runAll(OperationContext* txn, stringstream& ss) {
+ if (!_plugins)
+ return;
- ss << "<br>\n";
+ for (unsigned i = 0; i < _plugins->size(); i++) {
+ WebStatusPlugin* p = (*_plugins)[i];
+ ss << "<hr>\n"
+ << "<b>" << p->_name << "</b>";
- p->run(txn, ss);
- }
-
- }
+ ss << " " << p->_subHeading;
- vector<WebStatusPlugin*> * WebStatusPlugin::_plugins = 0;
+ ss << "<br>\n";
+ p->run(txn, ss);
+ }
+}
- DbWebHandler::DbWebHandler( const string& name , double priority , bool requiresREST )
- : Prioritizable(priority), _name(name) , _requiresREST(requiresREST) {
+vector<WebStatusPlugin*>* WebStatusPlugin::_plugins = 0;
- {
- // setup strings
- _defaultUrl = "/";
- _defaultUrl += name;
- stringstream ss;
- ss << name << " priority: " << priority << " rest: " << requiresREST;
- _toString = ss.str();
- }
+DbWebHandler::DbWebHandler(const string& name, double priority, bool requiresREST)
+ : Prioritizable(priority), _name(name), _requiresREST(requiresREST) {
+ {
+ // setup strings
+ _defaultUrl = "/";
+ _defaultUrl += name;
- {
- // add to handler list
- if ( ! _handlers )
- _handlers = new vector<DbWebHandler*>();
- _handlers->push_back( this );
- sort( _handlers->begin() , _handlers->end() , prisort );
- }
+ stringstream ss;
+ ss << name << " priority: " << priority << " rest: " << requiresREST;
+ _toString = ss.str();
}
- DbWebHandler * DbWebHandler::findHandler( const string& url ) {
- if ( ! _handlers )
- return 0;
-
- for ( unsigned i=0; i<_handlers->size(); i++ ) {
- DbWebHandler * h = (*_handlers)[i];
- if ( h->handles( url ) )
- return h;
- }
+ {
+ // add to handler list
+ if (!_handlers)
+ _handlers = new vector<DbWebHandler*>();
+ _handlers->push_back(this);
+ sort(_handlers->begin(), _handlers->end(), prisort);
+ }
+}
+DbWebHandler* DbWebHandler::findHandler(const string& url) {
+ if (!_handlers)
return 0;
+
+ for (unsigned i = 0; i < _handlers->size(); i++) {
+ DbWebHandler* h = (*_handlers)[i];
+ if (h->handles(url))
+ return h;
}
- vector<DbWebHandler*> * DbWebHandler::_handlers = 0;
+ return 0;
+}
- void webServerListenThread(std::shared_ptr<DbWebServer> dbWebServer) {
- Client::initThread("websvr");
+vector<DbWebHandler*>* DbWebHandler::_handlers = 0;
- dbWebServer->initAndListen();
- }
+void webServerListenThread(std::shared_ptr<DbWebServer> dbWebServer) {
+ Client::initThread("websvr");
+
+ dbWebServer->initAndListen();
+}
-} // namespace mongo
+} // namespace mongo
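
The digest check in DbWebServer::_allowed() above follows RFC 2617: the stored credential is ha1 = MD5(user:realm:password), and the server recomputes the client's "response" field from it. A minimal standalone sketch of that computation, with md5Hex passed in as a stand-in for mongo's md5simpledigest (an assumption, not part of the diff):

#include <map>
#include <sstream>
#include <string>

// Recompute the expected Digest "response" from the parsed Authorization
// parameters and the stored ha1 hash; access is granted when the result
// equals the "response" value the client sent.
std::string expectedDigestResponse(const std::map<std::string, std::string>& parms,
                                   const std::string& ha1,
                                   std::string (*md5Hex)(const std::string&)) {
    // The web console only serves GET, so HA2 = MD5("GET:" + uri).
    const std::string ha2 = md5Hex("GET:" + parms.at("uri"));

    std::ostringstream r;
    r << ha1 << ':' << parms.at("nonce");
    if (parms.count("nc") && parms.count("cnonce") && parms.count("qop")) {
        // qop="auth" variant: MD5(HA1:nonce:nc:cnonce:qop:HA2)
        r << ':' << parms.at("nc") << ':' << parms.at("cnonce") << ':' << parms.at("qop");
    }
    r << ':' << ha2;
    return md5Hex(r.str());
}
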
diff --git a/src/mongo/db/dbwebserver.h b/src/mongo/db/dbwebserver.h
index a7135601221..f5096abc58a 100644
--- a/src/mongo/db/dbwebserver.h
+++ b/src/mongo/db/dbwebserver.h
@@ -40,99 +40,110 @@
namespace mongo {
- class AdminAccess;
- class DbWebServer;
- class OperationContext;
+class AdminAccess;
+class DbWebServer;
+class OperationContext;
- class Prioritizable {
- public:
- Prioritizable( double p ) : _priority(p) {}
- double priority() const { return _priority; }
- private:
- double _priority;
- };
+class Prioritizable {
+public:
+ Prioritizable(double p) : _priority(p) {}
+ double priority() const {
+ return _priority;
+ }
- class DbWebHandler : public Prioritizable {
- MONGO_DISALLOW_COPYING(DbWebHandler);
- public:
- DbWebHandler( const std::string& name , double priority , bool requiresREST );
- virtual ~DbWebHandler() {}
-
- virtual bool handles( const std::string& url ) const { return url == _defaultUrl; }
-
- virtual bool requiresREST( const std::string& url ) const { return _requiresREST; }
-
- virtual void handle( OperationContext* txn,
- const char *rq, // the full request
- const std::string& url,
- BSONObj params,
- // set these and return them:
- std::string& responseMsg,
- int& responseCode,
- std::vector<std::string>& headers, // if completely empty, content-type: text/html will be added
- const SockAddr &from
- ) = 0;
-
- std::string toString() const { return _toString; }
- static DbWebHandler * findHandler( const std::string& url );
-
- private:
- std::string _name;
- bool _requiresREST;
-
- std::string _defaultUrl;
- std::string _toString;
-
- static std::vector<DbWebHandler*> * _handlers;
- };
+private:
+ double _priority;
+};
+class DbWebHandler : public Prioritizable {
+ MONGO_DISALLOW_COPYING(DbWebHandler);
+
+public:
+ DbWebHandler(const std::string& name, double priority, bool requiresREST);
+ virtual ~DbWebHandler() {}
+
+ virtual bool handles(const std::string& url) const {
+ return url == _defaultUrl;
+ }
+
+ virtual bool requiresREST(const std::string& url) const {
+ return _requiresREST;
+ }
+
+ virtual void handle(OperationContext* txn,
+ const char* rq, // the full request
+ const std::string& url,
+ BSONObj params,
+ // set these and return them:
+ std::string& responseMsg,
+ int& responseCode,
+ std::vector<std::string>&
+ headers, // if completely empty, content-type: text/html will be added
+ const SockAddr& from) = 0;
+
+ std::string toString() const {
+ return _toString;
+ }
+ static DbWebHandler* findHandler(const std::string& url);
+
+private:
+ std::string _name;
+ bool _requiresREST;
+
+ std::string _defaultUrl;
+ std::string _toString;
+
+ static std::vector<DbWebHandler*>* _handlers;
+};
- class WebStatusPlugin : public Prioritizable {
- public:
- WebStatusPlugin( const std::string& secionName , double priority , const std::string& subheader = "" );
- virtual ~WebStatusPlugin() {}
- virtual void run(OperationContext* txn, std::stringstream& ss) = 0;
- /** called when web server stats up */
- virtual void init() = 0;
+class WebStatusPlugin : public Prioritizable {
+public:
+    WebStatusPlugin(const std::string& sectionName,
+ double priority,
+ const std::string& subheader = "");
+ virtual ~WebStatusPlugin() {}
- static void initAll();
- static void runAll(OperationContext* txn, std::stringstream& ss);
- private:
- std::string _name;
- std::string _subHeading;
- static std::vector<WebStatusPlugin*> * _plugins;
+ virtual void run(OperationContext* txn, std::stringstream& ss) = 0;
+    /** called when web server starts up */
+ virtual void init() = 0;
- };
+ static void initAll();
+ static void runAll(OperationContext* txn, std::stringstream& ss);
- class DbWebServer : public MiniWebServer {
- public:
- DbWebServer(const std::string& ip, int port, AdminAccess* webUsers);
+private:
+ std::string _name;
+ std::string _subHeading;
+ static std::vector<WebStatusPlugin*>* _plugins;
+};
- private:
- virtual void doRequest(const char *rq,
- std::string url,
- std::string& responseMsg,
- int& responseCode,
- std::vector<std::string>& headers,
- const SockAddr &from);
+class DbWebServer : public MiniWebServer {
+public:
+ DbWebServer(const std::string& ip, int port, AdminAccess* webUsers);
- bool _allowed(OperationContext* txn,
- const char* rq,
- std::vector<std::string>& headers,
- const SockAddr& from);
+private:
+ virtual void doRequest(const char* rq,
+ std::string url,
+ std::string& responseMsg,
+ int& responseCode,
+ std::vector<std::string>& headers,
+ const SockAddr& from);
- void _rejectREST(std::string& responseMsg,
- int& responseCode,
- std::vector<std::string>& headers);
+ bool _allowed(OperationContext* txn,
+ const char* rq,
+ std::vector<std::string>& headers,
+ const SockAddr& from);
+ void _rejectREST(std::string& responseMsg,
+ int& responseCode,
+ std::vector<std::string>& headers);
- const std::unique_ptr<AdminAccess> _webUsers;
- };
- void webServerListenThread(std::shared_ptr<DbWebServer> dbWebServer);
+ const std::unique_ptr<AdminAccess> _webUsers;
+};
- std::string prettyHostName();
+void webServerListenThread(std::shared_ptr<DbWebServer> dbWebServer);
+std::string prettyHostName();
};
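
WebStatusPlugin and DbWebHandler both self-register from their constructors (via the static _plugins and _handlers lists), so adding a section to the home page only takes a global instance. A hypothetical plugin sketch, assuming the declarations above; the class name, priority value, and output are illustrative only:

#include <sstream>

#include "mongo/db/dbwebserver.h"

namespace mongo {
class BuildInfoStatusSection : public WebStatusPlugin {
public:
    // Priority 50 is arbitrary; lower-priority sections sort earlier on the page.
    BuildInfoStatusSection() : WebStatusPlugin("buildinfo", 50, "version details") {}
    virtual void init() {}  // nothing to precompute when the web server starts up
    virtual void run(OperationContext* txn, std::stringstream& ss) {
        ss << "<p>build details would be rendered here</p>\n";
    }
} buildInfoStatusSection;  // constructing the global appends it to _plugins
}  // namespace mongo
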
diff --git a/src/mongo/db/driverHelpers.cpp b/src/mongo/db/driverHelpers.cpp
index 77f04ee2860..3721d93f194 100644
--- a/src/mongo/db/driverHelpers.cpp
+++ b/src/mongo/db/driverHelpers.cpp
@@ -50,39 +50,45 @@
namespace mongo {
- using std::string;
+using std::string;
- class BasicDriverHelper : public Command {
- public:
- BasicDriverHelper( const char * name ) : Command( name ) {}
+class BasicDriverHelper : public Command {
+public:
+ BasicDriverHelper(const char* name) : Command(name) {}
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool slaveOverrideOk() const { return true; }
- };
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+};
- class ObjectIdTest : public BasicDriverHelper {
- public:
- ObjectIdTest() : BasicDriverHelper( "driverOIDTest" ) {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual bool run(OperationContext* txn,
- const string& ,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- if ( cmdObj.firstElement().type() != jstOID ) {
- errmsg = "not oid";
- return false;
- }
+class ObjectIdTest : public BasicDriverHelper {
+public:
+ ObjectIdTest() : BasicDriverHelper("driverOIDTest") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (cmdObj.firstElement().type() != jstOID) {
+ errmsg = "not oid";
+ return false;
+ }
- const OID& oid = cmdObj.firstElement().__oid();
- result.append( "oid" , oid );
- result.append( "str" , oid.toString() );
+ const OID& oid = cmdObj.firstElement().__oid();
+ result.append("oid", oid);
+ result.append("str", oid.toString());
- return true;
- }
- } driverObjectIdTest;
+ return true;
+ }
+} driverObjectIdTest;
}
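
driverOIDTest simply echoes an ObjectId back as "oid" plus its hex form as "str", which lets a driver verify its OID encode/decode round trip against the server. A sketch of that round trip with the legacy C++ client; the connection handling is assumed, only the command shape comes from the diff:

#include "mongo/client/dbclientinterface.h"
#include "mongo/util/assert_util.h"

void checkOidRoundTrip(mongo::DBClientConnection& conn) {
    const mongo::OID oid = mongo::OID::gen();
    mongo::BSONObj res;
    // The command reads its first element, so {driverOIDTest: <oid>} is enough.
    bool ok = conn.runCommand("admin", BSON("driverOIDTest" << oid), res);
    verify(ok);
    verify(res["oid"].__oid() == oid);              // same ObjectId comes back
    verify(res["str"].String() == oid.toString());  // and its string form
}
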
diff --git a/src/mongo/db/exec/and_common-inl.h b/src/mongo/db/exec/and_common-inl.h
index 53e172d153d..c4be4684078 100644
--- a/src/mongo/db/exec/and_common-inl.h
+++ b/src/mongo/db/exec/and_common-inl.h
@@ -30,68 +30,69 @@
namespace mongo {
- class AndCommon {
- public:
- /**
- * If src has any data dest doesn't, add that data to dest.
- */
- static void mergeFrom(WorkingSetMember* dest, const WorkingSetMember& src) {
- // Both 'src' and 'dest' must have a RecordId (and they must be the same RecordId), as
- // we should have just matched them according to this RecordId while doing an
- // intersection.
- verify(dest->hasLoc());
- verify(src.hasLoc());
- verify(dest->loc == src.loc);
+class AndCommon {
+public:
+ /**
+ * If src has any data dest doesn't, add that data to dest.
+ */
+ static void mergeFrom(WorkingSetMember* dest, const WorkingSetMember& src) {
+ // Both 'src' and 'dest' must have a RecordId (and they must be the same RecordId), as
+ // we should have just matched them according to this RecordId while doing an
+ // intersection.
+ verify(dest->hasLoc());
+ verify(src.hasLoc());
+ verify(dest->loc == src.loc);
- // Merge computed data.
- typedef WorkingSetComputedDataType WSCD;
- for (WSCD i = WSCD(0); i < WSM_COMPUTED_NUM_TYPES; i = WSCD(i + 1)) {
- if (!dest->hasComputed(i) && src.hasComputed(i)) {
- dest->addComputed(src.getComputed(i)->clone());
- }
+ // Merge computed data.
+ typedef WorkingSetComputedDataType WSCD;
+ for (WSCD i = WSCD(0); i < WSM_COMPUTED_NUM_TYPES; i = WSCD(i + 1)) {
+ if (!dest->hasComputed(i) && src.hasComputed(i)) {
+ dest->addComputed(src.getComputed(i)->clone());
}
+ }
- if (dest->hasObj()) {
- // The merged WSM that we're creating already has the full document, so there's
- // nothing left to do.
- return;
- }
+ if (dest->hasObj()) {
+ // The merged WSM that we're creating already has the full document, so there's
+ // nothing left to do.
+ return;
+ }
- if (src.hasObj()) {
- // 'src' has the full document but 'dest' doesn't so we need to copy it over.
- dest->obj = src.obj;
+ if (src.hasObj()) {
+ // 'src' has the full document but 'dest' doesn't so we need to copy it over.
+ dest->obj = src.obj;
- // We have an object so we don't need key data.
- dest->keyData.clear();
+ // We have an object so we don't need key data.
+ dest->keyData.clear();
- // 'dest' should have the same state as 'src'. If 'src' has an unowned obj, then
- // 'dest' also should have an unowned obj; if 'src' has an owned obj, then dest
- // should also have an owned obj.
- dest->state = src.state;
+ // 'dest' should have the same state as 'src'. If 'src' has an unowned obj, then
+ // 'dest' also should have an unowned obj; if 'src' has an owned obj, then dest
+ // should also have an owned obj.
+ dest->state = src.state;
- // Now 'dest' has the full object. No more work to do.
- return;
- }
+ // Now 'dest' has the full object. No more work to do.
+ return;
+ }
- // If we're here, then both WSMs getting merged contain index keys. We need
- // to merge the key data.
- //
- // This is N^2 but N is probably pretty small. Easy enough to revisit.
- for (size_t i = 0; i < src.keyData.size(); ++i) {
- bool found = false;
- for (size_t j = 0; j < dest->keyData.size(); ++j) {
- if (dest->keyData[j].indexKeyPattern == src.keyData[i].indexKeyPattern) {
- found = true;
- break;
- }
+ // If we're here, then both WSMs getting merged contain index keys. We need
+ // to merge the key data.
+ //
+ // This is N^2 but N is probably pretty small. Easy enough to revisit.
+ for (size_t i = 0; i < src.keyData.size(); ++i) {
+ bool found = false;
+ for (size_t j = 0; j < dest->keyData.size(); ++j) {
+ if (dest->keyData[j].indexKeyPattern == src.keyData[i].indexKeyPattern) {
+ found = true;
+ break;
}
- if (!found) { dest->keyData.push_back(src.keyData[i]); }
}
-
- if (src.isSuspicious)
- dest->isSuspicious = true;
+ if (!found) {
+ dest->keyData.push_back(src.keyData[i]);
+ }
}
- };
-} // namespace mongo
+ if (src.isSuspicious)
+ dest->isSuspicious = true;
+ }
+};
+} // namespace mongo
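
The keyData branch above is deliberately quadratic because a WorkingSetMember rarely carries more than a few index keys. The same merge on simplified stand-in types, as a self-contained sketch (KeyEntry is an assumption standing in for the real pattern/key pair):

#include <string>
#include <vector>

struct KeyEntry {
    std::string indexKeyPattern;  // stand-in for the BSON index key pattern
    std::string keyData;          // stand-in for the key's data
};

// Copy into 'dest' every entry of 'src' whose index key pattern 'dest' lacks.
void mergeKeyData(std::vector<KeyEntry>& dest, const std::vector<KeyEntry>& src) {
    for (size_t i = 0; i < src.size(); ++i) {
        bool found = false;
        for (size_t j = 0; j < dest.size(); ++j) {
            if (dest[j].indexKeyPattern == src[i].indexKeyPattern) {
                found = true;
                break;
            }
        }
        if (!found) {
            dest.push_back(src[i]);
        }
    }
}
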
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index 21d7322dd67..71084b40a31 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -36,498 +36,493 @@
namespace {
- // Upper limit for buffered data.
- // Stage execution will fail once size of all buffered data exceeds this threshold.
- const size_t kDefaultMaxMemUsageBytes = 32 * 1024 * 1024;
+// Upper limit for buffered data.
+// Stage execution will fail once size of all buffered data exceeds this threshold.
+const size_t kDefaultMaxMemUsageBytes = 32 * 1024 * 1024;
-} // namespace
+} // namespace
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- const size_t AndHashStage::kLookAheadWorks = 10;
-
- // static
- const char* AndHashStage::kStageType = "AND_HASH";
-
- AndHashStage::AndHashStage(WorkingSet* ws, const Collection* collection)
- : _collection(collection),
- _ws(ws),
- _hashingChildren(true),
- _currentChild(0),
- _commonStats(kStageType),
- _memUsage(0),
- _maxMemUsage(kDefaultMaxMemUsageBytes) {}
-
- AndHashStage::AndHashStage(WorkingSet* ws,
- const Collection* collection,
- size_t maxMemUsage)
- : _collection(collection),
- _ws(ws),
- _hashingChildren(true),
- _currentChild(0),
- _commonStats(kStageType),
- _memUsage(0),
- _maxMemUsage(maxMemUsage) {}
-
- AndHashStage::~AndHashStage() {
- for (size_t i = 0; i < _children.size(); ++i) { delete _children[i]; }
+using std::unique_ptr;
+using std::vector;
+
+const size_t AndHashStage::kLookAheadWorks = 10;
+
+// static
+const char* AndHashStage::kStageType = "AND_HASH";
+
+AndHashStage::AndHashStage(WorkingSet* ws, const Collection* collection)
+ : _collection(collection),
+ _ws(ws),
+ _hashingChildren(true),
+ _currentChild(0),
+ _commonStats(kStageType),
+ _memUsage(0),
+ _maxMemUsage(kDefaultMaxMemUsageBytes) {}
+
+AndHashStage::AndHashStage(WorkingSet* ws, const Collection* collection, size_t maxMemUsage)
+ : _collection(collection),
+ _ws(ws),
+ _hashingChildren(true),
+ _currentChild(0),
+ _commonStats(kStageType),
+ _memUsage(0),
+ _maxMemUsage(maxMemUsage) {}
+
+AndHashStage::~AndHashStage() {
+ for (size_t i = 0; i < _children.size(); ++i) {
+ delete _children[i];
}
+}
- void AndHashStage::addChild(PlanStage* child) { _children.push_back(child); }
+void AndHashStage::addChild(PlanStage* child) {
+ _children.push_back(child);
+}
- size_t AndHashStage::getMemUsage() const {
- return _memUsage;
- }
-
- bool AndHashStage::isEOF() {
- // This is empty before calling work() and not-empty after.
- if (_lookAheadResults.empty()) { return false; }
+size_t AndHashStage::getMemUsage() const {
+ return _memUsage;
+}
- // Either we're busy hashing children, in which case we're not done yet.
- if (_hashingChildren) { return false; }
+bool AndHashStage::isEOF() {
+ // This is empty before calling work() and not-empty after.
+ if (_lookAheadResults.empty()) {
+ return false;
+ }
- // Or we're streaming in results from the last child.
+ // Either we're busy hashing children, in which case we're not done yet.
+ if (_hashingChildren) {
+ return false;
+ }
- // If there's nothing to probe against, we're EOF.
- if (_dataMap.empty()) { return true; }
+ // Or we're streaming in results from the last child.
- // Otherwise, we're done when the last child is done.
- invariant(_children.size() >= 2);
- return (WorkingSet::INVALID_ID == _lookAheadResults[_children.size() - 1])
- && _children[_children.size() - 1]->isEOF();
+ // If there's nothing to probe against, we're EOF.
+ if (_dataMap.empty()) {
+ return true;
}
- PlanStage::StageState AndHashStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+ // Otherwise, we're done when the last child is done.
+ invariant(_children.size() >= 2);
+ return (WorkingSet::INVALID_ID == _lookAheadResults[_children.size() - 1]) &&
+ _children[_children.size() - 1]->isEOF();
+}
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+PlanStage::StageState AndHashStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- if (isEOF()) { return PlanStage::IS_EOF; }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- // Fast-path for one of our children being EOF immediately. We work each child a few times.
- // If it hits EOF, the AND cannot output anything. If it produces a result, we stash that
- // result in _lookAheadResults.
- if (_lookAheadResults.empty()) {
- // INVALID_ID means that the child didn't produce a valid result.
-
- // We specifically are not using .resize(size, value) here because C++11 builds don't
- // seem to resolve WorkingSet::INVALID_ID during linking.
- _lookAheadResults.resize(_children.size());
- for (size_t i = 0; i < _children.size(); ++i) {
- _lookAheadResults[i] = WorkingSet::INVALID_ID;
- }
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
- // Work each child some number of times until it's either EOF or produces
- // a result. If it's EOF this whole stage will be EOF. If it produces a
- // result we cache it for later.
- for (size_t i = 0; i < _children.size(); ++i) {
- PlanStage* child = _children[i];
- for (size_t j = 0; j < kLookAheadWorks; ++j) {
- StageState childStatus = child->work(&_lookAheadResults[i]);
+ // Fast-path for one of our children being EOF immediately. We work each child a few times.
+ // If it hits EOF, the AND cannot output anything. If it produces a result, we stash that
+ // result in _lookAheadResults.
+ if (_lookAheadResults.empty()) {
+ // INVALID_ID means that the child didn't produce a valid result.
- if (PlanStage::IS_EOF == childStatus) {
+ // We specifically are not using .resize(size, value) here because C++11 builds don't
+ // seem to resolve WorkingSet::INVALID_ID during linking.
+ _lookAheadResults.resize(_children.size());
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _lookAheadResults[i] = WorkingSet::INVALID_ID;
+ }
- // A child went right to EOF. Bail out.
- _hashingChildren = false;
- _dataMap.clear();
- return PlanStage::IS_EOF;
- }
- else if (PlanStage::ADVANCED == childStatus) {
- // We have a result cached in _lookAheadResults[i]. Stop looking at this
- // child.
- break;
- }
- else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
- // Propage error to parent.
- *out = _lookAheadResults[i];
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == *out) {
- mongoutils::str::stream ss;
- ss << "hashed AND stage failed to read in look ahead results "
- << "from child " << i
- << ", childStatus: " << PlanStage::stateStr(childStatus);
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
-
- _hashingChildren = false;
- _dataMap.clear();
- return childStatus;
+ // Work each child some number of times until it's either EOF or produces
+ // a result. If it's EOF this whole stage will be EOF. If it produces a
+ // result we cache it for later.
+ for (size_t i = 0; i < _children.size(); ++i) {
+ PlanStage* child = _children[i];
+ for (size_t j = 0; j < kLookAheadWorks; ++j) {
+ StageState childStatus = child->work(&_lookAheadResults[i]);
+
+ if (PlanStage::IS_EOF == childStatus) {
+ // A child went right to EOF. Bail out.
+ _hashingChildren = false;
+ _dataMap.clear();
+ return PlanStage::IS_EOF;
+ } else if (PlanStage::ADVANCED == childStatus) {
+ // We have a result cached in _lookAheadResults[i]. Stop looking at this
+ // child.
+ break;
+ } else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+                    // Propagate error to parent.
+ *out = _lookAheadResults[i];
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == *out) {
+ mongoutils::str::stream ss;
+ ss << "hashed AND stage failed to read in look ahead results "
+ << "from child " << i
+ << ", childStatus: " << PlanStage::stateStr(childStatus);
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
- // We ignore NEED_TIME. TODO: what do we want to do if we get NEED_YIELD here?
+
+ _hashingChildren = false;
+ _dataMap.clear();
+ return childStatus;
}
+ // We ignore NEED_TIME. TODO: what do we want to do if we get NEED_YIELD here?
}
-
- // We did a bunch of work above, return NEED_TIME to be fair.
- return PlanStage::NEED_TIME;
}
- // An AND is either reading the first child into the hash table, probing against the hash
- // table with subsequent children, or checking the last child's results to see if they're
- // in the hash table.
+ // We did a bunch of work above, return NEED_TIME to be fair.
+ return PlanStage::NEED_TIME;
+ }
- // We read the first child into our hash table.
- if (_hashingChildren) {
- // Check memory usage of previously hashed results.
- if (_memUsage > _maxMemUsage) {
- mongoutils::str::stream ss;
- ss << "hashed AND stage buffered data usage of " << _memUsage
- << " bytes exceeds internal limit of " << kDefaultMaxMemUsageBytes << " bytes";
- Status status(ErrorCodes::Overflow, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- return PlanStage::FAILURE;
- }
+ // An AND is either reading the first child into the hash table, probing against the hash
+ // table with subsequent children, or checking the last child's results to see if they're
+ // in the hash table.
+
+ // We read the first child into our hash table.
+ if (_hashingChildren) {
+ // Check memory usage of previously hashed results.
+ if (_memUsage > _maxMemUsage) {
+ mongoutils::str::stream ss;
+ ss << "hashed AND stage buffered data usage of " << _memUsage
+ << " bytes exceeds internal limit of " << kDefaultMaxMemUsageBytes << " bytes";
+ Status status(ErrorCodes::Overflow, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ return PlanStage::FAILURE;
+ }
- if (0 == _currentChild) {
- return readFirstChild(out);
- }
- else if (_currentChild < _children.size() - 1) {
- return hashOtherChildren(out);
- }
- else {
- _hashingChildren = false;
- // We don't hash our last child. Instead, we probe the table created from the
- // previous children, returning results in the order of the last child.
- // Fall through to below.
- }
+ if (0 == _currentChild) {
+ return readFirstChild(out);
+ } else if (_currentChild < _children.size() - 1) {
+ return hashOtherChildren(out);
+ } else {
+ _hashingChildren = false;
+ // We don't hash our last child. Instead, we probe the table created from the
+ // previous children, returning results in the order of the last child.
+ // Fall through to below.
}
+ }
- // Returning results. We read from the last child and return the results that are in our
- // hash map.
+ // Returning results. We read from the last child and return the results that are in our
+ // hash map.
- // We should be EOF if we're not hashing results and the dataMap is empty.
- verify(!_dataMap.empty());
+ // We should be EOF if we're not hashing results and the dataMap is empty.
+ verify(!_dataMap.empty());
- // We probe _dataMap with the last child.
- verify(_currentChild == _children.size() - 1);
+ // We probe _dataMap with the last child.
+ verify(_currentChild == _children.size() - 1);
- // Get the next result for the (_children.size() - 1)-th child.
- StageState childStatus = workChild(_children.size() - 1, out);
- if (PlanStage::ADVANCED != childStatus) {
- return childStatus;
- }
+ // Get the next result for the (_children.size() - 1)-th child.
+ StageState childStatus = workChild(_children.size() - 1, out);
+ if (PlanStage::ADVANCED != childStatus) {
+ return childStatus;
+ }
+
+ // We know that we've ADVANCED. See if the WSM is in our table.
+ WorkingSetMember* member = _ws->get(*out);
+
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
+ // with this WSM.
+ if (!member->hasLoc()) {
+ _ws->flagForReview(*out);
+ return PlanStage::NEED_TIME;
+ }
- // We know that we've ADVANCED. See if the WSM is in our table.
- WorkingSetMember* member = _ws->get(*out);
+ DataMap::iterator it = _dataMap.find(member->loc);
+ if (_dataMap.end() == it) {
+ // Child's output wasn't in every previous child. Throw it out.
+ _ws->free(*out);
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else {
+ // Child's output was in every previous child. Merge any key data in
+ // the child's output and free the child's just-outputted WSM.
+ WorkingSetID hashID = it->second;
+ _dataMap.erase(it);
+
+ WorkingSetMember* olderMember = _ws->get(hashID);
+ AndCommon::mergeFrom(olderMember, *member);
+ _ws->free(*out);
+
+ ++_commonStats.advanced;
+ *out = hashID;
+ return PlanStage::ADVANCED;
+ }
+}
+
+PlanStage::StageState AndHashStage::workChild(size_t childNo, WorkingSetID* out) {
+ if (WorkingSet::INVALID_ID != _lookAheadResults[childNo]) {
+ *out = _lookAheadResults[childNo];
+ _lookAheadResults[childNo] = WorkingSet::INVALID_ID;
+ return PlanStage::ADVANCED;
+ } else {
+ return _children[childNo]->work(out);
+ }
+}
+
+PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
+ verify(_currentChild == 0);
+
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState childStatus = workChild(0, &id);
+
+ if (PlanStage::ADVANCED == childStatus) {
+ WorkingSetMember* member = _ws->get(id);
// Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
// with this WSM.
if (!member->hasLoc()) {
- _ws->flagForReview(*out);
+ _ws->flagForReview(id);
return PlanStage::NEED_TIME;
}
- DataMap::iterator it = _dataMap.find(member->loc);
- if (_dataMap.end() == it) {
- // Child's output wasn't in every previous child. Throw it out.
- _ws->free(*out);
+ if (!_dataMap.insert(std::make_pair(member->loc, id)).second) {
+ // Didn't insert because we already had this loc inside the map. This should only
+ // happen if we're seeing a newer copy of the same doc in a more recent snapshot.
+ // Throw out the newer copy of the doc.
+ _ws->free(id);
++_commonStats.needTime;
return PlanStage::NEED_TIME;
}
- else {
- // Child's output was in every previous child. Merge any key data in
- // the child's output and free the child's just-outputted WSM.
- WorkingSetID hashID = it->second;
- _dataMap.erase(it);
- WorkingSetMember* olderMember = _ws->get(hashID);
- AndCommon::mergeFrom(olderMember, *member);
- _ws->free(*out);
+ // Update memory stats.
+ _memUsage += member->getMemUsage();
- ++_commonStats.advanced;
- *out = hashID;
- return PlanStage::ADVANCED;
- }
- }
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::IS_EOF == childStatus) {
+ // Done reading child 0.
+ _currentChild = 1;
- PlanStage::StageState AndHashStage::workChild(size_t childNo, WorkingSetID* out) {
- if (WorkingSet::INVALID_ID != _lookAheadResults[childNo]) {
- *out = _lookAheadResults[childNo];
- _lookAheadResults[childNo] = WorkingSet::INVALID_ID;
- return PlanStage::ADVANCED;
- }
- else {
- return _children[childNo]->work(out);
+ // If our first child was empty, don't scan any others, no possible results.
+ if (_dataMap.empty()) {
+ _hashingChildren = false;
+ return PlanStage::IS_EOF;
}
- }
-
- PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) {
- verify(_currentChild == 0);
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState childStatus = workChild(0, &id);
- if (PlanStage::ADVANCED == childStatus) {
- WorkingSetMember* member = _ws->get(id);
-
- // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
- // with this WSM.
- if (!member->hasLoc()) {
- _ws->flagForReview(id);
- return PlanStage::NEED_TIME;
- }
-
- if (!_dataMap.insert(std::make_pair(member->loc, id)).second) {
- // Didn't insert because we already had this loc inside the map. This should only
- // happen if we're seeing a newer copy of the same doc in a more recent snapshot.
- // Throw out the newer copy of the doc.
- _ws->free(id);
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
-
- // Update memory stats.
- _memUsage += member->getMemUsage();
-
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
+ ++_commonStats.needTime;
+ _specificStats.mapAfterChild.push_back(_dataMap.size());
+
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+            ss << "hashed AND stage failed to read in results from first child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
- else if (PlanStage::IS_EOF == childStatus) {
- // Done reading child 0.
- _currentChild = 1;
-
- // If our first child was empty, don't scan any others, no possible results.
- if (_dataMap.empty()) {
- _hashingChildren = false;
- return PlanStage::IS_EOF;
- }
-
+ return childStatus;
+ } else {
+ if (PlanStage::NEED_TIME == childStatus) {
++_commonStats.needTime;
- _specificStats.mapAfterChild.push_back(_dataMap.size());
-
- return PlanStage::NEED_TIME;
- }
- else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ } else if (PlanStage::NEED_YIELD == childStatus) {
+ ++_commonStats.needYield;
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "hashed AND stage failed to read in results to from first child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- return childStatus;
}
- else {
- if (PlanStage::NEED_TIME == childStatus) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == childStatus) {
- ++_commonStats.needYield;
- *out = id;
- }
- return childStatus;
- }
+ return childStatus;
}
+}
- PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
- verify(_currentChild > 0);
+PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) {
+ verify(_currentChild > 0);
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState childStatus = workChild(_currentChild, &id);
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState childStatus = workChild(_currentChild, &id);
- if (PlanStage::ADVANCED == childStatus) {
- WorkingSetMember* member = _ws->get(id);
+ if (PlanStage::ADVANCED == childStatus) {
+ WorkingSetMember* member = _ws->get(id);
- // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
- // with this WSM.
- if (!member->hasLoc()) {
- _ws->flagForReview(id);
- return PlanStage::NEED_TIME;
- }
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
+ // with this WSM.
+ if (!member->hasLoc()) {
+ _ws->flagForReview(id);
+ return PlanStage::NEED_TIME;
+ }
- verify(member->hasLoc());
- if (_dataMap.end() == _dataMap.find(member->loc)) {
- // Ignore. It's not in any previous child.
- }
- else {
- // We have a hit. Copy data into the WSM we already have.
- _seenMap.insert(member->loc);
- WorkingSetMember* olderMember = _ws->get(_dataMap[member->loc]);
- size_t memUsageBefore = olderMember->getMemUsage();
+ verify(member->hasLoc());
+ if (_dataMap.end() == _dataMap.find(member->loc)) {
+ // Ignore. It's not in any previous child.
+ } else {
+ // We have a hit. Copy data into the WSM we already have.
+ _seenMap.insert(member->loc);
+ WorkingSetMember* olderMember = _ws->get(_dataMap[member->loc]);
+ size_t memUsageBefore = olderMember->getMemUsage();
+
+ AndCommon::mergeFrom(olderMember, *member);
- AndCommon::mergeFrom(olderMember, *member);
+ // Update memory stats.
+ _memUsage += olderMember->getMemUsage() - memUsageBefore;
+ }
+ _ws->free(id);
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::IS_EOF == childStatus) {
+ // Finished with a child.
+ ++_currentChild;
+
+ // Keep elements of _dataMap that are in _seenMap.
+ DataMap::iterator it = _dataMap.begin();
+ while (it != _dataMap.end()) {
+ if (_seenMap.end() == _seenMap.find(it->first)) {
+ DataMap::iterator toErase = it;
+ ++it;
// Update memory stats.
- _memUsage += olderMember->getMemUsage() - memUsageBefore;
+ WorkingSetMember* member = _ws->get(toErase->second);
+ _memUsage -= member->getMemUsage();
+
+ _ws->free(toErase->second);
+ _dataMap.erase(toErase);
+ } else {
+ ++it;
}
- _ws->free(id);
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
}
- else if (PlanStage::IS_EOF == childStatus) {
- // Finished with a child.
- ++_currentChild;
-
- // Keep elements of _dataMap that are in _seenMap.
- DataMap::iterator it = _dataMap.begin();
- while (it != _dataMap.end()) {
- if (_seenMap.end() == _seenMap.find(it->first)) {
- DataMap::iterator toErase = it;
- ++it;
-
- // Update memory stats.
- WorkingSetMember* member = _ws->get(toErase->second);
- _memUsage -= member->getMemUsage();
-
- _ws->free(toErase->second);
- _dataMap.erase(toErase);
- }
- else { ++it; }
- }
- _specificStats.mapAfterChild.push_back(_dataMap.size());
+ _specificStats.mapAfterChild.push_back(_dataMap.size());
- _seenMap.clear();
+ _seenMap.clear();
- // _dataMap is now the intersection of the first _currentChild nodes.
+ // _dataMap is now the intersection of the first _currentChild nodes.
- // If we have nothing to AND with after finishing any child, stop.
- if (_dataMap.empty()) {
- _hashingChildren = false;
- return PlanStage::IS_EOF;
- }
+ // If we have nothing to AND with after finishing any child, stop.
+ if (_dataMap.empty()) {
+ _hashingChildren = false;
+ return PlanStage::IS_EOF;
+ }
- // We've finished scanning all children. Return results with the next call to work().
- if (_currentChild == _children.size()) {
- _hashingChildren = false;
- }
+ // We've finished scanning all children. Return results with the next call to work().
+ if (_currentChild == _children.size()) {
+ _hashingChildren = false;
+ }
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "hashed AND stage failed to read in results from other child " << _currentChild;
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
- else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ return childStatus;
+ } else {
+ if (PlanStage::NEED_TIME == childStatus) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == childStatus) {
+ ++_commonStats.needYield;
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "hashed AND stage failed to read in results from other child "
- << _currentChild;
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- return childStatus;
}
- else {
- if (PlanStage::NEED_TIME == childStatus) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == childStatus) {
- ++_commonStats.needYield;
- *out = id;
- }
- return childStatus;
- }
+ return childStatus;
}
+}
- void AndHashStage::saveState() {
- ++_commonStats.yields;
+void AndHashStage::saveState() {
+ ++_commonStats.yields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->saveState();
- }
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->saveState();
}
+}
- void AndHashStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
+void AndHashStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->restoreState(opCtx);
- }
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->restoreState(opCtx);
}
+}
- void AndHashStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
+void AndHashStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
- if (isEOF()) { return; }
+ if (isEOF()) {
+ return;
+ }
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->invalidate(txn, dl, type);
- }
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->invalidate(txn, dl, type);
+ }
- // Invalidation can happen to our warmup results. If that occurs just
- // flag it and forget about it.
- for (size_t i = 0; i < _lookAheadResults.size(); ++i) {
- if (WorkingSet::INVALID_ID != _lookAheadResults[i]) {
- WorkingSetMember* member = _ws->get(_lookAheadResults[i]);
- if (member->hasLoc() && member->loc == dl) {
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- _ws->flagForReview(_lookAheadResults[i]);
- _lookAheadResults[i] = WorkingSet::INVALID_ID;
- }
+ // Invalidation can happen to our warmup results. If that occurs just
+ // flag it and forget about it.
+ for (size_t i = 0; i < _lookAheadResults.size(); ++i) {
+ if (WorkingSet::INVALID_ID != _lookAheadResults[i]) {
+ WorkingSetMember* member = _ws->get(_lookAheadResults[i]);
+ if (member->hasLoc() && member->loc == dl) {
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ _ws->flagForReview(_lookAheadResults[i]);
+ _lookAheadResults[i] = WorkingSet::INVALID_ID;
}
}
+ }
- // If it's a deletion, we have to forget about the RecordId, and since the AND-ing is by
- // RecordId we can't continue processing it even with the object.
- //
- // If it's a mutation the predicates implied by the AND-ing may no longer be true.
- //
- // So, we flag and try to pick it up later.
- DataMap::iterator it = _dataMap.find(dl);
- if (_dataMap.end() != it) {
- WorkingSetID id = it->second;
- WorkingSetMember* member = _ws->get(id);
- verify(member->loc == dl);
-
- if (_hashingChildren) {
- ++_specificStats.flaggedInProgress;
- }
- else {
- ++_specificStats.flaggedButPassed;
- }
+ // If it's a deletion, we have to forget about the RecordId, and since the AND-ing is by
+ // RecordId we can't continue processing it even with the object.
+ //
+ // If it's a mutation the predicates implied by the AND-ing may no longer be true.
+ //
+ // So, we flag and try to pick it up later.
+ DataMap::iterator it = _dataMap.find(dl);
+ if (_dataMap.end() != it) {
+ WorkingSetID id = it->second;
+ WorkingSetMember* member = _ws->get(id);
+ verify(member->loc == dl);
- // Update memory stats.
- _memUsage -= member->getMemUsage();
+ if (_hashingChildren) {
+ ++_specificStats.flaggedInProgress;
+ } else {
+ ++_specificStats.flaggedButPassed;
+ }
- // The loc is about to be invalidated. Fetch it and clear the loc.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ // Update memory stats.
+ _memUsage -= member->getMemUsage();
- // Add the WSID to the to-be-reviewed list in the WS.
- _ws->flagForReview(id);
+ // The loc is about to be invalidated. Fetch it and clear the loc.
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- // And don't return it from this stage.
- _dataMap.erase(it);
- }
- }
+ // Add the WSID to the to-be-reviewed list in the WS.
+ _ws->flagForReview(id);
- vector<PlanStage*> AndHashStage::getChildren() const {
- return _children;
+ // And don't return it from this stage.
+ _dataMap.erase(it);
}
+}
- PlanStageStats* AndHashStage::getStats() {
- _commonStats.isEOF = isEOF();
+vector<PlanStage*> AndHashStage::getChildren() const {
+ return _children;
+}
- _specificStats.memLimit = _maxMemUsage;
- _specificStats.memUsage = _memUsage;
+PlanStageStats* AndHashStage::getStats() {
+ _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_AND_HASH));
- ret->specific.reset(new AndHashStats(_specificStats));
- for (size_t i = 0; i < _children.size(); ++i) {
- ret->children.push_back(_children[i]->getStats());
- }
+ _specificStats.memLimit = _maxMemUsage;
+ _specificStats.memUsage = _memUsage;
- return ret.release();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_AND_HASH));
+ ret->specific.reset(new AndHashStats(_specificStats));
+ for (size_t i = 0; i < _children.size(); ++i) {
+ ret->children.push_back(_children[i]->getStats());
}
- const CommonStats* AndHashStage::getCommonStats() const {
- return &_commonStats;
- }
+ return ret.release();
+}
- const SpecificStats* AndHashStage::getSpecificStats() const {
- return &_specificStats;
- }
+const CommonStats* AndHashStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* AndHashStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
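
Stripped of the WorkingSet bookkeeping, the control flow above is a hash intersection: build a table from the first child, shrink it with each middle child, then stream the last child and probe it, so output order follows the last child. A self-contained sketch over plain integers (the real stage keys _dataMap by RecordId and merges buffered WorkingSetMembers instead of returning ids):

#include <unordered_set>
#include <vector>

// 'children' must hold at least two inputs, mirroring the stage's invariant.
std::vector<int> andHashIntersect(const std::vector<std::vector<int>>& children) {
    // Read the first child into the table (_dataMap in the stage).
    std::unordered_set<int> table(children[0].begin(), children[0].end());

    // Each middle child keeps only the ids it has also produced (_seenMap).
    for (size_t c = 1; c + 1 < children.size(); ++c) {
        std::unordered_set<int> seen;
        for (size_t i = 0; i < children[c].size(); ++i) {
            if (table.count(children[c][i])) {
                seen.insert(children[c][i]);
            }
        }
        table.swap(seen);
        if (table.empty()) {
            return std::vector<int>();  // nothing left to AND with: the stage is EOF
        }
    }

    // The last child is never hashed; it is probed against the table.
    std::vector<int> out;
    for (size_t i = 0; i < children.back().size(); ++i) {
        if (table.erase(children.back()[i])) {
            out.push_back(children.back()[i]);
        }
    }
    return out;
}
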
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index 16e7ec3759f..efe625619db 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -38,106 +38,106 @@
namespace mongo {
+/**
+ * Reads from N children, each of which must have a valid RecordId. Uses a hash table to
+ * intersect the outputs of the N children, and outputs the intersection.
+ *
+ * Preconditions: Valid RecordId. More than one child.
+ *
+ * Any RecordId that we keep a reference to that is invalidated before we are able to return it
+ * is fetched and added to the WorkingSet as "flagged for further review." Because this stage
+ * operates with RecordIds, we are unable to evaluate the AND for the invalidated RecordId, and it
+ * must be fully matched later.
+ */
+class AndHashStage : public PlanStage {
+public:
+ AndHashStage(WorkingSet* ws, const Collection* collection);
+
/**
- * Reads from N children, each of which must have a valid RecordId. Uses a hash table to
- * intersect the outputs of the N children, and outputs the intersection.
- *
- * Preconditions: Valid RecordId. More than one child.
- *
- * Any RecordId that we keep a reference to that is invalidated before we are able to return it
- * is fetched and added to the WorkingSet as "flagged for further review." Because this stage
- * operates with RecordIds, we are unable to evaluate the AND for the invalidated RecordId, and it
- * must be fully matched later.
+ * For testing only. Allows tests to set memory usage threshold.
*/
- class AndHashStage : public PlanStage {
- public:
- AndHashStage(WorkingSet* ws, const Collection* collection);
-
- /**
- * For testing only. Allows tests to set memory usage threshold.
- */
- AndHashStage(WorkingSet* ws,
- const Collection* collection,
- size_t maxMemUsage);
+ AndHashStage(WorkingSet* ws, const Collection* collection, size_t maxMemUsage);
- virtual ~AndHashStage();
+ virtual ~AndHashStage();
- void addChild(PlanStage* child);
+ void addChild(PlanStage* child);
- /**
- * Returns memory usage.
- * For testing only.
- */
- size_t getMemUsage() const;
+ /**
+ * Returns memory usage.
+ * For testing only.
+ */
+ size_t getMemUsage() const;
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_AND_HASH; }
+ virtual StageType stageType() const {
+ return STAGE_AND_HASH;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- static const size_t kLookAheadWorks;
+private:
+ static const size_t kLookAheadWorks;
- StageState readFirstChild(WorkingSetID* out);
- StageState hashOtherChildren(WorkingSetID* out);
- StageState workChild(size_t childNo, WorkingSetID* out);
+ StageState readFirstChild(WorkingSetID* out);
+ StageState hashOtherChildren(WorkingSetID* out);
+ StageState workChild(size_t childNo, WorkingSetID* out);
- // Not owned by us.
- const Collection* _collection;
+ // Not owned by us.
+ const Collection* _collection;
- // Not owned by us.
- WorkingSet* _ws;
+ // Not owned by us.
+ WorkingSet* _ws;
- // The stages we read from. Owned by us.
- std::vector<PlanStage*> _children;
+ // The stages we read from. Owned by us.
+ std::vector<PlanStage*> _children;
- // We want to see if any of our children are EOF immediately. This requires working them a
- // few times to see if they hit EOF or if they produce a result. If they produce a result,
- // we place that result here.
- std::vector<WorkingSetID> _lookAheadResults;
+ // We want to see if any of our children are EOF immediately. This requires working them a
+ // few times to see if they hit EOF or if they produce a result. If they produce a result,
+ // we place that result here.
+ std::vector<WorkingSetID> _lookAheadResults;
- // _dataMap is filled out by the first child and probed by subsequent children. This is the
- // hash table that we create by intersecting _children and probe with the last child.
- typedef unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
- DataMap _dataMap;
+ // _dataMap is filled out by the first child and probed by subsequent children. This is the
+ // hash table that we create by intersecting _children and probe with the last child.
+ typedef unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
+ DataMap _dataMap;
- // Keeps track of what elements from _dataMap subsequent children have seen.
- // Only used while _hashingChildren.
- typedef unordered_set<RecordId, RecordId::Hasher> SeenMap;
- SeenMap _seenMap;
+ // Keeps track of what elements from _dataMap subsequent children have seen.
+ // Only used while _hashingChildren.
+ typedef unordered_set<RecordId, RecordId::Hasher> SeenMap;
+ SeenMap _seenMap;
- // True if we're still intersecting _children[0..._children.size()-1].
- bool _hashingChildren;
+ // True if we're still intersecting _children[0..._children.size()-1].
+ bool _hashingChildren;
- // Which child are we currently working on?
- size_t _currentChild;
+ // Which child are we currently working on?
+ size_t _currentChild;
- // Stats
- CommonStats _commonStats;
- AndHashStats _specificStats;
+ // Stats
+ CommonStats _commonStats;
+ AndHashStats _specificStats;
- // The usage in bytes of all buffered data that we're holding.
- // Memory usage is calculated from keys held in _dataMap only.
- // For simplicity, results in _lookAheadResults do not count towards the limit.
- size_t _memUsage;
+ // The usage in bytes of all buffered data that we're holding.
+ // Memory usage is calculated from keys held in _dataMap only.
+ // For simplicity, results in _lookAheadResults do not count towards the limit.
+ size_t _memUsage;
- // Upper limit for buffered data memory usage.
- // Defaults to 32 MB (See kMaxBytes in and_hash.cpp).
- size_t _maxMemUsage;
- };
+ // Upper limit for buffered data memory usage.
+    // Defaults to 32 MB (see kDefaultMaxMemUsageBytes in and_hash.cpp).
+ size_t _maxMemUsage;
+};
} // namespace mongo
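
The three-argument constructor exists so tests can drop the buffer cap far below the 32 MB default and drive the stage into its Overflow failure path. A usage sketch under that assumption; 'collection', 'firstIndexScan', and 'secondIndexScan' are hypothetical stand-ins, and the 1 MB cap is made up:

WorkingSet ws;
AndHashStage stage(&ws, collection, 1024 * 1024);  // fail once buffered keys exceed ~1 MB
stage.addChild(firstIndexScan);   // hypothetical PlanStage* children over the same RecordIds
stage.addChild(secondIndexScan);  // the stage takes ownership and deletes them

WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = PlanStage::NEED_TIME;
while (state != PlanStage::IS_EOF && state != PlanStage::FAILURE && state != PlanStage::DEAD) {
    state = stage.work(&id);
    if (state == PlanStage::ADVANCED) {
        // 'id' names a WorkingSetMember whose RecordId appeared in every child.
    }
}
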
diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp
index c407b9c842c..27966791c87 100644
--- a/src/mongo/db/exec/and_sorted.cpp
+++ b/src/mongo/db/exec/and_sorted.cpp
@@ -35,292 +35,289 @@
namespace mongo {
- using std::unique_ptr;
- using std::numeric_limits;
- using std::vector;
-
- // static
- const char* AndSortedStage::kStageType = "AND_SORTED";
-
- AndSortedStage::AndSortedStage(WorkingSet* ws, const Collection* collection)
- : _collection(collection),
- _ws(ws),
- _targetNode(numeric_limits<size_t>::max()),
- _targetId(WorkingSet::INVALID_ID), _isEOF(false),
- _commonStats(kStageType) { }
-
- AndSortedStage::~AndSortedStage() {
- for (size_t i = 0; i < _children.size(); ++i) { delete _children[i]; }
+using std::unique_ptr;
+using std::numeric_limits;
+using std::vector;
+
+// static
+const char* AndSortedStage::kStageType = "AND_SORTED";
+
+AndSortedStage::AndSortedStage(WorkingSet* ws, const Collection* collection)
+ : _collection(collection),
+ _ws(ws),
+ _targetNode(numeric_limits<size_t>::max()),
+ _targetId(WorkingSet::INVALID_ID),
+ _isEOF(false),
+ _commonStats(kStageType) {}
+
+AndSortedStage::~AndSortedStage() {
+ for (size_t i = 0; i < _children.size(); ++i) {
+ delete _children[i];
}
+}
- void AndSortedStage::addChild(PlanStage* child) {
- _children.push_back(child);
- }
-
- bool AndSortedStage::isEOF() { return _isEOF; }
+void AndSortedStage::addChild(PlanStage* child) {
+ _children.push_back(child);
+}
- PlanStage::StageState AndSortedStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+bool AndSortedStage::isEOF() {
+ return _isEOF;
+}
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+PlanStage::StageState AndSortedStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- if (isEOF()) { return PlanStage::IS_EOF; }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- if (0 == _specificStats.failedAnd.size()) {
- _specificStats.failedAnd.resize(_children.size());
- }
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
- // If we don't have any nodes that we're work()-ing until they hit a certain RecordId...
- if (0 == _workingTowardRep.size()) {
- // Get a target RecordId.
- return getTargetLoc(out);
- }
+ if (0 == _specificStats.failedAnd.size()) {
+ _specificStats.failedAnd.resize(_children.size());
+ }
- // Move nodes toward the target RecordId.
- // If all nodes reach the target RecordId, return it. The next call to work() will set a new
- // target.
- return moveTowardTargetLoc(out);
+ // If we don't have any nodes that we're work()-ing until they hit a certain RecordId...
+ if (0 == _workingTowardRep.size()) {
+ // Get a target RecordId.
+ return getTargetLoc(out);
}
- PlanStage::StageState AndSortedStage::getTargetLoc(WorkingSetID* out) {
- verify(numeric_limits<size_t>::max() == _targetNode);
- verify(WorkingSet::INVALID_ID == _targetId);
- verify(RecordId() == _targetLoc);
+ // Move nodes toward the target RecordId.
+ // If all nodes reach the target RecordId, return it. The next call to work() will set a new
+ // target.
+ return moveTowardTargetLoc(out);
+}
- // Pick one, and get a loc to work toward.
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState state = _children[0]->work(&id);
+PlanStage::StageState AndSortedStage::getTargetLoc(WorkingSetID* out) {
+ verify(numeric_limits<size_t>::max() == _targetNode);
+ verify(WorkingSet::INVALID_ID == _targetId);
+ verify(RecordId() == _targetLoc);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = _ws->get(id);
+ // Pick one, and get a loc to work toward.
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState state = _children[0]->work(&id);
- // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
- // with this WSM.
- if (!member->hasLoc()) {
- _ws->flagForReview(id);
- return PlanStage::NEED_TIME;
- }
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = _ws->get(id);
- verify(member->hasLoc());
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
+ // with this WSM.
+ if (!member->hasLoc()) {
+ _ws->flagForReview(id);
+ return PlanStage::NEED_TIME;
+ }
- // We have a value from one child to AND with.
- _targetNode = 0;
- _targetId = id;
- _targetLoc = member->loc;
+ verify(member->hasLoc());
- // We have to AND with all other children.
- for (size_t i = 1; i < _children.size(); ++i) {
- _workingTowardRep.push(i);
- }
+ // We have a value from one child to AND with.
+ _targetNode = 0;
+ _targetId = id;
+ _targetLoc = member->loc;
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
+ // We have to AND with all other children.
+ for (size_t i = 1; i < _children.size(); ++i) {
+ _workingTowardRep.push(i);
}
- else if (PlanStage::IS_EOF == state) {
- _isEOF = true;
- return state;
+
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::IS_EOF == state) {
+ _isEOF = true;
+ return state;
+ } else if (PlanStage::FAILURE == state) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "sorted AND stage failed to read in results from first child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
- else if (PlanStage::FAILURE == state) {
+ _isEOF = true;
+ return state;
+ } else {
+ if (PlanStage::NEED_TIME == state) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == state) {
+ ++_commonStats.needYield;
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "sorted AND stage failed to read in results from first child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- _isEOF = true;
- return state;
}
- else {
- if (PlanStage::NEED_TIME == state) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == state) {
- ++_commonStats.needYield;
- *out = id;
- }
- // NEED_TIME, NEED_YIELD.
- return state;
- }
+ // NEED_TIME, NEED_YIELD.
+ return state;
}
+}
- PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
- verify(numeric_limits<size_t>::max() != _targetNode);
- verify(WorkingSet::INVALID_ID != _targetId);
+PlanStage::StageState AndSortedStage::moveTowardTargetLoc(WorkingSetID* out) {
+ verify(numeric_limits<size_t>::max() != _targetNode);
+ verify(WorkingSet::INVALID_ID != _targetId);
- // We have nodes that haven't hit _targetLoc yet.
- size_t workingChildNumber = _workingTowardRep.front();
- PlanStage* next = _children[workingChildNumber];
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState state = next->work(&id);
+ // We have nodes that haven't hit _targetLoc yet.
+ size_t workingChildNumber = _workingTowardRep.front();
+ PlanStage* next = _children[workingChildNumber];
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState state = next->work(&id);
- if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = _ws->get(id);
+ if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = _ws->get(id);
- // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
- // with this WSM.
- if (!member->hasLoc()) {
- _ws->flagForReview(id);
- return PlanStage::NEED_TIME;
- }
+ // Maybe the child had an invalidation. We intersect RecordId(s) so we can't do anything
+ // with this WSM.
+ if (!member->hasLoc()) {
+ _ws->flagForReview(id);
+ return PlanStage::NEED_TIME;
+ }
- verify(member->hasLoc());
+ verify(member->hasLoc());
- if (member->loc == _targetLoc) {
- // The front element has hit _targetLoc. Don't move it forward anymore/work on
- // another element.
- _workingTowardRep.pop();
- AndCommon::mergeFrom(_ws->get(_targetId), *member);
- _ws->free(id);
+ if (member->loc == _targetLoc) {
+ // The front element has hit _targetLoc. Don't move it forward anymore/work on
+ // another element.
+ _workingTowardRep.pop();
+ AndCommon::mergeFrom(_ws->get(_targetId), *member);
+ _ws->free(id);
- if (0 == _workingTowardRep.size()) {
- WorkingSetID toReturn = _targetId;
+ if (0 == _workingTowardRep.size()) {
+ WorkingSetID toReturn = _targetId;
- _targetNode = numeric_limits<size_t>::max();
- _targetId = WorkingSet::INVALID_ID;
- _targetLoc = RecordId();
+ _targetNode = numeric_limits<size_t>::max();
+ _targetId = WorkingSet::INVALID_ID;
+ _targetLoc = RecordId();
- *out = toReturn;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
- // More children need to be advanced to _targetLoc.
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else if (member->loc < _targetLoc) {
- // The front element of _workingTowardRep hasn't hit the thing we're AND-ing with
- // yet. Try again later.
- _ws->free(id);
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
+ *out = toReturn;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
}
- else {
- // member->loc > _targetLoc.
- // _targetLoc wasn't successfully AND-ed with the other sub-plans. We toss it and
- // try AND-ing with the next value.
- _specificStats.failedAnd[_targetNode]++;
-
- _ws->free(_targetId);
- _targetNode = workingChildNumber;
- _targetLoc = member->loc;
- _targetId = id;
- _workingTowardRep = std::queue<size_t>();
- for (size_t i = 0; i < _children.size(); ++i) {
- if (workingChildNumber != i) {
- _workingTowardRep.push(i);
- }
+ // More children need to be advanced to _targetLoc.
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (member->loc < _targetLoc) {
+ // The front element of _workingTowardRep hasn't hit the thing we're AND-ing with
+ // yet. Try again later.
+ _ws->free(id);
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else {
+ // member->loc > _targetLoc.
+ // _targetLoc wasn't successfully AND-ed with the other sub-plans. We toss it and
+ // try AND-ing with the next value.
+ _specificStats.failedAnd[_targetNode]++;
+
+ _ws->free(_targetId);
+ _targetNode = workingChildNumber;
+ _targetLoc = member->loc;
+ _targetId = id;
+ _workingTowardRep = std::queue<size_t>();
+ for (size_t i = 0; i < _children.size(); ++i) {
+ if (workingChildNumber != i) {
+ _workingTowardRep.push(i);
}
- // Need time to chase after the new _targetLoc.
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
}
+ // Need time to chase after the new _targetLoc.
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
- else if (PlanStage::IS_EOF == state) {
- _isEOF = true;
- _ws->free(_targetId);
- return state;
+ } else if (PlanStage::IS_EOF == state) {
+ _isEOF = true;
+ _ws->free(_targetId);
+ return state;
+ } else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "sorted AND stage failed to read in results from child " << workingChildNumber;
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
- else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ _isEOF = true;
+ _ws->free(_targetId);
+ return state;
+ } else {
+ if (PlanStage::NEED_TIME == state) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == state) {
+ ++_commonStats.needYield;
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "sorted AND stage failed to read in results from child " << workingChildNumber;
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- _isEOF = true;
- _ws->free(_targetId);
- return state;
}
- else {
- if (PlanStage::NEED_TIME == state) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == state) {
- ++_commonStats.needYield;
- *out = id;
- }
- return state;
- }
+ return state;
}
+}
- void AndSortedStage::saveState() {
- ++_commonStats.yields;
+void AndSortedStage::saveState() {
+ ++_commonStats.yields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->saveState();
- }
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->saveState();
}
+}
- void AndSortedStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
+void AndSortedStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->restoreState(opCtx);
- }
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->restoreState(opCtx);
}
+}
- void AndSortedStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- ++_commonStats.invalidates;
-
- if (isEOF()) { return; }
-
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->invalidate(txn, dl, type);
- }
-
- if (dl == _targetLoc) {
- // We're in the middle of moving children forward until they hit _targetLoc, which is no
- // longer a valid target. If it's a deletion we can't AND it with anything, if it's a
- // mutation the predicates implied by the AND may no longer be true. So no matter what,
- // fetch it, flag for review, and find another _targetLoc.
- ++_specificStats.flagged;
+void AndSortedStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
- // The RecordId could still be a valid result so flag it and save it for later.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, _ws->get(_targetId), _collection);
- _ws->flagForReview(_targetId);
+ if (isEOF()) {
+ return;
+ }
- _targetId = WorkingSet::INVALID_ID;
- _targetNode = numeric_limits<size_t>::max();
- _targetLoc = RecordId();
- _workingTowardRep = std::queue<size_t>();
- }
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->invalidate(txn, dl, type);
}
- vector<PlanStage*> AndSortedStage::getChildren() const {
- return _children;
+ if (dl == _targetLoc) {
+ // We're in the middle of moving children forward until they hit _targetLoc, which is no
+ // longer a valid target. If it's a deletion we can't AND it with anything, if it's a
+ // mutation the predicates implied by the AND may no longer be true. So no matter what,
+ // fetch it, flag for review, and find another _targetLoc.
+ ++_specificStats.flagged;
+
+ // The RecordId could still be a valid result so flag it and save it for later.
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, _ws->get(_targetId), _collection);
+ _ws->flagForReview(_targetId);
+
+ _targetId = WorkingSet::INVALID_ID;
+ _targetNode = numeric_limits<size_t>::max();
+ _targetLoc = RecordId();
+ _workingTowardRep = std::queue<size_t>();
}
+}
- PlanStageStats* AndSortedStage::getStats() {
- _commonStats.isEOF = isEOF();
+vector<PlanStage*> AndSortedStage::getChildren() const {
+ return _children;
+}
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_AND_SORTED));
- ret->specific.reset(new AndSortedStats(_specificStats));
- for (size_t i = 0; i < _children.size(); ++i) {
- ret->children.push_back(_children[i]->getStats());
- }
+PlanStageStats* AndSortedStage::getStats() {
+ _commonStats.isEOF = isEOF();
- return ret.release();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_AND_SORTED));
+ ret->specific.reset(new AndSortedStats(_specificStats));
+ for (size_t i = 0; i < _children.size(); ++i) {
+ ret->children.push_back(_children[i]->getStats());
}
- const CommonStats* AndSortedStage::getCommonStats() const {
- return &_commonStats;
- }
+ return ret.release();
+}
- const SpecificStats* AndSortedStage::getSpecificStats() const {
- return &_specificStats;
- }
+const CommonStats* AndSortedStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* AndSortedStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
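
The and_sorted.cpp loop above keeps a target RecordId and advances every other child toward it, adopting a child's id as the new target whenever it overshoots. Below is a minimal standalone sketch of that merge-style intersection over sorted id streams; it is not MongoDB code, and RecordIdT plus the vector-of-vectors input are hypothetical stand-ins for real child stages.

// Sketch only: sorted AND via a moving target id, assuming simplified inputs.
#include <cstdint>
#include <iostream>
#include <vector>

using RecordIdT = std::int64_t;

std::vector<RecordIdT> sortedAnd(const std::vector<std::vector<RecordIdT>>& children) {
    std::vector<RecordIdT> out;
    if (children.size() < 2)
        return out;

    std::vector<std::size_t> pos(children.size(), 0);  // cursor into each child
    while (true) {
        // The target is the largest id currently at the front of any stream.
        RecordIdT target = 0;
        bool haveTarget = false;
        for (std::size_t i = 0; i < children.size(); ++i) {
            if (pos[i] >= children[i].size())
                return out;  // any stream at EOF ends the intersection
            if (!haveTarget || children[i][pos[i]] > target) {
                target = children[i][pos[i]];
                haveTarget = true;
            }
        }

        // Advance every stream until it reaches or passes the target.
        bool allMatch = true;
        for (std::size_t i = 0; i < children.size(); ++i) {
            while (pos[i] < children[i].size() && children[i][pos[i]] < target)
                ++pos[i];
            if (pos[i] >= children[i].size())
                return out;
            if (children[i][pos[i]] != target)
                allMatch = false;  // overshot; the loop retries with a larger target
        }

        if (allMatch) {
            out.push_back(target);
            for (std::size_t i = 0; i < children.size(); ++i)
                ++pos[i];
        }
    }
}

int main() {
    std::vector<RecordIdT> result = sortedAnd({{1, 2, 4, 6, 8}, {2, 3, 6, 9}, {2, 6, 7}});
    for (RecordIdT id : result)
        std::cout << id << "\n";  // prints 2 then 6
    return 0;
}
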
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h
index 424cda280f6..3a25aa8456e 100644
--- a/src/mongo/db/exec/and_sorted.h
+++ b/src/mongo/db/exec/and_sorted.h
@@ -39,77 +39,79 @@
namespace mongo {
- /**
- * Reads from N children, each of which must have a valid RecordId. Assumes each child produces
- * RecordIds in sorted order. Outputs the intersection of the RecordIds outputted by the
- * children.
- *
- * Preconditions: Valid RecordId. More than one child.
- *
- * Any RecordId that we keep a reference to that is invalidated before we are able to return it
- * is fetched and added to the WorkingSet as "flagged for further review." Because this stage
- * operates with RecordIds, we are unable to evaluate the AND for the invalidated RecordId, and it
- * must be fully matched later.
- */
- class AndSortedStage : public PlanStage {
- public:
- AndSortedStage(WorkingSet* ws, const Collection* collection);
- virtual ~AndSortedStage();
+/**
+ * Reads from N children, each of which must have a valid RecordId. Assumes each child produces
+ * RecordIds in sorted order. Outputs the intersection of the RecordIds outputted by the
+ * children.
+ *
+ * Preconditions: Valid RecordId. More than one child.
+ *
+ * Any RecordId that we keep a reference to that is invalidated before we are able to return it
+ * is fetched and added to the WorkingSet as "flagged for further review." Because this stage
+ * operates with RecordIds, we are unable to evaluate the AND for the invalidated RecordId, and it
+ * must be fully matched later.
+ */
+class AndSortedStage : public PlanStage {
+public:
+ AndSortedStage(WorkingSet* ws, const Collection* collection);
+ virtual ~AndSortedStage();
- void addChild(PlanStage* child);
+ void addChild(PlanStage* child);
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_AND_SORTED; }
+ virtual StageType stageType() const {
+ return STAGE_AND_SORTED;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- // Find a node to AND against.
- PlanStage::StageState getTargetLoc(WorkingSetID* out);
+private:
+ // Find a node to AND against.
+ PlanStage::StageState getTargetLoc(WorkingSetID* out);
- // Move a child which hasn't advanced to the target node forward.
- // Returns the target node in 'out' if all children successfully advance to it.
- PlanStage::StageState moveTowardTargetLoc(WorkingSetID* out);
+ // Move a child which hasn't advanced to the target node forward.
+ // Returns the target node in 'out' if all children successfully advance to it.
+ PlanStage::StageState moveTowardTargetLoc(WorkingSetID* out);
- // Not owned by us.
- const Collection* _collection;
+ // Not owned by us.
+ const Collection* _collection;
- // Not owned by us.
- WorkingSet* _ws;
+ // Not owned by us.
+ WorkingSet* _ws;
- // Owned by us.
- std::vector<PlanStage*> _children;
+ // Owned by us.
+ std::vector<PlanStage*> _children;
- // The current node we're AND-ing against.
- size_t _targetNode;
- RecordId _targetLoc;
- WorkingSetID _targetId;
+ // The current node we're AND-ing against.
+ size_t _targetNode;
+ RecordId _targetLoc;
+ WorkingSetID _targetId;
- // Nodes we're moving forward until they hit the element we're AND-ing.
- // Everything in here has not advanced to _targetLoc yet.
- // These are indices into _children.
- std::queue<size_t> _workingTowardRep;
+ // Nodes we're moving forward until they hit the element we're AND-ing.
+ // Everything in here has not advanced to _targetLoc yet.
+ // These are indices into _children.
+ std::queue<size_t> _workingTowardRep;
- // If any child hits EOF or if we have any errors, we're EOF.
- bool _isEOF;
+ // If any child hits EOF or if we have any errors, we're EOF.
+ bool _isEOF;
- // Stats
- CommonStats _commonStats;
- AndSortedStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ AndSortedStats _specificStats;
+};
} // namespace mongo
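
Both AND headers above expose the same work()/StageState protocol: a caller pulls results by calling work() repeatedly, treating NEED_TIME as "call again" and IS_EOF as the end. The following is a minimal standalone sketch of driving that protocol; MiniStage and CountingStage are hypothetical simplifications, not the real PlanStage interface.

// Sketch only: a caller's work() loop over a toy stage, assuming simplified states.
#include <cstddef>
#include <iostream>
#include <vector>

using WorkingSetIDT = std::size_t;

class MiniStage {
public:
    enum StageState { ADVANCED, NEED_TIME, IS_EOF };
    virtual ~MiniStage() {}
    virtual StageState work(WorkingSetIDT* out) = 0;
};

// Produces the ids 0..n-1, pretending every other call needs more time.
class CountingStage : public MiniStage {
public:
    explicit CountingStage(std::size_t n) : _n(n), _next(0), _tick(false) {}
    StageState work(WorkingSetIDT* out) override {
        if (_next >= _n)
            return IS_EOF;
        _tick = !_tick;
        if (_tick)
            return NEED_TIME;  // not ready yet; the caller simply calls again
        *out = _next++;
        return ADVANCED;
    }

private:
    std::size_t _n;
    std::size_t _next;
    bool _tick;
};

int main() {
    CountingStage stage(3);
    std::vector<WorkingSetIDT> results;
    while (true) {
        WorkingSetIDT id = 0;
        MiniStage::StageState state = stage.work(&id);
        if (state == MiniStage::ADVANCED)
            results.push_back(id);
        else if (state == MiniStage::IS_EOF)
            break;
        // NEED_TIME: fall through and call work() again.
    }
    for (WorkingSetIDT id : results)
        std::cout << id << "\n";  // prints 0, 1, 2
    return 0;
}
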
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 3c512e01890..78894d28d35 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -49,338 +49,317 @@
namespace mongo {
- // static
- const char* CachedPlanStage::kStageType = "CACHED_PLAN";
-
- CachedPlanStage::CachedPlanStage(OperationContext* txn,
- Collection* collection,
- WorkingSet* ws,
- CanonicalQuery* cq,
- const QueryPlannerParams& params,
- size_t decisionWorks,
- PlanStage* root)
- : _txn(txn),
- _collection(collection),
- _ws(ws),
- _canonicalQuery(cq),
- _plannerParams(params),
- _decisionWorks(decisionWorks),
- _root(root),
- _commonStats(kStageType) {
- invariant(_collection);
- }
-
- Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
- // Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
- // execution work that happens here, so this is needed for the time accounting to
- // make sense.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- // If we work this many times during the trial period, then we will replan the
- // query from scratch.
- size_t maxWorksBeforeReplan = static_cast<size_t>(internalQueryCacheEvictionRatio
- * _decisionWorks);
-
- // The trial period ends without replanning if the cached plan produces this many results.
- size_t numResults = MultiPlanStage::getTrialPeriodNumToReturn(*_canonicalQuery);
-
- for (size_t i = 0; i < maxWorksBeforeReplan; ++i) {
- // Might need to yield between calls to work due to the timer elapsing.
- Status yieldStatus = tryYield(yieldPolicy);
- if (!yieldStatus.isOK()) {
- return yieldStatus;
- }
+// static
+const char* CachedPlanStage::kStageType = "CACHED_PLAN";
+
+CachedPlanStage::CachedPlanStage(OperationContext* txn,
+ Collection* collection,
+ WorkingSet* ws,
+ CanonicalQuery* cq,
+ const QueryPlannerParams& params,
+ size_t decisionWorks,
+ PlanStage* root)
+ : _txn(txn),
+ _collection(collection),
+ _ws(ws),
+ _canonicalQuery(cq),
+ _plannerParams(params),
+ _decisionWorks(decisionWorks),
+ _root(root),
+ _commonStats(kStageType) {
+ invariant(_collection);
+}
+
+Status CachedPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
+ // Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
+ // execution work that happens here, so this is needed for the time accounting to
+ // make sense.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ // If we work this many times during the trial period, then we will replan the
+ // query from scratch.
+ size_t maxWorksBeforeReplan =
+ static_cast<size_t>(internalQueryCacheEvictionRatio * _decisionWorks);
+
+ // The trial period ends without replanning if the cached plan produces this many results.
+ size_t numResults = MultiPlanStage::getTrialPeriodNumToReturn(*_canonicalQuery);
+
+ for (size_t i = 0; i < maxWorksBeforeReplan; ++i) {
+ // Might need to yield between calls to work due to the timer elapsing.
+ Status yieldStatus = tryYield(yieldPolicy);
+ if (!yieldStatus.isOK()) {
+ return yieldStatus;
+ }
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = _root->work(&id);
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = _root->work(&id);
- if (PlanStage::ADVANCED == state) {
- // Save result for later.
- _results.push_back(id);
+ if (PlanStage::ADVANCED == state) {
+ // Save result for later.
+ _results.push_back(id);
- if (_results.size() >= numResults) {
- // Once a plan returns enough results, stop working. Update cache with stats
- // from this run and return.
- updatePlanCache();
- return Status::OK();
- }
- }
- else if (PlanStage::IS_EOF == state) {
- // Cached plan hit EOF quickly enough. No need to replan. Update cache with stats
+ if (_results.size() >= numResults) {
+ // Once a plan returns enough results, stop working. Update cache with stats
// from this run and return.
updatePlanCache();
return Status::OK();
}
- else if (PlanStage::NEED_YIELD == state) {
- if (id == WorkingSet::INVALID_ID) {
- if (!yieldPolicy->allowedToYield()) {
- throw WriteConflictException();
- }
- }
- else {
- WorkingSetMember* member = _ws->get(id);
- invariant(member->hasFetcher());
- // Transfer ownership of the fetcher and yield.
- _fetcher.reset(member->releaseFetcher());
- }
-
- if (yieldPolicy->allowedToYield()) {
- yieldPolicy->forceYield();
- }
-
- Status yieldStatus = tryYield(yieldPolicy);
- if (!yieldStatus.isOK()) {
- return yieldStatus;
+ } else if (PlanStage::IS_EOF == state) {
+ // Cached plan hit EOF quickly enough. No need to replan. Update cache with stats
+ // from this run and return.
+ updatePlanCache();
+ return Status::OK();
+ } else if (PlanStage::NEED_YIELD == state) {
+ if (id == WorkingSet::INVALID_ID) {
+ if (!yieldPolicy->allowedToYield()) {
+ throw WriteConflictException();
}
+ } else {
+ WorkingSetMember* member = _ws->get(id);
+ invariant(member->hasFetcher());
+ // Transfer ownership of the fetcher and yield.
+ _fetcher.reset(member->releaseFetcher());
}
- else if (PlanStage::FAILURE == state) {
- // On failure, fall back to replanning the whole query. We neither evict the
- // existing cache entry nor cache the result of replanning.
- BSONObj statusObj;
- WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj);
-
- LOG(1) << "Execution of cached plan failed, falling back to replan."
- << " query: "
- << _canonicalQuery->toStringShort()
- << " planSummary: "
- << Explain::getPlanSummary(_root.get())
- << " status: "
- << statusObj;
-
- const bool shouldCache = false;
- return replan(yieldPolicy, shouldCache);
- }
- else if (PlanStage::DEAD == state) {
- BSONObj statusObj;
- WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj);
-
- LOG(1) << "Execution of cached plan failed: PlanStage died"
- << ", query: "
- << _canonicalQuery->toStringShort()
- << " planSummary: "
- << Explain::getPlanSummary(_root.get())
- << " status: "
- << statusObj;
-
- return WorkingSetCommon::getMemberObjectStatus(statusObj);
- }
- else {
- invariant(PlanStage::NEED_TIME == state);
- }
- }
- // If we're here, the trial period took more than 'maxWorksBeforeReplan' work cycles. This
- // plan is taking too long, so we replan from scratch.
- LOG(1) << "Execution of cached plan required "
- << maxWorksBeforeReplan
- << " works, but was originally cached with only "
- << _decisionWorks
- << " works. Evicting cache entry and replanning query: "
- << _canonicalQuery->toStringShort()
- << " plan summary before replan: "
- << Explain::getPlanSummary(_root.get());
-
- const bool shouldCache = true;
- return replan(yieldPolicy, shouldCache);
- }
+ if (yieldPolicy->allowedToYield()) {
+ yieldPolicy->forceYield();
+ }
- Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
- // These are the conditions which can cause us to yield:
- // 1) The yield policy's timer elapsed, or
- // 2) some stage requested a yield due to a document fetch, or
- // 3) we need to yield and retry due to a WriteConflictException.
- // In all cases, the actual yielding happens here.
- if (yieldPolicy->shouldYield()) {
- // Here's where we yield.
- bool alive = yieldPolicy->yield(_fetcher.get());
-
- if (!alive) {
- return Status(ErrorCodes::OperationFailed,
- "CachedPlanStage killed during plan selection");
+ Status yieldStatus = tryYield(yieldPolicy);
+ if (!yieldStatus.isOK()) {
+ return yieldStatus;
}
+ } else if (PlanStage::FAILURE == state) {
+ // On failure, fall back to replanning the whole query. We neither evict the
+ // existing cache entry nor cache the result of replanning.
+ BSONObj statusObj;
+ WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj);
+
+ LOG(1) << "Execution of cached plan failed, falling back to replan."
+ << " query: " << _canonicalQuery->toStringShort()
+ << " planSummary: " << Explain::getPlanSummary(_root.get())
+ << " status: " << statusObj;
+
+ const bool shouldCache = false;
+ return replan(yieldPolicy, shouldCache);
+ } else if (PlanStage::DEAD == state) {
+ BSONObj statusObj;
+ WorkingSetCommon::getStatusMemberObject(*_ws, id, &statusObj);
+
+ LOG(1) << "Execution of cached plan failed: PlanStage died"
+ << ", query: " << _canonicalQuery->toStringShort()
+ << " planSummary: " << Explain::getPlanSummary(_root.get())
+ << " status: " << statusObj;
+
+ return WorkingSetCommon::getMemberObjectStatus(statusObj);
+ } else {
+ invariant(PlanStage::NEED_TIME == state);
}
-
- // We're done using the fetcher, so it should be freed. We don't want to
- // use the same RecordFetcher twice.
- _fetcher.reset();
-
- return Status::OK();
}
- Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
- // We're going to start over with a new plan. No need for only old buffered results.
- _results.clear();
-
- // Clear out the working set. We'll start with a fresh working set.
- _ws->clear();
-
- // Use the query planning module to plan the whole query.
- std::vector<QuerySolution*> rawSolutions;
- Status status = QueryPlanner::plan(*_canonicalQuery, _plannerParams, &rawSolutions);
- if (!status.isOK()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "error processing query: " << _canonicalQuery->toString()
- << " planner returned error: " << status.reason());
+ // If we're here, the trial period took more than 'maxWorksBeforeReplan' work cycles. This
+ // plan is taking too long, so we replan from scratch.
+ LOG(1) << "Execution of cached plan required " << maxWorksBeforeReplan
+ << " works, but was originally cached with only " << _decisionWorks
+ << " works. Evicting cache entry and replanning query: "
+ << _canonicalQuery->toStringShort()
+ << " plan summary before replan: " << Explain::getPlanSummary(_root.get());
+
+ const bool shouldCache = true;
+ return replan(yieldPolicy, shouldCache);
+}
+
+Status CachedPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
+ // These are the conditions which can cause us to yield:
+ // 1) The yield policy's timer elapsed, or
+ // 2) some stage requested a yield due to a document fetch, or
+ // 3) we need to yield and retry due to a WriteConflictException.
+ // In all cases, the actual yielding happens here.
+ if (yieldPolicy->shouldYield()) {
+ // Here's where we yield.
+ bool alive = yieldPolicy->yield(_fetcher.get());
+
+ if (!alive) {
+ return Status(ErrorCodes::OperationFailed,
+ "CachedPlanStage killed during plan selection");
}
+ }
- OwnedPointerVector<QuerySolution> solutions(rawSolutions);
+ // We're done using the fetcher, so it should be freed. We don't want to
+ // use the same RecordFetcher twice.
+ _fetcher.reset();
- // We cannot figure out how to answer the query. Perhaps it requires an index
- // we do not have?
- if (0 == solutions.size()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "error processing query: "
- << _canonicalQuery->toString()
- << " No query solutions");
- }
+ return Status::OK();
+}
- if (1 == solutions.size()) {
- // If there's only one solution, it won't get cached. Make sure to evict the existing
- // cache entry if requested by the caller.
- if (shouldCache) {
- PlanCache* cache = _collection->infoCache()->getPlanCache();
- cache->remove(*_canonicalQuery);
- }
+Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
+ // We're going to start over with a new plan. No need for only old buffered results.
+ _results.clear();
- PlanStage* newRoot;
- // Only one possible plan. Build the stages from the solution.
- verify(StageBuilder::build(_txn, _collection, *solutions[0], _ws, &newRoot));
- _root.reset(newRoot);
- _replannedQs.reset(solutions.popAndReleaseBack());
- return Status::OK();
- }
+ // Clear out the working set. We'll start with a fresh working set.
+ _ws->clear();
- // Many solutions. Create a MultiPlanStage to pick the best, update the cache,
- // and so on. The working set will be shared by all candidate plans.
- _root.reset(new MultiPlanStage(_txn, _collection, _canonicalQuery, shouldCache));
- MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_root.get());
+ // Use the query planning module to plan the whole query.
+ std::vector<QuerySolution*> rawSolutions;
+ Status status = QueryPlanner::plan(*_canonicalQuery, _plannerParams, &rawSolutions);
+ if (!status.isOK()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "error processing query: " << _canonicalQuery->toString()
+ << " planner returned error: " << status.reason());
+ }
- for (size_t ix = 0; ix < solutions.size(); ++ix) {
- if (solutions[ix]->cacheData.get()) {
- solutions[ix]->cacheData->indexFilterApplied = _plannerParams.indexFiltersApplied;
- }
+ OwnedPointerVector<QuerySolution> solutions(rawSolutions);
- PlanStage* nextPlanRoot;
- verify(StageBuilder::build(_txn, _collection, *solutions[ix], _ws, &nextPlanRoot));
+ // We cannot figure out how to answer the query. Perhaps it requires an index
+ // we do not have?
+ if (0 == solutions.size()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "error processing query: " << _canonicalQuery->toString()
+ << " No query solutions");
+ }
- // Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
- multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
+ if (1 == solutions.size()) {
+ // If there's only one solution, it won't get cached. Make sure to evict the existing
+ // cache entry if requested by the caller.
+ if (shouldCache) {
+ PlanCache* cache = _collection->infoCache()->getPlanCache();
+ cache->remove(*_canonicalQuery);
}
- // Delegate to the MultiPlanStage's plan selection facility.
- return multiPlanStage->pickBestPlan(yieldPolicy);
- }
-
- bool CachedPlanStage::isEOF() {
- return _results.empty() && _root->isEOF();
+ PlanStage* newRoot;
+ // Only one possible plan. Build the stages from the solution.
+ verify(StageBuilder::build(_txn, _collection, *solutions[0], _ws, &newRoot));
+ _root.reset(newRoot);
+ _replannedQs.reset(solutions.popAndReleaseBack());
+ return Status::OK();
}
- PlanStage::StageState CachedPlanStage::work(WorkingSetID* out) {
- ++_commonStats.works;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- if (isEOF()) { return PlanStage::IS_EOF; }
+ // Many solutions. Create a MultiPlanStage to pick the best, update the cache,
+ // and so on. The working set will be shared by all candidate plans.
+ _root.reset(new MultiPlanStage(_txn, _collection, _canonicalQuery, shouldCache));
+ MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_root.get());
- // First exhaust any results buffered during the trial period.
- if (!_results.empty()) {
- *out = _results.front();
- _results.pop_front();
- _commonStats.advanced++;
- return PlanStage::ADVANCED;
+ for (size_t ix = 0; ix < solutions.size(); ++ix) {
+ if (solutions[ix]->cacheData.get()) {
+ solutions[ix]->cacheData->indexFilterApplied = _plannerParams.indexFiltersApplied;
}
- // Nothing left in trial period buffer.
- StageState childStatus = _root->work(out);
+ PlanStage* nextPlanRoot;
+ verify(StageBuilder::build(_txn, _collection, *solutions[ix], _ws, &nextPlanRoot));
- if (PlanStage::ADVANCED == childStatus) {
- _commonStats.advanced++;
- }
- else if (PlanStage::NEED_YIELD == childStatus) {
- _commonStats.needYield++;
- }
- else if (PlanStage::NEED_TIME == childStatus) {
- _commonStats.needTime++;
- }
-
- return childStatus;
+ // Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
+ multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
}
- void CachedPlanStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- _root->saveState();
- }
+ // Delegate to the MultiPlanStage's plan selection facility.
+ return multiPlanStage->pickBestPlan(yieldPolicy);
+}
- void CachedPlanStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+bool CachedPlanStage::isEOF() {
+ return _results.empty() && _root->isEOF();
+}
- ++_commonStats.unyields;
- _root->restoreState(opCtx);
- }
+PlanStage::StageState CachedPlanStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- void CachedPlanStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- _root->invalidate(txn, dl, type);
- ++_commonStats.invalidates;
-
- for (std::list<WorkingSetID>::iterator it = _results.begin(); it != _results.end(); ) {
- WorkingSetMember* member = _ws->get(*it);
- if (member->hasLoc() && member->loc == dl) {
- std::list<WorkingSetID>::iterator next = it;
- ++next;
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- _results.erase(it);
- it = next;
- }
- else {
- ++it;
- }
- }
- }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- std::vector<PlanStage*> CachedPlanStage::getChildren() const {
- return { _root.get() };
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
}
- PlanStageStats* CachedPlanStage::getStats() {
- _commonStats.isEOF = isEOF();
-
- std::unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_CACHED_PLAN));
- ret->specific.reset(new CachedPlanStats(_specificStats));
- ret->children.push_back(_root->getStats());
-
- return ret.release();
+ // First exhaust any results buffered during the trial period.
+ if (!_results.empty()) {
+ *out = _results.front();
+ _results.pop_front();
+ _commonStats.advanced++;
+ return PlanStage::ADVANCED;
}
- const CommonStats* CachedPlanStage::getCommonStats() const {
- return &_commonStats;
- }
+ // Nothing left in trial period buffer.
+ StageState childStatus = _root->work(out);
- const SpecificStats* CachedPlanStage::getSpecificStats() const {
- return &_specificStats;
+ if (PlanStage::ADVANCED == childStatus) {
+ _commonStats.advanced++;
+ } else if (PlanStage::NEED_YIELD == childStatus) {
+ _commonStats.needYield++;
+ } else if (PlanStage::NEED_TIME == childStatus) {
+ _commonStats.needTime++;
}
- void CachedPlanStage::updatePlanCache() {
- std::unique_ptr<PlanCacheEntryFeedback> feedback(new PlanCacheEntryFeedback());
- feedback->stats.reset(getStats());
- feedback->score = PlanRanker::scoreTree(feedback->stats.get());
-
- PlanCache* cache = _collection->infoCache()->getPlanCache();
- Status fbs = cache->feedback(*_canonicalQuery, feedback.release());
- if (!fbs.isOK()) {
- LOG(5) << _canonicalQuery->ns() << ": Failed to update cache with feedback: "
- << fbs.toString() << " - "
- << "(query: " << _canonicalQuery->getQueryObj()
- << "; sort: " << _canonicalQuery->getParsed().getSort()
- << "; projection: " << _canonicalQuery->getParsed().getProj()
- << ") is no longer in plan cache.";
+ return childStatus;
+}
+
+void CachedPlanStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ _root->saveState();
+}
+
+void CachedPlanStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+
+ ++_commonStats.unyields;
+ _root->restoreState(opCtx);
+}
+
+void CachedPlanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ _root->invalidate(txn, dl, type);
+ ++_commonStats.invalidates;
+
+ for (std::list<WorkingSetID>::iterator it = _results.begin(); it != _results.end();) {
+ WorkingSetMember* member = _ws->get(*it);
+ if (member->hasLoc() && member->loc == dl) {
+ std::list<WorkingSetID>::iterator next = it;
+ ++next;
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ _results.erase(it);
+ it = next;
+ } else {
+ ++it;
}
}
+}
+
+std::vector<PlanStage*> CachedPlanStage::getChildren() const {
+ return {_root.get()};
+}
+
+PlanStageStats* CachedPlanStage::getStats() {
+ _commonStats.isEOF = isEOF();
+
+ std::unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_CACHED_PLAN));
+ ret->specific.reset(new CachedPlanStats(_specificStats));
+ ret->children.push_back(_root->getStats());
+
+ return ret.release();
+}
+
+const CommonStats* CachedPlanStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* CachedPlanStage::getSpecificStats() const {
+ return &_specificStats;
+}
+
+void CachedPlanStage::updatePlanCache() {
+ std::unique_ptr<PlanCacheEntryFeedback> feedback(new PlanCacheEntryFeedback());
+ feedback->stats.reset(getStats());
+ feedback->score = PlanRanker::scoreTree(feedback->stats.get());
+
+ PlanCache* cache = _collection->infoCache()->getPlanCache();
+ Status fbs = cache->feedback(*_canonicalQuery, feedback.release());
+ if (!fbs.isOK()) {
+ LOG(5) << _canonicalQuery->ns()
+ << ": Failed to update cache with feedback: " << fbs.toString() << " - "
+ << "(query: " << _canonicalQuery->getQueryObj()
+ << "; sort: " << _canonicalQuery->getParsed().getSort()
+ << "; projection: " << _canonicalQuery->getParsed().getProj()
+ << ") is no longer in plan cache.";
+ }
+}
} // namespace mongo
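
CachedPlanStage::pickBestPlan above works the cached plan for at most internalQueryCacheEvictionRatio * _decisionWorks calls; if it produces enough results within that budget it is kept (the real stage also keeps it on early EOF), otherwise the entry is evicted and the query is replanned. Below is a minimal standalone sketch of that decision, not MongoDB code; runTrialPeriod, the workOnce callback, and the constants in main are hypothetical simplifications.

// Sketch only: the trial-period keep-or-replan decision, assuming simplified inputs.
#include <cstddef>
#include <functional>
#include <iostream>

enum class TrialOutcome { KEEP_CACHED_PLAN, REPLAN };

// 'workOnce' stands in for one call to the cached plan's work(); it returns
// true when that call produced a result.
TrialOutcome runTrialPeriod(std::size_t decisionWorks,
                            double evictionRatio,
                            std::size_t numResultsNeeded,
                            const std::function<bool()>& workOnce) {
    const std::size_t maxWorksBeforeReplan =
        static_cast<std::size_t>(evictionRatio * decisionWorks);

    std::size_t numResults = 0;
    for (std::size_t i = 0; i < maxWorksBeforeReplan; ++i) {
        if (workOnce())
            ++numResults;
        if (numResults >= numResultsNeeded)
            return TrialOutcome::KEEP_CACHED_PLAN;  // fast enough; keep the cache entry
    }
    // Budget exhausted: the plan is slower than when it was cached, so replan.
    return TrialOutcome::REPLAN;
}

int main() {
    // A plan that advances on every third work() call.
    std::size_t calls = 0;
    auto slowPlan = [&calls]() { return (++calls % 3) == 0; };

    TrialOutcome outcome = runTrialPeriod(/*decisionWorks=*/20,
                                          /*evictionRatio=*/10.0,
                                          /*numResultsNeeded=*/101,
                                          slowPlan);
    std::cout << (outcome == TrialOutcome::REPLAN ? "replan" : "keep") << "\n";  // prints "replan"
    return 0;
}
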
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index ccc82d9cc7d..937b18a8ad2 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -42,118 +42,120 @@
namespace mongo {
- class PlanYieldPolicy;
+class PlanYieldPolicy;
+
+/**
+ * This stage outputs its mainChild, and possibly its backup child
+ * and also updates the cache.
+ *
+ * Preconditions: Valid RecordId.
+ *
+ */
+class CachedPlanStage : public PlanStage {
+public:
+ CachedPlanStage(OperationContext* txn,
+ Collection* collection,
+ WorkingSet* ws,
+ CanonicalQuery* cq,
+ const QueryPlannerParams& params,
+ size_t decisionWorks,
+ PlanStage* root);
+
+ virtual bool isEOF();
+
+ virtual StageState work(WorkingSetID* out);
+
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ virtual std::vector<PlanStage*> getChildren() const;
+
+ virtual StageType stageType() const {
+ return STAGE_CACHED_PLAN;
+ }
+
+ virtual PlanStageStats* getStats();
+
+ virtual const CommonStats* getCommonStats() const;
+
+ virtual const SpecificStats* getSpecificStats() const;
+
+ static const char* kStageType;
/**
- * This stage outputs its mainChild, and possibly its backup child
- * and also updates the cache.
+ * Runs the cached plan for a trial period, yielding during the trial period according to
+ * 'yieldPolicy'.
*
- * Preconditions: Valid RecordId.
+ * Feedback from the trial period is passed to the plan cache. If the performance is lower
+ * than expected, the old plan is evicted and a new plan is selected from scratch (again
+ * yielding according to 'yieldPolicy'). Otherwise, the cached plan is run.
+ */
+ Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
+
+private:
+ /**
+ * Passes stats from the trial period run of the cached plan to the plan cache.
*
+ * If the plan cache entry is deleted before we get a chance to update it, then this
+ * is a no-op.
*/
- class CachedPlanStage : public PlanStage {
- public:
- CachedPlanStage(OperationContext* txn,
- Collection* collection,
- WorkingSet* ws,
- CanonicalQuery* cq,
- const QueryPlannerParams& params,
- size_t decisionWorks,
- PlanStage* root);
-
- virtual bool isEOF();
-
- virtual StageState work(WorkingSetID* out);
-
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
- virtual StageType stageType() const { return STAGE_CACHED_PLAN; }
-
- virtual PlanStageStats* getStats();
-
- virtual const CommonStats* getCommonStats() const;
-
- virtual const SpecificStats* getSpecificStats() const;
-
- static const char* kStageType;
-
- /**
- * Runs the cached plan for a trial period, yielding during the trial period according to
- * 'yieldPolicy'.
- *
- * Feedback from the trial period is passed to the plan cache. If the performance is lower
- * than expected, the old plan is evicted and a new plan is selected from scratch (again
- * yielding according to 'yieldPolicy'). Otherwise, the cached plan is run.
- */
- Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
-
- private:
- /**
- * Passes stats from the trial period run of the cached plan to the plan cache.
- *
- * If the plan cache entry is deleted before we get a chance to update it, then this
- * is a no-op.
- */
- void updatePlanCache();
-
- /**
- * Uses the QueryPlanner and the MultiPlanStage to re-generate candidate plans for this
- * query and select a new winner.
- *
- * We fallback to a new plan if updatePlanCache() tells us that the performance was worse
- * than anticipated during the trial period.
- *
- * We only write the result of re-planning to the plan cache if 'shouldCache' is true.
- */
- Status replan(PlanYieldPolicy* yieldPolicy, bool shouldCache);
-
- /**
- * May yield during the cached plan stage's trial period or replanning phases.
- *
- * Returns a non-OK status if the plan was killed during a yield.
- */
- Status tryYield(PlanYieldPolicy* yieldPolicy);
-
- // Not owned.
- OperationContext* _txn;
-
- // Not owned. Must be non-null.
- Collection* _collection;
-
- // Not owned.
- WorkingSet* _ws;
-
- // Not owned.
- CanonicalQuery* _canonicalQuery;
-
- QueryPlannerParams _plannerParams;
-
- // The number of work cycles taken to decide on a winning plan when the plan was first
- // cached.
- size_t _decisionWorks;
-
- // If we fall back to re-planning the query, and there is just one resulting query solution,
- // that solution is owned here.
- std::unique_ptr<QuerySolution> _replannedQs;
-
- std::unique_ptr<PlanStage> _root;
-
- // Any results produced during trial period execution are kept here.
- std::list<WorkingSetID> _results;
-
- // When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
- // to use to pull the record into memory. We take ownership of the RecordFetcher here,
- // deleting it after we've had a chance to do the fetch. For timing-based yields, we
- // just pass a NULL fetcher.
- std::unique_ptr<RecordFetcher> _fetcher;
-
- // Stats
- CommonStats _commonStats;
- CachedPlanStats _specificStats;
- };
+ void updatePlanCache();
+
+ /**
+ * Uses the QueryPlanner and the MultiPlanStage to re-generate candidate plans for this
+ * query and select a new winner.
+ *
+ * We fallback to a new plan if updatePlanCache() tells us that the performance was worse
+ * than anticipated during the trial period.
+ *
+ * We only write the result of re-planning to the plan cache if 'shouldCache' is true.
+ */
+ Status replan(PlanYieldPolicy* yieldPolicy, bool shouldCache);
+
+ /**
+ * May yield during the cached plan stage's trial period or replanning phases.
+ *
+ * Returns a non-OK status if the plan was killed during a yield.
+ */
+ Status tryYield(PlanYieldPolicy* yieldPolicy);
+
+ // Not owned.
+ OperationContext* _txn;
+
+ // Not owned. Must be non-null.
+ Collection* _collection;
+
+ // Not owned.
+ WorkingSet* _ws;
+
+ // Not owned.
+ CanonicalQuery* _canonicalQuery;
+
+ QueryPlannerParams _plannerParams;
+
+ // The number of work cycles taken to decide on a winning plan when the plan was first
+ // cached.
+ size_t _decisionWorks;
+
+ // If we fall back to re-planning the query, and there is just one resulting query solution,
+ // that solution is owned here.
+ std::unique_ptr<QuerySolution> _replannedQs;
+
+ std::unique_ptr<PlanStage> _root;
+
+ // Any results produced during trial period execution are kept here.
+ std::list<WorkingSetID> _results;
+
+ // When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
+ // to use to pull the record into memory. We take ownership of the RecordFetcher here,
+ // deleting it after we've had a chance to do the fetch. For timing-based yields, we
+ // just pass a NULL fetcher.
+ std::unique_ptr<RecordFetcher> _fetcher;
+
+ // Stats
+ CommonStats _commonStats;
+ CachedPlanStats _specificStats;
+};
} // namespace mongo
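
The _results list declared in cached_plan.h buffers trial-period output, and CachedPlanStage::invalidate earlier in this diff walks that list and drops any entry referring to a deleted RecordId. The following is a minimal standalone sketch of that erase-while-iterating pattern over a std::list; it is not MongoDB code, and RecordIdT plus the bare-id payload are hypothetical simplifications of the buffered WorkingSetIDs.

// Sketch only: dropping buffered results that refer to a deleted record.
#include <cstdint>
#include <iostream>
#include <list>

using RecordIdT = std::int64_t;

void dropInvalidated(std::list<RecordIdT>* bufferedResults, RecordIdT deleted) {
    for (std::list<RecordIdT>::iterator it = bufferedResults->begin();
         it != bufferedResults->end();) {
        if (*it == deleted) {
            it = bufferedResults->erase(it);  // erase returns the next valid iterator
        } else {
            ++it;
        }
    }
}

int main() {
    std::list<RecordIdT> results = {10, 11, 12, 11};
    dropInvalidated(&results, 11);
    for (RecordIdT id : results)
        std::cout << id << "\n";  // prints 10 then 12
    return 0;
}
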
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index 1a0c16c6b55..f0e09f31629 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -42,225 +42,221 @@
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
-#include "mongo/db/client.h" // XXX-ERH
+#include "mongo/db/client.h" // XXX-ERH
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* CollectionScan::kStageType = "COLLSCAN";
-
- CollectionScan::CollectionScan(OperationContext* txn,
- const CollectionScanParams& params,
- WorkingSet* workingSet,
- const MatchExpression* filter)
- : _txn(txn),
- _workingSet(workingSet),
- _filter(filter),
- _params(params),
- _isDead(false),
- _wsidForFetch(_workingSet->allocate()),
- _commonStats(kStageType) {
- // Explain reports the direction of the collection scan.
- _specificStats.direction = params.direction;
-
- // We pre-allocate a WSM and use it to pass up fetch requests. This should never be used
- // for anything other than passing up NEED_YIELD. We use the loc and owned obj state, but
- // the loc isn't really pointing at any obj. The obj field of the WSM should never be used.
- WorkingSetMember* member = _workingSet->get(_wsidForFetch);
- member->state = WorkingSetMember::LOC_AND_OWNED_OBJ;
+using std::unique_ptr;
+using std::vector;
+
+// static
+const char* CollectionScan::kStageType = "COLLSCAN";
+
+CollectionScan::CollectionScan(OperationContext* txn,
+ const CollectionScanParams& params,
+ WorkingSet* workingSet,
+ const MatchExpression* filter)
+ : _txn(txn),
+ _workingSet(workingSet),
+ _filter(filter),
+ _params(params),
+ _isDead(false),
+ _wsidForFetch(_workingSet->allocate()),
+ _commonStats(kStageType) {
+ // Explain reports the direction of the collection scan.
+ _specificStats.direction = params.direction;
+
+ // We pre-allocate a WSM and use it to pass up fetch requests. This should never be used
+ // for anything other than passing up NEED_YIELD. We use the loc and owned obj state, but
+ // the loc isn't really pointing at any obj. The obj field of the WSM should never be used.
+ WorkingSetMember* member = _workingSet->get(_wsidForFetch);
+ member->state = WorkingSetMember::LOC_AND_OWNED_OBJ;
+}
+
+PlanStage::StageState CollectionScan::work(WorkingSetID* out) {
+ ++_commonStats.works;
+
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ if (_isDead) {
+ Status status(ErrorCodes::InternalError, "CollectionScan died");
+ *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
+ return PlanStage::DEAD;
}
- PlanStage::StageState CollectionScan::work(WorkingSetID* out) {
- ++_commonStats.works;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- if (_isDead) {
- Status status(ErrorCodes::InternalError, "CollectionScan died");
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::DEAD;
- }
+ if ((0 != _params.maxScan) && (_specificStats.docsTested >= _params.maxScan)) {
+ _commonStats.isEOF = true;
+ }
- if ((0 != _params.maxScan) && (_specificStats.docsTested >= _params.maxScan)) {
- _commonStats.isEOF = true;
- }
+ if (_commonStats.isEOF) {
+ return PlanStage::IS_EOF;
+ }
- if (_commonStats.isEOF) { return PlanStage::IS_EOF; }
-
- boost::optional<Record> record;
- const bool needToMakeCursor = !_cursor;
- try {
- if (needToMakeCursor) {
- const bool forward = _params.direction == CollectionScanParams::FORWARD;
- _cursor = _params.collection->getCursor(_txn, forward);
-
- if (!_lastSeenId.isNull()) {
- invariant(_params.tailable);
- // Seek to where we were last time. If it no longer exists, mark us as dead
- // since we want to signal an error rather than silently dropping data from the
- // stream. This is related to the _lastSeenId handling in invalidate. Note that
- // we want to return the record *after* this one since we have already returned
- // this one. This is only possible in the tailing case because that is the only
- // time we'd need to create a cursor after already getting a record out of it.
- if (!_cursor->seekExact(_lastSeenId)) {
- _isDead = true;
- Status status(ErrorCodes::InternalError,
- "CollectionScan died: Unexpected RecordId");
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
- return PlanStage::DEAD;
- }
+ boost::optional<Record> record;
+ const bool needToMakeCursor = !_cursor;
+ try {
+ if (needToMakeCursor) {
+ const bool forward = _params.direction == CollectionScanParams::FORWARD;
+ _cursor = _params.collection->getCursor(_txn, forward);
+
+ if (!_lastSeenId.isNull()) {
+ invariant(_params.tailable);
+ // Seek to where we were last time. If it no longer exists, mark us as dead
+ // since we want to signal an error rather than silently dropping data from the
+ // stream. This is related to the _lastSeenId handling in invalidate. Note that
+ // we want to return the record *after* this one since we have already returned
+ // this one. This is only possible in the tailing case because that is the only
+ // time we'd need to create a cursor after already getting a record out of it.
+ if (!_cursor->seekExact(_lastSeenId)) {
+ _isDead = true;
+ Status status(ErrorCodes::InternalError,
+ "CollectionScan died: Unexpected RecordId");
+ *out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
+ return PlanStage::DEAD;
}
-
- _commonStats.needTime++;
- return PlanStage::NEED_TIME;
- }
-
- if (_lastSeenId.isNull() && !_params.start.isNull()) {
- record = _cursor->seekExact(_params.start);
}
- else {
- // See if the record we're about to access is in memory. If not, pass a fetch
- // request up.
- if (auto fetcher = _cursor->fetcherForNext()) {
- // Pass the RecordFetcher up.
- WorkingSetMember* member = _workingSet->get(_wsidForFetch);
- member->setFetcher(fetcher.release());
- *out = _wsidForFetch;
- _commonStats.needYield++;
- return PlanStage::NEED_YIELD;
- }
- record = _cursor->next();
- }
- }
- catch (const WriteConflictException& wce) {
- // Leave us in a state to try again next time.
- if (needToMakeCursor)
- _cursor.reset();
- *out = WorkingSet::INVALID_ID;
- return PlanStage::NEED_YIELD;
+ _commonStats.needTime++;
+ return PlanStage::NEED_TIME;
}
- if (!record) {
- // We just hit EOF. If we are tailable and have already returned data, leave us in a
- // state to pick up where we left off on the next call to work(). Otherwise EOF is
- // permanent.
- if (_params.tailable && !_lastSeenId.isNull()) {
- _cursor.reset();
- }
- else {
- _commonStats.isEOF = true;
+ if (_lastSeenId.isNull() && !_params.start.isNull()) {
+ record = _cursor->seekExact(_params.start);
+ } else {
+ // See if the record we're about to access is in memory. If not, pass a fetch
+ // request up.
+ if (auto fetcher = _cursor->fetcherForNext()) {
+ // Pass the RecordFetcher up.
+ WorkingSetMember* member = _workingSet->get(_wsidForFetch);
+ member->setFetcher(fetcher.release());
+ *out = _wsidForFetch;
+ _commonStats.needYield++;
+ return PlanStage::NEED_YIELD;
}
-
- return PlanStage::IS_EOF;
- }
-
- _lastSeenId = record->id;
-
- WorkingSetID id = _workingSet->allocate();
- WorkingSetMember* member = _workingSet->get(id);
- member->loc = record->id;
- member->obj = {_txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
- member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- return returnIfMatches(member, id, out);
+ record = _cursor->next();
+ }
+ } catch (const WriteConflictException& wce) {
+ // Leave us in a state to try again next time.
+ if (needToMakeCursor)
+ _cursor.reset();
+ *out = WorkingSet::INVALID_ID;
+ return PlanStage::NEED_YIELD;
}
- PlanStage::StageState CollectionScan::returnIfMatches(WorkingSetMember* member,
- WorkingSetID memberID,
- WorkingSetID* out) {
- ++_specificStats.docsTested;
-
- if (Filter::passes(member, _filter)) {
- *out = memberID;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
- else {
- _workingSet->free(memberID);
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
+ if (!record) {
+ // We just hit EOF. If we are tailable and have already returned data, leave us in a
+ // state to pick up where we left off on the next call to work(). Otherwise EOF is
+ // permanent.
+ if (_params.tailable && !_lastSeenId.isNull()) {
+ _cursor.reset();
+ } else {
+ _commonStats.isEOF = true;
}
+
+ return PlanStage::IS_EOF;
}
- bool CollectionScan::isEOF() {
- return _commonStats.isEOF || _isDead;
+ _lastSeenId = record->id;
+
+ WorkingSetID id = _workingSet->allocate();
+ WorkingSetMember* member = _workingSet->get(id);
+ member->loc = record->id;
+ member->obj = {_txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+ member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+
+ return returnIfMatches(member, id, out);
+}
+
+PlanStage::StageState CollectionScan::returnIfMatches(WorkingSetMember* member,
+ WorkingSetID memberID,
+ WorkingSetID* out) {
+ ++_specificStats.docsTested;
+
+ if (Filter::passes(member, _filter)) {
+ *out = memberID;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ } else {
+ _workingSet->free(memberID);
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
+}
- void CollectionScan::invalidate(OperationContext* txn,
- const RecordId& id,
- InvalidationType type) {
- ++_commonStats.invalidates;
+bool CollectionScan::isEOF() {
+ return _commonStats.isEOF || _isDead;
+}
- // We don't care about mutations since we apply any filters to the result when we (possibly)
- // return it.
- if (INVALIDATION_DELETION != type) {
- return;
- }
+void CollectionScan::invalidate(OperationContext* txn, const RecordId& id, InvalidationType type) {
+ ++_commonStats.invalidates;
- // If we're here, 'id' is being deleted.
+ // We don't care about mutations since we apply any filters to the result when we (possibly)
+ // return it.
+ if (INVALIDATION_DELETION != type) {
+ return;
+ }
- // Deletions can harm the underlying RecordCursor so we must pass them down.
- if (_cursor) {
- _cursor->invalidate(id);
- }
+ // If we're here, 'id' is being deleted.
- if (_params.tailable && id == _lastSeenId) {
- // This means that deletes have caught up to the reader. We want to error in this case
- // so readers don't miss potentially important data.
- _isDead = true;
- }
+ // Deletions can harm the underlying RecordCursor so we must pass them down.
+ if (_cursor) {
+ _cursor->invalidate(id);
}
- void CollectionScan::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- if (_cursor) {
- _cursor->savePositioned();
- }
+ if (_params.tailable && id == _lastSeenId) {
+ // This means that deletes have caught up to the reader. We want to error in this case
+ // so readers don't miss potentially important data.
+ _isDead = true;
}
+}
- void CollectionScan::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
- if (_cursor) {
- if (!_cursor->restore(opCtx)) {
- warning() << "Collection dropped or state deleted during yield of CollectionScan: "
- << opCtx->getNS();
- _isDead = true;
- }
+void CollectionScan::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ if (_cursor) {
+ _cursor->savePositioned();
+ }
+}
+
+void CollectionScan::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+ if (_cursor) {
+ if (!_cursor->restore(opCtx)) {
+ warning() << "Collection dropped or state deleted during yield of CollectionScan: "
+ << opCtx->getNS();
+ _isDead = true;
}
}
-
- vector<PlanStage*> CollectionScan::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
+}
+
+vector<PlanStage*> CollectionScan::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
+
+PlanStageStats* CollectionScan::getStats() {
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
}
- PlanStageStats* CollectionScan::getStats() {
- // Add a BSON representation of the filter to the stats tree, if there is one.
- if (NULL != _filter) {
- BSONObjBuilder bob;
- _filter->toBSON(&bob);
- _commonStats.filter = bob.obj();
- }
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_COLLSCAN));
+ ret->specific.reset(new CollectionScanStats(_specificStats));
+ return ret.release();
+}
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_COLLSCAN));
- ret->specific.reset(new CollectionScanStats(_specificStats));
- return ret.release();
- }
-
- const CommonStats* CollectionScan::getCommonStats() const {
- return &_commonStats;
- }
+const CommonStats* CollectionScan::getCommonStats() const {
+ return &_commonStats;
+}
- const SpecificStats* CollectionScan::getSpecificStats() const {
- return &_specificStats;
- }
+const SpecificStats* CollectionScan::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
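CollectionScan::work() above follows the common PlanStage contract: each call either hands back a result (ADVANCED), asks to be called again (NEED_TIME), asks the caller to yield first (NEED_YIELD), or terminates (IS_EOF, DEAD). The state names come from the code above, but the ToyStage and driver loop below are invented purely as a self-contained sketch of how a caller consumes that contract; they are not the actual PlanExecutor code.

#include <iostream>

// Stand-in for PlanStage::StageState; the enumerator names mirror the states
// returned by CollectionScan::work() above.
enum class StageState { ADVANCED, NEED_TIME, NEED_YIELD, IS_EOF, DEAD };

// Hypothetical stage that produces three results and then reports EOF.
struct ToyStage {
    int remaining = 3;
    StageState work(int* out) {
        if (remaining == 0)
            return StageState::IS_EOF;
        *out = remaining--;
        return StageState::ADVANCED;
    }
};

int main() {
    ToyStage stage;
    int result = 0;
    while (true) {
        StageState state = stage.work(&result);
        if (state == StageState::ADVANCED) {
            std::cout << "got result " << result << "\n";
        } else if (state == StageState::NEED_TIME || state == StageState::NEED_YIELD) {
            continue;  // a real caller would release locks before retrying on NEED_YIELD
        } else {
            break;  // IS_EOF or DEAD: nothing more will be produced
        }
    }
    return 0;
}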
diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h
index ec19c5e22ee..ec3ffe63bea 100644
--- a/src/mongo/db/exec/collection_scan.h
+++ b/src/mongo/db/exec/collection_scan.h
@@ -37,75 +37,75 @@
namespace mongo {
- class RecordCursor;
- class WorkingSet;
- class OperationContext;
+class RecordCursor;
+class WorkingSet;
+class OperationContext;
- /**
- * Scans over a collection, starting at the RecordId provided in params and continuing until
- * there are no more records in the collection.
- *
- * Preconditions: Valid RecordId.
- */
- class CollectionScan : public PlanStage {
- public:
- CollectionScan(OperationContext* txn,
- const CollectionScanParams& params,
- WorkingSet* workingSet,
- const MatchExpression* filter);
+/**
+ * Scans over a collection, starting at the RecordId provided in params and continuing until
+ * there are no more records in the collection.
+ *
+ * Preconditions: Valid RecordId.
+ */
+class CollectionScan : public PlanStage {
+public:
+ CollectionScan(OperationContext* txn,
+ const CollectionScanParams& params,
+ WorkingSet* workingSet,
+ const MatchExpression* filter);
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_COLLSCAN; }
+ virtual StageType stageType() const {
+ return STAGE_COLLSCAN;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- /**
- * If the member (with id memberID) passes our filter, set *out to memberID and return that
- * ADVANCED. Otherwise, free memberID and return NEED_TIME.
- */
- StageState returnIfMatches(WorkingSetMember* member,
- WorkingSetID memberID,
- WorkingSetID* out);
+private:
+ /**
+ * If the member (with id memberID) passes our filter, set *out to memberID and return that
+ * ADVANCED. Otherwise, free memberID and return NEED_TIME.
+ */
+ StageState returnIfMatches(WorkingSetMember* member, WorkingSetID memberID, WorkingSetID* out);
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
- // WorkingSet is not owned by us.
- WorkingSet* _workingSet;
+ // WorkingSet is not owned by us.
+ WorkingSet* _workingSet;
- // The filter is not owned by us.
- const MatchExpression* _filter;
+ // The filter is not owned by us.
+ const MatchExpression* _filter;
- std::unique_ptr<RecordCursor> _cursor;
+ std::unique_ptr<RecordCursor> _cursor;
- CollectionScanParams _params;
+ CollectionScanParams _params;
- bool _isDead;
+ bool _isDead;
- RecordId _lastSeenId; // Null if nothing has been returned from _cursor yet.
+ RecordId _lastSeenId; // Null if nothing has been returned from _cursor yet.
- // We allocate a working set member with this id on construction of the stage. It gets
- // used for all fetch requests, changing the RecordId as appropriate.
- const WorkingSetID _wsidForFetch;
+ // We allocate a working set member with this id on construction of the stage. It gets
+ // used for all fetch requests, changing the RecordId as appropriate.
+ const WorkingSetID _wsidForFetch;
- // Stats
- CommonStats _commonStats;
- CollectionScanStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ CollectionScanStats _specificStats;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/collection_scan_common.h b/src/mongo/db/exec/collection_scan_common.h
index 9b327598fa5..f5766e0a0d6 100644
--- a/src/mongo/db/exec/collection_scan_common.h
+++ b/src/mongo/db/exec/collection_scan_common.h
@@ -32,35 +32,32 @@
namespace mongo {
- class Collection;
+class Collection;
- struct CollectionScanParams {
- enum Direction {
- FORWARD = 1,
- BACKWARD = -1,
- };
+struct CollectionScanParams {
+ enum Direction {
+ FORWARD = 1,
+ BACKWARD = -1,
+ };
- CollectionScanParams() : collection(NULL),
- start(RecordId()),
- direction(FORWARD),
- tailable(false),
- maxScan(0) { }
+ CollectionScanParams()
+ : collection(NULL), start(RecordId()), direction(FORWARD), tailable(false), maxScan(0) {}
- // What collection?
- // not owned
- const Collection* collection;
+ // What collection?
+ // not owned
+ const Collection* collection;
- // isNull by default. If you specify any value for this, you're responsible for the RecordId
- // not being invalidated before the first call to work(...).
- RecordId start;
+ // isNull by default. If you specify any value for this, you're responsible for the RecordId
+ // not being invalidated before the first call to work(...).
+ RecordId start;
- Direction direction;
+ Direction direction;
- // Do we want the scan to be 'tailable'? Only meaningful if the collection is capped.
- bool tailable;
+ // Do we want the scan to be 'tailable'? Only meaningful if the collection is capped.
+ bool tailable;
- // If non-zero, how many documents will we look at?
- size_t maxScan;
- };
+ // If non-zero, how many documents will we look at?
+ size_t maxScan;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp
index 7534f7e1e44..092c36dfc03 100644
--- a/src/mongo/db/exec/count.cpp
+++ b/src/mongo/db/exec/count.cpp
@@ -36,185 +36,180 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* CountStage::kStageType = "COUNT";
-
- CountStage::CountStage(OperationContext* txn,
- Collection* collection,
- const CountRequest& request,
- WorkingSet* ws,
- PlanStage* child)
- : _txn(txn),
- _collection(collection),
- _request(request),
- _leftToSkip(request.getSkip()),
- _ws(ws),
- _child(child),
- _commonStats(kStageType) { }
-
- CountStage::~CountStage() { }
-
- bool CountStage::isEOF() {
- if (_specificStats.trivialCount) {
- return true;
- }
-
- if (_request.getLimit() > 0 && _specificStats.nCounted >= _request.getLimit()) {
- return true;
- }
-
- return NULL != _child.get() && _child->isEOF();
+using std::unique_ptr;
+using std::vector;
+
+// static
+const char* CountStage::kStageType = "COUNT";
+
+CountStage::CountStage(OperationContext* txn,
+ Collection* collection,
+ const CountRequest& request,
+ WorkingSet* ws,
+ PlanStage* child)
+ : _txn(txn),
+ _collection(collection),
+ _request(request),
+ _leftToSkip(request.getSkip()),
+ _ws(ws),
+ _child(child),
+ _commonStats(kStageType) {}
+
+CountStage::~CountStage() {}
+
+bool CountStage::isEOF() {
+ if (_specificStats.trivialCount) {
+ return true;
}
- void CountStage::trivialCount() {
- invariant(_collection);
- long long nCounted = _collection->numRecords(_txn);
+ if (_request.getLimit() > 0 && _specificStats.nCounted >= _request.getLimit()) {
+ return true;
+ }
- if (0 != _request.getSkip()) {
- nCounted -= _request.getSkip();
- if (nCounted < 0) {
- nCounted = 0;
- }
- }
+ return NULL != _child.get() && _child->isEOF();
+}
- long long limit = _request.getLimit();
- if (limit < 0) {
- limit = -limit;
- }
+void CountStage::trivialCount() {
+ invariant(_collection);
+ long long nCounted = _collection->numRecords(_txn);
- if (limit < nCounted && 0 != limit) {
- nCounted = limit;
+ if (0 != _request.getSkip()) {
+ nCounted -= _request.getSkip();
+ if (nCounted < 0) {
+ nCounted = 0;
}
+ }
- _specificStats.nCounted = nCounted;
- _specificStats.nSkipped = _request.getSkip();
- _specificStats.trivialCount = true;
+ long long limit = _request.getLimit();
+ if (limit < 0) {
+ limit = -limit;
}
- PlanStage::StageState CountStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+ if (limit < nCounted && 0 != limit) {
+ nCounted = limit;
+ }
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ _specificStats.nCounted = nCounted;
+ _specificStats.nSkipped = _request.getSkip();
+ _specificStats.trivialCount = true;
+}
- // This stage never returns a working set member.
- *out = WorkingSet::INVALID_ID;
+PlanStage::StageState CountStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- // If we don't have a query and we have a non-NULL collection, then we can execute this
- // as a trivial count (just ask the collection for how many records it has).
- if (_request.getQuery().isEmpty() && NULL != _collection) {
- trivialCount();
- return PlanStage::IS_EOF;
- }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- if (isEOF()) {
- _commonStats.isEOF = true;
- return PlanStage::IS_EOF;
- }
-
- // For non-trivial counts, we should always have a child stage from which we can retrieve
- // results.
- invariant(_child.get());
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = _child->work(&id);
-
- if (PlanStage::IS_EOF == state) {
- _commonStats.isEOF = true;
- return PlanStage::IS_EOF;
- }
- else if (PlanStage::DEAD == state) {
- return state;
- }
- else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it failed, in which
- // case 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "count stage failed to read result from child";
- Status status = Status(ErrorCodes::InternalError, errmsg);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- }
- return state;
- }
- else if (PlanStage::ADVANCED == state) {
- // We got a result. If we're still skipping, then decrement the number left to skip.
- // Otherwise increment the count until we hit the limit.
- if (_leftToSkip > 0) {
- _leftToSkip--;
- _specificStats.nSkipped++;
- }
- else {
- _specificStats.nCounted++;
- }
-
- // Count doesn't need the actual results, so we just discard any valid working
- // set members that got returned from the child.
- if (WorkingSet::INVALID_ID != id) {
- _ws->free(id);
- }
- }
- else if (PlanStage::NEED_YIELD == state) {
- *out = id;
- _commonStats.needYield++;
- return PlanStage::NEED_YIELD;
- }
+ // This stage never returns a working set member.
+ *out = WorkingSet::INVALID_ID;
- _commonStats.needTime++;
- return PlanStage::NEED_TIME;
+ // If we don't have a query and we have a non-NULL collection, then we can execute this
+ // as a trivial count (just ask the collection for how many records it has).
+ if (_request.getQuery().isEmpty() && NULL != _collection) {
+ trivialCount();
+ return PlanStage::IS_EOF;
}
- void CountStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- if (_child.get()) {
- _child->saveState();
- }
+ if (isEOF()) {
+ _commonStats.isEOF = true;
+ return PlanStage::IS_EOF;
}
- void CountStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
- if (_child.get()) {
- _child->restoreState(opCtx);
- }
+ // For non-trivial counts, we should always have a child stage from which we can retrieve
+ // results.
+ invariant(_child.get());
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = _child->work(&id);
+
+ if (PlanStage::IS_EOF == state) {
+ _commonStats.isEOF = true;
+ return PlanStage::IS_EOF;
+ } else if (PlanStage::DEAD == state) {
+ return state;
+ } else if (PlanStage::FAILURE == state || PlanStage::DEAD == state) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it failed, in which
+ // case 'id' is valid. If ID is invalid, we create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ const std::string errmsg = "count stage failed to read result from child";
+ Status status = Status(ErrorCodes::InternalError, errmsg);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ }
+ return state;
+ } else if (PlanStage::ADVANCED == state) {
+ // We got a result. If we're still skipping, then decrement the number left to skip.
+ // Otherwise increment the count until we hit the limit.
+ if (_leftToSkip > 0) {
+ _leftToSkip--;
+ _specificStats.nSkipped++;
+ } else {
+ _specificStats.nCounted++;
+ }
+
+ // Count doesn't need the actual results, so we just discard any valid working
+ // set members that got returned from the child.
+ if (WorkingSet::INVALID_ID != id) {
+ _ws->free(id);
+ }
+ } else if (PlanStage::NEED_YIELD == state) {
+ *out = id;
+ _commonStats.needYield++;
+ return PlanStage::NEED_YIELD;
}
- void CountStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- if (_child.get()) {
- _child->invalidate(txn, dl, type);
- }
- }
+ _commonStats.needTime++;
+ return PlanStage::NEED_TIME;
+}
- vector<PlanStage*> CountStage::getChildren() const {
- vector<PlanStage*> children;
- if (_child.get()) {
- children.push_back(_child.get());
- }
- return children;
+void CountStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ if (_child.get()) {
+ _child->saveState();
}
-
- PlanStageStats* CountStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_COUNT));
- CountStats* countStats = new CountStats(_specificStats);
- ret->specific.reset(countStats);
- if (_child.get()) {
- ret->children.push_back(_child->getStats());
- }
- return ret.release();
+}
+
+void CountStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+ if (_child.get()) {
+ _child->restoreState(opCtx);
}
+}
- const CommonStats* CountStage::getCommonStats() const {
- return &_commonStats;
+void CountStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ if (_child.get()) {
+ _child->invalidate(txn, dl, type);
}
+}
- const SpecificStats* CountStage::getSpecificStats() const {
- return &_specificStats;
+vector<PlanStage*> CountStage::getChildren() const {
+ vector<PlanStage*> children;
+ if (_child.get()) {
+ children.push_back(_child.get());
}
+ return children;
+}
+
+PlanStageStats* CountStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_COUNT));
+ CountStats* countStats = new CountStats(_specificStats);
+ ret->specific.reset(countStats);
+ if (_child.get()) {
+ ret->children.push_back(_child->getStats());
+ }
+ return ret.release();
+}
+
+const CommonStats* CountStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* CountStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
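CountStage::trivialCount() above computes the answer purely from numRecords(), the skip, and the limit. The helper below restates that arithmetic as a free function so the clamping order is easy to check; applySkipLimit and its parameters are names made up for this sketch and are not part of the tree.

#include <iostream>

// Restatement of the skip/limit arithmetic in CountStage::trivialCount() above:
// subtract the skip (clamped at zero), then cap at the absolute value of a
// non-zero limit.
long long applySkipLimit(long long numRecords, long long skip, long long limit) {
    long long nCounted = numRecords - skip;
    if (nCounted < 0) {
        nCounted = 0;  // skipping past the end yields a count of zero
    }
    if (limit < 0) {
        limit = -limit;  // negative limits are treated as their absolute value
    }
    if (limit != 0 && limit < nCounted) {
        nCounted = limit;  // a non-zero limit caps the count
    }
    return nCounted;
}

int main() {
    std::cout << applySkipLimit(100, 10, 25) << "\n";  // 25: limit caps the remaining 90
    std::cout << applySkipLimit(100, 120, 0) << "\n";  // 0: skip passes the end
    std::cout << applySkipLimit(100, 0, -7) << "\n";   // 7: |-7| caps 100
    return 0;
}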
diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h
index 05bce99ac66..6f5a5f4e203 100644
--- a/src/mongo/db/exec/count.h
+++ b/src/mongo/db/exec/count.h
@@ -34,72 +34,74 @@
namespace mongo {
- /**
- * Stage used by the count command. This stage sits at the root of a plan tree
- * and counts the number of results returned by its child stage.
- *
- * This should not be confused with the CountScan stage. CountScan is a special
- * index access stage which can optimize index access for count operations in
- * some cases. On the other hand, *every* count op has a CountStage at its root.
- *
- * Only returns NEED_TIME until hitting EOF. The count result can be obtained by examining
- * the specific stats.
- */
- class CountStage : public PlanStage {
- public:
- CountStage(OperationContext* txn,
- Collection* collection,
- const CountRequest& request,
- WorkingSet* ws,
- PlanStage* child);
+/**
+ * Stage used by the count command. This stage sits at the root of a plan tree
+ * and counts the number of results returned by its child stage.
+ *
+ * This should not be confused with the CountScan stage. CountScan is a special
+ * index access stage which can optimize index access for count operations in
+ * some cases. On the other hand, *every* count op has a CountStage at its root.
+ *
+ * Only returns NEED_TIME until hitting EOF. The count result can be obtained by examining
+ * the specific stats.
+ */
+class CountStage : public PlanStage {
+public:
+ CountStage(OperationContext* txn,
+ Collection* collection,
+ const CountRequest& request,
+ WorkingSet* ws,
+ PlanStage* child);
- virtual ~CountStage();
+ virtual ~CountStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_COUNT; }
+ virtual StageType stageType() const {
+ return STAGE_COUNT;
+ }
- PlanStageStats* getStats();
+ PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- /**
- * Computes the count in the case of an empty query, applying the skip and
- * limit if necessary. The result is stored in '_specificStats'.
- */
- void trivialCount();
+private:
+ /**
+ * Computes the count in the case of an empty query, applying the skip and
+ * limit if necessary. The result is stored in '_specificStats'.
+ */
+ void trivialCount();
- // Transactional context for read locks. Not owned by us.
- OperationContext* _txn;
+ // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
- // The collection over which we are counting.
- Collection* _collection;
+ // The collection over which we are counting.
+ Collection* _collection;
- CountRequest _request;
+ CountRequest _request;
- // The number of documents that we still need to skip.
- long long _leftToSkip;
+ // The number of documents that we still need to skip.
+ long long _leftToSkip;
- // The working set used to pass intermediate results between stages. Not owned
- // by us.
- WorkingSet* _ws;
+ // The working set used to pass intermediate results between stages. Not owned
+ // by us.
+ WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
+ std::unique_ptr<PlanStage> _child;
- CommonStats _commonStats;
- CountStats _specificStats;
- };
+ CommonStats _commonStats;
+ CountStats _specificStats;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp
index c002e72e9ca..23499102147 100644
--- a/src/mongo/db/exec/count_scan.cpp
+++ b/src/mongo/db/exec/count_scan.cpp
@@ -34,149 +34,148 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* CountScan::kStageType = "COUNT_SCAN";
-
- CountScan::CountScan(OperationContext* txn,
- const CountScanParams& params,
- WorkingSet* workingSet)
- : _txn(txn),
- _workingSet(workingSet),
- _descriptor(params.descriptor),
- _iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
- _shouldDedup(params.descriptor->isMultikey(txn)),
- _params(params),
- _commonStats(kStageType) {
- _specificStats.keyPattern = _params.descriptor->keyPattern();
- _specificStats.indexName = _params.descriptor->indexName();
- _specificStats.isMultiKey = _params.descriptor->isMultikey(txn);
- _specificStats.isUnique = _params.descriptor->unique();
- _specificStats.isSparse = _params.descriptor->isSparse();
- _specificStats.isPartial = _params.descriptor->isPartial();
- _specificStats.indexVersion = _params.descriptor->version();
-
- // endKey must be after startKey in index order since we only do forward scans.
- dassert(_params.startKey.woCompare(_params.endKey,
- Ordering::make(params.descriptor->keyPattern()),
- /*compareFieldNames*/false) <= 0);
- }
-
-
- PlanStage::StageState CountScan::work(WorkingSetID* out) {
- ++_commonStats.works;
- if (_commonStats.isEOF) return PlanStage::IS_EOF;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- boost::optional<IndexKeyEntry> entry;
- const bool needInit = !_cursor;
- try {
- // We don't care about the keys.
- const auto kWantLoc = SortedDataInterface::Cursor::kWantLoc;
-
- if (needInit) {
- // First call to work(). Perform cursor init.
- _cursor = _iam->newCursor(_txn);
- _cursor->setEndPosition(_params.endKey, _params.endKeyInclusive);
-
- entry = _cursor->seek(_params.startKey, _params.startKeyInclusive, kWantLoc);
- }
- else {
- entry = _cursor->next(kWantLoc);
- }
- }
- catch (const WriteConflictException& wce) {
- if (needInit) {
- // Release our cursor and try again next time.
- _cursor.reset();
- }
- *out = WorkingSet::INVALID_ID;
- return PlanStage::NEED_YIELD;
+using std::unique_ptr;
+using std::vector;
+
+// static
+const char* CountScan::kStageType = "COUNT_SCAN";
+
+CountScan::CountScan(OperationContext* txn, const CountScanParams& params, WorkingSet* workingSet)
+ : _txn(txn),
+ _workingSet(workingSet),
+ _descriptor(params.descriptor),
+ _iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
+ _shouldDedup(params.descriptor->isMultikey(txn)),
+ _params(params),
+ _commonStats(kStageType) {
+ _specificStats.keyPattern = _params.descriptor->keyPattern();
+ _specificStats.indexName = _params.descriptor->indexName();
+ _specificStats.isMultiKey = _params.descriptor->isMultikey(txn);
+ _specificStats.isUnique = _params.descriptor->unique();
+ _specificStats.isSparse = _params.descriptor->isSparse();
+ _specificStats.isPartial = _params.descriptor->isPartial();
+ _specificStats.indexVersion = _params.descriptor->version();
+
+ // endKey must be after startKey in index order since we only do forward scans.
+ dassert(_params.startKey.woCompare(_params.endKey,
+ Ordering::make(params.descriptor->keyPattern()),
+ /*compareFieldNames*/ false) <= 0);
+}
+
+
+PlanStage::StageState CountScan::work(WorkingSetID* out) {
+ ++_commonStats.works;
+ if (_commonStats.isEOF)
+ return PlanStage::IS_EOF;
+
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ boost::optional<IndexKeyEntry> entry;
+ const bool needInit = !_cursor;
+ try {
+ // We don't care about the keys.
+ const auto kWantLoc = SortedDataInterface::Cursor::kWantLoc;
+
+ if (needInit) {
+ // First call to work(). Perform cursor init.
+ _cursor = _iam->newCursor(_txn);
+ _cursor->setEndPosition(_params.endKey, _params.endKeyInclusive);
+
+ entry = _cursor->seek(_params.startKey, _params.startKeyInclusive, kWantLoc);
+ } else {
+ entry = _cursor->next(kWantLoc);
}
-
- ++_specificStats.keysExamined;
-
- if (!entry) {
- _commonStats.isEOF = true;
+ } catch (const WriteConflictException& wce) {
+ if (needInit) {
+ // Release our cursor and try again next time.
_cursor.reset();
- return PlanStage::IS_EOF;
- }
-
- if (_shouldDedup && !_returned.insert(entry->loc).second) {
- // *loc was already in _returned.
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
}
-
*out = WorkingSet::INVALID_ID;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
+ return PlanStage::NEED_YIELD;
}
- bool CountScan::isEOF() {
- return _commonStats.isEOF;
- }
+ ++_specificStats.keysExamined;
- void CountScan::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- if (_cursor) _cursor->savePositioned();
+ if (!entry) {
+ _commonStats.isEOF = true;
+ _cursor.reset();
+ return PlanStage::IS_EOF;
}
- void CountScan::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
-
- if (_cursor) _cursor->restore(opCtx);
-
- // This can change during yielding.
- // TODO this isn't sufficient. See SERVER-17678.
- _shouldDedup = _descriptor->isMultikey(_txn);
+ if (_shouldDedup && !_returned.insert(entry->loc).second) {
+ // *loc was already in _returned.
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
- void CountScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
- // The only state we're responsible for holding is what RecordIds to drop. If a document
- // mutates the underlying index cursor will deal with it.
- if (INVALIDATION_MUTATION == type) {
- return;
- }
-
- // If we see this RecordId again, it may not be the same document it was before, so we want
- // to return it if we see it again.
- unordered_set<RecordId, RecordId::Hasher>::iterator it = _returned.find(dl);
- if (it != _returned.end()) {
- _returned.erase(it);
- }
+ *out = WorkingSet::INVALID_ID;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+}
+
+bool CountScan::isEOF() {
+ return _commonStats.isEOF;
+}
+
+void CountScan::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ if (_cursor)
+ _cursor->savePositioned();
+}
+
+void CountScan::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+
+ if (_cursor)
+ _cursor->restore(opCtx);
+
+ // This can change during yielding.
+ // TODO this isn't sufficient. See SERVER-17678.
+ _shouldDedup = _descriptor->isMultikey(_txn);
+}
+
+void CountScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+
+ // The only state we're responsible for holding is what RecordIds to drop. If a document
+    // mutates, the underlying index cursor will deal with it.
+ if (INVALIDATION_MUTATION == type) {
+ return;
}
- vector<PlanStage*> CountScan::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
+    // If this RecordId shows up again, it may not refer to the same document it did
+    // before, so we want to be able to return it again.
+ unordered_set<RecordId, RecordId::Hasher>::iterator it = _returned.find(dl);
+ if (it != _returned.end()) {
+ _returned.erase(it);
}
+}
- PlanStageStats* CountScan::getStats() {
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_COUNT_SCAN));
+vector<PlanStage*> CountScan::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
- CountScanStats* countStats = new CountScanStats(_specificStats);
- countStats->keyPattern = _specificStats.keyPattern.getOwned();
- ret->specific.reset(countStats);
+PlanStageStats* CountScan::getStats() {
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_COUNT_SCAN));
- return ret.release();
- }
+ CountScanStats* countStats = new CountScanStats(_specificStats);
+ countStats->keyPattern = _specificStats.keyPattern.getOwned();
+ ret->specific.reset(countStats);
- const CommonStats* CountScan::getCommonStats() const {
- return &_commonStats;
- }
+ return ret.release();
+}
- const SpecificStats* CountScan::getSpecificStats() const {
- return &_specificStats;
- }
+const CommonStats* CountScan::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* CountScan::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
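The dedup logic in CountScan::work() above leans on unordered_set::insert(): a false .second means the RecordId was already counted, and invalidate() erases deleted ids so a document that reuses one can be counted again. Below is a self-contained toy of that bookkeeping, with int standing in for RecordId; the names and sample data are made up for illustration.

#include <iostream>
#include <unordered_set>

int main() {
    // Toy model of CountScan's _returned set.
    std::unordered_set<int> returned;
    const int stream[] = {1, 2, 2, 3, 1};  // duplicates, as a multikey index might produce
    long long counted = 0;

    for (int id : stream) {
        if (!returned.insert(id).second) {
            continue;  // insert().second == false: already counted (NEED_TIME in the stage)
        }
        ++counted;  // ADVANCED in the stage
    }
    std::cout << "counted " << counted << "\n";  // 3

    // Mirror of CountScan::invalidate() on a deletion: forget the id so a later
    // document that reuses it is counted again.
    returned.erase(2);
    if (returned.insert(2).second) {
        ++counted;
    }
    std::cout << "after reuse " << counted << "\n";  // 4
    return 0;
}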
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index e63f672637d..e00fa05e2f8 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -39,75 +39,77 @@
namespace mongo {
- class IndexAccessMethod;
- class IndexDescriptor;
- class WorkingSet;
+class IndexAccessMethod;
+class IndexDescriptor;
+class WorkingSet;
- struct CountScanParams {
- CountScanParams() : descriptor(NULL) { }
+struct CountScanParams {
+ CountScanParams() : descriptor(NULL) {}
- // What index are we traversing?
- const IndexDescriptor* descriptor;
+ // What index are we traversing?
+ const IndexDescriptor* descriptor;
- BSONObj startKey;
- bool startKeyInclusive;
+ BSONObj startKey;
+ bool startKeyInclusive;
- BSONObj endKey;
- bool endKeyInclusive;
- };
+ BSONObj endKey;
+ bool endKeyInclusive;
+};
- /**
- * Used by the count command. Scans an index from a start key to an end key. Does not create
- * any WorkingSetMember(s) for any of the data, instead returning ADVANCED to indicate to the
- * caller that another result should be counted.
- *
- * Only created through the getExecutorCount path, as count is the only operation that doesn't
- * care about its data.
- */
- class CountScan : public PlanStage {
- public:
- CountScan(OperationContext* txn, const CountScanParams& params, WorkingSet* workingSet);
- virtual ~CountScan() { }
+/**
+ * Used by the count command. Scans an index from a start key to an end key. Does not create
+ * any WorkingSetMember(s) for any of the data, instead returning ADVANCED to indicate to the
+ * caller that another result should be counted.
+ *
+ * Only created through the getExecutorCount path, as count is the only operation that doesn't
+ * care about its data.
+ */
+class CountScan : public PlanStage {
+public:
+ CountScan(OperationContext* txn, const CountScanParams& params, WorkingSet* workingSet);
+ virtual ~CountScan() {}
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_COUNT_SCAN; }
+ virtual StageType stageType() const {
+ return STAGE_COUNT_SCAN;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
+private:
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
- // The WorkingSet we annotate with results. Not owned by us.
- WorkingSet* _workingSet;
+ // The WorkingSet we annotate with results. Not owned by us.
+ WorkingSet* _workingSet;
- // Index access. Both pointers below are owned by Collection -> IndexCatalog.
- const IndexDescriptor* _descriptor;
- const IndexAccessMethod* _iam;
+ // Index access. Both pointers below are owned by Collection -> IndexCatalog.
+ const IndexDescriptor* _descriptor;
+ const IndexAccessMethod* _iam;
- std::unique_ptr<SortedDataInterface::Cursor> _cursor;
+ std::unique_ptr<SortedDataInterface::Cursor> _cursor;
- // Could our index have duplicates? If so, we use _returned to dedup.
- bool _shouldDedup;
- unordered_set<RecordId, RecordId::Hasher> _returned;
+ // Could our index have duplicates? If so, we use _returned to dedup.
+ bool _shouldDedup;
+ unordered_set<RecordId, RecordId::Hasher> _returned;
- CountScanParams _params;
+ CountScanParams _params;
- CommonStats _commonStats;
- CountScanStats _specificStats;
- };
+ CommonStats _commonStats;
+ CountScanStats _specificStats;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index 787f3f244bf..5831b44e86a 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -45,274 +45,271 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* DeleteStage::kStageType = "DELETE";
-
- DeleteStage::DeleteStage(OperationContext* txn,
- const DeleteStageParams& params,
- WorkingSet* ws,
- Collection* collection,
- PlanStage* child)
- : _txn(txn),
- _params(params),
- _ws(ws),
- _collection(collection),
- _child(child),
- _idRetrying(WorkingSet::INVALID_ID),
- _idReturning(WorkingSet::INVALID_ID),
- _commonStats(kStageType) { }
-
- DeleteStage::~DeleteStage() {}
-
- bool DeleteStage::isEOF() {
- if (!_collection) {
- return true;
- }
- if (!_params.isMulti && _specificStats.docsDeleted > 0) {
- return true;
- }
- return _idRetrying == WorkingSet::INVALID_ID
- && _idReturning == WorkingSet::INVALID_ID
- && _child->isEOF();
+using std::unique_ptr;
+using std::vector;
+
+// static
+const char* DeleteStage::kStageType = "DELETE";
+
+DeleteStage::DeleteStage(OperationContext* txn,
+ const DeleteStageParams& params,
+ WorkingSet* ws,
+ Collection* collection,
+ PlanStage* child)
+ : _txn(txn),
+ _params(params),
+ _ws(ws),
+ _collection(collection),
+ _child(child),
+ _idRetrying(WorkingSet::INVALID_ID),
+ _idReturning(WorkingSet::INVALID_ID),
+ _commonStats(kStageType) {}
+
+DeleteStage::~DeleteStage() {}
+
+bool DeleteStage::isEOF() {
+ if (!_collection) {
+ return true;
}
+ if (!_params.isMulti && _specificStats.docsDeleted > 0) {
+ return true;
+ }
+ return _idRetrying == WorkingSet::INVALID_ID && _idReturning == WorkingSet::INVALID_ID &&
+ _child->isEOF();
+}
- PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
- ++_commonStats.works;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+PlanStage::StageState DeleteStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- if (isEOF()) { return PlanStage::IS_EOF; }
- invariant(_collection); // If isEOF() returns false, we must have a collection.
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- // It is possible that after a delete was executed, a WriteConflictException occurred
- // and prevented us from returning ADVANCED with the old version of the document.
- if (_idReturning != WorkingSet::INVALID_ID) {
- // We should only get here if we were trying to return something before.
- invariant(_params.returnDeleted);
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
+ invariant(_collection); // If isEOF() returns false, we must have a collection.
- WorkingSetMember* member = _ws->get(_idReturning);
- invariant(member->state == WorkingSetMember::OWNED_OBJ);
+ // It is possible that after a delete was executed, a WriteConflictException occurred
+ // and prevented us from returning ADVANCED with the old version of the document.
+ if (_idReturning != WorkingSet::INVALID_ID) {
+ // We should only get here if we were trying to return something before.
+ invariant(_params.returnDeleted);
- *out = _idReturning;
- _idReturning = WorkingSet::INVALID_ID;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
+ WorkingSetMember* member = _ws->get(_idReturning);
+ invariant(member->state == WorkingSetMember::OWNED_OBJ);
- // Either retry the last WSM we worked on or get a new one from our child.
- WorkingSetID id;
- StageState status;
- if (_idRetrying == WorkingSet::INVALID_ID) {
- status = _child->work(&id);
- }
- else {
- status = ADVANCED;
- id = _idRetrying;
- _idRetrying = WorkingSet::INVALID_ID;
- }
+ *out = _idReturning;
+ _idReturning = WorkingSet::INVALID_ID;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ }
- if (PlanStage::ADVANCED == status) {
- WorkingSetMember* member = _ws->get(id);
+ // Either retry the last WSM we worked on or get a new one from our child.
+ WorkingSetID id;
+ StageState status;
+ if (_idRetrying == WorkingSet::INVALID_ID) {
+ status = _child->work(&id);
+ } else {
+ status = ADVANCED;
+ id = _idRetrying;
+ _idRetrying = WorkingSet::INVALID_ID;
+ }
- // We want to free this member when we return, unless we need to retry it.
- ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
+ if (PlanStage::ADVANCED == status) {
+ WorkingSetMember* member = _ws->get(id);
- if (!member->hasLoc()) {
- // We expect to be here because of an invalidation causing a force-fetch, and
- // doc-locking storage engines do not issue invalidations.
- ++_specificStats.nInvalidateSkips;
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- RecordId rloc = member->loc;
- // Deletes can't have projections. This means that covering analysis will always add
- // a fetch. We should always get fetched data, and never just key data.
- invariant(member->hasObj());
+ // We want to free this member when we return, unless we need to retry it.
+ ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
- try {
- // If the snapshot changed, then we have to make sure we have the latest copy of the
- // doc and that it still matches.
- std::unique_ptr<RecordCursor> cursor;
- if (_txn->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
- cursor = _collection->getCursor(_txn);
- if (!WorkingSetCommon::fetch(_txn, member, cursor)) {
- // Doc is already deleted. Nothing more to do.
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
-
- // Make sure the re-fetched doc still matches the predicate.
- if (_params.canonicalQuery &&
- !_params.canonicalQuery->root()->matchesBSON(member->obj.value(), NULL)) {
- // Doesn't match.
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
+ if (!member->hasLoc()) {
+ // We expect to be here because of an invalidation causing a force-fetch, and
+ // doc-locking storage engines do not issue invalidations.
+ ++_specificStats.nInvalidateSkips;
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ }
+ RecordId rloc = member->loc;
+ // Deletes can't have projections. This means that covering analysis will always add
+ // a fetch. We should always get fetched data, and never just key data.
+ invariant(member->hasObj());
+
+ try {
+ // If the snapshot changed, then we have to make sure we have the latest copy of the
+ // doc and that it still matches.
+ std::unique_ptr<RecordCursor> cursor;
+ if (_txn->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
+ cursor = _collection->getCursor(_txn);
+ if (!WorkingSetCommon::fetch(_txn, member, cursor)) {
+ // Doc is already deleted. Nothing more to do.
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
- // TODO: Do we want to buffer docs and delete them in a group rather than
- // saving/restoring state repeatedly?
-
- try {
- _child->saveState();
- if (supportsDocLocking()) {
- // Doc-locking engines require this after saveState() since they don't use
- // invalidations.
- WorkingSetCommon::prepareForSnapshotChange(_ws);
- }
- }
- catch ( const WriteConflictException& wce ) {
- std::terminate();
+ // Make sure the re-fetched doc still matches the predicate.
+ if (_params.canonicalQuery &&
+ !_params.canonicalQuery->root()->matchesBSON(member->obj.value(), NULL)) {
+ // Doesn't match.
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
+ }
- if (_params.returnDeleted) {
- // Save a copy of the document that is about to get deleted.
- BSONObj deletedDoc = member->obj.value();
- member->obj.setValue(deletedDoc.getOwned());
- member->loc = RecordId();
- member->state = WorkingSetMember::OWNED_OBJ;
+ // TODO: Do we want to buffer docs and delete them in a group rather than
+ // saving/restoring state repeatedly?
+
+ try {
+ _child->saveState();
+ if (supportsDocLocking()) {
+ // Doc-locking engines require this after saveState() since they don't use
+ // invalidations.
+ WorkingSetCommon::prepareForSnapshotChange(_ws);
}
+ } catch (const WriteConflictException& wce) {
+ std::terminate();
+ }
- // Do the write, unless this is an explain.
- if (!_params.isExplain) {
- WriteUnitOfWork wunit(_txn);
+ if (_params.returnDeleted) {
+ // Save a copy of the document that is about to get deleted.
+ BSONObj deletedDoc = member->obj.value();
+ member->obj.setValue(deletedDoc.getOwned());
+ member->loc = RecordId();
+ member->state = WorkingSetMember::OWNED_OBJ;
+ }
- const bool deleteCappedOK = false;
- const bool deleteNoWarn = false;
- BSONObj deletedId;
+ // Do the write, unless this is an explain.
+ if (!_params.isExplain) {
+ WriteUnitOfWork wunit(_txn);
- _collection->deleteDocument(_txn, rloc, deleteCappedOK, deleteNoWarn,
- _params.shouldCallLogOp ? &deletedId : NULL);
+ const bool deleteCappedOK = false;
+ const bool deleteNoWarn = false;
+ BSONObj deletedId;
- wunit.commit();
- }
+ _collection->deleteDocument(_txn,
+ rloc,
+ deleteCappedOK,
+ deleteNoWarn,
+ _params.shouldCallLogOp ? &deletedId : NULL);
- ++_specificStats.docsDeleted;
- }
- catch ( const WriteConflictException& wce ) {
- _idRetrying = id;
- memberFreer.Dismiss(); // Keep this member around so we can retry deleting it.
- *out = WorkingSet::INVALID_ID;
- _commonStats.needYield++;
- return NEED_YIELD;
+ wunit.commit();
}
- // As restoreState may restore (recreate) cursors, cursors are tied to the
- // transaction in which they are created, and a WriteUnitOfWork is a
- // transaction, make sure to restore the state outside of the WritUnitOfWork.
- try {
- _child->restoreState(_txn);
- }
- catch ( const WriteConflictException& wce ) {
- // Note we don't need to retry anything in this case since the delete already
- // was committed. However, we still need to return the deleted document
- // (if it was requested).
- if (_params.returnDeleted) {
- // member->obj should refer to the deleted document.
- invariant(member->state == WorkingSetMember::OWNED_OBJ);
-
- _idReturning = id;
- // Keep this member around so that we can return it on the next work() call.
- memberFreer.Dismiss();
- }
- *out = WorkingSet::INVALID_ID;
- _commonStats.needYield++;
- return NEED_YIELD;
- }
+ ++_specificStats.docsDeleted;
+ } catch (const WriteConflictException& wce) {
+ _idRetrying = id;
+ memberFreer.Dismiss(); // Keep this member around so we can retry deleting it.
+ *out = WorkingSet::INVALID_ID;
+ _commonStats.needYield++;
+ return NEED_YIELD;
+ }
+        // Since restoreState() may restore (recreate) cursors, and cursors are tied to
+        // the transaction in which they are created (a WriteUnitOfWork is a
+        // transaction), make sure to restore the state outside of the WriteUnitOfWork.
+ try {
+ _child->restoreState(_txn);
+ } catch (const WriteConflictException& wce) {
+ // Note we don't need to retry anything in this case since the delete already
+ // was committed. However, we still need to return the deleted document
+ // (if it was requested).
if (_params.returnDeleted) {
// member->obj should refer to the deleted document.
invariant(member->state == WorkingSetMember::OWNED_OBJ);
- memberFreer.Dismiss(); // Keep this member around so we can return it.
- *out = id;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
-
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it failed, in which case
- // 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "delete stage failed to read in results from child";
- *out = WorkingSetCommon::allocateStatusMember(_ws, Status(ErrorCodes::InternalError,
- errmsg));
+ _idReturning = id;
+ // Keep this member around so that we can return it on the next work() call.
+ memberFreer.Dismiss();
}
- return status;
- }
- else if (PlanStage::NEED_TIME == status) {
- ++_commonStats.needTime;
+ *out = WorkingSet::INVALID_ID;
+ _commonStats.needYield++;
+ return NEED_YIELD;
}
- else if (PlanStage::NEED_YIELD == status) {
+
+ if (_params.returnDeleted) {
+ // member->obj should refer to the deleted document.
+ invariant(member->state == WorkingSetMember::OWNED_OBJ);
+
+ memberFreer.Dismiss(); // Keep this member around so we can return it.
*out = id;
- ++_commonStats.needYield;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
}
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it failed, in which case
+ // 'id' is valid. If ID is invalid, we create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ const std::string errmsg = "delete stage failed to read in results from child";
+ *out = WorkingSetCommon::allocateStatusMember(
+ _ws, Status(ErrorCodes::InternalError, errmsg));
+ }
return status;
+ } else if (PlanStage::NEED_TIME == status) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == status) {
+ *out = id;
+ ++_commonStats.needYield;
}
- void DeleteStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- _child->saveState();
- }
-
- void DeleteStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
-
- const NamespaceString& ns(_collection->ns());
- massert(28537,
- str::stream() << "Demoted from primary while removing from " << ns.ns(),
- !_params.shouldCallLogOp ||
+ return status;
+}
+
+void DeleteStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ _child->saveState();
+}
+
+void DeleteStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+ _child->restoreState(opCtx);
+
+ const NamespaceString& ns(_collection->ns());
+ massert(28537,
+ str::stream() << "Demoted from primary while removing from " << ns.ns(),
+ !_params.shouldCallLogOp ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns));
- }
-
- void DeleteStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
- }
-
- vector<PlanStage*> DeleteStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
-
- PlanStageStats* DeleteStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_DELETE));
- ret->specific.reset(new DeleteStats(_specificStats));
- ret->children.push_back(_child->getStats());
- return ret.release();
- }
-
- const CommonStats* DeleteStage::getCommonStats() const {
- return &_commonStats;
- }
-
- const SpecificStats* DeleteStage::getSpecificStats() const {
- return &_specificStats;
- }
-
- // static
- long long DeleteStage::getNumDeleted(PlanExecutor* exec) {
- invariant(exec->getRootStage()->isEOF());
- invariant(exec->getRootStage()->stageType() == STAGE_DELETE);
- DeleteStage* deleteStage = static_cast<DeleteStage*>(exec->getRootStage());
- const DeleteStats* deleteStats =
- static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
- return deleteStats->docsDeleted;
- }
+}
+
+void DeleteStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
+}
+
+vector<PlanStage*> DeleteStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* DeleteStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_DELETE));
+ ret->specific.reset(new DeleteStats(_specificStats));
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+const CommonStats* DeleteStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* DeleteStage::getSpecificStats() const {
+ return &_specificStats;
+}
+
+// static
+long long DeleteStage::getNumDeleted(PlanExecutor* exec) {
+ invariant(exec->getRootStage()->isEOF());
+ invariant(exec->getRootStage()->stageType() == STAGE_DELETE);
+ DeleteStage* deleteStage = static_cast<DeleteStage*>(exec->getRootStage());
+ const DeleteStats* deleteStats =
+ static_cast<const DeleteStats*>(deleteStage->getSpecificStats());
+ return deleteStats->docsDeleted;
+}
} // namespace mongo
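DeleteStage::work() above carries two slots of state across calls: _idRetrying, for a member whose delete hit a WriteConflictException and must be retried, and _idReturning, for an already-deleted document that still has to be handed back. The toy below sketches only the retry half of that bookkeeping, with int ids, a fake child, and a fake write that conflicts once; ToyDeleteStage and everything in it is invented for illustration.

#include <cstddef>
#include <iostream>
#include <vector>

// Toy model of the _idRetrying bookkeeping in DeleteStage::work() above. The
// fake delete "conflicts" once on id 2, so that id is stashed and retried on
// the next call instead of pulling a new id from the child.
struct ToyDeleteStage {
    std::vector<int> childOutput{1, 2, 3};  // stand-in for the child stage's results
    std::size_t next = 0;
    int idRetrying = -1;  // -1 plays the role of WorkingSet::INVALID_ID
    bool conflictedOnce = false;

    bool tryDelete(int id) {
        if (id == 2 && !conflictedOnce) {
            conflictedOnce = true;  // simulate a WriteConflictException
            return false;
        }
        return true;
    }

    // One call to work(); returns false once there is nothing left to do.
    bool work() {
        int id;
        if (idRetrying != -1) {  // retry the stashed id before asking the child
            id = idRetrying;
            idRetrying = -1;
        } else if (next < childOutput.size()) {
            id = childOutput[next++];
        } else {
            return false;  // EOF
        }
        if (!tryDelete(id)) {
            idRetrying = id;  // keep the member around; caller yields and calls work() again
            return true;
        }
        std::cout << "deleted " << id << "\n";
        return true;
    }
};

int main() {
    ToyDeleteStage stage;
    while (stage.work()) {
    }  // deletes 1, then 2 after one retried conflict, then 3
    return 0;
}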
diff --git a/src/mongo/db/exec/delete.h b/src/mongo/db/exec/delete.h
index 028cd086279..75556152680 100644
--- a/src/mongo/db/exec/delete.h
+++ b/src/mongo/db/exec/delete.h
@@ -34,109 +34,112 @@
namespace mongo {
- class CanonicalQuery;
- class OperationContext;
- class PlanExecutor;
+class CanonicalQuery;
+class OperationContext;
+class PlanExecutor;
- struct DeleteStageParams {
- DeleteStageParams() :
- isMulti(false),
- shouldCallLogOp(false),
- fromMigrate(false),
- isExplain(false),
- returnDeleted(false),
- canonicalQuery(NULL) { }
+struct DeleteStageParams {
+ DeleteStageParams()
+ : isMulti(false),
+ shouldCallLogOp(false),
+ fromMigrate(false),
+ isExplain(false),
+ returnDeleted(false),
+ canonicalQuery(NULL) {}
- // Should we delete all documents returned from the child (a "multi delete"), or at most one
- // (a "single delete")?
- bool isMulti;
+ // Should we delete all documents returned from the child (a "multi delete"), or at most one
+ // (a "single delete")?
+ bool isMulti;
- // Should we write each delete to the oplog?
- bool shouldCallLogOp;
+ // Should we write each delete to the oplog?
+ bool shouldCallLogOp;
- // Is this delete part of a migrate operation that is essentially like a no-op
- // when the cluster is observed by an external client.
- bool fromMigrate;
+    // Is this delete part of a migrate operation, which is essentially a no-op
+    // when the cluster is observed by an external client?
+ bool fromMigrate;
- // Are we explaining a delete command rather than actually executing it?
- bool isExplain;
+ // Are we explaining a delete command rather than actually executing it?
+ bool isExplain;
- // Should we return the document we just deleted?
- bool returnDeleted;
+ // Should we return the document we just deleted?
+ bool returnDeleted;
- // The parsed query predicate for this delete. Not owned here.
- CanonicalQuery* canonicalQuery;
- };
+ // The parsed query predicate for this delete. Not owned here.
+ CanonicalQuery* canonicalQuery;
+};
- /**
- * This stage delete documents by RecordId that are returned from its child. If the deleted
- * document was requested to be returned, then ADVANCED is returned after deleting a document.
- * Otherwise, NEED_TIME is returned after deleting a document.
- *
- * Callers of work() must be holding a write lock (and, for shouldCallLogOp=true deletes,
- * callers must have had the replication coordinator approve the write).
- */
- class DeleteStage : public PlanStage {
- MONGO_DISALLOW_COPYING(DeleteStage);
- public:
- DeleteStage(OperationContext* txn,
- const DeleteStageParams& params,
- WorkingSet* ws,
- Collection* collection,
- PlanStage* child);
- virtual ~DeleteStage();
+/**
+ * This stage deletes documents by RecordId that are returned from its child. If the deleted
+ * document was requested to be returned, then ADVANCED is returned after deleting a document.
+ * Otherwise, NEED_TIME is returned after deleting a document.
+ *
+ * Callers of work() must be holding a write lock (and, for shouldCallLogOp=true deletes,
+ * callers must have had the replication coordinator approve the write).
+ */
+class DeleteStage : public PlanStage {
+ MONGO_DISALLOW_COPYING(DeleteStage);
+
+public:
+ DeleteStage(OperationContext* txn,
+ const DeleteStageParams& params,
+ WorkingSet* ws,
+ Collection* collection,
+ PlanStage* child);
+ virtual ~DeleteStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_DELETE; }
+ virtual StageType stageType() const {
+ return STAGE_DELETE;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- /**
- * Extracts the number of documents deleted by the update plan 'exec'.
- *
- * Should only be called if the root plan stage of 'exec' is UPDATE and if 'exec' is EOF.
- */
- static long long getNumDeleted(PlanExecutor* exec);
+ /**
+     * Extracts the number of documents deleted by the delete plan 'exec'.
+     *
+     * Should only be called if the root plan stage of 'exec' is DELETE and if 'exec' is EOF.
+ */
+ static long long getNumDeleted(PlanExecutor* exec);
- private:
- // Transactional context. Not owned by us.
- OperationContext* _txn;
+private:
+ // Transactional context. Not owned by us.
+ OperationContext* _txn;
- DeleteStageParams _params;
+ DeleteStageParams _params;
- // Not owned by us.
- WorkingSet* _ws;
+ // Not owned by us.
+ WorkingSet* _ws;
- // Collection to operate on. Not owned by us. Can be NULL (if NULL, isEOF() will always
- // return true). If non-NULL, the lifetime of the collection must supersede that of the
- // stage.
- Collection* _collection;
+ // Collection to operate on. Not owned by us. Can be NULL (if NULL, isEOF() will always
+ // return true). If non-NULL, the lifetime of the collection must supersede that of the
+ // stage.
+ Collection* _collection;
- std::unique_ptr<PlanStage> _child;
+ std::unique_ptr<PlanStage> _child;
- // If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
- WorkingSetID _idRetrying;
+ // If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
+ WorkingSetID _idRetrying;
- // If not WorkingSet::INVALID_ID, we return this member to our caller.
- WorkingSetID _idReturning;
+ // If not WorkingSet::INVALID_ID, we return this member to our caller.
+ WorkingSetID _idReturning;
- // Stats
- CommonStats _commonStats;
- DeleteStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ DeleteStats _specificStats;
+};
} // namespace mongo
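The DeleteStageParams flags above drive two separate decisions inside work(): isMulti controls whether the stage keeps consuming its child after the first deletion, and returnDeleted controls whether a deletion yields ADVANCED (the deleted document is handed back) or NEED_TIME. Below is a minimal, self-contained sketch of that decision, using simplified stand-in types rather than the real PlanStage/WorkingSet interfaces; all names in it are illustrative only.

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// Illustrative stand-ins; not the real mongo types.
enum class StageState { ADVANCED, NEED_TIME, IS_EOF };

struct ParamsSketch {
    bool isMulti = false;        // delete every match, or stop after one
    bool returnDeleted = false;  // hand the deleted document back to the caller
};

// Toy delete stage: consumes matching "documents" from a child vector.
class ToyDeleteStage {
public:
    ToyDeleteStage(ParamsSketch p, std::vector<int> child) : _p(p), _child(std::move(child)) {}

    StageState work(int* out) {
        if (_done || _pos >= _child.size())
            return StageState::IS_EOF;
        int deleted = _child[_pos++];
        if (!_p.isMulti)
            _done = true;  // single delete: stop after the first document
        if (_p.returnDeleted) {
            *out = deleted;
            return StageState::ADVANCED;  // caller sees the deleted document
        }
        return StageState::NEED_TIME;  // deletion happened, nothing to return
    }

private:
    ParamsSketch _p;
    std::vector<int> _child;
    std::size_t _pos = 0;
    bool _done = false;
};

int main() {
    ParamsSketch params;
    params.isMulti = true;
    params.returnDeleted = true;

    ToyDeleteStage stage(params, {10, 20, 30});
    int doc = 0;
    long long numDeleted = 0;
    for (StageState s = stage.work(&doc); s != StageState::IS_EOF; s = stage.work(&doc))
        if (s == StageState::ADVANCED || s == StageState::NEED_TIME)
            ++numDeleted;  // in this toy, both states mean a document was deleted
    std::cout << "deleted " << numDeleted << " documents\n";  // prints: deleted 3 documents
}
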
diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp
index 24dcc36019a..7cbe5db389e 100644
--- a/src/mongo/db/exec/distinct_scan.cpp
+++ b/src/mongo/db/exec/distinct_scan.cpp
@@ -37,60 +37,62 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* DistinctScan::kStageType = "DISTINCT_SCAN";
-
- DistinctScan::DistinctScan(OperationContext* txn, const DistinctParams& params, WorkingSet* workingSet)
- : _txn(txn),
- _workingSet(workingSet),
- _descriptor(params.descriptor),
- _iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
- _params(params),
- _checker(&_params.bounds, _descriptor->keyPattern(), _params.direction),
- _commonStats(kStageType) {
-
- _specificStats.keyPattern = _params.descriptor->keyPattern();
- _specificStats.indexName = _params.descriptor->indexName();
- _specificStats.indexVersion = _params.descriptor->version();
-
- // Set up our initial seek. If there is no valid data, just mark as EOF.
- _commonStats.isEOF = !_checker.getStartSeekPoint(&_seekPoint);
+using std::unique_ptr;
+using std::vector;
+
+// static
+const char* DistinctScan::kStageType = "DISTINCT_SCAN";
+
+DistinctScan::DistinctScan(OperationContext* txn,
+ const DistinctParams& params,
+ WorkingSet* workingSet)
+ : _txn(txn),
+ _workingSet(workingSet),
+ _descriptor(params.descriptor),
+ _iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
+ _params(params),
+ _checker(&_params.bounds, _descriptor->keyPattern(), _params.direction),
+ _commonStats(kStageType) {
+ _specificStats.keyPattern = _params.descriptor->keyPattern();
+ _specificStats.indexName = _params.descriptor->indexName();
+ _specificStats.indexVersion = _params.descriptor->version();
+
+ // Set up our initial seek. If there is no valid data, just mark as EOF.
+ _commonStats.isEOF = !_checker.getStartSeekPoint(&_seekPoint);
+}
+
+PlanStage::StageState DistinctScan::work(WorkingSetID* out) {
+ ++_commonStats.works;
+ if (_commonStats.isEOF)
+ return PlanStage::IS_EOF;
+
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ boost::optional<IndexKeyEntry> kv;
+ try {
+ if (!_cursor)
+ _cursor = _iam->newCursor(_txn, _params.direction == 1);
+ kv = _cursor->seek(_seekPoint);
+ } catch (const WriteConflictException& wce) {
+ *out = WorkingSet::INVALID_ID;
+ return PlanStage::NEED_YIELD;
}
- PlanStage::StageState DistinctScan::work(WorkingSetID* out) {
- ++_commonStats.works;
- if (_commonStats.isEOF) return PlanStage::IS_EOF;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- boost::optional<IndexKeyEntry> kv;
- try {
- if (!_cursor) _cursor = _iam->newCursor(_txn, _params.direction == 1);
- kv = _cursor->seek(_seekPoint);
- }
- catch (const WriteConflictException& wce) {
- *out = WorkingSet::INVALID_ID;
- return PlanStage::NEED_YIELD;
- }
-
- if (!kv) {
- _commonStats.isEOF = true;
- return PlanStage::IS_EOF;
- }
+ if (!kv) {
+ _commonStats.isEOF = true;
+ return PlanStage::IS_EOF;
+ }
- ++_specificStats.keysExamined;
+ ++_specificStats.keysExamined;
- switch (_checker.checkKey(kv->key, &_seekPoint)) {
- case IndexBoundsChecker::MUST_ADVANCE:
+ switch (_checker.checkKey(kv->key, &_seekPoint)) {
+ case IndexBoundsChecker::MUST_ADVANCE:
// Try again next time. The checker has adjusted the _seekPoint.
++_commonStats.needTime;
return PlanStage::NEED_TIME;
- case IndexBoundsChecker::DONE:
+ case IndexBoundsChecker::DONE:
// There won't be a next time.
_commonStats.isEOF = true;
_cursor.reset();
@@ -99,8 +101,9 @@ namespace mongo {
case IndexBoundsChecker::VALID:
// Return this key. Adjust the _seekPoint so that it is exclusive on the field we
// are using.
-
- if (!kv->key.isOwned()) kv->key = kv->key.getOwned();
+
+ if (!kv->key.isOwned())
+ kv->key = kv->key.getOwned();
_seekPoint.keyPrefix = kv->key;
_seekPoint.prefixLen = _params.fieldNo + 1;
_seekPoint.prefixExclusive = true;
@@ -115,51 +118,53 @@ namespace mongo {
*out = id;
++_commonStats.advanced;
return PlanStage::ADVANCED;
- }
- invariant(false);
- }
-
- bool DistinctScan::isEOF() {
- return _commonStats.isEOF;
- }
-
- void DistinctScan::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
-
- // We always seek, so we don't care where the cursor is.
- if (_cursor) _cursor->saveUnpositioned();
- }
-
- void DistinctScan::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
-
- if (_cursor) _cursor->restore(opCtx);
- }
-
- void DistinctScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- }
-
- vector<PlanStage*> DistinctScan::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
- }
-
- PlanStageStats* DistinctScan::getStats() {
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_DISTINCT_SCAN));
- ret->specific.reset(new DistinctScanStats(_specificStats));
- return ret.release();
- }
-
- const CommonStats* DistinctScan::getCommonStats() const {
- return &_commonStats;
- }
-
- const SpecificStats* DistinctScan::getSpecificStats() const {
- return &_specificStats;
}
+ invariant(false);
+}
+
+bool DistinctScan::isEOF() {
+ return _commonStats.isEOF;
+}
+
+void DistinctScan::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+
+ // We always seek, so we don't care where the cursor is.
+ if (_cursor)
+ _cursor->saveUnpositioned();
+}
+
+void DistinctScan::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+
+ if (_cursor)
+ _cursor->restore(opCtx);
+}
+
+void DistinctScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+}
+
+vector<PlanStage*> DistinctScan::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
+
+PlanStageStats* DistinctScan::getStats() {
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_DISTINCT_SCAN));
+ ret->specific.reset(new DistinctScanStats(_specificStats));
+ return ret.release();
+}
+
+const CommonStats* DistinctScan::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* DistinctScan::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
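work() above is one step of the standard PlanStage state machine: it either hands back a key (ADVANCED), asks to be called again after adjusting the seek point (NEED_TIME), requests a yield on a write conflict (NEED_YIELD), or reports IS_EOF. The following self-contained sketch, with simplified stand-in types rather than the real PlanStage/WorkingSet interfaces, shows how a caller drives such a stage and how skipping duplicate values of the distinct field maps to NEED_TIME.

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in for PlanStage::StageState; illustrative only.
enum class StageState { ADVANCED, NEED_TIME, IS_EOF };

// Toy distinct scan over a sorted list of keys: duplicates are skipped with
// NEED_TIME, mirroring how DistinctScan adjusts _seekPoint and tries again.
class ToyDistinctScan {
public:
    explicit ToyDistinctScan(std::vector<int> sortedKeys) : _keys(std::move(sortedKeys)) {}

    StageState work(int* out) {
        if (_pos >= _keys.size())
            return StageState::IS_EOF;
        if (_pos > 0 && _keys[_pos] == _keys[_pos - 1]) {
            ++_pos;                        // same value as last time: advance the seek point
            return StageState::NEED_TIME;  // "try again next time"
        }
        *out = _keys[_pos++];
        return StageState::ADVANCED;
    }

private:
    std::vector<int> _keys;
    std::size_t _pos = 0;
};

int main() {
    ToyDistinctScan scan({1, 1, 2, 3, 3, 3});
    int value = 0;
    // Drive the stage to completion the way a plan executor would.
    for (StageState s = scan.work(&value); s != StageState::IS_EOF; s = scan.work(&value)) {
        if (s == StageState::ADVANCED)
            std::cout << value << '\n';  // prints 1, 2, 3
        // On NEED_TIME we simply call work() again.
    }
}
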
diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h
index d87f5249535..10f850d5a2a 100644
--- a/src/mongo/db/exec/distinct_scan.h
+++ b/src/mongo/db/exec/distinct_scan.h
@@ -39,87 +39,87 @@
namespace mongo {
- class IndexAccessMethod;
- class IndexDescriptor;
- class WorkingSet;
+class IndexAccessMethod;
+class IndexDescriptor;
+class WorkingSet;
- struct DistinctParams {
- DistinctParams() : descriptor(NULL),
- direction(1),
- fieldNo(0) { }
+struct DistinctParams {
+ DistinctParams() : descriptor(NULL), direction(1), fieldNo(0) {}
- // What index are we traversing?
- const IndexDescriptor* descriptor;
+ // What index are we traversing?
+ const IndexDescriptor* descriptor;
- // And in what direction?
- int direction;
+ // And in what direction?
+ int direction;
- // What are the bounds?
- IndexBounds bounds;
+ // What are the bounds?
+ IndexBounds bounds;
- // What field in the index's key pattern is the one we're distinct-ing over?
- // For example:
- // If we have an index {a:1, b:1} we could use it to distinct over either 'a' or 'b'.
- // If we distinct over 'a' the position is 0.
- // If we distinct over 'b' the position is 1.
- int fieldNo;
- };
+ // What field in the index's key pattern is the one we're distinct-ing over?
+ // For example:
+ // If we have an index {a:1, b:1} we could use it to distinct over either 'a' or 'b'.
+ // If we distinct over 'a' the position is 0.
+ // If we distinct over 'b' the position is 1.
+ int fieldNo;
+};
- /**
- * Used by the distinct command. Executes a mutated index scan over the provided bounds.
- * However, rather than looking at every key in the bounds, it skips to the next value of the
- * _params.fieldNo-th indexed field. This is because distinct only cares about distinct values
- * for that field, so there is no point in examining all keys with the same value for that
- * field.
- *
- * Only created through the getExecutorDistinct path. See db/query/get_executor.cpp
- */
- class DistinctScan : public PlanStage {
- public:
- DistinctScan(OperationContext* txn, const DistinctParams& params, WorkingSet* workingSet);
- virtual ~DistinctScan() { }
+/**
+ * Used by the distinct command. Executes a mutated index scan over the provided bounds.
+ * However, rather than looking at every key in the bounds, it skips to the next value of the
+ * _params.fieldNo-th indexed field. This is because distinct only cares about distinct values
+ * for that field, so there is no point in examining all keys with the same value for that
+ * field.
+ *
+ * Only created through the getExecutorDistinct path. See db/query/get_executor.cpp
+ */
+class DistinctScan : public PlanStage {
+public:
+ DistinctScan(OperationContext* txn, const DistinctParams& params, WorkingSet* workingSet);
+ virtual ~DistinctScan() {}
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_DISTINCT_SCAN; }
+ virtual StageType stageType() const {
+ return STAGE_DISTINCT_SCAN;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
+private:
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
- // The WorkingSet we annotate with results. Not owned by us.
- WorkingSet* _workingSet;
+ // The WorkingSet we annotate with results. Not owned by us.
+ WorkingSet* _workingSet;
- // Index access.
- const IndexDescriptor* _descriptor; // owned by Collection -> IndexCatalog
- const IndexAccessMethod* _iam; // owned by Collection -> IndexCatalog
+ // Index access.
+ const IndexDescriptor* _descriptor; // owned by Collection -> IndexCatalog
+ const IndexAccessMethod* _iam; // owned by Collection -> IndexCatalog
- // The cursor we use to navigate the tree.
- std::unique_ptr<SortedDataInterface::Cursor> _cursor;
+ // The cursor we use to navigate the tree.
+ std::unique_ptr<SortedDataInterface::Cursor> _cursor;
- DistinctParams _params;
+ DistinctParams _params;
- // _checker gives us our start key and ensures we stay in bounds.
- IndexBoundsChecker _checker;
- IndexSeekPoint _seekPoint;
+ // _checker gives us our start key and ensures we stay in bounds.
+ IndexBoundsChecker _checker;
+ IndexSeekPoint _seekPoint;
- // Stats
- CommonStats _commonStats;
- DistinctScanStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ DistinctScanStats _specificStats;
+};
} // namespace mongo
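The fieldNo comment above is the important detail for compound indexes: the scan resumes from a seek point that is exclusive on the fieldNo-th key part, so every key sharing that prefix is skipped. Here is a rough stand-alone model of that skip over a compound index {a: 1, b: 1}, using plain sorted pairs instead of real index keys and IndexSeekPoint; it is an illustration under those assumptions, not the actual scan code.

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main() {
    // Compound index {a: 1, b: 1} modelled as sorted (a, b) pairs; distinct over 'a'
    // means fieldNo == 0, so the scan skips every key sharing the current 'a' value.
    std::vector<std::pair<int, int>> index = {{1, 5}, {1, 9}, {2, 1}, {2, 2}, {3, 7}};

    std::size_t i = 0;
    while (i < index.size()) {
        int a = index[i].first;
        std::cout << "distinct a = " << a << '\n';  // prints 1, 2, 3
        // The "prefix exclusive" seek: jump past every key whose fieldNo-th part equals 'a',
        // analogous to setting _seekPoint.prefixLen = fieldNo + 1 above.
        while (i < index.size() && index[i].first == a)
            ++i;
    }
}
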
diff --git a/src/mongo/db/exec/eof.cpp b/src/mongo/db/exec/eof.cpp
index 1766c9b7a3e..4318e6ad9f6 100644
--- a/src/mongo/db/exec/eof.cpp
+++ b/src/mongo/db/exec/eof.cpp
@@ -34,54 +34,54 @@
namespace mongo {
- using std::vector;
+using std::vector;
- // static
- const char* EOFStage::kStageType = "EOF";
+// static
+const char* EOFStage::kStageType = "EOF";
- EOFStage::EOFStage() : _commonStats(kStageType) { }
+EOFStage::EOFStage() : _commonStats(kStageType) {}
- EOFStage::~EOFStage() { }
+EOFStage::~EOFStage() {}
- bool EOFStage::isEOF() {
- return true;
- }
+bool EOFStage::isEOF() {
+ return true;
+}
- PlanStage::StageState EOFStage::work(WorkingSetID* out) {
- ++_commonStats.works;
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
- return PlanStage::IS_EOF;
- }
+PlanStage::StageState EOFStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+ return PlanStage::IS_EOF;
+}
- void EOFStage::saveState() {
- ++_commonStats.yields;
- }
+void EOFStage::saveState() {
+ ++_commonStats.yields;
+}
- void EOFStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- }
+void EOFStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+}
- void EOFStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- }
+void EOFStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+}
- vector<PlanStage*> EOFStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
- }
+vector<PlanStage*> EOFStage::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
- PlanStageStats* EOFStage::getStats() {
- _commonStats.isEOF = isEOF();
- return new PlanStageStats(_commonStats, STAGE_EOF);
- }
+PlanStageStats* EOFStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ return new PlanStageStats(_commonStats, STAGE_EOF);
+}
- const CommonStats* EOFStage::getCommonStats() const {
- return &_commonStats;
- }
+const CommonStats* EOFStage::getCommonStats() const {
+ return &_commonStats;
+}
- const SpecificStats* EOFStage::getSpecificStats() const {
- return nullptr;
- }
+const SpecificStats* EOFStage::getSpecificStats() const {
+ return nullptr;
+}
} // namespace mongo
diff --git a/src/mongo/db/exec/eof.h b/src/mongo/db/exec/eof.h
index 247453d1d03..c81b6fdefc3 100644
--- a/src/mongo/db/exec/eof.h
+++ b/src/mongo/db/exec/eof.h
@@ -33,36 +33,38 @@
namespace mongo {
- /**
- * This stage just returns EOF immediately.
- */
- class EOFStage : public PlanStage {
- public:
- EOFStage();
+/**
+ * This stage just returns EOF immediately.
+ */
+class EOFStage : public PlanStage {
+public:
+ EOFStage();
- virtual ~EOFStage();
+ virtual ~EOFStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_EOF; }
+ virtual StageType stageType() const {
+ return STAGE_EOF;
+ }
- PlanStageStats* getStats();
+ PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- CommonStats _commonStats;
- };
+private:
+ CommonStats _commonStats;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp
index 817bc72fc8d..cab7655f2f0 100644
--- a/src/mongo/db/exec/fetch.cpp
+++ b/src/mongo/db/exec/fetch.cpp
@@ -41,216 +41,214 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* FetchStage::kStageType = "FETCH";
-
- FetchStage::FetchStage(OperationContext* txn,
- WorkingSet* ws,
- PlanStage* child,
- const MatchExpression* filter,
- const Collection* collection)
- : _txn(txn),
- _collection(collection),
- _ws(ws),
- _child(child),
- _filter(filter),
- _idRetrying(WorkingSet::INVALID_ID),
- _commonStats(kStageType) { }
-
- FetchStage::~FetchStage() { }
-
- bool FetchStage::isEOF() {
- if (WorkingSet::INVALID_ID != _idRetrying) {
- // We asked the parent for a page-in, but still haven't had a chance to return the
- // paged in document
- return false;
- }
-
- return _child->isEOF();
+using std::unique_ptr;
+using std::vector;
+
+// static
+const char* FetchStage::kStageType = "FETCH";
+
+FetchStage::FetchStage(OperationContext* txn,
+ WorkingSet* ws,
+ PlanStage* child,
+ const MatchExpression* filter,
+ const Collection* collection)
+ : _txn(txn),
+ _collection(collection),
+ _ws(ws),
+ _child(child),
+ _filter(filter),
+ _idRetrying(WorkingSet::INVALID_ID),
+ _commonStats(kStageType) {}
+
+FetchStage::~FetchStage() {}
+
+bool FetchStage::isEOF() {
+ if (WorkingSet::INVALID_ID != _idRetrying) {
+ // We asked the parent for a page-in, but still haven't had a chance to return the
+ // paged in document
+ return false;
}
- PlanStage::StageState FetchStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+ return _child->isEOF();
+}
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+PlanStage::StageState FetchStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- if (isEOF()) { return PlanStage::IS_EOF; }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- // Either retry the last WSM we worked on or get a new one from our child.
- WorkingSetID id;
- StageState status;
- if (_idRetrying == WorkingSet::INVALID_ID) {
- status = _child->work(&id);
- }
- else {
- status = ADVANCED;
- id = _idRetrying;
- _idRetrying = WorkingSet::INVALID_ID;
- }
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
- if (PlanStage::ADVANCED == status) {
- WorkingSetMember* member = _ws->get(id);
+ // Either retry the last WSM we worked on or get a new one from our child.
+ WorkingSetID id;
+ StageState status;
+ if (_idRetrying == WorkingSet::INVALID_ID) {
+ status = _child->work(&id);
+ } else {
+ status = ADVANCED;
+ id = _idRetrying;
+ _idRetrying = WorkingSet::INVALID_ID;
+ }
- // If there's an obj there, there is no fetching to perform.
- if (member->hasObj()) {
- ++_specificStats.alreadyHasObj;
- }
- else {
- // We need a valid loc to fetch from and this is the only state that has one.
- verify(WorkingSetMember::LOC_AND_IDX == member->state);
- verify(member->hasLoc());
-
- try {
- if (!_cursor) _cursor = _collection->getCursor(_txn);
-
- if (auto fetcher = _cursor->fetcherForId(member->loc)) {
- // There's something to fetch. Hand the fetcher off to the WSM, and pass up
- // a fetch request.
- _idRetrying = id;
- member->setFetcher(fetcher.release());
- *out = id;
- _commonStats.needYield++;
- return NEED_YIELD;
- }
-
- // The doc is already in memory, so go ahead and grab it. Now we have a RecordId
- // as well as an unowned object
- if (!WorkingSetCommon::fetch(_txn, member, _cursor)) {
- _ws->free(id);
- _commonStats.needTime++;
- return NEED_TIME;
- }
- }
- catch (const WriteConflictException& wce) {
+ if (PlanStage::ADVANCED == status) {
+ WorkingSetMember* member = _ws->get(id);
+
+ // If there's an obj there, there is no fetching to perform.
+ if (member->hasObj()) {
+ ++_specificStats.alreadyHasObj;
+ } else {
+ // We need a valid loc to fetch from and this is the only state that has one.
+ verify(WorkingSetMember::LOC_AND_IDX == member->state);
+ verify(member->hasLoc());
+
+ try {
+ if (!_cursor)
+ _cursor = _collection->getCursor(_txn);
+
+ if (auto fetcher = _cursor->fetcherForId(member->loc)) {
+ // There's something to fetch. Hand the fetcher off to the WSM, and pass up
+ // a fetch request.
_idRetrying = id;
- *out = WorkingSet::INVALID_ID;
+ member->setFetcher(fetcher.release());
+ *out = id;
_commonStats.needYield++;
return NEED_YIELD;
}
- }
- return returnIfMatches(member, id, out);
- }
- else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "fetch stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
+ // The doc is already in memory, so go ahead and grab it. Now we have a RecordId
+ // as well as an unowned object
+ if (!WorkingSetCommon::fetch(_txn, member, _cursor)) {
+ _ws->free(id);
+ _commonStats.needTime++;
+ return NEED_TIME;
+ }
+ } catch (const WriteConflictException& wce) {
+ _idRetrying = id;
+ *out = WorkingSet::INVALID_ID;
+ _commonStats.needYield++;
+ return NEED_YIELD;
}
- return status;
- }
- else if (PlanStage::NEED_TIME == status) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == status) {
- ++_commonStats.needYield;
- *out = id;
}
+ return returnIfMatches(member, id, out);
+ } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "fetch stage failed to read in results from child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ }
return status;
+ } else if (PlanStage::NEED_TIME == status) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == status) {
+ ++_commonStats.needYield;
+ *out = id;
}
- void FetchStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- if (_cursor) _cursor->saveUnpositioned();
- _child->saveState();
- }
-
- void FetchStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
- if (_cursor) _cursor->restore(opCtx);
- _child->restoreState(opCtx);
- }
-
- void FetchStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
-
- _child->invalidate(txn, dl, type);
-
- // It's possible that the loc getting invalidated is the one we're about to
- // fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
- if (WorkingSet::INVALID_ID != _idRetrying) {
- WorkingSetMember* member = _ws->get(_idRetrying);
- if (member->hasLoc() && (member->loc == dl)) {
- // Fetch it now and kill the diskloc.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- }
+ return status;
+}
+
+void FetchStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ if (_cursor)
+ _cursor->saveUnpositioned();
+ _child->saveState();
+}
+
+void FetchStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+ if (_cursor)
+ _cursor->restore(opCtx);
+ _child->restoreState(opCtx);
+}
+
+void FetchStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+
+ _child->invalidate(txn, dl, type);
+
+ // It's possible that the loc getting invalidated is the one we're about to
+ // fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
+ if (WorkingSet::INVALID_ID != _idRetrying) {
+ WorkingSetMember* member = _ws->get(_idRetrying);
+ if (member->hasLoc() && (member->loc == dl)) {
+ // Fetch it now and kill the diskloc.
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
}
}
-
- PlanStage::StageState FetchStage::returnIfMatches(WorkingSetMember* member,
- WorkingSetID memberID,
- WorkingSetID* out) {
- // We consider "examining a document" to be every time that we pass a document through
- // a filter by calling Filter::passes(...) below. Therefore, the 'docsExamined' metric
- // is not always equal to the number of documents that were fetched from the collection.
- // In particular, we can sometimes generate plans which have two fetch stages. The first
- // one actually grabs the document from the collection, and the second passes the
- // document through a second filter.
- //
- // One common example of this is geoNear. Suppose that a geoNear plan is searching an
- // annulus to find 2dsphere-indexed documents near some point (x, y) on the globe.
- // After fetching documents within geo hashes that intersect this annulus, the docs are
- // fetched and filtered to make sure that they really do fall into this annulus. However,
- // the user might also want to find only those documents for which accommodationType==
- // "restaurant". The planner will add a second fetch stage to filter by this non-geo
- // predicate.
- ++_specificStats.docsExamined;
-
- if (Filter::passes(member, _filter)) {
- *out = memberID;
-
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
- else {
- _ws->free(memberID);
-
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
+}
+
+PlanStage::StageState FetchStage::returnIfMatches(WorkingSetMember* member,
+ WorkingSetID memberID,
+ WorkingSetID* out) {
+ // We consider "examining a document" to be every time that we pass a document through
+ // a filter by calling Filter::passes(...) below. Therefore, the 'docsExamined' metric
+ // is not always equal to the number of documents that were fetched from the collection.
+ // In particular, we can sometimes generate plans which have two fetch stages. The first
+ // one actually grabs the document from the collection, and the second passes the
+ // document through a second filter.
+ //
+ // One common example of this is geoNear. Suppose that a geoNear plan is searching an
+ // annulus to find 2dsphere-indexed documents near some point (x, y) on the globe.
+ // After fetching documents within geo hashes that intersect this annulus, the docs are
+ // fetched and filtered to make sure that they really do fall into this annulus. However,
+ // the user might also want to find only those documents for which accommodationType==
+ // "restaurant". The planner will add a second fetch stage to filter by this non-geo
+ // predicate.
+ ++_specificStats.docsExamined;
+
+ if (Filter::passes(member, _filter)) {
+ *out = memberID;
+
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ } else {
+ _ws->free(memberID);
+
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
-
- vector<PlanStage*> FetchStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
+}
+
+vector<PlanStage*> FetchStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* FetchStage::getStats() {
+ _commonStats.isEOF = isEOF();
+
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
}
- PlanStageStats* FetchStage::getStats() {
- _commonStats.isEOF = isEOF();
-
- // Add a BSON representation of the filter to the stats tree, if there is one.
- if (NULL != _filter) {
- BSONObjBuilder bob;
- _filter->toBSON(&bob);
- _commonStats.filter = bob.obj();
- }
-
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_FETCH));
- ret->specific.reset(new FetchStats(_specificStats));
- ret->children.push_back(_child->getStats());
- return ret.release();
- }
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_FETCH));
+ ret->specific.reset(new FetchStats(_specificStats));
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
- const CommonStats* FetchStage::getCommonStats() const {
- return &_commonStats;
- }
+const CommonStats* FetchStage::getCommonStats() const {
+ return &_commonStats;
+}
- const SpecificStats* FetchStage::getSpecificStats() const {
- return &_specificStats;
- }
+const SpecificStats* FetchStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
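Two details of work() above are easy to miss: when the record needs a page-in or the fetch hits a WriteConflictException, the stage stashes the current working-set id in _idRetrying and returns NEED_YIELD, and the next work() call retries that same member instead of asking the child for another one. Below is a self-contained sketch of that stash-and-retry pattern; the stage, ids, and simulated conflict are stand-ins, not the real WorkingSet/RecordCursor machinery.

#include <iostream>
#include <optional>
#include <queue>
#include <utility>

enum class StageState { ADVANCED, NEED_YIELD, IS_EOF };

// Toy fetch stage: even-numbered ids "conflict" once before they can be fetched.
// On a conflict the id is stashed (like _idRetrying) and NEED_YIELD is returned;
// the next work() call retries that id instead of consuming the child again.
class ToyFetchStage {
public:
    explicit ToyFetchStage(std::queue<int> child) : _child(std::move(child)) {}

    StageState work(int* out) {
        int id;
        if (_retrying) {
            id = *_retrying;  // retry the stashed member first
            _retrying.reset();
        } else if (_child.empty()) {
            return StageState::IS_EOF;
        } else {
            id = _child.front();
            _child.pop();
        }

        if (id % 2 == 0 && !_conflicted) {  // simulate one write conflict
            _conflicted = true;
            _retrying = id;  // stash it, as _idRetrying does above
            return StageState::NEED_YIELD;
        }
        _conflicted = false;
        *out = id;
        return StageState::ADVANCED;
    }

private:
    std::queue<int> _child;
    std::optional<int> _retrying;
    bool _conflicted = false;
};

int main() {
    std::queue<int> child;
    for (int id : {1, 2, 3})
        child.push(id);

    ToyFetchStage stage(std::move(child));
    int doc = 0;
    for (StageState s = stage.work(&doc); s != StageState::IS_EOF; s = stage.work(&doc)) {
        if (s == StageState::ADVANCED)
            std::cout << "fetched record " << doc << '\n';
        // On NEED_YIELD a real executor would release locks, then call work() again.
    }
}
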
diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h
index b43a38bb7eb..5fba058b730 100644
--- a/src/mongo/db/exec/fetch.h
+++ b/src/mongo/db/exec/fetch.h
@@ -37,75 +37,75 @@
namespace mongo {
- class RecordCursor;
+class RecordCursor;
- /**
- * This stage turns a RecordId into a BSONObj.
- *
- * In WorkingSetMember terms, it transitions from LOC_AND_IDX to LOC_AND_UNOWNED_OBJ by reading
- * the record at the provided loc. Returns verbatim any data that already has an object.
- *
- * Preconditions: Valid RecordId.
- */
- class FetchStage : public PlanStage {
- public:
- FetchStage(OperationContext* txn,
- WorkingSet* ws,
- PlanStage* child,
- const MatchExpression* filter,
- const Collection* collection);
-
- virtual ~FetchStage();
+/**
+ * This stage turns a RecordId into a BSONObj.
+ *
+ * In WorkingSetMember terms, it transitions from LOC_AND_IDX to LOC_AND_UNOWNED_OBJ by reading
+ * the record at the provided loc. Returns verbatim any data that already has an object.
+ *
+ * Preconditions: Valid RecordId.
+ */
+class FetchStage : public PlanStage {
+public:
+ FetchStage(OperationContext* txn,
+ WorkingSet* ws,
+ PlanStage* child,
+ const MatchExpression* filter,
+ const Collection* collection);
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual ~FetchStage();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual StageType stageType() const { return STAGE_FETCH; }
+ virtual std::vector<PlanStage*> getChildren() const;
- PlanStageStats* getStats();
+ virtual StageType stageType() const {
+ return STAGE_FETCH;
+ }
- virtual const CommonStats* getCommonStats() const;
+ PlanStageStats* getStats();
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const CommonStats* getCommonStats() const;
- static const char* kStageType;
+ virtual const SpecificStats* getSpecificStats() const;
- private:
+ static const char* kStageType;
- /**
- * If the member (with id memberID) passes our filter, set *out to memberID and return that
- * ADVANCED. Otherwise, free memberID and return NEED_TIME.
- */
- StageState returnIfMatches(WorkingSetMember* member, WorkingSetID memberID,
- WorkingSetID* out);
+private:
+ /**
+ * If the member (with id memberID) passes our filter, set *out to memberID and return that
+ * ADVANCED. Otherwise, free memberID and return NEED_TIME.
+ */
+ StageState returnIfMatches(WorkingSetMember* member, WorkingSetID memberID, WorkingSetID* out);
- OperationContext* _txn;
+ OperationContext* _txn;
- // Collection which is used by this stage. Used to resolve record ids retrieved by child
- // stages. The lifetime of the collection must supersede that of the stage.
- const Collection* _collection;
- // Used to fetch Records from _collection.
- std::unique_ptr<RecordCursor> _cursor;
+ // Collection which is used by this stage. Used to resolve record ids retrieved by child
+ // stages. The lifetime of the collection must supersede that of the stage.
+ const Collection* _collection;
+ // Used to fetch Records from _collection.
+ std::unique_ptr<RecordCursor> _cursor;
- // _ws is not owned by us.
- WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
+ // _ws is not owned by us.
+ WorkingSet* _ws;
+ std::unique_ptr<PlanStage> _child;
- // The filter is not owned by us.
- const MatchExpression* _filter;
+ // The filter is not owned by us.
+ const MatchExpression* _filter;
- // If not Null, we use this rather than asking our child what to do next.
- WorkingSetID _idRetrying;
+    // If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
+ WorkingSetID _idRetrying;
- // Stats
- CommonStats _commonStats;
- FetchStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ FetchStats _specificStats;
+};
} // namespace mongo
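As the class comment says, the stage's whole job is turning a RecordId coming out of its child (typically an index scan) into the document stored at that location. The toy model below shows that composition, with a map standing in for the collection and a vector of ids standing in for the child stage; the names and types are hypothetical, not the real API.

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
    // "Collection": record id -> stored document.
    std::map<int, std::string> collection = {{7, "{a: 1}"}, {9, "{a: 2}"}, {12, "{a: 3}"}};

    // "Child stage" output: record ids produced by an index scan.
    std::vector<int> childOutput = {9, 7};

    // FETCH step: resolve each record id to its document before handing it up.
    for (int recordId : childOutput) {
        auto it = collection.find(recordId);
        if (it == collection.end())
            continue;  // invalidated between index scan and fetch; skip it
        std::cout << "recordId " << recordId << " -> " << it->second << '\n';
    }
}
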
diff --git a/src/mongo/db/exec/filter.h b/src/mongo/db/exec/filter.h
index 802c68993e5..e23e116dfd9 100644
--- a/src/mongo/db/exec/filter.h
+++ b/src/mongo/db/exec/filter.h
@@ -34,82 +34,37 @@
namespace mongo {
- /**
- * The MatchExpression uses the MatchableDocument interface to see if a document satisfies the
- * expression. This wraps a WorkingSetMember in the MatchableDocument interface so that any of
- * the WorkingSetMember's various types can be tested to see if they satisfy an expression.
- */
- class WorkingSetMatchableDocument : public MatchableDocument {
- public:
- WorkingSetMatchableDocument(WorkingSetMember* wsm) : _wsm(wsm) { }
- virtual ~WorkingSetMatchableDocument() { }
-
- // This is only called by a $where query. The query system must be smart enough to realize
- // that it should do a fetch beforehand.
- BSONObj toBSON() const {
- invariant(_wsm->hasObj());
- return _wsm->obj.value();
- }
-
- virtual ElementIterator* allocateIterator(const ElementPath* path) const {
- // BSONElementIterator does some interesting things with arrays that I don't think
- // SimpleArrayElementIterator does.
- if (_wsm->hasObj()) {
- return new BSONElementIterator(path, _wsm->obj.value());
- }
-
- // NOTE: This (kind of) duplicates code in WorkingSetMember::getFieldDotted.
- // Keep in sync w/that.
- // Find the first field in the index key data described by path and return an iterator
- // over it.
- for (size_t i = 0; i < _wsm->keyData.size(); ++i) {
- BSONObjIterator keyPatternIt(_wsm->keyData[i].indexKeyPattern);
- BSONObjIterator keyDataIt(_wsm->keyData[i].keyData);
-
- while (keyPatternIt.more()) {
- BSONElement keyPatternElt = keyPatternIt.next();
- invariant(keyDataIt.more());
- BSONElement keyDataElt = keyDataIt.next();
-
- if (path->fieldRef().equalsDottedField(keyPatternElt.fieldName())) {
- if (Array == keyDataElt.type()) {
- return new SimpleArrayElementIterator(keyDataElt, true);
- }
- else {
- return new SingleElementElementIterator(keyDataElt);
- }
- }
- }
- }
-
- // This should not happen.
- massert(16920, "trying to match on unknown field: " + path->fieldRef().dottedField().toString(),
- 0);
-
- return new SingleElementElementIterator(BSONElement());
- }
-
- virtual void releaseIterator( ElementIterator* iterator ) const {
- delete iterator;
- }
-
- private:
- WorkingSetMember* _wsm;
- };
-
- class IndexKeyMatchableDocument : public MatchableDocument {
- public:
- IndexKeyMatchableDocument(const BSONObj& key,
- const BSONObj& keyPattern)
- : _keyPattern(keyPattern), _key(key) { }
-
- BSONObj toBSON() const {
- return _key;
+/**
+ * The MatchExpression uses the MatchableDocument interface to see if a document satisfies the
+ * expression. This wraps a WorkingSetMember in the MatchableDocument interface so that any of
+ * the WorkingSetMember's various types can be tested to see if they satisfy an expression.
+ */
+class WorkingSetMatchableDocument : public MatchableDocument {
+public:
+ WorkingSetMatchableDocument(WorkingSetMember* wsm) : _wsm(wsm) {}
+ virtual ~WorkingSetMatchableDocument() {}
+
+ // This is only called by a $where query. The query system must be smart enough to realize
+ // that it should do a fetch beforehand.
+ BSONObj toBSON() const {
+ invariant(_wsm->hasObj());
+ return _wsm->obj.value();
+ }
+
+ virtual ElementIterator* allocateIterator(const ElementPath* path) const {
+ // BSONElementIterator does some interesting things with arrays that I don't think
+ // SimpleArrayElementIterator does.
+ if (_wsm->hasObj()) {
+ return new BSONElementIterator(path, _wsm->obj.value());
}
- virtual ElementIterator* allocateIterator(const ElementPath* path) const {
- BSONObjIterator keyPatternIt(_keyPattern);
- BSONObjIterator keyDataIt(_key);
+ // NOTE: This (kind of) duplicates code in WorkingSetMember::getFieldDotted.
+ // Keep in sync w/that.
+ // Find the first field in the index key data described by path and return an iterator
+ // over it.
+ for (size_t i = 0; i < _wsm->keyData.size(); ++i) {
+ BSONObjIterator keyPatternIt(_wsm->keyData[i].indexKeyPattern);
+ BSONObjIterator keyDataIt(_wsm->keyData[i].keyData);
while (keyPatternIt.more()) {
BSONElement keyPatternElt = keyPatternIt.next();
@@ -119,53 +74,99 @@ namespace mongo {
if (path->fieldRef().equalsDottedField(keyPatternElt.fieldName())) {
if (Array == keyDataElt.type()) {
return new SimpleArrayElementIterator(keyDataElt, true);
- }
- else {
+ } else {
return new SingleElementElementIterator(keyDataElt);
}
}
}
-
- // Planning should not let this happen.
- massert(17409,
- "trying to match on unknown field: " + path->fieldRef().dottedField().toString(),
- 0);
-
- return new SingleElementElementIterator(BSONElement());
}
- virtual void releaseIterator(ElementIterator* iterator) const {
- delete iterator;
+ // This should not happen.
+ massert(16920,
+ "trying to match on unknown field: " + path->fieldRef().dottedField().toString(),
+ 0);
+
+ return new SingleElementElementIterator(BSONElement());
+ }
+
+ virtual void releaseIterator(ElementIterator* iterator) const {
+ delete iterator;
+ }
+
+private:
+ WorkingSetMember* _wsm;
+};
+
+class IndexKeyMatchableDocument : public MatchableDocument {
+public:
+ IndexKeyMatchableDocument(const BSONObj& key, const BSONObj& keyPattern)
+ : _keyPattern(keyPattern), _key(key) {}
+
+ BSONObj toBSON() const {
+ return _key;
+ }
+
+ virtual ElementIterator* allocateIterator(const ElementPath* path) const {
+ BSONObjIterator keyPatternIt(_keyPattern);
+ BSONObjIterator keyDataIt(_key);
+
+ while (keyPatternIt.more()) {
+ BSONElement keyPatternElt = keyPatternIt.next();
+ invariant(keyDataIt.more());
+ BSONElement keyDataElt = keyDataIt.next();
+
+ if (path->fieldRef().equalsDottedField(keyPatternElt.fieldName())) {
+ if (Array == keyDataElt.type()) {
+ return new SimpleArrayElementIterator(keyDataElt, true);
+ } else {
+ return new SingleElementElementIterator(keyDataElt);
+ }
+ }
}
- private:
- BSONObj _keyPattern;
- BSONObj _key;
- };
+ // Planning should not let this happen.
+ massert(17409,
+ "trying to match on unknown field: " + path->fieldRef().dottedField().toString(),
+ 0);
+ return new SingleElementElementIterator(BSONElement());
+ }
+
+ virtual void releaseIterator(ElementIterator* iterator) const {
+ delete iterator;
+ }
+
+private:
+ BSONObj _keyPattern;
+ BSONObj _key;
+};
+
+/**
+ * Used by every stage with a filter.
+ */
+class Filter {
+public:
/**
- * Used by every stage with a filter.
+ * Returns true if filter is NULL or if 'wsm' satisfies the filter.
+ * Returns false if 'wsm' does not satisfy the filter.
*/
- class Filter {
- public:
- /**
- * Returns true if filter is NULL or if 'wsm' satisfies the filter.
- * Returns false if 'wsm' does not satisfy the filter.
- */
- static bool passes(WorkingSetMember* wsm, const MatchExpression* filter) {
- if (NULL == filter) { return true; }
- WorkingSetMatchableDocument doc(wsm);
- return filter->matches(&doc, NULL);
+ static bool passes(WorkingSetMember* wsm, const MatchExpression* filter) {
+ if (NULL == filter) {
+ return true;
}
-
- static bool passes(const BSONObj& keyData,
- const BSONObj& keyPattern,
- const MatchExpression* filter) {
-
- if (NULL == filter) { return true; }
- IndexKeyMatchableDocument doc(keyData, keyPattern);
- return filter->matches(&doc, NULL);
+ WorkingSetMatchableDocument doc(wsm);
+ return filter->matches(&doc, NULL);
+ }
+
+ static bool passes(const BSONObj& keyData,
+ const BSONObj& keyPattern,
+ const MatchExpression* filter) {
+ if (NULL == filter) {
+ return true;
}
- };
+ IndexKeyMatchableDocument doc(keyData, keyPattern);
+ return filter->matches(&doc, NULL);
+ }
+};
} // namespace mongo
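IndexKeyMatchableDocument above never builds a BSON document for the key; it walks the key pattern and the key data in lockstep and returns the element whose pattern position matches the requested path. The rough stand-alone model below shows that lockstep lookup with plain string vectors instead of BSONObjIterator/BSONElement; it is an illustration only, not the real matching code.

#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

int main() {
    // Stand-ins for a key pattern {a: 1, b: 1} and a corresponding index key.
    // Field names come from the pattern; values come from the key, walked in lockstep.
    std::vector<std::string> keyPattern = {"a", "b"};
    std::vector<std::string> keyData = {"7", "x"};

    auto lookup = [&](const std::string& field) -> std::optional<std::string> {
        for (std::size_t i = 0; i < keyPattern.size(); ++i)
            if (keyPattern[i] == field)
                return keyData[i];  // pattern position i names the i-th key element
        return std::nullopt;        // unknown field; planning should prevent this
    };

    if (auto v = lookup("b"))
        std::cout << "b resolves to " << *v << '\n';  // prints: b resolves to x
}
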
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 1776fd95a26..b07113f21fd 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -51,1247 +51,1159 @@
namespace mongo {
- using std::abs;
- using std::unique_ptr;
+using std::abs;
+using std::unique_ptr;
- //
- // Shared GeoNear search functionality
- //
-
- static const double kCircOfEarthInMeters = 2 * M_PI * kRadiusOfEarthInMeters;
- static const double kMaxEarthDistanceInMeters = kCircOfEarthInMeters / 2;
- static const double kMetersPerDegreeAtEquator = kCircOfEarthInMeters / 360;
-
- namespace {
-
- /**
- * Structure that holds BSON addresses (BSONElements) and the corresponding geometry parsed
- * at those locations.
- * Used to separate the parsing of geometries from a BSONObj (which must stay in scope) from
- * the computation over those geometries.
- * TODO: Merge with 2D/2DSphere key extraction?
- */
- struct StoredGeometry {
-
- static StoredGeometry* parseFrom(const BSONElement& element) {
- if (!element.isABSONObj())
- return NULL;
-
- unique_ptr<StoredGeometry> stored(new StoredGeometry);
- if (!stored->geometry.parseFromStorage(element).isOK())
- return NULL;
- stored->element = element;
- return stored.release();
- }
+//
+// Shared GeoNear search functionality
+//
- BSONElement element;
- GeometryContainer geometry;
- };
- }
-
- /**
- * Find and parse all geometry elements on the appropriate field path from the document.
- */
- static void extractGeometries(const BSONObj& doc,
- const string& path,
- vector<StoredGeometry*>* geometries) {
+static const double kCircOfEarthInMeters = 2 * M_PI * kRadiusOfEarthInMeters;
+static const double kMaxEarthDistanceInMeters = kCircOfEarthInMeters / 2;
+static const double kMetersPerDegreeAtEquator = kCircOfEarthInMeters / 360;
- BSONElementSet geomElements;
- // NOTE: Annoyingly, we cannot just expand arrays b/c single 2d points are arrays, we need
- // to manually expand all results to check if they are geometries
- doc.getFieldsDotted(path, geomElements, false /* expand arrays */);
+namespace {
- for (BSONElementSet::iterator it = geomElements.begin(); it != geomElements.end(); ++it) {
+/**
+ * Structure that holds BSON addresses (BSONElements) and the corresponding geometry parsed
+ * at those locations.
+ * Used to separate the parsing of geometries from a BSONObj (which must stay in scope) from
+ * the computation over those geometries.
+ * TODO: Merge with 2D/2DSphere key extraction?
+ */
+struct StoredGeometry {
+ static StoredGeometry* parseFrom(const BSONElement& element) {
+ if (!element.isABSONObj())
+ return NULL;
+
+ unique_ptr<StoredGeometry> stored(new StoredGeometry);
+ if (!stored->geometry.parseFromStorage(element).isOK())
+ return NULL;
+ stored->element = element;
+ return stored.release();
+ }
- const BSONElement& el = *it;
- unique_ptr<StoredGeometry> stored(StoredGeometry::parseFrom(el));
+ BSONElement element;
+ GeometryContainer geometry;
+};
+}
- if (stored.get()) {
- // Valid geometry element
- geometries->push_back(stored.release());
- }
- else if (el.type() == Array) {
-
- // Many geometries may be in an array
- BSONObjIterator arrIt(el.Obj());
- while (arrIt.more()) {
-
- const BSONElement nextEl = arrIt.next();
- stored.reset(StoredGeometry::parseFrom(nextEl));
-
- if (stored.get()) {
- // Valid geometry element
- geometries->push_back(stored.release());
- }
- else {
- warning() << "geoNear stage read non-geometry element " << nextEl.toString()
- << " in array " << el.toString();
- }
+/**
+ * Find and parse all geometry elements on the appropriate field path from the document.
+ */
+static void extractGeometries(const BSONObj& doc,
+ const string& path,
+ vector<StoredGeometry*>* geometries) {
+ BSONElementSet geomElements;
+ // NOTE: Annoyingly, we cannot just expand arrays b/c single 2d points are arrays, we need
+ // to manually expand all results to check if they are geometries
+ doc.getFieldsDotted(path, geomElements, false /* expand arrays */);
+
+ for (BSONElementSet::iterator it = geomElements.begin(); it != geomElements.end(); ++it) {
+ const BSONElement& el = *it;
+ unique_ptr<StoredGeometry> stored(StoredGeometry::parseFrom(el));
+
+ if (stored.get()) {
+ // Valid geometry element
+ geometries->push_back(stored.release());
+ } else if (el.type() == Array) {
+ // Many geometries may be in an array
+ BSONObjIterator arrIt(el.Obj());
+ while (arrIt.more()) {
+ const BSONElement nextEl = arrIt.next();
+ stored.reset(StoredGeometry::parseFrom(nextEl));
+
+ if (stored.get()) {
+ // Valid geometry element
+ geometries->push_back(stored.release());
+ } else {
+ warning() << "geoNear stage read non-geometry element " << nextEl.toString()
+ << " in array " << el.toString();
}
}
- else {
- warning() << "geoNear stage read non-geometry element " << el.toString();
- }
+ } else {
+ warning() << "geoNear stage read non-geometry element " << el.toString();
}
}
+}
- static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams,
- WorkingSetMember* member) {
-
- //
- // Generic GeoNear distance computation
- // Distances are computed by projecting the stored geometry into the query CRS, and
- // computing distance in that CRS.
- //
-
- // Must have an object in order to get geometry out of it.
- invariant(member->hasObj());
-
- CRS queryCRS = nearParams.nearQuery->centroid->crs;
-
- // Extract all the geometries out of this document for the near query
- OwnedPointerVector<StoredGeometry> geometriesOwned;
- vector<StoredGeometry*>& geometries = geometriesOwned.mutableVector();
- extractGeometries(member->obj.value(), nearParams.nearQuery->field, &geometries);
-
- // Compute the minimum distance of all the geometries in the document
- double minDistance = -1;
- BSONObj minDistanceObj;
- for (vector<StoredGeometry*>::iterator it = geometries.begin(); it != geometries.end();
- ++it) {
+static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams,
+ WorkingSetMember* member) {
+ //
+ // Generic GeoNear distance computation
+ // Distances are computed by projecting the stored geometry into the query CRS, and
+ // computing distance in that CRS.
+ //
- StoredGeometry& stored = **it;
+ // Must have an object in order to get geometry out of it.
+ invariant(member->hasObj());
- // NOTE: A stored document with STRICT_SPHERE CRS is treated as a malformed document
- // and ignored. Since GeoNear requires an index, there's no stored STRICT_SPHERE shape.
- // So we don't check it here.
+ CRS queryCRS = nearParams.nearQuery->centroid->crs;
- // NOTE: For now, we're sure that if we get this far in the query we'll have an
- // appropriate index which validates the type of geometry we're pulling back here.
- // TODO: It may make sense to change our semantics and, by default, only return
- // shapes in the same CRS from $geoNear.
- if (!stored.geometry.supportsProject(queryCRS))
- continue;
- stored.geometry.projectInto(queryCRS);
+ // Extract all the geometries out of this document for the near query
+ OwnedPointerVector<StoredGeometry> geometriesOwned;
+ vector<StoredGeometry*>& geometries = geometriesOwned.mutableVector();
+ extractGeometries(member->obj.value(), nearParams.nearQuery->field, &geometries);
- double nextDistance = stored.geometry.minDistance(*nearParams.nearQuery->centroid);
+ // Compute the minimum distance of all the geometries in the document
+ double minDistance = -1;
+ BSONObj minDistanceObj;
+ for (vector<StoredGeometry*>::iterator it = geometries.begin(); it != geometries.end(); ++it) {
+ StoredGeometry& stored = **it;
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- minDistanceObj = stored.element.Obj();
- }
- }
+ // NOTE: A stored document with STRICT_SPHERE CRS is treated as a malformed document
+ // and ignored. Since GeoNear requires an index, there's no stored STRICT_SPHERE shape.
+ // So we don't check it here.
- if (minDistance < 0) {
- // No distance to report
- return StatusWith<double>(-1);
- }
+ // NOTE: For now, we're sure that if we get this far in the query we'll have an
+ // appropriate index which validates the type of geometry we're pulling back here.
+ // TODO: It may make sense to change our semantics and, by default, only return
+ // shapes in the same CRS from $geoNear.
+ if (!stored.geometry.supportsProject(queryCRS))
+ continue;
+ stored.geometry.projectInto(queryCRS);
- if (nearParams.addDistMeta) {
- if (nearParams.nearQuery->unitsAreRadians) {
- // Hack for nearSphere
- // TODO: Remove nearSphere?
- invariant(SPHERE == queryCRS);
- member->addComputed(new GeoDistanceComputedData(minDistance
- / kRadiusOfEarthInMeters));
- }
- else {
- member->addComputed(new GeoDistanceComputedData(minDistance));
- }
- }
+ double nextDistance = stored.geometry.minDistance(*nearParams.nearQuery->centroid);
- if (nearParams.addPointMeta) {
- member->addComputed(new GeoNearPointComputedData(minDistanceObj));
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
+ minDistanceObj = stored.element.Obj();
}
-
- return StatusWith<double>(minDistance);
}
- static R2Annulus geoNearDistanceBounds(const GeoNearExpression& query) {
-
- const CRS queryCRS = query.centroid->crs;
-
- if (FLAT == queryCRS) {
- return R2Annulus(query.centroid->oldPoint, query.minDistance, query.maxDistance);
- }
-
- invariant(SPHERE == queryCRS);
-
- // TODO: Tighten this up a bit by making a CRS for "sphere with radians"
- double minDistance = query.minDistance;
- double maxDistance = query.maxDistance;
+ if (minDistance < 0) {
+ // No distance to report
+ return StatusWith<double>(-1);
+ }
- if (query.unitsAreRadians) {
- // Our input bounds are in radians, convert to meters since the query CRS is actually
- // SPHERE. We'll convert back to radians on outputting distances.
- minDistance *= kRadiusOfEarthInMeters;
- maxDistance *= kRadiusOfEarthInMeters;
+ if (nearParams.addDistMeta) {
+ if (nearParams.nearQuery->unitsAreRadians) {
+ // Hack for nearSphere
+ // TODO: Remove nearSphere?
+ invariant(SPHERE == queryCRS);
+ member->addComputed(new GeoDistanceComputedData(minDistance / kRadiusOfEarthInMeters));
+ } else {
+ member->addComputed(new GeoDistanceComputedData(minDistance));
}
-
- // GOTCHA: oldPoint is a misnomer - it is the original point data and is in the correct
- // CRS. We must not try to derive the original point from the spherical S2Point generated
- // as an optimization - the mapping is not 1->1 - [-180, 0] and [180, 0] map to the same
- // place.
- // TODO: Wrapping behavior should not depend on the index, which would make $near code
- // insensitive to which direction we explore the index in.
- return R2Annulus(query.centroid->oldPoint,
- min(minDistance, kMaxEarthDistanceInMeters),
- min(maxDistance, kMaxEarthDistanceInMeters));
}
- //
- // GeoNear2DStage
- //
-
- static R2Annulus twoDDistanceBounds(const GeoNearParams& nearParams,
- const IndexDescriptor* twoDIndex) {
+ if (nearParams.addPointMeta) {
+ member->addComputed(new GeoNearPointComputedData(minDistanceObj));
+ }
- R2Annulus fullBounds = geoNearDistanceBounds(*nearParams.nearQuery);
- const CRS queryCRS = nearParams.nearQuery->centroid->crs;
+ return StatusWith<double>(minDistance);
+}
- if (FLAT == queryCRS) {
+static R2Annulus geoNearDistanceBounds(const GeoNearExpression& query) {
+ const CRS queryCRS = query.centroid->crs;
- // Reset the full bounds based on our index bounds
- GeoHashConverter::Parameters hashParams;
- Status status = GeoHashConverter::parseParameters(twoDIndex->infoObj(), &hashParams);
- invariant(status.isOK()); // The index status should always be valid
+ if (FLAT == queryCRS) {
+ return R2Annulus(query.centroid->oldPoint, query.minDistance, query.maxDistance);
+ }
- // The biggest distance possible in this indexed collection is the diagonal of the
- // square indexed region.
- const double sqrt2Approx = 1.5;
- const double diagonalDist = sqrt2Approx * (hashParams.max - hashParams.min);
+ invariant(SPHERE == queryCRS);
- fullBounds = R2Annulus(fullBounds.center(),
- fullBounds.getInner(),
- min(fullBounds.getOuter(), diagonalDist));
- }
- else {
- // Spherical queries have upper bounds set by the earth - no-op
- // TODO: Wrapping errors would creep in here if nearSphere wasn't defined to not wrap
- invariant(SPHERE == queryCRS);
- invariant(!nearParams.nearQuery->isWrappingQuery);
- }
+ // TODO: Tighten this up a bit by making a CRS for "sphere with radians"
+ double minDistance = query.minDistance;
+ double maxDistance = query.maxDistance;
- return fullBounds;
+ if (query.unitsAreRadians) {
+ // Our input bounds are in radians, convert to meters since the query CRS is actually
+ // SPHERE. We'll convert back to radians on outputting distances.
+ minDistance *= kRadiusOfEarthInMeters;
+ maxDistance *= kRadiusOfEarthInMeters;
}
- class GeoNear2DStage::DensityEstimator {
- public:
- DensityEstimator(const IndexDescriptor* twoDindex, const GeoNearParams* nearParams) :
- _twoDIndex(twoDindex), _nearParams(nearParams), _currentLevel(0)
- {
- GeoHashConverter::Parameters hashParams;
- Status status = GeoHashConverter::parseParameters(_twoDIndex->infoObj(),
- &hashParams);
- // The index status should always be valid.
- invariant(status.isOK());
-
- _converter.reset(new GeoHashConverter(hashParams));
- _centroidCell = _converter->hash(_nearParams->nearQuery->centroid->oldPoint);
-
- // Since appendVertexNeighbors(level, output) requires level < hash.getBits(),
- // we have to start to find documents at most GeoHash::kMaxBits - 1. Thus the finest
- // search area is 16 * finest cell area at GeoHash::kMaxBits.
- _currentLevel = std::max(0u, hashParams.bits - 1u);
- }
+ // GOTCHA: oldPoint is a misnomer - it is the original point data and is in the correct
+ // CRS. We must not try to derive the original point from the spherical S2Point generated
+ // as an optimization - the mapping is not 1->1 - [-180, 0] and [180, 0] map to the same
+ // place.
+ // TODO: Wrapping behavior should not depend on the index, which would make $near code
+ // insensitive to which direction we explore the index in.
+ return R2Annulus(query.centroid->oldPoint,
+ min(minDistance, kMaxEarthDistanceInMeters),
+ min(maxDistance, kMaxEarthDistanceInMeters));
+}
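
To make the unit handling above easy to check in isolation, here is a minimal standalone sketch of the radians-to-meters normalization and clamping; the constant values are assumptions for illustration (the real kRadiusOfEarthInMeters and kMaxEarthDistanceInMeters are defined elsewhere in the tree), not part of this change:

#include <algorithm>
#include <utility>

namespace example {
// Assumed values for illustration; the real constants live in the geo constants header.
const double kPi = 3.14159265358979323846;
const double kRadiusOfEarthInMeters = 6378.1 * 1000;
const double kMaxEarthDistanceInMeters = kPi * kRadiusOfEarthInMeters;

// Normalize a [min, max] distance pair for a SPHERE query: convert radians to meters if
// needed, then clamp to the largest distance possible on the earth.
std::pair<double, double> sphereDistanceBoundsMeters(double minDistance,
                                                     double maxDistance,
                                                     bool unitsAreRadians) {
    if (unitsAreRadians) {
        minDistance *= kRadiusOfEarthInMeters;
        maxDistance *= kRadiusOfEarthInMeters;
    }
    return {std::min(minDistance, kMaxEarthDistanceInMeters),
            std::min(maxDistance, kMaxEarthDistanceInMeters)};
}
}  // namespace example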
+
+//
+// GeoNear2DStage
+//
+
+static R2Annulus twoDDistanceBounds(const GeoNearParams& nearParams,
+ const IndexDescriptor* twoDIndex) {
+ R2Annulus fullBounds = geoNearDistanceBounds(*nearParams.nearQuery);
+ const CRS queryCRS = nearParams.nearQuery->centroid->crs;
+
+ if (FLAT == queryCRS) {
+ // Reset the full bounds based on our index bounds
+ GeoHashConverter::Parameters hashParams;
+ Status status = GeoHashConverter::parseParameters(twoDIndex->infoObj(), &hashParams);
+ invariant(status.isOK()); // The index status should always be valid
+
+ // The biggest distance possible in this indexed collection is the diagonal of the
+ // square indexed region.
+ const double sqrt2Approx = 1.5;
+ const double diagonalDist = sqrt2Approx * (hashParams.max - hashParams.min);
+
+ fullBounds = R2Annulus(
+ fullBounds.center(), fullBounds.getInner(), min(fullBounds.getOuter(), diagonalDist));
+ } else {
+ // Spherical queries have upper bounds set by the earth - no-op
+ // TODO: Wrapping errors would creep in here if nearSphere wasn't defined to not wrap
+ invariant(SPHERE == queryCRS);
+ invariant(!nearParams.nearQuery->isWrappingQuery);
+ }
- PlanStage::StageState work(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out,
- double* estimatedDistance);
-
- void saveState();
- void restoreState(OperationContext* txn);
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- private:
- void buildIndexScan(OperationContext* txn, WorkingSet* workingSet, Collection* collection);
-
- const IndexDescriptor* _twoDIndex; // Not owned here.
- const GeoNearParams* _nearParams; // Not owned here.
- unique_ptr<IndexScan> _indexScan;
- unique_ptr<GeoHashConverter> _converter;
- GeoHash _centroidCell;
- unsigned _currentLevel;
- };
-
- // Initialize the internal states
- void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection)
- {
- IndexScanParams scanParams;
- scanParams.descriptor = _twoDIndex;
- scanParams.direction = 1;
- scanParams.doNotDedup = true;
-
- // Scan bounds on 2D indexes are only over the 2D field - other bounds aren't applicable.
- // This is handled in query planning.
- scanParams.bounds = _nearParams->baseBounds;
-
- // The "2d" field is always the first in the index
- const string twoDFieldName = _nearParams->nearQuery->field;
- const int twoDFieldPosition = 0;
-
- // Construct index intervals used by this stage
- OrderedIntervalList oil;
- oil.name = scanParams.bounds.fields[twoDFieldPosition].name;
-
- vector<GeoHash> neighbors;
- // Return the neighbors of closest vertex to this cell at the given level.
- _centroidCell.appendVertexNeighbors(_currentLevel, &neighbors);
- std::sort(neighbors.begin(), neighbors.end());
-
- for (vector<GeoHash>::const_iterator it = neighbors.begin(); it != neighbors.end(); it++) {
- mongo::BSONObjBuilder builder;
- it->appendHashMin(&builder, "");
- it->appendHashMax(&builder, "");
- oil.intervals.push_back(IndexBoundsBuilder::makeRangeInterval(builder.obj(),
- true,
- true));
- }
+ return fullBounds;
+}
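
As a concrete check of the cap above: with the default 2d index bounds of min = -180 and max = 180 (an assumed index configuration), diagonalDist = 1.5 * (180 - (-180)) = 540, comfortably above the true diagonal of sqrt(2) * 360 ≈ 509.1, so the deliberately loose sqrt2Approx of 1.5 always over-estimates the reachable distance rather than truncating the search.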
- invariant(oil.isValidFor(1));
+class GeoNear2DStage::DensityEstimator {
+public:
+ DensityEstimator(const IndexDescriptor* twoDindex, const GeoNearParams* nearParams)
+ : _twoDIndex(twoDindex), _nearParams(nearParams), _currentLevel(0) {
+ GeoHashConverter::Parameters hashParams;
+ Status status = GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams);
+ // The index status should always be valid.
+ invariant(status.isOK());
- // Intersect the $near bounds we just generated into the bounds we have for anything else
- // in the scan (i.e. $within)
- IndexBoundsBuilder::intersectize(oil,
- &scanParams.bounds.fields[twoDFieldPosition]);
+ _converter.reset(new GeoHashConverter(hashParams));
+ _centroidCell = _converter->hash(_nearParams->nearQuery->centroid->oldPoint);
- _indexScan.reset(new IndexScan(txn, scanParams, workingSet, NULL));
+        // Since appendVertexNeighbors(level, output) requires level < hash.getBits(), the finest
+        // level we can start searching for documents at is GeoHash::kMaxBits - 1. Thus the finest
+        // search area is 16 * the finest cell area at GeoHash::kMaxBits.
+ _currentLevel = std::max(0u, hashParams.bits - 1u);
}
- // Return IS_EOF is we find a document in it's ancestor cells and set estimated distance
- // from the nearest document.
- PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out,
- double* estimatedDistance)
- {
- if (!_indexScan) {
- // Setup index scan stage for current level.
- buildIndexScan(txn, workingSet, collection);
- }
+ PlanStage::StageState work(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out,
+ double* estimatedDistance);
+
+ void saveState();
+ void restoreState(OperationContext* txn);
+ void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+private:
+ void buildIndexScan(OperationContext* txn, WorkingSet* workingSet, Collection* collection);
+
+ const IndexDescriptor* _twoDIndex; // Not owned here.
+ const GeoNearParams* _nearParams; // Not owned here.
+ unique_ptr<IndexScan> _indexScan;
+ unique_ptr<GeoHashConverter> _converter;
+ GeoHash _centroidCell;
+ unsigned _currentLevel;
+};
+
+// Initialize the internal states
+void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection) {
+ IndexScanParams scanParams;
+ scanParams.descriptor = _twoDIndex;
+ scanParams.direction = 1;
+ scanParams.doNotDedup = true;
+
+ // Scan bounds on 2D indexes are only over the 2D field - other bounds aren't applicable.
+ // This is handled in query planning.
+ scanParams.bounds = _nearParams->baseBounds;
+
+ // The "2d" field is always the first in the index
+ const string twoDFieldName = _nearParams->nearQuery->field;
+ const int twoDFieldPosition = 0;
+
+ // Construct index intervals used by this stage
+ OrderedIntervalList oil;
+ oil.name = scanParams.bounds.fields[twoDFieldPosition].name;
+
+ vector<GeoHash> neighbors;
+    // Return the neighbors of the closest vertex to this cell at the given level.
+ _centroidCell.appendVertexNeighbors(_currentLevel, &neighbors);
+ std::sort(neighbors.begin(), neighbors.end());
+
+ for (vector<GeoHash>::const_iterator it = neighbors.begin(); it != neighbors.end(); it++) {
+ mongo::BSONObjBuilder builder;
+ it->appendHashMin(&builder, "");
+ it->appendHashMax(&builder, "");
+ oil.intervals.push_back(IndexBoundsBuilder::makeRangeInterval(builder.obj(), true, true));
+ }
- WorkingSetID workingSetID;
- PlanStage::StageState state = _indexScan->work(&workingSetID);
-
- if (state == PlanStage::IS_EOF) {
- // We ran through the neighbors but found nothing.
- if (_currentLevel > 0u) {
- // Advance to the next level and search again.
- _currentLevel--;
- // Reset index scan for the next level.
- _indexScan.reset(NULL);
- return PlanStage::NEED_TIME;
- }
+ invariant(oil.isValidFor(1));
+
+ // Intersect the $near bounds we just generated into the bounds we have for anything else
+ // in the scan (i.e. $within)
+ IndexBoundsBuilder::intersectize(oil, &scanParams.bounds.fields[twoDFieldPosition]);
+
+ _indexScan.reset(new IndexScan(txn, scanParams, workingSet, NULL));
+}
+
+// Return IS_EOF if we find a document in its ancestor cells, and set the estimated distance
+// from the nearest document.
+PlanStage::StageState GeoNear2DStage::DensityEstimator::work(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out,
+ double* estimatedDistance) {
+ if (!_indexScan) {
+ // Setup index scan stage for current level.
+ buildIndexScan(txn, workingSet, collection);
+ }
- // We are already at the top level.
- *estimatedDistance = _converter->sizeEdge(_currentLevel);
- return PlanStage::IS_EOF;
- } else if (state == PlanStage::ADVANCED) {
- // Found a document at current level.
- *estimatedDistance = _converter->sizeEdge(_currentLevel);
- // Clean up working set.
- workingSet->free(workingSetID);
- return PlanStage::IS_EOF;
- } else if (state == PlanStage::NEED_YIELD) {
- *out = workingSetID;
+ WorkingSetID workingSetID;
+ PlanStage::StageState state = _indexScan->work(&workingSetID);
+
+ if (state == PlanStage::IS_EOF) {
+ // We ran through the neighbors but found nothing.
+ if (_currentLevel > 0u) {
+ // Advance to the next level and search again.
+ _currentLevel--;
+ // Reset index scan for the next level.
+ _indexScan.reset(NULL);
+ return PlanStage::NEED_TIME;
}
- // Propagate NEED_TIME or errors
- return state;
+ // We are already at the top level.
+ *estimatedDistance = _converter->sizeEdge(_currentLevel);
+ return PlanStage::IS_EOF;
+ } else if (state == PlanStage::ADVANCED) {
+ // Found a document at current level.
+ *estimatedDistance = _converter->sizeEdge(_currentLevel);
+ // Clean up working set.
+ workingSet->free(workingSetID);
+ return PlanStage::IS_EOF;
+ } else if (state == PlanStage::NEED_YIELD) {
+ *out = workingSetID;
}
- void GeoNear2DStage::DensityEstimator::saveState() {
- if (_indexScan) {
- _indexScan->saveState();
- }
+ // Propagate NEED_TIME or errors
+ return state;
+}
+
+void GeoNear2DStage::DensityEstimator::saveState() {
+ if (_indexScan) {
+ _indexScan->saveState();
}
+}
- void GeoNear2DStage::DensityEstimator::restoreState(OperationContext* txn) {
- if (_indexScan) {
- _indexScan->restoreState(txn);
- }
+void GeoNear2DStage::DensityEstimator::restoreState(OperationContext* txn) {
+ if (_indexScan) {
+ _indexScan->restoreState(txn);
}
+}
- void GeoNear2DStage::DensityEstimator::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_indexScan) {
- _indexScan->invalidate(txn, dl, type);
- }
+void GeoNear2DStage::DensityEstimator::invalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ if (_indexScan) {
+ _indexScan->invalidate(txn, dl, type);
}
+}
- PlanStage::StageState GeoNear2DStage::initialize(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out)
- {
- if (!_densityEstimator) {
- _densityEstimator.reset(new DensityEstimator(_twoDIndex, &_nearParams));
- }
+PlanStage::StageState GeoNear2DStage::initialize(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out) {
+ if (!_densityEstimator) {
+ _densityEstimator.reset(new DensityEstimator(_twoDIndex, &_nearParams));
+ }
- double estimatedDistance;
- PlanStage::StageState state = _densityEstimator->work(txn, workingSet, collection, out,
- &estimatedDistance);
-
- if (state == PlanStage::IS_EOF) {
- // 2d index only works with legacy points as centroid. $nearSphere will project
- // the point into SPHERE CRS and calculate distance based on that.
- // STRICT_SPHERE is impossible here, as GeoJSON centroid is not allowed for 2d index.
-
- // Estimator finished its work, we need to finish initialization too.
- if (SPHERE == _nearParams.nearQuery->centroid->crs) {
- // Estimated distance is in degrees, convert it to meters.
- _boundsIncrement = deg2rad(estimatedDistance) * kRadiusOfEarthInMeters * 3;
- // Limit boundsIncrement to ~20KM, so that the first circle won't be too aggressive.
- _boundsIncrement = std::min(_boundsIncrement, kMaxEarthDistanceInMeters / 1000.0);
- }
- else {
- // We expand the radius by 3 times to give a reasonable starting search area.
- // Assume points are distributed evenly. X is the edge size of cells at whose
- // level we found a document in 4 neighbors. Thus the closest point is at least
- // X/2 far from the centroid. The distance between two points is at least X.
- // The area of Pi * (3X)^2 ~= 28 * X^2 will cover dozens of points at most.
- // We'll explore the space with exponentially increasing radius if this guess is
- // too small, so starting from a conservative initial radius doesn't hurt.
-
- _boundsIncrement = 3 * estimatedDistance;
- }
- invariant(_boundsIncrement > 0.0);
+ double estimatedDistance;
+ PlanStage::StageState state =
+ _densityEstimator->work(txn, workingSet, collection, out, &estimatedDistance);
+
+ if (state == PlanStage::IS_EOF) {
+        // A 2d index only works with legacy points as the centroid. $nearSphere will project
+ // the point into SPHERE CRS and calculate distance based on that.
+ // STRICT_SPHERE is impossible here, as GeoJSON centroid is not allowed for 2d index.
+
+        // The estimator finished its work; we need to finish initialization too.
+ if (SPHERE == _nearParams.nearQuery->centroid->crs) {
+ // Estimated distance is in degrees, convert it to meters.
+ _boundsIncrement = deg2rad(estimatedDistance) * kRadiusOfEarthInMeters * 3;
+            // Limit boundsIncrement to ~20 km, so that the first circle won't be too aggressive.
+ _boundsIncrement = std::min(_boundsIncrement, kMaxEarthDistanceInMeters / 1000.0);
+ } else {
+ // We expand the radius by 3 times to give a reasonable starting search area.
+ // Assume points are distributed evenly. X is the edge size of cells at whose
+ // level we found a document in 4 neighbors. Thus the closest point is at least
+ // X/2 far from the centroid. The distance between two points is at least X.
+ // The area of Pi * (3X)^2 ~= 28 * X^2 will cover dozens of points at most.
+ // We'll explore the space with exponentially increasing radius if this guess is
+ // too small, so starting from a conservative initial radius doesn't hurt.
- // Clean up
- _densityEstimator.reset(NULL);
+ _boundsIncrement = 3 * estimatedDistance;
}
+ invariant(_boundsIncrement > 0.0);
- return state;
+ // Clean up
+ _densityEstimator.reset(NULL);
}
- static const string kTwoDIndexNearStage("GEO_NEAR_2D");
-
- GeoNear2DStage::GeoNear2DStage(const GeoNearParams& nearParams,
- OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- IndexDescriptor* twoDIndex)
- : NearStage(txn,
- workingSet,
- collection,
- new PlanStageStats(CommonStats(kTwoDIndexNearStage.c_str()),
- STAGE_GEO_NEAR_2D)),
- _nearParams(nearParams),
- _twoDIndex(twoDIndex),
- _fullBounds(twoDDistanceBounds(nearParams, twoDIndex)),
- _currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
- _boundsIncrement(0.0) {
-
- getNearStats()->keyPattern = twoDIndex->keyPattern();
- getNearStats()->indexName = twoDIndex->indexName();
- }
+ return state;
+}
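
Read on its own, the starting-radius heuristic above reduces to the following sketch; deg2rad and the two earth constants are stand-ins with assumed values, not the definitions used by the real code:

#include <algorithm>

namespace example {
// Assumed values for illustration only.
const double kPi = 3.14159265358979323846;
const double kRadiusOfEarthInMeters = 6378.1 * 1000;
const double kMaxEarthDistanceInMeters = kPi * kRadiusOfEarthInMeters;

double deg2rad(double degrees) {
    return degrees * (kPi / 180.0);
}

// estimatedDistance is the edge length of the cell level at which the density estimator
// first found a neighboring document.
double initialBoundsIncrement(double estimatedDistance, bool isSphereQuery) {
    if (isSphereQuery) {
        // Degrees -> meters, expanded 3x, but capped at ~20 km so the first annulus
        // stays conservative.
        double increment = deg2rad(estimatedDistance) * kRadiusOfEarthInMeters * 3;
        return std::min(increment, kMaxEarthDistanceInMeters / 1000.0);
    }
    // FLAT: a disc of radius 3 * X covers roughly 28 * X^2, i.e. dozens of evenly
    // distributed points at most.
    return 3 * estimatedDistance;
}
}  // namespace example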
- GeoNear2DStage::~GeoNear2DStage() {
- }
+static const string kTwoDIndexNearStage("GEO_NEAR_2D");
- void GeoNear2DStage::finishSaveState() {
- if (_densityEstimator) {
- _densityEstimator->saveState();
- }
+GeoNear2DStage::GeoNear2DStage(const GeoNearParams& nearParams,
+ OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ IndexDescriptor* twoDIndex)
+ : NearStage(txn,
+ workingSet,
+ collection,
+ new PlanStageStats(CommonStats(kTwoDIndexNearStage.c_str()), STAGE_GEO_NEAR_2D)),
+ _nearParams(nearParams),
+ _twoDIndex(twoDIndex),
+ _fullBounds(twoDDistanceBounds(nearParams, twoDIndex)),
+ _currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
+ _boundsIncrement(0.0) {
+ getNearStats()->keyPattern = twoDIndex->keyPattern();
+ getNearStats()->indexName = twoDIndex->indexName();
+}
+
+GeoNear2DStage::~GeoNear2DStage() {}
+
+void GeoNear2DStage::finishSaveState() {
+ if (_densityEstimator) {
+ _densityEstimator->saveState();
}
+}
- void GeoNear2DStage::finishRestoreState(OperationContext* txn) {
- if (_densityEstimator) {
- _densityEstimator->restoreState(txn);
- }
+void GeoNear2DStage::finishRestoreState(OperationContext* txn) {
+ if (_densityEstimator) {
+ _densityEstimator->restoreState(txn);
}
+}
- void GeoNear2DStage::finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_densityEstimator) {
- _densityEstimator->invalidate(txn, dl, type);
- }
+void GeoNear2DStage::finishInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ if (_densityEstimator) {
+ _densityEstimator->invalidate(txn, dl, type);
}
+}
- namespace {
-
- /**
- * Expression which checks whether a legacy 2D index point is contained within our near
- * search annulus. See nextInterval() below for more discussion.
- * TODO: Make this a standard type of GEO match expression
- */
- class TwoDPtInAnnulusExpression : public LeafMatchExpression {
- public:
-
- TwoDPtInAnnulusExpression(const R2Annulus& annulus, StringData twoDPath)
- : LeafMatchExpression(INTERNAL_2D_POINT_IN_ANNULUS), _annulus(annulus) {
-
- initPath(twoDPath);
- }
-
- virtual ~TwoDPtInAnnulusExpression() {
- }
-
- virtual void toBSON(BSONObjBuilder* out) const {
- out->append("TwoDPtInAnnulusExpression", true);
- }
-
- virtual bool matchesSingleElement(const BSONElement& e) const {
- if (!e.isABSONObj())
- return false;
-
- PointWithCRS point;
- if (!GeoParser::parseStoredPoint(e, &point).isOK()) return false;
-
- return _annulus.contains(point.oldPoint);
- }
-
- //
- // These won't be called.
- //
-
- virtual void debugString(StringBuilder& debug, int level = 0) const {
- invariant(false);
- }
-
- virtual bool equivalent(const MatchExpression* other) const {
- invariant(false);
- return false;
- }
-
- virtual LeafMatchExpression* shallowClone() const {
- invariant(false);
- return NULL;
- }
-
- private:
+namespace {
- R2Annulus _annulus;
- };
-
- /**
- * Expression which checks whether a 2D key for a point (2D hash) intersects our search
- * region. The search region may have been formed by more granular hashes.
- */
- class TwoDKeyInRegionExpression : public LeafMatchExpression {
- public:
-
- TwoDKeyInRegionExpression(R2Region* region,
- const GeoHashConverter::Parameters& hashParams,
- StringData twoDKeyPath)
- : LeafMatchExpression(INTERNAL_2D_KEY_IN_REGION),
- _region(region),
- _unhasher(hashParams) {
-
- initPath(twoDKeyPath);
- }
-
- virtual ~TwoDKeyInRegionExpression() {
- }
-
- virtual void toBSON(BSONObjBuilder* out) const {
- out->append("TwoDKeyInRegionExpression", true);
- }
-
- virtual bool matchesSingleElement(const BSONElement& e) const {
- // Something has gone terribly wrong if this doesn't hold.
- invariant(BinData == e.type());
- return !_region->fastDisjoint(_unhasher.unhashToBoxCovering(_unhasher.hash(e)));
- }
-
- //
- // These won't be called.
- //
-
- virtual void debugString(StringBuilder& debug, int level = 0) const {
- invariant(false);
- }
-
- virtual bool equivalent(const MatchExpression* other) const {
- invariant(false);
- return true;
- }
-
- virtual MatchExpression* shallowClone() const {
- invariant(false);
- return NULL;
- }
+/**
+ * Expression which checks whether a legacy 2D index point is contained within our near
+ * search annulus. See nextInterval() below for more discussion.
+ * TODO: Make this a standard type of GEO match expression
+ */
+class TwoDPtInAnnulusExpression : public LeafMatchExpression {
+public:
+ TwoDPtInAnnulusExpression(const R2Annulus& annulus, StringData twoDPath)
+ : LeafMatchExpression(INTERNAL_2D_POINT_IN_ANNULUS), _annulus(annulus) {
+ initPath(twoDPath);
+ }
- private:
+ virtual ~TwoDPtInAnnulusExpression() {}
- const unique_ptr<R2Region> _region;
- const GeoHashConverter _unhasher;
- };
+ virtual void toBSON(BSONObjBuilder* out) const {
+ out->append("TwoDPtInAnnulusExpression", true);
+ }
- // Helper class to maintain ownership of a match expression alongside an index scan
- class IndexScanWithMatch : public IndexScan {
- public:
+ virtual bool matchesSingleElement(const BSONElement& e) const {
+ if (!e.isABSONObj())
+ return false;
- IndexScanWithMatch(OperationContext* txn,
- const IndexScanParams& params,
- WorkingSet* workingSet,
- MatchExpression* filter)
- : IndexScan(txn, params, workingSet, filter), _matcher(filter) {
- }
+ PointWithCRS point;
+ if (!GeoParser::parseStoredPoint(e, &point).isOK())
+ return false;
- virtual ~IndexScanWithMatch() {
- }
-
- private:
+ return _annulus.contains(point.oldPoint);
+ }
- // Owns matcher
- const unique_ptr<MatchExpression> _matcher;
- };
+ //
+ // These won't be called.
+ //
- // Helper class to maintain ownership of a match expression alongside an index scan
- class FetchStageWithMatch : public FetchStage {
- public:
+ virtual void debugString(StringBuilder& debug, int level = 0) const {
+ invariant(false);
+ }
- FetchStageWithMatch(OperationContext* txn,
- WorkingSet* ws,
- PlanStage* child,
- MatchExpression* filter,
- const Collection* collection)
- : FetchStage(txn, ws, child, filter, collection), _matcher(filter) {
- }
+ virtual bool equivalent(const MatchExpression* other) const {
+ invariant(false);
+ return false;
+ }
- virtual ~FetchStageWithMatch() {
- }
+ virtual LeafMatchExpression* shallowClone() const {
+ invariant(false);
+ return NULL;
+ }
- private:
+private:
+ R2Annulus _annulus;
+};
- // Owns matcher
- const unique_ptr<MatchExpression> _matcher;
- };
+/**
+ * Expression which checks whether a 2D key for a point (2D hash) intersects our search
+ * region. The search region may have been formed by more granular hashes.
+ */
+class TwoDKeyInRegionExpression : public LeafMatchExpression {
+public:
+ TwoDKeyInRegionExpression(R2Region* region,
+ const GeoHashConverter::Parameters& hashParams,
+ StringData twoDKeyPath)
+ : LeafMatchExpression(INTERNAL_2D_KEY_IN_REGION), _region(region), _unhasher(hashParams) {
+ initPath(twoDKeyPath);
}
- static double min2DBoundsIncrement(const GeoNearExpression& query, IndexDescriptor* twoDIndex) {
- GeoHashConverter::Parameters hashParams;
- Status status = GeoHashConverter::parseParameters(twoDIndex->infoObj(), &hashParams);
- invariant(status.isOK()); // The index status should always be valid
- GeoHashConverter hasher(hashParams);
+ virtual ~TwoDKeyInRegionExpression() {}
- // The hasher error is the diagonal of a 2D hash region - it's generally not helpful
- // to change region size such that a search radius is smaller than the 2D hash region
- // max radius. This is slightly conservative for now (box diagonal vs circle radius).
- double minBoundsIncrement = hasher.getError() / 2;
+ virtual void toBSON(BSONObjBuilder* out) const {
+ out->append("TwoDKeyInRegionExpression", true);
+ }
- const CRS queryCRS = query.centroid->crs;
- if (FLAT == queryCRS)
- return minBoundsIncrement;
+ virtual bool matchesSingleElement(const BSONElement& e) const {
+ // Something has gone terribly wrong if this doesn't hold.
+ invariant(BinData == e.type());
+ return !_region->fastDisjoint(_unhasher.unhashToBoxCovering(_unhasher.hash(e)));
+ }
- invariant(SPHERE == queryCRS);
+ //
+ // These won't be called.
+ //
- // If this is a spherical query, units are in meters - this is just a heuristic
- return minBoundsIncrement * kMetersPerDegreeAtEquator;
+ virtual void debugString(StringBuilder& debug, int level = 0) const {
+ invariant(false);
}
- static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
-
- const double outerDegrees = rad2deg(sphereBounds.getOuter() / kRadiusOfEarthInMeters);
- const double innerDegrees = rad2deg(sphereBounds.getInner() / kRadiusOfEarthInMeters);
- const double maxErrorDegrees = computeXScanDistance(sphereBounds.center().y, outerDegrees);
+ virtual bool equivalent(const MatchExpression* other) const {
+ invariant(false);
+ return true;
+ }
- return R2Annulus(sphereBounds.center(),
- max(0.0, innerDegrees - maxErrorDegrees),
- outerDegrees + maxErrorDegrees);
+ virtual MatchExpression* shallowClone() const {
+ invariant(false);
+ return NULL;
}
- StatusWith<NearStage::CoveredInterval*> //
+private:
+ const unique_ptr<R2Region> _region;
+ const GeoHashConverter _unhasher;
+};
+
+// Helper class to maintain ownership of a match expression alongside an index scan
+class IndexScanWithMatch : public IndexScan {
+public:
+ IndexScanWithMatch(OperationContext* txn,
+ const IndexScanParams& params,
+ WorkingSet* workingSet,
+ MatchExpression* filter)
+ : IndexScan(txn, params, workingSet, filter), _matcher(filter) {}
+
+ virtual ~IndexScanWithMatch() {}
+
+private:
+ // Owns matcher
+ const unique_ptr<MatchExpression> _matcher;
+};
+
+// Helper class to maintain ownership of a match expression alongside an index scan
+class FetchStageWithMatch : public FetchStage {
+public:
+ FetchStageWithMatch(OperationContext* txn,
+ WorkingSet* ws,
+ PlanStage* child,
+ MatchExpression* filter,
+ const Collection* collection)
+ : FetchStage(txn, ws, child, filter, collection), _matcher(filter) {}
+
+ virtual ~FetchStageWithMatch() {}
+
+private:
+ // Owns matcher
+ const unique_ptr<MatchExpression> _matcher;
+};
+}
+
+static double min2DBoundsIncrement(const GeoNearExpression& query, IndexDescriptor* twoDIndex) {
+ GeoHashConverter::Parameters hashParams;
+ Status status = GeoHashConverter::parseParameters(twoDIndex->infoObj(), &hashParams);
+ invariant(status.isOK()); // The index status should always be valid
+ GeoHashConverter hasher(hashParams);
+
+ // The hasher error is the diagonal of a 2D hash region - it's generally not helpful
+ // to change region size such that a search radius is smaller than the 2D hash region
+ // max radius. This is slightly conservative for now (box diagonal vs circle radius).
+ double minBoundsIncrement = hasher.getError() / 2;
+
+ const CRS queryCRS = query.centroid->crs;
+ if (FLAT == queryCRS)
+ return minBoundsIncrement;
+
+ invariant(SPHERE == queryCRS);
+
+ // If this is a spherical query, units are in meters - this is just a heuristic
+ return minBoundsIncrement * kMetersPerDegreeAtEquator;
+}
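
For intuition about the floor computed above: if hasher.getError() came back as 0.01 (in index units, i.e. degrees for a default 2d index), the FLAT floor would be 0.005, and the spherical floor roughly 0.005 * 111,319 ≈ 557 meters, taking kMetersPerDegreeAtEquator to be about 111,319 m (an assumed figure, roughly 2 * pi * earth radius / 360).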
+
+static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) {
+ const double outerDegrees = rad2deg(sphereBounds.getOuter() / kRadiusOfEarthInMeters);
+ const double innerDegrees = rad2deg(sphereBounds.getInner() / kRadiusOfEarthInMeters);
+ const double maxErrorDegrees = computeXScanDistance(sphereBounds.center().y, outerDegrees);
+
+ return R2Annulus(sphereBounds.center(),
+ max(0.0, innerDegrees - maxErrorDegrees),
+ outerDegrees + maxErrorDegrees);
+}
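
As a worked example of the projection above: an outer spherical bound of 111 km corresponds to 111000 / kRadiusOfEarthInMeters ≈ 0.0174 radians ≈ 1.0 degree, which is then padded outward (and the inner bound padded inward) by the x-scan error at the query's latitude before being used as FLAT index bounds.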
+
+StatusWith<NearStage::CoveredInterval*> //
GeoNear2DStage::nextInterval(OperationContext* txn,
WorkingSet* workingSet,
Collection* collection) {
+ // The search is finished if we searched at least once and all the way to the edge
+ if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
+ return StatusWith<CoveredInterval*>(NULL);
+ }
- // The search is finished if we searched at least once and all the way to the edge
- if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
- return StatusWith<CoveredInterval*>(NULL);
- }
-
- //
- // Setup the next interval
- //
-
- const NearStats* stats = getNearStats();
-
- if (!stats->intervalStats.empty()) {
-
- const IntervalStats& lastIntervalStats = stats->intervalStats.back();
+ //
+ // Setup the next interval
+ //
- // TODO: Generally we want small numbers of results fast, then larger numbers later
- if (lastIntervalStats.numResultsBuffered < 300)
- _boundsIncrement *= 2;
- else if (lastIntervalStats.numResultsBuffered > 600)
- _boundsIncrement /= 2;
- }
+ const NearStats* stats = getNearStats();
- _boundsIncrement = max(_boundsIncrement,
- min2DBoundsIncrement(*_nearParams.nearQuery, _twoDIndex));
+ if (!stats->intervalStats.empty()) {
+ const IntervalStats& lastIntervalStats = stats->intervalStats.back();
- R2Annulus nextBounds(_currBounds.center(),
- _currBounds.getOuter(),
- min(_currBounds.getOuter() + _boundsIncrement,
- _fullBounds.getOuter()));
+ // TODO: Generally we want small numbers of results fast, then larger numbers later
+ if (lastIntervalStats.numResultsBuffered < 300)
+ _boundsIncrement *= 2;
+ else if (lastIntervalStats.numResultsBuffered > 600)
+ _boundsIncrement /= 2;
+ }
- const bool isLastInterval = (nextBounds.getOuter() == _fullBounds.getOuter());
- _currBounds = nextBounds;
+ _boundsIncrement =
+ max(_boundsIncrement, min2DBoundsIncrement(*_nearParams.nearQuery, _twoDIndex));
- //
- // Get a covering region for this interval
- //
+ R2Annulus nextBounds(_currBounds.center(),
+ _currBounds.getOuter(),
+ min(_currBounds.getOuter() + _boundsIncrement, _fullBounds.getOuter()));
- const CRS queryCRS = _nearParams.nearQuery->centroid->crs;
-
- unique_ptr<R2Region> coverRegion;
-
- if (FLAT == queryCRS) {
-
- // NOTE: Due to floating point math issues, FLAT searches of a 2D index need to treat
- // containment and distance separately.
- // Ex: (distance) 54.001 - 54 > 0.001, but (containment) 54 + 0.001 <= 54.001
- // The idea is that a $near search with bounds is really a $within search, sorted by
- // distance. We attach a custom $within : annulus matcher to do the $within search,
- // and adjust max/min bounds slightly since, as above, containment does not mean the
- // distance calculation won't slightly overflow the boundary.
- //
- // The code below adjusts:
- // 1) Overall min/max bounds of the generated distance intervals to be more inclusive
- // 2) Bounds of the interval covering to be more inclusive
- // ... and later on we add the custom $within : annulus matcher.
- //
- // IMPORTANT: The *internal* interval distance bounds are *exact thresholds* - these
- // should not be adjusted.
- // TODO: Maybe integrate annuluses as a standard shape, and literally transform $near
- // internally into a $within query with $near just as sort.
-
- // Compute the maximum axis-aligned distance error
- const double epsilon = std::numeric_limits<double>::epsilon()
- * (max(abs(_fullBounds.center().x), abs(_fullBounds.center().y))
- + _fullBounds.getOuter());
-
- if (nextBounds.getInner() > 0 && nextBounds.getInner() == _fullBounds.getInner()) {
- nextBounds = R2Annulus(nextBounds.center(),
- max(0.0, nextBounds.getInner() - epsilon),
- nextBounds.getOuter());
- }
+ const bool isLastInterval = (nextBounds.getOuter() == _fullBounds.getOuter());
+ _currBounds = nextBounds;
- if (nextBounds.getOuter() > 0 && nextBounds.getOuter() == _fullBounds.getOuter()) {
- // We're at the max bound of the search, adjust interval maximum
- nextBounds = R2Annulus(nextBounds.center(),
- nextBounds.getInner(),
- nextBounds.getOuter() + epsilon);
- }
+ //
+ // Get a covering region for this interval
+ //
- // *Always* adjust the covering bounds to be more inclusive
- coverRegion.reset(new R2Annulus(nextBounds.center(),
- max(0.0, nextBounds.getInner() - epsilon),
- nextBounds.getOuter() + epsilon));
- }
- else {
- invariant(SPHERE == queryCRS);
- // TODO: As above, make this consistent with $within : $centerSphere
+ const CRS queryCRS = _nearParams.nearQuery->centroid->crs;
- // Our intervals aren't in the same CRS as our index, so we need to adjust them
- coverRegion.reset(new R2Annulus(projectBoundsToTwoDDegrees(nextBounds)));
- }
+ unique_ptr<R2Region> coverRegion;
+ if (FLAT == queryCRS) {
+ // NOTE: Due to floating point math issues, FLAT searches of a 2D index need to treat
+ // containment and distance separately.
+ // Ex: (distance) 54.001 - 54 > 0.001, but (containment) 54 + 0.001 <= 54.001
+ // The idea is that a $near search with bounds is really a $within search, sorted by
+ // distance. We attach a custom $within : annulus matcher to do the $within search,
+ // and adjust max/min bounds slightly since, as above, containment does not mean the
+ // distance calculation won't slightly overflow the boundary.
//
- // Setup the stages for this interval
- //
-
- IndexScanParams scanParams;
- scanParams.descriptor = _twoDIndex;
- scanParams.direction = 1;
- // We use a filter on the key. The filter rejects keys that don't intersect with the
- // annulus. An object that is in the annulus might have a key that's not in it and a key
- // that's in it. As such we can't just look at one key per object.
+ // The code below adjusts:
+ // 1) Overall min/max bounds of the generated distance intervals to be more inclusive
+ // 2) Bounds of the interval covering to be more inclusive
+ // ... and later on we add the custom $within : annulus matcher.
//
- // This does force us to do our own deduping of results, though.
- scanParams.doNotDedup = true;
-
- // Scan bounds on 2D indexes are only over the 2D field - other bounds aren't applicable.
- // This is handled in query planning.
- scanParams.bounds = _nearParams.baseBounds;
-
- // The "2d" field is always the first in the index
- const string twoDFieldName = _nearParams.nearQuery->field;
- const int twoDFieldPosition = 0;
-
- OrderedIntervalList coveredIntervals;
- coveredIntervals.name = scanParams.bounds.fields[twoDFieldPosition].name;
-
- ExpressionMapping::cover2d(*coverRegion,
- _twoDIndex->infoObj(),
- internalGeoNearQuery2DMaxCoveringCells,
- &coveredIntervals);
-
- // Intersect the $near bounds we just generated into the bounds we have for anything else
- // in the scan (i.e. $within)
- IndexBoundsBuilder::intersectize(coveredIntervals,
- &scanParams.bounds.fields[twoDFieldPosition]);
-
- // These parameters are stored by the index, and so must be ok
- GeoHashConverter::Parameters hashParams;
- GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams);
-
- MatchExpression* keyMatcher =
- new TwoDKeyInRegionExpression(coverRegion.release(),
- hashParams,
- twoDFieldName);
-
- // 2D indexes support covered search over additional fields they contain
- // TODO: Don't need to clone, can just attach to custom matcher above
- if (_nearParams.filter) {
- AndMatchExpression* andMatcher = new AndMatchExpression();
- andMatcher->add(keyMatcher);
- andMatcher->add(_nearParams.filter->shallowClone());
- keyMatcher = andMatcher;
+ // IMPORTANT: The *internal* interval distance bounds are *exact thresholds* - these
+ // should not be adjusted.
+ // TODO: Maybe integrate annuluses as a standard shape, and literally transform $near
+ // internally into a $within query with $near just as sort.
+
+ // Compute the maximum axis-aligned distance error
+ const double epsilon = std::numeric_limits<double>::epsilon() *
+ (max(abs(_fullBounds.center().x), abs(_fullBounds.center().y)) +
+ _fullBounds.getOuter());
+
+ if (nextBounds.getInner() > 0 && nextBounds.getInner() == _fullBounds.getInner()) {
+ nextBounds = R2Annulus(nextBounds.center(),
+ max(0.0, nextBounds.getInner() - epsilon),
+ nextBounds.getOuter());
}
- // IndexScanWithMatch owns the matcher
- IndexScan* scan = new IndexScanWithMatch(txn, scanParams, workingSet, keyMatcher);
-
- MatchExpression* docMatcher = NULL;
-
- // FLAT searches need to add an additional annulus $within matcher, see above
- if (FLAT == queryCRS) {
- docMatcher = new TwoDPtInAnnulusExpression(_fullBounds, twoDFieldName);
+ if (nextBounds.getOuter() > 0 && nextBounds.getOuter() == _fullBounds.getOuter()) {
+ // We're at the max bound of the search, adjust interval maximum
+ nextBounds = R2Annulus(
+ nextBounds.center(), nextBounds.getInner(), nextBounds.getOuter() + epsilon);
}
- // FetchStage owns index scan
- FetchStage* fetcher(new FetchStageWithMatch(txn,
- workingSet,
- scan,
- docMatcher,
- collection));
-
- return StatusWith<CoveredInterval*>(new CoveredInterval(fetcher,
- true,
- nextBounds.getInner(),
- nextBounds.getOuter(),
- isLastInterval));
- }
+ // *Always* adjust the covering bounds to be more inclusive
+ coverRegion.reset(new R2Annulus(nextBounds.center(),
+ max(0.0, nextBounds.getInner() - epsilon),
+ nextBounds.getOuter() + epsilon));
+ } else {
+ invariant(SPHERE == queryCRS);
+ // TODO: As above, make this consistent with $within : $centerSphere
- StatusWith<double> GeoNear2DStage::computeDistance(WorkingSetMember* member) {
- return computeGeoNearDistance(_nearParams, member);
+ // Our intervals aren't in the same CRS as our index, so we need to adjust them
+ coverRegion.reset(new R2Annulus(projectBoundsToTwoDDegrees(nextBounds)));
}
//
- // GeoNear2DSphereStage
+ // Setup the stages for this interval
//
- static int getFieldPosition(const IndexDescriptor* index, const string& fieldName) {
-
- int fieldPosition = 0;
-
- BSONObjIterator specIt(index->keyPattern());
- while (specIt.more()) {
- if (specIt.next().fieldName() == fieldName) {
- break;
- }
- ++fieldPosition;
- }
-
- if (fieldPosition == index->keyPattern().nFields())
- return -1;
-
- return fieldPosition;
+ IndexScanParams scanParams;
+ scanParams.descriptor = _twoDIndex;
+ scanParams.direction = 1;
+ // We use a filter on the key. The filter rejects keys that don't intersect with the
+ // annulus. An object that is in the annulus might have a key that's not in it and a key
+ // that's in it. As such we can't just look at one key per object.
+ //
+ // This does force us to do our own deduping of results, though.
+ scanParams.doNotDedup = true;
+
+ // Scan bounds on 2D indexes are only over the 2D field - other bounds aren't applicable.
+ // This is handled in query planning.
+ scanParams.bounds = _nearParams.baseBounds;
+
+ // The "2d" field is always the first in the index
+ const string twoDFieldName = _nearParams.nearQuery->field;
+ const int twoDFieldPosition = 0;
+
+ OrderedIntervalList coveredIntervals;
+ coveredIntervals.name = scanParams.bounds.fields[twoDFieldPosition].name;
+
+ ExpressionMapping::cover2d(*coverRegion,
+ _twoDIndex->infoObj(),
+ internalGeoNearQuery2DMaxCoveringCells,
+ &coveredIntervals);
+
+ // Intersect the $near bounds we just generated into the bounds we have for anything else
+ // in the scan (i.e. $within)
+ IndexBoundsBuilder::intersectize(coveredIntervals,
+ &scanParams.bounds.fields[twoDFieldPosition]);
+
+ // These parameters are stored by the index, and so must be ok
+ GeoHashConverter::Parameters hashParams;
+ GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams);
+
+ MatchExpression* keyMatcher =
+ new TwoDKeyInRegionExpression(coverRegion.release(), hashParams, twoDFieldName);
+
+ // 2D indexes support covered search over additional fields they contain
+ // TODO: Don't need to clone, can just attach to custom matcher above
+ if (_nearParams.filter) {
+ AndMatchExpression* andMatcher = new AndMatchExpression();
+ andMatcher->add(keyMatcher);
+ andMatcher->add(_nearParams.filter->shallowClone());
+ keyMatcher = andMatcher;
}
- static const string kS2IndexNearStage("GEO_NEAR_2DSPHERE");
-
- GeoNear2DSphereStage::GeoNear2DSphereStage(const GeoNearParams& nearParams,
- OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- IndexDescriptor* s2Index)
- : NearStage(txn,
- workingSet,
- collection,
- new PlanStageStats(CommonStats(kS2IndexNearStage.c_str()),
- STAGE_GEO_NEAR_2DSPHERE)),
- _nearParams(nearParams),
- _s2Index(s2Index),
- _fullBounds(geoNearDistanceBounds(*nearParams.nearQuery)),
- _currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
- _boundsIncrement(0.0) {
-
- getNearStats()->keyPattern = s2Index->keyPattern();
- getNearStats()->indexName = s2Index->indexName();
- }
+ // IndexScanWithMatch owns the matcher
+ IndexScan* scan = new IndexScanWithMatch(txn, scanParams, workingSet, keyMatcher);
- GeoNear2DSphereStage::~GeoNear2DSphereStage() {
+ MatchExpression* docMatcher = NULL;
+
+ // FLAT searches need to add an additional annulus $within matcher, see above
+ if (FLAT == queryCRS) {
+ docMatcher = new TwoDPtInAnnulusExpression(_fullBounds, twoDFieldName);
}
- namespace {
-
- S2Region* buildS2Region(const R2Annulus& sphereBounds) {
- // Internal bounds come in SPHERE CRS units
- // i.e. center is lon/lat, inner/outer are in meters
- S2LatLng latLng = S2LatLng::FromDegrees(sphereBounds.center().y,
- sphereBounds.center().x);
-
- vector<S2Region*> regions;
-
- S2Cap innerCap = S2Cap::FromAxisAngle(latLng.ToPoint(),
- S1Angle::Radians(sphereBounds.getInner()
- / kRadiusOfEarthInMeters));
- innerCap = innerCap.Complement();
- regions.push_back(new S2Cap(innerCap));
-
- // We only need to max bound if this is not a full search of the Earth
- // Using the constant here is important since we use the min of kMaxEarthDistance
- // and the actual bounds passed in to set up the search area.
- if (sphereBounds.getOuter() < kMaxEarthDistanceInMeters) {
- S2Cap outerCap = S2Cap::FromAxisAngle(latLng.ToPoint(),
- S1Angle::Radians(sphereBounds.getOuter()
- / kRadiusOfEarthInMeters));
- regions.push_back(new S2Cap(outerCap));
- }
+ // FetchStage owns index scan
+ FetchStage* fetcher(new FetchStageWithMatch(txn, workingSet, scan, docMatcher, collection));
- // Takes ownership of caps
- return new S2RegionIntersection(&regions);
- }
+ return StatusWith<CoveredInterval*>(new CoveredInterval(
+ fetcher, true, nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
+}
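
The growth policy for the annulus width, spread across the function above, can be summarized in a small sketch; the 300/600 thresholds are taken from the code, while minIncrement and fullOuter are hypothetical stand-ins for min2DBoundsIncrement() and _fullBounds.getOuter():

#include <algorithm>

namespace example {
// Widen the annulus when the last interval buffered few results, shrink it when it
// buffered many, and never drop below the index-derived minimum.
double nextBoundsIncrement(double increment, int lastResultsBuffered, double minIncrement) {
    if (lastResultsBuffered < 300)
        increment *= 2;
    else if (lastResultsBuffered > 600)
        increment /= 2;
    return std::max(increment, minIncrement);
}

// The next interval runs from the current outer radius out to at most the full bounds.
double nextOuterRadius(double currOuter, double increment, double fullOuter) {
    return std::min(currOuter + increment, fullOuter);
}
}  // namespace example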
- /**
- * Expression which checks whether a 2DSphere key for a point (S2 hash) intersects our
- * search region. The search region may have been formed by more granular hashes.
- */
- class TwoDSphereKeyInRegionExpression : public LeafMatchExpression {
- public:
+StatusWith<double> GeoNear2DStage::computeDistance(WorkingSetMember* member) {
+ return computeGeoNearDistance(_nearParams, member);
+}
- TwoDSphereKeyInRegionExpression(const R2Annulus& bounds, StringData twoDSpherePath)
- : LeafMatchExpression(INTERNAL_2DSPHERE_KEY_IN_REGION),
- _region(buildS2Region(bounds)) {
+//
+// GeoNear2DSphereStage
+//
- initPath(twoDSpherePath);
- }
+static int getFieldPosition(const IndexDescriptor* index, const string& fieldName) {
+ int fieldPosition = 0;
- virtual ~TwoDSphereKeyInRegionExpression() {
- }
+ BSONObjIterator specIt(index->keyPattern());
+ while (specIt.more()) {
+ if (specIt.next().fieldName() == fieldName) {
+ break;
+ }
+ ++fieldPosition;
+ }
- virtual void toBSON(BSONObjBuilder* out) const {
- out->append("TwoDSphereKeyInRegionExpression", true);
- }
+ if (fieldPosition == index->keyPattern().nFields())
+ return -1;
+
+ return fieldPosition;
+}
+
+static const string kS2IndexNearStage("GEO_NEAR_2DSPHERE");
+
+GeoNear2DSphereStage::GeoNear2DSphereStage(const GeoNearParams& nearParams,
+ OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ IndexDescriptor* s2Index)
+ : NearStage(
+ txn,
+ workingSet,
+ collection,
+ new PlanStageStats(CommonStats(kS2IndexNearStage.c_str()), STAGE_GEO_NEAR_2DSPHERE)),
+ _nearParams(nearParams),
+ _s2Index(s2Index),
+ _fullBounds(geoNearDistanceBounds(*nearParams.nearQuery)),
+ _currBounds(_fullBounds.center(), -1, _fullBounds.getInner()),
+ _boundsIncrement(0.0) {
+ getNearStats()->keyPattern = s2Index->keyPattern();
+ getNearStats()->indexName = s2Index->indexName();
+}
+
+GeoNear2DSphereStage::~GeoNear2DSphereStage() {}
+
+namespace {
+
+S2Region* buildS2Region(const R2Annulus& sphereBounds) {
+ // Internal bounds come in SPHERE CRS units
+ // i.e. center is lon/lat, inner/outer are in meters
+ S2LatLng latLng = S2LatLng::FromDegrees(sphereBounds.center().y, sphereBounds.center().x);
+
+ vector<S2Region*> regions;
+
+ S2Cap innerCap = S2Cap::FromAxisAngle(
+ latLng.ToPoint(), S1Angle::Radians(sphereBounds.getInner() / kRadiusOfEarthInMeters));
+ innerCap = innerCap.Complement();
+ regions.push_back(new S2Cap(innerCap));
+
+    // We only need a max bound if this is not a full search of the Earth
+ // Using the constant here is important since we use the min of kMaxEarthDistance
+ // and the actual bounds passed in to set up the search area.
+ if (sphereBounds.getOuter() < kMaxEarthDistanceInMeters) {
+ S2Cap outerCap = S2Cap::FromAxisAngle(
+ latLng.ToPoint(), S1Angle::Radians(sphereBounds.getOuter() / kRadiusOfEarthInMeters));
+ regions.push_back(new S2Cap(outerCap));
+ }
- virtual bool matchesSingleElement(const BSONElement& e) const {
- // Something has gone terribly wrong if this doesn't hold.
- invariant(String == e.type());
- S2Cell keyCell = S2Cell(S2CellId::FromString(e.str()));
- return _region->MayIntersect(keyCell);
- }
+ // Takes ownership of caps
+ return new S2RegionIntersection(&regions);
+}
- const S2Region& getRegion() {
- return *_region;
- }
+/**
+ * Expression which checks whether a 2DSphere key for a point (S2 hash) intersects our
+ * search region. The search region may have been formed by more granular hashes.
+ */
+class TwoDSphereKeyInRegionExpression : public LeafMatchExpression {
+public:
+ TwoDSphereKeyInRegionExpression(const R2Annulus& bounds, StringData twoDSpherePath)
+ : LeafMatchExpression(INTERNAL_2DSPHERE_KEY_IN_REGION), _region(buildS2Region(bounds)) {
+ initPath(twoDSpherePath);
+ }
- //
- // These won't be called.
- //
+ virtual ~TwoDSphereKeyInRegionExpression() {}
- virtual void debugString(StringBuilder& debug, int level = 0) const {
- invariant(false);
- }
+ virtual void toBSON(BSONObjBuilder* out) const {
+ out->append("TwoDSphereKeyInRegionExpression", true);
+ }
- virtual bool equivalent(const MatchExpression* other) const {
- invariant(false);
- return true;
- }
+ virtual bool matchesSingleElement(const BSONElement& e) const {
+ // Something has gone terribly wrong if this doesn't hold.
+ invariant(String == e.type());
+ S2Cell keyCell = S2Cell(S2CellId::FromString(e.str()));
+ return _region->MayIntersect(keyCell);
+ }
- virtual MatchExpression* shallowClone() const {
- invariant(false);
- return NULL;
- }
+ const S2Region& getRegion() {
+ return *_region;
+ }
- private:
+ //
+ // These won't be called.
+ //
- const unique_ptr<S2Region> _region;
- };
+ virtual void debugString(StringBuilder& debug, int level = 0) const {
+ invariant(false);
}
- // Estimate the density of data by search the nearest cells level by level around center.
- class GeoNear2DSphereStage::DensityEstimator {
- public:
- DensityEstimator(const IndexDescriptor* s2Index, const GeoNearParams* nearParams) :
- _s2Index(s2Index), _nearParams(nearParams), _currentLevel(0)
- {
- S2IndexingParams params;
- ExpressionParams::parse2dsphereParams(_s2Index->infoObj(), &params);
- // Since cellId.AppendVertexNeighbors(level, output) requires level < cellId.level(),
- // we have to start to find documents at most S2::kMaxCellLevel - 1. Thus the finest
- // search area is 16 * finest cell area at S2::kMaxCellLevel, which is less than
- // (1.4 inch X 1.4 inch) on the earth.
- _currentLevel = std::max(0, params.finestIndexedLevel - 1);
- }
+ virtual bool equivalent(const MatchExpression* other) const {
+ invariant(false);
+ return true;
+ }
- // Search for a document in neighbors at current level.
- // Return IS_EOF is such document exists and set the estimated distance to the nearest doc.
- PlanStage::StageState work(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out,
- double* estimatedDistance);
-
- void saveState();
- void restoreState(OperationContext* txn);
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- private:
- void buildIndexScan(OperationContext* txn, WorkingSet* workingSet, Collection* collection);
-
- const IndexDescriptor* _s2Index; // Not owned here.
- const GeoNearParams* _nearParams; // Not owned here.
- int _currentLevel;
- unique_ptr<IndexScan> _indexScan;
- };
-
- // Setup the index scan stage for neighbors at this level.
- void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection)
- {
- IndexScanParams scanParams;
- scanParams.descriptor = _s2Index;
- scanParams.direction = 1;
- scanParams.doNotDedup = true;
- scanParams.bounds = _nearParams->baseBounds;
-
- // Because the planner doesn't yet set up 2D index bounds, do it ourselves here
- const string s2Field = _nearParams->nearQuery->field;
- const int s2FieldPosition = getFieldPosition(_s2Index, s2Field);
- fassert(28677, s2FieldPosition >= 0);
- OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition];
- coveredIntervals->intervals.clear();
-
- // Find 4 neighbors (3 neighbors at face vertex) at current level.
- const S2CellId& centerId = _nearParams->nearQuery->centroid->cell.id();
- vector<S2CellId> neighbors;
-
- // The search area expands 4X each time.
- // Return the neighbors of closest vertex to this cell at the given level.
- invariant(_currentLevel < centerId.level());
- centerId.AppendVertexNeighbors(_currentLevel, &neighbors);
-
- // Convert S2CellId to string and sort
- vector<string> neighborKeys;
- for (vector<S2CellId>::const_iterator it = neighbors.begin(); it != neighbors.end(); it++) {
- neighborKeys.push_back(it->toString());
- }
- std::sort(neighborKeys.begin(), neighborKeys.end());
-
- for (vector<string>::const_iterator it = neighborKeys.begin(); it != neighborKeys.end();
- it++)
- {
- // construct interval [*it, end) for this cell.
- std::string end = *it;
- end[end.size() - 1]++;
- coveredIntervals->intervals.push_back(
- IndexBoundsBuilder::makeRangeInterval(*it, end, true, false));
- }
+ virtual MatchExpression* shallowClone() const {
+ invariant(false);
+ return NULL;
+ }
- invariant(coveredIntervals->isValidFor(1));
+private:
+ const unique_ptr<S2Region> _region;
+};
+}
+
+// Estimate the density of the data by searching the nearest cells, level by level, around the center.
+class GeoNear2DSphereStage::DensityEstimator {
+public:
+ DensityEstimator(const IndexDescriptor* s2Index, const GeoNearParams* nearParams)
+ : _s2Index(s2Index), _nearParams(nearParams), _currentLevel(0) {
+ S2IndexingParams params;
+ ExpressionParams::parse2dsphereParams(_s2Index->infoObj(), &params);
+        // Since cellId.AppendVertexNeighbors(level, output) requires level < cellId.level(), the
+        // finest level we can start searching for documents at is S2::kMaxCellLevel - 1. Thus the
+        // finest search area is 16 * the finest cell area at S2::kMaxCellLevel, which is less than
+        // (1.4 inch X 1.4 inch) on the earth.
+ _currentLevel = std::max(0, params.finestIndexedLevel - 1);
+ }
- // Index scan
- _indexScan.reset(new IndexScan(txn, scanParams, workingSet, NULL));
+    // Search for a document in the neighbors at the current level.
+    // Return IS_EOF if such a document exists, and set the estimated distance to the nearest doc.
+ PlanStage::StageState work(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out,
+ double* estimatedDistance);
+
+ void saveState();
+ void restoreState(OperationContext* txn);
+ void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+private:
+ void buildIndexScan(OperationContext* txn, WorkingSet* workingSet, Collection* collection);
+
+ const IndexDescriptor* _s2Index; // Not owned here.
+ const GeoNearParams* _nearParams; // Not owned here.
+ int _currentLevel;
+ unique_ptr<IndexScan> _indexScan;
+};
+
+// Setup the index scan stage for neighbors at this level.
+void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection) {
+ IndexScanParams scanParams;
+ scanParams.descriptor = _s2Index;
+ scanParams.direction = 1;
+ scanParams.doNotDedup = true;
+ scanParams.bounds = _nearParams->baseBounds;
+
+ // Because the planner doesn't yet set up 2D index bounds, do it ourselves here
+ const string s2Field = _nearParams->nearQuery->field;
+ const int s2FieldPosition = getFieldPosition(_s2Index, s2Field);
+ fassert(28677, s2FieldPosition >= 0);
+ OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition];
+ coveredIntervals->intervals.clear();
+
+ // Find 4 neighbors (3 neighbors at face vertex) at current level.
+ const S2CellId& centerId = _nearParams->nearQuery->centroid->cell.id();
+ vector<S2CellId> neighbors;
+
+ // The search area expands 4X each time.
+    // Return the neighbors of the closest vertex to this cell at the given level.
+ invariant(_currentLevel < centerId.level());
+ centerId.AppendVertexNeighbors(_currentLevel, &neighbors);
+
+ // Convert S2CellId to string and sort
+ vector<string> neighborKeys;
+ for (vector<S2CellId>::const_iterator it = neighbors.begin(); it != neighbors.end(); it++) {
+ neighborKeys.push_back(it->toString());
+ }
+ std::sort(neighborKeys.begin(), neighborKeys.end());
+
+ for (vector<string>::const_iterator it = neighborKeys.begin(); it != neighborKeys.end(); it++) {
+ // construct interval [*it, end) for this cell.
+ std::string end = *it;
+ end[end.size() - 1]++;
+ coveredIntervals->intervals.push_back(
+ IndexBoundsBuilder::makeRangeInterval(*it, end, true, false));
}
- PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out,
- double* estimatedDistance)
- {
- if (!_indexScan) {
- // Setup index scan stage for current level.
- buildIndexScan(txn, workingSet, collection);
- }
+ invariant(coveredIntervals->isValidFor(1));
- WorkingSetID workingSetID;
- PlanStage::StageState state = _indexScan->work(&workingSetID);
-
- if (state == PlanStage::IS_EOF) {
- // We ran through the neighbors but found nothing.
- if (_currentLevel > 0) {
- // Advance to the next level and search again.
- _currentLevel--;
- // Reset index scan for the next level.
- _indexScan.reset(NULL);
- return PlanStage::NEED_TIME;
- }
+ // Index scan
+ _indexScan.reset(new IndexScan(txn, scanParams, workingSet, NULL));
+}
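
The [*it, end) construction above leans on a small string trick: incrementing the last character of an S2 cell id string yields the smallest string greater than every key that shares that id as a prefix. A standalone sketch (assuming the keys are the plain cell-id strings used by the scan above):

#include <string>

namespace example {
// Exclusive upper bound of the key range covering a cell and all of its descendants.
std::string cellRangeEnd(std::string cellKey) {
    // Every key for a descendant cell starts with cellKey, so the half-open interval
    // [cellKey, cellRangeEnd(cellKey)) covers exactly those keys.
    cellKey[cellKey.size() - 1]++;
    return cellKey;
}
}  // namespace example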
+
+PlanStage::StageState GeoNear2DSphereStage::DensityEstimator::work(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out,
+ double* estimatedDistance) {
+ if (!_indexScan) {
+ // Setup index scan stage for current level.
+ buildIndexScan(txn, workingSet, collection);
+ }
- // We are already at the top level.
- *estimatedDistance = S2::kAvgEdge.GetValue(_currentLevel) * kRadiusOfEarthInMeters;
- return PlanStage::IS_EOF;
- } else if (state == PlanStage::ADVANCED) {
- // We found something!
- *estimatedDistance = S2::kAvgEdge.GetValue(_currentLevel) * kRadiusOfEarthInMeters;
- // Clean up working set.
- workingSet->free(workingSetID);
- return PlanStage::IS_EOF;
- } else if (state == PlanStage::NEED_YIELD) {
- *out = workingSetID;
+ WorkingSetID workingSetID;
+ PlanStage::StageState state = _indexScan->work(&workingSetID);
+
+ if (state == PlanStage::IS_EOF) {
+ // We ran through the neighbors but found nothing.
+ if (_currentLevel > 0) {
+ // Advance to the next level and search again.
+ _currentLevel--;
+ // Reset index scan for the next level.
+ _indexScan.reset(NULL);
+ return PlanStage::NEED_TIME;
}
- // Propagate NEED_TIME or errors
- return state;
+ // We are already at the top level.
+ *estimatedDistance = S2::kAvgEdge.GetValue(_currentLevel) * kRadiusOfEarthInMeters;
+ return PlanStage::IS_EOF;
+ } else if (state == PlanStage::ADVANCED) {
+ // We found something!
+ *estimatedDistance = S2::kAvgEdge.GetValue(_currentLevel) * kRadiusOfEarthInMeters;
+ // Clean up working set.
+ workingSet->free(workingSetID);
+ return PlanStage::IS_EOF;
+ } else if (state == PlanStage::NEED_YIELD) {
+ *out = workingSetID;
}
- void GeoNear2DSphereStage::DensityEstimator::saveState() {
- if (_indexScan) {
- _indexScan->saveState();
- }
+ // Propagate NEED_TIME or errors
+ return state;
+}
+
+void GeoNear2DSphereStage::DensityEstimator::saveState() {
+ if (_indexScan) {
+ _indexScan->saveState();
}
+}
- void GeoNear2DSphereStage::DensityEstimator::restoreState(OperationContext* txn) {
- if (_indexScan) {
- _indexScan->restoreState(txn);
- }
+void GeoNear2DSphereStage::DensityEstimator::restoreState(OperationContext* txn) {
+ if (_indexScan) {
+ _indexScan->restoreState(txn);
}
+}
- void GeoNear2DSphereStage::DensityEstimator::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_indexScan) {
- _indexScan->invalidate(txn, dl, type);
- }
+void GeoNear2DSphereStage::DensityEstimator::invalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ if (_indexScan) {
+ _indexScan->invalidate(txn, dl, type);
}
+}
- PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out)
- {
- if (!_densityEstimator) {
- _densityEstimator.reset(new DensityEstimator(_s2Index, &_nearParams));
- }
+PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out) {
+ if (!_densityEstimator) {
+ _densityEstimator.reset(new DensityEstimator(_s2Index, &_nearParams));
+ }
- double estimatedDistance;
- PlanStage::StageState state = _densityEstimator->work(txn, workingSet, collection, out,
- &estimatedDistance);
-
- if (state == IS_EOF) {
- // We find a document in 4 neighbors at current level, but didn't at previous level.
- //
- // Assuming cell size at current level is d and data is even distributed, the distance
- // between two nearest points are at least d. The following circle with radius of 3 * d
- // covers PI * 9 * d^2, giving at most 30 documents.
- //
- // At the coarsest level, the search area is the whole earth.
- _boundsIncrement = 3 * estimatedDistance;
- invariant(_boundsIncrement > 0.0);
+ double estimatedDistance;
+ PlanStage::StageState state =
+ _densityEstimator->work(txn, workingSet, collection, out, &estimatedDistance);
- // Clean up
- _densityEstimator.reset(NULL);
- }
+ if (state == IS_EOF) {
+        // We found a document among the 4 neighbors at the current level, but not at the
+        // previous level.
+        //
+        // Assuming the cell size at the current level is d and the data is evenly
+        // distributed, the distance between two neighboring points is at least d. A circle
+        // with radius 3 * d covers an area of PI * 9 * d^2, giving at most 30 documents.
+ //
+ // At the coarsest level, the search area is the whole earth.
+ _boundsIncrement = 3 * estimatedDistance;
+ invariant(_boundsIncrement > 0.0);
- return state;
+ // Clean up
+ _densityEstimator.reset(NULL);
}
- void GeoNear2DSphereStage::finishSaveState() {
- if (_densityEstimator) {
- _densityEstimator->saveState();
- }
+ return state;
+}
+
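The comment in initialize() above leans on a small area argument. As a rough standalone check (illustrative only, not MongoDB code; d and radius are local stand-ins for the estimated distance and the resulting _boundsIncrement):

#include <iostream>

int main() {
    const double pi = 3.141592653589793;
    const double d = 1.0;                       // estimated nearest-neighbor distance
    const double radius = 3 * d;                // first search radius, i.e. 3 * estimatedDistance
    const double area = pi * radius * radius;   // PI * 9 * d^2
    // With points spaced at least d apart, each one occupies roughly d^2 of area,
    // so the first annulus is expected to contain on the order of 28 documents.
    std::cout << "approximate document bound: " << area / (d * d) << std::endl;
    return 0;
}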
+void GeoNear2DSphereStage::finishSaveState() {
+ if (_densityEstimator) {
+ _densityEstimator->saveState();
}
+}
- void GeoNear2DSphereStage::finishRestoreState(OperationContext* txn) {
- if (_densityEstimator) {
- _densityEstimator->restoreState(txn);
- }
+void GeoNear2DSphereStage::finishRestoreState(OperationContext* txn) {
+ if (_densityEstimator) {
+ _densityEstimator->restoreState(txn);
}
+}
- void GeoNear2DSphereStage::finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_densityEstimator) {
- _densityEstimator->invalidate(txn, dl, type);
- }
+void GeoNear2DSphereStage::finishInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ if (_densityEstimator) {
+ _densityEstimator->invalidate(txn, dl, type);
}
+}
- StatusWith<NearStage::CoveredInterval*> //
+StatusWith<NearStage::CoveredInterval*> //
GeoNear2DSphereStage::nextInterval(OperationContext* txn,
WorkingSet* workingSet,
Collection* collection) {
+ // The search is finished if we searched at least once and all the way to the edge
+ if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
+ return StatusWith<CoveredInterval*>(NULL);
+ }
- // The search is finished if we searched at least once and all the way to the edge
- if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) {
- return StatusWith<CoveredInterval*>(NULL);
- }
+ //
+ // Setup the next interval
+ //
- //
- // Setup the next interval
- //
+ const NearStats* stats = getNearStats();
- const NearStats* stats = getNearStats();
+ if (!stats->intervalStats.empty()) {
+ const IntervalStats& lastIntervalStats = stats->intervalStats.back();
- if (!stats->intervalStats.empty()) {
+ // TODO: Generally we want small numbers of results fast, then larger numbers later
+ if (lastIntervalStats.numResultsBuffered < 300)
+ _boundsIncrement *= 2;
+ else if (lastIntervalStats.numResultsBuffered > 600)
+ _boundsIncrement /= 2;
+ }
- const IntervalStats& lastIntervalStats = stats->intervalStats.back();
+ invariant(_boundsIncrement > 0.0);
- // TODO: Generally we want small numbers of results fast, then larger numbers later
- if (lastIntervalStats.numResultsBuffered < 300)
- _boundsIncrement *= 2;
- else if (lastIntervalStats.numResultsBuffered > 600)
- _boundsIncrement /= 2;
- }
+ R2Annulus nextBounds(_currBounds.center(),
+ _currBounds.getOuter(),
+ min(_currBounds.getOuter() + _boundsIncrement, _fullBounds.getOuter()));
- invariant(_boundsIncrement > 0.0);
+ bool isLastInterval = (nextBounds.getOuter() == _fullBounds.getOuter());
+ _currBounds = nextBounds;
- R2Annulus nextBounds(_currBounds.center(),
- _currBounds.getOuter(),
- min(_currBounds.getOuter() + _boundsIncrement,
- _fullBounds.getOuter()));
+ //
+ // Setup the covering region and stages for this interval
+ //
- bool isLastInterval = (nextBounds.getOuter() == _fullBounds.getOuter());
- _currBounds = nextBounds;
+ IndexScanParams scanParams;
+ scanParams.descriptor = _s2Index;
+ scanParams.direction = 1;
+    // We use a filter on the key. The filter rejects keys that don't intersect with the
+    // annulus. An object that is inside the annulus might have both a key that intersects the
+    // annulus and a key that doesn't, so we can't just look at one key per object.
+ //
+ // This does force us to do our own deduping of results, though.
+ scanParams.doNotDedup = true;
+ scanParams.bounds = _nearParams.baseBounds;
- //
- // Setup the covering region and stages for this interval
- //
+ // Because the planner doesn't yet set up 2D index bounds, do it ourselves here
+ const string s2Field = _nearParams.nearQuery->field;
+ const int s2FieldPosition = getFieldPosition(_s2Index, s2Field);
+ fassert(28678, s2FieldPosition >= 0);
+ scanParams.bounds.fields[s2FieldPosition].intervals.clear();
+ OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition];
- IndexScanParams scanParams;
- scanParams.descriptor = _s2Index;
- scanParams.direction = 1;
-        // We use a filter on the key. The filter rejects keys that don't intersect with the
-        // annulus. An object that is inside the annulus might have both a key that intersects
-        // the annulus and a key that doesn't, so we can't just look at one key per object.
- //
- // This does force us to do our own deduping of results, though.
- scanParams.doNotDedup = true;
- scanParams.bounds = _nearParams.baseBounds;
-
- // Because the planner doesn't yet set up 2D index bounds, do it ourselves here
- const string s2Field = _nearParams.nearQuery->field;
- const int s2FieldPosition = getFieldPosition(_s2Index, s2Field);
- fassert(28678, s2FieldPosition >= 0);
- scanParams.bounds.fields[s2FieldPosition].intervals.clear();
- OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition];
-
- TwoDSphereKeyInRegionExpression* keyMatcher =
- new TwoDSphereKeyInRegionExpression(_currBounds, s2Field);
-
- ExpressionMapping::cover2dsphere(keyMatcher->getRegion(),
- _s2Index->infoObj(),
- coveredIntervals);
-
- // IndexScan owns the hash matcher
- IndexScan* scan = new IndexScanWithMatch(txn, scanParams, workingSet, keyMatcher);
-
- // FetchStage owns index scan
- FetchStage* fetcher(new FetchStage(txn, workingSet, scan, _nearParams.filter, collection));
-
- return StatusWith<CoveredInterval*>(new CoveredInterval(fetcher,
- true,
- nextBounds.getInner(),
- nextBounds.getOuter(),
- isLastInterval));
- }
+ TwoDSphereKeyInRegionExpression* keyMatcher =
+ new TwoDSphereKeyInRegionExpression(_currBounds, s2Field);
- StatusWith<double> GeoNear2DSphereStage::computeDistance(WorkingSetMember* member) {
- return computeGeoNearDistance(_nearParams, member);
- }
+ ExpressionMapping::cover2dsphere(
+ keyMatcher->getRegion(), _s2Index->infoObj(), coveredIntervals);
+
+ // IndexScan owns the hash matcher
+ IndexScan* scan = new IndexScanWithMatch(txn, scanParams, workingSet, keyMatcher);
+
+ // FetchStage owns index scan
+ FetchStage* fetcher(new FetchStage(txn, workingSet, scan, _nearParams.filter, collection));
+
+ return StatusWith<CoveredInterval*>(new CoveredInterval(
+ fetcher, true, nextBounds.getInner(), nextBounds.getOuter(), isLastInterval));
+}
-} // namespace mongo
+StatusWith<double> GeoNear2DSphereStage::computeDistance(WorkingSetMember* member) {
+ return computeGeoNearDistance(_nearParams, member);
+}
+} // namespace mongo
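For reference, the interval-growth heuristic used by nextInterval() above boils down to a small pure function. A standalone sketch (not MongoDB code; the 300/600 thresholds are the ones hard-coded in the diff):

// Adaptive rule for _boundsIncrement, mirroring nextInterval() above.
double nextBoundsIncrement(double current, long long lastNumResultsBuffered) {
    if (lastNumResultsBuffered < 300) {
        // Few buffered results in the last interval: widen the annulus faster.
        return current * 2;
    }
    if (lastNumResultsBuffered > 600) {
        // Too many buffered results: take smaller steps so each interval stays cheap.
        return current / 2;
    }
    return current;
}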
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index 52c28ed8303..f9295217992 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -40,137 +40,124 @@
namespace mongo {
- /**
- * Generic parameters for a GeoNear search
- */
- struct GeoNearParams {
-
- GeoNearParams() :
- filter(NULL), nearQuery(NULL), addPointMeta(false), addDistMeta(false) {
- }
-
- // MatchExpression to apply to the index keys and fetched documents
- // Not owned here, owned by solution nodes
- MatchExpression* filter;
- // Index scan bounds, not including the geo bounds
- IndexBounds baseBounds;
-
- // Not owned here
- const GeoNearExpression* nearQuery;
- bool addPointMeta;
- bool addDistMeta;
- };
-
- /**
- * Implementation of GeoNear on top of a 2D index
- */
- class GeoNear2DStage : public NearStage {
- public:
-
- GeoNear2DStage(const GeoNearParams& nearParams,
- OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- IndexDescriptor* twoDIndex);
-
- virtual ~GeoNear2DStage();
-
- protected:
+/**
+ * Generic parameters for a GeoNear search
+ */
+struct GeoNearParams {
+ GeoNearParams() : filter(NULL), nearQuery(NULL), addPointMeta(false), addDistMeta(false) {}
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection);
+ // MatchExpression to apply to the index keys and fetched documents
+ // Not owned here, owned by solution nodes
+ MatchExpression* filter;
+ // Index scan bounds, not including the geo bounds
+ IndexBounds baseBounds;
- virtual StatusWith<double> computeDistance(WorkingSetMember* member);
+ // Not owned here
+ const GeoNearExpression* nearQuery;
+ bool addPointMeta;
+ bool addDistMeta;
+};
- virtual PlanStage::StageState initialize(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out);
+/**
+ * Implementation of GeoNear on top of a 2D index
+ */
+class GeoNear2DStage : public NearStage {
+public:
+ GeoNear2DStage(const GeoNearParams& nearParams,
+ OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ IndexDescriptor* twoDIndex);
- private:
+ virtual ~GeoNear2DStage();
- virtual void finishSaveState();
+protected:
+ virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection);
- virtual void finishRestoreState(OperationContext* txn);
+ virtual StatusWith<double> computeDistance(WorkingSetMember* member);
- virtual void finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type);
+ virtual PlanStage::StageState initialize(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out);
- const GeoNearParams _nearParams;
+private:
+ virtual void finishSaveState();
- // The 2D index we're searching over
- // Not owned here
- IndexDescriptor* const _twoDIndex;
+ virtual void finishRestoreState(OperationContext* txn);
- // The total search annulus
- const R2Annulus _fullBounds;
+ virtual void finishInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- // The current search annulus
- R2Annulus _currBounds;
+ const GeoNearParams _nearParams;
- // Amount to increment the next bounds by
- double _boundsIncrement;
+ // The 2D index we're searching over
+ // Not owned here
+ IndexDescriptor* const _twoDIndex;
- class DensityEstimator;
- std::unique_ptr<DensityEstimator> _densityEstimator;
- };
+ // The total search annulus
+ const R2Annulus _fullBounds;
- /**
- * Implementation of GeoNear on top of a 2DSphere (S2) index
- */
- class GeoNear2DSphereStage : public NearStage {
- public:
+ // The current search annulus
+ R2Annulus _currBounds;
- GeoNear2DSphereStage(const GeoNearParams& nearParams,
- OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- IndexDescriptor* s2Index);
+ // Amount to increment the next bounds by
+ double _boundsIncrement;
- virtual ~GeoNear2DSphereStage();
+ class DensityEstimator;
+ std::unique_ptr<DensityEstimator> _densityEstimator;
+};
- protected:
+/**
+ * Implementation of GeoNear on top of a 2DSphere (S2) index
+ */
+class GeoNear2DSphereStage : public NearStage {
+public:
+ GeoNear2DSphereStage(const GeoNearParams& nearParams,
+ OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ IndexDescriptor* s2Index);
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection);
+ virtual ~GeoNear2DSphereStage();
- virtual StatusWith<double> computeDistance(WorkingSetMember* member);
+protected:
+ virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection);
- virtual PlanStage::StageState initialize(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out);
+ virtual StatusWith<double> computeDistance(WorkingSetMember* member);
- private:
+ virtual PlanStage::StageState initialize(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out);
- virtual void finishSaveState();
+private:
+ virtual void finishSaveState();
- virtual void finishRestoreState(OperationContext* txn);
+ virtual void finishRestoreState(OperationContext* txn);
- virtual void finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type);
+ virtual void finishInvalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- const GeoNearParams _nearParams;
+ const GeoNearParams _nearParams;
- // The 2D index we're searching over
- // Not owned here
- IndexDescriptor* const _s2Index;
+ // The 2D index we're searching over
+ // Not owned here
+ IndexDescriptor* const _s2Index;
- // The total search annulus
- const R2Annulus _fullBounds;
+ // The total search annulus
+ const R2Annulus _fullBounds;
- // The current search annulus
- R2Annulus _currBounds;
+ // The current search annulus
+ R2Annulus _currBounds;
- // Amount to increment the next bounds by
- double _boundsIncrement;
+ // Amount to increment the next bounds by
+ double _boundsIncrement;
- class DensityEstimator;
- std::unique_ptr<DensityEstimator> _densityEstimator;
- };
+ class DensityEstimator;
+ std::unique_ptr<DensityEstimator> _densityEstimator;
+};
-} // namespace mongo
+} // namespace mongo
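The two stages declared above share the same shape: the NearStage base class drives the interval loop, and each subclass only supplies the next covered interval and a distance computation. A minimal, self-contained sketch of that pattern (not MongoDB code; FixedStepSearch and its 10-unit step are invented for illustration):

#include <algorithm>
#include <iostream>

struct Interval {
    double inner;
    double outer;
    bool last;
};

// Base class owns the search loop; subclasses decide how the annulus grows.
class NearSearchBase {
public:
    virtual ~NearSearchBase() = default;

    void run() {
        Interval interval{0, 0, false};
        do {
            interval = nextInterval();
            std::cout << "searching annulus [" << interval.inner << ", " << interval.outer
                      << ")\n";
        } while (!interval.last);
    }

protected:
    virtual Interval nextInterval() = 0;
};

class FixedStepSearch : public NearSearchBase {
public:
    explicit FixedStepSearch(double maxRadius) : _maxRadius(maxRadius) {}

protected:
    Interval nextInterval() override {
        const double inner = _outer;
        _outer = std::min(_outer + _step, _maxRadius);
        return {inner, _outer, _outer == _maxRadius};
    }

private:
    double _outer = 0;
    double _step = 10;
    double _maxRadius;
};

int main() {
    FixedStepSearch search(25);
    search.run();  // prints [0, 10), [10, 20), [20, 25)
    return 0;
}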
diff --git a/src/mongo/db/exec/group.cpp b/src/mongo/db/exec/group.cpp
index 68b21928b62..433b4802cdf 100644
--- a/src/mongo/db/exec/group.cpp
+++ b/src/mongo/db/exec/group.cpp
@@ -38,266 +38,261 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- namespace {
-
- // Helper function that extracts the group key from a BSONObj.
- Status getKey(const BSONObj& obj,
- const BSONObj& keyPattern,
- ScriptingFunction func,
- Scope* s,
- BSONObj* key) {
- if (func) {
- BSONObjBuilder b(obj.objsize() + 32);
- b.append("0", obj);
- const BSONObj& k = b.obj();
- int res = s->invoke(func, &k, 0);
- if (res != 0) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "invoke failed in $keyf: " << s->getError());
- }
- int type = s->type("__returnValue");
- if (type != Object) {
- return Status(ErrorCodes::BadValue, "return of $key has to be an object");
- }
- *key = s->getObject("__returnValue");
- return Status::OK();
- }
- *key = obj.extractFields(keyPattern, true).getOwned();
- return Status::OK();
+using std::unique_ptr;
+using std::vector;
+
+namespace {
+
+// Helper function that extracts the group key from a BSONObj.
+Status getKey(
+ const BSONObj& obj, const BSONObj& keyPattern, ScriptingFunction func, Scope* s, BSONObj* key) {
+ if (func) {
+ BSONObjBuilder b(obj.objsize() + 32);
+ b.append("0", obj);
+ const BSONObj& k = b.obj();
+ int res = s->invoke(func, &k, 0);
+ if (res != 0) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "invoke failed in $keyf: " << s->getError());
}
-
- } // namespace
-
- // static
- const char* GroupStage::kStageType = "GROUP";
-
- GroupStage::GroupStage(OperationContext* txn,
- const GroupRequest& request,
- WorkingSet* workingSet,
- PlanStage* child)
- : _txn(txn),
- _request(request),
- _ws(workingSet),
- _commonStats(kStageType),
- _specificStats(),
- _child(child),
- _groupState(GroupState_Initializing),
- _reduceFunction(0),
- _keyFunction(0) {}
-
- void GroupStage::initGroupScripting() {
- // Initialize _scope.
- const std::string userToken =
- AuthorizationSession::get(ClientBasic::getCurrent())
- ->getAuthenticatedUserNamesToken();
-
- const NamespaceString nss(_request.ns);
- _scope = globalScriptEngine->getPooledScope(_txn, nss.db().toString(), "group" + userToken);
- if (!_request.reduceScope.isEmpty()) {
- _scope->init(&_request.reduceScope);
- }
- _scope->setObject("$initial", _request.initial, true);
- _scope->exec("$reduce = " + _request.reduceCode, "$group reduce setup", false, true, true,
- 2 * 1000);
- _scope->exec("$arr = [];", "$group reduce setup 2", false, true, true, 2 * 1000);
-
- // Initialize _reduceFunction.
- _reduceFunction = _scope->createFunction("function(){ "
- " if ( $arr[n] == null ){ "
- " next = {}; "
- " Object.extend( next , $key ); "
- " Object.extend( next , $initial , true ); "
- " $arr[n] = next; "
- " next = null; "
- " } "
- " $reduce( obj , $arr[n] ); "
- "}");
-
- // Initialize _keyFunction, if a key function was provided.
- if (_request.keyFunctionCode.size()) {
- _keyFunction = _scope->createFunction(_request.keyFunctionCode.c_str());
+ int type = s->type("__returnValue");
+ if (type != Object) {
+ return Status(ErrorCodes::BadValue, "return of $key has to be an object");
}
+ *key = s->getObject("__returnValue");
+ return Status::OK();
}
+ *key = obj.extractFields(keyPattern, true).getOwned();
+ return Status::OK();
+}
+
+} // namespace
+
+// static
+const char* GroupStage::kStageType = "GROUP";
+
+GroupStage::GroupStage(OperationContext* txn,
+ const GroupRequest& request,
+ WorkingSet* workingSet,
+ PlanStage* child)
+ : _txn(txn),
+ _request(request),
+ _ws(workingSet),
+ _commonStats(kStageType),
+ _specificStats(),
+ _child(child),
+ _groupState(GroupState_Initializing),
+ _reduceFunction(0),
+ _keyFunction(0) {}
+
+void GroupStage::initGroupScripting() {
+ // Initialize _scope.
+ const std::string userToken =
+ AuthorizationSession::get(ClientBasic::getCurrent())->getAuthenticatedUserNamesToken();
+
+ const NamespaceString nss(_request.ns);
+ _scope = globalScriptEngine->getPooledScope(_txn, nss.db().toString(), "group" + userToken);
+ if (!_request.reduceScope.isEmpty()) {
+ _scope->init(&_request.reduceScope);
+ }
+ _scope->setObject("$initial", _request.initial, true);
+ _scope->exec(
+ "$reduce = " + _request.reduceCode, "$group reduce setup", false, true, true, 2 * 1000);
+ _scope->exec("$arr = [];", "$group reduce setup 2", false, true, true, 2 * 1000);
+
+ // Initialize _reduceFunction.
+ _reduceFunction = _scope->createFunction(
+ "function(){ "
+ " if ( $arr[n] == null ){ "
+ " next = {}; "
+ " Object.extend( next , $key ); "
+ " Object.extend( next , $initial , true ); "
+ " $arr[n] = next; "
+ " next = null; "
+ " } "
+ " $reduce( obj , $arr[n] ); "
+ "}");
+
+ // Initialize _keyFunction, if a key function was provided.
+ if (_request.keyFunctionCode.size()) {
+ _keyFunction = _scope->createFunction(_request.keyFunctionCode.c_str());
+ }
+}
- Status GroupStage::processObject(const BSONObj& obj) {
- BSONObj key;
- Status getKeyStatus = getKey(obj, _request.keyPattern, _keyFunction, _scope.get(),
- &key);
- if (!getKeyStatus.isOK()) {
- return getKeyStatus;
- }
-
- int& n = _groupMap[key];
- if (n == 0) {
- n = _groupMap.size();
- _scope->setObject("$key", key, true);
- if (n > 20000) {
- return Status(ErrorCodes::BadValue,
- "group() can't handle more than 20000 unique keys");
- }
- }
-
- _scope->setObject("obj", obj, true);
- _scope->setNumber("n", n - 1);
- if (_scope->invoke(_reduceFunction, 0, 0, 0, true)) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "reduce invoke failed: " << _scope->getError());
- }
-
- return Status::OK();
+Status GroupStage::processObject(const BSONObj& obj) {
+ BSONObj key;
+ Status getKeyStatus = getKey(obj, _request.keyPattern, _keyFunction, _scope.get(), &key);
+ if (!getKeyStatus.isOK()) {
+ return getKeyStatus;
}
- BSONObj GroupStage::finalizeResults() {
- if (!_request.finalize.empty()) {
- _scope->exec("$finalize = " + _request.finalize, "$group finalize define", false,
- true, true, 2 * 1000);
- ScriptingFunction finalizeFunction =
- _scope->createFunction("function(){ "
- " for(var i=0; i < $arr.length; i++){ "
- " var ret = $finalize($arr[i]); "
- " if (ret !== undefined) "
- " $arr[i] = ret; "
- " } "
- "}");
- _scope->invoke(finalizeFunction, 0, 0, 0, true);
+ int& n = _groupMap[key];
+ if (n == 0) {
+ n = _groupMap.size();
+ _scope->setObject("$key", key, true);
+ if (n > 20000) {
+ return Status(ErrorCodes::BadValue, "group() can't handle more than 20000 unique keys");
}
-
- _specificStats.nGroups = _groupMap.size();
-
- BSONObj results = _scope->getObject("$arr").getOwned();
-
- _scope->exec("$arr = [];", "$group reduce setup 2", false, true, true, 2 * 1000);
- _scope->gc();
-
- return results;
}
- PlanStage::StageState GroupStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+ _scope->setObject("obj", obj, true);
+ _scope->setNumber("n", n - 1);
+ if (_scope->invoke(_reduceFunction, 0, 0, 0, true)) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "reduce invoke failed: " << _scope->getError());
+ }
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ return Status::OK();
+}
- if (isEOF()) { return PlanStage::IS_EOF; }
+BSONObj GroupStage::finalizeResults() {
+ if (!_request.finalize.empty()) {
+ _scope->exec("$finalize = " + _request.finalize,
+ "$group finalize define",
+ false,
+ true,
+ true,
+ 2 * 1000);
+ ScriptingFunction finalizeFunction = _scope->createFunction(
+ "function(){ "
+ " for(var i=0; i < $arr.length; i++){ "
+ " var ret = $finalize($arr[i]); "
+ " if (ret !== undefined) "
+ " $arr[i] = ret; "
+ " } "
+ "}");
+ _scope->invoke(finalizeFunction, 0, 0, 0, true);
+ }
- // On the first call to work(), call initGroupScripting().
- if (_groupState == GroupState_Initializing) {
- initGroupScripting();
- _groupState = GroupState_ReadingFromChild;
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
+ _specificStats.nGroups = _groupMap.size();
- // Otherwise, read from our child.
- invariant(_groupState == GroupState_ReadingFromChild);
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState state = _child->work(&id);
+ BSONObj results = _scope->getObject("$arr").getOwned();
- if (PlanStage::NEED_TIME == state) {
- ++_commonStats.needTime;
- return state;
- }
- else if (PlanStage::NEED_YIELD == state) {
- ++_commonStats.needYield;
- *out = id;
- return state;
- }
- else if (PlanStage::FAILURE == state) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it failed, in which
- // case 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "group stage failed to read in results from child";
- *out = WorkingSetCommon::allocateStatusMember(_ws,
- Status(ErrorCodes::InternalError,
- errmsg));
- }
- return state;
- }
- else if (PlanStage::DEAD == state) {
- return state;
- }
- else if (PlanStage::ADVANCED == state) {
- WorkingSetMember* member = _ws->get(id);
- // Group queries can't have projections. This means that covering analysis will always
- // add a fetch. We should always get fetched data, and never just key data.
- invariant(member->hasObj());
-
- Status status = processObject(member->obj.value());
- if (!status.isOK()) {
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- return PlanStage::FAILURE;
- }
-
- _ws->free(id);
-
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else {
- // We're done reading from our child.
- invariant(PlanStage::IS_EOF == state);
+ _scope->exec("$arr = [];", "$group reduce setup 2", false, true, true, 2 * 1000);
+ _scope->gc();
- // Transition to state "done." Future calls to work() will return IS_EOF.
- _groupState = GroupState_Done;
+ return results;
+}
- BSONObj results = finalizeResults();
+PlanStage::StageState GroupStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- *out = _ws->allocate();
- WorkingSetMember* member = _ws->get(*out);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), results);
- member->state = WorkingSetMember::OWNED_OBJ;
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
}
- bool GroupStage::isEOF() {
- return _groupState == GroupState_Done;
+ // On the first call to work(), call initGroupScripting().
+ if (_groupState == GroupState_Initializing) {
+ initGroupScripting();
+ _groupState = GroupState_ReadingFromChild;
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
- void GroupStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- _child->saveState();
- }
+ // Otherwise, read from our child.
+ invariant(_groupState == GroupState_ReadingFromChild);
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState state = _child->work(&id);
+
+ if (PlanStage::NEED_TIME == state) {
+ ++_commonStats.needTime;
+ return state;
+ } else if (PlanStage::NEED_YIELD == state) {
+ ++_commonStats.needYield;
+ *out = id;
+ return state;
+ } else if (PlanStage::FAILURE == state) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it failed, in which
+ // case 'id' is valid. If ID is invalid, we create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ const std::string errmsg = "group stage failed to read in results from child";
+ *out = WorkingSetCommon::allocateStatusMember(
+ _ws, Status(ErrorCodes::InternalError, errmsg));
+ }
+ return state;
+ } else if (PlanStage::DEAD == state) {
+ return state;
+ } else if (PlanStage::ADVANCED == state) {
+ WorkingSetMember* member = _ws->get(id);
+ // Group queries can't have projections. This means that covering analysis will always
+ // add a fetch. We should always get fetched data, and never just key data.
+ invariant(member->hasObj());
+
+ Status status = processObject(member->obj.value());
+ if (!status.isOK()) {
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ return PlanStage::FAILURE;
+ }
- void GroupStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
- }
+ _ws->free(id);
- void GroupStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
- }
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else {
+ // We're done reading from our child.
+ invariant(PlanStage::IS_EOF == state);
- vector<PlanStage*> GroupStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
+ // Transition to state "done." Future calls to work() will return IS_EOF.
+ _groupState = GroupState_Done;
- PlanStageStats* GroupStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_GROUP));
- GroupStats* groupStats = new GroupStats(_specificStats);
- ret->specific.reset(groupStats);
- ret->children.push_back(_child->getStats());
- return ret.release();
- }
+ BSONObj results = finalizeResults();
- const CommonStats* GroupStage::getCommonStats() const {
- return &_commonStats;
- }
+ *out = _ws->allocate();
+ WorkingSetMember* member = _ws->get(*out);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), results);
+ member->state = WorkingSetMember::OWNED_OBJ;
- const SpecificStats* GroupStage::getSpecificStats() const {
- return &_specificStats;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
}
+}
+
+bool GroupStage::isEOF() {
+ return _groupState == GroupState_Done;
+}
+
+void GroupStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ _child->saveState();
+}
+
+void GroupStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+ _child->restoreState(opCtx);
+}
+
+void GroupStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
+}
+
+vector<PlanStage*> GroupStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* GroupStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_GROUP));
+ GroupStats* groupStats = new GroupStats(_specificStats);
+ ret->specific.reset(groupStats);
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+const CommonStats* GroupStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* GroupStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
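The work() loop above is a three-state machine; the state names come from the GroupState enum declared in group.h below. A self-contained sketch of the transitions (not MongoDB code; one step() call stands in for one call to work()):

#include <cassert>

enum GroupState { GroupState_Initializing, GroupState_ReadingFromChild, GroupState_Done };

// One transition per work() call, mirroring the flow in GroupStage::work() above.
GroupState step(GroupState state, bool childAtEof) {
    switch (state) {
        case GroupState_Initializing:
            return GroupState_ReadingFromChild;  // first call sets up scripting, returns NEED_TIME
        case GroupState_ReadingFromChild:
            // Each child document costs one NEED_TIME; child EOF triggers finalization.
            return childAtEof ? GroupState_Done : GroupState_ReadingFromChild;
        case GroupState_Done:
            return GroupState_Done;  // every later call reports IS_EOF
    }
    return state;
}

int main() {
    GroupState s = GroupState_Initializing;
    s = step(s, false);
    s = step(s, false);  // still consuming documents from the child
    s = step(s, true);   // child hits EOF: the single group result is emitted, stage is done
    assert(s == GroupState_Done);
    return 0;
}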
diff --git a/src/mongo/db/exec/group.h b/src/mongo/db/exec/group.h
index b9f525b95bb..49f5c881d84 100644
--- a/src/mongo/db/exec/group.h
+++ b/src/mongo/db/exec/group.h
@@ -34,135 +34,138 @@
namespace mongo {
- class Collection;
+class Collection;
- /**
- * A description of a request for a group operation. Copyable.
- */
- struct GroupRequest {
- // Namespace to operate on (e.g. "foo.bar").
- std::string ns;
+/**
+ * A description of a request for a group operation. Copyable.
+ */
+struct GroupRequest {
+ // Namespace to operate on (e.g. "foo.bar").
+ std::string ns;
- // A predicate describing the set of documents to group.
- BSONObj query;
+ // A predicate describing the set of documents to group.
+ BSONObj query;
- // The field(s) to group by. Alternative to "keyFunctionCode". Empty if "keyFunctionCode"
- // is being used instead.
- BSONObj keyPattern;
+ // The field(s) to group by. Alternative to "keyFunctionCode". Empty if "keyFunctionCode"
+ // is being used instead.
+ BSONObj keyPattern;
- // A Javascript function that maps a document to a key object. Alternative to "keyPattern".
-        // Empty if "keyPattern" is being used instead.
- std::string keyFunctionCode;
+ // A Javascript function that maps a document to a key object. Alternative to "keyPattern".
+    // Empty if "keyPattern" is being used instead.
+ std::string keyFunctionCode;
- // A Javascript function that takes a (input document, group result) pair and
- // updates the group result document.
- std::string reduceCode;
+ // A Javascript function that takes a (input document, group result) pair and
+ // updates the group result document.
+ std::string reduceCode;
- // Scope for the reduce function. Optional.
- BSONObj reduceScope;
+ // Scope for the reduce function. Optional.
+ BSONObj reduceScope;
- // The initial value for the group result.
- BSONObj initial;
+ // The initial value for the group result.
+ BSONObj initial;
- // A Javascript function that "finalizes" a group result. Optional.
- std::string finalize;
+ // A Javascript function that "finalizes" a group result. Optional.
+ std::string finalize;
- // Whether this is an explain of a group.
- bool explain;
- };
+ // Whether this is an explain of a group.
+ bool explain;
+};
- /**
- * Stage used by the group command. Consumes input documents from its child stage (returning
- * NEED_TIME once for each document produced by the child), returns ADVANCED exactly once with
- * the entire group result, then returns EOF.
- *
- * Only created through the getExecutorGroup path.
- */
- class GroupStage: public PlanStage {
- MONGO_DISALLOW_COPYING(GroupStage);
- public:
- GroupStage(OperationContext* txn,
- const GroupRequest& request,
- WorkingSet* workingSet,
- PlanStage* child);
- virtual ~GroupStage() { }
+/**
+ * Stage used by the group command. Consumes input documents from its child stage (returning
+ * NEED_TIME once for each document produced by the child), returns ADVANCED exactly once with
+ * the entire group result, then returns EOF.
+ *
+ * Only created through the getExecutorGroup path.
+ */
+class GroupStage : public PlanStage {
+ MONGO_DISALLOW_COPYING(GroupStage);
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+public:
+ GroupStage(OperationContext* txn,
+ const GroupRequest& request,
+ WorkingSet* workingSet,
+ PlanStage* child);
+ virtual ~GroupStage() {}
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual StageType stageType() const { return STAGE_GROUP; }
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual PlanStageStats* getStats();
+ virtual StageType stageType() const {
+ return STAGE_GROUP;
+ }
- virtual const CommonStats* getCommonStats() const;
+ virtual PlanStageStats* getStats();
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const CommonStats* getCommonStats() const;
- static const char* kStageType;
+ virtual const SpecificStats* getSpecificStats() const;
- private:
- /**
- * Keeps track of what this group is currently doing so that it can do the right thing on
- * the next call to work().
- */
- enum GroupState {
- // Need to initialize the underlying Javascript machinery.
- GroupState_Initializing,
+ static const char* kStageType;
- // Retrieving the next document from the child stage and processing it.
- GroupState_ReadingFromChild,
+private:
+ /**
+ * Keeps track of what this group is currently doing so that it can do the right thing on
+ * the next call to work().
+ */
+ enum GroupState {
+ // Need to initialize the underlying Javascript machinery.
+ GroupState_Initializing,
- // Results have been returned.
- GroupState_Done
- };
+ // Retrieving the next document from the child stage and processing it.
+ GroupState_ReadingFromChild,
- // Initializes _scope, _reduceFunction and _keyFunction using the global scripting engine.
- void initGroupScripting();
+ // Results have been returned.
+ GroupState_Done
+ };
- // Updates _groupMap and _scope to account for the group key associated with this object.
- // Returns an error status if an error occurred, else Status::OK().
- Status processObject(const BSONObj& obj);
+ // Initializes _scope, _reduceFunction and _keyFunction using the global scripting engine.
+ void initGroupScripting();
- // Finalize the results for this group operation. Returns an owned BSONObj with the results
- // array.
- BSONObj finalizeResults();
+ // Updates _groupMap and _scope to account for the group key associated with this object.
+ // Returns an error status if an error occurred, else Status::OK().
+ Status processObject(const BSONObj& obj);
- // Transactional context for read locks. Not owned by us.
- OperationContext* _txn;
+ // Finalize the results for this group operation. Returns an owned BSONObj with the results
+ // array.
+ BSONObj finalizeResults();
- GroupRequest _request;
+ // Transactional context for read locks. Not owned by us.
+ OperationContext* _txn;
- // The WorkingSet we annotate with results. Not owned by us.
- WorkingSet* _ws;
+ GroupRequest _request;
- CommonStats _commonStats;
- GroupStats _specificStats;
+ // The WorkingSet we annotate with results. Not owned by us.
+ WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
+ CommonStats _commonStats;
+ GroupStats _specificStats;
- // Current state for this stage.
- GroupState _groupState;
+ std::unique_ptr<PlanStage> _child;
- // The Scope object that all script operations for this group stage will use. Initialized
- // by initGroupScripting(). Owned here.
- std::unique_ptr<Scope> _scope;
+ // Current state for this stage.
+ GroupState _groupState;
- // The reduce function for the group operation. Initialized by initGroupScripting(). Owned
- // by _scope.
- ScriptingFunction _reduceFunction;
+ // The Scope object that all script operations for this group stage will use. Initialized
+ // by initGroupScripting(). Owned here.
+ std::unique_ptr<Scope> _scope;
- // The key function for the group operation if one was provided by the user, else 0.
- // Initialized by initGroupScripting(). Owned by _scope.
- ScriptingFunction _keyFunction;
+ // The reduce function for the group operation. Initialized by initGroupScripting(). Owned
+ // by _scope.
+ ScriptingFunction _reduceFunction;
- // Map from group key => group index. The group index is used to index into "$arr", a
- // variable owned by _scope which contains the group data for this key.
- std::map<BSONObj, int, BSONObjCmp> _groupMap;
- };
+ // The key function for the group operation if one was provided by the user, else 0.
+ // Initialized by initGroupScripting(). Owned by _scope.
+ ScriptingFunction _keyFunction;
+
+ // Map from group key => group index. The group index is used to index into "$arr", a
+ // variable owned by _scope which contains the group data for this key.
+ std::map<BSONObj, int, BSONObjCmp> _groupMap;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index 73a950e4dc2..dd5c36622e1 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -42,215 +42,221 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* IDHackStage::kStageType = "IDHACK";
-
- IDHackStage::IDHackStage(OperationContext* txn, const Collection* collection,
- CanonicalQuery* query, WorkingSet* ws)
- : _txn(txn),
- _collection(collection),
- _workingSet(ws),
- _key(query->getQueryObj()["_id"].wrap()),
- _done(false),
- _idBeingPagedIn(WorkingSet::INVALID_ID),
- _commonStats(kStageType) {
- if (NULL != query->getProj()) {
- _addKeyMetadata = query->getProj()->wantIndexKey();
- }
- else {
- _addKeyMetadata = false;
- }
+using std::unique_ptr;
+using std::vector;
+
+// static
+const char* IDHackStage::kStageType = "IDHACK";
+
+IDHackStage::IDHackStage(OperationContext* txn,
+ const Collection* collection,
+ CanonicalQuery* query,
+ WorkingSet* ws)
+ : _txn(txn),
+ _collection(collection),
+ _workingSet(ws),
+ _key(query->getQueryObj()["_id"].wrap()),
+ _done(false),
+ _idBeingPagedIn(WorkingSet::INVALID_ID),
+ _commonStats(kStageType) {
+ if (NULL != query->getProj()) {
+ _addKeyMetadata = query->getProj()->wantIndexKey();
+ } else {
+ _addKeyMetadata = false;
}
-
- IDHackStage::IDHackStage(OperationContext* txn, Collection* collection,
- const BSONObj& key, WorkingSet* ws)
- : _txn(txn),
- _collection(collection),
- _workingSet(ws),
- _key(key),
- _done(false),
- _addKeyMetadata(false),
- _idBeingPagedIn(WorkingSet::INVALID_ID),
- _commonStats(kStageType) { }
-
- IDHackStage::~IDHackStage() { }
-
- bool IDHackStage::isEOF() {
- if (WorkingSet::INVALID_ID != _idBeingPagedIn) {
- // We asked the parent for a page-in, but still haven't had a chance to return the
- // paged in document
- return false;
- }
-
- return _done;
+}
+
+IDHackStage::IDHackStage(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& key,
+ WorkingSet* ws)
+ : _txn(txn),
+ _collection(collection),
+ _workingSet(ws),
+ _key(key),
+ _done(false),
+ _addKeyMetadata(false),
+ _idBeingPagedIn(WorkingSet::INVALID_ID),
+ _commonStats(kStageType) {}
+
+IDHackStage::~IDHackStage() {}
+
+bool IDHackStage::isEOF() {
+ if (WorkingSet::INVALID_ID != _idBeingPagedIn) {
+ // We asked the parent for a page-in, but still haven't had a chance to return the
+ // paged in document
+ return false;
}
- PlanStage::StageState IDHackStage::work(WorkingSetID* out) {
- ++_commonStats.works;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ return _done;
+}
- if (_done) { return PlanStage::IS_EOF; }
+PlanStage::StageState IDHackStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- if (WorkingSet::INVALID_ID != _idBeingPagedIn) {
- invariant(_recordCursor);
- WorkingSetID id = _idBeingPagedIn;
- _idBeingPagedIn = WorkingSet::INVALID_ID;
- WorkingSetMember* member = _workingSet->get(id);
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- invariant(WorkingSetCommon::fetchIfUnfetched(_txn, member, _recordCursor));
+ if (_done) {
+ return PlanStage::IS_EOF;
+ }
- return advance(id, member, out);
- }
+ if (WorkingSet::INVALID_ID != _idBeingPagedIn) {
+ invariant(_recordCursor);
+ WorkingSetID id = _idBeingPagedIn;
+ _idBeingPagedIn = WorkingSet::INVALID_ID;
+ WorkingSetMember* member = _workingSet->get(id);
- WorkingSetID id = WorkingSet::INVALID_ID;
- try {
- // Use the index catalog to get the id index.
- const IndexCatalog* catalog = _collection->getIndexCatalog();
-
- // Find the index we use.
- IndexDescriptor* idDesc = catalog->findIdIndex(_txn);
- if (NULL == idDesc) {
- _done = true;
- return PlanStage::IS_EOF;
- }
-
- // Look up the key by going directly to the index.
- RecordId loc = catalog->getIndex(idDesc)->findSingle(_txn, _key);
-
- // Key not found.
- if (loc.isNull()) {
- _done = true;
- return PlanStage::IS_EOF;
- }
-
- ++_specificStats.keysExamined;
- ++_specificStats.docsExamined;
-
- // Create a new WSM for the result document.
- id = _workingSet->allocate();
- WorkingSetMember* member = _workingSet->get(id);
- member->state = WorkingSetMember::LOC_AND_IDX;
- member->loc = loc;
-
- if (!_recordCursor) _recordCursor = _collection->getCursor(_txn);
-
- // We may need to request a yield while we fetch the document.
- if (auto fetcher = _recordCursor->fetcherForId(loc)) {
- // There's something to fetch. Hand the fetcher off to the WSM, and pass up a
- // fetch request.
- _idBeingPagedIn = id;
- member->setFetcher(fetcher.release());
- *out = id;
- _commonStats.needYield++;
- return NEED_YIELD;
- }
-
- // The doc was already in memory, so we go ahead and return it.
- if (!WorkingSetCommon::fetch(_txn, member, _recordCursor)) {
- // _id is immutable so the index would return the only record that could
- // possibly match the query.
- _workingSet->free(id);
- _commonStats.isEOF = true;
- _done = true;
- return IS_EOF;
- }
-
- return advance(id, member, out);
- }
- catch (const WriteConflictException& wce) {
- // Restart at the beginning on retry.
- _recordCursor.reset();
- if (id != WorkingSet::INVALID_ID)
- _workingSet->free(id);
+ invariant(WorkingSetCommon::fetchIfUnfetched(_txn, member, _recordCursor));
- *out = WorkingSet::INVALID_ID;
- _commonStats.needYield++;
- return NEED_YIELD;
- }
+ return advance(id, member, out);
}
- PlanStage::StageState IDHackStage::advance(WorkingSetID id,
- WorkingSetMember* member,
- WorkingSetID* out) {
- invariant(member->hasObj());
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ try {
+ // Use the index catalog to get the id index.
+ const IndexCatalog* catalog = _collection->getIndexCatalog();
- if (_addKeyMetadata) {
- BSONObjBuilder bob;
- BSONObj ownedKeyObj = member->obj.value()["_id"].wrap().getOwned();
- bob.appendKeys(_key, ownedKeyObj);
- member->addComputed(new IndexKeyComputedData(bob.obj()));
+ // Find the index we use.
+ IndexDescriptor* idDesc = catalog->findIdIndex(_txn);
+ if (NULL == idDesc) {
+ _done = true;
+ return PlanStage::IS_EOF;
}
- _done = true;
- ++_commonStats.advanced;
- *out = id;
- return PlanStage::ADVANCED;
- }
-
- void IDHackStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- if (_recordCursor) _recordCursor->saveUnpositioned();
- }
+ // Look up the key by going directly to the index.
+ RecordId loc = catalog->getIndex(idDesc)->findSingle(_txn, _key);
- void IDHackStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
- if (_recordCursor) _recordCursor->restore(opCtx);
- }
-
- void IDHackStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
+ // Key not found.
+ if (loc.isNull()) {
+ _done = true;
+ return PlanStage::IS_EOF;
+ }
- // Since updates can't mutate the '_id' field, we can ignore mutation invalidations.
- if (INVALIDATION_MUTATION == type) {
- return;
+ ++_specificStats.keysExamined;
+ ++_specificStats.docsExamined;
+
+ // Create a new WSM for the result document.
+ id = _workingSet->allocate();
+ WorkingSetMember* member = _workingSet->get(id);
+ member->state = WorkingSetMember::LOC_AND_IDX;
+ member->loc = loc;
+
+ if (!_recordCursor)
+ _recordCursor = _collection->getCursor(_txn);
+
+ // We may need to request a yield while we fetch the document.
+ if (auto fetcher = _recordCursor->fetcherForId(loc)) {
+ // There's something to fetch. Hand the fetcher off to the WSM, and pass up a
+ // fetch request.
+ _idBeingPagedIn = id;
+ member->setFetcher(fetcher.release());
+ *out = id;
+ _commonStats.needYield++;
+ return NEED_YIELD;
}
- // It's possible that the loc getting invalidated is the one we're about to
- // fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
- if (WorkingSet::INVALID_ID != _idBeingPagedIn) {
- WorkingSetMember* member = _workingSet->get(_idBeingPagedIn);
- if (member->hasLoc() && (member->loc == dl)) {
- // Fetch it now and kill the diskloc.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- }
+ // The doc was already in memory, so we go ahead and return it.
+ if (!WorkingSetCommon::fetch(_txn, member, _recordCursor)) {
+ // _id is immutable so the index would return the only record that could
+ // possibly match the query.
+ _workingSet->free(id);
+ _commonStats.isEOF = true;
+ _done = true;
+ return IS_EOF;
}
- }
- // static
- bool IDHackStage::supportsQuery(const CanonicalQuery& query) {
- return !query.getParsed().showRecordId()
- && query.getParsed().getHint().isEmpty()
- && 0 == query.getParsed().getSkip()
- && CanonicalQuery::isSimpleIdQuery(query.getParsed().getFilter())
- && !query.getParsed().isTailable();
- }
+ return advance(id, member, out);
+ } catch (const WriteConflictException& wce) {
+ // Restart at the beginning on retry.
+ _recordCursor.reset();
+ if (id != WorkingSet::INVALID_ID)
+ _workingSet->free(id);
- vector<PlanStage*> IDHackStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
+ *out = WorkingSet::INVALID_ID;
+ _commonStats.needYield++;
+ return NEED_YIELD;
}
-
- PlanStageStats* IDHackStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_IDHACK));
- ret->specific.reset(new IDHackStats(_specificStats));
- return ret.release();
+}
+
+PlanStage::StageState IDHackStage::advance(WorkingSetID id,
+ WorkingSetMember* member,
+ WorkingSetID* out) {
+ invariant(member->hasObj());
+
+ if (_addKeyMetadata) {
+ BSONObjBuilder bob;
+ BSONObj ownedKeyObj = member->obj.value()["_id"].wrap().getOwned();
+ bob.appendKeys(_key, ownedKeyObj);
+ member->addComputed(new IndexKeyComputedData(bob.obj()));
}
- const CommonStats* IDHackStage::getCommonStats() const {
- return &_commonStats;
+ _done = true;
+ ++_commonStats.advanced;
+ *out = id;
+ return PlanStage::ADVANCED;
+}
+
+void IDHackStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ if (_recordCursor)
+ _recordCursor->saveUnpositioned();
+}
+
+void IDHackStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+ if (_recordCursor)
+ _recordCursor->restore(opCtx);
+}
+
+void IDHackStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+
+ // Since updates can't mutate the '_id' field, we can ignore mutation invalidations.
+ if (INVALIDATION_MUTATION == type) {
+ return;
}
- const SpecificStats* IDHackStage::getSpecificStats() const {
- return &_specificStats;
+ // It's possible that the loc getting invalidated is the one we're about to
+ // fetch. In this case we do a "forced fetch" and put the WSM in owned object state.
+ if (WorkingSet::INVALID_ID != _idBeingPagedIn) {
+ WorkingSetMember* member = _workingSet->get(_idBeingPagedIn);
+ if (member->hasLoc() && (member->loc == dl)) {
+ // Fetch it now and kill the diskloc.
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ }
}
+}
+
+// static
+bool IDHackStage::supportsQuery(const CanonicalQuery& query) {
+ return !query.getParsed().showRecordId() && query.getParsed().getHint().isEmpty() &&
+ 0 == query.getParsed().getSkip() &&
+ CanonicalQuery::isSimpleIdQuery(query.getParsed().getFilter()) &&
+ !query.getParsed().isTailable();
+}
+
+vector<PlanStage*> IDHackStage::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
+
+PlanStageStats* IDHackStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_IDHACK));
+ ret->specific.reset(new IDHackStats(_specificStats));
+ return ret.release();
+}
+
+const CommonStats* IDHackStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* IDHackStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
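supportsQuery() above gates the _id fast path on a handful of properties of the parsed query. A standalone restatement of those checks (not MongoDB code; ParsedQueryFlags is an invented stand-in for the parsed-query accessors such as getParsed().showRecordId()):

#include <cassert>

// Invented stand-in for the parsed-query properties tested by supportsQuery() above.
struct ParsedQueryFlags {
    bool showRecordId;
    bool hasHint;
    long long skip;
    bool isSimpleIdQuery;  // e.g. {_id: <value>} with no operators
    bool isTailable;
};

bool idHackEligible(const ParsedQueryFlags& q) {
    return !q.showRecordId && !q.hasHint && q.skip == 0 && q.isSimpleIdQuery && !q.isTailable;
}

int main() {
    assert(idHackEligible({false, false, 0, true, false}));   // plain point lookup on _id
    assert(!idHackEligible({false, false, 5, true, false}));  // a skip disqualifies the query
    return 0;
}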
diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h
index 5430addd12b..b4dc87c1b84 100644
--- a/src/mongo/db/exec/idhack.h
+++ b/src/mongo/db/exec/idhack.h
@@ -37,83 +37,86 @@
namespace mongo {
- class RecordCursor;
+class RecordCursor;
- /**
- * A standalone stage implementing the fast path for key-value retrievals
- * via the _id index.
- */
- class IDHackStage : public PlanStage {
- public:
-        /** Takes ownership of all the arguments except the collection. */
- IDHackStage(OperationContext* txn, const Collection* collection,
- CanonicalQuery* query, WorkingSet* ws);
+/**
+ * A standalone stage implementing the fast path for key-value retrievals
+ * via the _id index.
+ */
+class IDHackStage : public PlanStage {
+public:
+    /** Takes ownership of all the arguments except the collection. */
+ IDHackStage(OperationContext* txn,
+ const Collection* collection,
+ CanonicalQuery* query,
+ WorkingSet* ws);
- IDHackStage(OperationContext* txn, Collection* collection,
- const BSONObj& key, WorkingSet* ws);
+ IDHackStage(OperationContext* txn, Collection* collection, const BSONObj& key, WorkingSet* ws);
- virtual ~IDHackStage();
+ virtual ~IDHackStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- /**
-         * ID Hack has very strict criteria for the queries it supports.
- */
- static bool supportsQuery(const CanonicalQuery& query);
+ /**
+     * ID Hack has very strict criteria for the queries it supports.
+ */
+ static bool supportsQuery(const CanonicalQuery& query);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_IDHACK; }
+ virtual StageType stageType() const {
+ return STAGE_IDHACK;
+ }
- PlanStageStats* getStats();
+ PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- /**
- * Marks this stage as done, optionally adds key metadata, and returns PlanStage::ADVANCED.
- *
- * Called whenever we have a WSM containing the matching obj.
- */
- StageState advance(WorkingSetID id, WorkingSetMember* member, WorkingSetID* out);
+private:
+ /**
+ * Marks this stage as done, optionally adds key metadata, and returns PlanStage::ADVANCED.
+ *
+ * Called whenever we have a WSM containing the matching obj.
+ */
+ StageState advance(WorkingSetID id, WorkingSetMember* member, WorkingSetID* out);
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
- // Not owned here.
- const Collection* _collection;
+ // Not owned here.
+ const Collection* _collection;
- std::unique_ptr<RecordCursor> _recordCursor;
+ std::unique_ptr<RecordCursor> _recordCursor;
- // The WorkingSet we annotate with results. Not owned by us.
- WorkingSet* _workingSet;
+ // The WorkingSet we annotate with results. Not owned by us.
+ WorkingSet* _workingSet;
- // The value to match against the _id field.
- BSONObj _key;
+ // The value to match against the _id field.
+ BSONObj _key;
- // Have we returned our one document?
- bool _done;
+ // Have we returned our one document?
+ bool _done;
- // Do we need to add index key metadata for $returnKey?
- bool _addKeyMetadata;
+ // Do we need to add index key metadata for $returnKey?
+ bool _addKeyMetadata;
- // If we want to return a RecordId and it points to something that's not in memory,
- // we return a "please page this in" result. We add a RecordFetcher given back to us by the
- // storage engine to the WSM. The RecordFetcher is used by the PlanExecutor when it handles
- // the fetch request.
- WorkingSetID _idBeingPagedIn;
+ // If we want to return a RecordId and it points to something that's not in memory,
+ // we return a "please page this in" result. We add a RecordFetcher given back to us by the
+ // storage engine to the WSM. The RecordFetcher is used by the PlanExecutor when it handles
+ // the fetch request.
+ WorkingSetID _idBeingPagedIn;
- CommonStats _commonStats;
- IDHackStats _specificStats;
- };
+ CommonStats _commonStats;
+ IDHackStats _specificStats;
+};
} // namespace mongo
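The _idBeingPagedIn member documented above implements a small resume protocol: the stage parks the working-set id, asks its parent to yield while the record is paged in, and finishes on the next work() call. A self-contained sketch of that handshake (not MongoDB code; Stage and its fields are invented):

#include <cassert>
#include <optional>

enum class Result { NeedYield, Advanced };

struct Stage {
    std::optional<int> idBeingPagedIn;  // parked working-set id, if any

    Result work(int id, bool recordInMemory) {
        if (idBeingPagedIn) {
            // A previous call requested a page-in; hand the parked document back now.
            idBeingPagedIn.reset();
            return Result::Advanced;
        }
        if (!recordInMemory) {
            // The record needs to be paged in: park the id and ask the caller to yield.
            idBeingPagedIn = id;
            return Result::NeedYield;
        }
        return Result::Advanced;
    }
};

int main() {
    Stage stage;
    assert(stage.work(7, /*recordInMemory=*/false) == Result::NeedYield);
    assert(stage.work(7, /*recordInMemory=*/true) == Result::Advanced);
    return 0;
}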
diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp
index 0dda64c99d2..b7a963d3fd8 100644
--- a/src/mongo/db/exec/index_scan.cpp
+++ b/src/mongo/db/exec/index_scan.cpp
@@ -43,133 +43,130 @@
namespace {
- // Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
- int sgn(int i) {
- if (i == 0)
- return 0;
- return i > 0 ? 1 : -1;
- }
+// Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
+int sgn(int i) {
+ if (i == 0)
+ return 0;
+ return i > 0 ? 1 : -1;
+}
} // namespace
namespace mongo {
- // static
- const char* IndexScan::kStageType = "IXSCAN";
-
- IndexScan::IndexScan(OperationContext* txn,
- const IndexScanParams& params,
- WorkingSet* workingSet,
- const MatchExpression* filter)
- : _txn(txn),
- _workingSet(workingSet),
- _iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
- _keyPattern(params.descriptor->keyPattern().getOwned()),
- _scanState(INITIALIZING),
- _filter(filter),
- _shouldDedup(true),
- _forward(params.direction == 1),
- _params(params),
- _commonStats(kStageType),
- _endKeyInclusive(false) {
-
- // We can't always access the descriptor in the call to getStats() so we pull
- // any info we need for stats reporting out here.
- _specificStats.keyPattern = _keyPattern;
- _specificStats.indexName = _params.descriptor->indexName();
- _specificStats.isMultiKey = _params.descriptor->isMultikey(_txn);
- _specificStats.isUnique = _params.descriptor->unique();
- _specificStats.isSparse = _params.descriptor->isSparse();
- _specificStats.isPartial = _params.descriptor->isPartial();
- _specificStats.indexVersion = _params.descriptor->version();
+// static
+const char* IndexScan::kStageType = "IXSCAN";
+
+IndexScan::IndexScan(OperationContext* txn,
+ const IndexScanParams& params,
+ WorkingSet* workingSet,
+ const MatchExpression* filter)
+ : _txn(txn),
+ _workingSet(workingSet),
+ _iam(params.descriptor->getIndexCatalog()->getIndex(params.descriptor)),
+ _keyPattern(params.descriptor->keyPattern().getOwned()),
+ _scanState(INITIALIZING),
+ _filter(filter),
+ _shouldDedup(true),
+ _forward(params.direction == 1),
+ _params(params),
+ _commonStats(kStageType),
+ _endKeyInclusive(false) {
+ // We can't always access the descriptor in the call to getStats() so we pull
+ // any info we need for stats reporting out here.
+ _specificStats.keyPattern = _keyPattern;
+ _specificStats.indexName = _params.descriptor->indexName();
+ _specificStats.isMultiKey = _params.descriptor->isMultikey(_txn);
+ _specificStats.isUnique = _params.descriptor->unique();
+ _specificStats.isSparse = _params.descriptor->isSparse();
+ _specificStats.isPartial = _params.descriptor->isPartial();
+ _specificStats.indexVersion = _params.descriptor->version();
+}
+
+boost::optional<IndexKeyEntry> IndexScan::initIndexScan() {
+ if (_params.doNotDedup) {
+ _shouldDedup = false;
+ } else {
+ // TODO it is incorrect to rely on this not changing. SERVER-17678
+ _shouldDedup = _params.descriptor->isMultikey(_txn);
}
- boost::optional<IndexKeyEntry> IndexScan::initIndexScan() {
- if (_params.doNotDedup) {
- _shouldDedup = false;
- }
- else {
- // TODO it is incorrect to rely on this not changing. SERVER-17678
- _shouldDedup = _params.descriptor->isMultikey(_txn);
- }
+ // Perform the possibly heavy-duty initialization of the underlying index cursor.
+ _indexCursor = _iam->newCursor(_txn, _forward);
+
+ if (_params.bounds.isSimpleRange) {
+ // Start at one key, end at another.
+ _endKey = _params.bounds.endKey;
+ _endKeyInclusive = _params.bounds.endKeyInclusive;
+ _indexCursor->setEndPosition(_endKey, _endKeyInclusive);
+ return _indexCursor->seek(_params.bounds.startKey, /*inclusive*/ true);
+ } else {
+ // For single intervals, we can use an optimized scan which checks against the position
+ // of an end cursor. For all other index scans, we fall back on using
+ // IndexBoundsChecker to determine when we've finished the scan.
+ BSONObj startKey;
+ bool startKeyInclusive;
+ if (IndexBoundsBuilder::isSingleInterval(
+ _params.bounds, &startKey, &startKeyInclusive, &_endKey, &_endKeyInclusive)) {
+ _indexCursor->setEndPosition(_endKey, _endKeyInclusive);
+ return _indexCursor->seek(startKey, startKeyInclusive);
+ } else {
+ _checker.reset(new IndexBoundsChecker(&_params.bounds, _keyPattern, _params.direction));
- // Perform the possibly heavy-duty initialization of the underlying index cursor.
- _indexCursor = _iam->newCursor(_txn, _forward);
+ if (!_checker->getStartSeekPoint(&_seekPoint))
+ return boost::none;
- if (_params.bounds.isSimpleRange) {
- // Start at one key, end at another.
- _endKey = _params.bounds.endKey;
- _endKeyInclusive = _params.bounds.endKeyInclusive;
- _indexCursor->setEndPosition(_endKey, _endKeyInclusive);
- return _indexCursor->seek(_params.bounds.startKey, /*inclusive*/true);
- }
- else {
- // For single intervals, we can use an optimized scan which checks against the position
- // of an end cursor. For all other index scans, we fall back on using
- // IndexBoundsChecker to determine when we've finished the scan.
- BSONObj startKey;
- bool startKeyInclusive;
- if (IndexBoundsBuilder::isSingleInterval(_params.bounds,
- &startKey,
- &startKeyInclusive,
- &_endKey,
- &_endKeyInclusive)) {
-
- _indexCursor->setEndPosition(_endKey, _endKeyInclusive);
- return _indexCursor->seek(startKey, startKeyInclusive);
- }
- else {
- _checker.reset(new IndexBoundsChecker(&_params.bounds,
- _keyPattern,
- _params.direction));
-
- if (!_checker->getStartSeekPoint(&_seekPoint))
- return boost::none;
-
- return _indexCursor->seek(_seekPoint);
- }
+ return _indexCursor->seek(_seekPoint);
}
}
+}
+
+PlanStage::StageState IndexScan::work(WorkingSetID* out) {
+ ++_commonStats.works;
+
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- PlanStage::StageState IndexScan::work(WorkingSetID* out) {
- ++_commonStats.works;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- // Get the next kv pair from the index, if any.
- boost::optional<IndexKeyEntry> kv;
- try {
- switch (_scanState) {
- case INITIALIZING: kv = initIndexScan(); break;
- case GETTING_NEXT: kv = _indexCursor->next(); break;
- case NEED_SEEK: kv = _indexCursor->seek(_seekPoint); break;
- case HIT_END: return PlanStage::IS_EOF;
- }
+ // Get the next kv pair from the index, if any.
+ boost::optional<IndexKeyEntry> kv;
+ try {
+ switch (_scanState) {
+ case INITIALIZING:
+ kv = initIndexScan();
+ break;
+ case GETTING_NEXT:
+ kv = _indexCursor->next();
+ break;
+ case NEED_SEEK:
+ kv = _indexCursor->seek(_seekPoint);
+ break;
+ case HIT_END:
+ return PlanStage::IS_EOF;
}
- catch (const WriteConflictException& wce) {
- *out = WorkingSet::INVALID_ID;
- return PlanStage::NEED_YIELD;
+ } catch (const WriteConflictException& wce) {
+ *out = WorkingSet::INVALID_ID;
+ return PlanStage::NEED_YIELD;
+ }
+
+ if (kv) {
+ // In debug mode, check that the cursor isn't lying to us.
+ if (kDebugBuild && !_endKey.isEmpty()) {
+ int cmp = kv->key.woCompare(_endKey,
+ Ordering::make(_params.descriptor->keyPattern()),
+ /*compareFieldNames*/ false);
+ if (cmp == 0)
+ dassert(_endKeyInclusive);
+ dassert(_forward ? cmp <= 0 : cmp >= 0);
}
- if (kv) {
- // In debug mode, check that the cursor isn't lying to us.
- if (kDebugBuild && !_endKey.isEmpty()) {
- int cmp = kv->key.woCompare(_endKey,
- Ordering::make(_params.descriptor->keyPattern()),
- /*compareFieldNames*/false);
- if (cmp == 0) dassert(_endKeyInclusive);
- dassert(_forward ? cmp <= 0 : cmp >= 0);
- }
-
- ++_specificStats.keysExamined;
- if (_params.maxScan && _specificStats.keysExamined >= _params.maxScan) {
- kv = boost::none;
- }
+ ++_specificStats.keysExamined;
+ if (_params.maxScan && _specificStats.keysExamined >= _params.maxScan) {
+ kv = boost::none;
}
+ }
- if (kv && _checker) {
- switch (_checker->checkKey(kv->key, &_seekPoint)) {
+ if (kv && _checker) {
+ switch (_checker->checkKey(kv->key, &_seekPoint)) {
case IndexBoundsChecker::VALID:
break;
@@ -181,138 +178,141 @@ namespace mongo {
_scanState = NEED_SEEK;
_commonStats.needTime++;
return PlanStage::NEED_TIME;
- }
}
+ }
- if (!kv) {
- _scanState = HIT_END;
- _commonStats.isEOF = true;
- _indexCursor.reset();
- return PlanStage::IS_EOF;
- }
+ if (!kv) {
+ _scanState = HIT_END;
+ _commonStats.isEOF = true;
+ _indexCursor.reset();
+ return PlanStage::IS_EOF;
+ }
- _scanState = GETTING_NEXT;
+ _scanState = GETTING_NEXT;
- if (_shouldDedup) {
- ++_specificStats.dupsTested;
- if (!_returned.insert(kv->loc).second) {
- // We've seen this RecordId before. Skip it this time.
- ++_specificStats.dupsDropped;
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
+ if (_shouldDedup) {
+ ++_specificStats.dupsTested;
+ if (!_returned.insert(kv->loc).second) {
+ // We've seen this RecordId before. Skip it this time.
+ ++_specificStats.dupsDropped;
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
+ }
- if (_filter) {
- if (!Filter::passes(kv->key, _keyPattern, _filter)) {
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
+ if (_filter) {
+ if (!Filter::passes(kv->key, _keyPattern, _filter)) {
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
+ }
- if (!kv->key.isOwned()) kv->key = kv->key.getOwned();
-
- // We found something to return, so fill out the WSM.
- WorkingSetID id = _workingSet->allocate();
- WorkingSetMember* member = _workingSet->get(id);
- member->loc = kv->loc;
- member->keyData.push_back(IndexKeyDatum(_keyPattern, kv->key, _iam));
- member->state = WorkingSetMember::LOC_AND_IDX;
+ if (!kv->key.isOwned())
+ kv->key = kv->key.getOwned();
- if (_params.addKeyMetadata) {
- BSONObjBuilder bob;
- bob.appendKeys(_keyPattern, kv->key);
- member->addComputed(new IndexKeyComputedData(bob.obj()));
- }
+ // We found something to return, so fill out the WSM.
+ WorkingSetID id = _workingSet->allocate();
+ WorkingSetMember* member = _workingSet->get(id);
+ member->loc = kv->loc;
+ member->keyData.push_back(IndexKeyDatum(_keyPattern, kv->key, _iam));
+ member->state = WorkingSetMember::LOC_AND_IDX;
- *out = id;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
+ if (_params.addKeyMetadata) {
+ BSONObjBuilder bob;
+ bob.appendKeys(_keyPattern, kv->key);
+ member->addComputed(new IndexKeyComputedData(bob.obj()));
}
- bool IndexScan::isEOF() {
- return _commonStats.isEOF;
- }
+ *out = id;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+}
- void IndexScan::saveState() {
- if (!_txn) {
- // We were already saved. Nothing to do.
- return;
- }
+bool IndexScan::isEOF() {
+ return _commonStats.isEOF;
+}
- _txn = NULL;
- ++_commonStats.yields;
- if (!_indexCursor) return;
+void IndexScan::saveState() {
+ if (!_txn) {
+ // We were already saved. Nothing to do.
+ return;
+ }
- if (_scanState == NEED_SEEK) {
- _indexCursor->saveUnpositioned();
- return;
- }
+ _txn = NULL;
+ ++_commonStats.yields;
+ if (!_indexCursor)
+ return;
- _indexCursor->savePositioned();
+ if (_scanState == NEED_SEEK) {
+ _indexCursor->saveUnpositioned();
+ return;
}
- void IndexScan::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
+ _indexCursor->savePositioned();
+}
- if (_indexCursor) _indexCursor->restore(opCtx);
- }
+void IndexScan::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
- void IndexScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
+ if (_indexCursor)
+ _indexCursor->restore(opCtx);
+}
- // The only state we're responsible for holding is what RecordIds to drop. If a document
- // mutates the underlying index cursor will deal with it.
- if (INVALIDATION_MUTATION == type) {
- return;
- }
+void IndexScan::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
- // If we see this RecordId again, it may not be the same document it was before, so we want
- // to return it if we see it again.
- unordered_set<RecordId, RecordId::Hasher>::iterator it = _returned.find(dl);
- if (it != _returned.end()) {
- ++_specificStats.seenInvalidated;
- _returned.erase(it);
- }
+ // The only state we're responsible for holding is what RecordIds to drop. If a document
+    // mutates, the underlying index cursor will deal with it.
+ if (INVALIDATION_MUTATION == type) {
+ return;
}
- std::vector<PlanStage*> IndexScan::getChildren() const {
- return {};
+ // If we see this RecordId again, it may not be the same document it was before, so we want
+ // to return it if we see it again.
+ unordered_set<RecordId, RecordId::Hasher>::iterator it = _returned.find(dl);
+ if (it != _returned.end()) {
+ ++_specificStats.seenInvalidated;
+ _returned.erase(it);
}
+}
- PlanStageStats* IndexScan::getStats() {
- // WARNING: this could be called even if the collection was dropped. Do not access any
- // catalog information here.
+std::vector<PlanStage*> IndexScan::getChildren() const {
+ return {};
+}
- // Add a BSON representation of the filter to the stats tree, if there is one.
- if (NULL != _filter) {
- BSONObjBuilder bob;
- _filter->toBSON(&bob);
- _commonStats.filter = bob.obj();
- }
+PlanStageStats* IndexScan::getStats() {
+ // WARNING: this could be called even if the collection was dropped. Do not access any
+ // catalog information here.
- // These specific stats fields never change.
- if (_specificStats.indexType.empty()) {
- _specificStats.indexType = "BtreeCursor"; // TODO amName;
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
+ }
- _specificStats.indexBounds = _params.bounds.toBSON();
+ // These specific stats fields never change.
+ if (_specificStats.indexType.empty()) {
+ _specificStats.indexType = "BtreeCursor"; // TODO amName;
- _specificStats.direction = _params.direction;
- }
+ _specificStats.indexBounds = _params.bounds.toBSON();
- std::unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_IXSCAN));
- ret->specific.reset(new IndexScanStats(_specificStats));
- return ret.release();
+ _specificStats.direction = _params.direction;
}
- const CommonStats* IndexScan::getCommonStats() const {
- return &_commonStats;
- }
+ std::unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_IXSCAN));
+ ret->specific.reset(new IndexScanStats(_specificStats));
+ return ret.release();
+}
- const SpecificStats* IndexScan::getSpecificStats() const {
- return &_specificStats;
- }
+const CommonStats* IndexScan::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* IndexScan::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
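Aside: the StageState protocol that IndexScan::work() implements above (ADVANCED, NEED_TIME, NEED_YIELD, IS_EOF, plus the FAILURE/DEAD codes handled by other stages in this diff) is easiest to see from the caller's side. Below is a minimal sketch of a driver loop, assuming the mongo/db/exec/plan_stage.h and working_set.h headers; the drainStage helper is hypothetical and the snippet builds only inside the server tree, so treat it as illustration rather than repository code.

    #include "mongo/db/exec/plan_stage.h"    // assumed header locations
    #include "mongo/db/exec/working_set.h"

    #include <vector>

    namespace {
    // Hypothetical helper: run a stage to completion and collect the WorkingSetIDs it advances.
    std::vector<mongo::WorkingSetID> drainStage(mongo::PlanStage* stage) {
        std::vector<mongo::WorkingSetID> results;
        while (!stage->isEOF()) {
            mongo::WorkingSetID id = mongo::WorkingSet::INVALID_ID;
            mongo::PlanStage::StageState state = stage->work(&id);
            if (mongo::PlanStage::ADVANCED == state) {
                results.push_back(id);  // the stage produced a result for us
            } else if (mongo::PlanStage::NEED_YIELD == state) {
                // a real executor would yield locks / handle the write conflict before retrying
            } else if (mongo::PlanStage::FAILURE == state || mongo::PlanStage::DEAD == state) {
                break;  // 'id' may reference a status member describing the failure
            }
            // NEED_TIME: no result yet; simply call work() again
        }
        return results;
    }
    }  // namespace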
diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h
index 555b00c4beb..d415c3b985b 100644
--- a/src/mongo/db/exec/index_scan.h
+++ b/src/mongo/db/exec/index_scan.h
@@ -41,145 +41,143 @@
namespace mongo {
- class IndexAccessMethod;
- class IndexDescriptor;
- class WorkingSet;
+class IndexAccessMethod;
+class IndexDescriptor;
+class WorkingSet;
- struct IndexScanParams {
- IndexScanParams() : descriptor(NULL),
- direction(1),
- doNotDedup(false),
- maxScan(0),
- addKeyMetadata(false) { }
+struct IndexScanParams {
+ IndexScanParams()
+ : descriptor(NULL), direction(1), doNotDedup(false), maxScan(0), addKeyMetadata(false) {}
- const IndexDescriptor* descriptor;
+ const IndexDescriptor* descriptor;
- IndexBounds bounds;
+ IndexBounds bounds;
- int direction;
+ int direction;
- bool doNotDedup;
+ bool doNotDedup;
- // How many keys will we look at?
- size_t maxScan;
+ // How many keys will we look at?
+ size_t maxScan;
- // Do we want to add the key as metadata?
- bool addKeyMetadata;
- };
+ // Do we want to add the key as metadata?
+ bool addKeyMetadata;
+};
+/**
+ * Stage scans over an index from startKey to endKey, returning results that pass the provided
+ * filter. Internally dedups on RecordId.
+ *
+ * Sub-stage preconditions: None. Is a leaf and consumes no stage data.
+ */
+class IndexScan : public PlanStage {
+public:
/**
- * Stage scans over an index from startKey to endKey, returning results that pass the provided
- * filter. Internally dedups on RecordId.
- *
- * Sub-stage preconditions: None. Is a leaf and consumes no stage data.
+ * Keeps track of what this index scan is currently doing so that it
+ * can do the right thing on the next call to work().
*/
- class IndexScan : public PlanStage {
- public:
-
- /**
- * Keeps track of what this index scan is currently doing so that it
- * can do the right thing on the next call to work().
- */
- enum ScanState {
- // Need to initialize the underlying index traversal machinery.
- INITIALIZING,
+ enum ScanState {
+ // Need to initialize the underlying index traversal machinery.
+ INITIALIZING,
- // Skipping keys as directed by the _checker.
- NEED_SEEK,
+ // Skipping keys as directed by the _checker.
+ NEED_SEEK,
- // Retrieving the next key, and applying the filter if necessary.
- GETTING_NEXT,
+ // Retrieving the next key, and applying the filter if necessary.
+ GETTING_NEXT,
- // The index scan is finished.
- HIT_END
- };
+ // The index scan is finished.
+ HIT_END
+ };
- IndexScan(OperationContext* txn,
- const IndexScanParams& params,
- WorkingSet* workingSet,
- const MatchExpression* filter);
+ IndexScan(OperationContext* txn,
+ const IndexScanParams& params,
+ WorkingSet* workingSet,
+ const MatchExpression* filter);
- virtual ~IndexScan() { }
+ virtual ~IndexScan() {}
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_IXSCAN; }
+ virtual StageType stageType() const {
+ return STAGE_IXSCAN;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- /**
- * Initialize the underlying index Cursor, returning first result if any.
- */
- boost::optional<IndexKeyEntry> initIndexScan();
+private:
+ /**
+ * Initialize the underlying index Cursor, returning first result if any.
+ */
+ boost::optional<IndexKeyEntry> initIndexScan();
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
- // The WorkingSet we fill with results. Not owned by us.
- WorkingSet* const _workingSet;
+ // The WorkingSet we fill with results. Not owned by us.
+ WorkingSet* const _workingSet;
- // Index access.
- const IndexAccessMethod* const _iam; // owned by Collection -> IndexCatalog
- std::unique_ptr<SortedDataInterface::Cursor> _indexCursor;
- const BSONObj _keyPattern;
+ // Index access.
+ const IndexAccessMethod* const _iam; // owned by Collection -> IndexCatalog
+ std::unique_ptr<SortedDataInterface::Cursor> _indexCursor;
+ const BSONObj _keyPattern;
- // Keeps track of what work we need to do next.
- ScanState _scanState;
+ // Keeps track of what work we need to do next.
+ ScanState _scanState;
- // Contains expressions only over fields in the index key. We assume this is built
- // correctly by whomever creates this class.
- // The filter is not owned by us.
- const MatchExpression* const _filter;
+ // Contains expressions only over fields in the index key. We assume this is built
+    // correctly by whoever creates this class.
+ // The filter is not owned by us.
+ const MatchExpression* const _filter;
- // Could our index have duplicates? If so, we use _returned to dedup.
- bool _shouldDedup;
- unordered_set<RecordId, RecordId::Hasher> _returned;
+ // Could our index have duplicates? If so, we use _returned to dedup.
+ bool _shouldDedup;
+ unordered_set<RecordId, RecordId::Hasher> _returned;
- const bool _forward;
- const IndexScanParams _params;
+ const bool _forward;
+ const IndexScanParams _params;
- // Stats
- CommonStats _commonStats;
- IndexScanStats _specificStats;
+ // Stats
+ CommonStats _commonStats;
+ IndexScanStats _specificStats;
- //
- // This class employs one of two different algorithms for determining when the index scan
- // has reached the end:
- //
+ //
+ // This class employs one of two different algorithms for determining when the index scan
+ // has reached the end:
+ //
- //
- // 1) If the index scan is not a single contiguous interval, then we use an
- // IndexBoundsChecker to determine which keys to return and when to stop scanning.
- // In this case, _checker will be non-NULL.
- //
+ //
+ // 1) If the index scan is not a single contiguous interval, then we use an
+ // IndexBoundsChecker to determine which keys to return and when to stop scanning.
+ // In this case, _checker will be non-NULL.
+ //
- std::unique_ptr<IndexBoundsChecker> _checker;
- IndexSeekPoint _seekPoint;
+ std::unique_ptr<IndexBoundsChecker> _checker;
+ IndexSeekPoint _seekPoint;
- //
- // 2) If the index scan is a single contiguous interval, then the scan can execute faster by
- // letting the index cursor tell us when it hits the end, rather than repeatedly doing
- // BSON compares against scanned keys. In this case _checker will be NULL.
- //
+ //
+ // 2) If the index scan is a single contiguous interval, then the scan can execute faster by
+ // letting the index cursor tell us when it hits the end, rather than repeatedly doing
+ // BSON compares against scanned keys. In this case _checker will be NULL.
+ //
- // The key that the index cursor should stop on/after.
- BSONObj _endKey;
+ // The key that the index cursor should stop on/after.
+ BSONObj _endKey;
- // Is the end key included in the range?
- bool _endKeyInclusive;
- };
+ // Is the end key included in the range?
+ bool _endKeyInclusive;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/keep_mutations.cpp b/src/mongo/db/exec/keep_mutations.cpp
index 7c88a59fde5..c30d276782d 100644
--- a/src/mongo/db/exec/keep_mutations.cpp
+++ b/src/mongo/db/exec/keep_mutations.cpp
@@ -33,127 +33,127 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* KeepMutationsStage::kStageType = "KEEP_MUTATIONS";
-
- KeepMutationsStage::KeepMutationsStage(const MatchExpression* filter,
- WorkingSet* ws,
- PlanStage* child)
- : _workingSet(ws),
- _child(child),
- _filter(filter),
- _doneReadingChild(false),
- _doneReturningFlagged(false),
- _commonStats(kStageType) { }
-
- KeepMutationsStage::~KeepMutationsStage() { }
-
- bool KeepMutationsStage::isEOF() {
- return _doneReadingChild && _doneReturningFlagged;
- }
-
- PlanStage::StageState KeepMutationsStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+using std::unique_ptr;
+using std::vector;
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+// static
+const char* KeepMutationsStage::kStageType = "KEEP_MUTATIONS";
- // If we've returned as many results as we're limited to, isEOF will be true.
- if (isEOF()) { return PlanStage::IS_EOF; }
+KeepMutationsStage::KeepMutationsStage(const MatchExpression* filter,
+ WorkingSet* ws,
+ PlanStage* child)
+ : _workingSet(ws),
+ _child(child),
+ _filter(filter),
+ _doneReadingChild(false),
+ _doneReturningFlagged(false),
+ _commonStats(kStageType) {}
- // Stream child results until the child is all done.
- if (!_doneReadingChild) {
- StageState status = _child->work(out);
+KeepMutationsStage::~KeepMutationsStage() {}
- // Child is still returning results. Pass them through.
- if (PlanStage::IS_EOF != status) {
- if (PlanStage::ADVANCED == status) {
- ++_commonStats.advanced;
- }
- else if (PlanStage::NEED_TIME == status) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == status) {
- ++_commonStats.needYield;
- }
+bool KeepMutationsStage::isEOF() {
+ return _doneReadingChild && _doneReturningFlagged;
+}
- return status;
- }
+PlanStage::StageState KeepMutationsStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- // Child is EOF. We want to stream flagged results if there are any.
- _doneReadingChild = true;
-
- // Read out all of the flagged results from the working set. We can't iterate through
- // the working set's flagged result set directly, since it may be modified later if
- // further documents are invalidated during a yield.
- std::copy(_workingSet->getFlagged().begin(), _workingSet->getFlagged().end(),
- std::back_inserter(_flagged));
- _flaggedIterator = _flagged.begin();
- }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- // We're streaming flagged results.
- invariant(!_doneReturningFlagged);
- if (_flaggedIterator == _flagged.end()) {
- _doneReturningFlagged = true;
- return PlanStage::IS_EOF;
- }
-
- WorkingSetID idToTest = *_flaggedIterator;
- _flaggedIterator++;
-
- WorkingSetMember* member = _workingSet->get(idToTest);
- if (Filter::passes(member, _filter)) {
- *out = idToTest;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
- else {
- _workingSet->free(idToTest);
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- }
-
- void KeepMutationsStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
+ // If we've returned as many results as we're limited to, isEOF will be true.
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
}
- void KeepMutationsStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
- }
+ // Stream child results until the child is all done.
+ if (!_doneReadingChild) {
+ StageState status = _child->work(out);
+
+ // Child is still returning results. Pass them through.
+ if (PlanStage::IS_EOF != status) {
+ if (PlanStage::ADVANCED == status) {
+ ++_commonStats.advanced;
+ } else if (PlanStage::NEED_TIME == status) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == status) {
+ ++_commonStats.needYield;
+ }
- void KeepMutationsStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
- }
+ return status;
+ }
- vector<PlanStage*> KeepMutationsStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
+ // Child is EOF. We want to stream flagged results if there are any.
+ _doneReadingChild = true;
- PlanStageStats* KeepMutationsStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_KEEP_MUTATIONS));
- // Takes ownership of the object returned from _child->getStats().
- ret->children.push_back(_child->getStats());
- return ret.release();
+ // Read out all of the flagged results from the working set. We can't iterate through
+ // the working set's flagged result set directly, since it may be modified later if
+ // further documents are invalidated during a yield.
+ std::copy(_workingSet->getFlagged().begin(),
+ _workingSet->getFlagged().end(),
+ std::back_inserter(_flagged));
+ _flaggedIterator = _flagged.begin();
}
- const CommonStats* KeepMutationsStage::getCommonStats() const {
- return &_commonStats;
+ // We're streaming flagged results.
+ invariant(!_doneReturningFlagged);
+ if (_flaggedIterator == _flagged.end()) {
+ _doneReturningFlagged = true;
+ return PlanStage::IS_EOF;
}
- const SpecificStats* KeepMutationsStage::getSpecificStats() const {
- return NULL;
+ WorkingSetID idToTest = *_flaggedIterator;
+ _flaggedIterator++;
+
+ WorkingSetMember* member = _workingSet->get(idToTest);
+ if (Filter::passes(member, _filter)) {
+ *out = idToTest;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ } else {
+ _workingSet->free(idToTest);
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
+}
+
+void KeepMutationsStage::saveState() {
+ ++_commonStats.yields;
+ _child->saveState();
+}
+
+void KeepMutationsStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ _child->restoreState(opCtx);
+}
+
+void KeepMutationsStage::invalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
+}
+
+vector<PlanStage*> KeepMutationsStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* KeepMutationsStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_KEEP_MUTATIONS));
+ // Takes ownership of the object returned from _child->getStats().
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+const CommonStats* KeepMutationsStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* KeepMutationsStage::getSpecificStats() const {
+ return NULL;
+}
} // namespace mongo
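Aside: KeepMutationsStage above copies the working set's flagged results into _flagged with std::copy and std::back_inserter rather than iterating the flagged set in place, because that set can gain entries if further documents are invalidated during a yield. A self-contained sketch of the same snapshot idiom using plain standard-library containers (illustration only, not repository code):

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <set>
    #include <vector>

    int main() {
        std::set<unsigned> flagged = {3, 1, 4};

        // Take a one-time copy; later insertions into 'flagged' cannot disturb our iteration.
        std::vector<unsigned> snapshot;
        std::copy(flagged.begin(), flagged.end(), std::back_inserter(snapshot));

        flagged.insert(9);  // simulates another document being flagged during a yield

        for (unsigned id : snapshot)
            std::cout << id << " ";  // prints "1 3 4", the ids present when we copied
        std::cout << "\n";
        return 0;
    }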
diff --git a/src/mongo/db/exec/keep_mutations.h b/src/mongo/db/exec/keep_mutations.h
index 111d777d894..cbf9f75b928 100644
--- a/src/mongo/db/exec/keep_mutations.h
+++ b/src/mongo/db/exec/keep_mutations.h
@@ -36,62 +36,64 @@
namespace mongo {
- /**
- * KeepMutationsStage passes all of its child's data through until the child is EOF.
- * It then returns all flagged elements in the WorkingSet that pass the stage's filter.
- *
- * This stage is used to merge results that are invalidated mid-query back into the query
- * results when possible. The query planner is responsible for determining when it's valid to
- * merge these results.
- */
- class KeepMutationsStage : public PlanStage {
- public:
- KeepMutationsStage(const MatchExpression* filter, WorkingSet* ws, PlanStage* child);
- virtual ~KeepMutationsStage();
+/**
+ * KeepMutationsStage passes all of its child's data through until the child is EOF.
+ * It then returns all flagged elements in the WorkingSet that pass the stage's filter.
+ *
+ * This stage is used to merge results that are invalidated mid-query back into the query
+ * results when possible. The query planner is responsible for determining when it's valid to
+ * merge these results.
+ */
+class KeepMutationsStage : public PlanStage {
+public:
+ KeepMutationsStage(const MatchExpression* filter, WorkingSet* ws, PlanStage* child);
+ virtual ~KeepMutationsStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_KEEP_MUTATIONS; }
+ virtual StageType stageType() const {
+ return STAGE_KEEP_MUTATIONS;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- // Not owned here.
- WorkingSet* _workingSet;
+private:
+ // Not owned here.
+ WorkingSet* _workingSet;
- std::unique_ptr<PlanStage> _child;
+ std::unique_ptr<PlanStage> _child;
- // Not owned here. Should be the full query expression tree.
- const MatchExpression* _filter;
+ // Not owned here. Should be the full query expression tree.
+ const MatchExpression* _filter;
- // We read from our child...
- bool _doneReadingChild;
+ // We read from our child...
+ bool _doneReadingChild;
- // ...until it's out of results, at which point we put any flagged results back in the query
- // stream.
- bool _doneReturningFlagged;
+ // ...until it's out of results, at which point we put any flagged results back in the query
+ // stream.
+ bool _doneReturningFlagged;
- // Stats.
- CommonStats _commonStats;
+ // Stats.
+ CommonStats _commonStats;
- // Our copy of the working set's flagged results.
- std::vector<WorkingSetID> _flagged;
+ // Our copy of the working set's flagged results.
+ std::vector<WorkingSetID> _flagged;
- // Iterator pointing into _flagged.
- std::vector<WorkingSetID>::const_iterator _flaggedIterator;
- };
+ // Iterator pointing into _flagged.
+ std::vector<WorkingSetID>::const_iterator _flaggedIterator;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp
index c766d07c650..a62f6e863e3 100644
--- a/src/mongo/db/exec/limit.cpp
+++ b/src/mongo/db/exec/limit.cpp
@@ -34,103 +34,99 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
-
- // static
- const char* LimitStage::kStageType = "LIMIT";
-
- LimitStage::LimitStage(int limit, WorkingSet* ws, PlanStage* child)
- : _ws(ws),
- _child(child),
- _numToReturn(limit),
- _commonStats(kStageType) {
- _specificStats.limit = _numToReturn;
- }
+using std::unique_ptr;
+using std::vector;
- LimitStage::~LimitStage() { }
+// static
+const char* LimitStage::kStageType = "LIMIT";
- bool LimitStage::isEOF() { return (0 == _numToReturn) || _child->isEOF(); }
+LimitStage::LimitStage(int limit, WorkingSet* ws, PlanStage* child)
+ : _ws(ws), _child(child), _numToReturn(limit), _commonStats(kStageType) {
+ _specificStats.limit = _numToReturn;
+}
- PlanStage::StageState LimitStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+LimitStage::~LimitStage() {}
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+bool LimitStage::isEOF() {
+ return (0 == _numToReturn) || _child->isEOF();
+}
- if (0 == _numToReturn) {
- // We've returned as many results as we're limited to.
- return PlanStage::IS_EOF;
- }
+PlanStage::StageState LimitStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState status = _child->work(&id);
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- if (PlanStage::ADVANCED == status) {
- *out = id;
- --_numToReturn;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
- else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "limit stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- return status;
- }
- else if (PlanStage::NEED_TIME == status) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == status) {
- ++_commonStats.needYield;
- *out = id;
- }
-
- return status;
+ if (0 == _numToReturn) {
+ // We've returned as many results as we're limited to.
+ return PlanStage::IS_EOF;
}
- void LimitStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
- }
-
- void LimitStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
- }
-
- void LimitStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
- }
-
- vector<PlanStage*> LimitStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
-
- PlanStageStats* LimitStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_LIMIT));
- ret->specific.reset(new LimitStats(_specificStats));
- ret->children.push_back(_child->getStats());
- return ret.release();
- }
-
- const CommonStats* LimitStage::getCommonStats() const {
- return &_commonStats;
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState status = _child->work(&id);
+
+ if (PlanStage::ADVANCED == status) {
+ *out = id;
+ --_numToReturn;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "limit stage failed to read in results from child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ }
+ return status;
+ } else if (PlanStage::NEED_TIME == status) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == status) {
+ ++_commonStats.needYield;
+ *out = id;
}
- const SpecificStats* LimitStage::getSpecificStats() const {
- return &_specificStats;
- }
+ return status;
+}
+
+void LimitStage::saveState() {
+ ++_commonStats.yields;
+ _child->saveState();
+}
+
+void LimitStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ _child->restoreState(opCtx);
+}
+
+void LimitStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
+}
+
+vector<PlanStage*> LimitStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* LimitStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_LIMIT));
+ ret->specific.reset(new LimitStats(_specificStats));
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+const CommonStats* LimitStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* LimitStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index f3f722fd1a1..828b6e6c0d4 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -35,47 +35,49 @@
namespace mongo {
- /**
- * This stage implements limit functionality. It only returns 'limit' results before EOF.
- *
- * Sort has a baked-in limit, as it can optimize the sort if it has a limit.
- *
- * Preconditions: None.
- */
- class LimitStage : public PlanStage {
- public:
- LimitStage(int limit, WorkingSet* ws, PlanStage* child);
- virtual ~LimitStage();
+/**
+ * This stage implements limit functionality. It only returns 'limit' results before EOF.
+ *
+ * Sort has a baked-in limit, as it can optimize the sort if it has a limit.
+ *
+ * Preconditions: None.
+ */
+class LimitStage : public PlanStage {
+public:
+ LimitStage(int limit, WorkingSet* ws, PlanStage* child);
+ virtual ~LimitStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_LIMIT; }
+ virtual StageType stageType() const {
+ return STAGE_LIMIT;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
+private:
+ WorkingSet* _ws;
+ std::unique_ptr<PlanStage> _child;
- // We only return this many results.
- int _numToReturn;
+ // We only return this many results.
+ int _numToReturn;
- // Stats
- CommonStats _commonStats;
- LimitStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ LimitStats _specificStats;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp
index 37c8269a502..7f0581da18c 100644
--- a/src/mongo/db/exec/merge_sort.cpp
+++ b/src/mongo/db/exec/merge_sort.cpp
@@ -35,255 +35,257 @@
namespace mongo {
- using std::unique_ptr;
- using std::list;
- using std::string;
- using std::vector;
-
- // static
- const char* MergeSortStage::kStageType = "SORT_MERGE";
-
- MergeSortStage::MergeSortStage(const MergeSortStageParams& params,
- WorkingSet* ws,
- const Collection* collection)
- : _collection(collection),
- _ws(ws),
- _pattern(params.pattern),
- _dedup(params.dedup),
- _merging(StageWithValueComparison(ws, params.pattern)),
- _commonStats(kStageType) { }
-
- MergeSortStage::~MergeSortStage() {
- for (size_t i = 0; i < _children.size(); ++i) { delete _children[i]; }
+using std::unique_ptr;
+using std::list;
+using std::string;
+using std::vector;
+
+// static
+const char* MergeSortStage::kStageType = "SORT_MERGE";
+
+MergeSortStage::MergeSortStage(const MergeSortStageParams& params,
+ WorkingSet* ws,
+ const Collection* collection)
+ : _collection(collection),
+ _ws(ws),
+ _pattern(params.pattern),
+ _dedup(params.dedup),
+ _merging(StageWithValueComparison(ws, params.pattern)),
+ _commonStats(kStageType) {}
+
+MergeSortStage::~MergeSortStage() {
+ for (size_t i = 0; i < _children.size(); ++i) {
+ delete _children[i];
}
+}
- void MergeSortStage::addChild(PlanStage* child) {
- _children.push_back(child);
+void MergeSortStage::addChild(PlanStage* child) {
+ _children.push_back(child);
- // We have to call work(...) on every child before we can pick a min.
- _noResultToMerge.push(child);
- }
-
- bool MergeSortStage::isEOF() {
- // If we have no more results to return, and we have no more children that we can call
- // work(...) on to get results, we're done.
- return _merging.empty() && _noResultToMerge.empty();
- }
+ // We have to call work(...) on every child before we can pick a min.
+ _noResultToMerge.push(child);
+}
- PlanStage::StageState MergeSortStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+bool MergeSortStage::isEOF() {
+ // If we have no more results to return, and we have no more children that we can call
+ // work(...) on to get results, we're done.
+ return _merging.empty() && _noResultToMerge.empty();
+}
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+PlanStage::StageState MergeSortStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- if (isEOF()) { return PlanStage::IS_EOF; }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- if (!_noResultToMerge.empty()) {
- // We have some child that we don't have a result from. Each child must have a result
- // in order to pick the minimum result among all our children. Work a child.
- PlanStage* child = _noResultToMerge.front();
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState code = child->work(&id);
-
- if (PlanStage::ADVANCED == code) {
- // If we're deduping...
- if (_dedup) {
- WorkingSetMember* member = _ws->get(id);
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
- if (!member->hasLoc()) {
- // Can't dedup data unless there's a RecordId. We go ahead and use its
- // result.
+ if (!_noResultToMerge.empty()) {
+ // We have some child that we don't have a result from. Each child must have a result
+ // in order to pick the minimum result among all our children. Work a child.
+ PlanStage* child = _noResultToMerge.front();
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState code = child->work(&id);
+
+ if (PlanStage::ADVANCED == code) {
+ // If we're deduping...
+ if (_dedup) {
+ WorkingSetMember* member = _ws->get(id);
+
+ if (!member->hasLoc()) {
+ // Can't dedup data unless there's a RecordId. We go ahead and use its
+ // result.
+ _noResultToMerge.pop();
+ } else {
+ ++_specificStats.dupsTested;
+                    // ...and there's a diskloc and we've seen the RecordId before
+ if (_seen.end() != _seen.find(member->loc)) {
+ // ...drop it.
+ _ws->free(id);
+ ++_commonStats.needTime;
+ ++_specificStats.dupsDropped;
+ return PlanStage::NEED_TIME;
+ } else {
+ // Otherwise, note that we've seen it.
+ _seen.insert(member->loc);
+ // We're going to use the result from the child, so we remove it from
+ // the queue of children without a result.
_noResultToMerge.pop();
}
- else {
- ++_specificStats.dupsTested;
- // ...and there's a diskloc and and we've seen the RecordId before
- if (_seen.end() != _seen.find(member->loc)) {
- // ...drop it.
- _ws->free(id);
- ++_commonStats.needTime;
- ++_specificStats.dupsDropped;
- return PlanStage::NEED_TIME;
- }
- else {
- // Otherwise, note that we've seen it.
- _seen.insert(member->loc);
- // We're going to use the result from the child, so we remove it from
- // the queue of children without a result.
- _noResultToMerge.pop();
- }
- }
- }
- else {
- // Not deduping. We use any result we get from the child. Remove the child
- // from the queue of things without a result.
- _noResultToMerge.pop();
}
+ } else {
+ // Not deduping. We use any result we get from the child. Remove the child
+ // from the queue of things without a result.
+ _noResultToMerge.pop();
+ }
- // Store the result in our list.
- StageWithValue value;
- value.id = id;
- value.stage = child;
- _mergingData.push_front(value);
+ // Store the result in our list.
+ StageWithValue value;
+ value.id = id;
+ value.stage = child;
+ _mergingData.push_front(value);
- // Insert the result (indirectly) into our priority queue.
- _merging.push(_mergingData.begin());
+ // Insert the result (indirectly) into our priority queue.
+ _merging.push(_mergingData.begin());
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::IS_EOF == code) {
+ // There are no more results possible from this child. Don't bother with it
+ // anymore.
+ _noResultToMerge.pop();
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "merge sort stage failed to read in results from child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
- else if (PlanStage::IS_EOF == code) {
- // There are no more results possible from this child. Don't bother with it
- // anymore.
- _noResultToMerge.pop();
+ return code;
+ } else {
+ if (PlanStage::NEED_TIME == code) {
++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
+ } else if (PlanStage::NEED_YIELD == code) {
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "merge sort stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- return code;
+ ++_commonStats.needYield;
}
- else {
- if (PlanStage::NEED_TIME == code) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == code) {
- *out = id;
- ++_commonStats.needYield;
- }
- return code;
- }
+ return code;
}
+ }
- // If we're here, for each non-EOF child, we have a valid WSID.
- verify(!_merging.empty());
-
- // Get the 'min' WSID. _merging is a priority queue so its top is the smallest.
- MergingRef top = _merging.top();
- _merging.pop();
+ // If we're here, for each non-EOF child, we have a valid WSID.
+ verify(!_merging.empty());
- // Since we're returning the WSID that came from top->stage, we need to work(...) it again
- // to get a new result.
- _noResultToMerge.push(top->stage);
+ // Get the 'min' WSID. _merging is a priority queue so its top is the smallest.
+ MergingRef top = _merging.top();
+ _merging.pop();
- // Save the ID that we're returning and remove the returned result from our data.
- WorkingSetID idToTest = top->id;
- _mergingData.erase(top);
+ // Since we're returning the WSID that came from top->stage, we need to work(...) it again
+ // to get a new result.
+ _noResultToMerge.push(top->stage);
- // Return the min.
- *out = idToTest;
- ++_commonStats.advanced;
+ // Save the ID that we're returning and remove the returned result from our data.
+ WorkingSetID idToTest = top->id;
+ _mergingData.erase(top);
- // But don't return it if it's flagged.
- if (_ws->isFlagged(*out)) {
- return PlanStage::NEED_TIME;
- }
+ // Return the min.
+ *out = idToTest;
+ ++_commonStats.advanced;
- return PlanStage::ADVANCED;
+ // But don't return it if it's flagged.
+ if (_ws->isFlagged(*out)) {
+ return PlanStage::NEED_TIME;
}
- void MergeSortStage::saveState() {
- ++_commonStats.yields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->saveState();
- }
+ return PlanStage::ADVANCED;
+}
+
+void MergeSortStage::saveState() {
+ ++_commonStats.yields;
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->saveState();
}
+}
- void MergeSortStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->restoreState(opCtx);
- }
+void MergeSortStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->restoreState(opCtx);
}
+}
- void MergeSortStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- ++_commonStats.invalidates;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->invalidate(txn, dl, type);
- }
+void MergeSortStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->invalidate(txn, dl, type);
+ }
- // Go through our data and see if we're holding on to the invalidated loc.
- for (list<StageWithValue>::iterator valueIt = _mergingData.begin(); valueIt != _mergingData.end(); valueIt++) {
- WorkingSetMember* member = _ws->get(valueIt->id);
- if (member->hasLoc() && (dl == member->loc)) {
- // Force a fetch and flag. We could possibly merge this result back in later.
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- _ws->flagForReview(valueIt->id);
- ++_specificStats.forcedFetches;
- }
+ // Go through our data and see if we're holding on to the invalidated loc.
+ for (list<StageWithValue>::iterator valueIt = _mergingData.begin();
+ valueIt != _mergingData.end();
+ valueIt++) {
+ WorkingSetMember* member = _ws->get(valueIt->id);
+ if (member->hasLoc() && (dl == member->loc)) {
+ // Force a fetch and flag. We could possibly merge this result back in later.
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ _ws->flagForReview(valueIt->id);
+ ++_specificStats.forcedFetches;
}
-
- // If we see DL again it is not the same record as it once was so we still want to
- // return it.
- if (_dedup) { _seen.erase(dl); }
}
- // Is lhs less than rhs? Note that priority_queue is a max heap by default so we invert
- // the return from the expected value.
- bool MergeSortStage::StageWithValueComparison::operator()(
- const MergingRef& lhs, const MergingRef& rhs) {
-
- WorkingSetMember* lhsMember = _ws->get(lhs->id);
- WorkingSetMember* rhsMember = _ws->get(rhs->id);
-
- BSONObjIterator it(_pattern);
- while (it.more()) {
- BSONElement patternElt = it.next();
- string fn = patternElt.fieldName();
-
- BSONElement lhsElt;
- verify(lhsMember->getFieldDotted(fn, &lhsElt));
-
- BSONElement rhsElt;
- verify(rhsMember->getFieldDotted(fn, &rhsElt));
-
- // false means don't compare field name.
- int x = lhsElt.woCompare(rhsElt, false);
- if (-1 == patternElt.number()) { x = -x; }
- if (x != 0) { return x > 0; }
+ // If we see DL again it is not the same record as it once was so we still want to
+ // return it.
+ if (_dedup) {
+ _seen.erase(dl);
+ }
+}
+
+// Is lhs less than rhs? Note that priority_queue is a max heap by default so we invert
+// the return from the expected value.
+bool MergeSortStage::StageWithValueComparison::operator()(const MergingRef& lhs,
+ const MergingRef& rhs) {
+ WorkingSetMember* lhsMember = _ws->get(lhs->id);
+ WorkingSetMember* rhsMember = _ws->get(rhs->id);
+
+ BSONObjIterator it(_pattern);
+ while (it.more()) {
+ BSONElement patternElt = it.next();
+ string fn = patternElt.fieldName();
+
+ BSONElement lhsElt;
+ verify(lhsMember->getFieldDotted(fn, &lhsElt));
+
+ BSONElement rhsElt;
+ verify(rhsMember->getFieldDotted(fn, &rhsElt));
+
+ // false means don't compare field name.
+ int x = lhsElt.woCompare(rhsElt, false);
+ if (-1 == patternElt.number()) {
+ x = -x;
+ }
+ if (x != 0) {
+ return x > 0;
}
-
- // A comparator for use with sort is required to model a strict weak ordering, so
- // to satisfy irreflexivity we must return 'false' for elements that we consider
- // equivalent under the pattern.
- return false;
}
- vector<PlanStage*> MergeSortStage::getChildren() const {
- return _children;
- }
+ // A comparator for use with sort is required to model a strict weak ordering, so
+ // to satisfy irreflexivity we must return 'false' for elements that we consider
+ // equivalent under the pattern.
+ return false;
+}
- PlanStageStats* MergeSortStage::getStats() {
- _commonStats.isEOF = isEOF();
+vector<PlanStage*> MergeSortStage::getChildren() const {
+ return _children;
+}
- _specificStats.sortPattern = _pattern;
+PlanStageStats* MergeSortStage::getStats() {
+ _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SORT_MERGE));
- ret->specific.reset(new MergeSortStats(_specificStats));
- for (size_t i = 0; i < _children.size(); ++i) {
- ret->children.push_back(_children[i]->getStats());
- }
- return ret.release();
- }
+ _specificStats.sortPattern = _pattern;
- const CommonStats* MergeSortStage::getCommonStats() const {
- return &_commonStats;
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SORT_MERGE));
+ ret->specific.reset(new MergeSortStats(_specificStats));
+ for (size_t i = 0; i < _children.size(); ++i) {
+ ret->children.push_back(_children[i]->getStats());
}
+ return ret.release();
+}
- const SpecificStats* MergeSortStage::getSpecificStats() const {
- return &_specificStats;
- }
+const CommonStats* MergeSortStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* MergeSortStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
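Aside: MergeSortStage above stores its cached results in a std::list and pushes list iterators into a std::priority_queue whose comparator is inverted, so the default max-heap surfaces the minimum entry while the chosen element can still be erased from the list cheaply. A self-contained sketch of that pattern with plain ints (illustration only, not repository code):

    #include <iostream>
    #include <list>
    #include <queue>
    #include <vector>

    int main() {
        std::list<int> data = {42, 7, 19};
        using Ref = std::list<int>::iterator;

        // Returning "greater than" inverts the ordering: the max-heap now surfaces the smallest value.
        auto cmp = [](const Ref& lhs, const Ref& rhs) { return *lhs > *rhs; };
        std::priority_queue<Ref, std::vector<Ref>, decltype(cmp)> merging(cmp);

        for (auto it = data.begin(); it != data.end(); ++it)
            merging.push(it);

        Ref top = merging.top();    // smallest element, analogous to _merging.top()
        merging.pop();
        std::cout << *top << "\n";  // prints 7
        data.erase(top);            // other list iterators stay valid, so removal is cheap
        return 0;
    }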
diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h
index e8e216e5fe3..7ef6d960013 100644
--- a/src/mongo/db/exec/merge_sort.h
+++ b/src/mongo/db/exec/merge_sort.h
@@ -39,129 +39,130 @@
namespace mongo {
- // External params for the merge sort stage. Declared below.
- class MergeSortStageParams;
-
- /**
- * Merges the outputs of N children, each of which is sorted in the order specified by
- * 'pattern'. The output is sorted by 'pattern'. Practically speaking, all of this stage's
- * children are indices.
- *
- * AKA the SERVER-1205 stage. Allows very efficient handling of the following query:
- * find($or[{a:1}, {b:1}]).sort({c:1}) with indices {a:1, c:1} and {b:1, c:1}.
- *
- * Preconditions: For each field in 'pattern' all inputs in the child must handle a
- * getFieldDotted for that field.
- */
- class MergeSortStage : public PlanStage {
- public:
- MergeSortStage(const MergeSortStageParams& params,
- WorkingSet* ws,
- const Collection* collection);
- virtual ~MergeSortStage();
+// External params for the merge sort stage. Declared below.
+class MergeSortStageParams;
- void addChild(PlanStage* child);
+/**
+ * Merges the outputs of N children, each of which is sorted in the order specified by
+ * 'pattern'. The output is sorted by 'pattern'. Practically speaking, all of this stage's
+ * children are indices.
+ *
+ * AKA the SERVER-1205 stage. Allows very efficient handling of the following query:
+ * find($or[{a:1}, {b:1}]).sort({c:1}) with indices {a:1, c:1} and {b:1, c:1}.
+ *
+ * Preconditions: For each field in 'pattern' all inputs in the child must handle a
+ * getFieldDotted for that field.
+ */
+class MergeSortStage : public PlanStage {
+public:
+ MergeSortStage(const MergeSortStageParams& params,
+ WorkingSet* ws,
+ const Collection* collection);
+ virtual ~MergeSortStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ void addChild(PlanStage* child);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual StageType stageType() const { return STAGE_SORT_MERGE; }
+ virtual std::vector<PlanStage*> getChildren() const;
- PlanStageStats* getStats();
+ virtual StageType stageType() const {
+ return STAGE_SORT_MERGE;
+ }
- virtual const CommonStats* getCommonStats() const;
+ PlanStageStats* getStats();
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const CommonStats* getCommonStats() const;
- static const char* kStageType;
+ virtual const SpecificStats* getSpecificStats() const;
- private:
- // Not owned by us.
- const Collection* _collection;
+ static const char* kStageType;
- // Not owned by us.
- WorkingSet* _ws;
+private:
+ // Not owned by us.
+ const Collection* _collection;
- // The pattern that we're sorting by.
- BSONObj _pattern;
+ // Not owned by us.
+ WorkingSet* _ws;
+
+ // The pattern that we're sorting by.
+ BSONObj _pattern;
+
+ // Are we deduplicating on RecordId?
+ bool _dedup;
+
+ // Which RecordIds have we seen?
+ unordered_set<RecordId, RecordId::Hasher> _seen;
+
+ // Owned by us. All the children we're reading from.
+ std::vector<PlanStage*> _children;
- // Are we deduplicating on RecordId?
- bool _dedup;
-
- // Which RecordIds have we seen?
- unordered_set<RecordId, RecordId::Hasher> _seen;
-
- // Owned by us. All the children we're reading from.
- std::vector<PlanStage*> _children;
-
- // In order to pick the next smallest value, we need each child work(...) until it produces
- // a result. This is the queue of children that haven't given us a result yet.
- std::queue<PlanStage*> _noResultToMerge;
-
- // There is some confusing STL wrangling going on below. Here's a guide:
- //
- // We want to keep a priority_queue of results so we can quickly return the min result.
- //
- // If we receive an invalidate, we need to iterate over any cached state to see if the
- // invalidate is relevant.
- //
- // We can't iterate over a priority_queue, so we keep the actual cached state in a list and
- // have a priority_queue of iterators into that list.
- //
- // Why an iterator instead of a pointer? We need to be able to use the information in the
- // priority_queue to remove the item from the list and quickly.
-
- struct StageWithValue {
- StageWithValue() : id(WorkingSet::INVALID_ID), stage(NULL) { }
- WorkingSetID id;
- PlanStage* stage;
- };
-
- // We have a priority queue of these.
- typedef std::list<StageWithValue>::iterator MergingRef;
-
- // The comparison function used in our priority queue.
- class StageWithValueComparison {
- public:
- StageWithValueComparison(WorkingSet* ws, BSONObj pattern)
- : _ws(ws), _pattern(pattern) {}
-
- // Is lhs less than rhs? Note that priority_queue is a max heap by default so we invert
- // the return from the expected value.
- bool operator()(const MergingRef& lhs, const MergingRef& rhs);
-
- private:
- WorkingSet* _ws;
- BSONObj _pattern;
- };
-
- // The min heap of the results we're returning.
- std::priority_queue<MergingRef, std::vector<MergingRef>, StageWithValueComparison> _merging;
-
- // The data referred to by the _merging queue above.
- std::list<StageWithValue> _mergingData;
-
- // Stats
- CommonStats _commonStats;
- MergeSortStats _specificStats;
+ // In order to pick the next smallest value, we need each child work(...) until it produces
+ // a result. This is the queue of children that haven't given us a result yet.
+ std::queue<PlanStage*> _noResultToMerge;
+
+ // There is some confusing STL wrangling going on below. Here's a guide:
+ //
+ // We want to keep a priority_queue of results so we can quickly return the min result.
+ //
+ // If we receive an invalidate, we need to iterate over any cached state to see if the
+ // invalidate is relevant.
+ //
+ // We can't iterate over a priority_queue, so we keep the actual cached state in a list and
+ // have a priority_queue of iterators into that list.
+ //
+ // Why an iterator instead of a pointer? We need to be able to use the information in the
+ // priority_queue to remove the item from the list and quickly.
+
+ struct StageWithValue {
+ StageWithValue() : id(WorkingSet::INVALID_ID), stage(NULL) {}
+ WorkingSetID id;
+ PlanStage* stage;
};
- // Parameters that must be provided to a MergeSortStage
- class MergeSortStageParams {
+ // We have a priority queue of these.
+ typedef std::list<StageWithValue>::iterator MergingRef;
+
+ // The comparison function used in our priority queue.
+ class StageWithValueComparison {
public:
- MergeSortStageParams() : dedup(true) { }
+ StageWithValueComparison(WorkingSet* ws, BSONObj pattern) : _ws(ws), _pattern(pattern) {}
- // How we're sorting.
- BSONObj pattern;
+ // Is lhs less than rhs? Note that priority_queue is a max heap by default so we invert
+ // the return from the expected value.
+ bool operator()(const MergingRef& lhs, const MergingRef& rhs);
- // Do we deduplicate on RecordId?
- bool dedup;
+ private:
+ WorkingSet* _ws;
+ BSONObj _pattern;
};
+ // The min heap of the results we're returning.
+ std::priority_queue<MergingRef, std::vector<MergingRef>, StageWithValueComparison> _merging;
+
+ // The data referred to by the _merging queue above.
+ std::list<StageWithValue> _mergingData;
+
+ // Stats
+ CommonStats _commonStats;
+ MergeSortStats _specificStats;
+};
+
+// Parameters that must be provided to a MergeSortStage
+class MergeSortStageParams {
+public:
+ MergeSortStageParams() : dedup(true) {}
+
+ // How we're sorting.
+ BSONObj pattern;
+
+ // Do we deduplicate on RecordId?
+ bool dedup;
+};
+
} // namespace mongo
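
The MergeSortStage comments above describe a specific pattern: cached results live in a std::list, and a std::priority_queue holds iterators into that list, with the comparison inverted so the max-heap behaves as a min-heap and the popped iterator can also be used to erase the corresponding list node on invalidation. A minimal standalone sketch of that pattern follows; CachedResult and MinByKey are hypothetical names, not taken from the tree.

#include <iostream>
#include <iterator>
#include <list>
#include <queue>
#include <vector>

struct CachedResult {
    int sortKey;  // stands in for the value extracted via 'pattern'
};

typedef std::list<CachedResult>::iterator MergingRef;

struct MinByKey {
    // priority_queue is a max-heap, so invert the comparison to pop the smallest key first.
    bool operator()(const MergingRef& lhs, const MergingRef& rhs) const {
        return lhs->sortKey > rhs->sortKey;
    }
};

int main() {
    std::list<CachedResult> data;
    std::priority_queue<MergingRef, std::vector<MergingRef>, MinByKey> merging;

    for (int key : {7, 3, 5}) {
        data.push_back({key});
        merging.push(std::prev(data.end()));
    }

    while (!merging.empty()) {
        MergingRef top = merging.top();
        merging.pop();
        std::cout << top->sortKey << '\n';  // prints 3, then 5, then 7
        data.erase(top);                    // the iterator also lets us remove the list node
    }
    return 0;
}
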
diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp
index f8aeaac8ca5..fe955cb05af 100644
--- a/src/mongo/db/exec/multi_iterator.cpp
+++ b/src/mongo/db/exec/multi_iterator.cpp
@@ -36,100 +36,96 @@
namespace mongo {
- using std::vector;
-
- const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR";
-
- MultiIteratorStage::MultiIteratorStage(OperationContext* txn,
- WorkingSet* ws,
- Collection* collection)
- : _txn(txn),
- _collection(collection),
- _ws(ws),
- _wsidForFetch(_ws->allocate()) {
- // We pre-allocate a WSM and use it to pass up fetch requests. This should never be used
- // for anything other than passing up NEED_YIELD. We use the loc and owned obj state, but
- // the loc isn't really pointing at any obj. The obj field of the WSM should never be used.
- WorkingSetMember* member = _ws->get(_wsidForFetch);
- member->state = WorkingSetMember::LOC_AND_OWNED_OBJ;
+using std::vector;
+
+const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR";
+
+MultiIteratorStage::MultiIteratorStage(OperationContext* txn,
+ WorkingSet* ws,
+ Collection* collection)
+ : _txn(txn), _collection(collection), _ws(ws), _wsidForFetch(_ws->allocate()) {
+ // We pre-allocate a WSM and use it to pass up fetch requests. This should never be used
+ // for anything other than passing up NEED_YIELD. We use the loc and owned obj state, but
+ // the loc isn't really pointing at any obj. The obj field of the WSM should never be used.
+ WorkingSetMember* member = _ws->get(_wsidForFetch);
+ member->state = WorkingSetMember::LOC_AND_OWNED_OBJ;
+}
+
+void MultiIteratorStage::addIterator(std::unique_ptr<RecordCursor> it) {
+ _iterators.push_back(std::move(it));
+}
+
+PlanStage::StageState MultiIteratorStage::work(WorkingSetID* out) {
+ if (_collection == NULL) {
+ Status status(ErrorCodes::InternalError, "MultiIteratorStage died on null collection");
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ return PlanStage::DEAD;
}
- void MultiIteratorStage::addIterator(std::unique_ptr<RecordCursor> it) {
- _iterators.push_back(std::move(it));
- }
-
- PlanStage::StageState MultiIteratorStage::work(WorkingSetID* out) {
- if (_collection == NULL) {
- Status status(ErrorCodes::InternalError,
- "MultiIteratorStage died on null collection");
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- return PlanStage::DEAD;
- }
-
- boost::optional<Record> record;
- try {
- while (!_iterators.empty()) {
- if (auto fetcher = _iterators.back()->fetcherForNext()) {
- // Pass the RecordFetcher off up.
- WorkingSetMember* member = _ws->get(_wsidForFetch);
- member->setFetcher(fetcher.release());
- *out = _wsidForFetch;
- return NEED_YIELD;
- }
-
- record = _iterators.back()->next();
- if (record) break;
- _iterators.pop_back();
+ boost::optional<Record> record;
+ try {
+ while (!_iterators.empty()) {
+ if (auto fetcher = _iterators.back()->fetcherForNext()) {
+ // Pass the RecordFetcher off up.
+ WorkingSetMember* member = _ws->get(_wsidForFetch);
+ member->setFetcher(fetcher.release());
+ *out = _wsidForFetch;
+ return NEED_YIELD;
}
- }
- catch (const WriteConflictException& wce) {
- // If _advance throws a WCE we shouldn't have moved.
- invariant(!_iterators.empty());
- *out = WorkingSet::INVALID_ID;
- return NEED_YIELD;
- }
-
- if (!record)
- return IS_EOF;
-
- *out = _ws->allocate();
- WorkingSetMember* member = _ws->get(*out);
- member->loc = record->id;
- member->obj = {_txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
- member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- return PlanStage::ADVANCED;
- }
- bool MultiIteratorStage::isEOF() {
- return _collection == NULL || _iterators.empty();
- }
-
- void MultiIteratorStage::kill() {
- _collection = NULL;
- _iterators.clear();
- }
-
- void MultiIteratorStage::saveState() {
- _txn = NULL;
- for (size_t i = 0; i < _iterators.size(); i++) {
- _iterators[i]->savePositioned();
+ record = _iterators.back()->next();
+ if (record)
+ break;
+ _iterators.pop_back();
}
+ } catch (const WriteConflictException& wce) {
+ // If _advance throws a WCE we shouldn't have moved.
+ invariant(!_iterators.empty());
+ *out = WorkingSet::INVALID_ID;
+ return NEED_YIELD;
}
- void MultiIteratorStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- for (size_t i = 0; i < _iterators.size(); i++) {
- if (!_iterators[i]->restore(opCtx)) {
- kill();
- }
+ if (!record)
+ return IS_EOF;
+
+ *out = _ws->allocate();
+ WorkingSetMember* member = _ws->get(*out);
+ member->loc = record->id;
+ member->obj = {_txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+ member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+ return PlanStage::ADVANCED;
+}
+
+bool MultiIteratorStage::isEOF() {
+ return _collection == NULL || _iterators.empty();
+}
+
+void MultiIteratorStage::kill() {
+ _collection = NULL;
+ _iterators.clear();
+}
+
+void MultiIteratorStage::saveState() {
+ _txn = NULL;
+ for (size_t i = 0; i < _iterators.size(); i++) {
+ _iterators[i]->savePositioned();
+ }
+}
+
+void MultiIteratorStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ for (size_t i = 0; i < _iterators.size(); i++) {
+ if (!_iterators[i]->restore(opCtx)) {
+ kill();
}
}
+}
- void MultiIteratorStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- switch ( type ) {
+void MultiIteratorStage::invalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ switch (type) {
case INVALIDATION_DELETION:
for (size_t i = 0; i < _iterators.size(); i++) {
_iterators[i]->invalidate(dl);
@@ -138,19 +134,19 @@ namespace mongo {
case INVALIDATION_MUTATION:
// no-op
break;
- }
}
+}
- vector<PlanStage*> MultiIteratorStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
- }
+vector<PlanStage*> MultiIteratorStage::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
- PlanStageStats* MultiIteratorStage::getStats() {
- std::unique_ptr<PlanStageStats> ret(new PlanStageStats(CommonStats(kStageType),
- STAGE_MULTI_ITERATOR));
- ret->specific.reset(new CollectionScanStats());
- return ret.release();
- }
+PlanStageStats* MultiIteratorStage::getStats() {
+ std::unique_ptr<PlanStageStats> ret(
+ new PlanStageStats(CommonStats(kStageType), STAGE_MULTI_ITERATOR));
+ ret->specific.reset(new CollectionScanStats());
+ return ret.release();
+}
-} // namespace mongo
+} // namespace mongo
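
MultiIteratorStage::work() above always works the back cursor and pops it from the vector once it stops producing records. A minimal standalone sketch of that drain-the-back-cursor loop, with a hypothetical FakeCursor standing in for RecordCursor and std::optional used in place of boost::optional:

#include <cstddef>
#include <iostream>
#include <memory>
#include <optional>
#include <vector>

struct FakeCursor {
    std::vector<int> records;
    size_t pos = 0;
    std::optional<int> next() {
        if (pos == records.size())
            return std::nullopt;
        return records[pos++];
    }
};

int main() {
    std::vector<std::unique_ptr<FakeCursor>> iterators;
    iterators.push_back(std::make_unique<FakeCursor>());
    iterators.back()->records = {1, 2};
    iterators.push_back(std::make_unique<FakeCursor>());
    iterators.back()->records = {3, 4};

    // Always work the back cursor; pop it once it is exhausted.
    while (!iterators.empty()) {
        if (auto record = iterators.back()->next()) {
            std::cout << *record << '\n';  // prints 3, 4, 1, 2
        } else {
            iterators.pop_back();
        }
    }
    return 0;
}
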
diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h
index ac2cf44b007..ada4bc16c9a 100644
--- a/src/mongo/db/exec/multi_iterator.h
+++ b/src/mongo/db/exec/multi_iterator.h
@@ -38,60 +38,66 @@
namespace mongo {
- /**
- * Iterates over a collection using multiple underlying RecordCursors.
- *
- * This is a special stage which is not used automatically by queries. It is intended for
- * special commands that work with RecordCursors. For example, it is used by the
- * parallelCollectionScan and repairCursor commands
- */
- class MultiIteratorStage : public PlanStage {
- public:
- MultiIteratorStage(OperationContext* txn, WorkingSet* ws, Collection* collection);
+/**
+ * Iterates over a collection using multiple underlying RecordCursors.
+ *
+ * This is a special stage which is not used automatically by queries. It is intended for
+ * special commands that work with RecordCursors. For example, it is used by the
+ * parallelCollectionScan and repairCursor commands
+ */
+class MultiIteratorStage : public PlanStage {
+public:
+ MultiIteratorStage(OperationContext* txn, WorkingSet* ws, Collection* collection);
- ~MultiIteratorStage() { }
+ ~MultiIteratorStage() {}
- void addIterator(std::unique_ptr<RecordCursor> it);
+ void addIterator(std::unique_ptr<RecordCursor> it);
- virtual PlanStage::StageState work(WorkingSetID* out);
+ virtual PlanStage::StageState work(WorkingSetID* out);
- virtual bool isEOF();
+ virtual bool isEOF();
- void kill();
+ void kill();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- // Returns empty PlanStageStats object
- virtual PlanStageStats* getStats();
+ // Returns empty PlanStageStats object
+ virtual PlanStageStats* getStats();
- // Not used.
- virtual CommonStats* getCommonStats() const { return NULL; }
+ // Not used.
+ virtual CommonStats* getCommonStats() const {
+ return NULL;
+ }
- // Not used.
- virtual SpecificStats* getSpecificStats() const { return NULL; }
+ // Not used.
+ virtual SpecificStats* getSpecificStats() const {
+ return NULL;
+ }
- // Not used.
- virtual std::vector<PlanStage*> getChildren() const;
+ // Not used.
+ virtual std::vector<PlanStage*> getChildren() const;
- // Not used.
- virtual StageType stageType() const { return STAGE_MULTI_ITERATOR; }
+ // Not used.
+ virtual StageType stageType() const {
+ return STAGE_MULTI_ITERATOR;
+ }
- static const char* kStageType;
+ static const char* kStageType;
- private:
- OperationContext* _txn;
- Collection* _collection;
- std::vector<std::unique_ptr<RecordCursor>> _iterators;
+private:
+ OperationContext* _txn;
+ Collection* _collection;
+ std::vector<std::unique_ptr<RecordCursor>> _iterators;
- // Not owned by us.
- WorkingSet* _ws;
+ // Not owned by us.
+ WorkingSet* _ws;
- // We allocate a working set member with this id on construction of the stage. It gets
- // used for all fetch requests, changing the RecordId as appropriate.
- const WorkingSetID _wsidForFetch;
- };
+ // We allocate a working set member with this id on construction of the stage. It gets
+ // used for all fetch requests, changing the RecordId as appropriate.
+ const WorkingSetID _wsidForFetch;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 8919ada64e7..7f82a8c3b5f 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -51,466 +51,462 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::list;
- using std::vector;
-
- // static
- const char* MultiPlanStage::kStageType = "MULTI_PLAN";
-
- MultiPlanStage::MultiPlanStage(OperationContext* txn,
- const Collection* collection,
- CanonicalQuery* cq,
- bool shouldCache)
- : _txn(txn),
- _collection(collection),
- _shouldCache(shouldCache),
- _query(cq),
- _bestPlanIdx(kNoSuchPlan),
- _backupPlanIdx(kNoSuchPlan),
- _failure(false),
- _failureCount(0),
- _statusMemberId(WorkingSet::INVALID_ID),
- _commonStats(kStageType) {
- invariant(_collection);
+using std::unique_ptr;
+using std::endl;
+using std::list;
+using std::vector;
+
+// static
+const char* MultiPlanStage::kStageType = "MULTI_PLAN";
+
+MultiPlanStage::MultiPlanStage(OperationContext* txn,
+ const Collection* collection,
+ CanonicalQuery* cq,
+ bool shouldCache)
+ : _txn(txn),
+ _collection(collection),
+ _shouldCache(shouldCache),
+ _query(cq),
+ _bestPlanIdx(kNoSuchPlan),
+ _backupPlanIdx(kNoSuchPlan),
+ _failure(false),
+ _failureCount(0),
+ _statusMemberId(WorkingSet::INVALID_ID),
+ _commonStats(kStageType) {
+ invariant(_collection);
+}
+
+MultiPlanStage::~MultiPlanStage() {
+ for (size_t ix = 0; ix < _candidates.size(); ++ix) {
+ delete _candidates[ix].solution;
+ delete _candidates[ix].root;
}
+}
- MultiPlanStage::~MultiPlanStage() {
- for (size_t ix = 0; ix < _candidates.size(); ++ix) {
- delete _candidates[ix].solution;
- delete _candidates[ix].root;
- }
+void MultiPlanStage::addPlan(QuerySolution* solution, PlanStage* root, WorkingSet* ws) {
+ _candidates.push_back(CandidatePlan(solution, root, ws));
+}
+
+bool MultiPlanStage::isEOF() {
+ if (_failure) {
+ return true;
}
- void MultiPlanStage::addPlan(QuerySolution* solution, PlanStage* root,
- WorkingSet* ws) {
- _candidates.push_back(CandidatePlan(solution, root, ws));
+ // If _bestPlanIdx hasn't been found, can't be at EOF
+ if (!bestPlanChosen()) {
+ return false;
}
- bool MultiPlanStage::isEOF() {
- if (_failure) { return true; }
+ // We must have returned all our cached results
+ // and there must be no more results from the best plan.
+ CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
+ return bestPlan.results.empty() && bestPlan.root->isEOF();
+}
- // If _bestPlanIdx hasn't been found, can't be at EOF
- if (!bestPlanChosen()) { return false; }
+PlanStage::StageState MultiPlanStage::work(WorkingSetID* out) {
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- // We must have returned all our cached results
- // and there must be no more results from the best plan.
- CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
- return bestPlan.results.empty() && bestPlan.root->isEOF();
+ if (_failure) {
+ *out = _statusMemberId;
+ return PlanStage::FAILURE;
}
- PlanStage::StageState MultiPlanStage::work(WorkingSetID* out) {
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- if (_failure) {
- *out = _statusMemberId;
- return PlanStage::FAILURE;
- }
+ CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
- CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
+ // Look for an already produced result that provides the data the caller wants.
+ if (!bestPlan.results.empty()) {
+ *out = bestPlan.results.front();
+ bestPlan.results.pop_front();
+ _commonStats.advanced++;
+ return PlanStage::ADVANCED;
+ }
- // Look for an already produced result that provides the data the caller wants.
- if (!bestPlan.results.empty()) {
- *out = bestPlan.results.front();
- bestPlan.results.pop_front();
- _commonStats.advanced++;
- return PlanStage::ADVANCED;
- }
+ // best plan had no (or has no more) cached results
- // best plan had no (or has no more) cached results
+ StageState state = bestPlan.root->work(out);
- StageState state = bestPlan.root->work(out);
+ if (PlanStage::FAILURE == state && hasBackupPlan()) {
+ LOG(5) << "Best plan errored out switching to backup\n";
+ // Uncache the bad solution if we fall back
+ // on the backup solution.
+ //
+ // XXX: Instead of uncaching we should find a way for the
+ // cached plan runner to fall back on a different solution
+ // if the best solution fails. Alternatively we could try to
+ // defer cache insertion to be after the first produced result.
- if (PlanStage::FAILURE == state && hasBackupPlan()) {
- LOG(5) << "Best plan errored out switching to backup\n";
- // Uncache the bad solution if we fall back
- // on the backup solution.
- //
- // XXX: Instead of uncaching we should find a way for the
- // cached plan runner to fall back on a different solution
- // if the best solution fails. Alternatively we could try to
- // defer cache insertion to be after the first produced result.
+ _collection->infoCache()->getPlanCache()->remove(*_query);
- _collection->infoCache()->getPlanCache()->remove(*_query);
+ _bestPlanIdx = _backupPlanIdx;
+ _backupPlanIdx = kNoSuchPlan;
- _bestPlanIdx = _backupPlanIdx;
- _backupPlanIdx = kNoSuchPlan;
+ return _candidates[_bestPlanIdx].root->work(out);
+ }
- return _candidates[_bestPlanIdx].root->work(out);
- }
+ if (hasBackupPlan() && PlanStage::ADVANCED == state) {
+ LOG(5) << "Best plan had a blocking stage, became unblocked\n";
+ _backupPlanIdx = kNoSuchPlan;
+ }
- if (hasBackupPlan() && PlanStage::ADVANCED == state) {
- LOG(5) << "Best plan had a blocking stage, became unblocked\n";
- _backupPlanIdx = kNoSuchPlan;
- }
+ // Increment stats.
+ if (PlanStage::ADVANCED == state) {
+ _commonStats.advanced++;
+ } else if (PlanStage::NEED_TIME == state) {
+ _commonStats.needTime++;
+ } else if (PlanStage::NEED_YIELD == state) {
+ _commonStats.needYield++;
+ }
- // Increment stats.
- if (PlanStage::ADVANCED == state) {
- _commonStats.advanced++;
- }
- else if (PlanStage::NEED_TIME == state) {
- _commonStats.needTime++;
- }
- else if (PlanStage::NEED_YIELD == state) {
- _commonStats.needYield++;
+ return state;
+}
+
+Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
+ // These are the conditions which can cause us to yield:
+ // 1) The yield policy's timer elapsed, or
+ // 2) some stage requested a yield due to a document fetch, or
+ // 3) we need to yield and retry due to a WriteConflictException.
+ // In all cases, the actual yielding happens here.
+ if (yieldPolicy->shouldYield()) {
+ bool alive = yieldPolicy->yield(_fetcher.get());
+
+ if (!alive) {
+ _failure = true;
+ Status failStat(ErrorCodes::OperationFailed,
+ "PlanExecutor killed during plan selection");
+ _statusMemberId = WorkingSetCommon::allocateStatusMember(_candidates[0].ws, failStat);
+ return failStat;
}
+ }
- return state;
+ // We're done using the fetcher, so it should be freed. We don't want to
+ // use the same RecordFetcher twice.
+ _fetcher.reset();
+
+ return Status::OK();
+}
+
+// static
+size_t MultiPlanStage::getTrialPeriodWorks(OperationContext* txn, const Collection* collection) {
+ // Run each plan some number of times. This number is at least as great as
+ // 'internalQueryPlanEvaluationWorks', but may be larger for big collections.
+ size_t numWorks = internalQueryPlanEvaluationWorks;
+ if (NULL != collection) {
+ // For large collections, the number of works is set to be this
+ // fraction of the collection size.
+ double fraction = internalQueryPlanEvaluationCollFraction;
+
+ numWorks = std::max(static_cast<size_t>(internalQueryPlanEvaluationWorks),
+ static_cast<size_t>(fraction * collection->numRecords(txn)));
}
- Status MultiPlanStage::tryYield(PlanYieldPolicy* yieldPolicy) {
- // These are the conditions which can cause us to yield:
- // 1) The yield policy's timer elapsed, or
- // 2) some stage requested a yield due to a document fetch, or
- // 3) we need to yield and retry due to a WriteConflictException.
- // In all cases, the actual yielding happens here.
- if (yieldPolicy->shouldYield()) {
- bool alive = yieldPolicy->yield(_fetcher.get());
+ return numWorks;
+}
+
+// static
+size_t MultiPlanStage::getTrialPeriodNumToReturn(const CanonicalQuery& query) {
+ // Determine the number of results which we will produce during the plan
+ // ranking phase before stopping.
+ size_t numResults = static_cast<size_t>(internalQueryPlanEvaluationMaxResults);
+ if (query.getParsed().getLimit()) {
+ numResults = std::min(static_cast<size_t>(*query.getParsed().getLimit()), numResults);
+ } else if (!query.getParsed().isFromFindCommand() && query.getParsed().getBatchSize()) {
+ numResults = std::min(static_cast<size_t>(*query.getParsed().getBatchSize()), numResults);
+ }
- if (!alive) {
- _failure = true;
- Status failStat(ErrorCodes::OperationFailed,
- "PlanExecutor killed during plan selection");
- _statusMemberId = WorkingSetCommon::allocateStatusMember(_candidates[0].ws,
- failStat);
- return failStat;
- }
- }
+ return numResults;
+}
- // We're done using the fetcher, so it should be freed. We don't want to
- // use the same RecordFetcher twice.
- _fetcher.reset();
+Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
+ // Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
+ // execution work that happens here, so this is needed for the time accounting to
+ // make sense.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- return Status::OK();
- }
+ size_t numWorks = getTrialPeriodWorks(_txn, _collection);
+ size_t numResults = getTrialPeriodNumToReturn(*_query);
- // static
- size_t MultiPlanStage::getTrialPeriodWorks(OperationContext* txn,
- const Collection* collection) {
- // Run each plan some number of times. This number is at least as great as
- // 'internalQueryPlanEvaluationWorks', but may be larger for big collections.
- size_t numWorks = internalQueryPlanEvaluationWorks;
- if (NULL != collection) {
- // For large collections, the number of works is set to be this
- // fraction of the collection size.
- double fraction = internalQueryPlanEvaluationCollFraction;
-
- numWorks = std::max(static_cast<size_t>(internalQueryPlanEvaluationWorks),
- static_cast<size_t>(fraction * collection->numRecords(txn)));
+ // Work the plans, stopping when a plan hits EOF or returns some
+ // fixed number of results.
+ for (size_t ix = 0; ix < numWorks; ++ix) {
+ bool moreToDo = workAllPlans(numResults, yieldPolicy);
+ if (!moreToDo) {
+ break;
}
+ }
- return numWorks;
+ if (_failure) {
+ invariant(WorkingSet::INVALID_ID != _statusMemberId);
+ WorkingSetMember* member = _candidates[0].ws->get(_statusMemberId);
+ return WorkingSetCommon::getMemberStatus(*member);
}
- // static
- size_t MultiPlanStage::getTrialPeriodNumToReturn(const CanonicalQuery& query) {
- // Determine the number of results which we will produce during the plan
- // ranking phase before stopping.
- size_t numResults = static_cast<size_t>(internalQueryPlanEvaluationMaxResults);
- if (query.getParsed().getLimit()) {
- numResults = std::min(static_cast<size_t>(*query.getParsed().getLimit()),
- numResults);
- }
- else if (!query.getParsed().isFromFindCommand() && query.getParsed().getBatchSize()) {
- numResults = std::min(static_cast<size_t>(*query.getParsed().getBatchSize()),
- numResults);
- }
+ // After picking best plan, ranking will own plan stats from
+ // candidate solutions (winner and losers).
+ std::unique_ptr<PlanRankingDecision> ranking(new PlanRankingDecision);
+ _bestPlanIdx = PlanRanker::pickBestPlan(_candidates, ranking.get());
+ verify(_bestPlanIdx >= 0 && _bestPlanIdx < static_cast<int>(_candidates.size()));
- return numResults;
- }
+ // Copy candidate order. We will need this to sort candidate stats for explain
+ // after transferring ownership of 'ranking' to plan cache.
+ std::vector<size_t> candidateOrder = ranking->candidateOrder;
- Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
- // Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
- // execution work that happens here, so this is needed for the time accounting to
- // make sense.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ CandidatePlan& bestCandidate = _candidates[_bestPlanIdx];
+ std::list<WorkingSetID>& alreadyProduced = bestCandidate.results;
+ QuerySolution* bestSolution = bestCandidate.solution;
- size_t numWorks = getTrialPeriodWorks(_txn, _collection);
- size_t numResults = getTrialPeriodNumToReturn(*_query);
+ LOG(5) << "Winning solution:\n" << bestSolution->toString() << endl;
+ LOG(2) << "Winning plan: " << Explain::getPlanSummary(bestCandidate.root);
- // Work the plans, stopping when a plan hits EOF or returns some
- // fixed number of results.
- for (size_t ix = 0; ix < numWorks; ++ix) {
- bool moreToDo = workAllPlans(numResults, yieldPolicy);
- if (!moreToDo) { break; }
+ _backupPlanIdx = kNoSuchPlan;
+ if (bestSolution->hasBlockingStage && (0 == alreadyProduced.size())) {
+ LOG(5) << "Winner has blocking stage, looking for backup plan...\n";
+ for (size_t ix = 0; ix < _candidates.size(); ++ix) {
+ if (!_candidates[ix].solution->hasBlockingStage) {
+ LOG(5) << "Candidate " << ix << " is backup child\n";
+ _backupPlanIdx = ix;
+ break;
+ }
}
+ }
- if (_failure) {
- invariant(WorkingSet::INVALID_ID != _statusMemberId);
- WorkingSetMember* member = _candidates[0].ws->get(_statusMemberId);
- return WorkingSetCommon::getMemberStatus(*member);
+ // Store the choice we just made in the cache, if the query is of a type that is safe to
+ // cache.
+ if (PlanCache::shouldCacheQuery(*_query) && _shouldCache) {
+ // Create list of candidate solutions for the cache with
+ // the best solution at the front.
+ std::vector<QuerySolution*> solutions;
+
+ // Generate solutions and ranking decisions sorted by score.
+ for (size_t orderingIndex = 0; orderingIndex < candidateOrder.size(); ++orderingIndex) {
+ // index into candidates/ranking
+ size_t ix = candidateOrder[orderingIndex];
+ solutions.push_back(_candidates[ix].solution);
}
- // After picking best plan, ranking will own plan stats from
- // candidate solutions (winner and losers).
- std::unique_ptr<PlanRankingDecision> ranking(new PlanRankingDecision);
- _bestPlanIdx = PlanRanker::pickBestPlan(_candidates, ranking.get());
- verify(_bestPlanIdx >= 0 && _bestPlanIdx < static_cast<int>(_candidates.size()));
-
- // Copy candidate order. We will need this to sort candidate stats for explain
- // after transferring ownership of 'ranking' to plan cache.
- std::vector<size_t> candidateOrder = ranking->candidateOrder;
-
- CandidatePlan& bestCandidate = _candidates[_bestPlanIdx];
- std::list<WorkingSetID>& alreadyProduced = bestCandidate.results;
- QuerySolution* bestSolution = bestCandidate.solution;
-
- LOG(5) << "Winning solution:\n" << bestSolution->toString() << endl;
- LOG(2) << "Winning plan: " << Explain::getPlanSummary(bestCandidate.root);
-
- _backupPlanIdx = kNoSuchPlan;
- if (bestSolution->hasBlockingStage && (0 == alreadyProduced.size())) {
- LOG(5) << "Winner has blocking stage, looking for backup plan...\n";
- for (size_t ix = 0; ix < _candidates.size(); ++ix) {
- if (!_candidates[ix].solution->hasBlockingStage) {
- LOG(5) << "Candidate " << ix << " is backup child\n";
- _backupPlanIdx = ix;
- break;
- }
+ // Check solution cache data. Do not add to cache if
+ // we have any invalid SolutionCacheData data.
+ // XXX: One known example is 2D queries
+ bool validSolutions = true;
+ for (size_t ix = 0; ix < solutions.size(); ++ix) {
+ if (NULL == solutions[ix]->cacheData.get()) {
+ LOG(5) << "Not caching query because this solution has no cache data: "
+ << solutions[ix]->toString();
+ validSolutions = false;
+ break;
}
}
- // Store the choice we just made in the cache, if the query is of a type that is safe to
- // cache.
- if (PlanCache::shouldCacheQuery(*_query) && _shouldCache) {
- // Create list of candidate solutions for the cache with
- // the best solution at the front.
- std::vector<QuerySolution*> solutions;
-
- // Generate solutions and ranking decisions sorted by score.
- for (size_t orderingIndex = 0;
- orderingIndex < candidateOrder.size(); ++orderingIndex) {
- // index into candidates/ranking
- size_t ix = candidateOrder[orderingIndex];
- solutions.push_back(_candidates[ix].solution);
- }
+ if (validSolutions) {
+ _collection->infoCache()->getPlanCache()->add(*_query, solutions, ranking.release());
+ }
+ }
- // Check solution cache data. Do not add to cache if
- // we have any invalid SolutionCacheData data.
- // XXX: One known example is 2D queries
- bool validSolutions = true;
- for (size_t ix = 0; ix < solutions.size(); ++ix) {
- if (NULL == solutions[ix]->cacheData.get()) {
- LOG(5) << "Not caching query because this solution has no cache data: "
- << solutions[ix]->toString();
- validSolutions = false;
- break;
- }
- }
+ return Status::OK();
+}
- if (validSolutions) {
- _collection->infoCache()->getPlanCache()->add(*_query, solutions, ranking.release());
- }
+vector<PlanStageStats*> MultiPlanStage::generateCandidateStats() {
+ OwnedPointerVector<PlanStageStats> candidateStats;
+
+ for (size_t ix = 0; ix < _candidates.size(); ix++) {
+ if (ix == (size_t)_bestPlanIdx) {
+ continue;
+ }
+ if (ix == (size_t)_backupPlanIdx) {
+ continue;
}
- return Status::OK();
+ PlanStageStats* stats = _candidates[ix].root->getStats();
+ candidateStats.push_back(stats);
}
- vector<PlanStageStats*> MultiPlanStage::generateCandidateStats() {
- OwnedPointerVector<PlanStageStats> candidateStats;
+ return candidateStats.release();
+}
- for (size_t ix = 0; ix < _candidates.size(); ix++) {
- if (ix == (size_t)_bestPlanIdx) { continue; }
- if (ix == (size_t)_backupPlanIdx) { continue; }
+bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolicy) {
+ bool doneWorking = false;
- PlanStageStats* stats = _candidates[ix].root->getStats();
- candidateStats.push_back(stats);
+ for (size_t ix = 0; ix < _candidates.size(); ++ix) {
+ CandidatePlan& candidate = _candidates[ix];
+ if (candidate.failed) {
+ continue;
}
- return candidateStats.release();
- }
+ // Might need to yield between calls to work due to the timer elapsing.
+ if (!(tryYield(yieldPolicy)).isOK()) {
+ return false;
+ }
- bool MultiPlanStage::workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolicy) {
- bool doneWorking = false;
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = candidate.root->work(&id);
- for (size_t ix = 0; ix < _candidates.size(); ++ix) {
- CandidatePlan& candidate = _candidates[ix];
- if (candidate.failed) { continue; }
+ if (PlanStage::ADVANCED == state) {
+ // Save result for later.
+ candidate.results.push_back(id);
+
+ // Once a plan returns enough results, stop working.
+ if (candidate.results.size() >= numResults) {
+ doneWorking = true;
+ }
+ } else if (PlanStage::IS_EOF == state) {
+ // First plan to hit EOF wins automatically. Stop evaluating other plans.
+ // Assumes that the ranking will pick this plan.
+ doneWorking = true;
+ } else if (PlanStage::NEED_YIELD == state) {
+ if (id == WorkingSet::INVALID_ID) {
+ if (!yieldPolicy->allowedToYield())
+ throw WriteConflictException();
+ } else {
+ WorkingSetMember* member = candidate.ws->get(id);
+ invariant(member->hasFetcher());
+ // Transfer ownership of the fetcher and yield.
+ _fetcher.reset(member->releaseFetcher());
+ }
+
+ if (yieldPolicy->allowedToYield()) {
+ yieldPolicy->forceYield();
+ }
- // Might need to yield between calls to work due to the timer elapsing.
if (!(tryYield(yieldPolicy)).isOK()) {
return false;
}
+ } else if (PlanStage::NEED_TIME != state) {
+ // FAILURE or DEAD. Do we want to just tank that plan and try the rest? We
+ // probably want to fail globally as this shouldn't happen anyway.
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = candidate.root->work(&id);
-
- if (PlanStage::ADVANCED == state) {
- // Save result for later.
- candidate.results.push_back(id);
+ candidate.failed = true;
+ ++_failureCount;
- // Once a plan returns enough results, stop working.
- if (candidate.results.size() >= numResults) {
- doneWorking = true;
- }
+ // Propagate most recent seen failure to parent.
+ if (PlanStage::FAILURE == state) {
+ _statusMemberId = id;
}
- else if (PlanStage::IS_EOF == state) {
- // First plan to hit EOF wins automatically. Stop evaluating other plans.
- // Assumes that the ranking will pick this plan.
- doneWorking = true;
- }
- else if (PlanStage::NEED_YIELD == state) {
- if (id == WorkingSet::INVALID_ID) {
- if (!yieldPolicy->allowedToYield())
- throw WriteConflictException();
- }
- else {
- WorkingSetMember* member = candidate.ws->get(id);
- invariant(member->hasFetcher());
- // Transfer ownership of the fetcher and yield.
- _fetcher.reset(member->releaseFetcher());
- }
-
- if (yieldPolicy->allowedToYield()) {
- yieldPolicy->forceYield();
- }
-
- if (!(tryYield(yieldPolicy)).isOK()) {
- return false;
- }
- }
- else if (PlanStage::NEED_TIME != state) {
- // FAILURE or DEAD. Do we want to just tank that plan and try the rest? We
- // probably want to fail globally as this shouldn't happen anyway.
-
- candidate.failed = true;
- ++_failureCount;
-
- // Propagate most recent seen failure to parent.
- if (PlanStage::FAILURE == state) {
- _statusMemberId = id;
- }
-
- if (_failureCount == _candidates.size()) {
- _failure = true;
- return false;
- }
+
+ if (_failureCount == _candidates.size()) {
+ _failure = true;
+ return false;
}
}
-
- return !doneWorking;
}
- void MultiPlanStage::saveState() {
- _txn = NULL;
- for (size_t i = 0; i < _candidates.size(); ++i) {
- _candidates[i].root->saveState();
- }
+ return !doneWorking;
+}
+
+void MultiPlanStage::saveState() {
+ _txn = NULL;
+ for (size_t i = 0; i < _candidates.size(); ++i) {
+ _candidates[i].root->saveState();
}
+}
- void MultiPlanStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
+void MultiPlanStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
- for (size_t i = 0; i < _candidates.size(); ++i) {
- _candidates[i].root->restoreState(opCtx);
+ for (size_t i = 0; i < _candidates.size(); ++i) {
+ _candidates[i].root->restoreState(opCtx);
+ }
+}
+
+namespace {
+
+void invalidateHelper(OperationContext* txn,
+ WorkingSet* ws, // may flag for review
+ const RecordId& dl,
+ list<WorkingSetID>* idsToInvalidate,
+ const Collection* collection) {
+ for (list<WorkingSetID>::iterator it = idsToInvalidate->begin();
+ it != idsToInvalidate->end();) {
+ WorkingSetMember* member = ws->get(*it);
+ if (member->hasLoc() && member->loc == dl) {
+ list<WorkingSetID>::iterator next = it;
+ next++;
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, collection);
+ ws->flagForReview(*it);
+ idsToInvalidate->erase(it);
+ it = next;
+ } else {
+ it++;
}
}
+}
+}
- namespace {
-
- void invalidateHelper(OperationContext* txn,
- WorkingSet* ws, // may flag for review
- const RecordId& dl,
- list<WorkingSetID>* idsToInvalidate,
- const Collection* collection) {
- for (list<WorkingSetID>::iterator it = idsToInvalidate->begin();
- it != idsToInvalidate->end();) {
- WorkingSetMember* member = ws->get(*it);
- if (member->hasLoc() && member->loc == dl) {
- list<WorkingSetID>::iterator next = it;
- next++;
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, collection);
- ws->flagForReview(*it);
- idsToInvalidate->erase(it);
- it = next;
- }
- else {
- it++;
- }
- }
- }
+void MultiPlanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ if (_failure) {
+ return;
}
- void MultiPlanStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- if (_failure) { return; }
-
- if (bestPlanChosen()) {
- CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
- bestPlan.root->invalidate(txn, dl, type);
- invalidateHelper(txn, bestPlan.ws, dl, &bestPlan.results, _collection);
- if (hasBackupPlan()) {
- CandidatePlan& backupPlan = _candidates[_backupPlanIdx];
- backupPlan.root->invalidate(txn, dl, type);
- invalidateHelper(txn, backupPlan.ws, dl, &backupPlan.results, _collection);
- }
+ if (bestPlanChosen()) {
+ CandidatePlan& bestPlan = _candidates[_bestPlanIdx];
+ bestPlan.root->invalidate(txn, dl, type);
+ invalidateHelper(txn, bestPlan.ws, dl, &bestPlan.results, _collection);
+ if (hasBackupPlan()) {
+ CandidatePlan& backupPlan = _candidates[_backupPlanIdx];
+ backupPlan.root->invalidate(txn, dl, type);
+ invalidateHelper(txn, backupPlan.ws, dl, &backupPlan.results, _collection);
}
- else {
- for (size_t ix = 0; ix < _candidates.size(); ++ix) {
- _candidates[ix].root->invalidate(txn, dl, type);
- invalidateHelper(txn, _candidates[ix].ws, dl, &_candidates[ix].results, _collection);
- }
+ } else {
+ for (size_t ix = 0; ix < _candidates.size(); ++ix) {
+ _candidates[ix].root->invalidate(txn, dl, type);
+ invalidateHelper(txn, _candidates[ix].ws, dl, &_candidates[ix].results, _collection);
}
}
+}
- bool MultiPlanStage::hasBackupPlan() const {
- return kNoSuchPlan != _backupPlanIdx;
- }
+bool MultiPlanStage::hasBackupPlan() const {
+ return kNoSuchPlan != _backupPlanIdx;
+}
- bool MultiPlanStage::bestPlanChosen() const {
- return kNoSuchPlan != _bestPlanIdx;
- }
+bool MultiPlanStage::bestPlanChosen() const {
+ return kNoSuchPlan != _bestPlanIdx;
+}
- int MultiPlanStage::bestPlanIdx() const {
- return _bestPlanIdx;
- }
+int MultiPlanStage::bestPlanIdx() const {
+ return _bestPlanIdx;
+}
- QuerySolution* MultiPlanStage::bestSolution() {
- if (_bestPlanIdx == kNoSuchPlan)
- return NULL;
+QuerySolution* MultiPlanStage::bestSolution() {
+ if (_bestPlanIdx == kNoSuchPlan)
+ return NULL;
- return _candidates[_bestPlanIdx].solution;
- }
+ return _candidates[_bestPlanIdx].solution;
+}
- vector<PlanStage*> MultiPlanStage::getChildren() const {
- vector<PlanStage*> children;
+vector<PlanStage*> MultiPlanStage::getChildren() const {
+ vector<PlanStage*> children;
- if (bestPlanChosen()) {
- children.push_back(_candidates[_bestPlanIdx].root);
- }
- else {
- for (size_t i = 0; i < _candidates.size(); i++) {
- children.push_back(_candidates[i].root);
- }
+ if (bestPlanChosen()) {
+ children.push_back(_candidates[_bestPlanIdx].root);
+ } else {
+ for (size_t i = 0; i < _candidates.size(); i++) {
+ children.push_back(_candidates[i].root);
}
-
- return children;
}
- PlanStageStats* MultiPlanStage::getStats() {
- if (bestPlanChosen()) {
- return _candidates[_bestPlanIdx].root->getStats();
- }
- if (hasBackupPlan()) {
- return _candidates[_backupPlanIdx].root->getStats();
- }
- _commonStats.isEOF = isEOF();
-
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_MULTI_PLAN));
+ return children;
+}
- return ret.release();
+PlanStageStats* MultiPlanStage::getStats() {
+ if (bestPlanChosen()) {
+ return _candidates[_bestPlanIdx].root->getStats();
}
-
- const CommonStats* MultiPlanStage::getCommonStats() const {
- return &_commonStats;
+ if (hasBackupPlan()) {
+ return _candidates[_backupPlanIdx].root->getStats();
}
+ _commonStats.isEOF = isEOF();
- const SpecificStats* MultiPlanStage::getSpecificStats() const {
- return &_specificStats;
- }
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_MULTI_PLAN));
+
+ return ret.release();
+}
+
+const CommonStats* MultiPlanStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* MultiPlanStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
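
The trial-period sizing in getTrialPeriodWorks() and getTrialPeriodNumToReturn() above reduces to two formulas: the number of works is the larger of a fixed knob and a fraction of the collection's record count, and the result cap is the smaller of the query's limit (or batch size) and a fixed maximum. A standalone sketch with hypothetical constants standing in for the internalQueryPlanEvaluation* server parameters (actual defaults may differ):

#include <algorithm>
#include <cstddef>
#include <iostream>

const size_t kEvaluationWorks = 10000;     // stand-in for internalQueryPlanEvaluationWorks
const double kCollFraction = 0.3;          // stand-in for internalQueryPlanEvaluationCollFraction
const size_t kEvaluationMaxResults = 101;  // stand-in for internalQueryPlanEvaluationMaxResults

// At least the fixed knob, but a fraction of the collection size for big collections.
size_t trialPeriodWorks(size_t numRecords) {
    return std::max(kEvaluationWorks, static_cast<size_t>(kCollFraction * numRecords));
}

// The trial ends early once any candidate produces this many results.
size_t trialPeriodNumToReturn(size_t limit) {
    return limit ? std::min(limit, kEvaluationMaxResults) : kEvaluationMaxResults;
}

int main() {
    std::cout << trialPeriodWorks(1000) << '\n';     // small collection: 10000 (the knob wins)
    std::cout << trialPeriodWorks(1000000) << '\n';  // large collection: 300000 (fraction wins)
    std::cout << trialPeriodNumToReturn(5) << '\n';  // query with limit 5: 5
    std::cout << trialPeriodNumToReturn(0) << '\n';  // no limit: 101
    return 0;
}
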
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 189190163ca..28030fb8d34 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -41,192 +41,194 @@
namespace mongo {
+/**
+ * This stage outputs its mainChild, and possibly its backup child
+ * and also updates the cache.
+ *
+ * Preconditions: Valid RecordId.
+ *
+ * Owns the query solutions and PlanStage roots for all candidate plans.
+ */
+class MultiPlanStage : public PlanStage {
+public:
+ /**
+ * Takes no ownership.
+ *
+ * If 'shouldCache' is true, writes a cache entry for the winning plan to the plan cache
+ * when possible. If 'shouldCache' is false, the plan cache will never be written.
+ */
+ MultiPlanStage(OperationContext* txn,
+ const Collection* collection,
+ CanonicalQuery* cq,
+ bool shouldCache = true);
+
+ virtual ~MultiPlanStage();
+
+ virtual bool isEOF();
+
+ virtual StageState work(WorkingSetID* out);
+
+ virtual void saveState();
+
+ virtual void restoreState(OperationContext* opCtx);
+
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ virtual std::vector<PlanStage*> getChildren() const;
+
+ virtual StageType stageType() const {
+ return STAGE_MULTI_PLAN;
+ }
+
+ virtual PlanStageStats* getStats();
+
+ virtual const CommonStats* getCommonStats() const;
+
+ virtual const SpecificStats* getSpecificStats() const;
+
+ /**
+     * Takes ownership of QuerySolution and PlanStage, not of WorkingSet.
+ */
+ void addPlan(QuerySolution* solution, PlanStage* root, WorkingSet* sharedWs);
+
+ /**
+ * Runs all plans added by addPlan, ranks them, and picks a best.
+ * All further calls to work(...) will return results from the best plan.
+ *
+ * If 'yieldPolicy' is non-NULL, then all locks may be yielded in between round-robin
+ * works of the candidate plans. By default, 'yieldPolicy' is NULL and no yielding will
+ * take place.
+ *
+ * Returns a non-OK status if the plan was killed during yield.
+ */
+ Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
+
+ /**
+ * Returns the number of times that we are willing to work a plan during a trial period.
+ *
+ * Calculated based on a fixed query knob and the size of the collection.
+ */
+ static size_t getTrialPeriodWorks(OperationContext* txn, const Collection* collection);
+
+ /**
+ * Returns the max number of documents which we should allow any plan to return during the
+ * trial period. As soon as any plan hits this number of documents, the trial period ends.
+ */
+ static size_t getTrialPeriodNumToReturn(const CanonicalQuery& query);
+
+ /** Return true if a best plan has been chosen */
+ bool bestPlanChosen() const;
+
+ /** Return the index of the best plan chosen, for testing */
+ int bestPlanIdx() const;
+
+ /**
+ * Returns the QuerySolution for the best plan, or NULL if no best plan
+ *
+ * The MultiPlanStage retains ownership of the winning QuerySolution and returns an
+ * unowned pointer.
+ */
+ QuerySolution* bestSolution();
+
/**
- * This stage outputs its mainChild, and possibly it's backup child
- * and also updates the cache.
+ * Returns true if a backup plan was picked.
+ * This is the case when the best plan has a blocking stage.
+ * Exposed for testing.
+ */
+ bool hasBackupPlan() const;
+
+ //
+ // Used by explain.
+ //
+
+ /**
+ * Gathers execution stats for all losing plans. Caller takes ownership of
+ * all pointers in the returned vector.
+ */
+ std::vector<PlanStageStats*> generateCandidateStats();
+
+ static const char* kStageType;
+
+private:
+ //
+ // Have all our candidate plans do something.
+ // If all our candidate plans fail, *objOut will contain
+ // information on the failure.
+ //
+
+ /**
+ * Calls work on each child plan in a round-robin fashion. We stop when any plan hits EOF
+ * or returns 'numResults' results.
*
- * Preconditions: Valid RecordId.
+ * Returns true if we need to keep working the plans and false otherwise.
+ */
+ bool workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolicy);
+
+ /**
+ * Checks whether we need to perform either a timing-based yield or a yield for a document
+ * fetch. If so, then uses 'yieldPolicy' to actually perform the yield.
*
- * Owns the query solutions and PlanStage roots for all candidate plans.
+ * Returns a non-OK status if killed during a yield.
*/
- class MultiPlanStage : public PlanStage {
- public:
- /**
- * Takes no ownership.
- *
- * If 'shouldCache' is true, writes a cache entry for the winning plan to the plan cache
- * when possible. If 'shouldCache' is false, the plan cache will never be written.
- */
- MultiPlanStage(OperationContext* txn,
- const Collection* collection,
- CanonicalQuery* cq,
- bool shouldCache = true);
-
- virtual ~MultiPlanStage();
-
- virtual bool isEOF();
-
- virtual StageState work(WorkingSetID* out);
-
- virtual void saveState();
-
- virtual void restoreState(OperationContext* opCtx);
-
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
- virtual StageType stageType() const { return STAGE_MULTI_PLAN; }
-
- virtual PlanStageStats* getStats();
-
- virtual const CommonStats* getCommonStats() const;
-
- virtual const SpecificStats* getSpecificStats() const;
-
- /**
- * Takes ownership of QuerySolution and PlanStage. not of WorkingSet
- */
- void addPlan(QuerySolution* solution, PlanStage* root, WorkingSet* sharedWs);
-
- /**
- * Runs all plans added by addPlan, ranks them, and picks a best.
- * All further calls to work(...) will return results from the best plan.
- *
- * If 'yieldPolicy' is non-NULL, then all locks may be yielded in between round-robin
- * works of the candidate plans. By default, 'yieldPolicy' is NULL and no yielding will
- * take place.
- *
- * Returns a non-OK status if the plan was killed during yield.
- */
- Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
-
- /**
- * Returns the number of times that we are willing to work a plan during a trial period.
- *
- * Calculated based on a fixed query knob and the size of the collection.
- */
- static size_t getTrialPeriodWorks(OperationContext* txn, const Collection* collection);
-
- /**
- * Returns the max number of documents which we should allow any plan to return during the
- * trial period. As soon as any plan hits this number of documents, the trial period ends.
- */
- static size_t getTrialPeriodNumToReturn(const CanonicalQuery& query);
-
- /** Return true if a best plan has been chosen */
- bool bestPlanChosen() const;
-
- /** Return the index of the best plan chosen, for testing */
- int bestPlanIdx() const;
-
- /**
- * Returns the QuerySolution for the best plan, or NULL if no best plan
- *
- * The MultiPlanStage retains ownership of the winning QuerySolution and returns an
- * unowned pointer.
- */
- QuerySolution* bestSolution();
-
- /**
- * Returns true if a backup plan was picked.
- * This is the case when the best plan has a blocking stage.
- * Exposed for testing.
- */
- bool hasBackupPlan() const;
-
- //
- // Used by explain.
- //
-
- /**
- * Gathers execution stats for all losing plans. Caller takes ownership of
- * all pointers in the returned vector.
- */
- std::vector<PlanStageStats*> generateCandidateStats();
-
- static const char* kStageType;
-
- private:
- //
- // Have all our candidate plans do something.
- // If all our candidate plans fail, *objOut will contain
- // information on the failure.
- //
-
- /**
- * Calls work on each child plan in a round-robin fashion. We stop when any plan hits EOF
- * or returns 'numResults' results.
- *
- * Returns true if we need to keep working the plans and false otherwise.
- */
- bool workAllPlans(size_t numResults, PlanYieldPolicy* yieldPolicy);
-
- /**
- * Checks whether we need to perform either a timing-based yield or a yield for a document
- * fetch. If so, then uses 'yieldPolicy' to actually perform the yield.
- *
- * Returns a non-OK status if killed during a yield.
- */
- Status tryYield(PlanYieldPolicy* yieldPolicy);
-
- static const int kNoSuchPlan = -1;
-
- // Not owned here.
- OperationContext* _txn;
-
- // Not owned here. Must be non-null.
- const Collection* _collection;
-
- // Whether or not we should try to cache the winning plan in the plan cache.
- const bool _shouldCache;
-
- // The query that we're trying to figure out the best solution to.
- // not owned here
- CanonicalQuery* _query;
-
- // Candidate plans. Each candidate includes a child PlanStage tree and QuerySolution which
- // are owned here. Ownership of all QuerySolutions is retained here, and will *not* be
- // tranferred to the PlanExecutor that wraps this stage.
- std::vector<CandidatePlan> _candidates;
-
- // index into _candidates, of the winner of the plan competition
- // uses -1 / kNoSuchPlan when best plan is not (yet) known
- int _bestPlanIdx;
-
- // index into _candidates, of the backup plan for sort
- // uses -1 / kNoSuchPlan when best plan is not (yet) known
- int _backupPlanIdx;
-
- // Set if this MultiPlanStage cannot continue, and the query must fail. This can happen in
- // two ways. The first is that all candidate plans fail. Note that one plan can fail
- // during normal execution of the plan competition. Here is an example:
- //
- // Plan 1: collection scan with sort. Sort runs out of memory.
- // Plan 2: ixscan that provides sort. Won't run out of memory.
- //
- // We want to choose plan 2 even if plan 1 fails.
- //
- // The second way for failure to occur is that the execution of this query is killed during
- // a yield, by some concurrent event such as a collection drop.
- bool _failure;
-
- // If everything fails during the plan competition, we can't pick one.
- size_t _failureCount;
-
- // if pickBestPlan fails, this is set to the wsid of the statusMember
- // returned by ::work()
- WorkingSetID _statusMemberId;
-
- // When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
- // to use to pull the record into memory. We take ownership of the RecordFetcher here,
- // deleting it after we've had a chance to do the fetch. For timing-based yields, we
- // just pass a NULL fetcher.
- std::unique_ptr<RecordFetcher> _fetcher;
-
- // Stats
- CommonStats _commonStats;
- MultiPlanStats _specificStats;
- };
+ Status tryYield(PlanYieldPolicy* yieldPolicy);
+
+ static const int kNoSuchPlan = -1;
+
+ // Not owned here.
+ OperationContext* _txn;
+
+ // Not owned here. Must be non-null.
+ const Collection* _collection;
+
+ // Whether or not we should try to cache the winning plan in the plan cache.
+ const bool _shouldCache;
+
+ // The query that we're trying to figure out the best solution to.
+ // not owned here
+ CanonicalQuery* _query;
+
+ // Candidate plans. Each candidate includes a child PlanStage tree and QuerySolution which
+ // are owned here. Ownership of all QuerySolutions is retained here, and will *not* be
+    // transferred to the PlanExecutor that wraps this stage.
+ std::vector<CandidatePlan> _candidates;
+
+ // index into _candidates, of the winner of the plan competition
+ // uses -1 / kNoSuchPlan when best plan is not (yet) known
+ int _bestPlanIdx;
+
+ // index into _candidates, of the backup plan for sort
+ // uses -1 / kNoSuchPlan when best plan is not (yet) known
+ int _backupPlanIdx;
+
+ // Set if this MultiPlanStage cannot continue, and the query must fail. This can happen in
+ // two ways. The first is that all candidate plans fail. Note that one plan can fail
+ // during normal execution of the plan competition. Here is an example:
+ //
+ // Plan 1: collection scan with sort. Sort runs out of memory.
+ // Plan 2: ixscan that provides sort. Won't run out of memory.
+ //
+ // We want to choose plan 2 even if plan 1 fails.
+ //
+ // The second way for failure to occur is that the execution of this query is killed during
+ // a yield, by some concurrent event such as a collection drop.
+ bool _failure;
+
+ // If everything fails during the plan competition, we can't pick one.
+ size_t _failureCount;
+
+ // if pickBestPlan fails, this is set to the wsid of the statusMember
+ // returned by ::work()
+ WorkingSetID _statusMemberId;
+
+ // When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
+ // to use to pull the record into memory. We take ownership of the RecordFetcher here,
+ // deleting it after we've had a chance to do the fetch. For timing-based yields, we
+ // just pass a NULL fetcher.
+ std::unique_ptr<RecordFetcher> _fetcher;
+
+ // Stats
+ CommonStats _commonStats;
+ MultiPlanStats _specificStats;
+};
} // namespace mongo
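
The header above spells out the plan-selection contract: candidates registered via addPlan() are worked round-robin by pickBestPlan() until one reaches EOF or produces enough results, after which the winner serves every later work() call. The toy sketch below only illustrates that round-robin trial; ToyCandidate and the produced-count scoring are simplified stand-ins, whereas the real stage ranks candidates with PlanRanker.

#include <cstddef>
#include <iostream>
#include <vector>

struct ToyCandidate {
    size_t remaining;  // documents this plan can still return
    size_t produced;
    explicit ToyCandidate(size_t r) : remaining(r), produced(0) {}
};

size_t pickBestPlan(std::vector<ToyCandidate>& candidates, size_t numWorks, size_t numResults) {
    bool done = false;
    for (size_t work = 0; work < numWorks && !done; ++work) {
        for (ToyCandidate& c : candidates) {
            if (c.remaining == 0)
                continue;  // this plan is at EOF; the real stage ends the whole trial here
            --c.remaining;
            if (++c.produced >= numResults) {
                done = true;  // some plan produced enough results; stop the trial
                break;
            }
        }
    }
    // Pick the candidate that buffered the most results during the trial.
    size_t best = 0;
    for (size_t i = 1; i < candidates.size(); ++i) {
        if (candidates[i].produced > candidates[best].produced)
            best = i;
    }
    return best;
}

int main() {
    std::vector<ToyCandidate> candidates = {ToyCandidate(3), ToyCandidate(50), ToyCandidate(10)};
    std::cout << "winner: " << pickBestPlan(candidates, 100, 8) << '\n';  // prints "winner: 1"
    return 0;
}
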
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index f7c1c9035d3..515120d86a6 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -36,360 +36,334 @@
namespace mongo {
- using std::vector;
-
- NearStage::NearStage(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- PlanStageStats* stats)
- : _txn(txn),
- _workingSet(workingSet),
- _collection(collection),
- _searchState(SearchState_Initializing),
- _stats(stats),
- _nextInterval(NULL) {
-
- // Ensure we have specific distance search stats unless a child class specified their
- // own distance stats subclass
- if (!_stats->specific) {
- _stats->specific.reset(new NearStats);
- }
+using std::vector;
+
+NearStage::NearStage(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ PlanStageStats* stats)
+ : _txn(txn),
+ _workingSet(workingSet),
+ _collection(collection),
+ _searchState(SearchState_Initializing),
+ _stats(stats),
+ _nextInterval(NULL) {
+    // Ensure we have specific distance search stats unless a child class specified its
+    // own distance stats subclass.
+ if (!_stats->specific) {
+ _stats->specific.reset(new NearStats);
}
-
- NearStage::~NearStage() {
+}
+
+NearStage::~NearStage() {}
+
+NearStage::CoveredInterval::CoveredInterval(PlanStage* covering,
+ bool dedupCovering,
+ double minDistance,
+ double maxDistance,
+ bool inclusiveMax)
+ : covering(covering),
+ dedupCovering(dedupCovering),
+ minDistance(minDistance),
+ maxDistance(maxDistance),
+ inclusiveMax(inclusiveMax) {}
+
+
+PlanStage::StageState NearStage::initNext(WorkingSetID* out) {
+ PlanStage::StageState state = initialize(_txn, _workingSet, _collection, out);
+ if (state == PlanStage::IS_EOF) {
+ _searchState = SearchState_Buffering;
+ return PlanStage::NEED_TIME;
}
- NearStage::CoveredInterval::CoveredInterval(PlanStage* covering,
- bool dedupCovering,
- double minDistance,
- double maxDistance,
- bool inclusiveMax) :
- covering(covering),
- dedupCovering(dedupCovering),
- minDistance(minDistance),
- maxDistance(maxDistance),
- inclusiveMax(inclusiveMax) {
- }
+ invariant(state != PlanStage::ADVANCED);
+ // Propagate NEED_TIME or errors upward.
+ return state;
+}
- PlanStage::StageState NearStage::initNext(WorkingSetID* out) {
- PlanStage::StageState state = initialize(_txn, _workingSet, _collection, out);
- if (state == PlanStage::IS_EOF) {
- _searchState = SearchState_Buffering;
- return PlanStage::NEED_TIME;
- }
+PlanStage::StageState NearStage::work(WorkingSetID* out) {
+ ++_stats->common.works;
- invariant(state != PlanStage::ADVANCED);
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_stats->common.executionTimeMillis);
- // Propagate NEED_TIME or errors upward.
- return state;
- }
+ WorkingSetID toReturn = WorkingSet::INVALID_ID;
+ Status error = Status::OK();
+ PlanStage::StageState nextState = PlanStage::NEED_TIME;
- PlanStage::StageState NearStage::work(WorkingSetID* out) {
+ //
+ // Work the search
+ //
- ++_stats->common.works;
+ if (SearchState_Initializing == _searchState) {
+ nextState = initNext(&toReturn);
+ } else if (SearchState_Buffering == _searchState) {
+ nextState = bufferNext(&toReturn, &error);
+ } else if (SearchState_Advancing == _searchState) {
+ nextState = advanceNext(&toReturn);
+ } else {
+ invariant(SearchState_Finished == _searchState);
+ nextState = PlanStage::IS_EOF;
+ }
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_stats->common.executionTimeMillis);
+ //
+ // Handle the results
+ //
+
+ if (PlanStage::FAILURE == nextState) {
+ *out = WorkingSetCommon::allocateStatusMember(_workingSet, error);
+ } else if (PlanStage::ADVANCED == nextState) {
+ *out = toReturn;
+ ++_stats->common.advanced;
+ } else if (PlanStage::NEED_YIELD == nextState) {
+ *out = toReturn;
+ ++_stats->common.needYield;
+ } else if (PlanStage::NEED_TIME == nextState) {
+ ++_stats->common.needTime;
+ } else if (PlanStage::IS_EOF == nextState) {
+ _stats->common.isEOF = true;
+ }
- WorkingSetID toReturn = WorkingSet::INVALID_ID;
- Status error = Status::OK();
- PlanStage::StageState nextState = PlanStage::NEED_TIME;
+ return nextState;
+}
- //
- // Work the search
- //
+/**
+ * Holds a generic search result with a distance computed in some fashion.
+ */
+struct NearStage::SearchResult {
+ SearchResult(WorkingSetID resultID, double distance) : resultID(resultID), distance(distance) {}
- if (SearchState_Initializing == _searchState) {
- nextState = initNext(&toReturn);
- }
- else if (SearchState_Buffering == _searchState) {
- nextState = bufferNext(&toReturn, &error);
- }
- else if (SearchState_Advancing == _searchState) {
- nextState = advanceNext(&toReturn);
- }
- else {
- invariant(SearchState_Finished == _searchState);
- nextState = PlanStage::IS_EOF;
- }
+ bool operator<(const SearchResult& other) const {
+ // We want increasing distance, not decreasing, so we reverse the <
+ return distance > other.distance;
+ }
- //
- // Handle the results
- //
+ WorkingSetID resultID;
+ double distance;
+};
- if (PlanStage::FAILURE == nextState) {
- *out = WorkingSetCommon::allocateStatusMember(_workingSet, error);
- }
- else if (PlanStage::ADVANCED == nextState) {
- *out = toReturn;
- ++_stats->common.advanced;
- }
- else if (PlanStage::NEED_YIELD == nextState) {
- *out = toReturn;
- ++_stats->common.needYield;
- }
- else if (PlanStage::NEED_TIME == nextState) {
- ++_stats->common.needTime;
+// Set "toReturn" when NEED_YIELD.
+PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* error) {
+ //
+ // Try to retrieve the next covered member
+ //
+
+ if (!_nextInterval) {
+ StatusWith<CoveredInterval*> intervalStatus = nextInterval(_txn, _workingSet, _collection);
+ if (!intervalStatus.isOK()) {
+ _searchState = SearchState_Finished;
+ *error = intervalStatus.getStatus();
+ return PlanStage::FAILURE;
}
- else if (PlanStage::IS_EOF == nextState) {
- _stats->common.isEOF = true;
+
+ if (NULL == intervalStatus.getValue()) {
+ _searchState = SearchState_Finished;
+ return PlanStage::IS_EOF;
}
- return nextState;
+ // CoveredInterval and its child stage are owned by _childrenIntervals
+ _childrenIntervals.push_back(intervalStatus.getValue());
+ _nextInterval = _childrenIntervals.back();
+ _nextIntervalStats.reset(new IntervalStats());
+ _nextIntervalStats->minDistanceAllowed = _nextInterval->minDistance;
+ _nextIntervalStats->maxDistanceAllowed = _nextInterval->maxDistance;
+ _nextIntervalStats->inclusiveMaxDistanceAllowed = _nextInterval->inclusiveMax;
}
- /**
- * Holds a generic search result with a distance computed in some fashion.
- */
- struct NearStage::SearchResult {
-
- SearchResult(WorkingSetID resultID, double distance) :
- resultID(resultID), distance(distance) {
- }
+ WorkingSetID nextMemberID;
+ PlanStage::StageState intervalState = _nextInterval->covering->work(&nextMemberID);
- bool operator<(const SearchResult& other) const {
- // We want increasing distance, not decreasing, so we reverse the <
- return distance > other.distance;
- }
+ if (PlanStage::IS_EOF == intervalState) {
+ getNearStats()->intervalStats.push_back(*_nextIntervalStats);
+ _nextIntervalStats.reset();
+ _nextInterval = NULL;
+ _searchState = SearchState_Advancing;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::FAILURE == intervalState) {
+ *error = WorkingSetCommon::getMemberStatus(*_workingSet->get(nextMemberID));
+ return intervalState;
+ } else if (PlanStage::NEED_YIELD == intervalState) {
+ *toReturn = nextMemberID;
+ return intervalState;
+ } else if (PlanStage::ADVANCED != intervalState) {
+ return intervalState;
+ }
- WorkingSetID resultID;
- double distance;
- };
-
- // Set "toReturn" when NEED_YIELD.
- PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* error) {
-
- //
- // Try to retrieve the next covered member
- //
-
- if (!_nextInterval) {
-
- StatusWith<CoveredInterval*> intervalStatus = nextInterval(_txn,
- _workingSet,
- _collection);
- if (!intervalStatus.isOK()) {
- _searchState = SearchState_Finished;
- *error = intervalStatus.getStatus();
- return PlanStage::FAILURE;
- }
-
- if (NULL == intervalStatus.getValue()) {
- _searchState = SearchState_Finished;
- return PlanStage::IS_EOF;
- }
-
- // CoveredInterval and its child stage are owned by _childrenIntervals
- _childrenIntervals.push_back(intervalStatus.getValue());
- _nextInterval = _childrenIntervals.back();
- _nextIntervalStats.reset(new IntervalStats());
- _nextIntervalStats->minDistanceAllowed = _nextInterval->minDistance;
- _nextIntervalStats->maxDistanceAllowed = _nextInterval->maxDistance;
- _nextIntervalStats->inclusiveMaxDistanceAllowed = _nextInterval->inclusiveMax;
- }
+ //
+ // Try to buffer the next covered member
+ //
- WorkingSetID nextMemberID;
- PlanStage::StageState intervalState = _nextInterval->covering->work(&nextMemberID);
+ WorkingSetMember* nextMember = _workingSet->get(nextMemberID);
- if (PlanStage::IS_EOF == intervalState) {
- getNearStats()->intervalStats.push_back(*_nextIntervalStats);
- _nextIntervalStats.reset();
- _nextInterval = NULL;
- _searchState = SearchState_Advancing;
+ // The child stage may not dedup so we must dedup them ourselves.
+ if (_nextInterval->dedupCovering && nextMember->hasLoc()) {
+ if (_nextIntervalSeen.end() != _nextIntervalSeen.find(nextMember->loc)) {
+ _workingSet->free(nextMemberID);
return PlanStage::NEED_TIME;
}
- else if (PlanStage::FAILURE == intervalState) {
- *error = WorkingSetCommon::getMemberStatus(*_workingSet->get(nextMemberID));
- return intervalState;
- }
- else if (PlanStage::NEED_YIELD == intervalState) {
- *toReturn = nextMemberID;
- return intervalState;
- }
- else if (PlanStage::ADVANCED != intervalState) {
- return intervalState;
- }
-
- //
- // Try to buffer the next covered member
- //
+ }
- WorkingSetMember* nextMember = _workingSet->get(nextMemberID);
+ ++_nextIntervalStats->numResultsFound;
- // The child stage may not dedup so we must dedup them ourselves.
- if (_nextInterval->dedupCovering && nextMember->hasLoc()) {
- if (_nextIntervalSeen.end() != _nextIntervalSeen.find(nextMember->loc)) {
- _workingSet->free(nextMemberID);
- return PlanStage::NEED_TIME;
- }
- }
+ StatusWith<double> distanceStatus = computeDistance(nextMember);
- ++_nextIntervalStats->numResultsFound;
+ if (!distanceStatus.isOK()) {
+ _searchState = SearchState_Finished;
+ *error = distanceStatus.getStatus();
+ return PlanStage::FAILURE;
+ }
- StatusWith<double> distanceStatus = computeDistance(nextMember);
+ // If the member's distance is in the current distance interval, add it to our buffered
+ // results.
+ double memberDistance = distanceStatus.getValue();
+ bool inInterval = memberDistance >= _nextInterval->minDistance &&
+ (_nextInterval->inclusiveMax ? memberDistance <= _nextInterval->maxDistance
+ : memberDistance < _nextInterval->maxDistance);
+
+ // Update found distance stats
+ if (_nextIntervalStats->minDistanceFound < 0 ||
+ memberDistance < _nextIntervalStats->minDistanceFound) {
+ _nextIntervalStats->minDistanceFound = memberDistance;
+ }
- if (!distanceStatus.isOK()) {
- _searchState = SearchState_Finished;
- *error = distanceStatus.getStatus();
- return PlanStage::FAILURE;
- }
+ if (_nextIntervalStats->maxDistanceFound < 0 ||
+ memberDistance > _nextIntervalStats->maxDistanceFound) {
+ _nextIntervalStats->maxDistanceFound = memberDistance;
+ }
- // If the member's distance is in the current distance interval, add it to our buffered
- // results.
- double memberDistance = distanceStatus.getValue();
- bool inInterval = memberDistance >= _nextInterval->minDistance
- && (_nextInterval->inclusiveMax ?
- memberDistance <= _nextInterval->maxDistance :
- memberDistance < _nextInterval->maxDistance);
-
- // Update found distance stats
- if (_nextIntervalStats->minDistanceFound < 0
- || memberDistance < _nextIntervalStats->minDistanceFound) {
- _nextIntervalStats->minDistanceFound = memberDistance;
- }
+ if (inInterval) {
+ _resultBuffer.push(SearchResult(nextMemberID, memberDistance));
- if (_nextIntervalStats->maxDistanceFound < 0
- || memberDistance > _nextIntervalStats->maxDistanceFound) {
- _nextIntervalStats->maxDistanceFound = memberDistance;
+ // Store the member's RecordId, if available, for quick invalidation
+ if (nextMember->hasLoc()) {
+ _nextIntervalSeen.insert(std::make_pair(nextMember->loc, nextMemberID));
}
- if (inInterval) {
- _resultBuffer.push(SearchResult(nextMemberID, memberDistance));
-
- // Store the member's RecordId, if available, for quick invalidation
- if (nextMember->hasLoc()) {
- _nextIntervalSeen.insert(std::make_pair(nextMember->loc, nextMemberID));
- }
+ ++_nextIntervalStats->numResultsBuffered;
- ++_nextIntervalStats->numResultsBuffered;
-
- // Update buffered distance stats
- if (_nextIntervalStats->minDistanceBuffered < 0
- || memberDistance < _nextIntervalStats->minDistanceBuffered) {
- _nextIntervalStats->minDistanceBuffered = memberDistance;
- }
-
- if (_nextIntervalStats->maxDistanceBuffered < 0
- || memberDistance > _nextIntervalStats->maxDistanceBuffered) {
- _nextIntervalStats->maxDistanceBuffered = memberDistance;
- }
- }
- else {
- _workingSet->free(nextMemberID);
+ // Update buffered distance stats
+ if (_nextIntervalStats->minDistanceBuffered < 0 ||
+ memberDistance < _nextIntervalStats->minDistanceBuffered) {
+ _nextIntervalStats->minDistanceBuffered = memberDistance;
}
- return PlanStage::NEED_TIME;
+ if (_nextIntervalStats->maxDistanceBuffered < 0 ||
+ memberDistance > _nextIntervalStats->maxDistanceBuffered) {
+ _nextIntervalStats->maxDistanceBuffered = memberDistance;
+ }
+ } else {
+ _workingSet->free(nextMemberID);
}
- PlanStage::StageState NearStage::advanceNext(WorkingSetID* toReturn) {
-
- if (_resultBuffer.empty()) {
-
- // We're done returning the documents buffered for this annulus, so we can
- // clear out our buffered RecordIds.
- _nextIntervalSeen.clear();
- _searchState = SearchState_Buffering;
- return PlanStage::NEED_TIME;
- }
+ return PlanStage::NEED_TIME;
+}
- *toReturn = _resultBuffer.top().resultID;
- _resultBuffer.pop();
+PlanStage::StageState NearStage::advanceNext(WorkingSetID* toReturn) {
+ if (_resultBuffer.empty()) {
+ // We're done returning the documents buffered for this annulus, so we can
+ // clear out our buffered RecordIds.
+ _nextIntervalSeen.clear();
+ _searchState = SearchState_Buffering;
+ return PlanStage::NEED_TIME;
+ }
- // If we're returning something, take it out of our RecordId -> WSID map so that future
- // calls to invalidate don't cause us to take action for a RecordId we're done with.
- WorkingSetMember* member = _workingSet->get(*toReturn);
- if (member->hasLoc()) {
- _nextIntervalSeen.erase(member->loc);
- }
+ *toReturn = _resultBuffer.top().resultID;
+ _resultBuffer.pop();
- return PlanStage::ADVANCED;
+ // If we're returning something, take it out of our RecordId -> WSID map so that future
+ // calls to invalidate don't cause us to take action for a RecordId we're done with.
+ WorkingSetMember* member = _workingSet->get(*toReturn);
+ if (member->hasLoc()) {
+ _nextIntervalSeen.erase(member->loc);
}
- bool NearStage::isEOF() {
- return SearchState_Finished == _searchState;
- }
+ return PlanStage::ADVANCED;
+}
- void NearStage::saveState() {
- _txn = NULL;
- ++_stats->common.yields;
- for (size_t i = 0; i < _childrenIntervals.size(); i++) {
- _childrenIntervals[i]->covering->saveState();
- }
+bool NearStage::isEOF() {
+ return SearchState_Finished == _searchState;
+}
- // Subclass specific saving, e.g. saving the 2d or 2dsphere density estimator.
- finishSaveState();
+void NearStage::saveState() {
+ _txn = NULL;
+ ++_stats->common.yields;
+ for (size_t i = 0; i < _childrenIntervals.size(); i++) {
+ _childrenIntervals[i]->covering->saveState();
}
- void NearStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_stats->common.unyields;
- for (size_t i = 0; i < _childrenIntervals.size(); i++) {
- _childrenIntervals[i]->covering->restoreState(opCtx);
- }
+ // Subclass specific saving, e.g. saving the 2d or 2dsphere density estimator.
+ finishSaveState();
+}
- // Subclass specific restoring, e.g. restoring the 2d or 2dsphere density estimator.
- finishRestoreState(opCtx);
+void NearStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_stats->common.unyields;
+ for (size_t i = 0; i < _childrenIntervals.size(); i++) {
+ _childrenIntervals[i]->covering->restoreState(opCtx);
}
- void NearStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_stats->common.invalidates;
- for (size_t i = 0; i < _childrenIntervals.size(); i++) {
- _childrenIntervals[i]->covering->invalidate(txn, dl, type);
- }
-
- // If a result is in _resultBuffer and has a RecordId it will be in _nextIntervalSeen as
- // well. It's safe to return the result w/o the RecordId, so just fetch the result.
- unordered_map<RecordId, WorkingSetID, RecordId::Hasher>::iterator seenIt = _nextIntervalSeen
- .find(dl);
+ // Subclass specific restoring, e.g. restoring the 2d or 2dsphere density estimator.
+ finishRestoreState(opCtx);
+}
- if (seenIt != _nextIntervalSeen.end()) {
+void NearStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_stats->common.invalidates;
+ for (size_t i = 0; i < _childrenIntervals.size(); i++) {
+ _childrenIntervals[i]->covering->invalidate(txn, dl, type);
+ }
- WorkingSetMember* member = _workingSet->get(seenIt->second);
- verify(member->hasLoc());
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- verify(!member->hasLoc());
+ // If a result is in _resultBuffer and has a RecordId it will be in _nextIntervalSeen as
+ // well. It's safe to return the result w/o the RecordId, so just fetch the result.
+ unordered_map<RecordId, WorkingSetID, RecordId::Hasher>::iterator seenIt =
+ _nextIntervalSeen.find(dl);
- // Don't keep it around in the seen map since there's no valid RecordId anymore
- _nextIntervalSeen.erase(seenIt);
- }
+ if (seenIt != _nextIntervalSeen.end()) {
+ WorkingSetMember* member = _workingSet->get(seenIt->second);
+ verify(member->hasLoc());
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+ verify(!member->hasLoc());
- // Subclass specific invalidation, e.g. passing the invalidation to the 2d or 2dsphere
- // density estimator.
- finishInvalidate(txn, dl, type);
+ // Don't keep it around in the seen map since there's no valid RecordId anymore
+ _nextIntervalSeen.erase(seenIt);
}
- vector<PlanStage*> NearStage::getChildren() const {
- vector<PlanStage*> children;
- for (size_t i = 0; i < _childrenIntervals.size(); i++) {
- children.push_back(_childrenIntervals[i]->covering.get());
- }
- return children;
- }
+ // Subclass specific invalidation, e.g. passing the invalidation to the 2d or 2dsphere
+ // density estimator.
+ finishInvalidate(txn, dl, type);
+}
- PlanStageStats* NearStage::getStats() {
- PlanStageStats* statsClone = _stats->clone();
- for (size_t i = 0; i < _childrenIntervals.size(); ++i) {
- statsClone->children.push_back(_childrenIntervals[i]->covering->getStats());
- }
- return statsClone;
+vector<PlanStage*> NearStage::getChildren() const {
+ vector<PlanStage*> children;
+ for (size_t i = 0; i < _childrenIntervals.size(); i++) {
+ children.push_back(_childrenIntervals[i]->covering.get());
}
+ return children;
+}
- StageType NearStage::stageType() const {
- return _stats->stageType;
+PlanStageStats* NearStage::getStats() {
+ PlanStageStats* statsClone = _stats->clone();
+ for (size_t i = 0; i < _childrenIntervals.size(); ++i) {
+ statsClone->children.push_back(_childrenIntervals[i]->covering->getStats());
}
+ return statsClone;
+}
- const CommonStats* NearStage::getCommonStats() const {
- return &_stats->common;
- }
+StageType NearStage::stageType() const {
+ return _stats->stageType;
+}
- const SpecificStats* NearStage::getSpecificStats() const {
- return _stats->specific.get();
- }
+const CommonStats* NearStage::getCommonStats() const {
+ return &_stats->common;
+}
- NearStats* NearStage::getNearStats() {
- return static_cast<NearStats*>(_stats->specific.get());
- }
+const SpecificStats* NearStage::getSpecificStats() const {
+ return _stats->specific.get();
+}
+
+NearStats* NearStage::getNearStats() {
+ return static_cast<NearStats*>(_stats->specific.get());
+}
-} // namespace mongo
+} // namespace mongo
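
The reversed operator< in NearStage::SearchResult above is what turns std::priority_queue
(a max-heap) into a min-heap over distance, so _resultBuffer.top() is always the nearest
buffered result. A self-contained sketch of just that trick, with a plain int standing in
for the WorkingSetID:

#include <iostream>
#include <queue>

struct SearchResult {
    int resultID;
    double distance;
    bool operator<(const SearchResult& other) const {
        // We want increasing distance, not decreasing, so we reverse the <.
        return distance > other.distance;
    }
};

int main() {
    std::priority_queue<SearchResult> buffer;
    buffer.push({1, 4.2});
    buffer.push({2, 0.7});
    buffer.push({3, 2.5});

    while (!buffer.empty()) {
        // Pops in increasing distance order: 2, 3, 1.
        std::cout << buffer.top().resultID << " at distance " << buffer.top().distance << "\n";
        buffer.pop();
    }
    return 0;
}
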
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index f21758617ac..6468d5fcadb 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -42,194 +42,190 @@
namespace mongo {
+/**
+ * An abstract stage which uses a progressive sort to return results sorted by distance. This
+ * is useful when we do not have a full ordering computed over the distance metric and don't
+ * want to generate one.
+ *
+ * Child stages need to implement functionality which:
+ *
+ * - defines a distance metric
+ * - iterates through ordered distance intervals, nearest to furthest
+ * - provides a covering for each distance interval
+ *
+ * For example - given a distance search over documents with distances from [0 -> 10], the child
+ * stage might break up the search into intervals [0->5),[5,7),[7->10].
+ *
+ * Each interval requires a PlanStage which *covers* the interval (returns all results in the
+ * interval). Results in each interval are buffered fully before being returned to ensure that
+ * ordering is preserved.
+ *
+ * For efficient search, the child stage which covers the distance interval in question should
+ * not return too many results outside the interval, but correctness only depends on the child
+ * stage returning all results inside the interval. As an example, a PlanStage which covers the
+ * interval [0->5) might just be a full collection scan - this will always cover every interval,
+ * but is slow. If there is an index available, an IndexScan stage might also return all
+ * documents with distance [0->5) but would be much faster.
+ *
+ * Also for efficient search, the intervals should not be too large or too small - though again
+ * correctness does not depend on interval size.
+ *
+ * TODO: Right now the interface allows the nextCovering() to be adaptive, but doesn't allow
+ * aborting and shrinking a covered range being buffered if we guess wrong.
+ */
+class NearStage : public PlanStage {
+public:
+ struct CoveredInterval;
+
+ virtual ~NearStage();
+
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ virtual std::vector<PlanStage*> getChildren() const;
+
+ virtual StageType stageType() const;
+ virtual PlanStageStats* getStats();
+ virtual const CommonStats* getCommonStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
+
+protected:
/**
- * An abstract stage which uses a progressive sort to return results sorted by distance. This
- * is useful when we do not have a full ordering computed over the distance metric and don't
- * want to generate one.
- *
- * Child stages need to implement functionality which:
- *
- * - defines a distance metric
- * - iterates through ordered distance intervals, nearest to furthest
- * - provides a covering for each distance interval
- *
- * For example - given a distance search over documents with distances from [0 -> 10], the child
- * stage might break up the search into intervals [0->5),[5,7),[7->10].
- *
- * Each interval requires a PlanStage which *covers* the interval (returns all results in the
- * interval). Results in each interval are buffered fully before being returned to ensure that
- * ordering is preserved.
- *
- * For efficient search, the child stage which covers the distance interval in question should
- * not return too many results outside the interval, but correctness only depends on the child
- * stage returning all results inside the interval. As an example, a PlanStage which covers the
- * interval [0->5) might just be a full collection scan - this will always cover every interval,
- * but is slow. If there is an index available, an IndexScan stage might also return all
- * documents with distance [0->5) but would be much faster.
- *
- * Also for efficient search, the intervals should not be too large or too small - though again
- * correctness does not depend on interval size.
+ * Subclasses of NearStage must provide basics + a stats object which gets owned here.
+ * The stats object must have specific stats which are a subclass of NearStats, otherwise
+ * it's generated automatically.
+ */
+ NearStage(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ PlanStageStats* stats);
+
+ /**
+ * Exposes NearStats for adaptive search, allows additional specific stats in subclasses.
+ */
+ NearStats* getNearStats();
+
+ //
+ // Methods implemented for specific search functionality
+ //
+
+ /**
+ * Constructs the next covering over the next interval to buffer results from, or NULL
+ * if the full range has been searched. Use the provided working set as the working
+ * set for the covering stage if required.
*
- * TODO: Right now the interface allows the nextCovering() to be adaptive, but doesn't allow
- * aborting and shrinking a covered range being buffered if we guess wrong.
+ * Returns !OK on failure to create next stage.
*/
- class NearStage : public PlanStage {
- public:
-
- struct CoveredInterval;
-
- virtual ~NearStage();
-
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
-
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
- virtual StageType stageType() const;
- virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
-
- protected:
-
- /**
- * Subclasses of NearStage must provide basics + a stats object which gets owned here.
- * The stats object must have specific stats which are a subclass of NearStats, otherwise
- * it's generated automatically.
- */
- NearStage(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- PlanStageStats* stats);
-
- /**
- * Exposes NearStats for adaptive search, allows additional specific stats in subclasses.
- */
- NearStats* getNearStats();
-
- //
- // Methods implemented for specific search functionality
- //
-
- /**
- * Constructs the next covering over the next interval to buffer results from, or NULL
- * if the full range has been searched. Use the provided working set as the working
- * set for the covering stage if required.
- *
- * Returns !OK on failure to create next stage.
- */
- virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection) = 0;
-
- /**
- * Computes the distance value for the given member data, or -1 if the member should not be
- * returned in the sorted results.
- *
- * Returns !OK on invalid member data.
- */
- virtual StatusWith<double> computeDistance(WorkingSetMember* member) = 0;
-
- /*
- * Initialize near stage before buffering the data.
- * Return IS_EOF if subclass finishes the initialization.
- * Return NEED_TIME if we need more time.
- * Return errors if an error occurs.
- * Can't return ADVANCED.
- */
- virtual StageState initialize(OperationContext* txn,
- WorkingSet* workingSet,
- Collection* collection,
- WorkingSetID* out) = 0;
-
- private:
-
- //
- // Save/restore/invalidate work specific to the search type.
- //
-
- virtual void finishSaveState() = 0;
-
- virtual void finishRestoreState(OperationContext* txn) = 0;
-
- virtual void finishInvalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) = 0;
-
- //
- // Generic methods for progressive search functionality
- //
-
- StageState initNext(WorkingSetID* out);
- StageState bufferNext(WorkingSetID* toReturn, Status* error);
- StageState advanceNext(WorkingSetID* toReturn);
-
- //
- // Generic state for progressive near search
- //
-
- // Not owned here
- OperationContext* _txn;
- // Not owned here
- WorkingSet* const _workingSet;
- // Not owned here, used for fetching buffered results before invalidation
- Collection* const _collection;
-
- // A progressive search works in stages of buffering and then advancing
- enum SearchState {
- SearchState_Initializing,
- SearchState_Buffering,
- SearchState_Advancing,
- SearchState_Finished
- } _searchState;
-
- // May need to track disklocs from the child stage to do our own deduping, also to do
- // invalidation of buffered results.
- unordered_map<RecordId, WorkingSetID, RecordId::Hasher> _nextIntervalSeen;
-
- // Stats for the stage covering this interval
- std::unique_ptr<IntervalStats> _nextIntervalStats;
-
- // Sorted buffered results to be returned - the current interval
- struct SearchResult;
- std::priority_queue<SearchResult> _resultBuffer;
-
- // Stats
- std::unique_ptr<PlanStageStats> _stats;
-
- // The current stage from which this stage should buffer results
- // Pointer to the last interval in _childrenIntervals. Owned by _childrenIntervals.
- CoveredInterval* _nextInterval;
-
- // All children CoveredIntervals and the sub-stages owned by them.
- //
- // All children intervals except the last active one are only used by getStats(),
- // because they are all EOF.
- OwnedPointerVector<CoveredInterval> _childrenIntervals;
- };
+ virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection) = 0;
/**
- * A covered interval over which a portion of a near search can be run.
+ * Computes the distance value for the given member data, or -1 if the member should not be
+ * returned in the sorted results.
+ *
+ * Returns !OK on invalid member data.
*/
- struct NearStage::CoveredInterval {
-
- CoveredInterval(PlanStage* covering,
- bool dedupCovering,
- double minDistance,
- double maxDistance,
- bool inclusiveMax);
-
- // Owned by NearStage
- std::unique_ptr<PlanStage> const covering;
- const bool dedupCovering;
-
- const double minDistance;
- const double maxDistance;
- const bool inclusiveMax;
- };
+ virtual StatusWith<double> computeDistance(WorkingSetMember* member) = 0;
+
+ /*
+ * Initialize near stage before buffering the data.
+ * Return IS_EOF if subclass finishes the initialization.
+ * Return NEED_TIME if we need more time.
+ * Return errors if an error occurs.
+ * Can't return ADVANCED.
+ */
+ virtual StageState initialize(OperationContext* txn,
+ WorkingSet* workingSet,
+ Collection* collection,
+ WorkingSetID* out) = 0;
+
+private:
+ //
+ // Save/restore/invalidate work specific to the search type.
+ //
+
+ virtual void finishSaveState() = 0;
+
+ virtual void finishRestoreState(OperationContext* txn) = 0;
+
+ virtual void finishInvalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) = 0;
+
+ //
+ // Generic methods for progressive search functionality
+ //
+
+ StageState initNext(WorkingSetID* out);
+ StageState bufferNext(WorkingSetID* toReturn, Status* error);
+ StageState advanceNext(WorkingSetID* toReturn);
+
+ //
+ // Generic state for progressive near search
+ //
+
+ // Not owned here
+ OperationContext* _txn;
+ // Not owned here
+ WorkingSet* const _workingSet;
+ // Not owned here, used for fetching buffered results before invalidation
+ Collection* const _collection;
+
+ // A progressive search works in stages of buffering and then advancing
+ enum SearchState {
+ SearchState_Initializing,
+ SearchState_Buffering,
+ SearchState_Advancing,
+ SearchState_Finished
+ } _searchState;
+
+ // May need to track disklocs from the child stage to do our own deduping, also to do
+ // invalidation of buffered results.
+ unordered_map<RecordId, WorkingSetID, RecordId::Hasher> _nextIntervalSeen;
+
+ // Stats for the stage covering this interval
+ std::unique_ptr<IntervalStats> _nextIntervalStats;
+
+ // Sorted buffered results to be returned - the current interval
+ struct SearchResult;
+ std::priority_queue<SearchResult> _resultBuffer;
+
+ // Stats
+ std::unique_ptr<PlanStageStats> _stats;
+
+ // The current stage from which this stage should buffer results
+ // Pointer to the last interval in _childrenIntervals. Owned by _childrenIntervals.
+ CoveredInterval* _nextInterval;
+
+ // All children CoveredIntervals and the sub-stages owned by them.
+ //
+ // All children intervals except the last active one are only used by getStats(),
+ // because they are all EOF.
+ OwnedPointerVector<CoveredInterval> _childrenIntervals;
+};
+
+/**
+ * A covered interval over which a portion of a near search can be run.
+ */
+struct NearStage::CoveredInterval {
+ CoveredInterval(PlanStage* covering,
+ bool dedupCovering,
+ double minDistance,
+ double maxDistance,
+ bool inclusiveMax);
+
+ // Owned by NearStage
+ std::unique_ptr<PlanStage> const covering;
+ const bool dedupCovering;
+
+ const double minDistance;
+ const double maxDistance;
+ const bool inclusiveMax;
+};
} // namespace mongo
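
The class comment above describes the progressive sort in terms of distance intervals such
as [0,5), [5,7), [7,10]: each interval is covered, buffered fully, and only then returned,
so results come out in increasing distance order without sorting the whole data set up
front. A toy, index-free model of that idea (documents, distances, and intervals below are
made up for illustration; this is not MongoDB code):

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

int main() {
    // Hypothetical documents with precomputed distances from the query point.
    std::vector<std::pair<int, double>> docs = {{1, 6.1}, {2, 0.9}, {3, 9.5}, {4, 4.4}, {5, 5.2}};

    // Distance intervals, nearest to furthest.
    const std::vector<std::pair<double, double>> intervals = {{0, 5}, {5, 7}, {7, 10}};

    for (const auto& interval : intervals) {
        // "Covering" for this interval: collect everything that falls inside it.
        std::vector<std::pair<int, double>> buffer;
        for (const auto& doc : docs) {
            if (doc.second >= interval.first && doc.second < interval.second) {
                buffer.push_back(doc);
            }
        }

        // Buffer fully, then emit in sorted order -- ordering is preserved per interval.
        std::sort(buffer.begin(), buffer.end(),
                  [](const auto& a, const auto& b) { return a.second < b.second; });
        for (const auto& doc : buffer) {
            std::cout << "doc " << doc.first << " at distance " << doc.second << "\n";
        }
    }
    return 0;
}
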
diff --git a/src/mongo/db/exec/oplogstart.cpp b/src/mongo/db/exec/oplogstart.cpp
index 92de52db505..d05ddfc2f44 100644
--- a/src/mongo/db/exec/oplogstart.cpp
+++ b/src/mongo/db/exec/oplogstart.cpp
@@ -35,187 +35,192 @@
namespace mongo {
- using std::vector;
-
- const char* OplogStart::kStageType = "OPLOG_START";
-
- // Does not take ownership.
- OplogStart::OplogStart(OperationContext* txn,
- const Collection* collection,
- MatchExpression* filter,
- WorkingSet* ws)
- : _txn(txn),
- _needInit(true),
- _backwardsScanning(false),
- _extentHopping(false),
- _done(false),
- _collection(collection),
- _workingSet(ws),
- _filter(filter) { }
-
- OplogStart::~OplogStart() { }
-
- PlanStage::StageState OplogStart::work(WorkingSetID* out) {
- // We do our (heavy) init in a work(), where work is expected.
- if (_needInit) {
- CollectionScanParams params;
- params.collection = _collection;
- params.direction = CollectionScanParams::BACKWARD;
- _cs.reset(new CollectionScan(_txn, params, _workingSet, NULL));
-
- _needInit = false;
- _backwardsScanning = true;
- _timer.reset();
- }
-
- // If we're still reading backwards, keep trying until timing out.
- if (_backwardsScanning) {
- verify(!_extentHopping);
- // Still have time to succeed with reading backwards.
- if (_timer.seconds() < _backwardsScanTime) {
- return workBackwardsScan(out);
- }
-
- try {
- // If this throws WCE, it leave us in a state were the next call to work will retry.
- switchToExtentHopping();
- }
- catch (const WriteConflictException& wce) {
- _subIterators.clear();
- *out = WorkingSet::INVALID_ID;
- return NEED_YIELD;
- }
- }
-
- // Don't find it in time? Swing from extent to extent like tarzan.com.
- verify(_extentHopping);
- return workExtentHopping(out);
+using std::vector;
+
+const char* OplogStart::kStageType = "OPLOG_START";
+
+// Does not take ownership.
+OplogStart::OplogStart(OperationContext* txn,
+ const Collection* collection,
+ MatchExpression* filter,
+ WorkingSet* ws)
+ : _txn(txn),
+ _needInit(true),
+ _backwardsScanning(false),
+ _extentHopping(false),
+ _done(false),
+ _collection(collection),
+ _workingSet(ws),
+ _filter(filter) {}
+
+OplogStart::~OplogStart() {}
+
+PlanStage::StageState OplogStart::work(WorkingSetID* out) {
+ // We do our (heavy) init in a work(), where work is expected.
+ if (_needInit) {
+ CollectionScanParams params;
+ params.collection = _collection;
+ params.direction = CollectionScanParams::BACKWARD;
+ _cs.reset(new CollectionScan(_txn, params, _workingSet, NULL));
+
+ _needInit = false;
+ _backwardsScanning = true;
+ _timer.reset();
}
- PlanStage::StageState OplogStart::workExtentHopping(WorkingSetID* out) {
- if (_done || _subIterators.empty()) {
- return PlanStage::IS_EOF;
+ // If we're still reading backwards, keep trying until timing out.
+ if (_backwardsScanning) {
+ verify(!_extentHopping);
+ // Still have time to succeed with reading backwards.
+ if (_timer.seconds() < _backwardsScanTime) {
+ return workBackwardsScan(out);
}
- // we work from the back to the front since the back has the newest data.
try {
- // TODO: should we ever check fetcherForNext()?
- if (auto record = _subIterators.back()->next()) {
- BSONObj obj = record->data.releaseToBson();
- if (!_filter->matchesBSON(obj)) {
- _done = true;
- WorkingSetID id = _workingSet->allocate();
- WorkingSetMember* member = _workingSet->get(id);
- member->loc = record->id;
- member->obj = {_txn->recoveryUnit()->getSnapshotId(), std::move(obj)};
- member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- *out = id;
- return PlanStage::ADVANCED;
- }
- }
- }
- catch (const WriteConflictException& wce) {
+            // If this throws WCE, it leaves us in a state where the next call to work will retry.
+ switchToExtentHopping();
+ } catch (const WriteConflictException& wce) {
+ _subIterators.clear();
*out = WorkingSet::INVALID_ID;
- return PlanStage::NEED_YIELD;
+ return NEED_YIELD;
}
-
- _subIterators.pop_back();
- return PlanStage::NEED_TIME;
}
- void OplogStart::switchToExtentHopping() {
- // Set up our extent hopping state.
- _subIterators = _collection->getManyCursors(_txn);
+ // Don't find it in time? Swing from extent to extent like tarzan.com.
+ verify(_extentHopping);
+ return workExtentHopping(out);
+}
- // Transition from backwards scanning to extent hopping.
- _backwardsScanning = false;
- _extentHopping = true;
+PlanStage::StageState OplogStart::workExtentHopping(WorkingSetID* out) {
+ if (_done || _subIterators.empty()) {
+ return PlanStage::IS_EOF;
+ }
- // Toss the collection scan we were using.
- _cs.reset();
+ // we work from the back to the front since the back has the newest data.
+ try {
+ // TODO: should we ever check fetcherForNext()?
+ if (auto record = _subIterators.back()->next()) {
+ BSONObj obj = record->data.releaseToBson();
+ if (!_filter->matchesBSON(obj)) {
+ _done = true;
+ WorkingSetID id = _workingSet->allocate();
+ WorkingSetMember* member = _workingSet->get(id);
+ member->loc = record->id;
+ member->obj = {_txn->recoveryUnit()->getSnapshotId(), std::move(obj)};
+ member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+ *out = id;
+ return PlanStage::ADVANCED;
+ }
+ }
+ } catch (const WriteConflictException& wce) {
+ *out = WorkingSet::INVALID_ID;
+ return PlanStage::NEED_YIELD;
}
- PlanStage::StageState OplogStart::workBackwardsScan(WorkingSetID* out) {
- PlanStage::StageState state = _cs->work(out);
+ _subIterators.pop_back();
+ return PlanStage::NEED_TIME;
+}
- // EOF. Just start from the beginning, which is where we've hit.
- if (PlanStage::IS_EOF == state) {
- _done = true;
- return state;
- }
+void OplogStart::switchToExtentHopping() {
+ // Set up our extent hopping state.
+ _subIterators = _collection->getManyCursors(_txn);
- if (PlanStage::ADVANCED != state) { return state; }
+ // Transition from backwards scanning to extent hopping.
+ _backwardsScanning = false;
+ _extentHopping = true;
- WorkingSetMember* member = _workingSet->get(*out);
- verify(member->hasObj());
- verify(member->hasLoc());
+ // Toss the collection scan we were using.
+ _cs.reset();
+}
- if (!_filter->matchesBSON(member->obj.value())) {
- _done = true;
- // RecordId is returned in *out.
- return PlanStage::ADVANCED;
- }
- else {
- _workingSet->free(*out);
- return PlanStage::NEED_TIME;
- }
+PlanStage::StageState OplogStart::workBackwardsScan(WorkingSetID* out) {
+ PlanStage::StageState state = _cs->work(out);
+
+ // EOF. Just start from the beginning, which is where we've hit.
+ if (PlanStage::IS_EOF == state) {
+ _done = true;
+ return state;
}
- bool OplogStart::isEOF() { return _done; }
+ if (PlanStage::ADVANCED != state) {
+ return state;
+ }
- void OplogStart::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- if (_needInit) { return; }
+ WorkingSetMember* member = _workingSet->get(*out);
+ verify(member->hasObj());
+ verify(member->hasLoc());
- if (INVALIDATION_DELETION != type) { return; }
+ if (!_filter->matchesBSON(member->obj.value())) {
+ _done = true;
+ // RecordId is returned in *out.
+ return PlanStage::ADVANCED;
+ } else {
+ _workingSet->free(*out);
+ return PlanStage::NEED_TIME;
+ }
+}
- if (_cs) {
- _cs->invalidate(txn, dl, type);
- }
+bool OplogStart::isEOF() {
+ return _done;
+}
- for (size_t i = 0; i < _subIterators.size(); i++) {
- _subIterators[i]->invalidate(dl);
- }
+void OplogStart::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ if (_needInit) {
+ return;
}
- void OplogStart::saveState() {
- _txn = NULL;
- if (_cs) {
- _cs->saveState();
- }
+ if (INVALIDATION_DELETION != type) {
+ return;
+ }
- for (size_t i = 0; i < _subIterators.size(); i++) {
- _subIterators[i]->savePositioned();
- }
+ if (_cs) {
+ _cs->invalidate(txn, dl, type);
}
- void OplogStart::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- if (_cs) {
- _cs->restoreState(opCtx);
- }
+ for (size_t i = 0; i < _subIterators.size(); i++) {
+ _subIterators[i]->invalidate(dl);
+ }
+}
- for (size_t i = 0; i < _subIterators.size(); i++) {
- if (!_subIterators[i]->restore(opCtx)) {
- _subIterators.erase(_subIterators.begin() + i);
- // need to hit same i on next pass through loop
- i--;
- }
- }
+void OplogStart::saveState() {
+ _txn = NULL;
+ if (_cs) {
+ _cs->saveState();
}
- PlanStageStats* OplogStart::getStats() {
- std::unique_ptr<PlanStageStats> ret(new PlanStageStats(CommonStats(kStageType),
- STAGE_OPLOG_START));
- ret->specific.reset(new CollectionScanStats());
- return ret.release();
+ for (size_t i = 0; i < _subIterators.size(); i++) {
+ _subIterators[i]->savePositioned();
}
+}
- vector<PlanStage*> OplogStart::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
+void OplogStart::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ if (_cs) {
+ _cs->restoreState(opCtx);
}
- int OplogStart::_backwardsScanTime = 5;
+ for (size_t i = 0; i < _subIterators.size(); i++) {
+ if (!_subIterators[i]->restore(opCtx)) {
+ _subIterators.erase(_subIterators.begin() + i);
+ // need to hit same i on next pass through loop
+ i--;
+ }
+ }
+}
+
+PlanStageStats* OplogStart::getStats() {
+ std::unique_ptr<PlanStageStats> ret(
+ new PlanStageStats(CommonStats(kStageType), STAGE_OPLOG_START));
+ ret->specific.reset(new CollectionScanStats());
+ return ret.release();
+}
+
+vector<PlanStage*> OplogStart::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
+
+int OplogStart::_backwardsScanTime = 5;
} // namespace mongo
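
OplogStart::work() above embodies a two-phase strategy: scan backwards document by document
while a time budget lasts (5 seconds by default, see _backwardsScanTime), and fall back to
the much coarser extent hopping once the budget is spent. A simplified, standalone sketch of
that shape -- the oplog entries and timestamps below are hypothetical and extents are not
modelled at all:

#include <chrono>
#include <iostream>
#include <vector>

struct Entry {
    long long ts;  // hypothetical oplog timestamp
};

int main() {
    // Entries oldest to newest; we want the newest entry older than the query timestamp.
    const std::vector<Entry> oplog = {{10}, {20}, {30}, {40}, {50}};
    const long long queryTs = 35;

    using Clock = std::chrono::steady_clock;
    const auto budget = std::chrono::seconds(5);  // mirrors the default scan budget
    const auto start = Clock::now();

    // Phase 1: precise backwards scan while we are within the time budget.
    for (auto it = oplog.rbegin(); it != oplog.rend(); ++it) {
        if (Clock::now() - start >= budget) {
            // Phase 2 (not shown): switch to extent hopping, which only inspects
            // the first document of each extent.
            std::cout << "budget exhausted, would switch to extent hopping\n";
            return 0;
        }
        if (it->ts < queryTs) {
            std::cout << "start replaying from ts=" << it->ts << "\n";  // prints 30
            break;
        }
    }
    return 0;
}
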
diff --git a/src/mongo/db/exec/oplogstart.h b/src/mongo/db/exec/oplogstart.h
index d7da98f6833..193233a6215 100644
--- a/src/mongo/db/exec/oplogstart.h
+++ b/src/mongo/db/exec/oplogstart.h
@@ -38,104 +38,116 @@
namespace mongo {
- /**
- * OplogStart walks a collection backwards to find the first object in the collection that
- * matches the query. It's used by replication to efficiently find where the oplog should be
- * replayed from.
- *
- * The oplog is always a capped collection. In capped collections, documents are oriented on
- * disk according to insertion order. The oplog inserts documents with increasing timestamps.
- * Queries on the oplog look for entries that are after a certain time. Therefore if we
- * navigate backwards, the last document we encounter that satisfies our query (over the
- * timestamp) is the first document we must scan from to answer the query.
- *
- * Why isn't this a normal reverse table scan, you may ask? We could be correct if we used a
- * normal reverse collection scan. However, that's not fast enough. Since we know all
- * documents are oriented on disk in insertion order, we know all documents in one extent were
- * inserted before documents in a subsequent extent. As such we can skip through entire extents
- * looking only at the first document.
- *
- * Why is this a stage? Because we want to yield, and we want to be notified of RecordId
- * invalidations. :(
- */
- class OplogStart : public PlanStage {
- public:
- // Does not take ownership.
- OplogStart(OperationContext* txn,
- const Collection* collection,
- MatchExpression* filter,
- WorkingSet* ws);
- virtual ~OplogStart();
+/**
+ * OplogStart walks a collection backwards to find the first object in the collection that
+ * matches the query. It's used by replication to efficiently find where the oplog should be
+ * replayed from.
+ *
+ * The oplog is always a capped collection. In capped collections, documents are oriented on
+ * disk according to insertion order. The oplog inserts documents with increasing timestamps.
+ * Queries on the oplog look for entries that are after a certain time. Therefore if we
+ * navigate backwards, the last document we encounter that satisfies our query (over the
+ * timestamp) is the first document we must scan from to answer the query.
+ *
+ * Why isn't this a normal reverse table scan, you may ask? We could be correct if we used a
+ * normal reverse collection scan. However, that's not fast enough. Since we know all
+ * documents are oriented on disk in insertion order, we know all documents in one extent were
+ * inserted before documents in a subsequent extent. As such we can skip through entire extents
+ * looking only at the first document.
+ *
+ * Why is this a stage? Because we want to yield, and we want to be notified of RecordId
+ * invalidations. :(
+ */
+class OplogStart : public PlanStage {
+public:
+ // Does not take ownership.
+ OplogStart(OperationContext* txn,
+ const Collection* collection,
+ MatchExpression* filter,
+ WorkingSet* ws);
+ virtual ~OplogStart();
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- // Returns empty PlanStageStats object
- virtual PlanStageStats* getStats();
+ // Returns empty PlanStageStats object
+ virtual PlanStageStats* getStats();
- //
- // Exec stats -- do not call these for the oplog start stage.
- //
- virtual const CommonStats* getCommonStats() const { return NULL; }
+ //
+ // Exec stats -- do not call these for the oplog start stage.
+ //
+ virtual const CommonStats* getCommonStats() const {
+ return NULL;
+ }
- virtual const SpecificStats* getSpecificStats() const { return NULL; }
+ virtual const SpecificStats* getSpecificStats() const {
+ return NULL;
+ }
- virtual StageType stageType() const { return STAGE_OPLOG_START; }
+ virtual StageType stageType() const {
+ return STAGE_OPLOG_START;
+ }
- // For testing only.
- void setBackwardsScanTime(int newTime) { _backwardsScanTime = newTime; }
- bool isExtentHopping() { return _extentHopping; }
- bool isBackwardsScanning() { return _backwardsScanning; }
+ // For testing only.
+ void setBackwardsScanTime(int newTime) {
+ _backwardsScanTime = newTime;
+ }
+ bool isExtentHopping() {
+ return _extentHopping;
+ }
+ bool isBackwardsScanning() {
+ return _backwardsScanning;
+ }
- static const char* kStageType;
+ static const char* kStageType;
- private:
- StageState workBackwardsScan(WorkingSetID* out);
+private:
+ StageState workBackwardsScan(WorkingSetID* out);
- void switchToExtentHopping();
+ void switchToExtentHopping();
- StageState workExtentHopping(WorkingSetID* out);
+ StageState workExtentHopping(WorkingSetID* out);
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
- // If we're backwards scanning we just punt to a collscan.
- std::unique_ptr<CollectionScan> _cs;
+ // If we're backwards scanning we just punt to a collscan.
+ std::unique_ptr<CollectionScan> _cs;
- // This is only used for the extent hopping scan.
- std::vector<std::unique_ptr<RecordCursor>> _subIterators;
+ // This is only used for the extent hopping scan.
+ std::vector<std::unique_ptr<RecordCursor>> _subIterators;
- // Have we done our heavy init yet?
- bool _needInit;
+ // Have we done our heavy init yet?
+ bool _needInit;
- // Our first state: going backwards via a collscan.
- bool _backwardsScanning;
+ // Our first state: going backwards via a collscan.
+ bool _backwardsScanning;
- // Our second state: hopping backwards extent by extent.
- bool _extentHopping;
+ // Our second state: hopping backwards extent by extent.
+ bool _extentHopping;
- // Our final state: done.
- bool _done;
+ // Our final state: done.
+ bool _done;
- const Collection* _collection;
+ const Collection* _collection;
- // We only go backwards via a collscan for a few seconds.
- Timer _timer;
+ // We only go backwards via a collscan for a few seconds.
+ Timer _timer;
- // WorkingSet is not owned by us.
- WorkingSet* _workingSet;
+ // WorkingSet is not owned by us.
+ WorkingSet* _workingSet;
- std::string _ns;
+ std::string _ns;
- MatchExpression* _filter;
+ MatchExpression* _filter;
- static int _backwardsScanTime;
- };
+ static int _backwardsScanTime;
+};
} // namespace mongo
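
The header comment above also explains why extent hopping is enough once the backwards scan
gives up: documents sit on disk in insertion order, so while walking extents from newest to
oldest it suffices to look at the first (oldest) document of each extent, and the first
extent whose oldest entry precedes the query timestamp is where replay should begin. A toy
model of that skip (extents and timestamps are invented; not MongoDB code):

#include <iostream>
#include <vector>

int main() {
    // Hypothetical extents, oldest to newest; each holds its documents' timestamps
    // in insertion order.
    const std::vector<std::vector<long long>> extents = {{10, 11, 12}, {20, 21, 22}, {30, 31, 32}};
    const long long queryTs = 21;

    // Walk extents newest to oldest, inspecting only the first document of each.
    for (auto it = extents.rbegin(); it != extents.rend(); ++it) {
        if (it->front() < queryTs) {
            std::cout << "start from the extent whose oldest ts=" << it->front() << "\n";  // 20
            break;
        }
    }
    return 0;
}
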
diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp
index a7d370c41fa..2513635db1d 100644
--- a/src/mongo/db/exec/or.cpp
+++ b/src/mongo/db/exec/or.cpp
@@ -35,171 +35,172 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
+using std::unique_ptr;
+using std::vector;
- // static
- const char* OrStage::kStageType = "OR";
+// static
+const char* OrStage::kStageType = "OR";
- OrStage::OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter)
- : _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup), _commonStats(kStageType) { }
+OrStage::OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter)
+ : _ws(ws), _filter(filter), _currentChild(0), _dedup(dedup), _commonStats(kStageType) {}
- OrStage::~OrStage() {
- for (size_t i = 0; i < _children.size(); ++i) {
- delete _children[i];
- }
+OrStage::~OrStage() {
+ for (size_t i = 0; i < _children.size(); ++i) {
+ delete _children[i];
}
+}
- void OrStage::addChild(PlanStage* child) { _children.push_back(child); }
-
- bool OrStage::isEOF() { return _currentChild >= _children.size(); }
+void OrStage::addChild(PlanStage* child) {
+ _children.push_back(child);
+}
- PlanStage::StageState OrStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+bool OrStage::isEOF() {
+ return _currentChild >= _children.size();
+}
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+PlanStage::StageState OrStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- if (isEOF()) { return PlanStage::IS_EOF; }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState childStatus = _children[_currentChild]->work(&id);
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
- if (PlanStage::ADVANCED == childStatus) {
- WorkingSetMember* member = _ws->get(id);
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState childStatus = _children[_currentChild]->work(&id);
- // If we're deduping (and there's something to dedup by)
- if (_dedup && member->hasLoc()) {
- ++_specificStats.dupsTested;
+ if (PlanStage::ADVANCED == childStatus) {
+ WorkingSetMember* member = _ws->get(id);
- // ...and we've seen the RecordId before
- if (_seen.end() != _seen.find(member->loc)) {
- // ...drop it.
- ++_specificStats.dupsDropped;
- _ws->free(id);
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else {
- // Otherwise, note that we've seen it.
- _seen.insert(member->loc);
- }
- }
+ // If we're deduping (and there's something to dedup by)
+ if (_dedup && member->hasLoc()) {
+ ++_specificStats.dupsTested;
- if (Filter::passes(member, _filter)) {
- // Match! return it.
- *out = id;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
- else {
- // Does not match, try again.
+ // ...and we've seen the RecordId before
+ if (_seen.end() != _seen.find(member->loc)) {
+ // ...drop it.
+ ++_specificStats.dupsDropped;
_ws->free(id);
++_commonStats.needTime;
return PlanStage::NEED_TIME;
+ } else {
+ // Otherwise, note that we've seen it.
+ _seen.insert(member->loc);
}
}
- else if (PlanStage::IS_EOF == childStatus) {
- // Done with _currentChild, move to the next one.
- ++_currentChild;
- // Maybe we're out of children.
- if (isEOF()) {
- return PlanStage::IS_EOF;
- }
- else {
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- }
- else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ if (Filter::passes(member, _filter)) {
+ // Match! return it.
*out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "OR stage failed to read in results from child " << _currentChild;
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- return childStatus;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ } else {
+ // Does not match, try again.
+ _ws->free(id);
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
- else if (PlanStage::NEED_TIME == childStatus) {
+ } else if (PlanStage::IS_EOF == childStatus) {
+ // Done with _currentChild, move to the next one.
+ ++_currentChild;
+
+ // Maybe we're out of children.
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ } else {
++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
- else if (PlanStage::NEED_YIELD == childStatus) {
- ++_commonStats.needYield;
- *out = id;
+ } else if (PlanStage::FAILURE == childStatus || PlanStage::DEAD == childStatus) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "OR stage failed to read in results from child " << _currentChild;
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
-
- // NEED_TIME, ERROR, NEED_YIELD, pass them up.
return childStatus;
+ } else if (PlanStage::NEED_TIME == childStatus) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == childStatus) {
+ ++_commonStats.needYield;
+ *out = id;
}
- void OrStage::saveState() {
- ++_commonStats.yields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->saveState();
- }
- }
+ // NEED_TIME, ERROR, NEED_YIELD, pass them up.
+ return childStatus;
+}
- void OrStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->restoreState(opCtx);
- }
+void OrStage::saveState() {
+ ++_commonStats.yields;
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->saveState();
}
+}
- void OrStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
+void OrStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->restoreState(opCtx);
+ }
+}
- if (isEOF()) { return; }
+void OrStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
- for (size_t i = 0; i < _children.size(); ++i) {
- _children[i]->invalidate(txn, dl, type);
- }
-
- // If we see DL again it is not the same record as it once was so we still want to
- // return it.
- if (_dedup && INVALIDATION_DELETION == type) {
- unordered_set<RecordId, RecordId::Hasher>::iterator it = _seen.find(dl);
- if (_seen.end() != it) {
- ++_specificStats.locsForgotten;
- _seen.erase(dl);
- }
- }
+ if (isEOF()) {
+ return;
}
- vector<PlanStage*> OrStage::getChildren() const {
- return _children;
+ for (size_t i = 0; i < _children.size(); ++i) {
+ _children[i]->invalidate(txn, dl, type);
}
- PlanStageStats* OrStage::getStats() {
- _commonStats.isEOF = isEOF();
-
- // Add a BSON representation of the filter to the stats tree, if there is one.
- if (NULL != _filter) {
- BSONObjBuilder bob;
- _filter->toBSON(&bob);
- _commonStats.filter = bob.obj();
+ // If we see DL again it is not the same record as it once was so we still want to
+ // return it.
+ if (_dedup && INVALIDATION_DELETION == type) {
+ unordered_set<RecordId, RecordId::Hasher>::iterator it = _seen.find(dl);
+ if (_seen.end() != it) {
+ ++_specificStats.locsForgotten;
+ _seen.erase(dl);
}
+ }
+}
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_OR));
- ret->specific.reset(new OrStats(_specificStats));
- for (size_t i = 0; i < _children.size(); ++i) {
- ret->children.push_back(_children[i]->getStats());
- }
+vector<PlanStage*> OrStage::getChildren() const {
+ return _children;
+}
- return ret.release();
- }
+PlanStageStats* OrStage::getStats() {
+ _commonStats.isEOF = isEOF();
- const CommonStats* OrStage::getCommonStats() const {
- return &_commonStats;
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
}
- const SpecificStats* OrStage::getSpecificStats() const {
- return &_specificStats;
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_OR));
+ ret->specific.reset(new OrStats(_specificStats));
+ for (size_t i = 0; i < _children.size(); ++i) {
+ ret->children.push_back(_children[i]->getStats());
}
+ return ret.release();
+}
+
+const CommonStats* OrStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* OrStage::getSpecificStats() const {
+ return &_specificStats;
+}
+
} // namespace mongo
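
OrStage::work() above dedups on RecordId by remembering everything it has already returned
in _seen and freeing any repeat it gets from a later child. A minimal standalone sketch of
that union-with-dedup behaviour, with plain integers standing in for RecordIds and
hard-coded child outputs (illustration only, not MongoDB code):

#include <iostream>
#include <unordered_set>
#include <vector>

int main() {
    // Hypothetical RecordIds produced by two child stages, with overlap.
    const std::vector<std::vector<long long>> children = {{1, 3, 5}, {3, 4, 5, 6}};

    std::unordered_set<long long> seen;  // plays the role of OrStage::_seen
    for (const auto& child : children) {
        for (long long recordId : child) {
            if (!seen.insert(recordId).second) {
                continue;  // duplicate: drop it, as OrStage frees the working set member
            }
            std::cout << "returning record " << recordId << "\n";  // 1 3 5 4 6
        }
    }
    return 0;
}
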
diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h
index 3ea2d55a466..6e9250db9bd 100644
--- a/src/mongo/db/exec/or.h
+++ b/src/mongo/db/exec/or.h
@@ -36,62 +36,64 @@
namespace mongo {
- /**
- * This stage outputs the union of its children. It optionally deduplicates on RecordId.
- *
- * Preconditions: Valid RecordId.
- *
- * If we're deduping, we may fail to dedup any invalidated RecordId properly.
- */
- class OrStage : public PlanStage {
- public:
- OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter);
- virtual ~OrStage();
+/**
+ * This stage outputs the union of its children. It optionally deduplicates on RecordId.
+ *
+ * Preconditions: Valid RecordId.
+ *
+ * If we're deduping, we may fail to dedup any invalidated RecordId properly.
+ */
+class OrStage : public PlanStage {
+public:
+ OrStage(WorkingSet* ws, bool dedup, const MatchExpression* filter);
+ virtual ~OrStage();
- void addChild(PlanStage* child);
+ void addChild(PlanStage* child);
- virtual bool isEOF();
+ virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_OR; }
+ virtual StageType stageType() const {
+ return STAGE_OR;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- // Not owned by us.
- WorkingSet* _ws;
+private:
+ // Not owned by us.
+ WorkingSet* _ws;
- // The filter is not owned by us.
- const MatchExpression* _filter;
+ // The filter is not owned by us.
+ const MatchExpression* _filter;
- // Owned by us.
- std::vector<PlanStage*> _children;
+ // Owned by us.
+ std::vector<PlanStage*> _children;
- // Which of _children are we calling work(...) on now?
- size_t _currentChild;
+ // Which of _children are we calling work(...) on now?
+ size_t _currentChild;
- // True if we dedup on RecordId, false otherwise.
- bool _dedup;
+ // True if we dedup on RecordId, false otherwise.
+ bool _dedup;
- // Which RecordIds have we returned?
- unordered_set<RecordId, RecordId::Hasher> _seen;
+ // Which RecordIds have we returned?
+ unordered_set<RecordId, RecordId::Hasher> _seen;
- // Stats
- CommonStats _commonStats;
- OrStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ OrStats _specificStats;
+};
} // namespace mongo
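
The OrStage declaration above makes the union-with-dedup contract concrete: children are handed over with addChild() (the stage owns them) and results are drained through the usual work() loop. A minimal usage sketch, assuming hypothetical, already-built child stages childA/childB plus a WorkingSet* ws and MatchExpression* filter that are not part of this diff:

    // Sketch only: childA, childB, ws and filter are assumed to exist elsewhere.
    OrStage orStage(ws, /*dedup=*/true, filter);
    orStage.addChild(childA);  // OrStage takes ownership of its children.
    orStage.addChild(childB);

    WorkingSetID id;
    while (!orStage.isEOF()) {
        PlanStage::StageState state = orStage.work(&id);
        if (PlanStage::ADVANCED == state) {
            WorkingSetMember* member = ws->get(id);
            // ... consume the deduplicated result, then free 'id' from the working set ...
        }
    }
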
diff --git a/src/mongo/db/exec/pipeline_proxy.cpp b/src/mongo/db/exec/pipeline_proxy.cpp
index d7082cacdbc..9ca0fe788b5 100644
--- a/src/mongo/db/exec/pipeline_proxy.cpp
+++ b/src/mongo/db/exec/pipeline_proxy.cpp
@@ -36,107 +36,106 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::shared_ptr;
- using std::vector;
-
- const char* PipelineProxyStage::kStageType = "PIPELINE_PROXY";
-
- PipelineProxyStage::PipelineProxyStage(intrusive_ptr<Pipeline> pipeline,
- const std::shared_ptr<PlanExecutor>& child,
- WorkingSet* ws)
- : _pipeline(pipeline)
- , _includeMetaData(_pipeline->getContext()->inShard) // send metadata to merger
- , _childExec(child)
- , _ws(ws)
- {}
-
- PlanStage::StageState PipelineProxyStage::work(WorkingSetID* out) {
- if (!out) {
- return PlanStage::FAILURE;
- }
-
- if (!_stash.empty()) {
- *out = _ws->allocate();
- WorkingSetMember* member = _ws->get(*out);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), _stash.back());
- _stash.pop_back();
- member->state = WorkingSetMember::OWNED_OBJ;
- return PlanStage::ADVANCED;
- }
-
- if (boost::optional<BSONObj> next = getNextBson()) {
- *out = _ws->allocate();
- WorkingSetMember* member = _ws->get(*out);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), *next);
- member->state = WorkingSetMember::OWNED_OBJ;
- return PlanStage::ADVANCED;
- }
-
- return PlanStage::IS_EOF;
+using boost::intrusive_ptr;
+using std::shared_ptr;
+using std::vector;
+
+const char* PipelineProxyStage::kStageType = "PIPELINE_PROXY";
+
+PipelineProxyStage::PipelineProxyStage(intrusive_ptr<Pipeline> pipeline,
+ const std::shared_ptr<PlanExecutor>& child,
+ WorkingSet* ws)
+    : _pipeline(pipeline),
+      _includeMetaData(_pipeline->getContext()->inShard),  // send metadata to merger
+      _childExec(child),
+      _ws(ws) {}
+
+PlanStage::StageState PipelineProxyStage::work(WorkingSetID* out) {
+ if (!out) {
+ return PlanStage::FAILURE;
}
- bool PipelineProxyStage::isEOF() {
- if (!_stash.empty())
- return false;
-
- if (boost::optional<BSONObj> next = getNextBson()) {
- _stash.push_back(*next);
- return false;
- }
-
- return true;
+ if (!_stash.empty()) {
+ *out = _ws->allocate();
+ WorkingSetMember* member = _ws->get(*out);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), _stash.back());
+ _stash.pop_back();
+ member->state = WorkingSetMember::OWNED_OBJ;
+ return PlanStage::ADVANCED;
}
- void PipelineProxyStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- // propagate to child executor if still in use
- if (std::shared_ptr<PlanExecutor> exec = _childExec.lock()) {
- exec->invalidate(txn, dl, type);
- }
+ if (boost::optional<BSONObj> next = getNextBson()) {
+ *out = _ws->allocate();
+ WorkingSetMember* member = _ws->get(*out);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), *next);
+ member->state = WorkingSetMember::OWNED_OBJ;
+ return PlanStage::ADVANCED;
}
- void PipelineProxyStage::saveState() {
- _pipeline->getContext()->opCtx = NULL;
- }
+ return PlanStage::IS_EOF;
+}
- void PipelineProxyStage::restoreState(OperationContext* opCtx) {
- invariant(_pipeline->getContext()->opCtx == NULL);
- _pipeline->getContext()->opCtx = opCtx;
- }
+bool PipelineProxyStage::isEOF() {
+ if (!_stash.empty())
+ return false;
- void PipelineProxyStage::pushBack(const BSONObj& obj) {
- _stash.push_back(obj);
+ if (boost::optional<BSONObj> next = getNextBson()) {
+ _stash.push_back(*next);
+ return false;
}
- vector<PlanStage*> PipelineProxyStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
- }
+ return true;
+}
- PlanStageStats* PipelineProxyStage::getStats() {
- std::unique_ptr<PlanStageStats> ret(new PlanStageStats(CommonStats(kStageType),
- STAGE_PIPELINE_PROXY));
- ret->specific.reset(new CollectionScanStats());
- return ret.release();
+void PipelineProxyStage::invalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ // propagate to child executor if still in use
+ if (std::shared_ptr<PlanExecutor> exec = _childExec.lock()) {
+ exec->invalidate(txn, dl, type);
}
-
- boost::optional<BSONObj> PipelineProxyStage::getNextBson() {
- if (boost::optional<Document> next = _pipeline->output()->getNext()) {
- if (_includeMetaData) {
- return next->toBsonWithMetaData();
- }
- else {
- return next->toBson();
- }
+}
+
+void PipelineProxyStage::saveState() {
+ _pipeline->getContext()->opCtx = NULL;
+}
+
+void PipelineProxyStage::restoreState(OperationContext* opCtx) {
+ invariant(_pipeline->getContext()->opCtx == NULL);
+ _pipeline->getContext()->opCtx = opCtx;
+}
+
+void PipelineProxyStage::pushBack(const BSONObj& obj) {
+ _stash.push_back(obj);
+}
+
+vector<PlanStage*> PipelineProxyStage::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
+
+PlanStageStats* PipelineProxyStage::getStats() {
+ std::unique_ptr<PlanStageStats> ret(
+ new PlanStageStats(CommonStats(kStageType), STAGE_PIPELINE_PROXY));
+ ret->specific.reset(new CollectionScanStats());
+ return ret.release();
+}
+
+boost::optional<BSONObj> PipelineProxyStage::getNextBson() {
+ if (boost::optional<Document> next = _pipeline->output()->getNext()) {
+ if (_includeMetaData) {
+ return next->toBsonWithMetaData();
+ } else {
+ return next->toBson();
}
-
- return boost::none;
}
- shared_ptr<PlanExecutor> PipelineProxyStage::getChildExecutor() {
- return _childExec.lock();
- }
+ return boost::none;
+}
+
+shared_ptr<PlanExecutor> PipelineProxyStage::getChildExecutor() {
+ return _childExec.lock();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index 68a33c0170e..ac501b70191 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -39,67 +39,73 @@
namespace mongo {
- /**
- * Stage for pulling results out from an aggregation pipeline.
- */
- class PipelineProxyStage : public PlanStage {
- public:
- PipelineProxyStage(boost::intrusive_ptr<Pipeline> pipeline,
- const std::shared_ptr<PlanExecutor>& child,
- WorkingSet* ws);
+/**
+ * Stage for pulling results out from an aggregation pipeline.
+ */
+class PipelineProxyStage : public PlanStage {
+public:
+ PipelineProxyStage(boost::intrusive_ptr<Pipeline> pipeline,
+ const std::shared_ptr<PlanExecutor>& child,
+ WorkingSet* ws);
- virtual PlanStage::StageState work(WorkingSetID* out);
+ virtual PlanStage::StageState work(WorkingSetID* out);
- virtual bool isEOF();
+ virtual bool isEOF();
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- //
- // Manage our OperationContext. We intentionally don't propagate to the child
- // Runner as that is handled by DocumentSourceCursor as it needs to.
- //
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
+ //
+ // Manage our OperationContext. We intentionally don't propagate to the child
+ // Runner as that is handled by DocumentSourceCursor as it needs to.
+ //
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
- /**
- * Make obj the next object returned by getNext().
- */
- void pushBack(const BSONObj& obj);
+ /**
+ * Make obj the next object returned by getNext().
+ */
+ void pushBack(const BSONObj& obj);
- /**
- * Return a shared pointer to the PlanExecutor that feeds the pipeline. The returned
- * pointer may be NULL.
- */
- std::shared_ptr<PlanExecutor> getChildExecutor();
+ /**
+ * Return a shared pointer to the PlanExecutor that feeds the pipeline. The returned
+ * pointer may be NULL.
+ */
+ std::shared_ptr<PlanExecutor> getChildExecutor();
- // Returns empty PlanStageStats object
- virtual PlanStageStats* getStats();
+ // Returns empty PlanStageStats object
+ virtual PlanStageStats* getStats();
- // Not used.
- virtual CommonStats* getCommonStats() const { return NULL; }
+ // Not used.
+ virtual CommonStats* getCommonStats() const {
+ return NULL;
+ }
- // Not used.
- virtual SpecificStats* getSpecificStats() const { return NULL; }
+ // Not used.
+ virtual SpecificStats* getSpecificStats() const {
+ return NULL;
+ }
- // Not used.
- virtual std::vector<PlanStage*> getChildren() const;
+ // Not used.
+ virtual std::vector<PlanStage*> getChildren() const;
- // Not used.
- virtual StageType stageType() const { return STAGE_PIPELINE_PROXY; }
+ // Not used.
+ virtual StageType stageType() const {
+ return STAGE_PIPELINE_PROXY;
+ }
- static const char* kStageType;
+ static const char* kStageType;
- private:
- boost::optional<BSONObj> getNextBson();
+private:
+ boost::optional<BSONObj> getNextBson();
- // Things in the _stash sould be returned before pulling items from _pipeline.
- const boost::intrusive_ptr<Pipeline> _pipeline;
- std::vector<BSONObj> _stash;
- const bool _includeMetaData;
- std::weak_ptr<PlanExecutor> _childExec;
+    // Things in the _stash should be returned before pulling items from _pipeline.
+ const boost::intrusive_ptr<Pipeline> _pipeline;
+ std::vector<BSONObj> _stash;
+ const bool _includeMetaData;
+ std::weak_ptr<PlanExecutor> _childExec;
- // Not owned by us.
- WorkingSet* _ws;
- };
+ // Not owned by us.
+ WorkingSet* _ws;
+};
-} // namespace mongo
+} // namespace mongo
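
As the implementation above shows, objects handed to pushBack() are stashed and returned by work() before any further output is pulled from the pipeline (the stash is drained from the back). A minimal usage sketch, assuming a hypothetical, already-constructed proxyStage and the WorkingSet* ws it was built with, neither of which comes from this diff:

    // Sketch only: proxyStage and ws are assumed to exist elsewhere.
    proxyStage.pushBack(BSON("a" << 1));  // returned ahead of pipeline results

    WorkingSetID id;
    if (PlanStage::ADVANCED == proxyStage.work(&id)) {
        WorkingSetMember* member = ws->get(id);
        // member->obj now carries the stashed {a: 1} document as an owned object.
    }
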
diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h
index 07536817ca6..a096664b01d 100644
--- a/src/mongo/db/exec/plan_stage.h
+++ b/src/mongo/db/exec/plan_stage.h
@@ -34,255 +34,247 @@
namespace mongo {
- class Collection;
- class RecordId;
- class OperationContext;
+class Collection;
+class RecordId;
+class OperationContext;
+
+/**
+ * A PlanStage ("stage") is the basic building block of a "Query Execution Plan." A stage is
+ * the smallest piece of machinery used in executing a compiled query. Stages either access
+ * data (from a collection or an index) to create a stream of results, or transform a stream of
+ * results (e.g. AND, OR, SORT) to create a stream of results.
+ *
+ * Stages have zero or more input streams but only one output stream. Data-accessing stages are
+ * leaves and data-transforming stages have children. Stages can be connected together to form
+ * a tree which is then executed (see plan_executor.h) to solve a query.
+ *
+ * A stage's input and output are each typed. Only stages with compatible types can be
+ * connected.
+ *
+ * All of the stages of a QEP share a WorkingSet (see working_set.h). Data source stages
+ * allocate a slot in the WorkingSet, fill the slot with data, and return the ID of that slot.
+ * Subsequent stages fetch a WorkingSetElement by its ID and operate on the enclosed data.
+ *
+ * Stages do nothing unless work() is called. work() is a request to the stage to consume one
+ * unit of input. Some stages (e.g. AND, SORT) require many calls to work() before generating
+ * output as they must consume many units of input. These stages will inform the caller that
+ * they need more time, and work() must be called again in order to produce an output.
+ *
+ * Every stage of a query implements the PlanStage interface. Queries perform a unit of work
+ * and report on their subsequent status; see StatusCode for possible states. Query results are
+ * passed through the WorkingSet interface; see working_set.h for details.
+ *
+ * All synchronization is the responsibility of the caller. Queries must be told to yield with
+ * saveState() if any underlying database state changes. If saveState() is called,
+ * restoreState() must be called again before any work() is done.
+ *
+ * Here is a very simple usage example:
+ *
+ * WorkingSet workingSet;
+ * PlanStage* rootStage = makeQueryPlan(&workingSet, ...);
+ * while (!rootStage->isEOF()) {
+ * WorkingSetID result;
+ * switch(rootStage->work(&result)) {
+ * case PlanStage::ADVANCED:
+ * // do something with result
+ * WorkingSetMember* member = workingSet.get(result);
+ * cout << "Result: " << member->obj << std::endl;
+ * break;
+ * case PlanStage::IS_EOF:
+ * // All done. Will fall out of while loop.
+ * break;
+ * case PlanStage::NEED_TIME:
+ * // Need more time.
+ * break;
+ * case PlanStage::FAILURE:
+ * // Throw exception or return error
+ * break;
+ * }
+ *
+ * if (shouldYield) {
+ * // Occasionally yield.
+ * stage->saveState();
+ * // Do work that requires a yield here (execute other plans, insert, delete, etc.).
+ * stage->restoreState();
+ * }
+ * }
+ */
+class PlanStage {
+public:
+ virtual ~PlanStage() {}
/**
- * A PlanStage ("stage") is the basic building block of a "Query Execution Plan." A stage is
- * the smallest piece of machinery used in executing a compiled query. Stages either access
- * data (from a collection or an index) to create a stream of results, or transform a stream of
- * results (e.g. AND, OR, SORT) to create a stream of results.
- *
- * Stages have zero or more input streams but only one output stream. Data-accessing stages are
- * leaves and data-transforming stages have children. Stages can be connected together to form
- * a tree which is then executed (see plan_executor.h) to solve a query.
- *
- * A stage's input and output are each typed. Only stages with compatible types can be
- * connected.
- *
- * All of the stages of a QEP share a WorkingSet (see working_set.h). Data source stages
- * allocate a slot in the WorkingSet, fill the slot with data, and return the ID of that slot.
- * Subsequent stages fetch a WorkingSetElement by its ID and operate on the enclosed data.
- *
- * Stages do nothing unless work() is called. work() is a request to the stage to consume one
- * unit of input. Some stages (e.g. AND, SORT) require many calls to work() before generating
- * output as they must consume many units of input. These stages will inform the caller that
- * they need more time, and work() must be called again in order to produce an output.
- *
- * Every stage of a query implements the PlanStage interface. Queries perform a unit of work
- * and report on their subsequent status; see StatusCode for possible states. Query results are
- * passed through the WorkingSet interface; see working_set.h for details.
- *
- * All synchronization is the responsibility of the caller. Queries must be told to yield with
- * saveState() if any underlying database state changes. If saveState() is called,
- * restoreState() must be called again before any work() is done.
- *
- * Here is a very simple usage example:
- *
- * WorkingSet workingSet;
- * PlanStage* rootStage = makeQueryPlan(&workingSet, ...);
- * while (!rootStage->isEOF()) {
- * WorkingSetID result;
- * switch(rootStage->work(&result)) {
- * case PlanStage::ADVANCED:
- * // do something with result
- * WorkingSetMember* member = workingSet.get(result);
- * cout << "Result: " << member->obj << std::endl;
- * break;
- * case PlanStage::IS_EOF:
- * // All done. Will fall out of while loop.
- * break;
- * case PlanStage::NEED_TIME:
- * // Need more time.
- * break;
- * case PlanStage::FAILURE:
- * // Throw exception or return error
- * break;
- * }
- *
- * if (shouldYield) {
- * // Occasionally yield.
- * stage->saveState();
- * // Do work that requires a yield here (execute other plans, insert, delete, etc.).
- * stage->restoreState();
- * }
- * }
+ * All possible return values of work(...)
*/
- class PlanStage {
- public:
- virtual ~PlanStage() { }
+ enum StageState {
+ // work(...) has returned a new result in its out parameter. The caller must free it
+ // from the working set when done with it.
+ ADVANCED,
- /**
- * All possible return values of work(...)
- */
- enum StageState {
- // work(...) has returned a new result in its out parameter. The caller must free it
- // from the working set when done with it.
- ADVANCED,
+ // work(...) won't do anything more. isEOF() will also be true. There is nothing
+ // output in the out parameter.
+ IS_EOF,
- // work(...) won't do anything more. isEOF() will also be true. There is nothing
- // output in the out parameter.
- IS_EOF,
+        // work(...) needs more time to produce a result. Call work(...) again. There is
+ // nothing output in the out parameter.
+ NEED_TIME,
- // work(...) needs more time to product a result. Call work(...) again. There is
- // nothing output in the out parameter.
- NEED_TIME,
-
- // The storage engine says we need to yield, possibly to fetch a record from disk, or
- // due to an aborted transaction in the storage layer.
- //
- // Full yield request semantics:
- //
- // Each stage that receives a NEED_YIELD from a child must propagate the NEED_YIELD up
- // and perform no work.
- //
- // If a yield is requested due to a WriteConflict, the out parameter of work(...) should
- // be populated with WorkingSet::INVALID_ID. If it is illegal to yield, a
- // WriteConflictException will be thrown.
- //
- // A yield-requesting stage populates the out parameter of work(...) with a WSID that
- // refers to a WSM with a Fetcher*. If it is illegal to yield, this is ignored. This
- // difference in behavior can be removed once SERVER-16051 is resolved.
- //
- // The plan executor is responsible for yielding and, if requested, paging in the data
- // upon receipt of a NEED_YIELD. The plan executor does NOT free the WSID of the
- // requested fetch. The stage that requested the fetch holds the WSID of the loc it
- // wants fetched. On the next call to work() that stage can assume a fetch was performed
- // on the WSM that the held WSID refers to.
- NEED_YIELD,
+ // The storage engine says we need to yield, possibly to fetch a record from disk, or
+ // due to an aborted transaction in the storage layer.
+ //
+ // Full yield request semantics:
+ //
+ // Each stage that receives a NEED_YIELD from a child must propagate the NEED_YIELD up
+ // and perform no work.
+ //
+ // If a yield is requested due to a WriteConflict, the out parameter of work(...) should
+ // be populated with WorkingSet::INVALID_ID. If it is illegal to yield, a
+ // WriteConflictException will be thrown.
+ //
+ // A yield-requesting stage populates the out parameter of work(...) with a WSID that
+ // refers to a WSM with a Fetcher*. If it is illegal to yield, this is ignored. This
+ // difference in behavior can be removed once SERVER-16051 is resolved.
+ //
+ // The plan executor is responsible for yielding and, if requested, paging in the data
+ // upon receipt of a NEED_YIELD. The plan executor does NOT free the WSID of the
+ // requested fetch. The stage that requested the fetch holds the WSID of the loc it
+ // wants fetched. On the next call to work() that stage can assume a fetch was performed
+ // on the WSM that the held WSID refers to.
+ NEED_YIELD,
- // Something went wrong but it's not an internal error. Perhaps our collection was
- // dropped or state deleted.
- DEAD,
+ // Something went wrong but it's not an internal error. Perhaps our collection was
+ // dropped or state deleted.
+ DEAD,
- // Something has gone unrecoverably wrong. Stop running this query.
- // If the out parameter does not refer to an invalid working set member,
- // call WorkingSetCommon::getStatusMemberObject() to get details on the failure.
- // Any class implementing this interface must set the WSID out parameter to
- // INVALID_ID or a valid WSM ID if FAILURE is returned.
- FAILURE,
- };
+ // Something has gone unrecoverably wrong. Stop running this query.
+ // If the out parameter does not refer to an invalid working set member,
+ // call WorkingSetCommon::getStatusMemberObject() to get details on the failure.
+ // Any class implementing this interface must set the WSID out parameter to
+ // INVALID_ID or a valid WSM ID if FAILURE is returned.
+ FAILURE,
+ };
- static std::string stateStr(const StageState& state) {
- if (ADVANCED == state) {
- return "ADVANCED";
- }
- else if (IS_EOF == state) {
- return "IS_EOF";
- }
- else if (NEED_TIME == state) {
- return "NEED_TIME";
- }
- else if (NEED_YIELD == state) {
- return "NEED_YIELD";
- }
- else if (DEAD == state) {
- return "DEAD";
- }
- else {
- verify(FAILURE == state);
- return "FAILURE";
- }
+ static std::string stateStr(const StageState& state) {
+ if (ADVANCED == state) {
+ return "ADVANCED";
+ } else if (IS_EOF == state) {
+ return "IS_EOF";
+ } else if (NEED_TIME == state) {
+ return "NEED_TIME";
+ } else if (NEED_YIELD == state) {
+ return "NEED_YIELD";
+ } else if (DEAD == state) {
+ return "DEAD";
+ } else {
+ verify(FAILURE == state);
+ return "FAILURE";
}
+ }
- /**
- * Perform a unit of work on the query. Ask the stage to produce the next unit of output.
- * Stage returns StageState::ADVANCED if *out is set to the next unit of output. Otherwise,
- * returns another value of StageState to indicate the stage's status.
- */
- virtual StageState work(WorkingSetID* out) = 0;
-
- /**
- * Returns true if no more work can be done on the query / out of results.
- */
- virtual bool isEOF() = 0;
+ /**
+ * Perform a unit of work on the query. Ask the stage to produce the next unit of output.
+ * Stage returns StageState::ADVANCED if *out is set to the next unit of output. Otherwise,
+ * returns another value of StageState to indicate the stage's status.
+ */
+ virtual StageState work(WorkingSetID* out) = 0;
- //
- // Yielding and isolation semantics:
- //
- // Any data that is not inserted, deleted, or modified during a yield will be faithfully
- // returned by a query that should return that data.
- //
- // Any data inserted, deleted, or modified during a yield that should be returned by a query
- // may or may not be returned by that query. The query could return: nothing; the data
- // before; the data after; or both the data before and the data after.
- //
- // In short, there is no isolation between a query and an insert/delete/update. AKA,
- // READ_UNCOMMITTED.
- //
+ /**
+ * Returns true if no more work can be done on the query / out of results.
+ */
+ virtual bool isEOF() = 0;
- /**
- * Notifies the stage that all locks are about to be released. The stage must save any
- * state required to resume where it was before saveState was called.
- *
- * Stages must be able to handle multiple calls to saveState() in a row without a call to
- * restoreState() in between.
- */
- virtual void saveState() = 0;
+ //
+ // Yielding and isolation semantics:
+ //
+ // Any data that is not inserted, deleted, or modified during a yield will be faithfully
+ // returned by a query that should return that data.
+ //
+ // Any data inserted, deleted, or modified during a yield that should be returned by a query
+ // may or may not be returned by that query. The query could return: nothing; the data
+ // before; the data after; or both the data before and the data after.
+ //
+ // In short, there is no isolation between a query and an insert/delete/update. AKA,
+ // READ_UNCOMMITTED.
+ //
- /**
- * Notifies the stage that any required locks have been reacquired. The stage must restore
- * any saved state and be ready to handle calls to work().
- *
- * Can only be called after saveState.
- *
- * If the stage needs an OperationContext during its execution, it may keep a handle to the
- * provided OperationContext (which is valid until the next call to saveState()).
- */
- virtual void restoreState(OperationContext* opCtx) = 0;
+ /**
+ * Notifies the stage that all locks are about to be released. The stage must save any
+ * state required to resume where it was before saveState was called.
+ *
+ * Stages must be able to handle multiple calls to saveState() in a row without a call to
+ * restoreState() in between.
+ */
+ virtual void saveState() = 0;
- /**
- * Notifies a stage that a RecordId is going to be deleted (or in-place updated) so that the
- * stage can invalidate or modify any state required to continue processing without this
- * RecordId.
- *
- * Can only be called after a saveState but before a restoreState.
- *
- * The provided OperationContext should be used if any work needs to be performed during the
- * invalidate (as the state of the stage must be saved before any calls to invalidate, the
- * stage's own OperationContext is inactive during the invalidate and should not be used).
- */
- virtual void invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) = 0;
+ /**
+ * Notifies the stage that any required locks have been reacquired. The stage must restore
+ * any saved state and be ready to handle calls to work().
+ *
+ * Can only be called after saveState.
+ *
+ * If the stage needs an OperationContext during its execution, it may keep a handle to the
+ * provided OperationContext (which is valid until the next call to saveState()).
+ */
+ virtual void restoreState(OperationContext* opCtx) = 0;
- /**
- * Retrieve a list of this stage's children. This stage keeps ownership of
- * its children.
- */
- virtual std::vector<PlanStage*> getChildren() const = 0;
+ /**
+ * Notifies a stage that a RecordId is going to be deleted (or in-place updated) so that the
+ * stage can invalidate or modify any state required to continue processing without this
+ * RecordId.
+ *
+ * Can only be called after a saveState but before a restoreState.
+ *
+ * The provided OperationContext should be used if any work needs to be performed during the
+ * invalidate (as the state of the stage must be saved before any calls to invalidate, the
+ * stage's own OperationContext is inactive during the invalidate and should not be used).
+ */
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) = 0;
- /**
- * What type of stage is this?
- */
- virtual StageType stageType() const = 0;
+ /**
+ * Retrieve a list of this stage's children. This stage keeps ownership of
+ * its children.
+ */
+ virtual std::vector<PlanStage*> getChildren() const = 0;
- //
- // Execution stats.
- //
+ /**
+ * What type of stage is this?
+ */
+ virtual StageType stageType() const = 0;
- /**
- * Returns a tree of stats. See plan_stats.h for the details of this structure. If the
- * stage has any children it must propagate the request for stats to them.
- *
- * Creates plan stats tree which has the same topology as the original execution tree,
- * but has a separate lifetime.
- *
- * Caller owns returned pointer.
- */
- virtual PlanStageStats* getStats() = 0;
+ //
+ // Execution stats.
+ //
- /**
- * Get the CommonStats for this stage. The pointer is *not* owned by the caller.
- *
- * The returned pointer is only valid when the corresponding stage is also valid.
- * It must not exist past the stage. If you need the stats to outlive the stage,
- * use the getStats(...) method above.
- */
- virtual const CommonStats* getCommonStats() const = 0;
+ /**
+ * Returns a tree of stats. See plan_stats.h for the details of this structure. If the
+ * stage has any children it must propagate the request for stats to them.
+ *
+ * Creates plan stats tree which has the same topology as the original execution tree,
+ * but has a separate lifetime.
+ *
+ * Caller owns returned pointer.
+ */
+ virtual PlanStageStats* getStats() = 0;
- /**
- * Get stats specific to this stage. Some stages may not have specific stats, in which
- * case they return NULL. The pointer is *not* owned by the caller.
- *
- * The returned pointer is only valid when the corresponding stage is also valid.
- * It must not exist past the stage. If you need the stats to outlive the stage,
- * use the getStats(...) method above.
- */
- virtual const SpecificStats* getSpecificStats() const = 0;
+ /**
+ * Get the CommonStats for this stage. The pointer is *not* owned by the caller.
+ *
+ * The returned pointer is only valid when the corresponding stage is also valid.
+ * It must not exist past the stage. If you need the stats to outlive the stage,
+ * use the getStats(...) method above.
+ */
+ virtual const CommonStats* getCommonStats() const = 0;
- };
+ /**
+ * Get stats specific to this stage. Some stages may not have specific stats, in which
+ * case they return NULL. The pointer is *not* owned by the caller.
+ *
+ * The returned pointer is only valid when the corresponding stage is also valid.
+ * It must not exist past the stage. If you need the stats to outlive the stage,
+ * use the getStats(...) method above.
+ */
+ virtual const SpecificStats* getSpecificStats() const = 0;
+};
} // namespace mongo
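
The NEED_YIELD contract spelled out above (propagate the request upward, perform no work, and let the plan executor yield and page in any requested record) is the part wrapping stages most easily get wrong. A minimal sketch of a hypothetical pass-through stage honoring that contract; the PassThroughStage class and its _child member are illustrative and not part of this diff:

    PlanStage::StageState PassThroughStage::work(WorkingSetID* out) {
        ++_commonStats.works;
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState childState = _child->work(&id);

        if (PlanStage::NEED_YIELD == childState) {
            // Per the contract above: hand the child's WSID (or INVALID_ID for a
            // WriteConflict) up unchanged and do no further work this call.
            *out = id;
            ++_commonStats.needYield;
            return PlanStage::NEED_YIELD;
        }

        if (PlanStage::ADVANCED == childState) {
            *out = id;
            ++_commonStats.advanced;
        }
        return childState;
    }
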
diff --git a/src/mongo/db/exec/plan_stats.cpp b/src/mongo/db/exec/plan_stats.cpp
index bf079a7113a..0eb3c69b6f2 100644
--- a/src/mongo/db/exec/plan_stats.cpp
+++ b/src/mongo/db/exec/plan_stats.cpp
@@ -31,19 +31,19 @@
namespace mongo {
- void CommonStats::writeExplainTo(BSONObjBuilder* bob) const {
- if (NULL == bob) {
- return;
- }
- // potential overflow because original counters are unsigned 64-bit values
- bob->append("works", static_cast<long long>(works));
- bob->append("advanced", static_cast<long long>(advanced));
+void CommonStats::writeExplainTo(BSONObjBuilder* bob) const {
+ if (NULL == bob) {
+ return;
}
+ // potential overflow because original counters are unsigned 64-bit values
+ bob->append("works", static_cast<long long>(works));
+ bob->append("advanced", static_cast<long long>(advanced));
+}
- // forward to CommonStats for now
- // TODO: fill in specific stats
- void PlanStageStats::writeExplainTo(BSONObjBuilder* bob) const {
- common.writeExplainTo(bob);
- }
+// forward to CommonStats for now
+// TODO: fill in specific stats
+void PlanStageStats::writeExplainTo(BSONObjBuilder* bob) const {
+ common.writeExplainTo(bob);
+}
} // namespace mongo
diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h
index 3504355f071..c3b514260a3 100644
--- a/src/mongo/db/exec/plan_stats.h
+++ b/src/mongo/db/exec/plan_stats.h
@@ -37,626 +37,614 @@
#include "mongo/db/query/stage_types.h"
#include "mongo/platform/cstdint.h"
#include "mongo/util/time_support.h"
-#include "mongo/util/net/listen.h" // for Listener::getElapsedTimeMillis()
+#include "mongo/util/net/listen.h" // for Listener::getElapsedTimeMillis()
namespace mongo {
+/**
+ * The interface all specific-to-stage stats provide.
+ */
+struct SpecificStats {
+ virtual ~SpecificStats() {}
+
/**
- * The interface all specific-to-stage stats provide.
+ * Make a deep copy.
*/
- struct SpecificStats {
- virtual ~SpecificStats() { }
-
- /**
- * Make a deep copy.
- */
- virtual SpecificStats* clone() const = 0;
- };
-
- // Every stage has CommonStats.
- struct CommonStats {
- CommonStats(const char* type)
- : stageTypeStr(type),
- works(0),
- yields(0),
- unyields(0),
- invalidates(0),
- advanced(0),
- needTime(0),
- needYield(0),
- executionTimeMillis(0),
- isEOF(false) { }
- // String giving the type of the stage. Not owned.
- const char* stageTypeStr;
-
- // Count calls into the stage.
- size_t works;
- size_t yields;
- size_t unyields;
- size_t invalidates;
-
- // How many times was this state the return value of work(...)?
- size_t advanced;
- size_t needTime;
- size_t needYield;
-
- // BSON representation of a MatchExpression affixed to this node. If there
- // is no filter affixed, then 'filter' should be an empty BSONObj.
- BSONObj filter;
-
- // Time elapsed while working inside this stage.
- long long executionTimeMillis;
-
- // TODO: have some way of tracking WSM sizes (or really any series of #s). We can measure
- // the size of our inputs and the size of our outputs. We can do a lot with the WS here.
-
- // TODO: once we've picked a plan, collect different (or additional) stats for display to
- // the user, eg. time_t totalTimeSpent;
-
- // TODO: keep track of the total yield time / fetch time done for a plan.
-
- bool isEOF;
- private:
- // Default constructor is illegal.
- CommonStats();
- };
-
- // The universal container for a stage's stats.
- struct PlanStageStats {
- PlanStageStats(const CommonStats& c, StageType t) : stageType(t), common(c) { }
-
- ~PlanStageStats() {
- for (size_t i = 0; i < children.size(); ++i) {
- delete children[i];
- }
+ virtual SpecificStats* clone() const = 0;
+};
+
+// Every stage has CommonStats.
+struct CommonStats {
+ CommonStats(const char* type)
+ : stageTypeStr(type),
+ works(0),
+ yields(0),
+ unyields(0),
+ invalidates(0),
+ advanced(0),
+ needTime(0),
+ needYield(0),
+ executionTimeMillis(0),
+ isEOF(false) {}
+ // String giving the type of the stage. Not owned.
+ const char* stageTypeStr;
+
+ // Count calls into the stage.
+ size_t works;
+ size_t yields;
+ size_t unyields;
+ size_t invalidates;
+
+ // How many times was this state the return value of work(...)?
+ size_t advanced;
+ size_t needTime;
+ size_t needYield;
+
+ // BSON representation of a MatchExpression affixed to this node. If there
+ // is no filter affixed, then 'filter' should be an empty BSONObj.
+ BSONObj filter;
+
+ // Time elapsed while working inside this stage.
+ long long executionTimeMillis;
+
+ // TODO: have some way of tracking WSM sizes (or really any series of #s). We can measure
+ // the size of our inputs and the size of our outputs. We can do a lot with the WS here.
+
+ // TODO: once we've picked a plan, collect different (or additional) stats for display to
+ // the user, eg. time_t totalTimeSpent;
+
+ // TODO: keep track of the total yield time / fetch time done for a plan.
+
+ bool isEOF;
+
+private:
+ // Default constructor is illegal.
+ CommonStats();
+};
+
+// The universal container for a stage's stats.
+struct PlanStageStats {
+ PlanStageStats(const CommonStats& c, StageType t) : stageType(t), common(c) {}
+
+ ~PlanStageStats() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ delete children[i];
}
+ }
- /**
- * Make a deep copy.
- */
- PlanStageStats* clone() const {
- PlanStageStats* stats = new PlanStageStats(common, stageType);
- if (specific.get()) {
- stats->specific.reset(specific->clone());
- }
- for (size_t i = 0; i < children.size(); ++i) {
- invariant(children[i]);
- stats->children.push_back(children[i]->clone());
- }
- return stats;
+ /**
+ * Make a deep copy.
+ */
+ PlanStageStats* clone() const {
+ PlanStageStats* stats = new PlanStageStats(common, stageType);
+ if (specific.get()) {
+ stats->specific.reset(specific->clone());
}
+ for (size_t i = 0; i < children.size(); ++i) {
+ invariant(children[i]);
+ stats->children.push_back(children[i]->clone());
+ }
+ return stats;
+ }
- // See query/stage_type.h
- StageType stageType;
+ // See query/stage_type.h
+ StageType stageType;
- // Stats exported by implementing the PlanStage interface.
- CommonStats common;
+ // Stats exported by implementing the PlanStage interface.
+ CommonStats common;
- // Per-stage place to stash additional information
- std::unique_ptr<SpecificStats> specific;
+ // Per-stage place to stash additional information
+ std::unique_ptr<SpecificStats> specific;
- // The stats of the node's children.
- std::vector<PlanStageStats*> children;
+ // The stats of the node's children.
+ std::vector<PlanStageStats*> children;
- private:
- MONGO_DISALLOW_COPYING(PlanStageStats);
- };
+private:
+ MONGO_DISALLOW_COPYING(PlanStageStats);
+};
- struct AndHashStats : public SpecificStats {
- AndHashStats() : flaggedButPassed(0),
- flaggedInProgress(0),
- memUsage(0),
- memLimit(0) { }
+struct AndHashStats : public SpecificStats {
+ AndHashStats() : flaggedButPassed(0), flaggedInProgress(0), memUsage(0), memLimit(0) {}
- virtual ~AndHashStats() { }
+ virtual ~AndHashStats() {}
- virtual SpecificStats* clone() const {
- AndHashStats* specific = new AndHashStats(*this);
- return specific;
- }
+ virtual SpecificStats* clone() const {
+ AndHashStats* specific = new AndHashStats(*this);
+ return specific;
+ }
- // Invalidation counters.
- // How many results had the AND fully evaluated but were invalidated?
- size_t flaggedButPassed;
+ // Invalidation counters.
+ // How many results had the AND fully evaluated but were invalidated?
+ size_t flaggedButPassed;
- // How many results were mid-AND but got flagged?
- size_t flaggedInProgress;
+ // How many results were mid-AND but got flagged?
+ size_t flaggedInProgress;
- // How many entries are in the map after each child?
- // child 'i' produced children[i].common.advanced RecordIds, of which mapAfterChild[i] were
- // intersections.
- std::vector<size_t> mapAfterChild;
+ // How many entries are in the map after each child?
+ // child 'i' produced children[i].common.advanced RecordIds, of which mapAfterChild[i] were
+ // intersections.
+ std::vector<size_t> mapAfterChild;
- // mapAfterChild[mapAfterChild.size() - 1] WSMswere match tested.
- // commonstats.advanced is how many passed.
+    // mapAfterChild[mapAfterChild.size() - 1] WSMs were match tested.
+    // commonStats.advanced is how many passed.
- // What's our current memory usage?
- size_t memUsage;
+ // What's our current memory usage?
+ size_t memUsage;
- // What's our memory limit?
- size_t memLimit;
- };
+ // What's our memory limit?
+ size_t memLimit;
+};
- struct AndSortedStats : public SpecificStats {
- AndSortedStats() : flagged(0) { }
+struct AndSortedStats : public SpecificStats {
+ AndSortedStats() : flagged(0) {}
- virtual ~AndSortedStats() { }
+ virtual ~AndSortedStats() {}
- virtual SpecificStats* clone() const {
- AndSortedStats* specific = new AndSortedStats(*this);
- return specific;
- }
+ virtual SpecificStats* clone() const {
+ AndSortedStats* specific = new AndSortedStats(*this);
+ return specific;
+ }
- // How many results from each child did not pass the AND?
- std::vector<size_t> failedAnd;
+ // How many results from each child did not pass the AND?
+ std::vector<size_t> failedAnd;
- // How many results were flagged via invalidation?
- size_t flagged;
- };
+ // How many results were flagged via invalidation?
+ size_t flagged;
+};
- struct CachedPlanStats : public SpecificStats {
- CachedPlanStats() { }
+struct CachedPlanStats : public SpecificStats {
+ CachedPlanStats() {}
- virtual SpecificStats* clone() const {
- return new CachedPlanStats(*this);
- }
- };
+ virtual SpecificStats* clone() const {
+ return new CachedPlanStats(*this);
+ }
+};
- struct CollectionScanStats : public SpecificStats {
- CollectionScanStats() : docsTested(0), direction(1) { }
+struct CollectionScanStats : public SpecificStats {
+ CollectionScanStats() : docsTested(0), direction(1) {}
- virtual SpecificStats* clone() const {
- CollectionScanStats* specific = new CollectionScanStats(*this);
- return specific;
- }
+ virtual SpecificStats* clone() const {
+ CollectionScanStats* specific = new CollectionScanStats(*this);
+ return specific;
+ }
- // How many documents did we check against our filter?
- size_t docsTested;
+ // How many documents did we check against our filter?
+ size_t docsTested;
- // >0 if we're traversing the collection forwards. <0 if we're traversing it
- // backwards.
- int direction;
- };
+ // >0 if we're traversing the collection forwards. <0 if we're traversing it
+ // backwards.
+ int direction;
+};
- struct CountStats : public SpecificStats {
- CountStats() : nCounted(0), nSkipped(0), trivialCount(false) { }
+struct CountStats : public SpecificStats {
+ CountStats() : nCounted(0), nSkipped(0), trivialCount(false) {}
- virtual SpecificStats* clone() const {
- CountStats* specific = new CountStats(*this);
- return specific;
- }
+ virtual SpecificStats* clone() const {
+ CountStats* specific = new CountStats(*this);
+ return specific;
+ }
- // The result of the count.
- long long nCounted;
+ // The result of the count.
+ long long nCounted;
- // The number of results we skipped over.
- long long nSkipped;
+ // The number of results we skipped over.
+ long long nSkipped;
- // A "trivial count" is one that we can answer by calling numRecords() on the
- // collection, without actually going through any query logic.
- bool trivialCount;
- };
+ // A "trivial count" is one that we can answer by calling numRecords() on the
+ // collection, without actually going through any query logic.
+ bool trivialCount;
+};
- struct CountScanStats : public SpecificStats {
- CountScanStats() : indexVersion(0),
- isMultiKey(false),
- isPartial(false),
- isSparse(false),
- isUnique(false),
- keysExamined(0) { }
+struct CountScanStats : public SpecificStats {
+ CountScanStats()
+ : indexVersion(0),
+ isMultiKey(false),
+ isPartial(false),
+ isSparse(false),
+ isUnique(false),
+ keysExamined(0) {}
- virtual ~CountScanStats() { }
+ virtual ~CountScanStats() {}
- virtual SpecificStats* clone() const {
- CountScanStats* specific = new CountScanStats(*this);
- // BSON objects have to be explicitly copied.
- specific->keyPattern = keyPattern.getOwned();
- return specific;
- }
+ virtual SpecificStats* clone() const {
+ CountScanStats* specific = new CountScanStats(*this);
+ // BSON objects have to be explicitly copied.
+ specific->keyPattern = keyPattern.getOwned();
+ return specific;
+ }
- std::string indexName;
+ std::string indexName;
- BSONObj keyPattern;
+ BSONObj keyPattern;
- int indexVersion;
+ int indexVersion;
- bool isMultiKey;
- bool isPartial;
- bool isSparse;
- bool isUnique;
+ bool isMultiKey;
+ bool isPartial;
+ bool isSparse;
+ bool isUnique;
- size_t keysExamined;
+ size_t keysExamined;
+};
- };
+struct DeleteStats : public SpecificStats {
+ DeleteStats() : docsDeleted(0), nInvalidateSkips(0) {}
- struct DeleteStats : public SpecificStats {
- DeleteStats() : docsDeleted(0), nInvalidateSkips(0) { }
+ virtual SpecificStats* clone() const {
+ return new DeleteStats(*this);
+ }
- virtual SpecificStats* clone() const {
- return new DeleteStats(*this);
- }
+ size_t docsDeleted;
- size_t docsDeleted;
+ // Invalidated documents can be force-fetched, causing the now invalid RecordId to
+ // be thrown out. The delete stage skips over any results which do not have a RecordId.
+ size_t nInvalidateSkips;
+};
- // Invalidated documents can be force-fetched, causing the now invalid RecordId to
- // be thrown out. The delete stage skips over any results which do not have a RecordId.
- size_t nInvalidateSkips;
- };
+struct DistinctScanStats : public SpecificStats {
+ DistinctScanStats() : keysExamined(0), indexVersion(0) {}
- struct DistinctScanStats : public SpecificStats {
- DistinctScanStats() : keysExamined(0), indexVersion(0) { }
+ virtual SpecificStats* clone() const {
+ DistinctScanStats* specific = new DistinctScanStats(*this);
+ specific->keyPattern = keyPattern.getOwned();
+ return specific;
+ }
- virtual SpecificStats* clone() const {
- DistinctScanStats* specific = new DistinctScanStats(*this);
- specific->keyPattern = keyPattern.getOwned();
- return specific;
- }
+ // How many keys did we look at while distinct-ing?
+ size_t keysExamined;
- // How many keys did we look at while distinct-ing?
- size_t keysExamined;
+ std::string indexName;
- std::string indexName;
+ BSONObj keyPattern;
- BSONObj keyPattern;
+ int indexVersion;
+};
- int indexVersion;
- };
+struct FetchStats : public SpecificStats {
+ FetchStats() : alreadyHasObj(0), forcedFetches(0), docsExamined(0) {}
- struct FetchStats : public SpecificStats {
- FetchStats() : alreadyHasObj(0),
- forcedFetches(0),
- docsExamined(0) { }
+ virtual ~FetchStats() {}
- virtual ~FetchStats() { }
+ virtual SpecificStats* clone() const {
+ FetchStats* specific = new FetchStats(*this);
+ return specific;
+ }
- virtual SpecificStats* clone() const {
- FetchStats* specific = new FetchStats(*this);
- return specific;
- }
+ // Have we seen anything that already had an object?
+ size_t alreadyHasObj;
- // Have we seen anything that already had an object?
- size_t alreadyHasObj;
+ // How many records were we forced to fetch as the result of an invalidation?
+ size_t forcedFetches;
- // How many records were we forced to fetch as the result of an invalidation?
- size_t forcedFetches;
+ // The total number of full documents touched by the fetch stage.
+ size_t docsExamined;
+};
- // The total number of full documents touched by the fetch stage.
- size_t docsExamined;
- };
+struct GroupStats : public SpecificStats {
+ GroupStats() : nGroups(0) {}
- struct GroupStats : public SpecificStats {
- GroupStats() : nGroups(0) { }
+ virtual ~GroupStats() {}
- virtual ~GroupStats() { }
+ virtual SpecificStats* clone() const {
+ GroupStats* specific = new GroupStats(*this);
+ return specific;
+ }
- virtual SpecificStats* clone() const {
- GroupStats* specific = new GroupStats(*this);
- return specific;
- }
+ // The total number of groups.
+ size_t nGroups;
+};
- // The total number of groups.
- size_t nGroups;
- };
+struct IDHackStats : public SpecificStats {
+ IDHackStats() : keysExamined(0), docsExamined(0) {}
- struct IDHackStats : public SpecificStats {
- IDHackStats() : keysExamined(0),
- docsExamined(0) { }
+ virtual ~IDHackStats() {}
- virtual ~IDHackStats() { }
+ virtual SpecificStats* clone() const {
+ IDHackStats* specific = new IDHackStats(*this);
+ return specific;
+ }
- virtual SpecificStats* clone() const {
- IDHackStats* specific = new IDHackStats(*this);
- return specific;
- }
+ // Number of entries retrieved from the index while executing the idhack.
+ size_t keysExamined;
- // Number of entries retrieved from the index while executing the idhack.
- size_t keysExamined;
-
- // Number of documents retrieved from the collection while executing the idhack.
- size_t docsExamined;
-
- };
-
- struct IndexScanStats : public SpecificStats {
- IndexScanStats() : indexVersion(0),
- direction(1),
- isMultiKey(false),
- isPartial(false),
- isSparse(false),
- isUnique(false),
- dupsTested(0),
- dupsDropped(0),
- seenInvalidated(0),
- keysExamined(0) { }
-
- virtual ~IndexScanStats() { }
-
- virtual SpecificStats* clone() const {
- IndexScanStats* specific = new IndexScanStats(*this);
- // BSON objects have to be explicitly copied.
- specific->keyPattern = keyPattern.getOwned();
- specific->indexBounds = indexBounds.getOwned();
- return specific;
- }
+ // Number of documents retrieved from the collection while executing the idhack.
+ size_t docsExamined;
+};
- // Index type being used.
- std::string indexType;
+struct IndexScanStats : public SpecificStats {
+ IndexScanStats()
+ : indexVersion(0),
+ direction(1),
+ isMultiKey(false),
+ isPartial(false),
+ isSparse(false),
+ isUnique(false),
+ dupsTested(0),
+ dupsDropped(0),
+ seenInvalidated(0),
+ keysExamined(0) {}
- // name of the index being used
- std::string indexName;
+ virtual ~IndexScanStats() {}
- BSONObj keyPattern;
+ virtual SpecificStats* clone() const {
+ IndexScanStats* specific = new IndexScanStats(*this);
+ // BSON objects have to be explicitly copied.
+ specific->keyPattern = keyPattern.getOwned();
+ specific->indexBounds = indexBounds.getOwned();
+ return specific;
+ }
- int indexVersion;
+ // Index type being used.
+ std::string indexType;
- // A BSON (opaque, ie. hands off other than toString() it) representation of the bounds
- // used.
- BSONObj indexBounds;
+ // name of the index being used
+ std::string indexName;
- // >1 if we're traversing the index along with its order. <1 if we're traversing it
- // against the order.
- int direction;
+ BSONObj keyPattern;
- // index properties
- // Whether this index is over a field that contain array values.
- bool isMultiKey;
- bool isPartial;
- bool isSparse;
- bool isUnique;
+ int indexVersion;
- size_t dupsTested;
- size_t dupsDropped;
+    // A BSON (opaque, i.e. hands off other than toString() it) representation of the bounds
+ // used.
+ BSONObj indexBounds;
- size_t seenInvalidated;
- // TODO: we could track key sizes here.
+    // >0 if we're traversing the index along with its order. <0 if we're traversing it
+    // against the order.
+ int direction;
- // Number of entries retrieved from the index during the scan.
- size_t keysExamined;
+ // index properties
+    // Whether this index is over a field that contains array values.
+ bool isMultiKey;
+ bool isPartial;
+ bool isSparse;
+ bool isUnique;
- };
+ size_t dupsTested;
+ size_t dupsDropped;
- struct LimitStats : public SpecificStats {
- LimitStats() : limit(0) { }
+ size_t seenInvalidated;
+ // TODO: we could track key sizes here.
- virtual SpecificStats* clone() const {
- LimitStats* specific = new LimitStats(*this);
- return specific;
- }
+ // Number of entries retrieved from the index during the scan.
+ size_t keysExamined;
+};
- size_t limit;
- };
+struct LimitStats : public SpecificStats {
+ LimitStats() : limit(0) {}
- struct MockStats : public SpecificStats {
- MockStats() { }
+ virtual SpecificStats* clone() const {
+ LimitStats* specific = new LimitStats(*this);
+ return specific;
+ }
- virtual SpecificStats* clone() const {
- return new MockStats(*this);
- }
- };
+ size_t limit;
+};
- struct MultiPlanStats : public SpecificStats {
- MultiPlanStats() { }
+struct MockStats : public SpecificStats {
+ MockStats() {}
- virtual SpecificStats* clone() const {
- return new MultiPlanStats(*this);
- }
- };
+ virtual SpecificStats* clone() const {
+ return new MockStats(*this);
+ }
+};
- struct OrStats : public SpecificStats {
- OrStats() : dupsTested(0),
- dupsDropped(0),
- locsForgotten(0) { }
+struct MultiPlanStats : public SpecificStats {
+ MultiPlanStats() {}
- virtual ~OrStats() { }
+ virtual SpecificStats* clone() const {
+ return new MultiPlanStats(*this);
+ }
+};
- virtual SpecificStats* clone() const {
- OrStats* specific = new OrStats(*this);
- return specific;
- }
+struct OrStats : public SpecificStats {
+ OrStats() : dupsTested(0), dupsDropped(0), locsForgotten(0) {}
- size_t dupsTested;
- size_t dupsDropped;
+ virtual ~OrStats() {}
- // How many calls to invalidate(...) actually removed a RecordId from our deduping map?
- size_t locsForgotten;
- };
+ virtual SpecificStats* clone() const {
+ OrStats* specific = new OrStats(*this);
+ return specific;
+ }
- struct ProjectionStats : public SpecificStats {
- ProjectionStats() { }
+ size_t dupsTested;
+ size_t dupsDropped;
- virtual SpecificStats* clone() const {
- ProjectionStats* specific = new ProjectionStats(*this);
- return specific;
- }
+ // How many calls to invalidate(...) actually removed a RecordId from our deduping map?
+ size_t locsForgotten;
+};
- // Object specifying the projection transformation to apply.
- BSONObj projObj;
- };
+struct ProjectionStats : public SpecificStats {
+ ProjectionStats() {}
- struct SortStats : public SpecificStats {
- SortStats() : forcedFetches(0), memUsage(0), memLimit(0) { }
+ virtual SpecificStats* clone() const {
+ ProjectionStats* specific = new ProjectionStats(*this);
+ return specific;
+ }
- virtual ~SortStats() { }
+ // Object specifying the projection transformation to apply.
+ BSONObj projObj;
+};
- virtual SpecificStats* clone() const {
- SortStats* specific = new SortStats(*this);
- return specific;
- }
+struct SortStats : public SpecificStats {
+ SortStats() : forcedFetches(0), memUsage(0), memLimit(0) {}
- // How many records were we forced to fetch as the result of an invalidation?
- size_t forcedFetches;
+ virtual ~SortStats() {}
- // What's our current memory usage?
- size_t memUsage;
+ virtual SpecificStats* clone() const {
+ SortStats* specific = new SortStats(*this);
+ return specific;
+ }
- // What's our memory limit?
- size_t memLimit;
+ // How many records were we forced to fetch as the result of an invalidation?
+ size_t forcedFetches;
- // The number of results to return from the sort.
- size_t limit;
+ // What's our current memory usage?
+ size_t memUsage;
- // The pattern according to which we are sorting.
- BSONObj sortPattern;
- };
+ // What's our memory limit?
+ size_t memLimit;
- struct MergeSortStats : public SpecificStats {
- MergeSortStats() : dupsTested(0),
- dupsDropped(0),
- forcedFetches(0) { }
+ // The number of results to return from the sort.
+ size_t limit;
- virtual ~MergeSortStats() { }
+ // The pattern according to which we are sorting.
+ BSONObj sortPattern;
+};
- virtual SpecificStats* clone() const {
- MergeSortStats* specific = new MergeSortStats(*this);
- return specific;
- }
+struct MergeSortStats : public SpecificStats {
+ MergeSortStats() : dupsTested(0), dupsDropped(0), forcedFetches(0) {}
- size_t dupsTested;
- size_t dupsDropped;
+ virtual ~MergeSortStats() {}
- // How many records were we forced to fetch as the result of an invalidation?
- size_t forcedFetches;
+ virtual SpecificStats* clone() const {
+ MergeSortStats* specific = new MergeSortStats(*this);
+ return specific;
+ }
- // The pattern according to which we are sorting.
- BSONObj sortPattern;
- };
+ size_t dupsTested;
+ size_t dupsDropped;
- struct ShardingFilterStats : public SpecificStats {
- ShardingFilterStats() : chunkSkips(0) { }
+ // How many records were we forced to fetch as the result of an invalidation?
+ size_t forcedFetches;
- virtual SpecificStats* clone() const {
- ShardingFilterStats* specific = new ShardingFilterStats(*this);
- return specific;
- }
+ // The pattern according to which we are sorting.
+ BSONObj sortPattern;
+};
- size_t chunkSkips;
- };
+struct ShardingFilterStats : public SpecificStats {
+ ShardingFilterStats() : chunkSkips(0) {}
- struct SkipStats : public SpecificStats {
- SkipStats() : skip(0) { }
+ virtual SpecificStats* clone() const {
+ ShardingFilterStats* specific = new ShardingFilterStats(*this);
+ return specific;
+ }
- virtual SpecificStats* clone() const {
- SkipStats* specific = new SkipStats(*this);
- return specific;
- }
+ size_t chunkSkips;
+};
- size_t skip;
- };
-
- struct IntervalStats {
-
- IntervalStats() :
- numResultsFound(0),
- numResultsBuffered(0),
- minDistanceAllowed(-1),
- maxDistanceAllowed(-1),
- inclusiveMaxDistanceAllowed(false),
- minDistanceFound(-1),
- maxDistanceFound(-1),
- minDistanceBuffered(-1),
- maxDistanceBuffered(-1) {
- }
+struct SkipStats : public SpecificStats {
+ SkipStats() : skip(0) {}
- long long numResultsFound;
- long long numResultsBuffered;
+ virtual SpecificStats* clone() const {
+ SkipStats* specific = new SkipStats(*this);
+ return specific;
+ }
- double minDistanceAllowed;
- double maxDistanceAllowed;
- bool inclusiveMaxDistanceAllowed;
+ size_t skip;
+};
- double minDistanceFound;
- double maxDistanceFound;
- double minDistanceBuffered;
- double maxDistanceBuffered;
- };
+struct IntervalStats {
+ IntervalStats()
+ : numResultsFound(0),
+ numResultsBuffered(0),
+ minDistanceAllowed(-1),
+ maxDistanceAllowed(-1),
+ inclusiveMaxDistanceAllowed(false),
+ minDistanceFound(-1),
+ maxDistanceFound(-1),
+ minDistanceBuffered(-1),
+ maxDistanceBuffered(-1) {}
- class NearStats : public SpecificStats {
- public:
+ long long numResultsFound;
+ long long numResultsBuffered;
- NearStats() {}
+ double minDistanceAllowed;
+ double maxDistanceAllowed;
+ bool inclusiveMaxDistanceAllowed;
- virtual SpecificStats* clone() const {
- return new NearStats(*this);
- }
+ double minDistanceFound;
+ double maxDistanceFound;
+ double minDistanceBuffered;
+ double maxDistanceBuffered;
+};
- long long totalResultsFound() {
- long long totalResultsFound = 0;
- for (std::vector<IntervalStats>::iterator it = intervalStats.begin();
- it != intervalStats.end(); ++it) {
- totalResultsFound += it->numResultsFound;
- }
- return totalResultsFound;
- }
+class NearStats : public SpecificStats {
+public:
+ NearStats() {}
+
+ virtual SpecificStats* clone() const {
+ return new NearStats(*this);
+ }
- std::vector<IntervalStats> intervalStats;
- std::string indexName;
- BSONObj keyPattern;
- };
-
- struct UpdateStats : public SpecificStats {
- UpdateStats()
- : nMatched(0),
- nModified(0),
- isDocReplacement(false),
- fastmod(false),
- fastmodinsert(false),
- inserted(false),
- nInvalidateSkips(0) { }
-
- virtual SpecificStats* clone() const {
- return new UpdateStats(*this);
+ long long totalResultsFound() {
+ long long totalResultsFound = 0;
+ for (std::vector<IntervalStats>::iterator it = intervalStats.begin();
+ it != intervalStats.end();
+ ++it) {
+ totalResultsFound += it->numResultsFound;
}
+ return totalResultsFound;
+ }
- // The number of documents which match the query part of the update.
- size_t nMatched;
+ std::vector<IntervalStats> intervalStats;
+ std::string indexName;
+ BSONObj keyPattern;
+};
- // The number of documents modified by this update.
- size_t nModified;
+struct UpdateStats : public SpecificStats {
+ UpdateStats()
+ : nMatched(0),
+ nModified(0),
+ isDocReplacement(false),
+ fastmod(false),
+ fastmodinsert(false),
+ inserted(false),
+ nInvalidateSkips(0) {}
- // True iff this is a doc-replacement style update, as opposed to a $mod update.
- bool isDocReplacement;
+ virtual SpecificStats* clone() const {
+ return new UpdateStats(*this);
+ }
- // A 'fastmod' update is an in-place update that does not have to modify
- // any indices. It's "fast" because the only work needed is changing the bits
- // inside the document.
- bool fastmod;
+ // The number of documents which match the query part of the update.
+ size_t nMatched;
- // A 'fastmodinsert' is an insert resulting from an {upsert: true} update
- // which is a doc-replacement style update. It's "fast" because we don't need
- // to compute the document to insert based on the modifiers.
- bool fastmodinsert;
+ // The number of documents modified by this update.
+ size_t nModified;
- // Is this an {upsert: true} update that did an insert?
- bool inserted;
+ // True iff this is a doc-replacement style update, as opposed to a $mod update.
+ bool isDocReplacement;
- // The object that was inserted. This is an empty document if no insert was performed.
- BSONObj objInserted;
+ // A 'fastmod' update is an in-place update that does not have to modify
+ // any indices. It's "fast" because the only work needed is changing the bits
+ // inside the document.
+ bool fastmod;
- // Invalidated documents can be force-fetched, causing the now invalid RecordId to
- // be thrown out. The update stage skips over any results which do not have the
- // RecordId to update.
- size_t nInvalidateSkips;
- };
+ // A 'fastmodinsert' is an insert resulting from an {upsert: true} update
+ // which is a doc-replacement style update. It's "fast" because we don't need
+ // to compute the document to insert based on the modifiers.
+ bool fastmodinsert;
- struct TextStats : public SpecificStats {
- TextStats() : keysExamined(0), fetches(0), parsedTextQuery() { }
+ // Is this an {upsert: true} update that did an insert?
+ bool inserted;
- virtual SpecificStats* clone() const {
- TextStats* specific = new TextStats(*this);
- return specific;
- }
+ // The object that was inserted. This is an empty document if no insert was performed.
+ BSONObj objInserted;
+
+ // Invalidated documents can be force-fetched, causing the now invalid RecordId to
+ // be thrown out. The update stage skips over any results which do not have the
+ // RecordId to update.
+ size_t nInvalidateSkips;
+};
+
+struct TextStats : public SpecificStats {
+ TextStats() : keysExamined(0), fetches(0), parsedTextQuery() {}
+
+ virtual SpecificStats* clone() const {
+ TextStats* specific = new TextStats(*this);
+ return specific;
+ }
- std::string indexName;
+ std::string indexName;
- size_t keysExamined;
+ size_t keysExamined;
- size_t fetches;
+ size_t fetches;
- // Human-readable form of the FTSQuery associated with the text stage.
- BSONObj parsedTextQuery;
+ // Human-readable form of the FTSQuery associated with the text stage.
+ BSONObj parsedTextQuery;
- // Index keys that precede the "text" index key.
- BSONObj indexPrefix;
- };
+ // Index keys that precede the "text" index key.
+ BSONObj indexPrefix;
+};
} // namespace mongo
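The stats structs in plan_stats.h above all repeat the same prototype-style clone() idiom: each subclass copy-constructs itself and returns the copy through a SpecificStats pointer, so a whole PlanStageStats tree can be duplicated without knowing the concrete types. A minimal self-contained sketch of that idiom (hypothetical SpecificStatsDemo/DemoStats types, not part of the header):

#include <cstddef>

struct SpecificStatsDemo {
    virtual ~SpecificStatsDemo() {}
    virtual SpecificStatsDemo* clone() const = 0;
};

struct DemoStats : public SpecificStatsDemo {
    DemoStats() : counter(0) {}

    virtual SpecificStatsDemo* clone() const {
        // Copy-construct the concrete type; callers only ever see the base pointer.
        return new DemoStats(*this);
    }

    std::size_t counter;
};

int main() {
    DemoStats original;
    original.counter = 3;
    SpecificStatsDemo* copy = original.clone();  // deep copy through the base interface
    bool ok = (static_cast<DemoStats*>(copy)->counter == 3);
    delete copy;
    return ok ? 0 : 1;
}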
diff --git a/src/mongo/db/exec/plan_stats_test.cpp b/src/mongo/db/exec/plan_stats_test.cpp
index 805401ea9a5..02152308d12 100644
--- a/src/mongo/db/exec/plan_stats_test.cpp
+++ b/src/mongo/db/exec/plan_stats_test.cpp
@@ -38,70 +38,70 @@ using namespace mongo;
namespace {
- /**
- * Basic test on field initializers
- */
- TEST(CommonStatsTest, defaultValues) {
- CommonStats stats;
- ASSERT_EQUALS(stats.works, static_cast<size_t>(0));
- ASSERT_EQUALS(stats.yields, static_cast<size_t>(0));
- ASSERT_EQUALS(stats.invalidates, static_cast<size_t>(0));
- ASSERT_EQUALS(stats.advanced, static_cast<size_t>(0));
- ASSERT_EQUALS(stats.needTime, static_cast<size_t>(0));
- ASSERT_EQUALS(stats.needYield, static_cast<size_t>(0));
- ASSERT_FALSE(stats.isEOF);
- }
+/**
+ * Basic test on field initializers
+ */
+TEST(CommonStatsTest, defaultValues) {
+ CommonStats stats;
+ ASSERT_EQUALS(stats.works, static_cast<size_t>(0));
+ ASSERT_EQUALS(stats.yields, static_cast<size_t>(0));
+ ASSERT_EQUALS(stats.invalidates, static_cast<size_t>(0));
+ ASSERT_EQUALS(stats.advanced, static_cast<size_t>(0));
+ ASSERT_EQUALS(stats.needTime, static_cast<size_t>(0));
+ ASSERT_EQUALS(stats.needYield, static_cast<size_t>(0));
+ ASSERT_FALSE(stats.isEOF);
+}
- /**
- * Verifies null argument check in CommonStats::writeExplainTo
- */
- TEST(CommonStatsTest, writeExplainToNullBuilder) {
- CommonStats stats;
- stats.writeExplainTo(NULL);
- }
+/**
+ * Verifies null argument check in CommonStats::writeExplainTo
+ */
+TEST(CommonStatsTest, writeExplainToNullBuilder) {
+ CommonStats stats;
+ stats.writeExplainTo(NULL);
+}
- /**
- * Verifies null argument check in PlanStageStats::writeExplainTo
- */
- TEST(PlanStageStatsTest, writeExplainToNullBuilder) {
- CommonStats stats;
- PlanStageStats pss(stats);
- pss.writeExplainTo(NULL);
- }
+/**
+ * Verifies null argument check in PlanStageStats::writeExplainTo
+ */
+TEST(PlanStageStatsTest, writeExplainToNullBuilder) {
+ CommonStats stats;
+ PlanStageStats pss(stats);
+ pss.writeExplainTo(NULL);
+}
- /**
- * Checks BSON output of CommonStats::writeExplainTo to ensure it contains
- * correct values for CommonStats fields
- */
- TEST(CommonStatsTest, writeExplainTo) {
- CommonStats stats;
- stats.works = static_cast<size_t>(2);
- stats.advanced = static_cast<size_t>(3);
- BSONObjBuilder bob;
- stats.writeExplainTo(&bob);
- BSONObj obj = bob.done();
- ASSERT_TRUE(obj.hasField("works"));
- ASSERT_EQUALS(obj.getIntField("works"), 2);
- ASSERT_TRUE(obj.hasField("advanced"));
- ASSERT_EQUALS(obj.getIntField("advanced"), 3);
- }
+/**
+ * Checks BSON output of CommonStats::writeExplainTo to ensure it contains
+ * correct values for CommonStats fields
+ */
+TEST(CommonStatsTest, writeExplainTo) {
+ CommonStats stats;
+ stats.works = static_cast<size_t>(2);
+ stats.advanced = static_cast<size_t>(3);
+ BSONObjBuilder bob;
+ stats.writeExplainTo(&bob);
+ BSONObj obj = bob.done();
+ ASSERT_TRUE(obj.hasField("works"));
+ ASSERT_EQUALS(obj.getIntField("works"), 2);
+ ASSERT_TRUE(obj.hasField("advanced"));
+ ASSERT_EQUALS(obj.getIntField("advanced"), 3);
+}
- /**
- * Checks BSON output of PlanStageStats::writeExplainTo to ensure it contains
- * correct values for CommonStats fields
- */
- TEST(PlanStageStatsTest, writeExplainTo) {
- CommonStats stats;
- stats.works = static_cast<size_t>(2);
- stats.advanced = static_cast<size_t>(3);
- BSONObjBuilder bob;
- PlanStageStats pss(stats);
- pss.writeExplainTo(&bob);
- BSONObj obj = bob.done();
- ASSERT_TRUE(obj.hasField("works"));
- ASSERT_EQUALS(obj.getIntField("works"), 2);
- ASSERT_TRUE(obj.hasField("advanced"));
- ASSERT_EQUALS(obj.getIntField("advanced"), 3);
- }
+/**
+ * Checks BSON output of PlanStageStats::writeExplainTo to ensure it contains
+ * correct values for CommonStats fields
+ */
+TEST(PlanStageStatsTest, writeExplainTo) {
+ CommonStats stats;
+ stats.works = static_cast<size_t>(2);
+ stats.advanced = static_cast<size_t>(3);
+ BSONObjBuilder bob;
+ PlanStageStats pss(stats);
+ pss.writeExplainTo(&bob);
+ BSONObj obj = bob.done();
+ ASSERT_TRUE(obj.hasField("works"));
+ ASSERT_EQUALS(obj.getIntField("works"), 2);
+ ASSERT_TRUE(obj.hasField("advanced"));
+ ASSERT_EQUALS(obj.getIntField("advanced"), 3);
+}
} // namespace
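The tests above only cover CommonStats and PlanStageStats; a test for one of the per-stage structs would follow the same pattern. A hypothetical sketch (this test does not appear in plan_stats_test.cpp) checking the UpdateStats field initializers shown earlier, assuming the same unittest harness and headers as the file above:

/**
 * Hypothetical: verifies the default values set by the UpdateStats constructor
 * in plan_stats.h, in the same style as the tests above.
 */
TEST(UpdateStatsTest, defaultValues) {
    UpdateStats stats;
    ASSERT_EQUALS(stats.nMatched, static_cast<size_t>(0));
    ASSERT_EQUALS(stats.nModified, static_cast<size_t>(0));
    ASSERT_FALSE(stats.isDocReplacement);
    ASSERT_FALSE(stats.fastmod);
    ASSERT_FALSE(stats.fastmodinsert);
    ASSERT_FALSE(stats.inserted);
    ASSERT_EQUALS(stats.nInvalidateSkips, static_cast<size_t>(0));
}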
diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp
index 15d06963b31..65ab6a1323c 100644
--- a/src/mongo/db/exec/projection.cpp
+++ b/src/mongo/db/exec/projection.cpp
@@ -41,256 +41,241 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::vector;
-
- static const char* kIdField = "_id";
-
- // static
- const char* ProjectionStage::kStageType = "PROJECTION";
-
- ProjectionStage::ProjectionStage(const ProjectionStageParams& params,
- WorkingSet* ws,
- PlanStage* child)
- : _ws(ws),
- _child(child),
- _commonStats(kStageType),
- _projImpl(params.projImpl) {
-
- _projObj = params.projObj;
-
- if (ProjectionStageParams::NO_FAST_PATH == _projImpl) {
- _exec.reset(new ProjectionExec(params.projObj,
- params.fullExpression,
- *params.whereCallback));
- }
- else {
- // We shouldn't need the full expression if we're fast-pathing.
- invariant(NULL == params.fullExpression);
-
- // Sanity-check the input.
- invariant(_projObj.isOwned());
- invariant(!_projObj.isEmpty());
-
- // Figure out what fields are in the projection.
- getSimpleInclusionFields(_projObj, &_includedFields);
-
- // If we're pulling data out of one index we can pre-compute the indices of the fields
- // in the key that we pull data from and avoid looking up the field name each time.
- if (ProjectionStageParams::COVERED_ONE_INDEX == params.projImpl) {
- // Sanity-check.
- _coveredKeyObj = params.coveredKeyObj;
- invariant(_coveredKeyObj.isOwned());
-
- BSONObjIterator kpIt(_coveredKeyObj);
- while (kpIt.more()) {
- BSONElement elt = kpIt.next();
- unordered_set<StringData, StringData::Hasher>::iterator fieldIt;
- fieldIt = _includedFields.find(elt.fieldNameStringData());
-
- if (_includedFields.end() == fieldIt) {
- // Push an unused value on the back to keep _includeKey and _keyFieldNames
- // in sync.
- _keyFieldNames.push_back(StringData());
- _includeKey.push_back(false);
- }
- else {
- // If we are including this key field store its field name.
- _keyFieldNames.push_back(*fieldIt);
- _includeKey.push_back(true);
- }
+using std::unique_ptr;
+using std::endl;
+using std::vector;
+
+static const char* kIdField = "_id";
+
+// static
+const char* ProjectionStage::kStageType = "PROJECTION";
+
+ProjectionStage::ProjectionStage(const ProjectionStageParams& params,
+ WorkingSet* ws,
+ PlanStage* child)
+ : _ws(ws), _child(child), _commonStats(kStageType), _projImpl(params.projImpl) {
+ _projObj = params.projObj;
+
+ if (ProjectionStageParams::NO_FAST_PATH == _projImpl) {
+ _exec.reset(
+ new ProjectionExec(params.projObj, params.fullExpression, *params.whereCallback));
+ } else {
+ // We shouldn't need the full expression if we're fast-pathing.
+ invariant(NULL == params.fullExpression);
+
+ // Sanity-check the input.
+ invariant(_projObj.isOwned());
+ invariant(!_projObj.isEmpty());
+
+ // Figure out what fields are in the projection.
+ getSimpleInclusionFields(_projObj, &_includedFields);
+
+ // If we're pulling data out of one index we can pre-compute the indices of the fields
+ // in the key that we pull data from and avoid looking up the field name each time.
+ if (ProjectionStageParams::COVERED_ONE_INDEX == params.projImpl) {
+ // Sanity-check.
+ _coveredKeyObj = params.coveredKeyObj;
+ invariant(_coveredKeyObj.isOwned());
+
+ BSONObjIterator kpIt(_coveredKeyObj);
+ while (kpIt.more()) {
+ BSONElement elt = kpIt.next();
+ unordered_set<StringData, StringData::Hasher>::iterator fieldIt;
+ fieldIt = _includedFields.find(elt.fieldNameStringData());
+
+ if (_includedFields.end() == fieldIt) {
+ // Push an unused value on the back to keep _includeKey and _keyFieldNames
+ // in sync.
+ _keyFieldNames.push_back(StringData());
+ _includeKey.push_back(false);
+ } else {
+ // If we are including this key field store its field name.
+ _keyFieldNames.push_back(*fieldIt);
+ _includeKey.push_back(true);
}
}
- else {
- invariant(ProjectionStageParams::SIMPLE_DOC == params.projImpl);
- }
+ } else {
+ invariant(ProjectionStageParams::SIMPLE_DOC == params.projImpl);
}
}
-
- // static
- void ProjectionStage::getSimpleInclusionFields(const BSONObj& projObj,
- FieldSet* includedFields) {
- // The _id is included by default.
- bool includeId = true;
-
- // Figure out what fields are in the projection. TODO: we can get this from the
- // ParsedProjection...modify that to have this type instead of a vector.
- BSONObjIterator projObjIt(projObj);
- while (projObjIt.more()) {
- BSONElement elt = projObjIt.next();
- // Must deal with the _id case separately as there is an implicit _id: 1 in the
- // projection.
- if (mongoutils::str::equals(elt.fieldName(), kIdField)
- && !elt.trueValue()) {
- includeId = false;
- continue;
- }
- includedFields->insert(elt.fieldNameStringData());
- }
-
- if (includeId) {
- includedFields->insert(kIdField);
+}
+
+// static
+void ProjectionStage::getSimpleInclusionFields(const BSONObj& projObj, FieldSet* includedFields) {
+ // The _id is included by default.
+ bool includeId = true;
+
+ // Figure out what fields are in the projection. TODO: we can get this from the
+ // ParsedProjection...modify that to have this type instead of a vector.
+ BSONObjIterator projObjIt(projObj);
+ while (projObjIt.more()) {
+ BSONElement elt = projObjIt.next();
+ // Must deal with the _id case separately as there is an implicit _id: 1 in the
+ // projection.
+ if (mongoutils::str::equals(elt.fieldName(), kIdField) && !elt.trueValue()) {
+ includeId = false;
+ continue;
}
+ includedFields->insert(elt.fieldNameStringData());
}
- // static
- void ProjectionStage::transformSimpleInclusion(const BSONObj& in,
- const FieldSet& includedFields,
- BSONObjBuilder& bob) {
- // Look at every field in the source document and see if we're including it.
- BSONObjIterator inputIt(in);
- while (inputIt.more()) {
- BSONElement elt = inputIt.next();
- unordered_set<StringData, StringData::Hasher>::const_iterator fieldIt;
- fieldIt = includedFields.find(elt.fieldNameStringData());
- if (includedFields.end() != fieldIt) {
- // If so, add it to the builder.
- bob.append(elt);
- }
- }
+ if (includeId) {
+ includedFields->insert(kIdField);
}
-
- Status ProjectionStage::transform(WorkingSetMember* member) {
- // The default no-fast-path case.
- if (ProjectionStageParams::NO_FAST_PATH == _projImpl) {
- return _exec->transform(member);
- }
-
- BSONObjBuilder bob;
-
- // Note that even if our fast path analysis is bug-free something that is
- // covered might be invalidated and just be an obj. In this case we just go
- // through the SIMPLE_DOC path which is still correct if the covered data
- // is not available.
- //
- // SIMPLE_DOC implies that we expect an object so it's kind of redundant.
- if ((ProjectionStageParams::SIMPLE_DOC == _projImpl) || member->hasObj()) {
- // If we got here because of SIMPLE_DOC the planner shouldn't have messed up.
- invariant(member->hasObj());
-
- // Apply the SIMPLE_DOC projection.
- transformSimpleInclusion(member->obj.value(), _includedFields, bob);
+}
+
+// static
+void ProjectionStage::transformSimpleInclusion(const BSONObj& in,
+ const FieldSet& includedFields,
+ BSONObjBuilder& bob) {
+ // Look at every field in the source document and see if we're including it.
+ BSONObjIterator inputIt(in);
+ while (inputIt.more()) {
+ BSONElement elt = inputIt.next();
+ unordered_set<StringData, StringData::Hasher>::const_iterator fieldIt;
+ fieldIt = includedFields.find(elt.fieldNameStringData());
+ if (includedFields.end() != fieldIt) {
+ // If so, add it to the builder.
+ bob.append(elt);
}
- else {
- invariant(ProjectionStageParams::COVERED_ONE_INDEX == _projImpl);
- // We're pulling data out of the key.
- invariant(1 == member->keyData.size());
- size_t keyIndex = 0;
-
- // Look at every key element...
- BSONObjIterator keyIterator(member->keyData[0].keyData);
- while (keyIterator.more()) {
- BSONElement elt = keyIterator.next();
- // If we're supposed to include it...
- if (_includeKey[keyIndex]) {
- // Do so.
- bob.appendAs(elt, _keyFieldNames[keyIndex]);
- }
- ++keyIndex;
- }
- }
-
- member->state = WorkingSetMember::OWNED_OBJ;
- member->keyData.clear();
- member->loc = RecordId();
- member->obj = Snapshotted<BSONObj>(SnapshotId(), bob.obj());
- return Status::OK();
}
+}
- ProjectionStage::~ProjectionStage() { }
-
- bool ProjectionStage::isEOF() { return _child->isEOF(); }
-
- PlanStage::StageState ProjectionStage::work(WorkingSetID* out) {
- ++_commonStats.works;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState status = _child->work(&id);
-
- // Note that we don't do the normal if isEOF() return EOF thing here. Our child might be a
- // tailable cursor and isEOF() would be true even if it had more data...
- if (PlanStage::ADVANCED == status) {
- WorkingSetMember* member = _ws->get(id);
- // Punt to our specific projection impl.
- Status projStatus = transform(member);
- if (!projStatus.isOK()) {
- warning() << "Couldn't execute projection, status = "
- << projStatus.toString() << endl;
- *out = WorkingSetCommon::allocateStatusMember(_ws, projStatus);
- return PlanStage::FAILURE;
- }
+Status ProjectionStage::transform(WorkingSetMember* member) {
+ // The default no-fast-path case.
+ if (ProjectionStageParams::NO_FAST_PATH == _projImpl) {
+ return _exec->transform(member);
+ }
- *out = id;
- ++_commonStats.advanced;
- }
- else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "projection stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
+ BSONObjBuilder bob;
+
+ // Note that even if our fast path analysis is bug-free something that is
+ // covered might be invalidated and just be an obj. In this case we just go
+ // through the SIMPLE_DOC path which is still correct if the covered data
+ // is not available.
+ //
+ // SIMPLE_DOC implies that we expect an object so it's kind of redundant.
+ if ((ProjectionStageParams::SIMPLE_DOC == _projImpl) || member->hasObj()) {
+ // If we got here because of SIMPLE_DOC the planner shouldn't have messed up.
+ invariant(member->hasObj());
+
+ // Apply the SIMPLE_DOC projection.
+ transformSimpleInclusion(member->obj.value(), _includedFields, bob);
+ } else {
+ invariant(ProjectionStageParams::COVERED_ONE_INDEX == _projImpl);
+ // We're pulling data out of the key.
+ invariant(1 == member->keyData.size());
+ size_t keyIndex = 0;
+
+ // Look at every key element...
+ BSONObjIterator keyIterator(member->keyData[0].keyData);
+ while (keyIterator.more()) {
+ BSONElement elt = keyIterator.next();
+ // If we're supposed to include it...
+ if (_includeKey[keyIndex]) {
+ // Do so.
+ bob.appendAs(elt, _keyFieldNames[keyIndex]);
}
+ ++keyIndex;
}
- else if (PlanStage::NEED_TIME == status) {
- _commonStats.needTime++;
- }
- else if (PlanStage::NEED_YIELD == status) {
- _commonStats.needYield++;
- *out = id;
- }
-
- return status;
}
- void ProjectionStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
- }
-
- void ProjectionStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
- }
-
- void ProjectionStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
- }
-
- vector<PlanStage*> ProjectionStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
-
- PlanStageStats* ProjectionStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_PROJECTION));
-
- ProjectionStats* projStats = new ProjectionStats(_specificStats);
- projStats->projObj = _projObj;
- ret->specific.reset(projStats);
-
- ret->children.push_back(_child->getStats());
- return ret.release();
- }
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->keyData.clear();
+ member->loc = RecordId();
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), bob.obj());
+ return Status::OK();
+}
+
+ProjectionStage::~ProjectionStage() {}
+
+bool ProjectionStage::isEOF() {
+ return _child->isEOF();
+}
+
+PlanStage::StageState ProjectionStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
+
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState status = _child->work(&id);
+
+ // Note that we don't do the normal if isEOF() return EOF thing here. Our child might be a
+ // tailable cursor and isEOF() would be true even if it had more data...
+ if (PlanStage::ADVANCED == status) {
+ WorkingSetMember* member = _ws->get(id);
+ // Punt to our specific projection impl.
+ Status projStatus = transform(member);
+ if (!projStatus.isOK()) {
+ warning() << "Couldn't execute projection, status = " << projStatus.toString() << endl;
+ *out = WorkingSetCommon::allocateStatusMember(_ws, projStatus);
+ return PlanStage::FAILURE;
+ }
- const CommonStats* ProjectionStage::getCommonStats() const {
- return &_commonStats;
+ *out = id;
+ ++_commonStats.advanced;
+ } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "projection stage failed to read in results from child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ }
+ } else if (PlanStage::NEED_TIME == status) {
+ _commonStats.needTime++;
+ } else if (PlanStage::NEED_YIELD == status) {
+ _commonStats.needYield++;
+ *out = id;
}
- const SpecificStats* ProjectionStage::getSpecificStats() const {
- return &_specificStats;
- }
+ return status;
+}
+
+void ProjectionStage::saveState() {
+ ++_commonStats.yields;
+ _child->saveState();
+}
+
+void ProjectionStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ _child->restoreState(opCtx);
+}
+
+void ProjectionStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
+}
+
+vector<PlanStage*> ProjectionStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* ProjectionStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_PROJECTION));
+
+ ProjectionStats* projStats = new ProjectionStats(_specificStats);
+ projStats->projObj = _projObj;
+ ret->specific.reset(projStats);
+
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+const CommonStats* ProjectionStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* ProjectionStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
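transformSimpleInclusion in projection.cpp above is a single pass over the source document that copies an element only when its field name is in the precomputed _includedFields set, with _id included by default. A self-contained sketch of the same walk, using std::map and std::set as stand-ins for BSONObj and the StringData hash set (hypothetical simpleInclusion helper):

#include <map>
#include <set>
#include <string>

// Stand-in for the SIMPLE_DOC fast path: copy a field only if the projection
// names it. (A real BSONObj preserves insertion order; std::map orders by key,
// which is good enough for illustration.)
std::map<std::string, std::string> simpleInclusion(const std::map<std::string, std::string>& in,
                                                   const std::set<std::string>& includedFields) {
    std::map<std::string, std::string> out;
    for (std::map<std::string, std::string>::const_iterator it = in.begin(); it != in.end();
         ++it) {
        if (includedFields.count(it->first)) {
            out[it->first] = it->second;
        }
    }
    return out;
}

int main() {
    std::map<std::string, std::string> doc;
    doc["_id"] = "1";
    doc["a"] = "x";
    doc["b"] = "y";

    std::set<std::string> fields;
    fields.insert("_id");  // implicit {_id: 1} unless the projection says {_id: 0}
    fields.insert("a");

    std::map<std::string, std::string> projected = simpleInclusion(doc, fields);
    return projected.size() == 2 ? 0 : 1;  // keeps _id and a, drops b
}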
diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h
index d70967eebe1..b09ef956cd2 100644
--- a/src/mongo/db/exec/projection.h
+++ b/src/mongo/db/exec/projection.h
@@ -37,123 +37,122 @@
namespace mongo {
- struct ProjectionStageParams {
- enum ProjectionImplementation {
- // The default case. Will handle every projection.
- NO_FAST_PATH,
+struct ProjectionStageParams {
+ enum ProjectionImplementation {
+ // The default case. Will handle every projection.
+ NO_FAST_PATH,
- // The projection is simple inclusion and is totally covered by one index.
- COVERED_ONE_INDEX,
+ // The projection is simple inclusion and is totally covered by one index.
+ COVERED_ONE_INDEX,
- // The projection is simple inclusion and we expect an object.
- SIMPLE_DOC
- };
+ // The projection is simple inclusion and we expect an object.
+ SIMPLE_DOC
+ };
- ProjectionStageParams(const MatchExpressionParser::WhereCallback& wc)
- : projImpl(NO_FAST_PATH), fullExpression(NULL), whereCallback(&wc) { }
+ ProjectionStageParams(const MatchExpressionParser::WhereCallback& wc)
+ : projImpl(NO_FAST_PATH), fullExpression(NULL), whereCallback(&wc) {}
- ProjectionImplementation projImpl;
+ ProjectionImplementation projImpl;
- // The projection object. We lack a ProjectionExpression or similar so we use a BSONObj.
- BSONObj projObj;
+ // The projection object. We lack a ProjectionExpression or similar so we use a BSONObj.
+ BSONObj projObj;
- // If we have a positional or elemMatch projection we need a MatchExpression to pull out the
- // right data.
- // Not owned here, we do not take ownership.
- const MatchExpression* fullExpression;
+ // If we have a positional or elemMatch projection we need a MatchExpression to pull out the
+ // right data.
+ // Not owned here, we do not take ownership.
+ const MatchExpression* fullExpression;
- // If (COVERED_ONE_INDEX == projObj) this is the key pattern we're extracting covered data
- // from. Otherwise, this field is ignored.
- BSONObj coveredKeyObj;
+ // If (COVERED_ONE_INDEX == projObj) this is the key pattern we're extracting covered data
+ // from. Otherwise, this field is ignored.
+ BSONObj coveredKeyObj;
- // Used for creating context for the $where clause processing. Not owned.
- const MatchExpressionParser::WhereCallback* whereCallback;
- };
+ // Used for creating context for the $where clause processing. Not owned.
+ const MatchExpressionParser::WhereCallback* whereCallback;
+};
- /**
- * This stage computes a projection.
- */
- class ProjectionStage : public PlanStage {
- public:
- ProjectionStage(const ProjectionStageParams& params,
- WorkingSet* ws,
- PlanStage* child);
+/**
+ * This stage computes a projection.
+ */
+class ProjectionStage : public PlanStage {
+public:
+ ProjectionStage(const ProjectionStageParams& params, WorkingSet* ws, PlanStage* child);
- virtual ~ProjectionStage();
+ virtual ~ProjectionStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_PROJECTION; }
+ virtual StageType stageType() const {
+ return STAGE_PROJECTION;
+ }
- PlanStageStats* getStats();
+ PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- typedef unordered_set<StringData, StringData::Hasher> FieldSet;
+ typedef unordered_set<StringData, StringData::Hasher> FieldSet;
- /**
- * Given the projection spec for a simple inclusion projection,
- * 'projObj', populates 'includedFields' with the set of field
- * names to be included.
- */
- static void getSimpleInclusionFields(const BSONObj& projObj,
- FieldSet* includedFields);
+ /**
+ * Given the projection spec for a simple inclusion projection,
+ * 'projObj', populates 'includedFields' with the set of field
+ * names to be included.
+ */
+ static void getSimpleInclusionFields(const BSONObj& projObj, FieldSet* includedFields);
- /**
- * Applies a simple inclusion projection to 'in', including
- * only the fields specified by 'includedFields'.
- *
- * The resulting document is constructed using 'bob'.
- */
- static void transformSimpleInclusion(const BSONObj& in,
- const FieldSet& includedFields,
- BSONObjBuilder& bob);
+ /**
+ * Applies a simple inclusion projection to 'in', including
+ * only the fields specified by 'includedFields'.
+ *
+ * The resulting document is constructed using 'bob'.
+ */
+ static void transformSimpleInclusion(const BSONObj& in,
+ const FieldSet& includedFields,
+ BSONObjBuilder& bob);
- static const char* kStageType;
+ static const char* kStageType;
- private:
- Status transform(WorkingSetMember* member);
+private:
+ Status transform(WorkingSetMember* member);
- std::unique_ptr<ProjectionExec> _exec;
+ std::unique_ptr<ProjectionExec> _exec;
- // _ws is not owned by us.
- WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
+ // _ws is not owned by us.
+ WorkingSet* _ws;
+ std::unique_ptr<PlanStage> _child;
- // Stats
- CommonStats _commonStats;
- ProjectionStats _specificStats;
+ // Stats
+ CommonStats _commonStats;
+ ProjectionStats _specificStats;
- // Fast paths:
- ProjectionStageParams::ProjectionImplementation _projImpl;
+ // Fast paths:
+ ProjectionStageParams::ProjectionImplementation _projImpl;
- // Used by all projection implementations.
- BSONObj _projObj;
+ // Used by all projection implementations.
+ BSONObj _projObj;
- // Data used for both SIMPLE_DOC and COVERED_ONE_INDEX paths.
- // Has the field names present in the simple projection.
- unordered_set<StringData, StringData::Hasher> _includedFields;
+ // Data used for both SIMPLE_DOC and COVERED_ONE_INDEX paths.
+ // Has the field names present in the simple projection.
+ unordered_set<StringData, StringData::Hasher> _includedFields;
- //
- // Used for the COVERED_ONE_INDEX path.
- //
- BSONObj _coveredKeyObj;
+ //
+ // Used for the COVERED_ONE_INDEX path.
+ //
+ BSONObj _coveredKeyObj;
- // Field names can be empty in 2.4 and before so we can't use them as a sentinel value.
- // If the i-th entry is true we include the i-th field in the key.
- std::vector<bool> _includeKey;
+ // Field names can be empty in 2.4 and before so we can't use them as a sentinel value.
+ // If the i-th entry is true we include the i-th field in the key.
+ std::vector<bool> _includeKey;
- // If the i-th entry of _includeKey is true this is the field name for the i-th key field.
- std::vector<StringData> _keyFieldNames;
- };
+ // If the i-th entry of _includeKey is true this is the field name for the i-th key field.
+ std::vector<StringData> _keyFieldNames;
+};
} // namespace mongo
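For the COVERED_ONE_INDEX path declared here, the constructor walks the covered key pattern once and fills two parallel vectors: _includeKey[i] records whether the i-th key element is projected and _keyFieldNames[i] carries its output name, so later extraction is purely positional. A self-contained sketch of that precompute-then-extract shape, with standard containers standing in for BSON key data and hypothetical helper names:

#include <cstddef>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Precompute: for each field of the covered key pattern, record whether it is
// projected and, if so, under which name. Excluded slots get a placeholder so
// the two vectors stay in sync, mirroring _includeKey / _keyFieldNames.
void precomputeKeyFields(const std::vector<std::string>& keyPattern,
                         const std::set<std::string>& includedFields,
                         std::vector<bool>* includeKey,
                         std::vector<std::string>* keyFieldNames) {
    for (std::size_t i = 0; i < keyPattern.size(); ++i) {
        bool included = includedFields.count(keyPattern[i]) > 0;
        includeKey->push_back(included);
        keyFieldNames->push_back(included ? keyPattern[i] : std::string());
    }
}

// Extract: walk the key values positionally; no per-field name lookup is needed.
std::vector<std::pair<std::string, int> > extractCoveredFields(
    const std::vector<int>& keyValues,
    const std::vector<bool>& includeKey,
    const std::vector<std::string>& keyFieldNames) {
    std::vector<std::pair<std::string, int> > out;
    for (std::size_t i = 0; i < keyValues.size() && i < includeKey.size(); ++i) {
        if (includeKey[i]) {
            out.push_back(std::make_pair(keyFieldNames[i], keyValues[i]));
        }
    }
    return out;
}

int main() {
    std::vector<std::string> keyPattern;
    keyPattern.push_back("a");
    keyPattern.push_back("b");

    std::set<std::string> included;
    included.insert("b");

    std::vector<bool> includeKey;
    std::vector<std::string> keyFieldNames;
    precomputeKeyFields(keyPattern, included, &includeKey, &keyFieldNames);

    std::vector<int> key;
    key.push_back(7);
    key.push_back(9);
    std::vector<std::pair<std::string, int> > out =
        extractCoveredFields(key, includeKey, keyFieldNames);
    return (out.size() == 1 && out[0].first == "b" && out[0].second == 9) ? 0 : 1;
}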
diff --git a/src/mongo/db/exec/projection_exec.cpp b/src/mongo/db/exec/projection_exec.cpp
index a200f17d381..80a2d8772af 100644
--- a/src/mongo/db/exec/projection_exec.cpp
+++ b/src/mongo/db/exec/projection_exec.cpp
@@ -36,433 +36,405 @@
namespace mongo {
- using std::max;
- using std::string;
-
- ProjectionExec::ProjectionExec()
- : _include(true),
- _special(false),
- _includeID(true),
- _skip(0),
- _limit(-1),
- _arrayOpType(ARRAY_OP_NORMAL),
- _hasNonSimple(false),
- _hasDottedField(false),
- _queryExpression(NULL),
- _hasReturnKey(false) { }
-
-
- ProjectionExec::ProjectionExec(const BSONObj& spec,
- const MatchExpression* queryExpression,
- const MatchExpressionParser::WhereCallback& whereCallback)
- : _include(true),
- _special(false),
- _source(spec),
- _includeID(true),
- _skip(0),
- _limit(-1),
- _arrayOpType(ARRAY_OP_NORMAL),
- _hasNonSimple(false),
- _hasDottedField(false),
- _queryExpression(queryExpression),
- _hasReturnKey(false) {
-
- // Are we including or excluding fields?
- // -1 when we haven't initialized it.
- // 1 when we're including
- // 0 when we're excluding.
- int include_exclude = -1;
-
- BSONObjIterator it(_source);
- while (it.more()) {
- BSONElement e = it.next();
-
- if (!e.isNumber() && !e.isBoolean()) {
- _hasNonSimple = true;
- }
+using std::max;
+using std::string;
+
+ProjectionExec::ProjectionExec()
+ : _include(true),
+ _special(false),
+ _includeID(true),
+ _skip(0),
+ _limit(-1),
+ _arrayOpType(ARRAY_OP_NORMAL),
+ _hasNonSimple(false),
+ _hasDottedField(false),
+ _queryExpression(NULL),
+ _hasReturnKey(false) {}
+
+
+ProjectionExec::ProjectionExec(const BSONObj& spec,
+ const MatchExpression* queryExpression,
+ const MatchExpressionParser::WhereCallback& whereCallback)
+ : _include(true),
+ _special(false),
+ _source(spec),
+ _includeID(true),
+ _skip(0),
+ _limit(-1),
+ _arrayOpType(ARRAY_OP_NORMAL),
+ _hasNonSimple(false),
+ _hasDottedField(false),
+ _queryExpression(queryExpression),
+ _hasReturnKey(false) {
+ // Are we including or excluding fields?
+ // -1 when we haven't initialized it.
+ // 1 when we're including
+ // 0 when we're excluding.
+ int include_exclude = -1;
+
+ BSONObjIterator it(_source);
+ while (it.more()) {
+ BSONElement e = it.next();
+
+ if (!e.isNumber() && !e.isBoolean()) {
+ _hasNonSimple = true;
+ }
- if (Object == e.type()) {
- BSONObj obj = e.embeddedObject();
- verify(1 == obj.nFields());
-
- BSONElement e2 = obj.firstElement();
- if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
- if (e2.isNumber()) {
- int i = e2.numberInt();
- if (i < 0) {
- add(e.fieldName(), i, -i); // limit is now positive
- }
- else {
- add(e.fieldName(), 0, i);
- }
+ if (Object == e.type()) {
+ BSONObj obj = e.embeddedObject();
+ verify(1 == obj.nFields());
+
+ BSONElement e2 = obj.firstElement();
+ if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
+ if (e2.isNumber()) {
+ int i = e2.numberInt();
+ if (i < 0) {
+ add(e.fieldName(), i, -i); // limit is now positive
+ } else {
+ add(e.fieldName(), 0, i);
}
- else {
- verify(e2.type() == Array);
- BSONObj arr = e2.embeddedObject();
- verify(2 == arr.nFields());
+ } else {
+ verify(e2.type() == Array);
+ BSONObj arr = e2.embeddedObject();
+ verify(2 == arr.nFields());
- BSONObjIterator it(arr);
- int skip = it.next().numberInt();
- int limit = it.next().numberInt();
+ BSONObjIterator it(arr);
+ int skip = it.next().numberInt();
+ int limit = it.next().numberInt();
- verify(limit > 0);
+ verify(limit > 0);
- add(e.fieldName(), skip, limit);
- }
- }
- else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
- _arrayOpType = ARRAY_OP_ELEM_MATCH;
-
- // Create a MatchExpression for the elemMatch.
- BSONObj elemMatchObj = e.wrap();
- verify(elemMatchObj.isOwned());
- _elemMatchObjs.push_back(elemMatchObj);
- StatusWithMatchExpression swme = MatchExpressionParser::parse(elemMatchObj,
- whereCallback);
- verify(swme.isOK());
- // And store it in _matchers.
- _matchers[mongoutils::str::before(e.fieldName(), '.').c_str()]
- = swme.getValue();
-
- add(e.fieldName(), true);
- }
- else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
- verify(String == e2.type());
- if (e2.valuestr() == LiteParsedQuery::metaTextScore) {
- _meta[e.fieldName()] = META_TEXT_SCORE;
- }
- else if (e2.valuestr() == LiteParsedQuery::metaRecordId) {
- _meta[e.fieldName()] = META_RECORDID;
- }
- else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
- _meta[e.fieldName()] = META_GEONEAR_POINT;
- }
- else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
- _meta[e.fieldName()] = META_GEONEAR_DIST;
- }
- else if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
- _hasReturnKey = true;
- // The index key clobbers everything so just stop parsing here.
- return;
- }
- else {
- // This shouldn't happen, should be caught by parsing.
- verify(0);
- }
+ add(e.fieldName(), skip, limit);
}
- else {
+ } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
+ _arrayOpType = ARRAY_OP_ELEM_MATCH;
+
+ // Create a MatchExpression for the elemMatch.
+ BSONObj elemMatchObj = e.wrap();
+ verify(elemMatchObj.isOwned());
+ _elemMatchObjs.push_back(elemMatchObj);
+ StatusWithMatchExpression swme =
+ MatchExpressionParser::parse(elemMatchObj, whereCallback);
+ verify(swme.isOK());
+ // And store it in _matchers.
+ _matchers[mongoutils::str::before(e.fieldName(), '.').c_str()] = swme.getValue();
+
+ add(e.fieldName(), true);
+ } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
+ verify(String == e2.type());
+ if (e2.valuestr() == LiteParsedQuery::metaTextScore) {
+ _meta[e.fieldName()] = META_TEXT_SCORE;
+ } else if (e2.valuestr() == LiteParsedQuery::metaRecordId) {
+ _meta[e.fieldName()] = META_RECORDID;
+ } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
+ _meta[e.fieldName()] = META_GEONEAR_POINT;
+ } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
+ _meta[e.fieldName()] = META_GEONEAR_DIST;
+ } else if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
+ _hasReturnKey = true;
+ // The index key clobbers everything so just stop parsing here.
+ return;
+ } else {
+ // This shouldn't happen, should be caught by parsing.
verify(0);
}
+ } else {
+ verify(0);
}
- else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
- _includeID = false;
+ } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
+ _includeID = false;
+ } else {
+ add(e.fieldName(), e.trueValue());
+
+ // Projections of dotted fields aren't covered.
+ if (mongoutils::str::contains(e.fieldName(), '.')) {
+ _hasDottedField = true;
}
- else {
- add(e.fieldName(), e.trueValue());
-
- // Projections of dotted fields aren't covered.
- if (mongoutils::str::contains(e.fieldName(), '.')) {
- _hasDottedField = true;
- }
- // Validate input.
- if (include_exclude == -1) {
- // If we haven't specified an include/exclude, initialize include_exclude.
- // We expect further include/excludes to match it.
- include_exclude = e.trueValue();
- _include = !e.trueValue();
- }
- }
-
- if (mongoutils::str::contains(e.fieldName(), ".$")) {
- _arrayOpType = ARRAY_OP_POSITIONAL;
+ // Validate input.
+ if (include_exclude == -1) {
+ // If we haven't specified an include/exclude, initialize include_exclude.
+ // We expect further include/excludes to match it.
+ include_exclude = e.trueValue();
+ _include = !e.trueValue();
}
}
- }
- ProjectionExec::~ProjectionExec() {
- for (FieldMap::const_iterator it = _fields.begin(); it != _fields.end(); ++it) {
- delete it->second;
+ if (mongoutils::str::contains(e.fieldName(), ".$")) {
+ _arrayOpType = ARRAY_OP_POSITIONAL;
}
+ }
+}
- for (Matchers::const_iterator it = _matchers.begin(); it != _matchers.end(); ++it) {
- delete it->second;
- }
+ProjectionExec::~ProjectionExec() {
+ for (FieldMap::const_iterator it = _fields.begin(); it != _fields.end(); ++it) {
+ delete it->second;
}
- void ProjectionExec::add(const string& field, bool include) {
- if (field.empty()) { // this is the field the user referred to
- _include = include;
- }
- else {
- _include = !include;
+ for (Matchers::const_iterator it = _matchers.begin(); it != _matchers.end(); ++it) {
+ delete it->second;
+ }
+}
- const size_t dot = field.find('.');
- const string subfield = field.substr(0,dot);
- const string rest = (dot == string::npos ? "" : field.substr(dot + 1, string::npos));
+void ProjectionExec::add(const string& field, bool include) {
+ if (field.empty()) { // this is the field the user referred to
+ _include = include;
+ } else {
+ _include = !include;
- ProjectionExec*& fm = _fields[subfield.c_str()];
+ const size_t dot = field.find('.');
+ const string subfield = field.substr(0, dot);
+ const string rest = (dot == string::npos ? "" : field.substr(dot + 1, string::npos));
- if (NULL == fm) {
- fm = new ProjectionExec();
- }
+ ProjectionExec*& fm = _fields[subfield.c_str()];
- fm->add(rest, include);
+ if (NULL == fm) {
+ fm = new ProjectionExec();
}
- }
- void ProjectionExec::add(const string& field, int skip, int limit) {
- _special = true; // can't include or exclude whole object
+ fm->add(rest, include);
+ }
+}
- if (field.empty()) { // this is the field the user referred to
- _skip = skip;
- _limit = limit;
- }
- else {
- const size_t dot = field.find('.');
- const string subfield = field.substr(0,dot);
- const string rest = (dot == string::npos ? "" : field.substr(dot + 1, string::npos));
+void ProjectionExec::add(const string& field, int skip, int limit) {
+ _special = true; // can't include or exclude whole object
- ProjectionExec*& fm = _fields[subfield.c_str()];
+ if (field.empty()) { // this is the field the user referred to
+ _skip = skip;
+ _limit = limit;
+ } else {
+ const size_t dot = field.find('.');
+ const string subfield = field.substr(0, dot);
+ const string rest = (dot == string::npos ? "" : field.substr(dot + 1, string::npos));
- if (NULL == fm) {
- fm = new ProjectionExec();
- }
+ ProjectionExec*& fm = _fields[subfield.c_str()];
- fm->add(rest, skip, limit);
+ if (NULL == fm) {
+ fm = new ProjectionExec();
}
- }
- //
- // Execution
- //
+ fm->add(rest, skip, limit);
+ }
+}
- Status ProjectionExec::transform(WorkingSetMember* member) const {
- if (_hasReturnKey) {
- BSONObj keyObj;
+//
+// Execution
+//
- if (member->hasComputed(WSM_INDEX_KEY)) {
- const IndexKeyComputedData* key
- = static_cast<const IndexKeyComputedData*>(member->getComputed(WSM_INDEX_KEY));
- keyObj = key->getKey();
- }
+Status ProjectionExec::transform(WorkingSetMember* member) const {
+ if (_hasReturnKey) {
+ BSONObj keyObj;
- member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), keyObj);
- member->keyData.clear();
- member->loc = RecordId();
- return Status::OK();
+ if (member->hasComputed(WSM_INDEX_KEY)) {
+ const IndexKeyComputedData* key =
+ static_cast<const IndexKeyComputedData*>(member->getComputed(WSM_INDEX_KEY));
+ keyObj = key->getKey();
}
- BSONObjBuilder bob;
- if (member->hasObj()) {
- MatchDetails matchDetails;
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), keyObj);
+ member->keyData.clear();
+ member->loc = RecordId();
+ return Status::OK();
+ }
- // If it's a positional projection we need a MatchDetails.
- if (transformRequiresDetails()) {
- matchDetails.requestElemMatchKey();
- verify(NULL != _queryExpression);
- verify(_queryExpression->matchesBSON(member->obj.value(), &matchDetails));
- }
+ BSONObjBuilder bob;
+ if (member->hasObj()) {
+ MatchDetails matchDetails;
- Status projStatus = transform(member->obj.value(), &bob, &matchDetails);
- if (!projStatus.isOK()) {
- return projStatus;
- }
+ // If it's a positional projection we need a MatchDetails.
+ if (transformRequiresDetails()) {
+ matchDetails.requestElemMatchKey();
+ verify(NULL != _queryExpression);
+ verify(_queryExpression->matchesBSON(member->obj.value(), &matchDetails));
}
- else {
- verify(!requiresDocument());
- // Go field by field.
- if (_includeID) {
- BSONElement elt;
- // Sometimes the _id field doesn't exist...
- if (member->getFieldDotted("_id", &elt) && !elt.eoo()) {
- bob.appendAs(elt, "_id");
- }
+
+ Status projStatus = transform(member->obj.value(), &bob, &matchDetails);
+ if (!projStatus.isOK()) {
+ return projStatus;
+ }
+ } else {
+ verify(!requiresDocument());
+ // Go field by field.
+ if (_includeID) {
+ BSONElement elt;
+ // Sometimes the _id field doesn't exist...
+ if (member->getFieldDotted("_id", &elt) && !elt.eoo()) {
+ bob.appendAs(elt, "_id");
}
+ }
- BSONObjIterator it(_source);
- while (it.more()) {
- BSONElement specElt = it.next();
- if (mongoutils::str::equals("_id", specElt.fieldName())) {
- continue;
- }
+ BSONObjIterator it(_source);
+ while (it.more()) {
+ BSONElement specElt = it.next();
+ if (mongoutils::str::equals("_id", specElt.fieldName())) {
+ continue;
+ }
- BSONElement keyElt;
- // We can project a field that doesn't exist. We just ignore it.
- if (member->getFieldDotted(specElt.fieldName(), &keyElt) && !keyElt.eoo()) {
- bob.appendAs(keyElt, specElt.fieldName());
- }
+ BSONElement keyElt;
+ // We can project a field that doesn't exist. We just ignore it.
+ if (member->getFieldDotted(specElt.fieldName(), &keyElt) && !keyElt.eoo()) {
+ bob.appendAs(keyElt, specElt.fieldName());
}
}
+ }
- for (MetaMap::const_iterator it = _meta.begin(); it != _meta.end(); ++it) {
- if (META_GEONEAR_DIST == it->second) {
- if (member->hasComputed(WSM_COMPUTED_GEO_DISTANCE)) {
- const GeoDistanceComputedData* dist
- = static_cast<const GeoDistanceComputedData*>(
- member->getComputed(WSM_COMPUTED_GEO_DISTANCE));
- bob.append(it->first, dist->getDist());
- }
- else {
- return Status(ErrorCodes::InternalError,
- "near loc dist requested but no data available");
- }
+ for (MetaMap::const_iterator it = _meta.begin(); it != _meta.end(); ++it) {
+ if (META_GEONEAR_DIST == it->second) {
+ if (member->hasComputed(WSM_COMPUTED_GEO_DISTANCE)) {
+ const GeoDistanceComputedData* dist = static_cast<const GeoDistanceComputedData*>(
+ member->getComputed(WSM_COMPUTED_GEO_DISTANCE));
+ bob.append(it->first, dist->getDist());
+ } else {
+ return Status(ErrorCodes::InternalError,
+ "near loc dist requested but no data available");
}
- else if (META_GEONEAR_POINT == it->second) {
- if (member->hasComputed(WSM_GEO_NEAR_POINT)) {
- const GeoNearPointComputedData* point
- = static_cast<const GeoNearPointComputedData*>(
- member->getComputed(WSM_GEO_NEAR_POINT));
- BSONObj ptObj = point->getPoint();
- if (ptObj.couldBeArray()) {
- bob.appendArray(it->first, ptObj);
- }
- else {
- bob.append(it->first, ptObj);
- }
- }
- else {
- return Status(ErrorCodes::InternalError,
- "near loc proj requested but no data available");
- }
- }
- else if (META_TEXT_SCORE == it->second) {
- if (member->hasComputed(WSM_COMPUTED_TEXT_SCORE)) {
- const TextScoreComputedData* score
- = static_cast<const TextScoreComputedData*>(
- member->getComputed(WSM_COMPUTED_TEXT_SCORE));
- bob.append(it->first, score->getScore());
- }
- else {
- bob.append(it->first, 0.0);
+ } else if (META_GEONEAR_POINT == it->second) {
+ if (member->hasComputed(WSM_GEO_NEAR_POINT)) {
+ const GeoNearPointComputedData* point =
+ static_cast<const GeoNearPointComputedData*>(
+ member->getComputed(WSM_GEO_NEAR_POINT));
+ BSONObj ptObj = point->getPoint();
+ if (ptObj.couldBeArray()) {
+ bob.appendArray(it->first, ptObj);
+ } else {
+ bob.append(it->first, ptObj);
}
+ } else {
+ return Status(ErrorCodes::InternalError,
+ "near loc proj requested but no data available");
}
- else if (META_RECORDID == it->second) {
- bob.append(it->first, static_cast<long long>(member->loc.repr()));
+ } else if (META_TEXT_SCORE == it->second) {
+ if (member->hasComputed(WSM_COMPUTED_TEXT_SCORE)) {
+ const TextScoreComputedData* score = static_cast<const TextScoreComputedData*>(
+ member->getComputed(WSM_COMPUTED_TEXT_SCORE));
+ bob.append(it->first, score->getScore());
+ } else {
+ bob.append(it->first, 0.0);
}
+ } else if (META_RECORDID == it->second) {
+ bob.append(it->first, static_cast<long long>(member->loc.repr()));
}
-
- BSONObj newObj = bob.obj();
- member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), newObj);
- member->keyData.clear();
- member->loc = RecordId();
-
- return Status::OK();
}
- Status ProjectionExec::transform(const BSONObj& in, BSONObj* out) const {
- // If it's a positional projection we need a MatchDetails.
- MatchDetails matchDetails;
- if (transformRequiresDetails()) {
- matchDetails.requestElemMatchKey();
- verify(NULL != _queryExpression);
- verify(_queryExpression->matchesBSON(in, &matchDetails));
- }
-
- BSONObjBuilder bob;
- Status s = transform(in, &bob, &matchDetails);
- if (!s.isOK()) {
- return s;
- }
- *out = bob.obj();
- return Status::OK();
+ BSONObj newObj = bob.obj();
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), newObj);
+ member->keyData.clear();
+ member->loc = RecordId();
+
+ return Status::OK();
+}
+
+Status ProjectionExec::transform(const BSONObj& in, BSONObj* out) const {
+ // If it's a positional projection we need a MatchDetails.
+ MatchDetails matchDetails;
+ if (transformRequiresDetails()) {
+ matchDetails.requestElemMatchKey();
+ verify(NULL != _queryExpression);
+ verify(_queryExpression->matchesBSON(in, &matchDetails));
}
- Status ProjectionExec::transform(const BSONObj& in,
- BSONObjBuilder* bob,
- const MatchDetails* details) const {
+ BSONObjBuilder bob;
+ Status s = transform(in, &bob, &matchDetails);
+ if (!s.isOK()) {
+ return s;
+ }
+ *out = bob.obj();
+ return Status::OK();
+}
- const ArrayOpType& arrayOpType = _arrayOpType;
+Status ProjectionExec::transform(const BSONObj& in,
+ BSONObjBuilder* bob,
+ const MatchDetails* details) const {
+ const ArrayOpType& arrayOpType = _arrayOpType;
- BSONObjIterator it(in);
- while (it.more()) {
- BSONElement elt = it.next();
+ BSONObjIterator it(in);
+ while (it.more()) {
+ BSONElement elt = it.next();
- // Case 1: _id
- if (mongoutils::str::equals("_id", elt.fieldName())) {
- if (_includeID) {
- bob->append(elt);
- }
- continue;
+ // Case 1: _id
+ if (mongoutils::str::equals("_id", elt.fieldName())) {
+ if (_includeID) {
+ bob->append(elt);
}
+ continue;
+ }
- // Case 2: no array projection for this field.
- Matchers::const_iterator matcher = _matchers.find(elt.fieldName());
- if (_matchers.end() == matcher) {
- Status s = append(bob, elt, details, arrayOpType);
- if (!s.isOK()) {
- return s;
- }
- continue;
+ // Case 2: no array projection for this field.
+ Matchers::const_iterator matcher = _matchers.find(elt.fieldName());
+ if (_matchers.end() == matcher) {
+ Status s = append(bob, elt, details, arrayOpType);
+ if (!s.isOK()) {
+ return s;
}
+ continue;
+ }
- // Case 3: field has array projection with $elemMatch specified.
- if (ARRAY_OP_ELEM_MATCH != arrayOpType) {
- return Status(ErrorCodes::BadValue,
- "Matchers are only supported for $elemMatch");
- }
+ // Case 3: field has array projection with $elemMatch specified.
+ if (ARRAY_OP_ELEM_MATCH != arrayOpType) {
+ return Status(ErrorCodes::BadValue, "Matchers are only supported for $elemMatch");
+ }
- MatchDetails arrayDetails;
- arrayDetails.requestElemMatchKey();
+ MatchDetails arrayDetails;
+ arrayDetails.requestElemMatchKey();
- if (matcher->second->matchesBSON(in, &arrayDetails)) {
- FieldMap::const_iterator fieldIt = _fields.find(elt.fieldName());
- if (_fields.end() == fieldIt) {
- return Status(ErrorCodes::BadValue,
- "$elemMatch specified, but projection field not found.");
- }
+ if (matcher->second->matchesBSON(in, &arrayDetails)) {
+ FieldMap::const_iterator fieldIt = _fields.find(elt.fieldName());
+ if (_fields.end() == fieldIt) {
+ return Status(ErrorCodes::BadValue,
+ "$elemMatch specified, but projection field not found.");
+ }
- BSONArrayBuilder arrBuilder;
- BSONObjBuilder subBob;
+ BSONArrayBuilder arrBuilder;
+ BSONObjBuilder subBob;
- if (in.getField(elt.fieldName()).eoo()) {
- return Status(ErrorCodes::InternalError,
- "$elemMatch called on document element with eoo");
- }
+ if (in.getField(elt.fieldName()).eoo()) {
+ return Status(ErrorCodes::InternalError,
+ "$elemMatch called on document element with eoo");
+ }
- if (in.getField(elt.fieldName()).Obj().getField(arrayDetails.elemMatchKey()).eoo()) {
- return Status(ErrorCodes::InternalError,
- "$elemMatch called on array element with eoo");
- }
+ if (in.getField(elt.fieldName()).Obj().getField(arrayDetails.elemMatchKey()).eoo()) {
+ return Status(ErrorCodes::InternalError,
+ "$elemMatch called on array element with eoo");
+ }
- arrBuilder.append(
- in.getField(elt.fieldName()).Obj().getField(arrayDetails.elemMatchKey()));
- subBob.appendArray(matcher->first, arrBuilder.arr());
- Status status = append(bob, subBob.done().firstElement(), details, arrayOpType);
- if (!status.isOK()) {
- return status;
- }
+ arrBuilder.append(
+ in.getField(elt.fieldName()).Obj().getField(arrayDetails.elemMatchKey()));
+ subBob.appendArray(matcher->first, arrBuilder.arr());
+ Status status = append(bob, subBob.done().firstElement(), details, arrayOpType);
+ if (!status.isOK()) {
+ return status;
}
}
-
- return Status::OK();
}
- void ProjectionExec::appendArray(BSONObjBuilder* bob, const BSONObj& array, bool nested) const {
- int skip = nested ? 0 : _skip;
- int limit = nested ? -1 : _limit;
+ return Status::OK();
+}
- if (skip < 0) {
- skip = max(0, skip + array.nFields());
- }
+void ProjectionExec::appendArray(BSONObjBuilder* bob, const BSONObj& array, bool nested) const {
+ int skip = nested ? 0 : _skip;
+ int limit = nested ? -1 : _limit;
- int index = 0;
- BSONObjIterator it(array);
- while (it.more()) {
- BSONElement elt = it.next();
+ if (skip < 0) {
+ skip = max(0, skip + array.nFields());
+ }
- if (skip) {
- skip--;
- continue;
- }
+ int index = 0;
+ BSONObjIterator it(array);
+ while (it.more()) {
+ BSONElement elt = it.next();
- if (limit != -1 && (limit-- == 0)) {
- break;
- }
+ if (skip) {
+ skip--;
+ continue;
+ }
- switch(elt.type()) {
+ if (limit != -1 && (limit-- == 0)) {
+ break;
+ }
+
+ switch (elt.type()) {
case Array: {
BSONObjBuilder subBob;
appendArray(&subBob, elt.embeddedObject(), true);
@@ -482,76 +454,70 @@ namespace mongo {
if (_include) {
bob->appendAs(elt, bob->numStr(index++));
}
- }
}
}
+}
+
+Status ProjectionExec::append(BSONObjBuilder* bob,
+ const BSONElement& elt,
+ const MatchDetails* details,
+ const ArrayOpType arrayOpType) const {
+ // Skip if the field name matches a computed $meta field.
+ // $meta projection fields can exist at the top level of
+ // the result document and the field names cannot be dotted.
+ if (_meta.find(elt.fieldName()) != _meta.end()) {
+ return Status::OK();
+ }
- Status ProjectionExec::append(BSONObjBuilder* bob,
- const BSONElement& elt,
- const MatchDetails* details,
- const ArrayOpType arrayOpType) const {
-
-
- // Skip if the field name matches a computed $meta field.
- // $meta projection fields can exist at the top level of
- // the result document and the field names cannot be dotted.
- if (_meta.find(elt.fieldName()) != _meta.end()) {
- return Status::OK();
+ FieldMap::const_iterator field = _fields.find(elt.fieldName());
+ if (field == _fields.end()) {
+ if (_include) {
+ bob->append(elt);
}
+ return Status::OK();
+ }
- FieldMap::const_iterator field = _fields.find(elt.fieldName());
- if (field == _fields.end()) {
- if (_include) {
- bob->append(elt);
- }
- return Status::OK();
+ ProjectionExec& subfm = *field->second;
+ if ((subfm._fields.empty() && !subfm._special) ||
+ !(elt.type() == Object || elt.type() == Array)) {
+ // field map empty, or element is not an array/object
+ if (subfm._include) {
+ bob->append(elt);
}
-
- ProjectionExec& subfm = *field->second;
- if ((subfm._fields.empty() && !subfm._special)
- || !(elt.type() == Object || elt.type() == Array)) {
- // field map empty, or element is not an array/object
- if (subfm._include) {
- bob->append(elt);
- }
+ } else if (elt.type() == Object) {
+ BSONObjBuilder subBob;
+ BSONObjIterator it(elt.embeddedObject());
+ while (it.more()) {
+ subfm.append(&subBob, it.next(), details, arrayOpType);
}
- else if (elt.type() == Object) {
- BSONObjBuilder subBob;
- BSONObjIterator it(elt.embeddedObject());
- while (it.more()) {
- subfm.append(&subBob, it.next(), details, arrayOpType);
+ bob->append(elt.fieldName(), subBob.obj());
+ } else {
+ // Array
+ BSONObjBuilder matchedBuilder;
+ if (details && arrayOpType == ARRAY_OP_POSITIONAL) {
+ // $ positional operator specified
+ if (!details->hasElemMatchKey()) {
+ mongoutils::str::stream error;
+ error << "positional operator (" << elt.fieldName()
+ << ".$) requires corresponding field"
+ << " in query specifier";
+ return Status(ErrorCodes::BadValue, error);
}
- bob->append(elt.fieldName(), subBob.obj());
- }
- else {
- // Array
- BSONObjBuilder matchedBuilder;
- if (details && arrayOpType == ARRAY_OP_POSITIONAL) {
- // $ positional operator specified
- if (!details->hasElemMatchKey()) {
- mongoutils::str::stream error;
- error << "positional operator (" << elt.fieldName()
- << ".$) requires corresponding field"
- << " in query specifier";
- return Status(ErrorCodes::BadValue, error);
- }
-
- if (elt.embeddedObject()[details->elemMatchKey()].eoo()) {
- return Status(ErrorCodes::BadValue,
- "positional operator element mismatch");
- }
- // append as the first and only element in the projected array
- matchedBuilder.appendAs( elt.embeddedObject()[details->elemMatchKey()], "0" );
- }
- else {
- // append exact array; no subarray matcher specified
- subfm.appendArray(&matchedBuilder, elt.embeddedObject());
+ if (elt.embeddedObject()[details->elemMatchKey()].eoo()) {
+ return Status(ErrorCodes::BadValue, "positional operator element mismatch");
}
- bob->appendArray(elt.fieldName(), matchedBuilder.obj());
- }
- return Status::OK();
+ // append as the first and only element in the projected array
+ matchedBuilder.appendAs(elt.embeddedObject()[details->elemMatchKey()], "0");
+ } else {
+ // append exact array; no subarray matcher specified
+ subfm.appendArray(&matchedBuilder, elt.embeddedObject());
+ }
+ bob->appendArray(elt.fieldName(), matchedBuilder.obj());
}
+ return Status::OK();
+}
+
} // namespace mongo
diff --git a/src/mongo/db/exec/projection_exec.h b/src/mongo/db/exec/projection_exec.h
index 6b8dd1456af..4a1382b4318 100644
--- a/src/mongo/db/exec/projection_exec.h
+++ b/src/mongo/db/exec/projection_exec.h
@@ -36,166 +36,162 @@
namespace mongo {
- class ProjectionExec {
- public:
- /**
- * A .find() projection can have an array operation, either an elemMatch or positional (or
- * neither).
- */
- enum ArrayOpType {
- ARRAY_OP_NORMAL = 0,
- ARRAY_OP_ELEM_MATCH,
- ARRAY_OP_POSITIONAL
- };
-
- /**
- * Projections based on data computed while answering a query, or other metadata about a
- * document / query.
- */
- enum MetaProjection {
- META_TEXT_SCORE,
- META_GEONEAR_DIST,
- META_GEONEAR_POINT,
- META_RECORDID,
- META_IX_KEY,
- };
-
- /**
- * TODO: document why we like StringMap so much here
- */
- typedef StringMap<ProjectionExec*> FieldMap;
- typedef StringMap<MatchExpression*> Matchers;
- typedef StringMap<MetaProjection> MetaMap;
-
- ProjectionExec(const BSONObj& spec,
- const MatchExpression* queryExpression,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
- ~ProjectionExec();
-
- /**
- * Apply this projection to the 'member'. Changes the type to OWNED_OBJ.
- */
- Status transform(WorkingSetMember* member) const;
-
- /**
- * Apply this projection to the object 'in'.
- *
- * Upon success, 'out' is set to the new object and Status::OK() is returned.
- * Otherwise, returns an error Status and *out is not mutated.
- */
- Status transform(const BSONObj& in, BSONObj* out) const;
-
- private:
- //
- // Initialization
- //
-
- ProjectionExec();
-
- /**
- * Add 'field' as a field name that is included or excluded as part of the projection.
- */
- void add(const std::string& field, bool include);
-
- /**
- * Add 'field' as a field name that is sliced as part of the projection.
- */
- void add(const std::string& field, int skip, int limit);
-
- //
- // Execution
- //
-
- /**
- * Apply the projection that 'this' represents to the object 'in'. 'details' is the result
- * of a match evaluation of the full query on the object 'in'. This is only required
- * if the projection is positional.
- *
- * If the projection is successfully computed, returns Status::OK() and stuff the result in
- * 'bob'.
- * Otherwise, returns error.
- */
- Status transform(const BSONObj& in,
- BSONObjBuilder* bob,
- const MatchDetails* details = NULL) const;
-
- /**
- * See transform(...) above.
- */
- bool transformRequiresDetails() const {
- return ARRAY_OP_POSITIONAL == _arrayOpType;
- }
-
- /**
- * Is the full document required to compute this projection?
- */
- bool requiresDocument() const {
- return _include || _hasNonSimple || _hasDottedField;
- }
-
- /**
- * Appends the element 'e' to the builder 'bob', possibly descending into sub-fields of 'e'
- * if needed.
- */
- Status append(BSONObjBuilder* bob,
- const BSONElement& elt,
- const MatchDetails* details = NULL,
- const ArrayOpType arrayOpType = ARRAY_OP_NORMAL) const;
-
- /**
- * Like append, but for arrays.
- * Deals with slice and calls appendArray to preserve the array-ness.
- */
- void appendArray(BSONObjBuilder* bob, const BSONObj& array, bool nested = false) const;
-
- // True if default at this level is to include.
- bool _include;
-
- // True if this level can't be skipped or included without recursing.
- bool _special;
-
- // We must group projections with common prefixes together.
- // TODO: benchmark std::vector<pair> vs map
- //
- // Projection is a rooted tree. If we have {a.b: 1, a.c: 1} we don't want to
- // double-traverse the document when we're projecting it. Instead, we have an entry in
- // _fields for 'a' with two sub projections: b:1 and c:1.
- FieldMap _fields;
-
- // The raw projection spec. that is passed into init(...)
- BSONObj _source;
-
- // Should we include the _id field?
- bool _includeID;
-
- // Arguments from the $slice operator.
- int _skip;
- int _limit;
-
- // Used for $elemMatch and positional operator ($)
- Matchers _matchers;
-
- // The matchers above point into BSONObjs and this is where those objs live.
- std::vector<BSONObj> _elemMatchObjs;
-
- ArrayOpType _arrayOpType;
-
- // Is there an slice, elemMatch or meta operator?
- bool _hasNonSimple;
-
- // Is there a projection over a dotted field or a $ positional operator?
- bool _hasDottedField;
-
- // The full query expression. Used when we need MatchDetails.
- const MatchExpression* _queryExpression;
-
- // Projections that aren't sourced from the document or index keys.
- MetaMap _meta;
-
- // Do we have a returnKey projection? If so we *only* output the index key metadata. If
- // it's not found we output nothing.
- bool _hasReturnKey;
+class ProjectionExec {
+public:
+ /**
+ * A .find() projection can have an array operation, either an elemMatch or positional (or
+ * neither).
+ */
+ enum ArrayOpType { ARRAY_OP_NORMAL = 0, ARRAY_OP_ELEM_MATCH, ARRAY_OP_POSITIONAL };
+
+ /**
+ * Projections based on data computed while answering a query, or other metadata about a
+ * document / query.
+ */
+ enum MetaProjection {
+ META_TEXT_SCORE,
+ META_GEONEAR_DIST,
+ META_GEONEAR_POINT,
+ META_RECORDID,
+ META_IX_KEY,
};
+ /**
+ * TODO: document why we like StringMap so much here
+ */
+ typedef StringMap<ProjectionExec*> FieldMap;
+ typedef StringMap<MatchExpression*> Matchers;
+ typedef StringMap<MetaProjection> MetaMap;
+
+ ProjectionExec(const BSONObj& spec,
+ const MatchExpression* queryExpression,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+ ~ProjectionExec();
+
+ /**
+ * Apply this projection to the 'member'. Changes the type to OWNED_OBJ.
+ */
+ Status transform(WorkingSetMember* member) const;
+
+ /**
+ * Apply this projection to the object 'in'.
+ *
+ * Upon success, 'out' is set to the new object and Status::OK() is returned.
+ * Otherwise, returns an error Status and *out is not mutated.
+ */
+ Status transform(const BSONObj& in, BSONObj* out) const;
+
+private:
+ //
+ // Initialization
+ //
+
+ ProjectionExec();
+
+ /**
+ * Add 'field' as a field name that is included or excluded as part of the projection.
+ */
+ void add(const std::string& field, bool include);
+
+ /**
+ * Add 'field' as a field name that is sliced as part of the projection.
+ */
+ void add(const std::string& field, int skip, int limit);
+
+ //
+ // Execution
+ //
+
+ /**
+ * Apply the projection that 'this' represents to the object 'in'. 'details' is the result
+ * of a match evaluation of the full query on the object 'in'. This is only required
+ * if the projection is positional.
+ *
+     * If the projection is successfully computed, returns Status::OK() and stuffs the result
+     * in 'bob'.
+     * Otherwise, returns an error.
+ */
+ Status transform(const BSONObj& in,
+ BSONObjBuilder* bob,
+ const MatchDetails* details = NULL) const;
+
+ /**
+ * See transform(...) above.
+ */
+ bool transformRequiresDetails() const {
+ return ARRAY_OP_POSITIONAL == _arrayOpType;
+ }
+
+ /**
+ * Is the full document required to compute this projection?
+ */
+ bool requiresDocument() const {
+ return _include || _hasNonSimple || _hasDottedField;
+ }
+
+ /**
+     * Appends the element 'elt' to the builder 'bob', possibly descending into sub-fields of
+     * 'elt' if needed.
+ */
+ Status append(BSONObjBuilder* bob,
+ const BSONElement& elt,
+ const MatchDetails* details = NULL,
+ const ArrayOpType arrayOpType = ARRAY_OP_NORMAL) const;
+
+ /**
+ * Like append, but for arrays.
+ * Deals with slice and calls appendArray to preserve the array-ness.
+ */
+ void appendArray(BSONObjBuilder* bob, const BSONObj& array, bool nested = false) const;
+
+ // True if default at this level is to include.
+ bool _include;
+
+ // True if this level can't be skipped or included without recursing.
+ bool _special;
+
+ // We must group projections with common prefixes together.
+ // TODO: benchmark std::vector<pair> vs map
+ //
+ // Projection is a rooted tree. If we have {a.b: 1, a.c: 1} we don't want to
+ // double-traverse the document when we're projecting it. Instead, we have an entry in
+ // _fields for 'a' with two sub projections: b:1 and c:1.
+ FieldMap _fields;
+
+ // The raw projection spec. that is passed into init(...)
+ BSONObj _source;
+
+ // Should we include the _id field?
+ bool _includeID;
+
+ // Arguments from the $slice operator.
+ int _skip;
+ int _limit;
+
+ // Used for $elemMatch and positional operator ($)
+ Matchers _matchers;
+
+ // The matchers above point into BSONObjs and this is where those objs live.
+ std::vector<BSONObj> _elemMatchObjs;
+
+ ArrayOpType _arrayOpType;
+
+    // Is there a slice, elemMatch, or meta operator?
+ bool _hasNonSimple;
+
+ // Is there a projection over a dotted field or a $ positional operator?
+ bool _hasDottedField;
+
+ // The full query expression. Used when we need MatchDetails.
+ const MatchExpression* _queryExpression;
+
+ // Projections that aren't sourced from the document or index keys.
+ MetaMap _meta;
+
+ // Do we have a returnKey projection? If so we *only* output the index key metadata. If
+ // it's not found we output nothing.
+ bool _hasReturnKey;
+};
+
} // namespace mongo
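
For orientation, here is a minimal sketch of how the transform(WorkingSetMember*) entry point declared above is typically driven. It uses only names that appear in this diff; parseMatchExpression is the test-local helper from projection_exec_test.cpp further down, not a library API, and the expected output matches what the positional-operator tests assert.

    // Build a positional ($) projection and the query that supplies MatchDetails.
    BSONObj spec = fromjson("{'a.$': 1}");
    BSONObj query = fromjson("{a: 20}");
    std::unique_ptr<MatchExpression> queryExpression(parseMatchExpression(query));
    ProjectionExec exec(spec, queryExpression.get());

    // Wrap the source document in a working set member that owns its object.
    WorkingSetMember wsm;
    wsm.state = WorkingSetMember::OWNED_OBJ;
    wsm.obj = Snapshotted<BSONObj>(SnapshotId(), fromjson("{a: [10, 20, 30]}"));

    // On success the member's object is replaced with the projected result,
    // here {a: [20]} because the query matched the second array element.
    Status s = exec.transform(&wsm);
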
diff --git a/src/mongo/db/exec/projection_exec_test.cpp b/src/mongo/db/exec/projection_exec_test.cpp
index 56d9cbc5be2..fca1065cba0 100644
--- a/src/mongo/db/exec/projection_exec_test.cpp
+++ b/src/mongo/db/exec/projection_exec_test.cpp
@@ -42,178 +42,187 @@ using namespace mongo;
namespace {
- using std::unique_ptr;
-
- /**
- * Utility function to create MatchExpression
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- ASSERT_TRUE(status.isOK());
- MatchExpression* expr(status.getValue());
- return expr;
- }
-
- //
- // transform tests
- //
-
- /**
- * test function to verify results of transform()
- * on a working set member.
- *
- * specStr - projection specification
- * queryStr - query
- * objStr - object to run projection on
- * data - computed data. Owned by working set member created in this function if not null.
- * expectedStatusOK - expected status of transformation
- * expectedObjStr - expected object after successful projection.
- * Ignored if expectedStatusOK is false.
- */
-
- void testTransform(const char* specStr, const char* queryStr, const char* objStr,
- WorkingSetComputedData* data,
- bool expectedStatusOK, const char* expectedObjStr) {
- // Create projection exec object.
- BSONObj spec = fromjson(specStr);
- BSONObj query = fromjson(queryStr);
- unique_ptr<MatchExpression> queryExpression(parseMatchExpression(query));
- ProjectionExec exec(spec, queryExpression.get());
-
- // Create working set member.
- WorkingSetMember wsm;
- wsm.state = WorkingSetMember::OWNED_OBJ;
- wsm.obj = Snapshotted<BSONObj>(SnapshotId(), fromjson(objStr));
- if (data) {
- wsm.addComputed(data);
- }
-
- // Transform object
- Status status = exec.transform(&wsm);
-
- // There are fewer checks to perform if we are expected a failed status.
- if (!expectedStatusOK) {
- if (status.isOK()) {
- mongoutils::str::stream ss;
- ss << "expected transform() to fail but got success instead."
- << "\nprojection spec: " << specStr
- << "\nquery: " << queryStr
- << "\nobject before projection: " << objStr;
- FAIL(ss);
- }
- return;
- }
-
- // If we are expecting a successful transformation but got a failed status instead,
- // print out status message in assertion message.
- if (!status.isOK()) {
- mongoutils::str::stream ss;
- ss << "transform() test failed: unexpected failed status: " << status.toString()
- << "\nprojection spec: " << specStr
- << "\nquery: " << queryStr
- << "\nobject before projection: " << objStr
- << "\nexpected object after projection: " << expectedObjStr;
- FAIL(ss);
- }
-
- // Finally, we compare the projected object.
- const BSONObj& obj = wsm.obj.value();
- BSONObj expectedObj = fromjson(expectedObjStr);
- if (obj != expectedObj) {
- mongoutils::str::stream ss;
- ss << "transform() test failed: unexpected projected object."
- << "\nprojection spec: " << specStr
- << "\nquery: " << queryStr
- << "\nobject before projection: " << objStr
- << "\nexpected object after projection: " << expectedObjStr
- << "\nactual object after projection: " << obj.toString();
- FAIL(ss);
- }
- }
+using std::unique_ptr;
- /**
- * testTransform without computed data argument.
- */
- void testTransform(const char* specStr, const char* queryStr, const char* objStr,
- bool expectedStatusOK, const char* expectedObjStr) {
- testTransform(specStr, queryStr, objStr, NULL, expectedStatusOK, expectedObjStr);
- }
+/**
+ * Utility function to create MatchExpression
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ ASSERT_TRUE(status.isOK());
+ MatchExpression* expr(status.getValue());
+ return expr;
+}
- //
- // position $
- //
+//
+// transform tests
+//
- TEST(ProjectionExecTest, TransformPositionalDollar) {
- // Valid position $ projections.
- testTransform("{'a.$': 1}", "{a: 10}", "{a: [10, 20, 30]}", true, "{a: [10]}");
- testTransform("{'a.$': 1}", "{a: 20}", "{a: [10, 20, 30]}", true, "{a: [20]}");
- testTransform("{'a.$': 1}", "{a: 30}", "{a: [10, 20, 30]}", true, "{a: [30]}");
- testTransform("{'a.$': 1}", "{a: {$gt: 4}}", "{a: [5]}", true, "{a: [5]}");
+/**
+ * test function to verify results of transform()
+ * on a working set member.
+ *
+ * specStr - projection specification
+ * queryStr - query
+ * objStr - object to run projection on
+ * data - computed data. Owned by working set member created in this function if not null.
+ * expectedStatusOK - expected status of transformation
+ * expectedObjStr - expected object after successful projection.
+ * Ignored if expectedStatusOK is false.
+ */
- // Invalid position $ projections.
- testTransform("{'a.$': 1}", "{a: {$size: 1}}", "{a: [5]}", false, "");
+void testTransform(const char* specStr,
+ const char* queryStr,
+ const char* objStr,
+ WorkingSetComputedData* data,
+ bool expectedStatusOK,
+ const char* expectedObjStr) {
+ // Create projection exec object.
+ BSONObj spec = fromjson(specStr);
+ BSONObj query = fromjson(queryStr);
+ unique_ptr<MatchExpression> queryExpression(parseMatchExpression(query));
+ ProjectionExec exec(spec, queryExpression.get());
+
+ // Create working set member.
+ WorkingSetMember wsm;
+ wsm.state = WorkingSetMember::OWNED_OBJ;
+ wsm.obj = Snapshotted<BSONObj>(SnapshotId(), fromjson(objStr));
+ if (data) {
+ wsm.addComputed(data);
}
- //
- // $elemMatch
- //
-
- TEST(ProjectionExecTest, TransformElemMatch) {
- const char* s = "{a: [{x: 1, y: 10}, {x: 1, y: 20}, {x: 2, y: 10}]}";
+ // Transform object
+ Status status = exec.transform(&wsm);
- // Valid $elemMatch projections.
- testTransform("{a: {$elemMatch: {x: 1}}}", "{}", s, true, "{a: [{x: 1, y: 10}]}");
- testTransform("{a: {$elemMatch: {x: 1, y: 20}}}", "{}", s, true, "{a: [{x: 1, y: 20}]}");
- testTransform("{a: {$elemMatch: {x: 2}}}", "{}", s, true, "{a: [{x: 2, y: 10}]}");
- testTransform("{a: {$elemMatch: {x: 3}}}", "{}", s, true, "{}");
-
- // $elemMatch on unknown field z
- testTransform("{a: {$elemMatch: {z: 1}}}", "{}", s, true, "{}");
+    // There are fewer checks to perform if we are expecting a failed status.
+ if (!expectedStatusOK) {
+ if (status.isOK()) {
+ mongoutils::str::stream ss;
+ ss << "expected transform() to fail but got success instead."
+ << "\nprojection spec: " << specStr << "\nquery: " << queryStr
+ << "\nobject before projection: " << objStr;
+ FAIL(ss);
+ }
+ return;
}
- //
- // $slice
- //
-
- TEST(ProjectionExecTest, TransformSliceCount) {
- // Valid $slice projections using format {$slice: count}.
- testTransform("{a: {$slice: -10}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
- testTransform("{a: {$slice: -3}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
- testTransform("{a: {$slice: -1}}", "{}", "{a: [4, 6, 8]}", true, "{a: [8]}");
- testTransform("{a: {$slice: 0}}", "{}", "{a: [4, 6, 8]}", true, "{a: []}");
- testTransform("{a: {$slice: 1}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4]}");
- testTransform("{a: {$slice: 3}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
- testTransform("{a: {$slice: 10}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
+ // If we are expecting a successful transformation but got a failed status instead,
+ // print out status message in assertion message.
+ if (!status.isOK()) {
+ mongoutils::str::stream ss;
+ ss << "transform() test failed: unexpected failed status: " << status.toString()
+ << "\nprojection spec: " << specStr << "\nquery: " << queryStr
+ << "\nobject before projection: " << objStr
+ << "\nexpected object after projection: " << expectedObjStr;
+ FAIL(ss);
}
- TEST(ProjectionExecTest, TransformSliceSkipLimit) {
- // Valid $slice projections using format {$slice: [skip, limit]}.
- // Non-positive limits are rejected at the query parser and therefore not handled by
- // the projection execution stage. In fact, it will abort on an invalid limit.
- testTransform("{a: {$slice: [-10, 10]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
- testTransform("{a: {$slice: [-3, 5]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
- testTransform("{a: {$slice: [-1, 1]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [8]}");
- testTransform("{a: {$slice: [0, 2]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6]}");
- testTransform("{a: {$slice: [0, 1]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4]}");
- testTransform("{a: {$slice: [1, 1]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [6]}");
- testTransform("{a: {$slice: [3, 5]}}", "{}", "{a: [4, 6, 8]}", true, "{a: []}");
- testTransform("{a: {$slice: [10, 10]}}", "{}", "{a: [4, 6, 8]}", true, "{a: []}");
+ // Finally, we compare the projected object.
+ const BSONObj& obj = wsm.obj.value();
+ BSONObj expectedObj = fromjson(expectedObjStr);
+ if (obj != expectedObj) {
+ mongoutils::str::stream ss;
+ ss << "transform() test failed: unexpected projected object."
+ << "\nprojection spec: " << specStr << "\nquery: " << queryStr
+ << "\nobject before projection: " << objStr
+ << "\nexpected object after projection: " << expectedObjStr
+ << "\nactual object after projection: " << obj.toString();
+ FAIL(ss);
}
+}
- //
- // $meta
- // $meta projections add computed values to the projected object.
- //
-
- TEST(ProjectionExecTest, TransformMetaTextScore) {
- // Query {} is ignored.
- testTransform("{b: {$meta: 'textScore'}}", "{}", "{a: 'hello'}",
- new mongo::TextScoreComputedData(100),
- true, "{a: 'hello', b: 100}");
- // Projected meta field should overwrite existing field.
- testTransform("{b: {$meta: 'textScore'}}", "{}", "{a: 'hello', b: -1}",
- new mongo::TextScoreComputedData(100),
- true, "{a: 'hello', b: 100}");
- }
+/**
+ * testTransform without computed data argument.
+ */
+void testTransform(const char* specStr,
+ const char* queryStr,
+ const char* objStr,
+ bool expectedStatusOK,
+ const char* expectedObjStr) {
+ testTransform(specStr, queryStr, objStr, NULL, expectedStatusOK, expectedObjStr);
+}
+
+//
+// position $
+//
+
+TEST(ProjectionExecTest, TransformPositionalDollar) {
+ // Valid position $ projections.
+ testTransform("{'a.$': 1}", "{a: 10}", "{a: [10, 20, 30]}", true, "{a: [10]}");
+ testTransform("{'a.$': 1}", "{a: 20}", "{a: [10, 20, 30]}", true, "{a: [20]}");
+ testTransform("{'a.$': 1}", "{a: 30}", "{a: [10, 20, 30]}", true, "{a: [30]}");
+ testTransform("{'a.$': 1}", "{a: {$gt: 4}}", "{a: [5]}", true, "{a: [5]}");
+
+ // Invalid position $ projections.
+ testTransform("{'a.$': 1}", "{a: {$size: 1}}", "{a: [5]}", false, "");
+}
+
+//
+// $elemMatch
+//
+
+TEST(ProjectionExecTest, TransformElemMatch) {
+ const char* s = "{a: [{x: 1, y: 10}, {x: 1, y: 20}, {x: 2, y: 10}]}";
+
+ // Valid $elemMatch projections.
+ testTransform("{a: {$elemMatch: {x: 1}}}", "{}", s, true, "{a: [{x: 1, y: 10}]}");
+ testTransform("{a: {$elemMatch: {x: 1, y: 20}}}", "{}", s, true, "{a: [{x: 1, y: 20}]}");
+ testTransform("{a: {$elemMatch: {x: 2}}}", "{}", s, true, "{a: [{x: 2, y: 10}]}");
+ testTransform("{a: {$elemMatch: {x: 3}}}", "{}", s, true, "{}");
+
+ // $elemMatch on unknown field z
+ testTransform("{a: {$elemMatch: {z: 1}}}", "{}", s, true, "{}");
+}
+
+//
+// $slice
+//
+
+TEST(ProjectionExecTest, TransformSliceCount) {
+ // Valid $slice projections using format {$slice: count}.
+ testTransform("{a: {$slice: -10}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
+ testTransform("{a: {$slice: -3}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
+ testTransform("{a: {$slice: -1}}", "{}", "{a: [4, 6, 8]}", true, "{a: [8]}");
+ testTransform("{a: {$slice: 0}}", "{}", "{a: [4, 6, 8]}", true, "{a: []}");
+ testTransform("{a: {$slice: 1}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4]}");
+ testTransform("{a: {$slice: 3}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
+ testTransform("{a: {$slice: 10}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
+}
+
+TEST(ProjectionExecTest, TransformSliceSkipLimit) {
+ // Valid $slice projections using format {$slice: [skip, limit]}.
+ // Non-positive limits are rejected at the query parser and therefore not handled by
+ // the projection execution stage. In fact, it will abort on an invalid limit.
+ testTransform("{a: {$slice: [-10, 10]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
+ testTransform("{a: {$slice: [-3, 5]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6, 8]}");
+ testTransform("{a: {$slice: [-1, 1]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [8]}");
+ testTransform("{a: {$slice: [0, 2]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4, 6]}");
+ testTransform("{a: {$slice: [0, 1]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [4]}");
+ testTransform("{a: {$slice: [1, 1]}}", "{}", "{a: [4, 6, 8]}", true, "{a: [6]}");
+ testTransform("{a: {$slice: [3, 5]}}", "{}", "{a: [4, 6, 8]}", true, "{a: []}");
+ testTransform("{a: {$slice: [10, 10]}}", "{}", "{a: [4, 6, 8]}", true, "{a: []}");
+}
+
+//
+// $meta
+// $meta projections add computed values to the projected object.
+//
+
+TEST(ProjectionExecTest, TransformMetaTextScore) {
+ // Query {} is ignored.
+ testTransform("{b: {$meta: 'textScore'}}",
+ "{}",
+ "{a: 'hello'}",
+ new mongo::TextScoreComputedData(100),
+ true,
+ "{a: 'hello', b: 100}");
+ // Projected meta field should overwrite existing field.
+ testTransform("{b: {$meta: 'textScore'}}",
+ "{}",
+ "{a: 'hello', b: -1}",
+ new mongo::TextScoreComputedData(100),
+ true,
+ "{a: 'hello', b: 100}");
+}
} // namespace
diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp
index 740f3084740..1fffe7aba86 100644
--- a/src/mongo/db/exec/queued_data_stage.cpp
+++ b/src/mongo/db/exec/queued_data_stage.cpp
@@ -33,85 +33,87 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
+using std::unique_ptr;
+using std::vector;
- const char* QueuedDataStage::kStageType = "QUEUED_DATA";
+const char* QueuedDataStage::kStageType = "QUEUED_DATA";
- QueuedDataStage::QueuedDataStage(WorkingSet* ws)
- : _ws(ws),
- _commonStats(kStageType)
- {}
+QueuedDataStage::QueuedDataStage(WorkingSet* ws) : _ws(ws), _commonStats(kStageType) {}
- PlanStage::StageState QueuedDataStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+PlanStage::StageState QueuedDataStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- if (isEOF()) { return PlanStage::IS_EOF; }
-
- StageState state = _results.front();
- _results.pop();
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
- if (PlanStage::ADVANCED == state) {
- ++_commonStats.advanced;
- *out = _members.front();
- _members.pop();
- }
- else if (PlanStage::NEED_TIME == state) {
- ++_commonStats.needTime;
- }
+ StageState state = _results.front();
+ _results.pop();
- return state;
+ if (PlanStage::ADVANCED == state) {
+ ++_commonStats.advanced;
+ *out = _members.front();
+ _members.pop();
+ } else if (PlanStage::NEED_TIME == state) {
+ ++_commonStats.needTime;
}
- bool QueuedDataStage::isEOF() { return _results.empty(); }
+ return state;
+}
- void QueuedDataStage::saveState() {
- ++_commonStats.yields;
- }
+bool QueuedDataStage::isEOF() {
+ return _results.empty();
+}
- void QueuedDataStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- }
+void QueuedDataStage::saveState() {
+ ++_commonStats.yields;
+}
- void QueuedDataStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- ++_commonStats.invalidates;
- }
+void QueuedDataStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+}
- PlanStageStats* QueuedDataStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_QUEUED_DATA));
- ret->specific.reset(new MockStats(_specificStats));
- return ret.release();
- }
+void QueuedDataStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+}
- const CommonStats* QueuedDataStage::getCommonStats() const { return &_commonStats; }
+PlanStageStats* QueuedDataStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_QUEUED_DATA));
+ ret->specific.reset(new MockStats(_specificStats));
+ return ret.release();
+}
- const SpecificStats* QueuedDataStage::getSpecificStats() const { return &_specificStats; }
+const CommonStats* QueuedDataStage::getCommonStats() const {
+ return &_commonStats;
+}
- void QueuedDataStage::pushBack(const PlanStage::StageState state) {
- invariant(PlanStage::ADVANCED != state);
- _results.push(state);
- }
+const SpecificStats* QueuedDataStage::getSpecificStats() const {
+ return &_specificStats;
+}
- void QueuedDataStage::pushBack(const WorkingSetMember& member) {
- _results.push(PlanStage::ADVANCED);
+void QueuedDataStage::pushBack(const PlanStage::StageState state) {
+ invariant(PlanStage::ADVANCED != state);
+ _results.push(state);
+}
- WorkingSetID id = _ws->allocate();
- WorkingSetMember* ourMember = _ws->get(id);
- WorkingSetCommon::initFrom(ourMember, member);
+void QueuedDataStage::pushBack(const WorkingSetMember& member) {
+ _results.push(PlanStage::ADVANCED);
- // member lives in _ws. We'll return it when _results hits ADVANCED.
- _members.push(id);
- }
+ WorkingSetID id = _ws->allocate();
+ WorkingSetMember* ourMember = _ws->get(id);
+ WorkingSetCommon::initFrom(ourMember, member);
- vector<PlanStage*> QueuedDataStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
- }
+ // member lives in _ws. We'll return it when _results hits ADVANCED.
+ _members.push(id);
+}
+
+vector<PlanStage*> QueuedDataStage::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
} // namespace mongo
diff --git a/src/mongo/db/exec/queued_data_stage.h b/src/mongo/db/exec/queued_data_stage.h
index 5d7a7b2b159..89185f6d751 100644
--- a/src/mongo/db/exec/queued_data_stage.h
+++ b/src/mongo/db/exec/queued_data_stage.h
@@ -35,78 +35,80 @@
namespace mongo {
- class RecordId;
+class RecordId;
- /**
- * QueuedDataStage is a data-producing stage. Unlike the other two leaf stages (CollectionScan
- * and IndexScan) QueuedDataStage does not require any underlying storage layer.
- *
- * A QueuedDataStage is "programmed" by pushing return values from work() onto its internal
- * queue. Calls to QueuedDataStage::work() pop values off that queue and return them in FIFO
- * order, annotating the working set with data when appropriate.
- */
- class QueuedDataStage : public PlanStage {
- public:
- QueuedDataStage(WorkingSet* ws);
- virtual ~QueuedDataStage() { }
+/**
+ * QueuedDataStage is a data-producing stage. Unlike the other two leaf stages (CollectionScan
+ * and IndexScan) QueuedDataStage does not require any underlying storage layer.
+ *
+ * A QueuedDataStage is "programmed" by pushing return values from work() onto its internal
+ * queue. Calls to QueuedDataStage::work() pop values off that queue and return them in FIFO
+ * order, annotating the working set with data when appropriate.
+ */
+class QueuedDataStage : public PlanStage {
+public:
+ QueuedDataStage(WorkingSet* ws);
+ virtual ~QueuedDataStage() {}
- virtual StageState work(WorkingSetID* out);
+ virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
+ virtual bool isEOF();
- // These don't really mean anything here.
- // Some day we could count the # of calls to the yield functions to check that other stages
- // have correct yielding behavior.
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ // These don't really mean anything here.
+ // Some day we could count the # of calls to the yield functions to check that other stages
+ // have correct yielding behavior.
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_QUEUED_DATA; }
+ virtual StageType stageType() const {
+ return STAGE_QUEUED_DATA;
+ }
- //
- // Exec stats
- //
+ //
+ // Exec stats
+ //
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- /**
- * Add a result to the back of the queue.
- *
- * Note: do not add PlanStage::ADVANCED with this method, ADVANCED can
- * only be added with a data member.
- *
- * Work() goes through the queue.
- * Either no data is returned (just a state), or...
- */
- void pushBack(const PlanStage::StageState state);
+ /**
+ * Add a result to the back of the queue.
+ *
+ * Note: do not add PlanStage::ADVANCED with this method, ADVANCED can
+ * only be added with a data member.
+ *
+ * Work() goes through the queue.
+ * Either no data is returned (just a state), or...
+ */
+ void pushBack(const PlanStage::StageState state);
- /**
- * ...data is returned (and we ADVANCED)
- *
- * Allocates a new member and copies 'member' into it.
- * Does not take ownership of anything in 'member'.
- */
- void pushBack(const WorkingSetMember& member);
+ /**
+ * ...data is returned (and we ADVANCED)
+ *
+ * Allocates a new member and copies 'member' into it.
+ * Does not take ownership of anything in 'member'.
+ */
+ void pushBack(const WorkingSetMember& member);
- static const char* kStageType;
+ static const char* kStageType;
- private:
- // We don't own this.
- WorkingSet* _ws;
+private:
+ // We don't own this.
+ WorkingSet* _ws;
- // The data we return.
- std::queue<PlanStage::StageState> _results;
- std::queue<WorkingSetID> _members;
+ // The data we return.
+ std::queue<PlanStage::StageState> _results;
+ std::queue<WorkingSetID> _members;
- // Stats
- CommonStats _commonStats;
- MockStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ MockStats _specificStats;
+};
} // namespace mongo
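
As the class comment above describes, a QueuedDataStage is "programmed" up front and then drained through work(). A minimal sketch of that flow, using only the API shown in this header (queued_data_stage_test.cpp below exercises the same sequence with assertions):

    WorkingSet ws;
    QueuedDataStage stage(&ws);

    // Queue a state-only result, then a data-bearing one.
    stage.pushBack(PlanStage::NEED_TIME);
    WorkingSetMember member;
    stage.pushBack(member);  // copied into 'ws'; handed back with ADVANCED

    WorkingSetID id;
    stage.work(&id);   // returns NEED_TIME; 'id' is untouched
    stage.work(&id);   // returns ADVANCED; 'id' names the copied member
    bool done = stage.isEOF();  // true once the queue is empty
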
diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp
index ef823c04cfb..45e4a6b2a96 100644
--- a/src/mongo/db/exec/queued_data_stage_test.cpp
+++ b/src/mongo/db/exec/queued_data_stage_test.cpp
@@ -38,70 +38,70 @@ using namespace mongo;
namespace {
- using std::unique_ptr;
+using std::unique_ptr;
- //
- // Basic test that we get out valid stats objects.
- //
- TEST(QueuedDataStageTest, getValidStats) {
- WorkingSet ws;
- unique_ptr<QueuedDataStage> mock(new QueuedDataStage(&ws));
- const CommonStats* commonStats = mock->getCommonStats();
- ASSERT_EQUALS(commonStats->works, static_cast<size_t>(0));
- const SpecificStats* specificStats = mock->getSpecificStats();
- ASSERT(specificStats);
- unique_ptr<PlanStageStats> allStats(mock->getStats());
- ASSERT_EQUALS(allStats->stageType, mock->stageType());
- }
+//
+// Basic test that we get out valid stats objects.
+//
+TEST(QueuedDataStageTest, getValidStats) {
+ WorkingSet ws;
+ unique_ptr<QueuedDataStage> mock(new QueuedDataStage(&ws));
+ const CommonStats* commonStats = mock->getCommonStats();
+ ASSERT_EQUALS(commonStats->works, static_cast<size_t>(0));
+ const SpecificStats* specificStats = mock->getSpecificStats();
+ ASSERT(specificStats);
+ unique_ptr<PlanStageStats> allStats(mock->getStats());
+ ASSERT_EQUALS(allStats->stageType, mock->stageType());
+}
- //
- // Test that our stats are updated as we perform operations.
- //
- TEST(QueuedDataStageTest, validateStats) {
- WorkingSet ws;
- WorkingSetID wsID;
- unique_ptr<QueuedDataStage> mock(new QueuedDataStage(&ws));
+//
+// Test that our stats are updated as we perform operations.
+//
+TEST(QueuedDataStageTest, validateStats) {
+ WorkingSet ws;
+ WorkingSetID wsID;
+ unique_ptr<QueuedDataStage> mock(new QueuedDataStage(&ws));
- // make sure that we're at all zero
- const CommonStats* stats = mock->getCommonStats();
- ASSERT_EQUALS(stats->yields, 0U);
- ASSERT_EQUALS(stats->unyields, 0U);
- ASSERT_EQUALS(stats->invalidates, 0U);
- ASSERT_EQUALS(stats->works, 0U);
- ASSERT_EQUALS(stats->needTime, 0U);
- ASSERT_EQUALS(stats->advanced, 0U);
- ASSERT_FALSE(stats->isEOF);
+ // make sure that we're at all zero
+ const CommonStats* stats = mock->getCommonStats();
+ ASSERT_EQUALS(stats->yields, 0U);
+ ASSERT_EQUALS(stats->unyields, 0U);
+ ASSERT_EQUALS(stats->invalidates, 0U);
+ ASSERT_EQUALS(stats->works, 0U);
+ ASSERT_EQUALS(stats->needTime, 0U);
+ ASSERT_EQUALS(stats->advanced, 0U);
+ ASSERT_FALSE(stats->isEOF);
- // 'perform' some operations, validate stats
- // needTime
- mock->pushBack(PlanStage::NEED_TIME);
- mock->work(&wsID);
- ASSERT_EQUALS(stats->works, 1U);
- ASSERT_EQUALS(stats->needTime, 1U);
+ // 'perform' some operations, validate stats
+ // needTime
+ mock->pushBack(PlanStage::NEED_TIME);
+ mock->work(&wsID);
+ ASSERT_EQUALS(stats->works, 1U);
+ ASSERT_EQUALS(stats->needTime, 1U);
- // advanced, with pushed data
- const WorkingSetMember member;
- mock->pushBack(member);
- mock->work(&wsID);
- ASSERT_EQUALS(stats->works, 2U);
- ASSERT_EQUALS(stats->advanced, 1U);
+ // advanced, with pushed data
+ const WorkingSetMember member;
+ mock->pushBack(member);
+ mock->work(&wsID);
+ ASSERT_EQUALS(stats->works, 2U);
+ ASSERT_EQUALS(stats->advanced, 1U);
- // yields
- mock->saveState();
- ASSERT_EQUALS(stats->yields, 1U);
+ // yields
+ mock->saveState();
+ ASSERT_EQUALS(stats->yields, 1U);
- // unyields
- mock->restoreState(NULL);
- ASSERT_EQUALS(stats->unyields, 1U);
+ // unyields
+ mock->restoreState(NULL);
+ ASSERT_EQUALS(stats->unyields, 1U);
- // invalidates
- const RecordId dl(0, 0);
- mock->invalidate(NULL, dl, INVALIDATION_MUTATION);
- ASSERT_EQUALS(stats->invalidates, 1U);
+ // invalidates
+ const RecordId dl(0, 0);
+ mock->invalidate(NULL, dl, INVALIDATION_MUTATION);
+ ASSERT_EQUALS(stats->invalidates, 1U);
- // and now we are d1U, but must trigger EOF with getStats()
- ASSERT_FALSE(stats->isEOF);
- unique_ptr<PlanStageStats> allStats(mock->getStats());
- ASSERT_TRUE(stats->isEOF);
- }
+    // and now we are done, but must trigger EOF with getStats()
+ ASSERT_FALSE(stats->isEOF);
+ unique_ptr<PlanStageStats> allStats(mock->getStats());
+ ASSERT_TRUE(stats->isEOF);
+}
}
diff --git a/src/mongo/db/exec/scoped_timer.cpp b/src/mongo/db/exec/scoped_timer.cpp
index 9cf6f60ebad..e1db4a44ff8 100644
--- a/src/mongo/db/exec/scoped_timer.cpp
+++ b/src/mongo/db/exec/scoped_timer.cpp
@@ -34,14 +34,12 @@
namespace mongo {
- ScopedTimer::ScopedTimer(long long* counter) :
- _counter(counter),
- _start(Listener::getElapsedTimeMillis()) {
- }
+ScopedTimer::ScopedTimer(long long* counter)
+ : _counter(counter), _start(Listener::getElapsedTimeMillis()) {}
- ScopedTimer::~ScopedTimer() {
- long long elapsed = Listener::getElapsedTimeMillis() - _start;
- *_counter += elapsed;
- }
+ScopedTimer::~ScopedTimer() {
+ long long elapsed = Listener::getElapsedTimeMillis() - _start;
+ *_counter += elapsed;
+}
} // namespace mongo
diff --git a/src/mongo/db/exec/scoped_timer.h b/src/mongo/db/exec/scoped_timer.h
index fa4c0d1f5c8..3e1c29fe719 100644
--- a/src/mongo/db/exec/scoped_timer.h
+++ b/src/mongo/db/exec/scoped_timer.h
@@ -32,26 +32,27 @@
namespace mongo {
- /**
- * This class increments a counter by a rough estimate of the time elapsed since its
- * construction when it goes out of scope.
- */
- class ScopedTimer {
- MONGO_DISALLOW_COPYING(ScopedTimer);
- public:
- ScopedTimer(long long* counter);
-
- ~ScopedTimer();
-
- private:
- // Default constructor disallowed.
- ScopedTimer();
-
- // Reference to the counter that we are incrementing with the elapsed time.
- long long* _counter;
-
- // Time at which the timer was constructed.
- long long _start;
- };
+/**
+ * This class increments a counter by a rough estimate of the time elapsed since its
+ * construction when it goes out of scope.
+ */
+class ScopedTimer {
+ MONGO_DISALLOW_COPYING(ScopedTimer);
+
+public:
+ ScopedTimer(long long* counter);
+
+ ~ScopedTimer();
+
+private:
+ // Default constructor disallowed.
+ ScopedTimer();
+
+ // Reference to the counter that we are incrementing with the elapsed time.
+ long long* _counter;
+
+ // Time at which the timer was constructed.
+ long long _start;
+};
} // namespace mongo
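
The timer is purely RAII: the constructor records a start time and the destructor adds the elapsed milliseconds to the referenced counter. A small sketch of the idiom the stages above use inside work():

    long long executionTimeMillis = 0;
    {
        ScopedTimer timer(&executionTimeMillis);
        // ... the unit of work being measured ...
    }  // destructor runs here and adds the elapsed time to executionTimeMillis
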
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 2727b1c99f8..8f2f3005df1 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -38,130 +38,128 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
+using std::unique_ptr;
+using std::vector;
- // static
- const char* ShardFilterStage::kStageType = "SHARDING_FILTER";
+// static
+const char* ShardFilterStage::kStageType = "SHARDING_FILTER";
- ShardFilterStage::ShardFilterStage(const CollectionMetadataPtr& metadata,
- WorkingSet* ws,
- PlanStage* child)
- : _ws(ws), _child(child), _commonStats(kStageType), _metadata(metadata) { }
+ShardFilterStage::ShardFilterStage(const CollectionMetadataPtr& metadata,
+ WorkingSet* ws,
+ PlanStage* child)
+ : _ws(ws), _child(child), _commonStats(kStageType), _metadata(metadata) {}
- ShardFilterStage::~ShardFilterStage() { }
+ShardFilterStage::~ShardFilterStage() {}
- bool ShardFilterStage::isEOF() { return _child->isEOF(); }
+bool ShardFilterStage::isEOF() {
+ return _child->isEOF();
+}
- PlanStage::StageState ShardFilterStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+PlanStage::StageState ShardFilterStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- // If we've returned as many results as we're limited to, isEOF will be true.
- if (isEOF()) { return PlanStage::IS_EOF; }
-
- StageState status = _child->work(out);
-
- if (PlanStage::ADVANCED == status) {
-
- // If we're sharded make sure that we don't return data that is not owned by us,
- // including pending documents from in-progress migrations and orphaned documents from
- // aborted migrations
- if (_metadata) {
-
- ShardKeyPattern shardKeyPattern(_metadata->getKeyPattern());
- WorkingSetMember* member = _ws->get(*out);
- WorkingSetMatchableDocument matchable(member);
- BSONObj shardKey = shardKeyPattern.extractShardKeyFromMatchable(matchable);
-
- if (shardKey.isEmpty()) {
-
- // We can't find a shard key for this document - this should never happen with
- // a non-fetched result unless our query planning is screwed up
- if (!member->hasObj()) {
-
- Status status(ErrorCodes::InternalError,
- "shard key not found after a covered stage, "
- "query planning has failed");
-
- // Fail loudly and cleanly in production, fatally in debug
- error() << status.toString();
- dassert(false);
-
- _ws->free(*out);
- *out = WorkingSetCommon::allocateStatusMember(_ws, status);
- return PlanStage::FAILURE;
- }
+ // If we've returned as many results as we're limited to, isEOF will be true.
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
+ }
- // Skip this document with a warning - no shard key should not be possible
- // unless manually inserting data into a shard
- warning() << "no shard key found in document "
- << member->obj.value().toString() << " "
- << "for shard key pattern " << _metadata->getKeyPattern() << ", "
- << "document may have been inserted manually into shard";
- }
+ StageState status = _child->work(out);
+
+ if (PlanStage::ADVANCED == status) {
+ // If we're sharded make sure that we don't return data that is not owned by us,
+ // including pending documents from in-progress migrations and orphaned documents from
+ // aborted migrations
+ if (_metadata) {
+ ShardKeyPattern shardKeyPattern(_metadata->getKeyPattern());
+ WorkingSetMember* member = _ws->get(*out);
+ WorkingSetMatchableDocument matchable(member);
+ BSONObj shardKey = shardKeyPattern.extractShardKeyFromMatchable(matchable);
+
+ if (shardKey.isEmpty()) {
+ // We can't find a shard key for this document - this should never happen with
+ // a non-fetched result unless our query planning is screwed up
+ if (!member->hasObj()) {
+ Status status(ErrorCodes::InternalError,
+ "shard key not found after a covered stage, "
+ "query planning has failed");
+
+ // Fail loudly and cleanly in production, fatally in debug
+ error() << status.toString();
+ dassert(false);
- if (!_metadata->keyBelongsToMe(shardKey)) {
_ws->free(*out);
- ++_specificStats.chunkSkips;
- return PlanStage::NEED_TIME;
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ return PlanStage::FAILURE;
}
+
+                // Skip this document with a warning - a missing shard key should not be
+                // possible unless data was manually inserted into a shard
+ warning() << "no shard key found in document " << member->obj.value().toString()
+ << " "
+ << "for shard key pattern " << _metadata->getKeyPattern() << ", "
+ << "document may have been inserted manually into shard";
}
- // If we're here either we have shard state and our doc passed, or we have no shard
- // state. Either way, we advance.
- ++_commonStats.advanced;
- return status;
- }
- else if (PlanStage::NEED_TIME == status) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == status) {
- ++_commonStats.needYield;
+ if (!_metadata->keyBelongsToMe(shardKey)) {
+ _ws->free(*out);
+ ++_specificStats.chunkSkips;
+ return PlanStage::NEED_TIME;
+ }
}
+ // If we're here either we have shard state and our doc passed, or we have no shard
+ // state. Either way, we advance.
+ ++_commonStats.advanced;
return status;
+ } else if (PlanStage::NEED_TIME == status) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == status) {
+ ++_commonStats.needYield;
}
- void ShardFilterStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
- }
-
- void ShardFilterStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
- }
-
- void ShardFilterStage::invalidate(OperationContext* txn,
- const RecordId& dl,
- InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
- }
-
- vector<PlanStage*> ShardFilterStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
-
- PlanStageStats* ShardFilterStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SHARDING_FILTER));
- ret->children.push_back(_child->getStats());
- ret->specific.reset(new ShardingFilterStats(_specificStats));
- return ret.release();
- }
-
- const CommonStats* ShardFilterStage::getCommonStats() const {
- return &_commonStats;
- }
-
- const SpecificStats* ShardFilterStage::getSpecificStats() const {
- return &_specificStats;
- }
+ return status;
+}
+
+void ShardFilterStage::saveState() {
+ ++_commonStats.yields;
+ _child->saveState();
+}
+
+void ShardFilterStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ _child->restoreState(opCtx);
+}
+
+void ShardFilterStage::invalidate(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
+}
+
+vector<PlanStage*> ShardFilterStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* ShardFilterStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SHARDING_FILTER));
+ ret->children.push_back(_child->getStats());
+ ret->specific.reset(new ShardingFilterStats(_specificStats));
+ return ret.release();
+}
+
+const CommonStats* ShardFilterStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* ShardFilterStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
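
Condensing the ADVANCED branch of work() above, the per-document decision is roughly as follows. This is illustrative only: 'metadata' and 'member' stand in for the stage's _metadata and the WorkingSetMember* just produced by the child.

    ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
    WorkingSetMatchableDocument matchable(member);
    BSONObj shardKey = shardKeyPattern.extractShardKeyFromMatchable(matchable);

    if (shardKey.isEmpty()) {
        // Covered result with no fetched object: treated as a planning failure.
        // Fetched object: warn and let the document through.
    } else if (!metadata->keyBelongsToMe(shardKey)) {
        // Not owned by this shard: free the member, count a chunkSkip,
        // and report NEED_TIME instead of returning the document.
    }
    // Otherwise the document is owned here and is returned as ADVANCED.
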
diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h
index 0703522b525..07b5d000bbb 100644
--- a/src/mongo/db/exec/shard_filter.h
+++ b/src/mongo/db/exec/shard_filter.h
@@ -36,76 +36,78 @@
namespace mongo {
- /**
- * This stage drops documents that didn't belong to the shard we're executing on at the time of
- * construction. This matches the contract for sharded cursorids which guarantees that a
- * StaleConfigException will be thrown early or the cursorid for its entire lifetime will return
- * documents matching the shard version set on the connection at the time of cursorid creation.
- *
- * A related system will ensure that the data migrated away from a shard will not be deleted as
- * long as there are active queries from before the migration. Currently, "active queries" is
- * defined by cursorids so it is important that the metadata used in this stage uses the same
- * version as the cursorid. Therefore, you must wrap any Runner using this Stage in a
- * ClientCursor during the same lock grab as constructing the Runner.
- *
- * BEGIN NOTE FROM GREG
- *
- * There are three sharded query contracts:
- *
- * 0) Migration commit takes the db lock - i.e. is serialized with writes and reads.
- * 1) No data should be returned from a query in ranges of migrations that committed after the
- * query started, or from ranges not owned when the query began.
- * 2) No migrated data should be removed from a shard while there are queries that were active
- * before the migration.
- *
- * As implementation details, collection metadata is used to determine the ranges of all data
- * not actively migrated (or orphaned). CursorIds are currently used to establish "active"
- * queries before migration commit.
- *
- * Combining all this: if a query is started in a db lock and acquires in that (same) lock the
- * collection metadata and a cursorId, the query will return results for exactly the ranges in
- * the metadata (though of arbitrary staleness). This is the sharded collection query contract.
- *
- * END NOTE FROM GREG
- *
- * Preconditions: Child must be fetched. TODO: when covering analysis is in just build doc
- * and check that against shard key. See SERVER-5022.
- */
- class ShardFilterStage : public PlanStage {
- public:
- ShardFilterStage(const CollectionMetadataPtr& metadata, WorkingSet* ws, PlanStage* child);
- virtual ~ShardFilterStage();
-
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
-
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
- virtual StageType stageType() const { return STAGE_SHARDING_FILTER; }
-
- virtual PlanStageStats* getStats();
-
- virtual const CommonStats* getCommonStats() const;
-
- virtual const SpecificStats* getSpecificStats() const;
-
- static const char* kStageType;
-
- private:
- WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
-
- // Stats
- CommonStats _commonStats;
- ShardingFilterStats _specificStats;
-
- // Note: it is important that this is the metadata from the time this stage is constructed.
- // See class comment for details.
- const CollectionMetadataPtr _metadata;
- };
+/**
+ * This stage drops documents that didn't belong to the shard we're executing on at the time of
+ * construction. This matches the contract for sharded cursorids which guarantees that a
+ * StaleConfigException will be thrown early or the cursorid for its entire lifetime will return
+ * documents matching the shard version set on the connection at the time of cursorid creation.
+ *
+ * A related system will ensure that the data migrated away from a shard will not be deleted as
+ * long as there are active queries from before the migration. Currently, "active queries" is
+ * defined by cursorids so it is important that the metadata used in this stage uses the same
+ * version as the cursorid. Therefore, you must wrap any Runner using this Stage in a
+ * ClientCursor during the same lock grab as constructing the Runner.
+ *
+ * BEGIN NOTE FROM GREG
+ *
+ * There are three sharded query contracts:
+ *
+ * 0) Migration commit takes the db lock - i.e. is serialized with writes and reads.
+ * 1) No data should be returned from a query in ranges of migrations that committed after the
+ * query started, or from ranges not owned when the query began.
+ * 2) No migrated data should be removed from a shard while there are queries that were active
+ * before the migration.
+ *
+ * As implementation details, collection metadata is used to determine the ranges of all data
+ * not actively migrated (or orphaned). CursorIds are currently used to establish "active"
+ * queries before migration commit.
+ *
+ * Combining all this: if a query is started in a db lock and acquires in that (same) lock the
+ * collection metadata and a cursorId, the query will return results for exactly the ranges in
+ * the metadata (though of arbitrary staleness). This is the sharded collection query contract.
+ *
+ * END NOTE FROM GREG
+ *
+ * Preconditions: Child must be fetched. TODO: when covering analysis is in, just build the
+ * doc and check that against the shard key. See SERVER-5022.
+ */
+class ShardFilterStage : public PlanStage {
+public:
+ ShardFilterStage(const CollectionMetadataPtr& metadata, WorkingSet* ws, PlanStage* child);
+ virtual ~ShardFilterStage();
+
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ virtual std::vector<PlanStage*> getChildren() const;
+
+ virtual StageType stageType() const {
+ return STAGE_SHARDING_FILTER;
+ }
+
+ virtual PlanStageStats* getStats();
+
+ virtual const CommonStats* getCommonStats() const;
+
+ virtual const SpecificStats* getSpecificStats() const;
+
+ static const char* kStageType;
+
+private:
+ WorkingSet* _ws;
+ std::unique_ptr<PlanStage> _child;
+
+ // Stats
+ CommonStats _commonStats;
+ ShardingFilterStats _specificStats;
+
+ // Note: it is important that this is the metadata from the time this stage is constructed.
+ // See class comment for details.
+ const CollectionMetadataPtr _metadata;
+};
} // namespace mongo
diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp
index 979e952b1d7..33e66178af0 100644
--- a/src/mongo/db/exec/skip.cpp
+++ b/src/mongo/db/exec/skip.cpp
@@ -33,103 +33,102 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
+using std::unique_ptr;
+using std::vector;
- // static
- const char* SkipStage::kStageType = "SKIP";
+// static
+const char* SkipStage::kStageType = "SKIP";
- SkipStage::SkipStage(int toSkip, WorkingSet* ws, PlanStage* child)
- : _ws(ws), _child(child), _toSkip(toSkip), _commonStats(kStageType) { }
+SkipStage::SkipStage(int toSkip, WorkingSet* ws, PlanStage* child)
+ : _ws(ws), _child(child), _toSkip(toSkip), _commonStats(kStageType) {}
- SkipStage::~SkipStage() { }
+SkipStage::~SkipStage() {}
- bool SkipStage::isEOF() { return _child->isEOF(); }
+bool SkipStage::isEOF() {
+ return _child->isEOF();
+}
- PlanStage::StageState SkipStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+PlanStage::StageState SkipStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState status = _child->work(&id);
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState status = _child->work(&id);
- if (PlanStage::ADVANCED == status) {
- // If we're still skipping results...
- if (_toSkip > 0) {
- // ...drop the result.
- --_toSkip;
- _ws->free(id);
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
-
- *out = id;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
- else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "skip stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- return status;
- }
- else if (PlanStage::NEED_TIME == status) {
+ if (PlanStage::ADVANCED == status) {
+ // If we're still skipping results...
+ if (_toSkip > 0) {
+ // ...drop the result.
+ --_toSkip;
+ _ws->free(id);
++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == status) {
- ++_commonStats.needYield;
- *out = id;
+ return PlanStage::NEED_TIME;
}
- // NEED_TIME, NEED_YIELD, ERROR, IS_EOF
+ *out = id;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ } else if (PlanStage::FAILURE == status || PlanStage::DEAD == status) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "skip stage failed to read in results from child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ }
return status;
+ } else if (PlanStage::NEED_TIME == status) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == status) {
+ ++_commonStats.needYield;
+ *out = id;
}
- void SkipStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
- }
-
- void SkipStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
- }
-
- void SkipStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
- }
-
- vector<PlanStage*> SkipStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
-
- PlanStageStats* SkipStage::getStats() {
- _commonStats.isEOF = isEOF();
- _specificStats.skip = _toSkip;
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SKIP));
- ret->specific.reset(new SkipStats(_specificStats));
- ret->children.push_back(_child->getStats());
- return ret.release();
- }
-
- const CommonStats* SkipStage::getCommonStats() const {
- return &_commonStats;
- }
-
- const SpecificStats* SkipStage::getSpecificStats() const {
- return &_specificStats;
- }
+ // NEED_TIME, NEED_YIELD, ERROR, IS_EOF
+ return status;
+}
+
+void SkipStage::saveState() {
+ ++_commonStats.yields;
+ _child->saveState();
+}
+
+void SkipStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ _child->restoreState(opCtx);
+}
+
+void SkipStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
+}
+
+vector<PlanStage*> SkipStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* SkipStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ _specificStats.skip = _toSkip;
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SKIP));
+ ret->specific.reset(new SkipStats(_specificStats));
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+const CommonStats* SkipStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* SkipStage::getSpecificStats() const {
+ return &_specificStats;
+}
} // namespace mongo
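
The reformatted work() above makes the skip contract easy to see: the stage buffers nothing, it simply converts its child's first '_toSkip' ADVANCED results into NEED_TIME so the runner keeps pulling. Below is a minimal, self-contained sketch of that idea on plain integers. It is illustrative only, not MongoDB code; State, Child, and Skip are simplified stand-ins for PlanStage::StageState, the child stage, and SkipStage.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    enum class State { ADVANCED, NEED_TIME, IS_EOF };

    // Stand-in for a child stage that yields one "document" per work() call.
    struct Child {
        std::vector<int> docs;
        std::size_t pos;
        State work(int* out) {
            if (pos == docs.size())
                return State::IS_EOF;
            *out = docs[pos++];
            return State::ADVANCED;
        }
    };

    // Stand-in for the skip stage: drop the first 'toSkip' results, pass on the rest.
    struct Skip {
        Child* child;
        int toSkip;
        State work(int* out) {
            int doc;
            State s = child->work(&doc);
            if (s == State::ADVANCED && toSkip > 0) {
                --toSkip;                  // drop the result; ask to be called again
                return State::NEED_TIME;
            }
            if (s == State::ADVANCED)
                *out = doc;
            return s;
        }
    };

    int main() {
        Child child{{1, 2, 3, 4, 5}, 0};
        Skip skip{&child, 2};
        int doc = 0;
        State s;
        while ((s = skip.work(&doc)) != State::IS_EOF) {
            if (s == State::ADVANCED)
                std::printf("%d\n", doc);  // prints 3, 4, 5
        }
        return 0;
    }
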
diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h
index 77bdd5786a5..f03d0135186 100644
--- a/src/mongo/db/exec/skip.h
+++ b/src/mongo/db/exec/skip.h
@@ -35,46 +35,48 @@
namespace mongo {
- /**
- * This stage implements skip functionality. It drops the first 'toSkip' results from its child
- * then returns the rest verbatim.
- *
- * Preconditions: None.
- */
- class SkipStage : public PlanStage {
- public:
- SkipStage(int toSkip, WorkingSet* ws, PlanStage* child);
- virtual ~SkipStage();
+/**
+ * This stage implements skip functionality. It drops the first 'toSkip' results from its child
+ * then returns the rest verbatim.
+ *
+ * Preconditions: None.
+ */
+class SkipStage : public PlanStage {
+public:
+ SkipStage(int toSkip, WorkingSet* ws, PlanStage* child);
+ virtual ~SkipStage();
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_SKIP; }
+ virtual StageType stageType() const {
+ return STAGE_SKIP;
+ }
- virtual PlanStageStats* getStats();
+ virtual PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- WorkingSet* _ws;
- std::unique_ptr<PlanStage> _child;
+private:
+ WorkingSet* _ws;
+ std::unique_ptr<PlanStage> _child;
- // We drop the first _toSkip results that we would have returned.
- int _toSkip;
+ // We drop the first _toSkip results that we would have returned.
+ int _toSkip;
- // Stats
- CommonStats _commonStats;
- SkipStats _specificStats;
- };
+ // Stats
+ CommonStats _commonStats;
+ SkipStats _specificStats;
+};
} // namespace mongo
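
For context on how the virtual methods declared here are consumed: a runner calls work() in a loop and branches on the returned StageState, mirroring the callee-side branches in skip.cpp and sort.cpp. A rough, self-contained sketch of that caller side follows. It is illustrative only; the StageState values echo the real enum, but Stage and runStage() are simplified stand-ins, not the PlanExecutor API.

    #include <cstdio>
    #include <functional>
    #include <vector>

    enum class StageState { ADVANCED, NEED_TIME, NEED_YIELD, IS_EOF, FAILURE };

    // A stage is modeled as a callable that may produce one result per call.
    using Stage = std::function<StageState(int* out)>;

    // Drive a stage until it is exhausted or fails, collecting its results.
    std::vector<int> runStage(const Stage& stage) {
        std::vector<int> results;
        int out = 0;
        while (true) {
            switch (stage(&out)) {
                case StageState::ADVANCED:
                    results.push_back(out);  // a result is ready
                    break;
                case StageState::NEED_TIME:
                case StageState::NEED_YIELD:
                    break;                   // call work() again (a real runner would yield here)
                case StageState::IS_EOF:
                    return results;          // the stage is done
                case StageState::FAILURE:
                    std::fprintf(stderr, "stage failed\n");
                    return results;
            }
        }
    }

    int main() {
        int next = 1;
        std::vector<int> out = runStage([&next](int* result) {
            if (next > 3)
                return StageState::IS_EOF;
            *result = next++;
            return StageState::ADVANCED;
        });
        std::printf("collected %zu results\n", out.size());
        return 0;
    }
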
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index 61dd09e1dbc..7a98b5113c2 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -45,552 +45,534 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::vector;
-
- // static
- const char* SortStage::kStageType = "SORT";
-
- SortStageKeyGenerator::SortStageKeyGenerator(const Collection* collection,
- const BSONObj& sortSpec,
- const BSONObj& queryObj) {
- _collection = collection;
- _hasBounds = false;
- _sortHasMeta = false;
- _rawSortSpec = sortSpec;
-
- // 'sortSpec' can be a mix of $meta and index key expressions. We pick it apart so that
- // we only generate Btree keys for the index key expressions.
-
- // The Btree key fields go in here. We pass this fake index key pattern to the Btree
- // key generator below as part of generating sort keys for the docs.
- BSONObjBuilder btreeBob;
-
- // The pattern we use to woCompare keys. Each field in 'sortSpec' will go in here with
- // a value of 1 or -1. The Btree key fields are verbatim, meta fields have a default.
- BSONObjBuilder comparatorBob;
-
- BSONObjIterator it(sortSpec);
- while (it.more()) {
- BSONElement elt = it.next();
- if (elt.isNumber()) {
- // Btree key. elt (should be) foo: 1 or foo: -1.
- comparatorBob.append(elt);
- btreeBob.append(elt);
- }
- else if (LiteParsedQuery::isTextScoreMeta(elt)) {
- // Sort text score decreasing by default. Field name doesn't matter but we choose
- // something that a user shouldn't ever have.
- comparatorBob.append("$metaTextScore", -1);
- _sortHasMeta = true;
- }
- else {
- // Sort spec. should have been validated before here.
- verify(false);
- }
- }
-
- // Our pattern for woComparing keys.
- _comparatorObj = comparatorBob.obj();
-
- // The fake index key pattern used to generate Btree keys.
- _btreeObj = btreeBob.obj();
-
- // If we're just sorting by meta, don't bother with all the key stuff.
- if (_btreeObj.isEmpty()) {
- return;
+using std::unique_ptr;
+using std::endl;
+using std::vector;
+
+// static
+const char* SortStage::kStageType = "SORT";
+
+SortStageKeyGenerator::SortStageKeyGenerator(const Collection* collection,
+ const BSONObj& sortSpec,
+ const BSONObj& queryObj) {
+ _collection = collection;
+ _hasBounds = false;
+ _sortHasMeta = false;
+ _rawSortSpec = sortSpec;
+
+ // 'sortSpec' can be a mix of $meta and index key expressions. We pick it apart so that
+ // we only generate Btree keys for the index key expressions.
+
+ // The Btree key fields go in here. We pass this fake index key pattern to the Btree
+ // key generator below as part of generating sort keys for the docs.
+ BSONObjBuilder btreeBob;
+
+ // The pattern we use to woCompare keys. Each field in 'sortSpec' will go in here with
+ // a value of 1 or -1. The Btree key fields are verbatim, meta fields have a default.
+ BSONObjBuilder comparatorBob;
+
+ BSONObjIterator it(sortSpec);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ if (elt.isNumber()) {
+            // Btree key. elt should be foo: 1 or foo: -1.
+ comparatorBob.append(elt);
+ btreeBob.append(elt);
+ } else if (LiteParsedQuery::isTextScoreMeta(elt)) {
+ // Sort text score decreasing by default. Field name doesn't matter but we choose
+ // something that a user shouldn't ever have.
+ comparatorBob.append("$metaTextScore", -1);
+ _sortHasMeta = true;
+ } else {
+            // Sort spec should have been validated before here.
+ verify(false);
}
+ }
- // We'll need to treat arrays as if we were to create an index over them. that is,
- // we may need to unnest the first level and consider each array element to decide
- // the sort order.
- std::vector<const char *> fieldNames;
- std::vector<BSONElement> fixed;
- BSONObjIterator btreeIt(_btreeObj);
- while (btreeIt.more()) {
- BSONElement patternElt = btreeIt.next();
- fieldNames.push_back(patternElt.fieldName());
- fixed.push_back(BSONElement());
- }
+ // Our pattern for woComparing keys.
+ _comparatorObj = comparatorBob.obj();
- _keyGen.reset(new BtreeKeyGeneratorV1(fieldNames, fixed, false /* not sparse */));
+ // The fake index key pattern used to generate Btree keys.
+ _btreeObj = btreeBob.obj();
- // The bounds checker only works on the Btree part of the sort key.
- getBoundsForSort(queryObj, _btreeObj);
+ // If we're just sorting by meta, don't bother with all the key stuff.
+ if (_btreeObj.isEmpty()) {
+ return;
+ }
- if (_hasBounds) {
- _boundsChecker.reset(new IndexBoundsChecker(&_bounds, _btreeObj, 1 /* == order */));
- }
+    // We'll need to treat arrays as if we were to create an index over them. That is,
+ // we may need to unnest the first level and consider each array element to decide
+ // the sort order.
+ std::vector<const char*> fieldNames;
+ std::vector<BSONElement> fixed;
+ BSONObjIterator btreeIt(_btreeObj);
+ while (btreeIt.more()) {
+ BSONElement patternElt = btreeIt.next();
+ fieldNames.push_back(patternElt.fieldName());
+ fixed.push_back(BSONElement());
}
- Status SortStageKeyGenerator::getSortKey(const WorkingSetMember& member,
- BSONObj* objOut) const {
- BSONObj btreeKeyToUse;
+ _keyGen.reset(new BtreeKeyGeneratorV1(fieldNames, fixed, false /* not sparse */));
- Status btreeStatus = getBtreeKey(member.obj.value(), &btreeKeyToUse);
- if (!btreeStatus.isOK()) {
- return btreeStatus;
- }
+ // The bounds checker only works on the Btree part of the sort key.
+ getBoundsForSort(queryObj, _btreeObj);
- if (!_sortHasMeta) {
- *objOut = btreeKeyToUse;
- return Status::OK();
- }
+ if (_hasBounds) {
+ _boundsChecker.reset(new IndexBoundsChecker(&_bounds, _btreeObj, 1 /* == order */));
+ }
+}
- BSONObjBuilder mergedKeyBob;
+Status SortStageKeyGenerator::getSortKey(const WorkingSetMember& member, BSONObj* objOut) const {
+ BSONObj btreeKeyToUse;
- // Merge metadata into the key.
- BSONObjIterator it(_rawSortSpec);
- BSONObjIterator btreeIt(btreeKeyToUse);
- while (it.more()) {
- BSONElement elt = it.next();
- if (elt.isNumber()) {
- // Merge btree key elt.
- mergedKeyBob.append(btreeIt.next());
- }
- else if (LiteParsedQuery::isTextScoreMeta(elt)) {
- // Add text score metadata
- double score = 0.0;
- if (member.hasComputed(WSM_COMPUTED_TEXT_SCORE)) {
- const TextScoreComputedData* scoreData
- = static_cast<const TextScoreComputedData*>(
- member.getComputed(WSM_COMPUTED_TEXT_SCORE));
- score = scoreData->getScore();
- }
- mergedKeyBob.append("$metaTextScore", score);
- }
- }
+ Status btreeStatus = getBtreeKey(member.obj.value(), &btreeKeyToUse);
+ if (!btreeStatus.isOK()) {
+ return btreeStatus;
+ }
- *objOut = mergedKeyBob.obj();
+ if (!_sortHasMeta) {
+ *objOut = btreeKeyToUse;
return Status::OK();
}
- Status SortStageKeyGenerator::getBtreeKey(const BSONObj& memberObj, BSONObj* objOut) const {
- // Not sorting by anything in the key, just bail out early.
- if (_btreeObj.isEmpty()) {
- *objOut = BSONObj();
- return Status::OK();
- }
-
- // We will sort '_data' in the same order an index over '_pattern' would have. This is
- // tricky. Consider the sort pattern {a:1} and the document {a:[1, 10]}. We have
- // potentially two keys we could use to sort on. Here we extract these keys.
- BSONObjCmp patternCmp(_btreeObj);
- BSONObjSet keys(patternCmp);
-
- try {
- _keyGen->getKeys(memberObj, &keys);
- }
- catch (const UserException& e) {
- // Probably a parallel array.
- if (BtreeKeyGenerator::ParallelArraysCode == e.getCode()) {
- return Status(ErrorCodes::BadValue,
- "cannot sort with keys that are parallel arrays");
+ BSONObjBuilder mergedKeyBob;
+
+ // Merge metadata into the key.
+ BSONObjIterator it(_rawSortSpec);
+ BSONObjIterator btreeIt(btreeKeyToUse);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ if (elt.isNumber()) {
+ // Merge btree key elt.
+ mergedKeyBob.append(btreeIt.next());
+ } else if (LiteParsedQuery::isTextScoreMeta(elt)) {
+ // Add text score metadata
+ double score = 0.0;
+ if (member.hasComputed(WSM_COMPUTED_TEXT_SCORE)) {
+ const TextScoreComputedData* scoreData = static_cast<const TextScoreComputedData*>(
+ member.getComputed(WSM_COMPUTED_TEXT_SCORE));
+ score = scoreData->getScore();
}
- else {
- return e.toStatus();
- }
- }
- catch (...) {
- return Status(ErrorCodes::InternalError, "unknown error during sort key generation");
+ mergedKeyBob.append("$metaTextScore", score);
}
+ }
- // Key generator isn't sparse so we should at least get an all-null key.
- invariant(!keys.empty());
+ *objOut = mergedKeyBob.obj();
+ return Status::OK();
+}
- // No bounds? No problem! Use the first key.
- if (!_hasBounds) {
- // Note that we sort 'keys' according to the pattern '_btreeObj'.
- *objOut = *keys.begin();
- return Status::OK();
- }
+Status SortStageKeyGenerator::getBtreeKey(const BSONObj& memberObj, BSONObj* objOut) const {
+ // Not sorting by anything in the key, just bail out early.
+ if (_btreeObj.isEmpty()) {
+ *objOut = BSONObj();
+ return Status::OK();
+ }
- // To decide which key to use in sorting, we must consider not only the sort pattern but
- // the query. Assume we have the query {a: {$gte: 5}} and a document {a:1}. That
- // document wouldn't match the query. As such, the key '1' in an array {a: [1, 10]}
- // should not be considered as being part of the result set and thus that array cannot
- // sort using the key '1'. To ensure that the keys we sort by are valid w.r.t. the
- // query we use a bounds checker.
- verify(NULL != _boundsChecker.get());
- for (BSONObjSet::const_iterator it = keys.begin(); it != keys.end(); ++it) {
- if (_boundsChecker->isValidKey(*it)) {
- *objOut = *it;
- return Status::OK();
- }
+ // We will sort '_data' in the same order an index over '_pattern' would have. This is
+ // tricky. Consider the sort pattern {a:1} and the document {a:[1, 10]}. We have
+ // potentially two keys we could use to sort on. Here we extract these keys.
+ BSONObjCmp patternCmp(_btreeObj);
+ BSONObjSet keys(patternCmp);
+
+ try {
+ _keyGen->getKeys(memberObj, &keys);
+ } catch (const UserException& e) {
+ // Probably a parallel array.
+ if (BtreeKeyGenerator::ParallelArraysCode == e.getCode()) {
+ return Status(ErrorCodes::BadValue, "cannot sort with keys that are parallel arrays");
+ } else {
+ return e.toStatus();
}
+ } catch (...) {
+ return Status(ErrorCodes::InternalError, "unknown error during sort key generation");
+ }
+
+ // Key generator isn't sparse so we should at least get an all-null key.
+ invariant(!keys.empty());
- // No key is in our bounds.
- // TODO: will this ever happen? don't think it should.
+ // No bounds? No problem! Use the first key.
+ if (!_hasBounds) {
+ // Note that we sort 'keys' according to the pattern '_btreeObj'.
*objOut = *keys.begin();
return Status::OK();
}
- void SortStageKeyGenerator::getBoundsForSort(const BSONObj& queryObj, const BSONObj& sortObj) {
- QueryPlannerParams params;
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- // We're creating a "virtual index" with key pattern equal to the sort order.
- IndexEntry sortOrder(sortObj, IndexNames::BTREE, true, false, false, "doesnt_matter", NULL,
- BSONObj());
- params.indices.push_back(sortOrder);
-
- CanonicalQuery* rawQueryForSort;
- verify(CanonicalQuery::canonicalize(
- "fake_ns", queryObj, &rawQueryForSort, WhereCallbackNoop()).isOK());
- unique_ptr<CanonicalQuery> queryForSort(rawQueryForSort);
-
- vector<QuerySolution*> solns;
- LOG(5) << "Sort stage: Planning to obtain bounds for sort." << endl;
- QueryPlanner::plan(*queryForSort, params, &solns);
-
- // TODO: are there ever > 1 solns? If so, do we look for a specific soln?
- if (1 == solns.size()) {
- IndexScanNode* ixScan = NULL;
- QuerySolutionNode* rootNode = solns[0]->root.get();
-
- if (rootNode->getType() == STAGE_FETCH) {
- FetchNode* fetchNode = static_cast<FetchNode*>(rootNode);
- if (fetchNode->children[0]->getType() != STAGE_IXSCAN) {
- delete solns[0];
- // No bounds.
- return;
- }
- ixScan = static_cast<IndexScanNode*>(fetchNode->children[0]);
- }
- else if (rootNode->getType() == STAGE_IXSCAN) {
- ixScan = static_cast<IndexScanNode*>(rootNode);
- }
+ // To decide which key to use in sorting, we must consider not only the sort pattern but
+ // the query. Assume we have the query {a: {$gte: 5}} and a document {a:1}. That
+ // document wouldn't match the query. As such, the key '1' in an array {a: [1, 10]}
+ // should not be considered as being part of the result set and thus that array cannot
+ // sort using the key '1'. To ensure that the keys we sort by are valid w.r.t. the
+ // query we use a bounds checker.
+ verify(NULL != _boundsChecker.get());
+ for (BSONObjSet::const_iterator it = keys.begin(); it != keys.end(); ++it) {
+ if (_boundsChecker->isValidKey(*it)) {
+ *objOut = *it;
+ return Status::OK();
+ }
+ }
- if (ixScan) {
- _bounds.fields.swap(ixScan->bounds.fields);
- _hasBounds = true;
+ // No key is in our bounds.
+ // TODO: will this ever happen? don't think it should.
+ *objOut = *keys.begin();
+ return Status::OK();
+}
+
+void SortStageKeyGenerator::getBoundsForSort(const BSONObj& queryObj, const BSONObj& sortObj) {
+ QueryPlannerParams params;
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ // We're creating a "virtual index" with key pattern equal to the sort order.
+ IndexEntry sortOrder(
+ sortObj, IndexNames::BTREE, true, false, false, "doesnt_matter", NULL, BSONObj());
+ params.indices.push_back(sortOrder);
+
+ CanonicalQuery* rawQueryForSort;
+ verify(CanonicalQuery::canonicalize("fake_ns", queryObj, &rawQueryForSort, WhereCallbackNoop())
+ .isOK());
+ unique_ptr<CanonicalQuery> queryForSort(rawQueryForSort);
+
+ vector<QuerySolution*> solns;
+ LOG(5) << "Sort stage: Planning to obtain bounds for sort." << endl;
+ QueryPlanner::plan(*queryForSort, params, &solns);
+
+ // TODO: are there ever > 1 solns? If so, do we look for a specific soln?
+ if (1 == solns.size()) {
+ IndexScanNode* ixScan = NULL;
+ QuerySolutionNode* rootNode = solns[0]->root.get();
+
+ if (rootNode->getType() == STAGE_FETCH) {
+ FetchNode* fetchNode = static_cast<FetchNode*>(rootNode);
+ if (fetchNode->children[0]->getType() != STAGE_IXSCAN) {
+ delete solns[0];
+ // No bounds.
+ return;
}
+ ixScan = static_cast<IndexScanNode*>(fetchNode->children[0]);
+ } else if (rootNode->getType() == STAGE_IXSCAN) {
+ ixScan = static_cast<IndexScanNode*>(rootNode);
}
- for (size_t i = 0; i < solns.size(); ++i) {
- delete solns[i];
+ if (ixScan) {
+ _bounds.fields.swap(ixScan->bounds.fields);
+ _hasBounds = true;
}
}
- SortStage::WorkingSetComparator::WorkingSetComparator(BSONObj p) : pattern(p) { }
+ for (size_t i = 0; i < solns.size(); ++i) {
+ delete solns[i];
+ }
+}
+
+SortStage::WorkingSetComparator::WorkingSetComparator(BSONObj p) : pattern(p) {}
- bool SortStage::WorkingSetComparator::operator()(const SortableDataItem& lhs, const SortableDataItem& rhs) const {
- // False means ignore field names.
- int result = lhs.sortKey.woCompare(rhs.sortKey, pattern, false);
- if (0 != result) {
- return result < 0;
+bool SortStage::WorkingSetComparator::operator()(const SortableDataItem& lhs,
+ const SortableDataItem& rhs) const {
+ // False means ignore field names.
+ int result = lhs.sortKey.woCompare(rhs.sortKey, pattern, false);
+ if (0 != result) {
+ return result < 0;
+ }
+ // Indices use RecordId as an additional sort key so we must as well.
+ return lhs.loc < rhs.loc;
+}
+
+SortStage::SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child)
+ : _collection(params.collection),
+ _ws(ws),
+ _child(child),
+ _pattern(params.pattern),
+ _query(params.query),
+ _limit(params.limit),
+ _sorted(false),
+ _resultIterator(_data.end()),
+ _commonStats(kStageType),
+ _memUsage(0) {}
+
+SortStage::~SortStage() {}
+
+bool SortStage::isEOF() {
+ // We're done when our child has no more results, we've sorted the child's results, and
+ // we've returned all sorted results.
+ return _child->isEOF() && _sorted && (_data.end() == _resultIterator);
+}
+
+PlanStage::StageState SortStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
+
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ if (NULL == _sortKeyGen) {
+ // This is heavy and should be done as part of work().
+ _sortKeyGen.reset(new SortStageKeyGenerator(_collection, _pattern, _query));
+ _sortKeyComparator.reset(new WorkingSetComparator(_sortKeyGen->getSortComparator()));
+ // If limit > 1, we need to initialize _dataSet here to maintain ordered
+ // set of data items while fetching from the child stage.
+ if (_limit > 1) {
+ const WorkingSetComparator& cmp = *_sortKeyComparator;
+ _dataSet.reset(new SortableDataItemSet(cmp));
}
- // Indices use RecordId as an additional sort key so we must as well.
- return lhs.loc < rhs.loc;
+ return PlanStage::NEED_TIME;
}
- SortStage::SortStage(const SortStageParams& params,
- WorkingSet* ws,
- PlanStage* child)
- : _collection(params.collection),
- _ws(ws),
- _child(child),
- _pattern(params.pattern),
- _query(params.query),
- _limit(params.limit),
- _sorted(false),
- _resultIterator(_data.end()),
- _commonStats(kStageType),
- _memUsage(0) {
+ const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes);
+ if (_memUsage > maxBytes) {
+ mongoutils::str::stream ss;
+ ss << "Sort operation used more than the maximum " << maxBytes
+ << " bytes of RAM. Add an index, or specify a smaller limit.";
+ Status status(ErrorCodes::OperationFailed, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ return PlanStage::FAILURE;
}
- SortStage::~SortStage() { }
-
- bool SortStage::isEOF() {
- // We're done when our child has no more results, we've sorted the child's results, and
- // we've returned all sorted results.
- return _child->isEOF() && _sorted && (_data.end() == _resultIterator);
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
}
- PlanStage::StageState SortStage::work(WorkingSetID* out) {
- ++_commonStats.works;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- if (NULL == _sortKeyGen) {
- // This is heavy and should be done as part of work().
- _sortKeyGen.reset(new SortStageKeyGenerator(_collection, _pattern, _query));
- _sortKeyComparator.reset(new WorkingSetComparator(_sortKeyGen->getSortComparator()));
- // If limit > 1, we need to initialize _dataSet here to maintain ordered
- // set of data items while fetching from the child stage.
- if (_limit > 1) {
- const WorkingSetComparator& cmp = *_sortKeyComparator;
- _dataSet.reset(new SortableDataItemSet(cmp));
- }
- return PlanStage::NEED_TIME;
- }
+ // Still reading in results to sort.
+ if (!_sorted) {
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ StageState code = _child->work(&id);
- const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes);
- if (_memUsage > maxBytes) {
- mongoutils::str::stream ss;
- ss << "Sort operation used more than the maximum " << maxBytes
- << " bytes of RAM. Add an index, or specify a smaller limit.";
- Status status(ErrorCodes::OperationFailed, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- return PlanStage::FAILURE;
- }
+ if (PlanStage::ADVANCED == code) {
+ // Add it into the map for quick invalidation if it has a valid RecordId.
+ // A RecordId may be invalidated at any time (during a yield). We need to get into
+ // the WorkingSet as quickly as possible to handle it.
+ WorkingSetMember* member = _ws->get(id);
- if (isEOF()) { return PlanStage::IS_EOF; }
-
- // Still reading in results to sort.
- if (!_sorted) {
- WorkingSetID id = WorkingSet::INVALID_ID;
- StageState code = _child->work(&id);
-
- if (PlanStage::ADVANCED == code) {
- // Add it into the map for quick invalidation if it has a valid RecordId.
- // A RecordId may be invalidated at any time (during a yield). We need to get into
- // the WorkingSet as quickly as possible to handle it.
- WorkingSetMember* member = _ws->get(id);
-
- // Planner must put a fetch before we get here.
- verify(member->hasObj());
-
- // We might be sorting something that was invalidated at some point.
- if (member->hasLoc()) {
- _wsidByDiskLoc[member->loc] = id;
- }
-
- // The data remains in the WorkingSet and we wrap the WSID with the sort key.
- SortableDataItem item;
- Status sortKeyStatus = _sortKeyGen->getSortKey(*member, &item.sortKey);
- if (!_sortKeyGen->getSortKey(*member, &item.sortKey).isOK()) {
- *out = WorkingSetCommon::allocateStatusMember(_ws, sortKeyStatus);
- return PlanStage::FAILURE;
- }
- item.wsid = id;
- if (member->hasLoc()) {
- // The RecordId breaks ties when sorting two WSMs with the same sort key.
- item.loc = member->loc;
- }
-
- addToBuffer(item);
-
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else if (PlanStage::IS_EOF == code) {
- // TODO: We don't need the lock for this. We could ask for a yield and do this work
- // unlocked. Also, this is performing a lot of work for one call to work(...)
- sortBuffer();
- _resultIterator = _data.begin();
- _sorted = true;
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "sort stage failed to read in results to sort from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
- return code;
+ // Planner must put a fetch before we get here.
+ verify(member->hasObj());
+
+ // We might be sorting something that was invalidated at some point.
+ if (member->hasLoc()) {
+ _wsidByDiskLoc[member->loc] = id;
}
- else if (PlanStage::NEED_TIME == code) {
- ++_commonStats.needTime;
+
+ // The data remains in the WorkingSet and we wrap the WSID with the sort key.
+ SortableDataItem item;
+ Status sortKeyStatus = _sortKeyGen->getSortKey(*member, &item.sortKey);
+            if (!sortKeyStatus.isOK()) {
+ *out = WorkingSetCommon::allocateStatusMember(_ws, sortKeyStatus);
+ return PlanStage::FAILURE;
}
- else if (PlanStage::NEED_YIELD == code) {
- ++_commonStats.needYield;
- *out = id;
+ item.wsid = id;
+ if (member->hasLoc()) {
+ // The RecordId breaks ties when sorting two WSMs with the same sort key.
+ item.loc = member->loc;
}
- return code;
- }
-
- // Returning results.
- verify(_resultIterator != _data.end());
- verify(_sorted);
- *out = _resultIterator->wsid;
- _resultIterator++;
+ addToBuffer(item);
- // If we're returning something, take it out of our DL -> WSID map so that future
- // calls to invalidate don't cause us to take action for a DL we're done with.
- WorkingSetMember* member = _ws->get(*out);
- if (member->hasLoc()) {
- _wsidByDiskLoc.erase(member->loc);
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::IS_EOF == code) {
+ // TODO: We don't need the lock for this. We could ask for a yield and do this work
+ // unlocked. Also, this is performing a lot of work for one call to work(...)
+ sortBuffer();
+ _resultIterator = _data.begin();
+ _sorted = true;
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::FAILURE == code || PlanStage::DEAD == code) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "sort stage failed to read in results to sort from child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
+ }
+ return code;
+ } else if (PlanStage::NEED_TIME == code) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == code) {
+ ++_commonStats.needYield;
+ *out = id;
}
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
-
- void SortStage::saveState() {
- ++_commonStats.yields;
- _child->saveState();
+ return code;
}
- void SortStage::restoreState(OperationContext* opCtx) {
- ++_commonStats.unyields;
- _child->restoreState(opCtx);
+ // Returning results.
+ verify(_resultIterator != _data.end());
+ verify(_sorted);
+ *out = _resultIterator->wsid;
+ _resultIterator++;
+
+ // If we're returning something, take it out of our DL -> WSID map so that future
+ // calls to invalidate don't cause us to take action for a DL we're done with.
+ WorkingSetMember* member = _ws->get(*out);
+ if (member->hasLoc()) {
+ _wsidByDiskLoc.erase(member->loc);
}
- void SortStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
-
- // If we have a deletion, we can fetch and carry on.
- // If we have a mutation, it's easier to fetch and use the previous document.
- // So, no matter what, fetch and keep the doc in play.
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+}
- // _data contains indices into the WorkingSet, not actual data. If a WorkingSetMember in
- // the WorkingSet needs to change state as a result of a RecordId invalidation, it will still
- // be at the same spot in the WorkingSet. As such, we don't need to modify _data.
- DataMap::iterator it = _wsidByDiskLoc.find(dl);
+void SortStage::saveState() {
+ ++_commonStats.yields;
+ _child->saveState();
+}
- // If we're holding on to data that's got the RecordId we're invalidating...
- if (_wsidByDiskLoc.end() != it) {
- // Grab the WSM that we're nuking.
- WorkingSetMember* member = _ws->get(it->second);
- verify(member->loc == dl);
+void SortStage::restoreState(OperationContext* opCtx) {
+ ++_commonStats.unyields;
+ _child->restoreState(opCtx);
+}
- WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
+void SortStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
- // Remove the RecordId from our set of active DLs.
- _wsidByDiskLoc.erase(it);
- ++_specificStats.forcedFetches;
- }
- }
+ // If we have a deletion, we can fetch and carry on.
+ // If we have a mutation, it's easier to fetch and use the previous document.
+ // So, no matter what, fetch and keep the doc in play.
- vector<PlanStage*> SortStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
+ // _data contains indices into the WorkingSet, not actual data. If a WorkingSetMember in
+ // the WorkingSet needs to change state as a result of a RecordId invalidation, it will still
+ // be at the same spot in the WorkingSet. As such, we don't need to modify _data.
+ DataMap::iterator it = _wsidByDiskLoc.find(dl);
- PlanStageStats* SortStage::getStats() {
- _commonStats.isEOF = isEOF();
- const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes);
- _specificStats.memLimit = maxBytes;
- _specificStats.memUsage = _memUsage;
- _specificStats.limit = _limit;
- _specificStats.sortPattern = _pattern.getOwned();
-
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SORT));
- ret->specific.reset(new SortStats(_specificStats));
- ret->children.push_back(_child->getStats());
- return ret.release();
- }
+ // If we're holding on to data that's got the RecordId we're invalidating...
+ if (_wsidByDiskLoc.end() != it) {
+ // Grab the WSM that we're nuking.
+ WorkingSetMember* member = _ws->get(it->second);
+ verify(member->loc == dl);
- const CommonStats* SortStage::getCommonStats() const {
- return &_commonStats;
- }
+ WorkingSetCommon::fetchAndInvalidateLoc(txn, member, _collection);
- const SpecificStats* SortStage::getSpecificStats() const {
- return &_specificStats;
+ // Remove the RecordId from our set of active DLs.
+ _wsidByDiskLoc.erase(it);
+ ++_specificStats.forcedFetches;
}
+}
+
+vector<PlanStage*> SortStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* SortStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ const size_t maxBytes = static_cast<size_t>(internalQueryExecMaxBlockingSortBytes);
+ _specificStats.memLimit = maxBytes;
+ _specificStats.memUsage = _memUsage;
+ _specificStats.limit = _limit;
+ _specificStats.sortPattern = _pattern.getOwned();
+
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SORT));
+ ret->specific.reset(new SortStats(_specificStats));
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+const CommonStats* SortStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* SortStage::getSpecificStats() const {
+ return &_specificStats;
+}
- /**
- * addToBuffer() and sortBuffer() work differently based on the
- * configured limit. addToBuffer() is also responsible for
- * performing some accounting on the overall memory usage to
- * make sure we're not using too much memory.
- *
- * limit == 0:
- * addToBuffer() - Adds item to vector.
- * sortBuffer() - Sorts vector.
- * limit == 1:
- * addToBuffer() - Replaces first item in vector with max of
- * current and new item.
- * Updates memory usage if item was replaced.
- * sortBuffer() - Does nothing.
- * limit > 1:
- * addToBuffer() - Does not update vector. Adds item to set.
- * If size of set exceeds limit, remove item from set
- * with lowest key. Updates memory usage accordingly.
- * sortBuffer() - Copies items from set to vectors.
- */
- void SortStage::addToBuffer(const SortableDataItem& item) {
- // Holds ID of working set member to be freed at end of this function.
- WorkingSetID wsidToFree = WorkingSet::INVALID_ID;
-
- if (_limit == 0) {
+/**
+ * addToBuffer() and sortBuffer() work differently based on the
+ * configured limit. addToBuffer() is also responsible for
+ * performing some accounting on the overall memory usage to
+ * make sure we're not using too much memory.
+ *
+ * limit == 0:
+ * addToBuffer() - Adds item to vector.
+ * sortBuffer() - Sorts vector.
+ * limit == 1:
+ * addToBuffer() - Replaces first item in vector with max of
+ * current and new item.
+ * Updates memory usage if item was replaced.
+ * sortBuffer() - Does nothing.
+ * limit > 1:
+ * addToBuffer() - Does not update vector. Adds item to set.
+ * If size of set exceeds limit, remove item from set
+ * with lowest key. Updates memory usage accordingly.
+ * sortBuffer() - Copies items from set to vectors.
+ */
+void SortStage::addToBuffer(const SortableDataItem& item) {
+ // Holds ID of working set member to be freed at end of this function.
+ WorkingSetID wsidToFree = WorkingSet::INVALID_ID;
+
+ if (_limit == 0) {
+ _data.push_back(item);
+ _memUsage += _ws->get(item.wsid)->getMemUsage();
+ } else if (_limit == 1) {
+ if (_data.empty()) {
_data.push_back(item);
- _memUsage += _ws->get(item.wsid)->getMemUsage();
+ _memUsage = _ws->get(item.wsid)->getMemUsage();
+ return;
}
- else if (_limit == 1) {
- if (_data.empty()) {
- _data.push_back(item);
- _memUsage = _ws->get(item.wsid)->getMemUsage();
- return;
- }
- wsidToFree = item.wsid;
- const WorkingSetComparator& cmp = *_sortKeyComparator;
- // Compare new item with existing item in vector.
- if (cmp(item, _data[0])) {
- wsidToFree = _data[0].wsid;
- _data[0] = item;
- _memUsage = _ws->get(item.wsid)->getMemUsage();
- }
+ wsidToFree = item.wsid;
+ const WorkingSetComparator& cmp = *_sortKeyComparator;
+ // Compare new item with existing item in vector.
+ if (cmp(item, _data[0])) {
+ wsidToFree = _data[0].wsid;
+ _data[0] = item;
+ _memUsage = _ws->get(item.wsid)->getMemUsage();
}
- else {
- // Update data item set instead of vector
- // Limit not reached - insert and return
- vector<SortableDataItem>::size_type limit(_limit);
- if (_dataSet->size() < limit) {
- _dataSet->insert(item);
- _memUsage += _ws->get(item.wsid)->getMemUsage();
- return;
- }
- // Limit will be exceeded - compare with item with lowest key
- // If new item does not have a lower key value than last item,
- // do nothing.
- wsidToFree = item.wsid;
- SortableDataItemSet::const_iterator lastItemIt = --(_dataSet->end());
- const SortableDataItem& lastItem = *lastItemIt;
- const WorkingSetComparator& cmp = *_sortKeyComparator;
- if (cmp(item, lastItem)) {
- _memUsage -= _ws->get(lastItem.wsid)->getMemUsage();
- _memUsage += _ws->get(item.wsid)->getMemUsage();
- wsidToFree = lastItem.wsid;
- // According to std::set iterator validity rules,
- // it does not matter which of erase()/insert() happens first.
- // Here, we choose to erase first to release potential resources
- // used by the last item and to keep the scope of the iterator to a minimum.
- _dataSet->erase(lastItemIt);
- _dataSet->insert(item);
- }
+ } else {
+ // Update data item set instead of vector
+ // Limit not reached - insert and return
+ vector<SortableDataItem>::size_type limit(_limit);
+ if (_dataSet->size() < limit) {
+ _dataSet->insert(item);
+ _memUsage += _ws->get(item.wsid)->getMemUsage();
+ return;
}
-
- // If the working set ID is valid, remove from
- // RecordId invalidation map and free from working set.
- if (wsidToFree != WorkingSet::INVALID_ID) {
- WorkingSetMember* member = _ws->get(wsidToFree);
- if (member->hasLoc()) {
- _wsidByDiskLoc.erase(member->loc);
- }
- _ws->free(wsidToFree);
+ // Limit will be exceeded - compare with item with lowest key
+ // If new item does not have a lower key value than last item,
+ // do nothing.
+ wsidToFree = item.wsid;
+ SortableDataItemSet::const_iterator lastItemIt = --(_dataSet->end());
+ const SortableDataItem& lastItem = *lastItemIt;
+ const WorkingSetComparator& cmp = *_sortKeyComparator;
+ if (cmp(item, lastItem)) {
+ _memUsage -= _ws->get(lastItem.wsid)->getMemUsage();
+ _memUsage += _ws->get(item.wsid)->getMemUsage();
+ wsidToFree = lastItem.wsid;
+ // According to std::set iterator validity rules,
+ // it does not matter which of erase()/insert() happens first.
+ // Here, we choose to erase first to release potential resources
+ // used by the last item and to keep the scope of the iterator to a minimum.
+ _dataSet->erase(lastItemIt);
+ _dataSet->insert(item);
}
}
- void SortStage::sortBuffer() {
- if (_limit == 0) {
- const WorkingSetComparator& cmp = *_sortKeyComparator;
- std::sort(_data.begin(), _data.end(), cmp);
- }
- else if (_limit == 1) {
- // Buffer contains either 0 or 1 item so it is already in a sorted state.
- return;
- }
- else {
- // Set already contains items in sorted order, so we simply copy the items
- // from the set to the vector.
- // Release the memory for the set after the copy.
- vector<SortableDataItem> newData(_dataSet->begin(), _dataSet->end());
- _data.swap(newData);
- _dataSet.reset();
+ // If the working set ID is valid, remove from
+ // RecordId invalidation map and free from working set.
+ if (wsidToFree != WorkingSet::INVALID_ID) {
+ WorkingSetMember* member = _ws->get(wsidToFree);
+ if (member->hasLoc()) {
+ _wsidByDiskLoc.erase(member->loc);
}
+ _ws->free(wsidToFree);
+ }
+}
+
+void SortStage::sortBuffer() {
+ if (_limit == 0) {
+ const WorkingSetComparator& cmp = *_sortKeyComparator;
+ std::sort(_data.begin(), _data.end(), cmp);
+ } else if (_limit == 1) {
+ // Buffer contains either 0 or 1 item so it is already in a sorted state.
+ return;
+ } else {
+ // Set already contains items in sorted order, so we simply copy the items
+ // from the set to the vector.
+ // Release the memory for the set after the copy.
+ vector<SortableDataItem> newData(_dataSet->begin(), _dataSet->end());
+ _data.swap(newData);
+ _dataSet.reset();
}
+}
} // namespace mongo
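
The addToBuffer()/sortBuffer() pair above amounts to three buffering strategies keyed off the limit: buffer everything and sort at the end (limit == 0), keep a running best (limit == 1), or keep a bounded ordered set and evict the current worst entry (limit > 1). Below is a compact, self-contained sketch of the same top-k idea on plain integers; it is illustrative only, and TopK and its members are not SortStage names.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <initializer_list>
    #include <iterator>
    #include <set>
    #include <vector>

    // Keep the smallest 'limit' values of a stream (limit == 0 means "keep all").
    class TopK {
    public:
        explicit TopK(std::size_t limit) : _limit(limit) {}

        void add(int v) {
            if (_limit == 0) {
                _data.push_back(v);                     // buffer everything
            } else if (_limit == 1) {
                if (_data.empty())
                    _data.push_back(v);
                else
                    _data[0] = std::min(_data[0], v);   // running best
            } else {
                if (_set.size() < _limit) {
                    _set.insert(v);                     // limit not reached yet
                } else if (v < *_set.rbegin()) {
                    _set.erase(std::prev(_set.end()));  // evict the current worst
                    _set.insert(v);
                }
            }
        }

        std::vector<int> sorted() {
            if (_limit == 0)
                std::sort(_data.begin(), _data.end());
            else if (_limit > 1)
                _data.assign(_set.begin(), _set.end()); // the set is already ordered
            return _data;
        }

    private:
        std::size_t _limit;
        std::vector<int> _data;
        std::multiset<int> _set;
    };

    int main() {
        TopK top(2);
        for (int v : {5, 1, 4, 2, 3})
            top.add(v);
        for (int v : top.sorted())
            std::printf("%d ", v);  // prints: 1 2
        std::printf("\n");
        return 0;
    }
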
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index 692b4b3b4cb..b04a3d07e43 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -41,235 +41,234 @@
namespace mongo {
- class BtreeKeyGenerator;
+class BtreeKeyGenerator;
- // Parameters that must be provided to a SortStage
- class SortStageParams {
- public:
- SortStageParams() : collection(NULL), limit(0) { }
+// Parameters that must be provided to a SortStage
+class SortStageParams {
+public:
+ SortStageParams() : collection(NULL), limit(0) {}
- // Used for resolving RecordIds to BSON
- const Collection* collection;
+ // Used for resolving RecordIds to BSON
+ const Collection* collection;
- // How we're sorting.
- BSONObj pattern;
+ // How we're sorting.
+ BSONObj pattern;
- // The query. Used to create the IndexBounds for the sorting.
- BSONObj query;
+ // The query. Used to create the IndexBounds for the sorting.
+ BSONObj query;
- // Equal to 0 for no limit.
- size_t limit;
- };
+ // Equal to 0 for no limit.
+ size_t limit;
+};
+/**
+ * Maps a WSM value to a BSONObj key that can then be sorted via BSONObjCmp.
+ */
+class SortStageKeyGenerator {
+public:
/**
- * Maps a WSM value to a BSONObj key that can then be sorted via BSONObjCmp.
+ * 'sortSpec' is the BSONObj in the .sort(...) clause.
+ *
+ * 'queryObj' is the BSONObj in the .find(...) clause. For multikey arrays we have to
+ * ensure that the value we select to sort by is within bounds generated by
+ * executing 'queryObj' using the virtual index with key pattern 'sortSpec'.
*/
- class SortStageKeyGenerator {
- public:
- /**
- * 'sortSpec' is the BSONObj in the .sort(...) clause.
- *
- * 'queryObj' is the BSONObj in the .find(...) clause. For multikey arrays we have to
- * ensure that the value we select to sort by is within bounds generated by
- * executing 'queryObj' using the virtual index with key pattern 'sortSpec'.
- */
- SortStageKeyGenerator(const Collection* collection,
- const BSONObj& sortSpec,
- const BSONObj& queryObj);
-
- /**
- * Returns the key used to sort 'member'.
- */
- Status getSortKey(const WorkingSetMember& member,
- BSONObj* objOut) const;
-
- /**
- * Passed to std::sort and used to sort the keys that are returned from getSortKey.
- *
- * Returned reference lives as long as 'this'.
- */
- const BSONObj& getSortComparator() const { return _comparatorObj; }
-
- private:
- Status getBtreeKey(const BSONObj& memberObj, BSONObj* objOut) const;
-
- /**
- * In order to emulate the existing sort behavior we must make unindexed sort behavior as
- * consistent as possible with indexed sort behavior. As such, we must only consider index
- * keys that we would encounter if we were answering the query using the sort-providing
- * index.
- *
- * Populates _hasBounds and _bounds.
- */
- void getBoundsForSort(const BSONObj& queryObj,
- const BSONObj& sortObj);
-
- // Not owned by us
- const Collection* _collection;
-
- // The object that we use to call woCompare on our resulting key. Is equal to _rawSortSpec
- // unless we have some $meta expressions. Each $meta expression has a default sort order.
- BSONObj _comparatorObj;
-
- // The raw object in .sort()
- BSONObj _rawSortSpec;
-
- // The sort pattern with any non-Btree sort pulled out.
- BSONObj _btreeObj;
-
- // If we're not sorting with a $meta value we can short-cut some work.
- bool _sortHasMeta;
-
- // True if the bounds are valid.
- bool _hasBounds;
-
- // The bounds generated from the query we're sorting.
- IndexBounds _bounds;
-
- // Helper to extract sorting keys from documents.
- std::unique_ptr<BtreeKeyGenerator> _keyGen;
-
- // Helper to filter keys, ensuring keys generated with _keyGen are within _bounds.
- std::unique_ptr<IndexBoundsChecker> _boundsChecker;
- };
+ SortStageKeyGenerator(const Collection* collection,
+ const BSONObj& sortSpec,
+ const BSONObj& queryObj);
+
+ /**
+ * Returns the key used to sort 'member'.
+ */
+ Status getSortKey(const WorkingSetMember& member, BSONObj* objOut) const;
+
+ /**
+ * Passed to std::sort and used to sort the keys that are returned from getSortKey.
+ *
+ * Returned reference lives as long as 'this'.
+ */
+ const BSONObj& getSortComparator() const {
+ return _comparatorObj;
+ }
+
+private:
+ Status getBtreeKey(const BSONObj& memberObj, BSONObj* objOut) const;
/**
- * Sorts the input received from the child according to the sort pattern provided.
+ * In order to emulate the existing sort behavior we must make unindexed sort behavior as
+ * consistent as possible with indexed sort behavior. As such, we must only consider index
+ * keys that we would encounter if we were answering the query using the sort-providing
+ * index.
*
- * Preconditions: For each field in 'pattern', all inputs in the child must handle a
- * getFieldDotted for that field.
+ * Populates _hasBounds and _bounds.
*/
- class SortStage : public PlanStage {
- public:
- SortStage(const SortStageParams& params,
- WorkingSet* ws,
- PlanStage* child);
+ void getBoundsForSort(const BSONObj& queryObj, const BSONObj& sortObj);
- virtual ~SortStage();
+ // Not owned by us
+ const Collection* _collection;
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
+ // The object that we use to call woCompare on our resulting key. Is equal to _rawSortSpec
+ // unless we have some $meta expressions. Each $meta expression has a default sort order.
+ BSONObj _comparatorObj;
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ // The raw object in .sort()
+ BSONObj _rawSortSpec;
- virtual std::vector<PlanStage*> getChildren() const;
+ // The sort pattern with any non-Btree sort pulled out.
+ BSONObj _btreeObj;
- virtual StageType stageType() const { return STAGE_SORT; }
+ // If we're not sorting with a $meta value we can short-cut some work.
+ bool _sortHasMeta;
- PlanStageStats* getStats();
+ // True if the bounds are valid.
+ bool _hasBounds;
- virtual const CommonStats* getCommonStats() const;
+ // The bounds generated from the query we're sorting.
+ IndexBounds _bounds;
- virtual const SpecificStats* getSpecificStats() const;
+ // Helper to extract sorting keys from documents.
+ std::unique_ptr<BtreeKeyGenerator> _keyGen;
- static const char* kStageType;
+ // Helper to filter keys, ensuring keys generated with _keyGen are within _bounds.
+ std::unique_ptr<IndexBoundsChecker> _boundsChecker;
+};
- private:
+/**
+ * Sorts the input received from the child according to the sort pattern provided.
+ *
+ * Preconditions: For each field in 'pattern', all inputs in the child must handle a
+ * getFieldDotted for that field.
+ */
+class SortStage : public PlanStage {
+public:
+ SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child);
+
+ virtual ~SortStage();
- //
- // Query Stage
- //
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
- // Not owned by us.
- const Collection* _collection;
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- // Not owned by us.
- WorkingSet* _ws;
+ virtual std::vector<PlanStage*> getChildren() const;
- // Where we're reading data to sort from.
- std::unique_ptr<PlanStage> _child;
+ virtual StageType stageType() const {
+ return STAGE_SORT;
+ }
- // The raw sort _pattern as expressed by the user
- BSONObj _pattern;
+ PlanStageStats* getStats();
- // The raw query as expressed by the user
- BSONObj _query;
+ virtual const CommonStats* getCommonStats() const;
- // Equal to 0 for no limit.
- size_t _limit;
+ virtual const SpecificStats* getSpecificStats() const;
- //
- // Sort key generation
- //
- std::unique_ptr<SortStageKeyGenerator> _sortKeyGen;
+ static const char* kStageType;
- //
- // Data storage
- //
+private:
+ //
+ // Query Stage
+ //
- // Have we sorted our data? If so, we can access _resultIterator. If not,
- // we're still populating _data.
- bool _sorted;
+ // Not owned by us.
+ const Collection* _collection;
- // Collection of working set members to sort with their respective sort key.
- struct SortableDataItem {
- WorkingSetID wsid;
- BSONObj sortKey;
- // Since we must replicate the behavior of a covered sort as much as possible we use the
- // RecordId to break sortKey ties.
- // See sorta.js.
- RecordId loc;
- };
+ // Not owned by us.
+ WorkingSet* _ws;
- // Comparison object for data buffers (vector and set).
- // Items are compared on (sortKey, loc). This is also how the items are
- // ordered in the indices.
- // Keys are compared using BSONObj::woCompare() with RecordId as a tie-breaker.
- struct WorkingSetComparator {
- explicit WorkingSetComparator(BSONObj p);
+ // Where we're reading data to sort from.
+ std::unique_ptr<PlanStage> _child;
- bool operator()(const SortableDataItem& lhs, const SortableDataItem& rhs) const;
+ // The raw sort _pattern as expressed by the user
+ BSONObj _pattern;
- BSONObj pattern;
- };
+ // The raw query as expressed by the user
+ BSONObj _query;
- /**
- * Inserts one item into data buffer (vector or set).
- * If limit is exceeded, remove item with lowest key.
- */
- void addToBuffer(const SortableDataItem& item);
+ // Equal to 0 for no limit.
+ size_t _limit;
- /**
- * Sorts data buffer.
- * Assumes no more items will be added to buffer.
- * If data is stored in set, copy set
- * contents to vector and clear set.
- */
- void sortBuffer();
+ //
+ // Sort key generation
+ //
+ std::unique_ptr<SortStageKeyGenerator> _sortKeyGen;
- // Comparator for data buffer
- // Initialization follows sort key generator
- std::unique_ptr<WorkingSetComparator> _sortKeyComparator;
+ //
+ // Data storage
+ //
+
+ // Have we sorted our data? If so, we can access _resultIterator. If not,
+ // we're still populating _data.
+ bool _sorted;
+
+ // Collection of working set members to sort with their respective sort key.
+ struct SortableDataItem {
+ WorkingSetID wsid;
+ BSONObj sortKey;
+ // Since we must replicate the behavior of a covered sort as much as possible we use the
+ // RecordId to break sortKey ties.
+ // See sorta.js.
+ RecordId loc;
+ };
- // The data we buffer and sort.
- // _data will contain sorted data when all data is gathered
- // and sorted.
- // When _limit is greater than 1 and not all data has been gathered from child stage,
- // _dataSet is used instead to maintain an ordered set of the incomplete data set.
- // When the data set is complete, we copy the items from _dataSet to _data which will
- // be used to provide the results of this stage through _resultIterator.
- std::vector<SortableDataItem> _data;
- typedef std::set<SortableDataItem, WorkingSetComparator> SortableDataItemSet;
- std::unique_ptr<SortableDataItemSet> _dataSet;
+ // Comparison object for data buffers (vector and set).
+ // Items are compared on (sortKey, loc). This is also how the items are
+ // ordered in the indices.
+ // Keys are compared using BSONObj::woCompare() with RecordId as a tie-breaker.
+ struct WorkingSetComparator {
+ explicit WorkingSetComparator(BSONObj p);
- // Iterates through _data post-sort returning it.
- std::vector<SortableDataItem>::iterator _resultIterator;
+ bool operator()(const SortableDataItem& lhs, const SortableDataItem& rhs) const;
- // We buffer a lot of data and we want to look it up by RecordId quickly upon invalidation.
- typedef unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
- DataMap _wsidByDiskLoc;
-
- //
- // Stats
- //
-
- CommonStats _commonStats;
- SortStats _specificStats;
-
- // The usage in bytes of all buffered data that we're sorting.
- size_t _memUsage;
+ BSONObj pattern;
};
+ /**
+ * Inserts one item into data buffer (vector or set).
+ * If limit is exceeded, remove item with lowest key.
+ */
+ void addToBuffer(const SortableDataItem& item);
+
+ /**
+ * Sorts data buffer.
+ * Assumes no more items will be added to buffer.
+ * If data is stored in set, copy set
+ * contents to vector and clear set.
+ */
+ void sortBuffer();
+
+ // Comparator for data buffer
+ // Initialization follows sort key generator
+ std::unique_ptr<WorkingSetComparator> _sortKeyComparator;
+
+ // The data we buffer and sort.
+ // _data will contain sorted data when all data is gathered
+ // and sorted.
+ // When _limit is greater than 1 and not all data has been gathered from child stage,
+ // _dataSet is used instead to maintain an ordered set of the incomplete data set.
+ // When the data set is complete, we copy the items from _dataSet to _data which will
+ // be used to provide the results of this stage through _resultIterator.
+ std::vector<SortableDataItem> _data;
+ typedef std::set<SortableDataItem, WorkingSetComparator> SortableDataItemSet;
+ std::unique_ptr<SortableDataItemSet> _dataSet;
+
+ // Iterates through _data post-sort returning it.
+ std::vector<SortableDataItem>::iterator _resultIterator;
+
+ // We buffer a lot of data and we want to look it up by RecordId quickly upon invalidation.
+ typedef unordered_map<RecordId, WorkingSetID, RecordId::Hasher> DataMap;
+ DataMap _wsidByDiskLoc;
+
+ //
+ // Stats
+ //
+
+ CommonStats _commonStats;
+ SortStats _specificStats;
+
+ // The usage in bytes of all buffered data that we're sorting.
+ size_t _memUsage;
+};
+
} // namespace mongo
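
One detail worth calling out from the comments above: buffered items are ordered on (sortKey, loc), with the RecordId breaking ties so that documents with equal sort keys come back in the order a covered index scan would produce them. A minimal sketch of that two-level comparison follows; it is illustrative only, and SortItem with its integer recordId is a stand-in for SortableDataItem and RecordId.

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct SortItem {
        std::string sortKey;  // stands in for the extracted BSON sort key
        long long recordId;   // stands in for RecordId, used only to break ties
    };

    // Order primarily by sort key, then by record id.
    bool lessThan(const SortItem& lhs, const SortItem& rhs) {
        if (lhs.sortKey != rhs.sortKey)
            return lhs.sortKey < rhs.sortKey;
        return lhs.recordId < rhs.recordId;
    }

    int main() {
        std::vector<SortItem> items = {{"b", 7}, {"a", 9}, {"a", 3}};
        std::sort(items.begin(), items.end(), lessThan);
        for (const SortItem& it : items)
            std::printf("%s/%lld\n", it.sortKey.c_str(), it.recordId);
        // prints a/3, a/9, b/7: duplicate keys are resolved by record id
        return 0;
    }
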
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index 8f6bf64ca79..0d4cc891952 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -41,201 +41,205 @@ using namespace mongo;
namespace {
- TEST(SortStageTest, SortEmptyWorkingSet) {
- WorkingSet ws;
+TEST(SortStageTest, SortEmptyWorkingSet) {
+ WorkingSet ws;
- // QueuedDataStage will be owned by SortStage.
- QueuedDataStage* ms = new QueuedDataStage(&ws);
- SortStageParams params;
- SortStage sort(params, &ws, ms);
+ // QueuedDataStage will be owned by SortStage.
+ QueuedDataStage* ms = new QueuedDataStage(&ws);
+ SortStageParams params;
+ SortStage sort(params, &ws, ms);
- // Check initial EOF state.
- ASSERT_TRUE(ms->isEOF());
- ASSERT_FALSE(sort.isEOF());
+ // Check initial EOF state.
+ ASSERT_TRUE(ms->isEOF());
+ ASSERT_FALSE(sort.isEOF());
- // First call to work() initializes sort key generator.
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = sort.work(&id);
- ASSERT_EQUALS(state, PlanStage::NEED_TIME);
+ // First call to work() initializes sort key generator.
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = sort.work(&id);
+ ASSERT_EQUALS(state, PlanStage::NEED_TIME);
- // Second call to work() sorts data in vector.
- state = sort.work(&id);
- ASSERT_EQUALS(state, PlanStage::NEED_TIME);
-
- // Finally we hit EOF.
- state = sort.work(&id);
- ASSERT_EQUALS(state, PlanStage::IS_EOF);
-
- ASSERT_TRUE(sort.isEOF());
- }
-
- /**
- * Test function to verify sort stage.
- * SortStageParams will be initialized using patternStr, queryStr and limit.
- * inputStr represents the input data set in a BSONObj.
- * {input: [doc1, doc2, doc3, ...]}
- * expectedStr represents the expected sorted data set.
- * {output: [docA, docB, docC, ...]}
- */
- void testWork(const char* patternStr, const char* queryStr, int limit,
- const char* inputStr, const char* expectedStr) {
-
- // WorkingSet is not owned by stages
- // so it's fine to declare
- WorkingSet ws;
-
- // QueuedDataStage will be owned by SortStage.
- QueuedDataStage* ms = new QueuedDataStage(&ws);
- BSONObj inputObj = fromjson(inputStr);
- BSONElement inputElt = inputObj.getField("input");
- ASSERT(inputElt.isABSONObj());
- BSONObjIterator inputIt(inputElt.embeddedObject());
- while (inputIt.more()) {
- BSONElement elt = inputIt.next();
- ASSERT(elt.isABSONObj());
- BSONObj obj = elt.embeddedObject();
-
- // Insert obj from input array into working set.
- WorkingSetMember wsm;
- wsm.state = WorkingSetMember::OWNED_OBJ;
- wsm.obj = Snapshotted<BSONObj>(SnapshotId(), obj);
- ms->pushBack(wsm);
- }
-
- // Initialize SortStageParams
- // Setting limit to 0 means no limit
- SortStageParams params;
- params.pattern = fromjson(patternStr);
- params.query = fromjson(queryStr);
- params.limit = limit;
-
- SortStage sort(params, &ws, ms);
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState state = PlanStage::NEED_TIME;
-
- // Keep working sort stage until data is available.
- while (state == PlanStage::NEED_TIME) {
- state = sort.work(&id);
- }
-
- // Child's state should be EOF when sort is ready to advance.
- ASSERT_TRUE(ms->isEOF());
-
- // While there's data to be retrieved, state should be equal to ADVANCED.
- // Insert documents into BSON document in this format:
- // {output: [docA, docB, docC, ...]}
- BSONObjBuilder bob;
- BSONArrayBuilder arr(bob.subarrayStart("output"));
- while (state == PlanStage::ADVANCED) {
- WorkingSetMember* member = ws.get(id);
- const BSONObj& obj = member->obj.value();
- arr.append(obj);
- state = sort.work(&id);
- }
- arr.doneFast();
- BSONObj outputObj = bob.obj();
-
- // Sort stage should be EOF after data is retrieved.
- ASSERT_EQUALS(state, PlanStage::IS_EOF);
- ASSERT_TRUE(sort.isEOF());
-
- // Finally, we get to compare the sorted results against what we expect.
- BSONObj expectedObj = fromjson(expectedStr);
- if (outputObj != expectedObj) {
- mongoutils::str::stream ss;
- // Even though we have the original string representation of the expected output,
- // we invoke BSONObj::toString() to get a format consistent with outputObj.
- ss << "Unexpected sort result with query=" << queryStr << "; pattern=" << patternStr
- << "; limit=" << limit << ":\n"
- << "Expected: " << expectedObj.toString() << "\n"
- << "Actual: " << outputObj.toString() << "\n";
- FAIL(ss);
- }
- }
-
- //
- // Limit values
- // The server interprets limit values from the user as follows:
- // 0: no limit on query results. This is passed along unchanged to the sort stage.
- // >0: soft limit. Also unchanged in sort stage.
- // <0: hard limit. Absolute value is stored in parsed query and passed to sort stage.
- // The sort stage treats both soft and hard limits in the same manner
-
- //
- // Sort without limit
- // Implementation should keep all items fetched from child.
- //
-
- TEST(SortStageTest, SortAscending) {
- testWork("{a: 1}", "{}", 0,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 1}, {a: 2}, {a: 3}]}");
- }
-
- TEST(SortStageTest, SortDescending) {
- testWork("{a: -1}", "{}", 0,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 3}, {a: 2}, {a: 1}]}");
- }
+ // Second call to work() sorts data in vector.
+ state = sort.work(&id);
+ ASSERT_EQUALS(state, PlanStage::NEED_TIME);
- TEST(SortStageTest, SortIrrelevantSortKey) {
- testWork("{b: 1}", "{}", 0,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 2}, {a: 1}, {a: 3}]}");
- }
+ // Finally we hit EOF.
+ state = sort.work(&id);
+ ASSERT_EQUALS(state, PlanStage::IS_EOF);
- //
- // Sorting with limit > 1
- // Implementation should retain top N items
- // and discard the rest.
- //
+ ASSERT_TRUE(sort.isEOF());
+}
- TEST(SortStageTest, SortAscendingWithLimit) {
- testWork("{a: 1}", "{}", 2,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 1}, {a: 2}]}");
+/**
+ * Test function to verify sort stage.
+ * SortStageParams will be initialized using patternStr, queryStr and limit.
+ * inputStr represents the input data set in a BSONObj.
+ * {input: [doc1, doc2, doc3, ...]}
+ * expectedStr represents the expected sorted data set.
+ * {output: [docA, docB, docC, ...]}
+ */
+void testWork(const char* patternStr,
+ const char* queryStr,
+ int limit,
+ const char* inputStr,
+ const char* expectedStr) {
+    // WorkingSet is not owned by stages,
+    // so it's fine to declare it on the stack here.
+ WorkingSet ws;
+
+ // QueuedDataStage will be owned by SortStage.
+ QueuedDataStage* ms = new QueuedDataStage(&ws);
+ BSONObj inputObj = fromjson(inputStr);
+ BSONElement inputElt = inputObj.getField("input");
+ ASSERT(inputElt.isABSONObj());
+ BSONObjIterator inputIt(inputElt.embeddedObject());
+ while (inputIt.more()) {
+ BSONElement elt = inputIt.next();
+ ASSERT(elt.isABSONObj());
+ BSONObj obj = elt.embeddedObject();
+
+ // Insert obj from input array into working set.
+ WorkingSetMember wsm;
+ wsm.state = WorkingSetMember::OWNED_OBJ;
+ wsm.obj = Snapshotted<BSONObj>(SnapshotId(), obj);
+ ms->pushBack(wsm);
}
- TEST(SortStageTest, SortDescendingWithLimit) {
- testWork("{a: -1}", "{}", 2,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 3}, {a: 2}]}");
- }
+ // Initialize SortStageParams
+ // Setting limit to 0 means no limit
+ SortStageParams params;
+ params.pattern = fromjson(patternStr);
+ params.query = fromjson(queryStr);
+ params.limit = limit;
- //
- // Sorting with limit > size of data set
- // Implementation should retain top N items
- // and discard the rest.
- //
+ SortStage sort(params, &ws, ms);
- TEST(SortStageTest, SortAscendingWithLimitGreaterThanInputSize) {
- testWork("{a: 1}", "{}", 10,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 1}, {a: 2}, {a: 3}]}");
- }
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState state = PlanStage::NEED_TIME;
- TEST(SortStageTest, SortDescendingWithLimitGreaterThanInputSize) {
- testWork("{a: -1}", "{}", 10,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 3}, {a: 2}, {a: 1}]}");
+ // Keep working sort stage until data is available.
+ while (state == PlanStage::NEED_TIME) {
+ state = sort.work(&id);
}
- //
- // Sorting with limit 1
- // Implementation should optimize this into a running maximum.
- //
-
- TEST(SortStageTest, SortAscendingWithLimitOfOne) {
- testWork("{a: 1}", "{}", 1,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 1}]}");
+ // Child's state should be EOF when sort is ready to advance.
+ ASSERT_TRUE(ms->isEOF());
+
+    // While there is still data to be retrieved, the state should be ADVANCED.
+    // Collect the returned documents into a BSON object in this format:
+    // {output: [docA, docB, docC, ...]}
+ BSONObjBuilder bob;
+ BSONArrayBuilder arr(bob.subarrayStart("output"));
+ while (state == PlanStage::ADVANCED) {
+ WorkingSetMember* member = ws.get(id);
+ const BSONObj& obj = member->obj.value();
+ arr.append(obj);
+ state = sort.work(&id);
}
-
- TEST(SortStageTest, SortDescendingWithLimitOfOne) {
- testWork("{a: -1}", "{}", 1,
- "{input: [{a: 2}, {a: 1}, {a: 3}]}",
- "{output: [{a: 3}]}");
+ arr.doneFast();
+ BSONObj outputObj = bob.obj();
+
+ // Sort stage should be EOF after data is retrieved.
+ ASSERT_EQUALS(state, PlanStage::IS_EOF);
+ ASSERT_TRUE(sort.isEOF());
+
+ // Finally, we get to compare the sorted results against what we expect.
+ BSONObj expectedObj = fromjson(expectedStr);
+ if (outputObj != expectedObj) {
+ mongoutils::str::stream ss;
+ // Even though we have the original string representation of the expected output,
+ // we invoke BSONObj::toString() to get a format consistent with outputObj.
+ ss << "Unexpected sort result with query=" << queryStr << "; pattern=" << patternStr
+ << "; limit=" << limit << ":\n"
+ << "Expected: " << expectedObj.toString() << "\n"
+ << "Actual: " << outputObj.toString() << "\n";
+ FAIL(ss);
}
+}
+
+//
+// Limit values
+// The server interprets limit values from the user as follows:
+// 0: no limit on query results. This is passed along unchanged to the sort stage.
+// >0: soft limit. Also passed along unchanged to the sort stage.
+// <0: hard limit. The absolute value is stored in the parsed query and passed to the sort stage.
+// The sort stage treats soft and hard limits in the same manner.
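+//
+// For example (illustrative only, not part of the original test suite): a user-supplied
+// hard limit of -2 reaches the sort stage as limit = 2 and behaves exactly like a soft
+// limit of 2, so a hypothetical call such as
+//     testWork("{a: 1}", "{}", 2,
+//              "{input: [{a: 2}, {a: 1}, {a: 3}]}",
+//              "{output: [{a: 1}, {a: 2}]}");
+// would keep only the two smallest documents.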
+
+//
+// Sort without limit
+// Implementation should keep all items fetched from child.
+//
+
+TEST(SortStageTest, SortAscending) {
+ testWork("{a: 1}",
+ "{}",
+ 0,
+ "{input: [{a: 2}, {a: 1}, {a: 3}]}",
+ "{output: [{a: 1}, {a: 2}, {a: 3}]}");
+}
+
+TEST(SortStageTest, SortDescending) {
+ testWork("{a: -1}",
+ "{}",
+ 0,
+ "{input: [{a: 2}, {a: 1}, {a: 3}]}",
+ "{output: [{a: 3}, {a: 2}, {a: 1}]}");
+}
+
+TEST(SortStageTest, SortIrrelevantSortKey) {
+ testWork("{b: 1}",
+ "{}",
+ 0,
+ "{input: [{a: 2}, {a: 1}, {a: 3}]}",
+ "{output: [{a: 2}, {a: 1}, {a: 3}]}");
+}
+
+//
+// Sorting with limit > 1
+// Implementation should retain top N items
+// and discard the rest.
+//
+
+TEST(SortStageTest, SortAscendingWithLimit) {
+ testWork("{a: 1}", "{}", 2, "{input: [{a: 2}, {a: 1}, {a: 3}]}", "{output: [{a: 1}, {a: 2}]}");
+}
+
+TEST(SortStageTest, SortDescendingWithLimit) {
+ testWork("{a: -1}", "{}", 2, "{input: [{a: 2}, {a: 1}, {a: 3}]}", "{output: [{a: 3}, {a: 2}]}");
+}
+
+//
+// Sorting with limit > size of data set
+// Implementation should keep all items,
+// since the limit exceeds the input size.
+//
+
+TEST(SortStageTest, SortAscendingWithLimitGreaterThanInputSize) {
+ testWork("{a: 1}",
+ "{}",
+ 10,
+ "{input: [{a: 2}, {a: 1}, {a: 3}]}",
+ "{output: [{a: 1}, {a: 2}, {a: 3}]}");
+}
+
+TEST(SortStageTest, SortDescendingWithLimitGreaterThanInputSize) {
+ testWork("{a: -1}",
+ "{}",
+ 10,
+ "{input: [{a: 2}, {a: 1}, {a: 3}]}",
+ "{output: [{a: 3}, {a: 2}, {a: 1}]}");
+}
+
+//
+// Sorting with limit 1
+// Implementation should optimize this into a running maximum.
+//
+
+TEST(SortStageTest, SortAscendingWithLimitOfOne) {
+ testWork("{a: 1}", "{}", 1, "{input: [{a: 2}, {a: 1}, {a: 3}]}", "{output: [{a: 1}]}");
+}
+
+TEST(SortStageTest, SortDescendingWithLimitOfOne) {
+ testWork("{a: -1}", "{}", 1, "{input: [{a: 2}, {a: 1}, {a: 3}]}", "{output: [{a: 3}]}");
+}
} // namespace
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index e648a97518a..3dc0aa8549b 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -59,328 +59,318 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::vector;
-
- /**
- * A command for manually constructing a query tree and running it.
- *
- * db.runCommand({stageDebug: {collection: collname, plan: rootNode}})
- *
- * The value of the filter field is a BSONObj that specifies values that fields must have. What
- * you'd pass to a matcher.
- *
- * Leaf Nodes:
- *
- * node -> {ixscan: {filter: {FILTER},
- * args: {indexKeyPattern: kpObj, start: startObj,
- * stop: stopObj, endInclusive: true/false, direction: -1/1,
- * limit: int}}}
- * node -> {cscan: {filter: {filter}, args: {direction: -1/1}}}
- * TODO: language for text.
- * node -> {text: {filter: {filter}, args: {search: "searchstr"}}}
- *
- * Internal Nodes:
- *
- * node -> {andHash: {args: { nodes: [node, node]}}}
- * node -> {andSorted: {args: { nodes: [node, node]}}}
- * node -> {or: {filter: {filter}, args: { dedup:bool, nodes:[node, node]}}}
- * node -> {fetch: {filter: {filter}, args: {node: node}}}
- * node -> {limit: {args: {node: node, num: posint}}}
- * node -> {skip: {args: {node: node, num: posint}}}
- * node -> {sort: {args: {node: node, pattern: objWithSortCriterion }}}
- * node -> {mergeSort: {args: {nodes: [node, node], pattern: objWithSortCriterion}}}
- * node -> {delete: {args: {node: node, isMulti: bool, shouldCallLogOp: bool}}}
- *
- * Forthcoming Nodes:
- *
- * node -> {dedup: {filter: {filter}, args: {node: node, field: field}}}
- * node -> {unwind: {filter: filter}, args: {node: node, field: field}}
- */
- class StageDebugCmd : public Command {
- public:
- StageDebugCmd() : Command("stageDebug") { }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- bool slaveOk() const { return false; }
- bool slaveOverrideOk() const { return false; }
- void help(std::stringstream& h) const { }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- // Command is testing-only, and can only be enabled at command line. Hence, no auth
- // check needed.
- }
+using std::unique_ptr;
+using std::string;
+using std::vector;
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- BSONElement argElt = cmdObj["stageDebug"];
- if (argElt.eoo() || !argElt.isABSONObj()) { return false; }
- BSONObj argObj = argElt.Obj();
-
- // Pull out the collection name.
- BSONElement collElt = argObj["collection"];
- if (collElt.eoo() || (String != collElt.type())) {
- return false;
- }
- string collName = collElt.String();
-
- // Need a context to get the actual Collection*
- // TODO A write lock is currently taken here to accommodate stages that perform writes
- // (e.g. DeleteStage). This should be changed to use a read lock for read-only
- // execution trees.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, dbname);
-
- // Make sure the collection is valid.
- Database* db = ctx.db();
- Collection* collection = db->getCollection(db->name() + '.' + collName);
- uassert(17446, "Couldn't find the collection " + collName, NULL != collection);
-
- // Pull out the plan
- BSONElement planElt = argObj["plan"];
- if (planElt.eoo() || !planElt.isABSONObj()) {
- return false;
- }
- BSONObj planObj = planElt.Obj();
+/**
+ * A command for manually constructing a query tree and running it.
+ *
+ * db.runCommand({stageDebug: {collection: collname, plan: rootNode}})
+ *
+ * The value of the filter field is a BSONObj that specifies values that fields must have. What
+ * you'd pass to a matcher.
+ *
+ * Leaf Nodes:
+ *
+ * node -> {ixscan: {filter: {FILTER},
+ *                   args: {keyPattern: kpObj, startKey: startObj,
+ *                          endKey: endObj, endKeyInclusive: true/false,
+ *                          direction: -1/1}}}
+ * node -> {cscan: {filter: {filter}, args: {direction: -1/1}}}
+ * TODO: language for text.
+ * node -> {text: {filter: {filter}, args: {search: "searchstr"}}}
+ *
+ * Internal Nodes:
+ *
+ * node -> {andHash: {args: { nodes: [node, node]}}}
+ * node -> {andSorted: {args: { nodes: [node, node]}}}
+ * node -> {or: {filter: {filter}, args: { dedup:bool, nodes:[node, node]}}}
+ * node -> {fetch: {filter: {filter}, args: {node: node}}}
+ * node -> {limit: {args: {node: node, num: posint}}}
+ * node -> {skip: {args: {node: node, num: posint}}}
+ * node -> {sort: {args: {node: node, pattern: objWithSortCriterion }}}
+ * node -> {mergeSort: {args: {nodes: [node, node], pattern: objWithSortCriterion}}}
+ * node -> {delete: {args: {node: node, isMulti: bool, shouldCallLogOp: bool}}}
+ *
+ * Forthcoming Nodes:
+ *
+ * node -> {dedup: {filter: {filter}, args: {node: node, field: field}}}
+ * node -> {unwind: {filter: filter}, args: {node: node, field: field}}
+ */
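+//
+// For example, a hypothetical invocation (assuming a collection named "test" with an
+// index on {a: 1}) that scans the index for 1 <= a <= 5 and fetches the full documents
+// might look like:
+//
+//   db.runCommand({stageDebug: {collection: "test", plan:
+//       {fetch: {args: {node:
+//           {ixscan: {args: {keyPattern: {a: 1},
+//                            startKey: {"": 1}, endKey: {"": 5},
+//                            endKeyInclusive: true, direction: 1}}}}}}}})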
+class StageDebugCmd : public Command {
+public:
+ StageDebugCmd() : Command("stageDebug") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ bool slaveOk() const {
+ return false;
+ }
+ bool slaveOverrideOk() const {
+ return false;
+ }
+ void help(std::stringstream& h) const {}
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // Command is testing-only, and can only be enabled at command line. Hence, no auth
+ // check needed.
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONElement argElt = cmdObj["stageDebug"];
+ if (argElt.eoo() || !argElt.isABSONObj()) {
+ return false;
+ }
+ BSONObj argObj = argElt.Obj();
- // Parse the plan into these.
- OwnedPointerVector<MatchExpression> exprs;
- unique_ptr<WorkingSet> ws(new WorkingSet());
+ // Pull out the collection name.
+ BSONElement collElt = argObj["collection"];
+ if (collElt.eoo() || (String != collElt.type())) {
+ return false;
+ }
+ string collName = collElt.String();
+
+ // Need a context to get the actual Collection*
+ // TODO A write lock is currently taken here to accommodate stages that perform writes
+ // (e.g. DeleteStage). This should be changed to use a read lock for read-only
+ // execution trees.
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
+ OldClientContext ctx(txn, dbname);
+
+ // Make sure the collection is valid.
+ Database* db = ctx.db();
+ Collection* collection = db->getCollection(db->name() + '.' + collName);
+ uassert(17446, "Couldn't find the collection " + collName, NULL != collection);
+
+ // Pull out the plan
+ BSONElement planElt = argObj["plan"];
+ if (planElt.eoo() || !planElt.isABSONObj()) {
+ return false;
+ }
+ BSONObj planObj = planElt.Obj();
- PlanStage* userRoot = parseQuery(txn, collection, planObj, ws.get(), &exprs);
- uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), NULL != userRoot);
+ // Parse the plan into these.
+ OwnedPointerVector<MatchExpression> exprs;
+ unique_ptr<WorkingSet> ws(new WorkingSet());
- // Add a fetch at the top for the user so we can get obj back for sure.
- // TODO: Do we want to do this for the user? I think so.
- PlanStage* rootFetch = new FetchStage(txn, ws.get(), userRoot, NULL, collection);
+ PlanStage* userRoot = parseQuery(txn, collection, planObj, ws.get(), &exprs);
+ uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), NULL != userRoot);
- PlanExecutor* rawExec;
- Status execStatus = PlanExecutor::make(txn, ws.release(), rootFetch, collection,
- PlanExecutor::YIELD_AUTO, &rawExec);
- fassert(28536, execStatus);
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ // Add a fetch at the top for the user so we can get obj back for sure.
+ // TODO: Do we want to do this for the user? I think so.
+ PlanStage* rootFetch = new FetchStage(txn, ws.get(), userRoot, NULL, collection);
- BSONArrayBuilder resultBuilder(result.subarrayStart("results"));
+ PlanExecutor* rawExec;
+ Status execStatus = PlanExecutor::make(
+ txn, ws.release(), rootFetch, collection, PlanExecutor::YIELD_AUTO, &rawExec);
+ fassert(28536, execStatus);
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- resultBuilder.append(obj);
- }
+ BSONArrayBuilder resultBuilder(result.subarrayStart("results"));
- resultBuilder.done();
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ resultBuilder.append(obj);
+ }
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "Plan executor error during StageDebug command: "
- << PlanExecutor::statestr(state)
- << ", stats: " << Explain::statsToBSON(*stats);
+ resultBuilder.done();
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during "
- << "StageDebug command: "
- << WorkingSetCommon::toStatusString(obj)));
- }
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "Plan executor error during StageDebug command: "
+ << PlanExecutor::statestr(state) << ", stats: " << Explain::statsToBSON(*stats);
- return true;
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream()
+ << "Executor error during "
+ << "StageDebug command: " << WorkingSetCommon::toStatusString(obj)));
}
- PlanStage* parseQuery(OperationContext* txn,
- Collection* collection,
- BSONObj obj,
- WorkingSet* workingSet,
- OwnedPointerVector<MatchExpression>* exprs) {
+ return true;
+ }
- BSONElement firstElt = obj.firstElement();
- if (!firstElt.isABSONObj()) { return NULL; }
- BSONObj paramObj = firstElt.Obj();
+ PlanStage* parseQuery(OperationContext* txn,
+ Collection* collection,
+ BSONObj obj,
+ WorkingSet* workingSet,
+ OwnedPointerVector<MatchExpression>* exprs) {
+ BSONElement firstElt = obj.firstElement();
+ if (!firstElt.isABSONObj()) {
+ return NULL;
+ }
+ BSONObj paramObj = firstElt.Obj();
- MatchExpression* matcher = NULL;
- BSONObj nodeArgs;
+ MatchExpression* matcher = NULL;
+ BSONObj nodeArgs;
- // Every node has these two fields.
- const string filterTag = "filter";
- const string argsTag = "args";
+ // Every node has these two fields.
+ const string filterTag = "filter";
+ const string argsTag = "args";
- BSONObjIterator it(paramObj);
- while (it.more()) {
- BSONElement e = it.next();
- if (!e.isABSONObj()) { return NULL; }
- BSONObj argObj = e.Obj();
- if (filterTag == e.fieldName()) {
- StatusWithMatchExpression swme = MatchExpressionParser::parse(
- argObj, WhereCallbackReal(txn, collection->ns().db()));
- if (!swme.isOK()) { return NULL; }
- // exprs is what will wind up deleting this.
- matcher = swme.getValue();
- verify(NULL != matcher);
- exprs->mutableVector().push_back(matcher);
- }
- else if (argsTag == e.fieldName()) {
- nodeArgs = argObj;
- }
- else {
- uasserted(16910, "Unknown fieldname " + string(e.fieldName())
- + " in query node " + obj.toString());
+ BSONObjIterator it(paramObj);
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (!e.isABSONObj()) {
+ return NULL;
+ }
+ BSONObj argObj = e.Obj();
+ if (filterTag == e.fieldName()) {
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(
+ argObj, WhereCallbackReal(txn, collection->ns().db()));
+ if (!swme.isOK()) {
return NULL;
}
+ // exprs is what will wind up deleting this.
+ matcher = swme.getValue();
+ verify(NULL != matcher);
+ exprs->mutableVector().push_back(matcher);
+ } else if (argsTag == e.fieldName()) {
+ nodeArgs = argObj;
+ } else {
+ uasserted(16910,
+ "Unknown fieldname " + string(e.fieldName()) + " in query node " +
+ obj.toString());
+ return NULL;
}
+ }
- string nodeName = firstElt.fieldName();
+ string nodeName = firstElt.fieldName();
- if ("ixscan" == nodeName) {
- // This'll throw if it's not an obj but that's OK.
- BSONObj keyPatternObj = nodeArgs["keyPattern"].Obj();
+ if ("ixscan" == nodeName) {
+ // This'll throw if it's not an obj but that's OK.
+ BSONObj keyPatternObj = nodeArgs["keyPattern"].Obj();
- IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByKeyPattern(txn, keyPatternObj);
- uassert(16890, "Can't find index: " + keyPatternObj.toString(), desc);
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, keyPatternObj);
+ uassert(16890, "Can't find index: " + keyPatternObj.toString(), desc);
- IndexScanParams params;
- params.descriptor = desc;
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = nodeArgs["startKey"].Obj();
- params.bounds.endKey = nodeArgs["endKey"].Obj();
- params.bounds.endKeyInclusive = nodeArgs["endKeyInclusive"].Bool();
- params.direction = nodeArgs["direction"].numberInt();
+ IndexScanParams params;
+ params.descriptor = desc;
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = nodeArgs["startKey"].Obj();
+ params.bounds.endKey = nodeArgs["endKey"].Obj();
+ params.bounds.endKeyInclusive = nodeArgs["endKeyInclusive"].Bool();
+ params.direction = nodeArgs["direction"].numberInt();
- return new IndexScan(txn, params, workingSet, matcher);
- }
- else if ("andHash" == nodeName) {
- uassert(16921, "Nodes argument must be provided to AND",
- nodeArgs["nodes"].isABSONObj());
-
- unique_ptr<AndHashStage> andStage(new AndHashStage(workingSet, collection));
-
- int nodesAdded = 0;
- BSONObjIterator it(nodeArgs["nodes"].Obj());
- while (it.more()) {
- BSONElement e = it.next();
- uassert(16922, "node of AND isn't an obj?: " + e.toString(),
- e.isABSONObj());
-
- PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
- uassert(16923, "Can't parse sub-node of AND: " + e.Obj().toString(),
- NULL != subNode);
- // takes ownership
- andStage->addChild(subNode);
- ++nodesAdded;
- }
+ return new IndexScan(txn, params, workingSet, matcher);
+ } else if ("andHash" == nodeName) {
+ uassert(
+ 16921, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- uassert(16927, "AND requires more than one child", nodesAdded >= 2);
+ unique_ptr<AndHashStage> andStage(new AndHashStage(workingSet, collection));
- return andStage.release();
+ int nodesAdded = 0;
+ BSONObjIterator it(nodeArgs["nodes"].Obj());
+ while (it.more()) {
+ BSONElement e = it.next();
+ uassert(16922, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
+
+ PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
+ uassert(
+ 16923, "Can't parse sub-node of AND: " + e.Obj().toString(), NULL != subNode);
+ // takes ownership
+ andStage->addChild(subNode);
+ ++nodesAdded;
}
- else if ("andSorted" == nodeName) {
- uassert(16924, "Nodes argument must be provided to AND",
- nodeArgs["nodes"].isABSONObj());
-
- unique_ptr<AndSortedStage> andStage(new AndSortedStage(workingSet, collection));
-
- int nodesAdded = 0;
- BSONObjIterator it(nodeArgs["nodes"].Obj());
- while (it.more()) {
- BSONElement e = it.next();
- uassert(16925, "node of AND isn't an obj?: " + e.toString(),
- e.isABSONObj());
-
- PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
- uassert(16926, "Can't parse sub-node of AND: " + e.Obj().toString(),
- NULL != subNode);
- // takes ownership
- andStage->addChild(subNode);
- ++nodesAdded;
- }
- uassert(16928, "AND requires more than one child", nodesAdded >= 2);
+ uassert(16927, "AND requires more than one child", nodesAdded >= 2);
- return andStage.release();
- }
- else if ("or" == nodeName) {
- uassert(16934, "Nodes argument must be provided to AND",
- nodeArgs["nodes"].isABSONObj());
- uassert(16935, "Dedup argument must be provided to OR",
- !nodeArgs["dedup"].eoo());
- BSONObjIterator it(nodeArgs["nodes"].Obj());
- unique_ptr<OrStage> orStage(new OrStage(workingSet, nodeArgs["dedup"].Bool(),
- matcher));
- while (it.more()) {
- BSONElement e = it.next();
- if (!e.isABSONObj()) { return NULL; }
- PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
- uassert(16936, "Can't parse sub-node of OR: " + e.Obj().toString(),
- NULL != subNode);
- // takes ownership
- orStage->addChild(subNode);
- }
+ return andStage.release();
+ } else if ("andSorted" == nodeName) {
+ uassert(
+ 16924, "Nodes argument must be provided to AND", nodeArgs["nodes"].isABSONObj());
- return orStage.release();
- }
- else if ("fetch" == nodeName) {
- uassert(16929, "Node argument must be provided to fetch",
- nodeArgs["node"].isABSONObj());
- PlanStage* subNode = parseQuery(txn,
- collection,
- nodeArgs["node"].Obj(),
- workingSet,
- exprs);
- return new FetchStage(txn, workingSet, subNode, matcher, collection);
- }
- else if ("limit" == nodeName) {
- uassert(16937, "Limit stage doesn't have a filter (put it on the child)",
- NULL == matcher);
- uassert(16930, "Node argument must be provided to limit",
- nodeArgs["node"].isABSONObj());
- uassert(16931, "Num argument must be provided to limit",
- nodeArgs["num"].isNumber());
- PlanStage* subNode = parseQuery(txn,
- collection,
- nodeArgs["node"].Obj(),
- workingSet,
- exprs);
- return new LimitStage(nodeArgs["num"].numberInt(), workingSet, subNode);
- }
- else if ("skip" == nodeName) {
- uassert(16938, "Skip stage doesn't have a filter (put it on the child)",
- NULL == matcher);
- uassert(16932, "Node argument must be provided to skip",
- nodeArgs["node"].isABSONObj());
- uassert(16933, "Num argument must be provided to skip",
- nodeArgs["num"].isNumber());
- PlanStage* subNode = parseQuery(txn,
- collection,
- nodeArgs["node"].Obj(),
- workingSet,
- exprs);
- return new SkipStage(nodeArgs["num"].numberInt(), workingSet, subNode);
+ unique_ptr<AndSortedStage> andStage(new AndSortedStage(workingSet, collection));
+
+ int nodesAdded = 0;
+ BSONObjIterator it(nodeArgs["nodes"].Obj());
+ while (it.more()) {
+ BSONElement e = it.next();
+ uassert(16925, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj());
+
+ PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
+ uassert(
+ 16926, "Can't parse sub-node of AND: " + e.Obj().toString(), NULL != subNode);
+ // takes ownership
+ andStage->addChild(subNode);
+ ++nodesAdded;
}
- else if ("cscan" == nodeName) {
- CollectionScanParams params;
- params.collection = collection;
-
- // What direction?
- uassert(16963, "Direction argument must be specified and be a number",
- nodeArgs["direction"].isNumber());
- if (1 == nodeArgs["direction"].numberInt()) {
- params.direction = CollectionScanParams::FORWARD;
- }
- else {
- params.direction = CollectionScanParams::BACKWARD;
+
+ uassert(16928, "AND requires more than one child", nodesAdded >= 2);
+
+ return andStage.release();
+ } else if ("or" == nodeName) {
+            uassert(
+                16934, "Nodes argument must be provided to OR", nodeArgs["nodes"].isABSONObj());
+ uassert(16935, "Dedup argument must be provided to OR", !nodeArgs["dedup"].eoo());
+ BSONObjIterator it(nodeArgs["nodes"].Obj());
+ unique_ptr<OrStage> orStage(new OrStage(workingSet, nodeArgs["dedup"].Bool(), matcher));
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (!e.isABSONObj()) {
+ return NULL;
}
+ PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
+ uassert(
+ 16936, "Can't parse sub-node of OR: " + e.Obj().toString(), NULL != subNode);
+ // takes ownership
+ orStage->addChild(subNode);
+ }
- return new CollectionScan(txn, params, workingSet, matcher);
+ return orStage.release();
+ } else if ("fetch" == nodeName) {
+ uassert(
+ 16929, "Node argument must be provided to fetch", nodeArgs["node"].isABSONObj());
+ PlanStage* subNode =
+ parseQuery(txn, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ return new FetchStage(txn, workingSet, subNode, matcher, collection);
+ } else if ("limit" == nodeName) {
+ uassert(
+ 16937, "Limit stage doesn't have a filter (put it on the child)", NULL == matcher);
+ uassert(
+ 16930, "Node argument must be provided to limit", nodeArgs["node"].isABSONObj());
+ uassert(16931, "Num argument must be provided to limit", nodeArgs["num"].isNumber());
+ PlanStage* subNode =
+ parseQuery(txn, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ return new LimitStage(nodeArgs["num"].numberInt(), workingSet, subNode);
+ } else if ("skip" == nodeName) {
+ uassert(
+ 16938, "Skip stage doesn't have a filter (put it on the child)", NULL == matcher);
+ uassert(16932, "Node argument must be provided to skip", nodeArgs["node"].isABSONObj());
+ uassert(16933, "Num argument must be provided to skip", nodeArgs["num"].isNumber());
+ PlanStage* subNode =
+ parseQuery(txn, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ return new SkipStage(nodeArgs["num"].numberInt(), workingSet, subNode);
+ } else if ("cscan" == nodeName) {
+ CollectionScanParams params;
+ params.collection = collection;
+
+ // What direction?
+ uassert(16963,
+ "Direction argument must be specified and be a number",
+ nodeArgs["direction"].isNumber());
+ if (1 == nodeArgs["direction"].numberInt()) {
+ params.direction = CollectionScanParams::FORWARD;
+ } else {
+ params.direction = CollectionScanParams::BACKWARD;
}
- // sort is disabled for now.
+
+ return new CollectionScan(txn, params, workingSet, matcher);
+ }
+// sort is disabled for now.
#if 0
else if ("sort" == nodeName) {
uassert(16969, "Node argument must be provided to sort",
@@ -393,96 +383,93 @@ namespace mongo {
return new SortStage(params, workingSet, subNode);
}
#endif
- else if ("mergeSort" == nodeName) {
- uassert(16971, "Nodes argument must be provided to sort",
- nodeArgs["nodes"].isABSONObj());
- uassert(16972, "Pattern argument must be provided to sort",
- nodeArgs["pattern"].isABSONObj());
-
- MergeSortStageParams params;
- params.pattern = nodeArgs["pattern"].Obj();
- // Dedup is true by default.
-
- unique_ptr<MergeSortStage> mergeStage(new MergeSortStage(params, workingSet,
- collection));
-
- BSONObjIterator it(nodeArgs["nodes"].Obj());
- while (it.more()) {
- BSONElement e = it.next();
- uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(),
- e.isABSONObj());
-
- PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
- uassert(16974, "Can't parse sub-node of mergeSort: " + e.Obj().toString(),
- NULL != subNode);
- // takes ownership
- mergeStage->addChild(subNode);
- }
- return mergeStage.release();
- }
- else if ("text" == nodeName) {
- string search = nodeArgs["search"].String();
-
- vector<IndexDescriptor*> idxMatches;
- collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
- uassert(17194, "Expected exactly one text index", idxMatches.size() == 1);
-
- IndexDescriptor* index = idxMatches[0];
- FTSAccessMethod* fam =
- dynamic_cast<FTSAccessMethod*>( collection->getIndexCatalog()->getIndex( index ) );
- TextStageParams params(fam->getSpec());
- params.index = index;
-
- // TODO: Deal with non-empty filters. This is a hack to put in covering information
- // that can only be checked for equality. We ignore this now.
- Status s = fam->getSpec().getIndexPrefix(BSONObj(), &params.indexPrefix);
- if (!s.isOK()) {
- // errmsg = s.toString();
- return NULL;
- }
+ else if ("mergeSort" == nodeName) {
+ uassert(
+ 16971, "Nodes argument must be provided to sort", nodeArgs["nodes"].isABSONObj());
+ uassert(16972,
+ "Pattern argument must be provided to sort",
+ nodeArgs["pattern"].isABSONObj());
- params.spec = fam->getSpec();
+ MergeSortStageParams params;
+ params.pattern = nodeArgs["pattern"].Obj();
+ // Dedup is true by default.
- if (!params.query.parse(search,
- fam->getSpec().defaultLanguage().str().c_str(),
- fts::FTSQuery::caseSensitiveDefault,
- fam->getSpec().getTextIndexVersion()).isOK()) {
- return NULL;
- }
+ unique_ptr<MergeSortStage> mergeStage(
+ new MergeSortStage(params, workingSet, collection));
- return new TextStage(txn, params, workingSet, matcher);
+ BSONObjIterator it(nodeArgs["nodes"].Obj());
+ while (it.more()) {
+ BSONElement e = it.next();
+ uassert(16973, "node of mergeSort isn't an obj?: " + e.toString(), e.isABSONObj());
+
+ PlanStage* subNode = parseQuery(txn, collection, e.Obj(), workingSet, exprs);
+ uassert(16974,
+ "Can't parse sub-node of mergeSort: " + e.Obj().toString(),
+ NULL != subNode);
+ // takes ownership
+ mergeStage->addChild(subNode);
}
- else if ("delete" == nodeName) {
- uassert(18636, "Delete stage doesn't have a filter (put it on the child)",
- NULL == matcher);
- uassert(18637, "node argument must be provided to delete",
- nodeArgs["node"].isABSONObj());
- uassert(18638, "isMulti argument must be provided to delete",
- nodeArgs["isMulti"].type() == Bool);
- uassert(18639, "shouldCallLogOp argument must be provided to delete",
- nodeArgs["shouldCallLogOp"].type() == Bool);
- PlanStage* subNode = parseQuery(txn,
- collection,
- nodeArgs["node"].Obj(),
- workingSet,
- exprs);
- DeleteStageParams params;
- params.isMulti = nodeArgs["isMulti"].Bool();
- params.shouldCallLogOp = nodeArgs["shouldCallLogOp"].Bool();
- return new DeleteStage(txn, params, workingSet, collection, subNode);
+ return mergeStage.release();
+ } else if ("text" == nodeName) {
+ string search = nodeArgs["search"].String();
+
+ vector<IndexDescriptor*> idxMatches;
+ collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
+ uassert(17194, "Expected exactly one text index", idxMatches.size() == 1);
+
+ IndexDescriptor* index = idxMatches[0];
+ FTSAccessMethod* fam =
+ dynamic_cast<FTSAccessMethod*>(collection->getIndexCatalog()->getIndex(index));
+ TextStageParams params(fam->getSpec());
+ params.index = index;
+
+ // TODO: Deal with non-empty filters. This is a hack to put in covering information
+ // that can only be checked for equality. We ignore this now.
+ Status s = fam->getSpec().getIndexPrefix(BSONObj(), &params.indexPrefix);
+ if (!s.isOK()) {
+ // errmsg = s.toString();
+ return NULL;
}
- else {
+
+ params.spec = fam->getSpec();
+
+ if (!params.query.parse(search,
+ fam->getSpec().defaultLanguage().str().c_str(),
+ fts::FTSQuery::caseSensitiveDefault,
+ fam->getSpec().getTextIndexVersion()).isOK()) {
return NULL;
}
- }
- };
- MONGO_INITIALIZER(RegisterStageDebugCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new StageDebugCmd();
+ return new TextStage(txn, params, workingSet, matcher);
+ } else if ("delete" == nodeName) {
+ uassert(
+ 18636, "Delete stage doesn't have a filter (put it on the child)", NULL == matcher);
+ uassert(
+ 18637, "node argument must be provided to delete", nodeArgs["node"].isABSONObj());
+ uassert(18638,
+ "isMulti argument must be provided to delete",
+ nodeArgs["isMulti"].type() == Bool);
+ uassert(18639,
+ "shouldCallLogOp argument must be provided to delete",
+ nodeArgs["shouldCallLogOp"].type() == Bool);
+ PlanStage* subNode =
+ parseQuery(txn, collection, nodeArgs["node"].Obj(), workingSet, exprs);
+ DeleteStageParams params;
+ params.isMulti = nodeArgs["isMulti"].Bool();
+ params.shouldCallLogOp = nodeArgs["shouldCallLogOp"].Bool();
+ return new DeleteStage(txn, params, workingSet, collection, subNode);
+ } else {
+ return NULL;
}
- return Status::OK();
}
+};
+
+MONGO_INITIALIZER(RegisterStageDebugCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new StageDebugCmd();
+ }
+ return Status::OK();
+}
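+
+// Note (illustrative): Command::testCommandsEnabled is only set when the server is
+// started with test commands enabled, e.g. mongod --setParameter enableTestCommands=1,
+// so this command never registers in a normally configured deployment.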
} // namespace mongo
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 0c64aa7b178..62daed09f32 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -45,513 +45,487 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::vector;
-
- // static
- const char* SubplanStage::kStageType = "SUBPLAN";
-
- SubplanStage::SubplanStage(OperationContext* txn,
- Collection* collection,
- WorkingSet* ws,
- const QueryPlannerParams& params,
- CanonicalQuery* cq)
- : _txn(txn),
- _collection(collection),
- _ws(ws),
- _plannerParams(params),
- _query(cq),
- _child(nullptr),
- _commonStats(kStageType) {
- invariant(_collection);
+using std::unique_ptr;
+using std::endl;
+using std::vector;
+
+// static
+const char* SubplanStage::kStageType = "SUBPLAN";
+
+SubplanStage::SubplanStage(OperationContext* txn,
+ Collection* collection,
+ WorkingSet* ws,
+ const QueryPlannerParams& params,
+ CanonicalQuery* cq)
+ : _txn(txn),
+ _collection(collection),
+ _ws(ws),
+ _plannerParams(params),
+ _query(cq),
+ _child(nullptr),
+ _commonStats(kStageType) {
+ invariant(_collection);
+}
+
+// static
+bool SubplanStage::canUseSubplanning(const CanonicalQuery& query) {
+ const LiteParsedQuery& lpq = query.getParsed();
+ const MatchExpression* expr = query.root();
+
+ // Only rooted ORs work with the subplan scheme.
+ if (MatchExpression::OR != expr->matchType()) {
+ return false;
}
- // static
- bool SubplanStage::canUseSubplanning(const CanonicalQuery& query) {
- const LiteParsedQuery& lpq = query.getParsed();
- const MatchExpression* expr = query.root();
+ // Hint provided
+ if (!lpq.getHint().isEmpty()) {
+ return false;
+ }
- // Only rooted ORs work with the subplan scheme.
- if (MatchExpression::OR != expr->matchType()) {
- return false;
- }
+ // Min provided
+ // Min queries are a special case of hinted queries.
+ if (!lpq.getMin().isEmpty()) {
+ return false;
+ }
- // Hint provided
- if (!lpq.getHint().isEmpty()) {
- return false;
- }
+ // Max provided
+ // Similar to min, max queries are a special case of hinted queries.
+ if (!lpq.getMax().isEmpty()) {
+ return false;
+ }
- // Min provided
- // Min queries are a special case of hinted queries.
- if (!lpq.getMin().isEmpty()) {
- return false;
- }
+ // Tailable cursors won't get cached, just turn into collscans.
+ if (query.getParsed().isTailable()) {
+ return false;
+ }
- // Max provided
- // Similar to min, max queries are a special case of hinted queries.
- if (!lpq.getMax().isEmpty()) {
- return false;
- }
+ // Snapshot is really a hint.
+ if (query.getParsed().isSnapshot()) {
+ return false;
+ }
- // Tailable cursors won't get cached, just turn into collscans.
- if (query.getParsed().isTailable()) {
- return false;
- }
+ return true;
+}
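+
+// For example (illustrative): a rooted $or such as {$or: [{a: 1}, {b: 2}]}, issued with
+// no hint, min, max, tailable, or snapshot option, is eligible for subplanning, whereas
+// {a: 1, $or: [{b: 1}, {c: 1}]} is not, because its root is an implicit AND rather than
+// the OR itself.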
- // Snapshot is really a hint.
- if (query.getParsed().isSnapshot()) {
- return false;
- }
+Status SubplanStage::planSubqueries() {
+ // Adds the amount of time taken by planSubqueries() to executionTimeMillis. There's lots of
+ // work that happens here, so this is needed for the time accounting to make sense.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- return true;
+ MatchExpression* orExpr = _query->root();
+
+ for (size_t i = 0; i < _plannerParams.indices.size(); ++i) {
+ const IndexEntry& ie = _plannerParams.indices[i];
+ _indexMap[ie.keyPattern] = i;
+ LOG(5) << "Subplanner: index " << i << " is " << ie.toString() << endl;
}
- Status SubplanStage::planSubqueries() {
- // Adds the amount of time taken by planSubqueries() to executionTimeMillis. There's lots of
- // work that happens here, so this is needed for the time accounting to make sense.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ const WhereCallbackReal whereCallback(_txn, _collection->ns().db());
- MatchExpression* orExpr = _query->root();
+ for (size_t i = 0; i < orExpr->numChildren(); ++i) {
+ // We need a place to shove the results from planning this branch.
+ _branchResults.push_back(new BranchPlanningResult());
+ BranchPlanningResult* branchResult = _branchResults.back();
- for (size_t i = 0; i < _plannerParams.indices.size(); ++i) {
- const IndexEntry& ie = _plannerParams.indices[i];
- _indexMap[ie.keyPattern] = i;
- LOG(5) << "Subplanner: index " << i << " is " << ie.toString() << endl;
- }
+ MatchExpression* orChild = orExpr->getChild(i);
- const WhereCallbackReal whereCallback(_txn, _collection->ns().db());
-
- for (size_t i = 0; i < orExpr->numChildren(); ++i) {
- // We need a place to shove the results from planning this branch.
- _branchResults.push_back(new BranchPlanningResult());
- BranchPlanningResult* branchResult = _branchResults.back();
-
- MatchExpression* orChild = orExpr->getChild(i);
-
- // Turn the i-th child into its own query.
- {
- CanonicalQuery* orChildCQ;
- Status childCQStatus = CanonicalQuery::canonicalize(*_query,
- orChild,
- &orChildCQ,
- whereCallback);
- if (!childCQStatus.isOK()) {
- mongoutils::str::stream ss;
- ss << "Can't canonicalize subchild " << orChild->toString()
- << " " << childCQStatus.reason();
- return Status(ErrorCodes::BadValue, ss);
- }
-
- branchResult->canonicalQuery.reset(orChildCQ);
+ // Turn the i-th child into its own query.
+ {
+ CanonicalQuery* orChildCQ;
+ Status childCQStatus =
+ CanonicalQuery::canonicalize(*_query, orChild, &orChildCQ, whereCallback);
+ if (!childCQStatus.isOK()) {
+ mongoutils::str::stream ss;
+ ss << "Can't canonicalize subchild " << orChild->toString() << " "
+ << childCQStatus.reason();
+ return Status(ErrorCodes::BadValue, ss);
}
- // Plan the i-th child. We might be able to find a plan for the i-th child in the plan
- // cache. If there's no cached plan, then we generate and rank plans using the MPS.
- CachedSolution* rawCS;
- if (PlanCache::shouldCacheQuery(*branchResult->canonicalQuery.get()) &&
- _collection->infoCache()->getPlanCache()->get(*branchResult->canonicalQuery.get(),
- &rawCS).isOK()) {
- // We have a CachedSolution. Store it for later.
- LOG(5) << "Subplanner: cached plan found for child " << i << " of "
- << orExpr->numChildren();
-
- branchResult->cachedSolution.reset(rawCS);
- }
- else {
- // No CachedSolution found. We'll have to plan from scratch.
- LOG(5) << "Subplanner: planning child " << i << " of " << orExpr->numChildren();
-
- // We don't set NO_TABLE_SCAN because peeking at the cache data will keep us from
- // considering any plan that's a collscan.
- Status status = QueryPlanner::plan(*branchResult->canonicalQuery.get(),
- _plannerParams,
- &branchResult->solutions.mutableVector());
-
- if (!status.isOK()) {
- mongoutils::str::stream ss;
- ss << "Can't plan for subchild "
- << branchResult->canonicalQuery->toString()
- << " " << status.reason();
- return Status(ErrorCodes::BadValue, ss);
- }
- LOG(5) << "Subplanner: got " << branchResult->solutions.size() << " solutions";
-
- if (0 == branchResult->solutions.size()) {
- // If one child doesn't have an indexed solution, bail out.
- mongoutils::str::stream ss;
- ss << "No solutions for subchild " << branchResult->canonicalQuery->toString();
- return Status(ErrorCodes::BadValue, ss);
- }
- }
+ branchResult->canonicalQuery.reset(orChildCQ);
}
- return Status::OK();
- }
-
- namespace {
-
- /**
- * On success, applies the index tags from 'branchCacheData' (which represent the winning
- * plan for 'orChild') to 'compositeCacheData'.
- */
- Status tagOrChildAccordingToCache(PlanCacheIndexTree* compositeCacheData,
- SolutionCacheData* branchCacheData,
- MatchExpression* orChild,
- const std::map<BSONObj, size_t>& indexMap) {
- invariant(compositeCacheData);
-
- // We want a well-formed *indexed* solution.
- if (NULL == branchCacheData) {
- // For example, we don't cache things for 2d indices.
+ // Plan the i-th child. We might be able to find a plan for the i-th child in the plan
+ // cache. If there's no cached plan, then we generate and rank plans using the MPS.
+ CachedSolution* rawCS;
+ if (PlanCache::shouldCacheQuery(*branchResult->canonicalQuery.get()) &&
+ _collection->infoCache()
+ ->getPlanCache()
+ ->get(*branchResult->canonicalQuery.get(), &rawCS)
+ .isOK()) {
+ // We have a CachedSolution. Store it for later.
+ LOG(5) << "Subplanner: cached plan found for child " << i << " of "
+ << orExpr->numChildren();
+
+ branchResult->cachedSolution.reset(rawCS);
+ } else {
+ // No CachedSolution found. We'll have to plan from scratch.
+ LOG(5) << "Subplanner: planning child " << i << " of " << orExpr->numChildren();
+
+ // We don't set NO_TABLE_SCAN because peeking at the cache data will keep us from
+ // considering any plan that's a collscan.
+ Status status = QueryPlanner::plan(*branchResult->canonicalQuery.get(),
+ _plannerParams,
+ &branchResult->solutions.mutableVector());
+
+ if (!status.isOK()) {
mongoutils::str::stream ss;
- ss << "No cache data for subchild " << orChild->toString();
+ ss << "Can't plan for subchild " << branchResult->canonicalQuery->toString() << " "
+ << status.reason();
return Status(ErrorCodes::BadValue, ss);
}
+ LOG(5) << "Subplanner: got " << branchResult->solutions.size() << " solutions";
- if (SolutionCacheData::USE_INDEX_TAGS_SOLN != branchCacheData->solnType) {
+ if (0 == branchResult->solutions.size()) {
+ // If one child doesn't have an indexed solution, bail out.
mongoutils::str::stream ss;
- ss << "No indexed cache data for subchild "
- << orChild->toString();
+ ss << "No solutions for subchild " << branchResult->canonicalQuery->toString();
return Status(ErrorCodes::BadValue, ss);
}
+ }
+ }
- // Add the index assignments to our original query.
- Status tagStatus = QueryPlanner::tagAccordingToCache(orChild,
- branchCacheData->tree.get(),
- indexMap);
+ return Status::OK();
+}
+
+namespace {
+/**
+ * On success, applies the index tags from 'branchCacheData' (which represent the winning
+ * plan for 'orChild') to 'compositeCacheData'.
+ */
+Status tagOrChildAccordingToCache(PlanCacheIndexTree* compositeCacheData,
+ SolutionCacheData* branchCacheData,
+ MatchExpression* orChild,
+ const std::map<BSONObj, size_t>& indexMap) {
+ invariant(compositeCacheData);
+
+ // We want a well-formed *indexed* solution.
+ if (NULL == branchCacheData) {
+ // For example, we don't cache things for 2d indices.
+ mongoutils::str::stream ss;
+ ss << "No cache data for subchild " << orChild->toString();
+ return Status(ErrorCodes::BadValue, ss);
+ }
+
+ if (SolutionCacheData::USE_INDEX_TAGS_SOLN != branchCacheData->solnType) {
+ mongoutils::str::stream ss;
+ ss << "No indexed cache data for subchild " << orChild->toString();
+ return Status(ErrorCodes::BadValue, ss);
+ }
+
+ // Add the index assignments to our original query.
+ Status tagStatus =
+ QueryPlanner::tagAccordingToCache(orChild, branchCacheData->tree.get(), indexMap);
+
+ if (!tagStatus.isOK()) {
+ mongoutils::str::stream ss;
+ ss << "Failed to extract indices from subchild " << orChild->toString();
+ return Status(ErrorCodes::BadValue, ss);
+ }
+
+ // Add the child's cache data to the cache data we're creating for the main query.
+ compositeCacheData->children.push_back(branchCacheData->tree->clone());
+
+ return Status::OK();
+}
+
+} // namespace
+
+Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
+ // This is what we annotate with the index selections and then turn into a solution.
+ unique_ptr<OrMatchExpression> orExpr(
+ static_cast<OrMatchExpression*>(_query->root()->shallowClone()));
+
+ // This is the skeleton of index selections that is inserted into the cache.
+ unique_ptr<PlanCacheIndexTree> cacheData(new PlanCacheIndexTree());
+
+ for (size_t i = 0; i < orExpr->numChildren(); ++i) {
+ MatchExpression* orChild = orExpr->getChild(i);
+ BranchPlanningResult* branchResult = _branchResults[i];
+
+ if (branchResult->cachedSolution.get()) {
+ // We can get the index tags we need out of the cache.
+ Status tagStatus = tagOrChildAccordingToCache(
+ cacheData.get(), branchResult->cachedSolution->plannerData[0], orChild, _indexMap);
if (!tagStatus.isOK()) {
- mongoutils::str::stream ss;
- ss << "Failed to extract indices from subchild "
- << orChild->toString();
- return Status(ErrorCodes::BadValue, ss);
+ return tagStatus;
}
+ } else if (1 == branchResult->solutions.size()) {
+ QuerySolution* soln = branchResult->solutions.front();
+ Status tagStatus = tagOrChildAccordingToCache(
+ cacheData.get(), soln->cacheData.get(), orChild, _indexMap);
+ if (!tagStatus.isOK()) {
+ return tagStatus;
+ }
+ } else {
+ // N solutions, rank them.
- // Add the child's cache data to the cache data we're creating for the main query.
- compositeCacheData->children.push_back(branchCacheData->tree->clone());
+ // We already checked for zero solutions in planSubqueries(...).
+ invariant(!branchResult->solutions.empty());
- return Status::OK();
- }
+ _ws->clear();
+
+ _child.reset(new MultiPlanStage(_txn, _collection, branchResult->canonicalQuery.get()));
+ MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_child.get());
+
+ // Dump all the solutions into the MPS.
+ for (size_t ix = 0; ix < branchResult->solutions.size(); ++ix) {
+ PlanStage* nextPlanRoot;
+ invariant(StageBuilder::build(
+ _txn, _collection, *branchResult->solutions[ix], _ws, &nextPlanRoot));
- } // namespace
-
- Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
- // This is what we annotate with the index selections and then turn into a solution.
- unique_ptr<OrMatchExpression> orExpr(
- static_cast<OrMatchExpression*>(_query->root()->shallowClone()));
-
- // This is the skeleton of index selections that is inserted into the cache.
- unique_ptr<PlanCacheIndexTree> cacheData(new PlanCacheIndexTree());
-
- for (size_t i = 0; i < orExpr->numChildren(); ++i) {
- MatchExpression* orChild = orExpr->getChild(i);
- BranchPlanningResult* branchResult = _branchResults[i];
-
- if (branchResult->cachedSolution.get()) {
- // We can get the index tags we need out of the cache.
- Status tagStatus = tagOrChildAccordingToCache(
- cacheData.get(),
- branchResult->cachedSolution->plannerData[0],
- orChild,
- _indexMap);
- if (!tagStatus.isOK()) {
- return tagStatus;
- }
+ // Takes ownership of solution with index 'ix' and 'nextPlanRoot'.
+ multiPlanStage->addPlan(branchResult->solutions.releaseAt(ix), nextPlanRoot, _ws);
}
- else if (1 == branchResult->solutions.size()) {
- QuerySolution* soln = branchResult->solutions.front();
- Status tagStatus = tagOrChildAccordingToCache(cacheData.get(),
- soln->cacheData.get(),
- orChild,
- _indexMap);
- if (!tagStatus.isOK()) {
- return tagStatus;
- }
+
+ Status planSelectStat = multiPlanStage->pickBestPlan(yieldPolicy);
+ if (!planSelectStat.isOK()) {
+ return planSelectStat;
}
- else {
- // N solutions, rank them.
-
- // We already checked for zero solutions in planSubqueries(...).
- invariant(!branchResult->solutions.empty());
-
- _ws->clear();
-
- _child.reset(new MultiPlanStage(_txn, _collection,
- branchResult->canonicalQuery.get()));
- MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_child.get());
-
- // Dump all the solutions into the MPS.
- for (size_t ix = 0; ix < branchResult->solutions.size(); ++ix) {
- PlanStage* nextPlanRoot;
- invariant(StageBuilder::build(_txn,
- _collection,
- *branchResult->solutions[ix],
- _ws,
- &nextPlanRoot));
-
- // Takes ownership of solution with index 'ix' and 'nextPlanRoot'.
- multiPlanStage->addPlan(branchResult->solutions.releaseAt(ix),
- nextPlanRoot,
- _ws);
- }
-
- Status planSelectStat = multiPlanStage->pickBestPlan(yieldPolicy);
- if (!planSelectStat.isOK()) {
- return planSelectStat;
- }
-
- if (!multiPlanStage->bestPlanChosen()) {
- mongoutils::str::stream ss;
- ss << "Failed to pick best plan for subchild "
- << branchResult->canonicalQuery->toString();
- return Status(ErrorCodes::BadValue, ss);
- }
-
- QuerySolution* bestSoln = multiPlanStage->bestSolution();
-
- // Check that we have good cache data. For example, we don't cache things
- // for 2d indices.
- if (NULL == bestSoln->cacheData.get()) {
- mongoutils::str::stream ss;
- ss << "No cache data for subchild " << orChild->toString();
- return Status(ErrorCodes::BadValue, ss);
- }
-
- if (SolutionCacheData::USE_INDEX_TAGS_SOLN != bestSoln->cacheData->solnType) {
- mongoutils::str::stream ss;
- ss << "No indexed cache data for subchild "
- << orChild->toString();
- return Status(ErrorCodes::BadValue, ss);
- }
-
- // Add the index assignments to our original query.
- Status tagStatus = QueryPlanner::tagAccordingToCache(
- orChild, bestSoln->cacheData->tree.get(), _indexMap);
-
- if (!tagStatus.isOK()) {
- mongoutils::str::stream ss;
- ss << "Failed to extract indices from subchild "
- << orChild->toString();
- return Status(ErrorCodes::BadValue, ss);
- }
-
- cacheData->children.push_back(bestSoln->cacheData->tree->clone());
+
+ if (!multiPlanStage->bestPlanChosen()) {
+ mongoutils::str::stream ss;
+ ss << "Failed to pick best plan for subchild "
+ << branchResult->canonicalQuery->toString();
+ return Status(ErrorCodes::BadValue, ss);
}
- }
- // Must do this before using the planner functionality.
- sortUsingTags(orExpr.get());
+ QuerySolution* bestSoln = multiPlanStage->bestSolution();
- // Use the cached index assignments to build solnRoot. Takes ownership of 'orExpr'.
- QuerySolutionNode* solnRoot = QueryPlannerAccess::buildIndexedDataAccess(
- *_query, orExpr.release(), false, _plannerParams.indices, _plannerParams);
+ // Check that we have good cache data. For example, we don't cache things
+ // for 2d indices.
+ if (NULL == bestSoln->cacheData.get()) {
+ mongoutils::str::stream ss;
+ ss << "No cache data for subchild " << orChild->toString();
+ return Status(ErrorCodes::BadValue, ss);
+ }
- if (NULL == solnRoot) {
- mongoutils::str::stream ss;
- ss << "Failed to build indexed data path for subplanned query\n";
- return Status(ErrorCodes::BadValue, ss);
- }
+ if (SolutionCacheData::USE_INDEX_TAGS_SOLN != bestSoln->cacheData->solnType) {
+ mongoutils::str::stream ss;
+ ss << "No indexed cache data for subchild " << orChild->toString();
+ return Status(ErrorCodes::BadValue, ss);
+ }
- LOG(5) << "Subplanner: fully tagged tree is " << solnRoot->toString();
+ // Add the index assignments to our original query.
+ Status tagStatus = QueryPlanner::tagAccordingToCache(
+ orChild, bestSoln->cacheData->tree.get(), _indexMap);
- // Takes ownership of 'solnRoot'
- _compositeSolution.reset(QueryPlannerAnalysis::analyzeDataAccess(*_query,
- _plannerParams,
- solnRoot));
+ if (!tagStatus.isOK()) {
+ mongoutils::str::stream ss;
+ ss << "Failed to extract indices from subchild " << orChild->toString();
+ return Status(ErrorCodes::BadValue, ss);
+ }
- if (NULL == _compositeSolution.get()) {
- mongoutils::str::stream ss;
- ss << "Failed to analyze subplanned query";
- return Status(ErrorCodes::BadValue, ss);
+ cacheData->children.push_back(bestSoln->cacheData->tree->clone());
}
+ }
- LOG(5) << "Subplanner: Composite solution is " << _compositeSolution->toString() << endl;
+ // Must do this before using the planner functionality.
+ sortUsingTags(orExpr.get());
- // Use the index tags from planning each branch to construct the composite solution,
- // and set that solution as our child stage.
- _ws->clear();
- PlanStage* root;
- invariant(StageBuilder::build(_txn, _collection, *_compositeSolution.get(), _ws, &root));
- _child.reset(root);
+ // Use the cached index assignments to build solnRoot. Takes ownership of 'orExpr'.
+ QuerySolutionNode* solnRoot = QueryPlannerAccess::buildIndexedDataAccess(
+ *_query, orExpr.release(), false, _plannerParams.indices, _plannerParams);
- return Status::OK();
+ if (NULL == solnRoot) {
+ mongoutils::str::stream ss;
+ ss << "Failed to build indexed data path for subplanned query\n";
+ return Status(ErrorCodes::BadValue, ss);
}
- Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
- // Clear out the working set. We'll start with a fresh working set.
- _ws->clear();
+ LOG(5) << "Subplanner: fully tagged tree is " << solnRoot->toString();
- // Use the query planning module to plan the whole query.
- vector<QuerySolution*> rawSolutions;
- Status status = QueryPlanner::plan(*_query, _plannerParams, &rawSolutions);
- if (!status.isOK()) {
- return Status(ErrorCodes::BadValue,
- "error processing query: " + _query->toString() +
- " planner returned error: " + status.reason());
- }
+ // Takes ownership of 'solnRoot'
+ _compositeSolution.reset(
+ QueryPlannerAnalysis::analyzeDataAccess(*_query, _plannerParams, solnRoot));
- OwnedPointerVector<QuerySolution> solutions(rawSolutions);
+ if (NULL == _compositeSolution.get()) {
+ mongoutils::str::stream ss;
+ ss << "Failed to analyze subplanned query";
+ return Status(ErrorCodes::BadValue, ss);
+ }
- // We cannot figure out how to answer the query. Perhaps it requires an index
- // we do not have?
- if (0 == solutions.size()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "error processing query: "
- << _query->toString()
- << " No query solutions");
- }
+ LOG(5) << "Subplanner: Composite solution is " << _compositeSolution->toString() << endl;
- if (1 == solutions.size()) {
- PlanStage* root;
- // Only one possible plan. Run it. Build the stages from the solution.
- verify(StageBuilder::build(_txn, _collection, *solutions[0], _ws, &root));
- _child.reset(root);
+ // Use the index tags from planning each branch to construct the composite solution,
+ // and set that solution as our child stage.
+ _ws->clear();
+ PlanStage* root;
+ invariant(StageBuilder::build(_txn, _collection, *_compositeSolution.get(), _ws, &root));
+ _child.reset(root);
- // This SubplanStage takes ownership of the query solution.
- _compositeSolution.reset(solutions.popAndReleaseBack());
+ return Status::OK();
+}
- return Status::OK();
- }
- else {
- // Many solutions. Create a MultiPlanStage to pick the best, update the cache,
- // and so on. The working set will be shared by all candidate plans.
- _child.reset(new MultiPlanStage(_txn, _collection, _query));
- MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_child.get());
+Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
+ // Clear out the working set. We'll start with a fresh working set.
+ _ws->clear();
+
+ // Use the query planning module to plan the whole query.
+ vector<QuerySolution*> rawSolutions;
+ Status status = QueryPlanner::plan(*_query, _plannerParams, &rawSolutions);
+ if (!status.isOK()) {
+ return Status(ErrorCodes::BadValue,
+ "error processing query: " + _query->toString() +
+ " planner returned error: " + status.reason());
+ }
- for (size_t ix = 0; ix < solutions.size(); ++ix) {
- if (solutions[ix]->cacheData.get()) {
- solutions[ix]->cacheData->indexFilterApplied =
- _plannerParams.indexFiltersApplied;
- }
+ OwnedPointerVector<QuerySolution> solutions(rawSolutions);
- // version of StageBuild::build when WorkingSet is shared
- PlanStage* nextPlanRoot;
- verify(StageBuilder::build(_txn, _collection, *solutions[ix], _ws,
- &nextPlanRoot));
+ // We cannot figure out how to answer the query. Perhaps it requires an index
+ // we do not have?
+ if (0 == solutions.size()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "error processing query: " << _query->toString()
+ << " No query solutions");
+ }
- // Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
- multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
- }
+ if (1 == solutions.size()) {
+ PlanStage* root;
+ // Only one possible plan. Run it. Build the stages from the solution.
+ verify(StageBuilder::build(_txn, _collection, *solutions[0], _ws, &root));
+ _child.reset(root);
- // Delegate the the MultiPlanStage's plan selection facility.
- Status planSelectStat = multiPlanStage->pickBestPlan(yieldPolicy);
- if (!planSelectStat.isOK()) {
- return planSelectStat;
- }
+ // This SubplanStage takes ownership of the query solution.
+ _compositeSolution.reset(solutions.popAndReleaseBack());
- return Status::OK();
- }
- }
+ return Status::OK();
+ } else {
+ // Many solutions. Create a MultiPlanStage to pick the best, update the cache,
+ // and so on. The working set will be shared by all candidate plans.
+ _child.reset(new MultiPlanStage(_txn, _collection, _query));
+ MultiPlanStage* multiPlanStage = static_cast<MultiPlanStage*>(_child.get());
+
+ for (size_t ix = 0; ix < solutions.size(); ++ix) {
+ if (solutions[ix]->cacheData.get()) {
+ solutions[ix]->cacheData->indexFilterApplied = _plannerParams.indexFiltersApplied;
+ }
- Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
- // Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
- // work that happens here, so this is needed for the time accounting to make sense.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ // Version of StageBuilder::build used when the WorkingSet is shared.
+ PlanStage* nextPlanRoot;
+ verify(StageBuilder::build(_txn, _collection, *solutions[ix], _ws, &nextPlanRoot));
- // Plan each branch of the $or.
- Status subplanningStatus = planSubqueries();
- if (!subplanningStatus.isOK()) {
- return choosePlanWholeQuery(yieldPolicy);
+ // Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
+ multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
}
- // Use the multi plan stage to select a winning plan for each branch, and then construct
- // the overall winning plan from the resulting index tags.
- Status subplanSelectStat = choosePlanForSubqueries(yieldPolicy);
- if (!subplanSelectStat.isOK()) {
- return choosePlanWholeQuery(yieldPolicy);
+ // Delegate to the MultiPlanStage's plan selection facility.
+ Status planSelectStat = multiPlanStage->pickBestPlan(yieldPolicy);
+ if (!planSelectStat.isOK()) {
+ return planSelectStat;
}
return Status::OK();
}
+}
- bool SubplanStage::isEOF() {
- // If we're running we best have a runner.
- invariant(_child.get());
- return _child->isEOF();
+Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
+ // Adds the amount of time taken by pickBestPlan() to executionTimeMillis. There's lots of
+ // work that happens here, so this is needed for the time accounting to make sense.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ // Plan each branch of the $or.
+ Status subplanningStatus = planSubqueries();
+ if (!subplanningStatus.isOK()) {
+ return choosePlanWholeQuery(yieldPolicy);
}
- PlanStage::StageState SubplanStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+ // Use the multi plan stage to select a winning plan for each branch, and then construct
+ // the overall winning plan from the resulting index tags.
+ Status subplanSelectStat = choosePlanForSubqueries(yieldPolicy);
+ if (!subplanSelectStat.isOK()) {
+ return choosePlanWholeQuery(yieldPolicy);
+ }
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ return Status::OK();
+}
- if (isEOF()) { return PlanStage::IS_EOF; }
+bool SubplanStage::isEOF() {
+ // If we're running we best have a runner.
+ invariant(_child.get());
+ return _child->isEOF();
+}
- invariant(_child.get());
- StageState state = _child->work(out);
+PlanStage::StageState SubplanStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
- if (PlanStage::NEED_TIME == state) {
- ++_commonStats.needTime;
- }
- else if (PlanStage::NEED_YIELD == state) {
- ++_commonStats.needYield;
- }
- else if (PlanStage::ADVANCED == state) {
- ++_commonStats.advanced;
- }
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
- return state;
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
}
- void SubplanStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
+ invariant(_child.get());
+ StageState state = _child->work(out);
- // We're ranking a sub-plan via an MPS or we're streaming results from this stage. Either
- // way, pass on the request.
- if (NULL != _child.get()) {
- _child->saveState();
- }
+ if (PlanStage::NEED_TIME == state) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == state) {
+ ++_commonStats.needYield;
+ } else if (PlanStage::ADVANCED == state) {
+ ++_commonStats.advanced;
}
- void SubplanStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
+ return state;
+}
- // We're ranking a sub-plan via an MPS or we're streaming results from this stage. Either
- // way, pass on the request.
- if (NULL != _child.get()) {
- _child->restoreState(opCtx);
- }
- }
-
- void SubplanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
+void SubplanStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
- if (NULL != _child.get()) {
- _child->invalidate(txn, dl, type);
- }
+ // We're ranking a sub-plan via an MPS or we're streaming results from this stage. Either
+ // way, pass on the request.
+ if (NULL != _child.get()) {
+ _child->saveState();
}
+}
- vector<PlanStage*> SubplanStage::getChildren() const {
- vector<PlanStage*> children;
- if (NULL != _child.get()) {
- children.push_back(_child.get());
- }
- return children;
- }
+void SubplanStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
- PlanStageStats* SubplanStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SUBPLAN));
- ret->children.push_back(_child->getStats());
- return ret.release();
+ // We're ranking a sub-plan via an MPS or we're streaming results from this stage. Either
+ // way, pass on the request.
+ if (NULL != _child.get()) {
+ _child->restoreState(opCtx);
}
+}
- bool SubplanStage::branchPlannedFromCache(size_t i) const {
- return NULL != _branchResults[i]->cachedSolution.get();
- }
+void SubplanStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
- const CommonStats* SubplanStage::getCommonStats() const {
- return &_commonStats;
+ if (NULL != _child.get()) {
+ _child->invalidate(txn, dl, type);
}
+}
- const SpecificStats* SubplanStage::getSpecificStats() const {
- return NULL;
+vector<PlanStage*> SubplanStage::getChildren() const {
+ vector<PlanStage*> children;
+ if (NULL != _child.get()) {
+ children.push_back(_child.get());
}
+ return children;
+}
+
+PlanStageStats* SubplanStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_SUBPLAN));
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+bool SubplanStage::branchPlannedFromCache(size_t i) const {
+ return NULL != _branchResults[i]->cachedSolution.get();
+}
+
+const CommonStats* SubplanStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* SubplanStage::getSpecificStats() const {
+ return NULL;
+}
} // namespace mongo
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index f46a25b3bc8..ca831a1856e 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -42,154 +42,157 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
+
+/**
+ * The SubplanStage is used for rooted $or queries. It plans each clause of the $or
+ * individually, and then creates an overall query plan based on the winning plan from
+ * each clause.
+ *
+ * Uses the MultiPlanStage in order to rank plans for the individual clauses.
+ *
+ * Notes on caching strategy:
+ *
+ * --Interaction with the plan cache is done on a per-clause basis. For a given clause C,
+ * if there is a plan in the cache for shape C, then C is planned using the index tags
+ * obtained from the plan cache entry. If no cached plan is found for C, then a MultiPlanStage
+ * is used to determine the best plan for the clause; unless there is a tie between multiple
+ * candidate plans, the winner is inserted into the plan cache and used to plan subsequent
+ * executions of C. These subsequent executions of shape C could be either as a clause in
+ * another rooted $or query, or shape C as its own query.
+ *
+ * --Plans for entire rooted $or queries are neither written to nor read from the plan cache.
+ */
+class SubplanStage : public PlanStage {
+public:
+ SubplanStage(OperationContext* txn,
+ Collection* collection,
+ WorkingSet* ws,
+ const QueryPlannerParams& params,
+ CanonicalQuery* cq);
+
+ static bool canUseSubplanning(const CanonicalQuery& query);
+
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ virtual std::vector<PlanStage*> getChildren() const;
+
+ virtual StageType stageType() const {
+ return STAGE_SUBPLAN;
+ }
+
+ PlanStageStats* getStats();
+
+ virtual const CommonStats* getCommonStats() const;
+
+ virtual const SpecificStats* getSpecificStats() const;
+
+ static const char* kStageType;
/**
- * The SubplanStage is used for rooted $or queries. It plans each clause of the $or
- * individually, and then creates an overall query plan based on the winning plan from
- * each clause.
+ * Selects a plan using subplanning. First uses the query planning results from
+ * planSubqueries() and the multi plan stage to select the best plan for each branch.
*
- * Uses the MultiPlanStage in order to rank plans for the individual clauses.
+ * If this effort fails, then falls back on planning the whole query normally rather
+ * than planning $or branches independently.
*
- * Notes on caching strategy:
+ * If 'yieldPolicy' is non-NULL, then all locks may be yielded in between round-robin
+ * works of the candidate plans. By default, 'yieldPolicy' is NULL and no yielding will
+ * take place.
*
- * --Interaction with the plan cache is done on a per-clause basis. For a given clause C,
- * if there is a plan in the cache for shape C, then C is planned using the index tags
- * obtained from the plan cache entry. If no cached plan is found for C, then a MultiPlanStage
- * is used to determine the best plan for the clause; unless there is a tie between multiple
- * candidate plans, the winner is inserted into the plan cache and used to plan subsequent
- * executions of C. These subsequent executions of shape C could be either as a clause in
- * another rooted $or query, or shape C as its own query.
- *
- * --Plans for entire rooted $or queries are neither written to nor read from the plan cache.
+ * Returns a non-OK status if the plan was killed during yield or if planning fails.
+ */
+ Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
+
+ //
+ // For testing.
+ //
+
+ /**
+ * Returns true if the i-th branch was planned by retrieving a cached solution,
+ * otherwise returns false.
+ */
+ bool branchPlannedFromCache(size_t i) const;
+
+private:
+ /**
+ * A class used internally in order to keep track of the results of planning
+ * a particular $or branch.
*/
- class SubplanStage : public PlanStage {
+ struct BranchPlanningResult {
+ MONGO_DISALLOW_COPYING(BranchPlanningResult);
+
public:
- SubplanStage(OperationContext* txn,
- Collection* collection,
- WorkingSet* ws,
- const QueryPlannerParams& params,
- CanonicalQuery* cq);
-
- static bool canUseSubplanning(const CanonicalQuery& query);
-
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
-
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
- virtual StageType stageType() const { return STAGE_SUBPLAN; }
-
- PlanStageStats* getStats();
-
- virtual const CommonStats* getCommonStats() const;
-
- virtual const SpecificStats* getSpecificStats() const;
-
- static const char* kStageType;
-
- /**
- * Selects a plan using subplanning. First uses the query planning results from
- * planSubqueries() and the multi plan stage to select the best plan for each branch.
- *
- * If this effort fails, then falls back on planning the whole query normally rather
- * then planning $or branches independently.
- *
- * If 'yieldPolicy' is non-NULL, then all locks may be yielded in between round-robin
- * works of the candidate plans. By default, 'yieldPolicy' is NULL and no yielding will
- * take place.
- *
- * Returns a non-OK status if the plan was killed during yield or if planning fails.
- */
- Status pickBestPlan(PlanYieldPolicy* yieldPolicy);
-
- //
- // For testing.
- //
-
- /**
- * Returns true if the i-th branch was planned by retrieving a cached solution,
- * otherwise returns false.
- */
- bool branchPlannedFromCache(size_t i) const;
-
- private:
- /**
- * A class used internally in order to keep track of the results of planning
- * a particular $or branch.
- */
- struct BranchPlanningResult {
- MONGO_DISALLOW_COPYING(BranchPlanningResult);
- public:
- BranchPlanningResult() { }
-
- // A parsed version of one branch of the $or.
- std::unique_ptr<CanonicalQuery> canonicalQuery;
-
- // If there is cache data available, then we store it here rather than generating
- // a set of alternate plans for the branch. The index tags from the cache data
- // can be applied directly to the parent $or MatchExpression when generating the
- // composite solution.
- std::unique_ptr<CachedSolution> cachedSolution;
-
- // Query solutions resulting from planning the $or branch.
- OwnedPointerVector<QuerySolution> solutions;
- };
-
- /**
- * Plan each branch of the $or independently, and store the resulting
- * lists of query solutions in '_solutions'.
- *
- * Called from SubplanStage::make so that construction of the subplan stage
- * fails immediately, rather than returning a plan executor and subsequently
- * through getNext(...).
- */
- Status planSubqueries();
-
- /**
- * Uses the query planning results from planSubqueries() and the multi plan stage
- * to select the best plan for each branch.
- *
- * Helper for pickBestPlan().
- */
- Status choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy);
-
- /**
- * Used as a fallback if subplanning fails. Helper for pickBestPlan().
- */
- Status choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy);
-
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
- // Not owned here. Must be non-null.
- Collection* _collection;
-
- // Not owned here.
- WorkingSet* _ws;
-
- QueryPlannerParams _plannerParams;
-
- // Not owned here.
- CanonicalQuery* _query;
-
- // If we successfully create a "composite solution" by planning each $or branch
- // independently, that solution is owned here.
- std::unique_ptr<QuerySolution> _compositeSolution;
-
- std::unique_ptr<PlanStage> _child;
-
- // Holds a list of the results from planning each branch.
- OwnedPointerVector<BranchPlanningResult> _branchResults;
-
- // We need this to extract cache-friendly index data from the index assignments.
- std::map<BSONObj, size_t> _indexMap;
-
- CommonStats _commonStats;
+ BranchPlanningResult() {}
+
+ // A parsed version of one branch of the $or.
+ std::unique_ptr<CanonicalQuery> canonicalQuery;
+
+ // If there is cache data available, then we store it here rather than generating
+ // a set of alternate plans for the branch. The index tags from the cache data
+ // can be applied directly to the parent $or MatchExpression when generating the
+ // composite solution.
+ std::unique_ptr<CachedSolution> cachedSolution;
+
+ // Query solutions resulting from planning the $or branch.
+ OwnedPointerVector<QuerySolution> solutions;
};
+ /**
+ * Plan each branch of the $or independently, and store the resulting
+ * lists of query solutions in '_solutions'.
+ *
+ * Called from SubplanStage::make so that construction of the subplan stage
+ * fails immediately, rather than returning a plan executor that subsequently
+ * fails through getNext(...).
+ */
+ Status planSubqueries();
+
+ /**
+ * Uses the query planning results from planSubqueries() and the multi plan stage
+ * to select the best plan for each branch.
+ *
+ * Helper for pickBestPlan().
+ */
+ Status choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy);
+
+ /**
+ * Used as a fallback if subplanning fails. Helper for pickBestPlan().
+ */
+ Status choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy);
+
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
+
+ // Not owned here. Must be non-null.
+ Collection* _collection;
+
+ // Not owned here.
+ WorkingSet* _ws;
+
+ QueryPlannerParams _plannerParams;
+
+ // Not owned here.
+ CanonicalQuery* _query;
+
+ // If we successfully create a "composite solution" by planning each $or branch
+ // independently, that solution is owned here.
+ std::unique_ptr<QuerySolution> _compositeSolution;
+
+ std::unique_ptr<PlanStage> _child;
+
+ // Holds a list of the results from planning each branch.
+ OwnedPointerVector<BranchPlanningResult> _branchResults;
+
+ // We need this to extract cache-friendly index data from the index assignments.
+ std::map<BSONObj, size_t> _indexMap;
+
+ CommonStats _commonStats;
+};
+
} // namespace mongo
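
To make the per-clause caching strategy documented in the SubplanStage header above concrete, here is a minimal standalone sketch: each $or clause is planned from the plan cache when an entry for its shape exists, and by multi-planning (which then caches the winner) otherwise. The names below (PlanCache, CachedEntry, ClausePlan, multiPlan, planOrClauses) are illustrative stand-ins, not MongoDB's actual API.

// Hypothetical sketch only -- simplified stand-ins for the per-clause plan cache
// interaction described above; not MongoDB's real types.
#include <map>
#include <string>
#include <vector>

struct CachedEntry {
    std::string indexTags;  // stands in for the cached index assignments
};

struct ClausePlan {
    std::string indexTags;
    bool fromCache;
};

using PlanCache = std::map<std::string /*query shape*/, CachedEntry>;

// Stand-in for ranking candidate plans with a MultiPlanStage and caching the winner.
ClausePlan multiPlan(const std::string& shape, PlanCache& cache) {
    ClausePlan winner{"tags-for-" + shape, /*fromCache=*/false};
    cache[shape] = CachedEntry{winner.indexTags};  // winner drives later executions of this shape
    return winner;
}

// Plan each $or clause: from the cache when its shape has an entry, otherwise by multi-planning.
std::vector<ClausePlan> planOrClauses(const std::vector<std::string>& clauseShapes,
                                      PlanCache& cache) {
    std::vector<ClausePlan> plans;
    for (const std::string& shape : clauseShapes) {
        auto it = cache.find(shape);
        if (it != cache.end()) {
            plans.push_back(ClausePlan{it->second.indexTags, /*fromCache=*/true});
        } else {
            plans.push_back(multiPlan(shape, cache));
        }
    }
    // The index tags from each clause would then be combined into one composite plan,
    // which itself is never written to the plan cache.
    return plans;
}
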
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index f9db2e94be3..933e2b6aba2 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -41,54 +41,55 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::vector;
-
- // static
- const char* TextStage::kStageType = "TEXT";
-
- TextStage::TextStage(OperationContext* txn,
- const TextStageParams& params,
- WorkingSet* ws,
- const MatchExpression* filter)
- : _txn(txn),
- _params(params),
- _ftsMatcher(params.query, params.spec),
- _ws(ws),
- _filter(filter),
- _commonStats(kStageType),
- _internalState(INIT_SCANS),
- _currentIndexScanner(0),
- _idRetrying(WorkingSet::INVALID_ID) {
- _scoreIterator = _scores.end();
- _specificStats.indexPrefix = _params.indexPrefix;
- _specificStats.indexName = _params.index->indexName();
+using std::unique_ptr;
+using std::string;
+using std::vector;
+
+// static
+const char* TextStage::kStageType = "TEXT";
+
+TextStage::TextStage(OperationContext* txn,
+ const TextStageParams& params,
+ WorkingSet* ws,
+ const MatchExpression* filter)
+ : _txn(txn),
+ _params(params),
+ _ftsMatcher(params.query, params.spec),
+ _ws(ws),
+ _filter(filter),
+ _commonStats(kStageType),
+ _internalState(INIT_SCANS),
+ _currentIndexScanner(0),
+ _idRetrying(WorkingSet::INVALID_ID) {
+ _scoreIterator = _scores.end();
+ _specificStats.indexPrefix = _params.indexPrefix;
+ _specificStats.indexName = _params.index->indexName();
+}
+
+TextStage::~TextStage() {}
+
+bool TextStage::isEOF() {
+ return _internalState == DONE;
+}
+
+PlanStage::StageState TextStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
+
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
}
+ invariant(_internalState != DONE);
- TextStage::~TextStage() { }
+ PlanStage::StageState stageState = PlanStage::IS_EOF;
- bool TextStage::isEOF() {
- return _internalState == DONE;
- }
-
- PlanStage::StageState TextStage::work(WorkingSetID* out) {
- ++_commonStats.works;
-
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
-
- if (isEOF()) { return PlanStage::IS_EOF; }
- invariant(_internalState != DONE);
-
- PlanStage::StageState stageState = PlanStage::IS_EOF;
-
- switch (_internalState) {
+ switch (_internalState) {
case INIT_SCANS:
try {
stageState = initScans(out);
- }
- catch (const WriteConflictException& wce) {
+ } catch (const WriteConflictException& wce) {
// Reset and try again next time.
_internalState = INIT_SCANS;
_scanners.clear();
@@ -106,10 +107,10 @@ namespace mongo {
case DONE:
// Handled above.
break;
- }
+ }
- // Increment common stats counters that are specific to the return value of work().
- switch (stageState) {
+ // Increment common stats counters that are specific to the return value of work().
+ switch (stageState) {
case PlanStage::ADVANCED:
++_commonStats.advanced;
break;
@@ -121,376 +122,358 @@ namespace mongo {
break;
default:
break;
- }
-
- return stageState;
}
- void TextStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
+ return stageState;
+}
- for (size_t i = 0; i < _scanners.size(); ++i) {
- _scanners.mutableVector()[i]->saveState();
- }
+void TextStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
- if (_recordCursor) _recordCursor->saveUnpositioned();
+ for (size_t i = 0; i < _scanners.size(); ++i) {
+ _scanners.mutableVector()[i]->saveState();
}
- void TextStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
+ if (_recordCursor)
+ _recordCursor->saveUnpositioned();
+}
- for (size_t i = 0; i < _scanners.size(); ++i) {
- _scanners.mutableVector()[i]->restoreState(opCtx);
- }
+void TextStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
- if (_recordCursor) invariant(_recordCursor->restore(opCtx));
+ for (size_t i = 0; i < _scanners.size(); ++i) {
+ _scanners.mutableVector()[i]->restoreState(opCtx);
}
- void TextStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
+ if (_recordCursor)
+ invariant(_recordCursor->restore(opCtx));
+}
- // Propagate invalidate to children.
- for (size_t i = 0; i < _scanners.size(); ++i) {
- _scanners.mutableVector()[i]->invalidate(txn, dl, type);
- }
+void TextStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
- // We store the score keyed by RecordId. We have to toss out our state when the RecordId
- // changes.
- // TODO: If we're RETURNING_RESULTS we could somehow buffer the object.
- ScoreMap::iterator scoreIt = _scores.find(dl);
- if (scoreIt != _scores.end()) {
- if (scoreIt == _scoreIterator) {
- _scoreIterator++;
- }
- _scores.erase(scoreIt);
- }
+ // Propagate invalidate to children.
+ for (size_t i = 0; i < _scanners.size(); ++i) {
+ _scanners.mutableVector()[i]->invalidate(txn, dl, type);
}
- vector<PlanStage*> TextStage::getChildren() const {
- vector<PlanStage*> empty;
- return empty;
+ // We store the score keyed by RecordId. We have to toss out our state when the RecordId
+ // changes.
+ // TODO: If we're RETURNING_RESULTS we could somehow buffer the object.
+ ScoreMap::iterator scoreIt = _scores.find(dl);
+ if (scoreIt != _scores.end()) {
+ if (scoreIt == _scoreIterator) {
+ _scoreIterator++;
+ }
+ _scores.erase(scoreIt);
}
+}
- PlanStageStats* TextStage::getStats() {
- _commonStats.isEOF = isEOF();
+vector<PlanStage*> TextStage::getChildren() const {
+ vector<PlanStage*> empty;
+ return empty;
+}
- // Add a BSON representation of the filter to the stats tree, if there is one.
- if (NULL != _filter) {
- BSONObjBuilder bob;
- _filter->toBSON(&bob);
- _commonStats.filter = bob.obj();
- }
+PlanStageStats* TextStage::getStats() {
+ _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_TEXT));
- ret->specific.reset(new TextStats(_specificStats));
- return ret.release();
+ // Add a BSON representation of the filter to the stats tree, if there is one.
+ if (NULL != _filter) {
+ BSONObjBuilder bob;
+ _filter->toBSON(&bob);
+ _commonStats.filter = bob.obj();
}
- const CommonStats* TextStage::getCommonStats() const {
- return &_commonStats;
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_TEXT));
+ ret->specific.reset(new TextStats(_specificStats));
+ return ret.release();
+}
+
+const CommonStats* TextStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* TextStage::getSpecificStats() const {
+ return &_specificStats;
+}
+
+PlanStage::StageState TextStage::initScans(WorkingSetID* out) {
+ invariant(0 == _scanners.size());
+
+ _recordCursor = _params.index->getCollection()->getCursor(_txn);
+
+ _specificStats.parsedTextQuery = _params.query.toBSON();
+
+ // Get all the index scans for each term in our query.
+ // TODO it would be more efficient to only have one active scan at a time and create the
+ // next when each finishes.
+ for (std::set<std::string>::const_iterator it = _params.query.getTermsForBounds().begin();
+ it != _params.query.getTermsForBounds().end();
+ ++it) {
+ const string& term = *it;
+ IndexScanParams params;
+ params.bounds.startKey = FTSIndexFormat::getIndexKey(
+ MAX_WEIGHT, term, _params.indexPrefix, _params.spec.getTextIndexVersion());
+ params.bounds.endKey = FTSIndexFormat::getIndexKey(
+ 0, term, _params.indexPrefix, _params.spec.getTextIndexVersion());
+ params.bounds.endKeyInclusive = true;
+ params.bounds.isSimpleRange = true;
+ params.descriptor = _params.index;
+ params.direction = -1;
+ _scanners.mutableVector().push_back(new IndexScan(_txn, params, _ws, NULL));
}
- const SpecificStats* TextStage::getSpecificStats() const {
- return &_specificStats;
+ // If we have no terms we go right to EOF.
+ if (0 == _scanners.size()) {
+ _internalState = DONE;
+ return PlanStage::IS_EOF;
}
- PlanStage::StageState TextStage::initScans(WorkingSetID* out) {
- invariant(0 == _scanners.size());
-
- _recordCursor = _params.index->getCollection()->getCursor(_txn);
-
- _specificStats.parsedTextQuery = _params.query.toBSON();
-
- // Get all the index scans for each term in our query.
- // TODO it would be more efficient to only have one active scan at a time and create the
- // next when each finishes.
- for (std::set<std::string>::const_iterator it = _params.query.getTermsForBounds().begin();
- it != _params.query.getTermsForBounds().end();
- ++it) {
- const string& term = *it;
- IndexScanParams params;
- params.bounds.startKey = FTSIndexFormat::getIndexKey(MAX_WEIGHT,
- term,
- _params.indexPrefix,
- _params.spec.getTextIndexVersion());
- params.bounds.endKey = FTSIndexFormat::getIndexKey(0,
- term,
- _params.indexPrefix,
- _params.spec.getTextIndexVersion());
- params.bounds.endKeyInclusive = true;
- params.bounds.isSimpleRange = true;
- params.descriptor = _params.index;
- params.direction = -1;
- _scanners.mutableVector().push_back(new IndexScan(_txn, params, _ws, NULL));
- }
-
- // If we have no terms we go right to EOF.
- if (0 == _scanners.size()) {
- _internalState = DONE;
- return PlanStage::IS_EOF;
- }
-
- // Transition to the next state.
- _internalState = READING_TERMS;
- return PlanStage::NEED_TIME;
+ // Transition to the next state.
+ _internalState = READING_TERMS;
+ return PlanStage::NEED_TIME;
+}
+
+PlanStage::StageState TextStage::readFromSubScanners(WorkingSetID* out) {
+ // This should be checked before we get here.
+ invariant(_currentIndexScanner < _scanners.size());
+
+ // Either retry the last WSM we worked on or get a new one from our current scanner.
+ WorkingSetID id;
+ StageState childState;
+ if (_idRetrying == WorkingSet::INVALID_ID) {
+ childState = _scanners.vector()[_currentIndexScanner]->work(&id);
+ } else {
+ childState = ADVANCED;
+ id = _idRetrying;
+ _idRetrying = WorkingSet::INVALID_ID;
}
- PlanStage::StageState TextStage::readFromSubScanners(WorkingSetID* out) {
- // This should be checked before we get here.
- invariant(_currentIndexScanner < _scanners.size());
-
- // Either retry the last WSM we worked on or get a new one from our current scanner.
- WorkingSetID id;
- StageState childState;
- if (_idRetrying == WorkingSet::INVALID_ID) {
- childState = _scanners.vector()[_currentIndexScanner]->work(&id);
- }
- else {
- childState = ADVANCED;
- id = _idRetrying;
- _idRetrying = WorkingSet::INVALID_ID;
- }
+ if (PlanStage::ADVANCED == childState) {
+ return addTerm(id, out);
+ } else if (PlanStage::IS_EOF == childState) {
+ // Done with this scan.
+ ++_currentIndexScanner;
- if (PlanStage::ADVANCED == childState) {
- return addTerm(id, out);
+ if (_currentIndexScanner < _scanners.size()) {
+ // We have another scan to read from.
+ return PlanStage::NEED_TIME;
}
- else if (PlanStage::IS_EOF == childState) {
- // Done with this scan.
- ++_currentIndexScanner;
- if (_currentIndexScanner < _scanners.size()) {
- // We have another scan to read from.
- return PlanStage::NEED_TIME;
- }
+ // If we're here we are done reading results. Move to the next state.
+ _scoreIterator = _scores.begin();
+ _internalState = RETURNING_RESULTS;
- // If we're here we are done reading results. Move to the next state.
- _scoreIterator = _scores.begin();
- _internalState = RETURNING_RESULTS;
-
- // Don't need to keep these around.
- _scanners.clear();
- return PlanStage::NEED_TIME;
- }
- else {
- // Propagate WSID from below.
- *out = id;
- if (PlanStage::FAILURE == childState) {
- // If a stage fails, it may create a status WSM to indicate why it
- // failed, in which case 'id' is valid. If ID is invalid, we
- // create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- mongoutils::str::stream ss;
- ss << "text stage failed to read in results from child";
- Status status(ErrorCodes::InternalError, ss);
- *out = WorkingSetCommon::allocateStatusMember( _ws, status);
- }
+ // Don't need to keep these around.
+ _scanners.clear();
+ return PlanStage::NEED_TIME;
+ } else {
+ // Propagate WSID from below.
+ *out = id;
+ if (PlanStage::FAILURE == childState) {
+ // If a stage fails, it may create a status WSM to indicate why it
+ // failed, in which case 'id' is valid. If ID is invalid, we
+ // create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ mongoutils::str::stream ss;
+ ss << "text stage failed to read in results from child";
+ Status status(ErrorCodes::InternalError, ss);
+ *out = WorkingSetCommon::allocateStatusMember(_ws, status);
}
- return childState;
}
+ return childState;
}
+}
- PlanStage::StageState TextStage::returnResults(WorkingSetID* out) {
- if (_scoreIterator == _scores.end()) {
- _internalState = DONE;
- return PlanStage::IS_EOF;
- }
-
- // Filter for phrases and negative terms, score and truncate.
- TextRecordData textRecordData = _scoreIterator->second;
+PlanStage::StageState TextStage::returnResults(WorkingSetID* out) {
+ if (_scoreIterator == _scores.end()) {
+ _internalState = DONE;
+ return PlanStage::IS_EOF;
+ }
- // Ignore non-matched documents.
- if (textRecordData.score < 0) {
- _scoreIterator++;
- invariant(textRecordData.wsid == WorkingSet::INVALID_ID);
- return PlanStage::NEED_TIME;
- }
-
- WorkingSetMember* wsm = _ws->get(textRecordData.wsid);
- try {
- if (!WorkingSetCommon::fetchIfUnfetched(_txn, wsm, _recordCursor)) {
- _scoreIterator++;
- _ws->free(textRecordData.wsid);
- _commonStats.needTime++;
- return NEED_TIME;
- }
- }
- catch (const WriteConflictException& wce) {
- // Do this record again next time around.
- *out = WorkingSet::INVALID_ID;
- _commonStats.needYield++;
- return NEED_YIELD;
- }
+ // Filter for phrases and negative terms, score and truncate.
+ TextRecordData textRecordData = _scoreIterator->second;
+ // Ignore non-matched documents.
+ if (textRecordData.score < 0) {
_scoreIterator++;
+ invariant(textRecordData.wsid == WorkingSet::INVALID_ID);
+ return PlanStage::NEED_TIME;
+ }
- // Filter for phrases and negated terms
- if (!_ftsMatcher.matches(wsm->obj.value())) {
+ WorkingSetMember* wsm = _ws->get(textRecordData.wsid);
+ try {
+ if (!WorkingSetCommon::fetchIfUnfetched(_txn, wsm, _recordCursor)) {
+ _scoreIterator++;
_ws->free(textRecordData.wsid);
- return PlanStage::NEED_TIME;
+ _commonStats.needTime++;
+ return NEED_TIME;
}
+ } catch (const WriteConflictException& wce) {
+ // Do this record again next time around.
+ *out = WorkingSet::INVALID_ID;
+ _commonStats.needYield++;
+ return NEED_YIELD;
+ }
- // Populate the working set member with the text score and return it.
- wsm->addComputed(new TextScoreComputedData(textRecordData.score));
- *out = textRecordData.wsid;
- return PlanStage::ADVANCED;
+ _scoreIterator++;
+
+ // Filter for phrases and negated terms
+ if (!_ftsMatcher.matches(wsm->obj.value())) {
+ _ws->free(textRecordData.wsid);
+ return PlanStage::NEED_TIME;
}
- class TextMatchableDocument : public MatchableDocument {
- public:
- TextMatchableDocument(OperationContext* txn,
- const BSONObj& keyPattern,
- const BSONObj& key,
- WorkingSetMember* wsm,
- unowned_ptr<RecordCursor> recordCursor)
- : _txn(txn),
- _recordCursor(recordCursor),
- _keyPattern(keyPattern),
- _key(key),
- _wsm(wsm) { }
-
- BSONObj toBSON() const {
- return getObj();
- }
+ // Populate the working set member with the text score and return it.
+ wsm->addComputed(new TextScoreComputedData(textRecordData.score));
+ *out = textRecordData.wsid;
+ return PlanStage::ADVANCED;
+}
+
+class TextMatchableDocument : public MatchableDocument {
+public:
+ TextMatchableDocument(OperationContext* txn,
+ const BSONObj& keyPattern,
+ const BSONObj& key,
+ WorkingSetMember* wsm,
+ unowned_ptr<RecordCursor> recordCursor)
+ : _txn(txn), _recordCursor(recordCursor), _keyPattern(keyPattern), _key(key), _wsm(wsm) {}
+
+ BSONObj toBSON() const {
+ return getObj();
+ }
- virtual ElementIterator* allocateIterator(const ElementPath* path) const {
- if (!_wsm->hasObj()) {
- // Try to look in the key.
- BSONObjIterator keyPatternIt(_keyPattern);
- BSONObjIterator keyDataIt(_key);
-
- while (keyPatternIt.more()) {
- BSONElement keyPatternElt = keyPatternIt.next();
- verify(keyDataIt.more());
- BSONElement keyDataElt = keyDataIt.next();
-
- if (path->fieldRef().equalsDottedField(keyPatternElt.fieldName())) {
- if (Array == keyDataElt.type()) {
- return new SimpleArrayElementIterator(keyDataElt, true);
- }
- else {
- return new SingleElementElementIterator(keyDataElt);
- }
+ virtual ElementIterator* allocateIterator(const ElementPath* path) const {
+ if (!_wsm->hasObj()) {
+ // Try to look in the key.
+ BSONObjIterator keyPatternIt(_keyPattern);
+ BSONObjIterator keyDataIt(_key);
+
+ while (keyPatternIt.more()) {
+ BSONElement keyPatternElt = keyPatternIt.next();
+ verify(keyDataIt.more());
+ BSONElement keyDataElt = keyDataIt.next();
+
+ if (path->fieldRef().equalsDottedField(keyPatternElt.fieldName())) {
+ if (Array == keyDataElt.type()) {
+ return new SimpleArrayElementIterator(keyDataElt, true);
+ } else {
+ return new SingleElementElementIterator(keyDataElt);
}
}
}
-
- // Go to the raw document, fetching if needed.
- return new BSONElementIterator(path, getObj());
}
- virtual void releaseIterator( ElementIterator* iterator ) const {
- delete iterator;
- }
+ // Go to the raw document, fetching if needed.
+ return new BSONElementIterator(path, getObj());
+ }
- // Thrown if we detect that the document being matched was deleted.
- class DocumentDeletedException {};
+ virtual void releaseIterator(ElementIterator* iterator) const {
+ delete iterator;
+ }
- private:
- BSONObj getObj() const {
- if (!WorkingSetCommon::fetchIfUnfetched(_txn, _wsm, _recordCursor))
- throw DocumentDeletedException();
+ // Thrown if we detect that the document being matched was deleted.
+ class DocumentDeletedException {};
- // Make it owned since we are buffering results.
- _wsm->obj.setValue(_wsm->obj.value().getOwned());
- return _wsm->obj.value();
- }
+private:
+ BSONObj getObj() const {
+ if (!WorkingSetCommon::fetchIfUnfetched(_txn, _wsm, _recordCursor))
+ throw DocumentDeletedException();
- OperationContext* _txn;
- unowned_ptr<RecordCursor> _recordCursor;
- BSONObj _keyPattern;
- BSONObj _key;
- WorkingSetMember* _wsm;
- };
-
- PlanStage::StageState TextStage::addTerm(WorkingSetID wsid, WorkingSetID* out) {
- WorkingSetMember* wsm = _ws->get(wsid);
- invariant(wsm->state == WorkingSetMember::LOC_AND_IDX);
- invariant(1 == wsm->keyData.size());
- const IndexKeyDatum newKeyData = wsm->keyData.back(); // copy to keep it around.
-
- TextRecordData* textRecordData = &_scores[wsm->loc];
- double* documentAggregateScore = &textRecordData->score;
-
- if (WorkingSet::INVALID_ID == textRecordData->wsid) {
- // We haven't seen this RecordId before. Keep the working set member around
- // (it may be force-fetched on saveState()).
- textRecordData->wsid = wsid;
-
- if (_filter) {
- // We have not seen this document before and need to apply a filter.
- bool shouldKeep;
- bool wasDeleted = false;
- try {
- TextMatchableDocument tdoc(_txn,
- newKeyData.indexKeyPattern,
- newKeyData.keyData,
- wsm,
- _recordCursor);
- shouldKeep = _filter->matches(&tdoc);
- }
- catch (const WriteConflictException& wce) {
- _idRetrying = wsid;
- *out = WorkingSet::INVALID_ID;
- return NEED_YIELD;
- }
- catch (const TextMatchableDocument::DocumentDeletedException&) {
- // We attempted to fetch the document but decided it should be excluded from the
- // result set.
- shouldKeep = false;
- wasDeleted = true;
- }
+ // Make it owned since we are buffering results.
+ _wsm->obj.setValue(_wsm->obj.value().getOwned());
+ return _wsm->obj.value();
+ }
- if (!shouldKeep) {
- if (wasDeleted || wsm->hasObj()) {
- // We had to fetch but we're not going to return it.
- ++_specificStats.fetches;
- }
- _ws->free(textRecordData->wsid);
- textRecordData->wsid = WorkingSet::INVALID_ID;
- *documentAggregateScore = -1;
- return NEED_TIME;
- }
+ OperationContext* _txn;
+ unowned_ptr<RecordCursor> _recordCursor;
+ BSONObj _keyPattern;
+ BSONObj _key;
+ WorkingSetMember* _wsm;
+};
+
+PlanStage::StageState TextStage::addTerm(WorkingSetID wsid, WorkingSetID* out) {
+ WorkingSetMember* wsm = _ws->get(wsid);
+ invariant(wsm->state == WorkingSetMember::LOC_AND_IDX);
+ invariant(1 == wsm->keyData.size());
+ const IndexKeyDatum newKeyData = wsm->keyData.back(); // copy to keep it around.
+
+ TextRecordData* textRecordData = &_scores[wsm->loc];
+ double* documentAggregateScore = &textRecordData->score;
+
+ if (WorkingSet::INVALID_ID == textRecordData->wsid) {
+ // We haven't seen this RecordId before. Keep the working set member around
+ // (it may be force-fetched on saveState()).
+ textRecordData->wsid = wsid;
+
+ if (_filter) {
+ // We have not seen this document before and need to apply a filter.
+ bool shouldKeep;
+ bool wasDeleted = false;
+ try {
+ TextMatchableDocument tdoc(
+ _txn, newKeyData.indexKeyPattern, newKeyData.keyData, wsm, _recordCursor);
+ shouldKeep = _filter->matches(&tdoc);
+ } catch (const WriteConflictException& wce) {
+ _idRetrying = wsid;
+ *out = WorkingSet::INVALID_ID;
+ return NEED_YIELD;
+ } catch (const TextMatchableDocument::DocumentDeletedException&) {
+ // We attempted to fetch the document but decided it should be excluded from the
+ // result set.
+ shouldKeep = false;
+ wasDeleted = true;
}
- else {
- // If we're here, we're going to return the doc, and we do a fetch later.
- ++_specificStats.fetches;
+
+ if (!shouldKeep) {
+ if (wasDeleted || wsm->hasObj()) {
+ // We had to fetch but we're not going to return it.
+ ++_specificStats.fetches;
+ }
+ _ws->free(textRecordData->wsid);
+ textRecordData->wsid = WorkingSet::INVALID_ID;
+ *documentAggregateScore = -1;
+ return NEED_TIME;
}
+ } else {
+ // If we're here, we're going to return the doc, and we do a fetch later.
+ ++_specificStats.fetches;
}
- else {
- // We already have a working set member for this RecordId. Free the new
- // WSM and retrieve the old one.
- // Note that since we don't keep all index keys, we could get a score that doesn't match
- // the document, but this has always been a problem.
- // TODO something to improve the situation.
- invariant(wsid != textRecordData->wsid);
- _ws->free(wsid);
- wsm = _ws->get(textRecordData->wsid);
- }
+ } else {
+ // We already have a working set member for this RecordId. Free the new
+ // WSM and retrieve the old one.
+ // Note that since we don't keep all index keys, we could get a score that doesn't match
+ // the document, but this has always been a problem.
+ // TODO something to improve the situation.
+ invariant(wsid != textRecordData->wsid);
+ _ws->free(wsid);
+ wsm = _ws->get(textRecordData->wsid);
+ }
- ++_specificStats.keysExamined;
+ ++_specificStats.keysExamined;
- if (*documentAggregateScore < 0) {
- // We have already rejected this document for not matching the filter.
- return NEED_TIME;
- }
+ if (*documentAggregateScore < 0) {
+ // We have already rejected this document for not matching the filter.
+ return NEED_TIME;
+ }
- // Locate score within possibly compound key: {prefix,term,score,suffix}.
- BSONObjIterator keyIt(newKeyData.keyData);
- for (unsigned i = 0; i < _params.spec.numExtraBefore(); i++) {
- keyIt.next();
- }
+ // Locate score within possibly compound key: {prefix,term,score,suffix}.
+ BSONObjIterator keyIt(newKeyData.keyData);
+ for (unsigned i = 0; i < _params.spec.numExtraBefore(); i++) {
+ keyIt.next();
+ }
- keyIt.next(); // Skip past 'term'.
+ keyIt.next(); // Skip past 'term'.
- BSONElement scoreElement = keyIt.next();
- double documentTermScore = scoreElement.number();
+ BSONElement scoreElement = keyIt.next();
+ double documentTermScore = scoreElement.number();
- // Aggregate relevance score, term keys.
- *documentAggregateScore += documentTermScore;
- return NEED_TIME;
- }
+ // Aggregate relevance score, term keys.
+ *documentAggregateScore += documentTermScore;
+ return NEED_TIME;
+}
} // namespace mongo
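
addTerm() above locates the per-term score inside a possibly compound key laid out as {prefix, term, score, suffix}: skip the extra leading fields, skip the term, then read the score. The following standalone sketch illustrates just that walk; KeyElement and extractScore are hypothetical names, and plain C++ containers stand in for BSON keys.

// Hypothetical sketch only -- the {prefix, term, score, suffix} key walk performed
// by addTerm() above, using plain C++ containers instead of BSON.
#include <cstddef>
#include <string>
#include <variant>
#include <vector>

using KeyElement = std::variant<std::string, double>;

double extractScore(const std::vector<KeyElement>& key, std::size_t numExtraBefore) {
    std::size_t pos = numExtraBefore;      // skip the index fields that precede the text fields
    ++pos;                                 // skip the 'term' element
    return std::get<double>(key.at(pos));  // the score immediately follows the term
}

// Example: a key for an index like {city: 1, comments: "text"} has one prefix field,
// so extractScore({KeyElement{"chicago"}, KeyElement{"coffee"}, KeyElement{1.5}}, 1) yields 1.5.
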
diff --git a/src/mongo/db/exec/text.h b/src/mongo/db/exec/text.h
index 96f5c67bc4d..9e64621cbb7 100644
--- a/src/mongo/db/exec/text.h
+++ b/src/mongo/db/exec/text.h
@@ -46,157 +46,159 @@
namespace mongo {
- using fts::FTSIndexFormat;
- using fts::FTSMatcher;
- using fts::FTSQuery;
- using fts::FTSSpec;
- using fts::MAX_WEIGHT;
+using fts::FTSIndexFormat;
+using fts::FTSMatcher;
+using fts::FTSQuery;
+using fts::FTSSpec;
+using fts::MAX_WEIGHT;
- class OperationContext;
+class OperationContext;
- struct TextStageParams {
- TextStageParams(const FTSSpec& s) : spec(s) {}
+struct TextStageParams {
+ TextStageParams(const FTSSpec& s) : spec(s) {}
- // Text index descriptor. IndexCatalog owns this.
- IndexDescriptor* index;
+ // Text index descriptor. IndexCatalog owns this.
+ IndexDescriptor* index;
- // Index spec.
- FTSSpec spec;
+ // Index spec.
+ FTSSpec spec;
- // Index keys that precede the "text" index key.
- BSONObj indexPrefix;
+ // Index keys that precede the "text" index key.
+ BSONObj indexPrefix;
- // The text query.
- FTSQuery query;
- };
+ // The text query.
+ FTSQuery query;
+};
+/**
+ * Implements a blocking stage that returns text search results.
+ *
+ * Prerequisites: None; is a leaf node.
+ * Output type: LOC_AND_OBJ_UNOWNED.
+ *
+ * TODO: Should the TextStage ever generate NEED_YIELD requests for fetching MMAP v1 records?
+ * Right now this stage could reduce concurrency by failing to request a yield during fetch.
+ */
+class TextStage : public PlanStage {
+public:
/**
- * Implements a blocking stage that returns text search results.
- *
- * Prerequisites: None; is a leaf node.
- * Output type: LOC_AND_OBJ_UNOWNED.
- *
- * TODO: Should the TextStage ever generate NEED_YIELD requests for fetching MMAP v1 records?
- * Right now this stage could reduce concurrency by failing to request a yield during fetch.
+ * The text stage has a few 'states' it transitions between.
*/
- class TextStage : public PlanStage {
- public:
- /**
- * The text stage has a few 'states' it transitions between.
- */
- enum State {
- // 1. Initialize the index scans we use to retrieve term/score info.
- INIT_SCANS,
+ enum State {
+ // 1. Initialize the index scans we use to retrieve term/score info.
+ INIT_SCANS,
- // 2. Read the terms/scores from the text index.
- READING_TERMS,
+ // 2. Read the terms/scores from the text index.
+ READING_TERMS,
- // 3. Return results to our parent.
- RETURNING_RESULTS,
+ // 3. Return results to our parent.
+ RETURNING_RESULTS,
- // 4. Done.
- DONE,
- };
+ // 4. Done.
+ DONE,
+ };
- TextStage(OperationContext* txn,
- const TextStageParams& params,
- WorkingSet* ws,
- const MatchExpression* filter);
+ TextStage(OperationContext* txn,
+ const TextStageParams& params,
+ WorkingSet* ws,
+ const MatchExpression* filter);
- virtual ~TextStage();
+ virtual ~TextStage();
- virtual StageState work(WorkingSetID* out);
- virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+ virtual bool isEOF();
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
- virtual std::vector<PlanStage*> getChildren() const;
+ virtual std::vector<PlanStage*> getChildren() const;
- virtual StageType stageType() const { return STAGE_TEXT; }
+ virtual StageType stageType() const {
+ return STAGE_TEXT;
+ }
- PlanStageStats* getStats();
+ PlanStageStats* getStats();
- virtual const CommonStats* getCommonStats() const;
+ virtual const CommonStats* getCommonStats() const;
- virtual const SpecificStats* getSpecificStats() const;
+ virtual const SpecificStats* getSpecificStats() const;
- static const char* kStageType;
+ static const char* kStageType;
- private:
- /**
- * Initializes sub-scanners.
- */
- StageState initScans(WorkingSetID* out);
+private:
+ /**
+ * Initializes sub-scanners.
+ */
+ StageState initScans(WorkingSetID* out);
- /**
- * Helper for buffering results array. Returns NEED_TIME (if any results were produced),
- * IS_EOF, or FAILURE.
- */
- StageState readFromSubScanners(WorkingSetID* out);
+ /**
+ * Helper for buffering results array. Returns NEED_TIME (if any results were produced),
+ * IS_EOF, or FAILURE.
+ */
+ StageState readFromSubScanners(WorkingSetID* out);
- /**
- * Helper called from readFromSubScanners to update aggregate score with a new-found (term,
- * score) pair for this document. Also rejects documents that don't match this stage's
- * filter.
- */
- StageState addTerm(WorkingSetID wsid, WorkingSetID* out);
+ /**
+ * Helper called from readFromSubScanners to update aggregate score with a new-found (term,
+ * score) pair for this document. Also rejects documents that don't match this stage's
+ * filter.
+ */
+ StageState addTerm(WorkingSetID wsid, WorkingSetID* out);
- /**
- * Possibly return a result. FYI, this may perform a fetch directly if it is needed to
- * evaluate all filters.
- */
- StageState returnResults(WorkingSetID* out);
+ /**
+ * Possibly return a result. FYI, this may perform a fetch directly if it is needed to
+ * evaluate all filters.
+ */
+ StageState returnResults(WorkingSetID* out);
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
- // Parameters of this text stage.
- TextStageParams _params;
+ // Parameters of this text stage.
+ TextStageParams _params;
- // Text-specific phrase and negated term matcher.
- FTSMatcher _ftsMatcher;
+ // Text-specific phrase and negated term matcher.
+ FTSMatcher _ftsMatcher;
- // Working set. Not owned by us.
- WorkingSet* _ws;
+ // Working set. Not owned by us.
+ WorkingSet* _ws;
- // Filter. Not owned by us.
- const MatchExpression* _filter;
+ // Filter. Not owned by us.
+ const MatchExpression* _filter;
- // Stats.
- CommonStats _commonStats;
- TextStats _specificStats;
+ // Stats.
+ CommonStats _commonStats;
+ TextStats _specificStats;
- // What state are we in? See the State enum above.
- State _internalState;
+ // What state are we in? See the State enum above.
+ State _internalState;
- // Used in INIT_SCANS and READING_TERMS. The index scans we're using to retrieve text
- // terms.
- OwnedPointerVector<PlanStage> _scanners;
+ // Used in INIT_SCANS and READING_TERMS. The index scans we're using to retrieve text
+ // terms.
+ OwnedPointerVector<PlanStage> _scanners;
- // Which _scanners are we currently reading from?
- size_t _currentIndexScanner;
+ // Which _scanners are we currently reading from?
+ size_t _currentIndexScanner;
- // If not Null, we use this rather than asking our child what to do next.
- WorkingSetID _idRetrying;
+ // If not Null, we use this rather than asking our child what to do next.
+ WorkingSetID _idRetrying;
- // Map each buffered record id to this data.
- struct TextRecordData {
- TextRecordData() : wsid(WorkingSet::INVALID_ID), score(0.0) { }
- WorkingSetID wsid;
- double score;
- };
+ // Map each buffered record id to this data.
+ struct TextRecordData {
+ TextRecordData() : wsid(WorkingSet::INVALID_ID), score(0.0) {}
+ WorkingSetID wsid;
+ double score;
+ };
- // Temporary score data filled out by sub-scans. Used in READING_TERMS and
- // RETURNING_RESULTS.
- // Maps from diskloc -> (aggregate score for doc, wsid).
- typedef unordered_map<RecordId, TextRecordData, RecordId::Hasher> ScoreMap;
- ScoreMap _scores;
- ScoreMap::const_iterator _scoreIterator;
+ // Temporary score data filled out by sub-scans. Used in READING_TERMS and
+ // RETURNING_RESULTS.
+ // Maps from diskloc -> (aggregate score for doc, wsid).
+ typedef unordered_map<RecordId, TextRecordData, RecordId::Hasher> ScoreMap;
+ ScoreMap _scores;
+ ScoreMap::const_iterator _scoreIterator;
- // Used for fetching records from the collection.
- std::unique_ptr<RecordCursor> _recordCursor;
- };
+ // Used for fetching records from the collection.
+ std::unique_ptr<RecordCursor> _recordCursor;
+};
-} // namespace mongo
+} // namespace mongo
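
The State enum above drives TextStage's work() loop: INIT_SCANS builds one index scan per term, READING_TERMS drains those scans while accumulating a per-document aggregate score, RETURNING_RESULTS streams the buffered scores, and DONE signals EOF. A minimal standalone sketch of that progression follows; TextSearchSketch and its members are illustrative, not the real stage.

// Hypothetical sketch only -- the state progression described by the State enum above,
// with plain containers standing in for the index scans and the working set.
#include <cstddef>
#include <map>
#include <utility>
#include <vector>

enum class State { kInitScans, kReadingTerms, kReturningResults, kDone };

struct TextSearchSketch {
    // One vector per search term: (recordId, score contribution of that term).
    std::vector<std::vector<std::pair<int, double>>> termScans;
    std::map<int, double> scores;  // recordId -> aggregate score, built while reading terms
    State state = State::kInitScans;
    std::size_t currentScan = 0;

    // One unit of work, in the spirit of PlanStage::work(); returns false at EOF.
    bool work(std::pair<int, double>* out) {
        switch (state) {
            case State::kInitScans:
                // No terms means nothing to return; otherwise start draining the scans.
                state = termScans.empty() ? State::kDone : State::kReadingTerms;
                return state != State::kDone;
            case State::kReadingTerms:
                if (currentScan == termScans.size()) {
                    state = State::kReturningResults;  // every scan is exhausted
                } else {
                    for (const auto& entry : termScans[currentScan])
                        scores[entry.first] += entry.second;  // aggregate per-document score
                    ++currentScan;
                }
                return true;
            case State::kReturningResults:
                if (scores.empty()) {
                    state = State::kDone;
                    return false;
                }
                *out = *scores.begin();  // hand one buffered result to the caller
                scores.erase(scores.begin());
                return true;
            case State::kDone:
                return false;
        }
        return false;
    }
};
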
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index 8ba566cd7e0..7d2a768664f 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -46,1070 +46,1025 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::vector;
- namespace mb = mutablebson;
+namespace mb = mutablebson;
- namespace {
+namespace {
- const char idFieldName[] = "_id";
- const FieldRef idFieldRef(idFieldName);
+const char idFieldName[] = "_id";
+const FieldRef idFieldRef(idFieldName);
- Status storageValid(const mb::Document&, const bool = true);
- Status storageValid(const mb::ConstElement&, const bool = true);
- Status storageValidChildren(const mb::ConstElement&, const bool = true);
+Status storageValid(const mb::Document&, const bool = true);
+Status storageValid(const mb::ConstElement&, const bool = true);
+Status storageValidChildren(const mb::ConstElement&, const bool = true);
- /**
- * mutable::document storageValid check -- like BSONObj::_okForStorage
- */
- Status storageValid(const mb::Document& doc, const bool deep) {
- mb::ConstElement currElem = doc.root().leftChild();
- while (currElem.ok()) {
- if (currElem.getFieldName() == idFieldName) {
- switch (currElem.getType()) {
- case RegEx:
- case Array:
- case Undefined:
- return Status(ErrorCodes::InvalidIdField,
- str::stream() << "The '_id' value cannot be of type "
- << typeName(currElem.getType()));
- default:
- break;
- }
- }
- Status s = storageValid(currElem, deep);
- if (!s.isOK())
- return s;
- currElem = currElem.rightSibling();
+/**
+ * mutable::document storageValid check -- like BSONObj::_okForStorage
+ */
+Status storageValid(const mb::Document& doc, const bool deep) {
+ mb::ConstElement currElem = doc.root().leftChild();
+ while (currElem.ok()) {
+ if (currElem.getFieldName() == idFieldName) {
+ switch (currElem.getType()) {
+ case RegEx:
+ case Array:
+ case Undefined:
+ return Status(ErrorCodes::InvalidIdField,
+ str::stream() << "The '_id' value cannot be of type "
+ << typeName(currElem.getType()));
+ default:
+ break;
}
+ }
+ Status s = storageValid(currElem, deep);
+ if (!s.isOK())
+ return s;
+ currElem = currElem.rightSibling();
+ }
+
+ return Status::OK();
+}
- return Status::OK();
+/**
+ * Validates an element that has a field name which starts with a dollar sign ($).
+ * In the case of a DBRef field ($id, $ref, [$db]), these fields are valid only in
+ * the correct order/context.
+ */
+Status validateDollarPrefixElement(const mb::ConstElement elem, const bool deep) {
+ mb::ConstElement curr = elem;
+ StringData currName = elem.getFieldName();
+ LOG(5) << "validateDollarPrefixElement -- validating field '" << currName << "'";
+ // Found a $db field
+ if (currName == "$db") {
+ if (curr.getType() != String) {
+ return Status(ErrorCodes::InvalidDBRef,
+ str::stream() << "The DBRef $db field must be a String, not a "
+ << typeName(curr.getType()));
}
+ curr = curr.leftSibling();
- /**
- * Validates an element that has a field name which starts with a dollar sign ($).
- * In the case of a DBRef field ($id, $ref, [$db]) these fields may be valid in
- * the correct order/context only.
- */
- Status validateDollarPrefixElement(const mb::ConstElement elem, const bool deep) {
- mb::ConstElement curr = elem;
- StringData currName = elem.getFieldName();
- LOG(5) << "validateDollarPrefixElement -- validating field '" << currName << "'";
- // Found a $db field
- if (currName == "$db") {
- if (curr.getType() != String) {
- return Status(ErrorCodes::InvalidDBRef,
- str::stream() << "The DBRef $db field must be a String, not a "
- << typeName(curr.getType()));
- }
- curr = curr.leftSibling();
+ if (!curr.ok() || (curr.getFieldName() != "$id"))
+ return Status(ErrorCodes::InvalidDBRef,
+ "Found $db field without a $id before it, which is invalid.");
- if (!curr.ok() || (curr.getFieldName() != "$id"))
- return Status(ErrorCodes::InvalidDBRef,
- "Found $db field without a $id before it, which is invalid.");
+ currName = curr.getFieldName();
+ }
- currName = curr.getFieldName();
- }
+ // Found a $id field
+ if (currName == "$id") {
+ Status s = storageValidChildren(curr, deep);
+ if (!s.isOK())
+ return s;
- // Found a $id field
- if (currName == "$id") {
- Status s = storageValidChildren(curr, deep);
- if (!s.isOK())
- return s;
+ curr = curr.leftSibling();
+ if (!curr.ok() || (curr.getFieldName() != "$ref")) {
+ return Status(ErrorCodes::InvalidDBRef,
+ "Found $id field without a $ref before it, which is invalid.");
+ }
- curr = curr.leftSibling();
- if (!curr.ok() || (curr.getFieldName() != "$ref")) {
- return Status(ErrorCodes::InvalidDBRef,
- "Found $id field without a $ref before it, which is invalid.");
- }
+ currName = curr.getFieldName();
+ }
- currName = curr.getFieldName();
- }
+ if (currName == "$ref") {
+ if (curr.getType() != String) {
+ return Status(ErrorCodes::InvalidDBRef,
+ str::stream() << "The DBRef $ref field must be a String, not a "
+ << typeName(curr.getType()));
+ }
- if (currName == "$ref") {
- if (curr.getType() != String) {
- return Status(ErrorCodes::InvalidDBRef,
- str::stream() << "The DBRef $ref field must be a String, not a "
- << typeName(curr.getType()));
- }
+ if (!curr.rightSibling().ok() || curr.rightSibling().getFieldName() != "$id")
+ return Status(ErrorCodes::InvalidDBRef,
+ str::stream() << "The DBRef $ref field must be "
+ "following by a $id field");
+ } else {
+ // Not an allowed $-prefixed field name.
+ return Status(ErrorCodes::DollarPrefixedFieldName,
+ str::stream() << "The dollar ($) prefixed field '" << elem.getFieldName()
+ << "' in '" << mb::getFullName(elem)
+ << "' is not valid for storage.");
+ }
- if (!curr.rightSibling().ok() || curr.rightSibling().getFieldName() != "$id")
- return Status(ErrorCodes::InvalidDBRef,
- str::stream() << "The DBRef $ref field must be "
- "following by a $id field");
- }
- else {
- // not an okay, $ prefixed field name.
- return Status(ErrorCodes::DollarPrefixedFieldName,
- str::stream() << "The dollar ($) prefixed field '"
- << elem.getFieldName() << "' in '"
- << mb::getFullName(elem)
- << "' is not valid for storage.");
+ return Status::OK();
+}
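
For illustration, a minimal standalone sketch (not MongoDB code; all names here are invented) of the ordering rule the validator above enforces over a flat list of field names: $ref must be followed by $id, $id must be preceded by $ref, and $db must be preceded by $id. The type checks ($ref and $db must be strings) are omitted from the sketch.

#include <cstddef>
#include <string>
#include <vector>

// Simplified stand-in for the checks above: returns true when every
// $-prefixed field name in 'fields' satisfies the DBRef ordering rule.
bool dollarFieldsFormValidDBRef(const std::vector<std::string>& fields) {
    for (std::size_t i = 0; i < fields.size(); ++i) {
        const std::string& name = fields[i];
        if (name.empty() || name[0] != '$')
            continue;  // plain field names are handled by other checks
        if (name == "$ref") {
            if (i + 1 >= fields.size() || fields[i + 1] != "$id")
                return false;  // $ref must be followed by $id
        } else if (name == "$id") {
            if (i == 0 || fields[i - 1] != "$ref")
                return false;  // $id must be preceded by $ref
        } else if (name == "$db") {
            if (i == 0 || fields[i - 1] != "$id")
                return false;  // $db must be preceded by $id
        } else {
            return false;  // any other $-prefixed name is rejected
        }
    }
    return true;
}
// e.g. {"$ref", "$id"} and {"$ref", "$id", "$db"} pass; {"$id", "$ref"} fails.
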
+/**
+ * Checks that all parents of the element passed in are valid for storage
+ *
+ * Note: The elem argument must be in a valid state when using this function
+ */
+Status storageValidParents(const mb::ConstElement& elem) {
+ const mb::ConstElement& root = elem.getDocument().root();
+ if (elem != root) {
+ const mb::ConstElement& parent = elem.parent();
+ if (parent.ok() && parent != root) {
+ Status s = storageValid(parent, false);
+ if (s.isOK()) {
+ s = storageValidParents(parent);
}
- return Status::OK();
+ return s;
+ }
+ }
+ return Status::OK();
+}
+
+Status storageValid(const mb::ConstElement& elem, const bool deep) {
+ if (!elem.ok())
+ return Status(ErrorCodes::BadValue, "Invalid elements cannot be stored.");
+
+ // Field names of elements inside arrays are not meaningful in mutable bson,
+ // so we do not want to validate them.
+ //
+ // TODO: Revisit how mutable handles array field names. We're going to need to make
+ // this better if we ever want to support ordered updates that can alter the same
+ // element repeatedly; see SERVER-12848.
+ const mb::ConstElement& parent = elem.parent();
+ const bool childOfArray = parent.ok() ? (parent.getType() == mongo::Array) : false;
+
+ if (!childOfArray) {
+ StringData fieldName = elem.getFieldName();
+ // Cannot start with "$", unless dbref
+ if (fieldName[0] == '$') {
+ Status status = validateDollarPrefixElement(elem, deep);
+ if (!status.isOK())
+ return status;
+ } else if (fieldName.find(".") != string::npos) {
+ // Field name cannot have a "." in it.
+ return Status(ErrorCodes::DottedFieldName,
+ str::stream() << "The dotted field '" << elem.getFieldName() << "' in '"
+ << mb::getFullName(elem) << "' is not valid for storage.");
}
+ }
- /**
- * Checks that all parents, of the element passed in, are valid for storage
- *
- * Note: The elem argument must be in a valid state when using this function
- */
- Status storageValidParents(const mb::ConstElement& elem) {
- const mb::ConstElement& root = elem.getDocument().root();
- if (elem != root) {
- const mb::ConstElement& parent = elem.parent();
- if (parent.ok() && parent != root) {
- Status s = storageValid(parent, false);
- if (s.isOK()) {
- s = storageValidParents(parent);
- }
+ if (deep) {
+ // Check children if there are any.
+ Status s = storageValidChildren(elem, deep);
+ if (!s.isOK())
+ return s;
+ }
- return s;
- }
- }
- return Status::OK();
- }
+ return Status::OK();
+}
- Status storageValid(const mb::ConstElement& elem, const bool deep) {
- if (!elem.ok())
- return Status(ErrorCodes::BadValue, "Invalid elements cannot be stored.");
-
- // Field names of elements inside arrays are not meaningful in mutable bson,
- // so we do not want to validate them.
- //
- // TODO: Revisit how mutable handles array field names. We going to need to make
- // this better if we ever want to support ordered updates that can alter the same
- // element repeatedly; see SERVER-12848.
- const mb::ConstElement& parent = elem.parent();
- const bool childOfArray = parent.ok() ? (parent.getType() == mongo::Array) : false;
-
- if (!childOfArray) {
- StringData fieldName = elem.getFieldName();
- // Cannot start with "$", unless dbref
- if (fieldName[0] == '$') {
- Status status = validateDollarPrefixElement(elem, deep);
- if (!status.isOK())
- return status;
- }
- else if (fieldName.find(".") != string::npos) {
- // Field name cannot have a "." in it.
- return Status(ErrorCodes::DottedFieldName,
- str::stream() << "The dotted field '"
- << elem.getFieldName() << "' in '"
- << mb::getFullName(elem)
- << "' is not valid for storage.");
- }
- }
+Status storageValidChildren(const mb::ConstElement& elem, const bool deep) {
+ if (!elem.hasChildren())
+ return Status::OK();
- if (deep) {
- // Check children if there are any.
- Status s = storageValidChildren(elem, deep);
- if (!s.isOK())
- return s;
- }
+ mb::ConstElement curr = elem.leftChild();
+ while (curr.ok()) {
+ Status s = storageValid(curr, deep);
+ if (!s.isOK())
+ return s;
+ curr = curr.rightSibling();
+ }
+
+ return Status::OK();
+}
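
As a rough standalone illustration (invented toy types, not the mutable BSON API), storageValid and storageValidChildren above form a mutually recursive walk that, outside of arrays, rejects dotted field names and hands $-prefixed names to the DBRef check; the sketch below simply rejects them.

#include <string>
#include <vector>

struct Node {                    // toy stand-in for a mutable BSON element
    std::string name;
    bool insideArray = false;    // array members have no meaningful names
    std::vector<Node> children;
};

bool nodeValidForStorage(const Node& n);  // forward declaration

bool childrenValidForStorage(const Node& n) {
    for (const Node& child : n.children)
        if (!nodeValidForStorage(child))
            return false;
    return true;
}

bool nodeValidForStorage(const Node& n) {
    if (!n.insideArray) {
        if (!n.name.empty() && n.name[0] == '$')
            return false;  // the real code defers to the DBRef check here
        if (n.name.find('.') != std::string::npos)
            return false;  // dotted field names cannot be stored
    }
    return childrenValidForStorage(n);  // deep check, as with 'deep == true'
}
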
- return Status::OK();
+/**
+ * This will verify that all updated fields are
+ * 1.) Valid for storage (checking parents to make sure things like DBRefs are valid)
+ * 2.) Unchanged if immutable (their values must not differ from the original)
+ *
+ * If updatedFields is empty then it was a replacement and/or we need to check all fields
+ */
+inline Status validate(const BSONObj& original,
+ const FieldRefSet& updatedFields,
+ const mb::Document& updated,
+ const std::vector<FieldRef*>* immutableAndSingleValueFields,
+ const ModifierInterface::Options& opts) {
+ LOG(3) << "update validate options -- "
+ << " updatedFields: " << updatedFields << " immutableAndSingleValueFields.size:"
+ << (immutableAndSingleValueFields ? immutableAndSingleValueFields->size() : 0)
+ << " validate:" << opts.enforceOkForStorage;
+
+ // 1.) Loop through each updated field and validate for storage
+ // and detect immutable field updates
+
+ // The set of possibly changed immutable fields -- we will need to check their vals
+ FieldRefSet changedImmutableFields;
+
+ // Check to see if there were no fields specified or if we are not validating.
+ // This is the case for a range query, or a query that didn't result in saved fields.
+ if (updatedFields.empty() || !opts.enforceOkForStorage) {
+ if (opts.enforceOkForStorage) {
+ // No specific fields were updated so the whole doc must be checked
+ Status s = storageValid(updated, true);
+ if (!s.isOK())
+ return s;
}
- Status storageValidChildren(const mb::ConstElement& elem, const bool deep) {
- if (!elem.hasChildren())
- return Status::OK();
+ // Check all immutable fields
+ if (immutableAndSingleValueFields)
+ changedImmutableFields.fillFrom(*immutableAndSingleValueFields);
+ } else {
+ // TODO: Change impl so we don't need to create a new FieldRefSet
+ // -- move all conflict logic into static function on FieldRefSet?
+ FieldRefSet immutableFieldRef;
+ if (immutableAndSingleValueFields)
+ immutableFieldRef.fillFrom(*immutableAndSingleValueFields);
+
+ FieldRefSet::const_iterator where = updatedFields.begin();
+ const FieldRefSet::const_iterator end = updatedFields.end();
+ for (; where != end; ++where) {
+ const FieldRef& current = **where;
+
+ // Find the updated field in the updated document.
+ mutablebson::ConstElement newElem = updated.root();
+ size_t currentPart = 0;
+ while (newElem.ok() && currentPart < current.numParts())
+ newElem = newElem[current.getPart(currentPart++)];
+
+ // newElem might be missing if $unset/$renamed-away
+ if (newElem.ok()) {
+ // Check element, and its children
+ Status s = storageValid(newElem, true);
+ if (!s.isOK())
+ return s;
- mb::ConstElement curr = elem.leftChild();
- while (curr.ok()) {
- Status s = storageValid(curr, deep);
+ // Check parents to make sure they are valid as well.
+ s = storageValidParents(newElem);
if (!s.isOK())
return s;
- curr = curr.rightSibling();
}
-
- return Status::OK();
+ // Check if the updated field conflicts with immutable fields
+ immutableFieldRef.findConflicts(&current, &changedImmutableFields);
}
+ }
- /**
- * This will verify that all updated fields are
- * 1.) Valid for storage (checking parent to make sure things like DBRefs are valid)
- * 2.) Compare updated immutable fields do not change values
- *
- * If updateFields is empty then it was replacement and/or we need to check all fields
- */
- inline Status validate(const BSONObj& original,
- const FieldRefSet& updatedFields,
- const mb::Document& updated,
- const std::vector<FieldRef*>* immutableAndSingleValueFields,
- const ModifierInterface::Options& opts) {
-
- LOG(3) << "update validate options -- "
- << " updatedFields: " << updatedFields
- << " immutableAndSingleValueFields.size:"
- << (immutableAndSingleValueFields ? immutableAndSingleValueFields->size() : 0)
- << " validate:" << opts.enforceOkForStorage;
-
- // 1.) Loop through each updated field and validate for storage
- // and detect immutable field updates
-
- // The set of possibly changed immutable fields -- we will need to check their vals
- FieldRefSet changedImmutableFields;
-
- // Check to see if there were no fields specified or if we are not validating
- // The case if a range query, or query that didn't result in saved fields
- if (updatedFields.empty() || !opts.enforceOkForStorage) {
- if (opts.enforceOkForStorage) {
- // No specific fields were updated so the whole doc must be checked
- Status s = storageValid(updated, true);
- if (!s.isOK())
- return s;
- }
-
- // Check all immutable fields
- if (immutableAndSingleValueFields)
- changedImmutableFields.fillFrom(*immutableAndSingleValueFields);
- }
- else {
-
- // TODO: Change impl so we don't need to create a new FieldRefSet
- // -- move all conflict logic into static function on FieldRefSet?
- FieldRefSet immutableFieldRef;
- if (immutableAndSingleValueFields)
- immutableFieldRef.fillFrom(*immutableAndSingleValueFields);
-
- FieldRefSet::const_iterator where = updatedFields.begin();
- const FieldRefSet::const_iterator end = updatedFields.end();
- for( ; where != end; ++where) {
- const FieldRef& current = **where;
-
- // Find the updated field in the updated document.
- mutablebson::ConstElement newElem = updated.root();
- size_t currentPart = 0;
- while (newElem.ok() && currentPart < current.numParts())
- newElem = newElem[current.getPart(currentPart++)];
-
- // newElem might be missing if $unset/$renamed-away
- if (newElem.ok()) {
-
- // Check element, and its children
- Status s = storageValid(newElem, true);
- if (!s.isOK())
- return s;
-
- // Check parents to make sure they are valid as well.
- s = storageValidParents(newElem);
- if (!s.isOK())
- return s;
-
- }
- // Check if the updated field conflicts with immutable fields
- immutableFieldRef.findConflicts(&current, &changedImmutableFields);
- }
- }
+ const bool checkIdField = (updatedFields.empty() && !original.isEmpty()) ||
+ updatedFields.findConflicts(&idFieldRef, NULL);
- const bool checkIdField = (updatedFields.empty() && !original.isEmpty()) ||
- updatedFields.findConflicts(&idFieldRef, NULL);
+ // Add _id to fields to check since it too is immutable
+ if (checkIdField)
+ changedImmutableFields.keepShortest(&idFieldRef);
+ else if (changedImmutableFields.empty()) {
+ // Return early if nothing changed which is immutable
+ return Status::OK();
+ }
- // Add _id to fields to check since it too is immutable
- if (checkIdField)
- changedImmutableFields.keepShortest(&idFieldRef);
- else if (changedImmutableFields.empty()) {
- // Return early if nothing changed which is immutable
- return Status::OK();
+ LOG(4) << "Changed immutable fields: " << changedImmutableFields;
+ // 2.) Now compare values of the changed immutable fields (to make sure they haven't changed)
+
+ const mutablebson::ConstElement newIdElem = updated.root()[idFieldName];
+
+ FieldRefSet::const_iterator where = changedImmutableFields.begin();
+ const FieldRefSet::const_iterator end = changedImmutableFields.end();
+ for (; where != end; ++where) {
+ const FieldRef& current = **where;
+
+ // Find the updated field in the updated document.
+ mutablebson::ConstElement newElem = updated.root();
+ size_t currentPart = 0;
+ while (newElem.ok() && currentPart < current.numParts())
+ newElem = newElem[current.getPart(currentPart++)];
+
+ if (!newElem.ok()) {
+ if (original.isEmpty()) {
+ // If the _id is missing and not required, then skip this check
+ if (!(current.dottedField() == idFieldName))
+ return Status(ErrorCodes::NoSuchKey,
+ mongoutils::str::stream() << "After applying the update, the new"
+ << " document was missing the '"
+ << current.dottedField()
+ << "' (required and immutable) field.");
+
+ } else {
+ if (current.dottedField() != idFieldName)
+ return Status(ErrorCodes::ImmutableField,
+ mongoutils::str::stream()
+ << "After applying the update to the document with "
+ << newIdElem.toString() << ", the '" << current.dottedField()
+ << "' (required and immutable) field was "
+ "found to have been removed --" << original);
}
+ } else {
+ // Find the potentially affected field in the original document.
+ const BSONElement oldElem = original.getFieldDotted(current.dottedField());
+ const BSONElement oldIdElem = original.getField(idFieldName);
- LOG(4) << "Changed immutable fields: " << changedImmutableFields;
- // 2.) Now compare values of the changed immutable fields (to make sure they haven't)
-
- const mutablebson::ConstElement newIdElem = updated.root()[idFieldName];
-
- FieldRefSet::const_iterator where = changedImmutableFields.begin();
- const FieldRefSet::const_iterator end = changedImmutableFields.end();
- for( ; where != end; ++where ) {
- const FieldRef& current = **where;
-
- // Find the updated field in the updated document.
- mutablebson::ConstElement newElem = updated.root();
- size_t currentPart = 0;
- while (newElem.ok() && currentPart < current.numParts())
- newElem = newElem[current.getPart(currentPart++)];
-
- if (!newElem.ok()) {
- if (original.isEmpty()) {
- // If the _id is missing and not required, then skip this check
- if (!(current.dottedField() == idFieldName))
- return Status(ErrorCodes::NoSuchKey,
- mongoutils::str::stream()
- << "After applying the update, the new"
- << " document was missing the '"
- << current.dottedField()
- << "' (required and immutable) field.");
-
- }
- else {
- if (current.dottedField() != idFieldName)
- return Status(ErrorCodes::ImmutableField,
- mongoutils::str::stream()
- << "After applying the update to the document with "
- << newIdElem.toString()
- << ", the '" << current.dottedField()
- << "' (required and immutable) field was "
- "found to have been removed --"
- << original);
- }
- }
- else {
-
- // Find the potentially affected field in the original document.
- const BSONElement oldElem = original.getFieldDotted(current.dottedField());
- const BSONElement oldIdElem = original.getField(idFieldName);
-
- // Ensure no arrays since neither _id nor shard keys can be in an array, or one.
- mb::ConstElement currElem = newElem;
- while (currElem.ok()) {
- if (currElem.getType() == Array) {
- return Status(ErrorCodes::NotSingleValueField,
- mongoutils::str::stream()
- << "After applying the update to the document {"
- << (oldIdElem.ok() ? oldIdElem.toString() :
- newIdElem.toString())
- << " , ...}, the (immutable) field '"
- << current.dottedField()
- << "' was found to be an array or array descendant.");
- }
- currElem = currElem.parent();
- }
-
- // If we have both (old and new), compare them. If we just have new we are good
- if (oldElem.ok() && newElem.compareWithBSONElement(oldElem, false) != 0) {
- return Status(ErrorCodes::ImmutableField,
- mongoutils::str::stream()
- << "After applying the update to the document {"
- << oldElem.toString()
- << " , ...}, the (immutable) field '" << current.dottedField()
- << "' was found to have been altered to "
- << newElem.toString());
- }
+ // Ensure no arrays, since neither _id nor shard keys can be in an array, or be one.
+ mb::ConstElement currElem = newElem;
+ while (currElem.ok()) {
+ if (currElem.getType() == Array) {
+ return Status(
+ ErrorCodes::NotSingleValueField,
+ mongoutils::str::stream()
+ << "After applying the update to the document {"
+ << (oldIdElem.ok() ? oldIdElem.toString() : newIdElem.toString())
+ << " , ...}, the (immutable) field '" << current.dottedField()
+ << "' was found to be an array or array descendant.");
}
+ currElem = currElem.parent();
}
- return Status::OK();
- }
-
- Status ensureIdAndFirst(mb::Document& doc) {
- mb::Element idElem = mb::findFirstChildNamed(doc.root(), idFieldName);
-
- // Move _id as first element if it exists
- if (idElem.ok()) {
- if (idElem.leftSibling().ok()) {
- Status s = idElem.remove();
- if (!s.isOK())
- return s;
- s = doc.root().pushFront(idElem);
- if (!s.isOK())
- return s;
- }
- }
- else {
- // Create _id if the document does not currently have one.
- idElem = doc.makeElementNewOID(idFieldName);
- if (!idElem.ok())
- return Status(ErrorCodes::BadValue,
- "Could not create new _id ObjectId element.",
- 17268);
- Status s = doc.root().pushFront(idElem);
- if (!s.isOK())
- return s;
+ // If we have both (old and new), compare them. If we just have new we are good
+ if (oldElem.ok() && newElem.compareWithBSONElement(oldElem, false) != 0) {
+ return Status(ErrorCodes::ImmutableField,
+ mongoutils::str::stream()
+ << "After applying the update to the document {"
+ << oldElem.toString() << " , ...}, the (immutable) field '"
+ << current.dottedField() << "' was found to have been altered to "
+ << newElem.toString());
}
-
- return Status::OK();
}
-
- } // namespace
-
- // static
- const char* UpdateStage::kStageType = "UPDATE";
-
- UpdateStage::UpdateStage(OperationContext* txn,
- const UpdateStageParams& params,
- WorkingSet* ws,
- Collection* collection,
- PlanStage* child)
- : _txn(txn),
- _params(params),
- _ws(ws),
- _collection(collection),
- _child(child),
- _idRetrying(WorkingSet::INVALID_ID),
- _idReturning(WorkingSet::INVALID_ID),
- _commonStats(kStageType),
- _updatedLocs(params.request->isMulti() ? new DiskLocSet() : NULL),
- _doc(params.driver->getDocument()) {
- // We are an update until we fall into the insert case.
- params.driver->setContext(ModifierInterface::ExecInfo::UPDATE_CONTEXT);
-
- // Before we even start executing, we know whether or not this is a replacement
- // style or $mod style update.
- _specificStats.isDocReplacement = params.driver->isDocReplacement();
}
- BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& loc) {
- const UpdateRequest* request = _params.request;
- UpdateDriver* driver = _params.driver;
- CanonicalQuery* cq = _params.canonicalQuery;
- UpdateLifecycle* lifecycle = request->getLifecycle();
-
- // If asked to return new doc, default to the oldObj, in case nothing changes.
- BSONObj newObj = oldObj.value();
-
- // Ask the driver to apply the mods. It may be that the driver can apply those "in
- // place", that is, some values of the old document just get adjusted without any
- // change to the binary layout on the bson layer. It may be that a whole new document
- // is needed to accomodate the new bson layout of the resulting document. In any event,
- // only enable in-place mutations if the underlying storage engine offers support for
- // writing damage events.
- _doc.reset(oldObj.value(),
- (_collection->updateWithDamagesSupported() ?
- mutablebson::Document::kInPlaceEnabled :
- mutablebson::Document::kInPlaceDisabled));
-
- BSONObj logObj;
-
- FieldRefSet updatedFields;
- bool docWasModified = false;
-
- Status status = Status::OK();
- if (!driver->needMatchDetails()) {
- // If we don't need match details, avoid doing the rematch
- status = driver->update(StringData(), &_doc, &logObj, &updatedFields, &docWasModified);
+ return Status::OK();
+}
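
Per immutable path, the comparison step above reduces to a small decision; a simplified standalone sketch (invented names, with plain strings standing in for BSON values):

#include <optional>
#include <string>

enum class ImmutableCheck { kOk, kMissingAfterUpdate, kChanged };

// 'oldVal' / 'newVal' stand in for the value of one immutable path before and
// after the update; std::nullopt means the path is absent from that document.
ImmutableCheck checkImmutablePath(const std::optional<std::string>& oldVal,
                                  const std::optional<std::string>& newVal) {
    if (!newVal)
        return ImmutableCheck::kMissingAfterUpdate;  // removed by the update
    if (oldVal && *oldVal != *newVal)
        return ImmutableCheck::kChanged;             // the value was altered
    return ImmutableCheck::kOk;                      // unchanged, or newly created
}
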
+
+Status ensureIdAndFirst(mb::Document& doc) {
+ mb::Element idElem = mb::findFirstChildNamed(doc.root(), idFieldName);
+
+ // Move _id as first element if it exists
+ if (idElem.ok()) {
+ if (idElem.leftSibling().ok()) {
+ Status s = idElem.remove();
+ if (!s.isOK())
+ return s;
+ s = doc.root().pushFront(idElem);
+ if (!s.isOK())
+ return s;
}
- else {
- // If there was a matched field, obtain it.
- MatchDetails matchDetails;
- matchDetails.requestElemMatchKey();
+ } else {
+ // Create _id if the document does not currently have one.
+ idElem = doc.makeElementNewOID(idFieldName);
+ if (!idElem.ok())
+ return Status(
+ ErrorCodes::BadValue, "Could not create new _id ObjectId element.", 17268);
+ Status s = doc.root().pushFront(idElem);
+ if (!s.isOK())
+ return s;
+ }
- dassert(cq);
- verify(cq->root()->matchesBSON(oldObj.value(), &matchDetails));
+ return Status::OK();
+}
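
A standalone sketch of the same reordering over a plain list of fields (illustrative only; the generated id value is a placeholder):

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

using Field = std::pair<std::string, std::string>;

void ensureIdIsFirst(std::vector<Field>& doc) {
    auto it = std::find_if(doc.begin(), doc.end(),
                           [](const Field& f) { return f.first == "_id"; });
    if (it == doc.end()) {
        // No _id: create one (placeholder value here) and put it first.
        doc.insert(doc.begin(), {"_id", "<new ObjectId>"});
    } else if (it != doc.begin()) {
        // _id exists but is not first: move it to the front.
        Field id = *it;
        doc.erase(it);
        doc.insert(doc.begin(), id);
    }
}
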
+
+} // namespace
+
+// static
+const char* UpdateStage::kStageType = "UPDATE";
+
+UpdateStage::UpdateStage(OperationContext* txn,
+ const UpdateStageParams& params,
+ WorkingSet* ws,
+ Collection* collection,
+ PlanStage* child)
+ : _txn(txn),
+ _params(params),
+ _ws(ws),
+ _collection(collection),
+ _child(child),
+ _idRetrying(WorkingSet::INVALID_ID),
+ _idReturning(WorkingSet::INVALID_ID),
+ _commonStats(kStageType),
+ _updatedLocs(params.request->isMulti() ? new DiskLocSet() : NULL),
+ _doc(params.driver->getDocument()) {
+ // We are an update until we fall into the insert case.
+ params.driver->setContext(ModifierInterface::ExecInfo::UPDATE_CONTEXT);
+
+ // Before we even start executing, we know whether or not this is a replacement
+ // style or $mod style update.
+ _specificStats.isDocReplacement = params.driver->isDocReplacement();
+}
+
+BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& loc) {
+ const UpdateRequest* request = _params.request;
+ UpdateDriver* driver = _params.driver;
+ CanonicalQuery* cq = _params.canonicalQuery;
+ UpdateLifecycle* lifecycle = request->getLifecycle();
+
+ // If asked to return new doc, default to the oldObj, in case nothing changes.
+ BSONObj newObj = oldObj.value();
+
+ // Ask the driver to apply the mods. It may be that the driver can apply those "in
+ // place", that is, some values of the old document just get adjusted without any
+ // change to the binary layout on the bson layer. It may be that a whole new document
+ // is needed to accommodate the new bson layout of the resulting document. In any event,
+ // only enable in-place mutations if the underlying storage engine offers support for
+ // writing damage events.
+ _doc.reset(oldObj.value(),
+ (_collection->updateWithDamagesSupported()
+ ? mutablebson::Document::kInPlaceEnabled
+ : mutablebson::Document::kInPlaceDisabled));
+
+ BSONObj logObj;
+
+ FieldRefSet updatedFields;
+ bool docWasModified = false;
+
+ Status status = Status::OK();
+ if (!driver->needMatchDetails()) {
+ // If we don't need match details, avoid doing the rematch
+ status = driver->update(StringData(), &_doc, &logObj, &updatedFields, &docWasModified);
+ } else {
+ // If there was a matched field, obtain it.
+ MatchDetails matchDetails;
+ matchDetails.requestElemMatchKey();
+
+ dassert(cq);
+ verify(cq->root()->matchesBSON(oldObj.value(), &matchDetails));
+
+ string matchedField;
+ if (matchDetails.hasElemMatchKey())
+ matchedField = matchDetails.elemMatchKey();
+
+ // TODO: Right now, each mod checks in 'prepare' that, if it needs positional
+ // data, a non-empty StringData() was provided. In principle, we could do
+ // that check here in an else clause to the above conditional and remove the
+ // checks from the mods.
+
+ status = driver->update(matchedField, &_doc, &logObj, &updatedFields, &docWasModified);
+ }
- string matchedField;
- if (matchDetails.hasElemMatchKey())
- matchedField = matchDetails.elemMatchKey();
+ if (!status.isOK()) {
+ uasserted(16837, status.reason());
+ }
- // TODO: Right now, each mod checks in 'prepare' that if it needs positional
- // data, that a non-empty StringData() was provided. In principle, we could do
- // that check here in an else clause to the above conditional and remove the
- // checks from the mods.
+ // Ensure _id exists and is first
+ uassertStatusOK(ensureIdAndFirst(_doc));
+
+ // See if the changes were applied in place
+ const char* source = NULL;
+ const bool inPlace = _doc.getInPlaceUpdates(&_damages, &source);
+
+ if (inPlace && _damages.empty()) {
+ // An interesting edge case. A modifier didn't notice that it was really a no-op
+ // during its 'prepare' phase. That represents a missed optimization, but we still
+ // shouldn't do any real work. Toggle 'docWasModified' to 'false'.
+ //
+ // Currently, an example of this is '{ $pushAll : { x : [] } }' when the 'x' array
+ // exists.
+ docWasModified = false;
+ }
- status = driver->update(matchedField, &_doc, &logObj, &updatedFields, &docWasModified);
- }
+ if (docWasModified) {
+ // Verify that no immutable fields were changed and data is valid for storage.
- if (!status.isOK()) {
- uasserted(16837, status.reason());
- }
+ if (!(!_txn->writesAreReplicated() || request->isFromMigration())) {
+ const std::vector<FieldRef*>* immutableFields = NULL;
+ if (lifecycle)
+ immutableFields = lifecycle->getImmutableFields();
- // Ensure _id exists and is first
- uassertStatusOK(ensureIdAndFirst(_doc));
-
- // See if the changes were applied in place
- const char* source = NULL;
- const bool inPlace = _doc.getInPlaceUpdates(&_damages, &source);
-
- if (inPlace && _damages.empty()) {
- // An interesting edge case. A modifier didn't notice that it was really a no-op
- // during its 'prepare' phase. That represents a missed optimization, but we still
- // shouldn't do any real work. Toggle 'docWasModified' to 'false'.
- //
- // Currently, an example of this is '{ $pushAll : { x : [] } }' when the 'x' array
- // exists.
- docWasModified = false;
+ uassertStatusOK(validate(
+ oldObj.value(), updatedFields, _doc, immutableFields, driver->modOptions()));
}
- if (docWasModified) {
-
- // Verify that no immutable fields were changed and data is valid for storage.
-
- if (!(!_txn->writesAreReplicated() || request->isFromMigration())) {
- const std::vector<FieldRef*>* immutableFields = NULL;
- if (lifecycle)
- immutableFields = lifecycle->getImmutableFields();
-
- uassertStatusOK(validate(oldObj.value(),
- updatedFields,
- _doc,
- immutableFields,
- driver->modOptions()) );
+ // Prepare to write back the modified document
+ WriteUnitOfWork wunit(_txn);
+
+ RecordId newLoc;
+
+ if (inPlace) {
+ // Don't actually do the write if this is an explain.
+ if (!request->isExplain()) {
+ invariant(_collection);
+ newObj = oldObj.value();
+ const RecordData oldRec(oldObj.value().objdata(), oldObj.value().objsize());
+ BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request->isMulti());
+ oplogUpdateEntryArgs args;
+ args.update = logObj;
+ args.criteria = idQuery;
+ args.fromMigrate = request->isFromMigration();
+ _collection->updateDocumentWithDamages(
+ _txn,
+ loc,
+ Snapshotted<RecordData>(oldObj.snapshotId(), oldRec),
+ source,
+ _damages,
+ args);
}
- // Prepare to write back the modified document
- WriteUnitOfWork wunit(_txn);
-
- RecordId newLoc;
-
- if (inPlace) {
-
- // Don't actually do the write if this is an explain.
- if (!request->isExplain()) {
- invariant(_collection);
- newObj = oldObj.value();
- const RecordData oldRec(oldObj.value().objdata(), oldObj.value().objsize());
- BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request->isMulti());
- oplogUpdateEntryArgs args;
- args.update = logObj;
- args.criteria = idQuery;
- args.fromMigrate = request->isFromMigration();
- _collection->updateDocumentWithDamages(
- _txn,
- loc,
- Snapshotted<RecordData>(oldObj.snapshotId(), oldRec),
- source,
- _damages,
- args);
- }
-
- _specificStats.fastmod = true;
- newLoc = loc;
- }
- else {
- // The updates were not in place. Apply them through the file manager.
-
- newObj = _doc.getObject();
- uassert(17419,
- str::stream() << "Resulting document after update is larger than "
- << BSONObjMaxUserSize,
- newObj.objsize() <= BSONObjMaxUserSize);
-
- // Don't actually do the write if this is an explain.
- if (!request->isExplain()) {
- invariant(_collection);
- BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request->isMulti());
- oplogUpdateEntryArgs args;
- args.update = logObj;
- args.criteria = idQuery;
- args.fromMigrate = request->isFromMigration();
- StatusWith<RecordId> res = _collection->updateDocument(
- _txn,
- loc,
- oldObj,
- newObj,
- true,
- driver->modsAffectIndices(),
- _params.opDebug,
- args);
- uassertStatusOK(res.getStatus());
- newLoc = res.getValue();
- }
- }
-
- invariant(oldObj.snapshotId() == _txn->recoveryUnit()->getSnapshotId());
- wunit.commit();
-
- // If the document moved, we might see it again in a collection scan (maybe it's
- // a document after our current document).
- //
- // If the document is indexed and the mod changes an indexed value, we might see
- // it again. For an example, see the comment above near declaration of
- // updatedLocs.
- //
- // This must be done after the wunit commits so we are sure we won't be rolling back.
- if (_updatedLocs && (newLoc != loc || driver->modsAffectIndices())) {
- _updatedLocs->insert(newLoc);
+ _specificStats.fastmod = true;
+ newLoc = loc;
+ } else {
+ // The updates were not in place. Apply them through the file manager.
+
+ newObj = _doc.getObject();
+ uassert(17419,
+ str::stream() << "Resulting document after update is larger than "
+ << BSONObjMaxUserSize,
+ newObj.objsize() <= BSONObjMaxUserSize);
+
+ // Don't actually do the write if this is an explain.
+ if (!request->isExplain()) {
+ invariant(_collection);
+ BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request->isMulti());
+ oplogUpdateEntryArgs args;
+ args.update = logObj;
+ args.criteria = idQuery;
+ args.fromMigrate = request->isFromMigration();
+ StatusWith<RecordId> res = _collection->updateDocument(_txn,
+ loc,
+ oldObj,
+ newObj,
+ true,
+ driver->modsAffectIndices(),
+ _params.opDebug,
+ args);
+ uassertStatusOK(res.getStatus());
+ newLoc = res.getValue();
}
}
- // Only record doc modifications if they wrote (exclude no-ops). Explains get
- // recorded as if they wrote.
- if (docWasModified || request->isExplain()) {
- _specificStats.nModified++;
+ invariant(oldObj.snapshotId() == _txn->recoveryUnit()->getSnapshotId());
+ wunit.commit();
+
+ // If the document moved, we might see it again in a collection scan (maybe it's
+ // a document after our current document).
+ //
+ // If the document is indexed and the mod changes an indexed value, we might see
+ // it again. For an example, see the comment above near declaration of
+ // updatedLocs.
+ //
+ // This must be done after the wunit commits so we are sure we won't be rolling back.
+ if (_updatedLocs && (newLoc != loc || driver->modsAffectIndices())) {
+ _updatedLocs->insert(newLoc);
}
-
- return newObj;
}
- // static
- Status UpdateStage::applyUpdateOpsForInsert(const CanonicalQuery* cq,
- const BSONObj& query,
- UpdateDriver* driver,
- UpdateLifecycle* lifecycle,
- mutablebson::Document* doc,
- bool isInternalRequest,
- UpdateStats* stats,
- BSONObj* out) {
- // Since this is an insert (no docs found and upsert:true), we will be logging it
- // as an insert in the oplog. We don't need the driver's help to build the
- // oplog record, then. We also set the context of the update driver to the INSERT_CONTEXT.
- // Some mods may only work in that context (e.g. $setOnInsert).
- driver->setLogOp(false);
- driver->setContext(ModifierInterface::ExecInfo::INSERT_CONTEXT);
-
- const vector<FieldRef*>* immutablePaths = NULL;
- if (!isInternalRequest && lifecycle)
- immutablePaths = lifecycle->getImmutableFields();
-
- // The original document we compare changes to - immutable paths must not change
- BSONObj original;
-
- if (cq) {
- Status status = driver->populateDocumentWithQueryFields(cq, immutablePaths, *doc);
- if (!status.isOK()) {
- return status;
- }
+ // Only record doc modifications if they wrote (exclude no-ops). Explains get
+ // recorded as if they wrote.
+ if (docWasModified || request->isExplain()) {
+ _specificStats.nModified++;
+ }
- if (driver->isDocReplacement())
- stats->fastmodinsert = true;
- original = doc->getObject();
- }
- else {
- fassert(17354, CanonicalQuery::isSimpleIdQuery(query));
- BSONElement idElt = query[idFieldName];
- original = idElt.wrap();
- fassert(17352, doc->root().appendElement(idElt));
+ return newObj;
+}
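
The in-place path above hands the storage engine a list of "damage events" instead of a whole new record. A minimal standalone sketch of that idea follows; the Damage struct and its field names are assumptions for illustration, not the real DamageEvent layout.

#include <cstddef>
#include <cstring>
#include <vector>

struct Damage {
    std::size_t sourceOffset;  // where the new bytes live in the scratch buffer
    std::size_t targetOffset;  // where they are copied to within the stored record
    std::size_t size;          // number of bytes to copy
};

// Apply each damage to the stored record; bytes outside the damaged ranges
// are untouched, which is what makes the update "in place".
void applyDamages(const char* source, const std::vector<Damage>& damages, char* record) {
    for (const Damage& d : damages)
        std::memcpy(record + d.targetOffset, source + d.sourceOffset, d.size);
}
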
+
+// static
+Status UpdateStage::applyUpdateOpsForInsert(const CanonicalQuery* cq,
+ const BSONObj& query,
+ UpdateDriver* driver,
+ UpdateLifecycle* lifecycle,
+ mutablebson::Document* doc,
+ bool isInternalRequest,
+ UpdateStats* stats,
+ BSONObj* out) {
+ // Since this is an insert (no docs found and upsert:true), we will be logging it
+ // as an insert in the oplog. We don't need the driver's help to build the
+ // oplog record, then. We also set the context of the update driver to the INSERT_CONTEXT.
+ // Some mods may only work in that context (e.g. $setOnInsert).
+ driver->setLogOp(false);
+ driver->setContext(ModifierInterface::ExecInfo::INSERT_CONTEXT);
+
+ const vector<FieldRef*>* immutablePaths = NULL;
+ if (!isInternalRequest && lifecycle)
+ immutablePaths = lifecycle->getImmutableFields();
+
+ // The original document we compare changes against; immutable paths must not change
+ BSONObj original;
+
+ if (cq) {
+ Status status = driver->populateDocumentWithQueryFields(cq, immutablePaths, *doc);
+ if (!status.isOK()) {
+ return status;
}
- // Apply the update modifications here.
- Status updateStatus = driver->update(StringData(), doc);
- if (!updateStatus.isOK()) {
- return Status(updateStatus.code(), updateStatus.reason(), 16836);
- }
+ if (driver->isDocReplacement())
+ stats->fastmodinsert = true;
+ original = doc->getObject();
+ } else {
+ fassert(17354, CanonicalQuery::isSimpleIdQuery(query));
+ BSONElement idElt = query[idFieldName];
+ original = idElt.wrap();
+ fassert(17352, doc->root().appendElement(idElt));
+ }
- // Ensure _id exists and is first
- Status idAndFirstStatus = ensureIdAndFirst(*doc);
- if (!idAndFirstStatus.isOK()) {
- return idAndFirstStatus;
- }
+ // Apply the update modifications here.
+ Status updateStatus = driver->update(StringData(), doc);
+ if (!updateStatus.isOK()) {
+ return Status(updateStatus.code(), updateStatus.reason(), 16836);
+ }
- // Validate that the object replacement or modifiers resulted in a document
- // that contains all the immutable keys and can be stored if it isn't coming
- // from a migration or via replication.
- if (!isInternalRequest) {
- FieldRefSet noFields;
- // This will only validate the modified fields if not a replacement.
- Status validateStatus = validate(original,
- noFields,
- *doc,
- immutablePaths,
- driver->modOptions());
- if (!validateStatus.isOK()) {
- return validateStatus;
- }
- }
+ // Ensure _id exists and is first
+ Status idAndFirstStatus = ensureIdAndFirst(*doc);
+ if (!idAndFirstStatus.isOK()) {
+ return idAndFirstStatus;
+ }
- BSONObj newObj = doc->getObject();
- if (newObj.objsize() > BSONObjMaxUserSize) {
- return Status(ErrorCodes::InvalidBSON,
- str::stream() << "Document to upsert is larger than "
- << BSONObjMaxUserSize,
- 17420);
+ // Validate that the object replacement or modifiers resulted in a document
+ // that contains all the immutable keys and can be stored if it isn't coming
+ // from a migration or via replication.
+ if (!isInternalRequest) {
+ FieldRefSet noFields;
+ // This will only validate the modified fields if not a replacement.
+ Status validateStatus =
+ validate(original, noFields, *doc, immutablePaths, driver->modOptions());
+ if (!validateStatus.isOK()) {
+ return validateStatus;
}
+ }
- *out = newObj;
- return Status::OK();
+ BSONObj newObj = doc->getObject();
+ if (newObj.objsize() > BSONObjMaxUserSize) {
+ return Status(ErrorCodes::InvalidBSON,
+ str::stream() << "Document to upsert is larger than " << BSONObjMaxUserSize,
+ 17420);
}
- void UpdateStage::doInsert() {
- _specificStats.inserted = true;
+ *out = newObj;
+ return Status::OK();
+}
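
Conceptually, the upserted document is seeded from the query's equality predicates (or from a simple _id query) and the update mods are then applied on top of that seed. A much-simplified standalone sketch using plain maps (invented names, not the driver API):

#include <map>
#include <string>

// 'queryEqualities' stands in for the equality fields extracted from the
// query; 'setMods' stands in for $set-style modifications.
std::map<std::string, std::string> buildUpsertDoc(
    const std::map<std::string, std::string>& queryEqualities,
    const std::map<std::string, std::string>& setMods) {
    std::map<std::string, std::string> doc = queryEqualities;  // seed from the query
    for (const auto& kv : setMods)
        doc[kv.first] = kv.second;  // mods win over the seeded values
    return doc;
}
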
- const UpdateRequest* request = _params.request;
- bool isInternalRequest = !_txn->writesAreReplicated() || request->isFromMigration();
+void UpdateStage::doInsert() {
+ _specificStats.inserted = true;
- // Reset the document we will be writing to.
- _doc.reset();
+ const UpdateRequest* request = _params.request;
+ bool isInternalRequest = !_txn->writesAreReplicated() || request->isFromMigration();
- BSONObj newObj;
- uassertStatusOK(applyUpdateOpsForInsert(_params.canonicalQuery,
- request->getQuery(),
- _params.driver,
- request->getLifecycle(),
- &_doc,
- isInternalRequest,
- &_specificStats,
- &newObj));
+ // Reset the document we will be writing to.
+ _doc.reset();
- _specificStats.objInserted = newObj;
+ BSONObj newObj;
+ uassertStatusOK(applyUpdateOpsForInsert(_params.canonicalQuery,
+ request->getQuery(),
+ _params.driver,
+ request->getLifecycle(),
+ &_doc,
+ isInternalRequest,
+ &_specificStats,
+ &newObj));
- // If this is an explain, bail out now without doing the insert.
- if (request->isExplain()) {
- return;
- }
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(_txn);
- invariant(_collection);
- const bool enforceQuota = !request->isGod();
- uassertStatusOK(_collection->insertDocument(_txn,
- newObj,
- enforceQuota,
- request->isFromMigration()));
-
- // Technically, we should save/restore state here, but since we are going to return
- // immediately after, it would just be wasted work.
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "upsert", _collection->ns().ns());
- }
+ _specificStats.objInserted = newObj;
- bool UpdateStage::doneUpdating() {
- // We're done updating if either the child has no more results to give us, or we've
- // already gotten a result back and we're not a multi-update.
- return _idRetrying == WorkingSet::INVALID_ID && _idReturning == WorkingSet::INVALID_ID
- && (_child->isEOF() || (_specificStats.nMatched > 0 && !_params.request->isMulti()));
+ // If this is an explain, bail out now without doing the insert.
+ if (request->isExplain()) {
+ return;
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(_txn);
+ invariant(_collection);
+ const bool enforceQuota = !request->isGod();
+ uassertStatusOK(
+ _collection->insertDocument(_txn, newObj, enforceQuota, request->isFromMigration()));
- bool UpdateStage::needInsert() {
- // We need to insert if
- // 1) we haven't inserted already,
- // 2) the child stage returned zero matches, and
- // 3) the user asked for an upsert.
- return !_specificStats.inserted
- && _specificStats.nMatched == 0
- && _params.request->isUpsert();
+ // Technically, we should save/restore state here, but since we are going to return
+ // immediately after, it would just be wasted work.
+ wunit.commit();
}
-
- bool UpdateStage::isEOF() {
- return doneUpdating() && !needInsert();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(_txn, "upsert", _collection->ns().ns());
+}
+
+bool UpdateStage::doneUpdating() {
+ // We're done updating if either the child has no more results to give us, or we've
+ // already gotten a result back and we're not a multi-update.
+ return _idRetrying == WorkingSet::INVALID_ID && _idReturning == WorkingSet::INVALID_ID &&
+ (_child->isEOF() || (_specificStats.nMatched > 0 && !_params.request->isMulti()));
+}
+
+bool UpdateStage::needInsert() {
+ // We need to insert if
+ // 1) we haven't inserted already,
+ // 2) the child stage returned zero matches, and
+ // 3) the user asked for an upsert.
+ return !_specificStats.inserted && _specificStats.nMatched == 0 && _params.request->isUpsert();
+}
+
+bool UpdateStage::isEOF() {
+ return doneUpdating() && !needInsert();
+}
+
+PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
+ ++_commonStats.works;
+
+ // Adds the amount of time taken by work() to executionTimeMillis.
+ ScopedTimer timer(&_commonStats.executionTimeMillis);
+
+ if (isEOF()) {
+ return PlanStage::IS_EOF;
}
- PlanStage::StageState UpdateStage::work(WorkingSetID* out) {
- ++_commonStats.works;
+ if (doneUpdating()) {
+ // Even if we're done updating, we may have some inserting left to do.
+ if (needInsert()) {
+ // TODO we may want to handle WriteConflictException here. Currently we bounce it
+ // out to a higher level since if this WCEs it is likely that we raced with another
+ // upsert that may have matched our query, and therefore this may need to perform an
+ // update rather than an insert. Bouncing to the higher level allows restarting the
+ // query in this case.
+ doInsert();
- // Adds the amount of time taken by work() to executionTimeMillis.
- ScopedTimer timer(&_commonStats.executionTimeMillis);
+ invariant(isEOF());
+ if (_params.request->shouldReturnNewDocs()) {
+ // Want to return the document we just inserted, create it as a WorkingSetMember
+ // so that we can return it.
+ BSONObj newObj = _specificStats.objInserted;
+ *out = _ws->allocate();
+ WorkingSetMember* member = _ws->get(*out);
+ member->obj =
+ Snapshotted<BSONObj>(_txn->recoveryUnit()->getSnapshotId(), newObj.getOwned());
+ member->state = WorkingSetMember::OWNED_OBJ;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ }
+ }
- if (isEOF()) { return PlanStage::IS_EOF; }
+ // At this point either we're done updating and there was no insert to do,
+ // or we're done updating and we're done inserting. Either way, we're EOF.
+ invariant(isEOF());
+ return PlanStage::IS_EOF;
+ }
- if (doneUpdating()) {
- // Even if we're done updating, we may have some inserting left to do.
- if (needInsert()) {
- // TODO we may want to handle WriteConflictException here. Currently we bounce it
- // out to a higher level since if this WCEs it is likely that we raced with another
- // upsert that may have matched our query, and therefore this may need to perform an
- // update rather than an insert. Bouncing to the higher level allows restarting the
- // query in this case.
- doInsert();
+ // If we're here, then we still have to ask for results from the child and apply
+ // updates to them. We should only get here if the collection exists.
+ invariant(_collection);
- invariant(isEOF());
- if (_params.request->shouldReturnNewDocs()) {
- // Want to return the document we just inserted, create it as a WorkingSetMember
- // so that we can return it.
- BSONObj newObj = _specificStats.objInserted;
- *out = _ws->allocate();
- WorkingSetMember* member = _ws->get(*out);
- member->obj = Snapshotted<BSONObj>(_txn->recoveryUnit()->getSnapshotId(),
- newObj.getOwned());
- member->state = WorkingSetMember::OWNED_OBJ;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
- }
+ // It is possible that after an update was applied, a WriteConflictException
+ // occurred and prevented us from returning ADVANCED with the requested version
+ // of the document.
+ if (_idReturning != WorkingSet::INVALID_ID) {
+ // We should only get here if we were trying to return something before.
+ invariant(_params.request->shouldReturnAnyDocs());
- // At this point either we're done updating and there was no insert to do,
- // or we're done updating and we're done inserting. Either way, we're EOF.
- invariant(isEOF());
- return PlanStage::IS_EOF;
- }
+ WorkingSetMember* member = _ws->get(_idReturning);
+ invariant(member->state == WorkingSetMember::OWNED_OBJ);
- // If we're here, then we still have to ask for results from the child and apply
- // updates to them. We should only get here if the collection exists.
- invariant(_collection);
+ *out = _idReturning;
+ _idReturning = WorkingSet::INVALID_ID;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
+ }
- // It is possible that after an update was applied, a WriteConflictException
- // occurred and prevented us from returning ADVANCED with the requested version
- // of the document.
- if (_idReturning != WorkingSet::INVALID_ID) {
- // We should only get here if we were trying to return something before.
- invariant(_params.request->shouldReturnAnyDocs());
+ // Either retry the last WSM we worked on or get a new one from our child.
+ WorkingSetID id;
+ StageState status;
+ if (_idRetrying == WorkingSet::INVALID_ID) {
+ status = _child->work(&id);
+ } else {
+ status = ADVANCED;
+ id = _idRetrying;
+ _idRetrying = WorkingSet::INVALID_ID;
+ }
- WorkingSetMember* member = _ws->get(_idReturning);
- invariant(member->state == WorkingSetMember::OWNED_OBJ);
+ if (PlanStage::ADVANCED == status) {
+ // Need to get these things from the result returned by the child.
+ RecordId loc;
- *out = _idReturning;
- _idReturning = WorkingSet::INVALID_ID;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
+ WorkingSetMember* member = _ws->get(id);
- // Either retry the last WSM we worked on or get a new one from our child.
- WorkingSetID id;
- StageState status;
- if (_idRetrying == WorkingSet::INVALID_ID) {
- status = _child->work(&id);
- }
- else {
- status = ADVANCED;
- id = _idRetrying;
- _idRetrying = WorkingSet::INVALID_ID;
+ // We want to free this member when we return, unless we need to retry it.
+ ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
+
+ if (!member->hasLoc()) {
+ // We expect to be here because of an invalidation causing a force-fetch, and
+ // doc-locking storage engines do not issue invalidations.
+ ++_specificStats.nInvalidateSkips;
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
}
+ loc = member->loc;
- if (PlanStage::ADVANCED == status) {
- // Need to get these things from the result returned by the child.
- RecordId loc;
+ // Updates can't have projections. This means that covering analysis will always add
+ // a fetch. We should always get fetched data, and never just key data.
+ invariant(member->hasObj());
- WorkingSetMember* member = _ws->get(id);
+ // We fill this with the new locs of moved docs so we don't double-update.
+ if (_updatedLocs && _updatedLocs->count(loc) > 0) {
+ // Found a loc that refers to a document we had already updated. Note that
+ // we can never remove from _updatedLocs because updates by other clients
+ // could cause us to encounter a document again later.
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ }
- // We want to free this member when we return, unless we need to retry it.
- ScopeGuard memberFreer = MakeGuard(&WorkingSet::free, _ws, id);
+ try {
+ std::unique_ptr<RecordCursor> cursor;
+ if (_txn->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
+ cursor = _collection->getCursor(_txn);
+ // our snapshot has changed, refetch
+ if (!WorkingSetCommon::fetch(_txn, member, cursor)) {
+ // document was deleted, we're done here
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ }
- if (!member->hasLoc()) {
- // We expect to be here because of an invalidation causing a force-fetch, and
- // doc-locking storage engines do not issue invalidations.
- ++_specificStats.nInvalidateSkips;
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- loc = member->loc;
-
- // Updates can't have projections. This means that covering analysis will always add
- // a fetch. We should always get fetched data, and never just key data.
- invariant(member->hasObj());
-
- // We fill this with the new locs of moved doc so we don't double-update.
- if (_updatedLocs && _updatedLocs->count(loc) > 0) {
- // Found a loc that refers to a document we had already updated. Note that
- // we can never remove from _updatedLocs because updates by other clients
- // could cause us to encounter a document again later.
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
+ // we have to re-match the doc as it might not match anymore
+ CanonicalQuery* cq = _params.canonicalQuery;
+ if (cq && !cq->root()->matchesBSON(member->obj.value(), NULL)) {
+ // doesn't match predicates anymore!
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ }
}
+ // Save state before making changes
try {
- std::unique_ptr<RecordCursor> cursor;
- if (_txn->recoveryUnit()->getSnapshotId() != member->obj.snapshotId()) {
- cursor = _collection->getCursor(_txn);
- // our snapshot has changed, refetch
- if (!WorkingSetCommon::fetch(_txn, member, cursor)) {
- // document was deleted, we're done here
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
-
- // we have to re-match the doc as it might not match anymore
- CanonicalQuery* cq = _params.canonicalQuery;
- if (cq && !cq->root()->matchesBSON(member->obj.value(), NULL)) {
- // doesn't match predicates anymore!
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
+ _child->saveState();
+ if (supportsDocLocking()) {
+ // Doc-locking engines require this after saveState() since they don't use
+ // invalidations.
+ WorkingSetCommon::prepareForSnapshotChange(_ws);
}
+ } catch (const WriteConflictException& wce) {
+ std::terminate();
+ }
- // Save state before making changes
- try {
- _child->saveState();
- if (supportsDocLocking()) {
- // Doc-locking engines require this after saveState() since they don't use
- // invalidations.
- WorkingSetCommon::prepareForSnapshotChange(_ws);
- }
- }
- catch ( const WriteConflictException& wce ) {
- std::terminate();
- }
+ // If we care about the pre-updated version of the doc, save it out here.
+ BSONObj oldObj;
+ if (_params.request->shouldReturnOldDocs()) {
+ oldObj = member->obj.value().getOwned();
+ }
- // If we care about the pre-updated version of the doc, save it out here.
- BSONObj oldObj;
- if (_params.request->shouldReturnOldDocs()) {
- oldObj = member->obj.value().getOwned();
- }
+ // Do the update, get us the new version of the doc.
+ BSONObj newObj = transformAndUpdate(member->obj, loc);
- // Do the update, get us the new version of the doc.
- BSONObj newObj = transformAndUpdate(member->obj, loc);
-
- // Set member's obj to be the doc we want to return.
- if (_params.request->shouldReturnAnyDocs()) {
- if (_params.request->shouldReturnNewDocs()) {
- member->obj = Snapshotted<BSONObj>(_txn->recoveryUnit()->getSnapshotId(),
- newObj.getOwned());
- }
- else {
- invariant(_params.request->shouldReturnOldDocs());
- member->obj.setValue(oldObj);
- }
- member->loc = RecordId();
- member->state = WorkingSetMember::OWNED_OBJ;
+ // Set member's obj to be the doc we want to return.
+ if (_params.request->shouldReturnAnyDocs()) {
+ if (_params.request->shouldReturnNewDocs()) {
+ member->obj = Snapshotted<BSONObj>(_txn->recoveryUnit()->getSnapshotId(),
+ newObj.getOwned());
+ } else {
+ invariant(_params.request->shouldReturnOldDocs());
+ member->obj.setValue(oldObj);
}
+ member->loc = RecordId();
+ member->state = WorkingSetMember::OWNED_OBJ;
}
- catch ( const WriteConflictException& wce ) {
- _idRetrying = id;
- memberFreer.Dismiss(); // Keep this member around so we can retry updating it.
- *out = WorkingSet::INVALID_ID;
- _commonStats.needYield++;
- return NEED_YIELD;
- }
-
- // This should be after transformAndUpdate to make sure we actually updated this doc.
- ++_specificStats.nMatched;
+ } catch (const WriteConflictException& wce) {
+ _idRetrying = id;
+ memberFreer.Dismiss(); // Keep this member around so we can retry updating it.
+ *out = WorkingSet::INVALID_ID;
+ _commonStats.needYield++;
+ return NEED_YIELD;
+ }
- // Restore state after modification
+ // This should be after transformAndUpdate to make sure we actually updated this doc.
+ ++_specificStats.nMatched;
- // As restoreState may restore (recreate) cursors, make sure to restore the
- // state outside of the WritUnitOfWork.
- try {
- _child->restoreState(_txn);
- }
- catch ( const WriteConflictException& wce ) {
- // Note we don't need to retry updating anything in this case since the update
- // already was committed. However, we still need to return the updated document
- // (if it was requested).
- if (_params.request->shouldReturnAnyDocs()) {
- // member->obj should refer to the document we want to return.
- invariant(member->state == WorkingSetMember::OWNED_OBJ);
-
- _idReturning = id;
- // Keep this member around so that we can return it on the next work() call.
- memberFreer.Dismiss();
- }
- *out = WorkingSet::INVALID_ID;
- _commonStats.needYield++;
- return NEED_YIELD;
- }
+ // Restore state after modification
+ // As restoreState may restore (recreate) cursors, make sure to restore the
+ // state outside of the WriteUnitOfWork.
+ try {
+ _child->restoreState(_txn);
+ } catch (const WriteConflictException& wce) {
+ // Note we don't need to retry updating anything in this case since the update
+ // already was committed. However, we still need to return the updated document
+ // (if it was requested).
if (_params.request->shouldReturnAnyDocs()) {
// member->obj should refer to the document we want to return.
invariant(member->state == WorkingSetMember::OWNED_OBJ);
- memberFreer.Dismiss(); // Keep this member around so we can return it.
- *out = id;
- ++_commonStats.advanced;
- return PlanStage::ADVANCED;
- }
-
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else if (PlanStage::IS_EOF == status) {
- // The child is out of results, but we might not be done yet because we still might
- // have to do an insert.
- ++_commonStats.needTime;
- return PlanStage::NEED_TIME;
- }
- else if (PlanStage::FAILURE == status) {
- *out = id;
- // If a stage fails, it may create a status WSM to indicate why it failed, in which case
- // 'id' is valid. If ID is invalid, we create our own error message.
- if (WorkingSet::INVALID_ID == id) {
- const std::string errmsg = "update stage failed to read in results from child";
- *out = WorkingSetCommon::allocateStatusMember(_ws, Status(ErrorCodes::InternalError,
- errmsg));
- return PlanStage::FAILURE;
+ _idReturning = id;
+ // Keep this member around so that we can return it on the next work() call.
+ memberFreer.Dismiss();
}
- return status;
- }
- else if (PlanStage::NEED_TIME == status) {
- ++_commonStats.needTime;
+ *out = WorkingSet::INVALID_ID;
+ _commonStats.needYield++;
+ return NEED_YIELD;
}
- else if (PlanStage::NEED_YIELD == status) {
- ++_commonStats.needYield;
+
+ if (_params.request->shouldReturnAnyDocs()) {
+ // member->obj should refer to the document we want to return.
+ invariant(member->state == WorkingSetMember::OWNED_OBJ);
+
+ memberFreer.Dismiss(); // Keep this member around so we can return it.
*out = id;
+ ++_commonStats.advanced;
+ return PlanStage::ADVANCED;
}
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::IS_EOF == status) {
+ // The child is out of results, but we might not be done yet because we still might
+ // have to do an insert.
+ ++_commonStats.needTime;
+ return PlanStage::NEED_TIME;
+ } else if (PlanStage::FAILURE == status) {
+ *out = id;
+ // If a stage fails, it may create a status WSM to indicate why it failed, in which case
+ // 'id' is valid. If ID is invalid, we create our own error message.
+ if (WorkingSet::INVALID_ID == id) {
+ const std::string errmsg = "update stage failed to read in results from child";
+ *out = WorkingSetCommon::allocateStatusMember(
+ _ws, Status(ErrorCodes::InternalError, errmsg));
+ return PlanStage::FAILURE;
+ }
return status;
+ } else if (PlanStage::NEED_TIME == status) {
+ ++_commonStats.needTime;
+ } else if (PlanStage::NEED_YIELD == status) {
+ ++_commonStats.needYield;
+ *out = id;
}
- void UpdateStage::saveState() {
- _txn = NULL;
- ++_commonStats.yields;
- _child->saveState();
- }
-
- Status UpdateStage::restoreUpdateState(OperationContext* opCtx) {
- const UpdateRequest& request = *_params.request;
- const NamespaceString& nsString(request.getNamespaceString());
-
- // We may have stepped down during the yield.
- bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
+ return status;
+}
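
For orientation, a standalone sketch of how a caller typically drives a stage with this work() contract (invented minimal types; in MongoDB the plan executor plays this role):

#include <cstdint>

enum class StageState { kAdvanced, kNeedTime, kNeedYield, kIsEof, kFailure };

struct Stage {
    virtual StageState work(std::uint32_t* outId) = 0;
    virtual ~Stage() = default;
};

// Drain a stage to EOF, ignoring the yield/retry machinery for brevity.
void drain(Stage& stage) {
    std::uint32_t id = 0;
    while (true) {
        StageState state = stage.work(&id);
        if (state == StageState::kAdvanced)
            continue;  // a result is available in 'id'; consume it here
        if (state == StageState::kNeedTime || state == StageState::kNeedYield)
            continue;  // the real executor yields locks / retries here
        break;         // kIsEof or kFailure
    }
}
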
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Demoted from primary while performing update on "
- << nsString.ns());
- }
+void UpdateStage::saveState() {
+ _txn = NULL;
+ ++_commonStats.yields;
+ _child->saveState();
+}
- if (request.getLifecycle()) {
- UpdateLifecycle* lifecycle = request.getLifecycle();
- lifecycle->setCollection(_collection);
+Status UpdateStage::restoreUpdateState(OperationContext* opCtx) {
+ const UpdateRequest& request = *_params.request;
+ const NamespaceString& nsString(request.getNamespaceString());
- if (!lifecycle->canContinue()) {
- return Status(ErrorCodes::IllegalOperation,
- "Update aborted due to invalid state transitions after yield.",
- 17270);
- }
+ // We may have stepped down during the yield.
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
- _params.driver->refreshIndexKeys(lifecycle->getIndexKeys(opCtx));
- }
-
- return Status::OK();
- }
-
- void UpdateStage::restoreState(OperationContext* opCtx) {
- invariant(_txn == NULL);
- _txn = opCtx;
- ++_commonStats.unyields;
- // Restore our child.
- _child->restoreState(opCtx);
- // Restore self.
- uassertStatusOK(restoreUpdateState(opCtx));
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Demoted from primary while performing update on "
+ << nsString.ns());
}
- void UpdateStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- ++_commonStats.invalidates;
- _child->invalidate(txn, dl, type);
- }
+ if (request.getLifecycle()) {
+ UpdateLifecycle* lifecycle = request.getLifecycle();
+ lifecycle->setCollection(_collection);
- vector<PlanStage*> UpdateStage::getChildren() const {
- vector<PlanStage*> children;
- children.push_back(_child.get());
- return children;
- }
-
- PlanStageStats* UpdateStage::getStats() {
- _commonStats.isEOF = isEOF();
- unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_UPDATE));
- ret->specific.reset(new UpdateStats(_specificStats));
- ret->children.push_back(_child->getStats());
- return ret.release();
- }
+ if (!lifecycle->canContinue()) {
+ return Status(ErrorCodes::IllegalOperation,
+ "Update aborted due to invalid state transitions after yield.",
+ 17270);
+ }
- const CommonStats* UpdateStage::getCommonStats() const {
- return &_commonStats;
+ _params.driver->refreshIndexKeys(lifecycle->getIndexKeys(opCtx));
}
- const SpecificStats* UpdateStage::getSpecificStats() const {
- return &_specificStats;
+ return Status::OK();
+}
+
+void UpdateStage::restoreState(OperationContext* opCtx) {
+ invariant(_txn == NULL);
+ _txn = opCtx;
+ ++_commonStats.unyields;
+ // Restore our child.
+ _child->restoreState(opCtx);
+ // Restore self.
+ uassertStatusOK(restoreUpdateState(opCtx));
+}
+
+void UpdateStage::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ ++_commonStats.invalidates;
+ _child->invalidate(txn, dl, type);
+}
+
+vector<PlanStage*> UpdateStage::getChildren() const {
+ vector<PlanStage*> children;
+ children.push_back(_child.get());
+ return children;
+}
+
+PlanStageStats* UpdateStage::getStats() {
+ _commonStats.isEOF = isEOF();
+ unique_ptr<PlanStageStats> ret(new PlanStageStats(_commonStats, STAGE_UPDATE));
+ ret->specific.reset(new UpdateStats(_specificStats));
+ ret->children.push_back(_child->getStats());
+ return ret.release();
+}
+
+const CommonStats* UpdateStage::getCommonStats() const {
+ return &_commonStats;
+}
+
+const SpecificStats* UpdateStage::getSpecificStats() const {
+ return &_specificStats;
+}
+
+// static
+UpdateResult UpdateStage::makeUpdateResult(PlanExecutor* exec, OpDebug* opDebug) {
+ // Get stats from the root stage.
+ invariant(exec->getRootStage()->isEOF());
+ invariant(exec->getRootStage()->stageType() == STAGE_UPDATE);
+ UpdateStage* updateStage = static_cast<UpdateStage*>(exec->getRootStage());
+ const UpdateStats* updateStats =
+ static_cast<const UpdateStats*>(updateStage->getSpecificStats());
+
+ // Use stats from the root stage to fill out opDebug.
+ opDebug->nMatched = updateStats->nMatched;
+ opDebug->nModified = updateStats->nModified;
+ opDebug->upsert = updateStats->inserted;
+ opDebug->fastmodinsert = updateStats->fastmodinsert;
+ opDebug->fastmod = updateStats->fastmod;
+
+ // Historically, 'opDebug' considers 'nMatched' and 'nModified' to be 1 (rather than 0)
+ // if there is an upsert that inserts a document. The UpdateStage does not participate
+ // in this madness in order to have saner stats reporting for explain. This means that
+ // we have to set these values "manually" in the case of an insert.
+ if (updateStats->inserted) {
+ opDebug->nMatched = 1;
+ opDebug->nModified = 1;
}
- // static
- UpdateResult UpdateStage::makeUpdateResult(PlanExecutor* exec, OpDebug* opDebug) {
- // Get stats from the root stage.
- invariant(exec->getRootStage()->isEOF());
- invariant(exec->getRootStage()->stageType() == STAGE_UPDATE);
- UpdateStage* updateStage = static_cast<UpdateStage*>(exec->getRootStage());
- const UpdateStats* updateStats =
- static_cast<const UpdateStats*>(updateStage->getSpecificStats());
-
- // Use stats from the root stage to fill out opDebug.
- opDebug->nMatched = updateStats->nMatched;
- opDebug->nModified = updateStats->nModified;
- opDebug->upsert = updateStats->inserted;
- opDebug->fastmodinsert = updateStats->fastmodinsert;
- opDebug->fastmod = updateStats->fastmod;
-
- // Historically, 'opDebug' considers 'nMatched' and 'nModified' to be 1 (rather than 0)
- // if there is an upsert that inserts a document. The UpdateStage does not participate
- // in this madness in order to have saner stats reporting for explain. This means that
- // we have to set these values "manually" in the case of an insert.
- if (updateStats->inserted) {
- opDebug->nMatched = 1;
- opDebug->nModified = 1;
- }
-
- // Get summary information about the plan.
- PlanSummaryStats stats;
- Explain::getSummaryStats(exec, &stats);
- opDebug->nscanned = stats.totalKeysExamined;
- opDebug->nscannedObjects = stats.totalDocsExamined;
+ // Get summary information about the plan.
+ PlanSummaryStats stats;
+ Explain::getSummaryStats(exec, &stats);
+ opDebug->nscanned = stats.totalKeysExamined;
+ opDebug->nscannedObjects = stats.totalDocsExamined;
- return UpdateResult(updateStats->nMatched > 0 /* Did we update at least one obj? */,
- !updateStats->isDocReplacement /* $mod or obj replacement */,
- opDebug->nModified /* number of modified docs, no no-ops */,
- opDebug->nMatched /* # of docs matched/updated, even no-ops */,
- updateStats->objInserted);
- };
+ return UpdateResult(updateStats->nMatched > 0 /* Did we update at least one obj? */,
+ !updateStats->isDocReplacement /* $mod or obj replacement */,
+ opDebug->nModified /* number of modified docs, no no-ops */,
+ opDebug->nMatched /* # of docs matched/updated, even no-ops */,
+ updateStats->objInserted);
+}
-} // namespace mongo
+} // namespace mongo
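A minimal standalone sketch of the stats-to-result mapping performed by makeUpdateResult() above, including the historical upsert-insert special case. The SimpleUpdateStats/SimpleUpdateResult types below are illustrative stand-ins, not the real UpdateStats/UpdateResult classes.

    #include <iostream>

    struct SimpleUpdateStats {       // stand-in for UpdateStats
        long long nMatched = 0;
        long long nModified = 0;
        bool inserted = false;       // did an upsert insert a document?
        bool isDocReplacement = false;
    };

    struct SimpleUpdateResult {      // stand-in for UpdateResult
        bool existing;               // did we update at least one existing doc?
        bool modifiers;              // $-mod update rather than whole-doc replacement
        long long numDocsModified;
        long long numMatched;
    };

    SimpleUpdateResult summarize(const SimpleUpdateStats& stats) {
        long long nMatched = stats.nMatched;
        long long nModified = stats.nModified;
        // An upsert that inserted a document is reported as 1 matched / 1 modified,
        // mirroring the historical opDebug behaviour described in makeUpdateResult().
        if (stats.inserted) {
            nMatched = 1;
            nModified = 1;
        }
        return {stats.nMatched > 0, !stats.isDocReplacement, nModified, nMatched};
    }

    int main() {
        SimpleUpdateStats upsertInsert;
        upsertInsert.inserted = true;
        SimpleUpdateResult result = summarize(upsertInsert);
        std::cout << result.numMatched << " matched, " << result.numDocsModified << " modified\n";
        return 0;
    }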
diff --git a/src/mongo/db/exec/update.h b/src/mongo/db/exec/update.h
index 8460846aca1..28ff014b232 100644
--- a/src/mongo/db/exec/update.h
+++ b/src/mongo/db/exec/update.h
@@ -38,184 +38,181 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
- struct UpdateStageParams {
+struct UpdateStageParams {
+ UpdateStageParams(const UpdateRequest* r, UpdateDriver* d, OpDebug* o)
+ : request(r), driver(d), opDebug(o), canonicalQuery(NULL) {}
- UpdateStageParams(const UpdateRequest* r,
- UpdateDriver* d,
- OpDebug* o)
- : request(r),
- driver(d),
- opDebug(o),
- canonicalQuery(NULL) { }
+ // Contains update parameters like whether it's a multi update or an upsert. Not owned.
+ // Must outlive the UpdateStage.
+ const UpdateRequest* request;
- // Contains update parameters like whether it's a multi update or an upsert. Not owned.
- // Must outlive the UpdateStage.
- const UpdateRequest* request;
+ // Contains the logic for applying mods to documents. Not owned. Must outlive
+ // the UpdateStage.
+ UpdateDriver* driver;
- // Contains the logic for applying mods to documents. Not owned. Must outlive
- // the UpdateStage.
- UpdateDriver* driver;
+ // Needed to pass to Collection::updateDocument(...).
+ OpDebug* opDebug;
- // Needed to pass to Collection::updateDocument(...).
- OpDebug* opDebug;
+ // Not owned here.
+ CanonicalQuery* canonicalQuery;
- // Not owned here.
- CanonicalQuery* canonicalQuery;
+private:
+ // Default constructor not allowed.
+ UpdateStageParams();
+};
- private:
- // Default constructor not allowed.
- UpdateStageParams();
- };
+/**
+ * Execution stage responsible for updates to documents and upserts. If the prior or
+ * newly-updated version of the document was requested to be returned, then ADVANCED is
+ * returned after updating or inserting a document. Otherwise, NEED_TIME is returned after
+ * updating or inserting a document.
+ *
+ * Callers of work() must be holding a write lock.
+ */
+class UpdateStage : public PlanStage {
+ MONGO_DISALLOW_COPYING(UpdateStage);
+
+public:
+ UpdateStage(OperationContext* txn,
+ const UpdateStageParams& params,
+ WorkingSet* ws,
+ Collection* collection,
+ PlanStage* child);
+
+ virtual bool isEOF();
+ virtual StageState work(WorkingSetID* out);
+
+ virtual void saveState();
+ virtual void restoreState(OperationContext* opCtx);
+ virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ virtual std::vector<PlanStage*> getChildren() const;
+
+ virtual StageType stageType() const {
+ return STAGE_UPDATE;
+ }
+
+ virtual PlanStageStats* getStats();
+
+ virtual const CommonStats* getCommonStats() const;
+
+ virtual const SpecificStats* getSpecificStats() const;
+
+ static const char* kStageType;
/**
- * Execution stage responsible for updates to documents and upserts. If the prior or
- * newly-updated version of the document was requested to be returned, then ADVANCED is
- * returned after updating or inserting a document. Otherwise, NEED_TIME is returned after
- * updating or inserting a document.
+ * Converts the execution stats (stored by the update stage as an UpdateStats) for the
+ * update plan represented by 'exec' into the UpdateResult format used to report the results
+ * of writes.
+ *
+ * Also responsible for filling out 'opDebug' with execution info.
+ *
+ * Should only be called once this stage is EOF.
+ */
+ static UpdateResult makeUpdateResult(PlanExecutor* exec, OpDebug* opDebug);
+
+ /**
+ * Computes the document to insert if the upsert flag is set to true and no matching
+     * documents are found in the database. The document to upsert is computed using the
+ * query 'cq' and the update mods contained in 'driver'.
+ *
+ * If 'cq' is NULL, which can happen for the idhack update fast path, then 'query' is
+ * used to compute the doc to insert instead of 'cq'.
*
- * Callers of work() must be holding a write lock.
+ * 'doc' is the mutable BSON document which you would like the update driver to use
+ * when computing the document to insert.
+ *
+ * Set 'isInternalRequest' to true if the upsert was issued by the replication or
+ * sharding systems.
+ *
+     * Records in 'stats' whether or not this is a fastmodinsert.
+ *
+ * Returns the document to insert in *out.
+ */
+ static Status applyUpdateOpsForInsert(const CanonicalQuery* cq,
+ const BSONObj& query,
+ UpdateDriver* driver,
+ UpdateLifecycle* lifecycle,
+ mutablebson::Document* doc,
+ bool isInternalRequest,
+ UpdateStats* stats,
+ BSONObj* out);
+
+private:
+ /**
+ * Computes the result of applying mods to the document 'oldObj' at RecordId 'loc' in
+ * memory, then commits these changes to the database. Returns a possibly unowned copy
+ * of the newly-updated version of the document.
+ */
+ BSONObj transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& loc);
+
+ /**
+ * Computes the document to insert and inserts it into the collection. Used if the
+ * user requested an upsert and no matching documents were found.
+ */
+ void doInsert();
+
+ /**
+ * Have we performed all necessary updates? Even if this is true, we might not be EOF,
+ * as we might still have to do an insert.
+ */
+ bool doneUpdating();
+
+ /**
+ * Examines the stats / update request and returns whether there is still an insert left
+ * to do. If so then this stage is not EOF yet.
+ */
+ bool needInsert();
+
+ /**
+ * Helper for restoring the state of this update.
*/
- class UpdateStage : public PlanStage {
- MONGO_DISALLOW_COPYING(UpdateStage);
- public:
- UpdateStage(OperationContext* txn,
- const UpdateStageParams& params,
- WorkingSet* ws,
- Collection* collection,
- PlanStage* child);
-
- virtual bool isEOF();
- virtual StageState work(WorkingSetID* out);
-
- virtual void saveState();
- virtual void restoreState(OperationContext* opCtx);
- virtual void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- virtual std::vector<PlanStage*> getChildren() const;
-
- virtual StageType stageType() const { return STAGE_UPDATE; }
-
- virtual PlanStageStats* getStats();
-
- virtual const CommonStats* getCommonStats() const;
-
- virtual const SpecificStats* getSpecificStats() const;
-
- static const char* kStageType;
-
- /**
- * Converts the execution stats (stored by the update stage as an UpdateStats) for the
- * update plan represented by 'exec' into the UpdateResult format used to report the results
- * of writes.
- *
- * Also responsible for filling out 'opDebug' with execution info.
- *
- * Should only be called once this stage is EOF.
- */
- static UpdateResult makeUpdateResult(PlanExecutor* exec, OpDebug* opDebug);
-
- /**
- * Computes the document to insert if the upsert flag is set to true and no matching
- * documents are found in the database. The document to upsert is computing using the
- * query 'cq' and the update mods contained in 'driver'.
- *
- * If 'cq' is NULL, which can happen for the idhack update fast path, then 'query' is
- * used to compute the doc to insert instead of 'cq'.
- *
- * 'doc' is the mutable BSON document which you would like the update driver to use
- * when computing the document to insert.
- *
- * Set 'isInternalRequest' to true if the upsert was issued by the replication or
- * sharding systems.
- *
- * Fills out whether or not this is a fastmodinsert in 'stats'.
- *
- * Returns the document to insert in *out.
- */
- static Status applyUpdateOpsForInsert(const CanonicalQuery* cq,
- const BSONObj& query,
- UpdateDriver* driver,
- UpdateLifecycle* lifecycle,
- mutablebson::Document* doc,
- bool isInternalRequest,
- UpdateStats* stats,
- BSONObj* out);
-
- private:
- /**
- * Computes the result of applying mods to the document 'oldObj' at RecordId 'loc' in
- * memory, then commits these changes to the database. Returns a possibly unowned copy
- * of the newly-updated version of the document.
- */
- BSONObj transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& loc);
-
- /**
- * Computes the document to insert and inserts it into the collection. Used if the
- * user requested an upsert and no matching documents were found.
- */
- void doInsert();
-
- /**
- * Have we performed all necessary updates? Even if this is true, we might not be EOF,
- * as we might still have to do an insert.
- */
- bool doneUpdating();
-
- /**
- * Examines the stats / update request and returns whether there is still an insert left
- * to do. If so then this stage is not EOF yet.
- */
- bool needInsert();
-
- /**
- * Helper for restoring the state of this update.
- */
- Status restoreUpdateState(OperationContext* opCtx);
-
- // Transactional context. Not owned by us.
- OperationContext* _txn;
-
- UpdateStageParams _params;
-
- // Not owned by us.
- WorkingSet* _ws;
-
- // Not owned by us. May be NULL.
- Collection* _collection;
-
- // Owned by us.
- std::unique_ptr<PlanStage> _child;
-
- // If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
- WorkingSetID _idRetrying;
-
- // If not WorkingSet::INVALID_ID, we return this member to our caller.
- WorkingSetID _idReturning;
-
- // Stats
- CommonStats _commonStats;
- UpdateStats _specificStats;
-
- // If the update was in-place, we may see it again. This only matters if we're doing
- // a multi-update; if we're not doing a multi-update we stop after one update and we
- // won't see any more docs.
- //
- // For example: If we're scanning an index {x:1} and performing {$inc:{x:5}}, we'll keep
- // moving the document forward and it will continue to reappear in our index scan.
- // Unless the index is multikey, the underlying query machinery won't de-dup.
- //
- // If the update wasn't in-place we may see it again. Our query may return the new
- // document and we wouldn't want to update that.
- //
- // So, no matter what, we keep track of where the doc wound up.
- typedef unordered_set<RecordId, RecordId::Hasher> DiskLocSet;
- const std::unique_ptr<DiskLocSet> _updatedLocs;
-
- // These get reused for each update.
- mutablebson::Document& _doc;
- mutablebson::DamageVector _damages;
- };
+ Status restoreUpdateState(OperationContext* opCtx);
+
+ // Transactional context. Not owned by us.
+ OperationContext* _txn;
+
+ UpdateStageParams _params;
+
+ // Not owned by us.
+ WorkingSet* _ws;
+
+ // Not owned by us. May be NULL.
+ Collection* _collection;
+
+ // Owned by us.
+ std::unique_ptr<PlanStage> _child;
+
+ // If not WorkingSet::INVALID_ID, we use this rather than asking our child what to do next.
+ WorkingSetID _idRetrying;
+
+ // If not WorkingSet::INVALID_ID, we return this member to our caller.
+ WorkingSetID _idReturning;
+
+ // Stats
+ CommonStats _commonStats;
+ UpdateStats _specificStats;
+
+ // If the update was in-place, we may see it again. This only matters if we're doing
+ // a multi-update; if we're not doing a multi-update we stop after one update and we
+ // won't see any more docs.
+ //
+ // For example: If we're scanning an index {x:1} and performing {$inc:{x:5}}, we'll keep
+ // moving the document forward and it will continue to reappear in our index scan.
+ // Unless the index is multikey, the underlying query machinery won't de-dup.
+ //
+ // If the update wasn't in-place we may see it again. Our query may return the new
+ // document and we wouldn't want to update that.
+ //
+ // So, no matter what, we keep track of where the doc wound up.
+ typedef unordered_set<RecordId, RecordId::Hasher> DiskLocSet;
+ const std::unique_ptr<DiskLocSet> _updatedLocs;
+
+ // These get reused for each update.
+ mutablebson::Document& _doc;
+ mutablebson::DamageVector _damages;
+};
} // namespace mongo
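The _updatedLocs member documented above exists because a multi-update can encounter the same document more than once. A minimal standalone sketch of that de-duplication pattern (plain integers stand in for RecordId, and a vector stands in for the child stage's output):

    #include <iostream>
    #include <unordered_set>
    #include <vector>

    // A document moved by an update may reappear later in the scan, so a multi-update
    // skips any record id it has already touched (the role of _updatedLocs above).
    int main() {
        std::vector<int> scan = {1, 2, 3, 2, 4, 1};  // ids as produced by the child stage
        std::unordered_set<int> updatedLocs;         // plays the role of _updatedLocs

        for (int recordId : scan) {
            if (!updatedLocs.insert(recordId).second) {
                continue;  // already updated this document once; skip the reappearance
            }
            std::cout << "updating record " << recordId << "\n";
        }
        return 0;  // prints each of 1, 2, 3, 4 exactly once
    }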
diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp
index 746ace2f3ca..5e531af346a 100644
--- a/src/mongo/db/exec/working_set.cpp
+++ b/src/mongo/db/exec/working_set.cpp
@@ -33,248 +33,247 @@
namespace mongo {
- using std::string;
+using std::string;
- WorkingSet::MemberHolder::MemberHolder() : member(NULL) { }
- WorkingSet::MemberHolder::~MemberHolder() {}
+WorkingSet::MemberHolder::MemberHolder() : member(NULL) {}
+WorkingSet::MemberHolder::~MemberHolder() {}
- WorkingSet::WorkingSet() : _freeList(INVALID_ID) { }
+WorkingSet::WorkingSet() : _freeList(INVALID_ID) {}
- WorkingSet::~WorkingSet() {
- for (size_t i = 0; i < _data.size(); i++) {
- delete _data[i].member;
- }
+WorkingSet::~WorkingSet() {
+ for (size_t i = 0; i < _data.size(); i++) {
+ delete _data[i].member;
}
-
- WorkingSetID WorkingSet::allocate() {
- if (_freeList == INVALID_ID) {
- // The free list is empty so we need to make a single new WSM to return. This relies on
- // vector::resize being amortized O(1) for efficient allocation. Note that the free list
- // remains empty until something is returned by a call to free().
- WorkingSetID id = _data.size();
- _data.resize(_data.size() + 1);
- _data.back().nextFreeOrSelf = id;
- _data.back().member = new WorkingSetMember();
- return id;
- }
-
- // Pop the head off the free list and return it.
- WorkingSetID id = _freeList;
- _freeList = _data[id].nextFreeOrSelf;
- _data[id].nextFreeOrSelf = id; // set to self to mark as in-use
+}
+
+WorkingSetID WorkingSet::allocate() {
+ if (_freeList == INVALID_ID) {
+ // The free list is empty so we need to make a single new WSM to return. This relies on
+ // vector::resize being amortized O(1) for efficient allocation. Note that the free list
+ // remains empty until something is returned by a call to free().
+ WorkingSetID id = _data.size();
+ _data.resize(_data.size() + 1);
+ _data.back().nextFreeOrSelf = id;
+ _data.back().member = new WorkingSetMember();
return id;
}
- void WorkingSet::free(const WorkingSetID& i) {
- MemberHolder& holder = _data[i];
- verify(i < _data.size()); // ID has been allocated.
- verify(holder.nextFreeOrSelf == i); // ID currently in use.
-
- // Free resources and push this WSM to the head of the freelist.
- holder.member->clear();
- holder.nextFreeOrSelf = _freeList;
- _freeList = i;
- }
-
- void WorkingSet::flagForReview(const WorkingSetID& i) {
- WorkingSetMember* member = get(i);
- verify(WorkingSetMember::OWNED_OBJ == member->state);
- _flagged.insert(i);
+ // Pop the head off the free list and return it.
+ WorkingSetID id = _freeList;
+ _freeList = _data[id].nextFreeOrSelf;
+ _data[id].nextFreeOrSelf = id; // set to self to mark as in-use
+ return id;
+}
+
+void WorkingSet::free(const WorkingSetID& i) {
+ MemberHolder& holder = _data[i];
+ verify(i < _data.size()); // ID has been allocated.
+ verify(holder.nextFreeOrSelf == i); // ID currently in use.
+
+ // Free resources and push this WSM to the head of the freelist.
+ holder.member->clear();
+ holder.nextFreeOrSelf = _freeList;
+ _freeList = i;
+}
+
+void WorkingSet::flagForReview(const WorkingSetID& i) {
+ WorkingSetMember* member = get(i);
+ verify(WorkingSetMember::OWNED_OBJ == member->state);
+ _flagged.insert(i);
+}
+
+const unordered_set<WorkingSetID>& WorkingSet::getFlagged() const {
+ return _flagged;
+}
+
+bool WorkingSet::isFlagged(WorkingSetID id) const {
+ invariant(id < _data.size());
+ return _flagged.end() != _flagged.find(id);
+}
+
+void WorkingSet::clear() {
+ for (size_t i = 0; i < _data.size(); i++) {
+ delete _data[i].member;
}
+ _data.clear();
- const unordered_set<WorkingSetID>& WorkingSet::getFlagged() const {
- return _flagged;
- }
-
- bool WorkingSet::isFlagged(WorkingSetID id) const {
- invariant(id < _data.size());
- return _flagged.end() != _flagged.find(id);
- }
+    // Since the working set is now empty, the free list pointer should
+ // point to nothing.
+ _freeList = INVALID_ID;
- void WorkingSet::clear() {
- for (size_t i = 0; i < _data.size(); i++) {
- delete _data[i].member;
- }
- _data.clear();
+ _flagged.clear();
+}
- // Since working set is now empty, the free list pointer should
- // point to nothing.
- _freeList = INVALID_ID;
+//
+// Iteration
+//
- _flagged.clear();
+WorkingSet::iterator::iterator(WorkingSet* ws, size_t index) : _ws(ws), _index(index) {
+ // If we're currently not pointing at an allocated member, then we have
+ // to advance to the first one, unless we're already at the end.
+ if (_index < _ws->_data.size() && isFree()) {
+ advance();
}
+}
- //
- // Iteration
- //
-
- WorkingSet::iterator::iterator(WorkingSet* ws, size_t index)
- : _ws(ws),
- _index(index) {
- // If we're currently not pointing at an allocated member, then we have
- // to advance to the first one, unless we're already at the end.
- if (_index < _ws->_data.size() && isFree()) {
- advance();
- }
- }
+void WorkingSet::iterator::advance() {
+ // Move forward at least once in the data list.
+ _index++;
- void WorkingSet::iterator::advance() {
- // Move forward at least once in the data list.
+ // While we haven't hit the end and the current member is not in use. (Skips ahead until
+ // we find the next allocated member.)
+ while (_index < _ws->_data.size() && isFree()) {
_index++;
-
- // While we haven't hit the end and the current member is not in use. (Skips ahead until
- // we find the next allocated member.)
- while (_index < _ws->_data.size() && isFree()) {
- _index++;
- }
}
+}
- bool WorkingSet::iterator::isFree() const {
- return _ws->_data[_index].nextFreeOrSelf != _index;
- }
+bool WorkingSet::iterator::isFree() const {
+ return _ws->_data[_index].nextFreeOrSelf != _index;
+}
- void WorkingSet::iterator::free() {
- dassert(!isFree());
- _ws->free(_index);
- }
+void WorkingSet::iterator::free() {
+ dassert(!isFree());
+ _ws->free(_index);
+}
- void WorkingSet::iterator::operator++() {
- dassert(_index < _ws->_data.size());
- advance();
- }
+void WorkingSet::iterator::operator++() {
+ dassert(_index < _ws->_data.size());
+ advance();
+}
- bool WorkingSet::iterator::operator==(const WorkingSet::iterator& other) const {
- return (_index == other._index);
- }
+bool WorkingSet::iterator::operator==(const WorkingSet::iterator& other) const {
+ return (_index == other._index);
+}
- bool WorkingSet::iterator::operator!=(const WorkingSet::iterator& other) const {
- return (_index != other._index);
- }
+bool WorkingSet::iterator::operator!=(const WorkingSet::iterator& other) const {
+ return (_index != other._index);
+}
- WorkingSetMember& WorkingSet::iterator::operator*() {
- dassert(_index < _ws->_data.size() && !isFree());
- return *_ws->_data[_index].member;
- }
+WorkingSetMember& WorkingSet::iterator::operator*() {
+ dassert(_index < _ws->_data.size() && !isFree());
+ return *_ws->_data[_index].member;
+}
- WorkingSetMember* WorkingSet::iterator::operator->() {
- dassert(_index < _ws->_data.size() && !isFree());
- return _ws->_data[_index].member;
- }
+WorkingSetMember* WorkingSet::iterator::operator->() {
+ dassert(_index < _ws->_data.size() && !isFree());
+ return _ws->_data[_index].member;
+}
- WorkingSet::iterator WorkingSet::begin() {
- return WorkingSet::iterator(this, 0);
- }
+WorkingSet::iterator WorkingSet::begin() {
+ return WorkingSet::iterator(this, 0);
+}
- WorkingSet::iterator WorkingSet::end() {
- return WorkingSet::iterator(this, _data.size());
- }
+WorkingSet::iterator WorkingSet::end() {
+ return WorkingSet::iterator(this, _data.size());
+}
- //
- // WorkingSetMember
- //
+//
+// WorkingSetMember
+//
- WorkingSetMember::WorkingSetMember() : state(WorkingSetMember::INVALID), isSuspicious(false) { }
+WorkingSetMember::WorkingSetMember() : state(WorkingSetMember::INVALID), isSuspicious(false) {}
- WorkingSetMember::~WorkingSetMember() { }
+WorkingSetMember::~WorkingSetMember() {}
- void WorkingSetMember::clear() {
- for (size_t i = 0; i < WSM_COMPUTED_NUM_TYPES; i++) {
- _computed[i].reset();
- }
-
- keyData.clear();
- obj.reset();
- state = WorkingSetMember::INVALID;
- }
-
- bool WorkingSetMember::hasLoc() const {
- return state == LOC_AND_IDX || state == LOC_AND_UNOWNED_OBJ || state == LOC_AND_OWNED_OBJ;
+void WorkingSetMember::clear() {
+ for (size_t i = 0; i < WSM_COMPUTED_NUM_TYPES; i++) {
+ _computed[i].reset();
}
- bool WorkingSetMember::hasObj() const {
- return hasOwnedObj() || hasUnownedObj();
+ keyData.clear();
+ obj.reset();
+ state = WorkingSetMember::INVALID;
+}
+
+bool WorkingSetMember::hasLoc() const {
+ return state == LOC_AND_IDX || state == LOC_AND_UNOWNED_OBJ || state == LOC_AND_OWNED_OBJ;
+}
+
+bool WorkingSetMember::hasObj() const {
+ return hasOwnedObj() || hasUnownedObj();
+}
+
+bool WorkingSetMember::hasOwnedObj() const {
+ return state == OWNED_OBJ || state == LOC_AND_OWNED_OBJ;
+}
+
+bool WorkingSetMember::hasUnownedObj() const {
+ return state == LOC_AND_UNOWNED_OBJ;
+}
+
+bool WorkingSetMember::hasComputed(const WorkingSetComputedDataType type) const {
+ return _computed[type].get();
+}
+
+const WorkingSetComputedData* WorkingSetMember::getComputed(
+ const WorkingSetComputedDataType type) const {
+ verify(_computed[type]);
+ return _computed[type].get();
+}
+
+void WorkingSetMember::addComputed(WorkingSetComputedData* data) {
+ verify(!hasComputed(data->type()));
+ _computed[data->type()].reset(data);
+}
+
+void WorkingSetMember::setFetcher(RecordFetcher* fetcher) {
+ _fetcher.reset(fetcher);
+}
+
+RecordFetcher* WorkingSetMember::releaseFetcher() {
+ return _fetcher.release();
+}
+
+bool WorkingSetMember::hasFetcher() const {
+ return NULL != _fetcher.get();
+}
+
+bool WorkingSetMember::getFieldDotted(const string& field, BSONElement* out) const {
+ // If our state is such that we have an object, use it.
+ if (hasObj()) {
+ *out = obj.value().getFieldDotted(field);
+ return true;
}
- bool WorkingSetMember::hasOwnedObj() const {
- return state == OWNED_OBJ || state == LOC_AND_OWNED_OBJ;
- }
+ // Our state should be such that we have index data/are covered.
+ for (size_t i = 0; i < keyData.size(); ++i) {
+ BSONObjIterator keyPatternIt(keyData[i].indexKeyPattern);
+ BSONObjIterator keyDataIt(keyData[i].keyData);
- bool WorkingSetMember::hasUnownedObj() const {
- return state == LOC_AND_UNOWNED_OBJ;
- }
-
- bool WorkingSetMember::hasComputed(const WorkingSetComputedDataType type) const {
- return _computed[type].get();
- }
+ while (keyPatternIt.more()) {
+ BSONElement keyPatternElt = keyPatternIt.next();
+ verify(keyDataIt.more());
+ BSONElement keyDataElt = keyDataIt.next();
- const WorkingSetComputedData* WorkingSetMember::getComputed(const WorkingSetComputedDataType type) const {
- verify(_computed[type]);
- return _computed[type].get();
+ if (field == keyPatternElt.fieldName()) {
+ *out = keyDataElt;
+ return true;
+ }
+ }
}
- void WorkingSetMember::addComputed(WorkingSetComputedData* data) {
- verify(!hasComputed(data->type()));
- _computed[data->type()].reset(data);
- }
+ return false;
+}
- void WorkingSetMember::setFetcher(RecordFetcher* fetcher) {
- _fetcher.reset(fetcher);
- }
+size_t WorkingSetMember::getMemUsage() const {
+ size_t memUsage = 0;
- RecordFetcher* WorkingSetMember::releaseFetcher() {
- return _fetcher.release();
+ if (hasLoc()) {
+ memUsage += sizeof(RecordId);
}
- bool WorkingSetMember::hasFetcher() const {
- return NULL != _fetcher.get();
+ // XXX: Unowned objects count towards current size.
+ // See SERVER-12579
+ if (hasObj()) {
+ memUsage += obj.value().objsize();
}
- bool WorkingSetMember::getFieldDotted(const string& field, BSONElement* out) const {
- // If our state is such that we have an object, use it.
- if (hasObj()) {
- *out = obj.value().getFieldDotted(field);
- return true;
- }
-
- // Our state should be such that we have index data/are covered.
- for (size_t i = 0; i < keyData.size(); ++i) {
- BSONObjIterator keyPatternIt(keyData[i].indexKeyPattern);
- BSONObjIterator keyDataIt(keyData[i].keyData);
-
- while (keyPatternIt.more()) {
- BSONElement keyPatternElt = keyPatternIt.next();
- verify(keyDataIt.more());
- BSONElement keyDataElt = keyDataIt.next();
-
- if (field == keyPatternElt.fieldName()) {
- *out = keyDataElt;
- return true;
- }
- }
- }
-
- return false;
+ for (size_t i = 0; i < keyData.size(); ++i) {
+ const IndexKeyDatum& keyDatum = keyData[i];
+ memUsage += keyDatum.keyData.objsize();
}
- size_t WorkingSetMember::getMemUsage() const {
- size_t memUsage = 0;
-
- if (hasLoc()) {
- memUsage += sizeof(RecordId);
- }
-
- // XXX: Unowned objects count towards current size.
- // See SERVER-12579
- if (hasObj()) {
- memUsage += obj.value().objsize();
- }
-
- for (size_t i = 0; i < keyData.size(); ++i) {
- const IndexKeyDatum& keyDatum = keyData[i];
- memUsage += keyDatum.keyData.objsize();
- }
-
- return memUsage;
- }
+ return memUsage;
+}
} // namespace mongo
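WorkingSet::allocate() and WorkingSet::free() above implement a free list in which every slot either links to the next free slot or points at itself while in use. A minimal standalone sketch of the same scheme, with illustrative names (TinyPool is not a MongoDB type):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Each slot stores either the index of the next free slot, or its own index while
    // in use; INVALID marks both "no free slots" and the end of the free list.
    struct TinyPool {
        static constexpr std::size_t INVALID = static_cast<std::size_t>(-1);
        struct Slot {
            std::size_t nextFreeOrSelf;
            int value;
        };

        std::vector<Slot> data;
        std::size_t freeList = INVALID;

        std::size_t allocate() {
            if (freeList == INVALID) {
                std::size_t id = data.size();
                data.push_back({id, 0});  // self-link marks the slot as in use
                return id;
            }
            std::size_t id = freeList;  // pop the head of the free list
            freeList = data[id].nextFreeOrSelf;
            data[id].nextFreeOrSelf = id;
            return id;
        }

        void free(std::size_t id) {
            assert(data[id].nextFreeOrSelf == id);  // must currently be in use
            data[id].nextFreeOrSelf = freeList;     // push onto the head of the free list
            freeList = id;
        }
    };

    int main() {
        TinyPool pool;
        std::size_t a = pool.allocate();
        std::size_t b = pool.allocate();
        pool.free(a);
        std::size_t c = pool.allocate();  // reuses slot 'a' instead of growing the vector
        assert(a == c && b != c);
        return 0;
    }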
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index ac23c2cf667..74080e6f4cc 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -38,308 +38,311 @@
namespace mongo {
- class IndexAccessMethod;
- class RecordFetcher;
- class WorkingSetMember;
+class IndexAccessMethod;
+class RecordFetcher;
+class WorkingSetMember;
- typedef size_t WorkingSetID;
+typedef size_t WorkingSetID;
+
+/**
+ * All data in use by a query. Data is passed through the stage tree by referencing the ID of
+ * an element of the working set. Stages can add elements to the working set, delete elements
+ * from the working set, or mutate elements in the working set.
+ *
+ * Concurrency Notes:
+ * flagForReview() can only be called with a write lock covering the collection this WorkingSet
+ * is for. All other methods should only be called by the thread owning this WorkingSet while
+ * holding the read lock covering the collection.
+ */
+class WorkingSet {
+ MONGO_DISALLOW_COPYING(WorkingSet);
+
+public:
+ static const WorkingSetID INVALID_ID = WorkingSetID(-1);
+
+ WorkingSet();
+ ~WorkingSet();
+
+ /**
+ * Allocate a new query result and return the ID used to get and free it.
+ */
+ WorkingSetID allocate();
+
+ /**
+ * Get the i-th mutable query result. The pointer will be valid for this id until freed.
+ * Do not delete the returned pointer as the WorkingSet retains ownership. Call free() to
+ * release it.
+ */
+ WorkingSetMember* get(const WorkingSetID& i) const {
+ dassert(i < _data.size()); // ID has been allocated.
+ dassert(_data[i].nextFreeOrSelf == i); // ID currently in use.
+ return _data[i].member;
+ }
+
+ /**
+ * Deallocate the i-th query result and release its resources.
+ */
+ void free(const WorkingSetID& i);
/**
- * All data in use by a query. Data is passed through the stage tree by referencing the ID of
- * an element of the working set. Stages can add elements to the working set, delete elements
- * from the working set, or mutate elements in the working set.
+ * The RecordId in WSM 'i' was invalidated while being processed. Any predicates over the
+ * WSM could not be fully evaluated, so the WSM may or may not satisfy them. As such, if we
+ * wish to output the WSM, we must do some clean-up work later. Adds the WSM with id 'i' to
+ * the list of flagged WSIDs.
*
- * Concurrency Notes:
- * flagForReview() can only be called with a write lock covering the collection this WorkingSet
- * is for. All other methods should only be called by the thread owning this WorkingSet while
- * holding the read lock covering the collection.
+ * The WSM must be in the state OWNED_OBJ.
*/
- class WorkingSet {
- MONGO_DISALLOW_COPYING(WorkingSet);
- public:
- static const WorkingSetID INVALID_ID = WorkingSetID(-1);
+ void flagForReview(const WorkingSetID& i);
- WorkingSet();
- ~WorkingSet();
+ /**
+ * Return true if the provided ID is flagged.
+ */
+ bool isFlagged(WorkingSetID id) const;
- /**
- * Allocate a new query result and return the ID used to get and free it.
- */
- WorkingSetID allocate();
+ /**
+ * Return the set of all WSIDs passed to flagForReview.
+ */
+ const unordered_set<WorkingSetID>& getFlagged() const;
- /**
- * Get the i-th mutable query result. The pointer will be valid for this id until freed.
- * Do not delete the returned pointer as the WorkingSet retains ownership. Call free() to
- * release it.
- */
- WorkingSetMember* get(const WorkingSetID& i) const {
- dassert(i < _data.size()); // ID has been allocated.
- dassert(_data[i].nextFreeOrSelf == i); // ID currently in use.
- return _data[i].member;
- }
+ /**
+ * Removes and deallocates all members of this working set.
+ */
+ void clear();
- /**
- * Deallocate the i-th query result and release its resources.
- */
- void free(const WorkingSetID& i);
+ //
+ // Iteration
+ //
- /**
- * The RecordId in WSM 'i' was invalidated while being processed. Any predicates over the
- * WSM could not be fully evaluated, so the WSM may or may not satisfy them. As such, if we
- * wish to output the WSM, we must do some clean-up work later. Adds the WSM with id 'i' to
- * the list of flagged WSIDs.
- *
- * The WSM must be in the state OWNED_OBJ.
- */
- void flagForReview(const WorkingSetID& i);
+ /**
+ * Forward iterates over the list of working set members, skipping any entries
+ * that are on the free list.
+ */
+ class iterator {
+ public:
+ iterator(WorkingSet* ws, size_t index);
- /**
- * Return true if the provided ID is flagged.
- */
- bool isFlagged(WorkingSetID id) const;
+ void operator++();
+
+ bool operator==(const WorkingSet::iterator& other) const;
+ bool operator!=(const WorkingSet::iterator& other) const;
+
+ WorkingSetMember& operator*();
+
+ WorkingSetMember* operator->();
/**
- * Return the set of all WSIDs passed to flagForReview.
+ * Free the WSM we are currently pointing to. Does not advance the iterator.
+ *
+ * It is invalid to dereference the iterator after calling free until the iterator is
+ * next incremented.
*/
- const unordered_set<WorkingSetID>& getFlagged() const;
+ void free();
+ private:
/**
- * Removes and deallocates all members of this working set.
+ * Move the iterator forward to the next allocated WSM.
*/
- void clear();
-
- //
- // Iteration
- //
+ void advance();
/**
- * Forward iterates over the list of working set members, skipping any entries
- * that are on the free list.
+ * Returns true if the MemberHolder currently pointed at by the iterator is free, and
+ * false if it contains an allocated working set member.
*/
- class iterator {
- public:
- iterator(WorkingSet* ws, size_t index);
-
- void operator++();
-
- bool operator==(const WorkingSet::iterator& other) const;
- bool operator!=(const WorkingSet::iterator& other) const;
+ bool isFree() const;
- WorkingSetMember& operator*();
+ // The working set we're iterating over. Not owned here.
+ WorkingSet* _ws;
- WorkingSetMember* operator->();
-
- /**
- * Free the WSM we are currently pointing to. Does not advance the iterator.
- *
- * It is invalid to dereference the iterator after calling free until the iterator is
- * next incremented.
- */
- void free();
-
- private:
- /**
- * Move the iterator forward to the next allocated WSM.
- */
- void advance();
+ // The index of the member we're currently pointing at.
+ size_t _index;
+ };
- /**
- * Returns true if the MemberHolder currently pointed at by the iterator is free, and
- * false if it contains an allocated working set member.
- */
- bool isFree() const;
+ WorkingSet::iterator begin();
- // The working set we're iterating over. Not owned here.
- WorkingSet* _ws;
+ WorkingSet::iterator end();
- // The index of the member we're currently pointing at.
- size_t _index;
- };
+private:
+ struct MemberHolder {
+ MemberHolder();
+ ~MemberHolder();
- WorkingSet::iterator begin();
+ // Free list link if freed. Points to self if in use.
+ WorkingSetID nextFreeOrSelf;
- WorkingSet::iterator end();
+ // Owning pointer
+ WorkingSetMember* member;
+ };
- private:
- struct MemberHolder {
- MemberHolder();
- ~MemberHolder();
+ // All WorkingSetIDs are indexes into this, except for INVALID_ID.
+ // Elements are added to _freeList rather than removed when freed.
+ std::vector<MemberHolder> _data;
- // Free list link if freed. Points to self if in use.
- WorkingSetID nextFreeOrSelf;
+ // Index into _data, forming a linked-list using MemberHolder::nextFreeOrSelf as the next
+ // link. INVALID_ID is the list terminator since 0 is a valid index.
+ // If _freeList == INVALID_ID, the free list is empty and all elements in _data are in use.
+ WorkingSetID _freeList;
- // Owning pointer
- WorkingSetMember* member;
- };
+ // An insert-only set of WorkingSetIDs that have been flagged for review.
+ unordered_set<WorkingSetID> _flagged;
+};
- // All WorkingSetIDs are indexes into this, except for INVALID_ID.
- // Elements are added to _freeList rather than removed when freed.
- std::vector<MemberHolder> _data;
+/**
+ * The key data extracted from an index. Keeps track of both the key (currently a BSONObj) and
+ * the index that provided the key. The index key pattern is required to correctly interpret
+ * the key.
+ */
+struct IndexKeyDatum {
+ IndexKeyDatum(const BSONObj& keyPattern, const BSONObj& key, const IndexAccessMethod* index)
+ : indexKeyPattern(keyPattern), keyData(key), index(index) {}
- // Index into _data, forming a linked-list using MemberHolder::nextFreeOrSelf as the next
- // link. INVALID_ID is the list terminator since 0 is a valid index.
- // If _freeList == INVALID_ID, the free list is empty and all elements in _data are in use.
- WorkingSetID _freeList;
+ // This is not owned and points into the IndexDescriptor's data.
+ BSONObj indexKeyPattern;
- // An insert-only set of WorkingSetIDs that have been flagged for review.
- unordered_set<WorkingSetID> _flagged;
- };
+ // This is the BSONObj for the key that we put into the index. Owned by us.
+ BSONObj keyData;
- /**
- * The key data extracted from an index. Keeps track of both the key (currently a BSONObj) and
- * the index that provided the key. The index key pattern is required to correctly interpret
- * the key.
- */
- struct IndexKeyDatum {
- IndexKeyDatum(const BSONObj& keyPattern, const BSONObj& key, const IndexAccessMethod* index)
- : indexKeyPattern(keyPattern),
- keyData(key),
- index(index) { }
+ const IndexAccessMethod* index;
+};
- // This is not owned and points into the IndexDescriptor's data.
- BSONObj indexKeyPattern;
+/**
+ * What types of computed data can we have?
+ */
+enum WorkingSetComputedDataType {
+ // What's the score of the document retrieved from a $text query?
+ WSM_COMPUTED_TEXT_SCORE = 0,
- // This is the BSONObj for the key that we put into the index. Owned by us.
- BSONObj keyData;
+ // What's the distance from a geoNear query point to the document?
+ WSM_COMPUTED_GEO_DISTANCE = 1,
- const IndexAccessMethod* index;
- };
+ // The index key used to retrieve the document, for $returnKey query option.
+ WSM_INDEX_KEY = 2,
- /**
- * What types of computed data can we have?
- */
- enum WorkingSetComputedDataType {
- // What's the score of the document retrieved from a $text query?
- WSM_COMPUTED_TEXT_SCORE = 0,
+ // What point (of several possible points) was used to compute the distance to the document
+ // via geoNear?
+ WSM_GEO_NEAR_POINT = 3,
- // What's the distance from a geoNear query point to the document?
- WSM_COMPUTED_GEO_DISTANCE = 1,
+ // Must be last.
+ WSM_COMPUTED_NUM_TYPES,
+};
- // The index key used to retrieve the document, for $returnKey query option.
- WSM_INDEX_KEY = 2,
+/**
+ * Data that is a computed function of a WSM.
+ */
+class WorkingSetComputedData {
+ MONGO_DISALLOW_COPYING(WorkingSetComputedData);
- // What point (of several possible points) was used to compute the distance to the document
- // via geoNear?
- WSM_GEO_NEAR_POINT = 3,
+public:
+ WorkingSetComputedData(const WorkingSetComputedDataType type) : _type(type) {}
+ virtual ~WorkingSetComputedData() {}
- // Must be last.
- WSM_COMPUTED_NUM_TYPES,
- };
+ WorkingSetComputedDataType type() const {
+ return _type;
+ }
- /**
- * Data that is a computed function of a WSM.
- */
- class WorkingSetComputedData {
- MONGO_DISALLOW_COPYING(WorkingSetComputedData);
- public:
- WorkingSetComputedData(const WorkingSetComputedDataType type) : _type(type) { }
- virtual ~WorkingSetComputedData() { }
+ virtual WorkingSetComputedData* clone() const = 0;
- WorkingSetComputedDataType type() const { return _type; }
+private:
+ WorkingSetComputedDataType _type;
+};
- virtual WorkingSetComputedData* clone() const = 0;
+/**
+ * The type of the data passed between query stages. In particular:
+ *
+ * Index scan stages return a WorkingSetMember in the LOC_AND_IDX state.
+ *
+ * Collection scan stages return a WorkingSetMember in the LOC_AND_UNOWNED_OBJ state.
+ *
+ * A WorkingSetMember may have any of the data above.
+ */
+class WorkingSetMember {
+ MONGO_DISALLOW_COPYING(WorkingSetMember);
- private:
- WorkingSetComputedDataType _type;
- };
+public:
+ WorkingSetMember();
+ ~WorkingSetMember();
/**
- * The type of the data passed between query stages. In particular:
- *
- * Index scan stages return a WorkingSetMember in the LOC_AND_IDX state.
- *
- * Collection scan stages the LOC_AND_UNOWNED_OBJ state.
- *
- * A WorkingSetMember may have any of the data above.
+ * Reset to an "empty" state.
*/
- class WorkingSetMember {
- MONGO_DISALLOW_COPYING(WorkingSetMember);
- public:
- WorkingSetMember();
- ~WorkingSetMember();
+ void clear();
- /**
- * Reset to an "empty" state.
- */
- void clear();
-
- enum MemberState {
- // Initial state.
- INVALID,
-
- // Data is from 1 or more indices.
- LOC_AND_IDX,
+ enum MemberState {
+ // Initial state.
+ INVALID,
- // Data is from a collection scan, or data is from an index scan and was fetched.
- LOC_AND_UNOWNED_OBJ,
+ // Data is from 1 or more indices.
+ LOC_AND_IDX,
- // RecordId has been invalidated, or the obj doesn't correspond to an on-disk document
- // anymore (e.g. is a computed expression).
- OWNED_OBJ,
+ // Data is from a collection scan, or data is from an index scan and was fetched.
+ LOC_AND_UNOWNED_OBJ,
- // Due to a yield, RecordId is no longer protected by the storage engine's transaction
- // and may have been invalidated. The object is either identical to the object keyed
- // by RecordId, or is an old version of the document stored at RecordId.
- //
- // Only used by doc-level locking storage engines (not used by MMAP v1).
- LOC_AND_OWNED_OBJ,
- };
+ // RecordId has been invalidated, or the obj doesn't correspond to an on-disk document
+ // anymore (e.g. is a computed expression).
+ OWNED_OBJ,
+ // Due to a yield, RecordId is no longer protected by the storage engine's transaction
+ // and may have been invalidated. The object is either identical to the object keyed
+ // by RecordId, or is an old version of the document stored at RecordId.
//
- // Core attributes
- //
+ // Only used by doc-level locking storage engines (not used by MMAP v1).
+ LOC_AND_OWNED_OBJ,
+ };
- RecordId loc;
- Snapshotted<BSONObj> obj;
- std::vector<IndexKeyDatum> keyData;
- MemberState state;
+ //
+ // Core attributes
+ //
- // True if this WSM has survived a yield in LOC_AND_IDX state.
- // TODO consider replacing by tracking SnapshotIds for IndexKeyDatums.
- bool isSuspicious;
+ RecordId loc;
+ Snapshotted<BSONObj> obj;
+ std::vector<IndexKeyDatum> keyData;
+ MemberState state;
- bool hasLoc() const;
- bool hasObj() const;
- bool hasOwnedObj() const;
- bool hasUnownedObj() const;
+ // True if this WSM has survived a yield in LOC_AND_IDX state.
+ // TODO consider replacing by tracking SnapshotIds for IndexKeyDatums.
+ bool isSuspicious;
- //
- // Computed data
- //
+ bool hasLoc() const;
+ bool hasObj() const;
+ bool hasOwnedObj() const;
+ bool hasUnownedObj() const;
- bool hasComputed(const WorkingSetComputedDataType type) const;
- const WorkingSetComputedData* getComputed(const WorkingSetComputedDataType type) const;
- void addComputed(WorkingSetComputedData* data);
+ //
+ // Computed data
+ //
- //
- // Fetching
- //
+ bool hasComputed(const WorkingSetComputedDataType type) const;
+ const WorkingSetComputedData* getComputed(const WorkingSetComputedDataType type) const;
+ void addComputed(WorkingSetComputedData* data);
- void setFetcher(RecordFetcher* fetcher);
- // Transfers ownership to the caller.
- RecordFetcher* releaseFetcher();
- bool hasFetcher() const;
+ //
+ // Fetching
+ //
- /**
- * getFieldDotted uses its state (obj or index data) to produce the field with the provided
- * name.
- *
- * Returns true if there is the element is in an index key or in an (owned or unowned)
- * object. *out is set to the element if so.
- *
- * Returns false otherwise. Returning false indicates a query planning error.
- */
- bool getFieldDotted(const std::string& field, BSONElement* out) const;
+ void setFetcher(RecordFetcher* fetcher);
+ // Transfers ownership to the caller.
+ RecordFetcher* releaseFetcher();
+ bool hasFetcher() const;
- /**
- * Returns expected memory usage of working set member.
- */
- size_t getMemUsage() const;
+ /**
+ * getFieldDotted uses its state (obj or index data) to produce the field with the provided
+ * name.
+ *
+     * Returns true if the element is in an index key or in an (owned or unowned)
+ * object. *out is set to the element if so.
+ *
+ * Returns false otherwise. Returning false indicates a query planning error.
+ */
+ bool getFieldDotted(const std::string& field, BSONElement* out) const;
- private:
- std::unique_ptr<WorkingSetComputedData> _computed[WSM_COMPUTED_NUM_TYPES];
+ /**
+ * Returns expected memory usage of working set member.
+ */
+ size_t getMemUsage() const;
- std::unique_ptr<RecordFetcher> _fetcher;
- };
+private:
+ std::unique_ptr<WorkingSetComputedData> _computed[WSM_COMPUTED_NUM_TYPES];
+
+ std::unique_ptr<RecordFetcher> _fetcher;
+};
} // namespace mongo
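The computed-data slots declared above are filled with subclasses of WorkingSetComputedData. The snippet below is a hypothetical illustration of that contract (a text-score payload); it assumes only the working_set.h header shown above and is not taken from the MongoDB tree:

    #include "mongo/db/exec/working_set.h"

    namespace mongo {

    // Hypothetical payload for the WSM_COMPUTED_TEXT_SCORE slot. It follows the
    // contract declared above: construct with the type tag and implement clone()
    // so WorkingSetCommon::initFrom() can copy it between members.
    class TextScoreComputedData : public WorkingSetComputedData {
    public:
        explicit TextScoreComputedData(double score)
            : WorkingSetComputedData(WSM_COMPUTED_TEXT_SCORE), _score(score) {}

        double getScore() const {
            return _score;
        }

        virtual WorkingSetComputedData* clone() const {
            return new TextScoreComputedData(_score);
        }

    private:
        double _score;
    };

    // Typical usage against the WorkingSetMember API shown above:
    //   member->addComputed(new TextScoreComputedData(1.5));   // producer; the WSM takes ownership
    //   if (member->hasComputed(WSM_COMPUTED_TEXT_SCORE)) {    // consumer
    //       const TextScoreComputedData* score = static_cast<const TextScoreComputedData*>(
    //           member->getComputed(WSM_COMPUTED_TEXT_SCORE));
    //       double s = score->getScore();
    //   }

    }  // namespace mongo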
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 6f03d1378a1..7fb10dd1792 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -39,170 +39,171 @@
namespace mongo {
- // static
- bool WorkingSetCommon::fetchAndInvalidateLoc(OperationContext* txn,
- WorkingSetMember* member,
- const Collection* collection) {
- // Already in our desired state.
- if (member->state == WorkingSetMember::OWNED_OBJ) { return true; }
-
- // We can't do anything without a RecordId.
- if (!member->hasLoc()) { return false; }
-
- // Do the fetch, invalidate the DL.
- member->obj = collection->docFor(txn, member->loc);
- member->obj.setValue(member->obj.value().getOwned() );
-
- member->state = WorkingSetMember::OWNED_OBJ;
- member->loc = RecordId();
+// static
+bool WorkingSetCommon::fetchAndInvalidateLoc(OperationContext* txn,
+ WorkingSetMember* member,
+ const Collection* collection) {
+ // Already in our desired state.
+ if (member->state == WorkingSetMember::OWNED_OBJ) {
return true;
}
- void WorkingSetCommon::prepareForSnapshotChange(WorkingSet* workingSet) {
- dassert(supportsDocLocking());
-
- for (WorkingSet::iterator it = workingSet->begin(); it != workingSet->end(); ++it) {
- if (it->state == WorkingSetMember::LOC_AND_IDX) {
- it->isSuspicious = true;
- }
- else if (it->state == WorkingSetMember::LOC_AND_UNOWNED_OBJ) {
- // We already have the data so convert directly to owned state.
- it->obj.setValue(it->obj.value().getOwned());
- it->state = WorkingSetMember::LOC_AND_OWNED_OBJ;
- }
- }
+ // We can't do anything without a RecordId.
+ if (!member->hasLoc()) {
+ return false;
}
- // static
- bool WorkingSetCommon::fetch(OperationContext* txn,
- WorkingSetMember* member,
- unowned_ptr<RecordCursor> cursor) {
- // The RecordFetcher should already have been transferred out of the WSM and used.
- invariant(!member->hasFetcher());
-
- // We should have a RecordId but need to retrieve the obj. Get the obj now and reset all WSM
- // state appropriately.
- invariant(member->hasLoc());
-
- member->obj.reset();
- auto record = cursor->seekExact(member->loc);
- if (!record) {
- return false;
- }
-
- member->obj = {txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
-
- if (member->isSuspicious) {
- // Make sure that all of the keyData is still valid for this copy of the document.
- // This ensures both that index-provided filters and sort orders still hold.
- // TODO provide a way for the query planner to opt out of this checking if it is
- // unneeded due to the structure of the plan.
- invariant(!member->keyData.empty());
- for (size_t i = 0; i < member->keyData.size(); i++) {
- BSONObjSet keys;
- member->keyData[i].index->getKeys(member->obj.value(), &keys);
- if (!keys.count(member->keyData[i].keyData)) {
- // document would no longer be at this position in the index.
- return false;
- }
- }
-
- member->isSuspicious = false;
+ // Do the fetch, invalidate the DL.
+ member->obj = collection->docFor(txn, member->loc);
+ member->obj.setValue(member->obj.value().getOwned());
+
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->loc = RecordId();
+ return true;
+}
+
+void WorkingSetCommon::prepareForSnapshotChange(WorkingSet* workingSet) {
+ dassert(supportsDocLocking());
+
+ for (WorkingSet::iterator it = workingSet->begin(); it != workingSet->end(); ++it) {
+ if (it->state == WorkingSetMember::LOC_AND_IDX) {
+ it->isSuspicious = true;
+ } else if (it->state == WorkingSetMember::LOC_AND_UNOWNED_OBJ) {
+ // We already have the data so convert directly to owned state.
+ it->obj.setValue(it->obj.value().getOwned());
+ it->state = WorkingSetMember::LOC_AND_OWNED_OBJ;
}
-
- member->keyData.clear();
- member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- return true;
+ }
+}
+
+// static
+bool WorkingSetCommon::fetch(OperationContext* txn,
+ WorkingSetMember* member,
+ unowned_ptr<RecordCursor> cursor) {
+ // The RecordFetcher should already have been transferred out of the WSM and used.
+ invariant(!member->hasFetcher());
+
+ // We should have a RecordId but need to retrieve the obj. Get the obj now and reset all WSM
+ // state appropriately.
+ invariant(member->hasLoc());
+
+ member->obj.reset();
+ auto record = cursor->seekExact(member->loc);
+ if (!record) {
+ return false;
}
- // static
- void WorkingSetCommon::initFrom(WorkingSetMember* dest, const WorkingSetMember& src) {
- dest->loc = src.loc;
- dest->obj = src.obj;
- dest->keyData = src.keyData;
- dest->state = src.state;
-
- // Merge computed data.
- typedef WorkingSetComputedDataType WSCD;
- for (WSCD i = WSCD(0); i < WSM_COMPUTED_NUM_TYPES; i = WSCD(i + 1)) {
- if (src.hasComputed(i)) {
- dest->addComputed(src.getComputed(i)->clone());
+ member->obj = {txn->recoveryUnit()->getSnapshotId(), record->data.releaseToBson()};
+
+ if (member->isSuspicious) {
+ // Make sure that all of the keyData is still valid for this copy of the document.
+ // This ensures both that index-provided filters and sort orders still hold.
+ // TODO provide a way for the query planner to opt out of this checking if it is
+ // unneeded due to the structure of the plan.
+ invariant(!member->keyData.empty());
+ for (size_t i = 0; i < member->keyData.size(); i++) {
+ BSONObjSet keys;
+ member->keyData[i].index->getKeys(member->obj.value(), &keys);
+ if (!keys.count(member->keyData[i].keyData)) {
+ // document would no longer be at this position in the index.
+ return false;
}
}
- }
-
- // static
- BSONObj WorkingSetCommon::buildMemberStatusObject(const Status& status) {
- BSONObjBuilder bob;
- bob.append("ok", status.isOK() ? 1.0 : 0.0);
- bob.append("code", status.code());
- bob.append("errmsg", status.reason());
- return bob.obj();
+ member->isSuspicious = false;
}
- // static
- WorkingSetID WorkingSetCommon::allocateStatusMember(WorkingSet* ws, const Status& status) {
- invariant(ws);
-
- WorkingSetID wsid = ws->allocate();
- WorkingSetMember* member = ws->get(wsid);
- member->state = WorkingSetMember::OWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), buildMemberStatusObject(status));
-
- return wsid;
- }
-
- // static
- bool WorkingSetCommon::isValidStatusMemberObject(const BSONObj& obj) {
- return obj.nFields() == 3 &&
- obj.hasField("ok") &&
- obj.hasField("code") &&
- obj.hasField("errmsg");
- }
-
- // static
- void WorkingSetCommon::getStatusMemberObject(const WorkingSet& ws, WorkingSetID wsid,
- BSONObj* objOut) {
- invariant(objOut);
-
- // Validate ID and working set member.
- if (WorkingSet::INVALID_ID == wsid) {
- return;
- }
- WorkingSetMember* member = ws.get(wsid);
- if (!member->hasOwnedObj()) {
- return;
+ member->keyData.clear();
+ member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+ return true;
+}
+
+// static
+void WorkingSetCommon::initFrom(WorkingSetMember* dest, const WorkingSetMember& src) {
+ dest->loc = src.loc;
+ dest->obj = src.obj;
+ dest->keyData = src.keyData;
+ dest->state = src.state;
+
+ // Merge computed data.
+ typedef WorkingSetComputedDataType WSCD;
+ for (WSCD i = WSCD(0); i < WSM_COMPUTED_NUM_TYPES; i = WSCD(i + 1)) {
+ if (src.hasComputed(i)) {
+ dest->addComputed(src.getComputed(i)->clone());
}
- BSONObj obj = member->obj.value();
- if (!isValidStatusMemberObject(obj)) {
- return;
- }
- *objOut = obj;
}
-
- // static
- Status WorkingSetCommon::getMemberObjectStatus(const BSONObj& memberObj) {
- invariant(WorkingSetCommon::isValidStatusMemberObject(memberObj));
- return Status(static_cast<ErrorCodes::Error>(memberObj["code"].numberInt()),
- memberObj["errmsg"]);
+}
+
+// static
+BSONObj WorkingSetCommon::buildMemberStatusObject(const Status& status) {
+ BSONObjBuilder bob;
+ bob.append("ok", status.isOK() ? 1.0 : 0.0);
+ bob.append("code", status.code());
+ bob.append("errmsg", status.reason());
+
+ return bob.obj();
+}
+
+// static
+WorkingSetID WorkingSetCommon::allocateStatusMember(WorkingSet* ws, const Status& status) {
+ invariant(ws);
+
+ WorkingSetID wsid = ws->allocate();
+ WorkingSetMember* member = ws->get(wsid);
+ member->state = WorkingSetMember::OWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), buildMemberStatusObject(status));
+
+ return wsid;
+}
+
+// static
+bool WorkingSetCommon::isValidStatusMemberObject(const BSONObj& obj) {
+ return obj.nFields() == 3 && obj.hasField("ok") && obj.hasField("code") &&
+ obj.hasField("errmsg");
+}
+
+// static
+void WorkingSetCommon::getStatusMemberObject(const WorkingSet& ws,
+ WorkingSetID wsid,
+ BSONObj* objOut) {
+ invariant(objOut);
+
+ // Validate ID and working set member.
+ if (WorkingSet::INVALID_ID == wsid) {
+ return;
}
-
- // static
- Status WorkingSetCommon::getMemberStatus(const WorkingSetMember& member) {
- invariant(member.hasObj());
- return getMemberObjectStatus(member.obj.value());
+ WorkingSetMember* member = ws.get(wsid);
+ if (!member->hasOwnedObj()) {
+ return;
}
-
- // static
- std::string WorkingSetCommon::toStatusString(const BSONObj& obj) {
- if (!isValidStatusMemberObject(obj)) {
- Status unknownStatus(ErrorCodes::UnknownError, "no details available");
- return unknownStatus.toString();
- }
- Status status(ErrorCodes::fromInt(obj.getIntField("code")),
- obj.getStringField("errmsg"));
- return status.toString();
+ BSONObj obj = member->obj.value();
+ if (!isValidStatusMemberObject(obj)) {
+ return;
+ }
+ *objOut = obj;
+}
+
+// static
+Status WorkingSetCommon::getMemberObjectStatus(const BSONObj& memberObj) {
+ invariant(WorkingSetCommon::isValidStatusMemberObject(memberObj));
+ return Status(static_cast<ErrorCodes::Error>(memberObj["code"].numberInt()),
+ memberObj["errmsg"]);
+}
+
+// static
+Status WorkingSetCommon::getMemberStatus(const WorkingSetMember& member) {
+ invariant(member.hasObj());
+ return getMemberObjectStatus(member.obj.value());
+}
+
+// static
+std::string WorkingSetCommon::toStatusString(const BSONObj& obj) {
+ if (!isValidStatusMemberObject(obj)) {
+ Status unknownStatus(ErrorCodes::UnknownError, "no details available");
+ return unknownStatus.toString();
}
+ Status status(ErrorCodes::fromInt(obj.getIntField("code")), obj.getStringField("errmsg"));
+ return status.toString();
+}
} // namespace mongo
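The helpers above define the {ok, code, errmsg} convention used to carry a Status through a working set member. A short sketch of the round trip, assuming it sits alongside working_set_common.cpp with the same includes and namespace; the function name and error message are illustrative only:

    namespace mongo {

    void statusMemberRoundTripExample(WorkingSet* ws) {
        Status original(ErrorCodes::InternalError, "example failure from a child stage");

        // Encode: buildMemberStatusObject() produces {ok: 0.0, code: <code>, errmsg: <reason>},
        // and allocateStatusMember() stores it in an OWNED_OBJ member of 'ws'.
        WorkingSetID wsid = WorkingSetCommon::allocateStatusMember(ws, original);

        // Decode on the consumer side using the helpers defined above.
        BSONObj encoded;
        WorkingSetCommon::getStatusMemberObject(*ws, wsid, &encoded);
        invariant(WorkingSetCommon::isValidStatusMemberObject(encoded));

        Status recovered = WorkingSetCommon::getMemberObjectStatus(encoded);
        invariant(recovered.code() == original.code());

        // Or simply render it for logging.
        std::string printable = WorkingSetCommon::toStatusString(encoded);
        (void)printable;
    }

    }  // namespace mongo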
diff --git a/src/mongo/db/exec/working_set_common.h b/src/mongo/db/exec/working_set_common.h
index aa1ecdb96c6..2fde57fda3a 100644
--- a/src/mongo/db/exec/working_set_common.h
+++ b/src/mongo/db/exec/working_set_common.h
@@ -33,104 +33,104 @@
namespace mongo {
- class CanonicalQuery;
- class Collection;
- class OperationContext;
- class RecordCursor;
-
- class WorkingSetCommon {
- public:
- /**
- * Get an owned copy of the BSONObj the WSM refers to.
- * Requires either a valid BSONObj or valid RecordId.
- * Returns true if the fetch and invalidate succeeded, false otherwise.
- */
- static bool fetchAndInvalidateLoc(OperationContext* txn,
- WorkingSetMember* member,
- const Collection* collection);
-
- /**
- * This must be called as part of "saveState" operations after all nodes in the tree save
- * their state.
- *
- * Iterates over 'workingSet' and converts all LOC_AND_UNOWNED_OBJ members to
- * LOC_AND_OWNED_OBJ by calling getOwned on their obj. Also sets the isSuspicious flag on
- * all nodes in LOC_AND_IDX state.
- */
- static void prepareForSnapshotChange(WorkingSet* workingSet);
-
- /**
- * Retrieves the document corresponding to 'member' from 'collection', and sets the state of
- * 'member' appropriately.
- *
- * If false is returned, the document should not be considered for the result set. It is the
- * caller's responsibility to free 'member' in this case.
- *
- * WriteConflict exceptions may be thrown. When they are, 'member' will be unmodified.
- */
- static bool fetch(OperationContext* txn,
- WorkingSetMember* member,
- unowned_ptr<RecordCursor> cursor);
-
- static bool fetchIfUnfetched(OperationContext* txn,
- WorkingSetMember* member,
- unowned_ptr<RecordCursor> cursor) {
- if (member->hasObj()) return true;
- return fetch(txn, member, cursor);
- }
-
- /**
- * Initialize the fields in 'dest' from 'src', creating copies of owned objects as needed.
- */
- static void initFrom(WorkingSetMember* dest, const WorkingSetMember& src);
-
- /**
- * Build a BSONObj which represents a Status to return in a WorkingSet.
- */
- static BSONObj buildMemberStatusObject(const Status& status);
-
- /**
- * Allocate a new WSM and initialize it with
- * the code and reason from the status.
- * Owned BSON object will have the following layout:
- * {
- * ok: <ok>, // 1 for OK; 0 otherwise.
- * code: <code>, // Status::code()
- * errmsg: <errmsg> // Status::reason()
- * }
- */
- static WorkingSetID allocateStatusMember(WorkingSet* ws, const Status& status);
-
- /**
- * Returns true if object was created by allocateStatusMember().
- */
- static bool isValidStatusMemberObject(const BSONObj& obj);
-
- /**
- * Returns object in working set member created with allocateStatusMember().
- * Does not assume isValidStatusMemberObject.
- * If the WSID is invalid or the working set member is created by
- * allocateStatusMember, objOut will not be updated.
- */
- static void getStatusMemberObject(const WorkingSet& ws, WorkingSetID wsid,
- BSONObj* objOut);
-
- /**
- * Returns status from working set member object.
- * Assumes isValidStatusMemberObject().
- */
- static Status getMemberObjectStatus(const BSONObj& memberObj);
-
- /**
- * Returns status from working set member created with allocateStatusMember().
- * Assumes isValidStatusMemberObject().
- */
- static Status getMemberStatus(const WorkingSetMember& member);
-
- /**
- * Formats working set member object created with allocateStatusMember().
- */
- static std::string toStatusString(const BSONObj& obj);
- };
+class CanonicalQuery;
+class Collection;
+class OperationContext;
+class RecordCursor;
+
+class WorkingSetCommon {
+public:
+ /**
+ * Get an owned copy of the BSONObj the WSM refers to.
+ * Requires either a valid BSONObj or valid RecordId.
+ * Returns true if the fetch and invalidate succeeded, false otherwise.
+ */
+ static bool fetchAndInvalidateLoc(OperationContext* txn,
+ WorkingSetMember* member,
+ const Collection* collection);
+
+ /**
+ * This must be called as part of "saveState" operations after all nodes in the tree save
+ * their state.
+ *
+ * Iterates over 'workingSet' and converts all LOC_AND_UNOWNED_OBJ members to
+ * LOC_AND_OWNED_OBJ by calling getOwned on their obj. Also sets the isSuspicious flag on
+ * all nodes in LOC_AND_IDX state.
+ */
+ static void prepareForSnapshotChange(WorkingSet* workingSet);
+
+ /**
+ * Retrieves the document corresponding to 'member' from 'collection', and sets the state of
+ * 'member' appropriately.
+ *
+ * If false is returned, the document should not be considered for the result set. It is the
+ * caller's responsibility to free 'member' in this case.
+ *
+ * WriteConflict exceptions may be thrown. When they are, 'member' will be unmodified.
+ */
+ static bool fetch(OperationContext* txn,
+ WorkingSetMember* member,
+ unowned_ptr<RecordCursor> cursor);
+
+ static bool fetchIfUnfetched(OperationContext* txn,
+ WorkingSetMember* member,
+ unowned_ptr<RecordCursor> cursor) {
+ if (member->hasObj())
+ return true;
+ return fetch(txn, member, cursor);
+ }
+
+ /**
+ * Initialize the fields in 'dest' from 'src', creating copies of owned objects as needed.
+ */
+ static void initFrom(WorkingSetMember* dest, const WorkingSetMember& src);
+
+ /**
+ * Build a BSONObj which represents a Status to return in a WorkingSet.
+ */
+ static BSONObj buildMemberStatusObject(const Status& status);
+
+ /**
+ * Allocate a new WSM and initialize it with
+ * the code and reason from the status.
+ * Owned BSON object will have the following layout:
+ * {
+ * ok: <ok>, // 1 for OK; 0 otherwise.
+ * code: <code>, // Status::code()
+ * errmsg: <errmsg> // Status::reason()
+ * }
+ */
+ static WorkingSetID allocateStatusMember(WorkingSet* ws, const Status& status);
+
+ /**
+ * Returns true if object was created by allocateStatusMember().
+ */
+ static bool isValidStatusMemberObject(const BSONObj& obj);
+
+ /**
+ * Returns object in working set member created with allocateStatusMember().
+ * Does not assume isValidStatusMemberObject.
+     * If the WSID is invalid or the working set member was not created by
+     * allocateStatusMember(), objOut will not be updated.
+ */
+ static void getStatusMemberObject(const WorkingSet& ws, WorkingSetID wsid, BSONObj* objOut);
+
+ /**
+ * Returns status from working set member object.
+ * Assumes isValidStatusMemberObject().
+ */
+ static Status getMemberObjectStatus(const BSONObj& memberObj);
+
+ /**
+ * Returns status from working set member created with allocateStatusMember().
+ * Assumes isValidStatusMemberObject().
+ */
+ static Status getMemberStatus(const WorkingSetMember& member);
+
+ /**
+ * Formats working set member object created with allocateStatusMember().
+ */
+ static std::string toStatusString(const BSONObj& obj);
+};
} // namespace mongo
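
For reference, a minimal usage sketch of the status-member helpers declared above (not part of this patch; the 'workingSet' variable, error code, and message text are assumptions):

    // Illustrative sketch only -- assumes a WorkingSet named 'workingSet' is in scope.
    Status status(ErrorCodes::InternalError, "example failure surfaced by a stage");
    WorkingSetID wsid = WorkingSetCommon::allocateStatusMember(&workingSet, status);

    // A consumer can later recover the details from the owned status object.
    BSONObj statusObj;
    WorkingSetCommon::getStatusMemberObject(workingSet, wsid, &statusObj);
    if (WorkingSetCommon::isValidStatusMemberObject(statusObj)) {
        Status recovered = WorkingSetCommon::getMemberObjectStatus(statusObj);
    }
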
diff --git a/src/mongo/db/exec/working_set_computed_data.h b/src/mongo/db/exec/working_set_computed_data.h
index 53a74633764..c7dc27ce45d 100644
--- a/src/mongo/db/exec/working_set_computed_data.h
+++ b/src/mongo/db/exec/working_set_computed_data.h
@@ -32,68 +32,72 @@
namespace mongo {
- class TextScoreComputedData : public WorkingSetComputedData {
- public:
- TextScoreComputedData(double score)
- : WorkingSetComputedData(WSM_COMPUTED_TEXT_SCORE),
- _score(score) { }
-
- double getScore() const { return _score; }
-
- virtual TextScoreComputedData* clone() const {
- return new TextScoreComputedData(_score);
- }
-
- private:
- double _score;
- };
-
- class GeoDistanceComputedData : public WorkingSetComputedData {
- public:
- GeoDistanceComputedData(double dist)
- : WorkingSetComputedData(WSM_COMPUTED_GEO_DISTANCE),
- _dist(dist) { }
-
- double getDist() const { return _dist; }
-
- virtual GeoDistanceComputedData* clone() const {
- return new GeoDistanceComputedData(_dist);
- }
-
- private:
- double _dist;
- };
-
- class IndexKeyComputedData : public WorkingSetComputedData {
- public:
- IndexKeyComputedData(BSONObj key)
- : WorkingSetComputedData(WSM_INDEX_KEY),
- _key(key.getOwned()) { }
-
- BSONObj getKey() const { return _key; }
-
- virtual IndexKeyComputedData* clone() const {
- return new IndexKeyComputedData(_key);
- }
-
- private:
- BSONObj _key;
- };
-
- class GeoNearPointComputedData : public WorkingSetComputedData {
- public:
- GeoNearPointComputedData(BSONObj point)
- : WorkingSetComputedData(WSM_GEO_NEAR_POINT),
- _point(point.getOwned()) { }
-
- BSONObj getPoint() const { return _point; }
-
- virtual GeoNearPointComputedData* clone() const {
- return new GeoNearPointComputedData(_point);
- }
-
- private:
- BSONObj _point;
- };
+class TextScoreComputedData : public WorkingSetComputedData {
+public:
+ TextScoreComputedData(double score)
+ : WorkingSetComputedData(WSM_COMPUTED_TEXT_SCORE), _score(score) {}
+
+ double getScore() const {
+ return _score;
+ }
+
+ virtual TextScoreComputedData* clone() const {
+ return new TextScoreComputedData(_score);
+ }
+
+private:
+ double _score;
+};
+
+class GeoDistanceComputedData : public WorkingSetComputedData {
+public:
+ GeoDistanceComputedData(double dist)
+ : WorkingSetComputedData(WSM_COMPUTED_GEO_DISTANCE), _dist(dist) {}
+
+ double getDist() const {
+ return _dist;
+ }
+
+ virtual GeoDistanceComputedData* clone() const {
+ return new GeoDistanceComputedData(_dist);
+ }
+
+private:
+ double _dist;
+};
+
+class IndexKeyComputedData : public WorkingSetComputedData {
+public:
+ IndexKeyComputedData(BSONObj key)
+ : WorkingSetComputedData(WSM_INDEX_KEY), _key(key.getOwned()) {}
+
+ BSONObj getKey() const {
+ return _key;
+ }
+
+ virtual IndexKeyComputedData* clone() const {
+ return new IndexKeyComputedData(_key);
+ }
+
+private:
+ BSONObj _key;
+};
+
+class GeoNearPointComputedData : public WorkingSetComputedData {
+public:
+ GeoNearPointComputedData(BSONObj point)
+ : WorkingSetComputedData(WSM_GEO_NEAR_POINT), _point(point.getOwned()) {}
+
+ BSONObj getPoint() const {
+ return _point;
+ }
+
+ virtual GeoNearPointComputedData* clone() const {
+ return new GeoNearPointComputedData(_point);
+ }
+
+private:
+ BSONObj _point;
+};
} // namespace mongo
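
For reference, a minimal sketch of how computed data is attached to and read back from a member (not part of this patch; 'member' is an assumed WorkingSetMember* and the score value is arbitrary):

    // Illustrative sketch only -- 'member' is an assumed WorkingSetMember*.
    member->addComputed(new TextScoreComputedData(1.5));

    if (member->hasComputed(WSM_COMPUTED_TEXT_SCORE)) {
        const TextScoreComputedData* score = static_cast<const TextScoreComputedData*>(
            member->getComputed(WSM_COMPUTED_TEXT_SCORE));
        double textScore = score->getScore();  // 1.5
    }
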
diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp
index d4d7163831b..ff95717963c 100644
--- a/src/mongo/db/exec/working_set_test.cpp
+++ b/src/mongo/db/exec/working_set_test.cpp
@@ -41,207 +41,207 @@ using namespace mongo;
namespace {
- using std::string;
-
- class WorkingSetFixture : public mongo::unittest::Test {
- protected:
- void setUp() {
- ws.reset(new WorkingSet());
- WorkingSetID id = ws->allocate();
- ASSERT(id != WorkingSet::INVALID_ID);
- member = ws->get(id);
- ASSERT(NULL != member);
- }
-
- void tearDown() {
- ws.reset();
- member = NULL;
- }
-
- std::unique_ptr<WorkingSet> ws;
- WorkingSetMember* member;
- };
-
- TEST_F(WorkingSetFixture, noFieldToGet) {
- BSONElement elt;
-
- // Make sure we're not getting anything out of an invalid WSM.
- ASSERT_EQUALS(WorkingSetMember::INVALID, member->state);
- ASSERT_FALSE(member->getFieldDotted("foo", &elt));
-
- member->state = WorkingSetMember::LOC_AND_IDX;
- ASSERT_FALSE(member->getFieldDotted("foo", &elt));
-
- // Our state is that of a valid object. The getFieldDotted shouldn't throw; there's
- // something to call getFieldDotted on, but there's no field there.
- member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- ASSERT_TRUE(member->getFieldDotted("foo", &elt));
-
- member->state = WorkingSetMember::OWNED_OBJ;
- ASSERT_TRUE(member->getFieldDotted("foo", &elt));
+using std::string;
+
+class WorkingSetFixture : public mongo::unittest::Test {
+protected:
+ void setUp() {
+ ws.reset(new WorkingSet());
+ WorkingSetID id = ws->allocate();
+ ASSERT(id != WorkingSet::INVALID_ID);
+ member = ws->get(id);
+ ASSERT(NULL != member);
}
- TEST_F(WorkingSetFixture, getFieldUnowned) {
- string fieldName = "x";
-
- BSONObj obj = BSON(fieldName << 5);
- // Not truthful since the loc is bogus, but the loc isn't accessed anyway...
- member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), BSONObj(obj.objdata()));
- ASSERT_TRUE(obj.isOwned());
- ASSERT_FALSE(member->obj.value().isOwned());
-
- // Get out the field we put in.
- BSONElement elt;
- ASSERT_TRUE(member->getFieldDotted(fieldName, &elt));
- ASSERT_EQUALS(elt.numberInt(), 5);
+ void tearDown() {
+ ws.reset();
+ member = NULL;
}
- TEST_F(WorkingSetFixture, getFieldOwned) {
- string fieldName = "x";
-
- BSONObj obj = BSON(fieldName << 5);
- member->obj = Snapshotted<BSONObj>(SnapshotId(), obj);
- ASSERT_TRUE(member->obj.value().isOwned());
- member->state = WorkingSetMember::OWNED_OBJ;
- BSONElement elt;
- ASSERT_TRUE(member->getFieldDotted(fieldName, &elt));
- ASSERT_EQUALS(elt.numberInt(), 5);
+ std::unique_ptr<WorkingSet> ws;
+ WorkingSetMember* member;
+};
+
+TEST_F(WorkingSetFixture, noFieldToGet) {
+ BSONElement elt;
+
+ // Make sure we're not getting anything out of an invalid WSM.
+ ASSERT_EQUALS(WorkingSetMember::INVALID, member->state);
+ ASSERT_FALSE(member->getFieldDotted("foo", &elt));
+
+ member->state = WorkingSetMember::LOC_AND_IDX;
+ ASSERT_FALSE(member->getFieldDotted("foo", &elt));
+
+ // Our state is that of a valid object. The getFieldDotted shouldn't throw; there's
+ // something to call getFieldDotted on, but there's no field there.
+ member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+ ASSERT_TRUE(member->getFieldDotted("foo", &elt));
+
+ member->state = WorkingSetMember::OWNED_OBJ;
+ ASSERT_TRUE(member->getFieldDotted("foo", &elt));
+}
+
+TEST_F(WorkingSetFixture, getFieldUnowned) {
+ string fieldName = "x";
+
+ BSONObj obj = BSON(fieldName << 5);
+ // Not truthful since the loc is bogus, but the loc isn't accessed anyway...
+ member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSONObj(obj.objdata()));
+ ASSERT_TRUE(obj.isOwned());
+ ASSERT_FALSE(member->obj.value().isOwned());
+
+ // Get out the field we put in.
+ BSONElement elt;
+ ASSERT_TRUE(member->getFieldDotted(fieldName, &elt));
+ ASSERT_EQUALS(elt.numberInt(), 5);
+}
+
+TEST_F(WorkingSetFixture, getFieldOwned) {
+ string fieldName = "x";
+
+ BSONObj obj = BSON(fieldName << 5);
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), obj);
+ ASSERT_TRUE(member->obj.value().isOwned());
+ member->state = WorkingSetMember::OWNED_OBJ;
+ BSONElement elt;
+ ASSERT_TRUE(member->getFieldDotted(fieldName, &elt));
+ ASSERT_EQUALS(elt.numberInt(), 5);
+}
+
+TEST_F(WorkingSetFixture, getFieldFromIndex) {
+ string firstName = "x";
+ int firstValue = 5;
+
+ string secondName = "y";
+ int secondValue = 10;
+
+ member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), NULL));
+ // Also a minor lie as loc is bogus.
+ member->state = WorkingSetMember::LOC_AND_IDX;
+ BSONElement elt;
+ ASSERT_TRUE(member->getFieldDotted(firstName, &elt));
+ ASSERT_EQUALS(elt.numberInt(), firstValue);
+ // No foo field.
+ ASSERT_FALSE(member->getFieldDotted("foo", &elt));
+
+ // Add another index datum.
+ member->keyData.push_back(IndexKeyDatum(BSON(secondName << 1), BSON("" << secondValue), NULL));
+ ASSERT_TRUE(member->getFieldDotted(secondName, &elt));
+ ASSERT_EQUALS(elt.numberInt(), secondValue);
+ ASSERT_TRUE(member->getFieldDotted(firstName, &elt));
+ ASSERT_EQUALS(elt.numberInt(), firstValue);
+ // Still no foo.
+ ASSERT_FALSE(member->getFieldDotted("foo", &elt));
+}
+
+TEST_F(WorkingSetFixture, getDottedFieldFromIndex) {
+ string firstName = "x.y";
+ int firstValue = 5;
+
+ member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), NULL));
+ member->state = WorkingSetMember::LOC_AND_IDX;
+ BSONElement elt;
+ ASSERT_TRUE(member->getFieldDotted(firstName, &elt));
+ ASSERT_EQUALS(elt.numberInt(), firstValue);
+ ASSERT_FALSE(member->getFieldDotted("x", &elt));
+ ASSERT_FALSE(member->getFieldDotted("y", &elt));
+}
+
+//
+// WorkingSet::iterator tests
+//
+
+TEST(WorkingSetIteratorTest, BasicIteratorTest) {
+ WorkingSet ws;
+
+ WorkingSetID id1 = ws.allocate();
+ WorkingSetMember* member1 = ws.get(id1);
+ member1->state = WorkingSetMember::LOC_AND_IDX;
+ member1->keyData.push_back(IndexKeyDatum(BSON("a" << 1), BSON("" << 3), NULL));
+
+ WorkingSetID id2 = ws.allocate();
+ WorkingSetMember* member2 = ws.get(id2);
+ member2->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+ member2->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("a" << 3));
+
+ int counter = 0;
+ for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
+ ASSERT(it->state == WorkingSetMember::LOC_AND_IDX ||
+ it->state == WorkingSetMember::LOC_AND_UNOWNED_OBJ);
+ counter++;
}
+ ASSERT_EQ(counter, 2);
+}
- TEST_F(WorkingSetFixture, getFieldFromIndex) {
- string firstName = "x";
- int firstValue = 5;
-
- string secondName = "y";
- int secondValue = 10;
-
- member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), NULL));
- // Also a minor lie as loc is bogus.
- member->state = WorkingSetMember::LOC_AND_IDX;
- BSONElement elt;
- ASSERT_TRUE(member->getFieldDotted(firstName, &elt));
- ASSERT_EQUALS(elt.numberInt(), firstValue);
- // No foo field.
- ASSERT_FALSE(member->getFieldDotted("foo", &elt));
-
- // Add another index datum.
- member->keyData.push_back(IndexKeyDatum(BSON(secondName << 1), BSON("" << secondValue), NULL));
- ASSERT_TRUE(member->getFieldDotted(secondName, &elt));
- ASSERT_EQUALS(elt.numberInt(), secondValue);
- ASSERT_TRUE(member->getFieldDotted(firstName, &elt));
- ASSERT_EQUALS(elt.numberInt(), firstValue);
- // Still no foo.
- ASSERT_FALSE(member->getFieldDotted("foo", &elt));
- }
+TEST(WorkingSetIteratorTest, EmptyWorkingSet) {
+ WorkingSet ws;
- TEST_F(WorkingSetFixture, getDottedFieldFromIndex) {
- string firstName = "x.y";
- int firstValue = 5;
-
- member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), NULL));
- member->state = WorkingSetMember::LOC_AND_IDX;
- BSONElement elt;
- ASSERT_TRUE(member->getFieldDotted(firstName, &elt));
- ASSERT_EQUALS(elt.numberInt(), firstValue);
- ASSERT_FALSE(member->getFieldDotted("x", &elt));
- ASSERT_FALSE(member->getFieldDotted("y", &elt));
+ int counter = 0;
+ for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
+ counter++;
}
+ ASSERT_EQ(counter, 0);
+}
- //
- // WorkingSet::iterator tests
- //
+TEST(WorkingSetIteratorTest, EmptyWorkingSetDueToFree) {
+ WorkingSet ws;
- TEST(WorkingSetIteratorTest, BasicIteratorTest) {
- WorkingSet ws;
+ WorkingSetID id = ws.allocate();
+ ws.free(id);
- WorkingSetID id1 = ws.allocate();
- WorkingSetMember* member1 = ws.get(id1);
- member1->state = WorkingSetMember::LOC_AND_IDX;
- member1->keyData.push_back(IndexKeyDatum(BSON("a" << 1), BSON("" << 3), NULL));
-
- WorkingSetID id2 = ws.allocate();
- WorkingSetMember* member2 = ws.get(id2);
- member2->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- member2->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("a" << 3));
-
- int counter = 0;
- for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
- ASSERT(it->state == WorkingSetMember::LOC_AND_IDX ||
- it->state == WorkingSetMember::LOC_AND_UNOWNED_OBJ);
- counter++;
- }
- ASSERT_EQ(counter, 2);
+ int counter = 0;
+ for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
+ counter++;
}
+ ASSERT_EQ(counter, 0);
+}
- TEST(WorkingSetIteratorTest, EmptyWorkingSet) {
- WorkingSet ws;
+TEST(WorkingSetIteratorTest, MixedFreeAndInUse) {
+ WorkingSet ws;
- int counter = 0;
- for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
- counter++;
- }
- ASSERT_EQ(counter, 0);
- }
+ WorkingSetID id1 = ws.allocate();
+ WorkingSetID id2 = ws.allocate();
+ WorkingSetID id3 = ws.allocate();
- TEST(WorkingSetIteratorTest, EmptyWorkingSetDueToFree) {
- WorkingSet ws;
+ WorkingSetMember* member = ws.get(id2);
+ member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
+ member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("a" << 3));
- WorkingSetID id = ws.allocate();
- ws.free(id);
+ ws.free(id1);
+ ws.free(id3);
- int counter = 0;
- for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
- counter++;
- }
- ASSERT_EQ(counter, 0);
+ int counter = 0;
+ for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
+ ASSERT(it->state == WorkingSetMember::LOC_AND_UNOWNED_OBJ);
+ counter++;
}
+ ASSERT_EQ(counter, 1);
+}
- TEST(WorkingSetIteratorTest, MixedFreeAndInUse) {
- WorkingSet ws;
-
- WorkingSetID id1 = ws.allocate();
- WorkingSetID id2 = ws.allocate();
- WorkingSetID id3 = ws.allocate();
+TEST(WorkingSetIteratorTest, FreeWhileIterating) {
+ WorkingSet ws;
- WorkingSetMember* member = ws.get(id2);
- member->state = WorkingSetMember::LOC_AND_UNOWNED_OBJ;
- member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("a" << 3));
+ ws.allocate();
+ ws.allocate();
+ ws.allocate();
- ws.free(id1);
- ws.free(id3);
-
- int counter = 0;
- for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
- ASSERT(it->state == WorkingSetMember::LOC_AND_UNOWNED_OBJ);
- counter++;
+ // Free the last two members during iteration.
+ int counter = 0;
+ for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
+ if (counter > 0) {
+ it.free();
}
- ASSERT_EQ(counter, 1);
+ counter++;
}
+ ASSERT_EQ(counter, 3);
- TEST(WorkingSetIteratorTest, FreeWhileIterating) {
- WorkingSet ws;
-
- ws.allocate();
- ws.allocate();
- ws.allocate();
-
- // Free the last two members during iteration.
- int counter = 0;
- for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
- if (counter > 0) {
- it.free();
- }
- counter++;
- }
- ASSERT_EQ(counter, 3);
-
- // Verify that only one item remains in the working set.
- counter = 0;
- for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
- counter++;
- }
- ASSERT_EQ(counter, 1);
+ // Verify that only one item remains in the working set.
+ counter = 0;
+ for (WorkingSet::iterator it = ws.begin(); it != ws.end(); ++it) {
+ counter++;
}
+ ASSERT_EQ(counter, 1);
+}
} // namespace
diff --git a/src/mongo/db/field_parser-inl.h b/src/mongo/db/field_parser-inl.h
index 04dbccf9e2a..b121659f59f 100644
--- a/src/mongo/db/field_parser-inl.h
+++ b/src/mongo/db/field_parser-inl.h
@@ -31,346 +31,332 @@
namespace mongo {
- using mongoutils::str::stream;
-
- template<class T>
- void _genFieldErrMsg(const BSONElement& elem,
- const BSONField<T>& field,
- const std::string expected,
- std::string* errMsg)
- {
- if (!errMsg) return;
- *errMsg = stream() << "wrong type for '" << field() << "' field, expected " << expected
- << ", found " << elem.toString();
+using mongoutils::str::stream;
+
+template <class T>
+void _genFieldErrMsg(const BSONElement& elem,
+ const BSONField<T>& field,
+ const std::string expected,
+ std::string* errMsg) {
+ if (!errMsg)
+ return;
+ *errMsg = stream() << "wrong type for '" << field() << "' field, expected " << expected
+ << ", found " << elem.toString();
+}
+
+template <typename T>
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<T>& field,
+ T* out,
+ std::string* errMsg) {
+ BSONElement elem = doc[field.name()];
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ field.getDefault().cloneTo(out);
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
+ }
}
- template<typename T>
- FieldParser::FieldState FieldParser::extract(BSONObj doc,
- const BSONField<T>& field,
- T* out,
- std::string* errMsg)
- {
- BSONElement elem = doc[field.name()];
- if (elem.eoo()) {
- if (field.hasDefault()) {
- field.getDefault().cloneTo(out);
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
+ if (elem.type() != Object && elem.type() != Array) {
+ _genFieldErrMsg(elem, field, "Object/Array", errMsg);
+ return FIELD_INVALID;
+ }
- if (elem.type() != Object && elem.type() != Array) {
- _genFieldErrMsg(elem, field, "Object/Array", errMsg);
- return FIELD_INVALID;
- }
+ if (!out->parseBSON(elem.embeddedObject(), errMsg)) {
+ return FIELD_INVALID;
+ }
- if (!out->parseBSON(elem.embeddedObject(), errMsg)) {
- return FIELD_INVALID;
+ return FIELD_SET;
+}
+
+template <typename T>
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<T*>& field,
+ T** out,
+ std::string* errMsg) {
+ BSONElement elem = doc[field.name()];
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ std::unique_ptr<T> temp(new T);
+ field.getDefault()->cloneTo(temp.get());
+
+ *out = temp.release();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
+ }
- return FIELD_SET;
+ if (elem.type() != Object && elem.type() != Array) {
+ _genFieldErrMsg(elem, field, "Object/Array", errMsg);
+ return FIELD_INVALID;
}
- template<typename T>
- FieldParser::FieldState FieldParser::extract(BSONObj doc,
- const BSONField<T*>& field,
- T** out,
- std::string* errMsg)
- {
- BSONElement elem = doc[field.name()];
- if (elem.eoo()) {
- if (field.hasDefault()) {
- std::unique_ptr<T> temp(new T);
- field.getDefault()->cloneTo(temp.get());
-
- *out = temp.release();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
+ std::unique_ptr<T> temp(new T);
+ if (!temp->parseBSON(elem.embeddedObject(), errMsg)) {
+ return FIELD_INVALID;
+ }
- if (elem.type() != Object && elem.type() != Array) {
- _genFieldErrMsg(elem, field, "Object/Array", errMsg);
- return FIELD_INVALID;
+ *out = temp.release();
+ return FIELD_SET;
+}
+
+template <typename T>
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<T>& field,
+ T** out,
+ std::string* errMsg) {
+ BSONElement elem = doc[field.name()];
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = new T;
+ field.getDefault().cloneTo(*out);
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
+ }
- std::unique_ptr<T> temp(new T);
- if (!temp->parseBSON(elem.embeddedObject(), errMsg)) {
- return FIELD_INVALID;
+ if (elem.type() != Object && elem.type() != Array) {
+ if (errMsg) {
+ *errMsg = stream() << "wrong type for '" << field() << "' field, expected "
+ << "vector or array"
+ << ", found " << doc[field.name()].toString();
}
+ return FIELD_INVALID;
+ }
- *out = temp.release();
- return FIELD_SET;
+ std::unique_ptr<T> temp(new T);
+ if (!temp->parseBSON(elem.embeddedObject(), errMsg)) {
+ return FIELD_INVALID;
}
- template<typename T>
- FieldParser::FieldState FieldParser::extract(BSONObj doc,
- const BSONField<T>& field,
- T** out,
- std::string* errMsg)
- {
- BSONElement elem = doc[field.name()];
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = new T;
- field.getDefault().cloneTo(*out);
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
+ *out = temp.release();
+ return FIELD_SET;
+}
+
+// Extracts an array into a vector
+template <typename T>
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<std::vector<T>>& field,
+ std::vector<T>* out,
+ std::string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+template <typename T>
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<std::vector<T>>& field,
+ std::vector<T>* out,
+ std::string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
+ }
- if (elem.type() != Object && elem.type() != Array) {
- if (errMsg) {
- *errMsg = stream() << "wrong type for '" << field() << "' field, expected "
- << "vector or array" << ", found "
- << doc[field.name()].toString();
- }
- return FIELD_INVALID;
- }
+ if (elem.type() == Array) {
+ BSONArray arr = BSONArray(elem.embeddedObject());
+ std::string elErrMsg;
- std::unique_ptr<T> temp(new T);
- if (!temp->parseBSON(elem.embeddedObject(), errMsg)) {
- return FIELD_INVALID;
+ // Append all the new elements to the end of the vector
+ size_t initialSize = out->size();
+ out->resize(initialSize + arr.nFields());
+
+ int i = 0;
+ BSONObjIterator objIt(arr);
+ while (objIt.more()) {
+ BSONElement next = objIt.next();
+ BSONField<T> fieldFor(next.fieldName(), out->at(initialSize + i));
+
+ if (!FieldParser::extract(next, fieldFor, &out->at(initialSize + i), &elErrMsg)) {
+ if (errMsg) {
+ *errMsg = stream() << "error parsing element " << i << " of field " << field()
+ << causedBy(elErrMsg);
+ }
+ return FIELD_INVALID;
+ }
+ i++;
}
- *out = temp.release();
return FIELD_SET;
}
- // Extracts an array into a vector
- template<typename T>
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<std::vector<T> >& field,
- std::vector<T>* out,
- std::string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (errMsg) {
+ *errMsg = stream() << "wrong type for '" << field() << "' field, expected "
+ << "vector array"
+ << ", found " << elem.toString();
+ }
+ return FIELD_INVALID;
+}
+
+template <typename T>
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<std::vector<T*>>& field,
+ std::vector<T*>* out,
+ std::string* errMsg) {
+ dassert(!field.hasDefault());
+
+ BSONElement elem = doc[field.name()];
+ if (elem.eoo()) {
+ return FIELD_NONE;
}
- template<typename T>
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<std::vector<T> >& field,
- std::vector<T>* out,
- std::string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if (elem.type() == Array) {
- BSONArray arr = BSONArray(elem.embeddedObject());
- std::string elErrMsg;
-
- // Append all the new elements to the end of the vector
- size_t initialSize = out->size();
- out->resize(initialSize + arr.nFields());
-
- int i = 0;
- BSONObjIterator objIt(arr);
- while (objIt.more()) {
- BSONElement next = objIt.next();
- BSONField<T> fieldFor(next.fieldName(), out->at(initialSize + i));
-
- if (!FieldParser::extract(next,
- fieldFor,
- &out->at(initialSize + i),
- &elErrMsg))
- {
- if (errMsg) {
- *errMsg = stream() << "error parsing element " << i << " of field "
- << field() << causedBy(elErrMsg);
- }
- return FIELD_INVALID;
- }
- i++;
- }
-
- return FIELD_SET;
- }
+ return extract(elem, field, out, errMsg);
+}
+template <typename T>
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<std::vector<T*>>& field,
+ std::vector<T*>* out,
+ std::string* errMsg) {
+ if (elem.type() != Array) {
if (errMsg) {
*errMsg = stream() << "wrong type for '" << field() << "' field, expected "
- << "vector array" << ", found " << elem.toString();
+ << "vector array"
+ << ", found " << elem.toString();
}
return FIELD_INVALID;
}
- template<typename T>
- FieldParser::FieldState FieldParser::extract(BSONObj doc,
- const BSONField<std::vector<T*> >& field,
- std::vector<T*>* out,
- std::string* errMsg) {
- dassert(!field.hasDefault());
+ BSONArray arr = BSONArray(elem.embeddedObject());
+ BSONObjIterator objIt(arr);
+ while (objIt.more()) {
+ BSONElement next = objIt.next();
- BSONElement elem = doc[field.name()];
- if (elem.eoo()) {
- return FIELD_NONE;
- }
-
- return extract(elem, field, out, errMsg);
- }
-
- template<typename T>
- FieldParser::FieldState FieldParser::extract(BSONElement elem,
- const BSONField<std::vector<T*> >& field,
- std::vector<T*>* out,
- std::string* errMsg) {
-
- if (elem.type() != Array) {
+ if (next.type() != Object) {
if (errMsg) {
- *errMsg = stream() << "wrong type for '" << field() << "' field, expected "
- << "vector array" << ", found " << elem.toString();
+ *errMsg = stream() << "wrong type for '" << field() << "' field contents, "
+ << "expected object, found " << elem.type();
}
return FIELD_INVALID;
}
- BSONArray arr = BSONArray(elem.embeddedObject());
- BSONObjIterator objIt(arr);
- while (objIt.more()) {
+ std::unique_ptr<T> toInsert(new T);
- BSONElement next = objIt.next();
-
- if (next.type() != Object) {
- if (errMsg) {
- *errMsg = stream() << "wrong type for '" << field() << "' field contents, "
- << "expected object, found " << elem.type();
- }
- return FIELD_INVALID;
- }
-
- std::unique_ptr<T> toInsert(new T);
+ if (!toInsert->parseBSON(next.embeddedObject(), errMsg) || !toInsert->isValid(errMsg)) {
+ return FIELD_INVALID;
+ }
- if ( !toInsert->parseBSON( next.embeddedObject(), errMsg )
- || !toInsert->isValid( errMsg ) ) {
- return FIELD_INVALID;
- }
+ out->push_back(toInsert.release());
+ }
- out->push_back(toInsert.release());
- }
+ return FIELD_SET;
+}
- return FIELD_SET;
+template <typename T>
+void FieldParser::clearOwnedVector(std::vector<T*>* vec) {
+ for (typename std::vector<T*>::iterator it = vec->begin(); it != vec->end(); ++it) {
+ delete (*it);
+ }
+}
+
+template <typename T>
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<std::vector<T*>>& field,
+ std::vector<T*>** out,
+ std::string* errMsg) {
+ dassert(!field.hasDefault());
+
+ BSONElement elem = doc[field.name()];
+ if (elem.eoo()) {
+ return FIELD_NONE;
}
- template<typename T>
- void FieldParser::clearOwnedVector(std::vector<T*>* vec) {
- for (typename std::vector<T*>::iterator it = vec->begin(); it != vec->end(); ++it) {
- delete (*it);
+ if (elem.type() != Array) {
+ if (errMsg) {
+ *errMsg = stream() << "wrong type for '" << field() << "' field, expected "
+ << "vector array"
+ << ", found " << doc[field.name()].toString();
}
+ return FIELD_INVALID;
}
- template<typename T>
- FieldParser::FieldState FieldParser::extract(BSONObj doc,
- const BSONField<std::vector<T*> >& field,
- std::vector<T*>** out,
- std::string* errMsg) {
- dassert(!field.hasDefault());
+ std::unique_ptr<std::vector<T*>> tempVector(new std::vector<T*>);
- BSONElement elem = doc[field.name()];
- if (elem.eoo()) {
- return FIELD_NONE;
- }
+ BSONArray arr = BSONArray(elem.embeddedObject());
+ BSONObjIterator objIt(arr);
+ while (objIt.more()) {
+ BSONElement next = objIt.next();
- if (elem.type() != Array) {
+ if (next.type() != Object) {
if (errMsg) {
- *errMsg = stream() << "wrong type for '" << field() << "' field, expected "
- << "vector array" << ", found " << doc[field.name()].toString();
+ *errMsg = stream() << "wrong type for '" << field() << "' field contents, "
+ << "expected object, found " << elem.type();
}
+ clearOwnedVector(tempVector.get());
return FIELD_INVALID;
}
- std::unique_ptr<std::vector<T*> > tempVector(new std::vector<T*>);
+ std::unique_ptr<T> toInsert(new T);
+ if (!toInsert->parseBSON(next.embeddedObject(), errMsg)) {
+ clearOwnedVector(tempVector.get());
+ return FIELD_INVALID;
+ }
- BSONArray arr = BSONArray(elem.embeddedObject());
- BSONObjIterator objIt(arr);
- while (objIt.more()) {
+ tempVector->push_back(toInsert.release());
+ }
+
+ *out = tempVector.release();
+ return FIELD_SET;
+}
+
+// Extracts an object into a map
+template <typename K, typename T>
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<std::map<K, T>>& field,
+ std::map<K, T>* out,
+ std::string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+template <typename K, typename T>
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<std::map<K, T>>& field,
+ std::map<K, T>* out,
+ std::string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
+ }
+ }
+
+ if (elem.type() == Object) {
+ BSONObj obj = elem.embeddedObject();
+ std::string elErrMsg;
+ BSONObjIterator objIt(obj);
+ while (objIt.more()) {
BSONElement next = objIt.next();
+ T& value = (*out)[next.fieldName()];
- if (next.type() != Object) {
+ BSONField<T> fieldFor(next.fieldName(), value);
+ if (!FieldParser::extract(next, fieldFor, &value, &elErrMsg)) {
if (errMsg) {
- *errMsg = stream() << "wrong type for '" << field() << "' field contents, "
- << "expected object, found " << elem.type();
+ *errMsg = stream() << "error parsing map element " << next.fieldName()
+ << " of field " << field() << causedBy(elErrMsg);
}
- clearOwnedVector(tempVector.get());
- return FIELD_INVALID;
- }
-
- std::unique_ptr<T> toInsert(new T);
- if (!toInsert->parseBSON(next.embeddedObject(), errMsg)) {
- clearOwnedVector(tempVector.get());
return FIELD_INVALID;
}
-
- tempVector->push_back(toInsert.release());
}
- *out = tempVector.release();
return FIELD_SET;
}
- // Extracts an object into a map
- template<typename K, typename T>
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<std::map<K, T> >& field,
- std::map<K, T>* out,
- std::string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
- }
-
- template<typename K, typename T>
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<std::map<K, T> >& field,
- std::map<K, T>* out,
- std::string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if (elem.type() == Object) {
- BSONObj obj = elem.embeddedObject();
- std::string elErrMsg;
-
- BSONObjIterator objIt(obj);
- while (objIt.more()) {
- BSONElement next = objIt.next();
- T& value = (*out)[next.fieldName()];
-
- BSONField<T> fieldFor(next.fieldName(), value);
- if (!FieldParser::extract(next, fieldFor, &value, &elErrMsg)) {
- if (errMsg) {
- *errMsg = stream() << "error parsing map element " << next.fieldName()
- << " of field " << field() << causedBy(elErrMsg);
- }
- return FIELD_INVALID;
- }
- }
-
- return FIELD_SET;
- }
-
- if (errMsg) {
- *errMsg = stream() << "wrong type for '" << field() << "' field, expected "
- << "vector array" << ", found " << elem.toString();
- }
- return FIELD_INVALID;
+ if (errMsg) {
+ *errMsg = stream() << "wrong type for '" << field() << "' field, expected "
+ << "vector array"
+ << ", found " << elem.toString();
}
+ return FIELD_INVALID;
+}
-} // namespace mongo
+} // namespace mongo
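
For reference, a minimal sketch of the templated array extraction above (not part of this patch; 'ShardEntry' is a hypothetical type providing the parseBSON(const BSONObj&, std::string*) hook these templates expect, and 'doc' is an assumed BSONObj):

    // Illustrative sketch only -- 'ShardEntry' and 'doc' are assumptions.
    BSONField<std::vector<ShardEntry>> entriesField("entries");
    std::vector<ShardEntry> entries;
    std::string errMsg;
    if (FieldParser::extract(doc, entriesField, &entries, &errMsg) == FieldParser::FIELD_INVALID) {
        // errMsg identifies the element (or the field type) that failed to parse.
    }
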
diff --git a/src/mongo/db/field_parser.cpp b/src/mongo/db/field_parser.cpp
index 6437c35febf..e06806b6ea8 100644
--- a/src/mongo/db/field_parser.cpp
+++ b/src/mongo/db/field_parser.cpp
@@ -32,442 +32,418 @@
namespace mongo {
- using std::string;
-
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<bool>& field,
- bool* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
- }
-
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<bool>& field,
- bool* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
+using std::string;
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<bool>& field,
+ bool* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<bool>& field,
+ bool* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- if (elem.type() == Bool) {
- *out = elem.boolean();
- return FIELD_SET;
- }
-
- _genFieldErrMsg(elem, field, "boolean", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<BSONArray>& field,
- BSONArray* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.type() == Bool) {
+ *out = elem.boolean();
+ return FIELD_SET;
}
-
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<BSONArray>& field,
- BSONArray* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
+ _genFieldErrMsg(elem, field, "boolean", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<BSONArray>& field,
+ BSONArray* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<BSONArray>& field,
+ BSONArray* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- if (elem.type() == Array) {
- *out = BSONArray(elem.embeddedObject().getOwned());
- return FIELD_SET;
- }
-
- _genFieldErrMsg(elem, field, "array", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<BSONObj>& field,
- BSONObj* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.type() == Array) {
+ *out = BSONArray(elem.embeddedObject().getOwned());
+ return FIELD_SET;
}
-
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<BSONObj>& field,
- BSONObj* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault().getOwned();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if (elem.type() == Object) {
- *out = elem.embeddedObject().getOwned();
- return FIELD_SET;
+ _genFieldErrMsg(elem, field, "array", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<BSONObj>& field,
+ BSONObj* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<BSONObj>& field,
+ BSONObj* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault().getOwned();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- _genFieldErrMsg(elem, field, "object", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<Date_t>& field,
- Date_t* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.type() == Object) {
+ *out = elem.embeddedObject().getOwned();
+ return FIELD_SET;
}
-
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<Date_t>& field,
- Date_t* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if (elem.type() == Date) {
- *out = elem.date();
- return FIELD_SET;
+ _genFieldErrMsg(elem, field, "object", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<Date_t>& field,
+ Date_t* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<Date_t>& field,
+ Date_t* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- _genFieldErrMsg(elem, field, "date", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<Timestamp>& field,
- Timestamp* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.type() == Date) {
+ *out = elem.date();
+ return FIELD_SET;
}
-
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<Timestamp>& field,
- Timestamp* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
+ _genFieldErrMsg(elem, field, "date", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<Timestamp>& field,
+ Timestamp* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<Timestamp>& field,
+ Timestamp* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- if (elem.type() == bsonTimestamp) {
- *out = elem.timestamp();
- return FIELD_SET;
- }
-
- _genFieldErrMsg(elem, field, "timestamp", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<string>& field,
- string* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.type() == bsonTimestamp) {
+ *out = elem.timestamp();
+ return FIELD_SET;
}
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<string>& field,
- string* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if (elem.type() == String) {
- // Extract everything, including embedded null characters.
- *out = string(elem.valuestr(), elem.valuestrsize() - 1);
- return FIELD_SET;
+ _genFieldErrMsg(elem, field, "timestamp", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<string>& field,
+ string* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<string>& field,
+ string* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- _genFieldErrMsg(elem, field, "string", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<OID>& field,
- OID* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.type() == String) {
+ // Extract everything, including embedded null characters.
+ *out = string(elem.valuestr(), elem.valuestrsize() - 1);
+ return FIELD_SET;
}
-
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<OID>& field,
- OID* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
+ _genFieldErrMsg(elem, field, "string", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<OID>& field,
+ OID* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<OID>& field,
+ OID* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- if (elem.type() == jstOID) {
- *out = elem.__oid();
- return FIELD_SET;
- }
-
- _genFieldErrMsg(elem, field, "OID", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<int>& field,
- int* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.type() == jstOID) {
+ *out = elem.__oid();
+ return FIELD_SET;
}
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<int>& field,
- int* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
+ _genFieldErrMsg(elem, field, "OID", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<int>& field,
+ int* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<int>& field,
+ int* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- if (elem.type() == NumberInt) {
- *out = elem.numberInt();
- return FIELD_SET;
- }
-
- _genFieldErrMsg(elem, field, "integer", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extractNumber( BSONObj doc,
- const BSONField<int>& field,
- int* out,
- string* errMsg ) {
- return extractNumber( doc[field.name()], field, out, errMsg );
+ if (elem.type() == NumberInt) {
+ *out = elem.numberInt();
+ return FIELD_SET;
}
- FieldParser::FieldState FieldParser::extractNumber( BSONElement elem,
- const BSONField<int>& field,
- int* out,
- string* errMsg ) {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
+ _genFieldErrMsg(elem, field, "integer", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extractNumber(BSONObj doc,
+ const BSONField<int>& field,
+ int* out,
+ string* errMsg) {
+ return extractNumber(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extractNumber(BSONElement elem,
+ const BSONField<int>& field,
+ int* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- if (elem.isNumber()) {
- *out = elem.numberInt();
- return FIELD_SET;
- }
-
- _genFieldErrMsg(elem, field, "number", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<long long>& field,
- long long* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.isNumber()) {
+ *out = elem.numberInt();
+ return FIELD_SET;
}
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<long long>& field,
- long long* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if (elem.type() == NumberLong) {
- *out = elem.numberLong();
- return FIELD_SET;
+ _genFieldErrMsg(elem, field, "number", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<long long>& field,
+ long long* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<long long>& field,
+ long long* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- _genFieldErrMsg(elem, field, "long", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extractNumber( BSONObj doc,
- const BSONField<long long>& field,
- long long* out,
- string* errMsg ) {
- return extractNumber( doc[field.name()], field, out, errMsg );
+ if (elem.type() == NumberLong) {
+ *out = elem.numberLong();
+ return FIELD_SET;
}
- FieldParser::FieldState FieldParser::extractNumber( BSONElement elem,
- const BSONField<long long>& field,
- long long* out,
- string* errMsg ) {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
+ _genFieldErrMsg(elem, field, "long", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extractNumber(BSONObj doc,
+ const BSONField<long long>& field,
+ long long* out,
+ string* errMsg) {
+ return extractNumber(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extractNumber(BSONElement elem,
+ const BSONField<long long>& field,
+ long long* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- if (elem.isNumber()) {
- *out = elem.numberLong();
- return FIELD_SET;
- }
-
- _genFieldErrMsg(elem, field, "number", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extract( BSONObj doc,
- const BSONField<double>& field,
- double* out,
- string* errMsg ) {
- return extract( doc[field.name()], field, out, errMsg );
+ if (elem.isNumber()) {
+ *out = elem.numberLong();
+ return FIELD_SET;
}
- FieldParser::FieldState FieldParser::extract( BSONElement elem,
- const BSONField<double>& field,
- double* out,
- string* errMsg )
- {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if (elem.type() == NumberDouble) {
- *out = elem.numberDouble();
- return FIELD_SET;
+ _genFieldErrMsg(elem, field, "number", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extract(BSONObj doc,
+ const BSONField<double>& field,
+ double* out,
+ string* errMsg) {
+ return extract(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extract(BSONElement elem,
+ const BSONField<double>& field,
+ double* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- _genFieldErrMsg(elem, field, "double", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extractNumber( BSONObj doc,
- const BSONField<double>& field,
- double* out,
- string* errMsg ) {
- return extractNumber( doc[field.name()], field, out, errMsg );
+ if (elem.type() == NumberDouble) {
+ *out = elem.numberDouble();
+ return FIELD_SET;
}
- FieldParser::FieldState FieldParser::extractNumber( BSONElement elem,
- const BSONField<double>& field,
- double* out,
- string* errMsg ) {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault();
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if (elem.isNumber()) {
- *out = elem.numberDouble();
- return FIELD_SET;
+ _genFieldErrMsg(elem, field, "double", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extractNumber(BSONObj doc,
+ const BSONField<double>& field,
+ double* out,
+ string* errMsg) {
+ return extractNumber(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extractNumber(BSONElement elem,
+ const BSONField<double>& field,
+ double* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault();
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
-
- _genFieldErrMsg(elem, field, "number", errMsg);
- return FIELD_INVALID;
}
- FieldParser::FieldState FieldParser::extractID( BSONObj doc,
- const BSONField<BSONObj>& field,
- BSONObj* out,
- string* errMsg ) {
- return extractID( doc[field.name()], field, out, errMsg );
+ if (elem.isNumber()) {
+ *out = elem.numberDouble();
+ return FIELD_SET;
}
- FieldParser::FieldState FieldParser::extractID( BSONElement elem,
- const BSONField<BSONObj>& field,
- BSONObj* out,
- string* errMsg ) {
- if (elem.eoo()) {
- if (field.hasDefault()) {
- *out = field.getDefault().firstElement().wrap( "" );
- return FIELD_DEFAULT;
- }
- else {
- return FIELD_NONE;
- }
- }
-
- if ( elem.type() != Array ) {
- *out = elem.wrap( "" ).getOwned();
- return FIELD_SET;
+ _genFieldErrMsg(elem, field, "number", errMsg);
+ return FIELD_INVALID;
+}
+
+FieldParser::FieldState FieldParser::extractID(BSONObj doc,
+ const BSONField<BSONObj>& field,
+ BSONObj* out,
+ string* errMsg) {
+ return extractID(doc[field.name()], field, out, errMsg);
+}
+
+FieldParser::FieldState FieldParser::extractID(BSONElement elem,
+ const BSONField<BSONObj>& field,
+ BSONObj* out,
+ string* errMsg) {
+ if (elem.eoo()) {
+ if (field.hasDefault()) {
+ *out = field.getDefault().firstElement().wrap("");
+ return FIELD_DEFAULT;
+ } else {
+ return FIELD_NONE;
}
+ }
- _genFieldErrMsg(elem, field, "id", errMsg);
- return FIELD_INVALID;
+ if (elem.type() != Array) {
+ *out = elem.wrap("").getOwned();
+ return FIELD_SET;
}
-} // namespace mongo
+ _genFieldErrMsg(elem, field, "id", errMsg);
+ return FIELD_INVALID;
+}
+
+} // namespace mongo
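
For reference, a minimal sketch of scalar extraction with a declared default (not part of this patch; the field name, the default value, and 'doc' are assumptions):

    // Illustrative sketch only -- field name, default, and 'doc' are assumptions.
    BSONField<long long> maxSizeField("maxSize", 0LL);
    long long maxSize;
    std::string errMsg;
    FieldParser::FieldState state = FieldParser::extractNumber(doc, maxSizeField, &maxSize, &errMsg);
    if (state == FieldParser::FIELD_INVALID) {
        // errMsg reports the type that was actually found.
    } else if (state == FieldParser::FIELD_DEFAULT) {
        // "maxSize" was absent; maxSize now holds the declared default (0).
    }
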
diff --git a/src/mongo/db/field_parser.h b/src/mongo/db/field_parser.h
index 2b8da3eaa0a..01487d8c894 100644
--- a/src/mongo/db/field_parser.h
+++ b/src/mongo/db/field_parser.h
@@ -36,310 +36,309 @@
namespace mongo {
- class FieldParser {
- public:
- /**
- * Returns true and fills in 'out' with the contents of the field described by 'field'
- * or with the value in 'def', depending on whether the field is present and has the
- * correct type in 'doc' or not, respectively. Otherwise, if the field exists but has
- * the wrong type, returns false.
- *
- * NOTE ON BSON OWNERSHIP:
- *
- * The caller must assume that this class will point to data inside 'doc' without
- * copying it. In practice this means that 'doc' MUST EXIST for as long as 'out'
- * stays in scope.
- */
-
- enum FieldState {
- // The field is present but has the wrong type
- FIELD_INVALID = 0,
-
- // The field is present and has the correct type
- FIELD_SET,
-
- // The field is absent in the BSON object but set from default
- FIELD_DEFAULT,
-
- // The field is absent and no default was specified
- FIELD_NONE
- };
-
- static FieldState extract( BSONObj doc,
- const BSONField<bool>& field,
- bool* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<bool>& field,
- bool* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<BSONArray>& field,
- BSONArray* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<BSONArray>& field,
- BSONArray* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<BSONObj>& field,
- BSONObj* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<BSONObj>& field,
- BSONObj* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<Date_t>& field,
- Date_t* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<Date_t>& field,
- Date_t* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<Timestamp>& field,
- Timestamp* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<Timestamp>& field,
- Timestamp* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<std::string>& field,
- std::string* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<std::string>& field,
- std::string* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<OID>& field,
- OID* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<OID>& field,
- OID* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<int>& field,
- int* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<int>& field,
- int* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<long long>& field,
- long long* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<long long>& field,
- long long* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONElement elem,
- const BSONField<double>& field,
- double* out,
- std::string* errMsg = NULL );
-
- static FieldState extract( BSONObj doc,
- const BSONField<double>& field,
- double* out,
- std::string* errMsg = NULL );
-
- /**
- * The following extractNumber methods do implicit conversion between any numeric type and
- * the BSONField type. This can be useful when an exact numeric type is not needed, for
- * example if the field is sometimes modified from the shell which can change the type.
- */
- static FieldState extractNumber( BSONObj doc,
- const BSONField<int>& field,
- int* out,
- std::string* errMsg = NULL );
-
- static FieldState extractNumber( BSONElement elem,
- const BSONField<int>& field,
- int* out,
- std::string* errMsg = NULL );
-
- static FieldState extractNumber( BSONObj doc,
- const BSONField<long long>& field,
- long long* out,
- std::string* errMsg = NULL );
-
- static FieldState extractNumber( BSONElement elem,
- const BSONField<long long>& field,
- long long* out,
- std::string* errMsg = NULL );
-
- static FieldState extractNumber( BSONObj doc,
- const BSONField<double>& field,
- double* out,
- std::string* errMsg = NULL );
-
- static FieldState extractNumber( BSONElement elem,
- const BSONField<double>& field,
- double* out,
- std::string* errMsg = NULL );
-
- /**
- * Extracts a document id from a particular field name, which may be of any type but Array.
- * Wraps the extracted id value in a BSONObj with one element and empty field name.
- */
- static FieldState extractID( BSONObj doc,
- const BSONField<BSONObj>& field,
- BSONObj* out,
- std::string* errMsg = NULL );
-
- static FieldState extractID( BSONElement elem,
- const BSONField<BSONObj>& field,
- BSONObj* out,
- std::string* errMsg = NULL );
-
- // TODO: BSONElement extraction of types below
-
- /**
- * Extracts a mandatory BSONSerializable structure 'field' from the object 'doc'. Write
- * the extracted contents to '*out' if successful or fills '*errMsg', if exising,
- * otherwise. This variant relies on T having a parseBSON, which all
- * BSONSerializable's have.
- *
- * TODO: Tighten for BSONSerializable's only
- */
- template<typename T>
- static FieldState extract(BSONObj doc,
- const BSONField<T>& field,
- T* out,
- std::string* errMsg = NULL);
-
- template<typename T>
- static FieldState extract(BSONObj doc,
- const BSONField<T*>& field,
- T** out,
- std::string* errMsg = NULL);
-
- /**
- * Similar to the mandatory 'extract' but on a optional field. '*out' would only be
- * allocated if the field is present. The ownership of '*out' would be transferred to
- * the caller, in that case.
- *
- * TODO: Tighten for BSONSerializable's only
- */
- template<typename T>
- static FieldState extract(BSONObj doc,
- const BSONField<T>& field,
- T** out, // alloc variation
- std::string* errMsg = NULL);
-
- /**
- * Extracts a mandatory repetition of BSONSerializable structures, 'field', from the
- * object 'doc'. Write the extracted contents to '*out' if successful or fills
- * '*errMsg', if exising, otherwise. This variant relies on T having a parseBSON,
- * which all BSONSerializable's have.
- *
- * The vector owns the instances of T.
- *
- * TODO: Tighten for BSONSerializable's only
- */
- template<typename T>
- static FieldState extract(BSONObj doc,
- const BSONField<std::vector<T*> >& field,
- std::vector<T*>* out,
- std::string* errMsg = NULL);
-
- /**
- * Extracts a mandatory repetition of BSONSerializable structures, 'field', from the
- * field 'elem'. Write the extracted contents to '*out' if successful or fills
- * '*errMsg', if exising, otherwise. This variant relies on T having a parseBSON,
- * which all BSONSerializable's have.
- *
- * The vector owns the instances of T.
- *
- * TODO: Tighten for BSONSerializable's only
- */
- template<typename T>
- static FieldState extract(BSONElement elem,
- const BSONField<std::vector<T*> >& field,
- std::vector<T*>* out,
- std::string* errMsg = NULL);
-
- /**
- * Similar to the mandatory repetition' extract but on an optional field. '*out' would
- * only be allocated if the field is present. The ownership of '*out' would be
- * transferred to the caller, in that case.
- *
- * The vector owns the instances of T.
- *
- * TODO: Tighten for BSONSerializable's only
- */
- template<typename T>
- static FieldState extract(BSONObj doc,
- const BSONField<std::vector<T*> >& field,
- std::vector<T*>** out,
- std::string* errMsg = NULL);
-
- //
- // ==================== Below DEPRECATED; use types instead ====================
- //
-
- /**
- * The following extract methods are templatized to handle extraction of vectors and
- * maps of sub-objects. Keys in the map should be StringData compatible.
- *
- * It's possible to nest extraction of vectors and maps to any depth, i.e:
- *
- * std::vector<map<std::string,vector<std::string> > > val;
- * FieldParser::extract(doc, field, val, &val);
- */
- template<typename T>
- static FieldState extract( BSONObj doc,
- const BSONField<std::vector<T> >& field,
- std::vector<T>* out,
- std::string* errMsg = NULL );
-
- template<typename T>
- static FieldState extract( BSONElement elem,
- const BSONField<std::vector<T> >& field,
- std::vector<T>* out,
- std::string* errMsg = NULL );
-
- template<typename K, typename T>
- static FieldState extract( BSONObj doc,
- const BSONField<std::map<K, T> >& field,
- std::map<K, T>* out,
- std::string* errMsg = NULL );
-
- template<typename K, typename T>
- static FieldState extract( BSONElement elem,
- const BSONField<std::map<K, T> >& field,
- std::map<K, T>* out,
- std::string* errMsg = NULL );
-
- private:
- template<typename T>
- static void clearOwnedVector(std::vector<T*>* vec);
+class FieldParser {
+public:
+ /**
+     * Fills in 'out' with the contents of the field described by 'field' when the field is
+     * present in 'doc' and has the correct type, or with the field's default value when the
+     * field is absent but a default was specified. If the field exists but has the wrong
+     * type, extraction fails; the FieldState values below report which case occurred.
+ *
+ * NOTE ON BSON OWNERSHIP:
+ *
+ * The caller must assume that this class will point to data inside 'doc' without
+ * copying it. In practice this means that 'doc' MUST EXIST for as long as 'out'
+ * stays in scope.
+ */
+
+ enum FieldState {
+ // The field is present but has the wrong type
+ FIELD_INVALID = 0,
+
+ // The field is present and has the correct type
+ FIELD_SET,
+
+ // The field is absent in the BSON object but set from default
+ FIELD_DEFAULT,
+
+ // The field is absent and no default was specified
+ FIELD_NONE
};
-} // namespace mongo
+ static FieldState extract(BSONObj doc,
+ const BSONField<bool>& field,
+ bool* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<bool>& field,
+ bool* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<BSONArray>& field,
+ BSONArray* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<BSONArray>& field,
+ BSONArray* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<BSONObj>& field,
+ BSONObj* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<BSONObj>& field,
+ BSONObj* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<Date_t>& field,
+ Date_t* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<Date_t>& field,
+ Date_t* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<Timestamp>& field,
+ Timestamp* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<Timestamp>& field,
+ Timestamp* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<std::string>& field,
+ std::string* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<std::string>& field,
+ std::string* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<OID>& field,
+ OID* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<OID>& field,
+ OID* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<int>& field,
+ int* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<int>& field,
+ int* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<long long>& field,
+ long long* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<long long>& field,
+ long long* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONElement elem,
+ const BSONField<double>& field,
+ double* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extract(BSONObj doc,
+ const BSONField<double>& field,
+ double* out,
+ std::string* errMsg = NULL);
+
+ /**
+ * The following extractNumber methods do implicit conversion between any numeric type and
+ * the BSONField type. This can be useful when an exact numeric type is not needed, for
+     * example if the field is sometimes modified from the shell, which can change the type.
+ */
+ static FieldState extractNumber(BSONObj doc,
+ const BSONField<int>& field,
+ int* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extractNumber(BSONElement elem,
+ const BSONField<int>& field,
+ int* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extractNumber(BSONObj doc,
+ const BSONField<long long>& field,
+ long long* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extractNumber(BSONElement elem,
+ const BSONField<long long>& field,
+ long long* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extractNumber(BSONObj doc,
+ const BSONField<double>& field,
+ double* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extractNumber(BSONElement elem,
+ const BSONField<double>& field,
+ double* out,
+ std::string* errMsg = NULL);
+
+ /**
+ * Extracts a document id from a particular field name, which may be of any type but Array.
+ * Wraps the extracted id value in a BSONObj with one element and empty field name.
+ */
+ static FieldState extractID(BSONObj doc,
+ const BSONField<BSONObj>& field,
+ BSONObj* out,
+ std::string* errMsg = NULL);
+
+ static FieldState extractID(BSONElement elem,
+ const BSONField<BSONObj>& field,
+ BSONObj* out,
+ std::string* errMsg = NULL);
+
+ // TODO: BSONElement extraction of types below
+
+ /**
+     * Extracts a mandatory BSONSerializable structure 'field' from the object 'doc'. Writes
+     * the extracted contents to '*out' if successful, or fills '*errMsg', if provided,
+     * otherwise. This variant relies on T having a parseBSON, which all
+     * BSONSerializables have.
+ *
+ * TODO: Tighten for BSONSerializable's only
+ */
+ template <typename T>
+ static FieldState extract(BSONObj doc,
+ const BSONField<T>& field,
+ T* out,
+ std::string* errMsg = NULL);
+
+ template <typename T>
+ static FieldState extract(BSONObj doc,
+ const BSONField<T*>& field,
+ T** out,
+ std::string* errMsg = NULL);
+
+ /**
+     * Similar to the mandatory 'extract' but on an optional field. '*out' is only
+     * allocated if the field is present, in which case the ownership of '*out' is
+     * transferred to the caller.
+ *
+ * TODO: Tighten for BSONSerializable's only
+ */
+ template <typename T>
+ static FieldState extract(BSONObj doc,
+ const BSONField<T>& field,
+ T** out, // alloc variation
+ std::string* errMsg = NULL);
+
+ /**
+ * Extracts a mandatory repetition of BSONSerializable structures, 'field', from the
+     * object 'doc'. Writes the extracted contents to '*out' if successful, or fills
+     * '*errMsg', if provided, otherwise. This variant relies on T having a parseBSON,
+     * which all BSONSerializables have.
+ *
+ * The vector owns the instances of T.
+ *
+ * TODO: Tighten for BSONSerializable's only
+ */
+ template <typename T>
+ static FieldState extract(BSONObj doc,
+ const BSONField<std::vector<T*>>& field,
+ std::vector<T*>* out,
+ std::string* errMsg = NULL);
+
+ /**
+ * Extracts a mandatory repetition of BSONSerializable structures, 'field', from the
+     * field 'elem'. Writes the extracted contents to '*out' if successful, or fills
+     * '*errMsg', if provided, otherwise. This variant relies on T having a parseBSON,
+     * which all BSONSerializables have.
+ *
+ * The vector owns the instances of T.
+ *
+ * TODO: Tighten for BSONSerializable's only
+ */
+ template <typename T>
+ static FieldState extract(BSONElement elem,
+ const BSONField<std::vector<T*>>& field,
+ std::vector<T*>* out,
+ std::string* errMsg = NULL);
+
+ /**
+     * Similar to the mandatory repetition 'extract' but on an optional field. '*out' is
+     * only allocated if the field is present, in which case the ownership of '*out' is
+     * transferred to the caller.
+ *
+ * The vector owns the instances of T.
+ *
+ * TODO: Tighten for BSONSerializable's only
+ */
+ template <typename T>
+ static FieldState extract(BSONObj doc,
+ const BSONField<std::vector<T*>>& field,
+ std::vector<T*>** out,
+ std::string* errMsg = NULL);
+
+ //
+ // ==================== Below DEPRECATED; use types instead ====================
+ //
+
+ /**
+ * The following extract methods are templatized to handle extraction of vectors and
+ * maps of sub-objects. Keys in the map should be StringData compatible.
+ *
+     * It's possible to nest extraction of vectors and maps to any depth, e.g.:
+     *
+     *     std::vector<std::map<std::string, std::vector<std::string>>> val;
+     *     FieldParser::extract(doc, field, &val);
+ */
+ template <typename T>
+ static FieldState extract(BSONObj doc,
+ const BSONField<std::vector<T>>& field,
+ std::vector<T>* out,
+ std::string* errMsg = NULL);
+
+ template <typename T>
+ static FieldState extract(BSONElement elem,
+ const BSONField<std::vector<T>>& field,
+ std::vector<T>* out,
+ std::string* errMsg = NULL);
+
+ template <typename K, typename T>
+ static FieldState extract(BSONObj doc,
+ const BSONField<std::map<K, T>>& field,
+ std::map<K, T>* out,
+ std::string* errMsg = NULL);
+
+ template <typename K, typename T>
+ static FieldState extract(BSONElement elem,
+ const BSONField<std::map<K, T>>& field,
+ std::map<K, T>* out,
+ std::string* errMsg = NULL);
+
+private:
+ template <typename T>
+ static void clearOwnedVector(std::vector<T*>* vec);
+};
+
+} // namespace mongo
// Inline functions for templating
#include "field_parser-inl.h"
-
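The extractID declarations above accept any non-array value and hand it back wrapped in a one-element BSONObj with an empty field name. A hedged sketch of what that looks like to a caller (the document contents and the helper function are invented):

    #include "mongo/db/field_parser.h"
    #include "mongo/db/jsobj.h"

    namespace {

    void exampleExtractID() {
        mongo::BSONObj doc = BSON("_id" << mongo::OID::gen() << "x" << 1);
        mongo::BSONField<mongo::BSONObj> idField("_id");
        mongo::BSONObj idWrapper;
        if (mongo::FieldParser::extractID(doc, idField, &idWrapper) ==
            mongo::FieldParser::FIELD_SET) {
            // idWrapper now holds { "": <id value> }: the value under an empty field
            // name, owned by idWrapper rather than pointing into 'doc'.
            mongo::BSONElement idValue = idWrapper.firstElement();
            (void)idValue;
        }
    }

    }  // namespace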
diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp
index 184d883a2b4..2e9027362c8 100644
--- a/src/mongo/db/field_parser_test.cpp
+++ b/src/mongo/db/field_parser_test.cpp
@@ -37,407 +37,420 @@
namespace {
- using mongo::BSONArray;
- using mongo::BSONField;
- using mongo::BSONObj;
- using mongo::BSONObjBuilder;
- using mongo::Date_t;
- using mongo::FieldParser;
- using mongo::OID;
- using std::string;
- using std::vector;
- using std::map;
-
- class ExtractionFixture: public mongo::unittest::Test {
- protected:
- BSONObj doc;
-
- bool valBool;
- BSONArray valArray;
- BSONObj valObj;
- Date_t valDate;
- string valString;
- OID valOID;
- long long valLong;
-
- static BSONField<bool> aBool;
- static BSONField<BSONArray> anArray;
- static BSONField<BSONObj> anObj;
- static BSONField<Date_t> aDate;
- static BSONField<string> aString;
- static BSONField<OID> anOID;
- static BSONField<long long> aLong;
-
- void setUp() {
- valBool = true;
- valArray = BSON_ARRAY(1 << 2 << 3);
- valObj = BSON("a" << 1);
- valDate = Date_t::fromMillisSinceEpoch(1);
- valString = "a string";
- valOID = OID::gen();
- valLong = 1LL;
-
- doc = BSON(aBool(valBool) <<
- anArray(valArray) <<
- anObj(valObj) <<
- aDate(valDate) <<
- aString(valString) <<
- anOID(valOID) <<
- aLong(valLong));
- }
-
- void tearDown() {
- }
- };
-
- BSONField<bool> ExtractionFixture::aBool("aBool");
- BSONField<BSONArray> ExtractionFixture::anArray("anArray");
- BSONField<BSONObj> ExtractionFixture::anObj("anObj");
- BSONField<Date_t> ExtractionFixture::aDate("aDate");
- BSONField<string> ExtractionFixture::aString("aString");
- BSONField<OID> ExtractionFixture::anOID("anOID");
- BSONField<long long> ExtractionFixture::aLong("aLong");
-
- TEST_F(ExtractionFixture, GetBool) {
- BSONField<bool> notThere("otherBool", true);
- BSONField<bool> wrongType(anObj.name());
- bool val;
- ASSERT_TRUE(FieldParser::extract(doc, aBool, &val));
- ASSERT_EQUALS(val, valBool);
- ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val, true);
- ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
+using mongo::BSONArray;
+using mongo::BSONField;
+using mongo::BSONObj;
+using mongo::BSONObjBuilder;
+using mongo::Date_t;
+using mongo::FieldParser;
+using mongo::OID;
+using std::string;
+using std::vector;
+using std::map;
+
+class ExtractionFixture : public mongo::unittest::Test {
+protected:
+ BSONObj doc;
+
+ bool valBool;
+ BSONArray valArray;
+ BSONObj valObj;
+ Date_t valDate;
+ string valString;
+ OID valOID;
+ long long valLong;
+
+ static BSONField<bool> aBool;
+ static BSONField<BSONArray> anArray;
+ static BSONField<BSONObj> anObj;
+ static BSONField<Date_t> aDate;
+ static BSONField<string> aString;
+ static BSONField<OID> anOID;
+ static BSONField<long long> aLong;
+
+ void setUp() {
+ valBool = true;
+ valArray = BSON_ARRAY(1 << 2 << 3);
+ valObj = BSON("a" << 1);
+ valDate = Date_t::fromMillisSinceEpoch(1);
+ valString = "a string";
+ valOID = OID::gen();
+ valLong = 1LL;
+
+ doc = BSON(aBool(valBool) << anArray(valArray) << anObj(valObj) << aDate(valDate)
+ << aString(valString) << anOID(valOID) << aLong(valLong));
}
- TEST_F(ExtractionFixture, GetBSONArray) {
- BSONField<BSONArray> notThere("otherArray", BSON_ARRAY("a" << "b"));
- BSONField<BSONArray> wrongType(aString.name());
- BSONArray val;
- ASSERT_TRUE(FieldParser::extract(doc, anArray, &val));
- ASSERT_EQUALS(val, valArray);
- ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val, BSON_ARRAY("a" << "b"));
- ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
- }
-
- TEST_F(ExtractionFixture, GetBSONObj) {
- BSONField<BSONObj> notThere("otherObj", BSON("b" << 1));
- BSONField<BSONObj> wrongType(aString.name());
- BSONObj val;
- ASSERT_TRUE(FieldParser::extract(doc, anObj, &val));
- ASSERT_EQUALS(val, valObj);
- ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val, BSON("b" << 1));
- ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
- }
-
- TEST_F(ExtractionFixture, GetDate) {
- BSONField<Date_t> notThere("otherDate", Date_t::fromMillisSinceEpoch(99));
- BSONField<Date_t> wrongType(aString.name());
- Date_t val;
- ASSERT_TRUE(FieldParser::extract(doc, aDate, &val));
- ASSERT_EQUALS(val, valDate);
- ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val, Date_t::fromMillisSinceEpoch(99));
- ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
- }
-
- TEST_F(ExtractionFixture, GetString) {
- BSONField<string> notThere("otherString", "abc");
- BSONField<string> wrongType(aBool.name());
- string val;
- ASSERT_TRUE(FieldParser::extract(doc, aString, &val));
- ASSERT_EQUALS(val, valString);
- ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val, "abc");
- ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
- }
-
- TEST_F(ExtractionFixture, GetOID) {
- OID defOID = OID::gen();
- BSONField<OID> notThere("otherOID", defOID);
- BSONField<OID> wrongType(aString.name());
- OID val;
- ASSERT_TRUE(FieldParser::extract(doc, anOID, &val));
- ASSERT_EQUALS(val, valOID);
- ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val, defOID);
- ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
- }
-
- TEST_F(ExtractionFixture, GetLong) {
- BSONField<long long> notThere("otherLong", 0);
- BSONField<long long> wrongType(aString.name());
- long long val;
- ASSERT_TRUE(FieldParser::extract(doc, aLong, &val));
- ASSERT_EQUALS(val, valLong);
- ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val, 0);
- ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
- }
-
- TEST_F(ExtractionFixture, IsFound) {
- bool bool_val;
- BSONField<bool> aBoolMissing("aBoolMissing");
- ASSERT_EQUALS(FieldParser::extract(doc, aBool, &bool_val, NULL),
- FieldParser::FIELD_SET);
- ASSERT_EQUALS(FieldParser::extract(doc, aBoolMissing, &bool_val, NULL),
- FieldParser::FIELD_NONE);
-
- Date_t Date_t_val;
- BSONField<Date_t> aDateMissing("aDateMissing");
- ASSERT_EQUALS(FieldParser::extract(doc, aDate, &Date_t_val, NULL),
- FieldParser::FIELD_SET);
- ASSERT_EQUALS(FieldParser::extract(doc, aDateMissing, &Date_t_val, NULL),
- FieldParser::FIELD_NONE);
-
- string string_val;
- BSONField<string> aStringMissing("aStringMissing");
- ASSERT_EQUALS(FieldParser::extract(doc, aString, &string_val, NULL),
- FieldParser::FIELD_SET);
- ASSERT_EQUALS(FieldParser::extract(doc, aStringMissing, &string_val, NULL),
- FieldParser::FIELD_NONE);
-
- OID OID_val;
- BSONField<OID> anOIDMissing("anOIDMissing");
- ASSERT_EQUALS(FieldParser::extract(doc, anOID, &OID_val, NULL),
- FieldParser::FIELD_SET);
- ASSERT_EQUALS(FieldParser::extract(doc, anOIDMissing, &OID_val, NULL),
- FieldParser::FIELD_NONE);
-
- long long long_long_val;
- BSONField<long long> aLongMissing("aLongMissing");
- ASSERT_EQUALS(FieldParser::extract(doc, aLong, &long_long_val, NULL),
- FieldParser::FIELD_SET);
- ASSERT_EQUALS(FieldParser::extract(doc, aLongMissing, &long_long_val, NULL),
- FieldParser::FIELD_NONE);
- }
-
- TEST(ComplexExtraction, GetStringVector) {
-
- // Test valid string vector extraction
- BSONField<vector<string> > vectorField("testVector");
-
+ void tearDown() {}
+};
+
+BSONField<bool> ExtractionFixture::aBool("aBool");
+BSONField<BSONArray> ExtractionFixture::anArray("anArray");
+BSONField<BSONObj> ExtractionFixture::anObj("anObj");
+BSONField<Date_t> ExtractionFixture::aDate("aDate");
+BSONField<string> ExtractionFixture::aString("aString");
+BSONField<OID> ExtractionFixture::anOID("anOID");
+BSONField<long long> ExtractionFixture::aLong("aLong");
+
+TEST_F(ExtractionFixture, GetBool) {
+ BSONField<bool> notThere("otherBool", true);
+ BSONField<bool> wrongType(anObj.name());
+ bool val;
+ ASSERT_TRUE(FieldParser::extract(doc, aBool, &val));
+ ASSERT_EQUALS(val, valBool);
+ ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
+ ASSERT_EQUALS(val, true);
+ ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
+}
+
+TEST_F(ExtractionFixture, GetBSONArray) {
+ BSONField<BSONArray> notThere("otherArray",
+ BSON_ARRAY("a"
+ << "b"));
+ BSONField<BSONArray> wrongType(aString.name());
+ BSONArray val;
+ ASSERT_TRUE(FieldParser::extract(doc, anArray, &val));
+ ASSERT_EQUALS(val, valArray);
+ ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
+ ASSERT_EQUALS(val,
+ BSON_ARRAY("a"
+ << "b"));
+ ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
+}
+
+TEST_F(ExtractionFixture, GetBSONObj) {
+ BSONField<BSONObj> notThere("otherObj", BSON("b" << 1));
+ BSONField<BSONObj> wrongType(aString.name());
+ BSONObj val;
+ ASSERT_TRUE(FieldParser::extract(doc, anObj, &val));
+ ASSERT_EQUALS(val, valObj);
+ ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
+ ASSERT_EQUALS(val, BSON("b" << 1));
+ ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
+}
+
+TEST_F(ExtractionFixture, GetDate) {
+ BSONField<Date_t> notThere("otherDate", Date_t::fromMillisSinceEpoch(99));
+ BSONField<Date_t> wrongType(aString.name());
+ Date_t val;
+ ASSERT_TRUE(FieldParser::extract(doc, aDate, &val));
+ ASSERT_EQUALS(val, valDate);
+ ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
+ ASSERT_EQUALS(val, Date_t::fromMillisSinceEpoch(99));
+ ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
+}
+
+TEST_F(ExtractionFixture, GetString) {
+ BSONField<string> notThere("otherString", "abc");
+ BSONField<string> wrongType(aBool.name());
+ string val;
+ ASSERT_TRUE(FieldParser::extract(doc, aString, &val));
+ ASSERT_EQUALS(val, valString);
+ ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
+ ASSERT_EQUALS(val, "abc");
+ ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
+}
+
+TEST_F(ExtractionFixture, GetOID) {
+ OID defOID = OID::gen();
+ BSONField<OID> notThere("otherOID", defOID);
+ BSONField<OID> wrongType(aString.name());
+ OID val;
+ ASSERT_TRUE(FieldParser::extract(doc, anOID, &val));
+ ASSERT_EQUALS(val, valOID);
+ ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
+ ASSERT_EQUALS(val, defOID);
+ ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
+}
+
+TEST_F(ExtractionFixture, GetLong) {
+ BSONField<long long> notThere("otherLong", 0);
+ BSONField<long long> wrongType(aString.name());
+ long long val;
+ ASSERT_TRUE(FieldParser::extract(doc, aLong, &val));
+ ASSERT_EQUALS(val, valLong);
+ ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
+ ASSERT_EQUALS(val, 0);
+ ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
+}
+
+TEST_F(ExtractionFixture, IsFound) {
+ bool bool_val;
+ BSONField<bool> aBoolMissing("aBoolMissing");
+ ASSERT_EQUALS(FieldParser::extract(doc, aBool, &bool_val, NULL), FieldParser::FIELD_SET);
+ ASSERT_EQUALS(FieldParser::extract(doc, aBoolMissing, &bool_val, NULL),
+ FieldParser::FIELD_NONE);
+
+ Date_t Date_t_val;
+ BSONField<Date_t> aDateMissing("aDateMissing");
+ ASSERT_EQUALS(FieldParser::extract(doc, aDate, &Date_t_val, NULL), FieldParser::FIELD_SET);
+ ASSERT_EQUALS(FieldParser::extract(doc, aDateMissing, &Date_t_val, NULL),
+ FieldParser::FIELD_NONE);
+
+ string string_val;
+ BSONField<string> aStringMissing("aStringMissing");
+ ASSERT_EQUALS(FieldParser::extract(doc, aString, &string_val, NULL), FieldParser::FIELD_SET);
+ ASSERT_EQUALS(FieldParser::extract(doc, aStringMissing, &string_val, NULL),
+ FieldParser::FIELD_NONE);
+
+ OID OID_val;
+ BSONField<OID> anOIDMissing("anOIDMissing");
+ ASSERT_EQUALS(FieldParser::extract(doc, anOID, &OID_val, NULL), FieldParser::FIELD_SET);
+ ASSERT_EQUALS(FieldParser::extract(doc, anOIDMissing, &OID_val, NULL), FieldParser::FIELD_NONE);
+
+ long long long_long_val;
+ BSONField<long long> aLongMissing("aLongMissing");
+ ASSERT_EQUALS(FieldParser::extract(doc, aLong, &long_long_val, NULL), FieldParser::FIELD_SET);
+ ASSERT_EQUALS(FieldParser::extract(doc, aLongMissing, &long_long_val, NULL),
+ FieldParser::FIELD_NONE);
+}
+
+TEST(ComplexExtraction, GetStringVector) {
+ // Test valid string vector extraction
+ BSONField<vector<string>> vectorField("testVector");
+
+ BSONObjBuilder bob;
+ bob << vectorField() << BSON_ARRAY("a"
+ << "b"
+ << "c");
+ BSONObj obj = bob.obj();
+
+ vector<string> parsedVector;
+
+ ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
+ ASSERT_EQUALS("a", parsedVector[0]);
+ ASSERT_EQUALS("b", parsedVector[1]);
+ ASSERT_EQUALS("c", parsedVector[2]);
+ ASSERT_EQUALS(parsedVector.size(), static_cast<size_t>(3));
+}
+
+TEST(ComplexExtraction, GetObjectVector) {
+ // Test valid BSONObj vector extraction
+ BSONField<vector<BSONObj>> vectorField("testVector");
+
+ BSONObjBuilder bob;
+ bob << vectorField() << BSON_ARRAY(BSON("a" << 1) << BSON("b" << 1) << BSON("c" << 1));
+ BSONObj obj = bob.obj();
+
+ vector<BSONObj> parsedVector;
+
+ ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
+ ASSERT_EQUALS(BSON("a" << 1), parsedVector[0]);
+ ASSERT_EQUALS(BSON("b" << 1), parsedVector[1]);
+ ASSERT_EQUALS(BSON("c" << 1), parsedVector[2]);
+ ASSERT_EQUALS(parsedVector.size(), static_cast<size_t>(3));
+}
+
+TEST(ComplexExtraction, GetBadVector) {
+ // Test invalid vector extraction
+ BSONField<vector<BSONObj>> vectorField("testVector");
+
+ BSONObjBuilder bob;
+ bob << vectorField() << BSON_ARRAY(BSON("a" << 1) << "XXX" << BSON("c" << 1));
+ BSONObj obj = bob.obj();
+
+ vector<BSONObj> parsedVector;
+
+ string errMsg;
+ ASSERT(!FieldParser::extract(obj, vectorField, &parsedVector, &errMsg));
+ ASSERT_NOT_EQUALS(errMsg, "");
+}
+
+TEST(ComplexExtraction, RoundTripVector) {
+ // Test vector extraction after re-writing to BSON
+ BSONField<vector<string>> vectorField("testVector");
+
+ BSONObj obj;
+ {
BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY("a" << "b" << "c");
- BSONObj obj = bob.obj();
-
- vector<string> parsedVector;
-
- ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
- ASSERT_EQUALS("a", parsedVector[0]);
- ASSERT_EQUALS("b", parsedVector[1]);
- ASSERT_EQUALS("c", parsedVector[2]);
- ASSERT_EQUALS(parsedVector.size(), static_cast<size_t>(3));
- }
-
- TEST(ComplexExtraction, GetObjectVector) {
-
- // Test valid BSONObj vector extraction
- BSONField<vector<BSONObj> > vectorField("testVector");
-
- BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY(BSON("a" << 1) << BSON("b" << 1) << BSON("c" << 1));
- BSONObj obj = bob.obj();
-
- vector<BSONObj> parsedVector;
-
- ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
- ASSERT_EQUALS(BSON("a" << 1), parsedVector[0]);
- ASSERT_EQUALS(BSON("b" << 1), parsedVector[1]);
- ASSERT_EQUALS(BSON("c" << 1), parsedVector[2]);
- ASSERT_EQUALS(parsedVector.size(), static_cast<size_t>(3));
- }
-
- TEST(ComplexExtraction, GetBadVector) {
-
- // Test invalid vector extraction
- BSONField<vector<BSONObj> > vectorField("testVector");
-
- BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY(BSON("a" << 1) << "XXX" << BSON("c" << 1));
- BSONObj obj = bob.obj();
-
- vector<BSONObj> parsedVector;
-
- string errMsg;
- ASSERT(!FieldParser::extract(obj, vectorField, &parsedVector, &errMsg));
- ASSERT_NOT_EQUALS(errMsg, "");
- }
-
- TEST(ComplexExtraction, RoundTripVector) {
-
- // Test vector extraction after re-writing to BSON
- BSONField<vector<string> > vectorField("testVector");
-
- BSONObj obj;
- {
- BSONObjBuilder bob;
- bob << vectorField() << BSON_ARRAY("a" << "b" << "c");
- obj = bob.obj();
- }
-
- vector<string> parsedVector;
- ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
-
- {
- BSONObjBuilder bob;
- bob.append(vectorField(), parsedVector);
- obj = bob.obj();
- }
-
- parsedVector.clear();
- ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
-
- ASSERT_EQUALS("a", parsedVector[0]);
- ASSERT_EQUALS("b", parsedVector[1]);
- ASSERT_EQUALS("c", parsedVector[2]);
- ASSERT_EQUALS(parsedVector.size(), static_cast<size_t>(3));
+ bob << vectorField() << BSON_ARRAY("a"
+ << "b"
+ << "c");
+ obj = bob.obj();
}
- TEST(ComplexExtraction, GetStringMap) {
-
- // Test valid string->string map extraction
- BSONField<map<string, string> > mapField("testMap");
+ vector<string> parsedVector;
+ ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
+ {
BSONObjBuilder bob;
- bob << mapField() << BSON("a" << "a" << "b" << "b" << "c" << "c");
- BSONObj obj = bob.obj();
-
- map<string, string> parsedMap;
-
- ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
- ASSERT_EQUALS("a", parsedMap["a"]);
- ASSERT_EQUALS("b", parsedMap["b"]);
- ASSERT_EQUALS("c", parsedMap["c"]);
- ASSERT_EQUALS(parsedMap.size(), static_cast<size_t>(3));
+ bob.append(vectorField(), parsedVector);
+ obj = bob.obj();
}
- TEST(ComplexExtraction, GetObjectMap) {
-
- // Test valid string->BSONObj map extraction
- BSONField<map<string, BSONObj> > mapField("testMap");
-
+ parsedVector.clear();
+ ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
+
+ ASSERT_EQUALS("a", parsedVector[0]);
+ ASSERT_EQUALS("b", parsedVector[1]);
+ ASSERT_EQUALS("c", parsedVector[2]);
+ ASSERT_EQUALS(parsedVector.size(), static_cast<size_t>(3));
+}
+
+TEST(ComplexExtraction, GetStringMap) {
+ // Test valid string->string map extraction
+ BSONField<map<string, string>> mapField("testMap");
+
+ BSONObjBuilder bob;
+ bob << mapField() << BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
+ BSONObj obj = bob.obj();
+
+ map<string, string> parsedMap;
+
+ ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
+ ASSERT_EQUALS("a", parsedMap["a"]);
+ ASSERT_EQUALS("b", parsedMap["b"]);
+ ASSERT_EQUALS("c", parsedMap["c"]);
+ ASSERT_EQUALS(parsedMap.size(), static_cast<size_t>(3));
+}
+
+TEST(ComplexExtraction, GetObjectMap) {
+ // Test valid string->BSONObj map extraction
+ BSONField<map<string, BSONObj>> mapField("testMap");
+
+ BSONObjBuilder bob;
+ bob << mapField() << BSON("a" << BSON("a"
+ << "a") << "b" << BSON("b"
+ << "b") << "c" << BSON("c"
+ << "c"));
+ BSONObj obj = bob.obj();
+
+ map<string, BSONObj> parsedMap;
+
+ ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
+ ASSERT_EQUALS(BSON("a"
+ << "a"),
+ parsedMap["a"]);
+ ASSERT_EQUALS(BSON("b"
+ << "b"),
+ parsedMap["b"]);
+ ASSERT_EQUALS(BSON("c"
+ << "c"),
+ parsedMap["c"]);
+ ASSERT_EQUALS(parsedMap.size(), static_cast<size_t>(3));
+}
+
+TEST(ComplexExtraction, GetBadMap) {
+ // Test invalid map extraction
+ BSONField<map<string, string>> mapField("testMap");
+
+ BSONObjBuilder bob;
+ bob << mapField() << BSON("a"
+ << "a"
+ << "b" << 123 << "c"
+ << "c");
+ BSONObj obj = bob.obj();
+
+ map<string, string> parsedMap;
+
+ string errMsg;
+ ASSERT(!FieldParser::extract(obj, mapField, &parsedMap, &errMsg));
+ ASSERT_NOT_EQUALS(errMsg, "");
+}
+
+TEST(ComplexExtraction, RoundTripMap) {
+ // Test map extraction after re-writing to BSON
+ BSONField<map<string, string>> mapField("testMap");
+
+ BSONObj obj;
+ {
BSONObjBuilder bob;
- bob << mapField() << BSON("a" << BSON("a" << "a") <<
- "b" << BSON("b" << "b") <<
- "c" << BSON("c" << "c"));
- BSONObj obj = bob.obj();
-
- map<string, BSONObj> parsedMap;
-
- ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
- ASSERT_EQUALS(BSON("a" << "a"), parsedMap["a"]);
- ASSERT_EQUALS(BSON("b" << "b"), parsedMap["b"]);
- ASSERT_EQUALS(BSON("c" << "c"), parsedMap["c"]);
- ASSERT_EQUALS(parsedMap.size(), static_cast<size_t>(3));
+ bob << mapField() << BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
+ obj = bob.obj();
}
- TEST(ComplexExtraction, GetBadMap) {
-
- // Test invalid map extraction
- BSONField<map<string, string> > mapField("testMap");
+ map<string, string> parsedMap;
+ ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
+ {
BSONObjBuilder bob;
- bob << mapField() << BSON("a" << "a" << "b" << 123 << "c" << "c");
- BSONObj obj = bob.obj();
-
- map<string, string> parsedMap;
-
- string errMsg;
- ASSERT(!FieldParser::extract(obj, mapField, &parsedMap, &errMsg));
- ASSERT_NOT_EQUALS(errMsg, "");
+ bob.append(mapField(), parsedMap);
+ obj = bob.obj();
}
- TEST(ComplexExtraction, RoundTripMap) {
+ parsedMap.clear();
+ ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
- // Test map extraction after re-writing to BSON
- BSONField<map<string, string> > mapField("testMap");
+ ASSERT_EQUALS("a", parsedMap["a"]);
+ ASSERT_EQUALS("b", parsedMap["b"]);
+ ASSERT_EQUALS("c", parsedMap["c"]);
+ ASSERT_EQUALS(parsedMap.size(), static_cast<size_t>(3));
+}
- BSONObj obj;
- {
- BSONObjBuilder bob;
- bob << mapField() << BSON("a" << "a" << "b" << "b" << "c" << "c");
- obj = bob.obj();
- }
+TEST(ComplexExtraction, GetNestedMap) {
+ // Test extraction of complex nested vector and map
+ BSONField<vector<map<string, string>>> nestedField("testNested");
- map<string, string> parsedMap;
- ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
+ BSONObj nestedMapObj = BSON("a"
+ << "a"
+ << "b"
+ << "b"
+ << "c"
+ << "c");
- {
- BSONObjBuilder bob;
- bob.append(mapField(), parsedMap);
- obj = bob.obj();
- }
+ BSONObjBuilder bob;
+ bob << nestedField() << BSON_ARRAY(nestedMapObj << nestedMapObj << nestedMapObj);
+ BSONObj obj = bob.obj();
- parsedMap.clear();
- ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
+ vector<map<string, string>> parsed;
+ ASSERT(FieldParser::extract(obj, nestedField, &parsed));
+ ASSERT_EQUALS(parsed.size(), static_cast<size_t>(3));
+ for (int i = 0; i < 3; i++) {
+ map<string, string>& parsedMap = parsed[i];
ASSERT_EQUALS("a", parsedMap["a"]);
ASSERT_EQUALS("b", parsedMap["b"]);
ASSERT_EQUALS("c", parsedMap["c"]);
ASSERT_EQUALS(parsedMap.size(), static_cast<size_t>(3));
}
+}
- TEST(ComplexExtraction, GetNestedMap) {
-
- // Test extraction of complex nested vector and map
- BSONField<vector<map<string, string> > > nestedField("testNested");
-
- BSONObj nestedMapObj = BSON("a" << "a" << "b" << "b" << "c" << "c");
-
- BSONObjBuilder bob;
- bob << nestedField() << BSON_ARRAY(nestedMapObj << nestedMapObj << nestedMapObj);
- BSONObj obj = bob.obj();
-
- vector<map<string, string> > parsed;
-
- ASSERT(FieldParser::extract(obj, nestedField, &parsed));
- ASSERT_EQUALS(parsed.size(), static_cast<size_t>(3));
- for (int i = 0; i < 3; i++) {
- map<string, string>& parsedMap = parsed[i];
- ASSERT_EQUALS("a", parsedMap["a"]);
- ASSERT_EQUALS("b", parsedMap["b"]);
- ASSERT_EQUALS("c", parsedMap["c"]);
- ASSERT_EQUALS(parsedMap.size(), static_cast<size_t>(3));
- }
- }
-
- TEST(ComplexExtraction, GetBadNestedMap) {
-
- // Test extraction of invalid complex nested vector and map
- BSONField<vector<map<string, string> > > nestedField("testNested");
+TEST(ComplexExtraction, GetBadNestedMap) {
+ // Test extraction of invalid complex nested vector and map
+ BSONField<vector<map<string, string>>> nestedField("testNested");
- BSONObj nestedMapObj = BSON("a" << "a" << "b" << 123 << "c" << "c");
+ BSONObj nestedMapObj = BSON("a"
+ << "a"
+ << "b" << 123 << "c"
+ << "c");
- BSONObjBuilder bob;
- bob << nestedField() << BSON_ARRAY(nestedMapObj << nestedMapObj << nestedMapObj);
- BSONObj obj = bob.obj();
-
- vector<map<string, string> > parsed;
+ BSONObjBuilder bob;
+ bob << nestedField() << BSON_ARRAY(nestedMapObj << nestedMapObj << nestedMapObj);
+ BSONObj obj = bob.obj();
- string errMsg;
- ASSERT(!FieldParser::extract(obj, nestedField, &parsed, &errMsg));
- ASSERT_NOT_EQUALS(errMsg, "");
- }
+ vector<map<string, string>> parsed;
- TEST(EdgeCases, EmbeddedNullStrings) {
+ string errMsg;
+ ASSERT(!FieldParser::extract(obj, nestedField, &parsed, &errMsg));
+ ASSERT_NOT_EQUALS(errMsg, "");
+}
- // Test extraction of string values with embedded nulls.
- BSONField<string> field("testStr");
+TEST(EdgeCases, EmbeddedNullStrings) {
+ // Test extraction of string values with embedded nulls.
+ BSONField<string> field("testStr");
- const char* str = "a\0c";
- const size_t strSize = 4;
- BSONObjBuilder doc;
- doc.append(field(), str, strSize);
- BSONObj obj(doc.obj());
+ const char* str = "a\0c";
+ const size_t strSize = 4;
+ BSONObjBuilder doc;
+ doc.append(field(), str, strSize);
+ BSONObj obj(doc.obj());
- string parsed;
- string errMsg;
- ASSERT(FieldParser::extract(obj, field, &parsed, &errMsg));
+ string parsed;
+ string errMsg;
+ ASSERT(FieldParser::extract(obj, field, &parsed, &errMsg));
- ASSERT_EQUALS(0, memcmp(parsed.data(), str, strSize));
- ASSERT_EQUALS(errMsg, "");
- }
+ ASSERT_EQUALS(0, memcmp(parsed.data(), str, strSize));
+ ASSERT_EQUALS(errMsg, "");
+}
-} // unnamed namespace
+} // unnamed namespace
diff --git a/src/mongo/db/field_ref.cpp b/src/mongo/db/field_ref.cpp
index 9881e42b75a..e3bc5305c0b 100644
--- a/src/mongo/db/field_ref.cpp
+++ b/src/mongo/db/field_ref.cpp
@@ -28,262 +28,256 @@
#include "mongo/db/field_ref.h"
-#include <algorithm> // for min
+#include <algorithm> // for min
#include "mongo/util/assert_util.h"
namespace mongo {
- FieldRef::FieldRef() : _size(0) {}
+FieldRef::FieldRef() : _size(0) {}
- FieldRef::FieldRef(StringData path) : _size(0) {
- parse(path);
- }
-
- void FieldRef::parse(StringData path) {
- if (path.size() == 0) {
- return;
- }
+FieldRef::FieldRef(StringData path) : _size(0) {
+ parse(path);
+}
- if (_size != 0) {
- clear();
- }
-
- // We guarantee that accesses through getPart() will be valid while 'this' is. So we
- // keep a copy in a local sting.
-
- _dotted = path.toString();
-
- // Separate the field parts using '.' as a delimiter.
- std::string::iterator beg = _dotted.begin();
- std::string::iterator cur = beg;
- const std::string::iterator end = _dotted.end();
- while (true) {
- if (cur != end && *cur != '.') {
- cur++;
- continue;
- }
-
- // If cur != beg then we advanced cur in the loop above, so we have a real sequence
- // of characters to add as a new part. Otherwise, we may be parsing something odd,
- // like "..", and we need to add an empty StringData piece to represent the "part"
- // in-between the dots. This also handles the case where 'beg' and 'cur' are both
- // at 'end', which can happen if we are parsing anything with a terminal "."
- // character. In that case, we still need to add an empty part, but we will break
- // out of the loop below since we will not execute the guarded 'continue' and will
- // instead reach the break statement.
-
- if (cur != beg)
- appendPart(StringData(&*beg, cur - beg));
- else
- appendPart(StringData());
-
- if (cur != end) {
- beg = ++cur;
- continue;
- }
+void FieldRef::parse(StringData path) {
+ if (path.size() == 0) {
+ return;
+ }
- break;
- }
+ if (_size != 0) {
+ clear();
}
- void FieldRef::setPart(size_t i, StringData part) {
- dassert(i < _size);
+ // We guarantee that accesses through getPart() will be valid while 'this' is. So we
+    // keep a copy in a local string.
- if (_replacements.size() != _size) {
- _replacements.resize(_size);
- }
+ _dotted = path.toString();
- _replacements[i] = part.toString();
- if (i < kReserveAhead) {
- _fixed[i] = _replacements[i];
+ // Separate the field parts using '.' as a delimiter.
+ std::string::iterator beg = _dotted.begin();
+ std::string::iterator cur = beg;
+ const std::string::iterator end = _dotted.end();
+ while (true) {
+ if (cur != end && *cur != '.') {
+ cur++;
+ continue;
}
- else {
- _variable[getIndex(i)] = _replacements[i];
- }
- }
- size_t FieldRef::appendPart(StringData part) {
- if (_size < kReserveAhead) {
- _fixed[_size] = part;
- }
- else {
- _variable.push_back(part);
+ // If cur != beg then we advanced cur in the loop above, so we have a real sequence
+ // of characters to add as a new part. Otherwise, we may be parsing something odd,
+ // like "..", and we need to add an empty StringData piece to represent the "part"
+ // in-between the dots. This also handles the case where 'beg' and 'cur' are both
+ // at 'end', which can happen if we are parsing anything with a terminal "."
+ // character. In that case, we still need to add an empty part, but we will break
+ // out of the loop below since we will not execute the guarded 'continue' and will
+ // instead reach the break statement.
+
+ if (cur != beg)
+ appendPart(StringData(&*beg, cur - beg));
+ else
+ appendPart(StringData());
+
+ if (cur != end) {
+ beg = ++cur;
+ continue;
}
- return ++_size;
+
+ break;
}
+}
- void FieldRef::reserialize() const {
- std::string nextDotted;
- // Reserve some space in the string. We know we will have, at minimum, a character for
- // each component we are writing, and a dot for each component, less one. We don't want
- // to reserve more, since we don't want to forfeit the SSO if it is applicable.
- nextDotted.reserve((_size * 2) - 1);
-
- // Concatenate the fields to a new string
- for (size_t i = 0; i != _size; ++i) {
- if (i > 0)
- nextDotted.append(1, '.');
- const StringData part = getPart(i);
- nextDotted.append(part.rawData(), part.size());
- }
+void FieldRef::setPart(size_t i, StringData part) {
+ dassert(i < _size);
- // Make the new string our contents
- _dotted.swap(nextDotted);
-
- // Fixup the parts to refer to the new string
- std::string::const_iterator where = _dotted.begin();
- const std::string::const_iterator end = _dotted.end();
- for (size_t i = 0; i != _size; ++i) {
- StringData& part = (i < kReserveAhead) ? _fixed[i] : _variable[getIndex(i)];
- const size_t size = part.size();
- part = StringData(&*where, size);
- where += size;
- // skip over '.' unless we are at the end.
- if (where != end) {
- dassert(*where == '.');
- ++where;
- }
- }
+ if (_replacements.size() != _size) {
+ _replacements.resize(_size);
+ }
- // Drop any replacements
- _replacements.clear();
+ _replacements[i] = part.toString();
+ if (i < kReserveAhead) {
+ _fixed[i] = _replacements[i];
+ } else {
+ _variable[getIndex(i)] = _replacements[i];
}
+}
- StringData FieldRef::getPart(size_t i) const {
- dassert(i < _size);
+size_t FieldRef::appendPart(StringData part) {
+ if (_size < kReserveAhead) {
+ _fixed[_size] = part;
+ } else {
+ _variable.push_back(part);
+ }
+ return ++_size;
+}
+
+void FieldRef::reserialize() const {
+ std::string nextDotted;
+ // Reserve some space in the string. We know we will have, at minimum, a character for
+ // each component we are writing, and a dot for each component, less one. We don't want
+ // to reserve more, since we don't want to forfeit the SSO if it is applicable.
+ nextDotted.reserve((_size * 2) - 1);
+
+ // Concatenate the fields to a new string
+ for (size_t i = 0; i != _size; ++i) {
+ if (i > 0)
+ nextDotted.append(1, '.');
+ const StringData part = getPart(i);
+ nextDotted.append(part.rawData(), part.size());
+ }
- if (i < kReserveAhead) {
- return _fixed[i];
- }
- else {
- return _variable[getIndex(i)];
+ // Make the new string our contents
+ _dotted.swap(nextDotted);
+
+ // Fixup the parts to refer to the new string
+ std::string::const_iterator where = _dotted.begin();
+ const std::string::const_iterator end = _dotted.end();
+ for (size_t i = 0; i != _size; ++i) {
+ StringData& part = (i < kReserveAhead) ? _fixed[i] : _variable[getIndex(i)];
+ const size_t size = part.size();
+ part = StringData(&*where, size);
+ where += size;
+ // skip over '.' unless we are at the end.
+ if (where != end) {
+ dassert(*where == '.');
+ ++where;
}
}
- bool FieldRef::isPrefixOf( const FieldRef& other ) const {
- // Can't be a prefix if the size is equal to or larger.
- if ( _size >= other._size ) {
- return false;
- }
+ // Drop any replacements
+ _replacements.clear();
+}
- // Empty FieldRef is not a prefix of anything.
- if ( _size == 0 ) {
- return false;
- }
+StringData FieldRef::getPart(size_t i) const {
+ dassert(i < _size);
- size_t common = commonPrefixSize( other );
- return common == _size && other._size > common;
+ if (i < kReserveAhead) {
+ return _fixed[i];
+ } else {
+ return _variable[getIndex(i)];
}
+}
- size_t FieldRef::commonPrefixSize( const FieldRef& other ) const {
- if (_size == 0 || other._size == 0) {
- return 0;
- }
+bool FieldRef::isPrefixOf(const FieldRef& other) const {
+ // Can't be a prefix if the size is equal to or larger.
+ if (_size >= other._size) {
+ return false;
+ }
- size_t maxPrefixSize = std::min( _size-1, other._size-1 );
- size_t prefixSize = 0;
+ // Empty FieldRef is not a prefix of anything.
+ if (_size == 0) {
+ return false;
+ }
- while ( prefixSize <= maxPrefixSize ) {
- if ( getPart( prefixSize ) != other.getPart( prefixSize ) ) {
- break;
- }
- prefixSize++;
- }
+ size_t common = commonPrefixSize(other);
+ return common == _size && other._size > common;
+}
- return prefixSize;
+size_t FieldRef::commonPrefixSize(const FieldRef& other) const {
+ if (_size == 0 || other._size == 0) {
+ return 0;
}
- StringData FieldRef::dottedField( size_t offset ) const {
- return dottedSubstring(offset, numParts());
+ size_t maxPrefixSize = std::min(_size - 1, other._size - 1);
+ size_t prefixSize = 0;
+
+ while (prefixSize <= maxPrefixSize) {
+ if (getPart(prefixSize) != other.getPart(prefixSize)) {
+ break;
+ }
+ prefixSize++;
}
- StringData FieldRef::dottedSubstring(size_t startPart, size_t endPart) const {
- if (_size == 0 || startPart >= endPart || endPart > numParts())
- return StringData();
+ return prefixSize;
+}
- if (!_replacements.empty())
- reserialize();
- dassert(_replacements.empty());
+StringData FieldRef::dottedField(size_t offset) const {
+ return dottedSubstring(offset, numParts());
+}
- StringData result(_dotted);
+StringData FieldRef::dottedSubstring(size_t startPart, size_t endPart) const {
+ if (_size == 0 || startPart >= endPart || endPart > numParts())
+ return StringData();
- // Fast-path if we want the whole thing
- if (startPart == 0 && endPart == numParts())
- return result;
+ if (!_replacements.empty())
+ reserialize();
+ dassert(_replacements.empty());
- size_t startChar = 0;
- for (size_t i = 0; i < startPart; ++i) {
- startChar += getPart(i).size() + 1; // correct for '.'
- }
- size_t endChar = startChar;
- for (size_t i = startPart; i < endPart; ++i) {
- endChar += getPart(i).size() + 1;
- }
- // correct for last '.'
- if (endPart != numParts())
- --endChar;
+ StringData result(_dotted);
- return result.substr(startChar, endChar - startChar);
- }
+ // Fast-path if we want the whole thing
+ if (startPart == 0 && endPart == numParts())
+ return result;
- bool FieldRef::equalsDottedField( StringData other ) const {
- StringData rest = other;
+ size_t startChar = 0;
+ for (size_t i = 0; i < startPart; ++i) {
+ startChar += getPart(i).size() + 1; // correct for '.'
+ }
+ size_t endChar = startChar;
+ for (size_t i = startPart; i < endPart; ++i) {
+ endChar += getPart(i).size() + 1;
+ }
+ // correct for last '.'
+ if (endPart != numParts())
+ --endChar;
- for ( size_t i = 0; i < _size; i++ ) {
+ return result.substr(startChar, endChar - startChar);
+}
- StringData part = getPart( i );
+bool FieldRef::equalsDottedField(StringData other) const {
+ StringData rest = other;
- if ( !rest.startsWith( part ) )
- return false;
+ for (size_t i = 0; i < _size; i++) {
+ StringData part = getPart(i);
- if ( i == _size - 1 )
- return rest.size() == part.size();
+ if (!rest.startsWith(part))
+ return false;
- // make sure next thing is a dot
- if ( rest.size() == part.size() )
- return false;
+ if (i == _size - 1)
+ return rest.size() == part.size();
- if ( rest[part.size()] != '.' )
- return false;
+ // make sure next thing is a dot
+ if (rest.size() == part.size())
+ return false;
- rest = rest.substr( part.size() + 1 );
- }
+ if (rest[part.size()] != '.')
+ return false;
- return false;
+ rest = rest.substr(part.size() + 1);
}
- int FieldRef::compare(const FieldRef& other) const {
- const size_t toCompare = std::min(_size, other._size);
- for (size_t i = 0; i < toCompare; i++) {
- if (getPart(i) == other.getPart(i)) {
- continue;
- }
- return getPart(i) < other.getPart(i) ? -1 : 1;
- }
+ return false;
+}
- const size_t rest = _size - toCompare;
- const size_t otherRest = other._size - toCompare;
- if ((rest == 0) && (otherRest == 0)) {
- return 0;
- }
- else if (rest < otherRest ) {
- return -1;
- }
- else {
- return 1;
+int FieldRef::compare(const FieldRef& other) const {
+ const size_t toCompare = std::min(_size, other._size);
+ for (size_t i = 0; i < toCompare; i++) {
+ if (getPart(i) == other.getPart(i)) {
+ continue;
}
+ return getPart(i) < other.getPart(i) ? -1 : 1;
}
- void FieldRef::clear() {
- _size = 0;
- _variable.clear();
- _dotted.clear();
- _replacements.clear();
+ const size_t rest = _size - toCompare;
+ const size_t otherRest = other._size - toCompare;
+ if ((rest == 0) && (otherRest == 0)) {
+ return 0;
+ } else if (rest < otherRest) {
+ return -1;
+ } else {
+ return 1;
}
+}
- std::ostream& operator<<(std::ostream& stream, const FieldRef& field) {
- return stream << field.dottedField();
- }
+void FieldRef::clear() {
+ _size = 0;
+ _variable.clear();
+ _dotted.clear();
+ _replacements.clear();
+}
+
+std::ostream& operator<<(std::ostream& stream, const FieldRef& field) {
+ return stream << field.dottedField();
+}
-} // namespace mongo
+} // namespace mongo
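To make the reformatted FieldRef behavior concrete, a small hedged usage sketch (the paths and the helper function are invented; not part of the patch):

    #include "mongo/db/field_ref.h"
    #include "mongo/util/assert_util.h"

    namespace {

    void exampleFieldRef() {
        mongo::FieldRef ref("a.b.c");               // parsed into parts "a", "b", "c"
        invariant(ref.numParts() == 3U);
        invariant(ref.getPart(1) == "b");           // StringData view into 'ref'

        ref.setPart(1, "x");                        // replace the middle part
        invariant(ref.dottedField() == "a.x.c");    // reserialized on demand

        mongo::FieldRef prefix("a.x");
        invariant(prefix.isPrefixOf(ref));          // strictly shorter and all parts match
    }

    }  // namespace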
diff --git a/src/mongo/db/field_ref.h b/src/mongo/db/field_ref.h
index b22bfcb3da9..82116faf6fe 100644
--- a/src/mongo/db/field_ref.h
+++ b/src/mongo/db/field_ref.h
@@ -37,165 +37,172 @@
namespace mongo {
+/**
+ * A FieldRef represents a path in a document, starting from the root. The path
+ * is made of "field parts" separated by dots. The class provides an efficient means to
+ * "split" the dotted fields in its parts, but no validation is done.
+ *
+ * Any field part may be replaced after the "original" field reference has been parsed. Any
+ * part can be accessed through a StringData object.
+ *
+ * The class is not thread safe.
+ */
+class FieldRef {
+ MONGO_DISALLOW_COPYING(FieldRef);
+
+public:
+ FieldRef();
+
+ explicit FieldRef(StringData path);
+
/**
- * A FieldPath represents a path in a document, starting from the root. The path
- * is made of "field parts" separated by dots. The class provides an efficient means to
- * "split" the dotted fields in its parts, but no validation is done.
- *
- * Any field part may be replaced, after the "original" field reference was parsed. Any
- * part can be accessed through a StringData object.
- *
- * The class is not thread safe.
+     * Field parts accessed through getPart() calls are no longer valid after the
+     * destructor has run.
*/
- class FieldRef {
- MONGO_DISALLOW_COPYING(FieldRef);
- public:
- FieldRef();
-
- explicit FieldRef(StringData path);
-
- /**
- * Field parts accessed through getPart() calls no longer would be valid, after the
- * destructor ran.
- */
- ~FieldRef() {}
-
- /**
- * Builds a field path out of each field part in 'dottedField'.
- */
- void parse(StringData dottedField);
-
- /**
- * Sets the 'i-th' field part to point to 'part'. Assumes i < size(). Behavior is
- * undefined otherwise.
- */
- void setPart(size_t i, StringData part);
-
- /**
- * Returns the 'i-th' field part. Assumes i < size(). Behavior is undefined otherwise.
- */
- StringData getPart(size_t i) const;
-
- /**
- * Returns true when 'this' FieldRef is a prefix of 'other'. Equality is not considered
- * a prefix.
- */
- bool isPrefixOf( const FieldRef& other ) const;
-
- /**
- * Returns the number of field parts in the prefix that 'this' and 'other' share.
- */
- size_t commonPrefixSize( const FieldRef& other ) const;
-
- /**
- * Returns a StringData of the full dotted field in its current state (i.e., some parts may
- * have been replaced since the parse() call).
- */
- StringData dottedField( size_t offsetFromStart = 0 ) const;
-
- /**
- * Returns a StringData of parts of the dotted field from startPart to endPart in its
- * current state (i.e., some parts may have been replaced since the parse() call).
- */
- StringData dottedSubstring(size_t startPart, size_t endPart) const;
-
- /**
- * Compares the full dotted path represented by this FieldRef to other
- */
- bool equalsDottedField( StringData other ) const;
-
- /**
- * Return 0 if 'this' is equal to 'other' lexicographically, -1 if is it less than or
- * +1 if it is greater than.
- */
- int compare( const FieldRef& other ) const;
-
- /**
- * Resets the internal state. See note in parse() call.
- */
- void clear();
-
- //
- // accessors
- //
-
- /**
- * Returns the number of parts in this FieldRef.
- */
- size_t numParts() const { return _size; }
-
- bool empty() const { return numParts() == 0; }
-
- private:
- // Dotted fields are most often not longer than four parts. We use a mixed structure
- // here that will not require any extra memory allocation when that is the case. And
- // handle larger dotted fields if it is. The idea is not to penalize the common case
- // with allocations.
- static const size_t kReserveAhead = 4;
-
- /**
- * Parses 'path' into parts.
- */
- void _parse(StringData path);
-
- /** Converts the field part index to the variable part equivalent */
- size_t getIndex(size_t i) const { return i-kReserveAhead; }
-
- /**
- * Returns the new number of parts after appending 'part' to this field path. It
- * assumes that 'part' is pointing to an internally allocated area.
- */
- size_t appendPart(StringData part);
-
- /**
- * Re-assemble _dotted from components, including any replacements in _replacements,
- * and update the StringData components in _fixed and _variable to refer to the parts
- * of the new _dotted. This is used to make the storage for the current value of this
- * FieldRef contiguous so it can be returned as a StringData from the dottedField
- * method above.
- */
- void reserialize() const;
-
- // number of field parts stored
- size_t _size;
-
- // first kResevedAhead field components
- mutable StringData _fixed[kReserveAhead];
-
- // remaining field components
- mutable std::vector<StringData> _variable;
-
- // cached dotted name
- mutable std::string _dotted;
-
- // back memory added with the setPart call pointed to by _fized and _variable
- mutable std::vector<std::string> _replacements;
- };
-
- inline bool operator==(const FieldRef& lhs, const FieldRef& rhs) {
- return lhs.compare(rhs) == 0;
- }
+ ~FieldRef() {}
- inline bool operator!=(const FieldRef& lhs, const FieldRef& rhs) {
- return lhs.compare(rhs) != 0;
- }
+ /**
+ * Builds a field path out of each field part in 'dottedField'.
+ */
+ void parse(StringData dottedField);
- inline bool operator<(const FieldRef& lhs, const FieldRef& rhs) {
- return lhs.compare(rhs) < 0;
- }
+ /**
+     * Sets the 'i-th' field part to point to 'part'. Assumes i < numParts(). Behavior is
+     * undefined otherwise.
+ */
+ void setPart(size_t i, StringData part);
+
+ /**
+     * Returns the 'i-th' field part. Assumes i < numParts(). Behavior is undefined otherwise.
+ */
+ StringData getPart(size_t i) const;
+
+ /**
+ * Returns true when 'this' FieldRef is a prefix of 'other'. Equality is not considered
+ * a prefix.
+ */
+ bool isPrefixOf(const FieldRef& other) const;
+
+ /**
+ * Returns the number of field parts in the prefix that 'this' and 'other' share.
+ */
+ size_t commonPrefixSize(const FieldRef& other) const;
+
+ /**
+ * Returns a StringData of the full dotted field in its current state (i.e., some parts may
+ * have been replaced since the parse() call).
+ */
+ StringData dottedField(size_t offsetFromStart = 0) const;
+
+ /**
+ * Returns a StringData of parts of the dotted field from startPart to endPart in its
+ * current state (i.e., some parts may have been replaced since the parse() call).
+ */
+ StringData dottedSubstring(size_t startPart, size_t endPart) const;
+
+ /**
+     * Compares the full dotted path represented by this FieldRef to 'other'.
+ */
+ bool equalsDottedField(StringData other) const;
+
+ /**
+     * Returns 0 if 'this' is equal to 'other' lexicographically, -1 if it is less than
+     * 'other', or +1 if it is greater.
+ */
+ int compare(const FieldRef& other) const;
+
+ /**
+ * Resets the internal state. See note in parse() call.
+ */
+ void clear();
+
+ //
+ // accessors
+ //
- inline bool operator<=(const FieldRef& lhs, const FieldRef& rhs) {
- return lhs.compare(rhs) <= 0;
+ /**
+ * Returns the number of parts in this FieldRef.
+ */
+ size_t numParts() const {
+ return _size;
}
- inline bool operator>(const FieldRef& lhs, const FieldRef& rhs) {
- return lhs.compare(rhs) > 0;
+ bool empty() const {
+ return numParts() == 0;
}
- inline bool operator>=(const FieldRef& lhs, const FieldRef& rhs) {
- return lhs.compare(rhs) >= 0;
+private:
+    // Dotted fields are most often no longer than four parts. We use a mixed structure
+    // here that requires no extra memory allocation in that common case, and falls back
+    // to heap-allocated storage for longer dotted fields. The idea is not to penalize the
+    // common case with allocations.
+ static const size_t kReserveAhead = 4;
+
+ /**
+ * Parses 'path' into parts.
+ */
+ void _parse(StringData path);
+
+ /** Converts the field part index to the variable part equivalent */
+ size_t getIndex(size_t i) const {
+ return i - kReserveAhead;
}
- std::ostream& operator<<(std::ostream& stream, const FieldRef& value);
+ /**
+ * Returns the new number of parts after appending 'part' to this field path. It
+ * assumes that 'part' is pointing to an internally allocated area.
+ */
+ size_t appendPart(StringData part);
+
+ /**
+ * Re-assemble _dotted from components, including any replacements in _replacements,
+ * and update the StringData components in _fixed and _variable to refer to the parts
+ * of the new _dotted. This is used to make the storage for the current value of this
+ * FieldRef contiguous so it can be returned as a StringData from the dottedField
+ * method above.
+ */
+ void reserialize() const;
+
+ // number of field parts stored
+ size_t _size;
+
+    // first kReserveAhead field components
+ mutable StringData _fixed[kReserveAhead];
+
+ // remaining field components
+ mutable std::vector<StringData> _variable;
+
+ // cached dotted name
+ mutable std::string _dotted;
+
+    // backing memory for parts added through setPart(), pointed to by _fixed and _variable
+ mutable std::vector<std::string> _replacements;
+};
+
+inline bool operator==(const FieldRef& lhs, const FieldRef& rhs) {
+ return lhs.compare(rhs) == 0;
+}
+
+inline bool operator!=(const FieldRef& lhs, const FieldRef& rhs) {
+ return lhs.compare(rhs) != 0;
+}
+
+inline bool operator<(const FieldRef& lhs, const FieldRef& rhs) {
+ return lhs.compare(rhs) < 0;
+}
+
+inline bool operator<=(const FieldRef& lhs, const FieldRef& rhs) {
+ return lhs.compare(rhs) <= 0;
+}
+
+inline bool operator>(const FieldRef& lhs, const FieldRef& rhs) {
+ return lhs.compare(rhs) > 0;
+}
+
+inline bool operator>=(const FieldRef& lhs, const FieldRef& rhs) {
+ return lhs.compare(rhs) >= 0;
+}
+
+std::ostream& operator<<(std::ostream& stream, const FieldRef& value);
-} // namespace mongo
+} // namespace mongo
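
To make the reshuffled FieldRef interface above easier to follow, here is a minimal usage
sketch. It is illustrative only: the header path and the fieldRefSketch() wrapper are
assumptions, while every call and expected value follows the contracts documented above and
the unit tests later in this change.

    #include "mongo/db/field_ref.h"

    using mongo::FieldRef;

    void fieldRefSketch() {
        FieldRef ref("a.b.$.d");    // parsed into four parts: "a", "b", "$", "d"

        ref.setPart(2, "0");        // replace the positional part in place
        // ref.dottedField()         -> "a.b.0.d"
        // ref.dottedField(2)        -> "0.d"
        // ref.dottedSubstring(1, 3) -> "b.0"

        FieldRef prefix("a.b");
        // prefix.isPrefixOf(ref)       -> true (equality would not count)
        // prefix.commonPrefixSize(ref) -> 2
    }
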
diff --git a/src/mongo/db/field_ref_set.cpp b/src/mongo/db/field_ref_set.cpp
index 1ec099c7c7b..366da3fed5a 100644
--- a/src/mongo/db/field_ref_set.cpp
+++ b/src/mongo/db/field_ref_set.cpp
@@ -35,128 +35,124 @@
namespace mongo {
- using std::vector;
- using std::string;
-
- namespace {
-
- // For legacy purposes, we must handle empty fieldnames, which FieldRef clearly
- // prohibits. It is preferrable to have FieldRef keep that constraint and relax it here
- // -- stricly in update code. The rationale is that, if we want to ban data with no
- // field names, we must allow that data to be updated.
- StringData safeFirstPart(const FieldRef* fieldRef) {
- if (fieldRef->numParts() == 0) {
- return StringData();
- }
- else {
- return fieldRef->getPart(0);
- }
- }
-
+using std::vector;
+using std::string;
+
+namespace {
+
+// For legacy purposes, we must handle empty field names, which FieldRef clearly
+// prohibits. It is preferable to have FieldRef keep that constraint and relax it here
+// -- strictly in update code. The rationale is that, if we want to ban data with no
+// field names, we must allow that data to be updated.
+StringData safeFirstPart(const FieldRef* fieldRef) {
+ if (fieldRef->numParts() == 0) {
+ return StringData();
+ } else {
+ return fieldRef->getPart(0);
}
+}
+}
- bool FieldRefSet::FieldRefPtrLessThan::operator()(const FieldRef* l, const FieldRef* r) const {
- return *l < *r;
- }
+bool FieldRefSet::FieldRefPtrLessThan::operator()(const FieldRef* l, const FieldRef* r) const {
+ return *l < *r;
+}
- FieldRefSet::FieldRefSet() {
- }
+FieldRefSet::FieldRefSet() {}
- FieldRefSet::FieldRefSet(const vector<FieldRef*>& paths) {
- fillFrom(paths);
- }
+FieldRefSet::FieldRefSet(const vector<FieldRef*>& paths) {
+ fillFrom(paths);
+}
- bool FieldRefSet::findConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const {
- bool foundConflict = false;
+bool FieldRefSet::findConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const {
+ bool foundConflict = false;
- // If the set is empty, there is no work to do.
- if (_fieldSet.empty())
- return foundConflict;
+ // If the set is empty, there is no work to do.
+ if (_fieldSet.empty())
+ return foundConflict;
- StringData prefixStr = safeFirstPart(toCheck);
- FieldRef prefixField(prefixStr);
+ StringData prefixStr = safeFirstPart(toCheck);
+ FieldRef prefixField(prefixStr);
- FieldSet::iterator it = _fieldSet.lower_bound(&prefixField);
- // Now, iterate over all the present fields in the set that have the same prefix.
+ FieldSet::iterator it = _fieldSet.lower_bound(&prefixField);
+ // Now, iterate over all the present fields in the set that have the same prefix.
- while (it != _fieldSet.end() && safeFirstPart(*it) == prefixStr) {
- size_t common = (*it)->commonPrefixSize(*toCheck);
- if ((*it)->numParts() == common || toCheck->numParts() == common) {
- if (!conflicts)
- return true;
+ while (it != _fieldSet.end() && safeFirstPart(*it) == prefixStr) {
+ size_t common = (*it)->commonPrefixSize(*toCheck);
+ if ((*it)->numParts() == common || toCheck->numParts() == common) {
+ if (!conflicts)
+ return true;
- conflicts->_fieldSet.insert(*it);
- foundConflict = true;
- }
- ++it;
+ conflicts->_fieldSet.insert(*it);
+ foundConflict = true;
}
-
- return foundConflict;
+ ++it;
}
- void FieldRefSet::keepShortest(const FieldRef* toInsert) {
- const FieldRef* conflict;
- if ( !insert(toInsert, &conflict) && (toInsert->numParts() < (conflict->numParts()))) {
- _fieldSet.erase(conflict);
- keepShortest(toInsert);
- }
- }
+ return foundConflict;
+}
- void FieldRefSet::fillFrom(const std::vector<FieldRef*>& fields) {
- dassert(_fieldSet.empty());
- _fieldSet.insert(fields.begin(), fields.end());
+void FieldRefSet::keepShortest(const FieldRef* toInsert) {
+ const FieldRef* conflict;
+ if (!insert(toInsert, &conflict) && (toInsert->numParts() < (conflict->numParts()))) {
+ _fieldSet.erase(conflict);
+ keepShortest(toInsert);
}
-
- bool FieldRefSet::insert(const FieldRef* toInsert, const FieldRef** conflict) {
-
- // We can determine if two fields conflict by checking their common prefix.
- //
- // If each field is exactly of the size of the common prefix, this means the fields are
- // the same. If one of the fields is greater than the common prefix and the other
- // isn't, the latter is a prefix of the former. And vice-versa.
- //
- // Example:
- //
- // inserted > | a a.c
- // exiting v | (0) (+1)
- // ----------------|------------------------
- // a (0) | equal prefix <
- // a.b (+1) | prefix ^ *
- //
- // * Disjoint sub-trees
-
- // At each insertion, we only need to bother checking the fields in the set that have
- // at least some common prefix with the 'toInsert' field.
- StringData prefixStr = safeFirstPart(toInsert);
- FieldRef prefixField(prefixStr);
- FieldSet::iterator it = _fieldSet.lower_bound(&prefixField);
-
- // Now, iterate over all the present fields in the set that have the same prefix.
- while (it != _fieldSet.end() && safeFirstPart(*it) == prefixStr) {
- size_t common = (*it)->commonPrefixSize(*toInsert);
- if ((*it)->numParts() == common || toInsert->numParts() == common) {
- *conflict = *it;
- return false;
- }
- ++it;
+}
+
+void FieldRefSet::fillFrom(const std::vector<FieldRef*>& fields) {
+ dassert(_fieldSet.empty());
+ _fieldSet.insert(fields.begin(), fields.end());
+}
+
+bool FieldRefSet::insert(const FieldRef* toInsert, const FieldRef** conflict) {
+ // We can determine if two fields conflict by checking their common prefix.
+ //
+ // If each field is exactly of the size of the common prefix, this means the fields are
+ // the same. If one of the fields is greater than the common prefix and the other
+ // isn't, the latter is a prefix of the former. And vice-versa.
+ //
+ // Example:
+ //
+    // inserted >      |  a          a.c
+    // existing v      |  (0)        (+1)
+    // ----------------|------------------------
+    //  a       (0)    |  equal      prefix <
+    //  a.b     (+1)   |  prefix ^   *
+ //
+ // * Disjoint sub-trees
+
+ // At each insertion, we only need to bother checking the fields in the set that have
+ // at least some common prefix with the 'toInsert' field.
+ StringData prefixStr = safeFirstPart(toInsert);
+ FieldRef prefixField(prefixStr);
+ FieldSet::iterator it = _fieldSet.lower_bound(&prefixField);
+
+ // Now, iterate over all the present fields in the set that have the same prefix.
+ while (it != _fieldSet.end() && safeFirstPart(*it) == prefixStr) {
+ size_t common = (*it)->commonPrefixSize(*toInsert);
+ if ((*it)->numParts() == common || toInsert->numParts() == common) {
+ *conflict = *it;
+ return false;
}
-
- _fieldSet.insert(it, toInsert);
- *conflict = NULL;
- return true;
+ ++it;
}
- const std::string FieldRefSet::toString() const {
- str::stream res;
- res << "Fields:[ ";
- FieldRefSet::const_iterator where = _fieldSet.begin();
- const FieldRefSet::const_iterator end = _fieldSet.end();
- for( ; where != end; ++where ) {
- const FieldRef& current = **where;
- res << current.dottedField() << ",";
- }
- res << "]";
- return res;
+ _fieldSet.insert(it, toInsert);
+ *conflict = NULL;
+ return true;
+}
+
+const std::string FieldRefSet::toString() const {
+ str::stream res;
+ res << "Fields:[ ";
+ FieldRefSet::const_iterator where = _fieldSet.begin();
+ const FieldRefSet::const_iterator end = _fieldSet.end();
+ for (; where != end; ++where) {
+ const FieldRef& current = **where;
+ res << current.dottedField() << ",";
}
+ res << "]";
+ return res;
+}
-} // namespace mongo
+} // namespace mongo
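
The comment table above encodes the conflict rule: two paths conflict when they are equal or
when one is a prefix of the other, detected by comparing each path's length against their
common prefix. The following hypothetical sketch shows how the two-argument insert() reports
that, mirroring the field_ref_set_test.cpp cases further down; the wrapper function and the
header paths are assumptions.

    #include "mongo/db/field_ref.h"
    #include "mongo/db/field_ref_set.h"

    using mongo::FieldRef;
    using mongo::FieldRefSet;

    void conflictSketch() {
        FieldRef aDotB("a.b");
        FieldRef a("a");
        FieldRef aDotBDotC("a.b.c");
        FieldRef aDotC("a.c");

        FieldRefSet set;
        const FieldRef* conflict;
        set.insert(&aDotB, &conflict);      // true,  conflict == NULL
        set.insert(&a, &conflict);          // false, conflict == &aDotB ("a" is a prefix of it)
        set.insert(&aDotBDotC, &conflict);  // false, conflict == &aDotB (prefix the other way)
        set.insert(&aDotC, &conflict);      // true,  "a.c" is a disjoint sub-tree under "a"
    }
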
diff --git a/src/mongo/db/field_ref_set.h b/src/mongo/db/field_ref_set.h
index 0403f9265b2..6f86855ab06 100644
--- a/src/mongo/db/field_ref_set.h
+++ b/src/mongo/db/field_ref_set.h
@@ -38,98 +38,98 @@
namespace mongo {
+/**
+ * A FieldRefSet holds a number of unique FieldRefs - a set of dotted paths into a document.
+ *
+ * The FieldRefSet provides helpful functions for efficiently finding conflicts between field
+ * ref paths - field ref paths conflict if they are equal or if one is a prefix of the other.
+ * To maintain a FieldRefSet of non-conflicting paths, always use the insert method which
+ * returns conflicting FieldRefs.
+ *
+ * FieldRefSets do not own the FieldRef paths they contain.
+ */
+class FieldRefSet {
+ MONGO_DISALLOW_COPYING(FieldRefSet);
+
+ struct FieldRefPtrLessThan {
+ bool operator()(const FieldRef* lhs, const FieldRef* rhs) const;
+ };
+
+ typedef std::set<const FieldRef*, FieldRefPtrLessThan> FieldSet;
+
+public:
+ typedef FieldSet::iterator iterator;
+ typedef FieldSet::const_iterator const_iterator;
+
+ FieldRefSet();
+
+ FieldRefSet(const std::vector<FieldRef*>& paths);
+
+ /** Returns 'true' if the set is empty */
+ bool empty() const {
+ return _fieldSet.empty();
+ }
+
+ inline const_iterator begin() const {
+ return _fieldSet.begin();
+ }
+
+ inline const_iterator end() const {
+ return _fieldSet.end();
+ }
+
+ /**
+ * Returns true if the path does not already exist in the set, false otherwise.
+ *
+ * Note that *no* conflict resolution occurs - any path can be inserted into a set.
+ */
+ inline bool insert(const FieldRef* path) {
+ return _fieldSet.insert(path).second;
+ }
+
/**
- * A FieldRefSet holds a number of unique FieldRefs - a set of dotted paths into a document.
+     * Returns true if the field 'toInsert' can be added to the set without
+     * conflicts. Otherwise returns false and fills in '*conflict' with the field that
+     * 'toInsert' clashed with.
*
- * The FieldRefSet provides helpful functions for efficiently finding conflicts between field
- * ref paths - field ref paths conflict if they are equal to each other or if one is a prefix.
- * To maintain a FieldRefSet of non-conflicting paths, always use the insert method which
- * returns conflicting FieldRefs.
+     * There is no ownership transfer of 'toInsert'. The caller is responsible for
+     * keeping it alive for as long as the FieldRefSet is. By the same token,
+     * 'conflict' can only be referred to while the FieldRefSet is alive.
+ */
+ bool insert(const FieldRef* toInsert, const FieldRef** conflict);
+
+ /**
+ * Fills the set with the supplied FieldRef*s
*
- * FieldRefSets do not own the FieldRef paths they contain.
+ * Note that *no* conflict resolution occurs here.
*/
- class FieldRefSet {
- MONGO_DISALLOW_COPYING(FieldRefSet);
-
- struct FieldRefPtrLessThan {
- bool operator()(const FieldRef* lhs, const FieldRef* rhs) const;
- };
-
- typedef std::set<const FieldRef*, FieldRefPtrLessThan> FieldSet;
-
- public:
- typedef FieldSet::iterator iterator;
- typedef FieldSet::const_iterator const_iterator;
-
- FieldRefSet();
-
- FieldRefSet(const std::vector<FieldRef*>& paths);
-
- /** Returns 'true' if the set is empty */
- bool empty() const {
- return _fieldSet.empty();
- }
-
- inline const_iterator begin() const {
- return _fieldSet.begin();
- }
-
- inline const_iterator end() const {
- return _fieldSet.end();
- }
-
- /**
- * Returns true if the path does not already exist in the set, false otherwise.
- *
- * Note that *no* conflict resolution occurs - any path can be inserted into a set.
- */
- inline bool insert(const FieldRef* path) {
- return _fieldSet.insert(path).second;
- }
-
- /**
- * Returns true if the field 'toInsert' can be added in the set without
- * conflicts. Otherwise returns false and fill in '*conflict' with the field 'toInsert'
- * clashed with.
- *
- * There is no ownership transfer of 'toInsert'. The caller is responsible for
- * maintaining it alive for as long as the FieldRefSet is so. By the same token
- * 'conflict' can only be referred to while the FieldRefSet can.
- */
- bool insert(const FieldRef* toInsert, const FieldRef** conflict);
-
- /**
- * Fills the set with the supplied FieldRef*s
- *
- * Note that *no* conflict resolution occurs here.
- */
- void fillFrom(const std::vector<FieldRef*>& fields);
-
- /**
- * Replace any existing conflicting FieldRef with the shortest (closest to root) one
- */
- void keepShortest(const FieldRef* toInsert);
-
- /**
- * Find all inserted fields which conflict with the FieldRef 'toCheck' by the semantics
- * of 'insert', and add those fields to the 'conflicts' set.
- *
- * Return true if conflicts were found.
- */
- bool findConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const;
-
- void clear() {
- _fieldSet.clear();
- }
-
- /**
- * A debug/log-able string
- */
- const std::string toString() const;
-
- private:
- // A set of field_ref pointers, none of which is owned here.
- FieldSet _fieldSet;
- };
+ void fillFrom(const std::vector<FieldRef*>& fields);
+
+ /**
+ * Replace any existing conflicting FieldRef with the shortest (closest to root) one
+ */
+ void keepShortest(const FieldRef* toInsert);
+
+ /**
+ * Find all inserted fields which conflict with the FieldRef 'toCheck' by the semantics
+ * of 'insert', and add those fields to the 'conflicts' set.
+ *
+ * Return true if conflicts were found.
+ */
+ bool findConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const;
+
+ void clear() {
+ _fieldSet.clear();
+ }
+
+ /**
+ * A debug/log-able string
+ */
+ const std::string toString() const;
+
+private:
+ // A set of field_ref pointers, none of which is owned here.
+ FieldSet _fieldSet;
+};
-} // namespace mongo
+} // namespace mongo
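
findConflicts() and keepShortest() are not exercised by the tests below, so here is a hedged
sketch of their intended behaviour based solely on the declarations above and the
implementation in field_ref_set.cpp; the wrapper function and the outcomes noted in the
comments are illustrative assumptions.

    #include "mongo/db/field_ref.h"
    #include "mongo/db/field_ref_set.h"

    using mongo::FieldRef;
    using mongo::FieldRefSet;

    void conflictHelpersSketch() {
        FieldRef aDotB("a.b");
        FieldRef aDotC("a.c");
        FieldRef a("a");

        FieldRefSet set;
        set.insert(&aDotB);    // unchecked single-argument insert: no conflict resolution
        set.insert(&aDotC);

        // Collect every stored path that conflicts with "a"; both "a.b" and "a.c" do.
        FieldRefSet conflicts;
        bool found = set.findConflicts(&a, &conflicts);
        (void)found;    // true in this scenario

        // keepShortest() resolves toward the root: "a" evicts the longer "a.b" and "a.c".
        set.keepShortest(&a);
    }
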
diff --git a/src/mongo/db/field_ref_set_test.cpp b/src/mongo/db/field_ref_set_test.cpp
index 95323ca6fc5..aa5bd3cc322 100644
--- a/src/mongo/db/field_ref_set_test.cpp
+++ b/src/mongo/db/field_ref_set_test.cpp
@@ -33,112 +33,112 @@
namespace {
- using mongo::FieldRef;
- using mongo::FieldRefSet;
-
- TEST(EmptySet, Normal) {
- // insert "b"
- FieldRefSet fieldSet;
- FieldRef bSimple("b");
- const FieldRef* conflict;
- ASSERT_TRUE(fieldSet.insert(&bSimple, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
-
- // insert "a", OK
- FieldRef aSimple("a");
- ASSERT_TRUE(fieldSet.insert(&aSimple, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
-
- // insert "c", OK
- FieldRef cSimple("c");
- ASSERT_TRUE(fieldSet.insert(&cSimple, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
- }
-
- TEST(EmptySet, Conflict) {
- // insert "a.b"
- FieldRefSet fieldSet;
- FieldRef aDotB("a.b");
- const FieldRef* conflict;
- ASSERT_TRUE(fieldSet.insert(&aDotB, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
-
- // insert "a", conflicts with "a.b"
- FieldRef prefix("a");
- ASSERT_FALSE(fieldSet.insert(&prefix, &conflict));
- ASSERT_EQUALS(aDotB, *conflict);
-
- // insert "a.b.c", conflicts with "a.b"
- FieldRef superSet("a.b.c");
- ASSERT_FALSE(fieldSet.insert(&superSet, &conflict));
- ASSERT_EQUALS(aDotB, *conflict);
- }
-
- TEST(EmptySet, EmptyField) {
- // Old data may have empty field names. We test that we can catch conflicts if we try
- // to insert an empty field twice.
- FieldRefSet fieldSet;
- FieldRef empty;
- const FieldRef* conflict;
- ASSERT_TRUE(fieldSet.insert(&empty, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
-
- ASSERT_FALSE(fieldSet.insert(&empty, &conflict));
- ASSERT_EQUALS(empty, *conflict);
- }
-
- TEST(NotEmptySet, Normal) {
- // insert "b.c" and "b.e"
- FieldRefSet fieldSet;
- FieldRef bDotC("b.c");
- FieldRef bDotE("b.e");
- const FieldRef* conflict;
- ASSERT_TRUE(fieldSet.insert(&bDotC, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
- ASSERT_TRUE(fieldSet.insert(&bDotE, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
-
- // insert "a" before, OK
- FieldRef aSimple("a");
- ASSERT_TRUE(fieldSet.insert(&aSimple, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
-
- // insert "b.d" in the middle, OK
- FieldRef bDotD("b.d");
- ASSERT_TRUE(fieldSet.insert(&bDotD, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
-
- // insert "c" after, OK
- FieldRef cSimple("c");
- ASSERT_TRUE(fieldSet.insert(&cSimple, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
- }
-
- TEST(NotEmpty, Conflict) {
- // insert "b.c" and "b.e"
- FieldRefSet fieldSet;
- FieldRef bDotC("b.c");
- FieldRef bDotE("b.e");
- const FieldRef* conflict;
- ASSERT_TRUE(fieldSet.insert(&bDotC, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
- ASSERT_TRUE(fieldSet.insert(&bDotE, &conflict));
- ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
-
- // insert "b" before, conflicts "b.c"
- FieldRef bSimple("b");
- ASSERT_FALSE(fieldSet.insert(&bSimple, &conflict));
- ASSERT_EQUALS(bDotC, *conflict);
-
- // insert: "b.c.d" in the "middle", conflicts "b.c"
- FieldRef bDotCDotD("b.c.d");
- ASSERT_FALSE(fieldSet.insert(&bDotCDotD, &conflict));
- ASSERT_EQUALS(bDotC, *conflict);
-
- // insert: "b.e.f" at the end, conflicts "b.e"
- FieldRef bDotEDotF("b.e.f");
- ASSERT_FALSE(fieldSet.insert(&bDotEDotF, &conflict));
- ASSERT_EQUALS(bDotE, *conflict);
- }
-
-} // unnamed namespace
+using mongo::FieldRef;
+using mongo::FieldRefSet;
+
+TEST(EmptySet, Normal) {
+ // insert "b"
+ FieldRefSet fieldSet;
+ FieldRef bSimple("b");
+ const FieldRef* conflict;
+ ASSERT_TRUE(fieldSet.insert(&bSimple, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+
+ // insert "a", OK
+ FieldRef aSimple("a");
+ ASSERT_TRUE(fieldSet.insert(&aSimple, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+
+ // insert "c", OK
+ FieldRef cSimple("c");
+ ASSERT_TRUE(fieldSet.insert(&cSimple, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+}
+
+TEST(EmptySet, Conflict) {
+ // insert "a.b"
+ FieldRefSet fieldSet;
+ FieldRef aDotB("a.b");
+ const FieldRef* conflict;
+ ASSERT_TRUE(fieldSet.insert(&aDotB, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+
+ // insert "a", conflicts with "a.b"
+ FieldRef prefix("a");
+ ASSERT_FALSE(fieldSet.insert(&prefix, &conflict));
+ ASSERT_EQUALS(aDotB, *conflict);
+
+ // insert "a.b.c", conflicts with "a.b"
+ FieldRef superSet("a.b.c");
+ ASSERT_FALSE(fieldSet.insert(&superSet, &conflict));
+ ASSERT_EQUALS(aDotB, *conflict);
+}
+
+TEST(EmptySet, EmptyField) {
+ // Old data may have empty field names. We test that we can catch conflicts if we try
+ // to insert an empty field twice.
+ FieldRefSet fieldSet;
+ FieldRef empty;
+ const FieldRef* conflict;
+ ASSERT_TRUE(fieldSet.insert(&empty, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+
+ ASSERT_FALSE(fieldSet.insert(&empty, &conflict));
+ ASSERT_EQUALS(empty, *conflict);
+}
+
+TEST(NotEmptySet, Normal) {
+ // insert "b.c" and "b.e"
+ FieldRefSet fieldSet;
+ FieldRef bDotC("b.c");
+ FieldRef bDotE("b.e");
+ const FieldRef* conflict;
+ ASSERT_TRUE(fieldSet.insert(&bDotC, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+ ASSERT_TRUE(fieldSet.insert(&bDotE, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+
+ // insert "a" before, OK
+ FieldRef aSimple("a");
+ ASSERT_TRUE(fieldSet.insert(&aSimple, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+
+ // insert "b.d" in the middle, OK
+ FieldRef bDotD("b.d");
+ ASSERT_TRUE(fieldSet.insert(&bDotD, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+
+ // insert "c" after, OK
+ FieldRef cSimple("c");
+ ASSERT_TRUE(fieldSet.insert(&cSimple, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+}
+
+TEST(NotEmpty, Conflict) {
+ // insert "b.c" and "b.e"
+ FieldRefSet fieldSet;
+ FieldRef bDotC("b.c");
+ FieldRef bDotE("b.e");
+ const FieldRef* conflict;
+ ASSERT_TRUE(fieldSet.insert(&bDotC, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+ ASSERT_TRUE(fieldSet.insert(&bDotE, &conflict));
+ ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict);
+
+ // insert "b" before, conflicts "b.c"
+ FieldRef bSimple("b");
+ ASSERT_FALSE(fieldSet.insert(&bSimple, &conflict));
+ ASSERT_EQUALS(bDotC, *conflict);
+
+ // insert: "b.c.d" in the "middle", conflicts "b.c"
+ FieldRef bDotCDotD("b.c.d");
+ ASSERT_FALSE(fieldSet.insert(&bDotCDotD, &conflict));
+ ASSERT_EQUALS(bDotC, *conflict);
+
+ // insert: "b.e.f" at the end, conflicts "b.e"
+ FieldRef bDotEDotF("b.e.f");
+ ASSERT_FALSE(fieldSet.insert(&bDotEDotF, &conflict));
+ ASSERT_EQUALS(bDotE, *conflict);
+}
+
+} // unnamed namespace
diff --git a/src/mongo/db/field_ref_test.cpp b/src/mongo/db/field_ref_test.cpp
index 02390302190..26497d32840 100644
--- a/src/mongo/db/field_ref_test.cpp
+++ b/src/mongo/db/field_ref_test.cpp
@@ -36,287 +36,287 @@
namespace {
- using mongo::FieldRef;
- using mongo::StringData;
- using mongoutils::str::stream;
- using std::string;
-
- TEST(Empty, NoFields) {
- FieldRef fieldRef("");
- ASSERT_EQUALS(fieldRef.numParts(), 0U);
- ASSERT_EQUALS(fieldRef.dottedField(), "");
- }
-
- TEST(Empty, NoFieldNames) {
- string field = ".";
- FieldRef fieldRef(field);
- ASSERT_EQUALS(fieldRef.numParts(), 2U);
- ASSERT_EQUALS(fieldRef.getPart(0), "");
- ASSERT_EQUALS(fieldRef.getPart(1), "");
- ASSERT_EQUALS(fieldRef.dottedField(), field);
- }
-
- TEST(Empty, NoFieldNames2) {
- string field = "..";
- FieldRef fieldRef(field);
- ASSERT_EQUALS(fieldRef.numParts(), 3U);
- ASSERT_EQUALS(fieldRef.getPart(0), "");
- ASSERT_EQUALS(fieldRef.getPart(1), "");
- ASSERT_EQUALS(fieldRef.getPart(2), "");
- ASSERT_EQUALS(fieldRef.dottedField(), field);
- }
-
- TEST(Empty, EmptyFieldName) {
- string field = ".b.";
- FieldRef fieldRef(field);
- ASSERT_EQUALS(fieldRef.numParts(), 3U);
- ASSERT_EQUALS(fieldRef.getPart(0), "");
- ASSERT_EQUALS(fieldRef.getPart(1), "b");
- ASSERT_EQUALS(fieldRef.getPart(2), "");
- ASSERT_EQUALS(fieldRef.dottedField(), field);
- }
-
- TEST(Normal, SinglePart) {
- string field = "a";
- FieldRef fieldRef(field);
+using mongo::FieldRef;
+using mongo::StringData;
+using mongoutils::str::stream;
+using std::string;
+
+TEST(Empty, NoFields) {
+ FieldRef fieldRef("");
+ ASSERT_EQUALS(fieldRef.numParts(), 0U);
+ ASSERT_EQUALS(fieldRef.dottedField(), "");
+}
+
+TEST(Empty, NoFieldNames) {
+ string field = ".";
+ FieldRef fieldRef(field);
+ ASSERT_EQUALS(fieldRef.numParts(), 2U);
+ ASSERT_EQUALS(fieldRef.getPart(0), "");
+ ASSERT_EQUALS(fieldRef.getPart(1), "");
+ ASSERT_EQUALS(fieldRef.dottedField(), field);
+}
+
+TEST(Empty, NoFieldNames2) {
+ string field = "..";
+ FieldRef fieldRef(field);
+ ASSERT_EQUALS(fieldRef.numParts(), 3U);
+ ASSERT_EQUALS(fieldRef.getPart(0), "");
+ ASSERT_EQUALS(fieldRef.getPart(1), "");
+ ASSERT_EQUALS(fieldRef.getPart(2), "");
+ ASSERT_EQUALS(fieldRef.dottedField(), field);
+}
+
+TEST(Empty, EmptyFieldName) {
+ string field = ".b.";
+ FieldRef fieldRef(field);
+ ASSERT_EQUALS(fieldRef.numParts(), 3U);
+ ASSERT_EQUALS(fieldRef.getPart(0), "");
+ ASSERT_EQUALS(fieldRef.getPart(1), "b");
+ ASSERT_EQUALS(fieldRef.getPart(2), "");
+ ASSERT_EQUALS(fieldRef.dottedField(), field);
+}
+
+TEST(Normal, SinglePart) {
+ string field = "a";
+ FieldRef fieldRef(field);
+ ASSERT_EQUALS(fieldRef.numParts(), 1U);
+ ASSERT_EQUALS(fieldRef.getPart(0), field);
+ ASSERT_EQUALS(fieldRef.dottedField(), field);
+}
+
+TEST(Normal, ParseTwice) {
+ string field = "a";
+ FieldRef fieldRef;
+ for (int i = 0; i < 2; i++) {
+ fieldRef.parse(field);
ASSERT_EQUALS(fieldRef.numParts(), 1U);
ASSERT_EQUALS(fieldRef.getPart(0), field);
ASSERT_EQUALS(fieldRef.dottedField(), field);
}
-
- TEST(Normal, ParseTwice) {
- string field = "a";
- FieldRef fieldRef;
- for (int i = 0; i < 2; i++) {
- fieldRef.parse(field);
- ASSERT_EQUALS(fieldRef.numParts(), 1U);
- ASSERT_EQUALS(fieldRef.getPart(0), field);
- ASSERT_EQUALS(fieldRef.dottedField(), field);
- }
- }
-
- TEST(Normal, MulitplePartsVariable) {
- const char* parts[] = {"a", "b", "c", "d", "e"};
- size_t size = sizeof(parts)/sizeof(char*);
- string field(parts[0]);
- for (size_t i=1; i<size; i++) {
- field.append(1, '.');
- field.append(parts[i]);
- }
-
- FieldRef fieldRef(field);
- ASSERT_EQUALS(fieldRef.numParts(), size);
- for (size_t i=0; i<size; i++) {
- ASSERT_EQUALS(fieldRef.getPart(i), parts[i]);
- }
- ASSERT_EQUALS(fieldRef.dottedField(), field);
- }
-
- TEST(Replacement, SingleField) {
- string field = "$";
- FieldRef fieldRef(field);
- ASSERT_EQUALS(fieldRef.numParts(), 1U);
- ASSERT_EQUALS(fieldRef.getPart(0), "$");
-
- string newField = "a";
- fieldRef.setPart(0, newField);
- ASSERT_EQUALS(fieldRef.numParts(), 1U);
- ASSERT_EQUALS(fieldRef.getPart(0), newField);
- ASSERT_EQUALS(fieldRef.dottedField(), newField);
- }
-
- TEST(Replacement, InMultipleField) {
- string field = "a.b.c.$.e";
- FieldRef fieldRef(field);
- ASSERT_EQUALS(fieldRef.numParts(), 5U);
- ASSERT_EQUALS(fieldRef.getPart(3), "$");
-
- string newField = "d";
- fieldRef.setPart(3, newField);
- ASSERT_EQUALS(fieldRef.numParts(), 5U);
- ASSERT_EQUALS(fieldRef.getPart(3), newField);
- ASSERT_EQUALS(fieldRef.dottedField(), "a.b.c.d.e");
- }
-
- TEST(Replacement, SameFieldMultipleReplacements) {
- string prefix = "a.";
- string field = prefix + "$";
- FieldRef fieldRef(field);
- ASSERT_EQUALS(fieldRef.numParts(), 2U);
-
- const char* parts[] = {"a", "b", "c", "d", "e"};
- size_t size = sizeof(parts)/sizeof(char*);
- for (size_t i=0; i<size; i++) {
- fieldRef.setPart(1, parts[i]);
- ASSERT_EQUALS(fieldRef.dottedField(), prefix + parts[i]);
- }
- }
-
- TEST( Prefix, Normal ) {
- FieldRef prefix, base( "a.b.c" );
-
- prefix.parse( "a.b" );
- ASSERT_TRUE( prefix.isPrefixOf( base ) );
-
- prefix.parse( "a" );
- ASSERT_TRUE( prefix.isPrefixOf( base ) );
- }
-
- TEST( Prefix, Dotted ) {
- FieldRef prefix( "a.0" ), base( "a.0.c" );
- ASSERT_TRUE( prefix.isPrefixOf( base ) );
- }
-
- TEST( Prefix, NoPrefixes ) {
- FieldRef prefix( "a.b" ), base( "a.b" );
- ASSERT_FALSE( prefix.isPrefixOf( base ) );
-
- base.parse( "a" );
- ASSERT_FALSE( prefix.isPrefixOf( base ) );
-
- base.parse( "b" );
- ASSERT_FALSE( prefix.isPrefixOf( base ) );
- }
-
- TEST( Prefix, EmptyBase ) {
- FieldRef field( "a" ), empty;
- ASSERT_FALSE( field.isPrefixOf( empty ) );
- ASSERT_FALSE( empty.isPrefixOf( field ) );
- ASSERT_FALSE( empty.isPrefixOf( empty ) );
- }
-
- TEST( PrefixSize, Normal ) {
- FieldRef fieldA( "a.b" ), fieldB( "a" );
- ASSERT_EQUALS( fieldA.commonPrefixSize( fieldB ), 1U );
-
- fieldB.parse( "a.b" );
- ASSERT_EQUALS( fieldA.commonPrefixSize( fieldB ), 2U );
-
- fieldB.parse( "a.b.c" );
- ASSERT_EQUALS( fieldA.commonPrefixSize( fieldB ), 2U );
- }
-
- TEST( PrefixSize, NoCommonatility ) {
- FieldRef fieldA, fieldB;
- fieldA.parse( "a" );
- fieldB.parse( "b" );
- ASSERT_EQUALS( fieldA.commonPrefixSize( fieldB ), 0U );
- }
-
- TEST( PrefixSize, Empty ) {
- FieldRef fieldA( "a" ), empty;
- ASSERT_EQUALS( fieldA.commonPrefixSize( empty ), 0U );
- ASSERT_EQUALS( empty.commonPrefixSize( fieldA ), 0U );
- }
-
- TEST( Equality, Simple1 ) {
- FieldRef a( "a.b" );
- ASSERT( a.equalsDottedField( "a.b" ) );
- ASSERT( !a.equalsDottedField( "a" ) );
- ASSERT( !a.equalsDottedField( "b" ) );
- ASSERT( !a.equalsDottedField( "a.b.c" ) );
+}
+
+TEST(Normal, MulitplePartsVariable) {
+ const char* parts[] = {"a", "b", "c", "d", "e"};
+ size_t size = sizeof(parts) / sizeof(char*);
+ string field(parts[0]);
+ for (size_t i = 1; i < size; i++) {
+ field.append(1, '.');
+ field.append(parts[i]);
}
- TEST( Equality, Simple2 ) {
- FieldRef a( "a" );
- ASSERT( !a.equalsDottedField( "a.b" ) );
- ASSERT( a.equalsDottedField( "a" ) );
- ASSERT( !a.equalsDottedField( "b" ) );
- ASSERT( !a.equalsDottedField( "a.b.c" ) );
+ FieldRef fieldRef(field);
+ ASSERT_EQUALS(fieldRef.numParts(), size);
+ for (size_t i = 0; i < size; i++) {
+ ASSERT_EQUALS(fieldRef.getPart(i), parts[i]);
}
-
- TEST( Comparison, BothEmpty ) {
- FieldRef a;
- ASSERT_TRUE( a == a );
- ASSERT_FALSE( a != a );
- ASSERT_FALSE( a < a );
- ASSERT_TRUE( a <= a );
- ASSERT_FALSE( a > a );
- ASSERT_TRUE( a >= a );
+ ASSERT_EQUALS(fieldRef.dottedField(), field);
+}
+
+TEST(Replacement, SingleField) {
+ string field = "$";
+ FieldRef fieldRef(field);
+ ASSERT_EQUALS(fieldRef.numParts(), 1U);
+ ASSERT_EQUALS(fieldRef.getPart(0), "$");
+
+ string newField = "a";
+ fieldRef.setPart(0, newField);
+ ASSERT_EQUALS(fieldRef.numParts(), 1U);
+ ASSERT_EQUALS(fieldRef.getPart(0), newField);
+ ASSERT_EQUALS(fieldRef.dottedField(), newField);
+}
+
+TEST(Replacement, InMultipleField) {
+ string field = "a.b.c.$.e";
+ FieldRef fieldRef(field);
+ ASSERT_EQUALS(fieldRef.numParts(), 5U);
+ ASSERT_EQUALS(fieldRef.getPart(3), "$");
+
+ string newField = "d";
+ fieldRef.setPart(3, newField);
+ ASSERT_EQUALS(fieldRef.numParts(), 5U);
+ ASSERT_EQUALS(fieldRef.getPart(3), newField);
+ ASSERT_EQUALS(fieldRef.dottedField(), "a.b.c.d.e");
+}
+
+TEST(Replacement, SameFieldMultipleReplacements) {
+ string prefix = "a.";
+ string field = prefix + "$";
+ FieldRef fieldRef(field);
+ ASSERT_EQUALS(fieldRef.numParts(), 2U);
+
+ const char* parts[] = {"a", "b", "c", "d", "e"};
+ size_t size = sizeof(parts) / sizeof(char*);
+ for (size_t i = 0; i < size; i++) {
+ fieldRef.setPart(1, parts[i]);
+ ASSERT_EQUALS(fieldRef.dottedField(), prefix + parts[i]);
}
-
- TEST( Comparison, EqualInSize ) {
- FieldRef a( "a.b.c" ), b( "a.d.c" );
- ASSERT_FALSE( a == b );
- ASSERT_TRUE( a != b );
- ASSERT_TRUE( a < b );
- ASSERT_TRUE( a <= b );
- ASSERT_FALSE( a > b );
- ASSERT_FALSE( a >= b );
- }
-
- TEST( Comparison, NonEqual ) {
- FieldRef a( "a.b.c" ), b( "b.d" );
- ASSERT_FALSE( a == b );
- ASSERT_TRUE( a != b );
- ASSERT_TRUE( a < b );
- ASSERT_TRUE( a <= b );
- ASSERT_FALSE( a > b );
- ASSERT_FALSE( a >= b );
- }
-
- TEST( Comparison, MixedEmtpyAndNot ) {
- FieldRef a( "a" ), b;
- ASSERT_FALSE( a == b );
- ASSERT_TRUE( a != b );
- ASSERT_FALSE( a < b );
- ASSERT_FALSE( a <= b );
- ASSERT_TRUE( a > b );
- ASSERT_TRUE( a >= b );
- }
-
- TEST( DottedField, Simple1 ) {
- FieldRef a( "a.b.c.d.e" );
- ASSERT_EQUALS( "a.b.c.d.e", a.dottedField() );
- ASSERT_EQUALS( "a.b.c.d.e", a.dottedField(0) );
- ASSERT_EQUALS( "b.c.d.e", a.dottedField(1) );
- ASSERT_EQUALS( "c.d.e", a.dottedField(2) );
- ASSERT_EQUALS( "d.e", a.dottedField(3) );
- ASSERT_EQUALS( "e", a.dottedField(4) );
- ASSERT_EQUALS( "", a.dottedField(5) );
- ASSERT_EQUALS( "", a.dottedField(6) );
- }
-
- TEST(DottedSubstring, Short) {
- FieldRef path("a");
- ASSERT_EQUALS(1u, path.numParts());
- ASSERT_EQUALS("a", path.dottedSubstring(0, path.numParts()));
- ASSERT_EQUALS("", path.dottedSubstring(1, path.numParts()));
- ASSERT_EQUALS("", path.dottedSubstring(0, 0));
- }
-
- TEST(DottedSubstring, Empty) {
- FieldRef path("");
- ASSERT_EQUALS(0u, path.numParts());
- ASSERT_EQUALS("", path.dottedSubstring(0, path.numParts()));
- ASSERT_EQUALS("", path.dottedSubstring(1, path.numParts()));
- ASSERT_EQUALS("", path.dottedSubstring(0, 0));
- }
-
- TEST(DottedSubstring, Nested) {
- FieldRef path("a.b.c.d.e");
- ASSERT_EQUALS(5u, path.numParts());
-
- ASSERT_EQUALS("b.c.d.e", path.dottedSubstring(1, path.numParts()));
- ASSERT_EQUALS("c.d.e", path.dottedSubstring(2, path.numParts()));
- ASSERT_EQUALS("d.e", path.dottedSubstring(3, path.numParts()));
- ASSERT_EQUALS("e", path.dottedSubstring(4, path.numParts()));
- ASSERT_EQUALS("", path.dottedSubstring(5, path.numParts()));
- ASSERT_EQUALS("", path.dottedSubstring(6, path.numParts()));
-
- ASSERT_EQUALS("a.b.c.d.e", path.dottedSubstring(0, path.numParts()));
- ASSERT_EQUALS("a.b.c.d", path.dottedSubstring(0, path.numParts() - 1));
- ASSERT_EQUALS("a.b.c", path.dottedSubstring(0, path.numParts() - 2));
- ASSERT_EQUALS("a.b", path.dottedSubstring(0, path.numParts() - 3));
- ASSERT_EQUALS("a", path.dottedSubstring(0, path.numParts() - 4));
- ASSERT_EQUALS("", path.dottedSubstring(0, path.numParts() - 5));
- ASSERT_EQUALS("", path.dottedSubstring(0, path.numParts() - 6));
-
- ASSERT_EQUALS("b.c.d", path.dottedSubstring(1, path.numParts() - 1));
- ASSERT_EQUALS("b.c", path.dottedSubstring(1, path.numParts() - 2));
- ASSERT_EQUALS("b", path.dottedSubstring(1, path.numParts() - 3));
- ASSERT_EQUALS("", path.dottedSubstring(1, path.numParts() - 4));
- ASSERT_EQUALS("", path.dottedSubstring(1, path.numParts() - 5));
- }
-
-} // namespace
+}
+
+TEST(Prefix, Normal) {
+ FieldRef prefix, base("a.b.c");
+
+ prefix.parse("a.b");
+ ASSERT_TRUE(prefix.isPrefixOf(base));
+
+ prefix.parse("a");
+ ASSERT_TRUE(prefix.isPrefixOf(base));
+}
+
+TEST(Prefix, Dotted) {
+ FieldRef prefix("a.0"), base("a.0.c");
+ ASSERT_TRUE(prefix.isPrefixOf(base));
+}
+
+TEST(Prefix, NoPrefixes) {
+ FieldRef prefix("a.b"), base("a.b");
+ ASSERT_FALSE(prefix.isPrefixOf(base));
+
+ base.parse("a");
+ ASSERT_FALSE(prefix.isPrefixOf(base));
+
+ base.parse("b");
+ ASSERT_FALSE(prefix.isPrefixOf(base));
+}
+
+TEST(Prefix, EmptyBase) {
+ FieldRef field("a"), empty;
+ ASSERT_FALSE(field.isPrefixOf(empty));
+ ASSERT_FALSE(empty.isPrefixOf(field));
+ ASSERT_FALSE(empty.isPrefixOf(empty));
+}
+
+TEST(PrefixSize, Normal) {
+ FieldRef fieldA("a.b"), fieldB("a");
+ ASSERT_EQUALS(fieldA.commonPrefixSize(fieldB), 1U);
+
+ fieldB.parse("a.b");
+ ASSERT_EQUALS(fieldA.commonPrefixSize(fieldB), 2U);
+
+ fieldB.parse("a.b.c");
+ ASSERT_EQUALS(fieldA.commonPrefixSize(fieldB), 2U);
+}
+
+TEST(PrefixSize, NoCommonatility) {
+ FieldRef fieldA, fieldB;
+ fieldA.parse("a");
+ fieldB.parse("b");
+ ASSERT_EQUALS(fieldA.commonPrefixSize(fieldB), 0U);
+}
+
+TEST(PrefixSize, Empty) {
+ FieldRef fieldA("a"), empty;
+ ASSERT_EQUALS(fieldA.commonPrefixSize(empty), 0U);
+ ASSERT_EQUALS(empty.commonPrefixSize(fieldA), 0U);
+}
+
+TEST(Equality, Simple1) {
+ FieldRef a("a.b");
+ ASSERT(a.equalsDottedField("a.b"));
+ ASSERT(!a.equalsDottedField("a"));
+ ASSERT(!a.equalsDottedField("b"));
+ ASSERT(!a.equalsDottedField("a.b.c"));
+}
+
+TEST(Equality, Simple2) {
+ FieldRef a("a");
+ ASSERT(!a.equalsDottedField("a.b"));
+ ASSERT(a.equalsDottedField("a"));
+ ASSERT(!a.equalsDottedField("b"));
+ ASSERT(!a.equalsDottedField("a.b.c"));
+}
+
+TEST(Comparison, BothEmpty) {
+ FieldRef a;
+ ASSERT_TRUE(a == a);
+ ASSERT_FALSE(a != a);
+ ASSERT_FALSE(a < a);
+ ASSERT_TRUE(a <= a);
+ ASSERT_FALSE(a > a);
+ ASSERT_TRUE(a >= a);
+}
+
+TEST(Comparison, EqualInSize) {
+ FieldRef a("a.b.c"), b("a.d.c");
+ ASSERT_FALSE(a == b);
+ ASSERT_TRUE(a != b);
+ ASSERT_TRUE(a < b);
+ ASSERT_TRUE(a <= b);
+ ASSERT_FALSE(a > b);
+ ASSERT_FALSE(a >= b);
+}
+
+TEST(Comparison, NonEqual) {
+ FieldRef a("a.b.c"), b("b.d");
+ ASSERT_FALSE(a == b);
+ ASSERT_TRUE(a != b);
+ ASSERT_TRUE(a < b);
+ ASSERT_TRUE(a <= b);
+ ASSERT_FALSE(a > b);
+ ASSERT_FALSE(a >= b);
+}
+
+TEST(Comparison, MixedEmtpyAndNot) {
+ FieldRef a("a"), b;
+ ASSERT_FALSE(a == b);
+ ASSERT_TRUE(a != b);
+ ASSERT_FALSE(a < b);
+ ASSERT_FALSE(a <= b);
+ ASSERT_TRUE(a > b);
+ ASSERT_TRUE(a >= b);
+}
+
+TEST(DottedField, Simple1) {
+ FieldRef a("a.b.c.d.e");
+ ASSERT_EQUALS("a.b.c.d.e", a.dottedField());
+ ASSERT_EQUALS("a.b.c.d.e", a.dottedField(0));
+ ASSERT_EQUALS("b.c.d.e", a.dottedField(1));
+ ASSERT_EQUALS("c.d.e", a.dottedField(2));
+ ASSERT_EQUALS("d.e", a.dottedField(3));
+ ASSERT_EQUALS("e", a.dottedField(4));
+ ASSERT_EQUALS("", a.dottedField(5));
+ ASSERT_EQUALS("", a.dottedField(6));
+}
+
+TEST(DottedSubstring, Short) {
+ FieldRef path("a");
+ ASSERT_EQUALS(1u, path.numParts());
+ ASSERT_EQUALS("a", path.dottedSubstring(0, path.numParts()));
+ ASSERT_EQUALS("", path.dottedSubstring(1, path.numParts()));
+ ASSERT_EQUALS("", path.dottedSubstring(0, 0));
+}
+
+TEST(DottedSubstring, Empty) {
+ FieldRef path("");
+ ASSERT_EQUALS(0u, path.numParts());
+ ASSERT_EQUALS("", path.dottedSubstring(0, path.numParts()));
+ ASSERT_EQUALS("", path.dottedSubstring(1, path.numParts()));
+ ASSERT_EQUALS("", path.dottedSubstring(0, 0));
+}
+
+TEST(DottedSubstring, Nested) {
+ FieldRef path("a.b.c.d.e");
+ ASSERT_EQUALS(5u, path.numParts());
+
+ ASSERT_EQUALS("b.c.d.e", path.dottedSubstring(1, path.numParts()));
+ ASSERT_EQUALS("c.d.e", path.dottedSubstring(2, path.numParts()));
+ ASSERT_EQUALS("d.e", path.dottedSubstring(3, path.numParts()));
+ ASSERT_EQUALS("e", path.dottedSubstring(4, path.numParts()));
+ ASSERT_EQUALS("", path.dottedSubstring(5, path.numParts()));
+ ASSERT_EQUALS("", path.dottedSubstring(6, path.numParts()));
+
+ ASSERT_EQUALS("a.b.c.d.e", path.dottedSubstring(0, path.numParts()));
+ ASSERT_EQUALS("a.b.c.d", path.dottedSubstring(0, path.numParts() - 1));
+ ASSERT_EQUALS("a.b.c", path.dottedSubstring(0, path.numParts() - 2));
+ ASSERT_EQUALS("a.b", path.dottedSubstring(0, path.numParts() - 3));
+ ASSERT_EQUALS("a", path.dottedSubstring(0, path.numParts() - 4));
+ ASSERT_EQUALS("", path.dottedSubstring(0, path.numParts() - 5));
+ ASSERT_EQUALS("", path.dottedSubstring(0, path.numParts() - 6));
+
+ ASSERT_EQUALS("b.c.d", path.dottedSubstring(1, path.numParts() - 1));
+ ASSERT_EQUALS("b.c", path.dottedSubstring(1, path.numParts() - 2));
+ ASSERT_EQUALS("b", path.dottedSubstring(1, path.numParts() - 3));
+ ASSERT_EQUALS("", path.dottedSubstring(1, path.numParts() - 4));
+ ASSERT_EQUALS("", path.dottedSubstring(1, path.numParts() - 5));
+}
+
+} // namespace
diff --git a/src/mongo/db/fts/fts_basic_tokenizer.cpp b/src/mongo/db/fts/fts_basic_tokenizer.cpp
index 2d5cc493123..9fc41923d40 100644
--- a/src/mongo/db/fts/fts_basic_tokenizer.cpp
+++ b/src/mongo/db/fts/fts_basic_tokenizer.cpp
@@ -42,56 +42,54 @@
namespace mongo {
namespace fts {
- using std::string;
-
- BasicFTSTokenizer::BasicFTSTokenizer(const FTSLanguage* language)
- : _language(language), _stemmer(language), _stopWords(StopWords::getStopWords(language)) {
- }
-
- void BasicFTSTokenizer::reset(StringData document, Options options) {
- _options = options;
- _document = document.toString();
- _tokenizer = stdx::make_unique<Tokenizer>(_language, _document);
- }
-
- bool BasicFTSTokenizer::moveNext() {
- while (true) {
- bool hasMore = _tokenizer->more();
- if (!hasMore) {
- _stem = "";
- return false;
- }
-
- Token token = _tokenizer->next();
+using std::string;
+
+BasicFTSTokenizer::BasicFTSTokenizer(const FTSLanguage* language)
+ : _language(language), _stemmer(language), _stopWords(StopWords::getStopWords(language)) {}
+
+void BasicFTSTokenizer::reset(StringData document, Options options) {
+ _options = options;
+ _document = document.toString();
+ _tokenizer = stdx::make_unique<Tokenizer>(_language, _document);
+}
+
+bool BasicFTSTokenizer::moveNext() {
+ while (true) {
+ bool hasMore = _tokenizer->more();
+ if (!hasMore) {
+ _stem = "";
+ return false;
+ }
- // Do not return delimiters
- if (token.type != Token::TEXT) {
- continue;
- }
+ Token token = _tokenizer->next();
- string word = token.data.toString();
+ // Do not return delimiters
+ if (token.type != Token::TEXT) {
+ continue;
+ }
- word = tolowerString(token.data);
+ string word = token.data.toString();
- // Stop words are case-sensitive so we need them to be lower cased to check
- // against the stop word list
- if ((_options & FTSTokenizer::FilterStopWords) &&
- _stopWords->isStopWord(word)) {
- continue;
- }
+ word = tolowerString(token.data);
- if (_options & FTSTokenizer::GenerateCaseSensitiveTokens) {
- word = token.data.toString();
- }
+ // Stop words are case-sensitive so we need them to be lower cased to check
+ // against the stop word list
+ if ((_options & FTSTokenizer::FilterStopWords) && _stopWords->isStopWord(word)) {
+ continue;
+ }
- _stem = _stemmer.stem(word);
- return true;
+ if (_options & FTSTokenizer::GenerateCaseSensitiveTokens) {
+ word = token.data.toString();
}
- }
- StringData BasicFTSTokenizer::get() const {
- return _stem;
+ _stem = _stemmer.stem(word);
+ return true;
}
+}
+
+StringData BasicFTSTokenizer::get() const {
+ return _stem;
+}
-} // namespace fts
-} // namespace mongo
+} // namespace fts
+} // namespace mongo
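
The moveNext() loop above applies the whole pipeline to each token: skip delimiters,
lower-case, optionally drop stop words, optionally restore the original casing, then stem.
A rough usage sketch follows, modelled on the tokenizer tests later in this change; the
header paths, the stemsOf() helper, and the error handling are assumptions, while the
reset()/moveNext()/get() calls and the FilterStopWords option come straight from the code
above.

    #include <memory>
    #include <string>
    #include <vector>

    #include "mongo/db/fts/fts_language.h"
    #include "mongo/db/fts/fts_tokenizer.h"

    using namespace mongo::fts;

    std::vector<std::string> stemsOf(const char* text) {
        // Look up the English analyzer the same way the tokenizer tests do.
        StatusWithFTSLanguage swl = FTSLanguage::make("english", TEXT_INDEX_VERSION_2);
        if (!swl.getStatus().isOK()) {
            return std::vector<std::string>();
        }

        std::unique_ptr<FTSTokenizer> tokenizer(swl.getValue()->createTokenizer());

        // FilterStopWords drops stop words after lower-casing; adding
        // GenerateCaseSensitiveTokens would preserve each term's original casing instead.
        tokenizer->reset(text, FTSTokenizer::FilterStopWords);

        std::vector<std::string> stems;
        while (tokenizer->moveNext()) {
            stems.push_back(tokenizer->get().toString());
        }
        return stems;
    }
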
diff --git a/src/mongo/db/fts/fts_basic_tokenizer.h b/src/mongo/db/fts/fts_basic_tokenizer.h
index 45b3ad8e074..221de72bb8c 100644
--- a/src/mongo/db/fts/fts_basic_tokenizer.h
+++ b/src/mongo/db/fts/fts_basic_tokenizer.h
@@ -37,44 +37,45 @@
namespace mongo {
namespace fts {
- class FTSLanguage;
- class StopWords;
+class FTSLanguage;
+class StopWords;
- /**
- * BasicFTSTokenizer
- * A iterator of "documents" where a document contains ASCII space (U+0020) delimited words.
- * Uses
- * - Tokenizer for tokenizing words via ASCII space (ie, U+0020 space).
- * - tolower from the C standard libary to lower letters, ie, it only supports lower casing
- * - ASCII letters (U+0000 - U+007F)
- * - Stemmer (ie, Snowball Stemmer) to stem words.
- * - Embeded stop word lists for each language in StopWord class
- *
- * For each word returns a stem version of a word optimized for full text indexing.
- * Optionally supports returning case sensitive search terms.
- */
- class BasicFTSTokenizer : public FTSTokenizer {
- MONGO_DISALLOW_COPYING(BasicFTSTokenizer);
- public:
- BasicFTSTokenizer(const FTSLanguage* language);
+/**
+ * BasicFTSTokenizer
+ * An iterator over "documents" where a document contains ASCII space (U+0020) delimited words.
+ * Uses
+ * - Tokenizer for tokenizing words via ASCII space (i.e., U+0020 space).
+ * - tolower from the C standard library to lower-case letters; it only supports lower casing
+ *   - ASCII letters (U+0000 - U+007F)
+ * - Stemmer (i.e., Snowball Stemmer) to stem words.
+ * - Embedded stop word lists for each language in the StopWords class
+ *
+ * For each word, returns a stemmed version optimized for full text indexing.
+ * Optionally supports returning case sensitive search terms.
+ */
+class BasicFTSTokenizer : public FTSTokenizer {
+ MONGO_DISALLOW_COPYING(BasicFTSTokenizer);
+
+public:
+ BasicFTSTokenizer(const FTSLanguage* language);
- void reset(StringData document, Options options) final;
+ void reset(StringData document, Options options) final;
- bool moveNext() final;
+ bool moveNext() final;
- StringData get() const final;
+ StringData get() const final;
- private:
- const FTSLanguage* const _language;
- const Stemmer _stemmer;
- const StopWords* const _stopWords;
+private:
+ const FTSLanguage* const _language;
+ const Stemmer _stemmer;
+ const StopWords* const _stopWords;
- std::string _document;
- std::unique_ptr<Tokenizer> _tokenizer;
- Options _options;
+ std::string _document;
+ std::unique_ptr<Tokenizer> _tokenizer;
+ Options _options;
- std::string _stem;
- };
+ std::string _stem;
+};
-} // namespace fts
-} // namespace mongo
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_basic_tokenizer_test.cpp b/src/mongo/db/fts/fts_basic_tokenizer_test.cpp
index 384be225f28..5feab67face 100644
--- a/src/mongo/db/fts/fts_basic_tokenizer_test.cpp
+++ b/src/mongo/db/fts/fts_basic_tokenizer_test.cpp
@@ -33,53 +33,51 @@
namespace mongo {
namespace fts {
- std::vector<std::string> tokenizeString(const char* str, const char* language) {
- StatusWithFTSLanguage swl = FTSLanguage::make(language, TEXT_INDEX_VERSION_2);
- ASSERT_OK(swl);
+std::vector<std::string> tokenizeString(const char* str, const char* language) {
+ StatusWithFTSLanguage swl = FTSLanguage::make(language, TEXT_INDEX_VERSION_2);
+ ASSERT_OK(swl);
- std::unique_ptr<FTSTokenizer> tokenizer(swl.getValue()->createTokenizer());
+ std::unique_ptr<FTSTokenizer> tokenizer(swl.getValue()->createTokenizer());
- tokenizer->reset(str, FTSTokenizer::None);
+ tokenizer->reset(str, FTSTokenizer::None);
- std::vector<std::string> terms;
+ std::vector<std::string> terms;
- while (tokenizer->moveNext()) {
- terms.push_back(tokenizer->get().toString());
- }
-
- return terms;
+ while (tokenizer->moveNext()) {
+ terms.push_back(tokenizer->get().toString());
}
- // Ensure punctuation is filtered out of the indexed document
- // and the 's is not separated
- TEST(FtsBasicTokenizer, English) {
- std::vector<std::string> terms = tokenizeString("Do you see Mark's dog running?",
- "english");
+ return terms;
+}
- ASSERT_EQUALS(6U, terms.size());
- ASSERT_EQUALS("do", terms[0]);
- ASSERT_EQUALS("you", terms[1]);
- ASSERT_EQUALS("see", terms[2]);
- ASSERT_EQUALS("mark", terms[3]);
- ASSERT_EQUALS("dog", terms[4]);
- ASSERT_EQUALS("run", terms[5]);
- }
+// Ensure punctuation is filtered out of the indexed document
+// and the 's is not separated
+TEST(FtsBasicTokenizer, English) {
+ std::vector<std::string> terms = tokenizeString("Do you see Mark's dog running?", "english");
- // Ensure punctuation is filtered out of the indexed document
- // and the 's is separated
- TEST(FtsBasicTokenizer, French) {
- std::vector<std::string> terms = tokenizeString("Do you see Mark's dog running?",
- "french");
+ ASSERT_EQUALS(6U, terms.size());
+ ASSERT_EQUALS("do", terms[0]);
+ ASSERT_EQUALS("you", terms[1]);
+ ASSERT_EQUALS("see", terms[2]);
+ ASSERT_EQUALS("mark", terms[3]);
+ ASSERT_EQUALS("dog", terms[4]);
+ ASSERT_EQUALS("run", terms[5]);
+}
- ASSERT_EQUALS(7U, terms.size());
- ASSERT_EQUALS("do", terms[0]);
- ASSERT_EQUALS("you", terms[1]);
- ASSERT_EQUALS("se", terms[2]);
- ASSERT_EQUALS("mark", terms[3]);
- ASSERT_EQUALS("s", terms[4]);
- ASSERT_EQUALS("dog", terms[5]);
- ASSERT_EQUALS("running", terms[6]);
- }
+// Ensure punctuation is filtered out of the indexed document
+// and the 's is separated
+TEST(FtsBasicTokenizer, French) {
+ std::vector<std::string> terms = tokenizeString("Do you see Mark's dog running?", "french");
+
+ ASSERT_EQUALS(7U, terms.size());
+ ASSERT_EQUALS("do", terms[0]);
+ ASSERT_EQUALS("you", terms[1]);
+ ASSERT_EQUALS("se", terms[2]);
+ ASSERT_EQUALS("mark", terms[3]);
+ ASSERT_EQUALS("s", terms[4]);
+ ASSERT_EQUALS("dog", terms[5]);
+ ASSERT_EQUALS("running", terms[6]);
+}
-} // namespace fts
-} // namespace mongo
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_element_iterator.cpp b/src/mongo/db/fts/fts_element_iterator.cpp
index f57e1097c14..4df642dc66a 100644
--- a/src/mongo/db/fts/fts_element_iterator.cpp
+++ b/src/mongo/db/fts/fts_element_iterator.cpp
@@ -37,152 +37,149 @@
namespace mongo {
- namespace fts {
+namespace fts {
+
+using std::string;
+
+extern const double DEFAULT_WEIGHT;
+extern const double MAX_WEIGHT;
+
+std::ostream& operator<<(std::ostream& os, FTSElementIterator::FTSIteratorFrame& frame) {
+ BSONObjIterator it = frame._it;
+ return os << "FTSIteratorFrame["
+ " element=" << (*it).toString() << ", _language=" << frame._language->str()
+ << ", _parentPath=" << frame._parentPath << ", _isArray=" << frame._isArray << "]";
+}
+
+FTSElementIterator::FTSElementIterator(const FTSSpec& spec, const BSONObj& obj)
+ : _frame(obj, spec, &spec.defaultLanguage(), "", false),
+ _spec(spec),
+ _currentValue(advance()) {}
+
+namespace {
+/** Check for exact match or path prefix match. */
+inline bool _matchPrefix(const string& dottedName, const string& weight) {
+ if (weight == dottedName) {
+ return true;
+ }
+ return mongoutils::str::startsWith(weight, dottedName + '.');
+}
+}
+
+bool FTSElementIterator::more() {
+ //_currentValue = advance();
+ return _currentValue.valid();
+}
+
+FTSIteratorValue FTSElementIterator::next() {
+ FTSIteratorValue result = _currentValue;
+ _currentValue = advance();
+ return result;
+}
- using std::string;
-
- extern const double DEFAULT_WEIGHT;
- extern const double MAX_WEIGHT;
-
- std::ostream& operator<<( std::ostream& os, FTSElementIterator::FTSIteratorFrame& frame ) {
- BSONObjIterator it = frame._it;
- return os << "FTSIteratorFrame["
- " element=" << (*it).toString() <<
- ", _language=" << frame._language->str() <<
- ", _parentPath=" << frame._parentPath <<
- ", _isArray=" << frame._isArray << "]";
- }
-
- FTSElementIterator::FTSElementIterator( const FTSSpec& spec, const BSONObj& obj )
- : _frame( obj, spec, &spec.defaultLanguage(), "", false ),
- _spec( spec ),
- _currentValue( advance() )
- { }
-
- namespace {
- /** Check for exact match or path prefix match. */
- inline bool _matchPrefix( const string& dottedName, const string& weight ) {
- if ( weight == dottedName ) {
- return true;
- }
- return mongoutils::str::startsWith( weight, dottedName + '.' );
- }
- }
-
- bool FTSElementIterator::more() {
- //_currentValue = advance();
- return _currentValue.valid();
+/**
+ * Helper method:
+ * if (current object iterator not exhausted) return true;
+ * while (frame stack not empty) {
+ * resume object iterator popped from stack;
+ * if (resumed iterator not exhausted) return true;
+ * }
+ * return false;
+ */
+bool FTSElementIterator::moreFrames() {
+ if (_frame._it.more())
+ return true;
+ while (!_frameStack.empty()) {
+ _frame = _frameStack.top();
+ _frameStack.pop();
+ if (_frame._it.more()) {
+ return true;
}
-
- FTSIteratorValue FTSElementIterator::next() {
- FTSIteratorValue result = _currentValue;
- _currentValue = advance();
- return result;
+ }
+ return false;
+}
+
+FTSIteratorValue FTSElementIterator::advance() {
+ while (moreFrames()) {
+ BSONElement elem = _frame._it.next();
+ string fieldName = elem.fieldName();
+
+ // Skip "language" specifier fields if wildcard.
+ if (_spec.wildcard() && _spec.languageOverrideField() == fieldName) {
+ continue;
}
- /**
- * Helper method:
- * if (current object iterator not exhausted) return true;
- * while (frame stack not empty) {
- * resume object iterator popped from stack;
- * if (resumed iterator not exhausted) return true;
- * }
- * return false;
- */
- bool FTSElementIterator::moreFrames() {
- if (_frame._it.more()) return true;
- while (!_frameStack.empty()) {
- _frame = _frameStack.top();
- _frameStack.pop();
- if (_frame._it.more()) {
- return true;
- }
+ // Compose the dotted name of the current field:
+ // 1. parent path empty (top level): use the current field name
+ // 2. parent path non-empty and obj is an array: use the parent path
+ // 3. parent path non-empty and obj is a sub-doc: append field name to parent path
+ string dottedName = (_frame._parentPath.empty() ? fieldName : _frame._isArray
+ ? _frame._parentPath
+ : _frame._parentPath + '.' + fieldName);
+
+ // Find lower bound of dottedName in _weights. lower_bound leaves us at the first
+ // weight that could possibly match or be a prefix of dottedName. And if this
+ // element fails to match, then no subsequent weight can match, since the weights
+ // are lexicographically ordered.
+ Weights::const_iterator i =
+ _spec.weights().lower_bound(elem.type() == Object ? dottedName + '.' : dottedName);
+
+ // possibleWeightMatch is set if the weight map contains either a match or some item
+ // lexicographically larger than fieldName. This boolean acts as a guard on
+ // dereferences of iterator 'i'.
+ bool possibleWeightMatch = (i != _spec.weights().end());
+
+ // Optimize away two cases, when not wildcard:
+ // 1. lower_bound seeks to end(): no prefix match possible
+ // 2. lower_bound seeks to a name which is not a prefix
+ if (!_spec.wildcard()) {
+ if (!possibleWeightMatch) {
+ continue;
+ } else if (!_matchPrefix(dottedName, i->first)) {
+ continue;
}
- return false;
}
- FTSIteratorValue FTSElementIterator::advance() {
- while ( moreFrames() ) {
-
- BSONElement elem = _frame._it.next();
- string fieldName = elem.fieldName();
+ // Is the current field an exact match on a weight?
+ bool exactMatch = (possibleWeightMatch && i->first == dottedName);
+ double weight = (possibleWeightMatch ? i->second : DEFAULT_WEIGHT);
- // Skip "language" specifier fields if wildcard.
- if ( _spec.wildcard() && _spec.languageOverrideField() == fieldName ) {
- continue;
+ switch (elem.type()) {
+ case String:
+ // Only index strings on exact match or wildcard.
+ if (exactMatch || _spec.wildcard()) {
+ return FTSIteratorValue(elem.valuestr(), _frame._language, weight);
}
-
- // Compose the dotted name of the current field:
- // 1. parent path empty (top level): use the current field name
- // 2. parent path non-empty and obj is an array: use the parent path
- // 3. parent path non-empty and obj is a sub-doc: append field name to parent path
- string dottedName = ( _frame._parentPath.empty() ? fieldName
- : _frame._isArray ? _frame._parentPath
- : _frame._parentPath + '.' + fieldName );
-
- // Find lower bound of dottedName in _weights. lower_bound leaves us at the first
- // weight that could possibly match or be a prefix of dottedName. And if this
- // element fails to match, then no subsequent weight can match, since the weights
- // are lexicographically ordered.
- Weights::const_iterator i = _spec.weights().lower_bound( elem.type() == Object
- ? dottedName + '.'
- : dottedName );
-
- // possibleWeightMatch is set if the weight map contains either a match or some item
- // lexicographically larger than fieldName. This boolean acts as a guard on
- // dereferences of iterator 'i'.
- bool possibleWeightMatch = ( i != _spec.weights().end() );
-
- // Optimize away two cases, when not wildcard:
- // 1. lower_bound seeks to end(): no prefix match possible
- // 2. lower_bound seeks to a name which is not a prefix
- if ( !_spec.wildcard() ) {
- if ( !possibleWeightMatch ) {
- continue;
- }
- else if ( !_matchPrefix( dottedName, i->first ) ) {
- continue;
- }
+ break;
+
+ case Object:
+ // Only descend into a sub-document on proper prefix or wildcard. Note that
+ // !exactMatch is a sufficient test for proper prefix match, because of
+            //    if ( !_matchPrefix( dottedName, i->first ) ) continue;
+ // block above.
+ if (!exactMatch || _spec.wildcard()) {
+ _frameStack.push(_frame);
+ _frame =
+ FTSIteratorFrame(elem.Obj(), _spec, _frame._language, dottedName, false);
}
-
- // Is the current field an exact match on a weight?
- bool exactMatch = ( possibleWeightMatch && i->first == dottedName );
- double weight = ( possibleWeightMatch ? i->second : DEFAULT_WEIGHT );
-
- switch ( elem.type() ) {
- case String:
- // Only index strings on exact match or wildcard.
- if ( exactMatch || _spec.wildcard() ) {
- return FTSIteratorValue( elem.valuestr(), _frame._language, weight );
- }
- break;
-
- case Object:
- // Only descend into a sub-document on proper prefix or wildcard. Note that
- // !exactMatch is a sufficient test for proper prefix match, because of
-                    //    if ( !_matchPrefix( dottedName, i->first ) ) continue;
- // block above.
- if ( !exactMatch || _spec.wildcard() ) {
- _frameStack.push( _frame );
- _frame = FTSIteratorFrame( elem.Obj(), _spec, _frame._language, dottedName, false );
- }
- break;
-
- case Array:
- // Only descend into arrays from non-array parents or on wildcard.
- if ( !_frame._isArray || _spec.wildcard() ) {
- _frameStack.push( _frame );
- _frame = FTSIteratorFrame( elem.Obj(), _spec, _frame._language, dottedName, true );
- }
- break;
-
- default:
- // Skip over all other BSON types.
- break;
+ break;
+
+ case Array:
+ // Only descend into arrays from non-array parents or on wildcard.
+ if (!_frame._isArray || _spec.wildcard()) {
+ _frameStack.push(_frame);
+ _frame =
+ FTSIteratorFrame(elem.Obj(), _spec, _frame._language, dottedName, true);
}
- }
- return FTSIteratorValue(); // valid()==false
+ break;
+
+ default:
+ // Skip over all other BSON types.
+ break;
}
+ }
+ return FTSIteratorValue(); // valid()==false
+}
- } // namespace fts
-} // namespace mongo
+} // namespace fts
+} // namespace mongo
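
The reformatted advance() loop above hinges on one idea: compose the dotted path of the current field, then probe the lexicographically ordered weights map with lower_bound plus a prefix test to decide whether to index the value or descend into it. A minimal standalone sketch of that lookup, using a plain std::map in place of the FTS Weights type (all names below are illustrative, not from the tree):

// Sketch only: the dotted-path weight lookup used by FTSElementIterator::advance(),
// restated with a plain std::map standing in for the FTS Weights map.
#include <iostream>
#include <map>
#include <string>

// Exact match, or weightPath extends dottedName by at least one ".segment".
static bool matchPrefix(const std::string& dottedName, const std::string& weightPath) {
    if (weightPath == dottedName)
        return true;
    return weightPath.compare(0, dottedName.size() + 1, dottedName + '.') == 0;
}

int main() {
    // Lexicographically ordered, like FTSSpec::weights().
    std::map<std::string, double> weights = {{"a.b.c", 5.0}, {"d", 10.0}};

    const char* candidates[] = {"a", "a.b", "a.x", "d", "x"};
    for (const char* c : candidates) {
        std::string path = c;
        // lower_bound lands on the first entry that could match or extend 'path';
        // if that entry fails the prefix test, no later entry can match either.
        std::map<std::string, double>::const_iterator it = weights.lower_bound(path);
        if (it != weights.end() && matchPrefix(path, it->first)) {
            std::cout << path << ": index/descend, weight entry " << it->first << " ("
                      << it->second << ")\n";
        } else {
            std::cout << path << ": skip\n";
        }
    }
    return 0;
}
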
diff --git a/src/mongo/db/fts/fts_element_iterator.h b/src/mongo/db/fts/fts_element_iterator.h
index 17f72fff7f9..21e1e446627 100644
--- a/src/mongo/db/fts/fts_element_iterator.h
+++ b/src/mongo/db/fts/fts_element_iterator.h
@@ -40,133 +40,121 @@
namespace mongo {
- namespace fts {
-
- /**
- * Encapsulates data fields returned by FTSElementIterator
- */
- struct FTSIteratorValue {
-
- FTSIteratorValue( const char* text,
- const FTSLanguage* language,
- double weight )
- : _text(text),
- _language(language),
- _weight(weight),
- _valid(true)
- {}
-
- FTSIteratorValue()
- : _text(NULL),
- _language(),
- _weight(0.0),
- _valid(false)
- {}
-
- bool valid() const { return _valid; }
-
- const char* _text;
- const FTSLanguage* _language;
- double _weight;
- bool _valid;
- };
-
- /**
- * Iterator pattern for walking through text-indexed fields of a
- * BSON document.
- *
- * Example usage:
- * FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- * FTSElementIterator it( spec, obj );
- * while ( it.more() ) {
- * FTSIteratorValue val = it.next();
- * std::cout << val._text << '[' << val._language.str()
- * << ',' << val._weight << ']' << std::endl;
- * }
- *
- */
- class FTSElementIterator {
- public:
- /**
- * Iterator constructor
- *
- * Note: Caller must ensure that the constructed FTSElementIterator
- * does >not< outlive either spec or obj.
- *
- * @arg spec text index specifier
- * @arg obj document that the iterator will traverse
- */
- FTSElementIterator( const FTSSpec& spec, const BSONObj& obj);
-
- /**
- * Iterator interface: returns false iff there are no further text-indexable fields.
- */
- bool more();
-
- /**
- * Iterator interface: advances to the next text-indexable field.
- */
- FTSIteratorValue next();
-
- /**
- * Iterator frame needed for iterative implementation of
- * recursive sub-documents.
- */
- struct FTSIteratorFrame {
- FTSIteratorFrame( const BSONObj& obj,
- const FTSSpec& spec,
- const FTSLanguage* parentLanguage,
- const std::string& parentPath,
- bool isArray )
- : _it( obj ),
- _language( spec._getLanguageToUseV2( obj, parentLanguage ) ),
- _parentPath( parentPath ),
- _isArray( isArray )
- {}
-
- friend std::ostream& operator<<(std::ostream&, FTSIteratorFrame&);
-
- BSONObjIterator _it;
- const FTSLanguage* _language;
- std::string _parentPath;
- bool _isArray;
- };
-
- private:
- /**
- * Helper method:
- * returns false iff all FTSIteratorFrames on _frameStack are exhausted.
- */
- bool moreFrames();
-
- /**
- * Helper method:
- * advances to the next text-indexable field, possibly pushing frames as
- * needed for recursive sub-documents.
- */
- FTSIteratorValue advance();
-
- /**
- * Stack used by iterative implementation of recursive sub-document traversal.
- */
- std::stack<FTSIteratorFrame> _frameStack;
-
- /**
- * Current frame, not yet pushed to stack.
- */
- FTSIteratorFrame _frame;
-
- /**
- * Constructor input parameter: text index specification.
- */
- const FTSSpec& _spec;
-
- /**
- * Current iterator return value, computed by 'more()', returned by 'next()'.
- */
- FTSIteratorValue _currentValue;
- };
-
- } // namespace fts
-} // namespace mongo
+namespace fts {
+/**
+ * Encapsulates data fields returned by FTSElementIterator
+ */
+struct FTSIteratorValue {
+ FTSIteratorValue(const char* text, const FTSLanguage* language, double weight)
+ : _text(text), _language(language), _weight(weight), _valid(true) {}
+
+ FTSIteratorValue() : _text(NULL), _language(), _weight(0.0), _valid(false) {}
+
+ bool valid() const {
+ return _valid;
+ }
+
+ const char* _text;
+ const FTSLanguage* _language;
+ double _weight;
+ bool _valid;
+};
+
+/**
+ * Iterator pattern for walking through text-indexed fields of a
+ * BSON document.
+ *
+ * Example usage:
+ * FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
+ * FTSElementIterator it( spec, obj );
+ * while ( it.more() ) {
+ * FTSIteratorValue val = it.next();
+ * std::cout << val._text << '[' << val._language.str()
+ * << ',' << val._weight << ']' << std::endl;
+ * }
+ *
+ */
+class FTSElementIterator {
+public:
+ /**
+ * Iterator constructor
+ *
+ * Note: Caller must ensure that the constructed FTSElementIterator
+ * does >not< outlive either spec or obj.
+ *
+ * @arg spec text index specifier
+ * @arg obj document that the iterator will traverse
+ */
+ FTSElementIterator(const FTSSpec& spec, const BSONObj& obj);
+
+ /**
+ * Iterator interface: returns false iff there are no further text-indexable fields.
+ */
+ bool more();
+
+ /**
+ * Iterator interface: advances to the next text-indexable field.
+ */
+ FTSIteratorValue next();
+
+ /**
+ * Iterator frame needed for iterative implementation of
+ * recursive sub-documents.
+ */
+ struct FTSIteratorFrame {
+ FTSIteratorFrame(const BSONObj& obj,
+ const FTSSpec& spec,
+ const FTSLanguage* parentLanguage,
+ const std::string& parentPath,
+ bool isArray)
+ : _it(obj),
+ _language(spec._getLanguageToUseV2(obj, parentLanguage)),
+ _parentPath(parentPath),
+ _isArray(isArray) {}
+
+ friend std::ostream& operator<<(std::ostream&, FTSIteratorFrame&);
+
+ BSONObjIterator _it;
+ const FTSLanguage* _language;
+ std::string _parentPath;
+ bool _isArray;
+ };
+
+private:
+ /**
+ * Helper method:
+ * returns false iff all FTSIteratorFrames on _frameStack are exhausted.
+ */
+ bool moreFrames();
+
+ /**
+ * Helper method:
+ * advances to the next text-indexable field, possibly pushing frames as
+ * needed for recursive sub-documents.
+ */
+ FTSIteratorValue advance();
+
+ /**
+ * Stack used by iterative implementation of recursive sub-document traversal.
+ */
+ std::stack<FTSIteratorFrame> _frameStack;
+
+ /**
+ * Current frame, not yet pushed to stack.
+ */
+ FTSIteratorFrame _frame;
+
+ /**
+ * Constructor input parameter: text index specification.
+ */
+ const FTSSpec& _spec;
+
+ /**
+ * Current iterator return value, computed by 'more()', returned by 'next()'.
+ */
+ FTSIteratorValue _currentValue;
+};
+
+} // namespace fts
+} // namespace mongo
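
The header above documents a prefetching contract: the constructor computes the first value via advance(), more() only tests its validity, and next() returns the cached value before computing the next one. A small self-contained sketch of that same contract, with a hypothetical element type and filter standing in for text-indexable fields:

// Sketch only: the "prefetch on construction" pattern used by FTSElementIterator.
#include <cstddef>
#include <iostream>
#include <vector>

class EvenIterator {
public:
    explicit EvenIterator(const std::vector<int>& v) : _v(v), _pos(0), _current(advance()) {}

    // Returns false iff there are no further matching elements.
    bool more() const {
        return _current != kInvalid;
    }

    // Returns the prefetched value, then prefetches the next one.
    int next() {
        int result = _current;
        _current = advance();
        return result;
    }

private:
    static constexpr int kInvalid = -1;  // plays the role of FTSIteratorValue::valid() == false

    int advance() {
        while (_pos < _v.size()) {
            int x = _v[_pos++];
            if (x % 2 == 0)
                return x;  // the next element worth returning
        }
        return kInvalid;  // exhausted
    }

    const std::vector<int>& _v;
    std::size_t _pos;
    int _current;
};

int main() {
    std::vector<int> data = {1, 2, 3, 4, 5, 6};
    EvenIterator it(data);
    while (it.more())
        std::cout << it.next() << ' ';  // prints: 2 4 6
    std::cout << '\n';
    return 0;
}
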
diff --git a/src/mongo/db/fts/fts_element_iterator_test.cpp b/src/mongo/db/fts/fts_element_iterator_test.cpp
index 6d5694c5990..2a16c14b5a5 100644
--- a/src/mongo/db/fts/fts_element_iterator_test.cpp
+++ b/src/mongo/db/fts/fts_element_iterator_test.cpp
@@ -34,279 +34,267 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- namespace fts {
-
- using std::string;
-
- TEST( FTSElementIterator, Test1 ) {
-
- BSONObj obj = fromjson(
- "{ b : \"walking\","
- " c : { e: \"walked\" },"
- " d : \"walker\""
- " }" );
-
- BSONObj indexSpec = fromjson(
- "{ key : { a : \"text\" }, weights : { b : 10, d : 5 } }" );
-
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- Weights::const_iterator itt = spec.weights().begin();
- ASSERT( itt != spec.weights().end() );
- ASSERT_EQUALS( "a", itt->first );
- ASSERT_EQUALS( 1, itt->second );
- ++itt;
- ASSERT( itt != spec.weights().end() );
- ASSERT_EQUALS( "b", itt->first );
- ASSERT_EQUALS( 10, itt->second );
- ++itt;
- ASSERT( itt != spec.weights().end() );
- ASSERT_EQUALS( "d", itt->first );
- ASSERT_EQUALS( 5, itt->second );
- ++itt;
-
- FTSElementIterator it( spec, obj );
-
- ASSERT( it.more() );
- FTSIteratorValue val = it.next();
- ASSERT_EQUALS( "walking", string(val._text) );
- ASSERT_EQUALS( "english", val._language->str() );
- ASSERT_EQUALS( 10, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "walker", string(val._text) );
- ASSERT_EQUALS( "english", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
- }
-
- // Multi-language : test
- TEST( FTSElementIterator, Test2 ) {
-
- BSONObj obj = fromjson(
- "{ a :"
- " { b :"
- " [ { c : \"walked\", language : \"english\" },"
- " { c : \"camminato\", language : \"italian\" },"
- " { c : \"ging\", language : \"german\" } ]"
- " },"
- " d : \"Feliz Año Nuevo!\","
- " language : \"spanish\""
- " }" );
-
- BSONObj indexSpec = fromjson(
- "{ key : { \"a.b.c\" : \"text\", d : \"text\" } }" );
-
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
-
- FTSElementIterator it( spec, obj );
-
- ASSERT( it.more() );
- FTSIteratorValue val = it.next();
- ASSERT_EQUALS( "walked", string(val._text) );
- ASSERT_EQUALS( "english", val._language->str() );
- ASSERT_EQUALS( 1, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "camminato", string(val._text) );
- ASSERT_EQUALS( "italian", val._language->str() );
- ASSERT_EQUALS( 1, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "ging", string(val._text) );
- ASSERT_EQUALS( "german", val._language->str() );
- ASSERT_EQUALS( 1, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "Feliz Año Nuevo!", string(val._text) );
- ASSERT_EQUALS( "spanish", val._language->str() );
- ASSERT_EQUALS( 1, val._weight );
- }
-
- // Multi-language : test nested stemming per sub-document
- TEST( FTSElementIterator, Test3 ) {
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " a :"
- " { language : \"danish\","
- " b :"
- " [ { c : \"foredrag\" },"
- " { c : \"foredragsholder\" },"
- " { c : \"lector\" } ]"
- " }"
- "}" );
-
- BSONObj indexSpec = fromjson(
- "{ key : { a : \"text\", \"a.b.c\" : \"text\" }, weights : { \"a.b.c\" : 5 } }" );
-
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- Weights::const_iterator itt = spec.weights().begin();
- ASSERT( itt != spec.weights().end() );
- ASSERT_EQUALS( "a", itt->first );
- ASSERT_EQUALS( 1, itt->second );
- ++itt;
- ASSERT( itt != spec.weights().end() );
- ASSERT_EQUALS( "a.b.c", itt->first );
- ASSERT_EQUALS( 5, itt->second );
-
- FTSElementIterator it( spec, obj );
-
- ASSERT( it.more() );
- FTSIteratorValue val = it.next();
- ASSERT_EQUALS( "foredrag", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "foredragsholder", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "lector", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- }
-
- // Multi-language : test nested arrays
- TEST( FTSElementIterator, Test4 ) {
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " a : ["
- " { language : \"danish\","
- " b :"
- " [ { c : [\"foredrag\"] },"
- " { c : [\"foredragsholder\"] },"
- " { c : [\"lector\"] } ]"
- " } ]"
- "}" );
-
- BSONObj indexSpec = fromjson(
- "{ key : { \"a.b.c\" : \"text\" }, weights : { \"a.b.c\" : 5 } }" );
-
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- FTSElementIterator it( spec, obj );
-
- ASSERT( it.more() );
- FTSIteratorValue val = it.next();
- ASSERT_EQUALS( "foredrag", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "foredragsholder", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "lector", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- }
-
- // Multi-language : test wildcard spec
- TEST( FTSElementIterator, Test5 ) {
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " b : \"these boots were made for walking\","
- " c : { e: \"I walked half way to the market before seeing the sunrise\" },"
- " d : "
- " { language : \"danish\","
- " e :"
- " [ { f : \"foredrag\", g : 12 },"
- " { f : \"foredragsholder\", g : 13 },"
- " { f : \"lector\", g : 14 } ]"
- " }"
- "}" );
-
- BSONObj indexSpec = fromjson(
- "{ key : { a : \"text\" }, weights : { b : 20, c : 10, \"d.e.f\" : 5 } }" );
-
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- FTSElementIterator it( spec, obj );
-
- ASSERT( it.more() );
- FTSIteratorValue val = it.next();
- ASSERT_EQUALS( "these boots were made for walking", string(val._text) );
- ASSERT_EQUALS( "english", val._language->str() );
- ASSERT_EQUALS( 20, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "foredrag", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "foredragsholder", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "lector", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
- }
-
- // Multi-language : test wildcard spec
- TEST( FTSElementIterator, Test6 ) {
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " b : \"these boots were made for walking\","
- " c : { e: \"I walked half way to the market before seeing the sunrise\" },"
- " d : "
- " { language : \"danish\","
- " e :"
- " [ { f : \"foredrag\", g : 12 },"
- " { f : \"foredragsholder\", g : 13 },"
- " { f : \"lector\", g : 14 } ]"
- " }"
- "}" );
-
- BSONObj indexSpec = fromjson(
- "{ key : { a : \"text\" }, weights : { b : 20, c : 10, \"d.e.f\" : 5 } }" );
-
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- FTSElementIterator it( spec, obj );
-
- ASSERT( it.more() );
- FTSIteratorValue val = it.next();
- ASSERT_EQUALS( "these boots were made for walking", string(val._text) );
- ASSERT_EQUALS( "english", val._language->str() );
- ASSERT_EQUALS( 20, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "foredrag", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "foredragsholder", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
-
- ASSERT( it.more() );
- val = it.next();
- ASSERT_EQUALS( "lector", string(val._text) );
- ASSERT_EQUALS( "danish", val._language->str() );
- ASSERT_EQUALS( 5, val._weight );
- }
- }
+namespace fts {
+
+using std::string;
+
+TEST(FTSElementIterator, Test1) {
+ BSONObj obj = fromjson(
+ "{ b : \"walking\","
+ " c : { e: \"walked\" },"
+ " d : \"walker\""
+ " }");
+
+ BSONObj indexSpec = fromjson("{ key : { a : \"text\" }, weights : { b : 10, d : 5 } }");
+
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ Weights::const_iterator itt = spec.weights().begin();
+ ASSERT(itt != spec.weights().end());
+ ASSERT_EQUALS("a", itt->first);
+ ASSERT_EQUALS(1, itt->second);
+ ++itt;
+ ASSERT(itt != spec.weights().end());
+ ASSERT_EQUALS("b", itt->first);
+ ASSERT_EQUALS(10, itt->second);
+ ++itt;
+ ASSERT(itt != spec.weights().end());
+ ASSERT_EQUALS("d", itt->first);
+ ASSERT_EQUALS(5, itt->second);
+ ++itt;
+
+ FTSElementIterator it(spec, obj);
+
+ ASSERT(it.more());
+ FTSIteratorValue val = it.next();
+ ASSERT_EQUALS("walking", string(val._text));
+ ASSERT_EQUALS("english", val._language->str());
+ ASSERT_EQUALS(10, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("walker", string(val._text));
+ ASSERT_EQUALS("english", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
}
+// Multi-language : test
+TEST(FTSElementIterator, Test2) {
+ BSONObj obj = fromjson(
+ "{ a :"
+ " { b :"
+ " [ { c : \"walked\", language : \"english\" },"
+ " { c : \"camminato\", language : \"italian\" },"
+ " { c : \"ging\", language : \"german\" } ]"
+ " },"
+ " d : \"Feliz Año Nuevo!\","
+ " language : \"spanish\""
+ " }");
+
+ BSONObj indexSpec = fromjson("{ key : { \"a.b.c\" : \"text\", d : \"text\" } }");
+
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+
+ FTSElementIterator it(spec, obj);
+
+ ASSERT(it.more());
+ FTSIteratorValue val = it.next();
+ ASSERT_EQUALS("walked", string(val._text));
+ ASSERT_EQUALS("english", val._language->str());
+ ASSERT_EQUALS(1, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("camminato", string(val._text));
+ ASSERT_EQUALS("italian", val._language->str());
+ ASSERT_EQUALS(1, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("ging", string(val._text));
+ ASSERT_EQUALS("german", val._language->str());
+ ASSERT_EQUALS(1, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("Feliz Año Nuevo!", string(val._text));
+ ASSERT_EQUALS("spanish", val._language->str());
+ ASSERT_EQUALS(1, val._weight);
+}
+
+// Multi-language : test nested stemming per sub-document
+TEST(FTSElementIterator, Test3) {
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " a :"
+ " { language : \"danish\","
+ " b :"
+ " [ { c : \"foredrag\" },"
+ " { c : \"foredragsholder\" },"
+ " { c : \"lector\" } ]"
+ " }"
+ "}");
+
+ BSONObj indexSpec =
+ fromjson("{ key : { a : \"text\", \"a.b.c\" : \"text\" }, weights : { \"a.b.c\" : 5 } }");
+
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ Weights::const_iterator itt = spec.weights().begin();
+ ASSERT(itt != spec.weights().end());
+ ASSERT_EQUALS("a", itt->first);
+ ASSERT_EQUALS(1, itt->second);
+ ++itt;
+ ASSERT(itt != spec.weights().end());
+ ASSERT_EQUALS("a.b.c", itt->first);
+ ASSERT_EQUALS(5, itt->second);
+
+ FTSElementIterator it(spec, obj);
+
+ ASSERT(it.more());
+ FTSIteratorValue val = it.next();
+ ASSERT_EQUALS("foredrag", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("foredragsholder", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("lector", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+}
+
+// Multi-language : test nested arrays
+TEST(FTSElementIterator, Test4) {
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " a : ["
+ " { language : \"danish\","
+ " b :"
+ " [ { c : [\"foredrag\"] },"
+ " { c : [\"foredragsholder\"] },"
+ " { c : [\"lector\"] } ]"
+ " } ]"
+ "}");
+
+ BSONObj indexSpec = fromjson("{ key : { \"a.b.c\" : \"text\" }, weights : { \"a.b.c\" : 5 } }");
+
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ FTSElementIterator it(spec, obj);
+
+ ASSERT(it.more());
+ FTSIteratorValue val = it.next();
+ ASSERT_EQUALS("foredrag", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("foredragsholder", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("lector", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+}
+
+// Multi-language : test wildcard spec
+TEST(FTSElementIterator, Test5) {
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " b : \"these boots were made for walking\","
+ " c : { e: \"I walked half way to the market before seeing the sunrise\" },"
+ " d : "
+ " { language : \"danish\","
+ " e :"
+ " [ { f : \"foredrag\", g : 12 },"
+ " { f : \"foredragsholder\", g : 13 },"
+ " { f : \"lector\", g : 14 } ]"
+ " }"
+ "}");
+
+ BSONObj indexSpec =
+ fromjson("{ key : { a : \"text\" }, weights : { b : 20, c : 10, \"d.e.f\" : 5 } }");
+
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ FTSElementIterator it(spec, obj);
+
+ ASSERT(it.more());
+ FTSIteratorValue val = it.next();
+ ASSERT_EQUALS("these boots were made for walking", string(val._text));
+ ASSERT_EQUALS("english", val._language->str());
+ ASSERT_EQUALS(20, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("foredrag", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("foredragsholder", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("lector", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+}
+
+// Multi-language : test wildcard spec
+TEST(FTSElementIterator, Test6) {
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " b : \"these boots were made for walking\","
+ " c : { e: \"I walked half way to the market before seeing the sunrise\" },"
+ " d : "
+ " { language : \"danish\","
+ " e :"
+ " [ { f : \"foredrag\", g : 12 },"
+ " { f : \"foredragsholder\", g : 13 },"
+ " { f : \"lector\", g : 14 } ]"
+ " }"
+ "}");
+
+ BSONObj indexSpec =
+ fromjson("{ key : { a : \"text\" }, weights : { b : 20, c : 10, \"d.e.f\" : 5 } }");
+
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ FTSElementIterator it(spec, obj);
+
+ ASSERT(it.more());
+ FTSIteratorValue val = it.next();
+ ASSERT_EQUALS("these boots were made for walking", string(val._text));
+ ASSERT_EQUALS("english", val._language->str());
+ ASSERT_EQUALS(20, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("foredrag", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("foredragsholder", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+
+ ASSERT(it.more());
+ val = it.next();
+ ASSERT_EQUALS("lector", string(val._text));
+ ASSERT_EQUALS("danish", val._language->str());
+ ASSERT_EQUALS(5, val._weight);
+}
+}
+}
diff --git a/src/mongo/db/fts/fts_enabled.cpp b/src/mongo/db/fts/fts_enabled.cpp
index b8e071bd62a..fb261194db1 100644
--- a/src/mongo/db/fts/fts_enabled.cpp
+++ b/src/mongo/db/fts/fts_enabled.cpp
@@ -35,46 +35,42 @@
#include "mongo/util/log.h"
namespace mongo {
- namespace fts {
- namespace {
+namespace fts {
+namespace {
- bool dummyEnabledFlag = true; // Unused, needed for server parameter.
+bool dummyEnabledFlag = true; // Unused, needed for server parameter.
- /**
- * Declaration for the "textSearchEnabled" server parameter, which is now deprecated.
- * Note that:
- * - setting to true performs a no-op and logs a deprecation message.
- * - setting to false will fail.
- */
- class ExportedTextSearchEnabledParameter : public ExportedServerParameter<bool> {
- public:
- ExportedTextSearchEnabledParameter() :
- ExportedServerParameter<bool>( ServerParameterSet::getGlobal(),
- "textSearchEnabled",
- &dummyEnabledFlag,
- true,
- true ) {}
-
- virtual Status validate( const bool& potentialNewValue ) {
- if ( !potentialNewValue ) {
- return Status( ErrorCodes::BadValue,
- "textSearchEnabled cannot be set to false");
- }
-
- log() << "Attempted to set textSearchEnabled server parameter.";
- log() << "Text search is enabled by default and cannot be disabled.";
- log() << "The following are now deprecated and will be removed in a future "
- << "release:";
- log() << "- the \"textSearchEnabled\" server parameter (setting it has no "
- << "effect)";
- log() << "- the \"text\" command (has been replaced by the $text query "
- "operator)";
+/**
+ * Declaration for the "textSearchEnabled" server parameter, which is now deprecated.
+ * Note that:
+ * - setting to true performs a no-op and logs a deprecation message.
+ * - setting to false will fail.
+ */
+class ExportedTextSearchEnabledParameter : public ExportedServerParameter<bool> {
+public:
+ ExportedTextSearchEnabledParameter()
+ : ExportedServerParameter<bool>(
+ ServerParameterSet::getGlobal(), "textSearchEnabled", &dummyEnabledFlag, true, true) {
+ }
- return Status::OK();
- }
+ virtual Status validate(const bool& potentialNewValue) {
+ if (!potentialNewValue) {
+ return Status(ErrorCodes::BadValue, "textSearchEnabled cannot be set to false");
+ }
- } exportedTextSearchEnabledParam;
+ log() << "Attempted to set textSearchEnabled server parameter.";
+ log() << "Text search is enabled by default and cannot be disabled.";
+ log() << "The following are now deprecated and will be removed in a future "
+ << "release:";
+ log() << "- the \"textSearchEnabled\" server parameter (setting it has no "
+ << "effect)";
+ log() << "- the \"text\" command (has been replaced by the $text query "
+ "operator)";
- }
+ return Status::OK();
}
+
+} exportedTextSearchEnabledParam;
+}
+}
}
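
The validation rule in the hunk above is simple: the deprecated textSearchEnabled parameter rejects false and treats true as a no-op that only logs a deprecation notice. Restated without the ExportedServerParameter plumbing (the Result struct and function name below are hypothetical stand-ins, not MongoDB types):

// Sketch only: the accept/reject rule enforced by ExportedTextSearchEnabledParameter.
#include <iostream>
#include <string>

struct Result {
    bool ok;
    std::string reason;
};

Result validateTextSearchEnabled(bool potentialNewValue) {
    if (!potentialNewValue) {
        // Setting the parameter to false fails outright.
        return {false, "textSearchEnabled cannot be set to false"};
    }
    // Setting it to true succeeds but has no effect beyond a deprecation notice.
    std::cout << "textSearchEnabled is deprecated; text search is always enabled\n";
    return {true, ""};
}

int main() {
    std::cout << validateTextSearchEnabled(true).ok << '\n';       // 1
    std::cout << validateTextSearchEnabled(false).reason << '\n';  // error message
    return 0;
}
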
diff --git a/src/mongo/db/fts/fts_index_format.cpp b/src/mongo/db/fts/fts_index_format.cpp
index fc0e703b84d..f7110d80858 100644
--- a/src/mongo/db/fts/fts_index_format.cpp
+++ b/src/mongo/db/fts/fts_index_format.cpp
@@ -40,178 +40,168 @@
namespace mongo {
- namespace fts {
-
- using std::string;
- using std::vector;
-
- namespace {
- BSONObj nullObj;
- BSONElement nullElt;
-
- // New in textIndexVersion 2.
- // If the term is longer than 32 characters, it may
- // result in the generated key being too large
- // for the index. In that case, we generate a 64-character key
- // from the concatenation of the first 32 characters
- // and the hex string of the murmur3 hash value of the entire
- // term value.
- const size_t termKeyPrefixLength = 32U;
- // 128-bit hash value expressed in hex = 32 characters
- const size_t termKeySuffixLength = 32U;
- const size_t termKeyLength = termKeyPrefixLength + termKeySuffixLength;
-
- /**
- * Returns size of buffer required to store term in index key.
- * In version 1, terms are stored verbatim in key.
- * In version 2, terms longer than 32 characters are hashed and combined
- * with a prefix.
- */
- int guessTermSize( const std::string& term, TextIndexVersion textIndexVersion ) {
- if ( TEXT_INDEX_VERSION_1 == textIndexVersion ) {
- return term.size();
- }
- else {
- invariant( TEXT_INDEX_VERSION_2 == textIndexVersion );
- if ( term.size() <= termKeyPrefixLength ) {
- return term.size();
- }
- return termKeyLength;
- }
- }
- }
+namespace fts {
+
+using std::string;
+using std::vector;
+
+namespace {
+BSONObj nullObj;
+BSONElement nullElt;
+
+// New in textIndexVersion 2.
+// If the term is longer than 32 characters, it may
+// result in the generated key being too large
+// for the index. In that case, we generate a 64-character key
+// from the concatenation of the first 32 characters
+// and the hex string of the murmur3 hash value of the entire
+// term value.
+const size_t termKeyPrefixLength = 32U;
+// 128-bit hash value expressed in hex = 32 characters
+const size_t termKeySuffixLength = 32U;
+const size_t termKeyLength = termKeyPrefixLength + termKeySuffixLength;
- MONGO_INITIALIZER( FTSIndexFormat )( InitializerContext* context ) {
- BSONObjBuilder b;
- b.appendNull( "" );
- nullObj = b.obj();
- nullElt = nullObj.firstElement();
- return Status::OK();
+/**
+ * Returns size of buffer required to store term in index key.
+ * In version 1, terms are stored verbatim in key.
+ * In version 2, terms longer than 32 characters are hashed and combined
+ * with a prefix.
+ */
+int guessTermSize(const std::string& term, TextIndexVersion textIndexVersion) {
+ if (TEXT_INDEX_VERSION_1 == textIndexVersion) {
+ return term.size();
+ } else {
+ invariant(TEXT_INDEX_VERSION_2 == textIndexVersion);
+ if (term.size() <= termKeyPrefixLength) {
+ return term.size();
}
+ return termKeyLength;
+ }
+}
+}
- void FTSIndexFormat::getKeys( const FTSSpec& spec,
- const BSONObj& obj,
- BSONObjSet* keys ) {
-
- int extraSize = 0;
- vector<BSONElement> extrasBefore;
- vector<BSONElement> extrasAfter;
-
- // compute the non FTS key elements
- for ( unsigned i = 0; i < spec.numExtraBefore(); i++ ) {
- BSONElement e = obj.getFieldDotted(spec.extraBefore(i));
- if ( e.eoo() )
- e = nullElt;
- uassert( 16675, "cannot have a multi-key as a prefix to a text index",
- e.type() != Array );
- extrasBefore.push_back(e);
- extraSize += e.size();
- }
- for ( unsigned i = 0; i < spec.numExtraAfter(); i++ ) {
- BSONElement e = obj.getFieldDotted(spec.extraAfter(i));
- if ( e.eoo() )
- e = nullElt;
- extrasAfter.push_back(e);
- extraSize += e.size();
- }
-
-
- TermFrequencyMap term_freqs;
- spec.scoreDocument( obj, &term_freqs );
-
- // create index keys from raw scores
- // only 1 per string
-
- uassert( 16732,
- mongoutils::str::stream() << "too many unique keys for a single document to"
- << " have a text index, max is " << term_freqs.size() << obj["_id"],
- term_freqs.size() <= 400000 );
-
- long long keyBSONSize = 0;
- const int MaxKeyBSONSizeMB = 4;
-
- for ( TermFrequencyMap::const_iterator i = term_freqs.begin(); i != term_freqs.end(); ++i ) {
-
- const string& term = i->first;
- double weight = i->second;
-
- // guess the total size of the btree entry based on the size of the weight, term tuple
- int guess =
- 5 /* bson overhead */ +
- 10 /* weight */ +
- 8 /* term overhead */ +
- /* term size (could be truncated/hashed) */
- guessTermSize( term, spec.getTextIndexVersion() ) +
- extraSize;
-
- BSONObjBuilder b(guess); // builds a BSON object with guess length.
- for ( unsigned k = 0; k < extrasBefore.size(); k++ ) {
- b.appendAs( extrasBefore[k], "" );
- }
- _appendIndexKey( b, weight, term, spec.getTextIndexVersion() );
- for ( unsigned k = 0; k < extrasAfter.size(); k++ ) {
- b.appendAs( extrasAfter[k], "" );
- }
- BSONObj res = b.obj();
-
- verify( guess >= res.objsize() );
-
- keys->insert( res );
- keyBSONSize += res.objsize();
-
- uassert( 16733,
- mongoutils::str::stream()
- << "trying to index text where term list is too big, max is "
- << MaxKeyBSONSizeMB << "mb " << obj["_id"],
- keyBSONSize <= ( MaxKeyBSONSizeMB * 1024 * 1024 ) );
-
- }
- }
+MONGO_INITIALIZER(FTSIndexFormat)(InitializerContext* context) {
+ BSONObjBuilder b;
+ b.appendNull("");
+ nullObj = b.obj();
+ nullElt = nullObj.firstElement();
+ return Status::OK();
+}
+
+void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet* keys) {
+ int extraSize = 0;
+ vector<BSONElement> extrasBefore;
+ vector<BSONElement> extrasAfter;
+
+ // compute the non FTS key elements
+ for (unsigned i = 0; i < spec.numExtraBefore(); i++) {
+ BSONElement e = obj.getFieldDotted(spec.extraBefore(i));
+ if (e.eoo())
+ e = nullElt;
+ uassert(16675, "cannot have a multi-key as a prefix to a text index", e.type() != Array);
+ extrasBefore.push_back(e);
+ extraSize += e.size();
+ }
+ for (unsigned i = 0; i < spec.numExtraAfter(); i++) {
+ BSONElement e = obj.getFieldDotted(spec.extraAfter(i));
+ if (e.eoo())
+ e = nullElt;
+ extrasAfter.push_back(e);
+ extraSize += e.size();
+ }
+
+
+ TermFrequencyMap term_freqs;
+ spec.scoreDocument(obj, &term_freqs);
+
+ // create index keys from raw scores
+ // only 1 per string
+
+ uassert(16732,
+ mongoutils::str::stream() << "too many unique keys for a single document to"
+ << " have a text index, max is " << term_freqs.size()
+ << obj["_id"],
+ term_freqs.size() <= 400000);
+
+ long long keyBSONSize = 0;
+ const int MaxKeyBSONSizeMB = 4;
- BSONObj FTSIndexFormat::getIndexKey( double weight,
- const string& term,
- const BSONObj& indexPrefix,
- TextIndexVersion textIndexVersion ) {
- BSONObjBuilder b;
+ for (TermFrequencyMap::const_iterator i = term_freqs.begin(); i != term_freqs.end(); ++i) {
+ const string& term = i->first;
+ double weight = i->second;
- BSONObjIterator i( indexPrefix );
- while ( i.more() ) {
- b.appendAs( i.next(), "" );
- }
+ // guess the total size of the btree entry based on the size of the weight, term tuple
+ int guess = 5 /* bson overhead */ + 10 /* weight */ + 8 /* term overhead */ +
+ /* term size (could be truncated/hashed) */
+ guessTermSize(term, spec.getTextIndexVersion()) + extraSize;
- _appendIndexKey( b, weight, term, textIndexVersion );
- return b.obj();
+ BSONObjBuilder b(guess); // builds a BSON object with guess length.
+ for (unsigned k = 0; k < extrasBefore.size(); k++) {
+ b.appendAs(extrasBefore[k], "");
}
+ _appendIndexKey(b, weight, term, spec.getTextIndexVersion());
+ for (unsigned k = 0; k < extrasAfter.size(); k++) {
+ b.appendAs(extrasAfter[k], "");
+ }
+ BSONObj res = b.obj();
+
+ verify(guess >= res.objsize());
+
+ keys->insert(res);
+ keyBSONSize += res.objsize();
+
+ uassert(16733,
+ mongoutils::str::stream()
+ << "trying to index text where term list is too big, max is "
+ << MaxKeyBSONSizeMB << "mb " << obj["_id"],
+ keyBSONSize <= (MaxKeyBSONSizeMB * 1024 * 1024));
+ }
+}
+
+BSONObj FTSIndexFormat::getIndexKey(double weight,
+ const string& term,
+ const BSONObj& indexPrefix,
+ TextIndexVersion textIndexVersion) {
+ BSONObjBuilder b;
- void FTSIndexFormat::_appendIndexKey( BSONObjBuilder& b, double weight, const string& term,
- TextIndexVersion textIndexVersion ) {
- verify( weight >= 0 && weight <= MAX_WEIGHT ); // FTSmaxweight = defined in fts_header
- // Terms are added to index key verbatim.
- if ( TEXT_INDEX_VERSION_1 == textIndexVersion ) {
- b.append( "", term );
- b.append( "", weight );
- }
- // See comments at the top of file for termKeyPrefixLength.
- // Apply hash for text index version 2 to long terms (longer than 32 characters).
- else {
- invariant( TEXT_INDEX_VERSION_2 == textIndexVersion );
- if ( term.size() <= termKeyPrefixLength ) {
- b.append( "", term );
- }
- else {
- union {
- uint64_t hash[2];
- char data[16];
- } t;
- uint32_t seed = 0;
- MurmurHash3_x64_128( term.data(), term.size(), seed, t.hash );
- string keySuffix = mongo::toHexLower( t.data, sizeof( t.data ) );
- invariant( termKeySuffixLength == keySuffix.size() );
- b.append( "", term.substr( 0, termKeyPrefixLength ) +
- keySuffix );
- }
- b.append( "", weight );
- }
+ BSONObjIterator i(indexPrefix);
+ while (i.more()) {
+ b.appendAs(i.next(), "");
+ }
+
+ _appendIndexKey(b, weight, term, textIndexVersion);
+ return b.obj();
+}
+
+void FTSIndexFormat::_appendIndexKey(BSONObjBuilder& b,
+ double weight,
+ const string& term,
+ TextIndexVersion textIndexVersion) {
+ verify(weight >= 0 && weight <= MAX_WEIGHT); // FTSmaxweight = defined in fts_header
+ // Terms are added to index key verbatim.
+ if (TEXT_INDEX_VERSION_1 == textIndexVersion) {
+ b.append("", term);
+ b.append("", weight);
+ }
+ // See comments at the top of file for termKeyPrefixLength.
+ // Apply hash for text index version 2 to long terms (longer than 32 characters).
+ else {
+ invariant(TEXT_INDEX_VERSION_2 == textIndexVersion);
+ if (term.size() <= termKeyPrefixLength) {
+ b.append("", term);
+ } else {
+ union {
+ uint64_t hash[2];
+ char data[16];
+ } t;
+ uint32_t seed = 0;
+ MurmurHash3_x64_128(term.data(), term.size(), seed, t.hash);
+ string keySuffix = mongo::toHexLower(t.data, sizeof(t.data));
+ invariant(termKeySuffixLength == keySuffix.size());
+ b.append("", term.substr(0, termKeyPrefixLength) + keySuffix);
}
+ b.append("", weight);
}
}
+}
+}
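
For textIndexVersion 2, _appendIndexKey above stores terms of at most 32 characters verbatim and replaces longer terms with their first 32 characters followed by 32 lowercase-hex characters of the 128-bit MurmurHash3 of the whole term, so every long term yields a fixed 64-character key. A self-contained sketch of that key shape (std::hash stands in for MurmurHash3_x64_128 here purely to keep the example runnable; the real code uses the server's bundled MurmurHash3):

// Sketch only: the shape of textIndexVersion 2 long-term keys.
#include <cstdint>
#include <cstdio>
#include <functional>
#include <iostream>
#include <string>

static const size_t kTermKeyPrefixLength = 32U;

std::string makeTermKey(const std::string& term) {
    if (term.size() <= kTermKeyPrefixLength) {
        return term;  // short terms are stored verbatim
    }
    // Placeholder for the 128-bit hash value: derive two 64-bit words from
    // std::hash so the sketch stays self-contained.
    uint64_t h[2] = {std::hash<std::string>()(term), std::hash<std::string>()(term + "#")};
    char hex[33];
    std::snprintf(hex, sizeof(hex), "%016llx%016llx",
                  (unsigned long long)h[0], (unsigned long long)h[1]);
    // 32-character prefix + 32 hex characters = fixed 64-character key.
    return term.substr(0, kTermKeyPrefixLength) + hex;
}

int main() {
    std::string longTerm(1024, 'a');
    longTerm += "cat";
    std::string key = makeTermKey(longTerm);
    std::cout << key.size() << '\n';  // 64
    std::cout << key << '\n';
    return 0;
}
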
diff --git a/src/mongo/db/fts/fts_index_format.h b/src/mongo/db/fts/fts_index_format.h
index 579afb2d673..82be9ad03f5 100644
--- a/src/mongo/db/fts/fts_index_format.h
+++ b/src/mongo/db/fts/fts_index_format.h
@@ -37,40 +37,38 @@
namespace mongo {
- namespace fts {
+namespace fts {
- class FTSSpec;
+class FTSSpec;
- class FTSIndexFormat {
- public:
+class FTSIndexFormat {
+public:
+ static void getKeys(const FTSSpec& spec, const BSONObj& document, BSONObjSet* keys);
- static void getKeys( const FTSSpec& spec,
- const BSONObj& document,
- BSONObjSet* keys );
+ /**
+ * Helper method to get return entry from the FTSIndex as a BSONObj
+ * @param weight, the weight of the term in the entry
+ * @param term, the std::string term in the entry
+ * @param indexPrefix, the fields that go in the index first
+ * @param textIndexVersion, index version. affects key format.
+ */
+ static BSONObj getIndexKey(double weight,
+ const std::string& term,
+ const BSONObj& indexPrefix,
+ TextIndexVersion textIndexVersion);
- /**
- * Helper method to get return entry from the FTSIndex as a BSONObj
- * @param weight, the weight of the term in the entry
- * @param term, the std::string term in the entry
- * @param indexPrefix, the fields that go in the index first
- * @param textIndexVersion, index version. affects key format.
- */
- static BSONObj getIndexKey( double weight,
- const std::string& term,
- const BSONObj& indexPrefix,
- TextIndexVersion textIndexVersion );
-
- private:
- /**
- * Helper method to get return entry from the FTSIndex as a BSONObj
-         * @param b, reference to the BSONObjBuilder
- * @param weight, the weight of the term in the entry
- * @param term, the std::string term in the entry
- * @param textIndexVersion, index version. affects key format.
- */
- static void _appendIndexKey( BSONObjBuilder& b, double weight, const std::string& term,
- TextIndexVersion textIndexVersion );
- };
-
- }
+private:
+ /**
+ * Helper method to get return entry from the FTSIndex as a BSONObj
+     * @param b, reference to the BSONObjBuilder
+ * @param weight, the weight of the term in the entry
+ * @param term, the std::string term in the entry
+ * @param textIndexVersion, index version. affects key format.
+ */
+ static void _appendIndexKey(BSONObjBuilder& b,
+ double weight,
+ const std::string& term,
+ TextIndexVersion textIndexVersion);
+};
+}
}
diff --git a/src/mongo/db/fts/fts_index_format_test.cpp b/src/mongo/db/fts/fts_index_format_test.cpp
index a15d014e98c..f7c8a5fa432 100644
--- a/src/mongo/db/fts/fts_index_format_test.cpp
+++ b/src/mongo/db/fts/fts_index_format_test.cpp
@@ -42,165 +42,184 @@
namespace mongo {
- namespace fts {
-
- using std::string;
-
- TEST( FTSIndexFormat, Simple1 ) {
- FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "data" << "text" ) ) ) );
- BSONObjSet keys;
- FTSIndexFormat::getKeys( spec, BSON( "data" << "cat sat" ), &keys );
-
- ASSERT_EQUALS( 2U, keys.size() );
- for ( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
- BSONObj key = *i;
- ASSERT_EQUALS( 2, key.nFields() );
- ASSERT_EQUALS( String, key.firstElement().type() );
- }
- }
-
- TEST( FTSIndexFormat, ExtraBack1 ) {
- FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "data" << "text" <<
- "x" << 1 ) ) ) );
- BSONObjSet keys;
- FTSIndexFormat::getKeys( spec, BSON( "data" << "cat" << "x" << 5 ), &keys );
-
- ASSERT_EQUALS( 1U, keys.size() );
- BSONObj key = *(keys.begin());
- ASSERT_EQUALS( 3, key.nFields() );
- BSONObjIterator i( key );
- ASSERT_EQUALS( StringData("cat"), i.next().valuestr() );
- ASSERT( i.next().numberDouble() > 0 );
- ASSERT_EQUALS( 5, i.next().numberInt() );
- }
+namespace fts {
+
+using std::string;
+
+TEST(FTSIndexFormat, Simple1) {
+ FTSSpec spec(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text"))));
+ BSONObjSet keys;
+ FTSIndexFormat::getKeys(spec,
+ BSON("data"
+ << "cat sat"),
+ &keys);
+
+ ASSERT_EQUALS(2U, keys.size());
+ for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
+ BSONObj key = *i;
+ ASSERT_EQUALS(2, key.nFields());
+ ASSERT_EQUALS(String, key.firstElement().type());
+ }
+}
- /*
- TEST( FTSIndexFormat, ExtraBackArray1 ) {
- FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "data" << "text" <<
- "x.y" << 1 ) ) ) );
- BSONObjSet keys;
- FTSIndexFormat::getKeys( spec,
- BSON( "data" << "cat" <<
- "x" << BSON_ARRAY( BSON( "y" << 1 ) <<
- BSON( "y" << 2 ) ) ),
- &keys );
-
- ASSERT_EQUALS( 1U, keys.size() );
- BSONObj key = *(keys.begin());
- log() << "e: " << key << endl;
- ASSERT_EQUALS( 3, key.nFields() );
- BSONObjIterator i( key );
- ASSERT_EQUALS( StringData("cat"), i.next().valuestr() );
- ASSERT( i.next().numberDouble() > 0 );
- ASSERT_EQUALS( 5, i.next().numberInt() );
- }
- */
-
- TEST( FTSIndexFormat, ExtraFront1 ) {
- FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << 1 <<
- "data" << "text" ) ) ) );
- BSONObjSet keys;
- FTSIndexFormat::getKeys( spec, BSON( "data" << "cat" << "x" << 5 ), &keys );
-
- ASSERT_EQUALS( 1U, keys.size() );
- BSONObj key = *(keys.begin());
- ASSERT_EQUALS( 3, key.nFields() );
- BSONObjIterator i( key );
- ASSERT_EQUALS( 5, i.next().numberInt() );
- ASSERT_EQUALS( StringData("cat"), i.next().valuestr() );
- ASSERT( i.next().numberDouble() > 0 );
- }
+TEST(FTSIndexFormat, ExtraBack1) {
+ FTSSpec spec(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text"
+ << "x" << 1))));
+ BSONObjSet keys;
+ FTSIndexFormat::getKeys(spec,
+ BSON("data"
+ << "cat"
+ << "x" << 5),
+ &keys);
+
+ ASSERT_EQUALS(1U, keys.size());
+ BSONObj key = *(keys.begin());
+ ASSERT_EQUALS(3, key.nFields());
+ BSONObjIterator i(key);
+ ASSERT_EQUALS(StringData("cat"), i.next().valuestr());
+ ASSERT(i.next().numberDouble() > 0);
+ ASSERT_EQUALS(5, i.next().numberInt());
+}
- TEST( FTSIndexFormat, StopWords1 ) {
- FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "data" << "text" ) ) ) );
+/*
+TEST( FTSIndexFormat, ExtraBackArray1 ) {
+ FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "data" << "text" <<
+ "x.y" << 1 ) ) ) );
+ BSONObjSet keys;
+ FTSIndexFormat::getKeys( spec,
+ BSON( "data" << "cat" <<
+ "x" << BSON_ARRAY( BSON( "y" << 1 ) <<
+ BSON( "y" << 2 ) ) ),
+ &keys );
+
+ ASSERT_EQUALS( 1U, keys.size() );
+ BSONObj key = *(keys.begin());
+ log() << "e: " << key << endl;
+ ASSERT_EQUALS( 3, key.nFields() );
+ BSONObjIterator i( key );
+ ASSERT_EQUALS( StringData("cat"), i.next().valuestr() );
+ ASSERT( i.next().numberDouble() > 0 );
+ ASSERT_EQUALS( 5, i.next().numberInt() );
+}
+*/
- BSONObjSet keys1;
- FTSIndexFormat::getKeys( spec, BSON( "data" << "computer" ), &keys1 );
- ASSERT_EQUALS( 1U, keys1.size() );
+TEST(FTSIndexFormat, ExtraFront1) {
+ FTSSpec spec(FTSSpec::fixSpec(BSON("key" << BSON("x" << 1 << "data"
+ << "text"))));
+ BSONObjSet keys;
+ FTSIndexFormat::getKeys(spec,
+ BSON("data"
+ << "cat"
+ << "x" << 5),
+ &keys);
+
+ ASSERT_EQUALS(1U, keys.size());
+ BSONObj key = *(keys.begin());
+ ASSERT_EQUALS(3, key.nFields());
+ BSONObjIterator i(key);
+ ASSERT_EQUALS(5, i.next().numberInt());
+ ASSERT_EQUALS(StringData("cat"), i.next().valuestr());
+ ASSERT(i.next().numberDouble() > 0);
+}
- BSONObjSet keys2;
- FTSIndexFormat::getKeys( spec, BSON( "data" << "any computer" ), &keys2 );
- ASSERT_EQUALS( 1U, keys2.size() );
- }
+TEST(FTSIndexFormat, StopWords1) {
+ FTSSpec spec(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text"))));
+
+ BSONObjSet keys1;
+ FTSIndexFormat::getKeys(spec,
+ BSON("data"
+ << "computer"),
+ &keys1);
+ ASSERT_EQUALS(1U, keys1.size());
+
+ BSONObjSet keys2;
+ FTSIndexFormat::getKeys(spec,
+ BSON("data"
+ << "any computer"),
+ &keys2);
+ ASSERT_EQUALS(1U, keys2.size());
+}
- /**
- * Helper function to compare keys returned in getKeys() result
- * with expected values.
- */
- void assertEqualsIndexKeys( std::set<std::string>& expectedKeys, const BSONObjSet& keys ) {
- ASSERT_EQUALS( expectedKeys.size(), keys.size() );
- for ( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
- BSONObj key = *i;
- ASSERT_EQUALS( 2, key.nFields() );
- ASSERT_EQUALS( String, key.firstElement().type() );
- string s = key.firstElement().String();
- std::set<string>::const_iterator j = expectedKeys.find(s);
- if (j == expectedKeys.end()) {
- mongoutils::str::stream ss;
- ss << "unexpected key " << s << " in FTSIndexFormat::getKeys result. "
- << "expected keys:";
- for (std::set<string>::const_iterator k = expectedKeys.begin();
- k != expectedKeys.end(); ++k) {
- ss << "\n " << *k;
- }
- FAIL(ss);
- }
+/**
+ * Helper function to compare keys returned in getKeys() result
+ * with expected values.
+ */
+void assertEqualsIndexKeys(std::set<std::string>& expectedKeys, const BSONObjSet& keys) {
+ ASSERT_EQUALS(expectedKeys.size(), keys.size());
+ for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
+ BSONObj key = *i;
+ ASSERT_EQUALS(2, key.nFields());
+ ASSERT_EQUALS(String, key.firstElement().type());
+ string s = key.firstElement().String();
+ std::set<string>::const_iterator j = expectedKeys.find(s);
+ if (j == expectedKeys.end()) {
+ mongoutils::str::stream ss;
+ ss << "unexpected key " << s << " in FTSIndexFormat::getKeys result. "
+ << "expected keys:";
+ for (std::set<string>::const_iterator k = expectedKeys.begin(); k != expectedKeys.end();
+ ++k) {
+ ss << "\n " << *k;
}
+ FAIL(ss);
}
+ }
+}
- /**
- * Tests keys for long terms using text index version 1.
- * Terms that are too long are not truncated in version 1.
- */
- TEST( FTSIndexFormat, LongWordsTextIndexVersion1 ) {
- FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "data" << "text" ) <<
- "textIndexVersion" << 1 ) ) );
- BSONObjSet keys;
- string longPrefix( 1024U, 'a' );
- // "aaa...aaacat"
- string longWordCat = longPrefix + "cat";
- // "aaa...aaasat"
- string longWordSat = longPrefix + "sat";
- string text = mongoutils::str::stream() << longWordCat << " " << longWordSat;
- FTSIndexFormat::getKeys( spec, BSON( "data" << text ), &keys );
-
- // Hard-coded expected computed keys for future-proofing.
- std::set<string> expectedKeys;
- // cat
- expectedKeys.insert( longWordCat );
- // sat
- expectedKeys.insert( longWordSat );
-
- assertEqualsIndexKeys( expectedKeys, keys);
- }
-
- /**
- * Tests keys for long terms using text index version 2.
- * In version 2, long terms (longer than 32 characters)
- * are hashed with murmur3 and appended to the first 32
- * characters of the term to form the index key.
- */
- TEST( FTSIndexFormat, LongWordTextIndexVersion2 ) {
- FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "data" << "text" ) <<
- "textIndexVersion" << 2 ) ) );
- BSONObjSet keys;
- string longPrefix( 1024U, 'a' );
- // "aaa...aaacat"
- string longWordCat = longPrefix + "cat";
- // "aaa...aaasat"
- string longWordSat = longPrefix + "sat";
- string text = mongoutils::str::stream() << longWordCat << " " << longWordSat;
- FTSIndexFormat::getKeys( spec, BSON( "data" << text ), &keys );
-
- // Hard-coded expected computed keys for future-proofing.
- std::set<string> expectedKeys;
- // cat
- expectedKeys.insert( "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab8e78455d827ebb87cbe87f392bf45f6" );
- // sat
- expectedKeys.insert( "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaf2d6f58bb3b81b97e611ae7ccac6dea7" );
-
- assertEqualsIndexKeys( expectedKeys, keys);
- }
+/**
+ * Tests keys for long terms using text index version 1.
+ * Terms that are too long are not truncated in version 1.
+ */
+TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
+ FTSSpec spec(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text") << "textIndexVersion" << 1)));
+ BSONObjSet keys;
+ string longPrefix(1024U, 'a');
+ // "aaa...aaacat"
+ string longWordCat = longPrefix + "cat";
+ // "aaa...aaasat"
+ string longWordSat = longPrefix + "sat";
+ string text = mongoutils::str::stream() << longWordCat << " " << longWordSat;
+ FTSIndexFormat::getKeys(spec, BSON("data" << text), &keys);
+
+ // Hard-coded expected computed keys for future-proofing.
+ std::set<string> expectedKeys;
+ // cat
+ expectedKeys.insert(longWordCat);
+ // sat
+ expectedKeys.insert(longWordSat);
+
+ assertEqualsIndexKeys(expectedKeys, keys);
+}
- }
+/**
+ * Tests keys for long terms using text index version 2.
+ * In version 2, long terms (longer than 32 characters)
+ * are hashed with murmur3 and appended to the first 32
+ * characters of the term to form the index key.
+ */
+TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
+ FTSSpec spec(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text") << "textIndexVersion" << 2)));
+ BSONObjSet keys;
+ string longPrefix(1024U, 'a');
+ // "aaa...aaacat"
+ string longWordCat = longPrefix + "cat";
+ // "aaa...aaasat"
+ string longWordSat = longPrefix + "sat";
+ string text = mongoutils::str::stream() << longWordCat << " " << longWordSat;
+ FTSIndexFormat::getKeys(spec, BSON("data" << text), &keys);
+
+ // Hard-coded expected computed keys for future-proofing.
+ std::set<string> expectedKeys;
+ // cat
+ expectedKeys.insert("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab8e78455d827ebb87cbe87f392bf45f6");
+ // sat
+ expectedKeys.insert("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaf2d6f58bb3b81b97e611ae7ccac6dea7");
+
+ assertEqualsIndexKeys(expectedKeys, keys);
+}
+}
}
diff --git a/src/mongo/db/fts/fts_language.cpp b/src/mongo/db/fts/fts_language.cpp
index 4b1966d6458..7a0c64ab1cf 100644
--- a/src/mongo/db/fts/fts_language.cpp
+++ b/src/mongo/db/fts/fts_language.cpp
@@ -31,7 +31,7 @@
#include "mongo/db/fts/fts_language.h"
#include <string>
-
+
#include "mongo/base/init.h"
#include "mongo/db/fts/fts_basic_tokenizer.h"
#include "mongo/stdx/memory.h"
@@ -42,225 +42,220 @@
namespace mongo {
- namespace fts {
+namespace fts {
- namespace {
+namespace {
- /**
- * Case-insensitive StringData comparator.
- */
- struct LanguageStringCompare {
- /** Returns true if lhs < rhs. */
- bool operator()( std::string lhs, std::string rhs ) const {
- size_t minSize = std::min( lhs.size(), rhs.size() );
+/**
+ * Case-insensitive StringData comparator.
+ */
+struct LanguageStringCompare {
+ /** Returns true if lhs < rhs. */
+ bool operator()(std::string lhs, std::string rhs) const {
+ size_t minSize = std::min(lhs.size(), rhs.size());
- for ( size_t x = 0; x < minSize; x++ ) {
- char a = tolower( lhs[x] );
- char b = tolower( rhs[x] );
- if ( a < b ) {
- return true;
- }
- if ( a > b ) {
- return false;
- }
- }
+ for (size_t x = 0; x < minSize; x++) {
+ char a = tolower(lhs[x]);
+ char b = tolower(rhs[x]);
+ if (a < b) {
+ return true;
+ }
+ if (a > b) {
+ return false;
+ }
+ }
- return lhs.size() < rhs.size();
- }
- };
+ return lhs.size() < rhs.size();
+ }
+};
- // Lookup table from user language string (case-insensitive) to FTSLanguage. Populated
- // by initializers in group FTSAllLanguagesRegistered and initializer
- // FTSRegisterLanguageAliases. For use with TEXT_INDEX_VERSION_2 text indexes only.
- typedef std::map<std::string, const FTSLanguage*, LanguageStringCompare> LanguageMapV2;
- LanguageMapV2 languageMapV2;
+// Lookup table from user language string (case-insensitive) to FTSLanguage. Populated
+// by initializers in group FTSAllLanguagesRegistered and initializer
+// FTSRegisterLanguageAliases. For use with TEXT_INDEX_VERSION_2 text indexes only.
+typedef std::map<std::string, const FTSLanguage*, LanguageStringCompare> LanguageMapV2;
+LanguageMapV2 languageMapV2;
- // Like languageMapV2, but for use with TEXT_INDEX_VERSION_1 text indexes.
- // Case-sensitive by lookup key.
- typedef std::map<StringData, const FTSLanguage*> LanguageMapV1;
- LanguageMapV1 languageMapV1;
- }
+// Like languageMapV2, but for use with TEXT_INDEX_VERSION_1 text indexes.
+// Case-sensitive by lookup key.
+typedef std::map<StringData, const FTSLanguage*> LanguageMapV1;
+LanguageMapV1 languageMapV1;
+}
- std::unique_ptr<FTSTokenizer> BasicFTSLanguage::createTokenizer() const {
- return stdx::make_unique<BasicFTSTokenizer>(this);
- }
+std::unique_ptr<FTSTokenizer> BasicFTSLanguage::createTokenizer() const {
+ return stdx::make_unique<BasicFTSTokenizer>(this);
+}
- MONGO_INITIALIZER_GROUP( FTSAllLanguagesRegistered, MONGO_NO_PREREQUISITES,
- MONGO_NO_DEPENDENTS );
+MONGO_INITIALIZER_GROUP(FTSAllLanguagesRegistered, MONGO_NO_PREREQUISITES, MONGO_NO_DEPENDENTS);
- //
- // Register supported languages' canonical names for TEXT_INDEX_VERSION_2.
- //
+//
+// Register supported languages' canonical names for TEXT_INDEX_VERSION_2.
+//
- MONGO_FTS_LANGUAGE_DECLARE( languageNoneV2, "none", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDanishV2, "danish", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDutchV2, "dutch", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageEnglishV2, "english", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFinnishV2, "finnish", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFrenchV2, "french", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageGermanV2, "german", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageHungarianV2, "hungarian", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageItalianV2, "italian", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageNorwegianV2, "norwegian", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languagePortugueseV2, "portuguese", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRomanianV2, "romanian", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRussianV2, "russian", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageSpanishV2, "spanish", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageSwedishV2, "swedish", TEXT_INDEX_VERSION_2 );
- MONGO_FTS_LANGUAGE_DECLARE( languageTurkishV2, "turkish", TEXT_INDEX_VERSION_2 );
+MONGO_FTS_LANGUAGE_DECLARE(languageNoneV2, "none", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageDanishV2, "danish", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageDutchV2, "dutch", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageEnglishV2, "english", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageFinnishV2, "finnish", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageFrenchV2, "french", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageGermanV2, "german", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageHungarianV2, "hungarian", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageItalianV2, "italian", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageNorwegianV2, "norwegian", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languagePortugueseV2, "portuguese", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageRomanianV2, "romanian", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageRussianV2, "russian", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageSpanishV2, "spanish", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageSwedishV2, "swedish", TEXT_INDEX_VERSION_2);
+MONGO_FTS_LANGUAGE_DECLARE(languageTurkishV2, "turkish", TEXT_INDEX_VERSION_2);
- //
- // Register all Snowball language modules for TEXT_INDEX_VERSION_1. Note that only the full
- // names are recognized by the StopWords class (as such, the language string "dan" in
- // TEXT_INDEX_VERSION_1 will generate the Danish stemmer and the empty stopword list).
- //
+//
+// Register all Snowball language modules for TEXT_INDEX_VERSION_1. Note that only the full
+// names are recognized by the StopWords class (as such, the language string "dan" in
+// TEXT_INDEX_VERSION_1 will generate the Danish stemmer and the empty stopword list).
+//
- MONGO_FTS_LANGUAGE_DECLARE( languageNoneV1, "none", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDaV1, "da", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDanV1, "dan", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDanishV1, "danish", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDeV1, "de", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDeuV1, "deu", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDutV1, "dut", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageDutchV1, "dutch", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageEnV1, "en", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageEngV1, "eng", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageEnglishV1, "english", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageEsV1, "es", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageEslV1, "esl", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFiV1, "fi", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFinV1, "fin", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFinnishV1, "finnish", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFrV1, "fr", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFraV1, "fra", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFreV1, "fre", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageFrenchV1, "french", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageGerV1, "ger", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageGermanV1, "german", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageHuV1, "hu", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageHunV1, "hun", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageHungarianV1, "hungarian", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageItV1, "it", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageItaV1, "ita", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageItalianV1, "italian", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageNlV1, "nl", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageNldV1, "nld", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageNoV1, "no", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageNorV1, "nor", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageNorwegianV1, "norwegian", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languagePorV1, "por", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languagePorterV1, "porter", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languagePortugueseV1, "portuguese", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languagePtV1, "pt", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRoV1, "ro", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRomanianV1, "romanian", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRonV1, "ron", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRuV1, "ru", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRumV1, "rum", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRusV1, "rus", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageRussianV1, "russian", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageSpaV1, "spa", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageSpanishV1, "spanish", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageSvV1, "sv", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageSweV1, "swe", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageSwedishV1, "swedish", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageTrV1, "tr", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageTurV1, "tur", TEXT_INDEX_VERSION_1 );
- MONGO_FTS_LANGUAGE_DECLARE( languageTurkishV1, "turkish", TEXT_INDEX_VERSION_1 );
+MONGO_FTS_LANGUAGE_DECLARE(languageNoneV1, "none", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageDaV1, "da", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageDanV1, "dan", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageDanishV1, "danish", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageDeV1, "de", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageDeuV1, "deu", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageDutV1, "dut", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageDutchV1, "dutch", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageEnV1, "en", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageEngV1, "eng", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageEnglishV1, "english", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageEsV1, "es", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageEslV1, "esl", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageFiV1, "fi", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageFinV1, "fin", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageFinnishV1, "finnish", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageFrV1, "fr", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageFraV1, "fra", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageFreV1, "fre", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageFrenchV1, "french", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageGerV1, "ger", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageGermanV1, "german", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageHuV1, "hu", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageHunV1, "hun", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageHungarianV1, "hungarian", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageItV1, "it", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageItaV1, "ita", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageItalianV1, "italian", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageNlV1, "nl", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageNldV1, "nld", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageNoV1, "no", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageNorV1, "nor", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageNorwegianV1, "norwegian", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languagePorV1, "por", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languagePorterV1, "porter", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languagePortugueseV1, "portuguese", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languagePtV1, "pt", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageRoV1, "ro", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageRomanianV1, "romanian", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageRonV1, "ron", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageRuV1, "ru", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageRumV1, "rum", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageRusV1, "rus", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageRussianV1, "russian", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageSpaV1, "spa", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageSpanishV1, "spanish", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageSvV1, "sv", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageSweV1, "swe", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageSwedishV1, "swedish", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageTrV1, "tr", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageTurV1, "tur", TEXT_INDEX_VERSION_1);
+MONGO_FTS_LANGUAGE_DECLARE(languageTurkishV1, "turkish", TEXT_INDEX_VERSION_1);
- MONGO_INITIALIZER_WITH_PREREQUISITES( FTSRegisterLanguageAliases,
- ( "FTSAllLanguagesRegistered" ) )
- ( InitializerContext* context ) {
- // Register language aliases for TEXT_INDEX_VERSION_2.
- FTSLanguage::registerLanguageAlias( &languageDanishV2, "da", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageDutchV2, "nl", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageEnglishV2, "en", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageFinnishV2, "fi", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageFrenchV2, "fr", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageGermanV2, "de", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageHungarianV2, "hu", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageItalianV2, "it", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageNorwegianV2, "nb", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languagePortugueseV2, "pt", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageRomanianV2, "ro", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageRussianV2, "ru", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageSpanishV2, "es", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageSwedishV2, "sv", TEXT_INDEX_VERSION_2 );
- FTSLanguage::registerLanguageAlias( &languageTurkishV2, "tr", TEXT_INDEX_VERSION_2 );
- return Status::OK();
- }
-
- // static
- void FTSLanguage::registerLanguage( StringData languageName,
- TextIndexVersion textIndexVersion,
- FTSLanguage* language ) {
- verify( !languageName.empty() );
- language->_canonicalName = languageName.toString();
- switch ( textIndexVersion ) {
- case TEXT_INDEX_VERSION_2:
- languageMapV2[ languageName.toString() ] = language;
- return;
- case TEXT_INDEX_VERSION_1:
- verify( languageMapV1.find( languageName ) == languageMapV1.end() );
- languageMapV1[ languageName ] = language;
- return;
- }
- verify( false );
- }
+MONGO_INITIALIZER_WITH_PREREQUISITES(FTSRegisterLanguageAliases, ("FTSAllLanguagesRegistered"))
+(InitializerContext* context) {
+ // Register language aliases for TEXT_INDEX_VERSION_2.
+ FTSLanguage::registerLanguageAlias(&languageDanishV2, "da", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageDutchV2, "nl", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageEnglishV2, "en", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageFinnishV2, "fi", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageFrenchV2, "fr", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageGermanV2, "de", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageHungarianV2, "hu", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageItalianV2, "it", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageNorwegianV2, "nb", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languagePortugueseV2, "pt", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageRomanianV2, "ro", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageRussianV2, "ru", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageSpanishV2, "es", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageSwedishV2, "sv", TEXT_INDEX_VERSION_2);
+ FTSLanguage::registerLanguageAlias(&languageTurkishV2, "tr", TEXT_INDEX_VERSION_2);
+ return Status::OK();
+}
- // static
- void FTSLanguage::registerLanguageAlias( const FTSLanguage* language,
- StringData alias,
- TextIndexVersion textIndexVersion ) {
- switch ( textIndexVersion ) {
- case TEXT_INDEX_VERSION_2:
- languageMapV2[ alias.toString() ] = language;
- return;
- case TEXT_INDEX_VERSION_1:
- verify( languageMapV1.find( alias ) == languageMapV1.end() );
- languageMapV1[ alias ] = language;
- return;
- }
- verify( false );
- }
+// static
+void FTSLanguage::registerLanguage(StringData languageName,
+ TextIndexVersion textIndexVersion,
+ FTSLanguage* language) {
+ verify(!languageName.empty());
+ language->_canonicalName = languageName.toString();
+ switch (textIndexVersion) {
+ case TEXT_INDEX_VERSION_2:
+ languageMapV2[languageName.toString()] = language;
+ return;
+ case TEXT_INDEX_VERSION_1:
+ verify(languageMapV1.find(languageName) == languageMapV1.end());
+ languageMapV1[languageName] = language;
+ return;
+ }
+ verify(false);
+}
- FTSLanguage::FTSLanguage() : _canonicalName() {
- }
+// static
+void FTSLanguage::registerLanguageAlias(const FTSLanguage* language,
+ StringData alias,
+ TextIndexVersion textIndexVersion) {
+ switch (textIndexVersion) {
+ case TEXT_INDEX_VERSION_2:
+ languageMapV2[alias.toString()] = language;
+ return;
+ case TEXT_INDEX_VERSION_1:
+ verify(languageMapV1.find(alias) == languageMapV1.end());
+ languageMapV1[alias] = language;
+ return;
+ }
+ verify(false);
+}
- const std::string& FTSLanguage::str() const {
- verify( !_canonicalName.empty() );
- return _canonicalName;
- }
+FTSLanguage::FTSLanguage() : _canonicalName() {}
- // static
- StatusWithFTSLanguage FTSLanguage::make( StringData langName,
- TextIndexVersion textIndexVersion ) {
- switch ( textIndexVersion ) {
- case TEXT_INDEX_VERSION_2: {
- LanguageMapV2::const_iterator it = languageMapV2.find( langName.toString() );
- if ( it == languageMapV2.end() ) {
- // TEXT_INDEX_VERSION_2 rejects unrecognized language strings.
- Status status = Status( ErrorCodes::BadValue,
- mongoutils::str::stream() <<
- "unsupported language: \"" << langName <<
- "\"" );
- return StatusWithFTSLanguage( status );
- }
+const std::string& FTSLanguage::str() const {
+ verify(!_canonicalName.empty());
+ return _canonicalName;
+}
- return StatusWithFTSLanguage( it->second );
- }
- case TEXT_INDEX_VERSION_1: {
- LanguageMapV1::const_iterator it = languageMapV1.find( langName );
- if ( it == languageMapV1.end() ) {
- // TEXT_INDEX_VERSION_1 treats unrecognized language strings as "none".
- return StatusWithFTSLanguage( &languageNoneV1 );
- }
- return StatusWithFTSLanguage( it->second );
- }
+// static
+StatusWithFTSLanguage FTSLanguage::make(StringData langName, TextIndexVersion textIndexVersion) {
+ switch (textIndexVersion) {
+ case TEXT_INDEX_VERSION_2: {
+ LanguageMapV2::const_iterator it = languageMapV2.find(langName.toString());
+ if (it == languageMapV2.end()) {
+ // TEXT_INDEX_VERSION_2 rejects unrecognized language strings.
+ Status status = Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "unsupported language: \""
+ << langName << "\"");
+ return StatusWithFTSLanguage(status);
}
- verify( false );
- return StatusWithFTSLanguage( Status::OK() );
+ return StatusWithFTSLanguage(it->second);
+ }
+ case TEXT_INDEX_VERSION_1: {
+ LanguageMapV1::const_iterator it = languageMapV1.find(langName);
+ if (it == languageMapV1.end()) {
+ // TEXT_INDEX_VERSION_1 treats unrecognized language strings as "none".
+ return StatusWithFTSLanguage(&languageNoneV1);
+ }
+ return StatusWithFTSLanguage(it->second);
}
}
+
+ verify(false);
+ return StatusWithFTSLanguage(Status::OK());
+}
+}
}
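For orientation, the reformatted make() above keeps the long-standing behavioral split between index versions: TEXT_INDEX_VERSION_2 returns an error Status for an unrecognized language string, while TEXT_INDEX_VERSION_1 falls back to the "none" language. A minimal caller sketch follows (not part of this change set; it assumes only the declarations in fts_language.h shown below, including the StatusWithFTSLanguage typedef and the TEXT_INDEX_VERSION_2 enumerator reachable through that header):

// Sketch only: resolve a user-supplied language string for a v2 text index.
// "es" and "SPANISH" both resolve to the Spanish language; "spanglish" is rejected.
#include "mongo/db/fts/fts_language.h"

const mongo::fts::FTSLanguage* resolveV2LanguageOrNull(mongo::StringData name) {
    mongo::fts::StatusWithFTSLanguage swl =
        mongo::fts::FTSLanguage::make(name, mongo::fts::TEXT_INDEX_VERSION_2);
    if (!swl.getStatus().isOK()) {
        return nullptr;  // v2 rejects unknown language strings.
    }
    return swl.getValue();
}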
diff --git a/src/mongo/db/fts/fts_language.h b/src/mongo/db/fts/fts_language.h
index ce45e0b812a..facdb8c9ce0 100644
--- a/src/mongo/db/fts/fts_language.h
+++ b/src/mongo/db/fts/fts_language.h
@@ -37,108 +37,107 @@
namespace mongo {
- namespace fts {
-
- class FTSTokenizer;
-
- #define MONGO_FTS_LANGUAGE_DECLARE( language, name, version ) \
- BasicFTSLanguage language; \
- MONGO_INITIALIZER_GENERAL( language, MONGO_NO_PREREQUISITES, \
- ( "FTSAllLanguagesRegistered" ) ) \
- ( ::mongo::InitializerContext* context ) { \
- FTSLanguage::registerLanguage( name, version, &language ); \
- return Status::OK(); \
- }
-
- /**
- * A FTSLanguage represents a language for a text-indexed document or a text search.
- * FTSLanguage objects are not copyable.
- *
- * Recommended usage:
- *
- * StatusWithFTSLanguage swl = FTSLanguage::make( "en", TEXT_INDEX_VERSION_2 );
- * if ( !swl.getStatus().isOK() ) {
- * // Error.
- * }
- * else {
- * const FTSLanguage* language = swl.getValue();
- * // Use language.
- * }
- */
- class FTSLanguage {
- // Use make() instead of copying.
- MONGO_DISALLOW_COPYING( FTSLanguage );
- public:
- /** Create an uninitialized language. */
- FTSLanguage();
-
- virtual ~FTSLanguage() {}
-
- /**
- * Returns the language as a std::string in canonical form (lowercased English name). It is
- * an error to call str() on an uninitialized language.
- */
- const std::string& str() const;
-
- /**
- * Returns a new FTSTokenizer instance for this language.
- * Lifetime is scoped to FTSLanguage (which are currently all process lifetime)
- */
- virtual std::unique_ptr<FTSTokenizer> createTokenizer() const = 0;
-
- /**
- * Register std::string 'languageName' as a new language with text index version
- * 'textIndexVersion'. Saves the resulting language to out-argument 'languageOut'.
- * Subsequent calls to FTSLanguage::make() will recognize the newly-registered language
- * string.
- */
- static void registerLanguage( StringData languageName,
- TextIndexVersion textIndexVersion,
- FTSLanguage *languageOut );
-
- /**
- * Register 'alias' as an alias for 'language' with text index version
- * 'textIndexVersion'. Subsequent calls to FTSLanguage::make() will recognize the
- * newly-registered alias.
- */
- static void registerLanguageAlias( const FTSLanguage* language,
- StringData alias,
- TextIndexVersion textIndexVersion );
-
- /**
- * Return the FTSLanguage associated with the given language string. Returns an error
- * Status if an invalid language std::string is passed.
- *
- * For textIndexVersion=TEXT_INDEX_VERSION_2, language strings are
- * case-insensitive, and need to be in one of the two following forms:
- * - English name, like "spanish".
- * - Two-letter code, like "es".
- *
- * For textIndexVersion=TEXT_INDEX_VERSION_1, no validation or normalization of
- * language strings is performed. This is necessary to preserve indexing behavior for
- * documents with language strings like "en": for compatibility, text data in these
- * documents needs to be processed with the English stemmer and the empty stopword list
- * (since "en" is recognized by Snowball but not the stopword processing logic).
- */
- static StatusWith<const FTSLanguage*> make( StringData langName,
- TextIndexVersion textIndexVersion );
-
- private:
- // std::string representation of language in canonical form.
- std::string _canonicalName;
- };
-
- typedef StatusWith<const FTSLanguage*> StatusWithFTSLanguage;
-
-
- class BasicFTSLanguage : public FTSLanguage {
- public:
- std::unique_ptr<FTSTokenizer> createTokenizer() const override;
- };
-
- extern BasicFTSLanguage languagePorterV1;
- extern BasicFTSLanguage languageEnglishV2;
- extern BasicFTSLanguage languageFrenchV2;
+namespace fts {
+class FTSTokenizer;
+
+#define MONGO_FTS_LANGUAGE_DECLARE(language, name, version) \
+ BasicFTSLanguage language; \
+ MONGO_INITIALIZER_GENERAL(language, MONGO_NO_PREREQUISITES, ("FTSAllLanguagesRegistered")) \
+ (::mongo::InitializerContext * context) { \
+ FTSLanguage::registerLanguage(name, version, &language); \
+ return Status::OK(); \
}
+
+/**
+ * An FTSLanguage represents a language for a text-indexed document or a text search.
+ * FTSLanguage objects are not copyable.
+ *
+ * Recommended usage:
+ *
+ * StatusWithFTSLanguage swl = FTSLanguage::make( "en", TEXT_INDEX_VERSION_2 );
+ * if ( !swl.getStatus().isOK() ) {
+ * // Error.
+ * }
+ * else {
+ * const FTSLanguage* language = swl.getValue();
+ * // Use language.
+ * }
+ */
+class FTSLanguage {
+ // Use make() instead of copying.
+ MONGO_DISALLOW_COPYING(FTSLanguage);
+
+public:
+ /** Create an uninitialized language. */
+ FTSLanguage();
+
+ virtual ~FTSLanguage() {}
+
+ /**
+ * Returns the language as a std::string in canonical form (lowercased English name). It is
+ * an error to call str() on an uninitialized language.
+ */
+ const std::string& str() const;
+
+ /**
+ * Returns a new FTSTokenizer instance for this language.
+     * The returned tokenizer's lifetime is scoped to this FTSLanguage (all FTSLanguage
+     * objects currently have process lifetime).
+ */
+ virtual std::unique_ptr<FTSTokenizer> createTokenizer() const = 0;
+
+ /**
+ * Register std::string 'languageName' as a new language with text index version
+ * 'textIndexVersion'. Saves the resulting language to out-argument 'languageOut'.
+ * Subsequent calls to FTSLanguage::make() will recognize the newly-registered language
+ * string.
+ */
+ static void registerLanguage(StringData languageName,
+ TextIndexVersion textIndexVersion,
+ FTSLanguage* languageOut);
+
+ /**
+ * Register 'alias' as an alias for 'language' with text index version
+ * 'textIndexVersion'. Subsequent calls to FTSLanguage::make() will recognize the
+ * newly-registered alias.
+ */
+ static void registerLanguageAlias(const FTSLanguage* language,
+ StringData alias,
+ TextIndexVersion textIndexVersion);
+
+ /**
+     * Returns the FTSLanguage associated with the given language string, or an error
+     * Status if an invalid language string is passed.
+ *
+ * For textIndexVersion=TEXT_INDEX_VERSION_2, language strings are
+ * case-insensitive, and need to be in one of the two following forms:
+ * - English name, like "spanish".
+ * - Two-letter code, like "es".
+ *
+ * For textIndexVersion=TEXT_INDEX_VERSION_1, no validation or normalization of
+ * language strings is performed. This is necessary to preserve indexing behavior for
+ * documents with language strings like "en": for compatibility, text data in these
+ * documents needs to be processed with the English stemmer and the empty stopword list
+ * (since "en" is recognized by Snowball but not the stopword processing logic).
+ */
+ static StatusWith<const FTSLanguage*> make(StringData langName,
+ TextIndexVersion textIndexVersion);
+
+private:
+ // std::string representation of language in canonical form.
+ std::string _canonicalName;
+};
+
+typedef StatusWith<const FTSLanguage*> StatusWithFTSLanguage;
+
+
+class BasicFTSLanguage : public FTSLanguage {
+public:
+ std::unique_ptr<FTSTokenizer> createTokenizer() const override;
+};
+
+extern BasicFTSLanguage languagePorterV1;
+extern BasicFTSLanguage languageEnglishV2;
+extern BasicFTSLanguage languageFrenchV2;
+}
}
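As a brief illustration of how the pieces above compose (a hypothetical sketch, not code from this tree): MONGO_FTS_LANGUAGE_DECLARE defines a BasicFTSLanguage plus an initializer that registers its canonical name, and an alias can then be attached in a follow-up initializer, mirroring FTSRegisterLanguageAliases in fts_language.cpp. The language name "klingon" and alias "tlh" are invented for the example.

// Hypothetical registration of an additional v2 language plus a short alias.
MONGO_FTS_LANGUAGE_DECLARE(languageKlingonV2, "klingon", TEXT_INDEX_VERSION_2);

MONGO_INITIALIZER_WITH_PREREQUISITES(RegisterKlingonAlias, ("FTSAllLanguagesRegistered"))
(InitializerContext* context) {
    FTSLanguage::registerLanguageAlias(&languageKlingonV2, "tlh", TEXT_INDEX_VERSION_2);
    return Status::OK();
}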
diff --git a/src/mongo/db/fts/fts_language_test.cpp b/src/mongo/db/fts/fts_language_test.cpp
index 0fb46ef2df7..c24f02ff7fd 100644
--- a/src/mongo/db/fts/fts_language_test.cpp
+++ b/src/mongo/db/fts/fts_language_test.cpp
@@ -35,103 +35,102 @@
namespace mongo {
- namespace fts {
-
- // Positive tests for FTSLanguage::make() with TEXT_INDEX_VERSION_2.
-
- TEST( FTSLanguageV2, ExactLanguage ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "spanish", TEXT_INDEX_VERSION_2 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "spanish" );
- }
-
- TEST( FTSLanguageV2, ExactCode ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "es", TEXT_INDEX_VERSION_2 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "spanish" );
- }
-
- TEST( FTSLanguageV2, UpperCaseLanguage ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "SPANISH", TEXT_INDEX_VERSION_2 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "spanish" );
- }
-
- TEST( FTSLanguageV2, UpperCaseCode ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "ES", TEXT_INDEX_VERSION_2 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "spanish" );
- }
-
- TEST( FTSLanguageV2, NoneLanguage ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "none", TEXT_INDEX_VERSION_2 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "none" );
- }
-
- // Negative tests for FTSLanguage::make() with TEXT_INDEX_VERSION_2.
-
- TEST( FTSLanguageV2, Unknown ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "spanglish", TEXT_INDEX_VERSION_2 );
- ASSERT( !swl.getStatus().isOK() );
- }
-
- TEST( FTSLanguageV2, Empty ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "", TEXT_INDEX_VERSION_2 );
- ASSERT( !swl.getStatus().isOK() );
- }
-
- // Positive tests for FTSLanguage::make() with TEXT_INDEX_VERSION_1.
-
- TEST( FTSLanguageV1, ExactLanguage ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "spanish", TEXT_INDEX_VERSION_1 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "spanish" );
- }
-
- TEST( FTSLanguageV1, DeprecatedLanguage ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "porter", TEXT_INDEX_VERSION_1 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "porter" );
- }
-
- TEST( FTSLanguageV1, StemmerOnlyLanguage1 ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "en", TEXT_INDEX_VERSION_1 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "en" );
- }
-
- TEST( FTSLanguageV1, StemmerOnlyLanguage2 ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "eng", TEXT_INDEX_VERSION_1 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "eng" );
- }
-
- TEST( FTSLanguageV1, NoneLanguage ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "none", TEXT_INDEX_VERSION_1 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "none" );
- }
-
- // Negative tests for FTSLanguage::make() with TEXT_INDEX_VERSION_1.
-
- TEST( FTSLanguageV1, CaseSensitive ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "SPANISH", TEXT_INDEX_VERSION_1 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "none" );
- }
-
- TEST( FTSLanguageV1, Unknown ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "asdf", TEXT_INDEX_VERSION_1 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "none" );
- }
-
- TEST( FTSLanguageV1, Empty ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( "", TEXT_INDEX_VERSION_1 );
- ASSERT( swl.getStatus().isOK() );
- ASSERT_EQUALS( swl.getValue()->str(), "none" );
- }
-
- }
+namespace fts {
+
+// Positive tests for FTSLanguage::make() with TEXT_INDEX_VERSION_2.
+
+TEST(FTSLanguageV2, ExactLanguage) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("spanish", TEXT_INDEX_VERSION_2);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "spanish");
+}
+
+TEST(FTSLanguageV2, ExactCode) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("es", TEXT_INDEX_VERSION_2);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "spanish");
+}
+
+TEST(FTSLanguageV2, UpperCaseLanguage) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("SPANISH", TEXT_INDEX_VERSION_2);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "spanish");
+}
+
+TEST(FTSLanguageV2, UpperCaseCode) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("ES", TEXT_INDEX_VERSION_2);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "spanish");
+}
+
+TEST(FTSLanguageV2, NoneLanguage) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("none", TEXT_INDEX_VERSION_2);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "none");
+}
+
+// Negative tests for FTSLanguage::make() with TEXT_INDEX_VERSION_2.
+
+TEST(FTSLanguageV2, Unknown) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("spanglish", TEXT_INDEX_VERSION_2);
+ ASSERT(!swl.getStatus().isOK());
+}
+
+TEST(FTSLanguageV2, Empty) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("", TEXT_INDEX_VERSION_2);
+ ASSERT(!swl.getStatus().isOK());
+}
+
+// Positive tests for FTSLanguage::make() with TEXT_INDEX_VERSION_1.
+
+TEST(FTSLanguageV1, ExactLanguage) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("spanish", TEXT_INDEX_VERSION_1);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "spanish");
+}
+
+TEST(FTSLanguageV1, DeprecatedLanguage) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("porter", TEXT_INDEX_VERSION_1);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "porter");
+}
+
+TEST(FTSLanguageV1, StemmerOnlyLanguage1) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("en", TEXT_INDEX_VERSION_1);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "en");
+}
+
+TEST(FTSLanguageV1, StemmerOnlyLanguage2) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("eng", TEXT_INDEX_VERSION_1);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "eng");
+}
+
+TEST(FTSLanguageV1, NoneLanguage) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("none", TEXT_INDEX_VERSION_1);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "none");
+}
+
+// Negative tests for FTSLanguage::make() with TEXT_INDEX_VERSION_1.
+
+TEST(FTSLanguageV1, CaseSensitive) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("SPANISH", TEXT_INDEX_VERSION_1);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "none");
+}
+
+TEST(FTSLanguageV1, Unknown) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("asdf", TEXT_INDEX_VERSION_1);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "none");
+}
+
+TEST(FTSLanguageV1, Empty) {
+ StatusWithFTSLanguage swl = FTSLanguage::make("", TEXT_INDEX_VERSION_1);
+ ASSERT(swl.getStatus().isOK());
+ ASSERT_EQUALS(swl.getValue()->str(), "none");
+}
+}
}
diff --git a/src/mongo/db/fts/fts_matcher.cpp b/src/mongo/db/fts/fts_matcher.cpp
index c2aa234cd51..544ef93cf36 100644
--- a/src/mongo/db/fts/fts_matcher.cpp
+++ b/src/mongo/db/fts/fts_matcher.cpp
@@ -37,144 +37,138 @@
namespace mongo {
- namespace fts {
-
- using std::string;
-
- /**
- * Does the string 'phrase' occur in the string 'haystack'? Match is case-insensitive if
- * 'caseSensitive' is false; otherwise, an exact substring match is performed.
- */
- static bool phraseMatches( const string& phrase,
- const string& haystack,
- bool caseSensitive ) {
- if ( caseSensitive ) {
- return haystack.find( phrase ) != string::npos;
- }
- return strcasestr( haystack.c_str(), phrase.c_str() ) != NULL;
- }
-
- FTSMatcher::FTSMatcher( const FTSQuery& query, const FTSSpec& spec )
- : _query( query ),
- _spec( spec ) {
- }
+namespace fts {
- bool FTSMatcher::matches( const BSONObj& obj ) const {
- if ( canSkipPositiveTermCheck() ) {
- // We can assume that 'obj' has at least one positive term, and dassert as a sanity
- // check.
- dassert( hasPositiveTerm( obj ) );
- }
- else {
- if ( !hasPositiveTerm( obj ) ) {
- return false;
- }
- }
-
- if ( hasNegativeTerm( obj ) ) {
- return false;
- }
-
- if ( !positivePhrasesMatch( obj ) ) {
- return false;
- }
-
- return negativePhrasesMatch( obj );
- }
+using std::string;
- bool FTSMatcher::hasPositiveTerm( const BSONObj& obj ) const {
- FTSElementIterator it( _spec, obj );
+/**
+ * Does the string 'phrase' occur in the string 'haystack'? Match is case-insensitive if
+ * 'caseSensitive' is false; otherwise, an exact substring match is performed.
+ */
+static bool phraseMatches(const string& phrase, const string& haystack, bool caseSensitive) {
+ if (caseSensitive) {
+ return haystack.find(phrase) != string::npos;
+ }
+ return strcasestr(haystack.c_str(), phrase.c_str()) != NULL;
+}
- while ( it.more() ) {
- FTSIteratorValue val = it.next();
- if ( _hasPositiveTerm_string( val._language, val._text ) ) {
- return true;
- }
- }
+FTSMatcher::FTSMatcher(const FTSQuery& query, const FTSSpec& spec) : _query(query), _spec(spec) {}
+bool FTSMatcher::matches(const BSONObj& obj) const {
+ if (canSkipPositiveTermCheck()) {
+ // We can assume that 'obj' has at least one positive term, and dassert as a sanity
+ // check.
+ dassert(hasPositiveTerm(obj));
+ } else {
+ if (!hasPositiveTerm(obj)) {
return false;
}
+ }
- bool FTSMatcher::_hasPositiveTerm_string( const FTSLanguage* language,
- const string& raw ) const {
- std::unique_ptr<FTSTokenizer> tokenizer(language->createTokenizer());
+ if (hasNegativeTerm(obj)) {
+ return false;
+ }
- tokenizer->reset(raw.c_str(), _query.getCaseSensitive() ?
- FTSTokenizer::GenerateCaseSensitiveTokens : FTSTokenizer::None);
+ if (!positivePhrasesMatch(obj)) {
+ return false;
+ }
- while (tokenizer->moveNext()) {
- string word = tokenizer->get().toString();
- if (_query.getPositiveTerms().count(word) > 0) {
- return true;
- }
- }
- return false;
+ return negativePhrasesMatch(obj);
+}
+
+bool FTSMatcher::hasPositiveTerm(const BSONObj& obj) const {
+ FTSElementIterator it(_spec, obj);
+
+ while (it.more()) {
+ FTSIteratorValue val = it.next();
+ if (_hasPositiveTerm_string(val._language, val._text)) {
+ return true;
}
+ }
- bool FTSMatcher::hasNegativeTerm( const BSONObj& obj ) const {
- if ( _query.getNegatedTerms().size() == 0 ) {
- return false;
- }
+ return false;
+}
- FTSElementIterator it( _spec, obj );
+bool FTSMatcher::_hasPositiveTerm_string(const FTSLanguage* language, const string& raw) const {
+ std::unique_ptr<FTSTokenizer> tokenizer(language->createTokenizer());
- while ( it.more() ) {
- FTSIteratorValue val = it.next();
- if ( _hasNegativeTerm_string( val._language, val._text ) ) {
- return true;
- }
- }
+ tokenizer->reset(raw.c_str(),
+ _query.getCaseSensitive() ? FTSTokenizer::GenerateCaseSensitiveTokens
+ : FTSTokenizer::None);
- return false;
+ while (tokenizer->moveNext()) {
+ string word = tokenizer->get().toString();
+ if (_query.getPositiveTerms().count(word) > 0) {
+ return true;
}
+ }
+ return false;
+}
- bool FTSMatcher::_hasNegativeTerm_string( const FTSLanguage* language,
- const string& raw ) const {
- std::unique_ptr<FTSTokenizer> tokenizer(language->createTokenizer());
+bool FTSMatcher::hasNegativeTerm(const BSONObj& obj) const {
+ if (_query.getNegatedTerms().size() == 0) {
+ return false;
+ }
- tokenizer->reset(raw.c_str(), _query.getCaseSensitive() ?
- FTSTokenizer::GenerateCaseSensitiveTokens : FTSTokenizer::None);
+ FTSElementIterator it(_spec, obj);
- while (tokenizer->moveNext()) {
- string word = tokenizer->get().toString();
- if ( _query.getNegatedTerms().count( word ) > 0 ) {
- return true;
- }
- }
- return false;
+ while (it.more()) {
+ FTSIteratorValue val = it.next();
+ if (_hasNegativeTerm_string(val._language, val._text)) {
+ return true;
}
+ }
- bool FTSMatcher::positivePhrasesMatch( const BSONObj& obj ) const {
- for ( size_t i = 0; i < _query.getPositivePhr().size(); i++ ) {
- if ( !_phraseMatch( _query.getPositivePhr()[i], obj ) ) {
- return false;
- }
- }
+ return false;
+}
- return true;
- }
+bool FTSMatcher::_hasNegativeTerm_string(const FTSLanguage* language, const string& raw) const {
+ std::unique_ptr<FTSTokenizer> tokenizer(language->createTokenizer());
- bool FTSMatcher::negativePhrasesMatch( const BSONObj& obj ) const {
- for ( size_t i = 0; i < _query.getNegatedPhr().size(); i++ ) {
- if ( _phraseMatch( _query.getNegatedPhr()[i], obj ) ) {
- return false;
- }
- }
+ tokenizer->reset(raw.c_str(),
+ _query.getCaseSensitive() ? FTSTokenizer::GenerateCaseSensitiveTokens
+ : FTSTokenizer::None);
+ while (tokenizer->moveNext()) {
+ string word = tokenizer->get().toString();
+ if (_query.getNegatedTerms().count(word) > 0) {
return true;
}
+ }
+ return false;
+}
- bool FTSMatcher::_phraseMatch( const string& phrase, const BSONObj& obj ) const {
- FTSElementIterator it( _spec, obj );
+bool FTSMatcher::positivePhrasesMatch(const BSONObj& obj) const {
+ for (size_t i = 0; i < _query.getPositivePhr().size(); i++) {
+ if (!_phraseMatch(_query.getPositivePhr()[i], obj)) {
+ return false;
+ }
+ }
- while ( it.more() ) {
- FTSIteratorValue val = it.next();
- if ( phraseMatches( phrase, val._text, _query.getCaseSensitive() ) ) {
- return true;
- }
- }
+ return true;
+}
+bool FTSMatcher::negativePhrasesMatch(const BSONObj& obj) const {
+ for (size_t i = 0; i < _query.getNegatedPhr().size(); i++) {
+ if (_phraseMatch(_query.getNegatedPhr()[i], obj)) {
return false;
}
}
+
+ return true;
+}
+
+bool FTSMatcher::_phraseMatch(const string& phrase, const BSONObj& obj) const {
+ FTSElementIterator it(_spec, obj);
+
+ while (it.more()) {
+ FTSIteratorValue val = it.next();
+ if (phraseMatches(phrase, val._text, _query.getCaseSensitive())) {
+ return true;
+ }
+ }
+
+ return false;
+}
+}
}
diff --git a/src/mongo/db/fts/fts_matcher.h b/src/mongo/db/fts/fts_matcher.h
index 058dcc7bcb6..00fe8291c4d 100644
--- a/src/mongo/db/fts/fts_matcher.h
+++ b/src/mongo/db/fts/fts_matcher.h
@@ -36,74 +36,74 @@
namespace mongo {
- namespace fts {
-
- class FTSMatcher {
- MONGO_DISALLOW_COPYING( FTSMatcher );
- public:
- FTSMatcher( const FTSQuery& query, const FTSSpec& spec );
-
- /**
- * Returns whether 'obj' matches the query. An object is considered to match the query
- * if all four of the following conditions hold:
- * 1) The object contains at least one positive term.
- * 2) The object contains zero negative terms.
- * 3) The object contains all positive phrases.
- * 4) The object contains zero negative phrases.
- */
- bool matches( const BSONObj& obj ) const;
-
- /**
- * Returns whether 'obj' contains at least one positive term.
- */
- bool hasPositiveTerm( const BSONObj& obj ) const;
-
- /**
- * Returns whether 'obj' contains at least one negative term.
- */
- bool hasNegativeTerm( const BSONObj& obj ) const;
-
- /**
- * Returns whether 'obj' contains all positive phrases.
- */
- bool positivePhrasesMatch( const BSONObj& obj ) const;
-
- /**
- * Returns whether 'obj' contains zero negative phrases.
- */
- bool negativePhrasesMatch( const BSONObj& obj ) const;
-
- private:
- /**
- * For matching, can we skip the positive term check? This is done as optimization when
- * we have a-priori knowledge that all documents being matched pass the positive term
- * check.
- */
- bool canSkipPositiveTermCheck() const { return !_query.getCaseSensitive(); }
-
- /**
- * Returns whether the string 'raw' contains any positive terms from the query.
- * 'language' specifies the language for 'raw'.
- */
- bool _hasPositiveTerm_string( const FTSLanguage* language,
- const std::string& raw ) const;
-
- /**
- * Returns whether the string 'raw' contains any negative terms from the query.
- * 'language' specifies the language for 'raw'.
- */
- bool _hasNegativeTerm_string( const FTSLanguage* language,
- const std::string& raw ) const;
-
- /**
- * Returns whether 'obj' contains the exact string 'phrase' in any indexed fields.
- */
- bool _phraseMatch( const std::string& phrase, const BSONObj& obj ) const;
-
- // TODO These should be unowned pointers instead of owned copies.
- const FTSQuery _query;
- const FTSSpec _spec;
- };
-
+namespace fts {
+
+class FTSMatcher {
+ MONGO_DISALLOW_COPYING(FTSMatcher);
+
+public:
+ FTSMatcher(const FTSQuery& query, const FTSSpec& spec);
+
+ /**
+ * Returns whether 'obj' matches the query. An object is considered to match the query
+ * if all four of the following conditions hold:
+ * 1) The object contains at least one positive term.
+ * 2) The object contains zero negative terms.
+ * 3) The object contains all positive phrases.
+ * 4) The object contains zero negative phrases.
+ */
+ bool matches(const BSONObj& obj) const;
+
+ /**
+ * Returns whether 'obj' contains at least one positive term.
+ */
+ bool hasPositiveTerm(const BSONObj& obj) const;
+
+ /**
+ * Returns whether 'obj' contains at least one negative term.
+ */
+ bool hasNegativeTerm(const BSONObj& obj) const;
+
+ /**
+ * Returns whether 'obj' contains all positive phrases.
+ */
+ bool positivePhrasesMatch(const BSONObj& obj) const;
+
+ /**
+ * Returns whether 'obj' contains zero negative phrases.
+ */
+ bool negativePhrasesMatch(const BSONObj& obj) const;
+
+private:
+ /**
+     * For matching, can we skip the positive term check? This is done as an optimization
+     * when we have a priori knowledge that all documents being matched pass the positive
+     * term check.
+ */
+ bool canSkipPositiveTermCheck() const {
+ return !_query.getCaseSensitive();
}
+
+ /**
+ * Returns whether the string 'raw' contains any positive terms from the query.
+ * 'language' specifies the language for 'raw'.
+ */
+ bool _hasPositiveTerm_string(const FTSLanguage* language, const std::string& raw) const;
+
+ /**
+ * Returns whether the string 'raw' contains any negative terms from the query.
+ * 'language' specifies the language for 'raw'.
+ */
+ bool _hasNegativeTerm_string(const FTSLanguage* language, const std::string& raw) const;
+
+ /**
+ * Returns whether 'obj' contains the exact string 'phrase' in any indexed fields.
+ */
+ bool _phraseMatch(const std::string& phrase, const BSONObj& obj) const;
+
+ // TODO These should be unowned pointers instead of owned copies.
+ const FTSQuery _query;
+ const FTSSpec _spec;
+};
+}
}
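To make the four-condition contract of matches() concrete, here is a condensed usage sketch in the style of the tests below (illustrative only; the query string and document are invented, and the usual mongo/db/fts headers, BSON builder macros, and invariant() are assumed to be available):

// Sketch: the document must contain a positive term, no negated terms,
// every positive phrase, and no negated phrases.
FTSQuery q;
Status s = q.parse("table -lamp \"table top\"", "english",
                   false /* caseSensitive */, TEXT_INDEX_VERSION_2);
invariant(s.isOK());
FTSMatcher matcher(q, FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
                                                                  << "text")))));
bool ok = matcher.matches(BSON("x"
                               << "coffee table top"));  // true: "table" matches, no
                                                         // "lamp", phrase "table top" found.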
diff --git a/src/mongo/db/fts/fts_matcher_test.cpp b/src/mongo/db/fts/fts_matcher_test.cpp
index 0ea0fbe9e7e..13eb74609dc 100644
--- a/src/mongo/db/fts/fts_matcher_test.cpp
+++ b/src/mongo/db/fts/fts_matcher_test.cpp
@@ -34,187 +34,204 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- namespace fts {
-
- TEST( FTSMatcher, NegWild1 ) {
- FTSQuery q;
- ASSERT_OK( q.parse( "foo -bar", "english", false, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "$**" << "text" ) ) ) ) );
-
- ASSERT( m.hasNegativeTerm( BSON( "x" << BSON( "y" << "bar" ) ) ) );
- ASSERT( m.hasNegativeTerm( BSON( "x" << BSON( "y" << "bar" ) ) ) );
- }
-
- // Regression test for SERVER-11994.
- TEST( FTSMatcher, NegWild2 ) {
- FTSQuery q;
- ASSERT_OK( q.parse( "pizza -restaurant", "english", false, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "$**" << "text" ) ) ) ) );
-
- ASSERT( m.hasNegativeTerm( BSON( "x" << BSON( "y" << "pizza restaurant" ) ) ) );
- ASSERT( m.hasNegativeTerm( BSON( "x" << BSON( "y" << "PIZZA RESTAURANT" ) ) ) );
- }
-
- TEST( FTSMatcher, Phrase1 ) {
- FTSQuery q;
- ASSERT_OK( q.parse( "foo \"table top\"", "english", false, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "$**" << "text" ) ) ) ) );
-
- ASSERT( m.positivePhrasesMatch( BSON( "x" << "table top" ) ) );
- ASSERT( m.positivePhrasesMatch( BSON( "x" << " asd table top asd" ) ) );
- ASSERT( !m.positivePhrasesMatch( BSON( "x" << "tablz top" ) ) );
- ASSERT( !m.positivePhrasesMatch( BSON( "x" << " asd tablz top asd" ) ) );
-
- ASSERT( m.positivePhrasesMatch( BSON( "x" << "table top" ) ) );
- ASSERT( !m.positivePhrasesMatch( BSON( "x" << "table a top" ) ) );
-
- }
-
- TEST( FTSMatcher, Phrase2 ) {
- FTSQuery q;
- ASSERT_OK( q.parse( "foo \"table top\"", "english", false, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << "text" ) ) ) ) );
- ASSERT( m.positivePhrasesMatch( BSON( "x" << BSON_ARRAY( "table top" ) ) ) );
- }
-
- // Test that the matcher parses the document with the document language, not the search
- // language.
- TEST( FTSMatcher, ParsesUsingDocLanguage ) {
- FTSQuery q;
- ASSERT_OK( q.parse( "-glad", "none", false, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << "text" ) ) ) ) );
-
- // Even though the search language is "none", the document {x: "gladly"} should be
- // parsed using the English stemmer, and as such should match the negated term "glad".
- ASSERT( m.hasNegativeTerm( BSON( "x" << "gladly" ) ) );
- }
-
- // Test the matcher does not filter out stop words from positive terms
- TEST( FTSMatcher, MatcherDoesNotFilterStopWordsNeg ) {
- FTSQuery q;
- ASSERT_OK( q.parse( "-the", "none", false, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << "text" ) ) ) ) );
-
- ASSERT( m.hasNegativeTerm( BSON( "x" << "the" ) ) );
- }
-
- // Test the matcher does not filter out stop words from negative terms
- TEST( FTSMatcher, MatcherDoesNotFilterStopWordsPos ) {
- FTSQuery q;
- ASSERT_OK( q.parse( "the", "none", false, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << "text" ) ) ) ) );
-
- ASSERT( m.hasPositiveTerm( BSON( "x" << "the" ) ) );
- }
-
- // Returns whether a document indexed with text data 'doc' contains any positive terms from
- // case-sensitive text query 'search'.
- static bool docHasPositiveTermWithCase( const std::string& doc,
- const std::string& search ) {
- FTSQuery q;
- ASSERT_OK( q.parse( search, "english", true, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << "text" ) ) ) ) );
-
- return m.hasPositiveTerm( BSON( "x" << doc ) );
- }
-
- TEST( FTSMatcher, HasPositiveTermCaseSensitive ) {
- ASSERT_TRUE( docHasPositiveTermWithCase( "hello world", "hello" ) );
- ASSERT_TRUE( docHasPositiveTermWithCase( "Hello World", "Hello" ) );
- ASSERT_TRUE( docHasPositiveTermWithCase( "Hello World", "World Hello" ) );
- ASSERT_TRUE( docHasPositiveTermWithCase( "Hello World", "World GoodBye" ) );
- ASSERT_TRUE( docHasPositiveTermWithCase( "John Runs", "Runs" ) );
- ASSERT_TRUE( docHasPositiveTermWithCase( "John Runs", "Running" ) );
- ASSERT_TRUE( docHasPositiveTermWithCase( "John Runs", "Run" ) );
-
- ASSERT_FALSE( docHasPositiveTermWithCase( "John Runs", "run" ) );
- ASSERT_FALSE( docHasPositiveTermWithCase( "Hello World", "HELLO" ) );
- ASSERT_FALSE( docHasPositiveTermWithCase( "hello world", "Hello" ) );
- ASSERT_FALSE( docHasPositiveTermWithCase( "Hello World", "hello" ) );
- }
-
- // Returns whether a document indexed with text data 'doc' contains any negative terms from
- // case-sensitive text query 'search'.
- static bool docHasNegativeTermWithCase( const std::string& doc,
- const std::string& search ) {
- FTSQuery q;
- ASSERT_OK( q.parse( search, "english", true, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << "text" ) ) ) ) );
-
- return m.hasNegativeTerm( BSON( "x" << doc ) );
- }
-
- TEST( FTSMatcher, HasNegativeTermCaseSensitive ) {
- ASSERT_TRUE( docHasNegativeTermWithCase( "hello world", "hello -world" ) );
- ASSERT_TRUE( docHasNegativeTermWithCase( "Hello World", "Hello -World" ) );
- ASSERT_TRUE( docHasNegativeTermWithCase( "Hello World", "-World -Hello" ) );
- ASSERT_TRUE( docHasNegativeTermWithCase( "Hello World", "-Goodbye -World" ) );
- ASSERT_TRUE( docHasNegativeTermWithCase( "John Runs", "-Runs" ) );
- ASSERT_TRUE( docHasNegativeTermWithCase( "John Runs", "-Running" ) );
- ASSERT_TRUE( docHasNegativeTermWithCase( "John Runs", "-Run" ) );
-
- ASSERT_FALSE( docHasNegativeTermWithCase( "John Runs", "-run" ) );
- ASSERT_FALSE( docHasNegativeTermWithCase( "Hello World", "Hello -WORLD" ) );
- ASSERT_FALSE( docHasNegativeTermWithCase( "hello world", "hello -World" ) );
- ASSERT_FALSE( docHasNegativeTermWithCase( "Hello World", "Hello -world" ) );
- }
-
- // Returns whether a document indexed with text data 'doc' contains all positive phrases
- // from case-sensitive text query 'search'.
- static bool docPositivePhrasesMatchWithCase( const std::string& doc,
- const std::string& search ) {
- FTSQuery q;
- ASSERT_OK( q.parse( search, "english", true, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << "text" ) ) ) ) );
-
- return m.positivePhrasesMatch( BSON( "x" << doc ) );
- }
-
- TEST( FTSMatcher, PositivePhrasesMatchWithCase ) {
- ASSERT_TRUE( docPositivePhrasesMatchWithCase( "John Runs", "\"John Runs\"" ) );
- ASSERT_TRUE( docPositivePhrasesMatchWithCase( "John Runs", "\"John Run\"" ) );
- ASSERT_TRUE( docPositivePhrasesMatchWithCase( "John Runs", "\"John\" \"Run\"" ) );
- ASSERT_TRUE( docPositivePhrasesMatchWithCase( "John Runs", "\"n R\"" ) );
-
- ASSERT_FALSE( docPositivePhrasesMatchWithCase( "John Runs", "\"john runs\"" ) );
- ASSERT_FALSE( docPositivePhrasesMatchWithCase( "john runs", "\"John Runs\"" ) );
- ASSERT_FALSE( docPositivePhrasesMatchWithCase( "John Runs", "\"John\" \"Running\"" ) );
- }
-
- // Returns whether a document indexed with text data 'doc' contains zero negative phrases
- // from case-sensitive text query 'search'.
- static bool docNegativePhrasesMatchWithCase( const std::string& doc,
- const std::string& search ) {
- FTSQuery q;
- ASSERT_OK( q.parse( search, "english", true, TEXT_INDEX_VERSION_2 ) );
- FTSMatcher m( q,
- FTSSpec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << "text" ) ) ) ) );
-
- return m.negativePhrasesMatch( BSON( "x" << doc ) );
- }
-
- TEST( FTSMatcher, NegativePhrasesMatchWithCase ) {
- ASSERT_TRUE( docNegativePhrasesMatchWithCase( "John Runs", "-\"john runs\"" ) );
- ASSERT_TRUE( docNegativePhrasesMatchWithCase( "john runs", "-\"John Runs\"" ) );
- ASSERT_TRUE( docNegativePhrasesMatchWithCase( "john runs", "-\"John\" -\"Runs\"" ) );
-
- ASSERT_FALSE( docNegativePhrasesMatchWithCase( "John Runs", "-\"John Runs\"" ) );
- ASSERT_FALSE( docNegativePhrasesMatchWithCase( "John Runs", "-\"John Run\"" ) );
- ASSERT_FALSE( docNegativePhrasesMatchWithCase( "John Runs", "-\"John\" -\"Run\"" ) );
- ASSERT_FALSE( docNegativePhrasesMatchWithCase( "John Runs", "-\"n R\"" ) );
- ASSERT_FALSE( docNegativePhrasesMatchWithCase( "John Runs",
- "-\"John\" -\"Running\"" ) );
- }
-
- }
+namespace fts {
+
+TEST(FTSMatcher, NegWild1) {
+ FTSQuery q;
+ ASSERT_OK(q.parse("foo -bar", "english", false, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("$**"
+ << "text")))));
+
+ ASSERT(m.hasNegativeTerm(BSON("x" << BSON("y"
+ << "bar"))));
+ ASSERT(m.hasNegativeTerm(BSON("x" << BSON("y"
+ << "bar"))));
+}
+
+// Regression test for SERVER-11994.
+TEST(FTSMatcher, NegWild2) {
+ FTSQuery q;
+ ASSERT_OK(q.parse("pizza -restaurant", "english", false, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("$**"
+ << "text")))));
+
+ ASSERT(m.hasNegativeTerm(BSON("x" << BSON("y"
+ << "pizza restaurant"))));
+ ASSERT(m.hasNegativeTerm(BSON("x" << BSON("y"
+ << "PIZZA RESTAURANT"))));
+}
+
+TEST(FTSMatcher, Phrase1) {
+ FTSQuery q;
+ ASSERT_OK(q.parse("foo \"table top\"", "english", false, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("$**"
+ << "text")))));
+
+ ASSERT(m.positivePhrasesMatch(BSON("x"
+ << "table top")));
+ ASSERT(m.positivePhrasesMatch(BSON("x"
+ << " asd table top asd")));
+ ASSERT(!m.positivePhrasesMatch(BSON("x"
+ << "tablz top")));
+ ASSERT(!m.positivePhrasesMatch(BSON("x"
+ << " asd tablz top asd")));
+
+ ASSERT(m.positivePhrasesMatch(BSON("x"
+ << "table top")));
+ ASSERT(!m.positivePhrasesMatch(BSON("x"
+ << "table a top")));
+}
+
+TEST(FTSMatcher, Phrase2) {
+ FTSQuery q;
+ ASSERT_OK(q.parse("foo \"table top\"", "english", false, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
+ << "text")))));
+ ASSERT(m.positivePhrasesMatch(BSON("x" << BSON_ARRAY("table top"))));
+}
+
+// Test that the matcher parses the document with the document language, not the search
+// language.
+TEST(FTSMatcher, ParsesUsingDocLanguage) {
+ FTSQuery q;
+ ASSERT_OK(q.parse("-glad", "none", false, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
+ << "text")))));
+
+ // Even though the search language is "none", the document {x: "gladly"} should be
+ // parsed using the English stemmer, and as such should match the negated term "glad".
+ ASSERT(m.hasNegativeTerm(BSON("x"
+ << "gladly")));
+}
+
+// Test that the matcher does not filter out stop words from negated terms
+TEST(FTSMatcher, MatcherDoesNotFilterStopWordsNeg) {
+ FTSQuery q;
+ ASSERT_OK(q.parse("-the", "none", false, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
+ << "text")))));
+
+ ASSERT(m.hasNegativeTerm(BSON("x"
+ << "the")));
+}
+
+// Test that the matcher does not filter out stop words from positive terms
+TEST(FTSMatcher, MatcherDoesNotFilterStopWordsPos) {
+ FTSQuery q;
+ ASSERT_OK(q.parse("the", "none", false, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
+ << "text")))));
+
+ ASSERT(m.hasPositiveTerm(BSON("x"
+ << "the")));
+}
+
+// Returns whether a document indexed with text data 'doc' contains any positive terms from
+// case-sensitive text query 'search'.
+static bool docHasPositiveTermWithCase(const std::string& doc, const std::string& search) {
+ FTSQuery q;
+ ASSERT_OK(q.parse(search, "english", true, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
+ << "text")))));
+
+ return m.hasPositiveTerm(BSON("x" << doc));
+}
+
+TEST(FTSMatcher, HasPositiveTermCaseSensitive) {
+ ASSERT_TRUE(docHasPositiveTermWithCase("hello world", "hello"));
+ ASSERT_TRUE(docHasPositiveTermWithCase("Hello World", "Hello"));
+ ASSERT_TRUE(docHasPositiveTermWithCase("Hello World", "World Hello"));
+ ASSERT_TRUE(docHasPositiveTermWithCase("Hello World", "World GoodBye"));
+ ASSERT_TRUE(docHasPositiveTermWithCase("John Runs", "Runs"));
+ ASSERT_TRUE(docHasPositiveTermWithCase("John Runs", "Running"));
+ ASSERT_TRUE(docHasPositiveTermWithCase("John Runs", "Run"));
+
+ ASSERT_FALSE(docHasPositiveTermWithCase("John Runs", "run"));
+ ASSERT_FALSE(docHasPositiveTermWithCase("Hello World", "HELLO"));
+ ASSERT_FALSE(docHasPositiveTermWithCase("hello world", "Hello"));
+ ASSERT_FALSE(docHasPositiveTermWithCase("Hello World", "hello"));
+}
+
+// Returns whether a document indexed with text data 'doc' contains any negative terms from
+// case-sensitive text query 'search'.
+static bool docHasNegativeTermWithCase(const std::string& doc, const std::string& search) {
+ FTSQuery q;
+ ASSERT_OK(q.parse(search, "english", true, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
+ << "text")))));
+
+ return m.hasNegativeTerm(BSON("x" << doc));
+}
+
+TEST(FTSMatcher, HasNegativeTermCaseSensitive) {
+ ASSERT_TRUE(docHasNegativeTermWithCase("hello world", "hello -world"));
+ ASSERT_TRUE(docHasNegativeTermWithCase("Hello World", "Hello -World"));
+ ASSERT_TRUE(docHasNegativeTermWithCase("Hello World", "-World -Hello"));
+ ASSERT_TRUE(docHasNegativeTermWithCase("Hello World", "-Goodbye -World"));
+ ASSERT_TRUE(docHasNegativeTermWithCase("John Runs", "-Runs"));
+ ASSERT_TRUE(docHasNegativeTermWithCase("John Runs", "-Running"));
+ ASSERT_TRUE(docHasNegativeTermWithCase("John Runs", "-Run"));
+
+ ASSERT_FALSE(docHasNegativeTermWithCase("John Runs", "-run"));
+ ASSERT_FALSE(docHasNegativeTermWithCase("Hello World", "Hello -WORLD"));
+ ASSERT_FALSE(docHasNegativeTermWithCase("hello world", "hello -World"));
+ ASSERT_FALSE(docHasNegativeTermWithCase("Hello World", "Hello -world"));
+}
+
+// Returns whether a document indexed with text data 'doc' contains all positive phrases
+// from case-sensitive text query 'search'.
+static bool docPositivePhrasesMatchWithCase(const std::string& doc, const std::string& search) {
+ FTSQuery q;
+ ASSERT_OK(q.parse(search, "english", true, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
+ << "text")))));
+
+ return m.positivePhrasesMatch(BSON("x" << doc));
+}
+
+TEST(FTSMatcher, PositivePhrasesMatchWithCase) {
+ ASSERT_TRUE(docPositivePhrasesMatchWithCase("John Runs", "\"John Runs\""));
+ ASSERT_TRUE(docPositivePhrasesMatchWithCase("John Runs", "\"John Run\""));
+ ASSERT_TRUE(docPositivePhrasesMatchWithCase("John Runs", "\"John\" \"Run\""));
+ ASSERT_TRUE(docPositivePhrasesMatchWithCase("John Runs", "\"n R\""));
+
+ ASSERT_FALSE(docPositivePhrasesMatchWithCase("John Runs", "\"john runs\""));
+ ASSERT_FALSE(docPositivePhrasesMatchWithCase("john runs", "\"John Runs\""));
+ ASSERT_FALSE(docPositivePhrasesMatchWithCase("John Runs", "\"John\" \"Running\""));
+}
+
+// Returns whether a document indexed with text data 'doc' contains zero negative phrases
+// from case-sensitive text query 'search'.
+static bool docNegativePhrasesMatchWithCase(const std::string& doc, const std::string& search) {
+ FTSQuery q;
+ ASSERT_OK(q.parse(search, "english", true, TEXT_INDEX_VERSION_2));
+ FTSMatcher m(q,
+ FTSSpec(FTSSpec::fixSpec(BSON("key" << BSON("x"
+ << "text")))));
+
+ return m.negativePhrasesMatch(BSON("x" << doc));
+}
+
+TEST(FTSMatcher, NegativePhrasesMatchWithCase) {
+ ASSERT_TRUE(docNegativePhrasesMatchWithCase("John Runs", "-\"john runs\""));
+ ASSERT_TRUE(docNegativePhrasesMatchWithCase("john runs", "-\"John Runs\""));
+ ASSERT_TRUE(docNegativePhrasesMatchWithCase("john runs", "-\"John\" -\"Runs\""));
+
+ ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"John Runs\""));
+ ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"John Run\""));
+ ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"John\" -\"Run\""));
+ ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"n R\""));
+ ASSERT_FALSE(docNegativePhrasesMatchWithCase("John Runs", "-\"John\" -\"Running\""));
+}
+}
}
diff --git a/src/mongo/db/fts/fts_query.cpp b/src/mongo/db/fts/fts_query.cpp
index bbaac9b2f1e..8dec8e29204 100644
--- a/src/mongo/db/fts/fts_query.cpp
+++ b/src/mongo/db/fts/fts_query.cpp
@@ -40,219 +40,208 @@
namespace mongo {
- namespace fts {
+namespace fts {
- using namespace mongoutils;
+using namespace mongoutils;
- using std::set;
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::set;
+using std::string;
+using std::stringstream;
+using std::vector;
- const bool FTSQuery::caseSensitiveDefault = false;
+const bool FTSQuery::caseSensitiveDefault = false;
- Status FTSQuery::parse(const string& query, StringData language, bool caseSensitive,
- TextIndexVersion textIndexVersion) {
- StatusWithFTSLanguage swl = FTSLanguage::make( language, textIndexVersion );
- if ( !swl.getStatus().isOK() ) {
- return swl.getStatus();
- }
- _language = swl.getValue();
- _caseSensitive = caseSensitive;
-
- // Build a space delimited list of words to have the FtsTokenizer tokenize
- string positiveTermSentence;
- string negativeTermSentence;
-
- bool inNegation = false;
- bool inPhrase = false;
-
- unsigned quoteOffset = 0;
-
- FTSQueryParser i(query);
- while ( i.more() ) {
- QueryToken t = i.next();
-
- if ( t.type == QueryToken::TEXT ) {
- string s = t.data.toString();
-
- if ( inPhrase && inNegation ) {
- // don't add term
- }
- else {
- if (inNegation) {
- negativeTermSentence.append(s);
- negativeTermSentence.push_back(' ');
- }
- else {
- positiveTermSentence.append(s);
- positiveTermSentence.push_back(' ');
- }
- }
-
- if ( inNegation && !inPhrase )
- inNegation = false;
+Status FTSQuery::parse(const string& query,
+ StringData language,
+ bool caseSensitive,
+ TextIndexVersion textIndexVersion) {
+ StatusWithFTSLanguage swl = FTSLanguage::make(language, textIndexVersion);
+ if (!swl.getStatus().isOK()) {
+ return swl.getStatus();
+ }
+ _language = swl.getValue();
+ _caseSensitive = caseSensitive;
+
+    // Build a space-delimited list of words to have the FTSTokenizer tokenize
+ string positiveTermSentence;
+ string negativeTermSentence;
+
+ bool inNegation = false;
+ bool inPhrase = false;
+
+ unsigned quoteOffset = 0;
+
+ FTSQueryParser i(query);
+ while (i.more()) {
+ QueryToken t = i.next();
+
+ if (t.type == QueryToken::TEXT) {
+ string s = t.data.toString();
+
+ if (inPhrase && inNegation) {
+ // don't add term
+ } else {
+ if (inNegation) {
+ negativeTermSentence.append(s);
+ negativeTermSentence.push_back(' ');
+ } else {
+ positiveTermSentence.append(s);
+ positiveTermSentence.push_back(' ');
}
- else if ( t.type == QueryToken::DELIMITER ) {
- char c = t.data[0];
- if ( c == '-' ) {
- if ( !inPhrase && t.previousWhiteSpace ) {
- // phrases can be negated, and terms not in phrases can be negated.
- // terms in phrases can not be negated.
- inNegation = true;
- }
- }
- else if ( c == '"' ) {
- if ( inPhrase ) {
- // end of a phrase
- unsigned phraseStart = quoteOffset + 1;
- unsigned phraseLength = t.offset - phraseStart;
- StringData phrase = StringData( query ).substr( phraseStart,
- phraseLength );
- if ( inNegation )
- _negatedPhrases.push_back( normalizeString( phrase ) );
- else
- _positivePhrases.push_back( normalizeString( phrase ) );
- inNegation = false;
- inPhrase = false;
- }
- else {
- // start of a phrase
- inPhrase = true;
- quoteOffset = t.offset;
- }
- }
+ }
+
+ if (inNegation && !inPhrase)
+ inNegation = false;
+ } else if (t.type == QueryToken::DELIMITER) {
+ char c = t.data[0];
+ if (c == '-') {
+ if (!inPhrase && t.previousWhiteSpace) {
+ // phrases can be negated, and terms not in phrases can be negated.
+ // terms in phrases can not be negated.
+ inNegation = true;
}
- else {
- invariant( false );
+ } else if (c == '"') {
+ if (inPhrase) {
+ // end of a phrase
+ unsigned phraseStart = quoteOffset + 1;
+ unsigned phraseLength = t.offset - phraseStart;
+ StringData phrase = StringData(query).substr(phraseStart, phraseLength);
+ if (inNegation)
+ _negatedPhrases.push_back(normalizeString(phrase));
+ else
+ _positivePhrases.push_back(normalizeString(phrase));
+ inNegation = false;
+ inPhrase = false;
+ } else {
+ // start of a phrase
+ inPhrase = true;
+ quoteOffset = t.offset;
}
}
-
- std::unique_ptr<FTSTokenizer> tokenizer(_language->createTokenizer());
-
- _addTerms(tokenizer.get(), positiveTermSentence, false);
- _addTerms(tokenizer.get(), negativeTermSentence, true);
-
- return Status::OK();
+ } else {
+ invariant(false);
}
+ }
- void FTSQuery::_addTerms( FTSTokenizer* tokenizer,
- const string& sentence,
- bool negated ) {
-
- tokenizer->reset(sentence.c_str(), FTSTokenizer::FilterStopWords);
+ std::unique_ptr<FTSTokenizer> tokenizer(_language->createTokenizer());
- auto& activeTerms = negated ? _negatedTerms : _positiveTerms;
+ _addTerms(tokenizer.get(), positiveTermSentence, false);
+ _addTerms(tokenizer.get(), negativeTermSentence, true);
- // First, get all the terms for indexing, ie, lower cased words
- // If we are case-insensitive, we can also used this for positive, and negative terms
- // Some terms may be expanded into multiple words in some non-English languages
- while (tokenizer->moveNext()) {
+ return Status::OK();
+}
- string word = tokenizer->get().toString();
+void FTSQuery::_addTerms(FTSTokenizer* tokenizer, const string& sentence, bool negated) {
+ tokenizer->reset(sentence.c_str(), FTSTokenizer::FilterStopWords);
- if (!negated) {
- _termsForBounds.insert(word);
- }
+ auto& activeTerms = negated ? _negatedTerms : _positiveTerms;
- // Compute the string corresponding to 'token' that will be used for the matcher.
- // For case-insensitive queries, this is the same string as 'boundsTerm' computed
- // above.
- if (!_caseSensitive) {
- activeTerms.insert(word);
- }
- }
+    // First, get all the terms for indexing, i.e. the lowercased words
+    // If we are case-insensitive, we can also use these as the positive and negative terms
+    // Some terms may be expanded into multiple words in some non-English languages
+ while (tokenizer->moveNext()) {
+ string word = tokenizer->get().toString();
- if (!_caseSensitive) {
- return;
- }
+ if (!negated) {
+ _termsForBounds.insert(word);
+ }
- tokenizer->reset(sentence.c_str(), static_cast<FTSTokenizer::Options>(
- FTSTokenizer::FilterStopWords
- | FTSTokenizer::GenerateCaseSensitiveTokens));
+ // Compute the string corresponding to 'token' that will be used for the matcher.
+ // For case-insensitive queries, this is the same string as 'boundsTerm' computed
+ // above.
+ if (!_caseSensitive) {
+ activeTerms.insert(word);
+ }
+ }
- // If we want case-sensitivity, get the case-sensitive token
- while (tokenizer->moveNext()) {
+ if (!_caseSensitive) {
+ return;
+ }
- string word = tokenizer->get().toString();
+ tokenizer->reset(sentence.c_str(),
+ static_cast<FTSTokenizer::Options>(FTSTokenizer::FilterStopWords |
+ FTSTokenizer::GenerateCaseSensitiveTokens));
- activeTerms.insert(word);
- }
- }
+ // If we want case-sensitivity, get the case-sensitive token
+ while (tokenizer->moveNext()) {
+ string word = tokenizer->get().toString();
- string FTSQuery::normalizeString(StringData str) const {
- if (_caseSensitive) {
- return str.toString();
- }
- return tolowerString(str);
- }
+ activeTerms.insert(word);
+ }
+}
- namespace {
- void _debugHelp( stringstream& ss, const set<string>& s, const string& sep ) {
- bool first = true;
- for ( set<string>::const_iterator i = s.begin(); i != s.end(); ++i ) {
- if ( first )
- first = false;
- else
- ss << sep;
- ss << *i;
- }
- }
+string FTSQuery::normalizeString(StringData str) const {
+ if (_caseSensitive) {
+ return str.toString();
+ }
+ return tolowerString(str);
+}
- void _debugHelp( stringstream& ss, const vector<string>& v, const string& sep ) {
- set<string> s( v.begin(), v.end() );
- _debugHelp( ss, s, sep );
- }
+namespace {
+void _debugHelp(stringstream& ss, const set<string>& s, const string& sep) {
+ bool first = true;
+ for (set<string>::const_iterator i = s.begin(); i != s.end(); ++i) {
+ if (first)
+ first = false;
+ else
+ ss << sep;
+ ss << *i;
+ }
+}
- }
+void _debugHelp(stringstream& ss, const vector<string>& v, const string& sep) {
+ set<string> s(v.begin(), v.end());
+ _debugHelp(ss, s, sep);
+}
+}
- string FTSQuery::toString() const {
- stringstream ss;
- ss << "FTSQuery\n";
+string FTSQuery::toString() const {
+ stringstream ss;
+ ss << "FTSQuery\n";
- ss << " terms: ";
- _debugHelp( ss, getPositiveTerms(), ", " );
- ss << "\n";
+ ss << " terms: ";
+ _debugHelp(ss, getPositiveTerms(), ", ");
+ ss << "\n";
- ss << " negated terms: ";
- _debugHelp( ss, getNegatedTerms(), ", " );
- ss << "\n";
+ ss << " negated terms: ";
+ _debugHelp(ss, getNegatedTerms(), ", ");
+ ss << "\n";
- ss << " phrases: ";
- _debugHelp( ss, getPositivePhr(), ", " );
- ss << "\n";
+ ss << " phrases: ";
+ _debugHelp(ss, getPositivePhr(), ", ");
+ ss << "\n";
- ss << " negated phrases: ";
- _debugHelp( ss, getNegatedPhr(), ", " );
- ss << "\n";
+ ss << " negated phrases: ";
+ _debugHelp(ss, getNegatedPhr(), ", ");
+ ss << "\n";
- return ss.str();
- }
+ return ss.str();
+}
- string FTSQuery::debugString() const {
- stringstream ss;
+string FTSQuery::debugString() const {
+ stringstream ss;
- _debugHelp( ss, getPositiveTerms(), "|" );
- ss << "||";
+ _debugHelp(ss, getPositiveTerms(), "|");
+ ss << "||";
- _debugHelp( ss, getNegatedTerms(), "|" );
- ss << "||";
+ _debugHelp(ss, getNegatedTerms(), "|");
+ ss << "||";
- _debugHelp( ss, getPositivePhr(), "|" );
- ss << "||";
+ _debugHelp(ss, getPositivePhr(), "|");
+ ss << "||";
- _debugHelp( ss, getNegatedPhr(), "|" );
+ _debugHelp(ss, getNegatedPhr(), "|");
- return ss.str();
- }
+ return ss.str();
+}
- BSONObj FTSQuery::toBSON() const {
- BSONObjBuilder bob;
- bob.append( "terms", getPositiveTerms() );
- bob.append( "negatedTerms", getNegatedTerms() );
- bob.append( "phrases", getPositivePhr() );
- bob.append( "negatedPhrases", getNegatedPhr() );
- return bob.obj();
- }
- }
+BSONObj FTSQuery::toBSON() const {
+ BSONObjBuilder bob;
+ bob.append("terms", getPositiveTerms());
+ bob.append("negatedTerms", getNegatedTerms());
+ bob.append("phrases", getPositivePhr());
+ bob.append("negatedPhrases", getNegatedPhr());
+ return bob.obj();
+}
+}
}
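
For reference, a minimal usage sketch of the reformatted FTSQuery::parse() API: stop words are
filtered, terms are stemmed, and negated terms are kept separately from positive terms. The
expected sets mirror the Neg1 case in fts_query_test.cpp below; the free function name is
illustrative only, and the sketch assumes the usual includes from the mongo/db/fts tree.

    #include "mongo/db/fts/fts_query.h"

    // Illustrative sketch, not part of this patch.
    void exampleParseQuery() {
        mongo::fts::FTSQuery q;
        mongo::Status s = q.parse("this is -really fun",
                                  "english",
                                  false,  // caseSensitive
                                  mongo::fts::TEXT_INDEX_VERSION_2);
        if (!s.isOK()) {
            return;  // unknown language or unsupported text index version
        }
        // q.getPositiveTerms()  -> {"fun"}     ("this"/"is" are stop words)
        // q.getNegatedTerms()   -> {"realli"}  (stemmed form of "really")
        // q.getTermsForBounds() == q.getPositiveTerms() for case-insensitive queries
    }
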
diff --git a/src/mongo/db/fts/fts_query.h b/src/mongo/db/fts/fts_query.h
index 88ca4ce64d0..10e0cd2faaf 100644
--- a/src/mongo/db/fts/fts_query.h
+++ b/src/mongo/db/fts/fts_query.h
@@ -40,68 +40,77 @@
namespace mongo {
- namespace fts {
-
- class FTSQuery {
-
- public:
- // Initializes an FTSQuery. Note that the parsing of "language" depends on the text
- // index version, since a query which doesn't specify a language and is against a
- // version 1 text index with a version 1 default language string needs to be parsed as
- // version 1 (see fts_language.cpp for a list of language strings specific to version
- // 1).
- Status parse(const std::string& query, StringData language, bool caseSensitive,
- TextIndexVersion textIndexVersion);
-
- const std::set<std::string>& getPositiveTerms() const { return _positiveTerms; }
- const std::set<std::string>& getNegatedTerms() const { return _negatedTerms; }
- const std::vector<std::string>& getPositivePhr() const { return _positivePhrases; }
- const std::vector<std::string>& getNegatedPhr() const { return _negatedPhrases; }
-
- const std::set<std::string>& getTermsForBounds() const {
- return _termsForBounds;
- }
+namespace fts {
+
+class FTSQuery {
+public:
+ // Initializes an FTSQuery. Note that the parsing of "language" depends on the text
+ // index version, since a query which doesn't specify a language and is against a
+ // version 1 text index with a version 1 default language string needs to be parsed as
+ // version 1 (see fts_language.cpp for a list of language strings specific to version
+ // 1).
+ Status parse(const std::string& query,
+ StringData language,
+ bool caseSensitive,
+ TextIndexVersion textIndexVersion);
+
+ const std::set<std::string>& getPositiveTerms() const {
+ return _positiveTerms;
+ }
+ const std::set<std::string>& getNegatedTerms() const {
+ return _negatedTerms;
+ }
+ const std::vector<std::string>& getPositivePhr() const {
+ return _positivePhrases;
+ }
+ const std::vector<std::string>& getNegatedPhr() const {
+ return _negatedPhrases;
+ }
- const FTSLanguage& getLanguage() const { return *_language; }
- bool getCaseSensitive() const { return _caseSensitive; }
+ const std::set<std::string>& getTermsForBounds() const {
+ return _termsForBounds;
+ }
- std::string toString() const;
+ const FTSLanguage& getLanguage() const {
+ return *_language;
+ }
+ bool getCaseSensitive() const {
+ return _caseSensitive;
+ }
- std::string debugString() const;
+ std::string toString() const;
- BSONObj toBSON() const;
+ std::string debugString() const;
- /**
- * Lowercases "str" if _caseSensitive is set, else returns a copy of "str" unchanged.
- */
- std::string normalizeString( StringData str ) const;
+ BSONObj toBSON() const;
- static const bool caseSensitiveDefault;
+ /**
+ * Lowercases "str" if _caseSensitive is set, else returns a copy of "str" unchanged.
+ */
+ std::string normalizeString(StringData str) const;
- private:
- void _addTerms( FTSTokenizer* tokenizer,
- const std::string& tokens,
- bool negated );
+ static const bool caseSensitiveDefault;
- const FTSLanguage* _language;
- bool _caseSensitive;
+private:
+ void _addTerms(FTSTokenizer* tokenizer, const std::string& tokens, bool negated);
- // Positive terms.
- std::set<std::string> _positiveTerms;
+ const FTSLanguage* _language;
+ bool _caseSensitive;
- // Negated terms.
- std::set<std::string> _negatedTerms;
+ // Positive terms.
+ std::set<std::string> _positiveTerms;
- // Positive phrases.
- std::vector<std::string> _positivePhrases;
+ // Negated terms.
+ std::set<std::string> _negatedTerms;
- // Negated phrases.
- std::vector<std::string> _negatedPhrases;
+ // Positive phrases.
+ std::vector<std::string> _positivePhrases;
- // Terms for bounds.
- std::set<std::string> _termsForBounds;
- };
+ // Negated phrases.
+ std::vector<std::string> _negatedPhrases;
- }
+ // Terms for bounds.
+ std::set<std::string> _termsForBounds;
+};
+}
}
-
diff --git a/src/mongo/db/fts/fts_query_parser.cpp b/src/mongo/db/fts/fts_query_parser.cpp
index 5d73e69cb1e..6b2381c3366 100644
--- a/src/mongo/db/fts/fts_query_parser.cpp
+++ b/src/mongo/db/fts/fts_query_parser.cpp
@@ -34,77 +34,73 @@
namespace mongo {
- namespace fts {
+namespace fts {
- FTSQueryParser::FTSQueryParser( StringData str )
- : _pos(0), _raw( str ) {
- skipWhitespace();
- _previousWhiteSpace = true;
- }
-
- bool FTSQueryParser::more() const {
- return _pos < _raw.size();
- }
-
- QueryToken FTSQueryParser::next() {
- if ( _pos >= _raw.size() )
- return QueryToken( QueryToken::INVALID, "", 0, false );
+FTSQueryParser::FTSQueryParser(StringData str) : _pos(0), _raw(str) {
+ skipWhitespace();
+ _previousWhiteSpace = true;
+}
- unsigned start = _pos++;
- QueryToken::Type type = getType( _raw[start] );
+bool FTSQueryParser::more() const {
+ return _pos < _raw.size();
+}
- // Query Parser should never land on whitespace
- if ( type == QueryToken::WHITESPACE ) {
- invariant( false );
- }
+QueryToken FTSQueryParser::next() {
+ if (_pos >= _raw.size())
+ return QueryToken(QueryToken::INVALID, "", 0, false);
- if ( type == QueryToken::TEXT ) {
- while ( _pos < _raw.size() && getType( _raw[_pos] ) == type ) {
- _pos++;
- }
- }
+ unsigned start = _pos++;
+ QueryToken::Type type = getType(_raw[start]);
- StringData ret = _raw.substr( start, _pos - start );
- bool old = _previousWhiteSpace;
- _previousWhiteSpace = skipWhitespace();
+ // Query Parser should never land on whitespace
+ if (type == QueryToken::WHITESPACE) {
+ invariant(false);
+ }
- return QueryToken( type, ret, start, old );
+ if (type == QueryToken::TEXT) {
+ while (_pos < _raw.size() && getType(_raw[_pos]) == type) {
+ _pos++;
}
+ }
- bool FTSQueryParser::skipWhitespace() {
- unsigned start = _pos;
+ StringData ret = _raw.substr(start, _pos - start);
+ bool old = _previousWhiteSpace;
+ _previousWhiteSpace = skipWhitespace();
- while ( _pos < _raw.size() && getType( _raw[_pos] ) == QueryToken::WHITESPACE ) {
- _pos++;
- }
+ return QueryToken(type, ret, start, old);
+}
- return _pos > start;
- }
+bool FTSQueryParser::skipWhitespace() {
+ unsigned start = _pos;
+ while (_pos < _raw.size() && getType(_raw[_pos]) == QueryToken::WHITESPACE) {
+ _pos++;
+ }
- QueryToken::Type FTSQueryParser::getType( char c ) const {
- switch ( c ) {
- // Unicode TR29 defines these as Word Boundaries
- case '\n': // U+000A - LF
- case '\v': // U+000B - Veritical Tab
- case '\f': // U+000C - Form Feed
- case '\r': // U+000D - CR
- // Unicode TR29 remarks this could be used MidNum for Word Boundaries
- // but we treat this as a token separator
- case ' ': // U+0020 - Space
- return QueryToken::WHITESPACE;
- // Unicode TR29 has a particular note about the complexity of hyphens.
- // Since we use them for negation, we are sensitive to them, and we simply drop
- // them otherwise from words
- case '-':
- case '"':
- return QueryToken::DELIMITER;
- default:
- return QueryToken::TEXT;
- }
+ return _pos > start;
+}
- }
+QueryToken::Type FTSQueryParser::getType(char c) const {
+ switch (c) {
+ // Unicode TR29 defines these as Word Boundaries
+ case '\n': // U+000A - LF
+        case '\v':  // U+000B - Vertical Tab
+ case '\f': // U+000C - Form Feed
+ case '\r': // U+000D - CR
+ // Unicode TR29 remarks this could be used MidNum for Word Boundaries
+ // but we treat this as a token separator
+ case ' ': // U+0020 - Space
+ return QueryToken::WHITESPACE;
+ // Unicode TR29 has a particular note about the complexity of hyphens.
+ // Since we use them for negation, we are sensitive to them, and we simply drop
+ // them otherwise from words
+ case '-':
+ case '"':
+ return QueryToken::DELIMITER;
+ default:
+ return QueryToken::TEXT;
}
-
+}
+}
}
diff --git a/src/mongo/db/fts/fts_query_parser.h b/src/mongo/db/fts/fts_query_parser.h
index 32804fd63fd..b5e8c53207f 100644
--- a/src/mongo/db/fts/fts_query_parser.h
+++ b/src/mongo/db/fts/fts_query_parser.h
@@ -34,57 +34,54 @@
namespace mongo {
- namespace fts {
+namespace fts {
- struct QueryToken {
- enum Type { WHITESPACE, DELIMITER, TEXT, INVALID };
- QueryToken( Type type, StringData data, unsigned offset, bool previousWhiteSpace )
- : type( type ),
- data( data ),
- offset( offset ),
- previousWhiteSpace( previousWhiteSpace ) {}
+struct QueryToken {
+ enum Type { WHITESPACE, DELIMITER, TEXT, INVALID };
+ QueryToken(Type type, StringData data, unsigned offset, bool previousWhiteSpace)
+ : type(type), data(data), offset(offset), previousWhiteSpace(previousWhiteSpace) {}
- bool ok() const { return type != INVALID; }
-
- Type type;
- StringData data;
- unsigned offset;
- bool previousWhiteSpace;
- };
+ bool ok() const {
+ return type != INVALID;
+ }
- /**
- * The pseudo EXBNF for the query parsing language is:
- *
- * SEARCH STRING = TOKEN_LIST ( ' ' TOKEN_LIST )*
- *
- * TOKEN_LIST = SEARCH_TOKEN
- * |'-' SEARCH_TOKEN
- * | QUOTED_SEARCH_TOKEN
- * |'-' QUOTED_SEARCH_TOKEN
- *
- * QUOTED_SEARCH_TOKEN = '“' SEARCH_TOKEN+ '"'
- *
- * SEARCH_TOKEN = CHARACTER_EXCLUDING_SPECIAL_CHARS
- *
- * SPECIAL_CHARS = '-' | ' ' | '"'
- */
- class FTSQueryParser {
- MONGO_DISALLOW_COPYING( FTSQueryParser );
- public:
+ Type type;
+ StringData data;
+ unsigned offset;
+ bool previousWhiteSpace;
+};
- FTSQueryParser(StringData str);
- bool more() const;
- QueryToken next();
+/**
+ * The pseudo-EBNF for the query parsing language is:
+ *
+ * SEARCH STRING = TOKEN_LIST ( ' ' TOKEN_LIST )*
+ *
+ * TOKEN_LIST = SEARCH_TOKEN
+ * |'-' SEARCH_TOKEN
+ * | QUOTED_SEARCH_TOKEN
+ * |'-' QUOTED_SEARCH_TOKEN
+ *
+ * QUOTED_SEARCH_TOKEN = '“' SEARCH_TOKEN+ '"'
+ *
+ * SEARCH_TOKEN = CHARACTER_EXCLUDING_SPECIAL_CHARS
+ *
+ * SPECIAL_CHARS = '-' | ' ' | '"'
+ */
+class FTSQueryParser {
+ MONGO_DISALLOW_COPYING(FTSQueryParser);
- private:
- QueryToken::Type getType( char c ) const;
- bool skipWhitespace();
+public:
+ FTSQueryParser(StringData str);
+ bool more() const;
+ QueryToken next();
- unsigned _pos;
- bool _previousWhiteSpace;
- const StringData _raw;
- };
+private:
+ QueryToken::Type getType(char c) const;
+ bool skipWhitespace();
- }
+ unsigned _pos;
+ bool _previousWhiteSpace;
+ const StringData _raw;
+};
+}
}
-
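
A small sketch (not part of the patch) of the token stream described by the grammar comment above:
only '-', '"', and whitespace are special, so the parser reduces a search string to TEXT and
DELIMITER tokens that FTSQuery::parse() then interprets for negation and phrase boundaries.

    #include "mongo/db/fts/fts_query_parser.h"

    // Illustrative sketch, not part of this patch. For the input below, the parser
    // yields TEXT("foo"), DELIMITER("-"), DELIMITER("\""), TEXT("bar"), TEXT("baz"),
    // DELIMITER("\"") -- i.e. how a negated phrase looks to FTSQuery::parse().
    void exampleTokenStream() {
        mongo::fts::FTSQueryParser parser("foo -\"bar baz\"");
        while (parser.more()) {
            mongo::fts::QueryToken t = parser.next();
            // t.type, t.data, t.offset, and t.previousWhiteSpace are the fields
            // FTSQuery::parse() inspects.
        }
    }
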
diff --git a/src/mongo/db/fts/fts_query_test.cpp b/src/mongo/db/fts/fts_query_test.cpp
index b090f23a660..a4a841c7f16 100644
--- a/src/mongo/db/fts/fts_query_test.cpp
+++ b/src/mongo/db/fts/fts_query_test.cpp
@@ -33,242 +33,222 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- namespace fts {
-
- TEST( FTSQuery, Basic1 ) {
- FTSQuery q;
- ASSERT( q.parse( "this is fun", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( false, q.getCaseSensitive() );
- ASSERT_EQUALS( 1U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( "fun", *q.getPositiveTerms().begin() );
- ASSERT_EQUALS( 0U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- ASSERT_TRUE( q.getTermsForBounds() == q.getPositiveTerms() );
- }
-
- TEST( FTSQuery, ParsePunctuation ) {
- FTSQuery q;
- ASSERT( q.parse( "hello.world", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( false, q.getCaseSensitive() );
- ASSERT_EQUALS( 2U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( "hello", *q.getPositiveTerms().begin() );
- ASSERT_EQUALS( "world", *(--q.getPositiveTerms().end()) );
- ASSERT_EQUALS( 0U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- ASSERT_TRUE( q.getTermsForBounds() == q.getPositiveTerms() );
- }
-
- TEST( FTSQuery, Neg1 ) {
- FTSQuery q;
- ASSERT( q.parse( "this is -really fun", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( 1U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( "fun", *q.getPositiveTerms().begin() );
- ASSERT_EQUALS( 1U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( "realli", *q.getNegatedTerms().begin() );
- ASSERT_TRUE( q.getTermsForBounds() == q.getPositiveTerms() );
- }
-
- TEST( FTSQuery, Phrase1 ) {
- FTSQuery q;
- ASSERT( q.parse( "doing a \"phrase test\" for fun", "english", false,
- TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( 3U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( 0U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( 1U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- ASSERT_TRUE( q.getTermsForBounds() == q.getPositiveTerms() );
-
- ASSERT_EQUALS( "phrase test", q.getPositivePhr()[0] );
- ASSERT_EQUALS( "fun|phrase|test||||phrase test||", q.debugString() );
- }
-
- TEST( FTSQuery, Phrase2 ) {
- FTSQuery q;
- ASSERT( q.parse( "doing a \"phrase-test\" for fun", "english", false,
- TEXT_INDEX_VERSION_2 ).isOK() );
- ASSERT_EQUALS( 1U, q.getPositivePhr().size() );
- ASSERT_EQUALS( "phrase-test", q.getPositivePhr()[0] );
- }
-
- TEST( FTSQuery, NegPhrase1 ) {
- FTSQuery q;
- ASSERT( q.parse( "doing a -\"phrase test\" for fun", "english", false,
- TEXT_INDEX_VERSION_2 ).isOK() );
- ASSERT_EQUALS( "fun||||||phrase test", q.debugString() );
- }
-
- TEST( FTSQuery, CaseSensitiveOption ) {
- FTSQuery q;
- ASSERT( q.parse( "this is fun", "english", true, TEXT_INDEX_VERSION_2 ).isOK() );
- ASSERT_EQUALS( true, q.getCaseSensitive() );
- }
-
- TEST( FTSQuery, CaseSensitivePositiveTerms ) {
- FTSQuery q;
- ASSERT( q.parse( "This is Positively fun", "english", true,
- TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( 2U, q.getTermsForBounds().size() );
- ASSERT_EQUALS( 1, std::count( q.getTermsForBounds().begin(),
- q.getTermsForBounds().end(),
- "posit" ) );
- ASSERT_EQUALS( 1, std::count( q.getTermsForBounds().begin(),
- q.getTermsForBounds().end(),
- "fun" ) );
- ASSERT_EQUALS( 2U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( 1, std::count( q.getPositiveTerms().begin(),
- q.getPositiveTerms().end(),
- "Posit" ) );
- ASSERT_EQUALS( 1, std::count( q.getPositiveTerms().begin(),
- q.getPositiveTerms().end(),
- "fun" ) );
- ASSERT_EQUALS( 0U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- }
-
- TEST( FTSQuery, CaseSensitiveNegativeTerms ) {
- FTSQuery q;
- ASSERT( q.parse( "-This -is -Negatively -miserable", "english", true,
- TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( 0U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( 0U, q.getTermsForBounds().size() );
- ASSERT_EQUALS( 2U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( 1, std::count( q.getNegatedTerms().begin(),
- q.getNegatedTerms().end(),
- "Negat" ) );
- ASSERT_EQUALS( 1, std::count( q.getNegatedTerms().begin(),
- q.getNegatedTerms().end(),
- "miser" ) );
- ASSERT_EQUALS( 0U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- }
-
- TEST( FTSQuery, CaseSensitivePositivePhrases ) {
- FTSQuery q;
- ASSERT( q.parse( "doing a \"Phrase Test\" for fun", "english", true,
- TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( 1U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- ASSERT_EQUALS( "Phrase Test", q.getPositivePhr()[0] );
- }
-
- TEST( FTSQuery, CaseSensitiveNegativePhrases ) {
- FTSQuery q;
- ASSERT( q.parse( "doing a -\"Phrase Test\" for fun", "english", true,
- TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( 0U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 1U, q.getNegatedPhr().size() );
- ASSERT_EQUALS( "Phrase Test", q.getNegatedPhr()[0] );
- }
-
- TEST( FTSQuery, Mix1 ) {
- FTSQuery q;
- ASSERT( q.parse( "\"industry\" -Melbourne -Physics", "english", false,
- TEXT_INDEX_VERSION_2 ).isOK() );
- ASSERT_EQUALS( "industri||melbourn|physic||industry||", q.debugString() );
- }
-
- TEST( FTSQuery, NegPhrase2) {
- FTSQuery q1, q2, q3;
- ASSERT( q1.parse( "foo \"bar\"", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
- ASSERT( q2.parse( "foo \"-bar\"", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
- ASSERT( q3.parse( "foo \" -bar\"", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( 2U, q1.getPositiveTerms().size() );
- ASSERT_EQUALS( 2U, q2.getPositiveTerms().size() );
- ASSERT_EQUALS( 2U, q3.getPositiveTerms().size() );
-
- ASSERT_EQUALS( 0U, q1.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q2.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q3.getNegatedTerms().size() );
-
- ASSERT_EQUALS( 1U, q1.getPositivePhr().size() );
- ASSERT_EQUALS( 1U, q2.getPositivePhr().size() );
- ASSERT_EQUALS( 1U, q3.getPositivePhr().size() );
-
- ASSERT_EQUALS( 0U, q1.getNegatedPhr().size() );
- ASSERT_EQUALS( 0U, q2.getNegatedPhr().size() );
- ASSERT_EQUALS( 0U, q3.getNegatedPhr().size() );
- }
-
- TEST( FTSQuery, NegPhrase3) {
- FTSQuery q1, q2, q3;
- ASSERT( q1.parse( "foo -\"bar\"", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
- ASSERT( q2.parse( "foo -\"-bar\"", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
- ASSERT( q3.parse( "foo -\" -bar\"", "english", false, TEXT_INDEX_VERSION_2 ).isOK() );
-
- ASSERT_EQUALS( 1U, q1.getPositiveTerms().size() );
- ASSERT_EQUALS( 1U, q2.getPositiveTerms().size() );
- ASSERT_EQUALS( 1U, q3.getPositiveTerms().size() );
-
- ASSERT_EQUALS( 0U, q1.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q2.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q3.getNegatedTerms().size() );
-
- ASSERT_EQUALS( 0U, q1.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q2.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q3.getPositivePhr().size() );
-
- ASSERT_EQUALS( 1U, q1.getNegatedPhr().size() );
- ASSERT_EQUALS( 1U, q2.getNegatedPhr().size() );
- ASSERT_EQUALS( 1U, q3.getNegatedPhr().size() );
- }
-
- // Test textIndexVersion:1 query with language "english". This invokes the standard English
- // stemmer and stopword list.
- TEST( FTSQuery, TextIndexVersion1LanguageEnglish ) {
- FTSQuery q;
- ASSERT( q.parse( "the running", "english", false, TEXT_INDEX_VERSION_1 ).isOK() );
- ASSERT_EQUALS( 1U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( "run", *q.getPositiveTerms().begin() );
- ASSERT_EQUALS( 0U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- }
-
- // Test textIndexVersion:1 query with language "eng". "eng" uses the English stemmer, and
- // no stopword list.
- TEST( FTSQuery, TextIndexVersion1LanguageEng ) {
- FTSQuery q;
- ASSERT( q.parse( "the running", "eng", false, TEXT_INDEX_VERSION_1 ).isOK() );
- ASSERT_EQUALS( 2U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( 1, std::count( q.getPositiveTerms().begin(),
- q.getPositiveTerms().end(),
- "the" ) );
- ASSERT_EQUALS( 1, std::count( q.getPositiveTerms().begin(),
- q.getPositiveTerms().end(),
- "run" ) );
- ASSERT_EQUALS( 0U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- }
-
- // Test textIndexVersion:1 query with language "invalid". No stemming will be performed,
- // and no stopword list will be used.
- TEST( FTSQuery, TextIndexVersion1LanguageInvalid ) {
- FTSQuery q;
- ASSERT( q.parse( "the running", "invalid", false, TEXT_INDEX_VERSION_1 ).isOK() );
- ASSERT_EQUALS( 2U, q.getPositiveTerms().size() );
- ASSERT_EQUALS( 1, std::count( q.getPositiveTerms().begin(),
- q.getPositiveTerms().end(),
- "the" ) );
- ASSERT_EQUALS( 1, std::count( q.getPositiveTerms().begin(),
- q.getPositiveTerms().end(),
- "running" ) );
- ASSERT_EQUALS( 0U, q.getNegatedTerms().size() );
- ASSERT_EQUALS( 0U, q.getPositivePhr().size() );
- ASSERT_EQUALS( 0U, q.getNegatedPhr().size() );
- }
-
- }
+namespace fts {
+
+TEST(FTSQuery, Basic1) {
+ FTSQuery q;
+ ASSERT(q.parse("this is fun", "english", false, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(false, q.getCaseSensitive());
+ ASSERT_EQUALS(1U, q.getPositiveTerms().size());
+ ASSERT_EQUALS("fun", *q.getPositiveTerms().begin());
+ ASSERT_EQUALS(0U, q.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+ ASSERT_TRUE(q.getTermsForBounds() == q.getPositiveTerms());
+}
+
+TEST(FTSQuery, ParsePunctuation) {
+ FTSQuery q;
+ ASSERT(q.parse("hello.world", "english", false, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(false, q.getCaseSensitive());
+ ASSERT_EQUALS(2U, q.getPositiveTerms().size());
+ ASSERT_EQUALS("hello", *q.getPositiveTerms().begin());
+ ASSERT_EQUALS("world", *(--q.getPositiveTerms().end()));
+ ASSERT_EQUALS(0U, q.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+ ASSERT_TRUE(q.getTermsForBounds() == q.getPositiveTerms());
+}
+
+TEST(FTSQuery, Neg1) {
+ FTSQuery q;
+ ASSERT(q.parse("this is -really fun", "english", false, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(1U, q.getPositiveTerms().size());
+ ASSERT_EQUALS("fun", *q.getPositiveTerms().begin());
+ ASSERT_EQUALS(1U, q.getNegatedTerms().size());
+ ASSERT_EQUALS("realli", *q.getNegatedTerms().begin());
+ ASSERT_TRUE(q.getTermsForBounds() == q.getPositiveTerms());
+}
+
+TEST(FTSQuery, Phrase1) {
+ FTSQuery q;
+ ASSERT(
+ q.parse("doing a \"phrase test\" for fun", "english", false, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(3U, q.getPositiveTerms().size());
+ ASSERT_EQUALS(0U, q.getNegatedTerms().size());
+ ASSERT_EQUALS(1U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+ ASSERT_TRUE(q.getTermsForBounds() == q.getPositiveTerms());
+
+ ASSERT_EQUALS("phrase test", q.getPositivePhr()[0]);
+ ASSERT_EQUALS("fun|phrase|test||||phrase test||", q.debugString());
+}
+
+TEST(FTSQuery, Phrase2) {
+ FTSQuery q;
+ ASSERT(
+ q.parse("doing a \"phrase-test\" for fun", "english", false, TEXT_INDEX_VERSION_2).isOK());
+ ASSERT_EQUALS(1U, q.getPositivePhr().size());
+ ASSERT_EQUALS("phrase-test", q.getPositivePhr()[0]);
+}
+
+TEST(FTSQuery, NegPhrase1) {
+ FTSQuery q;
+ ASSERT(
+ q.parse("doing a -\"phrase test\" for fun", "english", false, TEXT_INDEX_VERSION_2).isOK());
+ ASSERT_EQUALS("fun||||||phrase test", q.debugString());
+}
+
+TEST(FTSQuery, CaseSensitiveOption) {
+ FTSQuery q;
+ ASSERT(q.parse("this is fun", "english", true, TEXT_INDEX_VERSION_2).isOK());
+ ASSERT_EQUALS(true, q.getCaseSensitive());
+}
+
+TEST(FTSQuery, CaseSensitivePositiveTerms) {
+ FTSQuery q;
+ ASSERT(q.parse("This is Positively fun", "english", true, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(2U, q.getTermsForBounds().size());
+ ASSERT_EQUALS(1,
+ std::count(q.getTermsForBounds().begin(), q.getTermsForBounds().end(), "posit"));
+ ASSERT_EQUALS(1, std::count(q.getTermsForBounds().begin(), q.getTermsForBounds().end(), "fun"));
+ ASSERT_EQUALS(2U, q.getPositiveTerms().size());
+ ASSERT_EQUALS(1, std::count(q.getPositiveTerms().begin(), q.getPositiveTerms().end(), "Posit"));
+ ASSERT_EQUALS(1, std::count(q.getPositiveTerms().begin(), q.getPositiveTerms().end(), "fun"));
+ ASSERT_EQUALS(0U, q.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+}
+
+TEST(FTSQuery, CaseSensitiveNegativeTerms) {
+ FTSQuery q;
+ ASSERT(
+ q.parse("-This -is -Negatively -miserable", "english", true, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(0U, q.getPositiveTerms().size());
+ ASSERT_EQUALS(0U, q.getTermsForBounds().size());
+ ASSERT_EQUALS(2U, q.getNegatedTerms().size());
+ ASSERT_EQUALS(1, std::count(q.getNegatedTerms().begin(), q.getNegatedTerms().end(), "Negat"));
+ ASSERT_EQUALS(1, std::count(q.getNegatedTerms().begin(), q.getNegatedTerms().end(), "miser"));
+ ASSERT_EQUALS(0U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+}
+
+TEST(FTSQuery, CaseSensitivePositivePhrases) {
+ FTSQuery q;
+ ASSERT(
+ q.parse("doing a \"Phrase Test\" for fun", "english", true, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(1U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+ ASSERT_EQUALS("Phrase Test", q.getPositivePhr()[0]);
+}
+
+TEST(FTSQuery, CaseSensitiveNegativePhrases) {
+ FTSQuery q;
+ ASSERT(
+ q.parse("doing a -\"Phrase Test\" for fun", "english", true, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(0U, q.getPositivePhr().size());
+ ASSERT_EQUALS(1U, q.getNegatedPhr().size());
+ ASSERT_EQUALS("Phrase Test", q.getNegatedPhr()[0]);
+}
+
+TEST(FTSQuery, Mix1) {
+ FTSQuery q;
+ ASSERT(
+ q.parse("\"industry\" -Melbourne -Physics", "english", false, TEXT_INDEX_VERSION_2).isOK());
+ ASSERT_EQUALS("industri||melbourn|physic||industry||", q.debugString());
+}
+
+TEST(FTSQuery, NegPhrase2) {
+ FTSQuery q1, q2, q3;
+ ASSERT(q1.parse("foo \"bar\"", "english", false, TEXT_INDEX_VERSION_2).isOK());
+ ASSERT(q2.parse("foo \"-bar\"", "english", false, TEXT_INDEX_VERSION_2).isOK());
+ ASSERT(q3.parse("foo \" -bar\"", "english", false, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(2U, q1.getPositiveTerms().size());
+ ASSERT_EQUALS(2U, q2.getPositiveTerms().size());
+ ASSERT_EQUALS(2U, q3.getPositiveTerms().size());
+
+ ASSERT_EQUALS(0U, q1.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q2.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q3.getNegatedTerms().size());
+
+ ASSERT_EQUALS(1U, q1.getPositivePhr().size());
+ ASSERT_EQUALS(1U, q2.getPositivePhr().size());
+ ASSERT_EQUALS(1U, q3.getPositivePhr().size());
+
+ ASSERT_EQUALS(0U, q1.getNegatedPhr().size());
+ ASSERT_EQUALS(0U, q2.getNegatedPhr().size());
+ ASSERT_EQUALS(0U, q3.getNegatedPhr().size());
+}
+
+TEST(FTSQuery, NegPhrase3) {
+ FTSQuery q1, q2, q3;
+ ASSERT(q1.parse("foo -\"bar\"", "english", false, TEXT_INDEX_VERSION_2).isOK());
+ ASSERT(q2.parse("foo -\"-bar\"", "english", false, TEXT_INDEX_VERSION_2).isOK());
+ ASSERT(q3.parse("foo -\" -bar\"", "english", false, TEXT_INDEX_VERSION_2).isOK());
+
+ ASSERT_EQUALS(1U, q1.getPositiveTerms().size());
+ ASSERT_EQUALS(1U, q2.getPositiveTerms().size());
+ ASSERT_EQUALS(1U, q3.getPositiveTerms().size());
+
+ ASSERT_EQUALS(0U, q1.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q2.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q3.getNegatedTerms().size());
+
+ ASSERT_EQUALS(0U, q1.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q2.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q3.getPositivePhr().size());
+
+ ASSERT_EQUALS(1U, q1.getNegatedPhr().size());
+ ASSERT_EQUALS(1U, q2.getNegatedPhr().size());
+ ASSERT_EQUALS(1U, q3.getNegatedPhr().size());
+}
+
+// Test textIndexVersion:1 query with language "english". This invokes the standard English
+// stemmer and stopword list.
+TEST(FTSQuery, TextIndexVersion1LanguageEnglish) {
+ FTSQuery q;
+ ASSERT(q.parse("the running", "english", false, TEXT_INDEX_VERSION_1).isOK());
+ ASSERT_EQUALS(1U, q.getPositiveTerms().size());
+ ASSERT_EQUALS("run", *q.getPositiveTerms().begin());
+ ASSERT_EQUALS(0U, q.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+}
+
+// Test textIndexVersion:1 query with language "eng". "eng" uses the English stemmer, and
+// no stopword list.
+TEST(FTSQuery, TextIndexVersion1LanguageEng) {
+ FTSQuery q;
+ ASSERT(q.parse("the running", "eng", false, TEXT_INDEX_VERSION_1).isOK());
+ ASSERT_EQUALS(2U, q.getPositiveTerms().size());
+ ASSERT_EQUALS(1, std::count(q.getPositiveTerms().begin(), q.getPositiveTerms().end(), "the"));
+ ASSERT_EQUALS(1, std::count(q.getPositiveTerms().begin(), q.getPositiveTerms().end(), "run"));
+ ASSERT_EQUALS(0U, q.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+}
+
+// Test textIndexVersion:1 query with language "invalid". No stemming will be performed,
+// and no stopword list will be used.
+TEST(FTSQuery, TextIndexVersion1LanguageInvalid) {
+ FTSQuery q;
+ ASSERT(q.parse("the running", "invalid", false, TEXT_INDEX_VERSION_1).isOK());
+ ASSERT_EQUALS(2U, q.getPositiveTerms().size());
+ ASSERT_EQUALS(1, std::count(q.getPositiveTerms().begin(), q.getPositiveTerms().end(), "the"));
+ ASSERT_EQUALS(1,
+ std::count(q.getPositiveTerms().begin(), q.getPositiveTerms().end(), "running"));
+ ASSERT_EQUALS(0U, q.getNegatedTerms().size());
+ ASSERT_EQUALS(0U, q.getPositivePhr().size());
+ ASSERT_EQUALS(0U, q.getNegatedPhr().size());
+}
+}
}
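
The split between getTermsForBounds() and getPositiveTerms() in the case-sensitive tests above
falls out of the two tokenizer passes in FTSQuery::_addTerms(): the first pass always records
lowercased stems for the index bounds, and a second pass with GenerateCaseSensitiveTokens supplies
the case-preserving terms the matcher compares against. A hedged sketch of that relationship
(function name illustrative only):

    #include "mongo/db/fts/fts_query.h"

    // Illustrative sketch, not part of this patch.
    void exampleCaseSensitiveTerms() {
        mongo::fts::FTSQuery q;
        mongo::Status s = q.parse(
            "This is Positively fun", "english", true /*caseSensitive*/,
            mongo::fts::TEXT_INDEX_VERSION_2);
        if (!s.isOK()) {
            return;
        }
        // Index bounds use the lowercased stems:  getTermsForBounds() -> {"posit", "fun"}
        // The matcher keeps the original casing:  getPositiveTerms()  -> {"Posit", "fun"}
    }
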
diff --git a/src/mongo/db/fts/fts_spec.cpp b/src/mongo/db/fts/fts_spec.cpp
index 274d9a6d6ba..eb7e018b522 100644
--- a/src/mongo/db/fts/fts_spec.cpp
+++ b/src/mongo/db/fts/fts_spec.cpp
@@ -40,457 +40,408 @@
namespace mongo {
- namespace fts {
-
- using std::map;
- using std::string;
- using namespace mongoutils;
-
- const double DEFAULT_WEIGHT = 1;
- const double MAX_WEIGHT = 1000000000;
- const double MAX_WORD_WEIGHT = MAX_WEIGHT / 10000;
-
- namespace {
- // Default language. Used for new indexes.
- const std::string moduleDefaultLanguage( "english" );
-
- /** Validate the given language override string. */
- bool validateOverride( const string& override ) {
- // The override field can't be empty, can't be prefixed with a dollar sign, and
- // can't contain a dot.
- return !override.empty() &&
- override[0] != '$' &&
- override.find('.') == std::string::npos;
- }
- }
-
- FTSSpec::FTSSpec( const BSONObj& indexInfo ) {
- // indexInfo is a text index spec. Text index specs pass through fixSpec() before
- // being saved to the system.indexes collection. fixSpec() enforces a schema, such that
- // required fields must exist and be of the correct type (e.g. weights,
- // textIndexVersion).
- massert( 16739, "found invalid spec for text index",
- indexInfo["weights"].isABSONObj() );
- BSONElement textIndexVersionElt = indexInfo["textIndexVersion"];
- massert( 17367,
- "found invalid spec for text index, expected number for textIndexVersion",
- textIndexVersionElt.isNumber() );
-
- // We currently support TEXT_INDEX_VERSION_1 (deprecated) and TEXT_INDEX_VERSION_2.
- // Reject all other values.
- massert( 17364,
- str::stream() << "attempt to use unsupported textIndexVersion " <<
- textIndexVersionElt.numberInt() << "; versions supported: " <<
- TEXT_INDEX_VERSION_2 << ", " << TEXT_INDEX_VERSION_1,
- textIndexVersionElt.numberInt() == TEXT_INDEX_VERSION_2 ||
- textIndexVersionElt.numberInt() == TEXT_INDEX_VERSION_1 );
-
- _textIndexVersion = ( textIndexVersionElt.numberInt() == TEXT_INDEX_VERSION_2 ) ?
- TEXT_INDEX_VERSION_2 : TEXT_INDEX_VERSION_1;
-
- // Initialize _defaultLanguage. Note that the FTSLanguage constructor requires
- // textIndexVersion, since language parsing is version-specific.
- auto indexLanguage = indexInfo["default_language"].String();
- auto swl = FTSLanguage::make(indexLanguage , _textIndexVersion );
-
- // This can fail if the user originally created the text index under an instance of
- // MongoDB that supports different languages then the current instance
- // TODO: consder propagating the index ns to here to improve the error message
- uassert(28682,
- str::stream() << "Unrecognized language " << indexLanguage <<
- " found for text index. Verify mongod was started with the"
- " correct options.",
- swl.getStatus().isOK());
- _defaultLanguage = swl.getValue();
-
- _languageOverrideField = indexInfo["language_override"].valuestrsafe();
-
- _wildcard = false;
-
- // in this block we fill in the _weights map
- {
- BSONObjIterator i( indexInfo["weights"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- verify( e.isNumber() );
-
- if ( WILDCARD == e.fieldName() ) {
- _wildcard = true;
- }
- else {
- double num = e.number();
- _weights[ e.fieldName() ] = num;
- verify( num > 0 && num < MAX_WORD_WEIGHT );
- }
- }
- verify( _wildcard || _weights.size() );
- }
-
- // extra information
- {
- BSONObj keyPattern = indexInfo["key"].Obj();
- verify( keyPattern.nFields() >= 2 );
- BSONObjIterator i( keyPattern );
+namespace fts {
- bool passedFTS = false;
+using std::map;
+using std::string;
+using namespace mongoutils;
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( str::equals( e.fieldName(), "_fts" ) ||
- str::equals( e.fieldName(), "_ftsx" ) ) {
- passedFTS = true;
- continue;
- }
+const double DEFAULT_WEIGHT = 1;
+const double MAX_WEIGHT = 1000000000;
+const double MAX_WORD_WEIGHT = MAX_WEIGHT / 10000;
- if ( passedFTS )
- _extraAfter.push_back( e.fieldName() );
- else
- _extraBefore.push_back( e.fieldName() );
- }
+namespace {
+// Default language. Used for new indexes.
+const std::string moduleDefaultLanguage("english");
- }
- }
+/** Validate the given language override string. */
+bool validateOverride(const string& override) {
+ // The override field can't be empty, can't be prefixed with a dollar sign, and
+ // can't contain a dot.
+ return !override.empty() && override[0] != '$' && override.find('.') == std::string::npos;
+}
+}
- const FTSLanguage* FTSSpec::_getLanguageToUseV2( const BSONObj& userDoc,
- const FTSLanguage* currentLanguage ) const {
- BSONElement e = userDoc[_languageOverrideField];
- if ( e.eoo() ) {
- return currentLanguage;
+FTSSpec::FTSSpec(const BSONObj& indexInfo) {
+ // indexInfo is a text index spec. Text index specs pass through fixSpec() before
+ // being saved to the system.indexes collection. fixSpec() enforces a schema, such that
+ // required fields must exist and be of the correct type (e.g. weights,
+ // textIndexVersion).
+ massert(16739, "found invalid spec for text index", indexInfo["weights"].isABSONObj());
+ BSONElement textIndexVersionElt = indexInfo["textIndexVersion"];
+ massert(17367,
+ "found invalid spec for text index, expected number for textIndexVersion",
+ textIndexVersionElt.isNumber());
+
+ // We currently support TEXT_INDEX_VERSION_1 (deprecated) and TEXT_INDEX_VERSION_2.
+ // Reject all other values.
+ massert(17364,
+ str::stream() << "attempt to use unsupported textIndexVersion "
+ << textIndexVersionElt.numberInt() << "; versions supported: "
+ << TEXT_INDEX_VERSION_2 << ", " << TEXT_INDEX_VERSION_1,
+ textIndexVersionElt.numberInt() == TEXT_INDEX_VERSION_2 ||
+ textIndexVersionElt.numberInt() == TEXT_INDEX_VERSION_1);
+
+ _textIndexVersion = (textIndexVersionElt.numberInt() == TEXT_INDEX_VERSION_2)
+ ? TEXT_INDEX_VERSION_2
+ : TEXT_INDEX_VERSION_1;
+
+ // Initialize _defaultLanguage. Note that the FTSLanguage constructor requires
+ // textIndexVersion, since language parsing is version-specific.
+ auto indexLanguage = indexInfo["default_language"].String();
+ auto swl = FTSLanguage::make(indexLanguage, _textIndexVersion);
+
+ // This can fail if the user originally created the text index under an instance of
+    // MongoDB that supports different languages than the current instance
+    // TODO: consider propagating the index ns to here to improve the error message
+ uassert(28682,
+ str::stream() << "Unrecognized language " << indexLanguage
+ << " found for text index. Verify mongod was started with the"
+ " correct options.",
+ swl.getStatus().isOK());
+ _defaultLanguage = swl.getValue();
+
+ _languageOverrideField = indexInfo["language_override"].valuestrsafe();
+
+ _wildcard = false;
+
+ // in this block we fill in the _weights map
+ {
+ BSONObjIterator i(indexInfo["weights"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ verify(e.isNumber());
+
+ if (WILDCARD == e.fieldName()) {
+ _wildcard = true;
+ } else {
+ double num = e.number();
+ _weights[e.fieldName()] = num;
+ verify(num > 0 && num < MAX_WORD_WEIGHT);
}
- uassert( 17261,
- "found language override field in document with non-string type",
- e.type() == mongo::String );
- StatusWithFTSLanguage swl = FTSLanguage::make( e.String(), TEXT_INDEX_VERSION_2 );
- uassert( 17262,
- "language override unsupported: " + e.String(),
- swl.getStatus().isOK() );
- return swl.getValue();
}
+ verify(_wildcard || _weights.size());
+ }
- void FTSSpec::scoreDocument( const BSONObj& obj, TermFrequencyMap* term_freqs ) const {
- if ( _textIndexVersion == TEXT_INDEX_VERSION_1 ) {
- return _scoreDocumentV1( obj, term_freqs );
- }
+ // extra information
+ {
+ BSONObj keyPattern = indexInfo["key"].Obj();
+ verify(keyPattern.nFields() >= 2);
+ BSONObjIterator i(keyPattern);
- FTSElementIterator it( *this, obj );
+ bool passedFTS = false;
- while ( it.more() ) {
- FTSIteratorValue val = it.next();
- std::unique_ptr<FTSTokenizer> tokenizer(val._language->createTokenizer());
- _scoreStringV2( tokenizer.get(), val._text, term_freqs, val._weight );
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals(e.fieldName(), "_fts") || str::equals(e.fieldName(), "_ftsx")) {
+ passedFTS = true;
+ continue;
}
+
+ if (passedFTS)
+ _extraAfter.push_back(e.fieldName());
+ else
+ _extraBefore.push_back(e.fieldName());
}
+ }
+}
- void FTSSpec::_scoreStringV2( FTSTokenizer* tokenizer,
- StringData raw,
- TermFrequencyMap* docScores,
- double weight ) const {
+const FTSLanguage* FTSSpec::_getLanguageToUseV2(const BSONObj& userDoc,
+ const FTSLanguage* currentLanguage) const {
+ BSONElement e = userDoc[_languageOverrideField];
+ if (e.eoo()) {
+ return currentLanguage;
+ }
+ uassert(17261,
+ "found language override field in document with non-string type",
+ e.type() == mongo::String);
+ StatusWithFTSLanguage swl = FTSLanguage::make(e.String(), TEXT_INDEX_VERSION_2);
+ uassert(17262, "language override unsupported: " + e.String(), swl.getStatus().isOK());
+ return swl.getValue();
+}
- ScoreHelperMap terms;
+void FTSSpec::scoreDocument(const BSONObj& obj, TermFrequencyMap* term_freqs) const {
+ if (_textIndexVersion == TEXT_INDEX_VERSION_1) {
+ return _scoreDocumentV1(obj, term_freqs);
+ }
- unsigned numTokens = 0;
+ FTSElementIterator it(*this, obj);
- tokenizer->reset(raw.rawData(), FTSTokenizer::FilterStopWords );
+ while (it.more()) {
+ FTSIteratorValue val = it.next();
+ std::unique_ptr<FTSTokenizer> tokenizer(val._language->createTokenizer());
+ _scoreStringV2(tokenizer.get(), val._text, term_freqs, val._weight);
+ }
+}
- while (tokenizer->moveNext()) {
- string term = tokenizer->get().toString();
+void FTSSpec::_scoreStringV2(FTSTokenizer* tokenizer,
+ StringData raw,
+ TermFrequencyMap* docScores,
+ double weight) const {
+ ScoreHelperMap terms;
- ScoreHelperStruct& data = terms[term];
+ unsigned numTokens = 0;
- if ( data.exp ) {
- data.exp *= 2;
- }
- else {
- data.exp = 1;
- }
- data.count += 1;
- data.freq += ( 1 / data.exp );
- numTokens++;
- }
+ tokenizer->reset(raw.rawData(), FTSTokenizer::FilterStopWords);
- for ( ScoreHelperMap::const_iterator i = terms.begin(); i != terms.end(); ++i ) {
+ while (tokenizer->moveNext()) {
+ string term = tokenizer->get().toString();
- const string& term = i->first;
- const ScoreHelperStruct& data = i->second;
+ ScoreHelperStruct& data = terms[term];
- // in order to adjust weights as a function of term count as it
- // relates to total field length. ie. is this the only word or
- // a frequently occuring term? or does it only show up once in
- // a long block of text?
+ if (data.exp) {
+ data.exp *= 2;
+ } else {
+ data.exp = 1;
+ }
+ data.count += 1;
+ data.freq += (1 / data.exp);
+ numTokens++;
+ }
- double coeff = ( 0.5 * data.count / numTokens ) + 0.5;
+ for (ScoreHelperMap::const_iterator i = terms.begin(); i != terms.end(); ++i) {
+ const string& term = i->first;
+ const ScoreHelperStruct& data = i->second;
- // if term is identical to the raw form of the
- // field (untokenized) give it a small boost.
- double adjustment = 1;
- if ( raw.size() == term.length() && raw.equalCaseInsensitive( term ) )
- adjustment += 0.1;
+ // in order to adjust weights as a function of term count as it
+        // relates to total field length. i.e. is this the only word or
+        // a frequently occurring term? or does it only show up once in
+ // a long block of text?
- double& score = (*docScores)[term];
- score += ( weight * data.freq * coeff * adjustment );
- verify( score <= MAX_WEIGHT );
- }
- }
+ double coeff = (0.5 * data.count / numTokens) + 0.5;
- Status FTSSpec::getIndexPrefix( const BSONObj& query, BSONObj* out ) const {
- if ( numExtraBefore() == 0 ) {
- *out = BSONObj();
- return Status::OK();
- }
+ // if term is identical to the raw form of the
+ // field (untokenized) give it a small boost.
+ double adjustment = 1;
+ if (raw.size() == term.length() && raw.equalCaseInsensitive(term))
+ adjustment += 0.1;
- BSONObjBuilder b;
- for ( unsigned i = 0; i < numExtraBefore(); i++ ) {
- BSONElement e = query.getFieldDotted(extraBefore(i));
- if ( e.eoo() )
- return Status( ErrorCodes::BadValue,
- str::stream()
- << "need have an equality filter on: "
- << extraBefore(i) );
-
- if ( e.isABSONObj() && e.Obj().firstElement().getGtLtOp( -1 ) != -1 )
- return Status( ErrorCodes::BadValue,
- str::stream()
- << "need have an equality filter on: "
- << extraBefore(i) );
-
- b.append( e );
- }
- *out = b.obj();
- return Status::OK();
- }
+ double& score = (*docScores)[term];
+ score += (weight * data.freq * coeff * adjustment);
+ verify(score <= MAX_WEIGHT);
+ }
+}
- namespace {
- void _addFTSStuff( BSONObjBuilder* b ) {
- b->append( "_fts", INDEX_NAME );
- b->append( "_ftsx", 1 );
- }
+Status FTSSpec::getIndexPrefix(const BSONObj& query, BSONObj* out) const {
+ if (numExtraBefore() == 0) {
+ *out = BSONObj();
+ return Status::OK();
+ }
- void verifyFieldNameNotReserved( StringData s ) {
- uassert( 17289,
- "text index with reserved fields _fts/_ftsx not allowed",
- s != "_fts" && s != "_ftsx" );
- }
- }
+ BSONObjBuilder b;
+ for (unsigned i = 0; i < numExtraBefore(); i++) {
+ BSONElement e = query.getFieldDotted(extraBefore(i));
+ if (e.eoo())
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "need have an equality filter on: " << extraBefore(i));
- BSONObj FTSSpec::fixSpec( const BSONObj& spec ) {
- if ( spec["textIndexVersion"].numberInt() == TEXT_INDEX_VERSION_1 ) {
- return _fixSpecV1( spec );
- }
+ if (e.isABSONObj() && e.Obj().firstElement().getGtLtOp(-1) != -1)
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "need have an equality filter on: " << extraBefore(i));
- map<string,int> m;
-
- BSONObj keyPattern;
- {
- BSONObjBuilder b;
-
- // Populate m and keyPattern.
- {
- bool addedFtsStuff = false;
- BSONObjIterator i( spec["key"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( str::equals( e.fieldName(), "_fts" ) ) {
- uassert( 17271,
- "expecting _fts:\"text\"",
- INDEX_NAME == e.valuestrsafe() );
- addedFtsStuff = true;
- b.append( e );
- }
- else if ( str::equals( e.fieldName(), "_ftsx" ) ) {
- uassert( 17272, "expecting _ftsx:1", e.numberInt() == 1 );
- b.append( e );
- }
- else if ( e.type() == String && INDEX_NAME == e.valuestr() ) {
-
- if ( !addedFtsStuff ) {
- _addFTSStuff( &b );
- addedFtsStuff = true;
- }
-
- m[e.fieldName()] = 1;
- }
- else {
- uassert( 17273,
- "expected value 1 or -1 for non-text key in compound index",
- e.numberInt() == 1 || e.numberInt() == -1 );
- b.append( e );
- }
- }
- verify( addedFtsStuff );
- }
- keyPattern = b.obj();
-
- // Verify that index key is in the correct format: extraBefore fields, then text
- // fields, then extraAfter fields.
- {
- BSONObjIterator i( spec["key"].Obj() );
- verify( i.more() );
- BSONElement e = i.next();
-
- // extraBefore fields
- while ( String != e.type() ) {
- verifyFieldNameNotReserved( e.fieldNameStringData() );
- verify( i.more() );
- e = i.next();
- }
+ b.append(e);
+ }
+ *out = b.obj();
+ return Status::OK();
+}
- // text fields
- bool alreadyFixed = str::equals( e.fieldName(), "_fts" );
- if ( alreadyFixed ) {
- uassert( 17288, "expected _ftsx after _fts", i.more() );
- e = i.next();
- uassert( 17274,
- "expected _ftsx after _fts",
- str::equals( e.fieldName(), "_ftsx" ) );
- e = i.next();
- }
- else {
- do {
- verifyFieldNameNotReserved( e.fieldNameStringData() );
- e = i.next();
- } while ( !e.eoo() && e.type() == String );
- }
+namespace {
+void _addFTSStuff(BSONObjBuilder* b) {
+ b->append("_fts", INDEX_NAME);
+ b->append("_ftsx", 1);
+}
- // extraAfterFields
- while ( !e.eoo() ) {
- uassert( 17389,
- "'text' fields in index must all be adjacent",
- e.type() != String );
- verifyFieldNameNotReserved( e.fieldNameStringData() );
- e = i.next();
- }
- }
+void verifyFieldNameNotReserved(StringData s) {
+ uassert(17289,
+ "text index with reserved fields _fts/_ftsx not allowed",
+ s != "_fts" && s != "_ftsx");
+}
+}
- }
+BSONObj FTSSpec::fixSpec(const BSONObj& spec) {
+ if (spec["textIndexVersion"].numberInt() == TEXT_INDEX_VERSION_1) {
+ return _fixSpecV1(spec);
+ }
- if ( spec["weights"].type() == Object ) {
- BSONObjIterator i( spec["weights"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- uassert( 17283,
- "weight for text index needs numeric type",
- e.isNumber() );
- m[e.fieldName()] = e.numberInt();
- }
- }
- else if ( spec["weights"].str() == WILDCARD ) {
- m[WILDCARD] = 1;
- }
- else if ( !spec["weights"].eoo() ) {
- uasserted( 17284, "text index option 'weights' must be an object" );
- }
+ map<string, int> m;
- BSONObj weights;
- {
- BSONObjBuilder b;
- for ( map<string,int>::iterator i = m.begin(); i != m.end(); ++i ) {
- uassert( 16674, "score for word too high",
- i->second > 0 && i->second < MAX_WORD_WEIGHT );
-
- // Verify weight refers to a valid field.
- if ( i->first != "$**" ) {
- FieldRef keyField( i->first );
- uassert( 17294,
- "weight cannot be on an empty field",
- keyField.numParts() != 0 );
- for ( size_t partNum = 0; partNum < keyField.numParts(); partNum++ ) {
- StringData part = keyField.getPart(partNum);
- uassert( 17291,
- "weight cannot have empty path component",
- !part.empty() );
- uassert( 17292,
- "weight cannot have path component with $ prefix",
- !part.startsWith( "$" ) );
- }
+ BSONObj keyPattern;
+ {
+ BSONObjBuilder b;
+
+ // Populate m and keyPattern.
+ {
+ bool addedFtsStuff = false;
+ BSONObjIterator i(spec["key"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals(e.fieldName(), "_fts")) {
+ uassert(17271, "expecting _fts:\"text\"", INDEX_NAME == e.valuestrsafe());
+ addedFtsStuff = true;
+ b.append(e);
+ } else if (str::equals(e.fieldName(), "_ftsx")) {
+ uassert(17272, "expecting _ftsx:1", e.numberInt() == 1);
+ b.append(e);
+ } else if (e.type() == String && INDEX_NAME == e.valuestr()) {
+ if (!addedFtsStuff) {
+ _addFTSStuff(&b);
+ addedFtsStuff = true;
}
- b.append( i->first, i->second );
+ m[e.fieldName()] = 1;
+ } else {
+ uassert(17273,
+ "expected value 1 or -1 for non-text key in compound index",
+ e.numberInt() == 1 || e.numberInt() == -1);
+ b.append(e);
}
- weights = b.obj();
- }
-
- BSONElement default_language_elt = spec["default_language"];
- string default_language( default_language_elt.str() );
- if ( default_language_elt.eoo() ) {
- default_language = moduleDefaultLanguage;
}
- else {
- uassert( 17263,
- "default_language needs a string type",
- default_language_elt.type() == String );
+ verify(addedFtsStuff);
+ }
+ keyPattern = b.obj();
+
+ // Verify that index key is in the correct format: extraBefore fields, then text
+ // fields, then extraAfter fields.
+ {
+ BSONObjIterator i(spec["key"].Obj());
+ verify(i.more());
+ BSONElement e = i.next();
+
+ // extraBefore fields
+ while (String != e.type()) {
+ verifyFieldNameNotReserved(e.fieldNameStringData());
+ verify(i.more());
+ e = i.next();
}
- uassert( 17264,
- "default_language is not valid",
- FTSLanguage::make( default_language,
- TEXT_INDEX_VERSION_2 ).getStatus().isOK() );
-
- BSONElement language_override_elt = spec["language_override"];
- string language_override( language_override_elt.str() );
- if ( language_override_elt.eoo() ) {
- language_override = "language";
+
+ // text fields
+ bool alreadyFixed = str::equals(e.fieldName(), "_fts");
+ if (alreadyFixed) {
+ uassert(17288, "expected _ftsx after _fts", i.more());
+ e = i.next();
+ uassert(17274, "expected _ftsx after _fts", str::equals(e.fieldName(), "_ftsx"));
+ e = i.next();
+ } else {
+ do {
+ verifyFieldNameNotReserved(e.fieldNameStringData());
+ e = i.next();
+ } while (!e.eoo() && e.type() == String);
}
- else {
- uassert( 17136,
- "language_override is not valid",
- language_override_elt.type() == String
- && validateOverride( language_override ) );
+
+ // extraAfterFields
+ while (!e.eoo()) {
+ uassert(17389, "'text' fields in index must all be adjacent", e.type() != String);
+ verifyFieldNameNotReserved(e.fieldNameStringData());
+ e = i.next();
}
+ }
+ }
- int version = -1;
- int textIndexVersion = TEXT_INDEX_VERSION_2;
+ if (spec["weights"].type() == Object) {
+ BSONObjIterator i(spec["weights"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ uassert(17283, "weight for text index needs numeric type", e.isNumber());
+ m[e.fieldName()] = e.numberInt();
+ }
+ } else if (spec["weights"].str() == WILDCARD) {
+ m[WILDCARD] = 1;
+ } else if (!spec["weights"].eoo()) {
+ uasserted(17284, "text index option 'weights' must be an object");
+ }
- BSONObjBuilder b;
- BSONObjIterator i( spec );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( str::equals( e.fieldName(), "key" ) ) {
- b.append( "key", keyPattern );
- }
- else if ( str::equals( e.fieldName(), "weights" ) ) {
- b.append( "weights", weights );
- weights = BSONObj();
- }
- else if ( str::equals( e.fieldName(), "default_language" ) ) {
- b.append( "default_language", default_language);
- default_language = "";
- }
- else if ( str::equals( e.fieldName(), "language_override" ) ) {
- b.append( "language_override", language_override);
- language_override = "";
- }
- else if ( str::equals( e.fieldName(), "v" ) ) {
- version = e.numberInt();
- }
- else if ( str::equals( e.fieldName(), "textIndexVersion" ) ) {
- uassert( 17293,
- "text index option 'textIndexVersion' must be a number",
- e.isNumber() );
- textIndexVersion = e.numberInt();
- uassert( 16730,
- str::stream() << "bad textIndexVersion: " << textIndexVersion,
- textIndexVersion == TEXT_INDEX_VERSION_2 );
- }
- else {
- b.append( e );
+ BSONObj weights;
+ {
+ BSONObjBuilder b;
+ for (map<string, int>::iterator i = m.begin(); i != m.end(); ++i) {
+ uassert(16674, "score for word too high", i->second > 0 && i->second < MAX_WORD_WEIGHT);
+
+ // Verify weight refers to a valid field.
+ if (i->first != "$**") {
+ FieldRef keyField(i->first);
+ uassert(17294, "weight cannot be on an empty field", keyField.numParts() != 0);
+ for (size_t partNum = 0; partNum < keyField.numParts(); partNum++) {
+ StringData part = keyField.getPart(partNum);
+ uassert(17291, "weight cannot have empty path component", !part.empty());
+ uassert(17292,
+ "weight cannot have path component with $ prefix",
+ !part.startsWith("$"));
}
}
- if ( !weights.isEmpty() ) {
- b.append( "weights", weights );
- }
- if ( !default_language.empty() ) {
- b.append( "default_language", default_language);
- }
- if ( !language_override.empty() ) {
- b.append( "language_override", language_override);
- }
- if ( version >= 0 ) {
- b.append( "v", version );
- }
- b.append( "textIndexVersion", textIndexVersion );
+ b.append(i->first, i->second);
+ }
+ weights = b.obj();
+ }
+
+ BSONElement default_language_elt = spec["default_language"];
+ string default_language(default_language_elt.str());
+ if (default_language_elt.eoo()) {
+ default_language = moduleDefaultLanguage;
+ } else {
+ uassert(
+ 17263, "default_language needs a string type", default_language_elt.type() == String);
+ }
+ uassert(17264,
+ "default_language is not valid",
+ FTSLanguage::make(default_language, TEXT_INDEX_VERSION_2).getStatus().isOK());
+
+ BSONElement language_override_elt = spec["language_override"];
+ string language_override(language_override_elt.str());
+ if (language_override_elt.eoo()) {
+ language_override = "language";
+ } else {
+ uassert(17136,
+ "language_override is not valid",
+ language_override_elt.type() == String && validateOverride(language_override));
+ }
- return b.obj();
+ int version = -1;
+ int textIndexVersion = TEXT_INDEX_VERSION_2;
+
+ BSONObjBuilder b;
+ BSONObjIterator i(spec);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals(e.fieldName(), "key")) {
+ b.append("key", keyPattern);
+ } else if (str::equals(e.fieldName(), "weights")) {
+ b.append("weights", weights);
+ weights = BSONObj();
+ } else if (str::equals(e.fieldName(), "default_language")) {
+ b.append("default_language", default_language);
+ default_language = "";
+ } else if (str::equals(e.fieldName(), "language_override")) {
+ b.append("language_override", language_override);
+ language_override = "";
+ } else if (str::equals(e.fieldName(), "v")) {
+ version = e.numberInt();
+ } else if (str::equals(e.fieldName(), "textIndexVersion")) {
+ uassert(17293, "text index option 'textIndexVersion' must be a number", e.isNumber());
+ textIndexVersion = e.numberInt();
+ uassert(16730,
+ str::stream() << "bad textIndexVersion: " << textIndexVersion,
+ textIndexVersion == TEXT_INDEX_VERSION_2);
+ } else {
+ b.append(e);
}
+ }
+ if (!weights.isEmpty()) {
+ b.append("weights", weights);
+ }
+ if (!default_language.empty()) {
+ b.append("default_language", default_language);
}
+ if (!language_override.empty()) {
+ b.append("language_override", language_override);
+ }
+ if (version >= 0) {
+ b.append("v", version);
+ }
+ b.append("textIndexVersion", textIndexVersion);
+
+ return b.obj();
+}
+}
}
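
Note between hunks: the fixSpec() path above normalizes a user-supplied v2 text index spec — the 'text' entries in the key are replaced by the reserved _fts/_ftsx pair, the named fields move into 'weights', and defaults are filled in for default_language, language_override, and textIndexVersion. A minimal illustrative sketch of the result (not part of the patch; it reuses the fromjson() helper seen in the test file below, and the default language assumes the module default is "english"):

    BSONObj user = fromjson("{key: {x: 1, data: 'text'}}");
    BSONObj fixed = FTSSpec::fixSpec(user);
    // Per the logic above (and the Extra3 test case further down):
    //   fixed["key"]     == { x: 1, _fts: "text", _ftsx: 1 }
    //   fixed["weights"] == { data: 1 }
    // The trailing builder loop then appends default_language,
    // language_override and textIndexVersion ("english", "language", 2,
    // assuming the module default language is "english").
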
diff --git a/src/mongo/db/fts/fts_spec.h b/src/mongo/db/fts/fts_spec.h
index 0f17d825dcc..d5cc0b46472 100644
--- a/src/mongo/db/fts/fts_spec.h
+++ b/src/mongo/db/fts/fts_spec.h
@@ -43,136 +43,146 @@
namespace mongo {
- namespace fts {
-
- extern const double MAX_WEIGHT;
- extern const double MAX_WORD_WEIGHT;
- extern const double DEFAULT_WEIGHT;
-
- typedef std::map<std::string,double> Weights; // TODO cool map
- typedef unordered_map<std::string,double> TermFrequencyMap;
-
- struct ScoreHelperStruct {
- ScoreHelperStruct()
- : freq(0), count(0), exp(0){
- }
- double freq;
- double count;
- double exp;
- };
- typedef unordered_map<std::string,ScoreHelperStruct> ScoreHelperMap;
-
- class FTSSpec {
-
- struct Tools {
- Tools( const FTSLanguage& _language,
- const Stemmer* _stemmer,
- const StopWords* _stopwords )
- : language( _language )
- , stemmer( _stemmer )
- , stopwords( _stopwords ) {}
-
- const FTSLanguage& language;
- const Stemmer* stemmer;
- const StopWords* stopwords;
- };
-
- public:
- FTSSpec( const BSONObj& indexInfo );
-
- bool wildcard() const { return _wildcard; }
- const FTSLanguage& defaultLanguage() const { return *_defaultLanguage; }
- const std::string& languageOverrideField() const { return _languageOverrideField; }
-
- size_t numExtraBefore() const { return _extraBefore.size(); }
- const std::string& extraBefore( unsigned i ) const { return _extraBefore[i]; }
-
- size_t numExtraAfter() const { return _extraAfter.size(); }
- const std::string& extraAfter( unsigned i ) const { return _extraAfter[i]; }
-
- /**
- * Calculates term/score pairs for a BSONObj as applied to this spec.
- * @arg obj document to traverse; can be a subdocument or array
- * @arg term_freqs output parameter to store (term,score) results
- */
- void scoreDocument( const BSONObj& obj, TermFrequencyMap* term_freqs ) const;
-
- /**
- * given a query, pulls out the pieces (in order) that go in the index first
- */
- Status getIndexPrefix( const BSONObj& filter, BSONObj* out ) const;
-
- const Weights& weights() const { return _weights; }
- static BSONObj fixSpec( const BSONObj& spec );
-
- /**
- * Returns text index version.
- */
- TextIndexVersion getTextIndexVersion() const { return _textIndexVersion; }
-
- private:
- //
- // Helper methods. Invoked for TEXT_INDEX_VERSION_2 spec objects only.
- //
-
- /**
- * Calculate the term scores for 'raw' and update 'term_freqs' with the result. Parses
- * 'raw' using 'tools', and weights term scores based on 'weight'.
- */
- void _scoreStringV2( FTSTokenizer* tokenizer,
- StringData raw,
- TermFrequencyMap* term_freqs,
- double weight ) const;
-
- public:
- /**
- * Get the language override for the given BSON doc. If no language override is
- * specified, returns currentLanguage.
- */
- const FTSLanguage* _getLanguageToUseV2( const BSONObj& userDoc,
- const FTSLanguage* currentLanguage ) const;
-
- private:
- //
- // Deprecated helper methods. Invoked for TEXT_INDEX_VERSION_1 spec objects only.
- //
-
- void _scoreStringV1( const Tools& tools,
- StringData raw,
- TermFrequencyMap* docScores,
- double weight ) const;
-
- bool _weightV1( StringData field, double* out ) const;
-
- void _scoreRecurseV1( const Tools& tools,
- const BSONObj& obj,
- TermFrequencyMap* term_freqs ) const;
-
- void _scoreDocumentV1( const BSONObj& obj, TermFrequencyMap* term_freqs ) const;
-
- const FTSLanguage& _getLanguageToUseV1( const BSONObj& userDoc ) const;
-
- static BSONObj _fixSpecV1( const BSONObj& spec );
-
- //
- // Instance variables.
- //
-
- TextIndexVersion _textIndexVersion;
+namespace fts {
+
+extern const double MAX_WEIGHT;
+extern const double MAX_WORD_WEIGHT;
+extern const double DEFAULT_WEIGHT;
+
+typedef std::map<std::string, double> Weights; // TODO cool map
+typedef unordered_map<std::string, double> TermFrequencyMap;
+
+struct ScoreHelperStruct {
+ ScoreHelperStruct() : freq(0), count(0), exp(0) {}
+ double freq;
+ double count;
+ double exp;
+};
+typedef unordered_map<std::string, ScoreHelperStruct> ScoreHelperMap;
+
+class FTSSpec {
+ struct Tools {
+ Tools(const FTSLanguage& _language, const Stemmer* _stemmer, const StopWords* _stopwords)
+ : language(_language), stemmer(_stemmer), stopwords(_stopwords) {}
+
+ const FTSLanguage& language;
+ const Stemmer* stemmer;
+ const StopWords* stopwords;
+ };
+
+public:
+ FTSSpec(const BSONObj& indexInfo);
+
+ bool wildcard() const {
+ return _wildcard;
+ }
+ const FTSLanguage& defaultLanguage() const {
+ return *_defaultLanguage;
+ }
+ const std::string& languageOverrideField() const {
+ return _languageOverrideField;
+ }
+
+ size_t numExtraBefore() const {
+ return _extraBefore.size();
+ }
+ const std::string& extraBefore(unsigned i) const {
+ return _extraBefore[i];
+ }
+
+ size_t numExtraAfter() const {
+ return _extraAfter.size();
+ }
+ const std::string& extraAfter(unsigned i) const {
+ return _extraAfter[i];
+ }
- const FTSLanguage* _defaultLanguage;
- std::string _languageOverrideField;
- bool _wildcard;
+ /**
+ * Calculates term/score pairs for a BSONObj as applied to this spec.
+ * @arg obj document to traverse; can be a subdocument or array
+ * @arg term_freqs output parameter to store (term,score) results
+ */
+ void scoreDocument(const BSONObj& obj, TermFrequencyMap* term_freqs) const;
- // mapping : fieldname -> weight
- Weights _weights;
-
- // Prefix compound key - used to partition search index
- std::vector<std::string> _extraBefore;
+ /**
+ * given a query, pulls out the pieces (in order) that go in the index first
+ */
+ Status getIndexPrefix(const BSONObj& filter, BSONObj* out) const;
- // Suffix compound key - used for covering index behavior
- std::vector<std::string> _extraAfter;
- };
+ const Weights& weights() const {
+ return _weights;
+ }
+ static BSONObj fixSpec(const BSONObj& spec);
+ /**
+ * Returns text index version.
+ */
+ TextIndexVersion getTextIndexVersion() const {
+ return _textIndexVersion;
}
+
+private:
+ //
+ // Helper methods. Invoked for TEXT_INDEX_VERSION_2 spec objects only.
+ //
+
+ /**
+ * Calculate the term scores for 'raw' and update 'term_freqs' with the result. Parses
+ * 'raw' using 'tools', and weights term scores based on 'weight'.
+ */
+ void _scoreStringV2(FTSTokenizer* tokenizer,
+ StringData raw,
+ TermFrequencyMap* term_freqs,
+ double weight) const;
+
+public:
+ /**
+ * Get the language override for the given BSON doc. If no language override is
+ * specified, returns currentLanguage.
+ */
+ const FTSLanguage* _getLanguageToUseV2(const BSONObj& userDoc,
+ const FTSLanguage* currentLanguage) const;
+
+private:
+ //
+ // Deprecated helper methods. Invoked for TEXT_INDEX_VERSION_1 spec objects only.
+ //
+
+ void _scoreStringV1(const Tools& tools,
+ StringData raw,
+ TermFrequencyMap* docScores,
+ double weight) const;
+
+ bool _weightV1(StringData field, double* out) const;
+
+ void _scoreRecurseV1(const Tools& tools,
+ const BSONObj& obj,
+ TermFrequencyMap* term_freqs) const;
+
+ void _scoreDocumentV1(const BSONObj& obj, TermFrequencyMap* term_freqs) const;
+
+ const FTSLanguage& _getLanguageToUseV1(const BSONObj& userDoc) const;
+
+ static BSONObj _fixSpecV1(const BSONObj& spec);
+
+ //
+ // Instance variables.
+ //
+
+ TextIndexVersion _textIndexVersion;
+
+ const FTSLanguage* _defaultLanguage;
+ std::string _languageOverrideField;
+ bool _wildcard;
+
+ // mapping : fieldname -> weight
+ Weights _weights;
+
+ // Prefix compound key - used to partition search index
+ std::vector<std::string> _extraBefore;
+
+ // Suffix compound key - used for covering index behavior
+ std::vector<std::string> _extraAfter;
+};
+}
}
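
A note on the accessors declared above: key-pattern fields that precede the text component become the 'extraBefore' prefix, fields that follow it become the 'extraAfter' suffix, and getIndexPrefix() extracts equality values for the prefix fields from a query filter. A small sketch under those declarations (illustrative only, reusing fixSpec() and fromjson() as in the tests below):

    FTSSpec spec(FTSSpec::fixSpec(fromjson("{key: {x: 1, data: 'text', y: 1}}")));
    // spec.numExtraBefore() == 1 with spec.extraBefore(0) == "x";
    // spec.numExtraAfter()  == 1 with spec.extraAfter(0)  == "y".
    // getIndexPrefix() then pulls the equality value for "x" out of a filter,
    // e.g. a filter containing {x: 2} yields the prefix {x: 2}; see the
    // Extra2/Extra3 cases in fts_spec_test.cpp below.
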
diff --git a/src/mongo/db/fts/fts_spec_legacy.cpp b/src/mongo/db/fts/fts_spec_legacy.cpp
index a2dc1dc2489..4a161c8614a 100644
--- a/src/mongo/db/fts/fts_spec_legacy.cpp
+++ b/src/mongo/db/fts/fts_spec_legacy.cpp
@@ -33,290 +33,268 @@
namespace mongo {
- namespace fts {
+namespace fts {
- //
- // This file contains functionality specific to indexing documents from TEXT_INDEX_VERSION_1
- // text indexes.
- //
+//
+// This file contains functionality specific to indexing documents from TEXT_INDEX_VERSION_1
+// text indexes.
+//
- using std::map;
- using std::string;
- using namespace mongoutils;
+using std::map;
+using std::string;
+using namespace mongoutils;
- namespace {
- void _addFTSStuff( BSONObjBuilder* b ) {
- b->append( "_fts", INDEX_NAME );
- b->append( "_ftsx", 1 );
- }
- }
+namespace {
+void _addFTSStuff(BSONObjBuilder* b) {
+ b->append("_fts", INDEX_NAME);
+ b->append("_ftsx", 1);
+}
+}
- const FTSLanguage& FTSSpec::_getLanguageToUseV1( const BSONObj& userDoc ) const {
- BSONElement e = userDoc[_languageOverrideField];
- if ( e.type() == String ) {
- const char * x = e.valuestrsafe();
- if ( strlen( x ) > 0 ) {
- StatusWithFTSLanguage swl = FTSLanguage::make( x, TEXT_INDEX_VERSION_1 );
- dassert( swl.isOK() ); // make() w/ TEXT_INDEX_VERSION_1 guaranteed to not fail.
- return *swl.getValue();
- }
- }
- return *_defaultLanguage;
+const FTSLanguage& FTSSpec::_getLanguageToUseV1(const BSONObj& userDoc) const {
+ BSONElement e = userDoc[_languageOverrideField];
+ if (e.type() == String) {
+ const char* x = e.valuestrsafe();
+ if (strlen(x) > 0) {
+ StatusWithFTSLanguage swl = FTSLanguage::make(x, TEXT_INDEX_VERSION_1);
+ dassert(swl.isOK()); // make() w/ TEXT_INDEX_VERSION_1 guaranteed to not fail.
+ return *swl.getValue();
}
+ }
+ return *_defaultLanguage;
+}
- void FTSSpec::_scoreStringV1( const Tools& tools,
- StringData raw,
- TermFrequencyMap* docScores,
- double weight ) const {
-
- ScoreHelperMap terms;
+void FTSSpec::_scoreStringV1(const Tools& tools,
+ StringData raw,
+ TermFrequencyMap* docScores,
+ double weight) const {
+ ScoreHelperMap terms;
- unsigned numTokens = 0;
+ unsigned numTokens = 0;
- Tokenizer i( &tools.language, raw );
- while ( i.more() ) {
- Token t = i.next();
- if ( t.type != Token::TEXT )
- continue;
+ Tokenizer i(&tools.language, raw);
+ while (i.more()) {
+ Token t = i.next();
+ if (t.type != Token::TEXT)
+ continue;
- string term = tolowerString( t.data );
- if ( tools.stopwords->isStopWord( term ) )
- continue;
- term = tools.stemmer->stem( term );
+ string term = tolowerString(t.data);
+ if (tools.stopwords->isStopWord(term))
+ continue;
+ term = tools.stemmer->stem(term);
- ScoreHelperStruct& data = terms[term];
+ ScoreHelperStruct& data = terms[term];
- if ( data.exp )
- data.exp *= 2;
- else
- data.exp = 1;
- data.count += 1;
- data.freq += ( 1 / data.exp );
+ if (data.exp)
+ data.exp *= 2;
+ else
+ data.exp = 1;
+ data.count += 1;
+ data.freq += (1 / data.exp);
- numTokens++;
- }
+ numTokens++;
+ }
- for ( ScoreHelperMap::const_iterator i = terms.begin(); i != terms.end(); ++i ) {
+ for (ScoreHelperMap::const_iterator i = terms.begin(); i != terms.end(); ++i) {
+ const string& term = i->first;
+ const ScoreHelperStruct& data = i->second;
- const string& term = i->first;
- const ScoreHelperStruct& data = i->second;
+ // in order to adjust weights as a function of term count as it
+        // relates to total field length. i.e. is this the only word or
+        // a frequently occurring term? or does it only show up once in
+ // a long block of text?
- // in order to adjust weights as a function of term count as it
- // relates to total field length. ie. is this the only word or
- // a frequently occuring term? or does it only show up once in
- // a long block of text?
+ double coeff = (0.5 * data.count / numTokens) + 0.5;
- double coeff = ( 0.5 * data.count / numTokens ) + 0.5;
+ // if term is identical to the raw form of the
+ // field (untokenized) give it a small boost.
+ double adjustment = 1;
+ if (raw.size() == term.length() && raw.equalCaseInsensitive(term))
+ adjustment += 0.1;
- // if term is identical to the raw form of the
- // field (untokenized) give it a small boost.
- double adjustment = 1;
- if ( raw.size() == term.length() && raw.equalCaseInsensitive( term ) )
- adjustment += 0.1;
+ double& score = (*docScores)[term];
+ score += (weight * data.freq * coeff * adjustment);
+ verify(score <= MAX_WEIGHT);
+ }
+}
- double& score = (*docScores)[term];
- score += ( weight * data.freq * coeff * adjustment );
- verify( score <= MAX_WEIGHT );
- }
- }
+bool FTSSpec::_weightV1(StringData field, double* out) const {
+ Weights::const_iterator i = _weights.find(field.toString());
+ if (i == _weights.end())
+ return false;
+ *out = i->second;
+ return true;
+}
- bool FTSSpec::_weightV1( StringData field, double* out ) const {
- Weights::const_iterator i = _weights.find( field.toString() );
- if ( i == _weights.end() )
- return false;
- *out = i->second;
- return true;
+/*
+ * Recurses over all fields of an obj (document in collection)
+ * and fills term,score map term_freqs
+ * @param tokenizer, tokenizer to tokenize a string into terms
+ * @param obj, object being parsed
+ * term_freqs, map <term,score> to be filled up
+ */
+void FTSSpec::_scoreRecurseV1(const Tools& tools,
+ const BSONObj& obj,
+ TermFrequencyMap* term_freqs) const {
+ BSONObjIterator j(obj);
+ while (j.more()) {
+ BSONElement x = j.next();
+
+ if (languageOverrideField() == x.fieldName())
+ continue;
+
+ if (x.type() == String) {
+ double w = 1;
+ _weightV1(x.fieldName(), &w);
+ _scoreStringV1(tools, x.valuestr(), term_freqs, w);
+ } else if (x.isABSONObj()) {
+ _scoreRecurseV1(tools, x.Obj(), term_freqs);
}
+ }
+}
- /*
- * Recurses over all fields of an obj (document in collection)
- * and fills term,score map term_freqs
- * @param tokenizer, tokenizer to tokenize a string into terms
- * @param obj, object being parsed
- * term_freqs, map <term,score> to be filled up
- */
- void FTSSpec::_scoreRecurseV1( const Tools& tools,
- const BSONObj& obj,
- TermFrequencyMap* term_freqs ) const {
- BSONObjIterator j( obj );
- while ( j.more() ) {
- BSONElement x = j.next();
+void FTSSpec::_scoreDocumentV1(const BSONObj& obj, TermFrequencyMap* term_freqs) const {
+ const FTSLanguage& language = _getLanguageToUseV1(obj);
- if ( languageOverrideField() == x.fieldName() )
- continue;
+ Stemmer stemmer(&language);
+ Tools tools(language, &stemmer, StopWords::getStopWords(&language));
- if (x.type() == String) {
- double w = 1;
- _weightV1( x.fieldName(), &w );
- _scoreStringV1(tools, x.valuestr(), term_freqs, w);
- }
- else if ( x.isABSONObj() ) {
- _scoreRecurseV1( tools, x.Obj(), term_freqs);
- }
+ if (wildcard()) {
+ // if * is specified for weight, we can recurse over all fields.
+ _scoreRecurseV1(tools, obj, term_freqs);
+ return;
+ }
+ // otherwise, we need to remember the different weights for each field
+ // and act accordingly (in other words, call _score)
+ for (Weights::const_iterator i = _weights.begin(); i != _weights.end(); i++) {
+ const char* leftOverName = i->first.c_str();
+ // name of field
+ BSONElement e = obj.getFieldDottedOrArray(leftOverName);
+ // weight associated to name of field
+ double weight = i->second;
+
+ if (e.eoo()) {
+ // do nothing
+ } else if (e.type() == Array) {
+ BSONObjIterator j(e.Obj());
+ while (j.more()) {
+ BSONElement x = j.next();
+ if (leftOverName[0] && x.isABSONObj())
+ x = x.Obj().getFieldDotted(leftOverName);
+ if (x.type() == String)
+ _scoreStringV1(tools, x.valuestr(), term_freqs, weight);
}
+ } else if (e.type() == String) {
+ _scoreStringV1(tools, e.valuestr(), term_freqs, weight);
}
+ }
+}
- void FTSSpec::_scoreDocumentV1( const BSONObj& obj,
- TermFrequencyMap* term_freqs ) const {
-
- const FTSLanguage& language = _getLanguageToUseV1( obj );
-
- Stemmer stemmer(&language);
- Tools tools(language, &stemmer, StopWords::getStopWords( &language ));
-
- if ( wildcard() ) {
- // if * is specified for weight, we can recurse over all fields.
- _scoreRecurseV1(tools, obj, term_freqs);
- return;
- }
-
- // otherwise, we need to remember the different weights for each field
- // and act accordingly (in other words, call _score)
- for ( Weights::const_iterator i = _weights.begin(); i != _weights.end(); i++ ) {
- const char * leftOverName = i->first.c_str();
- // name of field
- BSONElement e = obj.getFieldDottedOrArray(leftOverName);
- // weight associated to name of field
- double weight = i->second;
-
- if ( e.eoo() ) {
- // do nothing
- }
- else if ( e.type() == Array ) {
- BSONObjIterator j( e.Obj() );
- while ( j.more() ) {
- BSONElement x = j.next();
- if ( leftOverName[0] && x.isABSONObj() )
- x = x.Obj().getFieldDotted( leftOverName );
- if ( x.type() == String )
- _scoreStringV1( tools, x.valuestr(), term_freqs, weight );
- }
- }
- else if ( e.type() == String ) {
- _scoreStringV1( tools, e.valuestr(), term_freqs, weight );
+BSONObj FTSSpec::_fixSpecV1(const BSONObj& spec) {
+ map<string, int> m;
+
+ BSONObj keyPattern;
+ {
+ BSONObjBuilder b;
+ bool addedFtsStuff = false;
+
+ BSONObjIterator i(spec["key"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals(e.fieldName(), "_fts") || str::equals(e.fieldName(), "_ftsx")) {
+ addedFtsStuff = true;
+ b.append(e);
+ } else if (e.type() == String &&
+ (str::equals("fts", e.valuestr()) || str::equals("text", e.valuestr()))) {
+ if (!addedFtsStuff) {
+ _addFTSStuff(&b);
+ addedFtsStuff = true;
}
+ m[e.fieldName()] = 1;
+ } else {
+ b.append(e);
}
}
- BSONObj FTSSpec::_fixSpecV1( const BSONObj& spec ) {
- map<string,int> m;
-
- BSONObj keyPattern;
- {
- BSONObjBuilder b;
- bool addedFtsStuff = false;
-
- BSONObjIterator i( spec["key"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( str::equals( e.fieldName(), "_fts" ) ||
- str::equals( e.fieldName(), "_ftsx" ) ) {
- addedFtsStuff = true;
- b.append( e );
- }
- else if ( e.type() == String &&
- ( str::equals( "fts", e.valuestr() ) ||
- str::equals( "text", e.valuestr() ) ) ) {
-
- if ( !addedFtsStuff ) {
- _addFTSStuff( &b );
- addedFtsStuff = true;
- }
-
- m[e.fieldName()] = 1;
- }
- else {
- b.append( e );
- }
- }
-
- if ( !addedFtsStuff )
- _addFTSStuff( &b );
-
- keyPattern = b.obj();
- }
-
- if ( spec["weights"].isABSONObj() ) {
- BSONObjIterator i( spec["weights"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- m[e.fieldName()] = e.numberInt();
- }
- }
- else if ( spec["weights"].str() == WILDCARD ) {
- m[WILDCARD] = 1;
- }
-
- BSONObj weights;
- {
- BSONObjBuilder b;
- for ( map<string,int>::iterator i = m.begin(); i != m.end(); ++i ) {
- uassert( 17365, "score for word too high",
- i->second > 0 && i->second < MAX_WORD_WEIGHT );
- b.append( i->first, i->second );
- }
- weights = b.obj();
- }
+ if (!addedFtsStuff)
+ _addFTSStuff(&b);
- string default_language(spec.getStringField("default_language"));
- if ( default_language.empty() )
- default_language = "english";
+ keyPattern = b.obj();
+ }
- string language_override(spec.getStringField("language_override"));
- if ( language_override.empty() )
- language_override = "language";
+ if (spec["weights"].isABSONObj()) {
+ BSONObjIterator i(spec["weights"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ m[e.fieldName()] = e.numberInt();
+ }
+ } else if (spec["weights"].str() == WILDCARD) {
+ m[WILDCARD] = 1;
+ }
- int version = -1;
- int textIndexVersion = 1;
+ BSONObj weights;
+ {
+ BSONObjBuilder b;
+ for (map<string, int>::iterator i = m.begin(); i != m.end(); ++i) {
+ uassert(17365, "score for word too high", i->second > 0 && i->second < MAX_WORD_WEIGHT);
+ b.append(i->first, i->second);
+ }
+ weights = b.obj();
+ }
- BSONObjBuilder b;
- BSONObjIterator i( spec );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( str::equals( e.fieldName(), "key" ) ) {
- b.append( "key", keyPattern );
- }
- else if ( str::equals( e.fieldName(), "weights" ) ) {
- b.append( "weights", weights );
- weights = BSONObj();
- }
- else if ( str::equals( e.fieldName(), "default_language" ) ) {
- b.append( "default_language", default_language);
- default_language = "";
- }
- else if ( str::equals( e.fieldName(), "language_override" ) ) {
- b.append( "language_override", language_override);
- language_override = "";
- }
- else if ( str::equals( e.fieldName(), "v" ) ) {
- version = e.numberInt();
- }
- else if ( str::equals( e.fieldName(), "textIndexVersion" ) ) {
- textIndexVersion = e.numberInt();
- uassert( 17366,
- str::stream() << "bad textIndexVersion: " << textIndexVersion,
- textIndexVersion == 1 );
- }
- else {
- b.append( e );
- }
- }
+ string default_language(spec.getStringField("default_language"));
+ if (default_language.empty())
+ default_language = "english";
+
+ string language_override(spec.getStringField("language_override"));
+ if (language_override.empty())
+ language_override = "language";
+
+ int version = -1;
+ int textIndexVersion = 1;
+
+ BSONObjBuilder b;
+ BSONObjIterator i(spec);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals(e.fieldName(), "key")) {
+ b.append("key", keyPattern);
+ } else if (str::equals(e.fieldName(), "weights")) {
+ b.append("weights", weights);
+ weights = BSONObj();
+ } else if (str::equals(e.fieldName(), "default_language")) {
+ b.append("default_language", default_language);
+ default_language = "";
+ } else if (str::equals(e.fieldName(), "language_override")) {
+ b.append("language_override", language_override);
+ language_override = "";
+ } else if (str::equals(e.fieldName(), "v")) {
+ version = e.numberInt();
+ } else if (str::equals(e.fieldName(), "textIndexVersion")) {
+ textIndexVersion = e.numberInt();
+ uassert(17366,
+ str::stream() << "bad textIndexVersion: " << textIndexVersion,
+ textIndexVersion == 1);
+ } else {
+ b.append(e);
+ }
+ }
- if ( !weights.isEmpty() )
- b.append( "weights", weights );
- if ( !default_language.empty() )
- b.append( "default_language", default_language);
- if ( !language_override.empty() )
- b.append( "language_override", language_override);
+ if (!weights.isEmpty())
+ b.append("weights", weights);
+ if (!default_language.empty())
+ b.append("default_language", default_language);
+ if (!language_override.empty())
+ b.append("language_override", language_override);
- if ( version >= 0 )
- b.append( "v", version );
+ if (version >= 0)
+ b.append("v", version);
- b.append( "textIndexVersion", textIndexVersion );
+ b.append("textIndexVersion", textIndexVersion);
- return b.obj();
- }
- }
+ return b.obj();
+}
+}
}
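
To make the legacy scoring in _scoreStringV1() above concrete: each repeated occurrence of a term contributes half as much as the previous one (freq = 1 + 1/2 + 1/4 + ...), coeff = 0.5 * count / numTokens + 0.5 rewards terms that dominate the field, and the per-term contribution is weight * freq * coeff * adjustment. As a worked example (assuming a weight of 1, that neither word is a stopword, and that stemming leaves both unchanged), the field "run run run cat" has numTokens = 4; "run" gets freq = 1.75 and coeff = 0.5 * 3/4 + 0.5 = 0.875, for a score of 1.53125, while "cat" gets freq = 1 and coeff = 0.625, for a score of 0.625. The adjustment stays 1 for both, since neither term equals the raw field as a whole.
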
diff --git a/src/mongo/db/fts/fts_spec_test.cpp b/src/mongo/db/fts/fts_spec_test.cpp
index 832279eb18d..c9f628a2b28 100644
--- a/src/mongo/db/fts/fts_spec_test.cpp
+++ b/src/mongo/db/fts/fts_spec_test.cpp
@@ -36,541 +36,558 @@
namespace mongo {
- using std::set;
- using std::string;
-
- namespace fts {
-
- /**
- * Assert that fixSpec() accepts the provided text index spec.
- */
- void assertFixSuccess( const std::string& s ) {
- BSONObj user = fromjson( s );
-
- try {
- // fixSpec() should not throw on a valid spec.
- BSONObj fixed = FTSSpec::fixSpec( user );
-
- // fixSpec() on an already-fixed spec shouldn't change it.
- BSONObj fixed2 = FTSSpec::fixSpec( fixed );
- ASSERT_EQUALS( fixed, fixed2 );
- }
- catch ( UserException& ) {
- ASSERT( false );
- }
- }
-
- /**
- * Assert that fixSpec() rejects the provided text index spec.
- */
- void assertFixFailure( const std::string& s ) {
- BSONObj user = fromjson( s );
-
- try {
- // fixSpec() on an invalid spec should uassert.
- BSONObj fixed = FTSSpec::fixSpec( user );
- }
- catch ( UserException& ) {
- return;
- }
- ASSERT( false );
- }
-
- TEST( FTSSpec, FixNormalKey1 ) {
- assertFixSuccess("{key: {a: 'text'}}");
- assertFixSuccess("{key: {a: 'text', b: 'text'}}");
- assertFixSuccess("{key: {a: 'text', b: 'text', c: 'text'}}");
-
- assertFixFailure("{key: {_fts: 'text'}}"); // not allowed to index reserved field
- assertFixFailure("{key: {_ftsx: 'text'}}");
- }
-
- TEST( FTSSpec, FixCompoundKey1 ) {
- assertFixSuccess("{key: {a: 'text', b: 1.0}}");
- assertFixSuccess("{key: {a: 'text', b: NumberInt(1)}}");
- assertFixSuccess("{key: {a: 'text', b: NumberLong(1)}}");
- assertFixSuccess("{key: {a: 'text', b: -1.0}}");
- assertFixSuccess("{key: {a: 'text', b: NumberInt(-1)}}");
- assertFixSuccess("{key: {a: 'text', b: NumberLong(-1)}}");
- assertFixSuccess("{key: {a: 1.0, b: 'text'}}");
- assertFixSuccess("{key: {a: NumberInt(1), b: 'text'}}");
- assertFixSuccess("{key: {a: NumberLong(1), b: 'text'}}");
- assertFixSuccess("{key: {a: -1, b: 'text'}}");
- assertFixSuccess("{key: {a: 1, b: 1, c: 'text'}}");
- assertFixSuccess("{key: {a: 1, b: -1, c: 'text'}}");
- assertFixSuccess("{key: {a: -1, b: 1, c: 'text'}}");
- assertFixSuccess("{key: {a: 1, b: 'text', c: 1}}");
- assertFixSuccess("{key: {a: 'text', b: 1, c: 1}}");
- assertFixSuccess("{key: {a: 'text', b: 1, c: -1}}");
- assertFixSuccess("{key: {a: 'text', b: 'text', c: 1}}");
- assertFixSuccess("{key: {a: 1, b: 'text', c: 'text'}}");
-
- assertFixFailure("{key: {a: 'text', b: 0}}");
- assertFixFailure("{key: {a: 'text', b: '2d'}}"); // not allowed to mix special indexes
- assertFixFailure("{key: {a: 'text', b: '1'}}");
- assertFixFailure("{key: {a: 'text', _fts: 1}}");
- assertFixFailure("{key: {a: 'text', _fts: 'text'}}");
- assertFixFailure("{key: {a: 'text', _ftsx: 1}}");
- assertFixFailure("{key: {a: 'text', _ftsx: 'text'}}");
- assertFixFailure("{key: {_fts: 1, a: 'text'}}");
- assertFixFailure("{key: {_fts: 'text', a: 'text'}}");
- assertFixFailure("{key: {_ftsx: 1, a: 'text'}}");
- assertFixFailure("{key: {_ftsx: 'text', a: 'text'}}");
- assertFixFailure("{key: {a: 'text', b: 1, c: 'text'}}"); // 'text' must all be adjacent
- assertFixFailure("{key: {a: 'text', b: 1, c: 'text', d: 1}}");
- assertFixFailure("{key: {a: 1, b: 'text', c: 1, d: 'text', e: 1}}");
- }
-
- TEST( FTSSpec, FixDefaultLanguage1 ) {
- assertFixSuccess("{key: {a: 'text'}, default_language: 'english'}");
- assertFixSuccess("{key: {a: 'text'}, default_language: 'engLISH'}");
- assertFixSuccess("{key: {a: 'text'}, default_language: 'en'}");
- assertFixSuccess("{key: {a: 'text'}, default_language: 'eN'}");
- assertFixSuccess("{key: {a: 'text'}, default_language: 'spanish'}");
- assertFixSuccess("{key: {a: 'text'}, default_language: 'none'}");
-
- assertFixFailure("{key: {a: 'text'}, default_language: 'engrish'}");
- assertFixFailure("{key: {a: 'text'}, default_language: ' english'}");
- assertFixFailure("{key: {a: 'text'}, default_language: ''}");
- }
-
- TEST( FTSSpec, FixWeights1 ) {
- assertFixSuccess("{key: {a: 'text'}, weights: {}}");
- assertFixSuccess("{key: {a: 'text'}, weights: {a: 1.0}}");
- assertFixSuccess("{key: {a: 'text'}, weights: {a: NumberInt(1)}}");
- assertFixSuccess("{key: {a: 'text'}, weights: {a: NumberLong(1)}}");
- assertFixSuccess("{key: {a: 'text'}, weights: {a: 99999}}");
- assertFixSuccess("{key: {'$**': 'text'}, weights: {'a.b': 2}}");
- assertFixSuccess("{key: {'$**': 'text'}, weights: {a: 2, b: 2}}");
- assertFixSuccess("{key: {'$**': 'text'}, weights: {'$**': 2}}");
-
- assertFixFailure("{key: {a: 'text'}, weights: 0}");
- assertFixFailure("{key: {a: 'text'}, weights: []}");
- assertFixFailure("{key: {a: 'text'}, weights: 'x'}");
- assertFixFailure("{key: {a: 'text'}, weights: {a: 0}}");
- assertFixFailure("{key: {a: 'text'}, weights: {a: -1}}");
- assertFixFailure("{key: {a: 'text'}, weights: {a: 100000}}"); // above max weight
- assertFixFailure("{key: {a: 'text'}, weights: {a: '1'}}");
- assertFixFailure("{key: {a: 'text'}, weights: {'': 1}}"); // "invalid" path
- assertFixFailure("{key: {a: 'text'}, weights: {'a.': 1}}");
- assertFixFailure("{key: {a: 'text'}, weights: {'.a': 1}}");
- assertFixFailure("{key: {a: 'text'}, weights: {'a..a': 1}}");
- assertFixFailure("{key: {a: 'text'}, weights: {$a: 1}}");
- assertFixFailure("{key: {a: 'text'}, weights: {'a.$a': 1}}");
- assertFixFailure("{key: {a: 'text'}, weights: {'a.$**': 1}}");
- }
-
- TEST( FTSSpec, FixLanguageOverride1 ) {
- assertFixSuccess("{key: {a: 'text'}, language_override: 'foo'}");
- assertFixSuccess("{key: {a: 'text'}, language_override: 'foo$bar'}");
-
- assertFixFailure("{key: {a: 'text'}, language_override: 'foo.bar'}"); // can't have '.'
- assertFixFailure("{key: {a: 'text'}, language_override: ''}");
- assertFixFailure("{key: {a: 'text'}, language_override: '$foo'}");
- }
-
- TEST( FTSSpec, FixTextIndexVersion1 ) {
- assertFixSuccess("{key: {a: 'text'}, textIndexVersion: 1.0}}");
- assertFixSuccess("{key: {a: 'text'}, textIndexVersion: NumberInt(1)}}");
- assertFixSuccess("{key: {a: 'text'}, textIndexVersion: NumberLong(1)}}");
- assertFixSuccess("{key: {a: 'text'}, textIndexVersion: 2.0}}");
- assertFixSuccess("{key: {a: 'text'}, textIndexVersion: NumberInt(2)}}");
- assertFixSuccess("{key: {a: 'text'}, textIndexVersion: NumberLong(2)}}");
-
- assertFixFailure("{key: {a: 'text'}, textIndexVersion: 3}");
- assertFixFailure("{key: {a: 'text'}, textIndexVersion: '2'}");
- assertFixFailure("{key: {a: 'text'}, textIndexVersion: {}}");
- }
-
- TEST( FTSSpec, ScoreSingleField1 ) {
- BSONObj user = BSON( "key" << BSON( "title" << "text" <<
- "text" << "text" ) <<
- "weights" << BSON( "title" << 10 ) );
-
- FTSSpec spec( FTSSpec::fixSpec( user ) );
-
- TermFrequencyMap m;
- spec.scoreDocument( BSON( "title" << "cat sat run" ), &m );
- ASSERT_EQUALS( 3U, m.size() );
- ASSERT_EQUALS( m["cat"], m["sat"] );
- ASSERT_EQUALS( m["cat"], m["run"] );
- ASSERT( m["cat"] > 0 );
- }
-
- TEST( FTSSpec, ScoreMultipleField1 ) {
- BSONObj user = BSON( "key" << BSON( "title" << "text" <<
- "text" << "text" ) <<
- "weights" << BSON( "title" << 10 ) );
-
- FTSSpec spec( FTSSpec::fixSpec( user ) );
-
- TermFrequencyMap m;
- spec.scoreDocument( BSON( "title" << "cat sat run" << "text" << "cat book" ), &m );
-
- ASSERT_EQUALS( 4U, m.size() );
- ASSERT_EQUALS( m["sat"], m["run"] );
- ASSERT( m["sat"] > 0 );
-
- ASSERT( m["cat"] > m["sat"] );
- ASSERT( m["cat"] > m["book"] );
- ASSERT( m["book"] > 0 );
- ASSERT( m["book"] < m["sat"] );
- }
-
- TEST( FTSSpec, ScoreMultipleField2 ) {
- // Test where one indexed field is a parent component of another indexed field.
- BSONObj user = BSON( "key" << BSON( "a" << "text" << "a.b" << "text" ) );
-
- FTSSpec spec( FTSSpec::fixSpec( user ) );
-
- TermFrequencyMap m;
- spec.scoreDocument( BSON( "a" << BSON( "b" << "term" ) ), &m );
- ASSERT_EQUALS( 1U, m.size() );
- }
-
- TEST( FTSSpec, ScoreRepeatWord ) {
- BSONObj user = BSON( "key" << BSON( "title" << "text" <<
- "text" << "text" ) <<
- "weights" << BSON( "title" << 10 ) );
-
- FTSSpec spec( FTSSpec::fixSpec( user ) );
-
- TermFrequencyMap m;
- spec.scoreDocument( BSON( "title" << "cat sat sat run run run" ), &m );
- ASSERT_EQUALS( 3U, m.size() );
- ASSERT( m["cat"] > 0 );
- ASSERT( m["sat"] > m["cat"] );
- ASSERT( m["run"] > m["sat"] );
-
- }
-
- TEST( FTSSpec, Extra1 ) {
- BSONObj user = BSON( "key" << BSON( "data" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( user ) );
- ASSERT_EQUALS( 0U, spec.numExtraBefore() );
- ASSERT_EQUALS( 0U, spec.numExtraAfter() );
- }
-
- TEST( FTSSpec, Extra2 ) {
- BSONObj user = BSON( "key" << BSON( "data" << "text" << "x" << 1 ) );
- BSONObj fixed = FTSSpec::fixSpec( user );
- FTSSpec spec( fixed );
- ASSERT_EQUALS( 0U, spec.numExtraBefore() );
- ASSERT_EQUALS( 1U, spec.numExtraAfter() );
- ASSERT_EQUALS( StringData("x"), spec.extraAfter(0) );
-
- BSONObj fixed2 = FTSSpec::fixSpec( fixed );
- ASSERT_EQUALS( fixed, fixed2 );
- }
-
- TEST( FTSSpec, Extra3 ) {
- BSONObj user = BSON( "key" << BSON( "x" << 1 << "data" << "text" ) );
- BSONObj fixed = FTSSpec::fixSpec( user );
-
- ASSERT_EQUALS( BSON( "x" << 1 <<
- "_fts" << "text" <<
- "_ftsx" << 1 ),
- fixed["key"].Obj() );
- ASSERT_EQUALS( BSON( "data" << 1 ),
- fixed["weights"].Obj() );
-
- BSONObj fixed2 = FTSSpec::fixSpec( fixed );
- ASSERT_EQUALS( fixed, fixed2 );
-
- FTSSpec spec( fixed );
- ASSERT_EQUALS( 1U, spec.numExtraBefore() );
- ASSERT_EQUALS( StringData("x"), spec.extraBefore(0) );
- ASSERT_EQUALS( 0U, spec.numExtraAfter() );
-
- BSONObj prefix;
-
- ASSERT( spec.getIndexPrefix( BSON( "x" << 2 ), &prefix ).isOK() );
- ASSERT_EQUALS( BSON( "x" << 2 ), prefix );
-
- ASSERT( spec.getIndexPrefix( BSON( "x" << 3 << "y" << 4 ), &prefix ).isOK() );
- ASSERT_EQUALS( BSON( "x" << 3 ), prefix );
-
- ASSERT( !spec.getIndexPrefix( BSON( "x" << BSON( "$gt" << 5 ) ), &prefix ).isOK() );
- ASSERT( !spec.getIndexPrefix( BSON( "y" << 4 ), &prefix ).isOK() );
- ASSERT( !spec.getIndexPrefix( BSONObj(), &prefix ).isOK() );
- }
-
- // Test for correct behavior when encountering nested arrays (both directly nested and
- // indirectly nested).
-
- TEST( FTSSpec, NestedArraysPos1 ) {
- BSONObj user = BSON( "key" << BSON( "a.b" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( user ) );
-
- // The following document matches {"a.b": {$type: 2}}, so "term" should be indexed.
- BSONObj obj = fromjson("{a: [{b: ['term']}]}"); // indirectly nested arrays
- TermFrequencyMap m;
- spec.scoreDocument( obj, &m );
- ASSERT_EQUALS( 1U, m.size() );
- }
-
- TEST( FTSSpec, NestedArraysPos2 ) {
- BSONObj user = BSON( "key" << BSON( "$**" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( user ) );
-
- // The wildcard spec implies a full recursive traversal, so "term" should be indexed.
- BSONObj obj = fromjson("{a: {b: [['term']]}}"); // directly nested arrays
- TermFrequencyMap m;
- spec.scoreDocument( obj, &m );
- ASSERT_EQUALS( 1U, m.size() );
- }
-
- TEST( FTSSpec, NestedArraysNeg1 ) {
- BSONObj user = BSON( "key" << BSON( "a.b" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( user ) );
-
- // The following document does not match {"a.b": {$type: 2}}, so "term" should not be
- // indexed.
- BSONObj obj = fromjson("{a: {b: [['term']]}}"); // directly nested arrays
- TermFrequencyMap m;
- spec.scoreDocument( obj, &m );
- ASSERT_EQUALS( 0U, m.size() );
- }
-
- // Multi-language test_1: test independent stemming per sub-document
- TEST( FTSSpec, NestedLanguages_PerArrayItemStemming ) {
- BSONObj indexSpec = BSON( "key" << BSON( "a.b.c" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
-
- BSONObj obj = fromjson(
- "{ a :"
- " { b :"
- " [ { c : \"walked\", language : \"english\" },"
- " { c : \"camminato\", language : \"italian\" },"
- " { c : \"ging\", language : \"german\" } ]"
- " }"
- " }" );
-
- spec.scoreDocument( obj, &tfm );
-
- set<string> hits;
- hits.insert("walk");
- hits.insert("cammin");
- hits.insert("ging");
-
- for (TermFrequencyMap::const_iterator i = tfm.begin(); i!=tfm.end(); ++i) {
- string term = i->first;
- ASSERT_EQUALS( 1U, hits.count( term ) );
- }
-
- }
-
- // Multi-language test_2: test nested stemming per sub-document
- TEST( FTSSpec, NestedLanguages_PerSubdocStemming ) {
- BSONObj indexSpec = BSON( "key" << BSON( "a.b.c" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " a :"
- " { language : \"danish\","
- " b :"
- " [ { c : \"foredrag\" },"
- " { c : \"foredragsholder\" },"
- " { c : \"lector\" } ]"
- " }"
- "}" );
-
- spec.scoreDocument( obj, &tfm );
-
- set<string> hits;
- hits.insert("foredrag");
- hits.insert("foredragshold");
- hits.insert("lector");
-
- for (TermFrequencyMap::const_iterator i = tfm.begin(); i!=tfm.end(); ++i) {
- string term = i->first;
- ASSERT_EQUALS( 1U, hits.count( term ) );
- }
-
- }
-
- // Multi-language test_3: test nested arrays
- TEST( FTSSpec, NestedLanguages_NestedArrays ) {
- BSONObj indexSpec = BSON( "key" << BSON( "a.b.c" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " a : ["
- " { language : \"danish\","
- " b :"
- " [ { c : [\"foredrag\"] },"
- " { c : [\"foredragsholder\"] },"
- " { c : [\"lector\"] } ]"
- " } ]"
- "}" );
-
- spec.scoreDocument( obj, &tfm );
-
- set<string> hits;
- hits.insert("foredrag");
- hits.insert("foredragshold");
- hits.insert("lector");
-
- for (TermFrequencyMap::const_iterator i = tfm.begin(); i!=tfm.end(); ++i) {
- string term = i->first;
- ASSERT_EQUALS( 1U, hits.count( term ) );
- }
-
- }
-
- // Multi-language test_4: test pruning
- TEST( FTSSpec, NestedLanguages_PathPruning ) {
- BSONObj indexSpec = BSON( "key" << BSON( "a.b.c" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " a : "
- " { language : \"danish\","
- " bc : \"foo\","
- " b : { d: \"bar\" },"
- " b :"
- " [ { c : \"foredrag\" },"
- " { c : \"foredragsholder\" },"
- " { c : \"lector\" } ]"
- " }"
- "}" );
-
- spec.scoreDocument( obj, &tfm );
-
- set<string> hits;
- hits.insert("foredrag");
- hits.insert("foredragshold");
- hits.insert("lector");
-
- for (TermFrequencyMap::const_iterator i = tfm.begin(); i!=tfm.end(); ++i) {
- string term = i->first;
- ASSERT_EQUALS( 1U, hits.count( term ) );
- }
-
- }
-
- // Multi-language test_5: test wildcard spec
- TEST( FTSSpec, NestedLanguages_Wildcard ) {
- BSONObj indexSpec = BSON( "key" << BSON( "$**" << "text" ) );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " b : \"walking\","
- " c : { e: \"walked\" },"
- " d : "
- " { language : \"danish\","
- " e :"
- " [ { f : \"foredrag\" },"
- " { f : \"foredragsholder\" },"
- " { f : \"lector\" } ]"
- " }"
- "}" );
-
- spec.scoreDocument( obj, &tfm );
-
- set<string> hits;
- hits.insert("foredrag");
- hits.insert("foredragshold");
- hits.insert("lector");
- hits.insert("walk");
-
- for (TermFrequencyMap::const_iterator i = tfm.begin(); i!=tfm.end(); ++i) {
- string term = i->first;
- ASSERT_EQUALS( 1U, hits.count( term ) );
- }
-
- }
-
- // Multi-language test_6: test wildcard spec with override
- TEST( FTSSpec, NestedLanguages_WildcardOverride ) {
- BSONObj indexSpec = BSON( "key" << BSON( "$**" << "text" ) <<
- "weights" << BSON( "d.e.f" << 20 ) );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
-
- BSONObj obj = fromjson(
- "{ language : \"english\","
- " b : \"walking\","
- " c : { e: \"walked\" },"
- " d : "
- " { language : \"danish\","
- " e :"
- " [ { f : \"foredrag\" },"
- " { f : \"foredragsholder\" },"
- " { f : \"lector\" } ]"
- " }"
- "}" );
-
- spec.scoreDocument( obj, &tfm );
-
- set<string> hits;
- hits.insert("foredrag");
- hits.insert("foredragshold");
- hits.insert("lector");
- hits.insert("walk");
-
- for (TermFrequencyMap::const_iterator i = tfm.begin(); i!=tfm.end(); ++i) {
- string term = i->first;
- ASSERT_EQUALS( 1U, hits.count( term ) );
- }
-
- }
-
- /** Test differences across textIndexVersion values in handling of nested arrays. */
- TEST( FTSSpec, TextIndexLegacyNestedArrays ) {
- BSONObj obj = fromjson( "{a: [{b: ['hello']}]}" );
-
- // textIndexVersion=1 FTSSpec objects do not index nested arrays.
- {
- BSONObj indexSpec = fromjson( "{key: {'a.b': 'text'}, textIndexVersion: 1}" );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
- spec.scoreDocument( obj, &tfm );
- ASSERT_EQUALS( tfm.size(), 0U );
- }
-
- // textIndexVersion=2 FTSSpec objects do index nested arrays.
- {
- BSONObj indexSpec = fromjson( "{key: {'a.b': 'text'}, textIndexVersion: 2}" );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
- spec.scoreDocument( obj, &tfm );
- ASSERT_EQUALS( tfm.size(), 1U );
- }
- }
-
- /** Test differences across textIndexVersion values in handling of language annotations. */
- TEST( FTSSpec, TextIndexLegacyLanguageRecognition) {
- BSONObj obj = fromjson( "{a: 'the', language: 'EN'}" );
-
- // textIndexVersion=1 FTSSpec objects treat two-letter language annotations as "none"
- // for purposes of stopword processing.
- {
- BSONObj indexSpec = fromjson( "{key: {'a': 'text'}, textIndexVersion: 1}" );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
- spec.scoreDocument( obj, &tfm );
- ASSERT_EQUALS( tfm.size(), 1U ); // "the" not recognized as stopword
- }
-
- // textIndexVersion=2 FTSSpec objects recognize two-letter codes.
- {
- BSONObj indexSpec = fromjson( "{key: {'a': 'text'}, textIndexVersion: 2}" );
- FTSSpec spec( FTSSpec::fixSpec( indexSpec ) );
- TermFrequencyMap tfm;
- spec.scoreDocument( obj, &tfm );
- ASSERT_EQUALS( tfm.size(), 0U ); // "the" recognized as stopword
- }
- }
+using std::set;
+using std::string;
+namespace fts {
+
+/**
+ * Assert that fixSpec() accepts the provided text index spec.
+ */
+void assertFixSuccess(const std::string& s) {
+ BSONObj user = fromjson(s);
+
+ try {
+ // fixSpec() should not throw on a valid spec.
+ BSONObj fixed = FTSSpec::fixSpec(user);
+
+ // fixSpec() on an already-fixed spec shouldn't change it.
+ BSONObj fixed2 = FTSSpec::fixSpec(fixed);
+ ASSERT_EQUALS(fixed, fixed2);
+ } catch (UserException&) {
+ ASSERT(false);
+ }
+}
+
+/**
+ * Assert that fixSpec() rejects the provided text index spec.
+ */
+void assertFixFailure(const std::string& s) {
+ BSONObj user = fromjson(s);
+
+ try {
+ // fixSpec() on an invalid spec should uassert.
+ BSONObj fixed = FTSSpec::fixSpec(user);
+ } catch (UserException&) {
+ return;
+ }
+ ASSERT(false);
+}
+
+TEST(FTSSpec, FixNormalKey1) {
+ assertFixSuccess("{key: {a: 'text'}}");
+ assertFixSuccess("{key: {a: 'text', b: 'text'}}");
+ assertFixSuccess("{key: {a: 'text', b: 'text', c: 'text'}}");
+
+ assertFixFailure("{key: {_fts: 'text'}}"); // not allowed to index reserved field
+ assertFixFailure("{key: {_ftsx: 'text'}}");
+}
+
+TEST(FTSSpec, FixCompoundKey1) {
+ assertFixSuccess("{key: {a: 'text', b: 1.0}}");
+ assertFixSuccess("{key: {a: 'text', b: NumberInt(1)}}");
+ assertFixSuccess("{key: {a: 'text', b: NumberLong(1)}}");
+ assertFixSuccess("{key: {a: 'text', b: -1.0}}");
+ assertFixSuccess("{key: {a: 'text', b: NumberInt(-1)}}");
+ assertFixSuccess("{key: {a: 'text', b: NumberLong(-1)}}");
+ assertFixSuccess("{key: {a: 1.0, b: 'text'}}");
+ assertFixSuccess("{key: {a: NumberInt(1), b: 'text'}}");
+ assertFixSuccess("{key: {a: NumberLong(1), b: 'text'}}");
+ assertFixSuccess("{key: {a: -1, b: 'text'}}");
+ assertFixSuccess("{key: {a: 1, b: 1, c: 'text'}}");
+ assertFixSuccess("{key: {a: 1, b: -1, c: 'text'}}");
+ assertFixSuccess("{key: {a: -1, b: 1, c: 'text'}}");
+ assertFixSuccess("{key: {a: 1, b: 'text', c: 1}}");
+ assertFixSuccess("{key: {a: 'text', b: 1, c: 1}}");
+ assertFixSuccess("{key: {a: 'text', b: 1, c: -1}}");
+ assertFixSuccess("{key: {a: 'text', b: 'text', c: 1}}");
+ assertFixSuccess("{key: {a: 1, b: 'text', c: 'text'}}");
+
+ assertFixFailure("{key: {a: 'text', b: 0}}");
+ assertFixFailure("{key: {a: 'text', b: '2d'}}"); // not allowed to mix special indexes
+ assertFixFailure("{key: {a: 'text', b: '1'}}");
+ assertFixFailure("{key: {a: 'text', _fts: 1}}");
+ assertFixFailure("{key: {a: 'text', _fts: 'text'}}");
+ assertFixFailure("{key: {a: 'text', _ftsx: 1}}");
+ assertFixFailure("{key: {a: 'text', _ftsx: 'text'}}");
+ assertFixFailure("{key: {_fts: 1, a: 'text'}}");
+ assertFixFailure("{key: {_fts: 'text', a: 'text'}}");
+ assertFixFailure("{key: {_ftsx: 1, a: 'text'}}");
+ assertFixFailure("{key: {_ftsx: 'text', a: 'text'}}");
+ assertFixFailure("{key: {a: 'text', b: 1, c: 'text'}}"); // 'text' must all be adjacent
+ assertFixFailure("{key: {a: 'text', b: 1, c: 'text', d: 1}}");
+ assertFixFailure("{key: {a: 1, b: 'text', c: 1, d: 'text', e: 1}}");
+}
+
+TEST(FTSSpec, FixDefaultLanguage1) {
+ assertFixSuccess("{key: {a: 'text'}, default_language: 'english'}");
+ assertFixSuccess("{key: {a: 'text'}, default_language: 'engLISH'}");
+ assertFixSuccess("{key: {a: 'text'}, default_language: 'en'}");
+ assertFixSuccess("{key: {a: 'text'}, default_language: 'eN'}");
+ assertFixSuccess("{key: {a: 'text'}, default_language: 'spanish'}");
+ assertFixSuccess("{key: {a: 'text'}, default_language: 'none'}");
+
+ assertFixFailure("{key: {a: 'text'}, default_language: 'engrish'}");
+ assertFixFailure("{key: {a: 'text'}, default_language: ' english'}");
+ assertFixFailure("{key: {a: 'text'}, default_language: ''}");
+}
+
+TEST(FTSSpec, FixWeights1) {
+ assertFixSuccess("{key: {a: 'text'}, weights: {}}");
+ assertFixSuccess("{key: {a: 'text'}, weights: {a: 1.0}}");
+ assertFixSuccess("{key: {a: 'text'}, weights: {a: NumberInt(1)}}");
+ assertFixSuccess("{key: {a: 'text'}, weights: {a: NumberLong(1)}}");
+ assertFixSuccess("{key: {a: 'text'}, weights: {a: 99999}}");
+ assertFixSuccess("{key: {'$**': 'text'}, weights: {'a.b': 2}}");
+ assertFixSuccess("{key: {'$**': 'text'}, weights: {a: 2, b: 2}}");
+ assertFixSuccess("{key: {'$**': 'text'}, weights: {'$**': 2}}");
+
+ assertFixFailure("{key: {a: 'text'}, weights: 0}");
+ assertFixFailure("{key: {a: 'text'}, weights: []}");
+ assertFixFailure("{key: {a: 'text'}, weights: 'x'}");
+ assertFixFailure("{key: {a: 'text'}, weights: {a: 0}}");
+ assertFixFailure("{key: {a: 'text'}, weights: {a: -1}}");
+ assertFixFailure("{key: {a: 'text'}, weights: {a: 100000}}"); // above max weight
+ assertFixFailure("{key: {a: 'text'}, weights: {a: '1'}}");
+ assertFixFailure("{key: {a: 'text'}, weights: {'': 1}}"); // "invalid" path
+ assertFixFailure("{key: {a: 'text'}, weights: {'a.': 1}}");
+ assertFixFailure("{key: {a: 'text'}, weights: {'.a': 1}}");
+ assertFixFailure("{key: {a: 'text'}, weights: {'a..a': 1}}");
+ assertFixFailure("{key: {a: 'text'}, weights: {$a: 1}}");
+ assertFixFailure("{key: {a: 'text'}, weights: {'a.$a': 1}}");
+ assertFixFailure("{key: {a: 'text'}, weights: {'a.$**': 1}}");
+}
+
+TEST(FTSSpec, FixLanguageOverride1) {
+ assertFixSuccess("{key: {a: 'text'}, language_override: 'foo'}");
+ assertFixSuccess("{key: {a: 'text'}, language_override: 'foo$bar'}");
+
+ assertFixFailure("{key: {a: 'text'}, language_override: 'foo.bar'}"); // can't have '.'
+ assertFixFailure("{key: {a: 'text'}, language_override: ''}");
+ assertFixFailure("{key: {a: 'text'}, language_override: '$foo'}");
+}
+
+TEST(FTSSpec, FixTextIndexVersion1) {
+ assertFixSuccess("{key: {a: 'text'}, textIndexVersion: 1.0}}");
+ assertFixSuccess("{key: {a: 'text'}, textIndexVersion: NumberInt(1)}}");
+ assertFixSuccess("{key: {a: 'text'}, textIndexVersion: NumberLong(1)}}");
+ assertFixSuccess("{key: {a: 'text'}, textIndexVersion: 2.0}}");
+ assertFixSuccess("{key: {a: 'text'}, textIndexVersion: NumberInt(2)}}");
+ assertFixSuccess("{key: {a: 'text'}, textIndexVersion: NumberLong(2)}}");
+
+ assertFixFailure("{key: {a: 'text'}, textIndexVersion: 3}");
+ assertFixFailure("{key: {a: 'text'}, textIndexVersion: '2'}");
+ assertFixFailure("{key: {a: 'text'}, textIndexVersion: {}}");
+}
+
+TEST(FTSSpec, ScoreSingleField1) {
+ BSONObj user = BSON("key" << BSON("title"
+ << "text"
+ << "text"
+ << "text") << "weights" << BSON("title" << 10));
+
+ FTSSpec spec(FTSSpec::fixSpec(user));
+
+ TermFrequencyMap m;
+ spec.scoreDocument(BSON("title"
+ << "cat sat run"),
+ &m);
+ ASSERT_EQUALS(3U, m.size());
+ ASSERT_EQUALS(m["cat"], m["sat"]);
+ ASSERT_EQUALS(m["cat"], m["run"]);
+ ASSERT(m["cat"] > 0);
+}
+
+TEST(FTSSpec, ScoreMultipleField1) {
+ BSONObj user = BSON("key" << BSON("title"
+ << "text"
+ << "text"
+ << "text") << "weights" << BSON("title" << 10));
+
+ FTSSpec spec(FTSSpec::fixSpec(user));
+
+ TermFrequencyMap m;
+ spec.scoreDocument(BSON("title"
+ << "cat sat run"
+ << "text"
+ << "cat book"),
+ &m);
+
+ ASSERT_EQUALS(4U, m.size());
+ ASSERT_EQUALS(m["sat"], m["run"]);
+ ASSERT(m["sat"] > 0);
+
+ ASSERT(m["cat"] > m["sat"]);
+ ASSERT(m["cat"] > m["book"]);
+ ASSERT(m["book"] > 0);
+ ASSERT(m["book"] < m["sat"]);
+}
+
+TEST(FTSSpec, ScoreMultipleField2) {
+ // Test where one indexed field is a parent component of another indexed field.
+ BSONObj user = BSON("key" << BSON("a"
+ << "text"
+ << "a.b"
+ << "text"));
+
+ FTSSpec spec(FTSSpec::fixSpec(user));
+
+ TermFrequencyMap m;
+ spec.scoreDocument(BSON("a" << BSON("b"
+ << "term")),
+ &m);
+ ASSERT_EQUALS(1U, m.size());
+}
+
+TEST(FTSSpec, ScoreRepeatWord) {
+ BSONObj user = BSON("key" << BSON("title"
+ << "text"
+ << "text"
+ << "text") << "weights" << BSON("title" << 10));
+
+ FTSSpec spec(FTSSpec::fixSpec(user));
+
+ TermFrequencyMap m;
+ spec.scoreDocument(BSON("title"
+ << "cat sat sat run run run"),
+ &m);
+ ASSERT_EQUALS(3U, m.size());
+ ASSERT(m["cat"] > 0);
+ ASSERT(m["sat"] > m["cat"]);
+ ASSERT(m["run"] > m["sat"]);
+}
+
+TEST(FTSSpec, Extra1) {
+ BSONObj user = BSON("key" << BSON("data"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(user));
+ ASSERT_EQUALS(0U, spec.numExtraBefore());
+ ASSERT_EQUALS(0U, spec.numExtraAfter());
+}
+
+TEST(FTSSpec, Extra2) {
+ BSONObj user = BSON("key" << BSON("data"
+ << "text"
+ << "x" << 1));
+ BSONObj fixed = FTSSpec::fixSpec(user);
+ FTSSpec spec(fixed);
+ ASSERT_EQUALS(0U, spec.numExtraBefore());
+ ASSERT_EQUALS(1U, spec.numExtraAfter());
+ ASSERT_EQUALS(StringData("x"), spec.extraAfter(0));
+
+ BSONObj fixed2 = FTSSpec::fixSpec(fixed);
+ ASSERT_EQUALS(fixed, fixed2);
+}
+
+TEST(FTSSpec, Extra3) {
+ BSONObj user = BSON("key" << BSON("x" << 1 << "data"
+ << "text"));
+ BSONObj fixed = FTSSpec::fixSpec(user);
+
+ ASSERT_EQUALS(BSON("x" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1),
+ fixed["key"].Obj());
+ ASSERT_EQUALS(BSON("data" << 1), fixed["weights"].Obj());
+
+ BSONObj fixed2 = FTSSpec::fixSpec(fixed);
+ ASSERT_EQUALS(fixed, fixed2);
+
+ FTSSpec spec(fixed);
+ ASSERT_EQUALS(1U, spec.numExtraBefore());
+ ASSERT_EQUALS(StringData("x"), spec.extraBefore(0));
+ ASSERT_EQUALS(0U, spec.numExtraAfter());
+
+ BSONObj prefix;
+
+ ASSERT(spec.getIndexPrefix(BSON("x" << 2), &prefix).isOK());
+ ASSERT_EQUALS(BSON("x" << 2), prefix);
+
+ ASSERT(spec.getIndexPrefix(BSON("x" << 3 << "y" << 4), &prefix).isOK());
+ ASSERT_EQUALS(BSON("x" << 3), prefix);
+
+ ASSERT(!spec.getIndexPrefix(BSON("x" << BSON("$gt" << 5)), &prefix).isOK());
+ ASSERT(!spec.getIndexPrefix(BSON("y" << 4), &prefix).isOK());
+ ASSERT(!spec.getIndexPrefix(BSONObj(), &prefix).isOK());
+}
+
+// Test for correct behavior when encountering nested arrays (both directly nested and
+// indirectly nested).
+
+TEST(FTSSpec, NestedArraysPos1) {
+ BSONObj user = BSON("key" << BSON("a.b"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(user));
+
+ // The following document matches {"a.b": {$type: 2}}, so "term" should be indexed.
+ BSONObj obj = fromjson("{a: [{b: ['term']}]}"); // indirectly nested arrays
+ TermFrequencyMap m;
+ spec.scoreDocument(obj, &m);
+ ASSERT_EQUALS(1U, m.size());
+}
+
+TEST(FTSSpec, NestedArraysPos2) {
+ BSONObj user = BSON("key" << BSON("$**"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(user));
+
+ // The wildcard spec implies a full recursive traversal, so "term" should be indexed.
+ BSONObj obj = fromjson("{a: {b: [['term']]}}"); // directly nested arrays
+ TermFrequencyMap m;
+ spec.scoreDocument(obj, &m);
+ ASSERT_EQUALS(1U, m.size());
+}
+
+TEST(FTSSpec, NestedArraysNeg1) {
+ BSONObj user = BSON("key" << BSON("a.b"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(user));
+
+ // The following document does not match {"a.b": {$type: 2}}, so "term" should not be
+ // indexed.
+ BSONObj obj = fromjson("{a: {b: [['term']]}}"); // directly nested arrays
+ TermFrequencyMap m;
+ spec.scoreDocument(obj, &m);
+ ASSERT_EQUALS(0U, m.size());
+}
+
+// Multi-language test_1: test independent stemming per array item
+TEST(FTSSpec, NestedLanguages_PerArrayItemStemming) {
+ BSONObj indexSpec = BSON("key" << BSON("a.b.c"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+
+ BSONObj obj = fromjson(
+ "{ a :"
+ " { b :"
+ " [ { c : \"walked\", language : \"english\" },"
+ " { c : \"camminato\", language : \"italian\" },"
+ " { c : \"ging\", language : \"german\" } ]"
+ " }"
+ " }");
+
+ spec.scoreDocument(obj, &tfm);
+
+ set<string> hits;
+ hits.insert("walk");
+ hits.insert("cammin");
+ hits.insert("ging");
+
+ for (TermFrequencyMap::const_iterator i = tfm.begin(); i != tfm.end(); ++i) {
+ string term = i->first;
+ ASSERT_EQUALS(1U, hits.count(term));
+ }
+}
+
+// Multi-language test_2: test nested stemming per sub-document
+TEST(FTSSpec, NestedLanguages_PerSubdocStemming) {
+ BSONObj indexSpec = BSON("key" << BSON("a.b.c"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " a :"
+ " { language : \"danish\","
+ " b :"
+ " [ { c : \"foredrag\" },"
+ " { c : \"foredragsholder\" },"
+ " { c : \"lector\" } ]"
+ " }"
+ "}");
+
+ spec.scoreDocument(obj, &tfm);
+
+ set<string> hits;
+ hits.insert("foredrag");
+ hits.insert("foredragshold");
+ hits.insert("lector");
+
+ for (TermFrequencyMap::const_iterator i = tfm.begin(); i != tfm.end(); ++i) {
+ string term = i->first;
+ ASSERT_EQUALS(1U, hits.count(term));
}
}
+
+// Multi-language test_3: test nested arrays
+TEST(FTSSpec, NestedLanguages_NestedArrays) {
+ BSONObj indexSpec = BSON("key" << BSON("a.b.c"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " a : ["
+ " { language : \"danish\","
+ " b :"
+ " [ { c : [\"foredrag\"] },"
+ " { c : [\"foredragsholder\"] },"
+ " { c : [\"lector\"] } ]"
+ " } ]"
+ "}");
+
+ spec.scoreDocument(obj, &tfm);
+
+ set<string> hits;
+ hits.insert("foredrag");
+ hits.insert("foredragshold");
+ hits.insert("lector");
+
+ for (TermFrequencyMap::const_iterator i = tfm.begin(); i != tfm.end(); ++i) {
+ string term = i->first;
+ ASSERT_EQUALS(1U, hits.count(term));
+ }
+}
+
+// Multi-language test_4: test pruning
+TEST(FTSSpec, NestedLanguages_PathPruning) {
+ BSONObj indexSpec = BSON("key" << BSON("a.b.c"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " a : "
+ " { language : \"danish\","
+ " bc : \"foo\","
+ " b : { d: \"bar\" },"
+ " b :"
+ " [ { c : \"foredrag\" },"
+ " { c : \"foredragsholder\" },"
+ " { c : \"lector\" } ]"
+ " }"
+ "}");
+
+ spec.scoreDocument(obj, &tfm);
+
+ set<string> hits;
+ hits.insert("foredrag");
+ hits.insert("foredragshold");
+ hits.insert("lector");
+
+ for (TermFrequencyMap::const_iterator i = tfm.begin(); i != tfm.end(); ++i) {
+ string term = i->first;
+ ASSERT_EQUALS(1U, hits.count(term));
+ }
+}
+
+// Multi-language test_5: test wildcard spec
+TEST(FTSSpec, NestedLanguages_Wildcard) {
+ BSONObj indexSpec = BSON("key" << BSON("$**"
+ << "text"));
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " b : \"walking\","
+ " c : { e: \"walked\" },"
+ " d : "
+ " { language : \"danish\","
+ " e :"
+ " [ { f : \"foredrag\" },"
+ " { f : \"foredragsholder\" },"
+ " { f : \"lector\" } ]"
+ " }"
+ "}");
+
+ spec.scoreDocument(obj, &tfm);
+
+ set<string> hits;
+ hits.insert("foredrag");
+ hits.insert("foredragshold");
+ hits.insert("lector");
+ hits.insert("walk");
+
+ for (TermFrequencyMap::const_iterator i = tfm.begin(); i != tfm.end(); ++i) {
+ string term = i->first;
+ ASSERT_EQUALS(1U, hits.count(term));
+ }
+}
+
+// Multi-language test_6: test wildcard spec with override
+TEST(FTSSpec, NestedLanguages_WildcardOverride) {
+ BSONObj indexSpec = BSON("key" << BSON("$**"
+ << "text") << "weights" << BSON("d.e.f" << 20));
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+
+ BSONObj obj = fromjson(
+ "{ language : \"english\","
+ " b : \"walking\","
+ " c : { e: \"walked\" },"
+ " d : "
+ " { language : \"danish\","
+ " e :"
+ " [ { f : \"foredrag\" },"
+ " { f : \"foredragsholder\" },"
+ " { f : \"lector\" } ]"
+ " }"
+ "}");
+
+ spec.scoreDocument(obj, &tfm);
+
+ set<string> hits;
+ hits.insert("foredrag");
+ hits.insert("foredragshold");
+ hits.insert("lector");
+ hits.insert("walk");
+
+ for (TermFrequencyMap::const_iterator i = tfm.begin(); i != tfm.end(); ++i) {
+ string term = i->first;
+ ASSERT_EQUALS(1U, hits.count(term));
+ }
+}
+
+/** Test differences across textIndexVersion values in handling of nested arrays. */
+TEST(FTSSpec, TextIndexLegacyNestedArrays) {
+ BSONObj obj = fromjson("{a: [{b: ['hello']}]}");
+
+ // textIndexVersion=1 FTSSpec objects do not index nested arrays.
+ {
+ BSONObj indexSpec = fromjson("{key: {'a.b': 'text'}, textIndexVersion: 1}");
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+ spec.scoreDocument(obj, &tfm);
+ ASSERT_EQUALS(tfm.size(), 0U);
+ }
+
+ // textIndexVersion=2 FTSSpec objects do index nested arrays.
+ {
+ BSONObj indexSpec = fromjson("{key: {'a.b': 'text'}, textIndexVersion: 2}");
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+ spec.scoreDocument(obj, &tfm);
+ ASSERT_EQUALS(tfm.size(), 1U);
+ }
+}
+
+/** Test differences across textIndexVersion values in handling of language annotations. */
+TEST(FTSSpec, TextIndexLegacyLanguageRecognition) {
+ BSONObj obj = fromjson("{a: 'the', language: 'EN'}");
+
+ // textIndexVersion=1 FTSSpec objects treat two-letter language annotations as "none"
+ // for purposes of stopword processing.
+ {
+ BSONObj indexSpec = fromjson("{key: {'a': 'text'}, textIndexVersion: 1}");
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+ spec.scoreDocument(obj, &tfm);
+ ASSERT_EQUALS(tfm.size(), 1U); // "the" not recognized as stopword
+ }
+
+ // textIndexVersion=2 FTSSpec objects recognize two-letter codes.
+ {
+ BSONObj indexSpec = fromjson("{key: {'a': 'text'}, textIndexVersion: 2}");
+ FTSSpec spec(FTSSpec::fixSpec(indexSpec));
+ TermFrequencyMap tfm;
+ spec.scoreDocument(obj, &tfm);
+ ASSERT_EQUALS(tfm.size(), 0U); // "the" recognized as stopword
+ }
+}
+}
+}
diff --git a/src/mongo/db/fts/fts_tokenizer.h b/src/mongo/db/fts/fts_tokenizer.h
index 2b345d89266..40cdbde2cb8 100644
--- a/src/mongo/db/fts/fts_tokenizer.h
+++ b/src/mongo/db/fts/fts_tokenizer.h
@@ -35,58 +35,58 @@
namespace mongo {
namespace fts {
- class FTSLanguage;
- class StopWords;
+class FTSLanguage;
+class StopWords;
+
+/**
+ * FTSTokenizer
+ * An iterator over "documents", where a document contains space-delimited words.
+ * For each word, returns a stemmed or lemmatized form optimized for full-text indexing.
+ * Supports various options to control how tokens are generated.
+ */
+class FTSTokenizer {
+public:
+ virtual ~FTSTokenizer() = default;
/**
- * FTSTokenizer
- * A iterator of "documents" where a document contains space delimited words.
- * For each word returns a stem or lemma version of a word optimized for full text indexing.
- * Supports various options to control how tokens are generated.
+ * Options for generating tokens
*/
- class FTSTokenizer {
- public:
- virtual ~FTSTokenizer() = default;
-
- /**
- * Options for generating tokens
- */
- enum Options {
- /**
- * Default means lower cased, and stop words are not filtered.
- */
- None = 0,
-
- /**
- * Do not lower case terms.
- */
- GenerateCaseSensitiveTokens = 1 << 0,
-
- /**
- * Filter out stop words from return tokens.
- */
- FilterStopWords = 1 << 1,
- };
-
+ enum Options {
/**
- * Process a new document, and discards any previous results.
- * May be called multiple times on an instance of an iterator.
+ * Default means lower cased, and stop words are not filtered.
*/
- virtual void reset(StringData document, Options options) = 0;
+ None = 0,
/**
- * Moves to the next token in the iterator.
- * Returns false when the iterator reaches end of the document.
+ * Do not lower case terms.
*/
- virtual bool moveNext() = 0;
+ GenerateCaseSensitiveTokens = 1 << 0,
/**
- * Returns stemmed form, normalized, and lowercased depending on the parameter
- * to the reset method.
- * Returned StringData is valid until next call to moveNext().
+     * Filter out stop words from returned tokens.
*/
- virtual StringData get() const = 0;
+ FilterStopWords = 1 << 1,
};
-} // namespace fts
-} // namespace mongo
+ /**
+     * Processes a new document and discards any previous results.
+     * May be called multiple times on an instance of an iterator.
+ */
+ virtual void reset(StringData document, Options options) = 0;
+
+ /**
+ * Moves to the next token in the iterator.
+ * Returns false when the iterator reaches end of the document.
+ */
+ virtual bool moveNext() = 0;
+
+ /**
+     * Returns the stemmed form of the current token, normalized and lowercased depending
+     * on the options passed to the reset method.
+     * The returned StringData is valid until the next call to moveNext().
+ */
+ virtual StringData get() const = 0;
+};
+
+} // namespace fts
+} // namespace mongo
diff --git a/src/mongo/db/fts/fts_util.cpp b/src/mongo/db/fts/fts_util.cpp
index f2bd4e50905..85420fc66ad 100644
--- a/src/mongo/db/fts/fts_util.cpp
+++ b/src/mongo/db/fts/fts_util.cpp
@@ -32,11 +32,9 @@
namespace mongo {
- namespace fts {
+namespace fts {
- const std::string INDEX_NAME = "text";
- const std::string WILDCARD = "$**";
-
- }
+const std::string INDEX_NAME = "text";
+const std::string WILDCARD = "$**";
+}
}
-
diff --git a/src/mongo/db/fts/fts_util.h b/src/mongo/db/fts/fts_util.h
index 7cde2bbe985..a1377162443 100644
--- a/src/mongo/db/fts/fts_util.h
+++ b/src/mongo/db/fts/fts_util.h
@@ -36,16 +36,14 @@
namespace mongo {
- namespace fts {
+namespace fts {
- extern const std::string WILDCARD;
- extern const std::string INDEX_NAME;
+extern const std::string WILDCARD;
+extern const std::string INDEX_NAME;
- enum TextIndexVersion {
- TEXT_INDEX_VERSION_1 = 1, // Legacy index format. Deprecated.
- TEXT_INDEX_VERSION_2 = 2 // Current index format.
- };
-
- }
+enum TextIndexVersion {
+ TEXT_INDEX_VERSION_1 = 1, // Legacy index format. Deprecated.
+ TEXT_INDEX_VERSION_2 = 2 // Current index format.
+};
+}
}
-
diff --git a/src/mongo/db/fts/stemmer.cpp b/src/mongo/db/fts/stemmer.cpp
index 9353fccf297..07d17c050eb 100644
--- a/src/mongo/db/fts/stemmer.cpp
+++ b/src/mongo/db/fts/stemmer.cpp
@@ -36,39 +36,36 @@
namespace mongo {
- namespace fts {
+namespace fts {
- using std::string;
+using std::string;
- Stemmer::Stemmer( const FTSLanguage* language ) {
- _stemmer = NULL;
- if ( language->str() != "none" )
- _stemmer = sb_stemmer_new(language->str().c_str(), "UTF_8");
- }
-
- Stemmer::~Stemmer() {
- if ( _stemmer ) {
- sb_stemmer_delete(_stemmer);
- _stemmer = NULL;
- }
- }
-
- string Stemmer::stem( StringData word ) const {
- if ( !_stemmer )
- return word.toString();
+Stemmer::Stemmer(const FTSLanguage* language) {
+ _stemmer = NULL;
+ if (language->str() != "none")
+ _stemmer = sb_stemmer_new(language->str().c_str(), "UTF_8");
+}
- const sb_symbol* sb_sym = sb_stemmer_stem( _stemmer,
- (const sb_symbol*)word.rawData(),
- word.size() );
+Stemmer::~Stemmer() {
+ if (_stemmer) {
+ sb_stemmer_delete(_stemmer);
+ _stemmer = NULL;
+ }
+}
- if ( sb_sym == NULL ) {
- // out of memory
- invariant( false );
- }
+string Stemmer::stem(StringData word) const {
+ if (!_stemmer)
+ return word.toString();
- return string( (const char*)(sb_sym), sb_stemmer_length( _stemmer ) );
- }
+ const sb_symbol* sb_sym =
+ sb_stemmer_stem(_stemmer, (const sb_symbol*)word.rawData(), word.size());
+ if (sb_sym == NULL) {
+ // out of memory
+ invariant(false);
}
+ return string((const char*)(sb_sym), sb_stemmer_length(_stemmer));
+}
+}
}
diff --git a/src/mongo/db/fts/stemmer.h b/src/mongo/db/fts/stemmer.h
index d6d76e64218..59261bfb6a0 100644
--- a/src/mongo/db/fts/stemmer.h
+++ b/src/mongo/db/fts/stemmer.h
@@ -39,23 +39,24 @@
namespace mongo {
- namespace fts {
-
- /**
- * maintains case
- * but works
- * running/Running -> run/Run
- */
- class Stemmer {
- MONGO_DISALLOW_COPYING( Stemmer );
- public:
- Stemmer( const FTSLanguage* language );
- ~Stemmer();
-
- std::string stem( StringData word ) const;
- private:
- struct sb_stemmer* _stemmer;
- };
- }
-}
+namespace fts {
+/**
+ * Stems words while preserving their
+ * original case, e.g.
+ * running/Running -> run/Run
+ */
+class Stemmer {
+ MONGO_DISALLOW_COPYING(Stemmer);
+
+public:
+ Stemmer(const FTSLanguage* language);
+ ~Stemmer();
+
+ std::string stem(StringData word) const;
+
+private:
+ struct sb_stemmer* _stemmer;
+};
+}
+}
diff --git a/src/mongo/db/fts/stemmer_test.cpp b/src/mongo/db/fts/stemmer_test.cpp
index bef556bf2ad..d40d25e8348 100644
--- a/src/mongo/db/fts/stemmer_test.cpp
+++ b/src/mongo/db/fts/stemmer_test.cpp
@@ -35,19 +35,18 @@
#include "mongo/db/fts/stemmer.h"
namespace mongo {
- namespace fts {
+namespace fts {
- TEST( English, Stemmer1 ) {
- Stemmer s( &languageEnglishV2 );
- ASSERT_EQUALS( "run", s.stem( "running" ) );
- ASSERT_EQUALS( "Run", s.stem( "Running" ) );
- }
-
- TEST( English, Caps ) {
- Stemmer s( &languagePorterV1 );
- ASSERT_EQUALS( "unit", s.stem( "united" ) );
- ASSERT_EQUALS( "Unite", s.stem( "United" ) );
- }
+TEST(English, Stemmer1) {
+ Stemmer s(&languageEnglishV2);
+ ASSERT_EQUALS("run", s.stem("running"));
+ ASSERT_EQUALS("Run", s.stem("Running"));
+}
- }
+TEST(English, Caps) {
+ Stemmer s(&languagePorterV1);
+ ASSERT_EQUALS("unit", s.stem("united"));
+ ASSERT_EQUALS("Unite", s.stem("United"));
+}
+}
}
diff --git a/src/mongo/db/fts/stop_words.cpp b/src/mongo/db/fts/stop_words.cpp
index 421bfae63db..0a44eaf25ff 100644
--- a/src/mongo/db/fts/stop_words.cpp
+++ b/src/mongo/db/fts/stop_words.cpp
@@ -38,43 +38,38 @@
namespace mongo {
- namespace fts {
+namespace fts {
- void loadStopWordMap( StringMap< std::set< std::string > >* m );
-
- namespace {
- StringMap< std::shared_ptr<StopWords> > StopWordsMap;
- StopWords empty;
- }
+void loadStopWordMap(StringMap<std::set<std::string>>* m);
+namespace {
+StringMap<std::shared_ptr<StopWords>> StopWordsMap;
+StopWords empty;
+}
- StopWords::StopWords(){
- }
- StopWords::StopWords( const std::set<std::string>& words ) {
- for ( std::set<std::string>::const_iterator i = words.begin(); i != words.end(); ++i )
- _words.insert( *i );
- }
+StopWords::StopWords() {}
- const StopWords* StopWords::getStopWords( const FTSLanguage* language ) {
- auto i = StopWordsMap.find( language->str() );
- if ( i == StopWordsMap.end() )
- return &empty;
- return i->second.get();
- }
+StopWords::StopWords(const std::set<std::string>& words) {
+ for (std::set<std::string>::const_iterator i = words.begin(); i != words.end(); ++i)
+ _words.insert(*i);
+}
+const StopWords* StopWords::getStopWords(const FTSLanguage* language) {
+ auto i = StopWordsMap.find(language->str());
+ if (i == StopWordsMap.end())
+ return &empty;
+ return i->second.get();
+}
- MONGO_INITIALIZER(StopWords)(InitializerContext* context) {
- StringMap< std::set< std::string > > raw;
- loadStopWordMap( &raw );
- for ( StringMap< std::set< std::string > >::const_iterator i = raw.begin();
- i != raw.end();
- ++i ) {
- StopWordsMap[i->first].reset(new StopWords( i->second ));
- }
- return Status::OK();
- }
+MONGO_INITIALIZER(StopWords)(InitializerContext* context) {
+ StringMap<std::set<std::string>> raw;
+ loadStopWordMap(&raw);
+ for (StringMap<std::set<std::string>>::const_iterator i = raw.begin(); i != raw.end(); ++i) {
+ StopWordsMap[i->first].reset(new StopWords(i->second));
}
-
+ return Status::OK();
+}
+}
}
diff --git a/src/mongo/db/fts/stop_words.h b/src/mongo/db/fts/stop_words.h
index d989b4dcd32..eebc11c012a 100644
--- a/src/mongo/db/fts/stop_words.h
+++ b/src/mongo/db/fts/stop_words.h
@@ -39,25 +39,27 @@
namespace mongo {
- namespace fts {
+namespace fts {
- class StopWords {
- MONGO_DISALLOW_COPYING( StopWords );
- public:
- StopWords();
- StopWords( const std::set<std::string>& words );
+class StopWords {
+ MONGO_DISALLOW_COPYING(StopWords);
- bool isStopWord( const std::string& word ) const {
- return _words.count( word ) > 0;
- }
+public:
+ StopWords();
+ StopWords(const std::set<std::string>& words);
- size_t numStopWords() const { return _words.size(); }
-
- static const StopWords* getStopWords( const FTSLanguage* language );
- private:
- unordered_set<std::string> _words;
- };
+ bool isStopWord(const std::string& word) const {
+ return _words.count(word) > 0;
+ }
+ size_t numStopWords() const {
+ return _words.size();
}
-}
+ static const StopWords* getStopWords(const FTSLanguage* language);
+
+private:
+ unordered_set<std::string> _words;
+};
+}
+}
diff --git a/src/mongo/db/fts/stop_words_test.cpp b/src/mongo/db/fts/stop_words_test.cpp
index 248c4d93407..5834503dd4a 100644
--- a/src/mongo/db/fts/stop_words_test.cpp
+++ b/src/mongo/db/fts/stop_words_test.cpp
@@ -33,13 +33,12 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- namespace fts {
+namespace fts {
- TEST( English, Basic1 ) {
- const StopWords* englishStopWords = StopWords::getStopWords( &languageEnglishV2 );
- ASSERT( englishStopWords->isStopWord( "the" ) );
- ASSERT( !englishStopWords->isStopWord( "computer" ) );
- }
-
- }
+TEST(English, Basic1) {
+ const StopWords* englishStopWords = StopWords::getStopWords(&languageEnglishV2);
+ ASSERT(englishStopWords->isStopWord("the"));
+ ASSERT(!englishStopWords->isStopWord("computer"));
+}
+}
}
diff --git a/src/mongo/db/fts/tokenizer.cpp b/src/mongo/db/fts/tokenizer.cpp
index 01037a85c8d..e1f595b9a4a 100644
--- a/src/mongo/db/fts/tokenizer.cpp
+++ b/src/mongo/db/fts/tokenizer.cpp
@@ -36,105 +36,103 @@
namespace mongo {
- namespace fts {
-
- Tokenizer::Tokenizer(const FTSLanguage* language, StringData str)
- : _pos(0), _raw( str ) {
- _english = ( language->str() == "english" );
- _skipWhitespace();
- }
-
- bool Tokenizer::more() const {
- return _pos < _raw.size();
- }
-
- Token Tokenizer::next() {
- if ( _pos >= _raw.size() )
- return Token(Token::INVALID, "", 0);
-
- unsigned start = _pos++;
- Token::Type type = _type( _raw[start] );
- if ( type == Token::WHITESPACE ) invariant( false );
-
- if ( type == Token::TEXT )
- while ( _pos < _raw.size() && _type( _raw[_pos] ) == type )
- _pos++;
-
- StringData ret = _raw.substr( start, _pos - start );
- _skipWhitespace();
- return Token( type, ret, start );
- }
-
-
- bool Tokenizer::_skipWhitespace() {
- unsigned start = _pos;
- while ( _pos < _raw.size() && _type( _raw[_pos] ) == Token::WHITESPACE )
- _pos++;
- return _pos > start;
- }
-
-
- Token::Type Tokenizer::_type( char c ) const {
- switch ( c ) {
- case ' ':
- case '\f':
- case '\v':
- case '\t':
- case '\r':
- case '\n':
- return Token::WHITESPACE;
- case '\'':
- if ( _english )
- return Token::TEXT;
- else
- return Token::WHITESPACE;
-
- case '~':
- case '`':
-
- case '!':
- case '@':
- case '#':
- case '$':
- case '%':
- case '^':
- case '&':
- case '*':
- case '(':
- case ')':
-
- case '-':
-
- case '=':
- case '+':
-
- case '[':
- case ']':
- case '{':
- case '}':
- case '|':
- case '\\':
-
- case ';':
- case ':':
-
- case '"':
-
- case '<':
- case '>':
-
- case ',':
- case '.':
-
- case '/':
- case '?':
-
- return Token::DELIMITER;
- default:
+namespace fts {
+
+Tokenizer::Tokenizer(const FTSLanguage* language, StringData str) : _pos(0), _raw(str) {
+ _english = (language->str() == "english");
+ _skipWhitespace();
+}
+
+bool Tokenizer::more() const {
+ return _pos < _raw.size();
+}
+
+Token Tokenizer::next() {
+ if (_pos >= _raw.size())
+ return Token(Token::INVALID, "", 0);
+
+ unsigned start = _pos++;
+ Token::Type type = _type(_raw[start]);
+ if (type == Token::WHITESPACE)
+ invariant(false);
+
+ if (type == Token::TEXT)
+ while (_pos < _raw.size() && _type(_raw[_pos]) == type)
+ _pos++;
+
+ StringData ret = _raw.substr(start, _pos - start);
+ _skipWhitespace();
+ return Token(type, ret, start);
+}
+
+
+bool Tokenizer::_skipWhitespace() {
+ unsigned start = _pos;
+ while (_pos < _raw.size() && _type(_raw[_pos]) == Token::WHITESPACE)
+ _pos++;
+ return _pos > start;
+}
+
+
+Token::Type Tokenizer::_type(char c) const {
+ switch (c) {
+ case ' ':
+ case '\f':
+ case '\v':
+ case '\t':
+ case '\r':
+ case '\n':
+ return Token::WHITESPACE;
+ case '\'':
+ if (_english)
return Token::TEXT;
- }
- }
+ else
+ return Token::WHITESPACE;
- }
+ case '~':
+ case '`':
+
+ case '!':
+ case '@':
+ case '#':
+ case '$':
+ case '%':
+ case '^':
+ case '&':
+ case '*':
+ case '(':
+ case ')':
+
+ case '-':
+
+ case '=':
+ case '+':
+
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ case '|':
+ case '\\':
+
+ case ';':
+ case ':':
+ case '"':
+
+ case '<':
+ case '>':
+
+ case ',':
+ case '.':
+
+ case '/':
+ case '?':
+
+ return Token::DELIMITER;
+ default:
+ return Token::TEXT;
+ }
+}
+}
}
diff --git a/src/mongo/db/fts/tokenizer.h b/src/mongo/db/fts/tokenizer.h
index 503816cc434..f1184a455f2 100644
--- a/src/mongo/db/fts/tokenizer.h
+++ b/src/mongo/db/fts/tokenizer.h
@@ -38,41 +38,37 @@
namespace mongo {
- namespace fts {
+namespace fts {
- struct Token {
- enum Type { WHITESPACE, DELIMITER, TEXT, INVALID };
- Token( Type type, StringData data, unsigned offset)
- : type( type ),
- data( data ),
- offset( offset )
- {}
+struct Token {
+ enum Type { WHITESPACE, DELIMITER, TEXT, INVALID };
+ Token(Type type, StringData data, unsigned offset) : type(type), data(data), offset(offset) {}
- bool ok() const { return type != INVALID; }
-
- Type type;
- StringData data;
- unsigned offset;
- };
+ bool ok() const {
+ return type != INVALID;
+ }
- class Tokenizer {
- MONGO_DISALLOW_COPYING( Tokenizer );
- public:
+ Type type;
+ StringData data;
+ unsigned offset;
+};
- Tokenizer( const FTSLanguage* language, StringData str);
+class Tokenizer {
+ MONGO_DISALLOW_COPYING(Tokenizer);
- bool more() const;
- Token next();
+public:
+ Tokenizer(const FTSLanguage* language, StringData str);
- private:
- Token::Type _type( char c ) const;
- bool _skipWhitespace();
+ bool more() const;
+ Token next();
- unsigned _pos;
- const StringData _raw;
- bool _english;
- };
+private:
+ Token::Type _type(char c) const;
+ bool _skipWhitespace();
- }
+ unsigned _pos;
+ const StringData _raw;
+ bool _english;
+};
+}
}
-
diff --git a/src/mongo/db/fts/tokenizer_test.cpp b/src/mongo/db/fts/tokenizer_test.cpp
index d370c9f6c0b..143e3b372ce 100644
--- a/src/mongo/db/fts/tokenizer_test.cpp
+++ b/src/mongo/db/fts/tokenizer_test.cpp
@@ -33,91 +33,88 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- namespace fts {
+namespace fts {
- TEST( Tokenizer, Empty1 ) {
- Tokenizer i( &languageEnglishV2, "" );
- ASSERT( !i.more() );
- }
-
- TEST( Tokenizer, Basic1 ) {
- Tokenizer i( &languageEnglishV2, "blue red green" );
+TEST(Tokenizer, Empty1) {
+ Tokenizer i(&languageEnglishV2, "");
+ ASSERT(!i.more());
+}
- ASSERT( i.more() );
- ASSERT_EQUALS( i.next().data.toString(), "blue" );
+TEST(Tokenizer, Basic1) {
+ Tokenizer i(&languageEnglishV2, "blue red green");
- ASSERT( i.more() );
- ASSERT_EQUALS( i.next().data.toString(), "red" );
+ ASSERT(i.more());
+ ASSERT_EQUALS(i.next().data.toString(), "blue");
- ASSERT( i.more() );
- ASSERT_EQUALS( i.next().data.toString(), "green" );
+ ASSERT(i.more());
+ ASSERT_EQUALS(i.next().data.toString(), "red");
- ASSERT( !i.more() );
- }
+ ASSERT(i.more());
+ ASSERT_EQUALS(i.next().data.toString(), "green");
- TEST( Tokenizer, Basic2 ) {
- Tokenizer i( &languageEnglishV2, "blue-red" );
+ ASSERT(!i.more());
+}
- Token a = i.next();
- Token b = i.next();
- Token c = i.next();
- Token d = i.next();
+TEST(Tokenizer, Basic2) {
+ Tokenizer i(&languageEnglishV2, "blue-red");
- ASSERT_EQUALS( Token::TEXT, a.type );
- ASSERT_EQUALS( Token::DELIMITER, b.type );
- ASSERT_EQUALS( Token::TEXT, c.type );
- ASSERT_EQUALS( Token::INVALID, d.type );
+ Token a = i.next();
+ Token b = i.next();
+ Token c = i.next();
+ Token d = i.next();
- ASSERT_EQUALS( "blue", a.data.toString() );
- ASSERT_EQUALS( "-", b.data.toString() );
- ASSERT_EQUALS( "red", c.data.toString() );
- }
+ ASSERT_EQUALS(Token::TEXT, a.type);
+ ASSERT_EQUALS(Token::DELIMITER, b.type);
+ ASSERT_EQUALS(Token::TEXT, c.type);
+ ASSERT_EQUALS(Token::INVALID, d.type);
- TEST( Tokenizer, Basic3 ) {
- Tokenizer i( &languageEnglishV2, "blue -red" );
+ ASSERT_EQUALS("blue", a.data.toString());
+ ASSERT_EQUALS("-", b.data.toString());
+ ASSERT_EQUALS("red", c.data.toString());
+}
- Token a = i.next();
- Token b = i.next();
- Token c = i.next();
- Token d = i.next();
+TEST(Tokenizer, Basic3) {
+ Tokenizer i(&languageEnglishV2, "blue -red");
- ASSERT_EQUALS( Token::TEXT, a.type );
- ASSERT_EQUALS( Token::DELIMITER, b.type );
- ASSERT_EQUALS( Token::TEXT, c.type );
- ASSERT_EQUALS( Token::INVALID, d.type );
+ Token a = i.next();
+ Token b = i.next();
+ Token c = i.next();
+ Token d = i.next();
- ASSERT_EQUALS( "blue", a.data.toString() );
- ASSERT_EQUALS( "-", b.data.toString() );
- ASSERT_EQUALS( "red", c.data.toString() );
+ ASSERT_EQUALS(Token::TEXT, a.type);
+ ASSERT_EQUALS(Token::DELIMITER, b.type);
+ ASSERT_EQUALS(Token::TEXT, c.type);
+ ASSERT_EQUALS(Token::INVALID, d.type);
- ASSERT_EQUALS( 0U, a.offset );
- ASSERT_EQUALS( 5U, b.offset );
- ASSERT_EQUALS( 6U, c.offset );
- }
+ ASSERT_EQUALS("blue", a.data.toString());
+ ASSERT_EQUALS("-", b.data.toString());
+ ASSERT_EQUALS("red", c.data.toString());
- TEST( Tokenizer, Quote1English ) {
- Tokenizer i( &languageEnglishV2, "eliot's car" );
+ ASSERT_EQUALS(0U, a.offset);
+ ASSERT_EQUALS(5U, b.offset);
+ ASSERT_EQUALS(6U, c.offset);
+}
- Token a = i.next();
- Token b = i.next();
+TEST(Tokenizer, Quote1English) {
+ Tokenizer i(&languageEnglishV2, "eliot's car");
- ASSERT_EQUALS( "eliot's", a.data.toString() );
- ASSERT_EQUALS( "car", b.data.toString() );
- }
+ Token a = i.next();
+ Token b = i.next();
- TEST( Tokenizer, Quote1French ) {
- Tokenizer i( &languageFrenchV2, "eliot's car" );
+ ASSERT_EQUALS("eliot's", a.data.toString());
+ ASSERT_EQUALS("car", b.data.toString());
+}
- Token a = i.next();
- Token b = i.next();
- Token c = i.next();
+TEST(Tokenizer, Quote1French) {
+ Tokenizer i(&languageFrenchV2, "eliot's car");
- ASSERT_EQUALS( "eliot", a.data.toString() );
- ASSERT_EQUALS( "s", b.data.toString() );
- ASSERT_EQUALS( "car", c.data.toString() );
- }
+ Token a = i.next();
+ Token b = i.next();
+ Token c = i.next();
- }
+ ASSERT_EQUALS("eliot", a.data.toString());
+ ASSERT_EQUALS("s", b.data.toString());
+ ASSERT_EQUALS("car", c.data.toString());
+}
+}
}
-
-
diff --git a/src/mongo/db/geo/big_polygon.cpp b/src/mongo/db/geo/big_polygon.cpp
index fb496bfa96e..f50bdf1ae37 100644
--- a/src/mongo/db/geo/big_polygon.cpp
+++ b/src/mongo/db/geo/big_polygon.cpp
@@ -35,197 +35,191 @@
namespace mongo {
- using std::unique_ptr;
- using std::vector;
+using std::unique_ptr;
+using std::vector;
- BigSimplePolygon::BigSimplePolygon() {
- }
-
- // Caller should ensure loop is valid.
- BigSimplePolygon::BigSimplePolygon(S2Loop* loop) :
- _loop(loop), _isNormalized(loop->IsNormalized()) {
- }
+BigSimplePolygon::BigSimplePolygon() {}
- BigSimplePolygon::~BigSimplePolygon() {
- }
+// Caller should ensure loop is valid.
+BigSimplePolygon::BigSimplePolygon(S2Loop* loop)
+ : _loop(loop), _isNormalized(loop->IsNormalized()) {}
- void BigSimplePolygon::Init(S2Loop* loop) {
- _loop.reset(loop);
- _isNormalized = loop->IsNormalized();
- _borderLine.reset();
- _borderPoly.reset();
- }
+BigSimplePolygon::~BigSimplePolygon() {}
- double BigSimplePolygon::GetArea() const {
- return _loop->GetArea();
- }
+void BigSimplePolygon::Init(S2Loop* loop) {
+ _loop.reset(loop);
+ _isNormalized = loop->IsNormalized();
+ _borderLine.reset();
+ _borderPoly.reset();
+}
- bool BigSimplePolygon::Contains(const S2Polygon& polygon) const {
- const S2Polygon& polyBorder = GetPolygonBorder();
+double BigSimplePolygon::GetArea() const {
+ return _loop->GetArea();
+}
- if (_isNormalized) {
- // Polygon border is the same as the loop
- return polyBorder.Contains(&polygon);
- }
+bool BigSimplePolygon::Contains(const S2Polygon& polygon) const {
+ const S2Polygon& polyBorder = GetPolygonBorder();
- // Polygon border is the complement of the loop
- //
- // Return true iff big polygon's complement (polyBorder) doesn't intersect with polygon.
- // We don't guarantee whether the points on border are contained or not.
- return !polyBorder.Intersects(&polygon);
+ if (_isNormalized) {
+ // Polygon border is the same as the loop
+ return polyBorder.Contains(&polygon);
}
- bool BigSimplePolygon::Contains(const S2Polyline& line) const {
- //
- // A line is contained within a loop if the result of subtracting the loop from the line is
- // nothing.
- //
- // Also, a line is contained within a loop if the result of clipping the line to the
- // complement of the loop is nothing.
- //
- // If we can't subtract the loop itself using S2, we clip (intersect) to the inverse. Every
- // point in S2 is contained in exactly one of these loops.
- //
- // TODO: Polygon borders are actually kind of weird, and this is somewhat inconsistent with
- // Intersects(). A point might Intersect() a boundary exactly, but not be Contain()ed
- // within the Polygon. Think the right thing to do here is custom intersection functions.
- //
- const S2Polygon& polyBorder = GetPolygonBorder();
+ // Polygon border is the complement of the loop
+ //
+ // Return true iff big polygon's complement (polyBorder) doesn't intersect with polygon.
+ // We don't guarantee whether the points on border are contained or not.
+ return !polyBorder.Intersects(&polygon);
+}
- OwnedPointerVector<S2Polyline> clippedOwned;
- vector<S2Polyline*>& clipped = clippedOwned.mutableVector();
-
- if (_isNormalized) {
- // Polygon border is the same as the loop
- polyBorder.SubtractFromPolyline(&line, &clipped);
- return clipped.size() == 0;
- }
- else {
- // Polygon border is the complement of the loop
- polyBorder.IntersectWithPolyline(&line, &clipped);
- return clipped.size() == 0;
- }
+bool BigSimplePolygon::Contains(const S2Polyline& line) const {
+ //
+ // A line is contained within a loop if the result of subtracting the loop from the line is
+ // nothing.
+ //
+ // Also, a line is contained within a loop if the result of clipping the line to the
+ // complement of the loop is nothing.
+ //
+ // If we can't subtract the loop itself using S2, we clip (intersect) to the inverse. Every
+ // point in S2 is contained in exactly one of these loops.
+ //
+ // TODO: Polygon borders are actually kind of weird, and this is somewhat inconsistent with
+ // Intersects(). A point might Intersect() a boundary exactly, but not be Contain()ed
+ // within the Polygon. Think the right thing to do here is custom intersection functions.
+ //
+ const S2Polygon& polyBorder = GetPolygonBorder();
+
+ OwnedPointerVector<S2Polyline> clippedOwned;
+ vector<S2Polyline*>& clipped = clippedOwned.mutableVector();
+
+ if (_isNormalized) {
+ // Polygon border is the same as the loop
+ polyBorder.SubtractFromPolyline(&line, &clipped);
+ return clipped.size() == 0;
+ } else {
+ // Polygon border is the complement of the loop
+ polyBorder.IntersectWithPolyline(&line, &clipped);
+ return clipped.size() == 0;
}
+}
- bool BigSimplePolygon::Contains(S2Point const& point) const {
- return _loop->Contains(point);
- }
+bool BigSimplePolygon::Contains(S2Point const& point) const {
+ return _loop->Contains(point);
+}
- bool BigSimplePolygon::Intersects(const S2Polygon& polygon) const {
- // If the loop area is at most 2*Pi, treat it as a simple Polygon.
- if (_isNormalized) {
- const S2Polygon& polyBorder = GetPolygonBorder();
- return polyBorder.Intersects(&polygon);
- }
-
- // The loop area is greater than 2*Pi, so it intersects a polygon (even with holes) if it
- // intersects any of the top-level polygon loops, since any valid polygon is less than
- // a hemisphere.
- //
- // Intersecting a polygon hole requires that the loop must have intersected the containing
- // loop - topology ftw.
- //
- // Another approach is to check polyBorder doesn't contain polygon, but the following
- // approach is cheaper.
-
- // Iterate over all the top-level polygon loops
- for (int i = 0; i < polygon.num_loops(); i = polygon.GetLastDescendant(i) + 1) {
- const S2Loop* polyLoop = polygon.loop(i);
- if (_loop->Intersects(polyLoop))
- return true;
- }
-
- return false;
+bool BigSimplePolygon::Intersects(const S2Polygon& polygon) const {
+ // If the loop area is at most 2*Pi, treat it as a simple Polygon.
+ if (_isNormalized) {
+ const S2Polygon& polyBorder = GetPolygonBorder();
+ return polyBorder.Intersects(&polygon);
}
- bool BigSimplePolygon::Intersects(const S2Polyline& line) const {
- //
- // A loop intersects a line if line intersects the loop border or, if it doesn't, either
- // line is contained in the loop, or line is disjoint with the loop. So checking any
- // vertex of the line is sufficient.
- //
- // TODO: Make a general Polygon/Line relation tester which uses S2 primitives
- //
- return GetLineBorder().Intersects(&line) || _loop->Contains(line.vertex(0));
- }
+ // The loop area is greater than 2*Pi, so it intersects a polygon (even with holes) if it
+ // intersects any of the top-level polygon loops, since any valid polygon is less than
+ // a hemisphere.
+ //
+ // Intersecting a polygon hole requires that the loop must have intersected the containing
+ // loop - topology ftw.
+ //
+ // Another approach is to check polyBorder doesn't contain polygon, but the following
+ // approach is cheaper.
- bool BigSimplePolygon::Intersects(S2Point const& point) const {
- return Contains(point);
+ // Iterate over all the top-level polygon loops
+ for (int i = 0; i < polygon.num_loops(); i = polygon.GetLastDescendant(i) + 1) {
+ const S2Loop* polyLoop = polygon.loop(i);
+ if (_loop->Intersects(polyLoop))
+ return true;
}
- void BigSimplePolygon::Invert() {
- _loop->Invert();
- _isNormalized = _loop->IsNormalized();
- }
+ return false;
+}
- const S2Polygon& BigSimplePolygon::GetPolygonBorder() const {
- if (_borderPoly)
- return *_borderPoly;
+bool BigSimplePolygon::Intersects(const S2Polyline& line) const {
+ //
+ // A loop intersects a line if line intersects the loop border or, if it doesn't, either
+ // line is contained in the loop, or line is disjoint with the loop. So checking any
+ // vertex of the line is sufficient.
+ //
+ // TODO: Make a general Polygon/Line relation tester which uses S2 primitives
+ //
+ return GetLineBorder().Intersects(&line) || _loop->Contains(line.vertex(0));
+}
- unique_ptr<S2Loop> cloned(_loop->Clone());
+bool BigSimplePolygon::Intersects(S2Point const& point) const {
+ return Contains(point);
+}
- // Any loop in polygon should be than a hemisphere (2*Pi).
- cloned->Normalize();
+void BigSimplePolygon::Invert() {
+ _loop->Invert();
+ _isNormalized = _loop->IsNormalized();
+}
- OwnedPointerVector<S2Loop> loops;
- loops.mutableVector().push_back(cloned.release());
- _borderPoly.reset(new S2Polygon(&loops.mutableVector()));
+const S2Polygon& BigSimplePolygon::GetPolygonBorder() const {
+ if (_borderPoly)
return *_borderPoly;
- }
- const S2Polyline& BigSimplePolygon::GetLineBorder() const {
- if (_borderLine)
- return *_borderLine;
+ unique_ptr<S2Loop> cloned(_loop->Clone());
- vector<S2Point> points;
- int numVertices = _loop->num_vertices();
- for (int i = 0; i <= numVertices; ++i) {
- // vertex() maps "numVertices" to 0 internally, so we don't have to deal with
- // the index out of range.
- points.push_back(_loop->vertex(i));
- }
+    // Any loop in the polygon should be smaller than a hemisphere (2*Pi).
+ cloned->Normalize();
- _borderLine.reset(new S2Polyline(points));
+ OwnedPointerVector<S2Loop> loops;
+ loops.mutableVector().push_back(cloned.release());
+ _borderPoly.reset(new S2Polygon(&loops.mutableVector()));
+ return *_borderPoly;
+}
+const S2Polyline& BigSimplePolygon::GetLineBorder() const {
+ if (_borderLine)
return *_borderLine;
- }
- BigSimplePolygon* BigSimplePolygon::Clone() const {
- return new BigSimplePolygon(_loop->Clone());
+ vector<S2Point> points;
+ int numVertices = _loop->num_vertices();
+ for (int i = 0; i <= numVertices; ++i) {
+ // vertex() maps "numVertices" to 0 internally, so we don't have to deal with
+ // the index out of range.
+ points.push_back(_loop->vertex(i));
}
- S2Cap BigSimplePolygon::GetCapBound() const {
- return _loop->GetCapBound();
- }
+ _borderLine.reset(new S2Polyline(points));
- S2LatLngRect BigSimplePolygon::GetRectBound() const {
- return _loop->GetRectBound();
- }
+ return *_borderLine;
+}
- bool BigSimplePolygon::Contains(const S2Cell& cell) const {
- return _loop->Contains(cell);
- }
+BigSimplePolygon* BigSimplePolygon::Clone() const {
+ return new BigSimplePolygon(_loop->Clone());
+}
- bool BigSimplePolygon::MayIntersect(const S2Cell& cell) const {
- return _loop->MayIntersect(cell);
- }
+S2Cap BigSimplePolygon::GetCapBound() const {
+ return _loop->GetCapBound();
+}
- bool BigSimplePolygon::VirtualContainsPoint(const S2Point& p) const {
- return _loop->VirtualContainsPoint(p);
- }
+S2LatLngRect BigSimplePolygon::GetRectBound() const {
+ return _loop->GetRectBound();
+}
- void BigSimplePolygon::Encode(Encoder* const encoder) const {
- invariant(false);
- }
+bool BigSimplePolygon::Contains(const S2Cell& cell) const {
+ return _loop->Contains(cell);
+}
- bool BigSimplePolygon::Decode(Decoder* const decoder) {
- invariant(false);
- }
+bool BigSimplePolygon::MayIntersect(const S2Cell& cell) const {
+ return _loop->MayIntersect(cell);
+}
- bool BigSimplePolygon::DecodeWithinScope(Decoder* const decoder) {
- invariant(false);
- }
+bool BigSimplePolygon::VirtualContainsPoint(const S2Point& p) const {
+ return _loop->VirtualContainsPoint(p);
+}
+
+void BigSimplePolygon::Encode(Encoder* const encoder) const {
+ invariant(false);
+}
+bool BigSimplePolygon::Decode(Decoder* const decoder) {
+ invariant(false);
}
+bool BigSimplePolygon::DecodeWithinScope(Decoder* const decoder) {
+ invariant(false);
+}
+}
diff --git a/src/mongo/db/geo/big_polygon.h b/src/mongo/db/geo/big_polygon.h
index c5a913ac05b..9551ecb4b8f 100644
--- a/src/mongo/db/geo/big_polygon.h
+++ b/src/mongo/db/geo/big_polygon.h
@@ -40,82 +40,78 @@
namespace mongo {
- // Simple GeoJSON polygon with a custom CRS identifier as having a strict winding order.
- // The winding order will determine unambiguously the inside/outside of the polygon even
- // if larger than one hemisphere.
- //
- // BigSimplePolygon uses S2Loop internally, which follows a left-foot rule (inside to the
- // left when walking the edge of the polygon, counter-clockwise)
- class BigSimplePolygon : public S2Region {
- public:
-
- BigSimplePolygon();
-
- BigSimplePolygon(S2Loop* loop);
-
- virtual ~BigSimplePolygon();
-
- void Init(S2Loop* loop);
+// A simple GeoJSON polygon whose custom CRS identifier marks it as having a strict winding
+// order. The winding order unambiguously determines the inside/outside of the polygon, even
+// if it is larger than one hemisphere.
+//
+// BigSimplePolygon uses S2Loop internally, which follows a left-foot rule (the inside is to
+// the left when walking the edge of the polygon counter-clockwise).
+class BigSimplePolygon : public S2Region {
+public:
+ BigSimplePolygon();
- double GetArea() const;
+ BigSimplePolygon(S2Loop* loop);
- bool Contains(const S2Polygon& polygon) const;
+ virtual ~BigSimplePolygon();
- bool Contains(const S2Polyline& line) const;
+ void Init(S2Loop* loop);
- // Needs to be this way for S2 compatibility
- bool Contains(S2Point const& point) const;
+ double GetArea() const;
- bool Intersects(const S2Polygon& polygon) const;
+ bool Contains(const S2Polygon& polygon) const;
- bool Intersects(const S2Polyline& line) const;
+ bool Contains(const S2Polyline& line) const;
- bool Intersects(S2Point const& point) const;
+ // Needs to be this way for S2 compatibility
+ bool Contains(S2Point const& point) const;
- // Only used in tests
- void Invert();
+ bool Intersects(const S2Polygon& polygon) const;
- const S2Polygon& GetPolygonBorder() const;
+ bool Intersects(const S2Polyline& line) const;
- const S2Polyline& GetLineBorder() const;
+ bool Intersects(S2Point const& point) const;
- //
- // S2Region interface
- //
+ // Only used in tests
+ void Invert();
- BigSimplePolygon* Clone() const;
+ const S2Polygon& GetPolygonBorder() const;
- S2Cap GetCapBound() const;
+ const S2Polyline& GetLineBorder() const;
- S2LatLngRect GetRectBound() const;
+ //
+ // S2Region interface
+ //
- bool Contains(S2Cell const& cell) const;
+ BigSimplePolygon* Clone() const;
- bool MayIntersect(S2Cell const& cell) const;
+ S2Cap GetCapBound() const;
- bool VirtualContainsPoint(S2Point const& p) const;
+ S2LatLngRect GetRectBound() const;
- void Encode(Encoder* const encoder) const;
+ bool Contains(S2Cell const& cell) const;
- bool Decode(Decoder* const decoder);
+ bool MayIntersect(S2Cell const& cell) const;
- bool DecodeWithinScope(Decoder* const decoder);
+ bool VirtualContainsPoint(S2Point const& p) const;
- private:
+ void Encode(Encoder* const encoder) const;
- std::unique_ptr<S2Loop> _loop;
+ bool Decode(Decoder* const decoder);
- // Cache whether the loop area is at most 2*Pi (the area of hemisphere).
- //
- // S2 guarantees that any loop in a valid (normalized) polygon, no matter a hole
- // or a shell, has to be less than 2*Pi. So if the loop is normalized, it's the same
- // with the border polygon, otherwise, the border polygon is its complement.
- bool _isNormalized;
+ bool DecodeWithinScope(Decoder* const decoder);
- // Cached to do Intersects() and Contains() with S2Polylines.
- mutable std::unique_ptr<S2Polyline> _borderLine;
- mutable std::unique_ptr<S2Polygon> _borderPoly;
- };
+private:
+ std::unique_ptr<S2Loop> _loop;
+    // Cache whether the loop area is at most 2*Pi (the area of a hemisphere).
+    //
+    // S2 guarantees that any loop in a valid (normalized) polygon, whether a hole
+    // or a shell, has an area of less than 2*Pi. So if the loop is normalized it is the
+    // same as the border polygon; otherwise, the border polygon is its complement.
+ bool _isNormalized;
+
+ // Cached to do Intersects() and Contains() with S2Polylines.
+ mutable std::unique_ptr<S2Polyline> _borderLine;
+ mutable std::unique_ptr<S2Polygon> _borderPoly;
+};
}
-
diff --git a/src/mongo/db/geo/big_polygon_test.cpp b/src/mongo/db/geo/big_polygon_test.cpp
index c0c01abdba7..3ac82b03768 100644
--- a/src/mongo/db/geo/big_polygon_test.cpp
+++ b/src/mongo/db/geo/big_polygon_test.cpp
@@ -34,562 +34,534 @@
namespace {
- using namespace mongo;
- using std::unique_ptr;
- using std::string;
- using std::vector;
-
- // Helper to build a vector of S2Point
- struct PointBuilder {
-
- vector<S2Point> points;
-
- PointBuilder& operator<<(const S2LatLng& LatLng) {
- points.push_back(LatLng.ToPoint());
- return *this;
- }
- };
-
- vector<S2Point> pointVec(const PointBuilder& builder) {
- vector<S2Point> points(builder.points.begin(), builder.points.end());
- return points;
- }
-
- S2Loop* loop(const PointBuilder& builder) {
- return new S2Loop(builder.points);
- }
-
- vector<S2Loop*>* loopVec(const PointBuilder& builder) {
- static vector<S2Loop*> loops;
- loops.clear();
- loops.push_back(loop(builder));
- return &loops;
- }
-
- S2LatLng LatLng(double lat, double lng) {
- return S2LatLng::FromDegrees(lat, lng);
- }
-
- // Syntax sugar for PointBuilder, which can be used to construct
- // - vector<S2Point> pointVec()
- // - S2Loop* loop()
- // - vector<S2Loop*>* loopVec()
- //
- // e.g. points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0) << LatLng(0.0, 0.0))
- typedef PointBuilder points;
-
- TEST(BigSimplePolygon, Basic) {
-
- // A 20x20 square centered at [0,0]
- BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
-
- // A 10x10 square centered at [0,0]
- S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
-
- ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
- ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20.GetArea());
- ASSERT(bigPoly20.Contains(poly10));
- ASSERT(bigPoly20.Intersects(poly10));
-
- // A 20x20 square centered at [0,20]
- BigSimplePolygon bigPoly20Offset(loop(points() << LatLng(10.0, 30.0) << LatLng(10.0, 10.0)
- << LatLng(-10.0, 10.0) << LatLng(-10.0, 30.0)));
-
- ASSERT_LESS_THAN(bigPoly20Offset.GetArea(), 2 * M_PI);
- ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20Offset.GetArea());
- ASSERT_FALSE(bigPoly20Offset.Contains(poly10));
- ASSERT_FALSE(bigPoly20Offset.Intersects(poly10));
- }
-
- TEST(BigSimplePolygon, BasicWithHole) {
- // A 30x30 square centered at [0,0] with a 20X20 hole
- vector<S2Loop*> loops;
- loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
- loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
-
- S2Polygon holePoly(&loops);
-
- // A 16X16 square centered at [0,0]
- BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
-
- ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly16.Contains(holePoly));
- ASSERT_FALSE(bigPoly16.Intersects(holePoly));
-
- // A big polygon bigger than the hole.
- BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
- ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly24.Contains(holePoly));
- ASSERT_TRUE(bigPoly24.Intersects(holePoly));
- }
-
- TEST(BigSimplePolygon, BasicWithHoleAndShell) {
- // A 30x30 square centered at [0,0] with a 20X20 hole and 10X10 shell
- vector<S2Loop*> loops;
- // Border
- loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
- // Hole
- loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
- // Shell
- loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
- S2Polygon shellPoly(&loops);
-
- // A 16X16 square centered at [0,0] containing the shell
- BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
- ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly16.Contains(shellPoly));
- ASSERT_TRUE(bigPoly16.Intersects(shellPoly));
-
- // Try a big polygon bigger than the hole.
- BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
- ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly24.Contains(shellPoly));
- ASSERT_TRUE(bigPoly24.Intersects(shellPoly));
-
- // Try a big polygon smaller than the shell.
- BigSimplePolygon bigPoly8(loop(points() << LatLng(4.0, 4.0) << LatLng(4.0, -4.0)
- << LatLng(-4.0, -4.0) << LatLng(-4.0, 4.0)));
- ASSERT_LESS_THAN(bigPoly8.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly8.Contains(shellPoly));
- ASSERT_TRUE(bigPoly8.Intersects(shellPoly));
- }
-
- TEST(BigSimplePolygon, BasicComplement) {
-
- // Everything *not* in a 20x20 square centered at [0,0]
- BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
- bigPoly20Comp.Invert();
-
- // A 10x10 square centered at [0,0]
- S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
-
- ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly20Comp.Contains(poly10));
- ASSERT_FALSE(bigPoly20Comp.Intersects(poly10));
-
- // A 10x10 square centered at [0,20], contained by bigPoly20Comp
- S2Polygon poly10Contained(loopVec(points() << LatLng(25.0, 25.0) << LatLng(25.0, 15.0)
- << LatLng(15.0, 15.0) << LatLng(15.0, 25.0)));
-
- ASSERT_LESS_THAN(poly10Contained.GetArea(), bigPoly20Comp.GetArea());
- ASSERT(bigPoly20Comp.Contains(poly10Contained));
- ASSERT(bigPoly20Comp.Intersects(poly10Contained));
-
- // A 30x30 square centered at [0,0], so that bigPoly20Comp contains its complement entirely,
- // which is not allowed by S2.
- S2Polygon poly30(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
- ASSERT_LESS_THAN(poly30.GetArea(), bigPoly20Comp.GetArea());
- ASSERT_FALSE(bigPoly20Comp.Contains(poly30));
- ASSERT_TRUE(bigPoly20Comp.Intersects(poly30));
- }
-
- TEST(BigSimplePolygon, BasicIntersects) {
-
- // Everything *not* in a 20x20 square centered at [0,0]
- BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
- bigPoly20.Invert();
-
- // A 10x10 square centered at [10,10] (partial overlap)
- S2Polygon poly10(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, 5.0)
- << LatLng(5.0, 5.0) << LatLng(5.0, 15.0)));
-
- ASSERT_FALSE(bigPoly20.Contains(poly10));
- ASSERT(bigPoly20.Intersects(poly10));
+using namespace mongo;
+using std::unique_ptr;
+using std::string;
+using std::vector;
+
+// Helper to build a vector of S2Point
+struct PointBuilder {
+ vector<S2Point> points;
+
+ PointBuilder& operator<<(const S2LatLng& LatLng) {
+ points.push_back(LatLng.ToPoint());
+ return *this;
}
+};
- TEST(BigSimplePolygon, BasicComplementWithHole) {
- // A 30x30 square centered at [0,0] with a 20X20 hole
- vector<S2Loop*> loops;
- loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
- loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
-
- S2Polygon holePoly(&loops);
-
- // 1. BigPolygon doesn't touch holePoly
- // Everything *not* in a 40x40 square centered at [0,0]
- BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0)
- << LatLng(-20.0, 20.0)));
- bigPoly40Comp.Invert();
- ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly40Comp.Contains(holePoly));
- ASSERT_FALSE(bigPoly40Comp.Intersects(holePoly));
-
- // 2. BigPolygon intersects holePoly
- // Everything *not* in a 24X24 square centered at [0,0]
- BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
- bigPoly24Comp.Invert();
- ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly24Comp.Contains(holePoly));
- ASSERT_TRUE(bigPoly24Comp.Intersects(holePoly));
-
- // 3. BigPolygon contains holePoly
- // Everything *not* in a 16X16 square centered at [0,0]
- BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
- bigPoly16Comp.Invert();
- ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
- ASSERT_TRUE(bigPoly16Comp.Contains(holePoly));
- ASSERT_TRUE(bigPoly16Comp.Intersects(holePoly));
-
- // 4. BigPolygon contains the right half of holePoly
- // Everything *not* in a 40x40 square centered at [0,20]
- BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0)
- << LatLng(20.0, 0.0)
- << LatLng(-20.0, 0.0)
- << LatLng(-20.0, 40.0)));
- bigPoly40CompOffset.Invert();
- ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly40CompOffset.Contains(holePoly));
- ASSERT_TRUE(bigPoly40CompOffset.Intersects(holePoly));
- }
+vector<S2Point> pointVec(const PointBuilder& builder) {
+ vector<S2Point> points(builder.points.begin(), builder.points.end());
+ return points;
+}
- TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
- // A 30x30 square centered at [0,0] with a 20X20 hole and 10X10 shell
- vector<S2Loop*> loops;
- // Border
- loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
- // Hole
- loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
- // Shell
- loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
- S2Polygon shellPoly(&loops);
-
- // 1. BigPolygon doesn't touch shellPoly
- // Everything *not* in a 40x40 square centered at [0,0]
- BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0)
- << LatLng(-20.0, 20.0)));
- bigPoly40Comp.Invert();
- ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly40Comp.Contains(shellPoly));
- ASSERT_FALSE(bigPoly40Comp.Intersects(shellPoly));
-
- // 2. BigPolygon intersects shellPoly
- // Everything *not* in a 24X24 square centered at [0,0]
- BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0)
- << LatLng(-12.0, 12.0)));
- bigPoly24Comp.Invert();
- ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly24Comp.Contains(shellPoly));
- ASSERT_TRUE(bigPoly24Comp.Intersects(shellPoly));
-
- // 3. BigPolygon contains shellPoly's outer ring
- // Everything *not* in a 16X16 square centered at [0,0]
- BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0)
- << LatLng(-8.0, 8.0)));
- bigPoly16Comp.Invert();
- ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly16Comp.Contains(shellPoly));
- ASSERT_TRUE(bigPoly16Comp.Intersects(shellPoly));
-
- // 4. BigPolygon contains the right half of shellPoly
- // Everything *not* in a 40x40 square centered at [0,20]
- BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0)
- << LatLng(20.0, 0.0)
- << LatLng(-20.0, 0.0)
- << LatLng(-20.0, 40.0)));
- bigPoly40CompOffset.Invert();
- ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly40CompOffset.Contains(shellPoly));
- ASSERT_TRUE(bigPoly40CompOffset.Intersects(shellPoly));
-
- // 5. BigPolygon contain shellPoly (CW)
- BigSimplePolygon bigPolyCompOffset(loop(points() << LatLng(6.0, 6.0)
- << LatLng(6.0, 8.0)
- << LatLng(-6.0, 8.0)
- << LatLng(-6.0, 6.0)));
- ASSERT_GREATER_THAN(bigPolyCompOffset.GetArea(), 2 * M_PI);
- ASSERT_TRUE(bigPolyCompOffset.Contains(shellPoly));
- ASSERT_TRUE(bigPolyCompOffset.Intersects(shellPoly));
- }
+S2Loop* loop(const PointBuilder& builder) {
+ return new S2Loop(builder.points);
+}
- TEST(BigSimplePolygon, BasicWinding) {
+vector<S2Loop*>* loopVec(const PointBuilder& builder) {
+ static vector<S2Loop*> loops;
+ loops.clear();
+ loops.push_back(loop(builder));
+ return &loops;
+}
- // A 20x20 square centered at [0,0] (CCW)
- BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+S2LatLng LatLng(double lat, double lng) {
+ return S2LatLng::FromDegrees(lat, lng);
+}
- // Everything *not* in a 20x20 square centered at [0,0] (CW)
- BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(10.0, -10.0)));
+// Syntax sugar for PointBuilder, which can be used to construct
+// - vector<S2Point> pointVec()
+// - S2Loop* loop()
+// - vector<S2Loop*>* loopVec()
+//
+// e.g. points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0) << LatLng(0.0, 0.0)
+typedef PointBuilder points;
+
+TEST(BigSimplePolygon, Basic) {
+ // A 20x20 square centered at [0,0]
+ BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+
+ // A 10x10 square centered at [0,0]
+ S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
+
+ ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
+ ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20.GetArea());
+ ASSERT(bigPoly20.Contains(poly10));
+ ASSERT(bigPoly20.Intersects(poly10));
+
+ // A 20x20 square centered at [0,20]
+ BigSimplePolygon bigPoly20Offset(loop(points() << LatLng(10.0, 30.0) << LatLng(10.0, 10.0)
+ << LatLng(-10.0, 10.0) << LatLng(-10.0, 30.0)));
+
+ ASSERT_LESS_THAN(bigPoly20Offset.GetArea(), 2 * M_PI);
+ ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20Offset.GetArea());
+ ASSERT_FALSE(bigPoly20Offset.Contains(poly10));
+ ASSERT_FALSE(bigPoly20Offset.Intersects(poly10));
+}
- ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
- ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
- }
+TEST(BigSimplePolygon, BasicWithHole) {
+    // A 30x30 square centered at [0,0] with a 20x20 hole
+ vector<S2Loop*> loops;
+ loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+
+ S2Polygon holePoly(&loops);
+
+    // A 16x16 square centered at [0,0]
+ BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+
+ ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly16.Contains(holePoly));
+ ASSERT_FALSE(bigPoly16.Intersects(holePoly));
+
+ // A big polygon bigger than the hole.
+ BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly24.Contains(holePoly));
+ ASSERT_TRUE(bigPoly24.Intersects(holePoly));
+}
- TEST(BigSimplePolygon, LineRelations) {
+TEST(BigSimplePolygon, BasicWithHoleAndShell) {
+    // A 30x30 square centered at [0,0] with a 20x20 hole and 10x10 shell
+ vector<S2Loop*> loops;
+ // Border
+ loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ // Hole
+ loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ // Shell
+ loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
+ S2Polygon shellPoly(&loops);
+
+    // A 16x16 square centered at [0,0] containing the shell
+ BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly16.Contains(shellPoly));
+ ASSERT_TRUE(bigPoly16.Intersects(shellPoly));
+
+ // Try a big polygon bigger than the hole.
+ BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly24.Contains(shellPoly));
+ ASSERT_TRUE(bigPoly24.Intersects(shellPoly));
+
+ // Try a big polygon smaller than the shell.
+ BigSimplePolygon bigPoly8(loop(points() << LatLng(4.0, 4.0) << LatLng(4.0, -4.0)
+ << LatLng(-4.0, -4.0) << LatLng(-4.0, 4.0)));
+ ASSERT_LESS_THAN(bigPoly8.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly8.Contains(shellPoly));
+ ASSERT_TRUE(bigPoly8.Intersects(shellPoly));
+}
- // A 20x20 square centered at [0,0]
- BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+TEST(BigSimplePolygon, BasicComplement) {
+ // Everything *not* in a 20x20 square centered at [0,0]
+ BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
<< LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ bigPoly20Comp.Invert();
+
+ // A 10x10 square centered at [0,0]
+ S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
+
+ ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly20Comp.Contains(poly10));
+ ASSERT_FALSE(bigPoly20Comp.Intersects(poly10));
+
+    // A 10x10 square centered at [20,20], contained by bigPoly20Comp
+ S2Polygon poly10Contained(loopVec(points() << LatLng(25.0, 25.0) << LatLng(25.0, 15.0)
+ << LatLng(15.0, 15.0) << LatLng(15.0, 25.0)));
+
+ ASSERT_LESS_THAN(poly10Contained.GetArea(), bigPoly20Comp.GetArea());
+ ASSERT(bigPoly20Comp.Contains(poly10Contained));
+ ASSERT(bigPoly20Comp.Intersects(poly10Contained));
+
+ // A 30x30 square centered at [0,0], so that bigPoly20Comp contains its complement entirely,
+ // which is not allowed by S2.
+ S2Polygon poly30(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ ASSERT_LESS_THAN(poly30.GetArea(), bigPoly20Comp.GetArea());
+ ASSERT_FALSE(bigPoly20Comp.Contains(poly30));
+ ASSERT_TRUE(bigPoly20Comp.Intersects(poly30));
+}
- // A 10x10 line circling [0,0]
- S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
-
- ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
- ASSERT(bigPoly20.Contains(line10));
- ASSERT(bigPoly20.Intersects(line10));
+TEST(BigSimplePolygon, BasicIntersects) {
+ // Everything *not* in a 20x20 square centered at [0,0]
+ BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ bigPoly20.Invert();
- // Line segment disjoint from big polygon
- S2Polyline lineDisjoint(pointVec(points() << LatLng(15.0, 5.0) << LatLng(15.0, -5.0)));
- ASSERT_FALSE(bigPoly20.Contains(lineDisjoint));
- ASSERT_FALSE(bigPoly20.Intersects(lineDisjoint));
+ // A 10x10 square centered at [10,10] (partial overlap)
+ S2Polygon poly10(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, 5.0) << LatLng(5.0, 5.0)
+ << LatLng(5.0, 15.0)));
- // Line segment intersects big polygon
- S2Polyline lineIntersect(pointVec(points() << LatLng(0.0, 0.0) << LatLng(15.0, 0.0)));
- ASSERT_FALSE(bigPoly20.Contains(lineIntersect));
- ASSERT_TRUE(bigPoly20.Intersects(lineIntersect));
- }
+ ASSERT_FALSE(bigPoly20.Contains(poly10));
+ ASSERT(bigPoly20.Intersects(poly10));
+}
- TEST(BigSimplePolygon, LineRelationsComplement) {
+TEST(BigSimplePolygon, BasicComplementWithHole) {
+ // A 30x30 square centered at [0,0] with a 20X20 hole
+ vector<S2Loop*> loops;
+ loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+
+ S2Polygon holePoly(&loops);
+
+ // 1. BigPolygon doesn't touch holePoly
+ // Everything *not* in a 40x40 square centered at [0,0]
+ BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
+ << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
+ bigPoly40Comp.Invert();
+ ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly40Comp.Contains(holePoly));
+ ASSERT_FALSE(bigPoly40Comp.Intersects(holePoly));
+
+ // 2. BigPolygon intersects holePoly
+    // Everything *not* in a 24x24 square centered at [0,0]
+ BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ bigPoly24Comp.Invert();
+ ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly24Comp.Contains(holePoly));
+ ASSERT_TRUE(bigPoly24Comp.Intersects(holePoly));
+
+ // 3. BigPolygon contains holePoly
+    // Everything *not* in a 16x16 square centered at [0,0]
+ BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ bigPoly16Comp.Invert();
+ ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
+ ASSERT_TRUE(bigPoly16Comp.Contains(holePoly));
+ ASSERT_TRUE(bigPoly16Comp.Intersects(holePoly));
+
+ // 4. BigPolygon contains the right half of holePoly
+ // Everything *not* in a 40x40 square centered at [0,20]
+ BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
+ << LatLng(-20.0, 0.0)
+ << LatLng(-20.0, 40.0)));
+ bigPoly40CompOffset.Invert();
+ ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly40CompOffset.Contains(holePoly));
+ ASSERT_TRUE(bigPoly40CompOffset.Intersects(holePoly));
+}
- // A 20x20 square centered at [0,0]
- BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(-10.0, 10.0)));
- bigPoly20Comp.Invert();
+TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
+    // A 30x30 square centered at [0,0] with a 20x20 hole and 10x10 shell
+ vector<S2Loop*> loops;
+ // Border
+ loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ // Hole
+ loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ // Shell
+ loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
+ S2Polygon shellPoly(&loops);
+
+ // 1. BigPolygon doesn't touch shellPoly
+ // Everything *not* in a 40x40 square centered at [0,0]
+ BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
+ << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
+ bigPoly40Comp.Invert();
+ ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly40Comp.Contains(shellPoly));
+ ASSERT_FALSE(bigPoly40Comp.Intersects(shellPoly));
+
+ // 2. BigPolygon intersects shellPoly
+    // Everything *not* in a 24x24 square centered at [0,0]
+ BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
+ << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ bigPoly24Comp.Invert();
+ ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly24Comp.Contains(shellPoly));
+ ASSERT_TRUE(bigPoly24Comp.Intersects(shellPoly));
+
+ // 3. BigPolygon contains shellPoly's outer ring
+    // Everything *not* in a 16x16 square centered at [0,0]
+ BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
+ << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ bigPoly16Comp.Invert();
+ ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly16Comp.Contains(shellPoly));
+ ASSERT_TRUE(bigPoly16Comp.Intersects(shellPoly));
+
+ // 4. BigPolygon contains the right half of shellPoly
+ // Everything *not* in a 40x40 square centered at [0,20]
+ BigSimplePolygon bigPoly40CompOffset(loop(points() << LatLng(20.0, 40.0) << LatLng(20.0, 0.0)
+ << LatLng(-20.0, 0.0)
+ << LatLng(-20.0, 40.0)));
+ bigPoly40CompOffset.Invert();
+ ASSERT_GREATER_THAN(bigPoly40CompOffset.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly40CompOffset.Contains(shellPoly));
+ ASSERT_TRUE(bigPoly40CompOffset.Intersects(shellPoly));
+
+    // 5. BigPolygon contains shellPoly (CW)
+ BigSimplePolygon bigPolyCompOffset(loop(points() << LatLng(6.0, 6.0) << LatLng(6.0, 8.0)
+ << LatLng(-6.0, 8.0) << LatLng(-6.0, 6.0)));
+ ASSERT_GREATER_THAN(bigPolyCompOffset.GetArea(), 2 * M_PI);
+ ASSERT_TRUE(bigPolyCompOffset.Contains(shellPoly));
+ ASSERT_TRUE(bigPolyCompOffset.Intersects(shellPoly));
+}
- // A 10x10 line circling [0,0]
- S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
+TEST(BigSimplePolygon, BasicWinding) {
+ // A 20x20 square centered at [0,0] (CCW)
+ BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
- ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly20Comp.Contains(line10));
- ASSERT_FALSE(bigPoly20Comp.Intersects(line10));
+ // Everything *not* in a 20x20 square centered at [0,0] (CW)
+ BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
+ << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
- // Line segment (0, 0) -> (0, 15)
- S2Polyline lineIntersect(pointVec(points() << LatLng(0.0, 0.0) << LatLng(0.0, 15.0)));
- ASSERT_FALSE(bigPoly20Comp.Contains(lineIntersect));
- ASSERT_TRUE(bigPoly20Comp.Intersects(lineIntersect));
+ ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
+ ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
+}
- // A 10x10 line circling [0,0]
- S2Polyline line30(pointVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
- ASSERT_TRUE(bigPoly20Comp.Contains(line30));
- ASSERT_TRUE(bigPoly20Comp.Intersects(line30));
- }
+TEST(BigSimplePolygon, LineRelations) {
+ // A 20x20 square centered at [0,0]
+ BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
- TEST(BigSimplePolygon, LineRelationsWinding) {
+ // A 10x10 line circling [0,0]
+ S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
- // Everything *not* in a 20x20 square centered at [0,0] (CW winding)
- BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0)
- << LatLng(10.0, -10.0)));
+ ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
+ ASSERT(bigPoly20.Contains(line10));
+ ASSERT(bigPoly20.Intersects(line10));
- // A 10x10 line circling [0,0]
- S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
+ // Line segment disjoint from big polygon
+ S2Polyline lineDisjoint(pointVec(points() << LatLng(15.0, 5.0) << LatLng(15.0, -5.0)));
+ ASSERT_FALSE(bigPoly20.Contains(lineDisjoint));
+ ASSERT_FALSE(bigPoly20.Intersects(lineDisjoint));
- ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
- ASSERT_FALSE(bigPoly20Comp.Contains(line10));
- ASSERT_FALSE(bigPoly20Comp.Intersects(line10));
- }
+ // Line segment intersects big polygon
+ S2Polyline lineIntersect(pointVec(points() << LatLng(0.0, 0.0) << LatLng(15.0, 0.0)));
+ ASSERT_FALSE(bigPoly20.Contains(lineIntersect));
+ ASSERT_TRUE(bigPoly20.Intersects(lineIntersect));
+}
- TEST(BigSimplePolygon, PolarContains) {
+TEST(BigSimplePolygon, LineRelationsComplement) {
+    // Everything *not* in a 20x20 square centered at [0,0]
+ BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
+ << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ bigPoly20Comp.Invert();
- // Square 10 degrees from the north pole [90,0]
- BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+ // A 10x10 line circling [0,0]
+ S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
- // Square 5 degrees from the north pole [90, 0]
- S2Polygon northPoly(loopVec(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0)
- << LatLng(85.0, 180.0) << LatLng(85.0, -90.0)));
+ ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly20Comp.Contains(line10));
+ ASSERT_FALSE(bigPoly20Comp.Intersects(line10));
- ASSERT_LESS_THAN(bigNorthPoly.GetArea(), 2 * M_PI);
- ASSERT_LESS_THAN(northPoly.GetArea(), bigNorthPoly.GetArea());
- ASSERT(bigNorthPoly.Contains(northPoly));
- ASSERT(bigNorthPoly.Intersects(northPoly));
- }
+ // Line segment (0, 0) -> (0, 15)
+ S2Polyline lineIntersect(pointVec(points() << LatLng(0.0, 0.0) << LatLng(0.0, 15.0)));
+ ASSERT_FALSE(bigPoly20Comp.Contains(lineIntersect));
+ ASSERT_TRUE(bigPoly20Comp.Intersects(lineIntersect));
- TEST(BigSimplePolygon, PolarContainsWithHoles) {
+    // A 30x30 line circling [0,0]
+ S2Polyline line30(pointVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
+ << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ ASSERT_TRUE(bigPoly20Comp.Contains(line30));
+ ASSERT_TRUE(bigPoly20Comp.Intersects(line30));
+}
- // Square 10 degrees from the north pole [90,0]
- BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+TEST(BigSimplePolygon, LineRelationsWinding) {
+ // Everything *not* in a 20x20 square centered at [0,0] (CW winding)
+ BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
+ << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
- // Square 5 degrees from the north pole [90, 0] with a concentric hole 1 degree from the
- // north pole
- vector<S2Loop*> loops;
- loops.push_back(loop(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0)
- << LatLng(85.0, 180.0) << LatLng(85.0, -90.0)));
- loops.push_back(loop(points() << LatLng(89.0, 0.0) << LatLng(89.0, 90.0)
- << LatLng(89.0, 180.0) << LatLng(89.0, -90.0)));
- S2Polygon northPolyHole(&loops);
+ // A 10x10 line circling [0,0]
+ S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
+ << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
- ASSERT_LESS_THAN(northPolyHole.GetArea(), bigNorthPoly.GetArea());
- ASSERT(bigNorthPoly.Contains(northPolyHole));
- ASSERT(bigNorthPoly.Intersects(northPolyHole));
- }
+ ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
+ ASSERT_FALSE(bigPoly20Comp.Contains(line10));
+ ASSERT_FALSE(bigPoly20Comp.Intersects(line10));
+}
- TEST(BigSimplePolygon, PolarIntersectsWithHoles) {
+TEST(BigSimplePolygon, PolarContains) {
+ // Square 10 degrees from the north pole [90,0]
+ BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
- // Square 10 degrees from the north pole [90,0]
- BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+ // Square 5 degrees from the north pole [90, 0]
+ S2Polygon northPoly(loopVec(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0)
+ << LatLng(85.0, 180.0) << LatLng(85.0, -90.0)));
- // 5-degree square with 1-degree-wide concentric hole, centered on [80.0, 0.0]
- vector<S2Loop*> loops;
- loops.push_back(loop(points() << LatLng(85.0, 5.0) << LatLng(85.0, -5.0)
- << LatLng(75.0, -5.0) << LatLng(75.0, 5.0)));
- loops.push_back(loop(points() << LatLng(81.0, 1.0) << LatLng(81.0, -1.0)
- << LatLng(79.0, -1.0) << LatLng(79.0, 1.0)));
- S2Polygon northPolyHole(&loops);
+ ASSERT_LESS_THAN(bigNorthPoly.GetArea(), 2 * M_PI);
+ ASSERT_LESS_THAN(northPoly.GetArea(), bigNorthPoly.GetArea());
+ ASSERT(bigNorthPoly.Contains(northPoly));
+ ASSERT(bigNorthPoly.Intersects(northPoly));
+}
- ASSERT_LESS_THAN(northPolyHole.GetArea(), bigNorthPoly.GetArea());
- ASSERT_FALSE(bigNorthPoly.Contains(northPolyHole));
- ASSERT(bigNorthPoly.Intersects(northPolyHole));
- }
+TEST(BigSimplePolygon, PolarContainsWithHoles) {
+ // Square 10 degrees from the north pole [90,0]
+ BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+
+ // Square 5 degrees from the north pole [90, 0] with a concentric hole 1 degree from the
+ // north pole
+ vector<S2Loop*> loops;
+ loops.push_back(loop(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0) << LatLng(85.0, 180.0)
+ << LatLng(85.0, -90.0)));
+ loops.push_back(loop(points() << LatLng(89.0, 0.0) << LatLng(89.0, 90.0) << LatLng(89.0, 180.0)
+ << LatLng(89.0, -90.0)));
+ S2Polygon northPolyHole(&loops);
+
+ ASSERT_LESS_THAN(northPolyHole.GetArea(), bigNorthPoly.GetArea());
+ ASSERT(bigNorthPoly.Contains(northPolyHole));
+ ASSERT(bigNorthPoly.Intersects(northPolyHole));
+}
- // Edge cases
- //
- // No promise in terms of points on border - they may be inside or outside the big polygon.
- // But we need to ensure the result is consistent:
- // 1. If a polygon/line is contained by a big polygon, they must intersect with each other.
- // 2. Relation doesn't change as long as the touch point doesn't change, no matter the big
- // polygon is larger or less then a hemisphere.
- // 3. Relations for big polygons less than a hemisphere are consistent with ordinary (simple)
- // polygon results.
-
- template <typename TShape>
- void checkConsistency(const BigSimplePolygon& bigPoly,
- const BigSimplePolygon& expandedBigPoly,
- const TShape& shape) {
- // Contain() => Intersects()
- if (bigPoly.Contains(shape)) ASSERT(bigPoly.Intersects(shape));
- if (expandedBigPoly.Contains(shape)) ASSERT(expandedBigPoly.Intersects(shape));
- // Relation doesn't change
- ASSERT_EQUALS(bigPoly.Contains(shape), expandedBigPoly.Contains(shape));
- ASSERT_EQUALS(bigPoly.Intersects(shape), expandedBigPoly.Intersects(shape));
- }
+TEST(BigSimplePolygon, PolarIntersectsWithHoles) {
+ // Square 10 degrees from the north pole [90,0]
+ BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+
+    // A 10x10 degree square centered on [80.0, 0.0] with a concentric 2x2 degree hole
+ vector<S2Loop*> loops;
+ loops.push_back(loop(points() << LatLng(85.0, 5.0) << LatLng(85.0, -5.0) << LatLng(75.0, -5.0)
+ << LatLng(75.0, 5.0)));
+ loops.push_back(loop(points() << LatLng(81.0, 1.0) << LatLng(81.0, -1.0) << LatLng(79.0, -1.0)
+ << LatLng(79.0, 1.0)));
+ S2Polygon northPolyHole(&loops);
+
+ ASSERT_LESS_THAN(northPolyHole.GetArea(), bigNorthPoly.GetArea());
+ ASSERT_FALSE(bigNorthPoly.Contains(northPolyHole));
+ ASSERT(bigNorthPoly.Intersects(northPolyHole));
+}
- // Polygon shares big polygon's edge (disjoint)
- TEST(BigSimplePolygon, ShareEdgeDisjoint) {
- // Big polygon smaller than a hemisphere.
- BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
- ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
-
- // Vertex point and collinear point
- S2Point point = LatLng(80.0, 0.0).ToPoint();
- S2Point collinearPoint = LatLng(0.0, 0.0).ToPoint();
-
- // Polygon shares one edge
- S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, -10.0) << LatLng(80.0, -10.0)));
- // Polygon shares a segment of one edge
- S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, -10.0) << LatLng(50.0, -10.0)));
-
- // Line
- S2Polyline line(pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, -10.0)));
- // Line share a segment of one edge
- S2Polyline collinearLine(pointVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, -10.0)));
-
- // Big polygon larger than a hemisphere.
- BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0)
- << LatLng(80.0, -90.0) << LatLng(80.0, 180.0)
- << LatLng(80.0, 90.0)));
- ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
-
- checkConsistency(bigPoly, expandedBigPoly, point);
- checkConsistency(bigPoly, expandedBigPoly, collinearPoint);
- checkConsistency(bigPoly, expandedBigPoly, poly);
- checkConsistency(bigPoly, expandedBigPoly, collinearPoly);
- checkConsistency(bigPoly, expandedBigPoly, line);
- checkConsistency(bigPoly, expandedBigPoly, collinearLine);
-
- // Check the complement of big polygon
- bigPoly.Invert();
- ASSERT_GREATER_THAN(bigPoly.GetArea(), 2 * M_PI);
- expandedBigPoly.Invert();
- ASSERT_LESS_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
-
- checkConsistency(bigPoly, expandedBigPoly, point);
- checkConsistency(bigPoly, expandedBigPoly, collinearPoint);
- checkConsistency(bigPoly, expandedBigPoly, poly);
- checkConsistency(bigPoly, expandedBigPoly, collinearPoly);
- checkConsistency(bigPoly, expandedBigPoly, line);
- checkConsistency(bigPoly, expandedBigPoly, collinearLine);
- }
+// Edge cases
+//
+// No promise is made about points on the border - they may end up inside or outside the big
+// polygon. But we need to ensure the results are consistent:
+// 1. If a polygon/line is contained by a big polygon, it must also intersect the big polygon.
+// 2. The relation doesn't change as long as the touch point doesn't change, no matter whether
+// the big polygon is larger or smaller than a hemisphere.
+// 3. Relations for big polygons smaller than a hemisphere are consistent with ordinary (simple)
+// polygon results.
+
+template <typename TShape>
+void checkConsistency(const BigSimplePolygon& bigPoly,
+ const BigSimplePolygon& expandedBigPoly,
+ const TShape& shape) {
+    // Contains() => Intersects()
+ if (bigPoly.Contains(shape))
+ ASSERT(bigPoly.Intersects(shape));
+ if (expandedBigPoly.Contains(shape))
+ ASSERT(expandedBigPoly.Intersects(shape));
+ // Relation doesn't change
+ ASSERT_EQUALS(bigPoly.Contains(shape), expandedBigPoly.Contains(shape));
+ ASSERT_EQUALS(bigPoly.Intersects(shape), expandedBigPoly.Intersects(shape));
+}
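checkConsistency() exercises rules 1 and 2 above; rule 3 is only covered indirectly by the non-inverted assertions in the earlier tests. A hedged sketch of how rule 3 could be spot-checked directly is shown below. The helper name and probe argument are illustrative and not part of the original suite; it relies only on calls already used in this file plus S2Polygon's pointer-taking Contains()/Intersects().

void checkMatchesSimplePolygon(const S2Polygon& probe) {
    // The same 20x20 square, once as a BigSimplePolygon and once as a plain S2Polygon.
    BigSimplePolygon bigSquare(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
                                             << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
    S2Polygon simpleSquare(loopVec(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
                                            << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));

    // Rule 3 only applies below a hemisphere.
    ASSERT_LESS_THAN(bigSquare.GetArea(), 2 * M_PI);

    // Both representations must report the same relation to the probe polygon.
    ASSERT_EQUALS(bigSquare.Contains(probe), simpleSquare.Contains(&probe));
    ASSERT_EQUALS(bigSquare.Intersects(probe), simpleSquare.Intersects(&probe));
}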
- // Polygon/line shares big polygon's edge (contained by big polygon)
- TEST(BigSimplePolygon, ShareEdgeContained) {
- // Big polygon smaller than a hemisphere.
- BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
- ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
-
- // Polygon
- S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 10.0) << LatLng(80.0, 10.0)));
- // Polygon shares a segment of one edge
- S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, 10.0) << LatLng(50.0, 10.0)));
- // Line
- S2Polyline line(pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(0.0, 10.0)));
- // Line shares a segment of one edge
- S2Polyline collinearLine(pointVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, 10.0)));
-
- // Big polygon larger than a hemisphere.
- BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0)
- << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0)
- << LatLng(80.0, -90.0) << LatLng(80.0, 180.0)
- << LatLng(80.0, 90.0)));
- ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
-
- checkConsistency(bigPoly, expandedBigPoly, poly);
- checkConsistency(bigPoly, expandedBigPoly, collinearPoly);
- checkConsistency(bigPoly, expandedBigPoly, line);
- checkConsistency(bigPoly, expandedBigPoly, collinearLine);
-
- // Check the complement of big polygon
- bigPoly.Invert();
- ASSERT_GREATER_THAN(bigPoly.GetArea(), 2 * M_PI);
- expandedBigPoly.Invert();
- ASSERT_LESS_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
-
- checkConsistency(bigPoly, expandedBigPoly, poly);
- checkConsistency(bigPoly, expandedBigPoly, collinearPoly);
- checkConsistency(bigPoly, expandedBigPoly, line);
- checkConsistency(bigPoly, expandedBigPoly, collinearLine);
- }
+// Polygon shares big polygon's edge (disjoint)
+TEST(BigSimplePolygon, ShareEdgeDisjoint) {
+ // Big polygon smaller than a hemisphere.
+ BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
+ << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
+ ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
+
+ // Vertex point and collinear point
+ S2Point point = LatLng(80.0, 0.0).ToPoint();
+ S2Point collinearPoint = LatLng(0.0, 0.0).ToPoint();
+
+ // Polygon shares one edge
+ S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
+ << LatLng(-80.0, -10.0) << LatLng(80.0, -10.0)));
+ // Polygon shares a segment of one edge
+ S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
+ << LatLng(-50.0, -10.0) << LatLng(50.0, -10.0)));
+
+ // Line
+ S2Polyline line(
+ pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0) << LatLng(-80.0, -10.0)));
+    // Line shares a segment of one edge
+ S2Polyline collinearLine(
+ pointVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0) << LatLng(-50.0, -10.0)));
+
+ // Big polygon larger than a hemisphere.
+ BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
+ << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
+ ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
+
+ checkConsistency(bigPoly, expandedBigPoly, point);
+ checkConsistency(bigPoly, expandedBigPoly, collinearPoint);
+ checkConsistency(bigPoly, expandedBigPoly, poly);
+ checkConsistency(bigPoly, expandedBigPoly, collinearPoly);
+ checkConsistency(bigPoly, expandedBigPoly, line);
+ checkConsistency(bigPoly, expandedBigPoly, collinearLine);
+
+    // Check the complement of the big polygon
+ bigPoly.Invert();
+ ASSERT_GREATER_THAN(bigPoly.GetArea(), 2 * M_PI);
+ expandedBigPoly.Invert();
+ ASSERT_LESS_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
+
+ checkConsistency(bigPoly, expandedBigPoly, point);
+ checkConsistency(bigPoly, expandedBigPoly, collinearPoint);
+ checkConsistency(bigPoly, expandedBigPoly, poly);
+ checkConsistency(bigPoly, expandedBigPoly, collinearPoly);
+ checkConsistency(bigPoly, expandedBigPoly, line);
+ checkConsistency(bigPoly, expandedBigPoly, collinearLine);
+}
+// Polygon/line shares big polygon's edge (contained by big polygon)
+TEST(BigSimplePolygon, ShareEdgeContained) {
+ // Big polygon smaller than a hemisphere.
+ BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
+ << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
+ ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
+
+ // Polygon
+ S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
+ << LatLng(-80.0, 10.0) << LatLng(80.0, 10.0)));
+ // Polygon shares a segment of one edge
+ S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
+ << LatLng(-50.0, 10.0) << LatLng(50.0, 10.0)));
+ // Line
+ S2Polyline line(
+ pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0) << LatLng(0.0, 10.0)));
+ // Line shares a segment of one edge
+ S2Polyline collinearLine(
+ pointVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0) << LatLng(-50.0, 10.0)));
+
+ // Big polygon larger than a hemisphere.
+ BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
+ << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
+ ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
+
+ checkConsistency(bigPoly, expandedBigPoly, poly);
+ checkConsistency(bigPoly, expandedBigPoly, collinearPoly);
+ checkConsistency(bigPoly, expandedBigPoly, line);
+ checkConsistency(bigPoly, expandedBigPoly, collinearLine);
+
+    // Check the complement of the big polygon
+ bigPoly.Invert();
+ ASSERT_GREATER_THAN(bigPoly.GetArea(), 2 * M_PI);
+ expandedBigPoly.Invert();
+ ASSERT_LESS_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
+
+ checkConsistency(bigPoly, expandedBigPoly, poly);
+ checkConsistency(bigPoly, expandedBigPoly, collinearPoly);
+ checkConsistency(bigPoly, expandedBigPoly, line);
+ checkConsistency(bigPoly, expandedBigPoly, collinearLine);
+}
}
diff --git a/src/mongo/db/geo/geoconstants.h b/src/mongo/db/geo/geoconstants.h
index e97e1d3b233..5883ae0ee02 100644
--- a/src/mongo/db/geo/geoconstants.h
+++ b/src/mongo/db/geo/geoconstants.h
@@ -30,8 +30,8 @@
namespace mongo {
- // Equatorial radius of earth.
- // Source: http://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html
- const double kRadiusOfEarthInMeters = (6378.1 * 1000);
+// Equatorial radius of earth.
+// Source: http://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html
+const double kRadiusOfEarthInMeters = (6378.1 * 1000);
} // namespace mongo
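As an aside (not part of the diff): this constant is normally combined with an S2 central angle to get a surface distance, since arc length is just radius times angle. A one-line sketch, with the function name being illustrative only:

// Great-circle distance in meters for a central angle given in radians.
inline double arcLengthInMeters(double centralAngleInRadians) {
    return centralAngleInRadians * kRadiusOfEarthInMeters;
}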
diff --git a/src/mongo/db/geo/geometry_container.cpp b/src/mongo/db/geo/geometry_container.cpp
index 55afbe5c021..c74918e40c2 100644
--- a/src/mongo/db/geo/geometry_container.cpp
+++ b/src/mongo/db/geo/geometry_container.cpp
@@ -34,1184 +34,1259 @@
namespace mongo {
- using mongoutils::str::equals;
-
- GeometryContainer::GeometryContainer() {
- }
-
- bool GeometryContainer::isSimpleContainer() const {
- return NULL != _point || NULL != _line || NULL != _polygon;
- }
-
- bool GeometryContainer::supportsContains() const {
- return NULL != _polygon
- || NULL != _box
- || NULL != _cap
- || NULL != _multiPolygon
- || (NULL != _geometryCollection
- && (_geometryCollection->polygons.vector().size() > 0
- || _geometryCollection->multiPolygons.vector().size() > 0));
- }
-
- bool GeometryContainer::hasS2Region() const {
- return (NULL != _point && _point->crs == SPHERE)
- || NULL != _line
- || (NULL != _polygon && (_polygon->crs == SPHERE || _polygon->crs == STRICT_SPHERE))
- || (NULL != _cap && _cap->crs == SPHERE)
- || NULL != _multiPoint
- || NULL != _multiLine
- || NULL != _multiPolygon
- || NULL != _geometryCollection;
- }
-
- const S2Region& GeometryContainer::getS2Region() const {
- if (NULL != _point && SPHERE == _point->crs) {
- return _point->cell;
- } else if (NULL != _line) {
- return _line->line;
- } else if (NULL != _polygon && NULL != _polygon->s2Polygon) {
- return *_polygon->s2Polygon;
- } else if (NULL != _polygon && NULL != _polygon->bigPolygon) {
- return *_polygon->bigPolygon;
- } else if (NULL != _cap && SPHERE == _cap->crs) {
- return _cap->cap;
- } else if (NULL != _multiPoint) {
- return *_s2Region;
- } else if (NULL != _multiLine) {
- return *_s2Region;
- } else if (NULL != _multiPolygon) {
- return *_s2Region;
- } else {
- invariant(NULL != _geometryCollection);
- return *_s2Region;
- }
+using mongoutils::str::equals;
+
+GeometryContainer::GeometryContainer() {}
+
+bool GeometryContainer::isSimpleContainer() const {
+ return NULL != _point || NULL != _line || NULL != _polygon;
+}
+
+bool GeometryContainer::supportsContains() const {
+ return NULL != _polygon || NULL != _box || NULL != _cap || NULL != _multiPolygon ||
+ (NULL != _geometryCollection && (_geometryCollection->polygons.vector().size() > 0 ||
+ _geometryCollection->multiPolygons.vector().size() > 0));
+}
+
+bool GeometryContainer::hasS2Region() const {
+ return (NULL != _point && _point->crs == SPHERE) || NULL != _line ||
+ (NULL != _polygon && (_polygon->crs == SPHERE || _polygon->crs == STRICT_SPHERE)) ||
+ (NULL != _cap && _cap->crs == SPHERE) || NULL != _multiPoint || NULL != _multiLine ||
+ NULL != _multiPolygon || NULL != _geometryCollection;
+}
+
+const S2Region& GeometryContainer::getS2Region() const {
+ if (NULL != _point && SPHERE == _point->crs) {
+ return _point->cell;
+ } else if (NULL != _line) {
+ return _line->line;
+ } else if (NULL != _polygon && NULL != _polygon->s2Polygon) {
+ return *_polygon->s2Polygon;
+ } else if (NULL != _polygon && NULL != _polygon->bigPolygon) {
+ return *_polygon->bigPolygon;
+ } else if (NULL != _cap && SPHERE == _cap->crs) {
+ return _cap->cap;
+ } else if (NULL != _multiPoint) {
+ return *_s2Region;
+ } else if (NULL != _multiLine) {
+ return *_s2Region;
+ } else if (NULL != _multiPolygon) {
+ return *_s2Region;
+ } else {
+ invariant(NULL != _geometryCollection);
+ return *_s2Region;
}
+}
- bool GeometryContainer::hasR2Region() const {
- return _cap || _box || _point || (_polygon && _polygon->crs == FLAT)
- || (_multiPoint && FLAT == _multiPoint->crs);
- }
+bool GeometryContainer::hasR2Region() const {
+ return _cap || _box || _point || (_polygon && _polygon->crs == FLAT) ||
+ (_multiPoint && FLAT == _multiPoint->crs);
+}
- class GeometryContainer::R2BoxRegion : public R2Region {
- public:
+class GeometryContainer::R2BoxRegion : public R2Region {
+public:
+ R2BoxRegion(const GeometryContainer* geometry);
+ virtual ~R2BoxRegion();
- R2BoxRegion(const GeometryContainer* geometry);
- virtual ~R2BoxRegion();
+ Box getR2Bounds() const;
- Box getR2Bounds() const;
+ bool fastContains(const Box& other) const;
- bool fastContains(const Box& other) const;
+ bool fastDisjoint(const Box& other) const;
- bool fastDisjoint(const Box& other) const;
+private:
+ static Box buildBounds(const GeometryContainer& geometry);
- private:
+ // Not owned here
+ const GeometryContainer* _geometry;
- static Box buildBounds(const GeometryContainer& geometry);
+ // TODO: For big complex shapes, may be better to use actual shape from above
+ const Box _bounds;
+};
- // Not owned here
- const GeometryContainer* _geometry;
+GeometryContainer::R2BoxRegion::R2BoxRegion(const GeometryContainer* geometry)
+ : _geometry(geometry), _bounds(buildBounds(*geometry)) {}
- // TODO: For big complex shapes, may be better to use actual shape from above
- const Box _bounds;
- };
+GeometryContainer::R2BoxRegion::~R2BoxRegion() {}
- GeometryContainer::R2BoxRegion::R2BoxRegion(const GeometryContainer* geometry) :
- _geometry(geometry), _bounds(buildBounds(*geometry)) {
- }
+Box GeometryContainer::R2BoxRegion::getR2Bounds() const {
+ return _bounds;
+}
- GeometryContainer::R2BoxRegion::~R2BoxRegion() {
+bool GeometryContainer::R2BoxRegion::fastContains(const Box& other) const {
+ // TODO: Add more cases here to make coverings better
+ if (_geometry->_box && FLAT == _geometry->_box->crs) {
+ const Box& box = _geometry->_box->box;
+ if (box.contains(other))
+ return true;
+ } else if (_geometry->_cap && FLAT == _geometry->_cap->crs) {
+ const Circle& circle = _geometry->_cap->circle;
+ // Exact test
+ return circleContainsBox(circle, other);
}
- Box GeometryContainer::R2BoxRegion::getR2Bounds() const {
- return _bounds;
+ if (_geometry->_polygon && FLAT == _geometry->_polygon->crs) {
+ const Polygon& polygon = _geometry->_polygon->oldPolygon;
+ // Exact test
+ return polygonContainsBox(polygon, other);
}
- bool GeometryContainer::R2BoxRegion::fastContains(const Box& other) const {
+ // Not sure
+ return false;
+}
- // TODO: Add more cases here to make coverings better
- if (_geometry->_box && FLAT == _geometry->_box->crs) {
- const Box& box = _geometry->_box->box;
- if (box.contains(other))
- return true;
- } else if (_geometry->_cap && FLAT == _geometry->_cap->crs) {
- const Circle& circle = _geometry->_cap->circle;
- // Exact test
- return circleContainsBox(circle, other);
- }
-
- if (_geometry->_polygon && FLAT == _geometry->_polygon->crs) {
- const Polygon& polygon = _geometry->_polygon->oldPolygon;
- // Exact test
- return polygonContainsBox(polygon, other);
- }
+bool GeometryContainer::R2BoxRegion::fastDisjoint(const Box& other) const {
+ if (!_bounds.intersects(other))
+ return true;
- // Not sure
- return false;
- }
+ // Not sure
+ return false;
+}
- bool GeometryContainer::R2BoxRegion::fastDisjoint(const Box& other) const {
+static Point toLngLatPoint(const S2Point& s2Point) {
+ Point point;
+ S2LatLng latLng(s2Point);
+ point.x = latLng.lng().degrees();
+ point.y = latLng.lat().degrees();
+ return point;
+}
- if (!_bounds.intersects(other))
- return true;
+static void lineR2Bounds(const S2Polyline& flatLine, Box* flatBounds) {
+ int numVertices = flatLine.num_vertices();
+ verify(flatLine.num_vertices() > 0);
- // Not sure
- return false;
- }
+ flatBounds->init(toLngLatPoint(flatLine.vertex(0)), toLngLatPoint(flatLine.vertex(0)));
- static Point toLngLatPoint(const S2Point& s2Point) {
- Point point;
- S2LatLng latLng(s2Point);
- point.x = latLng.lng().degrees();
- point.y = latLng.lat().degrees();
- return point;
+ for (int i = 1; i < numVertices; ++i) {
+ flatBounds->expandToInclude(toLngLatPoint(flatLine.vertex(i)));
}
+}
- static void lineR2Bounds(const S2Polyline& flatLine, Box* flatBounds) {
+static void circleR2Bounds(const Circle& circle, Box* flatBounds) {
+ flatBounds->init(Point(circle.center.x - circle.radius, circle.center.y - circle.radius),
+ Point(circle.center.x + circle.radius, circle.center.y + circle.radius));
+}
- int numVertices = flatLine.num_vertices();
- verify(flatLine.num_vertices() > 0);
+static void multiPointR2Bounds(const vector<S2Point>& points, Box* flatBounds) {
+ verify(!points.empty());
- flatBounds->init(toLngLatPoint(flatLine.vertex(0)), toLngLatPoint(flatLine.vertex(0)));
+ flatBounds->init(toLngLatPoint(points.front()), toLngLatPoint(points.front()));
- for (int i = 1; i < numVertices; ++i) {
- flatBounds->expandToInclude(toLngLatPoint(flatLine.vertex(i)));
- }
+ vector<S2Point>::const_iterator it = points.begin();
+ for (++it; it != points.end(); ++it) {
+ const S2Point& s2Point = *it;
+ flatBounds->expandToInclude(toLngLatPoint(s2Point));
}
-
- static void circleR2Bounds(const Circle& circle, Box* flatBounds) {
- flatBounds->init(Point(circle.center.x - circle.radius, circle.center.y - circle.radius),
- Point(circle.center.x + circle.radius, circle.center.y + circle.radius));
+}
+
+static void polygonR2Bounds(const Polygon& polygon, Box* flatBounds) {
+ *flatBounds = polygon.bounds();
+}
+
+static void s2RegionR2Bounds(const S2Region& region, Box* flatBounds) {
+ S2LatLngRect s2Bounds = region.GetRectBound();
+ flatBounds->init(Point(s2Bounds.lng_lo().degrees(), s2Bounds.lat_lo().degrees()),
+ Point(s2Bounds.lng_hi().degrees(), s2Bounds.lat_hi().degrees()));
+}
+
+Box GeometryContainer::R2BoxRegion::buildBounds(const GeometryContainer& geometry) {
+ Box bounds;
+
+ if (geometry._point && FLAT == geometry._point->crs) {
+ bounds.init(geometry._point->oldPoint, geometry._point->oldPoint);
+ } else if (geometry._line && FLAT == geometry._line->crs) {
+ lineR2Bounds(geometry._line->line, &bounds);
+ } else if (geometry._cap && FLAT == geometry._cap->crs) {
+ circleR2Bounds(geometry._cap->circle, &bounds);
+ } else if (geometry._box && FLAT == geometry._box->crs) {
+ bounds = geometry._box->box;
+ } else if (geometry._polygon && FLAT == geometry._polygon->crs) {
+ polygonR2Bounds(geometry._polygon->oldPolygon, &bounds);
+ } else if (geometry._multiPoint && FLAT == geometry._multiPoint->crs) {
+ multiPointR2Bounds(geometry._multiPoint->points, &bounds);
+ } else if (geometry._multiLine && FLAT == geometry._multiLine->crs) {
+ verify(false);
+ } else if (geometry._multiPolygon && FLAT == geometry._multiPolygon->crs) {
+ verify(false);
+ } else if (geometry._geometryCollection) {
+ verify(false);
+ } else if (geometry.hasS2Region()) {
+ // For now, just support spherical cap for $centerSphere and GeoJSON points
+ verify((geometry._cap && FLAT != geometry._cap->crs) ||
+ (geometry._point && FLAT != geometry._point->crs));
+ s2RegionR2Bounds(geometry.getS2Region(), &bounds);
}
- static void multiPointR2Bounds(const vector<S2Point>& points, Box* flatBounds) {
+ return bounds;
+}
- verify(!points.empty());
+const R2Region& GeometryContainer::getR2Region() const {
+ return *_r2Region;
+}
- flatBounds->init(toLngLatPoint(points.front()), toLngLatPoint(points.front()));
+bool GeometryContainer::contains(const GeometryContainer& otherContainer) const {
+ // First let's deal with the FLAT cases
- vector<S2Point>::const_iterator it = points.begin();
- for (++it; it != points.end(); ++it) {
- const S2Point& s2Point = *it;
- flatBounds->expandToInclude(toLngLatPoint(s2Point));
- }
+ if (_point && FLAT == _point->crs) {
+ return false;
}
- static void polygonR2Bounds(const Polygon& polygon, Box* flatBounds) {
- *flatBounds = polygon.bounds();
+ if (NULL != _polygon && (FLAT == _polygon->crs)) {
+ if (NULL == otherContainer._point) {
+ return false;
+ }
+ return _polygon->oldPolygon.contains(otherContainer._point->oldPoint);
}
- static void s2RegionR2Bounds(const S2Region& region, Box* flatBounds) {
- S2LatLngRect s2Bounds = region.GetRectBound();
- flatBounds->init(Point(s2Bounds.lng_lo().degrees(), s2Bounds.lat_lo().degrees()),
- Point(s2Bounds.lng_hi().degrees(), s2Bounds.lat_hi().degrees()));
+ if (NULL != _box) {
+ verify(FLAT == _box->crs);
+ if (NULL == otherContainer._point) {
+ return false;
+ }
+ return _box->box.inside(otherContainer._point->oldPoint);
}
- Box GeometryContainer::R2BoxRegion::buildBounds(const GeometryContainer& geometry) {
-
- Box bounds;
-
- if (geometry._point && FLAT == geometry._point->crs) {
- bounds.init(geometry._point->oldPoint, geometry._point->oldPoint);
- }
- else if (geometry._line && FLAT == geometry._line->crs) {
- lineR2Bounds(geometry._line->line, &bounds);
- }
- else if (geometry._cap && FLAT == geometry._cap->crs) {
- circleR2Bounds(geometry._cap->circle, &bounds);
- }
- else if (geometry._box && FLAT == geometry._box->crs) {
- bounds = geometry._box->box;
- }
- else if (geometry._polygon && FLAT == geometry._polygon->crs) {
- polygonR2Bounds(geometry._polygon->oldPolygon, &bounds);
- }
- else if (geometry._multiPoint && FLAT == geometry._multiPoint->crs) {
- multiPointR2Bounds(geometry._multiPoint->points, &bounds);
- }
- else if (geometry._multiLine && FLAT == geometry._multiLine->crs) {
- verify(false);
- }
- else if (geometry._multiPolygon && FLAT == geometry._multiPolygon->crs) {
- verify(false);
- }
- else if (geometry._geometryCollection) {
- verify(false);
- }
- else if (geometry.hasS2Region()) {
- // For now, just support spherical cap for $centerSphere and GeoJSON points
- verify((geometry._cap && FLAT != geometry._cap->crs) ||
- (geometry._point && FLAT != geometry._point->crs));
- s2RegionR2Bounds(geometry.getS2Region(), &bounds);
+ if (NULL != _cap && (FLAT == _cap->crs)) {
+ if (NULL == otherContainer._point) {
+ return false;
}
-
- return bounds;
+        // Let's be as consistent epsilon-wise as we can with the '2d' index type.
+ return distanceWithin(
+ _cap->circle.center, otherContainer._point->oldPoint, _cap->circle.radius);
}
- const R2Region& GeometryContainer::getR2Region() const {
- return *_r2Region;
- }
+ // Now we deal with all the SPHERE stuff.
- bool GeometryContainer::contains(const GeometryContainer& otherContainer) const {
+    // Iterate over the other container's geometry and see if we contain all of it.
+ if (NULL != otherContainer._point) {
+ return contains(otherContainer._point->cell, otherContainer._point->point);
+ }
- // First let's deal with the FLAT cases
+ if (NULL != otherContainer._line) {
+ return contains(otherContainer._line->line);
+ }
- if (_point && FLAT == _point->crs) {
- return false;
- }
+ if (NULL != otherContainer._polygon) {
+ invariant(NULL != otherContainer._polygon->s2Polygon);
+ return contains(*otherContainer._polygon->s2Polygon);
+ }
- if (NULL != _polygon && (FLAT == _polygon->crs)) {
- if (NULL == otherContainer._point) { return false; }
- return _polygon->oldPolygon.contains(otherContainer._point->oldPoint);
+ if (NULL != otherContainer._multiPoint) {
+ for (size_t i = 0; i < otherContainer._multiPoint->points.size(); ++i) {
+ if (!contains(otherContainer._multiPoint->cells[i],
+ otherContainer._multiPoint->points[i])) {
+ return false;
+ }
}
+ return true;
+ }
- if (NULL != _box) {
- verify(FLAT == _box->crs);
- if (NULL == otherContainer._point) { return false; }
- return _box->box.inside(otherContainer._point->oldPoint);
+ if (NULL != otherContainer._multiLine) {
+ const vector<S2Polyline*>& lines = otherContainer._multiLine->lines.vector();
+ for (size_t i = 0; i < lines.size(); ++i) {
+ if (!contains(*lines[i])) {
+ return false;
+ }
}
+ return true;
+ }
- if (NULL != _cap && (FLAT == _cap->crs)) {
- if (NULL == otherContainer._point) { return false; }
- // Let's be as consistent epsilon-wise as we can with the '2d' indextype.
- return distanceWithin(_cap->circle.center, otherContainer._point->oldPoint,
- _cap->circle.radius);
+ if (NULL != otherContainer._multiPolygon) {
+ const vector<S2Polygon*>& polys = otherContainer._multiPolygon->polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (!contains(*polys[i])) {
+ return false;
+ }
}
+ return true;
+ }
- // Now we deal with all the SPHERE stuff.
+ if (NULL != otherContainer._geometryCollection) {
+ GeometryCollection& c = *otherContainer._geometryCollection;
- // Iterate over the other thing and see if we contain it all.
- if (NULL != otherContainer._point) {
- return contains(otherContainer._point->cell, otherContainer._point->point);
+ for (size_t i = 0; i < c.points.size(); ++i) {
+ if (!contains(c.points[i].cell, c.points[i].point)) {
+ return false;
+ }
}
- if (NULL != otherContainer._line) {
- return contains(otherContainer._line->line);
+ const vector<LineWithCRS*>& lines = c.lines.vector();
+ for (size_t i = 0; i < lines.size(); ++i) {
+ if (!contains(lines[i]->line)) {
+ return false;
+ }
}
- if (NULL != otherContainer._polygon) {
- invariant(NULL != otherContainer._polygon->s2Polygon);
- return contains(*otherContainer._polygon->s2Polygon);
+ const vector<PolygonWithCRS*>& polys = c.polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (!contains(*polys[i]->s2Polygon)) {
+ return false;
+ }
}
- if (NULL != otherContainer._multiPoint) {
- for (size_t i = 0; i < otherContainer._multiPoint->points.size(); ++i) {
- if (!contains(otherContainer._multiPoint->cells[i],
- otherContainer._multiPoint->points[i])) {
+ const vector<MultiPointWithCRS*>& multipoints = c.multiPoints.vector();
+ for (size_t i = 0; i < multipoints.size(); ++i) {
+ MultiPointWithCRS* mp = multipoints[i];
+ for (size_t j = 0; j < mp->points.size(); ++j) {
+ if (!contains(mp->cells[j], mp->points[j])) {
return false;
}
}
- return true;
}
- if (NULL != otherContainer._multiLine) {
- const vector<S2Polyline*>& lines = otherContainer._multiLine->lines.vector();
- for (size_t i = 0; i < lines.size(); ++i) {
- if (!contains(*lines[i])) { return false; }
- }
- return true;
- }
-
- if (NULL != otherContainer._multiPolygon) {
- const vector<S2Polygon*>& polys = otherContainer._multiPolygon->polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (!contains(*polys[i])) { return false; }
+ const vector<MultiLineWithCRS*>& multilines = c.multiLines.vector();
+ for (size_t i = 0; i < multilines.size(); ++i) {
+ const vector<S2Polyline*>& lines = multilines[i]->lines.vector();
+ for (size_t j = 0; j < lines.size(); ++j) {
+ if (!contains(*lines[j])) {
+ return false;
+ }
}
- return true;
}
- if (NULL != otherContainer._geometryCollection) {
- GeometryCollection& c = *otherContainer._geometryCollection;
-
- for (size_t i = 0; i < c.points.size(); ++i) {
- if (!contains(c.points[i].cell, c.points[i].point)) {
+ const vector<MultiPolygonWithCRS*>& multipolys = c.multiPolygons.vector();
+ for (size_t i = 0; i < multipolys.size(); ++i) {
+ const vector<S2Polygon*>& polys = multipolys[i]->polygons.vector();
+ for (size_t j = 0; j < polys.size(); ++j) {
+ if (!contains(*polys[j])) {
return false;
}
}
+ }
- const vector<LineWithCRS*>& lines = c.lines.vector();
- for (size_t i = 0; i < lines.size(); ++i) {
- if (!contains(lines[i]->line)) { return false; }
- }
+ return true;
+ }
- const vector<PolygonWithCRS*>& polys = c.polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (!contains(*polys[i]->s2Polygon)) { return false; }
- }
+ return false;
+}
- const vector<MultiPointWithCRS*>& multipoints = c.multiPoints.vector();
- for (size_t i = 0; i < multipoints.size(); ++i) {
- MultiPointWithCRS* mp = multipoints[i];
- for (size_t j = 0; j < mp->points.size(); ++j) {
- if (!contains(mp->cells[j], mp->points[j])) { return false; }
- }
- }
-
- const vector<MultiLineWithCRS*>& multilines = c.multiLines.vector();
- for (size_t i = 0; i < multilines.size(); ++i) {
- const vector<S2Polyline*>& lines = multilines[i]->lines.vector();
- for (size_t j = 0; j < lines.size(); ++j) {
- if (!contains(*lines[j])) { return false; }
- }
- }
+bool containsPoint(const S2Polygon& poly, const S2Cell& otherCell, const S2Point& otherPoint) {
+ // This is much faster for actual containment checking.
+ if (poly.Contains(otherPoint)) {
+ return true;
+ }
+    // This is slower but also handles points lying on edges/vertices.
+ return poly.MayIntersect(otherCell);
+}
- const vector<MultiPolygonWithCRS*>& multipolys = c.multiPolygons.vector();
- for (size_t i = 0; i < multipolys.size(); ++i) {
- const vector<S2Polygon*>& polys = multipolys[i]->polygons.vector();
- for (size_t j = 0; j < polys.size(); ++j) {
- if (!contains(*polys[j])) { return false; }
- }
- }
+bool GeometryContainer::contains(const S2Cell& otherCell, const S2Point& otherPoint) const {
+ if (NULL != _polygon && (NULL != _polygon->s2Polygon)) {
+ return containsPoint(*_polygon->s2Polygon, otherCell, otherPoint);
+ }
+ if (NULL != _polygon && (NULL != _polygon->bigPolygon)) {
+ if (_polygon->bigPolygon->Contains(otherPoint))
return true;
- }
-
- return false;
+ return _polygon->bigPolygon->MayIntersect(otherCell);
}
- bool containsPoint(const S2Polygon& poly, const S2Cell& otherCell, const S2Point& otherPoint) {
- // This is much faster for actual containment checking.
- if (poly.Contains(otherPoint)) { return true; }
- // This is slower but contains edges/vertices.
- return poly.MayIntersect(otherCell);
+ if (NULL != _cap && (_cap->crs == SPHERE)) {
+ return _cap->cap.MayIntersect(otherCell);
}
- bool GeometryContainer::contains(const S2Cell& otherCell, const S2Point& otherPoint) const {
- if (NULL != _polygon && (NULL != _polygon->s2Polygon)) {
- return containsPoint(*_polygon->s2Polygon, otherCell, otherPoint);
- }
-
- if (NULL != _polygon && (NULL != _polygon->bigPolygon)) {
- if (_polygon->bigPolygon->Contains(otherPoint))
+ if (NULL != _multiPolygon) {
+ const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (containsPoint(*polys[i], otherCell, otherPoint)) {
return true;
- return _polygon->bigPolygon->MayIntersect(otherCell);
- }
-
- if (NULL != _cap && (_cap->crs == SPHERE)) {
- return _cap->cap.MayIntersect(otherCell);
- }
-
- if (NULL != _multiPolygon) {
- const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (containsPoint(*polys[i], otherCell, otherPoint)) { return true; }
}
}
+ }
- if (NULL != _geometryCollection) {
- const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (containsPoint(*polys[i]->s2Polygon, otherCell, otherPoint)) { return true; }
+ if (NULL != _geometryCollection) {
+ const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (containsPoint(*polys[i]->s2Polygon, otherCell, otherPoint)) {
+ return true;
}
+ }
- const vector<MultiPolygonWithCRS*>& multipolys =_geometryCollection->multiPolygons.vector();
- for (size_t i = 0; i < multipolys.size(); ++i) {
- const vector<S2Polygon*>& innerpolys = multipolys[i]->polygons.vector();
- for (size_t j = 0; j < innerpolys.size(); ++j) {
- if (containsPoint(*innerpolys[j], otherCell, otherPoint)) { return true; }
+ const vector<MultiPolygonWithCRS*>& multipolys =
+ _geometryCollection->multiPolygons.vector();
+ for (size_t i = 0; i < multipolys.size(); ++i) {
+ const vector<S2Polygon*>& innerpolys = multipolys[i]->polygons.vector();
+ for (size_t j = 0; j < innerpolys.size(); ++j) {
+ if (containsPoint(*innerpolys[j], otherCell, otherPoint)) {
+ return true;
}
}
}
+ }
+
+ return false;
+}
+
+bool containsLine(const S2Polygon& poly, const S2Polyline& otherLine) {
+    // Somewhat roundabout: clip the line against the polygon, then check that the
+    // clipped result still covers the line we started with. If it does, the polygon
+    // contains the entire line.
+ OwnedPointerVector<S2Polyline> clippedOwned;
+ vector<S2Polyline*>& clipped = clippedOwned.mutableVector();
+ poly.IntersectWithPolyline(&otherLine, &clipped);
+ if (1 != clipped.size()) {
return false;
}
- bool containsLine(const S2Polygon& poly, const S2Polyline& otherLine) {
- // Kind of a mess. We get a function for clipping the line to the
- // polygon. We do this and make sure the line is the same as the
- // line we're clipping against.
- OwnedPointerVector<S2Polyline> clippedOwned;
- vector<S2Polyline*>& clipped = clippedOwned.mutableVector();
+ // If the line is entirely contained within the polygon, we should be
+ // getting it back verbatim, so really there should be no error.
+ bool ret = clipped[0]->NearlyCoversPolyline(otherLine, S1Angle::Degrees(1e-10));
- poly.IntersectWithPolyline(&otherLine, &clipped);
- if (1 != clipped.size()) { return false; }
+ return ret;
+}
- // If the line is entirely contained within the polygon, we should be
- // getting it back verbatim, so really there should be no error.
- bool ret = clipped[0]->NearlyCoversPolyline(otherLine,
- S1Angle::Degrees(1e-10));
-
- return ret;
+bool GeometryContainer::contains(const S2Polyline& otherLine) const {
+ if (NULL != _polygon && NULL != _polygon->s2Polygon) {
+ return containsLine(*_polygon->s2Polygon, otherLine);
}
- bool GeometryContainer::contains(const S2Polyline& otherLine) const {
- if (NULL != _polygon && NULL != _polygon->s2Polygon) {
- return containsLine(*_polygon->s2Polygon, otherLine);
- }
-
- if (NULL != _polygon && NULL != _polygon->bigPolygon) {
- return _polygon->bigPolygon->Contains(otherLine);
- }
+ if (NULL != _polygon && NULL != _polygon->bigPolygon) {
+ return _polygon->bigPolygon->Contains(otherLine);
+ }
- if (NULL != _multiPolygon) {
- const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (containsLine(*polys[i], otherLine)) { return true; }
+ if (NULL != _multiPolygon) {
+ const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (containsLine(*polys[i], otherLine)) {
+ return true;
}
}
+ }
- if (NULL != _geometryCollection) {
- const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (containsLine(*polys[i]->s2Polygon, otherLine)) { return true; }
+ if (NULL != _geometryCollection) {
+ const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (containsLine(*polys[i]->s2Polygon, otherLine)) {
+ return true;
}
+ }
- const vector<MultiPolygonWithCRS*>& multipolys =_geometryCollection->multiPolygons.vector();
- for (size_t i = 0; i < multipolys.size(); ++i) {
- const vector<S2Polygon*>& innerpolys = multipolys[i]->polygons.vector();
- for (size_t j = 0; j < innerpolys.size(); ++j) {
- if (containsLine(*innerpolys[j], otherLine)) { return true; }
+ const vector<MultiPolygonWithCRS*>& multipolys =
+ _geometryCollection->multiPolygons.vector();
+ for (size_t i = 0; i < multipolys.size(); ++i) {
+ const vector<S2Polygon*>& innerpolys = multipolys[i]->polygons.vector();
+ for (size_t j = 0; j < innerpolys.size(); ++j) {
+ if (containsLine(*innerpolys[j], otherLine)) {
+ return true;
}
}
}
-
- return false;
}
- bool containsPolygon(const S2Polygon& poly, const S2Polygon& otherPoly) {
- return poly.Contains(&otherPoly);
- }
+ return false;
+}
- bool GeometryContainer::contains(const S2Polygon& otherPolygon) const {
- if (NULL != _polygon && NULL != _polygon->s2Polygon) {
- return containsPolygon(*_polygon->s2Polygon, otherPolygon);
- }
+bool containsPolygon(const S2Polygon& poly, const S2Polygon& otherPoly) {
+ return poly.Contains(&otherPoly);
+}
- if (NULL != _polygon && NULL != _polygon->bigPolygon) {
- return _polygon->bigPolygon->Contains(otherPolygon);
- }
+bool GeometryContainer::contains(const S2Polygon& otherPolygon) const {
+ if (NULL != _polygon && NULL != _polygon->s2Polygon) {
+ return containsPolygon(*_polygon->s2Polygon, otherPolygon);
+ }
- if (NULL != _multiPolygon) {
- const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (containsPolygon(*polys[i], otherPolygon)) { return true; }
+ if (NULL != _polygon && NULL != _polygon->bigPolygon) {
+ return _polygon->bigPolygon->Contains(otherPolygon);
+ }
+
+ if (NULL != _multiPolygon) {
+ const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (containsPolygon(*polys[i], otherPolygon)) {
+ return true;
}
}
+ }
- if (NULL != _geometryCollection) {
- const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (containsPolygon(*polys[i]->s2Polygon, otherPolygon)) { return true; }
+ if (NULL != _geometryCollection) {
+ const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (containsPolygon(*polys[i]->s2Polygon, otherPolygon)) {
+ return true;
}
+ }
- const vector<MultiPolygonWithCRS*>& multipolys =_geometryCollection->multiPolygons.vector();
- for (size_t i = 0; i < multipolys.size(); ++i) {
- const vector<S2Polygon*>& innerpolys = multipolys[i]->polygons.vector();
- for (size_t j = 0; j < innerpolys.size(); ++j) {
- if (containsPolygon(*innerpolys[j], otherPolygon)) { return true; }
+ const vector<MultiPolygonWithCRS*>& multipolys =
+ _geometryCollection->multiPolygons.vector();
+ for (size_t i = 0; i < multipolys.size(); ++i) {
+ const vector<S2Polygon*>& innerpolys = multipolys[i]->polygons.vector();
+ for (size_t j = 0; j < innerpolys.size(); ++j) {
+ if (containsPolygon(*innerpolys[j], otherPolygon)) {
+ return true;
}
}
}
-
- return false;
}
- bool GeometryContainer::intersects(const GeometryContainer& otherContainer) const {
- if (NULL != otherContainer._point) {
- return intersects(otherContainer._point->cell);
- } else if (NULL != otherContainer._line) {
- return intersects(otherContainer._line->line);
- } else if (NULL != otherContainer._polygon) {
- if (NULL == otherContainer._polygon->s2Polygon) { return false; }
- return intersects(*otherContainer._polygon->s2Polygon);
- } else if (NULL != otherContainer._multiPoint) {
- return intersects(*otherContainer._multiPoint);
- } else if (NULL != otherContainer._multiLine) {
- return intersects(*otherContainer._multiLine);
- } else if (NULL != otherContainer._multiPolygon) {
- return intersects(*otherContainer._multiPolygon);
- } else if (NULL != otherContainer._geometryCollection) {
- const GeometryCollection& c = *otherContainer._geometryCollection;
-
- for (size_t i = 0; i < c.points.size(); ++i) {
- if (intersects(c.points[i].cell)) { return true; }
- }
+ return false;
+}
- for (size_t i = 0; i < c.polygons.vector().size(); ++i) {
- if (intersects(*c.polygons.vector()[i]->s2Polygon)) { return true; }
+bool GeometryContainer::intersects(const GeometryContainer& otherContainer) const {
+ if (NULL != otherContainer._point) {
+ return intersects(otherContainer._point->cell);
+ } else if (NULL != otherContainer._line) {
+ return intersects(otherContainer._line->line);
+ } else if (NULL != otherContainer._polygon) {
+ if (NULL == otherContainer._polygon->s2Polygon) {
+ return false;
+ }
+ return intersects(*otherContainer._polygon->s2Polygon);
+ } else if (NULL != otherContainer._multiPoint) {
+ return intersects(*otherContainer._multiPoint);
+ } else if (NULL != otherContainer._multiLine) {
+ return intersects(*otherContainer._multiLine);
+ } else if (NULL != otherContainer._multiPolygon) {
+ return intersects(*otherContainer._multiPolygon);
+ } else if (NULL != otherContainer._geometryCollection) {
+ const GeometryCollection& c = *otherContainer._geometryCollection;
+
+ for (size_t i = 0; i < c.points.size(); ++i) {
+ if (intersects(c.points[i].cell)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.lines.vector().size(); ++i) {
- if (intersects(c.lines.vector()[i]->line)) { return true; }
+ for (size_t i = 0; i < c.polygons.vector().size(); ++i) {
+ if (intersects(*c.polygons.vector()[i]->s2Polygon)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.multiPolygons.vector().size(); ++i) {
- if (intersects(*c.multiPolygons.vector()[i])) { return true; }
+ for (size_t i = 0; i < c.lines.vector().size(); ++i) {
+ if (intersects(c.lines.vector()[i]->line)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.multiLines.vector().size(); ++i) {
- if (intersects(*c.multiLines.vector()[i])) { return true; }
+ for (size_t i = 0; i < c.multiPolygons.vector().size(); ++i) {
+ if (intersects(*c.multiPolygons.vector()[i])) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.multiPoints.vector().size(); ++i) {
- if (intersects(*c.multiPoints.vector()[i])) { return true; }
+ for (size_t i = 0; i < c.multiLines.vector().size(); ++i) {
+ if (intersects(*c.multiLines.vector()[i])) {
+ return true;
}
}
- return false;
+ for (size_t i = 0; i < c.multiPoints.vector().size(); ++i) {
+ if (intersects(*c.multiPoints.vector()[i])) {
+ return true;
+ }
+ }
}
- bool GeometryContainer::intersects(const MultiPointWithCRS& otherMultiPoint) const {
- for (size_t i = 0; i < otherMultiPoint.cells.size(); ++i) {
- if (intersects(otherMultiPoint.cells[i])) { return true; }
+ return false;
+}
+
+bool GeometryContainer::intersects(const MultiPointWithCRS& otherMultiPoint) const {
+ for (size_t i = 0; i < otherMultiPoint.cells.size(); ++i) {
+ if (intersects(otherMultiPoint.cells[i])) {
+ return true;
}
- return false;
}
+ return false;
+}
- bool GeometryContainer::intersects(const MultiLineWithCRS& otherMultiLine) const {
- for (size_t i = 0; i < otherMultiLine.lines.vector().size(); ++i) {
- if (intersects(*otherMultiLine.lines.vector()[i])) { return true; }
+bool GeometryContainer::intersects(const MultiLineWithCRS& otherMultiLine) const {
+ for (size_t i = 0; i < otherMultiLine.lines.vector().size(); ++i) {
+ if (intersects(*otherMultiLine.lines.vector()[i])) {
+ return true;
}
- return false;
}
+ return false;
+}
- bool GeometryContainer::intersects(const MultiPolygonWithCRS& otherMultiPolygon) const {
- for (size_t i = 0; i < otherMultiPolygon.polygons.vector().size(); ++i) {
- if (intersects(*otherMultiPolygon.polygons.vector()[i])) { return true; }
+bool GeometryContainer::intersects(const MultiPolygonWithCRS& otherMultiPolygon) const {
+ for (size_t i = 0; i < otherMultiPolygon.polygons.vector().size(); ++i) {
+ if (intersects(*otherMultiPolygon.polygons.vector()[i])) {
+ return true;
}
- return false;
}
-
- // Does this (GeometryContainer) intersect the provided data?
- bool GeometryContainer::intersects(const S2Cell &otherPoint) const {
- if (NULL != _point) {
- return _point->cell.MayIntersect(otherPoint);
- } else if (NULL != _line) {
- return _line->line.MayIntersect(otherPoint);
- } else if (NULL != _polygon && NULL != _polygon->s2Polygon) {
- return _polygon->s2Polygon->MayIntersect(otherPoint);
- } else if (NULL != _polygon && NULL != _polygon->bigPolygon) {
- return _polygon->bigPolygon->MayIntersect(otherPoint);
- } else if (NULL != _multiPoint) {
- const vector<S2Cell>& cells = _multiPoint->cells;
- for (size_t i = 0; i < cells.size(); ++i) {
- if (cells[i].MayIntersect(otherPoint)) { return true; }
+ return false;
+}
+
+// Does this (GeometryContainer) intersect the provided data?
+bool GeometryContainer::intersects(const S2Cell& otherPoint) const {
+ if (NULL != _point) {
+ return _point->cell.MayIntersect(otherPoint);
+ } else if (NULL != _line) {
+ return _line->line.MayIntersect(otherPoint);
+ } else if (NULL != _polygon && NULL != _polygon->s2Polygon) {
+ return _polygon->s2Polygon->MayIntersect(otherPoint);
+ } else if (NULL != _polygon && NULL != _polygon->bigPolygon) {
+ return _polygon->bigPolygon->MayIntersect(otherPoint);
+ } else if (NULL != _multiPoint) {
+ const vector<S2Cell>& cells = _multiPoint->cells;
+ for (size_t i = 0; i < cells.size(); ++i) {
+ if (cells[i].MayIntersect(otherPoint)) {
+ return true;
}
- } else if (NULL != _multiLine) {
- const vector<S2Polyline*>& lines = _multiLine->lines.vector();
- for (size_t i = 0; i < lines.size(); ++i) {
- if (lines[i]->MayIntersect(otherPoint)) { return true; }
+ }
+ } else if (NULL != _multiLine) {
+ const vector<S2Polyline*>& lines = _multiLine->lines.vector();
+ for (size_t i = 0; i < lines.size(); ++i) {
+ if (lines[i]->MayIntersect(otherPoint)) {
+ return true;
}
- } else if (NULL != _multiPolygon) {
- const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector();
- for (size_t i = 0; i < polys.size(); ++i) {
- if (polys[i]->MayIntersect(otherPoint)) { return true; }
+ }
+ } else if (NULL != _multiPolygon) {
+ const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector();
+ for (size_t i = 0; i < polys.size(); ++i) {
+ if (polys[i]->MayIntersect(otherPoint)) {
+ return true;
}
- } else if (NULL != _geometryCollection) {
- const GeometryCollection& c = *_geometryCollection;
+ }
+ } else if (NULL != _geometryCollection) {
+ const GeometryCollection& c = *_geometryCollection;
- for (size_t i = 0; i < c.points.size(); ++i) {
- if (c.points[i].cell.MayIntersect(otherPoint)) { return true; }
+ for (size_t i = 0; i < c.points.size(); ++i) {
+ if (c.points[i].cell.MayIntersect(otherPoint)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.polygons.vector().size(); ++i) {
- if (c.polygons.vector()[i]->s2Polygon->MayIntersect(otherPoint)) { return true; }
+ for (size_t i = 0; i < c.polygons.vector().size(); ++i) {
+ if (c.polygons.vector()[i]->s2Polygon->MayIntersect(otherPoint)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.lines.vector().size(); ++i) {
- if (c.lines.vector()[i]->line.MayIntersect(otherPoint)) { return true; }
+ for (size_t i = 0; i < c.lines.vector().size(); ++i) {
+ if (c.lines.vector()[i]->line.MayIntersect(otherPoint)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.multiPolygons.vector().size(); ++i) {
- const vector<S2Polygon*>& innerPolys =
- c.multiPolygons.vector()[i]->polygons.vector();
- for (size_t j = 0; j < innerPolys.size(); ++j) {
- if (innerPolys[j]->MayIntersect(otherPoint)) { return true; }
+ for (size_t i = 0; i < c.multiPolygons.vector().size(); ++i) {
+ const vector<S2Polygon*>& innerPolys = c.multiPolygons.vector()[i]->polygons.vector();
+ for (size_t j = 0; j < innerPolys.size(); ++j) {
+ if (innerPolys[j]->MayIntersect(otherPoint)) {
+ return true;
}
}
+ }
- for (size_t i = 0; i < c.multiLines.vector().size(); ++i) {
- const vector<S2Polyline*>& innerLines =
- c.multiLines.vector()[i]->lines.vector();
- for (size_t j = 0; j < innerLines.size(); ++j) {
- if (innerLines[j]->MayIntersect(otherPoint)) { return true; }
+ for (size_t i = 0; i < c.multiLines.vector().size(); ++i) {
+ const vector<S2Polyline*>& innerLines = c.multiLines.vector()[i]->lines.vector();
+ for (size_t j = 0; j < innerLines.size(); ++j) {
+ if (innerLines[j]->MayIntersect(otherPoint)) {
+ return true;
}
}
+ }
- for (size_t i = 0; i < c.multiPoints.vector().size(); ++i) {
- const vector<S2Cell>& innerCells = c.multiPoints.vector()[i]->cells;
- for (size_t j = 0; j < innerCells.size(); ++j) {
- if (innerCells[j].MayIntersect(otherPoint)) { return true; }
+ for (size_t i = 0; i < c.multiPoints.vector().size(); ++i) {
+ const vector<S2Cell>& innerCells = c.multiPoints.vector()[i]->cells;
+ for (size_t j = 0; j < innerCells.size(); ++j) {
+ if (innerCells[j].MayIntersect(otherPoint)) {
+ return true;
}
}
}
-
- return false;
- }
-
- bool polygonLineIntersection(const S2Polyline& line, const S2Polygon& poly) {
- // TODO(hk): modify s2 library to just let us know if it intersected
- // rather than returning all this.
- vector<S2Polyline*> clipped;
- poly.IntersectWithPolyline(&line, &clipped);
- bool ret = clipped.size() > 0;
- for (size_t i = 0; i < clipped.size(); ++i) delete clipped[i];
- return ret;
}
- bool GeometryContainer::intersects(const S2Polyline& otherLine) const {
- if (NULL != _point) {
- return otherLine.MayIntersect(_point->cell);
- } else if (NULL != _line) {
- return otherLine.Intersects(&_line->line);
- } else if (NULL != _polygon && NULL != _polygon->s2Polygon) {
- return polygonLineIntersection(otherLine, *_polygon->s2Polygon);
- } else if (NULL != _polygon && NULL != _polygon->bigPolygon) {
- return _polygon->bigPolygon->Intersects(otherLine);
- } else if (NULL != _multiPoint) {
- for (size_t i = 0; i < _multiPoint->cells.size(); ++i) {
- if (otherLine.MayIntersect(_multiPoint->cells[i])) { return true; }
+ return false;
+}
+
+bool polygonLineIntersection(const S2Polyline& line, const S2Polygon& poly) {
+ // TODO(hk): modify s2 library to just let us know if it intersected
+ // rather than returning all this.
+ vector<S2Polyline*> clipped;
+ poly.IntersectWithPolyline(&line, &clipped);
+ bool ret = clipped.size() > 0;
+ for (size_t i = 0; i < clipped.size(); ++i)
+ delete clipped[i];
+ return ret;
+}
+
+bool GeometryContainer::intersects(const S2Polyline& otherLine) const {
+ if (NULL != _point) {
+ return otherLine.MayIntersect(_point->cell);
+ } else if (NULL != _line) {
+ return otherLine.Intersects(&_line->line);
+ } else if (NULL != _polygon && NULL != _polygon->s2Polygon) {
+ return polygonLineIntersection(otherLine, *_polygon->s2Polygon);
+ } else if (NULL != _polygon && NULL != _polygon->bigPolygon) {
+ return _polygon->bigPolygon->Intersects(otherLine);
+ } else if (NULL != _multiPoint) {
+ for (size_t i = 0; i < _multiPoint->cells.size(); ++i) {
+ if (otherLine.MayIntersect(_multiPoint->cells[i])) {
+ return true;
}
- } else if (NULL != _multiLine) {
- for (size_t i = 0; i < _multiLine->lines.vector().size(); ++i) {
- if (otherLine.Intersects(_multiLine->lines.vector()[i])) {
- return true;
- }
+ }
+ } else if (NULL != _multiLine) {
+ for (size_t i = 0; i < _multiLine->lines.vector().size(); ++i) {
+ if (otherLine.Intersects(_multiLine->lines.vector()[i])) {
+ return true;
}
- } else if (NULL != _multiPolygon) {
- for (size_t i = 0; i < _multiPolygon->polygons.vector().size(); ++i) {
- if (polygonLineIntersection(otherLine, *_multiPolygon->polygons.vector()[i])) {
- return true;
- }
+ }
+ } else if (NULL != _multiPolygon) {
+ for (size_t i = 0; i < _multiPolygon->polygons.vector().size(); ++i) {
+ if (polygonLineIntersection(otherLine, *_multiPolygon->polygons.vector()[i])) {
+ return true;
}
- } else if (NULL != _geometryCollection) {
- const GeometryCollection& c = *_geometryCollection;
+ }
+ } else if (NULL != _geometryCollection) {
+ const GeometryCollection& c = *_geometryCollection;
- for (size_t i = 0; i < c.points.size(); ++i) {
- if (otherLine.MayIntersect(c.points[i].cell)) { return true; }
+ for (size_t i = 0; i < c.points.size(); ++i) {
+ if (otherLine.MayIntersect(c.points[i].cell)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.polygons.vector().size(); ++i) {
- if (polygonLineIntersection(otherLine, *c.polygons.vector()[i]->s2Polygon)) {
- return true;
- }
+ for (size_t i = 0; i < c.polygons.vector().size(); ++i) {
+ if (polygonLineIntersection(otherLine, *c.polygons.vector()[i]->s2Polygon)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.lines.vector().size(); ++i) {
- if (c.lines.vector()[i]->line.Intersects(&otherLine)) { return true; }
+ for (size_t i = 0; i < c.lines.vector().size(); ++i) {
+ if (c.lines.vector()[i]->line.Intersects(&otherLine)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.multiPolygons.vector().size(); ++i) {
- const vector<S2Polygon*>& innerPolys =
- c.multiPolygons.vector()[i]->polygons.vector();
- for (size_t j = 0; j < innerPolys.size(); ++j) {
- if (polygonLineIntersection(otherLine, *innerPolys[j])) {
- return true;
- }
+ for (size_t i = 0; i < c.multiPolygons.vector().size(); ++i) {
+ const vector<S2Polygon*>& innerPolys = c.multiPolygons.vector()[i]->polygons.vector();
+ for (size_t j = 0; j < innerPolys.size(); ++j) {
+ if (polygonLineIntersection(otherLine, *innerPolys[j])) {
+ return true;
}
}
+ }
- for (size_t i = 0; i < c.multiLines.vector().size(); ++i) {
- const vector<S2Polyline*>& innerLines =
- c.multiLines.vector()[i]->lines.vector();
- for (size_t j = 0; j < innerLines.size(); ++j) {
- if (innerLines[j]->Intersects(&otherLine)) { return true; }
+ for (size_t i = 0; i < c.multiLines.vector().size(); ++i) {
+ const vector<S2Polyline*>& innerLines = c.multiLines.vector()[i]->lines.vector();
+ for (size_t j = 0; j < innerLines.size(); ++j) {
+ if (innerLines[j]->Intersects(&otherLine)) {
+ return true;
}
}
+ }
- for (size_t i = 0; i < c.multiPoints.vector().size(); ++i) {
- const vector<S2Cell>& innerCells = c.multiPoints.vector()[i]->cells;
- for (size_t j = 0; j < innerCells.size(); ++j) {
- if (otherLine.MayIntersect(innerCells[j])) { return true; }
+ for (size_t i = 0; i < c.multiPoints.vector().size(); ++i) {
+ const vector<S2Cell>& innerCells = c.multiPoints.vector()[i]->cells;
+ for (size_t j = 0; j < innerCells.size(); ++j) {
+ if (otherLine.MayIntersect(innerCells[j])) {
+ return true;
}
}
}
-
- return false;
}
- // Does 'this' intersect with the provided polygon?
- bool GeometryContainer::intersects(const S2Polygon& otherPolygon) const {
- if (NULL != _point) {
- return otherPolygon.MayIntersect(_point->cell);
- } else if (NULL != _line) {
- return polygonLineIntersection(_line->line, otherPolygon);
- } else if (NULL != _polygon && NULL != _polygon->s2Polygon) {
- return otherPolygon.Intersects(_polygon->s2Polygon.get());
- } else if (NULL != _polygon && NULL != _polygon->bigPolygon) {
- return _polygon->bigPolygon->Intersects(otherPolygon);
- } else if (NULL != _multiPoint) {
- for (size_t i = 0; i < _multiPoint->cells.size(); ++i) {
- if (otherPolygon.MayIntersect(_multiPoint->cells[i])) { return true; }
+ return false;
+}
+
+// Does 'this' intersect with the provided polygon?
+bool GeometryContainer::intersects(const S2Polygon& otherPolygon) const {
+ if (NULL != _point) {
+ return otherPolygon.MayIntersect(_point->cell);
+ } else if (NULL != _line) {
+ return polygonLineIntersection(_line->line, otherPolygon);
+ } else if (NULL != _polygon && NULL != _polygon->s2Polygon) {
+ return otherPolygon.Intersects(_polygon->s2Polygon.get());
+ } else if (NULL != _polygon && NULL != _polygon->bigPolygon) {
+ return _polygon->bigPolygon->Intersects(otherPolygon);
+ } else if (NULL != _multiPoint) {
+ for (size_t i = 0; i < _multiPoint->cells.size(); ++i) {
+ if (otherPolygon.MayIntersect(_multiPoint->cells[i])) {
+ return true;
}
- } else if (NULL != _multiLine) {
- for (size_t i = 0; i < _multiLine->lines.vector().size(); ++i) {
- if (polygonLineIntersection(*_multiLine->lines.vector()[i], otherPolygon)) {
- return true;
- }
+ }
+ } else if (NULL != _multiLine) {
+ for (size_t i = 0; i < _multiLine->lines.vector().size(); ++i) {
+ if (polygonLineIntersection(*_multiLine->lines.vector()[i], otherPolygon)) {
+ return true;
}
- } else if (NULL != _multiPolygon) {
- for (size_t i = 0; i < _multiPolygon->polygons.vector().size(); ++i) {
- if (otherPolygon.Intersects(_multiPolygon->polygons.vector()[i])) {
- return true;
- }
+ }
+ } else if (NULL != _multiPolygon) {
+ for (size_t i = 0; i < _multiPolygon->polygons.vector().size(); ++i) {
+ if (otherPolygon.Intersects(_multiPolygon->polygons.vector()[i])) {
+ return true;
}
- } else if (NULL != _geometryCollection) {
- const GeometryCollection& c = *_geometryCollection;
+ }
+ } else if (NULL != _geometryCollection) {
+ const GeometryCollection& c = *_geometryCollection;
- for (size_t i = 0; i < c.points.size(); ++i) {
- if (otherPolygon.MayIntersect(c.points[i].cell)) { return true; }
+ for (size_t i = 0; i < c.points.size(); ++i) {
+ if (otherPolygon.MayIntersect(c.points[i].cell)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.polygons.vector().size(); ++i) {
- if (otherPolygon.Intersects(c.polygons.vector()[i]->s2Polygon.get())) {
- return true;
- }
+ for (size_t i = 0; i < c.polygons.vector().size(); ++i) {
+ if (otherPolygon.Intersects(c.polygons.vector()[i]->s2Polygon.get())) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.lines.vector().size(); ++i) {
- if (polygonLineIntersection(c.lines.vector()[i]->line, otherPolygon)) {
- return true;
- }
+ for (size_t i = 0; i < c.lines.vector().size(); ++i) {
+ if (polygonLineIntersection(c.lines.vector()[i]->line, otherPolygon)) {
+ return true;
}
+ }
- for (size_t i = 0; i < c.multiPolygons.vector().size(); ++i) {
- const vector<S2Polygon*>& innerPolys =
- c.multiPolygons.vector()[i]->polygons.vector();
- for (size_t j = 0; j < innerPolys.size(); ++j) {
- if (otherPolygon.Intersects(innerPolys[j])) {
- return true;
- }
+ for (size_t i = 0; i < c.multiPolygons.vector().size(); ++i) {
+ const vector<S2Polygon*>& innerPolys = c.multiPolygons.vector()[i]->polygons.vector();
+ for (size_t j = 0; j < innerPolys.size(); ++j) {
+ if (otherPolygon.Intersects(innerPolys[j])) {
+ return true;
}
}
+ }
- for (size_t i = 0; i < c.multiLines.vector().size(); ++i) {
- const vector<S2Polyline*>& innerLines =
- c.multiLines.vector()[i]->lines.vector();
- for (size_t j = 0; j < innerLines.size(); ++j) {
- if (polygonLineIntersection(*innerLines[j], otherPolygon)) {
- return true;
- }
+ for (size_t i = 0; i < c.multiLines.vector().size(); ++i) {
+ const vector<S2Polyline*>& innerLines = c.multiLines.vector()[i]->lines.vector();
+ for (size_t j = 0; j < innerLines.size(); ++j) {
+ if (polygonLineIntersection(*innerLines[j], otherPolygon)) {
+ return true;
}
}
+ }
- for (size_t i = 0; i < c.multiPoints.vector().size(); ++i) {
- const vector<S2Cell>& innerCells = c.multiPoints.vector()[i]->cells;
- for (size_t j = 0; j < innerCells.size(); ++j) {
- if (otherPolygon.MayIntersect(innerCells[j])) {
- return true;
- }
+ for (size_t i = 0; i < c.multiPoints.vector().size(); ++i) {
+ const vector<S2Cell>& innerCells = c.multiPoints.vector()[i]->cells;
+ for (size_t j = 0; j < innerCells.size(); ++j) {
+ if (otherPolygon.MayIntersect(innerCells[j])) {
+ return true;
}
}
}
-
- return false;
}
- Status GeometryContainer::parseFromGeoJSON(const BSONObj& obj) {
- GeoParser::GeoJSONType type = GeoParser::parseGeoJSONType(obj);
-
- if (GeoParser::GEOJSON_UNKNOWN == type) {
- return Status(ErrorCodes::BadValue, str::stream() << "unknown GeoJSON type: " << obj);
- }
-
- Status status = Status::OK();
- vector<S2Region*> regions;
-
- if (GeoParser::GEOJSON_POINT == type) {
- _point.reset(new PointWithCRS());
- status = GeoParser::parseGeoJSONPoint(obj, _point.get());
- } else if (GeoParser::GEOJSON_LINESTRING == type) {
- _line.reset(new LineWithCRS());
- status = GeoParser::parseGeoJSONLine(obj, _line.get());
- } else if (GeoParser::GEOJSON_POLYGON == type) {
- _polygon.reset(new PolygonWithCRS());
- status = GeoParser::parseGeoJSONPolygon(obj, _polygon.get());
- } else if (GeoParser::GEOJSON_MULTI_POINT == type) {
- _multiPoint.reset(new MultiPointWithCRS());
- status = GeoParser::parseMultiPoint(obj, _multiPoint.get());
- for (size_t i = 0; i < _multiPoint->cells.size(); ++i) {
- regions.push_back(&_multiPoint->cells[i]);
- }
- } else if (GeoParser::GEOJSON_MULTI_LINESTRING == type) {
- _multiLine.reset(new MultiLineWithCRS());
- status = GeoParser::parseMultiLine(obj, _multiLine.get());
- for (size_t i = 0; i < _multiLine->lines.size(); ++i) {
- regions.push_back(_multiLine->lines[i]);
- }
- } else if (GeoParser::GEOJSON_MULTI_POLYGON == type) {
- _multiPolygon.reset(new MultiPolygonWithCRS());
- status = GeoParser::parseMultiPolygon(obj, _multiPolygon.get());
- for (size_t i = 0; i < _multiPolygon->polygons.size(); ++i) {
- regions.push_back(_multiPolygon->polygons[i]);
- }
- } else if (GeoParser::GEOJSON_GEOMETRY_COLLECTION == type) {
- _geometryCollection.reset(new GeometryCollection());
- status = GeoParser::parseGeometryCollection(obj, _geometryCollection.get());
-
- // Add regions
- for (size_t i = 0; i < _geometryCollection->points.size(); ++i) {
- regions.push_back(&_geometryCollection->points[i].cell);
- }
- for (size_t i = 0; i < _geometryCollection->lines.size(); ++i) {
- regions.push_back(&_geometryCollection->lines[i]->line);
- }
- for (size_t i = 0; i < _geometryCollection->polygons.size(); ++i) {
- regions.push_back(_geometryCollection->polygons[i]->s2Polygon.get());
- }
- for (size_t i = 0; i < _geometryCollection->multiPoints.size(); ++i) {
- MultiPointWithCRS* multiPoint = _geometryCollection->multiPoints[i];
- for (size_t j = 0; j < multiPoint->cells.size(); ++j) {
- regions.push_back(&multiPoint->cells[j]);
- }
- }
- for (size_t i = 0; i < _geometryCollection->multiLines.size(); ++i) {
- const MultiLineWithCRS* multiLine = _geometryCollection->multiLines[i];
- for (size_t j = 0; j < multiLine->lines.size(); ++j) {
- regions.push_back(multiLine->lines[j]);
- }
- }
- for (size_t i = 0; i < _geometryCollection->multiPolygons.size(); ++i) {
- const MultiPolygonWithCRS* multiPolygon = _geometryCollection->multiPolygons[i];
- for (size_t j = 0; j < multiPolygon->polygons.size(); ++j) {
- regions.push_back(multiPolygon->polygons[j]);
- }
- }
- } else {
- // Should not reach here.
- invariant(false);
- }
+ return false;
+}
- // Check parsing result.
- if (!status.isOK()) return status;
+Status GeometryContainer::parseFromGeoJSON(const BSONObj& obj) {
+ GeoParser::GeoJSONType type = GeoParser::parseGeoJSONType(obj);
- if (regions.size() > 0) {
- // S2RegionUnion doesn't take ownership of pointers.
- _s2Region.reset(new S2RegionUnion(&regions));
- }
+ if (GeoParser::GEOJSON_UNKNOWN == type) {
+ return Status(ErrorCodes::BadValue, str::stream() << "unknown GeoJSON type: " << obj);
+ }
- return Status::OK();
+ Status status = Status::OK();
+ vector<S2Region*> regions;
+
+ if (GeoParser::GEOJSON_POINT == type) {
+ _point.reset(new PointWithCRS());
+ status = GeoParser::parseGeoJSONPoint(obj, _point.get());
+ } else if (GeoParser::GEOJSON_LINESTRING == type) {
+ _line.reset(new LineWithCRS());
+ status = GeoParser::parseGeoJSONLine(obj, _line.get());
+ } else if (GeoParser::GEOJSON_POLYGON == type) {
+ _polygon.reset(new PolygonWithCRS());
+ status = GeoParser::parseGeoJSONPolygon(obj, _polygon.get());
+ } else if (GeoParser::GEOJSON_MULTI_POINT == type) {
+ _multiPoint.reset(new MultiPointWithCRS());
+ status = GeoParser::parseMultiPoint(obj, _multiPoint.get());
+ for (size_t i = 0; i < _multiPoint->cells.size(); ++i) {
+ regions.push_back(&_multiPoint->cells[i]);
+ }
+ } else if (GeoParser::GEOJSON_MULTI_LINESTRING == type) {
+ _multiLine.reset(new MultiLineWithCRS());
+ status = GeoParser::parseMultiLine(obj, _multiLine.get());
+ for (size_t i = 0; i < _multiLine->lines.size(); ++i) {
+ regions.push_back(_multiLine->lines[i]);
+ }
+ } else if (GeoParser::GEOJSON_MULTI_POLYGON == type) {
+ _multiPolygon.reset(new MultiPolygonWithCRS());
+ status = GeoParser::parseMultiPolygon(obj, _multiPolygon.get());
+ for (size_t i = 0; i < _multiPolygon->polygons.size(); ++i) {
+ regions.push_back(_multiPolygon->polygons[i]);
+ }
+ } else if (GeoParser::GEOJSON_GEOMETRY_COLLECTION == type) {
+ _geometryCollection.reset(new GeometryCollection());
+ status = GeoParser::parseGeometryCollection(obj, _geometryCollection.get());
+
+ // Add regions
+ for (size_t i = 0; i < _geometryCollection->points.size(); ++i) {
+ regions.push_back(&_geometryCollection->points[i].cell);
+ }
+ for (size_t i = 0; i < _geometryCollection->lines.size(); ++i) {
+ regions.push_back(&_geometryCollection->lines[i]->line);
+ }
+ for (size_t i = 0; i < _geometryCollection->polygons.size(); ++i) {
+ regions.push_back(_geometryCollection->polygons[i]->s2Polygon.get());
+ }
+ for (size_t i = 0; i < _geometryCollection->multiPoints.size(); ++i) {
+ MultiPointWithCRS* multiPoint = _geometryCollection->multiPoints[i];
+ for (size_t j = 0; j < multiPoint->cells.size(); ++j) {
+ regions.push_back(&multiPoint->cells[j]);
+ }
+ }
+ for (size_t i = 0; i < _geometryCollection->multiLines.size(); ++i) {
+ const MultiLineWithCRS* multiLine = _geometryCollection->multiLines[i];
+ for (size_t j = 0; j < multiLine->lines.size(); ++j) {
+ regions.push_back(multiLine->lines[j]);
+ }
+ }
+ for (size_t i = 0; i < _geometryCollection->multiPolygons.size(); ++i) {
+ const MultiPolygonWithCRS* multiPolygon = _geometryCollection->multiPolygons[i];
+ for (size_t j = 0; j < multiPolygon->polygons.size(); ++j) {
+ regions.push_back(multiPolygon->polygons[j]);
+ }
+ }
+ } else {
+ // Should not reach here.
+ invariant(false);
}
- // Examples:
- // { $geoWithin : { $geometry : <GeoJSON> } }
- // { $geoIntersects : { $geometry : <GeoJSON> } }
- // { $geoWithin : { $box : [[x1, y1], [x2, y2]] } }
- // { $geoWithin : { $polygon : [[x1, y1], [x1, y2], [x2, y2], [x2, y1]] } }
- // { $geoWithin : { $center : [[x1, y1], r], } }
- // { $geoWithin : { $centerSphere : [[x, y], radius] } }
- // { $geoIntersects : { $geometry : [1, 2] } }
- //
- // "elem" is the first element of the object after $geoWithin / $geoIntersects predicates.
- // i.e. { $box: ... }, { $geometry: ... }
- Status GeometryContainer::parseFromQuery(const BSONElement& elem) {
- // Check elem is an object and has geo specifier.
- GeoParser::GeoSpecifier specifier = GeoParser::parseGeoSpecifier(elem);
-
- if (GeoParser::UNKNOWN == specifier) {
- // Cannot parse geo specifier.
- return Status(ErrorCodes::BadValue, str::stream() << "unknown geo specifier: " << elem);
- }
-
- Status status = Status::OK();
- BSONObj obj = elem.Obj();
- if (GeoParser::BOX == specifier) {
- _box.reset(new BoxWithCRS());
- status = GeoParser::parseLegacyBox(obj, _box.get());
- } else if (GeoParser::CENTER == specifier) {
- _cap.reset(new CapWithCRS());
- status = GeoParser::parseLegacyCenter(obj, _cap.get());
- } else if (GeoParser::POLYGON == specifier) {
- _polygon.reset(new PolygonWithCRS());
- status = GeoParser::parseLegacyPolygon(obj, _polygon.get());
- } else if (GeoParser::CENTER_SPHERE == specifier) {
- _cap.reset(new CapWithCRS());
- status = GeoParser::parseCenterSphere(obj, _cap.get());
- } else if (GeoParser::GEOMETRY == specifier) {
- // GeoJSON geometry or legacy point
- if (Array == elem.type() || obj.firstElement().isNumber()) {
- // legacy point
- _point.reset(new PointWithCRS());
- status = GeoParser::parseQueryPoint(elem, _point.get());
- } else {
- // GeoJSON geometry
- status = parseFromGeoJSON(obj);
- }
- }
- if (!status.isOK()) return status;
+ // Check parsing result.
+ if (!status.isOK())
+ return status;
- // If we support R2 regions, build the region immediately
- if (hasR2Region()) {
- _r2Region.reset(new R2BoxRegion(this));
- }
+ if (regions.size() > 0) {
+ // S2RegionUnion doesn't take ownership of pointers.
+ _s2Region.reset(new S2RegionUnion(&regions));
+ }
- return status;
+ return Status::OK();
+}
+
+// Examples:
+// { $geoWithin : { $geometry : <GeoJSON> } }
+// { $geoIntersects : { $geometry : <GeoJSON> } }
+// { $geoWithin : { $box : [[x1, y1], [x2, y2]] } }
+// { $geoWithin : { $polygon : [[x1, y1], [x1, y2], [x2, y2], [x2, y1]] } }
+// { $geoWithin : { $center : [[x1, y1], r], } }
+// { $geoWithin : { $centerSphere : [[x, y], radius] } }
+// { $geoIntersects : { $geometry : [1, 2] } }
+//
+// "elem" is the first element of the object after $geoWithin / $geoIntersects predicates.
+// i.e. { $box: ... }, { $geometry: ... }
+Status GeometryContainer::parseFromQuery(const BSONElement& elem) {
+ // Check elem is an object and has geo specifier.
+ GeoParser::GeoSpecifier specifier = GeoParser::parseGeoSpecifier(elem);
+
+ if (GeoParser::UNKNOWN == specifier) {
+ // Cannot parse geo specifier.
+ return Status(ErrorCodes::BadValue, str::stream() << "unknown geo specifier: " << elem);
}
- // Examples:
- // { location: <GeoJSON> }
- // { location: [1, 2] }
- // { location: [1, 2, 3] }
- // { location: {x: 1, y: 2} }
- //
- // "elem" is the element that contains geo data. e.g. "location": [1, 2]
- // We need the type information to determine whether it's legacy point.
- Status GeometryContainer::parseFromStorage(const BSONElement& elem) {
- if (!elem.isABSONObj()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "geo element must be an array or object: " << elem);
- }
-
- BSONObj geoObj = elem.Obj();
- Status status = Status::OK();
- if (Array == elem.type() || geoObj.firstElement().isNumber()) {
- // Legacy point
- // { location: [1, 2] }
- // { location: [1, 2, 3] }
- // { location: {x: 1, y: 2} }
- // { location: {x: 1, y: 2, type: "Point" } }
+ Status status = Status::OK();
+ BSONObj obj = elem.Obj();
+ if (GeoParser::BOX == specifier) {
+ _box.reset(new BoxWithCRS());
+ status = GeoParser::parseLegacyBox(obj, _box.get());
+ } else if (GeoParser::CENTER == specifier) {
+ _cap.reset(new CapWithCRS());
+ status = GeoParser::parseLegacyCenter(obj, _cap.get());
+ } else if (GeoParser::POLYGON == specifier) {
+ _polygon.reset(new PolygonWithCRS());
+ status = GeoParser::parseLegacyPolygon(obj, _polygon.get());
+ } else if (GeoParser::CENTER_SPHERE == specifier) {
+ _cap.reset(new CapWithCRS());
+ status = GeoParser::parseCenterSphere(obj, _cap.get());
+ } else if (GeoParser::GEOMETRY == specifier) {
+ // GeoJSON geometry or legacy point
+ if (Array == elem.type() || obj.firstElement().isNumber()) {
+ // legacy point
_point.reset(new PointWithCRS());
- // Allow more than two dimensions or extra fields, like [1, 2, 3]
- status = GeoParser::parseLegacyPoint(elem, _point.get(), true);
+ status = GeoParser::parseQueryPoint(elem, _point.get());
} else {
- // GeoJSON
- // { location: { type: "Point", coordinates: [...] } }
- status = parseFromGeoJSON(elem.Obj());
+ // GeoJSON geometry
+ status = parseFromGeoJSON(obj);
}
- if (!status.isOK()) return status;
-
- // If we support R2 regions, build the region immediately
- if (hasR2Region()) _r2Region.reset(new R2BoxRegion(this));
-
- return Status::OK();
}
+ if (!status.isOK())
+ return status;
- string GeometryContainer::getDebugType() const {
- if (NULL != _point) { return "pt"; }
- else if (NULL != _line) { return "ln"; }
- else if (NULL != _box) { return "bx"; }
- else if (NULL != _polygon) { return "pl"; }
- else if (NULL != _cap ) { return "cc"; }
- else if (NULL != _multiPoint) { return "mp"; }
- else if (NULL != _multiLine) { return "ml"; }
- else if (NULL != _multiPolygon) { return "my"; }
- else if (NULL != _geometryCollection) { return "gc"; }
- else {
- invariant(false);
- return "";
- }
+ // If we support R2 regions, build the region immediately
+ if (hasR2Region()) {
+ _r2Region.reset(new R2BoxRegion(this));
}
- CRS GeometryContainer::getNativeCRS() const {
-
- // TODO: Fix geometry collection reporting when/if we support multiple CRSes
-
- if (NULL != _point) { return _point->crs; }
- else if (NULL != _line) { return _line->crs; }
- else if (NULL != _box) { return _box->crs; }
- else if (NULL != _polygon) { return _polygon->crs; }
- else if (NULL != _cap ) { return _cap->crs; }
- else if (NULL != _multiPoint) { return _multiPoint->crs; }
- else if (NULL != _multiLine) { return _multiLine->crs; }
- else if (NULL != _multiPolygon) { return _multiPolygon->crs; }
- else if (NULL != _geometryCollection) { return SPHERE; }
- else {
- invariant(false);
- return FLAT;
- }
+ return status;
+}
+
+// Examples:
+// { location: <GeoJSON> }
+// { location: [1, 2] }
+// { location: [1, 2, 3] }
+// { location: {x: 1, y: 2} }
+//
+// "elem" is the element that contains geo data. e.g. "location": [1, 2]
+// We need the type information to determine whether it's legacy point.
+Status GeometryContainer::parseFromStorage(const BSONElement& elem) {
+ if (!elem.isABSONObj()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "geo element must be an array or object: " << elem);
}
- bool GeometryContainer::supportsProject(CRS otherCRS) const {
-
- // TODO: Fix geometry collection reporting when/if we support more CRSes
-
- if (NULL != _point) {
- return ShapeProjection::supportsProject(*_point, otherCRS);
- }
- else if (NULL != _line) { return _line->crs == otherCRS; }
- else if (NULL != _box) { return _box->crs == otherCRS; }
- else if (NULL != _polygon) {
- return ShapeProjection::supportsProject(*_polygon, otherCRS);
- }
- else if (NULL != _cap ) { return _cap->crs == otherCRS; }
- else if (NULL != _multiPoint) { return _multiPoint->crs == otherCRS; }
- else if (NULL != _multiLine) { return _multiLine->crs == otherCRS; }
- else if (NULL != _multiPolygon) { return _multiPolygon->crs == otherCRS; }
- else {
- invariant(NULL != _geometryCollection);
- return SPHERE == otherCRS;
- }
+ BSONObj geoObj = elem.Obj();
+ Status status = Status::OK();
+ if (Array == elem.type() || geoObj.firstElement().isNumber()) {
+ // Legacy point
+ // { location: [1, 2] }
+ // { location: [1, 2, 3] }
+ // { location: {x: 1, y: 2} }
+ // { location: {x: 1, y: 2, type: "Point" } }
+ _point.reset(new PointWithCRS());
+ // Allow more than two dimensions or extra fields, like [1, 2, 3]
+ status = GeoParser::parseLegacyPoint(elem, _point.get(), true);
+ } else {
+ // GeoJSON
+ // { location: { type: "Point", coordinates: [...] } }
+ status = parseFromGeoJSON(elem.Obj());
}
+ if (!status.isOK())
+ return status;
- void GeometryContainer::projectInto(CRS otherCRS) {
+ // If we support R2 regions, build the region immediately
+ if (hasR2Region())
+ _r2Region.reset(new R2BoxRegion(this));
+
+ return Status::OK();
+}
+
+string GeometryContainer::getDebugType() const {
+ if (NULL != _point) {
+ return "pt";
+ } else if (NULL != _line) {
+ return "ln";
+ } else if (NULL != _box) {
+ return "bx";
+ } else if (NULL != _polygon) {
+ return "pl";
+ } else if (NULL != _cap) {
+ return "cc";
+ } else if (NULL != _multiPoint) {
+ return "mp";
+ } else if (NULL != _multiLine) {
+ return "ml";
+ } else if (NULL != _multiPolygon) {
+ return "my";
+ } else if (NULL != _geometryCollection) {
+ return "gc";
+ } else {
+ invariant(false);
+ return "";
+ }
+}
+
+CRS GeometryContainer::getNativeCRS() const {
+ // TODO: Fix geometry collection reporting when/if we support multiple CRSes
+
+ if (NULL != _point) {
+ return _point->crs;
+ } else if (NULL != _line) {
+ return _line->crs;
+ } else if (NULL != _box) {
+ return _box->crs;
+ } else if (NULL != _polygon) {
+ return _polygon->crs;
+ } else if (NULL != _cap) {
+ return _cap->crs;
+ } else if (NULL != _multiPoint) {
+ return _multiPoint->crs;
+ } else if (NULL != _multiLine) {
+ return _multiLine->crs;
+ } else if (NULL != _multiPolygon) {
+ return _multiPolygon->crs;
+ } else if (NULL != _geometryCollection) {
+ return SPHERE;
+ } else {
+ invariant(false);
+ return FLAT;
+ }
+}
+
+bool GeometryContainer::supportsProject(CRS otherCRS) const {
+ // TODO: Fix geometry collection reporting when/if we support more CRSes
+
+ if (NULL != _point) {
+ return ShapeProjection::supportsProject(*_point, otherCRS);
+ } else if (NULL != _line) {
+ return _line->crs == otherCRS;
+ } else if (NULL != _box) {
+ return _box->crs == otherCRS;
+ } else if (NULL != _polygon) {
+ return ShapeProjection::supportsProject(*_polygon, otherCRS);
+ } else if (NULL != _cap) {
+ return _cap->crs == otherCRS;
+ } else if (NULL != _multiPoint) {
+ return _multiPoint->crs == otherCRS;
+ } else if (NULL != _multiLine) {
+ return _multiLine->crs == otherCRS;
+ } else if (NULL != _multiPolygon) {
+ return _multiPolygon->crs == otherCRS;
+ } else {
+ invariant(NULL != _geometryCollection);
+ return SPHERE == otherCRS;
+ }
+}
- if (getNativeCRS() == otherCRS) return;
+void GeometryContainer::projectInto(CRS otherCRS) {
+ if (getNativeCRS() == otherCRS)
+ return;
- if (NULL != _polygon) {
- ShapeProjection::projectInto(_polygon.get(), otherCRS);
- return;
- }
-
- invariant(NULL != _point);
- ShapeProjection::projectInto(_point.get(), otherCRS);
+ if (NULL != _polygon) {
+ ShapeProjection::projectInto(_polygon.get(), otherCRS);
+ return;
}
- static double s2MinDistanceRad(const S2Point& s2Point, const MultiPointWithCRS& s2MultiPoint) {
+ invariant(NULL != _point);
+ ShapeProjection::projectInto(_point.get(), otherCRS);
+}
- double minDistance = -1;
- for (vector<S2Point>::const_iterator it = s2MultiPoint.points.begin();
- it != s2MultiPoint.points.end(); ++it) {
-
- double nextDistance = S2Distance::distanceRad(s2Point, *it);
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+static double s2MinDistanceRad(const S2Point& s2Point, const MultiPointWithCRS& s2MultiPoint) {
+ double minDistance = -1;
+ for (vector<S2Point>::const_iterator it = s2MultiPoint.points.begin();
+ it != s2MultiPoint.points.end();
+ ++it) {
+ double nextDistance = S2Distance::distanceRad(s2Point, *it);
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
-
- return minDistance;
}
- static double s2MinDistanceRad(const S2Point& s2Point, const MultiLineWithCRS& s2MultiLine) {
-
- double minDistance = -1;
- for (vector<S2Polyline*>::const_iterator it = s2MultiLine.lines.vector().begin();
- it != s2MultiLine.lines.vector().end(); ++it) {
+ return minDistance;
+}
- double nextDistance = S2Distance::minDistanceRad(s2Point, **it);
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+static double s2MinDistanceRad(const S2Point& s2Point, const MultiLineWithCRS& s2MultiLine) {
+ double minDistance = -1;
+ for (vector<S2Polyline*>::const_iterator it = s2MultiLine.lines.vector().begin();
+ it != s2MultiLine.lines.vector().end();
+ ++it) {
+ double nextDistance = S2Distance::minDistanceRad(s2Point, **it);
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
-
- return minDistance;
}
- static double s2MinDistanceRad(const S2Point& s2Point, const MultiPolygonWithCRS& s2MultiPolygon) {
+ return minDistance;
+}
- double minDistance = -1;
- for (vector<S2Polygon*>::const_iterator it = s2MultiPolygon.polygons.vector().begin();
- it != s2MultiPolygon.polygons.vector().end(); ++it) {
-
- double nextDistance = S2Distance::minDistanceRad(s2Point, **it);
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+static double s2MinDistanceRad(const S2Point& s2Point, const MultiPolygonWithCRS& s2MultiPolygon) {
+ double minDistance = -1;
+ for (vector<S2Polygon*>::const_iterator it = s2MultiPolygon.polygons.vector().begin();
+ it != s2MultiPolygon.polygons.vector().end();
+ ++it) {
+ double nextDistance = S2Distance::minDistanceRad(s2Point, **it);
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
-
- return minDistance;
}
- static double s2MinDistanceRad(const S2Point& s2Point,
- const GeometryCollection& geometryCollection) {
-
- double minDistance = -1;
- for (vector<PointWithCRS>::const_iterator it = geometryCollection.points.begin();
- it != geometryCollection.points.end(); ++it) {
+ return minDistance;
+}
- invariant(SPHERE == it->crs);
- double nextDistance = S2Distance::distanceRad(s2Point, it->point);
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+static double s2MinDistanceRad(const S2Point& s2Point,
+ const GeometryCollection& geometryCollection) {
+ double minDistance = -1;
+ for (vector<PointWithCRS>::const_iterator it = geometryCollection.points.begin();
+ it != geometryCollection.points.end();
+ ++it) {
+ invariant(SPHERE == it->crs);
+ double nextDistance = S2Distance::distanceRad(s2Point, it->point);
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
+ }
- for (vector<LineWithCRS*>::const_iterator it = geometryCollection.lines.vector().begin();
- it != geometryCollection.lines.vector().end(); ++it) {
-
- invariant(SPHERE == (*it)->crs);
- double nextDistance = S2Distance::minDistanceRad(s2Point, (*it)->line);
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+ for (vector<LineWithCRS*>::const_iterator it = geometryCollection.lines.vector().begin();
+ it != geometryCollection.lines.vector().end();
+ ++it) {
+ invariant(SPHERE == (*it)->crs);
+ double nextDistance = S2Distance::minDistanceRad(s2Point, (*it)->line);
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
+ }
- for (vector<PolygonWithCRS*>::const_iterator it = geometryCollection.polygons.vector().begin();
- it != geometryCollection.polygons.vector().end(); ++it) {
-
- invariant(SPHERE == (*it)->crs);
- // We don't support distances for big polygons yet.
- invariant(NULL != (*it)->s2Polygon);
- double nextDistance = S2Distance::minDistanceRad(s2Point, *((*it)->s2Polygon));
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+ for (vector<PolygonWithCRS*>::const_iterator it = geometryCollection.polygons.vector().begin();
+ it != geometryCollection.polygons.vector().end();
+ ++it) {
+ invariant(SPHERE == (*it)->crs);
+ // We don't support distances for big polygons yet.
+ invariant(NULL != (*it)->s2Polygon);
+ double nextDistance = S2Distance::minDistanceRad(s2Point, *((*it)->s2Polygon));
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
+ }
- for (vector<MultiPointWithCRS*>::const_iterator it = geometryCollection.multiPoints.vector()
- .begin(); it != geometryCollection.multiPoints.vector().end(); ++it) {
-
- double nextDistance = s2MinDistanceRad(s2Point, **it);
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+ for (vector<MultiPointWithCRS*>::const_iterator it =
+ geometryCollection.multiPoints.vector().begin();
+ it != geometryCollection.multiPoints.vector().end();
+ ++it) {
+ double nextDistance = s2MinDistanceRad(s2Point, **it);
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
+ }
- for (vector<MultiLineWithCRS*>::const_iterator it = geometryCollection.multiLines.vector()
- .begin(); it != geometryCollection.multiLines.vector().end(); ++it) {
-
- double nextDistance = s2MinDistanceRad(s2Point, **it);
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+ for (vector<MultiLineWithCRS*>::const_iterator it =
+ geometryCollection.multiLines.vector().begin();
+ it != geometryCollection.multiLines.vector().end();
+ ++it) {
+ double nextDistance = s2MinDistanceRad(s2Point, **it);
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
+ }
- for (vector<MultiPolygonWithCRS*>::const_iterator it = geometryCollection.multiPolygons
- .vector().begin(); it != geometryCollection.multiPolygons.vector().end(); ++it) {
-
- double nextDistance = s2MinDistanceRad(s2Point, **it);
- if (minDistance < 0 || nextDistance < minDistance) {
- minDistance = nextDistance;
- }
+ for (vector<MultiPolygonWithCRS*>::const_iterator it =
+ geometryCollection.multiPolygons.vector().begin();
+ it != geometryCollection.multiPolygons.vector().end();
+ ++it) {
+ double nextDistance = s2MinDistanceRad(s2Point, **it);
+ if (minDistance < 0 || nextDistance < minDistance) {
+ minDistance = nextDistance;
}
-
- return minDistance;
}
- double GeometryContainer::minDistance(const PointWithCRS& otherPoint) const {
-
- const CRS crs = getNativeCRS();
+ return minDistance;
+}
- if (FLAT == crs) {
+double GeometryContainer::minDistance(const PointWithCRS& otherPoint) const {
+ const CRS crs = getNativeCRS();
- invariant(NULL != _point);
+ if (FLAT == crs) {
+ invariant(NULL != _point);
- if (FLAT == otherPoint.crs) {
- return distance(_point->oldPoint, otherPoint.oldPoint);
- }
- else {
- S2LatLng latLng(otherPoint.point);
- return distance(_point->oldPoint,
- Point(latLng.lng().degrees(), latLng.lat().degrees()));
- }
+ if (FLAT == otherPoint.crs) {
+ return distance(_point->oldPoint, otherPoint.oldPoint);
+ } else {
+ S2LatLng latLng(otherPoint.point);
+ return distance(_point->oldPoint,
+ Point(latLng.lng().degrees(), latLng.lat().degrees()));
}
- else {
- invariant(SPHERE == crs);
-
- double minDistance = -1;
+ } else {
+ invariant(SPHERE == crs);
- if (NULL != _point) {
- minDistance = S2Distance::distanceRad(otherPoint.point, _point->point);
- }
- else if (NULL != _line) {
- minDistance = S2Distance::minDistanceRad(otherPoint.point, _line->line);
- }
- else if (NULL != _polygon) {
- // We don't support distances for big polygons yet.
- invariant(NULL != _polygon->s2Polygon);
- minDistance = S2Distance::minDistanceRad(otherPoint.point, *_polygon->s2Polygon);
- }
- else if (NULL != _cap) {
- minDistance = S2Distance::minDistanceRad(otherPoint.point, _cap->cap);
- }
- else if (NULL != _multiPoint) {
- minDistance = s2MinDistanceRad(otherPoint.point, *_multiPoint);
- }
- else if (NULL != _multiLine) {
- minDistance = s2MinDistanceRad(otherPoint.point, *_multiLine);
- }
- else if (NULL != _multiPolygon) {
- minDistance = s2MinDistanceRad(otherPoint.point, *_multiPolygon);
- }
- else if (NULL != _geometryCollection) {
- minDistance = s2MinDistanceRad(otherPoint.point, *_geometryCollection);
- }
+ double minDistance = -1;
- invariant(minDistance != -1);
- return minDistance * kRadiusOfEarthInMeters;
+ if (NULL != _point) {
+ minDistance = S2Distance::distanceRad(otherPoint.point, _point->point);
+ } else if (NULL != _line) {
+ minDistance = S2Distance::minDistanceRad(otherPoint.point, _line->line);
+ } else if (NULL != _polygon) {
+ // We don't support distances for big polygons yet.
+ invariant(NULL != _polygon->s2Polygon);
+ minDistance = S2Distance::minDistanceRad(otherPoint.point, *_polygon->s2Polygon);
+ } else if (NULL != _cap) {
+ minDistance = S2Distance::minDistanceRad(otherPoint.point, _cap->cap);
+ } else if (NULL != _multiPoint) {
+ minDistance = s2MinDistanceRad(otherPoint.point, *_multiPoint);
+ } else if (NULL != _multiLine) {
+ minDistance = s2MinDistanceRad(otherPoint.point, *_multiLine);
+ } else if (NULL != _multiPolygon) {
+ minDistance = s2MinDistanceRad(otherPoint.point, *_multiPolygon);
+ } else if (NULL != _geometryCollection) {
+ minDistance = s2MinDistanceRad(otherPoint.point, *_geometryCollection);
}
- }
- const CapWithCRS* GeometryContainer::getCapGeometryHack() const {
- return _cap.get();
+ invariant(minDistance != -1);
+ return minDistance * kRadiusOfEarthInMeters;
}
+}
+
+const CapWithCRS* GeometryContainer::getCapGeometryHack() const {
+ return _cap.get();
+}
} // namespace mongo
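
Before the header diff below, a short hedged sketch of how the entry points reformatted above fit together: parse the query geometry with parseFromQuery(), parse the stored value with parseFromStorage(), then dispatch through intersects(). This is illustrative only and not part of the commit; the helper name queryIntersectsStored, the use of firstElement() on the query object, and the includes are assumptions about how a caller would wire these pieces up.

    // Sketch (not part of the diff): exercising GeometryContainer's query-side API.
    // Assumes the declarations from geometry_container.h below and the BSON types in jsobj.h.
    #include "mongo/db/geo/geometry_container.h"
    #include "mongo/db/jsobj.h"

    namespace mongo {
    namespace {

    // Returns true when a $geoIntersects query geometry intersects the stored value.
    bool queryIntersectsStored(const BSONObj& queryPredicate, const BSONElement& storedElem) {
        // Query side: "queryPredicate" is the object after $geoIntersects,
        // e.g. { $geometry : { type : "Polygon", coordinates : [...] } }.
        GeometryContainer query;
        if (!query.parseFromQuery(queryPredicate.firstElement()).isOK())
            return false;

        // Storage side: the indexed location value, e.g. GeoJSON or a legacy [x, y] point.
        GeometryContainer stored;
        if (!stored.parseFromStorage(storedElem).isOK())
            return false;

        // Compare in the query's native CRS; projectInto() is only valid when supported.
        if (!stored.supportsProject(query.getNativeCRS()))
            return false;
        stored.projectInto(query.getNativeCRS());

        return query.intersects(stored);
    }

    }  // namespace
    }  // namespace mongo

The same shape would apply to the $geoWithin path, with contains() in place of intersects() and a supportsContains() check first; both mirror the single non-NULL-member dispatch that every predicate in this file uses.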
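
One detail worth calling out from the minDistance() implementation above: FLAT distances are returned in the coordinate units of the points, while SPHERE distances come back from the S2Distance helpers in radians and are scaled by kRadiusOfEarthInMeters before being returned. A small self-contained sketch of that scaling, using an assumed Earth-radius value (the real constant lives elsewhere in MongoDB's geo code):

    // Sketch (not part of the diff): the radians-to-meters scaling applied on the SPHERE path.
    #include <cstdio>

    int main() {
        const double kRadiusOfEarthInMeters = 6378.1 * 1000;  // assumed value, for illustration
        const double kPi = 3.14159265358979323846;

        // Two points one degree of arc apart on the sphere.
        double angularDistanceRad = kPi / 180.0;
        double distanceMeters = angularDistanceRad * kRadiusOfEarthInMeters;

        std::printf("1 degree of arc is roughly %.0f meters on this sphere\n", distanceMeters);
        return 0;
    }

Running this prints roughly 111319 meters, which is the familiar "about 111 km per degree" figure and matches the units callers of minDistance() expect on the spherical path.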
diff --git a/src/mongo/db/geo/geometry_container.h b/src/mongo/db/geo/geometry_container.h
index 05ca1ed2962..95dc8525440 100644
--- a/src/mongo/db/geo/geometry_container.h
+++ b/src/mongo/db/geo/geometry_container.h
@@ -36,126 +36,125 @@
namespace mongo {
- class GeometryContainer {
- MONGO_DISALLOW_COPYING(GeometryContainer);
- public:
-
- /**
- * Creates an empty geometry container which may then be loaded from BSON or directly.
- */
- GeometryContainer();
-
- /**
- * Loads an empty GeometryContainer from query.
- */
- Status parseFromQuery(const BSONElement& elem);
-
- /**
- * Loads an empty GeometryContainer from stored geometry.
- */
- Status parseFromStorage(const BSONElement& elem);
-
- /**
- * Is the geometry any of {Point, Line, Polygon}?
- */
- bool isSimpleContainer() const;
-
- /**
- * Reports the CRS of the contained geometry.
- * TODO: Rework once we have collections of multiple CRSes
- */
- CRS getNativeCRS() const;
-
- /**
- * Whether or not this geometry can be projected into a particular CRS
- */
- bool supportsProject(CRS crs) const;
-
- /**
- * Projects the current geometry into the supplied crs.
- * It is an error to call this function if canProjectInto(crs) is false.
- */
- void projectInto(CRS crs);
-
- /**
- * Minimum distance between this geometry and the supplied point.
- * TODO: Rework and generalize to full GeometryContainer distance
- */
- double minDistance(const PointWithCRS& point) const;
-
- /**
- * Only polygons (and aggregate types thereof) support contains.
- */
- bool supportsContains() const;
-
- /**
- * To check containment, we iterate over the otherContainer's geometries. If we don't
- * contain any sub-geometry of the otherContainer, the otherContainer is not contained
- * within us. If each sub-geometry of the otherContainer is contained within us, we contain
- * the entire otherContainer.
- */
- bool contains(const GeometryContainer& otherContainer) const;
-
- /**
- * To check intersection, we iterate over the otherContainer's geometries, checking each
- * geometry to see if we intersect it. If we intersect one geometry, we intersect the
- * entire other container.
- */
- bool intersects(const GeometryContainer& otherContainer) const;
-
- // Region which can be used to generate a covering of the query object in the S2 space.
- bool hasS2Region() const;
- const S2Region& getS2Region() const;
-
- // Region which can be used to generate a covering of the query object in euclidean space.
- bool hasR2Region() const;
- const R2Region& getR2Region() const;
-
- // Returns a string related to the type of the geometry (for debugging queries)
- std::string getDebugType() const;
-
- // Needed for 2D wrapping check (for now)
- // TODO: Remove these hacks
- const CapWithCRS* getCapGeometryHack() const;
-
- private:
-
- class R2BoxRegion;
-
- Status parseFromGeoJSON(const BSONObj& obj);
-
- // Does 'this' intersect with the provided type?
- bool intersects(const S2Cell& otherPoint) const;
- bool intersects(const S2Polyline& otherLine) const;
- bool intersects(const S2Polygon& otherPolygon) const;
- // These three just iterate over the geometries and call the 3 methods above.
- bool intersects(const MultiPointWithCRS& otherMultiPoint) const;
- bool intersects(const MultiLineWithCRS& otherMultiLine) const;
- bool intersects(const MultiPolygonWithCRS& otherMultiPolygon) const;
-
- // Used when 'this' has a polygon somewhere, either in _polygon or _multiPolygon or
- // _geometryCollection.
- bool contains(const S2Cell& otherCell, const S2Point& otherPoint) const;
- bool contains(const S2Polyline& otherLine) const;
- bool contains(const S2Polygon& otherPolygon) const;
-
- // Only one of these shared_ptrs should be non-NULL. S2Region is a
- // superclass but it only supports testing against S2Cells. We need
- // the most specific class we can get.
- std::unique_ptr<PointWithCRS> _point;
- std::unique_ptr<LineWithCRS> _line;
- std::unique_ptr<BoxWithCRS> _box;
- std::unique_ptr<PolygonWithCRS> _polygon;
- std::unique_ptr<CapWithCRS> _cap;
- std::unique_ptr<MultiPointWithCRS> _multiPoint;
- std::unique_ptr<MultiLineWithCRS> _multiLine;
- std::unique_ptr<MultiPolygonWithCRS> _multiPolygon;
- std::unique_ptr<GeometryCollection> _geometryCollection;
-
- // Cached for use during covering calculations
- // TODO: _s2Region is currently generated immediately - don't necessarily need to do this
- std::unique_ptr<S2RegionUnion> _s2Region;
- std::unique_ptr<R2Region> _r2Region;
- };
-
-} // namespace mongo
+class GeometryContainer {
+ MONGO_DISALLOW_COPYING(GeometryContainer);
+
+public:
+ /**
+ * Creates an empty geometry container which may then be loaded from BSON or directly.
+ */
+ GeometryContainer();
+
+ /**
+ * Loads an empty GeometryContainer from query.
+ */
+ Status parseFromQuery(const BSONElement& elem);
+
+ /**
+ * Loads an empty GeometryContainer from stored geometry.
+ */
+ Status parseFromStorage(const BSONElement& elem);
+
+ /**
+ * Is the geometry any of {Point, Line, Polygon}?
+ */
+ bool isSimpleContainer() const;
+
+ /**
+ * Reports the CRS of the contained geometry.
+ * TODO: Rework once we have collections of multiple CRSes
+ */
+ CRS getNativeCRS() const;
+
+ /**
+ * Whether or not this geometry can be projected into a particular CRS
+ */
+ bool supportsProject(CRS crs) const;
+
+ /**
+ * Projects the current geometry into the supplied crs.
+     * It is an error to call this function if supportsProject(crs) is false.
+ */
+ void projectInto(CRS crs);
+
+ /**
+ * Minimum distance between this geometry and the supplied point.
+ * TODO: Rework and generalize to full GeometryContainer distance
+ */
+ double minDistance(const PointWithCRS& point) const;
+
+ /**
+ * Only polygons (and aggregate types thereof) support contains.
+ */
+ bool supportsContains() const;
+
+ /**
+     * To check containment, we iterate over the otherContainer's geometries. If any
+     * sub-geometry of the otherContainer is not contained within us, the otherContainer is
+     * not contained within us. Only if every sub-geometry of the otherContainer is contained
+     * within us do we contain the entire otherContainer.
+ */
+ bool contains(const GeometryContainer& otherContainer) const;
+
+ /**
+ * To check intersection, we iterate over the otherContainer's geometries, checking each
+ * geometry to see if we intersect it. If we intersect one geometry, we intersect the
+ * entire other container.
+ */
+ bool intersects(const GeometryContainer& otherContainer) const;
+
+ // Region which can be used to generate a covering of the query object in the S2 space.
+ bool hasS2Region() const;
+ const S2Region& getS2Region() const;
+
+ // Region which can be used to generate a covering of the query object in euclidean space.
+ bool hasR2Region() const;
+ const R2Region& getR2Region() const;
+
+ // Returns a string related to the type of the geometry (for debugging queries)
+ std::string getDebugType() const;
+
+ // Needed for 2D wrapping check (for now)
+ // TODO: Remove these hacks
+ const CapWithCRS* getCapGeometryHack() const;
+
+private:
+ class R2BoxRegion;
+
+ Status parseFromGeoJSON(const BSONObj& obj);
+
+ // Does 'this' intersect with the provided type?
+ bool intersects(const S2Cell& otherPoint) const;
+ bool intersects(const S2Polyline& otherLine) const;
+ bool intersects(const S2Polygon& otherPolygon) const;
+ // These three just iterate over the geometries and call the 3 methods above.
+ bool intersects(const MultiPointWithCRS& otherMultiPoint) const;
+ bool intersects(const MultiLineWithCRS& otherMultiLine) const;
+ bool intersects(const MultiPolygonWithCRS& otherMultiPolygon) const;
+
+ // Used when 'this' has a polygon somewhere, either in _polygon or _multiPolygon or
+ // _geometryCollection.
+ bool contains(const S2Cell& otherCell, const S2Point& otherPoint) const;
+ bool contains(const S2Polyline& otherLine) const;
+ bool contains(const S2Polygon& otherPolygon) const;
+
+    // Only one of these unique_ptrs should be non-NULL. S2Region is a
+ // superclass but it only supports testing against S2Cells. We need
+ // the most specific class we can get.
+ std::unique_ptr<PointWithCRS> _point;
+ std::unique_ptr<LineWithCRS> _line;
+ std::unique_ptr<BoxWithCRS> _box;
+ std::unique_ptr<PolygonWithCRS> _polygon;
+ std::unique_ptr<CapWithCRS> _cap;
+ std::unique_ptr<MultiPointWithCRS> _multiPoint;
+ std::unique_ptr<MultiLineWithCRS> _multiLine;
+ std::unique_ptr<MultiPolygonWithCRS> _multiPolygon;
+ std::unique_ptr<GeometryCollection> _geometryCollection;
+
+ // Cached for use during covering calculations
+ // TODO: _s2Region is currently generated immediately - don't necessarily need to do this
+ std::unique_ptr<S2RegionUnion> _s2Region;
+ std::unique_ptr<R2Region> _r2Region;
+};
+
+} // namespace mongo
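
A minimal usage sketch of the GeometryContainer interface above, assuming 'storedElem' and 'queryElem' are BSONElements holding a stored geometry and a $geometry query operand; the wrapper function name is hypothetical and this is not the actual matcher code.

    // Hedged sketch only: parse both geometries, then apply the predicate semantics
    // documented in the class (intersects = any sub-geometry, contains = all sub-geometries).
    Status geoMatchSketch(const BSONElement& storedElem, const BSONElement& queryElem) {
        GeometryContainer stored;
        Status status = stored.parseFromStorage(storedElem);
        if (!status.isOK())
            return status;

        GeometryContainer query;
        status = query.parseFromQuery(queryElem);
        if (!status.isOK())
            return status;

        // $geoIntersects-style predicate: intersecting any sub-geometry is enough.
        bool overlaps = query.intersects(stored);

        // $geoWithin-style predicate: only polygons (and aggregates thereof) support contains().
        bool within = query.supportsContains() && query.contains(stored);

        // 'overlaps' and 'within' would feed the surrounding match logic.
        return Status::OK();
    }
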
diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp
index 95c0f33ac57..1f374f42215 100644
--- a/src/mongo/db/geo/geoparser.cpp
+++ b/src/mongo/db/geo/geoparser.cpp
@@ -44,683 +44,745 @@
namespace mongo {
- using std::unique_ptr;
- using std::stringstream;
-
- // This field must be present, and...
- static const string GEOJSON_TYPE = "type";
- // Have one of these values:
- static const string GEOJSON_TYPE_POINT = "Point";
- static const string GEOJSON_TYPE_LINESTRING = "LineString";
- static const string GEOJSON_TYPE_POLYGON = "Polygon";
- static const string GEOJSON_TYPE_MULTI_POINT = "MultiPoint";
- static const string GEOJSON_TYPE_MULTI_LINESTRING = "MultiLineString";
- static const string GEOJSON_TYPE_MULTI_POLYGON = "MultiPolygon";
- static const string GEOJSON_TYPE_GEOMETRY_COLLECTION = "GeometryCollection";
- // This field must also be present. The value depends on the type.
- static const string GEOJSON_COORDINATES = "coordinates";
- static const string GEOJSON_GEOMETRIES = "geometries";
-
- // Coordinate System Reference
- // see http://portal.opengeospatial.org/files/?artifact_id=24045
- // and http://spatialreference.org/ref/epsg/4326/
- // and http://www.geojson.org/geojson-spec.html#named-crs
- static const string CRS_CRS84 = "urn:ogc:def:crs:OGC:1.3:CRS84";
- static const string CRS_EPSG_4326 = "EPSG:4326";
- static const string CRS_STRICT_WINDING = "urn:x-mongodb:crs:strictwinding:EPSG:4326";
-
- static Status parseFlatPoint(const BSONElement &elem, Point *out, bool allowAddlFields = false) {
- if (!elem.isABSONObj()) return BAD_VALUE("Point must be an array or object");
- BSONObjIterator it(elem.Obj());
- BSONElement x = it.next();
- if (!x.isNumber()) { return BAD_VALUE("Point must only contain numeric elements"); }
- BSONElement y = it.next();
- if (!y.isNumber()) { return BAD_VALUE("Point must only contain numeric elements"); }
- if (!allowAddlFields && it.more()) { return BAD_VALUE("Point must only contain two numeric elements"); }
- out->x = x.number();
- out->y = y.number();
- // Point coordinates must be finite numbers, neither NaN or infinite.
- if (!std::isfinite(out->x) || !std::isfinite(out->y)) {
- return BAD_VALUE("Point coordinates must be finite numbers");
- }
- return Status::OK();
+using std::unique_ptr;
+using std::stringstream;
+
+// This field must be present, and...
+static const string GEOJSON_TYPE = "type";
+// Have one of these values:
+static const string GEOJSON_TYPE_POINT = "Point";
+static const string GEOJSON_TYPE_LINESTRING = "LineString";
+static const string GEOJSON_TYPE_POLYGON = "Polygon";
+static const string GEOJSON_TYPE_MULTI_POINT = "MultiPoint";
+static const string GEOJSON_TYPE_MULTI_LINESTRING = "MultiLineString";
+static const string GEOJSON_TYPE_MULTI_POLYGON = "MultiPolygon";
+static const string GEOJSON_TYPE_GEOMETRY_COLLECTION = "GeometryCollection";
+// This field must also be present. The value depends on the type.
+static const string GEOJSON_COORDINATES = "coordinates";
+static const string GEOJSON_GEOMETRIES = "geometries";
+
+// Coordinate System Reference
+// see http://portal.opengeospatial.org/files/?artifact_id=24045
+// and http://spatialreference.org/ref/epsg/4326/
+// and http://www.geojson.org/geojson-spec.html#named-crs
+static const string CRS_CRS84 = "urn:ogc:def:crs:OGC:1.3:CRS84";
+static const string CRS_EPSG_4326 = "EPSG:4326";
+static const string CRS_STRICT_WINDING = "urn:x-mongodb:crs:strictwinding:EPSG:4326";
+
+static Status parseFlatPoint(const BSONElement& elem, Point* out, bool allowAddlFields = false) {
+ if (!elem.isABSONObj())
+ return BAD_VALUE("Point must be an array or object");
+ BSONObjIterator it(elem.Obj());
+ BSONElement x = it.next();
+ if (!x.isNumber()) {
+ return BAD_VALUE("Point must only contain numeric elements");
}
-
- Status GeoParser::parseLegacyPoint(const BSONElement &elem, PointWithCRS *out, bool allowAddlFields) {
- out->crs = FLAT;
- return parseFlatPoint(elem, &out->oldPoint, allowAddlFields);
+ BSONElement y = it.next();
+ if (!y.isNumber()) {
+ return BAD_VALUE("Point must only contain numeric elements");
+ }
+ if (!allowAddlFields && it.more()) {
+ return BAD_VALUE("Point must only contain two numeric elements");
}
+ out->x = x.number();
+ out->y = y.number();
+    // Point coordinates must be finite numbers, neither NaN nor infinite.
+ if (!std::isfinite(out->x) || !std::isfinite(out->y)) {
+ return BAD_VALUE("Point coordinates must be finite numbers");
+ }
+ return Status::OK();
+}
+
+Status GeoParser::parseLegacyPoint(const BSONElement& elem,
+ PointWithCRS* out,
+ bool allowAddlFields) {
+ out->crs = FLAT;
+ return parseFlatPoint(elem, &out->oldPoint, allowAddlFields);
+}
+
+static Status coordToPoint(double lng, double lat, S2Point* out) {
+ // We don't rely on drem to clean up non-sane points. We just don't let them become
+ // spherical.
+ if (!isValidLngLat(lng, lat))
+ return BAD_VALUE("longitude/latitude is out of bounds, lng: " << lng << " lat: " << lat);
+ // Note that it's (lat, lng) for S2 but (lng, lat) for MongoDB.
+ S2LatLng ll = S2LatLng::FromDegrees(lat, lng).Normalized();
+ // This shouldn't happen since we should only have valid lng/lats.
+ if (!ll.is_valid()) {
+ stringstream ss;
+ ss << "coords invalid after normalization, lng = " << lng << " lat = " << lat << endl;
+ uasserted(17125, ss.str());
+ }
+ *out = ll.ToPoint();
+ return Status::OK();
+}
- static Status coordToPoint(double lng, double lat, S2Point* out) {
- // We don't rely on drem to clean up non-sane points. We just don't let them become
- // spherical.
- if (!isValidLngLat(lng, lat))
- return BAD_VALUE("longitude/latitude is out of bounds, lng: " << lng << " lat: " << lat);
- // Note that it's (lat, lng) for S2 but (lng, lat) for MongoDB.
- S2LatLng ll = S2LatLng::FromDegrees(lat, lng).Normalized();
- // This shouldn't happen since we should only have valid lng/lats.
- if (!ll.is_valid()) {
- stringstream ss;
- ss << "coords invalid after normalization, lng = " << lng << " lat = " << lat << endl;
- uasserted(17125, ss.str());
- }
- *out = ll.ToPoint();
- return Status::OK();
+static Status parseGeoJSONCoordinate(const BSONElement& elem, S2Point* out) {
+ if (Array != elem.type()) {
+ return BAD_VALUE("GeoJSON coordinates must be an array");
}
+ Point p;
+ // GeoJSON allows extra elements, e.g. altitude.
+ Status status = parseFlatPoint(elem, &p, true);
+ if (!status.isOK())
+ return status;
- static Status parseGeoJSONCoordinate(const BSONElement& elem, S2Point* out) {
- if (Array != elem.type()) { return BAD_VALUE("GeoJSON coordinates must be an array"); }
- Point p;
- // GeoJSON allows extra elements, e.g. altitude.
- Status status = parseFlatPoint(elem, &p, true);
- if (!status.isOK()) return status;
+ status = coordToPoint(p.x, p.y, out);
+ return status;
+}
- status = coordToPoint(p.x, p.y, out);
- return status;
+// "coordinates": [ [100.0, 0.0], [101.0, 1.0] ]
+static Status parseArrayOfCoordinates(const BSONElement& elem, vector<S2Point>* out) {
+ if (Array != elem.type()) {
+ return BAD_VALUE("GeoJSON coordinates must be an array of coordinates");
}
-
- // "coordinates": [ [100.0, 0.0], [101.0, 1.0] ]
- static Status parseArrayOfCoordinates(const BSONElement& elem, vector<S2Point>* out) {
- if (Array != elem.type()) { return BAD_VALUE("GeoJSON coordinates must be an array of coordinates"); }
- BSONObjIterator it(elem.Obj());
- // Iterate all coordinates in array
- while (it.more()) {
- S2Point p;
- Status status = parseGeoJSONCoordinate(it.next(), &p);
- if (!status.isOK()) return status;
- out->push_back(p);
- }
- return Status::OK();
+ BSONObjIterator it(elem.Obj());
+ // Iterate all coordinates in array
+ while (it.more()) {
+ S2Point p;
+ Status status = parseGeoJSONCoordinate(it.next(), &p);
+ if (!status.isOK())
+ return status;
+ out->push_back(p);
}
-
- static void eraseDuplicatePoints(vector<S2Point>* vertices) {
- for (size_t i = 1; i < vertices->size(); ++i) {
- if ((*vertices)[i - 1] == (*vertices)[i]) {
- vertices->erase(vertices->begin() + i);
- // We could have > 2 adjacent identical vertices, and must examine i again.
- --i;
- }
+ return Status::OK();
+}
+
+static void eraseDuplicatePoints(vector<S2Point>* vertices) {
+ for (size_t i = 1; i < vertices->size(); ++i) {
+ if ((*vertices)[i - 1] == (*vertices)[i]) {
+ vertices->erase(vertices->begin() + i);
+ // We could have > 2 adjacent identical vertices, and must examine i again.
+ --i;
}
}
+}
- static Status isLoopClosed(const vector<S2Point>& loop, const BSONElement loopElt) {
- if (loop.empty()) {
+static Status isLoopClosed(const vector<S2Point>& loop, const BSONElement loopElt) {
+ if (loop.empty()) {
return BAD_VALUE("Loop has no vertices: " << loopElt.toString(false));
- }
+ }
- if (loop[0] != loop[loop.size() - 1]) {
+ if (loop[0] != loop[loop.size() - 1]) {
return BAD_VALUE("Loop is not closed: " << loopElt.toString(false));
- }
+ }
- return Status::OK();
+ return Status::OK();
+}
+
+static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem, S2Polygon* out) {
+ if (Array != elem.type()) {
+ return BAD_VALUE("Polygon coordinates must be an array");
}
- static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem, S2Polygon *out) {
- if (Array != elem.type()) { return BAD_VALUE("Polygon coordinates must be an array"); }
+ OwnedPointerVector<S2Loop> loops;
+ Status status = Status::OK();
+ string err;
+
+ BSONObjIterator it(elem.Obj());
+ // Iterate all loops of the polygon.
+ while (it.more()) {
+ // Parse the array of vertices of a loop.
+ BSONElement coordinateElt = it.next();
+ vector<S2Point> points;
+ status = parseArrayOfCoordinates(coordinateElt, &points);
+ if (!status.isOK())
+ return status;
+
+ // Check if the loop is closed.
+ status = isLoopClosed(points, coordinateElt);
+ if (!status.isOK())
+ return status;
+
+ eraseDuplicatePoints(&points);
+ // Drop the duplicated last point.
+ points.resize(points.size() - 1);
- OwnedPointerVector<S2Loop> loops;
- Status status = Status::OK();
- string err;
-
- BSONObjIterator it(elem.Obj());
- // Iterate all loops of the polygon.
- while (it.more()) {
- // Parse the array of vertices of a loop.
- BSONElement coordinateElt = it.next();
- vector<S2Point> points;
- status = parseArrayOfCoordinates(coordinateElt, &points);
- if (!status.isOK()) return status;
-
- // Check if the loop is closed.
- status = isLoopClosed(points, coordinateElt);
- if (!status.isOK()) return status;
-
- eraseDuplicatePoints(&points);
- // Drop the duplicated last point.
- points.resize(points.size() - 1);
-
- // At least 3 vertices.
- if (points.size() < 3) {
- return BAD_VALUE("Loop must have at least 3 different vertices: " <<
- coordinateElt.toString(false));
- }
-
- S2Loop* loop = new S2Loop(points);
- loops.push_back(loop);
-
- // Check whether this loop is valid.
- // 1. At least 3 vertices.
- // 2. All vertices must be unit length. Guaranteed by parsePoints().
- // 3. Loops are not allowed to have any duplicate vertices.
- // 4. Non-adjacent edges are not allowed to intersect.
- if (!loop->IsValid(&err)) {
- return BAD_VALUE("Loop is not valid: " << coordinateElt.toString(false) << " "
- << err);
- }
- // If the loop is more than one hemisphere, invert it.
- loop->Normalize();
-
- // Check the first loop must be the exterior ring and any others must be
- // interior rings or holes.
- if (loops.size() > 1 && !loops[0]->Contains(loop)) {
- return BAD_VALUE("Secondary loops not contained by first exterior loop - "
- "secondary loops must be holes: " << coordinateElt.toString(false)
- << " first loop: " << elem.Obj().firstElement().toString(false));
- }
+ // At least 3 vertices.
+ if (points.size() < 3) {
+ return BAD_VALUE(
+ "Loop must have at least 3 different vertices: " << coordinateElt.toString(false));
}
- if (loops.empty()) {
- return BAD_VALUE("Polygon has no loops.");
- }
+ S2Loop* loop = new S2Loop(points);
+ loops.push_back(loop);
- // Check if the given loops form a valid polygon.
- // 1. If a loop contains an edge AB, then no other loop may contain AB or BA.
- // 2. No loop covers more than half of the sphere.
- // 3. No two loops cross.
- if (!S2Polygon::IsValid(loops.vector(), &err))
- return BAD_VALUE("Polygon isn't valid: " << err << " " << elem.toString(false));
-
- // Given all loops are valid / normalized and S2Polygon::IsValid() above returns true.
- // The polygon must be valid. See S2Polygon member function IsValid().
-
- // Transfer ownership of the loops and clears loop vector.
- out->Init(&loops.mutableVector());
-
- // Check if every loop of this polygon shares at most one vertex with
- // its parent loop.
- if (!out->IsNormalized(&err))
- // "err" looks like "Loop 1 shares more than one vertex with its parent loop 0"
- return BAD_VALUE(err << ": " << elem.toString(false));
-
- // S2Polygon contains more than one ring, which is allowed by S2, but not by GeoJSON.
- //
- // Loops are indexed according to a preorder traversal of the nesting hierarchy.
- // GetLastDescendant() returns the index of the last loop that is contained within
- // a given loop. We guarantee that the first loop is the exterior ring.
- if (out->GetLastDescendant(0) < out->num_loops() - 1) {
- return BAD_VALUE("Only one exterior polygon loop is allowed: " << elem.toString(false));
+ // Check whether this loop is valid.
+ // 1. At least 3 vertices.
+ // 2. All vertices must be unit length. Guaranteed by parsePoints().
+ // 3. Loops are not allowed to have any duplicate vertices.
+ // 4. Non-adjacent edges are not allowed to intersect.
+ if (!loop->IsValid(&err)) {
+ return BAD_VALUE("Loop is not valid: " << coordinateElt.toString(false) << " " << err);
}
-
- // In GeoJSON, only one nesting is allowed.
- // The depth of a loop is set by polygon according to the nesting hierarchy of polygon,
- // so the exterior ring's depth is 0, a hole in it is 1, etc.
- for (int i = 0; i < out->num_loops(); i++) {
- if (out->loop(i)->depth() > 1) {
- return BAD_VALUE("Polygon interior loops cannot be nested: "<< elem.toString(false));
- }
+ // If the loop is more than one hemisphere, invert it.
+ loop->Normalize();
+
+        // Check that the first loop is the exterior ring; any other loops must be
+        // interior rings (holes) contained by it.
+ if (loops.size() > 1 && !loops[0]->Contains(loop)) {
+ return BAD_VALUE(
+ "Secondary loops not contained by first exterior loop - "
+ "secondary loops must be holes: "
+ << coordinateElt.toString(false)
+ << " first loop: " << elem.Obj().firstElement().toString(false));
}
- return Status::OK();
}
- static Status parseBigSimplePolygonCoordinates(const BSONElement& elem,
- BigSimplePolygon *out) {
- if (Array != elem.type())
- return BAD_VALUE("Coordinates of polygon must be an array");
+ if (loops.empty()) {
+ return BAD_VALUE("Polygon has no loops.");
+ }
+ // Check if the given loops form a valid polygon.
+ // 1. If a loop contains an edge AB, then no other loop may contain AB or BA.
+ // 2. No loop covers more than half of the sphere.
+ // 3. No two loops cross.
+ if (!S2Polygon::IsValid(loops.vector(), &err))
+ return BAD_VALUE("Polygon isn't valid: " << err << " " << elem.toString(false));
+
+ // Given all loops are valid / normalized and S2Polygon::IsValid() above returns true.
+ // The polygon must be valid. See S2Polygon member function IsValid().
+
+    // Transfers ownership of the loops and clears the loop vector.
+ out->Init(&loops.mutableVector());
+
+ // Check if every loop of this polygon shares at most one vertex with
+ // its parent loop.
+ if (!out->IsNormalized(&err))
+ // "err" looks like "Loop 1 shares more than one vertex with its parent loop 0"
+ return BAD_VALUE(err << ": " << elem.toString(false));
+
+    // An S2Polygon may contain more than one exterior ring; S2 allows this, but GeoJSON does not.
+ //
+ // Loops are indexed according to a preorder traversal of the nesting hierarchy.
+ // GetLastDescendant() returns the index of the last loop that is contained within
+ // a given loop. We guarantee that the first loop is the exterior ring.
+ if (out->GetLastDescendant(0) < out->num_loops() - 1) {
+ return BAD_VALUE("Only one exterior polygon loop is allowed: " << elem.toString(false));
+ }
- const vector<BSONElement>& coordinates = elem.Array();
- // Only one loop is allowed in a BigSimplePolygon
- if (coordinates.size() != 1) {
- return BAD_VALUE("Only one simple loop is allowed in a big polygon: "
- << elem.toString(false));
+ // In GeoJSON, only one nesting is allowed.
+ // The depth of a loop is set by polygon according to the nesting hierarchy of polygon,
+ // so the exterior ring's depth is 0, a hole in it is 1, etc.
+ for (int i = 0; i < out->num_loops(); i++) {
+ if (out->loop(i)->depth() > 1) {
+ return BAD_VALUE("Polygon interior loops cannot be nested: " << elem.toString(false));
}
+ }
+ return Status::OK();
+}
- vector<S2Point> exteriorVertices;
- Status status = Status::OK();
- string err;
+static Status parseBigSimplePolygonCoordinates(const BSONElement& elem, BigSimplePolygon* out) {
+ if (Array != elem.type())
+ return BAD_VALUE("Coordinates of polygon must be an array");
- status = parseArrayOfCoordinates(coordinates.front(), &exteriorVertices);
- if (!status.isOK()) return status;
- status = isLoopClosed(exteriorVertices, coordinates.front());
- if (!status.isOK()) return status;
+ const vector<BSONElement>& coordinates = elem.Array();
+ // Only one loop is allowed in a BigSimplePolygon
+ if (coordinates.size() != 1) {
+ return BAD_VALUE(
+ "Only one simple loop is allowed in a big polygon: " << elem.toString(false));
+ }
- eraseDuplicatePoints(&exteriorVertices);
+ vector<S2Point> exteriorVertices;
+ Status status = Status::OK();
+ string err;
- // The last point is duplicated. We drop it, since S2Loop expects no
- // duplicate points
- exteriorVertices.resize(exteriorVertices.size() - 1);
+ status = parseArrayOfCoordinates(coordinates.front(), &exteriorVertices);
+ if (!status.isOK())
+ return status;
- // At least 3 vertices.
- if (exteriorVertices.size() < 3) {
- return BAD_VALUE("Loop must have at least 3 different vertices: " <<
- elem.toString(false));
- }
+ status = isLoopClosed(exteriorVertices, coordinates.front());
+ if (!status.isOK())
+ return status;
- unique_ptr<S2Loop> loop(new S2Loop(exteriorVertices));
- // Check whether this loop is valid.
- if (!loop->IsValid(&err)) {
- return BAD_VALUE("Loop is not valid: " << elem.toString(false) << " " << err);
- }
+ eraseDuplicatePoints(&exteriorVertices);
- out->Init(loop.release());
- return Status::OK();
+ // The last point is duplicated. We drop it, since S2Loop expects no
+ // duplicate points
+ exteriorVertices.resize(exteriorVertices.size() - 1);
+
+ // At least 3 vertices.
+ if (exteriorVertices.size() < 3) {
+ return BAD_VALUE("Loop must have at least 3 different vertices: " << elem.toString(false));
}
- // Parse "crs" field of BSON object.
- // "crs": {
- // "type": "name",
- // "properties": {
- // "name": "urn:ogc:def:crs:OGC:1.3:CRS84"
- // }
- // }
- static Status parseGeoJSONCRS(const BSONObj &obj, CRS* crs, bool allowStrictSphere = false) {
- *crs = SPHERE;
+ unique_ptr<S2Loop> loop(new S2Loop(exteriorVertices));
+ // Check whether this loop is valid.
+ if (!loop->IsValid(&err)) {
+ return BAD_VALUE("Loop is not valid: " << elem.toString(false) << " " << err);
+ }
- BSONElement crsElt = obj["crs"];
- // "crs" field doesn't exist, return the default SPHERE
- if (crsElt.eoo()) {
- return Status::OK();
- }
+ out->Init(loop.release());
+ return Status::OK();
+}
+
+// Parse "crs" field of BSON object.
+// "crs": {
+// "type": "name",
+// "properties": {
+// "name": "urn:ogc:def:crs:OGC:1.3:CRS84"
+// }
+// }
+static Status parseGeoJSONCRS(const BSONObj& obj, CRS* crs, bool allowStrictSphere = false) {
+ *crs = SPHERE;
+
+ BSONElement crsElt = obj["crs"];
+ // "crs" field doesn't exist, return the default SPHERE
+ if (crsElt.eoo()) {
+ return Status::OK();
+ }
- if (!crsElt.isABSONObj()) return BAD_VALUE("GeoJSON CRS must be an object");
- BSONObj crsObj = crsElt.embeddedObject();
-
- // "type": "name"
- if (String != crsObj["type"].type() || "name" != crsObj["type"].String())
- return BAD_VALUE("GeoJSON CRS must have field \"type\": \"name\"");
-
- // "properties"
- BSONElement propertiesElt = crsObj["properties"];
- if (!propertiesElt.isABSONObj())
- return BAD_VALUE("CRS must have field \"properties\" which is an object");
- BSONObj propertiesObj = propertiesElt.embeddedObject();
- if (String != propertiesObj["name"].type())
- return BAD_VALUE("In CRS, \"properties.name\" must be a string");
- const string& name = propertiesObj["name"].String();
- if (CRS_CRS84 == name || CRS_EPSG_4326 == name) {
- *crs = SPHERE;
- } else if (CRS_STRICT_WINDING == name) {
- if (!allowStrictSphere) {
- return BAD_VALUE("Strict winding order is only supported by polygon");
- }
- *crs = STRICT_SPHERE;
- } else {
- return BAD_VALUE("Unknown CRS name: " << name);
+ if (!crsElt.isABSONObj())
+ return BAD_VALUE("GeoJSON CRS must be an object");
+ BSONObj crsObj = crsElt.embeddedObject();
+
+ // "type": "name"
+ if (String != crsObj["type"].type() || "name" != crsObj["type"].String())
+ return BAD_VALUE("GeoJSON CRS must have field \"type\": \"name\"");
+
+ // "properties"
+ BSONElement propertiesElt = crsObj["properties"];
+ if (!propertiesElt.isABSONObj())
+ return BAD_VALUE("CRS must have field \"properties\" which is an object");
+ BSONObj propertiesObj = propertiesElt.embeddedObject();
+ if (String != propertiesObj["name"].type())
+ return BAD_VALUE("In CRS, \"properties.name\" must be a string");
+ const string& name = propertiesObj["name"].String();
+ if (CRS_CRS84 == name || CRS_EPSG_4326 == name) {
+ *crs = SPHERE;
+ } else if (CRS_STRICT_WINDING == name) {
+ if (!allowStrictSphere) {
+ return BAD_VALUE("Strict winding order is only supported by polygon");
}
- return Status::OK();
+ *crs = STRICT_SPHERE;
+ } else {
+ return BAD_VALUE("Unknown CRS name: " << name);
}
+ return Status::OK();
+}
+
+// Parse "coordinates" field of GeoJSON LineString
+// e.g. "coordinates": [ [100.0, 0.0], [101.0, 1.0] ]
+// Or a line in "coordinates" field of GeoJSON MultiLineString
+static Status parseGeoJSONLineCoordinates(const BSONElement& elem, S2Polyline* out) {
+ vector<S2Point> vertices;
+ Status status = parseArrayOfCoordinates(elem, &vertices);
+ if (!status.isOK())
+ return status;
- // Parse "coordinates" field of GeoJSON LineString
- // e.g. "coordinates": [ [100.0, 0.0], [101.0, 1.0] ]
- // Or a line in "coordinates" field of GeoJSON MultiLineString
- static Status parseGeoJSONLineCoordinates(const BSONElement& elem, S2Polyline* out) {
- vector<S2Point> vertices;
- Status status = parseArrayOfCoordinates(elem, &vertices);
- if (!status.isOK()) return status;
+ eraseDuplicatePoints(&vertices);
+ if (vertices.size() < 2)
+ return BAD_VALUE(
+ "GeoJSON LineString must have at least 2 vertices: " << elem.toString(false));
+
+ string err;
+ if (!S2Polyline::IsValid(vertices, &err))
+ return BAD_VALUE("GeoJSON LineString is not valid: " << err << " " << elem.toString(false));
+ out->Init(vertices);
+ return Status::OK();
+}
+
+// Parse legacy point or GeoJSON point, used by geo near.
+// Only stored legacy points allow additional fields.
+Status parsePoint(const BSONElement& elem, PointWithCRS* out, bool allowAddlFields) {
+ if (!elem.isABSONObj())
+ return BAD_VALUE("Point must be an array or object");
+
+ BSONObj obj = elem.Obj();
+ // location: [1, 2] or location: {x: 1, y:2}
+ if (Array == elem.type() || obj.firstElement().isNumber()) {
+ // Legacy point
+ return GeoParser::parseLegacyPoint(elem, out, allowAddlFields);
+ }
- eraseDuplicatePoints(&vertices);
- if (vertices.size() < 2)
- return BAD_VALUE("GeoJSON LineString must have at least 2 vertices: " << elem.toString(false));
+ // GeoJSON point. location: { type: "Point", coordinates: [1, 2] }
+ return GeoParser::parseGeoJSONPoint(obj, out);
+}
- string err;
- if (!S2Polyline::IsValid(vertices, &err))
- return BAD_VALUE("GeoJSON LineString is not valid: " << err << " " << elem.toString(false));
- out->Init(vertices);
- return Status::OK();
- }
+/** exported **/
+Status GeoParser::parseStoredPoint(const BSONElement& elem, PointWithCRS* out) {
+ return parsePoint(elem, out, true);
+}
- // Parse legacy point or GeoJSON point, used by geo near.
- // Only stored legacy points allow additional fields.
- Status parsePoint(const BSONElement &elem, PointWithCRS *out, bool allowAddlFields) {
- if (!elem.isABSONObj()) return BAD_VALUE("Point must be an array or object");
+Status GeoParser::parseQueryPoint(const BSONElement& elem, PointWithCRS* out) {
+ return parsePoint(elem, out, false);
+}
- BSONObj obj = elem.Obj();
- // location: [1, 2] or location: {x: 1, y:2}
- if (Array == elem.type() || obj.firstElement().isNumber()) {
- // Legacy point
- return GeoParser::parseLegacyPoint(elem, out, allowAddlFields);
- }
+Status GeoParser::parseLegacyBox(const BSONObj& obj, BoxWithCRS* out) {
+ Point ptA, ptB;
+ Status status = Status::OK();
- // GeoJSON point. location: { type: "Point", coordinates: [1, 2] }
- return GeoParser::parseGeoJSONPoint(obj, out);
+ BSONObjIterator coordIt(obj);
+ status = parseFlatPoint(coordIt.next(), &ptA);
+ if (!status.isOK()) {
+ return status;
}
-
- /** exported **/
- Status GeoParser::parseStoredPoint(const BSONElement &elem, PointWithCRS *out) {
- return parsePoint(elem, out, true);
+ status = parseFlatPoint(coordIt.next(), &ptB);
+ if (!status.isOK()) {
+ return status;
}
+ // XXX: VERIFY AREA >= 0
- Status GeoParser::parseQueryPoint(const BSONElement &elem, PointWithCRS *out) {
- return parsePoint(elem, out, false);
+ out->box.init(ptA, ptB);
+ out->crs = FLAT;
+ return status;
+}
+
+Status GeoParser::parseLegacyPolygon(const BSONObj& obj, PolygonWithCRS* out) {
+ BSONObjIterator coordIt(obj);
+ vector<Point> points;
+ while (coordIt.more()) {
+ Point p;
+ // A coordinate
+ Status status = parseFlatPoint(coordIt.next(), &p);
+ if (!status.isOK())
+ return status;
+ points.push_back(p);
}
+ if (points.size() < 3)
+ return BAD_VALUE("Polygon must have at least 3 points");
+ out->oldPolygon.init(points);
+ out->crs = FLAT;
+ return Status::OK();
+}
+
+// { "type": "Point", "coordinates": [100.0, 0.0] }
+Status GeoParser::parseGeoJSONPoint(const BSONObj& obj, PointWithCRS* out) {
+ Status status = Status::OK();
+ // "crs"
+ status = parseGeoJSONCRS(obj, &out->crs);
+ if (!status.isOK())
+ return status;
- Status GeoParser::parseLegacyBox(const BSONObj& obj, BoxWithCRS *out) {
- Point ptA, ptB;
- Status status = Status::OK();
+ // "coordinates"
+ status = parseFlatPoint(obj[GEOJSON_COORDINATES], &out->oldPoint, true);
+ if (!status.isOK())
+ return status;
- BSONObjIterator coordIt(obj);
- status = parseFlatPoint(coordIt.next(), &ptA);
- if (!status.isOK()) { return status; }
- status = parseFlatPoint(coordIt.next(), &ptB);
- if (!status.isOK()) { return status; }
- // XXX: VERIFY AREA >= 0
+ // Projection
+ out->crs = FLAT;
+ if (!ShapeProjection::supportsProject(*out, SPHERE))
+ return BAD_VALUE("longitude/latitude is out of bounds, lng: " << out->oldPoint.x << " lat: "
+ << out->oldPoint.y);
+ ShapeProjection::projectInto(out, SPHERE);
+ return Status::OK();
+}
+
+// { "type": "LineString", "coordinates": [ [100.0, 0.0], [101.0, 1.0] ] }
+Status GeoParser::parseGeoJSONLine(const BSONObj& obj, LineWithCRS* out) {
+ Status status = Status::OK();
+ // "crs"
+ status = parseGeoJSONCRS(obj, &out->crs);
+ if (!status.isOK())
+ return status;
- out->box.init(ptA, ptB);
- out->crs = FLAT;
+ // "coordinates"
+ status = parseGeoJSONLineCoordinates(obj[GEOJSON_COORDINATES], &out->line);
+ if (!status.isOK())
return status;
- }
- Status GeoParser::parseLegacyPolygon(const BSONObj& obj, PolygonWithCRS *out) {
- BSONObjIterator coordIt(obj);
- vector<Point> points;
- while (coordIt.more()) {
- Point p;
- // A coordinate
- Status status = parseFlatPoint(coordIt.next(), &p);
- if (!status.isOK()) return status;
- points.push_back(p);
- }
- if (points.size() < 3) return BAD_VALUE("Polygon must have at least 3 points");
- out->oldPolygon.init(points);
- out->crs = FLAT;
- return Status::OK();
- }
+ return Status::OK();
+}
- // { "type": "Point", "coordinates": [100.0, 0.0] }
- Status GeoParser::parseGeoJSONPoint(const BSONObj &obj, PointWithCRS *out) {
- Status status = Status::OK();
- // "crs"
- status = parseGeoJSONCRS(obj, &out->crs);
- if (!status.isOK()) return status;
-
- // "coordinates"
- status = parseFlatPoint(obj[GEOJSON_COORDINATES], &out->oldPoint, true);
- if (!status.isOK()) return status;
-
- // Projection
- out->crs = FLAT;
- if (!ShapeProjection::supportsProject(*out, SPHERE))
- return BAD_VALUE("longitude/latitude is out of bounds, lng: "
- << out->oldPoint.x << " lat: " << out->oldPoint.y);
- ShapeProjection::projectInto(out, SPHERE);
- return Status::OK();
+Status GeoParser::parseGeoJSONPolygon(const BSONObj& obj, PolygonWithCRS* out) {
+ const BSONElement coordinates = obj[GEOJSON_COORDINATES];
+
+ Status status = Status::OK();
+ // "crs", allow strict sphere
+ status = parseGeoJSONCRS(obj, &out->crs, true);
+ if (!status.isOK())
+ return status;
+
+ // "coordinates"
+ if (out->crs == SPHERE) {
+ out->s2Polygon.reset(new S2Polygon());
+ status = parseGeoJSONPolygonCoordinates(coordinates, out->s2Polygon.get());
+ } else if (out->crs == STRICT_SPHERE) {
+ out->bigPolygon.reset(new BigSimplePolygon());
+ status = parseBigSimplePolygonCoordinates(coordinates, out->bigPolygon.get());
}
+ return status;
+}
- // { "type": "LineString", "coordinates": [ [100.0, 0.0], [101.0, 1.0] ] }
- Status GeoParser::parseGeoJSONLine(const BSONObj& obj, LineWithCRS* out) {
- Status status = Status::OK();
- // "crs"
- status = parseGeoJSONCRS(obj, &out->crs);
- if (!status.isOK()) return status;
+Status GeoParser::parseMultiPoint(const BSONObj& obj, MultiPointWithCRS* out) {
+ Status status = Status::OK();
+ status = parseGeoJSONCRS(obj, &out->crs);
+ if (!status.isOK())
+ return status;
- // "coordinates"
- status = parseGeoJSONLineCoordinates(obj[GEOJSON_COORDINATES], &out->line);
- if (!status.isOK()) return status;
+ out->points.clear();
+ BSONElement coordElt = obj.getFieldDotted(GEOJSON_COORDINATES);
+ status = parseArrayOfCoordinates(coordElt, &out->points);
+ if (!status.isOK())
+ return status;
- return Status::OK();
+ if (0 == out->points.size())
+ return BAD_VALUE("MultiPoint coordinates must have at least 1 element");
+ out->cells.resize(out->points.size());
+ for (size_t i = 0; i < out->points.size(); ++i) {
+ out->cells[i] = S2Cell(out->points[i]);
}
- Status GeoParser::parseGeoJSONPolygon(const BSONObj &obj, PolygonWithCRS *out) {
- const BSONElement coordinates = obj[GEOJSON_COORDINATES];
+ return Status::OK();
+}
- Status status = Status::OK();
- // "crs", allow strict sphere
- status = parseGeoJSONCRS(obj, &out->crs, true);
- if (!status.isOK()) return status;
-
- // "coordinates"
- if (out->crs == SPHERE) {
- out->s2Polygon.reset(new S2Polygon());
- status = parseGeoJSONPolygonCoordinates(coordinates, out->s2Polygon.get());
- }
- else if (out->crs == STRICT_SPHERE) {
- out->bigPolygon.reset(new BigSimplePolygon());
- status = parseBigSimplePolygonCoordinates(coordinates, out->bigPolygon.get());
- }
+Status GeoParser::parseMultiLine(const BSONObj& obj, MultiLineWithCRS* out) {
+ Status status = Status::OK();
+ status = parseGeoJSONCRS(obj, &out->crs);
+ if (!status.isOK())
return status;
- }
- Status GeoParser::parseMultiPoint(const BSONObj &obj, MultiPointWithCRS *out) {
- Status status = Status::OK();
- status = parseGeoJSONCRS(obj, &out->crs);
- if (!status.isOK()) return status;
-
- out->points.clear();
- BSONElement coordElt = obj.getFieldDotted(GEOJSON_COORDINATES);
- status = parseArrayOfCoordinates(coordElt, &out->points);
- if (!status.isOK()) return status;
-
- if (0 == out->points.size())
- return BAD_VALUE("MultiPoint coordinates must have at least 1 element");
- out->cells.resize(out->points.size());
- for (size_t i = 0; i < out->points.size(); ++i) {
- out->cells[i] = S2Cell(out->points[i]);
- }
+ BSONElement coordElt = obj.getFieldDotted(GEOJSON_COORDINATES);
+ if (Array != coordElt.type())
+ return BAD_VALUE("MultiLineString coordinates must be an array");
- return Status::OK();
- }
+ out->lines.clear();
+ vector<S2Polyline*>& lines = out->lines.mutableVector();
- Status GeoParser::parseMultiLine(const BSONObj &obj, MultiLineWithCRS *out) {
- Status status = Status::OK();
- status = parseGeoJSONCRS(obj, &out->crs);
- if (!status.isOK()) return status;
+ BSONObjIterator it(coordElt.Obj());
- BSONElement coordElt = obj.getFieldDotted(GEOJSON_COORDINATES);
- if (Array != coordElt.type())
- return BAD_VALUE("MultiLineString coordinates must be an array");
+ // Iterate array
+ while (it.more()) {
+ lines.push_back(new S2Polyline());
+ status = parseGeoJSONLineCoordinates(it.next(), lines.back());
+ if (!status.isOK())
+ return status;
+ }
+ if (0 == lines.size())
+ return BAD_VALUE("MultiLineString coordinates must have at least 1 element");
- out->lines.clear();
- vector<S2Polyline*>& lines = out->lines.mutableVector();
+ return Status::OK();
+}
- BSONObjIterator it(coordElt.Obj());
+Status GeoParser::parseMultiPolygon(const BSONObj& obj, MultiPolygonWithCRS* out) {
+ Status status = Status::OK();
+ status = parseGeoJSONCRS(obj, &out->crs);
+ if (!status.isOK())
+ return status;
- // Iterate array
- while (it.more()) {
- lines.push_back(new S2Polyline());
- status = parseGeoJSONLineCoordinates(it.next(), lines.back());
- if (!status.isOK()) return status;
- }
- if (0 == lines.size())
- return BAD_VALUE("MultiLineString coordinates must have at least 1 element");
+ BSONElement coordElt = obj.getFieldDotted(GEOJSON_COORDINATES);
+ if (Array != coordElt.type())
+ return BAD_VALUE("MultiPolygon coordinates must be an array");
- return Status::OK();
+ out->polygons.clear();
+ vector<S2Polygon*>& polygons = out->polygons.mutableVector();
+
+ BSONObjIterator it(coordElt.Obj());
+ // Iterate array
+ while (it.more()) {
+ polygons.push_back(new S2Polygon());
+ status = parseGeoJSONPolygonCoordinates(it.next(), polygons.back());
+ if (!status.isOK())
+ return status;
}
+ if (0 == polygons.size())
+ return BAD_VALUE("MultiPolygon coordinates must have at least 1 element");
- Status GeoParser::parseMultiPolygon(const BSONObj &obj, MultiPolygonWithCRS *out) {
- Status status = Status::OK();
- status = parseGeoJSONCRS(obj, &out->crs);
- if (!status.isOK()) return status;
-
- BSONElement coordElt = obj.getFieldDotted(GEOJSON_COORDINATES);
- if (Array != coordElt.type())
- return BAD_VALUE("MultiPolygon coordinates must be an array");
-
- out->polygons.clear();
- vector<S2Polygon*>& polygons = out->polygons.mutableVector();
-
- BSONObjIterator it(coordElt.Obj());
- // Iterate array
- while (it.more()) {
- polygons.push_back(new S2Polygon());
- status = parseGeoJSONPolygonCoordinates(it.next(), polygons.back());
- if (!status.isOK()) return status;
- }
- if (0 == polygons.size())
- return BAD_VALUE("MultiPolygon coordinates must have at least 1 element");
+ return Status::OK();
+}
- return Status::OK();
- }
+Status GeoParser::parseLegacyCenter(const BSONObj& obj, CapWithCRS* out) {
+ BSONObjIterator objIt(obj);
+
+ // Center
+ BSONElement center = objIt.next();
+ Status status = parseFlatPoint(center, &out->circle.center);
+ if (!status.isOK())
+ return status;
- Status GeoParser::parseLegacyCenter(const BSONObj& obj, CapWithCRS *out) {
- BSONObjIterator objIt(obj);
+ // Radius
+ BSONElement radius = objIt.next();
+ // radius >= 0 and is not NaN
+ if (!radius.isNumber() || !(radius.number() >= 0))
+ return BAD_VALUE("radius must be a non-negative number");
+
+ // No more
+ if (objIt.more())
+ return BAD_VALUE("Only 2 fields allowed for circular region");
+
+ out->circle.radius = radius.number();
+ out->crs = FLAT;
+ return Status::OK();
+}
+
+Status GeoParser::parseCenterSphere(const BSONObj& obj, CapWithCRS* out) {
+ BSONObjIterator objIt(obj);
+
+ // Center
+ BSONElement center = objIt.next();
+ Point p;
+ // Check the object has and only has 2 numbers.
+ Status status = parseFlatPoint(center, &p);
+ if (!status.isOK())
+ return status;
- // Center
- BSONElement center = objIt.next();
- Status status = parseFlatPoint(center, &out->circle.center);
- if (!status.isOK()) return status;
+ S2Point centerPoint;
+ status = coordToPoint(p.x, p.y, &centerPoint);
+ if (!status.isOK())
+ return status;
- // Radius
- BSONElement radius = objIt.next();
- // radius >= 0 and is not NaN
- if (!radius.isNumber() || !(radius.number() >= 0))
- return BAD_VALUE("radius must be a non-negative number");
+ // Radius
+ BSONElement radiusElt = objIt.next();
+ // radius >= 0 and is not NaN
+ if (!radiusElt.isNumber() || !(radiusElt.number() >= 0))
+ return BAD_VALUE("radius must be a non-negative number");
+ double radius = radiusElt.number();
+
+ // No more elements
+ if (objIt.more())
+ return BAD_VALUE("Only 2 fields allowed for circular region");
+
+ out->cap = S2Cap::FromAxisAngle(centerPoint, S1Angle::Radians(radius));
+ out->circle.radius = radius;
+ out->circle.center = p;
+ out->crs = SPHERE;
+ return Status::OK();
+}
+
+// { "type": "GeometryCollection",
+// "geometries": [
+// { "type": "Point",
+// "coordinates": [100.0, 0.0]
+// },
+// { "type": "LineString",
+// "coordinates": [ [101.0, 0.0], [102.0, 1.0] ]
+// }
+// ]
+// }
+Status GeoParser::parseGeometryCollection(const BSONObj& obj, GeometryCollection* out) {
+ BSONElement coordElt = obj.getFieldDotted(GEOJSON_GEOMETRIES);
+ if (Array != coordElt.type())
+ return BAD_VALUE("GeometryCollection geometries must be an array");
+
+ const vector<BSONElement>& geometries = coordElt.Array();
+ if (0 == geometries.size())
+ return BAD_VALUE("GeometryCollection geometries must have at least 1 element");
+
+ for (size_t i = 0; i < geometries.size(); ++i) {
+ if (Object != geometries[i].type())
+ return BAD_VALUE("Element " << i << " of \"geometries\" is not an object");
+
+ const BSONObj& geoObj = geometries[i].Obj();
+ GeoJSONType type = parseGeoJSONType(geoObj);
+
+ if (GEOJSON_UNKNOWN == type)
+ return BAD_VALUE("Unknown GeoJSON type: " << geometries[i].toString(false));
+
+ if (GEOJSON_GEOMETRY_COLLECTION == type)
+ return BAD_VALUE(
+ "GeometryCollections cannot be nested: " << geometries[i].toString(false));
- // No more
- if (objIt.more())
- return BAD_VALUE("Only 2 fields allowed for circular region");
+ Status status = Status::OK();
+ if (GEOJSON_POINT == type) {
+ out->points.resize(out->points.size() + 1);
+ status = parseGeoJSONPoint(geoObj, &out->points.back());
+ } else if (GEOJSON_LINESTRING == type) {
+ out->lines.mutableVector().push_back(new LineWithCRS());
+ status = parseGeoJSONLine(geoObj, out->lines.vector().back());
+ } else if (GEOJSON_POLYGON == type) {
+ out->polygons.mutableVector().push_back(new PolygonWithCRS());
+ status = parseGeoJSONPolygon(geoObj, out->polygons.vector().back());
+ } else if (GEOJSON_MULTI_POINT == type) {
+ out->multiPoints.mutableVector().push_back(new MultiPointWithCRS());
+ status = parseMultiPoint(geoObj, out->multiPoints.mutableVector().back());
+ } else if (GEOJSON_MULTI_LINESTRING == type) {
+ out->multiLines.mutableVector().push_back(new MultiLineWithCRS());
+ status = parseMultiLine(geoObj, out->multiLines.mutableVector().back());
+ } else if (GEOJSON_MULTI_POLYGON == type) {
+ out->multiPolygons.mutableVector().push_back(new MultiPolygonWithCRS());
+ status = parseMultiPolygon(geoObj, out->multiPolygons.mutableVector().back());
+ } else {
+ // Should not reach here.
+ invariant(false);
+ }
- out->circle.radius = radius.number();
- out->crs = FLAT;
- return Status::OK();
+ // Check parsing result.
+ if (!status.isOK())
+ return status;
}
- Status GeoParser::parseCenterSphere(const BSONObj& obj, CapWithCRS *out) {
- BSONObjIterator objIt(obj);
+ return Status::OK();
+}
- // Center
- BSONElement center = objIt.next();
- Point p;
- // Check the object has and only has 2 numbers.
- Status status = parseFlatPoint(center, &p);
- if (!status.isOK()) return status;
-
- S2Point centerPoint;
- status = coordToPoint(p.x, p.y, &centerPoint);
- if (!status.isOK()) return status;
-
- // Radius
- BSONElement radiusElt = objIt.next();
- // radius >= 0 and is not NaN
- if (!radiusElt.isNumber() || !(radiusElt.number() >= 0))
- return BAD_VALUE("radius must be a non-negative number");
- double radius = radiusElt.number();
-
- // No more elements
- if (objIt.more())
- return BAD_VALUE("Only 2 fields allowed for circular region");
-
- out->cap = S2Cap::FromAxisAngle(centerPoint, S1Angle::Radians(radius));
- out->circle.radius = radius;
- out->circle.center = p;
- out->crs = SPHERE;
- return Status::OK();
+bool GeoParser::parsePointWithMaxDistance(const BSONObj& obj, PointWithCRS* out, double* maxOut) {
+ BSONObjIterator it(obj);
+ if (!it.more()) {
+ return false;
}
- // { "type": "GeometryCollection",
- // "geometries": [
- // { "type": "Point",
- // "coordinates": [100.0, 0.0]
- // },
- // { "type": "LineString",
- // "coordinates": [ [101.0, 0.0], [102.0, 1.0] ]
- // }
- // ]
- // }
- Status GeoParser::parseGeometryCollection(const BSONObj &obj, GeometryCollection *out) {
- BSONElement coordElt = obj.getFieldDotted(GEOJSON_GEOMETRIES);
- if (Array != coordElt.type())
- return BAD_VALUE("GeometryCollection geometries must be an array");
-
- const vector<BSONElement>& geometries = coordElt.Array();
- if (0 == geometries.size())
- return BAD_VALUE("GeometryCollection geometries must have at least 1 element");
-
- for (size_t i = 0; i < geometries.size(); ++i) {
- if (Object != geometries[i].type())
- return BAD_VALUE("Element " << i << " of \"geometries\" is not an object");
-
- const BSONObj& geoObj = geometries[i].Obj();
- GeoJSONType type = parseGeoJSONType(geoObj);
-
- if (GEOJSON_UNKNOWN == type)
- return BAD_VALUE("Unknown GeoJSON type: " << geometries[i].toString(false));
-
- if (GEOJSON_GEOMETRY_COLLECTION == type)
- return BAD_VALUE("GeometryCollections cannot be nested: "
- << geometries[i].toString(false));
-
- Status status = Status::OK();
- if (GEOJSON_POINT == type) {
- out->points.resize(out->points.size() + 1);
- status = parseGeoJSONPoint(geoObj, &out->points.back());
- } else if (GEOJSON_LINESTRING == type) {
- out->lines.mutableVector().push_back(new LineWithCRS());
- status = parseGeoJSONLine(geoObj, out->lines.vector().back());
- } else if (GEOJSON_POLYGON == type) {
- out->polygons.mutableVector().push_back(new PolygonWithCRS());
- status = parseGeoJSONPolygon(geoObj, out->polygons.vector().back());
- } else if (GEOJSON_MULTI_POINT == type) {
- out->multiPoints.mutableVector().push_back(new MultiPointWithCRS());
- status = parseMultiPoint(geoObj, out->multiPoints.mutableVector().back());
- } else if (GEOJSON_MULTI_LINESTRING == type) {
- out->multiLines.mutableVector().push_back(new MultiLineWithCRS());
- status = parseMultiLine(geoObj, out->multiLines.mutableVector().back());
- } else if (GEOJSON_MULTI_POLYGON == type) {
- out->multiPolygons.mutableVector().push_back(new MultiPolygonWithCRS());
- status = parseMultiPolygon(geoObj, out->multiPolygons.mutableVector().back());
- } else {
- // Should not reach here.
- invariant(false);
- }
-
- // Check parsing result.
- if (!status.isOK()) return status;
- }
+ BSONElement lng = it.next();
+ if (!lng.isNumber()) {
+ return false;
+ }
+ if (!it.more()) {
+ return false;
+ }
- return Status::OK();
+ BSONElement lat = it.next();
+ if (!lat.isNumber()) {
+ return false;
+ }
+ if (!it.more()) {
+ return false;
}
- bool GeoParser::parsePointWithMaxDistance(const BSONObj& obj, PointWithCRS* out, double* maxOut) {
- BSONObjIterator it(obj);
- if (!it.more()) { return false; }
-
- BSONElement lng = it.next();
- if (!lng.isNumber()) { return false; }
- if (!it.more()) { return false; }
-
- BSONElement lat = it.next();
- if (!lat.isNumber()) { return false; }
- if (!it.more()) { return false; }
-
- BSONElement dist = it.next();
- if (!dist.isNumber()) { return false; }
- if (it.more()) { return false; }
-
- out->oldPoint.x = lng.number();
- out->oldPoint.y = lat.number();
- out->crs = FLAT;
- *maxOut = dist.number();
- return true;
- }
-
- GeoParser::GeoSpecifier GeoParser::parseGeoSpecifier(const BSONElement& type) {
- if (!type.isABSONObj()) { return GeoParser::UNKNOWN; }
- const char* fieldName = type.fieldName();
- if (mongoutils::str::equals(fieldName, "$box")) {
- return GeoParser::BOX;
- } else if (mongoutils::str::equals(fieldName, "$center")) {
- return GeoParser::CENTER;
- } else if (mongoutils::str::equals(fieldName, "$polygon")) {
- return GeoParser::POLYGON;
- } else if (mongoutils::str::equals(fieldName, "$centerSphere")) {
- return GeoParser::CENTER_SPHERE;
- } else if (mongoutils::str::equals(fieldName, "$geometry")) {
- return GeoParser::GEOMETRY;
- }
+ BSONElement dist = it.next();
+ if (!dist.isNumber()) {
+ return false;
+ }
+ if (it.more()) {
+ return false;
+ }
+
+ out->oldPoint.x = lng.number();
+ out->oldPoint.y = lat.number();
+ out->crs = FLAT;
+ *maxOut = dist.number();
+ return true;
+}
+
+GeoParser::GeoSpecifier GeoParser::parseGeoSpecifier(const BSONElement& type) {
+ if (!type.isABSONObj()) {
return GeoParser::UNKNOWN;
}
+ const char* fieldName = type.fieldName();
+ if (mongoutils::str::equals(fieldName, "$box")) {
+ return GeoParser::BOX;
+ } else if (mongoutils::str::equals(fieldName, "$center")) {
+ return GeoParser::CENTER;
+ } else if (mongoutils::str::equals(fieldName, "$polygon")) {
+ return GeoParser::POLYGON;
+ } else if (mongoutils::str::equals(fieldName, "$centerSphere")) {
+ return GeoParser::CENTER_SPHERE;
+ } else if (mongoutils::str::equals(fieldName, "$geometry")) {
+ return GeoParser::GEOMETRY;
+ }
+ return GeoParser::UNKNOWN;
+}
- GeoParser::GeoJSONType GeoParser::parseGeoJSONType(const BSONObj& obj) {
- BSONElement type = obj.getFieldDotted(GEOJSON_TYPE);
- if (String != type.type()) { return GeoParser::GEOJSON_UNKNOWN; }
- const string& typeString = type.String();
- if (GEOJSON_TYPE_POINT == typeString) {
- return GeoParser::GEOJSON_POINT;
- } else if (GEOJSON_TYPE_LINESTRING == typeString) {
- return GeoParser::GEOJSON_LINESTRING;
- } else if (GEOJSON_TYPE_POLYGON == typeString) {
- return GeoParser::GEOJSON_POLYGON;
- } else if (GEOJSON_TYPE_MULTI_POINT == typeString) {
- return GeoParser::GEOJSON_MULTI_POINT;
- } else if (GEOJSON_TYPE_MULTI_LINESTRING == typeString) {
- return GeoParser::GEOJSON_MULTI_LINESTRING;
- } else if (GEOJSON_TYPE_MULTI_POLYGON == typeString) {
- return GeoParser::GEOJSON_MULTI_POLYGON;
- } else if (GEOJSON_TYPE_GEOMETRY_COLLECTION == typeString) {
- return GeoParser::GEOJSON_GEOMETRY_COLLECTION;
- }
+GeoParser::GeoJSONType GeoParser::parseGeoJSONType(const BSONObj& obj) {
+ BSONElement type = obj.getFieldDotted(GEOJSON_TYPE);
+ if (String != type.type()) {
return GeoParser::GEOJSON_UNKNOWN;
}
+ const string& typeString = type.String();
+ if (GEOJSON_TYPE_POINT == typeString) {
+ return GeoParser::GEOJSON_POINT;
+ } else if (GEOJSON_TYPE_LINESTRING == typeString) {
+ return GeoParser::GEOJSON_LINESTRING;
+ } else if (GEOJSON_TYPE_POLYGON == typeString) {
+ return GeoParser::GEOJSON_POLYGON;
+ } else if (GEOJSON_TYPE_MULTI_POINT == typeString) {
+ return GeoParser::GEOJSON_MULTI_POINT;
+ } else if (GEOJSON_TYPE_MULTI_LINESTRING == typeString) {
+ return GeoParser::GEOJSON_MULTI_LINESTRING;
+ } else if (GEOJSON_TYPE_MULTI_POLYGON == typeString) {
+ return GeoParser::GEOJSON_MULTI_POLYGON;
+ } else if (GEOJSON_TYPE_GEOMETRY_COLLECTION == typeString) {
+ return GeoParser::GEOJSON_GEOMETRY_COLLECTION;
+ }
+ return GeoParser::GEOJSON_UNKNOWN;
+}
} // namespace mongo
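
As a worked reference for the polygon rules enforced above (closed loops, at least 3 distinct vertices, holes contained by the first exterior ring, strict-winding CRS reserved for big polygons), here is a hedged sketch; the coordinates and variable names are illustrative only.

    // Exterior ring plus one hole; each loop is closed (first vertex == last vertex)
    // and has at least 3 distinct vertices.
    BSONObj polyObj = fromjson(
        "{type: 'Polygon', coordinates: ["
        " [[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]],"
        " [[4, 4], [6, 4], [6, 6], [4, 6], [4, 4]]]}");
    PolygonWithCRS poly;
    Status polyStatus = GeoParser::parseGeoJSONPolygon(polyObj, &poly);  // crs == SPHERE

    // Single-loop big polygon, selected by the MongoDB-specific strict-winding CRS name.
    BSONObj bigPolyObj = fromjson(
        "{type: 'Polygon',"
        " coordinates: [[[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]]],"
        " crs: {type: 'name',"
        "       properties: {name: 'urn:x-mongodb:crs:strictwinding:EPSG:4326'}}}");
    PolygonWithCRS bigPoly;
    Status bigStatus = GeoParser::parseGeoJSONPolygon(bigPolyObj, &bigPoly);  // crs == STRICT_SPHERE
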
diff --git a/src/mongo/db/geo/geoparser.h b/src/mongo/db/geo/geoparser.h
index 796db6b087f..91cdb6649ea 100644
--- a/src/mongo/db/geo/geoparser.h
+++ b/src/mongo/db/geo/geoparser.h
@@ -33,64 +33,65 @@
namespace mongo {
- // This class parses geographic data.
- // It parses a subset of GeoJSON and creates S2 shapes from it.
- // See http://geojson.org/geojson-spec.html for the spec.
- //
- // This class also parses the ad-hoc geo formats that MongoDB introduced.
- //
- // parse* methods may do some more validation than the is* methods; they return false if they
- // encounter invalid geometry and true if the geometry is parsed successfully.
- class GeoParser {
- public:
-
- // Geospatial specifier after $geoWithin / $geoIntersects predicates.
- // i.e. "$box" in { $box: [[1, 2], [3, 4]] }
- enum GeoSpecifier {
- UNKNOWN = 0,
- BOX, // $box
- CENTER, // $center
- POLYGON, // $polygon
- CENTER_SPHERE, // $centerSphere
- GEOMETRY // GeoJSON geometry, $geometry
- };
+// This class parses geographic data.
+// It parses a subset of GeoJSON and creates S2 shapes from it.
+// See http://geojson.org/geojson-spec.html for the spec.
+//
+// This class also parses the ad-hoc geo formats that MongoDB introduced.
+//
+// parse* methods may do some more validation than the is* methods; they return a non-OK
+// Status if they encounter invalid geometry and Status::OK() if the geometry parses successfully.
+class GeoParser {
+public:
+ // Geospatial specifier after $geoWithin / $geoIntersects predicates.
+ // i.e. "$box" in { $box: [[1, 2], [3, 4]] }
+ enum GeoSpecifier {
+ UNKNOWN = 0,
+ BOX, // $box
+ CENTER, // $center
+ POLYGON, // $polygon
+ CENTER_SPHERE, // $centerSphere
+ GEOMETRY // GeoJSON geometry, $geometry
+ };
- // GeoJSON type defined in GeoJSON document.
- // i.e. "Point" in { type: "Point", coordinates: [1, 2] }
- enum GeoJSONType {
- GEOJSON_UNKNOWN = 0,
- GEOJSON_POINT,
- GEOJSON_LINESTRING,
- GEOJSON_POLYGON,
- GEOJSON_MULTI_POINT,
- GEOJSON_MULTI_LINESTRING,
- GEOJSON_MULTI_POLYGON,
- GEOJSON_GEOMETRY_COLLECTION
- };
+ // GeoJSON type defined in GeoJSON document.
+ // i.e. "Point" in { type: "Point", coordinates: [1, 2] }
+ enum GeoJSONType {
+ GEOJSON_UNKNOWN = 0,
+ GEOJSON_POINT,
+ GEOJSON_LINESTRING,
+ GEOJSON_POLYGON,
+ GEOJSON_MULTI_POINT,
+ GEOJSON_MULTI_LINESTRING,
+ GEOJSON_MULTI_POLYGON,
+ GEOJSON_GEOMETRY_COLLECTION
+ };
- static GeoSpecifier parseGeoSpecifier(const BSONElement& elem);
- static GeoJSONType parseGeoJSONType(const BSONObj& obj);
+ static GeoSpecifier parseGeoSpecifier(const BSONElement& elem);
+ static GeoJSONType parseGeoJSONType(const BSONObj& obj);
- // Legacy points can contain extra data as extra fields - these are valid to index
- // e.g. { x: 1, y: 1, z: 1 }
- static Status parseLegacyPoint(const BSONElement &elem, PointWithCRS *out, bool allowAddlFields = false);
- // Parse the BSON object after $box, $center, etc.
- static Status parseLegacyBox(const BSONObj& obj, BoxWithCRS *out);
- static Status parseLegacyCenter(const BSONObj& obj, CapWithCRS *out);
- static Status parseLegacyPolygon(const BSONObj& obj, PolygonWithCRS *out);
- static Status parseCenterSphere(const BSONObj& obj, CapWithCRS *out);
- static Status parseGeoJSONPolygon(const BSONObj &obj, PolygonWithCRS *out);
- static Status parseGeoJSONPoint(const BSONObj &obj, PointWithCRS *out);
- static Status parseGeoJSONLine(const BSONObj& obj, LineWithCRS* out);
- static Status parseMultiPoint(const BSONObj &obj, MultiPointWithCRS *out);
- static Status parseMultiLine(const BSONObj &obj, MultiLineWithCRS *out);
- static Status parseMultiPolygon(const BSONObj &obj, MultiPolygonWithCRS *out);
- static Status parseGeometryCollection(const BSONObj &obj, GeometryCollection *out);
+ // Legacy points can contain extra data as extra fields - these are valid to index
+ // e.g. { x: 1, y: 1, z: 1 }
+ static Status parseLegacyPoint(const BSONElement& elem,
+ PointWithCRS* out,
+ bool allowAddlFields = false);
+ // Parse the BSON object after $box, $center, etc.
+ static Status parseLegacyBox(const BSONObj& obj, BoxWithCRS* out);
+ static Status parseLegacyCenter(const BSONObj& obj, CapWithCRS* out);
+ static Status parseLegacyPolygon(const BSONObj& obj, PolygonWithCRS* out);
+ static Status parseCenterSphere(const BSONObj& obj, CapWithCRS* out);
+ static Status parseGeoJSONPolygon(const BSONObj& obj, PolygonWithCRS* out);
+ static Status parseGeoJSONPoint(const BSONObj& obj, PointWithCRS* out);
+ static Status parseGeoJSONLine(const BSONObj& obj, LineWithCRS* out);
+ static Status parseMultiPoint(const BSONObj& obj, MultiPointWithCRS* out);
+ static Status parseMultiLine(const BSONObj& obj, MultiLineWithCRS* out);
+ static Status parseMultiPolygon(const BSONObj& obj, MultiPolygonWithCRS* out);
+ static Status parseGeometryCollection(const BSONObj& obj, GeometryCollection* out);
- // For geo near
- static Status parseQueryPoint(const BSONElement &elem, PointWithCRS *out);
- static Status parseStoredPoint(const BSONElement &elem, PointWithCRS *out);
- static bool parsePointWithMaxDistance(const BSONObj& obj, PointWithCRS* out, double* maxOut);
- };
+ // For geo near
+ static Status parseQueryPoint(const BSONElement& elem, PointWithCRS* out);
+ static Status parseStoredPoint(const BSONElement& elem, PointWithCRS* out);
+ static bool parsePointWithMaxDistance(const BSONObj& obj, PointWithCRS* out, double* maxOut);
+};
} // namespace mongo
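
For reference, the declarations above are called the same way before and after the reflow. A minimal usage sketch follows, assuming the header paths "mongo/db/geo/geoparser.h", "mongo/db/geo/shapes.h", and "mongo/db/json.h" and the allowAddlFields behaviour described in the comment above; the snippet is illustrative only and not part of this diff:

    #include "mongo/db/geo/geoparser.h"  // GeoParser declarations shown above
    #include "mongo/db/geo/shapes.h"     // PointWithCRS and related shape types (assumed location)
    #include "mongo/db/json.h"           // fromjson()

    using namespace mongo;

    void geoParserUsageSketch() {
        // GeoJSON point: pass the whole {type, coordinates} object.
        PointWithCRS geoJsonPoint;
        Status s1 = GeoParser::parseGeoJSONPoint(
            fromjson("{'type':'Point', 'coordinates': [40, 5]}"), &geoJsonPoint);

        // Legacy point: pass the element holding {x, y}. Extra fields such as z
        // are accepted only when allowAddlFields is true.
        BSONObj doc = fromjson("{loc: {x: 1, y: 1, z: 1}}");
        PointWithCRS legacyPoint;
        Status s2 = GeoParser::parseLegacyPoint(
            doc.firstElement(), &legacyPoint, true /* allowAddlFields */);

        // Each parser reports success or failure through the returned Status.
        (void)s1;
        (void)s2;
    }
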
diff --git a/src/mongo/db/geo/geoparser_test.cpp b/src/mongo/db/geo/geoparser_test.cpp
index 2387696b7be..201416e076b 100644
--- a/src/mongo/db/geo/geoparser_test.cpp
+++ b/src/mongo/db/geo/geoparser_test.cpp
@@ -47,361 +47,379 @@ using namespace mongo;
namespace {
- TEST(GeoParser, parseGeoSpecifier) {
- ASSERT_EQUALS(GeoParser::parseGeoSpecifier(
- fromjson("{$box : [[1, 2], [3, 4]]}").firstElement()),
- GeoParser::BOX);
- ASSERT_EQUALS(GeoParser::parseGeoSpecifier(
- fromjson("{$center : [[0, 0], 4]}").firstElement()),
- GeoParser::CENTER);
- ASSERT_EQUALS(GeoParser::parseGeoSpecifier(
- fromjson("{$centerSphere : [[0, 0], 1]}").firstElement()),
- GeoParser::CENTER_SPHERE);
- ASSERT_EQUALS(GeoParser::parseGeoSpecifier(
- fromjson("{$geometry : {'type':'Point', 'coordinates': [40, 5]}}").firstElement()),
- GeoParser::GEOMETRY);
- }
+TEST(GeoParser, parseGeoSpecifier) {
+ ASSERT_EQUALS(
+ GeoParser::parseGeoSpecifier(fromjson("{$box : [[1, 2], [3, 4]]}").firstElement()),
+ GeoParser::BOX);
+ ASSERT_EQUALS(GeoParser::parseGeoSpecifier(fromjson("{$center : [[0, 0], 4]}").firstElement()),
+ GeoParser::CENTER);
+ ASSERT_EQUALS(
+ GeoParser::parseGeoSpecifier(fromjson("{$centerSphere : [[0, 0], 1]}").firstElement()),
+ GeoParser::CENTER_SPHERE);
+ ASSERT_EQUALS(
+ GeoParser::parseGeoSpecifier(
+ fromjson("{$geometry : {'type':'Point', 'coordinates': [40, 5]}}").firstElement()),
+ GeoParser::GEOMETRY);
+}
- TEST(GeoParser, parseGeoJSONPoint) {
- PointWithCRS point;
-
- ASSERT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [40, 5]}"), &point));
- ASSERT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [-40.3, -5.0]}"), &point));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordhats': [40, -5]}"), &point));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': 40}"), &point));
- ASSERT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [40, -5, 7]}"), &point));
-
- // Make sure lat is in range
- ASSERT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [0, 90.0]}"), &point));
- ASSERT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [0, -90.0]}"), &point));
- ASSERT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [180, 90.0]}"), &point));
- ASSERT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [-180, -90.0]}"), &point));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [180.01, 90.0]}"), &point));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [-180.01, -90.0]}"), &point));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [0, 90.1]}"), &point));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [0, -90.1]}"), &point));
- }
+TEST(GeoParser, parseGeoJSONPoint) {
+ PointWithCRS point;
+
+ ASSERT_OK(
+ GeoParser::parseGeoJSONPoint(fromjson("{'type':'Point', 'coordinates': [40, 5]}"), &point));
+ ASSERT_OK(GeoParser::parseGeoJSONPoint(
+ fromjson("{'type':'Point', 'coordinates': [-40.3, -5.0]}"), &point));
+ ASSERT_NOT_OK(
+ GeoParser::parseGeoJSONPoint(fromjson("{'type':'Point', 'coordhats': [40, -5]}"), &point));
+ ASSERT_NOT_OK(
+ GeoParser::parseGeoJSONPoint(fromjson("{'type':'Point', 'coordinates': 40}"), &point));
+ ASSERT_OK(GeoParser::parseGeoJSONPoint(fromjson("{'type':'Point', 'coordinates': [40, -5, 7]}"),
+ &point));
+
+ // Make sure lat is in range
+ ASSERT_OK(GeoParser::parseGeoJSONPoint(fromjson("{'type':'Point', 'coordinates': [0, 90.0]}"),
+ &point));
+ ASSERT_OK(GeoParser::parseGeoJSONPoint(fromjson("{'type':'Point', 'coordinates': [0, -90.0]}"),
+ &point));
+ ASSERT_OK(GeoParser::parseGeoJSONPoint(fromjson("{'type':'Point', 'coordinates': [180, 90.0]}"),
+ &point));
+ ASSERT_OK(GeoParser::parseGeoJSONPoint(
+ fromjson("{'type':'Point', 'coordinates': [-180, -90.0]}"), &point));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
+ fromjson("{'type':'Point', 'coordinates': [180.01, 90.0]}"), &point));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
+ fromjson("{'type':'Point', 'coordinates': [-180.01, -90.0]}"), &point));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
+ fromjson("{'type':'Point', 'coordinates': [0, 90.1]}"), &point));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(
+ fromjson("{'type':'Point', 'coordinates': [0, -90.1]}"), &point));
+}
- TEST(GeoParser, parseGeoJSONLine) {
- LineWithCRS polyline;
-
- ASSERT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[1,2], [3,4]]}"), &polyline));
- ASSERT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[0,-90], [0,90]]}"), &polyline));
- ASSERT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[180,-90], [-180,90]]}"), &polyline));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[180.1,-90], [-180.1,90]]}"),
- &polyline));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[0,-91], [0,90]]}"), &polyline));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[0,-90], [0,91]]}"), &polyline));
- ASSERT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[1,2], [3,4], [5,6]]}"), &polyline));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[1,2]]}"), &polyline));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[['chicken','little']]}"), &polyline));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[1,2, 3, 4]}"), &polyline));
- ASSERT_OK(GeoParser::parseGeoJSONLine(
- fromjson("{'type':'LineString', 'coordinates':[[1,2, 3], [3,4, 5], [5,6]]}"),
- &polyline));
- }
+TEST(GeoParser, parseGeoJSONLine) {
+ LineWithCRS polyline;
+
+ ASSERT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[1,2], [3,4]]}"), &polyline));
+ ASSERT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[0,-90], [0,90]]}"), &polyline));
+ ASSERT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[180,-90], [-180,90]]}"), &polyline));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[180.1,-90], [-180.1,90]]}"), &polyline));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[0,-91], [0,90]]}"), &polyline));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[0,-90], [0,91]]}"), &polyline));
+ ASSERT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[1,2], [3,4], [5,6]]}"), &polyline));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[1,2]]}"), &polyline));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[['chicken','little']]}"), &polyline));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[1,2, 3, 4]}"), &polyline));
+ ASSERT_OK(GeoParser::parseGeoJSONLine(
+ fromjson("{'type':'LineString', 'coordinates':[[1,2, 3], [3,4, 5], [5,6]]}"), &polyline));
+}
- TEST(GeoParser, parseGeoJSONPolygon) {
- PolygonWithCRS polygon;
-
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]] ]}"),
- &polygon));
- // No out of bounds points
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,91],[0,5],[0,0]] ]}"),
- &polygon));
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[180,0],[5,5],[0,5],[0,0]] ]}"),
- &polygon));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[181,0],[5,5],[0,5],[0,0]] ]}"),
- &polygon));
- // And one with a hole.
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
- " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"), &polygon));
- // Latitudes must be OK
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,91],[0,91],[0,0]],"
- " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"), &polygon));
- // First point must be the same as the last.
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[1,2],[3,4],[5,6]] ]}"), &polygon));
- // Extra elements are allowed
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0,0,0],[5,0,0],[5,5,1],"
- " [0,5],[0,0]] ]}"), &polygon));
-
- // Test functionality of polygon
- PointWithCRS point;
- ASSERT_OK(GeoParser::parseGeoJSONPoint(
- fromjson("{'type':'Point', 'coordinates': [2, 2]}"), &point));
-
- PolygonWithCRS polygonA;
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]] ]}"),
- &polygonA));
- ASSERT_TRUE(polygonA.s2Polygon->Contains(point.point));
-
- PolygonWithCRS polygonB;
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
- " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
- &polygonB));
- // We removed this in the hole.
- ASSERT_FALSE(polygonB.s2Polygon->Contains(point.point));
-
- // Now we reverse the orientations and verify that the code fixes it up
- // (outer loop must be CCW, inner CW).
- PolygonWithCRS polygonC;
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]] ]}"),
- &polygonC));
- ASSERT_TRUE(polygonC.s2Polygon->Contains(point.point));
-
- PolygonWithCRS polygonD;
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]],"
- " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
- &polygonD));
- // Also removed in the loop.
- ASSERT_FALSE(polygonD.s2Polygon->Contains(point.point));
-
- //
- // Bad polygon examples
- //
-
- // Polygon with not enough points, because some are duplicated
- PolygonWithCRS polygonBad;
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson("{'type':'Polygon', 'coordinates':[[ [0,0], [0,0], [5,5], [5,5], [0,0] ]]}"),
- &polygonBad));
- }
+TEST(GeoParser, parseGeoJSONPolygon) {
+ PolygonWithCRS polygon;
+
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]] ]}"),
+ &polygon));
+ // No out of bounds points
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,91],[0,5],[0,0]] ]}"),
+ &polygon));
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[180,0],[5,5],[0,5],[0,0]] ]}"),
+ &polygon));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[181,0],[5,5],[0,5],[0,0]] ]}"),
+ &polygon));
+ // And one with a hole.
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson(
+ "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
+ " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
+ &polygon));
+ // Latitudes must be OK
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson(
+ "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,91],[0,91],[0,0]],"
+ " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
+ &polygon));
+ // First point must be the same as the last.
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson("{'type':'Polygon', 'coordinates':[ [[1,2],[3,4],[5,6]] ]}"), &polygon));
+ // Extra elements are allowed
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson(
+ "{'type':'Polygon', 'coordinates':[ [[0,0,0,0],[5,0,0],[5,5,1],"
+ " [0,5],[0,0]] ]}"),
+ &polygon));
+
+ // Test functionality of polygon
+ PointWithCRS point;
+ ASSERT_OK(
+ GeoParser::parseGeoJSONPoint(fromjson("{'type':'Point', 'coordinates': [2, 2]}"), &point));
+
+ PolygonWithCRS polygonA;
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]] ]}"),
+ &polygonA));
+ ASSERT_TRUE(polygonA.s2Polygon->Contains(point.point));
+
+ PolygonWithCRS polygonB;
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson(
+ "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
+ " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
+ &polygonB));
+ // We removed this in the hole.
+ ASSERT_FALSE(polygonB.s2Polygon->Contains(point.point));
+
+ // Now we reverse the orientations and verify that the code fixes it up
+ // (outer loop must be CCW, inner CW).
+ PolygonWithCRS polygonC;
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]] ]}"),
+ &polygonC));
+ ASSERT_TRUE(polygonC.s2Polygon->Contains(point.point));
+
+ PolygonWithCRS polygonD;
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson(
+ "{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]],"
+ " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
+ &polygonD));
+ // Also removed in the loop.
+ ASSERT_FALSE(polygonD.s2Polygon->Contains(point.point));
+
+ //
+ // Bad polygon examples
+ //
+
+ // Polygon with not enough points, because some are duplicated
+ PolygonWithCRS polygonBad;
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
+ fromjson("{'type':'Polygon', 'coordinates':[[ [0,0], [0,0], [5,5], [5,5], [0,0] ]]}"),
+ &polygonBad));
+}
- TEST(GeoParser, parseGeoJSONCRS) {
- string goodCRS1 = "crs:{ type: 'name', properties:{name:'EPSG:4326'}}";
- string goodCRS2 = "crs:{ type: 'name', properties:{name:'urn:ogc:def:crs:OGC:1.3:CRS84'}}";
- string badCRS1 = "crs:{ type: 'name', properties:{name:'EPSG:2000'}}";
- string badCRS2 = "crs:{ type: 'name', properties:{name:'urn:ogc:def:crs:OGC:1.3:CRS83'}}";
-
- BSONObj point1 = fromjson("{'type':'Point', 'coordinates': [40, 5], " + goodCRS1 + "}");
- BSONObj point2 = fromjson("{'type':'Point', 'coordinates': [40, 5], " + goodCRS2 + "}");
- PointWithCRS point;
- ASSERT_OK(GeoParser::parseGeoJSONPoint(point1, &point));
- ASSERT_OK(GeoParser::parseGeoJSONPoint(point2, &point));
- BSONObj point3 = fromjson("{'type':'Point', 'coordinates': [40, 5], " + badCRS1 + "}");
- BSONObj point4 = fromjson("{'type':'Point', 'coordinates': [40, 5], " + badCRS2 + "}");
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(point3, &point));
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(point4, &point));
-
- PolygonWithCRS polygon;
- BSONObj polygon1 = fromjson("{'type':'Polygon', 'coordinates':[ "
- "[[0,0],[5,0],[5,5],[0,5],[0,0]],"
- " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]," + goodCRS1 + "}");
- ASSERT_OK(GeoParser::parseGeoJSONPolygon(polygon1, &polygon));
- BSONObj polygon2 = fromjson("{'type':'Polygon', 'coordinates':[ "
- "[[0,0],[5,0],[5,5],[0,5],[0,0]],"
- " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]," + badCRS2 + "}");
- ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(polygon2, &polygon));
-
- LineWithCRS line;
- BSONObj line1 = fromjson("{'type':'LineString', 'coordinates':[[1,2], [3,4], [5,6]]," +
- goodCRS2 + "}");
- ASSERT_OK(GeoParser::parseGeoJSONLine(line1, &line));
- BSONObj line2 = fromjson("{'type':'LineString', 'coordinates':[[1,2], [3,4], [5,6]]," +
- badCRS1 + "}");
- ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(line2, &line));
- }
+TEST(GeoParser, parseGeoJSONCRS) {
+ string goodCRS1 = "crs:{ type: 'name', properties:{name:'EPSG:4326'}}";
+ string goodCRS2 = "crs:{ type: 'name', properties:{name:'urn:ogc:def:crs:OGC:1.3:CRS84'}}";
+ string badCRS1 = "crs:{ type: 'name', properties:{name:'EPSG:2000'}}";
+ string badCRS2 = "crs:{ type: 'name', properties:{name:'urn:ogc:def:crs:OGC:1.3:CRS83'}}";
+
+ BSONObj point1 = fromjson("{'type':'Point', 'coordinates': [40, 5], " + goodCRS1 + "}");
+ BSONObj point2 = fromjson("{'type':'Point', 'coordinates': [40, 5], " + goodCRS2 + "}");
+ PointWithCRS point;
+ ASSERT_OK(GeoParser::parseGeoJSONPoint(point1, &point));
+ ASSERT_OK(GeoParser::parseGeoJSONPoint(point2, &point));
+ BSONObj point3 = fromjson("{'type':'Point', 'coordinates': [40, 5], " + badCRS1 + "}");
+ BSONObj point4 = fromjson("{'type':'Point', 'coordinates': [40, 5], " + badCRS2 + "}");
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(point3, &point));
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPoint(point4, &point));
+
+ PolygonWithCRS polygon;
+ BSONObj polygon1 = fromjson(
+ "{'type':'Polygon', 'coordinates':[ "
+ "[[0,0],[5,0],[5,5],[0,5],[0,0]],"
+ " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]," +
+ goodCRS1 + "}");
+ ASSERT_OK(GeoParser::parseGeoJSONPolygon(polygon1, &polygon));
+ BSONObj polygon2 = fromjson(
+ "{'type':'Polygon', 'coordinates':[ "
+ "[[0,0],[5,0],[5,5],[0,5],[0,0]],"
+ " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]," +
+ badCRS2 + "}");
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(polygon2, &polygon));
+
+ LineWithCRS line;
+ BSONObj line1 =
+ fromjson("{'type':'LineString', 'coordinates':[[1,2], [3,4], [5,6]]," + goodCRS2 + "}");
+ ASSERT_OK(GeoParser::parseGeoJSONLine(line1, &line));
+ BSONObj line2 =
+ fromjson("{'type':'LineString', 'coordinates':[[1,2], [3,4], [5,6]]," + badCRS1 + "}");
+ ASSERT_NOT_OK(GeoParser::parseGeoJSONLine(line2, &line));
+}
- TEST(GeoParser, parseLegacyPoint) {
- PointWithCRS point;
- ASSERT_OK(GeoParser::parseLegacyPoint(BSON_ELT(BSON_ARRAY(0 << 1)), &point));
- ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(BSON_ARRAY(0)), &point));
- ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(BSON_ARRAY(0 << 1 << 2)), &point));
- ASSERT_OK(GeoParser::parseLegacyPoint(BSON_ELT(fromjson("{x: 50, y:40}")), &point));
- ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(fromjson("{x: '50', y:40}")), &point));
- ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(fromjson("{x: 5, y:40, z:50}")), &point));
- ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(fromjson("{x: 5}")), &point));
- }
+TEST(GeoParser, parseLegacyPoint) {
+ PointWithCRS point;
+ ASSERT_OK(GeoParser::parseLegacyPoint(BSON_ELT(BSON_ARRAY(0 << 1)), &point));
+ ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(BSON_ARRAY(0)), &point));
+ ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(BSON_ARRAY(0 << 1 << 2)), &point));
+ ASSERT_OK(GeoParser::parseLegacyPoint(BSON_ELT(fromjson("{x: 50, y:40}")), &point));
+ ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(fromjson("{x: '50', y:40}")), &point));
+ ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(fromjson("{x: 5, y:40, z:50}")), &point));
+ ASSERT_NOT_OK(GeoParser::parseLegacyPoint(BSON_ELT(fromjson("{x: 5}")), &point));
+}
- TEST(GeoParser, parseLegacyPolygon) {
- PolygonWithCRS polygon;
-
- // Parse the object after field name "$polygon"
- ASSERT_OK(GeoParser::parseLegacyPolygon(
- fromjson("[[10,20],[10,40],[30,40],[30,20]]"), &polygon));
- ASSERT(polygon.crs == FLAT);
-
- ASSERT_OK(GeoParser::parseLegacyPolygon(
- fromjson("[[10,20], [10,40], [30,40]]"), &polygon));
- ASSERT(polygon.crs == FLAT);
-
- ASSERT_NOT_OK(GeoParser::parseLegacyPolygon(
- fromjson("[[10,20],[10,40]]"), &polygon));
- ASSERT_NOT_OK(GeoParser::parseLegacyPolygon(
- fromjson("[['10',20],[10,40],[30,40],[30,20]]"), &polygon));
- ASSERT_NOT_OK(GeoParser::parseLegacyPolygon(
- fromjson("[[10,20,30],[10,40],[30,40],[30,20]]"), &polygon));
- ASSERT_OK(GeoParser::parseLegacyPolygon(
- fromjson("{a:{x:40,y:5},b:{x:40,y:6},c:{x:41,y:6},d:{x:41,y:5}}"), &polygon));
- }
+TEST(GeoParser, parseLegacyPolygon) {
+ PolygonWithCRS polygon;
- TEST(GeoParser, parseMultiPoint) {
- mongo::MultiPointWithCRS mp;
-
- ASSERT_OK(GeoParser::parseMultiPoint(
- fromjson("{'type':'MultiPoint','coordinates':[[1,2],[3,4]]}"), &mp));
- ASSERT_EQUALS(mp.points.size(), (size_t)2);
-
- ASSERT_OK(GeoParser::parseMultiPoint(
- fromjson("{'type':'MultiPoint','coordinates':[[3,4]]}"), &mp));
- ASSERT_EQUALS(mp.points.size(), (size_t)1);
-
- ASSERT_OK(GeoParser::parseMultiPoint(
- fromjson("{'type':'MultiPoint','coordinates':[[1,2],[3,4],[5,6],[7,8]]}"), &mp));
- ASSERT_EQUALS(mp.points.size(), (size_t)4);
-
- ASSERT_NOT_OK(GeoParser::parseMultiPoint(
- fromjson("{'type':'MultiPoint','coordinates':[]}"), &mp));
- ASSERT_NOT_OK(GeoParser::parseMultiPoint(
- fromjson("{'type':'MultiPoint','coordinates':[[181,2],[3,4]]}"), &mp));
- ASSERT_NOT_OK(GeoParser::parseMultiPoint(
- fromjson("{'type':'MultiPoint','coordinates':[[1,-91],[3,4]]}"), &mp));
- ASSERT_NOT_OK(GeoParser::parseMultiPoint(
- fromjson("{'type':'MultiPoint','coordinates':[[181,2],[3,'chicken']]}"), &mp));
- }
+ // Parse the object after field name "$polygon"
+ ASSERT_OK(
+ GeoParser::parseLegacyPolygon(fromjson("[[10,20],[10,40],[30,40],[30,20]]"), &polygon));
+ ASSERT(polygon.crs == FLAT);
- TEST(GeoParser, parseMultiLine) {
- mongo::MultiLineWithCRS ml;
-
- ASSERT_OK(GeoParser::parseMultiLine(
- fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2],[3,3]],"
- "[[4,5],[6,7]]]}"), &ml));
- ASSERT_EQUALS(ml.lines.size(), (size_t)2);
-
- ASSERT_OK(GeoParser::parseMultiLine(
- fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
- "[[4,5],[6,7]]]}"), &ml));
- ASSERT_EQUALS(ml.lines.size(), (size_t)2);
-
- ASSERT_OK(GeoParser::parseMultiLine(
- fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]]]}"), &ml));
- ASSERT_EQUALS(ml.lines.size(), (size_t)1);
-
- ASSERT_OK(GeoParser::parseMultiLine(
- fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
- "[[2,2],[1,1]]]}"), &ml));
- ASSERT_EQUALS(ml.lines.size(), (size_t)2);
-
- ASSERT_NOT_OK(GeoParser::parseMultiLine(
- fromjson("{'type':'MultiLineString','coordinates':[ [[1,1]]]}"), &ml));
- ASSERT_NOT_OK(GeoParser::parseMultiLine(
- fromjson("{'type':'MultiLineString','coordinates':[ [[1,1]],[[1,2],[3,4]]]}"), &ml));
- ASSERT_NOT_OK(GeoParser::parseMultiLine(
- fromjson("{'type':'MultiLineString','coordinates':[ [[181,1],[2,2]]]}"), &ml));
- ASSERT_NOT_OK(GeoParser::parseMultiLine(
- fromjson("{'type':'MultiLineString','coordinates':[ [[181,1],[2,-91]]]}"), &ml));
- }
+ ASSERT_OK(GeoParser::parseLegacyPolygon(fromjson("[[10,20], [10,40], [30,40]]"), &polygon));
+ ASSERT(polygon.crs == FLAT);
- TEST(GeoParser, parseMultiPolygon) {
- mongo::MultiPolygonWithCRS mp;
-
- ASSERT_OK(GeoParser::parseMultiPolygon(
- fromjson("{'type':'MultiPolygon','coordinates':["
- "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"), &mp));
- ASSERT_EQUALS(mp.polygons.size(), (size_t)2);
-
- ASSERT_OK(GeoParser::parseMultiPolygon(
- fromjson("{'type':'MultiPolygon','coordinates':["
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"), &mp));
- ASSERT_EQUALS(mp.polygons.size(), (size_t)1);
- }
+ ASSERT_NOT_OK(GeoParser::parseLegacyPolygon(fromjson("[[10,20],[10,40]]"), &polygon));
+ ASSERT_NOT_OK(
+ GeoParser::parseLegacyPolygon(fromjson("[['10',20],[10,40],[30,40],[30,20]]"), &polygon));
+ ASSERT_NOT_OK(
+ GeoParser::parseLegacyPolygon(fromjson("[[10,20,30],[10,40],[30,40],[30,20]]"), &polygon));
+ ASSERT_OK(GeoParser::parseLegacyPolygon(
+ fromjson("{a:{x:40,y:5},b:{x:40,y:6},c:{x:41,y:6},d:{x:41,y:5}}"), &polygon));
+}
- TEST(GeoParser, parseGeometryCollection) {
- {
- mongo::GeometryCollection gc;
- BSONObj obj = fromjson(
+TEST(GeoParser, parseMultiPoint) {
+ mongo::MultiPointWithCRS mp;
+
+ ASSERT_OK(GeoParser::parseMultiPoint(
+ fromjson("{'type':'MultiPoint','coordinates':[[1,2],[3,4]]}"), &mp));
+ ASSERT_EQUALS(mp.points.size(), (size_t)2);
+
+ ASSERT_OK(
+ GeoParser::parseMultiPoint(fromjson("{'type':'MultiPoint','coordinates':[[3,4]]}"), &mp));
+ ASSERT_EQUALS(mp.points.size(), (size_t)1);
+
+ ASSERT_OK(GeoParser::parseMultiPoint(
+ fromjson("{'type':'MultiPoint','coordinates':[[1,2],[3,4],[5,6],[7,8]]}"), &mp));
+ ASSERT_EQUALS(mp.points.size(), (size_t)4);
+
+ ASSERT_NOT_OK(
+ GeoParser::parseMultiPoint(fromjson("{'type':'MultiPoint','coordinates':[]}"), &mp));
+ ASSERT_NOT_OK(GeoParser::parseMultiPoint(
+ fromjson("{'type':'MultiPoint','coordinates':[[181,2],[3,4]]}"), &mp));
+ ASSERT_NOT_OK(GeoParser::parseMultiPoint(
+ fromjson("{'type':'MultiPoint','coordinates':[[1,-91],[3,4]]}"), &mp));
+ ASSERT_NOT_OK(GeoParser::parseMultiPoint(
+ fromjson("{'type':'MultiPoint','coordinates':[[181,2],[3,'chicken']]}"), &mp));
+}
+
+TEST(GeoParser, parseMultiLine) {
+ mongo::MultiLineWithCRS ml;
+
+ ASSERT_OK(GeoParser::parseMultiLine(
+ fromjson(
+ "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2],[3,3]],"
+ "[[4,5],[6,7]]]}"),
+ &ml));
+ ASSERT_EQUALS(ml.lines.size(), (size_t)2);
+
+ ASSERT_OK(
+ GeoParser::parseMultiLine(fromjson(
+ "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
+ "[[4,5],[6,7]]]}"),
+ &ml));
+ ASSERT_EQUALS(ml.lines.size(), (size_t)2);
+
+ ASSERT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]]]}"), &ml));
+ ASSERT_EQUALS(ml.lines.size(), (size_t)1);
+
+ ASSERT_OK(
+ GeoParser::parseMultiLine(fromjson(
+ "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
+ "[[2,2],[1,1]]]}"),
+ &ml));
+ ASSERT_EQUALS(ml.lines.size(), (size_t)2);
+
+ ASSERT_NOT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1]]]}"), &ml));
+ ASSERT_NOT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1]],[[1,2],[3,4]]]}"), &ml));
+ ASSERT_NOT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[181,1],[2,2]]]}"), &ml));
+ ASSERT_NOT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[181,1],[2,-91]]]}"), &ml));
+}
+
+TEST(GeoParser, parseMultiPolygon) {
+ mongo::MultiPolygonWithCRS mp;
+
+ ASSERT_OK(GeoParser::parseMultiPolygon(
+ fromjson(
+ "{'type':'MultiPolygon','coordinates':["
+ "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"),
+ &mp));
+ ASSERT_EQUALS(mp.polygons.size(), (size_t)2);
+
+ ASSERT_OK(GeoParser::parseMultiPolygon(
+ fromjson(
+ "{'type':'MultiPolygon','coordinates':["
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"),
+ &mp));
+ ASSERT_EQUALS(mp.polygons.size(), (size_t)1);
+}
+
+TEST(GeoParser, parseGeometryCollection) {
+ {
+ mongo::GeometryCollection gc;
+ BSONObj obj = fromjson(
"{ 'type': 'GeometryCollection', 'geometries': ["
- "{ 'type': 'Point','coordinates': [100.0,0.0]},"
- "{ 'type': 'LineString', 'coordinates': [ [101.0, 0.0], [102.0, 1.0] ]}"
- "]}");
- ASSERT_OK(GeoParser::parseGeometryCollection(obj, &gc));
- ASSERT_FALSE(gc.supportsContains());
- }
-
- {
- BSONObj obj = fromjson(
+ "{ 'type': 'Point','coordinates': [100.0,0.0]},"
+ "{ 'type': 'LineString', 'coordinates': [ [101.0, 0.0], [102.0, 1.0] ]}"
+ "]}");
+ ASSERT_OK(GeoParser::parseGeometryCollection(obj, &gc));
+ ASSERT_FALSE(gc.supportsContains());
+ }
+
+ {
+ BSONObj obj = fromjson(
"{ 'type': 'GeometryCollection', 'geometries': ["
"{'type':'MultiPolygon','coordinates':["
- "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"
- "]}");
-
- mongo::GeometryCollection gc;
- ASSERT_OK(GeoParser::parseGeometryCollection(obj, &gc));
- ASSERT_TRUE(gc.supportsContains());
- }
-
- {
- BSONObj obj = fromjson(
+ "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"
+ "]}");
+
+ mongo::GeometryCollection gc;
+ ASSERT_OK(GeoParser::parseGeometryCollection(obj, &gc));
+ ASSERT_TRUE(gc.supportsContains());
+ }
+
+ {
+ BSONObj obj = fromjson(
"{ 'type': 'GeometryCollection', 'geometries': ["
"{'type':'Polygon', 'coordinates':[ [[0,0],[0,91],[5,5],[5,0],[0,0]] ]},"
"{'type':'MultiPolygon','coordinates':["
- "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"
- "]}");
- mongo::GeometryCollection gc;
- ASSERT_NOT_OK(GeoParser::parseGeometryCollection(obj, &gc));
- }
-
- {
- BSONObj obj = fromjson(
+ "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"
+ "]}");
+ mongo::GeometryCollection gc;
+ ASSERT_NOT_OK(GeoParser::parseGeometryCollection(obj, &gc));
+ }
+
+ {
+ BSONObj obj = fromjson(
"{ 'type': 'GeometryCollection', 'geometries': ["
"{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]] ]},"
"{'type':'MultiPolygon','coordinates':["
- "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"
- "]}");
-
- mongo::GeometryCollection gc;
- ASSERT_OK(GeoParser::parseGeometryCollection(obj, &gc));
- ASSERT_TRUE(gc.supportsContains());
- }
+ "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"
+ "]}");
+
+ mongo::GeometryCollection gc;
+ ASSERT_OK(GeoParser::parseGeometryCollection(obj, &gc));
+ ASSERT_TRUE(gc.supportsContains());
}
}
+}
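
The polygon assertions above encode two behaviours that are easy to miss in the reflowed test bodies: ring orientation is normalized during parsing (an outer ring supplied clockwise still parses), and a point inside a hole is not contained by the resulting S2 polygon. A condensed standalone restatement, assuming the same headers and member names the test file uses (s2Polygon, point); again illustrative only, not part of this diff:

    #include "mongo/db/geo/geoparser.h"
    #include "mongo/db/geo/shapes.h"
    #include "mongo/db/json.h"

    using namespace mongo;

    bool holeExcludesPoint() {
        PointWithCRS p;
        if (!GeoParser::parseGeoJSONPoint(
                fromjson("{'type':'Point', 'coordinates': [2, 2]}"), &p).isOK())
            return false;

        // Outer ring clockwise, hole counter-clockwise: the parser fixes the
        // orientations, and the hole removes (2, 2) from the polygon interior.
        PolygonWithCRS poly;
        if (!GeoParser::parseGeoJSONPolygon(
                fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]],"
                         " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
                &poly).isOK())
            return false;

        return !poly.s2Polygon->Contains(p.point);  // expected to be true
    }
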
diff --git a/src/mongo/db/geo/hash.cpp b/src/mongo/db/geo/hash.cpp
index 318a1cfdcab..60d435812c1 100644
--- a/src/mongo/db/geo/hash.cpp
+++ b/src/mongo/db/geo/hash.cpp
@@ -32,7 +32,7 @@
#include "mongo/db/geo/shapes.h"
#include "mongo/util/mongoutils/str.h"
-#include <algorithm> // for max()
+#include <algorithm> // for max()
#include <iostream>
// So we can get at the str namespace.
@@ -40,791 +40,788 @@ using namespace mongoutils;
namespace mongo {
- using std::stringstream;
+using std::stringstream;
- std::ostream& operator<<(std::ostream &s, const GeoHash &h) {
- return s << h.toString();
- }
-
- /*
- * GeoBitSets fills out various bit patterns that are used by GeoHash.
- * What patterns? Look at the comments next to the fields.
- * TODO(hk): hashedToNormal is still a bit of a mystery.
- */
- class GeoBitSets {
- public:
- GeoBitSets() {
- for (unsigned i = 0; i < 16; i++) {
- unsigned fixed = 0;
- for (int j = 0; j < 4; j++) {
- if (i & (1 << j))
- fixed |= (1 << (j * 2));
- }
- hashedToNormal[fixed] = i;
- }
-
- // Generate all 32 + 1 all-on bit patterns by repeatedly shifting the next bit to the
- // correct position
-
- long long currAllX = 0, currAllY = 0;
- for (int i = 0; i < 64 + 2; i++){
-
- long long thisBit = 1LL << (63 >= i ? 63 - i : 0);
+std::ostream& operator<<(std::ostream& s, const GeoHash& h) {
+ return s << h.toString();
+}
- if (i % 2 == 0) {
- allX[i / 2] = currAllX;
- currAllX |= thisBit;
- } else{
- allY[i / 2] = currAllY;
- currAllY |= thisBit;
- }
+/*
+ * GeoBitSets fills out various bit patterns that are used by GeoHash.
+ * What patterns? Look at the comments next to the fields.
+ * TODO(hk): hashedToNormal is still a bit of a mystery.
+ */
+class GeoBitSets {
+public:
+ GeoBitSets() {
+ for (unsigned i = 0; i < 16; i++) {
+ unsigned fixed = 0;
+ for (int j = 0; j < 4; j++) {
+ if (i & (1 << j))
+ fixed |= (1 << (j * 2));
}
+ hashedToNormal[fixed] = i;
}
- // The 0-th entries of each all[XY] is 0.
- // The i-th entry of allX has i alternating bits turned on starting
- // with the most significant. Example:
- // allX[1] = 8000000000000000
- // allX[2] = a000000000000000
- // allX[3] = a800000000000000
- // Note that 32 + 1 entries are needed, since 0 and 32 are both valid numbers of bits.
- long long allX[33];
- // Same alternating bits but starting with one from the MSB:
- // allY[1] = 4000000000000000
- // allY[2] = 5000000000000000
- // allY[3] = 5400000000000000
- long long allY[33];
-
- unsigned hashedToNormal[256];
- };
-
- // Oh global variables.
- GeoBitSets geoBitSets;
-
- // For i return the i-th most significant bit.
- // masks(0) = 80000..000
- // masks(1) = 40000..000
- // etc.
- // Number of 0s depends on 32 vs. 64 bit.
- inline static int mask32For(const int i) {
- return 1 << (31 - i);
- }
-
- inline static long long mask64For(const int i) {
- return 1LL << (63 - i);
- }
-
- // Binary data is stored in some particular byte ordering that requires this.
- static void copyAndReverse(char *dst, const char *src) {
- for (unsigned a = 0; a < 8; a++) {
- dst[a] = src[7 - a];
- }
- }
-
- // Definition
- unsigned int const GeoHash::kMaxBits = 32;
-
- /* This class maps an x,y coordinate pair to a hash value.
- * This should probably be renamed/generalized so that it's more of a planar hash,
- * and we also have a spherical hash, etc.
- */
- GeoHash::GeoHash() : _hash(0), _bits(0) { }
-
- GeoHash::GeoHash(const string& hash) {
- initFromString(hash.c_str());
- }
-
- GeoHash::GeoHash(const char *s) {
- initFromString(s);
- }
-
- void GeoHash::initFromString(const char *s) {
- int length = strlen(s);
- uassert(16457, "initFromString passed a too-long string", length <= 64);
- uassert(16458, "initFromString passed an odd length string ", 0 == (length % 2));
- _hash = 0;
- // _bits is how many bits for X or Y, not both, so we divide by 2.
- _bits = length / 2;
- for (int i = 0; s[i] != '\0'; ++i)
- if (s[i] == '1')
- setBit(i, 1);
- }
+ // Generate all 32 + 1 all-on bit patterns by repeatedly shifting the next bit to the
+ // correct position
- // This only works if e is BinData.
- GeoHash::GeoHash(const BSONElement& e, unsigned bits) {
- _bits = bits;
- if (e.type() == BinData) {
- int len = 0;
- copyAndReverse((char*)&_hash, e.binData(len));
- verify(len == 8);
- } else {
- cout << "GeoHash bad element: " << e << endl;
- uassert(13047, "wrong type for geo index. if you're using a pre-release version,"
- " need to rebuild index", 0);
- }
- clearUnusedBits();
- }
+ long long currAllX = 0, currAllY = 0;
+ for (int i = 0; i < 64 + 2; i++) {
+ long long thisBit = 1LL << (63 >= i ? 63 - i : 0);
- GeoHash::GeoHash(unsigned x, unsigned y, unsigned bits) {
- verify(bits <= 32);
- _hash = 0;
- _bits = bits;
- for (unsigned i = 0; i < bits; i++) {
- if (isBitSet(x, i)) _hash |= mask64For(i * 2);
- if (isBitSet(y, i)) _hash |= mask64For((i * 2) + 1);
- }
- }
-
- GeoHash::GeoHash(const GeoHash& old) {
- _hash = old._hash;
- _bits = old._bits;
- }
-
- GeoHash::GeoHash(long long hash, unsigned bits) : _hash(hash) , _bits(bits) {
- clearUnusedBits();
- }
-
- // TODO(hk): This is nasty and has no examples.
- void GeoHash::unhash_fast(unsigned *x, unsigned *y) const {
- *x = 0;
- *y = 0;
- const char *c = reinterpret_cast<const char*>(&_hash);
- for (int i = 0; i < 8; i++) {
- unsigned t = (unsigned)(c[i]) & 0x55;
- *y |= (geoBitSets.hashedToNormal[t] << (4 * i));
-
- t = ((unsigned)(c[i]) >> 1) & 0x55;
- *x |= (geoBitSets.hashedToNormal[t] << (4 * i));
- }
- }
-
- void GeoHash::unhash_slow(unsigned *x, unsigned *y) const {
- *x = 0;
- *y = 0;
- for (unsigned i = 0; i < _bits; i++) {
- if (getBitX(i))
- *x |= mask32For(i);
- if (getBitY(i))
- *y |= mask32For(i);
+ if (i % 2 == 0) {
+ allX[i / 2] = currAllX;
+ currAllX |= thisBit;
+ } else {
+ allY[i / 2] = currAllY;
+ currAllY |= thisBit;
+ }
}
}
- void GeoHash::unhash(unsigned *x, unsigned *y) const {
- unhash_fast(x, y);
- }
-
- /** Is the 'bit'-th most significant bit set? (NOT the least significant) */
- bool GeoHash::isBitSet(unsigned val, unsigned bit) {
- return mask32For(bit) & val;
- }
-
- /** Return a GeoHash with one bit of precision lost. */
- GeoHash GeoHash::up() const {
- return GeoHash(_hash, _bits - 1);
- }
-
- bool GeoHash::hasPrefix(const GeoHash& other) const {
- verify(other._bits <= _bits);
- if (other._bits == 0)
- return true;
-
- long long x = other._hash ^ _hash;
- // We only care about the leftmost other._bits (well, really _bits*2 since we have x and
- // y)
- x = x >> (64 - (other._bits * 2));
- return x == 0;
- }
-
- string GeoHash::toString() const {
- StringBuilder buf;
- for (unsigned x = 0; x < _bits * 2; x++)
- buf.append((_hash & mask64For(x)) ? "1" : "0");
- return buf.str();
- }
-
- string GeoHash::toStringHex1() const {
- stringstream ss;
- ss << std::hex << _hash;
- return ss.str();
- }
-
- void GeoHash::setBit(unsigned pos, bool value) {
- verify(pos < _bits * 2);
- const long long mask = mask64For(pos);
- if (value)
- _hash |= mask;
- else // if (_hash & mask)
- _hash &= ~mask;
- }
-
- bool GeoHash::getBit(unsigned pos) const {
- return _hash & mask64For(pos);
- }
-
- bool GeoHash::getBitX(unsigned pos) const {
- verify(pos < 32);
- return getBit(pos * 2);
- }
-
- bool GeoHash::getBitY(unsigned pos) const {
- verify(pos < 32);
- return getBit((pos * 2) + 1);
- }
-
- // TODO(hk): Comment this.
- BSONObj GeoHash::wrap(const char* name) const {
- BSONObjBuilder b(20);
- appendHashMin(&b, name);
- BSONObj o = b.obj();
- if ('\0' == name[0]) verify(o.objsize() == 20);
- return o;
- }
-
- // Do we have a non-trivial GeoHash?
- bool GeoHash::constrains() const {
- return _bits > 0;
- }
-
- // Could our GeoHash have higher precision?
- bool GeoHash::canRefine() const {
- return _bits < 32;
- }
-
- /**
- * Hashing works like this:
- * Divide the world into 4 buckets. Label each one as such:
- * -----------------
- * | | |
- * | | |
- * | 0,1 | 1,1 |
- * -----------------
- * | | |
- * | | |
- * | 0,0 | 1,0 |
- * -----------------
- * We recursively divide each cell, furthermore.
- * The functions below tell us what quadrant we're in *at the finest level
- * of the subdivision.*
- */
- bool GeoHash::atMinX() const {
- return (_hash & geoBitSets.allX[_bits]) == 0;
- }
- bool GeoHash::atMinY() const {
- return (_hash & geoBitSets.allY[_bits]) == 0;
- }
- bool GeoHash::atMaxX() const {
- return (_hash & geoBitSets.allX[_bits]) == geoBitSets.allX[_bits];
- }
- bool GeoHash::atMaxY() const {
- return (_hash & geoBitSets.allY[_bits]) == geoBitSets.allY[_bits];
- }
+ // The 0-th entries of each all[XY] is 0.
+ // The i-th entry of allX has i alternating bits turned on starting
+ // with the most significant. Example:
+ // allX[1] = 8000000000000000
+ // allX[2] = a000000000000000
+ // allX[3] = a800000000000000
+ // Note that 32 + 1 entries are needed, since 0 and 32 are both valid numbers of bits.
+ long long allX[33];
+ // Same alternating bits but starting with one from the MSB:
+ // allY[1] = 4000000000000000
+ // allY[2] = 5000000000000000
+ // allY[3] = 5400000000000000
+ long long allY[33];
+
+ unsigned hashedToNormal[256];
+};
+
+// Oh global variables.
+GeoBitSets geoBitSets;
+
+// For i return the i-th most significant bit.
+// masks(0) = 80000..000
+// masks(1) = 40000..000
+// etc.
+// Number of 0s depends on 32 vs. 64 bit.
+inline static int mask32For(const int i) {
+ return 1 << (31 - i);
+}
+
+inline static long long mask64For(const int i) {
+ return 1LL << (63 - i);
+}
+
+// Binary data is stored in some particular byte ordering that requires this.
+static void copyAndReverse(char* dst, const char* src) {
+ for (unsigned a = 0; a < 8; a++) {
+ dst[a] = src[7 - a];
+ }
+}
+
+// Definition
+unsigned int const GeoHash::kMaxBits = 32;
+
+/* This class maps an x,y coordinate pair to a hash value.
+ * This should probably be renamed/generalized so that it's more of a planar hash,
+ * and we also have a spherical hash, etc.
+ */
+GeoHash::GeoHash() : _hash(0), _bits(0) {}
+
+GeoHash::GeoHash(const string& hash) {
+ initFromString(hash.c_str());
+}
+
+GeoHash::GeoHash(const char* s) {
+ initFromString(s);
+}
+
+void GeoHash::initFromString(const char* s) {
+ int length = strlen(s);
+ uassert(16457, "initFromString passed a too-long string", length <= 64);
+ uassert(16458, "initFromString passed an odd length string ", 0 == (length % 2));
+ _hash = 0;
+ // _bits is how many bits for X or Y, not both, so we divide by 2.
+ _bits = length / 2;
+ for (int i = 0; s[i] != '\0'; ++i)
+ if (s[i] == '1')
+ setBit(i, 1);
+}
+
+// This only works if e is BinData.
+GeoHash::GeoHash(const BSONElement& e, unsigned bits) {
+ _bits = bits;
+ if (e.type() == BinData) {
+ int len = 0;
+ copyAndReverse((char*)&_hash, e.binData(len));
+ verify(len == 8);
+ } else {
+ cout << "GeoHash bad element: " << e << endl;
+ uassert(13047,
+ "wrong type for geo index. if you're using a pre-release version,"
+ " need to rebuild index",
+ 0);
+ }
+ clearUnusedBits();
+}
+
+GeoHash::GeoHash(unsigned x, unsigned y, unsigned bits) {
+ verify(bits <= 32);
+ _hash = 0;
+ _bits = bits;
+ for (unsigned i = 0; i < bits; i++) {
+ if (isBitSet(x, i))
+ _hash |= mask64For(i * 2);
+ if (isBitSet(y, i))
+ _hash |= mask64For((i * 2) + 1);
+ }
+}
+
+GeoHash::GeoHash(const GeoHash& old) {
+ _hash = old._hash;
+ _bits = old._bits;
+}
+
+GeoHash::GeoHash(long long hash, unsigned bits) : _hash(hash), _bits(bits) {
+ clearUnusedBits();
+}
+
+// TODO(hk): This is nasty and has no examples.
+void GeoHash::unhash_fast(unsigned* x, unsigned* y) const {
+ *x = 0;
+ *y = 0;
+ const char* c = reinterpret_cast<const char*>(&_hash);
+ for (int i = 0; i < 8; i++) {
+ unsigned t = (unsigned)(c[i]) & 0x55;
+ *y |= (geoBitSets.hashedToNormal[t] << (4 * i));
+
+ t = ((unsigned)(c[i]) >> 1) & 0x55;
+ *x |= (geoBitSets.hashedToNormal[t] << (4 * i));
+ }
+}
+
+void GeoHash::unhash_slow(unsigned* x, unsigned* y) const {
+ *x = 0;
+ *y = 0;
+ for (unsigned i = 0; i < _bits; i++) {
+ if (getBitX(i))
+ *x |= mask32For(i);
+ if (getBitY(i))
+ *y |= mask32For(i);
+ }
+}
+
+void GeoHash::unhash(unsigned* x, unsigned* y) const {
+ unhash_fast(x, y);
+}
+
+/** Is the 'bit'-th most significant bit set? (NOT the least significant) */
+bool GeoHash::isBitSet(unsigned val, unsigned bit) {
+ return mask32For(bit) & val;
+}
+
+/** Return a GeoHash with one bit of precision lost. */
+GeoHash GeoHash::up() const {
+ return GeoHash(_hash, _bits - 1);
+}
+
+bool GeoHash::hasPrefix(const GeoHash& other) const {
+ verify(other._bits <= _bits);
+ if (other._bits == 0)
+ return true;
- // TODO(hk): comment better
- void GeoHash::move(int x, int y) {
- verify(_bits);
- _move(0, x);
- _move(1, y);
- }
+ long long x = other._hash ^ _hash;
+ // We only care about the leftmost other._bits (well, really _bits*2 since we have x and
+ // y)
+ x = x >> (64 - (other._bits * 2));
+ return x == 0;
+}
+
+string GeoHash::toString() const {
+ StringBuilder buf;
+ for (unsigned x = 0; x < _bits * 2; x++)
+ buf.append((_hash & mask64For(x)) ? "1" : "0");
+ return buf.str();
+}
+
+string GeoHash::toStringHex1() const {
+ stringstream ss;
+ ss << std::hex << _hash;
+ return ss.str();
+}
+
+void GeoHash::setBit(unsigned pos, bool value) {
+ verify(pos < _bits * 2);
+ const long long mask = mask64For(pos);
+ if (value)
+ _hash |= mask;
+ else // if (_hash & mask)
+ _hash &= ~mask;
+}
+
+bool GeoHash::getBit(unsigned pos) const {
+ return _hash & mask64For(pos);
+}
+
+bool GeoHash::getBitX(unsigned pos) const {
+ verify(pos < 32);
+ return getBit(pos * 2);
+}
+
+bool GeoHash::getBitY(unsigned pos) const {
+ verify(pos < 32);
+ return getBit((pos * 2) + 1);
+}
+
+// TODO(hk): Comment this.
+BSONObj GeoHash::wrap(const char* name) const {
+ BSONObjBuilder b(20);
+ appendHashMin(&b, name);
+ BSONObj o = b.obj();
+ if ('\0' == name[0])
+ verify(o.objsize() == 20);
+ return o;
+}
+
+// Do we have a non-trivial GeoHash?
+bool GeoHash::constrains() const {
+ return _bits > 0;
+}
+
+// Could our GeoHash have higher precision?
+bool GeoHash::canRefine() const {
+ return _bits < 32;
+}
- // TODO(hk): comment much better
- void GeoHash::_move(unsigned offset, int d) {
- if (d == 0)
+/**
+ * Hashing works like this:
+ * Divide the world into 4 buckets. Label each one as such:
+ * -----------------
+ * | | |
+ * | | |
+ * | 0,1 | 1,1 |
+ * -----------------
+ * | | |
+ * | | |
+ * | 0,0 | 1,0 |
+ * -----------------
+ * We recursively divide each cell, furthermore.
+ * The functions below tell us what quadrant we're in *at the finest level
+ * of the subdivision.*
+ */
+bool GeoHash::atMinX() const {
+ return (_hash & geoBitSets.allX[_bits]) == 0;
+}
+bool GeoHash::atMinY() const {
+ return (_hash & geoBitSets.allY[_bits]) == 0;
+}
+bool GeoHash::atMaxX() const {
+ return (_hash & geoBitSets.allX[_bits]) == geoBitSets.allX[_bits];
+}
+bool GeoHash::atMaxY() const {
+ return (_hash & geoBitSets.allY[_bits]) == geoBitSets.allY[_bits];
+}
+
+// TODO(hk): comment better
+void GeoHash::move(int x, int y) {
+ verify(_bits);
+ _move(0, x);
+ _move(1, y);
+}
+
+// TODO(hk): comment much better
+void GeoHash::_move(unsigned offset, int d) {
+ if (d == 0)
+ return;
+ verify(d <= 1 && d >= -1); // TEMP
+
+ bool from, to;
+ if (d > 0) {
+ from = 0;
+ to = 1;
+ } else {
+ from = 1;
+ to = 0;
+ }
+
+ unsigned pos = (_bits * 2) - 1;
+ if (offset == 0)
+ pos--;
+ while (true) {
+ if (getBit(pos) == from) {
+ setBit(pos, to);
return;
- verify(d <= 1 && d>= -1); // TEMP
-
- bool from, to;
- if (d > 0) {
- from = 0;
- to = 1;
- }
- else {
- from = 1;
- to = 0;
}
- unsigned pos = (_bits * 2) - 1;
- if (offset == 0)
- pos--;
- while (true) {
- if (getBit(pos) == from) {
- setBit(pos , to);
- return;
+ if (pos < 2) {
+ // overflow
+ for (; pos < (_bits * 2); pos += 2) {
+ setBit(pos, from);
}
-
- if (pos < 2) {
- // overflow
- for (; pos < (_bits * 2) ; pos += 2) {
- setBit(pos , from);
- }
- return;
- }
-
- setBit(pos , from);
- pos -= 2;
- }
-
- verify(0);
- }
-
- GeoHash& GeoHash::operator=(const GeoHash& h) {
- _hash = h._hash;
- _bits = h._bits;
- return *this;
- }
-
- bool GeoHash::operator==(const GeoHash& h) const {
- return _hash == h._hash && _bits == h._bits;
- }
-
- bool GeoHash::operator!=(const GeoHash& h) const {
- return !(*this == h);
- }
-
- bool GeoHash::operator<(const GeoHash& h) const {
-
- if (_hash != h._hash) {
- return static_cast<unsigned long long>(_hash) <
- static_cast<unsigned long long>(h._hash);
- }
-
- return _bits < h._bits;
- }
-
- // Append the hash in s to our current hash. We expect s to be '0' or '1' or '\0',
- // though we also treat non-'1' values as '0'.
- GeoHash& GeoHash::operator+=(const char* s) {
- unsigned pos = _bits * 2;
- _bits += strlen(s) / 2;
- verify(_bits <= 32);
- while ('\0' != s[0]) {
- if (s[0] == '1')
- setBit(pos , 1);
- pos++;
- s++;
- }
- return *this;
- }
-
- GeoHash GeoHash::operator+(const char *s) const {
- GeoHash n = *this;
- n += s;
- return n;
- }
-
- GeoHash GeoHash::operator+(const std::string& s) const {
- return operator+(s.c_str());
- }
-
- /*
- * Keep the upper _bits*2 bits of _hash, clear the lower bits.
- * Maybe there's junk in there? Not sure why this is done.
- */
- void GeoHash::clearUnusedBits() {
- // Left shift count should be less than 64
- if (_bits == 0) {
- _hash = 0;
return;
}
- static long long FULL = 0xFFFFFFFFFFFFFFFFLL;
- long long mask = FULL << (64 - (_bits * 2));
- _hash &= mask;
- }
-
- static void appendHashToBuilder(long long hash,
- BSONObjBuilder* builder,
- const char* fieldName) {
- char buf[8];
- copyAndReverse(buf, (char*) &hash);
- builder->appendBinData(fieldName, 8, bdtCustom, buf);
- }
-
- void GeoHash::appendHashMin(BSONObjBuilder* builder, const char* fieldName) const {
- // The min bound of a GeoHash region has all the unused suffix bits set to 0
- appendHashToBuilder(_hash, builder, fieldName);
- }
-
- void GeoHash::appendHashMax(BSONObjBuilder* builder, const char* fieldName) const {
- // The max bound of a GeoHash region has all the unused suffix bits set to 1
- long long suffixMax = ~(geoBitSets.allX[_bits] | geoBitSets.allY[_bits]);
- long long hashMax = _hash | suffixMax;
-
- appendHashToBuilder(hashMax, builder, fieldName);
- }
-
- long long GeoHash::getHash() const {
- return _hash;
- }
-
- unsigned GeoHash::getBits() const {
- return _bits;
- }
-
- GeoHash GeoHash::commonPrefix(const GeoHash& other) const {
- unsigned i = 0;
- for (; i < _bits && i < other._bits; i++) {
- if (getBitX(i) == other.getBitX(i) && getBitY(i) == other.getBitY(i))
- continue;
- break;
- }
- // i is how many bits match between this and other.
- return GeoHash(_hash, i);
- }
-
-
- bool GeoHash::subdivide( GeoHash children[4] ) const {
- if ( _bits == 32 ) {
- return false;
- }
-
- children[0] = GeoHash( _hash, _bits + 1 ); // (0, 0)
- children[1] = children[0];
- children[1].setBit( _bits * 2 + 1, 1 ); // (0, 1)
- children[2] = children[0];
- children[2].setBit( _bits * 2, 1 ); // (1, 0)
- children[3] = GeoHash(children[1]._hash | children[2]._hash, _bits + 1); // (1, 1)
- return true;
- }
-
- bool GeoHash::contains(const GeoHash& other) const {
- return _bits <= other._bits && other.hasPrefix(*this);
- }
-
- GeoHash GeoHash::parent(unsigned int level) const {
- return GeoHash(_hash, level);
- }
-
- GeoHash GeoHash::parent() const {
- verify(_bits > 0);
- return GeoHash(_hash, _bits - 1);
- }
-
-
- void GeoHash::appendVertexNeighbors(unsigned level, vector<GeoHash>* output) const {
- invariant(level >= 0 && level < _bits);
-
- // Parent at the given level.
- GeoHash parentHash = parent(level);
- output->push_back(parentHash);
-
- // Generate the neighbors of parent that are closest to me.
- unsigned px, py, parentBits;
- parentHash.unhash(&px, &py);
- parentBits = parentHash.getBits();
-
- // No Neighbors for the top level.
- if (parentBits == 0U) return;
-
- // Position in parent
- // Y
- // ^
- // | 01, 11
- // | 00, 10
- // +----------> X
- // We can guarantee _bits > 0.
- long long posInParent = (_hash >> (64 - 2 * (parentBits + 1))) & 3LL;
-
- // 1 bit at parent's level, the least significant bit of parent.
- unsigned parentMask = 1U << (32 - parentBits);
-
- // Along X Axis
- if ((posInParent & 2LL) == 0LL) {
- // Left side of parent, X - 1
- if (!parentHash.atMinX()) output->push_back(GeoHash(px - parentMask, py, parentBits));
- } else {
- // Right side of parent, X + 1
- if (!parentHash.atMaxX()) output->push_back(GeoHash(px + parentMask, py, parentBits));
- }
-
- // Along Y Axis
- if ((posInParent & 1LL) == 0LL) {
- // Bottom of parent, Y - 1
- if (!parentHash.atMinY()) output->push_back(GeoHash(px, py - parentMask, parentBits));
- } else {
- // Top of parent, Y + 1
- if (!parentHash.atMaxY()) output->push_back(GeoHash(px, py + parentMask, parentBits));
- }
-
- // Four corners
- if (posInParent == 0LL) {
- if (!parentHash.atMinX() && !parentHash.atMinY())
- output->push_back(GeoHash(px - parentMask, py - parentMask, parentBits));
- } else if (posInParent == 1LL) {
- if (!parentHash.atMinX() && !parentHash.atMaxY())
- output->push_back(GeoHash(px - parentMask, py + parentMask, parentBits));
- } else if (posInParent == 2LL) {
- if (!parentHash.atMaxX() && !parentHash.atMinY())
- output->push_back(GeoHash(px + parentMask, py - parentMask, parentBits));
- } else {
- // PosInParent == 3LL
- if (!parentHash.atMaxX() && !parentHash.atMaxY())
- output->push_back(GeoHash(px + parentMask, py + parentMask, parentBits));
- }
- }
-
- static BSONField<int> bitsField("bits", 26);
- static BSONField<double> maxField("max", 180.0);
- static BSONField<double> minField("min", -180.0);
-
- // a x b
- // | | |
- // -----|---o-----|---------|-- "|" is a representable double number.
- //
- // In the above figure, b is the next representable double number after a, so
- // |a - b|/|a| = epsilon (ULP) ~= 2.22E-16.
- //
- // An exact number x will be represented as the nearest representable double, which is a.
- // |x - a|/|a| <= 0.5 ULP ~= 1.11e-16
- //
- // IEEE floating-point operations have a maximum error of 0.5 ULPS (units in
- // the last place). For double-precision numbers, this works out to 2**-53
- // (about 1.11e-16) times the magnitude of the result.
- double const GeoHashConverter::kMachinePrecision = 0.5 * std::numeric_limits<double>::epsilon();
-
- Status GeoHashConverter::parseParameters(const BSONObj& paramDoc,
- GeoHashConverter::Parameters* params) {
-
- string errMsg;
-
- if (FieldParser::FIELD_INVALID
- == FieldParser::extractNumber(paramDoc, bitsField, &params->bits, &errMsg)) {
- return Status(ErrorCodes::InvalidOptions, errMsg);
- }
-
- if (FieldParser::FIELD_INVALID
- == FieldParser::extractNumber(paramDoc, maxField, &params->max, &errMsg)) {
- return Status(ErrorCodes::InvalidOptions, errMsg);
- }
-
- if (FieldParser::FIELD_INVALID
- == FieldParser::extractNumber(paramDoc, minField, &params->min, &errMsg)) {
- return Status(ErrorCodes::InvalidOptions, errMsg);
- }
-
- if (params->bits < 1 || params->bits > 32) {
- return Status(ErrorCodes::InvalidOptions,
- str::stream() << "bits for hash must be > 0 and <= 32, "
- << "but " << params->bits << " bits were specified");
- }
-
- if (params->min >= params->max) {
- return Status(ErrorCodes::InvalidOptions,
- str::stream() << "region for hash must be valid and have positive area, "
- << "but [" << params->min << ", " << params->max << "] "
- << "was specified");
- }
-
- double numBuckets = (1024 * 1024 * 1024 * 4.0);
- params->scaling = numBuckets / (params->max - params->min);
-
- return Status::OK();
- }
-
- GeoHashConverter::GeoHashConverter(const Parameters& params) : _params(params) {
- init();
- }
-
- void GeoHashConverter::init() {
- // TODO(hk): What do we require of the values in params?
-
- // Compute how much error there is so it can be used as a fudge factor.
- GeoHash a(0, 0, _params.bits);
- GeoHash b = a;
- b.move(1, 1);
-
- // Epsilon is 1/100th of a bucket size
- // TODO: Can we actually find error bounds for the sqrt function?
- double epsilon = 0.001 / _params.scaling;
- _error = distanceBetweenHashes(a, b) + epsilon;
-
- // Error in radians
- _errorSphere = deg2rad(_error);
-
- // 8 * max(|max|, |min|) * u
- _errorUnhashToBox = calcUnhashToBoxError(_params);
+ setBit(pos, from);
+ pos -= 2;
}
- double GeoHashConverter::distanceBetweenHashes(const GeoHash& a, const GeoHash& b) const {
- double ax, ay, bx, by;
- unhash(a, &ax, &ay);
- unhash(b, &bx, &by);
-
- double dx = bx - ax;
- double dy = by - ay;
-
- return sqrt((dx * dx) + (dy * dy));
- }
+ verify(0);
+}
- /**
- * Hashing functions. Convert the following types (which have a double precision point)
- * to a GeoHash:
- * BSONElement
- * BSONObj
- * Point
- * double, double
- */
-
- GeoHash GeoHashConverter::hash(const Point &p) const {
- return hash(p.x, p.y);
- }
+GeoHash& GeoHash::operator=(const GeoHash& h) {
+ _hash = h._hash;
+ _bits = h._bits;
+ return *this;
+}
- GeoHash GeoHashConverter::hash(const BSONElement& e) const {
- if (e.isABSONObj())
- return hash(e.embeddedObject());
- return GeoHash(e, _params.bits);
- }
+bool GeoHash::operator==(const GeoHash& h) const {
+ return _hash == h._hash && _bits == h._bits;
+}
- GeoHash GeoHashConverter::hash(const BSONObj& o) const {
- return hash(o, NULL);
- }
+bool GeoHash::operator!=(const GeoHash& h) const {
+ return !(*this == h);
+}
- // src is printed out as debugging information. Maybe it is actually somehow the 'source' of o?
- GeoHash GeoHashConverter::hash(const BSONObj& o, const BSONObj* src) const {
- BSONObjIterator i(o);
- uassert(13067,
- str::stream() << "geo field is empty"
- << (src ? causedBy((*src).toString()) : ""),
- i.more());
-
- BSONElement x = i.next();
- uassert(13068,
- str::stream() << "geo field only has 1 element"
- << causedBy(src ? (*src).toString() : x.toString()),
- i.more());
-
- BSONElement y = i.next();
- uassert(13026,
- str::stream() << "geo values must be 'legacy coordinate pairs' for 2d indexes"
- << causedBy(src ? (*src).toString() :
- BSON_ARRAY(x << y).toString()),
- x.isNumber() && y.isNumber());
-
- uassert(13027,
- str::stream() << "point not in interval of [ " << _params.min << ", "
- << _params.max << " ]"
- << causedBy(src ? (*src).toString() :
- BSON_ARRAY(x.number() << y.number()).toString()),
- x.number() <= _params.max && x.number() >= _params.min &&
- y.number() <= _params.max && y.number() >= _params.min);
-
- return GeoHash(convertToHashScale(x.number()), convertToHashScale(y.number()),
- _params.bits);
+bool GeoHash::operator<(const GeoHash& h) const {
+ if (_hash != h._hash) {
+ return static_cast<unsigned long long>(_hash) < static_cast<unsigned long long>(h._hash);
}
- GeoHash GeoHashConverter::hash(double x, double y) const {
- uassert(16433,
- str::stream() << "point not in interval of [ " << _params.min << ", "
- << _params.max << " ]"
- << causedBy(BSON_ARRAY(x << y).toString()),
- x <= _params.max && x >= _params.min &&
- y <= _params.max && y >= _params.min);
-
- return GeoHash(convertToHashScale(x), convertToHashScale(y) , _params.bits);
- }
+ return _bits < h._bits;
+}
- /**
- * Unhashing functions. These convert from a "discretized" GeoHash to the "continuous"
- * doubles according to our scaling parameters.
- *
- * Possible outputs:
- * double, double
- * Point
- * BSONObj
- */
- // TODO(hk): these should have consistent naming
- Point GeoHashConverter::unhashToPoint(const GeoHash &h) const {
- Point point;
- unhash(h, &point.x, &point.y);
- return point;
+// Append the hash in s to our current hash. We expect s to be '0' or '1' or '\0',
+// though we also treat non-'1' values as '0'.
+GeoHash& GeoHash::operator+=(const char* s) {
+ unsigned pos = _bits * 2;
+ _bits += strlen(s) / 2;
+ verify(_bits <= 32);
+ while ('\0' != s[0]) {
+ if (s[0] == '1')
+ setBit(pos, 1);
+ pos++;
+ s++;
}
+ return *this;
+}
- Point GeoHashConverter::unhashToPoint(const BSONElement &e) const {
- return unhashToPoint(hash(e));
- }
+GeoHash GeoHash::operator+(const char* s) const {
+ GeoHash n = *this;
+ n += s;
+ return n;
+}
- BSONObj GeoHashConverter::unhashToBSONObj(const GeoHash& h) const {
- unsigned x, y;
- h.unhash(&x, &y);
- BSONObjBuilder b;
- b.append("x", convertFromHashScale(x));
- b.append("y", convertFromHashScale(y));
- return b.obj();
- }
+GeoHash GeoHash::operator+(const std::string& s) const {
+ return operator+(s.c_str());
+}
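A tiny illustration of the append operators (editorial note, not part of the diff; it only uses the GeoHash API declared in hash.h below):

    // Appending two more characters refines the hash by one level:
    // GeoHash("10") + "01" names the same cell as GeoHash("1001").
    invariant(mongo::GeoHash("10") + "01" == mongo::GeoHash("1001"));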
- void GeoHashConverter::unhash(const GeoHash &h, double *x, double *y) const {
- unsigned a, b;
- h.unhash(&a, &b);
- *x = convertFromHashScale(a);
- *y = convertFromHashScale(b);
- }
-
- Box GeoHashConverter::unhashToBoxCovering(const GeoHash &h) const {
- if (h.getBits() == 0) {
- // Return the result without any error.
- return Box(Point(_params.min, _params.min), Point(_params.max, _params.max));
- }
-
- double sizeEdgeBox = sizeEdge(h.getBits());
- Point min(unhashToPoint(h));
- Point max(min.x + sizeEdgeBox, min.y + sizeEdgeBox);
-
- // Expand the box by the error bound
- Box box(min, max);
- box.fudge(_errorUnhashToBox);
- return box;
- }
-
- double GeoHashConverter::calcUnhashToBoxError(const GeoHashConverter::Parameters& params) {
- return std::max(fabs(params.min), fabs(params.max))
- * GeoHashConverter::kMachinePrecision* 8;
- }
+/*
+ * Keep the upper _bits*2 bits of _hash, clear the lower bits.
+ * Maybe there's junk in there? Not sure why this is done.
+ */
+void GeoHash::clearUnusedBits() {
+ // Left shift count should be less than 64
+ if (_bits == 0) {
+ _hash = 0;
+ return;
+ }
- double GeoHashConverter::sizeOfDiag(const GeoHash& a) const {
- GeoHash b = a;
- b.move(1, 1);
- return distanceBetweenHashes(a, b);
- }
+ static long long FULL = 0xFFFFFFFFFFFFFFFFLL;
+ long long mask = FULL << (64 - (_bits * 2));
+ _hash &= mask;
+}
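A worked example for the mask above (editorial note, not part of the diff): with _bits == 3 the shift is 64 - 6 = 58, so mask == 0xFC00000000000000 and clearUnusedBits() keeps only the top 6 bits of _hash, zeroing the remaining 58.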
+
+static void appendHashToBuilder(long long hash, BSONObjBuilder* builder, const char* fieldName) {
+ char buf[8];
+ copyAndReverse(buf, (char*)&hash);
+ builder->appendBinData(fieldName, 8, bdtCustom, buf);
+}
+
+void GeoHash::appendHashMin(BSONObjBuilder* builder, const char* fieldName) const {
+ // The min bound of a GeoHash region has all the unused suffix bits set to 0
+ appendHashToBuilder(_hash, builder, fieldName);
+}
+
+void GeoHash::appendHashMax(BSONObjBuilder* builder, const char* fieldName) const {
+ // The max bound of a GeoHash region has all the unused suffix bits set to 1
+ long long suffixMax = ~(geoBitSets.allX[_bits] | geoBitSets.allY[_bits]);
+ long long hashMax = _hash | suffixMax;
+
+ appendHashToBuilder(hashMax, builder, fieldName);
+}
+
+long long GeoHash::getHash() const {
+ return _hash;
+}
+
+unsigned GeoHash::getBits() const {
+ return _bits;
+}
+
+GeoHash GeoHash::commonPrefix(const GeoHash& other) const {
+ unsigned i = 0;
+ for (; i < _bits && i < other._bits; i++) {
+ if (getBitX(i) == other.getBitX(i) && getBitY(i) == other.getBitY(i))
+ continue;
+ break;
+ }
+ // i is how many bits match between this and other.
+ return GeoHash(_hash, i);
+}
+
+
+bool GeoHash::subdivide(GeoHash children[4]) const {
+ if (_bits == 32) {
+ return false;
+ }
+
+ children[0] = GeoHash(_hash, _bits + 1); // (0, 0)
+ children[1] = children[0];
+ children[1].setBit(_bits * 2 + 1, 1); // (0, 1)
+ children[2] = children[0];
+ children[2].setBit(_bits * 2, 1); // (1, 0)
+ children[3] = GeoHash(children[1]._hash | children[2]._hash, _bits + 1); // (1, 1)
+ return true;
+}
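A minimal usage sketch (editorial, not part of the diff; it only uses the GeoHash API declared in hash.h below):

    // Split a level-2 cell into its four level-3 children and sanity-check the relationship.
    mongo::GeoHash cell(0u, 0u, 2u);
    mongo::GeoHash children[4];
    if (cell.subdivide(children)) {
        invariant(cell.contains(children[0]));    // each child shares the parent's prefix
        invariant(children[0].parent() == cell);  // and is exactly one level finer
    }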
+
+bool GeoHash::contains(const GeoHash& other) const {
+ return _bits <= other._bits && other.hasPrefix(*this);
+}
+
+GeoHash GeoHash::parent(unsigned int level) const {
+ return GeoHash(_hash, level);
+}
+
+GeoHash GeoHash::parent() const {
+ verify(_bits > 0);
+ return GeoHash(_hash, _bits - 1);
+}
+
+
+void GeoHash::appendVertexNeighbors(unsigned level, vector<GeoHash>* output) const {
+ invariant(level >= 0 && level < _bits);
+
+ // Parent at the given level.
+ GeoHash parentHash = parent(level);
+ output->push_back(parentHash);
+
+ // Generate the neighbors of parent that are closest to me.
+ unsigned px, py, parentBits;
+ parentHash.unhash(&px, &py);
+ parentBits = parentHash.getBits();
+
+    // No neighbors for the top level.
+ if (parentBits == 0U)
+ return;
+
+ // Position in parent
+ // Y
+ // ^
+ // | 01, 11
+ // | 00, 10
+ // +----------> X
+ // We can guarantee _bits > 0.
+ long long posInParent = (_hash >> (64 - 2 * (parentBits + 1))) & 3LL;
+
+ // 1 bit at parent's level, the least significant bit of parent.
+ unsigned parentMask = 1U << (32 - parentBits);
+
+ // Along X Axis
+ if ((posInParent & 2LL) == 0LL) {
+ // Left side of parent, X - 1
+ if (!parentHash.atMinX())
+ output->push_back(GeoHash(px - parentMask, py, parentBits));
+ } else {
+ // Right side of parent, X + 1
+ if (!parentHash.atMaxX())
+ output->push_back(GeoHash(px + parentMask, py, parentBits));
+ }
+
+ // Along Y Axis
+ if ((posInParent & 1LL) == 0LL) {
+ // Bottom of parent, Y - 1
+ if (!parentHash.atMinY())
+ output->push_back(GeoHash(px, py - parentMask, parentBits));
+ } else {
+ // Top of parent, Y + 1
+ if (!parentHash.atMaxY())
+ output->push_back(GeoHash(px, py + parentMask, parentBits));
+ }
+
+ // Four corners
+ if (posInParent == 0LL) {
+ if (!parentHash.atMinX() && !parentHash.atMinY())
+ output->push_back(GeoHash(px - parentMask, py - parentMask, parentBits));
+ } else if (posInParent == 1LL) {
+ if (!parentHash.atMinX() && !parentHash.atMaxY())
+ output->push_back(GeoHash(px - parentMask, py + parentMask, parentBits));
+ } else if (posInParent == 2LL) {
+ if (!parentHash.atMaxX() && !parentHash.atMinY())
+ output->push_back(GeoHash(px + parentMask, py - parentMask, parentBits));
+ } else {
+ // PosInParent == 3LL
+ if (!parentHash.atMaxX() && !parentHash.atMaxY())
+ output->push_back(GeoHash(px + parentMask, py + parentMask, parentBits));
+ }
+}
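A minimal usage sketch (editorial, not part of the diff), consistent with the GeoHash.NeighborsBasic expectations in hash_test.cpp below:

    std::vector<mongo::GeoHash> neighbors;
    mongo::GeoHash cell("100001");               // a level-3 cell
    cell.appendVertexNeighbors(2u, &neighbors);  // level-2 parent plus adjacent level-2 cells
    // Sorted, the four results are "0010", "0011", "1000", and "1001".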
+
+static BSONField<int> bitsField("bits", 26);
+static BSONField<double> maxField("max", 180.0);
+static BSONField<double> minField("min", -180.0);
+
+// a x b
+// | | |
+// -----|---o-----|---------|-- "|" is a representable double number.
+//
+// In the above figure, b is the next representable double number after a, so
+// |a - b|/|a| = epsilon (ULP) ~= 2.22E-16.
+//
+// An exact number x will be represented as the nearest representable double, which is a.
+// |x - a|/|a| <= 0.5 ULP ~= 1.11e-16
+//
+// IEEE floating-point operations have a maximum error of 0.5 ULPS (units in
+// the last place). For double-precision numbers, this works out to 2**-53
+// (about 1.11e-16) times the magnitude of the result.
+double const GeoHashConverter::kMachinePrecision = 0.5 * std::numeric_limits<double>::epsilon();
+
+Status GeoHashConverter::parseParameters(const BSONObj& paramDoc,
+ GeoHashConverter::Parameters* params) {
+ string errMsg;
+
+ if (FieldParser::FIELD_INVALID ==
+ FieldParser::extractNumber(paramDoc, bitsField, &params->bits, &errMsg)) {
+ return Status(ErrorCodes::InvalidOptions, errMsg);
+ }
+
+ if (FieldParser::FIELD_INVALID ==
+ FieldParser::extractNumber(paramDoc, maxField, &params->max, &errMsg)) {
+ return Status(ErrorCodes::InvalidOptions, errMsg);
+ }
+
+ if (FieldParser::FIELD_INVALID ==
+ FieldParser::extractNumber(paramDoc, minField, &params->min, &errMsg)) {
+ return Status(ErrorCodes::InvalidOptions, errMsg);
+ }
+
+ if (params->bits < 1 || params->bits > 32) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << "bits for hash must be > 0 and <= 32, "
+ << "but " << params->bits << " bits were specified");
+ }
+
+ if (params->min >= params->max) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << "region for hash must be valid and have positive area, "
+ << "but [" << params->min << ", " << params->max << "] "
+ << "was specified");
+ }
+
+ double numBuckets = (1024 * 1024 * 1024 * 4.0);
+ params->scaling = numBuckets / (params->max - params->min);
+
+ return Status::OK();
+}
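A minimal sketch of populating these parameters (editorial, not part of the diff; it assumes the usual BSON() builder macro, and the 26 / +-180 values simply mirror the BSONField defaults above):

    mongo::GeoHashConverter::Parameters params;
    mongo::Status status = mongo::GeoHashConverter::parseParameters(
        BSON("bits" << 26 << "min" << -180.0 << "max" << 180.0), &params);
    invariant(status.isOK());
    // params.scaling == 2^32 / (180 - (-180)) == 4294967296.0 / 360.0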
+
+GeoHashConverter::GeoHashConverter(const Parameters& params) : _params(params) {
+ init();
+}
+
+void GeoHashConverter::init() {
+ // TODO(hk): What do we require of the values in params?
+
+ // Compute how much error there is so it can be used as a fudge factor.
+ GeoHash a(0, 0, _params.bits);
+ GeoHash b = a;
+ b.move(1, 1);
+
+ // Epsilon is 1/100th of a bucket size
+ // TODO: Can we actually find error bounds for the sqrt function?
+ double epsilon = 0.001 / _params.scaling;
+ _error = distanceBetweenHashes(a, b) + epsilon;
+
+ // Error in radians
+ _errorSphere = deg2rad(_error);
+
+ // 8 * max(|max|, |min|) * u
+ _errorUnhashToBox = calcUnhashToBoxError(_params);
+}
+double GeoHashConverter::distanceBetweenHashes(const GeoHash& a, const GeoHash& b) const {
+ double ax, ay, bx, by;
+ unhash(a, &ax, &ay);
+ unhash(b, &bx, &by);
- // Relative error = epsilon_(max-min). ldexp() is just a direct translation to
- // floating point exponent, and should be exact.
- double GeoHashConverter::sizeEdge(unsigned level) const {
- invariant(level >= 0);
- invariant((int)level <= _params.bits);
- return ldexp(_params.max - _params.min, -level);
- }
+ double dx = bx - ax;
+ double dy = by - ay;
- // Convert from a double in [0, (max-min)*scaling] to [min, max]
- double GeoHashConverter::convertDoubleFromHashScale(double x) const {
- x /= _params.scaling;
- x += _params.min;
- return x;
- }
+ return sqrt((dx * dx) + (dy * dy));
+}
- // Convert from an unsigned in [0, (max-min)*scaling] to [min, max]
- double GeoHashConverter::convertFromHashScale(unsigned in) const {
- return convertDoubleFromHashScale((double)in);
- }
-
- // Convert from a double that is [min, max] to a double in [0, (max-min)*scaling]
- double GeoHashConverter::convertToDoubleHashScale(double in) const {
- verify(in <= _params.max && in >= _params.min);
+/**
+ * Hashing functions. Convert the following types (which have a double precision point)
+ * to a GeoHash:
+ * BSONElement
+ * BSONObj
+ * Point
+ * double, double
+ */
- if (in == _params.max) {
- // prevent aliasing with _min by moving inside the "box"
- // makes 180 == 179.999 (roughly)
- in -= _error / 2;
- }
+GeoHash GeoHashConverter::hash(const Point& p) const {
+ return hash(p.x, p.y);
+}
+
+GeoHash GeoHashConverter::hash(const BSONElement& e) const {
+ if (e.isABSONObj())
+ return hash(e.embeddedObject());
+ return GeoHash(e, _params.bits);
+}
+
+GeoHash GeoHashConverter::hash(const BSONObj& o) const {
+ return hash(o, NULL);
+}
+
+// src is printed out as debugging information. Maybe it is actually somehow the 'source' of o?
+GeoHash GeoHashConverter::hash(const BSONObj& o, const BSONObj* src) const {
+ BSONObjIterator i(o);
+ uassert(13067,
+ str::stream() << "geo field is empty" << (src ? causedBy((*src).toString()) : ""),
+ i.more());
+
+ BSONElement x = i.next();
+ uassert(13068,
+ str::stream() << "geo field only has 1 element"
+ << causedBy(src ? (*src).toString() : x.toString()),
+ i.more());
+
+ BSONElement y = i.next();
+ uassert(13026,
+ str::stream() << "geo values must be 'legacy coordinate pairs' for 2d indexes"
+ << causedBy(src ? (*src).toString() : BSON_ARRAY(x << y).toString()),
+ x.isNumber() && y.isNumber());
+
+ uassert(13027,
+ str::stream() << "point not in interval of [ " << _params.min << ", " << _params.max
+ << " ]"
+ << causedBy(src ? (*src).toString()
+ : BSON_ARRAY(x.number() << y.number()).toString()),
+ x.number() <= _params.max && x.number() >= _params.min && y.number() <= _params.max &&
+ y.number() >= _params.min);
+
+ return GeoHash(convertToHashScale(x.number()), convertToHashScale(y.number()), _params.bits);
+}
+
+GeoHash GeoHashConverter::hash(double x, double y) const {
+ uassert(16433,
+ str::stream() << "point not in interval of [ " << _params.min << ", " << _params.max
+ << " ]" << causedBy(BSON_ARRAY(x << y).toString()),
+ x <= _params.max && x >= _params.min && y <= _params.max && y >= _params.min);
+
+ return GeoHash(convertToHashScale(x), convertToHashScale(y), _params.bits);
+}
- in -= _params.min;
- verify(in >= 0);
- return in * _params.scaling;
- }
-
- // Convert from a double that is [min, max] to an unsigned in [0, (max-min)*scaling]
- unsigned GeoHashConverter::convertToHashScale(double in) const {
- return static_cast<unsigned>(convertToDoubleHashScale(in));
- }
+/**
+ * Unhashing functions. These convert from a "discretized" GeoHash to the "continuous"
+ * doubles according to our scaling parameters.
+ *
+ * Possible outputs:
+ * double, double
+ * Point
+ * BSONObj
+ */
+// TODO(hk): these should have consistent naming
+Point GeoHashConverter::unhashToPoint(const GeoHash& h) const {
+ Point point;
+ unhash(h, &point.x, &point.y);
+ return point;
+}
+
+Point GeoHashConverter::unhashToPoint(const BSONElement& e) const {
+ return unhashToPoint(hash(e));
+}
+
+BSONObj GeoHashConverter::unhashToBSONObj(const GeoHash& h) const {
+ unsigned x, y;
+ h.unhash(&x, &y);
+ BSONObjBuilder b;
+ b.append("x", convertFromHashScale(x));
+ b.append("y", convertFromHashScale(y));
+ return b.obj();
+}
+
+void GeoHashConverter::unhash(const GeoHash& h, double* x, double* y) const {
+ unsigned a, b;
+ h.unhash(&a, &b);
+ *x = convertFromHashScale(a);
+ *y = convertFromHashScale(b);
+}
+
+Box GeoHashConverter::unhashToBoxCovering(const GeoHash& h) const {
+ if (h.getBits() == 0) {
+ // Return the result without any error.
+ return Box(Point(_params.min, _params.min), Point(_params.max, _params.max));
+ }
+
+ double sizeEdgeBox = sizeEdge(h.getBits());
+ Point min(unhashToPoint(h));
+ Point max(min.x + sizeEdgeBox, min.y + sizeEdgeBox);
+
+ // Expand the box by the error bound
+ Box box(min, max);
+ box.fudge(_errorUnhashToBox);
+ return box;
+}
+
+double GeoHashConverter::calcUnhashToBoxError(const GeoHashConverter::Parameters& params) {
+ return std::max(fabs(params.min), fabs(params.max)) * GeoHashConverter::kMachinePrecision * 8;
+}
+
+double GeoHashConverter::sizeOfDiag(const GeoHash& a) const {
+ GeoHash b = a;
+ b.move(1, 1);
+ return distanceBetweenHashes(a, b);
+}
+
+
+// Relative error = epsilon_(max-min). ldexp() is just a direct translation to
+// floating point exponent, and should be exact.
+double GeoHashConverter::sizeEdge(unsigned level) const {
+ invariant(level >= 0);
+ invariant((int)level <= _params.bits);
+ return ldexp(_params.max - _params.min, -level);
+}
+
+// Convert from a double in [0, (max-min)*scaling] to [min, max]
+double GeoHashConverter::convertDoubleFromHashScale(double x) const {
+ x /= _params.scaling;
+ x += _params.min;
+ return x;
+}
+
+// Convert from an unsigned in [0, (max-min)*scaling] to [min, max]
+double GeoHashConverter::convertFromHashScale(unsigned in) const {
+ return convertDoubleFromHashScale((double)in);
+}
+
+// Convert from a double that is [min, max] to a double in [0, (max-min)*scaling]
+double GeoHashConverter::convertToDoubleHashScale(double in) const {
+ verify(in <= _params.max && in >= _params.min);
+
+ if (in == _params.max) {
+ // prevent aliasing with _min by moving inside the "box"
+ // makes 180 == 179.999 (roughly)
+ in -= _error / 2;
+ }
+
+ in -= _params.min;
+ verify(in >= 0);
+ return in * _params.scaling;
+}
+
+// Convert from a double that is [min, max] to an unsigned in [0, (max-min)*scaling]
+unsigned GeoHashConverter::convertToHashScale(double in) const {
+ return static_cast<unsigned>(convertToDoubleHashScale(in));
+}
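For intuition, a worked round trip (editorial note, not part of the diff) with the standard 2d parameters min = -180, max = 180, scaling = 2^32 / 360: convertToDoubleHashScale(0.0) = (0 - (-180)) * scaling ~= 2^31 = 2147483648, and convertDoubleFromHashScale(2147483648) = 2^31 / scaling + (-180) ~= 0, so a coordinate of 0 degrees lands at the midpoint of the unsigned hash range.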
} // namespace mongo
diff --git a/src/mongo/db/geo/hash.h b/src/mongo/db/geo/hash.h
index b50ac57d3d1..1c5f6d1717a 100644
--- a/src/mongo/db/geo/hash.h
+++ b/src/mongo/db/geo/hash.h
@@ -33,229 +33,240 @@
namespace mongo {
- class GeoHash;
- class Box;
- struct Point;
- std::ostream& operator<<(std::ostream &s, const GeoHash &h);
-
- /* This class maps an unsigned x,y coordinate pair to a hash value.
- * To hash values more interesting than unsigned, use the GeoHashConverter,
- * which maps doubles to unsigned values.
+class GeoHash;
+class Box;
+struct Point;
+std::ostream& operator<<(std::ostream& s, const GeoHash& h);
+
+/* This class maps an unsigned x,y coordinate pair to a hash value.
+ * To hash values more interesting than unsigned, use the GeoHashConverter,
+ * which maps doubles to unsigned values.
+ */
+class GeoHash {
+public:
+ static unsigned int const kMaxBits; // = 32;
+
+ GeoHash();
+ // The strings are binary values of length <= 64,
+ // examples: 1001010100101, 1
+ explicit GeoHash(const std::string& hash);
+ explicit GeoHash(const char* s);
+ // bits is how many bits are used to hash each of x and y.
+ GeoHash(unsigned x, unsigned y, unsigned bits = 32);
+ GeoHash(const GeoHash& old);
+ // hash is a raw hash value. we just copy these into our private fields.
+ GeoHash(long long hash, unsigned bits);
+ // This only works if e is BinData. To get a GeoHash from other BSONElements,
+ // use the converter class.
+ explicit GeoHash(const BSONElement& e, unsigned bits = 32);
+
+ // Convert from the hashed value to unsigned.
+ void unhash(unsigned* x, unsigned* y) const;
+
+ /** Is the 'bit'-th most significant bit set? (NOT the least significant) */
+ static bool isBitSet(unsigned val, unsigned bit);
+
+ /** Return a GeoHash with one bit of precision lost. */
+ GeoHash up() const;
+
+ bool hasPrefix(const GeoHash& other) const;
+
+ std::string toString() const;
+ std::string toStringHex1() const;
+
+ void setBit(unsigned pos, bool value);
+ bool getBit(unsigned pos) const;
+
+ bool getBitX(unsigned pos) const;
+ bool getBitY(unsigned pos) const;
+
+ // XXX: what does this really do?
+ BSONObj wrap(const char* name = "") const;
+
+ // XXX what does this do
+ bool constrains() const;
+ bool canRefine() const;
+
+ // XXX comment better
+ bool atMinX() const;
+ bool atMinY() const;
+
+ // XXX comment better
+ bool atMaxX() const;
+ bool atMaxY() const;
+
+ // XXX: what does this do
+ void move(int x, int y);
+
+ GeoHash& operator=(const GeoHash& h);
+ bool operator==(const GeoHash& h) const;
+ bool operator!=(const GeoHash& h) const;
+ bool operator<(const GeoHash& h) const;
+ // Append the hash in s to our current hash. We expect s to be '0' or '1' or '\0',
+ // though we also treat non-'1' values as '0'.
+ GeoHash& operator+=(const char* s);
+ GeoHash operator+(const char* s) const;
+ GeoHash operator+(const std::string& s) const;
+
+ // Append the minimum range of the hash to the builder provided (inclusive)
+ void appendHashMin(BSONObjBuilder* builder, const char* fieldName) const;
+ // Append the maximum range of the hash to the builder provided (inclusive)
+ void appendHashMax(BSONObjBuilder* builder, const char* fieldName) const;
+
+ long long getHash() const;
+ unsigned getBits() const;
+
+ GeoHash commonPrefix(const GeoHash& other) const;
+
+ // If this is not a leaf cell, set children[0..3] to the four children of
+ // this cell (in traversal order) and return true. Otherwise returns false.
+ bool subdivide(GeoHash children[4]) const;
+ // Return true if the given cell is contained within this one.
+ bool contains(const GeoHash& other) const;
+ // Return the parent at given level.
+ GeoHash parent(unsigned int level) const;
+ GeoHash parent() const;
+
+ // Return the neighbors of closest vertex to this cell at the given level,
+ // by appending them to "output". Normally there are four neighbors, but
+ // the closest vertex may only have two or one neighbor if it is next to the
+ // boundary.
+ //
+ // Requires: level < this->_bits, so that we can determine which vertex is
+ // closest (in particular, level == kMaxBits is not allowed).
+ void appendVertexNeighbors(unsigned level, std::vector<GeoHash>* output) const;
+
+private:
+ // Create a hash from the provided string. Used by the std::string and char* cons.
+ void initFromString(const char* s);
+ /* Keep the upper _bits*2 bits of _hash, clear the lower bits.
+ * Maybe there's junk in there? XXX Not sure why this is done.
*/
- class GeoHash {
- public:
- static unsigned int const kMaxBits; // = 32;
-
- GeoHash();
- // The strings are binary values of length <= 64,
- // examples: 1001010100101, 1
- explicit GeoHash(const std::string& hash);
- explicit GeoHash(const char *s);
- // bits is how many bits are used to hash each of x and y.
- GeoHash(unsigned x, unsigned y, unsigned bits = 32);
- GeoHash(const GeoHash& old);
- // hash is a raw hash value. we just copy these into our private fields.
- GeoHash(long long hash, unsigned bits);
- // This only works if e is BinData. To get a GeoHash from other BSONElements,
- // use the converter class.
- explicit GeoHash(const BSONElement& e, unsigned bits = 32);
-
- // Convert from the hashed value to unsigned.
- void unhash(unsigned *x, unsigned *y) const;
-
- /** Is the 'bit'-th most significant bit set? (NOT the least significant) */
- static bool isBitSet(unsigned val, unsigned bit);
-
- /** Return a GeoHash with one bit of precision lost. */
- GeoHash up() const;
-
- bool hasPrefix(const GeoHash& other) const;
-
- std::string toString() const;
- std::string toStringHex1() const;
-
- void setBit(unsigned pos, bool value);
- bool getBit(unsigned pos) const;
-
- bool getBitX(unsigned pos) const;
- bool getBitY(unsigned pos) const;
-
- // XXX: what does this really do?
- BSONObj wrap(const char* name = "") const;
-
- // XXX what does this do
- bool constrains() const;
- bool canRefine() const;
-
- // XXX comment better
- bool atMinX() const;
- bool atMinY() const;
-
- // XXX comment better
- bool atMaxX() const;
- bool atMaxY() const;
-
- // XXX: what does this do
- void move(int x, int y);
-
- GeoHash& operator=(const GeoHash& h);
- bool operator==(const GeoHash& h) const;
- bool operator!=(const GeoHash& h) const;
- bool operator<(const GeoHash& h) const;
- // Append the hash in s to our current hash. We expect s to be '0' or '1' or '\0',
- // though we also treat non-'1' values as '0'.
- GeoHash& operator+=(const char* s);
- GeoHash operator+(const char *s) const;
- GeoHash operator+(const std::string& s) const;
-
- // Append the minimum range of the hash to the builder provided (inclusive)
- void appendHashMin(BSONObjBuilder* builder, const char* fieldName) const;
- // Append the maximum range of the hash to the builder provided (inclusive)
- void appendHashMax(BSONObjBuilder* builder, const char* fieldName) const;
-
- long long getHash() const;
- unsigned getBits() const;
-
- GeoHash commonPrefix(const GeoHash& other) const;
-
- // If this is not a leaf cell, set children[0..3] to the four children of
- // this cell (in traversal order) and return true. Otherwise returns false.
- bool subdivide(GeoHash children[4]) const;
- // Return true if the given cell is contained within this one.
- bool contains(const GeoHash& other) const;
- // Return the parent at given level.
- GeoHash parent(unsigned int level) const;
- GeoHash parent() const;
-
- // Return the neighbors of closest vertex to this cell at the given level,
- // by appending them to "output". Normally there are four neighbors, but
- // the closest vertex may only have two or one neighbor if it is next to the
- // boundary.
- //
- // Requires: level < this->_bits, so that we can determine which vertex is
- // closest (in particular, level == kMaxBits is not allowed).
- void appendVertexNeighbors(unsigned level, std::vector<GeoHash>* output) const;
-
- private:
-
- // Create a hash from the provided string. Used by the std::string and char* cons.
- void initFromString(const char *s);
- /* Keep the upper _bits*2 bits of _hash, clear the lower bits.
- * Maybe there's junk in there? XXX Not sure why this is done.
- */
- void clearUnusedBits();
- // XXX: what does this do
- void _move(unsigned offset, int d);
- // XXX: this is nasty and has no example
- void unhash_fast(unsigned *x, unsigned *y) const;
- void unhash_slow(unsigned *x, unsigned *y) const;
-
- long long _hash;
- // Bits per field. Our hash is 64 bits, and we have an X and a Y field,
- // so this is 1 to 32.
- unsigned _bits;
+ void clearUnusedBits();
+ // XXX: what does this do
+ void _move(unsigned offset, int d);
+ // XXX: this is nasty and has no example
+ void unhash_fast(unsigned* x, unsigned* y) const;
+ void unhash_slow(unsigned* x, unsigned* y) const;
+
+ long long _hash;
+ // Bits per field. Our hash is 64 bits, and we have an X and a Y field,
+ // so this is 1 to 32.
+ unsigned _bits;
+};
+
+/* Convert between various types and the GeoHash. We need additional information (scaling etc.)
+ * to convert to/from GeoHash. The additional information doesn't change often and is the same
+ * for all conversions, so we stick all the conversion methods here with their associated
+ * data.
+ */
+class GeoHashConverter {
+public:
+ static double const kMachinePrecision; // = 1.1e-16
+
+ struct Parameters {
+ // How many bits to use for the hash?
+ int bits;
+ // X/Y values must be [min, max]
+ double min;
+ double max;
+ // Values are scaled by this when converted to/from hash scale.
+ double scaling;
};
- /* Convert between various types and the GeoHash. We need additional information (scaling etc.)
- * to convert to/from GeoHash. The additional information doesn't change often and is the same
- * for all conversions, so we stick all the conversion methods here with their associated
- * data.
+ GeoHashConverter(const Parameters& params);
+
+ /**
+ * Returns hashing parameters parsed from a BSONObj
*/
- class GeoHashConverter {
- public:
- static double const kMachinePrecision; // = 1.1e-16
-
- struct Parameters {
- // How many bits to use for the hash?
- int bits;
- // X/Y values must be [min, max]
- double min;
- double max;
- // Values are scaled by this when converted to/from hash scale.
- double scaling;
- };
-
- GeoHashConverter(const Parameters &params);
-
- /**
- * Returns hashing parameters parsed from a BSONObj
- */
- static Status parseParameters(const BSONObj& paramDoc, Parameters* params);
-
- static double calcUnhashToBoxError(const GeoHashConverter::Parameters& params);
-
- /**
- * Return converter parameterss which can be used to
- * construct an copy of this converter.
- */
- const Parameters& getParams() const { return _params; }
-
- int getBits() const { return _params.bits; }
- double getError() const { return _error; }
- double getErrorSphere() const { return _errorSphere ;}
- double getMin() const { return _params.min; }
- double getMax() const { return _params.max; }
-
- double distanceBetweenHashes(const GeoHash& a, const GeoHash& b) const;
-
- /**
- * Hashing functions. Convert the following types to a GeoHash:
- * BSONElement
- * BSONObj
- * Point
- * double, double
- */
- GeoHash hash(const Point &p) const;
- GeoHash hash(const BSONElement& e) const;
- GeoHash hash(const BSONObj& o) const;
- // src is printed out as debugging information. I'm not sure if it's actually
- // somehow the 'source' of o? Anyway, this is nasty, very nasty. XXX
- GeoHash hash(const BSONObj& o, const BSONObj* src) const;
- GeoHash hash(double x, double y) const;
-
- /** Unhashing functions.
- * Convert from a hash to the following types:
- * double, double
- * Point
- * Box
- * BSONObj
- */
- // XXX: these should have consistent naming
- Point unhashToPoint(const GeoHash &h) const;
- Point unhashToPoint(const BSONElement &e) const;
- BSONObj unhashToBSONObj(const GeoHash& h) const;
- void unhash(const GeoHash &h, double *x, double *y) const;
-
- /**
- * Generates bounding box from geohash, expanded by the error bound
- */
- Box unhashToBoxCovering(const GeoHash &h) const;
-
- double sizeOfDiag(const GeoHash& a) const;
-
- // Return the sizeEdge of a cell at a given level.
- double sizeEdge(unsigned level) const;
-
- // Used by test.
- double convertDoubleFromHashScale(double in) const;
- double convertToDoubleHashScale(double in) const;
- private:
-
- void init();
-
- // Convert from an unsigned in [0, (max-min)*scaling] to [min, max]
- double convertFromHashScale(unsigned in) const;
-
- // Convert from a double that is [min, max] to an unsigned in [0, (max-min)*scaling]
- unsigned convertToHashScale(double in) const;
-
- Parameters _params;
- // We compute these based on the _params:
- double _error;
- double _errorSphere;
-
- // Error bound of unhashToBox, see hash_test.cpp for its proof.
- // 8 * max(|max|, |min|) * u
- double _errorUnhashToBox;
- };
+ static Status parseParameters(const BSONObj& paramDoc, Parameters* params);
+
+ static double calcUnhashToBoxError(const GeoHashConverter::Parameters& params);
+
+ /**
+     * Return converter parameters which can be used to
+     * construct a copy of this converter.
+ */
+ const Parameters& getParams() const {
+ return _params;
+ }
+
+ int getBits() const {
+ return _params.bits;
+ }
+ double getError() const {
+ return _error;
+ }
+ double getErrorSphere() const {
+ return _errorSphere;
+ }
+ double getMin() const {
+ return _params.min;
+ }
+ double getMax() const {
+ return _params.max;
+ }
+
+ double distanceBetweenHashes(const GeoHash& a, const GeoHash& b) const;
+
+ /**
+ * Hashing functions. Convert the following types to a GeoHash:
+ * BSONElement
+ * BSONObj
+ * Point
+ * double, double
+ */
+ GeoHash hash(const Point& p) const;
+ GeoHash hash(const BSONElement& e) const;
+ GeoHash hash(const BSONObj& o) const;
+ // src is printed out as debugging information. I'm not sure if it's actually
+ // somehow the 'source' of o? Anyway, this is nasty, very nasty. XXX
+ GeoHash hash(const BSONObj& o, const BSONObj* src) const;
+ GeoHash hash(double x, double y) const;
+
+ /** Unhashing functions.
+ * Convert from a hash to the following types:
+ * double, double
+ * Point
+ * Box
+ * BSONObj
+ */
+ // XXX: these should have consistent naming
+ Point unhashToPoint(const GeoHash& h) const;
+ Point unhashToPoint(const BSONElement& e) const;
+ BSONObj unhashToBSONObj(const GeoHash& h) const;
+ void unhash(const GeoHash& h, double* x, double* y) const;
+
+ /**
+ * Generates bounding box from geohash, expanded by the error bound
+ */
+ Box unhashToBoxCovering(const GeoHash& h) const;
+
+ double sizeOfDiag(const GeoHash& a) const;
+
+ // Return the sizeEdge of a cell at a given level.
+ double sizeEdge(unsigned level) const;
+
+ // Used by test.
+ double convertDoubleFromHashScale(double in) const;
+ double convertToDoubleHashScale(double in) const;
+
+private:
+ void init();
+
+ // Convert from an unsigned in [0, (max-min)*scaling] to [min, max]
+ double convertFromHashScale(unsigned in) const;
+
+ // Convert from a double that is [min, max] to an unsigned in [0, (max-min)*scaling]
+ unsigned convertToHashScale(double in) const;
+
+ Parameters _params;
+ // We compute these based on the _params:
+ double _error;
+ double _errorSphere;
+
+ // Error bound of unhashToBox, see hash_test.cpp for its proof.
+ // 8 * max(|max|, |min|) * u
+ double _errorUnhashToBox;
+};
} // namespace mongo
diff --git a/src/mongo/db/geo/hash_test.cpp b/src/mongo/db/geo/hash_test.cpp
index 6b64e11ebe0..725c0254cc0 100644
--- a/src/mongo/db/geo/hash_test.cpp
+++ b/src/mongo/db/geo/hash_test.cpp
@@ -34,7 +34,7 @@
#include <sstream>
#include <iomanip>
#include <cmath>
-#include <algorithm> // For max()
+#include <algorithm> // For max()
#include "mongo/db/geo/hash.h"
#include "mongo/db/geo/shapes.h"
@@ -48,416 +48,418 @@ using std::string;
using std::stringstream;
namespace {
- TEST(GeoHash, MakeZeroHash) {
- unsigned x = 0, y = 0;
- GeoHash hash(x, y);
- }
+TEST(GeoHash, MakeZeroHash) {
+ unsigned x = 0, y = 0;
+ GeoHash hash(x, y);
+}
- static string makeRandomBitString(int length) {
- stringstream ss;
- mongo::PseudoRandom random(31337);
- for (int i = 0; i < length; ++i) {
- if (random.nextInt32() & 1) {
- ss << "1";
- } else {
- ss << "0";
- }
+static string makeRandomBitString(int length) {
+ stringstream ss;
+ mongo::PseudoRandom random(31337);
+ for (int i = 0; i < length; ++i) {
+ if (random.nextInt32() & 1) {
+ ss << "1";
+ } else {
+ ss << "0";
}
- return ss.str();
}
+ return ss.str();
+}
- TEST(GeoHash, MakeRandomValidHashes) {
- int maxStringLength = 64;
- for (int i = 0; i < maxStringLength; i += 2) {
- string a = makeRandomBitString(i);
- GeoHash hashA = GeoHash(a);
- (void)hashA.isBitSet(i, 0);
- (void)hashA.isBitSet(i, 1);
- }
+TEST(GeoHash, MakeRandomValidHashes) {
+ int maxStringLength = 64;
+ for (int i = 0; i < maxStringLength; i += 2) {
+ string a = makeRandomBitString(i);
+ GeoHash hashA = GeoHash(a);
+ (void)hashA.isBitSet(i, 0);
+ (void)hashA.isBitSet(i, 1);
}
+}
- // ASSERT_THROWS does not work if we try to put GeoHash(a) in the macro.
- static GeoHash makeHash(const string& a) { return GeoHash(a); }
+// ASSERT_THROWS does not work if we try to put GeoHash(a) in the macro.
+static GeoHash makeHash(const string& a) {
+ return GeoHash(a);
+}
- TEST(GeoHash, MakeTooLongHash) {
- string a = makeRandomBitString(100);
- ASSERT_THROWS(makeHash(a), mongo::UserException);
- }
+TEST(GeoHash, MakeTooLongHash) {
+ string a = makeRandomBitString(100);
+ ASSERT_THROWS(makeHash(a), mongo::UserException);
+}
- TEST(GeoHash, MakeOddHash) {
- string a = makeRandomBitString(13);
- ASSERT_THROWS(makeHash(a), mongo::UserException);
- }
+TEST(GeoHash, MakeOddHash) {
+ string a = makeRandomBitString(13);
+ ASSERT_THROWS(makeHash(a), mongo::UserException);
+}
+
+TEST(GeoHashConvertor, EdgeLength) {
+ const double kError = 10E-15;
+ GeoHashConverter::Parameters params;
+ params.max = 200.0;
+ params.min = 100.0;
+ params.bits = 32;
+ double numBuckets = (1024 * 1024 * 1024 * 4.0);
+ params.scaling = numBuckets / (params.max - params.min);
- TEST(GeoHashConvertor, EdgeLength) {
- const double kError = 10E-15;
- GeoHashConverter::Parameters params;
- params.max = 200.0;
- params.min = 100.0;
+ GeoHashConverter converter(params);
+
+ ASSERT_APPROX_EQUAL(100.0, converter.sizeEdge(0), kError);
+ ASSERT_APPROX_EQUAL(50.0, converter.sizeEdge(1), kError);
+ ASSERT_APPROX_EQUAL(25.0, converter.sizeEdge(2), kError);
+}
+
+/**
+ * ==========================
+ * Error Bound of UnhashToBox
+ * ==========================
+ *
+ * Compute the absolute error when unhashing a GeoHash to a box, so that expanding
+ * the box by this absolute error can guarantee a point is always contained by the box
+ * of its GeoHash. Thus, the absolute error of box should consist of 3 components:
+ *
+ * 1) The error introduced by hashing x to GeoHash. The extreme example would be a point
+ * close to the boundary of a cell being hashed to an adjacent box.
+ *
+ * For a hash/unhash functions h(x)/uh(x) and computed functions h'(x),uh'(x):
+ *
+ * x uh(h'(x))
+ * |--------|----|--------------------> min-max scale
+ * min \
+ * \
+ * \
+ * \
+ * |--------|--|-|--------------------> hash scale for cells c
+ * 0 h(x) c h'(x)
+ *
+ * 2) The error introduced by unhashing an (int) GeoHash to its lower left corner in x-y
+ * space.
+ *
+ * uh(c)
+ * x | uh'(c)
+ * |--------|--|----|-----------------> min-max scale
+ * min \ /
+ * \ /
+ * \ /
+ * X
+ * |--------|--|-|--------------------> hash scale for cells c
+ * 0 h(x) c h'(x)
+ *
+ * 3) The error introduced by adding the edge length to get the top-right corner of box.
+ * Instead of directly computing uh'(c+1), we add the computed box edge length to the computed
+ * value uh(c), giving us an extra error.
+ *
+ * |edge(min,max)|
+ * | |
+ * | uh(c)+edge
+ * uh(c) |
+ * |-------------|------[uh(c)+edge']-----------> min-max scale
+ * min
+ *
+ * |-------------|-------------|----------------> hash scale
+ * 0 c c+1
+ * Hash and unhash definitions
+ * -------------------------
+ * h(x) = (x - min) * scaling = 2^32 * (x - min) / (max - min)
+ * uh(h) = h / scaling + min,
+ * where
+ * scaling = 2^32 / (max - min)
+ *
+ * Again, h(x)/uh(x) are the exact hash functions and h'(x)/uh'(x) are the computational hash
+ * functions which have small rounding errors.
+ *
+ * | h'(x) - h(x) | == | delta_h(x; max, min) |
+ * where delta_fn = the absolute difference between the computed and actual value of a
+ * function.
+ *
+ * Restating the problem, we're looking for:
+ * |delta_box| = | delta_x_{h'(x)=H} + delta_uh(h) + delta_edge_length |
+ * <= | delta_x_{h'(x)=H} | + | delta_uh(h) | + | delta_edge_length |
+ *
+ * 1. Error bounds calculation
+ * ---------------------------
+ *
+ * 1.1 Error: | delta_x_{h'(x)=H} |
+ * --------------------------------
+ * The first error | delta_x_{h'(x)=H} | means, given GeoHash H, we can find
+ * the range of x and only the range of x that may be mapped to H.
+ * In other words, given H, for any x that is far enough from uh(H) by at least d,
+ * it is impossible for x to be mapped to H.
+ * Mathematically, find d such that for any x satisfying |x - uh(H)| > d,
+ * |h(x) - H| >= | delta_h(x) |
+ * => |h(x) - H| - | delta_h(x) | >= 0
+ * => |h(x) - H + delta_h(x) | >= 0 (|a + b| >= |a| - |b|)
+ * => |h'(x) - H| >= 0 (h'(x) = h(x) + delta_h(x))
+ * which guarantees h'(x) != H.
+ *
+ *
+ * uh(H)-d
+ * |
+ * x | uh(H)
+ * |--------|---[----|----]-----------> min-max scale
+ * min / \ \ /
+ * / \ \ /
+ * / \ \ /
+ * / \ \ /
+ * |---[----|--|-]---|----------------> hash scale for cells c
+ * 0 h(x) | H
+ * h'(x)
+ * =h(x)+delta_h(x)
+ *
+ *
+ * Let's consider one case of the above inequality. We need to find the d,
+ * such that, when
+ * x < uh(H) - d, (1)
+ * we have
+ * h(x) + |delta_h(x)| <= H. (2)
+ *
+ * Due to the monotonicity of h(x), applying h(x) to both sides of inequality (1),
+ * we have
+ * h(x) < h(uh(H) - d) <= H - |delta_h(x)| (from (2))
+ *
+ * By solving it, we have
+ * d = |delta_h(x)| / scaling
+ * <= 2Mu * (1 + |x-min|/|max-min|) (see calculation for |delta_h(x)| below)
+ * <= 4Mu
+ *
+ * | delta_x_{h'(x)=H} | <= d <= 4Mu
+ * A similar calculation applies for the other side of the above inequality.
+ *
+ * 1.2 Error of h(x)
+ * -----------------
+ *
+ * Rules of error propagation
+ * --------------------------
+ * Absolute error of x is |delta_x|
+ * Relative error of x is epsilon_x = |delta_x| / |x|
+ * For any double number x, the relative error of x is bounded by "u". We assume all inputs
+ * have this error to make deduction clear.
+ *     epsilon_x <= u = 0.5 * unit of least precision (ULP) ~= 1.1E-16
+ *
+ * |delta_(x + y)| <= |delta_x| + |delta_y|
+ * |delta_(x - y)| <= |delta_x| + |delta_y|
+ * epsilon_(x * y) <= epsilon_x + epsilon_y
+ * epsilon_(x / y) <= epsilon_x + epsilon_y
+ *
+ * For a given min, max scale, the maximum delta in a computation is bounded by the maximum
+ * value in the scale - M * u = max(|max|, |min|) * u.
+ *
+ * For the hash function h(x)
+ * --------------------------
+ *
+ * epsilon_h(x) = epsilon_(x-min) + epsilon_scaling
+ *
+ * epsilon_(x-min) = (|delta_x| + |delta_min|) / |x - min|
+ * <= 2Mu / |x - min|
+ *
+ * epsilon_scaling = epsilon_(2^32) + epsilon_(max - min)
+ * = 0 + epsilon_(max - min)
+ * <= 2Mu / |max - min|
+ *
+ * Hence, epsilon_h(x) <= 2Mu * (1/|x - min| + 1/|max - min|)
+ *
+ * |delta_h(x)| = 2Mu * (1 + |x-min|/|max-min|) * 2^32 / |max - min|
+ * <= 4Mu * 2^32 / |max-min|
+ *
+ * 2. Error: unhashing GeoHash to point
+ * ------------------------------------
+ * Similarly, we can calculate the error for the uh(h) function, assuming h is exactly
+ * represented in the form of a GeoHash, since integers are represented exactly.
+ *
+ * |delta_uh(h)| = epsilon_(h/scaling) * |h/scaling| + delta_min
+ * = epsilon_(scaling) * |h/scaling| + delta_min
+ * <= 2Mu / |max-min| * |max-min| + |min| * u
+ * <= 3Mu
+ *
+ * Thus, the second error |delta_uh(h)| <= 3Mu
+ * In total, the absolute error we need to add when unhashing to a point is <= 4Mu + 3Mu = 7Mu
+ *
+ * 3. Error: edge length
+ * ---------------------
+ * The third part is easy to compute, since ldexp() doesn't introduce extra
+ * relative error.
+ *
+ * edge_length = ldexp(max - min, -level)
+ *
+ * epsilon_edge = epsilon_(max - min) <= 2 * M * u / |max - min|
+ *
+ * | delta_edge | = epsilon_edge * (max - min) * 2^(-level)
+ * = 2Mu * 2^(-level) <= Mu (level >= 1)
+ *
+ * This error is negligible when level >> 0.
+ *
+ * In conclusion, | delta_box | <= 8Mu
+ *
+ *
+ * Test
+ * ====
+ * The first two component errors can be simulated by uh'(h'(x)).
+ * Let h = h'(x)
+ * |delta_(uh'(h'(x)))|
+ * = epsilon_(h/scaling) * |h/scaling| + delta_min
+ * = (epsilon_(h) + epsilon_(scaling)) * |h/scaling| + delta_min
+ * = epsilon_(h) * h/scaling + epsilon_(scaling) * |h/scaling| + delta_min
+ * = |delta_h|/scaling + |delta_uh(h)|
+ * ~= |delta_box| when level = 32
+ *
+ * Another way to think about it is the error of uh'(h'(x)) also consists of
+ * the same two components that constitute the error of unhashing to a point,
+ * by substituting c with h'(x).
+ *
+ * | delta_(uh'(h'(x))) | = | x - uh'(h(x)) |
+ *
+ * uh(h'(x))
+ * |
+ * x | uh'(h(x))
+ * |--------|---|---|----------------> min-max scale
+ * min \ /
+ * \ /
+ * \ /
+ * |--------|---|--------------------> hash scale for cells c
+ * 0 h(x) h'(x)
+ *
+ *
+ * We can get the maximum of the error by making max very large and min = -min, x -> max
+ */
+TEST(GeoHashConverter, UnhashToBoxError) {
+ GeoHashConverter::Parameters params;
+ // Test max from 2^-20 to 2^20
+ for (int times = -20; times <= 20; times += 2) {
+ // Construct parameters
+ params.max = ldexp(1 + 0.01 * times, times);
+ params.min = -params.max;
params.bits = 32;
double numBuckets = (1024 * 1024 * 1024 * 4.0);
params.scaling = numBuckets / (params.max - params.min);
GeoHashConverter converter(params);
+ // Assume level == 32, so we ignore the error of edge length here.
+ double delta_box = 7.0 / 8.0 * GeoHashConverter::calcUnhashToBoxError(params);
+ double cellEdge = 1 / params.scaling;
+ double x;
- ASSERT_APPROX_EQUAL(100.0, converter.sizeEdge(0), kError);
- ASSERT_APPROX_EQUAL(50.0, converter.sizeEdge(1), kError);
- ASSERT_APPROX_EQUAL(25.0, converter.sizeEdge(2), kError);
- }
-
- /**
- * ==========================
- * Error Bound of UnhashToBox
- * ==========================
- *
- * Compute the absolute error when unhashing a GeoHash to a box, so that expanding
- * the box by this absolute error can guarantee a point is always contained by the box
- * of its GeoHash. Thus, the absolute error of box should consist of 3 components:
- *
- * 1) The error introduced by hashing x to GeoHash. The extreme example would be a point
- * close to the boundary of a cell is hashed to an adjacent box.
- *
- * For a hash/unhash functions h(x)/uh(x) and computed functions h'(x),uh'(x):
- *
- * x uh(h'(x))
- * |--------|----|--------------------> min-max scale
- * min \
- * \
- * \
- * \
- * |--------|--|-|--------------------> hash scale for cells c
- * 0 h(x) c h'(x)
- *
- * 2) The error introduced by unhashing an (int) GeoHash to its lower left corner in x-y
- * space.
- *
- * uh(c)
- * x | uh'(c)
- * |--------|--|----|-----------------> min-max scale
- * min \ /
- * \ /
- * \ /
- * X
- * |--------|--|-|--------------------> hash scale for cells c
- * 0 h(x) c h'(x)
- *
- * 3) The error introduced by adding the edge length to get the top-right corner of box.
- * Instead of directly computing uh'(c+1), we add the computed box edge length to the computed
- * value uh(c), giving us an extra error.
- *
- * |edge(min,max)|
- * | |
- * | uh(c)+edge
- * uh(c) |
- * |-------------|------[uh(c)+edge']-----------> min-max scale
- * min
- *
- * |-------------|-------------|----------------> hash scale
- * 0 c c+1
- * Hash and unhash definitions
- * -------------------------
- * h(x) = (x - min) * scaling = 2^32 * (x - min) / (max - min)
- * uh(h) = h / scaling + min,
- * where
- * scaling = 2^32 / (max - min)
- *
- * Again, h(x)/uh(x) are the exact hash functions and h'(x)/uh'(x) are the computational hash
- * functions which have small rounding errors.
- *
- * | h'(x) - h(x) | == | delta_h(x; max, min) |
- * where delta_fn = the absolute difference between the computed and actual value of a
- * function.
- *
- * Restating the problem, we're looking for:
- * |delta_box| = | delta_x_{h'(x)=H} + delta_uh(h) + delta_edge_length |
- * <= | delta_x_{h'(x)=H} | + | delta_uh(h) | + | delta_edge_length |
- *
- * 1. Error bounds calculation
- * ---------------------------
- *
- * 1.1 Error: | delta_x_{h'(x)=H} |
- * --------------------------------
- * The first error | delta_x_{h'(x)=H} | means, given GeoHash H, we can find
- * the range of x and only the range of x that may be mapped to H.
- * In other words, given H, for any x that is far enough from uh(H) by at least d,
- * it is impossible for x to be mapped to H.
- * Mathematical, find d, such that for any x satisfying |x - uh(H)| > d,
- * |h(x) - H| >= | delta_h(x) |
- * => |h(x) - H| - | delta_h(x) | >= 0
- * => |h(x) - H + delta_h(x) | >= 0 (|a + b| >= |a| - |b|)
- * => |h'(x) - H| >= 0 (h'(x) = h(x) + delta_h(x))
- * which guarantees h'(x) != H.
- *
- *
- * uh(H)-d
- * |
- * x | uh(H)
- * |--------|---[----|----]-----------> min-max scale
- * min / \ \ /
- * / \ \ /
- * / \ \ /
- * / \ \ /
- * |---[----|--|-]---|----------------> hash scale for cells c
- * 0 h(x) | H
- * h'(x)
- * =h(x)+delta_h(x)
- *
- *
- * Let's consider one case of the above inequality. We need to find the d,
- * such that, when
- * x < uh(H) - d, (1)
- * we have
- * h(x) + |delta_h(x)| <= H. (2)
- *
- * Due to the monotonicity of h(x), apply h(x) to both side of inequality (1),
- * we have
- * h(x) < h(uh(H) - d) <= H - |delta_h(x)| (from (2))
- *
- * By solving it, we have
- * d = |delta_h(x)| / scaling
- * <= 2Mu * (1 + |x-min|/|max-min|) (see calculation for |delta_h(x)| below)
- * <= 4Mu
- *
- * | delta_x_{h'(x)=H} | <= d <= 4Mu
- * The similar calculation applies for the other side of the above inequality.
- *
- * 1.2 Error of h(x)
- * -----------------
- *
- * Rules of error propagation
- * --------------------------
- * Absolute error of x is |delta_x|
- * Relative error of x is epsilon_x = |delta_x| / |x|
- * For any double number x, the relative error of x is bounded by "u". We assume all inputs
- * have this error to make deduction clear.
- * epsilon_x <= u = 0.5 * unit of least precision(ULP) ~= 1.1 * 10E-16
- *
- * |delta_(x + y)| <= |delta_x| + |delta_y|
- * |delta_(x - y)| <= |delta_x| + |delta_y|
- * epsilon_(x * y) <= epsilon_x + epsilon_y
- * epsilon_(x / y) <= epsilon_x + epsilon_y
- *
- * For a given min, max scale, the maximum delta in a computation is bounded by the maximum
- * value in the scale - M * u = max(|max|, |min|) * u.
- *
- * For the hash function h(x)
- * --------------------------
- *
- * epsilon_h(x) = epsilon_(x-min) + epsilon_scaling
- *
- * epsilon_(x-min) = (|delta_x| + |delta_min|) / |x - min|
- * <= 2Mu / |x - min|
- *
- * epsilon_scaling = epsilon_(2^32) + epsilon_(max - min)
- * = 0 + epsilon_(max - min)
- * <= 2Mu / |max - min|
- *
- * Hence, epsilon_h(x) <= 2Mu * (1/|x - min| + 1/|max - min|)
- *
- * |delta_h(x)| = 2Mu * (1 + |x-min|/|max-min|) * 2^32 / |max - min|
- * <= 4Mu * 2^32 / |max-min|
- *
- * 2. Error: unhashing GeoHash to point
- * ------------------------------------
- * Similarly, we can calculate the error for uh(h) function, assuming h is exactly
- * represented in form of GeoHash, since integer is represented exactly.
- *
- * |delta_uh(h)| = epsilon_(h/scaling) * |h/scaling| + delta_min
- * = epsilon_(scaling) * |h/scaling| + delta_min
- * <= 2Mu / |max-min| * |max-min| + |min| * u
- * <= 3Mu
- *
- * Thus, the second error |delta_uh(h)| <= 3Mu
- * Totally, the absolute error we need to add to unhashing to a point <= 4Mu + 3Mu = 7Mu
- *
- * 3. Error: edge length
- * ---------------------
- * The third part is easy to compute, since ldexp() doesn't introduce extra
- * relative error.
- *
- * edge_length = ldexp(max - min, -level)
- *
- * epsilon_edge = epsilon_(max - min) <= 2 * M * u / |max - min|
- *
- * | delta_edge | = epsilon_edge * (max - min) * 2^(-level)
- * = 2Mu * 2^(-level) <= Mu (level >= 1)
- *
- * This error is neglectable when level >> 0.
- *
- * In conclusion, | delta_box | <= 8Mu
- *
- *
- * Test
- * ====
- * This first two component errors can be simulated by uh'(h'(x)).
- * Let h = h'(x)
- * |delta_(uh'(h'(x)))|
- * = epsilon_(h/scaling) * |h/scaling| + delta_min
- * = (epsilon_(h) + epsilon_(scaling)) * |h/scaling| + delta_min
- * = epsilon_(h) * h/scaling + epsilon_(scaling) * |h/scaling| + delta_min
- * = |delta_h|/scaling + |delta_uh(h)|
- * ~= |delta_box| when level = 32
- *
- * Another way to think about it is the error of uh'(h'(x)) also consists of
- * the same two components that constitute the error of unhashing to a point,
- * by substituting c with h'(x).
- *
- * | delta_(uh'(h'(x))) | = | x - uh'(h(x)) |
- *
- * uh(h'(x))
- * |
- * x | uh'(h(x))
- * |--------|---|---|----------------> min-max scale
- * min \ /
- * \ /
- * \ /
- * |--------|---|--------------------> hash scale for cells c
- * 0 h(x) h'(x)
- *
- *
- * We can get the maximum of the error by making max very large and min = -min, x -> max
- */
- TEST(GeoHashConverter, UnhashToBoxError) {
- GeoHashConverter::Parameters params;
- // Test max from 2^-20 to 2^20
- for (int times = -20; times <= 20; times += 2) {
- // Construct parameters
- params.max = ldexp(1 + 0.01 * times, times);
- params.min = -params.max;
- params.bits = 32;
- double numBuckets = (1024 * 1024 * 1024 * 4.0);
- params.scaling = numBuckets / (params.max - params.min);
-
- GeoHashConverter converter(params);
- // Assume level == 32, so we ignore the error of edge length here.
- double delta_box = 7.0 / 8.0 * GeoHashConverter::calcUnhashToBoxError(params);
- double cellEdge = 1 / params.scaling;
- double x;
-
- // We are not able to test all the FP numbers to verify the error bound by design,
- // so we consider the numbers in the cell near the point we are interested in.
- //
- // FP numbers starting at max, working downward in minimal increments
- x = params.max;
- while (x > params.max - cellEdge) {
- x = nextafter(x, params.min);
- double x_prime = converter.convertDoubleFromHashScale(
- converter.convertToDoubleHashScale(x));
- double delta = fabs(x - x_prime);
- ASSERT_LESS_THAN(delta, delta_box);
- }
+ // We are not able to test all the FP numbers to verify the error bound by design,
+ // so we consider the numbers in the cell near the point we are interested in.
+ //
+ // FP numbers starting at max, working downward in minimal increments
+ x = params.max;
+ while (x > params.max - cellEdge) {
+ x = nextafter(x, params.min);
+ double x_prime =
+ converter.convertDoubleFromHashScale(converter.convertToDoubleHashScale(x));
+ double delta = fabs(x - x_prime);
+ ASSERT_LESS_THAN(delta, delta_box);
+ }
- // FP numbers starting between first and second cell, working downward to min
- x = params.min + cellEdge;
- while (x > params.min) {
- x = nextafter(x, params.min);
- double x_prime = converter.convertDoubleFromHashScale(
- converter.convertToDoubleHashScale(x));
- double delta = fabs(x - x_prime);
- ASSERT_LESS_THAN(delta, delta_box);
- }
+ // FP numbers starting between first and second cell, working downward to min
+ x = params.min + cellEdge;
+ while (x > params.min) {
+ x = nextafter(x, params.min);
+ double x_prime =
+ converter.convertDoubleFromHashScale(converter.convertToDoubleHashScale(x));
+ double delta = fabs(x - x_prime);
+ ASSERT_LESS_THAN(delta, delta_box);
}
}
+}
- // SERVER-15576 Verify a point is contained by its GeoHash box.
- TEST(GeoHashConverter, GeoHashBox) {
- GeoHashConverter::Parameters params;
- params.max = 100000000.3;
- params.min = -params.max;
- params.bits = 32;
- double numBuckets = (1024 * 1024 * 1024 * 4.0);
- params.scaling = numBuckets / (params.max - params.min);
+// SERVER-15576 Verify a point is contained by its GeoHash box.
+TEST(GeoHashConverter, GeoHashBox) {
+ GeoHashConverter::Parameters params;
+ params.max = 100000000.3;
+ params.min = -params.max;
+ params.bits = 32;
+ double numBuckets = (1024 * 1024 * 1024 * 4.0);
+ params.scaling = numBuckets / (params.max - params.min);
- GeoHashConverter converter(params);
+ GeoHashConverter converter(params);
- // Without expanding the box, the following point is not contained by its GeoHash box.
- mongo::Point p(-7201198.6497758823, -0.1);
- mongo::GeoHash hash = converter.hash(p);
- mongo::Box box = converter.unhashToBoxCovering(hash);
- ASSERT(box.inside(p));
- }
+ // Without expanding the box, the following point is not contained by its GeoHash box.
+ mongo::Point p(-7201198.6497758823, -0.1);
+ mongo::GeoHash hash = converter.hash(p);
+ mongo::Box box = converter.unhashToBoxCovering(hash);
+ ASSERT(box.inside(p));
+}
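For scale (editorial note, not part of the diff): with these parameters M = max(|max|, |min|) = 100000000.3, so the fudge applied by unhashToBoxCovering is 8Mu ~= 8 * 1.0e8 * 1.11e-16 ~= 8.9e-8. That is tiny compared to the cell edge of (max - min) / 2^32 ~= 0.047, but it is enough to pull this boundary point back inside its covering box.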
- TEST(GeoHash, NeighborsBasic) {
- vector<GeoHash> neighbors;
+TEST(GeoHash, NeighborsBasic) {
+ vector<GeoHash> neighbors;
- // Top level
- GeoHash hashAtLevel3("100001");
- hashAtLevel3.appendVertexNeighbors(0u, &neighbors);
- ASSERT_EQUALS(neighbors.size(), (size_t)1);
- ASSERT_EQUALS(neighbors.front(), GeoHash(""));
+ // Top level
+ GeoHash hashAtLevel3("100001");
+ hashAtLevel3.appendVertexNeighbors(0u, &neighbors);
+ ASSERT_EQUALS(neighbors.size(), (size_t)1);
+ ASSERT_EQUALS(neighbors.front(), GeoHash(""));
- // Level 1
- neighbors.clear();
- hashAtLevel3.appendVertexNeighbors(1u, &neighbors);
- ASSERT_EQUALS(neighbors.size(), (size_t)2);
- std::sort(neighbors.begin(), neighbors.end());
- ASSERT_EQUALS(neighbors[0], GeoHash("00"));
- ASSERT_EQUALS(neighbors[1], GeoHash("10"));
+ // Level 1
+ neighbors.clear();
+ hashAtLevel3.appendVertexNeighbors(1u, &neighbors);
+ ASSERT_EQUALS(neighbors.size(), (size_t)2);
+ std::sort(neighbors.begin(), neighbors.end());
+ ASSERT_EQUALS(neighbors[0], GeoHash("00"));
+ ASSERT_EQUALS(neighbors[1], GeoHash("10"));
- // Level 2
- neighbors.clear();
- hashAtLevel3.appendVertexNeighbors(2u, &neighbors);
- ASSERT_EQUALS(neighbors.size(), (size_t)4);
- std::sort(neighbors.begin(), neighbors.end());
- ASSERT_EQUALS(neighbors[0], GeoHash("0010"));
- ASSERT_EQUALS(neighbors[1], GeoHash("0011"));
- ASSERT_EQUALS(neighbors[2], GeoHash("1000"));
- ASSERT_EQUALS(neighbors[3], GeoHash("1001"));
- }
+ // Level 2
+ neighbors.clear();
+ hashAtLevel3.appendVertexNeighbors(2u, &neighbors);
+ ASSERT_EQUALS(neighbors.size(), (size_t)4);
+ std::sort(neighbors.begin(), neighbors.end());
+ ASSERT_EQUALS(neighbors[0], GeoHash("0010"));
+ ASSERT_EQUALS(neighbors[1], GeoHash("0011"));
+ ASSERT_EQUALS(neighbors[2], GeoHash("1000"));
+ ASSERT_EQUALS(neighbors[3], GeoHash("1001"));
+}
- TEST(GeoHash, NeighborsAtFinestLevel) {
- std::vector<GeoHash> neighbors;
+TEST(GeoHash, NeighborsAtFinestLevel) {
+ std::vector<GeoHash> neighbors;
- std::string zeroBase = "00000000000000000000000000000000000000000000000000000000";
- // At finest level
- GeoHash cellHash(zeroBase + "00011110");
- neighbors.clear();
- cellHash.appendVertexNeighbors(31u, &neighbors);
- ASSERT_EQUALS(neighbors.size(), (size_t)4);
- std::sort(neighbors.begin(), neighbors.end());
- ASSERT_EQUALS(neighbors[0], GeoHash(zeroBase + "000110"));
- ASSERT_EQUALS(neighbors[1], GeoHash(zeroBase + "000111"));
- ASSERT_EQUALS(neighbors[2], GeoHash(zeroBase + "001100"));
- ASSERT_EQUALS(neighbors[3], GeoHash(zeroBase + "001101"));
+ std::string zeroBase = "00000000000000000000000000000000000000000000000000000000";
+ // At finest level
+ GeoHash cellHash(zeroBase + "00011110");
+ neighbors.clear();
+ cellHash.appendVertexNeighbors(31u, &neighbors);
+ ASSERT_EQUALS(neighbors.size(), (size_t)4);
+ std::sort(neighbors.begin(), neighbors.end());
+ ASSERT_EQUALS(neighbors[0], GeoHash(zeroBase + "000110"));
+ ASSERT_EQUALS(neighbors[1], GeoHash(zeroBase + "000111"));
+ ASSERT_EQUALS(neighbors[2], GeoHash(zeroBase + "001100"));
+ ASSERT_EQUALS(neighbors[3], GeoHash(zeroBase + "001101"));
- // Level 30
- neighbors.clear();
- cellHash.appendVertexNeighbors(30u, &neighbors);
- ASSERT_EQUALS(neighbors.size(), (size_t)4);
- std::sort(neighbors.begin(), neighbors.end());
- ASSERT_EQUALS(neighbors[0], GeoHash(zeroBase + "0001"));
- ASSERT_EQUALS(neighbors[1], GeoHash(zeroBase + "0011"));
- ASSERT_EQUALS(neighbors[2], GeoHash(zeroBase + "0100"));
- ASSERT_EQUALS(neighbors[3], GeoHash(zeroBase + "0110"));
+ // Level 30
+ neighbors.clear();
+ cellHash.appendVertexNeighbors(30u, &neighbors);
+ ASSERT_EQUALS(neighbors.size(), (size_t)4);
+ std::sort(neighbors.begin(), neighbors.end());
+ ASSERT_EQUALS(neighbors[0], GeoHash(zeroBase + "0001"));
+ ASSERT_EQUALS(neighbors[1], GeoHash(zeroBase + "0011"));
+ ASSERT_EQUALS(neighbors[2], GeoHash(zeroBase + "0100"));
+ ASSERT_EQUALS(neighbors[3], GeoHash(zeroBase + "0110"));
- // Level 29, only two neighbors including the parent.
- // ^
- // |
- // +-+
- // +-+
- // +-+-------> x
- neighbors.clear();
- cellHash.appendVertexNeighbors(29u, &neighbors);
- ASSERT_EQUALS(neighbors.size(), (size_t)2);
- std::sort(neighbors.begin(), neighbors.end());
- ASSERT_EQUALS(neighbors[0], GeoHash(zeroBase + "00"));
- ASSERT_EQUALS(neighbors[1], GeoHash(zeroBase + "01"));
+ // Level 29, only two neighbors including the parent.
+ // ^
+ // |
+ // +-+
+ // +-+
+ // +-+-------> x
+ neighbors.clear();
+ cellHash.appendVertexNeighbors(29u, &neighbors);
+ ASSERT_EQUALS(neighbors.size(), (size_t)2);
+ std::sort(neighbors.begin(), neighbors.end());
+ ASSERT_EQUALS(neighbors[0], GeoHash(zeroBase + "00"));
+ ASSERT_EQUALS(neighbors[1], GeoHash(zeroBase + "01"));
- // Level 28, only one neighbor (the parent) at the left bottom corner.
- // ^
- // |
- // +---+
- // | |
- // +---+-----> x
- neighbors.clear();
- cellHash.appendVertexNeighbors(28u, &neighbors);
- ASSERT_EQUALS(neighbors.size(), (size_t)1);
- ASSERT_EQUALS(neighbors[0], GeoHash(zeroBase));
+ // Level 28, only one neighbor (the parent) at the left bottom corner.
+ // ^
+ // |
+ // +---+
+ // | |
+ // +---+-----> x
+ neighbors.clear();
+ cellHash.appendVertexNeighbors(28u, &neighbors);
+ ASSERT_EQUALS(neighbors.size(), (size_t)1);
+ ASSERT_EQUALS(neighbors[0], GeoHash(zeroBase));
- // Level 1
- neighbors.clear();
- cellHash.appendVertexNeighbors(1u, &neighbors);
- ASSERT_EQUALS(neighbors.size(), (size_t)1);
- ASSERT_EQUALS(neighbors[0], GeoHash("00"));
- }
+ // Level 1
+ neighbors.clear();
+ cellHash.appendVertexNeighbors(1u, &neighbors);
+ ASSERT_EQUALS(neighbors.size(), (size_t)1);
+ ASSERT_EQUALS(neighbors[0], GeoHash("00"));
+}
}
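
[Note] The whitespace-only changes above leave the tested behavior intact: hashing a point and unhashing the result to its covering box must yield a box that still contains the point. A minimal standalone sketch of that call pattern follows; the include paths are assumptions, and the converter parameters mirror getConverterParams() from r2_region_coverer_test.cpp further down.

#include "mongo/db/geo/hash.h"    // GeoHash, GeoHashConverter (assumed path)
#include "mongo/db/geo/shapes.h"  // Point, Box (assumed path)

namespace {
using namespace mongo;

// 32 bits per dimension over [0, 100] x [0, 100], i.e. 2^32 buckets per axis.
GeoHashConverter::Parameters sketchParams() {
    GeoHashConverter::Parameters params;
    params.bits = 32;
    params.min = 0.0;
    params.max = 100.0;
    params.scaling = (1024 * 1024 * 1024 * 4.0) / (params.max - params.min);
    return params;
}

// Hash a point, unhash back to the covering box, and report whether the box
// still contains the point -- the invariant the test above asserts.
bool roundTripContains(const Point& p) {
    GeoHashConverter converter(sketchParams());
    GeoHash hash = converter.hash(p);
    Box box = converter.unhashToBoxCovering(hash);
    return box.inside(p);
}
}  // namespace
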
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index 531595d8719..4c294aba9e9 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -54,71 +54,82 @@
*/
namespace mongo {
- using std::string;
- using std::vector;
-
- class GeoHaystackSearchCommand : public Command {
- public:
- GeoHaystackSearchCommand() : Command("geoSearch") {}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- bool slaveOk() const { return true; }
- bool slaveOverrideOk() const { return true; }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+using std::string;
+using std::vector;
+
+class GeoHaystackSearchCommand : public Command {
+public:
+ GeoHaystackSearchCommand() : Command("geoSearch") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ bool slaveOk() const {
+ return true;
+ }
+ bool slaveOverrideOk() const {
+ return true;
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
+
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ errmsg = "can't find ns";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
-
- AutoGetCollectionForRead ctx(txn, ns);
-
- Collection* collection = ctx.getCollection();
- if ( !collection ) {
- errmsg = "can't find ns";
- return false;
- }
-
- vector<IndexDescriptor*> idxs;
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_HAYSTACK, idxs);
- if (idxs.size() == 0) {
- errmsg = "no geoSearch index";
- return false;
- }
- if (idxs.size() > 1) {
- errmsg = "more than 1 geosearch index";
- return false;
- }
-
- BSONElement nearElt = cmdObj["near"];
- BSONElement maxDistance = cmdObj["maxDistance"];
- BSONElement search = cmdObj["search"];
-
- uassert(13318, "near needs to be an array", nearElt.isABSONObj());
- uassert(13319, "maxDistance needs a number", maxDistance.isNumber());
- uassert(13320, "search needs to be an object", search.type() == Object);
-
- unsigned limit = 50;
- if (cmdObj["limit"].isNumber())
- limit = static_cast<unsigned>(cmdObj["limit"].numberInt());
-
- IndexDescriptor* desc = idxs[0];
- HaystackAccessMethod* ham =
- static_cast<HaystackAccessMethod*>( collection->getIndexCatalog()->getIndex(desc) );
- ham->searchCommand(txn, collection, nearElt.Obj(), maxDistance.numberDouble(), search.Obj(),
- &result, limit);
- return 1;
+ vector<IndexDescriptor*> idxs;
+ collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_HAYSTACK, idxs);
+ if (idxs.size() == 0) {
+ errmsg = "no geoSearch index";
+ return false;
}
- } nameSearchCommand;
+ if (idxs.size() > 1) {
+ errmsg = "more than 1 geosearch index";
+ return false;
+ }
+
+ BSONElement nearElt = cmdObj["near"];
+ BSONElement maxDistance = cmdObj["maxDistance"];
+ BSONElement search = cmdObj["search"];
+
+ uassert(13318, "near needs to be an array", nearElt.isABSONObj());
+ uassert(13319, "maxDistance needs a number", maxDistance.isNumber());
+ uassert(13320, "search needs to be an object", search.type() == Object);
+
+ unsigned limit = 50;
+ if (cmdObj["limit"].isNumber())
+ limit = static_cast<unsigned>(cmdObj["limit"].numberInt());
+
+ IndexDescriptor* desc = idxs[0];
+ HaystackAccessMethod* ham =
+ static_cast<HaystackAccessMethod*>(collection->getIndexCatalog()->getIndex(desc));
+ ham->searchCommand(txn,
+ collection,
+ nearElt.Obj(),
+ maxDistance.numberDouble(),
+ search.Obj(),
+ &result,
+ limit);
+ return 1;
+ }
+} nameSearchCommand;
} // namespace mongo
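
[Note] The reformatted run() above reads the collection name from the command value, then "near" (an array), "maxDistance" (a number), "search" (an object), and an optional numeric "limit" defaulting to 50. A hedged sketch of assembling such a command object with the tree's BSON macros; the include path, the collection name "places", and the field values are illustrative assumptions.

#include "mongo/db/jsobj.h"  // BSONObj, BSON, BSON_ARRAY (assumed path)

namespace {
// Builds { geoSearch: "places", near: [ -74, 40.74 ], maxDistance: 10,
//          search: { type: "restaurant" }, limit: 30 }, i.e. exactly the
// fields GeoHaystackSearchCommand::run() extracts from cmdObj.
mongo::BSONObj buildGeoSearchCmd() {
    return BSON("geoSearch" << "places"
                            << "near" << BSON_ARRAY(-74.0 << 40.74)
                            << "maxDistance" << 10
                            << "search" << BSON("type" << "restaurant")
                            << "limit" << 30);
}
}  // namespace
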
diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp
index b43db9665ad..3e49ab97099 100644
--- a/src/mongo/db/geo/r2_region_coverer.cpp
+++ b/src/mongo/db/geo/r2_region_coverer.cpp
@@ -36,250 +36,246 @@
namespace mongo {
- // Definition
- int const R2RegionCoverer::kDefaultMaxCells = 8;
-
- // Doesn't take ownership of "hashConverter". The caller should guarantee its life cycle
- // is longer than this coverer.
- R2RegionCoverer::R2RegionCoverer( GeoHashConverter* hashConverter ) :
- _hashConverter( hashConverter ),
- _minLevel( 0u ),
- _maxLevel( GeoHash::kMaxBits ),
- _maxCells( kDefaultMaxCells ),
- _region( NULL ),
- _candidateQueue( new CandidateQueue ),
- _results( new vector<GeoHash> )
- {
- }
-
- // Need to declare explicitly because of scoped pointers.
- R2RegionCoverer::~R2RegionCoverer() { }
-
- void R2RegionCoverer::setMinLevel( unsigned int minLevel ) {
- dassert(minLevel >= 0);
- dassert(minLevel <= GeoHash::kMaxBits);
- _minLevel = max(0u, min(GeoHash::kMaxBits, minLevel));
- }
-
- void R2RegionCoverer::setMaxLevel( unsigned int maxLevel ) {
- dassert(maxLevel >= 0);
- dassert(maxLevel <= GeoHash::kMaxBits);
- _maxLevel = max(0u, min(GeoHash::kMaxBits, maxLevel));
- }
-
- void R2RegionCoverer::setMaxCells( int maxCells ) {
- _maxCells = maxCells;
- }
-
- void R2RegionCoverer::getCovering( const R2Region& region, vector<GeoHash>* cover ) {
- // Strategy: Start with the full plane. Discard any
- // that do not intersect the shape. Then repeatedly choose the
- // largest cell that intersects the shape and subdivide it.
- //
- // _result contains the cells that will be part of the output, while the
- // queue contains cells that we may still subdivide further. Cells that
- // are entirely contained within the region are immediately added to the
- // output, while cells that do not intersect the region are immediately
- // discarded. Therefore the queue only contains cells that partially
- // intersect the region. Candidates are prioritized first according to
- // cell size (larger cells first), then by the number of intersecting
- // children they have (fewest children first), and then by the number of
- // fully contained children (fewest children first).
-
- verify(_minLevel <= _maxLevel);
- dassert(_candidateQueue->empty());
- dassert(_results->empty());
- _region = &region;
-
- getInitialCandidates();
-
- while(!_candidateQueue->empty()) {
- Candidate* candidate = _candidateQueue->top().second; // Owned
- _candidateQueue->pop();
- LOG(3) << "Pop: " << candidate->cell;
-
- // Try to expand this cell into its children
- if (candidate->cell.getBits() < _minLevel ||
- candidate->numChildren == 1 ||
- (int)_results->size() +
- (int)_candidateQueue->size() +
- candidate->numChildren <= _maxCells) {
-
- for (int i = 0; i < candidate->numChildren; i++) {
- addCandidate(candidate->children[i]);
- }
- deleteCandidate(candidate, false);
- } else {
- // Reached max cells. Move all candidates from the queue into results.
- candidate->isTerminal = true;
- addCandidate(candidate);
+// Definition
+int const R2RegionCoverer::kDefaultMaxCells = 8;
+
+// Doesn't take ownership of "hashConverter". The caller should guarantee that its lifetime
+// exceeds that of this coverer.
+R2RegionCoverer::R2RegionCoverer(GeoHashConverter* hashConverter)
+ : _hashConverter(hashConverter),
+ _minLevel(0u),
+ _maxLevel(GeoHash::kMaxBits),
+ _maxCells(kDefaultMaxCells),
+ _region(NULL),
+ _candidateQueue(new CandidateQueue),
+ _results(new vector<GeoHash>) {}
+
+// Need to declare explicitly because of scoped pointers.
+R2RegionCoverer::~R2RegionCoverer() {}
+
+void R2RegionCoverer::setMinLevel(unsigned int minLevel) {
+ dassert(minLevel >= 0);
+ dassert(minLevel <= GeoHash::kMaxBits);
+ _minLevel = max(0u, min(GeoHash::kMaxBits, minLevel));
+}
+
+void R2RegionCoverer::setMaxLevel(unsigned int maxLevel) {
+ dassert(maxLevel >= 0);
+ dassert(maxLevel <= GeoHash::kMaxBits);
+ _maxLevel = max(0u, min(GeoHash::kMaxBits, maxLevel));
+}
+
+void R2RegionCoverer::setMaxCells(int maxCells) {
+ _maxCells = maxCells;
+}
+
+void R2RegionCoverer::getCovering(const R2Region& region, vector<GeoHash>* cover) {
+ // Strategy: Start with the full plane. Discard any cells
+ // that do not intersect the shape. Then repeatedly choose the
+ // largest cell that intersects the shape and subdivide it.
+ //
+ // _result contains the cells that will be part of the output, while the
+ // queue contains cells that we may still subdivide further. Cells that
+ // are entirely contained within the region are immediately added to the
+ // output, while cells that do not intersect the region are immediately
+ // discarded. Therefore the queue only contains cells that partially
+ // intersect the region. Candidates are prioritized first according to
+ // cell size (larger cells first), then by the number of intersecting
+ // children they have (fewest children first), and then by the number of
+ // fully contained children (fewest children first).
+
+ verify(_minLevel <= _maxLevel);
+ dassert(_candidateQueue->empty());
+ dassert(_results->empty());
+ _region = &region;
+
+ getInitialCandidates();
+
+ while (!_candidateQueue->empty()) {
+ Candidate* candidate = _candidateQueue->top().second; // Owned
+ _candidateQueue->pop();
+ LOG(3) << "Pop: " << candidate->cell;
+
+ // Try to expand this cell into its children
+ if (candidate->cell.getBits() < _minLevel || candidate->numChildren == 1 ||
+ (int)_results->size() + (int)_candidateQueue->size() + candidate->numChildren <=
+ _maxCells) {
+ for (int i = 0; i < candidate->numChildren; i++) {
+ addCandidate(candidate->children[i]);
}
- LOG(3) << "Queue: " << _candidateQueue->size();
- }
-
- _region = NULL;
- cover->swap(*_results);
- }
-
- // Caller owns the returned pointer
- R2RegionCoverer::Candidate* R2RegionCoverer::newCandidate( const GeoHash& cell ) {
- // Exclude the cell that doesn't intersect with the geometry.
- Box box = _hashConverter->unhashToBoxCovering(cell);
-
- if (_region->fastDisjoint(box)) {
- return NULL;
- }
-
- Candidate* candidate = new Candidate();
- candidate->cell = cell;
- candidate->numChildren = 0;
- // Stop subdivision when we reach the max level or there is no need to do so.
- // Don't stop if we haven't reach min level.
- candidate->isTerminal = cell.getBits() >= _minLevel &&
- (cell.getBits() >= _maxLevel || _region->fastContains(box));
-
- return candidate;
- }
-
- // Takes ownership of "candidate"
- void R2RegionCoverer::addCandidate( Candidate* candidate ) {
- if (candidate == NULL) return;
-
- if (candidate->isTerminal) {
- _results->push_back(candidate->cell);
- deleteCandidate(candidate, true);
- return;
- }
-
- verify(candidate->numChildren == 0);
-
- // Expand children
- int numTerminals = expandChildren(candidate);
-
- if (candidate->numChildren == 0) {
- deleteCandidate(candidate, true);
- } else if (numTerminals == 4 && candidate->cell.getBits() >= _minLevel) {
- // Optimization: add the parent cell rather than all of its children.
+ deleteCandidate(candidate, false);
+ } else {
+ // Reached max cells. Move all candidates from the queue into results.
candidate->isTerminal = true;
addCandidate(candidate);
- } else {
- // Add the cell into the priority queue for further subdivision.
- //
- // We negate the priority so that smaller absolute priorities are returned
- // first. The heuristic is designed to refine the largest cells first,
- // since those are where we have the largest potential gain. Among cells
- // at the same level, we prefer the cells with the smallest number of
- // intersecting children. Finally, we prefer cells that have the smallest
- // number of children that cannot be refined any further.
- int priority = -(((((int)candidate->cell.getBits() << 4)
- + candidate->numChildren) << 4)
- + numTerminals);
- _candidateQueue->push(make_pair(priority, candidate)); // queue owns candidate
- LOG(3) << "Push: " << candidate->cell << " (" << priority << ") ";
}
+ LOG(3) << "Queue: " << _candidateQueue->size();
}
- // Dones't take ownership of "candidate"
- int R2RegionCoverer::expandChildren( Candidate* candidate ) {
- GeoHash childCells[4];
- invariant(candidate->cell.subdivide(childCells));
-
- int numTerminals = 0;
- for (int i = 0; i < 4; ++i) {
- Candidate* child = newCandidate(childCells[i]);
- if (child) {
- candidate->children[candidate->numChildren++] = child;
- if (child->isTerminal) ++numTerminals;
- }
- }
- return numTerminals;
- }
+ _region = NULL;
+ cover->swap(*_results);
+}
- // Takes ownership of "candidate"
- void R2RegionCoverer::deleteCandidate( Candidate* candidate, bool freeChildren ) {
- if (freeChildren) {
- for (int i = 0; i < candidate->numChildren; i++) {
- deleteCandidate(candidate->children[i], true);
- }
- }
+// Caller owns the returned pointer
+R2RegionCoverer::Candidate* R2RegionCoverer::newCandidate(const GeoHash& cell) {
+ // Exclude the cell that doesn't intersect with the geometry.
+ Box box = _hashConverter->unhashToBoxCovering(cell);
- delete candidate;
+ if (_region->fastDisjoint(box)) {
+ return NULL;
}
- void R2RegionCoverer::getInitialCandidates() {
- // Add the full plane
- // TODO a better initialization.
- addCandidate(newCandidate(GeoHash()));
+ Candidate* candidate = new Candidate();
+ candidate->cell = cell;
+ candidate->numChildren = 0;
+ // Stop subdivision when we reach the max level or there is no need to do so.
+ // Don't stop if we haven't reached the min level.
+ candidate->isTerminal =
+ cell.getBits() >= _minLevel && (cell.getBits() >= _maxLevel || _region->fastContains(box));
+
+ return candidate;
+}
+
+// Takes ownership of "candidate"
+void R2RegionCoverer::addCandidate(Candidate* candidate) {
+ if (candidate == NULL)
+ return;
+
+ if (candidate->isTerminal) {
+ _results->push_back(candidate->cell);
+ deleteCandidate(candidate, true);
+ return;
}
- //
- // R2CellUnion
- //
- void R2CellUnion::init(const vector<GeoHash>& cellIds) {
- _cellIds = cellIds;
- normalize();
- }
+ verify(candidate->numChildren == 0);
- bool R2CellUnion::contains(const GeoHash cellId) const {
- // Since all cells are ordered, if an ancestor of id exists, it must be the previous one.
- vector<GeoHash>::const_iterator it;
- it = upper_bound(_cellIds.begin(), _cellIds.end(), cellId); // it > cellId
- return it != _cellIds.begin() && (--it)->contains(cellId); // --it <= cellId
- }
+ // Expand children
+ int numTerminals = expandChildren(candidate);
- bool R2CellUnion::normalize() {
- vector<GeoHash> output;
- output.reserve(_cellIds.size());
- sort(_cellIds.begin(), _cellIds.end());
-
- for (size_t i = 0; i < _cellIds.size(); i++) {
- GeoHash id = _cellIds[i];
-
- // Parent is less than children. If an ancestor of id exists, it must be the last one.
- //
- // Invariant: output doesn't contain intersected cells (ancestor and its descendants)
- // Proof: Assume another cell "c" exists between ancestor "p" and the current "id",
- // i.e. p < c < id, then "c" has "p" as its prefix, since id share the same prefix "p",
- // so "p" contains "c", which conflicts with the invariant.
- if (!output.empty() && output.back().contains(id)) continue;
-
- // Check whether the last 3 elements of "output" plus "id" can be
- // collapsed into a single parent cell.
- while (output.size() >= 3) {
- // A necessary (but not sufficient) condition is that the XOR of the
- // four cells must be zero. This is also very fast to test.
- if ((output.end()[-3].getHash() ^ output.end()[-2].getHash() ^ output.back().getHash())
- != id.getHash())
- break;
-
- // Now we do a slightly more expensive but exact test.
- GeoHash parent = id.parent();
- if (parent != output.end()[-3].parent() ||
- parent != output.end()[-2].parent() ||
- parent != output.end()[-1].parent())
- break;
-
- // Replace four children by their parent cell.
- output.erase(output.end() - 3, output.end());
- id = parent;
- }
- output.push_back(id);
+ if (candidate->numChildren == 0) {
+ deleteCandidate(candidate, true);
+ } else if (numTerminals == 4 && candidate->cell.getBits() >= _minLevel) {
+ // Optimization: add the parent cell rather than all of its children.
+ candidate->isTerminal = true;
+ addCandidate(candidate);
+ } else {
+ // Add the cell into the priority queue for further subdivision.
+ //
+ // We negate the priority so that smaller absolute priorities are returned
+ // first. The heuristic is designed to refine the largest cells first,
+ // since those are where we have the largest potential gain. Among cells
+ // at the same level, we prefer the cells with the smallest number of
+ // intersecting children. Finally, we prefer cells that have the smallest
+ // number of children that cannot be refined any further.
+ int priority = -(((((int)candidate->cell.getBits() << 4) + candidate->numChildren) << 4) +
+ numTerminals);
+ _candidateQueue->push(make_pair(priority, candidate)); // queue owns candidate
+ LOG(3) << "Push: " << candidate->cell << " (" << priority << ") ";
+ }
+}
+
+// Doesn't take ownership of "candidate"
+int R2RegionCoverer::expandChildren(Candidate* candidate) {
+ GeoHash childCells[4];
+ invariant(candidate->cell.subdivide(childCells));
+
+ int numTerminals = 0;
+ for (int i = 0; i < 4; ++i) {
+ Candidate* child = newCandidate(childCells[i]);
+ if (child) {
+ candidate->children[candidate->numChildren++] = child;
+ if (child->isTerminal)
+ ++numTerminals;
}
- if (output.size() < _cellIds.size()) {
- _cellIds.swap(output);
- return true;
+ }
+ return numTerminals;
+}
+
+// Takes ownership of "candidate"
+void R2RegionCoverer::deleteCandidate(Candidate* candidate, bool freeChildren) {
+ if (freeChildren) {
+ for (int i = 0; i < candidate->numChildren; i++) {
+ deleteCandidate(candidate->children[i], true);
}
- return false;
}
- string R2CellUnion::toString() const {
- std::stringstream ss;
- ss << "[ ";
- for (size_t i = 0; i < _cellIds.size(); i++) {
- ss << _cellIds[i] << " ";
+ delete candidate;
+}
+
+void R2RegionCoverer::getInitialCandidates() {
+ // Add the full plane
+ // TODO a better initialization.
+ addCandidate(newCandidate(GeoHash()));
+}
+
+//
+// R2CellUnion
+//
+void R2CellUnion::init(const vector<GeoHash>& cellIds) {
+ _cellIds = cellIds;
+ normalize();
+}
+
+bool R2CellUnion::contains(const GeoHash cellId) const {
+ // Since all cells are ordered, if an ancestor of id exists, it must be the previous one.
+ vector<GeoHash>::const_iterator it;
+ it = upper_bound(_cellIds.begin(), _cellIds.end(), cellId); // it > cellId
+ return it != _cellIds.begin() && (--it)->contains(cellId); // --it <= cellId
+}
+
+bool R2CellUnion::normalize() {
+ vector<GeoHash> output;
+ output.reserve(_cellIds.size());
+ sort(_cellIds.begin(), _cellIds.end());
+
+ for (size_t i = 0; i < _cellIds.size(); i++) {
+ GeoHash id = _cellIds[i];
+
+ // Parent is less than children. If an ancestor of id exists, it must be the last one.
+ //
+ // Invariant: output doesn't contain intersected cells (ancestor and its descendants)
+ // Proof: Assume another cell "c" exists between ancestor "p" and the current "id",
+ // i.e. p < c < id, then "c" has "p" as its prefix, since id shares the same prefix "p",
+ // so "p" contains "c", which conflicts with the invariant.
+ if (!output.empty() && output.back().contains(id))
+ continue;
+
+ // Check whether the last 3 elements of "output" plus "id" can be
+ // collapsed into a single parent cell.
+ while (output.size() >= 3) {
+ // A necessary (but not sufficient) condition is that the XOR of the
+ // four cells must be zero. This is also very fast to test.
+ if ((output.end()[-3].getHash() ^ output.end()[-2].getHash() ^
+ output.back().getHash()) != id.getHash())
+ break;
+
+ // Now we do a slightly more expensive but exact test.
+ GeoHash parent = id.parent();
+ if (parent != output.end()[-3].parent() || parent != output.end()[-2].parent() ||
+ parent != output.end()[-1].parent())
+ break;
+
+ // Replace four children by their parent cell.
+ output.erase(output.end() - 3, output.end());
+ id = parent;
}
- ss << "]";
- return ss.str();
+ output.push_back(id);
+ }
+ if (output.size() < _cellIds.size()) {
+ _cellIds.swap(output);
+ return true;
+ }
+ return false;
+}
+
+string R2CellUnion::toString() const {
+ std::stringstream ss;
+ ss << "[ ";
+ for (size_t i = 0; i < _cellIds.size(); i++) {
+ ss << _cellIds[i] << " ";
}
+ ss << "]";
+ return ss.str();
+}
} /* namespace mongo */
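
[Note] The comment in addCandidate() above describes how a candidate's priority is packed: the cell level occupies the high bits, followed by four bits for the number of intersecting children and four bits for the number of terminal children, and the whole value is negated so the max-heap (ordered by CompareQueueEntries in r2_region_coverer.h below) pops coarse cells first. A self-contained illustration in plain C++ with no mongo dependencies; the helper name candidatePriority is ours.

#include <cassert>

// Same packing as R2RegionCoverer::addCandidate(): negate so that "better"
// candidates (larger cells, fewer intersecting children, fewer terminal
// children) compare greater and are popped from the priority queue first.
static int candidatePriority(int cellBits, int numChildren, int numTerminals) {
    return -((((cellBits << 4) + numChildren) << 4) + numTerminals);
}

int main() {
    // A level-2 cell is refined before a level-5 cell...
    assert(candidatePriority(2, 4, 0) > candidatePriority(5, 1, 0));
    // ...and at equal level, fewer intersecting children wins.
    assert(candidatePriority(3, 2, 0) > candidatePriority(3, 4, 0));
    return 0;
}
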
diff --git a/src/mongo/db/geo/r2_region_coverer.h b/src/mongo/db/geo/r2_region_coverer.h
index db0aa69c8d9..ebd60e4997f 100644
--- a/src/mongo/db/geo/r2_region_coverer.h
+++ b/src/mongo/db/geo/r2_region_coverer.h
@@ -35,113 +35,120 @@
namespace mongo {
- class R2Region;
-
- class R2RegionCoverer {
- MONGO_DISALLOW_COPYING(R2RegionCoverer);
-
- // By default, the covering uses at most 8 cells at any level.
- static const int kDefaultMaxCells; // = 8;
-
- public:
- R2RegionCoverer() = default;
- R2RegionCoverer(GeoHashConverter* hashConverter);
- ~R2RegionCoverer();
-
- // Set the minimum and maximum cell level to be used. The default is to use
- // all cell levels. Requires: max_level() >= min_level().
- void setMinLevel(unsigned int minLevel);
- void setMaxLevel(unsigned int maxLevel);
- unsigned int minLevel() const { return _minLevel; }
- unsigned int maxLevel() const { return _maxLevel; }
-
- // Sets the maximum desired number of cells in the approximation (defaults
- // to kDefaultMaxCells).
- //
- // For any setting of max_cells(), an arbitrary number of cells may be
- // returned if min_level() is too high for the region being approximated.
- //
- // TODO(sz): accuracy experiments similar to S2RegionCoverer.
- void setMaxCells(int maxCells);
- int maxCells() const { return _maxCells; }
-
- void getCovering(const R2Region& region, std::vector<GeoHash>* cover);
-
- private:
- struct Candidate {
- GeoHash cell;
- bool isTerminal; // Cell should not be expanded further.
- int numChildren; // Number of children that intersect the region.
- Candidate* children[4];
- };
-
- // If the cell intersects the given region, return a new candidate with no
- // children, otherwise return NULL. Also marks the candidate as "terminal"
- // if it should not be expanded further.
- Candidate* newCandidate(GeoHash const& cell);
-
- // Process a candidate by either adding it to the result_ vector or
- // expanding its children and inserting it into the priority queue.
- // Passing an argument of NULL does nothing.
- void addCandidate(Candidate* candidate);
-
- // Free the memory associated with a candidate.
- void deleteCandidate( Candidate* candidate, bool freeChildren );
-
- // Populate the children of "candidate" by expanding from the given cell.
- // Returns the number of children that were marked "terminal".
- int expandChildren(Candidate* candidate);
-
- // Computes a set of initial candidates that cover the given region.
- void getInitialCandidates();
-
- GeoHashConverter* _hashConverter; // Not owned.
- // min / max level as unsigned so as to be consistent with GeoHash
- unsigned int _minLevel;
- unsigned int _maxLevel;
- int _maxCells;
-
- // Save the copy of pointer temporarily to avoid passing this parameter internally.
- // Only valid for the duration of a single getCovering() call.
- R2Region const* _region;
-
- // We keep the candidates that may intersect with this region in a priority queue.
- typedef std::pair<int, Candidate*> QueueEntry;
-
- // We define our own own comparison function on QueueEntries in order to
- // make the results deterministic. Using the default less<QueueEntry>,
- // entries of equal priority would be sorted according to the memory address
- // of the candidate.
- struct CompareQueueEntries {
- bool operator()(QueueEntry const& x, QueueEntry const& y) const {
- return x.first < y.first;
- }
- };
-
- typedef std::priority_queue<QueueEntry, std::vector<QueueEntry>,
- CompareQueueEntries> CandidateQueue;
- std::unique_ptr<CandidateQueue> _candidateQueue; // Priority queue owns candidate pointers.
- std::unique_ptr<std::vector<GeoHash> > _results;
+class R2Region;
+
+class R2RegionCoverer {
+ MONGO_DISALLOW_COPYING(R2RegionCoverer);
+
+ // By default, the covering uses at most 8 cells at any level.
+ static const int kDefaultMaxCells; // = 8;
+
+public:
+ R2RegionCoverer() = default;
+ R2RegionCoverer(GeoHashConverter* hashConverter);
+ ~R2RegionCoverer();
+
+ // Set the minimum and maximum cell level to be used. The default is to use
+ // all cell levels. Requires: max_level() >= min_level().
+ void setMinLevel(unsigned int minLevel);
+ void setMaxLevel(unsigned int maxLevel);
+ unsigned int minLevel() const {
+ return _minLevel;
+ }
+ unsigned int maxLevel() const {
+ return _maxLevel;
+ }
+
+ // Sets the maximum desired number of cells in the approximation (defaults
+ // to kDefaultMaxCells).
+ //
+ // For any setting of max_cells(), an arbitrary number of cells may be
+ // returned if min_level() is too high for the region being approximated.
+ //
+ // TODO(sz): accuracy experiments similar to S2RegionCoverer.
+ void setMaxCells(int maxCells);
+ int maxCells() const {
+ return _maxCells;
+ }
+
+ void getCovering(const R2Region& region, std::vector<GeoHash>* cover);
+
+private:
+ struct Candidate {
+ GeoHash cell;
+ bool isTerminal; // Cell should not be expanded further.
+ int numChildren; // Number of children that intersect the region.
+ Candidate* children[4];
};
-
- // An R2CellUnion is a region consisting of cells of various sizes.
- class R2CellUnion {
- MONGO_DISALLOW_COPYING(R2CellUnion);
- public:
- R2CellUnion() = default;
-
- void init(const std::vector<GeoHash>& cellIds);
- bool contains(const GeoHash cellId) const;
- std::string toString() const;
- private:
- // Normalizes the cell union by discarding cells that are contained by other
- // cells, replacing groups of 4 child cells by their parent cell whenever
- // possible, and sorting all the cell ids in increasing order. Returns true
- // if the number of cells was reduced.
- bool normalize();
- std::vector<GeoHash> _cellIds;
+ // If the cell intersects the given region, return a new candidate with no
+ // children, otherwise return NULL. Also marks the candidate as "terminal"
+ // if it should not be expanded further.
+ Candidate* newCandidate(GeoHash const& cell);
+
+ // Process a candidate by either adding it to the result_ vector or
+ // expanding its children and inserting it into the priority queue.
+ // Passing an argument of NULL does nothing.
+ void addCandidate(Candidate* candidate);
+
+ // Free the memory associated with a candidate.
+ void deleteCandidate(Candidate* candidate, bool freeChildren);
+
+ // Populate the children of "candidate" by expanding from the given cell.
+ // Returns the number of children that were marked "terminal".
+ int expandChildren(Candidate* candidate);
+
+ // Computes a set of initial candidates that cover the given region.
+ void getInitialCandidates();
+
+ GeoHashConverter* _hashConverter; // Not owned.
+ // min / max level as unsigned so as to be consistent with GeoHash
+ unsigned int _minLevel;
+ unsigned int _maxLevel;
+ int _maxCells;
+
+ // Save a copy of the pointer temporarily to avoid passing this parameter internally.
+ // Only valid for the duration of a single getCovering() call.
+ R2Region const* _region;
+
+ // We keep the candidates that may intersect with this region in a priority queue.
+ typedef std::pair<int, Candidate*> QueueEntry;
+
+ // We define our own comparison function on QueueEntries in order to
+ // make the results deterministic. Using the default less<QueueEntry>,
+ // entries of equal priority would be sorted according to the memory address
+ // of the candidate.
+ struct CompareQueueEntries {
+ bool operator()(QueueEntry const& x, QueueEntry const& y) const {
+ return x.first < y.first;
+ }
};
-} /* namespace mongo */
+ typedef std::priority_queue<QueueEntry, std::vector<QueueEntry>, CompareQueueEntries>
+ CandidateQueue;
+ std::unique_ptr<CandidateQueue> _candidateQueue; // Priority queue owns candidate pointers.
+ std::unique_ptr<std::vector<GeoHash>> _results;
+};
+
+
+// An R2CellUnion is a region consisting of cells of various sizes.
+class R2CellUnion {
+ MONGO_DISALLOW_COPYING(R2CellUnion);
+public:
+ R2CellUnion() = default;
+
+ void init(const std::vector<GeoHash>& cellIds);
+ bool contains(const GeoHash cellId) const;
+ std::string toString() const;
+
+private:
+ // Normalizes the cell union by discarding cells that are contained by other
+ // cells, replacing groups of 4 child cells by their parent cell whenever
+ // possible, and sorting all the cell ids in increasing order. Returns true
+ // if the number of cells was reduced.
+ bool normalize();
+ std::vector<GeoHash> _cellIds;
+};
+
+} /* namespace mongo */
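
[Note] The reformatted header leaves the public surface unchanged; typical usage is to construct the coverer with a GeoHashConverter that outlives it, bound the cell levels and cell count, and ask for a covering, as the tests in r2_region_coverer_test.cpp below do. A minimal sketch under that assumption; include paths other than r2_region_coverer.h are guesses, and the level/cell choices are arbitrary.

#include <vector>

#include "mongo/db/geo/hash.h"               // GeoHashConverter (assumed path)
#include "mongo/db/geo/r2_region_coverer.h"
#include "mongo/db/geo/shapes.h"             // R2Region (assumed path)

namespace {
using namespace mongo;

// Cover "region" with at most 8 cells between levels 2 and 6. The converter
// is not owned by the coverer and must outlive this call.
std::vector<GeoHash> coverRegion(GeoHashConverter* converter, const R2Region& region) {
    R2RegionCoverer coverer(converter);
    coverer.setMaxCells(8);
    coverer.setMinLevel(2);
    coverer.setMaxLevel(6);

    std::vector<GeoHash> covering;
    coverer.getCovering(region, &covering);
    return covering;
}
}  // namespace
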
diff --git a/src/mongo/db/geo/r2_region_coverer_test.cpp b/src/mongo/db/geo/r2_region_coverer_test.cpp
index e1df6edea77..7fc0f9192f5 100644
--- a/src/mongo/db/geo/r2_region_coverer_test.cpp
+++ b/src/mongo/db/geo/r2_region_coverer_test.cpp
@@ -38,588 +38,587 @@
namespace {
- using std::unique_ptr;
- using namespace mongo;
- using mongo::Polygon; // "windows.h" has another Polygon for Windows GDI.
+using std::unique_ptr;
+using namespace mongo;
+using mongo::Polygon; // "windows.h" has another Polygon for Windows GDI.
+
+//
+// GeoHash
+//
+TEST(R2RegionCoverer, GeoHashSubdivide) {
+ GeoHash children[4];
+
+ // Full plane -> 4 quadrants
+ GeoHash fullPlane;
+ ASSERT_TRUE(fullPlane.subdivide(children));
+ ASSERT_EQUALS(children[0], GeoHash(0LL, 1u)); // (x, y) : (0, 0)
+ ASSERT_EQUALS(children[1], GeoHash(1LL << 62, 1u)); // (x, y) : (0, 1)
+ ASSERT_EQUALS(children[2], GeoHash(2LL << 62, 1u)); // (x, y) : (1, 0)
+ ASSERT_EQUALS(children[3], GeoHash(3LL << 62, 1u)); // (x, y) : (1, 1)
+
+ // Small cell: 0...11XX -> 0...11[0-3]
+ const long long cellHash = 3LL << 2;
+ GeoHash cell(cellHash, 31u);
+ ASSERT_TRUE(cell.subdivide(children));
+ ASSERT_EQUALS(children[0], GeoHash(cellHash, 32u)); // (x, y) : (0, 0)
+ ASSERT_EQUALS(children[1], GeoHash(cellHash + 1, 32u)); // (x, y) : (0, 1)
+ ASSERT_EQUALS(children[2], GeoHash(cellHash + 2, 32u)); // (x, y) : (1, 0)
+ ASSERT_EQUALS(children[3], GeoHash(cellHash + 3, 32u)); // (x, y) : (1, 1)
+
+ // Smallest cell at finest level cannot subdivide
+ GeoHash leafCell(1LL, 32u);
+ ASSERT_FALSE(leafCell.subdivide(children));
+}
+
+TEST(R2RegionCoverer, GeoHashUnusedBits) {
+ GeoHash geoHash(5566154225580586776LL, 0u);
+ GeoHash entirePlane;
+ ASSERT_EQUALS(geoHash, entirePlane);
+}
+
+TEST(R2RegionCoverer, GeoHashContains) {
+ GeoHash entirePlane;
+ GeoHash geoHash(5566154225580586776LL, 32u); // An arbitrary random cell
+ // GeoHash contains itself
+ ASSERT_TRUE(entirePlane.contains(entirePlane));
+ ASSERT_TRUE(geoHash.contains(geoHash));
+ // Entire plane contains everything
+ ASSERT_TRUE(entirePlane.contains(geoHash));
+ ASSERT_FALSE(geoHash.contains(entirePlane));
+
+ // Positive cases
+ GeoHash parent("0010");
+ GeoHash child("00100101");
+ ASSERT_TRUE(parent.contains(parent));
+ ASSERT_TRUE(parent.contains(child));
+ ASSERT_TRUE(entirePlane.contains(geoHash));
+
+ // Negative cases
+ GeoHash other("01");
+ ASSERT_FALSE(parent.contains(other));
+ ASSERT_FALSE(other.contains(parent));
+}
+
+
+//
+// R2RegionCoverer
+//
+
+// Plane boundary, x: [0.0, 100.0], y: [0.0, 100.0]
+const double MAXBOUND = 100.0;
+
+// Global random number generator, repeatable.
+mongo::PseudoRandom rand(0);
+
+GeoHashConverter::Parameters getConverterParams() {
+ GeoHashConverter::Parameters params;
+ params.bits = 32;
+ params.min = 0.0;
+ params.max = MAXBOUND;
+ const double numBuckets = (1024 * 1024 * 1024 * 4.0);
+ params.scaling = numBuckets / (params.max - params.min);
+ return params;
+}
- //
- // GeoHash
- //
- TEST(R2RegionCoverer, GeoHashSubdivide) {
- GeoHash children[4];
-
- // Full plane -> 4 quadrants
- GeoHash fullPlane;
- ASSERT_TRUE( fullPlane.subdivide( children ) );
- ASSERT_EQUALS( children[0], GeoHash( 0LL, 1u ) ); // (x, y) : (0, 0)
- ASSERT_EQUALS( children[1], GeoHash( 1LL << 62, 1u ) ); // (x, y) : (0, 1)
- ASSERT_EQUALS( children[2], GeoHash( 2LL << 62, 1u ) ); // (x, y) : (1, 0)
- ASSERT_EQUALS( children[3], GeoHash( 3LL << 62, 1u ) ); // (x, y) : (1, 1)
-
- // Small cell: 0...11XX -> 0...11[0-3]
- const long long cellHash = 3LL << 2;
- GeoHash cell( cellHash, 31u );
- ASSERT_TRUE( cell.subdivide( children ) );
- ASSERT_EQUALS( children[0], GeoHash( cellHash, 32u ) ); // (x, y) : (0, 0)
- ASSERT_EQUALS( children[1], GeoHash( cellHash + 1, 32u ) ); // (x, y) : (0, 1)
- ASSERT_EQUALS( children[2], GeoHash( cellHash + 2, 32u ) ); // (x, y) : (1, 0)
- ASSERT_EQUALS( children[3], GeoHash( cellHash + 3, 32u ) ); // (x, y) : (1, 1)
-
- // Smallest cell at finest level cannot subdivide
- GeoHash leafCell(1LL, 32u);
- ASSERT_FALSE( leafCell.subdivide( children ) );
- }
-
- TEST(R2RegionCoverer, GeoHashUnusedBits) {
- GeoHash geoHash(5566154225580586776LL, 0u);
- GeoHash entirePlane;
- ASSERT_EQUALS(geoHash, entirePlane);
+/**
+ * Test region which mimics the region of a geohash cell.
+ * NOTE: Technically this is not 100% correct, since geohash cells are inclusive on lower and
+ * exclusive on upper edges. For now, this region is just exclusive on all edges.
+ * TODO: Create an explicit HashCell which correctly encapsulates this behavior, push to the
+ * R2Region interface.
+ */
+class HashBoxRegion : public R2Region {
+public:
+ HashBoxRegion(Box box) : _box(box) {}
+ Box getR2Bounds() const {
+ return _box;
}
- TEST(R2RegionCoverer, GeoHashContains) {
- GeoHash entirePlane;
- GeoHash geoHash(5566154225580586776LL, 32u); // An arbitrary random cell
- // GeoHash contains itself
- ASSERT_TRUE(entirePlane.contains(entirePlane));
- ASSERT_TRUE(geoHash.contains(geoHash));
- // Entire plane contains everything
- ASSERT_TRUE(entirePlane.contains(geoHash));
- ASSERT_FALSE(geoHash.contains(entirePlane));
-
- // Positive cases
- GeoHash parent("0010");
- GeoHash child("00100101");
- ASSERT_TRUE(parent.contains(parent));
- ASSERT_TRUE(parent.contains(child));
- ASSERT_TRUE(entirePlane.contains(geoHash));
-
- // Negative cases
- GeoHash other("01");
- ASSERT_FALSE(parent.contains(other));
- ASSERT_FALSE(other.contains(parent));
+ bool fastContains(const Box& other) const {
+ return _box.contains(other);
}
+ bool fastDisjoint(const Box& other) const {
+ if (!_box.intersects(other))
+ return true;
- //
- // R2RegionCoverer
- //
-
- // Plane boundary, x: [0.0, 100.0], y: [0.0, 100.0]
- const double MAXBOUND = 100.0;
-
- // Global random number generator, repeatable.
- mongo::PseudoRandom rand(0);
+ // Make outer edges exclusive
+ if (_box._max.x == other._min.x || _box._min.x == other._max.x ||
+ _box._max.y == other._min.y || _box._min.y == other._max.y)
+ return true;
- GeoHashConverter::Parameters getConverterParams() {
- GeoHashConverter::Parameters params;
- params.bits = 32;
- params.min = 0.0;
- params.max = MAXBOUND;
- const double numBuckets = (1024 * 1024 * 1024 * 4.0);
- params.scaling = numBuckets / (params.max - params.min);
- return params;
+ return false;
}
- /**
- * Test region which mimics the region of a geohash cell.
- * NOTE: Technically this is not 100% correct, since geohash cells are inclusive on lower and
- * exclusive on upper edges. For now, this region is just exclusive on all edges.
- * TODO: Create an explicit HashCell which correctly encapsulates this behavior, push to the
- * R2Region interface.
- */
- class HashBoxRegion : public R2Region {
- public:
-
- HashBoxRegion(Box box) : _box(box) {}
- Box getR2Bounds() const { return _box; }
-
- bool fastContains(const Box& other) const {
- return _box.contains(other);
- }
-
- bool fastDisjoint(const Box& other) const {
- if (!_box.intersects(other))
- return true;
-
- // Make outer edges exclusive
- if (_box._max.x == other._min.x || _box._min.x == other._max.x
- || _box._max.y == other._min.y || _box._min.y == other._max.y)
- return true;
-
- return false;
- }
-
- private:
- Box _box;
- };
-
- TEST(R2RegionCoverer, RandomCells) {
- GeoHashConverter converter(getConverterParams());
- R2RegionCoverer coverer(&converter);
- coverer.setMaxCells( 1 );
- // Test random cell ids at all levels.
- for ( int i = 0; i < 10000; ++i ) {
- GeoHash id( (long long) rand.nextInt64(),
- (unsigned) rand.nextInt32( GeoHash::kMaxBits + 1 ) );
- vector<GeoHash> covering;
- Box box = converter.unhashToBoxCovering(id);
- // Since the unhashed box is expanded by the error 8Mu, we need to shrink it.
- box.fudge(-GeoHashConverter::kMachinePrecision * MAXBOUND * 20);
- HashBoxRegion region(box);
- coverer.getCovering(region, &covering);
- ASSERT_EQUALS( covering.size(), (size_t)1 );
- ASSERT_EQUALS( covering[0], id );
+private:
+ Box _box;
+};
+
+TEST(R2RegionCoverer, RandomCells) {
+ GeoHashConverter converter(getConverterParams());
+ R2RegionCoverer coverer(&converter);
+ coverer.setMaxCells(1);
+ // Test random cell ids at all levels.
+ for (int i = 0; i < 10000; ++i) {
+ GeoHash id((long long)rand.nextInt64(), (unsigned)rand.nextInt32(GeoHash::kMaxBits + 1));
+ vector<GeoHash> covering;
+ Box box = converter.unhashToBoxCovering(id);
+ // Since the unhashed box is expanded by the error 8Mu, we need to shrink it.
+ box.fudge(-GeoHashConverter::kMachinePrecision * MAXBOUND * 20);
+ HashBoxRegion region(box);
+ coverer.getCovering(region, &covering);
+ ASSERT_EQUALS(covering.size(), (size_t)1);
+ ASSERT_EQUALS(covering[0], id);
+ }
+}
+
+double randDouble(double lowerBound, double upperBound) {
+ verify(lowerBound <= upperBound);
+ const int NUMBITS = 53;
+ // Random double in [0, 1)
+ double r = ldexp((double)(rand.nextInt64() & ((1ULL << NUMBITS) - 1ULL)), -NUMBITS);
+ return lowerBound + r * (upperBound - lowerBound);
+}
+
+// Check the given region is covered by the covering completely.
+// cellId is used internally.
+void checkCellIdCovering(const GeoHashConverter& converter,
+ const R2Region& region,
+ const R2CellUnion& covering,
+ const GeoHash cellId = GeoHash()) {
+ Box cell = converter.unhashToBoxCovering(cellId);
+
+ // The covering may or may not contain this disjoint cell, we don't care.
+ if (region.fastDisjoint(cell))
+ return;
+
+ // If the covering contains this id, that's fine.
+ if (covering.contains(cellId))
+ return;
+
+ // The covering doesn't contain this cell, so the region shouldn't contain this cell.
+ if (region.fastContains(cell)) {
+ log() << "covering " << covering.toString();
+ log() << "cellId " << cellId;
+ }
+ ASSERT_FALSE(region.fastContains(cell));
+
+ // The region intersects with this cell. So the covering should intersect with it too.
+ // We need to go deeper until a leaf. When we reach a leaf, it must be caught above
+ // - disjoint with the region, we don't care.
+ // - intersected with the region, contained in the covering.
+ // We can guarantee the disjoint/intersection test is exact, since it's a circle.
+ GeoHash children[4];
+ ASSERT_TRUE(cellId.subdivide(children)); // Not a leaf
+ for (int i = 0; i < 4; i++) {
+ checkCellIdCovering(converter, region, covering, children[i]);
+ }
+}
+
+void checkCovering(const GeoHashConverter& converter,
+ const R2Region& region,
+ const R2RegionCoverer& coverer,
+ const vector<GeoHash> covering) {
+ // Keep track of how many cells have the same coverer.minLevel() ancestor.
+ map<GeoHash, int> minLevelCells;
+ // Check covering's minLevel and maxLevel.
+ for (size_t i = 0; i < covering.size(); ++i) {
+ unsigned int level = covering[i].getBits();
+ ASSERT_NOT_LESS_THAN(level, coverer.minLevel());
+ ASSERT_NOT_GREATER_THAN(level, coverer.maxLevel());
+ minLevelCells[covering[i].parent(coverer.minLevel())] += 1;
+ }
+ if (covering.size() > (unsigned int)coverer.maxCells()) {
+ // If the covering has more than the requested number of cells, then check
+ // that the cell count cannot be reduced by using the parent of some cell.
+ for (map<GeoHash, int>::const_iterator i = minLevelCells.begin(); i != minLevelCells.end();
+ ++i) {
+ ASSERT_EQUALS(i->second, 1);
}
}
- double randDouble(double lowerBound, double upperBound) {
- verify(lowerBound <= upperBound);
- const int NUMBITS = 53;
- // Random double in [0, 1)
- double r = ldexp((double)(rand.nextInt64() & ((1ULL << NUMBITS) - 1ULL)), -NUMBITS);
- return lowerBound + r * ( upperBound - lowerBound );
+ R2CellUnion cellUnion;
+ cellUnion.init(covering);
+ checkCellIdCovering(converter, region, cellUnion);
+}
+
+// Generate a circle within [0, MAXBOUND]
+GeometryContainer* getRandomCircle(double radius) {
+ ASSERT_LESS_THAN(radius, MAXBOUND / 2);
+
+ // Format: { $center : [ [-74, 40.74], 10 ] }
+ GeometryContainer* container = new GeometryContainer();
+ container->parseFromQuery(
+ BSON("$center" << BSON_ARRAY(BSON_ARRAY(randDouble(radius, MAXBOUND - radius)
+ << randDouble(radius, MAXBOUND - radius))
+ << radius)).firstElement());
+ return container;
+}
+
+// Test the covering for arbitrary random circle.
+TEST(R2RegionCoverer, RandomCircles) {
+ GeoHashConverter converter(getConverterParams());
+ R2RegionCoverer coverer(&converter);
+ coverer.setMaxCells(8);
+
+ for (int i = 0; i < 1000; i++) {
+ // Using R2BoxRegion, the disjoint test with the circle gives poor results near the corners,
+ // so many small cells are treated as intersecting in the priority queue, which is
+ // very slow for a larger minLevel (smaller cells). So we limit minLevel to [0, 6].
+ coverer.setMinLevel(rand.nextInt32(7));
+ coverer.setMaxLevel(coverer.minLevel() + 4);
+
+ double radius = randDouble(0.0, MAXBOUND / 2);
+ unique_ptr<GeometryContainer> geometry(getRandomCircle(radius));
+ const R2Region& region = geometry->getR2Region();
+
+ vector<GeoHash> covering;
+ coverer.getCovering(region, &covering);
+ checkCovering(converter, region, coverer, covering);
}
+}
+
+// Test the covering for very small circles, since the above test doesn't cover finest cells.
+TEST(R2RegionCoverer, RandomTinyCircles) {
+ GeoHashConverter converter(getConverterParams());
+ R2RegionCoverer coverer(&converter);
+ coverer.setMaxCells(rand.nextInt32(20) + 1); // [1, 20]
+
+ for (int i = 0; i < 10000; i++) {
+ do {
+ coverer.setMinLevel(rand.nextInt32(GeoHash::kMaxBits + 1));
+ coverer.setMaxLevel(rand.nextInt32(GeoHash::kMaxBits + 1));
+ } while (coverer.minLevel() > coverer.maxLevel());
+
+ // 100 * 2 ^ -32 ~= 2.3E-8 (cell edge length)
+ double radius = randDouble(1E-15, ldexp(100.0, -32) * 10);
+ unique_ptr<GeometryContainer> geometry(getRandomCircle(radius));
+ const R2Region& region = geometry->getR2Region();
+
+ vector<GeoHash> covering;
+ coverer.getCovering(region, &covering);
+ checkCovering(converter, region, coverer, covering);
+ }
+}
+
+//
+// Shape Intersection
+//
+TEST(ShapeIntersection, Lines) {
+ /*
+ * E |D
+ * A___B |C G
+ * F
+ */
+ Point a(0, 0), b(1, 0), c(2, 0), d(2, 1);
+ Point e(0.5, 1), f(0.5, -0.5), g(3, 0);
- // Check the given region is covered by the covering completely.
- // cellId is used internally.
- void checkCellIdCovering(const GeoHashConverter& converter,
- const R2Region& region,
- const R2CellUnion& covering,
- const GeoHash cellId = GeoHash()) {
-
- Box cell = converter.unhashToBoxCovering(cellId);
+ /*
+ * Basic disjoint
+ * / |
+ * / |
+ */
+ ASSERT_FALSE(linesIntersect(a, d, c, b));
+ ASSERT_FALSE(linesIntersect(c, b, a, d)); // commutative
- // The covering may or may not contain this disjoint cell, we don't care.
- if (region.fastDisjoint(cell)) return;
+ /*
+ * Basic disjoint (axis aligned)
+ * |
+ * ___ |
+ */
+ ASSERT_FALSE(linesIntersect(a, b, c, d));
+ ASSERT_FALSE(linesIntersect(c, d, a, b)); // commutative
- // If the covering contains this id, that's fine.
- if (covering.contains(cellId)) return;
+ /*
+ * Basic intersection
+ * \/
+ * /\
+ */
+ ASSERT_TRUE(linesIntersect(e, c, f, d));
+ ASSERT_TRUE(linesIntersect(f, d, e, c)); // commutative
- // The covering doesn't contain this cell, so the region shouldn't contain this cell.
- if (region.fastContains(cell)) {
- log() << "covering " << covering.toString();
- log() << "cellId " << cellId;
- }
- ASSERT_FALSE(region.fastContains(cell));
-
- // The region intersects with this cell. So the covering should intersect with it too.
- // We need to go deeper until a leaf. When we reach a leaf, it must be caught above
- // - disjoint with the region, we don't care.
- // - intersected with the region, contained in the covering.
- // We can guarantee the disjoint/intersection test is exact, since it's a circle.
- GeoHash children[4];
- ASSERT_TRUE(cellId.subdivide( children )); // Not a leaf
- for ( int i = 0; i < 4; i++ ) {
- checkCellIdCovering( converter, region, covering, children[i] );
- }
- }
+ /*
+ * Basic intersection (axis aligned)
+ * _|_
+ * |
+ */
+ ASSERT_TRUE(linesIntersect(a, b, e, f));
+ ASSERT_TRUE(linesIntersect(f, e, b, a)); // commutative
- void checkCovering(const GeoHashConverter& converter,
- const R2Region& region,
- const R2RegionCoverer& coverer,
- const vector<GeoHash> covering) {
-
- // Keep track of how many cells have the same coverer.minLevel() ancestor.
- map<GeoHash, int> minLevelCells;
- // Check covering's minLevel and maxLevel.
- for (size_t i = 0; i < covering.size(); ++i) {
- unsigned int level = covering[i].getBits();
- ASSERT_NOT_LESS_THAN(level, coverer.minLevel());
- ASSERT_NOT_GREATER_THAN(level, coverer.maxLevel());
- minLevelCells[covering[i].parent(coverer.minLevel())] += 1;
- }
- if (covering.size() > (unsigned int)coverer.maxCells()) {
- // If the covering has more than the requested number of cells, then check
- // that the cell count cannot be reduced by using the parent of some cell.
- for (map<GeoHash, int>::const_iterator i = minLevelCells.begin();
- i != minLevelCells.end(); ++i) {
- ASSERT_EQUALS(i->second, 1);
- }
- }
+ /*
+ * One vertex on the line
+ * \
+ * ____ \
+ */
+ ASSERT_FALSE(linesIntersect(a, b, e, c));
+ ASSERT_FALSE(linesIntersect(e, c, a, b)); // commutative
- R2CellUnion cellUnion;
- cellUnion.init(covering);
- checkCellIdCovering(converter, region, cellUnion);
- }
+ /*
+ * One vertex on the segment
+ * \
+ * ___\___
+ */
+ ASSERT_TRUE(linesIntersect(a, c, b, e));
+ ASSERT_TRUE(linesIntersect(e, b, a, c)); // commutative
- // Generate a circle within [0, MAXBOUND]
- GeometryContainer* getRandomCircle(double radius) {
- ASSERT_LESS_THAN(radius, MAXBOUND / 2);
-
- // Format: { $center : [ [-74, 40.74], 10 ] }
- GeometryContainer* container = new GeometryContainer();
- container->parseFromQuery(BSON("$center"
- << BSON_ARRAY(
- BSON_ARRAY(randDouble(radius, MAXBOUND - radius)
- << randDouble(radius, MAXBOUND - radius))
- << radius)).firstElement());
- return container;
- }
+ /*
+ * Two segments share one vertex
+ * /
+ * /____
+ */
+ ASSERT_TRUE(linesIntersect(a, c, a, e));
+ ASSERT_TRUE(linesIntersect(a, e, a, c)); // commutative
- // Test the covering for arbitrary random circle.
- TEST(R2RegionCoverer, RandomCircles) {
- GeoHashConverter converter(getConverterParams());
- R2RegionCoverer coverer(&converter);
- coverer.setMaxCells( 8 );
-
- for (int i = 0; i < 1000; i++) {
- // Using R2BoxRegion, the disjoint with circle gives poor results around the corner,
- // so many small cells are considered as intersected in the priority queue, which is
- // very slow for larger minLevel (smaller cell). So we limit minLevels in [0, 6].
- coverer.setMinLevel( rand.nextInt32( 7 ) );
- coverer.setMaxLevel( coverer.minLevel() + 4 );
-
- double radius = randDouble(0.0, MAXBOUND / 2);
- unique_ptr<GeometryContainer> geometry(getRandomCircle(radius));
- const R2Region& region = geometry->getR2Region();
-
- vector<GeoHash> covering;
- coverer.getCovering(region, &covering);
- checkCovering(converter, region, coverer, covering);
- }
- }
+ /*
+ * Intersected segments on the same line
+ * A___B===C---G
+ */
+ ASSERT_TRUE(linesIntersect(a, c, b, g));
+ ASSERT_TRUE(linesIntersect(b, g, c, a)); // commutative
- // Test the covering for very small circles, since the above test doesn't cover finest cells.
- TEST(R2RegionCoverer, RandomTinyCircles) {
- GeoHashConverter converter(getConverterParams());
- R2RegionCoverer coverer(&converter);
- coverer.setMaxCells( rand.nextInt32(20) + 1 ); // [1, 20]
-
- for (int i = 0; i < 10000; i++) {
- do {
- coverer.setMinLevel( rand.nextInt32( GeoHash::kMaxBits + 1 ) );
- coverer.setMaxLevel( rand.nextInt32( GeoHash::kMaxBits + 1 ) );
- } while (coverer.minLevel() > coverer.maxLevel());
-
- // 100 * 2 ^ -32 ~= 2.3E-8 (cell edge length)
- double radius = randDouble(1E-15, ldexp(100.0, -32) * 10);
- unique_ptr<GeometryContainer> geometry(getRandomCircle(radius));
- const R2Region& region = geometry->getR2Region();
-
- vector<GeoHash> covering;
- coverer.getCovering(region, &covering);
- checkCovering(converter, region, coverer, covering);
- }
- }
+ /*
+ * Disjoint segments on the same line
+ * A___B C---G
+ */
+ ASSERT_FALSE(linesIntersect(a, b, c, g));
+ ASSERT_FALSE(linesIntersect(c, g, a, b)); // commutative
+
+ /*
+ * Segments on the same line share one vertex.
+ * /D
+ * /B
+ * F/
+ */
+ ASSERT_TRUE(linesIntersect(d, b, b, f));
+ ASSERT_TRUE(linesIntersect(f, b, d, b)); // commutative
+ // axis aligned
+ ASSERT_TRUE(linesIntersect(a, c, g, c));
+ ASSERT_TRUE(linesIntersect(c, g, a, c)); // commutative
+}
+
+TEST(ShapeIntersection, Polygons) {
+ // Convex polygon (triangle)
+
+ /*
+ * Disjoint, bounds disjoint
+ * /|
+ * / | []
+ * /__|
+ */
+ vector<Point> triangleVetices;
+ triangleVetices.push_back(Point(0, 0));
+ triangleVetices.push_back(Point(1, 0));
+ triangleVetices.push_back(Point(1, 4));
+ Polygon triangle(triangleVetices);
+ Box box;
+
+ box = Box(1.5, 1.5, 1);
+ ASSERT_FALSE(edgesIntersectsWithBox(triangle.points(), box));
+ ASSERT_FALSE(polygonIntersectsWithBox(triangle, box));
+ ASSERT_FALSE(polygonContainsBox(triangle, box));
+
+ /*
+ * Disjoint, bounds intersect
+ * [] /|
+ * / |
+ * /__|
+ */
+ box = Box(-0.5, 3.5, 1);
+ ASSERT_FALSE(edgesIntersectsWithBox(triangle.points(), box));
+ ASSERT_FALSE(polygonIntersectsWithBox(triangle, box));
+ ASSERT_FALSE(polygonContainsBox(triangle, box));
+
+ /*
+ * Intersect on one polygon vertex
+ * _____
+ * | |
+ * |_ /|_|
+ * / |
+ * /__|
+ */
+ box = Box(0, 3, 2);
+ ASSERT_TRUE(edgesIntersectsWithBox(triangle.points(), box));
+ ASSERT_TRUE(polygonIntersectsWithBox(triangle, box));
+ ASSERT_FALSE(polygonContainsBox(triangle, box));
+
+ /*
+ * Box contains polygon
+ * __________
+ * | |
+ * | /| |
+ * | / | |
+ * | /__| |
+ * |__________|
+ */
+ box = Box(-1, -1, 6);
+ ASSERT_FALSE(edgesIntersectsWithBox(triangle.points(), box));
+ ASSERT_TRUE(polygonIntersectsWithBox(triangle, box));
+ ASSERT_FALSE(polygonContainsBox(triangle, box));
+
+ /*
+ * Polygon contains box
+ * /|
+ * / |
+ * / |
+ * / []|
+ * /____|
+ */
+ box = Box(0.1, 0.1, 0.2);
+ ASSERT_FALSE(edgesIntersectsWithBox(triangle.points(), box));
+ ASSERT_TRUE(polygonIntersectsWithBox(triangle, box));
+ ASSERT_TRUE(polygonContainsBox(triangle, box));
+
+ /*
+ * Intersect, but no vertex is contained by the other shape.
+ * ___ /|_
+ * | / | |
+ * | / | |
+ * |_/___|_|
+ * /____|
+ */
+ box = Box(0, 1, 2);
+ ASSERT_TRUE(edgesIntersectsWithBox(triangle.points(), box));
+ ASSERT_TRUE(polygonIntersectsWithBox(triangle, box));
+ ASSERT_FALSE(polygonContainsBox(triangle, box));
+
+ // Concave polygon
+
+ /*
+ * (0,4)
+ * |\
+ * | \(1,1)
+ * | `.
+ * |____`. (4,0)
+ * (0,0)
+ */
+ vector<Point> concaveVetices;
+ concaveVetices.push_back(Point(0, 0));
+ concaveVetices.push_back(Point(4, 0));
+ concaveVetices.push_back(Point(1, 1));
+ concaveVetices.push_back(Point(0, 4));
+ Polygon concave(concaveVetices);
+
+ /*
+ * Disjoint
+ * |\
+ * | \
+ * | `.
+ * |____`.
+ * []
+ */
+ box = Box(1, -1, 0.9);
+ ASSERT_FALSE(edgesIntersectsWithBox(concave.points(), box));
+ ASSERT_FALSE(polygonIntersectsWithBox(concave, box));
+ ASSERT_FALSE(polygonContainsBox(concave, box));
+
+ /*
+ * Disjoint, bounds intersect
+ * |\
+ * | \[]
+ * | `.
+ * |____`.
+ */
+ box = Box(1.1, 1.1, 0.2);
+ ASSERT_FALSE(edgesIntersectsWithBox(concave.points(), box));
+ ASSERT_FALSE(polygonIntersectsWithBox(concave, box));
+ ASSERT_FALSE(polygonContainsBox(concave, box));
+
+ /*
+ * Intersect, one box vertex is contained by the polygon.
+ * |\
+ * |+\+ (1.5, 1.5)
+ * |+-`.
+ * |____`.
+ */
+ box = Box(0.5, 0.5, 1);
+ ASSERT_TRUE(edgesIntersectsWithBox(concave.points(), box));
+ ASSERT_TRUE(polygonIntersectsWithBox(concave, box));
+ ASSERT_FALSE(polygonContainsBox(concave, box));
+
+ /*
+ * Intersect, no vertex is contained by the other shape.
+ * |\
+ * +| \--+
+ * || `.|
+ * ||____`.
+ * +-----+
+ */
+ box = Box(-0.5, -0.5, 3);
+ ASSERT_TRUE(edgesIntersectsWithBox(concave.points(), box));
+ ASSERT_TRUE(polygonIntersectsWithBox(concave, box));
+ ASSERT_FALSE(polygonContainsBox(concave, box));
+}
+
+TEST(ShapeIntersection, Annulus) {
+ R2Annulus annulus(Point(0.0, 0.0), 1, 5);
+ Box box;
+
+ // Disjoint, out of outer circle
+ box = Box(4, 4, 1);
+ ASSERT_TRUE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+
+ // Box contains outer circle
+ box = Box(-6, -5.5, 12);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+
+ // Box intersects with the outer circle, but not the inner circle
+ box = Box(3, 3, 4);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+
+ // Box is contained by the annulus
+ box = Box(2, 2, 1);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_TRUE(annulus.fastContains(box));
+
+ // Box is contained by the outer circle and intersects with the inner circle
+ box = Box(0.4, 0.5, 3);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+
+ // Box intersects with both outer and inner circle
+ box = Box(-4, -4, 4.5);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+
+ // Box is inside the inner circle
+ box = Box(-0.1, -0.2, 0.5);
+ ASSERT_TRUE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+
+ // Box contains the inner circle, but intersects with the outer circle
+ box = Box(-2, -2, 7);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
//
- // Shape Intersection
+ // Annulus contains both inner and outer circles as boundaries.
//
- TEST(ShapeIntersection, Lines) {
- /*
- * E |D
- * A___B |C G
- * F
- */
- Point a(0, 0), b(1, 0), c(2, 0), d(2, 1);
- Point e(0.5, 1), f(0.5, -0.5), g(3, 0);
-
- /*
- * Basic disjoint
- * / |
- * / |
- */
- ASSERT_FALSE(linesIntersect(a, d, c, b));
- ASSERT_FALSE(linesIntersect(c, b, a, d)); // commutative
-
- /*
- * Basic disjoint (axis aligned)
- * |
- * ___ |
- */
- ASSERT_FALSE(linesIntersect(a, b, c, d));
- ASSERT_FALSE(linesIntersect(c, d, a, b)); // commutative
-
- /*
- * Basic intersection
- * \/
- * /\
- */
- ASSERT_TRUE(linesIntersect(e, c, f, d));
- ASSERT_TRUE(linesIntersect(f, d, e, c)); // commutative
-
- /*
- * Basic intersection (axis aligned)
- * _|_
- * |
- */
- ASSERT_TRUE(linesIntersect(a, b, e, f));
- ASSERT_TRUE(linesIntersect(f, e, b, a)); // commutative
-
- /*
- * One vertex on the line
- * \
- * ____ \
- */
- ASSERT_FALSE(linesIntersect(a, b, e, c));
- ASSERT_FALSE(linesIntersect(e, c, a, b)); // commutative
-
- /*
- * One vertex on the segment
- * \
- * ___\___
- */
- ASSERT_TRUE(linesIntersect(a, c, b, e));
- ASSERT_TRUE(linesIntersect(e, b, a, c)); // commutative
-
- /*
- * Two segments share one vertex
- * /
- * /____
- */
- ASSERT_TRUE(linesIntersect(a, c, a, e));
- ASSERT_TRUE(linesIntersect(a, e, a, c)); // commutative
-
- /*
- * Intersected segments on the same line
- * A___B===C---G
- */
- ASSERT_TRUE(linesIntersect(a, c, b, g));
- ASSERT_TRUE(linesIntersect(b, g, c, a)); // commutative
-
- /*
- * Disjoint segments on the same line
- * A___B C---G
- */
- ASSERT_FALSE(linesIntersect(a, b, c, g));
- ASSERT_FALSE(linesIntersect(c, g, a, b)); // commutative
-
- /*
- * Segments on the same line share one vertex.
- * /D
- * /B
- * F/
- */
- ASSERT_TRUE(linesIntersect(d, b, b, f));
- ASSERT_TRUE(linesIntersect(f, b, d, b)); // commutative
- // axis aligned
- ASSERT_TRUE(linesIntersect(a, c, g, c));
- ASSERT_TRUE(linesIntersect(c, g, a, c)); // commutative
- }
-
- TEST(ShapeIntersection, Polygons) {
- // Convex polygon (triangle)
-
- /*
- * Disjoint, bounds disjoint
- * /|
- * / | []
- * /__|
- */
- vector<Point> triangleVetices;
- triangleVetices.push_back(Point(0, 0));
- triangleVetices.push_back(Point(1, 0));
- triangleVetices.push_back(Point(1, 4));
- Polygon triangle(triangleVetices);
- Box box;
-
- box = Box(1.5, 1.5, 1);
- ASSERT_FALSE(edgesIntersectsWithBox(triangle.points(), box));
- ASSERT_FALSE(polygonIntersectsWithBox(triangle, box));
- ASSERT_FALSE(polygonContainsBox(triangle, box));
-
- /*
- * Disjoint, bounds intersect
- * [] /|
- * / |
- * /__|
- */
- box = Box(-0.5, 3.5, 1);
- ASSERT_FALSE(edgesIntersectsWithBox(triangle.points(), box));
- ASSERT_FALSE(polygonIntersectsWithBox(triangle, box));
- ASSERT_FALSE(polygonContainsBox(triangle, box));
-
- /*
- * Intersect on one polygon vertex
- * _____
- * | |
- * |_ /|_|
- * / |
- * /__|
- */
- box = Box(0, 3, 2);
- ASSERT_TRUE(edgesIntersectsWithBox(triangle.points(), box));
- ASSERT_TRUE(polygonIntersectsWithBox(triangle, box));
- ASSERT_FALSE(polygonContainsBox(triangle, box));
-
- /*
- * Box contains polygon
- * __________
- * | |
- * | /| |
- * | / | |
- * | /__| |
- * |__________|
- */
- box = Box(-1, -1, 6);
- ASSERT_FALSE(edgesIntersectsWithBox(triangle.points(), box));
- ASSERT_TRUE(polygonIntersectsWithBox(triangle, box));
- ASSERT_FALSE(polygonContainsBox(triangle, box));
-
- /*
- * Polygon contains box
- * /|
- * / |
- * / |
- * / []|
- * /____|
- */
- box = Box(0.1, 0.1, 0.2);
- ASSERT_FALSE(edgesIntersectsWithBox(triangle.points(), box));
- ASSERT_TRUE(polygonIntersectsWithBox(triangle, box));
- ASSERT_TRUE(polygonContainsBox(triangle, box));
-
- /*
- * Intersect, but no vertex is contained by the other shape.
- * ___ /|_
- * | / | |
- * | / | |
- * |_/___|_|
- * /____|
- */
- box = Box(0, 1, 2);
- ASSERT_TRUE(edgesIntersectsWithBox(triangle.points(), box));
- ASSERT_TRUE(polygonIntersectsWithBox(triangle, box));
- ASSERT_FALSE(polygonContainsBox(triangle, box));
-
- // Concave polygon
-
- /*
- * (0,4)
- * |\
- * | \(1,1)
- * | `.
- * |____`. (4,0)
- * (0,0)
- */
- vector<Point> concaveVetices;
- concaveVetices.push_back(Point(0, 0));
- concaveVetices.push_back(Point(4, 0));
- concaveVetices.push_back(Point(1, 1));
- concaveVetices.push_back(Point(0, 4));
- Polygon concave(concaveVetices);
-
- /*
- * Disjoint
- * |\
- * | \
- * | `.
- * |____`.
- * []
- */
- box = Box(1, -1, 0.9);
- ASSERT_FALSE(edgesIntersectsWithBox(concave.points(), box));
- ASSERT_FALSE(polygonIntersectsWithBox(concave, box));
- ASSERT_FALSE(polygonContainsBox(concave, box));
-
- /*
- * Disjoint, bounds intersect
- * |\
- * | \[]
- * | `.
- * |____`.
- */
- box = Box(1.1, 1.1, 0.2);
- ASSERT_FALSE(edgesIntersectsWithBox(concave.points(), box));
- ASSERT_FALSE(polygonIntersectsWithBox(concave, box));
- ASSERT_FALSE(polygonContainsBox(concave, box));
-
- /*
- * Intersect, one box vertex is contained by the polygon.
- * |\
- * |+\+ (1.5, 1.5)
- * |+-`.
- * |____`.
- */
- box = Box(0.5, 0.5, 1);
- ASSERT_TRUE(edgesIntersectsWithBox(concave.points(), box));
- ASSERT_TRUE(polygonIntersectsWithBox(concave, box));
- ASSERT_FALSE(polygonContainsBox(concave, box));
-
- /*
- * Intersect, no vertex is contained by the other shape.
- * |\
- * +| \--+
- * || `.|
- * ||____`.
- * +-----+
- */
- box = Box(-0.5, -0.5, 3);
- ASSERT_TRUE(edgesIntersectsWithBox(concave.points(), box));
- ASSERT_TRUE(polygonIntersectsWithBox(concave, box));
- ASSERT_FALSE(polygonContainsBox(concave, box));
- }
-
- TEST(ShapeIntersection, Annulus) {
- R2Annulus annulus(Point(0.0, 0.0), 1, 5);
- Box box;
-
- // Disjoint, out of outer circle
- box = Box(4, 4, 1);
- ASSERT_TRUE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
-
- // Box contains outer circle
- box = Box(-6, -5.5, 12);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
-
- // Box intersects with the outer circle, but not the inner circle
- box = Box(3, 3, 4);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
-
- // Box is contained by the annulus
- box = Box(2, 2, 1);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_TRUE(annulus.fastContains(box));
-
- // Box is contained by the outer circle and intersects with the inner circle
- box = Box(0.4, 0.5, 3);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
-
- // Box intersects with both outer and inner circle
- box = Box(-4, -4, 4.5);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
-
- // Box is inside the inner circle
- box = Box(-0.1, -0.2, 0.5);
- ASSERT_TRUE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
-
- // Box contains the inner circle, but intersects with the outer circle
- box = Box(-2, -2, 7);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
-
- //
- // Annulus contains both inner and outer circles as boundaries.
- //
-
- // Box only touches the outer boundary
- box = Box(3, 4, 1); // Lower left touches boundary
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
- box = Box(-4, -5, 1); // Upper right touches boundary
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
-
- // Box is contained by the annulus touching the outer boundary
- box = Box(-4, -3, 0.1);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_TRUE(annulus.fastContains(box));
-
- // Box is contained by the annulus touching the inner boundary
- box = Box(0, 1, 1);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_TRUE(annulus.fastContains(box));
-
- // Box only touches the inner boundary at (-0.6, 0.8)
- box = Box(-0.6, 0.3, 0.5);
- ASSERT_FALSE(annulus.fastDisjoint(box));
- ASSERT_FALSE(annulus.fastContains(box));
- }
-} // namespace
+ // Box only touches the outer boundary
+ box = Box(3, 4, 1); // Lower left touches boundary
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+ box = Box(-4, -5, 1); // Upper right touches boundary
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+
+ // Box is contained by the annulus touching the outer boundary
+ box = Box(-4, -3, 0.1);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_TRUE(annulus.fastContains(box));
+
+ // Box is contained by the annulus touching the inner boundary
+ box = Box(0, 1, 1);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_TRUE(annulus.fastContains(box));
+
+ // Box only touches the inner boundary at (-0.6, 0.8)
+ box = Box(-0.6, 0.3, 0.5);
+ ASSERT_FALSE(annulus.fastDisjoint(box));
+ ASSERT_FALSE(annulus.fastContains(box));
+}
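
The annulus cases follow the same pattern with fastDisjoint()/fastContains(): when both return false, the box overlaps the annulus only partially, which is the situation most of the assertions above exercise. A hypothetical classifier (again relying only on the shapes.h declarations, not part of this patch) spells that out:

#include "mongo/db/geo/shapes.h"

enum class AnnulusBoxRelation { kDisjoint, kPartial, kContained };

AnnulusBoxRelation classify(const mongo::R2Annulus& annulus, const mongo::Box& box) {
    if (annulus.fastDisjoint(box))
        return AnnulusBoxRelation::kDisjoint;
    if (annulus.fastContains(box))
        return AnnulusBoxRelation::kContained;
    return AnnulusBoxRelation::kPartial;  // box crosses the inner and/or outer boundary
}
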
+
+} // namespace
diff --git a/src/mongo/db/geo/s2.h b/src/mongo/db/geo/s2.h
index 426e9f0e5b5..7a3a1c6a840 100644
--- a/src/mongo/db/geo/s2.h
+++ b/src/mongo/db/geo/s2.h
@@ -42,4 +42,3 @@
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
-
diff --git a/src/mongo/db/geo/shapes.cpp b/src/mongo/db/geo/shapes.cpp
index d87cb9334ce..fa6018877bb 100644
--- a/src/mongo/db/geo/shapes.cpp
+++ b/src/mongo/db/geo/shapes.cpp
@@ -39,767 +39,753 @@ namespace mongo {
////////////// Point
- Point::Point() : x(0), y(0) { }
+Point::Point() : x(0), y(0) {}
- Point::Point(double x, double y) : x(x), y(y) { }
+Point::Point(double x, double y) : x(x), y(y) {}
- Point::Point(const BSONElement& e) {
- BSONObjIterator i(e.Obj());
- x = i.next().number();
- y = i.next().number();
- }
+Point::Point(const BSONElement& e) {
+ BSONObjIterator i(e.Obj());
+ x = i.next().number();
+ y = i.next().number();
+}
- Point::Point(const BSONObj& o) {
- BSONObjIterator i(o);
- x = i.next().number();
- y = i.next().number();
- }
+Point::Point(const BSONObj& o) {
+ BSONObjIterator i(o);
+ x = i.next().number();
+ y = i.next().number();
+}
- string Point::toString() const {
- StringBuilder buf;
- buf << "(" << x << "," << y << ")";
- return buf.str();
- }
+string Point::toString() const {
+ StringBuilder buf;
+ buf << "(" << x << "," << y << ")";
+ return buf.str();
+}
////////////// Circle
- Circle::Circle() {}
- Circle::Circle(double radius, Point center) : radius(radius), center(center) {}
+Circle::Circle() {}
+Circle::Circle(double radius, Point center) : radius(radius), center(center) {}
////////////// Box
- Box::Box() {}
+Box::Box() {}
- Box::Box(double x, double y, double size) :
- _min(x, y), _max(x + size, y + size) {
- }
+Box::Box(double x, double y, double size) : _min(x, y), _max(x + size, y + size) {}
- Box::Box(const Point& ptA, const Point& ptB) {
- init(ptA, ptB);
- }
+Box::Box(const Point& ptA, const Point& ptB) {
+ init(ptA, ptB);
+}
- void Box::init(const Point& ptA, const Point& ptB) {
- _min.x = min(ptA.x, ptB.x);
- _min.y = min(ptA.y, ptB.y);
- _max.x = max(ptA.x, ptB.x);
- _max.y = max(ptA.y, ptB.y);
- }
+void Box::init(const Point& ptA, const Point& ptB) {
+ _min.x = min(ptA.x, ptB.x);
+ _min.y = min(ptA.y, ptB.y);
+ _max.x = max(ptA.x, ptB.x);
+ _max.y = max(ptA.y, ptB.y);
+}
- void Box::init(const Box& other) {
- init(other._min, other._max);
- }
+void Box::init(const Box& other) {
+ init(other._min, other._max);
+}
- BSONArray Box::toBSON() const {
- return BSON_ARRAY(BSON_ARRAY(_min.x << _min.y) << BSON_ARRAY(_max.x << _max.y));
- }
+BSONArray Box::toBSON() const {
+ return BSON_ARRAY(BSON_ARRAY(_min.x << _min.y) << BSON_ARRAY(_max.x << _max.y));
+}
- string Box::toString() const {
- StringBuilder buf;
- buf << _min.toString() << " -->> " << _max.toString();
- return buf.str();
- }
+string Box::toString() const {
+ StringBuilder buf;
+ buf << _min.toString() << " -->> " << _max.toString();
+ return buf.str();
+}
- bool Box::between(double min, double max, double val, double fudge) const {
- return val + fudge >= min && val <= max + fudge;
- }
+bool Box::between(double min, double max, double val, double fudge) const {
+ return val + fudge >= min && val <= max + fudge;
+}
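
between() is an inclusive range check widened by an optional tolerance. A standalone sketch of the same expression (my own minimal re-implementation, not the code above) shows how the fudge factor admits values just outside the interval:

#include <cassert>

static bool between(double min, double max, double val, double fudge = 0) {
    return val + fudge >= min && val <= max + fudge;  // same expression as Box::between()
}

int main() {
    assert(between(0.0, 1.0, 1.0));          // endpoints are inside (inclusive)
    assert(!between(0.0, 1.0, 1.001));       // just outside with no tolerance
    assert(between(0.0, 1.0, 1.001, 0.01));  // admitted once the interval is widened
    return 0;
}
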
- bool Box::onBoundary(double bound, double val, double fudge) const {
- return (val >= bound - fudge && val <= bound + fudge);
- }
+bool Box::onBoundary(double bound, double val, double fudge) const {
+ return (val >= bound - fudge && val <= bound + fudge);
+}
- bool Box::mid(double amin, double amax,
- double bmin, double bmax, bool min, double* res) const {
- verify(amin <= amax);
- verify(bmin <= bmax);
+bool Box::mid(double amin, double amax, double bmin, double bmax, bool min, double* res) const {
+ verify(amin <= amax);
+ verify(bmin <= bmax);
- if (amin < bmin) {
- if (amax < bmin)
- return false;
- *res = min ? bmin : amax;
- return true;
- }
- if (amin > bmax)
+ if (amin < bmin) {
+ if (amax < bmin)
return false;
- *res = min ? amin : bmax;
+ *res = min ? bmin : amax;
return true;
}
-
- bool Box::intersects(const Box& other) const {
-
- bool intersectX = between(_min.x, _max.x, other._min.x) // contain part of other range
- || between(_min.x, _max.x, other._max.x) // contain part of other range
- || between(other._min.x, other._max.x, _min.x); // other range contains us
-
- bool intersectY = between(_min.y, _max.y, other._min.y)
- || between(_min.y, _max.y, other._max.y)
- || between(other._min.y, other._max.y, _min.y);
-
- return intersectX && intersectY;
- }
-
- double Box::legacyIntersectFraction(const Box& other) const {
-
- Point boundMin(0,0);
- Point boundMax(0,0);
-
- if (!mid(_min.x, _max.x, other._min.x, other._max.x, true, &boundMin.x) ||
- !mid(_min.x, _max.x, other._min.x, other._max.x, false, &boundMax.x) ||
- !mid(_min.y, _max.y, other._min.y, other._max.y, true, &boundMin.y) ||
- !mid(_min.y, _max.y, other._min.y, other._max.y, false, &boundMax.y))
- return 0;
-
- Box intersection(boundMin, boundMax);
- return intersection.area() / area();
- }
-
- double Box::area() const {
- return (_max.x - _min.x) * (_max.y - _min.y);
- }
-
- double Box::maxDim() const {
- return max(_max.x - _min.x, _max.y - _min.y);
- }
-
- Point Box::center() const {
- return Point((_min.x + _max.x) / 2,
- (_min.y + _max.y) / 2);
- }
-
- void Box::truncate(double min, double max) {
- if (_min.x < min) _min.x = min;
- if (_min.y < min) _min.y = min;
- if (_max.x > max) _max.x = max;
- if (_max.y > max) _max.y = max;
- }
-
- void Box::fudge(double error) {
- _min.x -= error;
- _min.y -= error;
- _max.x += error;
- _max.y += error;
- }
-
- void Box::expandToInclude(const Point& pt) {
- _min.x = min(_min.x, pt.x);
- _min.y = min(_min.y, pt.y);
- _max.x = max(_max.x, pt.x);
- _max.y = max(_max.y, pt.y);
- }
-
- bool Box::onBoundary(Point p, double fudge) const {
- return onBoundary(_min.x, p.x, fudge) ||
- onBoundary(_max.x, p.x, fudge) ||
- onBoundary(_min.y, p.y, fudge) ||
- onBoundary(_max.y, p.y, fudge);
- }
-
- bool Box::inside(Point p, double fudge) const {
- bool res = inside(p.x, p.y, fudge);
- return res;
- }
-
- bool Box::inside(double x, double y, double fudge) const {
- return between(_min.x, _max.x , x, fudge) &&
- between(_min.y, _max.y , y, fudge);
- }
-
- bool Box::contains(const Box& other, double fudge) const {
- return inside(other._min, fudge) && inside(other._max, fudge);
- }
+ if (amin > bmax)
+ return false;
+ *res = min ? amin : bmax;
+ return true;
+}
+
+bool Box::intersects(const Box& other) const {
+ bool intersectX = between(_min.x, _max.x, other._min.x) // contain part of other range
+ || between(_min.x, _max.x, other._max.x) // contain part of other range
+ || between(other._min.x, other._max.x, _min.x); // other range contains us
+
+ bool intersectY = between(_min.y, _max.y, other._min.y) ||
+ between(_min.y, _max.y, other._max.y) || between(other._min.y, other._max.y, _min.y);
+
+ return intersectX && intersectY;
+}
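
Per axis, the three between() clauses are equivalent to the usual closed-interval overlap test. A standalone sketch (assuming fudge = 0) makes the equivalence concrete:

#include <cassert>

// Closed intervals [aMin, aMax] and [bMin, bMax] overlap iff each one starts before the
// other one ends -- the same condition the three between() checks per axis express.
static bool intervalsOverlap(double aMin, double aMax, double bMin, double bMax) {
    return aMin <= bMax && bMin <= aMax;
}

int main() {
    assert(intervalsOverlap(0, 2, 1, 3));   // partial overlap
    assert(intervalsOverlap(0, 5, 1, 2));   // one interval contains the other
    assert(!intervalsOverlap(0, 1, 2, 3));  // disjoint
    return 0;
}
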
+
+double Box::legacyIntersectFraction(const Box& other) const {
+ Point boundMin(0, 0);
+ Point boundMax(0, 0);
+
+ if (!mid(_min.x, _max.x, other._min.x, other._max.x, true, &boundMin.x) ||
+ !mid(_min.x, _max.x, other._min.x, other._max.x, false, &boundMax.x) ||
+ !mid(_min.y, _max.y, other._min.y, other._max.y, true, &boundMin.y) ||
+ !mid(_min.y, _max.y, other._min.y, other._max.y, false, &boundMax.y))
+ return 0;
+
+ Box intersection(boundMin, boundMax);
+ return intersection.area() / area();
+}
+
+double Box::area() const {
+ return (_max.x - _min.x) * (_max.y - _min.y);
+}
+
+double Box::maxDim() const {
+ return max(_max.x - _min.x, _max.y - _min.y);
+}
+
+Point Box::center() const {
+ return Point((_min.x + _max.x) / 2, (_min.y + _max.y) / 2);
+}
+
+void Box::truncate(double min, double max) {
+ if (_min.x < min)
+ _min.x = min;
+ if (_min.y < min)
+ _min.y = min;
+ if (_max.x > max)
+ _max.x = max;
+ if (_max.y > max)
+ _max.y = max;
+}
+
+void Box::fudge(double error) {
+ _min.x -= error;
+ _min.y -= error;
+ _max.x += error;
+ _max.y += error;
+}
+
+void Box::expandToInclude(const Point& pt) {
+ _min.x = min(_min.x, pt.x);
+ _min.y = min(_min.y, pt.y);
+ _max.x = max(_max.x, pt.x);
+ _max.y = max(_max.y, pt.y);
+}
+
+bool Box::onBoundary(Point p, double fudge) const {
+ return onBoundary(_min.x, p.x, fudge) || onBoundary(_max.x, p.x, fudge) ||
+ onBoundary(_min.y, p.y, fudge) || onBoundary(_max.y, p.y, fudge);
+}
+
+bool Box::inside(Point p, double fudge) const {
+ bool res = inside(p.x, p.y, fudge);
+ return res;
+}
+
+bool Box::inside(double x, double y, double fudge) const {
+ return between(_min.x, _max.x, x, fudge) && between(_min.y, _max.y, y, fudge);
+}
+
+bool Box::contains(const Box& other, double fudge) const {
+ return inside(other._min, fudge) && inside(other._max, fudge);
+}
////////////// Polygon
- Polygon::Polygon() {
- }
-
- Polygon::Polygon(const vector<Point>& points) {
- init(points);
- }
-
- void Polygon::init(const vector<Point>& points) {
-
- _points.clear();
- _bounds.reset();
- _centroid.reset();
-
- _points.insert(_points.begin(), points.begin(), points.end());
- }
-
- void Polygon::init(const Polygon& other) {
- init(other._points);
- }
-
- int Polygon::size(void) const { return _points.size(); }
+Polygon::Polygon() {}
+
+Polygon::Polygon(const vector<Point>& points) {
+ init(points);
+}
+
+void Polygon::init(const vector<Point>& points) {
+ _points.clear();
+ _bounds.reset();
+ _centroid.reset();
+
+ _points.insert(_points.begin(), points.begin(), points.end());
+}
+
+void Polygon::init(const Polygon& other) {
+ init(other._points);
+}
+
+int Polygon::size(void) const {
+ return _points.size();
+}
+
+bool Polygon::contains(const Point& p) const {
+ return contains(p, 0) > 0;
+}
+
+/*
+ * Return values:
+ * -1 if no intersection
+ * 0 if maybe an intersection (using fudge)
+ * 1 if there is an intersection
+ *
+ * A ray casting intersection method is used.
+ */
+int Polygon::contains(const Point& p, double fudge) const {
+ Box fudgeBox(Point(p.x - fudge, p.y - fudge), Point(p.x + fudge, p.y + fudge));
+
+ int counter = 0;
+ Point p1 = _points[0];
+ for (int i = 1; i <= size(); i++) {
+ // XXX: why is there a mod here?
+ Point p2 = _points[i % size()];
+
+ // We need to check whether or not this segment intersects our error box
+ if (fudge > 0 &&
+ // Points not too far below box
+ fudgeBox._min.y <= std::max(p1.y, p2.y) &&
+ // Points not too far above box
+ fudgeBox._max.y >= std::min(p1.y, p2.y) &&
+ // Points not too far to left of box
+ fudgeBox._min.x <= std::max(p1.x, p2.x) &&
+ // Points not too far to right of box
+ fudgeBox._max.x >= std::min(p1.x, p2.x)) {
+ // If our box contains one or more of these points, we need to do an exact
+ // check.
+ if (fudgeBox.inside(p1)) {
+ return 0;
+ }
+ if (fudgeBox.inside(p2)) {
+ return 0;
+ }
- bool Polygon::contains(const Point& p) const { return contains(p, 0) > 0; }
+ // Do intersection check for vertical sides
+ if (p1.y != p2.y) {
+ double invSlope = (p2.x - p1.x) / (p2.y - p1.y);
- /*
- * Return values:
- * -1 if no intersection
- * 0 if maybe an intersection (using fudge)
- * 1 if there is an intersection
- *
- * A ray casting intersection method is used.
- */
- int Polygon::contains(const Point &p, double fudge) const {
- Box fudgeBox(Point(p.x - fudge, p.y - fudge), Point(p.x + fudge, p.y + fudge));
-
- int counter = 0;
- Point p1 = _points[0];
- for (int i = 1; i <= size(); i++) {
- // XXX: why is there a mod here?
- Point p2 = _points[i % size()];
-
- // We need to check whether or not this segment intersects our error box
- if (fudge > 0 &&
- // Points not too far below box
- fudgeBox._min.y <= std::max(p1.y, p2.y) &&
- // Points not too far above box
- fudgeBox._max.y >= std::min(p1.y, p2.y) &&
- // Points not too far to left of box
- fudgeBox._min.x <= std::max(p1.x, p2.x) &&
- // Points not too far to right of box
- fudgeBox._max.x >= std::min(p1.x, p2.x)) {
-
-
- // If our box contains one or more of these points, we need to do an exact
- // check.
- if (fudgeBox.inside(p1)) {
+ double xintersT = (fudgeBox._max.y - p1.y) * invSlope + p1.x;
+ if (fudgeBox._min.x <= xintersT && fudgeBox._max.x >= xintersT) {
return 0;
}
- if (fudgeBox.inside(p2)) {
+
+ double xintersB = (fudgeBox._min.y - p1.y) * invSlope + p1.x;
+ if (fudgeBox._min.x <= xintersB && fudgeBox._max.x >= xintersB) {
return 0;
}
+ }
- // Do intersection check for vertical sides
- if (p1.y != p2.y) {
- double invSlope = (p2.x - p1.x) / (p2.y - p1.y);
+ // Do intersection check for horizontal sides
+ if (p1.x != p2.x) {
+ double slope = (p2.y - p1.y) / (p2.x - p1.x);
- double xintersT = (fudgeBox._max.y - p1.y) * invSlope + p1.x;
- if (fudgeBox._min.x <= xintersT && fudgeBox._max.x >= xintersT) {
- return 0;
- }
-
- double xintersB = (fudgeBox._min.y - p1.y) * invSlope + p1.x;
- if (fudgeBox._min.x <= xintersB && fudgeBox._max.x >= xintersB) {
- return 0;
- }
+ double yintersR = (p1.x - fudgeBox._max.x) * slope + p1.y;
+ if (fudgeBox._min.y <= yintersR && fudgeBox._max.y >= yintersR) {
+ return 0;
}
- // Do intersection check for horizontal sides
- if (p1.x != p2.x) {
- double slope = (p2.y - p1.y) / (p2.x - p1.x);
-
- double yintersR = (p1.x - fudgeBox._max.x) * slope + p1.y;
- if (fudgeBox._min.y <= yintersR && fudgeBox._max.y >= yintersR) {
- return 0;
- }
-
- double yintersL = (p1.x - fudgeBox._min.x) * slope + p1.y;
- if (fudgeBox._min.y <= yintersL && fudgeBox._max.y >= yintersL) {
- return 0;
- }
- }
- } else if (fudge == 0){
- // If this is an exact vertex, we won't intersect, so check this
- if (p.y == p1.y && p.x == p1.x) return 1;
- else if (p.y == p2.y && p.x == p2.x) return 1;
-
- // If this is a horizontal line we won't intersect, so check this
- if (p1.y == p2.y && p.y == p1.y){
- // Check that the x-coord lies in the line
- if (p.x >= std::min(p1.x, p2.x) && p.x <= std::max(p1.x, p2.x))
- return 1;
+ double yintersL = (p1.x - fudgeBox._min.x) * slope + p1.y;
+ if (fudgeBox._min.y <= yintersL && fudgeBox._max.y >= yintersL) {
+ return 0;
}
}
+ } else if (fudge == 0) {
+ // If this is an exact vertex, we won't intersect, so check this
+ if (p.y == p1.y && p.x == p1.x)
+ return 1;
+ else if (p.y == p2.y && p.x == p2.x)
+ return 1;
+
+ // If this is a horizontal line we won't intersect, so check this
+ if (p1.y == p2.y && p.y == p1.y) {
+ // Check that the x-coord lies in the line
+ if (p.x >= std::min(p1.x, p2.x) && p.x <= std::max(p1.x, p2.x))
+ return 1;
+ }
+ }
- // Normal intersection test.
- // TODO: Invert these for clearer logic?
- if (p.y > std::min(p1.y, p2.y)) {
- if (p.y <= std::max(p1.y, p2.y)) {
- if (p.x <= std::max(p1.x, p2.x)) {
- if (p1.y != p2.y) {
- double xinters = (p.y-p1.y)*(p2.x-p1.x)/(p2.y-p1.y)+p1.x;
- // Special case of point on vertical line
- if (p1.x == p2.x && p.x == p1.x){
-
- // Need special case for the vertical edges, for example:
- // 1) \e pe/----->
- // vs.
- // 2) \ep---e/----->
- //
- // if we count exact as intersection, then 1 is in but 2 is out
- // if we count exact as no-int then 1 is out but 2 is in.
-
- return 1;
- } else if (p1.x == p2.x || p.x <= xinters) {
- counter++;
- }
+ // Normal intersection test.
+ // TODO: Invert these for clearer logic?
+ if (p.y > std::min(p1.y, p2.y)) {
+ if (p.y <= std::max(p1.y, p2.y)) {
+ if (p.x <= std::max(p1.x, p2.x)) {
+ if (p1.y != p2.y) {
+ double xinters = (p.y - p1.y) * (p2.x - p1.x) / (p2.y - p1.y) + p1.x;
+ // Special case of point on vertical line
+ if (p1.x == p2.x && p.x == p1.x) {
+ // Need special case for the vertical edges, for example:
+ // 1) \e pe/----->
+ // vs.
+ // 2) \ep---e/----->
+ //
+ // if we count exact as intersection, then 1 is in but 2 is out
+ // if we count exact as no-int then 1 is out but 2 is in.
+
+ return 1;
+ } else if (p1.x == p2.x || p.x <= xinters) {
+ counter++;
}
}
}
}
-
- p1 = p2;
- }
-
- if (counter % 2 == 0) {
- return -1;
- } else {
- return 1;
- }
- }
-
- const Point& Polygon::centroid() const {
-
- if (_centroid) {
- return *_centroid;
- }
-
- _centroid.reset(new Point());
-
- double signedArea = 0.0;
- double area = 0.0; // Partial signed area
-
- /// For all vertices except last
- int i = 0;
- for (i = 0; i < size() - 1; ++i) {
- area = _points[i].x * _points[i+1].y - _points[i+1].x * _points[i].y ;
- signedArea += area;
- _centroid->x += (_points[i].x + _points[i+1].x) * area;
- _centroid->y += (_points[i].y + _points[i+1].y) * area;
}
- // Do last vertex
- area = _points[i].x * _points[0].y - _points[0].x * _points[i].y;
- _centroid->x += (_points[i].x + _points[0].x) * area;
- _centroid->y += (_points[i].y + _points[0].y) * area;
- signedArea += area;
- signedArea *= 0.5;
- _centroid->x /= (6 * signedArea);
- _centroid->y /= (6 * signedArea);
-
- return *_centroid;
+ p1 = p2;
}
- const Box& Polygon::bounds() const {
-
- if (_bounds) {
- return *_bounds;
- }
-
- _bounds.reset(new Box(_points[0], _points[0]));
-
- for (int i = 1; i < size(); i++) {
- _bounds->expandToInclude(_points[i]);
- }
-
- return *_bounds;
+ if (counter % 2 == 0) {
+ return -1;
+ } else {
+ return 1;
}
+}
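
The loop above is an even-odd (ray casting) test with extra handling for the fudge box, vertices, and horizontal/vertical edges. Stripped of those special cases, the core parity rule looks like this standalone sketch (my own simplified version, not the production code):

#include <vector>

struct Pt { double x, y; };

// Cast a ray from p in the +x direction and count how many polygon edges it crosses;
// an odd count means p is inside.
static bool insidePolygon(const std::vector<Pt>& poly, const Pt& p) {
    bool inside = false;
    for (size_t i = 0, j = poly.size() - 1; i < poly.size(); j = i++) {
        const Pt& a = poly[i];
        const Pt& b = poly[j];
        // Edge (a, b) straddles the horizontal line y == p.y ...
        if ((a.y > p.y) != (b.y > p.y)) {
            double xCross = a.x + (p.y - a.y) * (b.x - a.x) / (b.y - a.y);
            if (p.x < xCross)  // ... and the crossing lies to the right of p.
                inside = !inside;
        }
    }
    return inside;
}

int main() {
    std::vector<Pt> tri = {{0, 0}, {4, 0}, {0, 4}};
    return (insidePolygon(tri, {1, 1}) && !insidePolygon(tri, {3, 3})) ? 0 : 1;
}
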
- R2Annulus::R2Annulus() :
- _inner(0.0), _outer(0.0) {
+const Point& Polygon::centroid() const {
+ if (_centroid) {
+ return *_centroid;
}
- R2Annulus::R2Annulus(const Point& center, double inner, double outer) :
- _center(center), _inner(inner), _outer(outer) {
- }
+ _centroid.reset(new Point());
- const Point& R2Annulus::center() const {
- return _center;
- }
-
- double R2Annulus::getInner() const {
- return _inner;
- }
+ double signedArea = 0.0;
+ double area = 0.0; // Partial signed area
- double R2Annulus::getOuter() const {
- return _outer;
+ /// For all vertices except last
+ int i = 0;
+ for (i = 0; i < size() - 1; ++i) {
+ area = _points[i].x * _points[i + 1].y - _points[i + 1].x * _points[i].y;
+ signedArea += area;
+ _centroid->x += (_points[i].x + _points[i + 1].x) * area;
+ _centroid->y += (_points[i].y + _points[i + 1].y) * area;
}
- bool R2Annulus::contains(const Point& point) const {
+ // Do last vertex
+ area = _points[i].x * _points[0].y - _points[0].x * _points[i].y;
+ _centroid->x += (_points[i].x + _points[0].x) * area;
+ _centroid->y += (_points[i].y + _points[0].y) * area;
+ signedArea += area;
+ signedArea *= 0.5;
+ _centroid->x /= (6 * signedArea);
+ _centroid->y /= (6 * signedArea);
- // See if we're inside the inner radius
- if (distanceCompare(point, _center, _inner) < 0) {
- return false;
- }
+ return *_centroid;
+}
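
The accumulation above is the standard signed-area (shoelace) centroid identity. In LaTeX, with vertices (x_i, y_i) taken cyclically (indices mod n):

A = \tfrac{1}{2} \sum_{i=0}^{n-1} (x_i y_{i+1} - x_{i+1} y_i)

C_x = \frac{1}{6A} \sum_{i=0}^{n-1} (x_i + x_{i+1})(x_i y_{i+1} - x_{i+1} y_i), \qquad
C_y = \frac{1}{6A} \sum_{i=0}^{n-1} (y_i + y_{i+1})(x_i y_{i+1} - x_{i+1} y_i)

Each loop iteration adds one term of these sums (the local variable `area` holds x_i y_{i+1} - x_{i+1} y_i), and the final division by 6 * signedArea is the 1/(6A) factor.
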
- // See if we're outside the outer radius
- if (distanceCompare(point, _center, _outer) > 0) {
- return false;
- }
-
- return true;
+const Box& Polygon::bounds() const {
+ if (_bounds) {
+ return *_bounds;
}
- Box R2Annulus::getR2Bounds() const {
- return Box(_center.x - _outer, _center.y - _outer, 2 * _outer); // Box(_min.x, _min.y, edgeLength)
- }
+ _bounds.reset(new Box(_points[0], _points[0]));
- bool R2Annulus::fastContains(const Box& other) const {
- return circleContainsBox(Circle(_outer, _center), other)
- && !circleInteriorIntersectsWithBox(Circle(_inner, _center), other);
+ for (int i = 1; i < size(); i++) {
+ _bounds->expandToInclude(_points[i]);
}
- bool R2Annulus::fastDisjoint(const Box& other) const {
- return !circleIntersectsWithBox(Circle(_outer, _center), other)
- || circleInteriorContainsBox(Circle(_inner, _center), other);
- }
+ return *_bounds;
+}
- string R2Annulus::toString() const {
- return str::stream() << "center: " << _center.toString() << " inner: " << _inner
- << " outer: " << _outer;
- }
+R2Annulus::R2Annulus() : _inner(0.0), _outer(0.0) {}
- /////// Other methods
+R2Annulus::R2Annulus(const Point& center, double inner, double outer)
+ : _center(center), _inner(inner), _outer(outer) {}
- double S2Distance::distanceRad(const S2Point& pointA, const S2Point& pointB) {
- S1Angle angle(pointA, pointB);
- return angle.radians();
- }
+const Point& R2Annulus::center() const {
+ return _center;
+}
- double S2Distance::minDistanceRad(const S2Point& point, const S2Polyline& line) {
- int tmp;
- S1Angle angle(point, line.Project(point, &tmp));
- return angle.radians();
- }
+double R2Annulus::getInner() const {
+ return _inner;
+}
- double S2Distance::minDistanceRad(const S2Point& point, const S2Polygon& polygon) {
- S1Angle angle(point, polygon.Project(point));
- return angle.radians();
- }
+double R2Annulus::getOuter() const {
+ return _outer;
+}
- double S2Distance::minDistanceRad(const S2Point& point, const S2Cap& cap) {
- S1Angle angleToCenter(point, cap.axis());
- return (angleToCenter - cap.angle()).radians();
+bool R2Annulus::contains(const Point& point) const {
+ // See if we're inside the inner radius
+ if (distanceCompare(point, _center, _inner) < 0) {
+ return false;
}
- /**
- * Distance method that compares x or y coords when other direction is zero,
- * avoids numerical error when distances are very close to radius but axis-aligned.
- *
- * An example of the problem is:
- * (52.0 - 51.9999) - 0.0001 = 3.31965e-15 and 52.0 - 51.9999 > 0.0001 in double arithmetic
- * but:
- * 51.9999 + 0.0001 <= 52.0
- *
- * This avoids some (but not all!) suprising results in $center queries where points are
- * (radius + center.x, center.y) or vice-versa.
- */
- bool distanceWithin(const Point &p1, const Point &p2, double radius) {
- return distanceCompare(p1, p2, radius) <= 0.0;
+ // See if we're outside the outer radius
+ if (distanceCompare(point, _center, _outer) > 0) {
+ return false;
}
- // Compare the distance between p1 and p2 with the radius.
- // Float-number comparison might be inaccurate.
- //
- // > 0: distance is greater than radius
- // = 0: distance equals radius
- // < 0: distance is less than radius
- double distanceCompare(const Point &p1, const Point &p2, double radius) {
- double a = p2.x - p1.x;
- double b = p2.y - p1.y;
-
- if (a == 0) {
- //
- // Note: For some, unknown reason, when a 32-bit g++ optimizes this call, the sum is
- // calculated imprecisely. We need to force the compiler to always evaluate it
- // correctly, hence the weirdness.
- //
- // On some 32-bit linux machines, removing the volatile keyword or calculating the sum
- // inline will make certain geo tests fail. Of course this check will force volatile
- // for all 32-bit systems, not just affected systems.
- if (sizeof(void*) <= 4){
- volatile double sum = p2.y > p1.y ? p1.y + radius : p2.y + radius;
- return p2.y > p1.y ? p2.y - sum : p1.y - sum;
- } else {
- // Original math, correct for most systems
- return p2.y > p1.y ? p2.y - (p1.y + radius) : p1.y - (p2.y + radius);
- }
- }
+ return true;
+}
- if (b == 0) {
- if (sizeof(void*) <= 4){
- volatile double sum = p2.x > p1.x ? p1.x + radius : p2.x + radius;
- return p2.x > p1.x ? p2.x - sum : p1.x - sum;
- } else {
- return p2.x > p1.x ? p2.x - (p1.x + radius) : p1.x - (p2.x + radius);
- }
- }
+Box R2Annulus::getR2Bounds() const {
+ return Box(
+ _center.x - _outer, _center.y - _outer, 2 * _outer); // Box(_min.x, _min.y, edgeLength)
+}
- return sqrt((a * a) + (b * b)) - radius;
- }
+bool R2Annulus::fastContains(const Box& other) const {
+ return circleContainsBox(Circle(_outer, _center), other) &&
+ !circleInteriorIntersectsWithBox(Circle(_inner, _center), other);
+}
- // note: multiply by earth radius for distance
- double spheredist_rad(const Point& p1, const Point& p2) {
- // this uses the n-vector formula: http://en.wikipedia.org/wiki/N-vector
- // If you try to match the code to the formula, note that I inline the cross-product.
-
- double sinx1(sin(p1.x)), cosx1(cos(p1.x));
- double siny1(sin(p1.y)), cosy1(cos(p1.y));
- double sinx2(sin(p2.x)), cosx2(cos(p2.x));
- double siny2(sin(p2.y)), cosy2(cos(p2.y));
-
- double cross_prod =
- (cosy1*cosx1 * cosy2*cosx2) +
- (cosy1*sinx1 * cosy2*sinx2) +
- (siny1 * siny2);
-
- if (cross_prod >= 1 || cross_prod <= -1) {
- // fun with floats
- verify(fabs(cross_prod)-1 < 1e-6);
- return cross_prod > 0 ? 0 : M_PI;
- }
+bool R2Annulus::fastDisjoint(const Box& other) const {
+ return !circleIntersectsWithBox(Circle(_outer, _center), other) ||
+ circleInteriorContainsBox(Circle(_inner, _center), other);
+}
- return acos(cross_prod);
- }
+string R2Annulus::toString() const {
+ return str::stream() << "center: " << _center.toString() << " inner: " << _inner
+ << " outer: " << _outer;
+}
- // @param p1 A point on the sphere where x and y are degrees.
- // @param p2 A point on the sphere where x and y are degrees.
- // @return The distance between the two points in RADIANS. Multiply by radius to get arc
- // length.
- double spheredist_deg(const Point& p1, const Point& p2) {
- return spheredist_rad(Point(deg2rad(p1.x), deg2rad(p1.y)),
- Point(deg2rad(p2.x), deg2rad(p2.y)));
- }
+/////// Other methods
- // Technically lat/long bounds, not really tied to earth radius.
- bool isValidLngLat(double lng, double lat) {
- return abs(lng) <= 180 && abs(lat) <= 90;
- }
+double S2Distance::distanceRad(const S2Point& pointA, const S2Point& pointB) {
+ S1Angle angle(pointA, pointB);
+ return angle.radians();
+}
- double distance(const Point& p1, const Point &p2) {
- double a = p1.x - p2.x;
- double b = p1.y - p2.y;
+double S2Distance::minDistanceRad(const S2Point& point, const S2Polyline& line) {
+ int tmp;
+ S1Angle angle(point, line.Project(point, &tmp));
+ return angle.radians();
+}
- // Avoid numerical error if possible...
- if (a == 0) return abs(b);
- if (b == 0) return abs(a);
+double S2Distance::minDistanceRad(const S2Point& point, const S2Polygon& polygon) {
+ S1Angle angle(point, polygon.Project(point));
+ return angle.radians();
+}
- return sqrt((a * a) + (b * b));
- }
+double S2Distance::minDistanceRad(const S2Point& point, const S2Cap& cap) {
+ S1Angle angleToCenter(point, cap.axis());
+ return (angleToCenter - cap.angle()).radians();
+}
- static inline Vector2_d toVector2(const Point& p) {
- return Vector2_d(p.x, p.y);
- }
-
- // Given a segment (A, B) and a segment (C, D), check whether they intersect.
- bool linesIntersect(const Point& pA, const Point& pB, const Point& pC, const Point& pD) {
- Vector2_d a = toVector2(pA);
- Vector2_d b = toVector2(pB);
- Vector2_d c = toVector2(pC);
- Vector2_d d = toVector2(pD);
-
- // The normal of line AB
- Vector2_d normalAB = (b - a).Ortho();
-
- // Dot products of AC and the normal of AB
- // = 0 : C is on the line AB
- // > 0 : C is on one side
- // < 0 : C is on the other side
- double dotProdNormalAB_AC = normalAB.DotProd(c - a);
- double dotProdNormalAB_AD = normalAB.DotProd(d - a);
-
- // C and D can not on the same side of line AB
- if (dotProdNormalAB_AC * dotProdNormalAB_AD > 0) return false;
-
- // AB and CD are on the same line
- if (dotProdNormalAB_AC == 0 && dotProdNormalAB_AD == 0) {
- // Test if C or D is on segment AB.
- return (c - a).DotProd(c - b) <= 0 || (d - a).DotProd(d - b) <= 0;
- }
-
- // Check if A and B are on different sides of line CD.
- Vector2_d normalCD = (d - c).Ortho();
- double dotProdNormalCD_CA = normalCD.DotProd(a - c);
- double dotProdNormalCD_CB = normalCD.DotProd(b - c);
- return dotProdNormalCD_CA * dotProdNormalCD_CB <= 0; // Perhaps A or B is on line CD
- }
-
- static bool circleContainsBoxInternal(const Circle& circle,
- const Box& box,
- bool includeCircleBoundary) {
-
- // NOTE: a circle of zero radius is a point, and there are NO points contained inside a
- // zero-radius circle, not even the point itself.
-
- const Point& a = box._min;
- const Point& b = box._max;
- double compareLL = distanceCompare( circle.center, a, circle.radius ); // Lower left
- double compareUR = distanceCompare( circle.center, b, circle.radius ); // Upper right
- // Upper Left
- double compareUL = distanceCompare( circle.center, Point( a.x, b.y ), circle.radius );
- // Lower right
- double compareLR = distanceCompare( circle.center, Point( b.x, a.y ), circle.radius );
- if ( includeCircleBoundary ) {
- return compareLL <= 0 && compareUR <= 0 && compareUL <= 0 && compareLR <= 0;
- }
- else {
- return compareLL < 0 && compareUR < 0 && compareUL < 0 && compareLR < 0;
+/**
+ * Distance method that compares x or y coords when other direction is zero,
+ * avoids numerical error when distances are very close to radius but axis-aligned.
+ *
+ * An example of the problem is:
+ * (52.0 - 51.9999) - 0.0001 = 3.31965e-15 and 52.0 - 51.9999 > 0.0001 in double arithmetic
+ * but:
+ * 51.9999 + 0.0001 <= 52.0
+ *
+ * This avoids some (but not all!) surprising results in $center queries where points are
+ * (radius + center.x, center.y) or vice-versa.
+ */
+bool distanceWithin(const Point& p1, const Point& p2, double radius) {
+ return distanceCompare(p1, p2, radius) <= 0.0;
+}
+
+// Compare the distance between p1 and p2 with the radius.
+// Floating-point comparison might be inaccurate.
+//
+// > 0: distance is greater than radius
+// = 0: distance equals radius
+// < 0: distance is less than radius
+double distanceCompare(const Point& p1, const Point& p2, double radius) {
+ double a = p2.x - p1.x;
+ double b = p2.y - p1.y;
+
+ if (a == 0) {
+ //
+ // Note: For some, unknown reason, when a 32-bit g++ optimizes this call, the sum is
+ // calculated imprecisely. We need to force the compiler to always evaluate it
+ // correctly, hence the weirdness.
+ //
+ // On some 32-bit linux machines, removing the volatile keyword or calculating the sum
+ // inline will make certain geo tests fail. Of course this check will force volatile
+ // for all 32-bit systems, not just affected systems.
+ if (sizeof(void*) <= 4) {
+ volatile double sum = p2.y > p1.y ? p1.y + radius : p2.y + radius;
+ return p2.y > p1.y ? p2.y - sum : p1.y - sum;
+ } else {
+ // Original math, correct for most systems
+ return p2.y > p1.y ? p2.y - (p1.y + radius) : p1.y - (p2.y + radius);
}
}
- bool circleContainsBox(const Circle& circle, const Box& box) {
- return circleContainsBoxInternal(circle, box, true);
- }
-
- bool circleInteriorContainsBox(const Circle& circle, const Box& box) {
- return circleContainsBoxInternal(circle, box, false);
- }
-
- // Check the intersection by measuring the distance between circle center and box center.
- static bool circleIntersectsWithBoxInternal(const Circle& circle,
- const Box& box,
- bool includeCircleBoundary) {
-
- // NOTE: a circle of zero radius is a point, and there are NO points to intersect inside a
- // zero-radius circle, not even the point itself.
- if (circle.radius == 0.0 && !includeCircleBoundary)
- return false;
-
- /* Collapses the four quadrants down into one.
- * ________
- * r|___B___ \ <- a quarter round corner here. Let's name it "D".
- * | | |
- * h| | |
- * | A |C|
- * |_______|_|
- * w r
- */
-
- Point boxCenter = box.center();
- double dx = abs(circle.center.x - boxCenter.x);
- double dy = abs(circle.center.y - boxCenter.y);
- double w = (box._max.x - box._min.x) / 2;
- double h = (box._max.y - box._min.y) / 2;
- const double& r = circle.radius;
-
- // Check if circle.center is in A, B or C.
- // The circle center could be above the box (B) or right to the box (C), but close enough.
- if (includeCircleBoundary) {
- if ((dx <= w + r && dy <= h) || (dx <= w && dy <= h + r)) return true;
+ if (b == 0) {
+ if (sizeof(void*) <= 4) {
+ volatile double sum = p2.x > p1.x ? p1.x + radius : p2.x + radius;
+ return p2.x > p1.x ? p2.x - sum : p1.x - sum;
} else {
- if ((dx < w + r && dy < h) || (dx < w && dy < h + r)) return true;
+ return p2.x > p1.x ? p2.x - (p1.x + radius) : p1.x - (p2.x + radius);
}
-
- // Now check if circle.center is in the round corner "D".
- double compareResult = distanceCompare(Point(dx, dy), Point(w, h), r);
- return compareResult < 0 || (compareResult == 0 && includeCircleBoundary);
}
- bool circleIntersectsWithBox(const Circle& circle, const Box& box) {
- return circleIntersectsWithBoxInternal(circle, box, true);
- }
+ return sqrt((a * a) + (b * b)) - radius;
+}
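
A minimal reproduction, assuming ordinary IEEE-754 doubles, of the axis-aligned rounding issue described in the comments above; the numbers mirror the example given in the distanceWithin() doc comment:

#include <cstdio>

int main() {
    double centerY = 51.9999, pointY = 52.0, radius = 0.0001;
    // Subtract-then-compare leaves a tiny positive residue ...
    std::printf("%.17g\n", (pointY - centerY) - radius);      // roughly 3.3e-15, not 0
    // ... while add-then-compare, which distanceCompare() uses for axis-aligned cases, is clean.
    std::printf("%d\n", centerY + radius <= pointY ? 1 : 0);  // prints 1
    return 0;
}
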
+
+// note: multiply by earth radius for distance
+double spheredist_rad(const Point& p1, const Point& p2) {
+ // this uses the n-vector formula: http://en.wikipedia.org/wiki/N-vector
+ // If you try to match the code to the formula, note that I inline the cross-product.
+
+ double sinx1(sin(p1.x)), cosx1(cos(p1.x));
+ double siny1(sin(p1.y)), cosy1(cos(p1.y));
+ double sinx2(sin(p2.x)), cosx2(cos(p2.x));
+ double siny2(sin(p2.y)), cosy2(cos(p2.y));
+
+ double cross_prod =
+ (cosy1 * cosx1 * cosy2 * cosx2) + (cosy1 * sinx1 * cosy2 * sinx2) + (siny1 * siny2);
+
+ if (cross_prod >= 1 || cross_prod <= -1) {
+ // fun with floats
+ verify(fabs(cross_prod) - 1 < 1e-6);
+ return cross_prod > 0 ? 0 : M_PI;
+ }
+
+ return acos(cross_prod);
+}
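
In the notation of the code (x = longitude, y = latitude, both already in radians), the accumulated dot product is the cosine of the central angle between the two unit n-vectors:

\cos\theta = \cos y_1 \cos x_1 \cos y_2 \cos x_2
           + \cos y_1 \sin x_1 \cos y_2 \sin x_2
           + \sin y_1 \sin y_2, \qquad \theta = \arccos(\cos\theta)

This reduces to the spherical law of cosines, \cos\theta = \sin y_1 \sin y_2 + \cos y_1 \cos y_2 \cos(x_1 - x_2); multiplying \theta by the sphere's radius gives the arc length.
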
+
+// @param p1 A point on the sphere where x and y are degrees.
+// @param p2 A point on the sphere where x and y are degrees.
+// @return The distance between the two points in RADIANS. Multiply by radius to get arc
+// length.
+double spheredist_deg(const Point& p1, const Point& p2) {
+ return spheredist_rad(Point(deg2rad(p1.x), deg2rad(p1.y)), Point(deg2rad(p2.x), deg2rad(p2.y)));
+}
+
+// Technically lat/long bounds, not really tied to earth radius.
+bool isValidLngLat(double lng, double lat) {
+ return abs(lng) <= 180 && abs(lat) <= 90;
+}
+
+double distance(const Point& p1, const Point& p2) {
+ double a = p1.x - p2.x;
+ double b = p1.y - p2.y;
+
+ // Avoid numerical error if possible...
+ if (a == 0)
+ return abs(b);
+ if (b == 0)
+ return abs(a);
+
+ return sqrt((a * a) + (b * b));
+}
+
+static inline Vector2_d toVector2(const Point& p) {
+ return Vector2_d(p.x, p.y);
+}
+
+// Given a segment (A, B) and a segment (C, D), check whether they intersect.
+bool linesIntersect(const Point& pA, const Point& pB, const Point& pC, const Point& pD) {
+ Vector2_d a = toVector2(pA);
+ Vector2_d b = toVector2(pB);
+ Vector2_d c = toVector2(pC);
+ Vector2_d d = toVector2(pD);
+
+ // The normal of line AB
+ Vector2_d normalAB = (b - a).Ortho();
+
+ // Dot products of AC and the normal of AB
+ // = 0 : C is on the line AB
+ // > 0 : C is on one side
+ // < 0 : C is on the other side
+ double dotProdNormalAB_AC = normalAB.DotProd(c - a);
+ double dotProdNormalAB_AD = normalAB.DotProd(d - a);
+
+    // C and D cannot be on the same side of line AB
+ if (dotProdNormalAB_AC * dotProdNormalAB_AD > 0)
+ return false;
+
+ // AB and CD are on the same line
+ if (dotProdNormalAB_AC == 0 && dotProdNormalAB_AD == 0) {
+ // Test if C or D is on segment AB.
+ return (c - a).DotProd(c - b) <= 0 || (d - a).DotProd(d - b) <= 0;
+ }
+
+ // Check if A and B are on different sides of line CD.
+ Vector2_d normalCD = (d - c).Ortho();
+ double dotProdNormalCD_CA = normalCD.DotProd(a - c);
+ double dotProdNormalCD_CB = normalCD.DotProd(b - c);
+ return dotProdNormalCD_CA * dotProdNormalCD_CB <= 0; // Perhaps A or B is on line CD
+}
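
The DotProd-with-Ortho() expressions above are sign tests: up to the sign convention of Ortho(), each equals the 2-D cross product of the edge direction with the vector to the query point. A standalone sketch of the same side-of-line idea (omitting the collinear-overlap special case the real function handles):

#include <cstdio>

struct V2 { double x, y; };

// Sign of (b - a) x (c - a): > 0 and < 0 are the two sides of line AB, 0 is on the line.
static double orient(const V2& a, const V2& b, const V2& c) {
    return (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x);
}

// Segments AB and CD cross when C and D do not lie strictly on the same side of AB
// and A and B do not lie strictly on the same side of CD.
static bool segmentsCross(const V2& a, const V2& b, const V2& c, const V2& d) {
    return orient(a, b, c) * orient(a, b, d) <= 0 && orient(c, d, a) * orient(c, d, b) <= 0;
}

int main() {
    std::printf("%d\n", segmentsCross({0, 0}, {2, 0}, {1, -1}, {1, 1}) ? 1 : 0);  // 1
    std::printf("%d\n", segmentsCross({0, 0}, {1, 0}, {2, 1}, {2, 2}) ? 1 : 0);   // 0
    return 0;
}
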
+
+static bool circleContainsBoxInternal(const Circle& circle,
+ const Box& box,
+ bool includeCircleBoundary) {
+ // NOTE: a circle of zero radius is a point, and there are NO points contained inside a
+ // zero-radius circle, not even the point itself.
+
+ const Point& a = box._min;
+ const Point& b = box._max;
+ double compareLL = distanceCompare(circle.center, a, circle.radius); // Lower left
+ double compareUR = distanceCompare(circle.center, b, circle.radius); // Upper right
+ // Upper Left
+ double compareUL = distanceCompare(circle.center, Point(a.x, b.y), circle.radius);
+ // Lower right
+ double compareLR = distanceCompare(circle.center, Point(b.x, a.y), circle.radius);
+ if (includeCircleBoundary) {
+ return compareLL <= 0 && compareUR <= 0 && compareUL <= 0 && compareLR <= 0;
+ } else {
+ return compareLL < 0 && compareUR < 0 && compareUL < 0 && compareLR < 0;
+ }
+}
+
+bool circleContainsBox(const Circle& circle, const Box& box) {
+ return circleContainsBoxInternal(circle, box, true);
+}
+
+bool circleInteriorContainsBox(const Circle& circle, const Box& box) {
+ return circleContainsBoxInternal(circle, box, false);
+}
+
+// Check the intersection by measuring the distance between circle center and box center.
+static bool circleIntersectsWithBoxInternal(const Circle& circle,
+ const Box& box,
+ bool includeCircleBoundary) {
+ // NOTE: a circle of zero radius is a point, and there are NO points to intersect inside a
+ // zero-radius circle, not even the point itself.
+ if (circle.radius == 0.0 && !includeCircleBoundary)
+ return false;
+
+ /* Collapses the four quadrants down into one.
+ * ________
+ * r|___B___ \ <- a quarter round corner here. Let's name it "D".
+ * | | |
+ * h| | |
+ * | A |C|
+ * |_______|_|
+ * w r
+ */
- bool circleInteriorIntersectsWithBox(const Circle& circle, const Box& box) {
- return circleIntersectsWithBoxInternal(circle, box, false);
+ Point boxCenter = box.center();
+ double dx = abs(circle.center.x - boxCenter.x);
+ double dy = abs(circle.center.y - boxCenter.y);
+ double w = (box._max.x - box._min.x) / 2;
+ double h = (box._max.y - box._min.y) / 2;
+ const double& r = circle.radius;
+
+ // Check if circle.center is in A, B or C.
+    // The circle center could be above the box (B) or to the right of the box (C), but close enough.
+ if (includeCircleBoundary) {
+ if ((dx <= w + r && dy <= h) || (dx <= w && dy <= h + r))
+ return true;
+ } else {
+ if ((dx < w + r && dy < h) || (dx < w && dy < h + r))
+ return true;
}
- bool lineIntersectsWithBox(const Point& a, const Point& b, const Box& box) {
- Point upperLeft(box._min.x, box._max.y);
- Point lowerRight(box._max.x, box._min.y);
+ // Now check if circle.center is in the round corner "D".
+ double compareResult = distanceCompare(Point(dx, dy), Point(w, h), r);
+ return compareResult < 0 || (compareResult == 0 && includeCircleBoundary);
+}
- return linesIntersect(a, b, upperLeft, box._min)
- || linesIntersect(a, b, box._min, lowerRight)
- || linesIntersect(a, b, lowerRight, box._max)
- || linesIntersect(a, b, box._max, upperLeft);
- }
+bool circleIntersectsWithBox(const Circle& circle, const Box& box) {
+ return circleIntersectsWithBoxInternal(circle, box, true);
+}
- // Doc: The last point specified is always implicitly connected to the first.
- // [[ 0 , 0 ], [ 3 , 6 ], [ 6 , 0 ]]
- bool edgesIntersectsWithBox(const vector<Point>& vertices, const Box& box) {
- for (size_t i = 0; i < vertices.size() - 1; i++) {
- if (lineIntersectsWithBox(vertices[i], vertices[i+1], box)) return true;
- }
- // The last point and first point.
- return lineIntersectsWithBox(vertices[vertices.size() - 1], vertices[0], box);
- }
+bool circleInteriorIntersectsWithBox(const Circle& circle, const Box& box) {
+ return circleIntersectsWithBoxInternal(circle, box, false);
+}
- bool polygonContainsBox(const Polygon& polygon, const Box& box) {
- // All vertices of box have to be inside the polygon.
- if (!polygon.contains(box._min)
- || !polygon.contains(box._max)
- || !polygon.contains(Point(box._min.x, box._max.y))
- || !polygon.contains(Point(box._max.x, box._min.y)))
- return false;
+bool lineIntersectsWithBox(const Point& a, const Point& b, const Box& box) {
+ Point upperLeft(box._min.x, box._max.y);
+ Point lowerRight(box._max.x, box._min.y);
- // No intersection between the polygon edges and the box.
- return !edgesIntersectsWithBox(polygon.points(), box);
- }
+ return linesIntersect(a, b, upperLeft, box._min) ||
+ linesIntersect(a, b, box._min, lowerRight) || linesIntersect(a, b, lowerRight, box._max) ||
+ linesIntersect(a, b, box._max, upperLeft);
+}
- bool polygonIntersectsWithBox(const Polygon& polygon, const Box& box) {
- // 1. Polygon contains the box.
- // Check the relaxed condition that whether the polygon include any vertex of the box.
- if (polygon.contains(box._min)
- || polygon.contains(box._max)
- || polygon.contains(Point(box._min.x, box._max.y))
- || polygon.contains(Point(box._max.x, box._min.y)))
+// Doc: The last point specified is always implicitly connected to the first.
+// [[ 0 , 0 ], [ 3 , 6 ], [ 6 , 0 ]]
+bool edgesIntersectsWithBox(const vector<Point>& vertices, const Box& box) {
+ for (size_t i = 0; i < vertices.size() - 1; i++) {
+ if (lineIntersectsWithBox(vertices[i], vertices[i + 1], box))
return true;
-
- // 2. Box contains polygon.
- // Check the relaxed condition that whether the box include any vertex of the polygon.
- for (vector<Point>::const_iterator it = polygon.points().begin();
- it != polygon.points().end(); it++) {
- if (box.inside(*it)) return true;
- }
-
- // 3. Otherwise they intersect on a portion of both shapes.
- // Edges intersects
- return edgesIntersectsWithBox(polygon.points(), box);
}
+ // The last point and first point.
+ return lineIntersectsWithBox(vertices[vertices.size() - 1], vertices[0], box);
+}
+
+bool polygonContainsBox(const Polygon& polygon, const Box& box) {
+ // All vertices of box have to be inside the polygon.
+ if (!polygon.contains(box._min) || !polygon.contains(box._max) ||
+ !polygon.contains(Point(box._min.x, box._max.y)) ||
+ !polygon.contains(Point(box._max.x, box._min.y)))
+ return false;
+
+ // No intersection between the polygon edges and the box.
+ return !edgesIntersectsWithBox(polygon.points(), box);
+}
+
+bool polygonIntersectsWithBox(const Polygon& polygon, const Box& box) {
+ // 1. Polygon contains the box.
+    // Check the relaxed condition of whether the polygon includes any vertex of the box.
+ if (polygon.contains(box._min) || polygon.contains(box._max) ||
+ polygon.contains(Point(box._min.x, box._max.y)) ||
+ polygon.contains(Point(box._max.x, box._min.y)))
+ return true;
- bool ShapeProjection::supportsProject(const PointWithCRS& point, const CRS crs) {
-
- // Can always trivially project or project from SPHERE->FLAT
- if (point.crs == crs || point.crs == SPHERE)
+ // 2. Box contains polygon.
+    // Check the relaxed condition of whether the box includes any vertex of the polygon.
+ for (vector<Point>::const_iterator it = polygon.points().begin(); it != polygon.points().end();
+ it++) {
+ if (box.inside(*it))
return true;
-
- invariant(point.crs == FLAT);
- // If crs is FLAT, we might be able to upgrade the point to SPHERE if it's a valid SPHERE
- // point (lng/lat in bounds). In this case, we can use FLAT data with SPHERE predicates.
- return isValidLngLat(point.oldPoint.x, point.oldPoint.y);
}
- bool ShapeProjection::supportsProject(const PolygonWithCRS& polygon, const CRS crs) {
- return polygon.crs == crs
- || (polygon.crs == STRICT_SPHERE && crs == SPHERE);
- }
+ // 3. Otherwise they intersect on a portion of both shapes.
+    // Edges intersect.
+ return edgesIntersectsWithBox(polygon.points(), box);
+}
- void ShapeProjection::projectInto(PointWithCRS* point, CRS crs) {
- dassert(supportsProject(*point, crs));
-
- if (point->crs == crs)
- return;
+bool ShapeProjection::supportsProject(const PointWithCRS& point, const CRS crs) {
+ // Can always trivially project or project from SPHERE->FLAT
+ if (point.crs == crs || point.crs == SPHERE)
+ return true;
- if (FLAT == point->crs) {
- // Prohibit projection to STRICT_SPHERE CRS
- invariant(SPHERE == crs);
+ invariant(point.crs == FLAT);
+ // If crs is FLAT, we might be able to upgrade the point to SPHERE if it's a valid SPHERE
+ // point (lng/lat in bounds). In this case, we can use FLAT data with SPHERE predicates.
+ return isValidLngLat(point.oldPoint.x, point.oldPoint.y);
+}
- // Note that it's (lat, lng) for S2 but (lng, lat) for MongoDB.
- S2LatLng latLng =
- S2LatLng::FromDegrees(point->oldPoint.y, point->oldPoint.x).Normalized();
- dassert(latLng.is_valid());
- point->point = latLng.ToPoint();
- point->cell = S2Cell(point->point);
- point->crs = SPHERE;
- return;
- }
+bool ShapeProjection::supportsProject(const PolygonWithCRS& polygon, const CRS crs) {
+ return polygon.crs == crs || (polygon.crs == STRICT_SPHERE && crs == SPHERE);
+}
- // Prohibit projection to STRICT_SPHERE CRS
- invariant(SPHERE == point->crs && FLAT == crs);
- // Just remove the additional spherical information
- point->point = S2Point();
- point->cell = S2Cell();
- point->crs = FLAT;
- }
+void ShapeProjection::projectInto(PointWithCRS* point, CRS crs) {
+ dassert(supportsProject(*point, crs));
- void ShapeProjection::projectInto(PolygonWithCRS* polygon, CRS crs) {
- if (polygon->crs == crs) return;
+ if (point->crs == crs)
+ return;
- // Only project from STRICT_SPHERE to SPHERE
- invariant(STRICT_SPHERE == polygon->crs && SPHERE == crs);
- polygon->crs = SPHERE;
- }
+ if (FLAT == point->crs) {
+ // Prohibit projection to STRICT_SPHERE CRS
+ invariant(SPHERE == crs);
+
+ // Note that it's (lat, lng) for S2 but (lng, lat) for MongoDB.
+ S2LatLng latLng = S2LatLng::FromDegrees(point->oldPoint.y, point->oldPoint.x).Normalized();
+ dassert(latLng.is_valid());
+ point->point = latLng.ToPoint();
+ point->cell = S2Cell(point->point);
+ point->crs = SPHERE;
+ return;
+ }
+
+ // Prohibit projection to STRICT_SPHERE CRS
+ invariant(SPHERE == point->crs && FLAT == crs);
+ // Just remove the additional spherical information
+ point->point = S2Point();
+ point->cell = S2Cell();
+ point->crs = FLAT;
+}
+
+void ShapeProjection::projectInto(PolygonWithCRS* polygon, CRS crs) {
+ if (polygon->crs == crs)
+ return;
+
+ // Only project from STRICT_SPHERE to SPHERE
+ invariant(STRICT_SPHERE == polygon->crs && SPHERE == crs);
+ polygon->crs = SPHERE;
+}
} // namespace mongo
diff --git a/src/mongo/db/geo/shapes.h b/src/mongo/db/geo/shapes.h
index 5eb2f8bceaa..3d8863ff964 100644
--- a/src/mongo/db/geo/shapes.h
+++ b/src/mongo/db/geo/shapes.h
@@ -43,325 +43,312 @@
#include "third_party/s2/s2polyline.h"
#ifndef M_PI
-# define M_PI 3.14159265358979323846
+#define M_PI 3.14159265358979323846
#endif
namespace mongo {
- struct Point;
- struct Circle;
- class Box;
- class Polygon;
+struct Point;
+struct Circle;
+class Box;
+class Polygon;
+
+inline double deg2rad(const double deg) {
+ return deg * (M_PI / 180.0);
+}
+
+inline double rad2deg(const double rad) {
+ return rad * (180.0 / M_PI);
+}
+
+inline double computeXScanDistance(double y, double maxDistDegrees) {
+ // TODO: this overestimates for large maxDistDegrees far from the equator
+ return maxDistDegrees / std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
+ cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
+}
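
computeXScanDistance() converts a distance expressed in degrees of latitude into the span of longitude it can cover at latitude y, clamping to +/-89 degrees to avoid dividing by a cosine near zero. As a formula:

\Delta x(y, d) = \frac{d}{\min\big(\cos(\min(89^\circ,\ y + d)),\ \cos(\max(-89^\circ,\ y - d))\big)}

The denominator takes whichever edge of the scanned latitude band lies closer to a pole, so the result over-covers rather than under-covers, consistent with the TODO above.
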
+
+bool isValidLngLat(double lng, double lat);
+bool linesIntersect(const Point& pA, const Point& pB, const Point& pC, const Point& pD);
+bool circleContainsBox(const Circle& circle, const Box& box);
+bool circleInteriorContainsBox(const Circle& circle, const Box& box);
+bool circleIntersectsWithBox(const Circle& circle, const Box& box);
+bool circleInteriorIntersectsWithBox(const Circle& circle, const Box& box);
+bool edgesIntersectsWithBox(const std::vector<Point>& vertices, const Box& box);
+bool polygonContainsBox(const Polygon& polygon, const Box& box);
+bool polygonIntersectsWithBox(const Polygon& polygon, const Box& box);
- inline double deg2rad(const double deg) { return deg * (M_PI / 180.0); }
-
- inline double rad2deg(const double rad) { return rad * (180.0 / M_PI); }
-
- inline double computeXScanDistance(double y, double maxDistDegrees) {
- // TODO: this overestimates for large maxDistDegrees far from the equator
- return maxDistDegrees / std::min(cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
- cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
- }
+/**
+ * Distance utilities for R2 geometries
+ */
+double distance(const Point& p1, const Point& p2);
+bool distanceWithin(const Point& p1, const Point& p2, double radius);
+double distanceCompare(const Point& p1, const Point& p2, double radius);
+// Still needed for non-wrapping $nearSphere
+double spheredist_rad(const Point& p1, const Point& p2);
+double spheredist_deg(const Point& p1, const Point& p2);
- bool isValidLngLat(double lng, double lat);
- bool linesIntersect(const Point& pA, const Point& pB, const Point& pC, const Point& pD);
- bool circleContainsBox(const Circle& circle, const Box& box);
- bool circleInteriorContainsBox(const Circle& circle, const Box& box);
- bool circleIntersectsWithBox(const Circle& circle, const Box& box);
- bool circleInteriorIntersectsWithBox(const Circle& circle, const Box& box);
- bool edgesIntersectsWithBox(const std::vector<Point>& vertices, const Box& box);
- bool polygonContainsBox(const Polygon& polygon, const Box& box);
- bool polygonIntersectsWithBox(const Polygon& polygon, const Box& box);
- /**
- * Distance utilities for R2 geometries
+/**
+ * Distance utilities for S2 geometries
+ */
+struct S2Distance {
+ static double distanceRad(const S2Point& pointA, const S2Point& pointB);
+ static double minDistanceRad(const S2Point& point, const S2Polyline& line);
+ static double minDistanceRad(const S2Point& point, const S2Polygon& polygon);
+ static double minDistanceRad(const S2Point& point, const S2Cap& cap);
+};
+
+struct Point {
+ Point();
+ Point(double x, double y);
+ explicit Point(const BSONElement& e);
+ explicit Point(const BSONObj& o);
+ std::string toString() const;
+
+ double x;
+ double y;
+};
+
+struct Circle {
+ Circle();
+ Circle(double radius, Point center);
+
+ double radius;
+ Point center;
+};
+
+class Box {
+public:
+ Box();
+ Box(double x, double y, double size);
+ Box(const Point& ptA, const Point& ptB);
+
+ void init(const Point& ptA, const Point& ptB);
+ void init(const Box& other);
+
+ BSONArray toBSON() const;
+ std::string toString() const;
+
+ bool between(double min, double max, double val, double fudge = 0) const;
+ bool onBoundary(double bound, double val, double fudge = 0) const;
+ bool mid(double amin, double amax, double bmin, double bmax, bool min, double* res) const;
+
+ double area() const;
+ double maxDim() const;
+ Point center() const;
+
+ // NOTE: Box boundaries are *inclusive*
+ bool onBoundary(Point p, double fudge = 0) const;
+ bool inside(Point p, double fudge = 0) const;
+ bool inside(double x, double y, double fudge = 0) const;
+ bool contains(const Box& other, double fudge = 0) const;
+ bool intersects(const Box& other) const;
+
+ // Box modifications
+ void truncate(double min, double max);
+ void fudge(double error);
+ void expandToInclude(const Point& pt);
+
+ // TODO: Remove after 2D near dependency goes away
+ double legacyIntersectFraction(const Box& other) const;
+
+ Point _min;
+ Point _max;
+};
+
+class Polygon {
+public:
+ Polygon();
+ Polygon(const std::vector<Point>& points);
+
+ void init(const std::vector<Point>& points);
+ void init(const Polygon& other);
+
+ int size() const;
+
+ bool contains(const Point& p) const;
+
+ /*
+ * Return values:
+ * -1 if no intersection
+ * 0 if maybe an intersection (using fudge)
+ * 1 if there is an intersection
*/
- double distance(const Point& p1, const Point &p2);
- bool distanceWithin(const Point &p1, const Point &p2, double radius);
- double distanceCompare(const Point &p1, const Point &p2, double radius);
- // Still needed for non-wrapping $nearSphere
- double spheredist_rad(const Point& p1, const Point& p2);
- double spheredist_deg(const Point& p1, const Point& p2);
-
-
+ int contains(const Point& p, double fudge) const;
/**
- * Distance utilities for S2 geometries
+ * Get the centroid of the polygon object.
*/
- struct S2Distance {
-
- static double distanceRad(const S2Point& pointA, const S2Point& pointB);
- static double minDistanceRad(const S2Point& point, const S2Polyline& line);
- static double minDistanceRad(const S2Point& point, const S2Polygon& polygon);
- static double minDistanceRad(const S2Point& point, const S2Cap& cap);
-
- };
-
- struct Point {
- Point();
- Point(double x, double y);
- explicit Point(const BSONElement& e);
- explicit Point(const BSONObj& o);
- std::string toString() const;
-
- double x;
- double y;
- };
-
- struct Circle {
- Circle();
- Circle(double radius, Point center);
-
- double radius;
- Point center;
- };
-
- class Box {
- public:
-
- Box();
- Box(double x, double y, double size);
- Box(const Point& ptA, const Point& ptB);
-
- void init(const Point& ptA, const Point& ptB);
- void init(const Box& other);
-
- BSONArray toBSON() const;
- std::string toString() const;
-
- bool between(double min, double max, double val, double fudge = 0) const;
- bool onBoundary(double bound, double val, double fudge = 0) const;
- bool mid(double amin, double amax, double bmin, double bmax, bool min, double* res) const;
-
- double area() const;
- double maxDim() const;
- Point center() const;
-
- // NOTE: Box boundaries are *inclusive*
- bool onBoundary(Point p, double fudge = 0) const;
- bool inside(Point p, double fudge = 0) const;
- bool inside(double x, double y, double fudge = 0) const;
- bool contains(const Box& other, double fudge = 0) const;
- bool intersects(const Box& other) const;
-
- // Box modifications
- void truncate(double min, double max);
- void fudge(double error);
- void expandToInclude(const Point& pt);
-
- // TODO: Remove after 2D near dependency goes away
- double legacyIntersectFraction(const Box& other) const;
-
- Point _min;
- Point _max;
- };
-
- class Polygon {
- public:
-
- Polygon();
- Polygon(const std::vector<Point>& points);
-
- void init(const std::vector<Point>& points);
- void init(const Polygon& other);
-
- int size() const;
-
- bool contains(const Point& p) const;
-
- /*
- * Return values:
- * -1 if no intersection
- * 0 if maybe an intersection (using fudge)
- * 1 if there is an intersection
- */
- int contains(const Point &p, double fudge) const;
-
- /**
- * Get the centroid of the polygon object.
- */
- const Point& centroid() const;
- const Box& bounds() const;
- const std::vector<Point>& points() const { return _points; }
-
- private:
-
- // Only modified on creation and init()
- std::vector<Point> _points;
-
- // Cached attributes of the polygon
- mutable std::unique_ptr<Box> _bounds;
- mutable std::unique_ptr<Point> _centroid;
- };
-
- class R2Region {
- public:
-
- virtual ~R2Region() {
- }
-
- virtual Box getR2Bounds() const = 0;
-
- /**
- * Fast heuristic containment check
- *
- * Returns true if the region definitely contains the box.
- * Returns false if not or if too expensive to find out one way or another.
- */
- virtual bool fastContains(const Box& other) const = 0;
-
- /**
- * Fast heuristic disjoint check
- *
- * Returns true if the region definitely is disjoint from the box.
- * Returns false if not or if too expensive to find out one way or another.
- */
- virtual bool fastDisjoint(const Box& other) const = 0;
- };
-
- // Annulus is used by GeoNear. Both inner and outer circles are inlcuded.
- class R2Annulus : public R2Region {
- public:
-
- R2Annulus();
- R2Annulus(const Point& center, double inner, double outer);
-
- const Point& center() const;
-
- double getInner() const;
- double getOuter() const;
-
- bool contains(const Point& point) const;
-
- // R2Region interface
- Box getR2Bounds() const;
- bool fastContains(const Box& other) const;
- bool fastDisjoint(const Box& other) const;
-
- // For debugging
- std::string toString() const;
-
- private:
-
- Point _center;
- double _inner;
- double _outer;
- };
-
- // Clearly this isn't right but currently it's sufficient.
- enum CRS {
- UNSET,
- FLAT, // Equirectangular flat projection (i.e. trivial long/lat projection to flat map)
- SPHERE, // WGS84
- STRICT_SPHERE // WGS84 with strict winding order
- };
-
- // TODO: Make S2 less integral to these types - additional S2 shapes should be an optimization
- // when our CRS is not projected, i.e. SPHERE for now.
- // Generic shapes (Point, Line, Polygon) should hold the raw coordinate data - right now oldXXX
- // is a misnomer - this is the *original* data and the S2 transformation just an optimization.
-
- struct PointWithCRS {
-
- PointWithCRS() : crs(UNSET) {}
-
- S2Point point;
- S2Cell cell;
- Point oldPoint;
- CRS crs;
- };
-
- struct LineWithCRS {
-
- LineWithCRS() : crs(UNSET) {}
-
- S2Polyline line;
- CRS crs;
- };
-
- struct CapWithCRS {
-
- CapWithCRS() : crs(UNSET) {}
-
- S2Cap cap;
- Circle circle;
- CRS crs;
- };
-
- struct BoxWithCRS {
-
- BoxWithCRS() : crs(UNSET) {}
-
- Box box;
- CRS crs;
- };
-
- struct PolygonWithCRS {
-
- PolygonWithCRS() : crs(UNSET) {}
-
- std::unique_ptr<S2Polygon> s2Polygon;
- // Simple polygons with strict winding order may be bigger or smaller than a hemisphere.
- // Only used for query. We don't support storing/indexing big polygons.
- std::unique_ptr<BigSimplePolygon> bigPolygon;
-
- Polygon oldPolygon;
- CRS crs;
- };
-
- struct MultiPointWithCRS {
-
- MultiPointWithCRS() : crs(UNSET) {}
-
- std::vector<S2Point> points;
- std::vector<S2Cell> cells;
- CRS crs;
- };
+ const Point& centroid() const;
+ const Box& bounds() const;
+ const std::vector<Point>& points() const {
+ return _points;
+ }
- struct MultiLineWithCRS {
+private:
+ // Only modified on creation and init()
+ std::vector<Point> _points;
- MultiLineWithCRS() : crs(UNSET) {}
+ // Cached attributes of the polygon
+ mutable std::unique_ptr<Box> _bounds;
+ mutable std::unique_ptr<Point> _centroid;
+};
- OwnedPointerVector<S2Polyline> lines;
- CRS crs;
- };
+class R2Region {
+public:
+ virtual ~R2Region() {}
- struct MultiPolygonWithCRS {
+ virtual Box getR2Bounds() const = 0;
- MultiPolygonWithCRS() : crs(UNSET) {}
+ /**
+ * Fast heuristic containment check
+ *
+ * Returns true if the region definitely contains the box.
+ * Returns false if not or if too expensive to find out one way or another.
+ */
+ virtual bool fastContains(const Box& other) const = 0;
- OwnedPointerVector<S2Polygon> polygons;
- CRS crs;
- };
+ /**
+ * Fast heuristic disjoint check
+ *
+ * Returns true if the region definitely is disjoint from the box.
+ * Returns false if not or if too expensive to find out one way or another.
+ */
+ virtual bool fastDisjoint(const Box& other) const = 0;
+};
- struct GeometryCollection {
+// Annulus is used by GeoNear. Both inner and outer circles are included.
+class R2Annulus : public R2Region {
+public:
+ R2Annulus();
+ R2Annulus(const Point& center, double inner, double outer);
- std::vector<PointWithCRS> points;
+ const Point& center() const;
- // The amount of indirection here is painful but we can't operator= unique_ptr or
- // OwnedPointerVector.
- OwnedPointerVector<LineWithCRS> lines;
- OwnedPointerVector<PolygonWithCRS> polygons;
- OwnedPointerVector<MultiPointWithCRS> multiPoints;
- OwnedPointerVector<MultiLineWithCRS> multiLines;
- OwnedPointerVector<MultiPolygonWithCRS> multiPolygons;
+ double getInner() const;
+ double getOuter() const;
- bool supportsContains() {
- // Only polygons (and multiPolygons) support containment.
- return (polygons.vector().size() > 0 || multiPolygons.vector().size() > 0);
- }
- };
+ bool contains(const Point& point) const;
+
+ // R2Region interface
+ Box getR2Bounds() const;
+ bool fastContains(const Box& other) const;
+ bool fastDisjoint(const Box& other) const;
+
+ // For debugging
+ std::string toString() const;
+
+private:
+ Point _center;
+ double _inner;
+ double _outer;
+};
+
+// Clearly this isn't right but currently it's sufficient.
+enum CRS {
+ UNSET,
+ FLAT, // Equirectangular flat projection (i.e. trivial long/lat projection to flat map)
+ SPHERE, // WGS84
+ STRICT_SPHERE // WGS84 with strict winding order
+};
+
+// TODO: Make S2 less integral to these types - additional S2 shapes should be an optimization
+// when our CRS is not projected, i.e. SPHERE for now.
+// Generic shapes (Point, Line, Polygon) should hold the raw coordinate data - right now oldXXX
+// is a misnomer - this is the *original* data and the S2 transformation just an optimization.
+
+struct PointWithCRS {
+ PointWithCRS() : crs(UNSET) {}
+
+ S2Point point;
+ S2Cell cell;
+ Point oldPoint;
+ CRS crs;
+};
+
+struct LineWithCRS {
+ LineWithCRS() : crs(UNSET) {}
+
+ S2Polyline line;
+ CRS crs;
+};
+
+struct CapWithCRS {
+ CapWithCRS() : crs(UNSET) {}
+
+ S2Cap cap;
+ Circle circle;
+ CRS crs;
+};
+
+struct BoxWithCRS {
+ BoxWithCRS() : crs(UNSET) {}
+
+ Box box;
+ CRS crs;
+};
+
+struct PolygonWithCRS {
+ PolygonWithCRS() : crs(UNSET) {}
+
+ std::unique_ptr<S2Polygon> s2Polygon;
+ // Simple polygons with strict winding order may be bigger or smaller than a hemisphere.
+ // Only used for query. We don't support storing/indexing big polygons.
+ std::unique_ptr<BigSimplePolygon> bigPolygon;
+
+ Polygon oldPolygon;
+ CRS crs;
+};
+
+struct MultiPointWithCRS {
+ MultiPointWithCRS() : crs(UNSET) {}
+
+ std::vector<S2Point> points;
+ std::vector<S2Cell> cells;
+ CRS crs;
+};
+
+struct MultiLineWithCRS {
+ MultiLineWithCRS() : crs(UNSET) {}
+
+ OwnedPointerVector<S2Polyline> lines;
+ CRS crs;
+};
+
+struct MultiPolygonWithCRS {
+ MultiPolygonWithCRS() : crs(UNSET) {}
+
+ OwnedPointerVector<S2Polygon> polygons;
+ CRS crs;
+};
- //
- // Projection functions - we only project following types for now
- // - Point
- // - Polygon (from STRICT_SPHERE TO SPHERE)
- //
- struct ShapeProjection {
- static bool supportsProject(const PointWithCRS& point, const CRS crs);
- static bool supportsProject(const PolygonWithCRS& polygon, const CRS crs);
- static void projectInto(PointWithCRS* point, CRS crs);
- static void projectInto(PolygonWithCRS* point, CRS crs);
- };
+struct GeometryCollection {
+ std::vector<PointWithCRS> points;
+
+ // The amount of indirection here is painful but we can't operator= unique_ptr or
+ // OwnedPointerVector.
+ OwnedPointerVector<LineWithCRS> lines;
+ OwnedPointerVector<PolygonWithCRS> polygons;
+ OwnedPointerVector<MultiPointWithCRS> multiPoints;
+ OwnedPointerVector<MultiLineWithCRS> multiLines;
+ OwnedPointerVector<MultiPolygonWithCRS> multiPolygons;
+
+ bool supportsContains() {
+ // Only polygons (and multiPolygons) support containment.
+ return (polygons.vector().size() > 0 || multiPolygons.vector().size() > 0);
+ }
+};
+
+//
+// Projection functions - we only project the following types for now
+// - Point
+// - Polygon (from STRICT_SPHERE to SPHERE)
+//
+struct ShapeProjection {
+ static bool supportsProject(const PointWithCRS& point, const CRS crs);
+ static bool supportsProject(const PolygonWithCRS& polygon, const CRS crs);
+ static void projectInto(PointWithCRS* point, CRS crs);
+ static void projectInto(PolygonWithCRS* point, CRS crs);
+};
} // namespace mongo
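The computeXScanDistance helper in the hunk above widens a longitude scan window by the secant of the latitude, so that a range of maxDistDegrees still covers the intended ground distance away from the equator (with the latitude clamped to +/-89 degrees to avoid the poles). A minimal, self-contained C++ sketch of that same formula, kept outside the mongo tree purely for illustration:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    namespace {
    const double kPi = 3.14159265358979323846;

    double deg2rad(double deg) {
        return deg * (kPi / 180.0);
    }

    // Same formula as shapes.h: divide by the smaller cosine at the edges of the
    // latitude window, clamped to +/-89 degrees so the divisor never reaches zero.
    double computeXScanDistance(double y, double maxDistDegrees) {
        return maxDistDegrees / std::min(std::cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
                                         std::cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
    }
    }  // namespace

    int main() {
        std::printf("scan width at the equator:  %f\n", computeXScanDistance(0.0, 1.0));   // ~1.0
        std::printf("scan width at 60 degrees N: %f\n", computeXScanDistance(60.0, 1.0));  // ~2.1
        return 0;
    }

As the TODO in the header notes, this deliberately overestimates for large maxDistDegrees far from the equator.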
diff --git a/src/mongo/db/global_timestamp.cpp b/src/mongo/db/global_timestamp.cpp
index 536d9d17b98..36ac4f539f8 100644
--- a/src/mongo/db/global_timestamp.cpp
+++ b/src/mongo/db/global_timestamp.cpp
@@ -34,48 +34,46 @@
#include "mongo/util/log.h"
namespace {
- mongo::stdx::mutex globalTimestampMutex;
- mongo::Timestamp globalTimestamp(0, 0);
+mongo::stdx::mutex globalTimestampMutex;
+mongo::Timestamp globalTimestamp(0, 0);
- bool skewed(const mongo::Timestamp& val) {
- if (val.getInc() & 0x80000000) {
- mongo::warning() << "clock skew detected prev: " << val.getSecs()
- << " now: " << (unsigned) time(0) << std::endl;
- return true;
- }
-
- return false;
+bool skewed(const mongo::Timestamp& val) {
+ if (val.getInc() & 0x80000000) {
+ mongo::warning() << "clock skew detected prev: " << val.getSecs()
+ << " now: " << (unsigned)time(0) << std::endl;
+ return true;
}
+
+ return false;
+}
}
namespace mongo {
- void setGlobalTimestamp(const Timestamp& newTime) {
- stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
- globalTimestamp = newTime;
- }
-
- Timestamp getLastSetTimestamp() {
- stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
- return globalTimestamp;
- }
+void setGlobalTimestamp(const Timestamp& newTime) {
+ stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
+ globalTimestamp = newTime;
+}
- Timestamp getNextGlobalTimestamp() {
- stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
+Timestamp getLastSetTimestamp() {
+ stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
+ return globalTimestamp;
+}
- const unsigned now = (unsigned) time(0);
- const unsigned globalSecs = globalTimestamp.getSecs();
- if ( globalSecs == now ) {
- globalTimestamp = Timestamp(globalSecs, globalTimestamp.getInc() + 1);
- }
- else if ( now < globalSecs ) {
- globalTimestamp = Timestamp(globalSecs, globalTimestamp.getInc() + 1);
- // separate function to keep out of the hot code path
- fassert(17449, !skewed(globalTimestamp));
- }
- else {
- globalTimestamp = Timestamp(now, 1);
- }
+Timestamp getNextGlobalTimestamp() {
+ stdx::lock_guard<stdx::mutex> lk(globalTimestampMutex);
- return globalTimestamp;
+ const unsigned now = (unsigned)time(0);
+ const unsigned globalSecs = globalTimestamp.getSecs();
+ if (globalSecs == now) {
+ globalTimestamp = Timestamp(globalSecs, globalTimestamp.getInc() + 1);
+ } else if (now < globalSecs) {
+ globalTimestamp = Timestamp(globalSecs, globalTimestamp.getInc() + 1);
+ // separate function to keep out of the hot code path
+ fassert(17449, !skewed(globalTimestamp));
+ } else {
+ globalTimestamp = Timestamp(now, 1);
}
+
+ return globalTimestamp;
+}
}
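getNextGlobalTimestamp in the hunk above keeps a (seconds, increment) pair under globalTimestampMutex: within the same wall-clock second it bumps the increment, when the clock has gone backwards it keeps the stored seconds and keeps counting (tripping the skew fassert once the increment's high bit is set), and otherwise it resets to (now, 1). A rough standalone sketch of just that update rule, with a plain struct standing in for mongo::Timestamp and no locking:

    #include <cstdio>
    #include <ctime>

    struct SimpleTimestamp {
        unsigned secs;
        unsigned inc;
    };

    // Toy version of the update rule in getNextGlobalTimestamp(); the real code
    // holds a mutex around this and fasserts when the increment overflows into
    // the high bit after the clock has moved backwards.
    SimpleTimestamp nextTimestamp(SimpleTimestamp last) {
        const unsigned now = static_cast<unsigned>(std::time(nullptr));
        if (last.secs == now) {
            return {last.secs, last.inc + 1};  // same second: bump the counter
        } else if (now < last.secs) {
            return {last.secs, last.inc + 1};  // clock went backwards: keep counting
        }
        return {now, 1};  // normal case: new second, counter restarts at 1
    }

    int main() {
        SimpleTimestamp ts = {static_cast<unsigned>(std::time(nullptr)), 1};
        ts = nextTimestamp(ts);
        std::printf("%u:%u\n", ts.secs, ts.inc);
        return 0;
    }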
diff --git a/src/mongo/db/global_timestamp.h b/src/mongo/db/global_timestamp.h
index d39a5364fd5..64d1ce551e6 100644
--- a/src/mongo/db/global_timestamp.h
+++ b/src/mongo/db/global_timestamp.h
@@ -31,15 +31,15 @@
#include "mongo/bson/timestamp.h"
namespace mongo {
- void setGlobalTimestamp(const Timestamp& newTime);
+void setGlobalTimestamp(const Timestamp& newTime);
- /**
- * Returns the value of the global Timestamp generated last time or set.
- */
- Timestamp getLastSetTimestamp();
+/**
+ * Returns the value of the global Timestamp that was last generated or set.
+ */
+Timestamp getLastSetTimestamp();
- /**
- * Generates a new and unique Timestamp.
- */
- Timestamp getNextGlobalTimestamp();
+/**
+ * Generates a new and unique Timestamp.
+ */
+Timestamp getNextGlobalTimestamp();
}
diff --git a/src/mongo/db/hasher.cpp b/src/mongo/db/hasher.cpp
index 3ae131a7dee..b0a22380bff 100644
--- a/src/mongo/db/hasher.cpp
+++ b/src/mongo/db/hasher.cpp
@@ -40,79 +40,73 @@
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
- Hasher::Hasher( HashSeed seed ) : _seed( seed ) {
- md5_init( &_md5State );
- md5_append( &_md5State , reinterpret_cast< const md5_byte_t * >( & _seed ) , sizeof( _seed ) );
- }
+Hasher::Hasher(HashSeed seed) : _seed(seed) {
+ md5_init(&_md5State);
+ md5_append(&_md5State, reinterpret_cast<const md5_byte_t*>(&_seed), sizeof(_seed));
+}
- void Hasher::addData( const void * keyData , size_t numBytes ) {
- md5_append( &_md5State , static_cast< const md5_byte_t * >( keyData ), numBytes );
- }
+void Hasher::addData(const void* keyData, size_t numBytes) {
+ md5_append(&_md5State, static_cast<const md5_byte_t*>(keyData), numBytes);
+}
- void Hasher::finish( HashDigest out ) {
- md5_finish( &_md5State , out );
- }
+void Hasher::finish(HashDigest out) {
+ md5_finish(&_md5State, out);
+}
- long long int BSONElementHasher::hash64( const BSONElement& e , HashSeed seed ){
- unique_ptr<Hasher> h( HasherFactory::createHasher( seed ) );
- recursiveHash( h.get() , e , false );
- HashDigest d;
- h->finish(d);
- //HashDigest is actually 16 bytes, but we just get 8 via truncation
- // NOTE: assumes little-endian
- return *reinterpret_cast< long long int * >( d );
- }
+long long int BSONElementHasher::hash64(const BSONElement& e, HashSeed seed) {
+ unique_ptr<Hasher> h(HasherFactory::createHasher(seed));
+ recursiveHash(h.get(), e, false);
+ HashDigest d;
+ h->finish(d);
+ // HashDigest is actually 16 bytes, but we just get 8 via truncation
+ // NOTE: assumes little-endian
+ return *reinterpret_cast<long long int*>(d);
+}
- void BSONElementHasher::recursiveHash( Hasher* h ,
- const BSONElement& e ,
- bool includeFieldName ) {
+void BSONElementHasher::recursiveHash(Hasher* h, const BSONElement& e, bool includeFieldName) {
+ int canonicalType = e.canonicalType();
+ h->addData(&canonicalType, sizeof(canonicalType));
- int canonicalType = e.canonicalType();
- h->addData( &canonicalType , sizeof( canonicalType ) );
+ if (includeFieldName) {
+ h->addData(e.fieldName(), e.fieldNameSize());
+ }
- if ( includeFieldName ){
- h->addData( e.fieldName() , e.fieldNameSize() );
+ if (!e.mayEncapsulate()) {
+ // if there are no embedded objects (subobjects or arrays),
+ // compute the hash, squashing numeric types to 64-bit ints
+ if (e.isNumber()) {
+ long long int i = e.safeNumberLong(); // well-defined for troublesome doubles
+ h->addData(&i, sizeof(i));
+ } else {
+ h->addData(e.value(), e.valuesize());
}
-
- if ( !e.mayEncapsulate() ){
- //if there are no embedded objects (subobjects or arrays),
- //compute the hash, squashing numeric types to 64-bit ints
- if ( e.isNumber() ){
- long long int i = e.safeNumberLong(); //well-defined for troublesome doubles
- h->addData( &i , sizeof( i ) );
- }
- else {
- h->addData( e.value() , e.valuesize() );
- }
+ } else {
+ // else identify the subobject.
+ // hash any preceding stuff (in the case of codeWscope)
+ // then each sub-element
+ // then finish with the EOO element.
+ BSONObj b;
+ if (e.type() == CodeWScope) {
+ h->addData(e.codeWScopeCode(), e.codeWScopeCodeLen());
+ b = e.codeWScopeObject();
+ } else {
+ b = e.embeddedObject();
}
- else {
- //else identify the subobject.
- //hash any preceding stuff (in the case of codeWscope)
- //then each sub-element
- //then finish with the EOO element.
- BSONObj b;
- if ( e.type() == CodeWScope ) {
- h->addData( e.codeWScopeCode() , e.codeWScopeCodeLen() );
- b = e.codeWScopeObject();
- }
- else {
- b = e.embeddedObject();
- }
- BSONObjIterator i(b);
- while( i.moreWithEOO() ) {
- BSONElement el = i.next();
- recursiveHash( h , el , true );
- }
+ BSONObjIterator i(b);
+ while (i.moreWithEOO()) {
+ BSONElement el = i.next();
+ recursiveHash(h, el, true);
}
}
+}
- struct HasherUnitTest : public StartupTest {
- void run() {
- // Hard-coded check to ensure the hash function is consistent across platforms
- BSONObj o = BSON( "check" << 42 );
- verify( BSONElementHasher::hash64( o.firstElement(), 0 ) == -944302157085130861LL );
- }
- } hasherUnitTest;
+struct HasherUnitTest : public StartupTest {
+ void run() {
+ // Hard-coded check to ensure the hash function is consistent across platforms
+ BSONObj o = BSON("check" << 42);
+ verify(BSONElementHasher::hash64(o.firstElement(), 0) == -944302157085130861LL);
+ }
+} hasherUnitTest;
}
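recursiveHash in the hunk above feeds the MD5 state the element's canonical type, optionally its field name, and then either the raw value bytes or, for numeric types, the value squashed to a 64-bit integer via safeNumberLong; that squash is what makes 3, 3LL and 3.1 hash identically in the hasher tests later in this diff. A toy standalone sketch of the squashing step only, using std::hash in place of the seeded MD5 digest and a plain cast in place of safeNumberLong (which additionally handles NaN and out-of-range doubles):

    #include <cstdio>
    #include <functional>

    // Doubles are truncated towards zero to a 64-bit integer before hashing, so
    // numbers that truncate to the same integer collide on purpose.
    std::size_t squashedNumberHash(double value) {
        long long squashed = static_cast<long long>(value);  // stand-in for safeNumberLong
        return std::hash<long long>()(squashed);
    }

    int main() {
        std::printf("hash(3)   = %zu\n", squashedNumberHash(3));
        std::printf("hash(3.1) = %zu\n", squashedNumberHash(3.1));  // equal to hash(3)
        std::printf("hash(4)   = %zu\n", squashedNumberHash(4));    // different
        return 0;
    }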
diff --git a/src/mongo/db/hasher.h b/src/mongo/db/hasher.h
index e5bbb39d116..fd634b69ba5 100644
--- a/src/mongo/db/hasher.h
+++ b/src/mongo/db/hasher.h
@@ -41,82 +41,81 @@
namespace mongo {
- typedef int HashSeed;
- typedef unsigned char HashDigest[16];
-
- class Hasher {
- MONGO_DISALLOW_COPYING(Hasher);
- public:
-
- explicit Hasher( HashSeed seed );
- ~Hasher() { };
-
- //pointer to next part of input key, length in bytes to read
- void addData( const void * keyData , size_t numBytes );
-
- //finish computing the hash, put the result in the digest
- //only call this once per Hasher
- void finish( HashDigest out );
-
- private:
- md5_state_t _md5State;
- HashSeed _seed;
- };
-
- class HasherFactory {
- MONGO_DISALLOW_COPYING(HasherFactory);
- public:
- /* Eventually this may be a more sophisticated factory
- * for creating other hashers, but for now use MD5.
- */
- static Hasher* createHasher( HashSeed seed ) {
- return new Hasher( seed );
- }
-
- private:
- HasherFactory();
- };
-
- class BSONElementHasher {
- MONGO_DISALLOW_COPYING(BSONElementHasher);
- public:
-
- /* The hash function we use can be given a seed, to effectively randomize it
- * by choosing from among a family of hash functions. When it is not specified,
- * use this.
- *
- * WARNING: do not change the hash see value. Hash-based sharding clusters will
- * expect that value to be zero.
- */
- static const int DEFAULT_HASH_SEED = 0;
-
- /* This computes a 64-bit hash of the value part of BSONElement "e",
- * preceded by the seed "seed". Squashes element (and any sub-elements)
- * of the same canonical type, so hash({a:{b:4}}) will be the same
- * as hash({a:{b:4.1}}). In particular, this squashes doubles to 64-bit long
- * ints via truncation, so floating point values round towards 0 to the
- * nearest int representable as a 64-bit long.
- *
- * This function is used in the computation of hashed indexes
- * and hashed shard keys, and thus should not be changed unless
- * the associated "getKeys" and "makeSingleKey" method in the
- * hashindex type is changed accordingly.
- */
- static long long int hash64( const BSONElement& e , HashSeed seed );
-
- /* This incrementally computes the hash of BSONElement "e"
- * using hash function "h". If "includeFieldName" is true,
- * then the name of the field is hashed in between the type of
- * the element and the element value. The hash function "h"
- * is applied recursively to any sub-elements (arrays/sub-documents),
- * squashing elements of the same canonical type.
- * Used as a helper for hash64 above.
- */
- static void recursiveHash( Hasher* h , const BSONElement& e , bool includeFieldName );
-
- private:
- BSONElementHasher();
-
- };
-
+typedef int HashSeed;
+typedef unsigned char HashDigest[16];
+
+class Hasher {
+ MONGO_DISALLOW_COPYING(Hasher);
+
+public:
+ explicit Hasher(HashSeed seed);
+ ~Hasher(){};
+
+ // pointer to next part of input key, length in bytes to read
+ void addData(const void* keyData, size_t numBytes);
+
+ // finish computing the hash, put the result in the digest
+ // only call this once per Hasher
+ void finish(HashDigest out);
+
+private:
+ md5_state_t _md5State;
+ HashSeed _seed;
+};
+
+class HasherFactory {
+ MONGO_DISALLOW_COPYING(HasherFactory);
+
+public:
+ /* Eventually this may be a more sophisticated factory
+ * for creating other hashers, but for now use MD5.
+ */
+ static Hasher* createHasher(HashSeed seed) {
+ return new Hasher(seed);
+ }
+
+private:
+ HasherFactory();
+};
+
+class BSONElementHasher {
+ MONGO_DISALLOW_COPYING(BSONElementHasher);
+
+public:
+ /* The hash function we use can be given a seed, to effectively randomize it
+ * by choosing from among a family of hash functions. When it is not specified,
+ * use this.
+ *
+ * WARNING: do not change the hash seed value. Hash-based sharding clusters will
+ * expect that value to be zero.
+ */
+ static const int DEFAULT_HASH_SEED = 0;
+
+ /* This computes a 64-bit hash of the value part of BSONElement "e",
+ * preceded by the seed "seed". Squashes element (and any sub-elements)
+ * of the same canonical type, so hash({a:{b:4}}) will be the same
+ * as hash({a:{b:4.1}}). In particular, this squashes doubles to 64-bit long
+ * ints via truncation, so floating point values round towards 0 to the
+ * nearest int representable as a 64-bit long.
+ *
+ * This function is used in the computation of hashed indexes
+ * and hashed shard keys, and thus should not be changed unless
+ * the associated "getKeys" and "makeSingleKey" method in the
+ * hashindex type is changed accordingly.
+ */
+ static long long int hash64(const BSONElement& e, HashSeed seed);
+
+ /* This incrementally computes the hash of BSONElement "e"
+ * using hash function "h". If "includeFieldName" is true,
+ * then the name of the field is hashed in between the type of
+ * the element and the element value. The hash function "h"
+ * is applied recursively to any sub-elements (arrays/sub-documents),
+ * squashing elements of the same canonical type.
+ * Used as a helper for hash64 above.
+ */
+ static void recursiveHash(Hasher* h, const BSONElement& e, bool includeFieldName);
+
+private:
+ BSONElementHasher();
+};
}
diff --git a/src/mongo/db/hasher_test.cpp b/src/mongo/db/hasher_test.cpp
index 0966bfbf3f8..4dbf147e768 100644
--- a/src/mongo/db/hasher_test.cpp
+++ b/src/mongo/db/hasher_test.cpp
@@ -38,348 +38,323 @@
namespace mongo {
namespace {
- // Helper methods
- long long hashIt( const BSONObj& object, int seed ) {
- return BSONElementHasher::hash64( object.firstElement(), seed );
- }
- long long hashIt( const BSONObj& object ) {
- int seed = 0;
- return hashIt( object, seed );
- }
-
- // Test different oids hash to different things
- TEST( BSONElementHasher, DifferentOidsAreDifferentHashes ) {
- int seed = 0;
-
- long long int oidHash = BSONElementHasher::hash64(
- BSONObjBuilder().genOID().obj().firstElement() , seed );
- long long int oidHash2 = BSONElementHasher::hash64(
- BSONObjBuilder().genOID().obj().firstElement() , seed );
- long long int oidHash3 = BSONElementHasher::hash64(
- BSONObjBuilder().genOID().obj().firstElement() , seed );
-
- ASSERT_NOT_EQUALS( oidHash , oidHash2 );
- ASSERT_NOT_EQUALS( oidHash , oidHash3 );
- ASSERT_NOT_EQUALS( oidHash3 , oidHash2 );
- }
-
- // Test 32-bit ints, 64-bit ints, doubles hash to same thing
- TEST( BSONElementHasher, ConsistentHashOfIntLongAndDouble ) {
- int i = 3;
- BSONObj p1 = BSON("a" << i);
- long long int intHash = hashIt( p1 );
-
- long long int ilong = 3;
- BSONObj p2 = BSON("a" << ilong);
- long long int longHash = hashIt( p2 );
-
- double d = 3.1;
- BSONObj p3 = BSON("a" << d);
- long long int doubleHash = hashIt( p3 );
-
- ASSERT_EQUALS( intHash, longHash );
- ASSERT_EQUALS( doubleHash, longHash );
- }
-
- // Test different ints don't hash to same thing
- TEST( BSONElementHasher, DifferentIntHashesDiffer ) {
- ASSERT_NOT_EQUALS(
- hashIt( BSON("a" << 3) ) ,
- hashIt( BSON("a" << 4) )
- );
- }
-
- // Test seed makes a difference
- TEST( BSONElementHasher, SeedMatters ) {
- ASSERT_NOT_EQUALS(
- hashIt( BSON("a" << 4), 0 ) ,
- hashIt( BSON("a" << 4), 1 )
- );
- }
-
- // Test strings hash to different things
- TEST( BSONElementHasher, IntAndStringHashesDiffer ) {
- ASSERT_NOT_EQUALS(
- hashIt( BSON("a" << 3) ) ,
- hashIt( BSON("a" << "3") )
- );
- }
-
- // Test regexps and strings hash to different things
- TEST( BSONElementHasher, RegexAndStringHashesDiffer ) {
- BSONObjBuilder builder;
-
- ASSERT_NOT_EQUALS(
- hashIt( BSON("a" << "3") ) ,
- hashIt( builder.appendRegex("a","3").obj() )
- );
- }
-
- // Test arrays and subobject hash to different things
- TEST( BSONElementHasher, ArrayAndSubobjectHashesDiffer ) {
- ASSERT_NOT_EQUALS(
- hashIt( fromjson("{a : {'0' : 0 , '1' : 1}}") ) ,
- hashIt( fromjson("{a : [0,1]}") )
- );
- }
-
- // Testing sub-document grouping
- TEST( BSONElementHasher, SubDocumentGroupingHashesDiffer ) {
- ASSERT_NOT_EQUALS(
- hashIt( fromjson("{x : {a : {}, b : 1}}") ) ,
- hashIt( fromjson("{x : {a : {b : 1}}}") )
- );
- }
-
- // Testing codeWscope scope squashing
- TEST( BSONElementHasher, CodeWithScopeSquashesScopeIntsAndDoubles ) {
- int seed = 0;
-
- BSONObjBuilder b1;
- b1.appendCodeWScope("a","print('this is some stupid code')", BSON("a" << 3));
- BSONObj p10 = b1.obj();
-
- BSONObjBuilder b2;
- b2.appendCodeWScope("a","print('this is some stupid code')", BSON("a" << 3.1));
-
- BSONObjBuilder b3;
- b3.appendCodeWScope("a","print('this is \nsome stupider code')", BSON("a" << 3));
- ASSERT_EQUALS(
- BSONElementHasher::hash64( p10.firstElement() , seed ) ,
- BSONElementHasher::hash64( b2.obj().firstElement() , seed )
- );
- ASSERT_NOT_EQUALS(
- BSONElementHasher::hash64( p10.firstElement() , seed ) ,
- BSONElementHasher::hash64( b3.obj().firstElement() , seed )
- );
- }
-
- // Test some recursive squashing
- TEST( BSONElementHasher, RecursiveSquashingIntsAndDoubles ) {
- ASSERT_EQUALS(
- hashIt( fromjson("{x : {a : 3 , b : [ 3.1, {c : 3 }]}}") ) ,
- hashIt( fromjson("{x : {a : 3.1 , b : [ 3, {c : 3.0}]}}") )
- );
- }
-
- // Test minkey and maxkey don't hash to same thing
- TEST( BSONElementHasher, MinKeyMaxKeyHashesDiffer ) {
- ASSERT_NOT_EQUALS(
- hashIt( BSON("a" << MAXKEY) ) ,
- hashIt( BSON("a" << MINKEY) )
- );
- }
-
- // Test squashing very large doubles and very small doubles
- TEST( BSONElementHasher, VeryLargeAndSmallDoubles ) {
- long long maxInt = std::numeric_limits<long long>::max();
- double smallerDouble = maxInt/2;
- double biggerDouble = ( (double)maxInt )*( (double)maxInt );
- ASSERT_NOT_EQUALS(
- hashIt( BSON("a" << maxInt ) ) ,
- hashIt( BSON("a" << smallerDouble ) )
- );
- ASSERT_EQUALS(
- hashIt( BSON("a" << maxInt ) ) ,
- hashIt( BSON("a" << biggerDouble ) )
- );
-
- long long minInt = std::numeric_limits<long long>::min();
- double negativeDouble = -( (double)maxInt )*( (double)maxInt );
- ASSERT_EQUALS(
- hashIt( BSON("a" << minInt ) ) ,
- hashIt( BSON("a" << negativeDouble ) )
- );
- }
-
- // Remaining tests are hard-coded checks to ensure the hash function is
- // consistent across platforms and server versions
- //
- // All of the values in the remaining tests have been determined experimentally.
- TEST( BSONElementHasher, HashIntOrLongOrDouble ) {
- BSONObj o = BSON( "check" << 42 );
- ASSERT_EQUALS( hashIt( o ), -944302157085130861LL );
- o = BSON( "check" << 42.123 );
- ASSERT_EQUALS( hashIt( o ), -944302157085130861LL );
- o = BSON( "check" << (long long) 42 );
- ASSERT_EQUALS( hashIt( o ), -944302157085130861LL );
-
- o = BSON( "check" << 0 );
- ASSERT_EQUALS( hashIt( o ), 4854801880128277513LL );
- o = BSON( "check" << 0.456 );
- ASSERT_EQUALS( hashIt( o ), 4854801880128277513LL );
- o = BSON( "check" << (long long) 0 );
- ASSERT_EQUALS( hashIt( o ), 4854801880128277513LL );
- // NAN is treated as zero.
- o = BSON( "check" << std::numeric_limits<double>::signaling_NaN() );
- ASSERT_EQUALS( hashIt( o ), 4854801880128277513LL );
- o = BSON( "check" << std::numeric_limits<double>::quiet_NaN() );
- ASSERT_EQUALS( hashIt( o ), 4854801880128277513LL );
-
- o = BSON( "check" << 1 );
- ASSERT_EQUALS( hashIt( o ), 5902408780260971510LL );
- o = BSON( "check" << 1.987 );
- ASSERT_EQUALS( hashIt( o ), 5902408780260971510LL );
- o = BSON( "check" << (long long) 1 );
- ASSERT_EQUALS( hashIt( o ), 5902408780260971510LL );
-
- o = BSON( "check" << -1 );
- ASSERT_EQUALS( hashIt( o ), 1140205862565771219LL );
- o = BSON( "check" << -1.789 );
- ASSERT_EQUALS( hashIt( o ), 1140205862565771219LL );
- o = BSON( "check" << (long long) -1 );
- ASSERT_EQUALS( hashIt( o ), 1140205862565771219LL );
-
- o = BSON( "check" << std::numeric_limits<int>::min() );
- ASSERT_EQUALS( hashIt( o ), 6165898260261354870LL );
- o = BSON( "check" << (double) std::numeric_limits<int>::min() );
- ASSERT_EQUALS( hashIt( o ), 6165898260261354870LL );
- o = BSON( "check" << (long long) std::numeric_limits<int>::min() );
- ASSERT_EQUALS( hashIt( o ), 6165898260261354870LL );
-
- o = BSON( "check" << std::numeric_limits<int>::max() );
- ASSERT_EQUALS( hashIt( o ), 1143184177162245883LL );
- o = BSON( "check" << (double) std::numeric_limits<int>::max() );
- ASSERT_EQUALS( hashIt( o ), 1143184177162245883LL );
- o = BSON( "check" << (long long) std::numeric_limits<int>::max() );
- ASSERT_EQUALS( hashIt( o ), 1143184177162245883LL );
-
- // Large/small double values.
- ASSERT( std::numeric_limits<long long>::max() < std::numeric_limits<double>::max() );
- o = BSON( "check" << std::numeric_limits<double>::max() );
- ASSERT_EQUALS( hashIt( o ), 921523596458303250LL );
- o = BSON( "check" << std::numeric_limits<long long>::max() ); // 9223372036854775807
- ASSERT_EQUALS( hashIt( o ), 921523596458303250LL );
-
- // Have to create our own small double.
- // std::numeric_limits<double>::lowest() - Not available until C++11
- // std::numeric_limits<double>::min() - Closest positive value to zero, not most negative.
- double smallDouble = - std::numeric_limits<double>::max();
- ASSERT( smallDouble < static_cast<double>( std::numeric_limits<long long>::min() ) );
- o = BSON( "check" << smallDouble );
- ASSERT_EQUALS( hashIt( o ), 4532067210535695462LL );
- o = BSON( "check" << std::numeric_limits<long long>::min() ); // -9223372036854775808
- ASSERT_EQUALS( hashIt( o ), 4532067210535695462LL );
- }
-
- TEST( BSONElementHasher, HashMinKey ) {
- BSONObj o = BSON( "check" << MINKEY );
- ASSERT_EQUALS( hashIt( o ), 7961148599568647290LL );
- }
-
- TEST( BSONElementHasher, HashMaxKey ) {
- BSONObj o = BSON( "check" << MAXKEY );
- ASSERT_EQUALS( hashIt( o ), 5504842513779440750LL );
- }
-
- TEST( BSONElementHasher, HashUndefined ) {
- BSONObj o = BSON( "check" << BSONUndefined );
- ASSERT_EQUALS( hashIt( o ), 40158834000849533LL );
- }
-
- TEST( BSONElementHasher, HashNull ) {
- BSONObj o = BSON( "check" << BSONNULL );
- ASSERT_EQUALS( hashIt( o ), 2338878944348059895LL );
- }
-
- TEST( BSONElementHasher, HashString ) {
- BSONObj o = BSON( "check" << "abc" );
- ASSERT_EQUALS( hashIt( o ), 8478485326885698097LL );
- o = BSON( "check" << BSONSymbol( "abc" ) );
- ASSERT_EQUALS( hashIt( o ), 8478485326885698097LL );
-
- o = BSON( "check" << "" );
- ASSERT_EQUALS( hashIt( o ), 2049396243249673340LL );
- o = BSON( "check" << BSONSymbol( "" ) );
- ASSERT_EQUALS( hashIt( o ), 2049396243249673340LL );
- }
-
- TEST( BSONElementHasher, HashObject ) {
- BSONObj o = BSON( "check" << BSON( "a" << "abc" << "b" << 123LL ) );
- ASSERT_EQUALS( hashIt( o ), 4771603801758380216LL );
-
- o = BSON( "check" << BSONObj() );
- ASSERT_EQUALS( hashIt( o ), 7980500913326740417LL );
- }
-
- TEST( BSONElementHasher, HashArray ) {
- BSONObj o = BSON( "check" << BSON_ARRAY( "bar" << "baz" << "qux" ) );
- ASSERT_EQUALS( hashIt( o ), -2938911267422831539LL );
-
- o = BSON( "check" << BSONArray() );
- ASSERT_EQUALS( hashIt( o ), 8849948234993459283LL );
- }
-
- TEST( BSONElementHasher, HashBinary ) {
- uint8_t bytes[] = { 0, 1, 2, 3, 4, 6 };
- BSONObj o = BSON( "check" << BSONBinData( bytes, 6, BinDataGeneral ) );
- ASSERT_EQUALS( hashIt( o ), 7252465090394235301LL );
-
- o = BSON( "check" << BSONBinData( bytes, 6, bdtCustom ) );
- ASSERT_EQUALS( hashIt( o ), 5736670452907618262LL );
-
- uint8_t uuidBytes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
- o = BSON( "check" << BSONBinData( uuidBytes, 16, newUUID ) );
- ASSERT_EQUALS( hashIt( o ), 6084661258071355978LL );
- }
-
- TEST( BSONElementHasher, HashObjectId ) {
- BSONObj o = BSON( "check" << OID( "010203040506070809101112" ) );
- ASSERT_EQUALS( hashIt( o ), -5588663249627035708LL );
-
- o = BSON( "check" << OID( "000000000000000000000000" ) );
- ASSERT_EQUALS( hashIt( o ), -4293118519463489418LL );
- }
-
- TEST( BSONElementHasher, HashBoolean ) {
- BSONObj o = BSON( "check" << true );
- ASSERT_EQUALS( hashIt( o ), 6405873908747105701LL );
-
- o = BSON( "check" << false );
- ASSERT_EQUALS( hashIt( o ), 6289544573401934092LL );
- }
-
- TEST( BSONElementHasher, HashTimeStamp ) {
- BSONObjBuilder builder1;
- BSONObjBuilder builder2;
-
- BSONObj o = BSON( "check" << Date_t::fromMillisSinceEpoch( 0x5566778811223344LL ) );
- ASSERT_EQUALS( hashIt( o ), 4476222765095560467LL );
- o = builder1.append( "check", Timestamp(0x55667788LL, 0x11223344LL) ).obj();
- ASSERT_EQUALS( hashIt( o ), 4873046866288452390LL );
-
- o = BSON( "check" << Date_t() );
- ASSERT_EQUALS( hashIt( o ), -1178696894582842035LL );
- o = builder2.appendTimestamp( "check", 0 ).obj();
- ASSERT_EQUALS( hashIt( o ), -7867208682377458672LL );
- }
-
- TEST( BSONElementHasher, HashRegEx ) {
- BSONObj o = BSON( "check" << BSONRegEx( "mongodb" ) );
- ASSERT_EQUALS( hashIt( o ), -7275792090268217043LL );
-
- o = BSON( "check" << BSONRegEx( ".*", "i" ) );
- ASSERT_EQUALS( hashIt( o ), 7095855029187981886LL );
- }
-
- TEST( BSONElementHasher, HashDBRef ) {
- BSONObj o = BSON( "check" << BSONDBRef( "c", OID( "010203040506070809101112" ) ) );
- ASSERT_EQUALS( hashIt( o ), 940175826736461384LL );
-
- o = BSON( "check" << BSONDBRef( "db.c", OID( "010203040506070809101112" ) ) );
- ASSERT_EQUALS( hashIt( o ), 2426768198104018194LL );
- }
-
- TEST( BSONElementHasher, HashCode ) {
- BSONObj o = BSON( "check" << BSONCode( "func f() { return 1; }" ) );
- ASSERT_EQUALS( hashIt( o ), 6861638109178014270LL );
- }
-
- TEST( BSONElementHasher, HashCodeWScope ) {
- BSONObj o = BSON( "check" <<
- BSONCodeWScope( "func f() { return 1; }", BSON( "c" << true ) ) );
- ASSERT_EQUALS( hashIt( o ), 501342939894575968LL );
- }
-
-} // namespace
-} // namespace mongo
+// Helper methods
+long long hashIt(const BSONObj& object, int seed) {
+ return BSONElementHasher::hash64(object.firstElement(), seed);
+}
+long long hashIt(const BSONObj& object) {
+ int seed = 0;
+ return hashIt(object, seed);
+}
+
+// Test different oids hash to different things
+TEST(BSONElementHasher, DifferentOidsAreDifferentHashes) {
+ int seed = 0;
+
+ long long int oidHash =
+ BSONElementHasher::hash64(BSONObjBuilder().genOID().obj().firstElement(), seed);
+ long long int oidHash2 =
+ BSONElementHasher::hash64(BSONObjBuilder().genOID().obj().firstElement(), seed);
+ long long int oidHash3 =
+ BSONElementHasher::hash64(BSONObjBuilder().genOID().obj().firstElement(), seed);
+
+ ASSERT_NOT_EQUALS(oidHash, oidHash2);
+ ASSERT_NOT_EQUALS(oidHash, oidHash3);
+ ASSERT_NOT_EQUALS(oidHash3, oidHash2);
+}
+
+// Test 32-bit ints, 64-bit ints, doubles hash to same thing
+TEST(BSONElementHasher, ConsistentHashOfIntLongAndDouble) {
+ int i = 3;
+ BSONObj p1 = BSON("a" << i);
+ long long int intHash = hashIt(p1);
+
+ long long int ilong = 3;
+ BSONObj p2 = BSON("a" << ilong);
+ long long int longHash = hashIt(p2);
+
+ double d = 3.1;
+ BSONObj p3 = BSON("a" << d);
+ long long int doubleHash = hashIt(p3);
+
+ ASSERT_EQUALS(intHash, longHash);
+ ASSERT_EQUALS(doubleHash, longHash);
+}
+
+// Test different ints don't hash to same thing
+TEST(BSONElementHasher, DifferentIntHashesDiffer) {
+ ASSERT_NOT_EQUALS(hashIt(BSON("a" << 3)), hashIt(BSON("a" << 4)));
+}
+
+// Test seed makes a difference
+TEST(BSONElementHasher, SeedMatters) {
+ ASSERT_NOT_EQUALS(hashIt(BSON("a" << 4), 0), hashIt(BSON("a" << 4), 1));
+}
+
+// Test strings hash to different things
+TEST(BSONElementHasher, IntAndStringHashesDiffer) {
+ ASSERT_NOT_EQUALS(hashIt(BSON("a" << 3)),
+ hashIt(BSON("a"
+ << "3")));
+}
+
+// Test regexps and strings hash to different things
+TEST(BSONElementHasher, RegexAndStringHashesDiffer) {
+ BSONObjBuilder builder;
+
+ ASSERT_NOT_EQUALS(hashIt(BSON("a"
+ << "3")),
+ hashIt(builder.appendRegex("a", "3").obj()));
+}
+
+// Test arrays and subobject hash to different things
+TEST(BSONElementHasher, ArrayAndSubobjectHashesDiffer) {
+ ASSERT_NOT_EQUALS(hashIt(fromjson("{a : {'0' : 0 , '1' : 1}}")),
+ hashIt(fromjson("{a : [0,1]}")));
+}
+
+// Testing sub-document grouping
+TEST(BSONElementHasher, SubDocumentGroupingHashesDiffer) {
+ ASSERT_NOT_EQUALS(hashIt(fromjson("{x : {a : {}, b : 1}}")),
+ hashIt(fromjson("{x : {a : {b : 1}}}")));
+}
+
+// Testing codeWscope scope squashing
+TEST(BSONElementHasher, CodeWithScopeSquashesScopeIntsAndDoubles) {
+ int seed = 0;
+
+ BSONObjBuilder b1;
+ b1.appendCodeWScope("a", "print('this is some stupid code')", BSON("a" << 3));
+ BSONObj p10 = b1.obj();
+
+ BSONObjBuilder b2;
+ b2.appendCodeWScope("a", "print('this is some stupid code')", BSON("a" << 3.1));
+
+ BSONObjBuilder b3;
+ b3.appendCodeWScope("a", "print('this is \nsome stupider code')", BSON("a" << 3));
+ ASSERT_EQUALS(BSONElementHasher::hash64(p10.firstElement(), seed),
+ BSONElementHasher::hash64(b2.obj().firstElement(), seed));
+ ASSERT_NOT_EQUALS(BSONElementHasher::hash64(p10.firstElement(), seed),
+ BSONElementHasher::hash64(b3.obj().firstElement(), seed));
+}
+
+// Test some recursive squashing
+TEST(BSONElementHasher, RecursiveSquashingIntsAndDoubles) {
+ ASSERT_EQUALS(hashIt(fromjson("{x : {a : 3 , b : [ 3.1, {c : 3 }]}}")),
+ hashIt(fromjson("{x : {a : 3.1 , b : [ 3, {c : 3.0}]}}")));
+}
+
+// Test minkey and maxkey don't hash to same thing
+TEST(BSONElementHasher, MinKeyMaxKeyHashesDiffer) {
+ ASSERT_NOT_EQUALS(hashIt(BSON("a" << MAXKEY)), hashIt(BSON("a" << MINKEY)));
+}
+
+// Test squashing very large doubles and very small doubles
+TEST(BSONElementHasher, VeryLargeAndSmallDoubles) {
+ long long maxInt = std::numeric_limits<long long>::max();
+ double smallerDouble = maxInt / 2;
+ double biggerDouble = ((double)maxInt) * ((double)maxInt);
+ ASSERT_NOT_EQUALS(hashIt(BSON("a" << maxInt)), hashIt(BSON("a" << smallerDouble)));
+ ASSERT_EQUALS(hashIt(BSON("a" << maxInt)), hashIt(BSON("a" << biggerDouble)));
+
+ long long minInt = std::numeric_limits<long long>::min();
+ double negativeDouble = -((double)maxInt) * ((double)maxInt);
+ ASSERT_EQUALS(hashIt(BSON("a" << minInt)), hashIt(BSON("a" << negativeDouble)));
+}
+
+// Remaining tests are hard-coded checks to ensure the hash function is
+// consistent across platforms and server versions
+//
+// All of the values in the remaining tests have been determined experimentally.
+TEST(BSONElementHasher, HashIntOrLongOrDouble) {
+ BSONObj o = BSON("check" << 42);
+ ASSERT_EQUALS(hashIt(o), -944302157085130861LL);
+ o = BSON("check" << 42.123);
+ ASSERT_EQUALS(hashIt(o), -944302157085130861LL);
+ o = BSON("check" << (long long)42);
+ ASSERT_EQUALS(hashIt(o), -944302157085130861LL);
+
+ o = BSON("check" << 0);
+ ASSERT_EQUALS(hashIt(o), 4854801880128277513LL);
+ o = BSON("check" << 0.456);
+ ASSERT_EQUALS(hashIt(o), 4854801880128277513LL);
+ o = BSON("check" << (long long)0);
+ ASSERT_EQUALS(hashIt(o), 4854801880128277513LL);
+ // NAN is treated as zero.
+ o = BSON("check" << std::numeric_limits<double>::signaling_NaN());
+ ASSERT_EQUALS(hashIt(o), 4854801880128277513LL);
+ o = BSON("check" << std::numeric_limits<double>::quiet_NaN());
+ ASSERT_EQUALS(hashIt(o), 4854801880128277513LL);
+
+ o = BSON("check" << 1);
+ ASSERT_EQUALS(hashIt(o), 5902408780260971510LL);
+ o = BSON("check" << 1.987);
+ ASSERT_EQUALS(hashIt(o), 5902408780260971510LL);
+ o = BSON("check" << (long long)1);
+ ASSERT_EQUALS(hashIt(o), 5902408780260971510LL);
+
+ o = BSON("check" << -1);
+ ASSERT_EQUALS(hashIt(o), 1140205862565771219LL);
+ o = BSON("check" << -1.789);
+ ASSERT_EQUALS(hashIt(o), 1140205862565771219LL);
+ o = BSON("check" << (long long)-1);
+ ASSERT_EQUALS(hashIt(o), 1140205862565771219LL);
+
+ o = BSON("check" << std::numeric_limits<int>::min());
+ ASSERT_EQUALS(hashIt(o), 6165898260261354870LL);
+ o = BSON("check" << (double)std::numeric_limits<int>::min());
+ ASSERT_EQUALS(hashIt(o), 6165898260261354870LL);
+ o = BSON("check" << (long long)std::numeric_limits<int>::min());
+ ASSERT_EQUALS(hashIt(o), 6165898260261354870LL);
+
+ o = BSON("check" << std::numeric_limits<int>::max());
+ ASSERT_EQUALS(hashIt(o), 1143184177162245883LL);
+ o = BSON("check" << (double)std::numeric_limits<int>::max());
+ ASSERT_EQUALS(hashIt(o), 1143184177162245883LL);
+ o = BSON("check" << (long long)std::numeric_limits<int>::max());
+ ASSERT_EQUALS(hashIt(o), 1143184177162245883LL);
+
+ // Large/small double values.
+ ASSERT(std::numeric_limits<long long>::max() < std::numeric_limits<double>::max());
+ o = BSON("check" << std::numeric_limits<double>::max());
+ ASSERT_EQUALS(hashIt(o), 921523596458303250LL);
+ o = BSON("check" << std::numeric_limits<long long>::max()); // 9223372036854775807
+ ASSERT_EQUALS(hashIt(o), 921523596458303250LL);
+
+ // Have to create our own small double.
+ // std::numeric_limits<double>::lowest() - Not available until C++11
+ // std::numeric_limits<double>::min() - Closest positive value to zero, not most negative.
+ double smallDouble = -std::numeric_limits<double>::max();
+ ASSERT(smallDouble < static_cast<double>(std::numeric_limits<long long>::min()));
+ o = BSON("check" << smallDouble);
+ ASSERT_EQUALS(hashIt(o), 4532067210535695462LL);
+ o = BSON("check" << std::numeric_limits<long long>::min()); // -9223372036854775808
+ ASSERT_EQUALS(hashIt(o), 4532067210535695462LL);
+}
+
+TEST(BSONElementHasher, HashMinKey) {
+ BSONObj o = BSON("check" << MINKEY);
+ ASSERT_EQUALS(hashIt(o), 7961148599568647290LL);
+}
+
+TEST(BSONElementHasher, HashMaxKey) {
+ BSONObj o = BSON("check" << MAXKEY);
+ ASSERT_EQUALS(hashIt(o), 5504842513779440750LL);
+}
+
+TEST(BSONElementHasher, HashUndefined) {
+ BSONObj o = BSON("check" << BSONUndefined);
+ ASSERT_EQUALS(hashIt(o), 40158834000849533LL);
+}
+
+TEST(BSONElementHasher, HashNull) {
+ BSONObj o = BSON("check" << BSONNULL);
+ ASSERT_EQUALS(hashIt(o), 2338878944348059895LL);
+}
+
+TEST(BSONElementHasher, HashString) {
+ BSONObj o = BSON("check"
+ << "abc");
+ ASSERT_EQUALS(hashIt(o), 8478485326885698097LL);
+ o = BSON("check" << BSONSymbol("abc"));
+ ASSERT_EQUALS(hashIt(o), 8478485326885698097LL);
+
+ o = BSON("check"
+ << "");
+ ASSERT_EQUALS(hashIt(o), 2049396243249673340LL);
+ o = BSON("check" << BSONSymbol(""));
+ ASSERT_EQUALS(hashIt(o), 2049396243249673340LL);
+}
+
+TEST(BSONElementHasher, HashObject) {
+ BSONObj o = BSON("check" << BSON("a"
+ << "abc"
+ << "b" << 123LL));
+ ASSERT_EQUALS(hashIt(o), 4771603801758380216LL);
+
+ o = BSON("check" << BSONObj());
+ ASSERT_EQUALS(hashIt(o), 7980500913326740417LL);
+}
+
+TEST(BSONElementHasher, HashArray) {
+ BSONObj o = BSON("check" << BSON_ARRAY("bar"
+ << "baz"
+ << "qux"));
+ ASSERT_EQUALS(hashIt(o), -2938911267422831539LL);
+
+ o = BSON("check" << BSONArray());
+ ASSERT_EQUALS(hashIt(o), 8849948234993459283LL);
+}
+
+TEST(BSONElementHasher, HashBinary) {
+ uint8_t bytes[] = {0, 1, 2, 3, 4, 6};
+ BSONObj o = BSON("check" << BSONBinData(bytes, 6, BinDataGeneral));
+ ASSERT_EQUALS(hashIt(o), 7252465090394235301LL);
+
+ o = BSON("check" << BSONBinData(bytes, 6, bdtCustom));
+ ASSERT_EQUALS(hashIt(o), 5736670452907618262LL);
+
+ uint8_t uuidBytes[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ o = BSON("check" << BSONBinData(uuidBytes, 16, newUUID));
+ ASSERT_EQUALS(hashIt(o), 6084661258071355978LL);
+}
+
+TEST(BSONElementHasher, HashObjectId) {
+ BSONObj o = BSON("check" << OID("010203040506070809101112"));
+ ASSERT_EQUALS(hashIt(o), -5588663249627035708LL);
+
+ o = BSON("check" << OID("000000000000000000000000"));
+ ASSERT_EQUALS(hashIt(o), -4293118519463489418LL);
+}
+
+TEST(BSONElementHasher, HashBoolean) {
+ BSONObj o = BSON("check" << true);
+ ASSERT_EQUALS(hashIt(o), 6405873908747105701LL);
+
+ o = BSON("check" << false);
+ ASSERT_EQUALS(hashIt(o), 6289544573401934092LL);
+}
+
+TEST(BSONElementHasher, HashTimeStamp) {
+ BSONObjBuilder builder1;
+ BSONObjBuilder builder2;
+
+ BSONObj o = BSON("check" << Date_t::fromMillisSinceEpoch(0x5566778811223344LL));
+ ASSERT_EQUALS(hashIt(o), 4476222765095560467LL);
+ o = builder1.append("check", Timestamp(0x55667788LL, 0x11223344LL)).obj();
+ ASSERT_EQUALS(hashIt(o), 4873046866288452390LL);
+
+ o = BSON("check" << Date_t());
+ ASSERT_EQUALS(hashIt(o), -1178696894582842035LL);
+ o = builder2.appendTimestamp("check", 0).obj();
+ ASSERT_EQUALS(hashIt(o), -7867208682377458672LL);
+}
+
+TEST(BSONElementHasher, HashRegEx) {
+ BSONObj o = BSON("check" << BSONRegEx("mongodb"));
+ ASSERT_EQUALS(hashIt(o), -7275792090268217043LL);
+
+ o = BSON("check" << BSONRegEx(".*", "i"));
+ ASSERT_EQUALS(hashIt(o), 7095855029187981886LL);
+}
+
+TEST(BSONElementHasher, HashDBRef) {
+ BSONObj o = BSON("check" << BSONDBRef("c", OID("010203040506070809101112")));
+ ASSERT_EQUALS(hashIt(o), 940175826736461384LL);
+
+ o = BSON("check" << BSONDBRef("db.c", OID("010203040506070809101112")));
+ ASSERT_EQUALS(hashIt(o), 2426768198104018194LL);
+}
+
+TEST(BSONElementHasher, HashCode) {
+ BSONObj o = BSON("check" << BSONCode("func f() { return 1; }"));
+ ASSERT_EQUALS(hashIt(o), 6861638109178014270LL);
+}
+
+TEST(BSONElementHasher, HashCodeWScope) {
+ BSONObj o = BSON("check" << BSONCodeWScope("func f() { return 1; }", BSON("c" << true)));
+ ASSERT_EQUALS(hashIt(o), 501342939894575968LL);
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/index/2d_access_method.cpp b/src/mongo/db/index/2d_access_method.cpp
index db421cb0ffe..531df3369b1 100644
--- a/src/mongo/db/index/2d_access_method.cpp
+++ b/src/mongo/db/index/2d_access_method.cpp
@@ -39,23 +39,21 @@
namespace mongo {
- TwoDAccessMethod::TwoDAccessMethod(IndexCatalogEntry* btreeState,
- SortedDataInterface* btree)
- : IndexAccessMethod(btreeState, btree) {
-
- const IndexDescriptor* descriptor = btreeState->descriptor();
-
- ExpressionParams::parseTwoDParams(descriptor->infoObj(), &_params);
- }
-
- /** Finds the key objects to put in an index */
- void TwoDAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::get2DKeys(obj, _params, keys, NULL);
- }
-
- /** Finds all locations in a geo-indexed object */
- void TwoDAccessMethod::getKeys(const BSONObj& obj, vector<BSONObj>& locs) const {
- ExpressionKeysPrivate::get2DKeys(obj, _params, NULL, &locs);
- }
+TwoDAccessMethod::TwoDAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ const IndexDescriptor* descriptor = btreeState->descriptor();
+
+ ExpressionParams::parseTwoDParams(descriptor->infoObj(), &_params);
+}
+
+/** Finds the key objects to put in an index */
+void TwoDAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::get2DKeys(obj, _params, keys, NULL);
+}
+
+/** Finds all locations in a geo-indexed object */
+void TwoDAccessMethod::getKeys(const BSONObj& obj, vector<BSONObj>& locs) const {
+ ExpressionKeysPrivate::get2DKeys(obj, _params, NULL, &locs);
+}
} // namespace mongo
diff --git a/src/mongo/db/index/2d_access_method.h b/src/mongo/db/index/2d_access_method.h
index 208df9235cd..644b6addc5b 100644
--- a/src/mongo/db/index/2d_access_method.h
+++ b/src/mongo/db/index/2d_access_method.h
@@ -35,25 +35,28 @@
namespace mongo {
- class IndexCatalogEntry;
- class IndexDescriptor;
- struct TwoDIndexingParams;
+class IndexCatalogEntry;
+class IndexDescriptor;
+struct TwoDIndexingParams;
- class TwoDAccessMethod : public IndexAccessMethod {
- public:
- TwoDAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+class TwoDAccessMethod : public IndexAccessMethod {
+public:
+ TwoDAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- private:
+private:
+ const IndexDescriptor* getDescriptor() {
+ return _descriptor;
+ }
+ TwoDIndexingParams& getParams() {
+ return _params;
+ }
- const IndexDescriptor* getDescriptor() { return _descriptor; }
- TwoDIndexingParams& getParams() { return _params; }
+ // This really gets the 'locs' from the provided obj.
+ void getKeys(const BSONObj& obj, std::vector<BSONObj>& locs) const;
- // This really gets the 'locs' from the provided obj.
- void getKeys(const BSONObj& obj, std::vector<BSONObj>& locs) const;
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
-
- TwoDIndexingParams _params;
- };
+ TwoDIndexingParams _params;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/2d_common.h b/src/mongo/db/index/2d_common.h
index d30b8cd07df..e3ed231f9a4 100644
--- a/src/mongo/db/index/2d_common.h
+++ b/src/mongo/db/index/2d_common.h
@@ -35,10 +35,10 @@
namespace mongo {
- struct TwoDIndexingParams {
- std::string geo;
- std::vector<std::pair<std::string, int> > other;
- std::shared_ptr<GeoHashConverter> geoHashConverter;
- };
+struct TwoDIndexingParams {
+ std::string geo;
+ std::vector<std::pair<std::string, int>> other;
+ std::shared_ptr<GeoHashConverter> geoHashConverter;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/btree_access_method.cpp b/src/mongo/db/index/btree_access_method.cpp
index 7bb294342c8..6bc97956fe8 100644
--- a/src/mongo/db/index/btree_access_method.cpp
+++ b/src/mongo/db/index/btree_access_method.cpp
@@ -36,36 +36,33 @@
namespace mongo {
- using std::vector;
+using std::vector;
- // Standard Btree implementation below.
- BtreeAccessMethod::BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree )
- : IndexAccessMethod(btreeState, btree) {
+// Standard Btree implementation below.
+BtreeAccessMethod::BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ // The key generation wants these values.
+ vector<const char*> fieldNames;
+ vector<BSONElement> fixed;
- // The key generation wants these values.
- vector<const char*> fieldNames;
- vector<BSONElement> fixed;
-
- BSONObjIterator it(_descriptor->keyPattern());
- while (it.more()) {
- BSONElement elt = it.next();
- fieldNames.push_back(elt.fieldName());
- fixed.push_back(BSONElement());
- }
-
- if (0 == _descriptor->version()) {
- _keyGenerator.reset(new BtreeKeyGeneratorV0(fieldNames, fixed,
- _descriptor->isSparse()));
- } else if (1 == _descriptor->version()) {
- _keyGenerator.reset(new BtreeKeyGeneratorV1(fieldNames, fixed,
- _descriptor->isSparse()));
- } else {
- massert(16745, "Invalid index version for key generation.", false );
- }
+ BSONObjIterator it(_descriptor->keyPattern());
+ while (it.more()) {
+ BSONElement elt = it.next();
+ fieldNames.push_back(elt.fieldName());
+ fixed.push_back(BSONElement());
}
- void BtreeAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- _keyGenerator->getKeys(obj, keys);
+ if (0 == _descriptor->version()) {
+ _keyGenerator.reset(new BtreeKeyGeneratorV0(fieldNames, fixed, _descriptor->isSparse()));
+ } else if (1 == _descriptor->version()) {
+ _keyGenerator.reset(new BtreeKeyGeneratorV1(fieldNames, fixed, _descriptor->isSparse()));
+ } else {
+ massert(16745, "Invalid index version for key generation.", false);
}
+}
+
+void BtreeAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ _keyGenerator->getKeys(obj, keys);
+}
} // namespace mongo
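The BtreeAccessMethod constructor above walks the index key pattern once, collecting a field name and an empty placeholder element per key component, and then selects the V0 or V1 Btree key generator from the descriptor's version, rejecting anything else via massert 16745. A compact standalone sketch of that version dispatch, with stub generator classes standing in for BtreeKeyGeneratorV0/V1:

    #include <cstdio>
    #include <memory>
    #include <stdexcept>

    struct KeyGenerator {
        virtual ~KeyGenerator() {}
        virtual const char* name() const = 0;
    };
    struct KeyGeneratorV0 : KeyGenerator {
        const char* name() const override { return "V0"; }
    };
    struct KeyGeneratorV1 : KeyGenerator {
        const char* name() const override { return "V1"; }
    };

    // Mirrors the dispatch on descriptor->version(): 0 -> V0, 1 -> V1, anything
    // else is an error (the real code uses massert 16745 rather than throwing).
    std::unique_ptr<KeyGenerator> makeGenerator(int version) {
        if (version == 0) {
            return std::unique_ptr<KeyGenerator>(new KeyGeneratorV0());
        }
        if (version == 1) {
            return std::unique_ptr<KeyGenerator>(new KeyGeneratorV1());
        }
        throw std::runtime_error("Invalid index version for key generation.");
    }

    int main() {
        std::printf("using generator %s\n", makeGenerator(1)->name());
        return 0;
    }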
diff --git a/src/mongo/db/index/btree_access_method.h b/src/mongo/db/index/btree_access_method.h
index 4133b41b892..4c20deeb931 100644
--- a/src/mongo/db/index/btree_access_method.h
+++ b/src/mongo/db/index/btree_access_method.h
@@ -37,21 +37,21 @@
namespace mongo {
- class IndexDescriptor;
-
- /**
- * The IndexAccessMethod for a Btree index.
- * Any index created with {field: 1} or {field: -1} uses this.
- */
- class BtreeAccessMethod : public IndexAccessMethod {
- public:
- BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree );
-
- private:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
-
- // Our keys differ for V0 and V1.
- std::unique_ptr<BtreeKeyGenerator> _keyGenerator;
- };
+class IndexDescriptor;
+
+/**
+ * The IndexAccessMethod for a Btree index.
+ * Any index created with {field: 1} or {field: -1} uses this.
+ */
+class BtreeAccessMethod : public IndexAccessMethod {
+public:
+ BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+
+private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+
+ // Our keys differ for V0 and V1.
+ std::unique_ptr<BtreeKeyGenerator> _keyGenerator;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp
index bccf688e468..e323272b044 100644
--- a/src/mongo/db/index/btree_key_generator.cpp
+++ b/src/mongo/db/index/btree_key_generator.cpp
@@ -32,361 +32,358 @@
namespace mongo {
- // SortStage checks for this error code in order to informatively error when we try to sort keys
- // with parallel arrays.
- const int BtreeKeyGenerator::ParallelArraysCode = 10088;
+// SortStage checks for this error code in order to informatively error when we try to sort keys
+// with parallel arrays.
+const int BtreeKeyGenerator::ParallelArraysCode = 10088;
namespace {
- const BSONObj nullObj = BSON("" << BSONNULL);
- const BSONElement nullElt = nullObj.firstElement();
- const BSONObj undefinedObj = BSON("" << BSONUndefined);
- const BSONElement undefinedElt = undefinedObj.firstElement();
+const BSONObj nullObj = BSON("" << BSONNULL);
+const BSONElement nullElt = nullObj.firstElement();
+const BSONObj undefinedObj = BSON("" << BSONUndefined);
+const BSONElement undefinedElt = undefinedObj.firstElement();
-} // namespace
+} // namespace
- BtreeKeyGenerator::BtreeKeyGenerator(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse)
- : _fieldNames(fieldNames), _isSparse(isSparse), _fixed(fixed) {
-
- BSONObjBuilder nullKeyBuilder;
- for (size_t i = 0; i < fieldNames.size(); ++i) {
- nullKeyBuilder.appendNull("");
- }
- _nullKey = nullKeyBuilder.obj();
-
- _isIdIndex = fieldNames.size() == 1 && std::string("_id") == fieldNames[0];
+BtreeKeyGenerator::BtreeKeyGenerator(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse)
+ : _fieldNames(fieldNames), _isSparse(isSparse), _fixed(fixed) {
+ BSONObjBuilder nullKeyBuilder;
+ for (size_t i = 0; i < fieldNames.size(); ++i) {
+ nullKeyBuilder.appendNull("");
}
+ _nullKey = nullKeyBuilder.obj();
- void BtreeKeyGenerator::getKeys(const BSONObj &obj, BSONObjSet *keys) const {
- if (_isIdIndex) {
- // we special case for speed
- BSONElement e = obj["_id"];
- if ( e.eoo() ) {
- keys->insert(_nullKey);
- }
- else {
- int size = e.size() + 5 /* bson over head*/ - 3 /* remove _id string */;
- BSONObjBuilder b(size);
- b.appendAs(e, "");
- keys->insert(b.obj());
- invariant(keys->begin()->objsize() == size);
- }
- return;
- }
+ _isIdIndex = fieldNames.size() == 1 && std::string("_id") == fieldNames[0];
+}
- // '_fieldNames' and '_fixed' are passed by value so that they can be mutated as part of the
- // getKeys call. :|
- getKeysImpl(_fieldNames, _fixed, obj, keys);
- if (keys->empty() && ! _isSparse) {
+void BtreeKeyGenerator::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ if (_isIdIndex) {
+ // we special case for speed
+ BSONElement e = obj["_id"];
+ if (e.eoo()) {
keys->insert(_nullKey);
+ } else {
+            int size = e.size() + 5 /* BSON overhead */ - 3 /* remove _id string */;
+ BSONObjBuilder b(size);
+ b.appendAs(e, "");
+ keys->insert(b.obj());
+ invariant(keys->begin()->objsize() == size);
}
+ return;
}
- static void assertParallelArrays( const char *first, const char *second ) {
- std::stringstream ss;
- ss << "cannot index parallel arrays [" << first << "] [" << second << "]";
- uasserted( BtreeKeyGenerator::ParallelArraysCode , ss.str() );
+ // '_fieldNames' and '_fixed' are passed by value so that they can be mutated as part of the
+ // getKeys call. :|
+ getKeysImpl(_fieldNames, _fixed, obj, keys);
+ if (keys->empty() && !_isSparse) {
+ keys->insert(_nullKey);
}
+}
- BtreeKeyGeneratorV0::BtreeKeyGeneratorV0(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse)
- : BtreeKeyGenerator(fieldNames, fixed, isSparse) { }
-
- void BtreeKeyGeneratorV0::getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj &obj,
- BSONObjSet *keys) const {
- BSONElement arrElt;
- unsigned arrIdx = ~0;
- unsigned numNotFound = 0;
-
- for ( unsigned i = 0; i < fieldNames.size(); ++i ) {
- if ( *fieldNames[ i ] == '\0' )
- continue;
+static void assertParallelArrays(const char* first, const char* second) {
+ std::stringstream ss;
+ ss << "cannot index parallel arrays [" << first << "] [" << second << "]";
+ uasserted(BtreeKeyGenerator::ParallelArraysCode, ss.str());
+}
- BSONElement e = obj.getFieldDottedOrArray( fieldNames[ i ] );
+BtreeKeyGeneratorV0::BtreeKeyGeneratorV0(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse)
+ : BtreeKeyGenerator(fieldNames, fixed, isSparse) {}
- if ( e.eoo() ) {
- e = nullElt; // no matching field
- numNotFound++;
- }
+void BtreeKeyGeneratorV0::getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const {
+ BSONElement arrElt;
+ unsigned arrIdx = ~0;
+ unsigned numNotFound = 0;
- if ( e.type() != Array )
- fieldNames[ i ] = ""; // no matching field or non-array match
+ for (unsigned i = 0; i < fieldNames.size(); ++i) {
+ if (*fieldNames[i] == '\0')
+ continue;
- if ( *fieldNames[ i ] == '\0' )
- // no need for further object expansion (though array expansion still possible)
- fixed[ i ] = e;
+ BSONElement e = obj.getFieldDottedOrArray(fieldNames[i]);
- if ( e.type() == Array && arrElt.eoo() ) {
- // we only expand arrays on a single path -- track the path here
- arrIdx = i;
- arrElt = e;
- }
+ if (e.eoo()) {
+ e = nullElt; // no matching field
+ numNotFound++;
+ }
- // enforce single array path here
- if ( e.type() == Array && e.rawdata() != arrElt.rawdata() ) {
- assertParallelArrays( e.fieldName(), arrElt.fieldName() );
- }
+ if (e.type() != Array)
+ fieldNames[i] = ""; // no matching field or non-array match
+
+ if (*fieldNames[i] == '\0')
+ // no need for further object expansion (though array expansion still possible)
+ fixed[i] = e;
+
+ if (e.type() == Array && arrElt.eoo()) {
+ // we only expand arrays on a single path -- track the path here
+ arrIdx = i;
+ arrElt = e;
}
- bool allFound = true; // have we found elements for all field names in the key spec?
- for (std::vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end();
- ++i ) {
- if ( **i != '\0' ) {
- allFound = false;
- break;
- }
+ // enforce single array path here
+ if (e.type() == Array && e.rawdata() != arrElt.rawdata()) {
+ assertParallelArrays(e.fieldName(), arrElt.fieldName());
}
+ }
- if ( _isSparse && numNotFound == _fieldNames.size()) {
- // we didn't find any fields
- // so we're not going to index this document
- return;
+ bool allFound = true; // have we found elements for all field names in the key spec?
+ for (std::vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end();
+ ++i) {
+ if (**i != '\0') {
+ allFound = false;
+ break;
}
+ }
- bool insertArrayNull = false;
+ if (_isSparse && numNotFound == _fieldNames.size()) {
+ // we didn't find any fields
+ // so we're not going to index this document
+ return;
+ }
- if ( allFound ) {
- if ( arrElt.eoo() ) {
- // no terminal array element to expand
- BSONObjBuilder b(_sizeTracker);
- for (std::vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i)
- b.appendAs( *i, "" );
- keys->insert( b.obj() );
- }
- else {
- // terminal array element to expand, so generate all keys
- BSONObjIterator i( arrElt.embeddedObject() );
- if ( i.more() ) {
- while (i.more()) {
- BSONObjBuilder b(_sizeTracker);
- for (unsigned j = 0; j < fixed.size(); ++j) {
- if ( j == arrIdx )
- b.appendAs( i.next(), "" );
- else
- b.appendAs( fixed[ j ], "" );
- }
- keys->insert( b.obj() );
- }
- }
- else if ( fixed.size() > 1 ) {
- insertArrayNull = true;
- }
- }
- }
- else {
- // nonterminal array element to expand, so recurse
- verify( !arrElt.eoo() );
- BSONObjIterator i( arrElt.embeddedObject() );
- if ( i.more() ) {
+ bool insertArrayNull = false;
+
+ if (allFound) {
+ if (arrElt.eoo()) {
+ // no terminal array element to expand
+ BSONObjBuilder b(_sizeTracker);
+ for (std::vector<BSONElement>::iterator i = fixed.begin(); i != fixed.end(); ++i)
+ b.appendAs(*i, "");
+ keys->insert(b.obj());
+ } else {
+ // terminal array element to expand, so generate all keys
+ BSONObjIterator i(arrElt.embeddedObject());
+ if (i.more()) {
while (i.more()) {
- BSONElement e = i.next();
- if ( e.type() == Object ) {
- getKeysImpl( fieldNames, fixed, e.embeddedObject(), keys );
+ BSONObjBuilder b(_sizeTracker);
+ for (unsigned j = 0; j < fixed.size(); ++j) {
+ if (j == arrIdx)
+ b.appendAs(i.next(), "");
+ else
+ b.appendAs(fixed[j], "");
}
+ keys->insert(b.obj());
}
- }
- else {
+ } else if (fixed.size() > 1) {
insertArrayNull = true;
}
}
-
- if ( insertArrayNull ) {
- // x : [] - need to insert undefined
- BSONObjBuilder b(_sizeTracker);
- for (unsigned j = 0; j < fixed.size(); ++j) {
- if ( j == arrIdx ) {
- b.appendUndefined( "" );
- }
- else {
- BSONElement e = fixed[j];
- if ( e.eoo() )
- b.appendNull( "" );
- else
- b.appendAs( e , "" );
+ } else {
+ // nonterminal array element to expand, so recurse
+ verify(!arrElt.eoo());
+ BSONObjIterator i(arrElt.embeddedObject());
+ if (i.more()) {
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == Object) {
+ getKeysImpl(fieldNames, fixed, e.embeddedObject(), keys);
}
}
- keys->insert( b.obj() );
+ } else {
+ insertArrayNull = true;
}
}
- BtreeKeyGeneratorV1::BtreeKeyGeneratorV1(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse)
- : BtreeKeyGenerator(fieldNames, fixed, isSparse),
- _emptyPositionalInfo(fieldNames.size()) {
- }
-
- BSONElement BtreeKeyGeneratorV1::extractNextElement(const BSONObj &obj,
- const PositionalPathInfo& positionalInfo,
- const char** field,
- bool* arrayNestedArray) const {
- std::string firstField = mongoutils::str::before(*field, '.');
- bool haveObjField = !obj.getField(firstField).eoo();
- BSONElement arrField = positionalInfo.positionallyIndexedElt;
-
- // An index component field name cannot exist in both a document
- // array and one of that array's children.
- uassert(16746,
- mongoutils::str::stream() <<
- "Ambiguous field name found in array (do not use numeric field names in "
- "embedded elements in an array), field: '" << arrField.fieldName() <<
- "' for array: " << positionalInfo.arrayObj,
- !haveObjField || !positionalInfo.hasPositionallyIndexedElt());
-
- *arrayNestedArray = false;
- if ( haveObjField ) {
- return obj.getFieldDottedOrArray(*field);
- }
- else if (positionalInfo.hasPositionallyIndexedElt()) {
- if ( arrField.type() == Array ) {
- *arrayNestedArray = true;
+ if (insertArrayNull) {
+ // x : [] - need to insert undefined
+ BSONObjBuilder b(_sizeTracker);
+ for (unsigned j = 0; j < fixed.size(); ++j) {
+ if (j == arrIdx) {
+ b.appendUndefined("");
+ } else {
+ BSONElement e = fixed[j];
+ if (e.eoo())
+ b.appendNull("");
+ else
+ b.appendAs(e, "");
}
- *field = positionalInfo.remainingPath;
- return positionalInfo.dottedElt;
}
- return BSONElement();
+ keys->insert(b.obj());
}
+}
- void BtreeKeyGeneratorV1::_getKeysArrEltFixed(
- std::vector<const char*>* fieldNames,
- std::vector<BSONElement>* fixed,
- const BSONElement& arrEntry,
- BSONObjSet* keys,
- unsigned numNotFound,
- const BSONElement& arrObjElt,
- const std::set<unsigned>& arrIdxs,
- bool mayExpandArrayUnembedded,
- const std::vector<PositionalPathInfo>& positionalInfo) const {
- // Set up any terminal array values.
- for (std::set<unsigned>::const_iterator j = arrIdxs.begin(); j != arrIdxs.end(); ++j) {
- unsigned idx = *j;
- if (*(*fieldNames)[idx] == '\0') {
- (*fixed)[idx] = mayExpandArrayUnembedded ? arrEntry : arrObjElt;
- }
+BtreeKeyGeneratorV1::BtreeKeyGeneratorV1(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse)
+ : BtreeKeyGenerator(fieldNames, fixed, isSparse), _emptyPositionalInfo(fieldNames.size()) {}
+
+BSONElement BtreeKeyGeneratorV1::extractNextElement(const BSONObj& obj,
+ const PositionalPathInfo& positionalInfo,
+ const char** field,
+ bool* arrayNestedArray) const {
+ std::string firstField = mongoutils::str::before(*field, '.');
+ bool haveObjField = !obj.getField(firstField).eoo();
+ BSONElement arrField = positionalInfo.positionallyIndexedElt;
+
+ // An index component field name cannot exist in both a document
+ // array and one of that array's children.
+ uassert(16746,
+ mongoutils::str::stream()
+ << "Ambiguous field name found in array (do not use numeric field names in "
+ "embedded elements in an array), field: '" << arrField.fieldName()
+ << "' for array: " << positionalInfo.arrayObj,
+ !haveObjField || !positionalInfo.hasPositionallyIndexedElt());
+
+ *arrayNestedArray = false;
+ if (haveObjField) {
+ return obj.getFieldDottedOrArray(*field);
+ } else if (positionalInfo.hasPositionallyIndexedElt()) {
+ if (arrField.type() == Array) {
+ *arrayNestedArray = true;
}
-
- // Recurse.
- getKeysImplWithArray(*fieldNames,
- *fixed,
- arrEntry.type() == Object ? arrEntry.embeddedObject() : BSONObj(),
- keys,
- numNotFound,
- positionalInfo);
+ *field = positionalInfo.remainingPath;
+ return positionalInfo.dottedElt;
}
-
- void BtreeKeyGeneratorV1::getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys) const {
- getKeysImplWithArray(fieldNames, fixed, obj, keys, 0, _emptyPositionalInfo);
+ return BSONElement();
+}
+
+void BtreeKeyGeneratorV1::_getKeysArrEltFixed(
+ std::vector<const char*>* fieldNames,
+ std::vector<BSONElement>* fixed,
+ const BSONElement& arrEntry,
+ BSONObjSet* keys,
+ unsigned numNotFound,
+ const BSONElement& arrObjElt,
+ const std::set<unsigned>& arrIdxs,
+ bool mayExpandArrayUnembedded,
+ const std::vector<PositionalPathInfo>& positionalInfo) const {
+ // Set up any terminal array values.
+ for (std::set<unsigned>::const_iterator j = arrIdxs.begin(); j != arrIdxs.end(); ++j) {
+ unsigned idx = *j;
+ if (*(*fieldNames)[idx] == '\0') {
+ (*fixed)[idx] = mayExpandArrayUnembedded ? arrEntry : arrObjElt;
+ }
}
- void BtreeKeyGeneratorV1::getKeysImplWithArray(
- std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys,
- unsigned numNotFound,
- const std::vector<PositionalPathInfo>& positionalInfo) const {
- BSONElement arrElt;
- std::set<unsigned> arrIdxs;
- bool mayExpandArrayUnembedded = true;
- for (unsigned i = 0; i < fieldNames.size(); ++i) {
- if ( *fieldNames[ i ] == '\0' ) {
- continue;
- }
+ // Recurse.
+ getKeysImplWithArray(*fieldNames,
+ *fixed,
+ arrEntry.type() == Object ? arrEntry.embeddedObject() : BSONObj(),
+ keys,
+ numNotFound,
+ positionalInfo);
+}
+
+void BtreeKeyGeneratorV1::getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const {
+ getKeysImplWithArray(fieldNames, fixed, obj, keys, 0, _emptyPositionalInfo);
+}
+
+void BtreeKeyGeneratorV1::getKeysImplWithArray(
+ std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys,
+ unsigned numNotFound,
+ const std::vector<PositionalPathInfo>& positionalInfo) const {
+ BSONElement arrElt;
+ std::set<unsigned> arrIdxs;
+ bool mayExpandArrayUnembedded = true;
+ for (unsigned i = 0; i < fieldNames.size(); ++i) {
+ if (*fieldNames[i] == '\0') {
+ continue;
+ }
- bool arrayNestedArray;
- // Extract element matching fieldName[ i ] from object xor array.
- BSONElement e = extractNextElement(obj,
- positionalInfo[i],
- &fieldNames[i],
- &arrayNestedArray);
-
- if ( e.eoo() ) {
- // if field not present, set to null
- fixed[ i ] = nullElt;
- // done expanding this field name
- fieldNames[ i ] = "";
- numNotFound++;
- }
- else if ( e.type() == Array ) {
- arrIdxs.insert( i );
- if ( arrElt.eoo() ) {
- // we only expand arrays on a single path -- track the path here
- arrElt = e;
- }
- else if ( e.rawdata() != arrElt.rawdata() ) {
- // enforce single array path here
- assertParallelArrays( e.fieldName(), arrElt.fieldName() );
- }
- if ( arrayNestedArray ) {
- mayExpandArrayUnembedded = false;
- }
+ bool arrayNestedArray;
+ // Extract element matching fieldName[ i ] from object xor array.
+ BSONElement e =
+ extractNextElement(obj, positionalInfo[i], &fieldNames[i], &arrayNestedArray);
+
+ if (e.eoo()) {
+ // if field not present, set to null
+ fixed[i] = nullElt;
+ // done expanding this field name
+ fieldNames[i] = "";
+ numNotFound++;
+ } else if (e.type() == Array) {
+ arrIdxs.insert(i);
+ if (arrElt.eoo()) {
+ // we only expand arrays on a single path -- track the path here
+ arrElt = e;
+ } else if (e.rawdata() != arrElt.rawdata()) {
+ // enforce single array path here
+ assertParallelArrays(e.fieldName(), arrElt.fieldName());
}
- else {
- // not an array - no need for further expansion
- fixed[ i ] = e;
+ if (arrayNestedArray) {
+ mayExpandArrayUnembedded = false;
}
+ } else {
+ // not an array - no need for further expansion
+ fixed[i] = e;
}
+ }
- if ( arrElt.eoo() ) {
- // No array, so generate a single key.
- if ( _isSparse && numNotFound == fieldNames.size()) {
- return;
- }
- BSONObjBuilder b(_sizeTracker);
- for (std::vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i) {
- b.appendAs( *i, "" );
- }
- keys->insert( b.obj() );
+ if (arrElt.eoo()) {
+ // No array, so generate a single key.
+ if (_isSparse && numNotFound == fieldNames.size()) {
+ return;
}
- else if ( arrElt.embeddedObject().firstElement().eoo() ) {
- // Empty array, so set matching fields to undefined.
- _getKeysArrEltFixed(&fieldNames, &fixed, undefinedElt, keys, numNotFound, arrElt,
- arrIdxs, true, _emptyPositionalInfo);
+ BSONObjBuilder b(_sizeTracker);
+ for (std::vector<BSONElement>::iterator i = fixed.begin(); i != fixed.end(); ++i) {
+ b.appendAs(*i, "");
}
- else {
- BSONObj arrObj = arrElt.embeddedObject();
-
- // For positional key patterns, e.g. {'a.1.b': 1}, we lookup the indexed array element
- // and then traverse the remainder of the field path up front. This prevents us from
- // having to look up the indexed element again on each recursive call (i.e. once per
- // array element).
- std::vector<PositionalPathInfo> subPositionalInfo(fixed.size());
- for (size_t i = 0; i < fieldNames.size(); ++i) {
- if (*fieldNames[i] == '\0') {
- // We've reached the end of the path.
- continue;
- }
-
- StringData part = fieldNames[i];
- part = part.substr(0, part.find('.'));
- subPositionalInfo[i].positionallyIndexedElt = arrObj[part];
- if (subPositionalInfo[i].positionallyIndexedElt.eoo()) {
- // Not indexing an array by position.
- continue;
- }
-
- // We're indexing an array element by its position. Traverse the remainder of the
- // field path now.
- subPositionalInfo[i].arrayObj = arrObj;
- subPositionalInfo[i].remainingPath = fieldNames[i];
- subPositionalInfo[i].dottedElt =
- arrObj.getFieldDottedOrArray(subPositionalInfo[i].remainingPath);
+ keys->insert(b.obj());
+ } else if (arrElt.embeddedObject().firstElement().eoo()) {
+ // Empty array, so set matching fields to undefined.
+ _getKeysArrEltFixed(&fieldNames,
+ &fixed,
+ undefinedElt,
+ keys,
+ numNotFound,
+ arrElt,
+ arrIdxs,
+ true,
+ _emptyPositionalInfo);
+ } else {
+ BSONObj arrObj = arrElt.embeddedObject();
+
+        // For positional key patterns, e.g. {'a.1.b': 1}, we look up the indexed array element
+ // and then traverse the remainder of the field path up front. This prevents us from
+ // having to look up the indexed element again on each recursive call (i.e. once per
+ // array element).
+ std::vector<PositionalPathInfo> subPositionalInfo(fixed.size());
+ for (size_t i = 0; i < fieldNames.size(); ++i) {
+ if (*fieldNames[i] == '\0') {
+ // We've reached the end of the path.
+ continue;
}
- // Generate a key for each element of the indexed array.
- BSONObjIterator i(arrObj);
- while (i.more()) {
- _getKeysArrEltFixed(&fieldNames, &fixed, i.next(), keys, numNotFound, arrElt,
- arrIdxs, mayExpandArrayUnembedded, subPositionalInfo);
+ StringData part = fieldNames[i];
+ part = part.substr(0, part.find('.'));
+ subPositionalInfo[i].positionallyIndexedElt = arrObj[part];
+ if (subPositionalInfo[i].positionallyIndexedElt.eoo()) {
+ // Not indexing an array by position.
+ continue;
}
+
+ // We're indexing an array element by its position. Traverse the remainder of the
+ // field path now.
+ subPositionalInfo[i].arrayObj = arrObj;
+ subPositionalInfo[i].remainingPath = fieldNames[i];
+ subPositionalInfo[i].dottedElt =
+ arrObj.getFieldDottedOrArray(subPositionalInfo[i].remainingPath);
+ }
+
+ // Generate a key for each element of the indexed array.
+ BSONObjIterator i(arrObj);
+ while (i.more()) {
+ _getKeysArrEltFixed(&fieldNames,
+ &fixed,
+ i.next(),
+ keys,
+ numNotFound,
+ arrElt,
+ arrIdxs,
+ mayExpandArrayUnembedded,
+ subPositionalInfo);
}
}
+}
} // namespace mongo
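
The assertParallelArrays()/ParallelArraysCode pair above is what turns a document with two independently expanding arrays into a UserException that callers such as SortStage can recognize. A minimal, hypothetical sketch of that behaviour (same assumptions as the sketch above; not part of the patch):

    // Illustrative sketch only. Shows the uassert raised via assertParallelArrays()
    // when a compound key pattern would have to expand two different arrays.
    #include <vector>

    #include "mongo/db/index/btree_key_generator.h"
    #include "mongo/db/json.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {
    void parallelArraysExample() {
        BSONObj keyPattern = fromjson("{a: 1, b: 1}");

        std::vector<const char*> fieldNames;
        std::vector<BSONElement> fixed;
        BSONObjIterator it(keyPattern);
        while (it.more()) {
            fieldNames.push_back(it.next().fieldName());
            fixed.push_back(BSONElement());
        }
        BtreeKeyGeneratorV1 keyGen(fieldNames, fixed, false /* isSparse */);

        BSONObjSet keys;
        try {
            // Both 'a' and 'b' are arrays, so only one of them could be expanded.
            keyGen.getKeys(fromjson("{a: [1, 2], b: [3, 4]}"), &keys);
        } catch (const UserException& ex) {
            // The error code is BtreeKeyGenerator::ParallelArraysCode (10088).
            invariant(ex.getCode() == BtreeKeyGenerator::ParallelArraysCode);
        }
    }
    }  // namespace mongo
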
diff --git a/src/mongo/db/index/btree_key_generator.h b/src/mongo/db/index/btree_key_generator.h
index fe376d7f32b..cb156354b51 100644
--- a/src/mongo/db/index/btree_key_generator.h
+++ b/src/mongo/db/index/btree_key_generator.h
@@ -34,192 +34,194 @@
namespace mongo {
+/**
+ * Internal class used by BtreeAccessMethod to generate keys for indexed documents.
+ * This class is meant to be kept under the index access layer.
+ */
+class BtreeKeyGenerator {
+public:
+ BtreeKeyGenerator(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse);
+
+ virtual ~BtreeKeyGenerator() {}
+
+ void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+
+ static const int ParallelArraysCode;
+
+protected:
+ // These are used by the getKeysImpl(s) below.
+ std::vector<const char*> _fieldNames;
+ bool _isIdIndex;
+ bool _isSparse;
+ BSONObj _nullKey; // a full key with all fields null
+ BSONSizeTracker _sizeTracker;
+
+private:
+ // We have V0 and V1. Sigh.
+ virtual void getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const = 0;
+
+ std::vector<BSONElement> _fixed;
+};
+
+class BtreeKeyGeneratorV0 : public BtreeKeyGenerator {
+public:
+ BtreeKeyGeneratorV0(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse);
+
+ virtual ~BtreeKeyGeneratorV0() {}
+
+private:
+ virtual void getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const;
+};
+
+class BtreeKeyGeneratorV1 : public BtreeKeyGenerator {
+public:
+ BtreeKeyGeneratorV1(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse);
+
+ virtual ~BtreeKeyGeneratorV1() {}
+
+private:
/**
- * Internal class used by BtreeAccessMethod to generate keys for indexed documents.
- * This class is meant to be kept under the index access layer.
+ * Stores info regarding traversal of a positional path. A path through a document is
+ * considered positional if this path element names an array element. Generally this means
+ * that the field name consists of [0-9]+, but the implementation just calls .Obj() on
+ * the array and looks for the named field. This logic happens even if the field does
+ * not match [0-9]+.
+ *
+ * Example:
+ * The path 'a.1.b' can sometimes be positional due to path element '1'. In the document
+ * {a: [{b: 98}, {b: 99}]} it would be considered positional, and would refer to
+ * element 99. In the document {a: [{'1': {b: 97}}]}, the path is *not* considered
+ * positional and would refer to element 97.
*/
- class BtreeKeyGenerator {
- public:
- BtreeKeyGenerator(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse);
-
- virtual ~BtreeKeyGenerator() { }
-
- void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
-
- static const int ParallelArraysCode;
-
- protected:
- // These are used by the getKeysImpl(s) below.
- std::vector<const char*> _fieldNames;
- bool _isIdIndex;
- bool _isSparse;
- BSONObj _nullKey; // a full key with all fields null
- BSONSizeTracker _sizeTracker;
-
- private:
- // We have V0 and V1. Sigh.
- virtual void getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys) const = 0;
-
- std::vector<BSONElement> _fixed;
+ struct PositionalPathInfo {
+ PositionalPathInfo() : remainingPath("") {}
+
+ bool hasPositionallyIndexedElt() const {
+ return !positionallyIndexedElt.eoo();
+ }
+
+ // Stores the array element indexed by position. If the key pattern has no positional
+ // element, then this is EOO.
+ //
+ // Example:
+ // Suppose the key pattern is {"a.0.x": 1} and we're extracting keys for document
+ // {a: [{x: 98}, {x: 99}]}. We should store element {x: 98} here.
+ BSONElement positionallyIndexedElt;
+
+ // The array to which 'positionallyIndexedElt' belongs.
+ BSONObj arrayObj;
+
+ // If we find a positionally indexed element, we traverse the remainder of the path
+ // until we find either another array element or the end of the path. The result of
+ // this traversal (implemented using getFieldDottedOrArray()), is stored here and used
+ // during the recursive call for each array element.
+ //
+ // Example:
+ // Suppose we have key pattern {"a.1.b.0.c": 1}. The document for which we are
+ // generating keys is {a: [0, {b: [{c: 99}]}]}. We will find that {b: [{c: 99}]}
+ // is a positionally indexed element and store it as 'positionallyIndexedElt'.
+ //
+ // We then call getFieldDottedOrArray() to traverse the remainder of the path,
+        //     "b.0.c". The result is the array [{c: 99}] which is stored here as 'dottedElt'.
+ BSONElement dottedElt;
+
+ // The remaining path that must be traversed in 'dottedElt' to find the indexed
+ // element(s).
+ //
+ // Example:
+ // Continuing the example above, 'remainingPath' will be "0.c". Note that the path
+ // "0.c" refers to element 99 in 'dottedElt', [{c: 99}].
+ const char* remainingPath;
};
- class BtreeKeyGeneratorV0 : public BtreeKeyGenerator {
- public:
- BtreeKeyGeneratorV0(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse);
-
- virtual ~BtreeKeyGeneratorV0() { }
+ /**
+ * @param fieldNames - fields to index, may be postfixes in recursive calls
+ * @param fixed - values that have already been identified for their index fields
+ * @param obj - object from which keys should be extracted, based on names in fieldNames
+ * @param keys - set where index keys are written
+ * @param numNotFound - number of index fields that have already been identified as missing
+ * @param array - array from which keys should be extracted, based on names in fieldNames
+ * If obj and array are both nonempty, obj will be one of the elements of array.
+ */
+ virtual void getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const;
- private:
- virtual void getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys) const;
- };
+ /**
+ * This recursive method does the heavy-lifting for getKeysImpl().
+ */
+ void getKeysImplWithArray(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys,
+ unsigned numNotFound,
+ const std::vector<PositionalPathInfo>& positionalInfo) const;
+ /**
+ * A call to getKeysImplWithArray() begins by calling this for each field in the key
+ * pattern. It uses getFieldDottedOrArray() to traverse the path '*field' in 'obj'.
+ *
+ * The 'positionalInfo' arg is used for handling a field path where 'obj' has an
+ * array indexed by position. See the comments for PositionalPathInfo for more detail.
+ *
+ * Returns the element extracted as a result of traversing the path, or an indexed array
+ * if we encounter one during the path traversal.
+ *
+ * Out-parameters:
+ * --Sets *field to the remaining path that must be traversed.
+ * --Sets *arrayNestedArray to true if the returned BSONElement is a nested array that is
+ * indexed by position in its parent array. Otherwise sets *arrayNestedArray to false.
+ *
+ * Example:
+ * Suppose we have key pattern {"a.b.c": 1} and we're extracting keys from document
+ * {a: [{b: {c: 98}}, {b: {c: 99}}]}. On the first call to extractNextElement(), 'obj'
+ * will be the full document, {a: [{b: {c: 98}}, {b: {c: 99}}]}. The 'positionalInfo'
+ * argument is not relevant, because the array is not being positionally indexed.
+ * '*field' will point to "a.b.c".
+ *
+ * The return value will be the array element [{b: {c: 98}}, {b: {c: 99}}], because path
+ * traversal stops when an indexed array is encountered. Furthermore, '*field' will be set
+ * to "b.c".
+ *
+ * extractNextElement() will then be called from a recursive call to
+ * getKeysImplWithArray() for each array element. For instance, it will get called with
+ * 'obj' {b: {c: 98}} and '*field' pointing to "b.c". It will return element 98 and
+     * set '*field' to "". Similarly, it will return element 99 and set '*field' to "" for
+ * the second array element.
+ */
+ BSONElement extractNextElement(const BSONObj& obj,
+ const PositionalPathInfo& positionalInfo,
+ const char** field,
+ bool* arrayNestedArray) const;
- class BtreeKeyGeneratorV1 : public BtreeKeyGenerator {
- public:
- BtreeKeyGeneratorV1(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse);
-
- virtual ~BtreeKeyGeneratorV1() { }
-
- private:
- /**
- * Stores info regarding traversal of a positional path. A path through a document is
- * considered positional if this path element names an array element. Generally this means
- * that the field name consists of [0-9]+, but the implementation just calls .Obj() on
- * the array and looks for the named field. This logic happens even if the field does
- * not match [0-9]+.
- *
- * Example:
- * The path 'a.1.b' can sometimes be positional due to path element '1'. In the document
- * {a: [{b: 98}, {b: 99}]} it would be considered positional, and would refer to
- * element 99. In the document {a: [{'1': {b: 97}}]}, the path is *not* considered
- * positional and would refer to element 97.
- */
- struct PositionalPathInfo {
- PositionalPathInfo() : remainingPath("") {}
-
- bool hasPositionallyIndexedElt() const { return !positionallyIndexedElt.eoo(); }
-
- // Stores the array element indexed by position. If the key pattern has no positional
- // element, then this is EOO.
- //
- // Example:
- // Suppose the key pattern is {"a.0.x": 1} and we're extracting keys for document
- // {a: [{x: 98}, {x: 99}]}. We should store element {x: 98} here.
- BSONElement positionallyIndexedElt;
-
- // The array to which 'positionallyIndexedElt' belongs.
- BSONObj arrayObj;
-
- // If we find a positionally indexed element, we traverse the remainder of the path
- // until we find either another array element or the end of the path. The result of
- // this traversal (implemented using getFieldDottedOrArray()), is stored here and used
- // during the recursive call for each array element.
- //
- // Example:
- // Suppose we have key pattern {"a.1.b.0.c": 1}. The document for which we are
- // generating keys is {a: [0, {b: [{c: 99}]}]}. We will find that {b: [{c: 99}]}
- // is a positionally indexed element and store it as 'positionallyIndexedElt'.
- //
- // We then call getFieldDottedOrArray() to traverse the remainder of the path,
- // "b.1.c". The result is the array [{c: 99}] which is stored here as 'dottedElt'.
- BSONElement dottedElt;
-
- // The remaining path that must be traversed in 'dottedElt' to find the indexed
- // element(s).
- //
- // Example:
- // Continuing the example above, 'remainingPath' will be "0.c". Note that the path
- // "0.c" refers to element 99 in 'dottedElt', [{c: 99}].
- const char* remainingPath;
- };
-
- /**
- * @param fieldNames - fields to index, may be postfixes in recursive calls
- * @param fixed - values that have already been identified for their index fields
- * @param obj - object from which keys should be extracted, based on names in fieldNames
- * @param keys - set where index keys are written
- * @param numNotFound - number of index fields that have already been identified as missing
- * @param array - array from which keys should be extracted, based on names in fieldNames
- * If obj and array are both nonempty, obj will be one of the elements of array.
- */
- virtual void getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys) const;
-
- /**
- * This recursive method does the heavy-lifting for getKeysImpl().
- */
- void getKeysImplWithArray(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys,
- unsigned numNotFound,
- const std::vector<PositionalPathInfo>& positionalInfo) const;
- /**
- * A call to getKeysImplWithArray() begins by calling this for each field in the key
- * pattern. It uses getFieldDottedOrArray() to traverse the path '*field' in 'obj'.
- *
- * The 'positionalInfo' arg is used for handling a field path where 'obj' has an
- * array indexed by position. See the comments for PositionalPathInfo for more detail.
- *
- * Returns the element extracted as a result of traversing the path, or an indexed array
- * if we encounter one during the path traversal.
- *
- * Out-parameters:
- * --Sets *field to the remaining path that must be traversed.
- * --Sets *arrayNestedArray to true if the returned BSONElement is a nested array that is
- * indexed by position in its parent array. Otherwise sets *arrayNestedArray to false.
- *
- * Example:
- * Suppose we have key pattern {"a.b.c": 1} and we're extracting keys from document
- * {a: [{b: {c: 98}}, {b: {c: 99}}]}. On the first call to extractNextElement(), 'obj'
- * will be the full document, {a: [{b: {c: 98}}, {b: {c: 99}}]}. The 'positionalInfo'
- * argument is not relevant, because the array is not being positionally indexed.
- * '*field' will point to "a.b.c".
- *
- * The return value will be the array element [{b: {c: 98}}, {b: {c: 99}}], because path
- * traversal stops when an indexed array is encountered. Furthermore, '*field' will be set
- * to "b.c".
- *
- * extractNextElement() will then be called from a recursive call to
- * getKeysImplWithArray() for each array element. For instance, it will get called with
- * 'obj' {b: {c: 98}} and '*field' pointing to "b.c". It will return element 98 and
- * set '*field' to "". Similarly, it will return elemtn 99 and set '*field' to "" for
- * the second array element.
- */
- BSONElement extractNextElement(const BSONObj& obj,
- const PositionalPathInfo& positionalInfo,
- const char** field,
- bool* arrayNestedArray) const;
-
- /**
- * Sets extracted elements in 'fixed' for field paths that we have traversed to the end.
- *
- * Then calls getKeysImplWithArray() recursively.
- */
- void _getKeysArrEltFixed(std::vector<const char*>* fieldNames,
- std::vector<BSONElement>* fixed,
- const BSONElement& arrEntry,
- BSONObjSet* keys,
- unsigned numNotFound,
- const BSONElement& arrObjElt,
- const std::set<unsigned>& arrIdxs,
- bool mayExpandArrayUnembedded,
- const std::vector<PositionalPathInfo>& positionalInfo) const;
-
- const std::vector<PositionalPathInfo> _emptyPositionalInfo;
- };
+ /**
+ * Sets extracted elements in 'fixed' for field paths that we have traversed to the end.
+ *
+ * Then calls getKeysImplWithArray() recursively.
+ */
+ void _getKeysArrEltFixed(std::vector<const char*>* fieldNames,
+ std::vector<BSONElement>* fixed,
+ const BSONElement& arrEntry,
+ BSONObjSet* keys,
+ unsigned numNotFound,
+ const BSONElement& arrObjElt,
+ const std::set<unsigned>& arrIdxs,
+ bool mayExpandArrayUnembedded,
+ const std::vector<PositionalPathInfo>& positionalInfo) const;
+
+ const std::vector<PositionalPathInfo> _emptyPositionalInfo;
+};
} // namespace mongo
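
The PositionalPathInfo documentation above is easiest to follow from the caller's side. The hypothetical sketch below (same assumptions as the earlier sketches; not part of the patch) corresponds to the GetKeysFromObjectWithinArray and GetKeysFromIndexedArrayIndex cases in the test file that follows:

    // Illustrative sketch only. The positional path 'a.0.b' selects element 0 of
    // the array 'a' and then descends into that element's 'b' field.
    #include <vector>

    #include "mongo/db/index/btree_key_generator.h"
    #include "mongo/db/json.h"

    namespace mongo {
    void positionalPathExample() {
        BSONObj keyPattern = fromjson("{'a.0.b': 1}");

        std::vector<const char*> fieldNames;
        std::vector<BSONElement> fixed;
        BSONObjIterator it(keyPattern);
        while (it.more()) {
            fieldNames.push_back(it.next().fieldName());
            fixed.push_back(BSONElement());
        }
        BtreeKeyGeneratorV1 keyGen(fieldNames, fixed, false /* isSparse */);

        BSONObjSet keys;
        keyGen.getKeys(fromjson("{a: [{b: 1}]}"), &keys);
        // keys == { {'': 1} }: 'a.0' resolved positionally to {b: 1}, then 'b'
        // was extracted from it.

        // A field name that exists both positionally and as a literal key inside
        // the array element is ambiguous and trips uassert 16746, e.g. key
        // pattern {'a.0': 1} against {a: [{'0': 1}]}.
    }
    }  // namespace mongo
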
diff --git a/src/mongo/db/index/btree_key_generator_test.cpp b/src/mongo/db/index/btree_key_generator_test.cpp
index f01f6552f18..23792fb9b87 100644
--- a/src/mongo/db/index/btree_key_generator_test.cpp
+++ b/src/mongo/db/index/btree_key_generator_test.cpp
@@ -41,791 +41,790 @@ using std::vector;
namespace {
- //
- // Helper functions
- //
+//
+// Helper functions
+//
- std::string dumpKeyset(const BSONObjSet& objs) {
- std::stringstream ss;
- ss << "[ ";
- for (BSONObjSet::iterator i = objs.begin(); i != objs.end(); ++i) {
- ss << i->toString() << " ";
- }
- ss << "]";
-
- return ss.str();
+std::string dumpKeyset(const BSONObjSet& objs) {
+ std::stringstream ss;
+ ss << "[ ";
+ for (BSONObjSet::iterator i = objs.begin(); i != objs.end(); ++i) {
+ ss << i->toString() << " ";
}
+ ss << "]";
- bool keysetsMatch(const BSONObjSet& expected, const BSONObjSet& actual) {
- if (expected.size() != actual.size()) {
- return false;
- }
-
- for (BSONObjSet::iterator i = expected.begin(); i != expected.end(); ++i) {
- if (actual.end() == actual.find(*i)) {
- return false;
- }
- }
+ return ss.str();
+}
- return true;
+bool keysetsMatch(const BSONObjSet& expected, const BSONObjSet& actual) {
+ if (expected.size() != actual.size()) {
+ return false;
}
- bool testKeygen(const BSONObj& kp, const BSONObj& obj,
- const BSONObjSet& expectedKeys, bool sparse = false) {
- //
- // Step 1: construct the btree key generator object, using the
- // index key pattern.
- //
- vector<const char*> fieldNames;
- vector<BSONElement> fixed;
-
- BSONObjIterator it(kp);
- while (it.more()) {
- BSONElement elt = it.next();
- fieldNames.push_back(elt.fieldName());
- fixed.push_back(BSONElement());
- }
-
- unique_ptr<BtreeKeyGenerator> keyGen(
- new BtreeKeyGeneratorV1(fieldNames, fixed, sparse));
-
- //
- // Step 2: ask 'keyGen' to generate index keys for the object 'obj'.
- //
- BSONObjSet actualKeys;
- keyGen->getKeys(obj, &actualKeys);
-
- //
- // Step 3: check that the results match the expected result.
- //
- bool match = keysetsMatch(expectedKeys, actualKeys);
- if (!match) {
- cout << "Expected: " << dumpKeyset(expectedKeys) << ", "
- << "Actual: " << dumpKeyset(actualKeys) << endl;
+ for (BSONObjSet::iterator i = expected.begin(); i != expected.end(); ++i) {
+ if (actual.end() == actual.find(*i)) {
+ return false;
}
-
- return match;
}
+ return true;
+}
+
+bool testKeygen(const BSONObj& kp,
+ const BSONObj& obj,
+ const BSONObjSet& expectedKeys,
+ bool sparse = false) {
//
- // Unit tests
+ // Step 1: construct the btree key generator object, using the
+ // index key pattern.
//
+ vector<const char*> fieldNames;
+ vector<BSONElement> fixed;
- TEST(BtreeKeyGeneratorTest, GetKeysFromObjectSimple) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{b: 4, a: 5}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 5}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromObjectDotted) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: {b: 4}, c: 'foo'}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 4}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySimple) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, 3]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArrayFirstElement) {
- BSONObj keyPattern = fromjson("{a: 1, b: 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: 2}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': 2}"));
- expectedKeys.insert(fromjson("{'': 2, '': 2}"));
- expectedKeys.insert(fromjson("{'': 3, '': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+ BSONObjIterator it(kp);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ fieldNames.push_back(elt.fieldName());
+ fixed.push_back(BSONElement());
}
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySecondElement) {
- BSONObj keyPattern = fromjson("{first: 1, a: 1}");
- BSONObj genKeysFrom = fromjson("{first: 5, a: [1, 2, 3]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 5, '': 1}"));
- expectedKeys.insert(fromjson("{'': 5, '': 2}"));
- expectedKeys.insert(fromjson("{'': 5, '': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromSecondLevelArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: {b: [1, 2, 3]}}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysBasic) {
- BSONObj keyPattern = fromjson("{'a': 1, 'b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: [1, 2, 3]}}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectBasic) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectCompoundIndex) {
- BSONObj keyPattern = fromjson("{'a.b': 1, d: 99}");
- BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}], d: 99}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': 99}"));
- expectedKeys.insert(fromjson("{'': 2, '': 99}"));
- expectedKeys.insert(fromjson("{'': 3, '': 99}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectSingleMissing) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectMissing) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {foo: 41}, {foo: 41}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysMissingField) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{b: 1}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysSubobjectMissing) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromCompound) {
- BSONObj keyPattern = fromjson("{x: 1, y: 1}");
- BSONObj genKeysFrom = fromjson("{x: 'a', y: 'b'}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 'a', '': 'b'}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
+ unique_ptr<BtreeKeyGenerator> keyGen(new BtreeKeyGeneratorV1(fieldNames, fixed, sparse));
- TEST(BtreeKeyGeneratorTest, GetKeysFromCompoundMissing) {
- BSONObj keyPattern = fromjson("{x: 1, y: 1}");
- BSONObj genKeysFrom = fromjson("{x: 'a'}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 'a', '': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubelementComplex) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:[2]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysComplex) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:[1],c:[2]}]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysAlternateMissing) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:1},{c:2}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': 2}"));
- expectedKeys.insert(fromjson("{'': 1, '': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromMultiComplex) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:1},{b:[1,2,3]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysArrayEmpty) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{a:[1,2]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: [1]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: null}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: []}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleArray) {
- BSONObj keyPattern = fromjson("{a: 1, a: 1}");
- BSONObj genKeysFrom = fromjson("{a:[1,2]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': 1}"));
- expectedKeys.insert(fromjson("{'': 2, '': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleEmptyArray) {
- BSONObj keyPattern = fromjson("{a: 1, a: 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': undefined, '': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromMultiEmptyArray) {
- BSONObj keyPattern = fromjson("{a: 1, b: 1}");
- BSONObj genKeysFrom = fromjson("{a: 1, b: [1, 2]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': 1}"));
- expectedKeys.insert(fromjson("{'': 1, '': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: 1, b: [1]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1, '': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: 1, b: []}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1, '': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromMultiNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromUnevenNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': undefined, '': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{b: 1}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': {b:1}, '': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{b: []}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': {b:[]}, '': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromReverseUnevenNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, SparseReverseUnevenNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': undefined}"));
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromSparseEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:1}");
- BSONObjSet expectedKeys;
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[{c:1}]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromSparseEmptyArraySecond) {
- BSONObj keyPattern = fromjson("{z: 1, 'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:1}");
- BSONObjSet expectedKeys;
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[{c:1}]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
- }
-
- TEST(BtreeKeyGeneratorTest, SparseNonObjectMissingNestedField) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[1]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[1,{b:1}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromIndexedArrayIndex) {
- BSONObj keyPattern = fromjson("{'a.0': 1}");
- BSONObj genKeysFrom = fromjson("{a:[1]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[1]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': [1]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:{'0':1}}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{'0':1}]}");
- expectedKeys.clear();
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
-
- genKeysFrom = fromjson("{a:[1,{'0':2}]}");
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleIndexedArrayIndex) {
- BSONObj keyPattern = fromjson("{'a.0.0': 1}");
- BSONObj genKeysFrom = fromjson("{a:[[1]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[[]]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromObjectWithinArray) {
- BSONObj keyPattern = fromjson("{'a.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:1}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{b:[1]}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{b:[[1]]}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': [1]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[{b:1}]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[{b:[1]}]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[{b:[[1]]}]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': [1]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[{b:[]}]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithinObjectWithinArray) {
- BSONObj keyPattern = fromjson("{'a.0.b.0': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:[1]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, ParallelArraysInNestedObjects) {
- BSONObj keyPattern = fromjson("{'a.a': 1, 'b.a': 1}");
- BSONObj genKeysFrom = fromjson("{a:{a:[1]}, b:{a:[1]}}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, ParallelArraysUneven) {
- BSONObj keyPattern = fromjson("{'b.a': 1, 'a': 1}");
- BSONObj genKeysFrom = fromjson("{b:{a:[1]}, a:[1,2]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallel) {
- BSONObj keyPattern = fromjson("{'a.b.c': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4]}}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- expectedKeys.insert(fromjson("{'': 4}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallelCompound) {
- BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4], d: 5}}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': null}"));
- expectedKeys.insert(fromjson("{'': 3, '': 5}"));
- expectedKeys.insert(fromjson("{'': 4, '': 5}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysComplexNestedArrays) {
- BSONObj keyPattern = fromjson(
- "{'a.b.c.d': 1, 'a.g': 1, 'a.b.f': 1, 'a.b.c': 1, 'a.b.e': 1}");
- BSONObj genKeysFrom = fromjson(
- "{a: [1, {b: [2, {c: [3, {d: 1}], e: 4}, 5, {f: 6}], g: 7}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'':null, '':null, '':null, '':null, '':null}"));
- expectedKeys.insert(fromjson("{'':null, '':7, '':null, '':null, '':null}"));
- expectedKeys.insert(fromjson("{'':null, '':7, '':null, '':3, '':4}"));
- expectedKeys.insert(fromjson("{'':null, '':7, '':6, '':null, '':null}"));
- expectedKeys.insert(fromjson("{'':1, '':7, '':null, '':{d: 1}, '':4}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Should future index versions recursively index nested arrays?
- TEST(BtreeKeyGeneratorTest, GetKeys2DArray) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{a: [[2]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': [2]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Should parallel indexed arrays be allowed? If not, should empty
- // or single-element arrays be considered for the parallel array check?
- TEST(BtreeKeyGeneratorTest, GetKeysParallelEmptyArrays) {
- BSONObj keyPattern = fromjson("{a: 1, b: 1}");
- BSONObj genKeysFrom = fromjson("{a: [], b: []}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmpty) {
- BSONObj keyPattern = fromjson("{a: 1, b: 1}");
- BSONObj genKeysFrom = fromjson("{a: [], b: [1, 2, 3]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmptyNested) {
- BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [{c: [1, 2, 3], d: []}]}]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternMissingElement) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{'2': 5}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 5}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray2) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], [3, 4, 6], [0, 1, 2]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': [0, 1, 2]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray3) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{'0': 1, '1': 2, '2': 5}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 5}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray4) {
- BSONObj keyPattern = fromjson("{'a.b.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [[1, 2, 5]]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray5) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], {'2': 6}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- expectedKeys.insert(fromjson("{'': 6}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetNullKeyNestedArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysUnevenNestedArrays) {
- BSONObj keyPattern = fromjson("{a: 1, 'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, {b: [2, 3, 4]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': null}"));
- expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 2}"));
- expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 3}"));
- expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 4}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Should we define better semantics for future index versions in the case of
- // repeated field names?
- TEST(BtreeKeyGeneratorTest, GetKeysRepeatedFieldName) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{a: 2, a: 3}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Future index versions may want different or at least more consistent
- // handling of empty path components.
- TEST(BtreeKeyGeneratorTest, GetKeysEmptyPathPiece) {
- BSONObj keyPattern = fromjson("{'a..c': 1}");
- BSONObj genKeysFrom = fromjson("{a: {'': [{c: 1}, {c: 2}]}}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Future index versions may want different or at least more consistent
- // handling of empty path components.
- TEST(BtreeKeyGeneratorTest, GetKeysLastPathPieceEmpty) {
- BSONObj keyPattern = fromjson("{'a.': 1}");
-
- BSONObj genKeysFrom = fromjson("{a: 2}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: {'': 2}}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': {'': 2}}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty) {
- BSONObj keyPattern = fromjson("{'.a': 1}");
- BSONObj genKeysFrom = fromjson("{a: 2}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty2) {
- BSONObj keyPattern = fromjson("{'.a': 1}");
- BSONObj genKeysFrom = fromjson("{'': [{a: [1, 2, 3]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternParallelArrays) {
- BSONObj keyPattern = fromjson("{a: 1, 'b.0': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1], b: [2]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays) {
- BSONObj keyPattern = fromjson("{'a.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays2) {
- BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays3) {
- BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[[ {b: 1} ]]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays4) {
- BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[[[ {b: 1} ]]]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays5) {
- BSONObj keyPattern = fromjson("{'a.b.1': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [1, 2]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays6) {
- BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: 3}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': {b:3}, '': 3, '': 1, '': null, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:3}, '': 3, '': 2, '': null, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays7) {
- BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: {'0': 3}}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 1, '': 3, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 2, '': 3, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
+ //
+ // Step 2: ask 'keyGen' to generate index keys for the object 'obj'.
+ //
+ BSONObjSet actualKeys;
+ keyGen->getKeys(obj, &actualKeys);
-} // namespace
+ //
+ // Step 3: check that the results match the expected result.
+ //
+ bool match = keysetsMatch(expectedKeys, actualKeys);
+ if (!match) {
+ cout << "Expected: " << dumpKeyset(expectedKeys) << ", "
+ << "Actual: " << dumpKeyset(actualKeys) << endl;
+ }
+
+ return match;
+}
+
+//
+// Unit tests
+//
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromObjectSimple) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{b: 4, a: 5}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 5}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromObjectDotted) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: {b: 4}, c: 'foo'}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 4}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySimple) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, 3]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArrayFirstElement) {
+ BSONObj keyPattern = fromjson("{a: 1, b: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: 2}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': 2}"));
+ expectedKeys.insert(fromjson("{'': 2, '': 2}"));
+ expectedKeys.insert(fromjson("{'': 3, '': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySecondElement) {
+ BSONObj keyPattern = fromjson("{first: 1, a: 1}");
+ BSONObj genKeysFrom = fromjson("{first: 5, a: [1, 2, 3]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 5, '': 1}"));
+ expectedKeys.insert(fromjson("{'': 5, '': 2}"));
+ expectedKeys.insert(fromjson("{'': 5, '': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromSecondLevelArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: {b: [1, 2, 3]}}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysBasic) {
+ BSONObj keyPattern = fromjson("{'a': 1, 'b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: [1, 2, 3]}}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectBasic) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectCompoundIndex) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, d: 99}");
+ BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}], d: 99}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': 99}"));
+ expectedKeys.insert(fromjson("{'': 2, '': 99}"));
+ expectedKeys.insert(fromjson("{'': 3, '': 99}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectSingleMissing) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectMissing) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {foo: 41}, {foo: 41}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysMissingField) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{b: 1}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysSubobjectMissing) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromCompound) {
+ BSONObj keyPattern = fromjson("{x: 1, y: 1}");
+ BSONObj genKeysFrom = fromjson("{x: 'a', y: 'b'}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 'a', '': 'b'}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromCompoundMissing) {
+ BSONObj keyPattern = fromjson("{x: 1, y: 1}");
+ BSONObj genKeysFrom = fromjson("{x: 'a'}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 'a', '': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubelementComplex) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:[2]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysComplex) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:[1],c:[2]}]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysAlternateMissing) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:1},{c:2}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': 2}"));
+ expectedKeys.insert(fromjson("{'': 1, '': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromMultiComplex) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:1},{b:[1,2,3]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysArrayEmpty) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{a:[1,2]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: [1]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: null}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: []}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleArray) {
+ BSONObj keyPattern = fromjson("{a: 1, a: 1}");
+ BSONObj genKeysFrom = fromjson("{a:[1,2]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': 1}"));
+ expectedKeys.insert(fromjson("{'': 2, '': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleEmptyArray) {
+ BSONObj keyPattern = fromjson("{a: 1, a: 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': undefined, '': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromMultiEmptyArray) {
+ BSONObj keyPattern = fromjson("{a: 1, b: 1}");
+ BSONObj genKeysFrom = fromjson("{a: 1, b: [1, 2]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': 1}"));
+ expectedKeys.insert(fromjson("{'': 1, '': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: 1, b: [1]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1, '': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: 1, b: []}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1, '': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromMultiNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromUnevenNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': undefined, '': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{b: 1}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': {b:1}, '': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{b: []}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': {b:[]}, '': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromReverseUnevenNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, SparseReverseUnevenNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': undefined}"));
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromSparseEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:1}");
+ BSONObjSet expectedKeys;
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[{c:1}]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromSparseEmptyArraySecond) {
+ BSONObj keyPattern = fromjson("{z: 1, 'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:1}");
+ BSONObjSet expectedKeys;
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[{c:1}]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+}
+
+TEST(BtreeKeyGeneratorTest, SparseNonObjectMissingNestedField) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[1]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[1,{b:1}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromIndexedArrayIndex) {
+ BSONObj keyPattern = fromjson("{'a.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[1]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[1]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': [1]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:{'0':1}}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{'0':1}]}");
+ expectedKeys.clear();
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+
+ genKeysFrom = fromjson("{a:[1,{'0':2}]}");
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleIndexedArrayIndex) {
+ BSONObj keyPattern = fromjson("{'a.0.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[[1]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[[]]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromObjectWithinArray) {
+ BSONObj keyPattern = fromjson("{'a.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:1}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{b:[1]}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{b:[[1]]}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': [1]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[{b:1}]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[{b:[1]}]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[{b:[[1]]}]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': [1]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[{b:[]}]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithinObjectWithinArray) {
+ BSONObj keyPattern = fromjson("{'a.0.b.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:[1]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, ParallelArraysInNestedObjects) {
+ BSONObj keyPattern = fromjson("{'a.a': 1, 'b.a': 1}");
+ BSONObj genKeysFrom = fromjson("{a:{a:[1]}, b:{a:[1]}}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, ParallelArraysUneven) {
+ BSONObj keyPattern = fromjson("{'b.a': 1, 'a': 1}");
+ BSONObj genKeysFrom = fromjson("{b:{a:[1]}, a:[1,2]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallel) {
+ BSONObj keyPattern = fromjson("{'a.b.c': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4]}}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ expectedKeys.insert(fromjson("{'': 4}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallelCompound) {
+ BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4], d: 5}}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': null}"));
+ expectedKeys.insert(fromjson("{'': 3, '': 5}"));
+ expectedKeys.insert(fromjson("{'': 4, '': 5}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysComplexNestedArrays) {
+ BSONObj keyPattern = fromjson("{'a.b.c.d': 1, 'a.g': 1, 'a.b.f': 1, 'a.b.c': 1, 'a.b.e': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, {b: [2, {c: [3, {d: 1}], e: 4}, 5, {f: 6}], g: 7}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'':null, '':null, '':null, '':null, '':null}"));
+ expectedKeys.insert(fromjson("{'':null, '':7, '':null, '':null, '':null}"));
+ expectedKeys.insert(fromjson("{'':null, '':7, '':null, '':3, '':4}"));
+ expectedKeys.insert(fromjson("{'':null, '':7, '':6, '':null, '':null}"));
+ expectedKeys.insert(fromjson("{'':1, '':7, '':null, '':{d: 1}, '':4}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Should future index versions recursively index nested arrays?
+TEST(BtreeKeyGeneratorTest, GetKeys2DArray) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[2]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': [2]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Should parallel indexed arrays be allowed? If not, should empty
+// or single-element arrays be considered for the parallel array check?
+TEST(BtreeKeyGeneratorTest, GetKeysParallelEmptyArrays) {
+ BSONObj keyPattern = fromjson("{a: 1, b: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [], b: []}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmpty) {
+ BSONObj keyPattern = fromjson("{a: 1, b: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [], b: [1, 2, 3]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmptyNested) {
+ BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [{c: [1, 2, 3], d: []}]}]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternMissingElement) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{'2': 5}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 5}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray2) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], [3, 4, 6], [0, 1, 2]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': [0, 1, 2]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray3) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{'0': 1, '1': 2, '2': 5}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 5}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray4) {
+ BSONObj keyPattern = fromjson("{'a.b.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [[1, 2, 5]]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray5) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], {'2': 6}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ expectedKeys.insert(fromjson("{'': 6}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetNullKeyNestedArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysUnevenNestedArrays) {
+ BSONObj keyPattern = fromjson("{a: 1, 'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, {b: [2, 3, 4]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': null}"));
+ expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 2}"));
+ expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 3}"));
+ expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 4}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Should we define better semantics for future index versions in the case of
+// repeated field names?
+TEST(BtreeKeyGeneratorTest, GetKeysRepeatedFieldName) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{a: 2, a: 3}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Future index versions may want different or at least more consistent
+// handling of empty path components.
+TEST(BtreeKeyGeneratorTest, GetKeysEmptyPathPiece) {
+ BSONObj keyPattern = fromjson("{'a..c': 1}");
+ BSONObj genKeysFrom = fromjson("{a: {'': [{c: 1}, {c: 2}]}}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Future index versions may want different or at least more consistent
+// handling of empty path components.
+TEST(BtreeKeyGeneratorTest, GetKeysLastPathPieceEmpty) {
+ BSONObj keyPattern = fromjson("{'a.': 1}");
+
+ BSONObj genKeysFrom = fromjson("{a: 2}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: {'': 2}}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': {'': 2}}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty) {
+ BSONObj keyPattern = fromjson("{'.a': 1}");
+ BSONObj genKeysFrom = fromjson("{a: 2}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty2) {
+ BSONObj keyPattern = fromjson("{'.a': 1}");
+ BSONObj genKeysFrom = fromjson("{'': [{a: [1, 2, 3]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternParallelArrays) {
+ BSONObj keyPattern = fromjson("{a: 1, 'b.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1], b: [2]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays) {
+ BSONObj keyPattern = fromjson("{'a.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays2) {
+ BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays3) {
+ BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[[ {b: 1} ]]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays4) {
+ BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[[[ {b: 1} ]]]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays5) {
+ BSONObj keyPattern = fromjson("{'a.b.1': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [1, 2]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays6) {
+ BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: 3}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': {b:3}, '': 3, '': 1, '': null, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:3}, '': 3, '': 2, '': null, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays7) {
+ BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: {'0': 3}}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 1, '': 3, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 2, '': 3, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+} // namespace
diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp
index 1202b77ce0f..559175444bf 100644
--- a/src/mongo/db/index/expression_keys_private.cpp
+++ b/src/mongo/db/index/expression_keys_private.cpp
@@ -48,471 +48,475 @@
namespace {
- using namespace mongo;
-
- //
- // Helper functions for getHaystackKeys
- //
-
- /**
- * Build a new BSONObj with root in it. If e is non-empty, append that to the key.
- * Insert the BSONObj into keys.
- * Used by getHaystackKeys.
- */
- void addKey(const string& root, const BSONElement& e, BSONObjSet* keys) {
- BSONObjBuilder buf;
- buf.append("", root);
-
- if (e.eoo())
- buf.appendNull("");
- else
- buf.appendAs(e, "");
-
- keys->insert(buf.obj());
- }
+using namespace mongo;
- //
- // Helper functions for getS2Keys
- //
+//
+// Helper functions for getHaystackKeys
+//
- static void S2KeysFromRegion(S2RegionCoverer *coverer, const S2Region &region,
- vector<string> *out) {
- vector<S2CellId> covering;
- coverer->GetCovering(region, &covering);
- for (size_t i = 0; i < covering.size(); ++i) {
- out->push_back(covering[i].toString());
- }
+/**
+ * Build a new BSONObj with root in it. If e is non-empty, append that to the key.
+ * Insert the BSONObj into keys.
+ * Used by getHaystackKeys.
+ */
+void addKey(const string& root, const BSONElement& e, BSONObjSet* keys) {
+ BSONObjBuilder buf;
+ buf.append("", root);
+
+ if (e.eoo())
+ buf.appendNull("");
+ else
+ buf.appendAs(e, "");
+
+ keys->insert(buf.obj());
+}
+
+//
+// Helper functions for getS2Keys
+//
+
+static void S2KeysFromRegion(S2RegionCoverer* coverer,
+ const S2Region& region,
+ vector<string>* out) {
+ vector<S2CellId> covering;
+ coverer->GetCovering(region, &covering);
+ for (size_t i = 0; i < covering.size(); ++i) {
+ out->push_back(covering[i].toString());
}
+}
- Status S2GetKeysForElement(const BSONElement& element,
- const S2IndexingParams& params,
- vector<string>* out) {
- GeometryContainer geoContainer;
- Status status = geoContainer.parseFromStorage(element);
- if (!status.isOK()) return status;
-
- S2RegionCoverer coverer;
- params.configureCoverer(&coverer);
+Status S2GetKeysForElement(const BSONElement& element,
+ const S2IndexingParams& params,
+ vector<string>* out) {
+ GeometryContainer geoContainer;
+ Status status = geoContainer.parseFromStorage(element);
+ if (!status.isOK())
+ return status;
- // Don't index big polygon
- if (geoContainer.getNativeCRS() == STRICT_SPHERE) {
- return Status(ErrorCodes::BadValue, "can't index geometry with strict winding order");
- }
+ S2RegionCoverer coverer;
+ params.configureCoverer(&coverer);
- // Only certain geometries can be indexed in the old index format S2_INDEX_VERSION_1. See
- // definition of S2IndexVersion for details.
- if (params.indexVersion == S2_INDEX_VERSION_1 && !geoContainer.isSimpleContainer()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "given geometry can't be indexed in the old index format");
- }
-
- // Project the geometry into spherical space
- if (!geoContainer.supportsProject(SPHERE)) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "can't project geometry into spherical CRS: "
- << element.toString(false));
- }
- geoContainer.projectInto(SPHERE);
-
- invariant(geoContainer.hasS2Region());
+ // Don't index big polygon
+ if (geoContainer.getNativeCRS() == STRICT_SPHERE) {
+ return Status(ErrorCodes::BadValue, "can't index geometry with strict winding order");
+ }
- S2KeysFromRegion(&coverer, geoContainer.getS2Region(), out);
- return Status::OK();
+ // Only certain geometries can be indexed in the old index format S2_INDEX_VERSION_1. See
+ // definition of S2IndexVersion for details.
+ if (params.indexVersion == S2_INDEX_VERSION_1 && !geoContainer.isSimpleContainer()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "given geometry can't be indexed in the old index format");
}
+ // Project the geometry into spherical space
+ if (!geoContainer.supportsProject(SPHERE)) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "can't project geometry into spherical CRS: "
+ << element.toString(false));
+ }
+ geoContainer.projectInto(SPHERE);
- /**
- * Get the index keys for elements that are GeoJSON.
- * Used by getS2Keys.
- */
- void getS2GeoKeys(const BSONObj& document, const BSONElementSet& elements,
- const S2IndexingParams& params,
- BSONObjSet* out) {
- for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) {
- vector<string> cells;
- Status status = S2GetKeysForElement(*i, params, &cells);
- uassert(16755, str::stream() << "Can't extract geo keys: " << document << " "
- << status.reason(), status.isOK());
+ invariant(geoContainer.hasS2Region());
- uassert(16756, "Unable to generate keys for (likely malformed) geometry: "
- + document.toString(),
- cells.size() > 0);
+ S2KeysFromRegion(&coverer, geoContainer.getS2Region(), out);
+ return Status::OK();
+}
- for (vector<string>::const_iterator it = cells.begin(); it != cells.end(); ++it) {
- BSONObjBuilder b;
- b.append("", *it);
- out->insert(b.obj());
- }
- }
- if (0 == out->size()) {
+/**
+ * Get the index keys for elements that are GeoJSON.
+ * Used by getS2Keys.
+ */
+void getS2GeoKeys(const BSONObj& document,
+ const BSONElementSet& elements,
+ const S2IndexingParams& params,
+ BSONObjSet* out) {
+ for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) {
+ vector<string> cells;
+ Status status = S2GetKeysForElement(*i, params, &cells);
+ uassert(16755,
+ str::stream() << "Can't extract geo keys: " << document << " " << status.reason(),
+ status.isOK());
+
+ uassert(16756,
+ "Unable to generate keys for (likely malformed) geometry: " + document.toString(),
+ cells.size() > 0);
+
+ for (vector<string>::const_iterator it = cells.begin(); it != cells.end(); ++it) {
BSONObjBuilder b;
- b.appendNull("");
+ b.append("", *it);
out->insert(b.obj());
}
}
- /**
- * Expands array and appends items to 'out'.
- * Used by getOneLiteralKey.
- */
- void getS2LiteralKeysArray(const BSONObj& obj, BSONObjSet* out) {
- BSONObjIterator objIt(obj);
- if (!objIt.more()) {
- // Empty arrays are indexed as undefined.
- BSONObjBuilder b;
- b.appendUndefined("");
- out->insert(b.obj());
- } else {
- // Non-empty arrays are exploded.
- while (objIt.more()) {
- BSONObjBuilder b;
- b.appendAs(objIt.next(), "");
- out->insert(b.obj());
- }
- }
+ if (0 == out->size()) {
+ BSONObjBuilder b;
+ b.appendNull("");
+ out->insert(b.obj());
}
+}
- /**
- * If 'elt' is an array, expands elt and adds items to 'out'.
- * Otherwise, adds 'elt' as a single element.
- * Used by getLiteralKeys.
- */
- void getS2OneLiteralKey(const BSONElement& elt, BSONObjSet* out) {
- if (Array == elt.type()) {
- getS2LiteralKeysArray(elt.Obj(), out);
- } else {
- // One thing, not an array, index as-is.
+/**
+ * Expands array and appends items to 'out'.
+ * Used by getOneLiteralKey.
+ */
+void getS2LiteralKeysArray(const BSONObj& obj, BSONObjSet* out) {
+ BSONObjIterator objIt(obj);
+ if (!objIt.more()) {
+ // Empty arrays are indexed as undefined.
+ BSONObjBuilder b;
+ b.appendUndefined("");
+ out->insert(b.obj());
+ } else {
+ // Non-empty arrays are exploded.
+ while (objIt.more()) {
BSONObjBuilder b;
- b.appendAs(elt, "");
+ b.appendAs(objIt.next(), "");
out->insert(b.obj());
}
}
+}
- /**
- * elements is a non-geo field. Add the values literally, expanding arrays.
- * Used by getS2Keys.
- */
- void getS2LiteralKeys(const BSONElementSet& elements, BSONObjSet* out) {
- if (0 == elements.size()) {
- // Missing fields are indexed as null.
- BSONObjBuilder b;
- b.appendNull("");
- out->insert(b.obj());
- } else {
- for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) {
- getS2OneLiteralKey(*i, out);
- }
+/**
+ * If 'elt' is an array, expands elt and adds items to 'out'.
+ * Otherwise, adds 'elt' as a single element.
+ * Used by getLiteralKeys.
+ */
+void getS2OneLiteralKey(const BSONElement& elt, BSONObjSet* out) {
+ if (Array == elt.type()) {
+ getS2LiteralKeysArray(elt.Obj(), out);
+ } else {
+ // One thing, not an array, index as-is.
+ BSONObjBuilder b;
+ b.appendAs(elt, "");
+ out->insert(b.obj());
+ }
+}
+
+/**
+ * elements is a non-geo field. Add the values literally, expanding arrays.
+ * Used by getS2Keys.
+ */
+void getS2LiteralKeys(const BSONElementSet& elements, BSONObjSet* out) {
+ if (0 == elements.size()) {
+ // Missing fields are indexed as null.
+ BSONObjBuilder b;
+ b.appendNull("");
+ out->insert(b.obj());
+ } else {
+ for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) {
+ getS2OneLiteralKey(*i, out);
}
}
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::pair;
- using std::string;
- using std::vector;
-
- // static
- void ExpressionKeysPrivate::get2DKeys(const BSONObj &obj,
- const TwoDIndexingParams& params,
- BSONObjSet* keys,
- std::vector<BSONObj>* locs) {
- BSONElementMSet bSet;
+using std::pair;
+using std::string;
+using std::vector;
- // Get all the nested location fields, but don't return individual elements from
- // the last array, if it exists.
- obj.getFieldsDotted(params.geo.c_str(), bSet, false);
+// static
+void ExpressionKeysPrivate::get2DKeys(const BSONObj& obj,
+ const TwoDIndexingParams& params,
+ BSONObjSet* keys,
+ std::vector<BSONObj>* locs) {
+ BSONElementMSet bSet;
- if (bSet.empty())
- return;
+ // Get all the nested location fields, but don't return individual elements from
+ // the last array, if it exists.
+ obj.getFieldsDotted(params.geo.c_str(), bSet, false);
- for (BSONElementMSet::iterator setI = bSet.begin(); setI != bSet.end(); ++setI) {
- BSONElement geo = *setI;
+ if (bSet.empty())
+ return;
- if (geo.eoo() || !geo.isABSONObj())
- continue;
+ for (BSONElementMSet::iterator setI = bSet.begin(); setI != bSet.end(); ++setI) {
+ BSONElement geo = *setI;
- //
- // Grammar for location lookup:
- // locs ::= [loc,loc,...,loc]|{<k>:loc,<k>:loc,...,<k>:loc}|loc
- // loc ::= { <k1> : #, <k2> : # }|[#, #]|{}
- //
- // Empty locations are ignored, preserving single-location semantics
- //
+ if (geo.eoo() || !geo.isABSONObj())
+ continue;
- BSONObj embed = geo.embeddedObject();
- if (embed.isEmpty())
- continue;
+ //
+ // Grammar for location lookup:
+ // locs ::= [loc,loc,...,loc]|{<k>:loc,<k>:loc,...,<k>:loc}|loc
+ // loc ::= { <k1> : #, <k2> : # }|[#, #]|{}
+ //
+ // Empty locations are ignored, preserving single-location semantics
+ //
- // Differentiate between location arrays and locations
- // by seeing if the first element value is a number
- bool singleElement = embed.firstElement().isNumber();
+ BSONObj embed = geo.embeddedObject();
+ if (embed.isEmpty())
+ continue;
- BSONObjIterator oi(embed);
+ // Differentiate between location arrays and locations
+ // by seeing if the first element value is a number
+ bool singleElement = embed.firstElement().isNumber();
- while (oi.more()) {
- BSONObj locObj;
+ BSONObjIterator oi(embed);
- if (singleElement) {
- locObj = embed;
- } else {
- BSONElement locElement = oi.next();
+ while (oi.more()) {
+ BSONObj locObj;
- uassert(16804, mongoutils::str::stream() <<
- "location object expected, location array not in correct format",
- locElement.isABSONObj());
+ if (singleElement) {
+ locObj = embed;
+ } else {
+ BSONElement locElement = oi.next();
- locObj = locElement.embeddedObject();
- if(locObj.isEmpty())
- continue;
- }
+ uassert(16804,
+ mongoutils::str::stream()
+ << "location object expected, location array not in correct format",
+ locElement.isABSONObj());
- BSONObjBuilder b(64);
+ locObj = locElement.embeddedObject();
+ if (locObj.isEmpty())
+ continue;
+ }
- // Remember the actual location object if needed
- if (locs)
- locs->push_back(locObj);
+ BSONObjBuilder b(64);
- // Stop if we don't need to get anything but location objects
- if (!keys) {
- if (singleElement) break;
- else continue;
- }
+ // Remember the actual location object if needed
+ if (locs)
+ locs->push_back(locObj);
- params.geoHashConverter->hash(locObj, &obj).appendHashMin(&b, "");
-
- // Go through all the other index keys
- for (vector<pair<string, int> >::const_iterator i = params.other.begin();
- i != params.other.end(); ++i) {
- // Get *all* fields for the index key
- BSONElementSet eSet;
- obj.getFieldsDotted(i->first, eSet);
-
- if (eSet.size() == 0)
- b.appendNull("");
- else if (eSet.size() == 1)
- b.appendAs(*(eSet.begin()), "");
- else {
- // If we have more than one key, store as an array of the objects
- BSONArrayBuilder aBuilder;
-
- for (BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end();
- ++ei) {
- aBuilder.append(*ei);
- }
+ // Stop if we don't need to get anything but location objects
+ if (!keys) {
+ if (singleElement)
+ break;
+ else
+ continue;
+ }
- b.append("", aBuilder.arr());
+ params.geoHashConverter->hash(locObj, &obj).appendHashMin(&b, "");
+
+ // Go through all the other index keys
+ for (vector<pair<string, int>>::const_iterator i = params.other.begin();
+ i != params.other.end();
+ ++i) {
+ // Get *all* fields for the index key
+ BSONElementSet eSet;
+ obj.getFieldsDotted(i->first, eSet);
+
+ if (eSet.size() == 0)
+ b.appendNull("");
+ else if (eSet.size() == 1)
+ b.appendAs(*(eSet.begin()), "");
+ else {
+ // If we have more than one key, store as an array of the objects
+ BSONArrayBuilder aBuilder;
+
+ for (BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end(); ++ei) {
+ aBuilder.append(*ei);
}
+
+ b.append("", aBuilder.arr());
}
- keys->insert(b.obj());
- if(singleElement) break;
}
+ keys->insert(b.obj());
+ if (singleElement)
+ break;
}
}
-
- // static
- void ExpressionKeysPrivate::getFTSKeys(const BSONObj &obj,
- const fts::FTSSpec& ftsSpec,
- BSONObjSet* keys) {
- fts::FTSIndexFormat::getKeys(ftsSpec, obj, keys);
+}
+
+// static
+void ExpressionKeysPrivate::getFTSKeys(const BSONObj& obj,
+ const fts::FTSSpec& ftsSpec,
+ BSONObjSet* keys) {
+ fts::FTSIndexFormat::getKeys(ftsSpec, obj, keys);
+}
+
+// static
+void ExpressionKeysPrivate::getHashKeys(const BSONObj& obj,
+ const string& hashedField,
+ HashSeed seed,
+ int hashVersion,
+ bool isSparse,
+ BSONObjSet* keys) {
+ const char* cstr = hashedField.c_str();
+ BSONElement fieldVal = obj.getFieldDottedOrArray(cstr);
+ uassert(16766,
+ "Error: hashed indexes do not currently support array values",
+ fieldVal.type() != Array);
+
+ if (!fieldVal.eoo()) {
+ BSONObj key = BSON("" << makeSingleHashKey(fieldVal, seed, hashVersion));
+ keys->insert(key);
+ } else if (!isSparse) {
+ BSONObj nullObj = BSON("" << BSONNULL);
+ keys->insert(BSON("" << makeSingleHashKey(nullObj.firstElement(), seed, hashVersion)));
}
-
- // static
- void ExpressionKeysPrivate::getHashKeys(const BSONObj& obj,
- const string& hashedField,
- HashSeed seed,
- int hashVersion,
- bool isSparse,
+}
+
+// static
+long long int ExpressionKeysPrivate::makeSingleHashKey(const BSONElement& e, HashSeed seed, int v) {
+ massert(16767, "Only HashVersion 0 has been defined", v == 0);
+ return BSONElementHasher::hash64(e, seed);
+}
+
+// static
+void ExpressionKeysPrivate::getHaystackKeys(const BSONObj& obj,
+ const std::string& geoField,
+ const std::vector<std::string>& otherFields,
+ double bucketSize,
BSONObjSet* keys) {
+ BSONElement loc = obj.getFieldDotted(geoField);
- const char* cstr = hashedField.c_str();
- BSONElement fieldVal = obj.getFieldDottedOrArray(cstr);
- uassert(16766, "Error: hashed indexes do not currently support array values",
- fieldVal.type() != Array );
-
- if (!fieldVal.eoo()) {
- BSONObj key = BSON( "" << makeSingleHashKey(fieldVal, seed, hashVersion));
- keys->insert(key);
- }
- else if (!isSparse) {
- BSONObj nullObj = BSON("" << BSONNULL);
- keys->insert(BSON("" << makeSingleHashKey(nullObj.firstElement(), seed, hashVersion)));
- }
+ if (loc.eoo()) {
+ return;
}
- // static
- long long int ExpressionKeysPrivate::makeSingleHashKey(const BSONElement& e,
- HashSeed seed,
- int v) {
- massert(16767, "Only HashVersion 0 has been defined" , v == 0 );
- return BSONElementHasher::hash64(e, seed);
- }
-
- // static
- void ExpressionKeysPrivate::getHaystackKeys(const BSONObj& obj,
- const std::string& geoField,
- const std::vector<std::string>& otherFields,
- double bucketSize,
- BSONObjSet* keys) {
-
- BSONElement loc = obj.getFieldDotted(geoField);
-
- if (loc.eoo()) { return; }
-
- // NOTE: We explicitly test nFields >= 2 to support legacy users who may have indexed
- // (intentionally or unintentionally) objects/arrays with more than two fields.
- uassert(16775, str::stream() << "cannot extract [lng, lat] array or object from " << obj,
+ // NOTE: We explicitly test nFields >= 2 to support legacy users who may have indexed
+ // (intentionally or unintentionally) objects/arrays with more than two fields.
+ uassert(16775,
+ str::stream() << "cannot extract [lng, lat] array or object from " << obj,
loc.isABSONObj() && loc.Obj().nFields() >= 2);
- string root;
- {
- BSONObjIterator i(loc.Obj());
- BSONElement x = i.next();
- BSONElement y = i.next();
- root = makeHaystackString(hashHaystackElement(x, bucketSize),
- hashHaystackElement(y, bucketSize));
- }
-
- verify(otherFields.size() == 1);
-
- BSONElementSet all;
-
- // This is getFieldsDotted (plural not singular) since the object we're indexing
- // may be an array.
- obj.getFieldsDotted(otherFields[0], all);
-
- if (all.size() == 0) {
- // We're indexing a document that doesn't have the secondary non-geo field present.
- // XXX: do we want to add this even if all.size() > 0? result:empty search terms
- // match everything instead of only things w/empty search terms)
- addKey(root, BSONElement(), keys);
- } else {
- // Ex:If our secondary field is type: "foo" or type: {a:"foo", b:"bar"},
- // all.size()==1. We can query on the complete field.
- // Ex: If our secondary field is type: ["A", "B"] all.size()==2 and all has values
- // "A" and "B". The query looks for any of the fields in the array.
- for (BSONElementSet::iterator i = all.begin(); i != all.end(); ++i) {
- addKey(root, *i, keys);
- }
- }
- }
-
- // static
- int ExpressionKeysPrivate::hashHaystackElement(const BSONElement& e, double bucketSize) {
- uassert(16776, "geo field is not a number", e.isNumber());
- double d = e.numberDouble();
- d += 180;
- d /= bucketSize;
- return static_cast<int>(d);
+ string root;
+ {
+ BSONObjIterator i(loc.Obj());
+ BSONElement x = i.next();
+ BSONElement y = i.next();
+ root = makeHaystackString(hashHaystackElement(x, bucketSize),
+ hashHaystackElement(y, bucketSize));
}
- // static
- std::string ExpressionKeysPrivate::makeHaystackString(int hashedX, int hashedY) {
- mongoutils::str::stream ss;
- ss << hashedX << "_" << hashedY;
- return ss;
+ verify(otherFields.size() == 1);
+
+ BSONElementSet all;
+
+ // This is getFieldsDotted (plural not singular) since the object we're indexing
+ // may be an array.
+ obj.getFieldsDotted(otherFields[0], all);
+
+ if (all.size() == 0) {
+ // We're indexing a document that doesn't have the secondary non-geo field present.
+        // XXX: do we want to add this even if all.size() > 0? (result: empty search terms
+        // match everything instead of only things w/ empty search terms.)
+ addKey(root, BSONElement(), keys);
+ } else {
+        // Ex: If our secondary field is type: "foo" or type: {a:"foo", b:"bar"},
+        // all.size()==1. We can query on the complete field.
+        // Ex: If our secondary field is type: ["A", "B"], all.size()==2 and all has values
+        // "A" and "B". The query looks for any of the fields in the array.
+ for (BSONElementSet::iterator i = all.begin(); i != all.end(); ++i) {
+ addKey(root, *i, keys);
+ }
}
-
- void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
- const BSONObj& keyPattern,
- const S2IndexingParams& params,
- BSONObjSet* keys) {
- BSONObjSet keysToAdd;
-
- // Does one of our documents have a geo field?
- bool haveGeoField = false;
-
- // We output keys in the same order as the fields we index.
- BSONObjIterator i(keyPattern);
- while (i.more()) {
- BSONElement e = i.next();
-
- // First, we get the keys that this field adds. Either they're added literally from
- // the value of the field, or they're transformed if the field is geo.
- BSONElementSet fieldElements;
- // false means Don't expand the last array, duh.
- obj.getFieldsDotted(e.fieldName(), fieldElements, false);
-
- BSONObjSet keysForThisField;
- if (IndexNames::GEO_2DSPHERE == e.valuestr()) {
- if (S2_INDEX_VERSION_2 == params.indexVersion) {
- // For V2,
- // geo: null,
- // geo: undefined
- // geo: []
- // should all behave like there is no geo field. So we look for these cases and
- // throw out the field elements if we find them.
- if (1 == fieldElements.size()) {
- BSONElement elt = *fieldElements.begin();
- // Get the :null and :undefined cases.
- if (elt.isNull() || Undefined == elt.type()) {
+}
+
+// static
+int ExpressionKeysPrivate::hashHaystackElement(const BSONElement& e, double bucketSize) {
+ uassert(16776, "geo field is not a number", e.isNumber());
+ double d = e.numberDouble();
+ d += 180;
+ d /= bucketSize;
+ return static_cast<int>(d);
+}
+
+// static
+std::string ExpressionKeysPrivate::makeHaystackString(int hashedX, int hashedY) {
+ mongoutils::str::stream ss;
+ ss << hashedX << "_" << hashedY;
+ return ss;
+}
+
+void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
+ const BSONObj& keyPattern,
+ const S2IndexingParams& params,
+ BSONObjSet* keys) {
+ BSONObjSet keysToAdd;
+
+ // Does one of our documents have a geo field?
+ bool haveGeoField = false;
+
+ // We output keys in the same order as the fields we index.
+ BSONObjIterator i(keyPattern);
+ while (i.more()) {
+ BSONElement e = i.next();
+
+ // First, we get the keys that this field adds. Either they're added literally from
+ // the value of the field, or they're transformed if the field is geo.
+ BSONElementSet fieldElements;
+ // false means Don't expand the last array, duh.
+ obj.getFieldsDotted(e.fieldName(), fieldElements, false);
+
+ BSONObjSet keysForThisField;
+ if (IndexNames::GEO_2DSPHERE == e.valuestr()) {
+ if (S2_INDEX_VERSION_2 == params.indexVersion) {
+ // For V2,
+ // geo: null,
+ // geo: undefined
+ // geo: []
+ // should all behave like there is no geo field. So we look for these cases and
+ // throw out the field elements if we find them.
+ if (1 == fieldElements.size()) {
+ BSONElement elt = *fieldElements.begin();
+ // Get the :null and :undefined cases.
+ if (elt.isNull() || Undefined == elt.type()) {
+ fieldElements.clear();
+ } else if (elt.isABSONObj()) {
+ // And this is the :[] case.
+ BSONObj obj = elt.Obj();
+ if (0 == obj.nFields()) {
fieldElements.clear();
}
- else if (elt.isABSONObj()) {
- // And this is the :[] case.
- BSONObj obj = elt.Obj();
- if (0 == obj.nFields()) {
- fieldElements.clear();
- }
- }
- }
-
- // V2 2dsphere indices require that at least one geo field to be present in a
- // document in order to index it.
- if (fieldElements.size() > 0) {
- haveGeoField = true;
}
}
- getS2GeoKeys(obj, fieldElements, params, &keysForThisField);
- } else {
- getS2LiteralKeys(fieldElements, &keysForThisField);
+            // V2 2dsphere indices require at least one geo field to be present in a
+            // document in order to index it.
+ if (fieldElements.size() > 0) {
+ haveGeoField = true;
+ }
}
- // We expect there to be the missing field element present in the keys if data is
- // missing. So, this should be non-empty.
- verify(!keysForThisField.empty());
+ getS2GeoKeys(obj, fieldElements, params, &keysForThisField);
+ } else {
+ getS2LiteralKeys(fieldElements, &keysForThisField);
+ }
- // We take the Cartesian product of all of the keys. This requires that we have
- // some keys to take the Cartesian product with. If keysToAdd.empty(), we
- // initialize it.
- if (keysToAdd.empty()) {
- keysToAdd = keysForThisField;
- continue;
- }
+        // We expect the missing-field element to be present in the keys if the data is
+        // missing, so this set should be non-empty.
+ verify(!keysForThisField.empty());
- BSONObjSet updatedKeysToAdd;
- for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end();
- ++it) {
- for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
- newIt!= keysForThisField.end(); ++newIt) {
- BSONObjBuilder b;
- b.appendElements(*it);
- b.append(newIt->firstElement());
- updatedKeysToAdd.insert(b.obj());
- }
- }
- keysToAdd = updatedKeysToAdd;
+ // We take the Cartesian product of all of the keys. This requires that we have
+ // some keys to take the Cartesian product with. If keysToAdd.empty(), we
+ // initialize it.
+ if (keysToAdd.empty()) {
+ keysToAdd = keysForThisField;
+ continue;
}
- // Make sure that if we're V2 there's at least one geo field present in the doc.
- if (S2_INDEX_VERSION_2 == params.indexVersion) {
- if (!haveGeoField) {
- return;
+ BSONObjSet updatedKeysToAdd;
+ for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
+ for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
+ newIt != keysForThisField.end();
+ ++newIt) {
+ BSONObjBuilder b;
+ b.appendElements(*it);
+ b.append(newIt->firstElement());
+ updatedKeysToAdd.insert(b.obj());
}
}
+ keysToAdd = updatedKeysToAdd;
+ }
- if (keysToAdd.size() > params.maxKeysPerInsert) {
- warning() << "Insert of geo object generated a high number of keys."
- << " num keys: " << keysToAdd.size()
- << " obj inserted: " << obj;
+ // Make sure that if we're V2 there's at least one geo field present in the doc.
+ if (S2_INDEX_VERSION_2 == params.indexVersion) {
+ if (!haveGeoField) {
+ return;
}
+ }
- *keys = keysToAdd;
+ if (keysToAdd.size() > params.maxKeysPerInsert) {
+ warning() << "Insert of geo object generated a high number of keys."
+ << " num keys: " << keysToAdd.size() << " obj inserted: " << obj;
}
+ *keys = keysToAdd;
+}
+
} // namespace mongo
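The Cartesian-product step in getS2Keys above combines the per-field key sets into compound keys. A minimal standalone sketch of that combination, using plain std::string stand-ins for the BSON keys and invented values:

#include <iostream>
#include <set>
#include <string>

int main() {
    // Keys already accumulated for earlier fields, and keys for the current field.
    std::set<std::string> keysToAdd = {"{geo: cellA}", "{geo: cellB}"};
    std::set<std::string> keysForThisField = {"{category: 1}"};

    // Cartesian product: every existing key is extended by every key of the new field.
    std::set<std::string> updated;
    for (const auto& existing : keysToAdd)
        for (const auto& added : keysForThisField)
            updated.insert(existing + " + " + added);
    keysToAdd = updated;

    for (const auto& k : keysToAdd)
        std::cout << k << "\n";  // "{geo: cellA} + {category: 1}", "{geo: cellB} + {category: 1}"
}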
diff --git a/src/mongo/db/index/expression_keys_private.h b/src/mongo/db/index/expression_keys_private.h
index 6e3fb1ea9a0..9aba295b579 100644
--- a/src/mongo/db/index/expression_keys_private.h
+++ b/src/mongo/db/index/expression_keys_private.h
@@ -36,95 +36,94 @@
namespace mongo {
- struct TwoDIndexingParams;
- struct S2IndexingParams;
+struct TwoDIndexingParams;
+struct S2IndexingParams;
- namespace fts {
+namespace fts {
- class FTSSpec;
+class FTSSpec;
- } // namespace fts
+} // namespace fts
+
+/**
+ * Do not use this class or any of its methods directly. The key generation of btree-indexed
+ * expression indices is kept outside of the access method for testing and for upgrade
+ * compatibility checking.
+ */
+class ExpressionKeysPrivate {
+public:
+ //
+ // 2d
+ //
+
+ static void get2DKeys(const BSONObj& obj,
+ const TwoDIndexingParams& params,
+ BSONObjSet* keys,
+ std::vector<BSONObj>* locs);
+
+ //
+ // FTS
+ //
+
+ static void getFTSKeys(const BSONObj& obj, const fts::FTSSpec& ftsSpec, BSONObjSet* keys);
+
+ //
+ // Hash
+ //
+
+ /**
+ * Generates keys for hash access method.
+ */
+ static void getHashKeys(const BSONObj& obj,
+ const std::string& hashedField,
+ HashSeed seed,
+ int hashVersion,
+ bool isSparse,
+ BSONObjSet* keys);
/**
- * Do not use this class or any of its methods directly. The key generation of btree-indexed
- * expression indices is kept outside of the access method for testing and for upgrade
- * compatibility checking.
+ * Hashing function used by both getHashKeys and the cursors we create.
+ * Exposed for testing in dbtests/namespacetests.cpp and
+ * so mongo/db/index_legacy.cpp can use it.
*/
- class ExpressionKeysPrivate {
- public:
-
- //
- // 2d
- //
-
- static void get2DKeys(const BSONObj &obj,
- const TwoDIndexingParams& params,
- BSONObjSet* keys,
- std::vector<BSONObj>* locs);
-
- //
- // FTS
- //
-
- static void getFTSKeys(const BSONObj &obj, const fts::FTSSpec& ftsSpec, BSONObjSet* keys);
-
- //
- // Hash
- //
-
- /**
- * Generates keys for hash access method.
- */
- static void getHashKeys(const BSONObj& obj,
- const std::string& hashedField,
- HashSeed seed,
- int hashVersion,
- bool isSparse,
+ static long long int makeSingleHashKey(const BSONElement& e, HashSeed seed, int v);
+
+ //
+ // Haystack
+ //
+
+ /**
+ * Generates keys for haystack access method.
+ */
+ static void getHaystackKeys(const BSONObj& obj,
+ const std::string& geoField,
+ const std::vector<std::string>& otherFields,
+ double bucketSize,
BSONObjSet* keys);
- /**
- * Hashing function used by both getHashKeys and the cursors we create.
- * Exposed for testing in dbtests/namespacetests.cpp and
- * so mongo/db/index_legacy.cpp can use it.
- */
- static long long int makeSingleHashKey(const BSONElement& e, HashSeed seed, int v);
-
- //
- // Haystack
- //
-
- /**
- * Generates keys for haystack access method.
- */
- static void getHaystackKeys(const BSONObj& obj,
- const std::string& geoField,
- const std::vector<std::string>& otherFields,
- double bucketSize,
- BSONObjSet* keys);
-
- /**
- * Returns a hash of a BSON element.
- * Used by getHaystackKeys and HaystackAccessMethod::searchCommand.
- */
- static int hashHaystackElement(const BSONElement& e, double bucketSize);
-
- /**
- * Joins two strings using underscore as separator.
- * Used by getHaystackKeys and HaystackAccessMethod::searchCommand.
- */
- static std::string makeHaystackString(int hashedX, int hashedY);
-
- //
- // S2
- //
-
- /**
- * Generates keys for S2 access method.
- */
- static void getS2Keys(const BSONObj& obj,
- const BSONObj& keyPattern,
- const S2IndexingParams& params,
- BSONObjSet* keys);
- };
+ /**
+ * Returns a hash of a BSON element.
+ * Used by getHaystackKeys and HaystackAccessMethod::searchCommand.
+ */
+ static int hashHaystackElement(const BSONElement& e, double bucketSize);
+
+ /**
+ * Joins two strings using underscore as separator.
+ * Used by getHaystackKeys and HaystackAccessMethod::searchCommand.
+ */
+ static std::string makeHaystackString(int hashedX, int hashedY);
+
+ //
+ // S2
+ //
+
+ /**
+ * Generates keys for S2 access method.
+ */
+ static void getS2Keys(const BSONObj& obj,
+ const BSONObj& keyPattern,
+ const S2IndexingParams& params,
+ BSONObjSet* keys);
+};
} // namespace mongo
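As a hedged usage sketch of the hashed-index entry point declared above (it assumes compilation inside the server tree with the usual BSON headers; the document and field name are invented for illustration):

#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/jsobj.h"

namespace mongo {
void exampleHashedKeys() {
    BSONObj doc = BSON("_id" << 1 << "userId" << 42);
    BSONObjSet keys;
    // Produces a single key of the form {"": hash64(doc["userId"], seed)}; when the field
    // is missing, the hash of null is inserted instead unless the index is sparse.
    ExpressionKeysPrivate::getHashKeys(
        doc, "userId", 0 /* seed */, 0 /* hashVersion */, false /* isSparse */, &keys);
}
}  // namespace mongo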
diff --git a/src/mongo/db/index/expression_params.h b/src/mongo/db/index/expression_params.h
index e7b190f5eb9..a7613b027d3 100644
--- a/src/mongo/db/index/expression_params.h
+++ b/src/mongo/db/index/expression_params.h
@@ -34,150 +34,151 @@
namespace mongo {
- class ExpressionParams {
- public:
- static void parseTwoDParams(const BSONObj& infoObj, TwoDIndexingParams* out) {
- BSONObjIterator i(infoObj.getObjectField("key"));
-
- while (i.more()) {
- BSONElement e = i.next();
- if (e.type() == String && IndexNames::GEO_2D == e.valuestr()) {
- uassert(16800, "can't have 2 geo fields", out->geo.size() == 0);
- uassert(16801, "2d has to be first in index", out->other.size() == 0);
- out->geo = e.fieldName();
- } else {
- int order = 1;
- if (e.isNumber()) {
- order = static_cast<int>(e.Number());
- }
- out->other.push_back(std::make_pair(e.fieldName(), order));
+class ExpressionParams {
+public:
+ static void parseTwoDParams(const BSONObj& infoObj, TwoDIndexingParams* out) {
+ BSONObjIterator i(infoObj.getObjectField("key"));
+
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == String && IndexNames::GEO_2D == e.valuestr()) {
+ uassert(16800, "can't have 2 geo fields", out->geo.size() == 0);
+ uassert(16801, "2d has to be first in index", out->other.size() == 0);
+ out->geo = e.fieldName();
+ } else {
+ int order = 1;
+ if (e.isNumber()) {
+ order = static_cast<int>(e.Number());
}
+ out->other.push_back(std::make_pair(e.fieldName(), order));
}
-
- uassert(16802, "no geo field specified", out->geo.size());
-
- GeoHashConverter::Parameters hashParams;
- Status paramStatus = GeoHashConverter::parseParameters(infoObj, &hashParams);
- uassertStatusOK(paramStatus);
-
- out->geoHashConverter.reset(new GeoHashConverter(hashParams));
}
- static void parseHashParams(const BSONObj& infoObj,
- HashSeed* seedOut,
- int* versionOut,
- std::string* fieldOut) {
-
- // Default _seed to DEFAULT_HASH_SEED if "seed" is not included in the index spec
- // or if the value of "seed" is not a number
-
- // *** WARNING ***
- // Choosing non-default seeds will invalidate hashed sharding
- // Changing the seed default will break existing indexes and sharded collections
- if (infoObj["seed"].eoo()) {
- *seedOut = BSONElementHasher::DEFAULT_HASH_SEED;
- }
- else {
- *seedOut = infoObj["seed"].numberInt();
- }
-
- // In case we have hashed indexes based on other hash functions in the future, we store
- // a hashVersion number. If hashVersion changes, "makeSingleHashKey" will need to change
- // accordingly. Defaults to 0 if "hashVersion" is not included in the index spec or if
- // the value of "hashversion" is not a number
- *versionOut = infoObj["hashVersion"].numberInt();
-
- // Get the hashfield name
- BSONElement firstElt = infoObj.getObjectField("key").firstElement();
- massert(16765, "error: no hashed index field",
- firstElt.str().compare(IndexNames::HASHED) == 0);
- *fieldOut = firstElt.fieldName();
+ uassert(16802, "no geo field specified", out->geo.size());
+
+ GeoHashConverter::Parameters hashParams;
+ Status paramStatus = GeoHashConverter::parseParameters(infoObj, &hashParams);
+ uassertStatusOK(paramStatus);
+
+ out->geoHashConverter.reset(new GeoHashConverter(hashParams));
+ }
+
+ static void parseHashParams(const BSONObj& infoObj,
+ HashSeed* seedOut,
+ int* versionOut,
+ std::string* fieldOut) {
+ // Default _seed to DEFAULT_HASH_SEED if "seed" is not included in the index spec
+ // or if the value of "seed" is not a number
+
+ // *** WARNING ***
+ // Choosing non-default seeds will invalidate hashed sharding
+ // Changing the seed default will break existing indexes and sharded collections
+ if (infoObj["seed"].eoo()) {
+ *seedOut = BSONElementHasher::DEFAULT_HASH_SEED;
+ } else {
+ *seedOut = infoObj["seed"].numberInt();
}
- static void parseHaystackParams(const BSONObj& infoObj,
- std::string* geoFieldOut,
- std::vector<std::string>* otherFieldsOut,
- double* bucketSizeOut) {
-
- BSONElement e = infoObj["bucketSize"];
- uassert(16777, "need bucketSize", e.isNumber());
- *bucketSizeOut = e.numberDouble();
- uassert(16769, "bucketSize cannot be zero", *bucketSizeOut != 0.0);
-
- // Example:
- // db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
- BSONObjIterator i(infoObj.getObjectField("key"));
- while (i.more()) {
- BSONElement e = i.next();
- if (e.type() == String && IndexNames::GEO_HAYSTACK == e.valuestr()) {
- uassert(16770, "can't have more than one geo field", geoFieldOut->size() == 0);
- uassert(16771, "the geo field has to be first in index",
- otherFieldsOut->size() == 0);
- *geoFieldOut = e.fieldName();
- } else {
- uassert(16772, "geoSearch can only have 1 non-geo field for now",
- otherFieldsOut->size() == 0);
- otherFieldsOut->push_back(e.fieldName());
- }
+ // In case we have hashed indexes based on other hash functions in the future, we store
+ // a hashVersion number. If hashVersion changes, "makeSingleHashKey" will need to change
+ // accordingly. Defaults to 0 if "hashVersion" is not included in the index spec or if
+        // the value of "hashVersion" is not a number.
+ *versionOut = infoObj["hashVersion"].numberInt();
+
+ // Get the hashfield name
+ BSONElement firstElt = infoObj.getObjectField("key").firstElement();
+ massert(
+ 16765, "error: no hashed index field", firstElt.str().compare(IndexNames::HASHED) == 0);
+ *fieldOut = firstElt.fieldName();
+ }
+
+ static void parseHaystackParams(const BSONObj& infoObj,
+ std::string* geoFieldOut,
+ std::vector<std::string>* otherFieldsOut,
+ double* bucketSizeOut) {
+ BSONElement e = infoObj["bucketSize"];
+ uassert(16777, "need bucketSize", e.isNumber());
+ *bucketSizeOut = e.numberDouble();
+ uassert(16769, "bucketSize cannot be zero", *bucketSizeOut != 0.0);
+
+ // Example:
+ // db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
+ BSONObjIterator i(infoObj.getObjectField("key"));
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == String && IndexNames::GEO_HAYSTACK == e.valuestr()) {
+ uassert(16770, "can't have more than one geo field", geoFieldOut->size() == 0);
+ uassert(
+ 16771, "the geo field has to be first in index", otherFieldsOut->size() == 0);
+ *geoFieldOut = e.fieldName();
+ } else {
+ uassert(16772,
+ "geoSearch can only have 1 non-geo field for now",
+ otherFieldsOut->size() == 0);
+ otherFieldsOut->push_back(e.fieldName());
}
}
-
- static void parse2dsphereParams(const BSONObj& infoObj,
- S2IndexingParams* out) {
- // Set up basic params.
- out->maxKeysPerInsert = 200;
-
- // This is advisory.
- out->maxCellsInCovering = 50;
-
- // Near distances are specified in meters...sometimes.
- out->radius = kRadiusOfEarthInMeters;
-
- // These are not advisory.
- out->finestIndexedLevel = configValueWithDefaultInt(infoObj,
- "finestIndexedLevel",
- S2::kAvgEdge.GetClosestLevel(500.0 / out->radius));
-
- out->coarsestIndexedLevel = configValueWithDefaultInt(infoObj,
- "coarsestIndexedLevel",
- S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / out->radius));
-
- static const std::string kIndexVersionFieldName("2dsphereIndexVersion");
-
- // Determine which version of this index we're using. If none was set in the descriptor,
- // assume S2_INDEX_VERSION_1 (alas, the first version predates the existence of the version
- // field).
- out->indexVersion = static_cast<S2IndexVersion>(configValueWithDefaultInt(infoObj,
- kIndexVersionFieldName,
- S2_INDEX_VERSION_1));
-
- uassert(16747, "coarsestIndexedLevel must be >= 0", out->coarsestIndexedLevel >= 0);
- uassert(16748, "finestIndexedLevel must be <= 30", out->finestIndexedLevel <= 30);
- uassert(16749, "finestIndexedLevel must be >= coarsestIndexedLevel",
- out->finestIndexedLevel >= out->coarsestIndexedLevel);
-
- massert(17395,
- mongoutils::str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : " << out->indexVersion << " }, only support versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << "]",
- out->indexVersion == S2_INDEX_VERSION_2 || out->indexVersion == S2_INDEX_VERSION_1);
- }
-
- private:
- static double configValueWithDefaultDouble(const BSONObj& infoObj,
- const std::string& name,
- double def) {
- BSONElement e = infoObj[name];
- if (e.isNumber()) { return e.numberDouble(); }
- return def;
+ }
+
+ static void parse2dsphereParams(const BSONObj& infoObj, S2IndexingParams* out) {
+ // Set up basic params.
+ out->maxKeysPerInsert = 200;
+
+ // This is advisory.
+ out->maxCellsInCovering = 50;
+
+ // Near distances are specified in meters...sometimes.
+ out->radius = kRadiusOfEarthInMeters;
+
+ // These are not advisory.
+ out->finestIndexedLevel = configValueWithDefaultInt(
+ infoObj, "finestIndexedLevel", S2::kAvgEdge.GetClosestLevel(500.0 / out->radius));
+
+ out->coarsestIndexedLevel =
+ configValueWithDefaultInt(infoObj,
+ "coarsestIndexedLevel",
+ S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / out->radius));
+
+ static const std::string kIndexVersionFieldName("2dsphereIndexVersion");
+
+ // Determine which version of this index we're using. If none was set in the descriptor,
+ // assume S2_INDEX_VERSION_1 (alas, the first version predates the existence of the version
+ // field).
+ out->indexVersion = static_cast<S2IndexVersion>(
+ configValueWithDefaultInt(infoObj, kIndexVersionFieldName, S2_INDEX_VERSION_1));
+
+ uassert(16747, "coarsestIndexedLevel must be >= 0", out->coarsestIndexedLevel >= 0);
+ uassert(16748, "finestIndexedLevel must be <= 30", out->finestIndexedLevel <= 30);
+ uassert(16749,
+ "finestIndexedLevel must be >= coarsestIndexedLevel",
+ out->finestIndexedLevel >= out->coarsestIndexedLevel);
+
+ massert(17395,
+ mongoutils::str::stream() << "unsupported geo index version { "
+ << kIndexVersionFieldName << " : " << out->indexVersion
+ << " }, only support versions: [" << S2_INDEX_VERSION_1
+ << "," << S2_INDEX_VERSION_2 << "]",
+ out->indexVersion == S2_INDEX_VERSION_2 || out->indexVersion == S2_INDEX_VERSION_1);
+ }
+
+private:
+ static double configValueWithDefaultDouble(const BSONObj& infoObj,
+ const std::string& name,
+ double def) {
+ BSONElement e = infoObj[name];
+ if (e.isNumber()) {
+ return e.numberDouble();
}
+ return def;
+ }
- static int configValueWithDefaultInt(const BSONObj& infoObj, const std::string& name, int def) {
- BSONElement e = infoObj[name];
- if (e.isNumber()) { return e.numberInt(); }
- return def;
+ static int configValueWithDefaultInt(const BSONObj& infoObj, const std::string& name, int def) {
+ BSONElement e = infoObj[name];
+ if (e.isNumber()) {
+ return e.numberInt();
}
-
- };
+ return def;
+ }
+};
} // namespace mongo
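A minimal sketch of the index-spec shape parseHashParams above expects ("key" with a "hashed" field, plus optional "seed" and "hashVersion"); the field name is invented, and the snippet assumes it is compiled inside the server tree:

#include "mongo/db/index/expression_params.h"
#include "mongo/db/jsobj.h"

namespace mongo {
void exampleParseHashSpec() {
    BSONObj infoObj = BSON("key" << BSON("userId" << "hashed"));
    HashSeed seed;
    int version;
    std::string field;
    ExpressionParams::parseHashParams(infoObj, &seed, &version, &field);
    // field == "userId"; seed falls back to BSONElementHasher::DEFAULT_HASH_SEED and
    // version to 0 because "seed" and "hashVersion" are absent from the spec.
}
}  // namespace mongo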
diff --git a/src/mongo/db/index/external_key_generator.cpp b/src/mongo/db/index/external_key_generator.cpp
index 4173880d00b..9f93bd6f439 100644
--- a/src/mongo/db/index/external_key_generator.cpp
+++ b/src/mongo/db/index/external_key_generator.cpp
@@ -42,75 +42,68 @@
namespace mongo {
namespace {
- void getKeysForUpgradeChecking(const BSONObj& infoObj,
- const BSONObj& doc,
- BSONObjSet* keys) {
-
- BSONObj keyPattern = infoObj.getObjectField("key");
-
- string type = IndexNames::findPluginName(keyPattern);
-
- if (IndexNames::GEO_2D == type) {
- TwoDIndexingParams params;
- ExpressionParams::parseTwoDParams(infoObj, &params);
- ExpressionKeysPrivate::get2DKeys(doc, params, keys, NULL);
- }
- else if (IndexNames::GEO_HAYSTACK == type) {
- string geoField;
- vector<string> otherFields;
- double bucketSize;
- ExpressionParams::parseHaystackParams(infoObj, &geoField, &otherFields, &bucketSize);
- ExpressionKeysPrivate::getHaystackKeys(doc, geoField, otherFields, bucketSize, keys);
- }
- else if (IndexNames::GEO_2DSPHERE == type) {
- S2IndexingParams params;
- ExpressionParams::parse2dsphereParams(infoObj, &params);
- ExpressionKeysPrivate::getS2Keys(doc, keyPattern, params, keys);
+void getKeysForUpgradeChecking(const BSONObj& infoObj, const BSONObj& doc, BSONObjSet* keys) {
+ BSONObj keyPattern = infoObj.getObjectField("key");
+
+ string type = IndexNames::findPluginName(keyPattern);
+
+ if (IndexNames::GEO_2D == type) {
+ TwoDIndexingParams params;
+ ExpressionParams::parseTwoDParams(infoObj, &params);
+ ExpressionKeysPrivate::get2DKeys(doc, params, keys, NULL);
+ } else if (IndexNames::GEO_HAYSTACK == type) {
+ string geoField;
+ vector<string> otherFields;
+ double bucketSize;
+ ExpressionParams::parseHaystackParams(infoObj, &geoField, &otherFields, &bucketSize);
+ ExpressionKeysPrivate::getHaystackKeys(doc, geoField, otherFields, bucketSize, keys);
+ } else if (IndexNames::GEO_2DSPHERE == type) {
+ S2IndexingParams params;
+ ExpressionParams::parse2dsphereParams(infoObj, &params);
+ ExpressionKeysPrivate::getS2Keys(doc, keyPattern, params, keys);
+ } else if (IndexNames::TEXT == type) {
+ fts::FTSSpec spec(infoObj);
+ ExpressionKeysPrivate::getFTSKeys(doc, spec, keys);
+ } else if (IndexNames::HASHED == type) {
+ HashSeed seed;
+ int version;
+ string field;
+ ExpressionParams::parseHashParams(infoObj, &seed, &version, &field);
+ ExpressionKeysPrivate::getHashKeys(
+ doc, field, seed, version, infoObj["sparse"].trueValue(), keys);
+ } else {
+ invariant(IndexNames::BTREE == type);
+
+ std::vector<const char*> fieldNames;
+ std::vector<BSONElement> fixed;
+ BSONObjIterator keyIt(keyPattern);
+ while (keyIt.more()) {
+ BSONElement patternElt = keyIt.next();
+ fieldNames.push_back(patternElt.fieldName());
+ fixed.push_back(BSONElement());
}
- else if (IndexNames::TEXT == type) {
- fts::FTSSpec spec(infoObj);
- ExpressionKeysPrivate::getFTSKeys(doc, spec, keys);
- }
- else if (IndexNames::HASHED == type) {
- HashSeed seed;
- int version;
- string field;
- ExpressionParams::parseHashParams(infoObj, &seed, &version, &field);
- ExpressionKeysPrivate::getHashKeys(doc, field, seed, version, infoObj["sparse"].trueValue(), keys);
- }
- else {
- invariant(IndexNames::BTREE == type);
-
- std::vector<const char *> fieldNames;
- std::vector<BSONElement> fixed;
- BSONObjIterator keyIt(keyPattern);
- while (keyIt.more()) {
- BSONElement patternElt = keyIt.next();
- fieldNames.push_back(patternElt.fieldName());
- fixed.push_back(BSONElement());
- }
- // XXX: do we care about version
- BtreeKeyGeneratorV1 keyGen(fieldNames, fixed, infoObj["sparse"].trueValue());
+ // XXX: do we care about version
+ BtreeKeyGeneratorV1 keyGen(fieldNames, fixed, infoObj["sparse"].trueValue());
- keyGen.getKeys(doc, keys);
- }
+ keyGen.getKeys(doc, keys);
}
-
- // cloned from key.cpp to build the below set
- const int binDataCodeLengths[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 32};
- const std::set<int> acceptableBinDataLengths(
- binDataCodeLengths,
- binDataCodeLengths + (sizeof(binDataCodeLengths) / sizeof(int)));
-
- // modified version of the KeyV1Owned constructor that returns the would-be-key's datasize()
- int keyV1Size(const BSONObj& obj) {
- BSONObj::iterator i(obj);
- int size = 0;
- const int traditionalSize = obj.objsize() + 1;
- while (i.more()) {
- BSONElement e = i.next();
- switch (e.type()) {
+}
+
+// cloned from key.cpp to build the below set
+const int binDataCodeLengths[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 32};
+const std::set<int> acceptableBinDataLengths(binDataCodeLengths,
+ binDataCodeLengths +
+ (sizeof(binDataCodeLengths) / sizeof(int)));
+
+// modified version of the KeyV1Owned constructor that returns the would-be-key's datasize()
+int keyV1Size(const BSONObj& obj) {
+ BSONObj::iterator i(obj);
+ int size = 0;
+ const int traditionalSize = obj.objsize() + 1;
+ while (i.more()) {
+ BSONElement e = i.next();
+ switch (e.type()) {
case MinKey:
case jstNULL:
case MaxKey:
@@ -121,86 +114,82 @@ namespace {
size += 1;
size += OID::kOIDSize;
break;
- case BinData:
- {
- int t = e.binDataType();
- // 0-7 and 0x80 to 0x87 are supported by Key
- if( (t & 0x78) == 0 && t != ByteArrayDeprecated ) {
- int len;
- e.binData(len);
- if (acceptableBinDataLengths.count(len)) {
- size += 1;
- size += 1;
- size += len;
- break;
- }
+ case BinData: {
+ int t = e.binDataType();
+ // 0-7 and 0x80 to 0x87 are supported by Key
+ if ((t & 0x78) == 0 && t != ByteArrayDeprecated) {
+ int len;
+ e.binData(len);
+ if (acceptableBinDataLengths.count(len)) {
+ size += 1;
+ size += 1;
+ size += len;
+ break;
}
- return traditionalSize;
}
+ return traditionalSize;
+ }
case Date:
size += 1;
size += sizeof(Date_t);
break;
- case String:
- {
- size += 1;
- // note we do not store the terminating null, to save space.
- unsigned x = (unsigned) e.valuestrsize() - 1;
- if (x > 255) {
- return traditionalSize;
- }
- size += 1;
- size += x;
- break;
+ case String: {
+ size += 1;
+ // note we do not store the terminating null, to save space.
+ unsigned x = (unsigned)e.valuestrsize() - 1;
+ if (x > 255) {
+ return traditionalSize;
}
+ size += 1;
+ size += x;
+ break;
+ }
case NumberInt:
size += 1;
size += sizeof(double);
break;
- case NumberLong:
- {
- long long n = e._numberLong();
- long long m = 2LL << 52;
- if( n >= m || n <= -m ) {
- // can't represent exactly as a double
- return traditionalSize;
- }
- size += 1;
- size += sizeof(double);
- break;
+ case NumberLong: {
+ long long n = e._numberLong();
+ long long m = 2LL << 52;
+ if (n >= m || n <= -m) {
+ // can't represent exactly as a double
+ return traditionalSize;
}
- case NumberDouble:
- {
- double d = e._numberDouble();
- if (std::isnan(d)) {
- return traditionalSize;
- }
- size += 1;
- size += sizeof(double);
- break;
+ size += 1;
+ size += sizeof(double);
+ break;
+ }
+ case NumberDouble: {
+ double d = e._numberDouble();
+ if (std::isnan(d)) {
+ return traditionalSize;
}
+ size += 1;
+ size += sizeof(double);
+ break;
+ }
default:
// if other types involved, store as traditional BSON
return traditionalSize;
- }
}
- return size;
}
+ return size;
+}
-} // namespace
-
- bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc) {
- BSONObjSet keys;
- getKeysForUpgradeChecking(index, doc, &keys);
+} // namespace
- int largestKeySize = 0;
+bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc) {
+ BSONObjSet keys;
+ getKeysForUpgradeChecking(index, doc, &keys);
- for (BSONObjSet::const_iterator it = keys.begin(); it != keys.end(); ++it) {
- largestKeySize = std::max(largestKeySize, keyV1Size(*it));
- }
+ int largestKeySize = 0;
- // BtreeData_V1::KeyMax is 1024
- return largestKeySize > 1024;
+ for (BSONObjSet::const_iterator it = keys.begin(); it != keys.end(); ++it) {
+ largestKeySize = std::max(largestKeySize, keyV1Size(*it));
}
+ // BtreeData_V1::KeyMax is 1024
+ return largestKeySize > 1024;
+}
+
} // namespace mongo
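A sketch of how the upgrade-checking helper above can be driven; the index spec and document are invented, and the snippet assumes compilation inside the server tree:

#include <string>

#include "mongo/db/index/external_key_generator.h"
#include "mongo/db/jsobj.h"

namespace mongo {
bool exampleKeyTooLargeCheck() {
    BSONObj indexSpec = BSON("key" << BSON("title" << 1) << "name" << "title_1");
    BSONObj doc = BSON("title" << std::string(2000, 'x'));
    // Strings longer than 255 bytes fall back to the traditional BSON size, which here
    // exceeds the 1024-byte KeyMax, so this returns true.
    return isAnyIndexKeyTooLarge(indexSpec, doc);
}
}  // namespace mongo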
diff --git a/src/mongo/db/index/external_key_generator.h b/src/mongo/db/index/external_key_generator.h
index a340fd56e0e..9dad28f7413 100644
--- a/src/mongo/db/index/external_key_generator.h
+++ b/src/mongo/db/index/external_key_generator.h
@@ -40,7 +40,7 @@
*/
namespace mongo {
- // Returns whether or not the largest key the index will generate for the document is too large.
- bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc);
+// Returns whether or not the largest key the index will generate for the document is too large.
+bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc);
} // namespace mongo
diff --git a/src/mongo/db/index/fts_access_method.cpp b/src/mongo/db/index/fts_access_method.cpp
index 52bc3424518..9676fcbec45 100644
--- a/src/mongo/db/index/fts_access_method.cpp
+++ b/src/mongo/db/index/fts_access_method.cpp
@@ -31,11 +31,11 @@
namespace mongo {
- FTSAccessMethod::FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree )
- : IndexAccessMethod(btreeState, btree), _ftsSpec(btreeState->descriptor()->infoObj()) { }
+FTSAccessMethod::FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree), _ftsSpec(btreeState->descriptor()->infoObj()) {}
- void FTSAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::getFTSKeys(obj, _ftsSpec, keys);
- }
+void FTSAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::getFTSKeys(obj, _ftsSpec, keys);
+}
} // namespace mongo
diff --git a/src/mongo/db/index/fts_access_method.h b/src/mongo/db/index/fts_access_method.h
index 9cf3e825c89..794d1efe360 100644
--- a/src/mongo/db/index/fts_access_method.h
+++ b/src/mongo/db/index/fts_access_method.h
@@ -36,17 +36,19 @@
namespace mongo {
- class FTSAccessMethod : public IndexAccessMethod {
- public:
- FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+class FTSAccessMethod : public IndexAccessMethod {
+public:
+ FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- const fts::FTSSpec& getSpec() const { return _ftsSpec; }
+ const fts::FTSSpec& getSpec() const {
+ return _ftsSpec;
+ }
- private:
- // Implemented:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+private:
+ // Implemented:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
- fts::FTSSpec _ftsSpec;
- };
+ fts::FTSSpec _ftsSpec;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/index/hash_access_method.cpp b/src/mongo/db/index/hash_access_method.cpp
index 47504706590..8262c9019ab 100644
--- a/src/mongo/db/index/hash_access_method.cpp
+++ b/src/mongo/db/index/hash_access_method.cpp
@@ -33,26 +33,25 @@
namespace mongo {
- HashAccessMethod::HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
- : IndexAccessMethod(btreeState, btree) {
-
- const IndexDescriptor* descriptor = btreeState->descriptor();
-
- // We can change these if the single-field limitation is lifted later.
- uassert(16763, "Currently only single field hashed index supported.",
- 1 == descriptor->getNumFields());
-
- uassert(16764, "Currently hashed indexes cannot guarantee uniqueness. Use a regular index.",
- !descriptor->unique());
-
- ExpressionParams::parseHashParams(descriptor->infoObj(),
- &_seed,
- &_hashVersion,
- &_hashedField);
- }
-
- void HashAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::getHashKeys(obj, _hashedField, _seed, _hashVersion, _descriptor->isSparse(), keys);
- }
+HashAccessMethod::HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ const IndexDescriptor* descriptor = btreeState->descriptor();
+
+ // We can change these if the single-field limitation is lifted later.
+ uassert(16763,
+ "Currently only single field hashed index supported.",
+ 1 == descriptor->getNumFields());
+
+ uassert(16764,
+ "Currently hashed indexes cannot guarantee uniqueness. Use a regular index.",
+ !descriptor->unique());
+
+ ExpressionParams::parseHashParams(descriptor->infoObj(), &_seed, &_hashVersion, &_hashedField);
+}
+
+void HashAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::getHashKeys(
+ obj, _hashedField, _seed, _hashVersion, _descriptor->isSparse(), keys);
+}
} // namespace mongo
diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h
index 3e986cbc1b0..ea3f36bb647 100644
--- a/src/mongo/db/index/hash_access_method.h
+++ b/src/mongo/db/index/hash_access_method.h
@@ -38,26 +38,26 @@
namespace mongo {
- /**
- * This is the access method for "hashed" indices.
- */
- class HashAccessMethod : public IndexAccessMethod {
- public:
- HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+/**
+ * This is the access method for "hashed" indices.
+ */
+class HashAccessMethod : public IndexAccessMethod {
+public:
+ HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- private:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
- // Only one of our fields is hashed. This is the field name for it.
- std::string _hashedField;
+ // Only one of our fields is hashed. This is the field name for it.
+ std::string _hashedField;
- // _seed defaults to zero.
- HashSeed _seed;
+ // _seed defaults to zero.
+ HashSeed _seed;
- // _hashVersion defaults to zero.
- int _hashVersion;
+ // _hashVersion defaults to zero.
+ int _hashVersion;
- BSONObj _missingKey;
- };
+ BSONObj _missingKey;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 263568af37a..66f1b09e813 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -44,94 +44,97 @@
namespace mongo {
- using std::unique_ptr;
-
- HaystackAccessMethod::HaystackAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
- : IndexAccessMethod(btreeState, btree) {
+using std::unique_ptr;
+
+HaystackAccessMethod::HaystackAccessMethod(IndexCatalogEntry* btreeState,
+ SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ const IndexDescriptor* descriptor = btreeState->descriptor();
+
+ ExpressionParams::parseHaystackParams(
+ descriptor->infoObj(), &_geoField, &_otherFields, &_bucketSize);
+
+ uassert(16773, "no geo field specified", _geoField.size());
+ uassert(16774, "no non-geo fields specified", _otherFields.size());
+}
+
+void HaystackAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::getHaystackKeys(obj, _geoField, _otherFields, _bucketSize, keys);
+}
+
+void HaystackAccessMethod::searchCommand(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& nearObj,
+ double maxDistance,
+ const BSONObj& search,
+ BSONObjBuilder* result,
+ unsigned limit) {
+ Timer t;
+
+ LOG(1) << "SEARCH near:" << nearObj << " maxDistance:" << maxDistance << " search: " << search
+ << endl;
+ int x, y;
+ {
+ BSONObjIterator i(nearObj);
+ x = ExpressionKeysPrivate::hashHaystackElement(i.next(), _bucketSize);
+ y = ExpressionKeysPrivate::hashHaystackElement(i.next(), _bucketSize);
+ }
+ int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
- const IndexDescriptor* descriptor = btreeState->descriptor();
+ GeoHaystackSearchHopper hopper(txn, nearObj, maxDistance, limit, _geoField, collection);
- ExpressionParams::parseHaystackParams(descriptor->infoObj(),
- &_geoField,
- &_otherFields,
- &_bucketSize);
+ long long btreeMatches = 0;
- uassert(16773, "no geo field specified", _geoField.size());
- uassert(16774, "no non-geo fields specified", _otherFields.size());
- }
-
- void HaystackAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::getHaystackKeys(obj, _geoField, _otherFields, _bucketSize, keys);
- }
+ for (int a = -scale; a <= scale && !hopper.limitReached(); ++a) {
+ for (int b = -scale; b <= scale && !hopper.limitReached(); ++b) {
+ BSONObjBuilder bb;
+ bb.append("", ExpressionKeysPrivate::makeHaystackString(x + a, y + b));
- void HaystackAccessMethod::searchCommand(OperationContext* txn, Collection* collection,
- const BSONObj& nearObj, double maxDistance,
- const BSONObj& search, BSONObjBuilder* result,
- unsigned limit) {
- Timer t;
-
- LOG(1) << "SEARCH near:" << nearObj << " maxDistance:" << maxDistance
- << " search: " << search << endl;
- int x, y;
- {
- BSONObjIterator i(nearObj);
- x = ExpressionKeysPrivate::hashHaystackElement(i.next(), _bucketSize);
- y = ExpressionKeysPrivate::hashHaystackElement(i.next(), _bucketSize);
- }
- int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
+ for (unsigned i = 0; i < _otherFields.size(); i++) {
+ // See if the non-geo field we're indexing on is in the provided search term.
+ BSONElement e = search.getFieldDotted(_otherFields[i]);
+ if (e.eoo())
+ bb.appendNull("");
+ else
+ bb.appendAs(e, "");
+ }
- GeoHaystackSearchHopper hopper(txn, nearObj, maxDistance, limit, _geoField, collection);
+ BSONObj key = bb.obj();
- long long btreeMatches = 0;
+ unordered_set<RecordId, RecordId::Hasher> thisPass;
- for (int a = -scale; a <= scale && !hopper.limitReached(); ++a) {
- for (int b = -scale; b <= scale && !hopper.limitReached(); ++b) {
- BSONObjBuilder bb;
- bb.append("", ExpressionKeysPrivate::makeHaystackString(x + a, y + b));
- for (unsigned i = 0; i < _otherFields.size(); i++) {
- // See if the non-geo field we're indexing on is in the provided search term.
- BSONElement e = search.getFieldDotted(_otherFields[i]);
- if (e.eoo())
- bb.appendNull("");
- else
- bb.appendAs(e, "");
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::indexScan(txn, collection, _descriptor, key, key, true));
+ PlanExecutor::ExecState state;
+ RecordId loc;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ if (hopper.limitReached()) {
+ break;
}
-
- BSONObj key = bb.obj();
-
- unordered_set<RecordId, RecordId::Hasher> thisPass;
-
-
- unique_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn, collection,
- _descriptor, key, key, true));
- PlanExecutor::ExecState state;
- RecordId loc;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
- if (hopper.limitReached()) { break; }
- pair<unordered_set<RecordId, RecordId::Hasher>::iterator, bool> p
- = thisPass.insert(loc);
- // If a new element was inserted (haven't seen the RecordId before), p.second
- // is true.
- if (p.second) {
- hopper.consider(loc);
- btreeMatches++;
- }
+ pair<unordered_set<RecordId, RecordId::Hasher>::iterator, bool> p =
+ thisPass.insert(loc);
+ // If a new element was inserted (haven't seen the RecordId before), p.second
+ // is true.
+ if (p.second) {
+ hopper.consider(loc);
+ btreeMatches++;
}
}
}
+ }
- BSONArrayBuilder arr(result->subarrayStart("results"));
- int num = hopper.appendResultsTo(&arr);
- arr.done();
+ BSONArrayBuilder arr(result->subarrayStart("results"));
+ int num = hopper.appendResultsTo(&arr);
+ arr.done();
- {
- BSONObjBuilder b(result->subobjStart("stats"));
- b.append("time", t.millis());
- b.appendNumber("btreeMatches", btreeMatches);
- b.append("n", num);
- b.done();
- }
+ {
+ BSONObjBuilder b(result->subobjStart("stats"));
+ b.append("time", t.millis());
+ b.appendNumber("btreeMatches", btreeMatches);
+ b.append("n", num);
+ b.done();
}
+}
} // namespace mongo
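The bucket fan-out in searchCommand above grows with maxDistance/bucketSize. A standalone sketch of that arithmetic and of the bucket-string scheme, with invented coordinates:

#include <cmath>
#include <iostream>
#include <sstream>
#include <string>

// Mirrors hashHaystackElement/makeHaystackString: shift each coordinate by 180, divide by
// bucketSize, truncate to int, then join the two hashed coordinates with an underscore.
static std::string bucketFor(double lng, double lat, double bucketSize) {
    std::ostringstream ss;
    ss << static_cast<int>((lng + 180) / bucketSize) << "_"
       << static_cast<int>((lat + 180) / bucketSize);
    return ss.str();
}

int main() {
    double maxDistance = 2.5, bucketSize = 1.0;
    int scale = static_cast<int>(std::ceil(maxDistance / bucketSize));  // 3
    int probes = (2 * scale + 1) * (2 * scale + 1);                     // 49 index scans
    std::cout << "probes=" << probes
              << " center=" << bucketFor(-73.98, 40.76, bucketSize) << "\n";  // "106_220"
}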
diff --git a/src/mongo/db/index/haystack_access_method.h b/src/mongo/db/index/haystack_access_method.h
index 1aac55a25e8..d79de3bfffc 100644
--- a/src/mongo/db/index/haystack_access_method.h
+++ b/src/mongo/db/index/haystack_access_method.h
@@ -35,41 +35,45 @@
namespace mongo {
- class Collection;
- class OperationContext;
+class Collection;
+class OperationContext;
- /**
- * Maps (lat, lng) to the bucketSize-sided square bucket that contains it.
- * Examines all documents in a given radius of a given point.
- * Returns all documents that match a given search restriction.
- * See http://dochub.mongodb.org/core/haystackindexes
- *
- * Use when you want to look for restaurants within 25 miles with a certain name.
- * Don't use when you want to find the closest open restaurants; see 2d.cpp for that.
- *
- * Usage:
- * db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
- * pos is the name of the field to be indexed that has lat/lng data in an array.
- * type is the name of the secondary field to be indexed.
- * bucketSize specifies the dimension of the square bucket for the data in pos.
- * ALL fields are mandatory.
- */
- class HaystackAccessMethod : public IndexAccessMethod {
- public:
- HaystackAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+/**
+ * Maps (lat, lng) to the bucketSize-sided square bucket that contains it.
+ * Examines all documents in a given radius of a given point.
+ * Returns all documents that match a given search restriction.
+ * See http://dochub.mongodb.org/core/haystackindexes
+ *
+ * Use when you want to look for restaurants within 25 miles with a certain name.
+ * Don't use when you want to find the closest open restaurants; see 2d.cpp for that.
+ *
+ * Usage:
+ * db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
+ * pos is the name of the field to be indexed that has lat/lng data in an array.
+ * type is the name of the secondary field to be indexed.
+ * bucketSize specifies the dimension of the square bucket for the data in pos.
+ * ALL fields are mandatory.
+ */
+class HaystackAccessMethod : public IndexAccessMethod {
+public:
+ HaystackAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- protected:
- friend class GeoHaystackSearchCommand;
- void searchCommand(OperationContext* txn, Collection* collection,
- const BSONObj& nearObj, double maxDistance, const BSONObj& search,
- BSONObjBuilder* result, unsigned limit);
+protected:
+ friend class GeoHaystackSearchCommand;
+ void searchCommand(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& nearObj,
+ double maxDistance,
+ const BSONObj& search,
+ BSONObjBuilder* result,
+ unsigned limit);
- private:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
- std::string _geoField;
- std::vector<std::string> _otherFields;
- double _bucketSize;
- };
+ std::string _geoField;
+ std::vector<std::string> _otherFields;
+ double _bucketSize;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
index 2f068bfc15f..0ab511f42e4 100644
--- a/src/mongo/db/index/haystack_access_method_internal.h
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -35,58 +35,60 @@
namespace mongo {
- class GeoHaystackSearchHopper {
- public:
- /**
- * Constructed with a point, a max distance from that point, and a max number of
- * matched points to store.
- * @param n The centroid that we're searching
- * @param maxDistance The maximum distance to consider from that point
- * @param limit The maximum number of results to return
- * @param geoField Which field in the provided RecordId has the point to test.
- */
- GeoHaystackSearchHopper(OperationContext* txn,
- const BSONObj& nearObj,
- double maxDistance,
- unsigned limit,
- const std::string& geoField,
- const Collection* collection)
- : _txn(txn),
- _collection(collection),
- _near(nearObj),
- _maxDistance(maxDistance),
- _limit(limit),
- _geoField(geoField) { }
+class GeoHaystackSearchHopper {
+public:
+ /**
+ * Constructed with a point, a max distance from that point, and a max number of
+ * matched points to store.
+     * @param nearObj The centroid that we're searching around
+     * @param maxDistance The maximum distance to consider from that point
+     * @param limit The maximum number of results to return
+     * @param geoField Which field of each candidate document has the point to test.
+ */
+ GeoHaystackSearchHopper(OperationContext* txn,
+ const BSONObj& nearObj,
+ double maxDistance,
+ unsigned limit,
+ const std::string& geoField,
+ const Collection* collection)
+ : _txn(txn),
+ _collection(collection),
+ _near(nearObj),
+ _maxDistance(maxDistance),
+ _limit(limit),
+ _geoField(geoField) {}
- // Consider the point in loc, and keep it if it's within _maxDistance (and we have space for
- // it)
- void consider(const RecordId& loc) {
- if (limitReached()) return;
- Point p(_collection->docFor(_txn, loc).value().getFieldDotted(_geoField));
- if (distance(_near, p) > _maxDistance)
- return;
- _locs.push_back(loc);
- }
+ // Consider the point in loc, and keep it if it's within _maxDistance (and we have space for
+ // it)
+ void consider(const RecordId& loc) {
+ if (limitReached())
+ return;
+ Point p(_collection->docFor(_txn, loc).value().getFieldDotted(_geoField));
+ if (distance(_near, p) > _maxDistance)
+ return;
+ _locs.push_back(loc);
+ }
- int appendResultsTo(BSONArrayBuilder* b) {
- for (unsigned i = 0; i <_locs.size(); i++)
- b->append(_collection->docFor(_txn, _locs[i]).value());
- return _locs.size();
- }
+ int appendResultsTo(BSONArrayBuilder* b) {
+ for (unsigned i = 0; i < _locs.size(); i++)
+ b->append(_collection->docFor(_txn, _locs[i]).value());
+ return _locs.size();
+ }
- // Have we stored as many points as we can?
- bool limitReached() const {
- return _locs.size() >= _limit;
- }
- private:
- OperationContext* _txn;
- const Collection* _collection;
+ // Have we stored as many points as we can?
+ bool limitReached() const {
+ return _locs.size() >= _limit;
+ }
- Point _near;
- double _maxDistance;
- unsigned _limit;
- const std::string _geoField;
- std::vector<RecordId> _locs;
- };
+private:
+ OperationContext* _txn;
+ const Collection* _collection;
+
+ Point _near;
+ double _maxDistance;
+ unsigned _limit;
+ const std::string _geoField;
+ std::vector<RecordId> _locs;
+};
} // namespace mongo
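A standalone sketch, with invented distances and limit, of the keep-or-drop contract the hopper above implements: a candidate is kept only while the limit has not been reached and its distance from the query point is within maxDistance:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    const double maxDistance = 5.0;
    const std::size_t limit = 2;
    std::vector<double> candidateDistances = {1.0, 7.5, 3.2, 0.4};  // invented

    std::vector<double> kept;
    for (double d : candidateDistances) {
        if (kept.size() >= limit)  // limitReached()
            break;
        if (d > maxDistance)  // too far: consider() drops it
            continue;
        kept.push_back(d);  // consider() keeps it
    }
    std::cout << "kept " << kept.size() << " of " << candidateDistances.size() << "\n";  // 2 of 4
}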
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 2e1490f1f4d..18765949789 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -49,408 +49,397 @@
namespace mongo {
- using std::endl;
- using std::set;
- using std::vector;
-
- MONGO_EXPORT_SERVER_PARAMETER(failIndexKeyTooLong, bool, true);
-
- //
- // Comparison for external sorter interface
- //
-
- // Defined in db/structure/btree/key.cpp
- // XXX TODO: rename to something more descriptive, etc. etc.
- int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o);
-
- class BtreeExternalSortComparison {
- public:
- BtreeExternalSortComparison(const BSONObj& ordering, int version)
- : _ordering(Ordering::make(ordering)),
- _version(version) {
- invariant(version == 1 || version == 0);
- }
+using std::endl;
+using std::set;
+using std::vector;
- typedef std::pair<BSONObj, RecordId> Data;
+MONGO_EXPORT_SERVER_PARAMETER(failIndexKeyTooLong, bool, true);
- int operator() (const Data& l, const Data& r) const {
- int x = (_version == 1
- ? l.first.woCompare(r.first, _ordering, /*considerfieldname*/false)
- : oldCompare(l.first, r.first, _ordering));
- if (x) { return x; }
- return l.second.compare(r.second);
- }
- private:
- const Ordering _ordering;
- const int _version;
- };
-
- IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState,
- SortedDataInterface* btree)
- : _btreeState(btreeState),
- _descriptor(btreeState->descriptor()),
- _newInterface(btree) {
- verify(0 == _descriptor->version() || 1 == _descriptor->version());
- }
+//
+// Comparison for external sorter interface
+//
- bool IndexAccessMethod::ignoreKeyTooLong(OperationContext *txn) {
- // Ignore this error if we're on a secondary or if the user requested it
- return !txn->isPrimaryFor(_btreeState->ns()) || !failIndexKeyTooLong;
+// Defined in db/structure/btree/key.cpp
+// XXX TODO: rename to something more descriptive, etc. etc.
+int oldCompare(const BSONObj& l, const BSONObj& r, const Ordering& o);
+
+class BtreeExternalSortComparison {
+public:
+ BtreeExternalSortComparison(const BSONObj& ordering, int version)
+ : _ordering(Ordering::make(ordering)), _version(version) {
+ invariant(version == 1 || version == 0);
}
- // Find the keys for obj, put them in the tree pointing to loc
- Status IndexAccessMethod::insert(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numInserted) {
- *numInserted = 0;
+ typedef std::pair<BSONObj, RecordId> Data;
- BSONObjSet keys;
- // Delegate to the subclass.
- getKeys(obj, &keys);
+ int operator()(const Data& l, const Data& r) const {
+ int x = (_version == 1 ? l.first.woCompare(r.first, _ordering, /*considerfieldname*/ false)
+ : oldCompare(l.first, r.first, _ordering));
+ if (x) {
+ return x;
+ }
+ return l.second.compare(r.second);
+ }
- Status ret = Status::OK();
- for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- Status status = _newInterface->insert(txn, *i, loc, options.dupsAllowed);
+private:
+ const Ordering _ordering;
+ const int _version;
+};
+
+IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : _btreeState(btreeState), _descriptor(btreeState->descriptor()), _newInterface(btree) {
+ verify(0 == _descriptor->version() || 1 == _descriptor->version());
+}
+
+bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* txn) {
+ // Ignore this error if we're on a secondary or if the user requested it
+ return !txn->isPrimaryFor(_btreeState->ns()) || !failIndexKeyTooLong;
+}
+
+// Find the keys for obj, put them in the tree pointing to loc
+Status IndexAccessMethod::insert(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numInserted) {
+ *numInserted = 0;
+
+ BSONObjSet keys;
+ // Delegate to the subclass.
+ getKeys(obj, &keys);
+
+ Status ret = Status::OK();
+ for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
+ Status status = _newInterface->insert(txn, *i, loc, options.dupsAllowed);
+
+ // Everything's OK, carry on.
+ if (status.isOK()) {
+ ++*numInserted;
+ continue;
+ }
- // Everything's OK, carry on.
- if (status.isOK()) {
- ++*numInserted;
- continue;
- }
+ // Error cases.
- // Error cases.
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ continue;
+ }
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ if (status.code() == ErrorCodes::DuplicateKeyValue) {
+ // A document might be indexed multiple times during a background index build
+ // if it moves ahead of the collection scan cursor (e.g. via an update).
+ if (!_btreeState->isReady(txn)) {
+ LOG(3) << "key " << *i << " already in index during background indexing (ok)";
continue;
}
-
- if (status.code() == ErrorCodes::DuplicateKeyValue) {
- // A document might be indexed multiple times during a background index build
- // if it moves ahead of the collection scan cursor (e.g. via an update).
- if (!_btreeState->isReady(txn)) {
- LOG(3) << "key " << *i << " already in index during background indexing (ok)";
- continue;
- }
- }
-
- // Clean up after ourselves.
- for (BSONObjSet::const_iterator j = keys.begin(); j != i; ++j) {
- removeOneKey(txn, *j, loc, options.dupsAllowed);
- *numInserted = 0;
- }
-
- return status;
}
- if (*numInserted > 1) {
- _btreeState->setMultikey( txn );
+ // Clean up after ourselves.
+ for (BSONObjSet::const_iterator j = keys.begin(); j != i; ++j) {
+ removeOneKey(txn, *j, loc, options.dupsAllowed);
+ *numInserted = 0;
}
- return ret;
+ return status;
}
- void IndexAccessMethod::removeOneKey(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
- try {
- _newInterface->unindex(txn, key, loc, dupsAllowed);
- } catch (AssertionException& e) {
- log() << "Assertion failure: _unindex failed "
- << _descriptor->indexNamespace() << endl;
- log() << "Assertion failure: _unindex failed: " << e.what()
- << " key:" << key.toString()
- << " dl:" << loc;
- logContext();
- }
+ if (*numInserted > 1) {
+ _btreeState->setMultikey(txn);
}
- std::unique_ptr<SortedDataInterface::Cursor> IndexAccessMethod::newCursor(
- OperationContext* txn,
- bool isForward) const {
- return _newInterface->newCursor(txn, isForward);
- }
+ return ret;
+}
- // Remove the provided doc from the index.
- Status IndexAccessMethod::remove(OperationContext* txn,
- const BSONObj &obj,
+void IndexAccessMethod::removeOneKey(OperationContext* txn,
+ const BSONObj& key,
const RecordId& loc,
- const InsertDeleteOptions &options,
- int64_t* numDeleted) {
-
- BSONObjSet keys;
- getKeys(obj, &keys);
- *numDeleted = 0;
-
- for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- removeOneKey(txn, *i, loc, options.dupsAllowed);
- ++*numDeleted;
- }
-
- return Status::OK();
+ bool dupsAllowed) {
+ try {
+ _newInterface->unindex(txn, key, loc, dupsAllowed);
+ } catch (AssertionException& e) {
+ log() << "Assertion failure: _unindex failed " << _descriptor->indexNamespace() << endl;
+ log() << "Assertion failure: _unindex failed: " << e.what() << " key:" << key.toString()
+ << " dl:" << loc;
+ logContext();
+ }
+}
+
+std::unique_ptr<SortedDataInterface::Cursor> IndexAccessMethod::newCursor(OperationContext* txn,
+ bool isForward) const {
+ return _newInterface->newCursor(txn, isForward);
+}
+
+// Remove the provided doc from the index.
+Status IndexAccessMethod::remove(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numDeleted) {
+ BSONObjSet keys;
+ getKeys(obj, &keys);
+ *numDeleted = 0;
+
+ for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
+ removeOneKey(txn, *i, loc, options.dupsAllowed);
+ ++*numDeleted;
}
- // Return keys in l that are not in r.
- // Lifted basically verbatim from elsewhere.
- static void setDifference(const BSONObjSet &l, const BSONObjSet &r, vector<BSONObj*> *diff) {
- // l and r must use the same ordering spec.
- verify(l.key_comp().order() == r.key_comp().order());
- BSONObjSet::const_iterator i = l.begin();
- BSONObjSet::const_iterator j = r.begin();
- while ( 1 ) {
- if ( i == l.end() )
- break;
- while ( j != r.end() && j->woCompare( *i ) < 0 )
- j++;
- if ( j == r.end() || i->woCompare(*j) != 0 ) {
- const BSONObj *jo = &*i;
- diff->push_back( (BSONObj *) jo );
- }
- i++;
+ return Status::OK();
+}
+
+// Return keys in l that are not in r.
+// Lifted basically verbatim from elsewhere.
+static void setDifference(const BSONObjSet& l, const BSONObjSet& r, vector<BSONObj*>* diff) {
+ // l and r must use the same ordering spec.
+ verify(l.key_comp().order() == r.key_comp().order());
+ BSONObjSet::const_iterator i = l.begin();
+ BSONObjSet::const_iterator j = r.begin();
+ while (1) {
+ if (i == l.end())
+ break;
+ while (j != r.end() && j->woCompare(*i) < 0)
+ j++;
+ if (j == r.end() || i->woCompare(*j) != 0) {
+ const BSONObj* jo = &*i;
+ diff->push_back((BSONObj*)jo);
}
+ i++;
}
+}
- Status IndexAccessMethod::initializeAsEmpty(OperationContext* txn) {
- return _newInterface->initAsEmpty(txn);
- }
+Status IndexAccessMethod::initializeAsEmpty(OperationContext* txn) {
+ return _newInterface->initAsEmpty(txn);
+}
- Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
- BSONObjSet keys;
- getKeys(obj, &keys);
+Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
+ BSONObjSet keys;
+ getKeys(obj, &keys);
- std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
- for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- cursor->seekExact(*i);
- }
-
- return Status::OK();
+ std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
+ for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
+ cursor->seekExact(*i);
}
+ return Status::OK();
+}
- Status IndexAccessMethod::touch( OperationContext* txn ) const {
- return _newInterface->touch(txn);
- }
- RecordId IndexAccessMethod::findSingle(OperationContext* txn, const BSONObj& key) const {
- std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
- const auto requestedInfo = kDebugBuild ? SortedDataInterface::Cursor::kKeyAndLoc
- : SortedDataInterface::Cursor::kWantLoc;
- if (auto kv = cursor->seekExact(key, requestedInfo)) {
- // StorageEngine should guarantee these.
- dassert(!kv->loc.isNull());
- dassert(kv->key.woCompare(key, /*order*/BSONObj(), /*considerFieldNames*/false) == 0);
+Status IndexAccessMethod::touch(OperationContext* txn) const {
+ return _newInterface->touch(txn);
+}
- return kv->loc;
- }
-
- return RecordId();
- }
+RecordId IndexAccessMethod::findSingle(OperationContext* txn, const BSONObj& key) const {
+ std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
+ const auto requestedInfo = kDebugBuild ? SortedDataInterface::Cursor::kKeyAndLoc
+ : SortedDataInterface::Cursor::kWantLoc;
+ if (auto kv = cursor->seekExact(key, requestedInfo)) {
+ // StorageEngine should guarantee these.
+ dassert(!kv->loc.isNull());
+ dassert(kv->key.woCompare(key, /*order*/ BSONObj(), /*considerFieldNames*/ false) == 0);
- Status IndexAccessMethod::validate(OperationContext* txn, bool full, int64_t* numKeys,
- BSONObjBuilder* output) {
- // XXX: long long vs int64_t
- long long keys = 0;
- _newInterface->fullValidate(txn, full, &keys, output);
- *numKeys = keys;
- return Status::OK();
+ return kv->loc;
}
- bool IndexAccessMethod::appendCustomStats(OperationContext* txn,
- BSONObjBuilder* output,
- double scale) const {
- return _newInterface->appendCustomStats(txn, output, scale);
+ return RecordId();
+}
+
+Status IndexAccessMethod::validate(OperationContext* txn,
+ bool full,
+ int64_t* numKeys,
+ BSONObjBuilder* output) {
+ // XXX: long long vs int64_t
+ long long keys = 0;
+ _newInterface->fullValidate(txn, full, &keys, output);
+ *numKeys = keys;
+ return Status::OK();
+}
+
+bool IndexAccessMethod::appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* output,
+ double scale) const {
+ return _newInterface->appendCustomStats(txn, output, scale);
+}
+
+long long IndexAccessMethod::getSpaceUsedBytes(OperationContext* txn) const {
+ return _newInterface->getSpaceUsedBytes(txn);
+}
+
+Status IndexAccessMethod::validateUpdate(OperationContext* txn,
+ const BSONObj& from,
+ const BSONObj& to,
+ const RecordId& record,
+ const InsertDeleteOptions& options,
+ UpdateTicket* ticket,
+ const MatchExpression* indexFilter) {
+ if (indexFilter == NULL || indexFilter->matchesBSON(from))
+ getKeys(from, &ticket->oldKeys);
+ if (indexFilter == NULL || indexFilter->matchesBSON(to))
+ getKeys(to, &ticket->newKeys);
+ ticket->loc = record;
+ ticket->dupsAllowed = options.dupsAllowed;
+
+ setDifference(ticket->oldKeys, ticket->newKeys, &ticket->removed);
+ setDifference(ticket->newKeys, ticket->oldKeys, &ticket->added);
+
+ ticket->_isValid = true;
+
+ return Status::OK();
+}
+
+Status IndexAccessMethod::update(OperationContext* txn,
+ const UpdateTicket& ticket,
+ int64_t* numUpdated) {
+ if (!ticket._isValid) {
+ return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
}
- long long IndexAccessMethod::getSpaceUsedBytes( OperationContext* txn ) const {
- return _newInterface->getSpaceUsedBytes( txn );
+ if (ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size() > 1) {
+ _btreeState->setMultikey(txn);
}
- Status IndexAccessMethod::validateUpdate(OperationContext* txn,
- const BSONObj &from,
- const BSONObj &to,
- const RecordId &record,
- const InsertDeleteOptions &options,
- UpdateTicket* ticket,
- const MatchExpression* indexFilter) {
-
- if (indexFilter == NULL || indexFilter->matchesBSON(from))
- getKeys(from, &ticket->oldKeys);
- if (indexFilter == NULL || indexFilter->matchesBSON(to))
- getKeys(to, &ticket->newKeys);
- ticket->loc = record;
- ticket->dupsAllowed = options.dupsAllowed;
-
- setDifference(ticket->oldKeys, ticket->newKeys, &ticket->removed);
- setDifference(ticket->newKeys, ticket->oldKeys, &ticket->added);
-
- ticket->_isValid = true;
-
- return Status::OK();
+ for (size_t i = 0; i < ticket.removed.size(); ++i) {
+ _newInterface->unindex(txn, *ticket.removed[i], ticket.loc, ticket.dupsAllowed);
}
- Status IndexAccessMethod::update(OperationContext* txn,
- const UpdateTicket& ticket,
- int64_t* numUpdated) {
- if (!ticket._isValid) {
- return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
- }
-
- if (ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size() > 1) {
- _btreeState->setMultikey( txn );
- }
-
- for (size_t i = 0; i < ticket.removed.size(); ++i) {
- _newInterface->unindex(txn,
- *ticket.removed[i],
- ticket.loc,
- ticket.dupsAllowed);
- }
-
- for (size_t i = 0; i < ticket.added.size(); ++i) {
- Status status = _newInterface->insert(txn,
- *ticket.added[i],
- ticket.loc,
- ticket.dupsAllowed);
- if ( !status.isOK() ) {
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
- // Ignore.
- continue;
- }
-
- return status;
+ for (size_t i = 0; i < ticket.added.size(); ++i) {
+ Status status =
+ _newInterface->insert(txn, *ticket.added[i], ticket.loc, ticket.dupsAllowed);
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ // Ignore.
+ continue;
}
- }
-
- *numUpdated = ticket.added.size();
- return Status::OK();
+ return status;
+ }
}
- std::unique_ptr<IndexAccessMethod::BulkBuilder> IndexAccessMethod::initiateBulk() {
-
- return std::unique_ptr<BulkBuilder>(new BulkBuilder(this, _descriptor));
+ *numUpdated = ticket.added.size();
+
+ return Status::OK();
+}
+
+std::unique_ptr<IndexAccessMethod::BulkBuilder> IndexAccessMethod::initiateBulk() {
+ return std::unique_ptr<BulkBuilder>(new BulkBuilder(this, _descriptor));
+}
+
+IndexAccessMethod::BulkBuilder::BulkBuilder(const IndexAccessMethod* index,
+ const IndexDescriptor* descriptor)
+ : _sorter(Sorter::make(
+ SortOptions()
+ .TempDir(storageGlobalParams.dbpath + "/_tmp")
+ .ExtSortAllowed()
+ .MaxMemoryUsageBytes(100 * 1024 * 1024),
+ BtreeExternalSortComparison(descriptor->keyPattern(), descriptor->version()))),
+ _real(index) {}
+
+Status IndexAccessMethod::BulkBuilder::insert(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numInserted) {
+ BSONObjSet keys;
+ _real->getKeys(obj, &keys);
+
+ _isMultiKey = _isMultiKey || (keys.size() > 1);
+
+ for (BSONObjSet::iterator it = keys.begin(); it != keys.end(); ++it) {
+ _sorter->add(*it, loc);
+ _keysInserted++;
}
- IndexAccessMethod::BulkBuilder::BulkBuilder(const IndexAccessMethod* index,
- const IndexDescriptor* descriptor)
- : _sorter(Sorter::make(SortOptions().TempDir(storageGlobalParams.dbpath + "/_tmp")
- .ExtSortAllowed()
- .MaxMemoryUsageBytes(100*1024*1024),
- BtreeExternalSortComparison(descriptor->keyPattern(),
- descriptor->version())))
- , _real(index) {
+ if (NULL != numInserted) {
+ *numInserted += keys.size();
}
- Status IndexAccessMethod::BulkBuilder::insert(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numInserted) {
- BSONObjSet keys;
- _real->getKeys(obj, &keys);
-
- _isMultiKey = _isMultiKey || (keys.size() > 1);
-
- for (BSONObjSet::iterator it = keys.begin(); it != keys.end(); ++it) {
- _sorter->add(*it, loc);
- _keysInserted++;
- }
+ return Status::OK();
+}
- if (NULL != numInserted) {
- *numInserted += keys.size();
- }
- return Status::OK();
- }
+Status IndexAccessMethod::commitBulk(OperationContext* txn,
+ std::unique_ptr<BulkBuilder> bulk,
+ bool mayInterrupt,
+ bool dupsAllowed,
+ set<RecordId>* dupsToDrop) {
+ Timer timer;
+ std::unique_ptr<BulkBuilder::Sorter::Iterator> i(bulk->_sorter->done());
- Status IndexAccessMethod::commitBulk(OperationContext* txn,
- std::unique_ptr<BulkBuilder> bulk,
- bool mayInterrupt,
- bool dupsAllowed,
- set<RecordId>* dupsToDrop) {
+ stdx::unique_lock<Client> lk(*txn->getClient());
+ ProgressMeterHolder pm(*txn->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
+ "Index: (2/3) BTree Bottom Up Progress",
+ bulk->_keysInserted,
+ 10));
+ lk.unlock();
- Timer timer;
+ std::unique_ptr<SortedDataBuilderInterface> builder;
- std::unique_ptr<BulkBuilder::Sorter::Iterator> i(bulk->_sorter->done());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeterHolder pm(*txn->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
- "Index: (2/3) BTree Bottom Up Progress",
- bulk->_keysInserted,
- 10));
- lk.unlock();
+ if (bulk->_isMultiKey) {
+ _btreeState->setMultikey(txn);
+ }
- std::unique_ptr<SortedDataBuilderInterface> builder;
+ builder.reset(_newInterface->getBulkBuilder(txn, dupsAllowed));
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setting index multikey flag", "");
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ while (i->more()) {
+ if (mayInterrupt) {
+ txn->checkForInterrupt();
+ }
- if (bulk->_isMultiKey) {
- _btreeState->setMultikey( txn );
- }
+ WriteUnitOfWork wunit(txn);
+ // Improve performance in the btree-building phase by disabling rollback tracking.
+ // This avoids copying all the written bytes to a buffer that is only used to roll back.
+ // Note that this is safe to do, as this entire index-build-in-progress will be cleaned
+ // up by the index system.
+ txn->recoveryUnit()->setRollbackWritesDisabled();
- builder.reset(_newInterface->getBulkBuilder(txn, dupsAllowed));
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setting index multikey flag", "");
+ // Get the next datum and add it to the builder.
+ BulkBuilder::Sorter::Data d = i->next();
+ Status status = builder->addKey(d.first, d.second);
- while (i->more()) {
- if (mayInterrupt) {
- txn->checkForInterrupt();
+ if (!status.isOK()) {
+ // Overlong key that's OK to skip?
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ continue;
}
- WriteUnitOfWork wunit(txn);
- // Improve performance in the btree-building phase by disabling rollback tracking.
- // This avoids copying all the written bytes to a buffer that is only used to roll back.
- // Note that this is safe to do, as this entire index-build-in-progress will be cleaned
- // up by the index system.
- txn->recoveryUnit()->setRollbackWritesDisabled();
+ // Check if this is a duplicate that's OK to skip
+ if (status.code() == ErrorCodes::DuplicateKey) {
+ invariant(!dupsAllowed); // shouldn't be getting DupKey errors if dupsAllowed.
- // Get the next datum and add it to the builder.
- BulkBuilder::Sorter::Data d = i->next();
- Status status = builder->addKey(d.first, d.second);
-
- if (!status.isOK()) {
- // Overlong key that's OK to skip?
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ if (dupsToDrop) {
+ dupsToDrop->insert(d.second);
continue;
}
-
- // Check if this is a duplicate that's OK to skip
- if (status.code() == ErrorCodes::DuplicateKey) {
- invariant(!dupsAllowed); // shouldn't be getting DupKey errors if dupsAllowed.
-
- if (dupsToDrop) {
- dupsToDrop->insert(d.second);
- continue;
- }
- }
-
- return status;
}
- // If we're here either it's a dup and we're cool with it or the addKey went just
- // fine.
- pm.hit();
- wunit.commit();
+ return status;
}
- pm.finished();
-
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
- "Index: (3/3) BTree Middle Progress");
- }
+        // If we're here, either the key is a duplicate we can tolerate or addKey succeeded.
+ pm.hit();
+ wunit.commit();
+ }
- LOG(timer.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit";
+ pm.finished();
- builder->commit(mayInterrupt);
- return Status::OK();
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ CurOp::get(txn)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
+ "Index: (3/3) BTree Middle Progress");
}
+ LOG(timer.seconds() > 10 ? 0 : 1) << "\t done building bottom layer, going to commit";
+
+ builder->commit(mayInterrupt);
+ return Status::OK();
+}
+
} // namespace mongo
#include "mongo/db/sorter/sorter.cpp"
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 3f311128a2e..5dc88d30d80 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -40,250 +40,250 @@
namespace mongo {
- class BSONObjBuilder;
- class MatchExpression;
- class UpdateTicket;
- struct InsertDeleteOptions;
+class BSONObjBuilder;
+class MatchExpression;
+class UpdateTicket;
+struct InsertDeleteOptions;
+
+/**
+ * An IndexAccessMethod is the interface through which all the mutation, lookup, and
+ * traversal of index entries is done. The class is designed so that the underlying index
+ * data structure is opaque to the caller.
+ *
+ * IndexAccessMethods for existing indices are obtained through the system catalog.
+ *
+ * We assume the caller holds whatever locks are required. This interface is not thread safe.
+ *
+ */
+class IndexAccessMethod {
+ MONGO_DISALLOW_COPYING(IndexAccessMethod);
+
+public:
+ IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+ virtual ~IndexAccessMethod() {}
+
+ //
+ // Lookup, traversal, and mutation support
+ //
/**
- * An IndexAccessMethod is the interface through which all the mutation, lookup, and
- * traversal of index entries is done. The class is designed so that the underlying index
- * data structure is opaque to the caller.
- *
- * IndexAccessMethods for existing indices are obtained through the system catalog.
- *
- * We assume the caller has whatever locks required. This interface is not thread safe.
+ * Internally generate the keys {k1, ..., kn} for 'obj'. For each key k, insert (k ->
+ * 'loc') into the index. 'obj' is the object at the location 'loc'. If not NULL,
+ * 'numInserted' will be set to the number of keys added to the index for the document. If
+ * there is more than one key for 'obj', either all keys will be inserted or none will.
*
+ * The behavior of the insertion can be specified through 'options'.
*/
- class IndexAccessMethod {
- MONGO_DISALLOW_COPYING(IndexAccessMethod);
- public:
+ Status insert(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numInserted);
- IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- virtual ~IndexAccessMethod() { }
+ /**
+ * Analogous to above, but remove the records instead of inserting them. If not NULL,
+ * numDeleted will be set to the number of keys removed from the index for the document.
+ */
+ Status remove(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numDeleted);
- //
- // Lookup, traversal, and mutation support
- //
+ /**
+ * Checks whether the index entries for the document 'from', which is placed at location
+ * 'loc' on disk, can be changed to the index entries for the doc 'to'. Provides a ticket
+ * for actually performing the update.
+ *
+ * Returns an error if the update is invalid. The ticket will also be marked as invalid.
+ * Returns OK if the update should proceed without error. The ticket is marked as valid.
+ *
+ * There is no obligation to perform the update after performing validation.
+ */
+ Status validateUpdate(OperationContext* txn,
+ const BSONObj& from,
+ const BSONObj& to,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ UpdateTicket* ticket,
+ const MatchExpression* indexFilter);
- /**
- * Internally generate the keys {k1, ..., kn} for 'obj'. For each key k, insert (k ->
- * 'loc') into the index. 'obj' is the object at the location 'loc'. If not NULL,
- * 'numInserted' will be set to the number of keys added to the index for the document. If
- * there is more than one key for 'obj', either all keys will be inserted or none will.
- *
- * The behavior of the insertion can be specified through 'options'.
- */
- Status insert(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numInserted);
+ /**
+ * Perform a validated update. The keys for the 'from' object will be removed, and the keys
+ * for the object 'to' will be added. Returns OK if the update succeeded, failure if it did
+ * not. If an update does not succeed, the index will be unmodified, and the keys for
+ * 'from' will remain. Assumes that the index has not changed since validateUpdate was
+ * called. If the index was changed, we may return an error, as our ticket may have been
+ * invalidated.
+ */
+ Status update(OperationContext* txn, const UpdateTicket& ticket, int64_t* numUpdated);
- /**
- * Analogous to above, but remove the records instead of inserting them. If not NULL,
- * numDeleted will be set to the number of keys removed from the index for the document.
- */
- Status remove(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numDeleted);
+ /**
+ * Returns an unpositioned cursor over 'this' index.
+ */
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ bool isForward = true) const;
- /**
- * Checks whether the index entries for the document 'from', which is placed at location
- * 'loc' on disk, can be changed to the index entries for the doc 'to'. Provides a ticket
- * for actually performing the update.
- *
- * Returns an error if the update is invalid. The ticket will also be marked as invalid.
- * Returns OK if the update should proceed without error. The ticket is marked as valid.
- *
- * There is no obligation to perform the update after performing validation.
- */
- Status validateUpdate(OperationContext* txn,
- const BSONObj& from,
- const BSONObj& to,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- UpdateTicket* ticket,
- const MatchExpression* indexFilter);
+ // ------ index level operations ------
- /**
- * Perform a validated update. The keys for the 'from' object will be removed, and the keys
- * for the object 'to' will be added. Returns OK if the update succeeded, failure if it did
- * not. If an update does not succeed, the index will be unmodified, and the keys for
- * 'from' will remain. Assumes that the index has not changed since validateUpdate was
- * called. If the index was changed, we may return an error, as our ticket may have been
- * invalidated.
- */
- Status update(OperationContext* txn, const UpdateTicket& ticket, int64_t* numUpdated);
- /**
- * Returns an unpositioned cursor over 'this' index.
- */
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
- bool isForward = true) const;
+ /**
+ * Initializes this index.
+ * Called only once over the lifetime of the index; calling it more than once is an error.
+ */
+ Status initializeAsEmpty(OperationContext* txn);
- // ------ index level operations ------
+ /**
+ * Try to page-in the pages that contain the keys generated from 'obj'.
+ * This can be used to speed up future accesses to an index by trying to ensure the
+ * appropriate pages are not swapped out.
+ * See prefetch.cpp.
+ */
+ Status touch(OperationContext* txn, const BSONObj& obj);
+ /**
+ * Pages in the entire index.
+ */
+ Status touch(OperationContext* txn) const;
- /**
- * initializes this index
- * only called once for the lifetime of the index
- * if called multiple times, is an error
- */
- Status initializeAsEmpty(OperationContext* txn);
+ /**
+ * Walk the entire index, checking the internal structure for consistency.
+ * Set numKeys to the number of keys in the index.
+ *
+ * 'output' is used to store results of validate when 'full' is true.
+ * If 'full' is false, 'output' may be NULL.
+ *
+ * Return OK if the index is valid.
+ *
+ * Currently wasserts if the index is invalid. This could/should be changed in
+ * the future to return a Status instead.
+ */
+ Status validate(OperationContext* txn, bool full, int64_t* numKeys, BSONObjBuilder* output);
- /**
- * Try to page-in the pages that contain the keys generated from 'obj'.
- * This can be used to speed up future accesses to an index by trying to ensure the
- * appropriate pages are not swapped out.
- * See prefetch.cpp.
- */
- Status touch(OperationContext* txn, const BSONObj& obj);
+ /**
+ * Add custom statistics about this index to BSON object builder, for display.
+ *
+ * 'scale' is a scaling factor to apply to all byte statistics.
+ *
+ * Returns true if stats were appended.
+ */
+ bool appendCustomStats(OperationContext* txn, BSONObjBuilder* result, double scale) const;
- /**
- * this pages in the entire index
- */
- Status touch(OperationContext* txn) const;
+ /**
+ * @return The number of bytes consumed by this index.
+ * Exactly what is counted is not strictly defined; it may vary with padding, re-use, etc.
+ */
+ long long getSpaceUsedBytes(OperationContext* txn) const;
- /**
- * Walk the entire index, checking the internal structure for consistency.
- * Set numKeys to the number of keys in the index.
- *
- * 'output' is used to store results of validate when 'full' is true.
- * If 'full' is false, 'output' may be NULL.
- *
- * Return OK if the index is valid.
- *
- * Currently wasserts that the index is invalid. This could/should be changed in
- * the future to return a Status.
- */
- Status validate(OperationContext* txn, bool full, int64_t* numKeys, BSONObjBuilder* output);
+ RecordId findSingle(OperationContext* txn, const BSONObj& key) const;
- /**
- * Add custom statistics about this index to BSON object builder, for display.
- *
- * 'scale' is a scaling factor to apply to all byte statistics.
- *
- * Returns true if stats were appended.
- */
- bool appendCustomStats(OperationContext* txn, BSONObjBuilder* result, double scale) const;
+ //
+ // Bulk operations support
+ //
+ class BulkBuilder {
+ public:
/**
- * @return The number of bytes consumed by this index.
- * Exactly what is counted is not defined based on padding, re-use, etc...
+ * Insert into the BulkBuilder as if inserting into an IndexAccessMethod.
*/
- long long getSpaceUsedBytes( OperationContext* txn ) const;
-
- RecordId findSingle( OperationContext* txn, const BSONObj& key ) const;
-
- //
- // Bulk operations support
- //
-
- class BulkBuilder {
- public:
- /**
- * Insert into the BulkBuilder as-if inserting into an IndexAccessMethod.
- */
- Status insert(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numInserted);
+ Status insert(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numInserted);
- private:
- friend class IndexAccessMethod;
+ private:
+ friend class IndexAccessMethod;
- using Sorter = mongo::Sorter<BSONObj, RecordId>;
+ using Sorter = mongo::Sorter<BSONObj, RecordId>;
- BulkBuilder(const IndexAccessMethod* index, const IndexDescriptor* descriptor);
+ BulkBuilder(const IndexAccessMethod* index, const IndexDescriptor* descriptor);
- std::unique_ptr<Sorter> _sorter;
- const IndexAccessMethod* _real;
- int64_t _keysInserted = 0;
- bool _isMultiKey = false;
- };
+ std::unique_ptr<Sorter> _sorter;
+ const IndexAccessMethod* _real;
+ int64_t _keysInserted = 0;
+ bool _isMultiKey = false;
+ };
- /**
- * Starts a bulk operation.
- * You work on the returned BulkBuilder and then call commitBulk.
- * This can return NULL, meaning bulk mode is not available.
- *
- * It is only legal to initiate bulk when the index is new and empty.
- */
- std::unique_ptr<BulkBuilder> initiateBulk();
+ /**
+ * Starts a bulk operation.
+ * You work on the returned BulkBuilder and then call commitBulk.
+ * This can return NULL, meaning bulk mode is not available.
+ *
+ * It is only legal to initiate bulk when the index is new and empty.
+ */
+ std::unique_ptr<BulkBuilder> initiateBulk();
- /**
- * Call this when you are ready to finish your bulk work.
- * Pass in the BulkBuilder returned from initiateBulk.
- * @param bulk - something created from initiateBulk
- * @param mayInterrupt - is this commit interruptable (will cancel)
- * @param dupsAllowed - if false, error or fill 'dups' if any duplicate values are found
- * @param dups - if NULL, error out on dups if not allowed
- * if not NULL, put the bad RecordIds there
- */
- Status commitBulk(OperationContext* txn,
- std::unique_ptr<BulkBuilder> bulk,
- bool mayInterrupt,
- bool dupsAllowed,
- std::set<RecordId>* dups);
+ /**
+ * Call this when you are ready to finish your bulk work.
+ * Pass in the BulkBuilder returned from initiateBulk.
+ * @param bulk - something created from initiateBulk
+ * @param mayInterrupt - is this commit interruptible (it will cancel if interrupted)
+ * @param dupsAllowed - if false, error or fill 'dups' if any duplicate values are found
+ * @param dups - if NULL, error out on dups if not allowed
+ * if not NULL, put the bad RecordIds there
+ */
+ Status commitBulk(OperationContext* txn,
+ std::unique_ptr<BulkBuilder> bulk,
+ bool mayInterrupt,
+ bool dupsAllowed,
+ std::set<RecordId>* dups);
- /**
- * Fills 'keys' with the keys that should be generated for 'obj' on this index.
- */
- virtual void getKeys(const BSONObj &obj, BSONObjSet *keys) const = 0;
+ /**
+ * Fills 'keys' with the keys that should be generated for 'obj' on this index.
+ */
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const = 0;
- protected:
- // Determines whether it's OK to ignore ErrorCodes::KeyTooLong for this OperationContext
- bool ignoreKeyTooLong(OperationContext* txn);
+protected:
+ // Determines whether it's OK to ignore ErrorCodes::KeyTooLong for this OperationContext
+ bool ignoreKeyTooLong(OperationContext* txn);
- IndexCatalogEntry* _btreeState; // owned by IndexCatalogEntry
- const IndexDescriptor* _descriptor;
+ IndexCatalogEntry* _btreeState; // owned by IndexCatalogEntry
+ const IndexDescriptor* _descriptor;
- private:
- void removeOneKey(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed);
+private:
+ void removeOneKey(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed);
- const std::unique_ptr<SortedDataInterface> _newInterface;
- };
+ const std::unique_ptr<SortedDataInterface> _newInterface;
+};
- /**
- * Updates are two steps: verify that it's a valid update, and perform it.
- * validateUpdate fills out the UpdateStatus and update actually applies it.
- */
- class UpdateTicket {
- // No public interface
- private:
- friend class IndexAccessMethod;
+/**
+ * Updates are two steps: verify that it's a valid update, and perform it.
+ * validateUpdate fills out the UpdateTicket and update() then applies it.
+ */
+class UpdateTicket {
+ // No public interface
+private:
+ friend class IndexAccessMethod;
- bool _isValid;
+ bool _isValid;
- BSONObjSet oldKeys;
- BSONObjSet newKeys;
+ BSONObjSet oldKeys;
+ BSONObjSet newKeys;
- // These point into the sets oldKeys and newKeys.
- std::vector<BSONObj*> removed;
- std::vector<BSONObj*> added;
+ // These point into the sets oldKeys and newKeys.
+ std::vector<BSONObj*> removed;
+ std::vector<BSONObj*> added;
- RecordId loc;
- bool dupsAllowed;
- };
+ RecordId loc;
+ bool dupsAllowed;
+};
- /**
- * Flags we can set for inserts and deletes (and updates, which are kind of both).
- */
- struct InsertDeleteOptions {
- InsertDeleteOptions() : logIfError(false), dupsAllowed(false) { }
+/**
+ * Flags we can set for inserts and deletes (and updates, which are kind of both).
+ */
+struct InsertDeleteOptions {
+ InsertDeleteOptions() : logIfError(false), dupsAllowed(false) {}
- // If there's an error, log() it.
- bool logIfError;
+ // If there's an error, log() it.
+ bool logIfError;
- // Are duplicate keys allowed in the index?
- bool dupsAllowed;
- };
+ // Are duplicate keys allowed in the index?
+ bool dupsAllowed;
+};
} // namespace mongo
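The UpdateTicket declared above is what makes the two-step update protocol work: validateUpdate computes the added/removed key sets up front, and update later applies exactly that delta. A simplified, self-contained sketch of the same pattern, with hypothetical ToyIndex/ToyUpdateTicket types standing in for the real classes and a plain membership test in place of the sorted two-pointer walk:

#include <cstdint>
#include <map>
#include <set>
#include <string>

// Hypothetical illustration of the validateUpdate/update split: validation
// computes the key delta, and the ticket carries everything needed to apply it.
struct ToyUpdateTicket {
    bool valid = false;
    std::set<std::string> removed;  // keys present only in the old document
    std::set<std::string> added;    // keys present only in the new document
    std::int64_t loc = 0;
};

class ToyIndex {
public:
    void validateUpdate(const std::set<std::string>& oldKeys,
                        const std::set<std::string>& newKeys,
                        std::int64_t loc,
                        ToyUpdateTicket* ticket) {
        for (const auto& k : oldKeys)
            if (!newKeys.count(k)) ticket->removed.insert(k);
        for (const auto& k : newKeys)
            if (!oldKeys.count(k)) ticket->added.insert(k);
        ticket->loc = loc;
        ticket->valid = true;
    }

    bool update(const ToyUpdateTicket& ticket, std::int64_t* numUpdated) {
        if (!ticket.valid) return false;
        for (const auto& k : ticket.removed) _entries.erase(k);
        for (const auto& k : ticket.added) _entries[k] = ticket.loc;
        *numUpdated = static_cast<std::int64_t>(ticket.added.size());
        return true;
    }

private:
    std::map<std::string, std::int64_t> _entries;  // key -> record location
};

int main() {
    ToyIndex idx;
    ToyUpdateTicket ticket;
    idx.validateUpdate({"a", "b"}, {"b", "c"}, 42, &ticket);
    std::int64_t numUpdated = 0;
    idx.update(ticket, &numUpdated);  // removes "a", adds "c"
}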
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index 523b778163d..9d0de13b4d1 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -35,61 +35,54 @@
namespace mongo {
- namespace {
- void populateOptionsMap( std::map<StringData, BSONElement>& theMap,
- const BSONObj& spec ) {
-
- BSONObjIterator it( spec );
- while ( it.more() ) {
- const BSONElement e = it.next();
-
- StringData fieldName = e.fieldNameStringData();
- if ( fieldName == "key" ||
- fieldName == "ns" ||
- fieldName == "name" ||
- fieldName == "v" || // not considered for equivalence
- fieldName == "textIndexVersion" || // same as "v"
- fieldName == "2dsphereIndexVersion" || // same as "v"
- fieldName == "background" || // this is a creation time option only
- fieldName == "dropDups" || // this is now ignored
- fieldName == "sparse" || // checked specially
- fieldName == "unique" // check specially
- ) {
- continue;
- }
- theMap[ fieldName ] = e;
- }
+namespace {
+void populateOptionsMap(std::map<StringData, BSONElement>& theMap, const BSONObj& spec) {
+ BSONObjIterator it(spec);
+ while (it.more()) {
+ const BSONElement e = it.next();
+
+ StringData fieldName = e.fieldNameStringData();
+ if (fieldName == "key" || fieldName == "ns" || fieldName == "name" ||
+ fieldName == "v" || // not considered for equivalence
+ fieldName == "textIndexVersion" || // same as "v"
+ fieldName == "2dsphereIndexVersion" || // same as "v"
+ fieldName == "background" || // this is a creation time option only
+ fieldName == "dropDups" || // this is now ignored
+ fieldName == "sparse" || // checked specially
+ fieldName == "unique" // check specially
+ ) {
+ continue;
}
+ theMap[fieldName] = e;
}
+}
+}
- bool IndexDescriptor::areIndexOptionsEquivalent( const IndexDescriptor* other ) const {
-
- if ( isSparse() != other->isSparse() ) {
- return false;
- }
-
- if ( !isIdIndex() &&
- unique() != other->unique() ) {
- // Note: { _id: 1 } or { _id: -1 } implies unique: true.
- return false;
- }
+bool IndexDescriptor::areIndexOptionsEquivalent(const IndexDescriptor* other) const {
+ if (isSparse() != other->isSparse()) {
+ return false;
+ }
- // Then compare the rest of the options.
+ if (!isIdIndex() && unique() != other->unique()) {
+ // Note: { _id: 1 } or { _id: -1 } implies unique: true.
+ return false;
+ }
- std::map<StringData, BSONElement> existingOptionsMap;
- populateOptionsMap( existingOptionsMap, infoObj() );
+ // Then compare the rest of the options.
- std::map<StringData, BSONElement> newOptionsMap;
- populateOptionsMap( newOptionsMap, other->infoObj() );
+ std::map<StringData, BSONElement> existingOptionsMap;
+ populateOptionsMap(existingOptionsMap, infoObj());
- return existingOptionsMap == newOptionsMap;
- }
+ std::map<StringData, BSONElement> newOptionsMap;
+ populateOptionsMap(newOptionsMap, other->infoObj());
- void IndexDescriptor::_checkOk() const {
- if ( _magic == 123987 )
- return;
- log() << "uh oh: " << (void*)(this) << " " << _magic;
- invariant(0);
- }
+ return existingOptionsMap == newOptionsMap;
+}
+void IndexDescriptor::_checkOk() const {
+ if (_magic == 123987)
+ return;
+ log() << "uh oh: " << (void*)(this) << " " << _magic;
+ invariant(0);
+}
}
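The equivalence check above ignores spec fields that never affect index identity and compares what remains. A rough standalone sketch of that idea, with std::map<std::string, std::string> standing in for the BSON spec object (the names here are illustrative only):

#include <map>
#include <set>
#include <string>

// Strip the fields that never affect equivalence (key, name, version,
// creation-time flags), then compare the rest.
static std::map<std::string, std::string> nonCoreOptions(
    const std::map<std::string, std::string>& spec) {
    static const std::set<std::string> ignored = {
        "key", "ns", "name", "v", "textIndexVersion", "2dsphereIndexVersion",
        "background", "dropDups", "sparse", "unique"};
    std::map<std::string, std::string> out;
    for (const auto& kv : spec)
        if (!ignored.count(kv.first)) out.insert(kv);
    return out;
}

static bool optionsEquivalent(const std::map<std::string, std::string>& a,
                              const std::map<std::string, std::string>& b) {
    return nonCoreOptions(a) == nonCoreOptions(b);
}

int main() {
    std::map<std::string, std::string> a = {{"key", "{x:1}"}, {"name", "x_1"}, {"expireAfterSeconds", "60"}};
    std::map<std::string, std::string> b = {{"key", "{x:1}"}, {"name", "x_idx"}, {"expireAfterSeconds", "60"}};
    return optionsEquivalent(a, b) ? 0 : 1;  // equivalent: only ignored fields differ
}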
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 742e5a3ea16..2bceb9aeb0a 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -39,181 +39,215 @@
namespace mongo {
- class IndexCatalog;
- class IndexCatalogEntry;
- class IndexCatalogEntryContainer;
+class IndexCatalog;
+class IndexCatalogEntry;
+class IndexCatalogEntryContainer;
+/**
+ * A cache of information computed from the memory-mapped per-index data (OnDiskIndexData).
+ * Contains accessors for the various immutable index parameters, and an accessor for the
+ * mutable "head" pointer which is index-specific.
+ *
+ * All synchronization is the responsibility of the caller.
+ */
+class IndexDescriptor {
+public:
/**
- * A cache of information computed from the memory-mapped per-index data (OnDiskIndexData).
- * Contains accessors for the various immutable index parameters, and an accessor for the
- * mutable "head" pointer which is index-specific.
- *
- * All synchronization is the responsibility of the caller.
+ * OnDiskIndexData is a pointer to the memory mapped per-index data.
+ * infoObj is a copy of the index-describing BSONObj contained in the OnDiskIndexData.
*/
- class IndexDescriptor {
- public:
- /**
- * OnDiskIndexData is a pointer to the memory mapped per-index data.
- * infoObj is a copy of the index-describing BSONObj contained in the OnDiskIndexData.
- */
- IndexDescriptor(Collection* collection, const std::string& accessMethodName, BSONObj infoObj)
- : _magic(123987),
- _collection(collection),
- _accessMethodName(accessMethodName),
- _infoObj(infoObj.getOwned()),
- _numFields(infoObj.getObjectField("key").nFields()),
- _keyPattern(infoObj.getObjectField("key").getOwned()),
- _indexName(infoObj.getStringField("name")),
- _parentNS(infoObj.getStringField("ns")),
- _isIdIndex(isIdIndexPattern( _keyPattern )),
- _sparse(infoObj["sparse"].trueValue()),
- _unique( _isIdIndex || infoObj["unique"].trueValue() ),
- _partial(!infoObj["partialFilterExpression"].eoo()),
- _cachedEntry( NULL )
- {
- _indexNamespace = makeIndexNamespace( _parentNS, _indexName );
-
- _version = 0;
- BSONElement e = _infoObj["v"];
- if ( e.isNumber() ) {
- _version = e.numberInt();
- }
- }
-
- ~IndexDescriptor() {
- _magic = 555;
- }
-
- //
- // Information about the key pattern.
- //
-
- /**
- * Return the user-provided index key pattern.
- * Example: {geo: "2dsphere", nonGeo: 1}
- * Example: {foo: 1, bar: -1}
- */
- const BSONObj& keyPattern() const { _checkOk(); return _keyPattern; }
-
- // How many fields do we index / are in the key pattern?
- int getNumFields() const { _checkOk(); return _numFields; }
-
- //
- // Information about the index's namespace / collection.
- //
-
- // Return the name of the index.
- const std::string& indexName() const { _checkOk(); return _indexName; }
-
- // Return the name of the indexed collection.
- const std::string& parentNS() const { return _parentNS; }
-
- // Return the name of this index's storage area (database.table.$index)
- const std::string& indexNamespace() const { return _indexNamespace; }
-
- // Return the name of the access method we must use to access this index's data.
- const std::string& getAccessMethodName() const { return _accessMethodName; }
-
- //
- // Properties every index has
- //
-
- // Return what version of index this is.
- int version() const { return _version; }
-
- // May each key only occur once?
- bool unique() const { return _unique; }
-
- // Is this index sparse?
- bool isSparse() const { return _sparse; }
-
- // Is this a partial index?
- bool isPartial() const { return _partial; }
-
- // Is this index multikey?
- bool isMultikey( OperationContext* txn ) const {
- _checkOk();
- return _collection->getIndexCatalog()->isMultikey( txn, this );
+ IndexDescriptor(Collection* collection, const std::string& accessMethodName, BSONObj infoObj)
+ : _magic(123987),
+ _collection(collection),
+ _accessMethodName(accessMethodName),
+ _infoObj(infoObj.getOwned()),
+ _numFields(infoObj.getObjectField("key").nFields()),
+ _keyPattern(infoObj.getObjectField("key").getOwned()),
+ _indexName(infoObj.getStringField("name")),
+ _parentNS(infoObj.getStringField("ns")),
+ _isIdIndex(isIdIndexPattern(_keyPattern)),
+ _sparse(infoObj["sparse"].trueValue()),
+ _unique(_isIdIndex || infoObj["unique"].trueValue()),
+ _partial(!infoObj["partialFilterExpression"].eoo()),
+ _cachedEntry(NULL) {
+ _indexNamespace = makeIndexNamespace(_parentNS, _indexName);
+
+ _version = 0;
+ BSONElement e = _infoObj["v"];
+ if (e.isNumber()) {
+ _version = e.numberInt();
}
+ }
- bool isIdIndex() const { _checkOk(); return _isIdIndex; }
-
- //
- // Properties that are Index-specific.
- //
-
- // Allow access to arbitrary fields in the per-index info object. Some indices stash
- // index-specific data there.
- BSONElement getInfoElement(const std::string& name) const { return _infoObj[name]; }
+ ~IndexDescriptor() {
+ _magic = 555;
+ }
- //
- // "Internals" of accessing the index, used by IndexAccessMethod(s).
- //
+ //
+ // Information about the key pattern.
+ //
- // Return a (rather compact) std::string representation.
- std::string toString() const { _checkOk(); return _infoObj.toString(); }
-
- // Return the info object.
- const BSONObj& infoObj() const { _checkOk(); return _infoObj; }
-
- // Both the collection and the catalog must outlive the IndexDescriptor
- const Collection* getCollection() const { return _collection; }
- const IndexCatalog* getIndexCatalog() const { return _collection->getIndexCatalog(); }
-
- bool areIndexOptionsEquivalent( const IndexDescriptor* other ) const;
-
- static bool isIdIndexPattern( const BSONObj &pattern ) {
- BSONObjIterator i(pattern);
- BSONElement e = i.next();
- //_id index must have form exactly {_id : 1} or {_id : -1}.
- //Allows an index of form {_id : "hashed"} to exist but
- //do not consider it to be the primary _id index
- if(! ( strcmp(e.fieldName(), "_id") == 0
- && (e.numberInt() == 1 || e.numberInt() == -1)))
- return false;
- return i.next().eoo();
- }
-
- static std::string makeIndexNamespace( StringData ns,
- StringData name ) {
- return ns.toString() + ".$" + name.toString();
- }
-
- private:
-
- void _checkOk() const;
-
- int _magic;
-
- // Related catalog information of the parent collection
- Collection* _collection;
-
- // What access method should we use for this index?
- std::string _accessMethodName;
-
- // The BSONObj describing the index. Accessed through the various members above.
- const BSONObj _infoObj;
-
- // --- cached data from _infoObj
-
- int64_t _numFields; // How many fields are indexed?
- BSONObj _keyPattern;
- std::string _indexName;
- std::string _parentNS;
- std::string _indexNamespace;
- bool _isIdIndex;
- bool _sparse;
- bool _unique;
- bool _partial;
- int _version;
-
- // only used by IndexCatalogEntryContainer to do caching for perf
- // users not allowed to touch, and not part of API
- IndexCatalogEntry* _cachedEntry;
-
- friend class IndexCatalog;
- friend class IndexCatalogEntry;
- friend class IndexCatalogEntryContainer;
- };
+ /**
+ * Return the user-provided index key pattern.
+ * Example: {geo: "2dsphere", nonGeo: 1}
+ * Example: {foo: 1, bar: -1}
+ */
+ const BSONObj& keyPattern() const {
+ _checkOk();
+ return _keyPattern;
+ }
+
+ // How many fields do we index / are in the key pattern?
+ int getNumFields() const {
+ _checkOk();
+ return _numFields;
+ }
+
+ //
+ // Information about the index's namespace / collection.
+ //
+
+ // Return the name of the index.
+ const std::string& indexName() const {
+ _checkOk();
+ return _indexName;
+ }
+
+ // Return the name of the indexed collection.
+ const std::string& parentNS() const {
+ return _parentNS;
+ }
+
+ // Return the name of this index's storage area (database.table.$index)
+ const std::string& indexNamespace() const {
+ return _indexNamespace;
+ }
+
+ // Return the name of the access method we must use to access this index's data.
+ const std::string& getAccessMethodName() const {
+ return _accessMethodName;
+ }
+
+ //
+ // Properties every index has
+ //
+
+ // Return what version of index this is.
+ int version() const {
+ return _version;
+ }
+
+ // May each key only occur once?
+ bool unique() const {
+ return _unique;
+ }
+
+ // Is this index sparse?
+ bool isSparse() const {
+ return _sparse;
+ }
+
+ // Is this a partial index?
+ bool isPartial() const {
+ return _partial;
+ }
+
+ // Is this index multikey?
+ bool isMultikey(OperationContext* txn) const {
+ _checkOk();
+ return _collection->getIndexCatalog()->isMultikey(txn, this);
+ }
+
+ bool isIdIndex() const {
+ _checkOk();
+ return _isIdIndex;
+ }
+
+ //
+ // Properties that are Index-specific.
+ //
+
+ // Allow access to arbitrary fields in the per-index info object. Some indices stash
+ // index-specific data there.
+ BSONElement getInfoElement(const std::string& name) const {
+ return _infoObj[name];
+ }
+
+ //
+ // "Internals" of accessing the index, used by IndexAccessMethod(s).
+ //
+
+ // Return a (rather compact) std::string representation.
+ std::string toString() const {
+ _checkOk();
+ return _infoObj.toString();
+ }
+
+ // Return the info object.
+ const BSONObj& infoObj() const {
+ _checkOk();
+ return _infoObj;
+ }
+
+ // Both the collection and the catalog must outlive the IndexDescriptor
+ const Collection* getCollection() const {
+ return _collection;
+ }
+ const IndexCatalog* getIndexCatalog() const {
+ return _collection->getIndexCatalog();
+ }
+
+ bool areIndexOptionsEquivalent(const IndexDescriptor* other) const;
+
+ static bool isIdIndexPattern(const BSONObj& pattern) {
+ BSONObjIterator i(pattern);
+ BSONElement e = i.next();
+        // The _id index must have exactly the form {_id : 1} or {_id : -1}.
+        // An index of the form {_id : "hashed"} is allowed to exist, but it is
+        // not considered to be the primary _id index.
+ if (!(strcmp(e.fieldName(), "_id") == 0 && (e.numberInt() == 1 || e.numberInt() == -1)))
+ return false;
+ return i.next().eoo();
+ }
+
+ static std::string makeIndexNamespace(StringData ns, StringData name) {
+ return ns.toString() + ".$" + name.toString();
+ }
+
+private:
+ void _checkOk() const;
+
+ int _magic;
+
+ // Related catalog information of the parent collection
+ Collection* _collection;
+
+ // What access method should we use for this index?
+ std::string _accessMethodName;
+
+ // The BSONObj describing the index. Accessed through the various members above.
+ const BSONObj _infoObj;
+
+ // --- cached data from _infoObj
+
+ int64_t _numFields; // How many fields are indexed?
+ BSONObj _keyPattern;
+ std::string _indexName;
+ std::string _parentNS;
+ std::string _indexNamespace;
+ bool _isIdIndex;
+ bool _sparse;
+ bool _unique;
+ bool _partial;
+ int _version;
+
+ // only used by IndexCatalogEntryContainer to do caching for perf
+ // users not allowed to touch, and not part of API
+ IndexCatalogEntry* _cachedEntry;
+
+ friend class IndexCatalog;
+ friend class IndexCatalogEntry;
+ friend class IndexCatalogEntryContainer;
+};
} // namespace mongo
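makeIndexNamespace above encodes the convention that an index's storage namespace is the collection namespace plus ".$" plus the index name. A tiny standalone usage sketch of that convention (not tied to the real header):

#include <cassert>
#include <string>

// Illustration of the "<collection ns>.$<index name>" naming convention.
static std::string toyMakeIndexNamespace(const std::string& ns, const std::string& name) {
    return ns + ".$" + name;
}

int main() {
    assert(toyMakeIndexNamespace("test.users", "age_1") == "test.users.$age_1");
}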
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index a9c9630ef67..3fcf4d013a1 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -44,67 +44,66 @@
namespace mongo {
- static const string kIndexVersionFieldName("2dsphereIndexVersion");
-
- S2AccessMethod::S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
- : IndexAccessMethod(btreeState, btree) {
-
- const IndexDescriptor* descriptor = btreeState->descriptor();
-
- ExpressionParams::parse2dsphereParams(descriptor->infoObj(),
- &_params);
-
- int geoFields = 0;
-
- // Categorize the fields we're indexing and make sure we have a geo field.
- BSONObjIterator i(descriptor->keyPattern());
- while (i.more()) {
- BSONElement e = i.next();
- if (e.type() == String && IndexNames::GEO_2DSPHERE == e.String() ) {
- ++geoFields;
- }
- else {
- // We check for numeric in 2d, so that's the check here
- uassert( 16823, (string)"Cannot use " + IndexNames::GEO_2DSPHERE +
- " index with other special index types: " + e.toString(),
- e.isNumber() );
- }
- }
-
- uassert(16750, "Expect at least one geo field, spec=" + descriptor->keyPattern().toString(),
- geoFields >= 1);
-
- if (descriptor->isSparse()) {
- warning() << "Sparse option ignored for index spec "
- << descriptor->keyPattern().toString() << "\n";
+static const string kIndexVersionFieldName("2dsphereIndexVersion");
+
+S2AccessMethod::S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ const IndexDescriptor* descriptor = btreeState->descriptor();
+
+ ExpressionParams::parse2dsphereParams(descriptor->infoObj(), &_params);
+
+ int geoFields = 0;
+
+ // Categorize the fields we're indexing and make sure we have a geo field.
+ BSONObjIterator i(descriptor->keyPattern());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == String && IndexNames::GEO_2DSPHERE == e.String()) {
+ ++geoFields;
+ } else {
+ // We check for numeric in 2d, so that's the check here
+ uassert(16823,
+ (string) "Cannot use " + IndexNames::GEO_2DSPHERE +
+ " index with other special index types: " + e.toString(),
+ e.isNumber());
}
}
- // static
- BSONObj S2AccessMethod::fixSpec(const BSONObj& specObj) {
- // If the spec object has the field "2dsphereIndexVersion", validate it. If it doesn't, add
- // {2dsphereIndexVersion: 2}, which is the default for newly-built indexes.
-
- BSONElement indexVersionElt = specObj[kIndexVersionFieldName];
- if (indexVersionElt.eoo()) {
- BSONObjBuilder bob;
- bob.appendElements(specObj);
- bob.append(kIndexVersionFieldName, S2_INDEX_VERSION_2);
- return bob.obj();
- }
+ uassert(16750,
+ "Expect at least one geo field, spec=" + descriptor->keyPattern().toString(),
+ geoFields >= 1);
- const int indexVersion = indexVersionElt.numberInt();
- uassert(17394,
- str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : " << indexVersionElt << " }, only support versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << "]",
- indexVersionElt.isNumber() && (indexVersion == S2_INDEX_VERSION_2
- || indexVersion == S2_INDEX_VERSION_1));
- return specObj;
+ if (descriptor->isSparse()) {
+ warning() << "Sparse option ignored for index spec " << descriptor->keyPattern().toString()
+ << "\n";
}
-
- void S2AccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::getS2Keys(obj, _descriptor->keyPattern(), _params, keys);
+}
+
+// static
+BSONObj S2AccessMethod::fixSpec(const BSONObj& specObj) {
+ // If the spec object has the field "2dsphereIndexVersion", validate it. If it doesn't, add
+ // {2dsphereIndexVersion: 2}, which is the default for newly-built indexes.
+
+ BSONElement indexVersionElt = specObj[kIndexVersionFieldName];
+ if (indexVersionElt.eoo()) {
+ BSONObjBuilder bob;
+ bob.appendElements(specObj);
+ bob.append(kIndexVersionFieldName, S2_INDEX_VERSION_2);
+ return bob.obj();
}
+ const int indexVersion = indexVersionElt.numberInt();
+ uassert(17394,
+ str::stream() << "unsupported geo index version { " << kIndexVersionFieldName << " : "
+ << indexVersionElt << " }, only support versions: [" << S2_INDEX_VERSION_1
+ << "," << S2_INDEX_VERSION_2 << "]",
+ indexVersionElt.isNumber() &&
+ (indexVersion == S2_INDEX_VERSION_2 || indexVersion == S2_INDEX_VERSION_1));
+ return specObj;
+}
+
+void S2AccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::getS2Keys(obj, _descriptor->keyPattern(), _params, keys);
+}
+
} // namespace mongo
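fixSpec's defaulting behavior above can be summarized as: if the spec omits 2dsphereIndexVersion, append version 2; otherwise validate the value that is there. A hedged sketch of just the defaulting step, with a plain map standing in for the BSON spec object (validation omitted):

#include <map>
#include <string>

// If "2dsphereIndexVersion" is absent, default it to version 2; otherwise
// leave the spec alone. Mirrors only the defaulting branch of fixSpec().
static std::map<std::string, int> toyFixSpec(std::map<std::string, int> spec) {
    if (spec.find("2dsphereIndexVersion") == spec.end())
        spec["2dsphereIndexVersion"] = 2;  // S2_INDEX_VERSION_2
    return spec;
}

int main() {
    auto spec = toyFixSpec({{"bits", 26}});
    return spec.at("2dsphereIndexVersion") == 2 ? 0 : 1;
}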
diff --git a/src/mongo/db/index/s2_access_method.h b/src/mongo/db/index/s2_access_method.h
index 9ce655e47f7..15e0c3a773e 100644
--- a/src/mongo/db/index/s2_access_method.h
+++ b/src/mongo/db/index/s2_access_method.h
@@ -36,22 +36,22 @@
namespace mongo {
- class S2AccessMethod : public IndexAccessMethod {
- public:
- S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
-
- /**
- * Takes an index spec object for this index and returns a copy tweaked to conform to the
- * expected format. When an index build is initiated, this function is called on the spec
- * object the user provides, and the return value of this function is the final spec object
- * that gets saved in the index catalog. Throws a UserException if 'specObj' is invalid.
- */
- static BSONObj fixSpec(const BSONObj& specObj);
-
- private:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
-
- S2IndexingParams _params;
- };
+class S2AccessMethod : public IndexAccessMethod {
+public:
+ S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+
+ /**
+ * Takes an index spec object for this index and returns a copy tweaked to conform to the
+ * expected format. When an index build is initiated, this function is called on the spec
+ * object the user provides, and the return value of this function is the final spec object
+ * that gets saved in the index catalog. Throws a UserException if 'specObj' is invalid.
+ */
+ static BSONObj fixSpec(const BSONObj& specObj);
+
+private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+
+ S2IndexingParams _params;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/s2_common.h b/src/mongo/db/index/s2_common.h
index 3df3c833694..fad4087bb89 100644
--- a/src/mongo/db/index/s2_common.h
+++ b/src/mongo/db/index/s2_common.h
@@ -38,52 +38,52 @@
namespace mongo {
- // An enum describing the version of an S2 index.
- enum S2IndexVersion {
- // The first version of the S2 index, introduced in MongoDB 2.4.0. Compatible with MongoDB
- // 2.4.0 and later. Supports the following GeoJSON objects: Point, LineString, Polygon.
- S2_INDEX_VERSION_1 = 1,
+// An enum describing the version of an S2 index.
+enum S2IndexVersion {
+ // The first version of the S2 index, introduced in MongoDB 2.4.0. Compatible with MongoDB
+ // 2.4.0 and later. Supports the following GeoJSON objects: Point, LineString, Polygon.
+ S2_INDEX_VERSION_1 = 1,
- // The current version of the S2 index, introduced in MongoDB 2.6.0. Compatible with
- // MongoDB 2.6.0 and later. Introduced support for the following GeoJSON objects:
- // MultiPoint, MultiLineString, MultiPolygon, GeometryCollection.
- S2_INDEX_VERSION_2 = 2
- };
+ // The current version of the S2 index, introduced in MongoDB 2.6.0. Compatible with
+ // MongoDB 2.6.0 and later. Introduced support for the following GeoJSON objects:
+ // MultiPoint, MultiLineString, MultiPolygon, GeometryCollection.
+ S2_INDEX_VERSION_2 = 2
+};
- struct S2IndexingParams {
- // Since we take the cartesian product when we generate keys for an insert,
- // we need a cap.
- size_t maxKeysPerInsert;
- // This is really an advisory parameter that we pass to the cover generator. The
- // finest/coarsest index level determine the required # of cells.
- int maxCellsInCovering;
- // What's the finest grained level that we'll index? When we query for a point
- // we start at that -- we index nothing finer than this.
- int finestIndexedLevel;
- // And, what's the coarsest? When we search in larger coverings we know we
- // can stop here -- we index nothing coarser than this.
- int coarsestIndexedLevel;
- // Version of this index (specific to the index type).
- S2IndexVersion indexVersion;
+struct S2IndexingParams {
+ // Since we take the cartesian product when we generate keys for an insert,
+ // we need a cap.
+ size_t maxKeysPerInsert;
+ // This is really an advisory parameter that we pass to the cover generator. The
+ // finest/coarsest index level determine the required # of cells.
+ int maxCellsInCovering;
+ // What's the finest grained level that we'll index? When we query for a point
+ // we start at that -- we index nothing finer than this.
+ int finestIndexedLevel;
+ // And, what's the coarsest? When we search in larger coverings we know we
+ // can stop here -- we index nothing coarser than this.
+ int coarsestIndexedLevel;
+ // Version of this index (specific to the index type).
+ S2IndexVersion indexVersion;
- double radius;
+ double radius;
- std::string toString() const {
- std::stringstream ss;
- ss << "maxKeysPerInsert: " << maxKeysPerInsert << std::endl;
- ss << "maxCellsInCovering: " << maxCellsInCovering << std::endl;
- ss << "finestIndexedLevel: " << finestIndexedLevel << std::endl;
- ss << "coarsestIndexedLevel: " << coarsestIndexedLevel << std::endl;
- ss << "indexVersion: " << indexVersion << std::endl;
- return ss.str();
- }
+ std::string toString() const {
+ std::stringstream ss;
+ ss << "maxKeysPerInsert: " << maxKeysPerInsert << std::endl;
+ ss << "maxCellsInCovering: " << maxCellsInCovering << std::endl;
+ ss << "finestIndexedLevel: " << finestIndexedLevel << std::endl;
+ ss << "coarsestIndexedLevel: " << coarsestIndexedLevel << std::endl;
+ ss << "indexVersion: " << indexVersion << std::endl;
+ return ss.str();
+ }
- void configureCoverer(S2RegionCoverer *coverer) const {
- coverer->set_min_level(coarsestIndexedLevel);
- coverer->set_max_level(finestIndexedLevel);
- // This is advisory; the two above are strict.
- coverer->set_max_cells(maxCellsInCovering);
- }
- };
+ void configureCoverer(S2RegionCoverer* coverer) const {
+ coverer->set_min_level(coarsestIndexedLevel);
+ coverer->set_max_level(finestIndexedLevel);
+ // This is advisory; the two above are strict.
+ coverer->set_max_cells(maxCellsInCovering);
+ }
+};
} // namespace mongo
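configureCoverer above treats the coarsest/finest levels as hard bounds and the cell count as advisory. A standalone sketch with a hypothetical ToyCoverer in place of S2RegionCoverer (the numeric values are arbitrary illustrations, not MongoDB defaults):

#include <iostream>

// Hypothetical stand-in for S2RegionCoverer, just to show how the indexing
// params constrain the covering: min/max level are hard limits, max cells is advisory.
struct ToyCoverer {
    int minLevel = 0, maxLevel = 30, maxCells = 8;
    void set_min_level(int l) { minLevel = l; }
    void set_max_level(int l) { maxLevel = l; }
    void set_max_cells(int c) { maxCells = c; }
};

struct ToyS2Params {
    int finestIndexedLevel = 23;   // arbitrary illustrative value
    int coarsestIndexedLevel = 9;  // arbitrary illustrative value
    int maxCellsInCovering = 50;

    void configureCoverer(ToyCoverer* coverer) const {
        coverer->set_min_level(coarsestIndexedLevel);
        coverer->set_max_level(finestIndexedLevel);
        coverer->set_max_cells(maxCellsInCovering);  // advisory only
    }
};

int main() {
    ToyS2Params params;
    ToyCoverer coverer;
    params.configureCoverer(&coverer);
    std::cout << "levels " << coverer.minLevel << ".." << coverer.maxLevel
              << ", max cells " << coverer.maxCells << "\n";
}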
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index 9a7927ba0b9..34cece9f97f 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -47,179 +47,176 @@
namespace mongo {
- using std::endl;
+using std::endl;
- AtomicUInt32 IndexBuilder::_indexBuildCount;
+AtomicUInt32 IndexBuilder::_indexBuildCount;
namespace {
- // Synchronization tools when replication spawns a background index in a new thread.
- // The bool is 'true' when a new background index has started in a new thread but the
- // parent thread has not yet synchronized with it.
- bool _bgIndexStarting(false);
- stdx::mutex _bgIndexStartingMutex;
- stdx::condition_variable _bgIndexStartingCondVar;
-
- void _setBgIndexStarting() {
- stdx::lock_guard<stdx::mutex> lk(_bgIndexStartingMutex);
- invariant(_bgIndexStarting == false);
- _bgIndexStarting = true;
- _bgIndexStartingCondVar.notify_one();
- }
-} // namespace
+// Synchronization tools when replication spawns a background index in a new thread.
+// The bool is 'true' when a new background index has started in a new thread but the
+// parent thread has not yet synchronized with it.
+bool _bgIndexStarting(false);
+stdx::mutex _bgIndexStartingMutex;
+stdx::condition_variable _bgIndexStartingCondVar;
+
+void _setBgIndexStarting() {
+ stdx::lock_guard<stdx::mutex> lk(_bgIndexStartingMutex);
+ invariant(_bgIndexStarting == false);
+ _bgIndexStarting = true;
+ _bgIndexStartingCondVar.notify_one();
+}
+} // namespace
- IndexBuilder::IndexBuilder(const BSONObj& index) :
- BackgroundJob(true /* self-delete */), _index(index.getOwned()),
- _name(str::stream() << "repl index builder " << _indexBuildCount.addAndFetch(1)) {
- }
+IndexBuilder::IndexBuilder(const BSONObj& index)
+ : BackgroundJob(true /* self-delete */),
+ _index(index.getOwned()),
+ _name(str::stream() << "repl index builder " << _indexBuildCount.addAndFetch(1)) {}
- IndexBuilder::~IndexBuilder() {}
+IndexBuilder::~IndexBuilder() {}
- std::string IndexBuilder::name() const {
- return _name;
- }
+std::string IndexBuilder::name() const {
+ return _name;
+}
- void IndexBuilder::run() {
- Client::initThread(name().c_str());
- LOG(2) << "IndexBuilder building index " << _index;
+void IndexBuilder::run() {
+ Client::initThread(name().c_str());
+ LOG(2) << "IndexBuilder building index " << _index;
- OperationContextImpl txn;
- txn.lockState()->setIsBatchWriter(true);
+ OperationContextImpl txn;
+ txn.lockState()->setIsBatchWriter(true);
- AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
+ AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
- {
- stdx::lock_guard<Client> lk(*txn.getClient());
- CurOp::get(txn)->setOp_inlock(dbInsert);
- }
- NamespaceString ns(_index["ns"].String());
+ {
+ stdx::lock_guard<Client> lk(*txn.getClient());
+ CurOp::get(txn)->setOp_inlock(dbInsert);
+ }
+ NamespaceString ns(_index["ns"].String());
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dlk(txn.lockState(), ns.db(), MODE_X);
- OldClientContext ctx(&txn, ns.getSystemIndexesCollection());
+ ScopedTransaction transaction(&txn, MODE_IX);
+ Lock::DBLock dlk(txn.lockState(), ns.db(), MODE_X);
+ OldClientContext ctx(&txn, ns.getSystemIndexesCollection());
- Database* db = dbHolder().get(&txn, ns.db().toString());
+ Database* db = dbHolder().get(&txn, ns.db().toString());
- Status status = _build(&txn, db, true, &dlk);
- if ( !status.isOK() ) {
- error() << "IndexBuilder could not build index: " << status.toString();
- fassert(28555, ErrorCodes::isInterruption(status.code()));
- }
+ Status status = _build(&txn, db, true, &dlk);
+ if (!status.isOK()) {
+ error() << "IndexBuilder could not build index: " << status.toString();
+ fassert(28555, ErrorCodes::isInterruption(status.code()));
}
+}
- Status IndexBuilder::buildInForeground(OperationContext* txn, Database* db) const {
- return _build(txn, db, false, NULL);
- }
+Status IndexBuilder::buildInForeground(OperationContext* txn, Database* db) const {
+ return _build(txn, db, false, NULL);
+}
- void IndexBuilder::waitForBgIndexStarting() {
- stdx::unique_lock<stdx::mutex> lk(_bgIndexStartingMutex);
- while (_bgIndexStarting == false) {
- _bgIndexStartingCondVar.wait(lk);
- }
- // Reset for next time.
- _bgIndexStarting = false;
+void IndexBuilder::waitForBgIndexStarting() {
+ stdx::unique_lock<stdx::mutex> lk(_bgIndexStartingMutex);
+ while (_bgIndexStarting == false) {
+ _bgIndexStartingCondVar.wait(lk);
}
+ // Reset for next time.
+ _bgIndexStarting = false;
+}
- Status IndexBuilder::_build(OperationContext* txn,
- Database* db,
- bool allowBackgroundBuilding,
- Lock::DBLock* dbLock) const {
- const NamespaceString ns(_index["ns"].String());
+Status IndexBuilder::_build(OperationContext* txn,
+ Database* db,
+ bool allowBackgroundBuilding,
+ Lock::DBLock* dbLock) const {
+ const NamespaceString ns(_index["ns"].String());
- Collection* c = db->getCollection( ns.ns() );
- if ( !c ) {
- while (true) {
- try {
- WriteUnitOfWork wunit(txn);
- c = db->getOrCreateCollection( txn, ns.ns() );
- verify(c);
- wunit.commit();
- break;
- }
- catch (const WriteConflictException& wce) {
- LOG(2) << "WriteConflictException while creating collection in IndexBuilder"
- << ", retrying.";
- txn->recoveryUnit()->abandonSnapshot();
- continue;
- }
+ Collection* c = db->getCollection(ns.ns());
+ if (!c) {
+ while (true) {
+ try {
+ WriteUnitOfWork wunit(txn);
+ c = db->getOrCreateCollection(txn, ns.ns());
+ verify(c);
+ wunit.commit();
+ break;
+ } catch (const WriteConflictException& wce) {
+ LOG(2) << "WriteConflictException while creating collection in IndexBuilder"
+ << ", retrying.";
+ txn->recoveryUnit()->abandonSnapshot();
+ continue;
}
}
+ }
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- // Show which index we're building in the curop display.
- CurOp::get(txn)->setQuery_inlock(_index);
- }
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ // Show which index we're building in the curop display.
+ CurOp::get(txn)->setQuery_inlock(_index);
+ }
- bool haveSetBgIndexStarting = false;
- while (true) {
- Status status = Status::OK();
- try {
- MultiIndexBlock indexer(txn, c);
- indexer.allowInterruption();
+ bool haveSetBgIndexStarting = false;
+ while (true) {
+ Status status = Status::OK();
+ try {
+ MultiIndexBlock indexer(txn, c);
+ indexer.allowInterruption();
- if (allowBackgroundBuilding)
- indexer.allowBackgroundBuilding();
+ if (allowBackgroundBuilding)
+ indexer.allowBackgroundBuilding();
- try {
- status = indexer.init(_index);
- if ( status.code() == ErrorCodes::IndexAlreadyExists ) {
- if (allowBackgroundBuilding) {
- // Must set this in case anyone is waiting for this build.
- _setBgIndexStarting();
- }
- return Status::OK();
+ try {
+ status = indexer.init(_index);
+ if (status.code() == ErrorCodes::IndexAlreadyExists) {
+ if (allowBackgroundBuilding) {
+ // Must set this in case anyone is waiting for this build.
+ _setBgIndexStarting();
}
+ return Status::OK();
+ }
- if (status.isOK()) {
- if (allowBackgroundBuilding) {
- if (!haveSetBgIndexStarting) {
- _setBgIndexStarting();
- haveSetBgIndexStarting = true;
- }
- invariant(dbLock);
- dbLock->relockWithMode(MODE_IX);
+ if (status.isOK()) {
+ if (allowBackgroundBuilding) {
+ if (!haveSetBgIndexStarting) {
+ _setBgIndexStarting();
+ haveSetBgIndexStarting = true;
}
-
- Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
- status = indexer.insertAllDocumentsInCollection();
+ invariant(dbLock);
+ dbLock->relockWithMode(MODE_IX);
}
- if (status.isOK()) {
- if (allowBackgroundBuilding) {
- dbLock->relockWithMode(MODE_X);
- }
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
- }
- }
- catch (const DBException& e) {
- status = e.toStatus();
+ Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
+ status = indexer.insertAllDocumentsInCollection();
}
- if (allowBackgroundBuilding) {
- dbLock->relockWithMode(MODE_X);
- Database* reloadDb = dbHolder().get(txn, ns.db());
- fassert(28553, reloadDb);
- fassert(28554, reloadDb->getCollection(ns.ns()));
+ if (status.isOK()) {
+ if (allowBackgroundBuilding) {
+ dbLock->relockWithMode(MODE_X);
+ }
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
}
+ } catch (const DBException& e) {
+ status = e.toStatus();
+ }
- if (status.code() == ErrorCodes::InterruptedAtShutdown) {
- // leave it as-if kill -9 happened. This will be handled on restart.
- indexer.abortWithoutCleanup();
- }
+ if (allowBackgroundBuilding) {
+ dbLock->relockWithMode(MODE_X);
+ Database* reloadDb = dbHolder().get(txn, ns.db());
+ fassert(28553, reloadDb);
+ fassert(28554, reloadDb->getCollection(ns.ns()));
}
- catch (const WriteConflictException& wce) {
- status = wce.toStatus();
+
+ if (status.code() == ErrorCodes::InterruptedAtShutdown) {
+ // leave it as-if kill -9 happened. This will be handled on restart.
+ indexer.abortWithoutCleanup();
}
+ } catch (const WriteConflictException& wce) {
+ status = wce.toStatus();
+ }
- if (status.code() != ErrorCodes::WriteConflict)
- return status;
+ if (status.code() != ErrorCodes::WriteConflict)
+ return status;
- LOG(2) << "WriteConflictException while creating index in IndexBuilder, retrying.";
- txn->recoveryUnit()->abandonSnapshot();
- }
+ LOG(2) << "WriteConflictException while creating index in IndexBuilder, retrying.";
+ txn->recoveryUnit()->abandonSnapshot();
}
}
+}
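
Most of _build() above is shaped by one idiom: catch WriteConflictException, abandon the snapshot, and retry the whole unit of work. Stripped of the index-specific details it looks roughly like this (the helper and its callback are illustrative, not code from this file):

// Sketch of the retry idiom in IndexBuilder::_build(): only WriteConflict loops;
// every other status is handed back to the caller.
Status runWithWriteConflictRetry(OperationContext* txn, stdx::function<Status()> work) {
    while (true) {
        Status status = Status::OK();
        try {
            status = work();
        } catch (const WriteConflictException& wce) {
            status = wce.toStatus();
        }
        if (status.code() != ErrorCodes::WriteConflict)
            return status;
        LOG(2) << "WriteConflictException, retrying.";
        txn->recoveryUnit()->abandonSnapshot();
    }
}
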
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index dabfe92bae2..2e87a8198a3 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -38,58 +38,57 @@
namespace mongo {
- class Collection;
- class Database;
- class OperationContext;
+class Collection;
+class Database;
+class OperationContext;
- /**
- * A helper class for replication to use for building indexes.
- * In standalone mode, we use the client connection thread for building indexes in the
- * background. In replication mode, secondaries must spawn a new thread to build background
- * indexes, since there are no client connection threads to use for such purpose. IndexBuilder
- * is a subclass of BackgroundJob to enable this use.
- * This class is also used for building indexes in the foreground on secondaries, for
- * code convenience. buildInForeground() is directly called by the replication applier to
- * build an index in the foreground; the properties of BackgroundJob are not used for this use
- * case.
- * For background index builds, BackgroundJob::go() is called on the IndexBuilder instance,
- * which begins a new thread at this class's run() method. After go() is called in the
- * parent thread, waitForBgIndexStarting() must be called by the same parent thread,
- * before any other thread calls go() on any other IndexBuilder instance. This is
- * ensured by the replication system, since commands are effectively run single-threaded
- * by the replication applier, and index builds are treated as commands even though they look
- * like inserts on system.indexes.
- */
- class IndexBuilder : public BackgroundJob {
- public:
- IndexBuilder(const BSONObj& index);
- virtual ~IndexBuilder();
-
- virtual void run();
+/**
+ * A helper class for replication to use for building indexes.
+ * In standalone mode, we use the client connection thread for building indexes in the
+ * background. In replication mode, secondaries must spawn a new thread to build background
+ * indexes, since there are no client connection threads to use for such purpose. IndexBuilder
+ * is a subclass of BackgroundJob to enable this use.
+ * This class is also used for building indexes in the foreground on secondaries, for
+ * code convenience. buildInForeground() is directly called by the replication applier to
+ * build an index in the foreground; the properties of BackgroundJob are not used for this use
+ * case.
+ * For background index builds, BackgroundJob::go() is called on the IndexBuilder instance,
+ * which begins a new thread at this class's run() method. After go() is called in the
+ * parent thread, waitForBgIndexStarting() must be called by the same parent thread,
+ * before any other thread calls go() on any other IndexBuilder instance. This is
+ * ensured by the replication system, since commands are effectively run single-threaded
+ * by the replication applier, and index builds are treated as commands even though they look
+ * like inserts on system.indexes.
+ */
+class IndexBuilder : public BackgroundJob {
+public:
+ IndexBuilder(const BSONObj& index);
+ virtual ~IndexBuilder();
- /**
- * name of the builder, not the index
- */
- virtual std::string name() const;
+ virtual void run();
- Status buildInForeground(OperationContext* txn, Database* db) const;
+ /**
+ * name of the builder, not the index
+ */
+ virtual std::string name() const;
- /**
- * Waits for a background index build to register itself. This function must be called
- * after starting a background index build via a BackgroundJob and before starting a
- * subsequent one.
- */
- static void waitForBgIndexStarting();
+ Status buildInForeground(OperationContext* txn, Database* db) const;
- private:
- Status _build(OperationContext* txn,
- Database* db,
- bool allowBackgroundBuilding,
- Lock::DBLock* dbLock) const;
+ /**
+ * Waits for a background index build to register itself. This function must be called
+ * after starting a background index build via a BackgroundJob and before starting a
+ * subsequent one.
+ */
+ static void waitForBgIndexStarting();
- const BSONObj _index;
- std::string _name; // name of this builder, not related to the index
- static AtomicUInt32 _indexBuildCount;
- };
+private:
+ Status _build(OperationContext* txn,
+ Database* db,
+ bool allowBackgroundBuilding,
+ Lock::DBLock* dbLock) const;
+ const BSONObj _index;
+ std::string _name; // name of this builder, not related to the index
+ static AtomicUInt32 _indexBuildCount;
+};
}
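
The class comment above carries the one ordering rule callers must respect: after go(), the same parent thread must call waitForBgIndexStarting() before any other IndexBuilder is started. As a sketch (the indexSpec variable and the surrounding call site are assumptions; only the two calls come from this header):

// Spawning a background build from the replication applier, per the comment above.
IndexBuilder* builder = new IndexBuilder(indexSpec);  // self-deleting BackgroundJob
builder->go();                           // runs IndexBuilder::run() on a new thread
IndexBuilder::waitForBgIndexStarting();  // required before the next go() anywhere
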
diff --git a/src/mongo/db/index_legacy.cpp b/src/mongo/db/index_legacy.cpp
index 9131fedee3c..ca6a97ce3e9 100644
--- a/src/mongo/db/index_legacy.cpp
+++ b/src/mongo/db/index_legacy.cpp
@@ -37,49 +37,49 @@
namespace mongo {
- // static
- BSONObj IndexLegacy::adjustIndexSpecObject(const BSONObj& obj) {
- string pluginName = IndexNames::findPluginName(obj.getObjectField("key"));
+// static
+BSONObj IndexLegacy::adjustIndexSpecObject(const BSONObj& obj) {
+ string pluginName = IndexNames::findPluginName(obj.getObjectField("key"));
- if (IndexNames::TEXT == pluginName) {
- return fts::FTSSpec::fixSpec(obj);
- }
-
- if (IndexNames::GEO_2DSPHERE == pluginName) {
- return S2AccessMethod::fixSpec(obj);
- }
+ if (IndexNames::TEXT == pluginName) {
+ return fts::FTSSpec::fixSpec(obj);
+ }
- return obj;
+ if (IndexNames::GEO_2DSPHERE == pluginName) {
+ return S2AccessMethod::fixSpec(obj);
}
- // static
- BSONObj IndexLegacy::getMissingField(OperationContext* txn,
- Collection* collection,
- const BSONObj& infoObj) {
- BSONObj keyPattern = infoObj.getObjectField( "key" );
- string accessMethodName;
- if ( collection )
- accessMethodName = collection->getIndexCatalog()->getAccessMethodName(txn, keyPattern);
- else
- accessMethodName = IndexNames::findPluginName(keyPattern);
+ return obj;
+}
+
+// static
+BSONObj IndexLegacy::getMissingField(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& infoObj) {
+ BSONObj keyPattern = infoObj.getObjectField("key");
+ string accessMethodName;
+ if (collection)
+ accessMethodName = collection->getIndexCatalog()->getAccessMethodName(txn, keyPattern);
+ else
+ accessMethodName = IndexNames::findPluginName(keyPattern);
- if (IndexNames::HASHED == accessMethodName ) {
- int hashVersion = infoObj["hashVersion"].numberInt();
- HashSeed seed = infoObj["seed"].numberInt();
+ if (IndexNames::HASHED == accessMethodName) {
+ int hashVersion = infoObj["hashVersion"].numberInt();
+ HashSeed seed = infoObj["seed"].numberInt();
- // Explicit null valued fields and missing fields are both represented in hashed indexes
- // using the hash value of the null BSONElement. This is partly for historical reasons
- // (hash of null was used in the initial release of hashed indexes and changing would
- // alter the data format). Additionally, in certain places the hashed index code and
- // the index bound calculation code assume null and missing are indexed identically.
- BSONObj nullObj = BSON("" << BSONNULL);
- return BSON("" << ExpressionKeysPrivate::makeSingleHashKey(nullObj.firstElement(), seed, hashVersion));
- }
- else {
- BSONObjBuilder b;
- b.appendNull("");
- return b.obj();
- }
+ // Explicit null valued fields and missing fields are both represented in hashed indexes
+ // using the hash value of the null BSONElement. This is partly for historical reasons
+ // (hash of null was used in the initial release of hashed indexes and changing would
+ // alter the data format). Additionally, in certain places the hashed index code and
+ // the index bound calculation code assume null and missing are indexed identically.
+ BSONObj nullObj = BSON("" << BSONNULL);
+ return BSON("" << ExpressionKeysPrivate::makeSingleHashKey(
+ nullObj.firstElement(), seed, hashVersion));
+ } else {
+ BSONObjBuilder b;
+ b.appendNull("");
+ return b.obj();
}
+}
} // namespace mongo
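
getMissingField() above is what makes "field absent" and "field explicitly null" index identically. A sketch of the two cases a caller sees (txn, collection and the two infoObj variables are assumed, not part of this change):

// Btree and every other non-hashed index: the missing-field key is the null element, {"": null}.
BSONObj missingBtree = IndexLegacy::getMissingField(txn, collection, btreeInfoObj);

// Hashed index: the key is the hash of that same null element (seed and hashVersion taken
// from the index infoObj), so missing fields and explicit nulls share one key.
BSONObj missingHashed = IndexLegacy::getMissingField(txn, collection, hashedInfoObj);
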
diff --git a/src/mongo/db/index_legacy.h b/src/mongo/db/index_legacy.h
index bd8ebf467b0..19640c02e71 100644
--- a/src/mongo/db/index_legacy.h
+++ b/src/mongo/db/index_legacy.h
@@ -33,44 +33,43 @@
namespace mongo {
- class Collection;
- class IndexDescriptor;
- class OperationContext;
+class Collection;
+class IndexDescriptor;
+class OperationContext;
+/**
+ * There has been some behavior concerning indexed access patterns -- both pre and post-index
+ * construction -- that does not quite fit in the access pattern model implemented in
+ * index/index_access_pattern.h. Such behavior can't be changed in the current implementation of
+ * the code.
+ *
+ * We grouped such exception/legacy behavior here.
+ */
+class IndexLegacy {
+public:
/**
- * There has been some behavior concerning indexed access patterns -- both pre and post-index
- * construction -- that does not quite fit in the access pattern model implemented in
- * index/index_access_pattern.h. Such behavior can't be changed in the current implementation of
- * the code.
+ * Adjust the provided index spec BSONObj depending on the type of index obj describes.
*
- * We grouped such exception/legacy behavior here.
+ * This is a no-op unless the object describes a TEXT or a GEO_2DSPHERE index. TEXT and
+ * GEO_2DSPHERE provide additional validation on the index spec, and tweak the index spec
+ * object to conform to their expected format.
*/
- class IndexLegacy {
- public:
- /**
- * Adjust the provided index spec BSONObj depending on the type of index obj describes.
- *
- * This is a no-op unless the object describes a TEXT or a GEO_2DSPHERE index. TEXT and
- * GEO_2DSPHERE provide additional validation on the index spec, and tweak the index spec
- * object to conform to their expected format.
- */
- static BSONObj adjustIndexSpecObject(const BSONObj& obj);
-
- /**
- * Returns the BSONObj that is inserted into an index when the object is missing the keys
- * the index is over.
- *
- * For every index *except hash*, this is the BSON equivalent of jstNULL.
- * For the hash index, it's the hash of BSON("" << BSONNULL).
- *
- * s/d_split.cpp needs to know this.
- *
- * This is a significant leak of index functionality out of the index layer.
- */
- static BSONObj getMissingField(OperationContext* txn,
- Collection* collection,
- const BSONObj& infoObj);
+ static BSONObj adjustIndexSpecObject(const BSONObj& obj);
- };
+ /**
+ * Returns the BSONObj that is inserted into an index when the object is missing the keys
+ * the index is over.
+ *
+ * For every index *except hash*, this is the BSON equivalent of jstNULL.
+ * For the hash index, it's the hash of BSON("" << BSONNULL).
+ *
+ * s/d_split.cpp needs to know this.
+ *
+ * This is a significant leak of index functionality out of the index layer.
+ */
+ static BSONObj getMissingField(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& infoObj);
+};
} // namespace mongo
diff --git a/src/mongo/db/index_names.cpp b/src/mongo/db/index_names.cpp
index 6f0b3f095ab..ce55d302f33 100644
--- a/src/mongo/db/index_names.cpp
+++ b/src/mongo/db/index_names.cpp
@@ -32,66 +32,58 @@
namespace mongo {
- using std::string;
+using std::string;
- const string IndexNames::GEO_2D = "2d";
- const string IndexNames::GEO_HAYSTACK = "geoHaystack";
- const string IndexNames::GEO_2DSPHERE = "2dsphere";
- const string IndexNames::TEXT = "text";
- const string IndexNames::HASHED = "hashed";
- const string IndexNames::BTREE = "";
+const string IndexNames::GEO_2D = "2d";
+const string IndexNames::GEO_HAYSTACK = "geoHaystack";
+const string IndexNames::GEO_2DSPHERE = "2dsphere";
+const string IndexNames::TEXT = "text";
+const string IndexNames::HASHED = "hashed";
+const string IndexNames::BTREE = "";
- // static
- string IndexNames::findPluginName(const BSONObj& keyPattern) {
- BSONObjIterator i(keyPattern);
+// static
+string IndexNames::findPluginName(const BSONObj& keyPattern) {
+ BSONObjIterator i(keyPattern);
- while (i.more()) {
- BSONElement e = i.next();
- if (String != e.type()) { continue; }
- return e.String();
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (String != e.type()) {
+ continue;
}
-
- return IndexNames::BTREE;
+ return e.String();
}
- // static
- bool IndexNames::existedBefore24(const string& name) {
- return name == IndexNames::BTREE
- || name == IndexNames::GEO_2D
- || name == IndexNames::GEO_HAYSTACK
- || name == IndexNames::HASHED;
- }
+ return IndexNames::BTREE;
+}
- // static
- bool IndexNames::isKnownName(const string& name) {
- return name == IndexNames::GEO_2D
- || name == IndexNames::GEO_2DSPHERE
- || name == IndexNames::GEO_HAYSTACK
- || name == IndexNames::TEXT
- || name == IndexNames::HASHED
- || name == IndexNames::BTREE;
- }
+// static
+bool IndexNames::existedBefore24(const string& name) {
+ return name == IndexNames::BTREE || name == IndexNames::GEO_2D ||
+ name == IndexNames::GEO_HAYSTACK || name == IndexNames::HASHED;
+}
- // static
- IndexType IndexNames::nameToType(const string& accessMethod) {
- if (IndexNames::GEO_2D == accessMethod) {
- return INDEX_2D;
- }
- else if (IndexNames::GEO_HAYSTACK == accessMethod) {
- return INDEX_HAYSTACK;
- }
- else if (IndexNames::GEO_2DSPHERE == accessMethod) {
- return INDEX_2DSPHERE;
- }
- else if (IndexNames::TEXT == accessMethod) {
- return INDEX_TEXT;
- }
- else if (IndexNames::HASHED == accessMethod) {
- return INDEX_HASHED;
- }
- else {
- return INDEX_BTREE;
- }
+// static
+bool IndexNames::isKnownName(const string& name) {
+ return name == IndexNames::GEO_2D || name == IndexNames::GEO_2DSPHERE ||
+ name == IndexNames::GEO_HAYSTACK || name == IndexNames::TEXT ||
+ name == IndexNames::HASHED || name == IndexNames::BTREE;
+}
+
+// static
+IndexType IndexNames::nameToType(const string& accessMethod) {
+ if (IndexNames::GEO_2D == accessMethod) {
+ return INDEX_2D;
+ } else if (IndexNames::GEO_HAYSTACK == accessMethod) {
+ return INDEX_HAYSTACK;
+ } else if (IndexNames::GEO_2DSPHERE == accessMethod) {
+ return INDEX_2DSPHERE;
+ } else if (IndexNames::TEXT == accessMethod) {
+ return INDEX_TEXT;
+ } else if (IndexNames::HASHED == accessMethod) {
+ return INDEX_HASHED;
+ } else {
+ return INDEX_BTREE;
}
+}
} // namespace mongo
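
findPluginName() simply returns the first string-valued field in the key pattern, falling back to the empty BTREE name. A few illustrative inputs (a sketch, not a test from this change):

// No string values in the pattern: plain btree, plugin name is "".
invariant(IndexNames::findPluginName(BSON("a" << 1)) == IndexNames::BTREE);
// The first string value wins, wherever it sits in the pattern.
invariant(IndexNames::findPluginName(BSON("loc" << "2dsphere")) == IndexNames::GEO_2DSPHERE);
invariant(IndexNames::findPluginName(BSON("a" << 1 << "b" << "hashed")) == IndexNames::HASHED);
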
diff --git a/src/mongo/db/index_names.h b/src/mongo/db/index_names.h
index 817fce2dcf6..037029c661d 100644
--- a/src/mongo/db/index_names.h
+++ b/src/mongo/db/index_names.h
@@ -32,63 +32,63 @@
namespace mongo {
- class BSONObj;
+class BSONObj;
+
+/**
+ * We need to know what 'type' an index is in order to plan correctly. We can't entirely rely
+ * on the key pattern to tell us what kind of index we have.
+ *
+ * An example of the Bad Thing That We Must Avoid:
+ * 1. Create a 2dsphere index in 2.4, insert some docs.
+ * 2. Downgrade to 2.2. Insert some more docs into the collection w/the 2dsphere
+ * index. 2.2 treats the index as a normal btree index and creates keys accordingly.
+ * 3. Using the 2dsphere index in 2.4 gives wrong results or assert-fails or crashes as
+ * the data isn't what we expect.
+ */
+enum IndexType {
+ INDEX_BTREE,
+ INDEX_2D,
+ INDEX_HAYSTACK,
+ INDEX_2DSPHERE,
+ INDEX_TEXT,
+ INDEX_HASHED,
+};
+
+/**
+ * We use the std::string representation of index names all over the place, so we declare them all
+ * once here.
+ */
+class IndexNames {
+public:
+ static const std::string GEO_2D;
+ static const std::string GEO_HAYSTACK;
+ static const std::string GEO_2DSPHERE;
+ static const std::string TEXT;
+ static const std::string HASHED;
+ static const std::string BTREE;
/**
- * We need to know what 'type' an index is in order to plan correctly. We can't entirely rely
- * on the key pattern to tell us what kind of index we have.
- *
- * An example of the Bad Thing That We Must Avoid:
- * 1. Create a 2dsphere index in 2.4, insert some docs.
- * 2. Downgrade to 2.2. Insert some more docs into the collection w/the 2dsphere
- * index. 2.2 treats the index as a normal btree index and creates keys accordingly.
- * 3. Using the 2dsphere index in 2.4 gives wrong results or assert-fails or crashes as
- * the data isn't what we expect.
+ * True if is a regular (non-plugin) index or uses a plugin that existed before 2.4.
+ * These plugins are grandfathered in and allowed to exist in DBs where
+ * DataFileVersion::is24IndexClean() returns false.
*/
- enum IndexType {
- INDEX_BTREE,
- INDEX_2D,
- INDEX_HAYSTACK,
- INDEX_2DSPHERE,
- INDEX_TEXT,
- INDEX_HASHED,
- };
+ static bool existedBefore24(const std::string& name);
/**
- * We use the std::string representation of index names all over the place, so we declare them all
- * once here.
+ * Return the first std::string value in the provided object. For an index key pattern,
+ * a field with a non-string value indicates a "special" (not straight Btree) index.
*/
- class IndexNames {
- public:
- static const std::string GEO_2D;
- static const std::string GEO_HAYSTACK;
- static const std::string GEO_2DSPHERE;
- static const std::string TEXT;
- static const std::string HASHED;
- static const std::string BTREE;
-
- /**
- * True if is a regular (non-plugin) index or uses a plugin that existed before 2.4.
- * These plugins are grandfathered in and allowed to exist in DBs where
- * DataFileVersion::is24IndexClean() returns false.
- */
- static bool existedBefore24(const std::string& name);
+ static std::string findPluginName(const BSONObj& keyPattern);
- /**
- * Return the first std::string value in the provided object. For an index key pattern,
- * a field with a non-string value indicates a "special" (not straight Btree) index.
- */
- static std::string findPluginName(const BSONObj& keyPattern);
-
- /**
- * Is the provided access method name one we recognize?
- */
- static bool isKnownName(const std::string& name);
+ /**
+ * Is the provided access method name one we recognize?
+ */
+ static bool isKnownName(const std::string& name);
- /**
- * Convert an index name to an IndexType.
- */
- static IndexType nameToType(const std::string& accessMethod);
- };
+ /**
+ * Convert an index name to an IndexType.
+ */
+ static IndexType nameToType(const std::string& accessMethod);
+};
} // namespace mongo
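
The string names and the IndexType enum above are two views of the same information, and nameToType() bridges them. The typical lookup path for a key pattern looks roughly like this (keyPattern is an assumed variable):

// From key pattern to IndexType, via the plugin-name string.
std::string plugin = IndexNames::findPluginName(keyPattern);
if (!IndexNames::isKnownName(plugin)) {
    // Unknown plugin: the caller decides whether to reject the index spec.
}
IndexType type = IndexNames::nameToType(plugin);  // e.g. INDEX_BTREE, INDEX_2DSPHERE
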
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index 3dbbb133e20..d416d9fe087 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -52,129 +52,122 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::vector;
+using std::endl;
+using std::string;
+using std::vector;
namespace {
- void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
- bool firstTime = true;
- for (std::list<std::string>::const_iterator it = nsToCheck.begin();
- it != nsToCheck.end();
- ++it) {
-
- string ns = *it;
+void checkNS(OperationContext* txn, const std::list<std::string>& nsToCheck) {
+ bool firstTime = true;
+ for (std::list<std::string>::const_iterator it = nsToCheck.begin(); it != nsToCheck.end();
+ ++it) {
+ string ns = *it;
+
+ LOG(3) << "IndexRebuilder::checkNS: " << ns;
+
+ // This write lock is held throughout the index building process
+ // for this namespace.
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ OldClientContext ctx(txn, ns);
+
+ Collection* collection = ctx.db()->getCollection(ns);
+ if (collection == NULL)
+ continue;
+
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
+
+ if (collection->ns().isOplog() && indexCatalog->numIndexesTotal(txn) > 0) {
+ warning() << ns << " had illegal indexes, removing";
+ indexCatalog->dropAllIndexes(txn, true);
+ continue;
+ }
- LOG(3) << "IndexRebuilder::checkNS: " << ns;
- // This write lock is held throughout the index building process
- // for this namespace.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
- OldClientContext ctx(txn, ns);
+ MultiIndexBlock indexer(txn, collection);
- Collection* collection = ctx.db()->getCollection(ns);
- if ( collection == NULL )
- continue;
+ {
+ WriteUnitOfWork wunit(txn);
+ vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(txn);
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ // The indexes have now been removed from system.indexes, so the only record is
+ // in-memory. If there is a journal commit between now and when insert() rewrites
+ // the entry and the db crashes before the new system.indexes entry is journalled,
+ // the index will be lost forever. Thus, we must stay in the same WriteUnitOfWork
+ // to ensure that no journaling will happen between now and the entry being
+ // re-written in MultiIndexBlock::init(). The actual index building is done outside
+ // of this WUOW.
- if ( collection->ns().isOplog() && indexCatalog->numIndexesTotal( txn ) > 0 ) {
- warning() << ns << " had illegal indexes, removing";
- indexCatalog->dropAllIndexes(txn, true);
+ if (indexesToBuild.empty()) {
continue;
}
+ log() << "found " << indexesToBuild.size() << " interrupted index build(s) on " << ns;
- MultiIndexBlock indexer(txn, collection);
-
- {
- WriteUnitOfWork wunit(txn);
- vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(txn);
-
- // The indexes have now been removed from system.indexes, so the only record is
- // in-memory. If there is a journal commit between now and when insert() rewrites
- // the entry and the db crashes before the new system.indexes entry is journalled,
- // the index will be lost forever. Thus, we must stay in the same WriteUnitOfWork
- // to ensure that no journaling will happen between now and the entry being
- // re-written in MultiIndexBlock::init(). The actual index building is done outside
- // of this WUOW.
-
- if (indexesToBuild.empty()) {
- continue;
- }
-
- log() << "found " << indexesToBuild.size()
- << " interrupted index build(s) on " << ns;
-
- if (firstTime) {
- log() << "note: restart the server with --noIndexBuildRetry "
- << "to skip index rebuilds";
- firstTime = false;
- }
-
- if (!serverGlobalParams.indexBuildRetry) {
- log() << " not rebuilding interrupted indexes";
- wunit.commit();
- continue;
- }
-
- uassertStatusOK(indexer.init(indexesToBuild));
+ if (firstTime) {
+ log() << "note: restart the server with --noIndexBuildRetry "
+ << "to skip index rebuilds";
+ firstTime = false;
+ }
+ if (!serverGlobalParams.indexBuildRetry) {
+ log() << " not rebuilding interrupted indexes";
wunit.commit();
+ continue;
}
- try {
- uassertStatusOK(indexer.insertAllDocumentsInCollection());
+ uassertStatusOK(indexer.init(indexesToBuild));
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
- }
- catch (const DBException& e) {
- error() << "Index rebuilding did not complete: " << e.toString();
- log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds";
- // If anything went wrong, leave the indexes partially built so that we pick them up
- // again on restart.
- indexer.abortWithoutCleanup();
- fassertFailedNoTrace(26100);
- }
- catch (...) {
- // If anything went wrong, leave the indexes partially built so that we pick them up
- // again on restart.
- indexer.abortWithoutCleanup();
- throw;
- }
+ wunit.commit();
}
- }
-} // namespace
- void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
- AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();
+ try {
+ uassertStatusOK(indexer.insertAllDocumentsInCollection());
+
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
+ } catch (const DBException& e) {
+ error() << "Index rebuilding did not complete: " << e.toString();
+ log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds";
+ // If anything went wrong, leave the indexes partially built so that we pick them up
+ // again on restart.
+ indexer.abortWithoutCleanup();
+ fassertFailedNoTrace(26100);
+ } catch (...) {
+ // If anything went wrong, leave the indexes partially built so that we pick them up
+ // again on restart.
+ indexer.abortWithoutCleanup();
+ throw;
+ }
+ }
+}
+} // namespace
- std::vector<std::string> dbNames;
+void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
+ AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases( &dbNames );
+ std::vector<std::string> dbNames;
- try {
- std::list<std::string> collNames;
- for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
- dbName < dbNames.end();
- ++dbName) {
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&dbNames);
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, *dbName, MODE_S);
+ try {
+ std::list<std::string> collNames;
+ for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
+ dbName < dbNames.end();
+ ++dbName) {
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, *dbName, MODE_S);
- Database* db = autoDb.getDb();
- db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
- }
- checkNS(txn, collNames);
- }
- catch (const DBException& e) {
- error() << "Index verification did not complete: " << e.toString();
- fassertFailedNoTrace(18643);
+ Database* db = autoDb.getDb();
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
}
- LOG(1) << "checking complete" << endl;
+ checkNS(txn, collNames);
+ } catch (const DBException& e) {
+ error() << "Index verification did not complete: " << e.toString();
+ fassertFailedNoTrace(18643);
}
+ LOG(1) << "checking complete" << endl;
+}
}
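
The long comment inside checkNS() is the subtle part of this file: clearing the unfinished entries and re-writing them in MultiIndexBlock::init() must share one WriteUnitOfWork, or a journal commit in between could lose the index across a crash. Reduced to a sketch (names follow the diff; error handling trimmed):

MultiIndexBlock indexer(txn, collection);
{
    WriteUnitOfWork wunit(txn);
    // Removes the unfinished entries from system.indexes; from here they exist only in memory.
    std::vector<BSONObj> unfinished = indexCatalog->getAndClearUnfinishedIndexes(txn);
    // init() re-writes the entries; keeping both steps in one WUOW means no journal
    // commit can land between the delete and the re-write.
    uassertStatusOK(indexer.init(unfinished));
    wunit.commit();
}
// The expensive collection scan happens outside the WUOW.
uassertStatusOK(indexer.insertAllDocumentsInCollection());
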
diff --git a/src/mongo/db/index_rebuilder.h b/src/mongo/db/index_rebuilder.h
index 26ddea997fb..bf01367e783 100644
--- a/src/mongo/db/index_rebuilder.h
+++ b/src/mongo/db/index_rebuilder.h
@@ -30,11 +30,11 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
- /**
- * Restarts building indexes that were in progress during shutdown.
- * Only call this at startup before taking requests.
- */
- void restartInProgressIndexesFromLastShutdown(OperationContext* txn);
+/**
+ * Restarts building indexes that were in progress during shutdown.
+ * Only call this at startup before taking requests.
+ */
+void restartInProgressIndexesFromLastShutdown(OperationContext* txn);
}
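
The header exports a single startup hook. Where it belongs in the boot sequence, per the comment above (the startupOpCtx name and the surrounding call site are assumptions):

// During mongod startup, after storage is initialized but before accepting requests:
restartInProgressIndexesFromLastShutdown(startupOpCtx);
// --noIndexBuildRetry is honored inside checkNS(), which logs and skips the rebuild.
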
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index 474b29ea7c1..62b35767235 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -71,311 +71,300 @@ namespace fs = boost::filesystem;
namespace mongo {
- using std::cerr;
- using std::cout;
- using std::endl;
+using std::cerr;
+using std::cout;
+using std::endl;
#ifndef _WIN32
- // support for exit value propagation with fork
- void launchSignal( int sig ) {
- if ( sig == SIGUSR2 ) {
- ProcessId cur = ProcessId::getCurrent();
-
- if (cur == serverGlobalParams.parentProc || cur == serverGlobalParams.leaderProc) {
- // signal indicates successful start allowing us to exit
- quickExit(0);
- }
+// support for exit value propagation with fork
+void launchSignal(int sig) {
+ if (sig == SIGUSR2) {
+ ProcessId cur = ProcessId::getCurrent();
+
+ if (cur == serverGlobalParams.parentProc || cur == serverGlobalParams.leaderProc) {
+ // signal indicates successful start allowing us to exit
+ quickExit(0);
}
}
+}
- static void setupLaunchSignals() {
- verify( signal(SIGUSR2 , launchSignal ) != SIG_ERR );
- }
+static void setupLaunchSignals() {
+ verify(signal(SIGUSR2, launchSignal) != SIG_ERR);
+}
- void signalForkSuccess() {
- if (serverGlobalParams.doFork) {
- // killing leader will propagate to parent
- verify(kill(serverGlobalParams.leaderProc.toNative(), SIGUSR2) == 0);
- }
+void signalForkSuccess() {
+ if (serverGlobalParams.doFork) {
+ // killing leader will propagate to parent
+ verify(kill(serverGlobalParams.leaderProc.toNative(), SIGUSR2) == 0);
}
+}
#endif
- static bool forkServer() {
+static bool forkServer() {
#ifndef _WIN32
- if (serverGlobalParams.doFork) {
- fassert(16447, !serverGlobalParams.logpath.empty() ||
- serverGlobalParams.logWithSyslog);
-
- cout.flush();
- cerr.flush();
-
- serverGlobalParams.parentProc = ProcessId::getCurrent();
-
- // facilitate clean exit when child starts successfully
- setupLaunchSignals();
-
- cout << "about to fork child process, waiting until server is ready for connections."
- << endl;
-
- pid_t child1 = fork();
- if (child1 == -1) {
- cout << "ERROR: stage 1 fork() failed: " << errnoWithDescription();
- quickExit(EXIT_ABRUPT);
- }
- else if (child1) {
- // this is run in the original parent process
- int pstat;
- waitpid(child1, &pstat, 0);
-
- if (WIFEXITED(pstat)) {
- if (WEXITSTATUS(pstat)) {
- cout << "ERROR: child process failed, exited with error number "
- << WEXITSTATUS(pstat) << endl;
- }
- else {
- cout << "child process started successfully, parent exiting" << endl;
- }
-
- quickExit(WEXITSTATUS(pstat));
+ if (serverGlobalParams.doFork) {
+ fassert(16447, !serverGlobalParams.logpath.empty() || serverGlobalParams.logWithSyslog);
+
+ cout.flush();
+ cerr.flush();
+
+ serverGlobalParams.parentProc = ProcessId::getCurrent();
+
+ // facilitate clean exit when child starts successfully
+ setupLaunchSignals();
+
+ cout << "about to fork child process, waiting until server is ready for connections."
+ << endl;
+
+ pid_t child1 = fork();
+ if (child1 == -1) {
+ cout << "ERROR: stage 1 fork() failed: " << errnoWithDescription();
+ quickExit(EXIT_ABRUPT);
+ } else if (child1) {
+ // this is run in the original parent process
+ int pstat;
+ waitpid(child1, &pstat, 0);
+
+ if (WIFEXITED(pstat)) {
+ if (WEXITSTATUS(pstat)) {
+ cout << "ERROR: child process failed, exited with error number "
+ << WEXITSTATUS(pstat) << endl;
+ } else {
+ cout << "child process started successfully, parent exiting" << endl;
}
- quickExit(50);
- }
-
- if ( chdir("/") < 0 ) {
- cout << "Cant chdir() while forking server process: " << strerror(errno) << endl;
- quickExit(-1);
+ quickExit(WEXITSTATUS(pstat));
}
- setsid();
-
- serverGlobalParams.leaderProc = ProcessId::getCurrent();
- pid_t child2 = fork();
- if (child2 == -1) {
- cout << "ERROR: stage 2 fork() failed: " << errnoWithDescription();
- quickExit(EXIT_ABRUPT);
- }
- else if (child2) {
- // this is run in the middle process
- int pstat;
- cout << "forked process: " << child2 << endl;
- waitpid(child2, &pstat, 0);
-
- if ( WIFEXITED(pstat) ) {
- quickExit( WEXITSTATUS(pstat) );
- }
+ quickExit(50);
+ }
- quickExit(51);
+ if (chdir("/") < 0) {
+ cout << "Cant chdir() while forking server process: " << strerror(errno) << endl;
+ quickExit(-1);
+ }
+ setsid();
+
+ serverGlobalParams.leaderProc = ProcessId::getCurrent();
+
+ pid_t child2 = fork();
+ if (child2 == -1) {
+ cout << "ERROR: stage 2 fork() failed: " << errnoWithDescription();
+ quickExit(EXIT_ABRUPT);
+ } else if (child2) {
+ // this is run in the middle process
+ int pstat;
+ cout << "forked process: " << child2 << endl;
+ waitpid(child2, &pstat, 0);
+
+ if (WIFEXITED(pstat)) {
+ quickExit(WEXITSTATUS(pstat));
}
- // this is run in the final child process (the server)
+ quickExit(51);
+ }
- FILE* f = freopen("/dev/null", "w", stdout);
- if ( f == NULL ) {
- cout << "Cant reassign stdout while forking server process: " << strerror(errno) << endl;
- return false;
- }
+ // this is run in the final child process (the server)
- f = freopen("/dev/null", "w", stderr);
- if ( f == NULL ) {
- cout << "Cant reassign stderr while forking server process: " << strerror(errno) << endl;
- return false;
- }
+ FILE* f = freopen("/dev/null", "w", stdout);
+ if (f == NULL) {
+ cout << "Cant reassign stdout while forking server process: " << strerror(errno)
+ << endl;
+ return false;
+ }
- f = freopen("/dev/null", "r", stdin);
- if ( f == NULL ) {
- cout << "Cant reassign stdin while forking server process: " << strerror(errno) << endl;
- return false;
- }
+ f = freopen("/dev/null", "w", stderr);
+ if (f == NULL) {
+ cout << "Cant reassign stderr while forking server process: " << strerror(errno)
+ << endl;
+ return false;
}
-#endif // !defined(_WIN32)
- return true;
- }
- void forkServerOrDie() {
- if (!forkServer())
- quickExit(EXIT_FAILURE);
+ f = freopen("/dev/null", "r", stdin);
+ if (f == NULL) {
+ cout << "Cant reassign stdin while forking server process: " << strerror(errno) << endl;
+ return false;
+ }
}
-
- MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
- ("GlobalLogManager", "EndStartupOptionHandling", "ForkServer"),
- ("default"))(
- InitializerContext*) {
-
- using logger::LogManager;
- using logger::MessageEventEphemeral;
- using logger::MessageEventDetailsEncoder;
- using logger::MessageEventWithContextEncoder;
- using logger::MessageLogDomain;
- using logger::RotatableFileAppender;
- using logger::StatusWithRotatableFileWriter;
-
- if (serverGlobalParams.logWithSyslog) {
+#endif // !defined(_WIN32)
+ return true;
+}
+
+void forkServerOrDie() {
+ if (!forkServer())
+ quickExit(EXIT_FAILURE);
+}
+
+MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
+ ("GlobalLogManager", "EndStartupOptionHandling", "ForkServer"),
+ ("default"))(InitializerContext*) {
+ using logger::LogManager;
+ using logger::MessageEventEphemeral;
+ using logger::MessageEventDetailsEncoder;
+ using logger::MessageEventWithContextEncoder;
+ using logger::MessageLogDomain;
+ using logger::RotatableFileAppender;
+ using logger::StatusWithRotatableFileWriter;
+
+ if (serverGlobalParams.logWithSyslog) {
#ifdef _WIN32
- return Status(ErrorCodes::InternalError,
- "Syslog requested in Windows build; command line processor logic error");
+ return Status(ErrorCodes::InternalError,
+ "Syslog requested in Windows build; command line processor logic error");
#else
- using logger::SyslogAppender;
-
- StringBuilder sb;
- sb << serverGlobalParams.binaryName << "." << serverGlobalParams.port;
- openlog(strdup(sb.str().c_str()),
- LOG_PID | LOG_CONS,
- serverGlobalParams.syslogFacility);
- LogManager* manager = logger::globalLogManager();
- manager->getGlobalDomain()->clearAppenders();
- manager->getGlobalDomain()->attachAppender(
- MessageLogDomain::AppenderAutoPtr(
- new SyslogAppender<MessageEventEphemeral>(
- new logger::MessageEventWithContextEncoder)));
- manager->getNamedDomain("javascriptOutput")->attachAppender(
- MessageLogDomain::AppenderAutoPtr(
- new SyslogAppender<MessageEventEphemeral>(
- new logger::MessageEventWithContextEncoder)));
-#endif // defined(_WIN32)
+ using logger::SyslogAppender;
+
+ StringBuilder sb;
+ sb << serverGlobalParams.binaryName << "." << serverGlobalParams.port;
+ openlog(strdup(sb.str().c_str()), LOG_PID | LOG_CONS, serverGlobalParams.syslogFacility);
+ LogManager* manager = logger::globalLogManager();
+ manager->getGlobalDomain()->clearAppenders();
+ manager->getGlobalDomain()->attachAppender(MessageLogDomain::AppenderAutoPtr(
+ new SyslogAppender<MessageEventEphemeral>(new logger::MessageEventWithContextEncoder)));
+ manager->getNamedDomain("javascriptOutput")
+ ->attachAppender(
+ MessageLogDomain::AppenderAutoPtr(new SyslogAppender<MessageEventEphemeral>(
+ new logger::MessageEventWithContextEncoder)));
+#endif // defined(_WIN32)
+ } else if (!serverGlobalParams.logpath.empty()) {
+ fassert(16448, !serverGlobalParams.logWithSyslog);
+ std::string absoluteLogpath = boost::filesystem::absolute(serverGlobalParams.logpath,
+ serverGlobalParams.cwd).string();
+
+ bool exists;
+
+ try {
+ exists = boost::filesystem::exists(absoluteLogpath);
+ } catch (boost::filesystem::filesystem_error& e) {
+ return Status(ErrorCodes::FileNotOpen,
+ mongoutils::str::stream() << "Failed probe for \"" << absoluteLogpath
+ << "\": " << e.code().message());
}
- else if (!serverGlobalParams.logpath.empty()) {
- fassert(16448, !serverGlobalParams.logWithSyslog);
- std::string absoluteLogpath = boost::filesystem::absolute(
- serverGlobalParams.logpath, serverGlobalParams.cwd).string();
-
- bool exists;
-
- try{
- exists = boost::filesystem::exists(absoluteLogpath);
- } catch(boost::filesystem::filesystem_error& e) {
- return Status(ErrorCodes::FileNotOpen, mongoutils::str::stream() <<
- "Failed probe for \"" << absoluteLogpath << "\": " <<
- e.code().message());
- }
- if (exists) {
- if (boost::filesystem::is_directory(absoluteLogpath)) {
- return Status(ErrorCodes::FileNotOpen, mongoutils::str::stream() <<
- "logpath \"" << absoluteLogpath <<
- "\" should name a file, not a directory.");
- }
-
- if (!serverGlobalParams.logAppend &&
- boost::filesystem::is_regular(absoluteLogpath)) {
- std::string renameTarget = absoluteLogpath + "." + terseCurrentTime(false);
- if (0 == rename(absoluteLogpath.c_str(), renameTarget.c_str())) {
- log() << "log file \"" << absoluteLogpath
- << "\" exists; moved to \"" << renameTarget << "\".";
- }
- else {
- return Status(ErrorCodes::FileRenameFailed, mongoutils::str::stream() <<
- "Could not rename preexisting log file \"" <<
- absoluteLogpath << "\" to \"" << renameTarget <<
- "\"; run with --logappend or manually remove file: " <<
- errnoWithDescription());
- }
- }
+ if (exists) {
+ if (boost::filesystem::is_directory(absoluteLogpath)) {
+ return Status(ErrorCodes::FileNotOpen,
+ mongoutils::str::stream()
+ << "logpath \"" << absoluteLogpath
+ << "\" should name a file, not a directory.");
}
- StatusWithRotatableFileWriter writer =
- logger::globalRotatableFileManager()->openFile(absoluteLogpath,
- serverGlobalParams.logAppend);
- if (!writer.isOK()) {
- return writer.getStatus();
- }
-
- LogManager* manager = logger::globalLogManager();
- manager->getGlobalDomain()->clearAppenders();
- manager->getGlobalDomain()->attachAppender(
- MessageLogDomain::AppenderAutoPtr(
- new RotatableFileAppender<MessageEventEphemeral>(
- new MessageEventDetailsEncoder, writer.getValue())));
- manager->getNamedDomain("javascriptOutput")->attachAppender(
- MessageLogDomain::AppenderAutoPtr(
- new RotatableFileAppender<MessageEventEphemeral>(
- new MessageEventDetailsEncoder, writer.getValue())));
-
- if (serverGlobalParams.logAppend && exists) {
- log() << "***** SERVER RESTARTED *****" << endl;
- Status status =
- logger::RotatableFileWriter::Use(writer.getValue()).status();
- if (!status.isOK())
- return status;
+ if (!serverGlobalParams.logAppend && boost::filesystem::is_regular(absoluteLogpath)) {
+ std::string renameTarget = absoluteLogpath + "." + terseCurrentTime(false);
+ if (0 == rename(absoluteLogpath.c_str(), renameTarget.c_str())) {
+ log() << "log file \"" << absoluteLogpath << "\" exists; moved to \""
+ << renameTarget << "\".";
+ } else {
+ return Status(ErrorCodes::FileRenameFailed,
+ mongoutils::str::stream()
+ << "Could not rename preexisting log file \""
+ << absoluteLogpath << "\" to \"" << renameTarget
+ << "\"; run with --logappend or manually remove file: "
+ << errnoWithDescription());
+ }
}
}
- else {
- logger::globalLogManager()->getNamedDomain("javascriptOutput")->attachAppender(
- MessageLogDomain::AppenderAutoPtr(
- new logger::ConsoleAppender<MessageEventEphemeral>(
- new MessageEventDetailsEncoder)));
- }
- logger::globalLogDomain()->attachAppender(
- logger::MessageLogDomain::AppenderAutoPtr(
- new RamLogAppender(RamLog::get("global"))));
+ StatusWithRotatableFileWriter writer = logger::globalRotatableFileManager()->openFile(
+ absoluteLogpath, serverGlobalParams.logAppend);
+ if (!writer.isOK()) {
+ return writer.getStatus();
+ }
- return Status::OK();
+ LogManager* manager = logger::globalLogManager();
+ manager->getGlobalDomain()->clearAppenders();
+ manager->getGlobalDomain()->attachAppender(
+ MessageLogDomain::AppenderAutoPtr(new RotatableFileAppender<MessageEventEphemeral>(
+ new MessageEventDetailsEncoder, writer.getValue())));
+ manager->getNamedDomain("javascriptOutput")
+ ->attachAppender(
+ MessageLogDomain::AppenderAutoPtr(new RotatableFileAppender<MessageEventEphemeral>(
+ new MessageEventDetailsEncoder, writer.getValue())));
+
+ if (serverGlobalParams.logAppend && exists) {
+ log() << "***** SERVER RESTARTED *****" << endl;
+ Status status = logger::RotatableFileWriter::Use(writer.getValue()).status();
+ if (!status.isOK())
+ return status;
+ }
+ } else {
+ logger::globalLogManager()
+ ->getNamedDomain("javascriptOutput")
+ ->attachAppender(MessageLogDomain::AppenderAutoPtr(
+ new logger::ConsoleAppender<MessageEventEphemeral>(
+ new MessageEventDetailsEncoder)));
}
- /**
- * atexit handler to terminate the process before static destructors run.
- *
- * Mongo server processes cannot safely call ::exit() or std::exit(), but
- * some third-party libraries may call one of those functions. In that
- * case, to avoid static-destructor problems in the server, this exits the
- * process immediately with code EXIT_FAILURE.
- *
- * TODO: Remove once exit() executes safely in mongo server processes.
- */
- static void shortCircuitExit() { quickExit(EXIT_FAILURE); }
-
- MONGO_INITIALIZER(RegisterShortCircuitExitHandler)(InitializerContext*) {
- if (std::atexit(&shortCircuitExit) != 0)
- return Status(ErrorCodes::InternalError, "Failed setting short-circuit exit handler.");
- return Status::OK();
- }
+ logger::globalLogDomain()->attachAppender(
+ logger::MessageLogDomain::AppenderAutoPtr(new RamLogAppender(RamLog::get("global"))));
- bool initializeServerGlobalState() {
+ return Status::OK();
+}
- Listener::globalTicketHolder.resize(serverGlobalParams.maxConns);
+/**
+ * atexit handler to terminate the process before static destructors run.
+ *
+ * Mongo server processes cannot safely call ::exit() or std::exit(), but
+ * some third-party libraries may call one of those functions. In that
+ * case, to avoid static-destructor problems in the server, this exits the
+ * process immediately with code EXIT_FAILURE.
+ *
+ * TODO: Remove once exit() executes safely in mongo server processes.
+ */
+static void shortCircuitExit() {
+ quickExit(EXIT_FAILURE);
+}
+
+MONGO_INITIALIZER(RegisterShortCircuitExitHandler)(InitializerContext*) {
+ if (std::atexit(&shortCircuitExit) != 0)
+ return Status(ErrorCodes::InternalError, "Failed setting short-circuit exit handler.");
+ return Status::OK();
+}
+
+bool initializeServerGlobalState() {
+ Listener::globalTicketHolder.resize(serverGlobalParams.maxConns);
#ifndef _WIN32
- if (!fs::is_directory(serverGlobalParams.socket)) {
- cout << serverGlobalParams.socket << " must be a directory" << endl;
- return false;
- }
+ if (!fs::is_directory(serverGlobalParams.socket)) {
+ cout << serverGlobalParams.socket << " must be a directory" << endl;
+ return false;
+ }
#endif
- if (!serverGlobalParams.pidFile.empty()) {
- if (!writePidFile(serverGlobalParams.pidFile)) {
- // error message logged in writePidFile
- return false;
- }
+ if (!serverGlobalParams.pidFile.empty()) {
+ if (!writePidFile(serverGlobalParams.pidFile)) {
+ // error message logged in writePidFile
+ return false;
}
+ }
- int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
- if (!serverGlobalParams.keyFile.empty() &&
- clusterAuthMode != ServerGlobalParams::ClusterAuthMode_x509) {
- if (!setUpSecurityKey(serverGlobalParams.keyFile)) {
- // error message printed in setUpPrivateKey
- return false;
- }
+ int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
+ if (!serverGlobalParams.keyFile.empty() &&
+ clusterAuthMode != ServerGlobalParams::ClusterAuthMode_x509) {
+ if (!setUpSecurityKey(serverGlobalParams.keyFile)) {
+ // error message printed in setUpPrivateKey
+ return false;
}
+ }
- // Auto-enable auth except if clusterAuthMode is not set.
- // clusterAuthMode is automatically set if a --keyFile parameter is provided.
- if (clusterAuthMode != ServerGlobalParams::ClusterAuthMode_undefined) {
- getGlobalAuthorizationManager()->setAuthEnabled(true);
- }
+ // Auto-enable auth except if clusterAuthMode is not set.
+ // clusterAuthMode is automatically set if a --keyFile parameter is provided.
+ if (clusterAuthMode != ServerGlobalParams::ClusterAuthMode_undefined) {
+ getGlobalAuthorizationManager()->setAuthEnabled(true);
+ }
#ifdef MONGO_CONFIG_SSL
- if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_x509 ||
- clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
- setInternalUserAuthParams(BSON(saslCommandMechanismFieldName << "MONGODB-X509" <<
- saslCommandUserDBFieldName << "$external" <<
- saslCommandUserFieldName <<
- getSSLManager()->getSSLConfiguration().clientSubjectName));
- }
-#endif
- return true;
+ if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_x509 ||
+ clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
+ setInternalUserAuthParams(
+ BSON(saslCommandMechanismFieldName
+ << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
+ << saslCommandUserFieldName
+ << getSSLManager()->getSSLConfiguration().clientSubjectName));
}
+#endif
+ return true;
+}
} // namespace mongo
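
forkServer() above is the classic double-fork daemonization, with the twist that both ancestors block in waitpid() and only exit once the grandchild signals readiness via SIGUSR2 (see launchSignal() and signalForkSuccess()). The bare pattern, without the MongoDB plumbing (a generic sketch, not code from this file):

// Generic double-fork sketch: parent and middle process exit, grandchild becomes the
// daemon, detached from the controlling terminal with stdio pointed at /dev/null.
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

bool daemonize() {
    pid_t child = fork();
    if (child < 0)
        return false;
    if (child > 0) {  // original parent: wait for the middle process to report status
        int pstat;
        waitpid(child, &pstat, 0);
        _exit(WIFEXITED(pstat) ? WEXITSTATUS(pstat) : 1);
    }
    setsid();  // new session: no controlling terminal
    pid_t grandchild = fork();
    if (grandchild < 0)
        return false;
    if (grandchild > 0)
        _exit(0);  // middle process exits here; mongod instead waits for SIGUSR2
    freopen("/dev/null", "r", stdin);
    freopen("/dev/null", "w", stdout);
    freopen("/dev/null", "w", stderr);
    return true;  // grandchild continues as the server
}
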
diff --git a/src/mongo/db/initialize_server_global_state.h b/src/mongo/db/initialize_server_global_state.h
index 7085274d7af..c4953b70548 100644
--- a/src/mongo/db/initialize_server_global_state.h
+++ b/src/mongo/db/initialize_server_global_state.h
@@ -30,25 +30,25 @@
namespace mongo {
- /**
- * Perform initialization activity common across all mongo server types.
- *
- * Set up logging, daemonize the process, configure SSL, etc.
- */
- bool initializeServerGlobalState();
+/**
+ * Perform initialization activity common across all mongo server types.
+ *
+ * Set up logging, daemonize the process, configure SSL, etc.
+ */
+bool initializeServerGlobalState();
- /**
- * Forks and detaches the server, on platforms that support it, if serverGlobalParams.doFork is
- * true.
- *
- * Call after processing the command line but before running mongo initializers.
- */
- void forkServerOrDie();
+/**
+ * Forks and detaches the server, on platforms that support it, if serverGlobalParams.doFork is
+ * true.
+ *
+ * Call after processing the command line but before running mongo initializers.
+ */
+void forkServerOrDie();
- /**
- * Notify the parent that we forked from that we have successfully completed basic
- * initialization so it can stop waiting and exit.
- */
- void signalForkSuccess();
+/**
+ * Notify the parent that we forked from that we have successfully completed basic
+ * initialization so it can stop waiting and exit.
+ */
+void signalForkSuccess();
} // namespace mongo
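
Read together, the three declarations above imply a call order in a server's main(): fork first, then initialize, then release the waiting parent. As a sketch (the surrounding steps are placeholders):

// Intended ordering in a mongod/mongos startup path:
forkServerOrDie();                   // after command-line parsing, before mongo initializers
// ... run MONGO_INITIALIZERs, set up storage, etc. ...
if (!initializeServerGlobalState())  // logging, pid file, keyFile / cluster auth
    quickExit(EXIT_FAILURE);
// ... bind the listening sockets, finish startup ...
signalForkSuccess();                 // lets the forked-off parents exit with status 0
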
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index f207624c4a3..79214191ca8 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -1,4 +1,4 @@
-// instance.cpp
+// instance.cpp
/**
* Copyright (C) 2008 10gen Inc.
@@ -96,7 +96,7 @@
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/d_state.h"
#include "mongo/s/grid.h"
-#include "mongo/s/stale_exception.h" // for SendStaleConfigException
+#include "mongo/s/stale_exception.h" // for SendStaleConfigException
#include "mongo/scripting/engine.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
@@ -109,381 +109,357 @@
namespace mongo {
- using logger::LogComponent;
- using std::endl;
- using std::hex;
- using std::ios;
- using std::ofstream;
- using std::string;
- using std::stringstream;
- using std::unique_ptr;
- using std::vector;
-
- // for diaglog
- inline void opread(Message& m) {
- if (_diaglog.getLevel() & 2) {
- _diaglog.readop(m.singleData().view2ptr(), m.header().getLen());
- }
+using logger::LogComponent;
+using std::endl;
+using std::hex;
+using std::ios;
+using std::ofstream;
+using std::string;
+using std::stringstream;
+using std::unique_ptr;
+using std::vector;
+
+// for diaglog
+inline void opread(Message& m) {
+ if (_diaglog.getLevel() & 2) {
+ _diaglog.readop(m.singleData().view2ptr(), m.header().getLen());
}
+}
- inline void opwrite(Message& m) {
- if (_diaglog.getLevel() & 1) {
- _diaglog.writeop(m.singleData().view2ptr(), m.header().getLen());
- }
+inline void opwrite(Message& m) {
+ if (_diaglog.getLevel() & 1) {
+ _diaglog.writeop(m.singleData().view2ptr(), m.header().getLen());
}
+}
- void receivedKillCursors(OperationContext* txn, Message& m);
+void receivedKillCursors(OperationContext* txn, Message& m);
- void receivedUpdate(OperationContext* txn,
- const NamespaceString& nsString,
- Message& m,
- CurOp& op);
+void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Message& m, CurOp& op);
- void receivedDelete(OperationContext* txn,
- const NamespaceString& nsString,
- Message& m,
- CurOp& op);
+void receivedDelete(OperationContext* txn, const NamespaceString& nsString, Message& m, CurOp& op);
- void receivedInsert(OperationContext* txn,
- const NamespaceString& nsString,
- Message& m,
- CurOp& op);
+void receivedInsert(OperationContext* txn, const NamespaceString& nsString, Message& m, CurOp& op);
- bool receivedGetMore(OperationContext* txn,
- DbResponse& dbresponse,
- Message& m,
- CurOp& curop);
+bool receivedGetMore(OperationContext* txn, DbResponse& dbresponse, Message& m, CurOp& curop);
- int nloggedsome = 0;
-#define LOGWITHRATELIMIT if( ++nloggedsome < 1000 || nloggedsome % 100 == 0 )
+int nloggedsome = 0;
+#define LOGWITHRATELIMIT if (++nloggedsome < 1000 || nloggedsome % 100 == 0)
- string dbExecCommand;
+string dbExecCommand;
- MONGO_FP_DECLARE(rsStopGetMore);
+MONGO_FP_DECLARE(rsStopGetMore);
namespace {
- std::unique_ptr<AuthzManagerExternalState> createAuthzManagerExternalStateMongod() {
- return stdx::make_unique<AuthzManagerExternalStateMongod>();
+std::unique_ptr<AuthzManagerExternalState> createAuthzManagerExternalStateMongod() {
+ return stdx::make_unique<AuthzManagerExternalStateMongod>();
+}
+
+MONGO_INITIALIZER(CreateAuthorizationExternalStateFactory)(InitializerContext* context) {
+ AuthzManagerExternalState::create = &createAuthzManagerExternalStateMongod;
+ return Status::OK();
+}
+
+void generateLegacyQueryErrorResponse(const AssertionException* exception,
+ const QueryMessage& queryMessage,
+ CurOp* curop,
+ Message* response) {
+ curop->debug().exceptionInfo = exception->getInfo();
+
+ log(LogComponent::kQuery) << "assertion " << exception->toString() << " ns:" << queryMessage.ns
+ << " query:" << (queryMessage.query.valid()
+ ? queryMessage.query.toString()
+ : "query object is corrupt");
+ if (queryMessage.ntoskip || queryMessage.ntoreturn) {
+ log(LogComponent::kQuery) << " ntoskip:" << queryMessage.ntoskip
+ << " ntoreturn:" << queryMessage.ntoreturn;
}
- MONGO_INITIALIZER(CreateAuthorizationExternalStateFactory) (InitializerContext* context) {
- AuthzManagerExternalState::create = &createAuthzManagerExternalStateMongod;
- return Status::OK();
+ const SendStaleConfigException* scex = (exception->getCode() == SendStaleConfigCode)
+ ? static_cast<const SendStaleConfigException*>(exception)
+ : NULL;
+
+ BSONObjBuilder err;
+ exception->getInfo().append(err);
+ if (scex) {
+ err.append("ok", 0.0);
+ err.append("ns", scex->getns());
+ scex->getVersionReceived().addToBSON(err, "vReceived");
+ scex->getVersionWanted().addToBSON(err, "vWanted");
}
+ BSONObj errObj = err.done();
- void generateLegacyQueryErrorResponse(const AssertionException* exception,
- const QueryMessage& queryMessage,
- CurOp* curop,
- Message* response) {
- curop->debug().exceptionInfo = exception->getInfo();
-
- log(LogComponent::kQuery) << "assertion " << exception->toString()
- << " ns:" << queryMessage.ns << " query:"
- << (queryMessage.query.valid() ? queryMessage.query.toString()
- : "query object is corrupt");
- if (queryMessage.ntoskip || queryMessage.ntoreturn) {
- log(LogComponent::kQuery) << " ntoskip:" << queryMessage.ntoskip
- << " ntoreturn:" << queryMessage.ntoreturn;
- }
-
- const SendStaleConfigException* scex = (exception->getCode() == SendStaleConfigCode)
- ? static_cast<const SendStaleConfigException*>(exception)
- : NULL;
-
- BSONObjBuilder err;
- exception->getInfo().append(err);
- if (scex) {
- err.append("ok", 0.0);
- err.append("ns", scex->getns());
- scex->getVersionReceived().addToBSON(err, "vReceived");
- scex->getVersionWanted().addToBSON(err, "vWanted");
- }
- BSONObj errObj = err.done();
-
- if (scex) {
- log(LogComponent::kQuery) << "stale version detected during query over "
- << queryMessage.ns << " : " << errObj;
- }
-
- BufBuilder bb;
- bb.skip(sizeof(QueryResult::Value));
- bb.appendBuf((void*) errObj.objdata(), errObj.objsize());
-
- // TODO: call replyToQuery() from here instead of this!!! see dbmessage.h
- QueryResult::View msgdata = bb.buf();
- bb.decouple();
- QueryResult::View qr = msgdata;
- qr.setResultFlags(ResultFlag_ErrSet);
- if (scex) qr.setResultFlags(qr.getResultFlags() | ResultFlag_ShardConfigStale);
- qr.msgdata().setLen(bb.len());
- qr.msgdata().setOperation(opReply);
- qr.setCursorId(0);
- qr.setStartingFrom(0);
- qr.setNReturned(1);
- response->setData(msgdata.view2ptr(), true);
+ if (scex) {
+ log(LogComponent::kQuery) << "stale version detected during query over " << queryMessage.ns
+ << " : " << errObj;
}
-} // namespace
-
- static void receivedCommand(OperationContext* txn,
- const NamespaceString& nss,
- Client& client,
- DbResponse& dbResponse,
- Message& message) {
-
- invariant(nss.isCommand());
+ BufBuilder bb;
+ bb.skip(sizeof(QueryResult::Value));
+ bb.appendBuf((void*)errObj.objdata(), errObj.objsize());
+
+ // TODO: call replyToQuery() from here instead of this!!! see dbmessage.h
+ QueryResult::View msgdata = bb.buf();
+ bb.decouple();
+ QueryResult::View qr = msgdata;
+ qr.setResultFlags(ResultFlag_ErrSet);
+ if (scex)
+ qr.setResultFlags(qr.getResultFlags() | ResultFlag_ShardConfigStale);
+ qr.msgdata().setLen(bb.len());
+ qr.msgdata().setOperation(opReply);
+ qr.setCursorId(0);
+ qr.setStartingFrom(0);
+ qr.setNReturned(1);
+ response->setData(msgdata.view2ptr(), true);
+}
- const MSGID responseTo = message.header().getId();
-
- DbMessage dbMessage(message);
- QueryMessage queryMessage(dbMessage);
+} // namespace
- CurOp* op = CurOp::get(txn);
+static void receivedCommand(OperationContext* txn,
+ const NamespaceString& nss,
+ Client& client,
+ DbResponse& dbResponse,
+ Message& message) {
+ invariant(nss.isCommand());
- rpc::LegacyReplyBuilder builder{};
+ const MSGID responseTo = message.header().getId();
- try {
- // This will throw if the request is on an invalid namespace.
- rpc::LegacyRequest request{&message};
- // Auth checking for Commands happens later.
- int nToReturn = queryMessage.ntoreturn;
- beginQueryOp(txn, nss, queryMessage.query, nToReturn, queryMessage.ntoskip);
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- op->markCommand_inlock();
- }
+ DbMessage dbMessage(message);
+ QueryMessage queryMessage(dbMessage);
- uassert(16979, str::stream() << "bad numberToReturn (" << nToReturn
- << ") for $cmd type ns - can only be 1 or -1",
- nToReturn == 1 || nToReturn == -1);
+ CurOp* op = CurOp::get(txn);
- runCommands(txn, request, &builder);
+ rpc::LegacyReplyBuilder builder{};
- op->debug().iscommand = true;
- // TODO: Does this get overwritten/do we really need to set this twice?
- op->debug().query = request.getCommandArgs();
- }
- catch (const DBException& exception) {
- Command::generateErrorResponse(txn, &builder, exception);
+ try {
+ // This will throw if the request is on an invalid namespace.
+ rpc::LegacyRequest request{&message};
+ // Auth checking for Commands happens later.
+ int nToReturn = queryMessage.ntoreturn;
+ beginQueryOp(txn, nss, queryMessage.query, nToReturn, queryMessage.ntoskip);
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op->markCommand_inlock();
}
- auto response = builder.done();
+ uassert(16979,
+ str::stream() << "bad numberToReturn (" << nToReturn
+ << ") for $cmd type ns - can only be 1 or -1",
+ nToReturn == 1 || nToReturn == -1);
- op->debug().responseLength = response->header().dataLen();
+ runCommands(txn, request, &builder);
- dbResponse.response = response.release();
- dbResponse.responseTo = responseTo;
+ op->debug().iscommand = true;
+ // TODO: Does this get overwritten/do we really need to set this twice?
+ op->debug().query = request.getCommandArgs();
+ } catch (const DBException& exception) {
+ Command::generateErrorResponse(txn, &builder, exception);
}
-namespace {
+ auto response = builder.done();
- void receivedRpc(OperationContext* txn,
- Client& client,
- DbResponse& dbResponse,
- Message& message) {
+ op->debug().responseLength = response->header().dataLen();
- invariant(message.operation() == dbCommand);
+ dbResponse.response = response.release();
+ dbResponse.responseTo = responseTo;
+}
- const MSGID responseTo = message.header().getId();
+namespace {
- rpc::CommandReplyBuilder replyBuilder{};
+void receivedRpc(OperationContext* txn, Client& client, DbResponse& dbResponse, Message& message) {
+ invariant(message.operation() == dbCommand);
- auto curOp = CurOp::get(txn);
+ const MSGID responseTo = message.header().getId();
- try {
- // database is validated here
- rpc::CommandRequest request{&message};
-
- // We construct a legacy $cmd namespace so we can fill in curOp using
- // the existing logic that existed for OP_QUERY commands
- NamespaceString nss(request.getDatabase(), "$cmd");
- beginQueryOp(txn, nss, request.getCommandArgs(), 1, 0);
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- curOp->markCommand_inlock();
- }
+ rpc::CommandReplyBuilder replyBuilder{};
- runCommands(txn, request, &replyBuilder);
+ auto curOp = CurOp::get(txn);
- curOp->debug().iscommand = true;
- curOp->debug().query = request.getCommandArgs();
+ try {
+ // database is validated here
+ rpc::CommandRequest request{&message};
- }
- catch (const DBException& exception) {
- Command::generateErrorResponse(txn, &replyBuilder, exception);
+ // We construct a legacy $cmd namespace so we can fill in curOp using
+ // the existing logic that existed for OP_QUERY commands
+ NamespaceString nss(request.getDatabase(), "$cmd");
+ beginQueryOp(txn, nss, request.getCommandArgs(), 1, 0);
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ curOp->markCommand_inlock();
}
- auto response = replyBuilder.done();
+ runCommands(txn, request, &replyBuilder);
- curOp->debug().responseLength = response->header().dataLen();
+ curOp->debug().iscommand = true;
+ curOp->debug().query = request.getCommandArgs();
- dbResponse.response = response.release();
- dbResponse.responseTo = responseTo;
+ } catch (const DBException& exception) {
+ Command::generateErrorResponse(txn, &replyBuilder, exception);
}
- // In SERVER-7775 we reimplemented the pseudo-commands fsyncUnlock, inProg, and killOp
- // as ordinary commands. To support old clients for another release, this helper serves
- // to execute the real command from the legacy pseudo-command codepath.
- // TODO: remove after MongoDB 3.2 is released
- void receivedPseudoCommand(OperationContext* txn,
- Client& client,
- DbResponse& dbResponse,
- Message& message,
- StringData realCommandName) {
-
- DbMessage originalDbm(message);
- originalDbm.pullInt(); // ntoskip
- originalDbm.pullInt(); // ntoreturn
- auto cmdParams = originalDbm.nextJsObj();
-
- Message interposed;
- // HACK:
- // legacy pseudo-commands could run on any database. The command replacements
- // can only run on 'admin'. To avoid breaking old shells and a multitude
- // of third-party tools, we rewrite the namespace. As auth is checked
- // later in Command::_checkAuthorizationImpl, we will still properly
- // reject the request if the client is not authorized.
- NamespaceString interposedNss("admin", "$cmd");
-
- BSONObjBuilder cmdBob;
- cmdBob.append(realCommandName, 1);
- cmdBob.appendElements(cmdParams);
- auto cmd = cmdBob.done();
-
- // TODO: use OP_COMMAND here instead of constructing
- // a legacy OP_QUERY style command
- BufBuilder cmdMsgBuf;
-
- int32_t flags = DataView(message.header().data()).read<LittleEndian<int32_t>>();
- cmdMsgBuf.appendNum(flags);
-
- cmdMsgBuf.appendStr(interposedNss.db(), false); // not including null byte
- cmdMsgBuf.appendStr(".$cmd");
- cmdMsgBuf.appendNum(0); // ntoskip
- cmdMsgBuf.appendNum(1); // ntoreturn
- cmdMsgBuf.appendBuf(cmd.objdata(), cmd.objsize());
-
- interposed.setData(dbQuery, cmdMsgBuf.buf(), cmdMsgBuf.len());
- interposed.header().setId(message.header().getId());
-
- receivedCommand(txn, interposedNss, client, dbResponse, interposed);
- }
+ auto response = replyBuilder.done();
+
+ curOp->debug().responseLength = response->header().dataLen();
+
+ dbResponse.response = response.release();
+ dbResponse.responseTo = responseTo;
+}
+
+// In SERVER-7775 we reimplemented the pseudo-commands fsyncUnlock, inProg, and killOp
+// as ordinary commands. To support old clients for another release, this helper serves
+// to execute the real command from the legacy pseudo-command codepath.
+// TODO: remove after MongoDB 3.2 is released
+void receivedPseudoCommand(OperationContext* txn,
+ Client& client,
+ DbResponse& dbResponse,
+ Message& message,
+ StringData realCommandName) {
+ DbMessage originalDbm(message);
+ originalDbm.pullInt(); // ntoskip
+ originalDbm.pullInt(); // ntoreturn
+ auto cmdParams = originalDbm.nextJsObj();
+
+ Message interposed;
+ // HACK:
+ // legacy pseudo-commands could run on any database. The command replacements
+ // can only run on 'admin'. To avoid breaking old shells and a multitude
+ // of third-party tools, we rewrite the namespace. As auth is checked
+ // later in Command::_checkAuthorizationImpl, we will still properly
+ // reject the request if the client is not authorized.
+ NamespaceString interposedNss("admin", "$cmd");
+
+ BSONObjBuilder cmdBob;
+ cmdBob.append(realCommandName, 1);
+ cmdBob.appendElements(cmdParams);
+ auto cmd = cmdBob.done();
+
+ // TODO: use OP_COMMAND here instead of constructing
+ // a legacy OP_QUERY style command
+ BufBuilder cmdMsgBuf;
+
+ int32_t flags = DataView(message.header().data()).read<LittleEndian<int32_t>>();
+ cmdMsgBuf.appendNum(flags);
+
+ cmdMsgBuf.appendStr(interposedNss.db(), false); // not including null byte
+ cmdMsgBuf.appendStr(".$cmd");
+ cmdMsgBuf.appendNum(0); // ntoskip
+ cmdMsgBuf.appendNum(1); // ntoreturn
+ cmdMsgBuf.appendBuf(cmd.objdata(), cmd.objsize());
+
+ interposed.setData(dbQuery, cmdMsgBuf.buf(), cmdMsgBuf.len());
+ interposed.header().setId(message.header().getId());
+
+ receivedCommand(txn, interposedNss, client, dbResponse, interposed);
+}
} // namespace
- static void receivedQuery(OperationContext* txn,
- const NamespaceString& nss,
- Client& c,
- DbResponse& dbResponse,
- Message& m) {
- invariant(!nss.isCommand());
-
- MSGID responseTo = m.header().getId();
+static void receivedQuery(OperationContext* txn,
+ const NamespaceString& nss,
+ Client& c,
+ DbResponse& dbResponse,
+ Message& m) {
+ invariant(!nss.isCommand());
- DbMessage d(m);
- QueryMessage q(d);
- unique_ptr< Message > resp( new Message() );
+ MSGID responseTo = m.header().getId();
- CurOp& op = *CurOp::get(txn);
+ DbMessage d(m);
+ QueryMessage q(d);
+ unique_ptr<Message> resp(new Message());
- try {
- Client* client = txn->getClient();
- Status status = AuthorizationSession::get(client)->checkAuthForQuery(nss, q.query);
- audit::logQueryAuthzCheck(client, nss, q.query, status.code());
- uassertStatusOK(status);
+ CurOp& op = *CurOp::get(txn);
- dbResponse.exhaustNS = runQuery(txn, q, nss, *resp);
- verify( !resp->empty() );
- }
- catch (const AssertionException& exception) {
- resp.reset(new Message());
- generateLegacyQueryErrorResponse(&exception, q, &op, resp.get());
- }
-
- op.debug().responseLength = resp->header().dataLen();
+ try {
+ Client* client = txn->getClient();
+ Status status = AuthorizationSession::get(client)->checkAuthForQuery(nss, q.query);
+ audit::logQueryAuthzCheck(client, nss, q.query, status.code());
+ uassertStatusOK(status);
- dbResponse.response = resp.release();
- dbResponse.responseTo = responseTo;
+ dbResponse.exhaustNS = runQuery(txn, q, nss, *resp);
+ verify(!resp->empty());
+ } catch (const AssertionException& exception) {
+ resp.reset(new Message());
+ generateLegacyQueryErrorResponse(&exception, q, &op, resp.get());
}
- // Mongod on win32 defines a value for this function. In all other executables it is NULL.
- void (*reportEventToSystem)(const char *msg) = 0;
+ op.debug().responseLength = resp->header().dataLen();
- void mongoAbort(const char *msg) {
- if( reportEventToSystem )
- reportEventToSystem(msg);
- severe() << msg;
- ::abort();
- }
+ dbResponse.response = resp.release();
+ dbResponse.responseTo = responseTo;
+}
- // Returns false when request includes 'end'
- void assembleResponse( OperationContext* txn,
- Message& m,
- DbResponse& dbresponse,
- const HostAndPort& remote) {
- // before we lock...
- int op = m.operation();
- bool isCommand = false;
+// Mongod on win32 defines a value for this function. In all other executables it is NULL.
+void (*reportEventToSystem)(const char* msg) = 0;
- DbMessage dbmsg(m);
+void mongoAbort(const char* msg) {
+ if (reportEventToSystem)
+ reportEventToSystem(msg);
+ severe() << msg;
+ ::abort();
+}
- Client& c = *txn->getClient();
- if (!c.isInDirectClient()) {
- LastError::get(c).startRequest();
- AuthorizationSession::get(c)->startRequest(txn);
+// Returns false when request includes 'end'
+void assembleResponse(OperationContext* txn,
+ Message& m,
+ DbResponse& dbresponse,
+ const HostAndPort& remote) {
+ // before we lock...
+ int op = m.operation();
+ bool isCommand = false;
- // We should not be holding any locks at this point
- invariant(!txn->lockState()->isLocked());
- }
+ DbMessage dbmsg(m);
- const char* ns = dbmsg.messageShouldHaveNs() ? dbmsg.getns() : NULL;
- const NamespaceString nsString = ns ? NamespaceString(ns) : NamespaceString();
+ Client& c = *txn->getClient();
+ if (!c.isInDirectClient()) {
+ LastError::get(c).startRequest();
+ AuthorizationSession::get(c)->startRequest(txn);
- if ( op == dbQuery ) {
- if (nsString.isCommand()) {
- isCommand = true;
- opwrite(m);
- }
- // TODO: remove this entire code path after 3.2. Refs SERVER-7775
- else if (nsString.isSpecialCommand()) {
- opwrite(m);
+ // We should not be holding any locks at this point
+ invariant(!txn->lockState()->isLocked());
+ }
- if (nsString.coll() == "$cmd.sys.inprog") {
- receivedPseudoCommand(txn, c, dbresponse, m, "currentOp");
- return;
- }
- if (nsString.coll() == "$cmd.sys.killop") {
- receivedPseudoCommand(txn, c, dbresponse, m, "killOp");
- return;
- }
- if (nsString.coll() == "$cmd.sys.unlock") {
- receivedPseudoCommand(txn, c, dbresponse, m, "fsyncUnlock");
- return;
- }
- }
- else {
- opread(m);
- }
- }
- else if( op == dbGetMore ) {
- opread(m);
- }
- else if ( op == dbCommand ) {
+ const char* ns = dbmsg.messageShouldHaveNs() ? dbmsg.getns() : NULL;
+ const NamespaceString nsString = ns ? NamespaceString(ns) : NamespaceString();
+
+ if (op == dbQuery) {
+ if (nsString.isCommand()) {
isCommand = true;
opwrite(m);
}
- else {
+ // TODO: remove this entire code path after 3.2. Refs SERVER-7775
+ else if (nsString.isSpecialCommand()) {
opwrite(m);
+
+ if (nsString.coll() == "$cmd.sys.inprog") {
+ receivedPseudoCommand(txn, c, dbresponse, m, "currentOp");
+ return;
+ }
+ if (nsString.coll() == "$cmd.sys.killop") {
+ receivedPseudoCommand(txn, c, dbresponse, m, "killOp");
+ return;
+ }
+ if (nsString.coll() == "$cmd.sys.unlock") {
+ receivedPseudoCommand(txn, c, dbresponse, m, "fsyncUnlock");
+ return;
+ }
+ } else {
+ opread(m);
}
+ } else if (op == dbGetMore) {
+ opread(m);
+ } else if (op == dbCommand) {
+ isCommand = true;
+ opwrite(m);
+ } else {
+ opwrite(m);
+ }
- // Increment op counters.
- switch (op) {
+ // Increment op counters.
+ switch (op) {
case dbQuery:
if (!isCommand) {
globalOpCounters.gotQuery();
- }
- else {
+ } else {
// Command counting is deferred, since it is not known yet whether the command
// needs counting.
}
@@ -501,909 +477,842 @@ namespace {
case dbDelete:
globalOpCounters.gotDelete();
break;
- }
+ }
- CurOp& currentOp = *CurOp::get(txn);
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- currentOp.setOp_inlock(op);
- }
+ CurOp& currentOp = *CurOp::get(txn);
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ currentOp.setOp_inlock(op);
+ }
- OpDebug& debug = currentOp.debug();
- debug.op = op;
+ OpDebug& debug = currentOp.debug();
+ debug.op = op;
- long long logThreshold = serverGlobalParams.slowMS;
- LogComponent responseComponent(LogComponent::kQuery);
- if (op == dbInsert ||
- op == dbDelete ||
- op == dbUpdate) {
- responseComponent = LogComponent::kWrite;
- }
- else if (isCommand) {
- responseComponent = LogComponent::kCommand;
- }
+ long long logThreshold = serverGlobalParams.slowMS;
+ LogComponent responseComponent(LogComponent::kQuery);
+ if (op == dbInsert || op == dbDelete || op == dbUpdate) {
+ responseComponent = LogComponent::kWrite;
+ } else if (isCommand) {
+ responseComponent = LogComponent::kCommand;
+ }
- bool shouldLog = logger::globalLogDomain()->shouldLog(responseComponent,
- logger::LogSeverity::Debug(1));
+ bool shouldLog =
+ logger::globalLogDomain()->shouldLog(responseComponent, logger::LogSeverity::Debug(1));
- if ( op == dbQuery ) {
- if (isCommand) {
- receivedCommand(txn, nsString, c, dbresponse, m);
- }
- else {
- receivedQuery(txn, nsString, c, dbresponse, m);
- }
- }
- else if ( op == dbCommand ) {
- receivedRpc(txn, c, dbresponse, m);
- }
- else if ( op == dbGetMore ) {
- if ( ! receivedGetMore(txn, dbresponse, m, currentOp) )
- shouldLog = true;
+ if (op == dbQuery) {
+ if (isCommand) {
+ receivedCommand(txn, nsString, c, dbresponse, m);
+ } else {
+ receivedQuery(txn, nsString, c, dbresponse, m);
}
- else if ( op == dbMsg ) {
- // deprecated - replaced by commands
- const char *p = dbmsg.getns();
-
- int len = strlen(p);
- if ( len > 400 )
- log(LogComponent::kQuery) << curTimeMillis64() % 10000 <<
- " long msg received, len:" << len << endl;
-
- Message *resp = new Message();
- if ( strcmp( "end" , p ) == 0 )
- resp->setData( opReply , "dbMsg end no longer supported" );
- else
- resp->setData( opReply , "i am fine - dbMsg deprecated");
+ } else if (op == dbCommand) {
+ receivedRpc(txn, c, dbresponse, m);
+ } else if (op == dbGetMore) {
+ if (!receivedGetMore(txn, dbresponse, m, currentOp))
+ shouldLog = true;
+ } else if (op == dbMsg) {
+ // deprecated - replaced by commands
+ const char* p = dbmsg.getns();
+
+ int len = strlen(p);
+ if (len > 400)
+ log(LogComponent::kQuery) << curTimeMillis64() % 10000
+ << " long msg received, len:" << len << endl;
+
+ Message* resp = new Message();
+ if (strcmp("end", p) == 0)
+ resp->setData(opReply, "dbMsg end no longer supported");
+ else
+ resp->setData(opReply, "i am fine - dbMsg deprecated");
- dbresponse.response = resp;
- dbresponse.responseTo = m.header().getId();
- }
- else {
- try {
- // The following operations all require authorization.
- // dbInsert, dbUpdate and dbDelete can be easily pre-authorized,
- // here, but dbKillCursors cannot.
- if ( op == dbKillCursors ) {
- currentOp.ensureStarted();
- logThreshold = 10;
- receivedKillCursors(txn, m);
- }
- else if (op != dbInsert && op != dbUpdate && op != dbDelete) {
- log(LogComponent::kQuery) << " operation isn't supported: " << op << endl;
- currentOp.done();
- shouldLog = true;
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.header().getId();
+ } else {
+ try {
+ // The following operations all require authorization.
+ // dbInsert, dbUpdate and dbDelete can be easily pre-authorized,
+ // here, but dbKillCursors cannot.
+ if (op == dbKillCursors) {
+ currentOp.ensureStarted();
+ logThreshold = 10;
+ receivedKillCursors(txn, m);
+ } else if (op != dbInsert && op != dbUpdate && op != dbDelete) {
+ log(LogComponent::kQuery) << " operation isn't supported: " << op << endl;
+ currentOp.done();
+ shouldLog = true;
+ } else {
+ if (remote != DBDirectClient::dummyHost) {
+ const ShardedConnectionInfo* connInfo = ShardedConnectionInfo::get(&c, false);
+ uassert(18663,
+                            str::stream() << "legacy writeOps no longer supported for "
+ << "versioned connections, ns: " << nsString.ns()
+ << ", op: " << opToString(op)
+ << ", remote: " << remote.toString(),
+ connInfo == NULL);
}
- else {
- if (remote != DBDirectClient::dummyHost) {
- const ShardedConnectionInfo* connInfo =
- ShardedConnectionInfo::get(&c, false);
- uassert(18663,
- str::stream() << "legacy writeOps not longer supported for "
- << "versioned connections, ns: " << nsString.ns()
- << ", op: " << opToString(op)
- << ", remote: " << remote.toString(),
- connInfo == NULL);
- }
-
- if (!nsString.isValid()) {
- uassert(16257, str::stream() << "Invalid ns [" << ns << "]", false);
- }
- else if (op == dbInsert) {
- receivedInsert(txn, nsString, m, currentOp);
- }
- else if (op == dbUpdate) {
- receivedUpdate(txn, nsString, m, currentOp);
- }
- else if (op == dbDelete) {
- receivedDelete(txn, nsString, m, currentOp);
- }
- else {
- invariant(false);
- }
+
+ if (!nsString.isValid()) {
+ uassert(16257, str::stream() << "Invalid ns [" << ns << "]", false);
+ } else if (op == dbInsert) {
+ receivedInsert(txn, nsString, m, currentOp);
+ } else if (op == dbUpdate) {
+ receivedUpdate(txn, nsString, m, currentOp);
+ } else if (op == dbDelete) {
+ receivedDelete(txn, nsString, m, currentOp);
+ } else {
+ invariant(false);
}
- }
- catch (const UserException& ue) {
- LastError::get(c).setLastError(ue.getCode(), ue.getInfo().msg);
- MONGO_LOG_COMPONENT(3, responseComponent)
- << " Caught Assertion in " << opToString(op) << ", continuing "
- << ue.toString() << endl;
- debug.exceptionInfo = ue.getInfo();
- }
- catch (const AssertionException& e) {
- LastError::get(c).setLastError(e.getCode(), e.getInfo().msg);
- MONGO_LOG_COMPONENT(3, responseComponent)
- << " Caught Assertion in " << opToString(op) << ", continuing "
- << e.toString() << endl;
- debug.exceptionInfo = e.getInfo();
- shouldLog = true;
}
+ } catch (const UserException& ue) {
+ LastError::get(c).setLastError(ue.getCode(), ue.getInfo().msg);
+ MONGO_LOG_COMPONENT(3, responseComponent) << " Caught Assertion in " << opToString(op)
+ << ", continuing " << ue.toString() << endl;
+ debug.exceptionInfo = ue.getInfo();
+ } catch (const AssertionException& e) {
+ LastError::get(c).setLastError(e.getCode(), e.getInfo().msg);
+ MONGO_LOG_COMPONENT(3, responseComponent) << " Caught Assertion in " << opToString(op)
+ << ", continuing " << e.toString() << endl;
+ debug.exceptionInfo = e.getInfo();
+ shouldLog = true;
}
- currentOp.ensureStarted();
- currentOp.done();
- debug.executionTime = currentOp.totalTimeMillis();
+ }
+ currentOp.ensureStarted();
+ currentOp.done();
+ debug.executionTime = currentOp.totalTimeMillis();
- logThreshold += currentOp.getExpectedLatencyMs();
+ logThreshold += currentOp.getExpectedLatencyMs();
- if ( shouldLog || debug.executionTime > logThreshold ) {
- Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
+ if (shouldLog || debug.executionTime > logThreshold) {
+ Locker::LockerInfo lockerInfo;
+ txn->lockState()->getLockerInfo(&lockerInfo);
- MONGO_LOG_COMPONENT(0, responseComponent) << debug.report(currentOp, lockerInfo.stats);
- }
+ MONGO_LOG_COMPONENT(0, responseComponent) << debug.report(currentOp, lockerInfo.stats);
+ }
- if (currentOp.shouldDBProfile(debug.executionTime)) {
- // Performance profiling is on
- if (txn->lockState()->isReadLocked()) {
- MONGO_LOG_COMPONENT(1, responseComponent)
- << "note: not profiling because recursive read lock";
- }
- else if (lockedForWriting()) {
- MONGO_LOG_COMPONENT(1, responseComponent)
- << "note: not profiling because doing fsync+lock";
- }
- else {
- profile(txn, op);
- }
+ if (currentOp.shouldDBProfile(debug.executionTime)) {
+ // Performance profiling is on
+ if (txn->lockState()->isReadLocked()) {
+ MONGO_LOG_COMPONENT(1, responseComponent)
+ << "note: not profiling because recursive read lock";
+ } else if (lockedForWriting()) {
+ MONGO_LOG_COMPONENT(1, responseComponent)
+ << "note: not profiling because doing fsync+lock";
+ } else {
+ profile(txn, op);
}
-
- recordCurOpMetrics(txn);
- debug.reset();
}
- void receivedKillCursors(OperationContext* txn, Message& m) {
- LastError::get(txn->getClient()).disable();
- DbMessage dbmessage(m);
- int n = dbmessage.pullInt();
+ recordCurOpMetrics(txn);
+ debug.reset();
+}
- uassert( 13659 , "sent 0 cursors to kill" , n != 0 );
- massert( 13658 , str::stream() << "bad kill cursors size: " << m.dataSize() , m.dataSize() == 8 + ( 8 * n ) );
- uassert( 13004 , str::stream() << "sent negative cursors to kill: " << n , n >= 1 );
+void receivedKillCursors(OperationContext* txn, Message& m) {
+ LastError::get(txn->getClient()).disable();
+ DbMessage dbmessage(m);
+ int n = dbmessage.pullInt();
- if ( n > 2000 ) {
- ( n < 30000 ? warning() : error() ) << "receivedKillCursors, n=" << n << endl;
- verify( n < 30000 );
- }
+ uassert(13659, "sent 0 cursors to kill", n != 0);
+ massert(13658,
+ str::stream() << "bad kill cursors size: " << m.dataSize(),
+ m.dataSize() == 8 + (8 * n));
+ uassert(13004, str::stream() << "sent negative cursors to kill: " << n, n >= 1);
- const char* cursorArray = dbmessage.getArray(n);
+ if (n > 2000) {
+ (n < 30000 ? warning() : error()) << "receivedKillCursors, n=" << n << endl;
+ verify(n < 30000);
+ }
- int found = CursorManager::eraseCursorGlobalIfAuthorized(txn, n, cursorArray);
+ const char* cursorArray = dbmessage.getArray(n);
- if ( shouldLog(logger::LogSeverity::Debug(1)) || found != n ) {
- LOG( found == n ? 1 : 0 ) << "killcursors: found " << found << " of " << n << endl;
- }
+ int found = CursorManager::eraseCursorGlobalIfAuthorized(txn, n, cursorArray);
+ if (shouldLog(logger::LogSeverity::Debug(1)) || found != n) {
+ LOG(found == n ? 1 : 0) << "killcursors: found " << found << " of " << n << endl;
+ }
+}
+
+void receivedUpdate(OperationContext* txn, const NamespaceString& nsString, Message& m, CurOp& op) {
+ DbMessage d(m);
+ uassertStatusOK(userAllowedWriteNS(nsString));
+ op.debug().ns = nsString.ns();
+ int flags = d.pullInt();
+ BSONObj query = d.nextJsObj();
+
+ verify(d.moreJSObjs());
+ verify(query.objsize() < m.header().dataLen());
+ BSONObj toupdate = d.nextJsObj();
+ uassert(10055, "update object too large", toupdate.objsize() <= BSONObjMaxUserSize);
+ verify(toupdate.objsize() < m.header().dataLen());
+ verify(query.objsize() + toupdate.objsize() < m.header().dataLen());
+ bool upsert = flags & UpdateOption_Upsert;
+ bool multi = flags & UpdateOption_Multi;
+ bool broadcast = flags & UpdateOption_Broadcast;
+
+ Status status = AuthorizationSession::get(txn->getClient())
+ ->checkAuthForUpdate(nsString, query, toupdate, upsert);
+ audit::logUpdateAuthzCheck(
+ txn->getClient(), nsString, query, toupdate, upsert, multi, status.code());
+ uassertStatusOK(status);
+
+ op.debug().query = query;
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op.setQuery_inlock(query);
}
- void receivedUpdate(OperationContext* txn,
- const NamespaceString& nsString,
- Message& m,
- CurOp& op) {
- DbMessage d(m);
- uassertStatusOK(userAllowedWriteNS(nsString));
- op.debug().ns = nsString.ns();
- int flags = d.pullInt();
- BSONObj query = d.nextJsObj();
-
- verify( d.moreJSObjs() );
- verify( query.objsize() < m.header().dataLen() );
- BSONObj toupdate = d.nextJsObj();
- uassert( 10055 , "update object too large", toupdate.objsize() <= BSONObjMaxUserSize);
- verify( toupdate.objsize() < m.header().dataLen() );
- verify( query.objsize() + toupdate.objsize() < m.header().dataLen() );
- bool upsert = flags & UpdateOption_Upsert;
- bool multi = flags & UpdateOption_Multi;
- bool broadcast = flags & UpdateOption_Broadcast;
-
- Status status = AuthorizationSession::get(txn->getClient())->checkAuthForUpdate(nsString,
- query,
- toupdate,
- upsert);
- audit::logUpdateAuthzCheck(txn->getClient(), nsString, query, toupdate, upsert, multi,
- status.code());
- uassertStatusOK(status);
-
- op.debug().query = query;
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- op.setQuery_inlock(query);
- }
+ UpdateRequest request(nsString);
+ request.setUpsert(upsert);
+ request.setMulti(multi);
+ request.setQuery(query);
+ request.setUpdates(toupdate);
+ UpdateLifecycleImpl updateLifecycle(broadcast, nsString);
+ request.setLifecycle(&updateLifecycle);
- UpdateRequest request(nsString);
- request.setUpsert(upsert);
- request.setMulti(multi);
- request.setQuery(query);
- request.setUpdates(toupdate);
- UpdateLifecycleImpl updateLifecycle(broadcast, nsString);
- request.setLifecycle(&updateLifecycle);
-
- request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- int attempt = 1;
- while ( 1 ) {
- try {
- ParsedUpdate parsedUpdate(txn, &request);
- uassertStatusOK(parsedUpdate.parseRequest());
-
- // Tentatively take an intent lock, fix up if we need to create the collection
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_IX);
- if (dbHolder().get(txn, nsString.db()) == NULL) {
- // If DB doesn't exist, don't implicitly create it in OldClientContext
- break;
- }
- Lock::CollectionLock collLock(txn->lockState(),
- nsString.ns(),
- parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
- OldClientContext ctx(txn, nsString);
-
- // The common case: no implicit collection creation
- if (!upsert || ctx.db()->getCollection(nsString) != NULL) {
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn,
- ctx.db()->getCollection(nsString),
- &parsedUpdate,
- &op.debug(),
- &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // Run the plan and get stats out.
- uassertStatusOK(exec->executePlan());
- UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), &op.debug());
-
- // for getlasterror
- LastError::get(txn->getClient()).recordUpdate(
- res.existing, res.numMatched, res.upserted);
- return;
- }
- break;
- }
- catch ( const WriteConflictException& dle ) {
- op.debug().writeConflicts++;
- if ( multi ) {
- log(LogComponent::kWrite) << "Had WriteConflict during multi update, aborting";
- throw;
- }
- WriteConflictException::logAndBackoff( attempt++, "update", nsString.toString() );
- }
- }
+ request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
- // This is an upsert into a non-existing database, so need an exclusive lock
- // to avoid deadlock
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ int attempt = 1;
+ while (1) {
+ try {
ParsedUpdate parsedUpdate(txn, &request);
uassertStatusOK(parsedUpdate.parseRequest());
+ // Tentatively take an intent lock, fix up if we need to create the collection
ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_X);
- OldClientContext ctx(txn, nsString);
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while performing update on " << nsString.ns(),
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString));
-
- Database* db = ctx.db();
- if (db->getCollection(nsString)) {
- // someone else beat us to it, that's ok
- // we might race while we unlock if someone drops
- // but that's ok, we'll just do nothing and error out
- }
- else {
- WriteUnitOfWork wuow(txn);
- uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj()));
- wuow.commit();
+ Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_IX);
+ if (dbHolder().get(txn, nsString.db()) == NULL) {
+ // If DB doesn't exist, don't implicitly create it in OldClientContext
+ break;
}
+ Lock::CollectionLock collLock(
+ txn->lockState(), nsString.ns(), parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
+ OldClientContext ctx(txn, nsString);
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn,
- ctx.db()->getCollection(nsString),
- &parsedUpdate,
- &op.debug(),
- &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ // The common case: no implicit collection creation
+ if (!upsert || ctx.db()->getCollection(nsString) != NULL) {
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorUpdate(
+ txn, ctx.db()->getCollection(nsString), &parsedUpdate, &op.debug(), &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- // Run the plan and get stats out.
- uassertStatusOK(exec->executePlan());
- UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), &op.debug());
+ // Run the plan and get stats out.
+ uassertStatusOK(exec->executePlan());
+ UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), &op.debug());
- LastError::get(txn->getClient()).recordUpdate(
- res.existing, res.numMatched, res.upserted);
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "update", nsString.ns());
+ // for getlasterror
+ LastError::get(txn->getClient())
+ .recordUpdate(res.existing, res.numMatched, res.upserted);
+ return;
+ }
+ break;
+ } catch (const WriteConflictException& dle) {
+ op.debug().writeConflicts++;
+ if (multi) {
+ log(LogComponent::kWrite) << "Had WriteConflict during multi update, aborting";
+ throw;
+ }
+ WriteConflictException::logAndBackoff(attempt++, "update", nsString.toString());
+ }
}
- void receivedDelete(OperationContext* txn,
- const NamespaceString& nsString,
- Message& m,
- CurOp& op) {
- DbMessage d(m);
- uassertStatusOK(userAllowedWriteNS(nsString));
-
- op.debug().ns = nsString.ns();
- int flags = d.pullInt();
- bool justOne = flags & RemoveOption_JustOne;
- verify( d.moreJSObjs() );
- BSONObj pattern = d.nextJsObj();
-
- Status status = AuthorizationSession::get(txn->getClient())->checkAuthForDelete(nsString,
- pattern);
- audit::logDeleteAuthzCheck(txn->getClient(), nsString, pattern, status.code());
- uassertStatusOK(status);
+ // This is an upsert into a non-existing database, so need an exclusive lock
+ // to avoid deadlock
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ParsedUpdate parsedUpdate(txn, &request);
+ uassertStatusOK(parsedUpdate.parseRequest());
- op.debug().query = pattern;
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- op.setQuery_inlock(pattern);
- }
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_X);
+ OldClientContext ctx(txn, nsString);
+ uassert(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while performing update on " << nsString.ns(),
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString));
- DeleteRequest request(nsString);
- request.setQuery(pattern);
- request.setMulti(!justOne);
+ Database* db = ctx.db();
+ if (db->getCollection(nsString)) {
+ // someone else beat us to it, that's ok
+ // we might race while we unlock if someone drops
+ // but that's ok, we'll just do nothing and error out
+ } else {
+ WriteUnitOfWork wuow(txn);
+ uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj()));
+ wuow.commit();
+ }
- request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorUpdate(
+ txn, ctx.db()->getCollection(nsString), &parsedUpdate, &op.debug(), &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- int attempt = 1;
- while ( 1 ) {
- try {
- ParsedDelete parsedDelete(txn, &request);
- uassertStatusOK(parsedDelete.parseRequest());
+ // Run the plan and get stats out.
+ uassertStatusOK(exec->executePlan());
+ UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), &op.debug());
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
- if (!autoDb.getDb()) {
- break;
- }
+ LastError::get(txn->getClient()).recordUpdate(res.existing, res.numMatched, res.upserted);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "update", nsString.ns());
+}
+
+void receivedDelete(OperationContext* txn, const NamespaceString& nsString, Message& m, CurOp& op) {
+ DbMessage d(m);
+ uassertStatusOK(userAllowedWriteNS(nsString));
+
+ op.debug().ns = nsString.ns();
+ int flags = d.pullInt();
+ bool justOne = flags & RemoveOption_JustOne;
+ verify(d.moreJSObjs());
+ BSONObj pattern = d.nextJsObj();
+
+ Status status =
+ AuthorizationSession::get(txn->getClient())->checkAuthForDelete(nsString, pattern);
+ audit::logDeleteAuthzCheck(txn->getClient(), nsString, pattern, status.code());
+ uassertStatusOK(status);
+
+ op.debug().query = pattern;
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op.setQuery_inlock(pattern);
+ }
- Lock::CollectionLock collLock(txn->lockState(),
- nsString.ns(),
- parsedDelete.isIsolated() ? MODE_X : MODE_IX);
- OldClientContext ctx(txn, nsString);
+ DeleteRequest request(nsString);
+ request.setQuery(pattern);
+ request.setMulti(!justOne);
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(txn,
- ctx.db()->getCollection(nsString),
- &parsedDelete,
- &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
- // Run the plan and get the number of docs deleted.
- uassertStatusOK(exec->executePlan());
- long long n = DeleteStage::getNumDeleted(exec.get());
- LastError::get(txn->getClient()).recordDelete(n);
- op.debug().ndeleted = n;
+ int attempt = 1;
+ while (1) {
+ try {
+ ParsedDelete parsedDelete(txn, &request);
+ uassertStatusOK(parsedDelete.parseRequest());
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
+ if (!autoDb.getDb()) {
break;
}
- catch ( const WriteConflictException& dle ) {
- op.debug().writeConflicts++;
- WriteConflictException::logAndBackoff( attempt++, "delete", nsString.toString() );
- }
+
+ Lock::CollectionLock collLock(
+ txn->lockState(), nsString.ns(), parsedDelete.isIsolated() ? MODE_X : MODE_IX);
+ OldClientContext ctx(txn, nsString);
+
+ PlanExecutor* rawExec;
+ uassertStatusOK(
+ getExecutorDelete(txn, ctx.db()->getCollection(nsString), &parsedDelete, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // Run the plan and get the number of docs deleted.
+ uassertStatusOK(exec->executePlan());
+ long long n = DeleteStage::getNumDeleted(exec.get());
+ LastError::get(txn->getClient()).recordDelete(n);
+ op.debug().ndeleted = n;
+
+ break;
+ } catch (const WriteConflictException& dle) {
+ op.debug().writeConflicts++;
+ WriteConflictException::logAndBackoff(attempt++, "delete", nsString.toString());
}
}
+}
+
+QueryResult::View emptyMoreResult(long long);
+
+bool receivedGetMore(OperationContext* txn, DbResponse& dbresponse, Message& m, CurOp& curop) {
+ bool ok = true;
+
+ DbMessage d(m);
+
+ const char* ns = d.getns();
+ int ntoreturn = d.pullInt();
+ long long cursorid = d.pullInt64();
+
+ curop.debug().ns = ns;
+ curop.debug().ntoreturn = ntoreturn;
+ curop.debug().cursorid = cursorid;
+
+ unique_ptr<AssertionException> ex;
+ unique_ptr<Timer> timer;
+ int pass = 0;
+ bool exhaust = false;
+ QueryResult::View msgdata = 0;
+ Timestamp last;
+ while (1) {
+ bool isCursorAuthorized = false;
+ try {
+ const NamespaceString nsString(ns);
+ uassert(16258, str::stream() << "Invalid ns [" << ns << "]", nsString.isValid());
- QueryResult::View emptyMoreResult(long long);
-
- bool receivedGetMore(OperationContext* txn,
- DbResponse& dbresponse,
- Message& m,
- CurOp& curop) {
- bool ok = true;
-
- DbMessage d(m);
-
- const char *ns = d.getns();
- int ntoreturn = d.pullInt();
- long long cursorid = d.pullInt64();
-
- curop.debug().ns = ns;
- curop.debug().ntoreturn = ntoreturn;
- curop.debug().cursorid = cursorid;
-
- unique_ptr<AssertionException> ex;
- unique_ptr<Timer> timer;
- int pass = 0;
- bool exhaust = false;
- QueryResult::View msgdata = 0;
- Timestamp last;
- while( 1 ) {
- bool isCursorAuthorized = false;
- try {
- const NamespaceString nsString( ns );
- uassert( 16258, str::stream() << "Invalid ns [" << ns << "]", nsString.isValid() );
-
- Status status = AuthorizationSession::get(txn->getClient())->checkAuthForGetMore(
- nsString, cursorid);
- audit::logGetMoreAuthzCheck(txn->getClient(), nsString, cursorid, status.code());
- uassertStatusOK(status);
-
- if (str::startsWith(ns, "local.oplog.")){
- while (MONGO_FAIL_POINT(rsStopGetMore)) {
- sleepmillis(0);
- }
-
- if (pass == 0) {
- last = getLastSetTimestamp();
- }
- else {
- repl::waitUpToOneSecondForTimestampChange(last);
- }
+ Status status = AuthorizationSession::get(txn->getClient())
+ ->checkAuthForGetMore(nsString, cursorid);
+ audit::logGetMoreAuthzCheck(txn->getClient(), nsString, cursorid, status.code());
+ uassertStatusOK(status);
+
+ if (str::startsWith(ns, "local.oplog.")) {
+ while (MONGO_FAIL_POINT(rsStopGetMore)) {
+ sleepmillis(0);
}
- msgdata = getMore(txn,
- ns,
- ntoreturn,
- cursorid,
- pass,
- exhaust,
- &isCursorAuthorized);
- }
- catch ( AssertionException& e ) {
- if ( isCursorAuthorized ) {
- // If a cursor with id 'cursorid' was authorized, it may have been advanced
- // before an exception terminated processGetMore. Erase the ClientCursor
- // because it may now be out of sync with the client's iteration state.
- // SERVER-7952
- // TODO Temporary code, see SERVER-4563 for a cleanup overview.
- CursorManager::eraseCursorGlobal(txn, cursorid );
+ if (pass == 0) {
+ last = getLastSetTimestamp();
+ } else {
+ repl::waitUpToOneSecondForTimestampChange(last);
}
- ex.reset( new AssertionException( e.getInfo().msg, e.getCode() ) );
- ok = false;
- break;
}
-
- if (msgdata.view2ptr() == 0) {
- // this should only happen with QueryOption_AwaitData
- exhaust = false;
- massert(13073, "shutting down", !inShutdown() );
- if ( ! timer ) {
- timer.reset( new Timer() );
- }
- else {
- if ( timer->seconds() >= 4 ) {
- // after about 4 seconds, return. pass stops at 1000 normally.
- // we want to return occasionally so slave can checkpoint.
- pass = 10000;
- }
- }
- pass++;
- if (kDebugBuild)
- sleepmillis(20);
- else
- sleepmillis(2);
-
- // note: the 1100 is beacuse of the waitForDifferent above
- // should eventually clean this up a bit
- curop.setExpectedLatencyMs( 1100 + timer->millis() );
-
- continue;
+
+ msgdata = getMore(txn, ns, ntoreturn, cursorid, pass, exhaust, &isCursorAuthorized);
+ } catch (AssertionException& e) {
+ if (isCursorAuthorized) {
+ // If a cursor with id 'cursorid' was authorized, it may have been advanced
+ // before an exception terminated processGetMore. Erase the ClientCursor
+ // because it may now be out of sync with the client's iteration state.
+ // SERVER-7952
+ // TODO Temporary code, see SERVER-4563 for a cleanup overview.
+ CursorManager::eraseCursorGlobal(txn, cursorid);
}
+ ex.reset(new AssertionException(e.getInfo().msg, e.getCode()));
+ ok = false;
break;
- };
+ }
- if (ex) {
- BSONObjBuilder err;
- ex->getInfo().append( err );
- BSONObj errObj = err.done();
+ if (msgdata.view2ptr() == 0) {
+ // this should only happen with QueryOption_AwaitData
+ exhaust = false;
+ massert(13073, "shutting down", !inShutdown());
+ if (!timer) {
+ timer.reset(new Timer());
+ } else {
+ if (timer->seconds() >= 4) {
+ // after about 4 seconds, return. pass stops at 1000 normally.
+ // we want to return occasionally so slave can checkpoint.
+ pass = 10000;
+ }
+ }
+ pass++;
+ if (kDebugBuild)
+ sleepmillis(20);
+ else
+ sleepmillis(2);
- curop.debug().exceptionInfo = ex->getInfo();
+        // note: the 1100 is because of the waitForDifferent above
+ // should eventually clean this up a bit
+ curop.setExpectedLatencyMs(1100 + timer->millis());
- replyToQuery(ResultFlag_ErrSet, m, dbresponse, errObj);
- curop.debug().responseLength = dbresponse.response->header().dataLen();
- curop.debug().nreturned = 1;
- return ok;
+ continue;
}
+ break;
+ };
- Message *resp = new Message();
- resp->setData(msgdata.view2ptr(), true);
- curop.debug().responseLength = resp->header().dataLen();
- curop.debug().nreturned = msgdata.getNReturned();
-
- dbresponse.response = resp;
- dbresponse.responseTo = m.header().getId();
+ if (ex) {
+ BSONObjBuilder err;
+ ex->getInfo().append(err);
+ BSONObj errObj = err.done();
- if( exhaust ) {
- curop.debug().exhaust = true;
- dbresponse.exhaustNS = ns;
- }
+ curop.debug().exceptionInfo = ex->getInfo();
+ replyToQuery(ResultFlag_ErrSet, m, dbresponse, errObj);
+ curop.debug().responseLength = dbresponse.response->header().dataLen();
+ curop.debug().nreturned = 1;
return ok;
}
- void checkAndInsert(OperationContext* txn,
- OldClientContext& ctx,
- const char *ns,
- /*modifies*/BSONObj& js) {
-
- StatusWith<BSONObj> fixed = fixDocumentForInsert( js );
- uassertStatusOK( fixed.getStatus() );
- if ( !fixed.getValue().isEmpty() )
- js = fixed.getValue();
-
- int attempt = 0;
- while ( true ) {
- try {
- WriteUnitOfWork wunit(txn);
- Collection* collection = ctx.db()->getCollection( ns );
- if ( !collection ) {
- collection = ctx.db()->createCollection( txn, ns );
- verify( collection );
- }
+ Message* resp = new Message();
+ resp->setData(msgdata.view2ptr(), true);
+ curop.debug().responseLength = resp->header().dataLen();
+ curop.debug().nreturned = msgdata.getNReturned();
- StatusWith<RecordId> status = collection->insertDocument( txn, js, true );
- uassertStatusOK( status.getStatus() );
- wunit.commit();
- break;
- }
- catch( const WriteConflictException& e ) {
- CurOp::get(txn)->debug().writeConflicts++;
- txn->recoveryUnit()->abandonSnapshot();
- WriteConflictException::logAndBackoff( attempt++, "insert", ns);
- }
- }
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.header().getId();
+
+ if (exhaust) {
+ curop.debug().exhaust = true;
+ dbresponse.exhaustNS = ns;
}
- NOINLINE_DECL void insertMulti(OperationContext* txn,
- OldClientContext& ctx,
- bool keepGoing,
- const char *ns,
- vector<BSONObj>& objs,
- CurOp& op) {
- size_t i;
- for (i=0; i<objs.size(); i++){
- try {
- checkAndInsert(txn, ctx, ns, objs[i]);
+ return ok;
+}
+
+void checkAndInsert(OperationContext* txn,
+ OldClientContext& ctx,
+ const char* ns,
+ /*modifies*/ BSONObj& js) {
+ StatusWith<BSONObj> fixed = fixDocumentForInsert(js);
+ uassertStatusOK(fixed.getStatus());
+ if (!fixed.getValue().isEmpty())
+ js = fixed.getValue();
+
+ int attempt = 0;
+ while (true) {
+ try {
+ WriteUnitOfWork wunit(txn);
+ Collection* collection = ctx.db()->getCollection(ns);
+ if (!collection) {
+ collection = ctx.db()->createCollection(txn, ns);
+ verify(collection);
}
- catch (const UserException& ex) {
- if (!keepGoing || i == objs.size()-1){
- globalOpCounters.incInsertInWriteLock(i);
- throw;
- }
- LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
- // otherwise ignore and keep going
+
+ StatusWith<RecordId> status = collection->insertDocument(txn, js, true);
+ uassertStatusOK(status.getStatus());
+ wunit.commit();
+ break;
+ } catch (const WriteConflictException& e) {
+ CurOp::get(txn)->debug().writeConflicts++;
+ txn->recoveryUnit()->abandonSnapshot();
+ WriteConflictException::logAndBackoff(attempt++, "insert", ns);
+ }
+ }
+}
+
+NOINLINE_DECL void insertMulti(OperationContext* txn,
+ OldClientContext& ctx,
+ bool keepGoing,
+ const char* ns,
+ vector<BSONObj>& objs,
+ CurOp& op) {
+ size_t i;
+ for (i = 0; i < objs.size(); i++) {
+ try {
+ checkAndInsert(txn, ctx, ns, objs[i]);
+ } catch (const UserException& ex) {
+ if (!keepGoing || i == objs.size() - 1) {
+ globalOpCounters.incInsertInWriteLock(i);
+ throw;
}
+ LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
+ // otherwise ignore and keep going
}
-
- globalOpCounters.incInsertInWriteLock(i);
- op.debug().ninserted = i;
}
- static void convertSystemIndexInsertsToCommands(
- DbMessage& d,
- BSONArrayBuilder* allCmdsBuilder) {
- while (d.moreJSObjs()) {
- BSONObj spec = d.nextJsObj();
- BSONElement indexNsElement = spec["ns"];
- uassert(ErrorCodes::NoSuchKey,
- str::stream() << "Missing \"ns\" field while inserting into " << d.getns(),
- !indexNsElement.eoo());
- uassert(ErrorCodes::TypeMismatch,
- str::stream() << "Expected \"ns\" field to have type String, not " <<
- typeName(indexNsElement.type()) << " while inserting into " << d.getns(),
- indexNsElement.type() == String);
- const StringData nsToIndex(indexNsElement.valueStringData());
- BSONObjBuilder cmdObjBuilder(allCmdsBuilder->subobjStart());
- cmdObjBuilder << "createIndexes" << nsToCollectionSubstring(nsToIndex);
- BSONArrayBuilder specArrayBuilder(cmdObjBuilder.subarrayStart("indexes"));
- while (true) {
- BSONObjBuilder specBuilder(specArrayBuilder.subobjStart());
- BSONElement specNsElement = spec["ns"];
- if ((specNsElement.type() != String) ||
- (specNsElement.valueStringData() != nsToIndex)) {
-
- break;
- }
- for (BSONObjIterator iter(spec); iter.more();) {
- BSONElement element = iter.next();
- if (element.fieldNameStringData() != "ns") {
- specBuilder.append(element);
- }
- }
- if (!d.moreJSObjs()) {
- break;
+ globalOpCounters.incInsertInWriteLock(i);
+ op.debug().ninserted = i;
+}
+
+static void convertSystemIndexInsertsToCommands(DbMessage& d, BSONArrayBuilder* allCmdsBuilder) {
+ while (d.moreJSObjs()) {
+ BSONObj spec = d.nextJsObj();
+ BSONElement indexNsElement = spec["ns"];
+ uassert(ErrorCodes::NoSuchKey,
+ str::stream() << "Missing \"ns\" field while inserting into " << d.getns(),
+ !indexNsElement.eoo());
+ uassert(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"ns\" field to have type String, not "
+ << typeName(indexNsElement.type()) << " while inserting into "
+ << d.getns(),
+ indexNsElement.type() == String);
+ const StringData nsToIndex(indexNsElement.valueStringData());
+ BSONObjBuilder cmdObjBuilder(allCmdsBuilder->subobjStart());
+ cmdObjBuilder << "createIndexes" << nsToCollectionSubstring(nsToIndex);
+ BSONArrayBuilder specArrayBuilder(cmdObjBuilder.subarrayStart("indexes"));
+ while (true) {
+ BSONObjBuilder specBuilder(specArrayBuilder.subobjStart());
+ BSONElement specNsElement = spec["ns"];
+ if ((specNsElement.type() != String) ||
+ (specNsElement.valueStringData() != nsToIndex)) {
+ break;
+ }
+ for (BSONObjIterator iter(spec); iter.more();) {
+ BSONElement element = iter.next();
+ if (element.fieldNameStringData() != "ns") {
+ specBuilder.append(element);
}
- spec = d.nextJsObj();
}
+ if (!d.moreJSObjs()) {
+ break;
+ }
+ spec = d.nextJsObj();
}
}
-
- static void insertSystemIndexes(OperationContext* txn, DbMessage& d, CurOp& curOp) {
- BSONArrayBuilder allCmdsBuilder;
+}
+
+static void insertSystemIndexes(OperationContext* txn, DbMessage& d, CurOp& curOp) {
+ BSONArrayBuilder allCmdsBuilder;
+ try {
+ convertSystemIndexInsertsToCommands(d, &allCmdsBuilder);
+ } catch (const DBException& ex) {
+ LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
+ curOp.debug().exceptionInfo = ex.getInfo();
+ return;
+ }
+ BSONArray allCmds(allCmdsBuilder.done());
+ Command* createIndexesCmd = Command::findCommand("createIndexes");
+ invariant(createIndexesCmd);
+ const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
+ for (BSONObjIterator iter(allCmds); iter.more();) {
try {
- convertSystemIndexInsertsToCommands(d, &allCmdsBuilder);
- }
- catch (const DBException& ex) {
+ BSONObj cmdObj = iter.next().Obj();
+
+ rpc::LegacyRequestBuilder requestBuilder{};
+ auto indexNs = NamespaceString(d.getns());
+ auto cmdRequestMsg = requestBuilder.setDatabase(indexNs.db())
+ .setCommandName("createIndexes")
+ .setMetadata(rpc::makeEmptyMetadata())
+ .setCommandArgs(cmdObj)
+ .done();
+ rpc::LegacyRequest cmdRequest{cmdRequestMsg.get()};
+ rpc::LegacyReplyBuilder cmdReplyBuilder{};
+ Command::execCommand(txn, createIndexesCmd, cmdRequest, &cmdReplyBuilder);
+ auto cmdReplyMsg = cmdReplyBuilder.done();
+ rpc::LegacyReply cmdReply{cmdReplyMsg.get()};
+ uassertStatusOK(Command::getStatusFromCommandResult(cmdReply.getCommandReply()));
+ } catch (const DBException& ex) {
LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
curOp.debug().exceptionInfo = ex.getInfo();
- return;
- }
- BSONArray allCmds(allCmdsBuilder.done());
- Command* createIndexesCmd = Command::findCommand("createIndexes");
- invariant(createIndexesCmd);
- const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
- for (BSONObjIterator iter(allCmds); iter.more();) {
- try {
- BSONObj cmdObj = iter.next().Obj();
-
- rpc::LegacyRequestBuilder requestBuilder{};
- auto indexNs = NamespaceString(d.getns());
- auto cmdRequestMsg = requestBuilder.setDatabase(indexNs.db())
- .setCommandName("createIndexes")
- .setMetadata(rpc::makeEmptyMetadata())
- .setCommandArgs(cmdObj).done();
- rpc::LegacyRequest cmdRequest{cmdRequestMsg.get()};
- rpc::LegacyReplyBuilder cmdReplyBuilder{};
- Command::execCommand(txn,
- createIndexesCmd,
- cmdRequest,
- &cmdReplyBuilder);
- auto cmdReplyMsg = cmdReplyBuilder.done();
- rpc::LegacyReply cmdReply{cmdReplyMsg.get()};
- uassertStatusOK(Command::getStatusFromCommandResult(cmdReply.getCommandReply()));
- }
- catch (const DBException& ex) {
- LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.getInfo().msg);
- curOp.debug().exceptionInfo = ex.getInfo();
- if (!keepGoing) {
- return;
- }
+ if (!keepGoing) {
+ return;
}
}
}
+}
+
+void receivedInsert(OperationContext* txn, const NamespaceString& nsString, Message& m, CurOp& op) {
+ DbMessage d(m);
+ const char* ns = d.getns();
+ op.debug().ns = ns;
+ uassertStatusOK(userAllowedWriteNS(nsString.ns()));
+ if (nsString.isSystemDotIndexes()) {
+ insertSystemIndexes(txn, d, op);
+ return;
+ }
- void receivedInsert(OperationContext* txn,
- const NamespaceString& nsString,
- Message& m,
- CurOp& op) {
- DbMessage d(m);
- const char* ns = d.getns();
- op.debug().ns = ns;
- uassertStatusOK(userAllowedWriteNS(nsString.ns()));
- if (nsString.isSystemDotIndexes()) {
- insertSystemIndexes(txn, d, op);
- return;
- }
-
- if( !d.moreJSObjs() ) {
- // strange. should we complain?
- return;
- }
-
- vector<BSONObj> multi;
- while (d.moreJSObjs()){
- BSONObj obj = d.nextJsObj();
- multi.push_back(obj);
+ if (!d.moreJSObjs()) {
+ // strange. should we complain?
+ return;
+ }
- // Check auth for insert (also handles checking if this is an index build and checks
- // for the proper privileges in that case).
- Status status = AuthorizationSession::get(txn->getClient())->checkAuthForInsert(nsString, obj);
- audit::logInsertAuthzCheck(txn->getClient(), nsString, obj, status.code());
- uassertStatusOK(status);
- }
+ vector<BSONObj> multi;
+ while (d.moreJSObjs()) {
+ BSONObj obj = d.nextJsObj();
+ multi.push_back(obj);
- const int notMasterCodeForInsert = 10058; // This is different from ErrorCodes::NotMaster
- {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
-
- // CONCURRENCY TODO: is being read locked in big log sufficient here?
- // writelock is used to synchronize stepdowns w/ writes
- uassert(notMasterCodeForInsert, "not master",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString));
-
- // OldClientContext may implicitly create a database, so check existence
- if (dbHolder().get(txn, nsString.db()) != NULL) {
- OldClientContext ctx(txn, ns);
- if (ctx.db()->getCollection(nsString)) {
- if (multi.size() > 1) {
- const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
- insertMulti(txn, ctx, keepGoing, ns, multi, op);
- }
- else {
- checkAndInsert(txn, ctx, ns, multi[0]);
- globalOpCounters.incInsertInWriteLock(1);
- op.debug().ninserted = 1;
- }
- return;
- }
- }
- }
+ // Check auth for insert (also handles checking if this is an index build and checks
+ // for the proper privileges in that case).
+ Status status =
+ AuthorizationSession::get(txn->getClient())->checkAuthForInsert(nsString, obj);
+ audit::logInsertAuthzCheck(txn->getClient(), nsString, obj, status.code());
+ uassertStatusOK(status);
+ }
- // Collection didn't exist so try again with MODE_X
+ const int notMasterCodeForInsert = 10058; // This is different from ErrorCodes::NotMaster
+ {
ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_X);
+ Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_IX);
+ Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
// CONCURRENCY TODO: is being read locked in big log sufficient here?
// writelock is used to synchronize stepdowns w/ writes
- uassert(notMasterCodeForInsert, "not master",
+ uassert(notMasterCodeForInsert,
+ "not master",
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString));
- OldClientContext ctx(txn, ns);
-
- if (multi.size() > 1) {
- const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
- insertMulti(txn, ctx, keepGoing, ns, multi, op);
- } else {
- checkAndInsert(txn, ctx, ns, multi[0]);
- globalOpCounters.incInsertInWriteLock(1);
- op.debug().ninserted = 1;
+ // OldClientContext may implicitly create a database, so check existence
+ if (dbHolder().get(txn, nsString.db()) != NULL) {
+ OldClientContext ctx(txn, ns);
+ if (ctx.db()->getCollection(nsString)) {
+ if (multi.size() > 1) {
+ const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
+ insertMulti(txn, ctx, keepGoing, ns, multi, op);
+ } else {
+ checkAndInsert(txn, ctx, ns, multi[0]);
+ globalOpCounters.incInsertInWriteLock(1);
+ op.debug().ninserted = 1;
+ }
+ return;
+ }
}
}
- static AtomicUInt32 shutdownInProgress(0);
+ // Collection didn't exist so try again with MODE_X
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_X);
- bool inShutdown() {
- return shutdownInProgress.loadRelaxed() != 0;
- }
+ // CONCURRENCY TODO: is being read locked in big log sufficient here?
+ // writelock is used to synchronize stepdowns w/ writes
+ uassert(notMasterCodeForInsert,
+ "not master",
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString));
+
+ OldClientContext ctx(txn, ns);
- bool inShutdownStrict() {
- return shutdownInProgress.load() != 0;
+ if (multi.size() > 1) {
+ const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
+ insertMulti(txn, ctx, keepGoing, ns, multi, op);
+ } else {
+ checkAndInsert(txn, ctx, ns, multi[0]);
+ globalOpCounters.incInsertInWriteLock(1);
+ op.debug().ninserted = 1;
}
+}
- static void shutdownServer() {
- log(LogComponent::kNetwork) << "shutdown: going to close listening sockets..." << endl;
- ListeningSockets::get()->closeAll();
+static AtomicUInt32 shutdownInProgress(0);
- log(LogComponent::kNetwork) << "shutdown: going to flush diaglog..." << endl;
- _diaglog.flush();
+bool inShutdown() {
+ return shutdownInProgress.loadRelaxed() != 0;
+}
- /* must do this before unmapping mem or you may get a seg fault */
- log(LogComponent::kNetwork) << "shutdown: going to close sockets..." << endl;
- stdx::thread close_socket_thread( stdx::bind(MessagingPort::closeAllSockets, 0) );
+bool inShutdownStrict() {
+ return shutdownInProgress.load() != 0;
+}
- getGlobalServiceContext()->shutdownGlobalStorageEngineCleanly();
- }
+static void shutdownServer() {
+ log(LogComponent::kNetwork) << "shutdown: going to close listening sockets..." << endl;
+ ListeningSockets::get()->closeAll();
- // shutdownLock
- //
- // Protects:
- // Ensures shutdown is single threaded.
- // Lock Ordering:
- // No restrictions
- stdx::mutex shutdownLock;
-
- void signalShutdown() {
- // Notify all threads shutdown has started
- shutdownInProgress.fetchAndAdd(1);
- }
+ log(LogComponent::kNetwork) << "shutdown: going to flush diaglog..." << endl;
+ _diaglog.flush();
- void exitCleanly(ExitCode code) {
- // Notify all threads shutdown has started
- shutdownInProgress.fetchAndAdd(1);
+ /* must do this before unmapping mem or you may get a seg fault */
+ log(LogComponent::kNetwork) << "shutdown: going to close sockets..." << endl;
+ stdx::thread close_socket_thread(stdx::bind(MessagingPort::closeAllSockets, 0));
- // Grab the shutdown lock to prevent concurrent callers
- stdx::lock_guard<stdx::mutex> lockguard(shutdownLock);
+ getGlobalServiceContext()->shutdownGlobalStorageEngineCleanly();
+}
- // Global storage engine may not be started in all cases before we exit
- if (getGlobalServiceContext()->getGlobalStorageEngine() == NULL) {
- dbexit(code); // returns only under a windows service
- invariant(code == EXIT_WINDOWS_SERVICE_STOP);
- return;
- }
+// shutdownLock
+//
+// Protects:
+// Ensures shutdown is single threaded.
+// Lock Ordering:
+// No restrictions
+stdx::mutex shutdownLock;
- getGlobalServiceContext()->setKillAllOperations();
+void signalShutdown() {
+ // Notify all threads shutdown has started
+ shutdownInProgress.fetchAndAdd(1);
+}
- repl::getGlobalReplicationCoordinator()->shutdown();
- auto catalogMgr = grid.catalogManager();
- if (catalogMgr) {
- catalogMgr->shutDown();
- }
+void exitCleanly(ExitCode code) {
+ // Notify all threads shutdown has started
+ shutdownInProgress.fetchAndAdd(1);
- // We should always be able to acquire the global lock at shutdown.
- //
- // TODO: This call chain uses the locker directly, because we do not want to start an
- // operation context, which also instantiates a recovery unit. Also, using the
- // lockGlobalBegin/lockGlobalComplete sequence, we avoid taking the flush lock. This will
- // all go away if we start acquiring the global/flush lock as part of ScopedTransaction.
- //
- // For a Windows service, dbexit does not call exit(), so we must leak the lock outside
- // of this function to prevent any operations from running that need a lock.
- //
- DefaultLockerImpl* globalLocker = new DefaultLockerImpl();
- LockResult result = globalLocker->lockGlobalBegin(MODE_X);
- if (result == LOCK_WAITING) {
- result = globalLocker->lockGlobalComplete(UINT_MAX);
- }
+ // Grab the shutdown lock to prevent concurrent callers
+ stdx::lock_guard<stdx::mutex> lockguard(shutdownLock);
- invariant(LOCK_OK == result);
+ // Global storage engine may not be started in all cases before we exit
+ if (getGlobalServiceContext()->getGlobalStorageEngine() == NULL) {
+ dbexit(code); // returns only under a windows service
+ invariant(code == EXIT_WINDOWS_SERVICE_STOP);
+ return;
+ }
- log(LogComponent::kControl) << "now exiting" << endl;
+ getGlobalServiceContext()->setKillAllOperations();
- // Execute the graceful shutdown tasks, such as flushing the outstanding journal
- // and data files, close sockets, etc.
- try {
- shutdownServer();
- }
- catch (const DBException& ex) {
- severe() << "shutdown failed with DBException " << ex;
- std::terminate();
- }
- catch (const std::exception& ex) {
- severe() << "shutdown failed with std::exception: " << ex.what();
- std::terminate();
- }
- catch (...) {
- severe() << "shutdown failed with exception";
- std::terminate();
- }
+ repl::getGlobalReplicationCoordinator()->shutdown();
+ auto catalogMgr = grid.catalogManager();
+ if (catalogMgr) {
+ catalogMgr->shutDown();
+ }
+
+ // We should always be able to acquire the global lock at shutdown.
+ //
+ // TODO: This call chain uses the locker directly, because we do not want to start an
+ // operation context, which also instantiates a recovery unit. Also, using the
+ // lockGlobalBegin/lockGlobalComplete sequence, we avoid taking the flush lock. This will
+ // all go away if we start acquiring the global/flush lock as part of ScopedTransaction.
+ //
+ // For a Windows service, dbexit does not call exit(), so we must leak the lock outside
+ // of this function to prevent any operations from running that need a lock.
+ //
+ DefaultLockerImpl* globalLocker = new DefaultLockerImpl();
+ LockResult result = globalLocker->lockGlobalBegin(MODE_X);
+ if (result == LOCK_WAITING) {
+ result = globalLocker->lockGlobalComplete(UINT_MAX);
+ }
- dbexit( code );
+ invariant(LOCK_OK == result);
+
+ log(LogComponent::kControl) << "now exiting" << endl;
+
+ // Execute the graceful shutdown tasks, such as flushing the outstanding journal
+ // and data files, close sockets, etc.
+ try {
+ shutdownServer();
+ } catch (const DBException& ex) {
+ severe() << "shutdown failed with DBException " << ex;
+ std::terminate();
+ } catch (const std::exception& ex) {
+ severe() << "shutdown failed with std::exception: " << ex.what();
+ std::terminate();
+ } catch (...) {
+ severe() << "shutdown failed with exception";
+ std::terminate();
}
- NOINLINE_DECL void dbexit( ExitCode rc, const char *why ) {
- audit::logShutdown(&cc());
+ dbexit(code);
+}
+
+NOINLINE_DECL void dbexit(ExitCode rc, const char* why) {
+ audit::logShutdown(&cc());
- log(LogComponent::kControl) << "dbexit: " << why << " rc: " << rc;
+ log(LogComponent::kControl) << "dbexit: " << why << " rc: " << rc;
#ifdef _WIN32
- // Windows Service Controller wants to be told when we are down,
- // so don't call quickExit() yet, or say "really exiting now"
- //
- if ( rc == EXIT_WINDOWS_SERVICE_STOP ) {
- return;
- }
+ // Windows Service Controller wants to be told when we are down,
+ // so don't call quickExit() yet, or say "really exiting now"
+ //
+ if (rc == EXIT_WINDOWS_SERVICE_STOP) {
+ return;
+ }
#endif
- quickExit(rc);
+ quickExit(rc);
+}
+
+// ----- BEGIN Diaglog -----
+DiagLog::DiagLog() : f(0), level(0) {}
+
+void DiagLog::openFile() {
+ verify(f == 0);
+ stringstream ss;
+ ss << storageGlobalParams.dbpath << "/diaglog." << hex << time(0);
+ string name = ss.str();
+ f = new ofstream(name.c_str(), ios::out | ios::binary);
+ if (!f->good()) {
+ log() << "diagLogging couldn't open " << name << endl;
+ // todo what is this? :
+ throw 1717;
+ } else {
+ log() << "diagLogging using file " << name << endl;
}
-
- // ----- BEGIN Diaglog -----
- DiagLog::DiagLog() : f(0), level(0) {}
-
- void DiagLog::openFile() {
- verify( f == 0 );
- stringstream ss;
- ss << storageGlobalParams.dbpath << "/diaglog." << hex << time(0);
- string name = ss.str();
- f = new ofstream(name.c_str(), ios::out | ios::binary);
- if ( ! f->good() ) {
- log() << "diagLogging couldn't open " << name << endl;
- // todo what is this? :
- throw 1717;
- }
- else {
- log() << "diagLogging using file " << name << endl;
- }
+}
+
+int DiagLog::setLevel(int newLevel) {
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ int old = level;
+ log() << "diagLogging level=" << newLevel << endl;
+ if (f == 0) {
+ openFile();
}
+ level = newLevel; // must be done AFTER f is set
+ return old;
+}
- int DiagLog::setLevel( int newLevel ) {
+void DiagLog::flush() {
+ if (level) {
+ log() << "flushing diag log" << endl;
stdx::lock_guard<stdx::mutex> lk(mutex);
- int old = level;
- log() << "diagLogging level=" << newLevel << endl;
- if( f == 0 ) {
- openFile();
- }
- level = newLevel; // must be done AFTER f is set
- return old;
+ f->flush();
}
+}
- void DiagLog::flush() {
- if ( level ) {
- log() << "flushing diag log" << endl;
- stdx::lock_guard<stdx::mutex> lk(mutex);
- f->flush();
- }
+void DiagLog::writeop(char* data, int len) {
+ if (level & 1) {
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ f->write(data, len);
}
+}
- void DiagLog::writeop(char *data,int len) {
- if ( level & 1 ) {
+void DiagLog::readop(char* data, int len) {
+ if (level & 2) {
+ bool log = (level & 4) == 0;
+ OCCASIONALLY log = true;
+ if (log) {
stdx::lock_guard<stdx::mutex> lk(mutex);
- f->write(data,len);
- }
- }
-
- void DiagLog::readop(char *data, int len) {
- if ( level & 2 ) {
- bool log = (level & 4) == 0;
- OCCASIONALLY log = true;
- if ( log ) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- verify( f );
- f->write(data,len);
- }
+ verify(f);
+ f->write(data, len);
}
}
+}
- DiagLog _diaglog;
+DiagLog _diaglog;
- // ----- END Diaglog -----
+// ----- END Diaglog -----
-} // namespace mongo
+} // namespace mongo
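The hunk above rewrites legacy system.indexes inserts as a series of createIndexes commands and honors the wire protocol's ContinueOnError flag: each failure is recorded as the last error, and the loop aborts only when keepGoing is false. A minimal standalone sketch of that control flow, with hypothetical runBatch/recordLastError helpers in place of the real Command::execCommand and LastError machinery:

#include <functional>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for LastError::setLastError; the real code records the
// DBException code and message on the client's LastError decoration.
static void recordLastError(const std::string& /*msg*/) {}

// Runs each queued command; a failure is recorded and the loop either carries
// on (keepGoing == true, i.e. InsertOption_ContinueOnError) or stops at the
// first error, mirroring the createIndexes loop in the hunk above.
static void runBatch(const std::vector<std::function<void()>>& commands, bool keepGoing) {
    for (const auto& runOne : commands) {
        try {
            runOne();
        } catch (const std::exception& ex) {
            recordLastError(ex.what());
            if (!keepGoing) {
                return;
            }
        }
    }
}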
diff --git a/src/mongo/db/instance.h b/src/mongo/db/instance.h
index 097b62d743b..28a8414ff9e 100644
--- a/src/mongo/db/instance.h
+++ b/src/mongo/db/instance.h
@@ -39,39 +39,41 @@
namespace mongo {
- extern std::string dbExecCommand;
+extern std::string dbExecCommand;
- /** a high level recording of operations to the database - sometimes used for diagnostics
- and debugging.
- */
- class DiagLog {
- std::ofstream *f; // note this is never freed
- /* 0 = off; 1 = writes, 2 = reads, 3 = both
- 7 = log a few reads, and all writes.
- */
- int level;
- stdx::mutex mutex;
- void openFile();
+/** a high level recording of operations to the database - sometimes used for diagnostics
+ and debugging.
+ */
+class DiagLog {
+ std::ofstream* f; // note this is never freed
+ /* 0 = off; 1 = writes, 2 = reads, 3 = both
+ 7 = log a few reads, and all writes.
+ */
+ int level;
+ stdx::mutex mutex;
+ void openFile();
- public:
- DiagLog();
- int getLevel() const { return level; }
- /**
- * @return old
- */
- int setLevel( int newLevel );
- void flush();
- void writeop(char *data,int len);
- void readop(char *data, int len);
- };
+public:
+ DiagLog();
+ int getLevel() const {
+ return level;
+ }
+ /**
+ * @return old
+ */
+ int setLevel(int newLevel);
+ void flush();
+ void writeop(char* data, int len);
+ void readop(char* data, int len);
+};
- extern DiagLog _diaglog;
+extern DiagLog _diaglog;
- void assembleResponse( OperationContext* txn,
- Message& m,
- DbResponse& dbresponse,
- const HostAndPort &client );
+void assembleResponse(OperationContext* txn,
+ Message& m,
+ DbResponse& dbresponse,
+ const HostAndPort& client);
- void maybeCreatePidFile();
+void maybeCreatePidFile();
-} // namespace mongo
+} // namespace mongo
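The DiagLog level documented in this header is a small bitmask (0 = off, 1 = writes, 2 = reads, 3 = both, 7 = all writes plus occasional reads), which is exactly what writeop and readop in instance.cpp test with level & 1, level & 2 and level & 4. A self-contained illustration of those checks, assuming invented shouldLogWrite/shouldLogRead names:

#include <cstdlib>

// Illustrative only: shouldLogWrite/shouldLogRead are invented names; the real
// checks live in DiagLog::writeop and DiagLog::readop, and the sampling uses
// the OCCASIONALLY macro rather than rand().
static bool shouldLogWrite(int level) {
    return (level & 1) != 0;  // the 1 bit enables write logging
}

static bool shouldLogRead(int level) {
    if ((level & 2) == 0)
        return false;  // the 2 bit enables read logging at all
    if ((level & 4) == 0)
        return true;  // without the 4 bit, every read is logged
    return (std::rand() % 128) == 0;  // with it (level 7), reads are only sampled
}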
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index b0788ad9d55..8a6cca1dabb 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -46,153 +46,144 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
+using std::unique_ptr;
+using std::endl;
+using std::string;
namespace {
- void _appendUserInfo(const CurOp& c,
- BSONObjBuilder& builder,
- AuthorizationSession* authSession) {
- UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
+void _appendUserInfo(const CurOp& c, BSONObjBuilder& builder, AuthorizationSession* authSession) {
+ UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
- UserName bestUser;
- if (nameIter.more())
- bestUser = *nameIter;
+ UserName bestUser;
+ if (nameIter.more())
+ bestUser = *nameIter;
- std::string opdb( nsToDatabase( c.getNS() ) );
+ std::string opdb(nsToDatabase(c.getNS()));
- BSONArrayBuilder allUsers(builder.subarrayStart("allUsers"));
- for ( ; nameIter.more(); nameIter.next()) {
- BSONObjBuilder nextUser(allUsers.subobjStart());
- nextUser.append(AuthorizationManager::USER_NAME_FIELD_NAME, nameIter->getUser());
- nextUser.append(AuthorizationManager::USER_DB_FIELD_NAME, nameIter->getDB());
- nextUser.doneFast();
+ BSONArrayBuilder allUsers(builder.subarrayStart("allUsers"));
+ for (; nameIter.more(); nameIter.next()) {
+ BSONObjBuilder nextUser(allUsers.subobjStart());
+ nextUser.append(AuthorizationManager::USER_NAME_FIELD_NAME, nameIter->getUser());
+ nextUser.append(AuthorizationManager::USER_DB_FIELD_NAME, nameIter->getDB());
+ nextUser.doneFast();
- if (nameIter->getDB() == opdb) {
- bestUser = *nameIter;
- }
+ if (nameIter->getDB() == opdb) {
+ bestUser = *nameIter;
}
- allUsers.doneFast();
-
- builder.append("user", bestUser.getUser().empty() ? "" : bestUser.getFullName());
-
}
+ allUsers.doneFast();
-} // namespace
+ builder.append("user", bestUser.getUser().empty() ? "" : bestUser.getFullName());
+}
+} // namespace
- void profile(OperationContext* txn, int op) {
- // Initialize with 1kb at start in order to avoid realloc later
- BufBuilder profileBufBuilder(1024);
- BSONObjBuilder b(profileBufBuilder);
+void profile(OperationContext* txn, int op) {
+ // Initialize with 1kb at start in order to avoid realloc later
+ BufBuilder profileBufBuilder(1024);
- {
- Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
- CurOp::get(txn)->debug().append(*CurOp::get(txn), lockerInfo.stats, b);
- }
+ BSONObjBuilder b(profileBufBuilder);
- b.appendDate("ts", jsTime());
- b.append("client", txn->getClient()->clientAddress());
+ {
+ Locker::LockerInfo lockerInfo;
+ txn->lockState()->getLockerInfo(&lockerInfo);
+ CurOp::get(txn)->debug().append(*CurOp::get(txn), lockerInfo.stats, b);
+ }
- AuthorizationSession * authSession = AuthorizationSession::get(txn->getClient());
- _appendUserInfo(*CurOp::get(txn), b, authSession);
+ b.appendDate("ts", jsTime());
+ b.append("client", txn->getClient()->clientAddress());
- const BSONObj p = b.done();
+ AuthorizationSession* authSession = AuthorizationSession::get(txn->getClient());
+ _appendUserInfo(*CurOp::get(txn), b, authSession);
- const bool wasLocked = txn->lockState()->isLocked();
+ const BSONObj p = b.done();
- const string dbName(nsToDatabase(CurOp::get(txn)->getNS()));
+ const bool wasLocked = txn->lockState()->isLocked();
- try {
- bool acquireDbXLock = false;
- while (true) {
- ScopedTransaction scopedXact(txn, MODE_IX);
+ const string dbName(nsToDatabase(CurOp::get(txn)->getNS()));
- std::unique_ptr<AutoGetDb> autoGetDb;
- if (acquireDbXLock) {
- autoGetDb.reset(new AutoGetDb(txn, dbName, MODE_X));
- if (autoGetDb->getDb()) {
- createProfileCollection(txn, autoGetDb->getDb());
- }
- }
- else {
- autoGetDb.reset(new AutoGetDb(txn, dbName, MODE_IX));
- }
+ try {
+ bool acquireDbXLock = false;
+ while (true) {
+ ScopedTransaction scopedXact(txn, MODE_IX);
- Database* const db = autoGetDb->getDb();
- if (!db) {
- // Database disappeared
- log() << "note: not profiling because db went away for "
- << CurOp::get(txn)->getNS();
- break;
+ std::unique_ptr<AutoGetDb> autoGetDb;
+ if (acquireDbXLock) {
+ autoGetDb.reset(new AutoGetDb(txn, dbName, MODE_X));
+ if (autoGetDb->getDb()) {
+ createProfileCollection(txn, autoGetDb->getDb());
}
+ } else {
+ autoGetDb.reset(new AutoGetDb(txn, dbName, MODE_IX));
+ }
- Lock::CollectionLock collLock(txn->lockState(), db->getProfilingNS(), MODE_IX);
-
- Collection* const coll = db->getCollection(db->getProfilingNS());
- if (coll) {
- WriteUnitOfWork wuow(txn);
- coll->insertDocument(txn, p, false);
- wuow.commit();
+ Database* const db = autoGetDb->getDb();
+ if (!db) {
+ // Database disappeared
+ log() << "note: not profiling because db went away for "
+ << CurOp::get(txn)->getNS();
+ break;
+ }
- break;
- }
- else if (!acquireDbXLock &&
- (!wasLocked || txn->lockState()->isDbLockedForMode(dbName, MODE_X))) {
- // Try to create the collection only if we are not under lock, in order to
- // avoid deadlocks due to lock conversion. This would only be hit if someone
- // deletes the profiler collection after setting profile level.
- acquireDbXLock = true;
- }
- else {
- // Cannot write the profile information
- break;
- }
+ Lock::CollectionLock collLock(txn->lockState(), db->getProfilingNS(), MODE_IX);
+
+ Collection* const coll = db->getCollection(db->getProfilingNS());
+ if (coll) {
+ WriteUnitOfWork wuow(txn);
+ coll->insertDocument(txn, p, false);
+ wuow.commit();
+
+ break;
+ } else if (!acquireDbXLock &&
+ (!wasLocked || txn->lockState()->isDbLockedForMode(dbName, MODE_X))) {
+ // Try to create the collection only if we are not under lock, in order to
+ // avoid deadlocks due to lock conversion. This would only be hit if someone
+ // deletes the profiler collection after setting profile level.
+ acquireDbXLock = true;
+ } else {
+ // Cannot write the profile information
+ break;
}
}
- catch (const AssertionException& assertionEx) {
- warning() << "Caught Assertion while trying to profile "
- << opToString(op)
- << " against " << CurOp::get(txn)->getNS()
- << ": " << assertionEx.toString() << endl;
- }
+ } catch (const AssertionException& assertionEx) {
+ warning() << "Caught Assertion while trying to profile " << opToString(op) << " against "
+ << CurOp::get(txn)->getNS() << ": " << assertionEx.toString() << endl;
}
+}
- Status createProfileCollection(OperationContext* txn, Database *db) {
- invariant(txn->lockState()->isDbLockedForMode(db->name(), MODE_X));
-
- const std::string dbProfilingNS(db->getProfilingNS());
+Status createProfileCollection(OperationContext* txn, Database* db) {
+ invariant(txn->lockState()->isDbLockedForMode(db->name(), MODE_X));
- Collection* const collection = db->getCollection(dbProfilingNS);
- if (collection) {
- if (!collection->isCapped()) {
- return Status(ErrorCodes::NamespaceExists,
- str::stream() << dbProfilingNS << " exists but isn't capped");
- }
+ const std::string dbProfilingNS(db->getProfilingNS());
- return Status::OK();
+ Collection* const collection = db->getCollection(dbProfilingNS);
+ if (collection) {
+ if (!collection->isCapped()) {
+ return Status(ErrorCodes::NamespaceExists,
+ str::stream() << dbProfilingNS << " exists but isn't capped");
}
- // system.profile namespace doesn't exist; create it
- log() << "Creating profile collection: " << dbProfilingNS << endl;
+ return Status::OK();
+ }
- CollectionOptions collectionOptions;
- collectionOptions.capped = true;
- collectionOptions.cappedSize = 1024 * 1024;
+ // system.profile namespace doesn't exist; create it
+ log() << "Creating profile collection: " << dbProfilingNS << endl;
- WriteUnitOfWork wunit(txn);
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- invariant(db->createCollection(txn, dbProfilingNS, collectionOptions));
- wunit.commit();
+ CollectionOptions collectionOptions;
+ collectionOptions.capped = true;
+ collectionOptions.cappedSize = 1024 * 1024;
- return Status::OK();
- }
+ WriteUnitOfWork wunit(txn);
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+ invariant(db->createCollection(txn, dbProfilingNS, collectionOptions));
+ wunit.commit();
+
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
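profile() above first attempts the write under a database intent lock and only escalates to MODE_X, so it can create the system.profile collection, when the collection is missing and escalating cannot cause a deadlock-prone lock upgrade. A rough sketch of that retry shape, using hypothetical lockDb/createCollection/insertInto helpers rather than the real AutoGetDb API:

#include <set>
#include <string>

// Hypothetical stand-ins; the real code uses AutoGetDb with MODE_IX/MODE_X,
// a collection lock, and a WriteUnitOfWork around the insert.
static std::set<std::string> existingCollections;
static void lockDb(const std::string& /*db*/, bool /*exclusive*/) {}
static void createCollection(const std::string& ns) {
    existingCollections.insert(ns);
}
static bool insertInto(const std::string& ns, const std::string& /*doc*/) {
    return existingCollections.count(ns) > 0;  // fails if the collection is missing
}

// First pass runs under the weaker intent lock; only when the profile
// collection turns out to be missing does the loop come back around holding
// the exclusive lock so the collection can be created safely.
static void writeProfileDoc(const std::string& db, const std::string& doc) {
    bool escalate = false;
    while (true) {
        lockDb(db, escalate);
        if (escalate)
            createCollection(db + ".system.profile");
        if (insertInto(db + ".system.profile", doc))
            return;  // profile entry written
        if (escalate)
            return;  // still cannot write; give up rather than spin
        escalate = true;  // retry once with the stronger lock
    }
}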
diff --git a/src/mongo/db/introspect.h b/src/mongo/db/introspect.h
index 2a6ec2a477f..0bfc3863e42 100644
--- a/src/mongo/db/introspect.h
+++ b/src/mongo/db/introspect.h
@@ -32,17 +32,17 @@
namespace mongo {
- class Database;
- class OperationContext;
+class Database;
+class OperationContext;
- /**
- * Invoked when database profile is enabled.
- */
- void profile(OperationContext* txn, int op);
+/**
+ * Invoked when database profile is enabled.
+ */
+void profile(OperationContext* txn, int op);
- /**
- * Pre-creates the profile collection for the specified database.
- */
- Status createProfileCollection(OperationContext* txn, Database *db);
+/**
+ * Pre-creates the profile collection for the specified database.
+ */
+Status createProfileCollection(OperationContext* txn, Database* db);
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/invalidation_type.h b/src/mongo/db/invalidation_type.h
index d51df69a67c..70c40ba9103 100644
--- a/src/mongo/db/invalidation_type.h
+++ b/src/mongo/db/invalidation_type.h
@@ -30,13 +30,13 @@
namespace mongo {
- enum InvalidationType {
- // The RecordId is about to be deleted. The receiver of this invalidate call cannot use
- // the RecordId after it returns from the invalidate.
- INVALIDATION_DELETION,
+enum InvalidationType {
+ // The RecordId is about to be deleted. The receiver of this invalidate call cannot use
+ // the RecordId after it returns from the invalidate.
+ INVALIDATION_DELETION,
- // The RecordId's contents are about to change.
- INVALIDATION_MUTATION,
- };
+ // The RecordId's contents are about to change.
+ INVALIDATION_MUTATION,
+};
} // namespace mongo
diff --git a/src/mongo/db/jsobj.h b/src/mongo/db/jsobj.h
index 6a4e962b683..1135b34aa5d 100644
--- a/src/mongo/db/jsobj.h
+++ b/src/mongo/db/jsobj.h
@@ -52,4 +52,3 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/ordering.h"
#include "mongo/base/string_data.h"
-
diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp
index b62885b97d1..9ef77deb639 100644
--- a/src/mongo/db/keypattern.cpp
+++ b/src/mongo/db/keypattern.cpp
@@ -35,68 +35,67 @@
namespace mongo {
- KeyPattern::KeyPattern( const BSONObj& pattern ): _pattern( pattern ) {}
+KeyPattern::KeyPattern(const BSONObj& pattern) : _pattern(pattern) {}
- bool KeyPattern::isIdKeyPattern(const BSONObj& pattern) {
- BSONObjIterator i(pattern);
- BSONElement e = i.next();
- // _id index must have form exactly {_id : 1} or {_id : -1}.
- // Allows an index of form {_id : "hashed"} to exist but
- // do not consider it to be the primary _id index
- return (0 == strcmp(e.fieldName(), "_id"))
- && (e.numberInt() == 1 || e.numberInt() == -1)
- && i.next().eoo();
- }
+bool KeyPattern::isIdKeyPattern(const BSONObj& pattern) {
+ BSONObjIterator i(pattern);
+ BSONElement e = i.next();
+ // _id index must have form exactly {_id : 1} or {_id : -1}.
+ // Allows an index of form {_id : "hashed"} to exist but
+ // do not consider it to be the primary _id index
+ return (0 == strcmp(e.fieldName(), "_id")) && (e.numberInt() == 1 || e.numberInt() == -1) &&
+ i.next().eoo();
+}
- bool KeyPattern::isOrderedKeyPattern(const BSONObj& pattern) {
- return IndexNames::BTREE == IndexNames::findPluginName(pattern);
- }
+bool KeyPattern::isOrderedKeyPattern(const BSONObj& pattern) {
+ return IndexNames::BTREE == IndexNames::findPluginName(pattern);
+}
- bool KeyPattern::isHashedKeyPattern(const BSONObj& pattern) {
- return IndexNames::HASHED == IndexNames::findPluginName(pattern);
- }
+bool KeyPattern::isHashedKeyPattern(const BSONObj& pattern) {
+ return IndexNames::HASHED == IndexNames::findPluginName(pattern);
+}
- BSONObj KeyPattern::extendRangeBound( const BSONObj& bound , bool makeUpperInclusive ) const {
- BSONObjBuilder newBound( bound.objsize() );
+BSONObj KeyPattern::extendRangeBound(const BSONObj& bound, bool makeUpperInclusive) const {
+ BSONObjBuilder newBound(bound.objsize());
- BSONObjIterator src( bound );
- BSONObjIterator pat( _pattern );
+ BSONObjIterator src(bound);
+ BSONObjIterator pat(_pattern);
- while( src.more() ){
- massert( 16649 ,
- str::stream() << "keyPattern " << _pattern << " shorter than bound " << bound,
- pat.more() );
- BSONElement srcElt = src.next();
- BSONElement patElt = pat.next();
- massert( 16634 ,
- str::stream() << "field names of bound " << bound
- << " do not match those of keyPattern " << _pattern ,
- str::equals( srcElt.fieldName() , patElt.fieldName() ) );
- newBound.append( srcElt );
- }
- while( pat.more() ){
- BSONElement patElt = pat.next();
- // for non 1/-1 field values, like {a : "hashed"}, treat order as ascending
- int order = patElt.isNumber() ? patElt.numberInt() : 1;
- // flip the order semantics if this is an upper bound
- if ( makeUpperInclusive ) order *= -1;
+ while (src.more()) {
+ massert(16649,
+ str::stream() << "keyPattern " << _pattern << " shorter than bound " << bound,
+ pat.more());
+ BSONElement srcElt = src.next();
+ BSONElement patElt = pat.next();
+ massert(16634,
+ str::stream() << "field names of bound " << bound
+ << " do not match those of keyPattern " << _pattern,
+ str::equals(srcElt.fieldName(), patElt.fieldName()));
+ newBound.append(srcElt);
+ }
+ while (pat.more()) {
+ BSONElement patElt = pat.next();
+ // for non 1/-1 field values, like {a : "hashed"}, treat order as ascending
+ int order = patElt.isNumber() ? patElt.numberInt() : 1;
+ // flip the order semantics if this is an upper bound
+ if (makeUpperInclusive)
+ order *= -1;
- if( order > 0 ){
- newBound.appendMinKey( patElt.fieldName() );
- }
- else {
- newBound.appendMaxKey( patElt.fieldName() );
- }
+ if (order > 0) {
+ newBound.appendMinKey(patElt.fieldName());
+ } else {
+ newBound.appendMaxKey(patElt.fieldName());
}
- return newBound.obj();
}
+ return newBound.obj();
+}
- BSONObj KeyPattern::globalMin() const {
- return extendRangeBound(BSONObj(), false);
- }
+BSONObj KeyPattern::globalMin() const {
+ return extendRangeBound(BSONObj(), false);
+}
- BSONObj KeyPattern::globalMax() const {
- return extendRangeBound(BSONObj(), true);
- }
+BSONObj KeyPattern::globalMax() const {
+ return extendRangeBound(BSONObj(), true);
+}
-} // namespace mongo
+} // namespace mongo
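extendRangeBound above copies the supplied prefix bound field by field and then pads every remaining keyPattern field with MinKey or MaxKey, flipping the choice for descending (-1) fields and flipping it again when makeUpperInclusive is set. The same padding rule as a standalone sketch, with its simplifications noted in the comments:

#include <string>
#include <utility>
#include <vector>

// Illustration over plain (field, direction) pairs; "MIN"/"MAX" stand in for
// BSON MinKey/MaxKey, and non-numeric directions such as "hashed" (treated as
// ascending by the real code) are not modeled here.
static std::vector<std::pair<std::string, std::string>> extendBound(
    const std::vector<std::pair<std::string, int>>& keyPattern,
    std::vector<std::pair<std::string, std::string>> bound,  // a prefix of keyPattern
    bool makeUpperInclusive) {
    for (size_t i = bound.size(); i < keyPattern.size(); ++i) {
        int order = keyPattern[i].second;  // 1 ascending, -1 descending
        if (makeUpperInclusive)
            order *= -1;  // flip so the upper bound compares greater
        bound.emplace_back(keyPattern[i].first, order > 0 ? "MIN" : "MAX");
    }
    return bound;
}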
diff --git a/src/mongo/db/keypattern.h b/src/mongo/db/keypattern.h
index 78e1f3d59bf..f9ac83983e3 100644
--- a/src/mongo/db/keypattern.h
+++ b/src/mongo/db/keypattern.h
@@ -33,85 +33,88 @@
namespace mongo {
+/**
+ * A KeyPattern is an expression describing a transformation of a document into a
+ * document key. Document keys are used to store documents in indices and to target
+ * sharded queries.
+ *
+ * The root field names of KeyPatterns are always (potentially-dotted) paths, and the values of
+ * the fields describe the type of indexing over the found elements.
+ *
+ * Examples:
+ * { a : 1 }
+ * { a : 1 , b : -1 }
+ * { a : "hashed" }
+ */
+class KeyPattern {
+public:
/**
- * A KeyPattern is an expression describing a transformation of a document into a
- * document key. Document keys are used to store documents in indices and to target
- * sharded queries.
- *
- * The root field names of KeyPatterns are always (potentially-dotted) paths, and the values of
- * the fields describe the type of indexing over the found elements.
- *
- * Examples:
- * { a : 1 }
- * { a : 1 , b : -1 }
- * { a : "hashed" }
+ * Is the provided key pattern the index over the ID field?
+ * The always required ID index is always {_id: 1} or {_id: -1}.
*/
- class KeyPattern {
- public:
-
- /**
- * Is the provided key pattern the index over the ID field?
- * The always required ID index is always {_id: 1} or {_id: -1}.
- */
- static bool isIdKeyPattern(const BSONObj& pattern);
+ static bool isIdKeyPattern(const BSONObj& pattern);
- /**
- * Is the provided key pattern ordered increasing or decreasing or not?
- */
- static bool isOrderedKeyPattern(const BSONObj& pattern);
+ /**
+ * Is the provided key pattern ordered increasing or decreasing or not?
+ */
+ static bool isOrderedKeyPattern(const BSONObj& pattern);
- /**
- * Does the provided key pattern hash its keys?
- */
- static bool isHashedKeyPattern(const BSONObj& pattern);
+ /**
+ * Does the provided key pattern hash its keys?
+ */
+ static bool isHashedKeyPattern(const BSONObj& pattern);
- /**
- * Constructs a new key pattern based on a BSON document
- */
- KeyPattern(const BSONObj& pattern);
+ /**
+ * Constructs a new key pattern based on a BSON document
+ */
+ KeyPattern(const BSONObj& pattern);
- /**
- * Returns a BSON representation of this KeyPattern.
- */
- const BSONObj& toBSON() const { return _pattern; }
+ /**
+ * Returns a BSON representation of this KeyPattern.
+ */
+ const BSONObj& toBSON() const {
+ return _pattern;
+ }
- /**
- * Returns a string representation of this KeyPattern
- */
- std::string toString() const{ return toBSON().toString(); }
+ /**
+ * Returns a string representation of this KeyPattern
+ */
+ std::string toString() const {
+ return toBSON().toString();
+ }
- /* Takes a BSONObj whose field names are a prefix of the fields in this keyPattern, and
- * outputs a new bound with MinKey values appended to match the fields in this keyPattern
- * (or MaxKey values for descending -1 fields). This is useful in sharding for
- * calculating chunk boundaries when tag ranges are specified on a prefix of the actual
- * shard key, or for calculating index bounds when the shard key is a prefix of the actual
- * index used.
- *
- * @param makeUpperInclusive If true, then MaxKeys instead of MinKeys will be appended, so
- * that the output bound will compare *greater* than the bound being extended (note that
- * -1's in the keyPattern will swap MinKey/MaxKey vals. See examples).
- *
- * Examples:
- * If this keyPattern is {a : 1}
- * extendRangeBound( {a : 55}, false) --> {a : 55}
- *
- * If this keyPattern is {a : 1, b : 1}
- * extendRangeBound( {a : 55}, false) --> {a : 55, b : MinKey}
- * extendRangeBound( {a : 55}, true ) --> {a : 55, b : MaxKey}
- *
- * If this keyPattern is {a : 1, b : -1}
- * extendRangeBound( {a : 55}, false) --> {a : 55, b : MaxKey}
- * extendRangeBound( {a : 55}, true ) --> {a : 55, b : MinKey}
- */
- BSONObj extendRangeBound( const BSONObj& bound , bool makeUpperInclusive ) const;
+ /* Takes a BSONObj whose field names are a prefix of the fields in this keyPattern, and
+ * outputs a new bound with MinKey values appended to match the fields in this keyPattern
+ * (or MaxKey values for descending -1 fields). This is useful in sharding for
+ * calculating chunk boundaries when tag ranges are specified on a prefix of the actual
+ * shard key, or for calculating index bounds when the shard key is a prefix of the actual
+ * index used.
+ *
+ * @param makeUpperInclusive If true, then MaxKeys instead of MinKeys will be appended, so
+ * that the output bound will compare *greater* than the bound being extended (note that
+ * -1's in the keyPattern will swap MinKey/MaxKey vals. See examples).
+ *
+ * Examples:
+ * If this keyPattern is {a : 1}
+ * extendRangeBound( {a : 55}, false) --> {a : 55}
+ *
+ * If this keyPattern is {a : 1, b : 1}
+ * extendRangeBound( {a : 55}, false) --> {a : 55, b : MinKey}
+ * extendRangeBound( {a : 55}, true ) --> {a : 55, b : MaxKey}
+ *
+ * If this keyPattern is {a : 1, b : -1}
+ * extendRangeBound( {a : 55}, false) --> {a : 55, b : MaxKey}
+ * extendRangeBound( {a : 55}, true ) --> {a : 55, b : MinKey}
+ */
+ BSONObj extendRangeBound(const BSONObj& bound, bool makeUpperInclusive) const;
- BSONObj globalMin() const;
+ BSONObj globalMin() const;
- BSONObj globalMax() const;
+ BSONObj globalMax() const;
- private:
- BSONObj _pattern;
- };
+private:
+ BSONObj _pattern;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/keypattern_test.cpp b/src/mongo/db/keypattern_test.cpp
index 63b9b325d75..300843ecfea 100644
--- a/src/mongo/db/keypattern_test.cpp
+++ b/src/mongo/db/keypattern_test.cpp
@@ -31,112 +31,111 @@
namespace {
- using namespace mongo;
-
- TEST(KeyPattern, ExtendRangeBound) {
-
- BSONObj bound = BSON("a" << 55);
- BSONObj longBound = BSON("a" << 55 << "b" << 66);
-
- //test keyPattern shorter than bound, should fail
- {
- KeyPattern keyPat(BSON("a" << 1));
- ASSERT_THROWS(keyPat.extendRangeBound(longBound, false), MsgAssertionException);
- }
-
- //test keyPattern doesn't match bound, should fail
- {
- KeyPattern keyPat(BSON("b" << 1));
- ASSERT_THROWS(keyPat.extendRangeBound(bound, false), MsgAssertionException);
- }
- {
- KeyPattern keyPat(BSON("a" << 1 << "c" << 1));
- ASSERT_THROWS(keyPat.extendRangeBound(longBound, false), MsgAssertionException);
- }
-
- //test keyPattern same as bound
- {
- KeyPattern keyPat(BSON("a" << 1));
- BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55));
- }
- {
- KeyPattern keyPat(BSON("a" << 1));
- BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55));
- }
-
- //test keyPattern longer than bound, simple
- {
- KeyPattern keyPat(BSON("a" << 1 << "b" << 1));
- BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY));
- }
- {
- KeyPattern keyPat(BSON("a" << 1 << "b" << 1));
- BSONObj newB = keyPat.extendRangeBound(bound, true);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY));
- }
-
- //test keyPattern longer than bound, more complex pattern directions
- {
- KeyPattern keyPat(BSON("a" << 1 << "b" << -1));
- BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY));
- }
- {
- KeyPattern keyPat(BSON("a" << 1 << "b" << -1));
- BSONObj newB = keyPat.extendRangeBound(bound, true);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY));
- }
- {
-
- KeyPattern keyPat(BSON("a" << 1 << "b" << -1 << "c" << 1));
- BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY << "c" << MINKEY));
- }
- {
- KeyPattern keyPat(BSON("a" << 1 << "b" << -1 << "c" << 1));
- BSONObj newB = keyPat.extendRangeBound(bound, true);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY << "c" << MAXKEY));
- }
+using namespace mongo;
+
+TEST(KeyPattern, ExtendRangeBound) {
+ BSONObj bound = BSON("a" << 55);
+ BSONObj longBound = BSON("a" << 55 << "b" << 66);
+
+ // test keyPattern shorter than bound, should fail
+ {
+ KeyPattern keyPat(BSON("a" << 1));
+ ASSERT_THROWS(keyPat.extendRangeBound(longBound, false), MsgAssertionException);
+ }
+
+ // test keyPattern doesn't match bound, should fail
+ {
+ KeyPattern keyPat(BSON("b" << 1));
+ ASSERT_THROWS(keyPat.extendRangeBound(bound, false), MsgAssertionException);
+ }
+ {
+ KeyPattern keyPat(BSON("a" << 1 << "c" << 1));
+ ASSERT_THROWS(keyPat.extendRangeBound(longBound, false), MsgAssertionException);
}
- TEST(KeyPattern, GlobalMinMax) {
+ // test keyPattern same as bound
+ {
+ KeyPattern keyPat(BSON("a" << 1));
+ BSONObj newB = keyPat.extendRangeBound(bound, false);
+ ASSERT_EQUALS(newB, BSON("a" << 55));
+ }
+ {
+ KeyPattern keyPat(BSON("a" << 1));
+ BSONObj newB = keyPat.extendRangeBound(bound, false);
+ ASSERT_EQUALS(newB, BSON("a" << 55));
+ }
+
+ // test keyPattern longer than bound, simple
+ {
+ KeyPattern keyPat(BSON("a" << 1 << "b" << 1));
+ BSONObj newB = keyPat.extendRangeBound(bound, false);
+ ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY));
+ }
+ {
+ KeyPattern keyPat(BSON("a" << 1 << "b" << 1));
+ BSONObj newB = keyPat.extendRangeBound(bound, true);
+ ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY));
+ }
- //
- // Simple KeyPatterns
- //
+ // test keyPattern longer than bound, more complex pattern directions
+ {
+ KeyPattern keyPat(BSON("a" << 1 << "b" << -1));
+ BSONObj newB = keyPat.extendRangeBound(bound, false);
+ ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY));
+ }
+ {
+ KeyPattern keyPat(BSON("a" << 1 << "b" << -1));
+ BSONObj newB = keyPat.extendRangeBound(bound, true);
+ ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY));
+ }
+ {
+ KeyPattern keyPat(BSON("a" << 1 << "b" << -1 << "c" << 1));
+ BSONObj newB = keyPat.extendRangeBound(bound, false);
+ ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY << "c" << MINKEY));
+ }
+ {
+ KeyPattern keyPat(BSON("a" << 1 << "b" << -1 << "c" << 1));
+ BSONObj newB = keyPat.extendRangeBound(bound, true);
+ ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY << "c" << MAXKEY));
+ }
+}
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1)).globalMin(), BSON("a" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1)).globalMax(), BSON("a" << MAXKEY));
+TEST(KeyPattern, GlobalMinMax) {
+ //
+ // Simple KeyPatterns
+ //
- ASSERT_EQUALS(KeyPattern(BSON("a" << -1)).globalMin(), BSON("a" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << -1)).globalMax(), BSON("a" << MINKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a" << 1)).globalMin(), BSON("a" << MINKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a" << 1)).globalMax(), BSON("a" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << 1.0)).globalMin(),
- BSON("a" << MINKEY << "b" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << 1.0)).globalMax(),
- BSON("a" << MAXKEY << "b" << MAXKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a" << -1)).globalMin(), BSON("a" << MAXKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a" << -1)).globalMax(), BSON("a" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << -1.0f)).globalMin(),
- BSON("a" << MINKEY << "b" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << -1.0f)).globalMax(),
- BSON("a" << MAXKEY << "b" << MINKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << 1.0)).globalMin(),
+ BSON("a" << MINKEY << "b" << MINKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << 1.0)).globalMax(),
+ BSON("a" << MAXKEY << "b" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << "hashed")).globalMin(), BSON("a" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << "hashed")).globalMax(), BSON("a" << MAXKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << -1.0f)).globalMin(),
+ BSON("a" << MINKEY << "b" << MAXKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << -1.0f)).globalMax(),
+ BSON("a" << MAXKEY << "b" << MINKEY));
- //
- // Nested KeyPatterns
- //
+ ASSERT_EQUALS(KeyPattern(BSON("a"
+ << "hashed")).globalMin(),
+ BSON("a" << MINKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a"
+ << "hashed")).globalMax(),
+ BSON("a" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a.b" << 1)).globalMin(), BSON("a.b" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a.b" << 1)).globalMax(), BSON("a.b" << MAXKEY));
+ //
+ // Nested KeyPatterns
+ //
- ASSERT_EQUALS(KeyPattern(BSON("a.b.c" << -1)).globalMin(), BSON("a.b.c" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a.b.c" << -1)).globalMax(), BSON("a.b.c" << MINKEY));
- }
+ ASSERT_EQUALS(KeyPattern(BSON("a.b" << 1)).globalMin(), BSON("a.b" << MINKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a.b" << 1)).globalMax(), BSON("a.b" << MAXKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a.b.c" << -1)).globalMin(), BSON("a.b.c" << MAXKEY));
+ ASSERT_EQUALS(KeyPattern(BSON("a.b.c" << -1)).globalMax(), BSON("a.b.c" << MINKEY));
+}
}
-
diff --git a/src/mongo/db/lasterror.cpp b/src/mongo/db/lasterror.cpp
index d242cdedc1a..624329c4ddd 100644
--- a/src/mongo/db/lasterror.cpp
+++ b/src/mongo/db/lasterror.cpp
@@ -36,82 +36,80 @@
namespace mongo {
- LastError LastError::noError;
+LastError LastError::noError;
- const Client::Decoration<LastError> LastError::get = Client::declareDecoration<LastError>();
+const Client::Decoration<LastError> LastError::get = Client::declareDecoration<LastError>();
- void LastError::reset(bool valid) {
- *this = LastError();
- _valid = valid;
- }
+void LastError::reset(bool valid) {
+ *this = LastError();
+ _valid = valid;
+}
- void LastError::setLastError(int code, std::string msg) {
- if (_disabled) {
- return;
- }
- reset(true);
- _code = code;
- _msg = std::move(msg);
+void LastError::setLastError(int code, std::string msg) {
+ if (_disabled) {
+ return;
}
-
- void LastError::recordInsert(long long nObjects) {
- reset(true);
- _nObjects = nObjects;
+ reset(true);
+ _code = code;
+ _msg = std::move(msg);
+}
+
+void LastError::recordInsert(long long nObjects) {
+ reset(true);
+ _nObjects = nObjects;
+}
+
+void LastError::recordUpdate(bool updateObjects, long long nObjects, BSONObj upsertedId) {
+ reset(true);
+ _nObjects = nObjects;
+ _updatedExisting = updateObjects ? True : False;
+ if (upsertedId.valid() && upsertedId.hasField(kUpsertedFieldName))
+ _upsertedId = upsertedId;
+}
+
+void LastError::recordDelete(long long nDeleted) {
+ reset(true);
+ _nObjects = nDeleted;
+}
+
+bool LastError::appendSelf(BSONObjBuilder& b, bool blankErr) const {
+ if (!_valid) {
+ if (blankErr)
+ b.appendNull("err");
+ b.append("n", 0);
+ return false;
}
- void LastError::recordUpdate(bool updateObjects, long long nObjects, BSONObj upsertedId) {
- reset(true);
- _nObjects = nObjects;
- _updatedExisting = updateObjects ? True : False;
- if ( upsertedId.valid() && upsertedId.hasField(kUpsertedFieldName) )
- _upsertedId = upsertedId;
+ if (_msg.empty()) {
+ if (blankErr) {
+ b.appendNull("err");
+ }
+ } else {
+ b.append("err", _msg);
}
- void LastError::recordDelete(long long nDeleted) {
- reset(true);
- _nObjects = nDeleted;
+ if (_code)
+ b.append("code", _code);
+ if (_updatedExisting != NotUpdate)
+ b.appendBool("updatedExisting", _updatedExisting == True);
+ if (!_upsertedId.isEmpty()) {
+ b.append(_upsertedId[kUpsertedFieldName]);
}
+ b.appendNumber("n", _nObjects);
- bool LastError::appendSelf(BSONObjBuilder &b , bool blankErr) const {
+ return !_msg.empty();
+}
- if (!_valid) {
- if (blankErr)
- b.appendNull( "err" );
- b.append( "n", 0 );
- return false;
- }
- if (_msg.empty()) {
- if (blankErr) {
- b.appendNull( "err" );
- }
- }
- else {
- b.append("err", _msg);
- }
+void LastError::disable() {
+ invariant(!_disabled);
+ _disabled = true;
+ _nPrev--; // caller is a command that shouldn't count as an operation
+}
- if (_code)
- b.append("code" , _code);
- if (_updatedExisting != NotUpdate)
- b.appendBool("updatedExisting", _updatedExisting == True);
- if (!_upsertedId.isEmpty()) {
- b.append(_upsertedId[kUpsertedFieldName]);
- }
- b.appendNumber("n", _nObjects);
-
- return !_msg.empty();
- }
-
-
- void LastError::disable() {
- invariant(!_disabled);
- _disabled = true;
- _nPrev--; // caller is a command that shouldn't count as an operation
- }
-
- void LastError::startRequest() {
- _disabled = false;
- ++_nPrev;
- }
+void LastError::startRequest() {
+ _disabled = false;
+ ++_nPrev;
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/lasterror.h b/src/mongo/db/lasterror.h
index cc1d0616927..e6f65e4aed7 100644
--- a/src/mongo/db/lasterror.h
+++ b/src/mongo/db/lasterror.h
@@ -35,84 +35,88 @@
#include "mongo/db/jsobj.h"
namespace mongo {
- class BSONObjBuilder;
-
- static const char kUpsertedFieldName[] = "upserted";
-
- class LastError {
+class BSONObjBuilder;
+
+static const char kUpsertedFieldName[] = "upserted";
+
+class LastError {
+public:
+ static const Client::Decoration<LastError> get;
+
+ /**
+ * Resets the object to a newly constructed state. If "valid" is true, marks the last-error
+ * object as "valid".
+ */
+ void reset(bool valid = false);
+
+ /**
+ * when db receives a message/request, call this
+ */
+ void startRequest();
+
+ /**
+ * Disables error recording for the current operation.
+ */
+ void disable();
+
+ /**
+ * Sets the error information for the current operation, if error recording was not
+ * explicitly disabled via a call to disable() since the call to startRequest.
+ */
+ void setLastError(int code, std::string msg);
+
+ void recordInsert(long long nObjects);
+
+ void recordUpdate(bool updateObjects, long long nObjects, BSONObj upsertedId);
+
+ void recordDelete(long long nDeleted);
+
+ /**
+ * Writes the last-error state described by this object to "b".
+ *
+ * If "blankErr" is true, the "err" field will be explicitly set to null in the result
+ * instead of being omitted when the error string is empty.
+ *
+ * Returns true if there is a non-empty error message.
+ */
+ bool appendSelf(BSONObjBuilder& b, bool blankErr) const;
+
+ bool isValid() const {
+ return _valid;
+ }
+ int const getNPrev() const {
+ return _nPrev;
+ }
+
+ class Disabled {
public:
- static const Client::Decoration<LastError> get;
-
- /**
- * Resets the object to a newly constructed state. If "valid" is true, marks the last-error
- * object as "valid".
- */
- void reset(bool valid = false);
-
- /**
- * when db receives a message/request, call this
- */
- void startRequest();
-
- /**
- * Disables error recording for the current operation.
- */
- void disable();
-
- /**
- * Sets the error information for the current operation, if error recording was not
- * explicitly disabled via a call to disable() since the call to startRequest.
- */
- void setLastError(int code, std::string msg);
-
- void recordInsert(long long nObjects);
-
- void recordUpdate(bool updateObjects, long long nObjects, BSONObj upsertedId);
-
- void recordDelete(long long nDeleted);
-
- /**
- * Writes the last-error state described by this object to "b".
- *
- * If "blankErr" is true, the "err" field will be explicitly set to null in the result
- * instead of being omitted when the error string is empty.
- *
- * Returns true if there is a non-empty error message.
- */
- bool appendSelf(BSONObjBuilder &b, bool blankErr) const;
-
- bool isValid() const { return _valid; }
- int const getNPrev() const { return _nPrev; }
-
- class Disabled {
- public:
- explicit Disabled(LastError* le) : _le(le), _prev(le->_disabled) {
- _le->_disabled = true;
- }
-
- ~Disabled() {
- _le->_disabled = _prev;
- }
-
- private:
- LastError * const _le;
- const bool _prev;
- };
-
- static LastError noError;
+ explicit Disabled(LastError* le) : _le(le), _prev(le->_disabled) {
+ _le->_disabled = true;
+ }
+
+ ~Disabled() {
+ _le->_disabled = _prev;
+ }
private:
- enum UpdatedExistingType { NotUpdate, True, False };
-
- int _code = 0;
- std::string _msg = {};
- UpdatedExistingType _updatedExisting = NotUpdate;
- // _id field value from inserted doc, returned as kUpsertedFieldName (above)
- BSONObj _upsertedId = {};
- long long _nObjects = 0;
- int _nPrev = 1;
- bool _valid = false;
- bool _disabled = false;
+ LastError* const _le;
+ const bool _prev;
};
-} // namespace mongo
+ static LastError noError;
+
+private:
+ enum UpdatedExistingType { NotUpdate, True, False };
+
+ int _code = 0;
+ std::string _msg = {};
+ UpdatedExistingType _updatedExisting = NotUpdate;
+ // _id field value from inserted doc, returned as kUpsertedFieldName (above)
+ BSONObj _upsertedId = {};
+ long long _nObjects = 0;
+ int _nPrev = 1;
+ bool _valid = false;
+ bool _disabled = false;
+};
+
+} // namespace mongo
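LastError::Disabled above is a small RAII guard: it saves the previous _disabled flag on construction and restores it on destruction, so error recording is suppressed only for the guard's scope even when guards nest. The same pattern in a minimal standalone form:

// Minimal sketch of the save-and-restore guard used by LastError::Disabled;
// `recordingDisabled` stands in for LastError::_disabled.
struct ErrorState {
    bool recordingDisabled = false;
};

class DisableRecording {
public:
    explicit DisableRecording(ErrorState* state)
        : _state(state), _prev(state->recordingDisabled) {
        _state->recordingDisabled = true;  // suppress recording inside this scope
    }
    ~DisableRecording() {
        _state->recordingDisabled = _prev;  // restore the previous setting, even when nested
    }

private:
    ErrorState* const _state;
    const bool _prev;
};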
diff --git a/src/mongo/db/log_process_details.cpp b/src/mongo/db/log_process_details.cpp
index a7d28928e26..9ba6c91299a 100644
--- a/src/mongo/db/log_process_details.cpp
+++ b/src/mongo/db/log_process_details.cpp
@@ -44,28 +44,27 @@
namespace mongo {
- using std::cout;
- using std::endl;
+using std::cout;
+using std::endl;
- bool is32bit() {
- return ( sizeof(int*) == 4 );
- }
+bool is32bit() {
+ return (sizeof(int*) == 4);
+}
- void logProcessDetails() {
- log() << mongodVersion() << endl;
- printGitVersion();
- printOpenSSLVersion();
- printAllocator();
- printCommandLineOpts();
- }
+void logProcessDetails() {
+ log() << mongodVersion() << endl;
+ printGitVersion();
+ printOpenSSLVersion();
+ printAllocator();
+ printCommandLineOpts();
+}
- void logProcessDetailsForLogRotate() {
- log() << "pid=" << ProcessId::getCurrent()
- << " port=" << serverGlobalParams.port
- << ( is32bit() ? " 32" : " 64" ) << "-bit "
- << "host=" << getHostNameCached();
+void logProcessDetailsForLogRotate() {
+ log() << "pid=" << ProcessId::getCurrent() << " port=" << serverGlobalParams.port
+ << (is32bit() ? " 32" : " 64") << "-bit "
+ << "host=" << getHostNameCached();
- logProcessDetails();
- }
+ logProcessDetails();
+}
-} //mongo
+} // mongo
diff --git a/src/mongo/db/log_process_details.h b/src/mongo/db/log_process_details.h
index a41837a6c49..04ef7ddfe5d 100644
--- a/src/mongo/db/log_process_details.h
+++ b/src/mongo/db/log_process_details.h
@@ -30,15 +30,15 @@
namespace mongo {
- /**
- * Writes useful information about the running process to the diagnostic log on startup.
- */
- void logProcessDetails();
+/**
+ * Writes useful information about the running process to the diagnostic log on startup.
+ */
+void logProcessDetails();
- /**
- * Writes useful information about the running process to diagnostic log
- * for after a log rotation.
- */
- void logProcessDetailsForLogRotate();
+/**
+ * Writes useful information about the running process to diagnostic log
+ * for after a log rotation.
+ */
+void logProcessDetailsForLogRotate();
} // namespace mongo
diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp
index 99975a88815..aa6a2ddbc30 100644
--- a/src/mongo/db/matcher/expression.cpp
+++ b/src/mongo/db/matcher/expression.cpp
@@ -35,46 +35,42 @@
namespace mongo {
- using std::string;
+using std::string;
- MatchExpression::MatchExpression( MatchType type )
- : _matchType( type ) { }
-
- string MatchExpression::toString() const {
- StringBuilder buf;
- debugString( buf, 0 );
- return buf.str();
- }
-
- void MatchExpression::_debugAddSpace( StringBuilder& debug, int level ) const {
- for ( int i = 0; i < level; i++ )
- debug << " ";
- }
-
- bool MatchExpression::matchesBSON( const BSONObj& doc, MatchDetails* details ) const {
- BSONMatchableDocument mydoc( doc );
- return matches( &mydoc, details );
- }
+MatchExpression::MatchExpression(MatchType type) : _matchType(type) {}
+string MatchExpression::toString() const {
+ StringBuilder buf;
+ debugString(buf, 0);
+ return buf.str();
+}
- void AtomicMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "$atomic\n";
- }
+void MatchExpression::_debugAddSpace(StringBuilder& debug, int level) const {
+ for (int i = 0; i < level; i++)
+ debug << " ";
+}
- void AtomicMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append("$isolated", 1);
- }
+bool MatchExpression::matchesBSON(const BSONObj& doc, MatchDetails* details) const {
+ BSONMatchableDocument mydoc(doc);
+ return matches(&mydoc, details);
+}
- void FalseMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "$false\n";
- }
- void FalseMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append("$false", 1);
- }
+void AtomicMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "$atomic\n";
+}
+void AtomicMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$isolated", 1);
}
+void FalseMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "$false\n";
+}
+void FalseMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$false", 1);
+}
+}
diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h
index c3b11d74258..3911bd0c863 100644
--- a/src/mongo/db/matcher/expression.h
+++ b/src/mongo/db/matcher/expression.h
@@ -40,223 +40,254 @@
namespace mongo {
- class TreeMatchExpression;
+class TreeMatchExpression;
+
+class MatchExpression {
+ MONGO_DISALLOW_COPYING(MatchExpression);
+
+public:
+ enum MatchType {
+ // tree types
+ AND,
+ OR,
+
+ // array types
+ ELEM_MATCH_OBJECT,
+ ELEM_MATCH_VALUE,
+ SIZE,
+
+ // leaf types
+ EQ,
+ LTE,
+ LT,
+ GT,
+ GTE,
+ REGEX,
+ MOD,
+ EXISTS,
+ MATCH_IN,
+
+ // Negations.
+ NOT,
+ NOR,
+
+ // special types
+ TYPE_OPERATOR,
+ GEO,
+ WHERE,
+
+ // things that maybe shouldn't even be nodes
+ ATOMIC,
+ ALWAYS_FALSE,
+
+ // Things that we parse but cannot be answered without an index.
+ GEO_NEAR,
+ TEXT,
+
+ // Expressions that are only created internally
+ INTERNAL_2DSPHERE_KEY_IN_REGION,
+ INTERNAL_2D_KEY_IN_REGION,
+ INTERNAL_2D_POINT_IN_ANNULUS
+ };
- class MatchExpression {
- MONGO_DISALLOW_COPYING( MatchExpression );
- public:
- enum MatchType {
- // tree types
- AND, OR,
-
- // array types
- ELEM_MATCH_OBJECT, ELEM_MATCH_VALUE, SIZE,
-
- // leaf types
- EQ, LTE, LT, GT, GTE, REGEX, MOD, EXISTS, MATCH_IN,
-
- // Negations.
- NOT, NOR,
-
- // special types
- TYPE_OPERATOR, GEO, WHERE,
-
- // things that maybe shouldn't even be nodes
- ATOMIC, ALWAYS_FALSE,
-
- // Things that we parse but cannot be answered without an index.
- GEO_NEAR, TEXT,
-
- // Expressions that are only created internally
- INTERNAL_2DSPHERE_KEY_IN_REGION, INTERNAL_2D_KEY_IN_REGION, INTERNAL_2D_POINT_IN_ANNULUS
- };
-
- MatchExpression( MatchType type );
- virtual ~MatchExpression(){}
-
- //
- // Structural/AST information
- //
-
- /**
- * What type is the node? See MatchType above.
- */
- MatchType matchType() const { return _matchType; }
-
- /**
- * How many children does the node have? Most nodes are leaves so the default impl. is for
- * a leaf.
- */
- virtual size_t numChildren() const { return 0; }
-
- /**
- * Get the i-th child.
- */
- virtual MatchExpression* getChild( size_t i ) const { return NULL; }
-
- /**
- * Get all the children of a node
- */
- virtual std::vector<MatchExpression*>* getChildVector() { return NULL; }
-
- /**
- * Get the path of the leaf. Returns StringData() if there is no path (node is logical).
- */
- virtual const StringData path() const { return StringData(); }
-
- /**
- * Notes on structure:
- * isLogical, isArray, and isLeaf define three partitions of all possible operators.
- *
- * isLogical can have children and its children can be arbitrary operators.
- *
- * isArray can have children and its children are predicates over one field.
- *
- * isLeaf is a predicate over one field.
- */
-
- /**
- * Is this node a logical operator? All of these inherit from ListOfMatchExpression.
- * AND, OR, NOT, NOR.
- */
- bool isLogical() const {
- return AND == _matchType || OR == _matchType || NOT == _matchType || NOR == _matchType;
- }
+ MatchExpression(MatchType type);
+ virtual ~MatchExpression() {}
- /**
- * Is this node an array operator? Array operators have multiple clauses but operate on one
- * field.
- *
- * ELEM_MATCH_VALUE, ELEM_MATCH_OBJECT, SIZE (ArrayMatchingMatchExpression)
- */
- bool isArray() const {
- return SIZE == _matchType
- || ELEM_MATCH_VALUE == _matchType
- || ELEM_MATCH_OBJECT == _matchType;
- }
+ //
+ // Structural/AST information
+ //
- /**
- * Not-internal nodes, predicates over one field. Almost all of these inherit from
- * LeafMatchExpression.
- *
- * Exceptions: WHERE, which doesn't have a field.
- * TYPE_OPERATOR, which inherits from MatchExpression due to unique array
- * semantics.
- */
- bool isLeaf() const {
- return !isArray() && !isLogical();
- }
+ /**
+ * What type is the node? See MatchType above.
+ */
+ MatchType matchType() const {
+ return _matchType;
+ }
- // XXX: document
- virtual MatchExpression* shallowClone() const = 0;
-
- // XXX document
- virtual bool equivalent( const MatchExpression* other ) const = 0;
-
- //
- // Determine if a document satisfies the tree-predicate.
- //
-
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const = 0;
-
- virtual bool matchesBSON( const BSONObj& doc, MatchDetails* details = 0 ) const;
-
- /**
- * Determines if the element satisfies the tree-predicate.
- * Not valid for all expressions (e.g. $where); in those cases, returns false.
- */
- virtual bool matchesSingleElement( const BSONElement& e ) const = 0;
-
- //
- // Tagging mechanism: Hang data off of the tree for retrieval later.
- //
-
- class TagData {
- public:
- virtual ~TagData() { }
- virtual void debugString(StringBuilder* builder) const = 0;
- virtual TagData* clone() const = 0;
- };
-
- /**
- * Takes ownership
- */
- void setTag(TagData* data) { _tagData.reset(data); }
- TagData* getTag() const { return _tagData.get(); }
- virtual void resetTag() {
- setTag(NULL);
- for (size_t i = 0; i < numChildren(); ++i) {
- getChild(i)->resetTag();
- }
- }
+ /**
+ * How many children does the node have? Most nodes are leaves so the default impl. is for
+ * a leaf.
+ */
+ virtual size_t numChildren() const {
+ return 0;
+ }
- //
- // Debug information
- //
- virtual std::string toString() const;
- virtual void debugString( StringBuilder& debug, int level = 0 ) const = 0;
- virtual void toBSON(BSONObjBuilder* out) const = 0;
+ /**
+ * Get the i-th child.
+ */
+ virtual MatchExpression* getChild(size_t i) const {
+ return NULL;
+ }
- protected:
- void _debugAddSpace( StringBuilder& debug, int level ) const;
+ /**
+ * Get all the children of a node
+ */
+ virtual std::vector<MatchExpression*>* getChildVector() {
+ return NULL;
+ }
- private:
- MatchType _matchType;
- std::unique_ptr<TagData> _tagData;
- };
+ /**
+ * Get the path of the leaf. Returns StringData() if there is no path (node is logical).
+ */
+ virtual const StringData path() const {
+ return StringData();
+ }
/**
- * this isn't really an expression, but a hint to other things
- * not sure where to put it in the end
+ * Notes on structure:
+ * isLogical, isArray, and isLeaf define three partitions of all possible operators.
+ *
+ * isLogical can have children and its children can be arbitrary operators.
+ *
+ * isArray can have children and its children are predicates over one field.
+ *
+ * isLeaf is a predicate over one field.
*/
- class AtomicMatchExpression : public MatchExpression {
- public:
- AtomicMatchExpression() : MatchExpression( ATOMIC ){}
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const {
- return true;
- }
+ /**
+ * Is this node a logical operator? All of these inherit from ListOfMatchExpression.
+ * AND, OR, NOT, NOR.
+ */
+ bool isLogical() const {
+ return AND == _matchType || OR == _matchType || NOT == _matchType || NOR == _matchType;
+ }
- virtual bool matchesSingleElement( const BSONElement& e ) const {
- return true;
- }
+ /**
+ * Is this node an array operator? Array operators have multiple clauses but operate on one
+ * field.
+ *
+ * ELEM_MATCH_VALUE, ELEM_MATCH_OBJECT, SIZE (ArrayMatchingMatchExpression)
+ */
+ bool isArray() const {
+ return SIZE == _matchType || ELEM_MATCH_VALUE == _matchType ||
+ ELEM_MATCH_OBJECT == _matchType;
+ }
- virtual MatchExpression* shallowClone() const {
- return new AtomicMatchExpression();
- }
+ /**
+ * Not-internal nodes, predicates over one field. Almost all of these inherit from
+ * LeafMatchExpression.
+ *
+ * Exceptions: WHERE, which doesn't have a field.
+ * TYPE_OPERATOR, which inherits from MatchExpression due to unique array
+ * semantics.
+ */
+ bool isLeaf() const {
+ return !isArray() && !isLogical();
+ }
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ // XXX: document
+ virtual MatchExpression* shallowClone() const = 0;
- virtual void toBSON(BSONObjBuilder* out) const;
+ // XXX document
+ virtual bool equivalent(const MatchExpression* other) const = 0;
- virtual bool equivalent( const MatchExpression* other ) const {
- return other->matchType() == ATOMIC;
- }
- };
+ //
+ // Determine if a document satisfies the tree-predicate.
+ //
+
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const = 0;
+
+ virtual bool matchesBSON(const BSONObj& doc, MatchDetails* details = 0) const;
+
+ /**
+ * Determines if the element satisfies the tree-predicate.
+ * Not valid for all expressions (e.g. $where); in those cases, returns false.
+ */
+ virtual bool matchesSingleElement(const BSONElement& e) const = 0;
+
+ //
+ // Tagging mechanism: Hang data off of the tree for retrieval later.
+ //
- class FalseMatchExpression : public MatchExpression {
+ class TagData {
public:
- FalseMatchExpression() : MatchExpression( ALWAYS_FALSE ){}
+ virtual ~TagData() {}
+ virtual void debugString(StringBuilder* builder) const = 0;
+ virtual TagData* clone() const = 0;
+ };
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const {
- return false;
+ /**
+ * Takes ownership
+ */
+ void setTag(TagData* data) {
+ _tagData.reset(data);
+ }
+ TagData* getTag() const {
+ return _tagData.get();
+ }
+ virtual void resetTag() {
+ setTag(NULL);
+ for (size_t i = 0; i < numChildren(); ++i) {
+ getChild(i)->resetTag();
}
+ }
- virtual bool matchesSingleElement( const BSONElement& e ) const {
- return false;
- }
+ //
+ // Debug information
+ //
+ virtual std::string toString() const;
+ virtual void debugString(StringBuilder& debug, int level = 0) const = 0;
+ virtual void toBSON(BSONObjBuilder* out) const = 0;
- virtual MatchExpression* shallowClone() const {
- return new FalseMatchExpression();
- }
+protected:
+ void _debugAddSpace(StringBuilder& debug, int level) const;
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+private:
+ MatchType _matchType;
+ std::unique_ptr<TagData> _tagData;
+};
- virtual void toBSON(BSONObjBuilder* out) const;
+/**
+ * this isn't really an expression, but a hint to other things
+ * not sure where to put it in the end
+ */
+class AtomicMatchExpression : public MatchExpression {
+public:
+ AtomicMatchExpression() : MatchExpression(ATOMIC) {}
- virtual bool equivalent( const MatchExpression* other ) const {
- return other->matchType() == ALWAYS_FALSE;
- }
- };
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const {
+ return true;
+ }
+
+ virtual bool matchesSingleElement(const BSONElement& e) const {
+ return true;
+ }
+
+ virtual MatchExpression* shallowClone() const {
+ return new AtomicMatchExpression();
+ }
+
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
+
+ virtual void toBSON(BSONObjBuilder* out) const;
+
+ virtual bool equivalent(const MatchExpression* other) const {
+ return other->matchType() == ATOMIC;
+ }
+};
+
+class FalseMatchExpression : public MatchExpression {
+public:
+ FalseMatchExpression() : MatchExpression(ALWAYS_FALSE) {}
+
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const {
+ return false;
+ }
+
+ virtual bool matchesSingleElement(const BSONElement& e) const {
+ return false;
+ }
+
+ virtual MatchExpression* shallowClone() const {
+ return new FalseMatchExpression();
+ }
+
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
+
+ virtual void toBSON(BSONObjBuilder* out) const;
+ virtual bool equivalent(const MatchExpression* other) const {
+ return other->matchType() == ALWAYS_FALSE;
+ }
+};
}
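
The accessors declared above (numChildren(), getChild(), isLogical()/isArray()/isLeaf()) are all a generic tree walk needs. A small hedged sketch (not part of the patch) that counts leaf predicates in a parsed tree; for example {$and: [{a: 1}, {b: {$lt: 3}}]} yields 2.

#include <cstddef>

#include "mongo/db/matcher/expression.h"

// Counts single-field predicates by walking the tree with the accessors above.
// Logical (AND/OR/NOT/NOR) and array nodes recurse into their children; every
// other node satisfies isLeaf() and counts as one predicate.
size_t countLeafPredicates(const mongo::MatchExpression* node) {
    if (node->isLeaf()) {
        return 1;
    }
    size_t total = 0;
    for (size_t i = 0; i < node->numChildren(); ++i) {
        total += countLeafPredicates(node->getChild(i));
    }
    return total;
}
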
diff --git a/src/mongo/db/matcher/expression_algo.cpp b/src/mongo/db/matcher/expression_algo.cpp
index 5e4880a1ae5..a8fb260b7d0 100644
--- a/src/mongo/db/matcher/expression_algo.cpp
+++ b/src/mongo/db/matcher/expression_algo.cpp
@@ -37,8 +37,8 @@
namespace mongo {
namespace {
- bool isComparisonMatchExpression(const MatchExpression* expr) {
- switch (expr->matchType()) {
+bool isComparisonMatchExpression(const MatchExpression* expr) {
+ switch (expr->matchType()) {
case MatchExpression::LT:
case MatchExpression::LTE:
case MatchExpression::EQ:
@@ -47,140 +47,139 @@ namespace {
return true;
default:
return false;
- }
}
+}
- bool supportsEquality(const ComparisonMatchExpression* expr) {
- switch (expr->matchType()) {
+bool supportsEquality(const ComparisonMatchExpression* expr) {
+ switch (expr->matchType()) {
case MatchExpression::LTE:
case MatchExpression::EQ:
case MatchExpression::GTE:
return true;
default:
return false;
- }
}
+}
- /**
- * Returns true if the documents matched by 'lhs' are a subset of the documents matched by
- * 'rhs', i.e. a document matched by 'lhs' must also be matched by 'rhs', and false otherwise.
- */
- bool _isSubsetOf(const ComparisonMatchExpression* lhs, const ComparisonMatchExpression* rhs) {
- // An expression can only match a subset of the documents matched by another if they are
- // comparing the same field.
- if (lhs->path() != rhs->path()) {
- return false;
- }
+/**
+ * Returns true if the documents matched by 'lhs' are a subset of the documents matched by
+ * 'rhs', i.e. a document matched by 'lhs' must also be matched by 'rhs', and false otherwise.
+ */
+bool _isSubsetOf(const ComparisonMatchExpression* lhs, const ComparisonMatchExpression* rhs) {
+ // An expression can only match a subset of the documents matched by another if they are
+ // comparing the same field.
+ if (lhs->path() != rhs->path()) {
+ return false;
+ }
- const BSONElement lhsData = lhs->getData();
- const BSONElement rhsData = rhs->getData();
+ const BSONElement lhsData = lhs->getData();
+ const BSONElement rhsData = rhs->getData();
- if (lhsData.canonicalType() != rhsData.canonicalType()) {
- return false;
- }
+ if (lhsData.canonicalType() != rhsData.canonicalType()) {
+ return false;
+ }
- // Special case the handling for NaN values: NaN compares equal only to itself.
- if (std::isnan(lhsData.numberDouble()) || std::isnan(rhsData.numberDouble())) {
- if (supportsEquality(lhs) && supportsEquality(rhs)) {
- return std::isnan(lhsData.numberDouble()) && std::isnan(rhsData.numberDouble());
- }
- return false;
+ // Special case the handling for NaN values: NaN compares equal only to itself.
+ if (std::isnan(lhsData.numberDouble()) || std::isnan(rhsData.numberDouble())) {
+ if (supportsEquality(lhs) && supportsEquality(rhs)) {
+ return std::isnan(lhsData.numberDouble()) && std::isnan(rhsData.numberDouble());
}
+ return false;
+ }
- int cmp = compareElementValues(lhsData, rhsData);
+ int cmp = compareElementValues(lhsData, rhsData);
- // Check whether the two expressions are equivalent.
- if (lhs->matchType() == rhs->matchType() && cmp == 0) {
- return true;
- }
+ // Check whether the two expressions are equivalent.
+ if (lhs->matchType() == rhs->matchType() && cmp == 0) {
+ return true;
+ }
- switch (rhs->matchType()) {
+ switch (rhs->matchType()) {
case MatchExpression::LT:
case MatchExpression::LTE:
switch (lhs->matchType()) {
- case MatchExpression::LT:
- case MatchExpression::LTE:
- case MatchExpression::EQ:
- if (rhs->matchType() == MatchExpression::LTE) {
- return cmp <= 0;
- }
- return cmp < 0;
- default:
- return false;
+ case MatchExpression::LT:
+ case MatchExpression::LTE:
+ case MatchExpression::EQ:
+ if (rhs->matchType() == MatchExpression::LTE) {
+ return cmp <= 0;
+ }
+ return cmp < 0;
+ default:
+ return false;
}
case MatchExpression::GT:
case MatchExpression::GTE:
switch (lhs->matchType()) {
- case MatchExpression::GT:
- case MatchExpression::GTE:
- case MatchExpression::EQ:
- if (rhs->matchType() == MatchExpression::GTE) {
- return cmp >= 0;
- }
- return cmp > 0;
- default:
- return false;
+ case MatchExpression::GT:
+ case MatchExpression::GTE:
+ case MatchExpression::EQ:
+ if (rhs->matchType() == MatchExpression::GTE) {
+ return cmp >= 0;
+ }
+ return cmp > 0;
+ default:
+ return false;
}
default:
return false;
- }
}
+}
- /**
- * Returns true if the documents matched by 'lhs' are a subset of the documents matched by
- * 'rhs', i.e. a document matched by 'lhs' must also be matched by 'rhs', and false otherwise.
- */
- bool _isSubsetOf(const MatchExpression* lhs, const ComparisonMatchExpression* rhs) {
- // An expression can only match a subset of the documents matched by another if they are
- // comparing the same field.
- if (lhs->path() != rhs->path()) {
- return false;
- }
+/**
+ * Returns true if the documents matched by 'lhs' are a subset of the documents matched by
+ * 'rhs', i.e. a document matched by 'lhs' must also be matched by 'rhs', and false otherwise.
+ */
+bool _isSubsetOf(const MatchExpression* lhs, const ComparisonMatchExpression* rhs) {
+ // An expression can only match a subset of the documents matched by another if they are
+ // comparing the same field.
+ if (lhs->path() != rhs->path()) {
+ return false;
+ }
- if (isComparisonMatchExpression(lhs)) {
- return _isSubsetOf(static_cast<const ComparisonMatchExpression*>(lhs), rhs);
- }
+ if (isComparisonMatchExpression(lhs)) {
+ return _isSubsetOf(static_cast<const ComparisonMatchExpression*>(lhs), rhs);
+ }
- if (lhs->matchType() == MatchExpression::MATCH_IN) {
- const InMatchExpression* ime = static_cast<const InMatchExpression*>(lhs);
- const ArrayFilterEntries& arrayEntries = ime->getData();
- if (arrayEntries.numRegexes() > 0) {
+ if (lhs->matchType() == MatchExpression::MATCH_IN) {
+ const InMatchExpression* ime = static_cast<const InMatchExpression*>(lhs);
+ const ArrayFilterEntries& arrayEntries = ime->getData();
+ if (arrayEntries.numRegexes() > 0) {
+ return false;
+ }
+ for (BSONElement elem : arrayEntries.equalities()) {
+ // Each element in the $in-array represents an equality predicate.
+ EqualityMatchExpression equality;
+ equality.init(lhs->path(), elem);
+ if (!_isSubsetOf(&equality, rhs)) {
return false;
}
- for (BSONElement elem : arrayEntries.equalities()) {
- // Each element in the $in-array represents an equality predicate.
- EqualityMatchExpression equality;
- equality.init(lhs->path(), elem);
- if (!_isSubsetOf(&equality, rhs)) {
- return false;
- }
- }
- return true;
}
- return false;
+ return true;
}
+ return false;
+}
- /**
- * Returns true if the documents matched by 'lhs' are a subset of the documents matched by
- * 'rhs', i.e. a document matched by 'lhs' must also be matched by 'rhs', and false otherwise.
- */
- bool _isSubsetOf(const MatchExpression* lhs, const ExistsMatchExpression* rhs) {
- // An expression can only match a subset of the documents matched by another if they are
- // comparing the same field. Defer checking the path for $not expressions until the
- // subexpression is examined.
- if (lhs->matchType() != MatchExpression::NOT && lhs->path() != rhs->path()) {
- return false;
- }
+/**
+ * Returns true if the documents matched by 'lhs' are a subset of the documents matched by
+ * 'rhs', i.e. a document matched by 'lhs' must also be matched by 'rhs', and false otherwise.
+ */
+bool _isSubsetOf(const MatchExpression* lhs, const ExistsMatchExpression* rhs) {
+ // An expression can only match a subset of the documents matched by another if they are
+ // comparing the same field. Defer checking the path for $not expressions until the
+ // subexpression is examined.
+ if (lhs->matchType() != MatchExpression::NOT && lhs->path() != rhs->path()) {
+ return false;
+ }
- if (isComparisonMatchExpression(lhs)) {
- const ComparisonMatchExpression* cme =
- static_cast<const ComparisonMatchExpression*>(lhs);
- // CompareMatchExpression::init() prohibits creating a match expression with EOO or
- // Undefined types, so only need to ensure that the value is not of type jstNULL.
- return cme->getData().type() != jstNULL;
- }
+ if (isComparisonMatchExpression(lhs)) {
+ const ComparisonMatchExpression* cme = static_cast<const ComparisonMatchExpression*>(lhs);
+ // CompareMatchExpression::init() prohibits creating a match expression with EOO or
+ // Undefined types, so only need to ensure that the value is not of type jstNULL.
+ return cme->getData().type() != jstNULL;
+ }
- switch (lhs->matchType()) {
+ switch (lhs->matchType()) {
case MatchExpression::ELEM_MATCH_VALUE:
case MatchExpression::ELEM_MATCH_OBJECT:
case MatchExpression::EXISTS:
@@ -202,76 +201,76 @@ namespace {
}
switch (lhs->getChild(0)->matchType()) {
- case MatchExpression::EQ: {
- const ComparisonMatchExpression* cme =
- static_cast<const ComparisonMatchExpression*>(lhs->getChild(0));
- return cme->getData().type() == jstNULL;
- }
- case MatchExpression::MATCH_IN: {
- const InMatchExpression* ime =
- static_cast<const InMatchExpression*>(lhs->getChild(0));
- return ime->getData().hasNull();
- }
- default:
- return false;
+ case MatchExpression::EQ: {
+ const ComparisonMatchExpression* cme =
+ static_cast<const ComparisonMatchExpression*>(lhs->getChild(0));
+ return cme->getData().type() == jstNULL;
+ }
+ case MatchExpression::MATCH_IN: {
+ const InMatchExpression* ime =
+ static_cast<const InMatchExpression*>(lhs->getChild(0));
+ return ime->getData().hasNull();
+ }
+ default:
+ return false;
}
default:
return false;
- }
}
+}
} // namespace
namespace expression {
- bool isSubsetOf(const MatchExpression* lhs, const MatchExpression* rhs) {
- invariant(lhs);
- invariant(rhs);
+bool isSubsetOf(const MatchExpression* lhs, const MatchExpression* rhs) {
+ invariant(lhs);
+ invariant(rhs);
- if (lhs->equivalent(rhs)) {
- return true;
- }
+ if (lhs->equivalent(rhs)) {
+ return true;
+ }
- if (rhs->matchType() == MatchExpression::AND) {
- // 'lhs' must match a subset of the documents matched by each clause of 'rhs'.
- for (size_t i = 0; i < rhs->numChildren(); i++) {
- if (!isSubsetOf(lhs, rhs->getChild(i))) {
- return false;
- }
+ if (rhs->matchType() == MatchExpression::AND) {
+ // 'lhs' must match a subset of the documents matched by each clause of 'rhs'.
+ for (size_t i = 0; i < rhs->numChildren(); i++) {
+ if (!isSubsetOf(lhs, rhs->getChild(i))) {
+ return false;
}
- return true;
}
+ return true;
+ }
- if (lhs->matchType() == MatchExpression::AND) {
- // At least one clause of 'lhs' must match a subset of the documents matched by 'rhs'.
- for (size_t i = 0; i < lhs->numChildren(); i++) {
- if (isSubsetOf(lhs->getChild(i), rhs)) {
- return true;
- }
+ if (lhs->matchType() == MatchExpression::AND) {
+ // At least one clause of 'lhs' must match a subset of the documents matched by 'rhs'.
+ for (size_t i = 0; i < lhs->numChildren(); i++) {
+ if (isSubsetOf(lhs->getChild(i), rhs)) {
+ return true;
}
- return false;
}
+ return false;
+ }
- if (lhs->matchType() == MatchExpression::OR) {
- // Every clause of 'lhs' must match a subset of the documents matched by 'rhs'.
- for (size_t i = 0; i < lhs->numChildren(); i++) {
- if (!isSubsetOf(lhs->getChild(i), rhs)) {
- return false;
- }
+ if (lhs->matchType() == MatchExpression::OR) {
+ // Every clause of 'lhs' must match a subset of the documents matched by 'rhs'.
+ for (size_t i = 0; i < lhs->numChildren(); i++) {
+ if (!isSubsetOf(lhs->getChild(i), rhs)) {
+ return false;
}
- return true;
- }
-
- if (isComparisonMatchExpression(rhs)) {
- return _isSubsetOf(lhs, static_cast<const ComparisonMatchExpression*>(rhs));
}
+ return true;
+ }
- if (rhs->matchType() == MatchExpression::EXISTS) {
- return _isSubsetOf(lhs, static_cast<const ExistsMatchExpression*>(rhs));
- }
+ if (isComparisonMatchExpression(rhs)) {
+ return _isSubsetOf(lhs, static_cast<const ComparisonMatchExpression*>(rhs));
+ }
- return false;
+ if (rhs->matchType() == MatchExpression::EXISTS) {
+ return _isSubsetOf(lhs, static_cast<const ExistsMatchExpression*>(rhs));
}
+ return false;
+}
+
} // namespace expression
} // namespace mongo
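
The helpers above decide comparison-vs-comparison containment: the paths and canonical types must agree, NaN matches only itself under equality-supporting operators, and otherwise the sign of compareElementValues() against the rhs operator decides the answer. Below is a minimal, dependency-free sketch of that interval reasoning, specialized to doubles; the Cmp enum and double-only operands are simplifications, not the tree's real types.

#include <cmath>

enum class Cmp { LT, LTE, EQ, GTE, GT };

static bool supportsEqualityOp(Cmp op) {
    return op == Cmp::LTE || op == Cmp::EQ || op == Cmp::GTE;
}

// Returns true if every double matched by (lhsOp, lhsVal) is also matched by
// (rhsOp, rhsVal), mirroring the comparison-vs-comparison _isSubsetOf() above
// for numeric operands on the same field.
bool comparisonIsSubsetOf(Cmp lhsOp, double lhsVal, Cmp rhsOp, double rhsVal) {
    // NaN compares equal only to itself.
    if (std::isnan(lhsVal) || std::isnan(rhsVal)) {
        if (supportsEqualityOp(lhsOp) && supportsEqualityOp(rhsOp)) {
            return std::isnan(lhsVal) && std::isnan(rhsVal);
        }
        return false;
    }

    int cmp = (lhsVal < rhsVal) ? -1 : (lhsVal > rhsVal ? 1 : 0);

    // Identical predicates are trivially subsets of each other.
    if (lhsOp == rhsOp && cmp == 0) {
        return true;
    }

    switch (rhsOp) {
        case Cmp::LT:
        case Cmp::LTE:
            switch (lhsOp) {
                case Cmp::LT:
                case Cmp::LTE:
                case Cmp::EQ:
                    return rhsOp == Cmp::LTE ? cmp <= 0 : cmp < 0;
                default:
                    return false;
            }
        case Cmp::GT:
        case Cmp::GTE:
            switch (lhsOp) {
                case Cmp::GT:
                case Cmp::GTE:
                case Cmp::EQ:
                    return rhsOp == Cmp::GTE ? cmp >= 0 : cmp > 0;
                default:
                    return false;
            }
        default:
            return false;
    }
}

// Examples: {$lt: 5} is a subset of {$lte: 5} (cmp == 0, rhs allows equality),
// while {$lte: 5} is not a subset of {$lt: 5} (rhs requires cmp < 0).
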
diff --git a/src/mongo/db/matcher/expression_algo.h b/src/mongo/db/matcher/expression_algo.h
index 0ee4459c56f..baccf557104 100644
--- a/src/mongo/db/matcher/expression_algo.h
+++ b/src/mongo/db/matcher/expression_algo.h
@@ -30,34 +30,34 @@
namespace mongo {
- class MatchExpression;
+class MatchExpression;
namespace expression {
- /**
- * Returns true if the documents matched by 'lhs' are a subset of the documents matched by
- * 'rhs', i.e. a document matched by 'lhs' must also be matched by 'rhs', and false otherwise.
- *
- * With respect to partial indexes, 'lhs' corresponds to the query specification and 'rhs'
- * corresponds to the filter specification.
- *
- * e.g.
- *
- * Suppose that
- *
- * lhs = { x : 4 }
- * rhs = { x : { $lte : 5 } }
- *
- * ==> true
- *
- * Suppose that
- *
- * lhs = { x : { $gte: 6 } }
- * rhs = { x : 7 }
- *
- * ==> false
- */
- bool isSubsetOf(const MatchExpression* lhs, const MatchExpression* rhs);
+/**
+ * Returns true if the documents matched by 'lhs' are a subset of the documents matched by
+ * 'rhs', i.e. a document matched by 'lhs' must also be matched by 'rhs', and false otherwise.
+ *
+ * With respect to partial indexes, 'lhs' corresponds to the query specification and 'rhs'
+ * corresponds to the filter specification.
+ *
+ * e.g.
+ *
+ * Suppose that
+ *
+ * lhs = { x : 4 }
+ * rhs = { x : { $lte : 5 } }
+ *
+ * ==> true
+ *
+ * Suppose that
+ *
+ * lhs = { x : { $gte: 6 } }
+ * rhs = { x : 7 }
+ *
+ * ==> false
+ */
+bool isSubsetOf(const MatchExpression* lhs, const MatchExpression* rhs);
} // namespace expression
} // namespace mongo
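
expression::isSubsetOf() is the entry point a partial-index check would use: the query predicate is 'lhs' and the index's filter is 'rhs'. A hedged sketch of such a call site follows, reusing the parse/ownership pattern of ParsedMatchExpression in the tests below; the include paths are assumptions.

#include <memory>
#include <string>

#include "mongo/db/json.h"                       // fromjson() -- path is an assumption
#include "mongo/db/matcher/expression_algo.h"
#include "mongo/db/matcher/expression_parser.h"  // path is an assumption

// Returns true if a partial index whose filter is 'filterJson' can answer a query whose
// predicate is 'queryJson', i.e. every document matched by the query is matched by the filter.
bool queryCoveredByPartialIndexFilter(const std::string& queryJson,
                                      const std::string& filterJson) {
    // The BSONObjs must outlive the MatchExpressions (see ParsedMatchExpression below).
    mongo::BSONObj queryObj = mongo::fromjson(queryJson);
    mongo::BSONObj filterObj = mongo::fromjson(filterJson);

    mongo::StatusWithMatchExpression query = mongo::MatchExpressionParser::parse(queryObj);
    mongo::StatusWithMatchExpression filter = mongo::MatchExpressionParser::parse(filterObj);
    if (!query.getStatus().isOK() || !filter.getStatus().isOK()) {
        return false;
    }
    std::unique_ptr<mongo::MatchExpression> queryExpr(query.getValue());
    std::unique_ptr<mongo::MatchExpression> filterExpr(filter.getValue());

    // e.g. ("{x: 4}", "{x: {$lte: 5}}")      ==> true
    //      ("{x: {$gte: 6}}", "{x: 7}")      ==> false
    return mongo::expression::isSubsetOf(queryExpr.get(), filterExpr.get());
}
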
diff --git a/src/mongo/db/matcher/expression_algo_test.cpp b/src/mongo/db/matcher/expression_algo_test.cpp
index 63af7a52c96..a108a7a7f3b 100644
--- a/src/mongo/db/matcher/expression_algo_test.cpp
+++ b/src/mongo/db/matcher/expression_algo_test.cpp
@@ -40,591 +40,592 @@
namespace mongo {
- /**
- * A MatchExpression does not hold the memory for BSONElements, so use ParsedMatchExpression to
- * ensure that the BSONObj outlives the MatchExpression.
- */
- class ParsedMatchExpression {
- public:
- ParsedMatchExpression(const std::string& str)
- : _obj(fromjson(str)) {
- StatusWithMatchExpression result = MatchExpressionParser::parse(_obj);
- ASSERT_OK(result.getStatus());
- _expr.reset(result.getValue());
- }
-
- const MatchExpression* get() const { return _expr.get(); }
-
- private:
- const BSONObj _obj;
- std::unique_ptr<MatchExpression> _expr;
- };
-
- TEST(ExpressionAlgoIsSubsetOf, NullAndOmittedField) {
- // Verify that ComparisonMatchExpression::init() prohibits creating a match expression with
- // an Undefined type.
- BSONObj undefined = fromjson("{a: undefined}");
- ASSERT_EQUALS(ErrorCodes::BadValue, MatchExpressionParser::parse(undefined).getStatus());
-
- ParsedMatchExpression empty("{}");
- ParsedMatchExpression null("{a: null}");
-
- ASSERT_TRUE(expression::isSubsetOf(null.get(), empty.get()));
- ASSERT_FALSE(expression::isSubsetOf(empty.get(), null.get()));
-
- ParsedMatchExpression b1("{b: 1}");
- ParsedMatchExpression aNullB1("{a: null, b: 1}");
-
- ASSERT_TRUE(expression::isSubsetOf(aNullB1.get(), b1.get()));
- ASSERT_FALSE(expression::isSubsetOf(b1.get(), aNullB1.get()));
-
- ParsedMatchExpression a1C3("{a: 1, c: 3}");
- ParsedMatchExpression a1BNullC3("{a: 1, b: null, c: 3}");
-
- ASSERT_TRUE(expression::isSubsetOf(a1BNullC3.get(), a1C3.get()));
- ASSERT_FALSE(expression::isSubsetOf(a1C3.get(), a1BNullC3.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, NullAndIn) {
- ParsedMatchExpression eqNull("{x: null}");
- ParsedMatchExpression inNull("{x: {$in: [null]}}");
- ParsedMatchExpression inNullOr2("{x: {$in: [null, 2]}}");
-
- ASSERT_TRUE(expression::isSubsetOf(inNull.get(), eqNull.get()));
- ASSERT_FALSE(expression::isSubsetOf(inNullOr2.get(), eqNull.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, NullAndExists) {
- ParsedMatchExpression null("{x: null}");
- ParsedMatchExpression exists("{x: {$exists: true}}");
- ASSERT_FALSE(expression::isSubsetOf(null.get(), exists.get()));
- ASSERT_FALSE(expression::isSubsetOf(exists.get(), null.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_NaN) {
- ParsedMatchExpression nan("{x: NaN}");
- ParsedMatchExpression lt("{x: {$lt: 5}}");
- ParsedMatchExpression lte("{x: {$lte: 5}}");
- ParsedMatchExpression gte("{x: {$gte: 5}}");
- ParsedMatchExpression gt("{x: {$gt: 5}}");
- ParsedMatchExpression in("{x: {$in: [5]}}");
-
- ASSERT_TRUE(expression::isSubsetOf(nan.get(), nan.get()));
- ASSERT_FALSE(expression::isSubsetOf(nan.get(), lt.get()));
- ASSERT_FALSE(expression::isSubsetOf(lt.get(), nan.get()));
- ASSERT_FALSE(expression::isSubsetOf(nan.get(), lte.get()));
- ASSERT_FALSE(expression::isSubsetOf(lte.get(), nan.get()));
- ASSERT_FALSE(expression::isSubsetOf(nan.get(), gte.get()));
- ASSERT_FALSE(expression::isSubsetOf(gte.get(), nan.get()));
- ASSERT_FALSE(expression::isSubsetOf(nan.get(), gt.get()));
- ASSERT_FALSE(expression::isSubsetOf(gt.get(), nan.get()));
- ASSERT_FALSE(expression::isSubsetOf(nan.get(), in.get()));
- ASSERT_FALSE(expression::isSubsetOf(in.get(), nan.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_EQ) {
- ParsedMatchExpression a5("{a: 5}");
- ParsedMatchExpression a6("{a: 6}");
- ParsedMatchExpression b5("{b: 5}");
-
- ASSERT_TRUE(expression::isSubsetOf(a5.get(), a5.get()));
- ASSERT_FALSE(expression::isSubsetOf(a5.get(), a6.get()));
- ASSERT_FALSE(expression::isSubsetOf(a5.get(), b5.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, CompareAnd_EQ) {
- ParsedMatchExpression a1B2("{a: 1, b: 2}");
- ParsedMatchExpression a1B7("{a: 1, b: 7}");
- ParsedMatchExpression a1("{a: 1}");
- ParsedMatchExpression b2("{b: 2}");
-
- ASSERT_TRUE(expression::isSubsetOf(a1B2.get(), a1B2.get()));
- ASSERT_FALSE(expression::isSubsetOf(a1B2.get(), a1B7.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(a1B2.get(), a1.get()));
- ASSERT_TRUE(expression::isSubsetOf(a1B2.get(), b2.get()));
- ASSERT_FALSE(expression::isSubsetOf(a1B7.get(), b2.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, CompareAnd_GT) {
- ParsedMatchExpression filter("{a: {$gt: 5}, b: {$gt: 6}}");
- ParsedMatchExpression query("{a: {$gt: 5}, b: {$gt: 6}, c: {$gt: 7}}");
-
- ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
- ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, CompareOr_LT) {
- ParsedMatchExpression lt5("{a: {$lt: 5}}");
- ParsedMatchExpression eq2OrEq3("{$or: [{a: 2}, {a: 3}]}");
- ParsedMatchExpression eq4OrEq5("{$or: [{a: 4}, {a: 5}]}");
- ParsedMatchExpression eq4OrEq6("{$or: [{a: 4}, {a: 6}]}");
-
- ASSERT_TRUE(expression::isSubsetOf(eq2OrEq3.get(), lt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(eq4OrEq5.get(), lt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(eq4OrEq6.get(), lt5.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, CompareOr_GTE) {
- ParsedMatchExpression gte5("{a: {$gte: 5}}");
- ParsedMatchExpression eq4OrEq6("{$or: [{a: 4}, {a: 6}]}");
- ParsedMatchExpression eq5OrEq6("{$or: [{a: 5}, {a: 6}]}");
- ParsedMatchExpression eq7OrEq8("{$or: [{a: 7}, {a: 8}]}");
-
- ASSERT_FALSE(expression::isSubsetOf(eq4OrEq6.get(), gte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(eq5OrEq6.get(), gte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(eq7OrEq8.get(), gte5.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, DifferentCanonicalTypes) {
- ParsedMatchExpression number("{x: {$gt: 1}}");
- ParsedMatchExpression string("{x: {$gt: 'a'}}");
- ASSERT_FALSE(expression::isSubsetOf(number.get(), string.get()));
- ASSERT_FALSE(expression::isSubsetOf(string.get(), number.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, DifferentNumberTypes) {
- ParsedMatchExpression numberDouble("{x: 5.0}");
- ParsedMatchExpression numberInt("{x: NumberInt(5)}");
- ParsedMatchExpression numberLong("{x: NumberLong(5)}");
-
- ASSERT_TRUE(expression::isSubsetOf(numberDouble.get(), numberInt.get()));
- ASSERT_TRUE(expression::isSubsetOf(numberDouble.get(), numberLong.get()));
- ASSERT_TRUE(expression::isSubsetOf(numberInt.get(), numberDouble.get()));
- ASSERT_TRUE(expression::isSubsetOf(numberInt.get(), numberLong.get()));
- ASSERT_TRUE(expression::isSubsetOf(numberLong.get(), numberDouble.get()));
- ASSERT_TRUE(expression::isSubsetOf(numberLong.get(), numberInt.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, PointInUnboundedRange) {
- ParsedMatchExpression a4("{a: 4}");
- ParsedMatchExpression a5("{a: 5}");
- ParsedMatchExpression a6("{a: 6}");
- ParsedMatchExpression b5("{b: 5}");
-
- ParsedMatchExpression lt5("{a: {$lt: 5}}");
- ParsedMatchExpression lte5("{a: {$lte: 5}}");
- ParsedMatchExpression gte5("{a: {$gte: 5}}");
- ParsedMatchExpression gt5("{a: {$gt: 5}}");
-
- ASSERT_TRUE(expression::isSubsetOf(a4.get(), lte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(a5.get(), lte5.get()));
- ASSERT_FALSE(expression::isSubsetOf(a6.get(), lte5.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(a4.get(), lt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(a5.get(), lt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(a6.get(), lt5.get()));
-
- ASSERT_FALSE(expression::isSubsetOf(a4.get(), gte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(a5.get(), gte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(a6.get(), gte5.get()));
-
- ASSERT_FALSE(expression::isSubsetOf(a4.get(), gt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(a5.get(), gt5.get()));
- ASSERT_TRUE(expression::isSubsetOf(a6.get(), gt5.get()));
-
- // An unbounded range query does not match a subset of documents of a point query.
- ASSERT_FALSE(expression::isSubsetOf(lt5.get(), a5.get()));
- ASSERT_FALSE(expression::isSubsetOf(lte5.get(), a5.get()));
- ASSERT_FALSE(expression::isSubsetOf(gte5.get(), a5.get()));
- ASSERT_FALSE(expression::isSubsetOf(gt5.get(), a5.get()));
-
- // Cannot be a subset if comparing different field names.
- ASSERT_FALSE(expression::isSubsetOf(b5.get(), lt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(b5.get(), lte5.get()));
- ASSERT_FALSE(expression::isSubsetOf(b5.get(), gte5.get()));
- ASSERT_FALSE(expression::isSubsetOf(b5.get(), gt5.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, PointInBoundedRange) {
- ParsedMatchExpression filter("{a: {$gt: 5, $lt: 10}}");
- ParsedMatchExpression query("{a: 6}");
-
- ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
- ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, PointInBoundedRange_FakeAnd) {
- ParsedMatchExpression filter("{a: {$gt: 5, $lt: 10}}");
- ParsedMatchExpression query("{$and: [{a: 6}, {a: 6}]}");
-
- ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
- ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, MultiplePointsInBoundedRange) {
- ParsedMatchExpression filter("{a: {$gt: 5, $lt: 10}}");
- ParsedMatchExpression queryAllInside("{a: {$in: [6, 7, 8]}}");
- ParsedMatchExpression queryStraddleLower("{a: {$in: [4.9, 5.1]}}");
- ParsedMatchExpression queryStraddleUpper("{a: {$in: [9.9, 10.1]}}");
-
- ASSERT_TRUE(expression::isSubsetOf(queryAllInside.get(), filter.get()));
- ASSERT_FALSE(expression::isSubsetOf(queryStraddleLower.get(), filter.get()));
- ASSERT_FALSE(expression::isSubsetOf(queryStraddleUpper.get(), filter.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, PointInCompoundRange) {
- ParsedMatchExpression filter("{a: {$gt: 5}, b: {$gt: 6}, c: {$gt: 7}}");
- ParsedMatchExpression query("{a: 10, b: 10, c: 10}");
-
- ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
- ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_LT_LTE) {
- ParsedMatchExpression lte4("{x: {$lte: 4}}");
- ParsedMatchExpression lt5("{x: {$lt: 5}}");
- ParsedMatchExpression lte5("{x: {$lte: 5}}");
- ParsedMatchExpression lt6("{x: {$lt: 6}}");
-
- ASSERT_TRUE(expression::isSubsetOf(lte4.get(), lte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(lt5.get(), lte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(lte5.get(), lte5.get()));
- ASSERT_FALSE(expression::isSubsetOf(lt6.get(), lte5.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(lte4.get(), lt5.get()));
- ASSERT_TRUE(expression::isSubsetOf(lt5.get(), lt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(lte5.get(), lt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(lt6.get(), lt5.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_GT_GTE) {
- ParsedMatchExpression gte6("{x: {$gte: 6}}");
- ParsedMatchExpression gt5("{x: {$gt: 5}}");
- ParsedMatchExpression gte5("{x: {$gte: 5}}");
- ParsedMatchExpression gt4("{x: {$gt: 4}}");
-
- ASSERT_TRUE(expression::isSubsetOf(gte6.get(), gte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(gt5.get(), gte5.get()));
- ASSERT_TRUE(expression::isSubsetOf(gte5.get(), gte5.get()));
- ASSERT_FALSE(expression::isSubsetOf(gt4.get(), gte5.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(gte6.get(), gt5.get()));
- ASSERT_TRUE(expression::isSubsetOf(gt5.get(), gt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(gte5.get(), gt5.get()));
- ASSERT_FALSE(expression::isSubsetOf(gt4.get(), gt5.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, BoundedRangeInUnboundedRange) {
- ParsedMatchExpression filter("{a: {$gt: 1}}");
- ParsedMatchExpression query("{a: {$gt: 5, $lt: 10}}");
-
- ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
- ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, MultipleRangesInUnboundedRange) {
- ParsedMatchExpression filter("{a: {$gt: 1}}");
- ParsedMatchExpression negative("{$or: [{a: {$gt: 5, $lt: 10}}, {a: {$lt: 0}}]}");
- ParsedMatchExpression unbounded("{$or: [{a: {$gt: 5, $lt: 10}}, {a: {$gt: 15}}]}");
- ParsedMatchExpression bounded("{$or: [{a: {$gt: 5, $lt: 10}}, {a: {$gt: 20, $lt: 30}}]}");
-
- ASSERT_FALSE(expression::isSubsetOf(negative.get(), filter.get()));
- ASSERT_TRUE(expression::isSubsetOf(unbounded.get(), filter.get()));
- ASSERT_TRUE(expression::isSubsetOf(bounded.get(), filter.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, MultipleFields) {
- ParsedMatchExpression filter("{a: {$gt: 5}, b: {$lt: 10}}");
- ParsedMatchExpression onlyA("{$or: [{a: 6, b: {$lt: 4}}, {a: {$gt: 11}}]}");
- ParsedMatchExpression onlyB("{$or: [{b: {$lt: 4}}, {a: {$gt: 11}, b: 9}]}");
- ParsedMatchExpression both("{$or: [{a: 6, b: {$lt: 4}}, {a: {$gt: 11}, b: 9}]}");
-
- ASSERT_FALSE(expression::isSubsetOf(onlyA.get(), filter.get()));
- ASSERT_FALSE(expression::isSubsetOf(onlyB.get(), filter.get()));
- ASSERT_TRUE(expression::isSubsetOf(both.get(), filter.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_LT_In) {
- ParsedMatchExpression lt("{a: {$lt: 5}}");
-
- ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
- ParsedMatchExpression inEq("{a: {$in: [5]}}");
- ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
- ParsedMatchExpression inNull("{a: {$in: [null]}}");
-
- ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
- ParsedMatchExpression inAllLte("{a: {$in: [4.9, 5]}}");
- ParsedMatchExpression inAllLt("{a: {$in: [2, 3, 4]}}");
- ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
- ParsedMatchExpression inLtAndNull("{a: {$in: [1, null]}}");
-
- ASSERT_TRUE(expression::isSubsetOf(inLt.get(), lt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inEq.get(), lt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inGt.get(), lt.get()));
-
- ASSERT_FALSE(expression::isSubsetOf(inAllEq.get(), lt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inAllLte.get(), lt.get()));
- ASSERT_TRUE(expression::isSubsetOf(inAllLt.get(), lt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), lt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inLtAndNull.get(), lt.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_LTE_In) {
- ParsedMatchExpression lte("{a: {$lte: 5}}");
-
- ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
- ParsedMatchExpression inEq("{a: {$in: [5]}}");
- ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
- ParsedMatchExpression inNull("{a: {$in: [null]}}");
-
- ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
- ParsedMatchExpression inAllLte("{a: {$in: [4.9, 5]}}");
- ParsedMatchExpression inAllLt("{a: {$in: [2, 3, 4]}}");
- ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
- ParsedMatchExpression inLtAndNull("{a: {$in: [1, null]}}");
-
- ASSERT_TRUE(expression::isSubsetOf(inLt.get(), lte.get()));
- ASSERT_TRUE(expression::isSubsetOf(inEq.get(), lte.get()));
- ASSERT_FALSE(expression::isSubsetOf(inGt.get(), lte.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(inAllEq.get(), lte.get()));
- ASSERT_TRUE(expression::isSubsetOf(inAllLte.get(), lte.get()));
- ASSERT_TRUE(expression::isSubsetOf(inAllLt.get(), lte.get()));
- ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), lte.get()));
- ASSERT_FALSE(expression::isSubsetOf(inLtAndNull.get(), lte.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_EQ_In) {
- ParsedMatchExpression eq("{a: 5}");
-
- ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
- ParsedMatchExpression inEq("{a: {$in: [5]}}");
- ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
- ParsedMatchExpression inNull("{a: {$in: [null]}}");
-
- ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
- ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
- ParsedMatchExpression inEqAndNull("{a: {$in: [5, null]}}");
-
- ASSERT_FALSE(expression::isSubsetOf(inLt.get(), eq.get()));
- ASSERT_TRUE(expression::isSubsetOf(inEq.get(), eq.get()));
- ASSERT_FALSE(expression::isSubsetOf(inGt.get(), eq.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(inAllEq.get(), eq.get()));
- ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), eq.get()));
- ASSERT_FALSE(expression::isSubsetOf(inEqAndNull.get(), eq.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_GT_In) {
- ParsedMatchExpression gt("{a: {$gt: 5}}");
-
- ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
- ParsedMatchExpression inEq("{a: {$in: [5]}}");
- ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
- ParsedMatchExpression inNull("{a: {$in: [null]}}");
-
- ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
- ParsedMatchExpression inAllGte("{a: {$in: [5, 5.1]}}");
- ParsedMatchExpression inAllGt("{a: {$in: [6, 7, 8]}}");
- ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
- ParsedMatchExpression inGtAndNull("{a: {$in: [9, null]}}");
-
- ASSERT_FALSE(expression::isSubsetOf(inLt.get(), gt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inEq.get(), gt.get()));
- ASSERT_TRUE(expression::isSubsetOf(inGt.get(), gt.get()));
-
- ASSERT_FALSE(expression::isSubsetOf(inAllEq.get(), gt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inAllGte.get(), gt.get()));
- ASSERT_TRUE(expression::isSubsetOf(inAllGt.get(), gt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), gt.get()));
- ASSERT_FALSE(expression::isSubsetOf(inGtAndNull.get(), gt.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_GTE_In) {
- ParsedMatchExpression gte("{a: {$gte: 5}}");
-
- ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
- ParsedMatchExpression inEq("{a: {$in: [5]}}");
- ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
- ParsedMatchExpression inNull("{a: {$in: [null]}}");
-
- ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
- ParsedMatchExpression inAllGte("{a: {$in: [5, 5.1]}}");
- ParsedMatchExpression inAllGt("{a: {$in: [6, 7, 8]}}");
- ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
- ParsedMatchExpression inGtAndNull("{a: {$in: [9, null]}}");
-
- ASSERT_FALSE(expression::isSubsetOf(inLt.get(), gte.get()));
- ASSERT_TRUE(expression::isSubsetOf(inEq.get(), gte.get()));
- ASSERT_TRUE(expression::isSubsetOf(inGt.get(), gte.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(inAllEq.get(), gte.get()));
- ASSERT_TRUE(expression::isSubsetOf(inAllGte.get(), gte.get()));
- ASSERT_TRUE(expression::isSubsetOf(inAllGt.get(), gte.get()));
- ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), gte.get()));
- ASSERT_FALSE(expression::isSubsetOf(inGtAndNull.get(), gte.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, RegexAndIn) {
- ParsedMatchExpression eq1("{x: 1}");
- ParsedMatchExpression eqA("{x: 'a'}");
- ParsedMatchExpression inRegexA("{x: {$in: [/a/]}}");
- ParsedMatchExpression inRegexAbc("{x: {$in: [/abc/]}}");
- ParsedMatchExpression inRegexAOrEq1("{x: {$in: [/a/, 1]}}");
- ParsedMatchExpression inRegexAOrNull("{x: {$in: [/a/, null]}}");
-
- ASSERT_TRUE(expression::isSubsetOf(inRegexA.get(), inRegexA.get()));
- ASSERT_FALSE(expression::isSubsetOf(inRegexAbc.get(), inRegexA.get()));
- ASSERT_FALSE(expression::isSubsetOf(inRegexA.get(), inRegexAOrEq1.get()));
- ASSERT_FALSE(expression::isSubsetOf(inRegexAOrEq1.get(), eq1.get()));
- ASSERT_FALSE(expression::isSubsetOf(inRegexA.get(), eqA.get()));
- ASSERT_FALSE(expression::isSubsetOf(inRegexAOrNull.get(), eqA.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Exists) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression bExists("{b: {$exists: true}}");
- ParsedMatchExpression aExistsBExists("{a: {$exists: true}, b: {$exists: true}}");
- ParsedMatchExpression aExistsBExistsC5("{a: {$exists: true}, b: {$exists: true}, c: 5}");
-
- ASSERT_TRUE(expression::isSubsetOf(aExists.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(aExists.get(), bExists.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(aExistsBExists.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aExistsBExists.get(), bExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(aExistsBExists.get(), aExistsBExistsC5.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(aExistsBExistsC5.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aExistsBExistsC5.get(), bExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aExistsBExistsC5.get(), aExistsBExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_Exists) {
- ParsedMatchExpression exists("{a: {$exists: true}}");
- ParsedMatchExpression eq("{a: 1}");
- ParsedMatchExpression gt("{a: {$gt: 4}}");
- ParsedMatchExpression lte("{a: {$lte: 7}}");
-
- ASSERT_TRUE(expression::isSubsetOf(eq.get(), exists.get()));
- ASSERT_TRUE(expression::isSubsetOf(gt.get(), exists.get()));
- ASSERT_TRUE(expression::isSubsetOf(lte.get(), exists.get()));
-
- ASSERT_FALSE(expression::isSubsetOf(exists.get(), eq.get()));
- ASSERT_FALSE(expression::isSubsetOf(exists.get(), gt.get()));
- ASSERT_FALSE(expression::isSubsetOf(exists.get(), lte.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Type) {
- ParsedMatchExpression aType1("{a: {$type: 1}}");
- ParsedMatchExpression aType2("{a: {$type: 2}}");
- ParsedMatchExpression bType2("{b: {$type: 2}}");
-
- ASSERT_FALSE(expression::isSubsetOf(aType1.get(), aType2.get()));
- ASSERT_FALSE(expression::isSubsetOf(aType2.get(), aType1.get()));
-
- ASSERT_TRUE(expression::isSubsetOf(aType2.get(), aType2.get()));
- ASSERT_FALSE(expression::isSubsetOf(aType2.get(), bType2.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, TypeAndExists) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aType2("{a: {$type: 2}}");
- ParsedMatchExpression bType2("{b: {$type: 2}}");
-
- ASSERT_TRUE(expression::isSubsetOf(aType2.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(aExists.get(), aType2.get()));
- ASSERT_FALSE(expression::isSubsetOf(bType2.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, AllAndExists) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aAll("{a: {$all: ['x', 'y', 'z']}}");
- ParsedMatchExpression bAll("{b: {$all: ['x', 'y', 'z']}}");
- ParsedMatchExpression aAllWithNull("{a: {$all: ['x', null, 'z']}}");
-
- ASSERT_TRUE(expression::isSubsetOf(aAll.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(bAll.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aAllWithNull.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, ElemMatchAndExists_Value) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aElemMatch("{a: {$elemMatch: {$gt: 5, $lte: 10}}}");
- ParsedMatchExpression bElemMatch("{b: {$elemMatch: {$gt: 5, $lte: 10}}}");
- ParsedMatchExpression aElemMatchNull("{a: {$elemMatch: {$eq: null}}}");
-
- ASSERT_TRUE(expression::isSubsetOf(aElemMatch.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(aExists.get(), aElemMatch.get()));
- ASSERT_FALSE(expression::isSubsetOf(bElemMatch.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aElemMatchNull.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, ElemMatchAndExists_Object) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aElemMatch("{a: {$elemMatch: {x: {$gt: 5}, y: {$lte: 10}}}}");
- ParsedMatchExpression bElemMatch("{b: {$elemMatch: {x: {$gt: 5}, y: {$lte: 10}}}}");
- ParsedMatchExpression aElemMatchNull("{a: {$elemMatch: {x: null, y: null}}}");
-
- ASSERT_TRUE(expression::isSubsetOf(aElemMatch.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(aExists.get(), aElemMatch.get()));
- ASSERT_FALSE(expression::isSubsetOf(bElemMatch.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aElemMatchNull.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, SizeAndExists) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aSize0("{a: {$size: 0}}");
- ParsedMatchExpression aSize1("{a: {$size: 1}}");
- ParsedMatchExpression aSize3("{a: {$size: 3}}");
- ParsedMatchExpression bSize3("{b: {$size: 3}}");
-
- ASSERT_TRUE(expression::isSubsetOf(aSize0.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aSize1.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aSize3.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(aExists.get(), aSize3.get()));
- ASSERT_FALSE(expression::isSubsetOf(bSize3.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, ModAndExists) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aMod5("{a: {$mod: [5, 0]}}");
- ParsedMatchExpression bMod5("{b: {$mod: [5, 0]}}");
-
- ASSERT_TRUE(expression::isSubsetOf(aMod5.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(bMod5.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, RegexAndExists) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aRegex("{a: {$regex: 'pattern'}}");
- ParsedMatchExpression bRegex("{b: {$regex: 'pattern'}}");
-
- ASSERT_TRUE(expression::isSubsetOf(aRegex.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(bRegex.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, InAndExists) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aIn("{a: {$in: [1, 2, 3]}}");
- ParsedMatchExpression bIn("{b: {$in: [1, 2, 3]}}");
- ParsedMatchExpression aInWithNull("{a: {$in: [1, null, 3]}}");
-
- ASSERT_TRUE(expression::isSubsetOf(aIn.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(bIn.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(aInWithNull.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, NinAndExists) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aNin("{a: {$nin: [1, 2, 3]}}");
- ParsedMatchExpression bNin("{b: {$nin: [1, 2, 3]}}");
- ParsedMatchExpression aNinWithNull("{a: {$nin: [1, null, 3]}}");
-
- ASSERT_FALSE(expression::isSubsetOf(aNin.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(bNin.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aNinWithNull.get(), aExists.get()));
- }
-
- TEST(ExpressionAlgoIsSubsetOf, Compare_Exists_NE) {
- ParsedMatchExpression aExists("{a: {$exists: true}}");
- ParsedMatchExpression aNotEqual1("{a: {$ne: 1}}");
- ParsedMatchExpression bNotEqual1("{b: {$ne: 1}}");
- ParsedMatchExpression aNotEqualNull("{a: {$ne: null}}");
-
- ASSERT_FALSE(expression::isSubsetOf(aNotEqual1.get(), aExists.get()));
- ASSERT_FALSE(expression::isSubsetOf(bNotEqual1.get(), aExists.get()));
- ASSERT_TRUE(expression::isSubsetOf(aNotEqualNull.get(), aExists.get()));
- }
+/**
+ * A MatchExpression does not hold the memory for BSONElements, so use ParsedMatchExpression to
+ * ensure that the BSONObj outlives the MatchExpression.
+ */
+class ParsedMatchExpression {
+public:
+ ParsedMatchExpression(const std::string& str) : _obj(fromjson(str)) {
+ StatusWithMatchExpression result = MatchExpressionParser::parse(_obj);
+ ASSERT_OK(result.getStatus());
+ _expr.reset(result.getValue());
+ }
+
+ const MatchExpression* get() const {
+ return _expr.get();
+ }
+
+private:
+ const BSONObj _obj;
+ std::unique_ptr<MatchExpression> _expr;
+};
+
+TEST(ExpressionAlgoIsSubsetOf, NullAndOmittedField) {
+ // Verify that ComparisonMatchExpression::init() prohibits creating a match expression with
+ // an Undefined type.
+ BSONObj undefined = fromjson("{a: undefined}");
+ ASSERT_EQUALS(ErrorCodes::BadValue, MatchExpressionParser::parse(undefined).getStatus());
+
+ ParsedMatchExpression empty("{}");
+ ParsedMatchExpression null("{a: null}");
+
+ ASSERT_TRUE(expression::isSubsetOf(null.get(), empty.get()));
+ ASSERT_FALSE(expression::isSubsetOf(empty.get(), null.get()));
+
+ ParsedMatchExpression b1("{b: 1}");
+ ParsedMatchExpression aNullB1("{a: null, b: 1}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aNullB1.get(), b1.get()));
+ ASSERT_FALSE(expression::isSubsetOf(b1.get(), aNullB1.get()));
+
+ ParsedMatchExpression a1C3("{a: 1, c: 3}");
+ ParsedMatchExpression a1BNullC3("{a: 1, b: null, c: 3}");
+
+ ASSERT_TRUE(expression::isSubsetOf(a1BNullC3.get(), a1C3.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a1C3.get(), a1BNullC3.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, NullAndIn) {
+ ParsedMatchExpression eqNull("{x: null}");
+ ParsedMatchExpression inNull("{x: {$in: [null]}}");
+ ParsedMatchExpression inNullOr2("{x: {$in: [null, 2]}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(inNull.get(), eqNull.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inNullOr2.get(), eqNull.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, NullAndExists) {
+ ParsedMatchExpression null("{x: null}");
+ ParsedMatchExpression exists("{x: {$exists: true}}");
+ ASSERT_FALSE(expression::isSubsetOf(null.get(), exists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(exists.get(), null.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_NaN) {
+ ParsedMatchExpression nan("{x: NaN}");
+ ParsedMatchExpression lt("{x: {$lt: 5}}");
+ ParsedMatchExpression lte("{x: {$lte: 5}}");
+ ParsedMatchExpression gte("{x: {$gte: 5}}");
+ ParsedMatchExpression gt("{x: {$gt: 5}}");
+ ParsedMatchExpression in("{x: {$in: [5]}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(nan.get(), nan.get()));
+ ASSERT_FALSE(expression::isSubsetOf(nan.get(), lt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(lt.get(), nan.get()));
+ ASSERT_FALSE(expression::isSubsetOf(nan.get(), lte.get()));
+ ASSERT_FALSE(expression::isSubsetOf(lte.get(), nan.get()));
+ ASSERT_FALSE(expression::isSubsetOf(nan.get(), gte.get()));
+ ASSERT_FALSE(expression::isSubsetOf(gte.get(), nan.get()));
+ ASSERT_FALSE(expression::isSubsetOf(nan.get(), gt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(gt.get(), nan.get()));
+ ASSERT_FALSE(expression::isSubsetOf(nan.get(), in.get()));
+ ASSERT_FALSE(expression::isSubsetOf(in.get(), nan.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_EQ) {
+ ParsedMatchExpression a5("{a: 5}");
+ ParsedMatchExpression a6("{a: 6}");
+ ParsedMatchExpression b5("{b: 5}");
+
+ ASSERT_TRUE(expression::isSubsetOf(a5.get(), a5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a5.get(), a6.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a5.get(), b5.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, CompareAnd_EQ) {
+ ParsedMatchExpression a1B2("{a: 1, b: 2}");
+ ParsedMatchExpression a1B7("{a: 1, b: 7}");
+ ParsedMatchExpression a1("{a: 1}");
+ ParsedMatchExpression b2("{b: 2}");
+
+ ASSERT_TRUE(expression::isSubsetOf(a1B2.get(), a1B2.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a1B2.get(), a1B7.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(a1B2.get(), a1.get()));
+ ASSERT_TRUE(expression::isSubsetOf(a1B2.get(), b2.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a1B7.get(), b2.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, CompareAnd_GT) {
+ ParsedMatchExpression filter("{a: {$gt: 5}, b: {$gt: 6}}");
+ ParsedMatchExpression query("{a: {$gt: 5}, b: {$gt: 6}, c: {$gt: 7}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
+ ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, CompareOr_LT) {
+ ParsedMatchExpression lt5("{a: {$lt: 5}}");
+ ParsedMatchExpression eq2OrEq3("{$or: [{a: 2}, {a: 3}]}");
+ ParsedMatchExpression eq4OrEq5("{$or: [{a: 4}, {a: 5}]}");
+ ParsedMatchExpression eq4OrEq6("{$or: [{a: 4}, {a: 6}]}");
+
+ ASSERT_TRUE(expression::isSubsetOf(eq2OrEq3.get(), lt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(eq4OrEq5.get(), lt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(eq4OrEq6.get(), lt5.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, CompareOr_GTE) {
+ ParsedMatchExpression gte5("{a: {$gte: 5}}");
+ ParsedMatchExpression eq4OrEq6("{$or: [{a: 4}, {a: 6}]}");
+ ParsedMatchExpression eq5OrEq6("{$or: [{a: 5}, {a: 6}]}");
+ ParsedMatchExpression eq7OrEq8("{$or: [{a: 7}, {a: 8}]}");
+
+ ASSERT_FALSE(expression::isSubsetOf(eq4OrEq6.get(), gte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(eq5OrEq6.get(), gte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(eq7OrEq8.get(), gte5.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, DifferentCanonicalTypes) {
+ ParsedMatchExpression number("{x: {$gt: 1}}");
+ ParsedMatchExpression string("{x: {$gt: 'a'}}");
+ ASSERT_FALSE(expression::isSubsetOf(number.get(), string.get()));
+ ASSERT_FALSE(expression::isSubsetOf(string.get(), number.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, DifferentNumberTypes) {
+ ParsedMatchExpression numberDouble("{x: 5.0}");
+ ParsedMatchExpression numberInt("{x: NumberInt(5)}");
+ ParsedMatchExpression numberLong("{x: NumberLong(5)}");
+
+ ASSERT_TRUE(expression::isSubsetOf(numberDouble.get(), numberInt.get()));
+ ASSERT_TRUE(expression::isSubsetOf(numberDouble.get(), numberLong.get()));
+ ASSERT_TRUE(expression::isSubsetOf(numberInt.get(), numberDouble.get()));
+ ASSERT_TRUE(expression::isSubsetOf(numberInt.get(), numberLong.get()));
+ ASSERT_TRUE(expression::isSubsetOf(numberLong.get(), numberDouble.get()));
+ ASSERT_TRUE(expression::isSubsetOf(numberLong.get(), numberInt.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, PointInUnboundedRange) {
+ ParsedMatchExpression a4("{a: 4}");
+ ParsedMatchExpression a5("{a: 5}");
+ ParsedMatchExpression a6("{a: 6}");
+ ParsedMatchExpression b5("{b: 5}");
+
+ ParsedMatchExpression lt5("{a: {$lt: 5}}");
+ ParsedMatchExpression lte5("{a: {$lte: 5}}");
+ ParsedMatchExpression gte5("{a: {$gte: 5}}");
+ ParsedMatchExpression gt5("{a: {$gt: 5}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(a4.get(), lte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(a5.get(), lte5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a6.get(), lte5.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(a4.get(), lt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a5.get(), lt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a6.get(), lt5.get()));
+
+ ASSERT_FALSE(expression::isSubsetOf(a4.get(), gte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(a5.get(), gte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(a6.get(), gte5.get()));
+
+ ASSERT_FALSE(expression::isSubsetOf(a4.get(), gt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(a5.get(), gt5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(a6.get(), gt5.get()));
+
+ // An unbounded range query does not match a subset of documents of a point query.
+ ASSERT_FALSE(expression::isSubsetOf(lt5.get(), a5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(lte5.get(), a5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(gte5.get(), a5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(gt5.get(), a5.get()));
+
+ // Cannot be a subset if comparing different field names.
+ ASSERT_FALSE(expression::isSubsetOf(b5.get(), lt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(b5.get(), lte5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(b5.get(), gte5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(b5.get(), gt5.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, PointInBoundedRange) {
+ ParsedMatchExpression filter("{a: {$gt: 5, $lt: 10}}");
+ ParsedMatchExpression query("{a: 6}");
+
+ ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
+ ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, PointInBoundedRange_FakeAnd) {
+ ParsedMatchExpression filter("{a: {$gt: 5, $lt: 10}}");
+ ParsedMatchExpression query("{$and: [{a: 6}, {a: 6}]}");
+
+ ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
+ ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, MultiplePointsInBoundedRange) {
+ ParsedMatchExpression filter("{a: {$gt: 5, $lt: 10}}");
+ ParsedMatchExpression queryAllInside("{a: {$in: [6, 7, 8]}}");
+ ParsedMatchExpression queryStraddleLower("{a: {$in: [4.9, 5.1]}}");
+ ParsedMatchExpression queryStraddleUpper("{a: {$in: [9.9, 10.1]}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(queryAllInside.get(), filter.get()));
+ ASSERT_FALSE(expression::isSubsetOf(queryStraddleLower.get(), filter.get()));
+ ASSERT_FALSE(expression::isSubsetOf(queryStraddleUpper.get(), filter.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, PointInCompoundRange) {
+ ParsedMatchExpression filter("{a: {$gt: 5}, b: {$gt: 6}, c: {$gt: 7}}");
+ ParsedMatchExpression query("{a: 10, b: 10, c: 10}");
+
+ ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
+ ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_LT_LTE) {
+ ParsedMatchExpression lte4("{x: {$lte: 4}}");
+ ParsedMatchExpression lt5("{x: {$lt: 5}}");
+ ParsedMatchExpression lte5("{x: {$lte: 5}}");
+ ParsedMatchExpression lt6("{x: {$lt: 6}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(lte4.get(), lte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(lt5.get(), lte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(lte5.get(), lte5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(lt6.get(), lte5.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(lte4.get(), lt5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(lt5.get(), lt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(lte5.get(), lt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(lt6.get(), lt5.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_GT_GTE) {
+ ParsedMatchExpression gte6("{x: {$gte: 6}}");
+ ParsedMatchExpression gt5("{x: {$gt: 5}}");
+ ParsedMatchExpression gte5("{x: {$gte: 5}}");
+ ParsedMatchExpression gt4("{x: {$gt: 4}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(gte6.get(), gte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(gt5.get(), gte5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(gte5.get(), gte5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(gt4.get(), gte5.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(gte6.get(), gt5.get()));
+ ASSERT_TRUE(expression::isSubsetOf(gt5.get(), gt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(gte5.get(), gt5.get()));
+ ASSERT_FALSE(expression::isSubsetOf(gt4.get(), gt5.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, BoundedRangeInUnboundedRange) {
+ ParsedMatchExpression filter("{a: {$gt: 1}}");
+ ParsedMatchExpression query("{a: {$gt: 5, $lt: 10}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(query.get(), filter.get()));
+ ASSERT_FALSE(expression::isSubsetOf(filter.get(), query.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, MultipleRangesInUnboundedRange) {
+ ParsedMatchExpression filter("{a: {$gt: 1}}");
+ ParsedMatchExpression negative("{$or: [{a: {$gt: 5, $lt: 10}}, {a: {$lt: 0}}]}");
+ ParsedMatchExpression unbounded("{$or: [{a: {$gt: 5, $lt: 10}}, {a: {$gt: 15}}]}");
+ ParsedMatchExpression bounded("{$or: [{a: {$gt: 5, $lt: 10}}, {a: {$gt: 20, $lt: 30}}]}");
+
+ ASSERT_FALSE(expression::isSubsetOf(negative.get(), filter.get()));
+ ASSERT_TRUE(expression::isSubsetOf(unbounded.get(), filter.get()));
+ ASSERT_TRUE(expression::isSubsetOf(bounded.get(), filter.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, MultipleFields) {
+ ParsedMatchExpression filter("{a: {$gt: 5}, b: {$lt: 10}}");
+ ParsedMatchExpression onlyA("{$or: [{a: 6, b: {$lt: 4}}, {a: {$gt: 11}}]}");
+ ParsedMatchExpression onlyB("{$or: [{b: {$lt: 4}}, {a: {$gt: 11}, b: 9}]}");
+ ParsedMatchExpression both("{$or: [{a: 6, b: {$lt: 4}}, {a: {$gt: 11}, b: 9}]}");
+
+ ASSERT_FALSE(expression::isSubsetOf(onlyA.get(), filter.get()));
+ ASSERT_FALSE(expression::isSubsetOf(onlyB.get(), filter.get()));
+ ASSERT_TRUE(expression::isSubsetOf(both.get(), filter.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_LT_In) {
+ ParsedMatchExpression lt("{a: {$lt: 5}}");
+
+ ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
+ ParsedMatchExpression inEq("{a: {$in: [5]}}");
+ ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
+ ParsedMatchExpression inNull("{a: {$in: [null]}}");
+
+ ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
+ ParsedMatchExpression inAllLte("{a: {$in: [4.9, 5]}}");
+ ParsedMatchExpression inAllLt("{a: {$in: [2, 3, 4]}}");
+ ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
+ ParsedMatchExpression inLtAndNull("{a: {$in: [1, null]}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(inLt.get(), lt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inEq.get(), lt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inGt.get(), lt.get()));
+
+ ASSERT_FALSE(expression::isSubsetOf(inAllEq.get(), lt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inAllLte.get(), lt.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inAllLt.get(), lt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), lt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inLtAndNull.get(), lt.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_LTE_In) {
+ ParsedMatchExpression lte("{a: {$lte: 5}}");
+
+ ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
+ ParsedMatchExpression inEq("{a: {$in: [5]}}");
+ ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
+ ParsedMatchExpression inNull("{a: {$in: [null]}}");
+
+ ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
+ ParsedMatchExpression inAllLte("{a: {$in: [4.9, 5]}}");
+ ParsedMatchExpression inAllLt("{a: {$in: [2, 3, 4]}}");
+ ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
+ ParsedMatchExpression inLtAndNull("{a: {$in: [1, null]}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(inLt.get(), lte.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inEq.get(), lte.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inGt.get(), lte.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(inAllEq.get(), lte.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inAllLte.get(), lte.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inAllLt.get(), lte.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), lte.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inLtAndNull.get(), lte.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_EQ_In) {
+ ParsedMatchExpression eq("{a: 5}");
+
+ ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
+ ParsedMatchExpression inEq("{a: {$in: [5]}}");
+ ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
+ ParsedMatchExpression inNull("{a: {$in: [null]}}");
+
+ ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
+ ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
+ ParsedMatchExpression inEqAndNull("{a: {$in: [5, null]}}");
+
+ ASSERT_FALSE(expression::isSubsetOf(inLt.get(), eq.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inEq.get(), eq.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inGt.get(), eq.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(inAllEq.get(), eq.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), eq.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inEqAndNull.get(), eq.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_GT_In) {
+ ParsedMatchExpression gt("{a: {$gt: 5}}");
+
+ ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
+ ParsedMatchExpression inEq("{a: {$in: [5]}}");
+ ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
+ ParsedMatchExpression inNull("{a: {$in: [null]}}");
+
+ ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
+ ParsedMatchExpression inAllGte("{a: {$in: [5, 5.1]}}");
+ ParsedMatchExpression inAllGt("{a: {$in: [6, 7, 8]}}");
+ ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
+ ParsedMatchExpression inGtAndNull("{a: {$in: [9, null]}}");
+
+ ASSERT_FALSE(expression::isSubsetOf(inLt.get(), gt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inEq.get(), gt.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inGt.get(), gt.get()));
+
+ ASSERT_FALSE(expression::isSubsetOf(inAllEq.get(), gt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inAllGte.get(), gt.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inAllGt.get(), gt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), gt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inGtAndNull.get(), gt.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_GTE_In) {
+ ParsedMatchExpression gte("{a: {$gte: 5}}");
+
+ ParsedMatchExpression inLt("{a: {$in: [4.9]}}");
+ ParsedMatchExpression inEq("{a: {$in: [5]}}");
+ ParsedMatchExpression inGt("{a: {$in: [5.1]}}");
+ ParsedMatchExpression inNull("{a: {$in: [null]}}");
+
+ ParsedMatchExpression inAllEq("{a: {$in: [5, 5.0]}}");
+ ParsedMatchExpression inAllGte("{a: {$in: [5, 5.1]}}");
+ ParsedMatchExpression inAllGt("{a: {$in: [6, 7, 8]}}");
+ ParsedMatchExpression inStraddle("{a: {$in: [4, 6]}}");
+ ParsedMatchExpression inGtAndNull("{a: {$in: [9, null]}}");
+
+ ASSERT_FALSE(expression::isSubsetOf(inLt.get(), gte.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inEq.get(), gte.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inGt.get(), gte.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(inAllEq.get(), gte.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inAllGte.get(), gte.get()));
+ ASSERT_TRUE(expression::isSubsetOf(inAllGt.get(), gte.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inStraddle.get(), gte.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inGtAndNull.get(), gte.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, RegexAndIn) {
+ ParsedMatchExpression eq1("{x: 1}");
+ ParsedMatchExpression eqA("{x: 'a'}");
+ ParsedMatchExpression inRegexA("{x: {$in: [/a/]}}");
+ ParsedMatchExpression inRegexAbc("{x: {$in: [/abc/]}}");
+ ParsedMatchExpression inRegexAOrEq1("{x: {$in: [/a/, 1]}}");
+ ParsedMatchExpression inRegexAOrNull("{x: {$in: [/a/, null]}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(inRegexA.get(), inRegexA.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inRegexAbc.get(), inRegexA.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inRegexA.get(), inRegexAOrEq1.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inRegexAOrEq1.get(), eq1.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inRegexA.get(), eqA.get()));
+ ASSERT_FALSE(expression::isSubsetOf(inRegexAOrNull.get(), eqA.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Exists) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression bExists("{b: {$exists: true}}");
+ ParsedMatchExpression aExistsBExists("{a: {$exists: true}, b: {$exists: true}}");
+ ParsedMatchExpression aExistsBExistsC5("{a: {$exists: true}, b: {$exists: true}, c: 5}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aExists.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aExists.get(), bExists.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(aExistsBExists.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aExistsBExists.get(), bExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aExistsBExists.get(), aExistsBExistsC5.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(aExistsBExistsC5.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aExistsBExistsC5.get(), bExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aExistsBExistsC5.get(), aExistsBExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_Exists) {
+ ParsedMatchExpression exists("{a: {$exists: true}}");
+ ParsedMatchExpression eq("{a: 1}");
+ ParsedMatchExpression gt("{a: {$gt: 4}}");
+ ParsedMatchExpression lte("{a: {$lte: 7}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(eq.get(), exists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(gt.get(), exists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(lte.get(), exists.get()));
+
+ ASSERT_FALSE(expression::isSubsetOf(exists.get(), eq.get()));
+ ASSERT_FALSE(expression::isSubsetOf(exists.get(), gt.get()));
+ ASSERT_FALSE(expression::isSubsetOf(exists.get(), lte.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Type) {
+ ParsedMatchExpression aType1("{a: {$type: 1}}");
+ ParsedMatchExpression aType2("{a: {$type: 2}}");
+ ParsedMatchExpression bType2("{b: {$type: 2}}");
+
+ ASSERT_FALSE(expression::isSubsetOf(aType1.get(), aType2.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aType2.get(), aType1.get()));
+
+ ASSERT_TRUE(expression::isSubsetOf(aType2.get(), aType2.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aType2.get(), bType2.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, TypeAndExists) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aType2("{a: {$type: 2}}");
+ ParsedMatchExpression bType2("{b: {$type: 2}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aType2.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aExists.get(), aType2.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bType2.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, AllAndExists) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aAll("{a: {$all: ['x', 'y', 'z']}}");
+ ParsedMatchExpression bAll("{b: {$all: ['x', 'y', 'z']}}");
+ ParsedMatchExpression aAllWithNull("{a: {$all: ['x', null, 'z']}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aAll.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bAll.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aAllWithNull.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, ElemMatchAndExists_Value) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aElemMatch("{a: {$elemMatch: {$gt: 5, $lte: 10}}}");
+ ParsedMatchExpression bElemMatch("{b: {$elemMatch: {$gt: 5, $lte: 10}}}");
+ ParsedMatchExpression aElemMatchNull("{a: {$elemMatch: {$eq: null}}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aElemMatch.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aExists.get(), aElemMatch.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bElemMatch.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aElemMatchNull.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, ElemMatchAndExists_Object) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aElemMatch("{a: {$elemMatch: {x: {$gt: 5}, y: {$lte: 10}}}}");
+ ParsedMatchExpression bElemMatch("{b: {$elemMatch: {x: {$gt: 5}, y: {$lte: 10}}}}");
+ ParsedMatchExpression aElemMatchNull("{a: {$elemMatch: {x: null, y: null}}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aElemMatch.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aExists.get(), aElemMatch.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bElemMatch.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aElemMatchNull.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, SizeAndExists) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aSize0("{a: {$size: 0}}");
+ ParsedMatchExpression aSize1("{a: {$size: 1}}");
+ ParsedMatchExpression aSize3("{a: {$size: 3}}");
+ ParsedMatchExpression bSize3("{b: {$size: 3}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aSize0.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aSize1.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aSize3.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aExists.get(), aSize3.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bSize3.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, ModAndExists) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aMod5("{a: {$mod: [5, 0]}}");
+ ParsedMatchExpression bMod5("{b: {$mod: [5, 0]}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aMod5.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bMod5.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, RegexAndExists) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aRegex("{a: {$regex: 'pattern'}}");
+ ParsedMatchExpression bRegex("{b: {$regex: 'pattern'}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aRegex.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bRegex.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, InAndExists) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aIn("{a: {$in: [1, 2, 3]}}");
+ ParsedMatchExpression bIn("{b: {$in: [1, 2, 3]}}");
+ ParsedMatchExpression aInWithNull("{a: {$in: [1, null, 3]}}");
+
+ ASSERT_TRUE(expression::isSubsetOf(aIn.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bIn.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(aInWithNull.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, NinAndExists) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aNin("{a: {$nin: [1, 2, 3]}}");
+ ParsedMatchExpression bNin("{b: {$nin: [1, 2, 3]}}");
+ ParsedMatchExpression aNinWithNull("{a: {$nin: [1, null, 3]}}");
+
+ ASSERT_FALSE(expression::isSubsetOf(aNin.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bNin.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aNinWithNull.get(), aExists.get()));
+}
+
+TEST(ExpressionAlgoIsSubsetOf, Compare_Exists_NE) {
+ ParsedMatchExpression aExists("{a: {$exists: true}}");
+ ParsedMatchExpression aNotEqual1("{a: {$ne: 1}}");
+ ParsedMatchExpression bNotEqual1("{b: {$ne: 1}}");
+ ParsedMatchExpression aNotEqualNull("{a: {$ne: null}}");
+
+ ASSERT_FALSE(expression::isSubsetOf(aNotEqual1.get(), aExists.get()));
+ ASSERT_FALSE(expression::isSubsetOf(bNotEqual1.get(), aExists.get()));
+ ASSERT_TRUE(expression::isSubsetOf(aNotEqualNull.get(), aExists.get()));
+}
} // namespace mongo
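
Taken together, the isSubsetOf tests above pin down one contract: expression::isSubsetOf(lhs, rhs) returns true only when every document matched by lhs is also matched by rhs, and it falls back to false whenever containment cannot be proven (different field names, mismatched canonical types, or null appearing in $in/$ne operands). The sketch below shows how a caller might lean on that contract, for example to decide whether a hypothetical filter already guarantees a query predicate; it reuses only the API exercised above, is illustrative rather than part of this patch, and its include paths are assumed from the matcher directory.

    #include "mongo/db/matcher/expression.h"
    #include "mongo/db/matcher/expression_algo.h"

    namespace mongo {
    // Sketch only: true when the documents matched by 'query' are provably a
    // subset of the documents matched by 'filter'.
    bool queryImpliesFilter(const MatchExpression* query, const MatchExpression* filter) {
        // isSubsetOf(a, b) == true  =>  every document matching a also matches b.
        return expression::isSubsetOf(query, filter);
    }
    }  // namespace mongo

With the fixtures above, queryImpliesFilter(inAllLt.get(), lt.get()) would return true, while queryImpliesFilter(inStraddle.get(), lt.get()) would return false because 6 falls outside {a: {$lt: 5}}.
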
diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp
index 265ac57956c..4ca68653e3d 100644
--- a/src/mongo/db/matcher/expression_array.cpp
+++ b/src/mongo/db/matcher/expression_array.cpp
@@ -35,224 +35,222 @@
namespace mongo {
- Status ArrayMatchingMatchExpression::initPath( StringData path ) {
- _path = path;
- Status s = _elementPath.init( _path );
- _elementPath.setTraverseLeafArray( false );
- return s;
- }
+Status ArrayMatchingMatchExpression::initPath(StringData path) {
+ _path = path;
+ Status s = _elementPath.init(_path);
+ _elementPath.setTraverseLeafArray(false);
+ return s;
+}
- bool ArrayMatchingMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
- MatchableDocument::IteratorHolder cursor( doc, &_elementPath );
+bool ArrayMatchingMatchExpression::matches(const MatchableDocument* doc,
+ MatchDetails* details) const {
+ MatchableDocument::IteratorHolder cursor(doc, &_elementPath);
- while ( cursor->more() ) {
- ElementIterator::Context e = cursor->next();
- if ( e.element().type() != Array )
- continue;
+ while (cursor->more()) {
+ ElementIterator::Context e = cursor->next();
+ if (e.element().type() != Array)
+ continue;
- bool amIRoot = e.arrayOffset().eoo();
+ bool amIRoot = e.arrayOffset().eoo();
- if ( !matchesArray( e.element().Obj(), amIRoot ? details : NULL ) )
- continue;
+ if (!matchesArray(e.element().Obj(), amIRoot ? details : NULL))
+ continue;
- if ( !amIRoot && details && details->needRecord() && !e.arrayOffset().eoo() ) {
- details->setElemMatchKey( e.arrayOffset().fieldName() );
- }
- return true;
+ if (!amIRoot && details && details->needRecord() && !e.arrayOffset().eoo()) {
+ details->setElemMatchKey(e.arrayOffset().fieldName());
}
- return false;
+ return true;
}
+ return false;
+}
- bool ArrayMatchingMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- if ( e.type() != Array )
- return false;
- return matchesArray( e.Obj(), NULL );
- }
+bool ArrayMatchingMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ if (e.type() != Array)
+ return false;
+ return matchesArray(e.Obj(), NULL);
+}
- bool ArrayMatchingMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+bool ArrayMatchingMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
- const ArrayMatchingMatchExpression* realOther =
- static_cast<const ArrayMatchingMatchExpression*>( other );
+ const ArrayMatchingMatchExpression* realOther =
+ static_cast<const ArrayMatchingMatchExpression*>(other);
- if ( _path != realOther->_path )
- return false;
+ if (_path != realOther->_path)
+ return false;
- if ( numChildren() != realOther->numChildren() )
- return false;
+ if (numChildren() != realOther->numChildren())
+ return false;
- for ( unsigned i = 0; i < numChildren(); i++ )
- if ( !getChild(i)->equivalent( realOther->getChild(i) ) )
- return false;
- return true;
- }
+ for (unsigned i = 0; i < numChildren(); i++)
+ if (!getChild(i)->equivalent(realOther->getChild(i)))
+ return false;
+ return true;
+}
- // -------
+// -------
- Status ElemMatchObjectMatchExpression::init( StringData path, MatchExpression* sub ) {
- _sub.reset( sub );
- return initPath( path );
- }
+Status ElemMatchObjectMatchExpression::init(StringData path, MatchExpression* sub) {
+ _sub.reset(sub);
+ return initPath(path);
+}
- bool ElemMatchObjectMatchExpression::matchesArray( const BSONObj& anArray, MatchDetails* details ) const {
- BSONObjIterator i( anArray );
- while ( i.more() ) {
- BSONElement inner = i.next();
- if ( !inner.isABSONObj() )
- continue;
- if ( _sub->matchesBSON( inner.Obj(), NULL ) ) {
- if ( details && details->needRecord() ) {
- details->setElemMatchKey( inner.fieldName() );
- }
- return true;
+bool ElemMatchObjectMatchExpression::matchesArray(const BSONObj& anArray,
+ MatchDetails* details) const {
+ BSONObjIterator i(anArray);
+ while (i.more()) {
+ BSONElement inner = i.next();
+ if (!inner.isABSONObj())
+ continue;
+ if (_sub->matchesBSON(inner.Obj(), NULL)) {
+ if (details && details->needRecord()) {
+ details->setElemMatchKey(inner.fieldName());
}
+ return true;
}
- return false;
}
+ return false;
+}
- void ElemMatchObjectMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << path() << " $elemMatch (obj)";
+void ElemMatchObjectMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << path() << " $elemMatch (obj)";
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
- debug << "\n";
- _sub->debugString( debug, level + 1 );
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+ debug << "\n";
+ _sub->debugString(debug, level + 1);
+}
- void ElemMatchObjectMatchExpression::toBSON(BSONObjBuilder* out) const {
- BSONObjBuilder subBob;
- _sub->toBSON(&subBob);
- if (path().empty()) {
- out->append("$elemMatch", subBob.obj());
- }
- else {
- out->append(path(), BSON("$elemMatch" << subBob.obj()));
- }
+void ElemMatchObjectMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder subBob;
+ _sub->toBSON(&subBob);
+ if (path().empty()) {
+ out->append("$elemMatch", subBob.obj());
+ } else {
+ out->append(path(), BSON("$elemMatch" << subBob.obj()));
}
+}
- // -------
+// -------
- ElemMatchValueMatchExpression::~ElemMatchValueMatchExpression() {
- for ( unsigned i = 0; i < _subs.size(); i++ )
- delete _subs[i];
- _subs.clear();
- }
+ElemMatchValueMatchExpression::~ElemMatchValueMatchExpression() {
+ for (unsigned i = 0; i < _subs.size(); i++)
+ delete _subs[i];
+ _subs.clear();
+}
- Status ElemMatchValueMatchExpression::init( StringData path, MatchExpression* sub ) {
- init( path );
- add( sub );
- return Status::OK();
- }
+Status ElemMatchValueMatchExpression::init(StringData path, MatchExpression* sub) {
+ init(path);
+ add(sub);
+ return Status::OK();
+}
- Status ElemMatchValueMatchExpression::init( StringData path ) {
- return initPath( path );
- }
+Status ElemMatchValueMatchExpression::init(StringData path) {
+ return initPath(path);
+}
- void ElemMatchValueMatchExpression::add( MatchExpression* sub ) {
- verify( sub );
- _subs.push_back( sub );
- }
+void ElemMatchValueMatchExpression::add(MatchExpression* sub) {
+ verify(sub);
+ _subs.push_back(sub);
+}
- bool ElemMatchValueMatchExpression::matchesArray( const BSONObj& anArray, MatchDetails* details ) const {
- BSONObjIterator i( anArray );
- while ( i.more() ) {
- BSONElement inner = i.next();
+bool ElemMatchValueMatchExpression::matchesArray(const BSONObj& anArray,
+ MatchDetails* details) const {
+ BSONObjIterator i(anArray);
+ while (i.more()) {
+ BSONElement inner = i.next();
- if ( _arrayElementMatchesAll( inner ) ) {
- if ( details && details->needRecord() ) {
- details->setElemMatchKey( inner.fieldName() );
- }
- return true;
+ if (_arrayElementMatchesAll(inner)) {
+ if (details && details->needRecord()) {
+ details->setElemMatchKey(inner.fieldName());
}
+ return true;
}
- return false;
}
+ return false;
+}
- bool ElemMatchValueMatchExpression::_arrayElementMatchesAll( const BSONElement& e ) const {
- for ( unsigned i = 0; i < _subs.size(); i++ ) {
- if ( !_subs[i]->matchesSingleElement( e ) )
- return false;
- }
- return true;
+bool ElemMatchValueMatchExpression::_arrayElementMatchesAll(const BSONElement& e) const {
+ for (unsigned i = 0; i < _subs.size(); i++) {
+ if (!_subs[i]->matchesSingleElement(e))
+ return false;
}
+ return true;
+}
- void ElemMatchValueMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << path() << " $elemMatch (value)";
+void ElemMatchValueMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << path() << " $elemMatch (value)";
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
- debug << "\n";
- for ( unsigned i = 0; i < _subs.size(); i++ ) {
- _subs[i]->debugString( debug, level + 1 );
- }
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
-
- void ElemMatchValueMatchExpression::toBSON(BSONObjBuilder* out) const {
- BSONObjBuilder emBob;
- for ( unsigned i = 0; i < _subs.size(); i++ ) {
- _subs[i]->toBSON(&emBob);
- }
- if (path().empty()) {
- out->append("$elemMatch", emBob.obj());
- }
- else {
- out->append(path(), BSON("$elemMatch" << emBob.obj()));
- }
+ debug << "\n";
+ for (unsigned i = 0; i < _subs.size(); i++) {
+ _subs[i]->debugString(debug, level + 1);
}
+}
-
- // ---------
-
- Status SizeMatchExpression::init( StringData path, int size ) {
- _size = size;
- return initPath( path );
+void ElemMatchValueMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder emBob;
+ for (unsigned i = 0; i < _subs.size(); i++) {
+ _subs[i]->toBSON(&emBob);
}
-
- bool SizeMatchExpression::matchesArray( const BSONObj& anArray, MatchDetails* details ) const {
- if ( _size < 0 )
- return false;
- return anArray.nFields() == _size;
+ if (path().empty()) {
+ out->append("$elemMatch", emBob.obj());
+ } else {
+ out->append(path(), BSON("$elemMatch" << emBob.obj()));
}
+}
- void SizeMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << path() << " $size : " << _size << "\n";
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
- }
+// ---------
- void SizeMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append(path(), BSON("$size" << _size));
- }
+Status SizeMatchExpression::init(StringData path, int size) {
+ _size = size;
+ return initPath(path);
+}
- bool SizeMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+bool SizeMatchExpression::matchesArray(const BSONObj& anArray, MatchDetails* details) const {
+ if (_size < 0)
+ return false;
+ return anArray.nFields() == _size;
+}
+
+void SizeMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << path() << " $size : " << _size << "\n";
- const SizeMatchExpression* realOther = static_cast<const SizeMatchExpression*>( other );
- return path() == realOther->path() && _size == realOther->_size;
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+}
+void SizeMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append(path(), BSON("$size" << _size));
+}
- // ------------------
+bool SizeMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
+ const SizeMatchExpression* realOther = static_cast<const SizeMatchExpression*>(other);
+ return path() == realOther->path() && _size == realOther->_size;
+}
+// ------------------
}
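
The reformatted matches()/matchesArray() implementations above keep the original control flow: walk the element path, skip any value that is not an array, hand each array element to the sub-expression, and record the matching element's array offset through MatchDetails::setElemMatchKey(). The snippet below is a minimal sketch of that flow, assembled only from calls the tests further down exercise (EqualityMatchExpression::init, ElemMatchObjectMatchExpression::init, matchesBSON, MatchDetails); it is illustrative, not code from this change, and the include paths are assumptions.

    #include <memory>

    #include "mongo/db/matcher/expression_array.h"
    #include "mongo/db/matcher/expression_leaf.h"

    namespace mongo {
    // Sketch: build {a: {$elemMatch: {b: 5}}} by hand and evaluate it.
    bool elemMatchSketch() {
        BSONObj operand = BSON("b" << 5);
        std::unique_ptr<EqualityMatchExpression> eq(new EqualityMatchExpression());
        if (!eq->init("b", operand["b"]).isOK())
            return false;

        ElemMatchObjectMatchExpression elemMatch;
        if (!elemMatch.init("a", eq.release()).isOK())
            return false;

        MatchDetails details;
        details.requestElemMatchKey();
        BSONObj doc = BSON("a" << BSON_ARRAY(BSON("b" << 6) << BSON("b" << 5)));
        bool matched = elemMatch.matchesBSON(doc, &details);
        // matched is true and details.elemMatchKey() is "1": the second array
        // element is the first one that satisfies the sub-expression {b: 5}.
        return matched;
    }
    }  // namespace mongo
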
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index 4bc14230eb9..f9bcfd53167 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -40,124 +40,137 @@
namespace mongo {
- class ArrayMatchingMatchExpression : public MatchExpression {
- public:
- ArrayMatchingMatchExpression( MatchType matchType ) : MatchExpression( matchType ){}
- virtual ~ArrayMatchingMatchExpression(){}
+class ArrayMatchingMatchExpression : public MatchExpression {
+public:
+ ArrayMatchingMatchExpression(MatchType matchType) : MatchExpression(matchType) {}
+ virtual ~ArrayMatchingMatchExpression() {}
- Status initPath( StringData path );
+ Status initPath(StringData path);
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details ) const;
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details) const;
- /**
- * @param e - has to be an array. calls matchesArray with e as an array
- */
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ /**
+     * @param e - must be an array; calls matchesArray() with e as the array.
+ */
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- virtual bool matchesArray( const BSONObj& anArray, MatchDetails* details ) const = 0;
+ virtual bool matchesArray(const BSONObj& anArray, MatchDetails* details) const = 0;
- bool equivalent( const MatchExpression* other ) const;
+ bool equivalent(const MatchExpression* other) const;
- const StringData path() const { return _path; }
+ const StringData path() const {
+ return _path;
+ }
- private:
- StringData _path;
- ElementPath _elementPath;
- };
+private:
+ StringData _path;
+ ElementPath _elementPath;
+};
- class ElemMatchObjectMatchExpression : public ArrayMatchingMatchExpression {
- public:
- ElemMatchObjectMatchExpression() : ArrayMatchingMatchExpression( ELEM_MATCH_OBJECT ){}
- Status init( StringData path, MatchExpression* sub );
+class ElemMatchObjectMatchExpression : public ArrayMatchingMatchExpression {
+public:
+ ElemMatchObjectMatchExpression() : ArrayMatchingMatchExpression(ELEM_MATCH_OBJECT) {}
+ Status init(StringData path, MatchExpression* sub);
- bool matchesArray( const BSONObj& anArray, MatchDetails* details ) const;
+ bool matchesArray(const BSONObj& anArray, MatchDetails* details) const;
- virtual ElemMatchObjectMatchExpression* shallowClone() const {
- ElemMatchObjectMatchExpression* e = new ElemMatchObjectMatchExpression();
- e->init(path(), _sub->shallowClone());
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+ virtual ElemMatchObjectMatchExpression* shallowClone() const {
+ ElemMatchObjectMatchExpression* e = new ElemMatchObjectMatchExpression();
+ e->init(path(), _sub->shallowClone());
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
+ return e;
+ }
- virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void debugString(StringBuilder& debug, int level) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual size_t numChildren() const { return 1; }
+ virtual size_t numChildren() const {
+ return 1;
+ }
- virtual MatchExpression* getChild( size_t i ) const { return _sub.get(); }
+ virtual MatchExpression* getChild(size_t i) const {
+ return _sub.get();
+ }
- private:
- std::unique_ptr<MatchExpression> _sub;
- };
+private:
+ std::unique_ptr<MatchExpression> _sub;
+};
- class ElemMatchValueMatchExpression : public ArrayMatchingMatchExpression {
- public:
- ElemMatchValueMatchExpression() : ArrayMatchingMatchExpression( ELEM_MATCH_VALUE ){}
- virtual ~ElemMatchValueMatchExpression();
+class ElemMatchValueMatchExpression : public ArrayMatchingMatchExpression {
+public:
+ ElemMatchValueMatchExpression() : ArrayMatchingMatchExpression(ELEM_MATCH_VALUE) {}
+ virtual ~ElemMatchValueMatchExpression();
- Status init( StringData path );
- Status init( StringData path, MatchExpression* sub );
- void add( MatchExpression* sub );
+ Status init(StringData path);
+ Status init(StringData path, MatchExpression* sub);
+ void add(MatchExpression* sub);
- bool matchesArray( const BSONObj& anArray, MatchDetails* details ) const;
+ bool matchesArray(const BSONObj& anArray, MatchDetails* details) const;
- virtual ElemMatchValueMatchExpression* shallowClone() const {
- ElemMatchValueMatchExpression* e = new ElemMatchValueMatchExpression();
- e->init(path());
- for (size_t i = 0; i < _subs.size(); ++i) {
- e->add(_subs[i]->shallowClone());
- }
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+ virtual ElemMatchValueMatchExpression* shallowClone() const {
+ ElemMatchValueMatchExpression* e = new ElemMatchValueMatchExpression();
+ e->init(path());
+ for (size_t i = 0; i < _subs.size(); ++i) {
+ e->add(_subs[i]->shallowClone());
}
+ if (getTag()) {
+ e->setTag(getTag()->clone());
+ }
+ return e;
+ }
- virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void debugString(StringBuilder& debug, int level) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual std::vector<MatchExpression*>* getChildVector() { return &_subs; }
+ virtual std::vector<MatchExpression*>* getChildVector() {
+ return &_subs;
+ }
- virtual size_t numChildren() const { return _subs.size(); }
+ virtual size_t numChildren() const {
+ return _subs.size();
+ }
- virtual MatchExpression* getChild( size_t i ) const { return _subs[i]; }
+ virtual MatchExpression* getChild(size_t i) const {
+ return _subs[i];
+ }
- private:
- bool _arrayElementMatchesAll( const BSONElement& e ) const;
+private:
+ bool _arrayElementMatchesAll(const BSONElement& e) const;
- std::vector<MatchExpression*> _subs;
- };
+ std::vector<MatchExpression*> _subs;
+};
- class SizeMatchExpression : public ArrayMatchingMatchExpression {
- public:
- SizeMatchExpression() : ArrayMatchingMatchExpression( SIZE ){}
- Status init( StringData path, int size );
+class SizeMatchExpression : public ArrayMatchingMatchExpression {
+public:
+ SizeMatchExpression() : ArrayMatchingMatchExpression(SIZE) {}
+ Status init(StringData path, int size);
- virtual SizeMatchExpression* shallowClone() const {
- SizeMatchExpression* e = new SizeMatchExpression();
- e->init(path(), _size);
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+ virtual SizeMatchExpression* shallowClone() const {
+ SizeMatchExpression* e = new SizeMatchExpression();
+ e->init(path(), _size);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
+ return e;
+ }
- virtual bool matchesArray( const BSONObj& anArray, MatchDetails* details ) const;
-
- virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual bool matchesArray(const BSONObj& anArray, MatchDetails* details) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual void debugString(StringBuilder& debug, int level) const;
- virtual bool equivalent( const MatchExpression* other ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- int getData() const { return _size; }
+ virtual bool equivalent(const MatchExpression* other) const;
- private:
- int _size; // >= 0 real, < 0, nothing will match
- };
+ int getData() const {
+ return _size;
+ }
+private:
+    int _size;  // >= 0 is the required array size; < 0 means nothing will match
+};
}
diff --git a/src/mongo/db/matcher/expression_array_test.cpp b/src/mongo/db/matcher/expression_array_test.cpp
index 891d12752f6..201d11fbed4 100644
--- a/src/mongo/db/matcher/expression_array_test.cpp
+++ b/src/mongo/db/matcher/expression_array_test.cpp
@@ -38,448 +38,422 @@
namespace mongo {
- using std::unique_ptr;
-
- TEST( ElemMatchObjectMatchExpression, MatchesElementSingle ) {
- BSONObj baseOperand = BSON( "b" << 5 );
- BSONObj match = BSON( "a" << BSON_ARRAY( BSON( "b" << 5.0 ) ) );
- BSONObj notMatch = BSON( "a" << BSON_ARRAY( BSON( "b" << 6 ) ) );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "b", baseOperand[ "b" ] ).isOK() );
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a", eq.release() ).isOK() );
- ASSERT( op.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !op.matchesSingleElement( notMatch[ "a" ] ) );
- }
-
- TEST( ElemMatchObjectMatchExpression, MatchesElementArray ) {
- BSONObj baseOperand = BSON( "1" << 5 );
- BSONObj match = BSON( "a" << BSON_ARRAY( BSON_ARRAY( 's' << 5.0 ) ) );
- BSONObj notMatch = BSON( "a" << BSON_ARRAY( BSON_ARRAY( 5 << 6 ) ) );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "1", baseOperand[ "1" ] ).isOK() );
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a", eq.release() ).isOK() );
- ASSERT( op.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !op.matchesSingleElement( notMatch[ "a" ] ) );
- }
-
- TEST( ElemMatchObjectMatchExpression, MatchesElementMultiple ) {
- BSONObj baseOperand1 = BSON( "b" << 5 );
- BSONObj baseOperand2 = BSON( "b" << 6 );
- BSONObj baseOperand3 = BSON( "c" << 7 );
- BSONObj notMatch1 = BSON( "a" << BSON_ARRAY( BSON( "b" << 5 << "c" << 7 ) ) );
- BSONObj notMatch2 = BSON( "a" << BSON_ARRAY( BSON( "b" << 6 << "c" << 7 ) ) );
- BSONObj notMatch3 = BSON( "a" << BSON_ARRAY( BSON( "b" << BSON_ARRAY( 5 << 6 ) ) ) );
- BSONObj match =
- BSON( "a" << BSON_ARRAY( BSON( "b" << BSON_ARRAY( 5 << 6 ) << "c" << 7 ) ) );
- unique_ptr<ComparisonMatchExpression> eq1( new EqualityMatchExpression() );
- ASSERT( eq1->init( "b", baseOperand1[ "b" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> eq2( new EqualityMatchExpression() );
- ASSERT( eq2->init( "b", baseOperand2[ "b" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> eq3( new EqualityMatchExpression() );
- ASSERT( eq3->init( "c", baseOperand3[ "c" ] ).isOK() );
-
- unique_ptr<AndMatchExpression> andOp( new AndMatchExpression() );
- andOp->add( eq1.release() );
- andOp->add( eq2.release() );
- andOp->add( eq3.release() );
-
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a", andOp.release() ).isOK() );
- ASSERT( !op.matchesSingleElement( notMatch1[ "a" ] ) );
- ASSERT( !op.matchesSingleElement( notMatch2[ "a" ] ) );
- ASSERT( !op.matchesSingleElement( notMatch3[ "a" ] ) );
- ASSERT( op.matchesSingleElement( match[ "a" ] ) );
- }
-
- TEST( ElemMatchObjectMatchExpression, MatchesNonArray ) {
- BSONObj baseOperand = BSON( "b" << 5 );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "b", baseOperand[ "b" ] ).isOK() );
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a", eq.release() ).isOK() );
- // Directly nested objects are not matched with $elemMatch. An intervening array is
- // required.
- ASSERT( !op.matchesBSON( BSON( "a" << BSON( "b" << 5 ) ), NULL ) );
- ASSERT( !op.matchesBSON( BSON( "a" << BSON( "0" << ( BSON( "b" << 5 ) ) ) ), NULL ) );
- ASSERT( !op.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( ElemMatchObjectMatchExpression, MatchesArrayObject ) {
- BSONObj baseOperand = BSON( "b" << 5 );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "b", baseOperand[ "b" ] ).isOK() );
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a", eq.release() ).isOK() );
- ASSERT( op.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << 5 ) ) ), NULL ) );
- ASSERT( op.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << BSON( "b" << 5 ) ) ), NULL ) );
- ASSERT( op.matchesBSON( BSON( "a" << BSON_ARRAY( BSONObj() << BSON( "b" << 5 ) ) ), NULL ) );
- ASSERT( op.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << 6 ) << BSON( "b" << 5 ) ) ),
- NULL ) );
- }
-
- TEST( ElemMatchObjectMatchExpression, MatchesMultipleNamedValues ) {
- BSONObj baseOperand = BSON( "c" << 5 );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "c", baseOperand[ "c" ] ).isOK() );
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a.b", eq.release() ).isOK() );
- ASSERT( op.matchesBSON( BSON( "a" <<
- BSON_ARRAY( BSON( "b" <<
- BSON_ARRAY( BSON( "c" <<
- 5 ) ) ) ) ),
- NULL ) );
- ASSERT( op.matchesBSON( BSON( "a" <<
- BSON_ARRAY( BSON( "b" <<
- BSON_ARRAY( BSON( "c" <<
- 1 ) ) ) <<
- BSON( "b" <<
- BSON_ARRAY( BSON( "c" <<
- 5 ) ) ) ) ),
- NULL ) );
- }
-
- TEST( ElemMatchObjectMatchExpression, ElemMatchKey ) {
- BSONObj baseOperand = BSON( "c" << 6 );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "c", baseOperand[ "c" ] ).isOK() );
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a.b", eq.release() ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !op.matchesBSON( BSONObj(), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( !op.matchesBSON( BSON( "a" << BSON( "b" << BSON_ARRAY( BSON( "c" << 7 ) ) ) ),
- &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( op.matchesBSON( BSON( "a" << BSON( "b" << BSON_ARRAY( 3 << BSON( "c" << 6 ) ) ) ),
- &details ) );
- ASSERT( details.hasElemMatchKey() );
- // The entry within the $elemMatch array is reported.
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- ASSERT( op.matchesBSON( BSON( "a" <<
- BSON_ARRAY( 1 << 2 <<
- BSON( "b" << BSON_ARRAY( 3 <<
- 5 <<
- BSON( "c" << 6 ) ) ) ) ),
- &details ) );
- ASSERT( details.hasElemMatchKey() );
- // The entry within a parent of the $elemMatch array is reported.
- ASSERT_EQUALS( "2", details.elemMatchKey() );
- }
-
- /**
- TEST( ElemMatchObjectMatchExpression, MatchesIndexKey ) {
- BSONObj baseOperand = BSON( "b" << 5 );
- unique_ptr<ComparisonMatchExpression> eq( new ComparisonMatchExpression() );
- ASSERT( eq->init( "b", baseOperand[ "b" ] ).isOK() );
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a", eq.release() ).isOK() );
- IndexSpec indexSpec( BSON( "a.b" << 1 ) );
- BSONObj indexKey = BSON( "" << "5" );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- op.matchesIndexKey( indexKey, indexSpec ) );
- }
- */
-
- TEST( ElemMatchValueMatchExpression, MatchesElementSingle ) {
- BSONObj baseOperand = BSON( "$gt" << 5 );
- BSONObj match = BSON( "a" << BSON_ARRAY( 6 ) );
- BSONObj notMatch = BSON( "a" << BSON_ARRAY( 4 ) );
- unique_ptr<ComparisonMatchExpression> gt( new GTMatchExpression() );
- ASSERT( gt->init( "", baseOperand[ "$gt" ] ).isOK() );
- ElemMatchValueMatchExpression op;
- ASSERT( op.init( "a", gt.release() ).isOK() );
- ASSERT( op.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !op.matchesSingleElement( notMatch[ "a" ] ) );
- }
-
- TEST( ElemMatchValueMatchExpression, MatchesElementMultiple ) {
- BSONObj baseOperand1 = BSON( "$gt" << 1 );
- BSONObj baseOperand2 = BSON( "$lt" << 10 );
- BSONObj notMatch1 = BSON( "a" << BSON_ARRAY( 0 << 1 ) );
- BSONObj notMatch2 = BSON( "a" << BSON_ARRAY( 10 << 11 ) );
- BSONObj match = BSON( "a" << BSON_ARRAY( 0 << 5 << 11 ) );
- unique_ptr<ComparisonMatchExpression> gt( new GTMatchExpression() );
- ASSERT( gt->init( "", baseOperand1[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> lt( new LTMatchExpression() );
- ASSERT( lt->init( "", baseOperand2[ "$lt" ] ).isOK() );
-
- ElemMatchValueMatchExpression op;
- ASSERT( op.init( "a" ).isOK() );
- op.add( gt.release() );
- op.add( lt.release() );
-
- ASSERT( !op.matchesSingleElement( notMatch1[ "a" ] ) );
- ASSERT( !op.matchesSingleElement( notMatch2[ "a" ] ) );
- ASSERT( op.matchesSingleElement( match[ "a" ] ) );
- }
-
- TEST( ElemMatchValueMatchExpression, MatchesNonArray ) {
- BSONObj baseOperand = BSON( "$gt" << 5 );
- unique_ptr<ComparisonMatchExpression> gt( new GTMatchExpression() );
- ASSERT( gt->init( "", baseOperand[ "$gt" ] ).isOK() );
- ElemMatchObjectMatchExpression op;
- ASSERT( op.init( "a", gt.release() ).isOK() );
- // Directly nested objects are not matched with $elemMatch. An intervening array is
- // required.
- ASSERT( !op.matchesBSON( BSON( "a" << 6 ), NULL ) );
- ASSERT( !op.matchesBSON( BSON( "a" << BSON( "0" << 6 ) ), NULL ) );
- }
-
- TEST( ElemMatchValueMatchExpression, MatchesArrayScalar ) {
- BSONObj baseOperand = BSON( "$gt" << 5 );
- unique_ptr<ComparisonMatchExpression> gt( new GTMatchExpression() );
- ASSERT( gt->init( "", baseOperand[ "$gt" ] ).isOK() );
- ElemMatchValueMatchExpression op;
- ASSERT( op.init( "a", gt.release() ).isOK() );
- ASSERT( op.matchesBSON( BSON( "a" << BSON_ARRAY( 6 ) ), NULL ) );
- ASSERT( op.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 6 ) ), NULL ) );
- ASSERT( op.matchesBSON( BSON( "a" << BSON_ARRAY( BSONObj() << 7 ) ), NULL ) );
- }
-
- TEST( ElemMatchValueMatchExpression, MatchesMultipleNamedValues ) {
- BSONObj baseOperand = BSON( "$gt" << 5 );
- unique_ptr<ComparisonMatchExpression> gt( new GTMatchExpression() );
- ASSERT( gt->init( "", baseOperand[ "$gt" ] ).isOK() );
- ElemMatchValueMatchExpression op;
- ASSERT( op.init( "a.b", gt.release() ).isOK() );
- ASSERT( op.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << BSON_ARRAY( 6 ) ) ) ), NULL ) );
- ASSERT( op.matchesBSON( BSON( "a" <<
- BSON_ARRAY( BSON( "b" << BSON_ARRAY( 4 ) ) <<
- BSON( "b" << BSON_ARRAY( 4 << 6 ) ) ) ),
- NULL ) );
- }
-
- TEST( ElemMatchValueMatchExpression, ElemMatchKey ) {
- BSONObj baseOperand = BSON( "$gt" << 6 );
- unique_ptr<ComparisonMatchExpression> gt( new GTMatchExpression() );
- ASSERT( gt->init( "", baseOperand[ "$gt" ] ).isOK() );
- ElemMatchValueMatchExpression op;
- ASSERT( op.init( "a.b", gt.release() ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !op.matchesBSON( BSONObj(), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( !op.matchesBSON( BSON( "a" << BSON( "b" << BSON_ARRAY( 2 ) ) ),
- &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( op.matchesBSON( BSON( "a" << BSON( "b" << BSON_ARRAY( 3 << 7 ) ) ),
- &details ) );
- ASSERT( details.hasElemMatchKey() );
- // The entry within the $elemMatch array is reported.
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- ASSERT( op.matchesBSON( BSON( "a" <<
- BSON_ARRAY( 1 << 2 <<
- BSON( "b" << BSON_ARRAY( 3 << 7 ) ) ) ),
- &details ) );
- ASSERT( details.hasElemMatchKey() );
- // The entry within a parent of the $elemMatch array is reported.
- ASSERT_EQUALS( "2", details.elemMatchKey() );
- }
-
- /**
- TEST( ElemMatchValueMatchExpression, MatchesIndexKey ) {
- BSONObj baseOperand = BSON( "$lt" << 5 );
- unique_ptr<LtOp> lt( new ComparisonMatchExpression() );
- ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- ElemMatchValueMatchExpression op;
- ASSERT( op.init( "a", lt.release() ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- BSONObj indexKey = BSON( "" << "3" );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- op.matchesIndexKey( indexKey, indexSpec ) );
- }
- */
-
- TEST( AndOfElemMatch, MatchesElement ) {
-
-
- BSONObj baseOperanda1 = BSON( "a" << 1 );
- unique_ptr<ComparisonMatchExpression> eqa1( new EqualityMatchExpression() );
- ASSERT( eqa1->init( "a", baseOperanda1[ "a" ] ).isOK() );
-
- BSONObj baseOperandb1 = BSON( "b" << 1 );
- unique_ptr<ComparisonMatchExpression> eqb1( new EqualityMatchExpression() );
- ASSERT( eqb1->init( "b", baseOperandb1[ "b" ] ).isOK() );
-
- unique_ptr<AndMatchExpression> and1( new AndMatchExpression() );
- and1->add( eqa1.release() );
- and1->add( eqb1.release() );
- // and1 = { a : 1, b : 1 }
-
- unique_ptr<ElemMatchObjectMatchExpression> elemMatch1( new ElemMatchObjectMatchExpression() );
- elemMatch1->init( "x", and1.release() );
- // elemMatch1 = { x : { $elemMatch : { a : 1, b : 1 } } }
-
- BSONObj baseOperanda2 = BSON( "a" << 2 );
- unique_ptr<ComparisonMatchExpression> eqa2( new EqualityMatchExpression() );
- ASSERT( eqa2->init( "a", baseOperanda2[ "a" ] ).isOK() );
-
- BSONObj baseOperandb2 = BSON( "b" << 2 );
- unique_ptr<ComparisonMatchExpression> eqb2( new EqualityMatchExpression() );
- ASSERT( eqb2->init( "b", baseOperandb2[ "b" ] ).isOK() );
-
- unique_ptr<AndMatchExpression> and2( new AndMatchExpression() );
- and2->add( eqa2.release() );
- and2->add( eqb2.release() );
- // and2 = { a : 2, b : 2 }
-
- unique_ptr<ElemMatchObjectMatchExpression> elemMatch2( new ElemMatchObjectMatchExpression() );
- elemMatch2->init( "x", and2.release() );
- // elemMatch2 = { x : { $elemMatch : { a : 2, b : 2 } } }
-
- unique_ptr<AndMatchExpression> andOfEM( new AndMatchExpression() );
- andOfEM->add( elemMatch1.release() );
- andOfEM->add( elemMatch2.release() );
-
- BSONObj nonArray = BSON( "x" << 4 );
- ASSERT( !andOfEM->matchesSingleElement( nonArray[ "x" ] ) );
- BSONObj emptyArray = BSON( "x" << BSONArray() );
- ASSERT( !andOfEM->matchesSingleElement( emptyArray[ "x" ] ) );
- BSONObj nonObjArray = BSON( "x" << BSON_ARRAY( 4 ) );
- ASSERT( !andOfEM->matchesSingleElement( nonObjArray[ "x" ] ) );
- BSONObj singleObjMatch = BSON( "x" << BSON_ARRAY( BSON( "a" << 1 << "b" << 1 ) ) );
- ASSERT( !andOfEM->matchesSingleElement( singleObjMatch[ "x" ] ) );
- BSONObj otherObjMatch = BSON( "x" << BSON_ARRAY( BSON( "a" << 2 << "b" << 2 ) ) );
- ASSERT( !andOfEM->matchesSingleElement( otherObjMatch[ "x" ] ) );
- BSONObj bothObjMatch = BSON( "x" << BSON_ARRAY( BSON( "a" << 1 << "b" << 1 ) <<
- BSON( "a" << 2 << "b" << 2 ) ) );
- ASSERT( andOfEM->matchesSingleElement( bothObjMatch[ "x" ] ) );
- BSONObj noObjMatch = BSON( "x" << BSON_ARRAY( BSON( "a" << 1 << "b" << 2 ) <<
- BSON( "a" << 2 << "b" << 1 ) ) );
- ASSERT( !andOfEM->matchesSingleElement( noObjMatch[ "x" ] ) );
- }
-
- TEST( AndOfElemMatch, Matches ) {
- BSONObj baseOperandgt1 = BSON( "$gt" << 1 );
- unique_ptr<ComparisonMatchExpression> gt1( new GTMatchExpression() );
- ASSERT( gt1->init( "", baseOperandgt1[ "$gt" ] ).isOK() );
-
- BSONObj baseOperandlt1 = BSON( "$lt" << 10 );
- unique_ptr<ComparisonMatchExpression> lt1( new LTMatchExpression() );
- ASSERT( lt1->init( "", baseOperandlt1[ "$lt" ] ).isOK() );
-
- unique_ptr<ElemMatchValueMatchExpression> elemMatch1( new ElemMatchValueMatchExpression() );
- elemMatch1->init( "x" );
- elemMatch1->add( gt1.release() );
- elemMatch1->add( lt1.release() );
- // elemMatch1 = { x : { $elemMatch : { $gt : 1 , $lt : 10 } } }
-
- BSONObj baseOperandgt2 = BSON( "$gt" << 101 );
- unique_ptr<ComparisonMatchExpression> gt2( new GTMatchExpression() );
- ASSERT( gt2->init( "", baseOperandgt2[ "$gt" ] ).isOK() );
-
- BSONObj baseOperandlt2 = BSON( "$lt" << 110 );
- unique_ptr<ComparisonMatchExpression> lt2( new LTMatchExpression() );
- ASSERT( lt2->init( "", baseOperandlt2[ "$lt" ] ).isOK() );
-
- unique_ptr<ElemMatchValueMatchExpression> elemMatch2( new ElemMatchValueMatchExpression() );
- elemMatch2->init( "x" );
- elemMatch2->add( gt2.release() );
- elemMatch2->add( lt2.release() );
- // elemMatch2 = { x : { $elemMatch : { $gt : 101 , $lt : 110 } } }
-
- unique_ptr<AndMatchExpression> andOfEM( new AndMatchExpression() );
- andOfEM->add( elemMatch1.release() );
- andOfEM->add( elemMatch2.release() );
-
- BSONObj nonArray = BSON( "x" << 4 );
- ASSERT( !andOfEM->matchesBSON( nonArray, NULL ) );
- BSONObj emptyArray = BSON( "x" << BSONArray() );
- ASSERT( !andOfEM->matchesBSON( emptyArray, NULL ) );
- BSONObj nonNumberArray = BSON( "x" << BSON_ARRAY( "q" ) );
- ASSERT( !andOfEM->matchesBSON( nonNumberArray, NULL ) );
- BSONObj singleMatch = BSON( "x" << BSON_ARRAY( 5 ) );
- ASSERT( !andOfEM->matchesBSON( singleMatch, NULL ) );
- BSONObj otherMatch = BSON( "x" << BSON_ARRAY( 105 ) );
- ASSERT( !andOfEM->matchesBSON( otherMatch, NULL ) );
- BSONObj bothMatch = BSON( "x" << BSON_ARRAY( 5 << 105 ) );
- ASSERT( andOfEM->matchesBSON( bothMatch, NULL ) );
- BSONObj neitherMatch = BSON( "x" << BSON_ARRAY( 0 << 200 ) );
- ASSERT( !andOfEM->matchesBSON( neitherMatch, NULL ) );
- }
-
- TEST( SizeMatchExpression, MatchesElement ) {
- BSONObj match = BSON( "a" << BSON_ARRAY( 5 << 6 ) );
- BSONObj notMatch = BSON( "a" << BSON_ARRAY( 5 ) );
- SizeMatchExpression size;
- ASSERT( size.init( "", 2 ).isOK() );
- ASSERT( size.matchesSingleElement( match.firstElement() ) );
- ASSERT( !size.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( SizeMatchExpression, MatchesNonArray ) {
- // Non arrays do not match.
- BSONObj stringValue = BSON( "a" << "z" );
- BSONObj numberValue = BSON( "a" << 0 );
- BSONObj arrayValue = BSON( "a" << BSONArray() );
- SizeMatchExpression size;
- ASSERT( size.init( "", 0 ).isOK() );
- ASSERT( !size.matchesSingleElement( stringValue.firstElement() ) );
- ASSERT( !size.matchesSingleElement( numberValue.firstElement() ) );
- ASSERT( size.matchesSingleElement( arrayValue.firstElement() ) );
- }
-
- TEST( SizeMatchExpression, MatchesArray ) {
- SizeMatchExpression size;
- ASSERT( size.init( "a", 2 ).isOK() );
- ASSERT( size.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5.5 ) ), NULL ) );
- // Arrays are not unwound to look for matching subarrays.
- ASSERT( !size.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5.5 << BSON_ARRAY( 1 << 2 ) ) ),
- NULL ) );
- }
-
- TEST( SizeMatchExpression, MatchesNestedArray ) {
- SizeMatchExpression size;
- ASSERT( size.init( "a.2", 2 ).isOK() );
- // A numerically referenced nested array is matched.
- ASSERT( size.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5.5 << BSON_ARRAY( 1 << 2 ) ) ),
- NULL ) );
- }
-
- TEST( SizeMatchExpression, ElemMatchKey ) {
- SizeMatchExpression size;
- ASSERT( size.init( "a.b", 3 ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !size.matchesBSON( BSON( "a" << 1 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( size.matchesBSON( BSON( "a" << BSON( "b" << BSON_ARRAY( 1 << 2 << 3 ) ) ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( size.matchesBSON( BSON( "a" <<
- BSON_ARRAY( 2 <<
- BSON( "b" << BSON_ARRAY( 1 << 2 << 3 ) ) ) ),
- &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- TEST( SizeMatchExpression, Equivalent ) {
- SizeMatchExpression e1;
- SizeMatchExpression e2;
- SizeMatchExpression e3;
-
- e1.init( "a", 5 );
- e2.init( "a", 6 );
- e3.init( "v", 5 );
-
- ASSERT( e1.equivalent( &e1 ) );
- ASSERT( !e1.equivalent( &e2 ) );
- ASSERT( !e1.equivalent( &e3 ) );
- }
-
- /**
- TEST( SizeMatchExpression, MatchesIndexKey ) {
- BSONObj operand = BSON( "$size" << 4 );
- SizeMatchExpression size;
- ASSERT( size.init( "a", operand[ "$size" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- BSONObj indexKey = BSON( "" << 1 );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- size.matchesIndexKey( indexKey, indexSpec ) );
- }
- */
-
-} // namespace mongo
+using std::unique_ptr;
+
+TEST(ElemMatchObjectMatchExpression, MatchesElementSingle) {
+ BSONObj baseOperand = BSON("b" << 5);
+ BSONObj match = BSON("a" << BSON_ARRAY(BSON("b" << 5.0)));
+ BSONObj notMatch = BSON("a" << BSON_ARRAY(BSON("b" << 6)));
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("b", baseOperand["b"]).isOK());
+ ElemMatchObjectMatchExpression op;
+ ASSERT(op.init("a", eq.release()).isOK());
+ ASSERT(op.matchesSingleElement(match["a"]));
+ ASSERT(!op.matchesSingleElement(notMatch["a"]));
+}
+
+TEST(ElemMatchObjectMatchExpression, MatchesElementArray) {
+ BSONObj baseOperand = BSON("1" << 5);
+ BSONObj match = BSON("a" << BSON_ARRAY(BSON_ARRAY('s' << 5.0)));
+ BSONObj notMatch = BSON("a" << BSON_ARRAY(BSON_ARRAY(5 << 6)));
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("1", baseOperand["1"]).isOK());
+ ElemMatchObjectMatchExpression op;
+ ASSERT(op.init("a", eq.release()).isOK());
+ ASSERT(op.matchesSingleElement(match["a"]));
+ ASSERT(!op.matchesSingleElement(notMatch["a"]));
+}
+
+TEST(ElemMatchObjectMatchExpression, MatchesElementMultiple) {
+ BSONObj baseOperand1 = BSON("b" << 5);
+ BSONObj baseOperand2 = BSON("b" << 6);
+ BSONObj baseOperand3 = BSON("c" << 7);
+ BSONObj notMatch1 = BSON("a" << BSON_ARRAY(BSON("b" << 5 << "c" << 7)));
+ BSONObj notMatch2 = BSON("a" << BSON_ARRAY(BSON("b" << 6 << "c" << 7)));
+ BSONObj notMatch3 = BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(5 << 6))));
+ BSONObj match = BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(5 << 6) << "c" << 7)));
+ unique_ptr<ComparisonMatchExpression> eq1(new EqualityMatchExpression());
+ ASSERT(eq1->init("b", baseOperand1["b"]).isOK());
+ unique_ptr<ComparisonMatchExpression> eq2(new EqualityMatchExpression());
+ ASSERT(eq2->init("b", baseOperand2["b"]).isOK());
+ unique_ptr<ComparisonMatchExpression> eq3(new EqualityMatchExpression());
+ ASSERT(eq3->init("c", baseOperand3["c"]).isOK());
+
+ unique_ptr<AndMatchExpression> andOp(new AndMatchExpression());
+ andOp->add(eq1.release());
+ andOp->add(eq2.release());
+ andOp->add(eq3.release());
+
+ ElemMatchObjectMatchExpression op;
+ ASSERT(op.init("a", andOp.release()).isOK());
+ ASSERT(!op.matchesSingleElement(notMatch1["a"]));
+ ASSERT(!op.matchesSingleElement(notMatch2["a"]));
+ ASSERT(!op.matchesSingleElement(notMatch3["a"]));
+ ASSERT(op.matchesSingleElement(match["a"]));
+}
+
+TEST(ElemMatchObjectMatchExpression, MatchesNonArray) {
+ BSONObj baseOperand = BSON("b" << 5);
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("b", baseOperand["b"]).isOK());
+ ElemMatchObjectMatchExpression op;
+ ASSERT(op.init("a", eq.release()).isOK());
+ // Directly nested objects are not matched with $elemMatch. An intervening array is
+ // required.
+ ASSERT(!op.matchesBSON(BSON("a" << BSON("b" << 5)), NULL));
+ ASSERT(!op.matchesBSON(BSON("a" << BSON("0" << (BSON("b" << 5)))), NULL));
+ ASSERT(!op.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(ElemMatchObjectMatchExpression, MatchesArrayObject) {
+ BSONObj baseOperand = BSON("b" << 5);
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("b", baseOperand["b"]).isOK());
+ ElemMatchObjectMatchExpression op;
+ ASSERT(op.init("a", eq.release()).isOK());
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 5))), NULL));
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(4 << BSON("b" << 5))), NULL));
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj() << BSON("b" << 5))), NULL));
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 6) << BSON("b" << 5))), NULL));
+}
+
+TEST(ElemMatchObjectMatchExpression, MatchesMultipleNamedValues) {
+ BSONObj baseOperand = BSON("c" << 5);
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("c", baseOperand["c"]).isOK());
+ ElemMatchObjectMatchExpression op;
+ ASSERT(op.init("a.b", eq.release()).isOK());
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(BSON("c" << 5))))), NULL));
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(BSON("c" << 1)))
+ << BSON("b" << BSON_ARRAY(BSON("c" << 5))))),
+ NULL));
+}
+
+TEST(ElemMatchObjectMatchExpression, ElemMatchKey) {
+ BSONObj baseOperand = BSON("c" << 6);
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("c", baseOperand["c"]).isOK());
+ ElemMatchObjectMatchExpression op;
+ ASSERT(op.init("a.b", eq.release()).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!op.matchesBSON(BSONObj(), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(!op.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(BSON("c" << 7)))), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(op.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(3 << BSON("c" << 6)))), &details));
+ ASSERT(details.hasElemMatchKey());
+ // The entry within the $elemMatch array is reported.
+ ASSERT_EQUALS("1", details.elemMatchKey());
+ ASSERT(op.matchesBSON(
+ BSON("a" << BSON_ARRAY(1 << 2 << BSON("b" << BSON_ARRAY(3 << 5 << BSON("c" << 6))))),
+ &details));
+ ASSERT(details.hasElemMatchKey());
+ // The entry within a parent of the $elemMatch array is reported.
+ ASSERT_EQUALS("2", details.elemMatchKey());
+}
+
+/**
+TEST( ElemMatchObjectMatchExpression, MatchesIndexKey ) {
+ BSONObj baseOperand = BSON( "b" << 5 );
+ unique_ptr<ComparisonMatchExpression> eq( new ComparisonMatchExpression() );
+ ASSERT( eq->init( "b", baseOperand[ "b" ] ).isOK() );
+ ElemMatchObjectMatchExpression op;
+ ASSERT( op.init( "a", eq.release() ).isOK() );
+ IndexSpec indexSpec( BSON( "a.b" << 1 ) );
+ BSONObj indexKey = BSON( "" << "5" );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ op.matchesIndexKey( indexKey, indexSpec ) );
+}
+*/
+
+TEST(ElemMatchValueMatchExpression, MatchesElementSingle) {
+ BSONObj baseOperand = BSON("$gt" << 5);
+ BSONObj match = BSON("a" << BSON_ARRAY(6));
+ BSONObj notMatch = BSON("a" << BSON_ARRAY(4));
+ unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression());
+ ASSERT(gt->init("", baseOperand["$gt"]).isOK());
+ ElemMatchValueMatchExpression op;
+ ASSERT(op.init("a", gt.release()).isOK());
+ ASSERT(op.matchesSingleElement(match["a"]));
+ ASSERT(!op.matchesSingleElement(notMatch["a"]));
+}
+
+TEST(ElemMatchValueMatchExpression, MatchesElementMultiple) {
+ BSONObj baseOperand1 = BSON("$gt" << 1);
+ BSONObj baseOperand2 = BSON("$lt" << 10);
+ BSONObj notMatch1 = BSON("a" << BSON_ARRAY(0 << 1));
+ BSONObj notMatch2 = BSON("a" << BSON_ARRAY(10 << 11));
+ BSONObj match = BSON("a" << BSON_ARRAY(0 << 5 << 11));
+ unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression());
+ ASSERT(gt->init("", baseOperand1["$gt"]).isOK());
+ unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression());
+ ASSERT(lt->init("", baseOperand2["$lt"]).isOK());
+
+ ElemMatchValueMatchExpression op;
+ ASSERT(op.init("a").isOK());
+ op.add(gt.release());
+ op.add(lt.release());
+
+ ASSERT(!op.matchesSingleElement(notMatch1["a"]));
+ ASSERT(!op.matchesSingleElement(notMatch2["a"]));
+ ASSERT(op.matchesSingleElement(match["a"]));
+}
+
+TEST(ElemMatchValueMatchExpression, MatchesNonArray) {
+ BSONObj baseOperand = BSON("$gt" << 5);
+ unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression());
+ ASSERT(gt->init("", baseOperand["$gt"]).isOK());
+ ElemMatchObjectMatchExpression op;
+ ASSERT(op.init("a", gt.release()).isOK());
+ // Directly nested objects are not matched with $elemMatch. An intervening array is
+ // required.
+ ASSERT(!op.matchesBSON(BSON("a" << 6), NULL));
+ ASSERT(!op.matchesBSON(BSON("a" << BSON("0" << 6)), NULL));
+}
+
+TEST(ElemMatchValueMatchExpression, MatchesArrayScalar) {
+ BSONObj baseOperand = BSON("$gt" << 5);
+ unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression());
+ ASSERT(gt->init("", baseOperand["$gt"]).isOK());
+ ElemMatchValueMatchExpression op;
+ ASSERT(op.init("a", gt.release()).isOK());
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL));
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), NULL));
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj() << 7)), NULL));
+}
+
+TEST(ElemMatchValueMatchExpression, MatchesMultipleNamedValues) {
+ BSONObj baseOperand = BSON("$gt" << 5);
+ unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression());
+ ASSERT(gt->init("", baseOperand["$gt"]).isOK());
+ ElemMatchValueMatchExpression op;
+ ASSERT(op.init("a.b", gt.release()).isOK());
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(6)))), NULL));
+ ASSERT(op.matchesBSON(
+ BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(4)) << BSON("b" << BSON_ARRAY(4 << 6)))),
+ NULL));
+}
+
+TEST(ElemMatchValueMatchExpression, ElemMatchKey) {
+ BSONObj baseOperand = BSON("$gt" << 6);
+ unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression());
+ ASSERT(gt->init("", baseOperand["$gt"]).isOK());
+ ElemMatchValueMatchExpression op;
+ ASSERT(op.init("a.b", gt.release()).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!op.matchesBSON(BSONObj(), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(!op.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(2))), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(op.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(3 << 7))), &details));
+ ASSERT(details.hasElemMatchKey());
+ // The entry within the $elemMatch array is reported.
+ ASSERT_EQUALS("1", details.elemMatchKey());
+ ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << BSON("b" << BSON_ARRAY(3 << 7)))),
+ &details));
+ ASSERT(details.hasElemMatchKey());
+ // The entry within a parent of the $elemMatch array is reported.
+ ASSERT_EQUALS("2", details.elemMatchKey());
+}
+
+/**
+TEST( ElemMatchValueMatchExpression, MatchesIndexKey ) {
+ BSONObj baseOperand = BSON( "$lt" << 5 );
+ unique_ptr<LtOp> lt( new ComparisonMatchExpression() );
+ ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
+ ElemMatchValueMatchExpression op;
+ ASSERT( op.init( "a", lt.release() ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ BSONObj indexKey = BSON( "" << "3" );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ op.matchesIndexKey( indexKey, indexSpec ) );
+}
+*/
+
+TEST(AndOfElemMatch, MatchesElement) {
+ BSONObj baseOperanda1 = BSON("a" << 1);
+ unique_ptr<ComparisonMatchExpression> eqa1(new EqualityMatchExpression());
+ ASSERT(eqa1->init("a", baseOperanda1["a"]).isOK());
+
+ BSONObj baseOperandb1 = BSON("b" << 1);
+ unique_ptr<ComparisonMatchExpression> eqb1(new EqualityMatchExpression());
+ ASSERT(eqb1->init("b", baseOperandb1["b"]).isOK());
+
+ unique_ptr<AndMatchExpression> and1(new AndMatchExpression());
+ and1->add(eqa1.release());
+ and1->add(eqb1.release());
+ // and1 = { a : 1, b : 1 }
+
+ unique_ptr<ElemMatchObjectMatchExpression> elemMatch1(new ElemMatchObjectMatchExpression());
+ elemMatch1->init("x", and1.release());
+ // elemMatch1 = { x : { $elemMatch : { a : 1, b : 1 } } }
+
+ BSONObj baseOperanda2 = BSON("a" << 2);
+ unique_ptr<ComparisonMatchExpression> eqa2(new EqualityMatchExpression());
+ ASSERT(eqa2->init("a", baseOperanda2["a"]).isOK());
+
+ BSONObj baseOperandb2 = BSON("b" << 2);
+ unique_ptr<ComparisonMatchExpression> eqb2(new EqualityMatchExpression());
+ ASSERT(eqb2->init("b", baseOperandb2["b"]).isOK());
+
+ unique_ptr<AndMatchExpression> and2(new AndMatchExpression());
+ and2->add(eqa2.release());
+ and2->add(eqb2.release());
+ // and2 = { a : 2, b : 2 }
+
+ unique_ptr<ElemMatchObjectMatchExpression> elemMatch2(new ElemMatchObjectMatchExpression());
+ elemMatch2->init("x", and2.release());
+ // elemMatch2 = { x : { $elemMatch : { a : 2, b : 2 } } }
+
+ unique_ptr<AndMatchExpression> andOfEM(new AndMatchExpression());
+ andOfEM->add(elemMatch1.release());
+ andOfEM->add(elemMatch2.release());
+
+ BSONObj nonArray = BSON("x" << 4);
+ ASSERT(!andOfEM->matchesSingleElement(nonArray["x"]));
+ BSONObj emptyArray = BSON("x" << BSONArray());
+ ASSERT(!andOfEM->matchesSingleElement(emptyArray["x"]));
+ BSONObj nonObjArray = BSON("x" << BSON_ARRAY(4));
+ ASSERT(!andOfEM->matchesSingleElement(nonObjArray["x"]));
+ BSONObj singleObjMatch = BSON("x" << BSON_ARRAY(BSON("a" << 1 << "b" << 1)));
+ ASSERT(!andOfEM->matchesSingleElement(singleObjMatch["x"]));
+ BSONObj otherObjMatch = BSON("x" << BSON_ARRAY(BSON("a" << 2 << "b" << 2)));
+ ASSERT(!andOfEM->matchesSingleElement(otherObjMatch["x"]));
+ BSONObj bothObjMatch =
+ BSON("x" << BSON_ARRAY(BSON("a" << 1 << "b" << 1) << BSON("a" << 2 << "b" << 2)));
+ ASSERT(andOfEM->matchesSingleElement(bothObjMatch["x"]));
+ BSONObj noObjMatch =
+ BSON("x" << BSON_ARRAY(BSON("a" << 1 << "b" << 2) << BSON("a" << 2 << "b" << 1)));
+ ASSERT(!andOfEM->matchesSingleElement(noObjMatch["x"]));
+}
+
+TEST(AndOfElemMatch, Matches) {
+ BSONObj baseOperandgt1 = BSON("$gt" << 1);
+ unique_ptr<ComparisonMatchExpression> gt1(new GTMatchExpression());
+ ASSERT(gt1->init("", baseOperandgt1["$gt"]).isOK());
+
+ BSONObj baseOperandlt1 = BSON("$lt" << 10);
+ unique_ptr<ComparisonMatchExpression> lt1(new LTMatchExpression());
+ ASSERT(lt1->init("", baseOperandlt1["$lt"]).isOK());
+
+ unique_ptr<ElemMatchValueMatchExpression> elemMatch1(new ElemMatchValueMatchExpression());
+ elemMatch1->init("x");
+ elemMatch1->add(gt1.release());
+ elemMatch1->add(lt1.release());
+ // elemMatch1 = { x : { $elemMatch : { $gt : 1 , $lt : 10 } } }
+
+ BSONObj baseOperandgt2 = BSON("$gt" << 101);
+ unique_ptr<ComparisonMatchExpression> gt2(new GTMatchExpression());
+ ASSERT(gt2->init("", baseOperandgt2["$gt"]).isOK());
+
+ BSONObj baseOperandlt2 = BSON("$lt" << 110);
+ unique_ptr<ComparisonMatchExpression> lt2(new LTMatchExpression());
+ ASSERT(lt2->init("", baseOperandlt2["$lt"]).isOK());
+
+ unique_ptr<ElemMatchValueMatchExpression> elemMatch2(new ElemMatchValueMatchExpression());
+ elemMatch2->init("x");
+ elemMatch2->add(gt2.release());
+ elemMatch2->add(lt2.release());
+ // elemMatch2 = { x : { $elemMatch : { $gt : 101 , $lt : 110 } } }
+
+ unique_ptr<AndMatchExpression> andOfEM(new AndMatchExpression());
+ andOfEM->add(elemMatch1.release());
+ andOfEM->add(elemMatch2.release());
+
+ BSONObj nonArray = BSON("x" << 4);
+ ASSERT(!andOfEM->matchesBSON(nonArray, NULL));
+ BSONObj emptyArray = BSON("x" << BSONArray());
+ ASSERT(!andOfEM->matchesBSON(emptyArray, NULL));
+ BSONObj nonNumberArray = BSON("x" << BSON_ARRAY("q"));
+ ASSERT(!andOfEM->matchesBSON(nonNumberArray, NULL));
+ BSONObj singleMatch = BSON("x" << BSON_ARRAY(5));
+ ASSERT(!andOfEM->matchesBSON(singleMatch, NULL));
+ BSONObj otherMatch = BSON("x" << BSON_ARRAY(105));
+ ASSERT(!andOfEM->matchesBSON(otherMatch, NULL));
+ BSONObj bothMatch = BSON("x" << BSON_ARRAY(5 << 105));
+ ASSERT(andOfEM->matchesBSON(bothMatch, NULL));
+ BSONObj neitherMatch = BSON("x" << BSON_ARRAY(0 << 200));
+ ASSERT(!andOfEM->matchesBSON(neitherMatch, NULL));
+}
+
+TEST(SizeMatchExpression, MatchesElement) {
+ BSONObj match = BSON("a" << BSON_ARRAY(5 << 6));
+ BSONObj notMatch = BSON("a" << BSON_ARRAY(5));
+ SizeMatchExpression size;
+ ASSERT(size.init("", 2).isOK());
+ ASSERT(size.matchesSingleElement(match.firstElement()));
+ ASSERT(!size.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(SizeMatchExpression, MatchesNonArray) {
+ // Non arrays do not match.
+ BSONObj stringValue = BSON("a"
+ << "z");
+ BSONObj numberValue = BSON("a" << 0);
+ BSONObj arrayValue = BSON("a" << BSONArray());
+ SizeMatchExpression size;
+ ASSERT(size.init("", 0).isOK());
+ ASSERT(!size.matchesSingleElement(stringValue.firstElement()));
+ ASSERT(!size.matchesSingleElement(numberValue.firstElement()));
+ ASSERT(size.matchesSingleElement(arrayValue.firstElement()));
+}
+
+TEST(SizeMatchExpression, MatchesArray) {
+ SizeMatchExpression size;
+ ASSERT(size.init("a", 2).isOK());
+ ASSERT(size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), NULL));
+ // Arrays are not unwound to look for matching subarrays.
+ ASSERT(!size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5 << BSON_ARRAY(1 << 2))), NULL));
+}
+
+TEST(SizeMatchExpression, MatchesNestedArray) {
+ SizeMatchExpression size;
+ ASSERT(size.init("a.2", 2).isOK());
+ // A numerically referenced nested array is matched.
+ ASSERT(size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5 << BSON_ARRAY(1 << 2))), NULL));
+}
+
+TEST(SizeMatchExpression, ElemMatchKey) {
+ SizeMatchExpression size;
+ ASSERT(size.init("a.b", 3).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!size.matchesBSON(BSON("a" << 1), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(size.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(1 << 2 << 3))), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(size.matchesBSON(BSON("a" << BSON_ARRAY(2 << BSON("b" << BSON_ARRAY(1 << 2 << 3)))),
+ &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+TEST(SizeMatchExpression, Equivalent) {
+ SizeMatchExpression e1;
+ SizeMatchExpression e2;
+ SizeMatchExpression e3;
+
+ e1.init("a", 5);
+ e2.init("a", 6);
+ e3.init("v", 5);
+
+ ASSERT(e1.equivalent(&e1));
+ ASSERT(!e1.equivalent(&e2));
+ ASSERT(!e1.equivalent(&e3));
+}
+
+/**
+ TEST( SizeMatchExpression, MatchesIndexKey ) {
+ BSONObj operand = BSON( "$size" << 4 );
+ SizeMatchExpression size;
+ ASSERT( size.init( "a", operand[ "$size" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ BSONObj indexKey = BSON( "" << 1 );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ size.matchesIndexKey( indexKey, indexSpec ) );
+ }
+*/
+
+} // namespace mongo
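
For readers unfamiliar with the matcher API exercised by the tests above, the trees built here correspond to ordinary query predicates such as {a: {$size: 2}} and {a: {$elemMatch: {b: 5}}}. The following is a minimal standalone sketch, not part of this change; the header paths and the helper name demoSizeMatch are assumptions, and it simply mirrors the init()/matchesBSON() flow from the SizeMatchExpression tests outside the unit-test macros:

    #include "mongo/db/jsobj.h"
    #include "mongo/db/matcher/expression_array.h"

    namespace mongo {
    // Illustrative sketch only: build the tree for {a: {$size: 2}} and evaluate it
    // against two documents, mirroring the SizeMatchExpression TEST cases above.
    bool demoSizeMatch() {
        SizeMatchExpression size;
        if (!size.init("a", 2).isOK())
            return false;
        BSONObj match = BSON("a" << BSON_ARRAY(4 << 5.5));          // two elements: matches
        BSONObj notMatch = BSON("a" << BSON_ARRAY(4 << 5.5 << 6));  // three elements: no match
        return size.matchesBSON(match, NULL) && !size.matchesBSON(notMatch, NULL);
    }
    }  // namespace mongo
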
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index d61dba70343..1cca795b743 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -39,404 +39,406 @@
namespace mongo {
- using mongoutils::str::equals;
+using mongoutils::str::equals;
+
+//
+// GeoExpression
+//
+
+// Put simple constructors here for unique_ptr.
+GeoExpression::GeoExpression() : field(""), predicate(INVALID) {}
+GeoExpression::GeoExpression(const std::string& f) : field(f), predicate(INVALID) {}
+
+Status GeoExpression::parseQuery(const BSONObj& obj) {
+ BSONObjIterator outerIt(obj);
+ // "within" / "geoWithin" / "geoIntersects"
+ BSONElement queryElt = outerIt.next();
+ if (outerIt.more()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "can't parse extra field: " << outerIt.next());
+ }
- //
- // GeoExpression
- //
+ BSONObj::MatchType matchType = static_cast<BSONObj::MatchType>(queryElt.getGtLtOp());
+ if (BSONObj::opGEO_INTERSECTS == matchType) {
+ predicate = GeoExpression::INTERSECT;
+ } else if (BSONObj::opWITHIN == matchType) {
+ predicate = GeoExpression::WITHIN;
+ } else {
+ // eoo() or unknown query predicate.
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "invalid geo query predicate: " << obj);
+ }
- // Put simple constructors here for unique_ptr.
- GeoExpression::GeoExpression() : field(""), predicate(INVALID) {}
- GeoExpression::GeoExpression(const std::string& f) : field(f), predicate(INVALID) {}
+ // Parse geometry after predicates.
+ if (Object != queryElt.type())
+ return Status(ErrorCodes::BadValue, "geometry must be an object");
+ BSONObj geoObj = queryElt.Obj();
- Status GeoExpression::parseQuery(const BSONObj &obj) {
- BSONObjIterator outerIt(obj);
- // "within" / "geoWithin" / "geoIntersects"
- BSONElement queryElt = outerIt.next();
- if (outerIt.more()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "can't parse extra field: " << outerIt.next());
- }
+ BSONObjIterator geoIt(geoObj);
- BSONObj::MatchType matchType = static_cast<BSONObj::MatchType>(queryElt.getGtLtOp());
- if (BSONObj::opGEO_INTERSECTS == matchType) {
- predicate = GeoExpression::INTERSECT;
- } else if (BSONObj::opWITHIN == matchType) {
- predicate = GeoExpression::WITHIN;
+ while (geoIt.more()) {
+ BSONElement elt = geoIt.next();
+ if (str::equals(elt.fieldName(), "$uniqueDocs")) {
+ // Deprecated "$uniqueDocs" field
+ warning() << "deprecated $uniqueDocs option: " << obj.toString() << endl;
} else {
- // eoo() or unknown query predicate.
- return Status(ErrorCodes::BadValue,
- str::stream() << "invalid geo query predicate: " << obj);
+ // The element must be a geo specifier. "$box", "$center", "$geometry", etc.
+ geoContainer.reset(new GeometryContainer());
+ Status status = geoContainer->parseFromQuery(elt);
+ if (!status.isOK())
+ return status;
}
+ }
- // Parse geometry after predicates.
- if (Object != queryElt.type()) return Status(ErrorCodes::BadValue, "geometry must be an object");
- BSONObj geoObj = queryElt.Obj();
-
- BSONObjIterator geoIt(geoObj);
-
- while (geoIt.more()) {
- BSONElement elt = geoIt.next();
- if (str::equals(elt.fieldName(), "$uniqueDocs")) {
- // Deprecated "$uniqueDocs" field
- warning() << "deprecated $uniqueDocs option: " << obj.toString() << endl;
- } else {
- // The element must be a geo specifier. "$box", "$center", "$geometry", etc.
- geoContainer.reset(new GeometryContainer());
- Status status = geoContainer->parseFromQuery(elt);
- if (!status.isOK()) return status;
- }
- }
+ if (geoContainer == NULL) {
+ return Status(ErrorCodes::BadValue, "geo query doesn't have any geometry");
+ }
- if (geoContainer == NULL) {
- return Status(ErrorCodes::BadValue, "geo query doesn't have any geometry");
- }
+ return Status::OK();
+}
- return Status::OK();
- }
+Status GeoExpression::parseFrom(const BSONObj& obj) {
+ // Initialize geoContainer and parse BSON object
+ Status status = parseQuery(obj);
+ if (!status.isOK())
+ return status;
- Status GeoExpression::parseFrom(const BSONObj &obj) {
- // Initialize geoContainer and parse BSON object
- Status status = parseQuery(obj);
- if (!status.isOK()) return status;
-
- // Why do we only deal with $within {polygon}?
- // 1. Finding things within a point is silly and only valid
- // for points and degenerate lines/polys.
- //
- // 2. Finding points within a line is easy but that's called intersect.
- // Finding lines within a line is kind of tricky given what S2 gives us.
- // Doing line-within-line is a valid yet unsupported feature,
- // though I wonder if we want to preserve orientation for lines or
- // allow (a,b),(c,d) to be within (c,d),(a,b). Anyway, punt on
- // this for now.
- if (GeoExpression::WITHIN == predicate && !geoContainer->supportsContains()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "$within not supported with provided geometry: " << obj);
- }
+ // Why do we only deal with $within {polygon}?
+ // 1. Finding things within a point is silly and only valid
+ // for points and degenerate lines/polys.
+ //
+ // 2. Finding points within a line is easy but that's called intersect.
+ // Finding lines within a line is kind of tricky given what S2 gives us.
+ // Doing line-within-line is a valid yet unsupported feature,
+ // though I wonder if we want to preserve orientation for lines or
+ // allow (a,b),(c,d) to be within (c,d),(a,b). Anyway, punt on
+ // this for now.
+ if (GeoExpression::WITHIN == predicate && !geoContainer->supportsContains()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "$within not supported with provided geometry: " << obj);
+ }
- // Big polygon with strict winding order is represented as an S2Loop in SPHERE CRS.
- // So converting the query to SPHERE CRS makes things easier than projecting all the data
- // into STRICT_SPHERE CRS.
- if (STRICT_SPHERE == geoContainer->getNativeCRS()) {
- if (!geoContainer->supportsProject(SPHERE)) {
- return Status(ErrorCodes::BadValue,
- "only polygon supported with strict winding order");
- }
- geoContainer->projectInto(SPHERE);
+ // Big polygon with strict winding order is represented as an S2Loop in SPHERE CRS.
+ // So converting the query to SPHERE CRS makes things easier than projecting all the data
+ // into STRICT_SPHERE CRS.
+ if (STRICT_SPHERE == geoContainer->getNativeCRS()) {
+ if (!geoContainer->supportsProject(SPHERE)) {
+ return Status(ErrorCodes::BadValue, "only polygon supported with strict winding order");
}
+ geoContainer->projectInto(SPHERE);
+ }
- // $geoIntersect queries are hardcoded to *always* be in SPHERE CRS
- // TODO: This is probably bad semantics, should not do this
- if (GeoExpression::INTERSECT == predicate) {
- if (!geoContainer->supportsProject(SPHERE)) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "$geoIntersect not supported with provided geometry: "
- << obj);
- }
- geoContainer->projectInto(SPHERE);
+ // $geoIntersect queries are hardcoded to *always* be in SPHERE CRS
+ // TODO: This is probably bad semantics, should not do this
+ if (GeoExpression::INTERSECT == predicate) {
+ if (!geoContainer->supportsProject(SPHERE)) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "$geoIntersect not supported with provided geometry: " << obj);
}
-
- return Status::OK();
+ geoContainer->projectInto(SPHERE);
}
- //
- // GeoNearExpression
- //
+ return Status::OK();
+}
- GeoNearExpression::GeoNearExpression()
- : minDistance(0),
- maxDistance(std::numeric_limits<double>::max()),
- isNearSphere(false),
- unitsAreRadians(false),
- isWrappingQuery(false) { }
-
- GeoNearExpression::GeoNearExpression(const std::string& f)
- : field(f),
- minDistance(0),
- maxDistance(std::numeric_limits<double>::max()),
- isNearSphere(false),
- unitsAreRadians(false),
- isWrappingQuery(false) { }
-
- bool GeoNearExpression::parseLegacyQuery(const BSONObj &obj) {
-
- bool hasGeometry = false;
-
- // First, try legacy near, e.g.:
- // t.find({ loc : { $nearSphere: [0,0], $minDistance: 1, $maxDistance: 3 }})
- // t.find({ loc : { $nearSphere: [0,0] }})
- // t.find({ loc : { $near : [0, 0, 1] } });
- // t.find({ loc : { $near: { someGeoJSONPoint}})
- // t.find({ loc : { $geoNear: { someGeoJSONPoint}})
- BSONObjIterator it(obj);
- while (it.more()) {
- BSONElement e = it.next();
- if (equals(e.fieldName(), "$near") || equals(e.fieldName(), "$geoNear")
- || equals(e.fieldName(), "$nearSphere")) {
- if (!e.isABSONObj()) { return false; }
- BSONObj embeddedObj = e.embeddedObject();
+//
+// GeoNearExpression
+//
+
+GeoNearExpression::GeoNearExpression()
+ : minDistance(0),
+ maxDistance(std::numeric_limits<double>::max()),
+ isNearSphere(false),
+ unitsAreRadians(false),
+ isWrappingQuery(false) {}
+
+GeoNearExpression::GeoNearExpression(const std::string& f)
+ : field(f),
+ minDistance(0),
+ maxDistance(std::numeric_limits<double>::max()),
+ isNearSphere(false),
+ unitsAreRadians(false),
+ isWrappingQuery(false) {}
+
+bool GeoNearExpression::parseLegacyQuery(const BSONObj& obj) {
+ bool hasGeometry = false;
+
+ // First, try legacy near, e.g.:
+ // t.find({ loc : { $nearSphere: [0,0], $minDistance: 1, $maxDistance: 3 }})
+ // t.find({ loc : { $nearSphere: [0,0] }})
+ // t.find({ loc : { $near : [0, 0, 1] } });
+ // t.find({ loc : { $near: { someGeoJSONPoint}})
+ // t.find({ loc : { $geoNear: { someGeoJSONPoint}})
+ BSONObjIterator it(obj);
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (equals(e.fieldName(), "$near") || equals(e.fieldName(), "$geoNear") ||
+ equals(e.fieldName(), "$nearSphere")) {
+ if (!e.isABSONObj()) {
+ return false;
+ }
+ BSONObj embeddedObj = e.embeddedObject();
- if (GeoParser::parseQueryPoint(e, centroid.get()).isOK()
- || GeoParser::parsePointWithMaxDistance(embeddedObj, centroid.get(), &maxDistance)) {
- uassert(18522, "max distance must be non-negative", maxDistance >= 0.0);
- hasGeometry = true;
- isNearSphere = equals(e.fieldName(), "$nearSphere");
- }
- } else if (equals(e.fieldName(), "$minDistance")) {
- uassert(16893, "$minDistance must be a number", e.isNumber());
- minDistance = e.Number();
- uassert(16894, "$minDistance must be non-negative", minDistance >= 0.0);
- } else if (equals(e.fieldName(), "$maxDistance")) {
- uassert(16895, "$maxDistance must be a number", e.isNumber());
- maxDistance = e.Number();
- uassert(16896, "$maxDistance must be non-negative", maxDistance >= 0.0);
- } else if (equals(e.fieldName(), "$uniqueDocs")) {
- warning() << "ignoring deprecated option $uniqueDocs";
+ if (GeoParser::parseQueryPoint(e, centroid.get()).isOK() ||
+ GeoParser::parsePointWithMaxDistance(embeddedObj, centroid.get(), &maxDistance)) {
+ uassert(18522, "max distance must be non-negative", maxDistance >= 0.0);
+ hasGeometry = true;
+ isNearSphere = equals(e.fieldName(), "$nearSphere");
}
+ } else if (equals(e.fieldName(), "$minDistance")) {
+ uassert(16893, "$minDistance must be a number", e.isNumber());
+ minDistance = e.Number();
+ uassert(16894, "$minDistance must be non-negative", minDistance >= 0.0);
+ } else if (equals(e.fieldName(), "$maxDistance")) {
+ uassert(16895, "$maxDistance must be a number", e.isNumber());
+ maxDistance = e.Number();
+ uassert(16896, "$maxDistance must be non-negative", maxDistance >= 0.0);
+ } else if (equals(e.fieldName(), "$uniqueDocs")) {
+ warning() << "ignoring deprecated option $uniqueDocs";
}
-
- return hasGeometry;
}
- Status GeoNearExpression::parseNewQuery(const BSONObj &obj) {
- bool hasGeometry = false;
+ return hasGeometry;
+}
- BSONObjIterator objIt(obj);
- if (!objIt.more()) {
- return Status(ErrorCodes::BadValue, "empty geo near query object");
- }
- BSONElement e = objIt.next();
- // Just one arg. to $geoNear.
- if (objIt.more()) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "geo near accepts just one argument when querying for a GeoJSON " <<
- "point. Extra field found: " << objIt.next());
- }
+Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
+ bool hasGeometry = false;
- // Parse "new" near:
- // t.find({"geo" : {"$near" : {"$geometry": pointA, $minDistance: 1, $maxDistance: 3}}})
- // t.find({"geo" : {"$geoNear" : {"$geometry": pointA, $minDistance: 1, $maxDistance: 3}}})
- if (!e.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "geo near query argument is not an object");
- }
- BSONObj::MatchType matchType = static_cast<BSONObj::MatchType>(e.getGtLtOp());
- if (BSONObj::opNEAR != matchType) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "invalid geo near query operator: " << e.fieldName());
- }
+ BSONObjIterator objIt(obj);
+ if (!objIt.more()) {
+ return Status(ErrorCodes::BadValue, "empty geo near query object");
+ }
+ BSONElement e = objIt.next();
+ // Just one arg. to $geoNear.
+ if (objIt.more()) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "geo near accepts just one argument when querying for a GeoJSON "
+ << "point. Extra field found: " << objIt.next());
+ }
- // Iterate over the argument.
- BSONObjIterator it(e.embeddedObject());
- while (it.more()) {
- BSONElement e = it.next();
- if (equals(e.fieldName(), "$geometry")) {
- if (e.isABSONObj()) {
- BSONObj embeddedObj = e.embeddedObject();
- Status status = GeoParser::parseQueryPoint(e, centroid.get());
- if (!status.isOK()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "invalid point in geo near query $geometry argument: "
- << embeddedObj << " " << status.reason());
- }
- uassert(16681, "$near requires geojson point, given " + embeddedObj.toString(),
- (SPHERE == centroid->crs));
- hasGeometry = true;
+ // Parse "new" near:
+ // t.find({"geo" : {"$near" : {"$geometry": pointA, $minDistance: 1, $maxDistance: 3}}})
+ // t.find({"geo" : {"$geoNear" : {"$geometry": pointA, $minDistance: 1, $maxDistance: 3}}})
+ if (!e.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "geo near query argument is not an object");
+ }
+ BSONObj::MatchType matchType = static_cast<BSONObj::MatchType>(e.getGtLtOp());
+ if (BSONObj::opNEAR != matchType) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "invalid geo near query operator: " << e.fieldName());
+ }
+
+ // Iterate over the argument.
+ BSONObjIterator it(e.embeddedObject());
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (equals(e.fieldName(), "$geometry")) {
+ if (e.isABSONObj()) {
+ BSONObj embeddedObj = e.embeddedObject();
+ Status status = GeoParser::parseQueryPoint(e, centroid.get());
+ if (!status.isOK()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "invalid point in geo near query $geometry argument: "
+ << embeddedObj << " " << status.reason());
}
- } else if (equals(e.fieldName(), "$minDistance")) {
- uassert(16897, "$minDistance must be a number", e.isNumber());
- minDistance = e.Number();
- uassert(16898, "$minDistance must be non-negative", minDistance >= 0.0);
- } else if (equals(e.fieldName(), "$maxDistance")) {
- uassert(16899, "$maxDistance must be a number", e.isNumber());
- maxDistance = e.Number();
- uassert(16900, "$maxDistance must be non-negative", maxDistance >= 0.0);
+ uassert(16681,
+ "$near requires geojson point, given " + embeddedObj.toString(),
+ (SPHERE == centroid->crs));
+ hasGeometry = true;
}
+ } else if (equals(e.fieldName(), "$minDistance")) {
+ uassert(16897, "$minDistance must be a number", e.isNumber());
+ minDistance = e.Number();
+ uassert(16898, "$minDistance must be non-negative", minDistance >= 0.0);
+ } else if (equals(e.fieldName(), "$maxDistance")) {
+ uassert(16899, "$maxDistance must be a number", e.isNumber());
+ maxDistance = e.Number();
+ uassert(16900, "$maxDistance must be non-negative", maxDistance >= 0.0);
}
-
- if (!hasGeometry) {
- return Status(ErrorCodes::BadValue, "$geometry is required for geo near query");
- }
-
- return Status::OK();
}
+ if (!hasGeometry) {
+ return Status(ErrorCodes::BadValue, "$geometry is required for geo near query");
+ }
- Status GeoNearExpression::parseFrom(const BSONObj &obj) {
-
- Status status = Status::OK();
- centroid.reset(new PointWithCRS());
-
- if (!parseLegacyQuery(obj)) {
- // Clear out any half-baked data.
- minDistance = 0;
- isNearSphere = false;
- maxDistance = std::numeric_limits<double>::max();
- // ...and try parsing new format.
- status = parseNewQuery(obj);
- }
-
- if (!status.isOK())
- return status;
-
- // Fixup the near query for anonoyances caused by $nearSphere
- if (isNearSphere) {
+ return Status::OK();
+}
- // The user-provided point can be flat for a spherical query - needs to be projectable
- uassert(17444,
- "Legacy point is out of bounds for spherical query",
- ShapeProjection::supportsProject(*centroid, SPHERE));
- unitsAreRadians = SPHERE != centroid->crs;
- // GeoJSON points imply wrapping queries
- isWrappingQuery = SPHERE == centroid->crs;
+Status GeoNearExpression::parseFrom(const BSONObj& obj) {
+ Status status = Status::OK();
+ centroid.reset(new PointWithCRS());
- // Project the point to a spherical CRS now that we've got the settings we need
- // We need to manually project here since we aren't using GeometryContainer
- ShapeProjection::projectInto(centroid.get(), SPHERE);
- }
- else {
- unitsAreRadians = false;
- isWrappingQuery = SPHERE == centroid->crs;
- }
+ if (!parseLegacyQuery(obj)) {
+ // Clear out any half-baked data.
+ minDistance = 0;
+ isNearSphere = false;
+ maxDistance = std::numeric_limits<double>::max();
+ // ...and try parsing new format.
+ status = parseNewQuery(obj);
+ }
+ if (!status.isOK())
return status;
+
+    // Fix up the near query for annoyances caused by $nearSphere
+ if (isNearSphere) {
+ // The user-provided point can be flat for a spherical query - needs to be projectable
+ uassert(17444,
+ "Legacy point is out of bounds for spherical query",
+ ShapeProjection::supportsProject(*centroid, SPHERE));
+
+ unitsAreRadians = SPHERE != centroid->crs;
+ // GeoJSON points imply wrapping queries
+ isWrappingQuery = SPHERE == centroid->crs;
+
+ // Project the point to a spherical CRS now that we've got the settings we need
+ // We need to manually project here since we aren't using GeometryContainer
+ ShapeProjection::projectInto(centroid.get(), SPHERE);
+ } else {
+ unitsAreRadians = false;
+ isWrappingQuery = SPHERE == centroid->crs;
}
- //
- // GeoMatchExpression and GeoNearMatchExpression
- //
+ return status;
+}
- //
- // Geo queries we don't need an index to answer: geoWithin and geoIntersects
- //
+//
+// GeoMatchExpression and GeoNearMatchExpression
+//
- Status GeoMatchExpression::init( StringData path, const GeoExpression* query,
- const BSONObj& rawObj ) {
- _query.reset(query);
- _rawObj = rawObj;
- return initPath( path );
- }
+//
+// Geo queries we don't need an index to answer: geoWithin and geoIntersects
+//
- bool GeoMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- if ( !e.isABSONObj())
- return false;
+Status GeoMatchExpression::init(StringData path,
+ const GeoExpression* query,
+ const BSONObj& rawObj) {
+ _query.reset(query);
+ _rawObj = rawObj;
+ return initPath(path);
+}
- GeometryContainer geometry;
- if ( !geometry.parseFromStorage( e ).isOK() )
- return false;
+bool GeoMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ if (!e.isABSONObj())
+ return false;
- // Never match big polygon
- if (geometry.getNativeCRS() == STRICT_SPHERE)
- return false;
+ GeometryContainer geometry;
+ if (!geometry.parseFromStorage(e).isOK())
+ return false;
- // Project this geometry into the CRS of the query
- if (!geometry.supportsProject(_query->getGeometry().getNativeCRS()))
- return false;
+ // Never match big polygon
+ if (geometry.getNativeCRS() == STRICT_SPHERE)
+ return false;
- geometry.projectInto(_query->getGeometry().getNativeCRS());
+ // Project this geometry into the CRS of the query
+ if (!geometry.supportsProject(_query->getGeometry().getNativeCRS()))
+ return false;
- if (GeoExpression::WITHIN == _query->getPred()) {
- return _query->getGeometry().contains(geometry);
- }
- else {
- verify(GeoExpression::INTERSECT == _query->getPred());
- return _query->getGeometry().intersects(geometry);
- }
- }
+ geometry.projectInto(_query->getGeometry().getNativeCRS());
- void GeoMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "GEO raw = " << _rawObj.toString();
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
- debug << "\n";
+ if (GeoExpression::WITHIN == _query->getPred()) {
+ return _query->getGeometry().contains(geometry);
+ } else {
+ verify(GeoExpression::INTERSECT == _query->getPred());
+ return _query->getGeometry().intersects(geometry);
}
+}
- void GeoMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->appendElements(_rawObj);
+void GeoMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "GEO raw = " << _rawObj.toString();
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+ debug << "\n";
+}
- bool GeoMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+void GeoMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->appendElements(_rawObj);
+}
- const GeoMatchExpression* realOther = static_cast<const GeoMatchExpression*>( other );
+bool GeoMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
- if ( path() != realOther->path() )
- return false;
+ const GeoMatchExpression* realOther = static_cast<const GeoMatchExpression*>(other);
- return _rawObj == realOther->_rawObj;
- }
+ if (path() != realOther->path())
+ return false;
- LeafMatchExpression* GeoMatchExpression::shallowClone() const {
- GeoMatchExpression* next = new GeoMatchExpression();
- next->init( path(), NULL, _rawObj);
- next->_query = _query;
- if (getTag()) {
- next->setTag(getTag()->clone());
- }
- return next;
+ return _rawObj == realOther->_rawObj;
+}
+
+LeafMatchExpression* GeoMatchExpression::shallowClone() const {
+ GeoMatchExpression* next = new GeoMatchExpression();
+ next->init(path(), NULL, _rawObj);
+ next->_query = _query;
+ if (getTag()) {
+ next->setTag(getTag()->clone());
}
+ return next;
+}
- //
- // Parse-only geo expressions: geoNear (formerly known as near).
- //
+//
+// Parse-only geo expressions: geoNear (formerly known as near).
+//
- Status GeoNearMatchExpression::init( StringData path, const GeoNearExpression* query,
- const BSONObj& rawObj ) {
- _query.reset(query);
- _rawObj = rawObj;
- return initPath( path );
- }
+Status GeoNearMatchExpression::init(StringData path,
+ const GeoNearExpression* query,
+ const BSONObj& rawObj) {
+ _query.reset(query);
+ _rawObj = rawObj;
+ return initPath(path);
+}
- bool GeoNearMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- // See ops/update.cpp.
- // This node is removed by the query planner. It's only ever called if we're getting an
- // elemMatchKey.
- return true;
- }
+bool GeoNearMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ // See ops/update.cpp.
+ // This node is removed by the query planner. It's only ever called if we're getting an
+ // elemMatchKey.
+ return true;
+}
- void GeoNearMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "GEONEAR " << _query->toString();
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
- debug << "\n";
+void GeoNearMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "GEONEAR " << _query->toString();
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+ debug << "\n";
+}
- void GeoNearMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->appendElements(_rawObj);
- }
+void GeoNearMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->appendElements(_rawObj);
+}
- bool GeoNearMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+bool GeoNearMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
- const GeoNearMatchExpression* realOther = static_cast<const GeoNearMatchExpression*>(other);
+ const GeoNearMatchExpression* realOther = static_cast<const GeoNearMatchExpression*>(other);
- if ( path() != realOther->path() )
- return false;
+ if (path() != realOther->path())
+ return false;
- return _rawObj == realOther->_rawObj;
- }
+ return _rawObj == realOther->_rawObj;
+}
- LeafMatchExpression* GeoNearMatchExpression::shallowClone() const {
- GeoNearMatchExpression* next = new GeoNearMatchExpression();
- next->init( path(), NULL, _rawObj );
- next->_query = _query;
- if (getTag()) {
- next->setTag(getTag()->clone());
- }
- return next;
+LeafMatchExpression* GeoNearMatchExpression::shallowClone() const {
+ GeoNearMatchExpression* next = new GeoNearMatchExpression();
+ next->init(path(), NULL, _rawObj);
+ next->_query = _query;
+ if (getTag()) {
+ next->setTag(getTag()->clone());
}
-
+ return next;
+}
}
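
The reformatted expression_geo.cpp above keeps the parse-then-wrap contract intact: GeoExpression::parseFrom() validates and projects the geometry, GeoMatchExpression::init() takes ownership of the parsed expression, and shallowClone() shares it between clones through a shared_ptr. A minimal usage sketch, not part of this change; the $within/$box query shape follows the Geo1 test further down, the include paths are assumptions, and demoGeoWithin is an illustrative name:

    #include <memory>

    #include "mongo/db/json.h"
    #include "mongo/db/matcher/expression_geo.h"

    namespace mongo {
    // Illustrative sketch only: parse a $within/$box predicate, hand ownership of
    // the GeoExpression to a GeoMatchExpression, match two documents, and clone.
    bool demoGeoWithin() {
        BSONObj query = fromjson("{loc: {$within: {$box: [[4, 4], [6, 6]]}}}");
        std::unique_ptr<GeoExpression> gq(new GeoExpression);
        if (!gq->parseFrom(query["loc"].Obj()).isOK())
            return false;
        GeoMatchExpression ge;
        if (!ge.init("loc", gq.release(), query).isOK())
            return false;
        bool inside = ge.matchesBSON(fromjson("{loc: [5, 5]}"));   // point inside the box
        bool outside = ge.matchesBSON(fromjson("{loc: [3, 4]}"));  // point outside the box
        std::unique_ptr<LeafMatchExpression> clone(ge.shallowClone());
        return inside && !outside && clone->equivalent(&ge);       // clones compare equal on _rawObj
    }
    }  // namespace mongo
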
diff --git a/src/mongo/db/matcher/expression_geo.h b/src/mongo/db/matcher/expression_geo.h
index 01fa07bf08b..197bb794973 100644
--- a/src/mongo/db/matcher/expression_geo.h
+++ b/src/mongo/db/matcher/expression_geo.h
@@ -38,138 +38,149 @@
namespace mongo {
- struct PointWithCRS;
- class GeometryContainer;
+struct PointWithCRS;
+class GeometryContainer;
- // This represents either a $within or a $geoIntersects.
- class GeoExpression {
- MONGO_DISALLOW_COPYING(GeoExpression);
+// This represents either a $within or a $geoIntersects.
+class GeoExpression {
+ MONGO_DISALLOW_COPYING(GeoExpression);
+
+public:
+ GeoExpression();
+ GeoExpression(const std::string& f);
+
+ enum Predicate { WITHIN, INTERSECT, INVALID };
+
+ // parseFrom() must be called before getGeometry() to ensure initialization of geoContainer
+ Status parseFrom(const BSONObj& obj);
+
+ std::string getField() const {
+ return field;
+ }
+ Predicate getPred() const {
+ return predicate;
+ }
+ const GeometryContainer& getGeometry() const {
+ return *geoContainer;
+ }
- public:
- GeoExpression();
- GeoExpression(const std::string& f);
+private:
+ // Parse geospatial query
+ // e.g.
+ // { "$intersect" : { "$geometry" : { "type" : "Point", "coordinates": [ 40, 5 ] } } }
+ Status parseQuery(const BSONObj& obj);
- enum Predicate {
- WITHIN,
- INTERSECT,
- INVALID
- };
+ // Name of the field in the query.
+ std::string field;
+ std::unique_ptr<GeometryContainer> geoContainer;
+ Predicate predicate;
+};
- // parseFrom() must be called before getGeometry() to ensure initialization of geoContainer
- Status parseFrom(const BSONObj &obj);
+class GeoMatchExpression : public LeafMatchExpression {
+public:
+ GeoMatchExpression() : LeafMatchExpression(GEO) {}
+ virtual ~GeoMatchExpression() {}
- std::string getField() const { return field; }
- Predicate getPred() const { return predicate; }
- const GeometryContainer& getGeometry() const { return *geoContainer; }
+ /**
+ * Takes ownership of the passed-in GeoExpression.
+ */
+ Status init(StringData path, const GeoExpression* query, const BSONObj& rawObj);
+
+ virtual bool matchesSingleElement(const BSONElement& e) const;
+
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
+
+ virtual void toBSON(BSONObjBuilder* out) const;
+
+ virtual bool equivalent(const MatchExpression* other) const;
+
+ virtual LeafMatchExpression* shallowClone() const;
+
+ const GeoExpression& getGeoExpression() const {
+ return *_query;
+ }
+ const BSONObj getRawObj() const {
+ return _rawObj;
+ }
+
+private:
+ BSONObj _rawObj;
+ // Share ownership of our query with all of our clones
+ std::shared_ptr<const GeoExpression> _query;
+};
+
+
+// TODO: Make a struct, turn parse stuff into something like
+// static Status parseNearQuery(const BSONObj& obj, NearQuery** out);
+class GeoNearExpression {
+ MONGO_DISALLOW_COPYING(GeoNearExpression);
+
+public:
+ GeoNearExpression();
+ GeoNearExpression(const std::string& f);
+
+ Status parseFrom(const BSONObj& obj);
+
+ // The name of the field that contains the geometry.
+ std::string field;
+
+ // The starting point of the near search. Use forward declaration of geometries.
+ std::unique_ptr<PointWithCRS> centroid;
+
+ // Min and max distance from centroid that we're willing to search.
+ // Distance is in units of the geometry's CRS, except SPHERE and isNearSphere => radians
+ double minDistance;
+ double maxDistance;
+
+ // Is this a $nearSphere query
+ bool isNearSphere;
+ // $nearSphere with a legacy point implies units are radians
+ bool unitsAreRadians;
+ // $near with a non-legacy point implies a wrapping query, otherwise the query doesn't wrap
+ bool isWrappingQuery;
+
+ std::string toString() const {
+ std::stringstream ss;
+ ss << " field=" << field;
+ ss << " maxdist=" << maxDistance;
+ ss << " isNearSphere=" << isNearSphere;
+ return ss.str();
+ }
+
+private:
+ bool parseLegacyQuery(const BSONObj& obj);
+ Status parseNewQuery(const BSONObj& obj);
+};
+
+class GeoNearMatchExpression : public LeafMatchExpression {
+public:
+ GeoNearMatchExpression() : LeafMatchExpression(GEO_NEAR) {}
+ virtual ~GeoNearMatchExpression() {}
+
+ Status init(StringData path, const GeoNearExpression* query, const BSONObj& rawObj);
+
+ // This shouldn't be called and as such will crash. GeoNear always requires an index.
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- private:
- // Parse geospatial query
- // e.g.
- // { "$intersect" : { "$geometry" : { "type" : "Point", "coordinates": [ 40, 5 ] } } }
- Status parseQuery(const BSONObj &obj);
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
- // Name of the field in the query.
- std::string field;
- std::unique_ptr<GeometryContainer> geoContainer;
- Predicate predicate;
- };
+ virtual void toBSON(BSONObjBuilder* out) const;
- class GeoMatchExpression : public LeafMatchExpression {
- public:
- GeoMatchExpression() : LeafMatchExpression( GEO ){}
- virtual ~GeoMatchExpression(){}
+ virtual bool equivalent(const MatchExpression* other) const;
- /**
- * Takes ownership of the passed-in GeoExpression.
- */
- Status init( StringData path, const GeoExpression* query, const BSONObj& rawObj );
+ virtual LeafMatchExpression* shallowClone() const;
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ const GeoNearExpression& getData() const {
+ return *_query;
+ }
+ const BSONObj getRawObj() const {
+ return _rawObj;
+ }
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
-
- virtual void toBSON(BSONObjBuilder* out) const;
-
- virtual bool equivalent( const MatchExpression* other ) const;
-
- virtual LeafMatchExpression* shallowClone() const;
-
- const GeoExpression& getGeoExpression() const { return *_query; }
- const BSONObj getRawObj() const { return _rawObj; }
-
- private:
- BSONObj _rawObj;
- // Share ownership of our query with all of our clones
- std::shared_ptr<const GeoExpression> _query;
- };
-
-
- // TODO: Make a struct, turn parse stuff into something like
- // static Status parseNearQuery(const BSONObj& obj, NearQuery** out);
- class GeoNearExpression {
- MONGO_DISALLOW_COPYING(GeoNearExpression);
-
- public:
- GeoNearExpression();
- GeoNearExpression(const std::string& f);
-
- Status parseFrom(const BSONObj &obj);
-
- // The name of the field that contains the geometry.
- std::string field;
-
- // The starting point of the near search. Use forward declaration of geometries.
- std::unique_ptr<PointWithCRS> centroid;
-
- // Min and max distance from centroid that we're willing to search.
- // Distance is in units of the geometry's CRS, except SPHERE and isNearSphere => radians
- double minDistance;
- double maxDistance;
-
- // Is this a $nearSphere query
- bool isNearSphere;
- // $nearSphere with a legacy point implies units are radians
- bool unitsAreRadians;
- // $near with a non-legacy point implies a wrapping query, otherwise the query doesn't wrap
- bool isWrappingQuery;
-
- std::string toString() const {
- std::stringstream ss;
- ss << " field=" << field;
- ss << " maxdist=" << maxDistance;
- ss << " isNearSphere=" << isNearSphere;
- return ss.str();
- }
-
- private:
- bool parseLegacyQuery(const BSONObj &obj);
- Status parseNewQuery(const BSONObj &obj);
- };
-
- class GeoNearMatchExpression : public LeafMatchExpression {
- public:
- GeoNearMatchExpression() : LeafMatchExpression( GEO_NEAR ){}
- virtual ~GeoNearMatchExpression(){}
-
- Status init( StringData path, const GeoNearExpression* query, const BSONObj& rawObj );
-
- // This shouldn't be called and as such will crash. GeoNear always requires an index.
- virtual bool matchesSingleElement( const BSONElement& e ) const;
-
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
-
- virtual void toBSON(BSONObjBuilder* out) const;
-
- virtual bool equivalent( const MatchExpression* other ) const;
-
- virtual LeafMatchExpression* shallowClone() const;
-
- const GeoNearExpression& getData() const { return *_query; }
- const BSONObj getRawObj() const { return _rawObj; }
- private:
- BSONObj _rawObj;
- // Share ownership of our query with all of our clones
- std::shared_ptr<const GeoNearExpression> _query;
- };
+private:
+ BSONObj _rawObj;
+ // Share ownership of our query with all of our clones
+ std::shared_ptr<const GeoNearExpression> _query;
+};
} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_geo_test.cpp b/src/mongo/db/matcher/expression_geo_test.cpp
index 67d51f5580c..0bc96f33e7f 100644
--- a/src/mongo/db/matcher/expression_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_geo_test.cpp
@@ -41,137 +41,148 @@
namespace mongo {
- TEST( ExpressionGeoTest, Geo1 ) {
- BSONObj query = fromjson("{loc:{$within:{$box:[{x: 4, y:4},[6,6]]}}}");
+TEST(ExpressionGeoTest, Geo1) {
+ BSONObj query = fromjson("{loc:{$within:{$box:[{x: 4, y:4},[6,6]]}}}");
- std::unique_ptr<GeoExpression> gq(new GeoExpression);
- ASSERT_OK( gq->parseFrom( query["loc"].Obj() ) );
+ std::unique_ptr<GeoExpression> gq(new GeoExpression);
+ ASSERT_OK(gq->parseFrom(query["loc"].Obj()));
- GeoMatchExpression ge;
- ASSERT( ge.init("a", gq.release(), query ).isOK() );
+ GeoMatchExpression ge;
+ ASSERT(ge.init("a", gq.release(), query).isOK());
- ASSERT(!ge.matchesBSON(fromjson("{a: [3,4]}")));
- ASSERT(ge.matchesBSON(fromjson("{a: [4,4]}")));
- ASSERT(ge.matchesBSON(fromjson("{a: [5,5]}")));
- ASSERT(ge.matchesBSON(fromjson("{a: [5,5.1]}")));
- ASSERT(ge.matchesBSON(fromjson("{a: {x: 5, y:5.1}}")));
-
- }
+ ASSERT(!ge.matchesBSON(fromjson("{a: [3,4]}")));
+ ASSERT(ge.matchesBSON(fromjson("{a: [4,4]}")));
+ ASSERT(ge.matchesBSON(fromjson("{a: [5,5]}")));
+ ASSERT(ge.matchesBSON(fromjson("{a: [5,5.1]}")));
+ ASSERT(ge.matchesBSON(fromjson("{a: {x: 5, y:5.1}}")));
+}
- TEST(ExpressionGeoTest, GeoNear1) {
- BSONObj query = fromjson("{loc:{$near:{$maxDistance:100, "
- "$geometry:{type:\"Point\", coordinates:[0,0]}}}}");
- std::unique_ptr<GeoNearExpression> nq(new GeoNearExpression);
- ASSERT_OK(nq->parseFrom(query["loc"].Obj()));
+TEST(ExpressionGeoTest, GeoNear1) {
+ BSONObj query = fromjson(
+ "{loc:{$near:{$maxDistance:100, "
+ "$geometry:{type:\"Point\", coordinates:[0,0]}}}}");
+ std::unique_ptr<GeoNearExpression> nq(new GeoNearExpression);
+ ASSERT_OK(nq->parseFrom(query["loc"].Obj()));
- GeoNearMatchExpression gne;
- ASSERT(gne.init("a", nq.release(), query).isOK());
+ GeoNearMatchExpression gne;
+ ASSERT(gne.init("a", nq.release(), query).isOK());
- // We can't match the data but we can make sure it was parsed OK.
- ASSERT_EQUALS(gne.getData().centroid->crs, SPHERE);
- ASSERT_EQUALS(gne.getData().minDistance, 0);
- ASSERT_EQUALS(gne.getData().maxDistance, 100);
- }
+ // We can't match the data but we can make sure it was parsed OK.
+ ASSERT_EQUALS(gne.getData().centroid->crs, SPHERE);
+ ASSERT_EQUALS(gne.getData().minDistance, 0);
+ ASSERT_EQUALS(gne.getData().maxDistance, 100);
+}
- std::unique_ptr<GeoMatchExpression> makeGeoMatchExpression(const BSONObj& locQuery) {
- std::unique_ptr<GeoExpression> gq(new GeoExpression);
- ASSERT_OK(gq->parseFrom(locQuery));
+std::unique_ptr<GeoMatchExpression> makeGeoMatchExpression(const BSONObj& locQuery) {
+ std::unique_ptr<GeoExpression> gq(new GeoExpression);
+ ASSERT_OK(gq->parseFrom(locQuery));
- std::unique_ptr<GeoMatchExpression> ge = stdx::make_unique<GeoMatchExpression>();
- ASSERT_OK(ge->init("a", gq.release(), locQuery));
+ std::unique_ptr<GeoMatchExpression> ge = stdx::make_unique<GeoMatchExpression>();
+ ASSERT_OK(ge->init("a", gq.release(), locQuery));
- return ge;
- }
+ return ge;
+}
- std::unique_ptr<GeoNearMatchExpression> makeGeoNearMatchExpression(const BSONObj& locQuery) {
- std::unique_ptr<GeoNearExpression> nq(new GeoNearExpression);
- ASSERT_OK(nq->parseFrom(locQuery));
+std::unique_ptr<GeoNearMatchExpression> makeGeoNearMatchExpression(const BSONObj& locQuery) {
+ std::unique_ptr<GeoNearExpression> nq(new GeoNearExpression);
+ ASSERT_OK(nq->parseFrom(locQuery));
- std::unique_ptr<GeoNearMatchExpression> gne = stdx::make_unique<GeoNearMatchExpression>();
- ASSERT_OK(gne->init("a", nq.release(), locQuery));
+ std::unique_ptr<GeoNearMatchExpression> gne = stdx::make_unique<GeoNearMatchExpression>();
+ ASSERT_OK(gne->init("a", nq.release(), locQuery));
- return gne;
- }
+ return gne;
+}
- /**
- * A bunch of cases in which a geo expression is equivalent() to both itself or to another
- * expression.
- */
- TEST(ExpressionGeoTest, GeoEquivalent) {
- {
- BSONObj query = fromjson("{$within: {$box: [{x: 4, y: 4}, [6, 6]]}}");
- std::unique_ptr<GeoMatchExpression> ge(makeGeoMatchExpression(query));
- ASSERT(ge->equivalent(ge.get()));
- }
- {
- BSONObj query = fromjson("{$within: {$geometry: {type: 'Polygon',"
- "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}");
- std::unique_ptr<GeoMatchExpression> ge(makeGeoMatchExpression(query));
- ASSERT(ge->equivalent(ge.get()));
- }
- {
- BSONObj query1 = fromjson("{$within: {$geometry: {type: 'Polygon',"
- "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}"),
- query2 = fromjson("{$within: {$geometry: {type: 'Polygon',"
- "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}");
- std::unique_ptr<GeoMatchExpression> ge1(makeGeoMatchExpression(query1)),
- ge2(makeGeoMatchExpression(query2));
- ASSERT(ge1->equivalent(ge2.get()));
- }
+/**
+ * A bunch of cases in which a geo expression is equivalent() both to itself and to
+ * another expression.
+ */
+TEST(ExpressionGeoTest, GeoEquivalent) {
+ {
+ BSONObj query = fromjson("{$within: {$box: [{x: 4, y: 4}, [6, 6]]}}");
+ std::unique_ptr<GeoMatchExpression> ge(makeGeoMatchExpression(query));
+ ASSERT(ge->equivalent(ge.get()));
}
-
- /**
- * A bunch of cases in which a *geoNear* expression is equivalent both to itself or to
- * another expression.
- */
- TEST(ExpressionGeoTest, GeoNearEquivalent) {
- {
- BSONObj query = fromjson("{$near: {$maxDistance: 100, "
- "$geometry: {type: 'Point', coordinates: [0, 0]}}}");
- std::unique_ptr<GeoNearMatchExpression> gne(makeGeoNearMatchExpression(query));
- ASSERT(gne->equivalent(gne.get()));
- }
- {
- BSONObj query = fromjson("{$near: {$minDistance: 10, $maxDistance: 100,"
- "$geometry: {type: 'Point', coordinates: [0, 0]}}}");
- std::unique_ptr<GeoNearMatchExpression> gne(makeGeoNearMatchExpression(query));
- ASSERT(gne->equivalent(gne.get()));
- }
- {
- BSONObj query1 = fromjson("{$near: {$maxDistance: 100, "
- "$geometry: {type: 'Point', coordinates: [1, 0]}}}"),
- query2 = fromjson("{$near: {$maxDistance: 100, "
- "$geometry: {type: 'Point', coordinates: [1, 0]}}}");
- std::unique_ptr<GeoNearMatchExpression> gne1(makeGeoNearMatchExpression(query1)),
- gne2(makeGeoNearMatchExpression(query2));
- ASSERT(gne1->equivalent(gne2.get()));
- }
+ {
+ BSONObj query = fromjson(
+ "{$within: {$geometry: {type: 'Polygon',"
+ "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}");
+ std::unique_ptr<GeoMatchExpression> ge(makeGeoMatchExpression(query));
+ ASSERT(ge->equivalent(ge.get()));
}
-
- /**
- * A geo expression being not equivalent to another expression.
- */
- TEST(ExpressionGeoTest, GeoNotEquivalent) {
- BSONObj query1 = fromjson("{$within: {$geometry: {type: 'Polygon',"
- "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}"),
- query2 = fromjson("{$within: {$geometry: {type: 'Polygon',"
- "coordinates: [[[0, 0], [3, 6], [6, 2], [0, 0]]]}}}");
+ {
+ BSONObj query1 = fromjson(
+ "{$within: {$geometry: {type: 'Polygon',"
+ "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}"),
+ query2 = fromjson(
+ "{$within: {$geometry: {type: 'Polygon',"
+ "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}");
std::unique_ptr<GeoMatchExpression> ge1(makeGeoMatchExpression(query1)),
- ge2(makeGeoMatchExpression(query2));
- ASSERT(!ge1->equivalent(ge2.get()));
+ ge2(makeGeoMatchExpression(query2));
+ ASSERT(ge1->equivalent(ge2.get()));
}
+}
- /**
- * A *geoNear* expression being not equivalent to another expression.
- */
- TEST(ExpressionGeoTest, GeoNearNotEquivalent) {
- BSONObj query1 = fromjson("{$near: {$maxDistance: 100, "
- "$geometry: {type: 'Point', coordinates: [0, 0]}}}"),
- query2 = fromjson("{$near: {$maxDistance: 100, "
- "$geometry: {type: 'Point', coordinates: [1, 0]}}}");
+/**
+ * A bunch of cases in which a *geoNear* expression is equivalent both to itself and to
+ * another expression.
+ */
+TEST(ExpressionGeoTest, GeoNearEquivalent) {
+ {
+ BSONObj query = fromjson(
+ "{$near: {$maxDistance: 100, "
+ "$geometry: {type: 'Point', coordinates: [0, 0]}}}");
+ std::unique_ptr<GeoNearMatchExpression> gne(makeGeoNearMatchExpression(query));
+ ASSERT(gne->equivalent(gne.get()));
+ }
+ {
+ BSONObj query = fromjson(
+ "{$near: {$minDistance: 10, $maxDistance: 100,"
+ "$geometry: {type: 'Point', coordinates: [0, 0]}}}");
+ std::unique_ptr<GeoNearMatchExpression> gne(makeGeoNearMatchExpression(query));
+ ASSERT(gne->equivalent(gne.get()));
+ }
+ {
+ BSONObj query1 = fromjson(
+ "{$near: {$maxDistance: 100, "
+ "$geometry: {type: 'Point', coordinates: [1, 0]}}}"),
+ query2 = fromjson(
+ "{$near: {$maxDistance: 100, "
+ "$geometry: {type: 'Point', coordinates: [1, 0]}}}");
std::unique_ptr<GeoNearMatchExpression> gne1(makeGeoNearMatchExpression(query1)),
- gne2(makeGeoNearMatchExpression(query2));
- ASSERT(!gne1->equivalent(gne2.get()));
+ gne2(makeGeoNearMatchExpression(query2));
+ ASSERT(gne1->equivalent(gne2.get()));
}
}
+
+/**
+ * A case in which a geo expression is not equivalent to another expression.
+ */
+TEST(ExpressionGeoTest, GeoNotEquivalent) {
+ BSONObj query1 = fromjson(
+ "{$within: {$geometry: {type: 'Polygon',"
+ "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}"),
+ query2 = fromjson(
+ "{$within: {$geometry: {type: 'Polygon',"
+ "coordinates: [[[0, 0], [3, 6], [6, 2], [0, 0]]]}}}");
+ std::unique_ptr<GeoMatchExpression> ge1(makeGeoMatchExpression(query1)),
+ ge2(makeGeoMatchExpression(query2));
+ ASSERT(!ge1->equivalent(ge2.get()));
+}
+
+/**
+ * A case in which a *geoNear* expression is not equivalent to another expression.
+ */
+TEST(ExpressionGeoTest, GeoNearNotEquivalent) {
+ BSONObj query1 = fromjson(
+ "{$near: {$maxDistance: 100, "
+ "$geometry: {type: 'Point', coordinates: [0, 0]}}}"),
+ query2 = fromjson(
+ "{$near: {$maxDistance: 100, "
+ "$geometry: {type: 'Point', coordinates: [1, 0]}}}");
+ std::unique_ptr<GeoNearMatchExpression> gne1(makeGeoNearMatchExpression(query1)),
+ gne2(makeGeoNearMatchExpression(query2));
+ ASSERT(!gne1->equivalent(gne2.get()));
+}
+}
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index f0a91cffbbf..619fc64133d 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -41,52 +41,50 @@
namespace mongo {
- Status LeafMatchExpression::initPath( StringData path ) {
- _path = path;
- return _elementPath.init( _path );
- }
+Status LeafMatchExpression::initPath(StringData path) {
+ _path = path;
+ return _elementPath.init(_path);
+}
- bool LeafMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
- MatchableDocument::IteratorHolder cursor( doc, &_elementPath );
- while ( cursor->more() ) {
- ElementIterator::Context e = cursor->next();
- if ( !matchesSingleElement( e.element() ) )
- continue;
- if ( details && details->needRecord() && !e.arrayOffset().eoo() ) {
- details->setElemMatchKey( e.arrayOffset().fieldName() );
- }
- return true;
+bool LeafMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const {
+ MatchableDocument::IteratorHolder cursor(doc, &_elementPath);
+ while (cursor->more()) {
+ ElementIterator::Context e = cursor->next();
+ if (!matchesSingleElement(e.element()))
+ continue;
+ if (details && details->needRecord() && !e.arrayOffset().eoo()) {
+ details->setElemMatchKey(e.arrayOffset().fieldName());
}
- return false;
+ return true;
}
+ return false;
+}
- // -------------
+// -------------
- bool ComparisonMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( other->matchType() != matchType() )
- return false;
- const ComparisonMatchExpression* realOther =
- static_cast<const ComparisonMatchExpression*>( other );
+bool ComparisonMatchExpression::equivalent(const MatchExpression* other) const {
+ if (other->matchType() != matchType())
+ return false;
+ const ComparisonMatchExpression* realOther =
+ static_cast<const ComparisonMatchExpression*>(other);
- return
- path() == realOther->path() &&
- _rhs.valuesEqual( realOther->_rhs );
- }
+ return path() == realOther->path() && _rhs.valuesEqual(realOther->_rhs);
+}
- Status ComparisonMatchExpression::init( StringData path, const BSONElement& rhs ) {
- _rhs = rhs;
+Status ComparisonMatchExpression::init(StringData path, const BSONElement& rhs) {
+ _rhs = rhs;
- if ( rhs.eoo() ) {
- return Status( ErrorCodes::BadValue, "need a real operand" );
- }
+ if (rhs.eoo()) {
+ return Status(ErrorCodes::BadValue, "need a real operand");
+ }
- if ( rhs.type() == Undefined ) {
- return Status( ErrorCodes::BadValue, "cannot compare to undefined" );
- }
+ if (rhs.type() == Undefined) {
+ return Status(ErrorCodes::BadValue, "cannot compare to undefined");
+ }
- switch ( matchType() ) {
+ switch (matchType()) {
case LT:
case LTE:
case EQ:
@@ -94,36 +92,36 @@ namespace mongo {
case GTE:
break;
default:
- return Status( ErrorCodes::BadValue, "bad match type for ComparisonMatchExpression" );
- }
-
- return initPath( path );
+ return Status(ErrorCodes::BadValue, "bad match type for ComparisonMatchExpression");
}
+ return initPath(path);
+}
- bool ComparisonMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- //log() << "\t ComparisonMatchExpression e: " << e << " _rhs: " << _rhs << "\n"
- //<< toString() << std::endl;
- if ( e.canonicalType() != _rhs.canonicalType() ) {
- // some special cases
- // jstNULL and undefined are treated the same
- if ( e.canonicalType() + _rhs.canonicalType() == 5 ) {
- return matchType() == EQ || matchType() == LTE || matchType() == GTE;
- }
+bool ComparisonMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ // log() << "\t ComparisonMatchExpression e: " << e << " _rhs: " << _rhs << "\n"
+ //<< toString() << std::endl;
- if ( _rhs.type() == MaxKey || _rhs.type() == MinKey ) {
- return matchType() != EQ;
- }
+ if (e.canonicalType() != _rhs.canonicalType()) {
+ // some special cases
+ // jstNULL and undefined are treated the same
+ if (e.canonicalType() + _rhs.canonicalType() == 5) {
+ return matchType() == EQ || matchType() == LTE || matchType() == GTE;
+ }
- return false;
+ if (_rhs.type() == MaxKey || _rhs.type() == MinKey) {
+ return matchType() != EQ;
}
- // Special case handling for NaN. NaN is equal to NaN but
- // otherwise always compares to false.
- if (std::isnan(e.numberDouble()) || std::isnan(_rhs.numberDouble())) {
- bool bothNaN = std::isnan(e.numberDouble()) && std::isnan(_rhs.numberDouble());
- switch ( matchType() ) {
+ return false;
+ }
+
+ // Special case handling for NaN. NaN is equal to NaN but
+ // otherwise always compares to false.
+ if (std::isnan(e.numberDouble()) || std::isnan(_rhs.numberDouble())) {
+ bool bothNaN = std::isnan(e.numberDouble()) && std::isnan(_rhs.numberDouble());
+ switch (matchType()) {
case LT:
return false;
case LTE:
@@ -137,15 +135,15 @@ namespace mongo {
default:
// This is a comparison match expression, so it must be either
// a $lt, $lte, $gt, $gte, or equality expression.
- fassertFailed( 17448 );
- }
+ fassertFailed(17448);
}
+ }
- int x = compareElementValues( e, _rhs );
+ int x = compareElementValues(e, _rhs);
- //log() << "\t\t" << x << endl;
+ // log() << "\t\t" << x << endl;
- switch ( matchType() ) {
+ switch (matchType()) {
case LT:
return x < 0;
case LTE:
@@ -159,450 +157,462 @@ namespace mongo {
default:
// This is a comparison match expression, so it must be either
// a $lt, $lte, $gt, $gte, or equality expression.
- fassertFailed( 16828 );
- }
+ fassertFailed(16828);
}
+}
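A minimal sketch of the NaN rule above, patterned on the unit tests later in this diff; the quiet_NaN() value (and the <limits> include it needs) is introduced here purely for illustration:

    EqualityMatchExpression eq;
    const double nan = std::numeric_limits<double>::quiet_NaN();
    BSONObj operand = BSON("a" << nan);
    ASSERT(eq.init("a", operand["a"]).isOK());
    ASSERT(eq.matchesBSON(BSON("a" << nan), NULL));   // NaN is equal to NaN
    ASSERT(!eq.matchesBSON(BSON("a" << 0.0), NULL));  // any other comparison with NaN is false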
- void ComparisonMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << path() << " ";
- switch ( matchType() ) {
- case LT: debug << "$lt"; break;
- case LTE: debug << "$lte"; break;
- case EQ: debug << "=="; break;
- case GT: debug << "$gt"; break;
- case GTE: debug << "$gte"; break;
- default: debug << " UNKNOWN - should be impossible"; break;
- }
- debug << " " << _rhs.toString( false );
-
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
-
- debug << "\n";
+void ComparisonMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << path() << " ";
+ switch (matchType()) {
+ case LT:
+ debug << "$lt";
+ break;
+ case LTE:
+ debug << "$lte";
+ break;
+ case EQ:
+ debug << "==";
+ break;
+ case GT:
+ debug << "$gt";
+ break;
+ case GTE:
+ debug << "$gte";
+ break;
+ default:
+ debug << " UNKNOWN - should be impossible";
+ break;
}
+ debug << " " << _rhs.toString(false);
- void ComparisonMatchExpression::toBSON(BSONObjBuilder* out) const {
- string opString = "";
- switch ( matchType() ) {
- case LT: opString = "$lt"; break;
- case LTE: opString = "$lte"; break;
- case EQ: opString = "$eq"; break;
- case GT: opString = "$gt"; break;
- case GTE: opString = "$gte"; break;
- default: opString = " UNKNOWN - should be impossible"; break;
- }
-
- out->append(path(), BSON(opString << _rhs));
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
- // ---------------
-
- // TODO: move
- inline pcrecpp::RE_Options flags2options(const char* flags) {
- pcrecpp::RE_Options options;
- options.set_utf8(true);
- while ( flags && *flags ) {
- if ( *flags == 'i' )
- options.set_caseless(true);
- else if ( *flags == 'm' )
- options.set_multiline(true);
- else if ( *flags == 'x' )
- options.set_extended(true);
- else if ( *flags == 's' )
- options.set_dotall(true);
- flags++;
- }
- return options;
+ debug << "\n";
+}
+
+void ComparisonMatchExpression::toBSON(BSONObjBuilder* out) const {
+ string opString = "";
+ switch (matchType()) {
+ case LT:
+ opString = "$lt";
+ break;
+ case LTE:
+ opString = "$lte";
+ break;
+ case EQ:
+ opString = "$eq";
+ break;
+ case GT:
+ opString = "$gt";
+ break;
+ case GTE:
+ opString = "$gte";
+ break;
+ default:
+ opString = " UNKNOWN - should be impossible";
+ break;
}
- RegexMatchExpression::RegexMatchExpression()
- : LeafMatchExpression( REGEX ) {}
+ out->append(path(), BSON(opString << _rhs));
+}
- RegexMatchExpression::~RegexMatchExpression() {}
+// ---------------
+
+// TODO: move
+inline pcrecpp::RE_Options flags2options(const char* flags) {
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ while (flags && *flags) {
+ if (*flags == 'i')
+ options.set_caseless(true);
+ else if (*flags == 'm')
+ options.set_multiline(true);
+ else if (*flags == 'x')
+ options.set_extended(true);
+ else if (*flags == 's')
+ options.set_dotall(true);
+ flags++;
+ }
+ return options;
+}
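For instance, a sketch (not taken from the patch) of how a flag string maps onto pcre options; any flag character outside 'imxs' is silently skipped by the loop above:

    pcrecpp::RE_Options opts = flags2options("is");  // utf8 + caseless + dotall
    pcrecpp::RE re("^a.c$", opts);                   // '.' now also matches newlines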
- bool RegexMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+RegexMatchExpression::RegexMatchExpression() : LeafMatchExpression(REGEX) {}
- const RegexMatchExpression* realOther = static_cast<const RegexMatchExpression*>( other );
- return
- path() == realOther->path() &&
- _regex == realOther->_regex
- && _flags == realOther->_flags;
- }
+RegexMatchExpression::~RegexMatchExpression() {}
+bool RegexMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
- Status RegexMatchExpression::init( StringData path, const BSONElement& e ) {
- if ( e.type() != RegEx )
- return Status( ErrorCodes::BadValue, "regex not a regex" );
- return init( path, e.regex(), e.regexFlags() );
- }
+ const RegexMatchExpression* realOther = static_cast<const RegexMatchExpression*>(other);
+ return path() == realOther->path() && _regex == realOther->_regex &&
+ _flags == realOther->_flags;
+}
- Status RegexMatchExpression::init( StringData path, StringData regex, StringData options ) {
- if ( regex.size() > MaxPatternSize ) {
- return Status( ErrorCodes::BadValue, "Regular expression is too long" );
- }
+Status RegexMatchExpression::init(StringData path, const BSONElement& e) {
+ if (e.type() != RegEx)
+ return Status(ErrorCodes::BadValue, "regex not a regex");
+ return init(path, e.regex(), e.regexFlags());
+}
- _regex = regex.toString();
- _flags = options.toString();
- _re.reset( new pcrecpp::RE( _regex.c_str(), flags2options( _flags.c_str() ) ) );
- return initPath( path );
+Status RegexMatchExpression::init(StringData path, StringData regex, StringData options) {
+ if (regex.size() > MaxPatternSize) {
+ return Status(ErrorCodes::BadValue, "Regular expression is too long");
}
- bool RegexMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- //log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e << std::endl;
- switch (e.type()) {
+ _regex = regex.toString();
+ _flags = options.toString();
+ _re.reset(new pcrecpp::RE(_regex.c_str(), flags2options(_flags.c_str())));
+
+ return initPath(path);
+}
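As a rough illustration of the size guard in init(), in the style of the tests below (the oversized pattern is invented here):

    RegexMatchExpression tooLong;
    std::string hugePattern(RegexMatchExpression::MaxPatternSize + 1, 'a');
    ASSERT(!tooLong.init("a", hugePattern, "").isOK());  // "Regular expression is too long"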
+
+bool RegexMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e << std::endl;
+ switch (e.type()) {
case String:
case Symbol:
// TODO
- //if (rm._prefix.empty())
- return _re->PartialMatch(e.valuestr());
- //else
- //return !strncmp(e.valuestr(), rm._prefix.c_str(), rm._prefix.size());
+ // if (rm._prefix.empty())
+ return _re->PartialMatch(e.valuestr());
+ // else
+ // return !strncmp(e.valuestr(), rm._prefix.c_str(), rm._prefix.size());
case RegEx:
return _regex == e.regex() && _flags == e.regexFlags();
default:
return false;
- }
}
+}
- void RegexMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << path() << " regex /" << _regex << "/" << _flags;
+void RegexMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << path() << " regex /" << _regex << "/" << _flags;
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
- debug << "\n";
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+ debug << "\n";
+}
- void RegexMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->appendRegex(path(), _regex, _flags);
- }
+void RegexMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->appendRegex(path(), _regex, _flags);
+}
- void RegexMatchExpression::shortDebugString( StringBuilder& debug ) const {
- debug << "/" << _regex << "/" << _flags;
- }
+void RegexMatchExpression::shortDebugString(StringBuilder& debug) const {
+ debug << "/" << _regex << "/" << _flags;
+}
- // ---------
+// ---------
- Status ModMatchExpression::init( StringData path, int divisor, int remainder ) {
- if ( divisor == 0 )
- return Status( ErrorCodes::BadValue, "divisor cannot be 0" );
- _divisor = divisor;
- _remainder = remainder;
- return initPath( path );
- }
+Status ModMatchExpression::init(StringData path, int divisor, int remainder) {
+ if (divisor == 0)
+ return Status(ErrorCodes::BadValue, "divisor cannot be 0");
+ _divisor = divisor;
+ _remainder = remainder;
+ return initPath(path);
+}
- bool ModMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- if ( !e.isNumber() )
- return false;
- return e.numberLong() % _divisor == _remainder;
- }
+bool ModMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ if (!e.isNumber())
+ return false;
+ return e.numberLong() % _divisor == _remainder;
+}
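A small sketch of the $mod semantics above, in the style of the unit tests later in this diff (the concrete operands are illustrative only):

    ModMatchExpression mod;
    ASSERT(mod.init("a", 4, 1).isOK());
    ASSERT(mod.matchesBSON(BSON("a" << 5), NULL));        // 5 % 4 == 1
    ASSERT(!mod.matchesBSON(BSON("a" << 8), NULL));       // 8 % 4 == 0
    ASSERT(!mod.matchesBSON(BSON("a" << "five"), NULL));  // non-numeric values never match
    ASSERT(!mod.init("a", 0, 1).isOK());                  // a divisor of 0 is rejected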
- void ModMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << path() << " mod " << _divisor << " % x == " << _remainder;
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
- debug << "\n";
+void ModMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << path() << " mod " << _divisor << " % x == " << _remainder;
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+ debug << "\n";
+}
- void ModMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append(path(), BSON("$mod" << BSON_ARRAY(_divisor << _remainder)));
- }
+void ModMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append(path(), BSON("$mod" << BSON_ARRAY(_divisor << _remainder)));
+}
- bool ModMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+bool ModMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
- const ModMatchExpression* realOther = static_cast<const ModMatchExpression*>( other );
- return
- path() == realOther->path() &&
- _divisor == realOther->_divisor &&
- _remainder == realOther->_remainder;
- }
+ const ModMatchExpression* realOther = static_cast<const ModMatchExpression*>(other);
+ return path() == realOther->path() && _divisor == realOther->_divisor &&
+ _remainder == realOther->_remainder;
+}
- // ------------------
+// ------------------
- Status ExistsMatchExpression::init( StringData path ) {
- return initPath( path );
- }
+Status ExistsMatchExpression::init(StringData path) {
+ return initPath(path);
+}
- bool ExistsMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- return !e.eoo();
- }
+bool ExistsMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ return !e.eoo();
+}
- void ExistsMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << path() << " exists";
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
- }
- debug << "\n";
+void ExistsMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << path() << " exists";
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+ debug << "\n";
+}
- void ExistsMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append(path(), BSON("$exists" << true));
- }
+void ExistsMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append(path(), BSON("$exists" << true));
+}
- bool ExistsMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+bool ExistsMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
- const ExistsMatchExpression* realOther = static_cast<const ExistsMatchExpression*>( other );
- return path() == realOther->path();
- }
+ const ExistsMatchExpression* realOther = static_cast<const ExistsMatchExpression*>(other);
+ return path() == realOther->path();
+}
- // ----
+// ----
- Status TypeMatchExpression::init( StringData path, int type ) {
- _path = path;
- _type = type;
- return _elementPath.init( _path );
- }
+Status TypeMatchExpression::init(StringData path, int type) {
+ _path = path;
+ _type = type;
+ return _elementPath.init(_path);
+}
- bool TypeMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- return e.type() == _type;
- }
+bool TypeMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ return e.type() == _type;
+}
- bool TypeMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
- MatchableDocument::IteratorHolder cursor( doc, &_elementPath );
- while ( cursor->more() ) {
- ElementIterator::Context e = cursor->next();
-
- // In the case where _elementPath is referring to an array,
- // $type should match elements of that array only.
- // outerArray() helps to identify elements of the array
- // and the containing array itself.
- // This matters when we are looking for {$type: Array}.
- // Example (_elementPath refers to field 'a' and _type is Array):
- // a : [ // outer array. should not match
- // 123, // inner array
- // [ 456 ], // inner array. should match
- // ...
- // ]
- if ( _type == mongo::Array && e.outerArray() ) {
- continue;
- }
-
- if ( !matchesSingleElement( e.element() ) ) {
- continue;
- }
-
- if ( details && details->needRecord() && !e.arrayOffset().eoo() ) {
- details->setElemMatchKey( e.arrayOffset().fieldName() );
- }
- return true;
+bool TypeMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const {
+ MatchableDocument::IteratorHolder cursor(doc, &_elementPath);
+ while (cursor->more()) {
+ ElementIterator::Context e = cursor->next();
+
+ // In the case where _elementPath is referring to an array,
+ // $type should match elements of that array only.
+ // outerArray() helps to identify elements of the array
+ // and the containing array itself.
+ // This matters when we are looking for {$type: Array}.
+ // Example (_elementPath refers to field 'a' and _type is Array):
+ // a : [ // outer array. should not match
+    //        123, // not an array. should not match
+ // [ 456 ], // inner array. should match
+ // ...
+ // ]
+ if (_type == mongo::Array && e.outerArray()) {
+ continue;
}
- return false;
- }
- void TypeMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << _path << " type: " << _type;
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
+ if (!matchesSingleElement(e.element())) {
+ continue;
}
- debug << "\n";
- }
- void TypeMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append(path(), BSON("$type" << _type));
+ if (details && details->needRecord() && !e.arrayOffset().eoo()) {
+ details->setElemMatchKey(e.arrayOffset().fieldName());
+ }
+ return true;
}
+ return false;
+}
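Following the outerArray() comment above, a sketch (illustrative only) of how a {$type: 4} query on 'a' behaves; only arrays nested inside the outer array match:

    TypeMatchExpression type;
    ASSERT(type.init("a", mongo::Array).isOK());
    ASSERT(type.matchesBSON(fromjson("{a: [123, [456]]}"), NULL));  // nested [456] matches
    ASSERT(!type.matchesBSON(fromjson("{a: [123, 456]}"), NULL));   // the outer array alone does not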
- bool TypeMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
-
- const TypeMatchExpression* realOther = static_cast<const TypeMatchExpression*>( other );
- return _path == realOther->_path && _type == realOther->_type;
+void TypeMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << _path << " type: " << _type;
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+ debug << "\n";
+}
+void TypeMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append(path(), BSON("$type" << _type));
+}
- // --------
+bool TypeMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
- ArrayFilterEntries::ArrayFilterEntries(){
- _hasNull = false;
- _hasEmptyArray = false;
- }
+ const TypeMatchExpression* realOther = static_cast<const TypeMatchExpression*>(other);
+ return _path == realOther->_path && _type == realOther->_type;
+}
- ArrayFilterEntries::~ArrayFilterEntries() {
- for ( unsigned i = 0; i < _regexes.size(); i++ )
- delete _regexes[i];
- _regexes.clear();
- }
- Status ArrayFilterEntries::addEquality( const BSONElement& e ) {
- if ( e.type() == RegEx )
- return Status( ErrorCodes::BadValue, "ArrayFilterEntries equality cannot be a regex" );
+// --------
- if ( e.type() == Undefined ) {
- return Status( ErrorCodes::BadValue,
- "ArrayFilterEntries equality cannot be undefined" );
- }
+ArrayFilterEntries::ArrayFilterEntries() {
+ _hasNull = false;
+ _hasEmptyArray = false;
+}
- if ( e.type() == jstNULL ) {
- _hasNull = true;
- }
+ArrayFilterEntries::~ArrayFilterEntries() {
+ for (unsigned i = 0; i < _regexes.size(); i++)
+ delete _regexes[i];
+ _regexes.clear();
+}
- if ( e.type() == Array && e.Obj().isEmpty() )
- _hasEmptyArray = true;
+Status ArrayFilterEntries::addEquality(const BSONElement& e) {
+ if (e.type() == RegEx)
+ return Status(ErrorCodes::BadValue, "ArrayFilterEntries equality cannot be a regex");
- _equalities.insert( e );
- return Status::OK();
+ if (e.type() == Undefined) {
+ return Status(ErrorCodes::BadValue, "ArrayFilterEntries equality cannot be undefined");
}
- Status ArrayFilterEntries::addRegex( RegexMatchExpression* expr ) {
- _regexes.push_back( expr );
- return Status::OK();
+ if (e.type() == jstNULL) {
+ _hasNull = true;
}
- bool ArrayFilterEntries::equivalent( const ArrayFilterEntries& other ) const {
- if ( _hasNull != other._hasNull )
- return false;
+ if (e.type() == Array && e.Obj().isEmpty())
+ _hasEmptyArray = true;
- if ( _regexes.size() != other._regexes.size() )
+ _equalities.insert(e);
+ return Status::OK();
+}
+
+Status ArrayFilterEntries::addRegex(RegexMatchExpression* expr) {
+ _regexes.push_back(expr);
+ return Status::OK();
+}
+
+bool ArrayFilterEntries::equivalent(const ArrayFilterEntries& other) const {
+ if (_hasNull != other._hasNull)
+ return false;
+
+ if (_regexes.size() != other._regexes.size())
+ return false;
+ for (unsigned i = 0; i < _regexes.size(); i++)
+ if (!_regexes[i]->equivalent(other._regexes[i]))
return false;
- for ( unsigned i = 0; i < _regexes.size(); i++ )
- if ( !_regexes[i]->equivalent( other._regexes[i] ) )
- return false;
- return _equalities == other._equalities;
- }
+ return _equalities == other._equalities;
+}
- void ArrayFilterEntries::copyTo( ArrayFilterEntries& toFillIn ) const {
- toFillIn._hasNull = _hasNull;
- toFillIn._hasEmptyArray = _hasEmptyArray;
- toFillIn._equalities = _equalities;
- for ( unsigned i = 0; i < _regexes.size(); i++ )
- toFillIn._regexes.push_back( static_cast<RegexMatchExpression*>(_regexes[i]->shallowClone()) );
- }
+void ArrayFilterEntries::copyTo(ArrayFilterEntries& toFillIn) const {
+ toFillIn._hasNull = _hasNull;
+ toFillIn._hasEmptyArray = _hasEmptyArray;
+ toFillIn._equalities = _equalities;
+ for (unsigned i = 0; i < _regexes.size(); i++)
+ toFillIn._regexes.push_back(
+ static_cast<RegexMatchExpression*>(_regexes[i]->shallowClone()));
+}
- void ArrayFilterEntries::debugString( StringBuilder& debug ) const {
- debug << "[ ";
- for (BSONElementSet::const_iterator it = _equalities.begin();
- it != _equalities.end(); ++it) {
- debug << it->toString( false ) << " ";
- }
- for (size_t i = 0; i < _regexes.size(); ++i) {
- _regexes[i]->shortDebugString( debug );
- debug << " ";
- }
- debug << "]";
+void ArrayFilterEntries::debugString(StringBuilder& debug) const {
+ debug << "[ ";
+ for (BSONElementSet::const_iterator it = _equalities.begin(); it != _equalities.end(); ++it) {
+ debug << it->toString(false) << " ";
}
-
- void ArrayFilterEntries::toBSON(BSONArrayBuilder* out) const {
- for (BSONElementSet::const_iterator it = _equalities.begin();
- it != _equalities.end(); ++it) {
- out->append(*it);
- }
- for (size_t i = 0; i < _regexes.size(); ++i) {
- BSONObjBuilder regexBob;
- _regexes[i]->toBSON(&regexBob);
- out->append(regexBob.obj().firstElement());
- }
- out->doneFast();
+ for (size_t i = 0; i < _regexes.size(); ++i) {
+ _regexes[i]->shortDebugString(debug);
+ debug << " ";
}
+ debug << "]";
+}
- // -----------
-
- Status InMatchExpression::init( StringData path ) {
- return initPath( path );
+void ArrayFilterEntries::toBSON(BSONArrayBuilder* out) const {
+ for (BSONElementSet::const_iterator it = _equalities.begin(); it != _equalities.end(); ++it) {
+ out->append(*it);
}
+ for (size_t i = 0; i < _regexes.size(); ++i) {
+ BSONObjBuilder regexBob;
+ _regexes[i]->toBSON(&regexBob);
+ out->append(regexBob.obj().firstElement());
+ }
+ out->doneFast();
+}
- bool InMatchExpression::_matchesRealElement( const BSONElement& e ) const {
- if ( _arrayEntries.contains( e ) )
- return true;
+// -----------
- for ( unsigned i = 0; i < _arrayEntries.numRegexes(); i++ ) {
- if ( _arrayEntries.regex(i)->matchesSingleElement( e ) )
- return true;
- }
+Status InMatchExpression::init(StringData path) {
+ return initPath(path);
+}
- return false;
- }
+bool InMatchExpression::_matchesRealElement(const BSONElement& e) const {
+ if (_arrayEntries.contains(e))
+ return true;
- bool InMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- if ( _arrayEntries.hasNull() && e.eoo() )
+ for (unsigned i = 0; i < _arrayEntries.numRegexes(); i++) {
+ if (_arrayEntries.regex(i)->matchesSingleElement(e))
return true;
+ }
- if ( _matchesRealElement( e ) )
- return true;
+ return false;
+}
- /*
- if ( e.type() == Array ) {
- BSONObjIterator i( e.Obj() );
- while ( i.more() ) {
- BSONElement sub = i.next();
- if ( _matchesRealElement( sub ) )
- return true;
- }
- }
- */
+bool InMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ if (_arrayEntries.hasNull() && e.eoo())
+ return true;
- return false;
- }
+ if (_matchesRealElement(e))
+ return true;
- void InMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << path() << " $in ";
- _arrayEntries.debugString(debug);
- MatchExpression::TagData* td = getTag();
- if (NULL != td) {
- debug << " ";
- td->debugString(&debug);
+ /*
+ if ( e.type() == Array ) {
+ BSONObjIterator i( e.Obj() );
+ while ( i.more() ) {
+ BSONElement sub = i.next();
+ if ( _matchesRealElement( sub ) )
+ return true;
}
- debug << "\n";
- }
-
- void InMatchExpression::toBSON(BSONObjBuilder* out) const {
- BSONObjBuilder inBob(out->subobjStart(path()));
- BSONArrayBuilder arrBob(inBob.subarrayStart("$in"));
- _arrayEntries.toBSON(&arrBob);
- inBob.doneFast();
}
+ */
- bool InMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
- const InMatchExpression* realOther = static_cast<const InMatchExpression*>( other );
- return
- path() == realOther->path() &&
- _arrayEntries.equivalent( realOther->_arrayEntries );
- }
+ return false;
+}
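A sketch of the hasNull()/EOO rule above, modeled on the null-matching tests elsewhere in this diff: listing null in the $in set also matches documents where the field is missing entirely:

    InMatchExpression in;
    ASSERT(in.init("a").isOK());
    BSONObj nullOperand = BSON("" << BSONNULL);
    ASSERT(in.getArrayFilterEntries()->addEquality(nullOperand.firstElement()).isOK());
    ASSERT(in.matchesBSON(BSONObj(), NULL));              // 'a' missing -> matches null
    ASSERT(in.matchesBSON(BSON("a" << BSONNULL), NULL));  // explicit null matches
    ASSERT(!in.matchesBSON(BSON("a" << 4), NULL));        // other values do not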
- LeafMatchExpression* InMatchExpression::shallowClone() const {
- InMatchExpression* next = new InMatchExpression();
- copyTo( next );
- if ( getTag() ) {
- next->setTag(getTag()->clone());
- }
- return next;
+void InMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << path() << " $in ";
+ _arrayEntries.debugString(debug);
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ debug << " ";
+ td->debugString(&debug);
}
+ debug << "\n";
+}
- void InMatchExpression::copyTo( InMatchExpression* toFillIn ) const {
- toFillIn->init( path() );
- _arrayEntries.copyTo( toFillIn->_arrayEntries );
- }
+void InMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder inBob(out->subobjStart(path()));
+ BSONArrayBuilder arrBob(inBob.subarrayStart("$in"));
+ _arrayEntries.toBSON(&arrBob);
+ inBob.doneFast();
+}
+bool InMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
+ const InMatchExpression* realOther = static_cast<const InMatchExpression*>(other);
+ return path() == realOther->path() && _arrayEntries.equivalent(realOther->_arrayEntries);
}
+LeafMatchExpression* InMatchExpression::shallowClone() const {
+ InMatchExpression* next = new InMatchExpression();
+ copyTo(next);
+ if (getTag()) {
+ next->setTag(getTag()->clone());
+ }
+ return next;
+}
+void InMatchExpression::copyTo(InMatchExpression* toFillIn) const {
+ toFillIn->init(path());
+ _arrayEntries.copyTo(toFillIn->_arrayEntries);
+}
+}
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index 44f889d707b..e0e61ac11ea 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -36,366 +36,397 @@
#include "mongo/db/matcher/expression.h"
namespace pcrecpp {
- class RE;
-} // namespace pcrecpp;
+class RE;
+} // namespace pcrecpp;
namespace mongo {
- /**
- * This file contains leaves in the parse tree that are not array-based.
- *
- * LeafMatchExpression: REGEX MOD EXISTS MATCH_IN
- * ComparisonMatchExpression: EQ LTE LT GT GTE
- * MatchExpression: TYPE_OPERATOR
- */
+/**
+ * This file contains leaves in the parse tree that are not array-based.
+ *
+ * LeafMatchExpression: REGEX MOD EXISTS MATCH_IN
+ * ComparisonMatchExpression: EQ LTE LT GT GTE
+ * MatchExpression: TYPE_OPERATOR
+ */
- /**
- * Many operators subclass from this:
- * REGEX, MOD, EXISTS, IN
- * Everything that inherits from ComparisonMatchExpression.
- */
- class LeafMatchExpression : public MatchExpression {
- public:
- LeafMatchExpression( MatchType matchType )
- : MatchExpression( matchType ) {
- }
+/**
+ * Many operators subclass from this:
+ * REGEX, MOD, EXISTS, IN
+ * Everything that inherits from ComparisonMatchExpression.
+ */
+class LeafMatchExpression : public MatchExpression {
+public:
+ LeafMatchExpression(MatchType matchType) : MatchExpression(matchType) {}
- virtual ~LeafMatchExpression(){}
+ virtual ~LeafMatchExpression() {}
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const;
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const;
- virtual bool matchesSingleElement( const BSONElement& e ) const = 0;
+ virtual bool matchesSingleElement(const BSONElement& e) const = 0;
- virtual const StringData path() const { return _path; }
+ virtual const StringData path() const {
+ return _path;
+ }
- protected:
- Status initPath( StringData path );
+protected:
+ Status initPath(StringData path);
- private:
- StringData _path;
- ElementPath _elementPath;
- };
+private:
+ StringData _path;
+ ElementPath _elementPath;
+};
- /**
- * EQ, LTE, LT, GT, GTE subclass from ComparisonMatchExpression.
- */
- class ComparisonMatchExpression : public LeafMatchExpression {
- public:
- ComparisonMatchExpression( MatchType type ) : LeafMatchExpression( type ){}
+/**
+ * EQ, LTE, LT, GT, GTE subclass from ComparisonMatchExpression.
+ */
+class ComparisonMatchExpression : public LeafMatchExpression {
+public:
+ ComparisonMatchExpression(MatchType type) : LeafMatchExpression(type) {}
- Status init( StringData path, const BSONElement& rhs );
+ Status init(StringData path, const BSONElement& rhs);
- virtual ~ComparisonMatchExpression(){}
+ virtual ~ComparisonMatchExpression() {}
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- virtual const BSONElement& getRHS() const { return _rhs; }
+ virtual const BSONElement& getRHS() const {
+ return _rhs;
+ }
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual bool equivalent( const MatchExpression* other ) const;
+ virtual bool equivalent(const MatchExpression* other) const;
- const BSONElement& getData() const { return _rhs; }
+ const BSONElement& getData() const {
+ return _rhs;
+ }
- protected:
- BSONElement _rhs;
- };
+protected:
+ BSONElement _rhs;
+};
- //
- // ComparisonMatchExpression inheritors
- //
+//
+// ComparisonMatchExpression inheritors
+//
- class EqualityMatchExpression : public ComparisonMatchExpression {
- public:
- EqualityMatchExpression() : ComparisonMatchExpression( EQ ){}
- virtual LeafMatchExpression* shallowClone() const {
- ComparisonMatchExpression* e = new EqualityMatchExpression();
- e->init( path(), _rhs );
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+class EqualityMatchExpression : public ComparisonMatchExpression {
+public:
+ EqualityMatchExpression() : ComparisonMatchExpression(EQ) {}
+ virtual LeafMatchExpression* shallowClone() const {
+ ComparisonMatchExpression* e = new EqualityMatchExpression();
+ e->init(path(), _rhs);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
- };
-
- class LTEMatchExpression : public ComparisonMatchExpression {
- public:
- LTEMatchExpression() : ComparisonMatchExpression( LTE ){}
- virtual LeafMatchExpression* shallowClone() const {
- ComparisonMatchExpression* e = new LTEMatchExpression();
- e->init( path(), _rhs );
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
- }
-
- };
-
- class LTMatchExpression : public ComparisonMatchExpression {
- public:
- LTMatchExpression() : ComparisonMatchExpression( LT ){}
- virtual LeafMatchExpression* shallowClone() const {
- ComparisonMatchExpression* e = new LTMatchExpression();
- e->init( path(), _rhs );
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+ return e;
+ }
+};
+
+class LTEMatchExpression : public ComparisonMatchExpression {
+public:
+ LTEMatchExpression() : ComparisonMatchExpression(LTE) {}
+ virtual LeafMatchExpression* shallowClone() const {
+ ComparisonMatchExpression* e = new LTEMatchExpression();
+ e->init(path(), _rhs);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
-
- };
-
- class GTMatchExpression : public ComparisonMatchExpression {
- public:
- GTMatchExpression() : ComparisonMatchExpression( GT ){}
- virtual LeafMatchExpression* shallowClone() const {
- ComparisonMatchExpression* e = new GTMatchExpression();
- e->init( path(), _rhs );
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+ return e;
+ }
+};
+
+class LTMatchExpression : public ComparisonMatchExpression {
+public:
+ LTMatchExpression() : ComparisonMatchExpression(LT) {}
+ virtual LeafMatchExpression* shallowClone() const {
+ ComparisonMatchExpression* e = new LTMatchExpression();
+ e->init(path(), _rhs);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
-
- };
-
- class GTEMatchExpression : public ComparisonMatchExpression {
- public:
- GTEMatchExpression() : ComparisonMatchExpression( GTE ){}
- virtual LeafMatchExpression* shallowClone() const {
- ComparisonMatchExpression* e = new GTEMatchExpression();
- e->init( path(), _rhs );
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+ return e;
+ }
+};
+
+class GTMatchExpression : public ComparisonMatchExpression {
+public:
+ GTMatchExpression() : ComparisonMatchExpression(GT) {}
+ virtual LeafMatchExpression* shallowClone() const {
+ ComparisonMatchExpression* e = new GTMatchExpression();
+ e->init(path(), _rhs);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
-
- };
-
- //
- // LeafMatchExpression inheritors
- //
-
- class RegexMatchExpression : public LeafMatchExpression {
- public:
- /**
- * Maximum pattern size which pcre v8.3 can do matches correctly with
- * LINK_SIZE define macro set to 2 @ pcre's config.h (based on
- * experiments)
- */
- static const size_t MaxPatternSize = 32764;
-
- RegexMatchExpression();
- ~RegexMatchExpression();
-
- Status init( StringData path, StringData regex, StringData options );
- Status init( StringData path, const BSONElement& e );
-
- virtual LeafMatchExpression* shallowClone() const {
- RegexMatchExpression* e = new RegexMatchExpression();
- e->init( path(), _regex, _flags );
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+ return e;
+ }
+};
+
+class GTEMatchExpression : public ComparisonMatchExpression {
+public:
+ GTEMatchExpression() : ComparisonMatchExpression(GTE) {}
+ virtual LeafMatchExpression* shallowClone() const {
+ ComparisonMatchExpression* e = new GTEMatchExpression();
+ e->init(path(), _rhs);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
+ return e;
+ }
+};
- virtual bool matchesSingleElement( const BSONElement& e ) const;
-
- virtual void debugString( StringBuilder& debug, int level ) const;
-
- virtual void toBSON(BSONObjBuilder* out) const;
+//
+// LeafMatchExpression inheritors
+//
- void shortDebugString( StringBuilder& debug ) const;
+class RegexMatchExpression : public LeafMatchExpression {
+public:
+ /**
+     * Maximum pattern size that pcre v8.3 can match correctly when the
+     * LINK_SIZE macro in pcre's config.h is set to 2 (determined
+     * experimentally).
+ */
+ static const size_t MaxPatternSize = 32764;
- virtual bool equivalent( const MatchExpression* other ) const;
+ RegexMatchExpression();
+ ~RegexMatchExpression();
- const std::string& getString() const { return _regex; }
- const std::string& getFlags() const { return _flags; }
+ Status init(StringData path, StringData regex, StringData options);
+ Status init(StringData path, const BSONElement& e);
- private:
- std::string _regex;
- std::string _flags;
- std::unique_ptr<pcrecpp::RE> _re;
- };
+ virtual LeafMatchExpression* shallowClone() const {
+ RegexMatchExpression* e = new RegexMatchExpression();
+ e->init(path(), _regex, _flags);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
+ }
+ return e;
+ }
- class ModMatchExpression : public LeafMatchExpression {
- public:
- ModMatchExpression() : LeafMatchExpression( MOD ){}
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- Status init( StringData path, int divisor, int remainder );
+ virtual void debugString(StringBuilder& debug, int level) const;
- virtual LeafMatchExpression* shallowClone() const {
- ModMatchExpression* m = new ModMatchExpression();
- m->init( path(), _divisor, _remainder );
- if ( getTag() ) {
- m->setTag(getTag()->clone());
- }
- return m;
- }
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ void shortDebugString(StringBuilder& debug) const;
- virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual bool equivalent(const MatchExpression* other) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ const std::string& getString() const {
+ return _regex;
+ }
+ const std::string& getFlags() const {
+ return _flags;
+ }
- virtual bool equivalent( const MatchExpression* other ) const;
+private:
+ std::string _regex;
+ std::string _flags;
+ std::unique_ptr<pcrecpp::RE> _re;
+};
- int getDivisor() const { return _divisor; }
- int getRemainder() const { return _remainder; }
+class ModMatchExpression : public LeafMatchExpression {
+public:
+ ModMatchExpression() : LeafMatchExpression(MOD) {}
- private:
- int _divisor;
- int _remainder;
- };
+ Status init(StringData path, int divisor, int remainder);
- class ExistsMatchExpression : public LeafMatchExpression {
- public:
- ExistsMatchExpression() : LeafMatchExpression( EXISTS ){}
+ virtual LeafMatchExpression* shallowClone() const {
+ ModMatchExpression* m = new ModMatchExpression();
+ m->init(path(), _divisor, _remainder);
+ if (getTag()) {
+ m->setTag(getTag()->clone());
+ }
+ return m;
+ }
- Status init( StringData path );
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- virtual LeafMatchExpression* shallowClone() const {
- ExistsMatchExpression* e = new ExistsMatchExpression();
- e->init( path() );
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
- }
+ virtual void debugString(StringBuilder& debug, int level) const;
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual bool equivalent(const MatchExpression* other) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ int getDivisor() const {
+ return _divisor;
+ }
+ int getRemainder() const {
+ return _remainder;
+ }
- virtual bool equivalent( const MatchExpression* other ) const;
- };
+private:
+ int _divisor;
+ int _remainder;
+};
- /**
- * INTERNAL
- * terrible name
- * holds the entries of an $in or $all
- * either scalars or regex
- */
- class ArrayFilterEntries {
- MONGO_DISALLOW_COPYING( ArrayFilterEntries );
- public:
- ArrayFilterEntries();
- ~ArrayFilterEntries();
+class ExistsMatchExpression : public LeafMatchExpression {
+public:
+ ExistsMatchExpression() : LeafMatchExpression(EXISTS) {}
- Status addEquality( const BSONElement& e );
- Status addRegex( RegexMatchExpression* expr );
+ Status init(StringData path);
- const BSONElementSet& equalities() const { return _equalities; }
- bool contains( const BSONElement& elem ) const { return _equalities.count(elem) > 0; }
+ virtual LeafMatchExpression* shallowClone() const {
+ ExistsMatchExpression* e = new ExistsMatchExpression();
+ e->init(path());
+ if (getTag()) {
+ e->setTag(getTag()->clone());
+ }
+ return e;
+ }
- size_t numRegexes() const { return _regexes.size(); }
- RegexMatchExpression* regex( int idx ) const { return _regexes[idx]; }
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- bool hasNull() const { return _hasNull; }
- bool singleNull() const { return size() == 1 && _hasNull; }
- bool hasEmptyArray() const { return _hasEmptyArray; }
- int size() const { return _equalities.size() + _regexes.size(); }
+ virtual void debugString(StringBuilder& debug, int level) const;
- bool equivalent( const ArrayFilterEntries& other ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- void copyTo( ArrayFilterEntries& toFillIn ) const;
+ virtual bool equivalent(const MatchExpression* other) const;
+};
- void debugString( StringBuilder& debug ) const;
+/**
+ * INTERNAL
+ * terrible name
+ * holds the entries of an $in or $all
+ * either scalars or regex
+ */
+class ArrayFilterEntries {
+ MONGO_DISALLOW_COPYING(ArrayFilterEntries);
+
+public:
+ ArrayFilterEntries();
+ ~ArrayFilterEntries();
+
+ Status addEquality(const BSONElement& e);
+ Status addRegex(RegexMatchExpression* expr);
+
+ const BSONElementSet& equalities() const {
+ return _equalities;
+ }
+ bool contains(const BSONElement& elem) const {
+ return _equalities.count(elem) > 0;
+ }
+
+ size_t numRegexes() const {
+ return _regexes.size();
+ }
+ RegexMatchExpression* regex(int idx) const {
+ return _regexes[idx];
+ }
+
+ bool hasNull() const {
+ return _hasNull;
+ }
+ bool singleNull() const {
+ return size() == 1 && _hasNull;
+ }
+ bool hasEmptyArray() const {
+ return _hasEmptyArray;
+ }
+ int size() const {
+ return _equalities.size() + _regexes.size();
+ }
+
+ bool equivalent(const ArrayFilterEntries& other) const;
+
+ void copyTo(ArrayFilterEntries& toFillIn) const;
+
+ void debugString(StringBuilder& debug) const;
+
+ void toBSON(BSONArrayBuilder* out) const;
+
+private:
+ bool _hasNull; // if _equalities has a jstNULL element in it
+ bool _hasEmptyArray;
+ BSONElementSet _equalities;
+ std::vector<RegexMatchExpression*> _regexes;
+};
- void toBSON(BSONArrayBuilder* out) const;
+/**
+ * query operator: $in
+ */
+class InMatchExpression : public LeafMatchExpression {
+public:
+ InMatchExpression() : LeafMatchExpression(MATCH_IN) {}
+ Status init(StringData path);
- private:
- bool _hasNull; // if _equalities has a jstNULL element in it
- bool _hasEmptyArray;
- BSONElementSet _equalities;
- std::vector<RegexMatchExpression*> _regexes;
- };
+ virtual LeafMatchExpression* shallowClone() const;
- /**
- * query operator: $in
- */
- class InMatchExpression : public LeafMatchExpression {
- public:
- InMatchExpression() : LeafMatchExpression( MATCH_IN ){}
- Status init( StringData path );
+ ArrayFilterEntries* getArrayFilterEntries() {
+ return &_arrayEntries;
+ }
- virtual LeafMatchExpression* shallowClone() const;
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- ArrayFilterEntries* getArrayFilterEntries() { return &_arrayEntries; }
+ virtual void debugString(StringBuilder& debug, int level) const;
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual bool equivalent(const MatchExpression* other) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ void copyTo(InMatchExpression* toFillIn) const;
- virtual bool equivalent( const MatchExpression* other ) const;
+ const ArrayFilterEntries& getData() const {
+ return _arrayEntries;
+ }
- void copyTo( InMatchExpression* toFillIn ) const;
+private:
+ bool _matchesRealElement(const BSONElement& e) const;
+ ArrayFilterEntries _arrayEntries;
+};
- const ArrayFilterEntries& getData() const { return _arrayEntries; }
+//
+// The odd duck out, TYPE_OPERATOR.
+//
- private:
- bool _matchesRealElement( const BSONElement& e ) const;
- ArrayFilterEntries _arrayEntries;
- };
+/**
+ * Type has some odd semantics with arrays and as such it can't inherit from
+ * LeafMatchExpression.
+ */
+class TypeMatchExpression : public MatchExpression {
+public:
+ TypeMatchExpression() : MatchExpression(TYPE_OPERATOR) {}
- //
- // The odd duck out, TYPE_OPERATOR.
- //
+ Status init(StringData path, int type);
- /**
- * Type has some odd semantics with arrays and as such it can't inherit from
- * LeafMatchExpression.
- */
- class TypeMatchExpression : public MatchExpression {
- public:
- TypeMatchExpression() : MatchExpression( TYPE_OPERATOR ){}
-
- Status init( StringData path, int type );
-
- virtual MatchExpression* shallowClone() const {
- TypeMatchExpression* e = new TypeMatchExpression();
- e->init(_path, _type);
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
+ virtual MatchExpression* shallowClone() const {
+ TypeMatchExpression* e = new TypeMatchExpression();
+ e->init(_path, _type);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
+ return e;
+ }
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const;
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const;
- virtual void debugString( StringBuilder& debug, int level ) const;
+ virtual void debugString(StringBuilder& debug, int level) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual bool equivalent( const MatchExpression* other ) const;
+ virtual bool equivalent(const MatchExpression* other) const;
- /**
- * What is the type we're matching against?
- */
- int getData() const { return _type; }
+ /**
+ * What is the type we're matching against?
+ */
+ int getData() const {
+ return _type;
+ }
- virtual const StringData path() const { return _path; }
+ virtual const StringData path() const {
+ return _path;
+ }
- private:
- bool _matches( StringData path,
- const MatchableDocument* doc,
- MatchDetails* details = 0 ) const;
+private:
+ bool _matches(StringData path, const MatchableDocument* doc, MatchDetails* details = 0) const;
- StringData _path;
- ElementPath _elementPath;
- int _type;
- };
+ StringData _path;
+ ElementPath _elementPath;
+ int _type;
+};
} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp
index ed2f7b259d9..10ae1e8bc10 100644
--- a/src/mongo/db/matcher/expression_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_leaf_test.cpp
@@ -38,1618 +38,1663 @@
namespace mongo {
- using std::string;
-
- TEST( EqOp, MatchesElement ) {
- BSONObj operand = BSON( "a" << 5 );
- BSONObj match = BSON( "a" << 5.0 );
- BSONObj notMatch = BSON( "a" << 6 );
-
- EqualityMatchExpression eq;
- eq.init( "", operand["a"] );
- ASSERT( eq.matchesSingleElement( match.firstElement() ) );
- ASSERT( !eq.matchesSingleElement( notMatch.firstElement() ) );
-
- ASSERT( eq.equivalent( &eq ) );
- }
-
- TEST( EqOp, InvalidEooOperand ) {
- BSONObj operand;
- EqualityMatchExpression eq;
- ASSERT( !eq.init( "", operand.firstElement() ).isOK() );
- }
-
- TEST( EqOp, MatchesScalar ) {
- BSONObj operand = BSON( "a" << 5 );
- EqualityMatchExpression eq;
- eq.init( "a", operand[ "a" ] );
- ASSERT( eq.matchesBSON( BSON( "a" << 5.0 ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( EqOp, MatchesArrayValue ) {
- BSONObj operand = BSON( "a" << 5 );
- EqualityMatchExpression eq;
- eq.init( "a", operand[ "a" ] );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON_ARRAY( 5.0 << 6 ) ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 7 ) ), NULL ) );
- }
-
- TEST( EqOp, MatchesReferencedObjectValue ) {
- BSONObj operand = BSON( "a.b" << 5 );
- EqualityMatchExpression eq;
- eq.init( "a.b", operand[ "a.b" ] );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON( "b" << 5 ) ), NULL ) );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON( "b" << BSON_ARRAY( 5 ) ) ), NULL ) );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << 5 ) ) ), NULL ) );
- }
-
- TEST( EqOp, MatchesReferencedArrayValue ) {
- BSONObj operand = BSON( "a.0" << 5 );
- EqualityMatchExpression eq;
- eq.init( "a.0", operand[ "a.0" ] );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON_ARRAY( 5 ) ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 5 ) ) ), NULL ) );
- }
-
- TEST( EqOp, MatchesNull ) {
- BSONObj operand = BSON( "a" << BSONNULL );
- EqualityMatchExpression eq;
- eq.init( "a", operand[ "a" ] );
- ASSERT( eq.matchesBSON( BSONObj(), NULL ) );
- ASSERT( eq.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << 4 ), NULL ) );
- // A non-existent field is treated same way as an empty bson object
- ASSERT( eq.matchesBSON( BSON( "b" << 4 ), NULL ) );
- }
-
- // This test documents how the matcher currently works,
- // not necessarily how it should work ideally.
- TEST( EqOp, MatchesNestedNull ) {
- BSONObj operand = BSON( "a.b" << BSONNULL );
- EqualityMatchExpression eq;
- eq.init( "a.b", operand[ "a.b" ] );
- // null matches any empty object that is on a subpath of a.b
- ASSERT( eq.matchesBSON( BSONObj(), NULL ) );
- ASSERT( eq.matchesBSON( BSON( "a" << BSONObj() ), NULL ) );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON_ARRAY( BSONObj() ) ), NULL ) );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON( "b" << BSONNULL ) ), NULL ) );
- // b does not exist as an element in array under a.
- ASSERT( !eq.matchesBSON( BSON( "a" << BSONArray() ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << BSON_ARRAY( BSONNULL ) ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 ) ), NULL ) );
- // a.b exists but is not null.
- ASSERT( !eq.matchesBSON( BSON( "a" << BSON( "b" << 4 ) ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << BSON( "b" << BSONObj() ) ), NULL ) );
- // A non-existent field is treated same way as an empty bson object
- ASSERT( eq.matchesBSON( BSON( "b" << 4 ), NULL ) );
- }
-
- TEST( EqOp, MatchesMinKey ) {
- BSONObj operand = BSON( "a" << MinKey );
- EqualityMatchExpression eq;
- eq.init( "a", operand[ "a" ] );
- ASSERT( eq.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
-
-
- TEST( EqOp, MatchesMaxKey ) {
- BSONObj operand = BSON( "a" << MaxKey );
- EqualityMatchExpression eq;
- ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( eq.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( EqOp, MatchesFullArray ) {
- BSONObj operand = BSON( "a" << BSON_ARRAY( 1 << 2 ) );
- EqualityMatchExpression eq;
- ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 ) ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 << 3 ) ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) ), NULL ) );
- ASSERT( !eq.matchesBSON( BSON( "a" << 1 ), NULL ) );
- }
-
- TEST( EqOp, MatchesThroughNestedArray ) {
- BSONObj operand = BSON( "a.b.c.d" << 3 );
- EqualityMatchExpression eq;
- eq.init( "a.b.c.d", operand["a.b.c.d"] );
- BSONObj obj = fromjson("{a:{b:[{c:[{d:1},{d:2}]},{c:[{d:3}]}]}}");
- ASSERT( eq.matchesBSON( obj, NULL ) );
- }
-
- TEST( EqOp, ElemMatchKey ) {
- BSONObj operand = BSON( "a" << 5 );
- EqualityMatchExpression eq;
- ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !eq.matchesBSON( BSON( "a" << 4 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( eq.matchesBSON( BSON( "a" << 5 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( eq.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 << 5 ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "2", details.elemMatchKey() );
- }
-
- // SERVER-14886: when an array is being traversed explictly at the same time that a nested array
- // is being traversed implicitly, the elemMatch key should refer to the offset of the array
- // being implicitly traversed.
- TEST( EqOp, ElemMatchKeyWithImplicitAndExplicitTraversal ) {
- BSONObj operand = BSON( "a.0.b" << 3 );
- BSONElement operandFirstElt = operand.firstElement();
- EqualityMatchExpression eq;
- ASSERT( eq.init( operandFirstElt.fieldName(), operandFirstElt ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- BSONObj obj = fromjson("{a: [{b: [2, 3]}, {b: [4, 5]}]}");
- ASSERT( eq.matchesBSON( obj, &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- TEST( EqOp, Equality1 ) {
- EqualityMatchExpression eq1;
- EqualityMatchExpression eq2;
- EqualityMatchExpression eq3;
-
- BSONObj operand = BSON( "a" << 5 << "b" << 5 << "c" << 4 );
-
- eq1.init( "a", operand["a"] );
- eq2.init( "a", operand["b"] );
- eq3.init( "c", operand["c"] );
-
- ASSERT( eq1.equivalent( &eq1 ) );
- ASSERT( eq1.equivalent( &eq2 ) );
- ASSERT( !eq1.equivalent( &eq3 ) );
- }
-
- /**
- TEST( EqOp, MatchesIndexKeyScalar ) {
- BSONObj operand = BSON( "a" << 6 );
- EqualityMatchExpression eq;
- ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- eq.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- eq.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- eq.matchesIndexKey( BSON( "" << BSON_ARRAY( 6 ) ), indexSpec ) );
- }
-
- TEST( EqOp, MatchesIndexKeyMissing ) {
- BSONObj operand = BSON( "a" << 6 );
- EqualityMatchExpression eq;
- ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
- IndexSpec indexSpec( BSON( "b" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- eq.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- eq.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- eq.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
- }
-
- TEST( EqOp, MatchesIndexKeyArray ) {
- BSONObj operand = BSON( "a" << BSON_ARRAY( 4 << 5 ) );
-            ComparisonMatchExpression eq;
- ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- eq.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- }
-
- TEST( EqOp, MatchesIndexKeyArrayValue ) {
- BSONObj operand = BSON( "a" << 6 );
-            ComparisonMatchExpression eq;
- ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
- IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- eq.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- eq.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- eq.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
- }
- */
- TEST( LtOp, MatchesElement ) {
- BSONObj operand = BSON( "$lt" << 5 );
- BSONObj match = BSON( "a" << 4.5 );
- BSONObj notMatch = BSON( "a" << 6 );
- BSONObj notMatchEqual = BSON( "a" << 5 );
- BSONObj notMatchWrongType = BSON( "a" << "foo" );
- LTMatchExpression lt;
- ASSERT( lt.init( "", operand[ "$lt" ] ).isOK() );
- ASSERT( lt.matchesSingleElement( match.firstElement() ) );
- ASSERT( !lt.matchesSingleElement( notMatch.firstElement() ) );
- ASSERT( !lt.matchesSingleElement( notMatchEqual.firstElement() ) );
- ASSERT( !lt.matchesSingleElement( notMatchWrongType.firstElement() ) );
- }
-
- TEST( LtOp, InvalidEooOperand ) {
- BSONObj operand;
- LTMatchExpression lt;
- ASSERT( !lt.init( "", operand.firstElement() ).isOK() );
- }
-
- TEST( LtOp, MatchesScalar ) {
- BSONObj operand = BSON( "$lt" << 5 );
- LTMatchExpression lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- ASSERT( lt.matchesBSON( BSON( "a" << 4.5 ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << 6 ), NULL ) );
- }
-
- TEST( LtOp, MatchesScalarEmptyKey ) {
- BSONObj operand = BSON( "$lt" << 5 );
- LTMatchExpression lt;
- ASSERT( lt.init( "", operand[ "$lt" ] ).isOK() );
- ASSERT( lt.matchesBSON( BSON( "" << 4.5 ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "" << 6 ), NULL ) );
- }
-
- TEST( LtOp, MatchesArrayValue ) {
- BSONObj operand = BSON( "$lt" << 5 );
- LTMatchExpression lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- ASSERT( lt.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 4.5 ) ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 7 ) ), NULL ) );
- }
-
- TEST( LtOp, MatchesWholeArray ) {
- BSONObj operand = BSON( "$lt" << BSON_ARRAY( 5 ) );
- LTMatchExpression lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- ASSERT( lt.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( 5 ) ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( 6 ) ), NULL ) );
- // Nested array.
- ASSERT( lt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 4 ) ) ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 5 ) ) ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 6 ) ) ), NULL ) );
- }
-
- TEST( LtOp, MatchesNull ) {
- BSONObj operand = BSON( "$lt" << BSONNULL );
- LTMatchExpression lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- ASSERT( !lt.matchesBSON( BSONObj(), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << 4 ), NULL ) );
-            // A non-existent field is treated the same way as an empty BSON object
- ASSERT( !lt.matchesBSON( BSON( "b" << 4 ), NULL ) );
- }
-
- TEST( LtOp, MatchesDotNotationNull) {
- BSONObj operand = BSON( "$lt" << BSONNULL );
- LTMatchExpression lt;
- ASSERT( lt.init( "a.b", operand[ "$lt" ] ).isOK() );
- ASSERT( !lt.matchesBSON( BSONObj(), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << 4 ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSONObj() ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << BSONNULL ) ) ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "a" << 4 ) << BSON( "b" << 4 ) ) ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << 4 ) ) ), NULL ) );
- }
-
- TEST( LtOp, MatchesMinKey ) {
- BSONObj operand = BSON( "a" << MinKey );
- LTMatchExpression lt;
- ASSERT( lt.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( !lt.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( !lt.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( LtOp, MatchesMaxKey ) {
- BSONObj operand = BSON( "a" << MaxKey );
- LTMatchExpression lt;
- ASSERT( lt.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( !lt.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( lt.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( lt.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( LtOp, ElemMatchKey ) {
- BSONObj operand = BSON( "$lt" << 5 );
- LTMatchExpression lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !lt.matchesBSON( BSON( "a" << 6 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( lt.matchesBSON( BSON( "a" << 4 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( lt.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 2 << 5 ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- /**
- TEST( LtOp, MatchesIndexKeyScalar ) {
- BSONObj operand = BSON( "$lt" << 6 );
- LtOp lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- lt.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- lt.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- lt.matchesIndexKey( BSON( "" << BSON_ARRAY( 5 ) ), indexSpec ) );
- }
-
- TEST( LtOp, MatchesIndexKeyMissing ) {
- BSONObj operand = BSON( "$lt" << 6 );
- LtOp lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- IndexSpec indexSpec( BSON( "b" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- lt.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- lt.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- lt.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
- }
-
- TEST( LtOp, MatchesIndexKeyArray ) {
- BSONObj operand = BSON( "$lt" << BSON_ARRAY( 4 << 5 ) );
- LtOp lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- lt.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
- }
-
- TEST( LtOp, MatchesIndexKeyArrayValue ) {
- BSONObj operand = BSON( "$lt" << 6 );
- LtOp lt;
- ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
- IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- lt.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- lt.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- lt.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << 6 << 4 ) ), indexSpec ) );
- }
- */
- TEST( LteOp, MatchesElement ) {
- BSONObj operand = BSON( "$lte" << 5 );
- BSONObj match = BSON( "a" << 4.5 );
- BSONObj equalMatch = BSON( "a" << 5 );
- BSONObj notMatch = BSON( "a" << 6 );
- BSONObj notMatchWrongType = BSON( "a" << "foo" );
- LTEMatchExpression lte;
- ASSERT( lte.init( "", operand[ "$lte" ] ).isOK() );
- ASSERT( lte.matchesSingleElement( match.firstElement() ) );
- ASSERT( lte.matchesSingleElement( equalMatch.firstElement() ) );
- ASSERT( !lte.matchesSingleElement( notMatch.firstElement() ) );
- ASSERT( !lte.matchesSingleElement( notMatchWrongType.firstElement() ) );
- }
-
- TEST( LteOp, InvalidEooOperand ) {
- BSONObj operand;
- LTEMatchExpression lte;
- ASSERT( !lte.init( "", operand.firstElement() ).isOK() );
- }
-
- TEST( LteOp, MatchesScalar ) {
- BSONObj operand = BSON( "$lte" << 5 );
- LTEMatchExpression lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- ASSERT( lte.matchesBSON( BSON( "a" << 4.5 ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << 6 ), NULL ) );
- }
-
- TEST( LteOp, MatchesArrayValue ) {
- BSONObj operand = BSON( "$lte" << 5 );
- LTEMatchExpression lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- ASSERT( lte.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 4.5 ) ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 7 ) ), NULL ) );
- }
-
- TEST( LteOp, MatchesWholeArray ) {
- BSONObj operand = BSON( "$lte" << BSON_ARRAY( 5 ) );
- LTEMatchExpression lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- ASSERT( lte.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << BSON_ARRAY( 5 ) ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << BSON_ARRAY( 6 ) ), NULL ) );
- // Nested array.
- ASSERT( lte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 4 ) ) ), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 5 ) ) ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 6 ) ) ), NULL ) );
- }
-
- TEST( LteOp, MatchesNull ) {
- BSONObj operand = BSON( "$lte" << BSONNULL );
- LTEMatchExpression lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- ASSERT( lte.matchesBSON( BSONObj(), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << 4 ), NULL ) );
-            // A non-existent field is treated the same way as an empty BSON object
- ASSERT( lte.matchesBSON( BSON( "b" << 4 ), NULL ) );
- }
-
- TEST( LteOp, MatchesDotNotationNull) {
- BSONObj operand = BSON( "$lte" << BSONNULL );
- LTEMatchExpression lte;
- ASSERT( lte.init( "a.b", operand[ "$lte" ] ).isOK() );
- ASSERT( lte.matchesBSON( BSONObj(), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << 4 ), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << BSONObj() ), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << BSONNULL ) ) ), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "a" << 4 ) << BSON( "b" << 4 ) ) ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << 4 ) ) ), NULL ) );
- }
-
- TEST( LteOp, MatchesMinKey ) {
- BSONObj operand = BSON( "a" << MinKey );
- LTEMatchExpression lte;
- ASSERT( lte.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( lte.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( !lte.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( LteOp, MatchesMaxKey ) {
- BSONObj operand = BSON( "a" << MaxKey );
- LTEMatchExpression lte;
- ASSERT( lte.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( lte.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( lte.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
-
- TEST( LteOp, ElemMatchKey ) {
- BSONObj operand = BSON( "$lte" << 5 );
- LTEMatchExpression lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !lte.matchesBSON( BSON( "a" << 6 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( lte.matchesBSON( BSON( "a" << 4 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( lte.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 2 << 5 ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- /**
- TEST( LteOp, MatchesIndexKeyScalar ) {
- BSONObj operand = BSON( "$lte" << 6 );
- LteOp lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- lte.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- lte.matchesIndexKey( BSON( "" << 7 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- lte.matchesIndexKey( BSON( "" << BSON_ARRAY( 5 ) ), indexSpec ) );
- }
-
- TEST( LteOp, MatchesIndexKeyMissing ) {
- BSONObj operand = BSON( "$lte" << 6 );
- LteOp lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- IndexSpec indexSpec( BSON( "b" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- lte.matchesIndexKey( BSON( "" << 7 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- lte.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- lte.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
- }
-
- TEST( LteOp, MatchesIndexKeyArray ) {
- BSONObj operand = BSON( "$lte" << BSON_ARRAY( 4 << 5 ) );
- LteOp lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- lte.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
- }
-
- TEST( LteOp, MatchesIndexKeyArrayValue ) {
- BSONObj operand = BSON( "$lte" << 6 );
- LteOp lte;
- ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
- IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- lte.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- lte.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 7 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- lte.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << 6 << 4 ) ), indexSpec ) );
- }
-
- TEST( GtOp, MatchesElement ) {
- BSONObj operand = BSON( "$gt" << 5 );
- BSONObj match = BSON( "a" << 5.5 );
- BSONObj notMatch = BSON( "a" << 4 );
- BSONObj notMatchEqual = BSON( "a" << 5 );
- BSONObj notMatchWrongType = BSON( "a" << "foo" );
- GtOp gt;
- ASSERT( gt.init( "", operand[ "$gt" ] ).isOK() );
- ASSERT( gt.matchesSingleElement( match.firstElement() ) );
- ASSERT( !gt.matchesSingleElement( notMatch.firstElement() ) );
- ASSERT( !gt.matchesSingleElement( notMatchEqual.firstElement() ) );
- ASSERT( !gt.matchesSingleElement( notMatchWrongType.firstElement() ) );
- }
- */
-
- TEST( GtOp, InvalidEooOperand ) {
- BSONObj operand;
- GTMatchExpression gt;
- ASSERT( !gt.init( "", operand.firstElement() ).isOK() );
- }
-
- TEST( GtOp, MatchesScalar ) {
- BSONObj operand = BSON( "$gt" << 5 );
- GTMatchExpression gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- ASSERT( gt.matchesBSON( BSON( "a" << 5.5 ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( GtOp, MatchesArrayValue ) {
- BSONObj operand = BSON( "$gt" << 5 );
- GTMatchExpression gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- ASSERT( gt.matchesBSON( BSON( "a" << BSON_ARRAY( 3 << 5.5 ) ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSON_ARRAY( 2 << 4 ) ), NULL ) );
- }
-
- TEST( GtOp, MatchesWholeArray ) {
- BSONObj operand = BSON( "$gt" << BSON_ARRAY( 5 ) );
- GTMatchExpression gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSON_ARRAY( 5 ) ), NULL ) );
- ASSERT( gt.matchesBSON( BSON( "a" << BSON_ARRAY( 6 ) ), NULL ) );
- // Nested array.
- // XXX: The following assertion documents current behavior.
- ASSERT( gt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 4 ) ) ), NULL ) );
- // XXX: The following assertion documents current behavior.
- ASSERT( gt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 5 ) ) ), NULL ) );
- ASSERT( gt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 6 ) ) ), NULL ) );
- }
-
- TEST( GtOp, MatchesNull ) {
- BSONObj operand = BSON( "$gt" << BSONNULL );
- GTMatchExpression gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- ASSERT( !gt.matchesBSON( BSONObj(), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << 4 ), NULL ) );
-            // A non-existent field is treated the same way as an empty BSON object
- ASSERT( !gt.matchesBSON( BSON( "b" << 4 ), NULL ) );
- }
-
- TEST( GtOp, MatchesDotNotationNull) {
- BSONObj operand = BSON( "$gt" << BSONNULL );
- GTMatchExpression gt;
- ASSERT( gt.init( "a.b", operand[ "$gt" ] ).isOK() );
- ASSERT( !gt.matchesBSON( BSONObj(), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << 4 ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSONObj() ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << BSONNULL ) ) ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "a" << 4 ) << BSON( "b" << 4 ) ) ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << 4 ) ) ), NULL ) );
- }
-
- TEST( GtOp, MatchesMinKey ) {
- BSONObj operand = BSON( "a" << MinKey );
- GTMatchExpression gt;
- ASSERT( gt.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( !gt.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( gt.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( gt.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( GtOp, MatchesMaxKey ) {
- BSONObj operand = BSON( "a" << MaxKey );
- GTMatchExpression gt;
- ASSERT( gt.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( !gt.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( !gt.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( GtOp, ElemMatchKey ) {
- BSONObj operand = BSON( "$gt" << 5 );
- GTMatchExpression gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !gt.matchesBSON( BSON( "a" << 4 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( gt.matchesBSON( BSON( "a" << 6 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( gt.matchesBSON( BSON( "a" << BSON_ARRAY( 2 << 6 << 5 ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- /**
- TEST( GtOp, MatchesIndexKeyScalar ) {
- BSONObj operand = BSON( "$gt" << 6 );
- GtOp gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- gt.matchesIndexKey( BSON( "" << 7 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- gt.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- gt.matchesIndexKey( BSON( "" << BSON_ARRAY( 9 ) ), indexSpec ) );
- }
-
- TEST( GtOp, MatchesIndexKeyMissing ) {
- BSONObj operand = BSON( "$gt" << 6 );
- GtOp gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- IndexSpec indexSpec( BSON( "b" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- gt.matchesIndexKey( BSON( "" << 7 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- gt.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- gt.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
- }
-
- TEST( GtOp, MatchesIndexKeyArray ) {
- BSONObj operand = BSON( "$gt" << BSON_ARRAY( 4 << 5 ) );
- GtOp gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- gt.matchesIndexKey( BSON( "" << 8 ), indexSpec ) );
- }
-
- TEST( GtOp, MatchesIndexKeyArrayValue ) {
- BSONObj operand = BSON( "$gt" << 6 );
- GtOp gt;
- ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
- IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- gt.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 7 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- gt.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- gt.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << 6 << 4 ) ), indexSpec ) );
- }
- */
-
- TEST( ComparisonMatchExpression, MatchesElement ) {
- BSONObj operand = BSON( "$gte" << 5 );
- BSONObj match = BSON( "a" << 5.5 );
- BSONObj equalMatch = BSON( "a" << 5 );
- BSONObj notMatch = BSON( "a" << 4 );
- BSONObj notMatchWrongType = BSON( "a" << "foo" );
- GTEMatchExpression gte;
- ASSERT( gte.init( "", operand[ "$gte" ] ).isOK() );
- ASSERT( gte.matchesSingleElement( match.firstElement() ) );
- ASSERT( gte.matchesSingleElement( equalMatch.firstElement() ) );
- ASSERT( !gte.matchesSingleElement( notMatch.firstElement() ) );
- ASSERT( !gte.matchesSingleElement( notMatchWrongType.firstElement() ) );
- }
-
- TEST( ComparisonMatchExpression, InvalidEooOperand ) {
- BSONObj operand;
- GTEMatchExpression gte;
- ASSERT( !gte.init( "", operand.firstElement() ).isOK() );
- }
-
- TEST( ComparisonMatchExpression, MatchesScalar ) {
- BSONObj operand = BSON( "$gte" << 5 );
- GTEMatchExpression gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- ASSERT( gte.matchesBSON( BSON( "a" << 5.5 ), NULL ) );
- ASSERT( !gte.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( ComparisonMatchExpression, MatchesArrayValue ) {
- BSONObj operand = BSON( "$gte" << 5 );
- GTEMatchExpression gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5.5 ) ), NULL ) );
- ASSERT( !gte.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 ) ), NULL ) );
- }
-
- TEST( ComparisonMatchExpression, MatchesWholeArray ) {
- BSONObj operand = BSON( "$gte" << BSON_ARRAY( 5 ) );
- GTEMatchExpression gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- ASSERT( !gte.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( 5 ) ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( 6 ) ), NULL ) );
- // Nested array.
- // XXX: The following assertion documents current behavior.
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 4 ) ) ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 5 ) ) ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 6 ) ) ), NULL ) );
- }
-
- TEST( ComparisonMatchExpression, MatchesNull ) {
- BSONObj operand = BSON( "$gte" << BSONNULL );
- GTEMatchExpression gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- ASSERT( gte.matchesBSON( BSONObj(), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !gte.matchesBSON( BSON( "a" << 4 ), NULL ) );
-            // A non-existent field is treated the same way as an empty BSON object
- ASSERT( gte.matchesBSON( BSON( "b" << 4 ), NULL ) );
- }
-
- TEST( ComparisonMatchExpression, MatchesDotNotationNull) {
- BSONObj operand = BSON( "$gte" << BSONNULL );
- GTEMatchExpression gte;
- ASSERT( gte.init( "a.b", operand[ "$gte" ] ).isOK() );
- ASSERT( gte.matchesBSON( BSONObj(), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << 4 ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSONObj() ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << BSONNULL ) ) ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "a" << 4 ) << BSON( "b" << 4 ) ) ), NULL ) );
- ASSERT( !gte.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( !gte.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << 4 ) ) ), NULL ) );
- }
-
- TEST( ComparisonMatchExpression, MatchesMinKey ) {
- BSONObj operand = BSON( "a" << MinKey );
- GTEMatchExpression gte;
- ASSERT( gte.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( gte.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( gte.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( ComparisonMatchExpression, MatchesMaxKey ) {
- BSONObj operand = BSON( "a" << MaxKey );
- GTEMatchExpression gte;
- ASSERT( gte.init( "a", operand[ "a" ] ).isOK() );
- ASSERT( gte.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( !gte.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( !gte.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( ComparisonMatchExpression, ElemMatchKey ) {
- BSONObj operand = BSON( "$gte" << 5 );
- GTEMatchExpression gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !gte.matchesBSON( BSON( "a" << 4 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( gte.matchesBSON( BSON( "a" << 6 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( gte.matchesBSON( BSON( "a" << BSON_ARRAY( 2 << 6 << 5 ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- /**
- TEST( GteOp, MatchesIndexKeyScalar ) {
- BSONObj operand = BSON( "$gte" << 6 );
- GteOp gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- gte.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- gte.matchesIndexKey( BSON( "" << 5 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- gte.matchesIndexKey( BSON( "" << BSON_ARRAY( 7 ) ), indexSpec ) );
- }
-
- TEST( GteOp, MatchesIndexKeyMissing ) {
- BSONObj operand = BSON( "$gte" << 6 );
- GteOp gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- IndexSpec indexSpec( BSON( "b" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- gte.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- gte.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- gte.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
- }
-
- TEST( GteOp, MatchesIndexKeyArray ) {
- BSONObj operand = BSON( "$gte" << BSON_ARRAY( 4 << 5 ) );
- GteOp gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- gte.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- }
-
- TEST( GteOp, MatchesIndexKeyArrayValue ) {
- BSONObj operand = BSON( "$gte" << 6 );
- GteOp gte;
- ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
- IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- gte.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- gte.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- gte.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << 6 << 4 ) ), indexSpec ) );
- }
- */
-
- TEST( RegexMatchExpression, MatchesElementExact ) {
- BSONObj match = BSON( "a" << "b" );
- BSONObj notMatch = BSON( "a" << "c" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "b", "" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, TooLargePattern ) {
- string tooLargePattern( 50 * 1000, 'z' );
- RegexMatchExpression regex;
- ASSERT( !regex.init( "a", tooLargePattern, "" ).isOK() );
- }
-
- TEST( RegexMatchExpression, MatchesElementSimplePrefix ) {
- BSONObj match = BSON( "x" << "abc" );
- BSONObj notMatch = BSON( "x" << "adz" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "^ab", "" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementCaseSensitive ) {
- BSONObj match = BSON( "x" << "abc" );
- BSONObj notMatch = BSON( "x" << "ABC" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "abc", "" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementCaseInsensitive ) {
- BSONObj match = BSON( "x" << "abc" );
- BSONObj matchUppercase = BSON( "x" << "ABC" );
- BSONObj notMatch = BSON( "x" << "abz" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "abc", "i" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( regex.matchesSingleElement( matchUppercase.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementMultilineOff ) {
- BSONObj match = BSON( "x" << "az" );
- BSONObj notMatch = BSON( "x" << "\naz" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "^a", "" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementMultilineOn ) {
- BSONObj match = BSON( "x" << "az" );
- BSONObj matchMultiline = BSON( "x" << "\naz" );
- BSONObj notMatch = BSON( "x" << "\n\n" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "^a", "m" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( regex.matchesSingleElement( matchMultiline.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementExtendedOff ) {
- BSONObj match = BSON( "x" << "a b" );
- BSONObj notMatch = BSON( "x" << "ab" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "a b", "" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementExtendedOn ) {
- BSONObj match = BSON( "x" << "ab" );
- BSONObj notMatch = BSON( "x" << "a b" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "a b", "x" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementDotAllOff ) {
- BSONObj match = BSON( "x" << "a b" );
- BSONObj notMatch = BSON( "x" << "a\nb" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "a.b", "" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementDotAllOn ) {
- BSONObj match = BSON( "x" << "a b" );
- BSONObj matchDotAll = BSON( "x" << "a\nb" );
- BSONObj notMatch = BSON( "x" << "ab" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "a.b", "s" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( regex.matchesSingleElement( matchDotAll.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementMultipleFlags ) {
- BSONObj matchMultilineDotAll = BSON( "x" << "\na\nb" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "^a.b", "ms" ).isOK() );
- ASSERT( regex.matchesSingleElement( matchMultilineDotAll.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementRegexType ) {
- BSONObj match = BSONObjBuilder().appendRegex( "x", "yz", "i" ).obj();
- BSONObj notMatchPattern = BSONObjBuilder().appendRegex( "x", "r", "i" ).obj();
- BSONObj notMatchFlags = BSONObjBuilder().appendRegex( "x", "yz", "s" ).obj();
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "yz", "i" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatchPattern.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatchFlags.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementSymbolType ) {
- BSONObj match = BSONObjBuilder().appendSymbol( "x", "yz" ).obj();
- BSONObj notMatch = BSONObjBuilder().appendSymbol( "x", "gg" ).obj();
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "yz", "" ).isOK() );
- ASSERT( regex.matchesSingleElement( match.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatch.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementWrongType ) {
- BSONObj notMatchInt = BSON( "x" << 1 );
- BSONObj notMatchBool = BSON( "x" << true );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "1", "" ).isOK() );
- ASSERT( !regex.matchesSingleElement( notMatchInt.firstElement() ) );
- ASSERT( !regex.matchesSingleElement( notMatchBool.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesElementUtf8 ) {
- BSONObj multiByteCharacter = BSON( "x" << "\xc2\xa5" );
- RegexMatchExpression regex;
- ASSERT( regex.init( "", "^.$", "" ).isOK() );
- ASSERT( regex.matchesSingleElement( multiByteCharacter.firstElement() ) );
- }
-
- TEST( RegexMatchExpression, MatchesScalar ) {
- RegexMatchExpression regex;
- ASSERT( regex.init( "a", "b", "" ).isOK() );
- ASSERT( regex.matchesBSON( BSON( "a" << "b" ), NULL ) );
- ASSERT( !regex.matchesBSON( BSON( "a" << "c" ), NULL ) );
- }
-
- TEST( RegexMatchExpression, MatchesArrayValue ) {
- RegexMatchExpression regex;
- ASSERT( regex.init( "a", "b", "" ).isOK() );
- ASSERT( regex.matchesBSON( BSON( "a" << BSON_ARRAY( "c" << "b" ) ), NULL ) );
- ASSERT( !regex.matchesBSON( BSON( "a" << BSON_ARRAY( "d" << "c" ) ), NULL ) );
- }
-
- TEST( RegexMatchExpression, MatchesNull ) {
- RegexMatchExpression regex;
- ASSERT( regex.init( "a", "b", "" ).isOK() );
- ASSERT( !regex.matchesBSON( BSONObj(), NULL ) );
- ASSERT( !regex.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- }
-
- TEST( RegexMatchExpression, ElemMatchKey ) {
- RegexMatchExpression regex;
- ASSERT( regex.init( "a", "b", "" ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !regex.matchesBSON( BSON( "a" << "c" ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( regex.matchesBSON( BSON( "a" << "b" ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( regex.matchesBSON( BSON( "a" << BSON_ARRAY( "c" << "b" ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- TEST( RegexMatchExpression, Equality1 ) {
- RegexMatchExpression r1;
- RegexMatchExpression r2;
- RegexMatchExpression r3;
- RegexMatchExpression r4;
- ASSERT( r1.init( "a" , "b" ,"" ).isOK() );
- ASSERT( r2.init( "a" , "b" ,"x" ).isOK() );
- ASSERT( r3.init( "a" , "c" ,"" ).isOK() );
- ASSERT( r4.init( "b" , "b" ,"" ).isOK() );
-
- ASSERT( r1.equivalent( &r1 ) );
- ASSERT( !r1.equivalent( &r2 ) );
- ASSERT( !r1.equivalent( &r3 ) );
- ASSERT( !r1.equivalent( &r4 ) );
- }
-
- /**
- TEST( RegexMatchExpression, MatchesIndexKeyScalar ) {
- RegexMatchExpression regex;
- ASSERT( regex.init( "a", "xyz", "" ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- regex.matchesIndexKey( BSON( "" << "z xyz" ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- regex.matchesIndexKey( BSON( "" << "xy" ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- regex.matchesIndexKey( BSON( "" << BSON_ARRAY( "xyz" ) ), indexSpec ) );
- }
-
- TEST( RegexMatchExpression, MatchesIndexKeyMissing ) {
- RegexMatchExpression regex;
- ASSERT( regex.init( "a", "xyz", "" ).isOK() );
- IndexSpec indexSpec( BSON( "b" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- regex.matchesIndexKey( BSON( "" << "z xyz" ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- regex.matchesIndexKey( BSON( "" << "xy" ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- regex.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << "xyz" ) ), indexSpec ) );
- }
-
- TEST( RegexMatchExpression, MatchesIndexKeyArrayValue ) {
- RegexMatchExpression regex;
- ASSERT( regex.init( "a", "xyz", "" ).isOK() );
- IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- regex.matchesIndexKey( BSON( "" << "dummygeohash" << "" << "xyz" ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- regex.matchesIndexKey( BSON( "" << "dummygeohash" << "" << "z" ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- regex.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( "r" << 6 << "xyz" ) ), indexSpec ) );
- }
- */
-
- TEST( ModMatchExpression, MatchesElement ) {
- BSONObj match = BSON( "a" << 1 );
- BSONObj largerMatch = BSON( "a" << 4.0 );
- BSONObj longLongMatch = BSON( "a" << 68719476736LL );
- BSONObj notMatch = BSON( "a" << 6 );
- BSONObj negativeNotMatch = BSON( "a" << -2 );
- ModMatchExpression mod;
- ASSERT( mod.init( "", 3, 1 ).isOK() );
- ASSERT( mod.matchesSingleElement( match.firstElement() ) );
- ASSERT( mod.matchesSingleElement( largerMatch.firstElement() ) );
- ASSERT( mod.matchesSingleElement( longLongMatch.firstElement() ) );
- ASSERT( !mod.matchesSingleElement( notMatch.firstElement() ) );
- ASSERT( !mod.matchesSingleElement( negativeNotMatch.firstElement() ) );
- }
-
- TEST( ModMatchExpression, ZeroDivisor ) {
- ModMatchExpression mod;
- ASSERT( !mod.init( "", 0, 1 ).isOK() );
- }
-
- TEST( ModMatchExpression, MatchesScalar ) {
- ModMatchExpression mod;
- ASSERT( mod.init( "a", 5, 2 ).isOK() );
- ASSERT( mod.matchesBSON( BSON( "a" << 7.0 ), NULL ) );
- ASSERT( !mod.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( ModMatchExpression, MatchesArrayValue ) {
- ModMatchExpression mod;
- ASSERT( mod.init( "a", 5, 2 ).isOK() );
- ASSERT( mod.matchesBSON( BSON( "a" << BSON_ARRAY( 5 << 12LL ) ), NULL ) );
- ASSERT( !mod.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 8 ) ), NULL ) );
- }
-
- TEST( ModMatchExpression, MatchesNull ) {
- ModMatchExpression mod;
- ASSERT( mod.init( "a", 5, 2 ).isOK() );
- ASSERT( !mod.matchesBSON( BSONObj(), NULL ) );
- ASSERT( !mod.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- }
-
- TEST( ModMatchExpression, ElemMatchKey ) {
- ModMatchExpression mod;
- ASSERT( mod.init( "a", 5, 2 ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !mod.matchesBSON( BSON( "a" << 4 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( mod.matchesBSON( BSON( "a" << 2 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( mod.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 << 5 ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- TEST( ModMatchExpression, Equality1 ) {
- ModMatchExpression m1;
- ModMatchExpression m2;
- ModMatchExpression m3;
- ModMatchExpression m4;
-
- m1.init( "a" , 1 , 2 );
- m2.init( "a" , 2 , 2 );
- m3.init( "a" , 1 , 1 );
- m4.init( "b" , 1 , 2 );
-
- ASSERT( m1.equivalent( &m1 ) );
- ASSERT( !m1.equivalent( &m2 ) );
- ASSERT( !m1.equivalent( &m3 ) );
- ASSERT( !m1.equivalent( &m4 ) );
- }
-
- /**
- TEST( ModMatchExpression, MatchesIndexKey ) {
- BSONObj operand = BSON( "$mod" << BSON_ARRAY( 2 << 1 ) );
- ModMatchExpression mod;
- ASSERT( mod.init( "a", operand[ "$mod" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- BSONObj indexKey = BSON( "" << 1 );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- mod.matchesIndexKey( indexKey, indexSpec ) );
- }
- */
-
- TEST( ExistsMatchExpression, MatchesElement ) {
- BSONObj existsInt = BSON( "a" << 5 );
- BSONObj existsNull = BSON( "a" << BSONNULL );
- BSONObj doesntExist = BSONObj();
- ExistsMatchExpression exists;
- ASSERT( exists.init( "" ).isOK() );
- ASSERT( exists.matchesSingleElement( existsInt.firstElement() ) );
- ASSERT( exists.matchesSingleElement( existsNull.firstElement() ) );
- ASSERT( !exists.matchesSingleElement( doesntExist.firstElement() ) );
- }
-
- TEST( ExistsMatchExpression, MatchesElementExistsTrueValue ) {
- BSONObj exists = BSON( "a" << 5 );
- BSONObj missing = BSONObj();
- ExistsMatchExpression existsTrueValue;
- ASSERT( existsTrueValue.init( "" ).isOK() );
- ASSERT( existsTrueValue.matchesSingleElement( exists.firstElement() ) );
- ASSERT( !existsTrueValue.matchesSingleElement( missing.firstElement() ) );
- }
-
- TEST( ExistsMatchExpression, MatchesScalar ) {
- ExistsMatchExpression exists;
- ASSERT( exists.init( "a" ).isOK() );
- ASSERT( exists.matchesBSON( BSON( "a" << 1 ), NULL ) );
- ASSERT( exists.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !exists.matchesBSON( BSON( "b" << 1 ), NULL ) );
- }
-
- TEST( ExistsMatchExpression, MatchesArray ) {
- ExistsMatchExpression exists;
- ASSERT( exists.init( "a" ).isOK() );
- ASSERT( exists.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5.5 ) ), NULL ) );
- }
-
- TEST( ExistsMatchExpression, ElemMatchKey ) {
- ExistsMatchExpression exists;
- ASSERT( exists.init( "a.b" ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !exists.matchesBSON( BSON( "a" << 1 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( exists.matchesBSON( BSON( "a" << BSON( "b" << 6 ) ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( exists.matchesBSON( BSON( "a" << BSON_ARRAY( 2 << BSON( "b" << 7 ) ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- TEST( ExistsMatchExpression, Equivalent ) {
- ExistsMatchExpression e1;
- ExistsMatchExpression e2;
- e1.init( "a" );
- e2.init( "b" );
-
- ASSERT( e1.equivalent( &e1 ) );
- ASSERT( !e1.equivalent( &e2 ) );
- }
-
- /**
- TEST( ExistsMatchExpression, MatchesIndexKey ) {
- BSONObj operand = BSON( "$exists" << true );
- ExistsMatchExpression exists;
- ASSERT( exists.init( "a", operand[ "$exists" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- BSONObj indexKey = BSON( "" << 1 );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- exists.matchesIndexKey( indexKey, indexSpec ) );
- }
- */
-
-
-
- TEST( TypeMatchExpression, MatchesElementStringType ) {
- BSONObj match = BSON( "a" << "abc" );
- BSONObj notMatch = BSON( "a" << 5 );
- TypeMatchExpression type;
- ASSERT( type.init( "", String ).isOK() );
- ASSERT( type.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !type.matchesSingleElement( notMatch[ "a" ] ) );
- }
-
- TEST( TypeMatchExpression, MatchesElementNullType ) {
- BSONObj match = BSON( "a" << BSONNULL );
- BSONObj notMatch = BSON( "a" << "abc" );
- TypeMatchExpression type;
- ASSERT( type.init( "", jstNULL ).isOK() );
- ASSERT( type.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !type.matchesSingleElement( notMatch[ "a" ] ) );
- }
-
- TEST( TypeMatchExpression, InvalidTypeMatchExpressionerand ) {
- // If the provided type number is not a valid BSONType, it is not a parse error. The
- // operator will simply not match anything.
- BSONObj notMatch1 = BSON( "a" << BSONNULL );
- BSONObj notMatch2 = BSON( "a" << "abc" );
- TypeMatchExpression type;
- ASSERT( type.init( "", JSTypeMax + 1 ).isOK() );
- ASSERT( !type.matchesSingleElement( notMatch1[ "a" ] ) );
- ASSERT( !type.matchesSingleElement( notMatch2[ "a" ] ) );
- }
-
- TEST( TypeMatchExpression, MatchesScalar ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a", Bool ).isOK() );
- ASSERT( type.matchesBSON( BSON( "a" << true ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << 1 ), NULL ) );
- }
-
- TEST( TypeMatchExpression, MatchesArray ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a", NumberInt ).isOK() );
- ASSERT( type.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- ASSERT( type.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << "a" ) ), NULL ) );
- ASSERT( type.matchesBSON( BSON( "a" << BSON_ARRAY( "a" << 4 ) ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << BSON_ARRAY( "a" ) ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 4 ) ) ), NULL ) );
- }
-
- TEST( TypeMatchExpression, MatchesOuterArray ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a", Array ).isOK() );
- // The outer array is not matched.
- ASSERT( !type.matchesBSON( BSON( "a" << BSONArray() ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << "a" ) ), NULL ) );
- ASSERT( type.matchesBSON( BSON( "a" << BSON_ARRAY( BSONArray() << 2 ) ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << "bar" ), NULL ) );
- }
-
- TEST( TypeMatchExpression, MatchesObject ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a", Object ).isOK() );
- ASSERT( type.matchesBSON( BSON( "a" << BSON( "b" << 1 ) ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << 1 ), NULL ) );
- }
-
- TEST( TypeMatchExpression, MatchesDotNotationFieldObject ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a.b", Object ).isOK() );
- ASSERT( type.matchesBSON( BSON( "a" << BSON( "b" << BSON( "c" << 1 ) ) ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << BSON( "b" << 1 ) ), NULL ) );
- }
-
- TEST( TypeMatchExpression, MatchesDotNotationArrayElementArray ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a.0", Array ).isOK() );
- ASSERT( type.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 1 ) ) ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << BSON_ARRAY( "b" ) ), NULL ) );
- }
-
- TEST( TypeMatchExpression, MatchesDotNotationArrayElementScalar ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a.0", String ).isOK() );
- ASSERT( type.matchesBSON( BSON( "a" << BSON_ARRAY( "b" ) ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) ), NULL ) );
- }
-
- TEST( TypeMatchExpression, MatchesDotNotationArrayElementObject ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a.0", Object ).isOK() );
- ASSERT( type.matchesBSON( BSON( "a" << BSON_ARRAY( BSON( "b" << 1 ) ) ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) ), NULL ) );
- }
-
- TEST( TypeMatchExpression, MatchesNull ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a", jstNULL ).isOK() );
- ASSERT( type.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !type.matchesBSON( BSON( "a" << 4 ), NULL ) );
- ASSERT( !type.matchesBSON( BSONObj(), NULL ) );
- }
-
- TEST( TypeMatchExpression, ElemMatchKey ) {
- TypeMatchExpression type;
- ASSERT( type.init( "a.b", String ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !type.matchesBSON( BSON( "a" << 1 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( type.matchesBSON( BSON( "a" << BSON( "b" << "string" ) ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( type.matchesBSON( BSON( "a" << BSON( "b" << BSON_ARRAY( "string" ) ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "0", details.elemMatchKey() );
- ASSERT( type.matchesBSON( BSON( "a" <<
- BSON_ARRAY( 2 <<
- BSON( "b" << BSON_ARRAY( "string" ) ) ) ),
- &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- TEST( TypeMatchExpression, Equivalent ) {
- TypeMatchExpression e1;
- TypeMatchExpression e2;
- TypeMatchExpression e3;
- e1.init( "a", String );
- e2.init( "a", NumberDouble );
- e3.init( "b", String );
-
- ASSERT( e1.equivalent( &e1 ) );
- ASSERT( !e1.equivalent( &e2 ) );
- ASSERT( !e1.equivalent( &e3 ) );
- }
-
-
- /**
- TEST( TypeMatchExpression, MatchesIndexKey ) {
- BSONObj operand = BSON( "$type" << 2 );
- TypeMatchExpression type;
- ASSERT( type.init( "a", operand[ "$type" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- BSONObj indexKey = BSON( "" << "q" );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- type.matchesIndexKey( indexKey, indexSpec ) );
- }
- */
-
-
- TEST( InMatchExpression, MatchesElementSingle ) {
- BSONArray operand = BSON_ARRAY( 1 );
- BSONObj match = BSON( "a" << 1 );
- BSONObj notMatch = BSON( "a" << 2 );
- InMatchExpression in;
- in.getArrayFilterEntries()->addEquality( operand.firstElement() );
- ASSERT( in.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !in.matchesSingleElement( notMatch[ "a" ] ) );
- }
-
- TEST( InMatchExpression, MatchesEmpty ) {
- InMatchExpression in;
- in.init( "a" );
-
- BSONObj notMatch = BSON( "a" << 2 );
- ASSERT( !in.matchesSingleElement( notMatch[ "a" ] ) );
- ASSERT( !in.matchesBSON( BSON( "a" << 1 ), NULL ) );
- ASSERT( !in.matchesBSON( BSONObj(), NULL ) );
- }
-
- TEST( InMatchExpression, MatchesElementMultiple ) {
- BSONObj operand = BSON_ARRAY( 1 << "r" << true << 1 );
- InMatchExpression in;
- in.getArrayFilterEntries()->addEquality( operand[0] );
- in.getArrayFilterEntries()->addEquality( operand[1] );
- in.getArrayFilterEntries()->addEquality( operand[2] );
- in.getArrayFilterEntries()->addEquality( operand[3] );
-
- BSONObj matchFirst = BSON( "a" << 1 );
- BSONObj matchSecond = BSON( "a" << "r" );
- BSONObj matchThird = BSON( "a" << true );
- BSONObj notMatch = BSON( "a" << false );
- ASSERT( in.matchesSingleElement( matchFirst[ "a" ] ) );
- ASSERT( in.matchesSingleElement( matchSecond[ "a" ] ) );
- ASSERT( in.matchesSingleElement( matchThird[ "a" ] ) );
- ASSERT( !in.matchesSingleElement( notMatch[ "a" ] ) );
- }
-
-
- TEST( InMatchExpression, MatchesScalar ) {
- BSONObj operand = BSON_ARRAY( 5 );
- InMatchExpression in;
- in.init( "a" );
- in.getArrayFilterEntries()->addEquality( operand.firstElement() );
-
- ASSERT( in.matchesBSON( BSON( "a" << 5.0 ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( InMatchExpression, MatchesArrayValue ) {
- BSONObj operand = BSON_ARRAY( 5 );
- InMatchExpression in;
- in.init( "a" );
- in.getArrayFilterEntries()->addEquality( operand.firstElement() );
-
- ASSERT( in.matchesBSON( BSON( "a" << BSON_ARRAY( 5.0 << 6 ) ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << BSON_ARRAY( 6 << 7 ) ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << BSON_ARRAY( BSON_ARRAY( 5 ) ) ), NULL ) );
- }
-
- TEST( InMatchExpression, MatchesNull ) {
- BSONObj operand = BSON_ARRAY( BSONNULL );
-
- InMatchExpression in;
- in.init( "a" );
- in.getArrayFilterEntries()->addEquality( operand.firstElement() );
-
- ASSERT( in.matchesBSON( BSONObj(), NULL ) );
- ASSERT( in.matchesBSON( BSON( "a" << BSONNULL ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << 4 ), NULL ) );
-            // A non-existent field is treated the same way as an empty BSON object
- ASSERT( in.matchesBSON( BSON( "b" << 4 ), NULL ) );
- }
-
- TEST( InMatchExpression, MatchesUndefined ) {
- BSONObj operand = BSON_ARRAY( BSONUndefined );
-
- InMatchExpression in;
- in.init( "a" );
- Status s = in.getArrayFilterEntries()->addEquality( operand.firstElement() );
- ASSERT_NOT_OK(s);
- }
-
- TEST( InMatchExpression, MatchesMinKey ) {
- BSONObj operand = BSON_ARRAY( MinKey );
- InMatchExpression in;
- in.init( "a" );
- in.getArrayFilterEntries()->addEquality( operand.firstElement() );
-
- ASSERT( in.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( InMatchExpression, MatchesMaxKey ) {
- BSONObj operand = BSON_ARRAY( MaxKey );
- InMatchExpression in;
- in.init( "a" );
- in.getArrayFilterEntries()->addEquality( operand.firstElement() );
-
- ASSERT( in.matchesBSON( BSON( "a" << MaxKey ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << MinKey ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( InMatchExpression, MatchesFullArray ) {
- BSONObj operand = BSON_ARRAY( BSON_ARRAY( 1 << 2 ) << 4 << 5 );
- InMatchExpression in;
- in.init( "a" );
- in.getArrayFilterEntries()->addEquality( operand[0] );
- in.getArrayFilterEntries()->addEquality( operand[1] );
- in.getArrayFilterEntries()->addEquality( operand[2] );
-
- ASSERT( in.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 ) ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 << 3 ) ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) ), NULL ) );
- ASSERT( !in.matchesBSON( BSON( "a" << 1 ), NULL ) );
- }
-
- TEST( InMatchExpression, ElemMatchKey ) {
- BSONObj operand = BSON_ARRAY( 5 << 2 );
- InMatchExpression in;
- in.init( "a" );
- in.getArrayFilterEntries()->addEquality( operand[0] );
- in.getArrayFilterEntries()->addEquality( operand[1] );
-
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !in.matchesBSON( BSON( "a" << 4 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( in.matchesBSON( BSON( "a" << 5 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( in.matchesBSON( BSON( "a" << BSON_ARRAY( 1 << 2 << 5 ) ), &details ) );
- ASSERT( details.hasElemMatchKey() );
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- /**
- TEST( InMatchExpression, MatchesIndexKeyScalar ) {
- BSONObj operand = BSON( "$in" << BSON_ARRAY( 6 << 5 ) );
- InMatchExpression in;
- ASSERT( in.init( "a", operand[ "$in" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- in.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- in.matchesIndexKey( BSON( "" << 5 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- in.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- in.matchesIndexKey( BSON( "" << BSON_ARRAY( 6 ) ), indexSpec ) );
- }
-
- TEST( InMatchExpression, MatchesIndexKeyMissing ) {
- BSONObj operand = BSON( "$in" << BSON_ARRAY( 6 ) );
-            ComparisonMatchExpression eq;
- ASSERT( eq.init( "a", operand[ "$in" ] ).isOK() );
- IndexSpec indexSpec( BSON( "b" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- eq.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- eq.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- eq.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
- }
-
- TEST( InMatchExpression, MatchesIndexKeyArray ) {
- BSONObj operand = BSON( "$in" << BSON_ARRAY( 4 << BSON_ARRAY( 5 ) ) );
- InMatchExpression in;
- ASSERT( in.init( "a", operand[ "$in" ] ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- in.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- in.matchesIndexKey( BSON( "" << 5 ), indexSpec ) );
- }
-
- TEST( InMatchExpression, MatchesIndexKeyArrayValue ) {
- BSONObjBuilder inArray;
- inArray.append( "0", 4 ).append( "1", 5 ).appendRegex( "2", "abc", "" );
- BSONObj operand = BSONObjBuilder().appendArray( "$in", inArray.obj() ).obj();
- InMatchExpression in;
- ASSERT( in.init( "a", operand[ "$in" ] ).isOK() );
- IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- in.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 4 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- in.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 6 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- in.matchesIndexKey( BSON( "" << "dummygeohash" << "" << "abcd" ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- in.matchesIndexKey( BSONObjBuilder()
- .append( "", "dummygeohash" )
- .appendRegex( "", "abc", "" ).obj(),
- indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- in.matchesIndexKey( BSON( "" << "dummygeohash" << "" << "ab" ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- in.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << 5 ) ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- in.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << 9 ) ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- in.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << "abc" ) ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- in.matchesIndexKey( BSON( "" << "dummygeohash" <<
- "" << BSON_ARRAY( 8 << "ac" ) ), indexSpec ) );
- }
- */
+using std::string;
+TEST(EqOp, MatchesElement) {
+ BSONObj operand = BSON("a" << 5);
+ BSONObj match = BSON("a" << 5.0);
+ BSONObj notMatch = BSON("a" << 6);
+
+ EqualityMatchExpression eq;
+ eq.init("", operand["a"]);
+ ASSERT(eq.matchesSingleElement(match.firstElement()));
+ ASSERT(!eq.matchesSingleElement(notMatch.firstElement()));
+
+ ASSERT(eq.equivalent(&eq));
+}
+
+TEST(EqOp, InvalidEooOperand) {
+ BSONObj operand;
+ EqualityMatchExpression eq;
+ ASSERT(!eq.init("", operand.firstElement()).isOK());
+}
+
+TEST(EqOp, MatchesScalar) {
+ BSONObj operand = BSON("a" << 5);
+ EqualityMatchExpression eq;
+ eq.init("a", operand["a"]);
+ ASSERT(eq.matchesBSON(BSON("a" << 5.0), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(EqOp, MatchesArrayValue) {
+ BSONObj operand = BSON("a" << 5);
+ EqualityMatchExpression eq;
+ eq.init("a", operand["a"]);
+ ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
+}
+
+TEST(EqOp, MatchesReferencedObjectValue) {
+ BSONObj operand = BSON("a.b" << 5);
+ EqualityMatchExpression eq;
+ eq.init("a.b", operand["a.b"]);
+ ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << 5)), NULL));
+ ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(5))), NULL));
+ ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 5))), NULL));
+}
+
+TEST(EqOp, MatchesReferencedArrayValue) {
+ BSONObj operand = BSON("a.0" << 5);
+ EqualityMatchExpression eq;
+ eq.init("a.0", operand["a.0"]);
+ ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
+}
+
+TEST(EqOp, MatchesNull) {
+ BSONObj operand = BSON("a" << BSONNULL);
+ EqualityMatchExpression eq;
+ eq.init("a", operand["a"]);
+ ASSERT(eq.matchesBSON(BSONObj(), NULL));
+ ASSERT(eq.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
+    // A non-existent field is treated the same way as an empty bson object
+ ASSERT(eq.matchesBSON(BSON("b" << 4), NULL));
+}
+
+// This test documents how the matcher currently works,
+// not necessarily how it should work ideally.
+TEST(EqOp, MatchesNestedNull) {
+ BSONObj operand = BSON("a.b" << BSONNULL);
+ EqualityMatchExpression eq;
+ eq.init("a.b", operand["a.b"]);
+ // null matches any empty object that is on a subpath of a.b
+ ASSERT(eq.matchesBSON(BSONObj(), NULL));
+ ASSERT(eq.matchesBSON(BSON("a" << BSONObj()), NULL));
+ ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj())), NULL));
+ ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << BSONNULL)), NULL));
+ // b does not exist as an element in array under a.
+ ASSERT(!eq.matchesBSON(BSON("a" << BSONArray()), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(BSONNULL)), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL));
+ // a.b exists but is not null.
+ ASSERT(!eq.matchesBSON(BSON("a" << BSON("b" << 4)), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << BSON("b" << BSONObj())), NULL));
+    // A non-existent field is treated the same way as an empty bson object
+ ASSERT(eq.matchesBSON(BSON("b" << 4), NULL));
+}
+
+TEST(EqOp, MatchesMinKey) {
+ BSONObj operand = BSON("a" << MinKey);
+ EqualityMatchExpression eq;
+ eq.init("a", operand["a"]);
+ ASSERT(eq.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
+}
+
+
+TEST(EqOp, MatchesMaxKey) {
+ BSONObj operand = BSON("a" << MaxKey);
+ EqualityMatchExpression eq;
+ ASSERT(eq.init("a", operand["a"]).isOK());
+ ASSERT(eq.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(EqOp, MatchesFullArray) {
+ BSONObj operand = BSON("a" << BSON_ARRAY(1 << 2));
+ EqualityMatchExpression eq;
+ ASSERT(eq.init("a", operand["a"]).isOK());
+ ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 3)), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1)), NULL));
+ ASSERT(!eq.matchesBSON(BSON("a" << 1), NULL));
+}
+
+TEST(EqOp, MatchesThroughNestedArray) {
+ BSONObj operand = BSON("a.b.c.d" << 3);
+ EqualityMatchExpression eq;
+ eq.init("a.b.c.d", operand["a.b.c.d"]);
+ BSONObj obj = fromjson("{a:{b:[{c:[{d:1},{d:2}]},{c:[{d:3}]}]}}");
+ ASSERT(eq.matchesBSON(obj, NULL));
+}
+
+TEST(EqOp, ElemMatchKey) {
+ BSONObj operand = BSON("a" << 5);
+ EqualityMatchExpression eq;
+ ASSERT(eq.init("a", operand["a"]).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!eq.matchesBSON(BSON("a" << 4), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(eq.matchesBSON(BSON("a" << 5), &details));
+ ASSERT(!details.hasElemMatchKey());
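+    // For an array value, elemMatchKey() reports the offset of the matching element ("5" sits at index 2).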
+ ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 5)), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("2", details.elemMatchKey());
+}
+
+// SERVER-14886: when an array is being traversed explicitly at the same time that a nested array
+// is being traversed implicitly, the elemMatch key should refer to the offset of the array
+// being implicitly traversed.
+TEST(EqOp, ElemMatchKeyWithImplicitAndExplicitTraversal) {
+ BSONObj operand = BSON("a.0.b" << 3);
+ BSONElement operandFirstElt = operand.firstElement();
+ EqualityMatchExpression eq;
+ ASSERT(eq.init(operandFirstElt.fieldName(), operandFirstElt).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ BSONObj obj = fromjson("{a: [{b: [2, 3]}, {b: [4, 5]}]}");
+ ASSERT(eq.matchesBSON(obj, &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+TEST(EqOp, Equality1) {
+ EqualityMatchExpression eq1;
+ EqualityMatchExpression eq2;
+ EqualityMatchExpression eq3;
+
+ BSONObj operand = BSON("a" << 5 << "b" << 5 << "c" << 4);
+
+ eq1.init("a", operand["a"]);
+ eq2.init("a", operand["b"]);
+ eq3.init("c", operand["c"]);
+
+ ASSERT(eq1.equivalent(&eq1));
+ ASSERT(eq1.equivalent(&eq2));
+ ASSERT(!eq1.equivalent(&eq3));
+}
+
+/**
+ TEST( EqOp, MatchesIndexKeyScalar ) {
+ BSONObj operand = BSON( "a" << 6 );
+ EqualityMatchExpression eq;
+ ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ eq.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ eq.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ eq.matchesIndexKey( BSON( "" << BSON_ARRAY( 6 ) ), indexSpec ) );
+ }
+
+ TEST( EqOp, MatchesIndexKeyMissing ) {
+ BSONObj operand = BSON( "a" << 6 );
+ EqualityMatchExpression eq;
+ ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "b" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ eq.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ eq.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ eq.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
+ }
+
+ TEST( EqOp, MatchesIndexKeyArray ) {
+ BSONObj operand = BSON( "a" << BSON_ARRAY( 4 << 5 ) );
+ ComparisonMatchExpression eq
+ ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ eq.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ }
+
+ TEST( EqOp, MatchesIndexKeyArrayValue ) {
+ BSONObj operand = BSON( "a" << 6 );
+ ComparisonMatchExpression eq
+ ASSERT( eq.init( "a", operand[ "a" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ eq.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ eq.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ eq.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
+ }
+*/
+TEST(LtOp, MatchesElement) {
+ BSONObj operand = BSON("$lt" << 5);
+ BSONObj match = BSON("a" << 4.5);
+ BSONObj notMatch = BSON("a" << 6);
+ BSONObj notMatchEqual = BSON("a" << 5);
+ BSONObj notMatchWrongType = BSON("a"
+ << "foo");
+ LTMatchExpression lt;
+ ASSERT(lt.init("", operand["$lt"]).isOK());
+ ASSERT(lt.matchesSingleElement(match.firstElement()));
+ ASSERT(!lt.matchesSingleElement(notMatch.firstElement()));
+ ASSERT(!lt.matchesSingleElement(notMatchEqual.firstElement()));
+ ASSERT(!lt.matchesSingleElement(notMatchWrongType.firstElement()));
+}
+
+TEST(LtOp, InvalidEooOperand) {
+ BSONObj operand;
+ LTMatchExpression lt;
+ ASSERT(!lt.init("", operand.firstElement()).isOK());
+}
+
+TEST(LtOp, MatchesScalar) {
+ BSONObj operand = BSON("$lt" << 5);
+ LTMatchExpression lt;
+ ASSERT(lt.init("a", operand["$lt"]).isOK());
+ ASSERT(lt.matchesBSON(BSON("a" << 4.5), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << 6), NULL));
+}
+
+TEST(LtOp, MatchesScalarEmptyKey) {
+ BSONObj operand = BSON("$lt" << 5);
+ LTMatchExpression lt;
+ ASSERT(lt.init("", operand["$lt"]).isOK());
+ ASSERT(lt.matchesBSON(BSON("" << 4.5), NULL));
+ ASSERT(!lt.matchesBSON(BSON("" << 6), NULL));
+}
+
+TEST(LtOp, MatchesArrayValue) {
+ BSONObj operand = BSON("$lt" << 5);
+ LTMatchExpression lt;
+ ASSERT(lt.init("a", operand["$lt"]).isOK());
+ ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(6 << 4.5)), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
+}
+
+TEST(LtOp, MatchesWholeArray) {
+ BSONObj operand = BSON("$lt" << BSON_ARRAY(5));
+ LTMatchExpression lt;
+ ASSERT(lt.init("a", operand["$lt"]).isOK());
+ ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL));
+ // Nested array.
+ ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL));
+}
+
+TEST(LtOp, MatchesNull) {
+ BSONObj operand = BSON("$lt" << BSONNULL);
+ LTMatchExpression lt;
+ ASSERT(lt.init("a", operand["$lt"]).isOK());
+ ASSERT(!lt.matchesBSON(BSONObj(), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << 4), NULL));
+    // A non-existent field is treated the same way as an empty bson object
+ ASSERT(!lt.matchesBSON(BSON("b" << 4), NULL));
+}
+
+TEST(LtOp, MatchesDotNotationNull) {
+ BSONObj operand = BSON("$lt" << BSONNULL);
+ LTMatchExpression lt;
+ ASSERT(lt.init("a.b", operand["$lt"]).isOK());
+ ASSERT(!lt.matchesBSON(BSONObj(), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << 4), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSONObj()), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL));
+}
+
+TEST(LtOp, MatchesMinKey) {
+ BSONObj operand = BSON("a" << MinKey);
+ LTMatchExpression lt;
+ ASSERT(lt.init("a", operand["a"]).isOK());
+ ASSERT(!lt.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(!lt.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(LtOp, MatchesMaxKey) {
+ BSONObj operand = BSON("a" << MaxKey);
+ LTMatchExpression lt;
+ ASSERT(lt.init("a", operand["a"]).isOK());
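+    // Every value except MaxKey itself sorts below MaxKey, so $lt MaxKey matches anything else.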
+ ASSERT(!lt.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(lt.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(lt.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(LtOp, ElemMatchKey) {
+ BSONObj operand = BSON("$lt" << 5);
+ LTMatchExpression lt;
+ ASSERT(lt.init("a", operand["$lt"]).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!lt.matchesBSON(BSON("a" << 6), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(lt.matchesBSON(BSON("a" << 4), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(6 << 2 << 5)), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+/**
+ TEST( LtOp, MatchesIndexKeyScalar ) {
+ BSONObj operand = BSON( "$lt" << 6 );
+ LtOp lt;
+ ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ lt.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ lt.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ lt.matchesIndexKey( BSON( "" << BSON_ARRAY( 5 ) ), indexSpec ) );
+ }
+
+ TEST( LtOp, MatchesIndexKeyMissing ) {
+ BSONObj operand = BSON( "$lt" << 6 );
+ LtOp lt;
+ ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "b" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ lt.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ lt.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ lt.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
+ }
+
+ TEST( LtOp, MatchesIndexKeyArray ) {
+ BSONObj operand = BSON( "$lt" << BSON_ARRAY( 4 << 5 ) );
+ LtOp lt;
+ ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ lt.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
+ }
+
+ TEST( LtOp, MatchesIndexKeyArrayValue ) {
+ BSONObj operand = BSON( "$lt" << 6 );
+ LtOp lt;
+ ASSERT( lt.init( "a", operand[ "$lt" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ lt.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ lt.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ lt.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << 6 << 4 ) ), indexSpec ) );
+ }
+*/
+TEST(LteOp, MatchesElement) {
+ BSONObj operand = BSON("$lte" << 5);
+ BSONObj match = BSON("a" << 4.5);
+ BSONObj equalMatch = BSON("a" << 5);
+ BSONObj notMatch = BSON("a" << 6);
+ BSONObj notMatchWrongType = BSON("a"
+ << "foo");
+ LTEMatchExpression lte;
+ ASSERT(lte.init("", operand["$lte"]).isOK());
+ ASSERT(lte.matchesSingleElement(match.firstElement()));
+ ASSERT(lte.matchesSingleElement(equalMatch.firstElement()));
+ ASSERT(!lte.matchesSingleElement(notMatch.firstElement()));
+ ASSERT(!lte.matchesSingleElement(notMatchWrongType.firstElement()));
+}
+
+TEST(LteOp, InvalidEooOperand) {
+ BSONObj operand;
+ LTEMatchExpression lte;
+ ASSERT(!lte.init("", operand.firstElement()).isOK());
+}
+
+TEST(LteOp, MatchesScalar) {
+ BSONObj operand = BSON("$lte" << 5);
+ LTEMatchExpression lte;
+ ASSERT(lte.init("a", operand["$lte"]).isOK());
+ ASSERT(lte.matchesBSON(BSON("a" << 4.5), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << 6), NULL));
+}
+
+TEST(LteOp, MatchesArrayValue) {
+ BSONObj operand = BSON("$lte" << 5);
+ LTEMatchExpression lte;
+ ASSERT(lte.init("a", operand["$lte"]).isOK());
+ ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(6 << 4.5)), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
+}
+
+TEST(LteOp, MatchesWholeArray) {
+ BSONObj operand = BSON("$lte" << BSON_ARRAY(5));
+ LTEMatchExpression lte;
+ ASSERT(lte.init("a", operand["$lte"]).isOK());
+ ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL));
+ // Nested array.
+ ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL));
+}
+
+TEST(LteOp, MatchesNull) {
+ BSONObj operand = BSON("$lte" << BSONNULL);
+ LTEMatchExpression lte;
+ ASSERT(lte.init("a", operand["$lte"]).isOK());
+ ASSERT(lte.matchesBSON(BSONObj(), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << 4), NULL));
+    // A non-existent field is treated the same way as an empty bson object
+ ASSERT(lte.matchesBSON(BSON("b" << 4), NULL));
+}
+
+TEST(LteOp, MatchesDotNotationNull) {
+ BSONObj operand = BSON("$lte" << BSONNULL);
+ LTEMatchExpression lte;
+ ASSERT(lte.init("a.b", operand["$lte"]).isOK());
+ ASSERT(lte.matchesBSON(BSONObj(), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << 4), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << BSONObj()), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL));
+}
+
+TEST(LteOp, MatchesMinKey) {
+ BSONObj operand = BSON("a" << MinKey);
+ LTEMatchExpression lte;
+ ASSERT(lte.init("a", operand["a"]).isOK());
+ ASSERT(lte.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(!lte.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(LteOp, MatchesMaxKey) {
+ BSONObj operand = BSON("a" << MaxKey);
+ LTEMatchExpression lte;
+ ASSERT(lte.init("a", operand["a"]).isOK());
+ ASSERT(lte.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(lte.matchesBSON(BSON("a" << 4), NULL));
+}
+
+
+TEST(LteOp, ElemMatchKey) {
+ BSONObj operand = BSON("$lte" << 5);
+ LTEMatchExpression lte;
+ ASSERT(lte.init("a", operand["$lte"]).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!lte.matchesBSON(BSON("a" << 6), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(lte.matchesBSON(BSON("a" << 4), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(6 << 2 << 5)), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+/**
+ TEST( LteOp, MatchesIndexKeyScalar ) {
+ BSONObj operand = BSON( "$lte" << 6 );
+ LteOp lte;
+ ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ lte.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ lte.matchesIndexKey( BSON( "" << 7 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ lte.matchesIndexKey( BSON( "" << BSON_ARRAY( 5 ) ), indexSpec ) );
+ }
+
+ TEST( LteOp, MatchesIndexKeyMissing ) {
+ BSONObj operand = BSON( "$lte" << 6 );
+ LteOp lte;
+ ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "b" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ lte.matchesIndexKey( BSON( "" << 7 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ lte.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ lte.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
+ }
+
+ TEST( LteOp, MatchesIndexKeyArray ) {
+ BSONObj operand = BSON( "$lte" << BSON_ARRAY( 4 << 5 ) );
+ LteOp lte;
+ ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ lte.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
+ }
+
+ TEST( LteOp, MatchesIndexKeyArrayValue ) {
+ BSONObj operand = BSON( "$lte" << 6 );
+ LteOp lte;
+ ASSERT( lte.init( "a", operand[ "$lte" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ lte.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ lte.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 7 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ lte.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << 6 << 4 ) ), indexSpec ) );
+ }
+
+ TEST( GtOp, MatchesElement ) {
+ BSONObj operand = BSON( "$gt" << 5 );
+ BSONObj match = BSON( "a" << 5.5 );
+ BSONObj notMatch = BSON( "a" << 4 );
+ BSONObj notMatchEqual = BSON( "a" << 5 );
+ BSONObj notMatchWrongType = BSON( "a" << "foo" );
+ GtOp gt;
+ ASSERT( gt.init( "", operand[ "$gt" ] ).isOK() );
+ ASSERT( gt.matchesSingleElement( match.firstElement() ) );
+ ASSERT( !gt.matchesSingleElement( notMatch.firstElement() ) );
+ ASSERT( !gt.matchesSingleElement( notMatchEqual.firstElement() ) );
+ ASSERT( !gt.matchesSingleElement( notMatchWrongType.firstElement() ) );
+ }
+*/
+
+TEST(GtOp, InvalidEooOperand) {
+ BSONObj operand;
+ GTMatchExpression gt;
+ ASSERT(!gt.init("", operand.firstElement()).isOK());
+}
+
+TEST(GtOp, MatchesScalar) {
+ BSONObj operand = BSON("$gt" << 5);
+ GTMatchExpression gt;
+ ASSERT(gt.init("a", operand["$gt"]).isOK());
+ ASSERT(gt.matchesBSON(BSON("a" << 5.5), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(GtOp, MatchesArrayValue) {
+ BSONObj operand = BSON("$gt" << 5);
+ GTMatchExpression gt;
+ ASSERT(gt.init("a", operand["$gt"]).isOK());
+ ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(3 << 5.5)), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(2 << 4)), NULL));
+}
+
+TEST(GtOp, MatchesWholeArray) {
+ BSONObj operand = BSON("$gt" << BSON_ARRAY(5));
+ GTMatchExpression gt;
+ ASSERT(gt.init("a", operand["$gt"]).isOK());
+ ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL));
+ ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL));
+ // Nested array.
+ // XXX: The following assertion documents current behavior.
+ ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL));
+ // XXX: The following assertion documents current behavior.
+ ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
+ ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL));
+}
+
+TEST(GtOp, MatchesNull) {
+ BSONObj operand = BSON("$gt" << BSONNULL);
+ GTMatchExpression gt;
+ ASSERT(gt.init("a", operand["$gt"]).isOK());
+ ASSERT(!gt.matchesBSON(BSONObj(), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << 4), NULL));
+    // A non-existent field is treated the same way as an empty bson object
+ ASSERT(!gt.matchesBSON(BSON("b" << 4), NULL));
+}
+
+TEST(GtOp, MatchesDotNotationNull) {
+ BSONObj operand = BSON("$gt" << BSONNULL);
+ GTMatchExpression gt;
+ ASSERT(gt.init("a.b", operand["$gt"]).isOK());
+ ASSERT(!gt.matchesBSON(BSONObj(), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << 4), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSONObj()), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL));
+}
+
+TEST(GtOp, MatchesMinKey) {
+ BSONObj operand = BSON("a" << MinKey);
+ GTMatchExpression gt;
+ ASSERT(gt.init("a", operand["a"]).isOK());
+ ASSERT(!gt.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(gt.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(gt.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(GtOp, MatchesMaxKey) {
+ BSONObj operand = BSON("a" << MaxKey);
+ GTMatchExpression gt;
+ ASSERT(gt.init("a", operand["a"]).isOK());
+ ASSERT(!gt.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(!gt.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(GtOp, ElemMatchKey) {
+ BSONObj operand = BSON("$gt" << 5);
+ GTMatchExpression gt;
+ ASSERT(gt.init("a", operand["$gt"]).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!gt.matchesBSON(BSON("a" << 4), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(gt.matchesBSON(BSON("a" << 6), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(2 << 6 << 5)), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+/**
+ TEST( GtOp, MatchesIndexKeyScalar ) {
+ BSONObj operand = BSON( "$gt" << 6 );
+ GtOp gt;
+ ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ gt.matchesIndexKey( BSON( "" << 7 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ gt.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ gt.matchesIndexKey( BSON( "" << BSON_ARRAY( 9 ) ), indexSpec ) );
+ }
+
+ TEST( GtOp, MatchesIndexKeyMissing ) {
+ BSONObj operand = BSON( "$gt" << 6 );
+ GtOp gt;
+ ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "b" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ gt.matchesIndexKey( BSON( "" << 7 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ gt.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ gt.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
+ }
+
+ TEST( GtOp, MatchesIndexKeyArray ) {
+ BSONObj operand = BSON( "$gt" << BSON_ARRAY( 4 << 5 ) );
+ GtOp gt;
+ ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ gt.matchesIndexKey( BSON( "" << 8 ), indexSpec ) );
+ }
+
+ TEST( GtOp, MatchesIndexKeyArrayValue ) {
+ BSONObj operand = BSON( "$gt" << 6 );
+ GtOp gt;
+ ASSERT( gt.init( "a", operand[ "$gt" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ gt.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 7 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ gt.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ gt.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << 6 << 4 ) ), indexSpec ) );
+ }
+*/
+
+TEST(ComparisonMatchExpression, MatchesElement) {
+ BSONObj operand = BSON("$gte" << 5);
+ BSONObj match = BSON("a" << 5.5);
+ BSONObj equalMatch = BSON("a" << 5);
+ BSONObj notMatch = BSON("a" << 4);
+ BSONObj notMatchWrongType = BSON("a"
+ << "foo");
+ GTEMatchExpression gte;
+ ASSERT(gte.init("", operand["$gte"]).isOK());
+ ASSERT(gte.matchesSingleElement(match.firstElement()));
+ ASSERT(gte.matchesSingleElement(equalMatch.firstElement()));
+ ASSERT(!gte.matchesSingleElement(notMatch.firstElement()));
+ ASSERT(!gte.matchesSingleElement(notMatchWrongType.firstElement()));
+}
+
+TEST(ComparisonMatchExpression, InvalidEooOperand) {
+ BSONObj operand;
+ GTEMatchExpression gte;
+ ASSERT(!gte.init("", operand.firstElement()).isOK());
+}
+
+TEST(ComparisonMatchExpression, MatchesScalar) {
+ BSONObj operand = BSON("$gte" << 5);
+ GTEMatchExpression gte;
+ ASSERT(gte.init("a", operand["$gte"]).isOK());
+ ASSERT(gte.matchesBSON(BSON("a" << 5.5), NULL));
+ ASSERT(!gte.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(ComparisonMatchExpression, MatchesArrayValue) {
+ BSONObj operand = BSON("$gte" << 5);
+ GTEMatchExpression gte;
+ ASSERT(gte.init("a", operand["$gte"]).isOK());
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), NULL));
+ ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL));
+}
+
+TEST(ComparisonMatchExpression, MatchesWholeArray) {
+ BSONObj operand = BSON("$gte" << BSON_ARRAY(5));
+ GTEMatchExpression gte;
+ ASSERT(gte.init("a", operand["$gte"]).isOK());
+ ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL));
+ // Nested array.
+ // XXX: The following assertion documents current behavior.
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL));
+}
+
+TEST(ComparisonMatchExpression, MatchesNull) {
+ BSONObj operand = BSON("$gte" << BSONNULL);
+ GTEMatchExpression gte;
+ ASSERT(gte.init("a", operand["$gte"]).isOK());
+ ASSERT(gte.matchesBSON(BSONObj(), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!gte.matchesBSON(BSON("a" << 4), NULL));
+    // A non-existent field is treated the same way as an empty bson object
+ ASSERT(gte.matchesBSON(BSON("b" << 4), NULL));
+}
+
+TEST(ComparisonMatchExpression, MatchesDotNotationNull) {
+ BSONObj operand = BSON("$gte" << BSONNULL);
+ GTEMatchExpression gte;
+ ASSERT(gte.init("a.b", operand["$gte"]).isOK());
+ ASSERT(gte.matchesBSON(BSONObj(), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << 4), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSONObj()), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL));
+ ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL));
+}
+
+TEST(ComparisonMatchExpression, MatchesMinKey) {
+ BSONObj operand = BSON("a" << MinKey);
+ GTEMatchExpression gte;
+ ASSERT(gte.init("a", operand["a"]).isOK());
+ ASSERT(gte.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(gte.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(ComparisonMatchExpression, MatchesMaxKey) {
+ BSONObj operand = BSON("a" << MaxKey);
+ GTEMatchExpression gte;
+ ASSERT(gte.init("a", operand["a"]).isOK());
+ ASSERT(gte.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(!gte.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(!gte.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(ComparisonMatchExpression, ElemMatchKey) {
+ BSONObj operand = BSON("$gte" << 5);
+ GTEMatchExpression gte;
+ ASSERT(gte.init("a", operand["$gte"]).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!gte.matchesBSON(BSON("a" << 4), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(gte.matchesBSON(BSON("a" << 6), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(2 << 6 << 5)), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+/**
+ TEST( GteOp, MatchesIndexKeyScalar ) {
+ BSONObj operand = BSON( "$gte" << 6 );
+ GteOp gte;
+ ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ gte.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ gte.matchesIndexKey( BSON( "" << 5 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ gte.matchesIndexKey( BSON( "" << BSON_ARRAY( 7 ) ), indexSpec ) );
+ }
+
+ TEST( GteOp, MatchesIndexKeyMissing ) {
+ BSONObj operand = BSON( "$gte" << 6 );
+ GteOp gte;
+ ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "b" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ gte.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ gte.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ gte.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
+ }
+
+ TEST( GteOp, MatchesIndexKeyArray ) {
+ BSONObj operand = BSON( "$gte" << BSON_ARRAY( 4 << 5 ) );
+ GteOp gte;
+ ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ gte.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ }
+
+ TEST( GteOp, MatchesIndexKeyArrayValue ) {
+ BSONObj operand = BSON( "$gte" << 6 );
+ GteOp gte;
+ ASSERT( gte.init( "a", operand[ "$gte" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ gte.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ gte.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ gte.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << 6 << 4 ) ), indexSpec ) );
+ }
+*/
+
+TEST(RegexMatchExpression, MatchesElementExact) {
+ BSONObj match = BSON("a"
+ << "b");
+ BSONObj notMatch = BSON("a"
+ << "c");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "b", "").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, TooLargePattern) {
+ string tooLargePattern(50 * 1000, 'z');
+ RegexMatchExpression regex;
+ ASSERT(!regex.init("a", tooLargePattern, "").isOK());
+}
+
+TEST(RegexMatchExpression, MatchesElementSimplePrefix) {
+ BSONObj match = BSON("x"
+ << "abc");
+ BSONObj notMatch = BSON("x"
+ << "adz");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "^ab", "").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementCaseSensitive) {
+ BSONObj match = BSON("x"
+ << "abc");
+ BSONObj notMatch = BSON("x"
+ << "ABC");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "abc", "").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementCaseInsensitive) {
+ BSONObj match = BSON("x"
+ << "abc");
+ BSONObj matchUppercase = BSON("x"
+ << "ABC");
+ BSONObj notMatch = BSON("x"
+ << "abz");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "abc", "i").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(regex.matchesSingleElement(matchUppercase.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementMultilineOff) {
+ BSONObj match = BSON("x"
+ << "az");
+ BSONObj notMatch = BSON("x"
+ << "\naz");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "^a", "").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementMultilineOn) {
+ BSONObj match = BSON("x"
+ << "az");
+ BSONObj matchMultiline = BSON("x"
+ << "\naz");
+ BSONObj notMatch = BSON("x"
+ << "\n\n");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "^a", "m").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(regex.matchesSingleElement(matchMultiline.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementExtendedOff) {
+ BSONObj match = BSON("x"
+ << "a b");
+ BSONObj notMatch = BSON("x"
+ << "ab");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "a b", "").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementExtendedOn) {
+ BSONObj match = BSON("x"
+ << "ab");
+ BSONObj notMatch = BSON("x"
+ << "a b");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "a b", "x").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementDotAllOff) {
+ BSONObj match = BSON("x"
+ << "a b");
+ BSONObj notMatch = BSON("x"
+ << "a\nb");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "a.b", "").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementDotAllOn) {
+ BSONObj match = BSON("x"
+ << "a b");
+ BSONObj matchDotAll = BSON("x"
+ << "a\nb");
+ BSONObj notMatch = BSON("x"
+ << "ab");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "a.b", "s").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(regex.matchesSingleElement(matchDotAll.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementMultipleFlags) {
+ BSONObj matchMultilineDotAll = BSON("x"
+ << "\na\nb");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "^a.b", "ms").isOK());
+ ASSERT(regex.matchesSingleElement(matchMultilineDotAll.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementRegexType) {
+ BSONObj match = BSONObjBuilder().appendRegex("x", "yz", "i").obj();
+ BSONObj notMatchPattern = BSONObjBuilder().appendRegex("x", "r", "i").obj();
+ BSONObj notMatchFlags = BSONObjBuilder().appendRegex("x", "yz", "s").obj();
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "yz", "i").isOK());
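+    // A regex-typed element matches only when its pattern and flags are both identical to the expression's.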
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatchPattern.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatchFlags.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementSymbolType) {
+ BSONObj match = BSONObjBuilder().appendSymbol("x", "yz").obj();
+ BSONObj notMatch = BSONObjBuilder().appendSymbol("x", "gg").obj();
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "yz", "").isOK());
+ ASSERT(regex.matchesSingleElement(match.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatch.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementWrongType) {
+ BSONObj notMatchInt = BSON("x" << 1);
+ BSONObj notMatchBool = BSON("x" << true);
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "1", "").isOK());
+ ASSERT(!regex.matchesSingleElement(notMatchInt.firstElement()));
+ ASSERT(!regex.matchesSingleElement(notMatchBool.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesElementUtf8) {
+ BSONObj multiByteCharacter = BSON("x"
+ << "\xc2\xa5");
+ RegexMatchExpression regex;
+ ASSERT(regex.init("", "^.$", "").isOK());
+ ASSERT(regex.matchesSingleElement(multiByteCharacter.firstElement()));
+}
+
+TEST(RegexMatchExpression, MatchesScalar) {
+ RegexMatchExpression regex;
+ ASSERT(regex.init("a", "b", "").isOK());
+ ASSERT(regex.matchesBSON(BSON("a"
+ << "b"),
+ NULL));
+ ASSERT(!regex.matchesBSON(BSON("a"
+ << "c"),
+ NULL));
+}
+
+TEST(RegexMatchExpression, MatchesArrayValue) {
+ RegexMatchExpression regex;
+ ASSERT(regex.init("a", "b", "").isOK());
+ ASSERT(regex.matchesBSON(BSON("a" << BSON_ARRAY("c"
+ << "b")),
+ NULL));
+ ASSERT(!regex.matchesBSON(BSON("a" << BSON_ARRAY("d"
+ << "c")),
+ NULL));
+}
+
+TEST(RegexMatchExpression, MatchesNull) {
+ RegexMatchExpression regex;
+ ASSERT(regex.init("a", "b", "").isOK());
+ ASSERT(!regex.matchesBSON(BSONObj(), NULL));
+ ASSERT(!regex.matchesBSON(BSON("a" << BSONNULL), NULL));
+}
+
+TEST(RegexMatchExpression, ElemMatchKey) {
+ RegexMatchExpression regex;
+ ASSERT(regex.init("a", "b", "").isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!regex.matchesBSON(BSON("a"
+ << "c"),
+ &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(regex.matchesBSON(BSON("a"
+ << "b"),
+ &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(regex.matchesBSON(BSON("a" << BSON_ARRAY("c"
+ << "b")),
+ &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+TEST(RegexMatchExpression, Equality1) {
+ RegexMatchExpression r1;
+ RegexMatchExpression r2;
+ RegexMatchExpression r3;
+ RegexMatchExpression r4;
+ ASSERT(r1.init("a", "b", "").isOK());
+ ASSERT(r2.init("a", "b", "x").isOK());
+ ASSERT(r3.init("a", "c", "").isOK());
+ ASSERT(r4.init("b", "b", "").isOK());
+
+ ASSERT(r1.equivalent(&r1));
+ ASSERT(!r1.equivalent(&r2));
+ ASSERT(!r1.equivalent(&r3));
+ ASSERT(!r1.equivalent(&r4));
+}
+
+/**
+ TEST( RegexMatchExpression, MatchesIndexKeyScalar ) {
+ RegexMatchExpression regex;
+ ASSERT( regex.init( "a", "xyz", "" ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ regex.matchesIndexKey( BSON( "" << "z xyz" ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ regex.matchesIndexKey( BSON( "" << "xy" ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ regex.matchesIndexKey( BSON( "" << BSON_ARRAY( "xyz" ) ), indexSpec ) );
+ }
+
+ TEST( RegexMatchExpression, MatchesIndexKeyMissing ) {
+ RegexMatchExpression regex;
+ ASSERT( regex.init( "a", "xyz", "" ).isOK() );
+ IndexSpec indexSpec( BSON( "b" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ regex.matchesIndexKey( BSON( "" << "z xyz" ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ regex.matchesIndexKey( BSON( "" << "xy" ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ regex.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << "xyz" ) ), indexSpec ) );
+ }
+
+ TEST( RegexMatchExpression, MatchesIndexKeyArrayValue ) {
+ RegexMatchExpression regex;
+ ASSERT( regex.init( "a", "xyz", "" ).isOK() );
+ IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ regex.matchesIndexKey( BSON( "" << "dummygeohash" << "" << "xyz" ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ regex.matchesIndexKey( BSON( "" << "dummygeohash" << "" << "z" ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ regex.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( "r" << 6 << "xyz" ) ), indexSpec ) );
+ }
+*/
+
+TEST(ModMatchExpression, MatchesElement) {
+ BSONObj match = BSON("a" << 1);
+ BSONObj largerMatch = BSON("a" << 4.0);
+ BSONObj longLongMatch = BSON("a" << 68719476736LL);
+ BSONObj notMatch = BSON("a" << 6);
+ BSONObj negativeNotMatch = BSON("a" << -2);
+ ModMatchExpression mod;
+ ASSERT(mod.init("", 3, 1).isOK());
+ ASSERT(mod.matchesSingleElement(match.firstElement()));
+ ASSERT(mod.matchesSingleElement(largerMatch.firstElement()));
+ ASSERT(mod.matchesSingleElement(longLongMatch.firstElement()));
+ ASSERT(!mod.matchesSingleElement(notMatch.firstElement()));
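+    // Negative remainders are not normalized: -2 is not treated as congruent to 1 mod 3.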
+ ASSERT(!mod.matchesSingleElement(negativeNotMatch.firstElement()));
+}
+
+TEST(ModMatchExpression, ZeroDivisor) {
+ ModMatchExpression mod;
+ ASSERT(!mod.init("", 0, 1).isOK());
+}
+
+TEST(ModMatchExpression, MatchesScalar) {
+ ModMatchExpression mod;
+ ASSERT(mod.init("a", 5, 2).isOK());
+ ASSERT(mod.matchesBSON(BSON("a" << 7.0), NULL));
+ ASSERT(!mod.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(ModMatchExpression, MatchesArrayValue) {
+ ModMatchExpression mod;
+ ASSERT(mod.init("a", 5, 2).isOK());
+ ASSERT(mod.matchesBSON(BSON("a" << BSON_ARRAY(5 << 12LL)), NULL));
+ ASSERT(!mod.matchesBSON(BSON("a" << BSON_ARRAY(6 << 8)), NULL));
+}
+
+TEST(ModMatchExpression, MatchesNull) {
+ ModMatchExpression mod;
+ ASSERT(mod.init("a", 5, 2).isOK());
+ ASSERT(!mod.matchesBSON(BSONObj(), NULL));
+ ASSERT(!mod.matchesBSON(BSON("a" << BSONNULL), NULL));
+}
+
+TEST(ModMatchExpression, ElemMatchKey) {
+ ModMatchExpression mod;
+ ASSERT(mod.init("a", 5, 2).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!mod.matchesBSON(BSON("a" << 4), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(mod.matchesBSON(BSON("a" << 2), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(mod.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 5)), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+TEST(ModMatchExpression, Equality1) {
+ ModMatchExpression m1;
+ ModMatchExpression m2;
+ ModMatchExpression m3;
+ ModMatchExpression m4;
+
+ m1.init("a", 1, 2);
+ m2.init("a", 2, 2);
+ m3.init("a", 1, 1);
+ m4.init("b", 1, 2);
+
+ ASSERT(m1.equivalent(&m1));
+ ASSERT(!m1.equivalent(&m2));
+ ASSERT(!m1.equivalent(&m3));
+ ASSERT(!m1.equivalent(&m4));
+}
+
+/**
+ TEST( ModMatchExpression, MatchesIndexKey ) {
+ BSONObj operand = BSON( "$mod" << BSON_ARRAY( 2 << 1 ) );
+ ModMatchExpression mod;
+ ASSERT( mod.init( "a", operand[ "$mod" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ BSONObj indexKey = BSON( "" << 1 );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ mod.matchesIndexKey( indexKey, indexSpec ) );
+ }
+*/
+
+TEST(ExistsMatchExpression, MatchesElement) {
+ BSONObj existsInt = BSON("a" << 5);
+ BSONObj existsNull = BSON("a" << BSONNULL);
+ BSONObj doesntExist = BSONObj();
+ ExistsMatchExpression exists;
+ ASSERT(exists.init("").isOK());
+ ASSERT(exists.matchesSingleElement(existsInt.firstElement()));
+ ASSERT(exists.matchesSingleElement(existsNull.firstElement()));
+ ASSERT(!exists.matchesSingleElement(doesntExist.firstElement()));
+}
+
+TEST(ExistsMatchExpression, MatchesElementExistsTrueValue) {
+ BSONObj exists = BSON("a" << 5);
+ BSONObj missing = BSONObj();
+ ExistsMatchExpression existsTrueValue;
+ ASSERT(existsTrueValue.init("").isOK());
+ ASSERT(existsTrueValue.matchesSingleElement(exists.firstElement()));
+ ASSERT(!existsTrueValue.matchesSingleElement(missing.firstElement()));
+}
+
+TEST(ExistsMatchExpression, MatchesScalar) {
+ ExistsMatchExpression exists;
+ ASSERT(exists.init("a").isOK());
+ ASSERT(exists.matchesBSON(BSON("a" << 1), NULL));
+ ASSERT(exists.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!exists.matchesBSON(BSON("b" << 1), NULL));
+}
+
+TEST(ExistsMatchExpression, MatchesArray) {
+ ExistsMatchExpression exists;
+ ASSERT(exists.init("a").isOK());
+ ASSERT(exists.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), NULL));
+}
+
+TEST(ExistsMatchExpression, ElemMatchKey) {
+ ExistsMatchExpression exists;
+ ASSERT(exists.init("a.b").isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!exists.matchesBSON(BSON("a" << 1), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(exists.matchesBSON(BSON("a" << BSON("b" << 6)), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(exists.matchesBSON(BSON("a" << BSON_ARRAY(2 << BSON("b" << 7))), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+TEST(ExistsMatchExpression, Equivalent) {
+ ExistsMatchExpression e1;
+ ExistsMatchExpression e2;
+ e1.init("a");
+ e2.init("b");
+
+ ASSERT(e1.equivalent(&e1));
+ ASSERT(!e1.equivalent(&e2));
+}
+
+/**
+ TEST( ExistsMatchExpression, MatchesIndexKey ) {
+ BSONObj operand = BSON( "$exists" << true );
+ ExistsMatchExpression exists;
+ ASSERT( exists.init( "a", operand[ "$exists" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ BSONObj indexKey = BSON( "" << 1 );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ exists.matchesIndexKey( indexKey, indexSpec ) );
+ }
+*/
+
+
+TEST(TypeMatchExpression, MatchesElementStringType) {
+ BSONObj match = BSON("a"
+ << "abc");
+ BSONObj notMatch = BSON("a" << 5);
+ TypeMatchExpression type;
+ ASSERT(type.init("", String).isOK());
+ ASSERT(type.matchesSingleElement(match["a"]));
+ ASSERT(!type.matchesSingleElement(notMatch["a"]));
+}
+
+TEST(TypeMatchExpression, MatchesElementNullType) {
+ BSONObj match = BSON("a" << BSONNULL);
+ BSONObj notMatch = BSON("a"
+ << "abc");
+ TypeMatchExpression type;
+ ASSERT(type.init("", jstNULL).isOK());
+ ASSERT(type.matchesSingleElement(match["a"]));
+ ASSERT(!type.matchesSingleElement(notMatch["a"]));
+}
+
+TEST(TypeMatchExpression, InvalidTypeOperand) {
+ // If the provided type number is not a valid BSONType, it is not a parse error. The
+ // operator will simply not match anything.
+ BSONObj notMatch1 = BSON("a" << BSONNULL);
+ BSONObj notMatch2 = BSON("a"
+ << "abc");
+ TypeMatchExpression type;
+ ASSERT(type.init("", JSTypeMax + 1).isOK());
+ ASSERT(!type.matchesSingleElement(notMatch1["a"]));
+ ASSERT(!type.matchesSingleElement(notMatch2["a"]));
+}
+
+TEST(TypeMatchExpression, MatchesScalar) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a", Bool).isOK());
+ ASSERT(type.matchesBSON(BSON("a" << true), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << 1), NULL));
+}
+
+TEST(TypeMatchExpression, MatchesArray) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a", NumberInt).isOK());
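+    // The type predicate matches if any element of the array has the requested type; nested arrays are not unwound.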
+ ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(4 << "a")), NULL));
+ ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY("a" << 4)), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY("a")), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL));
+}
+
+TEST(TypeMatchExpression, MatchesOuterArray) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a", Array).isOK());
+ // The outer array is not matched.
+ ASSERT(!type.matchesBSON(BSON("a" << BSONArray()), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(4 << "a")), NULL));
+ ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(BSONArray() << 2)), NULL));
+ ASSERT(!type.matchesBSON(BSON("a"
+ << "bar"),
+ NULL));
+}
+
+TEST(TypeMatchExpression, MatchesObject) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a", Object).isOK());
+ ASSERT(type.matchesBSON(BSON("a" << BSON("b" << 1)), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << 1), NULL));
+}
+
+TEST(TypeMatchExpression, MatchesDotNotationFieldObject) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a.b", Object).isOK());
+ ASSERT(type.matchesBSON(BSON("a" << BSON("b" << BSON("c" << 1))), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << BSON("b" << 1)), NULL));
+}
+
+TEST(TypeMatchExpression, MatchesDotNotationArrayElementArray) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a.0", Array).isOK());
+ ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(1))), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY("b")), NULL));
+}
+
+TEST(TypeMatchExpression, MatchesDotNotationArrayElementScalar) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a.0", String).isOK());
+ ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY("b")), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(1)), NULL));
+}
+
+TEST(TypeMatchExpression, MatchesDotNotationArrayElementObject) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a.0", Object).isOK());
+ ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 1))), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(1)), NULL));
+}
+
+TEST(TypeMatchExpression, MatchesNull) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a", jstNULL).isOK());
+ ASSERT(type.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!type.matchesBSON(BSON("a" << 4), NULL));
+ ASSERT(!type.matchesBSON(BSONObj(), NULL));
+}
+
+TEST(TypeMatchExpression, ElemMatchKey) {
+ TypeMatchExpression type;
+ ASSERT(type.init("a.b", String).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!type.matchesBSON(BSON("a" << 1), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(type.matchesBSON(BSON("a" << BSON("b"
+ << "string")),
+ &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(type.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY("string"))), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("0", details.elemMatchKey());
+ ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(2 << BSON("b" << BSON_ARRAY("string")))),
+ &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+TEST(TypeMatchExpression, Equivalent) {
+ TypeMatchExpression e1;
+ TypeMatchExpression e2;
+ TypeMatchExpression e3;
+ e1.init("a", String);
+ e2.init("a", NumberDouble);
+ e3.init("b", String);
+
+ ASSERT(e1.equivalent(&e1));
+ ASSERT(!e1.equivalent(&e2));
+ ASSERT(!e1.equivalent(&e3));
+}
+
+
+/**
+ TEST( TypeMatchExpression, MatchesIndexKey ) {
+ BSONObj operand = BSON( "$type" << 2 );
+ TypeMatchExpression type;
+ ASSERT( type.init( "a", operand[ "$type" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ BSONObj indexKey = BSON( "" << "q" );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ type.matchesIndexKey( indexKey, indexSpec ) );
+ }
+*/
+
+
+TEST(InMatchExpression, MatchesElementSingle) {
+ BSONArray operand = BSON_ARRAY(1);
+ BSONObj match = BSON("a" << 1);
+ BSONObj notMatch = BSON("a" << 2);
+ InMatchExpression in;
+ in.getArrayFilterEntries()->addEquality(operand.firstElement());
+ ASSERT(in.matchesSingleElement(match["a"]));
+ ASSERT(!in.matchesSingleElement(notMatch["a"]));
+}
+
+TEST(InMatchExpression, MatchesEmpty) {
+ InMatchExpression in;
+ in.init("a");
+
+ BSONObj notMatch = BSON("a" << 2);
+ ASSERT(!in.matchesSingleElement(notMatch["a"]));
+ ASSERT(!in.matchesBSON(BSON("a" << 1), NULL));
+ ASSERT(!in.matchesBSON(BSONObj(), NULL));
+}
+
+TEST(InMatchExpression, MatchesElementMultiple) {
+ BSONObj operand = BSON_ARRAY(1 << "r" << true << 1);
+ InMatchExpression in;
+ in.getArrayFilterEntries()->addEquality(operand[0]);
+ in.getArrayFilterEntries()->addEquality(operand[1]);
+ in.getArrayFilterEntries()->addEquality(operand[2]);
+ in.getArrayFilterEntries()->addEquality(operand[3]);
+
+ BSONObj matchFirst = BSON("a" << 1);
+ BSONObj matchSecond = BSON("a"
+ << "r");
+ BSONObj matchThird = BSON("a" << true);
+ BSONObj notMatch = BSON("a" << false);
+ ASSERT(in.matchesSingleElement(matchFirst["a"]));
+ ASSERT(in.matchesSingleElement(matchSecond["a"]));
+ ASSERT(in.matchesSingleElement(matchThird["a"]));
+ ASSERT(!in.matchesSingleElement(notMatch["a"]));
+}
+
+
+TEST(InMatchExpression, MatchesScalar) {
+ BSONObj operand = BSON_ARRAY(5);
+ InMatchExpression in;
+ in.init("a");
+ in.getArrayFilterEntries()->addEquality(operand.firstElement());
+
+ ASSERT(in.matchesBSON(BSON("a" << 5.0), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(InMatchExpression, MatchesArrayValue) {
+ BSONObj operand = BSON_ARRAY(5);
+ InMatchExpression in;
+ in.init("a");
+ in.getArrayFilterEntries()->addEquality(operand.firstElement());
+
+ ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
+}
+
+TEST(InMatchExpression, MatchesNull) {
+ BSONObj operand = BSON_ARRAY(BSONNULL);
+
+ InMatchExpression in;
+ in.init("a");
+ in.getArrayFilterEntries()->addEquality(operand.firstElement());
+
+ ASSERT(in.matchesBSON(BSONObj(), NULL));
+ ASSERT(in.matchesBSON(BSON("a" << BSONNULL), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << 4), NULL));
+ // A non-existent field is treated the same way as an empty BSON object
+ ASSERT(in.matchesBSON(BSON("b" << 4), NULL));
+}
+
+TEST(InMatchExpression, MatchesUndefined) {
+ BSONObj operand = BSON_ARRAY(BSONUndefined);
+
+ InMatchExpression in;
+ in.init("a");
+ Status s = in.getArrayFilterEntries()->addEquality(operand.firstElement());
+ ASSERT_NOT_OK(s);
+}
+
+TEST(InMatchExpression, MatchesMinKey) {
+ BSONObj operand = BSON_ARRAY(MinKey);
+ InMatchExpression in;
+ in.init("a");
+ in.getArrayFilterEntries()->addEquality(operand.firstElement());
+
+ ASSERT(in.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(InMatchExpression, MatchesMaxKey) {
+ BSONObj operand = BSON_ARRAY(MaxKey);
+ InMatchExpression in;
+ in.init("a");
+ in.getArrayFilterEntries()->addEquality(operand.firstElement());
+
+ ASSERT(in.matchesBSON(BSON("a" << MaxKey), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << MinKey), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(InMatchExpression, MatchesFullArray) {
+ BSONObj operand = BSON_ARRAY(BSON_ARRAY(1 << 2) << 4 << 5);
+ InMatchExpression in;
+ in.init("a");
+ in.getArrayFilterEntries()->addEquality(operand[0]);
+ in.getArrayFilterEntries()->addEquality(operand[1]);
+ in.getArrayFilterEntries()->addEquality(operand[2]);
+
+ ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 3)), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(1)), NULL));
+ ASSERT(!in.matchesBSON(BSON("a" << 1), NULL));
+}
+
+TEST(InMatchExpression, ElemMatchKey) {
+ BSONObj operand = BSON_ARRAY(5 << 2);
+ InMatchExpression in;
+ in.init("a");
+ in.getArrayFilterEntries()->addEquality(operand[0]);
+ in.getArrayFilterEntries()->addEquality(operand[1]);
+
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!in.matchesBSON(BSON("a" << 4), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(in.matchesBSON(BSON("a" << 5), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 5)), &details));
+ ASSERT(details.hasElemMatchKey());
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+/**
+TEST( InMatchExpression, MatchesIndexKeyScalar ) {
+ BSONObj operand = BSON( "$in" << BSON_ARRAY( 6 << 5 ) );
+ InMatchExpression in;
+ ASSERT( in.init( "a", operand[ "$in" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ in.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ in.matchesIndexKey( BSON( "" << 5 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ in.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ in.matchesIndexKey( BSON( "" << BSON_ARRAY( 6 ) ), indexSpec ) );
+}
+
+TEST( InMatchExpression, MatchesIndexKeyMissing ) {
+ BSONObj operand = BSON( "$in" << BSON_ARRAY( 6 ) );
+ ComparisonMatchExpression eq
+ ASSERT( eq.init( "a", operand[ "$in" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "b" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ eq.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ eq.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ eq.matchesIndexKey( BSON( "" << BSON_ARRAY( 8 << 6 ) ), indexSpec ) );
+}
+
+TEST( InMatchExpression, MatchesIndexKeyArray ) {
+ BSONObj operand = BSON( "$in" << BSON_ARRAY( 4 << BSON_ARRAY( 5 ) ) );
+ InMatchExpression in;
+ ASSERT( in.init( "a", operand[ "$in" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ in.matchesIndexKey( BSON( "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ in.matchesIndexKey( BSON( "" << 5 ), indexSpec ) );
+}
+
+TEST( InMatchExpression, MatchesIndexKeyArrayValue ) {
+ BSONObjBuilder inArray;
+ inArray.append( "0", 4 ).append( "1", 5 ).appendRegex( "2", "abc", "" );
+ BSONObj operand = BSONObjBuilder().appendArray( "$in", inArray.obj() ).obj();
+ InMatchExpression in;
+ ASSERT( in.init( "a", operand[ "$in" ] ).isOK() );
+ IndexSpec indexSpec( BSON( "loc" << "mockarrayvalue" << "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ in.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 4 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ in.matchesIndexKey( BSON( "" << "dummygeohash" << "" << 6 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ in.matchesIndexKey( BSON( "" << "dummygeohash" << "" << "abcd" ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ in.matchesIndexKey( BSONObjBuilder()
+ .append( "", "dummygeohash" )
+ .appendRegex( "", "abc", "" ).obj(),
+ indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ in.matchesIndexKey( BSON( "" << "dummygeohash" << "" << "ab" ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ in.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << 5 ) ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ in.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << 9 ) ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ in.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << "abc" ) ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ in.matchesIndexKey( BSON( "" << "dummygeohash" <<
+ "" << BSON_ARRAY( 8 << "ac" ) ), indexSpec ) );
+}
+*/
}
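
The tests above pin down the matcher semantics that the parser reformatted below depends on: a $type match walks dotted paths and array positions, and an $in match treats a missing field like an explicit null while rejecting Undefined operands (addEquality returns a non-OK Status in that case). What follows is a minimal sketch, separate from the patch itself, of driving InMatchExpression directly the way these tests do; the two include paths are assumptions, and everything else uses only calls exercised above.

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/matcher/expression_leaf.h"

namespace {
using namespace mongo;

// Illustrative sketch: build {a: {$in: [5, null]}} by hand and evaluate it,
// mirroring the MatchesScalar and MatchesNull cases above.
bool exampleInMatch() {
    BSONObj operand = BSON_ARRAY(5 << BSONNULL);
    InMatchExpression in;
    in.init("a");                                         // path under test
    in.getArrayFilterEntries()->addEquality(operand[0]);  // 5
    in.getArrayFilterEntries()->addEquality(operand[1]);  // null

    return in.matchesBSON(BSON("a" << 5.0), NULL)   // numeric match ignores int/double type
        && in.matchesBSON(BSONObj(), NULL)          // missing field compares equal to null
        && !in.matchesBSON(BSON("a" << 4), NULL);   // 4 is not in the set
}
}  // namespace
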
diff --git a/src/mongo/db/matcher/expression_parser.cpp b/src/mongo/db/matcher/expression_parser.cpp
index b2977e34117..4146051bfb5 100644
--- a/src/mongo/db/matcher/expression_parser.cpp
+++ b/src/mongo/db/matcher/expression_parser.cpp
@@ -41,774 +41,743 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- /**
- * Returns true if subtree contains MatchExpression 'type'.
- */
- bool hasNode(const MatchExpression* root, MatchExpression::MatchType type) {
- if (type == root->matchType()) {
+/**
+ * Returns true if subtree contains MatchExpression 'type'.
+ */
+bool hasNode(const MatchExpression* root, MatchExpression::MatchType type) {
+ if (type == root->matchType()) {
+ return true;
+ }
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (hasNode(root->getChild(i), type)) {
return true;
}
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (hasNode(root->getChild(i), type)) {
- return true;
- }
- }
- return false;
}
+ return false;
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::string;
+using std::string;
- StatusWithMatchExpression MatchExpressionParser::_parseComparison( const char* name,
- ComparisonMatchExpression* cmp,
- const BSONElement& e ) {
- std::unique_ptr<ComparisonMatchExpression> temp(cmp);
+StatusWithMatchExpression MatchExpressionParser::_parseComparison(const char* name,
+ ComparisonMatchExpression* cmp,
+ const BSONElement& e) {
+ std::unique_ptr<ComparisonMatchExpression> temp(cmp);
- // Non-equality comparison match expressions cannot have
- // a regular expression as the argument (e.g. {a: {$gt: /b/}} is illegal).
- if (MatchExpression::EQ != cmp->matchType() && RegEx == e.type()) {
- std::stringstream ss;
- ss << "Can't have RegEx as arg to predicate over field '" << name << "'.";
- return StatusWithMatchExpression(Status(ErrorCodes::BadValue, ss.str()));
- }
-
- Status s = temp->init( name, e );
- if ( !s.isOK() )
- return StatusWithMatchExpression(s);
-
- return StatusWithMatchExpression( temp.release() );
+ // Non-equality comparison match expressions cannot have
+ // a regular expression as the argument (e.g. {a: {$gt: /b/}} is illegal).
+ if (MatchExpression::EQ != cmp->matchType() && RegEx == e.type()) {
+ std::stringstream ss;
+ ss << "Can't have RegEx as arg to predicate over field '" << name << "'.";
+ return StatusWithMatchExpression(Status(ErrorCodes::BadValue, ss.str()));
}
- StatusWithMatchExpression MatchExpressionParser::_parseSubField( const BSONObj& context,
- const AndMatchExpression* andSoFar,
- const char* name,
- const BSONElement& e,
- int level ) {
+ Status s = temp->init(name, e);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+
+ return StatusWithMatchExpression(temp.release());
+}
- // TODO: these should move to getGtLtOp, or its replacement
+StatusWithMatchExpression MatchExpressionParser::_parseSubField(const BSONObj& context,
+ const AndMatchExpression* andSoFar,
+ const char* name,
+ const BSONElement& e,
+ int level) {
+ // TODO: these should move to getGtLtOp, or its replacement
- if ( mongoutils::str::equals( "$eq", e.fieldName() ) )
- return _parseComparison( name, new EqualityMatchExpression(), e );
+ if (mongoutils::str::equals("$eq", e.fieldName()))
+ return _parseComparison(name, new EqualityMatchExpression(), e);
- if ( mongoutils::str::equals( "$not", e.fieldName() ) ) {
- return _parseNot( name, e, level );
- }
+ if (mongoutils::str::equals("$not", e.fieldName())) {
+ return _parseNot(name, e, level);
+ }
- int x = e.getGtLtOp(-1);
- switch ( x ) {
+ int x = e.getGtLtOp(-1);
+ switch (x) {
case -1:
// $where cannot be a sub-expression because it works on top-level documents only.
- if ( mongoutils::str::equals( "$where", e.fieldName() ) ) {
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$where cannot be applied to a field" );
+ if (mongoutils::str::equals("$where", e.fieldName())) {
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "$where cannot be applied to a field");
}
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- mongoutils::str::stream() << "unknown operator: "
- << e.fieldName() );
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "unknown operator: " << e.fieldName());
case BSONObj::LT:
- return _parseComparison( name, new LTMatchExpression(), e );
+ return _parseComparison(name, new LTMatchExpression(), e);
case BSONObj::LTE:
- return _parseComparison( name, new LTEMatchExpression(), e );
+ return _parseComparison(name, new LTEMatchExpression(), e);
case BSONObj::GT:
- return _parseComparison( name, new GTMatchExpression(), e );
+ return _parseComparison(name, new GTMatchExpression(), e);
case BSONObj::GTE:
- return _parseComparison( name, new GTEMatchExpression(), e );
+ return _parseComparison(name, new GTEMatchExpression(), e);
case BSONObj::NE: {
if (RegEx == e.type()) {
// Just because $ne can be rewritten as the negation of an
// equality does not mean that $ne of a regex is allowed. See SERVER-1705.
- return StatusWithMatchExpression(Status(ErrorCodes::BadValue,
- "Can't have regex as arg to $ne."));
+ return StatusWithMatchExpression(
+ Status(ErrorCodes::BadValue, "Can't have regex as arg to $ne."));
}
- StatusWithMatchExpression s = _parseComparison( name, new EqualityMatchExpression(), e );
- if ( !s.isOK() )
+ StatusWithMatchExpression s = _parseComparison(name, new EqualityMatchExpression(), e);
+ if (!s.isOK())
return s;
- std::unique_ptr<NotMatchExpression> n( new NotMatchExpression() );
- Status s2 = n->init( s.getValue() );
- if ( !s2.isOK() )
- return StatusWithMatchExpression( s2 );
- return StatusWithMatchExpression( n.release() );
+ std::unique_ptr<NotMatchExpression> n(new NotMatchExpression());
+ Status s2 = n->init(s.getValue());
+ if (!s2.isOK())
+ return StatusWithMatchExpression(s2);
+ return StatusWithMatchExpression(n.release());
}
case BSONObj::Equality:
- return _parseComparison( name, new EqualityMatchExpression(), e );
+ return _parseComparison(name, new EqualityMatchExpression(), e);
case BSONObj::opIN: {
- if ( e.type() != Array )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$in needs an array" );
- std::unique_ptr<InMatchExpression> temp( new InMatchExpression() );
- Status s = temp->init( name );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- s = _parseArrayFilterEntries( temp->getArrayFilterEntries(), e.Obj() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( temp.release() );
+ if (e.type() != Array)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$in needs an array");
+ std::unique_ptr<InMatchExpression> temp(new InMatchExpression());
+ Status s = temp->init(name);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ s = _parseArrayFilterEntries(temp->getArrayFilterEntries(), e.Obj());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(temp.release());
}
case BSONObj::NIN: {
- if ( e.type() != Array )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$nin needs an array" );
- std::unique_ptr<InMatchExpression> temp( new InMatchExpression() );
- Status s = temp->init( name );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- s = _parseArrayFilterEntries( temp->getArrayFilterEntries(), e.Obj() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
-
- std::unique_ptr<NotMatchExpression> temp2( new NotMatchExpression() );
- s = temp2->init( temp.release() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
-
- return StatusWithMatchExpression( temp2.release() );
+ if (e.type() != Array)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$nin needs an array");
+ std::unique_ptr<InMatchExpression> temp(new InMatchExpression());
+ Status s = temp->init(name);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ s = _parseArrayFilterEntries(temp->getArrayFilterEntries(), e.Obj());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+
+ std::unique_ptr<NotMatchExpression> temp2(new NotMatchExpression());
+ s = temp2->init(temp.release());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+
+ return StatusWithMatchExpression(temp2.release());
}
case BSONObj::opSIZE: {
int size = 0;
- if ( e.type() == String ) {
+ if (e.type() == String) {
// matching old odd semantics
size = 0;
- }
- else if ( e.type() == NumberInt || e.type() == NumberLong ) {
+ } else if (e.type() == NumberInt || e.type() == NumberLong) {
if (e.numberLong() < 0) {
// SERVER-11952. Setting 'size' to -1 means that no documents
// should match this $size expression.
size = -1;
- }
- else {
+ } else {
size = e.numberInt();
}
- }
- else if ( e.type() == NumberDouble ) {
- if ( e.numberInt() == e.numberDouble() ) {
+ } else if (e.type() == NumberDouble) {
+ if (e.numberInt() == e.numberDouble()) {
size = e.numberInt();
- }
- else {
+ } else {
// old semantics require exact numeric match
// so [1,2] != 1 or 2
size = -1;
}
- }
- else {
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$size needs a number" );
+ } else {
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$size needs a number");
}
- std::unique_ptr<SizeMatchExpression> temp( new SizeMatchExpression() );
- Status s = temp->init( name, size );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( temp.release() );
+ std::unique_ptr<SizeMatchExpression> temp(new SizeMatchExpression());
+ Status s = temp->init(name, size);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(temp.release());
}
case BSONObj::opEXISTS: {
- if ( e.eoo() )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$exists can't be eoo" );
- std::unique_ptr<ExistsMatchExpression> temp( new ExistsMatchExpression() );
- Status s = temp->init( name );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- if ( e.trueValue() )
- return StatusWithMatchExpression( temp.release() );
- std::unique_ptr<NotMatchExpression> temp2( new NotMatchExpression() );
- s = temp2->init( temp.release() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( temp2.release() );
+ if (e.eoo())
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$exists can't be eoo");
+ std::unique_ptr<ExistsMatchExpression> temp(new ExistsMatchExpression());
+ Status s = temp->init(name);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ if (e.trueValue())
+ return StatusWithMatchExpression(temp.release());
+ std::unique_ptr<NotMatchExpression> temp2(new NotMatchExpression());
+ s = temp2->init(temp.release());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(temp2.release());
}
case BSONObj::opTYPE: {
- if ( !e.isNumber() )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$type has to be a number" );
+ if (!e.isNumber())
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$type has to be a number");
int type = e.numberInt();
- if ( e.type() != NumberInt && type != e.number() )
+ if (e.type() != NumberInt && type != e.number())
type = -1;
- std::unique_ptr<TypeMatchExpression> temp( new TypeMatchExpression() );
- Status s = temp->init( name, type );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( temp.release() );
+ std::unique_ptr<TypeMatchExpression> temp(new TypeMatchExpression());
+ Status s = temp->init(name, type);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(temp.release());
}
case BSONObj::opMOD:
- return _parseMOD( name, e );
+ return _parseMOD(name, e);
case BSONObj::opOPTIONS: {
// TODO: try to optimize this
// we have to do this since $options can be before or after a $regex
// but we validate here
- BSONObjIterator i( context );
- while ( i.more() ) {
+ BSONObjIterator i(context);
+ while (i.more()) {
BSONElement temp = i.next();
- if ( temp.getGtLtOp( -1 ) == BSONObj::opREGEX )
- return StatusWithMatchExpression( NULL );
+ if (temp.getGtLtOp(-1) == BSONObj::opREGEX)
+ return StatusWithMatchExpression(NULL);
}
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$options needs a $regex" );
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$options needs a $regex");
}
case BSONObj::opREGEX: {
- return _parseRegexDocument( name, context );
+ return _parseRegexDocument(name, context);
}
case BSONObj::opELEM_MATCH:
- return _parseElemMatch( name, e, level );
+ return _parseElemMatch(name, e, level);
case BSONObj::opALL:
- return _parseAll( name, e, level );
+ return _parseAll(name, e, level);
case BSONObj::opWITHIN:
case BSONObj::opGEO_INTERSECTS:
- return expressionParserGeoCallback( name, x, context );
- }
-
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- mongoutils::str::stream() << "not handled: " << e.fieldName() );
+ return expressionParserGeoCallback(name, x, context);
}
- StatusWithMatchExpression MatchExpressionParser::_parse( const BSONObj& obj, int level ) {
- if (level > kMaximumTreeDepth) {
- mongoutils::str::stream ss;
- ss << "exceeded maximum query tree depth of " << kMaximumTreeDepth
- << " at " << obj.toString();
- return StatusWithMatchExpression( ErrorCodes::BadValue, ss );
- }
-
- std::unique_ptr<AndMatchExpression> root( new AndMatchExpression() );
-
- bool topLevel = (level == 0);
- level++;
-
- BSONObjIterator i( obj );
- while ( i.more() ){
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "not handled: " << e.fieldName());
+}
- BSONElement e = i.next();
- if ( e.fieldName()[0] == '$' ) {
- const char * rest = e.fieldName() + 1;
+StatusWithMatchExpression MatchExpressionParser::_parse(const BSONObj& obj, int level) {
+ if (level > kMaximumTreeDepth) {
+ mongoutils::str::stream ss;
+ ss << "exceeded maximum query tree depth of " << kMaximumTreeDepth << " at "
+ << obj.toString();
+ return StatusWithMatchExpression(ErrorCodes::BadValue, ss);
+ }
- // TODO: optimize if block?
- if ( mongoutils::str::equals( "or", rest ) ) {
- if ( e.type() != Array )
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$or needs an array" );
- std::unique_ptr<OrMatchExpression> temp( new OrMatchExpression() );
- Status s = _parseTreeList( e.Obj(), temp.get(), level );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- root->add( temp.release() );
- }
- else if ( mongoutils::str::equals( "and", rest ) ) {
- if ( e.type() != Array )
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "and needs an array" );
- std::unique_ptr<AndMatchExpression> temp( new AndMatchExpression() );
- Status s = _parseTreeList( e.Obj(), temp.get(), level );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- root->add( temp.release() );
- }
- else if ( mongoutils::str::equals( "nor", rest ) ) {
- if ( e.type() != Array )
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "and needs an array" );
- std::unique_ptr<NorMatchExpression> temp( new NorMatchExpression() );
- Status s = _parseTreeList( e.Obj(), temp.get(), level );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- root->add( temp.release() );
- }
- else if ( mongoutils::str::equals( "atomic", rest ) ||
- mongoutils::str::equals( "isolated", rest ) ) {
- if ( !topLevel )
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$atomic/$isolated has to be at the top level" );
- if ( e.trueValue() )
- root->add( new AtomicMatchExpression() );
- }
- else if ( mongoutils::str::equals( "where", rest ) ) {
- StatusWithMatchExpression s = _whereCallback->parseWhere(e);
- if ( !s.isOK() )
- return s;
- root->add( s.getValue() );
- }
- else if ( mongoutils::str::equals( "text", rest ) ) {
- if ( e.type() != Object ) {
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$text expects an object" );
- }
- StatusWithMatchExpression s = expressionParserTextCallback( e.Obj() );
- if ( !s.isOK() ) {
- return s;
- }
- root->add( s.getValue() );
- }
- else if ( mongoutils::str::equals( "comment", rest ) ) {
- }
- else if ( mongoutils::str::equals( "ref", rest ) ||
- mongoutils::str::equals( "id", rest ) ||
- mongoutils::str::equals( "db", rest ) ) {
- // DBRef fields.
- std::unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- Status s = eq->init( e.fieldName(), e );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
-
- root->add( eq.release() );
+ std::unique_ptr<AndMatchExpression> root(new AndMatchExpression());
+
+ bool topLevel = (level == 0);
+ level++;
+
+ BSONObjIterator i(obj);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.fieldName()[0] == '$') {
+ const char* rest = e.fieldName() + 1;
+
+ // TODO: optimize if block?
+ if (mongoutils::str::equals("or", rest)) {
+ if (e.type() != Array)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$or needs an array");
+ std::unique_ptr<OrMatchExpression> temp(new OrMatchExpression());
+ Status s = _parseTreeList(e.Obj(), temp.get(), level);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ root->add(temp.release());
+ } else if (mongoutils::str::equals("and", rest)) {
+ if (e.type() != Array)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "and needs an array");
+ std::unique_ptr<AndMatchExpression> temp(new AndMatchExpression());
+ Status s = _parseTreeList(e.Obj(), temp.get(), level);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ root->add(temp.release());
+ } else if (mongoutils::str::equals("nor", rest)) {
+ if (e.type() != Array)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "and needs an array");
+ std::unique_ptr<NorMatchExpression> temp(new NorMatchExpression());
+ Status s = _parseTreeList(e.Obj(), temp.get(), level);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ root->add(temp.release());
+ } else if (mongoutils::str::equals("atomic", rest) ||
+ mongoutils::str::equals("isolated", rest)) {
+ if (!topLevel)
+ return StatusWithMatchExpression(
+ ErrorCodes::BadValue, "$atomic/$isolated has to be at the top level");
+ if (e.trueValue())
+ root->add(new AtomicMatchExpression());
+ } else if (mongoutils::str::equals("where", rest)) {
+ StatusWithMatchExpression s = _whereCallback->parseWhere(e);
+ if (!s.isOK())
+ return s;
+ root->add(s.getValue());
+ } else if (mongoutils::str::equals("text", rest)) {
+ if (e.type() != Object) {
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "$text expects an object");
}
- else {
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "unknown top level operator: "
- << e.fieldName() );
+ StatusWithMatchExpression s = expressionParserTextCallback(e.Obj());
+ if (!s.isOK()) {
+ return s;
}
-
- continue;
- }
-
- if ( _isExpressionDocument( e, false ) ) {
- Status s = _parseSub( e.fieldName(), e.Obj(), root.get(), level );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- continue;
+ root->add(s.getValue());
+ } else if (mongoutils::str::equals("comment", rest)) {
+ } else if (mongoutils::str::equals("ref", rest) ||
+ mongoutils::str::equals("id", rest) || mongoutils::str::equals("db", rest)) {
+ // DBRef fields.
+ std::unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ Status s = eq->init(e.fieldName(), e);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+
+ root->add(eq.release());
+ } else {
+ return StatusWithMatchExpression(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "unknown top level operator: " << e.fieldName());
}
- if ( e.type() == RegEx ) {
- StatusWithMatchExpression result = _parseRegexElement( e.fieldName(), e );
- if ( !result.isOK() )
- return result;
- root->add( result.getValue() );
- continue;
- }
-
- std::unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- Status s = eq->init( e.fieldName(), e );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
+ continue;
+ }
- root->add( eq.release() );
+ if (_isExpressionDocument(e, false)) {
+ Status s = _parseSub(e.fieldName(), e.Obj(), root.get(), level);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ continue;
}
- if ( root->numChildren() == 1 ) {
- const MatchExpression* real = root->getChild(0);
- root->clearAndRelease();
- return StatusWithMatchExpression( const_cast<MatchExpression*>(real) );
+ if (e.type() == RegEx) {
+ StatusWithMatchExpression result = _parseRegexElement(e.fieldName(), e);
+ if (!result.isOK())
+ return result;
+ root->add(result.getValue());
+ continue;
}
- return StatusWithMatchExpression( root.release() );
+ std::unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ Status s = eq->init(e.fieldName(), e);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+
+ root->add(eq.release());
}
- Status MatchExpressionParser::_parseSub( const char* name,
- const BSONObj& sub,
- AndMatchExpression* root,
- int level ) {
- // The one exception to {field : {fully contained argument} } is, of course, geo. Example:
- // sub == { field : {$near[Sphere]: [0,0], $maxDistance: 1000, $minDistance: 10 } }
- // We peek inside of 'sub' to see if it's possibly a $near. If so, we can't iterate over
- // its subfields and parse them one at a time (there is no $maxDistance without $near), so
- // we hand the entire object over to the geo parsing routines.
-
- if (level > kMaximumTreeDepth) {
- mongoutils::str::stream ss;
- ss << "exceeded maximum query tree depth of " << kMaximumTreeDepth
- << " at " << sub.toString();
- return Status( ErrorCodes::BadValue, ss );
- }
+ if (root->numChildren() == 1) {
+ const MatchExpression* real = root->getChild(0);
+ root->clearAndRelease();
+ return StatusWithMatchExpression(const_cast<MatchExpression*>(real));
+ }
+
+ return StatusWithMatchExpression(root.release());
+}
- level++;
-
- BSONObjIterator geoIt(sub);
- if (geoIt.more()) {
- BSONElement firstElt = geoIt.next();
- if (firstElt.isABSONObj()) {
- const char* fieldName = firstElt.fieldName();
- // TODO: Having these $fields here isn't ideal but we don't want to pull in anything
- // from db/geo at this point, since it may not actually be linked in...
- if (mongoutils::str::equals(fieldName, "$near")
- || mongoutils::str::equals(fieldName, "$nearSphere")
- || mongoutils::str::equals(fieldName, "$geoNear")
- || mongoutils::str::equals(fieldName, "$maxDistance")
- || mongoutils::str::equals(fieldName, "$minDistance")) {
-
- StatusWithMatchExpression s = expressionParserGeoCallback(name,
- firstElt.getGtLtOp(),
- sub);
- if (s.isOK()) {
- root->add(s.getValue());
- }
-
- // Propagate geo parsing result to caller.
- return s.getStatus();
+Status MatchExpressionParser::_parseSub(const char* name,
+ const BSONObj& sub,
+ AndMatchExpression* root,
+ int level) {
+ // The one exception to {field : {fully contained argument} } is, of course, geo. Example:
+ // sub == { field : {$near[Sphere]: [0,0], $maxDistance: 1000, $minDistance: 10 } }
+ // We peek inside of 'sub' to see if it's possibly a $near. If so, we can't iterate over
+ // its subfields and parse them one at a time (there is no $maxDistance without $near), so
+ // we hand the entire object over to the geo parsing routines.
+
+ if (level > kMaximumTreeDepth) {
+ mongoutils::str::stream ss;
+ ss << "exceeded maximum query tree depth of " << kMaximumTreeDepth << " at "
+ << sub.toString();
+ return Status(ErrorCodes::BadValue, ss);
+ }
+
+ level++;
+
+ BSONObjIterator geoIt(sub);
+ if (geoIt.more()) {
+ BSONElement firstElt = geoIt.next();
+ if (firstElt.isABSONObj()) {
+ const char* fieldName = firstElt.fieldName();
+ // TODO: Having these $fields here isn't ideal but we don't want to pull in anything
+ // from db/geo at this point, since it may not actually be linked in...
+ if (mongoutils::str::equals(fieldName, "$near") ||
+ mongoutils::str::equals(fieldName, "$nearSphere") ||
+ mongoutils::str::equals(fieldName, "$geoNear") ||
+ mongoutils::str::equals(fieldName, "$maxDistance") ||
+ mongoutils::str::equals(fieldName, "$minDistance")) {
+ StatusWithMatchExpression s =
+ expressionParserGeoCallback(name, firstElt.getGtLtOp(), sub);
+ if (s.isOK()) {
+ root->add(s.getValue());
}
+
+ // Propagate geo parsing result to caller.
+ return s.getStatus();
}
}
+ }
- BSONObjIterator j( sub );
- while ( j.more() ) {
- BSONElement deep = j.next();
-
- StatusWithMatchExpression s = _parseSubField( sub, root, name, deep, level );
- if ( !s.isOK() )
- return s.getStatus();
+ BSONObjIterator j(sub);
+ while (j.more()) {
+ BSONElement deep = j.next();
- if ( s.getValue() )
- root->add( s.getValue() );
- }
+ StatusWithMatchExpression s = _parseSubField(sub, root, name, deep, level);
+ if (!s.isOK())
+ return s.getStatus();
- return Status::OK();
+ if (s.getValue())
+ root->add(s.getValue());
}
- bool MatchExpressionParser::_isExpressionDocument( const BSONElement& e,
- bool allowIncompleteDBRef ) {
- if ( e.type() != Object )
- return false;
+ return Status::OK();
+}
- BSONObj o = e.Obj();
- if ( o.isEmpty() )
- return false;
+bool MatchExpressionParser::_isExpressionDocument(const BSONElement& e, bool allowIncompleteDBRef) {
+ if (e.type() != Object)
+ return false;
- const char* name = o.firstElement().fieldName();
- if ( name[0] != '$' )
- return false;
+ BSONObj o = e.Obj();
+ if (o.isEmpty())
+ return false;
- if ( _isDBRefDocument( o, allowIncompleteDBRef ) ) {
- return false;
- }
+ const char* name = o.firstElement().fieldName();
+ if (name[0] != '$')
+ return false;
- return true;
+ if (_isDBRefDocument(o, allowIncompleteDBRef)) {
+ return false;
}
- /**
- * DBRef fields are ordered in the collection.
- * In the query, we consider an embedded object a query on
- * a DBRef as long as it contains $ref and $id.
- * Required fields: $ref and $id (if incomplete DBRefs are not allowed)
- *
- * If incomplete DBRefs are allowed, we accept the BSON object as long as it
- * contains $ref, $id or $db.
- *
- * Field names are checked but not field types.
- */
- bool MatchExpressionParser::_isDBRefDocument( const BSONObj& obj, bool allowIncompleteDBRef ) {
- bool hasRef = false;
- bool hasID = false;
- bool hasDB = false;
-
- BSONObjIterator i( obj );
- while ( i.more() && !( hasRef && hasID ) ) {
- BSONElement element = i.next();
- const char *fieldName = element.fieldName();
- // $ref
- if ( !hasRef && mongoutils::str::equals( "$ref", fieldName ) ) {
- hasRef = true;
- }
- // $id
- else if ( !hasID && mongoutils::str::equals( "$id", fieldName ) ) {
- hasID = true;
- }
- // $db
- else if ( !hasDB && mongoutils::str::equals( "$db", fieldName ) ) {
- hasDB = true;
- }
- }
+ return true;
+}
- if (allowIncompleteDBRef) {
- return hasRef || hasID || hasDB;
+/**
+ * DBRef fields are ordered in the collection.
+ * In the query, we consider an embedded object a query on
+ * a DBRef as long as it contains $ref and $id.
+ * Required fields: $ref and $id (if incomplete DBRefs are not allowed)
+ *
+ * If incomplete DBRefs are allowed, we accept the BSON object as long as it
+ * contains $ref, $id or $db.
+ *
+ * Field names are checked but not field types.
+ */
+bool MatchExpressionParser::_isDBRefDocument(const BSONObj& obj, bool allowIncompleteDBRef) {
+ bool hasRef = false;
+ bool hasID = false;
+ bool hasDB = false;
+
+ BSONObjIterator i(obj);
+ while (i.more() && !(hasRef && hasID)) {
+ BSONElement element = i.next();
+ const char* fieldName = element.fieldName();
+ // $ref
+ if (!hasRef && mongoutils::str::equals("$ref", fieldName)) {
+ hasRef = true;
+ }
+ // $id
+ else if (!hasID && mongoutils::str::equals("$id", fieldName)) {
+ hasID = true;
+ }
+ // $db
+ else if (!hasDB && mongoutils::str::equals("$db", fieldName)) {
+ hasDB = true;
}
-
- return hasRef && hasID;
}
- StatusWithMatchExpression MatchExpressionParser::_parseMOD( const char* name,
- const BSONElement& e ) {
-
- if ( e.type() != Array )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "malformed mod, needs to be an array" );
-
- BSONObjIterator i( e.Obj() );
-
- if ( !i.more() )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "malformed mod, not enough elements" );
- BSONElement d = i.next();
- if ( !d.isNumber() )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "malformed mod, divisor not a number" );
-
- if ( !i.more() )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "malformed mod, not enough elements" );
- BSONElement r = i.next();
- if ( !d.isNumber() )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "malformed mod, remainder not a number" );
+ if (allowIncompleteDBRef) {
+ return hasRef || hasID || hasDB;
+ }
- if ( i.more() )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "malformed mod, too many elements" );
+ return hasRef && hasID;
+}
- std::unique_ptr<ModMatchExpression> temp( new ModMatchExpression() );
- Status s = temp->init( name, d.numberInt(), r.numberInt() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( temp.release() );
- }
+StatusWithMatchExpression MatchExpressionParser::_parseMOD(const char* name, const BSONElement& e) {
+ if (e.type() != Array)
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "malformed mod, needs to be an array");
+
+ BSONObjIterator i(e.Obj());
+
+ if (!i.more())
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "malformed mod, not enough elements");
+ BSONElement d = i.next();
+ if (!d.isNumber())
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "malformed mod, divisor not a number");
+
+ if (!i.more())
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "malformed mod, not enough elements");
+ BSONElement r = i.next();
+ if (!d.isNumber())
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "malformed mod, remainder not a number");
+
+ if (i.more())
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "malformed mod, too many elements");
+
+ std::unique_ptr<ModMatchExpression> temp(new ModMatchExpression());
+ Status s = temp->init(name, d.numberInt(), r.numberInt());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(temp.release());
+}
- StatusWithMatchExpression MatchExpressionParser::_parseRegexElement( const char* name,
- const BSONElement& e ) {
- if ( e.type() != RegEx )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "not a regex" );
+StatusWithMatchExpression MatchExpressionParser::_parseRegexElement(const char* name,
+ const BSONElement& e) {
+ if (e.type() != RegEx)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "not a regex");
- std::unique_ptr<RegexMatchExpression> temp( new RegexMatchExpression() );
- Status s = temp->init( name, e.regex(), e.regexFlags() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( temp.release() );
- }
+ std::unique_ptr<RegexMatchExpression> temp(new RegexMatchExpression());
+ Status s = temp->init(name, e.regex(), e.regexFlags());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(temp.release());
+}
- StatusWithMatchExpression MatchExpressionParser::_parseRegexDocument( const char* name,
- const BSONObj& doc ) {
- string regex;
- string regexOptions;
+StatusWithMatchExpression MatchExpressionParser::_parseRegexDocument(const char* name,
+ const BSONObj& doc) {
+ string regex;
+ string regexOptions;
- BSONObjIterator i( doc );
- while ( i.more() ) {
- BSONElement e = i.next();
- switch ( e.getGtLtOp() ) {
+ BSONObjIterator i(doc);
+ while (i.more()) {
+ BSONElement e = i.next();
+ switch (e.getGtLtOp()) {
case BSONObj::opREGEX:
- if ( e.type() == String ) {
+ if (e.type() == String) {
regex = e.String();
- }
- else if ( e.type() == RegEx ) {
+ } else if (e.type() == RegEx) {
regex = e.regex();
regexOptions = e.regexFlags();
- }
- else {
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$regex has to be a string" );
+ } else {
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "$regex has to be a string");
}
break;
case BSONObj::opOPTIONS:
- if ( e.type() != String )
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$options has to be a string" );
+ if (e.type() != String)
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "$options has to be a string");
regexOptions = e.String();
break;
default:
break;
- }
-
}
-
- std::unique_ptr<RegexMatchExpression> temp( new RegexMatchExpression() );
- Status s = temp->init( name, regex, regexOptions );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( temp.release() );
-
}
- Status MatchExpressionParser::_parseArrayFilterEntries( ArrayFilterEntries* entries,
- const BSONObj& theArray ) {
-
- BSONObjIterator i( theArray );
- while ( i.more() ) {
- BSONElement e = i.next();
+ std::unique_ptr<RegexMatchExpression> temp(new RegexMatchExpression());
+ Status s = temp->init(name, regex, regexOptions);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(temp.release());
+}
- // allow DBRefs but reject all fields with names starting wiht $
- if ( _isExpressionDocument( e, false ) ) {
- return Status( ErrorCodes::BadValue, "cannot nest $ under $in" );
- }
+Status MatchExpressionParser::_parseArrayFilterEntries(ArrayFilterEntries* entries,
+ const BSONObj& theArray) {
+ BSONObjIterator i(theArray);
+ while (i.more()) {
+ BSONElement e = i.next();
- if ( e.type() == RegEx ) {
- std::unique_ptr<RegexMatchExpression> r( new RegexMatchExpression() );
- Status s = r->init( "", e );
- if ( !s.isOK() )
- return s;
- s = entries->addRegex( r.release() );
- if ( !s.isOK() )
- return s;
- }
- else {
- Status s = entries->addEquality( e );
- if ( !s.isOK() )
- return s;
- }
+ // allow DBRefs but reject all fields with names starting with $
+ if (_isExpressionDocument(e, false)) {
+ return Status(ErrorCodes::BadValue, "cannot nest $ under $in");
}
- return Status::OK();
+ if (e.type() == RegEx) {
+ std::unique_ptr<RegexMatchExpression> r(new RegexMatchExpression());
+ Status s = r->init("", e);
+ if (!s.isOK())
+ return s;
+ s = entries->addRegex(r.release());
+ if (!s.isOK())
+ return s;
+ } else {
+ Status s = entries->addEquality(e);
+ if (!s.isOK())
+ return s;
+ }
}
+ return Status::OK();
+}
- StatusWithMatchExpression MatchExpressionParser::_parseElemMatch( const char* name,
- const BSONElement& e,
- int level ) {
- if ( e.type() != Object )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$elemMatch needs an Object" );
-
- BSONObj obj = e.Obj();
-
- // $elemMatch value case applies when the children all
- // work on the field 'name'.
- // This is the case when:
- // 1) the argument is an expression document; and
- // 2) expression is not a AND/NOR/OR logical operator. Children of
- // these logical operators are initialized with field names.
- // 3) expression is not a WHERE operator. WHERE works on objects instead
- // of specific field.
- bool isElemMatchValue = false;
- if ( _isExpressionDocument( e, true ) ) {
- BSONObj o = e.Obj();
- BSONElement elt = o.firstElement();
- invariant( !elt.eoo() );
-
- isElemMatchValue = !mongoutils::str::equals( "$and", elt.fieldName() ) &&
- !mongoutils::str::equals( "$nor", elt.fieldName() ) &&
- !mongoutils::str::equals( "$or", elt.fieldName() ) &&
- !mongoutils::str::equals( "$where", elt.fieldName() );
- }
+StatusWithMatchExpression MatchExpressionParser::_parseElemMatch(const char* name,
+ const BSONElement& e,
+ int level) {
+ if (e.type() != Object)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$elemMatch needs an Object");
+
+ BSONObj obj = e.Obj();
+
+ // $elemMatch value case applies when the children all
+ // work on the field 'name'.
+ // This is the case when:
+ // 1) the argument is an expression document; and
+ // 2) expression is not an AND/NOR/OR logical operator. Children of
+ // these logical operators are initialized with field names.
+ // 3) expression is not a WHERE operator. WHERE works on objects instead
+ // of a specific field.
+ bool isElemMatchValue = false;
+ if (_isExpressionDocument(e, true)) {
+ BSONObj o = e.Obj();
+ BSONElement elt = o.firstElement();
+ invariant(!elt.eoo());
- if ( isElemMatchValue ) {
- // value case
+ isElemMatchValue = !mongoutils::str::equals("$and", elt.fieldName()) &&
+ !mongoutils::str::equals("$nor", elt.fieldName()) &&
+ !mongoutils::str::equals("$or", elt.fieldName()) &&
+ !mongoutils::str::equals("$where", elt.fieldName());
+ }
- AndMatchExpression theAnd;
- Status s = _parseSub( "", obj, &theAnd, level );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
+ if (isElemMatchValue) {
+ // value case
- std::unique_ptr<ElemMatchValueMatchExpression> temp( new ElemMatchValueMatchExpression() );
- s = temp->init( name );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
+ AndMatchExpression theAnd;
+ Status s = _parseSub("", obj, &theAnd, level);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
- for ( size_t i = 0; i < theAnd.numChildren(); i++ ) {
- temp->add( theAnd.getChild( i ) );
- }
- theAnd.clearAndRelease();
+ std::unique_ptr<ElemMatchValueMatchExpression> temp(new ElemMatchValueMatchExpression());
+ s = temp->init(name);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
- return StatusWithMatchExpression( temp.release() );
+ for (size_t i = 0; i < theAnd.numChildren(); i++) {
+ temp->add(theAnd.getChild(i));
}
+ theAnd.clearAndRelease();
- // DBRef value case
- // A DBRef document under a $elemMatch should be treated as an object case
- // because it may contain non-DBRef fields in addition to $ref, $id and $db.
-
- // object case
+ return StatusWithMatchExpression(temp.release());
+ }
- StatusWithMatchExpression subRaw = _parse( obj, level );
- if ( !subRaw.isOK() )
- return subRaw;
- std::unique_ptr<MatchExpression> sub( subRaw.getValue() );
+ // DBRef value case
+ // A DBRef document under a $elemMatch should be treated as an object case
+ // because it may contain non-DBRef fields in addition to $ref, $id and $db.
- // $where is not supported under $elemMatch because $where
- // applies to top-level document, not array elements in a field.
- if ( hasNode( sub.get(), MatchExpression::WHERE ) ) {
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$elemMatch cannot contain $where expression" );
- }
+ // object case
- std::unique_ptr<ElemMatchObjectMatchExpression> temp( new ElemMatchObjectMatchExpression() );
- Status status = temp->init( name, sub.release() );
- if ( !status.isOK() )
- return StatusWithMatchExpression( status );
+ StatusWithMatchExpression subRaw = _parse(obj, level);
+ if (!subRaw.isOK())
+ return subRaw;
+ std::unique_ptr<MatchExpression> sub(subRaw.getValue());
- return StatusWithMatchExpression( temp.release() );
+ // $where is not supported under $elemMatch because $where
+ // applies to top-level document, not array elements in a field.
+ if (hasNode(sub.get(), MatchExpression::WHERE)) {
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "$elemMatch cannot contain $where expression");
}
- StatusWithMatchExpression MatchExpressionParser::_parseAll( const char* name,
- const BSONElement& e,
- int level ) {
- if ( e.type() != Array )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$all needs an array" );
-
- BSONObj arr = e.Obj();
- std::unique_ptr<AndMatchExpression> myAnd( new AndMatchExpression() );
- BSONObjIterator i( arr );
-
- if ( arr.firstElement().type() == Object &&
- mongoutils::str::equals( "$elemMatch",
- arr.firstElement().Obj().firstElement().fieldName() ) ) {
- // $all : [ { $elemMatch : {} } ... ]
-
- while ( i.more() ) {
- BSONElement hopefullyElemMatchElement = i.next();
-
- if ( hopefullyElemMatchElement.type() != Object ) {
- // $all : [ { $elemMatch : ... }, 5 ]
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$all/$elemMatch has to be consistent" );
- }
+ std::unique_ptr<ElemMatchObjectMatchExpression> temp(new ElemMatchObjectMatchExpression());
+ Status status = temp->init(name, sub.release());
+ if (!status.isOK())
+ return StatusWithMatchExpression(status);
- BSONObj hopefullyElemMatchObj = hopefullyElemMatchElement.Obj();
- if ( !mongoutils::str::equals( "$elemMatch",
- hopefullyElemMatchObj.firstElement().fieldName() ) ) {
- // $all : [ { $elemMatch : ... }, { x : 5 } ]
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$all/$elemMatch has to be consistent" );
- }
+ return StatusWithMatchExpression(temp.release());
+}
- StatusWithMatchExpression inner =
- _parseElemMatch( name, hopefullyElemMatchObj.firstElement(), level );
- if ( !inner.isOK() )
- return inner;
- myAnd->add( inner.getValue() );
- }
+StatusWithMatchExpression MatchExpressionParser::_parseAll(const char* name,
+ const BSONElement& e,
+ int level) {
+ if (e.type() != Array)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$all needs an array");
- return StatusWithMatchExpression( myAnd.release() );
- }
+ BSONObj arr = e.Obj();
+ std::unique_ptr<AndMatchExpression> myAnd(new AndMatchExpression());
+ BSONObjIterator i(arr);
- while ( i.more() ) {
- BSONElement e = i.next();
+ if (arr.firstElement().type() == Object &&
+ mongoutils::str::equals("$elemMatch",
+ arr.firstElement().Obj().firstElement().fieldName())) {
+ // $all : [ { $elemMatch : {} } ... ]
- if ( e.type() == RegEx ) {
- std::unique_ptr<RegexMatchExpression> r( new RegexMatchExpression() );
- Status s = r->init( name, e );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- myAnd->add( r.release() );
- }
- else if ( e.type() == Object && e.Obj().firstElement().getGtLtOp(-1) != -1 ) {
- return StatusWithMatchExpression( ErrorCodes::BadValue, "no $ expressions in $all" );
+ while (i.more()) {
+ BSONElement hopefullyElemMatchElement = i.next();
+
+ if (hopefullyElemMatchElement.type() != Object) {
+ // $all : [ { $elemMatch : ... }, 5 ]
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "$all/$elemMatch has to be consistent");
}
- else {
- std::unique_ptr<EqualityMatchExpression> x( new EqualityMatchExpression() );
- Status s = x->init( name, e );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- myAnd->add( x.release() );
+
+ BSONObj hopefullyElemMatchObj = hopefullyElemMatchElement.Obj();
+ if (!mongoutils::str::equals("$elemMatch",
+ hopefullyElemMatchObj.firstElement().fieldName())) {
+ // $all : [ { $elemMatch : ... }, { x : 5 } ]
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "$all/$elemMatch has to be consistent");
}
- }
- if ( myAnd->numChildren() == 0 ) {
- return StatusWithMatchExpression( new FalseMatchExpression() );
+ StatusWithMatchExpression inner =
+ _parseElemMatch(name, hopefullyElemMatchObj.firstElement(), level);
+ if (!inner.isOK())
+ return inner;
+ myAnd->add(inner.getValue());
}
- return StatusWithMatchExpression( myAnd.release() );
+ return StatusWithMatchExpression(myAnd.release());
}
- StatusWithMatchExpression MatchExpressionParser::WhereCallback::parseWhere(
- const BSONElement& where) const {
- return StatusWithMatchExpression(ErrorCodes::NoWhereParseContext,
- "no context for parsing $where");
+ while (i.more()) {
+ BSONElement e = i.next();
+
+ if (e.type() == RegEx) {
+ std::unique_ptr<RegexMatchExpression> r(new RegexMatchExpression());
+ Status s = r->init(name, e);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ myAnd->add(r.release());
+ } else if (e.type() == Object && e.Obj().firstElement().getGtLtOp(-1) != -1) {
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "no $ expressions in $all");
+ } else {
+ std::unique_ptr<EqualityMatchExpression> x(new EqualityMatchExpression());
+ Status s = x->init(name, e);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ myAnd->add(x.release());
+ }
}
- // Geo
- StatusWithMatchExpression expressionParserGeoCallbackDefault( const char* name,
- int type,
- const BSONObj& section ) {
- return StatusWithMatchExpression( ErrorCodes::BadValue, "geo not linked in" );
+ if (myAnd->numChildren() == 0) {
+ return StatusWithMatchExpression(new FalseMatchExpression());
}
- MatchExpressionParserGeoCallback expressionParserGeoCallback =
- expressionParserGeoCallbackDefault;
+ return StatusWithMatchExpression(myAnd.release());
+}
- // Text
- StatusWithMatchExpression expressionParserTextCallbackDefault( const BSONObj& queryObj ) {
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$text not linked in" );
- }
+StatusWithMatchExpression MatchExpressionParser::WhereCallback::parseWhere(
+ const BSONElement& where) const {
+ return StatusWithMatchExpression(ErrorCodes::NoWhereParseContext,
+ "no context for parsing $where");
+}
- MatchExpressionParserTextCallback expressionParserTextCallback =
- expressionParserTextCallbackDefault;
+// Geo
+StatusWithMatchExpression expressionParserGeoCallbackDefault(const char* name,
+ int type,
+ const BSONObj& section) {
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "geo not linked in");
+}
+
+MatchExpressionParserGeoCallback expressionParserGeoCallback = expressionParserGeoCallbackDefault;
+
+// Text
+StatusWithMatchExpression expressionParserTextCallbackDefault(const BSONObj& queryObj) {
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$text not linked in");
+}
+MatchExpressionParserTextCallback expressionParserTextCallback =
+ expressionParserTextCallbackDefault;
}
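
The reformatted parser above, and its header below, keep the public contract intact: MatchExpressionParser::parse takes a BSONObj that the caller must keep alive, because the resulting tree holds BSONElement views into it, and the returned StatusWithMatchExpression hands ownership of the tree to the caller. Here is a minimal usage sketch of that contract, separate from the patch itself; the bsonobjbuilder include path is an assumption, while expression_parser.h, parse, isOK, getValue and matchesBSON all appear in this diff.

#include <memory>

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/matcher/expression_parser.h"

namespace {
using namespace mongo;

// Illustrative sketch: parse {a: {$gte: 5}, b: {$in: [1, 2]}} and evaluate it.
bool exampleParseAndMatch() {
    // 'filter' must outlive the parsed tree, which keeps BSONElement views into it.
    BSONObj filter = BSON("a" << BSON("$gte" << 5) << "b" << BSON("$in" << BSON_ARRAY(1 << 2)));

    StatusWithMatchExpression parsed = MatchExpressionParser::parse(filter);
    if (!parsed.isOK()) {
        return false;  // e.g. unknown operator, $in without an array, tree too deep
    }
    std::unique_ptr<MatchExpression> expr(parsed.getValue());  // caller owns the tree

    return expr->matchesBSON(BSON("a" << 7 << "b" << 2), NULL)    // both predicates hold
        && !expr->matchesBSON(BSON("a" << 1 << "b" << 2), NULL);  // $gte fails
}
}  // namespace
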
diff --git a/src/mongo/db/matcher/expression_parser.h b/src/mongo/db/matcher/expression_parser.h
index fdec56d1ed7..92c48bd7e11 100644
--- a/src/mongo/db/matcher/expression_parser.h
+++ b/src/mongo/db/matcher/expression_parser.h
@@ -39,175 +39,157 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
- typedef StatusWith<MatchExpression*> StatusWithMatchExpression;
+typedef StatusWith<MatchExpression*> StatusWithMatchExpression;
- class MatchExpressionParser {
+class MatchExpressionParser {
+public:
+ /**
+ * In general, expression parsing and matching should not require context, but the $where
+ * clause is an exception in that it needs to read the sys.js collection.
+ *
+ * The default behaviour is to return an error status that $where context is not present.
+ *
+ * Do not use this class to pass in generic context as it should only be used for $where.
+ */
+ class WhereCallback {
public:
+ virtual StatusWithMatchExpression parseWhere(const BSONElement& where) const;
- /**
- * In general, expression parsing and matching should not require context, but the $where
- * clause is an exception in that it needs to read the sys.js collection.
- *
- * The default behaviour is to return an error status that $where context is not present.
- *
- * Do not use this class to pass-in generic context as it should only be used for $where.
- */
- class WhereCallback {
- public:
- virtual StatusWithMatchExpression parseWhere(const BSONElement& where) const;
-
- virtual ~WhereCallback() { }
- };
-
- /**
- * caller has to maintain ownership obj
- * the tree has views (BSONElement) into obj
- */
- static StatusWithMatchExpression parse(
- const BSONObj& obj,
- const WhereCallback& whereCallback = WhereCallback()) {
- // The 0 initializes the match expression tree depth.
- return MatchExpressionParser(&whereCallback)._parse(obj, 0);
- }
-
- private:
-
- explicit MatchExpressionParser(const WhereCallback* whereCallback)
- : _whereCallback(whereCallback) {
-
- }
-
- /**
- * 5 = false
- * { a : 5 } = false
- * { $lt : 5 } = true
- * { $ref: "s", $id: "x" } = false
- * { $ref: "s", $id: "x", $db: "mydb" } = false
- * { $ref : "s" } = false (if incomplete DBRef is allowed)
- * { $id : "x" } = false (if incomplete DBRef is allowed)
- * { $db : "mydb" } = false (if incomplete DBRef is allowed)
- */
- bool _isExpressionDocument( const BSONElement& e, bool allowIncompleteDBRef );
-
- /**
- * { $ref: "s", $id: "x" } = true
- * { $ref : "s" } = true (if incomplete DBRef is allowed)
- * { $id : "x" } = true (if incomplete DBRef is allowed)
- * { $db : "x" } = true (if incomplete DBRef is allowed)
- */
- bool _isDBRefDocument( const BSONObj& obj, bool allowIncompleteDBRef );
-
- /**
- * Parse 'obj' and return either a MatchExpression or an error.
- *
- * 'level' tracks the current depth of the tree across recursive calls to this
- * function. Used in order to apply special logic at the top-level and to return an
- * error if the tree exceeds the maximum allowed depth.
- */
- StatusWithMatchExpression _parse( const BSONObj& obj, int level );
-
- /**
- * parses a field in a sub expression
- * if the query is { x : { $gt : 5, $lt : 8 } }
- * e is { $gt : 5, $lt : 8 }
- */
- Status _parseSub( const char* name,
- const BSONObj& obj,
- AndMatchExpression* root,
- int level );
-
- /**
- * parses a single field in a sub expression
- * if the query is { x : { $gt : 5, $lt : 8 } }
- * e is $gt : 5
- */
- StatusWithMatchExpression _parseSubField( const BSONObj& context,
- const AndMatchExpression* andSoFar,
- const char* name,
- const BSONElement& e,
- int level );
-
- StatusWithMatchExpression _parseComparison( const char* name,
- ComparisonMatchExpression* cmp,
- const BSONElement& e );
-
- StatusWithMatchExpression _parseMOD( const char* name,
- const BSONElement& e );
-
- StatusWithMatchExpression _parseRegexElement( const char* name,
- const BSONElement& e );
-
- StatusWithMatchExpression _parseRegexDocument( const char* name,
- const BSONObj& doc );
-
-
- Status _parseArrayFilterEntries( ArrayFilterEntries* entries,
- const BSONObj& theArray );
-
- // arrays
-
- StatusWithMatchExpression _parseElemMatch( const char* name,
- const BSONElement& e,
- int level );
-
- StatusWithMatchExpression _parseAll( const char* name,
- const BSONElement& e,
- int level );
-
- // tree
-
- Status _parseTreeList( const BSONObj& arr, ListOfMatchExpression* out, int level );
-
- StatusWithMatchExpression _parseNot( const char* name,
- const BSONElement& e,
- int level );
-
- // The maximum allowed depth of a query tree. Just to guard against stack overflow.
- static const int kMaximumTreeDepth;
-
- // Performs parsing for the $where clause. We do not own this pointer - it has to live
- // as long as the parser is active.
- const WhereCallback* _whereCallback;
+ virtual ~WhereCallback() {}
};
/**
- * This implementation is used for the server-side code.
+ * The caller has to maintain ownership of 'obj';
+ * the tree has views (BSONElement) into 'obj'.
*/
- class WhereCallbackReal : public MatchExpressionParser::WhereCallback {
- public:
+ static StatusWithMatchExpression parse(const BSONObj& obj,
+ const WhereCallback& whereCallback = WhereCallback()) {
+ // The 0 initializes the match expression tree depth.
+ return MatchExpressionParser(&whereCallback)._parse(obj, 0);
+ }
- /**
- * The OperationContext passed here is not owned, but just referenced. It gets assigned to
- * any $where parsers, which this callback generates. Therefore, the op context must only
- * be destroyed after these parsers and their clones (shallowClone) have been destroyed.
- */
- WhereCallbackReal(OperationContext* txn, StringData dbName);
+private:
+ explicit MatchExpressionParser(const WhereCallback* whereCallback)
+ : _whereCallback(whereCallback) {}
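+    // (The constructor is private: instances are only created through the static
+    // parse() entry point above, which seeds the tree depth at 0.)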
- virtual StatusWithMatchExpression parseWhere(const BSONElement& where) const;
+ /**
+ * 5 = false
+ * { a : 5 } = false
+ * { $lt : 5 } = true
+ * { $ref: "s", $id: "x" } = false
+ * { $ref: "s", $id: "x", $db: "mydb" } = false
+ * { $ref : "s" } = false (if incomplete DBRef is allowed)
+ * { $id : "x" } = false (if incomplete DBRef is allowed)
+ * { $db : "mydb" } = false (if incomplete DBRef is allowed)
+ */
+ bool _isExpressionDocument(const BSONElement& e, bool allowIncompleteDBRef);
- private:
- // Not owned here
- OperationContext* const _txn;
- const StringData _dbName;
- };
+ /**
+ * { $ref: "s", $id: "x" } = true
+ * { $ref : "s" } = true (if incomplete DBRef is allowed)
+ * { $id : "x" } = true (if incomplete DBRef is allowed)
+ * { $db : "x" } = true (if incomplete DBRef is allowed)
+ */
+ bool _isDBRefDocument(const BSONObj& obj, bool allowIncompleteDBRef);
/**
- * This is just a pass-through implementation, used by sharding only.
+ * Parse 'obj' and return either a MatchExpression or an error.
+ *
+ * 'level' tracks the current depth of the tree across recursive calls to this
+ * function. Used in order to apply special logic at the top-level and to return an
+ * error if the tree exceeds the maximum allowed depth.
*/
- class WhereCallbackNoop : public MatchExpressionParser::WhereCallback {
- public:
- WhereCallbackNoop();
+ StatusWithMatchExpression _parse(const BSONObj& obj, int level);
- virtual StatusWithMatchExpression parseWhere(const BSONElement& where) const;
- };
+ /**
+     * Parses a field in a sub-expression.
+     * If the query is { x : { $gt : 5, $lt : 8 } },
+     * 'obj' is { $gt : 5, $lt : 8 }.
+ */
+ Status _parseSub(const char* name, const BSONObj& obj, AndMatchExpression* root, int level);
+
+ /**
+     * Parses a single field in a sub-expression.
+     * If the query is { x : { $gt : 5, $lt : 8 } },
+     * 'e' is $gt : 5.
+ */
+ StatusWithMatchExpression _parseSubField(const BSONObj& context,
+ const AndMatchExpression* andSoFar,
+ const char* name,
+ const BSONElement& e,
+ int level);
+
+ StatusWithMatchExpression _parseComparison(const char* name,
+ ComparisonMatchExpression* cmp,
+ const BSONElement& e);
+
+ StatusWithMatchExpression _parseMOD(const char* name, const BSONElement& e);
+
+ StatusWithMatchExpression _parseRegexElement(const char* name, const BSONElement& e);
+
+ StatusWithMatchExpression _parseRegexDocument(const char* name, const BSONObj& doc);
+
+
+ Status _parseArrayFilterEntries(ArrayFilterEntries* entries, const BSONObj& theArray);
+
+ // arrays
+
+ StatusWithMatchExpression _parseElemMatch(const char* name, const BSONElement& e, int level);
+
+ StatusWithMatchExpression _parseAll(const char* name, const BSONElement& e, int level);
+
+ // tree
+
+ Status _parseTreeList(const BSONObj& arr, ListOfMatchExpression* out, int level);
+
+ StatusWithMatchExpression _parseNot(const char* name, const BSONElement& e, int level);
+
+ // The maximum allowed depth of a query tree. Just to guard against stack overflow.
+ static const int kMaximumTreeDepth;
+
+ // Performs parsing for the $where clause. We do not own this pointer - it has to live
+ // as long as the parser is active.
+ const WhereCallback* _whereCallback;
+};
+
+/**
+ * This implementation is used for the server-side code.
+ */
+class WhereCallbackReal : public MatchExpressionParser::WhereCallback {
+public:
+ /**
+ * The OperationContext passed here is not owned, but just referenced. It gets assigned to
+ * any $where parsers, which this callback generates. Therefore, the op context must only
+ * be destroyed after these parsers and their clones (shallowClone) have been destroyed.
+ */
+ WhereCallbackReal(OperationContext* txn, StringData dbName);
+
+ virtual StatusWithMatchExpression parseWhere(const BSONElement& where) const;
+
+private:
+ // Not owned here
+ OperationContext* const _txn;
+ const StringData _dbName;
+};
+
+/**
+ * This is just a pass-through implementation, used by sharding only.
+ */
+class WhereCallbackNoop : public MatchExpressionParser::WhereCallback {
+public:
+ WhereCallbackNoop();
+ virtual StatusWithMatchExpression parseWhere(const BSONElement& where) const;
+};
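+
+// A minimal usage sketch (illustrative only, using only the API declared above;
+// ownership of the returned tree stays with the caller, as in the tests below):
+//
+//     BSONObj filter = BSON("x" << BSON("$gt" << 5));
+//     StatusWithMatchExpression swme = MatchExpressionParser::parse(filter);
+//     if (swme.isOK()) {
+//         std::unique_ptr<MatchExpression> expr(swme.getValue());
+//         bool matched = expr->matchesBSON(BSON("x" << 7));  // true: 7 > 5
+//     }
+//     // 'filter' must outlive 'expr', since the tree keeps BSONElement views into it.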
- typedef stdx::function<StatusWithMatchExpression(const char* name, int type, const BSONObj& section)> MatchExpressionParserGeoCallback;
- extern MatchExpressionParserGeoCallback expressionParserGeoCallback;
- typedef stdx::function<StatusWithMatchExpression(const BSONObj& queryObj)> MatchExpressionParserTextCallback;
- extern MatchExpressionParserTextCallback expressionParserTextCallback;
+typedef stdx::function<StatusWithMatchExpression(
+ const char* name, int type, const BSONObj& section)> MatchExpressionParserGeoCallback;
+extern MatchExpressionParserGeoCallback expressionParserGeoCallback;
+typedef stdx::function<StatusWithMatchExpression(const BSONObj& queryObj)>
+ MatchExpressionParserTextCallback;
+extern MatchExpressionParserTextCallback expressionParserTextCallback;
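+// Note: where these two callbacks get installed is not shown here; presumably the
+// geo and text parser implementations assign them at startup so this header does
+// not have to depend on those components directly (an assumption, not stated above).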
}
diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp
index 26179f9b0c8..01bd6301d73 100644
--- a/src/mongo/db/matcher/expression_parser_array_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_array_test.cpp
@@ -39,638 +39,664 @@
namespace mongo {
- using std::string;
-
- TEST( MatchExpressionParserArrayTest, Size1 ) {
- BSONObj query = BSON( "x" << BSON( "$size" << 2 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 << 3 ) ) ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, SizeAsString ) {
- BSONObj query = BSON( "x" << BSON( "$size" << "a" ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSONArray() ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 ) ) ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, SizeWithDouble ) {
- BSONObj query = BSON( "x" << BSON( "$size" << 2.5 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSONArray() ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 << 3 ) ) ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, SizeBad ) {
- BSONObj query = BSON( "x" << BSON( "$size" << BSONNULL ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- // ---------
-
- TEST( MatchExpressionParserArrayTest, ElemMatchArr1 ) {
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << BSON( "x" << 1 << "y" << 2 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSON( "x" << 1 ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" <<
- BSON_ARRAY( BSON( "x" << 1 << "y" << 2 ) ) ) ) );
- delete result.getValue();
-
- }
-
- TEST( MatchExpressionParserArrayTest, ElemMatchAnd ) {
- BSONObj query = BSON( "x" <<
- BSON( "$elemMatch" <<
- BSON( "$and" << BSON_ARRAY( BSON( "x" << 1 << "y" << 2 ) ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSON( "x" << 1 ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" <<
- BSON_ARRAY( BSON( "x" << 1 << "y" << 2 ) ) ) ) );
- delete result.getValue();
-
- }
-
- TEST( MatchExpressionParserArrayTest, ElemMatchNor ) {
- BSONObj query = BSON( "x" <<
- BSON( "$elemMatch" <<
- BSON( "$nor" << BSON_ARRAY( BSON( "x" << 1 ) ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSON( "x" << 1 ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" <<
- BSON_ARRAY( BSON( "x" << 2 << "y" << 2 ) ) ) ) );
- delete result.getValue();
-
- }
-
- TEST( MatchExpressionParserArrayTest, ElemMatchOr ) {
- BSONObj query = BSON( "x" <<
- BSON( "$elemMatch" <<
- BSON( "$or" << BSON_ARRAY( BSON( "x" << 1 << "y" << 2 ) ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSON( "x" << 1 ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" <<
- BSON_ARRAY( BSON( "x" << 1 << "y" << 2 ) ) ) ) );
- delete result.getValue();
-
- }
-
- TEST( MatchExpressionParserArrayTest, ElemMatchVal1 ) {
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << BSON( "$gt" << 5 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 4 ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 6 ) ) ) );
- delete result.getValue();
- }
-
- // with explicit $eq
- TEST( MatchExpressionParserArrayTest, ElemMatchDBRef1 ) {
- OID oid = OID::gen();
- BSONObj match = BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" );
- OID oidx = OID::gen();
- BSONObj notMatch = BSON( "$ref" << "coll" << "$id" << oidx << "$db" << "db" );
-
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << BSON( "$eq" << match ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( notMatch ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match ) ) ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, ElemMatchDBRef2 ) {
- OID oid = OID::gen();
- BSONObj match = BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" );
- OID oidx = OID::gen();
- BSONObj notMatch = BSON( "$ref" << "coll" << "$id" << oidx << "$db" << "db" );
-
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << match ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( notMatch ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match ) ) ) );
- delete result.getValue();
- }
-
- // Additional fields after $ref and $id.
- TEST( MatchExpressionParserArrayTest, ElemMatchDBRef3 ) {
- OID oid = OID::gen();
- BSONObj match = BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 );
- OID oidx = OID::gen();
- BSONObj notMatch = BSON( "$ref" << "coll" << "$id" << oidx << "foo" << 12345 );
-
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << match ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( notMatch ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match ) ) ) );
-
- // Document contains fields not referred to in $elemMatch query.
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 << "bar" << 678 ) ) ) ) );
- delete result.getValue();
- }
-
- // Query with DBRef fields out of order.
- TEST( MatchExpressionParserArrayTest, ElemMatchDBRef4 ) {
- OID oid = OID::gen();
- BSONObj match = BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" );
- BSONObj matchOutOfOrder = BSON( "$db" << "db" << "$id" << oid << "$ref" << "coll" );
- OID oidx = OID::gen();
- BSONObj notMatch = BSON( "$ref" << "coll" << "$id" << oidx << "$db" << "db" );
-
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << matchOutOfOrder ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( notMatch ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match ) ) ) );
- delete result.getValue();
- }
-
- // Query with DBRef fields out of order.
- // Additional fields besides $ref and $id.
- TEST( MatchExpressionParserArrayTest, ElemMatchDBRef5 ) {
- OID oid = OID::gen();
- BSONObj match = BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 );
- BSONObj matchOutOfOrder = BSON( "foo" << 12345 << "$id" << oid << "$ref" << "coll" );
- OID oidx = OID::gen();
- BSONObj notMatch = BSON( "$ref" << "coll" << "$id" << oidx << "foo" << 12345 );
-
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << matchOutOfOrder ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( notMatch ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match ) ) ) );
-
- // Document contains fields not referred to in $elemMatch query.
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 << "bar" << 678 ) ) ) ) );
- delete result.getValue();
- }
-
- // Incomplete DBRef - $id missing.
- TEST( MatchExpressionParserArrayTest, ElemMatchDBRef6 ) {
- OID oid = OID::gen();
- BSONObj match = BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 );
- BSONObj matchMissingID = BSON( "$ref" << "coll" << "foo" << 12345 );
- BSONObj notMatch = BSON( "$ref" << "collx" << "$id" << oid << "foo" << 12345 );
-
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << matchMissingID ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( notMatch ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match ) ) ) );
-
- // Document contains fields not referred to in $elemMatch query.
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 << "bar" << 678 ) ) ) ) );
- delete result.getValue();
- }
-
- // Incomplete DBRef - $ref missing.
- TEST( MatchExpressionParserArrayTest, ElemMatchDBRef7 ) {
- OID oid = OID::gen();
- BSONObj match = BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 );
- BSONObj matchMissingRef = BSON( "$id" << oid << "foo" << 12345 );
- OID oidx = OID::gen();
- BSONObj notMatch = BSON( "$ref" << "coll" << "$id" << oidx << "foo" << 12345 );
-
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << matchMissingRef ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( notMatch ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match ) ) ) );
-
- // Document contains fields not referred to in $elemMatch query.
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 << "bar" << 678 ) ) ) ) );
- delete result.getValue();
- }
-
- // Incomplete DBRef - $db only.
- TEST( MatchExpressionParserArrayTest, ElemMatchDBRef8 ) {
- OID oid = OID::gen();
- BSONObj match = BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db"
- << "foo" << 12345 );
- BSONObj matchDBOnly = BSON( "$db" << "db" << "foo" << 12345 );
- BSONObj notMatch = BSON( "$ref" << "coll" << "$id" << oid << "$db" << "dbx"
- << "foo" << 12345 );
-
- BSONObj query = BSON( "x" << BSON( "$elemMatch" << matchDBOnly ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( notMatch ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match ) ) ) );
-
- // Document contains fields not referred to in $elemMatch query.
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db"
- << "foo" << 12345 << "bar" << 678 ) ) ) ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, All1 ) {
- BSONObj query = BSON( "x" << BSON( "$all" << BSON_ARRAY( 1 << 2 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- // Verify that the $all got parsed to AND.
- ASSERT_EQUALS( MatchExpression::AND, result.getValue()->matchType() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 2 ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 << 3 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 2 << 3 ) ) ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, AllNull ) {
- BSONObj query = BSON( "x" << BSON( "$all" << BSON_ARRAY( BSONNULL ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- // Verify that the $all got parsed to AND.
- ASSERT_EQUALS( MatchExpression::AND, result.getValue()->matchType() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSONNULL ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL ) ) ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, AllBadArg ) {
- BSONObj query = BSON( "x" << BSON( "$all" << 1 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserArrayTest, AllBadRegexArg ) {
- string tooLargePattern( 50 * 1000, 'z' );
- BSONObjBuilder allArray;
- allArray.appendRegex( "0", tooLargePattern, "" );
- BSONObjBuilder operand;
- operand.appendArray( "$all", allArray.obj() );
-
- BSONObj query = BSON( "x" << operand.obj() );
-
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
-
- TEST( MatchExpressionParserArrayTest, AllRegex1 ) {
- BSONObjBuilder allArray;
- allArray.appendRegex( "0", "^a", "" );
- allArray.appendRegex( "1", "B", "i" );
- BSONObjBuilder all;
- all.appendArray( "$all", allArray.obj() );
- BSONObj query = BSON( "a" << all.obj() );
-
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- // Verify that the $all got parsed to AND.
- ASSERT_EQUALS( MatchExpression::AND, result.getValue()->matchType() );
-
- BSONObj notMatchFirst = BSON( "a" << "ax" );
- BSONObj notMatchSecond = BSON( "a" << "qqb" );
- BSONObj matchesBoth = BSON( "a" << "ab" );
-
- ASSERT( !result.getValue()->matchesSingleElement( notMatchFirst[ "a" ] ) );
- ASSERT( !result.getValue()->matchesSingleElement( notMatchSecond[ "a" ] ) );
- ASSERT( result.getValue()->matchesSingleElement( matchesBoth[ "a" ] ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, AllRegex2 ) {
- BSONObjBuilder allArray;
- allArray.appendRegex( "0", "^a", "" );
- allArray.append( "1", "abc" );
- BSONObjBuilder all;
- all.appendArray( "$all", allArray.obj() );
- BSONObj query = BSON( "a" << all.obj() );
-
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- // Verify that the $all got parsed to AND.
- ASSERT_EQUALS( MatchExpression::AND, result.getValue()->matchType() );
-
- BSONObj notMatchFirst = BSON( "a" << "ax" );
- BSONObj matchesBoth = BSON( "a" << "abc" );
-
- ASSERT( !result.getValue()->matchesSingleElement( notMatchFirst[ "a" ] ) );
- ASSERT( result.getValue()->matchesSingleElement( matchesBoth[ "a" ] ) );
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserArrayTest, AllNonArray ) {
- BSONObj query = BSON( "x" << BSON( "$all" << BSON_ARRAY( 5 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- // Verify that the $all got parsed to AND.
- ASSERT_EQUALS( MatchExpression::AND, result.getValue()->matchType() );
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 5 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 5 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 4 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 4 ) ) ) );
- delete result.getValue();
- }
-
-
- TEST( MatchExpressionParserArrayTest, AllElemMatch1 ) {
- BSONObj internal = BSON( "x" << 1 << "y" << 2 );
- BSONObj query = BSON( "x" << BSON( "$all" << BSON_ARRAY( BSON( "$elemMatch" << internal ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- // Verify that the $all got parsed to an AND with a single ELEM_MATCH_OBJECT child.
- ASSERT_EQUALS( MatchExpression::AND, result.getValue()->matchType() );
- ASSERT_EQUALS( 1U, result.getValue()->numChildren() );
- MatchExpression* child = result.getValue()->getChild( 0 );
- ASSERT_EQUALS( MatchExpression::ELEM_MATCH_OBJECT, child->matchType() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( 1 << 2 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSON( "x" << 1 ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" <<
- BSON_ARRAY( BSON( "x" << 1 << "y" << 2 ) ) ) ) );
- delete result.getValue();
-
- }
-
- // $all and $elemMatch on dotted field.
- // Top level field can be either document or array.
- TEST( MatchExpressionParserArrayTest, AllElemMatch2 ) {
- BSONObj internal = BSON( "z" << 1 );
- BSONObj query = BSON( "x.y" << BSON( "$all" <<
- BSON_ARRAY( BSON( "$elemMatch" << internal ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- // Verify that the $all got parsed to an AND with a single ELEM_MATCH_OBJECT child.
- ASSERT_EQUALS( MatchExpression::AND, result.getValue()->matchType() );
- ASSERT_EQUALS( 1U, result.getValue()->numChildren() );
- MatchExpression* child = result.getValue()->getChild( 0 );
- ASSERT_EQUALS( MatchExpression::ELEM_MATCH_OBJECT, child->matchType() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON( "y" << 1 ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON( "y" <<
- BSON_ARRAY( 1 << 2 ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" <<
- BSON( "y" <<
- BSON_ARRAY( BSON( "x" << 1 ) ) ) ) ) );
- // x is a document. Internal document does not contain z.
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" <<
- BSON( "y" <<
- BSON_ARRAY(
- BSON( "x" << 1 << "y" << 1 ) ) ) ) ) );
- // x is an array. Internal document does not contain z.
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" <<
- BSON_ARRAY(
- BSON( "y" <<
- BSON_ARRAY(
- BSON( "x" << 1 << "y" << 1 ) ) ) ) ) ) );
- // x is a document but y is not an array.
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" <<
- BSON( "y" <<
- BSON( "x" << 1 << "z" << 1 ) ) ) ) );
- // x is an array but y is not an array.
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" <<
- BSON_ARRAY(
- BSON( "y" <<
- BSON( "x" << 1 << "z" << 1 ) ) ) ) ) );
- // x is a document.
- ASSERT( result.getValue()->matchesBSON( BSON( "x" <<
- BSON( "y" <<
- BSON_ARRAY(
- BSON( "x" << 1 << "z" << 1 ) ) ) ) ) );
- // x is an array.
- ASSERT( result.getValue()->matchesBSON( BSON( "x" <<
- BSON_ARRAY(
- BSON( "y" <<
- BSON_ARRAY(
- BSON( "x" << 1 << "z" << 1 ) ) ) ) ) ) );
- delete result.getValue();
- }
-
- // Check the structure of the resulting MatchExpression, and make sure that the paths
- // are correct.
- TEST( MatchExpressionParserArrayTest, AllElemMatch3 ) {
- BSONObj query = fromjson( "{x: {$all: [{$elemMatch: {y: 1, z: 1}}]}}" );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- std::unique_ptr<MatchExpression> expr( result.getValue() );
-
- // Root node should be an AND with one child.
- ASSERT_EQUALS( MatchExpression::AND, expr->matchType() );
- ASSERT_EQUALS( 1U, expr->numChildren() );
-
- // Child should be an ELEM_MATCH_OBJECT with one child and path "x".
- MatchExpression* emObject = expr->getChild( 0 );
- ASSERT_EQUALS( MatchExpression::ELEM_MATCH_OBJECT, emObject->matchType() );
- ASSERT_EQUALS( 1U, emObject->numChildren() );
- ASSERT_EQUALS( "x", emObject->path().toString() );
-
- // Child should be another AND with two children.
- MatchExpression* and2 = emObject->getChild( 0 );
- ASSERT_EQUALS( MatchExpression::AND, and2->matchType() );
- ASSERT_EQUALS( 2U, and2->numChildren() );
-
- // Both children should be equalites, with paths "y" and "z".
- MatchExpression* leaf1 = and2->getChild( 0 );
- ASSERT_EQUALS( MatchExpression::EQ, leaf1->matchType() );
- ASSERT_EQUALS( 0U, leaf1->numChildren() );
- ASSERT_EQUALS( "y", leaf1->path().toString() );
- MatchExpression* leaf2 = and2->getChild( 1 );
- ASSERT_EQUALS( MatchExpression::EQ, leaf2->matchType() );
- ASSERT_EQUALS( 0U, leaf2->numChildren() );
- ASSERT_EQUALS( "z", leaf2->path().toString() );
- }
-
- TEST( MatchExpressionParserArrayTest, AllElemMatchBad ) {
- BSONObj internal = BSON( "x" << 1 << "y" << 2 );
-
- BSONObj query = BSON( "x" << BSON( "$all" << BSON_ARRAY( BSON( "$elemMatch" << internal ) << 5 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
-
- query = BSON( "x" << BSON( "$all" << BSON_ARRAY( 5 << BSON( "$elemMatch" << internal ) ) ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- // You can't mix $elemMatch and regular equality inside $all.
- TEST( MatchExpressionParserArrayTest, AllElemMatchBadMixed ) {
- // $elemMatch first, equality second.
- BSONObj bad1 = fromjson( "{x: {$all: [{$elemMatch: {y: 1}}, 3]}}" );
- StatusWithMatchExpression result1 = MatchExpressionParser::parse( bad1 );
- ASSERT_FALSE( result1.isOK() );
-
- // equality first, $elemMatch second
- BSONObj bad2 = fromjson( "{x: {$all: [3, {$elemMatch: {y: 1}}]}}" );
- StatusWithMatchExpression result2 = MatchExpressionParser::parse( bad2 );
- ASSERT_FALSE( result1.isOK() );
-
- // $elemMatch first, object second
- BSONObj bad3 = fromjson( "{x: {$all: [{$elemMatch: {y: 1}}, {z: 1}]}}" );
- StatusWithMatchExpression result3 = MatchExpressionParser::parse( bad3 );
- ASSERT_FALSE( result3.isOK() );
-
- // object first, $elemMatch second
- BSONObj bad4 = fromjson( "{x: {$all: [{z: 1}, {$elemMatch: {y: 1}}]}}" );
- StatusWithMatchExpression result4 = MatchExpressionParser::parse( bad4 );
- ASSERT_FALSE( result4.isOK() );
- }
-
- // $all with empty string.
- TEST( MatchExpressionParserArrayTest, AllEmptyString ) {
- BSONObj query = BSON( "x" << BSON( "$all" << BSON_ARRAY( "" ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "a" ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL << "a" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONObj() << "a" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSONArray() ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "" ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL << "" ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONObj() << "" ) ) ) );
- delete result.getValue();
- }
-
- // $all with ISO date.
- TEST( MatchExpressionParserArrayTest, AllISODate ) {
- StatusWith<Date_t> matchResult = dateFromISOString("2014-12-31T00:00:00.000Z");
- ASSERT_TRUE( matchResult.isOK() );
- const Date_t& match = matchResult.getValue();
- StatusWith<Date_t> notMatchResult = dateFromISOString("2014-12-30T00:00:00.000Z");
- ASSERT_TRUE( notMatchResult.isOK() );
- const Date_t& notMatch = notMatchResult.getValue();
-
- BSONObj query = BSON( "x" << BSON( "$all" << BSON_ARRAY( match ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << notMatch ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL <<
- notMatch ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONObj() <<
- notMatch ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSONArray() ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL <<
- match ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONObj() <<
- match ) ) ) );
- delete result.getValue();
- }
-
- // $all on array element with empty string.
- TEST( MatchExpressionParserArrayTest, AllDottedEmptyString ) {
- BSONObj query = BSON( "x.1" << BSON( "$all" << BSON_ARRAY( "" ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "a" ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL << "a" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONObj() << "a" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( "" << BSONNULL ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( "" << BSONObj() ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSONArray() ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "" ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL << "" ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONObj() << "" ) ) ) );
- delete result.getValue();
- }
-
- // $all on array element with ISO date.
- TEST( MatchExpressionParserArrayTest, AllDottedISODate ) {
- StatusWith<Date_t> matchResult = dateFromISOString("2014-12-31T00:00:00.000Z");
- ASSERT_TRUE( matchResult.isOK() );
- const Date_t& match = matchResult.getValue();
- StatusWith<Date_t> notMatchResult = dateFromISOString("2014-12-30T00:00:00.000Z");
- ASSERT_TRUE( notMatchResult.isOK() );
- const Date_t& notMatch = notMatchResult.getValue();
-
- BSONObj query = BSON( "x.1" << BSON( "$all" << BSON_ARRAY( match ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << notMatch ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL <<
- notMatch ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONObj() <<
- notMatch ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match <<
- BSONNULL ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( match <<
- BSONObj() ) ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << BSONArray() ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << match ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONNULL <<
- match ) ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << BSON_ARRAY( BSONObj() <<
- match ) ) ) );
- delete result.getValue();
- }
+using std::string;
+
+TEST(MatchExpressionParserArrayTest, Size1) {
+ BSONObj query = BSON("x" << BSON("$size" << 2));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2 << 3))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, SizeAsString) {
+ BSONObj query = BSON("x" << BSON("$size"
+ << "a"));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSONArray())));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, SizeWithDouble) {
+ BSONObj query = BSON("x" << BSON("$size" << 2.5));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSONArray())));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2 << 3))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, SizeBad) {
+ BSONObj query = BSON("x" << BSON("$size" << BSONNULL));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+// ---------
+
+TEST(MatchExpressionParserArrayTest, ElemMatchArr1) {
+ BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("x" << 1 << "y" << 2)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1 << "y" << 2)))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, ElemMatchAnd) {
+ BSONObj query =
+ BSON("x" << BSON("$elemMatch" << BSON("$and" << BSON_ARRAY(BSON("x" << 1 << "y" << 2)))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1 << "y" << 2)))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, ElemMatchNor) {
+ BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("$nor" << BSON_ARRAY(BSON("x" << 1)))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 2 << "y" << 2)))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, ElemMatchOr) {
+ BSONObj query =
+ BSON("x" << BSON("$elemMatch" << BSON("$or" << BSON_ARRAY(BSON("x" << 1 << "y" << 2)))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1 << "y" << 2)))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, ElemMatchVal1) {
+ BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("$gt" << 5)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(4))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(6))));
+ delete result.getValue();
+}
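+
+// The DBRef tests below exercise $elemMatch against subdocuments built from
+// $ref/$id/$db fields. As the assertions show, field order in the query does not
+// matter, extra fields in the stored array element are allowed, and a partial
+// DBRef-style query ($id missing, $ref missing, or $db only) still parses and matches.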
+
+// with explicit $eq
+TEST(MatchExpressionParserArrayTest, ElemMatchDBRef1) {
+ OID oid = OID::gen();
+ BSONObj match = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db");
+ OID oidx = OID::gen();
+ BSONObj notMatch = BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "$db"
+ << "db");
+
+ BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("$eq" << match)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(notMatch))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, ElemMatchDBRef2) {
+ OID oid = OID::gen();
+ BSONObj match = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db");
+ OID oidx = OID::gen();
+ BSONObj notMatch = BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "$db"
+ << "db");
+
+ BSONObj query = BSON("x" << BSON("$elemMatch" << match));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(notMatch))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
+ delete result.getValue();
+}
+
+// Additional fields after $ref and $id.
+TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
+ OID oid = OID::gen();
+ BSONObj match = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345);
+ OID oidx = OID::gen();
+ BSONObj notMatch = BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "foo" << 12345);
+
+ BSONObj query = BSON("x" << BSON("$elemMatch" << match));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(notMatch))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
+
+ // Document contains fields not referred to in $elemMatch query.
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ delete result.getValue();
+}
+
+// Query with DBRef fields out of order.
+TEST(MatchExpressionParserArrayTest, ElemMatchDBRef4) {
+ OID oid = OID::gen();
+ BSONObj match = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db");
+ BSONObj matchOutOfOrder = BSON("$db"
+ << "db"
+ << "$id" << oid << "$ref"
+ << "coll");
+ OID oidx = OID::gen();
+ BSONObj notMatch = BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "$db"
+ << "db");
+
+ BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(notMatch))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
+ delete result.getValue();
+}
+
+// Query with DBRef fields out of order.
+// Additional fields besides $ref and $id.
+TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
+ OID oid = OID::gen();
+ BSONObj match = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345);
+ BSONObj matchOutOfOrder = BSON("foo" << 12345 << "$id" << oid << "$ref"
+ << "coll");
+ OID oidx = OID::gen();
+ BSONObj notMatch = BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "foo" << 12345);
+
+ BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(notMatch))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
+
+ // Document contains fields not referred to in $elemMatch query.
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ delete result.getValue();
+}
+
+// Incomplete DBRef - $id missing.
+TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
+ OID oid = OID::gen();
+ BSONObj match = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345);
+ BSONObj matchMissingID = BSON("$ref"
+ << "coll"
+ << "foo" << 12345);
+ BSONObj notMatch = BSON("$ref"
+ << "collx"
+ << "$id" << oid << "foo" << 12345);
+
+ BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingID));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(notMatch))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
+
+ // Document contains fields not referred to in $elemMatch query.
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ delete result.getValue();
+}
+
+// Incomplete DBRef - $ref missing.
+TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
+ OID oid = OID::gen();
+ BSONObj match = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345);
+ BSONObj matchMissingRef = BSON("$id" << oid << "foo" << 12345);
+ OID oidx = OID::gen();
+ BSONObj notMatch = BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "foo" << 12345);
+
+ BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingRef));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(notMatch))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
+
+ // Document contains fields not referred to in $elemMatch query.
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ delete result.getValue();
+}
+
+// Incomplete DBRef - $db only.
+TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
+ OID oid = OID::gen();
+ BSONObj match = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"
+ << "foo" << 12345);
+ BSONObj matchDBOnly = BSON("$db"
+ << "db"
+ << "foo" << 12345);
+ BSONObj notMatch = BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "dbx"
+ << "foo" << 12345);
+
+ BSONObj query = BSON("x" << BSON("$elemMatch" << matchDBOnly));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(notMatch))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
+
+ // Document contains fields not referred to in $elemMatch query.
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"
+ << "foo" << 12345 << "bar" << 678)))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, All1) {
+ BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY(1 << 2)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ // Verify that the $all got parsed to AND.
+ ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(2))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2 << 3))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(2 << 3))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, AllNull) {
+ BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY(BSONNULL)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ // Verify that the $all got parsed to AND.
+ ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSONNULL)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, AllBadArg) {
+ BSONObj query = BSON("x" << BSON("$all" << 1));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserArrayTest, AllBadRegexArg) {
+ string tooLargePattern(50 * 1000, 'z');
+ BSONObjBuilder allArray;
+ allArray.appendRegex("0", tooLargePattern, "");
+ BSONObjBuilder operand;
+ operand.appendArray("$all", allArray.obj());
+
+ BSONObj query = BSON("x" << operand.obj());
+
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+
+TEST(MatchExpressionParserArrayTest, AllRegex1) {
+ BSONObjBuilder allArray;
+ allArray.appendRegex("0", "^a", "");
+ allArray.appendRegex("1", "B", "i");
+ BSONObjBuilder all;
+ all.appendArray("$all", allArray.obj());
+ BSONObj query = BSON("a" << all.obj());
+
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ // Verify that the $all got parsed to AND.
+ ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
+
+ BSONObj notMatchFirst = BSON("a"
+ << "ax");
+ BSONObj notMatchSecond = BSON("a"
+ << "qqb");
+ BSONObj matchesBoth = BSON("a"
+ << "ab");
+
+ ASSERT(!result.getValue()->matchesSingleElement(notMatchFirst["a"]));
+ ASSERT(!result.getValue()->matchesSingleElement(notMatchSecond["a"]));
+ ASSERT(result.getValue()->matchesSingleElement(matchesBoth["a"]));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, AllRegex2) {
+ BSONObjBuilder allArray;
+ allArray.appendRegex("0", "^a", "");
+ allArray.append("1", "abc");
+ BSONObjBuilder all;
+ all.appendArray("$all", allArray.obj());
+ BSONObj query = BSON("a" << all.obj());
+
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ // Verify that the $all got parsed to AND.
+ ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
+
+ BSONObj notMatchFirst = BSON("a"
+ << "ax");
+ BSONObj matchesBoth = BSON("a"
+ << "abc");
+
+ ASSERT(!result.getValue()->matchesSingleElement(notMatchFirst["a"]));
+ ASSERT(result.getValue()->matchesSingleElement(matchesBoth["a"]));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, AllNonArray) {
+ BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY(5)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ // Verify that the $all got parsed to AND.
+ ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 5)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(5))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 4)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(4))));
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserArrayTest, AllElemMatch1) {
+ BSONObj internal = BSON("x" << 1 << "y" << 2);
+ BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY(BSON("$elemMatch" << internal))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ // Verify that the $all got parsed to an AND with a single ELEM_MATCH_OBJECT child.
+ ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
+ ASSERT_EQUALS(1U, result.getValue()->numChildren());
+ MatchExpression* child = result.getValue()->getChild(0);
+ ASSERT_EQUALS(MatchExpression::ELEM_MATCH_OBJECT, child->matchType());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(1 << 2))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("x" << 1 << "y" << 2)))));
+ delete result.getValue();
+}
+
+// $all and $elemMatch on dotted field.
+// Top level field can be either document or array.
+TEST(MatchExpressionParserArrayTest, AllElemMatch2) {
+ BSONObj internal = BSON("z" << 1);
+ BSONObj query = BSON("x.y" << BSON("$all" << BSON_ARRAY(BSON("$elemMatch" << internal))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ // Verify that the $all got parsed to an AND with a single ELEM_MATCH_OBJECT child.
+ ASSERT_EQUALS(MatchExpression::AND, result.getValue()->matchType());
+ ASSERT_EQUALS(1U, result.getValue()->numChildren());
+ MatchExpression* child = result.getValue()->getChild(0);
+ ASSERT_EQUALS(MatchExpression::ELEM_MATCH_OBJECT, child->matchType());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("y" << 1))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("y" << BSON_ARRAY(1 << 2)))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("y" << BSON_ARRAY(BSON("x" << 1))))));
+ // x is a document. Internal document does not contain z.
+ ASSERT(!result.getValue()->matchesBSON(
+ BSON("x" << BSON("y" << BSON_ARRAY(BSON("x" << 1 << "y" << 1))))));
+ // x is an array. Internal document does not contain z.
+ ASSERT(!result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("y" << BSON_ARRAY(BSON("x" << 1 << "y" << 1)))))));
+ // x is a document but y is not an array.
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("y" << BSON("x" << 1 << "z" << 1)))));
+ // x is an array but y is not an array.
+ ASSERT(!result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("y" << BSON("x" << 1 << "z" << 1))))));
+ // x is a document.
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON("y" << BSON_ARRAY(BSON("x" << 1 << "z" << 1))))));
+ // x is an array.
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("y" << BSON_ARRAY(BSON("x" << 1 << "z" << 1)))))));
+ delete result.getValue();
+}
+
+// Check the structure of the resulting MatchExpression, and make sure that the paths
+// are correct.
+TEST(MatchExpressionParserArrayTest, AllElemMatch3) {
+ BSONObj query = fromjson("{x: {$all: [{$elemMatch: {y: 1, z: 1}}]}}");
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ std::unique_ptr<MatchExpression> expr(result.getValue());
+
+ // Root node should be an AND with one child.
+ ASSERT_EQUALS(MatchExpression::AND, expr->matchType());
+ ASSERT_EQUALS(1U, expr->numChildren());
+
+ // Child should be an ELEM_MATCH_OBJECT with one child and path "x".
+ MatchExpression* emObject = expr->getChild(0);
+ ASSERT_EQUALS(MatchExpression::ELEM_MATCH_OBJECT, emObject->matchType());
+ ASSERT_EQUALS(1U, emObject->numChildren());
+ ASSERT_EQUALS("x", emObject->path().toString());
+
+ // Child should be another AND with two children.
+ MatchExpression* and2 = emObject->getChild(0);
+ ASSERT_EQUALS(MatchExpression::AND, and2->matchType());
+ ASSERT_EQUALS(2U, and2->numChildren());
+
+    // Both children should be equalities, with paths "y" and "z".
+ MatchExpression* leaf1 = and2->getChild(0);
+ ASSERT_EQUALS(MatchExpression::EQ, leaf1->matchType());
+ ASSERT_EQUALS(0U, leaf1->numChildren());
+ ASSERT_EQUALS("y", leaf1->path().toString());
+ MatchExpression* leaf2 = and2->getChild(1);
+ ASSERT_EQUALS(MatchExpression::EQ, leaf2->matchType());
+ ASSERT_EQUALS(0U, leaf2->numChildren());
+ ASSERT_EQUALS("z", leaf2->path().toString());
+}
+
+TEST(MatchExpressionParserArrayTest, AllElemMatchBad) {
+ BSONObj internal = BSON("x" << 1 << "y" << 2);
+
+ BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY(BSON("$elemMatch" << internal) << 5)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+
+ query = BSON("x" << BSON("$all" << BSON_ARRAY(5 << BSON("$elemMatch" << internal))));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+// You can't mix $elemMatch and regular equality inside $all.
+TEST(MatchExpressionParserArrayTest, AllElemMatchBadMixed) {
+ // $elemMatch first, equality second.
+ BSONObj bad1 = fromjson("{x: {$all: [{$elemMatch: {y: 1}}, 3]}}");
+ StatusWithMatchExpression result1 = MatchExpressionParser::parse(bad1);
+ ASSERT_FALSE(result1.isOK());
+
+ // equality first, $elemMatch second
+ BSONObj bad2 = fromjson("{x: {$all: [3, {$elemMatch: {y: 1}}]}}");
+ StatusWithMatchExpression result2 = MatchExpressionParser::parse(bad2);
+    ASSERT_FALSE(result2.isOK());
+
+ // $elemMatch first, object second
+ BSONObj bad3 = fromjson("{x: {$all: [{$elemMatch: {y: 1}}, {z: 1}]}}");
+ StatusWithMatchExpression result3 = MatchExpressionParser::parse(bad3);
+ ASSERT_FALSE(result3.isOK());
+
+ // object first, $elemMatch second
+ BSONObj bad4 = fromjson("{x: {$all: [{z: 1}, {$elemMatch: {y: 1}}]}}");
+ StatusWithMatchExpression result4 = MatchExpressionParser::parse(bad4);
+ ASSERT_FALSE(result4.isOK());
+}
+
+// $all with empty string.
+TEST(MatchExpressionParserArrayTest, AllEmptyString) {
+ BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY("")));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "a")));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL << "a"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONObj() << "a"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSONArray())));
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "")));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL << ""))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONObj() << ""))));
+ delete result.getValue();
+}
+
+// $all with ISO date.
+TEST(MatchExpressionParserArrayTest, AllISODate) {
+ StatusWith<Date_t> matchResult = dateFromISOString("2014-12-31T00:00:00.000Z");
+ ASSERT_TRUE(matchResult.isOK());
+ const Date_t& match = matchResult.getValue();
+ StatusWith<Date_t> notMatchResult = dateFromISOString("2014-12-30T00:00:00.000Z");
+ ASSERT_TRUE(notMatchResult.isOK());
+ const Date_t& notMatch = notMatchResult.getValue();
+
+ BSONObj query = BSON("x" << BSON("$all" << BSON_ARRAY(match)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << notMatch)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL << notMatch))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONObj() << notMatch))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSONArray())));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL << match))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONObj() << match))));
+ delete result.getValue();
+}
+
+// $all on array element with empty string.
+TEST(MatchExpressionParserArrayTest, AllDottedEmptyString) {
+ BSONObj query = BSON("x.1" << BSON("$all" << BSON_ARRAY("")));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "a")));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL << "a"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONObj() << "a"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY("" << BSONNULL))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY("" << BSONObj()))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSONArray())));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "")));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL << ""))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONObj() << ""))));
+ delete result.getValue();
+}
+
+// $all on array element with ISO date.
+TEST(MatchExpressionParserArrayTest, AllDottedISODate) {
+ StatusWith<Date_t> matchResult = dateFromISOString("2014-12-31T00:00:00.000Z");
+ ASSERT_TRUE(matchResult.isOK());
+ const Date_t& match = matchResult.getValue();
+ StatusWith<Date_t> notMatchResult = dateFromISOString("2014-12-30T00:00:00.000Z");
+ ASSERT_TRUE(notMatchResult.isOK());
+ const Date_t& notMatch = notMatchResult.getValue();
+
+ BSONObj query = BSON("x.1" << BSON("$all" << BSON_ARRAY(match)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << notMatch)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL << notMatch))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONObj() << notMatch))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match << BSONNULL))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match << BSONObj()))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSONArray())));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << match)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONNULL << match))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSONObj() << match))));
+ delete result.getValue();
+}
}
diff --git a/src/mongo/db/matcher/expression_parser_geo.cpp b/src/mongo/db/matcher/expression_parser_geo.cpp
index 837d61b448c..f740a93abac 100644
--- a/src/mongo/db/matcher/expression_parser_geo.cpp
+++ b/src/mongo/db/matcher/expression_parser_geo.cpp
@@ -37,52 +37,51 @@
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
- StatusWithMatchExpression expressionParserGeoCallbackReal( const char* name,
- int type,
- const BSONObj& section ) {
- if (BSONObj::opWITHIN == type || BSONObj::opGEO_INTERSECTS == type) {
- unique_ptr<GeoExpression> gq(new GeoExpression(name));
- Status parseStatus = gq->parseFrom(section);
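+// Dispatch target installed by the initializer below: opWITHIN / opGEO_INTERSECTS
+// predicates build a GeoMatchExpression, opNEAR predicates build a GeoNearMatchExpression.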
+StatusWithMatchExpression expressionParserGeoCallbackReal(const char* name,
+ int type,
+ const BSONObj& section) {
+ if (BSONObj::opWITHIN == type || BSONObj::opGEO_INTERSECTS == type) {
+ unique_ptr<GeoExpression> gq(new GeoExpression(name));
+ Status parseStatus = gq->parseFrom(section);
- if (!parseStatus.isOK()) return StatusWithMatchExpression(parseStatus);
+ if (!parseStatus.isOK())
+ return StatusWithMatchExpression(parseStatus);
- unique_ptr<GeoMatchExpression> e( new GeoMatchExpression() );
+ unique_ptr<GeoMatchExpression> e(new GeoMatchExpression());
- // Until the index layer accepts non-BSON predicates, or special indices are moved into
- // stages, we have to clean up the raw object so it can be passed down to the index
- // layer.
- BSONObjBuilder bob;
- bob.append(name, section);
- Status s = e->init( name, gq.release(), bob.obj() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( e.release() );
+ // Until the index layer accepts non-BSON predicates, or special indices are moved into
+ // stages, we have to clean up the raw object so it can be passed down to the index
+ // layer.
+ BSONObjBuilder bob;
+ bob.append(name, section);
+ Status s = e->init(name, gq.release(), bob.obj());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(e.release());
+ } else {
+ verify(BSONObj::opNEAR == type);
+ unique_ptr<GeoNearExpression> nq(new GeoNearExpression(name));
+ Status s = nq->parseFrom(section);
+ if (!s.isOK()) {
+ return StatusWithMatchExpression(s);
}
- else {
- verify(BSONObj::opNEAR == type);
- unique_ptr<GeoNearExpression> nq(new GeoNearExpression(name));
- Status s = nq->parseFrom( section );
- if ( !s.isOK() ) {
- return StatusWithMatchExpression( s );
- }
- unique_ptr<GeoNearMatchExpression> e( new GeoNearMatchExpression() );
- // Until the index layer accepts non-BSON predicates, or special indices are moved into
- // stages, we have to clean up the raw object so it can be passed down to the index
- // layer.
- BSONObjBuilder bob;
- bob.append(name, section);
- s = e->init( name, nq.release(), bob.obj() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( e.release() );
- }
- }
-
- MONGO_INITIALIZER( MatchExpressionParserGeo )( ::mongo::InitializerContext* context ) {
- expressionParserGeoCallback = expressionParserGeoCallbackReal;
- return Status::OK();
+ unique_ptr<GeoNearMatchExpression> e(new GeoNearMatchExpression());
+ // Until the index layer accepts non-BSON predicates, or special indices are moved into
+ // stages, we have to clean up the raw object so it can be passed down to the index
+ // layer.
+ BSONObjBuilder bob;
+ bob.append(name, section);
+ s = e->init(name, nq.release(), bob.obj());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(e.release());
}
+}
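+
+// Hook the real geo parser into the generic match expression parser; MONGO_INITIALIZER
+// blocks run once during process startup.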
+MONGO_INITIALIZER(MatchExpressionParserGeo)(::mongo::InitializerContext* context) {
+ expressionParserGeoCallback = expressionParserGeoCallbackReal;
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/matcher/expression_parser_geo_test.cpp b/src/mongo/db/matcher/expression_parser_geo_test.cpp
index 503958ddbab..f86366754a9 100644
--- a/src/mongo/db/matcher/expression_parser_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_geo_test.cpp
@@ -39,42 +39,43 @@
namespace mongo {
- TEST( MatchExpressionParserGeo, WithinBox ) {
- BSONObj query = fromjson("{a:{$within:{$box:[{x: 4, y:4},[6,6]]}}}");
+TEST(MatchExpressionParserGeo, WithinBox) {
+ BSONObj query = fromjson("{a:{$within:{$box:[{x: 4, y:4},[6,6]]}}}");
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
- ASSERT(!result.getValue()->matchesBSON(fromjson("{a: [3,4]}")));
- ASSERT(result.getValue()->matchesBSON(fromjson("{a: [4,4]}")));
- ASSERT(result.getValue()->matchesBSON(fromjson("{a: [5,5]}")));
- ASSERT(result.getValue()->matchesBSON(fromjson("{a: [5,5.1]}")));
- ASSERT(result.getValue()->matchesBSON(fromjson("{a: {x: 5, y:5.1}}")));
-
- }
+ ASSERT(!result.getValue()->matchesBSON(fromjson("{a: [3,4]}")));
+ ASSERT(result.getValue()->matchesBSON(fromjson("{a: [4,4]}")));
+ ASSERT(result.getValue()->matchesBSON(fromjson("{a: [5,5]}")));
+ ASSERT(result.getValue()->matchesBSON(fromjson("{a: [5,5.1]}")));
+ ASSERT(result.getValue()->matchesBSON(fromjson("{a: {x: 5, y:5.1}}")));
+}
- TEST( MatchExpressionParserGeoNear, ParseNear ) {
- BSONObj query = fromjson("{loc:{$near:{$maxDistance:100, "
- "$geometry:{type:\"Point\", coordinates:[0,0]}}}}");
+TEST(MatchExpressionParserGeoNear, ParseNear) {
+ BSONObj query = fromjson(
+ "{loc:{$near:{$maxDistance:100, "
+ "$geometry:{type:\"Point\", coordinates:[0,0]}}}}");
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
- MatchExpression* exp = result.getValue();
- ASSERT_EQUALS(MatchExpression::GEO_NEAR, exp->matchType());
+ MatchExpression* exp = result.getValue();
+ ASSERT_EQUALS(MatchExpression::GEO_NEAR, exp->matchType());
- GeoNearMatchExpression* gnexp = static_cast<GeoNearMatchExpression*>(exp);
- ASSERT_EQUALS(gnexp->getData().maxDistance, 100);
- }
+ GeoNearMatchExpression* gnexp = static_cast<GeoNearMatchExpression*>(exp);
+ ASSERT_EQUALS(gnexp->getData().maxDistance, 100);
+}
- // $near must be the only field in the expression object.
- TEST( MatchExpressionParserGeoNear, ParseNearExtraField ) {
- BSONObj query = fromjson("{loc:{$near:{$maxDistance:100, "
- "$geometry:{type:\"Point\", coordinates:[0,0]}}, foo: 1}}");
+// $near must be the only field in the expression object.
+TEST(MatchExpressionParserGeoNear, ParseNearExtraField) {
+ BSONObj query = fromjson(
+ "{loc:{$near:{$maxDistance:100, "
+ "$geometry:{type:\"Point\", coordinates:[0,0]}}, foo: 1}}");
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
}
diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
index 10655322580..7660fa92b3b 100644
--- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
@@ -42,528 +42,623 @@
namespace mongo {
- using std::endl;
- using std::string;
-
- TEST( MatchExpressionParserLeafTest, SimpleEQ2 ) {
- BSONObj query = BSON( "x" << BSON( "$eq" << 2 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleEQUndefined ) {
- BSONObj query = BSON( "x" << BSON( "$eq" << BSONUndefined ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleGT1 ) {
- BSONObj query = BSON( "x" << BSON( "$gt" << 2 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleLT1 ) {
- BSONObj query = BSON( "x" << BSON( "$lt" << 2 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleGTE1 ) {
- BSONObj query = BSON( "x" << BSON( "$gte" << 2 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleLTE1 ) {
- BSONObj query = BSON( "x" << BSON( "$lte" << 2 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleNE1 ) {
- BSONObj query = BSON( "x" << BSON( "$ne" << 2 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleModBad1 ) {
- BSONObj query = BSON( "x" << BSON( "$mod" << BSON_ARRAY( 3 << 2 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy1(result.getValue());
-
- query = BSON( "x" << BSON( "$mod" << BSON_ARRAY( 3 ) ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( !result.isOK() );
-
- query = BSON( "x" << BSON( "$mod" << BSON_ARRAY( 3 << 2 << 4 ) ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( !result.isOK() );
-
- query = BSON( "x" << BSON( "$mod" << BSON_ARRAY( "q" << 2 ) ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( !result.isOK() );
-
- query = BSON( "x" << BSON( "$mod" << 3 ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( !result.isOK() );
-
- query = BSON( "x" << BSON( "$mod" << BSON( "a" << 1 << "b" << 2 ) ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( !result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleMod1 ) {
- BSONObj query = BSON( "x" << BSON( "$mod" << BSON_ARRAY( 3 << 2 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 5 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 4 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 8 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleModNotNumber ) {
- BSONObj query = BSON( "x" << BSON( "$mod" << BSON_ARRAY( 2 << "r" ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 4 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "a" ) ) );
- }
-
-
- TEST( MatchExpressionParserLeafTest, SimpleIN1 ) {
- BSONObj query = BSON( "x" << BSON( "$in" << BSON_ARRAY( 2 << 3 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, INSingleDBRef ) {
- OID oid = OID::gen();
- BSONObj query =
- BSON( "x" << BSON( "$in" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- OID oidx = OID::gen();
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "collx" << "$id" << oidx << "$db" << "db" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "coll" << "$id" << oidx << "$db" << "db" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$id" << oid << "$ref" << "coll" << "$db" << "db" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$id" << oid << "$ref" << "coll" << "$db" << "db" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$id" << oid << "$ref" << "coll" << "$db" << "db" ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "coll" << "$id" << oid << "$db" << "dbx" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$db" << "db" << "$ref" << "coll" << "$id" << oid ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "collx" << "$id" << oidx << "$db" << "db" ) <<
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" ) ) ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, INMultipleDBRef ) {
- OID oid = OID::gen();
- OID oidy = OID::gen();
- BSONObj query = BSON( "x" << BSON( "$in" << BSON_ARRAY(
- BSON( "$ref" << "colly" << "$id" << oidy << "$db" << "db" ) <<
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- OID oidx = OID::gen();
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "collx" << "$id" << oidx << "$db" << "db" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "coll" << "$id" << oidx << "$db" << "db" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$id" << oid << "$ref" << "coll" << "$db" << "db" ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oidy << "$db" << "db" ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "colly" << "$id" << oid << "$db" << "db" ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$id" << oid << "$ref" << "coll" << "$db" << "db" ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "dbx" ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$id" << oidy << "$ref" << "colly" << "$db" << "db" ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "collx" << "$id" << oidx << "$db" << "db" ) <<
- BSON( "$ref" << "coll" << "$id" << oidx << "$db" << "db" ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "collx" << "$id" << oidx << "$db" << "db" ) <<
- BSON( "$ref" << "colly" << "$id" << oidx << "$db" << "db" ) ) ) ) );
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "collx" << "$id" << oidx << "$db" << "db" ) <<
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "dbx" ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "colly" << "$id" << oidy << "$db" << "db" ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "colly" << "$id" << oidy << "$db" << "db" ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "collx" << "$id" << oidx << "$db" << "db" ) <<
- BSON( "$ref" << "coll" << "$id" << oid << "$db" << "db" ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "collx" << "$id" << oidx << "$db" << "db" ) <<
- BSON( "$ref" << "colly" << "$id" << oidy << "$db" << "db" ) ) ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, INDBRefWithOptionalField1 ) {
- OID oid = OID::gen();
- BSONObj query =
- BSON( "x" << BSON( "$in" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- OID oidx = OID::gen();
- ASSERT( !result.getValue()->matchesBSON(
- BSON( "x" << BSON( "$ref" << "coll" << "$id" << oidx << "$db" << "db" ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 ) ) ) ) );
- ASSERT( result.getValue()->matchesBSON(
- BSON( "x" << BSON_ARRAY(
- BSON( "$ref" << "collx" << "$id" << oidx << "foo" << 12345 ) <<
- BSON( "$ref" << "coll" << "$id" << oid << "foo" << 12345 ) ) ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, INInvalidDBRefs ) {
- // missing $id
- BSONObj query = BSON( "x" << BSON( "$in" << BSON_ARRAY(
- BSON( "$ref" << "coll" ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- result = MatchExpressionParser::parse( query );
-
- // second field is not $id
- query = BSON( "x" << BSON( "$in" << BSON_ARRAY(
- BSON( "$ref" << "coll" <<
- "$foo" << 1 ) ) ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
-
- OID oid = OID::gen();
-
- // missing $ref field
- query = BSON( "x" << BSON( "$in" << BSON_ARRAY(
- BSON( "$id" << oid <<
- "foo" << 3 ) ) ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
-
- // missing $id and $ref field
- query = BSON( "x" << BSON( "$in" << BSON_ARRAY(
- BSON( "$db" << "test" <<
- "foo" << 3 ) ) ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
-
- }
-
- TEST( MatchExpressionParserLeafTest, INExpressionDocument ) {
- BSONObj query = BSON( "x" << BSON( "$in" << BSON_ARRAY( BSON( "$foo" << 1 ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, INNotArray ) {
- BSONObj query = BSON( "x" << BSON( "$in" << 5 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, INUndefined ) {
- BSONObj query = BSON( "x" << BSON( "$in" << BSON_ARRAY( BSONUndefined ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, INNotElemMatch ) {
- BSONObj query = BSON( "x" << BSON( "$in" << BSON_ARRAY( BSON( "$elemMatch" << 1 ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, INRegexTooLong ) {
- string tooLargePattern( 50 * 1000, 'z' );
- BSONObjBuilder inArray;
- inArray.appendRegex( "0", tooLargePattern, "" );
- BSONObjBuilder operand;
- operand.appendArray( "$in", inArray.obj() );
- BSONObj query = BSON( "x" << operand.obj() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, INRegexTooLong2 ) {
- string tooLargePattern( 50 * 1000, 'z' );
- BSONObj query = BSON( "x" << BSON( "$in" << BSON_ARRAY( BSON( "$regex" << tooLargePattern ) ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, INRegexStuff ) {
- BSONObjBuilder inArray;
- inArray.appendRegex( "0", "^a", "" );
- inArray.appendRegex( "1", "B", "i" );
- inArray.append( "2", 4 );
- BSONObjBuilder operand;
- operand.appendArray( "$in", inArray.obj() );
-
- BSONObj query = BSON( "a" << operand.obj() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- BSONObj matchFirst = BSON( "a" << "ax" );
- BSONObj matchFirstRegex = BSONObjBuilder().appendRegex( "a", "^a", "" ).obj();
- BSONObj matchSecond = BSON( "a" << "qqb" );
- BSONObj matchSecondRegex = BSONObjBuilder().appendRegex( "a", "B", "i" ).obj();
- BSONObj matchThird = BSON( "a" << 4 );
- BSONObj notMatch = BSON( "a" << "l" );
- BSONObj notMatchRegex = BSONObjBuilder().appendRegex( "a", "B", "" ).obj();
-
- ASSERT( result.getValue()->matchesBSON( matchFirst ) );
- ASSERT( result.getValue()->matchesBSON( matchFirstRegex ) );
- ASSERT( result.getValue()->matchesBSON( matchSecond ) );
- ASSERT( result.getValue()->matchesBSON( matchSecondRegex ) );
- ASSERT( result.getValue()->matchesBSON( matchThird ) );
- ASSERT( !result.getValue()->matchesBSON( notMatch ) );
- ASSERT( !result.getValue()->matchesBSON( notMatchRegex ) );
- }
-
- TEST( MatchExpressionParserLeafTest, SimpleNIN1 ) {
- BSONObj query = BSON( "x" << BSON( "$nin" << BSON_ARRAY( 2 << 3 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, NINNotArray ) {
- BSONObj query = BSON( "x" << BSON( "$nin" << 5 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
-
- TEST( MatchExpressionParserLeafTest, Regex1 ) {
- BSONObjBuilder b;
- b.appendRegex( "x", "abc", "i" );
- BSONObj query = b.obj();
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "abc" ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "ABC" ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "AC" ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, Regex2 ) {
- BSONObj query = BSON( "x" << BSON( "$regex" << "abc" << "$options" << "i" ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "abc" ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "ABC" ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "AC" ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, Regex3 ) {
- BSONObj query = BSON( "x" << BSON( "$options" << "i" << "$regex" << "abc" ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- log() << "result: " << result.getStatus() << endl;
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "abc" ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "ABC" ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "AC" ) ) );
- }
-
-
- TEST( MatchExpressionParserLeafTest, RegexBad ) {
- BSONObj query = BSON( "x" << BSON( "$regex" << "abc" << "$optionas" << "i" ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
-
- // $regex does not with numbers
- query = BSON( "x" << BSON( "$regex" << 123 ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
-
- query = BSON( "x" << BSON( "$regex" << BSON_ARRAY("abc") ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
-
- query = BSON( "x" << BSON( "$optionas" << "i" ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
-
- query = BSON( "x" << BSON( "$options" << "i" ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- TEST( MatchExpressionParserLeafTest, ExistsYes1 ) {
- BSONObjBuilder b;
- b.appendBool( "$exists", true );
- BSONObj query = BSON( "x" << b.obj() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "abc" ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "y" << "AC" ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, ExistsNO1 ) {
- BSONObjBuilder b;
- b.appendBool( "$exists", false );
- BSONObj query = BSON( "x" << b.obj() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "abc" ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "y" << "AC" ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, Type1 ) {
- BSONObj query = BSON( "x" << BSON( "$type" << String ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "abc" ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, Type2 ) {
- BSONObj query = BSON( "x" << BSON( "$type" << (double)NumberDouble ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 5.3 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, TypeDoubleOperator ) {
- BSONObj query = BSON( "x" << BSON( "$type" << 1.5 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5.3 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, TypeNull ) {
- BSONObj query = BSON( "x" << BSON( "$type" << jstNULL ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( !result.getValue()->matchesBSON( BSONObj() ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5 ) ) );
- BSONObjBuilder b;
- b.appendNull( "x" );
- ASSERT( result.getValue()->matchesBSON( b.obj() ) );
- }
-
- TEST( MatchExpressionParserLeafTest, TypeBadType ) {
- BSONObjBuilder b;
- b.append( "$type", ( JSTypeMax + 1 ) );
- BSONObj query = BSON( "x" << b.obj() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- std::unique_ptr<MatchExpression> destroy(result.getValue());
-
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5.3 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5 ) ) );
- }
-
- TEST( MatchExpressionParserLeafTest, TypeBad ) {
- BSONObj query = BSON( "x" << BSON( "$type" << BSON( "x" << 1 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
+using std::endl;
+using std::string;
+
+TEST(MatchExpressionParserLeafTest, SimpleEQ2) {
+ BSONObj query = BSON("x" << BSON("$eq" << 2));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 3)));
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleEQUndefined) {
+ BSONObj query = BSON("x" << BSON("$eq" << BSONUndefined));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleGT1) {
+ BSONObj query = BSON("x" << BSON("$gt" << 2));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 3)));
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleLT1) {
+ BSONObj query = BSON("x" << BSON("$lt" << 2));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 3)));
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleGTE1) {
+ BSONObj query = BSON("x" << BSON("$gte" << 2));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 3)));
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleLTE1) {
+ BSONObj query = BSON("x" << BSON("$lte" << 2));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 3)));
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleNE1) {
+ BSONObj query = BSON("x" << BSON("$ne" << 2));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 3)));
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleModBad1) {
+ BSONObj query = BSON("x" << BSON("$mod" << BSON_ARRAY(3 << 2)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy1(result.getValue());
+
+ query = BSON("x" << BSON("$mod" << BSON_ARRAY(3)));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(!result.isOK());
+
+ query = BSON("x" << BSON("$mod" << BSON_ARRAY(3 << 2 << 4)));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(!result.isOK());
+
+ query = BSON("x" << BSON("$mod" << BSON_ARRAY("q" << 2)));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(!result.isOK());
+
+ query = BSON("x" << BSON("$mod" << 3));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(!result.isOK());
+
+ query = BSON("x" << BSON("$mod" << BSON("a" << 1 << "b" << 2)));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(!result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleMod1) {
+ BSONObj query = BSON("x" << BSON("$mod" << BSON_ARRAY(3 << 2)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 5)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 4)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 8)));
+}
+
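+// A non-numeric remainder in $mod still parses; the matching below behaves as if the
+// remainder were 0, so only values divisible by the divisor match.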
+TEST(MatchExpressionParserLeafTest, SimpleModNotNumber) {
+ BSONObj query = BSON("x" << BSON("$mod" << BSON_ARRAY(2 << "r")));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 4)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "a")));
+}
+
+
+TEST(MatchExpressionParserLeafTest, SimpleIN1) {
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(2 << 3)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 3)));
+}
+
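+// DBRef equality inside $in compares the whole subdocument, so the $ref/$id/$db fields
+// must appear in exactly this order to match.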
+TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
+ OID oid = OID::gen();
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ OID oidx = OID::gen();
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "$db"
+ << "db"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "$db"
+ << "db"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
+ << "coll"
+ << "$db"
+ << "db"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
+ << "coll"
+ << "$db"
+ << "db"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oid << "$ref"
+ << "coll"
+ << "$db"
+ << "db")))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "dbx"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$db"
+ << "db"
+ << "$ref"
+ << "coll"
+ << "$id" << oid))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db")))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "$db"
+ << "db")
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db")))));
+}
+
+TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
+ OID oid = OID::gen();
+ OID oidy = OID::gen();
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
+ << "colly"
+ << "$id" << oidy << "$db"
+ << "db")
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ OID oidx = OID::gen();
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "$db"
+ << "db"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "$db"
+ << "db"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
+ << "coll"
+ << "$db"
+ << "db"))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oidy << "$db"
+ << "db")))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "colly"
+ << "$id" << oid << "$db"
+ << "db")))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oid << "$ref"
+ << "coll"
+ << "$db"
+ << "db")))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "dbx")))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oidy << "$ref"
+ << "colly"
+ << "$db"
+ << "db")))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "$db"
+ << "db")
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "$db"
+ << "db")))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "$db"
+ << "db")
+ << BSON("$ref"
+ << "colly"
+ << "$id" << oidx << "$db"
+ << "db")))));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "$db"
+ << "db")
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "dbx")))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db"))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "colly"
+ << "$id" << oidy << "$db"
+ << "db"))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db")))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "colly"
+ << "$id" << oidy << "$db"
+ << "db")))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "$db"
+ << "db")
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "$db"
+ << "db")))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "$db"
+ << "db")
+ << BSON("$ref"
+ << "colly"
+ << "$id" << oidy << "$db"
+ << "db")))));
+}
+
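+// Extra fields are permitted in a DBRef as long as $ref and $id come first.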
+TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
+ OID oid = OID::gen();
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ OID oidx = OID::gen();
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
+ << "coll"
+ << "$id" << oidx << "$db"
+ << "db"))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(
+ BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id" << oidx << "foo" << 12345)
+ << BSON("$ref"
+ << "coll"
+ << "$id" << oid << "foo" << 12345)))));
+}
+
+TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
+ // missing $id
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
+ << "coll"))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+    ASSERT_FALSE(result.isOK());
+
+ // second field is not $id
+ query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$foo" << 1))));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+
+ OID oid = OID::gen();
+
+ // missing $ref field
+ query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$id" << oid << "foo" << 3))));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+
+ // missing $id and $ref field
+ query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$db"
+ << "test"
+ << "foo" << 3))));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, INExpressionDocument) {
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$foo" << 1))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, INNotArray) {
+ BSONObj query = BSON("x" << BSON("$in" << 5));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, INUndefined) {
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSONUndefined)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, INNotElemMatch) {
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$elemMatch" << 1))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
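+// Overly long regex patterns are rejected even when they appear inside $in.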
+TEST(MatchExpressionParserLeafTest, INRegexTooLong) {
+ string tooLargePattern(50 * 1000, 'z');
+ BSONObjBuilder inArray;
+ inArray.appendRegex("0", tooLargePattern, "");
+ BSONObjBuilder operand;
+ operand.appendArray("$in", inArray.obj());
+ BSONObj query = BSON("x" << operand.obj());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, INRegexTooLong2) {
+ string tooLargePattern(50 * 1000, 'z');
+ BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$regex" << tooLargePattern))));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
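+// $in may mix regex elements with plain values; a regex element matches both string
+// values and stored regexes with the same pattern and options.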
+TEST(MatchExpressionParserLeafTest, INRegexStuff) {
+ BSONObjBuilder inArray;
+ inArray.appendRegex("0", "^a", "");
+ inArray.appendRegex("1", "B", "i");
+ inArray.append("2", 4);
+ BSONObjBuilder operand;
+ operand.appendArray("$in", inArray.obj());
+
+ BSONObj query = BSON("a" << operand.obj());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ BSONObj matchFirst = BSON("a"
+ << "ax");
+ BSONObj matchFirstRegex = BSONObjBuilder().appendRegex("a", "^a", "").obj();
+ BSONObj matchSecond = BSON("a"
+ << "qqb");
+ BSONObj matchSecondRegex = BSONObjBuilder().appendRegex("a", "B", "i").obj();
+ BSONObj matchThird = BSON("a" << 4);
+ BSONObj notMatch = BSON("a"
+ << "l");
+ BSONObj notMatchRegex = BSONObjBuilder().appendRegex("a", "B", "").obj();
+
+ ASSERT(result.getValue()->matchesBSON(matchFirst));
+ ASSERT(result.getValue()->matchesBSON(matchFirstRegex));
+ ASSERT(result.getValue()->matchesBSON(matchSecond));
+ ASSERT(result.getValue()->matchesBSON(matchSecondRegex));
+ ASSERT(result.getValue()->matchesBSON(matchThird));
+ ASSERT(!result.getValue()->matchesBSON(notMatch));
+ ASSERT(!result.getValue()->matchesBSON(notMatchRegex));
+}
+
+TEST(MatchExpressionParserLeafTest, SimpleNIN1) {
+ BSONObj query = BSON("x" << BSON("$nin" << BSON_ARRAY(2 << 3)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 3)));
+}
+
+TEST(MatchExpressionParserLeafTest, NINNotArray) {
+ BSONObj query = BSON("x" << BSON("$nin" << 5));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+
+TEST(MatchExpressionParserLeafTest, Regex1) {
+ BSONObjBuilder b;
+ b.appendRegex("x", "abc", "i");
+ BSONObj query = b.obj();
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "abc")));
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "ABC")));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "AC")));
+}
+
+TEST(MatchExpressionParserLeafTest, Regex2) {
+ BSONObj query = BSON("x" << BSON("$regex"
+ << "abc"
+ << "$options"
+ << "i"));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "abc")));
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "ABC")));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "AC")));
+}
+
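+// $options may precede $regex within the same predicate object.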
+TEST(MatchExpressionParserLeafTest, Regex3) {
+ BSONObj query = BSON("x" << BSON("$options"
+ << "i"
+ << "$regex"
+ << "abc"));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ log() << "result: " << result.getStatus() << endl;
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "abc")));
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "ABC")));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "AC")));
+}
+
+TEST(MatchExpressionParserLeafTest, RegexBad) {
+ BSONObj query = BSON("x" << BSON("$regex"
+ << "abc"
+ << "$optionas"
+ << "i"));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+
+    // $regex does not work with numbers
+ query = BSON("x" << BSON("$regex" << 123));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+
+ query = BSON("x" << BSON("$regex" << BSON_ARRAY("abc")));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+
+ query = BSON("x" << BSON("$optionas"
+ << "i"));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+
+ query = BSON("x" << BSON("$options"
+ << "i"));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, ExistsYes1) {
+ BSONObjBuilder b;
+ b.appendBool("$exists", true);
+ BSONObj query = BSON("x" << b.obj());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "abc")));
+ ASSERT(!result.getValue()->matchesBSON(BSON("y"
+ << "AC")));
+}
+
+TEST(MatchExpressionParserLeafTest, ExistsNO1) {
+ BSONObjBuilder b;
+ b.appendBool("$exists", false);
+ BSONObj query = BSON("x" << b.obj());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "abc")));
+ ASSERT(result.getValue()->matchesBSON(BSON("y"
+ << "AC")));
+}
+
+TEST(MatchExpressionParserLeafTest, Type1) {
+ BSONObj query = BSON("x" << BSON("$type" << String));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "abc")));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5)));
+}
+
+TEST(MatchExpressionParserLeafTest, Type2) {
+ BSONObj query = BSON("x" << BSON("$type" << (double)NumberDouble));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 5.3)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5)));
+}
+
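+// A fractional $type argument is not a valid BSON type code, so nothing matches.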
+TEST(MatchExpressionParserLeafTest, TypeDoubleOperator) {
+ BSONObj query = BSON("x" << BSON("$type" << 1.5));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5.3)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5)));
+}
+
+TEST(MatchExpressionParserLeafTest, TypeNull) {
+ BSONObj query = BSON("x" << BSON("$type" << jstNULL));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(!result.getValue()->matchesBSON(BSONObj()));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5)));
+ BSONObjBuilder b;
+ b.appendNull("x");
+ ASSERT(result.getValue()->matchesBSON(b.obj()));
+}
+
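+// A $type value outside the range of BSON type codes parses but matches no documents.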
+TEST(MatchExpressionParserLeafTest, TypeBadType) {
+ BSONObjBuilder b;
+ b.append("$type", (JSTypeMax + 1));
+ BSONObj query = BSON("x" << b.obj());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ std::unique_ptr<MatchExpression> destroy(result.getValue());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5.3)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5)));
+}
+
+TEST(MatchExpressionParserLeafTest, TypeBad) {
+ BSONObj query = BSON("x" << BSON("$type" << BSON("x" << 1)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
}
diff --git a/src/mongo/db/matcher/expression_parser_test.cpp b/src/mongo/db/matcher/expression_parser_test.cpp
index 291c3465c8f..5af0c7a2843 100644
--- a/src/mongo/db/matcher/expression_parser_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_test.cpp
@@ -39,77 +39,79 @@
namespace mongo {
- TEST( MatchExpressionParserTest, SimpleEQ1 ) {
- BSONObj query = BSON( "x" << 2 );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
-
- delete result.getValue();
- }
-
- TEST( MatchExpressionParserTest, Multiple1 ) {
- BSONObj query = BSON( "x" << 5 << "y" << BSON( "$gt" << 5 << "$lt" << 8 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 5 << "y" << 7 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 5 << "y" << 6 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 6 << "y" << 7 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5 << "y" << 9 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 5 << "y" << 4 ) ) );
-
- delete result.getValue();
- }
-
- TEST( AtomicMatchExpressionTest, Simple1 ) {
- BSONObj query = BSON( "x" << 5 << "$atomic" << BSON( "$gt" << 5 << "$lt" << 8 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- delete result.getValue();
-
- query = BSON( "x" << 5 << "$isolated" << 1 );
- result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
- delete result.getValue();
-
- query = BSON( "x" << 5 << "y" << BSON( "$isolated" << 1 ) );
- result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
-
- StatusWith<int> fib( int n ) {
- if ( n < 0 ) return StatusWith<int>( ErrorCodes::BadValue, "paramter to fib has to be >= 0" );
- if ( n <= 1 ) return StatusWith<int>( 1 );
- StatusWith<int> a = fib( n - 1 );
- StatusWith<int> b = fib( n - 2 );
- if ( !a.isOK() ) return a;
- if ( !b.isOK() ) return b;
- return StatusWith<int>( a.getValue() + b.getValue() );
- }
-
- TEST( StatusWithTest, Fib1 ) {
- StatusWith<int> x = fib( -2 );
- ASSERT( !x.isOK() );
-
- x = fib(0);
- ASSERT( x.isOK() );
- ASSERT( 1 == x.getValue() );
-
- x = fib(1);
- ASSERT( x.isOK() );
- ASSERT( 1 == x.getValue() );
-
- x = fib(2);
- ASSERT( x.isOK() );
- ASSERT( 2 == x.getValue() );
-
- x = fib(3);
- ASSERT( x.isOK() );
- ASSERT( 3 == x.getValue() );
-
-
- }
+TEST(MatchExpressionParserTest, SimpleEQ1) {
+ BSONObj query = BSON("x" << 2);
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 3)));
+
+ delete result.getValue();
+}
+
+TEST(MatchExpressionParserTest, Multiple1) {
+ BSONObj query = BSON("x" << 5 << "y" << BSON("$gt" << 5 << "$lt" << 8));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 5 << "y" << 7)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 5 << "y" << 6)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 6 << "y" << 7)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5 << "y" << 9)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5 << "y" << 4)));
+
+ delete result.getValue();
+}
+
+TEST(AtomicMatchExpressionTest, Simple1) {
+ BSONObj query = BSON("x" << 5 << "$atomic" << BSON("$gt" << 5 << "$lt" << 8));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ delete result.getValue();
+
+ query = BSON("x" << 5 << "$isolated" << 1);
+ result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+ delete result.getValue();
+
+ query = BSON("x" << 5 << "y" << BSON("$isolated" << 1));
+ result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+StatusWith<int> fib(int n) {
+ if (n < 0)
+ return StatusWith<int>(ErrorCodes::BadValue, "parameter to fib has to be >= 0");
+ if (n <= 1)
+ return StatusWith<int>(1);
+ StatusWith<int> a = fib(n - 1);
+ StatusWith<int> b = fib(n - 2);
+ if (!a.isOK())
+ return a;
+ if (!b.isOK())
+ return b;
+ return StatusWith<int>(a.getValue() + b.getValue());
+}
+
+TEST(StatusWithTest, Fib1) {
+ StatusWith<int> x = fib(-2);
+ ASSERT(!x.isOK());
+
+ x = fib(0);
+ ASSERT(x.isOK());
+ ASSERT(1 == x.getValue());
+
+ x = fib(1);
+ ASSERT(x.isOK());
+ ASSERT(1 == x.getValue());
+
+ x = fib(2);
+ ASSERT(x.isOK());
+ ASSERT(2 == x.getValue());
+
+ x = fib(3);
+ ASSERT(x.isOK());
+ ASSERT(3 == x.getValue());
+}
}
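
The fib() helper above is also a compact illustration of the StatusWith<T> idiom: check isOK(), return the failing StatusWith unchanged so the error propagates, and only combine getValue() results on success. A minimal sketch of the same pattern outside the test; parseNonNegative and sumNonNegative are hypothetical names, not part of this patch:

    StatusWith<int> parseNonNegative(int raw) {
        if (raw < 0)
            return StatusWith<int>(ErrorCodes::BadValue, "value must be >= 0");
        return StatusWith<int>(raw);
    }

    StatusWith<int> sumNonNegative(int a, int b) {
        StatusWith<int> lhs = parseNonNegative(a);
        if (!lhs.isOK())
            return lhs;  // propagate the failing status unchanged
        StatusWith<int> rhs = parseNonNegative(b);
        if (!rhs.isOK())
            return rhs;
        return StatusWith<int>(lhs.getValue() + rhs.getValue());
    }
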
diff --git a/src/mongo/db/matcher/expression_parser_text.cpp b/src/mongo/db/matcher/expression_parser_text.cpp
index f8b40a1823b..de46f9e1169 100644
--- a/src/mongo/db/matcher/expression_parser_text.cpp
+++ b/src/mongo/db/matcher/expression_parser_text.cpp
@@ -37,64 +37,62 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
- StatusWithMatchExpression expressionParserTextCallbackReal( const BSONObj& queryObj ) {
- // Validate queryObj, but defer construction of FTSQuery (which requires access to the
- // target namespace) until stage building time.
+StatusWithMatchExpression expressionParserTextCallbackReal(const BSONObj& queryObj) {
+ // Validate queryObj, but defer construction of FTSQuery (which requires access to the
+ // target namespace) until stage building time.
- int expectedFieldCount = 1;
+ int expectedFieldCount = 1;
- if ( mongo::String != queryObj["$search"].type() ) {
- return StatusWithMatchExpression( ErrorCodes::TypeMismatch,
- "$search requires a string value" );
- }
+ if (mongo::String != queryObj["$search"].type()) {
+ return StatusWithMatchExpression(ErrorCodes::TypeMismatch,
+ "$search requires a string value");
+ }
- string language = "";
- BSONElement languageElt = queryObj["$language"];
- if ( !languageElt.eoo() ) {
- expectedFieldCount++;
- if ( mongo::String != languageElt.type() ) {
- return StatusWithMatchExpression( ErrorCodes::TypeMismatch,
- "$language requires a string value" );
- }
- language = languageElt.String();
- Status status =
- fts::FTSLanguage::make( language, fts::TEXT_INDEX_VERSION_2 ).getStatus();
- if ( !status.isOK() ) {
- return StatusWithMatchExpression( ErrorCodes::BadValue,
- "$language specifies unsupported language" );
- }
+ string language = "";
+ BSONElement languageElt = queryObj["$language"];
+ if (!languageElt.eoo()) {
+ expectedFieldCount++;
+ if (mongo::String != languageElt.type()) {
+ return StatusWithMatchExpression(ErrorCodes::TypeMismatch,
+ "$language requires a string value");
}
- string query = queryObj["$search"].String();
-
- BSONElement caseSensitiveElt = queryObj["$caseSensitive"];
- bool caseSensitive = fts::FTSQuery::caseSensitiveDefault;
- if ( !caseSensitiveElt.eoo() ) {
- expectedFieldCount++;
- if ( mongo::Bool != caseSensitiveElt.type() ) {
- return StatusWithMatchExpression( ErrorCodes::TypeMismatch,
- "$caseSensitive requires a boolean value" );
- }
- caseSensitive = caseSensitiveElt.trueValue();
+ language = languageElt.String();
+ Status status = fts::FTSLanguage::make(language, fts::TEXT_INDEX_VERSION_2).getStatus();
+ if (!status.isOK()) {
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "$language specifies unsupported language");
}
+ }
+ string query = queryObj["$search"].String();
- if ( queryObj.nFields() != expectedFieldCount ) {
- return StatusWithMatchExpression( ErrorCodes::BadValue, "extra fields in $text" );
+ BSONElement caseSensitiveElt = queryObj["$caseSensitive"];
+ bool caseSensitive = fts::FTSQuery::caseSensitiveDefault;
+ if (!caseSensitiveElt.eoo()) {
+ expectedFieldCount++;
+ if (mongo::Bool != caseSensitiveElt.type()) {
+ return StatusWithMatchExpression(ErrorCodes::TypeMismatch,
+ "$caseSensitive requires a boolean value");
}
+ caseSensitive = caseSensitiveElt.trueValue();
+ }
- unique_ptr<TextMatchExpression> e( new TextMatchExpression() );
- Status s = e->init( query, language, caseSensitive );
- if ( !s.isOK() ) {
- return StatusWithMatchExpression( s );
- }
- return StatusWithMatchExpression( e.release() );
+ if (queryObj.nFields() != expectedFieldCount) {
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "extra fields in $text");
}
- MONGO_INITIALIZER( MatchExpressionParserText )( ::mongo::InitializerContext* context ) {
- expressionParserTextCallback = expressionParserTextCallbackReal;
- return Status::OK();
+ unique_ptr<TextMatchExpression> e(new TextMatchExpression());
+ Status s = e->init(query, language, caseSensitive);
+ if (!s.isOK()) {
+ return StatusWithMatchExpression(s);
}
+ return StatusWithMatchExpression(e.release());
+}
+MONGO_INITIALIZER(MatchExpressionParserText)(::mongo::InitializerContext* context) {
+ expressionParserTextCallback = expressionParserTextCallbackReal;
+ return Status::OK();
+}
}
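
As reformatted above, expressionParserTextCallbackReal only validates the shape of the $text object — $search must be a string, $language (if present) must name a supported language, $caseSensitive (if present) must be a boolean, and nFields() must equal the count of recognized fields — while the FTSQuery itself is built later at stage-building time. A short sketch of inputs that pass and fail that validation, assuming the callback has been registered by the MONGO_INITIALIZER above; the field name $unknown is made up for illustration:

    StatusWithMatchExpression ok =
        MatchExpressionParser::parse(fromjson("{$text: {$search: \"coffee\", $caseSensitive: true}}"));
    ASSERT_TRUE(ok.isOK());
    delete ok.getValue();  // parse() returns an owned raw pointer

    // TypeMismatch: $search must be a string.
    ASSERT_FALSE(MatchExpressionParser::parse(fromjson("{$text: {$search: 5}}")).isOK());

    // BadValue: an unrecognized field makes nFields() exceed expectedFieldCount.
    ASSERT_FALSE(
        MatchExpressionParser::parse(fromjson("{$text: {$search: \"coffee\", $unknown: 1}}")).isOK());
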
diff --git a/src/mongo/db/matcher/expression_parser_text_test.cpp b/src/mongo/db/matcher/expression_parser_text_test.cpp
index 2933c825b08..fe5eaebc0f7 100644
--- a/src/mongo/db/matcher/expression_parser_text_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_text_test.cpp
@@ -39,55 +39,55 @@
namespace mongo {
- TEST( MatchExpressionParserText, Basic ) {
- BSONObj query = fromjson( "{$text: {$search:\"awesome\", $language:\"english\"}}" );
-
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT_EQUALS( MatchExpression::TEXT, result.getValue()->matchType() );
- std::unique_ptr<TextMatchExpression> textExp(
- static_cast<TextMatchExpression*>( result.getValue() ) );
- ASSERT_EQUALS( textExp->getQuery(), "awesome" );
- ASSERT_EQUALS( textExp->getLanguage(), "english" );
- ASSERT_EQUALS( textExp->getCaseSensitive(), fts::FTSQuery::caseSensitiveDefault );
- }
+TEST(MatchExpressionParserText, Basic) {
+ BSONObj query = fromjson("{$text: {$search:\"awesome\", $language:\"english\"}}");
+
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT_EQUALS(MatchExpression::TEXT, result.getValue()->matchType());
+ std::unique_ptr<TextMatchExpression> textExp(
+ static_cast<TextMatchExpression*>(result.getValue()));
+ ASSERT_EQUALS(textExp->getQuery(), "awesome");
+ ASSERT_EQUALS(textExp->getLanguage(), "english");
+ ASSERT_EQUALS(textExp->getCaseSensitive(), fts::FTSQuery::caseSensitiveDefault);
+}
- TEST( MatchExpressionParserText, LanguageError ) {
- BSONObj query = fromjson( "{$text: {$search:\"awesome\", $language:\"spanglish\"}}" );
+TEST(MatchExpressionParserText, LanguageError) {
+ BSONObj query = fromjson("{$text: {$search:\"awesome\", $language:\"spanglish\"}}");
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
- TEST( MatchExpressionParserText, CaseSensitiveTrue ) {
- BSONObj query = fromjson( "{$text: {$search:\"awesome\", $caseSensitive: true}}" );
+TEST(MatchExpressionParserText, CaseSensitiveTrue) {
+ BSONObj query = fromjson("{$text: {$search:\"awesome\", $caseSensitive: true}}");
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
- ASSERT_EQUALS( MatchExpression::TEXT, result.getValue()->matchType() );
- std::unique_ptr<TextMatchExpression> textExp(
- static_cast<TextMatchExpression*>( result.getValue() ) );
- ASSERT_EQUALS( textExp->getCaseSensitive(), true );
- }
+ ASSERT_EQUALS(MatchExpression::TEXT, result.getValue()->matchType());
+ std::unique_ptr<TextMatchExpression> textExp(
+ static_cast<TextMatchExpression*>(result.getValue()));
+ ASSERT_EQUALS(textExp->getCaseSensitive(), true);
+}
- TEST( MatchExpressionParserText, CaseSensitiveFalse ) {
- BSONObj query = fromjson( "{$text: {$search:\"awesome\", $caseSensitive: false}}" );
+TEST(MatchExpressionParserText, CaseSensitiveFalse) {
+ BSONObj query = fromjson("{$text: {$search:\"awesome\", $caseSensitive: false}}");
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
- ASSERT_EQUALS( MatchExpression::TEXT, result.getValue()->matchType() );
- std::unique_ptr<TextMatchExpression> textExp(
- static_cast<TextMatchExpression*>( result.getValue() ) );
- ASSERT_EQUALS( textExp->getCaseSensitive(), false );
- }
+ ASSERT_EQUALS(MatchExpression::TEXT, result.getValue()->matchType());
+ std::unique_ptr<TextMatchExpression> textExp(
+ static_cast<TextMatchExpression*>(result.getValue()));
+ ASSERT_EQUALS(textExp->getCaseSensitive(), false);
+}
- TEST( MatchExpressionParserText, CaseSensitiveError ) {
- BSONObj query = fromjson( "{$text:{$search:\"awesome\", $caseSensitive: 0}}" );
+TEST(MatchExpressionParserText, CaseSensitiveError) {
+ BSONObj query = fromjson("{$text:{$search:\"awesome\", $caseSensitive: 0}}");
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
- }
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
}
diff --git a/src/mongo/db/matcher/expression_parser_tree.cpp b/src/mongo/db/matcher/expression_parser_tree.cpp
index 7c5359e6318..9a3149a8c5e 100644
--- a/src/mongo/db/matcher/expression_parser_tree.cpp
+++ b/src/mongo/db/matcher/expression_parser_tree.cpp
@@ -39,71 +39,68 @@
namespace mongo {
- // static
- const int MatchExpressionParser::kMaximumTreeDepth = 100;
-
- Status MatchExpressionParser::_parseTreeList( const BSONObj& arr,
- ListOfMatchExpression* out,
- int level ) {
- if ( arr.isEmpty() )
- return Status( ErrorCodes::BadValue,
- "$and/$or/$nor must be a nonempty array" );
-
- BSONObjIterator i( arr );
- while ( i.more() ) {
- BSONElement e = i.next();
-
- if ( e.type() != Object )
- return Status( ErrorCodes::BadValue,
- "$or/$and/$nor entries need to be full objects" );
-
- StatusWithMatchExpression sub = _parse( e.Obj(), level );
- if ( !sub.isOK() )
- return sub.getStatus();
-
- out->add( sub.getValue() );
- }
- return Status::OK();
+// static
+const int MatchExpressionParser::kMaximumTreeDepth = 100;
+
+Status MatchExpressionParser::_parseTreeList(const BSONObj& arr,
+ ListOfMatchExpression* out,
+ int level) {
+ if (arr.isEmpty())
+ return Status(ErrorCodes::BadValue, "$and/$or/$nor must be a nonempty array");
+
+ BSONObjIterator i(arr);
+ while (i.more()) {
+ BSONElement e = i.next();
+
+ if (e.type() != Object)
+ return Status(ErrorCodes::BadValue, "$or/$and/$nor entries need to be full objects");
+
+ StatusWithMatchExpression sub = _parse(e.Obj(), level);
+ if (!sub.isOK())
+ return sub.getStatus();
+
+ out->add(sub.getValue());
}
+ return Status::OK();
+}
- StatusWithMatchExpression MatchExpressionParser::_parseNot( const char* name,
- const BSONElement& e,
- int level ) {
- if ( e.type() == RegEx ) {
- StatusWithMatchExpression s = _parseRegexElement( name, e );
- if ( !s.isOK() )
- return s;
- std::unique_ptr<NotMatchExpression> n( new NotMatchExpression() );
- Status s2 = n->init( s.getValue() );
- if ( !s2.isOK() )
- return StatusWithMatchExpression( s2 );
- return StatusWithMatchExpression( n.release() );
- }
-
- if ( e.type() != Object )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$not needs a regex or a document" );
-
- BSONObj notObject = e.Obj();
- if ( notObject.isEmpty() )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$not cannot be empty" );
-
- std::unique_ptr<AndMatchExpression> theAnd( new AndMatchExpression() );
- Status s = _parseSub( name, notObject, theAnd.get(), level );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
-
- // TODO: this seems arbitrary?
- // tested in jstests/not2.js
- for ( unsigned i = 0; i < theAnd->numChildren(); i++ )
- if ( theAnd->getChild(i)->matchType() == MatchExpression::REGEX )
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$not cannot have a regex" );
-
- std::unique_ptr<NotMatchExpression> theNot( new NotMatchExpression() );
- s = theNot->init( theAnd.release() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
-
- return StatusWithMatchExpression( theNot.release() );
+StatusWithMatchExpression MatchExpressionParser::_parseNot(const char* name,
+ const BSONElement& e,
+ int level) {
+ if (e.type() == RegEx) {
+ StatusWithMatchExpression s = _parseRegexElement(name, e);
+ if (!s.isOK())
+ return s;
+ std::unique_ptr<NotMatchExpression> n(new NotMatchExpression());
+ Status s2 = n->init(s.getValue());
+ if (!s2.isOK())
+ return StatusWithMatchExpression(s2);
+ return StatusWithMatchExpression(n.release());
}
+ if (e.type() != Object)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$not needs a regex or a document");
+
+ BSONObj notObject = e.Obj();
+ if (notObject.isEmpty())
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$not cannot be empty");
+
+ std::unique_ptr<AndMatchExpression> theAnd(new AndMatchExpression());
+ Status s = _parseSub(name, notObject, theAnd.get(), level);
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+
+ // TODO: this seems arbitrary?
+ // tested in jstests/not2.js
+ for (unsigned i = 0; i < theAnd->numChildren(); i++)
+ if (theAnd->getChild(i)->matchType() == MatchExpression::REGEX)
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$not cannot have a regex");
+
+ std::unique_ptr<NotMatchExpression> theNot(new NotMatchExpression());
+ s = theNot->init(theAnd.release());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+
+ return StatusWithMatchExpression(theNot.release());
+}
}
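
_parseNot above accepts either a regex element or a non-empty document, and — per the TODO it carries — still rejects a document whose parsed children contain a regex. A small sketch of the three shapes, written as a hypothetical test in the style of the parser tests (the test name is invented):

    TEST(MatchExpressionParserTreeTest, NotArgumentShapesSketch) {
        // $not with a regex argument is accepted.
        BSONObjBuilder regexNot;
        regexNot.appendRegex("$not", "abc", "i");
        StatusWithMatchExpression withRegex = MatchExpressionParser::parse(BSON("x" << regexNot.obj()));
        ASSERT_TRUE(withRegex.isOK());
        delete withRegex.getValue();

        // "$not cannot be empty".
        ASSERT_FALSE(MatchExpressionParser::parse(BSON("x" << BSON("$not" << BSONObj()))).isOK());

        // "$not cannot have a regex" inside a document argument.
        ASSERT_FALSE(MatchExpressionParser::parse(fromjson("{x: {$not: {$regex: \"abc\"}}}")).isOK());
    }
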
diff --git a/src/mongo/db/matcher/expression_parser_tree_test.cpp b/src/mongo/db/matcher/expression_parser_tree_test.cpp
index a5cc3356463..a650803d283 100644
--- a/src/mongo/db/matcher/expression_parser_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_tree_test.cpp
@@ -39,163 +39,161 @@
namespace mongo {
- TEST( MatchExpressionParserTreeTest, OR1 ) {
- BSONObj query = BSON( "$or" << BSON_ARRAY( BSON( "x" << 1 ) <<
- BSON( "y" << 2 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
-
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "y" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "y" << 1 ) ) );
-
- delete result.getValue();
- }
+TEST(MatchExpressionParserTreeTest, OR1) {
+ BSONObj query = BSON("$or" << BSON_ARRAY(BSON("x" << 1) << BSON("y" << 2)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
- TEST( MatchExpressionParserTreeTest, OREmbedded ) {
- BSONObj query1 = BSON( "$or" << BSON_ARRAY( BSON( "x" << 1 ) <<
- BSON( "y" << 2 ) ) );
- BSONObj query2 = BSON( "$or" << BSON_ARRAY( query1 ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query2 );
- ASSERT_TRUE( result.isOK() );
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(result.getValue()->matchesBSON(BSON("y" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 3)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("y" << 1)));
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "y" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "y" << 1 ) ) );
+ delete result.getValue();
+}
- delete result.getValue();
- }
+TEST(MatchExpressionParserTreeTest, OREmbedded) {
+ BSONObj query1 = BSON("$or" << BSON_ARRAY(BSON("x" << 1) << BSON("y" << 2)));
+ BSONObj query2 = BSON("$or" << BSON_ARRAY(query1));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query2);
+ ASSERT_TRUE(result.isOK());
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(result.getValue()->matchesBSON(BSON("y" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 3)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("y" << 1)));
- TEST( MatchExpressionParserTreeTest, AND1 ) {
- BSONObj query = BSON( "$and" << BSON_ARRAY( BSON( "x" << 1 ) <<
- BSON( "y" << 2 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
+ delete result.getValue();
+}
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "y" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "y" << 1 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 1 << "y" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 2 << "y" << 2 ) ) );
- delete result.getValue();
- }
+TEST(MatchExpressionParserTreeTest, AND1) {
+ BSONObj query = BSON("$and" << BSON_ARRAY(BSON("x" << 1) << BSON("y" << 2)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
- TEST( MatchExpressionParserTreeTest, NOREmbedded ) {
- BSONObj query = BSON( "$nor" << BSON_ARRAY( BSON( "x" << 1 ) <<
- BSON( "y" << 2 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("y" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 3)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("y" << 1)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 1 << "y" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 2 << "y" << 2)));
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 1 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "y" << 2 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 3 ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "y" << 1 ) ) );
+ delete result.getValue();
+}
- delete result.getValue();
- }
+TEST(MatchExpressionParserTreeTest, NOREmbedded) {
+ BSONObj query = BSON("$nor" << BSON_ARRAY(BSON("x" << 1) << BSON("y" << 2)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
- TEST( MatchExpressionParserTreeTest, NOT1 ) {
- BSONObj query = BSON( "x" << BSON( "$not" << BSON( "$gt" << 5 ) ) );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 1)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("y" << 2)));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 3)));
+ ASSERT(result.getValue()->matchesBSON(BSON("y" << 1)));
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << 2 ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << 8 ) ) );
+ delete result.getValue();
+}
- delete result.getValue();
- }
+TEST(MatchExpressionParserTreeTest, NOT1) {
+ BSONObj query = BSON("x" << BSON("$not" << BSON("$gt" << 5)));
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
- // Test a deep match tree that is not deep enough to hit the maximum depth limit.
- TEST( MatchExpressionParserTreeTest, MaximumTreeDepthNotExceed ) {
- static const int depth = 60;
-
- std::stringstream ss;
- for (int i = 0; i < depth/2; i++) {
- ss << "{$and: [{a: 3}, {$or: [{b: 2},";
- }
- ss << "{b: 4}";
- for (int i = 0; i < depth/2; i++) {
- ss << "]}]}";
- }
-
- BSONObj query = fromjson( ss.str() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT( result.isOK() );
- delete result.getValue();
- }
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << 2)));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x" << 8)));
- // Test a tree that exceeds the maximum depth limit.
- TEST( MatchExpressionParserTreeTest, MaximumTreeDepthExceed ) {
- static const int depth = 105;
-
- std::stringstream ss;
- for (int i = 0; i < depth/2; i++) {
- ss << "{$and: [{a: 3}, {$or: [{b: 2},";
- }
- ss << "{b: 4}";
- for (int i = 0; i < depth/2; i++) {
- ss << "]}]}";
- }
-
- BSONObj query = fromjson( ss.str() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
+ delete result.getValue();
+}
+
+// Test a deep match tree that is not deep enough to hit the maximum depth limit.
+TEST(MatchExpressionParserTreeTest, MaximumTreeDepthNotExceed) {
+ static const int depth = 60;
+
+ std::stringstream ss;
+ for (int i = 0; i < depth / 2; i++) {
+ ss << "{$and: [{a: 3}, {$or: [{b: 2},";
+ }
+ ss << "{b: 4}";
+ for (int i = 0; i < depth / 2; i++) {
+ ss << "]}]}";
}
- // We should also exceed the depth limit through deeply nested $not.
- TEST( MatchExpressionParserTreeTest, MaximumTreeDepthExceededNestedNots ) {
- static const int depth = 105;
-
- std::stringstream ss;
- ss << "{a: ";
- for (int i = 0; i < depth; i++) {
- ss << "{$not: ";
- }
- ss << "{$eq: 5}";
- for (int i = 0; i < depth+1; i++) {
- ss << "}";
- }
-
- BSONObj query = fromjson( ss.str() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
+ BSONObj query = fromjson(ss.str());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT(result.isOK());
+ delete result.getValue();
+}
+
+// Test a tree that exceeds the maximum depth limit.
+TEST(MatchExpressionParserTreeTest, MaximumTreeDepthExceed) {
+ static const int depth = 105;
+
+ std::stringstream ss;
+ for (int i = 0; i < depth / 2; i++) {
+ ss << "{$and: [{a: 3}, {$or: [{b: 2},";
+ }
+ ss << "{b: 4}";
+ for (int i = 0; i < depth / 2; i++) {
+ ss << "]}]}";
}
- // Depth limit with nested $elemMatch object.
- TEST( MatchExpressionParserTreeTest, MaximumTreeDepthExceededNestedElemMatch ) {
- static const int depth = 105;
-
- std::stringstream ss;
- for (int i = 0; i < depth; i++) {
- ss << "{a: {$elemMatch: ";
- }
- ss << "{b: 5}";
- for (int i = 0; i < depth; i++) {
- ss << "}}";
- }
-
- BSONObj query = fromjson( ss.str() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_FALSE( result.isOK() );
+ BSONObj query = fromjson(ss.str());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+// We should also exceed the depth limit through deeply nested $not.
+TEST(MatchExpressionParserTreeTest, MaximumTreeDepthExceededNestedNots) {
+ static const int depth = 105;
+
+ std::stringstream ss;
+ ss << "{a: ";
+ for (int i = 0; i < depth; i++) {
+ ss << "{$not: ";
+ }
+ ss << "{$eq: 5}";
+ for (int i = 0; i < depth + 1; i++) {
+ ss << "}";
}
- TEST( MatchExpressionParserLeafTest, NotRegex1 ) {
- BSONObjBuilder b;
- b.appendRegex( "$not", "abc", "i" );
- BSONObj query = BSON( "x" << b.obj() );
- StatusWithMatchExpression result = MatchExpressionParser::parse( query );
- ASSERT_TRUE( result.isOK() );
+ BSONObj query = fromjson(ss.str());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "abc" ) ) );
- ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "ABC" ) ) );
- ASSERT( result.getValue()->matchesBSON( BSON( "x" << "AC" ) ) );
+// Depth limit with nested $elemMatch object.
+TEST(MatchExpressionParserTreeTest, MaximumTreeDepthExceededNestedElemMatch) {
+ static const int depth = 105;
- delete result.getValue();
+ std::stringstream ss;
+ for (int i = 0; i < depth; i++) {
+ ss << "{a: {$elemMatch: ";
+ }
+ ss << "{b: 5}";
+ for (int i = 0; i < depth; i++) {
+ ss << "}}";
}
+ BSONObj query = fromjson(ss.str());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_FALSE(result.isOK());
+}
+
+TEST(MatchExpressionParserLeafTest, NotRegex1) {
+ BSONObjBuilder b;
+ b.appendRegex("$not", "abc", "i");
+ BSONObj query = BSON("x" << b.obj());
+ StatusWithMatchExpression result = MatchExpressionParser::parse(query);
+ ASSERT_TRUE(result.isOK());
+
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "abc")));
+ ASSERT(!result.getValue()->matchesBSON(BSON("x"
+ << "ABC")));
+ ASSERT(result.getValue()->matchesBSON(BSON("x"
+ << "AC")));
+
+ delete result.getValue();
+}
}
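
Both depth tests above build their input with the same stringstream loop; only the depth constant differs relative to kMaximumTreeDepth (100, defined in expression_parser_tree.cpp). A hedged refactoring sketch — makeNestedAndOrQuery is a name invented here, not part of the patch:

    std::string makeNestedAndOrQuery(int depth) {
        // Builds the same {$and: [{a: 3}, {$or: [{b: 2}, ...]}]} chain the two tests use.
        std::stringstream ss;
        for (int i = 0; i < depth / 2; i++) {
            ss << "{$and: [{a: 3}, {$or: [{b: 2},";
        }
        ss << "{b: 4}";
        for (int i = 0; i < depth / 2; i++) {
            ss << "]}]}";
        }
        return ss.str();
    }

    // Depths within the limit parse; deeper trees are rejected.
    StatusWithMatchExpression shallow = MatchExpressionParser::parse(fromjson(makeNestedAndOrQuery(60)));
    ASSERT_TRUE(shallow.isOK());
    delete shallow.getValue();
    ASSERT_FALSE(MatchExpressionParser::parse(fromjson(makeNestedAndOrQuery(105))).isOK());
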
diff --git a/src/mongo/db/matcher/expression_test.cpp b/src/mongo/db/matcher/expression_test.cpp
index 62b4631a717..90d50cf5f47 100644
--- a/src/mongo/db/matcher/expression_test.cpp
+++ b/src/mongo/db/matcher/expression_test.cpp
@@ -39,69 +39,66 @@
namespace mongo {
- TEST( MatchExpressionTest, Parse1 ) {
- //TreeMatchExpression* e = NULL;
- //Status s = MatchExpression::parse( BSON( "x" << 1 ), &e );
- //ASSERT_TRUE( s.isOK() );
- }
+TEST(MatchExpressionTest, Parse1) {
+ // TreeMatchExpression* e = NULL;
+ // Status s = MatchExpression::parse( BSON( "x" << 1 ), &e );
+ // ASSERT_TRUE( s.isOK() );
+}
- TEST( LeafMatchExpressionTest, Equal1 ) {
- BSONObj temp = BSON( "x" << 5 );
- EqualityMatchExpression e;
- e.init( "x", temp["x"] );
+TEST(LeafMatchExpressionTest, Equal1) {
+ BSONObj temp = BSON("x" << 5);
+ EqualityMatchExpression e;
+ e.init("x", temp["x"]);
+
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : [5] }")));
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : [1,5] }")));
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : [1,5,2] }")));
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : [5,2] }")));
+
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : null }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 6 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : [4,2] }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : [[5]] }")));
+}
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : 5 }" ) ) );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : [5] }" ) ) );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : [1,5] }" ) ) );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : [1,5,2] }" ) ) );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : [5,2] }" ) ) );
+TEST(LeafMatchExpressionTest, Comp1) {
+ BSONObj temp = BSON("x" << 5);
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : null }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 6 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : [4,2] }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : [[5]] }" ) ) );
+ {
+ LTEMatchExpression e;
+ e.init("x", temp["x"]);
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 4 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 6 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 'eliot' }")));
}
- TEST( LeafMatchExpressionTest, Comp1 ) {
- BSONObj temp = BSON( "x" << 5 );
-
- {
- LTEMatchExpression e;
- e.init( "x", temp["x"] );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : 5 }" ) ) );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : 4 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 6 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 'eliot' }" ) ) );
- }
-
- {
- LTMatchExpression e;
- e.init( "x", temp["x"] );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 5 }" ) ) );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : 4 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 6 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 'eliot' }" ) ) );
- }
-
- {
- GTEMatchExpression e;
- e.init( "x", temp["x"] );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : 5 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 4 }" ) ) );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : 6 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 'eliot' }" ) ) );
- }
-
- {
- GTMatchExpression e;
- e.init( "x", temp["x"] );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 5 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 4 }" ) ) );
- ASSERT_TRUE( e.matchesBSON( fromjson( "{ x : 6 }" ) ) );
- ASSERT_FALSE( e.matchesBSON( fromjson( "{ x : 'eliot' }" ) ) );
- }
-
+ {
+ LTMatchExpression e;
+ e.init("x", temp["x"]);
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 5 }")));
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 4 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 6 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 'eliot' }")));
+ }
+ {
+ GTEMatchExpression e;
+ e.init("x", temp["x"]);
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 4 }")));
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 6 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 'eliot' }")));
}
+ {
+ GTMatchExpression e;
+ e.init("x", temp["x"]);
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 5 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 4 }")));
+ ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 6 }")));
+ ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 'eliot' }")));
+ }
+}
}
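
Comp1 above exercises each comparison leaf the same way: default-construct, init() with a path and the BSONElement to compare against, then call matchesBSON(); the array traversal shown in Equal1 (a scalar predicate matches if any array element matches) applies to these operators too. A short sketch of the same construction pattern, with a made-up field and bound for illustration:

    BSONObj bound = BSON("y" << 10);
    GTEMatchExpression gte;
    ASSERT_TRUE(gte.init("y", bound["y"]).isOK());

    ASSERT_TRUE(gte.matchesBSON(fromjson("{ y : 10 }")));
    ASSERT_TRUE(gte.matchesBSON(fromjson("{ y : [3, 12] }")));  // any array element may satisfy the bound
    ASSERT_FALSE(gte.matchesBSON(fromjson("{ y : 9 }")));
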
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index 320136c8751..e1f1ccd065f 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -33,72 +33,67 @@
namespace mongo {
- using std::string;
+using std::string;
- Status TextMatchExpression::init( const string& query,
- const string& language,
- bool caseSensitive ) {
- _query = query;
- _language = language;
- _caseSensitive = caseSensitive;
- return initPath( "_fts" );
- }
-
- bool TextMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- // See ops/update.cpp.
- // This node is removed by the query planner. It's only ever called if we're getting an
- // elemMatchKey.
- return true;
- }
+Status TextMatchExpression::init(const string& query, const string& language, bool caseSensitive) {
+ _query = query;
+ _language = language;
+ _caseSensitive = caseSensitive;
+ return initPath("_fts");
+}
- void TextMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace(debug, level);
- debug << "TEXT : query=" << _query << ", language="
- << _language << ", caseSensitive="
- << _caseSensitive << ", tag=";
- MatchExpression::TagData* td = getTag();
- if ( NULL != td ) {
- td->debugString( &debug );
- }
- else {
- debug << "NULL";
- }
- debug << "\n";
- }
+bool TextMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ // See ops/update.cpp.
+ // This node is removed by the query planner. It's only ever called if we're getting an
+ // elemMatchKey.
+ return true;
+}
- void TextMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append("$text", BSON("$search" << _query <<
- "$language" << _language <<
- "$caseSensitive" << _caseSensitive));
+void TextMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "TEXT : query=" << _query << ", language=" << _language
+ << ", caseSensitive=" << _caseSensitive << ", tag=";
+ MatchExpression::TagData* td = getTag();
+ if (NULL != td) {
+ td->debugString(&debug);
+ } else {
+ debug << "NULL";
}
+ debug << "\n";
+}
- bool TextMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() ) {
- return false;
- }
- const TextMatchExpression* realOther = static_cast<const TextMatchExpression*>( other );
+void TextMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$text",
+ BSON("$search" << _query << "$language" << _language << "$caseSensitive"
+ << _caseSensitive));
+}
- // TODO This is way too crude. It looks for string equality, but it should be looking for
- // common parsed form
- if ( realOther->getQuery() != _query ) {
- return false;
- }
- if ( realOther->getLanguage() != _language ) {
- return false;
- }
- if ( realOther->getCaseSensitive() != _caseSensitive ) {
- return false;
- }
- return true;
+bool TextMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType()) {
+ return false;
}
+ const TextMatchExpression* realOther = static_cast<const TextMatchExpression*>(other);
- LeafMatchExpression* TextMatchExpression::shallowClone() const {
- TextMatchExpression* next = new TextMatchExpression();
- next->init( _query, _language, _caseSensitive );
- if ( getTag() ) {
- next->setTag( getTag()->clone() );
- }
- return next;
+ // TODO This is way too crude. It looks for string equality, but it should be looking for
+ // common parsed form
+ if (realOther->getQuery() != _query) {
+ return false;
+ }
+ if (realOther->getLanguage() != _language) {
+ return false;
+ }
+ if (realOther->getCaseSensitive() != _caseSensitive) {
+ return false;
}
+ return true;
+}
+LeafMatchExpression* TextMatchExpression::shallowClone() const {
+ TextMatchExpression* next = new TextMatchExpression();
+ next->init(_query, _language, _caseSensitive);
+ if (getTag()) {
+ next->setTag(getTag()->clone());
+ }
+ return next;
+}
}
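
equivalent() above compares the raw query string, language, and case-sensitivity flag (the TODO notes this is cruder than comparing parsed forms), and shallowClone() re-runs init() with the same three values and copies any tag. A minimal sketch of what that implies; the query strings are arbitrary examples:

    TextMatchExpression a;
    ASSERT_TRUE(a.init("coffee", "english", false).isOK());
    TextMatchExpression b;
    ASSERT_TRUE(b.init("coffee", "english", false).isOK());
    ASSERT_TRUE(a.equivalent(&b));

    // Same words, different raw string: not equivalent under plain string comparison.
    TextMatchExpression c;
    ASSERT_TRUE(c.init("coffee ", "english", false).isOK());
    ASSERT_FALSE(a.equivalent(&c));

    // A shallow clone carries the same query/language/caseSensitive and stays equivalent.
    std::unique_ptr<LeafMatchExpression> clone(a.shallowClone());
    ASSERT_TRUE(a.equivalent(clone.get()));
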
diff --git a/src/mongo/db/matcher/expression_text.h b/src/mongo/db/matcher/expression_text.h
index 8d853de5621..3841bf4a608 100644
--- a/src/mongo/db/matcher/expression_text.h
+++ b/src/mongo/db/matcher/expression_text.h
@@ -36,30 +36,37 @@
namespace mongo {
- class TextMatchExpression : public LeafMatchExpression {
- public:
- TextMatchExpression() : LeafMatchExpression( TEXT ) {}
- virtual ~TextMatchExpression() {}
+class TextMatchExpression : public LeafMatchExpression {
+public:
+ TextMatchExpression() : LeafMatchExpression(TEXT) {}
+ virtual ~TextMatchExpression() {}
- Status init( const std::string& query, const std::string& language, bool caseSensitive );
+ Status init(const std::string& query, const std::string& language, bool caseSensitive);
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual bool equivalent( const MatchExpression* other ) const;
+ virtual bool equivalent(const MatchExpression* other) const;
- virtual LeafMatchExpression* shallowClone() const;
+ virtual LeafMatchExpression* shallowClone() const;
- const std::string& getQuery() const { return _query; }
- const std::string& getLanguage() const { return _language; }
- bool getCaseSensitive() const { return _caseSensitive; }
- private:
- std::string _query;
- std::string _language;
- bool _caseSensitive;
- };
+ const std::string& getQuery() const {
+ return _query;
+ }
+ const std::string& getLanguage() const {
+ return _language;
+ }
+ bool getCaseSensitive() const {
+ return _caseSensitive;
+ }
-} // namespace mongo
+private:
+ std::string _query;
+ std::string _language;
+ bool _caseSensitive;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp
index 84c929f4c37..bc07445d0cc 100644
--- a/src/mongo/db/matcher/expression_tree.cpp
+++ b/src/mongo/db/matcher/expression_tree.cpp
@@ -36,164 +36,163 @@
namespace mongo {
- ListOfMatchExpression::~ListOfMatchExpression() {
- for ( unsigned i = 0; i < _expressions.size(); i++ )
- delete _expressions[i];
- _expressions.clear();
- }
+ListOfMatchExpression::~ListOfMatchExpression() {
+ for (unsigned i = 0; i < _expressions.size(); i++)
+ delete _expressions[i];
+ _expressions.clear();
+}
- void ListOfMatchExpression::add( MatchExpression* e ) {
- verify( e );
- _expressions.push_back( e );
- }
+void ListOfMatchExpression::add(MatchExpression* e) {
+ verify(e);
+ _expressions.push_back(e);
+}
- void ListOfMatchExpression::_debugList( StringBuilder& debug, int level ) const {
- for ( unsigned i = 0; i < _expressions.size(); i++ )
- _expressions[i]->debugString( debug, level + 1 );
- }
+void ListOfMatchExpression::_debugList(StringBuilder& debug, int level) const {
+ for (unsigned i = 0; i < _expressions.size(); i++)
+ _expressions[i]->debugString(debug, level + 1);
+}
- void ListOfMatchExpression::_listToBSON(BSONArrayBuilder* out) const {
- for ( unsigned i = 0; i < _expressions.size(); i++ ) {
- BSONObjBuilder childBob(out->subobjStart());
- _expressions[i]->toBSON(&childBob);
- }
- out->doneFast();
+void ListOfMatchExpression::_listToBSON(BSONArrayBuilder* out) const {
+ for (unsigned i = 0; i < _expressions.size(); i++) {
+ BSONObjBuilder childBob(out->subobjStart());
+ _expressions[i]->toBSON(&childBob);
}
+ out->doneFast();
+}
- bool ListOfMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+bool ListOfMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
- const ListOfMatchExpression* realOther = static_cast<const ListOfMatchExpression*>( other );
+ const ListOfMatchExpression* realOther = static_cast<const ListOfMatchExpression*>(other);
- if ( _expressions.size() != realOther->_expressions.size() )
- return false;
+ if (_expressions.size() != realOther->_expressions.size())
+ return false;
- // TOOD: order doesn't matter
- for ( unsigned i = 0; i < _expressions.size(); i++ )
- if ( !_expressions[i]->equivalent( realOther->_expressions[i] ) )
- return false;
+ // TODO: order doesn't matter
+ for (unsigned i = 0; i < _expressions.size(); i++)
+ if (!_expressions[i]->equivalent(realOther->_expressions[i]))
+ return false;
- return true;
- }
+ return true;
+}
- // -----
+// -----
- bool AndMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
- for ( size_t i = 0; i < numChildren(); i++ ) {
- if ( !getChild(i)->matches( doc, details ) ) {
- if ( details )
- details->resetOutput();
- return false;
- }
+bool AndMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const {
+ for (size_t i = 0; i < numChildren(); i++) {
+ if (!getChild(i)->matches(doc, details)) {
+ if (details)
+ details->resetOutput();
+ return false;
}
- return true;
}
+ return true;
+}
- bool AndMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- for ( size_t i = 0; i < numChildren(); i++ ) {
- if ( !getChild(i)->matchesSingleElement( e ) ) {
- return false;
- }
+bool AndMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ for (size_t i = 0; i < numChildren(); i++) {
+ if (!getChild(i)->matchesSingleElement(e)) {
+ return false;
}
- return true;
}
+ return true;
+}
- void AndMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "$and\n";
- _debugList( debug, level );
- }
+void AndMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "$and\n";
+ _debugList(debug, level);
+}
- void AndMatchExpression::toBSON(BSONObjBuilder* out) const {
- BSONArrayBuilder arrBob(out->subarrayStart("$and"));
- _listToBSON(&arrBob);
- }
+void AndMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONArrayBuilder arrBob(out->subarrayStart("$and"));
+ _listToBSON(&arrBob);
+}
- // -----
+// -----
- bool OrMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
- for ( size_t i = 0; i < numChildren(); i++ ) {
- if ( getChild(i)->matches( doc, NULL ) ) {
- return true;
- }
+bool OrMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const {
+ for (size_t i = 0; i < numChildren(); i++) {
+ if (getChild(i)->matches(doc, NULL)) {
+ return true;
}
- return false;
}
+ return false;
+}
- bool OrMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- for ( size_t i = 0; i < numChildren(); i++ ) {
- if ( getChild(i)->matchesSingleElement( e ) ) {
- return true;
- }
+bool OrMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ for (size_t i = 0; i < numChildren(); i++) {
+ if (getChild(i)->matchesSingleElement(e)) {
+ return true;
}
- return false;
}
+ return false;
+}
- void OrMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "$or\n";
- _debugList( debug, level );
- }
+void OrMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "$or\n";
+ _debugList(debug, level);
+}
- void OrMatchExpression::toBSON(BSONObjBuilder* out) const {
- BSONArrayBuilder arrBob(out->subarrayStart("$or"));
- _listToBSON(&arrBob);
- }
+void OrMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONArrayBuilder arrBob(out->subarrayStart("$or"));
+ _listToBSON(&arrBob);
+}
- // ----
+// ----
- bool NorMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
- for ( size_t i = 0; i < numChildren(); i++ ) {
- if ( getChild(i)->matches( doc, NULL ) ) {
- return false;
- }
+bool NorMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const {
+ for (size_t i = 0; i < numChildren(); i++) {
+ if (getChild(i)->matches(doc, NULL)) {
+ return false;
}
- return true;
}
+ return true;
+}
- bool NorMatchExpression::matchesSingleElement( const BSONElement& e ) const {
- for ( size_t i = 0; i < numChildren(); i++ ) {
- if ( getChild(i)->matchesSingleElement( e ) ) {
- return false;
- }
+bool NorMatchExpression::matchesSingleElement(const BSONElement& e) const {
+ for (size_t i = 0; i < numChildren(); i++) {
+ if (getChild(i)->matchesSingleElement(e)) {
+ return false;
}
- return true;
- }
-
- void NorMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "$nor\n";
- _debugList( debug, level );
}
+ return true;
+}
- void NorMatchExpression::toBSON(BSONObjBuilder* out) const {
- BSONArrayBuilder arrBob(out->subarrayStart("$nor"));
- _listToBSON(&arrBob);
- }
+void NorMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "$nor\n";
+ _debugList(debug, level);
+}
- // -------
+void NorMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONArrayBuilder arrBob(out->subarrayStart("$nor"));
+ _listToBSON(&arrBob);
+}
- void NotMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "$not\n";
- _exp->debugString( debug, level + 1 );
- }
+// -------
- void NotMatchExpression::toBSON(BSONObjBuilder* out) const {
- BSONObjBuilder childBob(out->subobjStart("$not"));
- _exp->toBSON(&childBob);
- childBob.doneFast();
- }
+void NotMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "$not\n";
+ _exp->debugString(debug, level + 1);
+}
- bool NotMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
+void NotMatchExpression::toBSON(BSONObjBuilder* out) const {
+ BSONObjBuilder childBob(out->subobjStart("$not"));
+ _exp->toBSON(&childBob);
+ childBob.doneFast();
+}
- return _exp->equivalent( other->getChild(0) );
- }
+bool NotMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
+ return _exp->equivalent(other->getChild(0));
+}
}
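
The reformatted tree nodes keep their original semantics: AndMatchExpression requires every child to match (resetting MatchDetails on failure), OrMatchExpression succeeds on the first matching child, NorMatchExpression negates that, and add() takes ownership of the raw child pointer, which the ListOfMatchExpression destructor deletes. A small ownership/usage sketch in the style of the tree tests below; the operands are arbitrary:

    BSONObj gtOperand = BSON("$gt" << 1);
    BSONObj ltOperand = BSON("$lt" << 10);

    std::unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression());
    ASSERT(gt->init("a", gtOperand["$gt"]).isOK());
    std::unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression());
    ASSERT(lt->init("a", ltOperand["$lt"]).isOK());

    AndMatchExpression andOp;  // owns its children once added
    andOp.add(gt.release());
    andOp.add(lt.release());

    ASSERT(andOp.matchesBSON(BSON("a" << 5), NULL));
    ASSERT(!andOp.matchesBSON(BSON("a" << 0), NULL));
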
diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h
index 6e35af8df72..5069045727c 100644
--- a/src/mongo/db/matcher/expression_tree.h
+++ b/src/mongo/db/matcher/expression_tree.h
@@ -39,157 +39,172 @@
*/
namespace mongo {
- class ListOfMatchExpression : public MatchExpression {
- public:
- ListOfMatchExpression( MatchType type ) : MatchExpression( type ){}
- virtual ~ListOfMatchExpression();
-
- /**
- * @param e - I take ownership
- */
- void add( MatchExpression* e );
-
- /**
- * clears all the thingsd we own, and does NOT delete
- * someone else has taken ownership
- */
- void clearAndRelease() { _expressions.clear(); }
-
- virtual size_t numChildren() const { return _expressions.size(); }
-
- virtual MatchExpression* getChild( size_t i ) const { return _expressions[i]; }
-
- virtual std::vector<MatchExpression*>* getChildVector() { return &_expressions; }
-
- bool equivalent( const MatchExpression* other ) const;
-
- protected:
- void _debugList( StringBuilder& debug, int level ) const;
-
- void _listToBSON(BSONArrayBuilder* out) const;
-
- private:
- std::vector< MatchExpression* > _expressions;
- };
-
- class AndMatchExpression : public ListOfMatchExpression {
- public:
- AndMatchExpression() : ListOfMatchExpression( AND ){}
- virtual ~AndMatchExpression(){}
-
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const;
- virtual bool matchesSingleElement( const BSONElement& e ) const;
-
- virtual MatchExpression* shallowClone() const {
- AndMatchExpression* self = new AndMatchExpression();
- for (size_t i = 0; i < numChildren(); ++i) {
- self->add(getChild(i)->shallowClone());
- }
- if ( getTag() ) {
- self->setTag(getTag()->clone());
- }
- return self;
+class ListOfMatchExpression : public MatchExpression {
+public:
+ ListOfMatchExpression(MatchType type) : MatchExpression(type) {}
+ virtual ~ListOfMatchExpression();
+
+ /**
+ * @param e - I take ownership
+ */
+ void add(MatchExpression* e);
+
+ /**
+ * clears all the things we own, and does NOT delete them;
+ * someone else has taken ownership
+ */
+ void clearAndRelease() {
+ _expressions.clear();
+ }
+
+ virtual size_t numChildren() const {
+ return _expressions.size();
+ }
+
+ virtual MatchExpression* getChild(size_t i) const {
+ return _expressions[i];
+ }
+
+ virtual std::vector<MatchExpression*>* getChildVector() {
+ return &_expressions;
+ }
+
+ bool equivalent(const MatchExpression* other) const;
+
+protected:
+ void _debugList(StringBuilder& debug, int level) const;
+
+ void _listToBSON(BSONArrayBuilder* out) const;
+
+private:
+ std::vector<MatchExpression*> _expressions;
+};
+
+class AndMatchExpression : public ListOfMatchExpression {
+public:
+ AndMatchExpression() : ListOfMatchExpression(AND) {}
+ virtual ~AndMatchExpression() {}
+
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const;
+ virtual bool matchesSingleElement(const BSONElement& e) const;
+
+ virtual MatchExpression* shallowClone() const {
+ AndMatchExpression* self = new AndMatchExpression();
+ for (size_t i = 0; i < numChildren(); ++i) {
+ self->add(getChild(i)->shallowClone());
+ }
+ if (getTag()) {
+ self->setTag(getTag()->clone());
}
+ return self;
+ }
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
- virtual void toBSON(BSONObjBuilder* out) const;
- };
+ virtual void toBSON(BSONObjBuilder* out) const;
+};
- class OrMatchExpression : public ListOfMatchExpression {
- public:
- OrMatchExpression() : ListOfMatchExpression( OR ){}
- virtual ~OrMatchExpression(){}
+class OrMatchExpression : public ListOfMatchExpression {
+public:
+ OrMatchExpression() : ListOfMatchExpression(OR) {}
+ virtual ~OrMatchExpression() {}
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const;
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const;
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- virtual MatchExpression* shallowClone() const {
- OrMatchExpression* self = new OrMatchExpression();
- for (size_t i = 0; i < numChildren(); ++i) {
- self->add(getChild(i)->shallowClone());
- }
- if ( getTag() ) {
- self->setTag(getTag()->clone());
- }
- return self;
+ virtual MatchExpression* shallowClone() const {
+ OrMatchExpression* self = new OrMatchExpression();
+ for (size_t i = 0; i < numChildren(); ++i) {
+ self->add(getChild(i)->shallowClone());
}
+ if (getTag()) {
+ self->setTag(getTag()->clone());
+ }
+ return self;
+ }
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
- virtual void toBSON(BSONObjBuilder* out) const;
- };
+ virtual void toBSON(BSONObjBuilder* out) const;
+};
- class NorMatchExpression : public ListOfMatchExpression {
- public:
- NorMatchExpression() : ListOfMatchExpression( NOR ){}
- virtual ~NorMatchExpression(){}
+class NorMatchExpression : public ListOfMatchExpression {
+public:
+ NorMatchExpression() : ListOfMatchExpression(NOR) {}
+ virtual ~NorMatchExpression() {}
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const;
- virtual bool matchesSingleElement( const BSONElement& e ) const;
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const;
+ virtual bool matchesSingleElement(const BSONElement& e) const;
- virtual MatchExpression* shallowClone() const {
- NorMatchExpression* self = new NorMatchExpression();
- for (size_t i = 0; i < numChildren(); ++i) {
- self->add(getChild(i)->shallowClone());
- }
- if ( getTag() ) {
- self->setTag(getTag()->clone());
- }
- return self;
+ virtual MatchExpression* shallowClone() const {
+ NorMatchExpression* self = new NorMatchExpression();
+ for (size_t i = 0; i < numChildren(); ++i) {
+ self->add(getChild(i)->shallowClone());
}
-
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
-
- virtual void toBSON(BSONObjBuilder* out) const;
- };
-
- class NotMatchExpression : public MatchExpression {
- public:
- NotMatchExpression() : MatchExpression( NOT ){}
- NotMatchExpression( MatchExpression* e ) : MatchExpression( NOT ), _exp( e ){}
- /**
- * @param exp - I own it, and will delete
- */
- virtual Status init( MatchExpression* exp ) {
- _exp.reset( exp );
- return Status::OK();
+ if (getTag()) {
+ self->setTag(getTag()->clone());
}
-
- virtual MatchExpression* shallowClone() const {
- NotMatchExpression* self = new NotMatchExpression();
- MatchExpression* child = _exp->shallowClone();
- self->init(child);
- if ( getTag() ) {
- self->setTag(getTag()->clone());
- }
- return self;
- }
-
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const {
- return !_exp->matches( doc, NULL );
+ return self;
+ }
+
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
+
+ virtual void toBSON(BSONObjBuilder* out) const;
+};
+
+class NotMatchExpression : public MatchExpression {
+public:
+ NotMatchExpression() : MatchExpression(NOT) {}
+ NotMatchExpression(MatchExpression* e) : MatchExpression(NOT), _exp(e) {}
+ /**
+ * @param exp - I own it, and will delete
+ */
+ virtual Status init(MatchExpression* exp) {
+ _exp.reset(exp);
+ return Status::OK();
+ }
+
+ virtual MatchExpression* shallowClone() const {
+ NotMatchExpression* self = new NotMatchExpression();
+ MatchExpression* child = _exp->shallowClone();
+ self->init(child);
+ if (getTag()) {
+ self->setTag(getTag()->clone());
}
+ return self;
+ }
- virtual bool matchesSingleElement( const BSONElement& e ) const {
- return !_exp->matchesSingleElement( e );
- }
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const {
+ return !_exp->matches(doc, NULL);
+ }
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual bool matchesSingleElement(const BSONElement& e) const {
+ return !_exp->matchesSingleElement(e);
+ }
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
- bool equivalent( const MatchExpression* other ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual size_t numChildren() const { return 1; }
+ bool equivalent(const MatchExpression* other) const;
- virtual MatchExpression* getChild( size_t i ) const { return _exp.get(); }
+ virtual size_t numChildren() const {
+ return 1;
+ }
- MatchExpression* releaseChild(void) { return _exp.release(); }
+ virtual MatchExpression* getChild(size_t i) const {
+ return _exp.get();
+ }
- void resetChild( MatchExpression* newChild) { _exp.reset(newChild); }
+ MatchExpression* releaseChild(void) {
+ return _exp.release();
+ }
- private:
- std::unique_ptr<MatchExpression> _exp;
- };
+ void resetChild(MatchExpression* newChild) {
+ _exp.reset(newChild);
+ }
+private:
+ std::unique_ptr<MatchExpression> _exp;
+};
}
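
NotMatchExpression in the header above owns its single child through a std::unique_ptr: init() takes ownership, releaseChild() hands it back to the caller, and resetChild() swaps it. A brief sketch of that contract, reusing the LT leaf pattern from the surrounding tests:

    BSONObj operand = BSON("$lt" << 5);
    std::unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression());
    ASSERT(lt->init("a", operand["$lt"]).isOK());

    NotMatchExpression notOp;
    ASSERT(notOp.init(lt.release()).isOK());  // notOp now owns the LT child
    ASSERT(notOp.matchesBSON(BSON("a" << 6), NULL));
    ASSERT(1U == notOp.numChildren());

    // releaseChild() transfers ownership back; wrap it so it still gets deleted.
    std::unique_ptr<MatchExpression> child(notOp.releaseChild());
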
diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp
index 72e9766404a..a63439a1de2 100644
--- a/src/mongo/db/matcher/expression_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_tree_test.cpp
@@ -38,536 +38,536 @@
namespace mongo {
- using std::unique_ptr;
-
- TEST( NotMatchExpression, MatchesScalar ) {
- BSONObj baseOperand = BSON( "$lt" << 5 );
- unique_ptr<ComparisonMatchExpression> lt( new LTMatchExpression() );
- ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- NotMatchExpression notOp;
- ASSERT( notOp.init( lt.release() ).isOK() );
- ASSERT( notOp.matchesBSON( BSON( "a" << 6 ), NULL ) );
- ASSERT( !notOp.matchesBSON( BSON( "a" << 4 ), NULL ) );
- }
-
- TEST( NotMatchExpression, MatchesArray ) {
- BSONObj baseOperand = BSON( "$lt" << 5 );
- unique_ptr<ComparisonMatchExpression> lt( new LTMatchExpression() );
- ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- NotMatchExpression notOp;
- ASSERT( notOp.init( lt.release() ).isOK() );
- ASSERT( notOp.matchesBSON( BSON( "a" << BSON_ARRAY( 6 ) ), NULL ) );
- ASSERT( !notOp.matchesBSON( BSON( "a" << BSON_ARRAY( 4 ) ), NULL ) );
- // All array elements must match.
- ASSERT( !notOp.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5 << 6 ) ), NULL ) );
- }
-
- TEST( NotMatchExpression, ElemMatchKey ) {
- BSONObj baseOperand = BSON( "$lt" << 5 );
- unique_ptr<ComparisonMatchExpression> lt( new LTMatchExpression() );
- ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- NotMatchExpression notOp;
- ASSERT( notOp.init( lt.release() ).isOK() );
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !notOp.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( notOp.matchesBSON( BSON( "a" << 6 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( notOp.matchesBSON( BSON( "a" << BSON_ARRAY( 6 ) ), &details ) );
- // elemMatchKey is not implemented for negative match operators.
- ASSERT( !details.hasElemMatchKey() );
- }
- /*
- TEST( NotMatchExpression, MatchesIndexKey ) {
- BSONObj baseOperand = BSON( "$lt" << 5 );
- unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
- ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- NotMatchExpression notOp;
- ASSERT( notOp.init( lt.release() ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- BSONObj indexKey = BSON( "" << "7" );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- notOp.matchesIndexKey( indexKey, indexSpec ) );
- }
- */
-
- /**
- TEST( AndOp, MatchesElementSingleClause ) {
- BSONObj baseOperand = BSON( "$lt" << 5 );
- BSONObj match = BSON( "a" << 4 );
- BSONObj notMatch = BSON( "a" << 5 );
- unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
- ASSERT( lt->init( "", baseOperand[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( lt.release() );
- AndOp andOp;
- ASSERT( andOp.init( &subMatchExpressions ).isOK() );
- ASSERT( andOp.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !andOp.matchesSingleElement( notMatch[ "a" ] ) );
- }
- */
-
- TEST( AndOp, NoClauses ) {
- AndMatchExpression andMatchExpression;
- ASSERT( andMatchExpression.matchesBSON( BSONObj(), NULL ) );
- }
-
- TEST( AndOp, MatchesElementThreeClauses ) {
- BSONObj baseOperand1 = BSON( "$lt" << "z1" );
- BSONObj baseOperand2 = BSON( "$gt" << "a1" );
- BSONObj match = BSON( "a" << "r1" );
- BSONObj notMatch1 = BSON( "a" << "z1" );
- BSONObj notMatch2 = BSON( "a" << "a1" );
- BSONObj notMatch3 = BSON( "a" << "r" );
-
- unique_ptr<ComparisonMatchExpression> sub1( new LTMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$lt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new GTMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$gt" ] ).isOK() );
- unique_ptr<RegexMatchExpression> sub3( new RegexMatchExpression() );
- ASSERT( sub3->init( "a", "1", "" ).isOK() );
-
- AndMatchExpression andOp;
- andOp.add( sub1.release() );
- andOp.add( sub2.release() );
- andOp.add( sub3.release() );
-
- ASSERT( andOp.matchesBSON( match ) );
- ASSERT( !andOp.matchesBSON( notMatch1 ) );
- ASSERT( !andOp.matchesBSON( notMatch2 ) );
- ASSERT( !andOp.matchesBSON( notMatch3 ) );
- }
-
- TEST( AndOp, MatchesSingleClause ) {
- BSONObj baseOperand = BSON( "$ne" << 5 );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "a", baseOperand[ "$ne" ] ).isOK() );
- unique_ptr<NotMatchExpression> ne( new NotMatchExpression() );
- ASSERT( ne->init( eq.release() ).isOK() );
-
- AndMatchExpression andOp;
- andOp.add( ne.release() );
-
- ASSERT( andOp.matchesBSON( BSON( "a" << 4 ), NULL ) );
- ASSERT( andOp.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 6 ) ), NULL ) );
- ASSERT( !andOp.matchesBSON( BSON( "a" << 5 ), NULL ) );
- ASSERT( !andOp.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5 ) ), NULL ) );
- }
-
- TEST( AndOp, MatchesThreeClauses ) {
- BSONObj baseOperand1 = BSON( "$gt" << 1 );
- BSONObj baseOperand2 = BSON( "$lt" << 10 );
- BSONObj baseOperand3 = BSON( "$lt" << 100 );
-
- unique_ptr<ComparisonMatchExpression> sub1( new GTMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
-
- unique_ptr<ComparisonMatchExpression> sub2( new LTMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
-
- unique_ptr<ComparisonMatchExpression> sub3( new LTMatchExpression() );
- ASSERT( sub3->init( "b", baseOperand3[ "$lt" ] ).isOK() );
-
- AndMatchExpression andOp;
- andOp.add( sub1.release() );
- andOp.add( sub2.release() );
- andOp.add( sub3.release() );
-
- ASSERT( andOp.matchesBSON( BSON( "a" << 5 << "b" << 6 ), NULL ) );
- ASSERT( !andOp.matchesBSON( BSON( "a" << 5 ), NULL ) );
- ASSERT( !andOp.matchesBSON( BSON( "b" << 6 ), NULL ) );
- ASSERT( !andOp.matchesBSON( BSON( "a" << 1 << "b" << 6 ), NULL ) );
- ASSERT( !andOp.matchesBSON( BSON( "a" << 10 << "b" << 6 ), NULL ) );
- }
-
- TEST( AndOp, ElemMatchKey ) {
- BSONObj baseOperand1 = BSON( "a" << 1 );
- BSONObj baseOperand2 = BSON( "b" << 2 );
-
- unique_ptr<ComparisonMatchExpression> sub1( new EqualityMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "a" ] ).isOK() );
-
- unique_ptr<ComparisonMatchExpression> sub2( new EqualityMatchExpression() );
- ASSERT( sub2->init( "b", baseOperand2[ "b" ] ).isOK() );
-
- AndMatchExpression andOp;
- andOp.add( sub1.release() );
- andOp.add( sub2.release() );
-
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !andOp.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( !andOp.matchesBSON( BSON( "b" << BSON_ARRAY( 2 ) ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( andOp.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) << "b" << BSON_ARRAY( 1 << 2 ) ),
- &details ) );
- ASSERT( details.hasElemMatchKey() );
- // The elem match key for the second $and clause is recorded.
- ASSERT_EQUALS( "1", details.elemMatchKey() );
- }
-
- /**
- TEST( AndOp, MatchesIndexKeyWithoutUnknown ) {
- BSONObj baseOperand1 = BSON( "$gt" << 1 );
- BSONObj baseOperand2 = BSON( "$lt" << 5 );
- unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( sub1.release() );
- subMatchExpressions.mutableVector().push_back( sub2.release() );
- AndOp andOp;
- ASSERT( andOp.init( &subMatchExpressions ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- andOp.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- andOp.matchesIndexKey( BSON( "" << 0 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- andOp.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- }
-
- TEST( AndOp, MatchesIndexKeyWithUnknown ) {
- BSONObj baseOperand1 = BSON( "$gt" << 1 );
- BSONObj baseOperand2 = BSON( "$lt" << 5 );
- // This part will return PartialMatchResult_Unknown.
- BSONObj baseOperand3 = BSON( "$ne" << 5 );
- unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
- unique_ptr<NeOp> sub3( new NeOp() );
- ASSERT( sub3->init( "a", baseOperand3[ "$ne" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( sub1.release() );
- subMatchExpressions.mutableVector().push_back( sub2.release() );
- subMatchExpressions.mutableVector().push_back( sub3.release() );
- AndOp andOp;
- ASSERT( andOp.init( &subMatchExpressions ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- andOp.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- andOp.matchesIndexKey( BSON( "" << 0 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- andOp.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- }
- */
-
- /**
- TEST( OrOp, MatchesElementSingleClause ) {
- BSONObj baseOperand = BSON( "$lt" << 5 );
- BSONObj match = BSON( "a" << 4 );
- BSONObj notMatch = BSON( "a" << 5 );
- unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
- ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( lt.release() );
- OrOp orOp;
- ASSERT( orOp.init( &subMatchExpressions ).isOK() );
- ASSERT( orOp.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !orOp.matchesSingleElement( notMatch[ "a" ] ) );
- }
- */
-
- TEST( OrOp, NoClauses ) {
- OrMatchExpression orOp;
- ASSERT( !orOp.matchesBSON( BSONObj(), NULL ) );
- }
- /*
- TEST( OrOp, MatchesElementThreeClauses ) {
- BSONObj baseOperand1 = BSON( "$lt" << 0 );
- BSONObj baseOperand2 = BSON( "$gt" << 10 );
- BSONObj baseOperand3 = BSON( "a" << 5 );
- BSONObj match1 = BSON( "a" << -1 );
- BSONObj match2 = BSON( "a" << 11 );
- BSONObj match3 = BSON( "a" << 5 );
- BSONObj notMatch = BSON( "a" << "6" );
- unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$lt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub3( new ComparisonMatchExpression() );
- ASSERT( sub3->init( "a", baseOperand3[ "a" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( sub1.release() );
- subMatchExpressions.mutableVector().push_back( sub2.release() );
- subMatchExpressions.mutableVector().push_back( sub3.release() );
- OrOp orOp;
- ASSERT( orOp.init( &subMatchExpressions ).isOK() );
- ASSERT( orOp.matchesSingleElement( match1[ "a" ] ) );
- ASSERT( orOp.matchesSingleElement( match2[ "a" ] ) );
- ASSERT( orOp.matchesSingleElement( match3[ "a" ] ) );
- ASSERT( !orOp.matchesSingleElement( notMatch[ "a" ] ) );
- }
- */
- TEST( OrOp, MatchesSingleClause ) {
- BSONObj baseOperand = BSON( "$ne" << 5 );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "a", baseOperand[ "$ne" ] ).isOK() );
- unique_ptr<NotMatchExpression> ne( new NotMatchExpression() );
- ASSERT( ne->init( eq.release() ).isOK() );
-
- OrMatchExpression orOp;
- orOp.add( ne.release() );
-
- ASSERT( orOp.matchesBSON( BSON( "a" << 4 ), NULL ) );
- ASSERT( orOp.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 6 ) ), NULL ) );
- ASSERT( !orOp.matchesBSON( BSON( "a" << 5 ), NULL ) );
- ASSERT( !orOp.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5 ) ), NULL ) );
- }
-
- TEST( OrOp, MatchesThreeClauses ) {
- BSONObj baseOperand1 = BSON( "$gt" << 10 );
- BSONObj baseOperand2 = BSON( "$lt" << 0 );
- BSONObj baseOperand3 = BSON( "b" << 100 );
- unique_ptr<ComparisonMatchExpression> sub1( new GTMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new LTMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub3( new EqualityMatchExpression() );
- ASSERT( sub3->init( "b", baseOperand3[ "b" ] ).isOK() );
-
- OrMatchExpression orOp;
- orOp.add( sub1.release() );
- orOp.add( sub2.release() );
- orOp.add( sub3.release() );
-
- ASSERT( orOp.matchesBSON( BSON( "a" << -1 ), NULL ) );
- ASSERT( orOp.matchesBSON( BSON( "a" << 11 ), NULL ) );
- ASSERT( !orOp.matchesBSON( BSON( "a" << 5 ), NULL ) );
- ASSERT( orOp.matchesBSON( BSON( "b" << 100 ), NULL ) );
- ASSERT( !orOp.matchesBSON( BSON( "b" << 101 ), NULL ) );
- ASSERT( !orOp.matchesBSON( BSONObj(), NULL ) );
- ASSERT( orOp.matchesBSON( BSON( "a" << 11 << "b" << 100 ), NULL ) );
- }
-
- TEST( OrOp, ElemMatchKey ) {
- BSONObj baseOperand1 = BSON( "a" << 1 );
- BSONObj baseOperand2 = BSON( "b" << 2 );
- unique_ptr<ComparisonMatchExpression> sub1( new EqualityMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "a" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new EqualityMatchExpression() );
- ASSERT( sub2->init( "b", baseOperand2[ "b" ] ).isOK() );
-
- OrMatchExpression orOp;
- orOp.add( sub1.release() );
- orOp.add( sub2.release() );
-
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !orOp.matchesBSON( BSONObj(), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( !orOp.matchesBSON( BSON( "a" << BSON_ARRAY( 10 ) << "b" << BSON_ARRAY( 10 ) ),
- &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( orOp.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) << "b" << BSON_ARRAY( 1 << 2 ) ),
- &details ) );
- // The elem match key feature is not implemented for $or.
- ASSERT( !details.hasElemMatchKey() );
- }
-
- /**
- TEST( OrOp, MatchesIndexKeyWithoutUnknown ) {
- BSONObj baseOperand1 = BSON( "$gt" << 5 );
- BSONObj baseOperand2 = BSON( "$lt" << 1 );
- unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( sub1.release() );
- subMatchExpressions.mutableVector().push_back( sub2.release() );
- OrOp orOp;
- ASSERT( orOp.init( &subMatchExpressions ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_False ==
- orOp.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- orOp.matchesIndexKey( BSON( "" << 0 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- orOp.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- }
-
- TEST( OrOp, MatchesIndexKeyWithUnknown ) {
- BSONObj baseOperand1 = BSON( "$gt" << 5 );
- BSONObj baseOperand2 = BSON( "$lt" << 1 );
- // This part will return PartialMatchResult_Unknown.
- BSONObj baseOperand3 = BSON( "$ne" << 5 );
- unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
- unique_ptr<NeOp> sub3( new NeOp() );
- ASSERT( sub3->init( "a", baseOperand3[ "$ne" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( sub1.release() );
- subMatchExpressions.mutableVector().push_back( sub2.release() );
- subMatchExpressions.mutableVector().push_back( sub3.release() );
- OrOp orOp;
- ASSERT( orOp.init( &subMatchExpressions ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- orOp.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- orOp.matchesIndexKey( BSON( "" << 0 ), indexSpec ) );
- ASSERT( MatchMatchExpression::PartialMatchResult_True ==
- orOp.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
- }
- */
-
- /**
- TEST( NorOp, MatchesElementSingleClause ) {
- BSONObj baseOperand = BSON( "$lt" << 5 );
- BSONObj match = BSON( "a" << 5 );
- BSONObj notMatch = BSON( "a" << 4 );
- unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
- ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( lt.release() );
- NorOp norOp;
- ASSERT( norOp.init( &subMatchExpressions ).isOK() );
- ASSERT( norOp.matchesSingleElement( match[ "a" ] ) );
- ASSERT( !norOp.matchesSingleElement( notMatch[ "a" ] ) );
- }
- */
-
- TEST( NorOp, NoClauses ) {
- NorMatchExpression norOp;
- ASSERT( norOp.matchesBSON( BSONObj(), NULL ) );
- }
- /*
- TEST( NorOp, MatchesElementThreeClauses ) {
- BSONObj baseOperand1 = BSON( "$lt" << 0 );
- BSONObj baseOperand2 = BSON( "$gt" << 10 );
- BSONObj baseOperand3 = BSON( "a" << 5 );
- BSONObj notMatch1 = BSON( "a" << -1 );
- BSONObj notMatch2 = BSON( "a" << 11 );
- BSONObj notMatch3 = BSON( "a" << 5 );
- BSONObj match = BSON( "a" << "6" );
- unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$lt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub3( new ComparisonMatchExpression() );
- ASSERT( sub3->init( "a", baseOperand3[ "a" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( sub1.release() );
- subMatchExpressions.mutableVector().push_back( sub2.release() );
- subMatchExpressions.mutableVector().push_back( sub3.release() );
- NorOp norOp;
- ASSERT( norOp.init( &subMatchExpressions ).isOK() );
- ASSERT( !norOp.matchesSingleElement( notMatch1[ "a" ] ) );
- ASSERT( !norOp.matchesSingleElement( notMatch2[ "a" ] ) );
- ASSERT( !norOp.matchesSingleElement( notMatch3[ "a" ] ) );
- ASSERT( norOp.matchesSingleElement( match[ "a" ] ) );
- }
- */
-
- TEST( NorOp, MatchesSingleClause ) {
- BSONObj baseOperand = BSON( "$ne" << 5 );
- unique_ptr<ComparisonMatchExpression> eq( new EqualityMatchExpression() );
- ASSERT( eq->init( "a", baseOperand[ "$ne" ] ).isOK() );
- unique_ptr<NotMatchExpression> ne( new NotMatchExpression() );
- ASSERT( ne->init( eq.release() ).isOK() );
-
- NorMatchExpression norOp;
- norOp.add( ne.release() );
-
- ASSERT( !norOp.matchesBSON( BSON( "a" << 4 ), NULL ) );
- ASSERT( !norOp.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 6 ) ), NULL ) );
- ASSERT( norOp.matchesBSON( BSON( "a" << 5 ), NULL ) );
- ASSERT( norOp.matchesBSON( BSON( "a" << BSON_ARRAY( 4 << 5 ) ), NULL ) );
- }
-
- TEST( NorOp, MatchesThreeClauses ) {
- BSONObj baseOperand1 = BSON( "$gt" << 10 );
- BSONObj baseOperand2 = BSON( "$lt" << 0 );
- BSONObj baseOperand3 = BSON( "b" << 100 );
-
- unique_ptr<ComparisonMatchExpression> sub1( new GTMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new LTMatchExpression() );
- ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub3( new EqualityMatchExpression() );
- ASSERT( sub3->init( "b", baseOperand3[ "b" ] ).isOK() );
-
- NorMatchExpression norOp;
- norOp.add( sub1.release() );
- norOp.add( sub2.release() );
- norOp.add( sub3.release() );
-
- ASSERT( !norOp.matchesBSON( BSON( "a" << -1 ), NULL ) );
- ASSERT( !norOp.matchesBSON( BSON( "a" << 11 ), NULL ) );
- ASSERT( norOp.matchesBSON( BSON( "a" << 5 ), NULL ) );
- ASSERT( !norOp.matchesBSON( BSON( "b" << 100 ), NULL ) );
- ASSERT( norOp.matchesBSON( BSON( "b" << 101 ), NULL ) );
- ASSERT( norOp.matchesBSON( BSONObj(), NULL ) );
- ASSERT( !norOp.matchesBSON( BSON( "a" << 11 << "b" << 100 ), NULL ) );
- }
-
- TEST( NorOp, ElemMatchKey ) {
- BSONObj baseOperand1 = BSON( "a" << 1 );
- BSONObj baseOperand2 = BSON( "b" << 2 );
- unique_ptr<ComparisonMatchExpression> sub1( new EqualityMatchExpression() );
- ASSERT( sub1->init( "a", baseOperand1[ "a" ] ).isOK() );
- unique_ptr<ComparisonMatchExpression> sub2( new EqualityMatchExpression() );
- ASSERT( sub2->init( "b", baseOperand2[ "b" ] ).isOK() );
-
- NorMatchExpression norOp;
- norOp.add( sub1.release() );
- norOp.add( sub2.release() );
-
- MatchDetails details;
- details.requestElemMatchKey();
- ASSERT( !norOp.matchesBSON( BSON( "a" << 1 ), &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( !norOp.matchesBSON( BSON( "a" << BSON_ARRAY( 1 ) << "b" << BSON_ARRAY( 10 ) ),
- &details ) );
- ASSERT( !details.hasElemMatchKey() );
- ASSERT( norOp.matchesBSON( BSON( "a" << BSON_ARRAY( 3 ) << "b" << BSON_ARRAY( 4 ) ),
- &details ) );
- // The elem match key feature is not implemented for $nor.
- ASSERT( !details.hasElemMatchKey() );
- }
-
-
- TEST( NorOp, Equivalent ) {
- BSONObj baseOperand1 = BSON( "a" << 1 );
- BSONObj baseOperand2 = BSON( "b" << 2 );
- EqualityMatchExpression sub1;
- ASSERT( sub1.init( "a", baseOperand1[ "a" ] ).isOK() );
- EqualityMatchExpression sub2;
- ASSERT( sub2.init( "b", baseOperand2[ "b" ] ).isOK() );
-
- NorMatchExpression e1;
- e1.add( sub1.shallowClone() );
- e1.add( sub2.shallowClone() );
-
- NorMatchExpression e2;
- e2.add( sub1.shallowClone() );
-
- ASSERT( e1.equivalent( &e1 ) );
- ASSERT( !e1.equivalent( &e2 ) );
- }
-
- /**
- TEST( NorOp, MatchesIndexKey ) {
- BSONObj baseOperand = BSON( "a" << 5 );
- unique_ptr<ComparisonMatchExpression> eq( new ComparisonMatchExpression() );
- ASSERT( eq->init( "a", baseOperand[ "a" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
- subMatchExpressions.mutableVector().push_back( eq.release() );
- NorOp norOp;
- ASSERT( norOp.init( &subMatchExpressions ).isOK() );
- IndexSpec indexSpec( BSON( "a" << 1 ) );
- BSONObj indexKey = BSON( "" << "7" );
- ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
- norOp.matchesIndexKey( indexKey, indexSpec ) );
- }
- */
+using std::unique_ptr;
+
+TEST(NotMatchExpression, MatchesScalar) {
+ BSONObj baseOperand = BSON("$lt" << 5);
+ unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression());
+ ASSERT(lt->init("a", baseOperand["$lt"]).isOK());
+ NotMatchExpression notOp;
+ ASSERT(notOp.init(lt.release()).isOK());
+ ASSERT(notOp.matchesBSON(BSON("a" << 6), NULL));
+ ASSERT(!notOp.matchesBSON(BSON("a" << 4), NULL));
+}
+
+TEST(NotMatchExpression, MatchesArray) {
+ BSONObj baseOperand = BSON("$lt" << 5);
+ unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression());
+ ASSERT(lt->init("a", baseOperand["$lt"]).isOK());
+ NotMatchExpression notOp;
+ ASSERT(notOp.init(lt.release()).isOK());
+ ASSERT(notOp.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL));
+ ASSERT(!notOp.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL));
+ // All array elements must match.
+ ASSERT(!notOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5 << 6)), NULL));
+}
+
+TEST(NotMatchExpression, ElemMatchKey) {
+ BSONObj baseOperand = BSON("$lt" << 5);
+ unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression());
+ ASSERT(lt->init("a", baseOperand["$lt"]).isOK());
+ NotMatchExpression notOp;
+ ASSERT(notOp.init(lt.release()).isOK());
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!notOp.matchesBSON(BSON("a" << BSON_ARRAY(1)), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(notOp.matchesBSON(BSON("a" << 6), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(notOp.matchesBSON(BSON("a" << BSON_ARRAY(6)), &details));
+ // elemMatchKey is not implemented for negative match operators.
+ ASSERT(!details.hasElemMatchKey());
+}
+/*
+ TEST( NotMatchExpression, MatchesIndexKey ) {
+ BSONObj baseOperand = BSON( "$lt" << 5 );
+ unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
+ ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
+ NotMatchExpression notOp;
+ ASSERT( notOp.init( lt.release() ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ BSONObj indexKey = BSON( "" << "7" );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ notOp.matchesIndexKey( indexKey, indexSpec ) );
+ }
+*/
+
+/**
+TEST( AndOp, MatchesElementSingleClause ) {
+ BSONObj baseOperand = BSON( "$lt" << 5 );
+ BSONObj match = BSON( "a" << 4 );
+ BSONObj notMatch = BSON( "a" << 5 );
+ unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
+ ASSERT( lt->init( "", baseOperand[ "$lt" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( lt.release() );
+ AndOp andOp;
+ ASSERT( andOp.init( &subMatchExpressions ).isOK() );
+ ASSERT( andOp.matchesSingleElement( match[ "a" ] ) );
+ ASSERT( !andOp.matchesSingleElement( notMatch[ "a" ] ) );
+}
+*/
+
+TEST(AndOp, NoClauses) {
+ AndMatchExpression andMatchExpression;
+ ASSERT(andMatchExpression.matchesBSON(BSONObj(), NULL));
+}
+
+TEST(AndOp, MatchesElementThreeClauses) {
+ BSONObj baseOperand1 = BSON("$lt"
+ << "z1");
+ BSONObj baseOperand2 = BSON("$gt"
+ << "a1");
+ BSONObj match = BSON("a"
+ << "r1");
+ BSONObj notMatch1 = BSON("a"
+ << "z1");
+ BSONObj notMatch2 = BSON("a"
+ << "a1");
+ BSONObj notMatch3 = BSON("a"
+ << "r");
+
+ unique_ptr<ComparisonMatchExpression> sub1(new LTMatchExpression());
+ ASSERT(sub1->init("a", baseOperand1["$lt"]).isOK());
+ unique_ptr<ComparisonMatchExpression> sub2(new GTMatchExpression());
+ ASSERT(sub2->init("a", baseOperand2["$gt"]).isOK());
+ unique_ptr<RegexMatchExpression> sub3(new RegexMatchExpression());
+ ASSERT(sub3->init("a", "1", "").isOK());
+
+ AndMatchExpression andOp;
+ andOp.add(sub1.release());
+ andOp.add(sub2.release());
+ andOp.add(sub3.release());
+
+ ASSERT(andOp.matchesBSON(match));
+ ASSERT(!andOp.matchesBSON(notMatch1));
+ ASSERT(!andOp.matchesBSON(notMatch2));
+ ASSERT(!andOp.matchesBSON(notMatch3));
+}
+
+TEST(AndOp, MatchesSingleClause) {
+ BSONObj baseOperand = BSON("$ne" << 5);
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("a", baseOperand["$ne"]).isOK());
+ unique_ptr<NotMatchExpression> ne(new NotMatchExpression());
+ ASSERT(ne->init(eq.release()).isOK());
+
+ AndMatchExpression andOp;
+ andOp.add(ne.release());
+
+ ASSERT(andOp.matchesBSON(BSON("a" << 4), NULL));
+ ASSERT(andOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), NULL));
+ ASSERT(!andOp.matchesBSON(BSON("a" << 5), NULL));
+ ASSERT(!andOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), NULL));
+}
+
+TEST(AndOp, MatchesThreeClauses) {
+ BSONObj baseOperand1 = BSON("$gt" << 1);
+ BSONObj baseOperand2 = BSON("$lt" << 10);
+ BSONObj baseOperand3 = BSON("$lt" << 100);
+
+ unique_ptr<ComparisonMatchExpression> sub1(new GTMatchExpression());
+ ASSERT(sub1->init("a", baseOperand1["$gt"]).isOK());
+
+ unique_ptr<ComparisonMatchExpression> sub2(new LTMatchExpression());
+ ASSERT(sub2->init("a", baseOperand2["$lt"]).isOK());
+
+ unique_ptr<ComparisonMatchExpression> sub3(new LTMatchExpression());
+ ASSERT(sub3->init("b", baseOperand3["$lt"]).isOK());
+
+ AndMatchExpression andOp;
+ andOp.add(sub1.release());
+ andOp.add(sub2.release());
+ andOp.add(sub3.release());
+
+ ASSERT(andOp.matchesBSON(BSON("a" << 5 << "b" << 6), NULL));
+ ASSERT(!andOp.matchesBSON(BSON("a" << 5), NULL));
+ ASSERT(!andOp.matchesBSON(BSON("b" << 6), NULL));
+ ASSERT(!andOp.matchesBSON(BSON("a" << 1 << "b" << 6), NULL));
+ ASSERT(!andOp.matchesBSON(BSON("a" << 10 << "b" << 6), NULL));
+}
+
+TEST(AndOp, ElemMatchKey) {
+ BSONObj baseOperand1 = BSON("a" << 1);
+ BSONObj baseOperand2 = BSON("b" << 2);
+
+ unique_ptr<ComparisonMatchExpression> sub1(new EqualityMatchExpression());
+ ASSERT(sub1->init("a", baseOperand1["a"]).isOK());
+
+ unique_ptr<ComparisonMatchExpression> sub2(new EqualityMatchExpression());
+ ASSERT(sub2->init("b", baseOperand2["b"]).isOK());
+
+ AndMatchExpression andOp;
+ andOp.add(sub1.release());
+ andOp.add(sub2.release());
+
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!andOp.matchesBSON(BSON("a" << BSON_ARRAY(1)), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(!andOp.matchesBSON(BSON("b" << BSON_ARRAY(2)), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(andOp.matchesBSON(BSON("a" << BSON_ARRAY(1) << "b" << BSON_ARRAY(1 << 2)), &details));
+ ASSERT(details.hasElemMatchKey());
+ // The elem match key for the second $and clause is recorded.
+ ASSERT_EQUALS("1", details.elemMatchKey());
+}
+
+/**
+TEST( AndOp, MatchesIndexKeyWithoutUnknown ) {
+ BSONObj baseOperand1 = BSON( "$gt" << 1 );
+ BSONObj baseOperand2 = BSON( "$lt" << 5 );
+ unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
+ ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
+ unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
+ ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( sub1.release() );
+ subMatchExpressions.mutableVector().push_back( sub2.release() );
+ AndOp andOp;
+ ASSERT( andOp.init( &subMatchExpressions ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ andOp.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ andOp.matchesIndexKey( BSON( "" << 0 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ andOp.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+}
+
+TEST( AndOp, MatchesIndexKeyWithUnknown ) {
+ BSONObj baseOperand1 = BSON( "$gt" << 1 );
+ BSONObj baseOperand2 = BSON( "$lt" << 5 );
+ // This part will return PartialMatchResult_Unknown.
+ BSONObj baseOperand3 = BSON( "$ne" << 5 );
+ unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
+ ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
+ unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
+ ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
+ unique_ptr<NeOp> sub3( new NeOp() );
+ ASSERT( sub3->init( "a", baseOperand3[ "$ne" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( sub1.release() );
+ subMatchExpressions.mutableVector().push_back( sub2.release() );
+ subMatchExpressions.mutableVector().push_back( sub3.release() );
+ AndOp andOp;
+ ASSERT( andOp.init( &subMatchExpressions ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ andOp.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ andOp.matchesIndexKey( BSON( "" << 0 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ andOp.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+}
+*/
+
+/**
+TEST( OrOp, MatchesElementSingleClause ) {
+ BSONObj baseOperand = BSON( "$lt" << 5 );
+ BSONObj match = BSON( "a" << 4 );
+ BSONObj notMatch = BSON( "a" << 5 );
+ unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
+ ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( lt.release() );
+ OrOp orOp;
+ ASSERT( orOp.init( &subMatchExpressions ).isOK() );
+ ASSERT( orOp.matchesSingleElement( match[ "a" ] ) );
+ ASSERT( !orOp.matchesSingleElement( notMatch[ "a" ] ) );
+}
+*/
+TEST(OrOp, NoClauses) {
+ OrMatchExpression orOp;
+ ASSERT(!orOp.matchesBSON(BSONObj(), NULL));
+}
+/*
+TEST( OrOp, MatchesElementThreeClauses ) {
+ BSONObj baseOperand1 = BSON( "$lt" << 0 );
+ BSONObj baseOperand2 = BSON( "$gt" << 10 );
+ BSONObj baseOperand3 = BSON( "a" << 5 );
+ BSONObj match1 = BSON( "a" << -1 );
+ BSONObj match2 = BSON( "a" << 11 );
+ BSONObj match3 = BSON( "a" << 5 );
+ BSONObj notMatch = BSON( "a" << "6" );
+ unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
+ ASSERT( sub1->init( "a", baseOperand1[ "$lt" ] ).isOK() );
+ unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
+ ASSERT( sub2->init( "a", baseOperand2[ "$gt" ] ).isOK() );
+ unique_ptr<ComparisonMatchExpression> sub3( new ComparisonMatchExpression() );
+ ASSERT( sub3->init( "a", baseOperand3[ "a" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( sub1.release() );
+ subMatchExpressions.mutableVector().push_back( sub2.release() );
+ subMatchExpressions.mutableVector().push_back( sub3.release() );
+ OrOp orOp;
+ ASSERT( orOp.init( &subMatchExpressions ).isOK() );
+ ASSERT( orOp.matchesSingleElement( match1[ "a" ] ) );
+ ASSERT( orOp.matchesSingleElement( match2[ "a" ] ) );
+ ASSERT( orOp.matchesSingleElement( match3[ "a" ] ) );
+ ASSERT( !orOp.matchesSingleElement( notMatch[ "a" ] ) );
+}
+*/
+TEST(OrOp, MatchesSingleClause) {
+ BSONObj baseOperand = BSON("$ne" << 5);
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("a", baseOperand["$ne"]).isOK());
+ unique_ptr<NotMatchExpression> ne(new NotMatchExpression());
+ ASSERT(ne->init(eq.release()).isOK());
+
+ OrMatchExpression orOp;
+ orOp.add(ne.release());
+
+ ASSERT(orOp.matchesBSON(BSON("a" << 4), NULL));
+ ASSERT(orOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), NULL));
+ ASSERT(!orOp.matchesBSON(BSON("a" << 5), NULL));
+ ASSERT(!orOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), NULL));
+}
+
+TEST(OrOp, MatchesThreeClauses) {
+ BSONObj baseOperand1 = BSON("$gt" << 10);
+ BSONObj baseOperand2 = BSON("$lt" << 0);
+ BSONObj baseOperand3 = BSON("b" << 100);
+ unique_ptr<ComparisonMatchExpression> sub1(new GTMatchExpression());
+ ASSERT(sub1->init("a", baseOperand1["$gt"]).isOK());
+ unique_ptr<ComparisonMatchExpression> sub2(new LTMatchExpression());
+ ASSERT(sub2->init("a", baseOperand2["$lt"]).isOK());
+ unique_ptr<ComparisonMatchExpression> sub3(new EqualityMatchExpression());
+ ASSERT(sub3->init("b", baseOperand3["b"]).isOK());
+
+ OrMatchExpression orOp;
+ orOp.add(sub1.release());
+ orOp.add(sub2.release());
+ orOp.add(sub3.release());
+
+ ASSERT(orOp.matchesBSON(BSON("a" << -1), NULL));
+ ASSERT(orOp.matchesBSON(BSON("a" << 11), NULL));
+ ASSERT(!orOp.matchesBSON(BSON("a" << 5), NULL));
+ ASSERT(orOp.matchesBSON(BSON("b" << 100), NULL));
+ ASSERT(!orOp.matchesBSON(BSON("b" << 101), NULL));
+ ASSERT(!orOp.matchesBSON(BSONObj(), NULL));
+ ASSERT(orOp.matchesBSON(BSON("a" << 11 << "b" << 100), NULL));
+}
+
+TEST(OrOp, ElemMatchKey) {
+ BSONObj baseOperand1 = BSON("a" << 1);
+ BSONObj baseOperand2 = BSON("b" << 2);
+ unique_ptr<ComparisonMatchExpression> sub1(new EqualityMatchExpression());
+ ASSERT(sub1->init("a", baseOperand1["a"]).isOK());
+ unique_ptr<ComparisonMatchExpression> sub2(new EqualityMatchExpression());
+ ASSERT(sub2->init("b", baseOperand2["b"]).isOK());
+
+ OrMatchExpression orOp;
+ orOp.add(sub1.release());
+ orOp.add(sub2.release());
+
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!orOp.matchesBSON(BSONObj(), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(!orOp.matchesBSON(BSON("a" << BSON_ARRAY(10) << "b" << BSON_ARRAY(10)), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(orOp.matchesBSON(BSON("a" << BSON_ARRAY(1) << "b" << BSON_ARRAY(1 << 2)), &details));
+ // The elem match key feature is not implemented for $or.
+ ASSERT(!details.hasElemMatchKey());
+}
+
+/**
+TEST( OrOp, MatchesIndexKeyWithoutUnknown ) {
+ BSONObj baseOperand1 = BSON( "$gt" << 5 );
+ BSONObj baseOperand2 = BSON( "$lt" << 1 );
+ unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
+ ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
+ unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
+ ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( sub1.release() );
+ subMatchExpressions.mutableVector().push_back( sub2.release() );
+ OrOp orOp;
+ ASSERT( orOp.init( &subMatchExpressions ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_False ==
+ orOp.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ orOp.matchesIndexKey( BSON( "" << 0 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ orOp.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+}
+
+TEST( OrOp, MatchesIndexKeyWithUnknown ) {
+ BSONObj baseOperand1 = BSON( "$gt" << 5 );
+ BSONObj baseOperand2 = BSON( "$lt" << 1 );
+ // This part will return PartialMatchResult_Unknown.
+ BSONObj baseOperand3 = BSON( "$ne" << 5 );
+ unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
+ ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
+ unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
+ ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
+ unique_ptr<NeOp> sub3( new NeOp() );
+ ASSERT( sub3->init( "a", baseOperand3[ "$ne" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( sub1.release() );
+ subMatchExpressions.mutableVector().push_back( sub2.release() );
+ subMatchExpressions.mutableVector().push_back( sub3.release() );
+ OrOp orOp;
+ ASSERT( orOp.init( &subMatchExpressions ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ orOp.matchesIndexKey( BSON( "" << 3 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ orOp.matchesIndexKey( BSON( "" << 0 ), indexSpec ) );
+ ASSERT( MatchMatchExpression::PartialMatchResult_True ==
+ orOp.matchesIndexKey( BSON( "" << 6 ), indexSpec ) );
+}
+*/
+
+/**
+TEST( NorOp, MatchesElementSingleClause ) {
+ BSONObj baseOperand = BSON( "$lt" << 5 );
+ BSONObj match = BSON( "a" << 5 );
+ BSONObj notMatch = BSON( "a" << 4 );
+ unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
+ ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( lt.release() );
+ NorOp norOp;
+ ASSERT( norOp.init( &subMatchExpressions ).isOK() );
+ ASSERT( norOp.matchesSingleElement( match[ "a" ] ) );
+ ASSERT( !norOp.matchesSingleElement( notMatch[ "a" ] ) );
+}
+*/
+
+TEST(NorOp, NoClauses) {
+ NorMatchExpression norOp;
+ ASSERT(norOp.matchesBSON(BSONObj(), NULL));
+}
+/*
+TEST( NorOp, MatchesElementThreeClauses ) {
+ BSONObj baseOperand1 = BSON( "$lt" << 0 );
+ BSONObj baseOperand2 = BSON( "$gt" << 10 );
+ BSONObj baseOperand3 = BSON( "a" << 5 );
+ BSONObj notMatch1 = BSON( "a" << -1 );
+ BSONObj notMatch2 = BSON( "a" << 11 );
+ BSONObj notMatch3 = BSON( "a" << 5 );
+ BSONObj match = BSON( "a" << "6" );
+ unique_ptr<ComparisonMatchExpression> sub1( new ComparisonMatchExpression() );
+ ASSERT( sub1->init( "a", baseOperand1[ "$lt" ] ).isOK() );
+ unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
+ ASSERT( sub2->init( "a", baseOperand2[ "$gt" ] ).isOK() );
+ unique_ptr<ComparisonMatchExpression> sub3( new ComparisonMatchExpression() );
+ ASSERT( sub3->init( "a", baseOperand3[ "a" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( sub1.release() );
+ subMatchExpressions.mutableVector().push_back( sub2.release() );
+ subMatchExpressions.mutableVector().push_back( sub3.release() );
+ NorOp norOp;
+ ASSERT( norOp.init( &subMatchExpressions ).isOK() );
+ ASSERT( !norOp.matchesSingleElement( notMatch1[ "a" ] ) );
+ ASSERT( !norOp.matchesSingleElement( notMatch2[ "a" ] ) );
+ ASSERT( !norOp.matchesSingleElement( notMatch3[ "a" ] ) );
+ ASSERT( norOp.matchesSingleElement( match[ "a" ] ) );
+}
+*/
+
+TEST(NorOp, MatchesSingleClause) {
+ BSONObj baseOperand = BSON("$ne" << 5);
+ unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression());
+ ASSERT(eq->init("a", baseOperand["$ne"]).isOK());
+ unique_ptr<NotMatchExpression> ne(new NotMatchExpression());
+ ASSERT(ne->init(eq.release()).isOK());
+
+ NorMatchExpression norOp;
+ norOp.add(ne.release());
+
+ ASSERT(!norOp.matchesBSON(BSON("a" << 4), NULL));
+ ASSERT(!norOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), NULL));
+ ASSERT(norOp.matchesBSON(BSON("a" << 5), NULL));
+ ASSERT(norOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), NULL));
+}
+
+TEST(NorOp, MatchesThreeClauses) {
+ BSONObj baseOperand1 = BSON("$gt" << 10);
+ BSONObj baseOperand2 = BSON("$lt" << 0);
+ BSONObj baseOperand3 = BSON("b" << 100);
+
+ unique_ptr<ComparisonMatchExpression> sub1(new GTMatchExpression());
+ ASSERT(sub1->init("a", baseOperand1["$gt"]).isOK());
+ unique_ptr<ComparisonMatchExpression> sub2(new LTMatchExpression());
+ ASSERT(sub2->init("a", baseOperand2["$lt"]).isOK());
+ unique_ptr<ComparisonMatchExpression> sub3(new EqualityMatchExpression());
+ ASSERT(sub3->init("b", baseOperand3["b"]).isOK());
+
+ NorMatchExpression norOp;
+ norOp.add(sub1.release());
+ norOp.add(sub2.release());
+ norOp.add(sub3.release());
+
+ ASSERT(!norOp.matchesBSON(BSON("a" << -1), NULL));
+ ASSERT(!norOp.matchesBSON(BSON("a" << 11), NULL));
+ ASSERT(norOp.matchesBSON(BSON("a" << 5), NULL));
+ ASSERT(!norOp.matchesBSON(BSON("b" << 100), NULL));
+ ASSERT(norOp.matchesBSON(BSON("b" << 101), NULL));
+ ASSERT(norOp.matchesBSON(BSONObj(), NULL));
+ ASSERT(!norOp.matchesBSON(BSON("a" << 11 << "b" << 100), NULL));
+}
+
+TEST(NorOp, ElemMatchKey) {
+ BSONObj baseOperand1 = BSON("a" << 1);
+ BSONObj baseOperand2 = BSON("b" << 2);
+ unique_ptr<ComparisonMatchExpression> sub1(new EqualityMatchExpression());
+ ASSERT(sub1->init("a", baseOperand1["a"]).isOK());
+ unique_ptr<ComparisonMatchExpression> sub2(new EqualityMatchExpression());
+ ASSERT(sub2->init("b", baseOperand2["b"]).isOK());
+
+ NorMatchExpression norOp;
+ norOp.add(sub1.release());
+ norOp.add(sub2.release());
+
+ MatchDetails details;
+ details.requestElemMatchKey();
+ ASSERT(!norOp.matchesBSON(BSON("a" << 1), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(!norOp.matchesBSON(BSON("a" << BSON_ARRAY(1) << "b" << BSON_ARRAY(10)), &details));
+ ASSERT(!details.hasElemMatchKey());
+ ASSERT(norOp.matchesBSON(BSON("a" << BSON_ARRAY(3) << "b" << BSON_ARRAY(4)), &details));
+ // The elem match key feature is not implemented for $nor.
+ ASSERT(!details.hasElemMatchKey());
+}
+
+
+TEST(NorOp, Equivalent) {
+ BSONObj baseOperand1 = BSON("a" << 1);
+ BSONObj baseOperand2 = BSON("b" << 2);
+ EqualityMatchExpression sub1;
+ ASSERT(sub1.init("a", baseOperand1["a"]).isOK());
+ EqualityMatchExpression sub2;
+ ASSERT(sub2.init("b", baseOperand2["b"]).isOK());
+
+ NorMatchExpression e1;
+ e1.add(sub1.shallowClone());
+ e1.add(sub2.shallowClone());
+
+ NorMatchExpression e2;
+ e2.add(sub1.shallowClone());
+
+ ASSERT(e1.equivalent(&e1));
+ ASSERT(!e1.equivalent(&e2));
+}
+
+/**
+TEST( NorOp, MatchesIndexKey ) {
+ BSONObj baseOperand = BSON( "a" << 5 );
+ unique_ptr<ComparisonMatchExpression> eq( new ComparisonMatchExpression() );
+ ASSERT( eq->init( "a", baseOperand[ "a" ] ).isOK() );
+ OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ subMatchExpressions.mutableVector().push_back( eq.release() );
+ NorOp norOp;
+ ASSERT( norOp.init( &subMatchExpressions ).isOK() );
+ IndexSpec indexSpec( BSON( "a" << 1 ) );
+ BSONObj indexKey = BSON( "" << "7" );
+ ASSERT( MatchMatchExpression::PartialMatchResult_Unknown ==
+ norOp.matchesIndexKey( indexKey, indexSpec ) );
+}
+*/
}
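
The hunk above only reformats the tree-expression tests; the shallowClone() override added to NotMatchExpression at the top of this diff is not exercised by them. Below is a minimal sketch, modeled on the test setup above, of how that clone path behaves; the include paths and the use of invariant() in place of the test ASSERT macro are assumptions, not part of the commit.

// Sketch only, inside namespace mongo; headers assumed.
#include "mongo/db/matcher/expression_leaf.h"
#include "mongo/db/matcher/expression_tree.h"

void exerciseNotClone() {
    BSONObj operand = BSON("$lt" << 5);
    std::unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression());
    invariant(lt->init("a", operand["$lt"]).isOK());

    NotMatchExpression notOp;
    invariant(notOp.init(lt.release()).isOK());

    // shallowClone() clones the child expression and copies any index tag,
    // so the copy matches exactly the documents the original matches.
    std::unique_ptr<MatchExpression> clone(notOp.shallowClone());
    invariant(clone->equivalent(&notOp));
    invariant(clone->matchesBSON(BSON("a" << 6), NULL));
    invariant(!clone->matchesBSON(BSON("a" << 4), NULL));
}
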
diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp
index 804ae7dc687..f3d99b79136 100644
--- a/src/mongo/db/matcher/expression_where.cpp
+++ b/src/mongo/db/matcher/expression_where.cpp
@@ -42,175 +42,162 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::stringstream;
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::stringstream;
- class WhereMatchExpression : public MatchExpression {
- public:
- WhereMatchExpression(OperationContext* txn)
- : MatchExpression(WHERE),
- _txn(txn) {
+class WhereMatchExpression : public MatchExpression {
+public:
+ WhereMatchExpression(OperationContext* txn) : MatchExpression(WHERE), _txn(txn) {
+ invariant(_txn != NULL);
- invariant(_txn != NULL);
+ _func = 0;
+ }
- _func = 0;
- }
+ virtual ~WhereMatchExpression() {}
- virtual ~WhereMatchExpression(){}
+ Status init(StringData dbName, StringData theCode, const BSONObj& scope);
- Status init(StringData dbName, StringData theCode, const BSONObj& scope);
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const;
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const;
+ virtual bool matchesSingleElement(const BSONElement& e) const {
+ return false;
+ }
- virtual bool matchesSingleElement( const BSONElement& e ) const {
- return false;
+ virtual MatchExpression* shallowClone() const {
+ WhereMatchExpression* e = new WhereMatchExpression(_txn);
+ e->init(_dbName, _code, _userScope);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
}
+ return e;
+ }
- virtual MatchExpression* shallowClone() const {
- WhereMatchExpression* e = new WhereMatchExpression(_txn);
- e->init(_dbName, _code, _userScope);
- if (getTag()) {
- e->setTag(getTag()->clone());
- }
- return e;
- }
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ virtual void toBSON(BSONObjBuilder* out) const;
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual bool equivalent(const MatchExpression* other) const;
- virtual bool equivalent( const MatchExpression* other ) const ;
+ virtual void resetTag() {
+ setTag(NULL);
+ }
- virtual void resetTag() { setTag(NULL); }
+private:
+ string _dbName;
+ string _code;
+ BSONObj _userScope;
- private:
+ unique_ptr<Scope> _scope;
+ ScriptingFunction _func;
- string _dbName;
- string _code;
- BSONObj _userScope;
+    // Not owned. See comments inside WhereCallbackReal for the lifetime of this pointer.
+ OperationContext* _txn;
+};
- unique_ptr<Scope> _scope;
- ScriptingFunction _func;
+Status WhereMatchExpression::init(StringData dbName, StringData theCode, const BSONObj& scope) {
+ if (dbName.size() == 0) {
+ return Status(ErrorCodes::BadValue, "ns for $where cannot be empty");
+ }
-    // Not owned. See comments inside WhereCallbackReal for the lifetime of this pointer.
- OperationContext* _txn;
- };
+ if (theCode.size() == 0) {
+ return Status(ErrorCodes::BadValue, "code for $where cannot be empty");
+ }
- Status WhereMatchExpression::init( StringData dbName,
- StringData theCode,
- const BSONObj& scope ) {
+ _dbName = dbName.toString();
+ _code = theCode.toString();
+ _userScope = scope.getOwned();
- if (dbName.size() == 0) {
- return Status(ErrorCodes::BadValue, "ns for $where cannot be empty");
- }
+ const string userToken =
+ AuthorizationSession::get(ClientBasic::getCurrent())->getAuthenticatedUserNamesToken();
- if (theCode.size() == 0) {
- return Status(ErrorCodes::BadValue, "code for $where cannot be empty");
- }
-
- _dbName = dbName.toString();
- _code = theCode.toString();
- _userScope = scope.getOwned();
+ try {
+ _scope = globalScriptEngine->getPooledScope(_txn, _dbName, "where" + userToken);
+ _func = _scope->createFunction(_code.c_str());
+ } catch (...) {
+ return exceptionToStatus();
+ }
- const string userToken = AuthorizationSession::get(ClientBasic::getCurrent())
- ->getAuthenticatedUserNamesToken();
+ if (!_func)
+ return Status(ErrorCodes::BadValue, "$where compile error");
- try {
- _scope = globalScriptEngine->getPooledScope(_txn, _dbName, "where" + userToken);
- _func = _scope->createFunction(_code.c_str());
- } catch (...) {
- return exceptionToStatus();
- }
+ return Status::OK();
+}
- if ( !_func )
- return Status( ErrorCodes::BadValue, "$where compile error" );
+bool WhereMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const {
+ uassert(28692, "$where compile error", _func);
+ BSONObj obj = doc->toBSON();
- return Status::OK();
+ if (!_userScope.isEmpty()) {
+ _scope->init(&_userScope);
}
- bool WhereMatchExpression::matches( const MatchableDocument* doc, MatchDetails* details ) const {
- uassert(28692, "$where compile error", _func);
- BSONObj obj = doc->toBSON();
-
- if ( ! _userScope.isEmpty() ) {
- _scope->init( &_userScope );
- }
-
- _scope->setObject( "obj", const_cast< BSONObj & >( obj ) );
- _scope->setBoolean( "fullObject" , true ); // this is a hack b/c fullObject used to be relevant
+ _scope->setObject("obj", const_cast<BSONObj&>(obj));
+ _scope->setBoolean("fullObject", true); // this is a hack b/c fullObject used to be relevant
- int err = _scope->invoke( _func, 0, &obj, 1000 * 60, false );
- if ( err == -3 ) { // INVOKE_ERROR
- stringstream ss;
- ss << "error on invocation of $where function:\n"
- << _scope->getError();
- uassert( 16812, ss.str(), false);
- }
- else if ( err != 0 ) { // ! INVOKE_SUCCESS
- uassert( 16813, "unknown error in invocation of $where function", false);
- }
-
- return _scope->getBoolean( "__returnValue" ) != 0;
+ int err = _scope->invoke(_func, 0, &obj, 1000 * 60, false);
+ if (err == -3) { // INVOKE_ERROR
+ stringstream ss;
+ ss << "error on invocation of $where function:\n" << _scope->getError();
+ uassert(16812, ss.str(), false);
+ } else if (err != 0) { // ! INVOKE_SUCCESS
+ uassert(16813, "unknown error in invocation of $where function", false);
}
- void WhereMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "$where\n";
+ return _scope->getBoolean("__returnValue") != 0;
+}
- _debugAddSpace( debug, level + 1 );
- debug << "dbName: " << _dbName << "\n";
+void WhereMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "$where\n";
- _debugAddSpace( debug, level + 1 );
- debug << "code: " << _code << "\n";
+ _debugAddSpace(debug, level + 1);
+ debug << "dbName: " << _dbName << "\n";
- _debugAddSpace( debug, level + 1 );
- debug << "scope: " << _userScope << "\n";
- }
+ _debugAddSpace(debug, level + 1);
+ debug << "code: " << _code << "\n";
- void WhereMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append("$where", _code);
- }
+ _debugAddSpace(debug, level + 1);
+ debug << "scope: " << _userScope << "\n";
+}
- bool WhereMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
- const WhereMatchExpression* realOther = static_cast<const WhereMatchExpression*>(other);
- return
- _dbName == realOther->_dbName &&
- _code == realOther->_code &&
- _userScope == realOther->_userScope;
- }
+void WhereMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$where", _code);
+}
- WhereCallbackReal::WhereCallbackReal(OperationContext* txn, StringData dbName)
- : _txn(txn),
- _dbName(dbName) {
+bool WhereMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
+ const WhereMatchExpression* realOther = static_cast<const WhereMatchExpression*>(other);
+ return _dbName == realOther->_dbName && _code == realOther->_code &&
+ _userScope == realOther->_userScope;
+}
- }
+WhereCallbackReal::WhereCallbackReal(OperationContext* txn, StringData dbName)
+ : _txn(txn), _dbName(dbName) {}
- StatusWithMatchExpression WhereCallbackReal::parseWhere(const BSONElement& where) const {
- if (!globalScriptEngine)
- return StatusWithMatchExpression(ErrorCodes::BadValue,
- "no globalScriptEngine in $where parsing");
-
- unique_ptr<WhereMatchExpression> exp(new WhereMatchExpression(_txn));
- if (where.type() == String || where.type() == Code) {
- Status s = exp->init(_dbName, where.valuestr(), BSONObj());
- if (!s.isOK())
- return StatusWithMatchExpression(s);
- return StatusWithMatchExpression(exp.release());
- }
+StatusWithMatchExpression WhereCallbackReal::parseWhere(const BSONElement& where) const {
+ if (!globalScriptEngine)
+ return StatusWithMatchExpression(ErrorCodes::BadValue,
+ "no globalScriptEngine in $where parsing");
- if (where.type() == CodeWScope) {
- Status s = exp->init(_dbName,
- where.codeWScopeCode(),
- BSONObj(where.codeWScopeScopeDataUnsafe()));
- if (!s.isOK())
- return StatusWithMatchExpression(s);
- return StatusWithMatchExpression(exp.release());
- }
+ unique_ptr<WhereMatchExpression> exp(new WhereMatchExpression(_txn));
+ if (where.type() == String || where.type() == Code) {
+ Status s = exp->init(_dbName, where.valuestr(), BSONObj());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(exp.release());
+ }
- return StatusWithMatchExpression(ErrorCodes::BadValue, "$where got bad type");
+ if (where.type() == CodeWScope) {
+ Status s =
+ exp->init(_dbName, where.codeWScopeCode(), BSONObj(where.codeWScopeScopeDataUnsafe()));
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(exp.release());
}
+
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$where got bad type");
+}
}
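
As a usage note for the WhereCallbackReal::parseWhere() shown above, a hedged sketch of how a caller would drive it follows; the OperationContext pointer and the "test" database name are placeholders, and error handling is left to the caller.

// Sketch only, inside namespace mongo; assumes an OperationContext* from the caller.
StatusWithMatchExpression parseExampleWhere(OperationContext* txn) {
    // The $where operand may be String/Code (parsed with an empty scope) or
    // CodeWScope (its attached scope is used), matching the two branches above.
    BSONObj filter = BSON("$where" << "this.a > this.b");
    WhereCallbackReal callback(txn, "test");
    return callback.parseWhere(filter["$where"]);
    // On a binary without globalScriptEngine this returns BadValue,
    // "no globalScriptEngine in $where parsing".
}
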
diff --git a/src/mongo/db/matcher/expression_where_noop.cpp b/src/mongo/db/matcher/expression_where_noop.cpp
index d7f5a3c74e1..d505542e818 100644
--- a/src/mongo/db/matcher/expression_where_noop.cpp
+++ b/src/mongo/db/matcher/expression_where_noop.cpp
@@ -35,104 +35,102 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- /**
- * Bogus no-op $where match expression to parse $where in mongos,
- * since mongos doesn't have a script engine to compile JS functions.
- *
- * Linked into mongos, instead of the real WhereMatchExpression.
- */
- class WhereNoOpMatchExpression : public MatchExpression {
- public:
- WhereNoOpMatchExpression() : MatchExpression( WHERE ){ }
- virtual ~WhereNoOpMatchExpression(){}
-
- Status init( StringData theCode );
-
- virtual bool matches( const MatchableDocument* doc, MatchDetails* details = 0 ) const {
- return false;
- }
-
- virtual bool matchesSingleElement( const BSONElement& e ) const {
- return false;
- }
+using std::unique_ptr;
+using std::string;
- virtual MatchExpression* shallowClone() const {
- WhereNoOpMatchExpression* e = new WhereNoOpMatchExpression();
- e->init(_code);
- if ( getTag() ) {
- e->setTag(getTag()->clone());
- }
- return e;
- }
+/**
+ * Bogus no-op $where match expression to parse $where in mongos,
+ * since mongos doesn't have a script engine to compile JS functions.
+ *
+ * Linked into mongos, instead of the real WhereMatchExpression.
+ */
+class WhereNoOpMatchExpression : public MatchExpression {
+public:
+ WhereNoOpMatchExpression() : MatchExpression(WHERE) {}
+ virtual ~WhereNoOpMatchExpression() {}
- virtual void debugString( StringBuilder& debug, int level = 0 ) const;
+ Status init(StringData theCode);
- virtual void toBSON(BSONObjBuilder* out) const;
+ virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const {
+ return false;
+ }
- virtual bool equivalent( const MatchExpression* other ) const ;
+ virtual bool matchesSingleElement(const BSONElement& e) const {
+ return false;
+ }
- virtual void resetTag() { setTag(NULL); }
+ virtual MatchExpression* shallowClone() const {
+ WhereNoOpMatchExpression* e = new WhereNoOpMatchExpression();
+ e->init(_code);
+ if (getTag()) {
+ e->setTag(getTag()->clone());
+ }
+ return e;
+ }
- private:
- string _code;
- };
+ virtual void debugString(StringBuilder& debug, int level = 0) const;
- Status WhereNoOpMatchExpression::init(StringData theCode ) {
- if ( theCode.size() == 0 )
- return Status( ErrorCodes::BadValue, "code for $where cannot be empty" );
+ virtual void toBSON(BSONObjBuilder* out) const;
- _code = theCode.toString();
+ virtual bool equivalent(const MatchExpression* other) const;
- return Status::OK();
+ virtual void resetTag() {
+ setTag(NULL);
}
- void WhereNoOpMatchExpression::debugString( StringBuilder& debug, int level ) const {
- _debugAddSpace( debug, level );
- debug << "$where (only in mongos)\n";
+private:
+ string _code;
+};
- _debugAddSpace( debug, level + 1 );
- debug << "code: " << _code << "\n";
- }
+Status WhereNoOpMatchExpression::init(StringData theCode) {
+ if (theCode.size() == 0)
+ return Status(ErrorCodes::BadValue, "code for $where cannot be empty");
- void WhereNoOpMatchExpression::toBSON(BSONObjBuilder* out) const {
- out->append("$where", _code);
- }
+ _code = theCode.toString();
- bool WhereNoOpMatchExpression::equivalent( const MatchExpression* other ) const {
- if ( matchType() != other->matchType() )
- return false;
- const WhereNoOpMatchExpression* noopOther = static_cast<const WhereNoOpMatchExpression*>(other);
- return _code == noopOther->_code;
- }
+ return Status::OK();
+}
+void WhereNoOpMatchExpression::debugString(StringBuilder& debug, int level) const {
+ _debugAddSpace(debug, level);
+ debug << "$where (only in mongos)\n";
- // -----------------
+ _debugAddSpace(debug, level + 1);
+ debug << "code: " << _code << "\n";
+}
- WhereCallbackNoop::WhereCallbackNoop() {
+void WhereNoOpMatchExpression::toBSON(BSONObjBuilder* out) const {
+ out->append("$where", _code);
+}
- }
+bool WhereNoOpMatchExpression::equivalent(const MatchExpression* other) const {
+ if (matchType() != other->matchType())
+ return false;
+ const WhereNoOpMatchExpression* noopOther = static_cast<const WhereNoOpMatchExpression*>(other);
+ return _code == noopOther->_code;
+}
- StatusWithMatchExpression WhereCallbackNoop::parseWhere(const BSONElement& where) const {
+// -----------------
- unique_ptr<WhereNoOpMatchExpression> exp( new WhereNoOpMatchExpression() );
- if ( where.type() == String || where.type() == Code ) {
- Status s = exp->init( where.valuestr() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( exp.release() );
- }
+WhereCallbackNoop::WhereCallbackNoop() {}
- if ( where.type() == CodeWScope ) {
- Status s = exp->init( where.codeWScopeCode() );
- if ( !s.isOK() )
- return StatusWithMatchExpression( s );
- return StatusWithMatchExpression( exp.release() );
- }
+StatusWithMatchExpression WhereCallbackNoop::parseWhere(const BSONElement& where) const {
+ unique_ptr<WhereNoOpMatchExpression> exp(new WhereNoOpMatchExpression());
+ if (where.type() == String || where.type() == Code) {
+ Status s = exp->init(where.valuestr());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(exp.release());
+ }
- return StatusWithMatchExpression( ErrorCodes::BadValue, "$where got bad type" );
+ if (where.type() == CodeWScope) {
+ Status s = exp->init(where.codeWScopeCode());
+ if (!s.isOK())
+ return StatusWithMatchExpression(s);
+ return StatusWithMatchExpression(exp.release());
}
+
+ return StatusWithMatchExpression(ErrorCodes::BadValue, "$where got bad type");
+}
}
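
For orientation, a minimal usage sketch of the no-op callback above; the query literal is hypothetical, and only calls visible in this diff (parseWhere, matchesBSON) are assumed:

WhereCallbackNoop callback;
BSONObj query = BSON("$where" << "function() { return true; }");
StatusWithMatchExpression result = callback.parseWhere(query["$where"]);
if (result.isOK()) {
    std::unique_ptr<MatchExpression> expr(result.getValue());
    // The mongos stand-in accepts the JS source but never matches anything.
    bool matched = expr->matchesBSON(BSONObj());  // expected: false
}
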
diff --git a/src/mongo/db/matcher/match_details.cpp b/src/mongo/db/matcher/match_details.cpp
index 6adcfcdd910..6675bf08e15 100644
--- a/src/mongo/db/matcher/match_details.cpp
+++ b/src/mongo/db/matcher/match_details.cpp
@@ -36,39 +36,37 @@
namespace mongo {
- using std::string;
+using std::string;
- MatchDetails::MatchDetails() :
- _elemMatchKeyRequested() {
- resetOutput();
- }
-
- void MatchDetails::resetOutput() {
- _loadedRecord = false;
- _elemMatchKey.reset();
- }
+MatchDetails::MatchDetails() : _elemMatchKeyRequested() {
+ resetOutput();
+}
- bool MatchDetails::hasElemMatchKey() const {
- return _elemMatchKey.get();
- }
+void MatchDetails::resetOutput() {
+ _loadedRecord = false;
+ _elemMatchKey.reset();
+}
- std::string MatchDetails::elemMatchKey() const {
- verify( hasElemMatchKey() );
- return *(_elemMatchKey.get());
- }
+bool MatchDetails::hasElemMatchKey() const {
+ return _elemMatchKey.get();
+}
- void MatchDetails::setElemMatchKey( const std::string &elemMatchKey ) {
- if ( _elemMatchKeyRequested ) {
- _elemMatchKey.reset( new std::string( elemMatchKey ) );
- }
- }
+std::string MatchDetails::elemMatchKey() const {
+ verify(hasElemMatchKey());
+ return *(_elemMatchKey.get());
+}
- string MatchDetails::toString() const {
- std::stringstream ss;
- ss << "loadedRecord: " << _loadedRecord << " ";
- ss << "elemMatchKeyRequested: " << _elemMatchKeyRequested << " ";
- ss << "elemMatchKey: " << ( _elemMatchKey ? _elemMatchKey->c_str() : "NONE" ) << " ";
- return ss.str();
+void MatchDetails::setElemMatchKey(const std::string& elemMatchKey) {
+ if (_elemMatchKeyRequested) {
+ _elemMatchKey.reset(new std::string(elemMatchKey));
}
+}
+string MatchDetails::toString() const {
+ std::stringstream ss;
+ ss << "loadedRecord: " << _loadedRecord << " ";
+ ss << "elemMatchKeyRequested: " << _elemMatchKeyRequested << " ";
+ ss << "elemMatchKey: " << (_elemMatchKey ? _elemMatchKey->c_str() : "NONE") << " ";
+ return ss.str();
+}
}
diff --git a/src/mongo/db/matcher/match_details.h b/src/mongo/db/matcher/match_details.h
index 8d5f016747f..15a0a606678 100644
--- a/src/mongo/db/matcher/match_details.h
+++ b/src/mongo/db/matcher/match_details.h
@@ -35,39 +35,47 @@
namespace mongo {
- /** Reports information about a match request. */
- class MatchDetails {
- public:
- MatchDetails();
+/** Reports information about a match request. */
+class MatchDetails {
+public:
+ MatchDetails();
- void resetOutput();
+ void resetOutput();
- // for debugging only
- std::string toString() const;
+ // for debugging only
+ std::string toString() const;
- // relating to whether or not we had to load the full record
+ // relating to whether or not we had to load the full record
- void setLoadedRecord( bool loadedRecord ) { _loadedRecord = loadedRecord; }
+ void setLoadedRecord(bool loadedRecord) {
+ _loadedRecord = loadedRecord;
+ }
- bool hasLoadedRecord() const { return _loadedRecord; }
+ bool hasLoadedRecord() const {
+ return _loadedRecord;
+ }
- // this name is wrong
+ // this name is wrong
- bool needRecord() const { return _elemMatchKeyRequested; }
+ bool needRecord() const {
+ return _elemMatchKeyRequested;
+ }
- // if we need to store the offset into an array where we found the match
+ // if we need to store the offset into an array where we found the match
- /** Request that an elemMatchKey be recorded. */
- void requestElemMatchKey() { _elemMatchKeyRequested = true; }
+ /** Request that an elemMatchKey be recorded. */
+ void requestElemMatchKey() {
+ _elemMatchKeyRequested = true;
+ }
- bool hasElemMatchKey() const;
- std::string elemMatchKey() const;
+ bool hasElemMatchKey() const;
+ std::string elemMatchKey() const;
- void setElemMatchKey( const std::string &elemMatchKey );
+ void setElemMatchKey(const std::string& elemMatchKey);
- private:
- bool _loadedRecord;
- bool _elemMatchKeyRequested;
- std::unique_ptr<std::string> _elemMatchKey;
- };
+private:
+ bool _loadedRecord;
+ bool _elemMatchKeyRequested;
+ std::unique_ptr<std::string> _elemMatchKey;
+};
}
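
A rough usage sketch for the MatchDetails API above; the pattern and document are made up, and Matcher is the wrapper reformatted later in this diff:

MatchDetails details;
details.requestElemMatchKey();  // ask the matcher to record the array offset of the match
Matcher matcher(BSON("a.b" << 2));
BSONObj doc = fromjson("{a: [{b: 1}, {b: 2}]}");
if (matcher.matches(doc, &details) && details.hasElemMatchKey()) {
    // elemMatchKey() holds the offset of the matching array element (presumably "1" here).
    std::string key = details.elemMatchKey();
}
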
diff --git a/src/mongo/db/matcher/matchable.cpp b/src/mongo/db/matcher/matchable.cpp
index b28f78f5ecf..bb5671ea801 100644
--- a/src/mongo/db/matcher/matchable.cpp
+++ b/src/mongo/db/matcher/matchable.cpp
@@ -34,12 +34,9 @@
namespace mongo {
- BSONMatchableDocument::BSONMatchableDocument( const BSONObj& obj )
- : _obj( obj ) {
- _iteratorUsed = false;
- }
-
- BSONMatchableDocument::~BSONMatchableDocument() {
- }
+BSONMatchableDocument::BSONMatchableDocument(const BSONObj& obj) : _obj(obj) {
+ _iteratorUsed = false;
+}
+BSONMatchableDocument::~BSONMatchableDocument() {}
}
diff --git a/src/mongo/db/matcher/matchable.h b/src/mongo/db/matcher/matchable.h
index a3506005901..66d1a417af0 100644
--- a/src/mongo/db/matcher/matchable.h
+++ b/src/mongo/db/matcher/matchable.h
@@ -36,70 +36,72 @@
namespace mongo {
- class MatchableDocument {
- public:
- // Inlining to allow subclasses to see that this is a no-op and avoid a function call.
- // Speeds up query execution measurably.
- virtual ~MatchableDocument() {}
-
- virtual BSONObj toBSON() const = 0;
-
- /**
- * The neewly returned ElementIterator is allowed to keep a pointer to path.
- * So the caller of this function should make sure path is in scope until
- * the ElementIterator is deallocated
- */
- virtual ElementIterator* allocateIterator( const ElementPath* path ) const = 0;
-
- virtual void releaseIterator( ElementIterator* iterator ) const = 0;
-
- class IteratorHolder {
- public:
- IteratorHolder( const MatchableDocument* doc, const ElementPath* path ) {
- _doc = doc;
- _iterator = _doc->allocateIterator( path );
- }
-
- ~IteratorHolder() {
- _doc->releaseIterator( _iterator );
- }
-
- ElementIterator* operator->() const {
- return _iterator;
- }
- private:
- const MatchableDocument* _doc;
- ElementIterator* _iterator;
- };
- };
+class MatchableDocument {
+public:
+ // Inlining to allow subclasses to see that this is a no-op and avoid a function call.
+ // Speeds up query execution measurably.
+ virtual ~MatchableDocument() {}
- class BSONMatchableDocument : public MatchableDocument {
- public:
- BSONMatchableDocument( const BSONObj& obj );
- virtual ~BSONMatchableDocument();
+ virtual BSONObj toBSON() const = 0;
- virtual BSONObj toBSON() const { return _obj; }
+ /**
+     * The newly returned ElementIterator is allowed to keep a pointer to path.
+     * So the caller of this function should make sure path is in scope until
+     * the ElementIterator is deallocated.
+ */
+ virtual ElementIterator* allocateIterator(const ElementPath* path) const = 0;
- virtual ElementIterator* allocateIterator( const ElementPath* path ) const {
- if ( _iteratorUsed )
- return new BSONElementIterator( path, _obj );
- _iteratorUsed = true;
- _iterator.reset( path, _obj );
- return &_iterator;
+ virtual void releaseIterator(ElementIterator* iterator) const = 0;
+
+ class IteratorHolder {
+ public:
+ IteratorHolder(const MatchableDocument* doc, const ElementPath* path) {
+ _doc = doc;
+ _iterator = _doc->allocateIterator(path);
}
- virtual void releaseIterator( ElementIterator* iterator ) const {
- if ( iterator == &_iterator ) {
- _iteratorUsed = false;
- }
- else {
- delete iterator;
- }
+ ~IteratorHolder() {
+ _doc->releaseIterator(_iterator);
+ }
+
+ ElementIterator* operator->() const {
+ return _iterator;
}
private:
- BSONObj _obj;
- mutable BSONElementIterator _iterator;
- mutable bool _iteratorUsed;
+ const MatchableDocument* _doc;
+ ElementIterator* _iterator;
};
+};
+
+class BSONMatchableDocument : public MatchableDocument {
+public:
+ BSONMatchableDocument(const BSONObj& obj);
+ virtual ~BSONMatchableDocument();
+
+ virtual BSONObj toBSON() const {
+ return _obj;
+ }
+
+ virtual ElementIterator* allocateIterator(const ElementPath* path) const {
+ if (_iteratorUsed)
+ return new BSONElementIterator(path, _obj);
+ _iteratorUsed = true;
+ _iterator.reset(path, _obj);
+ return &_iterator;
+ }
+
+ virtual void releaseIterator(ElementIterator* iterator) const {
+ if (iterator == &_iterator) {
+ _iteratorUsed = false;
+ } else {
+ delete iterator;
+ }
+ }
+
+private:
+ BSONObj _obj;
+ mutable BSONElementIterator _iterator;
+ mutable bool _iteratorUsed;
+};
}
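
A minimal sketch of the RAII IteratorHolder above, assuming an ElementPath initialized as elsewhere in this diff; the document contents are illustrative:

BSONObj obj = fromjson("{a: [1, 2]}");
BSONMatchableDocument doc(obj);
ElementPath path;
invariant(path.init("a").isOK());
MatchableDocument::IteratorHolder cursor(&doc, &path);
while (cursor->more()) {
    ElementIterator::Context ctx = cursor->next();
    // ctx.element() yields 1, then 2, then the enclosing array itself.
}
// The holder's destructor calls releaseIterator(), handing the cached
// BSONElementIterator back to the document.
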
diff --git a/src/mongo/db/matcher/matcher.cpp b/src/mongo/db/matcher/matcher.cpp
index e57bae6fff7..da7e68e2f09 100644
--- a/src/mongo/db/matcher/matcher.cpp
+++ b/src/mongo/db/matcher/matcher.cpp
@@ -41,23 +41,21 @@
namespace mongo {
- Matcher::Matcher(const BSONObj& pattern,
- const MatchExpressionParser::WhereCallback& whereCallback)
- : _pattern(pattern) {
-
- StatusWithMatchExpression result = MatchExpressionParser::parse(pattern, whereCallback);
- uassert( 16810,
- mongoutils::str::stream() << "bad query: " << result.getStatus().toString(),
- result.isOK() );
-
- _expression.reset( result.getValue() );
- }
-
- bool Matcher::matches(const BSONObj& doc, MatchDetails* details ) const {
- if ( !_expression )
- return true;
-
- return _expression->matchesBSON( doc, details );
- }
+Matcher::Matcher(const BSONObj& pattern, const MatchExpressionParser::WhereCallback& whereCallback)
+ : _pattern(pattern) {
+ StatusWithMatchExpression result = MatchExpressionParser::parse(pattern, whereCallback);
+ uassert(16810,
+ mongoutils::str::stream() << "bad query: " << result.getStatus().toString(),
+ result.isOK());
+
+ _expression.reset(result.getValue());
+}
+
+bool Matcher::matches(const BSONObj& doc, MatchDetails* details) const {
+ if (!_expression)
+ return true;
+
+ return _expression->matchesBSON(doc, details);
+}
} // namespace mongo
diff --git a/src/mongo/db/matcher/matcher.h b/src/mongo/db/matcher/matcher.h
index 629f08facd5..e7ea9d93f1a 100644
--- a/src/mongo/db/matcher/matcher.h
+++ b/src/mongo/db/matcher/matcher.h
@@ -41,27 +41,31 @@
namespace mongo {
- /**
- * Matcher is a simple wrapper around a BSONObj and the MatchExpression created from it.
- */
- class Matcher {
- MONGO_DISALLOW_COPYING(Matcher);
+/**
+ * Matcher is a simple wrapper around a BSONObj and the MatchExpression created from it.
+ */
+class Matcher {
+ MONGO_DISALLOW_COPYING(Matcher);
- public:
- explicit Matcher(const BSONObj& pattern,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
+public:
+ explicit Matcher(const BSONObj& pattern,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
- bool matches(const BSONObj& doc, MatchDetails* details = NULL ) const;
+ bool matches(const BSONObj& doc, MatchDetails* details = NULL) const;
- const BSONObj* getQuery() const { return &_pattern; };
+ const BSONObj* getQuery() const {
+ return &_pattern;
+ };
- std::string toString() const { return _pattern.toString(); }
+ std::string toString() const {
+ return _pattern.toString();
+ }
- private:
- BSONObj _pattern;
+private:
+ BSONObj _pattern;
- std::unique_ptr<MatchExpression> _expression;
- };
+ std::unique_ptr<MatchExpression> _expression;
+};
} // namespace mongo
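
As a quick illustration of the Matcher wrapper above (the values are arbitrary); note that construction uasserts with code 16810 if the pattern does not parse:

Matcher matcher(BSON("x" << BSON("$gt" << 3)));
bool hit = matcher.matches(BSON("x" << 5));   // true
bool miss = matcher.matches(BSON("x" << 1));  // false
// matches() also accepts a MatchDetails* when array-offset details are needed.
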
diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp
index 80fdd3e8ec8..3dd9374faa7 100644
--- a/src/mongo/db/matcher/path.cpp
+++ b/src/mongo/db/matcher/path.cpp
@@ -35,295 +35,280 @@
namespace mongo {
- Status ElementPath::init( StringData path ) {
- _shouldTraverseNonleafArrays = true;
- _shouldTraverseLeafArray = true;
- _fieldRef.parse( path );
- return Status::OK();
- }
-
- // -----
+Status ElementPath::init(StringData path) {
+ _shouldTraverseNonleafArrays = true;
+ _shouldTraverseLeafArray = true;
+ _fieldRef.parse(path);
+ return Status::OK();
+}
- ElementIterator::~ElementIterator(){
- }
+// -----
- void ElementIterator::Context::reset() {
- _element = BSONElement();
- }
+ElementIterator::~ElementIterator() {}
- void ElementIterator::Context::reset( BSONElement element,
- BSONElement arrayOffset,
- bool outerArray ) {
- _element = element;
- _arrayOffset = arrayOffset;
- _outerArray = outerArray;
- }
+void ElementIterator::Context::reset() {
+ _element = BSONElement();
+}
+void ElementIterator::Context::reset(BSONElement element,
+ BSONElement arrayOffset,
+ bool outerArray) {
+ _element = element;
+ _arrayOffset = arrayOffset;
+ _outerArray = outerArray;
+}
- // ------
- SimpleArrayElementIterator::SimpleArrayElementIterator( const BSONElement& theArray, bool returnArrayLast )
- : _theArray( theArray ), _returnArrayLast( returnArrayLast ), _iterator( theArray.Obj() ) {
+// ------
- }
+SimpleArrayElementIterator::SimpleArrayElementIterator(const BSONElement& theArray,
+ bool returnArrayLast)
+ : _theArray(theArray), _returnArrayLast(returnArrayLast), _iterator(theArray.Obj()) {}
- bool SimpleArrayElementIterator::more() {
- return _iterator.more() || _returnArrayLast;
- }
+bool SimpleArrayElementIterator::more() {
+ return _iterator.more() || _returnArrayLast;
+}
- ElementIterator::Context SimpleArrayElementIterator::next() {
- if ( _iterator.more() ) {
- Context e;
- e.reset( _iterator.next(), BSONElement(), false );
- return e;
- }
- _returnArrayLast = false;
+ElementIterator::Context SimpleArrayElementIterator::next() {
+ if (_iterator.more()) {
Context e;
- e.reset( _theArray, BSONElement(), true );
+ e.reset(_iterator.next(), BSONElement(), false);
return e;
}
+ _returnArrayLast = false;
+ Context e;
+ e.reset(_theArray, BSONElement(), true);
+ return e;
+}
+// ------
+BSONElementIterator::BSONElementIterator() {
+ _path = NULL;
+}
- // ------
- BSONElementIterator::BSONElementIterator() {
- _path = NULL;
- }
-
- BSONElementIterator::BSONElementIterator( const ElementPath* path, const BSONObj& context )
- : _path( path ), _context( context ) {
- _state = BEGIN;
- //log() << "path: " << path.fieldRef().dottedField() << " context: " << context << endl;
- }
+BSONElementIterator::BSONElementIterator(const ElementPath* path, const BSONObj& context)
+ : _path(path), _context(context) {
+ _state = BEGIN;
+ // log() << "path: " << path.fieldRef().dottedField() << " context: " << context << endl;
+}
- BSONElementIterator::~BSONElementIterator() {
- }
+BSONElementIterator::~BSONElementIterator() {}
- void BSONElementIterator::reset( const ElementPath* path, const BSONObj& context ) {
- _path = path;
- _context = context;
- _state = BEGIN;
- _next.reset();
+void BSONElementIterator::reset(const ElementPath* path, const BSONObj& context) {
+ _path = path;
+ _context = context;
+ _state = BEGIN;
+ _next.reset();
- _subCursor.reset();
- _subCursorPath.reset();
- }
+ _subCursor.reset();
+ _subCursorPath.reset();
+}
- void BSONElementIterator::ArrayIterationState::reset( const FieldRef& ref, int start ) {
- restOfPath = ref.dottedField( start ).toString();
- hasMore = restOfPath.size() > 0;
- if ( hasMore ) {
- nextPieceOfPath = ref.getPart( start );
- nextPieceOfPathIsNumber = isAllDigits( nextPieceOfPath );
- }
- else {
- nextPieceOfPathIsNumber = false;
- }
+void BSONElementIterator::ArrayIterationState::reset(const FieldRef& ref, int start) {
+ restOfPath = ref.dottedField(start).toString();
+ hasMore = restOfPath.size() > 0;
+ if (hasMore) {
+ nextPieceOfPath = ref.getPart(start);
+ nextPieceOfPathIsNumber = isAllDigits(nextPieceOfPath);
+ } else {
+ nextPieceOfPathIsNumber = false;
}
+}
- bool BSONElementIterator::ArrayIterationState::isArrayOffsetMatch( StringData fieldName ) const {
- if ( !nextPieceOfPathIsNumber )
- return false;
- return nextPieceOfPath == fieldName;
- }
+bool BSONElementIterator::ArrayIterationState::isArrayOffsetMatch(StringData fieldName) const {
+ if (!nextPieceOfPathIsNumber)
+ return false;
+ return nextPieceOfPath == fieldName;
+}
- void BSONElementIterator::ArrayIterationState::startIterator( BSONElement e ) {
- _theArray = e;
- _iterator.reset( new BSONObjIterator( _theArray.Obj() ) );
- }
+void BSONElementIterator::ArrayIterationState::startIterator(BSONElement e) {
+ _theArray = e;
+ _iterator.reset(new BSONObjIterator(_theArray.Obj()));
+}
- bool BSONElementIterator::ArrayIterationState::more() {
- return _iterator && _iterator->more();
- }
+bool BSONElementIterator::ArrayIterationState::more() {
+ return _iterator && _iterator->more();
+}
- BSONElement BSONElementIterator::ArrayIterationState::next() {
- _current = _iterator->next();
- return _current;
- }
+BSONElement BSONElementIterator::ArrayIterationState::next() {
+ _current = _iterator->next();
+ return _current;
+}
- bool BSONElementIterator::subCursorHasMore() {
- // While we still are still finding arrays along the path, keep traversing deeper.
- while ( _subCursor ) {
+bool BSONElementIterator::subCursorHasMore() {
+    // While we are still finding arrays along the path, keep traversing deeper.
+ while (_subCursor) {
+ if (_subCursor->more()) {
+ return true;
+ }
+ _subCursor.reset();
- if ( _subCursor->more() ) {
+ // If the subcursor doesn't have more, see if the current element is an array offset
+ // match (see comment in BSONElementIterator::more() for an example). If it is indeed
+ // an array offset match, create a new subcursor and examine it.
+ if (_arrayIterationState.isArrayOffsetMatch(_arrayIterationState._current.fieldName())) {
+ if (_arrayIterationState.nextEntireRest()) {
+ // Our path terminates at the array offset. _next should point at the current
+ // array element.
+ _next.reset(_arrayIterationState._current, _arrayIterationState._current, true);
+ _arrayIterationState._current = BSONElement();
return true;
}
- _subCursor.reset();
-
- // If the subcursor doesn't have more, see if the current element is an array offset
- // match (see comment in BSONElementIterator::more() for an example). If it is indeed
- // an array offset match, create a new subcursor and examine it.
- if ( _arrayIterationState.isArrayOffsetMatch( _arrayIterationState._current.fieldName() ) ) {
- if ( _arrayIterationState.nextEntireRest() ) {
- // Our path terminates at the array offset. _next should point at the current
- // array element.
- _next.reset( _arrayIterationState._current,
- _arrayIterationState._current,
- true );
- _arrayIterationState._current = BSONElement();
- return true;
- }
- _subCursorPath.reset( new ElementPath() );
- _subCursorPath->init( _arrayIterationState.restOfPath.substr( _arrayIterationState.nextPieceOfPath.size() + 1 ) );
- _subCursorPath->setTraverseLeafArray( _path->shouldTraverseLeafArray() );
+ _subCursorPath.reset(new ElementPath());
+ _subCursorPath->init(_arrayIterationState.restOfPath.substr(
+ _arrayIterationState.nextPieceOfPath.size() + 1));
+ _subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
- // If we're here, we must be able to traverse nonleaf arrays.
- dassert(_path->shouldTraverseNonleafArrays());
- dassert(_subCursorPath->shouldTraverseNonleafArrays());
-
- _subCursor.reset( new BSONElementIterator( _subCursorPath.get(),
- _arrayIterationState._current.Obj() ) );
- _arrayIterationState._current = BSONElement();
- }
+ // If we're here, we must be able to traverse nonleaf arrays.
+ dassert(_path->shouldTraverseNonleafArrays());
+ dassert(_subCursorPath->shouldTraverseNonleafArrays());
+ _subCursor.reset(
+ new BSONElementIterator(_subCursorPath.get(), _arrayIterationState._current.Obj()));
+ _arrayIterationState._current = BSONElement();
}
+ }
+ return false;
+}
+
+bool BSONElementIterator::more() {
+ if (subCursorHasMore()) {
+ return true;
+ }
+
+ if (!_next.element().eoo()) {
+ return true;
+ }
+
+ if (_state == DONE) {
return false;
}
- bool BSONElementIterator::more() {
- if ( subCursorHasMore() ) {
- return true;
- }
+ if (_state == BEGIN) {
+ size_t idxPath = 0;
+ BSONElement e = getFieldDottedOrArray(_context, _path->fieldRef(), &idxPath);
- if ( !_next.element().eoo() ) {
+ if (e.type() != Array) {
+ _next.reset(e, BSONElement(), false);
+ _state = DONE;
return true;
}
- if ( _state == DONE ){
+ // It's an array.
+
+ _arrayIterationState.reset(_path->fieldRef(), idxPath + 1);
+
+ if (_arrayIterationState.hasMore && !_path->shouldTraverseNonleafArrays()) {
+ // Don't allow traversing the array.
+ _state = DONE;
return false;
+ } else if (!_arrayIterationState.hasMore && !_path->shouldTraverseLeafArray()) {
+ // Return the leaf array.
+ _next.reset(e, BSONElement(), true);
+ _state = DONE;
+ return true;
}
- if ( _state == BEGIN ) {
- size_t idxPath = 0;
- BSONElement e = getFieldDottedOrArray( _context, _path->fieldRef(), &idxPath );
-
- if ( e.type() != Array ) {
- _next.reset( e, BSONElement(), false );
- _state = DONE;
- return true;
- }
+ _arrayIterationState.startIterator(e);
+ _state = IN_ARRAY;
- // It's an array.
+ invariant(_next.element().eoo());
+ }
- _arrayIterationState.reset( _path->fieldRef(), idxPath + 1 );
+ if (_state == IN_ARRAY) {
+ // We're traversing an array. Look at each array element.
- if (_arrayIterationState.hasMore && !_path->shouldTraverseNonleafArrays()) {
- // Don't allow traversing the array.
- _state = DONE;
- return false;
- }
- else if (!_arrayIterationState.hasMore && !_path->shouldTraverseLeafArray()) {
- // Return the leaf array.
- _next.reset(e, BSONElement(), true);
- _state = DONE;
+ while (_arrayIterationState.more()) {
+ BSONElement eltInArray = _arrayIterationState.next();
+ if (!_arrayIterationState.hasMore) {
+ // Our path terminates at this array. _next should point at the current array
+ // element.
+ _next.reset(eltInArray, eltInArray, false);
return true;
}
- _arrayIterationState.startIterator( e );
- _state = IN_ARRAY;
+ // Our path does not terminate at this array; there's a subpath left over. Inspect
+ // the current array element to see if it could match the subpath.
- invariant( _next.element().eoo() );
- }
-
- if ( _state == IN_ARRAY ) {
- // We're traversing an array. Look at each array element.
-
- while ( _arrayIterationState.more() ) {
+ if (eltInArray.type() == Object) {
+ // The current array element is a subdocument. See if the subdocument generates
+ // any elements matching the remaining subpath.
+ _subCursorPath.reset(new ElementPath());
+ _subCursorPath->init(_arrayIterationState.restOfPath);
+ _subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
- BSONElement eltInArray = _arrayIterationState.next();
- if ( !_arrayIterationState.hasMore ) {
- // Our path terminates at this array. _next should point at the current array
- // element.
- _next.reset( eltInArray, eltInArray, false );
+ _subCursor.reset(new BSONElementIterator(_subCursorPath.get(), eltInArray.Obj()));
+ if (subCursorHasMore()) {
return true;
}
-
- // Our path does not terminate at this array; there's a subpath left over. Inspect
- // the current array element to see if it could match the subpath.
-
- if ( eltInArray.type() == Object ) {
- // The current array element is a subdocument. See if the subdocument generates
- // any elements matching the remaining subpath.
- _subCursorPath.reset( new ElementPath() );
- _subCursorPath->init( _arrayIterationState.restOfPath );
- _subCursorPath->setTraverseLeafArray( _path->shouldTraverseLeafArray() );
-
- _subCursor.reset( new BSONElementIterator( _subCursorPath.get(),
- eltInArray.Obj() ) );
- if ( subCursorHasMore() ) {
- return true;
- }
+ } else if (_arrayIterationState.isArrayOffsetMatch(eltInArray.fieldName())) {
+ // The path we're traversing has an array offset component, and the current
+ // array element corresponds to the offset we're looking for (for example: our
+ // path has a ".0" component, and we're looking at the first element of the
+ // array, so we should look inside this element).
+
+ if (_arrayIterationState.nextEntireRest()) {
+ // Our path terminates at the array offset. _next should point at the
+ // current array element.
+ _next.reset(eltInArray, eltInArray, false);
+ return true;
}
- else if ( _arrayIterationState.isArrayOffsetMatch( eltInArray.fieldName() ) ) {
- // The path we're traversing has an array offset component, and the current
- // array element corresponds to the offset we're looking for (for example: our
- // path has a ".0" component, and we're looking at the first element of the
- // array, so we should look inside this element).
-
- if ( _arrayIterationState.nextEntireRest() ) {
- // Our path terminates at the array offset. _next should point at the
- // current array element.
- _next.reset( eltInArray, eltInArray, false );
- return true;
- }
- invariant( eltInArray.type() != Object ); // Handled above.
- if ( eltInArray.type() == Array ) {
- // The current array element is itself an array. See if the nested array
- // has any elements matching the remainihng.
- _subCursorPath.reset( new ElementPath() );
- _subCursorPath->init( _arrayIterationState.restOfPath.substr( _arrayIterationState.nextPieceOfPath.size() + 1 ) );
- _subCursorPath->setTraverseLeafArray( _path->shouldTraverseLeafArray() );
- BSONElementIterator* real =
- new BSONElementIterator( _subCursorPath.get(),
- _arrayIterationState._current.Obj() );
- _subCursor.reset( real );
- real->_arrayIterationState.reset( _subCursorPath->fieldRef(), 0 );
- real->_arrayIterationState.startIterator( eltInArray );
- real->_state = IN_ARRAY;
- _arrayIterationState._current = BSONElement();
- if ( subCursorHasMore() ) {
- return true;
- }
+ invariant(eltInArray.type() != Object); // Handled above.
+ if (eltInArray.type() == Array) {
+ // The current array element is itself an array. See if the nested array
+                    // has any elements matching the remaining subpath.
+ _subCursorPath.reset(new ElementPath());
+ _subCursorPath->init(_arrayIterationState.restOfPath.substr(
+ _arrayIterationState.nextPieceOfPath.size() + 1));
+ _subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
+ BSONElementIterator* real = new BSONElementIterator(
+ _subCursorPath.get(), _arrayIterationState._current.Obj());
+ _subCursor.reset(real);
+ real->_arrayIterationState.reset(_subCursorPath->fieldRef(), 0);
+ real->_arrayIterationState.startIterator(eltInArray);
+ real->_state = IN_ARRAY;
+ _arrayIterationState._current = BSONElement();
+ if (subCursorHasMore()) {
+ return true;
}
}
-
- }
-
- if ( _arrayIterationState.hasMore ) {
- return false;
}
+ }
- _next.reset( _arrayIterationState._theArray, BSONElement(), true );
- _state = DONE;
- return true;
+ if (_arrayIterationState.hasMore) {
+ return false;
}
- return false;
+ _next.reset(_arrayIterationState._theArray, BSONElement(), true);
+ _state = DONE;
+ return true;
}
- ElementIterator::Context BSONElementIterator::next() {
- if ( _subCursor ) {
- Context e = _subCursor->next();
- // Use our array offset if we have one, otherwise copy our subcursor's. This has the
- // effect of preferring the outermost array offset, in the case where we are implicitly
- // traversing nested arrays and have multiple candidate array offsets. For example,
- // when we use the path "a.b" to generate elements from the document {a: [{b: [1, 2]}]},
- // the element with a value of 2 should be returned with an array offset of 0.
- if ( !_arrayIterationState._current.eoo() ) {
- e.setArrayOffset( _arrayIterationState._current );
- }
- return e;
+ return false;
+}
+
+ElementIterator::Context BSONElementIterator::next() {
+ if (_subCursor) {
+ Context e = _subCursor->next();
+ // Use our array offset if we have one, otherwise copy our subcursor's. This has the
+ // effect of preferring the outermost array offset, in the case where we are implicitly
+ // traversing nested arrays and have multiple candidate array offsets. For example,
+ // when we use the path "a.b" to generate elements from the document {a: [{b: [1, 2]}]},
+ // the element with a value of 2 should be returned with an array offset of 0.
+ if (!_arrayIterationState._current.eoo()) {
+ e.setArrayOffset(_arrayIterationState._current);
}
- Context x = _next;
- _next.reset();
- return x;
+ return e;
}
-
-
+ Context x = _next;
+ _next.reset();
+ return x;
+}
}
diff --git a/src/mongo/db/matcher/path.h b/src/mongo/db/matcher/path.h
index 11fec9ff673..471a1af47e6 100644
--- a/src/mongo/db/matcher/path.h
+++ b/src/mongo/db/matcher/path.h
@@ -38,132 +38,152 @@
namespace mongo {
- class ElementPath {
+class ElementPath {
+public:
+ Status init(StringData path);
+
+ void setTraverseNonleafArrays(bool b) {
+ _shouldTraverseNonleafArrays = b;
+ }
+ void setTraverseLeafArray(bool b) {
+ _shouldTraverseLeafArray = b;
+ }
+
+ const FieldRef& fieldRef() const {
+ return _fieldRef;
+ }
+ bool shouldTraverseNonleafArrays() const {
+ return _shouldTraverseNonleafArrays;
+ }
+ bool shouldTraverseLeafArray() const {
+ return _shouldTraverseLeafArray;
+ }
+
+private:
+ FieldRef _fieldRef;
+ bool _shouldTraverseNonleafArrays;
+ bool _shouldTraverseLeafArray;
+};
+
+class ElementIterator {
+public:
+ class Context {
public:
- Status init( StringData path );
+ void reset();
- void setTraverseNonleafArrays( bool b ) { _shouldTraverseNonleafArrays = b; }
- void setTraverseLeafArray( bool b ) { _shouldTraverseLeafArray = b; }
+ void reset(BSONElement element, BSONElement arrayOffset, bool outerArray);
- const FieldRef& fieldRef() const { return _fieldRef; }
- bool shouldTraverseNonleafArrays() const { return _shouldTraverseNonleafArrays; }
- bool shouldTraverseLeafArray() const { return _shouldTraverseLeafArray; }
+ void setArrayOffset(BSONElement e) {
+ _arrayOffset = e;
+ }
+
+ BSONElement element() const {
+ return _element;
+ }
+ BSONElement arrayOffset() const {
+ return _arrayOffset;
+ }
+ bool outerArray() const {
+ return _outerArray;
+ }
private:
- FieldRef _fieldRef;
- bool _shouldTraverseNonleafArrays;
- bool _shouldTraverseLeafArray;
+ BSONElement _element;
+ BSONElement _arrayOffset;
+ bool _outerArray;
};
- class ElementIterator {
- public:
- class Context {
- public:
+ virtual ~ElementIterator();
- void reset();
+ virtual bool more() = 0;
+ virtual Context next() = 0;
+};
- void reset( BSONElement element, BSONElement arrayOffset, bool outerArray );
+// ---------------------------------------------------------------
- void setArrayOffset( BSONElement e ) { _arrayOffset = e; }
+class SingleElementElementIterator : public ElementIterator {
+public:
+ explicit SingleElementElementIterator(BSONElement e) : _seen(false) {
+ _element.reset(e, BSONElement(), false);
+ }
+ virtual ~SingleElementElementIterator() {}
- BSONElement element() const { return _element; }
- BSONElement arrayOffset() const { return _arrayOffset; }
- bool outerArray() const { return _outerArray; }
+ virtual bool more() {
+ return !_seen;
+ }
+ virtual Context next() {
+ _seen = true;
+ return _element;
+ }
- private:
- BSONElement _element;
- BSONElement _arrayOffset;
- bool _outerArray;
- };
+private:
+ bool _seen;
+ ElementIterator::Context _element;
+};
- virtual ~ElementIterator();
+class SimpleArrayElementIterator : public ElementIterator {
+public:
+ SimpleArrayElementIterator(const BSONElement& theArray, bool returnArrayLast);
- virtual bool more() = 0;
- virtual Context next() = 0;
+ virtual bool more();
+ virtual Context next();
- };
+private:
+ BSONElement _theArray;
+ bool _returnArrayLast;
+ BSONObjIterator _iterator;
+};
- // ---------------------------------------------------------------
+class BSONElementIterator : public ElementIterator {
+public:
+ BSONElementIterator();
+ BSONElementIterator(const ElementPath* path, const BSONObj& context);
- class SingleElementElementIterator : public ElementIterator {
- public:
- explicit SingleElementElementIterator( BSONElement e )
- : _seen( false ) {
- _element.reset( e, BSONElement(), false );
- }
- virtual ~SingleElementElementIterator(){}
+ virtual ~BSONElementIterator();
- virtual bool more() { return !_seen; }
- virtual Context next() { _seen = true; return _element; }
+ void reset(const ElementPath* path, const BSONObj& context);
- private:
- bool _seen;
- ElementIterator::Context _element;
- };
+ bool more();
+ Context next();
- class SimpleArrayElementIterator : public ElementIterator {
- public:
- SimpleArrayElementIterator( const BSONElement& theArray, bool returnArrayLast );
-
- virtual bool more();
- virtual Context next();
-
- private:
- BSONElement _theArray;
- bool _returnArrayLast;
- BSONObjIterator _iterator;
- };
+private:
+ /**
+ * Helper for more(). Recurs on _subCursor (which traverses the remainder of a path through
+     * Helper for more(). Recurses on _subCursor (which traverses the remainder of a path through
+ */
+ bool subCursorHasMore();
- class BSONElementIterator : public ElementIterator {
- public:
- BSONElementIterator();
- BSONElementIterator( const ElementPath* path, const BSONObj& context );
+ const ElementPath* _path;
+ BSONObj _context;
- virtual ~BSONElementIterator();
+ enum State { BEGIN, IN_ARRAY, DONE } _state;
+ Context _next;
- void reset( const ElementPath* path, const BSONObj& context );
+ struct ArrayIterationState {
+ void reset(const FieldRef& ref, int start);
+ void startIterator(BSONElement theArray);
bool more();
- Context next();
-
- private:
- /**
- * Helper for more(). Recurs on _subCursor (which traverses the remainder of a path through
- * subdocuments of an array).
- */
- bool subCursorHasMore();
-
- const ElementPath* _path;
- BSONObj _context;
-
- enum State { BEGIN, IN_ARRAY, DONE } _state;
- Context _next;
-
- struct ArrayIterationState {
+ BSONElement next();
- void reset( const FieldRef& ref, int start );
- void startIterator( BSONElement theArray );
-
- bool more();
- BSONElement next();
-
- bool isArrayOffsetMatch( StringData fieldName ) const;
- bool nextEntireRest() const { return nextPieceOfPath.size() == restOfPath.size(); }
-
- std::string restOfPath;
- bool hasMore;
- StringData nextPieceOfPath;
- bool nextPieceOfPathIsNumber;
-
- BSONElement _theArray;
- BSONElement _current;
- std::unique_ptr<BSONObjIterator> _iterator;
- };
+ bool isArrayOffsetMatch(StringData fieldName) const;
+ bool nextEntireRest() const {
+ return nextPieceOfPath.size() == restOfPath.size();
+ }
- ArrayIterationState _arrayIterationState;
+ std::string restOfPath;
+ bool hasMore;
+ StringData nextPieceOfPath;
+ bool nextPieceOfPathIsNumber;
- std::unique_ptr<ElementIterator> _subCursor;
- std::unique_ptr<ElementPath> _subCursorPath;
+ BSONElement _theArray;
+ BSONElement _current;
+ std::unique_ptr<BSONObjIterator> _iterator;
};
+ ArrayIterationState _arrayIterationState;
+
+ std::unique_ptr<ElementIterator> _subCursor;
+ std::unique_ptr<ElementPath> _subCursorPath;
+};
}
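
A small sketch tying ElementPath and BSONElementIterator together, mirroring the tests that follow; the document literal is illustrative:

ElementPath path;
invariant(path.init("a.b").isOK());
BSONObj doc = fromjson("{a: [{b: 5}, {b: [9, 11]}]}");
BSONElementIterator cursor(&path, doc);
while (cursor.more()) {
    ElementIterator::Context ctx = cursor.next();
    // ctx.element() yields 5, 9, 11, and finally the leaf array [9, 11] with
    // ctx.outerArray() set; ctx.arrayOffset(), when present, is the offset in
    // the outermost implicitly traversed array.
}
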
diff --git a/src/mongo/db/matcher/path_internal.cpp b/src/mongo/db/matcher/path_internal.cpp
index d8ccfe0ea9b..31b850e6834 100644
--- a/src/mongo/db/matcher/path_internal.cpp
+++ b/src/mongo/db/matcher/path_internal.cpp
@@ -32,31 +32,27 @@
namespace mongo {
- bool isAllDigits( StringData str ) {
- for ( unsigned i = 0; i < str.size(); i++ ) {
- if ( !isdigit( str[i] ) )
- return false;
- }
- return true;
+bool isAllDigits(StringData str) {
+ for (unsigned i = 0; i < str.size(); i++) {
+ if (!isdigit(str[i]))
+ return false;
}
+ return true;
+}
- BSONElement getFieldDottedOrArray( const BSONObj& doc,
- const FieldRef& path,
- size_t* idxPath ) {
- if ( path.numParts() == 0 )
- return doc.getField( "" );
-
- BSONElement res;
-
- BSONObj curr = doc;
- bool stop = false;
- size_t partNum = 0;
- while ( partNum < path.numParts() && !stop ) {
+BSONElement getFieldDottedOrArray(const BSONObj& doc, const FieldRef& path, size_t* idxPath) {
+ if (path.numParts() == 0)
+ return doc.getField("");
- res = curr.getField( path.getPart( partNum ) );
+ BSONElement res;
- switch ( res.type() ) {
+ BSONObj curr = doc;
+ bool stop = false;
+ size_t partNum = 0;
+ while (partNum < path.numParts() && !stop) {
+ res = curr.getField(path.getPart(partNum));
+ switch (res.type()) {
case EOO:
stop = true;
break;
@@ -71,17 +67,16 @@ namespace mongo {
break;
default:
- if ( partNum+1 < path.numParts() ) {
+ if (partNum + 1 < path.numParts()) {
res = BSONElement();
}
stop = true;
-
- }
}
-
- *idxPath = partNum;
- return res;
}
+ *idxPath = partNum;
+ return res;
+}
+
} // namespace mongo
diff --git a/src/mongo/db/matcher/path_internal.h b/src/mongo/db/matcher/path_internal.h
index 82d94bc6f32..9d44d1877f1 100644
--- a/src/mongo/db/matcher/path_internal.h
+++ b/src/mongo/db/matcher/path_internal.h
@@ -37,12 +37,10 @@
namespace mongo {
- bool isAllDigits( StringData str );
+bool isAllDigits(StringData str);
- // XXX document me
- // Replaces getFieldDottedOrArray without recursion nor std::string manipulation
- BSONElement getFieldDottedOrArray( const BSONObj& doc,
- const FieldRef& path,
- size_t* idxPath );
+// XXX document me
+// Replaces getFieldDottedOrArray without recursion or std::string manipulation
+BSONElement getFieldDottedOrArray(const BSONObj& doc, const FieldRef& path, size_t* idxPath);
} // namespace mongo
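
The helper above is still marked "XXX document me", so the following is only a best-guess sketch of its contract, inferred from how path.cpp uses it: walk the dotted path until an array is hit (or the parts run out) and report via idxPath how far traversal got:

BSONObj doc = fromjson("{a: {b: [1, 2]}}");
FieldRef ref;
ref.parse("a.b.0");
size_t idx = 0;
BSONElement elt = getFieldDottedOrArray(doc, ref, &idx);
// Expected: elt is the array stored under "a.b", and idx points at the path
// part where traversal stopped (the "b" component), so the caller can resume
// with the remaining ".0" suffix.
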
diff --git a/src/mongo/db/matcher/path_test.cpp b/src/mongo/db/matcher/path_test.cpp
index 12091c4749c..73bbba4961f 100644
--- a/src/mongo/db/matcher/path_test.cpp
+++ b/src/mongo/db/matcher/path_test.cpp
@@ -37,451 +37,445 @@
namespace mongo {
- using std::string;
+using std::string;
- TEST( Path, Root1 ) {
- ElementPath p;
- ASSERT( p.init( "a" ).isOK() );
+TEST(Path, Root1) {
+ ElementPath p;
+ ASSERT(p.init("a").isOK());
- BSONObj doc = BSON( "x" << 4 << "a" << 5 );
+ BSONObj doc = BSON("x" << 4 << "a" << 5);
- BSONElementIterator cursor( &p, doc );
- ASSERT( cursor.more() );
- ElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( (string)"a", e.element().fieldName() );
- ASSERT_EQUALS( 5, e.element().numberInt() );
- ASSERT( !cursor.more() );
- }
+ BSONElementIterator cursor(&p, doc);
+ ASSERT(cursor.more());
+ ElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS((string) "a", e.element().fieldName());
+ ASSERT_EQUALS(5, e.element().numberInt());
+ ASSERT(!cursor.more());
+}
- TEST( Path, RootArray1 ) {
- ElementPath p;
- ASSERT( p.init( "a" ).isOK() );
+TEST(Path, RootArray1) {
+ ElementPath p;
+ ASSERT(p.init("a").isOK());
- BSONObj doc = BSON( "x" << 4 << "a" << BSON_ARRAY( 5 << 6 ) );
+ BSONObj doc = BSON("x" << 4 << "a" << BSON_ARRAY(5 << 6));
- BSONElementIterator cursor( &p, doc );
+ BSONElementIterator cursor(&p, doc);
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( 5, e.element().numberInt() );
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(5, e.element().numberInt());
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( 6, e.element().numberInt() );
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(6, e.element().numberInt());
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( Array, e.element().type() );
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(Array, e.element().type());
- ASSERT( !cursor.more() );
- }
+ ASSERT(!cursor.more());
+}
- TEST( Path, RootArray2 ) {
- ElementPath p;
- ASSERT( p.init( "a" ).isOK() );
- p.setTraverseLeafArray( false );
+TEST(Path, RootArray2) {
+ ElementPath p;
+ ASSERT(p.init("a").isOK());
+ p.setTraverseLeafArray(false);
- BSONObj doc = BSON( "x" << 4 << "a" << BSON_ARRAY( 5 << 6 ) );
+ BSONObj doc = BSON("x" << 4 << "a" << BSON_ARRAY(5 << 6));
- BSONElementIterator cursor( &p, doc );
+ BSONElementIterator cursor(&p, doc);
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT( e.element().type() == Array );
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT(e.element().type() == Array);
- ASSERT( !cursor.more() );
- }
+ ASSERT(!cursor.more());
+}
- TEST( Path, Nested1 ) {
- ElementPath p;
- ASSERT( p.init( "a.b" ).isOK() );
-
- BSONObj doc = BSON( "a" << BSON_ARRAY( BSON( "b" << 5 ) <<
- 3 <<
- BSONObj() <<
- BSON( "b" << BSON_ARRAY( 9 << 11 ) ) <<
- BSON( "b" << 7 ) ) );
-
- BSONElementIterator cursor( &p, doc );
-
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( 5, e.element().numberInt() );
- ASSERT( !e.outerArray() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT( e.element().eoo() );
- ASSERT_EQUALS( (string)"2", e.arrayOffset().fieldName() );
- ASSERT( !e.outerArray() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( 9, e.element().numberInt() );
- ASSERT( !e.outerArray() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( 11, e.element().numberInt() );
- ASSERT( !e.outerArray() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( Array, e.element().type() );
- ASSERT_EQUALS( 2, e.element().Obj().nFields() );
- ASSERT( e.outerArray() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( 7, e.element().numberInt() );
- ASSERT( !e.outerArray() );
-
- ASSERT( !cursor.more() );
- }
+TEST(Path, Nested1) {
+ ElementPath p;
+ ASSERT(p.init("a.b").isOK());
+
+ BSONObj doc =
+ BSON("a" << BSON_ARRAY(BSON("b" << 5) << 3 << BSONObj() << BSON("b" << BSON_ARRAY(9 << 11))
+ << BSON("b" << 7)));
+
+ BSONElementIterator cursor(&p, doc);
+
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(5, e.element().numberInt());
+ ASSERT(!e.outerArray());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT(e.element().eoo());
+ ASSERT_EQUALS((string) "2", e.arrayOffset().fieldName());
+ ASSERT(!e.outerArray());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(9, e.element().numberInt());
+ ASSERT(!e.outerArray());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(11, e.element().numberInt());
+ ASSERT(!e.outerArray());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(Array, e.element().type());
+ ASSERT_EQUALS(2, e.element().Obj().nFields());
+ ASSERT(e.outerArray());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(7, e.element().numberInt());
+ ASSERT(!e.outerArray());
+
+ ASSERT(!cursor.more());
+}
- TEST( Path, NestedPartialMatchScalar ) {
- ElementPath p;
- ASSERT( p.init( "a.b" ).isOK() );
+TEST(Path, NestedPartialMatchScalar) {
+ ElementPath p;
+ ASSERT(p.init("a.b").isOK());
- BSONObj doc = BSON( "a" << 4 );
+ BSONObj doc = BSON("a" << 4);
- BSONElementIterator cursor( &p, doc );
+ BSONElementIterator cursor(&p, doc);
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT( e.element().eoo() );
- ASSERT( e.arrayOffset().eoo() );
- ASSERT( !e.outerArray() );
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT(e.element().eoo());
+ ASSERT(e.arrayOffset().eoo());
+ ASSERT(!e.outerArray());
- ASSERT( !cursor.more() );
- }
+ ASSERT(!cursor.more());
+}
- // When the path (partially or in its entirety) refers to an array,
- // the iteration logic does not return an EOO.
- // what we want ideally.
- TEST( Path, NestedPartialMatchArray ) {
- ElementPath p;
- ASSERT( p.init( "a.b" ).isOK() );
+// When the path (partially or in its entirety) refers to an array,
+// the iteration logic does not return an EOO.
+// what we want ideally.
+TEST(Path, NestedPartialMatchArray) {
+ ElementPath p;
+ ASSERT(p.init("a.b").isOK());
- BSONObj doc = BSON( "a" << BSON_ARRAY( 4 ) );
+ BSONObj doc = BSON("a" << BSON_ARRAY(4));
- BSONElementIterator cursor( &p, doc );
+ BSONElementIterator cursor(&p, doc);
- ASSERT( !cursor.more() );
- }
+ ASSERT(!cursor.more());
+}
- // Note that this describes existing behavior and not necessarily
- TEST( Path, NestedEmptyArray ) {
- ElementPath p;
- ASSERT( p.init( "a.b" ).isOK() );
+// Note that this describes existing behavior and not necessarily
+TEST(Path, NestedEmptyArray) {
+ ElementPath p;
+ ASSERT(p.init("a.b").isOK());
- BSONObj doc = BSON( "a" << BSON( "b" << BSONArray() ) );
+ BSONObj doc = BSON("a" << BSON("b" << BSONArray()));
- BSONElementIterator cursor( &p, doc );
+ BSONElementIterator cursor(&p, doc);
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( Array, e.element().type() );
- ASSERT_EQUALS( 0, e.element().Obj().nFields() );
- ASSERT( e.outerArray() );
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(Array, e.element().type());
+ ASSERT_EQUALS(0, e.element().Obj().nFields());
+ ASSERT(e.outerArray());
- ASSERT( !cursor.more() );
- }
+ ASSERT(!cursor.more());
+}
- TEST( Path, NestedNoLeaf1 ) {
- ElementPath p;
- ASSERT( p.init( "a.b" ).isOK() );
- p.setTraverseLeafArray( false );
-
- BSONObj doc = BSON( "a" << BSON_ARRAY( BSON( "b" << 5 ) <<
- 3 <<
- BSONObj() <<
- BSON( "b" << BSON_ARRAY( 9 << 11 ) ) <<
- BSON( "b" << 7 ) ) );
-
- BSONElementIterator cursor( &p, doc );
-
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( 5, e.element().numberInt() );
- ASSERT( !e.outerArray() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT( e.element().eoo() );
- ASSERT_EQUALS( (string)"2", e.arrayOffset().fieldName() );
- ASSERT( !e.outerArray() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( Array, e.element().type() );
- ASSERT_EQUALS( 2, e.element().Obj().nFields() );
- ASSERT( e.outerArray() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( 7, e.element().numberInt() );
- ASSERT( !e.outerArray() );
-
- ASSERT( !cursor.more() );
- }
+TEST(Path, NestedNoLeaf1) {
+ ElementPath p;
+ ASSERT(p.init("a.b").isOK());
+ p.setTraverseLeafArray(false);
+ BSONObj doc =
+ BSON("a" << BSON_ARRAY(BSON("b" << 5) << 3 << BSONObj() << BSON("b" << BSON_ARRAY(9 << 11))
+ << BSON("b" << 7)));
- TEST( Path, ArrayIndex1 ) {
- ElementPath p;
- ASSERT( p.init( "a.1" ).isOK() );
+ BSONElementIterator cursor(&p, doc);
- BSONObj doc = BSON( "a" << BSON_ARRAY( 5 << 7 << 3 ) );
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(5, e.element().numberInt());
+ ASSERT(!e.outerArray());
- BSONElementIterator cursor( &p, doc );
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT(e.element().eoo());
+ ASSERT_EQUALS((string) "2", e.arrayOffset().fieldName());
+ ASSERT(!e.outerArray());
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( 7, e.element().numberInt() );
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(Array, e.element().type());
+ ASSERT_EQUALS(2, e.element().Obj().nFields());
+ ASSERT(e.outerArray());
- ASSERT( !cursor.more() );
- }
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(7, e.element().numberInt());
+ ASSERT(!e.outerArray());
- TEST( Path, ArrayIndex2 ) {
- ElementPath p;
- ASSERT( p.init( "a.1" ).isOK() );
+ ASSERT(!cursor.more());
+}
- BSONObj doc = BSON( "a" << BSON_ARRAY( 5 << BSON_ARRAY( 2 << 4 ) << 3 ) );
- BSONElementIterator cursor( &p, doc );
+TEST(Path, ArrayIndex1) {
+ ElementPath p;
+ ASSERT(p.init("a.1").isOK());
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( Array, e.element().type() );
+ BSONObj doc = BSON("a" << BSON_ARRAY(5 << 7 << 3));
- ASSERT( !cursor.more() );
- }
+ BSONElementIterator cursor(&p, doc);
- TEST( Path, ArrayIndex3 ) {
- ElementPath p;
- ASSERT( p.init( "a.1" ).isOK() );
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(7, e.element().numberInt());
- BSONObj doc = BSON( "a" << BSON_ARRAY( 5 << BSON( "1" << 4 ) << 3 ) );
+ ASSERT(!cursor.more());
+}
- BSONElementIterator cursor( &p, doc );
+TEST(Path, ArrayIndex2) {
+ ElementPath p;
+ ASSERT(p.init("a.1").isOK());
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( 4, e.element().numberInt() );
- ASSERT( !e.outerArray() );
+ BSONObj doc = BSON("a" << BSON_ARRAY(5 << BSON_ARRAY(2 << 4) << 3));
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( BSON( "1" << 4 ), e.element().Obj() );
- ASSERT( e.outerArray() );
+ BSONElementIterator cursor(&p, doc);
- ASSERT( !cursor.more() );
- }
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(Array, e.element().type());
- TEST( Path, ArrayIndexNested1 ) {
- ElementPath p;
- ASSERT( p.init( "a.1.b" ).isOK() );
+ ASSERT(!cursor.more());
+}
- BSONObj doc = BSON( "a" << BSON_ARRAY( 5 << BSON( "b" << 4 ) << 3 ) );
+TEST(Path, ArrayIndex3) {
+ ElementPath p;
+ ASSERT(p.init("a.1").isOK());
- BSONElementIterator cursor( &p, doc );
+ BSONObj doc = BSON("a" << BSON_ARRAY(5 << BSON("1" << 4) << 3));
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT( e.element().eoo() );
+ BSONElementIterator cursor(&p, doc);
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( 4, e.element().numberInt() );
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(4, e.element().numberInt());
+ ASSERT(!e.outerArray());
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(BSON("1" << 4), e.element().Obj());
+ ASSERT(e.outerArray());
- ASSERT( !cursor.more() );
- }
+ ASSERT(!cursor.more());
+}
- TEST( Path, ArrayIndexNested2 ) {
- ElementPath p;
- ASSERT( p.init( "a.1.b" ).isOK() );
+TEST(Path, ArrayIndexNested1) {
+ ElementPath p;
+ ASSERT(p.init("a.1.b").isOK());
- BSONObj doc = BSON( "a" << BSON_ARRAY( 5 << BSON_ARRAY( BSON( "b" << 4 ) ) << 3 ) );
+ BSONObj doc = BSON("a" << BSON_ARRAY(5 << BSON("b" << 4) << 3));
- BSONElementIterator cursor( &p, doc );
+ BSONElementIterator cursor(&p, doc);
- ASSERT( cursor.more() );
- BSONElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( 4, e.element().numberInt() );
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT(e.element().eoo());
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(4, e.element().numberInt());
- ASSERT( !cursor.more() );
- }
- // SERVER-15899: test iteration using a path that generates no elements, but traverses a long
- // array containing subdocuments with nested arrays.
- TEST( Path, NonMatchingLongArrayOfSubdocumentsWithNestedArrays ) {
- ElementPath p;
- ASSERT( p.init( "a.b.x" ).isOK() );
+ ASSERT(!cursor.more());
+}
- // Build the document {a: [{b: []}, {b: []}, {b: []}, ...]}.
- BSONObj subdoc = BSON( "b" << BSONArray() );
- BSONArrayBuilder builder;
- for ( int i = 0; i < 100 * 1000; ++i ) {
- builder.append( subdoc );
- }
- BSONObj doc = BSON( "a" << builder.arr() );
+TEST(Path, ArrayIndexNested2) {
+ ElementPath p;
+ ASSERT(p.init("a.1.b").isOK());
- BSONElementIterator cursor( &p, doc );
+ BSONObj doc = BSON("a" << BSON_ARRAY(5 << BSON_ARRAY(BSON("b" << 4)) << 3));
- // The path "a.b.x" matches no elements.
- ASSERT( !cursor.more() );
- }
+ BSONElementIterator cursor(&p, doc);
+
+ ASSERT(cursor.more());
+ BSONElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(4, e.element().numberInt());
- // When multiple arrays are traversed implicitly in the same path,
- // ElementIterator::Context::arrayOffset() should always refer to the current offset of the
- // outermost array that is implicitly traversed.
- TEST( Path, NestedArrayImplicitTraversal ) {
- ElementPath p;
- ASSERT( p.init( "a.b" ).isOK() );
- BSONObj doc = fromjson("{a: [{b: [2, 3]}, {b: [4, 5]}]}");
- BSONElementIterator cursor( &p, doc );
-
- ASSERT( cursor.more() );
- ElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( NumberInt, e.element().type() );
- ASSERT_EQUALS( 2, e.element().numberInt() );
- ASSERT_EQUALS( "0", e.arrayOffset().fieldNameStringData() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( NumberInt, e.element().type() );
- ASSERT_EQUALS( 3, e.element().numberInt() );
- ASSERT_EQUALS( "0", e.arrayOffset().fieldNameStringData() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( Array, e.element().type() );
- ASSERT_EQUALS( BSON( "0" << 2 << "1" << 3 ), e.element().Obj() );
- ASSERT_EQUALS( "0", e.arrayOffset().fieldNameStringData() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( NumberInt, e.element().type() );
- ASSERT_EQUALS( 4, e.element().numberInt() );
- ASSERT_EQUALS( "1", e.arrayOffset().fieldNameStringData() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( NumberInt, e.element().type() );
- ASSERT_EQUALS( 5, e.element().numberInt() );
- ASSERT_EQUALS( "1", e.arrayOffset().fieldNameStringData() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( Array, e.element().type() );
- ASSERT_EQUALS( BSON( "0" << 4 << "1" << 5 ), e.element().Obj() );
- ASSERT_EQUALS( "1", e.arrayOffset().fieldNameStringData() );
-
- ASSERT( !cursor.more() );
- }
- // SERVER-14886: when an array is being traversed explictly at the same time that a nested array
- // is being traversed implicitly, ElementIterator::Context::arrayOffset() should return the
- // current offset of the array being implicitly traversed.
- TEST( Path, ArrayOffsetWithImplicitAndExplicitTraversal ) {
- ElementPath p;
- ASSERT( p.init( "a.0.b" ).isOK() );
- BSONObj doc = fromjson("{a: [{b: [2, 3]}, {b: [4, 5]}]}");
- BSONElementIterator cursor( &p, doc );
-
- ASSERT( cursor.more() );
- ElementIterator::Context e = cursor.next();
- ASSERT_EQUALS( EOO, e.element().type() );
- ASSERT_EQUALS( "0", e.arrayOffset().fieldNameStringData() ); // First elt of outer array.
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( NumberInt, e.element().type() );
- ASSERT_EQUALS( 2, e.element().numberInt() );
- ASSERT_EQUALS( "0", e.arrayOffset().fieldNameStringData() ); // First elt of inner array.
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( NumberInt, e.element().type() );
- ASSERT_EQUALS( 3, e.element().numberInt() );
- ASSERT_EQUALS( "1", e.arrayOffset().fieldNameStringData() ); // Second elt of inner array.
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( Array, e.element().type() );
- ASSERT_EQUALS( BSON( "0" << 2 << "1" << 3 ), e.element().Obj() );
- ASSERT( e.arrayOffset().eoo() );
-
- ASSERT( cursor.more() );
- e = cursor.next();
- ASSERT_EQUALS( EOO, e.element().type() );
- ASSERT_EQUALS( "1", e.arrayOffset().fieldNameStringData() ); // Second elt of outer array.
-
- ASSERT( !cursor.more() );
+ ASSERT(!cursor.more());
+}
+
+// SERVER-15899: test iteration using a path that generates no elements, but traverses a long
+// array containing subdocuments with nested arrays.
+TEST(Path, NonMatchingLongArrayOfSubdocumentsWithNestedArrays) {
+ ElementPath p;
+ ASSERT(p.init("a.b.x").isOK());
+
+ // Build the document {a: [{b: []}, {b: []}, {b: []}, ...]}.
+ BSONObj subdoc = BSON("b" << BSONArray());
+ BSONArrayBuilder builder;
+ for (int i = 0; i < 100 * 1000; ++i) {
+ builder.append(subdoc);
}
+ BSONObj doc = BSON("a" << builder.arr());
+
+ BSONElementIterator cursor(&p, doc);
- TEST( SimpleArrayElementIterator, SimpleNoArrayLast1 ) {
- BSONObj obj = BSON( "a" << BSON_ARRAY( 5 << BSON( "x" << 6 ) << BSON_ARRAY( 7 << 9 ) << 11 ) );
- SimpleArrayElementIterator i( obj["a"], false );
+ // The path "a.b.x" matches no elements.
+ ASSERT(!cursor.more());
+}
- ASSERT( i.more() );
- ElementIterator::Context e = i.next();
- ASSERT_EQUALS( 5, e.element().numberInt() );
+// When multiple arrays are traversed implicitly in the same path,
+// ElementIterator::Context::arrayOffset() should always refer to the current offset of the
+// outermost array that is implicitly traversed.
+TEST(Path, NestedArrayImplicitTraversal) {
+ ElementPath p;
+ ASSERT(p.init("a.b").isOK());
+ BSONObj doc = fromjson("{a: [{b: [2, 3]}, {b: [4, 5]}]}");
+ BSONElementIterator cursor(&p, doc);
+
+ ASSERT(cursor.more());
+ ElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(NumberInt, e.element().type());
+ ASSERT_EQUALS(2, e.element().numberInt());
+ ASSERT_EQUALS("0", e.arrayOffset().fieldNameStringData());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(NumberInt, e.element().type());
+ ASSERT_EQUALS(3, e.element().numberInt());
+ ASSERT_EQUALS("0", e.arrayOffset().fieldNameStringData());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(Array, e.element().type());
+ ASSERT_EQUALS(BSON("0" << 2 << "1" << 3), e.element().Obj());
+ ASSERT_EQUALS("0", e.arrayOffset().fieldNameStringData());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(NumberInt, e.element().type());
+ ASSERT_EQUALS(4, e.element().numberInt());
+ ASSERT_EQUALS("1", e.arrayOffset().fieldNameStringData());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(NumberInt, e.element().type());
+ ASSERT_EQUALS(5, e.element().numberInt());
+ ASSERT_EQUALS("1", e.arrayOffset().fieldNameStringData());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(Array, e.element().type());
+ ASSERT_EQUALS(BSON("0" << 4 << "1" << 5), e.element().Obj());
+ ASSERT_EQUALS("1", e.arrayOffset().fieldNameStringData());
+
+ ASSERT(!cursor.more());
+}
- ASSERT( i.more() );
- e = i.next();
- ASSERT_EQUALS( 6, e.element().Obj()["x"].numberInt() );
+// SERVER-14886: when an array is being traversed explicitly at the same time that a nested array
+// is being traversed implicitly, ElementIterator::Context::arrayOffset() should return the
+// current offset of the array being implicitly traversed.
+TEST(Path, ArrayOffsetWithImplicitAndExplicitTraversal) {
+ ElementPath p;
+ ASSERT(p.init("a.0.b").isOK());
+ BSONObj doc = fromjson("{a: [{b: [2, 3]}, {b: [4, 5]}]}");
+ BSONElementIterator cursor(&p, doc);
+
+ ASSERT(cursor.more());
+ ElementIterator::Context e = cursor.next();
+ ASSERT_EQUALS(EOO, e.element().type());
+ ASSERT_EQUALS("0", e.arrayOffset().fieldNameStringData()); // First elt of outer array.
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(NumberInt, e.element().type());
+ ASSERT_EQUALS(2, e.element().numberInt());
+ ASSERT_EQUALS("0", e.arrayOffset().fieldNameStringData()); // First elt of inner array.
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(NumberInt, e.element().type());
+ ASSERT_EQUALS(3, e.element().numberInt());
+ ASSERT_EQUALS("1", e.arrayOffset().fieldNameStringData()); // Second elt of inner array.
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(Array, e.element().type());
+ ASSERT_EQUALS(BSON("0" << 2 << "1" << 3), e.element().Obj());
+ ASSERT(e.arrayOffset().eoo());
+
+ ASSERT(cursor.more());
+ e = cursor.next();
+ ASSERT_EQUALS(EOO, e.element().type());
+ ASSERT_EQUALS("1", e.arrayOffset().fieldNameStringData()); // Second elt of outer array.
+
+ ASSERT(!cursor.more());
+}
- ASSERT( i.more() );
- e = i.next();
- ASSERT_EQUALS( 7, e.element().Obj().firstElement().numberInt() );
+TEST(SimpleArrayElementIterator, SimpleNoArrayLast1) {
+ BSONObj obj = BSON("a" << BSON_ARRAY(5 << BSON("x" << 6) << BSON_ARRAY(7 << 9) << 11));
+ SimpleArrayElementIterator i(obj["a"], false);
- ASSERT( i.more() );
- e = i.next();
- ASSERT_EQUALS( 11, e.element().numberInt() );
+ ASSERT(i.more());
+ ElementIterator::Context e = i.next();
+ ASSERT_EQUALS(5, e.element().numberInt());
- ASSERT( !i.more() );
- }
+ ASSERT(i.more());
+ e = i.next();
+ ASSERT_EQUALS(6, e.element().Obj()["x"].numberInt());
+
+ ASSERT(i.more());
+ e = i.next();
+ ASSERT_EQUALS(7, e.element().Obj().firstElement().numberInt());
- TEST( SimpleArrayElementIterator, SimpleArrayLast1 ) {
- BSONObj obj = BSON( "a" << BSON_ARRAY( 5 << BSON( "x" << 6 ) << BSON_ARRAY( 7 << 9 ) << 11 ) );
- SimpleArrayElementIterator i( obj["a"], true );
+ ASSERT(i.more());
+ e = i.next();
+ ASSERT_EQUALS(11, e.element().numberInt());
- ASSERT( i.more() );
- ElementIterator::Context e = i.next();
- ASSERT_EQUALS( 5, e.element().numberInt() );
+ ASSERT(!i.more());
+}
- ASSERT( i.more() );
- e = i.next();
- ASSERT_EQUALS( 6, e.element().Obj()["x"].numberInt() );
+TEST(SimpleArrayElementIterator, SimpleArrayLast1) {
+ BSONObj obj = BSON("a" << BSON_ARRAY(5 << BSON("x" << 6) << BSON_ARRAY(7 << 9) << 11));
+ SimpleArrayElementIterator i(obj["a"], true);
- ASSERT( i.more() );
- e = i.next();
- ASSERT_EQUALS( 7, e.element().Obj().firstElement().numberInt() );
+ ASSERT(i.more());
+ ElementIterator::Context e = i.next();
+ ASSERT_EQUALS(5, e.element().numberInt());
- ASSERT( i.more() );
- e = i.next();
- ASSERT_EQUALS( 11, e.element().numberInt() );
+ ASSERT(i.more());
+ e = i.next();
+ ASSERT_EQUALS(6, e.element().Obj()["x"].numberInt());
- ASSERT( i.more() );
- e = i.next();
- ASSERT_EQUALS( Array, e.element().type() );
+ ASSERT(i.more());
+ e = i.next();
+ ASSERT_EQUALS(7, e.element().Obj().firstElement().numberInt());
- ASSERT( !i.more() );
- }
+ ASSERT(i.more());
+ e = i.next();
+ ASSERT_EQUALS(11, e.element().numberInt());
- TEST( SingleElementElementIterator, Simple1 ) {
- BSONObj obj = BSON( "x" << 3 << "y" << 5 );
- SingleElementElementIterator i( obj["y"] );
+ ASSERT(i.more());
+ e = i.next();
+ ASSERT_EQUALS(Array, e.element().type());
- ASSERT( i.more() );
- ElementIterator::Context e = i.next();
- ASSERT_EQUALS( 5, e.element().numberInt() );
+ ASSERT(!i.more());
+}
- ASSERT( !i.more() );
+TEST(SingleElementElementIterator, Simple1) {
+ BSONObj obj = BSON("x" << 3 << "y" << 5);
+ SingleElementElementIterator i(obj["y"]);
- }
+ ASSERT(i.more());
+ ElementIterator::Context e = i.next();
+ ASSERT_EQUALS(5, e.element().numberInt());
+ ASSERT(!i.more());
+}
}
diff --git a/src/mongo/db/max_time.h b/src/mongo/db/max_time.h
index 47ee245c2ba..93ddcab93b5 100644
--- a/src/mongo/db/max_time.h
+++ b/src/mongo/db/max_time.h
@@ -30,9 +30,6 @@
namespace mongo {
- enum {
- kMaxTimeCursorTimeLimitExpired = -1,
- kMaxTimeCursorNoTimeLimit = 0
- };
+enum { kMaxTimeCursorTimeLimitExpired = -1, kMaxTimeCursorNoTimeLimit = 0 };
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index a2daab9c201..27e929447a1 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -54,1175 +54,1222 @@
namespace mongo {
- using std::cout;
- using std::endl;
- using std::string;
+using std::cout;
+using std::endl;
+using std::string;
- MongodGlobalParams mongodGlobalParams;
+MongodGlobalParams mongodGlobalParams;
- extern DiagLog _diaglog;
+extern DiagLog _diaglog;
- Status addMongodOptions(moe::OptionSection* options) {
+Status addMongodOptions(moe::OptionSection* options) {
+ moe::OptionSection general_options("General options");
- moe::OptionSection general_options("General options");
-
- Status ret = addGeneralServerOptions(&general_options);
- if (!ret.isOK()) {
- return ret;
- }
+ Status ret = addGeneralServerOptions(&general_options);
+ if (!ret.isOK()) {
+ return ret;
+ }
#if defined(_WIN32)
- moe::OptionSection windows_scm_options("Windows Service Control Manager options");
+ moe::OptionSection windows_scm_options("Windows Service Control Manager options");
- ret = addWindowsServerOptions(&windows_scm_options);
- if (!ret.isOK()) {
- return ret;
- }
+ ret = addWindowsServerOptions(&windows_scm_options);
+ if (!ret.isOK()) {
+ return ret;
+ }
#endif
#ifdef MONGO_CONFIG_SSL
- moe::OptionSection ssl_options("SSL options");
+ moe::OptionSection ssl_options("SSL options");
- ret = addSSLServerOptions(&ssl_options);
- if (!ret.isOK()) {
- return ret;
- }
+ ret = addSSLServerOptions(&ssl_options);
+ if (!ret.isOK()) {
+ return ret;
+ }
#endif
- moe::OptionSection ms_options("Master/slave options (old; use replica sets instead)");
- moe::OptionSection rs_options("Replica set options");
- moe::OptionSection replication_options("Replication options");
- moe::OptionSection sharding_options("Sharding options");
- moe::OptionSection storage_options("Storage options");
-
- // Authentication Options
-
- // Way to enable or disable auth on command line and in Legacy config file
- general_options.addOptionChaining("auth", "auth", moe::Switch, "run with security")
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("noauth");
-
- general_options.addOptionChaining("noauth", "noauth", moe::Switch, "run without security")
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("auth");
-
- // Way to enable or disable auth in JSON Config
- general_options.addOptionChaining("security.authorization", "", moe::String,
- "How the database behaves with respect to authorization of clients. "
- "Options are \"disabled\", which means that authorization checks are not "
- "performed, and \"enabled\" which means that a client cannot perform actions it is "
- "not authorized to do.")
- .setSources(moe::SourceYAMLConfig)
- .format("(:?disabled)|(:?enabled)",
- "(disabled/enabled)");
-
- // setParameter parameters that we want as config file options
- // TODO: Actually read these into our environment. Currently they have no effect
- general_options.addOptionChaining("security.authSchemaVersion", "", moe::String, "TODO")
- .setSources(moe::SourceYAMLConfig);
-
- general_options.addOptionChaining("security.enableLocalhostAuthBypass", "", moe::String,
- "TODO")
- .setSources(moe::SourceYAMLConfig);
-
-
- // Network Options
-
- general_options.addOptionChaining("net.http.JSONPEnabled", "jsonp", moe::Switch,
- "allow JSONP access via http (has security implications)");
-
- general_options.addOptionChaining("net.http.RESTInterfaceEnabled", "rest", moe::Switch,
- "turn on simple rest api");
-
- // Diagnostic Options
-
- general_options.addOptionChaining("diaglog", "diaglog", moe::Int,
- "DEPRECATED: 0=off 1=W 2=R 3=both 7=W+some reads")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- general_options.addOptionChaining("operationProfiling.slowOpThresholdMs", "slowms",
- moe::Int, "value of slow for profile and console log")
- .setDefault(moe::Value(100));
-
- general_options.addOptionChaining("profile", "profile", moe::Int,
- "0=off 1=slow, 2=all")
- .setSources(moe::SourceAllLegacy);
-
- general_options.addOptionChaining("operationProfiling.mode", "", moe::String,
- "(off/slowOp/all)")
- .setSources(moe::SourceYAMLConfig)
- .format("(:?off)|(:?slowOp)|(:?all)", "(off/slowOp/all)");
-
- general_options.addOptionChaining("cpu", "cpu", moe::Switch,
- "periodically show cpu and iowait utilization")
- .setSources(moe::SourceAllLegacy);
-
- general_options.addOptionChaining("sysinfo", "sysinfo", moe::Switch,
- "print some diagnostic system information")
- .setSources(moe::SourceAllLegacy);
-
- // Storage Options
-
- storage_options.addOptionChaining("storage.engine", "storageEngine", moe::String,
- "what storage engine to use - defaults to wiredTiger if no data files present");
+ moe::OptionSection ms_options("Master/slave options (old; use replica sets instead)");
+ moe::OptionSection rs_options("Replica set options");
+ moe::OptionSection replication_options("Replication options");
+ moe::OptionSection sharding_options("Sharding options");
+ moe::OptionSection storage_options("Storage options");
+
+ // Authentication Options
+
+ // Way to enable or disable auth on command line and in Legacy config file
+ general_options.addOptionChaining("auth", "auth", moe::Switch, "run with security")
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("noauth");
+
+ general_options.addOptionChaining("noauth", "noauth", moe::Switch, "run without security")
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("auth");
+
+ // Way to enable or disable auth in JSON Config
+ general_options
+ .addOptionChaining(
+ "security.authorization",
+ "",
+ moe::String,
+ "How the database behaves with respect to authorization of clients. "
+ "Options are \"disabled\", which means that authorization checks are not "
+ "performed, and \"enabled\" which means that a client cannot perform actions it is "
+ "not authorized to do.")
+ .setSources(moe::SourceYAMLConfig)
+ .format("(:?disabled)|(:?enabled)", "(disabled/enabled)");
+
+ // setParameter parameters that we want as config file options
+ // TODO: Actually read these into our environment. Currently they have no effect
+ general_options.addOptionChaining("security.authSchemaVersion", "", moe::String, "TODO")
+ .setSources(moe::SourceYAMLConfig);
+
+ general_options.addOptionChaining("security.enableLocalhostAuthBypass", "", moe::String, "TODO")
+ .setSources(moe::SourceYAMLConfig);
+
+
+ // Network Options
+
+ general_options.addOptionChaining("net.http.JSONPEnabled",
+ "jsonp",
+ moe::Switch,
+ "allow JSONP access via http (has security implications)");
+
+ general_options.addOptionChaining(
+ "net.http.RESTInterfaceEnabled", "rest", moe::Switch, "turn on simple rest api");
+
+ // Diagnostic Options
+
+ general_options.addOptionChaining("diaglog",
+ "diaglog",
+ moe::Int,
+ "DEPRECATED: 0=off 1=W 2=R 3=both 7=W+some reads")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ general_options.addOptionChaining("operationProfiling.slowOpThresholdMs",
+ "slowms",
+ moe::Int,
+ "value of slow for profile and console log")
+ .setDefault(moe::Value(100));
+
+ general_options.addOptionChaining("profile", "profile", moe::Int, "0=off 1=slow, 2=all")
+ .setSources(moe::SourceAllLegacy);
+
+ general_options.addOptionChaining(
+ "operationProfiling.mode", "", moe::String, "(off/slowOp/all)")
+ .setSources(moe::SourceYAMLConfig)
+ .format("(:?off)|(:?slowOp)|(:?all)", "(off/slowOp/all)");
+
+ general_options.addOptionChaining(
+ "cpu", "cpu", moe::Switch, "periodically show cpu and iowait utilization")
+ .setSources(moe::SourceAllLegacy);
+
+ general_options.addOptionChaining("sysinfo",
+ "sysinfo",
+ moe::Switch,
+ "print some diagnostic system information")
+ .setSources(moe::SourceAllLegacy);
+
+ // Storage Options
+
+ storage_options.addOptionChaining(
+ "storage.engine",
+ "storageEngine",
+ moe::String,
+ "what storage engine to use - defaults to wiredTiger if no data files present");
#ifdef _WIN32
- boost::filesystem::path currentPath = boost::filesystem::current_path();
+ boost::filesystem::path currentPath = boost::filesystem::current_path();
- std::string defaultPath = currentPath.root_name().string()
- + storageGlobalParams.kDefaultDbPath;
- storage_options.addOptionChaining("storage.dbPath", "dbpath", moe::String,
- std::string("directory for datafiles - defaults to ")
- + storageGlobalParams.kDefaultDbPath
- + " which is " + defaultPath + " based on the current working drive");
+ std::string defaultPath = currentPath.root_name().string() + storageGlobalParams.kDefaultDbPath;
+ storage_options.addOptionChaining("storage.dbPath",
+ "dbpath",
+ moe::String,
+ std::string("directory for datafiles - defaults to ") +
+ storageGlobalParams.kDefaultDbPath + " which is " +
+ defaultPath + " based on the current working drive");
#else
- storage_options.addOptionChaining("storage.dbPath", "dbpath", moe::String,
- std::string("directory for datafiles - defaults to ")
- + storageGlobalParams.kDefaultDbPath);
+ storage_options.addOptionChaining("storage.dbPath",
+ "dbpath",
+ moe::String,
+ std::string("directory for datafiles - defaults to ") +
+ storageGlobalParams.kDefaultDbPath);
#endif
- storage_options.addOptionChaining("storage.directoryPerDB", "directoryperdb",
- moe::Switch,
- "each database will be stored in a separate directory");
-
- general_options.addOptionChaining("noIndexBuildRetry", "noIndexBuildRetry", moe::Switch,
- "don't retry any index builds that were interrupted by shutdown")
- .setSources(moe::SourceAllLegacy);
-
- general_options.addOptionChaining("storage.indexBuildRetry", "", moe::Bool,
- "don't retry any index builds that were interrupted by shutdown")
- .setSources(moe::SourceYAMLConfig);
-
- storage_options.addOptionChaining("noprealloc", "noprealloc", moe::Switch,
- "disable data file preallocation - will often hurt performance")
- .setSources(moe::SourceAllLegacy);
-
- storage_options.addOptionChaining("storage.mmapv1.preallocDataFiles", "", moe::Bool,
- "disable data file preallocation - will often hurt performance",
- "storage.preallocDataFiles")
- .setSources(moe::SourceYAMLConfig);
-
- storage_options.addOptionChaining("storage.mmapv1.nsSize", "nssize", moe::Int,
- ".ns file size (in MB) for new databases",
- "storage.nsSize")
- .setDefault(moe::Value(16));
-
- storage_options.addOptionChaining("storage.mmapv1.quota.enforced", "quota", moe::Switch,
- "limits each database to a certain number of files (8 default)",
- "storage.quota.enforced")
- .incompatibleWith("keyFile");
-
- storage_options.addOptionChaining("storage.mmapv1.quota.maxFilesPerDB", "quotaFiles",
- moe::Int,
- "number of files allowed per db, implies --quota",
- "storage.quota.maxFilesPerDB");
-
- storage_options.addOptionChaining("storage.mmapv1.smallFiles", "smallfiles", moe::Switch,
- "use a smaller default file size",
- "storage.smallFiles");
-
- storage_options.addOptionChaining("storage.syncPeriodSecs", "syncdelay", moe::Double,
- "seconds between disk syncs (0=never, but not recommended)")
- .setDefault(moe::Value(60.0));
-
- // Upgrade and repair are disallowed in JSON configs since they trigger very heavyweight
- // actions rather than specify configuration data
- storage_options.addOptionChaining("upgrade", "upgrade", moe::Switch,
- "upgrade db if needed")
- .setSources(moe::SourceAllLegacy);
-
- storage_options.addOptionChaining("repair", "repair", moe::Switch,
- "run repair on all dbs")
- .setSources(moe::SourceAllLegacy);
-
- storage_options.addOptionChaining("storage.repairPath", "repairpath", moe::String,
- "root directory for repair files - defaults to dbpath");
-
- // Javascript Options
-
- general_options.addOptionChaining("noscripting", "noscripting", moe::Switch,
- "disable scripting engine")
- .setSources(moe::SourceAllLegacy);
-
- general_options.addOptionChaining("security.javascriptEnabled", "", moe::Bool,
- "Enable javascript execution")
- .setSources(moe::SourceYAMLConfig);
-
- // Query Options
-
- general_options.addOptionChaining("notablescan", "notablescan", moe::Switch,
- "do not allow table scans")
- .setSources(moe::SourceAllLegacy);
-
- // Journaling Options
-
- // Way to enable or disable journaling on command line and in Legacy config file
- storage_options.addOptionChaining("journal", "journal", moe::Switch, "enable journaling")
- .setSources(moe::SourceAllLegacy);
-
- storage_options.addOptionChaining("nojournal", "nojournal", moe::Switch,
- "disable journaling (journaling is on by default for 64 bit)")
- .setSources(moe::SourceAllLegacy);
-
- storage_options.addOptionChaining("dur", "dur", moe::Switch, "enable journaling")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- storage_options.addOptionChaining("nodur", "nodur", moe::Switch, "disable journaling")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- // Way to enable or disable journaling in JSON Config
- general_options.addOptionChaining("storage.journal.enabled", "", moe::Bool,
- "enable journaling")
- .setSources(moe::SourceYAMLConfig);
-
- // Two ways to set durability diagnostic options. durOptions is deprecated
- storage_options.addOptionChaining("storage.mmapv1.journal.debugFlags", "journalOptions",
- moe::Int,
- "journal diagnostic options",
- "storage.journal.debugFlags")
- .incompatibleWith("durOptions");
-
- storage_options.addOptionChaining("durOptions", "durOptions", moe::Int,
- "durability diagnostic options")
- .hidden()
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("storage.mmapv1.journal.debugFlags");
-
- storage_options.addOptionChaining("storage.mmapv1.journal.commitIntervalMs",
- "journalCommitInterval", moe::Unsigned, "how often to group/batch commit (ms)",
- "storage.journal.commitIntervalMs");
-
- // Deprecated option that we don't want people to use for performance reasons
- storage_options.addOptionChaining("nopreallocj", "nopreallocj", moe::Switch,
- "don't preallocate journal files")
- .hidden()
- .setSources(moe::SourceAllLegacy);
+ storage_options.addOptionChaining("storage.directoryPerDB",
+ "directoryperdb",
+ moe::Switch,
+ "each database will be stored in a separate directory");
+
+ general_options.addOptionChaining(
+ "noIndexBuildRetry",
+ "noIndexBuildRetry",
+ moe::Switch,
+ "don't retry any index builds that were interrupted by shutdown")
+ .setSources(moe::SourceAllLegacy);
+
+ general_options.addOptionChaining(
+ "storage.indexBuildRetry",
+ "",
+ moe::Bool,
+ "don't retry any index builds that were interrupted by shutdown")
+ .setSources(moe::SourceYAMLConfig);
+
+ storage_options.addOptionChaining(
+ "noprealloc",
+ "noprealloc",
+ moe::Switch,
+ "disable data file preallocation - will often hurt performance")
+ .setSources(moe::SourceAllLegacy);
+
+ storage_options.addOptionChaining(
+ "storage.mmapv1.preallocDataFiles",
+ "",
+ moe::Bool,
+ "disable data file preallocation - will often hurt performance",
+ "storage.preallocDataFiles").setSources(moe::SourceYAMLConfig);
+
+ storage_options.addOptionChaining("storage.mmapv1.nsSize",
+ "nssize",
+ moe::Int,
+ ".ns file size (in MB) for new databases",
+ "storage.nsSize").setDefault(moe::Value(16));
+
+ storage_options.addOptionChaining(
+ "storage.mmapv1.quota.enforced",
+ "quota",
+ moe::Switch,
+ "limits each database to a certain number of files (8 default)",
+ "storage.quota.enforced").incompatibleWith("keyFile");
+
+ storage_options.addOptionChaining("storage.mmapv1.quota.maxFilesPerDB",
+ "quotaFiles",
+ moe::Int,
+ "number of files allowed per db, implies --quota",
+ "storage.quota.maxFilesPerDB");
+
+ storage_options.addOptionChaining("storage.mmapv1.smallFiles",
+ "smallfiles",
+ moe::Switch,
+ "use a smaller default file size",
+ "storage.smallFiles");
+
+ storage_options.addOptionChaining("storage.syncPeriodSecs",
+ "syncdelay",
+ moe::Double,
+ "seconds between disk syncs (0=never, but not recommended)")
+ .setDefault(moe::Value(60.0));
+
+ // Upgrade and repair are disallowed in JSON configs since they trigger very heavyweight
+ // actions rather than specify configuration data
+ storage_options.addOptionChaining("upgrade", "upgrade", moe::Switch, "upgrade db if needed")
+ .setSources(moe::SourceAllLegacy);
+
+ storage_options.addOptionChaining("repair", "repair", moe::Switch, "run repair on all dbs")
+ .setSources(moe::SourceAllLegacy);
+
+ storage_options.addOptionChaining("storage.repairPath",
+ "repairpath",
+ moe::String,
+ "root directory for repair files - defaults to dbpath");
+
+ // Javascript Options
+
+ general_options.addOptionChaining(
+ "noscripting", "noscripting", moe::Switch, "disable scripting engine")
+ .setSources(moe::SourceAllLegacy);
+
+ general_options.addOptionChaining(
+ "security.javascriptEnabled", "", moe::Bool, "Enable javascript execution")
+ .setSources(moe::SourceYAMLConfig);
+
+ // Query Options
+
+ general_options.addOptionChaining(
+ "notablescan", "notablescan", moe::Switch, "do not allow table scans")
+ .setSources(moe::SourceAllLegacy);
+
+ // Journaling Options
+
+ // Way to enable or disable journaling on command line and in Legacy config file
+ storage_options.addOptionChaining("journal", "journal", moe::Switch, "enable journaling")
+ .setSources(moe::SourceAllLegacy);
+
+ storage_options.addOptionChaining("nojournal",
+ "nojournal",
+ moe::Switch,
+ "disable journaling (journaling is on by default for 64 bit)")
+ .setSources(moe::SourceAllLegacy);
+
+ storage_options.addOptionChaining("dur", "dur", moe::Switch, "enable journaling")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ storage_options.addOptionChaining("nodur", "nodur", moe::Switch, "disable journaling")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ // Way to enable or disable journaling in JSON Config
+ general_options.addOptionChaining("storage.journal.enabled", "", moe::Bool, "enable journaling")
+ .setSources(moe::SourceYAMLConfig);
+
+ // Two ways to set durability diagnostic options. durOptions is deprecated
+ storage_options.addOptionChaining("storage.mmapv1.journal.debugFlags",
+ "journalOptions",
+ moe::Int,
+ "journal diagnostic options",
+ "storage.journal.debugFlags").incompatibleWith("durOptions");
+
+ storage_options.addOptionChaining(
+ "durOptions", "durOptions", moe::Int, "durability diagnostic options")
+ .hidden()
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("storage.mmapv1.journal.debugFlags");
+
+ storage_options.addOptionChaining("storage.mmapv1.journal.commitIntervalMs",
+ "journalCommitInterval",
+ moe::Unsigned,
+ "how often to group/batch commit (ms)",
+ "storage.journal.commitIntervalMs");
+
+ // Deprecated option that we don't want people to use for performance reasons
+ storage_options.addOptionChaining("nopreallocj",
+ "nopreallocj",
+ moe::Switch,
+ "don't preallocate journal files")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
#if defined(__linux__)
- general_options.addOptionChaining("shutdown", "shutdown", moe::Switch,
- "kill a running server (for init scripts)");
+ general_options.addOptionChaining(
+ "shutdown", "shutdown", moe::Switch, "kill a running server (for init scripts)");
#endif
- // Master Slave Options
-
- ms_options.addOptionChaining("master", "master", moe::Switch, "master mode")
- .incompatibleWith("replication.replSet")
- .incompatibleWith("replication.replSetName")
- .setSources(moe::SourceAllLegacy);
-
- ms_options.addOptionChaining("slave", "slave", moe::Switch, "slave mode")
- .incompatibleWith("replication.replSet")
- .incompatibleWith("replication.replSetName")
- .setSources(moe::SourceAllLegacy);
-
- ms_options.addOptionChaining("source", "source", moe::String,
- "when slave: specify master as <server:port>")
- .incompatibleWith("replication.replSet")
- .incompatibleWith("replication.replSetName")
- .setSources(moe::SourceAllLegacy);
-
- ms_options.addOptionChaining("only", "only", moe::String,
- "when slave: specify a single database to replicate")
- .incompatibleWith("replication.replSet")
- .incompatibleWith("replication.replSetName")
- .setSources(moe::SourceAllLegacy);
-
- ms_options.addOptionChaining("slavedelay", "slavedelay", moe::Int,
- "specify delay (in seconds) to be used when applying master ops to slave")
- .incompatibleWith("replication.replSet")
- .incompatibleWith("replication.replSetName")
- .setSources(moe::SourceAllLegacy);
-
- ms_options.addOptionChaining("autoresync", "autoresync", moe::Switch,
- "automatically resync if slave data is stale")
- .incompatibleWith("replication.replSet")
- .incompatibleWith("replication.replSetName")
- .setSources(moe::SourceAllLegacy);
-
- // Replication Options
-
- replication_options.addOptionChaining("replication.oplogSizeMB", "oplogSize", moe::Int,
- "size to use (in MB) for replication op log. default is 5% of disk space "
- "(i.e. large is good)");
-
- rs_options.addOptionChaining("replication.replSet", "replSet", moe::String,
- "arg is <setname>[/<optionalseedhostlist>]")
- .setSources(moe::SourceAllLegacy);
-
- rs_options.addOptionChaining("replication.replSetName", "", moe::String, "arg is <setname>")
- .setSources(moe::SourceYAMLConfig)
- .format("[^/]+", "[replica set name with no \"/\"]");
-
- rs_options.addOptionChaining("replication.secondaryIndexPrefetch", "replIndexPrefetch", moe::String,
- "specify index prefetching behavior (if secondary) [none|_id_only|all]")
- .format("(:?none)|(:?_id_only)|(:?all)",
- "(none/_id_only/all)");
-
- // Sharding Options
-
- sharding_options.addOptionChaining("configsvr", "configsvr", moe::Switch,
- "declare this is a config db of a cluster; default port 27019; "
- "default dir /data/configdb")
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("shardsvr");
-
- sharding_options.addOptionChaining("shardsvr", "shardsvr", moe::Switch,
- "declare this is a shard db of a cluster; default port 27018")
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("configsvr");
-
- sharding_options.addOptionChaining("sharding.clusterRole", "", moe::String,
- "Choose what role this mongod has in a sharded cluster. Possible values are:\n"
- " \"configsvr\": Start this node as a config server. Starts on port 27019 by "
- "default."
- " \"shardsvr\": Start this node as a shard server. Starts on port 27018 by "
- "default.")
- .setSources(moe::SourceYAMLConfig)
- .format("(:?configsvr)|(:?shardsvr)",
- "(configsvr/shardsvr)");
-
- sharding_options.addOptionChaining("noMoveParanoia", "noMoveParanoia", moe::Switch,
- "turn off paranoid saving of data for the moveChunk command; default")
- .hidden()
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("moveParanoia");
-
- sharding_options.addOptionChaining("moveParanoia", "moveParanoia",
- moe::Switch, "turn on paranoid saving of data during the moveChunk command "
- "(used for internal system diagnostics)")
- .hidden()
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("noMoveParanoia");
-
- sharding_options.addOptionChaining("sharding.archiveMovedChunks", "",
- moe::Bool, "config file option to turn on paranoid saving of data during the "
- "moveChunk command (used for internal system diagnostics)")
- .hidden()
- .setSources(moe::SourceYAMLConfig);
-
-
- options->addSection(general_options);
+ // Master Slave Options
+
+ ms_options.addOptionChaining("master", "master", moe::Switch, "master mode")
+ .incompatibleWith("replication.replSet")
+ .incompatibleWith("replication.replSetName")
+ .setSources(moe::SourceAllLegacy);
+
+ ms_options.addOptionChaining("slave", "slave", moe::Switch, "slave mode")
+ .incompatibleWith("replication.replSet")
+ .incompatibleWith("replication.replSetName")
+ .setSources(moe::SourceAllLegacy);
+
+ ms_options.addOptionChaining(
+ "source", "source", moe::String, "when slave: specify master as <server:port>")
+ .incompatibleWith("replication.replSet")
+ .incompatibleWith("replication.replSetName")
+ .setSources(moe::SourceAllLegacy);
+
+ ms_options.addOptionChaining("only",
+ "only",
+ moe::String,
+ "when slave: specify a single database to replicate")
+ .incompatibleWith("replication.replSet")
+ .incompatibleWith("replication.replSetName")
+ .setSources(moe::SourceAllLegacy);
+
+ ms_options.addOptionChaining(
+ "slavedelay",
+ "slavedelay",
+ moe::Int,
+ "specify delay (in seconds) to be used when applying master ops to slave")
+ .incompatibleWith("replication.replSet")
+ .incompatibleWith("replication.replSetName")
+ .setSources(moe::SourceAllLegacy);
+
+ ms_options.addOptionChaining("autoresync",
+ "autoresync",
+ moe::Switch,
+ "automatically resync if slave data is stale")
+ .incompatibleWith("replication.replSet")
+ .incompatibleWith("replication.replSetName")
+ .setSources(moe::SourceAllLegacy);
+
+ // Replication Options
+
+ replication_options.addOptionChaining(
+ "replication.oplogSizeMB",
+ "oplogSize",
+ moe::Int,
+ "size to use (in MB) for replication op log. default is 5% of disk space "
+ "(i.e. large is good)");
+
+ rs_options.addOptionChaining("replication.replSet",
+ "replSet",
+ moe::String,
+ "arg is <setname>[/<optionalseedhostlist>]")
+ .setSources(moe::SourceAllLegacy);
+
+ rs_options.addOptionChaining("replication.replSetName", "", moe::String, "arg is <setname>")
+ .setSources(moe::SourceYAMLConfig)
+ .format("[^/]+", "[replica set name with no \"/\"]");
+
+ rs_options.addOptionChaining(
+ "replication.secondaryIndexPrefetch",
+ "replIndexPrefetch",
+ moe::String,
+ "specify index prefetching behavior (if secondary) [none|_id_only|all]")
+ .format("(:?none)|(:?_id_only)|(:?all)", "(none/_id_only/all)");
+
+ // Sharding Options
+
+ sharding_options.addOptionChaining(
+ "configsvr",
+ "configsvr",
+ moe::Switch,
+ "declare this is a config db of a cluster; default port 27019; "
+ "default dir /data/configdb")
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("shardsvr");
+
+ sharding_options.addOptionChaining(
+ "shardsvr",
+ "shardsvr",
+ moe::Switch,
+ "declare this is a shard db of a cluster; default port 27018")
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("configsvr");
+
+ sharding_options
+ .addOptionChaining(
+ "sharding.clusterRole",
+ "",
+ moe::String,
+ "Choose what role this mongod has in a sharded cluster. Possible values are:\n"
+ " \"configsvr\": Start this node as a config server. Starts on port 27019 by "
+ "default."
+ " \"shardsvr\": Start this node as a shard server. Starts on port 27018 by "
+ "default.")
+ .setSources(moe::SourceYAMLConfig)
+ .format("(:?configsvr)|(:?shardsvr)", "(configsvr/shardsvr)");
+
+ sharding_options.addOptionChaining(
+ "noMoveParanoia",
+ "noMoveParanoia",
+ moe::Switch,
+ "turn off paranoid saving of data for the moveChunk command; default")
+ .hidden()
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("moveParanoia");
+
+ sharding_options.addOptionChaining(
+ "moveParanoia",
+ "moveParanoia",
+ moe::Switch,
+ "turn on paranoid saving of data during the moveChunk command "
+ "(used for internal system diagnostics)")
+ .hidden()
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("noMoveParanoia");
+
+ sharding_options.addOptionChaining(
+ "sharding.archiveMovedChunks",
+ "",
+ moe::Bool,
+ "config file option to turn on paranoid saving of data during the "
+ "moveChunk command (used for internal system diagnostics)")
+ .hidden()
+ .setSources(moe::SourceYAMLConfig);
+
+
+ options->addSection(general_options);
#if defined(_WIN32)
- options->addSection(windows_scm_options);
+ options->addSection(windows_scm_options);
#endif
- options->addSection(replication_options);
- options->addSection(ms_options);
- options->addSection(rs_options);
- options->addSection(sharding_options);
+ options->addSection(replication_options);
+ options->addSection(ms_options);
+ options->addSection(rs_options);
+ options->addSection(sharding_options);
#ifdef MONGO_CONFIG_SSL
- options->addSection(ssl_options);
+ options->addSection(ssl_options);
#endif
- options->addSection(storage_options);
-
- // The following are legacy options that are disallowed in the JSON config file
-
- options->addOptionChaining("fastsync", "fastsync", moe::Switch,
- "indicate that this instance is starting from a dbpath snapshot of the repl peer")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("pretouch", "pretouch", moe::Int,
- "n pretouch threads for applying master/slave operations")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- // This is a deprecated option that we are supporting for backwards compatibility
- // The first value for this option can be either 'dbpath' or 'run'.
- // If it is 'dbpath', mongod prints the dbpath and exits. Any extra values are ignored.
- // If it is 'run', mongod runs normally. Providing extra values is an error.
- options->addOptionChaining("command", "command", moe::StringVector, "command")
- .hidden()
- .positional(1, 3)
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("cacheSize", "cacheSize", moe::Long,
- "cache size (in MB) for rec store")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- // deprecated pairing command line options
- options->addOptionChaining("pairwith", "pairwith", moe::Switch, "DEPRECATED")
- .hidden()
- .setSources(moe::SourceAllLegacy);
+ options->addSection(storage_options);
+
+ // The following are legacy options that are disallowed in the JSON config file
+
+ options->addOptionChaining(
+ "fastsync",
+ "fastsync",
+ moe::Switch,
+ "indicate that this instance is starting from a dbpath snapshot of the repl peer")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("pretouch",
+ "pretouch",
+ moe::Int,
+ "n pretouch threads for applying master/slave operations")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ // This is a deprecated option that we are supporting for backwards compatibility
+ // The first value for this option can be either 'dbpath' or 'run'.
+ // If it is 'dbpath', mongod prints the dbpath and exits. Any extra values are ignored.
+ // If it is 'run', mongod runs normally. Providing extra values is an error.
+ options->addOptionChaining("command", "command", moe::StringVector, "command")
+ .hidden()
+ .positional(1, 3)
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining(
+ "cacheSize", "cacheSize", moe::Long, "cache size (in MB) for rec store")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ // deprecated pairing command line options
+ options->addOptionChaining("pairwith", "pairwith", moe::Switch, "DEPRECATED")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("arbiter", "arbiter", moe::Switch, "DEPRECATED")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("opIdMem", "opIdMem", moe::Switch, "DEPRECATED")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ return Status::OK();
+}
+
+void printMongodHelp(const moe::OptionSection& options) {
+ std::cout << options.helpString() << std::endl;
+};
- options->addOptionChaining("arbiter", "arbiter", moe::Switch, "DEPRECATED")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("opIdMem", "opIdMem", moe::Switch, "DEPRECATED")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- return Status::OK();
- }
-
- void printMongodHelp(const moe::OptionSection& options) {
- std::cout << options.helpString() << std::endl;
- };
-
- namespace {
- void sysRuntimeInfo() {
+namespace {
+void sysRuntimeInfo() {
#if defined(_SC_PAGE_SIZE)
- log() << " page size: " << (int) sysconf(_SC_PAGE_SIZE) << endl;
+ log() << " page size: " << (int)sysconf(_SC_PAGE_SIZE) << endl;
#endif
#if defined(_SC_PHYS_PAGES)
- log() << " _SC_PHYS_PAGES: " << sysconf(_SC_PHYS_PAGES) << endl;
+ log() << " _SC_PHYS_PAGES: " << sysconf(_SC_PHYS_PAGES) << endl;
#endif
#if defined(_SC_AVPHYS_PAGES)
- log() << " _SC_AVPHYS_PAGES: " << sysconf(_SC_AVPHYS_PAGES) << endl;
+ log() << " _SC_AVPHYS_PAGES: " << sysconf(_SC_AVPHYS_PAGES) << endl;
#endif
- }
- } // namespace
-
- void setPlainConsoleLogger() {
- logger::LogManager* manager = logger::globalLogManager();
- manager->getGlobalDomain()->clearAppenders();
- manager->getGlobalDomain()->attachAppender(
- logger::MessageLogDomain::AppenderAutoPtr(
- new logger::ConsoleAppender<logger::MessageEventEphemeral>(
- new logger::MessageEventUnadornedEncoder)));
- }
-
- bool handlePreValidationMongodOptions(const moe::Environment& params,
- const std::vector<std::string>& args) {
- if (params.count("help") &&
- params["help"].as<bool>() == true) {
- printMongodHelp(moe::startupOptions);
- return false;
- }
- if (params.count("version") &&
- params["version"].as<bool>() == true) {
- setPlainConsoleLogger();
- log() << mongodVersion() << endl;
- printGitVersion();
- printOpenSSLVersion();
- return false;
- }
- if (params.count("sysinfo") &&
- params["sysinfo"].as<bool>() == true) {
- setPlainConsoleLogger();
- sysRuntimeInfo();
- return false;
- }
-
- return true;
+}
+} // namespace
+
+void setPlainConsoleLogger() {
+ logger::LogManager* manager = logger::globalLogManager();
+ manager->getGlobalDomain()->clearAppenders();
+ manager->getGlobalDomain()->attachAppender(logger::MessageLogDomain::AppenderAutoPtr(
+ new logger::ConsoleAppender<logger::MessageEventEphemeral>(
+ new logger::MessageEventUnadornedEncoder)));
+}
+
+bool handlePreValidationMongodOptions(const moe::Environment& params,
+ const std::vector<std::string>& args) {
+ if (params.count("help") && params["help"].as<bool>() == true) {
+ printMongodHelp(moe::startupOptions);
+ return false;
+ }
+ if (params.count("version") && params["version"].as<bool>() == true) {
+ setPlainConsoleLogger();
+ log() << mongodVersion() << endl;
+ printGitVersion();
+ printOpenSSLVersion();
+ return false;
+ }
+ if (params.count("sysinfo") && params["sysinfo"].as<bool>() == true) {
+ setPlainConsoleLogger();
+ sysRuntimeInfo();
+ return false;
}
- Status validateMongodOptions(const moe::Environment& params) {
+ return true;
+}
- Status ret = validateServerOptions(params);
- if (!ret.isOK()) {
- return ret;
- }
+Status validateMongodOptions(const moe::Environment& params) {
+ Status ret = validateServerOptions(params);
+ if (!ret.isOK()) {
+ return ret;
+ }
- if ((params.count("nodur") || params.count("nojournal")) &&
- (params.count("dur") || params.count("journal"))) {
- return Status(ErrorCodes::BadValue,
- "Can't specify both --journal and --nojournal options.");
- }
+ if ((params.count("nodur") || params.count("nojournal")) &&
+ (params.count("dur") || params.count("journal"))) {
+ return Status(ErrorCodes::BadValue,
+ "Can't specify both --journal and --nojournal options.");
+ }
- // SERVER-10019 Enabling rest/jsonp without --httpinterface should break in all cases in the
- // future
- if (params.count("net.http.RESTInterfaceEnabled") &&
- params["net.http.RESTInterfaceEnabled"].as<bool>() == true) {
-
- // If we are explicitly setting httpinterface to false in the config file (the source of
- // "net.http.enabled") and not overriding it on the command line (the source of
- // "httpinterface"), then we can fail with an error message without breaking backwards
- // compatibility.
- if (!params.count("httpinterface") &&
- params.count("net.http.enabled") &&
- params["net.http.enabled"].as<bool>() == false) {
- return Status(ErrorCodes::BadValue,
- "httpinterface must be enabled to use the rest api");
- }
+ // SERVER-10019 Enabling rest/jsonp without --httpinterface should break in all cases in the
+ // future
+ if (params.count("net.http.RESTInterfaceEnabled") &&
+ params["net.http.RESTInterfaceEnabled"].as<bool>() == true) {
+ // If we are explicitly setting httpinterface to false in the config file (the source of
+ // "net.http.enabled") and not overriding it on the command line (the source of
+ // "httpinterface"), then we can fail with an error message without breaking backwards
+ // compatibility.
+ if (!params.count("httpinterface") && params.count("net.http.enabled") &&
+ params["net.http.enabled"].as<bool>() == false) {
+ return Status(ErrorCodes::BadValue,
+ "httpinterface must be enabled to use the rest api");
}
+ }
- if (params.count("net.http.JSONPEnabled") &&
- params["net.http.JSONPEnabled"].as<bool>() == true) {
-
- // If we are explicitly setting httpinterface to false in the config file (the source of
- // "net.http.enabled") and not overriding it on the command line (the source of
- // "httpinterface"), then we can fail with an error message without breaking backwards
- // compatibility.
- if (!params.count("httpinterface") &&
- params.count("net.http.enabled") &&
- params["net.http.enabled"].as<bool>() == false) {
- return Status(ErrorCodes::BadValue,
- "httpinterface must be enabled to use jsonp");
- }
+ if (params.count("net.http.JSONPEnabled") &&
+ params["net.http.JSONPEnabled"].as<bool>() == true) {
+ // If we are explicitly setting httpinterface to false in the config file (the source of
+ // "net.http.enabled") and not overriding it on the command line (the source of
+ // "httpinterface"), then we can fail with an error message without breaking backwards
+ // compatibility.
+ if (!params.count("httpinterface") && params.count("net.http.enabled") &&
+ params["net.http.enabled"].as<bool>() == false) {
+ return Status(ErrorCodes::BadValue, "httpinterface must be enabled to use jsonp");
}
+ }
#ifdef _WIN32
- if (params.count("install") || params.count("reinstall")) {
- if (params.count("storage.dbPath") &&
- !boost::filesystem::path(params["storage.dbPath"].as<string>()).is_absolute()) {
- return Status(ErrorCodes::BadValue,
- "dbPath requires an absolute file path with Windows services");
- }
+ if (params.count("install") || params.count("reinstall")) {
+ if (params.count("storage.dbPath") &&
+ !boost::filesystem::path(params["storage.dbPath"].as<string>()).is_absolute()) {
+ return Status(ErrorCodes::BadValue,
+ "dbPath requires an absolute file path with Windows services");
}
-#endif
-
- return Status::OK();
}
-
- Status canonicalizeMongodOptions(moe::Environment* params) {
-
- // Need to handle this before canonicalizing the general "server options", since
- // httpinterface and nohttpinterface are shared between mongos and mongod, but mongod has
- // extra validation required.
- if (params->count("net.http.RESTInterfaceEnabled") &&
- (*params)["net.http.RESTInterfaceEnabled"].as<bool>() == true) {
- bool httpEnabled = false;
- if (params->count("net.http.enabled")) {
- Status ret = params->get("net.http.enabled", &httpEnabled);
- if (!ret.isOK()) {
- return ret;
- }
- }
- if (params->count("nohttpinterface")) {
- log() << "** WARNING: Should not specify both --rest and --nohttpinterface" <<
- startupWarningsLog;
- }
- else if (!(params->count("httpinterface") ||
- (params->count("net.http.enabled") && httpEnabled == true))) {
- log() << "** WARNING: --rest is specified without --httpinterface," <<
- startupWarningsLog;
- log() << "** enabling http interface" << startupWarningsLog;
- Status ret = params->set("httpinterface", moe::Value(true));
- if (!ret.isOK()) {
- return ret;
- }
- }
- }
-
- if (params->count("net.http.JSONPEnabled") &&
- (*params)["net.http.JSONPEnabled"].as<bool>() == true) {
- if (params->count("nohttpinterface")) {
- log() << "** WARNING: Should not specify both --jsonp and --nohttpinterface" <<
- startupWarningsLog;
- }
- else if (!params->count("httpinterface")) {
- log() << "** WARNING --jsonp is specified without --httpinterface," <<
- startupWarningsLog;
- log() << "** enabling http interface" << startupWarningsLog;
- Status ret = params->set("httpinterface", moe::Value(true));
- if (!ret.isOK()) {
- return ret;
- }
- }
- }
-
- Status ret = canonicalizeServerOptions(params);
- if (!ret.isOK()) {
- return ret;
- }
-
-#ifdef MONGO_CONFIG_SSL
- ret = canonicalizeSSLServerOptions(params);
- if (!ret.isOK()) {
- return ret;
- }
#endif
- // "storage.journal.enabled" comes from the config file, so override it if any of "journal",
- // "nojournal", "dur", and "nodur" are set, since those come from the command line.
- if (params->count("journal")) {
- Status ret = params->set("storage.journal.enabled",
- moe::Value((*params)["journal"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("journal");
+ return Status::OK();
+}
+
+Status canonicalizeMongodOptions(moe::Environment* params) {
+ // Need to handle this before canonicalizing the general "server options", since
+ // httpinterface and nohttpinterface are shared between mongos and mongod, but mongod has
+ // extra validation required.
+ if (params->count("net.http.RESTInterfaceEnabled") &&
+ (*params)["net.http.RESTInterfaceEnabled"].as<bool>() == true) {
+ bool httpEnabled = false;
+ if (params->count("net.http.enabled")) {
+ Status ret = params->get("net.http.enabled", &httpEnabled);
if (!ret.isOK()) {
return ret;
}
}
- if (params->count("nojournal")) {
- Status ret = params->set("storage.journal.enabled",
- moe::Value(!(*params)["nojournal"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("nojournal");
- if (!ret.isOK()) {
- return ret;
- }
- }
- if (params->count("dur")) {
- Status ret = params->set("storage.journal.enabled",
- moe::Value((*params)["dur"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("dur");
- if (!ret.isOK()) {
- return ret;
- }
- }
- if (params->count("nodur")) {
- Status ret = params->set("storage.journal.enabled",
- moe::Value(!(*params)["nodur"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("nodur");
+ if (params->count("nohttpinterface")) {
+ log() << "** WARNING: Should not specify both --rest and --nohttpinterface"
+ << startupWarningsLog;
+ } else if (!(params->count("httpinterface") ||
+ (params->count("net.http.enabled") && httpEnabled == true))) {
+ log() << "** WARNING: --rest is specified without --httpinterface,"
+ << startupWarningsLog;
+ log() << "** enabling http interface" << startupWarningsLog;
+ Status ret = params->set("httpinterface", moe::Value(true));
if (!ret.isOK()) {
return ret;
}
}
+ }
- // "storage.mmapv1.journal.durOptions" comes from the config file, so override it
- // if "durOptions" is set since that comes from the command line.
- if (params->count("durOptions")) {
- int durOptions;
- Status ret = params->get("durOptions", &durOptions);
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("durOptions");
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->set("storage.mmapv1.journal.debugFlags", moe::Value(durOptions));
+ if (params->count("net.http.JSONPEnabled") &&
+ (*params)["net.http.JSONPEnabled"].as<bool>() == true) {
+ if (params->count("nohttpinterface")) {
+ log() << "** WARNING: Should not specify both --jsonp and --nohttpinterface"
+ << startupWarningsLog;
+ } else if (!params->count("httpinterface")) {
+ log() << "** WARNING --jsonp is specified without --httpinterface,"
+ << startupWarningsLog;
+ log() << "** enabling http interface" << startupWarningsLog;
+ Status ret = params->set("httpinterface", moe::Value(true));
if (!ret.isOK()) {
return ret;
}
}
+ }
- // "security.authorization" comes from the config file, so override it if "noauth" or
- // "auth" are set since those come from the command line.
- if (params->count("noauth")) {
- Status ret = params->set("security.authorization",
- (*params)["noauth"].as<bool>() ?
- moe::Value(std::string("disabled")) :
- moe::Value(std::string("enabled")));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("noauth");
- if (!ret.isOK()) {
- return ret;
- }
- }
- if (params->count("auth")) {
- Status ret = params->set("security.authorization",
- (*params)["auth"].as<bool>() ?
- moe::Value(std::string("enabled")) :
- moe::Value(std::string("disabled")));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("auth");
- if (!ret.isOK()) {
- return ret;
- }
- }
+ Status ret = canonicalizeServerOptions(params);
+ if (!ret.isOK()) {
+ return ret;
+ }
- // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc" is
- // set since that comes from the command line.
- if (params->count("noprealloc")) {
- Status ret = params->set("storage.mmapv1.preallocDataFiles",
- moe::Value(!(*params)["noprealloc"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("noprealloc");
- if (!ret.isOK()) {
- return ret;
- }
- }
+#ifdef MONGO_CONFIG_SSL
+ ret = canonicalizeSSLServerOptions(params);
+ if (!ret.isOK()) {
+ return ret;
+ }
+#endif
- // "sharding.archiveMovedChunks" comes from the config file, so override it if
- // "noMoveParanoia" or "moveParanoia" are set since those come from the command line.
- if (params->count("noMoveParanoia")) {
- Status ret = params->set("sharding.archiveMovedChunks",
- moe::Value(!(*params)["noMoveParanoia"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("noMoveParanoia");
- if (!ret.isOK()) {
- return ret;
- }
+ // "storage.journal.enabled" comes from the config file, so override it if any of "journal",
+ // "nojournal", "dur", and "nodur" are set, since those come from the command line.
+ if (params->count("journal")) {
+ Status ret =
+ params->set("storage.journal.enabled", moe::Value((*params)["journal"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params->count("moveParanoia")) {
- Status ret = params->set("sharding.archiveMovedChunks",
- moe::Value((*params)["moveParanoia"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("moveParanoia");
- if (!ret.isOK()) {
- return ret;
- }
+ ret = params->remove("journal");
+ if (!ret.isOK()) {
+ return ret;
}
-
- // "sharding.clusterRole" comes from the config file, so override it if "configsvr" or
- // "shardsvr" are set since those come from the command line.
- if (params->count("configsvr")) {
- if ((*params)["configsvr"].as<bool>() == false) {
- // Handle the case where "configsvr" comes from the legacy config file and is set to
- // false. This option is not allowed in the YAML config.
- return Status(ErrorCodes::BadValue,
- "configsvr option cannot be set to false in config file");
- }
- Status ret = params->set("sharding.clusterRole", moe::Value(std::string("configsvr")));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("configsvr");
- if (!ret.isOK()) {
- return ret;
- }
+ }
+ if (params->count("nojournal")) {
+ Status ret =
+ params->set("storage.journal.enabled", moe::Value(!(*params)["nojournal"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params->count("shardsvr")) {
- if ((*params)["shardsvr"].as<bool>() == false) {
- // Handle the case where "shardsvr" comes from the legacy config file and is set to
- // false. This option is not allowed in the YAML config.
- return Status(ErrorCodes::BadValue,
- "shardsvr option cannot be set to false in config file");
- }
- Status ret = params->set("sharding.clusterRole", moe::Value(std::string("shardsvr")));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("shardsvr");
- if (!ret.isOK()) {
- return ret;
- }
+ ret = params->remove("nojournal");
+ if (!ret.isOK()) {
+ return ret;
}
-
- if (params->count("profile")) {
- int profilingMode;
- Status ret = params->get("profile", &profilingMode);
- if (!ret.isOK()) {
- return ret;
- }
- std::string profilingModeString;
- if (profilingMode == 0) {
- profilingModeString = "off";
- }
- else if (profilingMode == 1) {
- profilingModeString = "slowOp";
- }
- else if (profilingMode == 2) {
- profilingModeString = "all";
- }
- else {
- StringBuilder sb;
- sb << "Bad value for profile: " << profilingMode
- << ". Supported modes are: (0=off|1=slowOp|2=all)";
- return Status(ErrorCodes::BadValue, sb.str());
- }
- ret = params->set("operationProfiling.mode", moe::Value(profilingModeString));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("profile");
- if (!ret.isOK()) {
- return ret;
- }
+ }
+ if (params->count("dur")) {
+ Status ret =
+ params->set("storage.journal.enabled", moe::Value((*params)["dur"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
-
- // "storage.indexBuildRetry" comes from the config file, so override it if
- // "noIndexBuildRetry" is set since that comes from the command line.
- if (params->count("noIndexBuildRetry")) {
- Status ret = params->set("storage.indexBuildRetry",
- moe::Value(!(*params)["noIndexBuildRetry"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("noIndexBuildRetry");
- if (!ret.isOK()) {
- return ret;
- }
+ ret = params->remove("dur");
+ if (!ret.isOK()) {
+ return ret;
}
-
- // Ensure that "replication.replSet" logically overrides "replication.replSetName". We
- // can't canonicalize them as the same option, because they mean slightly different things.
- // "replication.replSet" can include a seed list, while "replication.replSetName" just has
- // the replica set name.
- if (params->count("replication.replSet") && params->count("replication.replSetName")) {
- ret = params->remove("replication.replSetName");
- if (!ret.isOK()) {
- return ret;
- }
+ }
+ if (params->count("nodur")) {
+ Status ret =
+ params->set("storage.journal.enabled", moe::Value(!(*params)["nodur"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
-
- // "security.javascriptEnabled" comes from the config file, so override it if "noscripting"
- // is set since that comes from the command line.
- if (params->count("noscripting")) {
- Status ret = params->set("security.javascriptEnabled",
- moe::Value(!(*params)["noscripting"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("noscripting");
- if (!ret.isOK()) {
- return ret;
- }
+ ret = params->remove("nodur");
+ if (!ret.isOK()) {
+ return ret;
}
-
- return Status::OK();
}
- Status storeMongodOptions(const moe::Environment& params,
- const std::vector<std::string>& args) {
-
- Status ret = storeServerOptions(params, args);
+ // "storage.mmapv1.journal.durOptions" comes from the config file, so override it
+ // if "durOptions" is set since that comes from the command line.
+ if (params->count("durOptions")) {
+ int durOptions;
+ Status ret = params->get("durOptions", &durOptions);
if (!ret.isOK()) {
return ret;
}
-
- // TODO: Integrate these options with their setParameter counterparts
- if (params.count("security.authSchemaVersion")) {
- return Status(ErrorCodes::BadValue,
- "security.authSchemaVersion is currently not supported in config files");
+ ret = params->remove("durOptions");
+ if (!ret.isOK()) {
+ return ret;
}
-
- if (params.count("security.enableLocalhostAuthBypass")) {
- return Status(ErrorCodes::BadValue,
- "security.enableLocalhostAuthBypass is currently not supported in config "
- "files");
+ ret = params->set("storage.mmapv1.journal.debugFlags", moe::Value(durOptions));
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- if (params.count("storage.engine")) {
- storageGlobalParams.engine = params["storage.engine"].as<string>();
- storageGlobalParams.engineSetByUser = true;
+ // "security.authorization" comes from the config file, so override it if "noauth" or
+ // "auth" are set since those come from the command line.
+ if (params->count("noauth")) {
+ Status ret =
+ params->set("security.authorization",
+ (*params)["noauth"].as<bool>() ? moe::Value(std::string("disabled"))
+ : moe::Value(std::string("enabled")));
+ if (!ret.isOK()) {
+ return ret;
}
-
- if (params.count("storage.dbPath")) {
- storageGlobalParams.dbpath = params["storage.dbPath"].as<string>();
- if (params.count("processManagement.fork") && storageGlobalParams.dbpath[0] != '/') {
- // we need to change dbpath if we fork since we change
- // cwd to "/"
- // fork only exists on *nix
- // so '/' is safe
- storageGlobalParams.dbpath = serverGlobalParams.cwd + "/" +
- storageGlobalParams.dbpath;
- }
+ ret = params->remove("noauth");
+ if (!ret.isOK()) {
+ return ret;
}
-#ifdef _WIN32
- if (storageGlobalParams.dbpath.size() > 1 &&
- storageGlobalParams.dbpath[storageGlobalParams.dbpath.size()-1] == '/') {
- // size() check is for the unlikely possibility of --dbpath "/"
- storageGlobalParams.dbpath =
- storageGlobalParams.dbpath.erase(storageGlobalParams.dbpath.size()-1);
+ }
+ if (params->count("auth")) {
+ Status ret =
+ params->set("security.authorization",
+ (*params)["auth"].as<bool>() ? moe::Value(std::string("enabled"))
+ : moe::Value(std::string("disabled")));
+ if (!ret.isOK()) {
+ return ret;
}
-#endif
-
- if (params.count("operationProfiling.mode")) {
- std::string profilingMode = params["operationProfiling.mode"].as<std::string>();
- if (profilingMode == "off") {
- serverGlobalParams.defaultProfile = 0;
- }
- else if (profilingMode == "slowOp") {
- serverGlobalParams.defaultProfile = 1;
- }
- else if (profilingMode == "all") {
- serverGlobalParams.defaultProfile = 2;
- }
- else {
- StringBuilder sb;
- sb << "Bad value for operationProfiling.mode: " << profilingMode
- << ". Supported modes are: (off|slowOp|all)";
- return Status(ErrorCodes::BadValue, sb.str());
- }
+ ret = params->remove("auth");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- if ( params.count("operationProfiling.slowOpThresholdMs")) {
- serverGlobalParams.slowMS = params["operationProfiling.slowOpThresholdMs"].as<int>();
+ // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc" is
+ // set since that comes from the command line.
+ if (params->count("noprealloc")) {
+ Status ret = params->set("storage.mmapv1.preallocDataFiles",
+ moe::Value(!(*params)["noprealloc"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
-
- if ( params.count("storage.syncPeriodSecs")) {
- storageGlobalParams.syncdelay = params["storage.syncPeriodSecs"].as<double>();
+ ret = params->remove("noprealloc");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- if (params.count("storage.directoryPerDB")) {
- storageGlobalParams.directoryperdb = params["storage.directoryPerDB"].as<bool>();
+ // "sharding.archiveMovedChunks" comes from the config file, so override it if
+ // "noMoveParanoia" or "moveParanoia" are set since those come from the command line.
+ if (params->count("noMoveParanoia")) {
+ Status ret = params->set("sharding.archiveMovedChunks",
+ moe::Value(!(*params)["noMoveParanoia"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("cpu")) {
- serverGlobalParams.cpu = params["cpu"].as<bool>();
+ ret = params->remove("noMoveParanoia");
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("security.authorization") &&
- params["security.authorization"].as<std::string>() == "disabled") {
- serverGlobalParams.isAuthEnabled = false;
+ }
+ if (params->count("moveParanoia")) {
+ Status ret = params->set("sharding.archiveMovedChunks",
+ moe::Value((*params)["moveParanoia"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("security.authorization") &&
- params["security.authorization"].as<std::string>() == "enabled") {
- serverGlobalParams.isAuthEnabled = true;
+ ret = params->remove("moveParanoia");
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("storage.mmapv1.quota.enforced")) {
- mmapv1GlobalOptions.quota = params["storage.mmapv1.quota.enforced"].as<bool>();
+ }
+
+ // "sharding.clusterRole" comes from the config file, so override it if "configsvr" or
+ // "shardsvr" are set since those come from the command line.
+ if (params->count("configsvr")) {
+ if ((*params)["configsvr"].as<bool>() == false) {
+ // Handle the case where "configsvr" comes from the legacy config file and is set to
+ // false. This option is not allowed in the YAML config.
+ return Status(ErrorCodes::BadValue,
+ "configsvr option cannot be set to false in config file");
}
- if (params.count("storage.mmapv1.quota.maxFilesPerDB")) {
- mmapv1GlobalOptions.quota = true;
- mmapv1GlobalOptions.quotaFiles =
- params["storage.mmapv1.quota.maxFilesPerDB"].as<int>() - 1;
+ Status ret = params->set("sharding.clusterRole", moe::Value(std::string("configsvr")));
+ if (!ret.isOK()) {
+ return ret;
}
-
- if (params.count("storage.journal.enabled")) {
- storageGlobalParams.dur = params["storage.journal.enabled"].as<bool>();
+ ret = params->remove("configsvr");
+ if (!ret.isOK()) {
+ return ret;
}
-
- if (params.count("storage.mmapv1.journal.commitIntervalMs")) {
- // don't check if dur is false here as many will just use the default, and will default
- // to off on win32. ie no point making life a little more complex by giving an error on
- // a dev environment.
- mmapv1GlobalOptions.journalCommitInterval =
- params["storage.mmapv1.journal.commitIntervalMs"].as<unsigned>();
- if (mmapv1GlobalOptions.journalCommitInterval <= 1 ||
- mmapv1GlobalOptions.journalCommitInterval > 300) {
- return Status(ErrorCodes::BadValue,
- "--journalCommitInterval out of allowed range (0-300ms)");
- }
+ }
+ if (params->count("shardsvr")) {
+ if ((*params)["shardsvr"].as<bool>() == false) {
+ // Handle the case where "shardsvr" comes from the legacy config file and is set to
+ // false. This option is not allowed in the YAML config.
+ return Status(ErrorCodes::BadValue,
+ "shardsvr option cannot be set to false in config file");
}
- if (params.count("storage.mmapv1.journal.debugFlags")) {
- mmapv1GlobalOptions.journalOptions =
- params["storage.mmapv1.journal.debugFlags"].as<int>();
+ Status ret = params->set("sharding.clusterRole", moe::Value(std::string("shardsvr")));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("nopreallocj")) {
- mmapv1GlobalOptions.preallocj = !params["nopreallocj"].as<bool>();
+ ret = params->remove("shardsvr");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- if (params.count("net.http.RESTInterfaceEnabled")) {
- serverGlobalParams.rest = params["net.http.RESTInterfaceEnabled"].as<bool>();
- }
- if (params.count("net.http.JSONPEnabled")) {
- serverGlobalParams.jsonp = params["net.http.JSONPEnabled"].as<bool>();
+ if (params->count("profile")) {
+ int profilingMode;
+ Status ret = params->get("profile", &profilingMode);
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("security.javascriptEnabled")) {
- mongodGlobalParams.scriptingEnabled = params["security.javascriptEnabled"].as<bool>();
+ std::string profilingModeString;
+ if (profilingMode == 0) {
+ profilingModeString = "off";
+ } else if (profilingMode == 1) {
+ profilingModeString = "slowOp";
+ } else if (profilingMode == 2) {
+ profilingModeString = "all";
+ } else {
+ StringBuilder sb;
+ sb << "Bad value for profile: " << profilingMode
+ << ". Supported modes are: (0=off|1=slowOp|2=all)";
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
+ ret = params->set("operationProfiling.mode", moe::Value(profilingModeString));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("storage.mmapv1.preallocDataFiles")) {
- mmapv1GlobalOptions.prealloc = params["storage.mmapv1.preallocDataFiles"].as<bool>();
- cout << "note: noprealloc may hurt performance in many applications" << endl;
+ ret = params->remove("profile");
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("storage.mmapv1.smallFiles")) {
- mmapv1GlobalOptions.smallfiles = params["storage.mmapv1.smallFiles"].as<bool>();
+ }
+
+ // "storage.indexBuildRetry" comes from the config file, so override it if
+ // "noIndexBuildRetry" is set since that comes from the command line.
+ if (params->count("noIndexBuildRetry")) {
+ Status ret = params->set("storage.indexBuildRetry",
+ moe::Value(!(*params)["noIndexBuildRetry"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("diaglog")) {
- warning() << "--diaglog is deprecated and will be removed in a future release"
- << startupWarningsLog;
- int x = params["diaglog"].as<int>();
- if ( x < 0 || x > 7 ) {
- return Status(ErrorCodes::BadValue, "can't interpret --diaglog setting");
- }
- _diaglog.setLevel(x);
+ ret = params->remove("noIndexBuildRetry");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- if ((params.count("storage.journal.enabled") &&
- params["storage.journal.enabled"].as<bool>() == true) && params.count("repair")) {
- return Status(ErrorCodes::BadValue,
- "Can't have journaling enabled when using --repair option.");
+ // Ensure that "replication.replSet" logically overrides "replication.replSetName". We
+ // can't canonicalize them as the same option, because they mean slightly different things.
+ // "replication.replSet" can include a seed list, while "replication.replSetName" just has
+ // the replica set name.
+ if (params->count("replication.replSet") && params->count("replication.replSetName")) {
+ ret = params->remove("replication.replSetName");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- if (params.count("repair") && params["repair"].as<bool>() == true) {
- storageGlobalParams.upgrade = 1; // --repair implies --upgrade
- storageGlobalParams.repair = 1;
- storageGlobalParams.dur = false;
- }
- if (params.count("upgrade") && params["upgrade"].as<bool>() == true) {
- storageGlobalParams.upgrade = 1;
+ // "security.javascriptEnabled" comes from the config file, so override it if "noscripting"
+ // is set since that comes from the command line.
+ if (params->count("noscripting")) {
+ Status ret = params->set("security.javascriptEnabled",
+ moe::Value(!(*params)["noscripting"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("notablescan")) {
- storageGlobalParams.noTableScan = params["notablescan"].as<bool>();
+ ret = params->remove("noscripting");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- repl::ReplSettings replSettings;
- if (params.count("master")) {
- replSettings.master = params["master"].as<bool>();
- }
- if (params.count("slave") && params["slave"].as<bool>() == true) {
- replSettings.slave = repl::SimpleSlave;
- }
- if (params.count("slavedelay")) {
- replSettings.slavedelay = params["slavedelay"].as<int>();
- }
- if (params.count("fastsync")) {
- if (replSettings.slave != repl::SimpleSlave) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "--fastsync must only be used with --slave");
- }
- replSettings.fastsync = params["fastsync"].as<bool>();
- }
- if (params.count("autoresync")) {
- replSettings.autoresync = params["autoresync"].as<bool>();
- }
- if (params.count("source")) {
- /* specifies what the source in local.sources should be */
- replSettings.source = params["source"].as<string>().c_str();
- }
- if( params.count("pretouch") ) {
- replSettings.pretouch = params["pretouch"].as<int>();
- }
- if (params.count("replication.replSetName")) {
- replSettings.replSet = params["replication.replSetName"].as<string>().c_str();
+ return Status::OK();
+}
+
+Status storeMongodOptions(const moe::Environment& params, const std::vector<std::string>& args) {
+ Status ret = storeServerOptions(params, args);
+ if (!ret.isOK()) {
+ return ret;
+ }
+
+ // TODO: Integrate these options with their setParameter counterparts
+ if (params.count("security.authSchemaVersion")) {
+ return Status(ErrorCodes::BadValue,
+ "security.authSchemaVersion is currently not supported in config files");
+ }
+
+ if (params.count("security.enableLocalhostAuthBypass")) {
+ return Status(ErrorCodes::BadValue,
+ "security.enableLocalhostAuthBypass is currently not supported in config "
+ "files");
+ }
+
+ if (params.count("storage.engine")) {
+ storageGlobalParams.engine = params["storage.engine"].as<string>();
+ storageGlobalParams.engineSetByUser = true;
+ }
+
+ if (params.count("storage.dbPath")) {
+ storageGlobalParams.dbpath = params["storage.dbPath"].as<string>();
+ if (params.count("processManagement.fork") && storageGlobalParams.dbpath[0] != '/') {
+ // we need to change dbpath if we fork since we change
+ // cwd to "/"
+ // fork only exists on *nix
+ // so '/' is safe
+ storageGlobalParams.dbpath = serverGlobalParams.cwd + "/" + storageGlobalParams.dbpath;
}
- if (params.count("replication.replSet")) {
- /* seed list of hosts for the repl set */
- replSettings.replSet = params["replication.replSet"].as<string>().c_str();
+ }
+#ifdef _WIN32
+ if (storageGlobalParams.dbpath.size() > 1 &&
+ storageGlobalParams.dbpath[storageGlobalParams.dbpath.size() - 1] == '/') {
+ // size() check is for the unlikely possibility of --dbpath "/"
+ storageGlobalParams.dbpath =
+ storageGlobalParams.dbpath.erase(storageGlobalParams.dbpath.size() - 1);
+ }
+#endif
+
+ if (params.count("operationProfiling.mode")) {
+ std::string profilingMode = params["operationProfiling.mode"].as<std::string>();
+ if (profilingMode == "off") {
+ serverGlobalParams.defaultProfile = 0;
+ } else if (profilingMode == "slowOp") {
+ serverGlobalParams.defaultProfile = 1;
+ } else if (profilingMode == "all") {
+ serverGlobalParams.defaultProfile = 2;
+ } else {
+ StringBuilder sb;
+ sb << "Bad value for operationProfiling.mode: " << profilingMode
+ << ". Supported modes are: (off|slowOp|all)";
+ return Status(ErrorCodes::BadValue, sb.str());
}
- if (params.count("replication.secondaryIndexPrefetch")) {
- replSettings.rsIndexPrefetch =
- params["replication.secondaryIndexPrefetch"].as<std::string>();
+ }
+
+ if (params.count("operationProfiling.slowOpThresholdMs")) {
+ serverGlobalParams.slowMS = params["operationProfiling.slowOpThresholdMs"].as<int>();
+ }
+
+ if (params.count("storage.syncPeriodSecs")) {
+ storageGlobalParams.syncdelay = params["storage.syncPeriodSecs"].as<double>();
+ }
+
+ if (params.count("storage.directoryPerDB")) {
+ storageGlobalParams.directoryperdb = params["storage.directoryPerDB"].as<bool>();
+ }
+ if (params.count("cpu")) {
+ serverGlobalParams.cpu = params["cpu"].as<bool>();
+ }
+ if (params.count("security.authorization") &&
+ params["security.authorization"].as<std::string>() == "disabled") {
+ serverGlobalParams.isAuthEnabled = false;
+ }
+ if (params.count("security.authorization") &&
+ params["security.authorization"].as<std::string>() == "enabled") {
+ serverGlobalParams.isAuthEnabled = true;
+ }
+ if (params.count("storage.mmapv1.quota.enforced")) {
+ mmapv1GlobalOptions.quota = params["storage.mmapv1.quota.enforced"].as<bool>();
+ }
+ if (params.count("storage.mmapv1.quota.maxFilesPerDB")) {
+ mmapv1GlobalOptions.quota = true;
+ mmapv1GlobalOptions.quotaFiles = params["storage.mmapv1.quota.maxFilesPerDB"].as<int>() - 1;
+ }
+
+ if (params.count("storage.journal.enabled")) {
+ storageGlobalParams.dur = params["storage.journal.enabled"].as<bool>();
+ }
+
+ if (params.count("storage.mmapv1.journal.commitIntervalMs")) {
+ // don't check if dur is false here as many will just use the default, and will default
+ // to off on win32. ie no point making life a little more complex by giving an error on
+ // a dev environment.
+ mmapv1GlobalOptions.journalCommitInterval =
+ params["storage.mmapv1.journal.commitIntervalMs"].as<unsigned>();
+ if (mmapv1GlobalOptions.journalCommitInterval <= 1 ||
+ mmapv1GlobalOptions.journalCommitInterval > 300) {
+ return Status(ErrorCodes::BadValue,
+ "--journalCommitInterval out of allowed range (0-300ms)");
}
+ }
+ if (params.count("storage.mmapv1.journal.debugFlags")) {
+ mmapv1GlobalOptions.journalOptions = params["storage.mmapv1.journal.debugFlags"].as<int>();
+ }
+ if (params.count("nopreallocj")) {
+ mmapv1GlobalOptions.preallocj = !params["nopreallocj"].as<bool>();
+ }
+
+ if (params.count("net.http.RESTInterfaceEnabled")) {
+ serverGlobalParams.rest = params["net.http.RESTInterfaceEnabled"].as<bool>();
+ }
+ if (params.count("net.http.JSONPEnabled")) {
+ serverGlobalParams.jsonp = params["net.http.JSONPEnabled"].as<bool>();
+ }
+ if (params.count("security.javascriptEnabled")) {
+ mongodGlobalParams.scriptingEnabled = params["security.javascriptEnabled"].as<bool>();
+ }
+ if (params.count("storage.mmapv1.preallocDataFiles")) {
+ mmapv1GlobalOptions.prealloc = params["storage.mmapv1.preallocDataFiles"].as<bool>();
+ cout << "note: noprealloc may hurt performance in many applications" << endl;
+ }
+ if (params.count("storage.mmapv1.smallFiles")) {
+ mmapv1GlobalOptions.smallfiles = params["storage.mmapv1.smallFiles"].as<bool>();
+ }
+ if (params.count("diaglog")) {
+ warning() << "--diaglog is deprecated and will be removed in a future release"
+ << startupWarningsLog;
+ int x = params["diaglog"].as<int>();
+ if (x < 0 || x > 7) {
+ return Status(ErrorCodes::BadValue, "can't interpret --diaglog setting");
+ }
+ _diaglog.setLevel(x);
+ }
+
+ if ((params.count("storage.journal.enabled") &&
+ params["storage.journal.enabled"].as<bool>() == true) &&
+ params.count("repair")) {
+ return Status(ErrorCodes::BadValue,
+ "Can't have journaling enabled when using --repair option.");
+ }
- if (params.count("storage.indexBuildRetry")) {
- serverGlobalParams.indexBuildRetry = params["storage.indexBuildRetry"].as<bool>();
+ if (params.count("repair") && params["repair"].as<bool>() == true) {
+ storageGlobalParams.upgrade = 1; // --repair implies --upgrade
+ storageGlobalParams.repair = 1;
+ storageGlobalParams.dur = false;
+ }
+ if (params.count("upgrade") && params["upgrade"].as<bool>() == true) {
+ storageGlobalParams.upgrade = 1;
+ }
+ if (params.count("notablescan")) {
+ storageGlobalParams.noTableScan = params["notablescan"].as<bool>();
+ }
+
+ repl::ReplSettings replSettings;
+ if (params.count("master")) {
+ replSettings.master = params["master"].as<bool>();
+ }
+ if (params.count("slave") && params["slave"].as<bool>() == true) {
+ replSettings.slave = repl::SimpleSlave;
+ }
+ if (params.count("slavedelay")) {
+ replSettings.slavedelay = params["slavedelay"].as<int>();
+ }
+ if (params.count("fastsync")) {
+ if (replSettings.slave != repl::SimpleSlave) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "--fastsync must only be used with --slave");
}
+ replSettings.fastsync = params["fastsync"].as<bool>();
+ }
+ if (params.count("autoresync")) {
+ replSettings.autoresync = params["autoresync"].as<bool>();
+ }
+ if (params.count("source")) {
+ /* specifies what the source in local.sources should be */
+ replSettings.source = params["source"].as<string>().c_str();
+ }
+ if (params.count("pretouch")) {
+ replSettings.pretouch = params["pretouch"].as<int>();
+ }
+ if (params.count("replication.replSetName")) {
+ replSettings.replSet = params["replication.replSetName"].as<string>().c_str();
+ }
+ if (params.count("replication.replSet")) {
+ /* seed list of hosts for the repl set */
+ replSettings.replSet = params["replication.replSet"].as<string>().c_str();
+ }
+ if (params.count("replication.secondaryIndexPrefetch")) {
+ replSettings.rsIndexPrefetch =
+ params["replication.secondaryIndexPrefetch"].as<std::string>();
+ }
- if (params.count("only")) {
- replSettings.only = params["only"].as<string>().c_str();
+ if (params.count("storage.indexBuildRetry")) {
+ serverGlobalParams.indexBuildRetry = params["storage.indexBuildRetry"].as<bool>();
+ }
+
+ if (params.count("only")) {
+ replSettings.only = params["only"].as<string>().c_str();
+ }
+ if (params.count("storage.mmapv1.nsSize")) {
+ int x = params["storage.mmapv1.nsSize"].as<int>();
+ if (x <= 0 || x > (0x7fffffff / 1024 / 1024)) {
+ return Status(ErrorCodes::BadValue, "bad --nssize arg");
}
- if( params.count("storage.mmapv1.nsSize") ) {
- int x = params["storage.mmapv1.nsSize"].as<int>();
- if (x <= 0 || x > (0x7fffffff/1024/1024)) {
- return Status(ErrorCodes::BadValue, "bad --nssize arg");
- }
- mmapv1GlobalOptions.lenForNewNsFiles = x * 1024 * 1024;
- verify(mmapv1GlobalOptions.lenForNewNsFiles > 0);
+ mmapv1GlobalOptions.lenForNewNsFiles = x * 1024 * 1024;
+ verify(mmapv1GlobalOptions.lenForNewNsFiles > 0);
+ }
+ if (params.count("replication.oplogSizeMB")) {
+ long long x = params["replication.oplogSizeMB"].as<int>();
+ if (x <= 0) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "bad --oplogSize, arg must be greater than 0,"
+ "found: " << x);
+ }
+ // note a small size such as x==1 is ok for an arbiter.
+ if (x > 1000 && sizeof(void*) == 4) {
+ StringBuilder sb;
+ sb << "--oplogSize of " << x
+ << "MB is too big for 32 bit version. Use 64 bit build instead.";
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
+ replSettings.oplogSize = x * 1024 * 1024;
+ invariant(replSettings.oplogSize > 0);
+ }
+ if (params.count("cacheSize")) {
+ long x = params["cacheSize"].as<long>();
+ if (x <= 0) {
+ return Status(ErrorCodes::BadValue, "bad --cacheSize arg");
}
- if (params.count("replication.oplogSizeMB")) {
- long long x = params["replication.oplogSizeMB"].as<int>();
- if (x <= 0) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "bad --oplogSize, arg must be greater than 0,"
- "found: " << x);
- }
- // note a small size such as x==1 is ok for an arbiter.
- if( x > 1000 && sizeof(void*) == 4 ) {
+ return Status(ErrorCodes::BadValue, "--cacheSize option not currently supported");
+ }
+ if (!params.count("net.port")) {
+ if (params.count("sharding.clusterRole")) {
+ std::string clusterRole = params["sharding.clusterRole"].as<std::string>();
+ if (clusterRole == "configsvr") {
+ serverGlobalParams.port = ServerGlobalParams::ConfigServerPort;
+ } else if (clusterRole == "shardsvr") {
+ serverGlobalParams.port = ServerGlobalParams::ShardServerPort;
+ } else {
StringBuilder sb;
- sb << "--oplogSize of " << x
- << "MB is too big for 32 bit version. Use 64 bit build instead.";
+ sb << "Bad value for sharding.clusterRole: " << clusterRole
+ << ". Supported modes are: (configsvr|shardsvr)";
return Status(ErrorCodes::BadValue, sb.str());
}
- replSettings.oplogSize = x * 1024 * 1024;
- invariant(replSettings.oplogSize > 0);
}
- if (params.count("cacheSize")) {
- long x = params["cacheSize"].as<long>();
- if (x <= 0) {
- return Status(ErrorCodes::BadValue, "bad --cacheSize arg");
- }
- return Status(ErrorCodes::BadValue, "--cacheSize option not currently supported");
+ } else {
+ if (serverGlobalParams.port <= 0 || serverGlobalParams.port > 65535) {
+ return Status(ErrorCodes::BadValue, "bad --port number");
}
- if (!params.count("net.port")) {
- if (params.count("sharding.clusterRole")) {
- std::string clusterRole = params["sharding.clusterRole"].as<std::string>();
- if (clusterRole == "configsvr") {
- serverGlobalParams.port = ServerGlobalParams::ConfigServerPort;
- }
- else if (clusterRole == "shardsvr") {
- serverGlobalParams.port = ServerGlobalParams::ShardServerPort;
- }
- else {
- StringBuilder sb;
- sb << "Bad value for sharding.clusterRole: " << clusterRole
- << ". Supported modes are: (configsvr|shardsvr)";
- return Status(ErrorCodes::BadValue, sb.str());
- }
- }
- }
- else {
- if (serverGlobalParams.port <= 0 || serverGlobalParams.port > 65535) {
- return Status(ErrorCodes::BadValue, "bad --port number");
- }
- }
- if (params.count("sharding.clusterRole") &&
- params["sharding.clusterRole"].as<std::string>() == "configsvr") {
- serverGlobalParams.configsvr = true;
- mmapv1GlobalOptions.smallfiles = true; // config server implies small files
-
- // If we haven't explicitly specified a journal option, default journaling to true for
- // the config server role
- if (!params.count("storage.journal.enabled")) {
- storageGlobalParams.dur = true;
- }
+ }
+ if (params.count("sharding.clusterRole") &&
+ params["sharding.clusterRole"].as<std::string>() == "configsvr") {
+ serverGlobalParams.configsvr = true;
+ mmapv1GlobalOptions.smallfiles = true; // config server implies small files
- if (!params.count("storage.dbPath")) {
- storageGlobalParams.dbpath = storageGlobalParams.kDefaultConfigDbPath;
- }
- replSettings.master = true;
- if (!params.count("replication.oplogSizeMB"))
- replSettings.oplogSize = 5 * 1024 * 1024;
+ // If we haven't explicitly specified a journal option, default journaling to true for
+ // the config server role
+ if (!params.count("storage.journal.enabled")) {
+ storageGlobalParams.dur = true;
}
- if (params.count("sharding.archiveMovedChunks")) {
- serverGlobalParams.moveParanoia = params["sharding.archiveMovedChunks"].as<bool>();
+ if (!params.count("storage.dbPath")) {
+ storageGlobalParams.dbpath = storageGlobalParams.kDefaultConfigDbPath;
}
+ replSettings.master = true;
+ if (!params.count("replication.oplogSizeMB"))
+ replSettings.oplogSize = 5 * 1024 * 1024;
+ }
- if (params.count("pairwith") || params.count("arbiter") || params.count("opIdMem")) {
- return Status(ErrorCodes::BadValue,
- "****\n"
- "Replica Pairs have been deprecated. Invalid options: "
- "--pairwith, --arbiter, and/or --opIdMem\n"
- "<http://dochub.mongodb.org/core/replicapairs>\n"
- "****");
- }
+ if (params.count("sharding.archiveMovedChunks")) {
+ serverGlobalParams.moveParanoia = params["sharding.archiveMovedChunks"].as<bool>();
+ }
- // needs to be after things like --configsvr parsing, thus here.
- if (params.count("storage.repairPath")) {
- storageGlobalParams.repairpath = params["storage.repairPath"].as<string>();
- if (!storageGlobalParams.repairpath.size()) {
- return Status(ErrorCodes::BadValue, "repairpath is empty");
- }
+ if (params.count("pairwith") || params.count("arbiter") || params.count("opIdMem")) {
+ return Status(ErrorCodes::BadValue,
+ "****\n"
+ "Replica Pairs have been deprecated. Invalid options: "
+ "--pairwith, --arbiter, and/or --opIdMem\n"
+ "<http://dochub.mongodb.org/core/replicapairs>\n"
+ "****");
+ }
- if (storageGlobalParams.dur &&
- !str::startsWith(storageGlobalParams.repairpath,
- storageGlobalParams.dbpath)) {
- return Status(ErrorCodes::BadValue,
- "You must use a --repairpath that is a subdirectory of --dbpath when "
- "using journaling");
- }
+ // needs to be after things like --configsvr parsing, thus here.
+ if (params.count("storage.repairPath")) {
+ storageGlobalParams.repairpath = params["storage.repairPath"].as<string>();
+ if (!storageGlobalParams.repairpath.size()) {
+ return Status(ErrorCodes::BadValue, "repairpath is empty");
}
- else {
- storageGlobalParams.repairpath = storageGlobalParams.dbpath;
+
+ if (storageGlobalParams.dur &&
+ !str::startsWith(storageGlobalParams.repairpath, storageGlobalParams.dbpath)) {
+ return Status(ErrorCodes::BadValue,
+ "You must use a --repairpath that is a subdirectory of --dbpath when "
+ "using journaling");
}
+ } else {
+ storageGlobalParams.repairpath = storageGlobalParams.dbpath;
+ }
- if (replSettings.pretouch)
- log() << "--pretouch " << replSettings.pretouch;
+ if (replSettings.pretouch)
+ log() << "--pretouch " << replSettings.pretouch;
- // Check if we are 32 bit and have not explicitly specified any journaling options
- if (sizeof(void*) == 4 && !params.count("storage.journal.enabled")) {
- // trying to make this stand out more like startup warnings
- log() << endl;
- warning() << "32-bit servers don't have journaling enabled by default. "
- << "Please use --journal if you want durability." << endl;
- log() << endl;
- }
+ // Check if we are 32 bit and have not explicitly specified any journaling options
+ if (sizeof(void*) == 4 && !params.count("storage.journal.enabled")) {
+ // trying to make this stand out more like startup warnings
+ log() << endl;
+ warning() << "32-bit servers don't have journaling enabled by default. "
+ << "Please use --journal if you want durability." << endl;
+ log() << endl;
+ }
#ifdef _WIN32
- // If dbPath is a default value, prepend with drive name so log entries are explicit
- if (storageGlobalParams.dbpath == storageGlobalParams.kDefaultDbPath
- || storageGlobalParams.dbpath == storageGlobalParams.kDefaultConfigDbPath) {
- boost::filesystem::path currentPath = boost::filesystem::current_path();
- storageGlobalParams.dbpath = currentPath.root_name().string()
- + storageGlobalParams.dbpath;
- }
+ // If dbPath is a default value, prepend with drive name so log entries are explicit
+ if (storageGlobalParams.dbpath == storageGlobalParams.kDefaultDbPath ||
+ storageGlobalParams.dbpath == storageGlobalParams.kDefaultConfigDbPath) {
+ boost::filesystem::path currentPath = boost::filesystem::current_path();
+ storageGlobalParams.dbpath = currentPath.root_name().string() + storageGlobalParams.dbpath;
+ }
#endif
- setGlobalReplSettings(replSettings);
- return Status::OK();
- }
+ setGlobalReplSettings(replSettings);
+ return Status::OK();
+}
namespace {
- repl::ReplSettings globalReplSettings;
-} // namespace
+repl::ReplSettings globalReplSettings;
+} // namespace
- void setGlobalReplSettings(const repl::ReplSettings& settings) {
- globalReplSettings = settings;
- }
+void setGlobalReplSettings(const repl::ReplSettings& settings) {
+ globalReplSettings = settings;
+}
- const repl::ReplSettings& getGlobalReplSettings() {
- return globalReplSettings;
- }
+const repl::ReplSettings& getGlobalReplSettings() {
+ return globalReplSettings;
+}
-} // namespace mongo
+} // namespace mongo
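For reference, the canonicalization idiom repeated throughout canonicalizeMongodOptions above (fold a legacy "no*" switch into its dotted YAML key, then drop the legacy spelling) can be captured in one small helper. This is a minimal sketch only, not part of the patch: the helper name is hypothetical, and it assumes only the moe::Environment members already used above (count, set, remove, operator[]).

    // Sketch (not in the patch): fold a legacy negated switch into its canonical dotted key.
    Status canonicalizeNegatedSwitch(moe::Environment* params,
                                     const std::string& legacyName,    // e.g. "noscripting"
                                     const std::string& dottedName) {  // e.g. "security.javascriptEnabled"
        if (!params->count(legacyName)) {
            return Status::OK();  // nothing to fold in
        }
        // Legacy "no*" switches carry the inverted sense of the canonical boolean.
        Status ret = params->set(dottedName, moe::Value(!(*params)[legacyName].as<bool>()));
        if (!ret.isOK()) {
            return ret;
        }
        // Remove the legacy spelling so later validation only sees the dotted key.
        return params->remove(legacyName);
    }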
diff --git a/src/mongo/db/mongod_options.h b/src/mongo/db/mongod_options.h
index 27dc9af10ab..942ecd27e63 100644
--- a/src/mongo/db/mongod_options.h
+++ b/src/mongo/db/mongod_options.h
@@ -37,55 +37,53 @@
namespace mongo {
- namespace optionenvironment {
- class OptionSection;
- class Environment;
- } // namespace optionenvironment
+namespace optionenvironment {
+class OptionSection;
+class Environment;
+} // namespace optionenvironment
- namespace moe = mongo::optionenvironment;
+namespace moe = mongo::optionenvironment;
- struct MongodGlobalParams {
- bool scriptingEnabled; // --noscripting
+struct MongodGlobalParams {
+ bool scriptingEnabled; // --noscripting
- MongodGlobalParams() :
- scriptingEnabled(true)
- { }
- };
+ MongodGlobalParams() : scriptingEnabled(true) {}
+};
- extern MongodGlobalParams mongodGlobalParams;
+extern MongodGlobalParams mongodGlobalParams;
- Status addMongodOptions(moe::OptionSection* options);
+Status addMongodOptions(moe::OptionSection* options);
- void printMongodHelp(const moe::OptionSection& options);
+void printMongodHelp(const moe::OptionSection& options);
- /**
- * Handle options that should come before validation, such as "help".
- *
- * Returns false if an option was found that implies we should prematurely exit with success.
- */
- bool handlePreValidationMongodOptions(const moe::Environment& params,
- const std::vector<std::string>& args);
+/**
+ * Handle options that should come before validation, such as "help".
+ *
+ * Returns false if an option was found that implies we should prematurely exit with success.
+ */
+bool handlePreValidationMongodOptions(const moe::Environment& params,
+ const std::vector<std::string>& args);
- /**
- * Handle custom validation of mongod options that can not currently be done by using
- * Constraints in the Environment. See the "validate" function in the Environment class for
- * more details.
- */
- Status validateMongodOptions(const moe::Environment& params);
+/**
+ * Handle custom validation of mongod options that can not currently be done by using
+ * Constraints in the Environment. See the "validate" function in the Environment class for
+ * more details.
+ */
+Status validateMongodOptions(const moe::Environment& params);
- /**
- * Canonicalize mongod options for the given environment.
- *
- * For example, the options "dur", "nodur", "journal", "nojournal", and
- * "storage.journaling.enabled" should all be merged into "storage.journaling.enabled".
- */
- Status canonicalizeMongodOptions(moe::Environment* params);
+/**
+ * Canonicalize mongod options for the given environment.
+ *
+ * For example, the options "dur", "nodur", "journal", "nojournal", and
+ * "storage.journaling.enabled" should all be merged into "storage.journaling.enabled".
+ */
+Status canonicalizeMongodOptions(moe::Environment* params);
- // Must be called after "storeMongodOptions"
- StatusWith<repl::ReplSettings> parseMongodReplicationOptions(const moe::Environment& params);
+// Must be called after "storeMongodOptions"
+StatusWith<repl::ReplSettings> parseMongodReplicationOptions(const moe::Environment& params);
- Status storeMongodOptions(const moe::Environment& params, const std::vector<std::string>& args);
+Status storeMongodOptions(const moe::Environment& params, const std::vector<std::string>& args);
- void setGlobalReplSettings(const repl::ReplSettings& settings);
- const repl::ReplSettings& getGlobalReplSettings();
+void setGlobalReplSettings(const repl::ReplSettings& settings);
+const repl::ReplSettings& getGlobalReplSettings();
}
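As a hypothetical before/after for the merge described in the canonicalizeMongodOptions comment above (not part of the patch; it assumes a moe::Environment already populated by the startup option parser):

    moe::Environment params;  // assume filled in by the startup option parser
    Status status = params.set("nodur", moe::Value(true));
    if (status.isOK()) {
        status = canonicalizeMongodOptions(&params);
    }
    if (status.isOK()) {
        // The legacy switch has been removed and folded into the canonical key.
        invariant(!params.count("nodur"));
        invariant(params["storage.journal.enabled"].as<bool>() == false);
    }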
diff --git a/src/mongo/db/mongod_options_init.cpp b/src/mongo/db/mongod_options_init.cpp
index a8c8ebb74cd..cb9bbcc0cac 100644
--- a/src/mongo/db/mongod_options_init.cpp
+++ b/src/mongo/db/mongod_options_init.cpp
@@ -35,47 +35,46 @@
#include "mongo/util/quick_exit.h"
namespace mongo {
- MONGO_GENERAL_STARTUP_OPTIONS_REGISTER(MongodOptions)(InitializerContext* context) {
- return addMongodOptions(&moe::startupOptions);
- }
+MONGO_GENERAL_STARTUP_OPTIONS_REGISTER(MongodOptions)(InitializerContext* context) {
+ return addMongodOptions(&moe::startupOptions);
+}
- MONGO_STARTUP_OPTIONS_VALIDATE(MongodOptions)(InitializerContext* context) {
- if (!handlePreValidationMongodOptions(moe::startupOptionsParsed, context->args())) {
- quickExit(EXIT_SUCCESS);
- }
- // Run validation, but tell the Environment that we don't want it to be set as "valid",
- // since we may be making it invalid in the canonicalization process.
- Status ret = moe::startupOptionsParsed.validate(false/*setValid*/);
- if (!ret.isOK()) {
- return ret;
- }
- ret = validateMongodOptions(moe::startupOptionsParsed);
- if (!ret.isOK()) {
- return ret;
- }
- ret = canonicalizeMongodOptions(&moe::startupOptionsParsed);
- if (!ret.isOK()) {
- return ret;
- }
- ret = moe::startupOptionsParsed.validate();
- if (!ret.isOK()) {
- return ret;
- }
- return Status::OK();
+MONGO_STARTUP_OPTIONS_VALIDATE(MongodOptions)(InitializerContext* context) {
+ if (!handlePreValidationMongodOptions(moe::startupOptionsParsed, context->args())) {
+ quickExit(EXIT_SUCCESS);
+ }
+ // Run validation, but tell the Environment that we don't want it to be set as "valid",
+ // since we may be making it invalid in the canonicalization process.
+ Status ret = moe::startupOptionsParsed.validate(false /*setValid*/);
+ if (!ret.isOK()) {
+ return ret;
+ }
+ ret = validateMongodOptions(moe::startupOptionsParsed);
+ if (!ret.isOK()) {
+ return ret;
+ }
+ ret = canonicalizeMongodOptions(&moe::startupOptionsParsed);
+ if (!ret.isOK()) {
+ return ret;
+ }
+ ret = moe::startupOptionsParsed.validate();
+ if (!ret.isOK()) {
+ return ret;
}
+ return Status::OK();
+}
- MONGO_INITIALIZER_GENERAL(MongodOptions_Store,
- ("BeginStartupOptionStorage"),
- ("EndStartupOptionStorage"))
- (InitializerContext* context) {
- Status ret = storeMongodOptions(moe::startupOptionsParsed, context->args());
- if (!ret.isOK()) {
- std::cerr << ret.toString() << std::endl;
- std::cerr << "try '" << context->args()[0] << " --help' for more information"
- << std::endl;
- quickExit(EXIT_BADOPTIONS);
- }
- return Status::OK();
+MONGO_INITIALIZER_GENERAL(MongodOptions_Store,
+ ("BeginStartupOptionStorage"),
+ ("EndStartupOptionStorage"))
+(InitializerContext* context) {
+ Status ret = storeMongodOptions(moe::startupOptionsParsed, context->args());
+ if (!ret.isOK()) {
+ std::cerr << ret.toString() << std::endl;
+ std::cerr << "try '" << context->args()[0] << " --help' for more information" << std::endl;
+ quickExit(EXIT_BADOPTIONS);
}
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/namespace_string-inl.h b/src/mongo/db/namespace_string-inl.h
index 5f1b25c3113..8e8808fb9b8 100644
--- a/src/mongo/db/namespace_string-inl.h
+++ b/src/mongo/db/namespace_string-inl.h
@@ -31,38 +31,35 @@
namespace mongo {
- inline StringData NamespaceString::db() const {
- return _dotIndex == std::string::npos ?
- StringData() :
- StringData( _ns.c_str(), _dotIndex );
- }
+inline StringData NamespaceString::db() const {
+ return _dotIndex == std::string::npos ? StringData() : StringData(_ns.c_str(), _dotIndex);
+}
- inline StringData NamespaceString::coll() const {
- return _dotIndex == std::string::npos ?
- StringData() :
- StringData( _ns.c_str() + _dotIndex + 1, _ns.size() - 1 - _dotIndex );
- }
+inline StringData NamespaceString::coll() const {
+ return _dotIndex == std::string::npos ? StringData() : StringData(_ns.c_str() + _dotIndex + 1,
+ _ns.size() - 1 - _dotIndex);
+}
- inline bool NamespaceString::normal(StringData ns) {
- if ( ns.find( '$' ) == std::string::npos )
- return true;
- return oplog(ns);
- }
+inline bool NamespaceString::normal(StringData ns) {
+ if (ns.find('$') == std::string::npos)
+ return true;
+ return oplog(ns);
+}
- inline bool NamespaceString::oplog(StringData ns) {
- return ns.startsWith("local.oplog.");
- }
+inline bool NamespaceString::oplog(StringData ns) {
+ return ns.startsWith("local.oplog.");
+}
- inline bool NamespaceString::special(StringData ns) {
- return !normal(ns) || ns.substr(ns.find('.')).startsWith(".system.");
- }
+inline bool NamespaceString::special(StringData ns) {
+ return !normal(ns) || ns.substr(ns.find('.')).startsWith(".system.");
+}
- inline bool NamespaceString::validDBName( StringData db ) {
- if ( db.size() == 0 || db.size() > 64 )
- return false;
+inline bool NamespaceString::validDBName(StringData db) {
+ if (db.size() == 0 || db.size() > 64)
+ return false;
- for (StringData::const_iterator iter = db.begin(), end = db.end(); iter != end; ++iter) {
- switch (*iter) {
+ for (StringData::const_iterator iter = db.begin(), end = db.end(); iter != end; ++iter) {
+ switch (*iter) {
case '\0':
case '/':
case '\\':
@@ -82,122 +79,117 @@ namespace mongo {
#endif
default:
continue;
- }
}
- return true;
}
+ return true;
+}
- inline bool NamespaceString::validCollectionComponent(StringData ns){
- size_t idx = ns.find( '.' );
- if ( idx == std::string::npos )
- return false;
+inline bool NamespaceString::validCollectionComponent(StringData ns) {
+ size_t idx = ns.find('.');
+ if (idx == std::string::npos)
+ return false;
- return validCollectionName(ns.substr(idx + 1)) || oplog(ns);
- }
+ return validCollectionName(ns.substr(idx + 1)) || oplog(ns);
+}
- inline bool NamespaceString::validCollectionName(StringData coll){
- if (coll.empty())
- return false;
+inline bool NamespaceString::validCollectionName(StringData coll) {
+ if (coll.empty())
+ return false;
- if (coll[0] == '.')
- return false;
+ if (coll[0] == '.')
+ return false;
- for (StringData::const_iterator iter = coll.begin(), end = coll.end();
- iter != end; ++iter) {
- switch (*iter) {
+ for (StringData::const_iterator iter = coll.begin(), end = coll.end(); iter != end; ++iter) {
+ switch (*iter) {
case '\0':
case '$':
return false;
default:
continue;
- }
}
-
- return true;
}
- inline NamespaceString::NamespaceString() : _ns(), _dotIndex(0) {}
- inline NamespaceString::NamespaceString( StringData nsIn ) {
- _ns = nsIn.toString(); // copy to our buffer
- _dotIndex = _ns.find( '.' );
- }
-
- inline NamespaceString::NamespaceString( StringData dbName,
- StringData collectionName )
- : _ns(dbName.size() + collectionName.size() + 1, '\0') {
-
- uassert(17235,
- "'.' is an invalid character in a database name",
- dbName.find('.') == std::string::npos);
- uassert(17246,
- "Collection names cannot start with '.'",
- collectionName.empty() || collectionName[0] != '.');
- std::string::iterator it = std::copy(dbName.begin(), dbName.end(), _ns.begin());
- *it = '.';
- ++it;
- it = std::copy(collectionName.begin(), collectionName.end(), it);
- _dotIndex = dbName.size();
- dassert(it == _ns.end());
- dassert(_ns[_dotIndex] == '.');
- uassert(17295, "namespaces cannot have embedded null characters",
- _ns.find('\0') == std::string::npos);
- }
-
- inline int nsDBHash( const std::string& ns ) {
- int hash = 7;
- for ( size_t i = 0; i < ns.size(); i++ ) {
- if ( ns[i] == '.' )
- break;
- hash += 11 * ( ns[i] );
- hash *= 3;
- }
- return hash;
- }
+ return true;
+}
- inline bool nsDBEquals( const std::string& a, const std::string& b ) {
- for ( size_t i = 0; i < a.size(); i++ ) {
+inline NamespaceString::NamespaceString() : _ns(), _dotIndex(0) {}
+inline NamespaceString::NamespaceString(StringData nsIn) {
+ _ns = nsIn.toString(); // copy to our buffer
+ _dotIndex = _ns.find('.');
+}
- if ( a[i] == '.' ) {
- // b has to either be done or a '.'
+inline NamespaceString::NamespaceString(StringData dbName, StringData collectionName)
+ : _ns(dbName.size() + collectionName.size() + 1, '\0') {
+ uassert(17235,
+ "'.' is an invalid character in a database name",
+ dbName.find('.') == std::string::npos);
+ uassert(17246,
+ "Collection names cannot start with '.'",
+ collectionName.empty() || collectionName[0] != '.');
+ std::string::iterator it = std::copy(dbName.begin(), dbName.end(), _ns.begin());
+ *it = '.';
+ ++it;
+ it = std::copy(collectionName.begin(), collectionName.end(), it);
+ _dotIndex = dbName.size();
+ dassert(it == _ns.end());
+ dassert(_ns[_dotIndex] == '.');
+ uassert(17295,
+ "namespaces cannot have embedded null characters",
+ _ns.find('\0') == std::string::npos);
+}
- if ( b.size() == i )
- return true;
+inline int nsDBHash(const std::string& ns) {
+ int hash = 7;
+ for (size_t i = 0; i < ns.size(); i++) {
+ if (ns[i] == '.')
+ break;
+ hash += 11 * (ns[i]);
+ hash *= 3;
+ }
+ return hash;
+}
- if ( b[i] == '.' )
- return true;
+inline bool nsDBEquals(const std::string& a, const std::string& b) {
+ for (size_t i = 0; i < a.size(); i++) {
+ if (a[i] == '.') {
+ // b has to either be done or a '.'
- return false;
- }
+ if (b.size() == i)
+ return true;
- // a is another character
- if ( b.size() == i )
- return false;
+ if (b[i] == '.')
+ return true;
- if ( b[i] != a[i] )
- return false;
+ return false;
}
- // a is done
- // make sure b is done
- if ( b.size() == a.size() ||
- b[a.size()] == '.' )
- return true;
+ // a is another character
+ if (b.size() == i)
+ return false;
- return false;
+ if (b[i] != a[i])
+ return false;
}
- /* future : this doesn't need to be an inline. */
- inline std::string NamespaceString::getSisterNS( StringData local ) const {
- verify( local.size() && local[0] != '.' );
- return db().toString() + "." + local.toString();
- }
+ // a is done
+ // make sure b is done
+ if (b.size() == a.size() || b[a.size()] == '.')
+ return true;
- inline std::string NamespaceString::getSystemIndexesCollection() const {
- return db().toString() + ".system.indexes";
- }
+ return false;
+}
- inline std::string NamespaceString::getCommandNS() const {
- return db().toString() + ".$cmd";
- }
+/* future : this doesn't need to be an inline. */
+inline std::string NamespaceString::getSisterNS(StringData local) const {
+ verify(local.size() && local[0] != '.');
+ return db().toString() + "." + local.toString();
+}
+inline std::string NamespaceString::getSystemIndexesCollection() const {
+ return db().toString() + ".system.indexes";
+}
+
+inline std::string NamespaceString::getCommandNS() const {
+ return db().toString() + ".$cmd";
+}
}
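A short usage sketch of the inline accessors above (illustrative only; the names "acme.orders" etc. are made up, and the assertions simply restate what db(), coll(), getSisterNS(), getCommandNS() and nsDBEquals() compute):

    NamespaceString ns("acme.orders");
    invariant(ns.db() == "acme");
    invariant(ns.coll() == "orders");
    invariant(ns.getSisterNS("customers") == "acme.customers");
    invariant(ns.getCommandNS() == "acme.$cmd");
    invariant(nsDBEquals("acme.orders", "acme.customers"));   // same database prefix
    invariant(!nsDBEquals("acme.orders", "acmedb.orders"));   // different databases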
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index fe50e2039be..f8c0b6751bb 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -32,84 +32,84 @@
namespace mongo {
- using std::string;
-
- /* A map of characters to escape. Instead of printing certain characters we output
- * based on the following table.
- */
- static const std::string escapeTable[256] = {
- ".00", ".01", ".02", ".03", ".04", ".05", ".06", ".07", ".08", ".09",
- ".10", ".11", ".12", ".13", ".14", ".15", ".16", ".17", ".18", ".19",
- ".20", ".21", ".22", ".23", ".24", ".25", ".26", ".27", ".28", ".29",
- ".30", ".31", ".32", ".33", ".34", ".35", ".36", ".37", ".38", ".39",
- ".40", ".41", ".42", ".43", ".44", ".45", ".", ".47", "0", "1",
- "2", "3", "4", "5", "6", "7", "8", "9", ".58", ".59",
- ".60", ".61", ".62", ".63", ".64", "A", "B", "C", "D", "E",
- "F", "G", "H", "I", "J", "K", "L", "M", "N", "O",
- "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y",
- "Z", ".91", ".92", ".93", ".94", "_", ".96", "a", "b", "c",
- "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
- "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
- "x", "y", "z", ".123", ".124", ".125", ".126", ".127", ".128", ".129",
- ".130", ".131", ".132", ".133", ".134", ".135", ".136", ".137", ".138", ".139",
- ".140", ".141", ".142", ".143", ".144", ".145", ".146", ".147", ".148", ".149",
- ".150", ".151", ".152", ".153", ".154", ".155", ".156", ".157", ".158", ".159",
- ".160", ".161", ".162", ".163", ".164", ".165", ".166", ".167", ".168", ".169",
- ".170", ".171", ".172", ".173", ".174", ".175", ".176", ".177", ".178", ".179",
- ".180", ".181", ".182", ".183", ".184", ".185", ".186", ".187", ".188", ".189",
- ".190", ".191", ".192", ".193", ".194", ".195", ".196", ".197", ".198", ".199",
- ".200", ".201", ".202", ".203", ".204", ".205", ".206", ".207", ".208", ".209",
- ".210", ".211", ".212", ".213", ".214", ".215", ".216", ".217", ".218", ".219",
- ".220", ".221", ".222", ".223", ".224", ".225", ".226", ".227", ".228", ".229",
- ".230", ".231", ".232", ".233", ".234", ".235", ".236", ".237", ".238", ".239",
- ".240", ".241", ".242", ".243", ".244", ".245", ".246", ".247", ".248", ".249",
- ".250", ".251", ".252", ".253", ".254", ".255"
- };
-
- bool legalClientSystemNS( StringData ns , bool write ) {
- if( ns == "local.system.replset" ) return true;
-
- if ( ns.find( ".system.users" ) != string::npos )
- return true;
-
- if ( ns == "admin.system.roles" ) return true;
- if ( ns == "admin.system.version" ) return true;
- if ( ns == "admin.system.new_users" ) return true;
- if ( ns == "admin.system.backup_users" ) return true;
-
- if ( ns.find( ".system.js" ) != string::npos ) return true;
-
- return false;
- }
+using std::string;
+
+/* A map of characters to escape. Instead of printing certain characters we output
+ * based on the following table.
+ */
+static const std::string escapeTable[256] = {
+ ".00", ".01", ".02", ".03", ".04", ".05", ".06", ".07", ".08", ".09", ".10", ".11",
+ ".12", ".13", ".14", ".15", ".16", ".17", ".18", ".19", ".20", ".21", ".22", ".23",
+ ".24", ".25", ".26", ".27", ".28", ".29", ".30", ".31", ".32", ".33", ".34", ".35",
+ ".36", ".37", ".38", ".39", ".40", ".41", ".42", ".43", ".44", ".45", ".", ".47",
+ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ".58", ".59",
+ ".60", ".61", ".62", ".63", ".64", "A", "B", "C", "D", "E", "F", "G",
+ "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S",
+ "T", "U", "V", "W", "X", "Y", "Z", ".91", ".92", ".93", ".94", "_",
+ ".96", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k",
+ "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
+ "x", "y", "z", ".123", ".124", ".125", ".126", ".127", ".128", ".129", ".130", ".131",
+ ".132", ".133", ".134", ".135", ".136", ".137", ".138", ".139", ".140", ".141", ".142", ".143",
+ ".144", ".145", ".146", ".147", ".148", ".149", ".150", ".151", ".152", ".153", ".154", ".155",
+ ".156", ".157", ".158", ".159", ".160", ".161", ".162", ".163", ".164", ".165", ".166", ".167",
+ ".168", ".169", ".170", ".171", ".172", ".173", ".174", ".175", ".176", ".177", ".178", ".179",
+ ".180", ".181", ".182", ".183", ".184", ".185", ".186", ".187", ".188", ".189", ".190", ".191",
+ ".192", ".193", ".194", ".195", ".196", ".197", ".198", ".199", ".200", ".201", ".202", ".203",
+ ".204", ".205", ".206", ".207", ".208", ".209", ".210", ".211", ".212", ".213", ".214", ".215",
+ ".216", ".217", ".218", ".219", ".220", ".221", ".222", ".223", ".224", ".225", ".226", ".227",
+ ".228", ".229", ".230", ".231", ".232", ".233", ".234", ".235", ".236", ".237", ".238", ".239",
+ ".240", ".241", ".242", ".243", ".244", ".245", ".246", ".247", ".248", ".249", ".250", ".251",
+ ".252", ".253", ".254", ".255"};
+
+bool legalClientSystemNS(StringData ns, bool write) {
+ if (ns == "local.system.replset")
+ return true;
+
+ if (ns.find(".system.users") != string::npos)
+ return true;
+
+ if (ns == "admin.system.roles")
+ return true;
+ if (ns == "admin.system.version")
+ return true;
+ if (ns == "admin.system.new_users")
+ return true;
+ if (ns == "admin.system.backup_users")
+ return true;
+
+ if (ns.find(".system.js") != string::npos)
+ return true;
+
+ return false;
+}
- bool NamespaceString::isListCollectionsGetMore() const {
- return coll() == StringData("$cmd.listCollections", StringData::LiteralTag());
- }
+bool NamespaceString::isListCollectionsGetMore() const {
+ return coll() == StringData("$cmd.listCollections", StringData::LiteralTag());
+}
- namespace {
- const StringData listIndexesGetMoreNSPrefix("$cmd.listIndexes.", StringData::LiteralTag());
- } // namespace
+namespace {
+const StringData listIndexesGetMoreNSPrefix("$cmd.listIndexes.", StringData::LiteralTag());
+} // namespace
- bool NamespaceString::isListIndexesGetMore() const {
- return coll().size() > listIndexesGetMoreNSPrefix.size() &&
- coll().startsWith(listIndexesGetMoreNSPrefix);
- }
+bool NamespaceString::isListIndexesGetMore() const {
+ return coll().size() > listIndexesGetMoreNSPrefix.size() &&
+ coll().startsWith(listIndexesGetMoreNSPrefix);
+}
- NamespaceString NamespaceString::getTargetNSForListIndexesGetMore() const {
- dassert(isListIndexesGetMore());
- return NamespaceString(db(), coll().substr(listIndexesGetMoreNSPrefix.size()));
- }
+NamespaceString NamespaceString::getTargetNSForListIndexesGetMore() const {
+ dassert(isListIndexesGetMore());
+ return NamespaceString(db(), coll().substr(listIndexesGetMoreNSPrefix.size()));
+}
- string NamespaceString::escapeDbName( const StringData dbname ) {
- std::string escapedDbName;
+string NamespaceString::escapeDbName(const StringData dbname) {
+ std::string escapedDbName;
- // pre-alloc the return string as it will always be the same as dbname at a minimum.
- escapedDbName.reserve(dbname.size());
+ // pre-alloc the return string as it will always be the same as dbname at a minimum.
+ escapedDbName.reserve(dbname.size());
- for (unsigned char c : dbname) {
- escapedDbName += escapeTable[c];
- }
- return escapedDbName;
+ for (unsigned char c : dbname) {
+ escapedDbName += escapeTable[c];
}
-
+ return escapedDbName;
+}
}
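To make the table-driven escaping above concrete, a few hypothetical inputs and the outputs escapeDbName() would produce (alphanumerics and '_' pass through unchanged; most other bytes become '.' followed by their decimal ASCII code, per escapeTable):

    invariant(NamespaceString::escapeDbName("customer_db") == "customer_db");  // passthrough
    invariant(NamespaceString::escapeDbName("my-db") == "my.45db");            // '-' is ASCII 45
    invariant(NamespaceString::escapeDbName("a b") == "a.32b");                // ' ' is ASCII 32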
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 655dbf68a73..cd1067b7b45 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -38,271 +38,314 @@
namespace mongo {
- /* in the mongo source code, "client" means "database". */
-
- const size_t MaxDatabaseNameLen = 128; // max str len for the db name, including null char
-
- /** @return true if a client can modify this namespace even though it is under ".system."
- For example <dbname>.system.users is ok for regular clients to update.
- @param write used when .system.js
- */
- bool legalClientSystemNS( StringData ns , bool write );
-
- /* e.g.
- NamespaceString ns("acme.orders");
- cout << ns.coll; // "orders"
- */
- class NamespaceString {
- public:
- /**
- * Constructs an empty NamespaceString.
- */
- NamespaceString();
-
- /**
- * Constructs a NamespaceString from the fully qualified namespace named in "ns".
- */
- explicit NamespaceString( StringData ns );
-
- /**
- * Constructs a NamespaceString for the given database and collection names.
- * "dbName" must not contain a ".", and "collectionName" must not start with one.
- */
- NamespaceString( StringData dbName, StringData collectionName );
-
- /**
- * Note that these values are derived from the mmap_v1 implementation and that
- * is the only reason they are constrained as such.
- */
- enum MaxNsLenValue {
- // Maximum possible length of name any namespace, including special ones like $extra.
- // This includes rum for the NUL byte so it can be used when sizing buffers.
- MaxNsLenWithNUL = 128,
-
- // MaxNsLenWithNUL excluding the NUL byte. Use this when comparing std::string lengths.
- MaxNsLen = MaxNsLenWithNUL - 1,
-
- // Maximum allowed length of fully qualified namespace name of any real collection.
- // Does not include NUL so it can be directly compared to std::string lengths.
- MaxNsCollectionLen = MaxNsLen - 7/*strlen(".$extra")*/,
- };
-
- StringData db() const;
- StringData coll() const;
-
- const std::string& ns() const { return _ns; }
-
- operator const std::string&() const { return ns(); }
- const std::string& toString() const { return ns(); }
-
- size_t size() const { return _ns.size(); }
-
- //
- // The following methods assume isValid() is true for this NamespaceString.
- //
-
- bool isSystem() const { return coll().startsWith( "system." ); }
- bool isSystemDotIndexes() const { return coll() == "system.indexes"; }
- bool isSystemDotProfile() const { return coll() == "system.profile"; }
- bool isConfigDB() const { return db() == "config"; }
- bool isCommand() const { return coll() == "$cmd"; }
- bool isOplog() const { return oplog( _ns ); }
- bool isSpecialCommand() const { return coll().startsWith("$cmd.sys"); }
- bool isSpecial() const { return special( _ns ); }
- bool isOnInternalDb() const { return internalDb(db()); }
- bool isNormal() const { return normal( _ns ); }
- bool isListCollectionsGetMore() const;
- bool isListIndexesGetMore() const;
-
- /**
- * Given a NamespaceString for which isListIndexesGetMore() returns true, returns the
- * NamespaceString for the collection that the "listIndexesGetMore" targets.
- */
- NamespaceString getTargetNSForListIndexesGetMore() const;
-
- /**
- * @return true if the namespace is valid. Special namespaces for internal use are considered as valid.
- */
- bool isValid() const { return validDBName( db() ) && !coll().empty(); }
-
- bool operator==( const std::string& nsIn ) const { return nsIn == _ns; }
- bool operator==( StringData nsIn ) const { return nsIn == _ns; }
- bool operator==( const NamespaceString& nsIn ) const { return nsIn._ns == _ns; }
-
- bool operator!=( const std::string& nsIn ) const { return nsIn != _ns; }
- bool operator!=( const NamespaceString& nsIn ) const { return nsIn._ns != _ns; }
-
- bool operator<( const NamespaceString& rhs ) const { return _ns < rhs._ns; }
-
- /** ( foo.bar ).getSisterNS( "blah" ) == foo.blah
- */
- std::string getSisterNS( StringData local ) const;
-
- // @return db() + ".system.indexes"
- std::string getSystemIndexesCollection() const;
-
- // @return db() + ".$cmd"
- std::string getCommandNS() const;
-
- /**
- * Function to escape most non-alpha characters from file names
- */
- static std::string escapeDbName( const StringData dbname );
-
- /**
- * @return true if ns is 'normal'. A "$" is used for namespaces holding index data,
- * which do not contain BSON objects in their records. ("oplog.$main" is the exception)
- */
- static bool normal(StringData ns);
-
- /**
- * @return true if the ns is an oplog one, otherwise false.
- */
- static bool oplog(StringData ns);
-
- static bool special(StringData ns);
-
- /**
- * Returns true for DBs with special meaning to mongodb.
- */
- static bool internalDb(StringData ns) {
- if (ns == "admin") return true;
- if (ns == "local") return true;
- if (ns == "config") return true;
- return false;
- }
-
- /**
- * samples:
- * good
- * foo
- * bar
- * foo-bar
- * bad:
- * foo bar
- * foo.bar
- * foo"bar
- *
- * @param db - a possible database name
- * @return if db is an allowed database name
- */
- static bool validDBName( StringData dbin );
-
- /**
- * Takes a fully qualified namespace (ie dbname.collectionName), and returns true if
- * the collection name component of the namespace is valid.
- * samples:
- * good:
- * foo.bar
- * bad:
- * foo.
- *
- * @param ns - a full namespace (a.b)
- * @return if db.coll is an allowed collection name
- */
- static bool validCollectionComponent(StringData ns);
-
- /**
- * Takes a collection name and returns true if it is a valid collection name.
- * samples:
- * good:
- * foo
- * system.indexes
- * bad:
- * $foo
- * @param coll - a collection name component of a namespace
- * @return if the input is a valid collection name
- */
- static bool validCollectionName(StringData coll);
-
- private:
-
- std::string _ns;
- size_t _dotIndex;
+/* in the mongo source code, "client" means "database". */
+
+const size_t MaxDatabaseNameLen = 128; // max str len for the db name, including null char
+
+/** @return true if a client can modify this namespace even though it is under ".system."
+ For example <dbname>.system.users is ok for regular clients to update.
+ @param write used when .system.js
+*/
+bool legalClientSystemNS(StringData ns, bool write);
+
+/* e.g.
+ NamespaceString ns("acme.orders");
+ cout << ns.coll; // "orders"
+*/
+class NamespaceString {
+public:
+ /**
+ * Constructs an empty NamespaceString.
+ */
+ NamespaceString();
+
+ /**
+ * Constructs a NamespaceString from the fully qualified namespace named in "ns".
+ */
+ explicit NamespaceString(StringData ns);
+
+ /**
+ * Constructs a NamespaceString for the given database and collection names.
+ * "dbName" must not contain a ".", and "collectionName" must not start with one.
+ */
+ NamespaceString(StringData dbName, StringData collectionName);
+
+ /**
+ * Note that these values are derived from the mmap_v1 implementation and that
+ * is the only reason they are constrained as such.
+ */
+ enum MaxNsLenValue {
+ // Maximum possible length of the name of any namespace, including special ones like $extra.
+ // This includes room for the NUL byte so it can be used when sizing buffers.
+ MaxNsLenWithNUL = 128,
+
+ // MaxNsLenWithNUL excluding the NUL byte. Use this when comparing std::string lengths.
+ MaxNsLen = MaxNsLenWithNUL - 1,
+
+ // Maximum allowed length of fully qualified namespace name of any real collection.
+ // Does not include NUL so it can be directly compared to std::string lengths.
+ MaxNsCollectionLen = MaxNsLen - 7 /*strlen(".$extra")*/,
};
+ StringData db() const;
+ StringData coll() const;
- // "database.a.b.c" -> "database"
- inline StringData nsToDatabaseSubstring( StringData ns ) {
- size_t i = ns.find( '.' );
- if ( i == std::string::npos ) {
- massert(10078, "nsToDatabase: db too long", ns.size() < MaxDatabaseNameLen );
- return ns;
- }
- massert(10088, "nsToDatabase: db too long", i < static_cast<size_t>(MaxDatabaseNameLen));
- return ns.substr( 0, i );
+ const std::string& ns() const {
+ return _ns;
}
- // "database.a.b.c" -> "database"
- inline void nsToDatabase(StringData ns, char *database) {
- StringData db = nsToDatabaseSubstring( ns );
- db.copyTo( database, true );
+ operator const std::string&() const {
+ return ns();
+ }
+ const std::string& toString() const {
+ return ns();
}
- // TODO: make this return a StringData
- inline std::string nsToDatabase(StringData ns) {
- return nsToDatabaseSubstring( ns ).toString();
+ size_t size() const {
+ return _ns.size();
}
- // "database.a.b.c" -> "a.b.c"
- inline StringData nsToCollectionSubstring( StringData ns ) {
- size_t i = ns.find( '.' );
- massert(16886, "nsToCollectionSubstring: no .", i != std::string::npos );
- return ns.substr( i + 1 );
+ //
+ // The following methods assume isValid() is true for this NamespaceString.
+ //
+
+ bool isSystem() const {
+ return coll().startsWith("system.");
+ }
+ bool isSystemDotIndexes() const {
+ return coll() == "system.indexes";
+ }
+ bool isSystemDotProfile() const {
+ return coll() == "system.profile";
}
+ bool isConfigDB() const {
+ return db() == "config";
+ }
+ bool isCommand() const {
+ return coll() == "$cmd";
+ }
+ bool isOplog() const {
+ return oplog(_ns);
+ }
+ bool isSpecialCommand() const {
+ return coll().startsWith("$cmd.sys");
+ }
+ bool isSpecial() const {
+ return special(_ns);
+ }
+ bool isOnInternalDb() const {
+ return internalDb(db());
+ }
+ bool isNormal() const {
+ return normal(_ns);
+ }
+ bool isListCollectionsGetMore() const;
+ bool isListIndexesGetMore() const;
/**
- * foo = false
- * foo. = false
- * foo.a = true
+ * Given a NamespaceString for which isListIndexesGetMore() returns true, returns the
+ * NamespaceString for the collection that the "listIndexesGetMore" targets.
*/
- inline bool nsIsFull( StringData ns ) {
- size_t i = ns.find( '.' );
- if ( i == std::string::npos )
- return false;
- if ( i == ns.size() - 1 )
- return false;
- return true;
+ NamespaceString getTargetNSForListIndexesGetMore() const;
+
+ /**
+     * @return true if the namespace is valid. Special namespaces for internal use are
+     * considered valid.
+ */
+ bool isValid() const {
+ return validDBName(db()) && !coll().empty();
}
+ bool operator==(const std::string& nsIn) const {
+ return nsIn == _ns;
+ }
+ bool operator==(StringData nsIn) const {
+ return nsIn == _ns;
+ }
+ bool operator==(const NamespaceString& nsIn) const {
+ return nsIn._ns == _ns;
+ }
+
+ bool operator!=(const std::string& nsIn) const {
+ return nsIn != _ns;
+ }
+ bool operator!=(const NamespaceString& nsIn) const {
+ return nsIn._ns != _ns;
+ }
+
+ bool operator<(const NamespaceString& rhs) const {
+ return _ns < rhs._ns;
+ }
+
+ /** ( foo.bar ).getSisterNS( "blah" ) == foo.blah
+ */
+ std::string getSisterNS(StringData local) const;
+
+ // @return db() + ".system.indexes"
+ std::string getSystemIndexesCollection() const;
+
+ // @return db() + ".$cmd"
+ std::string getCommandNS() const;
+
+ /**
+     * Escapes most non-alpha characters in a database name (e.g. for use in file names).
+ */
+ static std::string escapeDbName(const StringData dbname);
+
+ /**
+ * @return true if ns is 'normal'. A "$" is used for namespaces holding index data,
+ * which do not contain BSON objects in their records. ("oplog.$main" is the exception)
+ */
+ static bool normal(StringData ns);
+
+ /**
+ * @return true if the ns is an oplog one, otherwise false.
+ */
+ static bool oplog(StringData ns);
+
+ static bool special(StringData ns);
+
/**
- * foo = true
- * foo. = false
- * foo.a = false
+ * Returns true for DBs with special meaning to mongodb.
*/
- inline bool nsIsDbOnly(StringData ns) {
- size_t i = ns.find('.');
- if (i == std::string::npos)
+ static bool internalDb(StringData ns) {
+ if (ns == "admin")
+ return true;
+ if (ns == "local")
+ return true;
+ if (ns == "config")
return true;
return false;
}
/**
- * NamespaceDBHash and NamespaceDBEquals allow you to do something like
- * unordered_map<std::string,int,NamespaceDBHash,NamespaceDBEquals>
- * and use the full namespace for the string
- * but comparisons are done only on the db piece
+ * samples:
+ * good
+ * foo
+ * bar
+ * foo-bar
+ * bad:
+ * foo bar
+ * foo.bar
+ * foo"bar
+ *
+ * @param db - a possible database name
+ * @return if db is an allowed database name
*/
+ static bool validDBName(StringData dbin);
/**
- * this can change, do not store on disk
+ * Takes a fully qualified namespace (ie dbname.collectionName), and returns true if
+ * the collection name component of the namespace is valid.
+ * samples:
+ * good:
+ * foo.bar
+ * bad:
+ * foo.
+ *
+ * @param ns - a full namespace (a.b)
+ * @return if db.coll is an allowed collection name
*/
- int nsDBHash( const std::string& ns );
+ static bool validCollectionComponent(StringData ns);
- bool nsDBEquals( const std::string& a, const std::string& b );
+ /**
+ * Takes a collection name and returns true if it is a valid collection name.
+ * samples:
+ * good:
+ * foo
+ * system.indexes
+ * bad:
+ * $foo
+ * @param coll - a collection name component of a namespace
+ * @return if the input is a valid collection name
+ */
+ static bool validCollectionName(StringData coll);
- struct NamespaceDBHash {
- int operator()( const std::string& ns ) const {
- return nsDBHash( ns );
- }
- };
+private:
+ std::string _ns;
+ size_t _dotIndex;
+};
- struct NamespaceDBEquals {
- bool operator()( const std::string& a, const std::string& b ) const {
- return nsDBEquals( a, b );
- }
- };
+// "database.a.b.c" -> "database"
+inline StringData nsToDatabaseSubstring(StringData ns) {
+ size_t i = ns.find('.');
+ if (i == std::string::npos) {
+ massert(10078, "nsToDatabase: db too long", ns.size() < MaxDatabaseNameLen);
+ return ns;
+ }
+ massert(10088, "nsToDatabase: db too long", i < static_cast<size_t>(MaxDatabaseNameLen));
+ return ns.substr(0, i);
+}
+
+// "database.a.b.c" -> "database"
+inline void nsToDatabase(StringData ns, char* database) {
+ StringData db = nsToDatabaseSubstring(ns);
+ db.copyTo(database, true);
+}
+
+// TODO: make this return a StringData
+inline std::string nsToDatabase(StringData ns) {
+ return nsToDatabaseSubstring(ns).toString();
+}
+
+// "database.a.b.c" -> "a.b.c"
+inline StringData nsToCollectionSubstring(StringData ns) {
+ size_t i = ns.find('.');
+ massert(16886, "nsToCollectionSubstring: no .", i != std::string::npos);
+ return ns.substr(i + 1);
+}
+
+/**
+ * foo = false
+ * foo. = false
+ * foo.a = true
+ */
+inline bool nsIsFull(StringData ns) {
+ size_t i = ns.find('.');
+ if (i == std::string::npos)
+ return false;
+ if (i == ns.size() - 1)
+ return false;
+ return true;
+}
+
+/**
+ * foo = true
+ * foo. = false
+ * foo.a = false
+ */
+inline bool nsIsDbOnly(StringData ns) {
+ size_t i = ns.find('.');
+ if (i == std::string::npos)
+ return true;
+ return false;
+}
+
+/**
+ * NamespaceDBHash and NamespaceDBEquals allow you to do something like
+ * unordered_map<std::string,int,NamespaceDBHash,NamespaceDBEquals>
+ * and use the full namespace for the string
+ * but comparisons are done only on the db piece
+ */
+
+/**
+ * The hash value may change between releases; do not store it on disk.
+ */
+int nsDBHash(const std::string& ns);
+
+bool nsDBEquals(const std::string& a, const std::string& b);
+
+struct NamespaceDBHash {
+ int operator()(const std::string& ns) const {
+ return nsDBHash(ns);
+ }
+};
+
+struct NamespaceDBEquals {
+ bool operator()(const std::string& a, const std::string& b) const {
+ return nsDBEquals(a, b);
+ }
+};
}
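
A minimal usage sketch of the NamespaceString helpers declared in the header above. This is illustrative only and not part of the diff; the namespace values are made up.

    #include <iostream>
    #include <unordered_map>

    #include "mongo/db/namespace_string.h"

    using namespace mongo;

    void namespaceStringSketch() {
        NamespaceString ns("acme.orders");
        std::cout << ns.db().toString() << "\n";    // prints "acme"
        std::cout << ns.coll().toString() << "\n";  // prints "orders"

        // nsToDatabaseSubstring() returns the db prefix without copying.
        StringData db = nsToDatabaseSubstring("acme.orders.archive");
        std::cout << db.toString() << "\n";  // prints "acme"

        // NamespaceDBHash/NamespaceDBEquals key an unordered_map on the db part only,
        // so both increments below land on the same entry.
        std::unordered_map<std::string, int, NamespaceDBHash, NamespaceDBEquals> perDb;
        perDb["acme.orders"] += 1;
        perDb["acme.users"] += 1;  // compares equal to "acme.orders" (same db)
        std::cout << perDb.size() << "\n";  // prints 1
    }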
diff --git a/src/mongo/db/namespace_string_test.cpp b/src/mongo/db/namespace_string_test.cpp
index 64ee9d230ae..d1b3c7b4420 100644
--- a/src/mongo/db/namespace_string_test.cpp
+++ b/src/mongo/db/namespace_string_test.cpp
@@ -33,206 +33,205 @@
namespace mongo {
- using std::string;
-
- TEST( NamespaceStringTest, Normal ) {
- ASSERT( NamespaceString::normal( "a" ) );
- ASSERT( NamespaceString::normal( "a.b" ) );
- ASSERT( NamespaceString::normal( "a.b.c" ) );
-
- ASSERT( !NamespaceString::normal( "a.b.$c" ) );
- ASSERT( !NamespaceString::normal( "a.b.$.c" ) );
-
- ASSERT( NamespaceString::normal( "local.oplog.$main" ) );
- ASSERT( NamespaceString::normal( "local.oplog.rs" ) );
- }
-
- TEST( NamespaceStringTest, Oplog ) {
- ASSERT( !NamespaceString::oplog( "a" ) );
- ASSERT( !NamespaceString::oplog( "a.b" ) );
-
- ASSERT( NamespaceString::oplog( "local.oplog.rs" ) );
- ASSERT( NamespaceString::oplog( "local.oplog.foo" ) );
- ASSERT( NamespaceString::oplog( "local.oplog.$main" ) );
- ASSERT( NamespaceString::oplog( "local.oplog.$foo" ) );
- }
-
- TEST( NamespaceStringTest, Special ) {
- ASSERT( NamespaceString::special( "a.$.b" ) );
- ASSERT( NamespaceString::special( "a.system.foo" ) );
- ASSERT( !NamespaceString::special( "a.foo" ) );
- ASSERT( !NamespaceString::special( "a.foo.system.bar" ) );
- ASSERT( !NamespaceString::special( "a.systemfoo" ) );
- }
-
- TEST( NamespaceStringTest, DatabaseValidNames ) {
- ASSERT( NamespaceString::validDBName( "foo" ) );
- ASSERT( !NamespaceString::validDBName( "foo/bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo.bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo.bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo\\bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo\"bar" ) );
- ASSERT( !NamespaceString::validDBName( StringData( "a\0b", StringData::LiteralTag() ) ) );
+using std::string;
+
+TEST(NamespaceStringTest, Normal) {
+ ASSERT(NamespaceString::normal("a"));
+ ASSERT(NamespaceString::normal("a.b"));
+ ASSERT(NamespaceString::normal("a.b.c"));
+
+ ASSERT(!NamespaceString::normal("a.b.$c"));
+ ASSERT(!NamespaceString::normal("a.b.$.c"));
+
+ ASSERT(NamespaceString::normal("local.oplog.$main"));
+ ASSERT(NamespaceString::normal("local.oplog.rs"));
+}
+
+TEST(NamespaceStringTest, Oplog) {
+ ASSERT(!NamespaceString::oplog("a"));
+ ASSERT(!NamespaceString::oplog("a.b"));
+
+ ASSERT(NamespaceString::oplog("local.oplog.rs"));
+ ASSERT(NamespaceString::oplog("local.oplog.foo"));
+ ASSERT(NamespaceString::oplog("local.oplog.$main"));
+ ASSERT(NamespaceString::oplog("local.oplog.$foo"));
+}
+
+TEST(NamespaceStringTest, Special) {
+ ASSERT(NamespaceString::special("a.$.b"));
+ ASSERT(NamespaceString::special("a.system.foo"));
+ ASSERT(!NamespaceString::special("a.foo"));
+ ASSERT(!NamespaceString::special("a.foo.system.bar"));
+ ASSERT(!NamespaceString::special("a.systemfoo"));
+}
+
+TEST(NamespaceStringTest, DatabaseValidNames) {
+ ASSERT(NamespaceString::validDBName("foo"));
+ ASSERT(!NamespaceString::validDBName("foo/bar"));
+ ASSERT(!NamespaceString::validDBName("foo bar"));
+ ASSERT(!NamespaceString::validDBName("foo.bar"));
+ ASSERT(!NamespaceString::validDBName("foo.bar"));
+ ASSERT(!NamespaceString::validDBName("foo\\bar"));
+ ASSERT(!NamespaceString::validDBName("foo\"bar"));
+ ASSERT(!NamespaceString::validDBName(StringData("a\0b", StringData::LiteralTag())));
#ifdef _WIN32
- ASSERT( !NamespaceString::validDBName( "foo*bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo<bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo>bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo:bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo|bar" ) );
- ASSERT( !NamespaceString::validDBName( "foo?bar" ) );
+ ASSERT(!NamespaceString::validDBName("foo*bar"));
+ ASSERT(!NamespaceString::validDBName("foo<bar"));
+ ASSERT(!NamespaceString::validDBName("foo>bar"));
+ ASSERT(!NamespaceString::validDBName("foo:bar"));
+ ASSERT(!NamespaceString::validDBName("foo|bar"));
+ ASSERT(!NamespaceString::validDBName("foo?bar"));
#endif
- ASSERT( NamespaceString::normal( "asdads" ) );
- ASSERT( !NamespaceString::normal( "asda$ds" ) );
- ASSERT( NamespaceString::normal( "local.oplog.$main" ) );
- }
-
- TEST(NamespaceStringTest, ListCollectionsGetMore) {
- ASSERT(NamespaceString("test.$cmd.listCollections").isListCollectionsGetMore());
-
- ASSERT(!NamespaceString("test.foo").isListCollectionsGetMore());
- ASSERT(!NamespaceString("test.foo.$cmd.listCollections").isListCollectionsGetMore());
- ASSERT(!NamespaceString("test.$cmd.").isListCollectionsGetMore());
- ASSERT(!NamespaceString("test.$cmd.foo.").isListCollectionsGetMore());
- ASSERT(!NamespaceString("test.$cmd.listCollections.").isListCollectionsGetMore());
- ASSERT(!NamespaceString("test.$cmd.listIndexes").isListCollectionsGetMore());
- ASSERT(!NamespaceString("test.$cmd.listIndexes.foo").isListCollectionsGetMore());
- }
-
- TEST(NamespaceStringTest, ListIndexesGetMore) {
- NamespaceString ns1("test.$cmd.listIndexes.f");
- ASSERT(ns1.isListIndexesGetMore());
- ASSERT("test.f" == ns1.getTargetNSForListIndexesGetMore().ns());
-
- NamespaceString ns2("test.$cmd.listIndexes.foo");
- ASSERT(ns2.isListIndexesGetMore());
- ASSERT("test.foo" == ns2.getTargetNSForListIndexesGetMore().ns());
-
- NamespaceString ns3("test.$cmd.listIndexes.foo.bar");
- ASSERT(ns3.isListIndexesGetMore());
- ASSERT("test.foo.bar" == ns3.getTargetNSForListIndexesGetMore().ns());
-
- ASSERT(!NamespaceString("test.foo").isListIndexesGetMore());
- ASSERT(!NamespaceString("test.foo.$cmd.listIndexes").isListIndexesGetMore());
- ASSERT(!NamespaceString("test.$cmd.").isListIndexesGetMore());
- ASSERT(!NamespaceString("test.$cmd.foo.").isListIndexesGetMore());
- ASSERT(!NamespaceString("test.$cmd.listIndexes").isListIndexesGetMore());
- ASSERT(!NamespaceString("test.$cmd.listIndexes.").isListIndexesGetMore());
- ASSERT(!NamespaceString("test.$cmd.listCollections").isListIndexesGetMore());
- ASSERT(!NamespaceString("test.$cmd.listCollections.foo").isListIndexesGetMore());
- }
-
- TEST( NamespaceStringTest, CollectionComponentValidNames ) {
- ASSERT( NamespaceString::validCollectionComponent( "a.b" ) );
- ASSERT( NamespaceString::validCollectionComponent( "a.b" ) );
- ASSERT( !NamespaceString::validCollectionComponent( "a." ) );
- ASSERT( !NamespaceString::validCollectionComponent( "a..foo" ) );
- ASSERT( NamespaceString::validCollectionComponent( "a.b." ) ); // TODO: should this change?
- }
-
- TEST( NamespaceStringTest, CollectionValidNames ) {
- ASSERT( NamespaceString::validCollectionName( "a" ) );
- ASSERT( NamespaceString::validCollectionName( "a.b" ) );
- ASSERT( NamespaceString::validCollectionName( "a." ) ); // TODO: should this change?
- ASSERT( NamespaceString::validCollectionName( "a.b." ) ); // TODO: should this change?
- ASSERT( !NamespaceString::validCollectionName( ".a" ) );
- ASSERT( !NamespaceString::validCollectionName( "$a" ) );
- ASSERT( !NamespaceString::validCollectionName( "a$b" ) );
- ASSERT( !NamespaceString::validCollectionName( "" ) );
- ASSERT( !NamespaceString::validCollectionName(
- StringData( "a\0b", StringData::LiteralTag() ) ) );
- }
-
- TEST( NamespaceStringTest, DBHash ) {
- ASSERT_EQUALS( nsDBHash( "foo" ), nsDBHash( "foo" ) );
- ASSERT_EQUALS( nsDBHash( "foo" ), nsDBHash( "foo.a" ) );
- ASSERT_EQUALS( nsDBHash( "foo" ), nsDBHash( "foo." ) );
-
- ASSERT_EQUALS( nsDBHash( "" ), nsDBHash( "" ) );
- ASSERT_EQUALS( nsDBHash( "" ), nsDBHash( ".a" ) );
- ASSERT_EQUALS( nsDBHash( "" ), nsDBHash( "." ) );
-
- ASSERT_NOT_EQUALS( nsDBHash( "foo" ), nsDBHash( "food" ) );
- ASSERT_NOT_EQUALS( nsDBHash( "foo." ), nsDBHash( "food" ) );
- ASSERT_NOT_EQUALS( nsDBHash( "foo.d" ), nsDBHash( "food" ) );
- }
-
-#define testEqualsBothWays(X,Y) ASSERT_TRUE( nsDBEquals( (X), (Y) ) ); ASSERT_TRUE( nsDBEquals( (Y), (X) ) );
-#define testNotEqualsBothWays(X,Y) ASSERT_FALSE( nsDBEquals( (X), (Y) ) ); ASSERT_FALSE( nsDBEquals( (Y), (X) ) );
-
- TEST( NamespaceStringTest, DBEquals ) {
- testEqualsBothWays( "foo" , "foo" );
- testEqualsBothWays( "foo" , "foo.a" );
- testEqualsBothWays( "foo.a" , "foo.a" );
- testEqualsBothWays( "foo.a" , "foo.b" );
-
- testEqualsBothWays( "" , "" );
- testEqualsBothWays( "" , "." );
- testEqualsBothWays( "" , ".x" );
-
- testNotEqualsBothWays( "foo" , "bar" );
- testNotEqualsBothWays( "foo" , "food" );
- testNotEqualsBothWays( "foo." , "food" );
-
- testNotEqualsBothWays( "" , "x" );
- testNotEqualsBothWays( "" , "x." );
- testNotEqualsBothWays( "" , "x.y" );
- testNotEqualsBothWays( "." , "x" );
- testNotEqualsBothWays( "." , "x." );
- testNotEqualsBothWays( "." , "x.y" );
- }
-
- TEST( NamespaceStringTest, nsToDatabase1 ) {
- ASSERT_EQUALS( "foo", nsToDatabaseSubstring( "foo.bar" ) );
- ASSERT_EQUALS( "foo", nsToDatabaseSubstring( "foo" ) );
- ASSERT_EQUALS( "foo", nsToDatabase( "foo.bar" ) );
- ASSERT_EQUALS( "foo", nsToDatabase( "foo" ) );
- ASSERT_EQUALS( "foo", nsToDatabase( string("foo.bar") ) );
- ASSERT_EQUALS( "foo", nsToDatabase( string("foo") ) );
- }
-
- TEST( NamespaceStringTest, nsToDatabase2 ) {
- char buf[MaxDatabaseNameLen];
-
- nsToDatabase( "foo.bar", buf );
- ASSERT_EQUALS( 'f', buf[0] );
- ASSERT_EQUALS( 'o', buf[1] );
- ASSERT_EQUALS( 'o', buf[2] );
- ASSERT_EQUALS( 0, buf[3] );
-
- nsToDatabase( "bar", buf );
- ASSERT_EQUALS( 'b', buf[0] );
- ASSERT_EQUALS( 'a', buf[1] );
- ASSERT_EQUALS( 'r', buf[2] );
- ASSERT_EQUALS( 0, buf[3] );
-
-
- }
-
- TEST( NamespaceStringTest, NamespaceStringParse1 ) {
- NamespaceString ns( "a.b" );
- ASSERT_EQUALS( (string)"a", ns.db() );
- ASSERT_EQUALS( (string)"b", ns.coll() );
- }
-
- TEST( NamespaceStringTest, NamespaceStringParse2 ) {
- NamespaceString ns( "a.b.c" );
- ASSERT_EQUALS( (string)"a", ns.db() );
- ASSERT_EQUALS( (string)"b.c", ns.coll() );
- }
-
- TEST( NamespaceStringTest, NamespaceStringParse3 ) {
- NamespaceString ns( "abc" );
- ASSERT_EQUALS( (string)"", ns.db() );
- ASSERT_EQUALS( (string)"", ns.coll() );
- }
-
- TEST( NamespaceStringTest, NamespaceStringParse4 ) {
- NamespaceString ns( "abc." );
- ASSERT_EQUALS( (string)"abc", ns.db() );
- ASSERT_EQUALS( (string)"", ns.coll() );
- }
+ ASSERT(NamespaceString::normal("asdads"));
+ ASSERT(!NamespaceString::normal("asda$ds"));
+ ASSERT(NamespaceString::normal("local.oplog.$main"));
+}
+
+TEST(NamespaceStringTest, ListCollectionsGetMore) {
+ ASSERT(NamespaceString("test.$cmd.listCollections").isListCollectionsGetMore());
+
+ ASSERT(!NamespaceString("test.foo").isListCollectionsGetMore());
+ ASSERT(!NamespaceString("test.foo.$cmd.listCollections").isListCollectionsGetMore());
+ ASSERT(!NamespaceString("test.$cmd.").isListCollectionsGetMore());
+ ASSERT(!NamespaceString("test.$cmd.foo.").isListCollectionsGetMore());
+ ASSERT(!NamespaceString("test.$cmd.listCollections.").isListCollectionsGetMore());
+ ASSERT(!NamespaceString("test.$cmd.listIndexes").isListCollectionsGetMore());
+ ASSERT(!NamespaceString("test.$cmd.listIndexes.foo").isListCollectionsGetMore());
+}
+
+TEST(NamespaceStringTest, ListIndexesGetMore) {
+ NamespaceString ns1("test.$cmd.listIndexes.f");
+ ASSERT(ns1.isListIndexesGetMore());
+ ASSERT("test.f" == ns1.getTargetNSForListIndexesGetMore().ns());
+
+ NamespaceString ns2("test.$cmd.listIndexes.foo");
+ ASSERT(ns2.isListIndexesGetMore());
+ ASSERT("test.foo" == ns2.getTargetNSForListIndexesGetMore().ns());
+
+ NamespaceString ns3("test.$cmd.listIndexes.foo.bar");
+ ASSERT(ns3.isListIndexesGetMore());
+ ASSERT("test.foo.bar" == ns3.getTargetNSForListIndexesGetMore().ns());
+
+ ASSERT(!NamespaceString("test.foo").isListIndexesGetMore());
+ ASSERT(!NamespaceString("test.foo.$cmd.listIndexes").isListIndexesGetMore());
+ ASSERT(!NamespaceString("test.$cmd.").isListIndexesGetMore());
+ ASSERT(!NamespaceString("test.$cmd.foo.").isListIndexesGetMore());
+ ASSERT(!NamespaceString("test.$cmd.listIndexes").isListIndexesGetMore());
+ ASSERT(!NamespaceString("test.$cmd.listIndexes.").isListIndexesGetMore());
+ ASSERT(!NamespaceString("test.$cmd.listCollections").isListIndexesGetMore());
+ ASSERT(!NamespaceString("test.$cmd.listCollections.foo").isListIndexesGetMore());
+}
+
+TEST(NamespaceStringTest, CollectionComponentValidNames) {
+ ASSERT(NamespaceString::validCollectionComponent("a.b"));
+ ASSERT(NamespaceString::validCollectionComponent("a.b"));
+ ASSERT(!NamespaceString::validCollectionComponent("a."));
+ ASSERT(!NamespaceString::validCollectionComponent("a..foo"));
+ ASSERT(NamespaceString::validCollectionComponent("a.b.")); // TODO: should this change?
+}
+
+TEST(NamespaceStringTest, CollectionValidNames) {
+ ASSERT(NamespaceString::validCollectionName("a"));
+ ASSERT(NamespaceString::validCollectionName("a.b"));
+ ASSERT(NamespaceString::validCollectionName("a.")); // TODO: should this change?
+ ASSERT(NamespaceString::validCollectionName("a.b.")); // TODO: should this change?
+ ASSERT(!NamespaceString::validCollectionName(".a"));
+ ASSERT(!NamespaceString::validCollectionName("$a"));
+ ASSERT(!NamespaceString::validCollectionName("a$b"));
+ ASSERT(!NamespaceString::validCollectionName(""));
+ ASSERT(!NamespaceString::validCollectionName(StringData("a\0b", StringData::LiteralTag())));
+}
+
+TEST(NamespaceStringTest, DBHash) {
+ ASSERT_EQUALS(nsDBHash("foo"), nsDBHash("foo"));
+ ASSERT_EQUALS(nsDBHash("foo"), nsDBHash("foo.a"));
+ ASSERT_EQUALS(nsDBHash("foo"), nsDBHash("foo."));
+
+ ASSERT_EQUALS(nsDBHash(""), nsDBHash(""));
+ ASSERT_EQUALS(nsDBHash(""), nsDBHash(".a"));
+ ASSERT_EQUALS(nsDBHash(""), nsDBHash("."));
+
+ ASSERT_NOT_EQUALS(nsDBHash("foo"), nsDBHash("food"));
+ ASSERT_NOT_EQUALS(nsDBHash("foo."), nsDBHash("food"));
+ ASSERT_NOT_EQUALS(nsDBHash("foo.d"), nsDBHash("food"));
+}
+#define testEqualsBothWays(X, Y) \
+ ASSERT_TRUE(nsDBEquals((X), (Y))); \
+ ASSERT_TRUE(nsDBEquals((Y), (X)));
+#define testNotEqualsBothWays(X, Y) \
+ ASSERT_FALSE(nsDBEquals((X), (Y))); \
+ ASSERT_FALSE(nsDBEquals((Y), (X)));
+
+TEST(NamespaceStringTest, DBEquals) {
+ testEqualsBothWays("foo", "foo");
+ testEqualsBothWays("foo", "foo.a");
+ testEqualsBothWays("foo.a", "foo.a");
+ testEqualsBothWays("foo.a", "foo.b");
+
+ testEqualsBothWays("", "");
+ testEqualsBothWays("", ".");
+ testEqualsBothWays("", ".x");
+
+ testNotEqualsBothWays("foo", "bar");
+ testNotEqualsBothWays("foo", "food");
+ testNotEqualsBothWays("foo.", "food");
+
+ testNotEqualsBothWays("", "x");
+ testNotEqualsBothWays("", "x.");
+ testNotEqualsBothWays("", "x.y");
+ testNotEqualsBothWays(".", "x");
+ testNotEqualsBothWays(".", "x.");
+ testNotEqualsBothWays(".", "x.y");
}
+TEST(NamespaceStringTest, nsToDatabase1) {
+ ASSERT_EQUALS("foo", nsToDatabaseSubstring("foo.bar"));
+ ASSERT_EQUALS("foo", nsToDatabaseSubstring("foo"));
+ ASSERT_EQUALS("foo", nsToDatabase("foo.bar"));
+ ASSERT_EQUALS("foo", nsToDatabase("foo"));
+ ASSERT_EQUALS("foo", nsToDatabase(string("foo.bar")));
+ ASSERT_EQUALS("foo", nsToDatabase(string("foo")));
+}
+
+TEST(NamespaceStringTest, nsToDatabase2) {
+ char buf[MaxDatabaseNameLen];
+
+ nsToDatabase("foo.bar", buf);
+ ASSERT_EQUALS('f', buf[0]);
+ ASSERT_EQUALS('o', buf[1]);
+ ASSERT_EQUALS('o', buf[2]);
+ ASSERT_EQUALS(0, buf[3]);
+
+ nsToDatabase("bar", buf);
+ ASSERT_EQUALS('b', buf[0]);
+ ASSERT_EQUALS('a', buf[1]);
+ ASSERT_EQUALS('r', buf[2]);
+ ASSERT_EQUALS(0, buf[3]);
+}
+
+TEST(NamespaceStringTest, NamespaceStringParse1) {
+ NamespaceString ns("a.b");
+ ASSERT_EQUALS((string) "a", ns.db());
+ ASSERT_EQUALS((string) "b", ns.coll());
+}
+
+TEST(NamespaceStringTest, NamespaceStringParse2) {
+ NamespaceString ns("a.b.c");
+ ASSERT_EQUALS((string) "a", ns.db());
+ ASSERT_EQUALS((string) "b.c", ns.coll());
+}
+
+TEST(NamespaceStringTest, NamespaceStringParse3) {
+ NamespaceString ns("abc");
+ ASSERT_EQUALS((string) "", ns.db());
+ ASSERT_EQUALS((string) "", ns.coll());
+}
+
+TEST(NamespaceStringTest, NamespaceStringParse4) {
+ NamespaceString ns("abc.");
+ ASSERT_EQUALS((string) "abc", ns.db());
+ ASSERT_EQUALS((string) "", ns.coll());
+}
+}
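
A hypothetical companion test, not part of the diff, exercising the nsIsFull()/nsIsDbOnly() helpers exactly as their comments in namespace_string.h describe:

    TEST(NamespaceStringTest, FullAndDbOnly) {
        ASSERT(!nsIsFull("foo"));
        ASSERT(!nsIsFull("foo."));
        ASSERT(nsIsFull("foo.a"));

        ASSERT(nsIsDbOnly("foo"));
        ASSERT(!nsIsDbOnly("foo."));
        ASSERT(!nsIsDbOnly("foo.a"));
    }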
diff --git a/src/mongo/db/op_observer.cpp b/src/mongo/db/op_observer.cpp
index 978f0677dfa..f152de49dad 100644
--- a/src/mongo/db/op_observer.cpp
+++ b/src/mongo/db/op_observer.cpp
@@ -43,183 +43,174 @@
namespace mongo {
- void OpObserver::onCreateIndex(OperationContext* txn,
- const std::string& ns,
- BSONObj indexDoc,
- bool fromMigrate) {
- repl::_logOp(txn, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
-
- getGlobalAuthorizationManager()->logOp(txn, "i", ns.c_str(), indexDoc, nullptr);
- logOpForSharding(txn, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
- logOpForDbHash(txn, ns.c_str());
+void OpObserver::onCreateIndex(OperationContext* txn,
+ const std::string& ns,
+ BSONObj indexDoc,
+ bool fromMigrate) {
+ repl::_logOp(txn, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
+
+ getGlobalAuthorizationManager()->logOp(txn, "i", ns.c_str(), indexDoc, nullptr);
+ logOpForSharding(txn, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
+ logOpForDbHash(txn, ns.c_str());
+}
+
+void OpObserver::onInsert(OperationContext* txn,
+ const NamespaceString& ns,
+ BSONObj doc,
+ bool fromMigrate) {
+ repl::_logOp(txn, "i", ns.ns().c_str(), doc, nullptr, fromMigrate);
+
+ getGlobalAuthorizationManager()->logOp(txn, "i", ns.ns().c_str(), doc, nullptr);
+ logOpForSharding(txn, "i", ns.ns().c_str(), doc, nullptr, fromMigrate);
+ logOpForDbHash(txn, ns.ns().c_str());
+ if (strstr(ns.ns().c_str(), ".system.js")) {
+ Scope::storedFuncMod(txn);
}
+}
- void OpObserver::onInsert(OperationContext* txn,
- const NamespaceString& ns,
- BSONObj doc,
- bool fromMigrate) {
- repl::_logOp(txn, "i", ns.ns().c_str(), doc, nullptr, fromMigrate);
-
- getGlobalAuthorizationManager()->logOp(txn, "i", ns.ns().c_str(), doc, nullptr);
- logOpForSharding(txn, "i", ns.ns().c_str(), doc, nullptr, fromMigrate);
- logOpForDbHash(txn, ns.ns().c_str());
- if (strstr(ns.ns().c_str(), ".system.js")) {
- Scope::storedFuncMod(txn);
- }
- }
-
- void OpObserver::onUpdate(OperationContext* txn,
- oplogUpdateEntryArgs args) {
- repl::_logOp(txn, "u", args.ns.c_str(), args.update, &args.criteria, args.fromMigrate);
-
- getGlobalAuthorizationManager()->logOp(txn,
- "u",
- args.ns.c_str(),
- args.update,
- &args.criteria);
- logOpForSharding(txn, "u", args.ns.c_str(), args.update, &args.criteria, args.fromMigrate);
- logOpForDbHash(txn, args.ns.c_str());
- if (strstr(args.ns.c_str(), ".system.js")) {
- Scope::storedFuncMod(txn);
- }
- }
+void OpObserver::onUpdate(OperationContext* txn, oplogUpdateEntryArgs args) {
+ repl::_logOp(txn, "u", args.ns.c_str(), args.update, &args.criteria, args.fromMigrate);
- void OpObserver::onDelete(OperationContext* txn,
- const std::string& ns,
- const BSONObj& idDoc,
- bool fromMigrate) {
- repl::_logOp(txn, "d", ns.c_str(), idDoc, nullptr, fromMigrate);
-
- getGlobalAuthorizationManager()->logOp(txn, "d", ns.c_str(), idDoc, nullptr);
- logOpForSharding(txn, "d", ns.c_str(), idDoc, nullptr, fromMigrate);
- logOpForDbHash(txn, ns.c_str());
- if (strstr(ns.c_str(), ".system.js")) {
- Scope::storedFuncMod(txn);
- }
+ getGlobalAuthorizationManager()->logOp(txn, "u", args.ns.c_str(), args.update, &args.criteria);
+ logOpForSharding(txn, "u", args.ns.c_str(), args.update, &args.criteria, args.fromMigrate);
+ logOpForDbHash(txn, args.ns.c_str());
+ if (strstr(args.ns.c_str(), ".system.js")) {
+ Scope::storedFuncMod(txn);
}
-
- void OpObserver::onOpMessage(OperationContext* txn, const BSONObj& msgObj) {
- repl::_logOp(txn, "n", "", msgObj, nullptr, false);
+}
+
+void OpObserver::onDelete(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& idDoc,
+ bool fromMigrate) {
+ repl::_logOp(txn, "d", ns.c_str(), idDoc, nullptr, fromMigrate);
+
+ getGlobalAuthorizationManager()->logOp(txn, "d", ns.c_str(), idDoc, nullptr);
+ logOpForSharding(txn, "d", ns.c_str(), idDoc, nullptr, fromMigrate);
+ logOpForDbHash(txn, ns.c_str());
+ if (strstr(ns.c_str(), ".system.js")) {
+ Scope::storedFuncMod(txn);
}
-
- void OpObserver::onCreateCollection(OperationContext* txn,
- const NamespaceString& collectionName,
- const CollectionOptions& options) {
- std::string dbName = collectionName.db().toString() + ".$cmd";
- BSONObjBuilder b;
- b.append("create", collectionName.coll().toString());
- b.appendElements(options.toBSON());
- BSONObj cmdObj = b.obj();
-
- if (!collectionName.isSystemDotProfile()) {
- // do not replicate system.profile modifications
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
-
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+}
+
+void OpObserver::onOpMessage(OperationContext* txn, const BSONObj& msgObj) {
+ repl::_logOp(txn, "n", "", msgObj, nullptr, false);
+}
+
+void OpObserver::onCreateCollection(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const CollectionOptions& options) {
+ std::string dbName = collectionName.db().toString() + ".$cmd";
+ BSONObjBuilder b;
+ b.append("create", collectionName.coll().toString());
+ b.appendElements(options.toBSON());
+ BSONObj cmdObj = b.obj();
+
+ if (!collectionName.isSystemDotProfile()) {
+ // do not replicate system.profile modifications
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
}
- void OpObserver::onCollMod(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& collModCmd) {
- BSONElement first = collModCmd.firstElement();
- std::string coll = first.valuestr();
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
- if (!NamespaceString(NamespaceString(dbName).db(), coll).isSystemDotProfile()) {
- // do not replicate system.profile modifications
- repl::_logOp(txn, "c", dbName.c_str(), collModCmd, nullptr, false);
- }
+void OpObserver::onCollMod(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& collModCmd) {
+ BSONElement first = collModCmd.firstElement();
+ std::string coll = first.valuestr();
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), collModCmd, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ if (!NamespaceString(NamespaceString(dbName).db(), coll).isSystemDotProfile()) {
+ // do not replicate system.profile modifications
+ repl::_logOp(txn, "c", dbName.c_str(), collModCmd, nullptr, false);
}
- void OpObserver::onDropDatabase(OperationContext* txn,
- const std::string& dbName) {
- BSONObj cmdObj = BSON("dropDatabase" << 1);
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), collModCmd, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
-
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
- }
+void OpObserver::onDropDatabase(OperationContext* txn, const std::string& dbName) {
+ BSONObj cmdObj = BSON("dropDatabase" << 1);
- void OpObserver::onDropCollection(OperationContext* txn,
- const NamespaceString& collectionName) {
- std::string dbName = collectionName.db().toString() + ".$cmd";
- BSONObj cmdObj = BSON("drop" << collectionName.coll().toString());
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- if (!collectionName.isSystemDotProfile()) {
- // do not replicate system.profile modifications
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
-
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
- }
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
- void OpObserver::onDropIndex(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& idxDescriptor) {
- repl::_logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr, false);
-
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr);
- logOpForDbHash(txn, dbName.c_str());
- }
-
- void OpObserver::onRenameCollection(OperationContext* txn,
- const NamespaceString& fromCollection,
- const NamespaceString& toCollection,
- bool dropTarget,
- bool stayTemp) {
- std::string dbName = fromCollection.db().toString() + ".$cmd";
- BSONObj cmdObj = BSON("renameCollection" << fromCollection <<
- "to" << toCollection <<
- "stayTemp" << stayTemp <<
- "dropTarget" << dropTarget);
+void OpObserver::onDropCollection(OperationContext* txn, const NamespaceString& collectionName) {
+ std::string dbName = collectionName.db().toString() + ".$cmd";
+ BSONObj cmdObj = BSON("drop" << collectionName.coll().toString());
+ if (!collectionName.isSystemDotProfile()) {
+ // do not replicate system.profile modifications
repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
-
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
}
- void OpObserver::onApplyOps(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& applyOpCmd) {
- repl::_logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr, false);
-
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
+
+void OpObserver::onDropIndex(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& idxDescriptor) {
+ repl::_logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr, false);
+
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
+
+void OpObserver::onRenameCollection(OperationContext* txn,
+ const NamespaceString& fromCollection,
+ const NamespaceString& toCollection,
+ bool dropTarget,
+ bool stayTemp) {
+ std::string dbName = fromCollection.db().toString() + ".$cmd";
+ BSONObj cmdObj = BSON("renameCollection" << fromCollection << "to" << toCollection << "stayTemp"
+ << stayTemp << "dropTarget" << dropTarget);
+
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
+
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
+
+void OpObserver::onApplyOps(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd) {
+ repl::_logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr, false);
+
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
+
+void OpObserver::onConvertToCapped(OperationContext* txn,
+ const NamespaceString& collectionName,
+ double size) {
+ std::string dbName = collectionName.db().toString() + ".$cmd";
+ BSONObj cmdObj = BSON("convertToCapped" << collectionName.coll() << "size" << size);
+
+ if (!collectionName.isSystemDotProfile()) {
+ // do not replicate system.profile modifications
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
}
- void OpObserver::onConvertToCapped(OperationContext* txn,
- const NamespaceString& collectionName,
- double size) {
- std::string dbName = collectionName.db().toString() + ".$cmd";
- BSONObj cmdObj = BSON("convertToCapped" << collectionName.coll() << "size" << size);
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
- if (!collectionName.isSystemDotProfile()) {
- // do not replicate system.profile modifications
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
+void OpObserver::onEmptyCapped(OperationContext* txn, const NamespaceString& collectionName) {
+ std::string dbName = collectionName.db().toString() + ".$cmd";
+ BSONObj cmdObj = BSON("emptycapped" << collectionName.coll());
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
+ if (!collectionName.isSystemDotProfile()) {
+ // do not replicate system.profile modifications
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
}
- void OpObserver::onEmptyCapped(OperationContext* txn, const NamespaceString& collectionName) {
- std::string dbName = collectionName.db().toString() + ".$cmd";
- BSONObj cmdObj = BSON("emptycapped" << collectionName.coll());
-
- if (!collectionName.isSystemDotProfile()) {
- // do not replicate system.profile modifications
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
-
- getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
- logOpForDbHash(txn, dbName.c_str());
- }
+ getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
+ logOpForDbHash(txn, dbName.c_str());
+}
-} // namespace mongo
+} // namespace mongo
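
A sketch of a hypothetical call site for the observer hooks above. insertDocument() is a placeholder, not a real MongoDB function; the sketch only shows the intended order (perform the write, then notify the observer):

    void insertAndNotify(OperationContext* txn,
                         OpObserver* observer,
                         const NamespaceString& nss,
                         const BSONObj& doc) {
        // insertDocument(txn, nss, doc);          // placeholder for the actual storage write
        observer->onInsert(txn, nss, doc, false);  // fromMigrate = false for ordinary writes
    }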
diff --git a/src/mongo/db/op_observer.h b/src/mongo/db/op_observer.h
index 4769c0733a4..4ecf7df20d0 100644
--- a/src/mongo/db/op_observer.h
+++ b/src/mongo/db/op_observer.h
@@ -34,59 +34,56 @@
#include "mongo/db/jsobj.h"
namespace mongo {
- struct CollectionOptions;
- class NamespaceString;
- class OperationContext;
+struct CollectionOptions;
+class NamespaceString;
+class OperationContext;
- struct oplogUpdateEntryArgs {
- std::string ns;
- BSONObj update;
- BSONObj criteria;
- bool fromMigrate;
- };
+struct oplogUpdateEntryArgs {
+ std::string ns;
+ BSONObj update;
+ BSONObj criteria;
+ bool fromMigrate;
+};
- class OpObserver {
- MONGO_DISALLOW_COPYING(OpObserver);
+class OpObserver {
+ MONGO_DISALLOW_COPYING(OpObserver);
- public:
- OpObserver() {}
- ~OpObserver() {}
- void onCreateIndex(OperationContext* txn,
- const std::string& ns,
- BSONObj indexDoc,
- bool fromMigrate = false);
- void onInsert(OperationContext* txn,
- const NamespaceString& ns,
- BSONObj doc,
- bool fromMigrate = false);
- void onUpdate(OperationContext* txn,
- oplogUpdateEntryArgs args);
- void onDelete(OperationContext* txn,
- const std::string& ns,
- const BSONObj& idDoc,
- bool fromMigrate = false);
- void onOpMessage(OperationContext* txn, const BSONObj& msgObj);
- void onCreateCollection(OperationContext* txn,
- const NamespaceString& collectionName,
- const CollectionOptions& options);
- void onCollMod(OperationContext* txn, const std::string& dbName, const BSONObj& collModCmd);
- void onDropDatabase(OperationContext* txn, const std::string& dbName);
- void onDropCollection(OperationContext* txn, const NamespaceString& collectionName);
- void onDropIndex(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& idxDescriptor);
- void onRenameCollection(OperationContext* txn,
- const NamespaceString& fromCollection,
- const NamespaceString& toCollection,
- bool dropTarget,
- bool stayTemp);
- void onApplyOps(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& applyOpCmd);
- void onEmptyCapped(OperationContext* txn, const NamespaceString& collectionName);
- void onConvertToCapped(OperationContext* txn,
- const NamespaceString& collectionName,
- double size);
- };
+public:
+ OpObserver() {}
+ ~OpObserver() {}
+ void onCreateIndex(OperationContext* txn,
+ const std::string& ns,
+ BSONObj indexDoc,
+ bool fromMigrate = false);
+ void onInsert(OperationContext* txn,
+ const NamespaceString& ns,
+ BSONObj doc,
+ bool fromMigrate = false);
+ void onUpdate(OperationContext* txn, oplogUpdateEntryArgs args);
+ void onDelete(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& idDoc,
+ bool fromMigrate = false);
+ void onOpMessage(OperationContext* txn, const BSONObj& msgObj);
+ void onCreateCollection(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const CollectionOptions& options);
+ void onCollMod(OperationContext* txn, const std::string& dbName, const BSONObj& collModCmd);
+ void onDropDatabase(OperationContext* txn, const std::string& dbName);
+ void onDropCollection(OperationContext* txn, const NamespaceString& collectionName);
+ void onDropIndex(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& idxDescriptor);
+ void onRenameCollection(OperationContext* txn,
+ const NamespaceString& fromCollection,
+ const NamespaceString& toCollection,
+ bool dropTarget,
+ bool stayTemp);
+ void onApplyOps(OperationContext* txn, const std::string& dbName, const BSONObj& applyOpCmd);
+ void onEmptyCapped(OperationContext* txn, const NamespaceString& collectionName);
+ void onConvertToCapped(OperationContext* txn,
+ const NamespaceString& collectionName,
+ double size);
+};
-} // namespace mongo
+} // namespace mongo
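
For the onUpdate() hook, the arguments travel in the oplogUpdateEntryArgs struct declared above. A hypothetical caller might look as follows; updateDoc and queryDoc are made-up BSON objects and notifyUpdate() is not a real MongoDB function:

    void notifyUpdate(OperationContext* txn,
                      OpObserver* observer,
                      const BSONObj& updateDoc,
                      const BSONObj& queryDoc) {
        oplogUpdateEntryArgs args;
        args.ns = "acme.orders";   // fully qualified namespace of the updated collection
        args.update = updateDoc;   // the update document
        args.criteria = queryDoc;  // criteria identifying the updated document
        args.fromMigrate = false;  // true only for writes driven by chunk migration
        observer->onUpdate(txn, args);
    }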
diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp
index 703eca38030..a327d2c18f3 100644
--- a/src/mongo/db/operation_context.cpp
+++ b/src/mongo/db/operation_context.cpp
@@ -34,21 +34,20 @@
namespace mongo {
- OperationContext::OperationContext(Client* client, unsigned int opId, Locker* locker) :
- _client(client), _opId(opId), _locker(locker) {
- }
-
- Client* OperationContext::getClient() const {
- invariant(_client);
- return _client;
- }
-
- void OperationContext::markKilled() {
- _killPending.store(1);
- }
-
- bool OperationContext::isKillPending() const {
- return _killPending.loadRelaxed();
- }
+OperationContext::OperationContext(Client* client, unsigned int opId, Locker* locker)
+ : _client(client), _opId(opId), _locker(locker) {}
+
+Client* OperationContext::getClient() const {
+ invariant(_client);
+ return _client;
+}
+
+void OperationContext::markKilled() {
+ _killPending.store(1);
+}
+
+bool OperationContext::isKillPending() const {
+ return _killPending.loadRelaxed();
+}
} // namespace mongo
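
A hypothetical sketch, not part of the diff, of how a long-running task might cooperate with markKilled() through the interrupt checks declared on OperationContext; processOneItem() is a placeholder:

    void processBatch(OperationContext* txn, long long numItems) {
        for (long long i = 0; i < numItems; ++i) {
            // Raises a UserAssertion once markKilled() has been called on this operation.
            txn->checkForInterrupt();
            // processOneItem(txn, i);  // placeholder for the real per-item work
        }
    }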
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index e7a05b65b20..741279f48e0 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -38,253 +38,256 @@
namespace mongo {
- class Client;
- class CurOp;
- class Locker;
- class ProgressMeter;
- class StringData;
- class WriteUnitOfWork;
+class Client;
+class CurOp;
+class Locker;
+class ProgressMeter;
+class StringData;
+class WriteUnitOfWork;
+/**
+ * This class encompasses the state required by an operation and lives from the time a network
+ * operation is dispatched until its execution is finished. Note that each "getmore" on a cursor
+ * is a separate operation. On construction, an OperationContext associates itself with the
+ * current client, and only on destruction does it dissociate itself. At any time a client can
+ * be associated with at most one OperationContext. Each OperationContext has a RecoveryUnit
+ * associated with it, though the lifetime is not necessarily the same; see releaseRecoveryUnit
+ * and setRecoveryUnit. The operation context also keeps track of some transaction state
+ * (RecoveryUnitState) to reduce complexity and duplication in the storage-engine specific
+ * RecoveryUnit and to allow better invariant checking.
+ */
+class OperationContext : public Decorable<OperationContext> {
+ MONGO_DISALLOW_COPYING(OperationContext);
+
+public:
/**
- * This class encompasses the state required by an operation and lives from the time a network
- * peration is dispatched until its execution is finished. Note that each "getmore" on a cursor
- * is a separate operation. On construction, an OperationContext associates itself with the
- * current client, and only on destruction it deassociates itself. At any time a client can be
- * associated with at most one OperationContext. Each OperationContext has a RecoveryUnit
- * associated with it, though the lifetime is not necesarily the same, see releaseRecoveryUnit
- * and setRecoveryUnit. The operation context also keeps track of some transaction state
- * (RecoveryUnitState) to reduce complexity and duplication in the storage-engine specific
- * RecoveryUnit and to allow better invariant checking.
+ * The RecoveryUnitState is used by WriteUnitOfWork to ensure valid state transitions.
*/
- class OperationContext : public Decorable<OperationContext> {
- MONGO_DISALLOW_COPYING(OperationContext);
-
- public:
- /**
- * The RecoveryUnitState is used by WriteUnitOfWork to ensure valid state transitions.
- */
- enum RecoveryUnitState {
- kNotInUnitOfWork, // not in a unit of work, no writes allowed
- kActiveUnitOfWork, // in a unit of work that still may either commit or abort
- kFailedUnitOfWork // in a unit of work that has failed and must be aborted
- };
-
- virtual ~OperationContext() = default;
-
- /**
- * Interface for durability. Caller DOES NOT own pointer.
- */
- virtual RecoveryUnit* recoveryUnit() const = 0;
-
-
- /**
- * Returns the RecoveryUnit (same return value as recoveryUnit()) but the caller takes
- * ownership of the returned RecoveryUnit, and the OperationContext instance relinquishes
- * ownership. Sets the RecoveryUnit to NULL.
- *
- * Used to transfer ownership of storage engine state from OperationContext
- * to ClientCursor for getMore-able queries.
- *
- * Note that we don't allow the top-level locks to be stored across getMore.
- * We rely on active cursors being killed when collections or databases are dropped,
- * or when collection metadata changes.
- */
- virtual RecoveryUnit* releaseRecoveryUnit() = 0;
-
- /**
- * Associates the OperatingContext with a different RecoveryUnit for getMore or
- * subtransactions, see RecoveryUnitSwap. The new state is passed and the old state is
- * returned separately even though the state logically belongs to the RecoveryUnit,
- * as it is managed by the OperationContext.
- */
- virtual RecoveryUnitState setRecoveryUnit(RecoveryUnit* unit, RecoveryUnitState state) = 0;
-
- /**
- * Interface for locking. Caller DOES NOT own pointer.
- */
- Locker* lockState() const { return _locker; }
-
- // --- operation level info? ---
-
- /**
- * Raises a UserAssertion if this operation is in a killed state.
- */
- virtual void checkForInterrupt() = 0;
-
- /**
- * Returns Status::OK() unless this operation is in a killed state.
- */
- virtual Status checkForInterruptNoAssert() = 0;
-
- /**
- * Delegates to CurOp, but is included here to break dependencies.
- * Caller does not own the pointer.
- *
- * Caller must have locked the "Client" associated with this context.
- */
- virtual ProgressMeter* setMessage_inlock(const char* msg,
- const std::string& name = "Progress",
- unsigned long long progressMeterTotal = 0,
- int secondsBetween = 3) = 0;
-
- /**
- * Delegates to CurOp, but is included here to break dependencies.
- *
- * TODO: We return a string because of hopefully transient CurOp thread-unsafe insanity.
- */
- virtual std::string getNS() const = 0;
-
- /**
- * Returns the client under which this context runs.
- */
- Client* getClient() const;
-
- virtual uint64_t getRemainingMaxTimeMicros() const = 0;
-
- /**
- * Returns the operation ID associated with this operation.
- */
- unsigned int getOpID() const { return _opId; }
-
- /**
- * @return true if this instance is primary for this namespace
- */
- virtual bool isPrimaryFor( StringData ns ) = 0;
-
- /**
- * Returns WriteConcernOptions of the current operation
- */
- const WriteConcernOptions& getWriteConcern() const {
- return _writeConcern;
- }
+ enum RecoveryUnitState {
+ kNotInUnitOfWork, // not in a unit of work, no writes allowed
+ kActiveUnitOfWork, // in a unit of work that still may either commit or abort
+ kFailedUnitOfWork // in a unit of work that has failed and must be aborted
+ };
- void setWriteConcern(const WriteConcernOptions& writeConcern) {
- _writeConcern = writeConcern;
- }
+ virtual ~OperationContext() = default;
- /**
- * Set whether or not operations should generate oplog entries.
- */
- virtual void setReplicatedWrites(bool writesAreReplicated = true) = 0;
-
- /**
- * Returns true if operations should generate oplog entries.
- */
- virtual bool writesAreReplicated() const = 0;
-
- /**
- * Marks this operation as killed.
- *
- * Subsequent calls to checkForInterrupt and checkForInterruptNoAssert by the thread
- * executing the operation will indicate that the operation has been killed.
- *
- * May be called by any thread that has locked the Client owning this operation context,
- * or by the thread executing on behalf of this operation context.
- */
- void markKilled();
-
- /**
- * Returns true if markKilled has been called on this operation context.
- *
- * May be called by any thread that has locked the Client owning this operation context,
- * or by the thread executing on behalf of this operation context.
- */
- bool isKillPending() const;
-
- protected:
- OperationContext(Client* client,
- unsigned int opId,
- Locker* locker);
-
- RecoveryUnitState _ruState = kNotInUnitOfWork;
-
- private:
- friend class WriteUnitOfWork;
- Client* const _client;
- const unsigned int _opId;
-
- // The lifetime of locker is managed by subclasses of OperationContext, so it is not
- // safe to access _locker in the destructor of OperationContext.
- Locker* const _locker;
-
- AtomicInt32 _killPending{0};
- WriteConcernOptions _writeConcern;
- };
+ /**
+ * Interface for durability. Caller DOES NOT own pointer.
+ */
+ virtual RecoveryUnit* recoveryUnit() const = 0;
- class WriteUnitOfWork {
- MONGO_DISALLOW_COPYING(WriteUnitOfWork);
- public:
- WriteUnitOfWork(OperationContext* txn)
- : _txn(txn),
- _committed(false),
- _toplevel(txn->_ruState == OperationContext::kNotInUnitOfWork) {
- _txn->lockState()->beginWriteUnitOfWork();
- if (_toplevel) {
- _txn->recoveryUnit()->beginUnitOfWork(_txn);
- _txn->_ruState = OperationContext::kActiveUnitOfWork;
- }
- }
- ~WriteUnitOfWork() {
- if (!_committed) {
- invariant(_txn->_ruState != OperationContext::kNotInUnitOfWork);
- if (_toplevel) {
- _txn->recoveryUnit()->abortUnitOfWork();
- _txn->_ruState = OperationContext::kNotInUnitOfWork;
- }
- else {
- _txn->_ruState = OperationContext::kFailedUnitOfWork;
- }
- _txn->lockState()->endWriteUnitOfWork();
- }
+ /**
+ * Returns the RecoveryUnit (same return value as recoveryUnit()) but the caller takes
+ * ownership of the returned RecoveryUnit, and the OperationContext instance relinquishes
+ * ownership. Sets the RecoveryUnit to NULL.
+ *
+ * Used to transfer ownership of storage engine state from OperationContext
+ * to ClientCursor for getMore-able queries.
+ *
+ * Note that we don't allow the top-level locks to be stored across getMore.
+ * We rely on active cursors being killed when collections or databases are dropped,
+ * or when collection metadata changes.
+ */
+ virtual RecoveryUnit* releaseRecoveryUnit() = 0;
+
+ /**
+     * Associates the OperationContext with a different RecoveryUnit for getMore or
+ * subtransactions, see RecoveryUnitSwap. The new state is passed and the old state is
+ * returned separately even though the state logically belongs to the RecoveryUnit,
+ * as it is managed by the OperationContext.
+ */
+ virtual RecoveryUnitState setRecoveryUnit(RecoveryUnit* unit, RecoveryUnitState state) = 0;
+
+ /**
+ * Interface for locking. Caller DOES NOT own pointer.
+ */
+ Locker* lockState() const {
+ return _locker;
+ }
+
+ // --- operation level info? ---
+
+ /**
+ * Raises a UserAssertion if this operation is in a killed state.
+ */
+ virtual void checkForInterrupt() = 0;
+
+ /**
+ * Returns Status::OK() unless this operation is in a killed state.
+ */
+ virtual Status checkForInterruptNoAssert() = 0;
+
+ /**
+ * Delegates to CurOp, but is included here to break dependencies.
+ * Caller does not own the pointer.
+ *
+ * Caller must have locked the "Client" associated with this context.
+ */
+ virtual ProgressMeter* setMessage_inlock(const char* msg,
+ const std::string& name = "Progress",
+ unsigned long long progressMeterTotal = 0,
+ int secondsBetween = 3) = 0;
+
+ /**
+ * Delegates to CurOp, but is included here to break dependencies.
+ *
+ * TODO: We return a string because of hopefully transient CurOp thread-unsafe insanity.
+ */
+ virtual std::string getNS() const = 0;
+
+ /**
+ * Returns the client under which this context runs.
+ */
+ Client* getClient() const;
+
+ virtual uint64_t getRemainingMaxTimeMicros() const = 0;
+
+ /**
+ * Returns the operation ID associated with this operation.
+ */
+ unsigned int getOpID() const {
+ return _opId;
+ }
+
+ /**
+ * @return true if this instance is primary for this namespace
+ */
+ virtual bool isPrimaryFor(StringData ns) = 0;
+
+ /**
+ * Returns WriteConcernOptions of the current operation
+ */
+ const WriteConcernOptions& getWriteConcern() const {
+ return _writeConcern;
+ }
+
+ void setWriteConcern(const WriteConcernOptions& writeConcern) {
+ _writeConcern = writeConcern;
+ }
+
+ /**
+ * Set whether or not operations should generate oplog entries.
+ */
+ virtual void setReplicatedWrites(bool writesAreReplicated = true) = 0;
+
+ /**
+ * Returns true if operations should generate oplog entries.
+ */
+ virtual bool writesAreReplicated() const = 0;
+
+ /**
+ * Marks this operation as killed.
+ *
+ * Subsequent calls to checkForInterrupt and checkForInterruptNoAssert by the thread
+ * executing the operation will indicate that the operation has been killed.
+ *
+ * May be called by any thread that has locked the Client owning this operation context,
+ * or by the thread executing on behalf of this operation context.
+ */
+ void markKilled();
+
+ /**
+ * Returns true if markKilled has been called on this operation context.
+ *
+ * May be called by any thread that has locked the Client owning this operation context,
+ * or by the thread executing on behalf of this operation context.
+ */
+ bool isKillPending() const;
+
+protected:
+ OperationContext(Client* client, unsigned int opId, Locker* locker);
+
+ RecoveryUnitState _ruState = kNotInUnitOfWork;
+
+private:
+ friend class WriteUnitOfWork;
+ Client* const _client;
+ const unsigned int _opId;
+
+ // The lifetime of locker is managed by subclasses of OperationContext, so it is not
+ // safe to access _locker in the destructor of OperationContext.
+ Locker* const _locker;
+
+ AtomicInt32 _killPending{0};
+ WriteConcernOptions _writeConcern;
+};
+
+class WriteUnitOfWork {
+ MONGO_DISALLOW_COPYING(WriteUnitOfWork);
+
+public:
+ WriteUnitOfWork(OperationContext* txn)
+ : _txn(txn),
+ _committed(false),
+ _toplevel(txn->_ruState == OperationContext::kNotInUnitOfWork) {
+ _txn->lockState()->beginWriteUnitOfWork();
+ if (_toplevel) {
+ _txn->recoveryUnit()->beginUnitOfWork(_txn);
+ _txn->_ruState = OperationContext::kActiveUnitOfWork;
}
+ }
- void commit() {
- invariant(!_committed);
- invariant (_txn->_ruState == OperationContext::kActiveUnitOfWork);
+ ~WriteUnitOfWork() {
+ if (!_committed) {
+ invariant(_txn->_ruState != OperationContext::kNotInUnitOfWork);
if (_toplevel) {
- _txn->recoveryUnit()->commitUnitOfWork();
+ _txn->recoveryUnit()->abortUnitOfWork();
_txn->_ruState = OperationContext::kNotInUnitOfWork;
+ } else {
+ _txn->_ruState = OperationContext::kFailedUnitOfWork;
}
_txn->lockState()->endWriteUnitOfWork();
- _committed = true;
}
+ }
+
+ void commit() {
+ invariant(!_committed);
+ invariant(_txn->_ruState == OperationContext::kActiveUnitOfWork);
+ if (_toplevel) {
+ _txn->recoveryUnit()->commitUnitOfWork();
+ _txn->_ruState = OperationContext::kNotInUnitOfWork;
+ }
+ _txn->lockState()->endWriteUnitOfWork();
+ _committed = true;
+ }
- private:
- OperationContext* const _txn;
+private:
+ OperationContext* const _txn;
+
+ bool _committed;
+ bool _toplevel;
+};
- bool _committed;
- bool _toplevel;
- };
+/**
+ * RAII-style class to mark the scope of a transaction. ScopedTransactions may be nested.
+ * An outermost ScopedTransaction calls abandonSnapshot() on destruction, so that the storage
+ * engine can release resources, such as snapshots or locks, that it may have acquired during
+ * the transaction. Note that any writes are committed in nested WriteUnitOfWork scopes,
+ * so write conflicts cannot happen on completing a ScopedTransaction.
+ *
+ * TODO: The ScopedTransaction should hold the global lock
+ */
+class ScopedTransaction {
+ MONGO_DISALLOW_COPYING(ScopedTransaction);
+public:
/**
- * RAII-style class to mark the scope of a transaction. ScopedTransactions may be nested.
- * An outermost ScopedTransaction calls abandonSnapshot() on destruction, so that the storage
- * engine can release resources, such as snapshots or locks, that it may have acquired during
- * the transaction. Note that any writes are committed in nested WriteUnitOfWork scopes,
- * so write conflicts cannot happen on completing a ScopedTransaction.
- *
- * TODO: The ScopedTransaction should hold the global lock
+ * The mode for the transaction indicates whether the transaction will write (MODE_IX) or
+ * only read (MODE_IS), or needs to run without other writers (MODE_S) or any other
+ * operations (MODE_X) on the server.
*/
- class ScopedTransaction {
- MONGO_DISALLOW_COPYING(ScopedTransaction);
- public:
- /**
- * The mode for the transaction indicates whether the transaction will write (MODE_IX) or
- * only read (MODE_IS), or needs to run without other writers (MODE_S) or any other
- * operations (MODE_X) on the server.
- */
- ScopedTransaction(OperationContext* txn, LockMode mode) : _txn(txn) { }
-
- ~ScopedTransaction() {
- if (!_txn->lockState()->isLocked()) {
- _txn->recoveryUnit()->abandonSnapshot();
- }
+ ScopedTransaction(OperationContext* txn, LockMode mode) : _txn(txn) {}
+
+ ~ScopedTransaction() {
+ if (!_txn->lockState()->isLocked()) {
+ _txn->recoveryUnit()->abandonSnapshot();
}
+ }
- private:
- OperationContext* _txn;
- };
+private:
+ OperationContext* _txn;
+};
} // namespace mongo
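For orientation, a minimal sketch of how the two RAII types declared above are typically nested, assuming the caller already has an OperationContext* txn; the doInsert() helper is hypothetical and stands in for any storage-engine write:

void performWrite(OperationContext* txn) {
    // Outermost transaction scope; MODE_IX because we intend to write.
    ScopedTransaction transaction(txn, MODE_IX);

    // Begin a (possibly nested) unit of work on the locker and recovery unit.
    WriteUnitOfWork wuow(txn);

    doInsert(txn);  // hypothetical write against the storage engine

    // Without an explicit commit(), the destructor aborts the unit of work.
    wuow.commit();
}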
diff --git a/src/mongo/db/operation_context_impl.cpp b/src/mongo/db/operation_context_impl.cpp
index 5c677c44e41..28ff6585620 100644
--- a/src/mongo/db/operation_context_impl.cpp
+++ b/src/mongo/db/operation_context_impl.cpp
@@ -49,176 +49,175 @@
namespace mongo {
namespace {
- std::unique_ptr<Locker> newLocker() {
- if (isMMAPV1()) return stdx::make_unique<MMAPV1LockerImpl>();
- return stdx::make_unique<DefaultLockerImpl>();
- }
-
- class ClientOperationInfo {
- public:
- Locker* getLocker() {
- if (!_locker) {
- _locker = newLocker();
- }
- return _locker.get();
+std::unique_ptr<Locker> newLocker() {
+ if (isMMAPV1())
+ return stdx::make_unique<MMAPV1LockerImpl>();
+ return stdx::make_unique<DefaultLockerImpl>();
+}
+
+class ClientOperationInfo {
+public:
+ Locker* getLocker() {
+ if (!_locker) {
+ _locker = newLocker();
}
+ return _locker.get();
+ }
- private:
- std::unique_ptr<Locker> _locker;
- };
+private:
+ std::unique_ptr<Locker> _locker;
+};
- const auto clientOperationInfoDecoration = Client::declareDecoration<ClientOperationInfo>();
+const auto clientOperationInfoDecoration = Client::declareDecoration<ClientOperationInfo>();
- AtomicUInt32 nextOpId{1};
+AtomicUInt32 nextOpId{1};
} // namespace
- using std::string;
+using std::string;
+
+OperationContextImpl::OperationContextImpl()
+ : OperationContext(
+ &cc(), nextOpId.fetchAndAdd(1), clientOperationInfoDecoration(cc()).getLocker()),
+ _writesAreReplicated(true) {
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ _recovery.reset(storageEngine->newRecoveryUnit());
+
+ auto client = getClient();
+ stdx::lock_guard<Client> lk(*client);
+ client->setOperationContext(this);
+}
+
+OperationContextImpl::~OperationContextImpl() {
+ lockState()->assertEmptyAndReset();
+ auto client = getClient();
+ stdx::lock_guard<Client> lk(*client);
+ client->resetOperationContext();
+}
+
+RecoveryUnit* OperationContextImpl::recoveryUnit() const {
+ return _recovery.get();
+}
+
+RecoveryUnit* OperationContextImpl::releaseRecoveryUnit() {
+ if (_recovery.get())
+ _recovery->beingReleasedFromOperationContext();
+ return _recovery.release();
+}
+
+OperationContext::RecoveryUnitState OperationContextImpl::setRecoveryUnit(RecoveryUnit* unit,
+ RecoveryUnitState state) {
+ _recovery.reset(unit);
+ RecoveryUnitState oldState = _ruState;
+ _ruState = state;
+ if (unit)
+ unit->beingSetOnOperationContext();
+ return oldState;
+}
+
+ProgressMeter* OperationContextImpl::setMessage_inlock(const char* msg,
+ const std::string& name,
+ unsigned long long progressMeterTotal,
+ int secondsBetween) {
+ return &CurOp::get(this)->setMessage_inlock(msg, name, progressMeterTotal, secondsBetween);
+}
+
+string OperationContextImpl::getNS() const {
+ return CurOp::get(this)->getNS();
+}
+
+uint64_t OperationContextImpl::getRemainingMaxTimeMicros() const {
+ return CurOp::get(this)->getRemainingMaxTimeMicros();
+}
+
+// Enabling the checkForInterruptFail fail point will start a game of random chance on the
+// connection specified in the fail point data, generating an interrupt with a given fixed
+// probability. Example invocation:
+//
+// {configureFailPoint: "checkForInterruptFail",
+// mode: "alwaysOn",
+// data: {conn: 17, chance: .01, allowNested: true}}
+//
+// All three data fields must be specified. In the above example, all interrupt points on
+// connection 17 will generate a kill on the current operation with probability p(.01),
+// including interrupt points of nested operations. If "allowNested" is false, nested
+// operations are not targeted. "chance" must be a double between 0 and 1, inclusive.
+MONGO_FP_DECLARE(checkForInterruptFail);
- OperationContextImpl::OperationContextImpl()
- : OperationContext(&cc(),
- nextOpId.fetchAndAdd(1),
- clientOperationInfoDecoration(cc()).getLocker()),
- _writesAreReplicated(true) {
+namespace {
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- _recovery.reset(storageEngine->newRecoveryUnit());
+// Global state for checkForInterrupt fail point.
+PseudoRandom checkForInterruptPRNG(static_cast<int64_t>(time(NULL)));
- auto client = getClient();
- stdx::lock_guard<Client> lk(*client);
- client->setOperationContext(this);
+// Helper function for checkForInterrupt fail point. Decides whether the operation currently
+// being run by the given Client meets the (probabilistic) conditions for interruption as
+// specified in the fail point info.
+bool opShouldFail(const OperationContextImpl* opCtx, const BSONObj& failPointInfo) {
+ // Only target the client with the specified connection number.
+ if (opCtx->getClient()->getConnectionId() != failPointInfo["conn"].safeNumberLong()) {
+ return false;
}
- OperationContextImpl::~OperationContextImpl() {
- lockState()->assertEmptyAndReset();
- auto client = getClient();
- stdx::lock_guard<Client> lk(*client);
- client->resetOperationContext();
+ // Only target nested operations if requested.
+ if (!failPointInfo["allowNested"].trueValue() && CurOp::get(opCtx)->parent() != NULL) {
+ return false;
}
- RecoveryUnit* OperationContextImpl::recoveryUnit() const {
- return _recovery.get();
+ // Return true with (approx) probability p = "chance". Recall: 0 <= chance <= 1.
+ double next = static_cast<double>(std::abs(checkForInterruptPRNG.nextInt64()));
+ double upperBound =
+ std::numeric_limits<int64_t>::max() * failPointInfo["chance"].numberDouble();
+ if (next > upperBound) {
+ return false;
}
+ return true;
+}
- RecoveryUnit* OperationContextImpl::releaseRecoveryUnit() {
- if ( _recovery.get() )
- _recovery->beingReleasedFromOperationContext();
- return _recovery.release();
- }
+} // namespace
- OperationContext::RecoveryUnitState OperationContextImpl::setRecoveryUnit(RecoveryUnit* unit,
- RecoveryUnitState state) {
- _recovery.reset(unit);
- RecoveryUnitState oldState = _ruState;
- _ruState = state;
- if ( unit )
- unit->beingSetOnOperationContext();
- return oldState;
- }
+void OperationContextImpl::checkForInterrupt() {
+ // We cannot interrupt an operation while it is inside a write unit of work, because logOp
+ // cannot handle being interrupted.
+ if (lockState()->inAWriteUnitOfWork())
+ return;
- ProgressMeter* OperationContextImpl::setMessage_inlock(const char * msg,
- const std::string &name,
- unsigned long long progressMeterTotal,
- int secondsBetween) {
- return &CurOp::get(this)->setMessage_inlock(msg, name, progressMeterTotal, secondsBetween);
- }
+ uassertStatusOK(checkForInterruptNoAssert());
+}
- string OperationContextImpl::getNS() const {
- return CurOp::get(this)->getNS();
+Status OperationContextImpl::checkForInterruptNoAssert() {
+ if (getGlobalServiceContext()->getKillAllOperations()) {
+ return Status(ErrorCodes::InterruptedAtShutdown, "interrupted at shutdown");
}
- uint64_t OperationContextImpl::getRemainingMaxTimeMicros() const {
- return CurOp::get(this)->getRemainingMaxTimeMicros();
+ CurOp* curOp = CurOp::get(this);
+ if (curOp->maxTimeHasExpired()) {
+ markKilled();
+ return Status(ErrorCodes::ExceededTimeLimit, "operation exceeded time limit");
}
- // Enabling the checkForInterruptFail fail point will start a game of random chance on the
- // connection specified in the fail point data, generating an interrupt with a given fixed
- // probability. Example invocation:
- //
- // {configureFailPoint: "checkForInterruptFail",
- // mode: "alwaysOn",
- // data: {conn: 17, chance: .01, allowNested: true}}
- //
- // All three data fields must be specified. In the above example, all interrupt points on
- // connection 17 will generate a kill on the current operation with probability p(.01),
- // including interrupt points of nested operations. If "allowNested" is false, nested
- // operations are not targeted. "chance" must be a double between 0 and 1, inclusive.
- MONGO_FP_DECLARE(checkForInterruptFail);
-
- namespace {
-
- // Global state for checkForInterrupt fail point.
- PseudoRandom checkForInterruptPRNG(static_cast<int64_t>(time(NULL)));
-
- // Helper function for checkForInterrupt fail point. Decides whether the operation currently
- // being run by the given Client meet the (probabilistic) conditions for interruption as
- // specified in the fail point info.
- bool opShouldFail(const OperationContextImpl* opCtx, const BSONObj& failPointInfo) {
- // Only target the client with the specified connection number.
- if (opCtx->getClient()->getConnectionId() != failPointInfo["conn"].safeNumberLong()) {
- return false;
- }
-
- // Only target nested operations if requested.
- if (!failPointInfo["allowNested"].trueValue() && CurOp::get(opCtx)->parent() != NULL) {
- return false;
- }
-
- // Return true with (approx) probability p = "chance". Recall: 0 <= chance <= 1.
- double next = static_cast<double>(std::abs(checkForInterruptPRNG.nextInt64()));
- double upperBound =
- std::numeric_limits<int64_t>::max() * failPointInfo["chance"].numberDouble();
- if (next > upperBound) {
- return false;
- }
- return true;
- }
-
- } // namespace
-
- void OperationContextImpl::checkForInterrupt() {
- // We cannot interrupt operation, while it's inside of a write unit of work, because logOp
- // cannot handle being iterrupted.
- if (lockState()->inAWriteUnitOfWork()) return;
-
- uassertStatusOK(checkForInterruptNoAssert());
- }
-
- Status OperationContextImpl::checkForInterruptNoAssert() {
- if (getGlobalServiceContext()->getKillAllOperations()) {
- return Status(ErrorCodes::InterruptedAtShutdown, "interrupted at shutdown");
- }
-
- CurOp* curOp = CurOp::get(this);
- if (curOp->maxTimeHasExpired()) {
+ MONGO_FAIL_POINT_BLOCK(checkForInterruptFail, scopedFailPoint) {
+ if (opShouldFail(this, scopedFailPoint.getData())) {
+ log() << "set pending kill on " << (curOp->parent() ? "nested" : "top-level") << " op "
+ << getOpID() << ", for checkForInterruptFail";
markKilled();
- return Status(ErrorCodes::ExceededTimeLimit, "operation exceeded time limit");
- }
-
- MONGO_FAIL_POINT_BLOCK(checkForInterruptFail, scopedFailPoint) {
- if (opShouldFail(this, scopedFailPoint.getData())) {
- log() << "set pending kill on "
- << (curOp->parent() ? "nested" : "top-level")
- << " op " << getOpID() << ", for checkForInterruptFail";
- markKilled();
- }
- }
-
- if (isKillPending()) {
- return Status(ErrorCodes::Interrupted, "operation was interrupted");
}
-
- return Status::OK();
}
- bool OperationContextImpl::isPrimaryFor( StringData ns ) {
- return repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(NamespaceString(ns));
+ if (isKillPending()) {
+ return Status(ErrorCodes::Interrupted, "operation was interrupted");
}
- void OperationContextImpl::setReplicatedWrites(bool writesAreReplicated) {
- _writesAreReplicated = writesAreReplicated;
- }
+ return Status::OK();
+}
- bool OperationContextImpl::writesAreReplicated() const {
- return _writesAreReplicated;
- }
+bool OperationContextImpl::isPrimaryFor(StringData ns) {
+ return repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(NamespaceString(ns));
+}
+
+void OperationContextImpl::setReplicatedWrites(bool writesAreReplicated) {
+ _writesAreReplicated = writesAreReplicated;
+}
+
+bool OperationContextImpl::writesAreReplicated() const {
+ return _writesAreReplicated;
+}
} // namespace mongo
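The chance-based gate in opShouldFail above fires with probability approximately equal to the "chance" field by comparing a non-negative 63-bit draw against chance * INT64_MAX. A self-contained sketch of the same idea, using std::mt19937_64 rather than the PseudoRandom class used in this file (purely illustrative, not MongoDB's API):

#include <cstdint>
#include <limits>
#include <random>

// Returns true with probability ~chance (0 <= chance <= 1).
bool shouldFire(std::mt19937_64& prng, double chance) {
    // Keep only 63 bits so the draw is non-negative, mirroring std::abs(nextInt64()).
    const double draw = static_cast<double>(prng() >> 1);
    const double upperBound =
        static_cast<double>(std::numeric_limits<std::int64_t>::max()) * chance;
    return draw <= upperBound;
}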
diff --git a/src/mongo/db/operation_context_impl.h b/src/mongo/db/operation_context_impl.h
index 36b6688bfab..b24772d1313 100644
--- a/src/mongo/db/operation_context_impl.h
+++ b/src/mongo/db/operation_context_impl.h
@@ -33,39 +33,38 @@
namespace mongo {
- class OperationContextImpl : public OperationContext {
- public:
- OperationContextImpl();
+class OperationContextImpl : public OperationContext {
+public:
+ OperationContextImpl();
- virtual ~OperationContextImpl();
+ virtual ~OperationContextImpl();
- virtual RecoveryUnit* recoveryUnit() const override;
+ virtual RecoveryUnit* recoveryUnit() const override;
- virtual RecoveryUnit* releaseRecoveryUnit() override;
+ virtual RecoveryUnit* releaseRecoveryUnit() override;
- virtual RecoveryUnitState setRecoveryUnit(RecoveryUnit* unit,
- RecoveryUnitState state) override;
+ virtual RecoveryUnitState setRecoveryUnit(RecoveryUnit* unit, RecoveryUnitState state) override;
- virtual ProgressMeter* setMessage_inlock(const char* msg,
- const std::string& name,
- unsigned long long progressMeterTotal,
- int secondsBetween) override;
+ virtual ProgressMeter* setMessage_inlock(const char* msg,
+ const std::string& name,
+ unsigned long long progressMeterTotal,
+ int secondsBetween) override;
- virtual std::string getNS() const override;
+ virtual std::string getNS() const override;
- virtual uint64_t getRemainingMaxTimeMicros() const override;
+ virtual uint64_t getRemainingMaxTimeMicros() const override;
- virtual void checkForInterrupt() override;
- virtual Status checkForInterruptNoAssert() override;
+ virtual void checkForInterrupt() override;
+ virtual Status checkForInterruptNoAssert() override;
- virtual bool isPrimaryFor( StringData ns ) override;
+ virtual bool isPrimaryFor(StringData ns) override;
- virtual void setReplicatedWrites(bool writesAreReplicated = true) override;
- virtual bool writesAreReplicated() const override;
+ virtual void setReplicatedWrites(bool writesAreReplicated = true) override;
+ virtual bool writesAreReplicated() const override;
- private:
- std::unique_ptr<RecoveryUnit> _recovery;
- bool _writesAreReplicated;
- };
+private:
+ std::unique_ptr<RecoveryUnit> _recovery;
+ bool _writesAreReplicated;
+};
} // namespace mongo
diff --git a/src/mongo/db/operation_context_noop.h b/src/mongo/db/operation_context_noop.h
index 5466806b276..172ba5f5d5d 100644
--- a/src/mongo/db/operation_context_noop.h
+++ b/src/mongo/db/operation_context_noop.h
@@ -36,84 +36,78 @@
namespace mongo {
- class OperationContextNoop : public OperationContext {
- public:
-
- OperationContextNoop() : OperationContextNoop(new RecoveryUnitNoop()) {}
-
- OperationContextNoop(RecoveryUnit* ru) : OperationContextNoop(nullptr, 0, ru) {}
-
- OperationContextNoop(Client* client, unsigned int opId)
- : OperationContextNoop(client, opId, new RecoveryUnitNoop()) {
- }
-
- OperationContextNoop(Client* client, unsigned int opId, RecoveryUnit* ru)
- : OperationContextNoop(client, opId, new LockerNoop(), ru) {
- }
-
- OperationContextNoop(Client* client, unsigned int opId, Locker* locker)
- : OperationContextNoop(client, opId, locker, new RecoveryUnitNoop()) {
- }
-
- OperationContextNoop(Client* client, unsigned int opId, Locker* locker, RecoveryUnit* ru)
- : OperationContext(client, opId, locker),
- _recoveryUnit(ru) {
-
- _locker.reset(lockState());
- }
-
- virtual ~OperationContextNoop() = default;
-
- virtual RecoveryUnit* recoveryUnit() const override {
- return _recoveryUnit.get();
- }
-
- virtual RecoveryUnit* releaseRecoveryUnit() override {
- return _recoveryUnit.release();
- }
-
- virtual RecoveryUnitState setRecoveryUnit(RecoveryUnit* unit,
- RecoveryUnitState state) override {
- RecoveryUnitState oldState = _ruState;
- _recoveryUnit.reset(unit);
- _ruState = state;
- return oldState;
- }
-
- virtual ProgressMeter* setMessage_inlock(const char * msg,
- const std::string &name,
- unsigned long long progressMeterTotal,
- int secondsBetween) override {
- return &_pm;
- }
-
- virtual void checkForInterrupt() override { }
- virtual Status checkForInterruptNoAssert() override {
- return Status::OK();
- }
-
- virtual bool isPrimaryFor( StringData ns ) override {
- return true;
- }
-
- virtual std::string getNS() const override {
- return std::string();
- };
+class OperationContextNoop : public OperationContext {
+public:
+ OperationContextNoop() : OperationContextNoop(new RecoveryUnitNoop()) {}
+
+ OperationContextNoop(RecoveryUnit* ru) : OperationContextNoop(nullptr, 0, ru) {}
+
+ OperationContextNoop(Client* client, unsigned int opId)
+ : OperationContextNoop(client, opId, new RecoveryUnitNoop()) {}
+
+ OperationContextNoop(Client* client, unsigned int opId, RecoveryUnit* ru)
+ : OperationContextNoop(client, opId, new LockerNoop(), ru) {}
+
+ OperationContextNoop(Client* client, unsigned int opId, Locker* locker)
+ : OperationContextNoop(client, opId, locker, new RecoveryUnitNoop()) {}
+
+ OperationContextNoop(Client* client, unsigned int opId, Locker* locker, RecoveryUnit* ru)
+ : OperationContext(client, opId, locker), _recoveryUnit(ru) {
+ _locker.reset(lockState());
+ }
+
+ virtual ~OperationContextNoop() = default;
+
+ virtual RecoveryUnit* recoveryUnit() const override {
+ return _recoveryUnit.get();
+ }
+
+ virtual RecoveryUnit* releaseRecoveryUnit() override {
+ return _recoveryUnit.release();
+ }
+
+ virtual RecoveryUnitState setRecoveryUnit(RecoveryUnit* unit,
+ RecoveryUnitState state) override {
+ RecoveryUnitState oldState = _ruState;
+ _recoveryUnit.reset(unit);
+ _ruState = state;
+ return oldState;
+ }
+
+ virtual ProgressMeter* setMessage_inlock(const char* msg,
+ const std::string& name,
+ unsigned long long progressMeterTotal,
+ int secondsBetween) override {
+ return &_pm;
+ }
+
+ virtual void checkForInterrupt() override {}
+ virtual Status checkForInterruptNoAssert() override {
+ return Status::OK();
+ }
+
+ virtual bool isPrimaryFor(StringData ns) override {
+ return true;
+ }
+
+ virtual std::string getNS() const override {
+ return std::string();
+ };
- void setReplicatedWrites(bool writesAreReplicated = true) override {}
+ void setReplicatedWrites(bool writesAreReplicated = true) override {}
- bool writesAreReplicated() const override {
- return false;
- }
+ bool writesAreReplicated() const override {
+ return false;
+ }
- virtual uint64_t getRemainingMaxTimeMicros() const override {
- return 0;
- }
+ virtual uint64_t getRemainingMaxTimeMicros() const override {
+ return 0;
+ }
- private:
- std::unique_ptr<RecoveryUnit> _recoveryUnit;
- std::unique_ptr<Locker> _locker;
- ProgressMeter _pm;
- };
+private:
+ std::unique_ptr<RecoveryUnit> _recoveryUnit;
+ std::unique_ptr<Locker> _locker;
+ ProgressMeter _pm;
+};
} // namespace mongo
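A small sketch of how this no-op context tends to be used: unit tests that need an OperationContext* but no real locking or storage engine can construct one directly. The ASSERT_OK/ASSERT_EQUALS macros here are the usual unittest helpers and are assumed to be available:

OperationContextNoop txn;  // wires up RecoveryUnitNoop and LockerNoop internally
ASSERT_OK(txn.checkForInterruptNoAssert());       // the noop never reports interruption
ASSERT_EQUALS(0ULL, txn.getRemainingMaxTimeMicros());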
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index ecf4f752873..86ed88395b5 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -38,41 +38,41 @@
namespace mongo {
- /* ns: namespace, e.g. <database>.<collection>
- pattern: the "where" clause / criteria
- justOne: stop after 1 match
- god: allow access to system namespaces, and don't yield
- */
- long long deleteObjects(OperationContext* txn,
- Database* db,
- StringData ns,
- BSONObj pattern,
- PlanExecutor::YieldPolicy policy,
- bool justOne,
- bool god,
- bool fromMigrate) {
- NamespaceString nsString(ns);
- DeleteRequest request(nsString);
- request.setQuery(pattern);
- request.setMulti(!justOne);
- request.setGod(god);
- request.setFromMigrate(fromMigrate);
- request.setYieldPolicy(policy);
+/* ns: namespace, e.g. <database>.<collection>
+ pattern: the "where" clause / criteria
+ justOne: stop after 1 match
+ god: allow access to system namespaces, and don't yield
+*/
+long long deleteObjects(OperationContext* txn,
+ Database* db,
+ StringData ns,
+ BSONObj pattern,
+ PlanExecutor::YieldPolicy policy,
+ bool justOne,
+ bool god,
+ bool fromMigrate) {
+ NamespaceString nsString(ns);
+ DeleteRequest request(nsString);
+ request.setQuery(pattern);
+ request.setMulti(!justOne);
+ request.setGod(god);
+ request.setFromMigrate(fromMigrate);
+ request.setYieldPolicy(policy);
- Collection* collection = NULL;
- if (db) {
- collection = db->getCollection(nsString.ns());
- }
+ Collection* collection = NULL;
+ if (db) {
+ collection = db->getCollection(nsString.ns());
+ }
- ParsedDelete parsedDelete(txn, &request);
- uassertStatusOK(parsedDelete.parseRequest());
+ ParsedDelete parsedDelete(txn, &request);
+ uassertStatusOK(parsedDelete.parseRequest());
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- uassertStatusOK(exec->executePlan());
- return DeleteStage::getNumDeleted(exec.get());
- }
+ uassertStatusOK(exec->executePlan());
+ return DeleteStage::getNumDeleted(exec.get());
+}
} // namespace mongo
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index 563734428ab..b4b7345d70b 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -36,16 +36,15 @@
namespace mongo {
- class Database;
- class OperationContext;
-
- long long deleteObjects(OperationContext* txn,
- Database* db,
- StringData ns,
- BSONObj pattern,
- PlanExecutor::YieldPolicy policy,
- bool justOne,
- bool god = false,
- bool fromMigrate = false);
-
+class Database;
+class OperationContext;
+
+long long deleteObjects(OperationContext* txn,
+ Database* db,
+ StringData ns,
+ BSONObj pattern,
+ PlanExecutor::YieldPolicy policy,
+ bool justOne,
+ bool god = false,
+ bool fromMigrate = false);
}
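A hedged sketch of a call to the helper declared above; txn and db are assumed to be a valid OperationContext and an already-locked Database, and the namespace and filter are purely illustrative:

const long long nDeleted = deleteObjects(txn,
                                         db,
                                         "test.people",                 // <database>.<collection>
                                         BSON("status" << "inactive"),  // the "where" criteria
                                         PlanExecutor::YIELD_MANUAL,
                                         /*justOne*/ false);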
diff --git a/src/mongo/db/ops/delete_request.h b/src/mongo/db/ops/delete_request.h
index 27ea4f2bbb9..8ab27dab596 100644
--- a/src/mongo/db/ops/delete_request.h
+++ b/src/mongo/db/ops/delete_request.h
@@ -36,52 +36,91 @@
namespace mongo {
- class DeleteRequest {
- MONGO_DISALLOW_COPYING(DeleteRequest);
- public:
- explicit DeleteRequest(const NamespaceString& nsString) :
- _nsString(nsString),
- _multi(false),
- _god(false),
- _fromMigrate(false),
- _isExplain(false),
- _returnDeleted(false),
- _yieldPolicy(PlanExecutor::YIELD_MANUAL) {}
+class DeleteRequest {
+ MONGO_DISALLOW_COPYING(DeleteRequest);
- void setQuery(const BSONObj& query) { _query = query; }
- void setProj(const BSONObj& proj) { _proj = proj; }
- void setSort(const BSONObj& sort) { _sort = sort; }
- void setMulti(bool multi = true) { _multi = multi; }
- void setGod(bool god = true) { _god = god; }
- void setFromMigrate(bool fromMigrate = true) { _fromMigrate = fromMigrate; }
- void setExplain(bool isExplain = true) { _isExplain = isExplain; }
- void setReturnDeleted(bool returnDeleted = true) { _returnDeleted = returnDeleted; }
- void setYieldPolicy(PlanExecutor::YieldPolicy yieldPolicy) { _yieldPolicy = yieldPolicy; }
+public:
+ explicit DeleteRequest(const NamespaceString& nsString)
+ : _nsString(nsString),
+ _multi(false),
+ _god(false),
+ _fromMigrate(false),
+ _isExplain(false),
+ _returnDeleted(false),
+ _yieldPolicy(PlanExecutor::YIELD_MANUAL) {}
- const NamespaceString& getNamespaceString() const { return _nsString; }
- const BSONObj& getQuery() const { return _query; }
- const BSONObj& getProj() const { return _proj; }
- const BSONObj& getSort() const { return _sort; }
- bool isMulti() const { return _multi; }
- bool isGod() const { return _god; }
- bool isFromMigrate() const { return _fromMigrate; }
- bool isExplain() const { return _isExplain; }
- bool shouldReturnDeleted() const { return _returnDeleted; }
- PlanExecutor::YieldPolicy getYieldPolicy() const { return _yieldPolicy; }
+ void setQuery(const BSONObj& query) {
+ _query = query;
+ }
+ void setProj(const BSONObj& proj) {
+ _proj = proj;
+ }
+ void setSort(const BSONObj& sort) {
+ _sort = sort;
+ }
+ void setMulti(bool multi = true) {
+ _multi = multi;
+ }
+ void setGod(bool god = true) {
+ _god = god;
+ }
+ void setFromMigrate(bool fromMigrate = true) {
+ _fromMigrate = fromMigrate;
+ }
+ void setExplain(bool isExplain = true) {
+ _isExplain = isExplain;
+ }
+ void setReturnDeleted(bool returnDeleted = true) {
+ _returnDeleted = returnDeleted;
+ }
+ void setYieldPolicy(PlanExecutor::YieldPolicy yieldPolicy) {
+ _yieldPolicy = yieldPolicy;
+ }
- std::string toString() const;
+ const NamespaceString& getNamespaceString() const {
+ return _nsString;
+ }
+ const BSONObj& getQuery() const {
+ return _query;
+ }
+ const BSONObj& getProj() const {
+ return _proj;
+ }
+ const BSONObj& getSort() const {
+ return _sort;
+ }
+ bool isMulti() const {
+ return _multi;
+ }
+ bool isGod() const {
+ return _god;
+ }
+ bool isFromMigrate() const {
+ return _fromMigrate;
+ }
+ bool isExplain() const {
+ return _isExplain;
+ }
+ bool shouldReturnDeleted() const {
+ return _returnDeleted;
+ }
+ PlanExecutor::YieldPolicy getYieldPolicy() const {
+ return _yieldPolicy;
+ }
- private:
- const NamespaceString& _nsString;
- BSONObj _query;
- BSONObj _proj;
- BSONObj _sort;
- bool _multi;
- bool _god;
- bool _fromMigrate;
- bool _isExplain;
- bool _returnDeleted;
- PlanExecutor::YieldPolicy _yieldPolicy;
- };
+ std::string toString() const;
+
+private:
+ const NamespaceString& _nsString;
+ BSONObj _query;
+ BSONObj _proj;
+ BSONObj _sort;
+ bool _multi;
+ bool _god;
+ bool _fromMigrate;
+ bool _isExplain;
+ bool _returnDeleted;
+ PlanExecutor::YieldPolicy _yieldPolicy;
+};
} // namespace mongo
diff --git a/src/mongo/db/ops/field_checker.cpp b/src/mongo/db/ops/field_checker.cpp
index da6607ac229..0c71c7e5d07 100644
--- a/src/mongo/db/ops/field_checker.cpp
+++ b/src/mongo/db/ops/field_checker.cpp
@@ -34,51 +34,50 @@
namespace mongo {
- using mongoutils::str::stream;
+using mongoutils::str::stream;
namespace fieldchecker {
- Status isUpdatable(const FieldRef& field) {
- const size_t numParts = field.numParts();
+Status isUpdatable(const FieldRef& field) {
+ const size_t numParts = field.numParts();
- if (numParts == 0) {
- return Status(ErrorCodes::EmptyFieldName,
- "An empty update path is not valid.");
- }
+ if (numParts == 0) {
+ return Status(ErrorCodes::EmptyFieldName, "An empty update path is not valid.");
+ }
- for (size_t i = 0; i != numParts; ++i) {
- const StringData part = field.getPart(i);
+ for (size_t i = 0; i != numParts; ++i) {
+ const StringData part = field.getPart(i);
- if (part.empty()) {
- return Status(ErrorCodes::EmptyFieldName,
- mongoutils::str::stream() << "The update path '"
- << field.dottedField()
+ if (part.empty()) {
+ return Status(ErrorCodes::EmptyFieldName,
+ mongoutils::str::stream()
+ << "The update path '" << field.dottedField()
<< "' contains an empty field name, which is not allowed.");
- }
}
-
- return Status::OK();
}
- bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count) {
+ return Status::OK();
+}
- // 'count' is optional.
- size_t dummy;
- if (count == NULL) {
- count = &dummy;
- }
+bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count) {
+ // 'count' is optional.
+ size_t dummy;
+ if (count == NULL) {
+ count = &dummy;
+ }
- *count = 0;
- size_t size = fieldRef.numParts();
- for (size_t i=0; i<size; i++) {
- StringData fieldPart = fieldRef.getPart(i);
- if ((fieldPart.size() == 1) && (fieldPart[0] == '$')) {
- if (*count == 0) *pos = i;
- (*count)++;
- }
+ *count = 0;
+ size_t size = fieldRef.numParts();
+ for (size_t i = 0; i < size; i++) {
+ StringData fieldPart = fieldRef.getPart(i);
+ if ((fieldPart.size() == 1) && (fieldPart[0] == '$')) {
+ if (*count == 0)
+ *pos = i;
+ (*count)++;
}
- return *count > 0;
}
+ return *count > 0;
+}
-} // namespace fieldchecker
-} // namespace mongo
+} // namespace fieldchecker
+} // namespace mongo
diff --git a/src/mongo/db/ops/field_checker.h b/src/mongo/db/ops/field_checker.h
index 544ea131074..772034d4773 100644
--- a/src/mongo/db/ops/field_checker.h
+++ b/src/mongo/db/ops/field_checker.h
@@ -32,28 +32,28 @@
namespace mongo {
- class FieldRef;
-
- namespace fieldchecker {
-
- /**
- * Returns OK if all the below conditions on 'field' are valid:
- * + Non-empty
- * + Does not start or end with a '.'
- * Otherwise returns a code indicating cause of failure.
- */
- Status isUpdatable(const FieldRef& field);
-
- /**
- * Returns true, the position 'pos' of the first $-sign if present in 'fieldRef', and
- * how many other $-signs were found in 'count'. Otherwise return false.
- *
- * Note:
- * isPositional assumes that the field is updatable. Call isUpdatable() above to
- * verify.
- */
- bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count = NULL);
-
- } // namespace fieldchecker
-
-} // namespace mongo
+class FieldRef;
+
+namespace fieldchecker {
+
+/**
+ * Returns OK if all the below conditions on 'field' are valid:
+ * + Non-empty
+ * + Does not start or end with a '.'
+ * Otherwise returns a code indicating cause of failure.
+ */
+Status isUpdatable(const FieldRef& field);
+
+/**
+ * Returns true if 'fieldRef' contains a positional ($) component, reporting the position of
+ * the first one in 'pos' and the total number found in 'count'. Otherwise returns false.
+ *
+ * Note:
+ * isPositional assumes that the field is updatable. Call isUpdatable() above to
+ * verify.
+ */
+bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count = NULL);
+
+} // namespace fieldchecker
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/field_checker_test.cpp b/src/mongo/db/ops/field_checker_test.cpp
index 470197e5e39..5ee608688e9 100644
--- a/src/mongo/db/ops/field_checker_test.cpp
+++ b/src/mongo/db/ops/field_checker_test.cpp
@@ -35,72 +35,72 @@
namespace {
- using mongo::ErrorCodes;
- using mongo::FieldRef;
- using mongo::fieldchecker::isUpdatable;
- using mongo::fieldchecker::isPositional;
- using mongo::Status;
-
- TEST(IsUpdatable, Basics) {
- FieldRef fieldRef("x");
- ASSERT_OK(isUpdatable(fieldRef));
- }
-
- TEST(IsUpdatable, DottedFields) {
- FieldRef fieldRef("x.y.z");
- ASSERT_OK(isUpdatable(fieldRef));
- }
-
- TEST(IsUpdatable, EmptyFields) {
- FieldRef fieldRef("");
- ASSERT_NOT_OK(isUpdatable(fieldRef));
-
- FieldRef fieldRefDot(".");
- ASSERT_NOT_OK(isUpdatable(fieldRefDot));
-
- /* TODO: Re-enable after review
- FieldRef fieldRefDollar;
- fieldRefDollar.parse("$");
- ASSERT_NOT_OK(isUpdatable(fieldRefDollar));
+using mongo::ErrorCodes;
+using mongo::FieldRef;
+using mongo::fieldchecker::isUpdatable;
+using mongo::fieldchecker::isPositional;
+using mongo::Status;
+
+TEST(IsUpdatable, Basics) {
+ FieldRef fieldRef("x");
+ ASSERT_OK(isUpdatable(fieldRef));
+}
+
+TEST(IsUpdatable, DottedFields) {
+ FieldRef fieldRef("x.y.z");
+ ASSERT_OK(isUpdatable(fieldRef));
+}
+
+TEST(IsUpdatable, EmptyFields) {
+ FieldRef fieldRef("");
+ ASSERT_NOT_OK(isUpdatable(fieldRef));
+
+ FieldRef fieldRefDot(".");
+ ASSERT_NOT_OK(isUpdatable(fieldRefDot));
+
+ /* TODO: Re-enable after review
+ FieldRef fieldRefDollar;
+ fieldRefDollar.parse("$");
+ ASSERT_NOT_OK(isUpdatable(fieldRefDollar));
*/
- FieldRef fieldRefADot("a.");
- ASSERT_NOT_OK(isUpdatable(fieldRefADot));
-
- FieldRef fieldRefDotB(".b");
- ASSERT_NOT_OK(isUpdatable(fieldRefDotB));
-
- FieldRef fieldRefEmptyMiddle;
- fieldRefEmptyMiddle.parse("a..b");
- ASSERT_NOT_OK(isUpdatable(fieldRefEmptyMiddle));
- }
-
- // Positional checks
- TEST(isPositional, EntireArrayItem) {
- FieldRef fieldRefPositional("a.$");
- size_t pos;
- size_t count;
- ASSERT_TRUE(isPositional(fieldRefPositional, &pos, &count));
- ASSERT_EQUALS(pos, 1u);
- ASSERT_EQUALS(count, 1u);
- }
-
- TEST(isPositional, ArraySubObject) {
- FieldRef fieldRefPositional("a.$.b");
- size_t pos;
- size_t count;
- ASSERT_TRUE(isPositional(fieldRefPositional, &pos, &count));
- ASSERT_EQUALS(pos, 1u);
- ASSERT_EQUALS(count, 1u);
- }
-
- TEST(isPositional, MultiplePositional) {
- FieldRef fieldRefPositional("a.$.b.$.c");
- size_t pos;
- size_t count;
- ASSERT_TRUE(isPositional(fieldRefPositional, &pos, &count));
- ASSERT_EQUALS(pos, 1u);
- ASSERT_EQUALS(count, 2u);
- }
-} // unnamed namespace
+ FieldRef fieldRefADot("a.");
+ ASSERT_NOT_OK(isUpdatable(fieldRefADot));
+
+ FieldRef fieldRefDotB(".b");
+ ASSERT_NOT_OK(isUpdatable(fieldRefDotB));
+
+ FieldRef fieldRefEmptyMiddle;
+ fieldRefEmptyMiddle.parse("a..b");
+ ASSERT_NOT_OK(isUpdatable(fieldRefEmptyMiddle));
+}
+
+// Positional checks
+TEST(isPositional, EntireArrayItem) {
+ FieldRef fieldRefPositional("a.$");
+ size_t pos;
+ size_t count;
+ ASSERT_TRUE(isPositional(fieldRefPositional, &pos, &count));
+ ASSERT_EQUALS(pos, 1u);
+ ASSERT_EQUALS(count, 1u);
+}
+
+TEST(isPositional, ArraySubObject) {
+ FieldRef fieldRefPositional("a.$.b");
+ size_t pos;
+ size_t count;
+ ASSERT_TRUE(isPositional(fieldRefPositional, &pos, &count));
+ ASSERT_EQUALS(pos, 1u);
+ ASSERT_EQUALS(count, 1u);
+}
+
+TEST(isPositional, MultiplePositional) {
+ FieldRef fieldRefPositional("a.$.b.$.c");
+ size_t pos;
+ size_t count;
+ ASSERT_TRUE(isPositional(fieldRefPositional, &pos, &count));
+ ASSERT_EQUALS(pos, 1u);
+ ASSERT_EQUALS(count, 2u);
+}
+} // unnamed namespace
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index 38a2a478043..827bdc69c39 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -34,174 +34,174 @@
namespace mongo {
- using std::string;
-
- using namespace mongoutils;
-
- StatusWith<BSONObj> fixDocumentForInsert( const BSONObj& doc ) {
- if ( doc.objsize() > BSONObjMaxUserSize )
- return StatusWith<BSONObj>( ErrorCodes::BadValue,
- str::stream()
- << "object to insert too large"
- << ". size in bytes: " << doc.objsize()
- << ", max size: " << BSONObjMaxUserSize );
-
- bool firstElementIsId = doc.firstElement().fieldNameStringData() == "_id";
- bool hasTimestampToFix = false;
- {
- BSONObjIterator i( doc );
- while ( i.more() ) {
- BSONElement e = i.next();
-
- if ( e.type() == bsonTimestamp && e.timestampValue() == 0 ) {
- // we replace Timestamp(0,0) at the top level with a correct value
- // in the fast pass, we just mark that we want to swap
- hasTimestampToFix = true;
- }
-
- const char* fieldName = e.fieldName();
-
- if ( fieldName[0] == '$' ) {
- return StatusWith<BSONObj>( ErrorCodes::BadValue,
- str::stream()
- << "Document can't have $ prefixed field names: "
- << e.fieldName() );
- }
-
- // check no regexp for _id (SERVER-9502)
- // also, disallow undefined and arrays
- if ( str::equals( fieldName, "_id") ) {
- if ( e.type() == RegEx ) {
- return StatusWith<BSONObj>( ErrorCodes::BadValue,
- "can't use a regex for _id" );
- }
- if ( e.type() == Undefined ) {
- return StatusWith<BSONObj>( ErrorCodes::BadValue,
- "can't use a undefined for _id" );
- }
- if ( e.type() == Array ) {
- return StatusWith<BSONObj>( ErrorCodes::BadValue,
- "can't use an array for _id" );
- }
- if ( e.type() == Object ) {
- BSONObj o = e.Obj();
- Status s = o.storageValidEmbedded();
- if ( !s.isOK() )
- return StatusWith<BSONObj>( s );
- }
- }
+using std::string;
+
+using namespace mongoutils;
+
+StatusWith<BSONObj> fixDocumentForInsert(const BSONObj& doc) {
+ if (doc.objsize() > BSONObjMaxUserSize)
+ return StatusWith<BSONObj>(ErrorCodes::BadValue,
+ str::stream() << "object to insert too large"
+ << ". size in bytes: " << doc.objsize()
+ << ", max size: " << BSONObjMaxUserSize);
+
+ bool firstElementIsId = doc.firstElement().fieldNameStringData() == "_id";
+ bool hasTimestampToFix = false;
+ {
+ BSONObjIterator i(doc);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == bsonTimestamp && e.timestampValue() == 0) {
+ // we replace Timestamp(0,0) at the top level with a correct value
+ // in the fast pass, we just mark that we want to swap
+ hasTimestampToFix = true;
}
- }
-
- if ( firstElementIsId && !hasTimestampToFix )
- return StatusWith<BSONObj>( BSONObj() );
- bool hadId = firstElementIsId;
+ const char* fieldName = e.fieldName();
- BSONObjIterator i( doc );
-
- BSONObjBuilder b( doc.objsize() + 16 );
- if ( firstElementIsId ) {
- b.append( doc.firstElement() );
- i.next();
- }
- else {
- BSONElement e = doc["_id"];
- if ( e.type() ) {
- b.append( e );
- hadId = true;
+ if (fieldName[0] == '$') {
+ return StatusWith<BSONObj>(ErrorCodes::BadValue,
+ str::stream()
+ << "Document can't have $ prefixed field names: "
+ << e.fieldName());
}
- else {
- b.appendOID( "_id", NULL, true );
- }
- }
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( hadId && e.fieldNameStringData() == "_id" ) {
- // no-op
- }
- else if ( e.type() == bsonTimestamp && e.timestampValue() == 0 ) {
- b.append( e.fieldName(), getNextGlobalTimestamp() );
- }
- else {
- b.append( e );
+ // check no regexp for _id (SERVER-9502)
+ // also, disallow undefined and arrays
+ if (str::equals(fieldName, "_id")) {
+ if (e.type() == RegEx) {
+ return StatusWith<BSONObj>(ErrorCodes::BadValue, "can't use a regex for _id");
+ }
+ if (e.type() == Undefined) {
+ return StatusWith<BSONObj>(ErrorCodes::BadValue,
+ "can't use a undefined for _id");
+ }
+ if (e.type() == Array) {
+ return StatusWith<BSONObj>(ErrorCodes::BadValue, "can't use an array for _id");
+ }
+ if (e.type() == Object) {
+ BSONObj o = e.Obj();
+ Status s = o.storageValidEmbedded();
+ if (!s.isOK())
+ return StatusWith<BSONObj>(s);
+ }
}
}
- return StatusWith<BSONObj>( b.obj() );
}
- Status userAllowedWriteNS( StringData ns ) {
- return userAllowedWriteNS( nsToDatabaseSubstring( ns ), nsToCollectionSubstring( ns ) );
- }
+ if (firstElementIsId && !hasTimestampToFix)
+ return StatusWith<BSONObj>(BSONObj());
- Status userAllowedWriteNS( const NamespaceString& ns ) {
- return userAllowedWriteNS( ns.db(), ns.coll() );
- }
+ bool hadId = firstElementIsId;
+
+ BSONObjIterator i(doc);
- Status userAllowedWriteNS( StringData db, StringData coll ) {
- if ( coll == "system.profile" ) {
- return Status( ErrorCodes::BadValue,
- str::stream() << "cannot write to '" << db << ".system.profile'" );
+ BSONObjBuilder b(doc.objsize() + 16);
+ if (firstElementIsId) {
+ b.append(doc.firstElement());
+ i.next();
+ } else {
+ BSONElement e = doc["_id"];
+ if (e.type()) {
+ b.append(e);
+ hadId = true;
+ } else {
+ b.appendOID("_id", NULL, true);
}
- return userAllowedCreateNS( db, coll );
}
- Status userAllowedCreateNS( StringData db, StringData coll ) {
- // validity checking
-
- if ( db.size() == 0 )
- return Status( ErrorCodes::BadValue, "db cannot be blank" );
-
- if ( !NamespaceString::validDBName( db ) )
- return Status( ErrorCodes::BadValue, "invalid db name" );
-
- if ( coll.size() == 0 )
- return Status( ErrorCodes::BadValue, "collection cannot be blank" );
-
- if ( !NamespaceString::validCollectionName( coll ) )
- return Status( ErrorCodes::BadValue, "invalid collection name" );
-
- if ( db.size() + 1 /* dot */ + coll.size() > NamespaceString::MaxNsCollectionLen )
- return Status( ErrorCodes::BadValue,
- str::stream()
- << "fully qualified namespace " << db << '.' << coll << " is too long "
- << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)" );
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (hadId && e.fieldNameStringData() == "_id") {
+ // no-op
+ } else if (e.type() == bsonTimestamp && e.timestampValue() == 0) {
+ b.append(e.fieldName(), getNextGlobalTimestamp());
+ } else {
+ b.append(e);
+ }
+ }
+ return StatusWith<BSONObj>(b.obj());
+}
- // check spceial areas
+Status userAllowedWriteNS(StringData ns) {
+ return userAllowedWriteNS(nsToDatabaseSubstring(ns), nsToCollectionSubstring(ns));
+}
- if ( db == "system" )
- return Status( ErrorCodes::BadValue, "cannot use 'system' database" );
+Status userAllowedWriteNS(const NamespaceString& ns) {
+ return userAllowedWriteNS(ns.db(), ns.coll());
+}
+Status userAllowedWriteNS(StringData db, StringData coll) {
+ if (coll == "system.profile") {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "cannot write to '" << db << ".system.profile'");
+ }
+ return userAllowedCreateNS(db, coll);
+}
- if ( coll.startsWith( "system." ) ) {
- if ( coll == "system.indexes" ) return Status::OK();
- if ( coll == "system.js" ) return Status::OK();
- if ( coll == "system.profile" ) return Status::OK();
- if ( coll == "system.users" ) return Status::OK();
- if ( db == "admin" ) {
- if ( coll == "system.version" ) return Status::OK();
- if ( coll == "system.roles" ) return Status::OK();
- if ( coll == "system.new_users" ) return Status::OK();
- if ( coll == "system.backup_users" ) return Status::OK();
- }
- if ( db == "local" ) {
- if ( coll == "system.replset" ) return Status::OK();
- }
- return Status( ErrorCodes::BadValue,
- str::stream() << "cannot write to '" << db << "." << coll << "'" );
+Status userAllowedCreateNS(StringData db, StringData coll) {
+ // validity checking
+
+ if (db.size() == 0)
+ return Status(ErrorCodes::BadValue, "db cannot be blank");
+
+ if (!NamespaceString::validDBName(db))
+ return Status(ErrorCodes::BadValue, "invalid db name");
+
+ if (coll.size() == 0)
+ return Status(ErrorCodes::BadValue, "collection cannot be blank");
+
+ if (!NamespaceString::validCollectionName(coll))
+ return Status(ErrorCodes::BadValue, "invalid collection name");
+
+ if (db.size() + 1 /* dot */ + coll.size() > NamespaceString::MaxNsCollectionLen)
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "fully qualified namespace " << db << '.' << coll << " is too long "
+ << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)");
+
+ // check special areas
+
+ if (db == "system")
+ return Status(ErrorCodes::BadValue, "cannot use 'system' database");
+
+
+ if (coll.startsWith("system.")) {
+ if (coll == "system.indexes")
+ return Status::OK();
+ if (coll == "system.js")
+ return Status::OK();
+ if (coll == "system.profile")
+ return Status::OK();
+ if (coll == "system.users")
+ return Status::OK();
+ if (db == "admin") {
+ if (coll == "system.version")
+ return Status::OK();
+ if (coll == "system.roles")
+ return Status::OK();
+ if (coll == "system.new_users")
+ return Status::OK();
+ if (coll == "system.backup_users")
+ return Status::OK();
}
-
- // some special rules
-
- if ( coll.find( ".system." ) != string::npos ) {
- // this matches old (2.4 and older) behavior, but I'm not sure its a good idea
- return Status( ErrorCodes::BadValue,
- str::stream() << "cannot write to '" << db << "." << coll << "'" );
+ if (db == "local") {
+ if (coll == "system.replset")
+ return Status::OK();
}
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "cannot write to '" << db << "." << coll << "'");
+ }
- return Status::OK();
+ // some special rules
+
+ if (coll.find(".system.") != string::npos) {
+ // this matches old (2.4 and older) behavior, but I'm not sure it's a good idea
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "cannot write to '" << db << "." << coll << "'");
}
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/ops/insert.h b/src/mongo/db/ops/insert.h
index f55f8e80ea6..e0204c290c7 100644
--- a/src/mongo/db/ops/insert.h
+++ b/src/mongo/db/ops/insert.h
@@ -33,25 +33,24 @@
namespace mongo {
- /**
- * if doc is ok, then return is BSONObj()
- * otherwise, BSONObj is what should be inserted instead
- */
- StatusWith<BSONObj> fixDocumentForInsert( const BSONObj& doc );
-
+/**
+ * If 'doc' is fine to insert as-is, the returned BSONObj is empty (BSONObj()).
+ * Otherwise, the returned BSONObj is what should be inserted instead.
+ */
+StatusWith<BSONObj> fixDocumentForInsert(const BSONObj& doc);
- /**
- * Returns Status::OK() if this namespace is valid for user write operations. If not, returns
- * an error Status.
- */
- Status userAllowedWriteNS( StringData db, StringData coll );
- Status userAllowedWriteNS( StringData ns );
- Status userAllowedWriteNS( const NamespaceString& ns );
- /**
- * Returns Status::OK() if the namespace described by (db, coll) is valid for user create
- * operations. If not, returns an error Status.
- */
- Status userAllowedCreateNS( StringData db, StringData coll );
+/**
+ * Returns Status::OK() if this namespace is valid for user write operations. If not, returns
+ * an error Status.
+ */
+Status userAllowedWriteNS(StringData db, StringData coll);
+Status userAllowedWriteNS(StringData ns);
+Status userAllowedWriteNS(const NamespaceString& ns);
+/**
+ * Returns Status::OK() if the namespace described by (db, coll) is valid for user create
+ * operations. If not, returns an error Status.
+ */
+Status userAllowedCreateNS(StringData db, StringData coll);
}
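Given the convention documented above (an empty BSONObj means "insert the original document unchanged"), a typical call site looks roughly like the following sketch; doc is assumed to be the incoming user document:

StatusWith<BSONObj> fixed = fixDocumentForInsert(doc);
if (!fixed.isOK()) {
    return fixed.getStatus();  // e.g. $-prefixed field name, bad _id, oversized document
}
const BSONObj toInsert = fixed.getValue().isEmpty() ? doc : fixed.getValue();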
diff --git a/src/mongo/db/ops/log_builder.cpp b/src/mongo/db/ops/log_builder.cpp
index 437c9056e5e..21baffe246c 100644
--- a/src/mongo/db/ops/log_builder.cpp
+++ b/src/mongo/db/ops/log_builder.cpp
@@ -31,143 +31,128 @@
namespace mongo {
- using mutablebson::Document;
- using mutablebson::Element;
- namespace str = mongoutils::str;
-
- namespace {
- const char kSet[] = "$set";
- const char kUnset[] = "$unset";
- } // namespace
-
- inline Status LogBuilder::addToSection(Element newElt,
- Element* section,
- const char* sectionName) {
-
- // If we don't already have this section, try to create it now.
- if (!section->ok()) {
-
- // If we already have object replacement data, we can't also have section entries.
- if (hasObjectReplacement())
- return Status(
- ErrorCodes::IllegalOperation,
- "LogBuilder: Invalid attempt to add a $set/$unset entry"
- "to a log with an existing object replacement");
-
- Document& doc = _logRoot.getDocument();
-
- // We should not already have an element with the section name under the root.
- dassert(_logRoot[sectionName] == doc.end());
-
- // Construct a new object element to represent this section in the log.
- const Element newElement = doc.makeElementObject(sectionName);
- if (!newElement.ok())
- return Status(ErrorCodes::InternalError,
- "LogBuilder: failed to construct Object Element for $set/$unset");
-
- // Enqueue the new section under the root, and record it as our out parameter.
- Status result = _logRoot.pushBack(newElement);
- if (!result.isOK())
- return result;
- *section = newElement;
-
- // Invalidate attempts to add an object replacement, now that we have a named
- // section under the root.
- _objectReplacementAccumulator = doc.end();
- }
-
- // Whatever transpired, we should now have an ok accumulator for the section, and not
- // have a replacement accumulator.
- dassert(section->ok());
- dassert(!_objectReplacementAccumulator.ok());
-
- // Enqueue the provided element to the section and propagate the result.
- return section->pushBack(newElt);
- }
-
- Status LogBuilder::addToSets(Element elt) {
- return addToSection(elt, &_setAccumulator, kSet);
- }
+using mutablebson::Document;
+using mutablebson::Element;
+namespace str = mongoutils::str;
+
+namespace {
+const char kSet[] = "$set";
+const char kUnset[] = "$unset";
+} // namespace
+
+inline Status LogBuilder::addToSection(Element newElt, Element* section, const char* sectionName) {
+ // If we don't already have this section, try to create it now.
+ if (!section->ok()) {
+ // If we already have object replacement data, we can't also have section entries.
+ if (hasObjectReplacement())
+ return Status(ErrorCodes::IllegalOperation,
+ "LogBuilder: Invalid attempt to add a $set/$unset entry"
+ "to a log with an existing object replacement");
- Status LogBuilder::addToSetsWithNewFieldName(StringData name,
- const mutablebson::Element val) {
- mutablebson::Element elemToSet =
- _logRoot.getDocument().makeElementWithNewFieldName(name, val);
- if (!elemToSet.ok())
- return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '"
- << name << "' element from existing element '"
- << val.getFieldName() << "' of type "
- << typeName(val.getType()));
+ Document& doc = _logRoot.getDocument();
- return addToSets(elemToSet);
- }
+ // We should not already have an element with the section name under the root.
+ dassert(_logRoot[sectionName] == doc.end());
- Status LogBuilder::addToSetsWithNewFieldName(StringData name,
- const BSONElement& val){
- mutablebson::Element elemToSet =
- _logRoot.getDocument().makeElementWithNewFieldName(name, val);
- if (!elemToSet.ok())
+ // Construct a new object element to represent this section in the log.
+ const Element newElement = doc.makeElementObject(sectionName);
+ if (!newElement.ok())
return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '"
- << name << "' element from existing element '"
- << val.fieldName() << "' of type "
- << typeName(val.type()));
+ "LogBuilder: failed to construct Object Element for $set/$unset");
- return addToSets(elemToSet);
- }
+ // Enqueue the new section under the root, and record it as our out parameter.
+ Status result = _logRoot.pushBack(newElement);
+ if (!result.isOK())
+ return result;
+ *section = newElement;
- Status LogBuilder::addToSets(StringData name, const SafeNum& val){
- mutablebson::Element elemToSet = _logRoot.getDocument().makeElementSafeNum(name, val);
- if (!elemToSet.ok())
- return Status(ErrorCodes::InternalError,
- str::stream() << "Could not create new '"
- << name << "' SafeNum from "
- << val.debugString());
-
- return addToSets(elemToSet);
+ // Invalidate attempts to add an object replacement, now that we have a named
+ // section under the root.
+ _objectReplacementAccumulator = doc.end();
}
- Status LogBuilder::addToUnsets(StringData path) {
- mutablebson::Element logElement = _logRoot.getDocument().makeElementBool(path, true);
- if (!logElement.ok())
- return Status(ErrorCodes::InternalError,
- str::stream() << "Cannot create $unset oplog entry for path" << path);
-
- return addToSection(logElement, &_unsetAccumulator, kUnset);
+ // Whatever transpired, we should now have an ok accumulator for the section, and not
+ // have a replacement accumulator.
+ dassert(section->ok());
+ dassert(!_objectReplacementAccumulator.ok());
+
+ // Enqueue the provided element to the section and propagate the result.
+ return section->pushBack(newElt);
+}
+
+Status LogBuilder::addToSets(Element elt) {
+ return addToSection(elt, &_setAccumulator, kSet);
+}
+
+Status LogBuilder::addToSetsWithNewFieldName(StringData name, const mutablebson::Element val) {
+ mutablebson::Element elemToSet = _logRoot.getDocument().makeElementWithNewFieldName(name, val);
+ if (!elemToSet.ok())
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Could not create new '" << name
+ << "' element from existing element '" << val.getFieldName()
+ << "' of type " << typeName(val.getType()));
+
+ return addToSets(elemToSet);
+}
+
+Status LogBuilder::addToSetsWithNewFieldName(StringData name, const BSONElement& val) {
+ mutablebson::Element elemToSet = _logRoot.getDocument().makeElementWithNewFieldName(name, val);
+ if (!elemToSet.ok())
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Could not create new '" << name
+ << "' element from existing element '" << val.fieldName()
+ << "' of type " << typeName(val.type()));
+
+ return addToSets(elemToSet);
+}
+
+Status LogBuilder::addToSets(StringData name, const SafeNum& val) {
+ mutablebson::Element elemToSet = _logRoot.getDocument().makeElementSafeNum(name, val);
+ if (!elemToSet.ok())
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Could not create new '" << name << "' SafeNum from "
+ << val.debugString());
+
+ return addToSets(elemToSet);
+}
+
+Status LogBuilder::addToUnsets(StringData path) {
+ mutablebson::Element logElement = _logRoot.getDocument().makeElementBool(path, true);
+ if (!logElement.ok())
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Cannot create $unset oplog entry for path" << path);
+
+ return addToSection(logElement, &_unsetAccumulator, kUnset);
+}
+
+Status LogBuilder::getReplacementObject(Element* outElt) {
+ // If the replacement accumulator is not ok, we must have started a $set or $unset
+ // already, so an object replacement is not permitted.
+ if (!_objectReplacementAccumulator.ok()) {
+ dassert(_setAccumulator.ok() || _unsetAccumulator.ok());
+ return Status(ErrorCodes::IllegalOperation,
+ "LogBuilder: Invalid attempt to obtain the object replacement slot "
+ "for a log containing $set or $unset entries");
}
- Status LogBuilder::getReplacementObject(Element* outElt) {
+ if (hasObjectReplacement())
+ return Status(ErrorCodes::IllegalOperation,
+ "LogBuilder: Invalid attempt to acquire the replacement object "
+ "in a log with existing object replacement data");
- // If the replacement accumulator is not ok, we must have started a $set or $unset
- // already, so an object replacement is not permitted.
- if (!_objectReplacementAccumulator.ok()) {
- dassert(_setAccumulator.ok() || _unsetAccumulator.ok());
- return Status(
- ErrorCodes::IllegalOperation,
- "LogBuilder: Invalid attempt to obtain the object replacement slot "
- "for a log containing $set or $unset entries");
- }
+ // OK to enqueue object replacement items.
+ *outElt = _objectReplacementAccumulator;
+ return Status::OK();
+}
- if (hasObjectReplacement())
- return Status(
- ErrorCodes::IllegalOperation,
- "LogBuilder: Invalid attempt to acquire the replacement object "
- "in a log with existing object replacement data");
-
- // OK to enqueue object replacement items.
- *outElt = _objectReplacementAccumulator;
- return Status::OK();
- }
-
- inline bool LogBuilder::hasObjectReplacement() const {
- if (!_objectReplacementAccumulator.ok())
- return false;
+inline bool LogBuilder::hasObjectReplacement() const {
+ if (!_objectReplacementAccumulator.ok())
+ return false;
- dassert(!_setAccumulator.ok());
- dassert(!_unsetAccumulator.ok());
+ dassert(!_setAccumulator.ok());
+ dassert(!_unsetAccumulator.ok());
- return _objectReplacementAccumulator.hasChildren();
- }
+ return _objectReplacementAccumulator.hasChildren();
+}
-} // namespace mongo
+} // namespace mongo
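A brief sketch of the LogBuilder calling pattern implemented above, building a $set/$unset style oplog entry; the field names and values are illustrative only:

mutablebson::Document logDoc;       // empty Object root
LogBuilder builder(logDoc.root());

uassertStatusOK(builder.addToSets("a.b", SafeNum(5)));  // accumulates under "$set"
uassertStatusOK(builder.addToUnsets("c"));              // accumulates under "$unset"

// logDoc now represents { $set: { "a.b": 5 }, $unset: { c: true } }.
// Mixing these calls with getReplacementObject() on the same builder is rejected.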
diff --git a/src/mongo/db/ops/log_builder.h b/src/mongo/db/ops/log_builder.h
index d35f48846f4..945ba18cf8f 100644
--- a/src/mongo/db/ops/log_builder.h
+++ b/src/mongo/db/ops/log_builder.h
@@ -33,90 +33,89 @@
namespace mongo {
- /** LogBuilder abstracts away some of the details of producing a properly constructed oplog
- * update entry. It manages separate regions into which it accumulates $set and $unset
- * operations, and distinguishes object replacement style oplog generation from
- * $set/$unset style generation and prevents admixture.
+/** LogBuilder abstracts away some of the details of producing a properly constructed oplog
+ * update entry. It manages separate regions into which it accumulates $set and $unset
+ * operations, and distinguishes object replacement style oplog generation from
+ * $set/$unset style generation and prevents admixture.
+ */
+class LogBuilder {
+public:
+ /** Construct a new LogBuilder. Log entries will be recorded as new children under the
+ * 'logRoot' Element, which must be of type mongo::Object and have no children.
*/
- class LogBuilder {
- public:
- /** Construct a new LogBuilder. Log entries will be recorded as new children under the
- * 'logRoot' Element, which must be of type mongo::Object and have no children.
- */
- inline LogBuilder(mutablebson::Element logRoot)
- : _logRoot(logRoot)
- , _objectReplacementAccumulator(_logRoot)
- , _setAccumulator(_logRoot.getDocument().end())
- , _unsetAccumulator(_setAccumulator) {
- dassert(logRoot.isType(mongo::Object));
- dassert(!logRoot.hasChildren());
- }
+ inline LogBuilder(mutablebson::Element logRoot)
+ : _logRoot(logRoot),
+ _objectReplacementAccumulator(_logRoot),
+ _setAccumulator(_logRoot.getDocument().end()),
+ _unsetAccumulator(_setAccumulator) {
+ dassert(logRoot.isType(mongo::Object));
+ dassert(!logRoot.hasChildren());
+ }
- /** Return the Document to which the logging root belongs. */
- inline mutablebson::Document& getDocument() {
- return _logRoot.getDocument();
- }
+ /** Return the Document to which the logging root belongs. */
+ inline mutablebson::Document& getDocument() {
+ return _logRoot.getDocument();
+ }
- /** Add the given Element as a new entry in the '$set' section of the log. If a $set
- * section does not yet exist, it will be created. If this LogBuilder is currently
- * configured to contain an object replacement, the request to add to the $set section
- * will return an Error.
- */
- Status addToSets(mutablebson::Element elt);
+ /** Add the given Element as a new entry in the '$set' section of the log. If a $set
+ * section does not yet exist, it will be created. If this LogBuilder is currently
+ * configured to contain an object replacement, the request to add to the $set section
+ * will return an Error.
+ */
+ Status addToSets(mutablebson::Element elt);
- /**
- * Convenience method which calls addToSets after
- * creating a new Element to wrap the SafeNum value.
- *
- * If any problem occurs then the operation will stop and return that error Status.
- */
- Status addToSets(StringData name, const SafeNum& val);
+ /**
+ * Convenience method which calls addToSets after
+ * creating a new Element to wrap the SafeNum value.
+ *
+ * If any problem occurs then the operation will stop and return that error Status.
+ */
+ Status addToSets(StringData name, const SafeNum& val);
- /**
- * Convenience method which calls addToSets after
- * creating a new Element to wrap the old one.
- *
- * If any problem occurs then the operation will stop and return that error Status.
- */
- Status addToSetsWithNewFieldName(StringData name, const mutablebson::Element val);
+ /**
+ * Convenience method which calls addToSets after
+ * creating a new Element to wrap the old one.
+ *
+ * If any problem occurs then the operation will stop and return that error Status.
+ */
+ Status addToSetsWithNewFieldName(StringData name, const mutablebson::Element val);
- /**
- * Convenience method which calls addToSets after
- * creating a new Element to wrap the old one.
- *
- * If any problem occurs then the operation will stop and return that error Status.
- */
- Status addToSetsWithNewFieldName(StringData name, const BSONElement& val);
+ /**
+ * Convenience method which calls addToSets after
+ * creating a new Element to wrap the old one.
+ *
+ * If any problem occurs then the operation will stop and return that error Status.
+ */
+ Status addToSetsWithNewFieldName(StringData name, const BSONElement& val);
- /** Add the given path as a new entry in the '$unset' section of the log. If an
- * '$unset' section does not yet exist, it will be created. If this LogBuilder is
- * currently configured to contain an object replacement, the request to add to the
- * $unset section will return an Error.
- */
- Status addToUnsets(StringData path);
+ /** Add the given path as a new entry in the '$unset' section of the log. If an
+ * '$unset' section does not yet exist, it will be created. If this LogBuilder is
+ * currently configured to contain an object replacement, the request to add to the
+ * $unset section will return an Error.
+ */
+ Status addToUnsets(StringData path);
- /** Obtain, via the out parameter 'outElt', a pointer to the mongo::Object type Element
- * to which the components of an object replacement should be recorded. It is an error
- * to call this if any Elements have been added by calling either addToSets or
- * addToUnsets, and attempts to do so will return a non-OK Status. Similarly, if there
- * is already object replacement data recorded for this log, the call will fail.
- */
- Status getReplacementObject(mutablebson::Element* outElt);
+ /** Obtain, via the out parameter 'outElt', a pointer to the mongo::Object type Element
+ * to which the components of an object replacement should be recorded. It is an error
+ * to call this if any Elements have been added by calling either addToSets or
+ * addToUnsets, and attempts to do so will return a non-OK Status. Similarly, if there
+ * is already object replacement data recorded for this log, the call will fail.
+ */
+ Status getReplacementObject(mutablebson::Element* outElt);
- private:
- // Returns true if the object replacement accumulator is valid and has children, false
- // otherwise.
- inline bool hasObjectReplacement() const;
+private:
+ // Returns true if the object replacement accumulator is valid and has children, false
+ // otherwise.
+ inline bool hasObjectReplacement() const;
- inline Status addToSection(
- mutablebson::Element newElt,
- mutablebson::Element* section,
- const char* sectionName);
+ inline Status addToSection(mutablebson::Element newElt,
+ mutablebson::Element* section,
+ const char* sectionName);
- mutablebson::Element _logRoot;
- mutablebson::Element _objectReplacementAccumulator;
- mutablebson::Element _setAccumulator;
- mutablebson::Element _unsetAccumulator;
- };
+ mutablebson::Element _logRoot;
+ mutablebson::Element _objectReplacementAccumulator;
+ mutablebson::Element _setAccumulator;
+ mutablebson::Element _unsetAccumulator;
+};
-} // namespace mongo
+} // namespace mongo
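A minimal usage sketch of the LogBuilder API described in the header above (not part of the diff), mirroring what the tests in log_builder_test.cpp below exercise. Header paths outside the files shown in this diff are assumed, the helper name is hypothetical, and error handling is reduced to early returns.

#include "mongo/bson/mutable/document.h"  // assumed location of mutablebson::Document
#include "mongo/db/ops/log_builder.h"
#include "mongo/util/safe_num.h"          // assumed location of SafeNum

namespace mmb = mongo::mutablebson;

// Build an oplog-style update entry: { $set : { 'a.b' : 1 }, $unset : { 'x.y' : true } }.
mongo::BSONObj buildExampleLogEntry() {
    mmb::Document logDoc;  // root is an empty mongo::Object
    mongo::LogBuilder lb(logDoc.root());

    // The $set section is created lazily on first use.
    if (!lb.addToSets("a.b", mongo::SafeNum(1)).isOK())
        return mongo::BSONObj();

    // The $unset section is accumulated separately from the $set section.
    if (!lb.addToUnsets("x.y").isOK())
        return mongo::BSONObj();

    return logDoc.getObject();
}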
diff --git a/src/mongo/db/ops/log_builder_test.cpp b/src/mongo/db/ops/log_builder_test.cpp
index 253dd70e5a1..f2a3d20aa78 100644
--- a/src/mongo/db/ops/log_builder_test.cpp
+++ b/src/mongo/db/ops/log_builder_test.cpp
@@ -36,243 +36,240 @@
namespace {
- namespace mmb = mongo::mutablebson;
- using mongo::LogBuilder;
+namespace mmb = mongo::mutablebson;
+using mongo::LogBuilder;
- TEST(LogBuilder, Initialization) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
- ASSERT_EQUALS(&doc, &lb.getDocument());
- }
+TEST(LogBuilder, Initialization) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+ ASSERT_EQUALS(&doc, &lb.getDocument());
+}
- TEST(LogBuilder, AddOneToSet) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
+TEST(LogBuilder, AddOneToSet) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
- const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
- ASSERT_TRUE(elt_ab.ok());
- ASSERT_OK(lb.addToSets(elt_ab));
+ const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
+ ASSERT_TRUE(elt_ab.ok());
+ ASSERT_OK(lb.addToSets(elt_ab));
- ASSERT_EQUALS(mongo::fromjson("{ $set : { 'a.b' : 1 } }"), doc);
- }
+ ASSERT_EQUALS(mongo::fromjson("{ $set : { 'a.b' : 1 } }"), doc);
+}
- TEST(LogBuilder, AddElementToSet) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
+TEST(LogBuilder, AddElementToSet) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
- const mmb::Element elt_ab = doc.makeElementInt("", 1);
- ASSERT_TRUE(elt_ab.ok());
- ASSERT_OK(lb.addToSetsWithNewFieldName("a.b", elt_ab));
+ const mmb::Element elt_ab = doc.makeElementInt("", 1);
+ ASSERT_TRUE(elt_ab.ok());
+ ASSERT_OK(lb.addToSetsWithNewFieldName("a.b", elt_ab));
- ASSERT_EQUALS(mongo::fromjson("{ $set : { 'a.b' : 1 } }"), doc);
- }
+ ASSERT_EQUALS(mongo::fromjson("{ $set : { 'a.b' : 1 } }"), doc);
+}
- TEST(LogBuilder, AddBSONElementToSet) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
+TEST(LogBuilder, AddBSONElementToSet) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
- mongo::BSONObj obj = mongo::fromjson("{'':1}");
-
- ASSERT_OK(lb.addToSetsWithNewFieldName("a.b", obj.firstElement()));
+ mongo::BSONObj obj = mongo::fromjson("{'':1}");
- ASSERT_EQUALS(mongo::fromjson("{ $set : { 'a.b' : 1 } }"), doc);
- }
+ ASSERT_OK(lb.addToSetsWithNewFieldName("a.b", obj.firstElement()));
- TEST(LogBuilder, AddSafeNumToSet) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
+ ASSERT_EQUALS(mongo::fromjson("{ $set : { 'a.b' : 1 } }"), doc);
+}
- mongo::BSONObj obj = mongo::fromjson("{'':1}");
-
- ASSERT_OK(lb.addToSets("a.b", mongo::SafeNum(1)));
-
- ASSERT_EQUALS(mongo::fromjson("{ $set : { 'a.b' : 1 } }"), doc);
- }
-
- TEST(LogBuilder, AddOneToUnset) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
- ASSERT_OK(lb.addToUnsets("x.y"));
- ASSERT_EQUALS(mongo::fromjson("{ $unset : { 'x.y' : true } }"), doc);
- }
-
- TEST(LogBuilder, AddOneToEach) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
- ASSERT_TRUE(elt_ab.ok());
- ASSERT_OK(lb.addToSets(elt_ab));
-
- ASSERT_OK(lb.addToUnsets("x.y"));
-
- ASSERT_EQUALS(
- mongo::fromjson(
- "{ "
- " $set : { 'a.b' : 1 }, "
- " $unset : { 'x.y' : true } "
- "}"
- ), doc);
- }
-
- TEST(LogBuilder, AddOneObjectReplacementEntry) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- mmb::Element replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_OK(lb.getReplacementObject(&replacement));
- ASSERT_TRUE(replacement.ok());
- ASSERT_TRUE(replacement.isType(mongo::Object));
-
- const mmb::Element elt_a = doc.makeElementInt("a", 1);
- ASSERT_TRUE(elt_a.ok());
- ASSERT_OK(replacement.pushBack(elt_a));
-
- ASSERT_EQUALS(mongo::fromjson("{ a : 1 }"), doc);
- }
-
- TEST(LogBuilder, AddTwoObjectReplacementEntry) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- mmb::Element replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_OK(lb.getReplacementObject(&replacement));
- ASSERT_TRUE(replacement.ok());
- ASSERT_TRUE(replacement.isType(mongo::Object));
-
- const mmb::Element elt_a = doc.makeElementInt("a", 1);
- ASSERT_TRUE(elt_a.ok());
- ASSERT_OK(replacement.pushBack(elt_a));
-
- const mmb::Element elt_b = doc.makeElementInt("b", 2);
- ASSERT_TRUE(elt_b.ok());
- ASSERT_OK(replacement.pushBack(elt_b));
-
- ASSERT_EQUALS(mongo::fromjson("{ a : 1, b: 2 }"), doc);
- }
-
- TEST(LogBuilder, VerifySetsAreGrouped) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
- ASSERT_TRUE(elt_ab.ok());
- ASSERT_OK(lb.addToSets(elt_ab));
-
- const mmb::Element elt_xy = doc.makeElementInt("x.y", 1);
- ASSERT_TRUE(elt_xy.ok());
- ASSERT_OK(lb.addToSets(elt_xy));
-
- ASSERT_EQUALS(
- mongo::fromjson(
- "{ $set : {"
- " 'a.b' : 1, "
- " 'x.y' : 1 "
- "} }"
- ), doc);
- }
-
- TEST(LogBuilder, VerifyUnsetsAreGrouped) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- ASSERT_OK(lb.addToUnsets("a.b"));
- ASSERT_OK(lb.addToUnsets("x.y"));
-
- ASSERT_EQUALS(
- mongo::fromjson(
- "{ $unset : {"
- " 'a.b' : true, "
- " 'x.y' : true "
- "} }"
- ), doc);
- }
-
- TEST(LogBuilder, PresenceOfSetPreventsObjectReplacement) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- mmb::Element replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_OK(lb.getReplacementObject(&replacement));
- ASSERT_TRUE(replacement.ok());
-
- const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
- ASSERT_TRUE(elt_ab.ok());
- ASSERT_OK(lb.addToSets(elt_ab));
-
- replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_NOT_OK(lb.getReplacementObject(&replacement));
- ASSERT_FALSE(replacement.ok());
- }
-
- TEST(LogBuilder, PresenceOfUnsetPreventsObjectReplacement) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- mmb::Element replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_OK(lb.getReplacementObject(&replacement));
- ASSERT_TRUE(replacement.ok());
-
- const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
- ASSERT_TRUE(elt_ab.ok());
- ASSERT_OK(lb.addToSets(elt_ab));
-
- replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_NOT_OK(lb.getReplacementObject(&replacement));
- ASSERT_FALSE(replacement.ok());
- }
-
- TEST(LogBuilder, CantAddSetWithObjectReplacementDataPresent) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- mmb::Element replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_OK(lb.getReplacementObject(&replacement));
- ASSERT_TRUE(replacement.ok());
- ASSERT_OK(replacement.appendInt("a", 1));
-
- mmb::Element setCandidate = doc.makeElementInt("x", 0);
- ASSERT_NOT_OK(lb.addToSets(setCandidate));
- }
-
- TEST(LogBuilder, CantAddUnsetWithObjectReplacementDataPresent) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- mmb::Element replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_OK(lb.getReplacementObject(&replacement));
- ASSERT_TRUE(replacement.ok());
- ASSERT_OK(replacement.appendInt("a", 1));
-
- ASSERT_NOT_OK(lb.addToUnsets("x"));
- }
-
- // Ensure that once you have obtained the object replacement slot and mutated it, that the
- // object replacement slot becomes in accessible. This is a bit paranoid, since in practice
- // the modifier conflict detection logic should prevent that outcome at a higher level, but
- // preventing it here costs us nothing and add an extra safety check.
- TEST(LogBuilder, CantReacquireObjectReplacementData) {
- mmb::Document doc;
- LogBuilder lb(doc.root());
-
- mmb::Element replacement = doc.end();
- ASSERT_FALSE(replacement.ok());
- ASSERT_OK(lb.getReplacementObject(&replacement));
- ASSERT_TRUE(replacement.ok());
- ASSERT_OK(replacement.appendInt("a", 1));
-
- mmb::Element again = doc.end();
- ASSERT_FALSE(again.ok());
- ASSERT_NOT_OK(lb.getReplacementObject(&again));
- ASSERT_FALSE(again.ok());
- }
-
-} // namespace
+TEST(LogBuilder, AddSafeNumToSet) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ mongo::BSONObj obj = mongo::fromjson("{'':1}");
+
+ ASSERT_OK(lb.addToSets("a.b", mongo::SafeNum(1)));
+
+ ASSERT_EQUALS(mongo::fromjson("{ $set : { 'a.b' : 1 } }"), doc);
+}
+
+TEST(LogBuilder, AddOneToUnset) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+ ASSERT_OK(lb.addToUnsets("x.y"));
+ ASSERT_EQUALS(mongo::fromjson("{ $unset : { 'x.y' : true } }"), doc);
+}
+
+TEST(LogBuilder, AddOneToEach) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
+ ASSERT_TRUE(elt_ab.ok());
+ ASSERT_OK(lb.addToSets(elt_ab));
+
+ ASSERT_OK(lb.addToUnsets("x.y"));
+
+ ASSERT_EQUALS(mongo::fromjson(
+ "{ "
+ " $set : { 'a.b' : 1 }, "
+ " $unset : { 'x.y' : true } "
+ "}"),
+ doc);
+}
+
+TEST(LogBuilder, AddOneObjectReplacementEntry) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ mmb::Element replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_TRUE(replacement.ok());
+ ASSERT_TRUE(replacement.isType(mongo::Object));
+
+ const mmb::Element elt_a = doc.makeElementInt("a", 1);
+ ASSERT_TRUE(elt_a.ok());
+ ASSERT_OK(replacement.pushBack(elt_a));
+
+ ASSERT_EQUALS(mongo::fromjson("{ a : 1 }"), doc);
+}
+
+TEST(LogBuilder, AddTwoObjectReplacementEntry) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ mmb::Element replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_TRUE(replacement.ok());
+ ASSERT_TRUE(replacement.isType(mongo::Object));
+
+ const mmb::Element elt_a = doc.makeElementInt("a", 1);
+ ASSERT_TRUE(elt_a.ok());
+ ASSERT_OK(replacement.pushBack(elt_a));
+
+ const mmb::Element elt_b = doc.makeElementInt("b", 2);
+ ASSERT_TRUE(elt_b.ok());
+ ASSERT_OK(replacement.pushBack(elt_b));
+
+ ASSERT_EQUALS(mongo::fromjson("{ a : 1, b: 2 }"), doc);
+}
+
+TEST(LogBuilder, VerifySetsAreGrouped) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
+ ASSERT_TRUE(elt_ab.ok());
+ ASSERT_OK(lb.addToSets(elt_ab));
+
+ const mmb::Element elt_xy = doc.makeElementInt("x.y", 1);
+ ASSERT_TRUE(elt_xy.ok());
+ ASSERT_OK(lb.addToSets(elt_xy));
+
+ ASSERT_EQUALS(mongo::fromjson(
+ "{ $set : {"
+ " 'a.b' : 1, "
+ " 'x.y' : 1 "
+ "} }"),
+ doc);
+}
+
+TEST(LogBuilder, VerifyUnsetsAreGrouped) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ ASSERT_OK(lb.addToUnsets("a.b"));
+ ASSERT_OK(lb.addToUnsets("x.y"));
+
+ ASSERT_EQUALS(mongo::fromjson(
+ "{ $unset : {"
+ " 'a.b' : true, "
+ " 'x.y' : true "
+ "} }"),
+ doc);
+}
+
+TEST(LogBuilder, PresenceOfSetPreventsObjectReplacement) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ mmb::Element replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_TRUE(replacement.ok());
+
+ const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
+ ASSERT_TRUE(elt_ab.ok());
+ ASSERT_OK(lb.addToSets(elt_ab));
+
+ replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_NOT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_FALSE(replacement.ok());
+}
+
+TEST(LogBuilder, PresenceOfUnsetPreventsObjectReplacement) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ mmb::Element replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_TRUE(replacement.ok());
+
+ const mmb::Element elt_ab = doc.makeElementInt("a.b", 1);
+ ASSERT_TRUE(elt_ab.ok());
+ ASSERT_OK(lb.addToSets(elt_ab));
+
+ replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_NOT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_FALSE(replacement.ok());
+}
+
+TEST(LogBuilder, CantAddSetWithObjectReplacementDataPresent) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ mmb::Element replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_TRUE(replacement.ok());
+ ASSERT_OK(replacement.appendInt("a", 1));
+
+ mmb::Element setCandidate = doc.makeElementInt("x", 0);
+ ASSERT_NOT_OK(lb.addToSets(setCandidate));
+}
+
+TEST(LogBuilder, CantAddUnsetWithObjectReplacementDataPresent) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ mmb::Element replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_TRUE(replacement.ok());
+ ASSERT_OK(replacement.appendInt("a", 1));
+
+ ASSERT_NOT_OK(lb.addToUnsets("x"));
+}
+
+// Ensure that once you have obtained the object replacement slot and mutated it, the
+// object replacement slot becomes inaccessible. This is a bit paranoid, since in practice
+// the modifier conflict detection logic should prevent that outcome at a higher level, but
+// preventing it here costs us nothing and adds an extra safety check.
+TEST(LogBuilder, CantReacquireObjectReplacementData) {
+ mmb::Document doc;
+ LogBuilder lb(doc.root());
+
+ mmb::Element replacement = doc.end();
+ ASSERT_FALSE(replacement.ok());
+ ASSERT_OK(lb.getReplacementObject(&replacement));
+ ASSERT_TRUE(replacement.ok());
+ ASSERT_OK(replacement.appendInt("a", 1));
+
+ mmb::Element again = doc.end();
+ ASSERT_FALSE(again.ok());
+ ASSERT_NOT_OK(lb.getReplacementObject(&again));
+ ASSERT_FALSE(again.ok());
+}
+
+} // namespace
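A complementary sketch for the object-replacement path exercised by the tests above (not part of the diff): once either accumulator has been used, the other mode is rejected with a non-OK Status. Header paths outside the files shown in this diff are assumed, and the helper name is hypothetical.

#include "mongo/bson/mutable/document.h"  // assumed location of mutablebson::Document
#include "mongo/db/ops/log_builder.h"

namespace mmb = mongo::mutablebson;

// Record a full-document replacement in 'logDoc' instead of $set/$unset sections.
mongo::Status buildReplacementEntry(mmb::Document& logDoc, const mongo::BSONObj& newObj) {
    mongo::LogBuilder lb(logDoc.root());

    // Ask for the object-replacement slot; this fails if $set/$unset data already exists.
    mmb::Element replacement = logDoc.end();
    mongo::Status status = lb.getReplacementObject(&replacement);
    if (!status.isOK())
        return status;

    // Copy the replacement document's fields under the slot.
    for (mongo::BSONObjIterator it(newObj); it.more();) {
        status = replacement.appendElement(it.next());
        if (!status.isOK())
            return status;
    }

    // Any later addToSets()/addToUnsets() call on 'lb' would now return a non-OK Status.
    return mongo::Status::OK();
}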
diff --git a/src/mongo/db/ops/modifier_add_to_set.cpp b/src/mongo/db/ops/modifier_add_to_set.cpp
index 381d06632e3..383991c2b1f 100644
--- a/src/mongo/db/ops/modifier_add_to_set.cpp
+++ b/src/mongo/db/ops/modifier_add_to_set.cpp
@@ -37,155 +37,145 @@
namespace mongo {
- namespace mb = mutablebson;
- namespace str = mongoutils::str;
-
- namespace {
-
- template<typename Ordering, typename Equality>
- void deduplicate(mb::Element parent, Ordering comp, Equality equal) {
-
- // First, build a vector of the children.
- std::vector<mb::Element> children;
- mb::Element current = parent.leftChild();
- while (current.ok()) {
- children.push_back(current);
- current = current.rightSibling();
- }
+namespace mb = mutablebson;
+namespace str = mongoutils::str;
+
+namespace {
+
+template <typename Ordering, typename Equality>
+void deduplicate(mb::Element parent, Ordering comp, Equality equal) {
+ // First, build a vector of the children.
+ std::vector<mb::Element> children;
+ mb::Element current = parent.leftChild();
+ while (current.ok()) {
+ children.push_back(current);
+ current = current.rightSibling();
+ }
- // Then, sort the child vector with our comparator.
- std::sort(children.begin(), children.end(), comp);
+ // Then, sort the child vector with our comparator.
+ std::sort(children.begin(), children.end(), comp);
- // Next, remove duplicates by walking the vector.
- std::vector<mb::Element>::iterator where = children.begin();
- const std::vector<mb::Element>::iterator end = children.end();
+ // Next, remove duplicates by walking the vector.
+ std::vector<mb::Element>::iterator where = children.begin();
+ const std::vector<mb::Element>::iterator end = children.end();
- while( where != end ) {
- std::vector<mb::Element>::iterator next = where; ++next;
- while (next != end && equal(*where, *next)) {
- next->remove();
- ++next;
- }
- where = next;
- }
+ while (where != end) {
+ std::vector<mb::Element>::iterator next = where;
+ ++next;
+ while (next != end && equal(*where, *next)) {
+ next->remove();
+ ++next;
}
+ where = next;
+ }
+}
- } // namespace
+} // namespace
- struct ModifierAddToSet::PreparedState {
+struct ModifierAddToSet::PreparedState {
+ PreparedState(mb::Document& doc)
+ : doc(doc),
+ idxFound(0),
+ elemFound(doc.end()),
+ addAll(false),
+ elementsToAdd(),
+ noOp(false) {}
- PreparedState(mb::Document& doc)
- : doc(doc)
- , idxFound(0)
- , elemFound(doc.end())
- , addAll(false)
- , elementsToAdd()
- , noOp(false) {
- }
+ // Document that is going to be changed.
+ mb::Document& doc;
- // Document that is going to be changed.
- mb::Document& doc;
+    // Index in _fieldRef for which an Element exists in the document.
+ size_t idxFound;
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mb::Element elemFound;
- // Element corresponding to _fieldRef[0.._idxFound].
- mb::Element elemFound;
+ // Are we adding all of the $each elements, or just a subset?
+ bool addAll;
- // Are we adding all of the $each elements, or just a subset?
- bool addAll;
+ // Values to be applied.
+ std::vector<mb::Element> elementsToAdd;
- // Values to be applied.
- std::vector<mb::Element> elementsToAdd;
+ // True if this update is a no-op
+ bool noOp;
+};
- // True if this update is a no-op
- bool noOp;
- };
+ModifierAddToSet::ModifierAddToSet()
+ : ModifierInterface(), _fieldRef(), _posDollar(0), _valDoc(), _val(_valDoc.end()) {}
- ModifierAddToSet::ModifierAddToSet()
- : ModifierInterface ()
- , _fieldRef()
- , _posDollar(0)
- , _valDoc()
- , _val(_valDoc.end()) {
- }
+ModifierAddToSet::~ModifierAddToSet() {}
- ModifierAddToSet::~ModifierAddToSet() {
+Status ModifierAddToSet::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ // Perform standard field name and updateable checks.
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
}
- Status ModifierAddToSet::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
- // Perform standard field name and updateable checks.
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
+ if (positional)
+ *positional = foundDollar;
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
+ }
- if (positional)
- *positional = foundDollar;
+ // TODO: The driver could potentially do this re-writing.
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
- }
-
- // TODO: The driver could potentially do this re-writing.
-
- // If the type of the value is 'Object', we might be dealing with a $each. See if that
- // is the case.
- if (modExpr.type() == mongo::Object) {
- BSONElement modExprObjPayload = modExpr.embeddedObject().firstElement();
- if (!modExprObjPayload.eoo() && StringData(modExprObjPayload.fieldName()) == "$each") {
- // It is a $each. Verify that the payload is an array as is required for $each,
- // set our flag, and store the array as our value.
- if (modExprObjPayload.type() != mongo::Array) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The argument to $each in $addToSet must "
- "be an array but it was of type "
- << typeName(modExprObjPayload.type()));
- }
-
- status = _valDoc.root().appendElement(modExprObjPayload);
- if (!status.isOK())
- return status;
-
- _val = _valDoc.root().leftChild();
-
- deduplicate(_val, mb::woLess(false), mb::woEqual(false));
+ // If the type of the value is 'Object', we might be dealing with a $each. See if that
+ // is the case.
+ if (modExpr.type() == mongo::Object) {
+ BSONElement modExprObjPayload = modExpr.embeddedObject().firstElement();
+ if (!modExprObjPayload.eoo() && StringData(modExprObjPayload.fieldName()) == "$each") {
+ // It is a $each. Verify that the payload is an array as is required for $each,
+ // set our flag, and store the array as our value.
+ if (modExprObjPayload.type() != mongo::Array) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The argument to $each in $addToSet must "
+ "be an array but it was of type "
+ << typeName(modExprObjPayload.type()));
}
- }
-
- // If this wasn't an 'each', turn it into one. No need to sort or de-dup since we only
- // have one element.
- if (_val == _valDoc.end()) {
- mb::Element each = _valDoc.makeElementArray("$each");
- status = each.appendElement(modExpr);
+ status = _valDoc.root().appendElement(modExprObjPayload);
if (!status.isOK())
return status;
- status = _valDoc.root().pushBack(each);
- if (!status.isOK())
- return status;
+ _val = _valDoc.root().leftChild();
- _val = each;
+ deduplicate(_val, mb::woLess(false), mb::woEqual(false));
}
+ }
- // Check if no invalid data (such as fields with '$'s) are being used in the $each
- // clause.
- mb::ConstElement valCursor = _val.leftChild();
- while (valCursor.ok()) {
- const BSONType type = valCursor.getType();
- dassert(valCursor.hasValue());
- switch(type) {
+ // If this wasn't an 'each', turn it into one. No need to sort or de-dup since we only
+ // have one element.
+ if (_val == _valDoc.end()) {
+ mb::Element each = _valDoc.makeElementArray("$each");
+
+ status = each.appendElement(modExpr);
+ if (!status.isOK())
+ return status;
+
+ status = _valDoc.root().pushBack(each);
+ if (!status.isOK())
+ return status;
+
+ _val = each;
+ }
+
+    // Check that no invalid data (such as fields with '$'s) is being used in the $each
+ // clause.
+ mb::ConstElement valCursor = _val.leftChild();
+ while (valCursor.ok()) {
+ const BSONType type = valCursor.getType();
+ dassert(valCursor.hasValue());
+ switch (type) {
case mongo::Object: {
Status s = valCursor.getValueObject().storageValidEmbedded();
if (!s.isOK())
@@ -202,231 +192,207 @@ namespace mongo {
}
default:
break;
- }
-
- valCursor = valCursor.rightSibling();
}
- return Status::OK();
+ valCursor = valCursor.rightSibling();
}
- Status ModifierAddToSet::prepare(mb::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
+ return Status::OK();
+}
- _preparedState.reset(new PreparedState(root.getDocument()));
+Status ModifierAddToSet::prepare(mb::Element root, StringData matchedField, ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(root.getDocument()));
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_posDollar) {
- if (matchedField.empty()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
- }
- _fieldRef.setPart(_posDollar, matchedField);
- }
-
- // Locate the field name in 'root'.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
-
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->elemFound = root.getDocument().end();
- } else if (!status.isOK()) {
- return status;
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_posDollar) {
+ if (matchedField.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
}
+ _fieldRef.setPart(_posDollar, matchedField);
+ }
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_fieldRef;
-
- //
- // in-place and no-op logic
- //
-
- // If the field path is not fully present, then this mod cannot be in place, nor is a
- // noOp.
- if (!_preparedState->elemFound.ok() ||
- _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
- // If no target element exists, we will simply be creating a new array.
- _preparedState->addAll = true;
- return Status::OK();
- }
+ // Locate the field name in 'root'.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->idxFound, &_preparedState->elemFound);
+
+ // FindLongestPrefix may say the path does not exist at all, which is fine here, or
+ // that the path was not viable or otherwise wrong, in which case, the mod cannot
+ // proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ _preparedState->elemFound = root.getDocument().end();
+ } else if (!status.isOK()) {
+ return status;
+ }
- // This operation only applies to arrays
- if (_preparedState->elemFound.getType() != mongo::Array) {
- mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Cannot apply $addToSet to a non-array field. Field named '"
- << _preparedState->elemFound.getFieldName()
- << "' has a non-array type "
- << typeName(_preparedState->elemFound.getType())
- << " in the document "
- << idElem.toString());
- }
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_fieldRef;
- // If the array is empty, then we don't need to check anything: all of the values are
- // going to be added.
- if (!_preparedState->elemFound.hasChildren()) {
- _preparedState->addAll = true;
- return Status::OK();
- }
+ //
+ // in-place and no-op logic
+ //
- // For each value in the $each clause, compare it against the values in the array. If
- // the element is not present, record it as one to add.
- mb::Element eachIter = _val.leftChild();
- while (eachIter.ok()) {
- mb::Element where = mb::findElement(
- _preparedState->elemFound.leftChild(),
- mb::woEqualTo(eachIter, false));
- if (!where.ok()) {
- // The element was not found. Record the element from $each as one to be added.
- _preparedState->elementsToAdd.push_back(eachIter);
- }
- eachIter = eachIter.rightSibling();
- }
+ // If the field path is not fully present, then this mod cannot be in place, nor is a
+ // noOp.
+ if (!_preparedState->elemFound.ok() || _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ // If no target element exists, we will simply be creating a new array.
+ _preparedState->addAll = true;
+ return Status::OK();
+ }
- // If we didn't find any elements to add, then this is a no-op.
- if (_preparedState->elementsToAdd.empty()) {
- _preparedState->noOp = execInfo->noOp = true;
- }
+ // This operation only applies to arrays
+ if (_preparedState->elemFound.getType() != mongo::Array) {
+ mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Cannot apply $addToSet to a non-array field. Field named '"
+ << _preparedState->elemFound.getFieldName() << "' has a non-array type "
+ << typeName(_preparedState->elemFound.getType()) << " in the document "
+ << idElem.toString());
+ }
+ // If the array is empty, then we don't need to check anything: all of the values are
+ // going to be added.
+ if (!_preparedState->elemFound.hasChildren()) {
+ _preparedState->addAll = true;
return Status::OK();
}
- Status ModifierAddToSet::apply() const {
- dassert(_preparedState->noOp == false);
+ // For each value in the $each clause, compare it against the values in the array. If
+ // the element is not present, record it as one to add.
+ mb::Element eachIter = _val.leftChild();
+ while (eachIter.ok()) {
+ mb::Element where =
+ mb::findElement(_preparedState->elemFound.leftChild(), mb::woEqualTo(eachIter, false));
+ if (!where.ok()) {
+ // The element was not found. Record the element from $each as one to be added.
+ _preparedState->elementsToAdd.push_back(eachIter);
+ }
+ eachIter = eachIter.rightSibling();
+ }
- // TODO: The contents of this block are lifted directly from $push.
+ // If we didn't find any elements to add, then this is a no-op.
+ if (_preparedState->elementsToAdd.empty()) {
+ _preparedState->noOp = execInfo->noOp = true;
+ }
- // If the array field is not there, create it as an array and attach it to the
- // document.
- if (!_preparedState->elemFound.ok() ||
- _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
-
- // Creates the array element
- mb::Document& doc = _preparedState->doc;
- StringData lastPart = _fieldRef.getPart(_fieldRef.numParts() - 1);
- mb::Element baseArray = doc.makeElementArray(lastPart);
- if (!baseArray.ok()) {
- return Status(ErrorCodes::InternalError, "can't create new base array");
- }
+ return Status::OK();
+}
- // Now, we can be in two cases here, as far as attaching the element being set
- // goes: (a) none of the parts in the element's path exist, or (b) some parts of
- // the path exist but not all.
- if (!_preparedState->elemFound.ok()) {
- _preparedState->elemFound = doc.root();
- _preparedState->idxFound = 0;
- }
- else {
- _preparedState->idxFound++;
- }
+Status ModifierAddToSet::apply() const {
+ dassert(_preparedState->noOp == false);
- // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
- Status status = pathsupport::createPathAt(_fieldRef,
- _preparedState->idxFound,
- _preparedState->elemFound,
- baseArray);
- if (!status.isOK()) {
- return status;
- }
+ // TODO: The contents of this block are lifted directly from $push.
- // Point to the base array just created. The subsequent code expects it to exist
- // already.
- _preparedState->elemFound = baseArray;
+ // If the array field is not there, create it as an array and attach it to the
+ // document.
+ if (!_preparedState->elemFound.ok() || _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ // Creates the array element
+ mb::Document& doc = _preparedState->doc;
+ StringData lastPart = _fieldRef.getPart(_fieldRef.numParts() - 1);
+ mb::Element baseArray = doc.makeElementArray(lastPart);
+ if (!baseArray.ok()) {
+ return Status(ErrorCodes::InternalError, "can't create new base array");
}
- if (_preparedState->addAll) {
-
- // If we are adding all the values, we can just walk over _val;
+ // Now, we can be in two cases here, as far as attaching the element being set
+ // goes: (a) none of the parts in the element's path exist, or (b) some parts of
+ // the path exist but not all.
+ if (!_preparedState->elemFound.ok()) {
+ _preparedState->elemFound = doc.root();
+ _preparedState->idxFound = 0;
+ } else {
+ _preparedState->idxFound++;
+ }
- mb::Element where = _val.leftChild();
- while (where.ok()) {
+ // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
+ Status status = pathsupport::createPathAt(
+ _fieldRef, _preparedState->idxFound, _preparedState->elemFound, baseArray);
+ if (!status.isOK()) {
+ return status;
+ }
- dassert(where.hasValue());
+ // Point to the base array just created. The subsequent code expects it to exist
+ // already.
+ _preparedState->elemFound = baseArray;
+ }
- mb::Element toAdd = _preparedState->doc.makeElement(where.getValue());
- Status status = _preparedState->elemFound.pushBack(toAdd);
- if (!status.isOK())
- return status;
+ if (_preparedState->addAll) {
+        // If we are adding all the values, we can just walk over _val.
- where = where.rightSibling();
- }
+ mb::Element where = _val.leftChild();
+ while (where.ok()) {
+ dassert(where.hasValue());
- } else {
+ mb::Element toAdd = _preparedState->doc.makeElement(where.getValue());
+ Status status = _preparedState->elemFound.pushBack(toAdd);
+ if (!status.isOK())
+ return status;
- // Otherwise, we aren't adding all the values, and we need to add exactly those
- // elements that were found to be missing during our scan in prepare.
- std::vector<mb::Element>::const_iterator where =
- _preparedState->elementsToAdd.begin();
+ where = where.rightSibling();
+ }
- const std::vector<mb::Element>::const_iterator end =
- _preparedState->elementsToAdd.end();
+ } else {
+ // Otherwise, we aren't adding all the values, and we need to add exactly those
+ // elements that were found to be missing during our scan in prepare.
+ std::vector<mb::Element>::const_iterator where = _preparedState->elementsToAdd.begin();
- for ( ; where != end; ++where) {
+ const std::vector<mb::Element>::const_iterator end = _preparedState->elementsToAdd.end();
- dassert(where->hasValue());
+ for (; where != end; ++where) {
+ dassert(where->hasValue());
- mb::Element toAdd = _preparedState->doc.makeElement(where->getValue());
- Status status = _preparedState->elemFound.pushBack(toAdd);
- if (!status.isOK())
- return status;
- }
+ mb::Element toAdd = _preparedState->doc.makeElement(where->getValue());
+ Status status = _preparedState->elemFound.pushBack(toAdd);
+ if (!status.isOK())
+ return status;
}
-
- return Status::OK();
}
- Status ModifierAddToSet::log(LogBuilder* logBuilder) const {
+ return Status::OK();
+}
- // TODO: This is copied more or less identically from $push. As a result, it copies the
- // behavior in $push that relies on 'apply' having been called unless this is a no-op.
+Status ModifierAddToSet::log(LogBuilder* logBuilder) const {
+ // TODO: This is copied more or less identically from $push. As a result, it copies the
+ // behavior in $push that relies on 'apply' having been called unless this is a no-op.
- // TODO We can log just a positional set in several cases. For now, let's just log the
- // full resulting array.
+ // TODO We can log just a positional set in several cases. For now, let's just log the
+ // full resulting array.
- // We'd like to create an entry such as {$set: {<fieldname>: [<resulting aray>]}} under
- // 'logRoot'. We start by creating the {$set: ...} Element.
- mb::Document& doc = logBuilder->getDocument();
+    // We'd like to create an entry such as {$set: {<fieldname>: [<resulting array>]}} under
+ // 'logRoot'. We start by creating the {$set: ...} Element.
+ mb::Document& doc = logBuilder->getDocument();
- // Then we create the {<fieldname>:[]} Element, that is, an empty array.
- mb::Element logElement = doc.makeElementArray(_fieldRef.dottedField());
- if (!logElement.ok()) {
- return Status(ErrorCodes::InternalError, "cannot create details for $addToSet mod");
- }
-
- // Fill up the empty array.
- mb::Element curr = _preparedState->elemFound.leftChild();
- while (curr.ok()) {
+ // Then we create the {<fieldname>:[]} Element, that is, an empty array.
+ mb::Element logElement = doc.makeElementArray(_fieldRef.dottedField());
+ if (!logElement.ok()) {
+ return Status(ErrorCodes::InternalError, "cannot create details for $addToSet mod");
+ }
- dassert(curr.hasValue());
+ // Fill up the empty array.
+ mb::Element curr = _preparedState->elemFound.leftChild();
+ while (curr.ok()) {
+ dassert(curr.hasValue());
- // We need to copy each array entry from the resulting document to the log
- // document.
- mb::Element currCopy = doc.makeElementWithNewFieldName(
- StringData(),
- curr.getValue());
- if (!currCopy.ok()) {
- return Status(ErrorCodes::InternalError, "could create copy element");
- }
- Status status = logElement.pushBack(currCopy);
- if (!status.isOK()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Could not append entry for $addToSet oplog entry."
- << "Underlying cause: " << status.toString());
- }
- curr = curr.rightSibling();
+ // We need to copy each array entry from the resulting document to the log
+ // document.
+ mb::Element currCopy = doc.makeElementWithNewFieldName(StringData(), curr.getValue());
+ if (!currCopy.ok()) {
+ return Status(ErrorCodes::InternalError, "could create copy element");
}
-
- return logBuilder->addToSets(logElement);
+ Status status = logElement.pushBack(currCopy);
+ if (!status.isOK()) {
+ return Status(ErrorCodes::BadValue,
+                          str::stream() << "Could not append entry for $addToSet oplog entry. "
+ << "Underlying cause: " << status.toString());
+ }
+ curr = curr.rightSibling();
}
-} // namespace mongo
+ return logBuilder->addToSets(logElement);
+}
+
+} // namespace mongo
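The deduplicate() helper above finds duplicates by sorting a vector of handles to the $each elements so that equal values become adjacent, then removing every element that compares equal to the one kept just before it; the surviving elements stay in their original document order. A standalone illustration of the same strategy over plain ints (not part of the diff, and only a sketch):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Remove duplicate ints while keeping survivors in their original order, mirroring how
// deduplicate() sorts Element handles only to detect equal values and then remove()s the extras.
void dedupPreservingOrder(std::vector<int>& values) {
    // Sort positions by value (stable, so the first occurrence of each value survives).
    std::vector<std::size_t> order(values.size());
    for (std::size_t i = 0; i < order.size(); ++i)
        order[i] = i;
    std::stable_sort(order.begin(), order.end(), [&](std::size_t a, std::size_t b) {
        return values[a] < values[b];
    });

    // Mark all but the first element of each run of equal values for removal.
    std::vector<bool> drop(values.size(), false);
    for (std::size_t i = 1; i < order.size(); ++i) {
        if (values[order[i]] == values[order[i - 1]])
            drop[order[i]] = true;
    }

    std::vector<int> kept;
    for (std::size_t i = 0; i < values.size(); ++i) {
        if (!drop[i])
            kept.push_back(values[i]);
    }
    values.swap(kept);
}

int main() {
    // Same $each payload as the NewDuplicatesAreElided test further below.
    std::vector<int> values{4, 1, 3, 2, 3, 1, 3, 3, 2, 4};
    dedupPreservingOrder(values);
    assert((values == std::vector<int>{4, 1, 3, 2}));
    return 0;
}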
diff --git a/src/mongo/db/ops/modifier_add_to_set.h b/src/mongo/db/ops/modifier_add_to_set.h
index bbbc37ece1e..66bd372834c 100644
--- a/src/mongo/db/ops/modifier_add_to_set.h
+++ b/src/mongo/db/ops/modifier_add_to_set.h
@@ -36,50 +36,46 @@
namespace mongo {
- class LogBuilder;
+class LogBuilder;
- class ModifierAddToSet : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierAddToSet);
+class ModifierAddToSet : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierAddToSet);
- public:
+public:
+ ModifierAddToSet();
+ virtual ~ModifierAddToSet();
- ModifierAddToSet();
- virtual ~ModifierAddToSet();
+    /** Goes over the array item(s) that are going to be set-unioned and converts them
+     * internally to a mutable bson. Both single and $each forms are supported. Returns OK
+     * if the item(s) are valid, otherwise returns a status describing the error.
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
- /** Goes over the array item(s) that are going to be set- unioned and converts them
- * internally to a mutable bson. Both single and $each forms are supported. Returns OK
- * if the item(s) are valid otherwise returns a status describing the error.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
+    /** Decides which portion of the array items is going to be set-unioned into root's
+     * document and fills in 'execInfo' accordingly. Returns OK if the document has a
+     * valid array to set-union into, otherwise returns a status describing the error.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
- /** Decides which portion of the array items that are going to be set-unioned to root's
- * document and fills in 'execInfo' accordingly. Returns OK if the document has a
- * valid array to set-union to, othwise returns a status describing the error.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
+ /** Updates the Element used in prepare with the effects of the $addToSet operation. */
+ virtual Status apply() const;
- /** Updates the Element used in prepare with the effects of the $addToSet operation. */
- virtual Status apply() const;
+ /** Converts the effects of this $addToSet into one or more equivalent $set operations. */
+ virtual Status log(LogBuilder* logBuilder) const;
- /** Converts the effects of this $addToSet into one or more equivalent $set operations. */
- virtual Status log(LogBuilder* logBuilder) const;
+private:
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
- private:
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
+ // 0 or index for $-positional in _fieldRef.
+ size_t _posDollar;
- // 0 or index for $-positional in _fieldRef.
- size_t _posDollar;
+ // Array of values to be set-union'ed onto target.
+ mutablebson::Document _valDoc;
+ mutablebson::Element _val;
- // Array of values to be set-union'ed onto target.
- mutablebson::Document _valDoc;
- mutablebson::Element _val;
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
-
-} // namespace mongo
+} // namespace mongo
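A minimal lifecycle sketch for the interface declared above (not part of the diff), following the same init, prepare, apply, log sequence that the Mod helper in modifier_add_to_set_test.cpp below wraps. Header paths outside the files shown in this diff are assumed, and the helper name is hypothetical.

#include "mongo/bson/mutable/document.h"  // assumed location of mutablebson::Document
#include "mongo/db/jsobj.h"               // assumed location of BSONObj/BSONElement
#include "mongo/db/ops/log_builder.h"
#include "mongo/db/ops/modifier_add_to_set.h"

namespace mmb = mongo::mutablebson;

mongo::Status applyAddToSet(mmb::Document& doc, const mongo::BSONObj& updateExpr) {
    mongo::ModifierAddToSet mod;

    // init() consumes the single element under $addToSet, e.g. { a : { $each : [ 1, 2 ] } }.
    mongo::Status status = mod.init(updateExpr["$addToSet"].embeddedObject().firstElement(),
                                    mongo::ModifierInterface::Options::normal());
    if (!status.isOK())
        return status;

    // prepare() binds any positional field, locates the target array, and flags no-ops.
    mongo::ModifierInterface::ExecInfo execInfo;
    status = mod.prepare(doc.root(), "", &execInfo);
    if (!status.isOK())
        return status;

    if (!execInfo.noOp) {
        // apply() pushes the $each values that are missing from the target array onto 'doc'.
        status = mod.apply();
        if (!status.isOK())
            return status;
    }

    // log() records the resulting array as a $set entry suitable for the oplog.
    mmb::Document logDoc;
    mongo::LogBuilder logBuilder(logDoc.root());
    return mod.log(&logBuilder);
}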
diff --git a/src/mongo/db/ops/modifier_add_to_set_test.cpp b/src/mongo/db/ops/modifier_add_to_set_test.cpp
index 45741b4c000..43335cb4847 100644
--- a/src/mongo/db/ops/modifier_add_to_set_test.cpp
+++ b/src/mongo/db/ops/modifier_add_to_set_test.cpp
@@ -40,354 +40,350 @@
namespace {
- using mongo::BSONObj;
- using mongo::LogBuilder;
- using mongo::ModifierAddToSet;
- using mongo::ModifierInterface;
- using mongo::Status;
- using mongo::StringData;
- using mongo::fromjson;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /** Helper to build and manipulate a $addToSet mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj)
- : _modObj(modObj)
- , _mod() {
- ASSERT_OK(_mod.init(_modObj["$addToSet"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierAddToSet& mod() {
- return _mod;
- }
-
- private:
- BSONObj _modObj;
- ModifierAddToSet _mod;
- };
-
- TEST(Init, FailToInitWithInvalidValue) {
- BSONObj modObj;
- ModifierAddToSet mod;
-
- modObj = fromjson("{ $addToSet : { a : { 'x.$.y' : 'bad' } } }");
- ASSERT_NOT_OK(mod.init(modObj["$addToSet"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- modObj = fromjson("{ $addToSet : { a : { $each : [ { 'x.$.y' : 'bad' } ] } } }");
- ASSERT_NOT_OK(mod.init(modObj["$addToSet"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // An int is not valid after $each
- modObj = fromjson("{ $addToSet : { a : { $each : 0 } } }");
- ASSERT_NOT_OK(mod.init(modObj["$addToSet"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // An object is not valid after $each
- modObj = fromjson("{ $addToSet : { a : { $each : { a : 1 } } } }");
- ASSERT_NOT_OK(mod.init(modObj["$addToSet"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+using mongo::BSONObj;
+using mongo::LogBuilder;
+using mongo::ModifierAddToSet;
+using mongo::ModifierInterface;
+using mongo::Status;
+using mongo::StringData;
+using mongo::fromjson;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+
+/** Helper to build and manipulate a $addToSet mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj) : _modObj(modObj), _mod() {
+ ASSERT_OK(_mod.init(_modObj["$addToSet"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(Init, ParsesSimple) {
- Mod(fromjson("{ $addToSet : { a : 1 } }"));
- Mod(fromjson("{ $addToSet : { a : 'foo' } }"));
- Mod(fromjson("{ $addToSet : { a : {} } }"));
- Mod(fromjson("{ $addToSet : { a : { x : 1 } } }"));
- Mod(fromjson("{ $addToSet : { a : [] } }"));
- Mod(fromjson("{ $addToSet : { a : [1, 2] } } }"));
- Mod(fromjson("{ $addToSet : { 'a.b' : 1 } }"));
- Mod(fromjson("{ $addToSet : { 'a.b' : 'foo' } }"));
- Mod(fromjson("{ $addToSet : { 'a.b' : {} } }"));
- Mod(fromjson("{ $addToSet : { 'a.b' : { x : 1} } }"));
- Mod(fromjson("{ $addToSet : { 'a.b' : [] } }"));
- Mod(fromjson("{ $addToSet : { 'a.b' : [1, 2] } } }"));
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(Init, ParsesEach) {
- Mod(fromjson("{ $addToSet : { a : { $each : [] } } }"));
- Mod(fromjson("{ $addToSet : { a : { $each : [ 1 ] } } }"));
- Mod(fromjson("{ $addToSet : { a : { $each : [ 1, 2 ] } } }"));
- Mod(fromjson("{ $addToSet : { a : { $each : [ 1, 2, 1 ] } } }"));
- Mod(fromjson("{ $addToSet : { a : { $each : [ {} ] } } }"));
- Mod(fromjson("{ $addToSet : { a : { $each : [ { x : 1 } ] } } }"));
- Mod(fromjson("{ $addToSet : { a : { $each : [ { x : 1 }, { y : 2 } ] } } }"));
- Mod(fromjson("{ $addToSet : { a : { $each : [ { x : 1 }, { y : 2 }, { x : 1 } ] } } }"));
+ Status apply() const {
+ return _mod.apply();
}
- TEST(SimpleMod, PrepareOKTargetNotFound) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(SimpleMod, PrepareOKTargetFound) {
- Document doc(fromjson("{ a : [ 1 ] }"));
- Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1 ] } }"), logDoc);
- }
-
- TEST(SimpleMod, PrepareInvalidTargetNumber) {
- Document doc(fromjson("{ a : 1 }"));
- Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
- }
-
- TEST(SimpleMod, PrepareInvalidTarget) {
- Document doc(fromjson("{ a : {} }"));
- Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
- }
-
- TEST(SimpleMod, ApplyAndLogEmptyDocument) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 1 ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1 ] } }"), logDoc);
- }
-
- TEST(SimpleMod, ApplyAndLogEmptyArray) {
- Document doc(fromjson("{ a : [] }"));
- Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 1 ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1 ] } }"), logDoc);
- }
-
- TEST(SimpleEachMod, ApplyAndLogEmptyDocument) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2, 3] } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 1, 2, 3 ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
- }
-
- TEST(SimpleEachMod, ApplyAndLogEmptyArray) {
- Document doc(fromjson("{ a : [] }"));
- Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2, 3] } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 1, 2, 3 ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(SimpleMod, ApplyAndLogPopulatedArray) {
- Document doc(fromjson("{ a : [ 'x' ] }"));
- Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 'x', 1 ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 'x', 1 ] } }"), logDoc);
- }
-
- TEST(SimpleEachMod, ApplyAndLogPopulatedArray) {
- Document doc(fromjson("{ a : [ 'x' ] }"));
- Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2, 3] } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 'x', 1, 2, 3 ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 'x', 1, 2, 3 ] } }"), logDoc);
- }
-
- TEST(NoOp, AddOneExistingIsNoOp) {
- Document doc(fromjson("{ a : [ 1, 2, 3 ] }"));
- Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
- }
-
- TEST(NoOp, AddSeveralExistingIsNoOp) {
- Document doc(fromjson("{ a : [ 1, 2, 3 ] }"));
- Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2] } } }"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
- }
-
- TEST(NoOp, AddAllExistingIsNoOp) {
- Document doc(fromjson("{ a : [ 1, 2, 3 ] }"));
- Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2, 3] } } }"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
- }
-
- TEST(Deduplication, ExistingDuplicatesArePreserved) {
- Document doc(fromjson("{ a : [ 1, 1, 2, 1, 2, 2 ] }"));
- Mod mod(fromjson("{ $addToSet : { a : 3 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 1, 1, 2, 1, 2, 2, 3] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 1, 2, 1, 2, 2, 3] } }"), logDoc);
- }
-
- TEST(Deduplication, NewDuplicatesAreElided) {
- Document doc(fromjson("{ a : [ 1, 1, 2, 1, 2, 2 ] }"));
- Mod mod(fromjson("{ $addToSet : { a : { $each : [ 4, 1, 3, 2, 3, 1, 3, 3, 2, 4] } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 1, 1, 2, 1, 2, 2, 4, 3] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 1, 2, 1, 2, 2, 4, 3] } }"), logDoc);
- }
-
- TEST(Regressions, SERVER_12848) {
- // Proof that the mod works ok (the real issue was in validate).
-
- Document doc(fromjson("{ _id : 1, a : [ 1, [ ] ] }"));
- Mod mod(fromjson("{ $addToSet : { 'a.1' : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ _id : 1, a : [ 1, [ 1 ] ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { 'a.1' : [ 1 ] } }"), logDoc);
+ ModifierAddToSet& mod() {
+ return _mod;
}
-} // namespace
+private:
+ BSONObj _modObj;
+ ModifierAddToSet _mod;
+};
+
+TEST(Init, FailToInitWithInvalidValue) {
+ BSONObj modObj;
+ ModifierAddToSet mod;
+
+ modObj = fromjson("{ $addToSet : { a : { 'x.$.y' : 'bad' } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$addToSet"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+ modObj = fromjson("{ $addToSet : { a : { $each : [ { 'x.$.y' : 'bad' } ] } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$addToSet"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+
+ // An int is not valid after $each
+ modObj = fromjson("{ $addToSet : { a : { $each : 0 } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$addToSet"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+
+ // An object is not valid after $each
+ modObj = fromjson("{ $addToSet : { a : { $each : { a : 1 } } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$addToSet"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, ParsesSimple) {
+ Mod(fromjson("{ $addToSet : { a : 1 } }"));
+ Mod(fromjson("{ $addToSet : { a : 'foo' } }"));
+ Mod(fromjson("{ $addToSet : { a : {} } }"));
+ Mod(fromjson("{ $addToSet : { a : { x : 1 } } }"));
+ Mod(fromjson("{ $addToSet : { a : [] } }"));
+ Mod(fromjson("{ $addToSet : { a : [1, 2] } } }"));
+ Mod(fromjson("{ $addToSet : { 'a.b' : 1 } }"));
+ Mod(fromjson("{ $addToSet : { 'a.b' : 'foo' } }"));
+ Mod(fromjson("{ $addToSet : { 'a.b' : {} } }"));
+ Mod(fromjson("{ $addToSet : { 'a.b' : { x : 1} } }"));
+ Mod(fromjson("{ $addToSet : { 'a.b' : [] } }"));
+ Mod(fromjson("{ $addToSet : { 'a.b' : [1, 2] } } }"));
+}
+
+TEST(Init, ParsesEach) {
+ Mod(fromjson("{ $addToSet : { a : { $each : [] } } }"));
+ Mod(fromjson("{ $addToSet : { a : { $each : [ 1 ] } } }"));
+ Mod(fromjson("{ $addToSet : { a : { $each : [ 1, 2 ] } } }"));
+ Mod(fromjson("{ $addToSet : { a : { $each : [ 1, 2, 1 ] } } }"));
+ Mod(fromjson("{ $addToSet : { a : { $each : [ {} ] } } }"));
+ Mod(fromjson("{ $addToSet : { a : { $each : [ { x : 1 } ] } } }"));
+ Mod(fromjson("{ $addToSet : { a : { $each : [ { x : 1 }, { y : 2 } ] } } }"));
+ Mod(fromjson("{ $addToSet : { a : { $each : [ { x : 1 }, { y : 2 }, { x : 1 } ] } } }"));
+}
+
+TEST(SimpleMod, PrepareOKTargetNotFound) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(SimpleMod, PrepareOKTargetFound) {
+ Document doc(fromjson("{ a : [ 1 ] }"));
+ Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1 ] } }"), logDoc);
+}
+
+TEST(SimpleMod, PrepareInvalidTargetNumber) {
+ Document doc(fromjson("{ a : 1 }"));
+ Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, PrepareInvalidTarget) {
+ Document doc(fromjson("{ a : {} }"));
+ Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, ApplyAndLogEmptyDocument) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 1 ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1 ] } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogEmptyArray) {
+ Document doc(fromjson("{ a : [] }"));
+ Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 1 ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1 ] } }"), logDoc);
+}
+
+TEST(SimpleEachMod, ApplyAndLogEmptyDocument) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2, 3] } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 1, 2, 3 ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
+}
+
+TEST(SimpleEachMod, ApplyAndLogEmptyArray) {
+ Document doc(fromjson("{ a : [] }"));
+ Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2, 3] } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 1, 2, 3 ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogPopulatedArray) {
+ Document doc(fromjson("{ a : [ 'x' ] }"));
+ Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 'x', 1 ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 'x', 1 ] } }"), logDoc);
+}
+
+TEST(SimpleEachMod, ApplyAndLogPopulatedArray) {
+ Document doc(fromjson("{ a : [ 'x' ] }"));
+ Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2, 3] } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 'x', 1, 2, 3 ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 'x', 1, 2, 3 ] } }"), logDoc);
+}
+
+TEST(NoOp, AddOneExistingIsNoOp) {
+ Document doc(fromjson("{ a : [ 1, 2, 3 ] }"));
+ Mod mod(fromjson("{ $addToSet : { a : 1 } }"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
+}
+
+TEST(NoOp, AddSeveralExistingIsNoOp) {
+ Document doc(fromjson("{ a : [ 1, 2, 3 ] }"));
+ Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2] } } }"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
+}
+
+TEST(NoOp, AddAllExistingIsNoOp) {
+ Document doc(fromjson("{ a : [ 1, 2, 3 ] }"));
+ Mod mod(fromjson("{ $addToSet : { a : { $each : [1, 2, 3] } } }"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 2, 3 ] } }"), logDoc);
+}
+
+TEST(Deduplication, ExistingDuplicatesArePreserved) {
+ Document doc(fromjson("{ a : [ 1, 1, 2, 1, 2, 2 ] }"));
+ Mod mod(fromjson("{ $addToSet : { a : 3 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 1, 1, 2, 1, 2, 2, 3] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 1, 2, 1, 2, 2, 3] } }"), logDoc);
+}
+
+TEST(Deduplication, NewDuplicatesAreElided) {
+ Document doc(fromjson("{ a : [ 1, 1, 2, 1, 2, 2 ] }"));
+ Mod mod(fromjson("{ $addToSet : { a : { $each : [ 4, 1, 3, 2, 3, 1, 3, 3, 2, 4] } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 1, 1, 2, 1, 2, 2, 4, 3] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [ 1, 1, 2, 1, 2, 2, 4, 3] } }"), logDoc);
+}
+
+TEST(Regressions, SERVER_12848) {
+ // Proof that the mod works ok (the real issue was in validate).
+
+ Document doc(fromjson("{ _id : 1, a : [ 1, [ ] ] }"));
+ Mod mod(fromjson("{ $addToSet : { 'a.1' : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ _id : 1, a : [ 1, [ 1 ] ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { 'a.1' : [ 1 ] } }"), logDoc);
+}
+
+} // namespace
diff --git a/src/mongo/db/ops/modifier_bit.cpp b/src/mongo/db/ops/modifier_bit.cpp
index c6d221875f9..bdddb9415f2 100644
--- a/src/mongo/db/ops/modifier_bit.cpp
+++ b/src/mongo/db/ops/modifier_bit.cpp
@@ -38,263 +38,233 @@
namespace mongo {
- namespace mb = mutablebson;
- namespace str = mongoutils::str;
+namespace mb = mutablebson;
+namespace str = mongoutils::str;
- struct ModifierBit::PreparedState {
+struct ModifierBit::PreparedState {
+ PreparedState(mutablebson::Document& doc)
+ : doc(doc), idxFound(0), elemFound(doc.end()), noOp(false) {}
- PreparedState(mutablebson::Document& doc)
- : doc(doc)
- , idxFound(0)
- , elemFound(doc.end())
- , noOp(false) {
- }
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
+
+    // Index in _fieldRef for which an Element exists in the document.
+ size_t idxFound;
- // Document that is going to be changed.
- mutablebson::Document& doc;
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element elemFound;
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
+ // Value to be applied.
+ SafeNum newValue;
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element elemFound;
+ // True if this update is a no-op
+ bool noOp;
+};
- // Value to be applied.
- SafeNum newValue;
+ModifierBit::ModifierBit() : ModifierInterface(), _fieldRef(), _posDollar(0), _ops() {}
- // True if this update is a no-op
- bool noOp;
- };
+ModifierBit::~ModifierBit() {}
- ModifierBit::ModifierBit()
- : ModifierInterface ()
- , _fieldRef()
- , _posDollar(0)
- , _ops() {
+Status ModifierBit::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ // Perform standard field name and updateable checks.
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
}
- ModifierBit::~ModifierBit() {
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
+
+ if (positional)
+ *positional = foundDollar;
+
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
}
- Status ModifierBit::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
+ if (modExpr.type() != mongo::Object)
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The $bit modifier is not compatible with a "
+ << typeName(modExpr.type())
+ << ". You must pass in an embedded document: "
+ "{$bit: {field: {and/or/xor: #}}");
- // Perform standard field name and updateable checks.
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
+ BSONObjIterator opsIterator(modExpr.embeddedObject());
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
+ while (opsIterator.more()) {
+ BSONElement curOp = opsIterator.next();
- if (positional)
- *positional = foundDollar;
+ const StringData payloadFieldName = curOp.fieldName();
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
- }
+ SafeNumOp op = NULL;
- if (modExpr.type() != mongo::Object)
+ if (payloadFieldName == "and") {
+ op = &SafeNum::bitAnd;
+ } else if (payloadFieldName == "or") {
+ op = &SafeNum::bitOr;
+ } else if (payloadFieldName == "xor") {
+ op = &SafeNum::bitXor;
+ } else {
return Status(ErrorCodes::BadValue,
- str::stream() << "The $bit modifier is not compatible with a "
- << typeName(modExpr.type())
- << ". You must pass in an embedded document: "
- "{$bit: {field: {and/or/xor: #}}");
-
- BSONObjIterator opsIterator(modExpr.embeddedObject());
-
- while (opsIterator.more()) {
- BSONElement curOp = opsIterator.next();
-
- const StringData payloadFieldName = curOp.fieldName();
-
- SafeNumOp op = NULL;
-
- if (payloadFieldName == "and") {
- op = &SafeNum::bitAnd;
- }
- else if (payloadFieldName == "or") {
- op = &SafeNum::bitOr;
- }
- else if (payloadFieldName == "xor") {
- op = &SafeNum::bitXor;
- }
- else {
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "The $bit modifier only supports 'and', 'or', and 'xor', not '"
- << payloadFieldName
- << "' which is an unknown operator: {" << curOp << "}");
- }
-
- if ((curOp.type() != mongo::NumberInt) &&
- (curOp.type() != mongo::NumberLong))
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "The $bit modifier field must be an Integer(32/64 bit); a '"
- << typeName(curOp.type())
- << "' is not supported here: {" << curOp << "}");
-
- const OpEntry entry = {SafeNum(curOp), op};
- _ops.push_back(entry);
+ str::stream()
+ << "The $bit modifier only supports 'and', 'or', and 'xor', not '"
+ << payloadFieldName << "' which is an unknown operator: {" << curOp
+ << "}");
}
- dassert(!_ops.empty());
+ if ((curOp.type() != mongo::NumberInt) && (curOp.type() != mongo::NumberLong))
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "The $bit modifier field must be an Integer(32/64 bit); a '"
+ << typeName(curOp.type()) << "' is not supported here: {" << curOp
+ << "}");
- return Status::OK();
+ const OpEntry entry = {SafeNum(curOp), op};
+ _ops.push_back(entry);
}
- Status ModifierBit::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
-
- _preparedState.reset(new PreparedState(root.getDocument()));
-
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_posDollar) {
- if (matchedField.empty()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
- }
- _fieldRef.setPart(_posDollar, matchedField);
- }
+ dassert(!_ops.empty());
- // Locate the field name in 'root'.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
+ return Status::OK();
+}
+Status ModifierBit::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(root.getDocument()));
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->elemFound = root.getDocument().end();
- }
- else if (!status.isOK()) {
- return status;
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_posDollar) {
+ if (matchedField.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
}
+ _fieldRef.setPart(_posDollar, matchedField);
+ }
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_fieldRef;
-
- //
- // in-place and no-op logic
- //
-
- // If the field path is not fully present, then this mod cannot be in place, nor is a
- // noOp.
- if (!_preparedState->elemFound.ok() ||
- _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
- // If no target element exists, the value we will write is the result of applying
- // the operation to a zero-initialized integer element.
- _preparedState->newValue = apply(SafeNum(static_cast<int>(0)));
- return Status::OK();
- }
+ // Locate the field name in 'root'.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->idxFound, &_preparedState->elemFound);
- if (!_preparedState->elemFound.isIntegral()) {
- mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Cannot apply $bit to a value of non-integral type."
- << idElem.toString()
- << " has the field " << _preparedState->elemFound.getFieldName()
- << " of non-integer type "
- << typeName(_preparedState->elemFound.getType()));
- }
- const SafeNum currentValue = _preparedState->elemFound.getValueSafeNum();
+ // FindLongestPrefix may say the path does not exist at all, which is fine here, or
+ // that the path was not viable or otherwise wrong, in which case, the mod cannot
+ // proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ _preparedState->elemFound = root.getDocument().end();
+ } else if (!status.isOK()) {
+ return status;
+ }
- // Apply the op over the existing value and the mod value, and capture the result.
- _preparedState->newValue = apply(currentValue);
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_fieldRef;
- if (!_preparedState->newValue.isValid()) {
- // TODO: Include list of ops, if that is easy, at some future point.
- return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to apply $bit operations to current value: "
- << currentValue.debugString());
- }
- // If the values are identical (same type, same value), then this is a no-op.
- if (_preparedState->newValue.isIdentical(currentValue)) {
- _preparedState->noOp = execInfo->noOp = true;
- return Status::OK();
- }
+ //
+ // in-place and no-op logic
+ //
+    // If the field path is not fully present, then this mod cannot be applied in place, nor is
+    // it a no-op.
+ if (!_preparedState->elemFound.ok() || _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ // If no target element exists, the value we will write is the result of applying
+ // the operation to a zero-initialized integer element.
+ _preparedState->newValue = apply(SafeNum(static_cast<int>(0)));
return Status::OK();
}
- Status ModifierBit::apply() const {
- dassert(_preparedState->noOp == false);
-
- // If there's no need to create any further field part, the $bit is simply a value
- // assignment.
- if (_preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_fieldRef.numParts() - 1)) {
- return _preparedState->elemFound.setValueSafeNum(_preparedState->newValue);
- }
-
- //
- // Complete document path logic
- //
+ if (!_preparedState->elemFound.isIntegral()) {
+ mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Cannot apply $bit to a value of non-integral type."
+ << idElem.toString() << " has the field "
+ << _preparedState->elemFound.getFieldName()
+ << " of non-integer type "
+ << typeName(_preparedState->elemFound.getType()));
+ }
- // Creates the final element that's going to be $set in 'doc'.
- mutablebson::Document& doc = _preparedState->doc;
- StringData lastPart = _fieldRef.getPart(_fieldRef.numParts() - 1);
- mutablebson::Element elemToSet = doc.makeElementSafeNum(lastPart, _preparedState->newValue);
- if (!elemToSet.ok()) {
- return Status(ErrorCodes::InternalError, "can't create new element");
- }
+ const SafeNum currentValue = _preparedState->elemFound.getValueSafeNum();
- // Now, we can be in two cases here, as far as attaching the element being set goes:
- // (a) none of the parts in the element's path exist, or (b) some parts of the path
- // exist but not all.
- if (!_preparedState->elemFound.ok()) {
- _preparedState->elemFound = doc.root();
- _preparedState->idxFound = 0;
- }
- else {
- _preparedState->idxFound++;
- }
+ // Apply the op over the existing value and the mod value, and capture the result.
+ _preparedState->newValue = apply(currentValue);
- // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
- return pathsupport::createPathAt(_fieldRef,
- _preparedState->idxFound,
- _preparedState->elemFound,
- elemToSet);
+ if (!_preparedState->newValue.isValid()) {
+ // TODO: Include list of ops, if that is easy, at some future point.
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Failed to apply $bit operations to current value: "
+ << currentValue.debugString());
+ }
+ // If the values are identical (same type, same value), then this is a no-op.
+ if (_preparedState->newValue.isIdentical(currentValue)) {
+ _preparedState->noOp = execInfo->noOp = true;
+ return Status::OK();
}
- Status ModifierBit::log(LogBuilder* logBuilder) const {
+ return Status::OK();
+}
- mutablebson::Element logElement = logBuilder->getDocument().makeElementSafeNum(
- _fieldRef.dottedField(),
- _preparedState->newValue);
+Status ModifierBit::apply() const {
+ dassert(_preparedState->noOp == false);
- if (!logElement.ok()) {
- return Status(ErrorCodes::InternalError,
- str::stream() << "Could not append entry to $bit oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
- << _preparedState->newValue.debugString() );
- }
- return logBuilder->addToSets(logElement);
+ // If there's no need to create any further field part, the $bit is simply a value
+ // assignment.
+ if (_preparedState->elemFound.ok() && _preparedState->idxFound == (_fieldRef.numParts() - 1)) {
+ return _preparedState->elemFound.setValueSafeNum(_preparedState->newValue);
+ }
+ //
+ // Complete document path logic
+ //
+
+ // Creates the final element that's going to be $set in 'doc'.
+ mutablebson::Document& doc = _preparedState->doc;
+ StringData lastPart = _fieldRef.getPart(_fieldRef.numParts() - 1);
+ mutablebson::Element elemToSet = doc.makeElementSafeNum(lastPart, _preparedState->newValue);
+ if (!elemToSet.ok()) {
+ return Status(ErrorCodes::InternalError, "can't create new element");
}
- SafeNum ModifierBit::apply(SafeNum value) const {
- OpEntries::const_iterator where = _ops.begin();
- const OpEntries::const_iterator end = _ops.end();
- for (; where != end; ++where)
- value = (value.*(where->op))(where->val);
- return value;
+ // Now, we can be in two cases here, as far as attaching the element being set goes:
+ // (a) none of the parts in the element's path exist, or (b) some parts of the path
+ // exist but not all.
+ if (!_preparedState->elemFound.ok()) {
+ _preparedState->elemFound = doc.root();
+ _preparedState->idxFound = 0;
+ } else {
+ _preparedState->idxFound++;
}
-} // namespace mongo
+ // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
+ return pathsupport::createPathAt(
+ _fieldRef, _preparedState->idxFound, _preparedState->elemFound, elemToSet);
+}
+
+Status ModifierBit::log(LogBuilder* logBuilder) const {
+ mutablebson::Element logElement = logBuilder->getDocument().makeElementSafeNum(
+ _fieldRef.dottedField(), _preparedState->newValue);
+
+ if (!logElement.ok()) {
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Could not append entry to $bit oplog entry: "
+ << "set '" << _fieldRef.dottedField() << "' -> "
+ << _preparedState->newValue.debugString());
+ }
+ return logBuilder->addToSets(logElement);
+}
+
+SafeNum ModifierBit::apply(SafeNum value) const {
+ OpEntries::const_iterator where = _ops.begin();
+ const OpEntries::const_iterator end = _ops.end();
+ for (; where != end; ++where)
+ value = (value.*(where->op))(where->val);
+ return value;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_bit.h b/src/mongo/db/ops/modifier_bit.h
index 812ac8f41e1..7ecc416a42d 100644
--- a/src/mongo/db/ops/modifier_bit.h
+++ b/src/mongo/db/ops/modifier_bit.h
@@ -39,61 +39,57 @@
namespace mongo {
- class LogBuilder;
+class LogBuilder;
- class ModifierBit : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierBit);
+class ModifierBit : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierBit);
- public:
+public:
+ ModifierBit();
+ virtual ~ModifierBit();
- ModifierBit();
- virtual ~ModifierBit();
+ /**
+ * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $bit mod such as
+ * {$bit: {<field: { [and|or] : <value>}}. init() extracts the field name, the
+ * operation subtype, and the value to be assigned to it from 'modExpr'. It returns OK
+ * if successful or a status describing the error.
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
- /**
- * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $bit mod such as
- * {$bit: {<field: { [and|or] : <value>}}. init() extracts the field name, the
- * operation subtype, and the value to be assigned to it from 'modExpr'. It returns OK
- * if successful or a status describing the error.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
+ /** Validates the potential application of the init'ed mod to the given Element and
+ * configures the internal state of the mod as necessary.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
- /** Validates the potential application of the init'ed mod to the given Element and
- * configures the internal state of the mod as necessary.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
+ /** Updates the Element used in prepare with the effects of the $bit operation */
+ virtual Status apply() const;
- /** Updates the Element used in prepare with the effects of the $bit operation */
- virtual Status apply() const;
+ /** Converts the effects of this $bit into an equivalent $set */
+ virtual Status log(LogBuilder* logBuilder) const;
- /** Converts the effects of this $bit into an equivalent $set */
- virtual Status log(LogBuilder* logBuilder) const;
+private:
+ SafeNum apply(SafeNum value) const;
- private:
- SafeNum apply(SafeNum value) const;
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
+ // 0 or index for $-positional in _fieldRef.
+ size_t _posDollar;
- // 0 or index for $-positional in _fieldRef.
- size_t _posDollar;
+ // The operator on SafeNum that we will invoke.
+ typedef SafeNum (SafeNum::*SafeNumOp)(const SafeNum&) const;
- // The operator on SafeNum that we will invoke.
- typedef SafeNum (SafeNum::* SafeNumOp)(const SafeNum&) const;
-
- struct OpEntry {
- SafeNum val;
- SafeNumOp op;
- };
+ struct OpEntry {
+ SafeNum val;
+ SafeNumOp op;
+ };
- typedef std::vector<OpEntry> OpEntries;
+ typedef std::vector<OpEntry> OpEntries;
- OpEntries _ops;
+ OpEntries _ops;
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_bit_test.cpp b/src/mongo/db/ops/modifier_bit_test.cpp
index 3d19808757e..59e13f89e24 100644
--- a/src/mongo/db/ops/modifier_bit_test.cpp
+++ b/src/mongo/db/ops/modifier_bit_test.cpp
@@ -40,697 +40,693 @@
namespace {
- using mongo::BSONObj;
- using mongo::LogBuilder;
- using mongo::ModifierBit;
- using mongo::ModifierInterface;
- using mongo::Status;
- using mongo::StringData;
- using mongo::fromjson;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /** Helper to build and manipulate a $bit mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj)
- : _modObj(modObj)
- , _mod() {
- ASSERT_OK(_mod.init(_modObj["$bit"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierBit& mod() { return _mod; }
-
- private:
- BSONObj _modObj;
- ModifierBit _mod;
- };
-
-
- TEST(Init, FailToInitWithInvalidValue) {
- BSONObj modObj;
- ModifierBit mod;
-
- // String is an invalid $bit argument
- modObj = fromjson("{ $bit : { a : '' } }");
- ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // Array is an invalid $bit argument
- modObj = fromjson("{ $bit : { a : [] } }");
- ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // An object with value not in ('and', 'or') is an invalid $bit argument
- modObj = fromjson("{ $bit : { a : { foo : 4 } } }");
- ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // The argument to the sub-operator must be numeric
- modObj = fromjson("{ $bit : { a : { or : [] } } }");
- ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- modObj = fromjson("{ $bit : { a : { or : 'foo' } } }");
- ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // The argument to the sub-operator must be integral
- modObj = fromjson("{ $bit : { a : { or : 1.0 } } }");
- ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+using mongo::BSONObj;
+using mongo::LogBuilder;
+using mongo::ModifierBit;
+using mongo::ModifierInterface;
+using mongo::Status;
+using mongo::StringData;
+using mongo::fromjson;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
- TEST(Init, ParsesAndInt) {
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<int>(1)))));
- }
+/** Helper to build and manipulate a $bit mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
- TEST(Init, ParsesOrInt) {
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(1)))));
+ explicit Mod(BSONObj modObj) : _modObj(modObj), _mod() {
+ ASSERT_OK(_mod.init(_modObj["$bit"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(Init, ParsesXorInt) {
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<int>(1)))));
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(Init, ParsesAndLong) {
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(1)))));
+ Status apply() const {
+ return _mod.apply();
}
- TEST(Init, ParsesOrLong) {
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(1)))));
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(Init, ParsesXorLong) {
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(1)))));
+ ModifierBit& mod() {
+ return _mod;
}
- TEST(SimpleMod, PrepareOKTargetNotFound) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $bit : { a : { and : 1 } } }"));
+private:
+ BSONObj _modObj;
+ ModifierBit _mod;
+};
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- }
+TEST(Init, FailToInitWithInvalidValue) {
+ BSONObj modObj;
+ ModifierBit mod;
- TEST(SimpleMod, PrepareOKTargetFound) {
- Document doc(fromjson("{ a : 1 }"));
- Mod mod(fromjson("{ $bit : { a : { and : 1 } } }"));
+ // String is an invalid $bit argument
+ modObj = fromjson("{ $bit : { a : '' } }");
+ ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- ModifierInterface::ExecInfo execInfo;
+ // Array is an invalid $bit argument
+ modObj = fromjson("{ $bit : { a : [] } }");
+ ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
+ // An object with value not in ('and', 'or') is an invalid $bit argument
+ modObj = fromjson("{ $bit : { a : { foo : 4 } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
- }
+ // The argument to the sub-operator must be numeric
+ modObj = fromjson("{ $bit : { a : { or : [] } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- TEST(SimpleMod, PrepareSimpleNonNumericObject) {
- Document doc(fromjson("{ a : {} }"));
- Mod mod(fromjson("{ $bit : { a : { or : 1 } } }"));
+ modObj = fromjson("{ $bit : { a : { or : 'foo' } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
- }
+ // The argument to the sub-operator must be integral
+ modObj = fromjson("{ $bit : { a : { or : 1.0 } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$bit"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(SimpleMod, PrepareSimpleNonNumericArray) {
+TEST(Init, ParsesAndInt) {
+ Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<int>(1)))));
+}
- Document doc(fromjson("{ a : [] }"));
- Mod mod(fromjson("{ $bit : { a : { and : 1 } } }"));
+TEST(Init, ParsesOrInt) {
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(1)))));
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
- }
+TEST(Init, ParsesXorInt) {
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<int>(1)))));
+}
- TEST(SimpleMod, PrepareSimpleNonNumericString) {
- Document doc(fromjson("{ a : '' }"));
- Mod mod(fromjson("{ $bit : { a : { or : 1 } } }"));
+TEST(Init, ParsesAndLong) {
+ Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(1)))));
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
- }
+TEST(Init, ParsesOrLong) {
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(1)))));
+}
- TEST(SimpleMod, ApplyAndLogEmptyDocumentAnd) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $bit : { a : { and : 1 } } }"));
+TEST(Init, ParsesXorLong) {
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(1)))));
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
+TEST(SimpleMod, PrepareOKTargetNotFound) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $bit : { a : { and : 1 } } }"));
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 0 }"), doc);
+ ModifierInterface::ExecInfo execInfo;
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 0 } }"), logDoc);
- }
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+}
- TEST(SimpleMod, ApplyAndLogEmptyDocumentOr) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $bit : { a : { or : 1 } } }"));
+TEST(SimpleMod, PrepareOKTargetFound) {
+ Document doc(fromjson("{ a : 1 }"));
+ Mod mod(fromjson("{ $bit : { a : { and : 1 } } }"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
- }
-
- TEST(SimpleMod, ApplyAndLogEmptyDocumentXor) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $bit : { a : { xor : 1 } } }"));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
+TEST(SimpleMod, PrepareSimpleNonNumericObject) {
+ Document doc(fromjson("{ a : {} }"));
+ Mod mod(fromjson("{ $bit : { a : { or : 1 } } }"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, PrepareSimpleNonNumericArray) {
+ Document doc(fromjson("{ a : [] }"));
+ Mod mod(fromjson("{ $bit : { a : { and : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, PrepareSimpleNonNumericString) {
+ Document doc(fromjson("{ a : '' }"));
+ Mod mod(fromjson("{ $bit : { a : { or : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, ApplyAndLogEmptyDocumentAnd) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $bit : { a : { and : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 0 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 0 } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogEmptyDocumentOr) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $bit : { a : { or : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogEmptyDocumentXor) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $bit : { a : { xor : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogSimpleDocumentAnd) {
+ Document doc(fromjson("{ a : 5 }"));
+ Mod mod(fromjson("{ $bit : { a : { and : 6 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 4 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 4 } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogSimpleDocumentOr) {
+ Document doc(fromjson("{ a : 5 }"));
+ Mod mod(fromjson("{ $bit : { a : { or : 6 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 7 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 7 } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogSimpleDocumentXor) {
+ Document doc(fromjson("{ a : 5 }"));
+ Mod mod(fromjson("{ $bit : { a : { xor : 6 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 3 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 3 } }"), logDoc);
+}
+
+TEST(InPlace, IntToIntAndIsInPlace) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<int>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(1))), logDoc);
+}
+
+TEST(InPlace, IntToIntOrIsInPlace) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(1))), logDoc);
+}
+
+TEST(InPlace, IntToIntXorIsInPlace) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<int>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(0))), logDoc);
+}
+
+TEST(InPlace, LongToLongAndIsInPlace) {
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(1))), logDoc);
+}
+
+TEST(InPlace, LongToLongOrIsInPlace) {
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(1))), logDoc);
+}
+
+TEST(InPlace, LongToLongXorIsInPlace) {
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(0))), logDoc);
+}
+
+TEST(InPlace, IntToLongAndIsNotInPlace) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(InPlace, IntToLongOrIsNotInPlace) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(InPlace, IntToLongXorIsNotInPlace) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(NoOp, IntAnd) {
+ Document doc(BSON("a" << static_cast<int>(0xABCD1234U)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<int>(0xFFFFFFFFU)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(0xABCD1234U))), logDoc);
+}
+
+TEST(NoOp, IntOr) {
+ Document doc(BSON("a" << static_cast<int>(0xABCD1234U)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(0x0U)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(0xABCD1234U))), logDoc);
+}
+
+TEST(NoOp, IntXor) {
+ Document doc(BSON("a" << static_cast<int>(0xABCD1234U)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<int>(0x0U)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(0xABCD1234U))), logDoc);
+}
+
+TEST(NoOp, LongAnd) {
+ Document doc(BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL)));
+ Mod mod(
+ BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(0xFFFFFFFFFFFFFFFFULL)))));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL))),
+ logDoc);
+}
+
+TEST(NoOp, LongOr) {
+ Document doc(BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(0x0ULL)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL))),
+ logDoc);
+}
+
+TEST(NoOp, LongXor) {
+ Document doc(BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(0x0ULL)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL))),
+ logDoc);
+}
+
+TEST(Upcasting, UpcastIntToLongAnd) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
+ ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
+}
+
+TEST(Upcasting, UpcastIntToLongOr) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
+ ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
+}
+
+TEST(Upcasting, UpcastIntToLongXor) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(0)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
+ ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
+}
+
+TEST(Upcasting, LongsStayLongsAnd) {
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<int>(2)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 0 }"), doc);
+ ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
+}
+
+TEST(Upcasting, LongsStayLongsOr) {
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(2)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 3 }"), doc);
+ ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
+}
+
+TEST(Upcasting, LongsStayLongsXor) {
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<int>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 0 }"), doc);
+ ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
+}
+
+// The following tests are re-created from the previous $bit tests in updatetests.cpp. They
+// are probably redundant with the tests above in various ways.
+
+TEST(DbUpdateTests, BitRewriteExistingField) {
+ Document doc(BSON("a" << static_cast<int>(0)));
+ Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(BSON("a" << static_cast<int>(1)), doc);
+ ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(1))), logDoc);
+}
+
+TEST(DbUpdateTests, BitRewriteNonExistingField) {
+ Document doc(BSON("a" << static_cast<int>(0)));
+ Mod mod(BSON("$bit" << BSON("b" << BSON("or" << static_cast<int>(1)))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(BSON("a" << static_cast<int>(0) << "b" << static_cast<int>(1)), doc);
+ ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("b" << static_cast<int>(1))), logDoc);
+}
+
+TEST(DbUpdateTests, Bit1_1) {
+ Document doc(BSON("_id" << 1 << "x" << 3));
+ Mod mod(BSON("$bit" << BSON("x" << BSON("and" << 2))));
+ const BSONObj result(BSON("_id" << 1 << "x" << (3 & 2)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ if (!execInfo.noOp)
ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
- }
+ ASSERT_EQUALS(result, doc);
- TEST(SimpleMod, ApplyAndLogSimpleDocumentAnd) {
- Document doc(fromjson("{ a : 5 }"));
- Mod mod(fromjson("{ $bit : { a : { and : 6 } } }"));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("x" << (3 & 2))), logDoc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
+TEST(DbUpdateTests, Bit1_2) {
+ Document doc(BSON("_id" << 1 << "x" << 1));
+ Mod mod(BSON("$bit" << BSON("x" << BSON("or" << 4))));
+ const BSONObj result(BSON("_id" << 1 << "x" << (1 | 4)));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ if (!execInfo.noOp)
ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 4 }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 4 } }"), logDoc);
- }
-
- TEST(SimpleMod, ApplyAndLogSimpleDocumentOr) {
- Document doc(fromjson("{ a : 5 }"));
- Mod mod(fromjson("{ $bit : { a : { or : 6 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(result, doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("x" << (1 | 4))), logDoc);
+}
+
+TEST(DbUpdateTests, Bit1_3) {
+ Document doc(BSON("_id" << 1 << "x" << 3));
+ Mod mod1(BSON("$bit" << BSON("x" << BSON("and" << 2))));
+ Mod mod2(BSON("$bit" << BSON("x" << BSON("or" << 8))));
+ const BSONObj result(BSON("_id" << 1 << "x" << ((3 & 2) | 8)));
+
+ ModifierInterface::ExecInfo execInfo1;
+ ASSERT_OK(mod1.prepare(doc.root(), "", &execInfo1));
+ if (!execInfo1.noOp)
+ ASSERT_OK(mod1.apply());
+
+ ModifierInterface::ExecInfo execInfo2;
+ ASSERT_OK(mod2.prepare(doc.root(), "", &execInfo2));
+ if (!execInfo2.noOp)
+ ASSERT_OK(mod2.apply());
+
+ ASSERT_EQUALS(result, doc);
+}
+
+TEST(DbUpdateTests, Bit1_3_Combined) {
+ Document doc(BSON("_id" << 1 << "x" << 3));
+ Mod mod(BSON("$bit" << BSON("x" << BSON("and" << 2 << "or" << 8))));
+ const BSONObj result(BSON("_id" << 1 << "x" << ((3 & 2) | 8)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ if (!execInfo.noOp)
ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 7 }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 7 } }"), logDoc);
- }
-
- TEST(SimpleMod, ApplyAndLogSimpleDocumentXor) {
- Document doc(fromjson("{ a : 5 }"));
- Mod mod(fromjson("{ $bit : { a : { xor : 6 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(result, doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("x" << ((3 & 2) | 8))), logDoc);
+}
+
+TEST(DbUpdateTests, Bit1_4) {
+ Document doc(BSON("_id" << 1 << "x" << 3));
+ Mod mod1(BSON("$bit" << BSON("x" << BSON("or" << 2))));
+ Mod mod2(BSON("$bit" << BSON("x" << BSON("and" << 8))));
+ const BSONObj result(BSON("_id" << 1 << "x" << ((3 | 2) & 8)));
+
+ ModifierInterface::ExecInfo execInfo1;
+ ASSERT_OK(mod1.prepare(doc.root(), "", &execInfo1));
+ if (!execInfo1.noOp)
+ ASSERT_OK(mod1.apply());
+
+ ModifierInterface::ExecInfo execInfo2;
+ ASSERT_OK(mod2.prepare(doc.root(), "", &execInfo2));
+ if (!execInfo2.noOp)
+ ASSERT_OK(mod2.apply());
+
+ ASSERT_EQUALS(result, doc);
+}
+
+TEST(DbUpdateTests, Bit1_4_Combined) {
+ Document doc(BSON("_id" << 1 << "x" << 3));
+ Mod mod(BSON("$bit" << BSON("x" << BSON("or" << 2 << "and" << 8))));
+ const BSONObj result(BSON("_id" << 1 << "x" << ((3 | 2) & 8)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ if (!execInfo.noOp)
ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 3 }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 3 } }"), logDoc);
- }
-
- TEST(InPlace, IntToIntAndIsInPlace) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<int>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(1))), logDoc);
- }
-
- TEST(InPlace, IntToIntOrIsInPlace) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(1))), logDoc);
- }
-
- TEST(InPlace, IntToIntXorIsInPlace) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<int>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(0))), logDoc);
- }
-
- TEST(InPlace, LongToLongAndIsInPlace) {
- Document doc(BSON("a" << static_cast<long long>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(1))), logDoc);
- }
-
- TEST(InPlace, LongToLongOrIsInPlace) {
- Document doc(BSON("a" << static_cast<long long>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(1))), logDoc);
- }
-
- TEST(InPlace, LongToLongXorIsInPlace) {
- Document doc(BSON("a" << static_cast<long long>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<long long>(0))), logDoc);
- }
-
- TEST(InPlace, IntToLongAndIsNotInPlace) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(InPlace, IntToLongOrIsNotInPlace) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(InPlace, IntToLongXorIsNotInPlace) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(NoOp, IntAnd) {
- Document doc(BSON("a" << static_cast<int>(0xABCD1234U)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<int>(0xFFFFFFFFU)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(0xABCD1234U))), logDoc);
- }
-
- TEST(NoOp, IntOr) {
- Document doc(BSON("a" << static_cast<int>(0xABCD1234U)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(0x0U)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(0xABCD1234U))), logDoc);
- }
-
- TEST(NoOp, IntXor) {
- Document doc(BSON("a" << static_cast<int>(0xABCD1234U)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<int>(0x0U)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(0xABCD1234U))), logDoc);
- }
-
- TEST(NoOp, LongAnd) {
- Document doc(BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" <<
- static_cast<long long>(0xFFFFFFFFFFFFFFFFULL)))));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" <<
- static_cast<long long>(0xABCD1234EF981234ULL))), logDoc);
- }
-
- TEST(NoOp, LongOr) {
- Document doc(BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(0x0ULL)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" <<
- static_cast<long long>(0xABCD1234EF981234ULL))), logDoc);
- }
- TEST(NoOp, LongXor) {
- Document doc(BSON("a" << static_cast<long long>(0xABCD1234EF981234ULL)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(0x0ULL)))));
+ ASSERT_EQUALS(result, doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" <<
- static_cast<long long>(0xABCD1234EF981234ULL))), logDoc);
- }
-
- TEST(Upcasting, UpcastIntToLongAnd) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<long long>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
- ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
- }
-
- TEST(Upcasting, UpcastIntToLongOr) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<long long>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
- ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
- }
-
- TEST(Upcasting, UpcastIntToLongXor) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<long long>(0)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
- ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
- }
-
- TEST(Upcasting, LongsStayLongsAnd) {
- Document doc(BSON("a" << static_cast<long long>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("and" << static_cast<int>(2)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 0 }"), doc);
- ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
- }
-
- TEST(Upcasting, LongsStayLongsOr) {
- Document doc(BSON("a" << static_cast<long long>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(2)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 3 }"), doc);
- ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
- }
-
- TEST(Upcasting, LongsStayLongsXor) {
- Document doc(BSON("a" << static_cast<long long>(1)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("xor" << static_cast<int>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 0 }"), doc);
- ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
- }
-
- // The following tests are re-created from the previous $bit tests in updatetests.cpp. They
- // are probably redundant with the tests above in various ways.
-
- TEST(DbUpdateTests, BitRewriteExistingField) {
- Document doc(BSON("a" << static_cast<int>(0)));
- Mod mod(BSON("$bit" << BSON("a" << BSON("or" << static_cast<int>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(BSON("a" << static_cast<int>(1)), doc);
- ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("a" << static_cast<int>(1))), logDoc);
- }
-
- TEST(DbUpdateTests, BitRewriteNonExistingField) {
- Document doc(BSON("a" << static_cast<int>(0)));
- Mod mod(BSON("$bit" << BSON("b" << BSON("or" << static_cast<int>(1)))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(BSON("a" << static_cast<int>(0) << "b" << static_cast<int>(1)), doc);
- ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("b" << static_cast<int>(1))), logDoc);
- }
-
- TEST(DbUpdateTests, Bit1_1) {
- Document doc(BSON("_id" << 1 << "x" << 3));
- Mod mod(BSON("$bit" << BSON("x" << BSON("and" << 2))));
- const BSONObj result(BSON("_id" << 1 << "x" << (3 & 2)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- if (!execInfo.noOp)
- ASSERT_OK(mod.apply());
-
- ASSERT_EQUALS(result, doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("x" << (3 & 2))), logDoc);
- }
-
- TEST(DbUpdateTests, Bit1_2) {
- Document doc(BSON("_id" << 1 << "x" << 1));
- Mod mod(BSON("$bit" << BSON("x" << BSON("or" << 4))));
- const BSONObj result(BSON("_id" << 1 << "x" << (1 | 4)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- if (!execInfo.noOp)
- ASSERT_OK(mod.apply());
-
- ASSERT_EQUALS(result, doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("x" << (1 | 4))), logDoc);
- }
-
- TEST(DbUpdateTests, Bit1_3) {
- Document doc(BSON("_id" << 1 << "x" << 3));
- Mod mod1(BSON("$bit" << BSON("x" << BSON("and" << 2))));
- Mod mod2(BSON("$bit" << BSON("x" << BSON("or" << 8))));
- const BSONObj result(BSON("_id" << 1 << "x" << ((3 & 2) | 8)));
-
- ModifierInterface::ExecInfo execInfo1;
- ASSERT_OK(mod1.prepare(doc.root(), "", &execInfo1));
- if (!execInfo1.noOp)
- ASSERT_OK(mod1.apply());
-
- ModifierInterface::ExecInfo execInfo2;
- ASSERT_OK(mod2.prepare(doc.root(), "", &execInfo2));
- if (!execInfo2.noOp)
- ASSERT_OK(mod2.apply());
-
- ASSERT_EQUALS(result, doc);
- }
-
- TEST(DbUpdateTests, Bit1_3_Combined) {
- Document doc(BSON("_id" << 1 << "x" << 3));
- Mod mod(BSON("$bit" << BSON("x" << BSON("and" << 2 << "or" << 8))));
- const BSONObj result(BSON("_id" << 1 << "x" << ((3 & 2) | 8)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- if (!execInfo.noOp)
- ASSERT_OK(mod.apply());
-
- ASSERT_EQUALS(result, doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("x" << ((3 & 2) | 8))), logDoc);
- }
-
- TEST(DbUpdateTests, Bit1_4) {
- Document doc(BSON("_id" << 1 << "x" << 3));
- Mod mod1(BSON("$bit" << BSON("x" << BSON("or" << 2))));
- Mod mod2(BSON("$bit" << BSON("x" << BSON("and" << 8))));
- const BSONObj result(BSON("_id" << 1 << "x" << ((3 | 2) & 8)));
-
- ModifierInterface::ExecInfo execInfo1;
- ASSERT_OK(mod1.prepare(doc.root(), "", &execInfo1));
- if (!execInfo1.noOp)
- ASSERT_OK(mod1.apply());
-
- ModifierInterface::ExecInfo execInfo2;
- ASSERT_OK(mod2.prepare(doc.root(), "", &execInfo2));
- if (!execInfo2.noOp)
- ASSERT_OK(mod2.apply());
-
- ASSERT_EQUALS(result, doc);
- }
-
- TEST(DbUpdateTests, Bit1_4_Combined) {
- Document doc(BSON("_id" << 1 << "x" << 3));
- Mod mod(BSON("$bit" << BSON("x" << BSON("or" << 2 << "and" << 8))));
- const BSONObj result(BSON("_id" << 1 << "x" << ((3 | 2) & 8)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- if (!execInfo.noOp)
- ASSERT_OK(mod.apply());
-
- ASSERT_EQUALS(result, doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(BSON("$set" << BSON("x" << ((3 | 2) & 8))), logDoc);
- }
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(BSON("$set" << BSON("x" << ((3 | 2) & 8))), logDoc);
+}
-} // namespace
+} // namespace
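The Bit1_3/Bit1_4 pairs above pin down that $bit sub-operations are applied in document order, so {and, or} and {or, and} over the same operands yield different results. A minimal standalone sketch (plain C++, no MongoDB headers; names are illustrative only) of the arithmetic those assertions expect:

#include <cassert>

int main() {
    // Sub-operations of $bit apply in the order they appear in the mod document.
    const int andThenOr = (3 & 2) | 8;  // Bit1_3 / Bit1_3_Combined: expect 10
    const int orThenAnd = (3 | 2) & 8;  // Bit1_4 / Bit1_4_Combined: expect 0
    assert(andThenOr == 10);
    assert(orThenAnd == 0);
    return 0;
}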
diff --git a/src/mongo/db/ops/modifier_compare.cpp b/src/mongo/db/ops/modifier_compare.cpp
index 6d37e4b2797..36f800202e4 100644
--- a/src/mongo/db/ops/modifier_compare.cpp
+++ b/src/mongo/db/ops/modifier_compare.cpp
@@ -37,156 +37,137 @@
namespace mongo {
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- struct ModifierCompare::PreparedState {
+struct ModifierCompare::PreparedState {
+ PreparedState(mutablebson::Document& targetDoc)
+ : doc(targetDoc), idxFound(0), elemFound(doc.end()) {}
- PreparedState(mutablebson::Document& targetDoc)
- : doc(targetDoc)
- , idxFound(0)
- , elemFound(doc.end()) {
- }
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
- // Document that is going to be changed.
- mutablebson::Document& doc;
+ // Index in _fieldRef for which an Element exist in the document.
+ size_t idxFound;
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element elemFound;
+};
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element elemFound;
- };
+ModifierCompare::ModifierCompare(ModifierCompare::ModifierCompareMode mode)
+ : _mode(mode), _pathReplacementPosition(0) {}
- ModifierCompare::ModifierCompare(ModifierCompare::ModifierCompareMode mode)
- : _mode(mode)
- , _pathReplacementPosition(0) {
- }
+ModifierCompare::~ModifierCompare() {}
- ModifierCompare::~ModifierCompare() {
+Status ModifierCompare::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ _updatePath.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_updatePath);
+ if (!status.isOK()) {
+ return status;
}
- Status ModifierCompare::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
-
- _updatePath.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_updatePath);
- if (!status.isOK()) {
- return status;
- }
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar =
+ fieldchecker::isPositional(_updatePath, &_pathReplacementPosition, &foundCount);
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(
- _updatePath, &_pathReplacementPosition, &foundCount);
-
- if (positional)
- *positional = foundDollar;
-
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _updatePath.dottedField() << "'");
- }
+ if (positional)
+ *positional = foundDollar;
- // Store value for later.
- _val = modExpr;
- return Status::OK();
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _updatePath.dottedField() << "'");
}
- Status ModifierCompare::prepare(mutablebson::Element root,
+ // Store value for later.
+ _val = modExpr;
+ return Status::OK();
+}
+
+Status ModifierCompare::prepare(mutablebson::Element root,
StringData matchedField,
ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(root.getDocument()));
- _preparedState.reset(new PreparedState(root.getDocument()));
-
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_pathReplacementPosition) {
- if (matchedField.empty()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _updatePath.dottedField());
- }
- _updatePath.setPart(_pathReplacementPosition, matchedField);
- }
-
- // Locate the field name in 'root'. Note that we may not have all the parts in the path
- // in the doc -- which is fine. Our goal now is merely to reason about whether this mod
- // apply is a noOp or whether is can be in place. The remaining path, if missing, will
- // be created during the apply.
- Status status = pathsupport::findLongestPrefix(_updatePath,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
-
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->elemFound = root.getDocument().end();
- }
- else if (!status.isOK()) {
- return status;
- }
-
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_updatePath;
-
- const bool destExists = (_preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_updatePath.numParts() - 1));
- if (!destExists) {
- execInfo->noOp = false;
- }
- else {
- const int compareVal = _preparedState->elemFound.compareWithBSONElement(_val, false);
- execInfo->noOp = (compareVal == 0) ||
- ((_mode == ModifierCompare::MAX) ?
- (compareVal > 0) : (compareVal < 0));
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_pathReplacementPosition) {
+ if (matchedField.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _updatePath.dottedField());
}
+ _updatePath.setPart(_pathReplacementPosition, matchedField);
+ }
- return Status::OK();
+ // Locate the field name in 'root'. Note that we may not have all the parts in the path
+ // in the doc -- which is fine. Our goal now is merely to reason about whether applying this
+ // mod is a noOp or whether it can be done in place. The remaining path, if missing, will
+ // be created during the apply.
+ Status status = pathsupport::findLongestPrefix(
+ _updatePath, root, &_preparedState->idxFound, &_preparedState->elemFound);
+
+ // FindLongestPrefix may say the path does not exist at all, which is fine here, or
+ // that the path was not viable or otherwise wrong, in which case, the mod cannot
+ // proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ _preparedState->elemFound = root.getDocument().end();
+ } else if (!status.isOK()) {
+ return status;
}
- Status ModifierCompare::apply() const {
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_updatePath;
+
+ const bool destExists = (_preparedState->elemFound.ok() &&
+ _preparedState->idxFound == (_updatePath.numParts() - 1));
+ if (!destExists) {
+ execInfo->noOp = false;
+ } else {
+ const int compareVal = _preparedState->elemFound.compareWithBSONElement(_val, false);
+ execInfo->noOp = (compareVal == 0) ||
+ ((_mode == ModifierCompare::MAX) ? (compareVal > 0) : (compareVal < 0));
+ }
- const bool destExists = (_preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_updatePath.numParts() - 1));
- // If there's no need to create any further field part, the $set is simply a value
- // assignment.
- if (destExists) {
- return _preparedState->elemFound.setValueBSONElement(_val);
- }
+ return Status::OK();
+}
- mutablebson::Document& doc = _preparedState->doc;
- StringData lastPart = _updatePath.getPart(_updatePath.numParts() - 1);
- // If the element exists and is the same type, then that is what we want to work with
- mutablebson::Element elemToSet = doc.makeElementWithNewFieldName(lastPart, _val);
- if (!elemToSet.ok()) {
- return Status(ErrorCodes::InternalError, "can't create new element");
- }
-
- // Now, we can be in two cases here, as far as attaching the element being set goes:
- // (a) none of the parts in the element's path exist, or (b) some parts of the path
- // exist but not all.
- if (!_preparedState->elemFound.ok()) {
- _preparedState->elemFound = doc.root();
- _preparedState->idxFound = 0;
- }
- else {
- _preparedState->idxFound++;
- }
+Status ModifierCompare::apply() const {
+ const bool destExists = (_preparedState->elemFound.ok() &&
+ _preparedState->idxFound == (_updatePath.numParts() - 1));
+ // If there's no need to create any further field part, the $set is simply a value
+ // assignment.
+ if (destExists) {
+ return _preparedState->elemFound.setValueBSONElement(_val);
+ }
- // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
- return pathsupport::createPathAt(_updatePath,
- _preparedState->idxFound,
- _preparedState->elemFound,
- elemToSet);
+ mutablebson::Document& doc = _preparedState->doc;
+ StringData lastPart = _updatePath.getPart(_updatePath.numParts() - 1);
+ // If the element exists and is the same type, then that is what we want to work with
+ mutablebson::Element elemToSet = doc.makeElementWithNewFieldName(lastPart, _val);
+ if (!elemToSet.ok()) {
+ return Status(ErrorCodes::InternalError, "can't create new element");
}
- Status ModifierCompare::log(LogBuilder* logBuilder) const {
- return logBuilder->addToSetsWithNewFieldName(_updatePath.dottedField(), _val);
+ // Now, we can be in two cases here, as far as attaching the element being set goes:
+ // (a) none of the parts in the element's path exist, or (b) some parts of the path
+ // exist but not all.
+ if (!_preparedState->elemFound.ok()) {
+ _preparedState->elemFound = doc.root();
+ _preparedState->idxFound = 0;
+ } else {
+ _preparedState->idxFound++;
}
-} // namespace mongo
+ // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
+ return pathsupport::createPathAt(
+ _updatePath, _preparedState->idxFound, _preparedState->elemFound, elemToSet);
+}
+
+Status ModifierCompare::log(LogBuilder* logBuilder) const {
+ return logBuilder->addToSetsWithNewFieldName(_updatePath.dottedField(), _val);
+}
+
+} // namespace mongo
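ModifierCompare::prepare() above folds the $min/$max decision into a single noOp expression. As a reading aid, a self-contained sketch of that rule (hypothetical names; 'cmp' stands in for elemFound.compareWithBSONElement(_val, false)):

#include <cassert>

enum class CompareMode { kMax, kMin };

// The mod is a no-op when the stored value already satisfies the bound:
// equal, or greater than the proposed value for $max, or less than it for $min.
bool isNoOp(CompareMode mode, int cmp) {
    return cmp == 0 || (mode == CompareMode::kMax ? cmp > 0 : cmp < 0);
}

int main() {
    assert(isNoOp(CompareMode::kMax, 1));    // existing > proposed: $max is a no-op
    assert(!isNoOp(CompareMode::kMax, -1));  // existing < proposed: $max applies
    assert(isNoOp(CompareMode::kMin, -1));   // existing < proposed: $min is a no-op
    assert(!isNoOp(CompareMode::kMin, 1));   // existing > proposed: $min applies
    return 0;
}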
diff --git a/src/mongo/db/ops/modifier_compare.h b/src/mongo/db/ops/modifier_compare.h
index 8285f3e10ad..f0be887d136 100644
--- a/src/mongo/db/ops/modifier_compare.h
+++ b/src/mongo/db/ops/modifier_compare.h
@@ -38,73 +38,68 @@
namespace mongo {
- class LogBuilder;
-
- class ModifierCompare : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierCompare);
-
- public:
-
- enum ModifierCompareMode { MAX, MIN };
- explicit ModifierCompare(ModifierCompareMode mode = MAX);
-
- virtual ~ModifierCompare();
-
- //
- // Modifier interface implementation
- //
-
- /**
- * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as
- * {$set: {<fieldname: <value>}}. init() extracts the field name and the value to be
- * assigned to it from 'modExpr'. It returns OK if successful or a status describing
- * the error.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
-
- /**
- * Looks up the field name in the sub-tree rooted at 'root', and binds, if necessary,
- * the '$' field part using the 'matchedfield' number. prepare() returns OK and
- * fills in 'execInfo' with information of whether this mod is a no-op on 'root' and
- * whether it is an in-place candidate. Otherwise, returns a status describing the
- * error.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
-
- /**
- * Applies the prepared mod over the element 'root' specified in the prepare()
- * call. Returns OK if successful or a status describing the error.
- */
- virtual Status apply() const;
-
- /**
- * Adds a log entry to logRoot corresponding to the operation applied here. Returns OK
- * if successful or a status describing the error.
- */
- virtual Status log(LogBuilder* logBuilder) const;
-
- private:
-
- // Compare mode: min/max
- const ModifierCompareMode _mode;
-
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _updatePath;
-
- // 0 or index for $-positional in _updatePath.
- size_t _pathReplacementPosition;
-
- // Element of the mod expression.
- BSONElement _val;
-
- // The instance of the field in the provided doc. This state is valid after a
- // prepare() was issued and until a log() is issued. The document this mod is
- // being prepared against must be live throughout all the calls.
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
-
-} // namespace mongo
+class LogBuilder;
+
+class ModifierCompare : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierCompare);
+
+public:
+ enum ModifierCompareMode { MAX, MIN };
+ explicit ModifierCompare(ModifierCompareMode mode = MAX);
+
+ virtual ~ModifierCompare();
+
+ //
+ // Modifier interface implementation
+ //
+
+ /**
+ * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as
+ * {$set: {<fieldname>: <value>}}. init() extracts the field name and the value to be
+ * assigned to it from 'modExpr'. It returns OK if successful or a status describing
+ * the error.
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
+
+ /**
+ * Looks up the field name in the sub-tree rooted at 'root', and binds, if necessary,
+ * the '$' field part using the 'matchedfield' number. prepare() returns OK and
+ * fills in 'execInfo' with information of whether this mod is a no-op on 'root' and
+ * whether it is an in-place candidate. Otherwise, returns a status describing the
+ * error.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
+
+ /**
+ * Applies the prepared mod over the element 'root' specified in the prepare()
+ * call. Returns OK if successful or a status describing the error.
+ */
+ virtual Status apply() const;
+
+ /**
+ * Adds a log entry to logRoot corresponding to the operation applied here. Returns OK
+ * if successful or a status describing the error.
+ */
+ virtual Status log(LogBuilder* logBuilder) const;
+
+private:
+ // Compare mode: min/max
+ const ModifierCompareMode _mode;
+
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _updatePath;
+
+ // 0 or index for $-positional in _updatePath.
+ size_t _pathReplacementPosition;
+
+ // Element of the mod expression.
+ BSONElement _val;
+
+ // The instance of the field in the provided doc. This state is valid after a
+ // prepare() was issued and until a log() is issued. The document this mod is
+ // being prepared against must be live throughout all the calls.
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_compare_test.cpp b/src/mongo/db/ops/modifier_compare_test.cpp
index 9f19dd1d718..fb6bcf8d84f 100644
--- a/src/mongo/db/ops/modifier_compare_test.cpp
+++ b/src/mongo/db/ops/modifier_compare_test.cpp
@@ -39,257 +39,256 @@
namespace {
- using mongo::BSONObj;
- using mongo::LogBuilder;
- using mongo::ModifierCompare;
- using mongo::ModifierInterface;
- using mongo::Status;
- using mongo::StringData;
- using mongo::fromjson;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- const char kModNameMin[] = "$min";
- const char kModNameMax[] = "$max";
-
- /** Helper to build and manipulate a $min/max mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj)
- : _modObj(modObj)
- , _mod((modObj.firstElement().fieldNameStringData() == "$min") ?
- ModifierCompare::MIN :
- ModifierCompare::MAX) {
- StringData modName = modObj.firstElement().fieldName();
- ASSERT_OK(_mod.init(modObj[modName].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierCompare& mod() { return _mod; }
-
- private:
- BSONObj _modObj;
- ModifierCompare _mod;
- };
-
- TEST(Init, ValidValues) {
- BSONObj modObj;
- ModifierCompare mod;
-
- modObj = fromjson("{ $min : { a : 2 } }");
- ASSERT_OK(mod.init(modObj[kModNameMin].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- modObj = fromjson("{ $max : { a : 1 } }");
- ASSERT_OK(mod.init(modObj[kModNameMax].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- modObj = fromjson("{ $min : { a : {$date : 0 } } }");
- ASSERT_OK(mod.init(modObj[kModNameMin].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+using mongo::BSONObj;
+using mongo::LogBuilder;
+using mongo::ModifierCompare;
+using mongo::ModifierInterface;
+using mongo::Status;
+using mongo::StringData;
+using mongo::fromjson;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+
+const char kModNameMin[] = "$min";
+const char kModNameMax[] = "$max";
+
+/** Helper to build and manipulate a $min/max mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj)
+ : _modObj(modObj),
+ _mod((modObj.firstElement().fieldNameStringData() == "$min") ? ModifierCompare::MIN
+ : ModifierCompare::MAX) {
+ StringData modName = modObj.firstElement().fieldName();
+ ASSERT_OK(_mod.init(modObj[modName].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(ExistingNumber, MaxSameNumber) {
- Document doc(fromjson("{a: 1 }"));
- Mod mod(fromjson("{$max: {a: 1} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(ExistingNumber, MinSameNumber) {
- Document doc(fromjson("{a: 1 }"));
- Mod mod(fromjson("{$min: {a: 1} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(ExistingNumber, MaxNumberIsLess) {
- Document doc(fromjson("{a: 1 }"));
- Mod mod(fromjson("{$max: {a: 0} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(ExistingNumber, MinNumberIsMore) {
- Document doc(fromjson("{a: 1 }"));
- Mod mod(fromjson("{$min: {a: 2} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(ExistingDouble, MaxSameValInt) {
- Document doc(fromjson("{a: 1.0 }"));
- Mod mod(BSON("$max" << BSON("a" << 1LL)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(ExistingDoubleZero, MaxSameValIntZero) {
- Document doc(fromjson("{a: 0.0 }"));
- Mod mod(BSON("$max" << BSON("a" << 0LL)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(ExistingDoubleZero, MinSameValIntZero) {
- Document doc(fromjson("{a: 0.0 }"));
- Mod mod(BSON("$min" << BSON("a" << 0LL)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(MissingField, MinNumber) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{$min: {a: 0} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(fromjson("{a : 0}"), doc);
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 0 } }"), logDoc);
- }
-
- TEST(ExistingNumber, MinNumber) {
- Document doc(fromjson("{a: 1 }"));
- Mod mod(fromjson("{$min: {a: 0} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(fromjson("{a : 0}"), doc);
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 0 } }"), logDoc);
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(MissingField, MaxNumber) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{$max: {a: 0} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(fromjson("{a : 0}"), doc);
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 0 } }"), logDoc);
+ Status apply() const {
+ return _mod.apply();
}
- TEST(ExistingNumber, MaxNumber) {
- Document doc(fromjson("{a: 1 }"));
- Mod mod(fromjson("{$max: {a: 2} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(fromjson("{a : 2}"), doc);
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 2 } }"), logDoc);
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(ExistingDate, MaxDate) {
- Document doc(fromjson("{a: {$date: 0} }"));
- Mod mod(fromjson("{$max: {a: {$date: 123123123}} }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(fromjson("{a: {$date: 123123123}}"), doc);
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$set: {a: {$date: 123123123}} }"), logDoc);
- }
-
- TEST(ExistingEmbeddedDoc, MaxDoc) {
- Document doc(fromjson("{a: {b: 2}}"));
- Mod mod(fromjson("{$max: {a: {b: 3}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(fromjson("{a: {b: 3}}}"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$set: {a: {b: 3}} }"), logDoc);
- }
-
- TEST(ExistingEmbeddedDoc, MaxNumber) {
- Document doc(fromjson("{a: {b: 2}}"));
- Mod mod(fromjson("{$max: {a: 3}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+ ModifierCompare& mod() {
+ return _mod;
}
-} // namespace
+private:
+ BSONObj _modObj;
+ ModifierCompare _mod;
+};
+
+TEST(Init, ValidValues) {
+ BSONObj modObj;
+ ModifierCompare mod;
+
+ modObj = fromjson("{ $min : { a : 2 } }");
+ ASSERT_OK(mod.init(modObj[kModNameMin].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+
+ modObj = fromjson("{ $max : { a : 1 } }");
+ ASSERT_OK(mod.init(modObj[kModNameMax].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+
+ modObj = fromjson("{ $min : { a : {$date : 0 } } }");
+ ASSERT_OK(mod.init(modObj[kModNameMin].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(ExistingNumber, MaxSameNumber) {
+ Document doc(fromjson("{a: 1 }"));
+ Mod mod(fromjson("{$max: {a: 1} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(ExistingNumber, MinSameNumber) {
+ Document doc(fromjson("{a: 1 }"));
+ Mod mod(fromjson("{$min: {a: 1} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(ExistingNumber, MaxNumberIsLess) {
+ Document doc(fromjson("{a: 1 }"));
+ Mod mod(fromjson("{$max: {a: 0} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(ExistingNumber, MinNumberIsMore) {
+ Document doc(fromjson("{a: 1 }"));
+ Mod mod(fromjson("{$min: {a: 2} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(ExistingDouble, MaxSameValInt) {
+ Document doc(fromjson("{a: 1.0 }"));
+ Mod mod(BSON("$max" << BSON("a" << 1LL)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(ExistingDoubleZero, MaxSameValIntZero) {
+ Document doc(fromjson("{a: 0.0 }"));
+ Mod mod(BSON("$max" << BSON("a" << 0LL)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(ExistingDoubleZero, MinSameValIntZero) {
+ Document doc(fromjson("{a: 0.0 }"));
+ Mod mod(BSON("$min" << BSON("a" << 0LL)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(MissingField, MinNumber) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{$min: {a: 0} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(fromjson("{a : 0}"), doc);
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 0 } }"), logDoc);
+}
+
+TEST(ExistingNumber, MinNumber) {
+ Document doc(fromjson("{a: 1 }"));
+ Mod mod(fromjson("{$min: {a: 0} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(fromjson("{a : 0}"), doc);
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 0 } }"), logDoc);
+}
+
+TEST(MissingField, MaxNumber) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{$max: {a: 0} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(fromjson("{a : 0}"), doc);
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 0 } }"), logDoc);
+}
+
+TEST(ExistingNumber, MaxNumber) {
+ Document doc(fromjson("{a: 1 }"));
+ Mod mod(fromjson("{$max: {a: 2} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(fromjson("{a : 2}"), doc);
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 2 } }"), logDoc);
+}
+
+TEST(ExistingDate, MaxDate) {
+ Document doc(fromjson("{a: {$date: 0} }"));
+ Mod mod(fromjson("{$max: {a: {$date: 123123123}} }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(fromjson("{a: {$date: 123123123}}"), doc);
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$set: {a: {$date: 123123123}} }"), logDoc);
+}
+
+TEST(ExistingEmbeddedDoc, MaxDoc) {
+ Document doc(fromjson("{a: {b: 2}}"));
+ Mod mod(fromjson("{$max: {a: {b: 3}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(fromjson("{a: {b: 3}}}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$set: {a: {b: 3}} }"), logDoc);
+}
+
+TEST(ExistingEmbeddedDoc, MaxNumber) {
+ Document doc(fromjson("{a: {b: 2}}"));
+ Mod mod(fromjson("{$max: {a: 3}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+}
+
+} // namespace
diff --git a/src/mongo/db/ops/modifier_current_date.cpp b/src/mongo/db/ops/modifier_current_date.cpp
index f38cc4e4441..75d0be014e3 100644
--- a/src/mongo/db/ops/modifier_current_date.cpp
+++ b/src/mongo/db/ops/modifier_current_date.cpp
@@ -38,239 +38,215 @@
namespace mongo {
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- namespace {
- const char kType[] = "$type";
- const char kDate[] = "date";
- const char kTimestamp[] = "timestamp";
- }
+namespace {
+const char kType[] = "$type";
+const char kDate[] = "date";
+const char kTimestamp[] = "timestamp";
+}
- struct ModifierCurrentDate::PreparedState {
+struct ModifierCurrentDate::PreparedState {
+ PreparedState(mutablebson::Document& doc) : doc(doc), elemFound(doc.end()), idxFound(0) {}
- PreparedState(mutablebson::Document& doc)
- : doc(doc)
- , elemFound(doc.end())
- , idxFound(0) {
- }
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
- // Document that is going to be changed.
- mutablebson::Document& doc;
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element elemFound;
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element elemFound;
+ // Index in _fieldRef for which an Element exist in the document.
+ size_t idxFound;
+};
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
- };
+ModifierCurrentDate::ModifierCurrentDate() : _pathReplacementPosition(0), _typeIsDate(true) {}
- ModifierCurrentDate::ModifierCurrentDate()
- : _pathReplacementPosition(0)
- , _typeIsDate(true) {
- }
+ModifierCurrentDate::~ModifierCurrentDate() {}
- ModifierCurrentDate::~ModifierCurrentDate() {
+Status ModifierCurrentDate::init(const BSONElement& modExpr,
+ const Options& opts,
+ bool* positional) {
+ _updatePath.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_updatePath);
+ if (!status.isOK()) {
+ return status;
}
- Status ModifierCurrentDate::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
-
- _updatePath.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_updatePath);
- if (!status.isOK()) {
- return status;
- }
-
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_updatePath,
- &_pathReplacementPosition,
- &foundCount);
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar =
+ fieldchecker::isPositional(_updatePath, &_pathReplacementPosition, &foundCount);
- if (positional)
- *positional = foundDollar;
+ if (positional)
+ *positional = foundDollar;
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _updatePath.dottedField() << "'");
- }
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _updatePath.dottedField() << "'");
+ }
- // Validate and store the type to produce
- switch (modExpr.type()) {
- case Bool:
- _typeIsDate = true;
- break;
- case Object: {
- const BSONObj argObj = modExpr.embeddedObject();
- const BSONElement typeElem = argObj.getField(kType);
- bool badInput = typeElem.eoo() || !(typeElem.type() == String);
+ // Validate and store the type to produce
+ switch (modExpr.type()) {
+ case Bool:
+ _typeIsDate = true;
+ break;
+ case Object: {
+ const BSONObj argObj = modExpr.embeddedObject();
+ const BSONElement typeElem = argObj.getField(kType);
+ bool badInput = typeElem.eoo() || !(typeElem.type() == String);
+
+ if (!badInput) {
+ std::string typeVal = typeElem.String();
+ badInput = !(typeElem.String() == kDate || typeElem.String() == kTimestamp);
+ if (!badInput)
+ _typeIsDate = (typeVal == kDate);
if (!badInput) {
- std::string typeVal = typeElem.String();
- badInput = !(typeElem.String() == kDate || typeElem.String() == kTimestamp);
- if (!badInput)
- _typeIsDate = (typeVal == kDate);
-
- if (!badInput) {
- // Check to make sure only the $type field was given as an arg
- BSONObjIterator i( argObj );
- const bool onlyHasTypeField = ((i.next().fieldNameStringData() == kType)
- && i.next().eoo());
- if (!onlyHasTypeField) {
- return Status(ErrorCodes::BadValue,
- str::stream() <<
- "The only valid field of the option is '$type': "
- "{$currentDate: {field : {$type: 'date/timestamp'}}}; "
+ // Check to make sure only the $type field was given as an arg
+ BSONObjIterator i(argObj);
+ const bool onlyHasTypeField =
+ ((i.next().fieldNameStringData() == kType) && i.next().eoo());
+ if (!onlyHasTypeField) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "The only valid field of the option is '$type': "
+ "{$currentDate: {field : {$type: 'date/timestamp'}}}; "
<< "arg: " << argObj);
- }
-
}
-
- }
-
- if (badInput) {
- return Status(ErrorCodes::BadValue,
- "The '$type' string field is required "
- "to be 'date' or 'timestamp': "
- "{$currentDate: {field : {$type: 'date'}}}");
}
- break;
}
- default:
- return Status(ErrorCodes::BadValue,
- str::stream() << typeName(modExpr.type())
- << " is not valid type for $currentDate."
- " Please use a boolean ('true')"
- " or a $type expression ({$type: 'timestamp/date'}).");
- }
-
- return Status::OK();
- }
-
- Status ModifierCurrentDate::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
-
- _preparedState.reset(new PreparedState(root.getDocument()));
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_pathReplacementPosition) {
- if (matchedField.empty()) {
+ if (badInput) {
return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _updatePath.dottedField());
+ "The '$type' string field is required "
+ "to be 'date' or 'timestamp': "
+ "{$currentDate: {field : {$type: 'date'}}}");
}
- _updatePath.setPart(_pathReplacementPosition, matchedField);
+ break;
}
+ default:
+ return Status(ErrorCodes::BadValue,
+ str::stream() << typeName(modExpr.type())
+ << " is not valid type for $currentDate."
+ " Please use a boolean ('true')"
+ " or a $type expression ({$type: 'timestamp/date'}).");
+ }
- // Locate the field name in 'root'. Note that we may not have all the parts in the path
- // in the doc -- which is fine. Our goal now is merely to reason about whether this mod
- // apply is a noOp or whether is can be in place. The remaining path, if missing, will
- // be created during the apply.
- Status status = pathsupport::findLongestPrefix(_updatePath,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
-
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->elemFound = root.getDocument().end();
- }
- else if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+}
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_updatePath;
+Status ModifierCurrentDate::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(root.getDocument()));
- return Status::OK();
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_pathReplacementPosition) {
+ if (matchedField.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _updatePath.dottedField());
+ }
+ _updatePath.setPart(_pathReplacementPosition, matchedField);
}
- Status ModifierCurrentDate::apply() const {
+ // Locate the field name in 'root'. Note that we may not have all the parts in the path
+ // in the doc -- which is fine. Our goal now is merely to reason about whether applying this
+ // mod is a noOp or whether it can be done in place. The remaining path, if missing, will
+ // be created during the apply.
+ Status status = pathsupport::findLongestPrefix(
+ _updatePath, root, &_preparedState->idxFound, &_preparedState->elemFound);
+
+ // FindLongestPrefix may say the path does not exist at all, which is fine here, or
+ // that the path was not viable or otherwise wrong, in which case, the mod cannot
+ // proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ _preparedState->elemFound = root.getDocument().end();
+ } else if (!status.isOK()) {
+ return status;
+ }
- const bool destExists = (_preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_updatePath.numParts() - 1));
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_updatePath;
- mutablebson::Document& doc = _preparedState->doc;
- StringData lastPart = _updatePath.getPart(_updatePath.numParts() - 1);
- // If the element exists and is the same type, then that is what we want to work with
- mutablebson::Element elemToSet = destExists ?
- _preparedState->elemFound:
- doc.end() ;
+ return Status::OK();
+}
- if (!destExists) {
- // Creates the final element that's going to be $set in 'doc'.
- // fills in the value with place-holder/empty
+Status ModifierCurrentDate::apply() const {
+ const bool destExists = (_preparedState->elemFound.ok() &&
+ _preparedState->idxFound == (_updatePath.numParts() - 1));
- elemToSet = _typeIsDate ?
- doc.makeElementDate(lastPart, Date_t()) :
- doc.makeElementTimestamp(lastPart, Timestamp());
+ mutablebson::Document& doc = _preparedState->doc;
+ StringData lastPart = _updatePath.getPart(_updatePath.numParts() - 1);
+ // If the element exists and is the same type, then that is what we want to work with
+ mutablebson::Element elemToSet = destExists ? _preparedState->elemFound : doc.end();
- if (!elemToSet.ok()) {
- return Status(ErrorCodes::InternalError, "can't create new element");
- }
+ if (!destExists) {
+ // Create the final element that's going to be $set in 'doc', filling in the
+ // value with a placeholder/empty value for now.
- // Now, we can be in two cases here, as far as attaching the element being set goes:
- // (a) none of the parts in the element's path exist, or (b) some parts of the path
- // exist but not all.
- if (!_preparedState->elemFound.ok()) {
- _preparedState->elemFound = doc.root();
- _preparedState->idxFound = 0;
- }
- else {
- _preparedState->idxFound++;
- }
+ elemToSet = _typeIsDate ? doc.makeElementDate(lastPart, Date_t())
+ : doc.makeElementTimestamp(lastPart, Timestamp());
- // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
- Status s = pathsupport::createPathAt(_updatePath,
- _preparedState->idxFound,
- _preparedState->elemFound,
- elemToSet);
- if (!s.isOK())
- return s;
+ if (!elemToSet.ok()) {
+ return Status(ErrorCodes::InternalError, "can't create new element");
}
- dassert(elemToSet.ok());
-
- // By the time we are here the element is in place and we just need to update the value
- if (_typeIsDate) {
- const mongo::Date_t now = mongo::jsTime();
- Status s = elemToSet.setValueDate(now);
- if (!s.isOK())
- return s;
- }
- else {
- Status s = elemToSet.setValueTimestamp(getNextGlobalTimestamp());
- if (!s.isOK())
- return s;
+ // Now, we can be in one of two cases here, as far as attaching the element being set goes:
+ // (a) none of the parts in the element's path exist, or (b) some parts of the path
+ // exist but not all.
+ if (!_preparedState->elemFound.ok()) {
+ _preparedState->elemFound = doc.root();
+ _preparedState->idxFound = 0;
+ } else {
+ _preparedState->idxFound++;
}
- // Set the elemFound, idxFound to the changed element for oplog logging.
- _preparedState->elemFound = elemToSet;
- _preparedState->idxFound = (_updatePath.numParts() - 1);
+ // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
+ Status s = pathsupport::createPathAt(
+ _updatePath, _preparedState->idxFound, _preparedState->elemFound, elemToSet);
+ if (!s.isOK())
+ return s;
+ }
- return Status::OK();
+ dassert(elemToSet.ok());
+
+ // By the time we are here, the element is in place and we just need to update the value.
+ if (_typeIsDate) {
+ const mongo::Date_t now = mongo::jsTime();
+ Status s = elemToSet.setValueDate(now);
+ if (!s.isOK())
+ return s;
+ } else {
+ Status s = elemToSet.setValueTimestamp(getNextGlobalTimestamp());
+ if (!s.isOK())
+ return s;
}
- Status ModifierCurrentDate::log(LogBuilder* logBuilder) const {
- // TODO: None of this checks should be needed unless someone calls if we are a noOp
- // When we cleanup we should build in testing that no-one calls in the noOp case
- const bool destExists = (_preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_updatePath.numParts() - 1));
+ // Set the elemFound, idxFound to the changed element for oplog logging.
+ _preparedState->elemFound = elemToSet;
+ _preparedState->idxFound = (_updatePath.numParts() - 1);
- // If the destination doesn't exist then we have nothing to log.
- // This would only happen if apply isn't called or fails when it had to create an element
- if (!destExists)
- return Status::OK();
+ return Status::OK();
+}
- return logBuilder->addToSetsWithNewFieldName(_updatePath.dottedField(),
- _preparedState->elemFound);
- }
+Status ModifierCurrentDate::log(LogBuilder* logBuilder) const {
+ // TODO: None of these checks should be needed unless someone calls this when we are a noOp.
+ // When we clean up we should build in testing that no one calls this in the noOp case.
+ const bool destExists = (_preparedState->elemFound.ok() &&
+ _preparedState->idxFound == (_updatePath.numParts() - 1));
+
+ // If the destination doesn't exist, then we have nothing to log.
+ // This would only happen if apply() isn't called, or fails when it had to create an element.
+ if (!destExists)
+ return Status::OK();
+
+ return logBuilder->addToSetsWithNewFieldName(_updatePath.dottedField(),
+ _preparedState->elemFound);
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_current_date.h b/src/mongo/db/ops/modifier_current_date.h
index 291c05b377a..d7e1bd5e875 100644
--- a/src/mongo/db/ops/modifier_current_date.h
+++ b/src/mongo/db/ops/modifier_current_date.h
@@ -37,53 +37,48 @@
namespace mongo {
- class LogBuilder;
+class LogBuilder;
- class ModifierCurrentDate : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierCurrentDate);
+class ModifierCurrentDate : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierCurrentDate);
- public:
+public:
+ ModifierCurrentDate();
+ virtual ~ModifierCurrentDate();
- ModifierCurrentDate();
- virtual ~ModifierCurrentDate();
+ /**
+ * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming
+ * from a $currentDate mod such as
+ * {$currentDate: {<fieldname>: true/{$type: "date/timestamp"}}}.
+ * init() extracts the field name and the value to be
+ * assigned to it from 'modExpr'. It returns OK if successful or a status describing
+ * the error.
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
- /**
- * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming
- * from a $currentDate mod such as
- * {$currentDate: {<fieldname: true/{$type: "date/timestamp"}}.
- * init() extracts the field name and the value to be
- * assigned to it from 'modExpr'. It returns OK if successful or a status describing
- * the error.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
+ /** Evaluates the validity of applying $currentDate.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
- /** Evaluates the validity of applying $currentDate.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
+ /** Updates the node passed in prepare with the results from prepare */
+ virtual Status apply() const;
- /** Updates the node passed in prepare with the results from prepare */
- virtual Status apply() const;
+ /** Converts the result into a $set */
+ virtual Status log(LogBuilder* logBuilder) const;
- /** Converts the result into a $set */
- virtual Status log(LogBuilder* logBuilder) const;
+private:
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _updatePath;
- private:
+ // 0 or index for $-positional in _updatePath.
+ size_t _pathReplacementPosition;
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _updatePath;
+ // Is the final type being added a Date or Timestamp?
+ bool _typeIsDate;
- // 0 or index for $-positional in _updatePath.
- size_t _pathReplacementPosition;
+ // State which changes with each call of the mod.
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
- // Is the final type being added a Date or Timestamp?
- bool _typeIsDate;
-
- // State which changes with each call of the mod.
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
-
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_current_date_test.cpp b/src/mongo/db/ops/modifier_current_date_test.cpp
index 51eb4a79659..ecb4b86bac8 100644
--- a/src/mongo/db/ops/modifier_current_date_test.cpp
+++ b/src/mongo/db/ops/modifier_current_date_test.cpp
@@ -39,327 +39,323 @@
namespace {
- using mongo::BSONObj;
- using mongo::LogBuilder;
- using mongo::ModifierCurrentDate;
- using mongo::ModifierInterface;
- using mongo::Timestamp;
- using mongo::Status;
- using mongo::StringData;
- using mongo::fromjson;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /**
- * Helper to validate oplog entries in the tests below.
- */
- void validateOplogEntry(BSONObj& oplogFormat, Document& doc){
- // Ensure that the field is the same
- ASSERT_EQUALS(oplogFormat.firstElement().fieldName(),
- doc.root().leftChild().getFieldName());
-
- // Ensure the field names are the same
- ASSERT_EQUALS(oplogFormat.firstElement().embeddedObject().firstElement().fieldName(),
- doc.root().leftChild().leftChild().getFieldName());
-
- // Ensure the type is the same in the document as the oplog
- ASSERT_EQUALS(oplogFormat.firstElement().embeddedObject().firstElement().type(),
- doc.root().leftChild().leftChild().getType());
+using mongo::BSONObj;
+using mongo::LogBuilder;
+using mongo::ModifierCurrentDate;
+using mongo::ModifierInterface;
+using mongo::Timestamp;
+using mongo::Status;
+using mongo::StringData;
+using mongo::fromjson;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+
+/**
+ * Helper to validate oplog entries in the tests below.
+ */
+void validateOplogEntry(BSONObj& oplogFormat, Document& doc) {
+ // Ensure that the field is the same
+ ASSERT_EQUALS(oplogFormat.firstElement().fieldName(), doc.root().leftChild().getFieldName());
+
+ // Ensure the field names are the same
+ ASSERT_EQUALS(oplogFormat.firstElement().embeddedObject().firstElement().fieldName(),
+ doc.root().leftChild().leftChild().getFieldName());
+
+ // Ensure the type is the same in the document as the oplog
+ ASSERT_EQUALS(oplogFormat.firstElement().embeddedObject().firstElement().type(),
+ doc.root().leftChild().leftChild().getType());
+}
+
+/** Helper to build and manipulate a $currentDate mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj) : _modObj(modObj), _mod() {
+ ASSERT_OK(_mod.init(_modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- /** Helper to build and manipulate a $currentDate mod. */
- class Mod {
- public:
- Mod() : _mod() {}
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
+ }
- explicit Mod(BSONObj modObj)
- : _modObj(modObj)
- , _mod() {
- ASSERT_OK(_mod.init(_modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+ Status apply() const {
+ return _mod.apply();
+ }
- }
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
+ }
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
+ ModifierCurrentDate& mod() {
+ return _mod;
+ }
- Status apply() const {
- return _mod.apply();
- }
+private:
+ BSONObj _modObj;
+ ModifierCurrentDate _mod;
+};
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
+TEST(Init, ValidValues) {
+ BSONObj modObj;
+ ModifierCurrentDate mod;
- ModifierCurrentDate& mod() { return _mod; }
+ modObj = fromjson("{ $currentDate : { a : true } }");
+ ASSERT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- private:
- BSONObj _modObj;
- ModifierCurrentDate _mod;
- };
+ modObj = fromjson("{ $currentDate : { a : {$type : 'timestamp' } } }");
+ ASSERT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- TEST(Init, ValidValues) {
- BSONObj modObj;
- ModifierCurrentDate mod;
+ modObj = fromjson("{ $currentDate : { a : {$type : 'date' } } }");
+ ASSERT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- modObj = fromjson("{ $currentDate : { a : true } }");
- ASSERT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+TEST(Init, FailToInitWithInvalidValue) {
+ BSONObj modObj;
+ ModifierCurrentDate mod;
- modObj = fromjson("{ $currentDate : { a : {$type : 'timestamp' } } }");
- ASSERT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ // String is an invalid $currentDate argument
+ modObj = fromjson("{ $currentDate : { a : 'Oct 11, 2001' } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- modObj = fromjson("{ $currentDate : { a : {$type : 'date' } } }");
- ASSERT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ // Array is an invalid $currentDate argument
+ modObj = fromjson("{ $currentDate : { a : [] } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
-
- TEST(Init, FailToInitWithInvalidValue) {
- BSONObj modObj;
- ModifierCurrentDate mod;
-
- // String is an invalid $currentDate argument
- modObj = fromjson("{ $currentDate : { a : 'Oct 11, 2001' } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // Array is an invalid $currentDate argument
- modObj = fromjson("{ $currentDate : { a : [] } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // Number is an invalid $currentDate argument
- modObj = fromjson("{ $currentDate : { a : 1 } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // Regex is an invalid $currentDate argument
- modObj = fromjson("{ $currentDate : { a : /1/ } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // An object with missing $type field is an invalid $currentDate argument
- modObj = fromjson("{ $currentDate : { a : { foo : 4 } } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // An object with extra fields, including the $type field is bad
- modObj = fromjson("{ $currentDate : { a : { $type: 'date', foo : 4 } } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // An object with extra fields, including the $type field is bad
- modObj = fromjson("{ $currentDate : { a : { foo: 4, $type : 'date' } } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // An object with non-date/timestamp $type field is an invalid $currentDate argument
- modObj = fromjson("{ $currentDate : { a : { $type : 4 } } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // An object with non-date/timestamp $type field is an invalid $currentDate argument
- modObj = fromjson("{ $currentDate : { a : { $type : 'foo' } } }");
- ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(BoolInput, EmptyStartDoc) {
- Document doc(fromjson("{ }"));
- Mod mod(fromjson("{ $currentDate : { a : true } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
- ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
-
- TEST(DateInput, EmptyStartDoc) {
- Document doc(fromjson("{ }"));
- Mod mod(fromjson("{ $currentDate : { a : {$type: 'date' } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
- ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
- TEST(TimestampInput, EmptyStartDoc) {
- Document doc(fromjson("{ }"));
- Mod mod(fromjson("{ $currentDate : { a : {$type : 'timestamp' } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- mongo::Timestamp ts;
- BSONObj olderDateObj = BSON("a" << ts);
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { a : { $timestamp : {t:0, i:0} } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
-
- TEST(BoolInput, ExistingStringDoc) {
- Document doc(fromjson("{ a: 'a' }"));
- Mod mod(fromjson("{ $currentDate : { a : true } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
- ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
+ // Number is an invalid $currentDate argument
+ modObj = fromjson("{ $currentDate : { a : 1 } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- TEST(BoolInput, ExistingDateDoc) {
- Document doc(fromjson("{ a: {$date: 0 } }"));
- Mod mod(fromjson("{ $currentDate : { a : true } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
- ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
+ // Regex is an invalid $currentDate argument
+ modObj = fromjson("{ $currentDate : { a : /1/ } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- TEST(DateInput, ExistingDateDoc) {
- Document doc(fromjson("{ a: {$date: 0 } }"));
- Mod mod(fromjson("{ $currentDate : { a : {$type: 'date' } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
- ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
+ // An object with missing $type field is an invalid $currentDate argument
+ modObj = fromjson("{ $currentDate : { a : { foo : 4 } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- TEST(TimestampInput, ExistingDateDoc) {
- Document doc(fromjson("{ a: {$date: 0 } }"));
- Mod mod(fromjson("{ $currentDate : { a : {$type : 'timestamp' } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
-
- mongo::Timestamp ts;
- BSONObj olderDateObj = BSON("a" << ts);
- ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled()); //Same Size as Date
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { a : { $timestamp : {t:0, i:0} } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
+ // An object with extra fields, including the $type field is bad
+ modObj = fromjson("{ $currentDate : { a : { $type: 'date', foo : 4 } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- TEST(TimestampInput, ExistingEmbeddedDateDoc) {
- Document doc(fromjson("{ a: {b: {$date: 0 } } }"));
- Mod mod(fromjson("{ $currentDate : { 'a.b' : {$type : 'timestamp' } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a.b", execInfo.fieldRef[0]->dottedField());
-
- mongo::Timestamp ts;
- BSONObj olderDateObj = BSON("a" << BSON( "b" << ts));
- ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled()); //Same Size as Date
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { 'a.b' : { $timestamp : {t:0, i:0} } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
+ // An object with extra fields, including the $type field is bad
+ modObj = fromjson("{ $currentDate : { a : { foo: 4, $type : 'date' } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- TEST(DottedTimestampInput, EmptyStartDoc) {
- Document doc(fromjson("{ }"));
- Mod mod(fromjson("{ $currentDate : { 'a.b' : {$type : 'timestamp' } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS("a.b", execInfo.fieldRef[0]->dottedField());
-
- mongo::Timestamp ts;
- BSONObj olderDateObj = BSON("a" << BSON( "b" << ts));
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- BSONObj oplogFormat = fromjson("{ $set : { 'a.b' : { $timestamp : {t:0, i:0} } } }");
- validateOplogEntry(oplogFormat, logDoc);
- }
+ // An object with non-date/timestamp $type field is an invalid $currentDate argument
+ modObj = fromjson("{ $currentDate : { a : { $type : 4 } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
-} // namespace
+ // An object with non-date/timestamp $type field is an invalid $currentDate argument
+ modObj = fromjson("{ $currentDate : { a : { $type : 'foo' } } }");
+ ASSERT_NOT_OK(mod.init(modObj["$currentDate"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(BoolInput, EmptyStartDoc) {
+ Document doc(fromjson("{ }"));
+ Mod mod(fromjson("{ $currentDate : { a : true } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
+ ASSERT_OK(mod.apply());
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+TEST(DateInput, EmptyStartDoc) {
+ Document doc(fromjson("{ }"));
+ Mod mod(fromjson("{ $currentDate : { a : {$type: 'date' } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
+ ASSERT_OK(mod.apply());
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+TEST(TimestampInput, EmptyStartDoc) {
+ Document doc(fromjson("{ }"));
+ Mod mod(fromjson("{ $currentDate : { a : {$type : 'timestamp' } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ mongo::Timestamp ts;
+ BSONObj olderDateObj = BSON("a" << ts);
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { a : { $timestamp : {t:0, i:0} } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+TEST(BoolInput, ExistingStringDoc) {
+ Document doc(fromjson("{ a: 'a' }"));
+ Mod mod(fromjson("{ $currentDate : { a : true } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
+ ASSERT_OK(mod.apply());
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+TEST(BoolInput, ExistingDateDoc) {
+ Document doc(fromjson("{ a: {$date: 0 } }"));
+ Mod mod(fromjson("{ $currentDate : { a : true } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
+ ASSERT_OK(mod.apply());
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+TEST(DateInput, ExistingDateDoc) {
+ Document doc(fromjson("{ a: {$date: 0 } }"));
+ Mod mod(fromjson("{ $currentDate : { a : {$type: 'date' } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
+ ASSERT_OK(mod.apply());
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { a : { $date : 0 } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+TEST(TimestampInput, ExistingDateDoc) {
+ Document doc(fromjson("{ a: {$date: 0 } }"));
+ Mod mod(fromjson("{ $currentDate : { a : {$type : 'timestamp' } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a", execInfo.fieldRef[0]->dottedField());
+
+ mongo::Timestamp ts;
+ BSONObj olderDateObj = BSON("a" << ts);
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled()); // Same Size as Date
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { a : { $timestamp : {t:0, i:0} } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+TEST(TimestampInput, ExistingEmbeddedDateDoc) {
+ Document doc(fromjson("{ a: {b: {$date: 0 } } }"));
+ Mod mod(fromjson("{ $currentDate : { 'a.b' : {$type : 'timestamp' } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a.b", execInfo.fieldRef[0]->dottedField());
+
+ mongo::Timestamp ts;
+ BSONObj olderDateObj = BSON("a" << BSON("b" << ts));
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled()); // Same Size as Date
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { 'a.b' : { $timestamp : {t:0, i:0} } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+TEST(DottedTimestampInput, EmptyStartDoc) {
+ Document doc(fromjson("{ }"));
+ Mod mod(fromjson("{ $currentDate : { 'a.b' : {$type : 'timestamp' } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS("a.b", execInfo.fieldRef[0]->dottedField());
+
+ mongo::Timestamp ts;
+ BSONObj olderDateObj = BSON("a" << BSON("b" << ts));
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ BSONObj oplogFormat = fromjson("{ $set : { 'a.b' : { $timestamp : {t:0, i:0} } } }");
+ validateOplogEntry(oplogFormat, logDoc);
+}
+
+} // namespace
diff --git a/src/mongo/db/ops/modifier_inc.cpp b/src/mongo/db/ops/modifier_inc.cpp
index a967e0a734a..b683a1f9561 100644
--- a/src/mongo/db/ops/modifier_inc.cpp
+++ b/src/mongo/db/ops/modifier_inc.cpp
@@ -38,260 +38,229 @@
namespace mongo {
- namespace mb = mutablebson;
- namespace str = mongoutils::str;
+namespace mb = mutablebson;
+namespace str = mongoutils::str;
- struct ModifierInc::PreparedState {
+struct ModifierInc::PreparedState {
+ PreparedState(mutablebson::Document& doc)
+ : doc(doc), idxFound(0), elemFound(doc.end()), newValue(), noOp(false) {}
- PreparedState(mutablebson::Document& doc)
- : doc(doc)
- , idxFound(0)
- , elemFound(doc.end())
- , newValue()
- , noOp(false) {
- }
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
+
+ // Index in _fieldRef for which an Element exists in the document.
+ size_t idxFound;
+
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element elemFound;
- // Document that is going to be changed.
- mutablebson::Document& doc;
+ // Value to be applied
+ SafeNum newValue;
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
+ // Is this $inc a no-op?
+ bool noOp;
+};
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element elemFound;
+ModifierInc::ModifierInc(ModifierIncMode mode)
+ : ModifierInterface(), _mode(mode), _fieldRef(), _posDollar(0), _val() {}
- // Value to be applied
- SafeNum newValue;
+ModifierInc::~ModifierInc() {}
- // This $inc is a no-op?
- bool noOp;
- };
+Status ModifierInc::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ //
+ // field name analysis
+ //
- ModifierInc::ModifierInc(ModifierIncMode mode)
- : ModifierInterface ()
- , _mode(mode)
- , _fieldRef()
- , _posDollar(0)
- , _val() {
+ // Perform standard field name and updateable checks.
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
}
- ModifierInc::~ModifierInc() {
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
+
+ if (positional)
+ *positional = foundDollar;
+
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
}
- Status ModifierInc::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
+ //
+ // value analysis
+ //
- //
- // field name analysis
- //
+ if (!modExpr.isNumber()) {
+ // TODO: Context for mod error messages would be helpful
+ // include mod code, etc.
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Cannot " << (_mode == MODE_INC ? "increment" : "multiply")
+ << " with non-numeric argument: {" << modExpr << "}");
+ }
- // Perform standard field name and updateable checks.
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
+ _val = modExpr;
+ dassert(_val.isValid());
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
+ return Status::OK();
+}
- if (positional)
- *positional = foundDollar;
+Status ModifierInc::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(root.getDocument()));
- if (foundDollar && foundCount > 1) {
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_posDollar) {
+ if (matchedField.empty()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
}
+ _fieldRef.setPart(_posDollar, matchedField);
+ }
- //
- // value analysis
- //
-
- if (!modExpr.isNumber()) {
- // TODO: Context for mod error messages would be helpful
- // include mod code, etc.
- return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Cannot "
- << (_mode == MODE_INC ? "increment" : "multiply")
- << " with non-numeric argument: {"
- << modExpr << "}");
- }
+ // Locate the field name in 'root'. Note that we may not have all the parts in the path
+ // in the doc -- which is fine. Our goal now is merely to reason about whether this mod's
+ // apply is a noOp or whether it can be applied in place. The remaining path, if missing, will
+ // be created during the apply.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->idxFound, &_preparedState->elemFound);
+
+ // FindLongestPrefix may say the path does not exist at all, which is fine here, or
+ // that the path was not viable or otherwise wrong, in which case, the mod cannot
+ // proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ _preparedState->elemFound = root.getDocument().end();
+ } else if (!status.isOK()) {
+ return status;
+ }
- _val = modExpr;
- dassert(_val.isValid());
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_fieldRef;
+
+ // Capture the value we are going to write. At this point, there may not be a value
+ // against which to operate, so the result will be simply _val.
+ _preparedState->newValue = _val;
+
+ //
+ // in-place and no-op logic
+ //
+ // If the field path is not fully present, then this mod cannot be in place, nor is it a
+ // noOp.
+ if (!_preparedState->elemFound.ok() || _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ // For multiplication, we treat ops against missing as yielding zero. We take
+ // advantage here of the promotion rules for SafeNum; the expression below will
+ // always yield a zero of the same type of operand that the user provided
+ // (e.g. double).
+ if (_mode == MODE_MUL)
+ _preparedState->newValue *= SafeNum(static_cast<int>(0));
return Status::OK();
}
- Status ModifierInc::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
-
- _preparedState.reset(new PreparedState(root.getDocument()));
-
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_posDollar) {
- if (matchedField.empty()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
- }
- _fieldRef.setPart(_posDollar, matchedField);
- }
-
- // Locate the field name in 'root'. Note that we may not have all the parts in the path
- // in the doc -- which is fine. Our goal now is merely to reason about whether this mod
- // apply is a noOp or whether is can be in place. The remaining path, if missing, will
- // be created during the apply.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
-
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->elemFound = root.getDocument().end();
- }
- else if (!status.isOK()) {
- return status;
- }
-
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_fieldRef;
-
- // Capture the value we are going to write. At this point, there may not be a value
- // against which to operate, so the result will be simply _val.
- _preparedState->newValue = _val;
-
- //
- // in-place and no-op logic
- //
- // If the field path is not fully present, then this mod cannot be in place, nor is a
- // noOp.
- if (!_preparedState->elemFound.ok() ||
- _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
-
- // For multiplication, we treat ops against missing as yielding zero. We take
- // advantage here of the promotion rules for SafeNum; the expression below will
- // always yield a zero of the same type of operand that the user provided
- // (e.g. double).
- if (_mode == MODE_MUL)
- _preparedState->newValue *= SafeNum(static_cast<int>(0));
-
- return Status::OK();
- }
-
- // If the value being $inc'ed is the same as the one already in the doc, than this is a
- // noOp.
- if (!_preparedState->elemFound.isNumeric()) {
- mb::Element idElem = mb::findFirstChildNamed(root, "_id");
- return Status(
- ErrorCodes::TypeMismatch,
- str::stream() << "Cannot apply "
- << (_mode == MODE_INC ? "$inc" : "$mul")
- << " to a value of non-numeric type. {"
- << idElem.toString()
- << "} has the field '" << _preparedState->elemFound.getFieldName()
- << "' of non-numeric type "
- << typeName(_preparedState->elemFound.getType()));
- }
- const SafeNum currentValue = _preparedState->elemFound.getValueSafeNum();
-
- // Update newValue w.r.t to the current value of the found element.
- if (_mode == MODE_INC)
- _preparedState->newValue += currentValue;
- else
- _preparedState->newValue *= currentValue;
-
- // If the result of the addition is invalid, we must return an error.
- if (!_preparedState->newValue.isValid()) {
- mb::Element idElem = mb::findFirstChildNamed(root, "_id");
- return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to apply $inc operations to current value ("
- << currentValue.debugString() << ") for document {"
- << idElem.toString() << "}");
- }
-
- // If the values are identical (same type, same value), then this is a no-op.
- if (_preparedState->newValue.isIdentical(currentValue)) {
- _preparedState->noOp = execInfo->noOp = true;
- return Status::OK();
- }
+ // If the value being $inc'ed is the same as the one already in the doc, then this is a
+ // noOp.
+ if (!_preparedState->elemFound.isNumeric()) {
+ mb::Element idElem = mb::findFirstChildNamed(root, "_id");
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Cannot apply " << (_mode == MODE_INC ? "$inc" : "$mul")
+ << " to a value of non-numeric type. {" << idElem.toString()
+ << "} has the field '"
+ << _preparedState->elemFound.getFieldName()
+ << "' of non-numeric type "
+ << typeName(_preparedState->elemFound.getType()));
+ }
+ const SafeNum currentValue = _preparedState->elemFound.getValueSafeNum();
+
+ // Update newValue w.r.t to the current value of the found element.
+ if (_mode == MODE_INC)
+ _preparedState->newValue += currentValue;
+ else
+ _preparedState->newValue *= currentValue;
+
+ // If the result of the addition is invalid, we must return an error.
+ if (!_preparedState->newValue.isValid()) {
+ mb::Element idElem = mb::findFirstChildNamed(root, "_id");
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Failed to apply $inc operations to current value ("
+ << currentValue.debugString() << ") for document {"
+ << idElem.toString() << "}");
+ }
+ // If the values are identical (same type, same value), then this is a no-op.
+ if (_preparedState->newValue.isIdentical(currentValue)) {
+ _preparedState->noOp = execInfo->noOp = true;
return Status::OK();
}
- Status ModifierInc::apply() const {
- dassert(_preparedState->noOp == false);
+ return Status::OK();
+}
- // If there's no need to create any further field part, the $inc is simply a value
- // assignment.
- if (_preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_fieldRef.numParts() - 1)) {
- return _preparedState->elemFound.setValueSafeNum(_preparedState->newValue);
- }
+Status ModifierInc::apply() const {
+ dassert(_preparedState->noOp == false);
- //
- // Complete document path logic
- //
-
- // Creates the final element that's going to be $set in 'doc'.
- mutablebson::Document& doc = _preparedState->doc;
- StringData lastPart = _fieldRef.getPart(_fieldRef.numParts() - 1);
- mutablebson::Element elemToSet = doc.makeElementSafeNum(lastPart, _preparedState->newValue);
- if (!elemToSet.ok()) {
- return Status(ErrorCodes::InternalError, "can't create new element");
- }
+ // If there's no need to create any further field part, the $inc is simply a value
+ // assignment.
+ if (_preparedState->elemFound.ok() && _preparedState->idxFound == (_fieldRef.numParts() - 1)) {
+ return _preparedState->elemFound.setValueSafeNum(_preparedState->newValue);
+ }
- // Now, we can be in two cases here, as far as attaching the element being set goes:
- // (a) none of the parts in the element's path exist, or (b) some parts of the path
- // exist but not all.
- if (!_preparedState->elemFound.ok()) {
- _preparedState->elemFound = doc.root();
- _preparedState->idxFound = 0;
- }
- else {
- _preparedState->idxFound++;
- }
+ //
+ // Complete document path logic
+ //
- // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
- return pathsupport::createPathAt(_fieldRef,
- _preparedState->idxFound,
- _preparedState->elemFound,
- elemToSet);
+ // Creates the final element that's going to be $set in 'doc'.
+ mutablebson::Document& doc = _preparedState->doc;
+ StringData lastPart = _fieldRef.getPart(_fieldRef.numParts() - 1);
+ mutablebson::Element elemToSet = doc.makeElementSafeNum(lastPart, _preparedState->newValue);
+ if (!elemToSet.ok()) {
+ return Status(ErrorCodes::InternalError, "can't create new element");
}
- Status ModifierInc::log(LogBuilder* logBuilder) const {
+ // Now, we can be in one of two cases here, as far as attaching the element being set goes:
+ // (a) none of the parts in the element's path exist, or (b) some parts of the path
+ // exist but not all.
+ if (!_preparedState->elemFound.ok()) {
+ _preparedState->elemFound = doc.root();
+ _preparedState->idxFound = 0;
+ } else {
+ _preparedState->idxFound++;
+ }
- dassert(_preparedState->newValue.isValid());
+ // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
+ return pathsupport::createPathAt(
+ _fieldRef, _preparedState->idxFound, _preparedState->elemFound, elemToSet);
+}
- // We'd like to create an entry such as {$set: {<fieldname>: <value>}} under 'logRoot'.
- // We start by creating the {$set: ...} Element.
- mutablebson::Document& doc = logBuilder->getDocument();
+Status ModifierInc::log(LogBuilder* logBuilder) const {
+ dassert(_preparedState->newValue.isValid());
- // Then we create the {<fieldname>: <value>} Element.
- mutablebson::Element logElement = doc.makeElementSafeNum(
- _fieldRef.dottedField(),
- _preparedState->newValue);
+ // We'd like to create an entry such as {$set: {<fieldname>: <value>}} under 'logRoot'.
+ // We start by creating the {$set: ...} Element.
+ mutablebson::Document& doc = logBuilder->getDocument();
- if (!logElement.ok()) {
- return Status(ErrorCodes::InternalError,
- str::stream() << "Could not append entry to "
- << (_mode == MODE_INC ? "$inc" : "$mul")
- << " oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
- << _preparedState->newValue.debugString() );
- }
+ // Then we create the {<fieldname>: <value>} Element.
+ mutablebson::Element logElement =
+ doc.makeElementSafeNum(_fieldRef.dottedField(), _preparedState->newValue);
- // Now, we attach the {<fieldname>: <value>} Element under the {$set: ...} segment.
- return logBuilder->addToSets(logElement);
+ if (!logElement.ok()) {
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Could not append entry to "
+ << (_mode == MODE_INC ? "$inc" : "$mul") << " oplog entry: "
+ << "set '" << _fieldRef.dottedField() << "' -> "
+ << _preparedState->newValue.debugString());
}
-} // namespace mongo
+ // Now, we attach the {<fieldname>: <value>} Element under the {$set: ...} segment.
+ return logBuilder->addToSets(logElement);
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_inc.h b/src/mongo/db/ops/modifier_inc.h
index cfb7bbdf25d..17c76acf88f 100644
--- a/src/mongo/db/ops/modifier_inc.h
+++ b/src/mongo/db/ops/modifier_inc.h
@@ -37,64 +37,57 @@
namespace mongo {
- class LogBuilder;
-
- class ModifierInc : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierInc);
-
- public:
-
- // TODO: This is a shortcut to implementing $mul by hijacking $inc. In the near future,
- // we should consider either pulling $mul into its own operator, or creating a general
- // purpose "numeric binary op" operator. Potentially, that operator could also subsume
- // $bit (thought there are some subtleties, like that $bit can have multiple
- // operations, and doing so with arbirary math operations introduces potential
- // associativity difficulties). At the very least, if this mechanism is retained, then
- // this class should be renamed at some point away from ModifierInc.
- enum ModifierIncMode {
- MODE_INC,
- MODE_MUL
- };
-
- ModifierInc(ModifierIncMode mode = MODE_INC);
- virtual ~ModifierInc();
-
- /**
- * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $inc mod such as
- * {$inc: {<fieldname: <value>}}. init() extracts the field name and the value to be
- * assigned to it from 'modExpr'. It returns OK if successful or a status describing
- * the error.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
-
- /** Evaluates the validity of applying $inc to the identified node, and computes
- * effects, handling upcasting and overflow as necessary.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
-
- /** Updates the node passed in prepare with the results of the $inc */
- virtual Status apply() const;
-
- /** Converts the result of the $inc into an equivalent $set under logRoot */
- virtual Status log(LogBuilder* logBuilder) const;
-
- private:
- const ModifierIncMode _mode;
-
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
-
- // 0 or index for $-positional in _fieldRef.
- size_t _posDollar;
-
- // Element of the $set expression.
- SafeNum _val;
-
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
-
-} // namespace mongo
+class LogBuilder;
+
+class ModifierInc : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierInc);
+
+public:
+ // TODO: This is a shortcut to implementing $mul by hijacking $inc. In the near future,
+ // we should consider either pulling $mul into its own operator, or creating a general
+ // purpose "numeric binary op" operator. Potentially, that operator could also subsume
+ // $bit (though there are some subtleties, like that $bit can have multiple
+ // operations, and doing so with arbitrary math operations introduces potential
+ // associativity difficulties). At the very least, if this mechanism is retained, then
+ // this class should be renamed at some point away from ModifierInc.
+ enum ModifierIncMode { MODE_INC, MODE_MUL };
+
+ ModifierInc(ModifierIncMode mode = MODE_INC);
+ virtual ~ModifierInc();
+
+ /**
+ * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $inc mod such as
+ * {$inc: {<fieldname>: <value>}}. init() extracts the field name and the value to be
+ * assigned to it from 'modExpr'. It returns OK if successful or a status describing
+ * the error.
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
+
+ /** Evaluates the validity of applying $inc to the identified node, and computes
+ * effects, handling upcasting and overflow as necessary.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
+
+ /** Updates the node passed in prepare with the results of the $inc */
+ virtual Status apply() const;
+
+ /** Converts the result of the $inc into an equivalent $set under logRoot */
+ virtual Status log(LogBuilder* logBuilder) const;
+
+private:
+ const ModifierIncMode _mode;
+
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
+
+ // 0 or index for $-positional in _fieldRef.
+ size_t _posDollar;
+
+ // Element of the $set expression.
+ SafeNum _val;
+
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_inc_test.cpp b/src/mongo/db/ops/modifier_inc_test.cpp
index 8a97bc490e7..52ab8d0e41c 100644
--- a/src/mongo/db/ops/modifier_inc_test.cpp
+++ b/src/mongo/db/ops/modifier_inc_test.cpp
@@ -42,483 +42,482 @@
namespace {
- using mongo::BSONObj;
- using mongo::LogBuilder;
- using mongo::ModifierInc;
- using mongo::ModifierInterface;
- using mongo::NumberInt;
- using mongo::Status;
- using mongo::StringData;
- using mongo::fromjson;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
- using mongo::mutablebson::countChildren;
-
- /** Helper to build and manipulate a $inc mod. */
- class Mod {
- public:
-
- explicit Mod(BSONObj modObj)
- : _modObj(modObj)
- , _mod(mongoutils::str::equals(modObj.firstElement().fieldName(), "$mul") ?
- ModifierInc::MODE_MUL : ModifierInc::MODE_INC) {
- StringData modName = modObj.firstElement().fieldName();
- ASSERT_OK(_mod.init(_modObj[modName].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierInc& mod() { return _mod; }
-
- private:
- BSONObj _modObj;
- ModifierInc _mod;
- };
-
- TEST(Init, FailToInitWithInvalidValue) {
- BSONObj modObj;
- ModifierInc mod;
-
- // String is an invalid increment argument
- modObj = fromjson("{ $inc : { a : '' } }");
- ASSERT_NOT_OK(mod.init(modObj["$inc"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // Object is an invalid increment argument
- modObj = fromjson("{ $inc : { a : {} } }");
- ASSERT_NOT_OK(mod.init(modObj["$inc"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- // Array is an invalid increment argument
- modObj = fromjson("{ $inc : { a : [] } }");
- ASSERT_NOT_OK(mod.init(modObj["$inc"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+using mongo::BSONObj;
+using mongo::LogBuilder;
+using mongo::ModifierInc;
+using mongo::ModifierInterface;
+using mongo::NumberInt;
+using mongo::Status;
+using mongo::StringData;
+using mongo::fromjson;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+using mongo::mutablebson::countChildren;
+
+/** Helper to build and manipulate a $inc mod. */
+class Mod {
+public:
+ explicit Mod(BSONObj modObj)
+ : _modObj(modObj),
+ _mod(mongoutils::str::equals(modObj.firstElement().fieldName(), "$mul")
+ ? ModifierInc::MODE_MUL
+ : ModifierInc::MODE_INC) {
+ StringData modName = modObj.firstElement().fieldName();
+ ASSERT_OK(_mod.init(_modObj[modName].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(Init, InitParsesNumberInt) {
- Mod incMod(BSON("$inc" << BSON("a" << static_cast<int>(1))));
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(Init, InitParsesNumberLong) {
- Mod incMod(BSON("$inc" << BSON("a" << static_cast<long long>(1))));
+ Status apply() const {
+ return _mod.apply();
}
- TEST(Init, InitParsesNumberDouble) {
- Mod incMod(BSON("$inc" << BSON("a" << 1.0)));
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(SimpleMod, PrepareSimpleOK) {
- Document doc(fromjson("{ a : 1 }"));
- Mod incMod(fromjson("{ $inc: { a : 1 }}"));
-
- ModifierInterface::ExecInfo execInfo;
-
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(SimpleMod, PrepareSimpleNonNumericObject) {
- Document doc(fromjson("{ a : {} }"));
- Mod incMod(fromjson("{ $inc: { a : 1 }}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(incMod.prepare(doc.root(), "", &execInfo));
- }
-
- TEST(SimpleMod, PrepareSimpleNonNumericArray) {
-
- Document doc(fromjson("{ a : [] }"));
- Mod incMod(fromjson("{ $inc: { a : 1 }}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(incMod.prepare(doc.root(), "", &execInfo));
- }
-
- TEST(SimpleMod, PrepareSimpleNonNumericString) {
- Document doc(fromjson("{ a : '' }"));
- Mod incMod(fromjson("{ $inc: { a : 1 }}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(incMod.prepare(doc.root(), "", &execInfo));
- }
-
- TEST(SimpleMod, ApplyAndLogEmptyDocument) {
- Document doc(fromjson("{}"));
- Mod incMod(fromjson("{ $inc: { a : 1 }}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
- }
-
- TEST(SimpleMod, LogWithoutApplyEmptyDocument) {
- Document doc(fromjson("{}"));
- Mod incMod(fromjson("{ $inc: { a : 1 }}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
- }
-
- TEST(SimpleMod, ApplyAndLogSimpleDocument) {
- Document doc(fromjson("{ a : 2 }"));
- Mod incMod(fromjson("{ $inc: { a : 1 }}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 3 }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 3 } }"), logDoc);
+ ModifierInc& mod() {
+ return _mod;
}
- TEST(DottedMod, ApplyAndLogSimpleDocument) {
- Document doc(fromjson("{ a : { b : 2 } }"));
- Mod incMod(fromjson("{ $inc: { 'a.b' : 1 } }"));
+private:
+ BSONObj _modObj;
+ ModifierInc _mod;
+};
+
+TEST(Init, FailToInitWithInvalidValue) {
+ BSONObj modObj;
+ ModifierInc mod;
+
+ // String is an invalid increment argument
+ modObj = fromjson("{ $inc : { a : '' } }");
+ ASSERT_NOT_OK(mod.init(modObj["$inc"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : { b : 3 } }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { 'a.b' : 3 } }"), logDoc);
- }
-
- TEST(InPlace, IntToInt) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod incMod(BSON("$inc" << BSON("a" << static_cast<int>(1))));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(InPlace, LongToLong) {
- Document doc(BSON("a" << static_cast<long long>(1)));
- Mod incMod(BSON("$inc" << BSON("a" << static_cast<long long>(1))));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(InPlace, DoubleToDouble) {
- Document doc(BSON("a" << 1.0));
- Mod incMod(BSON("$inc" << BSON("a" << 1.0 )));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(NoOp, Int) {
- Document doc(BSON("a" << static_cast<int>(1)));
- Mod incMod(BSON("$inc" << BSON("a" << static_cast<int>(0))));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(NoOp, Long) {
- Document doc(BSON("a" << static_cast<long long>(1)));
- Mod incMod(BSON("$inc" << BSON("a" << static_cast<long long>(0))));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(NoOp, Double) {
- Document doc(BSON("a" << 1.0));
- Mod incMod(BSON("$inc" << BSON("a" << 0.0)));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(Upcasting, UpcastIntToLong) {
- // Checks that $inc : NumberLong(0) turns a NumberInt into a NumberLong and logs it
- // correctly.
- Document doc(BSON("a" << static_cast<int>(1)));
- ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
-
- Mod incMod(BSON("$inc" << BSON("a" << static_cast<long long>(0))));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
- ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
- ASSERT_EQUALS(mongo::NumberLong, logDoc.root()["$set"]["a"].getType());
- }
-
- TEST(Upcasting, UpcastIntToDouble) {
- // Checks that $inc : 0.0 turns a NumberInt into a NumberDouble and logs it
- // correctly.
- Document doc(BSON("a" << static_cast<int>(1)));
- ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
-
- Mod incMod(fromjson("{ $inc : { a : 0.0 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1.0 }"), doc);
- ASSERT_EQUALS(mongo::NumberDouble, doc.root()["a"].getType());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 1.0 } }"), logDoc);
- ASSERT_EQUALS(mongo::NumberDouble, logDoc.root()["$set"]["a"].getType());
- }
+ // Object is an invalid increment argument
+ modObj = fromjson("{ $inc : { a : {} } }");
+ ASSERT_NOT_OK(mod.init(modObj["$inc"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+
+ // Array is an invalid increment argument
+ modObj = fromjson("{ $inc : { a : [] } }");
+ ASSERT_NOT_OK(mod.init(modObj["$inc"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, InitParsesNumberInt) {
+ Mod incMod(BSON("$inc" << BSON("a" << static_cast<int>(1))));
+}
+
+TEST(Init, InitParsesNumberLong) {
+ Mod incMod(BSON("$inc" << BSON("a" << static_cast<long long>(1))));
+}
+
+TEST(Init, InitParsesNumberDouble) {
+ Mod incMod(BSON("$inc" << BSON("a" << 1.0)));
+}
+
+TEST(SimpleMod, PrepareSimpleOK) {
+ Document doc(fromjson("{ a : 1 }"));
+ Mod incMod(fromjson("{ $inc: { a : 1 }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(SimpleMod, PrepareSimpleNonNumericObject) {
+ Document doc(fromjson("{ a : {} }"));
+ Mod incMod(fromjson("{ $inc: { a : 1 }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(incMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, PrepareSimpleNonNumericArray) {
+ Document doc(fromjson("{ a : [] }"));
+ Mod incMod(fromjson("{ $inc: { a : 1 }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(incMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, PrepareSimpleNonNumericString) {
+ Document doc(fromjson("{ a : '' }"));
+ Mod incMod(fromjson("{ $inc: { a : 1 }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(incMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, ApplyAndLogEmptyDocument) {
+ Document doc(fromjson("{}"));
+ Mod incMod(fromjson("{ $inc: { a : 1 }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
+}
+
+TEST(SimpleMod, LogWithoutApplyEmptyDocument) {
+ Document doc(fromjson("{}"));
+ Mod incMod(fromjson("{ $inc: { a : 1 }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogSimpleDocument) {
+ Document doc(fromjson("{ a : 2 }"));
+ Mod incMod(fromjson("{ $inc: { a : 1 }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 3 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 3 } }"), logDoc);
+}
+
+TEST(DottedMod, ApplyAndLogSimpleDocument) {
+ Document doc(fromjson("{ a : { b : 2 } }"));
+ Mod incMod(fromjson("{ $inc: { 'a.b' : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : { b : 3 } }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { 'a.b' : 3 } }"), logDoc);
+}
+
+TEST(InPlace, IntToInt) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod incMod(BSON("$inc" << BSON("a" << static_cast<int>(1))));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(InPlace, LongToLong) {
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ Mod incMod(BSON("$inc" << BSON("a" << static_cast<long long>(1))));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(InPlace, DoubleToDouble) {
+ Document doc(BSON("a" << 1.0));
+ Mod incMod(BSON("$inc" << BSON("a" << 1.0)));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(NoOp, Int) {
+ Document doc(BSON("a" << static_cast<int>(1)));
+ Mod incMod(BSON("$inc" << BSON("a" << static_cast<int>(0))));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(NoOp, Long) {
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ Mod incMod(BSON("$inc" << BSON("a" << static_cast<long long>(0))));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(NoOp, Double) {
+ Document doc(BSON("a" << 1.0));
+ Mod incMod(BSON("$inc" << BSON("a" << 0.0)));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(Upcasting, UpcastIntToLong) {
+ // Checks that $inc : NumberLong(0) turns a NumberInt into a NumberLong and logs it
+ // correctly.
+ Document doc(BSON("a" << static_cast<int>(1)));
+ ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
+
+ Mod incMod(BSON("$inc" << BSON("a" << static_cast<long long>(0))));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1 }"), doc);
+ ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 1 } }"), logDoc);
+ ASSERT_EQUALS(mongo::NumberLong, logDoc.root()["$set"]["a"].getType());
+}
+
+TEST(Upcasting, UpcastIntToDouble) {
+ // Checks that $inc : 0.0 turns a NumberInt into a NumberDouble and logs it
+ // correctly.
+ Document doc(BSON("a" << static_cast<int>(1)));
+ ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
+
+ Mod incMod(fromjson("{ $inc : { a : 0.0 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1.0 }"), doc);
+ ASSERT_EQUALS(mongo::NumberDouble, doc.root()["a"].getType());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 1.0 } }"), logDoc);
+ ASSERT_EQUALS(mongo::NumberDouble, logDoc.root()["$set"]["a"].getType());
+}
+
+TEST(Upcasting, UpcastLongToDouble) {
+ // Checks that $inc : 0.0 turns a NumberLong into a NumberDouble and logs it
+ // correctly.
+ Document doc(BSON("a" << static_cast<long long>(1)));
+ ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
+
+ Mod incMod(fromjson("{ $inc : { a : 0.0 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 1.0 }"), doc);
+ ASSERT_EQUALS(mongo::NumberDouble, doc.root()["a"].getType());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 1.0 } }"), logDoc);
+ ASSERT_EQUALS(mongo::NumberDouble, logDoc.root()["$set"]["a"].getType());
+}
+
+TEST(Upcasting, DoublesStayDoubles) {
+    // Checks that $inc with a NumberInt value doesn't change a NumberDouble away from double
+ Document doc(fromjson("{ a : 1.0 }"));
+ ASSERT_EQUALS(mongo::NumberDouble, doc.root()["a"].getType());
+
+ Mod incMod(fromjson("{ $inc : { a : 1 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 2.0 }"), doc);
+ ASSERT_EQUALS(mongo::NumberDouble, doc.root()["a"].getType());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : 2.0 } }"), logDoc);
+ ASSERT_EQUALS(mongo::NumberDouble, logDoc.root()["$set"]["a"].getType());
+}
+
+// The only interesting overflow cases are int->long via increment: we never overflow to
+// double, and we never decrease precision on decrement.
+
+TEST(Spilling, OverflowIntToLong) {
+ const int initial_value = std::numeric_limits<int32_t>::max();
+
+ Document doc(BSON("a" << static_cast<int>(initial_value)));
+ ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
+
+ Mod incMod(fromjson("{ $inc : { a : 1 } }"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ const long long target_value = static_cast<long long>(initial_value) + 1;
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(BSON("a" << target_value), doc);
+}
+
+TEST(Spilling, UnderflowIntToLong) {
+ const int initial_value = std::numeric_limits<int32_t>::min();
+
+ Document doc(BSON("a" << static_cast<int>(initial_value)));
+ ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
+
+ Mod incMod(fromjson("{ $inc : { a : -1 } }"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ const long long target_value = static_cast<long long>(initial_value) - 1;
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(BSON("a" << target_value), doc);
+}
+
+TEST(Lifecycle, IncModCanBeReused) {
+ Document doc1(fromjson("{ a : 1 }"));
+ Document doc2(fromjson("{ a : 1 }"));
+
+ Mod incMod(fromjson("{ $inc: { a : 1 }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc1.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_TRUE(doc1.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 2 }"), doc1);
+
+ ASSERT_OK(incMod.prepare(doc2.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_TRUE(doc2.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 2 }"), doc2);
+}
+
+// Given the current implementation of $mul, we really only need one test for
+// $mul. However, in the future, we should probably write additional ones, or perhaps find
+// a way to run all the above tests in both modes.
+TEST(Multiplication, ApplyAndLogSimpleDocument) {
+ Document doc(fromjson("{ a : { b : 2 } }"));
+ Mod incMod(fromjson("{ $mul: { 'a.b' : 3 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : { b : 6 } }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { 'a.b' : 6 } }"), logDoc);
+}
- TEST(Upcasting, UpcastLongToDouble) {
- // Checks that $inc : 0.0 turns a NumberLong into a NumberDouble and logs it
- // correctly.
- Document doc(BSON("a" << static_cast<long long>(1)));
- ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType());
-
- Mod incMod(fromjson("{ $inc : { a : 0.0 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 1.0 }"), doc);
- ASSERT_EQUALS(mongo::NumberDouble, doc.root()["a"].getType());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 1.0 } }"), logDoc);
- ASSERT_EQUALS(mongo::NumberDouble, logDoc.root()["$set"]["a"].getType());
- }
-
- TEST(Upcasting, DoublesStayDoubles) {
- // Checks that $inc : 0 doesn't change a NumberDouble away from double
- Document doc(fromjson("{ a : 1.0 }"));
- ASSERT_EQUALS(mongo::NumberDouble, doc.root()["a"].getType());
-
- Mod incMod(fromjson("{ $inc : { a : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 2.0 }"), doc);
- ASSERT_EQUALS(mongo::NumberDouble, doc.root()["a"].getType());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : 2.0 } }"), logDoc);
- ASSERT_EQUALS(mongo::NumberDouble, logDoc.root()["$set"]["a"].getType());
- }
-
- // The only interesting overflow cases are int->long via increment: we never overflow to
- // double, and we never decrease precision on decrement.
-
- TEST(Spilling, OverflowIntToLong) {
- const int initial_value = std::numeric_limits<int32_t>::max();
-
- Document doc(BSON("a" << static_cast<int>(initial_value)));
- ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
-
- Mod incMod(fromjson("{ $inc : { a : 1 } }"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- const long long target_value = static_cast<long long>(initial_value) + 1;
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(BSON("a" << target_value), doc);
- }
-
- TEST(Spilling, UnderflowIntToLong) {
- const int initial_value = std::numeric_limits<int32_t>::min();
-
- Document doc(BSON("a" << static_cast<int>(initial_value)));
- ASSERT_EQUALS(mongo::NumberInt, doc.root()["a"].getType());
-
- Mod incMod(fromjson("{ $inc : { a : -1 } }"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- const long long target_value = static_cast<long long>(initial_value) - 1;
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(BSON("a" << target_value), doc);
- }
-
- TEST(Lifecycle, IncModCanBeReused) {
- Document doc1(fromjson("{ a : 1 }"));
- Document doc2(fromjson("{ a : 1 }"));
-
- Mod incMod(fromjson("{ $inc: { a : 1 }}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc1.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_TRUE(doc1.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 2 }"), doc1);
-
- ASSERT_OK(incMod.prepare(doc2.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_TRUE(doc2.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 2 }"), doc2);
- }
-
- // Given the current implementation of $mul, we really only need one test for
- // $mul. However, in the future, we should probably write additional ones, or, perhaps find
- // a way to run all the obove tests in both modes.
- TEST(Multiplication, ApplyAndLogSimpleDocument) {
- Document doc(fromjson("{ a : { b : 2 } }"));
- Mod incMod(fromjson("{ $mul: { 'a.b' : 3 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : { b : 6 } }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { 'a.b' : 6 } }"), logDoc);
- }
-
- TEST(Multiplication, ApplyAndLogMissingElement) {
- Document doc(fromjson("{ a : 0 }"));
- Mod incMod(fromjson("{ $mul : { b : 3 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : 0, b : 0 }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(incMod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { b : 0 } }"), logDoc);
- }
-
- TEST(Multiplication, ApplyMissingElementInt) {
- const int int_zero = 0;
- const int int_three = 3;
-
- Document doc(BSON("a" << int_zero));
- Mod incMod(BSON("$mul" << BSON("b" << int_three)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(BSON("a" << int_zero << "b" << int_zero), doc);
- ASSERT_EQUALS(mongo::NumberInt, doc.root().rightChild().getType());
- }
-
- TEST(Multiplication, ApplyMissingElementLongLong) {
- const long long ll_zero = 0;
- const long long ll_three = 3;
-
- Document doc(BSON("a" << ll_zero));
- Mod incMod(BSON("$mul" << BSON("b" << ll_three)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(BSON("a" << ll_zero << "b" << ll_zero), doc);
- ASSERT_EQUALS(mongo::NumberLong, doc.root().rightChild().getType());
- }
-
- TEST(Multiplication, ApplyMissingElementDouble) {
- const double double_zero = 0;
- const double double_three = 3;
-
- Document doc(BSON("a" << double_zero));
- Mod incMod(BSON("$mul" << BSON("b" << double_three)));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(incMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(BSON("a" << double_zero << "b" << 0), doc);
- ASSERT_EQUALS(mongo::NumberDouble, doc.root().rightChild().getType());
- }
+TEST(Multiplication, ApplyAndLogMissingElement) {
+ Document doc(fromjson("{ a : 0 }"));
+ Mod incMod(fromjson("{ $mul : { b : 3 } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : 0, b : 0 }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(incMod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { b : 0 } }"), logDoc);
+}
-} // namespace
+TEST(Multiplication, ApplyMissingElementInt) {
+ const int int_zero = 0;
+ const int int_three = 3;
+
+ Document doc(BSON("a" << int_zero));
+ Mod incMod(BSON("$mul" << BSON("b" << int_three)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(BSON("a" << int_zero << "b" << int_zero), doc);
+ ASSERT_EQUALS(mongo::NumberInt, doc.root().rightChild().getType());
+}
+
+TEST(Multiplication, ApplyMissingElementLongLong) {
+ const long long ll_zero = 0;
+ const long long ll_three = 3;
+
+ Document doc(BSON("a" << ll_zero));
+ Mod incMod(BSON("$mul" << BSON("b" << ll_three)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(BSON("a" << ll_zero << "b" << ll_zero), doc);
+ ASSERT_EQUALS(mongo::NumberLong, doc.root().rightChild().getType());
+}
+
+TEST(Multiplication, ApplyMissingElementDouble) {
+ const double double_zero = 0;
+ const double double_three = 3;
+
+ Document doc(BSON("a" << double_zero));
+ Mod incMod(BSON("$mul" << BSON("b" << double_three)));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(incMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(incMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(BSON("a" << double_zero << "b" << 0), doc);
+ ASSERT_EQUALS(mongo::NumberDouble, doc.root().rightChild().getType());
+}
+
+} // namespace
diff --git a/src/mongo/db/ops/modifier_interface.h b/src/mongo/db/ops/modifier_interface.h
index 3574615e6f4..eba8b6c324d 100644
--- a/src/mongo/db/ops/modifier_interface.h
+++ b/src/mongo/db/ops/modifier_interface.h
@@ -36,160 +36,167 @@
namespace mongo {
- class LogBuilder;
+class LogBuilder;
+/**
+ * Abstract base class for update "modifiers" (a.k.a. "$ operators"). To create a new
+ * operator, implement a new derived class.
+ *
+ * A typical call sequence for the class is:
+ *
+ * + init() with the mod arguments
+ *
+ * + For each document that is being touched on that update, the following methods are
+ * going to be called once for that document and in the order the calls appear here.
+ *
+ * + prepare() to check if mod is viable over the document
+ *
+ * + apply(), effectively computing the update
+ *
+ * + log() registering the change in the log for replication purposes
+ *
+ * Again, a modifier implementation may rely on these last three calls being made and in
+ * that particular order and therefore can keep and reuse state between these calls, when
+ * appropriate.
+ *
+ * TODO:
+ * For a reference implementation, see modifier_identity.{h,cpp} used in tests.
+ */
+class ModifierInterface {
+public:
+ virtual ~ModifierInterface() {}
+
+ struct Options;
/**
- * Abstract base class for update "modifiers" (a.k.a "$ operators"). To create a new
- * operator, implement a new derived class.
+ * Returns OK and extracts the parameters for this given mod from 'modExpr'. For
+ * instance, for a $inc, extracts the increment value. The init() method would be
+ * called only once per operand, that is, if a { $inc: { a: 1, b: 1 } } is issued,
+ * there would be one instance of the operator working on 'a' and one on 'b'. In each
+ * case, init() would be called once with the respective bson element.
*
- * A typical call sequence for the class is:
+ * If 'modExpr' is invalid, returns an error status with a reason description.
*
- * + init() with the mod arguments
+ * The optional bool out parameter 'positional', if provided, will be set to 'true' if
+ * the mod requires matched field details to be provided when calling 'prepare'. The
+ * field is optional since this is a hint to the caller about what work is needed to
+ * correctly invoke 'prepare'. It is always legal to provide any match details
+     * unconditionally. The value set in 'positional', if any, is only meaningful if 'init'
+ * returns an OK status.
*
- * + For each document that is being touched on that update, the following methods are
- * going to be called once for that document and in the order the calls appear here.
+ * Note:
*
- * + prepare() to check if mod is viable over the document
+     * + An operator may assume the modExpr passed here will be unchanged throughout the
+     *   mod object's lifetime and also that modExpr's lifetime exceeds the lifetime of
+     *   this mod. Therefore, taking references to elements inside modExpr is
+ * valid.
+ */
+ virtual Status init(const BSONElement& modExpr,
+ const Options& opts,
+ bool* positional = NULL) = 0;
+
+ /**
+     * Returns OK if it would be correct to apply this mod over the document 'root' (e.g., if
+ * we're $inc-ing a field, is that field numeric in the current doc?).
*
- * + apply(), effectively computing the update
+ * If the field this mod is targeted to contains a $-positional parameter, that value
+ * can be bound with 'matchedField', passed by the caller.
*
- * + log() registering the change in the log for replication purposes
+     * In addition, the call identifies which field(s) of 'root' the mod is interested
+ * in changing (note that the modifier may want to add a field that's not present in
+ * the document). The call also determines whether it could modify the document in
+ * place and whether it is a no-op for the given document. All this information is in
+ * the passed 'execInfo', which is filled inside the call.
*
- * Again, a modifier implementation may rely on these last three calls being made and in
- * that particular order and therefore can keep and reuse state between these calls, when
- * appropriate.
+ * If the mod cannot be applied over 'root', returns an error status with a reason
+ * description.
*
- * TODO:
- * For a reference implementation, see modifier_identity.{h,cpp} used in tests.
+ * Note that you must provide a meaningful 'matchedField' here, unless 'init' set
+ * 'positional' to 'false', in which case you may pass an empty StringData object.
*/
- class ModifierInterface {
- public:
- virtual ~ModifierInterface() { }
-
- struct Options;
- /**
- * Returns OK and extracts the parameters for this given mod from 'modExpr'. For
- * instance, for a $inc, extracts the increment value. The init() method would be
- * called only once per operand, that is, if a { $inc: { a: 1, b: 1 } } is issued,
- * there would be one instance of the operator working on 'a' and one on 'b'. In each
- * case, init() would be called once with the respective bson element.
- *
- * If 'modExpr' is invalid, returns an error status with a reason description.
- *
- * The optional bool out parameter 'positional', if provided, will be set to 'true' if
- * the mod requires matched field details to be provided when calling 'prepare'. The
- * field is optional since this is a hint to the caller about what work is needed to
- * correctly invoke 'prepare'. It is always legal to provide any match details
- * unconditionally. The value set in 'positional' if any, is only meaningful if 'init'
- * returns an OK status.
- *
- * Note:
- *
- * + An operator may assume the modExpr passed here will be unchanged throughout all
- * the mod object lifetime and also that the modExrp's lifetime exceeds the life
- * time of this mod. Therefore, taking references to elements inside modExpr is
- * valid.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL) = 0;
-
- /**
- * Returns OK if it would be correct to apply this mod over the document 'root' (e.g, if
- * we're $inc-ing a field, is that field numeric in the current doc?).
- *
- * If the field this mod is targeted to contains a $-positional parameter, that value
- * can be bound with 'matchedField', passed by the caller.
- *
- * In addition, the call also identifies which fields(s) of 'root' the mod is interested
- * in changing (note that the modifier may want to add a field that's not present in
- * the document). The call also determines whether it could modify the document in
- * place and whether it is a no-op for the given document. All this information is in
- * the passed 'execInfo', which is filled inside the call.
- *
- * If the mod cannot be applied over 'root', returns an error status with a reason
- * description.
- *
- * Note that you must provide a meaningful 'matchedField' here, unless 'init' set
- * 'positional' to 'false', in which case you may pass an empty StringData object.
- */
- struct ExecInfo;
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- /* IN-OUT */ ExecInfo* execInfo) = 0;
-
- /**
- * Returns OK and modifies (or adds) an element (or elements) from the 'root' passed on
- * the prepareMod call. This may act on multiple fields but should only be called once
- * per operator.
- *
- * For this call to be issued, the call to 'prepareElem' must have necessarily turned
- * off 'ExecInfo.noOp', ie this mod over this document is not a no-op.
- *
- * If the mod could not be applied, returns an error status with a reason description.
- */
- virtual Status apply() const = 0 ;
-
- /**
- * Returns OK and records the result of this mod in the provided LogBuilder. The mod
- * must have kept enough state to be able to produce the log record (see idempotency
- * note below). This call may be issued even if apply() was not.
- *
- * If the mod could not be logged, returns an error status with a reason description.
- *
- * Idempotency Note:
- *
- * + The modifier must log a mod that is idempotent, ie, applying it more than once
- * to a base collection would produce the same result as applying it only once. For
- * example, a $inc can be switched to a $set for the resulting incremented value,
- * for logging purposes. An array based operator may check the contents of the
- * array before operating on it.
- */
- virtual Status log(LogBuilder* logBuilder) const = 0;
- };
+ struct ExecInfo;
+ virtual Status prepare(mutablebson::Element root,
+ StringData matchedField,
+ /* IN-OUT */ ExecInfo* execInfo) = 0;
/**
- * Options used to control Modifier behavior
+ * Returns OK and modifies (or adds) an element (or elements) from the 'root' passed on
+ * the prepareMod call. This may act on multiple fields but should only be called once
+ * per operator.
+ *
+ * For this call to be issued, the call to 'prepareElem' must have necessarily turned
+ * off 'ExecInfo.noOp', ie this mod over this document is not a no-op.
+ *
+ * If the mod could not be applied, returns an error status with a reason description.
*/
- struct ModifierInterface::Options {
- Options() : fromReplication(false), enforceOkForStorage(true) {}
- Options(bool repl, bool ofs) : fromReplication(repl), enforceOkForStorage(ofs) {}
+ virtual Status apply() const = 0;
- static Options normal() { return Options(false, true); }
- static Options fromRepl() { return Options(true, false); }
- static Options unchecked() { return Options(false, false); }
-
- bool fromReplication;
- bool enforceOkForStorage;
- };
+ /**
+ * Returns OK and records the result of this mod in the provided LogBuilder. The mod
+ * must have kept enough state to be able to produce the log record (see idempotency
+ * note below). This call may be issued even if apply() was not.
+ *
+ * If the mod could not be logged, returns an error status with a reason description.
+ *
+ * Idempotency Note:
+ *
+ * + The modifier must log a mod that is idempotent, ie, applying it more than once
+ * to a base collection would produce the same result as applying it only once. For
+ * example, a $inc can be switched to a $set for the resulting incremented value,
+ * for logging purposes. An array based operator may check the contents of the
+ * array before operating on it.
+ */
+ virtual Status log(LogBuilder* logBuilder) const = 0;
+};
- struct ModifierInterface::ExecInfo {
- static const int MAX_NUM_FIELDS = 2;
+/**
+ * Options used to control Modifier behavior
+ */
+struct ModifierInterface::Options {
+ Options() : fromReplication(false), enforceOkForStorage(true) {}
+ Options(bool repl, bool ofs) : fromReplication(repl), enforceOkForStorage(ofs) {}
+
+ static Options normal() {
+ return Options(false, true);
+ }
+ static Options fromRepl() {
+ return Options(true, false);
+ }
+ static Options unchecked() {
+ return Options(false, false);
+ }
+
+ bool fromReplication;
+ bool enforceOkForStorage;
+};
+
+struct ModifierInterface::ExecInfo {
+ static const int MAX_NUM_FIELDS = 2;
- /**
- * An update mod may specify that it wishes to the applied only if the context
- * of the update turns out a certain way.
- */
- enum UpdateContext {
- // This mod wants to be applied only if the update turns out to be an insert.
- INSERT_CONTEXT,
+ /**
+     * An update mod may specify that it wishes to be applied only if the context
+ * of the update turns out a certain way.
+ */
+ enum UpdateContext {
+ // This mod wants to be applied only if the update turns out to be an insert.
+ INSERT_CONTEXT,
- // This mod wants to be applied only if the update is not an insert.
- UPDATE_CONTEXT,
+ // This mod wants to be applied only if the update is not an insert.
+ UPDATE_CONTEXT,
- // This mod doesn't care if the update will be an update or an upsert.
- ANY_CONTEXT
- };
+ // This mod doesn't care if the update will be an update or an upsert.
+ ANY_CONTEXT
+ };
- ExecInfo() : noOp(false), context(ANY_CONTEXT) {
- for (int i = 0; i < MAX_NUM_FIELDS; i++) {
- fieldRef[i] = NULL;
- }
+ ExecInfo() : noOp(false), context(ANY_CONTEXT) {
+ for (int i = 0; i < MAX_NUM_FIELDS; i++) {
+ fieldRef[i] = NULL;
}
+ }
- // The fields of concern to the driver: no other op may modify the fields listed here.
- FieldRef* fieldRef[MAX_NUM_FIELDS]; // not owned here
- bool noOp;
- UpdateContext context;
- };
+ // The fields of concern to the driver: no other op may modify the fields listed here.
+ FieldRef* fieldRef[MAX_NUM_FIELDS]; // not owned here
+ bool noOp;
+ UpdateContext context;
+};
-} // namespace mongo
+} // namespace mongo
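As an illustrative aside: the comment block above describes the init() -> prepare() -> apply() -> log() call sequence for a modifier. Below is a minimal sketch of that sequence, assuming the ModifierInc class and the mutable-BSON helpers exercised by the tests earlier in this changeset (includes omitted, paths assumed):

    // Parse the operand once, as described for init().
    mongo::BSONObj modObj = mongo::fromjson("{ $inc : { a : 1 } }");
    mongo::ModifierInc incMod;
    mongo::Status status = incMod.init(modObj["$inc"].embeddedObject().firstElement(),
                                       mongo::ModifierInterface::Options::normal());

    // For each document touched by the update: prepare(), apply() if not a no-op, then log().
    mongo::mutablebson::Document doc(mongo::fromjson("{ a : 1 }"));
    mongo::ModifierInterface::ExecInfo execInfo;
    status = incMod.prepare(doc.root(), "", &execInfo);
    if (status.isOK() && !execInfo.noOp) {
        status = incMod.apply();  // doc is now { a : 2 }
    }

    // Record the idempotent form of the change for replication.
    mongo::mutablebson::Document logDoc;
    mongo::LogBuilder logBuilder(logDoc.root());
    status = incMod.log(&logBuilder);  // logDoc is now { $set : { a : 2 } }
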
diff --git a/src/mongo/db/ops/modifier_object_replace.cpp b/src/mongo/db/ops/modifier_object_replace.cpp
index 5ebbb4bd342..700f111e5b3 100644
--- a/src/mongo/db/ops/modifier_object_replace.cpp
+++ b/src/mongo/db/ops/modifier_object_replace.cpp
@@ -36,161 +36,151 @@
namespace mongo {
- namespace str = mongoutils::str;
-
- namespace {
- const char idFieldName[] = "_id";
-
- Status fixupTimestamps( const BSONObj& obj ) {
- BSONObjIterator i(obj);
- while (i.more()) {
- BSONElement e = i.next();
-
- // Skip _id field -- we do not replace it
- if (e.type() == bsonTimestamp && e.fieldNameStringData() != idFieldName) {
- // TODO(emilkie): This is not endian-safe.
- unsigned long long &timestamp =
- *(reinterpret_cast<unsigned long long*>(
- const_cast<char *>(e.value())));
- if (timestamp == 0) {
- // performance note, this locks a mutex:
- Timestamp ts(getNextGlobalTimestamp());
- timestamp = ts.asULL();
- }
- }
+namespace str = mongoutils::str;
+
+namespace {
+const char idFieldName[] = "_id";
+
+Status fixupTimestamps(const BSONObj& obj) {
+ BSONObjIterator i(obj);
+ while (i.more()) {
+ BSONElement e = i.next();
+
+ // Skip _id field -- we do not replace it
+ if (e.type() == bsonTimestamp && e.fieldNameStringData() != idFieldName) {
+ // TODO(emilkie): This is not endian-safe.
+ unsigned long long& timestamp =
+ *(reinterpret_cast<unsigned long long*>(const_cast<char*>(e.value())));
+ if (timestamp == 0) {
+ // performance note, this locks a mutex:
+ Timestamp ts(getNextGlobalTimestamp());
+ timestamp = ts.asULL();
}
-
- return Status::OK();
}
}
- struct ModifierObjectReplace::PreparedState {
+ return Status::OK();
+}
+}
- PreparedState(mutablebson::Document* targetDoc)
- : doc(*targetDoc)
- , noOp(false) {
- }
+struct ModifierObjectReplace::PreparedState {
+ PreparedState(mutablebson::Document* targetDoc) : doc(*targetDoc), noOp(false) {}
- // Document that is going to be changed
- mutablebson::Document& doc;
+ // Document that is going to be changed
+ mutablebson::Document& doc;
- // This is a no op
- bool noOp;
+ // This is a no op
+ bool noOp;
+};
- };
+ModifierObjectReplace::ModifierObjectReplace() : _val() {}
- ModifierObjectReplace::ModifierObjectReplace() : _val() {
- }
+ModifierObjectReplace::~ModifierObjectReplace() {}
- ModifierObjectReplace::~ModifierObjectReplace() {
+Status ModifierObjectReplace::init(const BSONElement& modExpr,
+ const Options& opts,
+ bool* positional) {
+ if (modExpr.type() != Object) {
+        // Impossible, really, since the caller checks this already...
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Document replacement expects a complete document"
+ " but the type supplied was " << modExpr.type());
}
- Status ModifierObjectReplace::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
-
- if (modExpr.type() != Object) {
- // Impossible, really since the caller check this already...
- return Status(ErrorCodes::BadValue,
- str::stream() << "Document replacement expects a complete document"
- " but the type supplied was "
- << modExpr.type());
- }
+ // Object replacements never have positional operator.
+ if (positional)
+ *positional = false;
+
+    // We make a copy of the object here because the update driver does not guarantee, in
+ // the case of object replacement, that the modExpr is going to outlive this mod.
+ _val = modExpr.embeddedObject().getOwned();
+ return fixupTimestamps(_val);
+}
+
+Status ModifierObjectReplace::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(&root.getDocument()));
+
+ // objectSize checked by binaryEqual (optimization)
+ BSONObj objOld = root.getDocument().getObject();
+ if (objOld.binaryEqual(_val)) {
+ _preparedState->noOp = true;
+ execInfo->noOp = true;
+ }
- // Object replacements never have positional operator.
- if (positional)
- *positional = false;
+ return Status::OK();
+}
- // We make a copy of the object here because the update driver does not guarantees, in
- // the case of object replacement, that the modExpr is going to outlive this mod.
- _val = modExpr.embeddedObject().getOwned();
- return fixupTimestamps(_val);
- }
+Status ModifierObjectReplace::apply() const {
+ dassert(!_preparedState->noOp);
- Status ModifierObjectReplace::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
- _preparedState.reset(new PreparedState(&root.getDocument()));
+ // Remove the contents of the provided doc.
+ mutablebson::Document& doc = _preparedState->doc;
+ mutablebson::Element current = doc.root().leftChild();
+ mutablebson::Element srcIdElement = doc.end();
+ while (current.ok()) {
+ mutablebson::Element toRemove = current;
+ current = current.rightSibling();
- // objectSize checked by binaryEqual (optimization)
- BSONObj objOld = root.getDocument().getObject();
- if (objOld.binaryEqual(_val)) {
- _preparedState->noOp = true;
- execInfo->noOp = true;
+ // Skip _id field element -- it should not change
+ if (toRemove.getFieldName() == idFieldName) {
+ srcIdElement = toRemove;
+ continue;
}
- return Status::OK();
+ Status status = toRemove.remove();
+ if (!status.isOK()) {
+ return status;
+ }
}
- Status ModifierObjectReplace::apply() const {
- dassert(!_preparedState->noOp);
-
- // Remove the contents of the provided doc.
- mutablebson::Document& doc = _preparedState->doc;
- mutablebson::Element current = doc.root().leftChild();
- mutablebson::Element srcIdElement = doc.end();
- while (current.ok()) {
- mutablebson::Element toRemove = current;
- current = current.rightSibling();
-
- // Skip _id field element -- it should not change
- if (toRemove.getFieldName() == idFieldName) {
- srcIdElement = toRemove;
+ // Insert the provided contents instead.
+ BSONElement dstIdElement;
+ BSONObjIterator it(_val);
+ while (it.more()) {
+ BSONElement elem = it.next();
+ if (elem.fieldNameStringData() == idFieldName) {
+ dstIdElement = elem;
+
+ // Do not duplicate _id field
+ if (srcIdElement.ok()) {
+ if (srcIdElement.compareWithBSONElement(dstIdElement, true) != 0) {
+ return Status(ErrorCodes::ImmutableField,
+ str::stream() << "The _id field cannot be changed from {"
+ << srcIdElement.toString() << "} to {"
+ << dstIdElement.toString() << "}.");
+ }
continue;
}
-
- Status status = toRemove.remove();
- if (!status.isOK()) {
- return status;
- }
}
- // Insert the provided contents instead.
- BSONElement dstIdElement;
- BSONObjIterator it(_val);
- while (it.more()) {
- BSONElement elem = it.next();
- if (elem.fieldNameStringData() == idFieldName) {
- dstIdElement = elem;
-
- // Do not duplicate _id field
- if (srcIdElement.ok()) {
- if (srcIdElement.compareWithBSONElement(dstIdElement, true) != 0) {
- return Status(ErrorCodes::ImmutableField,
- str::stream() << "The _id field cannot be changed from {"
- << srcIdElement.toString() << "} to {"
- << dstIdElement.toString() << "}.");
- }
- continue;
- }
- }
-
- Status status = doc.root().appendElement(elem);
- if (!status.isOK()) {
- return status;
- }
+ Status status = doc.root().appendElement(elem);
+ if (!status.isOK()) {
+ return status;
}
-
- return Status::OK();
}
- Status ModifierObjectReplace::log(LogBuilder* logBuilder) const {
+ return Status::OK();
+}
- mutablebson::Document& doc = logBuilder->getDocument();
+Status ModifierObjectReplace::log(LogBuilder* logBuilder) const {
+ mutablebson::Document& doc = logBuilder->getDocument();
- mutablebson::Element replacementObject = doc.end();
- Status status = logBuilder->getReplacementObject(&replacementObject);
+ mutablebson::Element replacementObject = doc.end();
+ Status status = logBuilder->getReplacementObject(&replacementObject);
- if (status.isOK()) {
- mutablebson::Element current = _preparedState->doc.root().leftChild();
- while (current.ok()) {
- status = replacementObject.appendElement(current.getValue());
- if (!status.isOK())
- return status;
- current = current.rightSibling();
- }
+ if (status.isOK()) {
+ mutablebson::Element current = _preparedState->doc.root().leftChild();
+ while (current.ok()) {
+ status = replacementObject.appendElement(current.getValue());
+ if (!status.isOK())
+ return status;
+ current = current.rightSibling();
}
-
- return status;
}
-} // namespace mongo
+ return status;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_object_replace.h b/src/mongo/db/ops/modifier_object_replace.h
index 4e923e4a7fd..add180d472e 100644
--- a/src/mongo/db/ops/modifier_object_replace.h
+++ b/src/mongo/db/ops/modifier_object_replace.h
@@ -37,62 +37,57 @@
namespace mongo {
- class LogBuilder;
-
- class ModifierObjectReplace : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierObjectReplace);
-
- public:
-
- ModifierObjectReplace();
-
- //
- // Modifier interface implementation
- //
-
- virtual ~ModifierObjectReplace();
-
-
- /**
- * Returns true and takes the embedded object contained in 'modExpr' to be the object
- * we're replacing for. The field name of 'modExpr' is ignored. If 'modExpr' is in an
- * unexpected format or if it can't be parsed for some reason, returns an error status
- * describing the error.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
-
- /**
- * Registers the that 'root' is in the document that we want to fully replace.
- * prepare() returns OK and always fills 'execInfo' with true for
- * noOp.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
-
- /**
- * Replaces the document passed in prepare() for the object passed in init(). Returns
- * OK if successful or a status describing the error.
- */
- virtual Status apply() const;
-
- /**
- * Adds a log entry to logRoot corresponding to full object replacement. Returns OK if
- * successful or a status describing the error.
- */
- virtual Status log(LogBuilder* logBuilder) const;
-
- private:
-
- // Object to replace with.
- BSONObj _val;
-
- // The document whose value needs to be replaced. This state is valid after a prepare()
- // was issued and until a log() is issued. The document this mod is being prepared
- // against must e live throughout all the calls.
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
-
-} // namespace mongo
+class LogBuilder;
+
+class ModifierObjectReplace : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierObjectReplace);
+
+public:
+ ModifierObjectReplace();
+
+ //
+ // Modifier interface implementation
+ //
+
+ virtual ~ModifierObjectReplace();
+
+
+ /**
+     * Returns OK and takes the embedded object contained in 'modExpr' to be the object
+     * we're replacing with. The field name of 'modExpr' is ignored. If 'modExpr' is in an
+ * unexpected format or if it can't be parsed for some reason, returns an error status
+ * describing the error.
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
+
+ /**
+     * Registers that 'root' is in the document that we want to fully replace.
+     * prepare() returns OK and marks 'execInfo' as a noOp only when the document is
+     * already identical to the replacement object.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
+
+ /**
+ * Replaces the document passed in prepare() for the object passed in init(). Returns
+ * OK if successful or a status describing the error.
+ */
+ virtual Status apply() const;
+
+ /**
+ * Adds a log entry to logRoot corresponding to full object replacement. Returns OK if
+ * successful or a status describing the error.
+ */
+ virtual Status log(LogBuilder* logBuilder) const;
+
+private:
+ // Object to replace with.
+ BSONObj _val;
+
+ // The document whose value needs to be replaced. This state is valid after a prepare()
+ // was issued and until a log() is issued. The document this mod is being prepared
+    // against must be live throughout all the calls.
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_object_replace_test.cpp b/src/mongo/db/ops/modifier_object_replace_test.cpp
index 3a5de50fead..083981fb34f 100644
--- a/src/mongo/db/ops/modifier_object_replace_test.cpp
+++ b/src/mongo/db/ops/modifier_object_replace_test.cpp
@@ -43,267 +43,267 @@
namespace {
- using mongo::BSONObj;
- using mongo::fromjson;
- using mongo::LogBuilder;
- using mongo::ModifierInterface;
- using mongo::ModifierObjectReplace;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::countChildren;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
- using mongo::mutablebson::findFirstChildNamed;
- using mongo::NumberInt;
- using mongo::Status;
- using mongo::StringData;
-
- /** Helper to build and manipulate a $set mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj)
- : _mod() {
- _modObj = modObj;
- ASSERT_OK(_mod.init(BSON("" << modObj).firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierObjectReplace& mod() { return _mod; }
- BSONObj& obj() { return _modObj; }
-
- private:
- ModifierObjectReplace _mod;
- BSONObj _modObj;
- };
-
- // Normal replacements below
- TEST(Normal, SingleFieldDoc){
- Document doc(fromjson("{_id:1, a:1}"));
- Mod mod(fromjson("{_id:1, b:12}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, fromjson("{_id:1, b:12}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(doc, logDoc);
+using mongo::BSONObj;
+using mongo::fromjson;
+using mongo::LogBuilder;
+using mongo::ModifierInterface;
+using mongo::ModifierObjectReplace;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+using mongo::mutablebson::findFirstChildNamed;
+using mongo::NumberInt;
+using mongo::Status;
+using mongo::StringData;
+
+/** Helper to build and manipulate an object-replacement mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj) : _mod() {
+ _modObj = modObj;
+ ASSERT_OK(
+ _mod.init(BSON("" << modObj).firstElement(), ModifierInterface::Options::normal()));
}
- TEST(Normal, ComplexDoc){
- Document doc(fromjson("{_id:1, a:1}"));
- Mod mod(fromjson("{_id:1, b:[123], c: {r:true}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, fromjson("{_id:1, b:[123], c: {r:true}}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(doc, logDoc);
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(Normal, OnlyIdField){
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{_id:1}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, mod.obj());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(doc, logDoc);
+ Status apply() const {
+ return _mod.apply();
}
- // These updates have to do with updates without an _id field
- // (the existing _id isn't removed)
- TEST(IdLeft, EmptyDocReplacement){
- Document doc(fromjson("{_id:1}"));
- Mod mod(fromjson("{}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, fromjson("{_id:1}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(doc, logDoc);
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(IdLeft, EmptyDoc){
- Document doc(fromjson("{_id:1}"));
- Mod mod(fromjson("{}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, fromjson("{_id:1}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(doc, logDoc);
+ ModifierObjectReplace& mod() {
+ return _mod;
}
-
- TEST(IdLeft, SingleFieldAddition){
- Document doc(fromjson("{_id:1}"));
- Mod mod(fromjson("{a:1}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, fromjson("{_id:1, a:1}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(doc, logDoc);
- }
-
- TEST(IdLeft, SingleFieldReplaced){
- Document doc(fromjson("{a: []}"));
- Mod mod(fromjson("{a:10}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, mod.obj());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(doc, logDoc);
- }
-
- TEST(IdLeft, SwapFields){
- Document doc(fromjson("{_id:1, a:1}"));
- Mod mod(fromjson("{b:1}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, fromjson("{_id:1, b:1}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(doc, logDoc);
- }
-
- TEST(IdImmutable, ReplaceIdNumber){
- Document doc(fromjson("{_id:1, a:1}"));
- Mod mod(fromjson("{_id:2}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_NOT_OK(mod.apply());
- }
-
- TEST(IdImmutable, ReplaceIdNumberSameVal){
- Document doc(fromjson("{_id:1, a:1}"));
- Mod mod(fromjson("{_id:2}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_NOT_OK(mod.apply());
- }
-
- TEST(IdImmutable, ReplaceEmbeddedId){
- Document doc(fromjson("{_id:{a:1, b:2}, a:1}"));
- Mod mod(fromjson("{_id:{b:2, a:1}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_NOT_OK(mod.apply());
- }
-
- TEST(Timestamp, IdNotReplaced){
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{_id:Timestamp(0,0), a:1}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_EQUALS(doc, mod.obj());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(mod.obj(), logDoc);
-
- Element idElem = findFirstChildNamed(logDoc.root(), "_id");
- ASSERT(idElem.ok());
- ASSERT(idElem.getValueTimestamp().isNull());
-
- }
-
- TEST(Timestamp, ReplaceAll){
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{a:Timestamp(0,0), r:1, x:1, b:Timestamp(0,0)}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
-
- Element elem = findFirstChildNamed(doc.root(), "a");
- ASSERT(elem.ok());
- ASSERT_NOT_EQUALS(0U, elem.getValueTimestamp().getSecs());
- ASSERT_NOT_EQUALS(0U, elem.getValueTimestamp().getInc());
-
- elem = findFirstChildNamed(doc.root(), "b");
- ASSERT(elem.ok());
- ASSERT_NOT_EQUALS(0U, elem.getValueTimestamp().getSecs());
- ASSERT_NOT_EQUALS(0U, elem.getValueTimestamp().getInc());
+ BSONObj& obj() {
+ return _modObj;
}
-} // unnamed namespace
+private:
+ ModifierObjectReplace _mod;
+ BSONObj _modObj;
+};
+
+// Normal replacements below
+TEST(Normal, SingleFieldDoc) {
+ Document doc(fromjson("{_id:1, a:1}"));
+ Mod mod(fromjson("{_id:1, b:12}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, fromjson("{_id:1, b:12}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(doc, logDoc);
+}
+
+TEST(Normal, ComplexDoc) {
+ Document doc(fromjson("{_id:1, a:1}"));
+ Mod mod(fromjson("{_id:1, b:[123], c: {r:true}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, fromjson("{_id:1, b:[123], c: {r:true}}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(doc, logDoc);
+}
+
+TEST(Normal, OnlyIdField) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{_id:1}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, mod.obj());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(doc, logDoc);
+}
+
+// These tests cover replacement documents that do not contain an _id field
+// (the existing _id is preserved, not removed)
+TEST(IdLeft, EmptyDocReplacement) {
+ Document doc(fromjson("{_id:1}"));
+ Mod mod(fromjson("{}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, fromjson("{_id:1}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(doc, logDoc);
+}
+
+TEST(IdLeft, EmptyDoc) {
+ Document doc(fromjson("{_id:1}"));
+ Mod mod(fromjson("{}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, fromjson("{_id:1}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(doc, logDoc);
+}
+
+TEST(IdLeft, SingleFieldAddition) {
+ Document doc(fromjson("{_id:1}"));
+ Mod mod(fromjson("{a:1}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, fromjson("{_id:1, a:1}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(doc, logDoc);
+}
+
+TEST(IdLeft, SingleFieldReplaced) {
+ Document doc(fromjson("{a: []}"));
+ Mod mod(fromjson("{a:10}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, mod.obj());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(doc, logDoc);
+}
+
+TEST(IdLeft, SwapFields) {
+ Document doc(fromjson("{_id:1, a:1}"));
+ Mod mod(fromjson("{b:1}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, fromjson("{_id:1, b:1}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(doc, logDoc);
+}
+
+TEST(IdImmutable, ReplaceIdNumber) {
+ Document doc(fromjson("{_id:1, a:1}"));
+ Mod mod(fromjson("{_id:2}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_NOT_OK(mod.apply());
+}
+
+TEST(IdImmutable, ReplaceIdNumberSameVal) {
+ Document doc(fromjson("{_id:1, a:1}"));
+ Mod mod(fromjson("{_id:2}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_NOT_OK(mod.apply());
+}
+
+TEST(IdImmutable, ReplaceEmbeddedId) {
+ Document doc(fromjson("{_id:{a:1, b:2}, a:1}"));
+ Mod mod(fromjson("{_id:{b:2, a:1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_NOT_OK(mod.apply());
+}
+
+TEST(Timestamp, IdNotReplaced) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{_id:Timestamp(0,0), a:1}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_EQUALS(doc, mod.obj());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(mod.obj(), logDoc);
+
+ Element idElem = findFirstChildNamed(logDoc.root(), "_id");
+ ASSERT(idElem.ok());
+ ASSERT(idElem.getValueTimestamp().isNull());
+}
+
+TEST(Timestamp, ReplaceAll) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{a:Timestamp(0,0), r:1, x:1, b:Timestamp(0,0)}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+
+ Element elem = findFirstChildNamed(doc.root(), "a");
+ ASSERT(elem.ok());
+ ASSERT_NOT_EQUALS(0U, elem.getValueTimestamp().getSecs());
+ ASSERT_NOT_EQUALS(0U, elem.getValueTimestamp().getInc());
+
+ elem = findFirstChildNamed(doc.root(), "b");
+ ASSERT(elem.ok());
+ ASSERT_NOT_EQUALS(0U, elem.getValueTimestamp().getSecs());
+ ASSERT_NOT_EQUALS(0U, elem.getValueTimestamp().getInc());
+}
+
+} // unnamed namespace
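For illustration only (not part of the patch), a minimal sketch of one more test in the same style, assuming the Mod harness above and the replacement semantics it exercises (the stored _id is kept, every other field is swapped for the replacement document); it would live alongside the tests above:

    TEST(Normal, NestedDocReplacement) {
        // Replacement document carries no _id; the existing one must survive.
        Document doc(fromjson("{_id:1, a:{b:1}}"));
        Mod mod(fromjson("{a:{c:2}}"));

        ModifierInterface::ExecInfo execInfo;
        ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
        ASSERT_FALSE(execInfo.noOp);

        ASSERT_OK(mod.apply());
        ASSERT_EQUALS(doc, fromjson("{_id:1, a:{c:2}}"));

        // As in the tests above, the log records the full resulting document.
        Document logDoc;
        LogBuilder logBuilder(logDoc.root());
        ASSERT_OK(mod.log(&logBuilder));
        ASSERT_EQUALS(doc, logDoc);
    }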
diff --git a/src/mongo/db/ops/modifier_pop.cpp b/src/mongo/db/ops/modifier_pop.cpp
index e5fd836dea7..c46fdd7a9bf 100644
--- a/src/mongo/db/ops/modifier_pop.cpp
+++ b/src/mongo/db/ops/modifier_pop.cpp
@@ -38,176 +38,161 @@
namespace mongo {
- namespace mb = mutablebson;
- namespace str = mongoutils::str;
+namespace mb = mutablebson;
+namespace str = mongoutils::str;
- struct ModifierPop::PreparedState {
+struct ModifierPop::PreparedState {
+ PreparedState(mutablebson::Document* targetDoc)
+ : doc(*targetDoc),
+ elementToRemove(doc.end()),
+ pathFoundIndex(0),
+ pathFoundElement(doc.end()) {}
- PreparedState(mutablebson::Document* targetDoc)
- : doc(*targetDoc)
- , elementToRemove(doc.end())
- , pathFoundIndex(0)
- , pathFoundElement(doc.end()) {
- }
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
- // Document that is going to be changed.
- mutablebson::Document& doc;
+ // Element to be removed
+ mutablebson::Element elementToRemove;
- // Element to be removed
- mutablebson::Element elementToRemove;
+    // Index in _fieldRef for which an Element exists in the document.
+ size_t pathFoundIndex;
- // Index in _fieldRef for which an Element exist in the document.
- size_t pathFoundIndex;
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element pathFoundElement;
+};
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element pathFoundElement;
- };
+ModifierPop::ModifierPop() : _fieldRef(), _positionalPathIndex(0), _fromTop(false) {}
- ModifierPop::ModifierPop()
- : _fieldRef()
- , _positionalPathIndex(0)
- , _fromTop(false) {
- }
+ModifierPop::~ModifierPop() {}
- ModifierPop::~ModifierPop() {
- }
+Status ModifierPop::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ //
+ // field name analysis
+ //
- Status ModifierPop::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
- //
- // field name analysis
- //
-
- // Break down the field name into its 'dotted' components (aka parts) and check that
- // there are no empty parts.
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
+ // Break down the field name into its 'dotted' components (aka parts) and check that
+ // there are no empty parts.
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
+ }
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef,
- &_positionalPathIndex,
- &foundCount);
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_positionalPathIndex, &foundCount);
- if (positional)
- *positional = foundDollar;
+ if (positional)
+ *positional = foundDollar;
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
- }
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
+ }
- //
- // value analysis
- //
+ //
+ // value analysis
+ //
- // TODO: tighten validation to numbers and just 1/-1 explicitly
- //if (!modExpr.isNumber()) {
- // return Status(ErrorCodes::BadValue, "Must be a number");
- //}
+ // TODO: tighten validation to numbers and just 1/-1 explicitly
+ // if (!modExpr.isNumber()) {
+ // return Status(ErrorCodes::BadValue, "Must be a number");
+ //}
- _fromTop = (modExpr.isNumber() && modExpr.number() < 0) ? true : false;
+ _fromTop = (modExpr.isNumber() && modExpr.number() < 0) ? true : false;
- return Status::OK();
- }
+ return Status::OK();
+}
- Status ModifierPop::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
+Status ModifierPop::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(&root.getDocument()));
- _preparedState.reset(new PreparedState(&root.getDocument()));
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_positionalPathIndex) {
+ if (matchedField.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
+ }
+ _fieldRef.setPart(_positionalPathIndex, matchedField);
+ }
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_positionalPathIndex) {
- if (matchedField.empty()) {
+ // Locate the field name in 'root'. Note that if we don't have the full path in the
+ // doc, there isn't anything to unset, really.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->pathFoundIndex, &_preparedState->pathFoundElement);
+ // Check if we didn't find the full path
+ if (status.isOK()) {
+ const bool destExists = (_preparedState->pathFoundIndex == (_fieldRef.numParts() - 1));
+ if (!destExists) {
+ execInfo->noOp = true;
+ } else {
+ // If the path exists, we require the target field to be already an
+ // array.
+ if (_preparedState->pathFoundElement.getType() != Array) {
+ mb::Element idElem = mb::findFirstChildNamed(root, "_id");
return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
+ str::stream()
+ << "Can only $pop from arrays. {" << idElem.toString()
+ << "} has the field '"
+ << _preparedState->pathFoundElement.getFieldName()
+ << "' of non-array type "
+ << typeName(_preparedState->pathFoundElement.getType()));
}
- _fieldRef.setPart(_positionalPathIndex, matchedField);
- }
- // Locate the field name in 'root'. Note that if we don't have the full path in the
- // doc, there isn't anything to unset, really.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->pathFoundIndex,
- &_preparedState->pathFoundElement);
- // Check if we didn't find the full path
- if (status.isOK()) {
- const bool destExists = (_preparedState->pathFoundIndex == (_fieldRef.numParts()-1));
- if (!destExists) {
+ // No children, nothing to do -- not an error state
+ if (!_preparedState->pathFoundElement.hasChildren()) {
execInfo->noOp = true;
} else {
- // If the path exists, we require the target field to be already an
- // array.
- if (_preparedState->pathFoundElement.getType() != Array) {
- mb::Element idElem = mb::findFirstChildNamed(root, "_id");
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Can only $pop from arrays. {"
- << idElem.toString()
- << "} has the field '"
- << _preparedState->pathFoundElement.getFieldName()
- << "' of non-array type "
- << typeName(_preparedState->pathFoundElement.getType()));
- }
-
- // No children, nothing to do -- not an error state
- if (!_preparedState->pathFoundElement.hasChildren()) {
- execInfo->noOp = true;
- } else {
- _preparedState->elementToRemove = _fromTop ?
- _preparedState->pathFoundElement.leftChild() :
- _preparedState->pathFoundElement.rightChild();
- }
+ _preparedState->elementToRemove = _fromTop
+ ? _preparedState->pathFoundElement.leftChild()
+ : _preparedState->pathFoundElement.rightChild();
}
- } else {
- // Let the caller know we can't do anything given the mod, _fieldRef, and doc.
- execInfo->noOp = true;
- _preparedState->pathFoundElement = root.getDocument().end();
-
- //okay if path not found
- if (status.code() == ErrorCodes::NonExistentPath)
- status = Status::OK();
}
+ } else {
+ // Let the caller know we can't do anything given the mod, _fieldRef, and doc.
+ execInfo->noOp = true;
+ _preparedState->pathFoundElement = root.getDocument().end();
+
+ // okay if path not found
+ if (status.code() == ErrorCodes::NonExistentPath)
+ status = Status::OK();
+ }
- // Let the caller know what field we care about
- execInfo->fieldRef[0] = &_fieldRef;
+ // Let the caller know what field we care about
+ execInfo->fieldRef[0] = &_fieldRef;
- return status;
- }
+ return status;
+}
- Status ModifierPop::apply() const {
- return _preparedState->elementToRemove.remove();
- }
+Status ModifierPop::apply() const {
+ return _preparedState->elementToRemove.remove();
+}
- Status ModifierPop::log(LogBuilder* logBuilder) const {
- // log document
- mutablebson::Document& doc = logBuilder->getDocument();
- const bool pathExists = _preparedState->pathFoundElement.ok() &&
- (_preparedState->pathFoundIndex == (_fieldRef.numParts() - 1));
-
- if (!pathExists)
- return logBuilder->addToUnsets(_fieldRef.dottedField());
-
- // value for the logElement ("field.path.name": <value>)
- mutablebson::Element logElement = doc.makeElementWithNewFieldName(
- _fieldRef.dottedField(),
- _preparedState->pathFoundElement);
-
- if (!logElement.ok()) {
- return Status(ErrorCodes::InternalError,
- str::stream() << "Could not append entry to $pop oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
- << _preparedState->pathFoundElement.toString() );
- }
- return logBuilder->addToSets(logElement);
+Status ModifierPop::log(LogBuilder* logBuilder) const {
+ // log document
+ mutablebson::Document& doc = logBuilder->getDocument();
+ const bool pathExists = _preparedState->pathFoundElement.ok() &&
+ (_preparedState->pathFoundIndex == (_fieldRef.numParts() - 1));
+
+ if (!pathExists)
+ return logBuilder->addToUnsets(_fieldRef.dottedField());
+
+ // value for the logElement ("field.path.name": <value>)
+ mutablebson::Element logElement =
+ doc.makeElementWithNewFieldName(_fieldRef.dottedField(), _preparedState->pathFoundElement);
+
+ if (!logElement.ok()) {
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Could not append entry to $pop oplog entry: "
+ << "set '" << _fieldRef.dottedField() << "' -> "
+ << _preparedState->pathFoundElement.toString());
}
-} // namespace mongo
+ return logBuilder->addToSets(logElement);
+}
+} // namespace mongo
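As a quick orientation (hypothetical driver code, not part of the patch; it assumes the mongo namespace and the fromjson helper used by the unit tests further below), the modifier is driven as init on the $pop argument, prepare against a target document, then apply and, for the oplog, log:

    // Sketch of the init/prepare/apply lifecycle; mirrors modifier_pop_test.cpp.
    BSONObj modObj = fromjson("{$pop: {a: 1}}");
    ModifierPop mod;
    Status status = mod.init(modObj["$pop"].embeddedObject().firstElement(),
                             ModifierInterface::Options::normal());

    mutablebson::Document doc(fromjson("{a: [1, 2, 3]}"));
    ModifierInterface::ExecInfo execInfo;
    if (status.isOK())
        status = mod.prepare(doc.root(), "", &execInfo);
    if (status.isOK() && !execInfo.noOp)
        status = mod.apply();  // doc becomes {a: [1, 2]}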
diff --git a/src/mongo/db/ops/modifier_pop.h b/src/mongo/db/ops/modifier_pop.h
index 41501275619..b0de117a946 100644
--- a/src/mongo/db/ops/modifier_pop.h
+++ b/src/mongo/db/ops/modifier_pop.h
@@ -38,51 +38,46 @@
namespace mongo {
- class LogBuilder;
+class LogBuilder;
- class ModifierPop : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierPop);
+class ModifierPop : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierPop);
- public:
+public:
+ ModifierPop();
+ virtual ~ModifierPop();
- ModifierPop();
- virtual ~ModifierPop();
+ /**
+ * The format of this modifier ($pop) is {<fieldname>: <value>}.
+     * If the value is a negative number, an element is removed from the top (front) of the
+     * array; otherwise one is removed from the bottom (back). Currently the value can be
+     * anything, but we document only the numbers 1 and -1.
+ *
+ * Ex. $pop: {'a':1} will remove the last item from this array: [1,2,3] -> [1,2]
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
- /**
- * The format of this modifier ($pop) is {<fieldname>: <value>}.
- * If the value is number and greater than -1 then an element is removed from the bottom,
- * otherwise the top. Currently the value can be any anything but we document
- * the use of the numbers "1, -1" only.
- *
- * Ex. $pop: {'a':1} will remove the last item from this array: [1,2,3] -> [1,2]
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
+ virtual Status apply() const;
- virtual Status apply() const;
+ virtual Status log(LogBuilder* logBuilder) const;
- virtual Status log(LogBuilder* logBuilder) const;
+private:
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
- private:
+ // 0 or index for $-positional in _fieldRef.
+ size_t _positionalPathIndex;
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
+ // element position to remove from
+ bool _fromTop;
- // 0 or index for $-positional in _fieldRef.
- size_t _positionalPathIndex;
+ // The instance of the field in the provided doc.
+ // This data is valid after prepare, for use by log and apply
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
- // element position to remove from
- bool _fromTop;
-
- // The instance of the field in the provided doc.
- // This data is valid after prepare, for use by log and apply
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
-
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_pop_test.cpp b/src/mongo/db/ops/modifier_pop_test.cpp
index 283b9bc5a81..373c61aeca6 100644
--- a/src/mongo/db/ops/modifier_pop_test.cpp
+++ b/src/mongo/db/ops/modifier_pop_test.cpp
@@ -42,275 +42,277 @@
namespace {
- using mongo::Array;
- using mongo::BSONObj;
- using mongo::LogBuilder;
- using mongo::fromjson;
- using mongo::ModifierInterface;
- using mongo::ModifierPop;
- using mongo::Status;
- using mongo::StringData;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /** Helper to build and manipulate a $pop mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj) {
- _modObj = modObj;
- ASSERT_OK(_mod.init(_modObj["$pop"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierPop& mod() { return _mod; }
-
- BSONObj modObj() { return _modObj; }
-
- private:
- ModifierPop _mod;
- BSONObj _modObj;
- };
-
- //
- // Test init values which aren't numbers.
- // These are going to cause a pop from the bottom.
- //
- TEST(Init, StringArg) {
- BSONObj modObj = fromjson("{$pop: {a: 'hi'}}");
- ModifierPop mod;
- ASSERT_OK(mod.init(modObj["$pop"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+using mongo::Array;
+using mongo::BSONObj;
+using mongo::LogBuilder;
+using mongo::fromjson;
+using mongo::ModifierInterface;
+using mongo::ModifierPop;
+using mongo::Status;
+using mongo::StringData;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+
+/** Helper to build and manipulate a $pop mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj) {
+ _modObj = modObj;
+ ASSERT_OK(_mod.init(_modObj["$pop"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(Init, BoolTrueArg) {
- BSONObj modObj = fromjson("{$pop: {a: true}}");
- ModifierPop mod;
- ASSERT_OK(mod.init(modObj["$pop"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(Init, BoolFalseArg) {
- BSONObj modObj = fromjson("{$pop: {a: false}}");
- ModifierPop mod;
- ASSERT_OK(mod.init(modObj["$pop"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+ Status apply() const {
+ return _mod.apply();
}
- TEST(MissingField, AllButApply) {
- Document doc(fromjson("{a: [1,2]}"));
- Mod mod(fromjson("{$pop: {s: 1}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "s");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$unset: {'s': true}}"), logDoc);
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(SimpleMod, PrepareBottom) {
- Document doc(fromjson("{a: [1,2]}"));
- Mod mod(fromjson("{$pop: {a: 1}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierPop& mod() {
+ return _mod;
}
- TEST(SimpleMod, PrepareApplyBottomO) {
- Document doc(fromjson("{a: [1,2]}"));
- Mod mod(fromjson("{$pop: {a: 0}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(("{a: [1]}")), doc);
+ BSONObj modObj() {
+ return _modObj;
}
- TEST(SimpleMod, PrepareTop) {
- Document doc(fromjson("{a: [1,2]}"));
- Mod mod(fromjson("{$pop: {a: -1}}"));
+private:
+ ModifierPop _mod;
+ BSONObj _modObj;
+};
+
+//
+// Test init values which aren't numbers.
+// These are going to cause a pop from the bottom.
+//
+TEST(Init, StringArg) {
+ BSONObj modObj = fromjson("{$pop: {a: 'hi'}}");
+ ModifierPop mod;
+ ASSERT_OK(mod.init(modObj["$pop"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, BoolTrueArg) {
+ BSONObj modObj = fromjson("{$pop: {a: true}}");
+ ModifierPop mod;
+ ASSERT_OK(mod.init(modObj["$pop"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, BoolFalseArg) {
+ BSONObj modObj = fromjson("{$pop: {a: false}}");
+ ModifierPop mod;
+ ASSERT_OK(mod.init(modObj["$pop"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(MissingField, AllButApply) {
+ Document doc(fromjson("{a: [1,2]}"));
+ Mod mod(fromjson("{$pop: {s: 1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "s");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$unset: {'s': true}}"), logDoc);
+}
+
+TEST(SimpleMod, PrepareBottom) {
+ Document doc(fromjson("{a: [1,2]}"));
+ Mod mod(fromjson("{$pop: {a: 1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(SimpleMod, PrepareApplyBottomO) {
+ Document doc(fromjson("{a: [1,2]}"));
+ Mod mod(fromjson("{$pop: {a: 0}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(("{a: [1]}")), doc);
+}
+
+TEST(SimpleMod, PrepareTop) {
+ Document doc(fromjson("{a: [1,2]}"));
+ Mod mod(fromjson("{$pop: {a: -1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(SimpleMod, ApplyTopPop) {
+ Document doc(fromjson("{a: [1,2]}"));
+ Mod mod(fromjson("{$pop: {a: -1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(("{a: [2]}")), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+TEST(SimpleMod, ApplyBottomPop) {
+ Document doc(fromjson("{a: [1,2]}"));
+ Mod mod(fromjson("{$pop: {a: 1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(("{a: [1]}")), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
- }
+TEST(SimpleMod, ApplyLogBottomPop) {
+ Document doc(fromjson("{a: [1,2]}"));
+ Mod mod(fromjson("{$pop: {a: 1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- TEST(SimpleMod, ApplyTopPop) {
- Document doc(fromjson("{a: [1,2]}"));
- Mod mod(fromjson("{$pop: {a: -1}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(("{a:[1]}")), doc);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
+}
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(("{a: [2]}")), doc);
- }
+TEST(EmptyArray, PrepareNoOp) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{$pop: {a: 1}}"));
- TEST(SimpleMod, ApplyBottomPop) {
- Document doc(fromjson("{a: [1,2]}"));
- Mod mod(fromjson("{$pop: {a: 1}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(SingleElemArray, ApplyLog) {
+ Document doc(fromjson("{a: [1]}"));
+ Mod mod(fromjson("{$pop: {a: 1}}"));
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(("{a: [1]}")), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- TEST(SimpleMod, ApplyLogBottomPop) {
- Document doc(fromjson("{a: [1,2]}"));
- Mod mod(fromjson("{$pop: {a: 1}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(("{a:[]}")), doc);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$set: {a: []}}"), logDoc);
+}
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(("{a:[1]}")), doc);
+TEST(ArrayOfArray, ApplyLogPop) {
+ Document doc(fromjson("{a: [[1,2], 1]}"));
+ Mod mod(fromjson("{$pop: {'a.0': 1}}"));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- TEST(EmptyArray, PrepareNoOp) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{$pop: {a: 1}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(("{a:[[1], 1]}")), doc);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
- }
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$set: { 'a.0': [1]}}"), logDoc);
+}
- TEST(SingleElemArray, ApplyLog) {
- Document doc(fromjson("{a: [1]}"));
- Mod mod(fromjson("{$pop: {a: 1}}"));
+TEST(ArrayOfArray, ApplyLogPopOnlyElement) {
+ Document doc(fromjson("{a: [[1], 1]}"));
+ Mod mod(fromjson("{$pop: {'a.0': 1}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(("{a:[]}")), doc);
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(("{a:[[], 1]}")), doc);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$set: {a: []}}"), logDoc);
- }
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$set: { 'a.0': []}}"), logDoc);
+}
- TEST(ArrayOfArray, ApplyLogPop) {
- Document doc(fromjson("{a: [[1,2], 1]}"));
- Mod mod(fromjson("{$pop: {'a.0': 1}}"));
+TEST(Prepare, MissingPath) {
+ Document doc(fromjson("{ a : [1, 2] }"));
+ Mod mod(fromjson("{ $pop : { b : 1 } }"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
- ASSERT_FALSE(execInfo.noOp);
+// from SERVER-12846
+TEST(Prepare, MissingArrayElementPath) {
+ Document doc(fromjson("{_id : 1, a : [1, 2]}"));
+ Mod mod(fromjson("{ $pop : { 'a.3' : 1 } }"));
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(("{a:[[1], 1]}")), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$set: { 'a.0': [1]}}"), logDoc);
- }
+TEST(Prepare, FromArrayElementPath) {
+ Document doc(fromjson("{ a : [1, 2] }"));
+ Mod mod(fromjson("{ $pop : { 'a.0' : 1 } }"));
- TEST(ArrayOfArray, ApplyLogPopOnlyElement) {
- Document doc(fromjson("{a: [[1], 1]}"));
- Mod mod(fromjson("{$pop: {'a.0': 1}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(("{a:[[], 1]}")), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$set: { 'a.0': []}}"), logDoc);
- }
-
- TEST(Prepare, MissingPath) {
- Document doc(fromjson("{ a : [1, 2] }"));
- Mod mod(fromjson("{ $pop : { b : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- // from SERVER-12846
- TEST(Prepare, MissingArrayElementPath) {
- Document doc(fromjson("{_id : 1, a : [1, 2]}"));
- Mod mod(fromjson("{ $pop : { 'a.3' : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(Prepare, FromArrayElementPath) {
- Document doc(fromjson("{ a : [1, 2] }"));
- Mod mod(fromjson("{ $pop : { 'a.0' : 1 } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
-} // unnamed namespace
+} // unnamed namespace
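One more sketch in the same vein (hypothetical, not part of the patch): popping from the front with -1 and checking that the oplog entry records the full resulting array as a $set, as the implementation above does:

    TEST(SimpleMod, ApplyLogTopPop) {
        Document doc(fromjson("{a: [1,2,3]}"));
        Mod mod(fromjson("{$pop: {a: -1}}"));

        ModifierInterface::ExecInfo execInfo;
        ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
        ASSERT_FALSE(execInfo.noOp);

        ASSERT_OK(mod.apply());
        ASSERT_FALSE(doc.isInPlaceModeEnabled());
        ASSERT_EQUALS(fromjson("{a: [2,3]}"), doc);

        Document logDoc;
        LogBuilder logBuilder(logDoc.root());
        ASSERT_OK(mod.log(&logBuilder));
        ASSERT_EQUALS(fromjson("{$set: {a: [2,3]}}"), logDoc);
    }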
diff --git a/src/mongo/db/ops/modifier_pull.cpp b/src/mongo/db/ops/modifier_pull.cpp
index 5fe94e878ea..3b45064afc7 100644
--- a/src/mongo/db/ops/modifier_pull.cpp
+++ b/src/mongo/db/ops/modifier_pull.cpp
@@ -38,265 +38,238 @@
namespace mongo {
- namespace mb = mutablebson;
- namespace str = mongoutils::str;
+namespace mb = mutablebson;
+namespace str = mongoutils::str;
+
+struct ModifierPull::PreparedState {
+ PreparedState(mb::Document& doc)
+ : doc(doc), idxFound(0), elemFound(doc.end()), elementsToRemove(), noOp(false) {}
+
+ // Document that is going to be changed.
+ mb::Document& doc;
+
+    // Index in _fieldRef for which an Element exists in the document.
+ size_t idxFound;
+
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mb::Element elemFound;
+
+ // Values to be removed.
+ std::vector<mb::Element> elementsToRemove;
+
+ // True if this update is a no-op
+ bool noOp;
+};
+
+ModifierPull::ModifierPull()
+ : ModifierInterface(),
+ _fieldRef(),
+ _posDollar(0),
+ _exprElt(),
+ _exprObj(),
+ _matchExpr(),
+ _matcherOnPrimitive(false),
+ _preparedState() {}
+
+ModifierPull::~ModifierPull() {}
+
+Status ModifierPull::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ // Perform standard field name and updateable checks.
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
+ }
- struct ModifierPull::PreparedState {
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
- PreparedState(mb::Document& doc)
- : doc(doc)
- , idxFound(0)
- , elemFound(doc.end())
- , elementsToRemove()
- , noOp(false) {
- }
+ if (positional)
+ *positional = foundDollar;
- // Document that is going to be changed.
- mb::Document& doc;
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
+ }
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
+ _exprElt = modExpr;
- // Element corresponding to _fieldRef[0.._idxFound].
- mb::Element elemFound;
+ // If the element in the mod is actually an object or a regular expression, we need to
+    // build a matcher, instead of just doing an equality comparison.
+ if ((_exprElt.type() == mongo::Object) || (_exprElt.type() == mongo::RegEx)) {
+ if (_exprElt.type() == Object) {
+ _exprObj = _exprElt.embeddedObject();
- // Values to be removed.
- std::vector<mb::Element> elementsToRemove;
+            // If the first element is a query operator, the match is against a primitive.
+ _matcherOnPrimitive = (_exprObj.firstElement().getGtLtOp() != 0);
- // True if this update is a no-op
- bool noOp;
- };
+ // If the object is primitive then wrap it up into an object.
+ if (_matcherOnPrimitive)
+ _exprObj = BSON("" << _exprObj);
+ } else {
+ // For a regex, we also need to wrap and treat like a primitive.
+ _matcherOnPrimitive = true;
+ _exprObj = _exprElt.wrap("");
+ }
- ModifierPull::ModifierPull()
- : ModifierInterface()
- , _fieldRef()
- , _posDollar(0)
- , _exprElt()
- , _exprObj()
- , _matchExpr()
- , _matcherOnPrimitive(false)
- , _preparedState() {
- }
+ // Build the matcher around the object we built above. Currently, we do not allow
+        // $pull operations to contain $where clauses, so we preserve that behaviour here.
+ StatusWithMatchExpression parseResult =
+ MatchExpressionParser::parse(_exprObj, MatchExpressionParser::WhereCallback());
+ if (!parseResult.isOK())
+ return parseResult.getStatus();
- ModifierPull::~ModifierPull() {
+ _matchExpr.reset(parseResult.getValue());
}
- Status ModifierPull::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
- // Perform standard field name and updateable checks.
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
-
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
+ return Status::OK();
+}
- if (positional)
- *positional = foundDollar;
+Status ModifierPull::prepare(mb::Element root, StringData matchedField, ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(root.getDocument()));
- if (foundDollar && foundCount > 1) {
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_posDollar) {
+ if (matchedField.empty()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
}
+ _fieldRef.setPart(_posDollar, matchedField);
+ }
- _exprElt = modExpr;
-
- // If the element in the mod is actually an object or a regular expression, we need to
- // build a matcher, instead of just doing an equality comparision.
- if ((_exprElt.type() == mongo::Object) || (_exprElt.type() == mongo::RegEx)) {
- if (_exprElt.type() == Object) {
- _exprObj = _exprElt.embeddedObject();
-
- // If not is not a query operator, then it is a primitive.
- _matcherOnPrimitive = (_exprObj.firstElement().getGtLtOp() != 0);
-
- // If the object is primitive then wrap it up into an object.
- if (_matcherOnPrimitive)
- _exprObj = BSON( "" << _exprObj );
- }
- else {
- // For a regex, we also need to wrap and treat like a primitive.
- _matcherOnPrimitive = true;
- _exprObj = _exprElt.wrap("");
- }
-
- // Build the matcher around the object we built above. Currently, we do not allow
- // $pull operations to contain $where clauses, so preserving this behaviour.
- StatusWithMatchExpression parseResult =
- MatchExpressionParser::parse(_exprObj,
- MatchExpressionParser::WhereCallback());
- if (!parseResult.isOK())
- return parseResult.getStatus();
+ // Locate the field name in 'root'.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->idxFound, &_preparedState->elemFound);
+
+ // FindLongestPrefix may say the path does not exist at all, which is fine here, or
+ // that the path was not viable or otherwise wrong, in which case, the mod cannot
+ // proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ _preparedState->elemFound = root.getDocument().end();
+ } else if (!status.isOK()) {
+ return status;
+ }
- _matchExpr.reset(parseResult.getValue());
- }
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_fieldRef;
+ if (!_preparedState->elemFound.ok() || _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ // If no target element exists, then there is nothing to do here.
+ _preparedState->noOp = execInfo->noOp = true;
return Status::OK();
}
- Status ModifierPull::prepare(mb::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
-
- _preparedState.reset(new PreparedState(root.getDocument()));
-
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_posDollar) {
- if (matchedField.empty()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
- }
- _fieldRef.setPart(_posDollar, matchedField);
- }
-
- // Locate the field name in 'root'.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
-
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->elemFound = root.getDocument().end();
- } else if (!status.isOK()) {
- return status;
- }
-
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_fieldRef;
-
- if (!_preparedState->elemFound.ok() ||
- _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
- // If no target element exists, then there is nothing to do here.
- _preparedState->noOp = execInfo->noOp = true;
- return Status::OK();
- }
-
- // This operation only applies to arrays
- if (_preparedState->elemFound.getType() != mongo::Array)
- return Status(
- ErrorCodes::BadValue,
- "Cannot apply $pull to a non-array value");
-
- // If the array is empty, there is nothing to pull, so this is a noop.
- if (!_preparedState->elemFound.hasChildren()) {
- _preparedState->noOp = execInfo->noOp = true;
- return Status::OK();
- }
+ // This operation only applies to arrays
+ if (_preparedState->elemFound.getType() != mongo::Array)
+ return Status(ErrorCodes::BadValue, "Cannot apply $pull to a non-array value");
- // Walk the values in the array
- mb::Element cursor = _preparedState->elemFound.leftChild();
- while (cursor.ok()) {
- if (isMatch(cursor))
- _preparedState->elementsToRemove.push_back(cursor);
- cursor = cursor.rightSibling();
- }
+ // If the array is empty, there is nothing to pull, so this is a noop.
+ if (!_preparedState->elemFound.hasChildren()) {
+ _preparedState->noOp = execInfo->noOp = true;
+ return Status::OK();
+ }
- // If we didn't find any elements to add, then this is a no-op, and therefore in place.
- if (_preparedState->elementsToRemove.empty()) {
- _preparedState->noOp = execInfo->noOp = true;
- }
+ // Walk the values in the array
+ mb::Element cursor = _preparedState->elemFound.leftChild();
+ while (cursor.ok()) {
+ if (isMatch(cursor))
+ _preparedState->elementsToRemove.push_back(cursor);
+ cursor = cursor.rightSibling();
+ }
- return Status::OK();
+    // If we didn't find any elements to remove, then this is a no-op, and therefore in place.
+ if (_preparedState->elementsToRemove.empty()) {
+ _preparedState->noOp = execInfo->noOp = true;
}
- Status ModifierPull::apply() const {
- dassert(_preparedState->noOp == false);
+ return Status::OK();
+}
- dassert(_preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_fieldRef.numParts() - 1));
+Status ModifierPull::apply() const {
+ dassert(_preparedState->noOp == false);
- std::vector<mb::Element>::const_iterator where = _preparedState->elementsToRemove.begin();
- const std::vector<mb::Element>::const_iterator end = _preparedState->elementsToRemove.end();
- for ( ; where != end; ++where)
- const_cast<mb::Element&>(*where).remove();
+ dassert(_preparedState->elemFound.ok() &&
+ _preparedState->idxFound == (_fieldRef.numParts() - 1));
- return Status::OK();
- }
+ std::vector<mb::Element>::const_iterator where = _preparedState->elementsToRemove.begin();
+ const std::vector<mb::Element>::const_iterator end = _preparedState->elementsToRemove.end();
+ for (; where != end; ++where)
+ const_cast<mb::Element&>(*where).remove();
- Status ModifierPull::log(LogBuilder* logBuilder) const {
+ return Status::OK();
+}
- mb::Document& doc = logBuilder->getDocument();
+Status ModifierPull::log(LogBuilder* logBuilder) const {
+ mb::Document& doc = logBuilder->getDocument();
- if (!_preparedState->elemFound.ok() ||
- _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ if (!_preparedState->elemFound.ok() || _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ // If we didn't find the element that we wanted to pull from, we log an unset for
+ // that element.
+ return logBuilder->addToUnsets(_fieldRef.dottedField());
- // If we didn't find the element that we wanted to pull from, we log an unset for
- // that element.
- return logBuilder->addToUnsets(_fieldRef.dottedField());
+ } else {
+ // TODO: This is copied more or less identically from $push. As a result, it copies the
+ // behavior in $push that relies on 'apply' having been called unless this is a no-op.
- } else {
+ // TODO We can log just a positional unset in several cases. For now, let's just log
+ // the full resulting array.
- // TODO: This is copied more or less identically from $push. As a result, it copies the
- // behavior in $push that relies on 'apply' having been called unless this is a no-op.
+        // We'd like to create an entry such as {$set: {<fieldname>: [<resulting array>]}} under
+ // 'logRoot'. We start by creating the {$set: ...} Element.
- // TODO We can log just a positional unset in several cases. For now, let's just log
- // the full resulting array.
+ // Then we create the {<fieldname>:[]} Element, that is, an empty array.
+ mb::Element logElement = doc.makeElementArray(_fieldRef.dottedField());
+ if (!logElement.ok()) {
+ return Status(ErrorCodes::InternalError, "cannot create details for $pull mod");
+ }
- // We'd like to create an entry such as {$set: {<fieldname>: [<resulting aray>]}} under
- // 'logRoot'. We start by creating the {$set: ...} Element.
+ mb::Element curr = _preparedState->elemFound.leftChild();
+ while (curr.ok()) {
+ dassert(curr.hasValue());
- // Then we create the {<fieldname>:[]} Element, that is, an empty array.
- mb::Element logElement = doc.makeElementArray(_fieldRef.dottedField());
- if (!logElement.ok()) {
- return Status(ErrorCodes::InternalError, "cannot create details for $pull mod");
+ // We need to copy each array entry from the resulting document to the log
+ // document.
+ mb::Element currCopy = doc.makeElementWithNewFieldName(StringData(), curr.getValue());
+ if (!currCopy.ok()) {
+                return Status(ErrorCodes::InternalError, "could not create copy element");
}
-
- mb::Element curr = _preparedState->elemFound.leftChild();
- while (curr.ok()) {
-
- dassert(curr.hasValue());
-
- // We need to copy each array entry from the resulting document to the log
- // document.
- mb::Element currCopy = doc.makeElementWithNewFieldName(
- StringData(),
- curr.getValue());
- if (!currCopy.ok()) {
- return Status(ErrorCodes::InternalError, "could create copy element");
- }
- Status status = logElement.pushBack(currCopy);
- if (!status.isOK()) {
- return Status(ErrorCodes::BadValue, "could not append entry for $pull log");
- }
- curr = curr.rightSibling();
+ Status status = logElement.pushBack(currCopy);
+ if (!status.isOK()) {
+ return Status(ErrorCodes::BadValue, "could not append entry for $pull log");
}
-
- return logBuilder->addToSets(logElement);
+ curr = curr.rightSibling();
}
- }
- bool ModifierPull::isMatch(mutablebson::ConstElement element) {
+ return logBuilder->addToSets(logElement);
+ }
+}
- // TODO: We are assuming that 'element' hasValue is true. That might be OK if the
- // conflict detection logic will prevent us from ever seeing a deserialized element,
- // but are we sure about that?
+bool ModifierPull::isMatch(mutablebson::ConstElement element) {
+ // TODO: We are assuming that 'element' hasValue is true. That might be OK if the
+ // conflict detection logic will prevent us from ever seeing a deserialized element,
+ // but are we sure about that?
- dassert(element.hasValue());
+ dassert(element.hasValue());
- if (!_matchExpr)
- return (element.compareWithBSONElement(_exprElt, false) == 0);
+ if (!_matchExpr)
+ return (element.compareWithBSONElement(_exprElt, false) == 0);
- if (_matcherOnPrimitive) {
- // TODO: This is kinda slow.
- BSONObj candidate = element.getValue().wrap("");
- return _matchExpr->matchesBSON(candidate);
- }
+ if (_matcherOnPrimitive) {
+ // TODO: This is kinda slow.
+ BSONObj candidate = element.getValue().wrap("");
+ return _matchExpr->matchesBSON(candidate);
+ }
- if (element.getType() != Object)
- return false;
+ if (element.getType() != Object)
+ return false;
- return _matchExpr->matchesBSON(element.getValueObject());
- }
+ return _matchExpr->matchesBSON(element.getValueObject());
+}
-} // namespace mongo
+} // namespace mongo
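For orientation (hypothetical driver code, not part of the patch; names and values are illustrative), the matcher path above can be exercised as follows: an object or regex argument is parsed into a MatchExpression and every array element is tested against it:

    // Object argument: a MatchExpression is built and each array element is tested.
    BSONObj modObj = fromjson("{$pull: {a: {x: 1}}}");
    ModifierPull mod;
    Status status = mod.init(modObj["$pull"].embeddedObject().firstElement(),
                             ModifierInterface::Options::normal());

    mutablebson::Document doc(fromjson("{a: [1, {x: 1}, 2, {x: 2}]}"));
    ModifierInterface::ExecInfo execInfo;
    if (status.isOK())
        status = mod.prepare(doc.root(), "", &execInfo);
    if (status.isOK() && !execInfo.noOp)
        status = mod.apply();  // doc becomes {a: [1, 2, {x: 2}]}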
diff --git a/src/mongo/db/ops/modifier_pull.h b/src/mongo/db/ops/modifier_pull.h
index 4660fd28b62..957750518f0 100644
--- a/src/mongo/db/ops/modifier_pull.h
+++ b/src/mongo/db/ops/modifier_pull.h
@@ -36,52 +36,49 @@
namespace mongo {
- class MatchExpression;
+class MatchExpression;
- class ModifierPull : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierPull);
+class ModifierPull : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierPull);
- public:
- ModifierPull();
- virtual ~ModifierPull();
+public:
+ ModifierPull();
+ virtual ~ModifierPull();
- /** Evaluates the array items to be removed and the match expression. */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
+ /** Evaluates the array items to be removed and the match expression. */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
- /** Decides which portion of the array items will be removed from the provided element */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
+ /** Decides which portion of the array items will be removed from the provided element */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
- /** Updates the Element used in prepare with the effects of the $pull operation. */
- virtual Status apply() const;
+ /** Updates the Element used in prepare with the effects of the $pull operation. */
+ virtual Status apply() const;
- /** Converts the effects of this $pull into one or more equivalent $unset operations. */
- virtual Status log(LogBuilder* logBuilder) const;
+    /** Logs the resulting array as a $set (or an $unset if the path was not found). */
+ virtual Status log(LogBuilder* logBuilder) const;
- private:
- bool isMatch(mutablebson::ConstElement element);
+private:
+ bool isMatch(mutablebson::ConstElement element);
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
- // 0 or index for $-positional in _fieldRef.
- size_t _posDollar;
+ // 0 or index for $-positional in _fieldRef.
+ size_t _posDollar;
- // If we aren't using a matcher, we just keep modExpr as _exprElt and use that to match
- // with woCompare.
- BSONElement _exprElt;
+ // If we aren't using a matcher, we just keep modExpr as _exprElt and use that to match
+ // with woCompare.
+ BSONElement _exprElt;
- // If we are using a matcher, we need to keep around a BSONObj for it.
- BSONObj _exprObj;
+ // If we are using a matcher, we need to keep around a BSONObj for it.
+ BSONObj _exprObj;
- // If we are using the matcher, this is the match expression we built around _exprObj.
- std::unique_ptr<MatchExpression> _matchExpr;
- bool _matcherOnPrimitive;
+ // If we are using the matcher, this is the match expression we built around _exprObj.
+ std::unique_ptr<MatchExpression> _matchExpr;
+ bool _matcherOnPrimitive;
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
-} // namespace mongo
+} // namespace mongo
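To complement the header comments above (hypothetical sketch, not part of the patch): when the $pull argument is a plain value, no matcher is built and removal falls back to direct element comparison:

    // Scalar argument: each array element is compared for equality, no MatchExpression.
    BSONObj modObj = fromjson("{$pull: {a: 2}}");
    ModifierPull mod;
    Status status = mod.init(modObj["$pull"].embeddedObject().firstElement(),
                             ModifierInterface::Options::normal());

    mutablebson::Document doc(fromjson("{a: [1, 2, 3, 2]}"));
    ModifierInterface::ExecInfo execInfo;
    if (status.isOK())
        status = mod.prepare(doc.root(), "", &execInfo);
    if (status.isOK() && !execInfo.noOp)
        status = mod.apply();  // doc becomes {a: [1, 3]}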
diff --git a/src/mongo/db/ops/modifier_pull_all.cpp b/src/mongo/db/ops/modifier_pull_all.cpp
index c8a73abe1e0..287dc4828b4 100644
--- a/src/mongo/db/ops/modifier_pull_all.cpp
+++ b/src/mongo/db/ops/modifier_pull_all.cpp
@@ -38,217 +38,198 @@
namespace mongo {
- using std::vector;
+using std::vector;
- namespace mb = mutablebson;
- namespace str = mongoutils::str;
+namespace mb = mutablebson;
+namespace str = mongoutils::str;
- struct ModifierPullAll::PreparedState {
+struct ModifierPullAll::PreparedState {
+ PreparedState(mutablebson::Document* targetDoc)
+ : doc(*targetDoc),
+ pathFoundIndex(0),
+ pathFoundElement(doc.end()),
+ applyCalled(false),
+ elementsToRemove() {}
- PreparedState(mutablebson::Document* targetDoc)
- : doc(*targetDoc)
- , pathFoundIndex(0)
- , pathFoundElement(doc.end())
- , applyCalled(false)
- , elementsToRemove() {
- }
-
- // Document that is going to be changed.
- mutablebson::Document& doc;
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
- // Index in _fieldRef for which an Element exist in the document.
- size_t pathFoundIndex;
+    // Index in _fieldRef for which an Element exists in the document.
+ size_t pathFoundIndex;
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element pathFoundElement;
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element pathFoundElement;
- bool applyCalled;
+ bool applyCalled;
- // Elements to be removed
- vector<mutablebson::Element> elementsToRemove;
- };
+ // Elements to be removed
+ vector<mutablebson::Element> elementsToRemove;
+};
- namespace {
+namespace {
- struct mutableElementEqualsBSONElement : std::unary_function<BSONElement, bool>
- {
- mutableElementEqualsBSONElement(const mutablebson::Element& elem) : _what(elem) {}
- bool operator()(const BSONElement& elem) const {
- return _what.compareWithBSONElement(elem, false) == 0;
- }
- const mutablebson::Element& _what;
- };
- } // namespace
-
- ModifierPullAll::ModifierPullAll()
- : _fieldRef()
- , _positionalPathIndex(0)
- , _elementsToFind() {
+struct mutableElementEqualsBSONElement : std::unary_function<BSONElement, bool> {
+ mutableElementEqualsBSONElement(const mutablebson::Element& elem) : _what(elem) {}
+ bool operator()(const BSONElement& elem) const {
+ return _what.compareWithBSONElement(elem, false) == 0;
}
+ const mutablebson::Element& _what;
+};
+} // namespace
+
+ModifierPullAll::ModifierPullAll() : _fieldRef(), _positionalPathIndex(0), _elementsToFind() {}
+
+ModifierPullAll::~ModifierPullAll() {}
- ModifierPullAll::~ModifierPullAll() {
+Status ModifierPullAll::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ //
+ // field name analysis
+ //
+
+ // Break down the field name into its 'dotted' components (aka parts) and check that
+ // there are no empty parts.
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
}
- Status ModifierPullAll::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_positionalPathIndex, &foundCount);
- //
- // field name analysis
- //
+ if (positional)
+ *positional = foundDollar;
- // Break down the field name into its 'dotted' components (aka parts) and check that
- // there are no empty parts.
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
+ }
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef,
- &_positionalPathIndex,
- &foundCount);
+ //
+ // value analysis
+ //
- if (positional)
- *positional = foundDollar;
+ if (modExpr.type() != Array) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "$pullAll requires an array argument but was given a "
+ << typeName(modExpr.type()));
+ }
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
- }
+    // Store the values to remove when apply() runs.
+ _elementsToFind = modExpr.Array();
+
+ return Status::OK();
+}
- //
- // value analysis
- //
+Status ModifierPullAll::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(&root.getDocument()));
- if (modExpr.type() != Array) {
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_positionalPathIndex) {
+ if (matchedField.empty()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "$pullAll requires an array argument but was given a "
- << typeName(modExpr.type()));
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
}
-
- // store the stuff to remove later
- _elementsToFind = modExpr.Array();
-
- return Status::OK();
+ _fieldRef.setPart(_positionalPathIndex, matchedField);
}
- Status ModifierPullAll::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
+ // Locate the field name in 'root'. Note that if we don't have the full path in the
+    // doc, there isn't anything to remove, really.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->pathFoundIndex, &_preparedState->pathFoundElement);
+    // Check whether we found the full path
+ if (status.isOK()) {
+ const bool destExists = (_preparedState->pathFoundIndex == (_fieldRef.numParts() - 1));
- _preparedState.reset(new PreparedState(&root.getDocument()));
-
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_positionalPathIndex) {
- if (matchedField.empty()) {
+ if (!destExists) {
+ execInfo->noOp = true;
+ } else {
+            // If the path exists, we require the target field to already be an
+            // array.
+ if (_preparedState->pathFoundElement.getType() != Array) {
+ mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
+ str::stream()
+ << "Can only apply $pullAll to an array. " << idElem.toString()
+ << " has the field "
+ << _preparedState->pathFoundElement.getFieldName()
+ << " of non-array type "
+ << typeName(_preparedState->pathFoundElement.getType()));
}
- _fieldRef.setPart(_positionalPathIndex, matchedField);
- }
- // Locate the field name in 'root'. Note that if we don't have the full path in the
- // doc, there isn't anything to unset, really.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->pathFoundIndex,
- &_preparedState->pathFoundElement);
- // Check if we didn't find the full path
- if (status.isOK()) {
- const bool destExists = (_preparedState->pathFoundIndex == (_fieldRef.numParts()-1));
-
- if (!destExists) {
+ // No children, nothing to do -- not an error state
+ if (!_preparedState->pathFoundElement.hasChildren()) {
execInfo->noOp = true;
} else {
- // If the path exists, we require the target field to be already an
- // array.
- if (_preparedState->pathFoundElement.getType() != Array) {
- mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
- return Status(
- ErrorCodes::BadValue,
- str::stream() << "Can only apply $pullAll to an array. "
- << idElem.toString()
- << " has the field "
- << _preparedState->pathFoundElement.getFieldName()
- << " of non-array type "
- << typeName(_preparedState->pathFoundElement.getType()));
+ mutablebson::Element elem = _preparedState->pathFoundElement.leftChild();
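+                // Queue every array element that matches one of the user-supplied
+                // values; apply() will remove the queued elements.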
+ while (elem.ok()) {
+ if (std::find_if(_elementsToFind.begin(),
+ _elementsToFind.end(),
+ mutableElementEqualsBSONElement(elem)) !=
+ _elementsToFind.end()) {
+ _preparedState->elementsToRemove.push_back(elem);
+ }
+ elem = elem.rightSibling();
}
- // No children, nothing to do -- not an error state
- if (!_preparedState->pathFoundElement.hasChildren()) {
+ // Nothing to remove so it is a noOp.
+ if (_preparedState->elementsToRemove.empty())
execInfo->noOp = true;
- } else {
- mutablebson::Element elem = _preparedState->pathFoundElement.leftChild();
- while (elem.ok()) {
- if (std::find_if(_elementsToFind.begin(),
- _elementsToFind.end(),
- mutableElementEqualsBSONElement(elem))
- != _elementsToFind.end()) {
- _preparedState->elementsToRemove.push_back(elem);
- }
- elem = elem.rightSibling();
- }
-
- // Nothing to remove so it is a noOp.
- if (_preparedState->elementsToRemove.empty())
- execInfo->noOp = true;
- }
}
- } else {
- // Let the caller know we can't do anything given the mod, _fieldRef, and doc.
- execInfo->noOp = true;
-
-
- //okay if path not found
- if (status.code() == ErrorCodes::NonExistentPath)
- status = Status::OK();
}
+ } else {
+ // Let the caller know we can't do anything given the mod, _fieldRef, and doc.
+ execInfo->noOp = true;
- // Let the caller know what field we care about
- execInfo->fieldRef[0] = &_fieldRef;
- return status;
+        // A nonexistent path is not an error for $pullAll; report OK.
+ if (status.code() == ErrorCodes::NonExistentPath)
+ status = Status::OK();
}
- Status ModifierPullAll::apply() const {
- _preparedState->applyCalled = true;
+ // Let the caller know what field we care about
+ execInfo->fieldRef[0] = &_fieldRef;
- vector<mutablebson::Element>::const_iterator curr =
- _preparedState->elementsToRemove.begin();
- const vector<mutablebson::Element>::const_iterator end =
- _preparedState->elementsToRemove.end();
- for ( ; curr != end; ++curr) {
- const_cast<mutablebson::Element&>(*curr).remove();
- }
- return Status::OK();
- }
+ return status;
+}
- Status ModifierPullAll::log(LogBuilder* logBuilder) const {
- // log document
- mutablebson::Document& doc = logBuilder->getDocument();
- const bool pathExists = _preparedState->pathFoundElement.ok() &&
- (_preparedState->pathFoundIndex == (_fieldRef.numParts() - 1));
-
- if (!pathExists)
- return logBuilder->addToUnsets(_fieldRef.dottedField());
-
- // value for the logElement ("field.path.name": <value>)
- mutablebson::Element logElement = doc.makeElementWithNewFieldName(
- _fieldRef.dottedField(),
- _preparedState->pathFoundElement);
-
- if (!logElement.ok()) {
- return Status(ErrorCodes::InternalError,
- str::stream() << "Could not append entry to $pullAll oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
- << _preparedState->pathFoundElement.toString() );
- }
- return logBuilder->addToSets(logElement);
+Status ModifierPullAll::apply() const {
+ _preparedState->applyCalled = true;
+
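+    // Detach each element that prepare() queued for removal. remove() is a
+    // non-const operation, hence the const_cast below.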
+ vector<mutablebson::Element>::const_iterator curr = _preparedState->elementsToRemove.begin();
+ const vector<mutablebson::Element>::const_iterator end = _preparedState->elementsToRemove.end();
+ for (; curr != end; ++curr) {
+ const_cast<mutablebson::Element&>(*curr).remove();
+ }
+ return Status::OK();
+}
+
+Status ModifierPullAll::log(LogBuilder* logBuilder) const {
+ // log document
+ mutablebson::Document& doc = logBuilder->getDocument();
+ const bool pathExists = _preparedState->pathFoundElement.ok() &&
+ (_preparedState->pathFoundIndex == (_fieldRef.numParts() - 1));
+
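+    // If the full path was not found during prepare(), log a $unset of the field;
+    // otherwise log a $set with the array's current (post-apply) contents.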
+ if (!pathExists)
+ return logBuilder->addToUnsets(_fieldRef.dottedField());
+
+ // value for the logElement ("field.path.name": <value>)
+ mutablebson::Element logElement =
+ doc.makeElementWithNewFieldName(_fieldRef.dottedField(), _preparedState->pathFoundElement);
+
+ if (!logElement.ok()) {
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Could not append entry to $pullAll oplog entry: "
+ << "set '" << _fieldRef.dottedField() << "' -> "
+ << _preparedState->pathFoundElement.toString());
}
-} // namespace mongo
+ return logBuilder->addToSets(logElement);
+}
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_pull_all.h b/src/mongo/db/ops/modifier_pull_all.h
index fef6c7b03f3..9f7efb8ac8d 100644
--- a/src/mongo/db/ops/modifier_pull_all.h
+++ b/src/mongo/db/ops/modifier_pull_all.h
@@ -38,50 +38,44 @@
namespace mongo {
- class LogBuilder;
+class LogBuilder;
- class ModifierPullAll : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierPullAll);
+class ModifierPullAll : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierPullAll);
- public:
+public:
+ ModifierPullAll();
+ virtual ~ModifierPullAll();
- ModifierPullAll();
- virtual ~ModifierPullAll();
+ /**
+     * The modifier $pullAll takes an array of values to match literally, and removes them.
+ *
+ * Ex. {$pullAll : {<field> : [<values>]}}
+ * {$pullAll :{ array : [1,2] } } will transform {array: [1,2,3]} -> {array: [3]}
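+     *
+     * If 'positional' is non-NULL, it is set to indicate whether the field
+     * path contains the positional ($) operator.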
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
- /**
- * The modifier $pullAll takes an array of values to match literally, and remove
- *
- * Ex. {$pullAll : {<field> : [<values>]}}
- * {$pullAll :{ array : [1,2] } } will transform {array: [1,2,3]} -> {array: [3]}
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
+ virtual Status apply() const;
- virtual Status apply() const;
+ virtual Status log(LogBuilder* logBuilder) const;
- virtual Status log(LogBuilder* logBuilder) const;
+private:
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
- private:
+    // 0, or the index of the $-positional element in _fieldRef.
+ size_t _positionalPathIndex;
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
+ // The instance of the field in the provided doc.
+ // This data is valid after prepare, for use by log and apply
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
- // 0 or index for $-positional in _fieldRef.
- size_t _positionalPathIndex;
+    // User-specified elements to remove
+ std::vector<BSONElement> _elementsToFind;
+};
- // The instance of the field in the provided doc.
- // This data is valid after prepare, for use by log and apply
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
-
- // User specified elements to remove
- std::vector<BSONElement> _elementsToFind;
-
- };
-
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_pull_all_test.cpp b/src/mongo/db/ops/modifier_pull_all_test.cpp
index 4d689f36ee5..4b22f4d97a8 100644
--- a/src/mongo/db/ops/modifier_pull_all_test.cpp
+++ b/src/mongo/db/ops/modifier_pull_all_test.cpp
@@ -42,209 +42,206 @@
namespace {
- using mongo::BSONObj;
- using mongo::LogBuilder;
- using mongo::ModifierPullAll;
- using mongo::ModifierInterface;
- using mongo::NumberInt;
- using mongo::Status;
- using mongo::StringData;
- using mongo::fromjson;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /** Helper to build and manipulate the mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj)
- : _modObj(modObj)
- , _mod() {
- ASSERT_OK(_mod.init(_modObj["$pullAll"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierPullAll& mod() { return _mod; }
-
- private:
- BSONObj _modObj;
- ModifierPullAll _mod;
- };
-
- TEST(Init, BadThings) {
- BSONObj modObj;
- ModifierPullAll mod;
-
- modObj = fromjson("{$pullAll: {a:1}}");
- ASSERT_NOT_OK(mod.init(modObj["$pullAll"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- modObj = fromjson("{$pullAll: {a:'test'}}");
- ASSERT_NOT_OK(mod.init(modObj["$pullAll"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- modObj = fromjson("{$pullAll: {a:{}}}");
- ASSERT_NOT_OK(mod.init(modObj["$pullAll"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- modObj = fromjson("{$pullAll: {a:true}}");
- ASSERT_NOT_OK(mod.init(modObj["$pullAll"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
-
- }
-
- TEST(PrepareApply, SimpleNumber) {
- Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
- Mod mod(fromjson("{ $pullAll : { a : [1] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : ['a', {r:1, b:2}] }"), doc);
- }
-
- TEST(PrepareApply, MissingElement) {
- Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
- Mod mod(fromjson("{ $pullAll : { a : ['r'] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [1, 'a', {r:1, b:2}] }"), doc);
- }
-
- TEST(PrepareApply, TwoElements) {
- Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
- Mod mod(fromjson("{ $pullAll : { a : [1, 'a'] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [{r:1, b:2}] }"), doc);
- }
-
- TEST(EmptyResult, RemoveEverythingOutOfOrder) {
- Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
- Mod mod(fromjson("{ $pullAll : {a : [ {r:1, b:2}, 1, 'a' ] }}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+using mongo::BSONObj;
+using mongo::LogBuilder;
+using mongo::ModifierPullAll;
+using mongo::ModifierInterface;
+using mongo::NumberInt;
+using mongo::Status;
+using mongo::StringData;
+using mongo::fromjson;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+
+/** Helper to build and manipulate the mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj) : _modObj(modObj), _mod() {
+ ASSERT_OK(_mod.init(_modObj["$pullAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(EmptyResult, RemoveEverythingInOrder) {
- Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
- Mod mod(fromjson("{ $pullAll : { a : [1, 'a', {r:1, b:2} ] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(EmptyResult, RemoveEverythingAndThenSome) {
- Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
- Mod mod(fromjson("{ $pullAll : { a : [2,3,1,'r', {r:1, b:2}, 'a' ] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+ Status apply() const {
+ return _mod.apply();
}
- TEST(PrepareLog, MissingPullValue) {
- Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
- Mod mod(fromjson("{ $pullAll : { a : [2] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [1, 'a', {r:1, b:2}] } }"), logDoc);
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(PrepareLog, MissingPath) {
- Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
- Mod mod(fromjson("{ $pullAll : { b : [1] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $unset : { b : true } }"), logDoc);
- }
-
- TEST(Prepare, MissingArrayElementPath) {
- Document doc(fromjson("{ a : [1, 2] }"));
- Mod mod(fromjson("{ $pullAll : { 'a.2' : [1] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(Prepare, FromArrayElementPath) {
- Document doc(fromjson("{ a : [1, 2] }"));
- Mod mod(fromjson("{ $pullAll : { 'a.0' : [1] } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ModifierPullAll& mod() {
+ return _mod;
}
-} // namespace
+private:
+ BSONObj _modObj;
+ ModifierPullAll _mod;
+};
+
+TEST(Init, BadThings) {
+ BSONObj modObj;
+ ModifierPullAll mod;
+
+ modObj = fromjson("{$pullAll: {a:1}}");
+ ASSERT_NOT_OK(mod.init(modObj["$pullAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+
+ modObj = fromjson("{$pullAll: {a:'test'}}");
+ ASSERT_NOT_OK(mod.init(modObj["$pullAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+
+ modObj = fromjson("{$pullAll: {a:{}}}");
+ ASSERT_NOT_OK(mod.init(modObj["$pullAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+
+ modObj = fromjson("{$pullAll: {a:true}}");
+ ASSERT_NOT_OK(mod.init(modObj["$pullAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(PrepareApply, SimpleNumber) {
+ Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
+ Mod mod(fromjson("{ $pullAll : { a : [1] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : ['a', {r:1, b:2}] }"), doc);
+}
+
+TEST(PrepareApply, MissingElement) {
+ Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
+ Mod mod(fromjson("{ $pullAll : { a : ['r'] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [1, 'a', {r:1, b:2}] }"), doc);
+}
+
+TEST(PrepareApply, TwoElements) {
+ Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
+ Mod mod(fromjson("{ $pullAll : { a : [1, 'a'] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [{r:1, b:2}] }"), doc);
+}
+
+TEST(EmptyResult, RemoveEverythingOutOfOrder) {
+ Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
+ Mod mod(fromjson("{ $pullAll : {a : [ {r:1, b:2}, 1, 'a' ] }}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+}
+
+TEST(EmptyResult, RemoveEverythingInOrder) {
+ Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
+ Mod mod(fromjson("{ $pullAll : { a : [1, 'a', {r:1, b:2} ] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+}
+
+TEST(EmptyResult, RemoveEverythingAndThenSome) {
+ Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
+ Mod mod(fromjson("{ $pullAll : { a : [2,3,1,'r', {r:1, b:2}, 'a' ] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+}
+
+TEST(PrepareLog, MissingPullValue) {
+ Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
+ Mod mod(fromjson("{ $pullAll : { a : [2] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [1, 'a', {r:1, b:2}] } }"), logDoc);
+}
+
+TEST(PrepareLog, MissingPath) {
+ Document doc(fromjson("{ a : [1, 'a', {r:1, b:2}] }"));
+ Mod mod(fromjson("{ $pullAll : { b : [1] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $unset : { b : true } }"), logDoc);
+}
+
+TEST(Prepare, MissingArrayElementPath) {
+ Document doc(fromjson("{ a : [1, 2] }"));
+ Mod mod(fromjson("{ $pullAll : { 'a.2' : [1] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(Prepare, FromArrayElementPath) {
+ Document doc(fromjson("{ a : [1, 2] }"));
+ Mod mod(fromjson("{ $pullAll : { 'a.0' : [1] } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
+
+} // namespace
diff --git a/src/mongo/db/ops/modifier_pull_test.cpp b/src/mongo/db/ops/modifier_pull_test.cpp
index e0918b24db5..8862ac62575 100644
--- a/src/mongo/db/ops/modifier_pull_test.cpp
+++ b/src/mongo/db/ops/modifier_pull_test.cpp
@@ -40,537 +40,516 @@
namespace {
- using mongo::BSONObj;
- using mongo::LogBuilder;
- using mongo::ModifierPull;
- using mongo::ModifierInterface;
- using mongo::Status;
- using mongo::StringData;
- using mongo::fromjson;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /** Helper to build and manipulate a $pull mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj)
- : _modObj(modObj)
- , _mod() {
- ASSERT_OK(_mod.init(_modObj["$pull"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierPull& mod() {
- return _mod;
- }
-
- private:
- BSONObj _modObj;
- ModifierPull _mod;
- };
-
- TEST(SimpleMod, PrepareOKTargetNotFound) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $unset : { a : true } }"), logDoc);
- }
-
- TEST(SimpleMod, PrepareOKTargetFound) {
- Document doc(fromjson("{ a : [ 0, 1, 2, 3 ] }"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
- }
-
- TEST(SimpleMod, PrepareInvalidTargetString) {
- Document doc(fromjson("{ a : 'foo' }"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+using mongo::BSONObj;
+using mongo::LogBuilder;
+using mongo::ModifierPull;
+using mongo::ModifierInterface;
+using mongo::Status;
+using mongo::StringData;
+using mongo::fromjson;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+
+/** Helper to build and manipulate a $pull mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj) : _modObj(modObj), _mod() {
+ ASSERT_OK(_mod.init(_modObj["$pull"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(SimpleMod, PrepareInvalidTargetObject) {
- Document doc(fromjson("{ a : { 'foo' : 'bar' } }"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
- }
-
- TEST(SimpleMod, PrepareAndLogEmptyDocument) {
- Document doc(fromjson("{}"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $unset : { a : true } }"), logDoc);
- }
-
- TEST(SimpleMod, PrepareAndLogMissingElementAfterFoundPath) {
- Document doc(fromjson("{ a : { b : { c : {} } } }"));
- Mod mod(fromjson("{ $pull : { 'a.b.c.d' : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b.c.d");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $unset : { 'a.b.c.d' : true } }"), logDoc);
- }
-
- TEST(SimpleMod, PrepareAndLogEmptyArray) {
- Document doc(fromjson("{ a : [] }"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
- }
-
- TEST(SimpleMod, PullMatchesNone) {
- Document doc(fromjson("{ a : [2, 3, 4, 5] }"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [2, 3, 4, 5] } }"), logDoc);
- }
-
- TEST(SimpleMod, ApplyAndLogPullMatchesOne) {
- Document doc(fromjson("{ a : [0, 1, 2, 3, 4, 5] }"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 1, 2, 3, 4, 5 ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [1, 2, 3, 4, 5] } }"), logDoc);
- }
-
- TEST(SimpleMod, ApplyAndLogPullMatchesSeveral) {
- Document doc(fromjson("{ a : [0, 1, 0, 2, 0, 3, 0, 4, 0, 5] }"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [ 1, 2, 3, 4, 5 ] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [1, 2, 3, 4, 5] } }"), logDoc);
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(SimpleMod, ApplyAndLogPullMatchesAll) {
- Document doc(fromjson("{ a : [0, -1, -2, -3, -4, -5] }"));
- Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ a : [] }"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+ Status apply() const {
+ return _mod.apply();
}
- TEST(ComplexMod, ApplyAndLogComplexDocAndMatching1) {
-
- const char* const strings[] = {
- // Document:
- "{ a : { b : [ { x : 1 }, { y : 'y' }, { x : 2 }, { z : 'z' } ] } }",
-
- // Modifier:
- "{ $pull : { 'a.b' : { $or : [ "
- " { 'y' : { $exists : true } }, "
- " { 'z' : { $exists : true } } "
- "] } } }",
-
- // Document result:
- "{ a : { b : [ { x : 1 }, { x : 2 } ] } }",
-
- // Log result:
- "{ $set : { 'a.b' : [ { x : 1 }, { x : 2 } ] } }"
- };
-
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(ComplexMod, ApplyAndLogComplexDocAndMatching2) {
-
- const char* const strings[] = {
- // Document:
- "{ a : { b : [ { x : 1 }, { y : 'y' }, { x : 2 }, { z : 'z' } ] } }",
-
- // Modifier:
- "{ $pull : { 'a.b' : { 'y' : { $exists : true } } } }",
-
- // Document result:
- "{ a : { b : [ { x : 1 }, { x : 2 }, { z : 'z' } ] } }",
-
- // Log result:
- "{ $set : { 'a.b' : [ { x : 1 }, { x : 2 }, { z : 'z' } ] } }"
- };
-
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+ ModifierPull& mod() {
+ return _mod;
}
- TEST(ComplexMod, ApplyAndLogComplexDocAndMatching3) {
+private:
+ BSONObj _modObj;
+ ModifierPull _mod;
+};
+
+TEST(SimpleMod, PrepareOKTargetNotFound) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $unset : { a : true } }"), logDoc);
+}
+
+TEST(SimpleMod, PrepareOKTargetFound) {
+ Document doc(fromjson("{ a : [ 0, 1, 2, 3 ] }"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+TEST(SimpleMod, PrepareInvalidTargetString) {
+ Document doc(fromjson("{ a : 'foo' }"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, PrepareInvalidTargetObject) {
+ Document doc(fromjson("{ a : { 'foo' : 'bar' } }"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(mod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(SimpleMod, PrepareAndLogEmptyDocument) {
+ Document doc(fromjson("{}"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $unset : { a : true } }"), logDoc);
+}
+
+TEST(SimpleMod, PrepareAndLogMissingElementAfterFoundPath) {
+ Document doc(fromjson("{ a : { b : { c : {} } } }"));
+ Mod mod(fromjson("{ $pull : { 'a.b.c.d' : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b.c.d");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $unset : { 'a.b.c.d' : true } }"), logDoc);
+}
+
+TEST(SimpleMod, PrepareAndLogEmptyArray) {
+ Document doc(fromjson("{ a : [] }"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+}
+
+TEST(SimpleMod, PullMatchesNone) {
+ Document doc(fromjson("{ a : [2, 3, 4, 5] }"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [2, 3, 4, 5] } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogPullMatchesOne) {
+ Document doc(fromjson("{ a : [0, 1, 2, 3, 4, 5] }"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 1, 2, 3, 4, 5 ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [1, 2, 3, 4, 5] } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogPullMatchesSeveral) {
+ Document doc(fromjson("{ a : [0, 1, 0, 2, 0, 3, 0, 4, 0, 5] }"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [ 1, 2, 3, 4, 5 ] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [1, 2, 3, 4, 5] } }"), logDoc);
+}
+
+TEST(SimpleMod, ApplyAndLogPullMatchesAll) {
+ Document doc(fromjson("{ a : [0, -1, -2, -3, -4, -5] }"));
+ Mod mod(fromjson("{ $pull : { a : { $lt : 1 } } }"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ a : [] }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{ $set : { a : [] } }"), logDoc);
+}
+
+TEST(ComplexMod, ApplyAndLogComplexDocAndMatching1) {
+ const char* const strings[] = {
+ // Document:
+ "{ a : { b : [ { x : 1 }, { y : 'y' }, { x : 2 }, { z : 'z' } ] } }",
+
+ // Modifier:
+ "{ $pull : { 'a.b' : { $or : [ "
+ " { 'y' : { $exists : true } }, "
+ " { 'z' : { $exists : true } } "
+ "] } } }",
+
+ // Document result:
+ "{ a : { b : [ { x : 1 }, { x : 2 } ] } }",
+
+ // Log result:
+ "{ $set : { 'a.b' : [ { x : 1 }, { x : 2 } ] } }"};
+
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
+
+TEST(ComplexMod, ApplyAndLogComplexDocAndMatching2) {
+ const char* const strings[] = {
+ // Document:
+ "{ a : { b : [ { x : 1 }, { y : 'y' }, { x : 2 }, { z : 'z' } ] } }",
+
+ // Modifier:
+ "{ $pull : { 'a.b' : { 'y' : { $exists : true } } } }",
+
+ // Document result:
+ "{ a : { b : [ { x : 1 }, { x : 2 }, { z : 'z' } ] } }",
+
+ // Log result:
+ "{ $set : { 'a.b' : [ { x : 1 }, { x : 2 }, { z : 'z' } ] } }"};
+
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
+
+TEST(ComplexMod, ApplyAndLogComplexDocAndMatching3) {
+ const char* const strings[] = {
+ // Document:
+ "{ a : { b : [ { x : 1 }, { y : 'y' }, { x : 2 }, { z : 'z' } ] } }",
+
+ // Modifier:
+ "{ $pull : { 'a.b' : { $in : [ { x : 1 }, { y : 'y' } ] } } }",
- const char* const strings[] = {
- // Document:
- "{ a : { b : [ { x : 1 }, { y : 'y' }, { x : 2 }, { z : 'z' } ] } }",
+ // Document result:
+ "{ a : { b : [ { x : 2 }, { z : 'z' } ] } }",
- // Modifier:
- "{ $pull : { 'a.b' : { $in : [ { x : 1 }, { y : 'y' } ] } } }",
+ // Log result:
+ "{ $set : { 'a.b' : [ { x : 2 }, { z : 'z' } ] } }"};
- // Document result:
- "{ a : { b : [ { x : 2 }, { z : 'z' } ] } }",
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
- // Log result:
- "{ $set : { 'a.b' : [ { x : 2 }, { z : 'z' } ] } }"
- };
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
+TEST(ValueMod, ApplyAndLogScalarValueMod) {
+ const char* const strings[] = {// Document:
+ "{ a : [1, 2, 1, 2, 1, 2] }",
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
- }
+ // Modifier:
+ "{ $pull : { a : 1 } }",
- TEST(ValueMod, ApplyAndLogScalarValueMod) {
+ // Document result:
+ "{ a : [ 2, 2, 2] }",
- const char* const strings[] = {
- // Document:
- "{ a : [1, 2, 1, 2, 1, 2] }",
+ // Log result:
+ "{ $set : { a : [ 2, 2, 2 ] } }"};
- // Modifier:
- "{ $pull : { a : 1 } }",
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
- // Document result:
- "{ a : [ 2, 2, 2] }",
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- // Log result:
- "{ $set : { a : [ 2, 2, 2 ] } }"
- };
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(ValueMod, ApplyAndLogObjectValueMod) {
+ const char* const strings[] = {// Document:
+ "{ a : [ { x : 1 }, { y : 2 }, { x : 1 }, { y : 2 } ] }",
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
+ // Modifier:
+ "{ $pull : { a : { y : 2 } } }",
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
- }
+ // Document result:
+ "{ a : [ { x : 1 }, { x : 1 }] }",
- TEST(ValueMod, ApplyAndLogObjectValueMod) {
+ // Log result:
+ "{ $set : { a : [ { x : 1 }, { x : 1 } ] } }"};
- const char* const strings[] = {
- // Document:
- "{ a : [ { x : 1 }, { y : 2 }, { x : 1 }, { y : 2 } ] }",
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
- // Modifier:
- "{ $pull : { a : { y : 2 } } }",
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- // Document result:
- "{ a : [ { x : 1 }, { x : 1 }] }",
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
- // Log result:
- "{ $set : { a : [ { x : 1 }, { x : 1 } ] } }"
- };
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
+TEST(DocumentationTests, Example1) {
+ const char* const strings[] = {
+ // Document:
+ "{ flags: ['vme', 'de', 'pse', 'tsc', 'msr', 'pae', 'mce' ] }",
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ // Modifier:
+ "{ $pull: { flags: 'msr' } }",
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
+ // Document result:
+ "{ flags: ['vme', 'de', 'pse', 'tsc', 'pae', 'mce' ] }",
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
- }
+ // Log result:
+ "{ $set : { flags: ['vme', 'de', 'pse', 'tsc', 'pae', 'mce' ] } }"};
- TEST(DocumentationTests, Example1) {
- const char* const strings[] = {
- // Document:
- "{ flags: ['vme', 'de', 'pse', 'tsc', 'msr', 'pae', 'mce' ] }",
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
- // Modifier:
- "{ $pull: { flags: 'msr' } }",
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "flags");
+ ASSERT_FALSE(execInfo.noOp);
- // Document result:
- "{ flags: ['vme', 'de', 'pse', 'tsc', 'pae', 'mce' ] }",
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
- // Log result:
- "{ $set : { flags: ['vme', 'de', 'pse', 'tsc', 'pae', 'mce' ] } }"
- };
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
+TEST(DocumentationTests, Example2a) {
+ const char* const strings[] = {// Document:
+ "{ votes: [ 3, 5, 6, 7, 7, 8 ] }",
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "flags");
- ASSERT_FALSE(execInfo.noOp);
+ // Modifier:
+ "{ $pull: { votes: 7 } }",
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
+ // Document result:
+ "{ votes: [ 3, 5, 6, 8 ] }",
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
- }
+ // Log result:
+ "{ $set : { votes: [ 3, 5, 6, 8 ] } }"};
- TEST(DocumentationTests, Example2a) {
- const char* const strings[] = {
- // Document:
- "{ votes: [ 3, 5, 6, 7, 7, 8 ] }",
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
- // Modifier:
- "{ $pull: { votes: 7 } }",
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "votes");
+ ASSERT_FALSE(execInfo.noOp);
- // Document result:
- "{ votes: [ 3, 5, 6, 8 ] }",
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
- // Log result:
- "{ $set : { votes: [ 3, 5, 6, 8 ] } }"
- };
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
+TEST(DocumentationTests, Example2b) {
+ const char* const strings[] = {// Document:
+ "{ votes: [ 3, 5, 6, 7, 7, 8 ] }",
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "votes");
- ASSERT_FALSE(execInfo.noOp);
+ // Modifier:
+ "{ $pull: { votes: { $gt: 6 } } }",
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
+ // Document result:
+ "{ votes: [ 3, 5, 6 ] }",
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
- }
+ // Log result:
+ "{ $set : { votes: [ 3, 5, 6 ] } }"};
- TEST(DocumentationTests, Example2b) {
- const char* const strings[] = {
- // Document:
- "{ votes: [ 3, 5, 6, 7, 7, 8 ] }",
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
- // Modifier:
- "{ $pull: { votes: { $gt: 6 } } }",
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "votes");
+ ASSERT_FALSE(execInfo.noOp);
- // Document result:
- "{ votes: [ 3, 5, 6 ] }",
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
- // Log result:
- "{ $set : { votes: [ 3, 5, 6 ] } }"
- };
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
+TEST(MatchingEdgeCases, NonObjectShortCircuit) {
+ const char* const strings[] = {
+ "{ a: [ { x: 1 }, 2 ] }",
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "votes");
- ASSERT_FALSE(execInfo.noOp);
+ "{ $pull: { a: { x: 1 } } }",
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
+ "{ a: [ 2 ] }",
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
- }
-
- TEST(MatchingEdgeCases, NonObjectShortCircuit) {
- const char* const strings[] = {
- "{ a: [ { x: 1 }, 2 ] }",
-
- "{ $pull: { a: { x: 1 } } }",
-
- "{ a: [ 2 ] }",
-
- "{ $set : { a: [ 2 ] } }",
- };
+ "{ $set : { a: [ 2 ] } }",
+ };
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
- }
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
- TEST(MatchingRegressions, SERVER_3988) {
- const char* const strings[] = {
- "{ x: 1, y: [ 2, 3, 4, 'abc', 'xyz' ] }",
+TEST(MatchingRegressions, SERVER_3988) {
+ const char* const strings[] = {
+ "{ x: 1, y: [ 2, 3, 4, 'abc', 'xyz' ] }",
- "{ $pull: { y: /yz/ } }",
+ "{ $pull: { y: /yz/ } }",
- "{ x: 1, y: [ 2, 3, 4, 'abc' ] }",
+ "{ x: 1, y: [ 2, 3, 4, 'abc' ] }",
- "{ $set : { y: [ 2, 3, 4, 'abc' ] } }",
- };
+ "{ $set : { y: [ 2, 3, 4, 'abc' ] } }",
+ };
- Document doc(fromjson(strings[0]));
- Mod mod(fromjson(strings[1]));
+ Document doc(fromjson(strings[0]));
+ Mod mod(fromjson(strings[1]));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "y");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "y");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(mod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(strings[2]), doc);
+ ASSERT_OK(mod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(strings[2]), doc);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod.log(&logBuilder));
- ASSERT_EQUALS(fromjson(strings[3]), logDoc);
- }
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod.log(&logBuilder));
+ ASSERT_EQUALS(fromjson(strings[3]), logDoc);
+}
-} // namespace
+} // namespace
diff --git a/src/mongo/db/ops/modifier_push.cpp b/src/mongo/db/ops/modifier_push.cpp
index 70a227fddbd..636a9f4450a 100644
--- a/src/mongo/db/ops/modifier_push.cpp
+++ b/src/mongo/db/ops/modifier_push.cpp
@@ -44,220 +44,196 @@
namespace mongo {
- using std::abs;
- using std::numeric_limits;
+using std::abs;
+using std::numeric_limits;
- namespace mb = mutablebson;
- namespace str = mongoutils::str;
+namespace mb = mutablebson;
+namespace str = mongoutils::str;
- namespace {
+namespace {
- const char kEach[] = "$each";
- const char kSlice[] = "$slice";
- const char kSort[] = "$sort";
- const char kPosition[] = "$position";
+const char kEach[] = "$each";
+const char kSlice[] = "$slice";
+const char kSort[] = "$sort";
+const char kPosition[] = "$position";
- bool isPatternElement(const BSONElement& pattern) {
- if (!pattern.isNumber()) {
- return false;
- }
+bool isPatternElement(const BSONElement& pattern) {
+ if (!pattern.isNumber()) {
+ return false;
+ }
- // Patterns can be only 1 or -1.
- double val = pattern.Number();
- if (val != 1 && val != -1) {
- return false;
- }
+ // Patterns can be only 1 or -1.
+ double val = pattern.Number();
+ if (val != 1 && val != -1) {
+ return false;
+ }
- return true;
- }
+ return true;
+}
- bool inEachMode(const BSONElement& modExpr) {
- if (modExpr.type() != Object) {
- return false;
- }
- BSONObj obj = modExpr.embeddedObject();
- if (obj[kEach].type() == EOO) {
- return false;
- }
- return true;
+bool inEachMode(const BSONElement& modExpr) {
+ if (modExpr.type() != Object) {
+ return false;
+ }
+ BSONObj obj = modExpr.embeddedObject();
+ if (obj[kEach].type() == EOO) {
+ return false;
+ }
+ return true;
+}
+
+Status parseEachMode(ModifierPush::ModifierPushMode pushMode,
+ const BSONElement& modExpr,
+ BSONElement* eachElem,
+ BSONElement* sliceElem,
+ BSONElement* sortElem,
+ BSONElement* positionElem) {
+ Status status = Status::OK();
+
+ // If in $pushAll mode, all we need is the array.
+ if (pushMode == ModifierPush::PUSH_ALL) {
+ if (modExpr.type() != Array) {
+ return Status(ErrorCodes::BadValue, "$pushAll requires an array");
}
+ *eachElem = modExpr;
+ return Status::OK();
+ }
- Status parseEachMode(ModifierPush::ModifierPushMode pushMode,
- const BSONElement& modExpr,
- BSONElement* eachElem,
- BSONElement* sliceElem,
- BSONElement* sortElem,
- BSONElement* positionElem) {
-
- Status status = Status::OK();
-
- // If in $pushAll mode, all we need is the array.
- if (pushMode == ModifierPush::PUSH_ALL) {
- if (modExpr.type() != Array) {
- return Status(ErrorCodes::BadValue, "$pushAll requires an array");
- }
- *eachElem = modExpr;
- return Status::OK();
- }
-
- // The $each clause must be an array.
- *eachElem = modExpr.embeddedObject()[kEach];
- if (eachElem->type() != Array) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The argument to $each in $push must be"
- " an array but it was of type "
- << typeName(eachElem->type()));
- }
-
- // There must be only one $each clause.
- bool seenEach = false;
- BSONObjIterator itMod(modExpr.embeddedObject());
- while (itMod.more()) {
- BSONElement modElem = itMod.next();
- if (mongoutils::str::equals(modElem.fieldName(), kEach)) {
- if (seenEach) {
- return Status(ErrorCodes::BadValue,
- "Only one $each clause is supported.");
- }
- seenEach = true;
- }
- }
+ // The $each clause must be an array.
+ *eachElem = modExpr.embeddedObject()[kEach];
+ if (eachElem->type() != Array) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The argument to $each in $push must be"
+ " an array but it was of type "
+ << typeName(eachElem->type()));
+ }
- // Slice, sort, position are optional and may be present in any order.
- bool seenSlice = false;
- bool seenSort = false;
- bool seenPosition = false;
- BSONObjIterator itPush(modExpr.embeddedObject());
- while (itPush.more()) {
- BSONElement elem = itPush.next();
- if (mongoutils::str::equals(elem.fieldName(), kSlice)) {
- if (seenSlice) {
- return Status(ErrorCodes::BadValue,
- "Only one $slice clause is supported.");
- }
- *sliceElem = elem;
- seenSlice = true;
- }
- else if (mongoutils::str::equals(elem.fieldName(), kSort)) {
- if (seenSort) {
- return Status(ErrorCodes::BadValue,
- "Only one $sort clause is supported.");
- }
- *sortElem = elem;
- seenSort = true;
- }
- else if (mongoutils::str::equals(elem.fieldName(), kPosition)) {
- if (seenPosition) {
- return Status(ErrorCodes::BadValue,
- "Only one $position clause is supported.");
- }
- *positionElem = elem;
- seenPosition = true;
- }
- else if (!mongoutils::str::equals(elem.fieldName(), kEach)) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Unrecognized clause in $push: "
- << elem.fieldNameStringData());
- }
+ // There must be only one $each clause.
+ bool seenEach = false;
+ BSONObjIterator itMod(modExpr.embeddedObject());
+ while (itMod.more()) {
+ BSONElement modElem = itMod.next();
+ if (mongoutils::str::equals(modElem.fieldName(), kEach)) {
+ if (seenEach) {
+ return Status(ErrorCodes::BadValue, "Only one $each clause is supported.");
}
-
- return Status::OK();
+ seenEach = true;
}
+ }
- } // unnamed namespace
-
- struct ModifierPush::PreparedState {
-
- PreparedState(mutablebson::Document* targetDoc)
- : doc(*targetDoc)
- , idxFound(0)
- , elemFound(doc.end())
- , arrayPreModSize(0) {
+ // Slice, sort, position are optional and may be present in any order.
+ bool seenSlice = false;
+ bool seenSort = false;
+ bool seenPosition = false;
+ BSONObjIterator itPush(modExpr.embeddedObject());
+ while (itPush.more()) {
+ BSONElement elem = itPush.next();
+ if (mongoutils::str::equals(elem.fieldName(), kSlice)) {
+ if (seenSlice) {
+ return Status(ErrorCodes::BadValue, "Only one $slice clause is supported.");
+ }
+ *sliceElem = elem;
+ seenSlice = true;
+ } else if (mongoutils::str::equals(elem.fieldName(), kSort)) {
+ if (seenSort) {
+ return Status(ErrorCodes::BadValue, "Only one $sort clause is supported.");
+ }
+ *sortElem = elem;
+ seenSort = true;
+ } else if (mongoutils::str::equals(elem.fieldName(), kPosition)) {
+ if (seenPosition) {
+ return Status(ErrorCodes::BadValue, "Only one $position clause is supported.");
+ }
+ *positionElem = elem;
+ seenPosition = true;
+ } else if (!mongoutils::str::equals(elem.fieldName(), kEach)) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Unrecognized clause in $push: " << elem.fieldNameStringData());
}
+ }
- // Document that is going to be changed.
- mutablebson::Document& doc;
+ return Status::OK();
+}
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
+} // unnamed namespace
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element elemFound;
+struct ModifierPush::PreparedState {
+ PreparedState(mutablebson::Document* targetDoc)
+ : doc(*targetDoc), idxFound(0), elemFound(doc.end()), arrayPreModSize(0) {}
- size_t arrayPreModSize;
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
- };
+ // Index in _fieldRef for which an Element exist in the document.
+ size_t idxFound;
- ModifierPush::ModifierPush(ModifierPush::ModifierPushMode pushMode)
- : _fieldRef()
- , _posDollar(0)
- , _eachMode(false)
- , _eachElem()
- , _slicePresent(false)
- , _slice(0)
- , _sortPresent(false)
- , _startPosition(std::numeric_limits<std::size_t>::max())
- , _sort()
- , _pushMode(pushMode)
- , _val() {
- }
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element elemFound;
- ModifierPush::~ModifierPush() {
- }
+ size_t arrayPreModSize;
+};
- Status ModifierPush::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
+ModifierPush::ModifierPush(ModifierPush::ModifierPushMode pushMode)
+ : _fieldRef(),
+ _posDollar(0),
+ _eachMode(false),
+ _eachElem(),
+ _slicePresent(false),
+ _slice(0),
+ _sortPresent(false),
+ _startPosition(std::numeric_limits<std::size_t>::max()),
+ _sort(),
+ _pushMode(pushMode),
+ _val() {}
- //
- // field name analysis
- //
+ModifierPush::~ModifierPush() {}
- // Break down the field name into its 'dotted' components (aka parts) and check that
- // the field is fit for updates.
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
+Status ModifierPush::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ //
+ // field name analysis
+ //
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
+ // Break down the field name into its 'dotted' components (aka parts) and check that
+ // the field is fit for updates.
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
+ }
- if (positional)
- *positional = foundDollar;
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
- }
+ if (positional)
+ *positional = foundDollar;
- //
- // value analysis
- //
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
+ }
- // Are the target push values safe to store?
- BSONElement sliceElem;
- BSONElement sortElem;
- BSONElement positionElem;
- switch (modExpr.type()) {
+ //
+ // value analysis
+ //
+ // Are the target push values safe to store?
+ BSONElement sliceElem;
+ BSONElement sortElem;
+ BSONElement positionElem;
+ switch (modExpr.type()) {
case Array:
if (_pushMode == PUSH_ALL) {
_eachMode = true;
- Status status = parseEachMode(PUSH_ALL,
- modExpr,
- &_eachElem,
- &sliceElem,
- &sortElem,
- &positionElem);
+ Status status = parseEachMode(
+ PUSH_ALL, modExpr, &_eachElem, &sliceElem, &sortElem, &positionElem);
if (!status.isOK()) {
return status;
}
- }
- else {
+ } else {
_val = modExpr;
}
break;
@@ -266,24 +242,19 @@ namespace mongo {
if (_pushMode == PUSH_ALL) {
return Status(ErrorCodes::BadValue,
str::stream() << "$pushAll requires an array of values "
- "but was given an embedded document.");
+ "but was given an embedded document.");
}
// If any known clause ($each, $slice, or $sort) is present, we'd assume
            // we're using the $each variation of push and would parse accordingly.
_eachMode = inEachMode(modExpr);
if (_eachMode) {
- Status status = parseEachMode(PUSH_NORMAL,
- modExpr,
- &_eachElem,
- &sliceElem,
- &sortElem,
- &positionElem);
+ Status status = parseEachMode(
+ PUSH_NORMAL, modExpr, &_eachElem, &sliceElem, &sortElem, &positionElem);
if (!status.isOK()) {
return status;
}
- }
- else {
+ } else {
_val = modExpr;
}
break;
@@ -292,435 +263,400 @@ namespace mongo {
if (_pushMode == PUSH_ALL) {
return Status(ErrorCodes::BadValue,
str::stream() << "$pushAll requires an array of values "
- "but was given an "
- << typeName(modExpr.type()));
+ "but was given an " << typeName(modExpr.type()));
}
_val = modExpr;
break;
+ }
+
+ // Is slice present and correct?
+ if (sliceElem.type() != EOO) {
+ if (_pushMode == PUSH_ALL) {
+ return Status(ErrorCodes::BadValue, "cannot use $slice in $pushAll");
}
- // Is slice present and correct?
- if (sliceElem.type() != EOO) {
- if (_pushMode == PUSH_ALL) {
- return Status(ErrorCodes::BadValue, "cannot use $slice in $pushAll");
- }
+ if (!sliceElem.isNumber()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The value for $slice must "
+ "be a numeric value not a "
+ << typeName(sliceElem.type()));
+ }
- if (!sliceElem.isNumber()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The value for $slice must "
- "be a numeric value not a "
- << typeName(sliceElem.type()));
- }
+ // TODO: Cleanup and unify numbers wrt getting int32/64 bson values (from doubles)
- // TODO: Cleanup and unify numbers wrt getting int32/64 bson values (from doubles)
+        // If the value of slice is not a fraction, even if it's a double, we allow it. The
+ // reason here is that the shell will use doubles by default unless told otherwise.
+ const double doubleVal = sliceElem.numberDouble();
+ if (doubleVal - static_cast<int64_t>(doubleVal) != 0) {
+ return Status(ErrorCodes::BadValue, "The $slice value in $push cannot be fractional");
+ }
- // If the value of slice is not fraction, even if it's a double, we allow it. The
- // reason here is that the shell will use doubles by default unless told otherwise.
- const double doubleVal = sliceElem.numberDouble();
- if (doubleVal - static_cast<int64_t>(doubleVal) != 0) {
- return Status(ErrorCodes::BadValue,
- "The $slice value in $push cannot be fractional");
- }
+ _slice = sliceElem.numberLong();
+ _slicePresent = true;
+ }
- _slice = sliceElem.numberLong();
- _slicePresent = true;
+ // Is position present and correct?
+ if (positionElem.type() != EOO) {
+ if (_pushMode == PUSH_ALL) {
+ return Status(ErrorCodes::BadValue, "cannot use $position in $pushAll");
}
- // Is position present and correct?
- if (positionElem.type() != EOO) {
- if (_pushMode == PUSH_ALL) {
- return Status(ErrorCodes::BadValue, "cannot use $position in $pushAll");
- }
+ if (!positionElem.isNumber()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The value for $position must "
+ "be a positive numeric value not a "
+ << typeName(positionElem.type()));
+ }
- if (!positionElem.isNumber()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The value for $position must "
- "be a positive numeric value not a "
- << typeName(positionElem.type()));
- }
+ // TODO: Cleanup and unify numbers wrt getting int32/64 bson values (from doubles)
- // TODO: Cleanup and unify numbers wrt getting int32/64 bson values (from doubles)
+        // If the value of position is not a fraction, even if it's a double, we allow it. The
+ // reason here is that the shell will use doubles by default unless told otherwise.
+ const double doubleVal = positionElem.numberDouble();
+ if (doubleVal - static_cast<int64_t>(doubleVal) != 0) {
+ return Status(ErrorCodes::BadValue,
+ "The $position value in $push cannot be fractional");
+ }
- // If the value of position is not fraction, even if it's a double, we allow it. The
- // reason here is that the shell will use doubles by default unless told otherwise.
- const double doubleVal = positionElem.numberDouble();
- if (doubleVal - static_cast<int64_t>(doubleVal) != 0) {
- return Status(ErrorCodes::BadValue,
- "The $position value in $push cannot be fractional");
- }
+ if (static_cast<double>(numeric_limits<int64_t>::max()) < doubleVal) {
+ return Status(ErrorCodes::BadValue,
+ "The $position value in $push is too large a number.");
+ }
- if (static_cast<double>(numeric_limits<int64_t>::max()) < doubleVal) {
- return Status(ErrorCodes::BadValue,
- "The $position value in $push is too large a number.");
- }
+ if (static_cast<double>(numeric_limits<int64_t>::min()) > doubleVal) {
+ return Status(ErrorCodes::BadValue,
+ "The $position value in $push is too small a number.");
+ }
- if (static_cast<double>(numeric_limits<int64_t>::min()) > doubleVal) {
- return Status(ErrorCodes::BadValue,
- "The $position value in $push is too small a number.");
- }
+ const int64_t tempVal = positionElem.numberLong();
+ if (tempVal < 0)
+ return Status(ErrorCodes::BadValue, "The $position value in $push must be positive.");
- const int64_t tempVal = positionElem.numberLong();
- if (tempVal < 0)
- return Status(ErrorCodes::BadValue,
- "The $position value in $push must be positive.");
+ _startPosition = size_t(tempVal);
+ }
- _startPosition = size_t(tempVal);
+ // Is sort present and correct?
+ if (sortElem.type() != EOO) {
+ if (_pushMode == PUSH_ALL) {
+ return Status(ErrorCodes::BadValue, "cannot use $sort in $pushAll");
}
- // Is sort present and correct?
- if (sortElem.type() != EOO) {
- if (_pushMode == PUSH_ALL) {
- return Status(ErrorCodes::BadValue,
- "cannot use $sort in $pushAll");
- }
+ if (sortElem.type() != Object && !sortElem.isNumber()) {
+ return Status(ErrorCodes::BadValue,
+ "The $sort is invalid: use 1/-1 to sort the whole element, "
+ "or {field:1/-1} to sort embedded fields");
+ }
- if (sortElem.type() != Object && !sortElem.isNumber()) {
+ if (sortElem.isABSONObj()) {
+ BSONObj sortObj = sortElem.embeddedObject();
+ if (sortObj.isEmpty()) {
return Status(ErrorCodes::BadValue,
- "The $sort is invalid: use 1/-1 to sort the whole element, "
- "or {field:1/-1} to sort embedded fields");
+ "The $sort pattern is empty when it should be a set of fields.");
}
- if (sortElem.isABSONObj()) {
- BSONObj sortObj = sortElem.embeddedObject();
- if (sortObj.isEmpty()) {
+ // Check if the sort pattern is sound.
+ BSONObjIterator sortIter(sortObj);
+ while (sortIter.more()) {
+ BSONElement sortPatternElem = sortIter.next();
+
+ // We require either <field>: 1 or -1 for asc and desc.
+ if (!isPatternElement(sortPatternElem)) {
return Status(ErrorCodes::BadValue,
- "The $sort pattern is empty when it should be a set of fields.");
+ "The $sort element value must be either 1 or -1");
}
- // Check if the sort pattern is sound.
- BSONObjIterator sortIter(sortObj);
- while (sortIter.more()) {
-
- BSONElement sortPatternElem = sortIter.next();
-
- // We require either <field>: 1 or -1 for asc and desc.
- if (!isPatternElement(sortPatternElem)) {
- return Status(ErrorCodes::BadValue,
- "The $sort element value must be either 1 or -1");
- }
+ // All fields parts must be valid.
+ FieldRef sortField(sortPatternElem.fieldName());
+ if (sortField.numParts() == 0) {
+ return Status(ErrorCodes::BadValue, "The $sort field cannot be empty");
+ }
- // All fields parts must be valid.
- FieldRef sortField(sortPatternElem.fieldName());
- if (sortField.numParts() == 0) {
+ for (size_t i = 0; i < sortField.numParts(); i++) {
+ if (sortField.getPart(i).size() == 0) {
return Status(ErrorCodes::BadValue,
- "The $sort field cannot be empty");
- }
-
- for (size_t i = 0; i < sortField.numParts(); i++) {
- if (sortField.getPart(i).size() == 0) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The $sort field is a dotted field "
- "but has an empty part: "
- << sortField.dottedField());
- }
+ str::stream()
+ << "The $sort field is a dotted field "
+ "but has an empty part: " << sortField.dottedField());
}
}
-
- _sort = PatternElementCmp(sortElem.embeddedObject());
}
- else {
- // Ensure the sortElem number is valid.
- if (!isPatternElement(sortElem)) {
- return Status(ErrorCodes::BadValue,
- "The $sort element value must be either 1 or -1");
- }
- _sort = PatternElementCmp(BSON("" << sortElem.number()));
+ _sort = PatternElementCmp(sortElem.embeddedObject());
+ } else {
+ // Ensure the sortElem number is valid.
+ if (!isPatternElement(sortElem)) {
+ return Status(ErrorCodes::BadValue,
+ "The $sort element value must be either 1 or -1");
}
- _sortPresent = true;
+ _sort = PatternElementCmp(BSON("" << sortElem.number()));
}
- return Status::OK();
+ _sortPresent = true;
}
- Status ModifierPush::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
+ return Status::OK();
+}
- _preparedState.reset(new PreparedState(&root.getDocument()));
+Status ModifierPush::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(&root.getDocument()));
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_posDollar) {
- if (matchedField.empty()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
- }
- _fieldRef.setPart(_posDollar, matchedField);
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_posDollar) {
+ if (matchedField.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
+ }
+ _fieldRef.setPart(_posDollar, matchedField);
+ }
+
+ // Locate the field name in 'root'. Note that we may not have all the parts in the path
+ // in the doc -- which is fine. Our goal now is merely to reason about whether this mod
+    // apply is a noOp or whether it can be in place. The remaining path, if missing, will
+ // be created during the apply.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->idxFound, &_preparedState->elemFound);
+
+ // FindLongestPrefix may say the path does not exist at all, which is fine here, or
+ // that the path was not viable or otherwise wrong, in which case, the mod cannot
+ // proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ _preparedState->elemFound = root.getDocument().end();
+
+ } else if (status.isOK()) {
+ const bool destExists = (_preparedState->idxFound == (_fieldRef.numParts() - 1));
+ // If the path exists, we require the target field to be already an
+ // array.
+ if (destExists && _preparedState->elemFound.getType() != Array) {
+ mb::Element idElem = mb::findFirstChildNamed(root, "_id");
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The field '" << _fieldRef.dottedField() << "'"
+ << " must be an array but is of type "
+ << typeName(_preparedState->elemFound.getType())
+ << " in document {" << idElem.toString() << "}");
+ }
+ } else {
+ return status;
+ }
+
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_fieldRef;
+
+ return Status::OK();
+}
+
+namespace {
+Status pushFirstElement(mb::Element& arrayElem,
+ const size_t arraySize,
+ const size_t pos,
+ mb::Element& elem) {
+ // Empty array or pushing to the front
+ if (arraySize == 0 || pos == 0) {
+ return arrayElem.pushFront(elem);
+ } else {
+ // Push position is at the end, or beyond
+ if (pos >= arraySize) {
+ return arrayElem.pushBack(elem);
}
- // Locate the field name in 'root'. Note that we may not have all the parts in the path
- // in the doc -- which is fine. Our goal now is merely to reason about whether this mod
- // apply is a noOp or whether is can be in place. The remainin path, if missing, will
- // be created during the apply.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
-
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->elemFound = root.getDocument().end();
+ const size_t appendPos = pos - 1;
+ mutablebson::Element fromElem = getNthChild(arrayElem, appendPos);
+ // This should not be possible since the checks above should
+ // cover us but error just in case
+ if (!fromElem.ok()) {
+ return Status(ErrorCodes::InvalidLength,
+ str::stream() << "The specified position (" << appendPos << "/" << pos
+ << ") is invalid based on the length ( " << arraySize
+ << ") of the array");
}
- else if (status.isOK()) {
- const bool destExists = (_preparedState->idxFound == (_fieldRef.numParts()-1));
- // If the path exists, we require the target field to be already an
- // array.
- if (destExists && _preparedState->elemFound.getType() != Array) {
- mb::Element idElem = mb::findFirstChildNamed(root, "_id");
- return Status(ErrorCodes::BadValue,
- str::stream() << "The field '" << _fieldRef.dottedField() << "'"
- << " must be an array but is of type "
- << typeName(_preparedState->elemFound.getType())
- << " in document {" << idElem.toString() << "}");
- }
+ return fromElem.addSiblingRight(elem);
+ }
+}
+}  // unnamed namespace
+
+Status ModifierPush::apply() const {
+ Status status = Status::OK();
+
+ //
+    // Applying a $push with an $each clause has the following steps
+    //  1. Create the doc array we'll push into, if it is not there
+    //  2. Add the items in the $each array (or the simple $push) to the doc array
+    //  3. Sort the resulting array according to the $sort clause, if present
+    //  4. Trim the resulting array according to the $slice clause, if present
+ //
+ // TODO There are _lots_ of optimization opportunities that we'll consider once the
+ // test coverage is adequate.
+ //
+
+ // 1. If the array field is not there, create it as an array and attach it to the
+ // document.
+ if (!_preparedState->elemFound.ok() || _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ // Creates the array element
+ mutablebson::Document& doc = _preparedState->doc;
+ StringData lastPart = _fieldRef.getPart(_fieldRef.numParts() - 1);
+ mutablebson::Element baseArray = doc.makeElementArray(lastPart);
+ if (!baseArray.ok()) {
+ return Status(ErrorCodes::InternalError, "can't create new base array");
}
- else {
- return status;
+
+ // Now, we can be in two cases here, as far as attaching the element being set
+ // goes: (a) none of the parts in the element's path exist, or (b) some parts of
+ // the path exist but not all.
+ if (!_preparedState->elemFound.ok()) {
+ _preparedState->elemFound = doc.root();
+ _preparedState->idxFound = 0;
+ } else {
+ _preparedState->idxFound++;
}
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_fieldRef;
+ // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
+ status = pathsupport::createPathAt(
+ _fieldRef, _preparedState->idxFound, _preparedState->elemFound, baseArray);
+ if (!status.isOK()) {
+ return status;
+ }
- return Status::OK();
+ // Point to the base array just created. The subsequent code expects it to exist
+ // already.
+ _preparedState->elemFound = baseArray;
}
- namespace {
- Status pushFirstElement(mb::Element& arrayElem,
- const size_t arraySize,
- const size_t pos,
- mb::Element& elem) {
+ // This is the count of the array before we change it, or 0 if missing from the doc.
+ _preparedState->arrayPreModSize = countChildren(_preparedState->elemFound);
+ // 2. Add new elements to the array either by going over the $each array or by
+ // appending the (old style $push) element.
+ if (_eachMode || _pushMode == PUSH_ALL) {
+ BSONObjIterator itEach(_eachElem.embeddedObject());
- // Empty array or pushing to the front
- if (arraySize == 0 || pos == 0) {
- return arrayElem.pushFront(elem);
- }
- else {
+ // When adding more than one element we keep track of the previous one
+ // so we can add right siblings to it.
+ mutablebson::Element prevElem = _preparedState->doc.end();
- // Push position is at the end, or beyond
- if (pos >= arraySize) {
- return arrayElem.pushBack(elem);
- }
+ // The first element is special below
+ bool first = true;
- const size_t appendPos = pos - 1;
- mutablebson::Element fromElem = getNthChild(arrayElem, appendPos);
-
- // This should not be possible since the checks above should
- // cover us but error just in case
- if (!fromElem.ok()){
- return Status(ErrorCodes::InvalidLength,
- str::stream() << "The specified position (" << appendPos << "/"
- << pos
- << ") is invalid based on the length ( "
- << arraySize
- << ") of the array");
- }
+ while (itEach.more()) {
+ BSONElement eachItem = itEach.next();
+ mutablebson::Element elem =
+ _preparedState->doc.makeElementWithNewFieldName(StringData(), eachItem);
- return fromElem.addSiblingRight(elem);
- }
- }
- } //unamed namespace
-
- Status ModifierPush::apply() const {
-
- Status status = Status::OK();
-
- //
- // Applying a $push with an $clause has the following steps
- // 1. Create the doc array we'll push into, if it is not there
- // 2. Add the items in the $each array (or the simple $push) to the doc array
- // 3. Sort the resulting array according to $sort clause, if present
- // 4. Trim the resulting array according the $slice clasue, if present
- //
- // TODO There are _lots_ of optimization opportunities that we'll consider once the
- // test coverage is adequate.
- //
-
- // 1. If the array field is not there, create it as an array and attach it to the
- // document.
- if (!_preparedState->elemFound.ok() ||
- _preparedState->idxFound < (_fieldRef.numParts()-1)) {
-
- // Creates the array element
- mutablebson::Document& doc = _preparedState->doc;
- StringData lastPart = _fieldRef.getPart(_fieldRef.numParts()-1);
- mutablebson::Element baseArray = doc.makeElementArray(lastPart);
- if (!baseArray.ok()) {
- return Status(ErrorCodes::InternalError, "can't create new base array");
+ if (first) {
+ status = pushFirstElement(_preparedState->elemFound,
+ _preparedState->arrayPreModSize,
+ _startPosition,
+ elem);
+ } else {
+ status = prevElem.addSiblingRight(elem);
}
- // Now, we can be in two cases here, as far as attaching the element being set
- // goes: (a) none of the parts in the element's path exist, or (b) some parts of
- // the path exist but not all.
- if (!_preparedState->elemFound.ok()) {
- _preparedState->elemFound = doc.root();
- _preparedState->idxFound = 0;
- }
- else {
- _preparedState->idxFound++;
- }
-
- // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
- status = pathsupport::createPathAt(_fieldRef,
- _preparedState->idxFound,
- _preparedState->elemFound,
- baseArray);
if (!status.isOK()) {
return status;
}
- // Point to the base array just created. The subsequent code expects it to exist
- // already.
- _preparedState->elemFound = baseArray;
-
- }
-
- // This is the count of the array before we change it, or 0 if missing from the doc.
- _preparedState->arrayPreModSize = countChildren(_preparedState->elemFound);
-
- // 2. Add new elements to the array either by going over the $each array or by
- // appending the (old style $push) element.
- if (_eachMode || _pushMode == PUSH_ALL) {
- BSONObjIterator itEach(_eachElem.embeddedObject());
-
- // When adding more than one element we keep track of the previous one
- // so we can add right siblings to it.
- mutablebson::Element prevElem = _preparedState->doc.end();
-
- // The first element is special below
- bool first = true;
-
- while (itEach.more()) {
- BSONElement eachItem = itEach.next();
- mutablebson::Element elem =
- _preparedState->doc.makeElementWithNewFieldName(StringData(), eachItem);
-
- if (first) {
- status = pushFirstElement(_preparedState->elemFound,
- _preparedState->arrayPreModSize,
- _startPosition,
- elem); }
- else {
- status = prevElem.addSiblingRight(elem);
- }
-
- if (!status.isOK()) {
- return status;
- }
-
- // For the next iteration the previous element will be the left sibling
- prevElem = elem;
- first = false;
- }
+ // For the next iteration the previous element will be the left sibling
+ prevElem = elem;
+ first = false;
}
- else {
- mutablebson::Element elem =
- _preparedState->doc.makeElementWithNewFieldName(StringData(), _val);
- if (!elem.ok()) {
- return Status(ErrorCodes::InternalError, "can't wrap element being $push-ed");
- }
- return pushFirstElement(_preparedState->elemFound,
- _preparedState->arrayPreModSize,
- _startPosition,
- elem);
- }
-
- // 3. Sort the resulting array, if $sort was requested.
- if (_sortPresent) {
- sortChildren(_preparedState->elemFound, _sort);
+ } else {
+ mutablebson::Element elem =
+ _preparedState->doc.makeElementWithNewFieldName(StringData(), _val);
+ if (!elem.ok()) {
+ return Status(ErrorCodes::InternalError, "can't wrap element being $push-ed");
}
+ return pushFirstElement(
+ _preparedState->elemFound, _preparedState->arrayPreModSize, _startPosition, elem);
+ }
- // 4. Trim the resulting array according to $slice, if present.
- if (_slicePresent) {
+ // 3. Sort the resulting array, if $sort was requested.
+ if (_sortPresent) {
+ sortChildren(_preparedState->elemFound, _sort);
+ }
- // Slice 0 means to remove all
- if (_slice == 0) {
- while(_preparedState->elemFound.ok() &&
- _preparedState->elemFound.rightChild().ok()) {
- _preparedState->elemFound.rightChild().remove();
- }
+ // 4. Trim the resulting array according to $slice, if present.
+ if (_slicePresent) {
+ // Slice 0 means to remove all
+ if (_slice == 0) {
+ while (_preparedState->elemFound.ok() && _preparedState->elemFound.rightChild().ok()) {
+ _preparedState->elemFound.rightChild().remove();
}
+ }
- const int64_t numChildren = mutablebson::countChildren(_preparedState->elemFound);
- int64_t countRemoved = std::max(static_cast<int64_t>(0), numChildren - abs(_slice));
+ const int64_t numChildren = mutablebson::countChildren(_preparedState->elemFound);
+ int64_t countRemoved = std::max(static_cast<int64_t>(0), numChildren - abs(_slice));
- // If _slice is negative, remove from the bottom, otherwise from the top
- const bool removeFromEnd = (_slice > 0);
+ // If _slice is negative, remove from the bottom, otherwise from the top
+ const bool removeFromEnd = (_slice > 0);
- // Either start at right or left depending if we are taking from top or bottom
- mutablebson::Element curr = removeFromEnd ?
- _preparedState->elemFound.rightChild() :
- _preparedState->elemFound.leftChild();
- while (curr.ok() && countRemoved > 0) {
- mutablebson::Element toRemove = curr;
- // Either go right or left depending if we are taking from top or bottom
- curr = removeFromEnd ? curr.leftSibling() : curr.rightSibling();
+ // Either start at right or left depending if we are taking from top or bottom
+ mutablebson::Element curr = removeFromEnd ? _preparedState->elemFound.rightChild()
+ : _preparedState->elemFound.leftChild();
+ while (curr.ok() && countRemoved > 0) {
+ mutablebson::Element toRemove = curr;
+ // Either go right or left depending if we are taking from top or bottom
+ curr = removeFromEnd ? curr.leftSibling() : curr.rightSibling();
- status = toRemove.remove();
- if (!status.isOK()) {
- return status;
- }
- countRemoved--;
+ status = toRemove.remove();
+ if (!status.isOK()) {
+ return status;
}
+ countRemoved--;
}
-
- return status;
}
- Status ModifierPush::log(LogBuilder* logBuilder) const {
+ return status;
+}
- // The start position to use for positional (ordinal) updates to the array
- // (We will increment as we append elements to the oplog entry so can't be const)
- size_t position = _preparedState->arrayPreModSize;
+Status ModifierPush::log(LogBuilder* logBuilder) const {
+ // The start position to use for positional (ordinal) updates to the array
+ // (We will increment as we append elements to the oplog entry so can't be const)
+ size_t position = _preparedState->arrayPreModSize;
- // NOTE: Idempotence Requirement
- // In the case that the document does't have an array or it is empty we need to make sure
- // that the first time the field gets filled with items that it is a full set of the array.
+ // NOTE: Idempotence Requirement
+    // In the case that the document doesn't have an array or it is empty, we need to make sure
+    // that the first time the field gets filled with items it is a full set of the array.
- // If we sorted, sliced, or added the first items to the array, make a full array copy.
- const bool doFullCopy = _slicePresent || _sortPresent
- || (position == 0) // first element in new/empty array
- || (_startPosition < _preparedState->arrayPreModSize); // add in middle
+ // If we sorted, sliced, or added the first items to the array, make a full array copy.
+ const bool doFullCopy = _slicePresent || _sortPresent ||
+ (position == 0) // first element in new/empty array
+ || (_startPosition < _preparedState->arrayPreModSize); // add in middle
- if (doFullCopy) {
- return logBuilder->addToSetsWithNewFieldName(_fieldRef.dottedField(),
- _preparedState->elemFound);
- }
- else {
- // Set only the positional elements appended
- if (_eachMode || _pushMode == PUSH_ALL) {
- // For each input element log it as a posisional $set
- BSONObjIterator itEach(_eachElem.embeddedObject());
- while (itEach.more()) {
- BSONElement eachItem = itEach.next();
- // value for the logElement ("field.path.name.N": <value>)
- const std::string positionalName =
- mongoutils::str::stream() << _fieldRef.dottedField() << "." << position++;
-
- Status s = logBuilder->addToSetsWithNewFieldName(positionalName, eachItem);
- if (!s.isOK())
- return s;
- }
+ if (doFullCopy) {
+ return logBuilder->addToSetsWithNewFieldName(_fieldRef.dottedField(),
+ _preparedState->elemFound);
+ } else {
+ // Set only the positional elements appended
+ if (_eachMode || _pushMode == PUSH_ALL) {
+            // For each input element, log it as a positional $set
+ BSONObjIterator itEach(_eachElem.embeddedObject());
+ while (itEach.more()) {
+ BSONElement eachItem = itEach.next();
+ // value for the logElement ("field.path.name.N": <value>)
+ const std::string positionalName = mongoutils::str::stream()
+ << _fieldRef.dottedField() << "." << position++;
- return Status::OK();
+ Status s = logBuilder->addToSetsWithNewFieldName(positionalName, eachItem);
+ if (!s.isOK())
+ return s;
}
- else {
- // single value for the logElement ("field.path.name.N": <value>)
- const std::string positionalName =
- mongoutils::str::stream() << _fieldRef.dottedField() << "." << position++;
- return logBuilder->addToSetsWithNewFieldName(positionalName, _val);
- }
+ return Status::OK();
+ } else {
+ // single value for the logElement ("field.path.name.N": <value>)
+ const std::string positionalName = mongoutils::str::stream() << _fieldRef.dottedField()
+ << "." << position++;
+
+ return logBuilder->addToSetsWithNewFieldName(positionalName, _val);
}
}
+}
-} // namespace mongo
+} // namespace mongo
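
The apply() path above reduces to three array operations: insert the $each items at $position, sort with $sort, and trim with $slice (a positive slice keeps the first N elements, a negative one keeps the last N). A minimal standalone sketch of that pipeline on a plain std::vector -- illustrative only, with made-up values, ignoring the mutablebson machinery:

    #include <algorithm>
    #include <cstdlib>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> arr = {5, 1, 4};  // existing array in the document
        std::vector<int> each = {3, 2};    // items from the $each clause
        std::size_t position = 1;          // $position
        const long long slice = -3;        // $slice: negative keeps the last |slice| items

        // 1. Insert the $each items at the requested position (clamped to the array size).
        position = std::min(position, arr.size());
        arr.insert(arr.begin() + position, each.begin(), each.end());

        // 2. Sort ($sort: 1, i.e. an ascending whole-value sort).
        std::sort(arr.begin(), arr.end());

        // 3. Slice: drop the excess items, from the end if slice > 0, else from the front.
        const long long excess = static_cast<long long>(arr.size()) - std::llabs(slice);
        if (slice == 0) {
            arr.clear();
        } else if (excess > 0) {
            if (slice > 0) {
                arr.erase(arr.end() - excess, arr.end());
            } else {
                arr.erase(arr.begin(), arr.begin() + excess);
            }
        }

        for (int v : arr)
            std::cout << v << ' ';  // prints: 3 4 5
        std::cout << '\n';
    }
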
diff --git a/src/mongo/db/ops/modifier_push.h b/src/mongo/db/ops/modifier_push.h
index 708c6431d97..df9c4b65db9 100644
--- a/src/mongo/db/ops/modifier_push.h
+++ b/src/mongo/db/ops/modifier_push.h
@@ -39,97 +39,91 @@
namespace mongo {
- class LogBuilder;
-
- class ModifierPush : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierPush);
-
- public:
-
- enum ModifierPushMode { PUSH_NORMAL, PUSH_ALL };
- explicit ModifierPush(ModifierPushMode mode= PUSH_NORMAL);
-
- //
- // Modifier interface implementation
- //
-
- virtual ~ModifierPush();
-
- /**
- * A 'modExpr' here is a BSONElement {<fieldname>: <each clause>, <slice clause>, <sort
- * clause>} coming from a $push mod such as {$set: {x: $each[{a:1}], $slice:3,
- * $sort{b:1}}}. init() extracts and validates the field name and the clauses. It
- * returns OK if successful or a status describing the error.
- *
- * There are currently a few restrictions concerning the clauses (but all can be
- * lifted):
- * + $slice can be negative only (ie, slicing from the recent end)
- * + $sort requires $slice to be present
- * + $sort can only sort objects (as opposed to basic types), so it only takes
- * object as patterns
- * + Because of the previous, $sort requires that the array being pushed to be made
- * of objects
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
-
- /**
- * Locates the array to be pushed into in the 'root', if it exists, and fills in
- * execInfo accordingly. Returns true if $push would succeed in 'root', otherwise
- * return a status describing the error.
- *
- * Note that a $push is never in-place. The cost of checking if it is a no-op makes it
- * so that we don't do such check either. As such, execInfo is always filled with
- * 'false' for those two options.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
-
- /**
- * Pushes the array into the prepared position and "sort/slice"s the resulting array
- * according to that call's instructions.
- */
- virtual Status apply() const;
-
- /**
- * $push currently logs the entire resulting array as a $set.
- *
- * TODO Log a positional $set in the array, whenever possible.
- */
- virtual Status log(LogBuilder* logBuilder) const;
-
- private:
-
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
-
- // 0 or index for $-positional in _fieldRef.
- size_t _posDollar;
-
- // Clauses for the $push that are filled when the $each variation of the command is used.
- bool _eachMode;
- BSONElement _eachElem;
- bool _slicePresent;
- int64_t _slice;
- bool _sortPresent;
- size_t _startPosition;
-
- PatternElementCmp _sort;
-
- // Whether this mod is supposed to be parsed as a $pushAll.
- const ModifierPushMode _pushMode;
-
- // Simple (old style) push value when the $each variation of the command is not
- // used. The _eachMode flag would be off if we're this mode.
- BSONElement _val;
-
- // The instance of the field in the provided doc. This state is valid after a
- // prepare() was issued and until a log() is issued. The document this mod is
- // being prepared against must be live throughout all the calls.
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
-
- };
-
-} // namespace mongo
+class LogBuilder;
+
+class ModifierPush : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierPush);
+
+public:
+ enum ModifierPushMode { PUSH_NORMAL, PUSH_ALL };
+ explicit ModifierPush(ModifierPushMode mode = PUSH_NORMAL);
+
+ //
+ // Modifier interface implementation
+ //
+
+ virtual ~ModifierPush();
+
+ /**
+ * A 'modExpr' here is a BSONElement {<fieldname>: <each clause>, <slice clause>, <sort
+     * clause>} coming from a $push mod such as {$push: {x: {$each: [{a:1}], $slice: 3,
+     * $sort: {b:1}}}}. init() extracts and validates the field name and the clauses. It
+ * returns OK if successful or a status describing the error.
+ *
+ * There are currently a few restrictions concerning the clauses (but all can be
+ * lifted):
+     *   + $slice can be negative only (i.e., slicing from the recent end)
+ * + $sort requires $slice to be present
+ * + $sort can only sort objects (as opposed to basic types), so it only takes
+ * object as patterns
+     *   + Because of the previous, $sort requires that the array being pushed be made
+ * of objects
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
+
+ /**
+ * Locates the array to be pushed into in the 'root', if it exists, and fills in
+ * execInfo accordingly. Returns true if $push would succeed in 'root', otherwise
+ * return a status describing the error.
+ *
+ * Note that a $push is never in-place. The cost of checking if it is a no-op makes it
+     * so that we don't do such a check either. As such, execInfo is always filled with
+ * 'false' for those two options.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
+
+ /**
+ * Pushes the array into the prepared position and "sort/slice"s the resulting array
+ * according to that call's instructions.
+ */
+ virtual Status apply() const;
+
+ /**
+ * $push currently logs the entire resulting array as a $set.
+ *
+ * TODO Log a positional $set in the array, whenever possible.
+ */
+ virtual Status log(LogBuilder* logBuilder) const;
+
+private:
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
+
+ // 0 or index for $-positional in _fieldRef.
+ size_t _posDollar;
+
+ // Clauses for the $push that are filled when the $each variation of the command is used.
+ bool _eachMode;
+ BSONElement _eachElem;
+ bool _slicePresent;
+ int64_t _slice;
+ bool _sortPresent;
+ size_t _startPosition;
+
+ PatternElementCmp _sort;
+
+ // Whether this mod is supposed to be parsed as a $pushAll.
+ const ModifierPushMode _pushMode;
+
+ // Simple (old style) push value when the $each variation of the command is not
+ // used. The _eachMode flag would be off if we're this mode.
+ BSONElement _val;
+
+ // The instance of the field in the provided doc. This state is valid after a
+ // prepare() was issued and until a log() is issued. The document this mod is
+ // being prepared against must be live throughout all the calls.
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
+
+} // namespace mongo
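
For orientation, the interface above is driven as init -> prepare -> apply, with log() producing the oplog entry; the unit tests later in this diff follow that sequence. A rough sketch of the call pattern, assuming the same includes and using-declarations as modifier_push_test.cpp below (the test name, document, and mod values here are illustrative, not part of this change):

    TEST(Usage, PushEachSketch) {
        // A $push with $each/$slice/$sort, in the shape init() expects.
        BSONObj modObj =
            fromjson("{$push: {x: {$each: [{a: 2}, {a: 1}], $slice: -3, $sort: {a: 1}}}}");
        ModifierPush mod;
        ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
                           ModifierInterface::Options::normal()));

        // Prepare against a document that already has the target array, then apply.
        Document doc(fromjson("{x: [{a: 3}]}"));
        ModifierInterface::ExecInfo execInfo;
        ASSERT_OK(mod.prepare(doc.root(), "", &execInfo));  // no $-positional match needed here
        ASSERT_OK(mod.apply());  // doc should now hold {x: [{a: 1}, {a: 2}, {a: 3}]}
    }
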
diff --git a/src/mongo/db/ops/modifier_push_sorter.h b/src/mongo/db/ops/modifier_push_sorter.h
index 930ab539d57..c942f4e5da3 100644
--- a/src/mongo/db/ops/modifier_push_sorter.h
+++ b/src/mongo/db/ops/modifier_push_sorter.h
@@ -34,44 +34,37 @@
namespace mongo {
- // Extracts the value for 'pattern' for both 'lhs' and 'rhs' and return true if 'lhs' <
- // 'rhs'. We expect that both 'lhs' and 'rhs' be key patterns.
- struct PatternElementCmp {
- BSONObj sortPattern;
- bool useWholeValue;
+// Extracts the value for 'pattern' for both 'lhs' and 'rhs' and returns true if 'lhs' <
+// 'rhs'. We expect both 'lhs' and 'rhs' to be key patterns.
+struct PatternElementCmp {
+ BSONObj sortPattern;
+ bool useWholeValue;
- PatternElementCmp()
- : sortPattern(BSONObj())
- , useWholeValue(true) {}
+ PatternElementCmp() : sortPattern(BSONObj()), useWholeValue(true) {}
- PatternElementCmp(const BSONObj& pattern)
- : sortPattern(pattern)
- , useWholeValue(pattern.hasField("")){
- }
+ PatternElementCmp(const BSONObj& pattern)
+ : sortPattern(pattern), useWholeValue(pattern.hasField("")) {}
- bool operator()(const mutablebson::Element& lhs, const mutablebson::Element& rhs) const {
- if (useWholeValue) {
- const int comparedValue = lhs.compareWithElement( rhs, false );
+ bool operator()(const mutablebson::Element& lhs, const mutablebson::Element& rhs) const {
+ if (useWholeValue) {
+ const int comparedValue = lhs.compareWithElement(rhs, false);
- const bool reversed = (sortPattern.firstElement().number() < 0 );
+ const bool reversed = (sortPattern.firstElement().number() < 0);
- return (reversed ? comparedValue > 0 : comparedValue < 0);
- }
- else {
- //TODO: Push on to mutable in the future, and to support non-contiguous Elements.
- BSONObj lhsObj = lhs.getType() == Object ?
- lhs.getValueObject() :
- lhs.getValue().wrap("");
- BSONObj rhsObj = rhs.getType() == Object ?
- rhs.getValueObject() :
- rhs.getValue().wrap("");
+ return (reversed ? comparedValue > 0 : comparedValue < 0);
+ } else {
+ // TODO: Push on to mutable in the future, and to support non-contiguous Elements.
+ BSONObj lhsObj =
+ lhs.getType() == Object ? lhs.getValueObject() : lhs.getValue().wrap("");
+ BSONObj rhsObj =
+ rhs.getType() == Object ? rhs.getValueObject() : rhs.getValue().wrap("");
- BSONObj lhsKey = lhsObj.extractFields(sortPattern, true);
- BSONObj rhsKey = rhsObj.extractFields(sortPattern, true);
+ BSONObj lhsKey = lhsObj.extractFields(sortPattern, true);
+ BSONObj rhsKey = rhsObj.extractFields(sortPattern, true);
- return lhsKey.woCompare(rhsKey, sortPattern) < 0;
- }
+ return lhsKey.woCompare(rhsKey, sortPattern) < 0;
}
- };
+ }
+};
-} // namespace mongo
+} // namespace mongo
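
PatternElementCmp above works in one of two modes: when the $sort pattern has an empty field name (i.e. a bare 1/-1 was given), the elements are compared as whole values; otherwise only the fields named in the pattern are extracted and compared. A standalone illustration of the same two-mode idea on ordinary structs (not MongoDB code; the type and values are made up):

    #include <algorithm>
    #include <iostream>
    #include <tuple>
    #include <vector>

    struct Item {
        int a;
        int b;
    };

    // Mirrors the shape of PatternElementCmp: compare the whole value, or a single named field.
    struct ItemCmp {
        bool useWholeValue;  // true when the pattern was a bare 1 / -1 (empty field name)
        int direction;       // 1 ascending, -1 descending

        bool operator()(const Item& lhs, const Item& rhs) const {
            if (useWholeValue) {
                const bool less = std::tie(lhs.a, lhs.b) < std::tie(rhs.a, rhs.b);
                const bool greater = std::tie(rhs.a, rhs.b) < std::tie(lhs.a, lhs.b);
                return direction > 0 ? less : greater;
            }
            // Field-pattern mode: compare only the field the pattern names (here, 'b').
            return direction > 0 ? lhs.b < rhs.b : rhs.b < lhs.b;
        }
    };

    int main() {
        std::vector<Item> v = {{3, 1}, {1, 2}, {2, 0}};
        std::sort(v.begin(), v.end(), ItemCmp{false, -1});  // analogous to $sort: {b: -1}
        for (const Item& it : v)
            std::cout << "{a:" << it.a << ", b:" << it.b << "} ";
        std::cout << '\n';  // prints: {a:1, b:2} {a:3, b:1} {a:2, b:0}
    }
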
diff --git a/src/mongo/db/ops/modifier_push_sorter_test.cpp b/src/mongo/db/ops/modifier_push_sorter_test.cpp
index 4df1c4dfadc..d60eea1e942 100644
--- a/src/mongo/db/ops/modifier_push_sorter_test.cpp
+++ b/src/mongo/db/ops/modifier_push_sorter_test.cpp
@@ -37,143 +37,143 @@
namespace {
- using mongo::BSONObj;
- using mongo::fromjson;
- using mongo::PatternElementCmp;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
- using mongo::mutablebson::sortChildren;
-
- class ObjectArray : public mongo::unittest::Test {
- public:
- ObjectArray() : _doc(), _size(0) {}
-
- virtual void setUp() {
- Element arr = _doc.makeElementArray("x");
- ASSERT_TRUE(arr.ok());
- ASSERT_OK(_doc.root().pushBack(arr));
- }
-
- void addObj(BSONObj obj) {
- ASSERT_LESS_THAN_OR_EQUALS(_size, 3u);
- _objs[_size] = obj;
- _size++;
-
- ASSERT_OK(_doc.root()["x"].appendObject(mongo::StringData(), obj));
- }
-
- BSONObj getOrigObj(size_t i) {
- return _objs[i];
- }
-
- BSONObj getSortedObj(size_t i) {
- return getArray()[i].getValueObject();
- }
-
- Element getArray() {
- return _doc.root()["x"];
- }
-
- private:
- Document _doc;
- BSONObj _objs[3];
- size_t _size;
- };
-
- TEST_F(ObjectArray, NormalOrder) {
- addObj(fromjson("{b:1, a:1}"));
- addObj(fromjson("{a:3, b:2}"));
- addObj(fromjson("{b:3, a:2}"));
-
- sortChildren(getArray(), PatternElementCmp(fromjson("{'a':1,'b':1}")));
-
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+using mongo::BSONObj;
+using mongo::fromjson;
+using mongo::PatternElementCmp;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+using mongo::mutablebson::sortChildren;
+
+class ObjectArray : public mongo::unittest::Test {
+public:
+ ObjectArray() : _doc(), _size(0) {}
+
+ virtual void setUp() {
+ Element arr = _doc.makeElementArray("x");
+ ASSERT_TRUE(arr.ok());
+ ASSERT_OK(_doc.root().pushBack(arr));
}
- TEST_F(ObjectArray, MixedOrder) {
- addObj(fromjson("{b:1, a:1}"));
- addObj(fromjson("{a:3, b:2}"));
- addObj(fromjson("{b:3, a:2}"));
+ void addObj(BSONObj obj) {
+ ASSERT_LESS_THAN_OR_EQUALS(_size, 3u);
+ _objs[_size] = obj;
+ _size++;
- sortChildren(getArray(), PatternElementCmp(fromjson("{b:1,a:-1}")));
-
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+ ASSERT_OK(_doc.root()["x"].appendObject(mongo::StringData(), obj));
}
- TEST_F(ObjectArray, ExtraFields) {
- addObj(fromjson("{b:1, c:2, a:1}"));
- addObj(fromjson("{c:1, a:3, b:2}"));
- addObj(fromjson("{b:3, a:2}"));
+ BSONObj getOrigObj(size_t i) {
+ return _objs[i];
+ }
- sortChildren(getArray(), PatternElementCmp(fromjson("{a:1,b:1}")));
+ BSONObj getSortedObj(size_t i) {
+ return getArray()[i].getValueObject();
+ }
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+ Element getArray() {
+ return _doc.root()["x"];
}
- TEST_F(ObjectArray, MissingFields) {
- addObj(fromjson("{a:2, b:2}"));
- addObj(fromjson("{a:1}"));
- addObj(fromjson("{a:3, b:3, c:3}"));
+private:
+ Document _doc;
+ BSONObj _objs[3];
+ size_t _size;
+};
- sortChildren(getArray(), PatternElementCmp(fromjson("{b:1,c:1}")));
+TEST_F(ObjectArray, NormalOrder) {
+ addObj(fromjson("{b:1, a:1}"));
+ addObj(fromjson("{a:3, b:2}"));
+ addObj(fromjson("{b:3, a:2}"));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
- }
+ sortChildren(getArray(), PatternElementCmp(fromjson("{'a':1,'b':1}")));
- TEST_F(ObjectArray, NestedFields) {
- addObj(fromjson("{a:{b:{c:2, d:0}}}"));
- addObj(fromjson("{a:{b:{c:1, d:2}}}"));
- addObj(fromjson("{a:{b:{c:3, d:1}}}"));
+ ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
+ ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
+ ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+}
- sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b':1}")));
+TEST_F(ObjectArray, MixedOrder) {
+ addObj(fromjson("{b:1, a:1}"));
+ addObj(fromjson("{a:3, b:2}"));
+ addObj(fromjson("{b:3, a:2}"));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
- }
+ sortChildren(getArray(), PatternElementCmp(fromjson("{b:1,a:-1}")));
- TEST_F(ObjectArray, SimpleNestedFields) {
- addObj(fromjson("{a:{b: -1}}"));
- addObj(fromjson("{a:{b: -100}}"));
- addObj(fromjson("{a:{b: 34}}"));
+ ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
+ ASSERT_EQUALS(getOrigObj(1), getSortedObj(1));
+ ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+}
- sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b':1}")));
+TEST_F(ObjectArray, ExtraFields) {
+ addObj(fromjson("{b:1, c:2, a:1}"));
+ addObj(fromjson("{c:1, a:3, b:2}"));
+ addObj(fromjson("{b:3, a:2}"));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
- }
+ sortChildren(getArray(), PatternElementCmp(fromjson("{a:1,b:1}")));
- TEST_F(ObjectArray, NestedInnerObjectDescending) {
- addObj(fromjson("{a:{b:{c:2, d:0}}}"));
- addObj(fromjson("{a:{b:{c:1, d:2}}}"));
- addObj(fromjson("{a:{b:{c:3, d:1}}}"));
+ ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
+ ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
+ ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+}
- sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b.d':-1}")));
+TEST_F(ObjectArray, MissingFields) {
+ addObj(fromjson("{a:2, b:2}"));
+ addObj(fromjson("{a:1}"));
+ addObj(fromjson("{a:3, b:3, c:3}"));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(2));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
- }
+ sortChildren(getArray(), PatternElementCmp(fromjson("{b:1,c:1}")));
- TEST_F(ObjectArray, NestedInnerObjectAscending) {
- addObj(fromjson("{a:{b:{c:2, d:0}}}"));
- addObj(fromjson("{a:{b:{c:1, d:2}}}"));
- addObj(fromjson("{a:{b:{c:3, d:1}}}"));
+ ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
+ ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
+ ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+}
- sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b.d':1}")));
+TEST_F(ObjectArray, NestedFields) {
+ addObj(fromjson("{a:{b:{c:2, d:0}}}"));
+ addObj(fromjson("{a:{b:{c:1, d:2}}}"));
+ addObj(fromjson("{a:{b:{c:3, d:1}}}"));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
- }
+ sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b':1}")));
+
+ ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
+ ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
+ ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+}
+
+TEST_F(ObjectArray, SimpleNestedFields) {
+ addObj(fromjson("{a:{b: -1}}"));
+ addObj(fromjson("{a:{b: -100}}"));
+ addObj(fromjson("{a:{b: 34}}"));
+
+ sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b':1}")));
+
+ ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
+ ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
+ ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+}
+
+TEST_F(ObjectArray, NestedInnerObjectDescending) {
+ addObj(fromjson("{a:{b:{c:2, d:0}}}"));
+ addObj(fromjson("{a:{b:{c:1, d:2}}}"));
+ addObj(fromjson("{a:{b:{c:3, d:1}}}"));
+
+ sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b.d':-1}")));
+
+ ASSERT_EQUALS(getOrigObj(0), getSortedObj(2));
+ ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
+ ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+}
+
+TEST_F(ObjectArray, NestedInnerObjectAscending) {
+ addObj(fromjson("{a:{b:{c:2, d:0}}}"));
+ addObj(fromjson("{a:{b:{c:1, d:2}}}"));
+ addObj(fromjson("{a:{b:{c:3, d:1}}}"));
+
+ sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b.d':1}")));
+
+ ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
+ ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+ ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
+}
-} // unnamed namespace
+} // unnamed namespace
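
The push tests in the next file build their expected arrays with the helpers combineVec and combineAndSortVec, whose trimming rule matches apply(): slice == 0 empties the array, a positive slice resizes down to the first 'slice' items, and a negative slice drops size + slice items from the front so the last |slice| remain (e.g. 5 items with $slice: -3 drops 2). A quick standalone check of that arithmetic (hypothetical helper name, standard C++ only):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    // Same trimming rule as combineVec/combineAndSortVec in the test file below.
    void sliceVec(std::vector<int>* combined, int32_t slice) {
        if (slice == 0) {
            combined->clear();
            return;
        }
        if (std::abs(slice) >= static_cast<int32_t>(combined->size()))
            return;  // nothing to trim
        if (slice < 0) {
            const int32_t removeCount = static_cast<int32_t>(combined->size()) + slice;
            combined->erase(combined->begin(), combined->begin() + removeCount);  // keep the tail
        } else {
            combined->resize(slice);  // keep the first 'slice' items
        }
    }

    int main() {
        std::vector<int> v = {1, 2, 3, 4, 5};
        sliceVec(&v, -3);
        assert((v == std::vector<int>{3, 4, 5}));
        return 0;
    }
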
diff --git a/src/mongo/db/ops/modifier_push_test.cpp b/src/mongo/db/ops/modifier_push_test.cpp
index 884566d48ac..275e1a4945e 100644
--- a/src/mongo/db/ops/modifier_push_test.cpp
+++ b/src/mongo/db/ops/modifier_push_test.cpp
@@ -47,1409 +47,1394 @@
namespace {
- using mongo::BSONObj;
- using mongo::BSONObjBuilder;
- using mongo::BSONArrayBuilder;
- using mongo::fromjson;
- using mongo::LogBuilder;
- using mongo::ModifierInterface;
- using mongo::ModifierPush;
- using mongo::NumberInt;
- using mongo::Ordering;
- using mongo::Status;
- using mongo::StringData;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::countChildren;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
- using std::sort;
- using std::vector;
-
- void combineVec(const vector<int>& origVec,
- const vector<int>& modVec,
- int32_t slice,
- vector<int>* combined) {
-
- using namespace std;
- combined->clear();
-
- // Slice 0 means the result is empty
- if (slice == 0)
- return;
-
- // Combine both vectors
- *combined = origVec;
- combined->insert(combined->end(), modVec.begin(), modVec.end());
-
- // Remove sliced items
- bool removeFromFront = (slice < 0);
-
- // if abs(slice) is larger than the size, nothing to do.
- if (abs(slice) >= int32_t(combined->size()))
- return;
-
- if (removeFromFront) {
- // Slice is negative.
- int32_t removeCount = combined->size() + slice;
- combined->erase(combined->begin(), combined->begin() + removeCount);
- }
- else {
- combined->resize(std::min(combined->size(), size_t(slice)));
- }
- }
-
- /**
- * Comparator between two BSONObjects that takes in consideration only the keys and
- * direction described in the sort pattern.
- */
- struct ProjectKeyCmp {
- BSONObj sortPattern;
- bool useWholeValue;
-
- ProjectKeyCmp(BSONObj pattern) : sortPattern(pattern) {
- useWholeValue = pattern.hasField("");
- }
-
- int operator()(const BSONObj& left, const BSONObj& right) const {
- int ret = 0;
- if (useWholeValue) {
- ret = left.woCompare( right, Ordering::make(sortPattern), false );
- } else {
- BSONObj lhsKey = left.extractFields(sortPattern, true);
- BSONObj rhsKey = right.extractFields(sortPattern, true);
- ret = lhsKey.woCompare(rhsKey, sortPattern);
- }
- return ret < 0;
- }
- };
-
- void combineAndSortVec(const vector<BSONObj>& origVec,
- const vector<BSONObj>& modVec,
- int32_t slice,
- BSONObj sortOrder,
- vector<BSONObj>* combined) {
-
- combined->clear();
+using mongo::BSONObj;
+using mongo::BSONObjBuilder;
+using mongo::BSONArrayBuilder;
+using mongo::fromjson;
+using mongo::LogBuilder;
+using mongo::ModifierInterface;
+using mongo::ModifierPush;
+using mongo::NumberInt;
+using mongo::Ordering;
+using mongo::Status;
+using mongo::StringData;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+using std::sort;
+using std::vector;
+
+void combineVec(const vector<int>& origVec,
+ const vector<int>& modVec,
+ int32_t slice,
+ vector<int>* combined) {
+ using namespace std;
+ combined->clear();
+
+ // Slice 0 means the result is empty
+ if (slice == 0)
+ return;
+
+ // Combine both vectors
+ *combined = origVec;
+ combined->insert(combined->end(), modVec.begin(), modVec.end());
+
+ // Remove sliced items
+ bool removeFromFront = (slice < 0);
+
+ // if abs(slice) is larger than the size, nothing to do.
+ if (abs(slice) >= int32_t(combined->size()))
+ return;
+
+ if (removeFromFront) {
+ // Slice is negative.
+ int32_t removeCount = combined->size() + slice;
+ combined->erase(combined->begin(), combined->begin() + removeCount);
+ } else {
+ combined->resize(std::min(combined->size(), size_t(slice)));
+ }
+}
- // Slice 0 means the result is empty
- if (slice == 0)
- return;
-
- *combined = origVec;
- combined->insert(combined->end(), modVec.begin(), modVec.end());
-
- sort(combined->begin(), combined->end(), ProjectKeyCmp(sortOrder));
-
- // Remove sliced items
- bool removeFromFront = (slice < 0);
-
- // if abs(slice) is larger than the size, nothing to do.
- if (abs(slice) >= int32_t(combined->size()))
- return;
-
- if (removeFromFront) {
- // Slice is negative.
- int32_t removeCount = combined->size() + slice;
- combined->erase(combined->begin(), combined->begin() + removeCount);
- }
- else {
- combined->resize(std::min(combined->size(), size_t(slice)));
+/**
+ * Comparator between two BSONObjects that takes into consideration only the keys and
+ * direction described in the sort pattern.
+ */
+struct ProjectKeyCmp {
+ BSONObj sortPattern;
+ bool useWholeValue;
+
+ ProjectKeyCmp(BSONObj pattern) : sortPattern(pattern) {
+ useWholeValue = pattern.hasField("");
+ }
+
+ int operator()(const BSONObj& left, const BSONObj& right) const {
+ int ret = 0;
+ if (useWholeValue) {
+ ret = left.woCompare(right, Ordering::make(sortPattern), false);
+ } else {
+ BSONObj lhsKey = left.extractFields(sortPattern, true);
+ BSONObj rhsKey = right.extractFields(sortPattern, true);
+ ret = lhsKey.woCompare(rhsKey, sortPattern);
}
- }
-
- //
- // Init testing (module field checking, which is done in 'fieldchecker'
- //
-
- TEST(Init, SimplePush) {
- BSONObj modObj = fromjson("{$push: {x: 0}}");
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ return ret < 0;
+ }
+};
+
+void combineAndSortVec(const vector<BSONObj>& origVec,
+ const vector<BSONObj>& modVec,
+ int32_t slice,
+ BSONObj sortOrder,
+ vector<BSONObj>* combined) {
+ combined->clear();
+
+ // Slice 0 means the result is empty
+ if (slice == 0)
+ return;
+
+ *combined = origVec;
+ combined->insert(combined->end(), modVec.begin(), modVec.end());
+
+ sort(combined->begin(), combined->end(), ProjectKeyCmp(sortOrder));
+
+ // Remove sliced items
+ bool removeFromFront = (slice < 0);
+
+ // if abs(slice) is larger than the size, nothing to do.
+ if (abs(slice) >= int32_t(combined->size()))
+ return;
+
+ if (removeFromFront) {
+ // Slice is negative.
+ int32_t removeCount = combined->size() + slice;
+ combined->erase(combined->begin(), combined->begin() + removeCount);
+ } else {
+ combined->resize(std::min(combined->size(), size_t(slice)));
+ }
+}
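+// For illustration only (hypothetical values): with origVec = [{a:3}],
+// modVec = [{a:1}, {a:2}] and sortOrder = {a:1}, the sorted combination is
+// [{a:1}, {a:2}, {a:3}]; a slice of -2 then keeps the last two, [{a:2}, {a:3}].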
+
+//
+// Init testing (modulo field checking, which is done in 'fieldchecker')
+//
+
+TEST(Init, SimplePush) {
+ BSONObj modObj = fromjson("{$push: {x: 0}}");
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+//
+// If present, is the $each clause valid?
+//
+
+TEST(Init, PushEachNormal) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2]}}}");
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachMixed) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, {a: 2}]}}}");
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachObject) {
+ // $each must be an array
+ BSONObj modObj = fromjson("{$push: {x: {$each: {'0': 1}}}}");
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
+}
- //
- // If present, is the $each clause valid?
- //
-
- TEST(Init, PushEachNormal) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2]}}}");
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+TEST(Init, PushEachSimpleType) {
+ // $each must be an array.
+ BSONObj modObj = fromjson("{$push: {x: {$each: 1}}}");
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachMixed) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, {a: 2}]}}}");
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+}
+
+TEST(Init, PushEachEmpty) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: []}}}");
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachInvalidType) {
+ // $each must be an array.
+ BSONObj modObj = fromjson("{$push: {x: {$each: {b: 1}}}}");
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachObject) {
- // $each must be an array
- BSONObj modObj = fromjson("{$push: {x: {$each: {'0': 1}}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachSimpleType) {
- // $each must be an array.
- BSONObj modObj = fromjson("{$push: {x: {$each: 1}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachEmpty) {
- BSONObj modObj = fromjson("{$push: {x: {$each: []}}}");
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+}
+
+//
+// If present, is the $slice clause valid?
+//
+
+TEST(Init, PushEachWithSliceBottom) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: -3}}}");
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachWithSliceTop) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: 3}}}");
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachWithInvalidSliceObject) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: {a: 1}}}}");
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachInvalidType) {
- // $each must be an array.
- BSONObj modObj = fromjson("{$push: {x: {$each: {b: 1}}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+}
- //
- // If present, is the $slice clause valid?
- //
-
- TEST(Init, PushEachWithSliceBottom) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: -3}}}");
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+TEST(Init, PushEachWithInvalidSliceDouble) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: -2.1}}}");
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachWithSliceTop) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: 3}}}");
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachWithInvalidSliceObject) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: {a: 1}}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachWithInvalidSliceDouble) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: -2.1}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachWithValidSliceDouble) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: -2.0}}}");
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+}
+
+TEST(Init, PushEachWithValidSliceDouble) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: -2.0}}}");
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachWithUnsupportedFullSlice) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: [1,2]}}}");
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
+}
- TEST(Init, PushEachWithUnsupportedFullSlice) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: [1,2]}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachWithWrongTypeSlice) {
- BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: '-1'}}}");
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- //
- // If present, is the sort $sort clause valid?
- //
-
- TEST(Init, PushEachWithObjectSort) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {a:1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+TEST(Init, PushEachWithWrongTypeSlice) {
+ BSONObj modObj = fromjson("{$push: {x: {$each: [1, 2], $slice: '-1'}}}");
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachWithNumbericSort) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort:1 }}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachWithInvalidSortType) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: [{a:1}]}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachDuplicateSortPattern) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: [{a:1,a:1}]}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(Init, PushEachWithInvalidSortValue) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {a:100}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+}
+
+//
+// If present, is the $sort clause valid?
+//
+
+TEST(Init, PushEachWithObjectSort) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {a:1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachWithNumbericSort) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort:1 }}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachWithInvalidSortType) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: [{a:1}]}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushEachWithEmptySortField) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'':1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachDuplicateSortPattern) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: [{a:1,a:1}]}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushEachWithEmptyDottedSortField) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'.':1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachWithInvalidSortValue) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {a:100}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushEachWithMissingSortFieldSuffix) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'a.':1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachWithEmptySortField) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'':1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushEachWithMissingSortFieldPreffix) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'.b':1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachWithEmptyDottedSortField) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'.':1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushEachWithMissingSortFieldMiddle) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'a..b':1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachWithMissingSortFieldSuffix) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'a.':1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushEachWithEmptySort) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort:{} }}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachWithMissingSortFieldPreffix) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'.b':1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- //
- // If in $pushAll semantics, do we check the array and that nothing else is there?
- //
+TEST(Init, PushEachWithMissingSortFieldMiddle) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {'a..b':1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushAllSimple) {
- BSONObj modObj = fromjson("{$pushAll: {x: [0]}}");
- ModifierPush mod(ModifierPush::PUSH_ALL);
- ASSERT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+TEST(Init, PushEachWithEmptySort) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort:{} }}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
+}
+
+//
+// If in $pushAll semantics, do we check the array and that nothing else is there?
+//
+
+TEST(Init, PushAllSimple) {
+ BSONObj modObj = fromjson("{$pushAll: {x: [0]}}");
+ ModifierPush mod(ModifierPush::PUSH_ALL);
+ ASSERT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushAllMultiple) {
+ BSONObj modObj = fromjson("{$pushAll: {x: [1,2,3]}}");
+ ModifierPush mod(ModifierPush::PUSH_ALL);
+ ASSERT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushAllObject) {
+ BSONObj modObj = fromjson("{$pushAll: {x: [{a:1},{a:2}]}}");
+ ModifierPush mod(ModifierPush::PUSH_ALL);
+ ASSERT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushAllMixed) {
+ BSONObj modObj = fromjson("{$pushAll: {x: [1,{a:2}]}}");
+ ModifierPush mod(ModifierPush::PUSH_ALL);
+ ASSERT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushAllWrongType) {
+ BSONObj modObj = fromjson("{$pushAll: {x: 1}}");
+ ModifierPush mod(ModifierPush::PUSH_ALL);
+ ASSERT_NOT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushAllMultiple) {
- BSONObj modObj = fromjson("{$pushAll: {x: [1,2,3]}}");
- ModifierPush mod(ModifierPush::PUSH_ALL);
- ASSERT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+TEST(Init, PushAllNotArray) {
+ BSONObj modObj = fromjson("{$pushAll: {x: {a:1}}}");
+ ModifierPush mod(ModifierPush::PUSH_ALL);
+ ASSERT_NOT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
+}
+
+//
+// Are all clauses present? Is anything extraneous? Is anything duplicated?
+//
+
+TEST(Init, PushEachWithSortMissingSlice) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $sort:{a:1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
+
+TEST(Init, PushEachInvalidClause) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $xxx: -1, $sort:{a:1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushAllObject) {
- BSONObj modObj = fromjson("{$pushAll: {x: [{a:1},{a:2}]}}");
- ModifierPush mod(ModifierPush::PUSH_ALL);
- ASSERT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+TEST(Init, PushEachExtraField) {
+ const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {a:1}, b: 1}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
+}
- TEST(Init, PushAllMixed) {
- BSONObj modObj = fromjson("{$pushAll: {x: [1,{a:2}]}}");
- ModifierPush mod(ModifierPush::PUSH_ALL);
- ASSERT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
+TEST(Init, PushEachDuplicateSortClause) {
+ const char* c = "{$push: {x:{$each:[{a:1},{a:2}], $slice:-2.0, $sort:{a:1}, $sort:{a:1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
- }
+}
- TEST(Init, PushAllWrongType) {
- BSONObj modObj = fromjson("{$pushAll: {x: 1}}");
- ModifierPush mod(ModifierPush::PUSH_ALL);
- ASSERT_NOT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachDuplicateSliceClause) {
+ const char* c = "{$push: {x: {$each:[{a:1},{a:2}], $slice:-2.0, $slice:-2, $sort:{a:1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushAllNotArray) {
- BSONObj modObj = fromjson("{$pushAll: {x: {a:1}}}");
- ModifierPush mod(ModifierPush::PUSH_ALL);
- ASSERT_NOT_OK(mod.init(modObj["$pushAll"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachDuplicateEachClause) {
+ const char* c = "{$push: {x: {$each:[{a:1}], $each:[{a:2}], $slice:-3, $sort:{a:1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- //
- // Are all clauses present? Is anything extroneous? Is anything duplicated?
- //
+TEST(Init, PushEachWithSliceFirst) {
+ const char* c = "{$push: {x: {$slice: -2.0, $each: [{a:1},{a:2}], $sort: {a:1}}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushEachWithSortMissingSlice) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $sort:{a:1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+TEST(Init, PushEachWithSortFirst) {
+ const char* c = "{$push: {x: {$sort: {a:1}, $slice: -2.0, $each: [{a:1},{a:2}]}}}";
+ BSONObj modObj = fromjson(c);
+ ModifierPush mod;
+ ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- TEST(Init, PushEachInvalidClause) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $xxx: -1, $sort:{a:1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+//
+// Simple mod
+//
- TEST(Init, PushEachExtraField) {
- const char* c = "{$push: {x: {$each: [{a:1},{a:2}], $slice: -2.0, $sort: {a:1}, b: 1}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
+/** Helper to build and manipulate a $push or a $pushAll mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
- TEST(Init, PushEachDuplicateSortClause) {
- const char* c = "{$push: {x:{$each:[{a:1},{a:2}], $slice:-2.0, $sort:{a:1}, $sort:{a:1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+ explicit Mod(BSONObj modObj)
+ : _mod(mongoutils::str::equals(modObj.firstElement().fieldName(), "$pushAll")
+ ? ModifierPush::PUSH_ALL
+ : ModifierPush::PUSH_NORMAL) {
+ _modObj = modObj;
+ StringData modName = modObj.firstElement().fieldName();
+ ASSERT_OK(_mod.init(_modObj[modName].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(Init, PushEachDuplicateSliceClause) {
- const char* c = "{$push: {x: {$each:[{a:1},{a:2}], $slice:-2.0, $slice:-2, $sort:{a:1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(Init, PushEachDuplicateEachClause) {
- const char* c = "{$push: {x: {$each:[{a:1}], $each:[{a:2}], $slice:-3, $sort:{a:1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_NOT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+ Status apply() const {
+ return _mod.apply();
}
- TEST(Init, PushEachWithSliceFirst) {
- const char* c = "{$push: {x: {$slice: -2.0, $each: [{a:1},{a:2}], $sort: {a:1}}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(Init, PushEachWithSortFirst) {
- const char* c = "{$push: {x: {$sort: {a:1}, $slice: -2.0, $each: [{a:1},{a:2}]}}}";
- BSONObj modObj = fromjson(c);
- ModifierPush mod;
- ASSERT_OK(mod.init(modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
+ ModifierPush& mod() {
+ return _mod;
}
- //
- // Simple mod
- //
-
- /** Helper to build and manipulate a $push or a $pushAll mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj)
- : _mod(mongoutils::str::equals(modObj.firstElement().fieldName(), "$pushAll") ?
- ModifierPush::PUSH_ALL : ModifierPush::PUSH_NORMAL) {
- _modObj = modObj;
- StringData modName = modObj.firstElement().fieldName();
- ASSERT_OK(_mod.init(_modObj[modName].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
+private:
+ ModifierPush _mod;
+ BSONObj _modObj;
+};
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
+TEST(SimpleMod, PrepareNonArray) {
+ Document doc(fromjson("{a: 1}"));
+ Mod pushMod(fromjson("{$push: {a: 1}}"));
- ModifierPush& mod() { return _mod; }
+ ModifierInterface::ExecInfo dummy;
+ ASSERT_NOT_OK(pushMod.prepare(doc.root(), "", &dummy));
+}
- private:
- ModifierPush _mod;
- BSONObj _modObj;
- };
+TEST(SimpleMod, PrepareApplyEmpty) {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: 1}}"));
- TEST(SimpleMod, PrepareNonArray) {
- Document doc(fromjson("{a: 1}"));
- Mod pushMod(fromjson("{$push: {a: 1}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo dummy;
- ASSERT_NOT_OK(pushMod.prepare(doc.root(), "", &dummy));
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(SimpleMod, PrepareApplyEmpty) {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: 1}}"));
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(SimpleMod, PrepareApplyInexistent) {
+ Document doc(fromjson("{}"));
+ Mod pushMod(fromjson("{$push: {a: 1}}"));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(SimpleMod, PrepareApplyInexistent) {
- Document doc(fromjson("{}"));
- Mod pushMod(fromjson("{$push: {a: 1}}"));
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(SimpleMod, PrepareApplyNormal) {
+ Document doc(fromjson("{a: [0]}"));
+ Mod pushMod(fromjson("{$push: {a: 1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
- }
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [0,1]}"), doc);
- TEST(SimpleMod, PrepareApplyNormal) {
- Document doc(fromjson("{a: [0]}"));
- Mod pushMod(fromjson("{$push: {a: 1}}"));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a.1':1}}"), logDoc);
+}
+
+//
+// Simple object mod
+//
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+TEST(SimpleObjMod, PrepareNonArray) {
+ Document doc(fromjson("{a: 1}"));
+ Mod pushMod(fromjson("{$push: {a: {b: 1}}}"));
+
+ ModifierInterface::ExecInfo dummy;
+ ASSERT_NOT_OK(pushMod.prepare(doc.root(), "", &dummy));
+}
+
+TEST(SimpleObjMod, PrepareApplyEmpty) {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: {b: 1}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [{b:1}]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [{b:1}]}}"), logDoc);
+}
+
+TEST(SimpleObjMod, PrepareApplyInexistent) {
+ Document doc(fromjson("{}"));
+ Mod pushMod(fromjson("{$push: {a: {b: 1}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [{b:1}]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [{b:1}]}}"), logDoc);
+}
+
+TEST(SimpleObjMod, PrepareApplyNormal) {
+ Document doc(fromjson("{a: [{b:0}]}"));
+ Mod pushMod(fromjson("{$push: {a: {b: 1}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [{b:0},{b:1}]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a.1':{b:1}}}"), logDoc);
+}
+
+TEST(SimpleObjMod, PrepareApplyDotted) {
+ Document doc(fromjson(
+ "{ _id : 1 , "
+ " question : 'a', "
+ " choices : { "
+ " first : { choice : 'b' }, "
+ " second : { choice : 'c' } }"
+ "}"));
+ Mod pushMod(fromjson("{$push: {'choices.first.votes': 1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "choices.first.votes");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson(
+ "{ _id : 1 , "
+ " question : 'a', "
+ " choices : { "
+ " first : { choice : 'b', votes: [1]}, "
+ " second : { choice : 'c' } }"
+ "}"),
+ doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'choices.first.votes':[1]}}"), logDoc);
+}
+
+
+//
+// $pushAll Variation
+//
+
+TEST(PushAll, PrepareApplyEmpty) {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$pushAll: {a: [1]}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{a: [1]}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(logDoc, fromjson("{$set: {a: [1]}}"));
+}
+
+TEST(PushAll, PrepareApplyInexistent) {
+ Document doc(fromjson("{}"));
+ Mod pushMod(fromjson("{$pushAll: {a: [1]}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{a: [1]}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(logDoc, fromjson("{$set: {a: [1]}}"));
+}
+
+TEST(PushAll, PrepareApplyNormal) {
+ Document doc(fromjson("{a: [0]}"));
+ Mod pushMod(fromjson("{$pushAll: {a: [1,2]}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{a: [0,1,2]}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(logDoc, fromjson("{$set: {'a.1': 1, 'a.2':2}}"));
+}
+
+//
+// Simple $each mod
+//
+
+TEST(SimpleEachMod, PrepareNonArray) {
+ Document doc(fromjson("{a: 1}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [1]}}}"));
+
+ ModifierInterface::ExecInfo dummy;
+ ASSERT_NOT_OK(pushMod.prepare(doc.root(), "", &dummy));
+}
+
+TEST(SimpleEachMod, PrepareApplyEmpty) {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [1]}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
+}
+
+TEST(SimpleEachMod, PrepareApplyInexistent) {
+ Document doc(fromjson("{}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [1]}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
+}
+
+TEST(SimpleEachMod, PrepareApplyInexistentMultiple) {
+ Document doc(fromjson("{}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [1, 2]}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1, 2]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [1, 2]}}"), logDoc);
+}
+
+TEST(SimpleEachMod, PrepareApplyNormal) {
+ Document doc(fromjson("{a: [0]}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [1]}}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [0,1]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a.1': 1}}"), logDoc);
+}
+
+TEST(SimpleEachMod, PrepareApplyNormalMultiple) {
+ Document doc(fromjson("{a: [0]}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [1,2]}}}"));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [0,1]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a.1':1}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [0,1,2]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a.1': 1, 'a.2':2}}"), logDoc);
+}
- //
- // Simple object mod
- //
+/**
+ * Slice variants
+ */
+TEST(SlicePushEach, TopOne) {
+ Document doc(fromjson("{a: [3]}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [2, -1], $slice:1}}}"));
- TEST(SimpleObjMod, PrepareNonArray) {
- Document doc(fromjson("{a: 1}"));
- Mod pushMod(fromjson("{$push: {a: {b: 1}}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo dummy;
- ASSERT_NOT_OK(pushMod.prepare(doc.root(), "", &dummy));
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(SimpleObjMod, PrepareApplyEmpty) {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: {b: 1}}}"));
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [3]}"), doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [3]}}"), logDoc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+/**
+ * Sort for scalar (whole) array elements
+ */
+TEST(SortPushEach, NumberSort) {
+ Document doc(fromjson("{a: [3]}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [2, -1], $sort:1}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [-1,2,3]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [-1, 2, 3]}}"), logDoc);
+}
+
+TEST(SortPushEach, NumberSortReverse) {
+ Document doc(fromjson("{a: [3]}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [4, -1], $sort:-1}}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [4,3,-1]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: [4,3,-1]}}"), logDoc);
+}
+
+TEST(SortPushEach, MixedSortWhole) {
+ Document doc(fromjson("{a: [3, 't', {b:1}, {a:1}]}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [4, -1], $sort:1}}}"));
+ const BSONObj expectedObj = fromjson("{a: [-1,3,4,'t', {a:1}, {b:1}]}");
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(expectedObj, doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(BSON("$set" << expectedObj), logDoc);
+}
+
+TEST(SortPushEach, MixedSortWholeReverse) {
+ Document doc(fromjson("{a: [3, 't', {b:1}, {a:1}]}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [4, -1], $sort:-1}}}"));
+ const BSONObj expectedObj = fromjson("{a: [{b:1}, {a:1}, 't', 4, 3, -1]}");
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(expectedObj, doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(BSON("$set" << expectedObj), logDoc);
+}
+
+TEST(SortPushEach, MixedSortEmbeddedField) {
+ Document doc(fromjson("{a: [3, 't', {b:1}, {a:1}]}"));
+ Mod pushMod(fromjson("{$push: {a: {$each: [4, -1], $sort:{a:1}}}}"));
+ const BSONObj expectedObj = fromjson("{a: [3, 't', {b: 1}, 4, -1, {a: 1}]}");
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(expectedObj, doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(BSON("$set" << expectedObj), logDoc);
+}
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [{b:1}]}"), doc);
+/**
+ * This fixture supports building $push mods with parameterized $each arrays and $slices.
+ * It always assumes that the array being operated on is called 'a'. To build a mod, one
+ * issues a set*Mod() call.
+ *
+ * The setSimpleMod() call will build a $each array of numbers. The setSortMod() call
+ * will build a $each array of objects along with a $sort pattern. Both these calls take
+ * the slice as a parameter as well.
+ *
+ * Here's a typical test case flow:
+ * + Determine what the original document's 'a' array would contain
+ * + Ditto for the $push's $each array
+ * + Loop over slice value
+ * + Apply the $push with current slice value to the doc
+ * + Use the fixture/helpers to combine and slice the mod's and original's 'a'
+ * array
+ * + Build a document with the above and check against the one generated by the mod apply
+ */
+class SlicedMod : public mongo::unittest::Test {
+public:
+ SlicedMod() : _mod() {}
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [{b:1}]}}"), logDoc);
+ virtual void setUp() {
+        // No-op; all state is set via the set*Mod() calls.
}
- TEST(SimpleObjMod, PrepareApplyInexistent) {
- Document doc(fromjson("{}"));
- Mod pushMod(fromjson("{$push: {a: {b: 1}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ /** Sets up the mod to be {$push: {a: {$each: [<eachArray>], $slice: <slice>}}} */
+ void setSimpleMod(int32_t slice, const vector<int>& eachArray) {
+ BSONArrayBuilder arrBuilder;
+ for (vector<int>::const_iterator it = eachArray.begin(); it != eachArray.end(); ++it) {
+ arrBuilder.append(*it);
+ }
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [{b:1}]}"), doc);
+ _modObj =
+ BSON("$push" << BSON("a" << BSON("$each" << arrBuilder.arr() << "$slice" << slice)));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [{b:1}]}}"), logDoc);
+ ASSERT_OK(_mod.init(_modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(SimpleObjMod, PrepareApplyNormal) {
- Document doc(fromjson("{a: [{b:0}]}"));
- Mod pushMod(fromjson("{$push: {a: {b: 1}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ /** Sets up the mod to be {$push: {a: {$each:[<Obj>,...], $slice:<slice>, $sort:<Obj>}}} */
+ void setSortMod(int32_t slice, const vector<BSONObj>& eachArray, BSONObj sort) {
+ BSONArrayBuilder arrBuilder;
+ for (vector<BSONObj>::const_iterator it = eachArray.begin(); it != eachArray.end(); ++it) {
+ arrBuilder.append(*it);
+ }
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [{b:0},{b:1}]}"), doc);
+ _modObj = BSON("$push" << BSON("a" << BSON("$each" << arrBuilder.arr() << "$slice" << slice
+ << "$sort" << sort)));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a.1':{b:1}}}"), logDoc);
+ ASSERT_OK(_mod.init(_modObj["$push"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(SimpleObjMod, PrepareApplyDotted) {
- Document doc(fromjson("{ _id : 1 , "
- " question : 'a', "
- " choices : { "
- " first : { choice : 'b' }, "
- " second : { choice : 'c' } }"
- "}"));
- Mod pushMod(fromjson("{$push: {'choices.first.votes': 1}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ /** Returns an object {a: [<'vec's content>]} */
+ BSONObj getObjectUsing(const vector<int>& vec) {
+ BSONArrayBuilder arrBuilder;
+ for (vector<int>::const_iterator it = vec.begin(); it != vec.end(); ++it) {
+ arrBuilder.append(*it);
+ }
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "choices.first.votes");
- ASSERT_FALSE(execInfo.noOp);
+ BSONObjBuilder builder;
+ builder.appendArray("a", arrBuilder.obj());
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson( "{ _id : 1 , "
- " question : 'a', "
- " choices : { "
- " first : { choice : 'b', votes: [1]}, "
- " second : { choice : 'c' } }"
- "}"),
- doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'choices.first.votes':[1]}}"), logDoc);
+ return builder.obj();
}
+ /** Returns an object {a: [<'vec's content>]} */
+ BSONObj getObjectUsing(const vector<BSONObj>& vec) {
+ BSONArrayBuilder arrBuilder;
+ for (vector<BSONObj>::const_iterator it = vec.begin(); it != vec.end(); ++it) {
+ arrBuilder.append(*it);
+ }
- //
- // $pushAll Variation
- //
-
- TEST(PushAll, PrepareApplyEmpty) {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$pushAll: {a: [1]}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{a: [1]}"));
+ BSONObjBuilder builder;
+ builder.appendArray("a", arrBuilder.obj());
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(logDoc, fromjson("{$set: {a: [1]}}"));
+ return builder.obj();
}
- TEST(PushAll, PrepareApplyInexistent) {
- Document doc(fromjson("{}"));
- Mod pushMod(fromjson("{$pushAll: {a: [1]}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{a: [1]}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(logDoc, fromjson("{$set: {a: [1]}}"));
+ ModifierPush& mod() {
+ return _mod;
}
- TEST(PushAll, PrepareApplyNormal) {
- Document doc(fromjson("{a: [0]}"));
- Mod pushMod(fromjson("{$pushAll: {a: [1,2]}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{a: [0,1,2]}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(logDoc, fromjson("{$set: {'a.1': 1, 'a.2':2}}"));
+ BSONObj modObj() {
+ return _modObj;
}
- //
- // Simple $each mod
- //
+private:
+ ModifierPush _mod;
+ BSONObj _modObj;
+ vector<int> _eachArray;
+};
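+// Illustrative flow (hypothetical values), following the class comment above: with an
+// original array of [1, 2], setSimpleMod(-2, [4, 5]) builds
+// {$push: {a: {$each: [4, 5], $slice: -2}}}; after prepare()/apply() the document
+// becomes {a: [4, 5]}, matching combineVec([1, 2], [4, 5], -2, &expected).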
- TEST(SimpleEachMod, PrepareNonArray) {
- Document doc(fromjson("{a: 1}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [1]}}}"));
+TEST_F(SlicedMod, SimpleArrayFromEmpty) {
+ // We'll simulate the original document having {a: []} and the mod being
+ // {$push: {a: {$each: [1], $slice: <-2..0>}}}
+ vector<int> docArray;
+ vector<int> eachArray;
+ eachArray.push_back(1);
- ModifierInterface::ExecInfo dummy;
- ASSERT_NOT_OK(pushMod.prepare(doc.root(), "", &dummy));
- }
-
- TEST(SimpleEachMod, PrepareApplyEmpty) {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [1]}}}"));
+ for (int32_t slice = -2; slice <= 0; slice++) {
+ setSimpleMod(slice, eachArray);
+ Document doc(getObjectUsing(docArray /* {a: []} */));
ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(mod().prepare(doc.root(), "", &execInfo));
ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(pushMod.apply());
+ ASSERT_OK(mod().apply());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
+ vector<int> combinedVec;
+ combineVec(docArray, /* a: [] */
+ eachArray, /* a: [1] */
+ slice,
+ &combinedVec);
+ ASSERT_EQUALS(getObjectUsing(combinedVec), doc);
}
+}
- TEST(SimpleEachMod, PrepareApplyInexistent) {
- Document doc(fromjson("{}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [1]}}}"));
+TEST_F(SlicedMod, SimpleArrayFromExisting) {
+ // We'll simulate the original document having {a: [2,3]} and the mod being
+ // {$push: {a: {$each: [1], $slice: <-4..0>}}}
+ vector<int> docArray;
+ docArray.push_back(2);
+ docArray.push_back(3);
+ vector<int> eachArray;
+ eachArray.push_back(1);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [1]}}"), logDoc);
- }
-
- TEST(SimpleEachMod, PrepareApplyInexistentMultiple) {
- Document doc(fromjson("{}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [1, 2]}}}"));
+ for (int32_t slice = -4; slice <= 0; slice++) {
+ setSimpleMod(slice, eachArray);
+ Document doc(getObjectUsing(docArray /* {a: [2, 3]} */));
ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(mod().prepare(doc.root(), "", &execInfo));
ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(pushMod.apply());
+ ASSERT_OK(mod().apply());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1, 2]}"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [1, 2]}}"), logDoc);
- }
-
- TEST(SimpleEachMod, PrepareApplyNormal) {
- Document doc(fromjson("{a: [0]}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [1]}}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ vector<int> combinedVec;
+ combineVec(docArray, /* a: [2, 3] */
+ eachArray, /* a: [1] */
+ slice,
+ &combinedVec);
+ ASSERT_EQUALS(getObjectUsing(combinedVec), doc);
+ }
+}
+
+TEST_F(SlicedMod, ObjectArrayFromEmpty) {
+ // We'll simulate the original document having {a: []} and the mod being
+    // {$push: {a: {$each: [{a:2,b:1},{a:1,b:2}], $slice: <-3..3>, $sort: {a:-1/1,b:-1/1}}}}
+ vector<BSONObj> docArray;
+ vector<BSONObj> eachArray;
+ eachArray.push_back(fromjson("{a:2,b:1}"));
+ eachArray.push_back(fromjson("{a:1,b:2}"));
+
+ for (int32_t aOrB = 0; aOrB < 2; aOrB++) {
+ for (int32_t sortA = 0; sortA < 2; sortA++) {
+ for (int32_t sortB = 0; sortB < 2; sortB++) {
+ for (int32_t slice = -3; slice <= 3; slice++) {
+ BSONObj sortOrder;
+ if (aOrB == 0) {
+ sortOrder = BSON("a" << (sortA ? 1 : -1) << "b" << (sortB ? 1 : -1));
+ } else {
+ sortOrder = BSON("b" << (sortB ? 1 : -1) << "a" << (sortA ? 1 : -1));
+ }
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ setSortMod(slice, eachArray, sortOrder);
+ Document doc(getObjectUsing(docArray /* {a: []} */));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [0,1]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod().prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a.1': 1}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(SimpleEachMod, PrepareApplyNormalMultiple) {
- Document doc(fromjson("{a: [0]}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [1,2]}}}"));
+ ASSERT_OK(mod().apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ vector<BSONObj> combinedVec;
+ combineAndSortVec(docArray, /* a: [] */
+ eachArray, /* a: [{a:2,b:1},{a:1,b:2}] */
+ slice,
+ sortOrder,
+ &combinedVec);
+ ASSERT_EQUALS(getObjectUsing(combinedVec), doc);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [0,1,2]}"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a.1': 1, 'a.2':2}}"), logDoc);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod().log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(BSON("$set" << getObjectUsing(combinedVec)), logDoc);
+ }
+ }
+ }
}
+}
+
+TEST_F(SlicedMod, ObjectArrayFromExisting) {
+    // We'll simulate the original document having {a: [{a:2,b:3},{a:3,b:1}]} and the mod being
+    // {$push: {a: {$each: [{a:2,b:1}], $slice: <-4..4>, $sort: {a:-1/1,b:-1/1}}}}
+ vector<BSONObj> docArray;
+ docArray.push_back(fromjson("{a:2,b:3}"));
+ docArray.push_back(fromjson("{a:3,b:1}"));
+ vector<BSONObj> eachArray;
+ eachArray.push_back(fromjson("{a:2,b:1}"));
+
+ for (int32_t aOrB = 0; aOrB < 2; aOrB++) {
+ for (int32_t sortA = 0; sortA < 2; sortA++) {
+ for (int32_t sortB = 0; sortB < 2; sortB++) {
+ for (int32_t slice = -4; slice <= 4; slice++) {
+ BSONObj sortOrder;
+ if (aOrB == 0) {
+ sortOrder = BSON("a" << (sortA ? 1 : -1) << "b" << (sortB ? 1 : -1));
+ } else {
+ sortOrder = BSON("b" << (sortB ? 1 : -1) << "a" << (sortA ? 1 : -1));
+ }
- /**
- * Slice variants
- */
- TEST(SlicePushEach, TopOne) {
- Document doc(fromjson("{a: [3]}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [2, -1], $slice:1}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [3]}"), doc);
+ setSortMod(slice, eachArray, sortOrder);
+                    Document doc(getObjectUsing(docArray /* {a: [{a:2,b:3},{a:3,b:1}]} */));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [3]}}"), logDoc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(mod().prepare(doc.root(), "", &execInfo));
- /**
- * Sort for scalar (whole) array elements
- */
- TEST(SortPushEach, NumberSort) {
- Document doc(fromjson("{a: [3]}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [2, -1], $sort:1}}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(mod().apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ vector<BSONObj> combinedVec;
+ combineAndSortVec(docArray, /* a: [{a:2,b:3},{a:3,b:1}] */
+ eachArray, /* a: [{a:2,b:1}] */
+ slice,
+ sortOrder,
+ &combinedVec);
+ ASSERT_EQUALS(getObjectUsing(combinedVec), doc);
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [-1,2,3]}"), doc);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(mod().log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(BSON("$set" << getObjectUsing(combinedVec)), logDoc);
+ }
+ }
+ }
+ }
+}
+
+// Push to position tests
+
+TEST(ToPosition, BadInputs) {
+ const char* const bad[] = {
+ "{$push: {a: { $each: [1], $position:-1}}}",
+ "{$push: {a: { $each: [1], $position:'s'}}}",
+ "{$push: {a: { $each: [1], $position:{}}}}",
+ "{$push: {a: { $each: [1], $position:[0]}}}",
+ "{$push: {a: { $each: [1], $position:1.1211212}}}",
+ "{$push: {a: { $each: [1], $position:3.000000000001}}}",
+ "{$push: {a: { $each: [1], $position:1.2}}}",
+ "{$push: {a: { $each: [1], $position:-1.2}}}",
+ "{$push: {a: { $each: [1], $position:9.0e19}}}",
+ "{$push: {a: { $each: [1], $position:-9.0e19}}}",
+ NULL,
+ };
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [-1, 2, 3]}}"), logDoc);
+ int i = 0;
+ while (bad[i] != NULL) {
+ ModifierPush pushMod(ModifierPush::PUSH_NORMAL);
+ BSONObj modObj = fromjson(bad[i]);
+ ASSERT_NOT_OK(pushMod.init(modObj.firstElement().embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+ i++;
}
+}
- TEST(SortPushEach, NumberSortReverse) {
- Document doc(fromjson("{a: [3]}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [4, -1], $sort:-1}}}"));
+TEST(ToPosition, GoodInputs) {
+ {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position: NumberLong(1)}}}"));
ModifierInterface::ExecInfo execInfo;
ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [4,3,-1]}"), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: [4,3,-1]}}"), logDoc);
}
-
- TEST(SortPushEach, MixedSortWhole) {
- Document doc(fromjson("{a: [3, 't', {b:1}, {a:1}]}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [4, -1], $sort:1}}}"));
- const BSONObj expectedObj = fromjson("{a: [-1,3,4,'t', {a:1}, {b:1}]}");
+ {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:NumberInt(100)}}}"));
ModifierInterface::ExecInfo execInfo;
ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(expectedObj, doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(BSON("$set" << expectedObj), logDoc);
}
-
- TEST(SortPushEach, MixedSortWholeReverse) {
- Document doc(fromjson("{a: [3, 't', {b:1}, {a:1}]}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [4, -1], $sort:-1}}}"));
- const BSONObj expectedObj = fromjson("{a: [{b:1}, {a:1}, 't', 4, 3, -1]}");
+ {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:1.0}}}"));
ModifierInterface::ExecInfo execInfo;
ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(expectedObj, doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(BSON("$set" << expectedObj), logDoc);
}
-
- TEST(SortPushEach, MixedSortEmbeddedField) {
- Document doc(fromjson("{a: [3, 't', {b:1}, {a:1}]}"));
- Mod pushMod(fromjson("{$push: {a: {$each: [4, -1], $sort:{a:1}}}}"));
- const BSONObj expectedObj = fromjson("{a: [3, 't', {b: 1}, 4, -1, {a: 1}]}");
+ {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:1000000}}}"));
ModifierInterface::ExecInfo execInfo;
ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(expectedObj, doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(BSON("$set" << expectedObj), logDoc);
- }
-
- /**
- * This fixture supports building $push mods with parameterized $each arrays and $slices.
- * It always assume that the array being operated on is called 'a'. To build a mod, one
- * issues a set*Mod() call.
- *
- * The setSimpleMod() call will build a $each array of numbers. The setObjectMod() call
- * will build a $each array with object. Both these calls take the slice as a parameter as
- * well.
- *
- * Here's a typical test case flow:
- * + Determine what the original document's 'a' array would contain
- * + Ditto for the $push's $each arrray
- * + Loop over slice value
- * + Apply the $push with current slice value to the doc
- * + Use the fixture/helpers to combine and slice the mod's and original's 'a'
- * array
- * + Build a document with the above and check against the one generated by the mod apply
- */
- class SlicedMod : public mongo::unittest::Test {
- public:
- SlicedMod() : _mod() {}
-
- virtual void setUp() {
- // no op; set all state using the setMod() call
- }
-
- /** Sets up the mod to be {$push: {a: {$each: [<eachArray>], $slice: <slice>}}} */
- void setSimpleMod(int32_t slice, const vector<int>& eachArray) {
-
- BSONArrayBuilder arrBuilder;
- for (vector<int>::const_iterator it = eachArray.begin(); it != eachArray.end(); ++it) {
- arrBuilder.append(*it);
- }
-
- _modObj = BSON("$push" <<
- BSON("a"
- << BSON("$each" << arrBuilder.arr() <<
- "$slice" << slice)));
-
- ASSERT_OK(_mod.init(_modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- /** Sets up the mod to be {$push: {a: {$each:[<Obj>,...], $slice:<slice>, $sort:<Obj>}}} */
- void setSortMod(int32_t slice, const vector<BSONObj>& eachArray, BSONObj sort) {
-
- BSONArrayBuilder arrBuilder;
- for (vector<BSONObj>::const_iterator it = eachArray.begin();
- it != eachArray.end();
- ++it) {
- arrBuilder.append(*it);
- }
-
- _modObj = BSON("$push" <<
- BSON("a"
- << BSON("$each" << arrBuilder.arr() <<
- "$slice" << slice <<
- "$sort" << sort)));
-
- ASSERT_OK(_mod.init(_modObj["$push"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- /** Returns an object {a: [<'vec's content>]} */
- BSONObj getObjectUsing(const vector<int>& vec) {
-
- BSONArrayBuilder arrBuilder;
- for (vector<int>::const_iterator it = vec.begin(); it != vec.end(); ++it) {
- arrBuilder.append(*it);
- }
-
- BSONObjBuilder builder;
- builder.appendArray("a", arrBuilder.obj());
-
- return builder.obj();
- }
-
- /** Returns an object {a: [<'vec's content>]} */
- BSONObj getObjectUsing(const vector<BSONObj>& vec) {
-
- BSONArrayBuilder arrBuilder;
- for (vector<BSONObj>::const_iterator it = vec.begin(); it != vec.end(); ++it) {
- arrBuilder.append(*it);
- }
-
- BSONObjBuilder builder;
- builder.appendArray("a", arrBuilder.obj());
-
- return builder.obj();
- }
-
- ModifierPush& mod() { return _mod; }
-
- BSONObj modObj() { return _modObj; }
-
- private:
- ModifierPush _mod;
- BSONObj _modObj;
- vector<int> _eachArray;
- };
-
- TEST_F(SlicedMod, SimpleArrayFromEmpty) {
- // We'll simulate the original document having {a: []} and the mod being
- // {$push: {a: {$each: [1], $slice: <-2..0>}}}
- vector<int> docArray;
- vector<int> eachArray;
- eachArray.push_back(1);
-
- for (int32_t slice = -2; slice <= 0; slice++) {
- setSimpleMod(slice, eachArray);
- Document doc(getObjectUsing(docArray /* {a: []} */));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod().prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod().apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- vector<int> combinedVec;
- combineVec(docArray, /* a: [] */
- eachArray, /* a: [1] */
- slice,
- &combinedVec);
- ASSERT_EQUALS(getObjectUsing(combinedVec), doc);
- }
- }
-
- TEST_F(SlicedMod, SimpleArrayFromExisting) {
- // We'll simulate the original document having {a: [2,3]} and the mod being
- // {$push: {a: {$each: [1], $slice: <-4..0>}}}
- vector<int> docArray;
- docArray.push_back(2);
- docArray.push_back(3);
- vector<int> eachArray;
- eachArray.push_back(1);
-
- for (int32_t slice = -4; slice <= 0; slice++) {
- setSimpleMod(slice, eachArray);
- Document doc(getObjectUsing(docArray /* {a: [2, 3]} */));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod().prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod().apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- vector<int> combinedVec;
- combineVec(docArray, /* a: [2, 3] */
- eachArray, /* a: [1] */
- slice,
- &combinedVec);
- ASSERT_EQUALS(getObjectUsing(combinedVec), doc);
- }
- }
-
- TEST_F(SlicedMod, ObjectArrayFromEmpty) {
- // We'll simulate the original document having {a: []} and the mod being
- // {$push: {a: {$each: [{a:2,b:1}], $slice: <-4..0>}, $sort: {a:-1/1,b:-1/1}}
- vector<BSONObj> docArray;
- vector<BSONObj> eachArray;
- eachArray.push_back(fromjson("{a:2,b:1}"));
- eachArray.push_back(fromjson("{a:1,b:2}"));
-
- for (int32_t aOrB = 0; aOrB < 2 ; aOrB++) {
- for (int32_t sortA = 0; sortA < 2; sortA++) {
- for (int32_t sortB = 0; sortB < 2; sortB++) {
- for (int32_t slice = -3; slice <= 3; slice++) {
-
- BSONObj sortOrder;
- if (aOrB == 0) {
- sortOrder = BSON("a" << (sortA ? 1 : -1) << "b" << (sortB ? 1 : -1));
- }
- else {
- sortOrder = BSON("b" << (sortB ? 1 : -1) << "a" << (sortA ? 1 : -1));
- }
-
- setSortMod(slice, eachArray, sortOrder);
- Document doc(getObjectUsing(docArray /* {a: []} */));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod().prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod().apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- vector<BSONObj> combinedVec;
- combineAndSortVec(docArray, /* a: [] */
- eachArray, /* a: [{a:2,b:1},{a:1,b:2}] */
- slice,
- sortOrder,
- &combinedVec);
- ASSERT_EQUALS(getObjectUsing(combinedVec), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod().log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(BSON("$set" << getObjectUsing(combinedVec)), logDoc);
-
- }
- }
- }
- }
- }
-
- TEST_F(SlicedMod, ObjectArrayFromExisting) {
- // We'll simulate the original document having {a: [{a:2,b:3},{a:3,:b1}]} and the mod being
- // {$push: {a: {$each: [{a:2,b:1}], $slice: <-4..0>}, $sort: {a:-1/1,b:-1/1}}
- vector<BSONObj> docArray;
- docArray.push_back(fromjson("{a:2,b:3}"));
- docArray.push_back(fromjson("{a:3,b:1}"));
- vector<BSONObj> eachArray;
- eachArray.push_back(fromjson("{a:2,b:1}"));
-
- for (int32_t aOrB = 0; aOrB < 2 ; aOrB++) {
- for (int32_t sortA = 0; sortA < 2; sortA++) {
- for (int32_t sortB = 0; sortB < 2; sortB++) {
- for (int32_t slice = -4; slice <= 4; slice++) {
-
- BSONObj sortOrder;
- if (aOrB == 0) {
- sortOrder = BSON("a" << (sortA ? 1 : -1) << "b" << (sortB ? 1 : -1));
- }
- else {
- sortOrder = BSON("b" << (sortB ? 1 : -1) << "a" << (sortA ? 1 : -1));
- }
-
- setSortMod(slice, eachArray, sortOrder);
- Document doc(getObjectUsing(docArray /* {a: [{a:2,b:b},{a:3,:b1}]} */));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(mod().prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(mod().apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
-
- vector<BSONObj> combinedVec;
- combineAndSortVec(docArray, /* a: [{a:2,b:3},{a:3,:b1}] */
- eachArray, /* a: [{a:2,b:1}] */
- slice,
- sortOrder,
- &combinedVec);
- ASSERT_EQUALS(getObjectUsing(combinedVec), doc);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(mod().log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(BSON("$set" << getObjectUsing(combinedVec)), logDoc);
-
- }
- }
- }
- }
- }
-
- // Push to position tests
-
- TEST(ToPosition, BadInputs) {
- const char* const bad[] = {
- "{$push: {a: { $each: [1], $position:-1}}}",
- "{$push: {a: { $each: [1], $position:'s'}}}",
- "{$push: {a: { $each: [1], $position:{}}}}",
- "{$push: {a: { $each: [1], $position:[0]}}}",
- "{$push: {a: { $each: [1], $position:1.1211212}}}",
- "{$push: {a: { $each: [1], $position:3.000000000001}}}",
- "{$push: {a: { $each: [1], $position:1.2}}}",
- "{$push: {a: { $each: [1], $position:-1.2}}}",
- "{$push: {a: { $each: [1], $position:9.0e19}}}",
- "{$push: {a: { $each: [1], $position:-9.0e19}}}",
- NULL,
- };
-
- int i = 0;
- while(bad[i] != NULL)
- {
- ModifierPush pushMod(ModifierPush::PUSH_NORMAL);
- BSONObj modObj = fromjson(bad[i]);
- ASSERT_NOT_OK(pushMod.init(modObj.firstElement().embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- i++;
- }
}
-
- TEST(ToPosition, GoodInputs) {
- {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position: NumberLong(1)}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- }
- {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:NumberInt(100)}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- }
- {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:1.0}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- }
- {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:1000000}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- }
- {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:0}}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- }
- }
-
- TEST(ToPosition, EmptyArrayFront) {
+ {
Document doc(fromjson("{a: []}"));
Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:0}}}"));
ModifierInterface::ExecInfo execInfo;
ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ }
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(ToPosition, EmptyArrayFront) {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:0}}}"));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a':[1]}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(ToPosition, EmptyArrayBackBigPosition) {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:1000}}}"));
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a':[1]}}"), logDoc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(ToPosition, EmptyArrayBackBigPosition) {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:1000}}}"));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a':[1]}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(ToPosition, EmptyArrayBack) {
- Document doc(fromjson("{a: []}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:1}}}"));
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a':[1]}}"), logDoc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(ToPosition, EmptyArrayBack) {
+ Document doc(fromjson("{a: []}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:1}}}"));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a':[1]}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(ToPosition, Front) {
- Document doc(fromjson("{a: [0]}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:0}}}"));
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1]}"), doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a':[1]}}"), logDoc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(ToPosition, Front) {
+ Document doc(fromjson("{a: [0]}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:0}}}"));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [1, 0]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a':[1, 0]}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(ToPosition, Back) {
- Document doc(fromjson("{a: [0]}"));
- Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:100}}}"));
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [1, 0]}"), doc);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a':[1, 0]}}"), logDoc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(ToPosition, Back) {
+ Document doc(fromjson("{a: [0]}"));
+ Mod pushMod(fromjson("{$push: {a: { $each: [1], $position:100}}}"));
- ASSERT_OK(pushMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [0,1]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(pushMod.prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(pushMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a.1':1}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(pushMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [0,1]}"), doc);
-} // unnamed namespace
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(pushMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a.1':1}}"), logDoc);
+}
+
+} // unnamed namespace
diff --git a/src/mongo/db/ops/modifier_rename.cpp b/src/mongo/db/ops/modifier_rename.cpp
index 13ec0c70beb..123b253f227 100644
--- a/src/mongo/db/ops/modifier_rename.cpp
+++ b/src/mongo/db/ops/modifier_rename.cpp
@@ -38,279 +38,258 @@
namespace mongo {
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- struct ModifierRename::PreparedState {
+struct ModifierRename::PreparedState {
+ PreparedState(mutablebson::Element root)
+ : doc(root.getDocument()),
+ fromElemFound(doc.end()),
+ toIdxFound(0),
+ toElemFound(doc.end()),
+ applyCalled(false) {}
- PreparedState(mutablebson::Element root)
- : doc(root.getDocument())
- , fromElemFound(doc.end())
- , toIdxFound(0)
- , toElemFound(doc.end())
- , applyCalled(false){
- }
-
- // Document that is going to be changed.
- mutablebson::Document& doc;
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
- // The element to rename
- mutablebson::Element fromElemFound;
+ // The element to rename
+ mutablebson::Element fromElemFound;
- // Index in _fieldRef for which an Element exist in the document.
- size_t toIdxFound;
+ // Index in _toFieldRef for which an Element exists in the document.
+ size_t toIdxFound;
- // Element to remove (in the destination position)
- mutablebson::Element toElemFound;
+ // Element to remove (in the destination position)
+ mutablebson::Element toElemFound;
- // Was apply called?
- bool applyCalled;
+ // Was apply called?
+ bool applyCalled;
+};
- };
+ModifierRename::ModifierRename() : _fromFieldRef(), _toFieldRef() {}
- ModifierRename::ModifierRename()
- : _fromFieldRef()
- , _toFieldRef() {
- }
+ModifierRename::~ModifierRename() {}
- ModifierRename::~ModifierRename() {
+Status ModifierRename::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ if (modExpr.type() != String) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The 'to' field for $rename must be a string: " << modExpr);
}
- Status ModifierRename::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
-
- if (modExpr.type() != String) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The 'to' field for $rename must be a string: "
- << modExpr);
- }
+ // Extract the field names from the mod expression
- // Extract the field names from the mod expression
+ _fromFieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fromFieldRef);
+ if (!status.isOK())
+ return status;
- _fromFieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fromFieldRef);
- if (!status.isOK())
- return status;
+ _toFieldRef.parse(modExpr.String());
+ status = fieldchecker::isUpdatable(_toFieldRef);
+ if (!status.isOK())
+ return status;
- _toFieldRef.parse(modExpr.String());
- status = fieldchecker::isUpdatable(_toFieldRef);
- if (!status.isOK())
+ // TODO: Remove this restriction and make this a noOp to lift the restriction
+ // Old restriction is that if the fields are the same then it is not allowed.
+ if (_fromFieldRef == _toFieldRef)
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "The source and target field for $rename must differ: " << modExpr);
+
+ // TODO: Remove this restriction by allowing moving deeper from the 'from' path
+ // Old restriction is that if the to/from fields are on the same path it fails
+ if (_fromFieldRef.isPrefixOf(_toFieldRef) || _toFieldRef.isPrefixOf(_fromFieldRef)) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The source and target field for $rename must "
+ "not be on the same path: " << modExpr);
+ }
+ // TODO: We can remove this restriction as long as there is only one,
+ // or it is the same array -- should think on this a bit.
+ //
+ // If a $-positional operator was used it is an error
+ size_t dummyPos;
+ if (fieldchecker::isPositional(_fromFieldRef, &dummyPos))
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The source field for $rename may not be dynamic: "
+ << _fromFieldRef.dottedField());
+ else if (fieldchecker::isPositional(_toFieldRef, &dummyPos))
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The destination field for $rename may not be dynamic: "
+ << _toFieldRef.dottedField());
+
+ if (positional)
+ *positional = false;
+
+ return Status::OK();
+}
+
+Status ModifierRename::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ // Rename doesn't work with positional fields ($)
+ dassert(matchedField.empty());
+
+ _preparedState.reset(new PreparedState(root));
+
+ // Locate the 'from' field name in 'root'; if it does not exist the mod is a no-op.
+ size_t fromIdxFound;
+ Status status = pathsupport::findLongestPrefix(
+ _fromFieldRef, root, &fromIdxFound, &_preparedState->fromElemFound);
+
+ const bool sourceExists =
+ (_preparedState->fromElemFound.ok() && fromIdxFound == (_fromFieldRef.numParts() - 1));
+
+ // If we can't find the full element in the from field then we can't do anything.
+ if (!status.isOK() || !sourceExists) {
+ execInfo->noOp = true;
+ _preparedState->fromElemFound = root.getDocument().end();
+
+ // TODO: remove this special case from existing behavior
+ if (status.code() == ErrorCodes::PathNotViable) {
return status;
-
- // TODO: Remove this restriction and make a noOp to lift restriction
- // Old restriction is that if the fields are the same then it is not allowed.
- if (_fromFieldRef == _toFieldRef)
- return Status(ErrorCodes::BadValue,
- str::stream() << "The source and target field for $rename must differ: "
- << modExpr);
-
- // TODO: Remove this restriction by allowing moving deeping from the 'from' path
- // Old restriction is that if the to/from is on the same path it fails
- if (_fromFieldRef.isPrefixOf(_toFieldRef) || _toFieldRef.isPrefixOf(_fromFieldRef)){
- return Status(ErrorCodes::BadValue,
- str::stream() << "The source and target field for $rename must "
- "not be on the same path: "
- << modExpr);
}
- // TODO: We can remove this restriction as long as there is only one,
- // or it is the same array -- should think on this a bit.
- //
- // If a $-positional operator was used it is an error
- size_t dummyPos;
- if (fieldchecker::isPositional(_fromFieldRef, &dummyPos))
- return Status(ErrorCodes::BadValue,
- str::stream() << "The source field for $rename may not be dynamic: "
- << _fromFieldRef.dottedField());
- else if (fieldchecker::isPositional(_toFieldRef, &dummyPos))
- return Status(ErrorCodes::BadValue,
- str::stream() << "The destination field for $rename may not be dynamic: "
- << _toFieldRef.dottedField());
-
- if (positional)
- *positional = false;
return Status::OK();
}
- Status ModifierRename::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
- // Rename doesn't work with positional fields ($)
- dassert(matchedField.empty());
-
- _preparedState.reset(new PreparedState(root));
-
- // Locate the to field name in 'root', which must exist.
- size_t fromIdxFound;
- Status status = pathsupport::findLongestPrefix(_fromFieldRef,
- root,
- &fromIdxFound,
- &_preparedState->fromElemFound);
+ // Ensure no array in ancestry if what we found is not at the root
+ mutablebson::Element curr = _preparedState->fromElemFound.parent();
+ if (curr != curr.getDocument().root())
+ while (curr.ok() && (curr != curr.getDocument().root())) {
+ if (curr.getType() == Array)
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The source field cannot be an array element, '"
+ << _fromFieldRef.dottedField() << "' in doc with "
+ << findElementNamed(root.leftChild(), "_id").toString()
+ << " has an array field called '" << curr.getFieldName()
+ << "'");
+ curr = curr.parent();
+ }
- const bool sourceExists = (_preparedState->fromElemFound.ok() &&
- fromIdxFound == (_fromFieldRef.numParts() - 1));
+ // "To" side validation below
- // If we can't find the full element in the from field then we can't do anything.
- if (!status.isOK() || !sourceExists) {
- execInfo->noOp = true;
- _preparedState->fromElemFound = root.getDocument().end();
+ status = pathsupport::findLongestPrefix(
+ _toFieldRef, root, &_preparedState->toIdxFound, &_preparedState->toElemFound);
- // TODO: remove this special case from existing behavior
- if (status.code() == ErrorCodes::PathNotViable) {
- return status;
- }
+ // findLongestPrefix may return PathNotViable or any other error; if so we cannot proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ // Not an error condition as we will create the "to" path as needed.
+ } else if (!status.isOK()) {
+ return status;
+ }
- return Status::OK();
+ const bool destExists = _preparedState->toElemFound.ok() &&
+ (_preparedState->toIdxFound == (_toFieldRef.numParts() - 1));
+
+ // Ensure no array in ancestry of "to" Element
+ // Set to either parent, or node depending on if the full path element was found
+ curr = (destExists ? _preparedState->toElemFound.parent() : _preparedState->toElemFound);
+ if (curr != curr.getDocument().root()) {
+ while (curr.ok()) {
+ if (curr.getType() == Array)
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The destination field cannot be an array element, '"
+ << _fromFieldRef.dottedField() << "' in doc with "
+ << findElementNamed(root.leftChild(), "_id").toString()
+ << " has an array field called '" << curr.getFieldName()
+ << "'");
+ curr = curr.parent();
}
+ }
- // Ensure no array in ancestry if what we found is not at the root
- mutablebson::Element curr = _preparedState->fromElemFound.parent();
- if (curr != curr.getDocument().root())
- while (curr.ok() && (curr != curr.getDocument().root())) {
- if (curr.getType() == Array)
- return Status(ErrorCodes::BadValue,
- str::stream() << "The source field cannot be an array element, '"
- << _fromFieldRef.dottedField() << "' in doc with "
- << findElementNamed(root.leftChild(), "_id").toString()
- << " has an array field called '" << curr.getFieldName() << "'");
- curr = curr.parent();
- }
-
- // "To" side validation below
-
- status = pathsupport::findLongestPrefix(_toFieldRef,
- root,
- &_preparedState->toIdxFound,
- &_preparedState->toElemFound);
-
- // FindLongestPrefix may return not viable or any other error and then we cannot proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- // Not an error condition as we will create the "to" path as needed.
- } else if (!status.isOK()) {
- return status;
- }
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_fromFieldRef;
+ execInfo->fieldRef[1] = &_toFieldRef;
- const bool destExists = _preparedState->toElemFound.ok() &&
- (_preparedState->toIdxFound == (_toFieldRef.numParts()-1));
-
- // Ensure no array in ancestry of "to" Element
- // Set to either parent, or node depending on if the full path element was found
- curr = (destExists ? _preparedState->toElemFound.parent() : _preparedState->toElemFound);
- if (curr != curr.getDocument().root()) {
- while (curr.ok()) {
- if (curr.getType() == Array)
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "The destination field cannot be an array element, '"
- << _fromFieldRef.dottedField() << "' in doc with "
- << findElementNamed(root.leftChild(), "_id").toString()
- << " has an array field called '" << curr.getFieldName() << "'");
- curr = curr.parent();
- }
- }
+ execInfo->noOp = false;
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_fromFieldRef;
- execInfo->fieldRef[1] = &_toFieldRef;
+ return Status::OK();
+}
- execInfo->noOp = false;
+Status ModifierRename::apply() const {
+ dassert(_preparedState->fromElemFound.ok());
- return Status::OK();
- }
+ _preparedState->applyCalled = true;
- Status ModifierRename::apply() const {
- dassert(_preparedState->fromElemFound.ok());
+ // Remove from source
+ Status removeStatus = _preparedState->fromElemFound.remove();
+ if (!removeStatus.isOK()) {
+ return removeStatus;
+ }
- _preparedState->applyCalled = true;
+ // If there's no need to create any further field part, the op is simply a value
+ // assignment.
+ const bool destExists = _preparedState->toElemFound.ok() &&
+ (_preparedState->toIdxFound == (_toFieldRef.numParts() - 1));
- // Remove from source
- Status removeStatus = _preparedState->fromElemFound.remove();
+ if (destExists) {
+ removeStatus = _preparedState->toElemFound.remove();
if (!removeStatus.isOK()) {
return removeStatus;
}
+ }
- // If there's no need to create any further field part, the op is simply a value
- // assignment.
- const bool destExists = _preparedState->toElemFound.ok() &&
- (_preparedState->toIdxFound == (_toFieldRef.numParts()-1));
-
- if (destExists) {
- removeStatus = _preparedState->toElemFound.remove();
- if (!removeStatus.isOK()) {
- return removeStatus;
- }
- }
+ // Creates the final element that's going to be in 'doc'.
+ mutablebson::Document& doc = _preparedState->doc;
+ StringData lastPart = _toFieldRef.getPart(_toFieldRef.numParts() - 1);
+ mutablebson::Element elemToSet =
+ doc.makeElementWithNewFieldName(lastPart, _preparedState->fromElemFound);
+ if (!elemToSet.ok()) {
+ return Status(ErrorCodes::InternalError, "can't create new element");
+ }
- // Creates the final element that's going to be the in 'doc'.
- mutablebson::Document& doc = _preparedState->doc;
- StringData lastPart = _toFieldRef.getPart(_toFieldRef.numParts()-1);
- mutablebson::Element elemToSet = doc.makeElementWithNewFieldName(
- lastPart,
- _preparedState->fromElemFound);
- if (!elemToSet.ok()) {
- return Status(ErrorCodes::InternalError, "can't create new element");
- }
+ // Find the new place to put the "to" element:
+ // createPathAt does not use existing prefix elements so we
+ // need to get the prefix match position for createPathAt below
+ size_t tempIdx = 0;
+ mutablebson::Element tempElem = doc.end();
+ Status status = pathsupport::findLongestPrefix(_toFieldRef, doc.root(), &tempIdx, &tempElem);
+
+ // createPathAt will complete the path and attach 'elemToSet' at the end of it.
+ return pathsupport::createPathAt(_toFieldRef,
+ tempElem == doc.end() ? 0 : tempIdx + 1,
+ tempElem == doc.end() ? doc.root() : tempElem,
+ elemToSet);
+}
+
+Status ModifierRename::log(LogBuilder* logBuilder) const {
+ // If there was no element found then it was a noop, so return immediately
+ if (!_preparedState->fromElemFound.ok())
+ return Status::OK();
- // Find the new place to put the "to" element:
- // createPathAt does not use existing prefix elements so we
- // need to get the prefix match position for createPathAt below
- size_t tempIdx = 0;
- mutablebson::Element tempElem = doc.end();
- Status status = pathsupport::findLongestPrefix(_toFieldRef,
- doc.root(),
- &tempIdx,
- &tempElem);
-
- // createPathAt will complete the path and attach 'elemToSet' at the end of it.
- return pathsupport::createPathAt(_toFieldRef,
- tempElem == doc.end() ? 0 : tempIdx + 1,
- tempElem == doc.end() ? doc.root() : tempElem,
- elemToSet);
- }
+ // debug assert if apply not called, since we found an element to move.
+ dassert(_preparedState->applyCalled);
- Status ModifierRename::log(LogBuilder* logBuilder) const {
+ const bool isPrefix = _fromFieldRef.isPrefixOf(_toFieldRef);
+ const StringData setPath = (isPrefix ? _fromFieldRef : _toFieldRef).dottedField();
+ const StringData unsetPath = isPrefix ? StringData() : _fromFieldRef.dottedField();
+ const bool doUnset = !isPrefix;
- // If there was no element found then it was a noop, so return immediately
- if (!_preparedState->fromElemFound.ok())
- return Status::OK();
+ // We'd like to create an entry such as {$set: {<fieldname>: <value>}} under 'logRoot'.
+ // We start by creating the {$set: ...} Element.
+ mutablebson::Document& doc = logBuilder->getDocument();
- // debug assert if apply not called, since we found an element to move.
- dassert(_preparedState->applyCalled);
+ // Create the {<fieldname>: <value>} Element. Note that we log the mod with a
+ // dotted field, if it was applied over a dotted field. The rationale is that the
+ // secondary may be in a different state than the primary and thus make different
+ // decisions about creating the intermediate path in _fieldRef or not.
+ mutablebson::Element logElement =
+ doc.makeElementWithNewFieldName(setPath, _preparedState->fromElemFound.getValue());
- const bool isPrefix = _fromFieldRef.isPrefixOf(_toFieldRef);
- const StringData setPath =
- (isPrefix ? _fromFieldRef : _toFieldRef).dottedField();
- const StringData unsetPath =
- isPrefix ? StringData() : _fromFieldRef.dottedField();
- const bool doUnset = !isPrefix;
+ if (!logElement.ok()) {
+ return Status(ErrorCodes::InternalError, "cannot create details for $rename mod");
+ }
- // We'd like to create an entry such as {$set: {<fieldname>: <value>}} under 'logRoot'.
- // We start by creating the {$set: ...} Element.
- mutablebson::Document& doc = logBuilder->getDocument();
+ // Now, we attach the {<fieldname>: <value>} Element under the {$set: ...} section.
+ Status status = logBuilder->addToSets(logElement);
+ if (status.isOK() && doUnset) {
// Create the {<fieldname>: <value>} Element. Note that we log the mod with a
// dotted field, if it was applied over a dotted field. The rationale is that the
// secondary may be in a different state than the primary and thus make different
// decisions about creating the intermediate path in _fieldRef or not.
- mutablebson::Element logElement = doc.makeElementWithNewFieldName(
- setPath, _preparedState->fromElemFound.getValue());
-
- if (!logElement.ok()) {
- return Status(ErrorCodes::InternalError, "cannot create details for $rename mod");
- }
-
- // Now, we attach the {<fieldname>: <value>} Element under the {$set: ...} section.
- Status status = logBuilder->addToSets(logElement);
-
- if (status.isOK() && doUnset) {
- // Create the {<fieldname>: <value>} Element. Note that we log the mod with a
- // dotted field, if it was applied over a dotted field. The rationale is that the
- // secondary may be in a different state than the primary and thus make different
- // decisions about creating the intermediate path in _fieldRef or not.
- status = logBuilder->addToUnsets(unsetPath);
- }
-
- return status;
+ status = logBuilder->addToUnsets(unsetPath);
}
-} // namespace mongo
+ return status;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_rename.h b/src/mongo/db/ops/modifier_rename.h
index a43523f5320..883443893b3 100644
--- a/src/mongo/db/ops/modifier_rename.h
+++ b/src/mongo/db/ops/modifier_rename.h
@@ -38,61 +38,56 @@
namespace mongo {
- class LogBuilder;
+class LogBuilder;
- /**
- * The $rename modifier moves the field from source to the destination to perform
- * the rename.
- *
- * Example: {$rename: {<source>:<dest>}} where both <source/dest> are field names
- * Start with {a:1} and applying a {$rename: {"a":"b"} } produces {b:1}
- **/
- class ModifierRename : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierRename);
-
- public:
-
- ModifierRename();
- virtual ~ModifierRename();
-
- /**
- * We will check that the to/from are valid paths; in prepare more validation is done
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
-
- /**
- * In prepare we will ensure that all restrictions are met:
- * -- The 'from' field exists, and is valid, else it is a no-op
- * -- The 'to' field is valid as a destination
- * -- The 'to' field is not on the path (or the same path) as the 'from' field
- * -- Neither 'to' nor 'from' have an array ancestor
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
-
- /**
- * We will transform the document by first making sure that the 'to' element
- * is empty before moving the 'from' element there.
- */
- virtual Status apply() const;
-
- /**
- * For the oplog entry we will generate an $unset on the 'from' field, and $set for
- * the 'to' field. If no 'from' element is found then function will return immediately.
- */
- virtual Status log(LogBuilder* logBuilder) const;
+/**
+ * The $rename modifier moves the field from source to the destination to perform
+ * the rename.
+ *
+ * Example: {$rename: {<source>:<dest>}} where both <source/dest> are field names
+ * Start with {a:1} and applying a {$rename: {"a":"b"} } produces {b:1}
+ **/
+class ModifierRename : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierRename);
+
+public:
+ ModifierRename();
+ virtual ~ModifierRename();
- private:
+ /**
+ * We will check that the to/from are valid paths; in prepare more validation is done
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
- // The source and destination fields
- FieldRef _fromFieldRef;
- FieldRef _toFieldRef;
+ /**
+ * In prepare we will ensure that all restrictions are met:
+ * -- The 'from' field exists, and is valid, else it is a no-op
+ * -- The 'to' field is valid as a destination
+ * -- The 'to' field is not on the path (or the same path) as the 'from' field
+ * -- Neither 'to' nor 'from' have an array ancestor
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
- // The state carried over from prepare for apply/log
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
- };
+ /**
+ * We will transform the document by first making sure that the 'to' element
+ * is empty before moving the 'from' element there.
+ */
+ virtual Status apply() const;
-} // namespace mongo
+ /**
+ * For the oplog entry we will generate an $unset on the 'from' field, and $set for
+ * the 'to' field. If no 'from' element is found then function will return immediately.
+ */
+ virtual Status log(LogBuilder* logBuilder) const;
+
+private:
+ // The source and destination fields
+ FieldRef _fromFieldRef;
+ FieldRef _toFieldRef;
+
+ // The state carried over from prepare for apply/log
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_rename_test.cpp b/src/mongo/db/ops/modifier_rename_test.cpp
index 637926da8e1..4841c4b15a1 100644
--- a/src/mongo/db/ops/modifier_rename_test.cpp
+++ b/src/mongo/db/ops/modifier_rename_test.cpp
@@ -41,355 +41,355 @@
namespace {
- using mongo::BSONObj;
- using mongo::fromjson;
- using mongo::LogBuilder;
- using mongo::ModifierInterface;
- using mongo::NumberInt;
- using mongo::ModifierRename;
- using mongo::Status;
- using mongo::StringData;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /** Helper to build and manipulate the mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj) {
- _modObj = modObj;
- ASSERT_OK(_mod.init(_modObj["$rename"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierRename& mod() { return _mod; }
-
- private:
- ModifierRename _mod;
- BSONObj _modObj;
- };
-
- /**
- * These test negative cases:
- * -- No '$' support for positional operator
- * -- No empty field names (ex. .a, b. )
- * -- Can't rename to an invalid fieldname (empty fieldname part)
- */
- TEST(InvalidInit, FromDbTests) {
- ModifierRename mod;
- ASSERT_NOT_OK(mod.init(fromjson("{'a.$':'b'}").firstElement(),
- ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'a':'b.$'}").firstElement(),
- ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'.b':'a'}").firstElement(),
- ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'b.':'a'}").firstElement(),
- ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'b':'.a'}").firstElement(),
- ModifierInterface::Options::normal()));
- ASSERT_NOT_OK(mod.init(fromjson("{'b':'a.'}").firstElement(),
- ModifierInterface::Options::normal()));
+using mongo::BSONObj;
+using mongo::fromjson;
+using mongo::LogBuilder;
+using mongo::ModifierInterface;
+using mongo::NumberInt;
+using mongo::ModifierRename;
+using mongo::Status;
+using mongo::StringData;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+
+/** Helper to build and manipulate the mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj) {
+ _modObj = modObj;
+ ASSERT_OK(_mod.init(_modObj["$rename"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(MissingFrom, InitPrepLog) {
- Document doc(fromjson("{a: 2}"));
- Mod setMod(fromjson("{$rename: {'b':'a'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
-
- TEST(MissingFromDotted, InitPrepLog) {
- Document doc(fromjson("{a: {r:2}}"));
- Mod setMod(fromjson("{$rename: {'a.b':'a.c'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_TRUE(execInfo.noOp);
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
-
- TEST(BasicInit, DifferentRoots) {
- Document doc(fromjson("{a: 2}"));
- Mod setMod(fromjson("{$rename: {'a':'f.g'}}"));
- }
-
- TEST(MoveOnSamePath, MoveUp) {
- ModifierRename mod;
- ASSERT_NOT_OK(mod.init(fromjson("{'b.a':'b'}").firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(MoveOnSamePath, MoveDown) {
- ModifierRename mod;
- ASSERT_NOT_OK(mod.init(fromjson("{'b':'b.a'}").firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- TEST(MissingTo, SimpleNumberAtRoot) {
- Document doc(fromjson("{a: 2}"));
- Mod setMod(fromjson("{$rename: {'a':'b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{b:2}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'b': 2}, $unset: {'a': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
-
- TEST(SimpleReplace, SameLevel) {
- Document doc(fromjson("{a: 2, b: 1}"));
- Mod setMod(fromjson("{$rename: {'a':'b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{b:2}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'b': 2}, $unset: {'a': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
-
- TEST(SimpleReplace, FromDottedElement) {
- Document doc(fromjson("{a: {c: {d: 6}}, b: 1}"));
- Mod setMod(fromjson("{$rename: {'a.c':'b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.c");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{a: {}, b:{ d: 6}}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'b': {d: 6}}, $unset: {'a.c': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
-
- TEST(DottedTo, MissingCompleteTo) {
- Document doc(fromjson("{a: 2, b: 1, c: {}}"));
- Mod setMod(fromjson("{$rename: {'a':'c.r.d'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "c.r.d");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{b:1, c: { r: { d: 2}}}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'c.r.d': 2}, $unset: {'a': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
-
- TEST(DottedTo, ToIsCompletelyMissing) {
- Document doc(fromjson("{a: 2}"));
- Mod setMod(fromjson("{$rename: {'a':'b.c.d'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b.c.d");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{b: {c: {d: 2}}}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'b.c.d': 2}, $unset: {'a': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(FromArrayOfEmbeddedDocs, ToMissingDottedField) {
- Document doc(fromjson("{a: [ {a:2, b:1} ] }"));
- Mod setMod(fromjson("{$rename: {'a':'b.c.d'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b.c.d");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{b: {c: {d: [ {a:2, b:1} ]}}}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'b.c.d': [ {a:2, b:1} ]}, $unset: {'a': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
+ Status apply() const {
+ return _mod.apply();
}
- TEST(FromArrayOfEmbeddedDocs, ToArray) {
- Document doc(fromjson("{a: [ {a:2, b:1} ] }"));
- Mod setMod(fromjson("{$rename: {'a.a':'a.b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(Arrays, MoveInto) {
- Document doc(fromjson("{a: [1, 2], b:2}"));
- Mod setMod(fromjson("{$rename: {'b':'a.2'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierRename& mod() {
+ return _mod;
}
- TEST(Arrays, MoveOut) {
- Document doc(fromjson("{a: [1, 2]}"));
- Mod setMod(fromjson("{$rename: {'a.0':'b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
- }
-
- TEST(Arrays, MoveNonexistantEmbeddedFieldOut) {
- Document doc(fromjson("{a: [{a:1}, {b:2}]}"));
- Mod setMod(fromjson("{$rename: {'a.a':'b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
- }
-
- TEST(Arrays, MoveEmbeddedFieldOutWithElementNumber) {
- Document doc(fromjson("{a: [{a:1}, {b:2}]}"));
- Mod setMod(fromjson("{$rename: {'a.0.a':'b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
- }
+private:
+ ModifierRename _mod;
+ BSONObj _modObj;
+};
- TEST(Arrays, ReplaceArrayField) {
- Document doc(fromjson("{a: 2, b: []}"));
- Mod setMod(fromjson("{$rename: {'a':'b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{b:2}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'b': 2}, $unset: {'a': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
-
-
- TEST(Arrays, ReplaceWithArrayField) {
- Document doc(fromjson("{a: [], b: 2}"));
- Mod setMod(fromjson("{$rename: {'a':'b'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{b:[]}"));
-
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'b': []}, $unset: {'a': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
-
- TEST(LegacyData, CanRenameFromInvalidFieldName) {
- Document doc(fromjson("{$a: 2}"));
- Mod setMod(fromjson("{$rename: {'$a':'a'}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "$a");
- ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+/**
+ * These test negative cases:
+ * -- No '$' support for positional operator
+ * -- No empty field names (ex. .a, b. )
+ * -- Can't rename to an invalid fieldname (empty fieldname part)
+ */
+TEST(InvalidInit, FromDbTests) {
+ ModifierRename mod;
+ ASSERT_NOT_OK(
+ mod.init(fromjson("{'a.$':'b'}").firstElement(), ModifierInterface::Options::normal()));
+ ASSERT_NOT_OK(
+ mod.init(fromjson("{'a':'b.$'}").firstElement(), ModifierInterface::Options::normal()));
+ ASSERT_NOT_OK(
+ mod.init(fromjson("{'.b':'a'}").firstElement(), ModifierInterface::Options::normal()));
+ ASSERT_NOT_OK(
+ mod.init(fromjson("{'b.':'a'}").firstElement(), ModifierInterface::Options::normal()));
+ ASSERT_NOT_OK(
+ mod.init(fromjson("{'b':'.a'}").firstElement(), ModifierInterface::Options::normal()));
+ ASSERT_NOT_OK(
+ mod.init(fromjson("{'b':'a.'}").firstElement(), ModifierInterface::Options::normal()));
+}
+
+TEST(MissingFrom, InitPrepLog) {
+ Document doc(fromjson("{a: 2}"));
+ Mod setMod(fromjson("{$rename: {'b':'a'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(MissingFromDotted, InitPrepLog) {
+ Document doc(fromjson("{a: {r:2}}"));
+ Mod setMod(fromjson("{$rename: {'a.b':'a.c'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_TRUE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(BasicInit, DifferentRoots) {
+ Document doc(fromjson("{a: 2}"));
+ Mod setMod(fromjson("{$rename: {'a':'f.g'}}"));
+}
+
+TEST(MoveOnSamePath, MoveUp) {
+ ModifierRename mod;
+ ASSERT_NOT_OK(
+ mod.init(fromjson("{'b.a':'b'}").firstElement(), ModifierInterface::Options::normal()));
+}
+
+TEST(MoveOnSamePath, MoveDown) {
+ ModifierRename mod;
+ ASSERT_NOT_OK(
+ mod.init(fromjson("{'b':'b.a'}").firstElement(), ModifierInterface::Options::normal()));
+}
+
+TEST(MissingTo, SimpleNumberAtRoot) {
+ Document doc(fromjson("{a: 2}"));
+ Mod setMod(fromjson("{$rename: {'a':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b:2}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b': 2}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(SimpleReplace, SameLevel) {
+ Document doc(fromjson("{a: 2, b: 1}"));
+ Mod setMod(fromjson("{$rename: {'a':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b:2}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b': 2}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(SimpleReplace, FromDottedElement) {
+ Document doc(fromjson("{a: {c: {d: 6}}, b: 1}"));
+ Mod setMod(fromjson("{$rename: {'a.c':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.c");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{a: {}, b:{ d: 6}}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b': {d: 6}}, $unset: {'a.c': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(DottedTo, MissingCompleteTo) {
+ Document doc(fromjson("{a: 2, b: 1, c: {}}"));
+ Mod setMod(fromjson("{$rename: {'a':'c.r.d'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "c.r.d");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b:1, c: { r: { d: 2}}}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'c.r.d': 2}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(DottedTo, ToIsCompletelyMissing) {
+ Document doc(fromjson("{a: 2}"));
+ Mod setMod(fromjson("{$rename: {'a':'b.c.d'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b.c.d");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b: {c: {d: 2}}}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b.c.d': 2}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(FromArrayOfEmbeddedDocs, ToMissingDottedField) {
+ Document doc(fromjson("{a: [ {a:2, b:1} ] }"));
+ Mod setMod(fromjson("{$rename: {'a':'b.c.d'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b.c.d");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b: {c: {d: [ {a:2, b:1} ]}}}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b.c.d': [ {a:2, b:1} ]}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(FromArrayOfEmbeddedDocs, ToArray) {
+ Document doc(fromjson("{a: [ {a:2, b:1} ] }"));
+ Mod setMod(fromjson("{$rename: {'a.a':'a.b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(Arrays, MoveInto) {
+ Document doc(fromjson("{a: [1, 2], b:2}"));
+ Mod setMod(fromjson("{$rename: {'b':'a.2'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(Arrays, MoveOut) {
+ Document doc(fromjson("{a: [1, 2]}"));
+ Mod setMod(fromjson("{$rename: {'a.0':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(Arrays, MoveNonexistantEmbeddedFieldOut) {
+ Document doc(fromjson("{a: [{a:1}, {b:2}]}"));
+ Mod setMod(fromjson("{$rename: {'a.a':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(Arrays, MoveEmbeddedFieldOutWithElementNumber) {
+ Document doc(fromjson("{a: [{a:1}, {b:2}]}"));
+ Mod setMod(fromjson("{$rename: {'a.0.a':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(Arrays, ReplaceArrayField) {
+ Document doc(fromjson("{a: 2, b: []}"));
+ Mod setMod(fromjson("{$rename: {'a':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b:2}"));
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b': 2}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+
+TEST(Arrays, ReplaceWithArrayField) {
+ Document doc(fromjson("{a: [], b: 2}"));
+ Mod setMod(fromjson("{$rename: {'a':'b'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{b:[]}"));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(doc, fromjson("{a:2}"));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'b': []}, $unset: {'a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
+
+TEST(LegacyData, CanRenameFromInvalidFieldName) {
+ Document doc(fromjson("{$a: 2}"));
+ Mod setMod(fromjson("{$rename: {'$a':'a'}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "$a");
+ ASSERT_EQUALS(execInfo.fieldRef[1]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(doc, fromjson("{a:2}"));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- BSONObj logObj = fromjson("{$set:{ 'a': 2}, $unset: {'$a': true}}");
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(logDoc, logObj);
- }
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ BSONObj logObj = fromjson("{$set:{ 'a': 2}, $unset: {'$a': true}}");
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(logDoc, logObj);
+}
-} // namespace
+} // namespace
diff --git a/src/mongo/db/ops/modifier_set.cpp b/src/mongo/db/ops/modifier_set.cpp
index fe808138c68..93abc04f892 100644
--- a/src/mongo/db/ops/modifier_set.cpp
+++ b/src/mongo/db/ops/modifier_set.cpp
@@ -37,253 +37,224 @@
namespace mongo {
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- struct ModifierSet::PreparedState {
+struct ModifierSet::PreparedState {
+ PreparedState(mutablebson::Document* targetDoc)
+ : doc(*targetDoc), idxFound(0), elemFound(doc.end()), noOp(false), elemIsBlocking(false) {}
- PreparedState(mutablebson::Document* targetDoc)
- : doc(*targetDoc)
- , idxFound(0)
- , elemFound(doc.end())
- , noOp(false)
- , elemIsBlocking(false) {
- }
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
- // Document that is going to be changed.
- mutablebson::Document& doc;
+    // Index in _fieldRef for which an Element exists in the document.
+ size_t idxFound;
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element elemFound;
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element elemFound;
+    // Is this $set a no-op?
+ bool noOp;
- // This $set is a no-op?
- bool noOp;
+    // Whether, during a replication operation, we found an element that blocks our update path.
+ bool elemIsBlocking;
+};
- // The element we find during a replication operation that blocks our update path
- bool elemIsBlocking;
+ModifierSet::ModifierSet(ModifierSet::ModifierSetMode mode)
+ : _fieldRef(), _posDollar(0), _setMode(mode), _val(), _modOptions() {}
- };
+ModifierSet::~ModifierSet() {}
- ModifierSet::ModifierSet(ModifierSet::ModifierSetMode mode)
- : _fieldRef()
- , _posDollar(0)
- , _setMode(mode)
- , _val()
- , _modOptions() {
- }
+Status ModifierSet::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ //
+ // field name analysis
+ //
- ModifierSet::~ModifierSet() {
+ // Break down the field name into its 'dotted' components (aka parts) and check that
+ // the field is fit for updates
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
}
- Status ModifierSet::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
-
- //
- // field name analysis
- //
-
- // Break down the field name into its 'dotted' components (aka parts) and check that
- // the field is fit for updates
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
-
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
-
- if (positional)
- *positional = foundDollar;
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
- }
+ if (positional)
+ *positional = foundDollar;
- //
- // value analysis
- //
-
- if (!modExpr.ok())
- return Status(ErrorCodes::BadValue, "cannot $set an empty value");
-
- _val = modExpr;
- _modOptions = opts;
-
- return Status::OK();
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
}
- Status ModifierSet::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
-
- _preparedState.reset(new PreparedState(&root.getDocument()));
-
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_posDollar) {
- if (matchedField.empty()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
- }
- _fieldRef.setPart(_posDollar, matchedField);
- }
+ //
+ // value analysis
+ //
- // Locate the field name in 'root'. Note that we may not have all the parts in the path
- // in the doc -- which is fine. Our goal now is merely to reason about whether this mod
- // apply is a noOp or whether is can be in place. The remainin path, if missing, will
- // be created during the apply.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
-
- // FindLongestPrefix may say the path does not exist at all, which is fine here, or
- // that the path was not viable or otherwise wrong, in which case, the mod cannot
- // proceed.
- if (status.code() == ErrorCodes::NonExistentPath) {
- _preparedState->elemFound = root.getDocument().end();
- }
- else if (_modOptions.fromReplication && status.code() == ErrorCodes::PathNotViable) {
- // If we are coming from replication and it is an invalid path,
- // then push on indicating that we had a blocking element, which we stopped at
- _preparedState->elemIsBlocking = true;
- }
- else if (!status.isOK()) {
- return status;
- }
+ if (!modExpr.ok())
+ return Status(ErrorCodes::BadValue, "cannot $set an empty value");
- if (_setMode == SET_ON_INSERT) {
- execInfo->context = ModifierInterface::ExecInfo::INSERT_CONTEXT;
- }
+ _val = modExpr;
+ _modOptions = opts;
- // We register interest in the field name. The driver needs this info to sort out if
- // there is any conflict among mods.
- execInfo->fieldRef[0] = &_fieldRef;
+ return Status::OK();
+}
- //
- // in-place and no-op logic
- //
+Status ModifierSet::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(&root.getDocument()));
- // If the field path is not fully present, then this mod cannot be in place, nor is a
- // noOp.
- if (!_preparedState->elemFound.ok() ||
- _preparedState->idxFound < (_fieldRef.numParts()-1)) {
- return Status::OK();
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_posDollar) {
+ if (matchedField.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
}
+ _fieldRef.setPart(_posDollar, matchedField);
+ }
- // If the value being $set is the same as the one already in the doc, than this is a
- // noOp.
- if (_preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_fieldRef.numParts()-1) &&
- _preparedState->elemFound.compareWithBSONElement(_val, false /*ignore field*/) == 0) {
- execInfo->noOp = _preparedState->noOp = true;
- }
+ // Locate the field name in 'root'. Note that we may not have all the parts in the path
+ // in the doc -- which is fine. Our goal now is merely to reason about whether this mod
+    // apply is a noOp or whether it can be in place. The remaining path, if missing, will
+ // be created during the apply.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->idxFound, &_preparedState->elemFound);
+
+ // FindLongestPrefix may say the path does not exist at all, which is fine here, or
+ // that the path was not viable or otherwise wrong, in which case, the mod cannot
+ // proceed.
+ if (status.code() == ErrorCodes::NonExistentPath) {
+ _preparedState->elemFound = root.getDocument().end();
+ } else if (_modOptions.fromReplication && status.code() == ErrorCodes::PathNotViable) {
+        // If we are coming from replication and it is an invalid path,
+        // then push on, noting that we found a blocking element, which we stopped at.
+ _preparedState->elemIsBlocking = true;
+ } else if (!status.isOK()) {
+ return status;
+ }
- return Status::OK();
+ if (_setMode == SET_ON_INSERT) {
+ execInfo->context = ModifierInterface::ExecInfo::INSERT_CONTEXT;
}
- Status ModifierSet::apply() const {
- dassert(!_preparedState->noOp);
+ // We register interest in the field name. The driver needs this info to sort out if
+ // there is any conflict among mods.
+ execInfo->fieldRef[0] = &_fieldRef;
- const bool destExists = _preparedState->elemFound.ok() &&
- _preparedState->idxFound == (_fieldRef.numParts()-1);
- // If there's no need to create any further field part, the $set is simply a value
- // assignment.
- if (destExists) {
- return _preparedState->elemFound.setValueBSONElement(_val);
- }
+ //
+ // in-place and no-op logic
+ //
- //
- // Complete document path logic
- //
+    // If the field path is not fully present, then this mod cannot be in place, nor is it a
+ // noOp.
+ if (!_preparedState->elemFound.ok() || _preparedState->idxFound < (_fieldRef.numParts() - 1)) {
+ return Status::OK();
+ }
- // Creates the final element that's going to be $set in 'doc'.
- mutablebson::Document& doc = _preparedState->doc;
- StringData lastPart = _fieldRef.getPart(_fieldRef.numParts()-1);
- mutablebson::Element elemToSet = doc.makeElementWithNewFieldName(lastPart, _val);
- if (!elemToSet.ok()) {
- return Status(ErrorCodes::InternalError, "can't create new element");
- }
+    // If the value being $set is the same as the one already in the doc, then this is a
+ // noOp.
+ if (_preparedState->elemFound.ok() && _preparedState->idxFound == (_fieldRef.numParts() - 1) &&
+ _preparedState->elemFound.compareWithBSONElement(_val, false /*ignore field*/) == 0) {
+ execInfo->noOp = _preparedState->noOp = true;
+ }
- // Now, we can be in two cases here, as far as attaching the element being set goes:
- // (a) none of the parts in the element's path exist, or (b) some parts of the path
- // exist but not all.
- if (!_preparedState->elemFound.ok()) {
- _preparedState->elemFound = doc.root();
- _preparedState->idxFound = 0;
- }
- else {
- _preparedState->idxFound++;
- }
+ return Status::OK();
+}
- // Remove the blocking element, if we are from replication applier. See comment below.
- if (_modOptions.fromReplication && !destExists && _preparedState->elemFound.ok() &&
- _preparedState->elemIsBlocking &&
- (!(_preparedState->elemFound.isType(Array)) ||
- !(_preparedState->elemFound.isType(Object)))
- ) {
-
- /**
- * With replication we want to be able to remove blocking elements for $set (only).
- * The reason they are blocking elements is that they are not embedded documents
- * (objects) nor an array (a special type of an embedded doc) and we cannot
- * add children to them (because the $set path requires adding children past
- * the blocking element).
- *
- * Imagine that we started with this:
- * {_id:1, a:1} + {$set : {"a.b.c" : 1}} -> {_id:1, a: {b: {c:1}}}
- * Above we found that element (a:1) is blocking at position 1. We now will replace
- * it with an empty object so the normal logic below can be
- * applied from the root (in this example case).
- *
- * Here is an array example:
- * {_id:1, a:[1, 2]} + {$set : {"a.0.c" : 1}} -> {_id:1, a: [ {c:1}, 2]}
- * The blocking element is "a.0" since it is a number, non-object, and we must
- * then replace it with an empty object so we can add c:1 to that empty object
- */
-
- mutablebson::Element blockingElem = _preparedState->elemFound;
- BSONObj newObj;
- // Replace blocking non-object with an empty object
- Status status = blockingElem.setValueObject(newObj);
- if (!status.isOK()) {
- return status;
- }
- }
+Status ModifierSet::apply() const {
+ dassert(!_preparedState->noOp);
- // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
- return pathsupport::createPathAt(_fieldRef,
- _preparedState->idxFound,
- _preparedState->elemFound,
- elemToSet);
+ const bool destExists =
+ _preparedState->elemFound.ok() && _preparedState->idxFound == (_fieldRef.numParts() - 1);
+ // If there's no need to create any further field part, the $set is simply a value
+ // assignment.
+ if (destExists) {
+ return _preparedState->elemFound.setValueBSONElement(_val);
}
- Status ModifierSet::log(LogBuilder* logBuilder) const {
+ //
+ // Complete document path logic
+ //
- // We'd like to create an entry such as {$set: {<fieldname>: <value>}} under 'logRoot'.
- // We start by creating the {$set: ...} Element.
- mutablebson::Document& doc = logBuilder->getDocument();
+ // Creates the final element that's going to be $set in 'doc'.
+ mutablebson::Document& doc = _preparedState->doc;
+ StringData lastPart = _fieldRef.getPart(_fieldRef.numParts() - 1);
+ mutablebson::Element elemToSet = doc.makeElementWithNewFieldName(lastPart, _val);
+ if (!elemToSet.ok()) {
+ return Status(ErrorCodes::InternalError, "can't create new element");
+ }
- // Create the {<fieldname>: <value>} Element. Note that we log the mod with a
- // dotted field, if it was applied over a dotted field. The rationale is that the
- // secondary may be in a different state than the primary and thus make different
- // decisions about creating the intermediate path in _fieldRef or not.
- mutablebson::Element logElement = doc.makeElementWithNewFieldName(
- _fieldRef.dottedField(), _val);
+ // Now, we can be in two cases here, as far as attaching the element being set goes:
+ // (a) none of the parts in the element's path exist, or (b) some parts of the path
+ // exist but not all.
+ if (!_preparedState->elemFound.ok()) {
+ _preparedState->elemFound = doc.root();
+ _preparedState->idxFound = 0;
+ } else {
+ _preparedState->idxFound++;
+ }
- if (!logElement.ok()) {
- return Status(ErrorCodes::InternalError, "cannot create details for $set mod");
+    // Remove the blocking element if this update comes from the replication applier. See comment below.
+ if (_modOptions.fromReplication && !destExists && _preparedState->elemFound.ok() &&
+ _preparedState->elemIsBlocking && (!(_preparedState->elemFound.isType(Array)) ||
+ !(_preparedState->elemFound.isType(Object)))) {
+ /**
+ * With replication we want to be able to remove blocking elements for $set (only).
+ * The reason they are blocking elements is that they are not embedded documents
+ * (objects) nor an array (a special type of an embedded doc) and we cannot
+ * add children to them (because the $set path requires adding children past
+ * the blocking element).
+ *
+ * Imagine that we started with this:
+ * {_id:1, a:1} + {$set : {"a.b.c" : 1}} -> {_id:1, a: {b: {c:1}}}
+ * Above we found that element (a:1) is blocking at position 1. We now will replace
+ * it with an empty object so the normal logic below can be
+ * applied from the root (in this example case).
+ *
+ * Here is an array example:
+ * {_id:1, a:[1, 2]} + {$set : {"a.0.c" : 1}} -> {_id:1, a: [ {c:1}, 2]}
+ * The blocking element is "a.0" since it is a number, non-object, and we must
+ * then replace it with an empty object so we can add c:1 to that empty object
+ */
+
+ mutablebson::Element blockingElem = _preparedState->elemFound;
+ BSONObj newObj;
+ // Replace blocking non-object with an empty object
+ Status status = blockingElem.setValueObject(newObj);
+ if (!status.isOK()) {
+ return status;
}
+ }
- return logBuilder->addToSets(logElement);
+ // createPathAt() will complete the path and attach 'elemToSet' at the end of it.
+ return pathsupport::createPathAt(
+ _fieldRef, _preparedState->idxFound, _preparedState->elemFound, elemToSet);
+}
+
+Status ModifierSet::log(LogBuilder* logBuilder) const {
+ // We'd like to create an entry such as {$set: {<fieldname>: <value>}} under 'logRoot'.
+ // We start by creating the {$set: ...} Element.
+ mutablebson::Document& doc = logBuilder->getDocument();
+
+ // Create the {<fieldname>: <value>} Element. Note that we log the mod with a
+ // dotted field, if it was applied over a dotted field. The rationale is that the
+ // secondary may be in a different state than the primary and thus make different
+ // decisions about creating the intermediate path in _fieldRef or not.
+ mutablebson::Element logElement =
+ doc.makeElementWithNewFieldName(_fieldRef.dottedField(), _val);
+
+ if (!logElement.ok()) {
+ return Status(ErrorCodes::InternalError, "cannot create details for $set mod");
}
-} // namespace mongo
+ return logBuilder->addToSets(logElement);
+}
+
+} // namespace mongo
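A test-style sketch of the replication-only blocking-element handling described in the
ModifierSet::apply() comment above; it assumes the Mod helper and using-declarations from
modifier_set_test.cpp (further below), and the test name is only illustrative:

TEST(SetFromRepl, BlockingElementSketch) {
    // 'a' holds a scalar, so it blocks the path 'a.b.c'.
    Document doc(fromjson("{_id: 1, a: 1}"));
    Mod setMod(fromjson("{$set: {'a.b.c': 1}}"), true);  // fromRepl = true

    ModifierInterface::ExecInfo execInfo;
    ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));  // would not be OK without fromRepl
    ASSERT_FALSE(execInfo.noOp);

    // The blocking scalar is replaced with an empty object so the rest of the
    // path can be created, matching the worked example in the comment above.
    ASSERT_OK(setMod.apply());
    ASSERT_EQUALS(fromjson("{_id: 1, a: {b: {c: 1}}}"), doc);
}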
diff --git a/src/mongo/db/ops/modifier_set.h b/src/mongo/db/ops/modifier_set.h
index cc961163234..84d6f0ba8cf 100644
--- a/src/mongo/db/ops/modifier_set.h
+++ b/src/mongo/db/ops/modifier_set.h
@@ -38,77 +38,71 @@
namespace mongo {
- class LogBuilder;
-
- class ModifierSet : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierSet);
-
- public:
-
- enum ModifierSetMode { SET_NORMAL, SET_ON_INSERT };
- explicit ModifierSet(ModifierSetMode mode = SET_NORMAL);
-
- //
- // Modifier interface implementation
- //
-
- virtual ~ModifierSet();
-
- /**
- * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as
- * {$set: {<fieldname: <value>}}. init() extracts the field name and the value to be
- * assigned to it from 'modExpr'. It returns OK if successful or a status describing
- * the error.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
-
- /**
- * Looks up the field name in the sub-tree rooted at 'root', and binds, if necessary,
- * the '$' field part using the 'matchedfield' number. prepare() returns OK and
- * fills in 'execInfo' with information of whether this mod is a no-op on 'root' and
- * whether it is an in-place candidate. Otherwise, returns a status describing the
- * error.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
-
- /**
- * Applies the prepared mod over the element 'root' specified in the prepare()
- * call. Returns OK if successful or a status describing the error.
- */
- virtual Status apply() const;
-
- /**
- * Adds a log entry to logRoot corresponding to the operation applied here. Returns OK
- * if successful or a status describing the error.
- */
- virtual Status log(LogBuilder* logBuilder) const;
-
- private:
-
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
-
- // 0 or index for $-positional in _fieldRef.
- size_t _posDollar;
-
- // If on 'on insert' mode, We'd like to apply this mod only if we're in a upsert.
- const ModifierSetMode _setMode;
-
- // Element of the $set expression.
- BSONElement _val;
-
- // See the class comments in modifier_interface.h
- ModifierInterface::Options _modOptions;
-
- // The instance of the field in the provided doc. This state is valid after a
- // prepare() was issued and until a log() is issued. The document this mod is
- // being prepared against must be live throughout all the calls.
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
-
- };
-
-} // namespace mongo
+class LogBuilder;
+
+class ModifierSet : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierSet);
+
+public:
+ enum ModifierSetMode { SET_NORMAL, SET_ON_INSERT };
+ explicit ModifierSet(ModifierSetMode mode = SET_NORMAL);
+
+ //
+ // Modifier interface implementation
+ //
+
+ virtual ~ModifierSet();
+
+ /**
+ * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as
+     * {$set: {<fieldname>: <value>}}. init() extracts the field name and the value to be
+ * assigned to it from 'modExpr'. It returns OK if successful or a status describing
+ * the error.
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
+
+ /**
+ * Looks up the field name in the sub-tree rooted at 'root', and binds, if necessary,
+     * the '$' field part using the 'matchedField' number. prepare() returns OK and
+ * fills in 'execInfo' with information of whether this mod is a no-op on 'root' and
+ * whether it is an in-place candidate. Otherwise, returns a status describing the
+ * error.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
+
+ /**
+ * Applies the prepared mod over the element 'root' specified in the prepare()
+ * call. Returns OK if successful or a status describing the error.
+ */
+ virtual Status apply() const;
+
+ /**
+ * Adds a log entry to logRoot corresponding to the operation applied here. Returns OK
+ * if successful or a status describing the error.
+ */
+ virtual Status log(LogBuilder* logBuilder) const;
+
+private:
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
+
+ // 0 or index for $-positional in _fieldRef.
+ size_t _posDollar;
+
+    // If in 'on insert' mode, we'd like to apply this mod only if we're in an upsert.
+ const ModifierSetMode _setMode;
+
+ // Element of the $set expression.
+ BSONElement _val;
+
+ // See the class comments in modifier_interface.h
+ ModifierInterface::Options _modOptions;
+
+ // The instance of the field in the provided doc. This state is valid after a
+ // prepare() was issued and until a log() is issued. The document this mod is
+ // being prepared against must be live throughout all the calls.
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
+
+} // namespace mongo
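A minimal sketch of the init()/prepare()/apply()/log() lifecycle documented in modifier_set.h
above, assuming the using-declarations from modifier_set_test.cpp below; the document and mod
values are only examples:

ModifierSet setMod;
BSONObj modObj = fromjson("{$set: {'a.b': 2}}");
ASSERT_OK(setMod.init(modObj["$set"].embeddedObject().firstElement(),
                      ModifierInterface::Options::normal()));

Document doc(fromjson("{a: {b: 1}}"));
ModifierInterface::ExecInfo execInfo;
ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));  // no positional ('$') part to bind here

if (!execInfo.noOp) {
    ASSERT_OK(setMod.apply());  // doc becomes {a: {b: 2}}
}

Document logDoc;
LogBuilder logBuilder(logDoc.root());
ASSERT_OK(setMod.log(&logBuilder));  // logDoc becomes {$set: {'a.b': 2}}, logged with the dotted field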
diff --git a/src/mongo/db/ops/modifier_set_test.cpp b/src/mongo/db/ops/modifier_set_test.cpp
index 1a7e1db5a68..315da4e113d 100644
--- a/src/mongo/db/ops/modifier_set_test.cpp
+++ b/src/mongo/db/ops/modifier_set_test.cpp
@@ -42,715 +42,715 @@
namespace {
- using mongo::BSONObj;
- using mongo::fromjson;
- using mongo::LogBuilder;
- using mongo::ModifierInterface;
- using mongo::NumberInt;
- using mongo::ModifierSet;
- using mongo::Status;
- using mongo::StringData;
- using mongo::mutablebson::ConstElement;
- using mongo::mutablebson::countChildren;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /** Helper to build and manipulate a $set mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj, bool fromRepl = false) :
- _mod(mongoutils::str::equals(modObj.firstElement().fieldName(), "$setOnInsert") ?
- ModifierSet::SET_ON_INSERT : ModifierSet::SET_NORMAL) {
-
- _modObj = modObj;
- StringData modName = modObj.firstElement().fieldName();
- ASSERT_OK(_mod.init(_modObj[modName].embeddedObject().firstElement(),
- !fromRepl ? ModifierInterface::Options::normal():
- ModifierInterface::Options::fromRepl()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierSet& mod() { return _mod; }
-
- private:
- ModifierSet _mod;
- BSONObj _modObj;
- };
-
- //
- // Init tests
- //
-
- TEST(Init, EmptyOperation) {
- BSONObj modObj = fromjson("{$set: {}}");
- ModifierSet mod;
- ASSERT_NOT_OK(mod.init(modObj["$set"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal() ));
- }
-
- //
- // Simple Mods
- //
+using mongo::BSONObj;
+using mongo::fromjson;
+using mongo::LogBuilder;
+using mongo::ModifierInterface;
+using mongo::NumberInt;
+using mongo::ModifierSet;
+using mongo::Status;
+using mongo::StringData;
+using mongo::mutablebson::ConstElement;
+using mongo::mutablebson::countChildren;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
- TEST(SimpleMod, PrepareNoOp) {
- Document doc(fromjson("{a: 2}"));
- Mod setMod(fromjson("{$set: {a: 2}}"));
+/** Helper to build and manipulate a $set mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
+ explicit Mod(BSONObj modObj, bool fromRepl = false)
+ : _mod(mongoutils::str::equals(modObj.firstElement().fieldName(), "$setOnInsert")
+ ? ModifierSet::SET_ON_INSERT
+ : ModifierSet::SET_NORMAL) {
+ _modObj = modObj;
+ StringData modName = modObj.firstElement().fieldName();
+ ASSERT_OK(_mod.init(_modObj[modName].embeddedObject().firstElement(),
+ !fromRepl ? ModifierInterface::Options::normal()
+ : ModifierInterface::Options::fromRepl()));
}
- TEST(SimpleMod, PrepareSetOnInsert) {
- Document doc(fromjson("{a: 1}"));
- Mod setMod(fromjson("{$setOnInsert: {a: 2}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS(execInfo.context, ModifierInterface::ExecInfo::INSERT_CONTEXT);
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
}
- TEST(SimpleMod, PrepareApplyEmptyDocument) {
- Document doc(fromjson("{}"));
- Mod setMod(fromjson("{$set: {a: 2}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: 2}"), doc);
+ Status apply() const {
+ return _mod.apply();
}
- TEST(SimpleMod, PrepareApplyInPlace) {
- Document doc(fromjson("{a: 1}"));
- Mod setMod(fromjson("{$set: {a: 2}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: 2}"), doc);
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
}
- TEST(SimpleMod, PrepareApplyOverridePath) {
- Document doc(fromjson("{a: {b: 1}}"));
- Mod setMod(fromjson("{$set: {a: 2}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: 2}"), doc);
+ ModifierSet& mod() {
+ return _mod;
}
- TEST(SimpleMod, PrepareApplyChangeType) {
- Document doc(fromjson("{a: 'str'}"));
- Mod setMod(fromjson("{$set: {a: 2}}"));
+private:
+ ModifierSet _mod;
+ BSONObj _modObj;
+};
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+//
+// Init tests
+//
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(Init, EmptyOperation) {
+ BSONObj modObj = fromjson("{$set: {}}");
+ ModifierSet mod;
+ ASSERT_NOT_OK(mod.init(modObj["$set"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
+}
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: 2}"), doc);
- }
+//
+// Simple Mods
+//
- TEST(SimpleMod, PrepareApplyNewPath) {
- Document doc(fromjson("{b: 1}"));
- Mod setMod(fromjson("{$set: {a: 2}}"));
+TEST(SimpleMod, PrepareNoOp) {
+ Document doc(fromjson("{a: 2}"));
+ Mod setMod(fromjson("{$set: {a: 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+}
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{b: 1, a: 2}"), doc);
- }
+TEST(SimpleMod, PrepareSetOnInsert) {
+ Document doc(fromjson("{a: 1}"));
+ Mod setMod(fromjson("{$setOnInsert: {a: 2}}"));
- TEST(SimpleMod, LogNormal) {
- BSONObj obj = fromjson("{a: 1}");
- Mod setMod(fromjson("{$set: {a: 2}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- Document doc(obj);
- ModifierInterface::ExecInfo dummy;
- ASSERT_OK(setMod.prepare(doc.root(), "", &dummy));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(execInfo.context, ModifierInterface::ExecInfo::INSERT_CONTEXT);
+}
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {a: 2}}"), logDoc);
- }
-
- //
- // Simple dotted mod
- //
-
- TEST(DottedMod, PrepareNoOp) {
- Document doc(fromjson("{a: {b: 2}}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(SimpleMod, PrepareApplyEmptyDocument) {
+ Document doc(fromjson("{}"));
+ Mod setMod(fromjson("{$set: {a: 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_TRUE(execInfo.noOp);
- }
-
- TEST(DottedMod, PreparePathNotViable) {
- Document doc(fromjson("{a:1}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(DottedMod, PreparePathNotViableArrray) {
- Document doc(fromjson("{a:[{b:1}]}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: 2}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
- }
+TEST(SimpleMod, PrepareApplyInPlace) {
+ Document doc(fromjson("{a: 1}"));
+ Mod setMod(fromjson("{$set: {a: 2}}"));
- TEST(DottedMod, PrepareApplyInPlace) {
- Document doc(fromjson("{a: {b: 1}}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc);
- }
-
- TEST(DottedMod, PrepareApplyChangeType) {
- Document doc(fromjson("{a: {b: 'str'}}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
-
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc);
- }
+ ASSERT_OK(setMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: 2}"), doc);
+}
- TEST(DottedMod, PrepareApplyChangePath) {
- Document doc(fromjson("{a: {b: {c: 1}}}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
+TEST(SimpleMod, PrepareApplyOverridePath) {
+ Document doc(fromjson("{a: {b: 1}}"));
+ Mod setMod(fromjson("{$set: {a: 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc);
- }
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: 2}"), doc);
+}
- TEST(DottedMod, PrepareApplyExtendPath) {
- Document doc(fromjson("{a: {c: 1}}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
+TEST(SimpleMod, PrepareApplyChangeType) {
+ Document doc(fromjson("{a: 'str'}"));
+ Mod setMod(fromjson("{$set: {a: 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {c: 1, b: 2}}"), doc);
- }
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: 2}"), doc);
+}
- TEST(DottedMod, PrepareApplyNewPath) {
- Document doc(fromjson("{c: 1}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
+TEST(SimpleMod, PrepareApplyNewPath) {
+ Document doc(fromjson("{b: 1}"));
+ Mod setMod(fromjson("{$set: {a: 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{c: 1, a: {b: 2}}"), doc);
- }
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{b: 1, a: 2}"), doc);
+}
- TEST(DottedMod, PrepareApplyEmptyDoc) {
- Document doc(fromjson("{}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
+TEST(SimpleMod, LogNormal) {
+ BSONObj obj = fromjson("{a: 1}");
+ Mod setMod(fromjson("{$set: {a: 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ Document doc(obj);
+ ModifierInterface::ExecInfo dummy;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &dummy));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {a: 2}}"), logDoc);
+}
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc);
- }
+//
+// Simple dotted mod
+//
- TEST(DottedMod, PrepareApplyFieldWithDot) {
- Document doc(fromjson("{'a.b':4}"));
- Mod setMod(fromjson("{$set: {'a.b': 2}}"));
+TEST(DottedMod, PrepareNoOp) {
+ Document doc(fromjson("{a: {b: 2}}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_TRUE(execInfo.noOp);
+}
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{'a.b':4, a: {b: 2}}"), doc);
- }
+TEST(DottedMod, PreparePathNotViable) {
+ Document doc(fromjson("{a:1}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- //
- // Indexed mod
- //
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
- TEST(IndexedMod, PrepareNoOp) {
- Document doc(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+TEST(DottedMod, PreparePathNotViableArrray) {
+ Document doc(fromjson("{a:[{b:1}]}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_TRUE(execInfo.noOp);
- }
+TEST(DottedMod, PrepareApplyInPlace) {
+ Document doc(fromjson("{a: {b: 1}}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- TEST(IndexedMod, PrepareNonViablePath) {
- Document doc(fromjson("{a: 0}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, PrepareApplyInPlace) {
- Document doc(fromjson("{a: [{b: 0},{b: 1},{b: 1}]}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(DottedMod, PrepareApplyChangeType) {
+ Document doc(fromjson("{a: {b: 'str'}}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, PrepareApplyNormalArray) {
- Document doc(fromjson("{a: [{b: 0},{b: 1}]}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(DottedMod, PrepareApplyChangePath) {
+ Document doc(fromjson("{a: {b: {c: 1}}}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, PrepareApplyPaddingArray) {
- Document doc(fromjson("{a: [{b: 0}]}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(DottedMod, PrepareApplyExtendPath) {
+ Document doc(fromjson("{a: {c: 1}}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [{b: 0},null,{b: 2}]}"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, PrepareApplyNumericObject) {
- Document doc(fromjson("{a: {b: 0}}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {c: 1, b: 2}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(DottedMod, PrepareApplyNewPath) {
+ Document doc(fromjson("{c: 1}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {b: 0, '2': {b: 2}}}"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, PrepareApplyNumericField) {
- Document doc(fromjson("{a: {'2': {b: 1}}}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{c: 1, a: {b: 2}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(DottedMod, PrepareApplyEmptyDoc) {
+ Document doc(fromjson("{}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, PrepareApplyExtendNumericField) {
- Document doc(fromjson("{a: {'2': {c: 1}}}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(DottedMod, PrepareApplyFieldWithDot) {
+ Document doc(fromjson("{'a.b':4}"));
+ Mod setMod(fromjson("{$set: {'a.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {'2': {c: 1, b: 2}}}"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, PrepareApplyEmptyObject) {
- Document doc(fromjson("{a: {}}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{'a.b':4, a: {b: 2}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+//
+// Indexed mod
+//
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+TEST(IndexedMod, PrepareNoOp) {
+ Document doc(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- TEST(IndexedMod, PrepareApplyEmptyArray) {
- Document doc(fromjson("{a: []}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_TRUE(execInfo.noOp);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(IndexedMod, PrepareNonViablePath) {
+ Document doc(fromjson("{a: 0}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [null, null, {b: 2}]}"), doc);
- }
+TEST(IndexedMod, PrepareApplyInPlace) {
+ Document doc(fromjson("{a: [{b: 0},{b: 1},{b: 1}]}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- TEST(IndexedMod, PrepareApplyInexistent) {
- Document doc(fromjson("{}"));
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_OK(setMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc);
+}
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc);
- }
+TEST(IndexedMod, PrepareApplyNormalArray) {
+ Document doc(fromjson("{a: [{b: 0},{b: 1}]}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- TEST(IndexedMod, LogNormal) {
- BSONObj obj = fromjson("{a: [{b:0}, {b:1}]}");
- Document doc(obj);
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a.2.b': 2}}"), logDoc);
- }
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc);
+}
- TEST(IndexedMod, LogEmptyArray) {
- BSONObj obj = fromjson("{a: []}");
- Document doc(obj);
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+TEST(IndexedMod, PrepareApplyPaddingArray) {
+ Document doc(fromjson("{a: [{b: 0}]}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a.2.b': 2}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, LogEmptyObject) {
- BSONObj obj = fromjson("{a: []}");
- Document doc(obj);
- Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [{b: 0},null,{b: 2}]}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(IndexedMod, PrepareApplyNumericObject) {
+ Document doc(fromjson("{a: {b: 0}}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'a.2.b': 2}}"), logDoc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- //
- // Indexed complex mod
- //
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedComplexMod, PrepareNoOp) {
- Document doc(fromjson("{a: [{b: {c: 0, d: 0}}, {b: {c: 1, d: 1}}]}}"));
- Mod setMod(fromjson("{$set: {'a.1.b': {c: 1, d: 1}}}"));
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {b: 0, '2': {b: 2}}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(IndexedMod, PrepareApplyNumericField) {
+ Document doc(fromjson("{a: {'2': {b: 1}}}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
- ASSERT_TRUE(execInfo.noOp);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- TEST(IndexedComplexMod, PrepareSameStructure) {
- Document doc(fromjson("{a: [{b: {c: 0, d: 0}}, {b: {c: 1, xxx: 1}}]}}"));
- Mod setMod(fromjson("{$set: {'a.1.b': {c: 1, d: 1}}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(setMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
- ASSERT_FALSE(execInfo.noOp);
- }
+TEST(IndexedMod, PrepareApplyExtendNumericField) {
+ Document doc(fromjson("{a: {'2': {c: 1}}}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- //
- // Replication version where viable paths don't block modification
- //
- TEST(NonViablePathWithoutRepl, ControlRun) {
- Document doc(fromjson("{a: 1}"));
- Mod setMod(fromjson("{$set: {'a.1.b': 1}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(NonViablePathWithRepl, SingleField) {
- Document doc(fromjson("{_id:1, a: 1}"));
- Mod setMod(fromjson("{$set: {'a.1.b': 1}}"), true);
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {'2': {c: 1, b: 2}}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(IndexedMod, PrepareApplyEmptyObject) {
+ Document doc(fromjson("{a: {}}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{_id:1, a: {'1': {b: 1}}}"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(NonViablePathWithRepl, SingleFieldNoId) {
- Document doc(fromjson("{a: 1}"));
- Mod setMod(fromjson("{$set: {'a.1.b': 1}}"), true);
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(IndexedMod, PrepareApplyEmptyArray) {
+ Document doc(fromjson("{a: []}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {'1': {b: 1}}}"), doc);
- }
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [null, null, {b: 2}]}"), doc);
+}
+
+TEST(IndexedMod, PrepareApplyInexistent) {
+ Document doc(fromjson("{}"));
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- TEST(NonViablePathWithRepl, NestedField) {
- Document doc(fromjson("{_id:1, a: {a: 1}}"));
- Mod setMod(fromjson("{$set: {'a.a.1.b': 1}}"), true);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.2.b");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.a.1.b");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc);
+}
+
+TEST(IndexedMod, LogNormal) {
+ BSONObj obj = fromjson("{a: [{b:0}, {b:1}]}");
+ Document doc(obj);
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{_id:1, a: {a: {'1': {b: 1}}}}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- TEST(NonViablePathWithRepl, DoubleNestedField) {
- Document doc(fromjson("{_id:1, a: {b: {c: 1}}}"));
- Mod setMod(fromjson("{$set: {'a.b.c.d': 2}}"), true);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a.2.b': 2}}"), logDoc);
+}
+
+TEST(IndexedMod, LogEmptyArray) {
+ BSONObj obj = fromjson("{a: []}");
+ Document doc(obj);
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b.c.d");
- ASSERT_FALSE(execInfo.noOp);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a.2.b': 2}}"), logDoc);
+}
+
+TEST(IndexedMod, LogEmptyObject) {
+ BSONObj obj = fromjson("{a: []}");
+ Document doc(obj);
+ Mod setMod(fromjson("{$set: {'a.2.b': 2}}"));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{_id:1, a: {b: {c: {d: 2}}}}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- TEST(NonViablePathWithRepl, NestedFieldNoId) {
- Document doc(fromjson("{a: {a: 1}}"));
- Mod setMod(fromjson("{$set: {'a.a.1.b': 1}}"), true);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'a.2.b': 2}}"), logDoc);
+}
+
+//
+// Indexed complex mod
+//
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(IndexedComplexMod, PrepareNoOp) {
+ Document doc(fromjson("{a: [{b: {c: 0, d: 0}}, {b: {c: 1, d: 1}}]}}"));
+ Mod setMod(fromjson("{$set: {'a.1.b': {c: 1, d: 1}}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.a.1.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
+ ASSERT_TRUE(execInfo.noOp);
+}
+
+TEST(IndexedComplexMod, PrepareSameStructure) {
+ Document doc(fromjson("{a: [{b: {c: 0, d: 0}}, {b: {c: 1, xxx: 1}}]}}"));
+ Mod setMod(fromjson("{$set: {'a.1.b': {c: 1, d: 1}}}"));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: {a: {'1': {b: 1}}}}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- TEST(NonViablePathWithRepl, ReplayArrayFieldNotAppendedItermediate) {
- Document doc(fromjson("{_id: 0, a: [1, {b: [1]}]}"));
- Mod setMod(fromjson("{$set: {'a.0.b': [0,2]}}}"), true);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
+ ASSERT_FALSE(execInfo.noOp);
+}
+
+//
+// Replication version where viable paths don't block modification
+//
+TEST(NonViablePathWithoutRepl, ControlRun) {
+ Document doc(fromjson("{a: 1}"));
+ Mod setMod(fromjson("{$set: {'a.1.b': 1}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(setMod.prepare(doc.root(), "", &execInfo));
+}
+
+TEST(NonViablePathWithRepl, SingleField) {
+ Document doc(fromjson("{_id:1, a: 1}"));
+ Mod setMod(fromjson("{$set: {'a.1.b': 1}}"), true);
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{_id:1, a: {'1': {b: 1}}}"), doc);
+}
+
+TEST(NonViablePathWithRepl, SingleFieldNoId) {
+ Document doc(fromjson("{a: 1}"));
+ Mod setMod(fromjson("{$set: {'a.1.b': 1}}"), true);
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {'1': {b: 1}}}"), doc);
+}
+
+TEST(NonViablePathWithRepl, NestedField) {
+ Document doc(fromjson("{_id:1, a: {a: 1}}"));
+ Mod setMod(fromjson("{$set: {'a.a.1.b': 1}}"), true);
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.a.1.b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{_id:1, a: {a: {'1': {b: 1}}}}"), doc);
+}
+
+TEST(NonViablePathWithRepl, DoubleNestedField) {
+ Document doc(fromjson("{_id:1, a: {b: {c: 1}}}"));
+ Mod setMod(fromjson("{$set: {'a.b.c.d': 2}}"), true);
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b.c.d");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{_id:1, a: {b: {c: {d: 2}}}}"), doc);
+}
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{_id: 0, a: [{b: [0,2]}, {b: [1]}]}"), doc);
- }
+TEST(NonViablePathWithRepl, NestedFieldNoId) {
+ Document doc(fromjson("{a: {a: 1}}"));
+ Mod setMod(fromjson("{$set: {'a.a.1.b': 1}}"), true);
- // Cases from users/issues/jstests
- TEST(JsTestIssues, Set6) {
- Document doc(fromjson("{_id: 1, r: {a:1, b:2}}"));
- Mod setMod(fromjson("{$set: {'r.a': 2}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.a.1.b");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "r.a");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: {a: {'1': {b: 1}}}}"), doc);
+}
+
+TEST(NonViablePathWithRepl, ReplayArrayFieldNotAppendedItermediate) {
+ Document doc(fromjson("{_id: 0, a: [1, {b: [1]}]}"));
+ Mod setMod(fromjson("{$set: {'a.0.b': [0,2]}}}"), true);
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{_id: 1, r: {a:2, b:2}}"), doc);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
+ ASSERT_FALSE(execInfo.noOp);
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'r.a': 2}}"), logDoc);
- }
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{_id: 0, a: [{b: [0,2]}, {b: [1]}]}"), doc);
+}
+
+// Cases from users/issues/jstests
+TEST(JsTestIssues, Set6) {
+ Document doc(fromjson("{_id: 1, r: {a:1, b:2}}"));
+ Mod setMod(fromjson("{$set: {'r.a': 2}}"));
- // Test which failed before and is here to verify correct execution.
- TEST(JsTestIssues, Set6FromRepl) {
- Document doc(fromjson("{_id: 1, r: {a:1, b:2}}"));
- Mod setMod(fromjson("{$set: { 'r.a': 2}}"), true);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "r.a");
+ ASSERT_FALSE(execInfo.noOp);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "r.a");
- ASSERT_FALSE(execInfo.noOp);
+ ASSERT_OK(setMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{_id: 1, r: {a:2, b:2}}"), doc);
- ASSERT_OK(setMod.apply());
- ASSERT_TRUE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{_id: 1, r: {a:2, b:2} }"), doc);
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'r.a': 2}}"), logDoc);
+}
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(setMod.log(&logBuilder));
- ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
- ASSERT_EQUALS(fromjson("{$set: {'r.a': 2}}"), logDoc);
- }
+// A test that previously failed; kept here to verify correct execution.
+TEST(JsTestIssues, Set6FromRepl) {
+ Document doc(fromjson("{_id: 1, r: {a:1, b:2}}"));
+ Mod setMod(fromjson("{$set: { 'r.a': 2}}"), true);
- TEST(Ephemeral, ApplySetModToEphemeralDocument) {
- // The following mod when applied to a document constructed node by node exposed a
- // latent debug only defect in mutable BSON, so this is more a test of mutable than
- // $set.
- Document doc;
- Element x = doc.makeElementObject("x");
- doc.root().pushBack(x);
- Element a = doc.makeElementInt("a", 100);
- x.pushBack(a);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- Mod setMod(fromjson("{ $set: { x: { a: 100, b: 2 }}}"), true);
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "r.a");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(setMod.apply());
+ ASSERT_TRUE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{_id: 1, r: {a:2, b:2} }"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(setMod.log(&logBuilder));
+ ASSERT_EQUALS(countChildren(logDoc.root()), 1u);
+ ASSERT_EQUALS(fromjson("{$set: {'r.a': 2}}"), logDoc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
+TEST(Ephemeral, ApplySetModToEphemeralDocument) {
+    // The following mod, when applied to a document constructed node by node, exposed a
+    // latent debug-only defect in mutable BSON, so this is more a test of mutable BSON
+    // than of $set.
+ Document doc;
+ Element x = doc.makeElementObject("x");
+ doc.root().pushBack(x);
+ Element a = doc.makeElementInt("a", 100);
+ x.pushBack(a);
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "x");
- ASSERT_FALSE(execInfo.noOp);
+ Mod setMod(fromjson("{ $set: { x: { a: 100, b: 2 }}}"), true);
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(setMod.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(setMod.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{ x : { a : 100, b : 2 } }"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "x");
+ ASSERT_FALSE(execInfo.noOp);
-} // unnamed namespace
+ ASSERT_OK(setMod.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{ x : { a : 100, b : 2 } }"), doc);
+}
+
+} // unnamed namespace
diff --git a/src/mongo/db/ops/modifier_table.cpp b/src/mongo/db/ops/modifier_table.cpp
index f35e79cf5c5..81bc612df48 100644
--- a/src/mongo/db/ops/modifier_table.cpp
+++ b/src/mongo/db/ops/modifier_table.cpp
@@ -49,96 +49,93 @@
namespace mongo {
- using std::make_pair;
- using std::string;
+using std::make_pair;
+using std::string;
namespace modifiertable {
- namespace {
+namespace {
- struct ModifierEntry {
- string name;
- ModifierType type;
+struct ModifierEntry {
+ string name;
+ ModifierType type;
- ModifierEntry(StringData name, ModifierType type)
- : name(name.toString())
- , type(type) {
- }
- };
+ ModifierEntry(StringData name, ModifierType type) : name(name.toString()), type(type) {}
+};
- typedef unordered_map<StringData, ModifierEntry*, StringData::Hasher> NameMap;
+typedef unordered_map<StringData, ModifierEntry*, StringData::Hasher> NameMap;
- NameMap* MODIFIER_NAME_MAP;
+NameMap* MODIFIER_NAME_MAP;
- void init(NameMap* nameMap) {
- ModifierEntry* entryAddToSet = new ModifierEntry("$addToSet", MOD_ADD_TO_SET);
- nameMap->insert(make_pair(StringData(entryAddToSet->name), entryAddToSet));
+void init(NameMap* nameMap) {
+ ModifierEntry* entryAddToSet = new ModifierEntry("$addToSet", MOD_ADD_TO_SET);
+ nameMap->insert(make_pair(StringData(entryAddToSet->name), entryAddToSet));
- ModifierEntry* entryBit = new ModifierEntry("$bit", MOD_BIT);
- nameMap->insert(make_pair(StringData(entryBit->name), entryBit));
+ ModifierEntry* entryBit = new ModifierEntry("$bit", MOD_BIT);
+ nameMap->insert(make_pair(StringData(entryBit->name), entryBit));
- ModifierEntry* entryCurrentDate = new ModifierEntry("$currentDate", MOD_CURRENTDATE);
- nameMap->insert(make_pair(StringData(entryCurrentDate->name), entryCurrentDate));
+ ModifierEntry* entryCurrentDate = new ModifierEntry("$currentDate", MOD_CURRENTDATE);
+ nameMap->insert(make_pair(StringData(entryCurrentDate->name), entryCurrentDate));
- ModifierEntry* entryInc = new ModifierEntry("$inc", MOD_INC);
- nameMap->insert(make_pair(StringData(entryInc->name), entryInc));
+ ModifierEntry* entryInc = new ModifierEntry("$inc", MOD_INC);
+ nameMap->insert(make_pair(StringData(entryInc->name), entryInc));
- ModifierEntry* entryMax = new ModifierEntry("$max", MOD_MAX);
- nameMap->insert(make_pair(StringData(entryMax->name), entryMax));
+ ModifierEntry* entryMax = new ModifierEntry("$max", MOD_MAX);
+ nameMap->insert(make_pair(StringData(entryMax->name), entryMax));
- ModifierEntry* entryMin = new ModifierEntry("$min", MOD_MIN);
- nameMap->insert(make_pair(StringData(entryMin->name), entryMin));
+ ModifierEntry* entryMin = new ModifierEntry("$min", MOD_MIN);
+ nameMap->insert(make_pair(StringData(entryMin->name), entryMin));
- ModifierEntry* entryMul = new ModifierEntry("$mul", MOD_MUL);
- nameMap->insert(make_pair(StringData(entryMul->name), entryMul));
+ ModifierEntry* entryMul = new ModifierEntry("$mul", MOD_MUL);
+ nameMap->insert(make_pair(StringData(entryMul->name), entryMul));
- ModifierEntry* entryPop = new ModifierEntry("$pop", MOD_POP);
- nameMap->insert(make_pair(StringData(entryPop->name), entryPop));
+ ModifierEntry* entryPop = new ModifierEntry("$pop", MOD_POP);
+ nameMap->insert(make_pair(StringData(entryPop->name), entryPop));
- ModifierEntry* entryPull = new ModifierEntry("$pull", MOD_PULL);
- nameMap->insert(make_pair(StringData(entryPull->name), entryPull));
+ ModifierEntry* entryPull = new ModifierEntry("$pull", MOD_PULL);
+ nameMap->insert(make_pair(StringData(entryPull->name), entryPull));
- ModifierEntry* entryPullAll = new ModifierEntry("$pullAll", MOD_PULL_ALL);
- nameMap->insert(make_pair(StringData(entryPullAll->name), entryPullAll));
+ ModifierEntry* entryPullAll = new ModifierEntry("$pullAll", MOD_PULL_ALL);
+ nameMap->insert(make_pair(StringData(entryPullAll->name), entryPullAll));
- ModifierEntry* entryPush = new ModifierEntry("$push", MOD_PUSH);
- nameMap->insert(make_pair(StringData(entryPush->name), entryPush));
+ ModifierEntry* entryPush = new ModifierEntry("$push", MOD_PUSH);
+ nameMap->insert(make_pair(StringData(entryPush->name), entryPush));
- ModifierEntry* entryPushAll = new ModifierEntry("$pushAll", MOD_PUSH_ALL);
- nameMap->insert(make_pair(StringData(entryPushAll->name), entryPushAll));
+ ModifierEntry* entryPushAll = new ModifierEntry("$pushAll", MOD_PUSH_ALL);
+ nameMap->insert(make_pair(StringData(entryPushAll->name), entryPushAll));
- ModifierEntry* entrySet = new ModifierEntry("$set", MOD_SET);
- nameMap->insert(make_pair(StringData(entrySet->name), entrySet));
+ ModifierEntry* entrySet = new ModifierEntry("$set", MOD_SET);
+ nameMap->insert(make_pair(StringData(entrySet->name), entrySet));
- ModifierEntry* entrySetOnInsert = new ModifierEntry("$setOnInsert", MOD_SET_ON_INSERT);
- nameMap->insert(make_pair(StringData(entrySetOnInsert->name), entrySetOnInsert));
+ ModifierEntry* entrySetOnInsert = new ModifierEntry("$setOnInsert", MOD_SET_ON_INSERT);
+ nameMap->insert(make_pair(StringData(entrySetOnInsert->name), entrySetOnInsert));
- ModifierEntry* entryRename = new ModifierEntry("$rename", MOD_RENAME);
- nameMap->insert(make_pair(StringData(entryRename->name), entryRename));
+ ModifierEntry* entryRename = new ModifierEntry("$rename", MOD_RENAME);
+ nameMap->insert(make_pair(StringData(entryRename->name), entryRename));
- ModifierEntry* entryUnset = new ModifierEntry("$unset", MOD_UNSET);
- nameMap->insert(make_pair(StringData(entryUnset->name), entryUnset));
- }
+ ModifierEntry* entryUnset = new ModifierEntry("$unset", MOD_UNSET);
+ nameMap->insert(make_pair(StringData(entryUnset->name), entryUnset));
+}
- } // unnamed namespace
+} // unnamed namespace
- MONGO_INITIALIZER(ModifierTable)(InitializerContext* context) {
- MODIFIER_NAME_MAP = new NameMap;
- init(MODIFIER_NAME_MAP);
+MONGO_INITIALIZER(ModifierTable)(InitializerContext* context) {
+ MODIFIER_NAME_MAP = new NameMap;
+ init(MODIFIER_NAME_MAP);
- return Status::OK();
- }
+ return Status::OK();
+}
- ModifierType getType(StringData typeStr) {
- NameMap::const_iterator it = MODIFIER_NAME_MAP->find(typeStr);
- if (it == MODIFIER_NAME_MAP->end()) {
- return MOD_UNKNOWN;
- }
- return it->second->type;
+ModifierType getType(StringData typeStr) {
+ NameMap::const_iterator it = MODIFIER_NAME_MAP->find(typeStr);
+ if (it == MODIFIER_NAME_MAP->end()) {
+ return MOD_UNKNOWN;
}
+ return it->second->type;
+}
- ModifierInterface* makeUpdateMod(ModifierType modType) {
- switch (modType) {
+ModifierInterface* makeUpdateMod(ModifierType modType) {
+ switch (modType) {
case MOD_ADD_TO_SET:
return new ModifierAddToSet;
case MOD_BIT:
@@ -173,8 +170,8 @@ namespace modifiertable {
return new ModifierUnset;
default:
return NULL;
- }
}
+}
-} // namespace modifiertable
-} // namespace mongo
+} // namespace modifiertable
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_table.h b/src/mongo/db/ops/modifier_table.h
index 8087114e657..acefc3290dc 100644
--- a/src/mongo/db/ops/modifier_table.h
+++ b/src/mongo/db/ops/modifier_table.h
@@ -33,38 +33,38 @@
namespace mongo {
namespace modifiertable {
- // NOTE: Please update jstests/verify_update_mods.js or include a jstest for any new mods
- enum ModifierType {
- MOD_ADD_TO_SET,
- MOD_BIT,
- MOD_CURRENTDATE,
- MOD_INC,
- MOD_MAX,
- MOD_MIN,
- MOD_MUL,
- MOD_POP,
- MOD_PULL,
- MOD_PULL_ALL,
- MOD_PUSH,
- MOD_PUSH_ALL,
- MOD_SET,
- MOD_SET_ON_INSERT,
- MOD_RENAME,
- MOD_UNSET,
- MOD_UNKNOWN
- };
+// NOTE: Please update jstests/verify_update_mods.js or include a jstest for any new mods
+enum ModifierType {
+ MOD_ADD_TO_SET,
+ MOD_BIT,
+ MOD_CURRENTDATE,
+ MOD_INC,
+ MOD_MAX,
+ MOD_MIN,
+ MOD_MUL,
+ MOD_POP,
+ MOD_PULL,
+ MOD_PULL_ALL,
+ MOD_PUSH,
+ MOD_PUSH_ALL,
+ MOD_SET,
+ MOD_SET_ON_INSERT,
+ MOD_RENAME,
+ MOD_UNSET,
+ MOD_UNKNOWN
+};
- /**
- * Returns the modifier type for 'typeStr', if it was recognized as an existing update
- * mod, or MOD_UNKNOWN otherwise.
- */
- ModifierType getType(StringData typeStr);
+/**
+ * Returns the modifier type for 'typeStr', if it was recognized as an existing update
+ * mod, or MOD_UNKNOWN otherwise.
+ */
+ModifierType getType(StringData typeStr);
- /**
- * Instantiate an update mod that corresponds to 'modType' or NULL if 'modType' is not
- * valid. The ownership of the new object is the caller's.
- */
- ModifierInterface* makeUpdateMod(ModifierType modType);
+/**
+ * Instantiates an update mod that corresponds to 'modType', or returns NULL if 'modType' is
+ * not valid. Ownership of the new object passes to the caller.
+ */
+ModifierInterface* makeUpdateMod(ModifierType modType);
-} // namespace modifiertable
-} // namespace mongo
+} // namespace modifiertable
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_table_test.cpp b/src/mongo/db/ops/modifier_table_test.cpp
index 239f9eb5a34..c5225142668 100644
--- a/src/mongo/db/ops/modifier_table_test.cpp
+++ b/src/mongo/db/ops/modifier_table_test.cpp
@@ -35,25 +35,25 @@
namespace {
- using namespace mongo::modifiertable;
+using namespace mongo::modifiertable;
- using mongo::ModifierInterface;
- using std::unique_ptr;
+using mongo::ModifierInterface;
+using std::unique_ptr;
- TEST(getType, Normal) {
- ASSERT_EQUALS(getType("$set"), MOD_SET);
- ASSERT_EQUALS(getType("$AModThatDoesn'tExist"), MOD_UNKNOWN);
- ASSERT_EQUALS(getType("NotAModExpression"), MOD_UNKNOWN);
- }
+TEST(getType, Normal) {
+ ASSERT_EQUALS(getType("$set"), MOD_SET);
+ ASSERT_EQUALS(getType("$AModThatDoesn'tExist"), MOD_UNKNOWN);
+ ASSERT_EQUALS(getType("NotAModExpression"), MOD_UNKNOWN);
+}
- TEST(makeUpdateMod, Normal) {
- unique_ptr<ModifierInterface> mod;
+TEST(makeUpdateMod, Normal) {
+ unique_ptr<ModifierInterface> mod;
- mod.reset(makeUpdateMod(MOD_SET));
- ASSERT_NOT_EQUALS(mod.get(), static_cast<ModifierInterface*>(0));
+ mod.reset(makeUpdateMod(MOD_SET));
+ ASSERT_NOT_EQUALS(mod.get(), static_cast<ModifierInterface*>(0));
- mod.reset(makeUpdateMod(MOD_UNKNOWN));
- ASSERT_EQUALS(mod.get(), static_cast<ModifierInterface*>(0));
- }
+ mod.reset(makeUpdateMod(MOD_UNKNOWN));
+ ASSERT_EQUALS(mod.get(), static_cast<ModifierInterface*>(0));
+}
-} // unnamed namespace
+} // unnamed namespace
diff --git a/src/mongo/db/ops/modifier_unset.cpp b/src/mongo/db/ops/modifier_unset.cpp
index 9b54480c62d..673cbdb8d16 100644
--- a/src/mongo/db/ops/modifier_unset.cpp
+++ b/src/mongo/db/ops/modifier_unset.cpp
@@ -37,149 +37,131 @@
namespace mongo {
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- struct ModifierUnset::PreparedState {
+struct ModifierUnset::PreparedState {
+ PreparedState(mutablebson::Document* targetDoc)
+ : doc(*targetDoc), idxFound(0), elemFound(doc.end()), noOp(false) {}
- PreparedState(mutablebson::Document* targetDoc)
- : doc(*targetDoc)
- , idxFound(0)
- , elemFound(doc.end())
- , noOp(false) {
- }
+ // Document that is going to be changed.
+ mutablebson::Document& doc;
- // Document that is going to be changed.
- mutablebson::Document& doc;
+    // Index in _fieldRef for which an Element exists in the document.
+ size_t idxFound;
- // Index in _fieldRef for which an Element exist in the document.
- size_t idxFound;
+ // Element corresponding to _fieldRef[0.._idxFound].
+ mutablebson::Element elemFound;
- // Element corresponding to _fieldRef[0.._idxFound].
- mutablebson::Element elemFound;
+    // Is this $unset a no-op?
+ bool noOp;
+};
- // This $set is a no-op?
- bool noOp;
+ModifierUnset::ModifierUnset() : _fieldRef(), _posDollar(0), _val() {}
- };
+ModifierUnset::~ModifierUnset() {}
- ModifierUnset::ModifierUnset()
- : _fieldRef()
- , _posDollar(0)
- , _val() {
- }
+Status ModifierUnset::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
+ //
+ // field name analysis
+ //
- ModifierUnset::~ModifierUnset() {
+ // Perform standard field name and updateable checks.
+ _fieldRef.parse(modExpr.fieldName());
+ Status status = fieldchecker::isUpdatable(_fieldRef);
+ if (!status.isOK()) {
+ return status;
}
- Status ModifierUnset::init(const BSONElement& modExpr, const Options& opts,
- bool* positional) {
-
- //
- // field name analysis
- //
-
- // Perform standard field name and updateable checks.
- _fieldRef.parse(modExpr.fieldName());
- Status status = fieldchecker::isUpdatable(_fieldRef);
- if (! status.isOK()) {
- return status;
- }
+ // If a $-positional operator was used, get the index in which it occurred
+ // and ensure only one occurrence.
+ size_t foundCount;
+ bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
- // If a $-positional operator was used, get the index in which it occurred
- // and ensure only one occurrence.
- size_t foundCount;
- bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);
+ if (positional)
+ *positional = foundDollar;
- if (positional)
- *positional = foundDollar;
+ if (foundDollar && foundCount > 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Too many positional (i.e. '$') elements found in path '"
+ << _fieldRef.dottedField() << "'");
+ }
- if (foundDollar && foundCount > 1) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
- }
+ //
+ // value analysis
+ //
- //
- // value analysis
- //
+    // Unset accepts any value, since no semantics are attached to it.
+ _val = modExpr;
- // Unset takes any value, since there is no semantics attached to such value.
- _val = modExpr;
+ return Status::OK();
+}
- return Status::OK();
- }
+Status ModifierUnset::prepare(mutablebson::Element root,
+ StringData matchedField,
+ ExecInfo* execInfo) {
+ _preparedState.reset(new PreparedState(&root.getDocument()));
- Status ModifierUnset::prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo) {
-
- _preparedState.reset(new PreparedState(&root.getDocument()));
-
- // If we have a $-positional field, it is time to bind it to an actual field part.
- if (_posDollar) {
- if (matchedField.empty()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "The positional operator did not find the match "
- "needed from the query. Unexpanded update: "
- << _fieldRef.dottedField());
- }
- _fieldRef.setPart(_posDollar, matchedField);
+ // If we have a $-positional field, it is time to bind it to an actual field part.
+ if (_posDollar) {
+ if (matchedField.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "The positional operator did not find the match "
+ "needed from the query. Unexpanded update: "
+ << _fieldRef.dottedField());
}
+ _fieldRef.setPart(_posDollar, matchedField);
+ }
- // Locate the field name in 'root'. Note that if we don't have the full path in the
- // doc, there isn't anything to unset, really.
- Status status = pathsupport::findLongestPrefix(_fieldRef,
- root,
- &_preparedState->idxFound,
- &_preparedState->elemFound);
- if (!status.isOK() ||
- _preparedState->idxFound != (_fieldRef.numParts() -1)) {
- execInfo->noOp = _preparedState->noOp = true;
- execInfo->fieldRef[0] = &_fieldRef;
-
- return Status::OK();
- }
-
- // If there is indeed something to unset, we register so, along with the interest in
- // the field name. The driver needs this info to sort out if there is any conflict
- // among mods.
+ // Locate the field name in 'root'. Note that if we don't have the full path in the
+ // doc, there isn't anything to unset, really.
+ Status status = pathsupport::findLongestPrefix(
+ _fieldRef, root, &_preparedState->idxFound, &_preparedState->elemFound);
+ if (!status.isOK() || _preparedState->idxFound != (_fieldRef.numParts() - 1)) {
+ execInfo->noOp = _preparedState->noOp = true;
execInfo->fieldRef[0] = &_fieldRef;
- // The only way for an $unset to be inplace is for its target field to be the last one
- // of the object. That is, it is always the right child on its paths. The current
- // rationale is that there should be no holes in a BSONObj and, to be in place, no
- // field boundaries must change.
- //
- // TODO:
- // mutablebson::Element curr = _preparedState->elemFound;
- // while (curr.ok()) {
- // if (curr.rightSibling().ok()) {
- // }
- // curr = curr.parent();
- // }
-
return Status::OK();
}
- Status ModifierUnset::apply() const {
- dassert(!_preparedState->noOp);
-
- // Our semantics says that, if we're unseting an element of an array, we swap that
- // value to null. The rationale is that we don't want other array elements to change
- // indices. (That could be achieved with $pull-ing element from it.)
- if (_preparedState->elemFound.parent().ok() &&
- _preparedState->elemFound.parent().getType() == Array) {
- return _preparedState->elemFound.setValueNull();
- }
- else {
- return _preparedState->elemFound.remove();
- }
+    // If there is indeed something to unset, we record that, along with our interest in
+    // the field name. The driver needs this info to sort out whether there is any conflict
+    // among mods.
+ execInfo->fieldRef[0] = &_fieldRef;
+
+    // The only way for an $unset to be in place is for its target field to be the last one
+    // in the object. That is, it is always the rightmost child on its path. The current
+    // rationale is that there should be no holes in a BSONObj and that, to be in place, no
+    // field boundaries may change.
+ //
+ // TODO:
+ // mutablebson::Element curr = _preparedState->elemFound;
+ // while (curr.ok()) {
+ // if (curr.rightSibling().ok()) {
+ // }
+ // curr = curr.parent();
+ // }
+
+ return Status::OK();
+}
+
+Status ModifierUnset::apply() const {
+ dassert(!_preparedState->noOp);
+
+    // Our semantics say that, if we're unsetting an element of an array, we swap that
+    // value to null. The rationale is that we don't want other array elements to change
+    // indices. (That could be achieved by $pull-ing the element from the array.)
+ if (_preparedState->elemFound.parent().ok() &&
+ _preparedState->elemFound.parent().getType() == Array) {
+ return _preparedState->elemFound.setValueNull();
+ } else {
+ return _preparedState->elemFound.remove();
}
+}
- Status ModifierUnset::log(LogBuilder* logBuilder) const {
- return logBuilder->addToUnsets(_fieldRef.dottedField());
- }
+Status ModifierUnset::log(LogBuilder* logBuilder) const {
+ return logBuilder->addToUnsets(_fieldRef.dottedField());
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_unset.h b/src/mongo/db/ops/modifier_unset.h
index 53417a3c37e..a36b16ac19c 100644
--- a/src/mongo/db/ops/modifier_unset.h
+++ b/src/mongo/db/ops/modifier_unset.h
@@ -38,66 +38,60 @@
namespace mongo {
- class LogBuilder;
-
- class ModifierUnset : public ModifierInterface {
- MONGO_DISALLOW_COPYING(ModifierUnset);
-
- public:
-
- ModifierUnset();
-
- //
- // Modifier interface implementation
- //
-
- virtual ~ModifierUnset();
-
- /**
- * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as
- * {$unset: {<fieldname: <value>}}. init() extracts the field name and the value to be
- * assigned to it from 'modExpr'. It returns OK if successful or a status describing
- * the error.
- */
- virtual Status init(const BSONElement& modExpr, const Options& opts,
- bool* positional = NULL);
-
- /**
- * Locates the field to be removed under the 'root' element, if it exist, and fills in
- * 'execInfo' accordingly. Return OK if successful or a status describing the error.
- */
- virtual Status prepare(mutablebson::Element root,
- StringData matchedField,
- ExecInfo* execInfo);
-
- /**
- * Removes the found element from the document. If such element was inside an array,
- * removal means setting that array position to 'null'.
- */
- virtual Status apply() const;
-
- /**
- * Adds the exact $unset mod to the log.
- */
- virtual Status log(LogBuilder* logBuilder) const;
-
- private:
-
- // Access to each component of fieldName that's the target of this mod.
- FieldRef _fieldRef;
-
- // 0 or index for $-positional in _fieldRef.
- size_t _posDollar;
-
- // Element of the $set expression.
- BSONElement _val;
-
- // The instance of the field in the provided doc. This state is valid after a
- // prepare() was issued and until a log() is issued. The document this mod is
- // being prepared against must be live throughout all the calls.
- struct PreparedState;
- std::unique_ptr<PreparedState> _preparedState;
-
- };
-
-} // namespace mongo
+class LogBuilder;
+
+class ModifierUnset : public ModifierInterface {
+ MONGO_DISALLOW_COPYING(ModifierUnset);
+
+public:
+ ModifierUnset();
+
+ //
+ // Modifier interface implementation
+ //
+
+ virtual ~ModifierUnset();
+
+ /**
+     * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from an $unset mod such as
+     * {$unset: {<fieldname>: <value>}}. init() extracts the field name and the value
+     * associated with it from 'modExpr'. It returns OK if successful or a status describing
+     * the error.
+ */
+ virtual Status init(const BSONElement& modExpr, const Options& opts, bool* positional = NULL);
+
+ /**
+     * Locates the field to be removed under the 'root' element, if it exists, and fills in
+     * 'execInfo' accordingly. Returns OK if successful or a status describing the error.
+ */
+ virtual Status prepare(mutablebson::Element root, StringData matchedField, ExecInfo* execInfo);
+
+ /**
+     * Removes the found element from the document. If that element was inside an array,
+     * removal means setting that array position to 'null'.
+ */
+ virtual Status apply() const;
+
+ /**
+ * Adds the exact $unset mod to the log.
+ */
+ virtual Status log(LogBuilder* logBuilder) const;
+
+private:
+ // Access to each component of fieldName that's the target of this mod.
+ FieldRef _fieldRef;
+
+ // 0 or index for $-positional in _fieldRef.
+ size_t _posDollar;
+
+ // Element of the $set expression.
+ BSONElement _val;
+
+ // The instance of the field in the provided doc. This state is valid after a
+ // prepare() was issued and until a log() is issued. The document this mod is
+ // being prepared against must be live throughout all the calls.
+ struct PreparedState;
+ std::unique_ptr<PreparedState> _preparedState;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/modifier_unset_test.cpp b/src/mongo/db/ops/modifier_unset_test.cpp
index 3ca4957e94c..d68f471c7e7 100644
--- a/src/mongo/db/ops/modifier_unset_test.cpp
+++ b/src/mongo/db/ops/modifier_unset_test.cpp
@@ -42,410 +42,412 @@
namespace {
- using mongo::Array;
- using mongo::BSONObj;
- using mongo::fromjson;
- using mongo::LogBuilder;
- using mongo::ModifierInterface;
- using mongo::ModifierUnset;
- using mongo::Status;
- using mongo::StringData;
- using mongo::mutablebson::Document;
- using mongo::mutablebson::Element;
-
- /** Helper to build and manipulate a $set mod. */
- class Mod {
- public:
- Mod() : _mod() {}
-
- explicit Mod(BSONObj modObj) {
- _modObj = modObj;
- ASSERT_OK(_mod.init(_modObj["$unset"].embeddedObject().firstElement(),
- ModifierInterface::Options::normal()));
- }
-
- Status prepare(Element root,
- StringData matchedField,
- ModifierInterface::ExecInfo* execInfo) {
- return _mod.prepare(root, matchedField, execInfo);
- }
-
- Status apply() const {
- return _mod.apply();
- }
-
- Status log(LogBuilder* logBuilder) const {
- return _mod.log(logBuilder);
- }
-
- ModifierUnset& mod() { return _mod; }
-
- BSONObj modObj() { return _modObj; }
-
- private:
- ModifierUnset _mod;
- BSONObj _modObj;
- };
-
- //
- // Simple mod
- //
-
- TEST(SimpleMod, PrepareNoOp) {
- Document doc(fromjson("{}"));
- Mod modUnset(fromjson("{$unset: {a: true}}"));
-
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
-
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_TRUE(execInfo.noOp);
+using mongo::Array;
+using mongo::BSONObj;
+using mongo::fromjson;
+using mongo::LogBuilder;
+using mongo::ModifierInterface;
+using mongo::ModifierUnset;
+using mongo::Status;
+using mongo::StringData;
+using mongo::mutablebson::Document;
+using mongo::mutablebson::Element;
+
+/** Helper to build and manipulate an $unset mod. */
+class Mod {
+public:
+ Mod() : _mod() {}
+
+ explicit Mod(BSONObj modObj) {
+ _modObj = modObj;
+ ASSERT_OK(_mod.init(_modObj["$unset"].embeddedObject().firstElement(),
+ ModifierInterface::Options::normal()));
}
- TEST(SimpleMod, PrepareApplyNormal) {
- Document doc(fromjson("{a: 1, b: 2}"));
- Mod modUnset(fromjson("{$unset: {a: true}}"));
+ Status prepare(Element root, StringData matchedField, ModifierInterface::ExecInfo* execInfo) {
+ return _mod.prepare(root, matchedField, execInfo);
+ }
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ Status apply() const {
+ return _mod.apply();
+ }
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+ Status log(LogBuilder* logBuilder) const {
+ return _mod.log(logBuilder);
+ }
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{b: 2}"), doc);
+ ModifierUnset& mod() {
+ return _mod;
}
- TEST(SimpleMod, PrepareApplyInPlace) {
- Document doc(fromjson("{x: 0, a: 1}"));
- Mod modUnset(fromjson("{$unset: {a: true}}"));
+ BSONObj modObj() {
+ return _modObj;
+ }
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+private:
+ ModifierUnset _mod;
+ BSONObj _modObj;
+};
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+//
+// Simple mod
+//
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
- ASSERT_EQUALS(fromjson("{x: 0}"), doc);
- }
+TEST(SimpleMod, PrepareNoOp) {
+ Document doc(fromjson("{}"));
+ Mod modUnset(fromjson("{$unset: {a: true}}"));
- TEST(SimpleMod, PrepareApplyGeneratesEmptyDocument) {
- Document doc(fromjson("{a: 1}"));
- Mod modUnset(fromjson("{$unset: {a: true}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_TRUE(execInfo.noOp);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(SimpleMod, PrepareApplyNormal) {
+ Document doc(fromjson("{a: 1, b: 2}"));
+ Mod modUnset(fromjson("{$unset: {a: true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
- ASSERT_EQUALS(fromjson("{}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(SimpleMod, PrepareApplyUnsetSubtree) {
- Document doc(fromjson("{a: {b: 1}, c: 2}"));
- Mod modUnset(fromjson("{$unset: {a: true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{b: 2}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(SimpleMod, PrepareApplyInPlace) {
+ Document doc(fromjson("{x: 0, a: 1}"));
+ Mod modUnset(fromjson("{$unset: {a: true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{c: 2}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(SimpleMod, LogNormal) {
- BSONObj obj = fromjson("{a: 1}");
- Document doc(obj);
- Mod modUnset(fromjson("{$unset: {a: true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
+ ASSERT_EQUALS(fromjson("{x: 0}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
- ASSERT_FALSE(execInfo.noOp);
+TEST(SimpleMod, PrepareApplyGeneratesEmptyDocument) {
+ Document doc(fromjson("{a: 1}"));
+ Mod modUnset(fromjson("{$unset: {a: true}}"));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(modUnset.log(&logBuilder));
- ASSERT_EQUALS(modUnset.modObj(), logDoc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- //
- // Dotted mod
- //
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(DottedMod, PrepareNoOp) {
- Document doc(fromjson("{c:2}"));
- Mod modUnset(fromjson("{$unset: {'a.b': true}}"));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
+ ASSERT_EQUALS(fromjson("{}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+TEST(SimpleMod, PrepareApplyUnsetSubtree) {
+ Document doc(fromjson("{a: {b: 1}, c: 2}"));
+ Mod modUnset(fromjson("{$unset: {a: true}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_TRUE(execInfo.noOp);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(DottedMod, PrepareApplyNormal) {
- Document doc(fromjson("{a: {b: 1}, c: 2}"));
- Mod modUnset(fromjson("{$unset: {'a.b': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{c: 2}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+TEST(SimpleMod, LogNormal) {
+ BSONObj obj = fromjson("{a: 1}");
+ Document doc(obj);
+ Mod modUnset(fromjson("{$unset: {a: true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a:{}, c:2}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(DottedMod, PrepareApplyInPlace) {
- Document doc(fromjson("{x: 0, a: {b: 1}}"));
- Mod modUnset(fromjson("{$unset: {'a.b': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(modUnset.log(&logBuilder));
+ ASSERT_EQUALS(modUnset.modObj(), logDoc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+//
+// Dotted mod
+//
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
- ASSERT_EQUALS(fromjson("{x: 0, a:{}}"), doc);
- }
+TEST(DottedMod, PrepareNoOp) {
+ Document doc(fromjson("{c:2}"));
+ Mod modUnset(fromjson("{$unset: {'a.b': true}}"));
- TEST(DottedMod, PrepareApplyUnsetNestedSubobject) {
- Document doc(fromjson("{a: {b: {c: 1}}}"));
- Mod modUnset(fromjson("{$unset: {'a.b': true}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_TRUE(execInfo.noOp);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
- ASSERT_FALSE(execInfo.noOp);
+TEST(DottedMod, PrepareApplyNormal) {
+ Document doc(fromjson("{a: {b: 1}, c: 2}"));
+ Mod modUnset(fromjson("{$unset: {'a.b': true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
- ASSERT_EQUALS(fromjson("{a: {}}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- //
- // Indexed mod
- //
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(IndexedMod, PrepareNoOp) {
- Document doc(fromjson("{a:[]}"));
- Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a:{}, c:2}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+TEST(DottedMod, PrepareApplyInPlace) {
+ Document doc(fromjson("{x: 0, a: {b: 1}}"));
+ Mod modUnset(fromjson("{$unset: {'a.b': true}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
- ASSERT_TRUE(execInfo.noOp);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(IndexedMod, PrepareApplyNormal) {
- Document doc(fromjson("{a:[0,1,2]}"));
- Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
+ ASSERT_EQUALS(fromjson("{x: 0, a:{}}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
- ASSERT_FALSE(execInfo.noOp);
+TEST(DottedMod, PrepareApplyUnsetNestedSubobject) {
+ Document doc(fromjson("{a: {b: {c: 1}}}"));
+ Mod modUnset(fromjson("{$unset: {'a.b': true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a:[null,1,2]}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(IndexedMod, PrepareApplyInPlace) {
- Document doc(fromjson("{b:1, a:[1]}"));
- Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.b");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
+ ASSERT_EQUALS(fromjson("{a: {}}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
- ASSERT_FALSE(execInfo.noOp);
+//
+// Indexed mod
+//
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
- ASSERT_EQUALS(fromjson("{b:1, a:[null]}"), doc);
- }
+TEST(IndexedMod, PrepareNoOp) {
+ Document doc(fromjson("{a:[]}"));
+ Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
- TEST(IndexedMod, PrepareApplyInPlaceNuance) {
- // Can't change the encoding in the middle of a bson stream.
- Document doc(fromjson("{a:[1], b:1}"));
- Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
+ ASSERT_TRUE(execInfo.noOp);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
- ASSERT_FALSE(execInfo.noOp);
+TEST(IndexedMod, PrepareApplyNormal) {
+ Document doc(fromjson("{a:[0,1,2]}"));
+ Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a:[null], b:1}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(IndexedMod, PrepareApplyInnerObject) {
- Document doc(fromjson("{a:[{b:1}]}"));
- Mod modUnset(fromjson("{$unset: {'a.0.b': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a:[null,1,2]}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
- ASSERT_FALSE(execInfo.noOp);
+TEST(IndexedMod, PrepareApplyInPlace) {
+ Document doc(fromjson("{b:1, a:[1]}"));
+ Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a:[{}]}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(IndexedMod, PrepareApplyObject) {
- Document doc(fromjson("{a:[{b:1}]}"));
- Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
+ ASSERT_EQUALS(fromjson("{b:1, a:[null]}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
- ASSERT_FALSE(execInfo.noOp);
+TEST(IndexedMod, PrepareApplyInPlaceNuance) {
+ // Can't change the encoding in the middle of a bson stream.
+ Document doc(fromjson("{a:[1], b:1}"));
+ Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a:[null]}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(IndexedMod, LogNormal) {
- Document doc(fromjson("{a:[0,1,2]}"));
- Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a:[null], b:1}"), doc);
+}
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(modUnset.log(&logBuilder));
- ASSERT_EQUALS(modUnset.modObj(), logDoc);
- }
+TEST(IndexedMod, PrepareApplyInnerObject) {
+ Document doc(fromjson("{a:[{b:1}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.0.b': true}}"));
- //
- // Positional mod
- //
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- TEST(PositionalMod, PrepareNoOp) {
- Document doc(fromjson("{a:[{b:0}]}"));
- Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "1", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a:[{}]}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
- ASSERT_TRUE(execInfo.noOp);
- }
+TEST(IndexedMod, PrepareApplyObject) {
+ Document doc(fromjson("{a:[{b:1}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
- TEST(PositionalMod, PrepareMissingPositional) {
- Document doc(fromjson("{a:[{b:0},{c:1}]}"));
- Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_NOT_OK(modUnset.prepare(doc.root(), "" /* no position */, &execInfo));
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
+ ASSERT_FALSE(execInfo.noOp);
- TEST(PositionalMod, PrepareApplyNormal) {
- Document doc(fromjson("{a:[{b:0},{c:1}]}"));
- Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a:[null]}"), doc);
+}
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
+TEST(IndexedMod, LogNormal) {
+ Document doc(fromjson("{a:[0,1,2]}"));
+ Mod modUnset(fromjson("{$unset: {'a.0': true}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "", &execInfo));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [{}, {c:1}]}"), doc);
- }
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(modUnset.log(&logBuilder));
+ ASSERT_EQUALS(modUnset.modObj(), logDoc);
+}
- TEST(PositionalMod, PrepareApplyObject) {
- Document doc(fromjson("{a:[{b:0},{c:1}]}"));
- Mod modUnset(fromjson("{$unset: {'a.$': true}}"));
+//
+// Positional mod
+//
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
+TEST(PositionalMod, PrepareNoOp) {
+ Document doc(fromjson("{a:[{b:0}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
- ASSERT_FALSE(execInfo.noOp);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "1", &execInfo));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{a: [null, {c:1}]}"), doc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.1.b");
+ ASSERT_TRUE(execInfo.noOp);
+}
- TEST(PositionalMod, PrepareApplyInPlace) {
- Document doc(fromjson("{b:1, a:[{b:1}]}"));
- Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
+TEST(PositionalMod, PrepareMissingPositional) {
+ Document doc(fromjson("{a:[{b:0},{c:1}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_NOT_OK(modUnset.prepare(doc.root(), "" /* no position */, &execInfo));
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
- ASSERT_FALSE(execInfo.noOp);
+TEST(PositionalMod, PrepareApplyNormal) {
+ Document doc(fromjson("{a:[{b:0},{c:1}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
- ASSERT_EQUALS(fromjson("{b:1, a:[{}]}"), doc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
- TEST(PositionalMod, LogNormal) {
- Document doc(fromjson("{b:1, a:[{b:1}]}"));
- Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [{}, {c:1}]}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
- ASSERT_FALSE(execInfo.noOp);
+TEST(PositionalMod, PrepareApplyObject) {
+ Document doc(fromjson("{a:[{b:0},{c:1}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.$': true}}"));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(modUnset.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$unset: {'a.0.b': true}}"), logDoc);
- }
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
- TEST(LegacyData, CanUnsetInvalidField) {
- Document doc(fromjson("{b:1, a:[{$b:1}]}"));
- Mod modUnset(fromjson("{$unset: {'a.$.$b': true}}"));
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0");
+ ASSERT_FALSE(execInfo.noOp);
- ModifierInterface::ExecInfo execInfo;
- ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{a: [null, {c:1}]}"), doc);
+}
- ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.$b");
- ASSERT_FALSE(execInfo.noOp);
+TEST(PositionalMod, PrepareApplyInPlace) {
+ Document doc(fromjson("{b:1, a:[{b:1}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
- ASSERT_OK(modUnset.apply());
- ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson("{b:1, a:[{}]}"), doc);
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
- Document logDoc;
- LogBuilder logBuilder(logDoc.root());
- ASSERT_OK(modUnset.log(&logBuilder));
- ASSERT_EQUALS(fromjson("{$unset: {'a.0.$b': true}}"), logDoc);
- }
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled()); // TODO turn in-place on for this.
+ ASSERT_EQUALS(fromjson("{b:1, a:[{}]}"), doc);
+}
+
+TEST(PositionalMod, LogNormal) {
+ Document doc(fromjson("{b:1, a:[{b:1}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.$.b': true}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(modUnset.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$unset: {'a.0.b': true}}"), logDoc);
+}
+
+TEST(LegacyData, CanUnsetInvalidField) {
+ Document doc(fromjson("{b:1, a:[{$b:1}]}"));
+ Mod modUnset(fromjson("{$unset: {'a.$.$b': true}}"));
+
+ ModifierInterface::ExecInfo execInfo;
+ ASSERT_OK(modUnset.prepare(doc.root(), "0", &execInfo));
+
+ ASSERT_EQUALS(execInfo.fieldRef[0]->dottedField(), "a.0.$b");
+ ASSERT_FALSE(execInfo.noOp);
+
+ ASSERT_OK(modUnset.apply());
+ ASSERT_FALSE(doc.isInPlaceModeEnabled());
+ ASSERT_EQUALS(fromjson("{b:1, a:[{}]}"), doc);
+
+ Document logDoc;
+ LogBuilder logBuilder(logDoc.root());
+ ASSERT_OK(modUnset.log(&logBuilder));
+ ASSERT_EQUALS(fromjson("{$unset: {'a.0.$b': true}}"), logDoc);
+}
-} // unnamed namespace
+} // unnamed namespace
diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp
index 62243d0818d..46a2a33752d 100644
--- a/src/mongo/db/ops/parsed_delete.cpp
+++ b/src/mongo/db/ops/parsed_delete.cpp
@@ -46,88 +46,86 @@
namespace mongo {
- ParsedDelete::ParsedDelete(OperationContext* txn, const DeleteRequest* request) :
- _txn(txn),
- _request(request) { }
+ParsedDelete::ParsedDelete(OperationContext* txn, const DeleteRequest* request)
+ : _txn(txn), _request(request) {}
- Status ParsedDelete::parseRequest() {
- dassert(!_canonicalQuery.get());
- // It is invalid to request that the DeleteStage return the deleted document during a
- // multi-remove.
- invariant(!(_request->shouldReturnDeleted() && _request->isMulti()));
+Status ParsedDelete::parseRequest() {
+ dassert(!_canonicalQuery.get());
+ // It is invalid to request that the DeleteStage return the deleted document during a
+ // multi-remove.
+ invariant(!(_request->shouldReturnDeleted() && _request->isMulti()));
- // It is invalid to request that a ProjectionStage be applied to the DeleteStage if the
- // DeleteStage would not return the deleted document.
- invariant(_request->getProj().isEmpty() || _request->shouldReturnDeleted());
+ // It is invalid to request that a ProjectionStage be applied to the DeleteStage if the
+ // DeleteStage would not return the deleted document.
+ invariant(_request->getProj().isEmpty() || _request->shouldReturnDeleted());
- if (CanonicalQuery::isSimpleIdQuery(_request->getQuery())) {
- return Status::OK();
- }
-
- return parseQueryToCQ();
+ if (CanonicalQuery::isSimpleIdQuery(_request->getQuery())) {
+ return Status::OK();
}
- Status ParsedDelete::parseQueryToCQ() {
- dassert(!_canonicalQuery.get());
-
- CanonicalQuery* cqRaw;
- const WhereCallbackReal whereCallback(_txn, _request->getNamespaceString().db());
-
- // Limit should only used for the findAndModify command when a sort is specified. If a sort
- // is requested, we want to use a top-k sort for efficiency reasons, so should pass the
- // limit through. Generally, a delete stage expects to be able to skip documents that were
- // deleted out from under it, but a limit could inhibit that and give an EOF when the delete
- // has not actually deleted a document. This behavior is fine for findAndModify, but should
- // not apply to deletes in general.
- long long limit = (!_request->isMulti() && !_request->getSort().isEmpty()) ? -1 : 0;
-
- // The projection needs to be applied after the delete operation, so we specify an empty
- // BSONObj as the projection during canonicalization.
- const BSONObj emptyObj;
- Status status = CanonicalQuery::canonicalize(_request->getNamespaceString().ns(),
- _request->getQuery(),
- _request->getSort(),
- emptyObj, // projection
- 0, // skip
- limit,
- emptyObj, // hint
- emptyObj, // min
- emptyObj, // max
- false, // snapshot
- _request->isExplain(),
- &cqRaw,
- whereCallback);
-
- if (status.isOK()) {
- _canonicalQuery.reset(cqRaw);
- }
-
- return status;
+ return parseQueryToCQ();
+}
+
+Status ParsedDelete::parseQueryToCQ() {
+ dassert(!_canonicalQuery.get());
+
+ CanonicalQuery* cqRaw;
+ const WhereCallbackReal whereCallback(_txn, _request->getNamespaceString().db());
+
+ // Limit should only be used for the findAndModify command when a sort is specified. If a sort
+ // is requested, we want to use a top-k sort for efficiency reasons, so should pass the
+ // limit through. Generally, a delete stage expects to be able to skip documents that were
+ // deleted out from under it, but a limit could inhibit that and give an EOF when the delete
+ // has not actually deleted a document. This behavior is fine for findAndModify, but should
+ // not apply to deletes in general.
+ long long limit = (!_request->isMulti() && !_request->getSort().isEmpty()) ? -1 : 0;
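+ // (Illustrative note, not part of the original change: a single-document delete with a sort,
+ // as issued via findAndModify, gets limit == -1 here, passing the limit through so the sort
+ // can be evaluated as a top-k sort; a plain multi-delete keeps limit == 0, meaning no limit.)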
+
+ // The projection needs to be applied after the delete operation, so we specify an empty
+ // BSONObj as the projection during canonicalization.
+ const BSONObj emptyObj;
+ Status status = CanonicalQuery::canonicalize(_request->getNamespaceString().ns(),
+ _request->getQuery(),
+ _request->getSort(),
+ emptyObj, // projection
+ 0, // skip
+ limit,
+ emptyObj, // hint
+ emptyObj, // min
+ emptyObj, // max
+ false, // snapshot
+ _request->isExplain(),
+ &cqRaw,
+ whereCallback);
+
+ if (status.isOK()) {
+ _canonicalQuery.reset(cqRaw);
}
- const DeleteRequest* ParsedDelete::getRequest() const {
- return _request;
- }
+ return status;
+}
- bool ParsedDelete::canYield() const {
- return !_request->isGod() &&
- PlanExecutor::YIELD_AUTO == _request->getYieldPolicy() &&
- !isIsolated();
- }
+const DeleteRequest* ParsedDelete::getRequest() const {
+ return _request;
+}
- bool ParsedDelete::isIsolated() const {
- return _canonicalQuery.get()
- ? QueryPlannerCommon::hasNode(_canonicalQuery->root(), MatchExpression::ATOMIC)
- : LiteParsedQuery::isQueryIsolated(_request->getQuery());
- }
+bool ParsedDelete::canYield() const {
+ return !_request->isGod() && PlanExecutor::YIELD_AUTO == _request->getYieldPolicy() &&
+ !isIsolated();
+}
- bool ParsedDelete::hasParsedQuery() const {
- return _canonicalQuery.get() != NULL;
- }
+bool ParsedDelete::isIsolated() const {
+ return _canonicalQuery.get()
+ ? QueryPlannerCommon::hasNode(_canonicalQuery->root(), MatchExpression::ATOMIC)
+ : LiteParsedQuery::isQueryIsolated(_request->getQuery());
+}
- CanonicalQuery* ParsedDelete::releaseParsedQuery() {
- invariant(_canonicalQuery.get() != NULL);
- return _canonicalQuery.release();
- }
+bool ParsedDelete::hasParsedQuery() const {
+ return _canonicalQuery.get() != NULL;
+}
+
+CanonicalQuery* ParsedDelete::releaseParsedQuery() {
+ invariant(_canonicalQuery.get() != NULL);
+ return _canonicalQuery.release();
+}
} // namespace mongo
diff --git a/src/mongo/db/ops/parsed_delete.h b/src/mongo/db/ops/parsed_delete.h
index c65f0da69e9..0745cbb85e8 100644
--- a/src/mongo/db/ops/parsed_delete.h
+++ b/src/mongo/db/ops/parsed_delete.h
@@ -34,85 +34,85 @@
namespace mongo {
- class CanonicalQuery;
- class Database;
- class DeleteRequest;
- class OperationContext;
+class CanonicalQuery;
+class Database;
+class DeleteRequest;
+class OperationContext;
+/**
+ * This class takes a pointer to a DeleteRequest, and converts that request into a parsed form
+ * via the parseRequest() method. A ParsedDelete can then be used to retrieve a PlanExecutor
+ * capable of executing the delete.
+ *
+ * It is invalid to request that the DeleteStage return the deleted document during a
+ * multi-remove. It is also invalid to request that a ProjectionStage be applied to the
+ * DeleteStage if the DeleteStage would not return the deleted document.
+ *
+ * A delete request is parsed to a CanonicalQuery, so this class is a thin, delete-specific
+ * wrapper around canonicalization.
+ *
+ * No locks need to be held during parsing.
+ */
+class ParsedDelete {
+ MONGO_DISALLOW_COPYING(ParsedDelete);
+
+public:
/**
- * This class takes a pointer to a DeleteRequest, and converts that request into a parsed form
- * via the parseRequest() method. A ParsedDelete can then be used to retrieve a PlanExecutor
- * capable of executing the delete.
- *
- * It is invalid to request that the DeleteStage return the deleted document during a
- * multi-remove. It is also invalid to request that a ProjectionStage be applied to the
- * DeleteStage if the DeleteStage would not return the deleted document.
- *
- * A delete request is parsed to a CanonicalQuery, so this class is a thin, delete-specific
- * wrapper around canonicalization.
+ * Constructs a parsed delete.
*
- * No locks need to be held during parsing.
+ * The object pointed to by "request" must stay in scope for the life of the constructed
+ * ParsedDelete.
+ */
+ ParsedDelete(OperationContext* txn, const DeleteRequest* request);
+
+ /**
+ * Parses the delete request to a canonical query. On success, the parsed delete can be
+ * used to create a PlanExecutor capable of executing this delete.
+ */
+ Status parseRequest();
+
+ /**
+ * As an optimization, we do not create a canonical query if the predicate is a simple
+ * _id equality. This method can be used to force full parsing to a canonical query,
+ * as a fallback if the idhack path is not available (e.g. no _id index).
+ */
+ Status parseQueryToCQ();
+
+ /**
+ * Get the raw request.
+ */
+ const DeleteRequest* getRequest() const;
+
+ /**
+ * Is this delete allowed to yield?
+ */
+ bool canYield() const;
+
+ /**
+ * Is this delete supposed to be isolated?
+ */
+ bool isIsolated() const;
+
+ /**
+ * As an optimization, we don't create a canonical query for deletes with simple _id
+ * queries. Use this method to determine whether or not we actually parsed the query.
+ */
+ bool hasParsedQuery() const;
+
+ /**
+ * Releases ownership of the canonical query to the caller.
*/
- class ParsedDelete {
- MONGO_DISALLOW_COPYING(ParsedDelete);
- public:
- /**
- * Constructs a parsed delete.
- *
- * The object pointed to by "request" must stay in scope for the life of the constructed
- * ParsedDelete.
- */
- ParsedDelete(OperationContext* txn, const DeleteRequest* request);
-
- /**
- * Parses the delete request to a canonical query. On success, the parsed delete can be
- * used to create a PlanExecutor capable of executing this delete.
- */
- Status parseRequest();
-
- /**
- * As an optimization, we do not create a canonical query if the predicate is a simple
- * _id equality. This method can be used to force full parsing to a canonical query,
- * as a fallback if the idhack path is not available (e.g. no _id index).
- */
- Status parseQueryToCQ();
-
- /**
- * Get the raw request.
- */
- const DeleteRequest* getRequest() const;
-
- /**
- * Is this delete allowed to yield?
- */
- bool canYield() const;
-
- /**
- * Is this update supposed to be isolated?
- */
- bool isIsolated() const;
-
- /**
- * As an optimization, we don't create a canonical query for updates with simple _id
- * queries. Use this method to determine whether or not we actually parsed the query.
- */
- bool hasParsedQuery() const;
-
- /**
- * Releases ownership of the canonical query to the caller.
- */
- CanonicalQuery* releaseParsedQuery();
-
- private:
- // Transactional context. Not owned by us.
- OperationContext* _txn;
-
- // Unowned pointer to the request object that this executor will process.
- const DeleteRequest* const _request;
-
- // Parsed query object, or NULL if the query proves to be an id hack query.
- std::unique_ptr<CanonicalQuery> _canonicalQuery;
-
- };
+ CanonicalQuery* releaseParsedQuery();
+
+private:
+ // Transactional context. Not owned by us.
+ OperationContext* _txn;
+
+ // Unowned pointer to the request object that this executor will process.
+ const DeleteRequest* const _request;
+
+ // Parsed query object, or NULL if the query proves to be an id hack query.
+ std::unique_ptr<CanonicalQuery> _canonicalQuery;
+};
} // namespace mongo
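A minimal usage sketch of the ParsedDelete interface above (illustrative only; the caller, its OperationContext 'txn', and its DeleteRequest 'request' are assumptions, not part of this change). It follows the parse-then-execute flow the class comment describes, forcing full canonicalization only when the idhack path is unavailable:

    // Sketch only: 'txn' and 'request' must outlive the ParsedDelete.
    ParsedDelete parsedDelete(txn, request);
    Status status = parsedDelete.parseRequest();  // cheap for simple {_id: ...} predicates
    if (!status.isOK()) {
        return status;
    }
    if (!parsedDelete.hasParsedQuery()) {
        // Fallback when idhack cannot be used (e.g. no _id index): parse the query fully.
        status = parsedDelete.parseQueryToCQ();
        if (!status.isOK()) {
            return status;
        }
    }
    std::unique_ptr<CanonicalQuery> cq(parsedDelete.releaseParsedQuery());
    // 'cq' can now be handed to PlanExecutor construction.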
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index d4651371e3f..80138e0385d 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -35,126 +35,120 @@
namespace mongo {
- ParsedUpdate::ParsedUpdate(OperationContext* txn, const UpdateRequest* request) :
- _txn(txn),
- _request(request),
- _driver(UpdateDriver::Options()),
- _canonicalQuery() { }
-
- Status ParsedUpdate::parseRequest() {
- // It is invalid to request that the UpdateStage return the prior or newly-updated version
- // of a document during a multi-update.
- invariant(!(_request->shouldReturnAnyDocs() && _request->isMulti()));
-
- // It is invalid to request that a ProjectionStage be applied to the UpdateStage if the
- // UpdateStage would not return any document.
- invariant(_request->getProj().isEmpty() || _request->shouldReturnAnyDocs());
-
- // We parse the update portion before the query portion because the dispostion of the update
- // may determine whether or not we need to produce a CanonicalQuery at all. For example, if
- // the update involves the positional-dollar operator, we must have a CanonicalQuery even if
- // it isn't required for query execution.
- Status status = parseUpdate();
- if (!status.isOK())
- return status;
- status = parseQuery();
- if (!status.isOK())
- return status;
- return Status::OK();
- }
-
- Status ParsedUpdate::parseQuery() {
- dassert(!_canonicalQuery.get());
+ParsedUpdate::ParsedUpdate(OperationContext* txn, const UpdateRequest* request)
+ : _txn(txn), _request(request), _driver(UpdateDriver::Options()), _canonicalQuery() {}
+
+Status ParsedUpdate::parseRequest() {
+ // It is invalid to request that the UpdateStage return the prior or newly-updated version
+ // of a document during a multi-update.
+ invariant(!(_request->shouldReturnAnyDocs() && _request->isMulti()));
+
+ // It is invalid to request that a ProjectionStage be applied to the UpdateStage if the
+ // UpdateStage would not return any document.
+ invariant(_request->getProj().isEmpty() || _request->shouldReturnAnyDocs());
+
+ // We parse the update portion before the query portion because the disposition of the update
+ // may determine whether or not we need to produce a CanonicalQuery at all. For example, if
+ // the update involves the positional-dollar operator, we must have a CanonicalQuery even if
+ // it isn't required for query execution.
+ Status status = parseUpdate();
+ if (!status.isOK())
+ return status;
+ status = parseQuery();
+ if (!status.isOK())
+ return status;
+ return Status::OK();
+}
- if (!_driver.needMatchDetails() && CanonicalQuery::isSimpleIdQuery(_request->getQuery())) {
- return Status::OK();
- }
+Status ParsedUpdate::parseQuery() {
+ dassert(!_canonicalQuery.get());
- return parseQueryToCQ();
+ if (!_driver.needMatchDetails() && CanonicalQuery::isSimpleIdQuery(_request->getQuery())) {
+ return Status::OK();
}
- Status ParsedUpdate::parseQueryToCQ() {
- dassert(!_canonicalQuery.get());
-
- CanonicalQuery* cqRaw;
- const WhereCallbackReal whereCallback(_txn, _request->getNamespaceString().db());
-
- // Limit should only used for the findAndModify command when a sort is specified. If a sort
- // is requested, we want to use a top-k sort for efficiency reasons, so should pass the
- // limit through. Generally, a update stage expects to be able to skip documents that were
- // deleted/modified under it, but a limit could inhibit that and give an EOF when the update
- // has not actually updated a document. This behavior is fine for findAndModify, but should
- // not apply to update in general.
- long long limit = (!_request->isMulti() && !_request->getSort().isEmpty()) ? -1 : 0;
-
- // The projection needs to be applied after the update operation, so we specify an empty
- // BSONObj as the projection during canonicalization.
- const BSONObj emptyObj;
- Status status = CanonicalQuery::canonicalize(_request->getNamespaceString().ns(),
- _request->getQuery(),
- _request->getSort(),
- emptyObj, // projection
- 0, // skip
- limit,
- emptyObj, // hint
- emptyObj, // min
- emptyObj, // max
- false, // snapshot
- _request->isExplain(),
- &cqRaw,
- whereCallback);
- if (status.isOK()) {
- _canonicalQuery.reset(cqRaw);
- }
-
- return status;
+ return parseQueryToCQ();
+}
+
+Status ParsedUpdate::parseQueryToCQ() {
+ dassert(!_canonicalQuery.get());
+
+ CanonicalQuery* cqRaw;
+ const WhereCallbackReal whereCallback(_txn, _request->getNamespaceString().db());
+
+ // Limit should only be used for the findAndModify command when a sort is specified. If a sort
+ // is requested, we want to use a top-k sort for efficiency reasons, so should pass the
+ // limit through. Generally, an update stage expects to be able to skip documents that were
+ // deleted/modified under it, but a limit could inhibit that and give an EOF when the update
+ // has not actually updated a document. This behavior is fine for findAndModify, but should
+ // not apply to updates in general.
+ long long limit = (!_request->isMulti() && !_request->getSort().isEmpty()) ? -1 : 0;
+
+ // The projection needs to be applied after the update operation, so we specify an empty
+ // BSONObj as the projection during canonicalization.
+ const BSONObj emptyObj;
+ Status status = CanonicalQuery::canonicalize(_request->getNamespaceString().ns(),
+ _request->getQuery(),
+ _request->getSort(),
+ emptyObj, // projection
+ 0, // skip
+ limit,
+ emptyObj, // hint
+ emptyObj, // min
+ emptyObj, // max
+ false, // snapshot
+ _request->isExplain(),
+ &cqRaw,
+ whereCallback);
+ if (status.isOK()) {
+ _canonicalQuery.reset(cqRaw);
}
- Status ParsedUpdate::parseUpdate() {
- const NamespaceString& ns(_request->getNamespaceString());
+ return status;
+}
- // Should the modifiers validate their embedded docs via okForStorage
- // Only user updates should be checked. Any system or replication stuff should pass through.
- // Config db docs shouldn't get checked for valid field names since the shard key can have
- // a dot (".") in it.
- const bool shouldValidate = !(!_txn->writesAreReplicated() ||
- ns.isConfigDB() ||
- _request->isFromMigration());
+Status ParsedUpdate::parseUpdate() {
+ const NamespaceString& ns(_request->getNamespaceString());
- _driver.setLogOp(true);
- _driver.setModOptions(ModifierInterface::Options(!_txn->writesAreReplicated(),
- shouldValidate));
+ // Should the modifiers validate their embedded docs via okForStorage?
+ // Only user updates should be checked. Any system or replication stuff should pass through.
+ // Config db docs shouldn't get checked for valid field names since the shard key can have
+ // a dot (".") in it.
+ const bool shouldValidate =
+ !(!_txn->writesAreReplicated() || ns.isConfigDB() || _request->isFromMigration());
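+ // (Illustrative note, not part of the original change: an ordinary replicated user update is
+ // validated via okForStorage, while config-DB updates, migration-driven updates, and
+ // non-replicated writes skip that check.)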
- return _driver.parse(_request->getUpdates(), _request->isMulti());
- }
+ _driver.setLogOp(true);
+ _driver.setModOptions(ModifierInterface::Options(!_txn->writesAreReplicated(), shouldValidate));
- bool ParsedUpdate::canYield() const {
- return !_request->isGod() &&
- PlanExecutor::YIELD_AUTO == _request->getYieldPolicy() &&
- !isIsolated();
- }
+ return _driver.parse(_request->getUpdates(), _request->isMulti());
+}
- bool ParsedUpdate::isIsolated() const {
- return _canonicalQuery.get()
- ? QueryPlannerCommon::hasNode(_canonicalQuery->root(), MatchExpression::ATOMIC)
- : LiteParsedQuery::isQueryIsolated(_request->getQuery());
- }
+bool ParsedUpdate::canYield() const {
+ return !_request->isGod() && PlanExecutor::YIELD_AUTO == _request->getYieldPolicy() &&
+ !isIsolated();
+}
- bool ParsedUpdate::hasParsedQuery() const {
- return _canonicalQuery.get() != NULL;
- }
+bool ParsedUpdate::isIsolated() const {
+ return _canonicalQuery.get()
+ ? QueryPlannerCommon::hasNode(_canonicalQuery->root(), MatchExpression::ATOMIC)
+ : LiteParsedQuery::isQueryIsolated(_request->getQuery());
+}
- CanonicalQuery* ParsedUpdate::releaseParsedQuery() {
- invariant(_canonicalQuery.get() != NULL);
- return _canonicalQuery.release();
- }
+bool ParsedUpdate::hasParsedQuery() const {
+ return _canonicalQuery.get() != NULL;
+}
- const UpdateRequest* ParsedUpdate::getRequest() const {
- return _request;
- }
+CanonicalQuery* ParsedUpdate::releaseParsedQuery() {
+ invariant(_canonicalQuery.get() != NULL);
+ return _canonicalQuery.release();
+}
- UpdateDriver* ParsedUpdate::getDriver() {
- return &_driver;
- }
+const UpdateRequest* ParsedUpdate::getRequest() const {
+ return _request;
+}
+
+UpdateDriver* ParsedUpdate::getDriver() {
+ return &_driver;
+}
} // namespace mongo
diff --git a/src/mongo/db/ops/parsed_update.h b/src/mongo/db/ops/parsed_update.h
index d109904d1bd..8ce08aaabc3 100644
--- a/src/mongo/db/ops/parsed_update.h
+++ b/src/mongo/db/ops/parsed_update.h
@@ -34,102 +34,103 @@
namespace mongo {
- class CanonicalQuery;
- class OperationContext;
- class UpdateRequest;
+class CanonicalQuery;
+class OperationContext;
+class UpdateRequest;
+/**
+ * This class takes a pointer to an UpdateRequest, and converts that request into a parsed form
+ * via the parseRequest() method. A ParsedUpdate can then be used to retrieve a PlanExecutor
+ * capable of executing the update.
+ *
+ * It is invalid to request that the UpdateStage return the prior or newly-updated version of a
+ * document during a multi-update. It is also invalid to request that a ProjectionStage be
+ * applied to the UpdateStage if the UpdateStage would not return any document.
+ *
+ * No locks need to be held during parsing.
+ *
+ * The query part of the update is parsed to a CanonicalQuery, and the update part is parsed
+ * using the UpdateDriver.
+ */
+class ParsedUpdate {
+ MONGO_DISALLOW_COPYING(ParsedUpdate);
+
+public:
/**
- * This class takes a pointer to an UpdateRequest, and converts that request into a parsed form
- * via the parseRequest() method. A ParsedUpdate can then be used to retrieve a PlanExecutor
- * capable of executing the update.
+ * Constructs a parsed update.
*
- * It is invalid to request that the UpdateStage return the prior or newly-updated version of a
- * document during a multi-update. It is also invalid to request that a ProjectionStage be
- * applied to the UpdateStage if the UpdateStage would not return any document.
- *
- * No locks need to be held during parsing.
- *
- * The query part of the update is parsed to a CanonicalQuery, and the update part is parsed
- * using the UpdateDriver.
+ * The object pointed to by "request" must stay in scope for the life of the constructed
+ * ParsedUpdate.
+ */
+ ParsedUpdate(OperationContext* txn, const UpdateRequest* request);
+
+ /**
+ * Parses the update request to a canonical query and an update driver. On success, the
+ * parsed update can be used to create a PlanExecutor for this update.
+ */
+ Status parseRequest();
+
+ /**
+ * As an optimization, we do not create a canonical query if the predicate is a simple
+ * _id equality. This method can be used to force full parsing to a canonical query,
+ * as a fallback if the idhack path is not available (e.g. no _id index).
+ */
+ Status parseQueryToCQ();
+
+ /**
+ * Get the raw request.
+ */
+ const UpdateRequest* getRequest() const;
+
+ /**
+ * Get a pointer to the update driver, the abstraction which both parses the update and
+ * is capable of applying mods / computing damages.
+ */
+ UpdateDriver* getDriver();
+
+ /**
+ * Is this update allowed to yield?
+ */
+ bool canYield() const;
+
+ /**
+ * Is this update supposed to be isolated?
+ */
+ bool isIsolated() const;
+
+ /**
+ * As an optimization, we don't create a canonical query for updates with simple _id
+ * queries. Use this method to determine whether or not we actually parsed the query.
+ */
+ bool hasParsedQuery() const;
+
+ /**
+ * Releases ownership of the canonical query to the caller.
*/
- class ParsedUpdate {
- MONGO_DISALLOW_COPYING(ParsedUpdate);
- public:
- /**
- * Constructs a parsed update.
- *
- * The object pointed to by "request" must stay in scope for the life of the constructed
- * ParsedUpdate.
- */
- ParsedUpdate(OperationContext* txn, const UpdateRequest* request);
-
- /**
- * Parses the update request to a canonical query and an update driver. On success, the
- * parsed update can be used to create a PlanExecutor for this update.
- */
- Status parseRequest();
-
- /**
- * As an optimization, we do not create a canonical query if the predicate is a simple
- * _id equality. This method can be used to force full parsing to a canonical query,
- * as a fallback if the idhack path is not available (e.g. no _id index).
- */
- Status parseQueryToCQ();
-
- /**
- * Get the raw request.
- */
- const UpdateRequest* getRequest() const;
-
- /**
- * Get a pointer to the update driver, the abstraction which both parses the update and
- * is capable of applying mods / computing damages.
- */
- UpdateDriver* getDriver();
-
- /**
- * Is this update allowed to yield?
- */
- bool canYield() const;
-
- /**
- * Is this update supposed to be isolated?
- */
- bool isIsolated() const;
-
- /**
- * As an optimization, we don't create a canonical query for updates with simple _id
- * queries. Use this method to determine whether or not we actually parsed the query.
- */
- bool hasParsedQuery() const;
-
- /**
- * Releases ownership of the canonical query to the caller.
- */
- CanonicalQuery* releaseParsedQuery();
-
- private:
- /**
- * Parses the query portion of the update request.
- */
- Status parseQuery();
-
- /**
- * Parses the update-descriptor portion of the update request.
- */
- Status parseUpdate();
-
- // Unowned pointer to the transactional context.
- OperationContext* _txn;
-
- // Unowned pointer to the request object to process.
- const UpdateRequest* const _request;
-
- // Driver for processing updates on matched documents.
- UpdateDriver _driver;
-
- // Parsed query object, or NULL if the query proves to be an id hack query.
- std::unique_ptr<CanonicalQuery> _canonicalQuery;
- };
+ CanonicalQuery* releaseParsedQuery();
+
+private:
+ /**
+ * Parses the query portion of the update request.
+ */
+ Status parseQuery();
+
+ /**
+ * Parses the update-descriptor portion of the update request.
+ */
+ Status parseUpdate();
+
+ // Unowned pointer to the transactional context.
+ OperationContext* _txn;
+
+ // Unowned pointer to the request object to process.
+ const UpdateRequest* const _request;
+
+ // Driver for processing updates on matched documents.
+ UpdateDriver _driver;
+
+ // Parsed query object, or NULL if the query proves to be an id hack query.
+ std::unique_ptr<CanonicalQuery> _canonicalQuery;
+};
} // namespace mongo
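A companion sketch for the ParsedUpdate interface above (again illustrative; 'txn' and 'request' are assumed). Note the ordering documented in parsed_update.cpp: the mods are parsed before the query, because a positional mod such as {$set: {'a.$.b': 1}} makes the driver need match details, so a CanonicalQuery is produced even for a simple {_id: ...} predicate:

    // Sketch only: 'txn' and 'request' must outlive the ParsedUpdate.
    ParsedUpdate parsedUpdate(txn, request);
    Status status = parsedUpdate.parseRequest();  // parses the mods first, then the query
    if (!status.isOK()) {
        return status;
    }
    UpdateDriver* driver = parsedUpdate.getDriver();  // later applies mods / computes damages
    if (parsedUpdate.hasParsedQuery()) {
        std::unique_ptr<CanonicalQuery> cq(parsedUpdate.releaseParsedQuery());
        // ... hand 'cq' and 'driver' to PlanExecutor construction ...
    }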
diff --git a/src/mongo/db/ops/path_support.cpp b/src/mongo/db/ops/path_support.cpp
index 4ac4b03fb36..894b0117393 100644
--- a/src/mongo/db/ops/path_support.cpp
+++ b/src/mongo/db/ops/path_support.cpp
@@ -38,78 +38,72 @@
namespace mongo {
namespace pathsupport {
- using std::string;
- using mongoutils::str::stream;
-
- namespace {
-
- bool isNumeric(StringData str, size_t* num) {
- size_t res = 0;
- for (size_t i = 0; i < str.size(); ++i) {
- if (str[i] < '0' || str[i] > '9') {
- return false;
- }
- else {
- res = res * 10 + (str[i] - '0');
- }
- }
- *num = res;
- return true;
+using std::string;
+using mongoutils::str::stream;
+
+namespace {
+
+bool isNumeric(StringData str, size_t* num) {
+ size_t res = 0;
+ for (size_t i = 0; i < str.size(); ++i) {
+ if (str[i] < '0' || str[i] > '9') {
+ return false;
+ } else {
+ res = res * 10 + (str[i] - '0');
}
+ }
+ *num = res;
+ return true;
+}
- Status maybePadTo(mutablebson::Element* elemArray,
- size_t sizeRequired) {
- dassert(elemArray->getType() == Array);
+Status maybePadTo(mutablebson::Element* elemArray, size_t sizeRequired) {
+ dassert(elemArray->getType() == Array);
- if (sizeRequired > kMaxPaddingAllowed) {
- return Status(ErrorCodes::CannotBackfillArray,
- mongoutils::str::stream() << "can't backfill array to larger than "
- << kMaxPaddingAllowed << " elements");
- }
+ if (sizeRequired > kMaxPaddingAllowed) {
+ return Status(ErrorCodes::CannotBackfillArray,
+ mongoutils::str::stream() << "can't backfill array to larger than "
+ << kMaxPaddingAllowed << " elements");
+ }
- size_t currSize = mutablebson::countChildren(*elemArray);
- if (sizeRequired > currSize) {
- size_t toPad = sizeRequired - currSize;
- for (size_t i = 0; i < toPad; i++) {
- Status status = elemArray->appendNull("");
- if (!status.isOK()) {
- return status;
- }
- }
+ size_t currSize = mutablebson::countChildren(*elemArray);
+ if (sizeRequired > currSize) {
+ size_t toPad = sizeRequired - currSize;
+ for (size_t i = 0; i < toPad; i++) {
+ Status status = elemArray->appendNull("");
+ if (!status.isOK()) {
+ return status;
}
- return Status::OK();
}
+ }
+ return Status::OK();
+}
+
+} // unnamed namespace
+
+Status findLongestPrefix(const FieldRef& prefix,
+ mutablebson::Element root,
+ size_t* idxFound,
+ mutablebson::Element* elemFound) {
+ // If root is empty or the prefix is so, there's no point in looking for a prefix.
+ const size_t prefixSize = prefix.numParts();
+ if (!root.hasChildren() || prefixSize == 0) {
+ return Status(ErrorCodes::NonExistentPath, "either the document or the path are empty");
+ }
- } // unnamed namespace
-
- Status findLongestPrefix(const FieldRef& prefix,
- mutablebson::Element root,
- size_t* idxFound,
- mutablebson::Element* elemFound) {
-
- // If root is empty or the prefix is so, there's no point in looking for a prefix.
- const size_t prefixSize = prefix.numParts();
- if (!root.hasChildren() || prefixSize == 0) {
- return Status(ErrorCodes::NonExistentPath,
- "either the document or the path are empty");
- }
-
- // Loop through prefix's parts. At each iteration, check that the part ('curr') exists
- // in 'root' and that the type of the previous part ('prev') allows for children.
- mutablebson::Element curr = root;
- mutablebson::Element prev = root;
- size_t i = 0;
- size_t numericPart = 0;
- bool viable = true;
- for (;i < prefixSize; i++) {
-
- // If prefix wants to reach 'curr' by applying a non-numeric index to an array
- // 'prev', or if 'curr' wants to traverse a leaf 'prev', then we'd be in a
- // non-viable path (see definition on the header file).
- StringData prefixPart = prefix.getPart(i);
- prev = curr;
- switch (curr.getType()) {
-
+ // Loop through prefix's parts. At each iteration, check that the part ('curr') exists
+ // in 'root' and that the type of the previous part ('prev') allows for children.
+ mutablebson::Element curr = root;
+ mutablebson::Element prev = root;
+ size_t i = 0;
+ size_t numericPart = 0;
+ bool viable = true;
+ for (; i < prefixSize; i++) {
+ // If prefix wants to reach 'curr' by applying a non-numeric index to an array
+ // 'prev', or if 'curr' wants to traverse a leaf 'prev', then we'd be in a
+ // non-viable path (see definition on the header file).
+ StringData prefixPart = prefix.getPart(i);
+ prev = curr;
+ switch (curr.getType()) {
case Object:
curr = prev[prefixPart];
break;
@@ -124,338 +118,307 @@ namespace pathsupport {
default:
viable = false;
- }
-
- // If we couldn't find the next field part of the prefix in the document or if the
- // field part we're in constitutes a non-viable path, we can stop looking.
- if (!curr.ok() || !viable) {
- break;
- }
}
- // We broke out of the loop because one of four things happened. (a) 'prefix' and
- // 'root' have nothing in common, (b) 'prefix' is not viable in 'root', (c) not all the
- // parts in 'prefix' exist in 'root', or (d) all parts do. In each case, we need to
- // figure out what index and Element pointer to return.
- if (i == 0) {
- return Status(ErrorCodes::NonExistentPath,
- "cannot find path in the document");
- }
- else if (!viable) {
- *idxFound = i - 1;
- *elemFound = prev;
- return Status(ErrorCodes::PathNotViable,
- mongoutils::str::stream() << "cannot use the part (" <<
- prefix.getPart(i-1) << " of " << prefix.dottedField() <<
- ") to traverse the element ({" <<
- curr.toString() << "})");
- }
- else if (curr.ok()) {
- *idxFound = i - 1;
- *elemFound = curr;
- return Status::OK();
- }
- else {
- *idxFound = i - 1;
- *elemFound = prev;
- return Status::OK();
+ // If we couldn't find the next field part of the prefix in the document or if the
+ // field part we're in constitutes a non-viable path, we can stop looking.
+ if (!curr.ok() || !viable) {
+ break;
}
}
- Status createPathAt(const FieldRef& prefix,
- size_t idxFound,
- mutablebson::Element elemFound,
- mutablebson::Element newElem) {
- Status status = Status::OK();
-
- // Sanity check that 'idxField' is an actual part.
- const size_t size = prefix.numParts();
- if (idxFound >= size) {
- return Status(ErrorCodes::BadValue, "index larger than path size");
- }
-
- mutablebson::Document& doc = elemFound.getDocument();
-
- // If we are creating children under an array and a numeric index is next, then perhaps
- // we need padding.
- size_t i = idxFound;
- bool inArray = false;
- if (elemFound.getType() == mongo::Array) {
- size_t newIdx = 0;
- if (!isNumeric(prefix.getPart(idxFound), &newIdx)) {
- return Status(ErrorCodes::InvalidPath, "Array require numeric fields");
- }
+ // We broke out of the loop because one of four things happened. (a) 'prefix' and
+ // 'root' have nothing in common, (b) 'prefix' is not viable in 'root', (c) not all the
+ // parts in 'prefix' exist in 'root', or (d) all parts do. In each case, we need to
+ // figure out what index and Element pointer to return.
+ if (i == 0) {
+ return Status(ErrorCodes::NonExistentPath, "cannot find path in the document");
+ } else if (!viable) {
+ *idxFound = i - 1;
+ *elemFound = prev;
+ return Status(ErrorCodes::PathNotViable,
+ mongoutils::str::stream() << "cannot use the part (" << prefix.getPart(i - 1)
+ << " of " << prefix.dottedField()
+ << ") to traverse the element ({" << curr.toString()
+ << "})");
+ } else if (curr.ok()) {
+ *idxFound = i - 1;
+ *elemFound = curr;
+ return Status::OK();
+ } else {
+ *idxFound = i - 1;
+ *elemFound = prev;
+ return Status::OK();
+ }
+}
+
+Status createPathAt(const FieldRef& prefix,
+ size_t idxFound,
+ mutablebson::Element elemFound,
+ mutablebson::Element newElem) {
+ Status status = Status::OK();
+
+ // Sanity check that 'idxField' is an actual part.
+ const size_t size = prefix.numParts();
+ if (idxFound >= size) {
+ return Status(ErrorCodes::BadValue, "index larger than path size");
+ }
- status = maybePadTo(&elemFound, newIdx);
- if (!status.isOK()) {
- return status;
- }
+ mutablebson::Document& doc = elemFound.getDocument();
- // If there is a next field, that would be an array element. We'd like to mark that
- // field because we create array elements differently than we do regular objects.
- if (++i < size) {
- inArray = true;
- }
+ // If we are creating children under an array and a numeric index is next, then perhaps
+ // we need padding.
+ size_t i = idxFound;
+ bool inArray = false;
+ if (elemFound.getType() == mongo::Array) {
+ size_t newIdx = 0;
+ if (!isNumeric(prefix.getPart(idxFound), &newIdx)) {
+ return Status(ErrorCodes::InvalidPath, "Arrays require numeric fields");
}
- // Create all the remaining parts but the last one.
- for (; i < size - 1 ; i++) {
- mutablebson::Element elem = doc.makeElementObject(prefix.getPart(i));
- if (!elem.ok()) {
- return Status(ErrorCodes::InternalError, "cannot create path");
- }
+ status = maybePadTo(&elemFound, newIdx);
+ if (!status.isOK()) {
+ return status;
+ }
- // If this field is an array element, we wrap it in an object (because array
- // elements are wraped in { "N": <element> } objects.
- if (inArray) {
- // TODO pass empty StringData to makeElementObject, when that's supported.
- mutablebson::Element arrayObj = doc.makeElementObject("" /* it's an array */);
- if (!arrayObj.ok()) {
- return Status(ErrorCodes::InternalError, "cannot create item on array");
- }
- status = arrayObj.pushBack(elem);
- if (!status.isOK()) {
- return status;
- }
- status = elemFound.pushBack(arrayObj);
- if (!status.isOK()) {
- return status;
- }
- inArray = false;
- }
- else {
- status = elemFound.pushBack(elem);
- if (!status.isOK()) {
- return status;
- }
- }
+ // If there is a next field, that would be an array element. We'd like to mark that
+ // field because we create array elements differently than we do regular objects.
+ if (++i < size) {
+ inArray = true;
+ }
+ }
- elemFound = elem;
+ // Create all the remaining parts but the last one.
+ for (; i < size - 1; i++) {
+ mutablebson::Element elem = doc.makeElementObject(prefix.getPart(i));
+ if (!elem.ok()) {
+ return Status(ErrorCodes::InternalError, "cannot create path");
}
- // Attach the last element. Here again, if we're in a field that is an array element,
- // we wrap it in an object first.
+ // If this field is an array element, we wrap it in an object (because array
+ // elements are wrapped in { "N": <element> } objects).
if (inArray) {
// TODO pass empty StringData to makeElementObject, when that's supported.
mutablebson::Element arrayObj = doc.makeElementObject("" /* it's an array */);
if (!arrayObj.ok()) {
return Status(ErrorCodes::InternalError, "cannot create item on array");
}
-
- status = arrayObj.pushBack(newElem);
+ status = arrayObj.pushBack(elem);
if (!status.isOK()) {
return status;
}
-
status = elemFound.pushBack(arrayObj);
if (!status.isOK()) {
return status;
}
-
- }
- else {
- status = elemFound.pushBack(newElem);
+ inArray = false;
+ } else {
+ status = elemFound.pushBack(elem);
if (!status.isOK()) {
return status;
}
}
- return Status::OK();
+ elemFound = elem;
}
- Status setElementAtPath(const FieldRef& path,
- const BSONElement& value,
- mutablebson::Document* doc) {
-
- size_t deepestElemPathPart;
- mutablebson::Element deepestElem(doc->end());
-
- // Get the existing parents of this path
- Status status = findLongestPrefix(path,
- doc->root(),
- &deepestElemPathPart,
- &deepestElem);
-
- // TODO: All this is pretty awkward, why not return the position immediately after the
- // consumed path or use a signed sentinel? Why is it a special case when we've consumed the
- // whole path?
+ // Attach the last element. Here again, if we're in a field that is an array element,
+ // we wrap it in an object first.
+ if (inArray) {
+ // TODO pass empty StringData to makeElementObject, when that's supported.
+ mutablebson::Element arrayObj = doc.makeElementObject("" /* it's an array */);
+ if (!arrayObj.ok()) {
+ return Status(ErrorCodes::InternalError, "cannot create item on array");
+ }
- if (!status.isOK() && status.code() != ErrorCodes::NonExistentPath)
+ status = arrayObj.pushBack(newElem);
+ if (!status.isOK()) {
return status;
-
- // Inc the path by one *unless* we matched nothing
- if (status.code() != ErrorCodes::NonExistentPath) {
- ++deepestElemPathPart;
- }
- else {
- deepestElemPathPart = 0;
- deepestElem = doc->root();
}
- if (deepestElemPathPart == path.numParts()) {
- // The full path exists already in the document, so just set a value
- return deepestElem.setValueBSONElement(value);
+ status = elemFound.pushBack(arrayObj);
+ if (!status.isOK()) {
+ return status;
}
- else {
- // Construct the rest of the path we need with empty documents and set the value
- StringData leafFieldName = path.getPart(path.numParts() - 1);
- mutablebson::Element leafElem = doc->makeElementWithNewFieldName(leafFieldName,
- value);
- dassert(leafElem.ok());
- return createPathAt(path, deepestElemPathPart, deepestElem, leafElem);
+
+ } else {
+ status = elemFound.pushBack(newElem);
+ if (!status.isOK()) {
+ return status;
}
}
- const BSONElement& findParentEqualityElement(const EqualityMatches& equalities,
- const FieldRef& path,
- int* parentPathParts) {
+ return Status::OK();
+}
- // We may have an equality match to an object at a higher point in the pattern path, check
- // all path prefixes for equality matches
- // ex: path: 'a.b', query : { 'a' : { b : <value> } }
- // ex: path: 'a.b.c', query : { 'a.b' : { c : <value> } }
- for (int i = static_cast<int>(path.numParts()); i >= 0; --i) {
+Status setElementAtPath(const FieldRef& path,
+ const BSONElement& value,
+ mutablebson::Document* doc) {
+ size_t deepestElemPathPart;
+ mutablebson::Element deepestElem(doc->end());
- // "" element is *not* a parent of anyone but itself
- if (i == 0 && path.numParts() != 0)
- continue;
+ // Get the existing parents of this path
+ Status status = findLongestPrefix(path, doc->root(), &deepestElemPathPart, &deepestElem);
- StringData subPathStr = path.dottedSubstring(0, i);
- EqualityMatches::const_iterator seenIt = equalities.find(subPathStr);
- if (seenIt == equalities.end())
- continue;
+ // TODO: All this is pretty awkward, why not return the position immediately after the
+ // consumed path or use a signed sentinel? Why is it a special case when we've consumed the
+ // whole path?
- *parentPathParts = i;
- return seenIt->second->getData();
- }
+ if (!status.isOK() && status.code() != ErrorCodes::NonExistentPath)
+ return status;
- *parentPathParts = -1;
- static const BSONElement eooElement;
- return eooElement;
+ // Inc the path by one *unless* we matched nothing
+ if (status.code() != ErrorCodes::NonExistentPath) {
+ ++deepestElemPathPart;
+ } else {
+ deepestElemPathPart = 0;
+ deepestElem = doc->root();
}
- /**
- * Helper function to check if the current equality match paths conflict with a new path.
- */
- static Status checkEqualityConflicts(const EqualityMatches& equalities, const FieldRef& path) {
-
- int parentPathPart = -1;
- const BSONElement& parentEl = findParentEqualityElement(equalities,
- path,
- &parentPathPart);
-
- if (parentEl.eoo())
- return Status::OK();
+ if (deepestElemPathPart == path.numParts()) {
+ // The full path exists already in the document, so just set a value
+ return deepestElem.setValueBSONElement(value);
+ } else {
+ // Construct the rest of the path we need with empty documents and set the value
+ StringData leafFieldName = path.getPart(path.numParts() - 1);
+ mutablebson::Element leafElem = doc->makeElementWithNewFieldName(leafFieldName, value);
+ dassert(leafElem.ok());
+ return createPathAt(path, deepestElemPathPart, deepestElem, leafElem);
+ }
+}
+
+const BSONElement& findParentEqualityElement(const EqualityMatches& equalities,
+ const FieldRef& path,
+ int* parentPathParts) {
+ // We may have an equality match to an object at a higher point in the pattern path, check
+ // all path prefixes for equality matches
+ // ex: path: 'a.b', query : { 'a' : { b : <value> } }
+ // ex: path: 'a.b.c', query : { 'a.b' : { c : <value> } }
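+ // (Illustrative note, not part of the original change: for path 'a.b.c' the loop below tries
+ // 'a.b.c', then 'a.b', then 'a', returning the first equality found; i == 0 is only considered
+ // when the path itself is empty.)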
+ for (int i = static_cast<int>(path.numParts()); i >= 0; --i) {
+ // "" element is *not* a parent of anyone but itself
+ if (i == 0 && path.numParts() != 0)
+ continue;
+
+ StringData subPathStr = path.dottedSubstring(0, i);
+ EqualityMatches::const_iterator seenIt = equalities.find(subPathStr);
+ if (seenIt == equalities.end())
+ continue;
+
+ *parentPathParts = i;
+ return seenIt->second->getData();
+ }
- string errMsg = "cannot infer query fields to set, ";
+ *parentPathParts = -1;
+ static const BSONElement eooElement;
+ return eooElement;
+}
- StringData pathStr = path.dottedField();
- StringData prefixStr = path.dottedSubstring(0, parentPathPart);
- StringData suffixStr = path.dottedSubstring(parentPathPart, path.numParts());
+/**
+ * Helper function to check if the current equality match paths conflict with a new path.
+ */
+static Status checkEqualityConflicts(const EqualityMatches& equalities, const FieldRef& path) {
+ int parentPathPart = -1;
+ const BSONElement& parentEl = findParentEqualityElement(equalities, path, &parentPathPart);
- if (suffixStr.size() != 0)
- errMsg += stream() << "both paths '" << pathStr << "' and '" << prefixStr
- << "' are matched";
- else
- errMsg += stream() << "path '" << pathStr << "' is matched twice";
+ if (parentEl.eoo())
+ return Status::OK();
- return Status(ErrorCodes::NotSingleValueField, errMsg);
- }
+ string errMsg = "cannot infer query fields to set, ";
- /**
- * Helper function to check if path conflicts are all prefixes.
- */
- static Status checkPathIsPrefixOf(const FieldRef& path, const FieldRefSet& conflictPaths) {
+ StringData pathStr = path.dottedField();
+ StringData prefixStr = path.dottedSubstring(0, parentPathPart);
+ StringData suffixStr = path.dottedSubstring(parentPathPart, path.numParts());
- for (FieldRefSet::const_iterator it = conflictPaths.begin(); it != conflictPaths.end();
- ++it) {
+ if (suffixStr.size() != 0)
+ errMsg += stream() << "both paths '" << pathStr << "' and '" << prefixStr
+ << "' are matched";
+ else
+ errMsg += stream() << "path '" << pathStr << "' is matched twice";
- const FieldRef* conflictingPath = *it;
- // Conflicts are always prefixes (or equal to) the path, or vice versa
- if (path.numParts() > conflictingPath->numParts()) {
+ return Status(ErrorCodes::NotSingleValueField, errMsg);
+}
- string errMsg = stream() << "field at '" << conflictingPath->dottedField()
- << "' must be exactly specified, field at sub-path '"
- << path.dottedField() << "'found";
- return Status(ErrorCodes::NotExactValueField, errMsg);
- }
+/**
+ * Helper function to check if path conflicts are all prefixes.
+ */
+static Status checkPathIsPrefixOf(const FieldRef& path, const FieldRefSet& conflictPaths) {
+ for (FieldRefSet::const_iterator it = conflictPaths.begin(); it != conflictPaths.end(); ++it) {
+ const FieldRef* conflictingPath = *it;
+ // Conflicts are always prefixes (or equal to) the path, or vice versa
+ if (path.numParts() > conflictingPath->numParts()) {
+ string errMsg = stream() << "field at '" << conflictingPath->dottedField()
+ << "' must be exactly specified, field at sub-path '"
+ << path.dottedField() << "' found";
+ return Status(ErrorCodes::NotExactValueField, errMsg);
}
-
- return Status::OK();
}
- static Status _extractFullEqualityMatches(const MatchExpression& root,
- const FieldRefSet* fullPathsToExtract,
- EqualityMatches* equalities) {
-
- if (root.matchType() == MatchExpression::EQ) {
-
- // Extract equality matches
- const EqualityMatchExpression& eqChild =
- static_cast<const EqualityMatchExpression&>(root);
-
- FieldRef path(eqChild.path());
+ return Status::OK();
+}
- if (fullPathsToExtract) {
+static Status _extractFullEqualityMatches(const MatchExpression& root,
+ const FieldRefSet* fullPathsToExtract,
+ EqualityMatches* equalities) {
+ if (root.matchType() == MatchExpression::EQ) {
+ // Extract equality matches
+ const EqualityMatchExpression& eqChild = static_cast<const EqualityMatchExpression&>(root);
- FieldRefSet conflictPaths;
- fullPathsToExtract->findConflicts(&path, &conflictPaths);
+ FieldRef path(eqChild.path());
- // Ignore if this path is unrelated to the full paths
- if (conflictPaths.empty())
- return Status::OK();
+ if (fullPathsToExtract) {
+ FieldRefSet conflictPaths;
+ fullPathsToExtract->findConflicts(&path, &conflictPaths);
- // Make sure we're a prefix of all the conflict paths
- Status status = checkPathIsPrefixOf(path, conflictPaths);
- if (!status.isOK())
- return status;
- }
+ // Ignore if this path is unrelated to the full paths
+ if (conflictPaths.empty())
+ return Status::OK();
- Status status = checkEqualityConflicts(*equalities, path);
+ // Make sure we're a prefix of all the conflict paths
+ Status status = checkPathIsPrefixOf(path, conflictPaths);
if (!status.isOK())
return status;
-
- equalities->insert(std::make_pair(eqChild.path(), &eqChild));
- }
- else if (root.matchType() == MatchExpression::AND) {
-
- // Further explore $and matches
- for (size_t i = 0; i < root.numChildren(); ++i) {
- MatchExpression* child = root.getChild(i);
- Status status = _extractFullEqualityMatches(*child, fullPathsToExtract, equalities);
- if (!status.isOK())
- return status;
- }
}
- return Status::OK();
- }
-
- Status extractFullEqualityMatches(const MatchExpression& root,
- const FieldRefSet& fullPathsToExtract,
- EqualityMatches* equalities) {
- return _extractFullEqualityMatches(root, &fullPathsToExtract, equalities);
- }
+ Status status = checkEqualityConflicts(*equalities, path);
+ if (!status.isOK())
+ return status;
- Status extractEqualityMatches(const MatchExpression& root, EqualityMatches* equalities) {
- return _extractFullEqualityMatches(root, NULL, equalities);
+ equalities->insert(std::make_pair(eqChild.path(), &eqChild));
+ } else if (root.matchType() == MatchExpression::AND) {
+ // Further explore $and matches
+ for (size_t i = 0; i < root.numChildren(); ++i) {
+ MatchExpression* child = root.getChild(i);
+ Status status = _extractFullEqualityMatches(*child, fullPathsToExtract, equalities);
+ if (!status.isOK())
+ return status;
+ }
}
- Status addEqualitiesToDoc(const EqualityMatches& equalities, mutablebson::Document* doc) {
+ return Status::OK();
+}
- for (EqualityMatches::const_iterator it = equalities.begin(); it != equalities.end();
- ++it) {
+Status extractFullEqualityMatches(const MatchExpression& root,
+ const FieldRefSet& fullPathsToExtract,
+ EqualityMatches* equalities) {
+ return _extractFullEqualityMatches(root, &fullPathsToExtract, equalities);
+}
- FieldRef path(it->first);
- const BSONElement& data = it->second->getData();
+Status extractEqualityMatches(const MatchExpression& root, EqualityMatches* equalities) {
+ return _extractFullEqualityMatches(root, NULL, equalities);
+}
- Status status = setElementAtPath(path, data, doc);
- if (!status.isOK())
- return status;
- }
+Status addEqualitiesToDoc(const EqualityMatches& equalities, mutablebson::Document* doc) {
+ for (EqualityMatches::const_iterator it = equalities.begin(); it != equalities.end(); ++it) {
+ FieldRef path(it->first);
+ const BSONElement& data = it->second->getData();
- return Status::OK();
+ Status status = setElementAtPath(path, data, doc);
+ if (!status.isOK())
+ return status;
}
-} // namespace pathsupport
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace pathsupport
+} // namespace mongo
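A short sketch of how the pathsupport helpers above compose (illustrative; the document and value being set are hypothetical). setElementAtPath() first calls findLongestPrefix() to locate the deepest existing parent, then createPathAt() to build whatever suffix is missing:

    // Sketch only: builds {a: {b: {c: 1}}} inside an initially empty mutable document.
    mutablebson::Document doc;
    FieldRef path("a.b.c");
    BSONObj source = fromjson("{c: 1}");  // hypothetical source of the value to set
    Status status = pathsupport::setElementAtPath(path, source["c"], &doc);
    if (!status.isOK()) {
        // PathNotViable would be returned if, say, 'a' already existed as a scalar.
        return status;
    }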
diff --git a/src/mongo/db/ops/path_support.h b/src/mongo/db/ops/path_support.h
index ed4515506b8..95d0a3da44c 100644
--- a/src/mongo/db/ops/path_support.h
+++ b/src/mongo/db/ops/path_support.h
@@ -40,152 +40,149 @@
namespace mongo {
- namespace pathsupport {
-
- // Cap on the number of nulls we'll add to an array if we're inserting to an index that
- // doesn't exist.
- static const size_t kMaxPaddingAllowed = 1500000;
-
- // Convenience type to hold equality matches at particular paths from a MatchExpression
- typedef std::map<StringData, const EqualityMatchExpression*> EqualityMatches;
-
- /**
- * Finds the longest portion of 'prefix' that exists in document rooted at 'root' and is
- * "viable." A viable path is one that, if fully created on a given doc, would not
- * change the existing types of any fields in that doc. (See examples below.)
- *
- * If a prefix indeed exists, 'idxFound' is set to indicate how many parts in common
- * 'prefix' and 'doc' have. 'elemFound' would point to the Element corresponding to
- * prefix[idxFound] in 'doc'. The call would return an OK status in this case.
- *
- * If a prefix is not viable, returns a status "PathNotViable". 'idxFound' is set to
- * indicate the part in the document that caused the path to be not viable. 'elemFound'
- * would point to the Element corresponding to prefix[idxFound] in 'doc'.
- *
- * If a prefix does not exist, the call returns "NonExistentPath". 'elemFound' and
- * 'idxFound' are indeterminate in this case.
- *
- * Definition of a "Viable Path":
- *
- * A field reference 'p_1.p_2.[...].p_n', where 'p_i' is a field part, is said to be
- * a viable path in a given document D if the creation of each part 'p_i', 0 <= i < n
- * in D does not force 'p_i' to change types. In other words, no existing 'p_i' in D
- * may have a different type, other than the 'p_n'.
- *
- * 'a.b.c' is a viable path in {a: {b: {c: 1}}}
- * 'a.b.c' is a viable path in {a: {b: {c: {d: 1}}}}
- * 'a.b.c' is NOT a viable path in {a: {b: 1}}, because b would have changed types
- * 'a.0.b' is a viable path in {a: [{b: 1}, {c: 1}]}
- * 'a.0.b' is a viable path in {a: {"0": {b: 1}}}
- * 'a.0.b' is NOT a viable path in {a: 1}, because a would have changed types
- * 'a.5.b' is a viable path in in {a: []} (padding would occur)
- */
- Status findLongestPrefix(const FieldRef& prefix,
- mutablebson::Element root,
- size_t* idxFound,
- mutablebson::Element* elemFound);
-
- /**
- * Creates the parts 'prefix[idxRoot]', 'prefix[idxRoot+1]', ...,
- * 'prefix[<numParts>-1]' under 'elemFound' and adds 'newElem' as a child of that
- * path. Returns OK, if successful, or an error code describing why not, otherwise.
- *
- * createPathAt is designed to work with 'findLongestPrefix' in that it can create the
- * field parts in 'prefix' that are missing from a given document. 'elemFound' points
- * to the element in the doc that is the parent of prefix[idxRoot].
- */
- Status createPathAt(const FieldRef& prefix,
- size_t idxRoot,
- mutablebson::Element elemFound,
- mutablebson::Element newElem);
-
- /**
- * Uses the above methods to set the given value at the specified path in a mutable
- * Document, creating parents of the path if necessary.
- *
- * Returns PathNotViable if the path cannot be created without modifying the type of another
- * element, see above.
- */
- Status setElementAtPath(const FieldRef& path,
- const BSONElement& value,
- mutablebson::Document* doc);
-
- /**
- * Finds and returns-by-path all the equality matches in a particular MatchExpression.
- *
- * This method is meant to be used with the methods below, which allow efficient use of the
- * equality matches without needing to serialize to a BSONObj.
- *
- * Returns NotSingleValueField if the match expression has equality operators for
- * conflicting paths - equality paths conflict if they are the same or one path is a prefix
- * of the other.
- *
- * Ex:
- * { a : 1, b : 1 } -> no conflict
- * { a : 1, a.b : 1 } -> conflict
- * { _id : { x : 1 }, _id.y : 1 } -> conflict
- * { a : 1, a : 1 } -> conflict
- */
- Status extractEqualityMatches(const MatchExpression& root, EqualityMatches* equalities);
-
- /**
- * Same as the above, but ignores all paths except for paths in a specified set.
- * Equality matches with paths completely distinct from these paths are ignored.
- *
- * For a full equality match, the path of an equality found must not be a suffix of one of
- * the specified path - otherwise it isn't clear how to construct a full value for a field
- * at that path.
- *
- * Generally this is useful for shard keys and _ids which need unambiguous extraction from
- * queries.
- *
- * Ex:
- * { a : 1 }, full path 'a' -> a $eq 1 extracted
- * { a : 1 }, full path 'a.b' -> a $eq 1 extracted
- * { 'a.b' : 1 }, full path 'a' -> NotExactValueField error
- * ('a.b' doesn't specify 'a' fully)
- * { 'a.b' : 1 }, full path 'a.b' -> 'a.b' $eq 1 extracted
- * { '_id' : 1 }, full path '_id' -> '_id' $eq 1 extracted
- * { '_id.x' : 1 }, full path '_id' -> NotExactValueFieldError
- */
- Status extractFullEqualityMatches(const MatchExpression& root,
- const FieldRefSet& fullPathsToExtract,
- EqualityMatches* equalities);
-
- /**
- * Returns the equality match which is at or a parent of the specified path string. The
- * path string must be a valid dotted path.
- *
- * If a parent equality is found, returns the BSONElement data from that equality (which
- * includes the BSON value), the path of the parent element (prefixStr), and the remainder
- * of the path (which may be empty).
- *
- * EOO() is returned if there were no equalities at any point along the path.
- *
- * Ex:
- * Given equality matches of:
- * 'a.b' : 1, 'c' : 2
- * Path 'a' has no equality match parent (EOO)
- * Path 'c' has an eqmatch parent of 'c' : 2
- * Path 'c.d' has an eqmatch parent of 'c' : 2
- * Path 'a.b' has an eqmatch parent of 'a.b' : 1
- * Path 'a.b.c' has an eqmatch parent of 'a.b' : 1
- *
- */
- const BSONElement& findParentEqualityElement(const EqualityMatches& equalities,
- const FieldRef& path,
- int* parentPathParts);
-
- /**
- * Adds the BSON values from equality matches into the given document at the equality match
- * paths.
- *
- * Returns PathNotViable similar to setElementAtPath above. If equality paths do not
- * conflict, as is enforced by extractEqualityMatches, this function should return OK.
- */
- Status addEqualitiesToDoc(const EqualityMatches& equalities,
- mutablebson::Document* doc);
-
- } // namespace pathsupport
-
-} // namespace mongo
+namespace pathsupport {
+
+// Cap on the number of nulls we'll add to an array if we're inserting to an index that
+// doesn't exist.
+static const size_t kMaxPaddingAllowed = 1500000;
+
+// Convenience type to hold equality matches at particular paths from a MatchExpression
+typedef std::map<StringData, const EqualityMatchExpression*> EqualityMatches;
+
+/**
+ * Finds the longest portion of 'prefix' that exists in document rooted at 'root' and is
+ * "viable." A viable path is one that, if fully created on a given doc, would not
+ * change the existing types of any fields in that doc. (See examples below.)
+ *
+ * If a prefix indeed exists, 'idxFound' is set to indicate how many parts in common
+ * 'prefix' and 'doc' have. 'elemFound' would point to the Element corresponding to
+ * prefix[idxFound] in 'doc'. The call would return an OK status in this case.
+ *
+ * If a prefix is not viable, returns a status "PathNotViable". 'idxFound' is set to
+ * indicate the part in the document that caused the path to be not viable. 'elemFound'
+ * would point to the Element corresponding to prefix[idxFound] in 'doc'.
+ *
+ * If a prefix does not exist, the call returns "NonExistentPath". 'elemFound' and
+ * 'idxFound' are indeterminate in this case.
+ *
+ * Definition of a "Viable Path":
+ *
+ * A field reference 'p_1.p_2.[...].p_n', where 'p_i' is a field part, is said to be
+ * a viable path in a given document D if the creation of each part 'p_i', 0 <= i < n
+ * in D does not force 'p_i' to change types. In other words, no existing 'p_i' in D
+ * may have a different type, other than the 'p_n'.
+ *
+ * 'a.b.c' is a viable path in {a: {b: {c: 1}}}
+ * 'a.b.c' is a viable path in {a: {b: {c: {d: 1}}}}
+ * 'a.b.c' is NOT a viable path in {a: {b: 1}}, because b would have changed types
+ * 'a.0.b' is a viable path in {a: [{b: 1}, {c: 1}]}
+ * 'a.0.b' is a viable path in {a: {"0": {b: 1}}}
+ * 'a.0.b' is NOT a viable path in {a: 1}, because a would have changed types
+ * 'a.5.b' is a viable path in {a: []} (padding would occur)
+ */
+Status findLongestPrefix(const FieldRef& prefix,
+ mutablebson::Element root,
+ size_t* idxFound,
+ mutablebson::Element* elemFound);
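// Editor's illustration (not part of this patch): a minimal sketch of how a
// caller typically drives findLongestPrefix; 'doc' is assumed to be a
// mutablebson::Document built elsewhere and "a.b.c" is just an example path.
FieldRef ref("a.b.c");
size_t idxFound;
mutablebson::Element elemFound = doc.root();
Status s = findLongestPrefix(ref, doc.root(), &idxFound, &elemFound);
if (s.code() == ErrorCodes::NonExistentPath) {
    // No part of "a.b.c" exists yet; creation would start at the root.
} else if (s.isOK()) {
    // Parts [0, idxFound] already exist; parts after idxFound still need creating.
} else {
    // PathNotViable: some existing element would have to change type.
}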
+
+/**
+ * Creates the parts 'prefix[idxRoot]', 'prefix[idxRoot+1]', ...,
+ * 'prefix[<numParts>-1]' under 'elemFound' and adds 'newElem' as a child of that
+ * path. Returns OK, if successful, or an error code describing why not, otherwise.
+ *
+ * createPathAt is designed to work with 'findLongestPrefix' in that it can create the
+ * field parts in 'prefix' that are missing from a given document. 'elemFound' points
+ * to the element in the doc that is the parent of prefix[idxRoot].
+ */
+Status createPathAt(const FieldRef& prefix,
+ size_t idxRoot,
+ mutablebson::Element elemFound,
+ mutablebson::Element newElem);
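// Editor's illustration (not part of this patch): continuing the sketch above,
// attach a new leaf once the longest existing prefix is known; 'doc', 'ref',
// 'idxFound' and 'elemFound' are the assumed outputs of findLongestPrefix.
mutablebson::Element newElem = doc.makeElementInt("c", 1);
if (newElem.ok()) {
    Status s = createPathAt(ref, idxFound + 1, elemFound, newElem);
    // On success, the parts of "a.b.c" missing below 'elemFound' now exist and
    // the new element is attached as the leaf.
}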
+
+/**
+ * Uses the above methods to set the given value at the specified path in a mutable
+ * Document, creating parents of the path if necessary.
+ *
+ * Returns PathNotViable if the path cannot be created without modifying the type of another
+ * element, see above.
+ */
+Status setElementAtPath(const FieldRef& path, const BSONElement& value, mutablebson::Document* doc);
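// Editor's illustration (not part of this patch): setElementAtPath bundles the
// two calls above; the empty field name on the wrapper element is assumed not
// to matter, since only the value is written at the target path.
mutablebson::Document doc;                        // starts out as {}
BSONObj wrapper = BSON("" << 1);                  // carries the value 1
Status s = setElementAtPath(FieldRef("x.y"), wrapper.firstElement(), &doc);
// On success, doc is {x: {y: 1}}.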
+
+/**
+ * Finds and returns-by-path all the equality matches in a particular MatchExpression.
+ *
+ * This method is meant to be used with the methods below, which allow efficient use of the
+ * equality matches without needing to serialize to a BSONObj.
+ *
+ * Returns NotSingleValueField if the match expression has equality operators for
+ * conflicting paths - equality paths conflict if they are the same or one path is a prefix
+ * of the other.
+ *
+ * Ex:
+ * { a : 1, b : 1 } -> no conflict
+ * { a : 1, a.b : 1 } -> conflict
+ * { _id : { x : 1 }, _id.y : 1 } -> conflict
+ * { a : 1, a : 1 } -> conflict
+ */
+Status extractEqualityMatches(const MatchExpression& root, EqualityMatches* equalities);
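// Editor's illustration (not part of this patch): pulling the equalities out of
// a parsed query; 'expr' is assumed to be a MatchExpression parsed elsewhere
// (e.g. with MatchExpressionParser) from the query object below.
BSONObj query = fromjson("{a: 1, 'b.c': {$eq: 2}}");
EqualityMatches equalities;
Status s = extractEqualityMatches(*expr, &equalities);
// On success, equalities maps "a" -> 1 and "b.c" -> 2. A query such as
// {a: 1, 'a.b': 1} would instead fail with NotSingleValueField.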
+
+/**
+ * Same as the above, but ignores all paths except for paths in a specified set.
+ * Equality matches with paths completely distinct from these paths are ignored.
+ *
+ * For a full equality match, the path of an equality found must not be a suffix of one of
+ * the specified path - otherwise it isn't clear how to construct a full value for a field
+ * at that path.
+ *
+ * Generally this is useful for shard keys and _ids which need unambiguous extraction from
+ * queries.
+ *
+ * Ex:
+ * { a : 1 }, full path 'a' -> a $eq 1 extracted
+ * { a : 1 }, full path 'a.b' -> a $eq 1 extracted
+ * { 'a.b' : 1 }, full path 'a' -> NotExactValueField error
+ * ('a.b' doesn't specify 'a' fully)
+ * { 'a.b' : 1 }, full path 'a.b' -> 'a.b' $eq 1 extracted
+ * { '_id' : 1 }, full path '_id' -> '_id' $eq 1 extracted
+ * { '_id.x' : 1 }, full path '_id' -> NotExactValueFieldError
+ */
+Status extractFullEqualityMatches(const MatchExpression& root,
+ const FieldRefSet& fullPathsToExtract,
+ EqualityMatches* equalities);
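// Editor's illustration (not part of this patch): restricting extraction to the
// paths that must be fully specified, e.g. a shard key on "a"; 'expr' and the
// contents of 'shardKeyPaths' are assumed to be built elsewhere.
FieldRefSet shardKeyPaths;  // assumed to contain the single path "a"
EqualityMatches equalities;
Status s = extractFullEqualityMatches(*expr, shardKeyPaths, &equalities);
// {a: 1, x: 2} -> OK; equalities holds "a" -> 1 and "x" is ignored.
// {'a.b': 1}   -> NotExactValueField, since 'a.b' does not fully specify 'a'.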
+
+/**
+ * Returns the equality match which is at or a parent of the specified path string. The
+ * path string must be a valid dotted path.
+ *
+ * If a parent equality is found, returns the BSONElement data from that equality (which
+ * includes the BSON value), the path of the parent element (prefixStr), and the remainder
+ * of the path (which may be empty).
+ *
+ * EOO() is returned if there were no equalities at any point along the path.
+ *
+ * Ex:
+ * Given equality matches of:
+ * 'a.b' : 1, 'c' : 2
+ * Path 'a' has no equality match parent (EOO)
+ * Path 'c' has an eqmatch parent of 'c' : 2
+ * Path 'c.d' has an eqmatch parent of 'c' : 2
+ * Path 'a.b' has an eqmatch parent of 'a.b' : 1
+ * Path 'a.b.c' has an eqmatch parent of 'a.b' : 1
+ *
+ */
+const BSONElement& findParentEqualityElement(const EqualityMatches& equalities,
+ const FieldRef& path,
+ int* parentPathParts);
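// Editor's illustration (not part of this patch): looking up the equality that
// covers a deeper path; 'equalities' is assumed to already hold 'a.b' -> 1.
int parentPathParts;
const BSONElement& parent =
    findParentEqualityElement(equalities, FieldRef("a.b.c"), &parentPathParts);
if (!parent.eoo()) {
    // 'parent' carries the value 1 and parentPathParts is 2, i.e. the two-part
    // prefix "a.b" supplied the match.
}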
+
+/**
+ * Adds the BSON values from equality matches into the given document at the equality match
+ * paths.
+ *
+ * Returns PathNotViable similar to setElementAtPath above. If equality paths do not
+ * conflict, as is enforced by extractEqualityMatches, this function should return OK.
+ */
+Status addEqualitiesToDoc(const EqualityMatches& equalities, mutablebson::Document* doc);
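// Editor's illustration (not part of this patch): materializing extracted
// equalities into a document; 'equalities' is assumed to hold 'a.b' -> 1 and
// 'c' -> 2, e.g. as produced by extractEqualityMatches above.
mutablebson::Document doc;
Status s = addEqualitiesToDoc(equalities, &doc);
// On success, doc is {a: {b: 1}, c: 2}.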
+
+} // namespace pathsupport
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/path_support_test.cpp b/src/mongo/db/ops/path_support_test.cpp
index a502a39a0c3..62e6ded46c2 100644
--- a/src/mongo/db/ops/path_support_test.cpp
+++ b/src/mongo/db/ops/path_support_test.cpp
@@ -49,834 +49,852 @@
namespace {
- using namespace mongo;
- using namespace mutablebson;
- using namespace pathsupport;
- using mongoutils::str::stream;
- using std::unique_ptr;
- using std::string;
+using namespace mongo;
+using namespace mutablebson;
+using namespace pathsupport;
+using mongoutils::str::stream;
+using std::unique_ptr;
+using std::string;
- class EmptyDoc : public mongo::unittest::Test {
- public:
- EmptyDoc() : _doc() {}
+class EmptyDoc : public mongo::unittest::Test {
+public:
+ EmptyDoc() : _doc() {}
- Document& doc() { return _doc; }
-
- Element root() { return _doc.root(); }
-
- FieldRef& field() { return _field; }
-
- void setField(StringData str) { _field.parse(str); }
-
- private:
- Document _doc;
- FieldRef _field;
- };
-
- TEST_F(EmptyDoc, EmptyPath) {
- setField("");
-
- size_t idxFound;
- Element elemFound = root();
- Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
- ASSERT_EQUALS(status, ErrorCodes::NonExistentPath);
- }
-
- TEST_F(EmptyDoc, NewField) {
- setField("a");
-
- size_t idxFound;
- Element elemFound = root();
- Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
- ASSERT_EQUALS(status, ErrorCodes::NonExistentPath);
-
- Element newElem = doc().makeElementInt("a", 1);
- ASSERT_TRUE(newElem.ok());
- ASSERT_OK(createPathAt(field(), 0, root(), newElem));
- ASSERT_EQUALS(fromjson("{a: 1}"), doc());
- }
-
- class SimpleDoc : public mongo::unittest::Test {
- public:
- SimpleDoc() : _doc() {}
-
- virtual void setUp() {
- // {a: 1}
- ASSERT_OK(root().appendInt("a", 1));
- }
-
- Document& doc() { return _doc; }
-
- Element root() { return _doc.root(); }
-
- FieldRef& field() { return _field; }
- void setField(StringData str) { _field.parse(str); }
-
- private:
- Document _doc;
- FieldRef _field;
- };
-
- TEST_F(SimpleDoc, EmptyPath) {
- setField("");
-
- size_t idxFound;
- Element elemFound = root();
- Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
- ASSERT_EQUALS(status, ErrorCodes::NonExistentPath);
- }
-
- TEST_F(SimpleDoc, SimplePath) {
- setField("a");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 0U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]), 0);
- }
-
- TEST_F(SimpleDoc, LongerPath) {
- setField("a.b");
-
- size_t idxFound;
- Element elemFound = root();
- Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
- ASSERT_EQUALS(status, ErrorCodes::PathNotViable);
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 0U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]), 0);
+ Document& doc() {
+ return _doc;
}
- TEST_F(SimpleDoc, NotCommonPrefix) {
- setField("b");
-
- size_t idxFound;
- Element elemFound = root();
- Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
- ASSERT_EQUALS(status, ErrorCodes::NonExistentPath);
-
- // From this point on, handles the creation of the '.b' part that wasn't found.
- Element newElem = doc().makeElementInt("b", 1);
- ASSERT_TRUE(newElem.ok());
- ASSERT_EQUALS(countChildren(root()), 1u);
-
- ASSERT_OK(createPathAt(field(), 0, root(), newElem));
- ASSERT_EQUALS(newElem.getFieldName(), "b");
- ASSERT_EQUALS(newElem.getType(), NumberInt);
- ASSERT_TRUE(newElem.hasValue());
- ASSERT_EQUALS(newElem.getValueInt(), 1);
-
- ASSERT_TRUE(newElem.parent().ok() /* root an ok parent */);
- ASSERT_EQUALS(countChildren(root()), 2u);
- ASSERT_EQUALS(root().leftChild().getFieldName(), "a");
- ASSERT_EQUALS(root().leftChild().rightSibling().getFieldName(), "b");
- ASSERT_EQUALS(root().rightChild().getFieldName(), "b");
- ASSERT_EQUALS(root().rightChild().leftSibling().getFieldName(), "a");
+ Element root() {
+ return _doc.root();
}
- class NestedDoc : public mongo::unittest::Test {
- public:
- NestedDoc() : _doc() {}
-
- virtual void setUp() {
- // {a: {b: {c: 1}}}
- Element elemA = _doc.makeElementObject("a");
- ASSERT_TRUE(elemA.ok());
- Element elemB = _doc.makeElementObject("b");
- ASSERT_TRUE(elemB.ok());
- Element elemC = _doc.makeElementInt("c", 1);
- ASSERT_TRUE(elemC.ok());
-
- ASSERT_OK(elemB.pushBack(elemC));
- ASSERT_OK(elemA.pushBack(elemB));
- ASSERT_OK(root().pushBack(elemA));
- }
-
- Document& doc() { return _doc; }
-
- Element root() { return _doc.root(); }
-
- FieldRef& field() { return _field; }
- void setField(StringData str) { _field.parse(str); }
-
- private:
- Document _doc;
- FieldRef _field;
- };
-
- TEST_F(NestedDoc, SimplePath) {
- setField("a");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 0U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]), 0);
+ FieldRef& field() {
+ return _field;
}
- TEST_F(NestedDoc, ShorterPath) {
- setField("a.b");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_EQUALS(idxFound, 1U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]), 0);
+ void setField(StringData str) {
+ _field.parse(str);
}
- TEST_F(NestedDoc, ExactPath) {
- setField("a.b.c");
+private:
+ Document _doc;
+ FieldRef _field;
+};
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 2U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"]), 0);
- }
-
- TEST_F(NestedDoc, LongerPath) {
- // This would for 'c' to change from NumberInt to Object, which is invalid.
- setField("a.b.c.d");
+TEST_F(EmptyDoc, EmptyPath) {
+ setField("");
- size_t idxFound;
- Element elemFound = root();
- Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
- ASSERT_EQUALS(status.code(), ErrorCodes::PathNotViable);
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 2U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"]), 0);
+ size_t idxFound;
+ Element elemFound = root();
+ Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
+ ASSERT_EQUALS(status, ErrorCodes::NonExistentPath);
+}
- }
+TEST_F(EmptyDoc, NewField) {
+ setField("a");
- TEST_F(NestedDoc, NewFieldNested) {
- setField("a.b.d");
+ size_t idxFound;
+ Element elemFound = root();
+ Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
+ ASSERT_EQUALS(status, ErrorCodes::NonExistentPath);
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_EQUALS(idxFound, 1U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]), 0);
+ Element newElem = doc().makeElementInt("a", 1);
+ ASSERT_TRUE(newElem.ok());
+ ASSERT_OK(createPathAt(field(), 0, root(), newElem));
+ ASSERT_EQUALS(fromjson("{a: 1}"), doc());
+}
- // From this point on, handles the creation of the '.d' part that wasn't found.
- Element newElem = doc().makeElementInt("d", 1);
- ASSERT_TRUE(newElem.ok());
- ASSERT_EQUALS(countChildren(elemFound), 1u); // 'c' is a child of 'b'
+class SimpleDoc : public mongo::unittest::Test {
+public:
+ SimpleDoc() : _doc() {}
- ASSERT_OK(createPathAt(field(), idxFound+1, elemFound, newElem));
- ASSERT_EQUALS(fromjson("{a: {b: {c: 1, d: 1}}}"), doc());
+ virtual void setUp() {
+ // {a: 1}
+ ASSERT_OK(root().appendInt("a", 1));
}
- TEST_F(NestedDoc, NotStartingFromRoot) {
- setField("b.c");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root()["a"], &idxFound, &elemFound));
- ASSERT_EQUALS(idxFound, 1U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"]), 0);
+ Document& doc() {
+ return _doc;
}
- class ArrayDoc : public mongo::unittest::Test {
- public:
- ArrayDoc() : _doc() {}
-
- virtual void setUp() {
- // {a: []}
- Element elemA = _doc.makeElementArray("a");
- ASSERT_TRUE(elemA.ok());
- ASSERT_OK(root().pushBack(elemA));
-
- // {a: [], b: [{c: 1}]}
- Element elemB = _doc.makeElementArray("b");
- ASSERT_TRUE(elemB.ok());
- Element elemObj = _doc.makeElementObject("dummy" /* field name not used in array */);
- ASSERT_TRUE(elemObj.ok());
- ASSERT_OK(elemObj.appendInt("c",1));
- ASSERT_OK(elemB.pushBack(elemObj));
- ASSERT_OK(root().pushBack(elemB));
- }
-
- Document& doc() { return _doc; }
-
- Element root() { return _doc.root(); }
-
- FieldRef& field() { return _field; }
-
- void setField(StringData str) { _field.parse(str); }
-
- private:
- Document _doc;
- FieldRef _field;
- };
-
- TEST_F(ArrayDoc, PathOnEmptyArray) {
- setField("a.0");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 0U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]), 0);
+ Element root() {
+ return _doc.root();
}
- TEST_F(ArrayDoc, PathOnPopulatedArray) {
- setField("b.0");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 1U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0]), 0);
+ FieldRef& field() {
+ return _field;
}
-
- TEST_F(ArrayDoc, MixedArrayAndObjectPath) {
- setField("b.0.c");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 2U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0]["c"]), 0);
+ void setField(StringData str) {
+ _field.parse(str);
}
- TEST_F(ArrayDoc, ExtendingExistingObject) {
- setField("b.0.d");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 1U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0]), 0);
+private:
+ Document _doc;
+ FieldRef _field;
+};
- // From this point on, handles the creation of the '.0.d' part that wasn't found.
- Element newElem = doc().makeElementInt("d", 1);
- ASSERT_TRUE(newElem.ok());
- ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of b.0
+TEST_F(SimpleDoc, EmptyPath) {
+ setField("");
- ASSERT_OK(createPathAt(field(), idxFound+1, elemFound, newElem));
- ASSERT_EQUALS(fromjson("{a: [], b: [{c:1, d:1}]}"), doc());
- }
+ size_t idxFound;
+ Element elemFound = root();
+ Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
+ ASSERT_EQUALS(status, ErrorCodes::NonExistentPath);
+}
- TEST_F(ArrayDoc, NewObjectInsideArray) {
- setField("b.1.c");
+TEST_F(SimpleDoc, SimplePath) {
+ setField("a");
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 0U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["b"]), 0);
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 0U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]), 0);
+}
- // From this point on, handles the creation of the '.1.c' part that wasn't found.
- Element newElem = doc().makeElementInt("c", 2);
- ASSERT_TRUE(newElem.ok());
- ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of 'b'
-
- ASSERT_OK(createPathAt(field(), idxFound+1, elemFound, newElem));
- ASSERT_EQUALS(fromjson("{a: [], b: [{c:1},{c:2}]}"), doc());
- }
+TEST_F(SimpleDoc, LongerPath) {
+ setField("a.b");
- TEST_F(ArrayDoc, NewNestedObjectInsideArray) {
- setField("b.1.c.d");
+ size_t idxFound;
+ Element elemFound = root();
+ Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
+ ASSERT_EQUALS(status, ErrorCodes::PathNotViable);
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 0U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]), 0);
+}
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 0U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["b"]), 0);
+TEST_F(SimpleDoc, NotCommonPrefix) {
+ setField("b");
- // From this point on, handles the creation of the '.1.c.d' part that wasn't found.
- Element newElem = doc().makeElementInt("d", 2);
- ASSERT_TRUE(newElem.ok());
- ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of 'b'
+ size_t idxFound;
+ Element elemFound = root();
+ Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
+ ASSERT_EQUALS(status, ErrorCodes::NonExistentPath);
+
+ // From this point on, handles the creation of the '.b' part that wasn't found.
+ Element newElem = doc().makeElementInt("b", 1);
+ ASSERT_TRUE(newElem.ok());
+ ASSERT_EQUALS(countChildren(root()), 1u);
+
+ ASSERT_OK(createPathAt(field(), 0, root(), newElem));
+ ASSERT_EQUALS(newElem.getFieldName(), "b");
+ ASSERT_EQUALS(newElem.getType(), NumberInt);
+ ASSERT_TRUE(newElem.hasValue());
+ ASSERT_EQUALS(newElem.getValueInt(), 1);
+
+ ASSERT_TRUE(newElem.parent().ok() /* root an ok parent */);
+ ASSERT_EQUALS(countChildren(root()), 2u);
+ ASSERT_EQUALS(root().leftChild().getFieldName(), "a");
+ ASSERT_EQUALS(root().leftChild().rightSibling().getFieldName(), "b");
+ ASSERT_EQUALS(root().rightChild().getFieldName(), "b");
+ ASSERT_EQUALS(root().rightChild().leftSibling().getFieldName(), "a");
+}
+
+class NestedDoc : public mongo::unittest::Test {
+public:
+ NestedDoc() : _doc() {}
+
+ virtual void setUp() {
+ // {a: {b: {c: 1}}}
+ Element elemA = _doc.makeElementObject("a");
+ ASSERT_TRUE(elemA.ok());
+ Element elemB = _doc.makeElementObject("b");
+ ASSERT_TRUE(elemB.ok());
+ Element elemC = _doc.makeElementInt("c", 1);
+ ASSERT_TRUE(elemC.ok());
+
+ ASSERT_OK(elemB.pushBack(elemC));
+ ASSERT_OK(elemA.pushBack(elemB));
+ ASSERT_OK(root().pushBack(elemA));
+ }
+
+ Document& doc() {
+ return _doc;
+ }
+
+ Element root() {
+ return _doc.root();
+ }
+
+ FieldRef& field() {
+ return _field;
+ }
+ void setField(StringData str) {
+ _field.parse(str);
+ }
+
+private:
+ Document _doc;
+ FieldRef _field;
+};
+
+TEST_F(NestedDoc, SimplePath) {
+ setField("a");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 0U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]), 0);
+}
+
+TEST_F(NestedDoc, ShorterPath) {
+ setField("a.b");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_EQUALS(idxFound, 1U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]), 0);
+}
+
+TEST_F(NestedDoc, ExactPath) {
+ setField("a.b.c");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 2U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"]), 0);
+}
+
+TEST_F(NestedDoc, LongerPath) {
+ // This would force 'c' to change from NumberInt to Object, which is invalid.
+ setField("a.b.c.d");
+
+ size_t idxFound;
+ Element elemFound = root();
+ Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
+ ASSERT_EQUALS(status.code(), ErrorCodes::PathNotViable);
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 2U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"]), 0);
+}
+
+TEST_F(NestedDoc, NewFieldNested) {
+ setField("a.b.d");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_EQUALS(idxFound, 1U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]), 0);
+
+ // From this point on, handles the creation of the '.d' part that wasn't found.
+ Element newElem = doc().makeElementInt("d", 1);
+ ASSERT_TRUE(newElem.ok());
+ ASSERT_EQUALS(countChildren(elemFound), 1u); // 'c' is a child of 'b'
+
+ ASSERT_OK(createPathAt(field(), idxFound + 1, elemFound, newElem));
+ ASSERT_EQUALS(fromjson("{a: {b: {c: 1, d: 1}}}"), doc());
+}
+
+TEST_F(NestedDoc, NotStartingFromRoot) {
+ setField("b.c");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root()["a"], &idxFound, &elemFound));
+ ASSERT_EQUALS(idxFound, 1U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"]), 0);
+}
+
+class ArrayDoc : public mongo::unittest::Test {
+public:
+ ArrayDoc() : _doc() {}
+
+ virtual void setUp() {
+ // {a: []}
+ Element elemA = _doc.makeElementArray("a");
+ ASSERT_TRUE(elemA.ok());
+ ASSERT_OK(root().pushBack(elemA));
+
+ // {a: [], b: [{c: 1}]}
+ Element elemB = _doc.makeElementArray("b");
+ ASSERT_TRUE(elemB.ok());
+ Element elemObj = _doc.makeElementObject("dummy" /* field name not used in array */);
+ ASSERT_TRUE(elemObj.ok());
+ ASSERT_OK(elemObj.appendInt("c", 1));
+ ASSERT_OK(elemB.pushBack(elemObj));
+ ASSERT_OK(root().pushBack(elemB));
+ }
+
+ Document& doc() {
+ return _doc;
+ }
+
+ Element root() {
+ return _doc.root();
+ }
+
+ FieldRef& field() {
+ return _field;
+ }
+
+ void setField(StringData str) {
+ _field.parse(str);
+ }
+
+private:
+ Document _doc;
+ FieldRef _field;
+};
+
+TEST_F(ArrayDoc, PathOnEmptyArray) {
+ setField("a.0");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 0U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]), 0);
+}
+
+TEST_F(ArrayDoc, PathOnPopulatedArray) {
+ setField("b.0");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 1U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0]), 0);
+}
+
+TEST_F(ArrayDoc, MixedArrayAndObjectPath) {
+ setField("b.0.c");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 2U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0]["c"]), 0);
+}
+
+TEST_F(ArrayDoc, ExtendingExistingObject) {
+ setField("b.0.d");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 1U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0]), 0);
+
+ // From this point on, handles the creation of the '.0.d' part that wasn't found.
+ Element newElem = doc().makeElementInt("d", 1);
+ ASSERT_TRUE(newElem.ok());
+ ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of b.0
+
+ ASSERT_OK(createPathAt(field(), idxFound + 1, elemFound, newElem));
+ ASSERT_EQUALS(fromjson("{a: [], b: [{c:1, d:1}]}"), doc());
+}
+
+TEST_F(ArrayDoc, NewObjectInsideArray) {
+ setField("b.1.c");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 0U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["b"]), 0);
+
+ // From this point on, handles the creation of the '.1.c' part that wasn't found.
+ Element newElem = doc().makeElementInt("c", 2);
+ ASSERT_TRUE(newElem.ok());
+ ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of 'b'
+
+ ASSERT_OK(createPathAt(field(), idxFound + 1, elemFound, newElem));
+ ASSERT_EQUALS(fromjson("{a: [], b: [{c:1},{c:2}]}"), doc());
+}
+
+TEST_F(ArrayDoc, NewNestedObjectInsideArray) {
+ setField("b.1.c.d");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 0U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["b"]), 0);
+
+ // From this point on, handles the creation of the '.1.c.d' part that wasn't found.
+ Element newElem = doc().makeElementInt("d", 2);
+ ASSERT_TRUE(newElem.ok());
+ ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of 'b'
+
+ ASSERT_OK(createPathAt(field(), idxFound + 1, elemFound, newElem));
+ ASSERT_EQUALS(fromjson("{a: [], b: [{c:1},{c:{d:2}}]}"), doc());
+}
+
+TEST_F(ArrayDoc, ArrayPaddingNecessary) {
+ setField("b.5");
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 0U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["b"]), 0);
+
+ // From this point on, handles the creation of the '.5' part that wasn't found.
+ Element newElem = doc().makeElementInt("", 1);
+ ASSERT_TRUE(newElem.ok());
+ ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of 'b'
+
+ ASSERT_OK(createPathAt(field(), idxFound + 1, elemFound, newElem));
+ ASSERT_EQUALS(fromjson("{a: [], b: [{c:1},null,null,null,null,1]}"), doc());
+}
+
+TEST_F(ArrayDoc, ExcessivePaddingRequested) {
+ // Try to create an array item beyond what we're allowed to pad.
+ string paddedField = stream() << "b." << mongo::pathsupport::kMaxPaddingAllowed + 1;
+ setField(paddedField);
+
+ size_t idxFound;
+ Element elemFound = root();
+ ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
+
+ // From this point on, try to create the padded part that wasn't found.
+ Element newElem = doc().makeElementInt("", 1);
+ ASSERT_TRUE(newElem.ok());
+ ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of 'b'
+
+ Status status = createPathAt(field(), idxFound + 1, elemFound, newElem);
+ ASSERT_EQUALS(status.code(), ErrorCodes::CannotBackfillArray);
+}
+
+TEST_F(ArrayDoc, NonNumericPathInArray) {
+ setField("b.z");
+
+ size_t idxFound;
+ Element elemFound = root();
+ Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
+ ASSERT_EQUALS(status.code(), ErrorCodes::PathNotViable);
+ ASSERT_TRUE(elemFound.ok());
+ ASSERT_EQUALS(idxFound, 0U);
+ ASSERT_EQUALS(elemFound.compareWithElement(root()["b"]), 0);
+}
+
+//
+// Tests of equality extraction from MatchExpressions
+// NONGOAL: Testing query/match expression parsing and optimization
+//
+
+static MatchExpression* makeExpr(const BSONObj& exprBSON) {
+ static const WhereCallbackNoop callbackNoop;
+ return MatchExpressionParser::parse(exprBSON, callbackNoop).getValue();
+}
+
+static void assertContains(const EqualityMatches& equalities, const BSONObj& wrapped) {
+ BSONElement value = wrapped.firstElement();
+ StringData path = value.fieldNameStringData();
+
+ EqualityMatches::const_iterator it = equalities.find(path);
+ if (it == equalities.end()) {
+ FAIL(stream() << "Equality matches did not contain path \"" << path << "\"");
+ }
+ if (!it->second->getData().valuesEqual(value)) {
+ FAIL(stream() << "Equality match at path \"" << path << "\" contains value "
+ << it->second->getData() << ", not value " << value);
+ }
+}
+
+static void assertContains(const EqualityMatches& equalities, StringData path, int value) {
+ assertContains(equalities, BSON(path << value));
+}
+
+// NOTE: For tests below, BSONObj expr must exist for lifetime of MatchExpression
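// Editor's note (not part of this patch), expanding on the lifetime requirement
// above: the parsed MatchExpression is assumed to keep views into the backing
// BSONObj rather than copies, so the BSONObj must outlive the expression.
//
//     BSONObj exprBSON = fromjson("{a:1}");                 // named; outlives 'expr'
//     unique_ptr<MatchExpression> expr(makeExpr(exprBSON)); // fine
//     unique_ptr<MatchExpression> bad(makeExpr(fromjson("{a:1}")));
//     // assumed dangling: the temporary BSONObj is destroyed at the end of the
//     // full expression, leaving 'bad' referring to freed data.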
+
+TEST(ExtractEqualities, Basic) {
+ BSONObj exprBSON = fromjson("{a:1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 1u);
+ assertContains(equalities, "a", 1);
+}
+
+TEST(ExtractEqualities, Multiple) {
+ BSONObj exprBSON = fromjson("{a:1, b:2}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 2u);
+ assertContains(equalities, "a", 1);
+ assertContains(equalities, "b", 2);
+}
+
+TEST(ExtractEqualities, EqOperator) {
+ BSONObj exprBSON = fromjson("{a:{$eq:1}}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 1u);
+ assertContains(equalities, "a", 1);
+}
+
+TEST(ExtractEqualities, AndOperator) {
+ BSONObj exprBSON = fromjson("{$and:[{a:{$eq:1}},{b:2}]}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 2u);
+ assertContains(equalities, "a", 1);
+ assertContains(equalities, "b", 2);
+}
+
+TEST(ExtractEqualities, NestedAndOperator) {
+ BSONObj exprBSON = fromjson("{$and:[{$and:[{a:{$eq:1}},{b:2}]},{c:3}]}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 3u);
+ assertContains(equalities, "a", 1);
+ assertContains(equalities, "b", 2);
+ assertContains(equalities, "c", 3);
+}
+
+TEST(ExtractEqualities, NestedPaths) {
+ BSONObj exprBSON = fromjson("{'a.a':1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 1u);
+ assertContains(equalities, "a.a", 1);
+}
+
+TEST(ExtractEqualities, SiblingPaths) {
+ BSONObj exprBSON = fromjson("{'a.a':1,'a.b':{$eq:2}}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 2u);
+ assertContains(equalities, "a.a", 1);
+ assertContains(equalities, "a.b", 2);
+}
+
+TEST(ExtractEqualities, NestedAndNestedPaths) {
+ BSONObj exprBSON = fromjson("{$and:[{$and:[{'a.a':{$eq:1}},{'a.b':2}]},{'c.c.c':3}]}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 3u);
+ assertContains(equalities, "a.a", 1);
+ assertContains(equalities, "a.b", 2);
+ assertContains(equalities, "c.c.c", 3);
+}
+
+TEST(ExtractEqualities, IdOnly) {
+ BSONObj exprBSON = fromjson("{_id:1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 1u);
+ assertContains(equalities, "_id", 1);
+}
- ASSERT_OK(createPathAt(field(), idxFound+1, elemFound, newElem));
- ASSERT_EQUALS(fromjson("{a: [], b: [{c:1},{c:{d:2}}]}"), doc());
- }
-
- TEST_F(ArrayDoc, ArrayPaddingNecessary) {
- setField("b.5");
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 0U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["b"]), 0);
-
- // From this point on, handles the creation of the '.5' part that wasn't found.
- Element newElem = doc().makeElementInt("", 1);
- ASSERT_TRUE(newElem.ok());
- ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of 'b'
-
- ASSERT_OK(createPathAt(field(), idxFound+1, elemFound, newElem));
- ASSERT_EQUALS(fromjson("{a: [], b: [{c:1},null,null,null,null,1]}"), doc());
- }
-
- TEST_F(ArrayDoc, ExcessivePaddingRequested) {
- // Try to create an array item beyond what we're allowed to pad.
- string paddedField = stream() << "b." << mongo::pathsupport::kMaxPaddingAllowed + 1;;
- setField(paddedField);
-
- size_t idxFound;
- Element elemFound = root();
- ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound));
-
- // From this point on, try to create the padded part that wasn't found.
- Element newElem = doc().makeElementInt("", 1);
- ASSERT_TRUE(newElem.ok());
- ASSERT_EQUALS(countChildren(elemFound), 1u); // '{c:1}' is a child of 'b'
-
- Status status = createPathAt(field(), idxFound+1, elemFound, newElem);
- ASSERT_EQUALS(status.code(), ErrorCodes::CannotBackfillArray);
- }
-
- TEST_F(ArrayDoc, NonNumericPathInArray) {
- setField("b.z");
-
- size_t idxFound;
- Element elemFound = root();
- Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound);
- ASSERT_EQUALS(status.code(), ErrorCodes::PathNotViable);
- ASSERT_TRUE(elemFound.ok());
- ASSERT_EQUALS(idxFound, 0U);
- ASSERT_EQUALS(elemFound.compareWithElement(root()["b"]), 0);
- }
-
- //
- // Tests of equality extraction from MatchExpressions
- // NONGOAL: Testing query/match expression parsing and optimization
- //
-
- static MatchExpression* makeExpr(const BSONObj& exprBSON) {
- static const WhereCallbackNoop callbackNoop;
- return MatchExpressionParser::parse(exprBSON, callbackNoop).getValue();
- }
-
- static void assertContains(const EqualityMatches& equalities, const BSONObj& wrapped) {
-
- BSONElement value = wrapped.firstElement();
- StringData path = value.fieldNameStringData();
-
- EqualityMatches::const_iterator it = equalities.find(path);
- if (it == equalities.end()) {
- FAIL(stream() << "Equality matches did not contain path \"" << path << "\"");
- }
- if (!it->second->getData().valuesEqual(value)) {
- FAIL(stream() << "Equality match at path \"" << path << "\" contains value "
- << it->second->getData() << ", not value " << value);
- }
- }
-
- static void assertContains(const EqualityMatches& equalities,
- StringData path,
- int value) {
- assertContains(equalities, BSON(path << value));
- }
-
- // NOTE: For tests below, BSONObj expr must exist for lifetime of MatchExpression
-
- TEST(ExtractEqualities, Basic) {
- BSONObj exprBSON = fromjson("{a:1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 1u);
- assertContains(equalities, "a", 1);
- }
-
- TEST(ExtractEqualities, Multiple) {
- BSONObj exprBSON = fromjson("{a:1, b:2}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 2u);
- assertContains(equalities, "a", 1);
- assertContains(equalities, "b", 2);
- }
-
- TEST(ExtractEqualities, EqOperator) {
- BSONObj exprBSON = fromjson("{a:{$eq:1}}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 1u);
- assertContains(equalities, "a", 1);
- }
-
- TEST(ExtractEqualities, AndOperator) {
- BSONObj exprBSON = fromjson("{$and:[{a:{$eq:1}},{b:2}]}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 2u);
- assertContains(equalities, "a", 1);
- assertContains(equalities, "b", 2);
- }
-
- TEST(ExtractEqualities, NestedAndOperator) {
- BSONObj exprBSON = fromjson("{$and:[{$and:[{a:{$eq:1}},{b:2}]},{c:3}]}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 3u);
- assertContains(equalities, "a", 1);
- assertContains(equalities, "b", 2);
- assertContains(equalities, "c", 3);
- }
-
- TEST(ExtractEqualities, NestedPaths) {
- BSONObj exprBSON = fromjson("{'a.a':1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 1u);
- assertContains(equalities, "a.a", 1);
- }
-
- TEST(ExtractEqualities, SiblingPaths) {
- BSONObj exprBSON = fromjson("{'a.a':1,'a.b':{$eq:2}}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 2u);
- assertContains(equalities, "a.a", 1);
- assertContains(equalities, "a.b", 2);
- }
-
- TEST(ExtractEqualities, NestedAndNestedPaths) {
- BSONObj exprBSON = fromjson("{$and:[{$and:[{'a.a':{$eq:1}},{'a.b':2}]},{'c.c.c':3}]}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 3u);
- assertContains(equalities, "a.a", 1);
- assertContains(equalities, "a.b", 2);
- assertContains(equalities, "c.c.c", 3);
- }
-
- TEST(ExtractEqualities, IdOnly) {
- BSONObj exprBSON = fromjson("{_id:1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 1u);
- assertContains(equalities, "_id", 1);
- }
-
- /**
- * Helper class to allow easy construction of immutable paths
- */
- class ImmutablePaths {
- public:
- ImmutablePaths() {}
-
- void addPath(const string& path) {
- _ownedPaths.mutableVector().push_back(new FieldRef(path));
- FieldRef const* conflictPath = NULL;
- ASSERT(_immutablePathSet.insert(_ownedPaths.vector().back(), &conflictPath));
- }
-
- const FieldRefSet& getPathSet() {
- return _immutablePathSet;
- }
-
- private:
-
- FieldRefSet _immutablePathSet;
- OwnedPointerVector<FieldRef> _ownedPaths;
- };
-
- TEST(ExtractEqualities, IdOnlyMulti) {
- BSONObj exprBSON = fromjson("{_id:{$eq:1},a:1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- ImmutablePaths immutablePaths;
- immutablePaths.addPath("_id");
-
- EqualityMatches equalities;
- ASSERT_OK(extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities));
- ASSERT_EQUALS(equalities.size(), 1u);
- assertContains(equalities, "_id", 1);
- }
-
- TEST(ExtractEqualities, IdOnlyIgnoreConflict) {
- BSONObj exprBSON = fromjson("{_id:1,a:1,'a.b':1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- ImmutablePaths immutablePaths;
- immutablePaths.addPath("_id");
-
- EqualityMatches equalities;
- ASSERT_OK(extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities));
- ASSERT_EQUALS(equalities.size(), 1u);
- assertContains(equalities, "_id", 1);
- }
-
- TEST(ExtractEqualities, IdOnlyNested) {
- BSONObj exprBSON = fromjson("{'_id.a':1,'_id.b':{$eq:2},c:3}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- ImmutablePaths immutablePaths;
- immutablePaths.addPath("_id");
-
- EqualityMatches equalities;
- Status status = extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities);
- ASSERT_EQUALS(status.code(), ErrorCodes::NotExactValueField);
- }
-
- TEST(ExtractEqualities, IdAndOtherImmutable) {
- BSONObj exprBSON = fromjson("{_id:1,a:1,b:2}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- ImmutablePaths immutablePaths;
- immutablePaths.addPath("_id");
- immutablePaths.addPath("a");
-
- EqualityMatches equalities;
- ASSERT_OK(extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities));
- ASSERT_EQUALS(equalities.size(), 2u);
- assertContains(equalities, "_id", 1);
- assertContains(equalities, "a", 1);
- }
-
- TEST(ExtractEqualities, IdAndNestedImmutable) {
- BSONObj exprBSON = fromjson("{_id:1,a:1,'c.d':3}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- ImmutablePaths immutablePaths;
- immutablePaths.addPath("_id");
- immutablePaths.addPath("a.b");
- immutablePaths.addPath("c.d");
-
- EqualityMatches equalities;
- ASSERT_OK(extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities));
- ASSERT_EQUALS(equalities.size(), 3u);
- assertContains(equalities, "_id", 1);
- assertContains(equalities, "a", 1);
- assertContains(equalities, "c.d", 3);
- }
-
- TEST(ExtractEqualities, NonFullImmutable) {
- BSONObj exprBSON = fromjson("{'a.b':1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- ImmutablePaths immutablePaths;
- immutablePaths.addPath("a");
-
- EqualityMatches equalities;
- Status status = extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities);
- ASSERT_EQUALS(status.code(), ErrorCodes::NotExactValueField);
- }
-
- TEST(ExtractEqualities, Empty) {
- BSONObj exprBSON = fromjson("{'':0}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 1u);
- assertContains(equalities, "", 0);
- }
-
- TEST(ExtractEqualities, EmptyMulti) {
- BSONObj exprBSON = fromjson("{'':0,a:{$eq:1}}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
- ASSERT_EQUALS(equalities.size(), 2u);
- assertContains(equalities, "", 0);
- assertContains(equalities, "a", 1);
- }
-
- TEST(ExtractEqualities, EqConflict) {
- BSONObj exprBSON = fromjson("{a:1,a:1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_EQUALS(extractEqualityMatches(*expr, &equalities).code(),
- ErrorCodes::NotSingleValueField);
- }
-
- TEST(ExtractEqualities, PrefixConflict) {
- BSONObj exprBSON = fromjson("{a:1,'a.b':{$eq:1}}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_EQUALS(extractEqualityMatches(*expr, &equalities).code(),
- ErrorCodes::NotSingleValueField);
- }
-
- TEST(ExtractEqualities, AndPrefixConflict) {
- BSONObj exprBSON = fromjson("{$and:[{a:1},{'a.b':{$eq:1}}]}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_EQUALS(extractEqualityMatches(*expr, &equalities).code(),
- ErrorCodes::NotSingleValueField);
- }
-
- TEST(ExtractEqualities, EmptyConflict) {
- BSONObj exprBSON = fromjson("{'':0,'':{$eq:0}}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
-
- EqualityMatches equalities;
- ASSERT_EQUALS(extractEqualityMatches(*expr, &equalities).code(),
- ErrorCodes::NotSingleValueField);
- }
-
- //
- // Tests for finding parent equality from equalities found in expression
- // NONGOALS: Testing complex equality match extraction - tested above
- //
-
- static void assertParent(const EqualityMatches& equalities,
- StringData pathStr,
- const BSONObj& wrapped) {
-
- FieldRef path(pathStr);
- BSONElement value = wrapped.firstElement();
- StringData parentPath = value.fieldNameStringData();
-
- int parentPathPart;
- BSONElement parentEl = findParentEqualityElement(equalities, path, &parentPathPart);
-
- if (parentEl.eoo()) {
- FAIL(stream() << "Equality matches did not contain parent for \"" << pathStr
- << "\"");
- }
+/**
+ * Helper class to allow easy construction of immutable paths
+ */
+class ImmutablePaths {
+public:
+ ImmutablePaths() {}
+
+ void addPath(const string& path) {
+ _ownedPaths.mutableVector().push_back(new FieldRef(path));
+ FieldRef const* conflictPath = NULL;
+ ASSERT(_immutablePathSet.insert(_ownedPaths.vector().back(), &conflictPath));
+ }
+
+ const FieldRefSet& getPathSet() {
+ return _immutablePathSet;
+ }
+
+private:
+ FieldRefSet _immutablePathSet;
+ OwnedPointerVector<FieldRef> _ownedPaths;
+};
+
+TEST(ExtractEqualities, IdOnlyMulti) {
+ BSONObj exprBSON = fromjson("{_id:{$eq:1},a:1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ ImmutablePaths immutablePaths;
+ immutablePaths.addPath("_id");
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities));
+ ASSERT_EQUALS(equalities.size(), 1u);
+ assertContains(equalities, "_id", 1);
+}
+
+TEST(ExtractEqualities, IdOnlyIgnoreConflict) {
+ BSONObj exprBSON = fromjson("{_id:1,a:1,'a.b':1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ ImmutablePaths immutablePaths;
+ immutablePaths.addPath("_id");
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities));
+ ASSERT_EQUALS(equalities.size(), 1u);
+ assertContains(equalities, "_id", 1);
+}
+
+TEST(ExtractEqualities, IdOnlyNested) {
+ BSONObj exprBSON = fromjson("{'_id.a':1,'_id.b':{$eq:2},c:3}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ ImmutablePaths immutablePaths;
+ immutablePaths.addPath("_id");
+
+ EqualityMatches equalities;
+ Status status = extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities);
+ ASSERT_EQUALS(status.code(), ErrorCodes::NotExactValueField);
+}
+
+TEST(ExtractEqualities, IdAndOtherImmutable) {
+ BSONObj exprBSON = fromjson("{_id:1,a:1,b:2}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ ImmutablePaths immutablePaths;
+ immutablePaths.addPath("_id");
+ immutablePaths.addPath("a");
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities));
+ ASSERT_EQUALS(equalities.size(), 2u);
+ assertContains(equalities, "_id", 1);
+ assertContains(equalities, "a", 1);
+}
+
+TEST(ExtractEqualities, IdAndNestedImmutable) {
+ BSONObj exprBSON = fromjson("{_id:1,a:1,'c.d':3}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ ImmutablePaths immutablePaths;
+ immutablePaths.addPath("_id");
+ immutablePaths.addPath("a.b");
+ immutablePaths.addPath("c.d");
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities));
+ ASSERT_EQUALS(equalities.size(), 3u);
+ assertContains(equalities, "_id", 1);
+ assertContains(equalities, "a", 1);
+ assertContains(equalities, "c.d", 3);
+}
+
+TEST(ExtractEqualities, NonFullImmutable) {
+ BSONObj exprBSON = fromjson("{'a.b':1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ ImmutablePaths immutablePaths;
+ immutablePaths.addPath("a");
+
+ EqualityMatches equalities;
+ Status status = extractFullEqualityMatches(*expr, immutablePaths.getPathSet(), &equalities);
+ ASSERT_EQUALS(status.code(), ErrorCodes::NotExactValueField);
+}
+
+TEST(ExtractEqualities, Empty) {
+ BSONObj exprBSON = fromjson("{'':0}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 1u);
+ assertContains(equalities, "", 0);
+}
+
+TEST(ExtractEqualities, EmptyMulti) {
+ BSONObj exprBSON = fromjson("{'':0,a:{$eq:1}}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+ ASSERT_EQUALS(equalities.size(), 2u);
+ assertContains(equalities, "", 0);
+ assertContains(equalities, "a", 1);
+}
+
+TEST(ExtractEqualities, EqConflict) {
+ BSONObj exprBSON = fromjson("{a:1,a:1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_EQUALS(extractEqualityMatches(*expr, &equalities).code(),
+ ErrorCodes::NotSingleValueField);
+}
+
+TEST(ExtractEqualities, PrefixConflict) {
+ BSONObj exprBSON = fromjson("{a:1,'a.b':{$eq:1}}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_EQUALS(extractEqualityMatches(*expr, &equalities).code(),
+ ErrorCodes::NotSingleValueField);
+}
+
+TEST(ExtractEqualities, AndPrefixConflict) {
+ BSONObj exprBSON = fromjson("{$and:[{a:1},{'a.b':{$eq:1}}]}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_EQUALS(extractEqualityMatches(*expr, &equalities).code(),
+ ErrorCodes::NotSingleValueField);
+}
+
+TEST(ExtractEqualities, EmptyConflict) {
+ BSONObj exprBSON = fromjson("{'':0,'':{$eq:0}}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+
+ EqualityMatches equalities;
+ ASSERT_EQUALS(extractEqualityMatches(*expr, &equalities).code(),
+ ErrorCodes::NotSingleValueField);
+}
+
+//
+// Tests for finding parent equality from equalities found in expression
+// NONGOALS: Testing complex equality match extraction - tested above
+//
+
+static void assertParent(const EqualityMatches& equalities,
+ StringData pathStr,
+ const BSONObj& wrapped) {
+ FieldRef path(pathStr);
+ BSONElement value = wrapped.firstElement();
+ StringData parentPath = value.fieldNameStringData();
+
+ int parentPathPart;
+ BSONElement parentEl = findParentEqualityElement(equalities, path, &parentPathPart);
+
+ if (parentEl.eoo()) {
+ FAIL(stream() << "Equality matches did not contain parent for \"" << pathStr << "\"");
+ }
+
+ StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
+ if (foundParentPath != parentPath) {
+ FAIL(stream() << "Equality match parent at path \"" << foundParentPath
+ << "\" does not match \"" << parentPath << "\"");
+ }
+
+ if (!parentEl.valuesEqual(value)) {
+ FAIL(stream() << "Equality match parent for \"" << pathStr << "\" at path \"" << parentPath
+ << "\" contains value " << parentEl << ", not value " << value);
+ }
+}
+static void assertParent(const EqualityMatches& equalities,
+ StringData path,
+ StringData parentPath,
+ int value) {
+ assertParent(equalities, path, BSON(parentPath << value));
+}
+
+static void assertNoParent(const EqualityMatches& equalities, StringData pathStr) {
+ FieldRef path(pathStr);
+
+ int parentPathPart;
+ BSONElement parentEl = findParentEqualityElement(equalities, path, &parentPathPart);
+
+ if (!parentEl.eoo()) {
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
- if (foundParentPath != parentPath) {
- FAIL(stream() << "Equality match parent at path \"" << foundParentPath
- << "\" does not match \"" << parentPath << "\"");
- }
-
- if (!parentEl.valuesEqual(value)) {
- FAIL(stream() << "Equality match parent for \"" << pathStr << "\" at path \""
- << parentPath << "\" contains value " << parentEl << ", not value "
- << value);
- }
- }
-
- static void assertParent(const EqualityMatches& equalities,
- StringData path,
- StringData parentPath,
- int value) {
- assertParent(equalities, path, BSON(parentPath << value));
- }
-
- static void assertNoParent(const EqualityMatches& equalities, StringData pathStr) {
-
- FieldRef path(pathStr);
-
- int parentPathPart;
- BSONElement parentEl = findParentEqualityElement(equalities, path, &parentPathPart);
-
- if (!parentEl.eoo()) {
- StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
- FAIL(stream() << "Equality matches contained parent for \"" << pathStr << "\" at \""
- << foundParentPath << "\"");
- }
- }
-
-
- TEST(FindParentEquality, Basic) {
-
- BSONObj exprBSON = fromjson("{a:1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
-
- assertNoParent(equalities, "");
- assertParent(equalities, "a", "a", 1);
- assertParent(equalities, "a.b", "a", 1);
- }
-
- TEST(FindParentEquality, Multi) {
-
- BSONObj exprBSON = fromjson("{a:1,b:2}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
-
- assertNoParent(equalities, "");
- assertParent(equalities, "a", "a", 1);
- assertParent(equalities, "a.b", "a", 1);
- assertParent(equalities, "b", "b", 2);
- assertParent(equalities, "b.b", "b", 2);
- }
-
- TEST(FindParentEquality, Nested) {
-
- BSONObj exprBSON = fromjson("{'a.a':1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
-
- assertNoParent(equalities, "");
- assertNoParent(equalities, "a");
- assertParent(equalities, "a.a", "a.a", 1);
- assertParent(equalities, "a.a.b", "a.a", 1);
- }
-
- TEST(FindParentEquality, NestedMulti) {
-
- BSONObj exprBSON = fromjson("{'a.a':1,'a.b':2,'c.c':3}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
-
- assertNoParent(equalities, "");
- assertNoParent(equalities, "a");
- assertNoParent(equalities, "c");
- assertParent(equalities, "a.a", "a.a", 1);
- assertParent(equalities, "a.a.a", "a.a", 1);
- assertParent(equalities, "a.b", "a.b", 2);
- assertParent(equalities, "a.b.b", "a.b", 2);
- assertParent(equalities, "c.c", "c.c", 3);
- assertParent(equalities, "c.c.c", "c.c", 3);
- }
-
- TEST(FindParentEquality, Empty) {
-
- BSONObj exprBSON = fromjson("{'':0}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
-
- assertParent(equalities, "", "", 0);
- }
-
- TEST(FindParentEquality, EmptyMulti) {
-
- BSONObj exprBSON = fromjson("{'':0,a:1}");
- unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
- EqualityMatches equalities;
- ASSERT_OK(extractEqualityMatches(*expr, &equalities));
-
- assertParent(equalities, "", "", 0);
- assertParent(equalities, "a", "a", 1);
- assertParent(equalities, "a.b", "a", 1);
- }
-
-} // unnamed namespace
+ FAIL(stream() << "Equality matches contained parent for \"" << pathStr << "\" at \""
+ << foundParentPath << "\"");
+ }
+}
+
+
+TEST(FindParentEquality, Basic) {
+ BSONObj exprBSON = fromjson("{a:1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+
+ assertNoParent(equalities, "");
+ assertParent(equalities, "a", "a", 1);
+ assertParent(equalities, "a.b", "a", 1);
+}
+
+TEST(FindParentEquality, Multi) {
+ BSONObj exprBSON = fromjson("{a:1,b:2}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+
+ assertNoParent(equalities, "");
+ assertParent(equalities, "a", "a", 1);
+ assertParent(equalities, "a.b", "a", 1);
+ assertParent(equalities, "b", "b", 2);
+ assertParent(equalities, "b.b", "b", 2);
+}
+
+TEST(FindParentEquality, Nested) {
+ BSONObj exprBSON = fromjson("{'a.a':1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+
+ assertNoParent(equalities, "");
+ assertNoParent(equalities, "a");
+ assertParent(equalities, "a.a", "a.a", 1);
+ assertParent(equalities, "a.a.b", "a.a", 1);
+}
+
+TEST(FindParentEquality, NestedMulti) {
+ BSONObj exprBSON = fromjson("{'a.a':1,'a.b':2,'c.c':3}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+
+ assertNoParent(equalities, "");
+ assertNoParent(equalities, "a");
+ assertNoParent(equalities, "c");
+ assertParent(equalities, "a.a", "a.a", 1);
+ assertParent(equalities, "a.a.a", "a.a", 1);
+ assertParent(equalities, "a.b", "a.b", 2);
+ assertParent(equalities, "a.b.b", "a.b", 2);
+ assertParent(equalities, "c.c", "c.c", 3);
+ assertParent(equalities, "c.c.c", "c.c", 3);
+}
+
+TEST(FindParentEquality, Empty) {
+ BSONObj exprBSON = fromjson("{'':0}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+
+ assertParent(equalities, "", "", 0);
+}
+
+TEST(FindParentEquality, EmptyMulti) {
+ BSONObj exprBSON = fromjson("{'':0,a:1}");
+ unique_ptr<MatchExpression> expr(makeExpr(exprBSON));
+ EqualityMatches equalities;
+ ASSERT_OK(extractEqualityMatches(*expr, &equalities));
+
+ assertParent(equalities, "", "", 0);
+ assertParent(equalities, "a", "a", 1);
+ assertParent(equalities, "a.b", "a", 1);
+}
+
+} // unnamed namespace
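For orientation, the reformatted tests above all follow one pattern: extract the query's equality matches, then look up the nearest equality parent of a dotted path. A minimal sketch of that pattern, reusing this file's makeExpr helper and the values from the Nested case (expected results per the assertions above, not re-verified here):

    BSONObj exprBSON = fromjson("{'a.a': 1}");
    unique_ptr<MatchExpression> expr(makeExpr(exprBSON));  // makeExpr is the test helper defined earlier in this file

    EqualityMatches equalities;
    ASSERT_OK(extractEqualityMatches(*expr, &equalities));

    FieldRef path("a.a.b");
    int parentPathPart;
    BSONElement parentEl = findParentEqualityElement(equalities, path, &parentPathPart);
    // Expected: parentEl holds the value 1, and path.dottedSubstring(0, parentPathPart) == "a.a".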
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index cf60978b4b8..43c9be211ce 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -54,74 +54,74 @@
namespace mongo {
- UpdateResult update(OperationContext* txn,
- Database* db,
- const UpdateRequest& request,
- OpDebug* opDebug) {
- invariant(db);
-
- // Explain should never use this helper.
- invariant(!request.isExplain());
-
- const NamespaceString& nsString = request.getNamespaceString();
- Collection* collection = db->getCollection(nsString.ns());
-
- // The update stage does not create its own collection. As such, if the update is
- // an upsert, create the collection that the update stage inserts into beforehand.
- if (!collection && request.isUpsert()) {
- // We have to have an exclusive lock on the db to be allowed to create the collection.
- // Callers should either get an X or create the collection.
- const Locker* locker = txn->lockState();
- invariant(locker->isW() ||
- locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()),
- MODE_X));
-
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);
-
- const bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
-
- if (userInitiatedWritesAndNotPrimary) {
- uassertStatusOK(Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while creating collection " << nsString.ns()
- << " during upsert"));
- }
- WriteUnitOfWork wuow(txn);
- collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
- invariant(collection);
- wuow.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nsString.ns());
+UpdateResult update(OperationContext* txn,
+ Database* db,
+ const UpdateRequest& request,
+ OpDebug* opDebug) {
+ invariant(db);
+
+ // Explain should never use this helper.
+ invariant(!request.isExplain());
+
+ const NamespaceString& nsString = request.getNamespaceString();
+ Collection* collection = db->getCollection(nsString.ns());
+
+ // The update stage does not create its own collection. As such, if the update is
+ // an upsert, create the collection that the update stage inserts into beforehand.
+ if (!collection && request.isUpsert()) {
+ // We have to have an exclusive lock on the db to be allowed to create the collection.
+ // Callers should either get an X or create the collection.
+ const Locker* locker = txn->lockState();
+ invariant(locker->isW() ||
+ locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()), MODE_X));
+
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);
+
+ const bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ uassertStatusOK(Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating collection "
+ << nsString.ns() << " during upsert"));
+ }
+ WriteUnitOfWork wuow(txn);
+ collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
+ invariant(collection);
+ wuow.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", nsString.ns());
+ }
- // Parse the update, get an executor for it, run the executor, get stats out.
- ParsedUpdate parsedUpdate(txn, &request);
- uassertStatusOK(parsedUpdate.parseRequest());
-
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ // Parse the update, get an executor for it, run the executor, get stats out.
+ ParsedUpdate parsedUpdate(txn, &request);
+ uassertStatusOK(parsedUpdate.parseRequest());
- uassertStatusOK(exec->executePlan());
- return UpdateStage::makeUpdateResult(exec.get(), opDebug);
- }
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- BSONObj applyUpdateOperators(const BSONObj& from, const BSONObj& operators) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- Status status = driver.parse(operators);
- if (!status.isOK()) {
- uasserted(16838, status.reason());
- }
+ uassertStatusOK(exec->executePlan());
+ return UpdateStage::makeUpdateResult(exec.get(), opDebug);
+}
- mutablebson::Document doc(from, mutablebson::Document::kInPlaceDisabled);
- status = driver.update(StringData(), &doc);
- if (!status.isOK()) {
- uasserted(16839, status.reason());
- }
+BSONObj applyUpdateOperators(const BSONObj& from, const BSONObj& operators) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ Status status = driver.parse(operators);
+ if (!status.isOK()) {
+ uasserted(16838, status.reason());
+ }
- return doc.getObject();
+ mutablebson::Document doc(from, mutablebson::Document::kInPlaceDisabled);
+ status = driver.update(StringData(), &doc);
+ if (!status.isOK()) {
+ uasserted(16839, status.reason());
}
+ return doc.getObject();
+}
+
} // namespace mongo
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index c4c5e71edff..06cec7fa90f 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -37,27 +37,27 @@
namespace mongo {
- class CanonicalQuery;
- class Database;
- class OperationContext;
- class UpdateDriver;
+class CanonicalQuery;
+class Database;
+class OperationContext;
+class UpdateDriver;
- /**
- * Utility method to execute an update described by "request".
- *
- * Caller must hold the appropriate database locks.
- */
- UpdateResult update(OperationContext* txn,
- Database* db,
- const UpdateRequest& request,
- OpDebug* opDebug);
+/**
+ * Utility method to execute an update described by "request".
+ *
+ * Caller must hold the appropriate database locks.
+ */
+UpdateResult update(OperationContext* txn,
+ Database* db,
+ const UpdateRequest& request,
+ OpDebug* opDebug);
- /**
- * takes the from document and returns a new document
- * after apply all the operators
- * e.g.
- * applyUpdateOperators( BSON( "x" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
- * returns: { x : 2 }
- */
- BSONObj applyUpdateOperators( const BSONObj& from, const BSONObj& operators );
+/**
+ * Takes the 'from' document and returns a new document
+ * after applying all the operators.
+ * e.g.
+ * applyUpdateOperators( BSON( "x" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ * returns: { x : 2 }
+ */
+BSONObj applyUpdateOperators(const BSONObj& from, const BSONObj& operators);
} // namespace mongo
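The comment above already gives the canonical applyUpdateOperators example; written out as a call it would look roughly like the sketch below (illustrative only, with the result taken from that comment):

    // Apply {$inc: {x: 1}} to {x: 1}; per the header comment this yields {x: 2}.
    BSONObj from = BSON("x" << 1);
    BSONObj operators = BSON("$inc" << BSON("x" << 1));
    BSONObj result = mongo::applyUpdateOperators(from, operators);
    // A parse or apply failure would uassert (codes 16838/16839 in update.cpp above).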
diff --git a/src/mongo/db/ops/update_driver.cpp b/src/mongo/db/ops/update_driver.cpp
index 28ae06215ae..ccd966dbb81 100644
--- a/src/mongo/db/ops/update_driver.cpp
+++ b/src/mongo/db/ops/update_driver.cpp
@@ -44,353 +44,341 @@
namespace mongo {
- namespace str = mongoutils::str;
- namespace mb = mongo::mutablebson;
+namespace str = mongoutils::str;
+namespace mb = mongo::mutablebson;
- using std::unique_ptr;
- using std::vector;
+using std::unique_ptr;
+using std::vector;
- using pathsupport::EqualityMatches;
+using pathsupport::EqualityMatches;
- UpdateDriver::UpdateDriver(const Options& opts)
- : _replacementMode(false)
- , _indexedFields(NULL)
- , _logOp(opts.logOp)
- , _modOptions(opts.modOptions)
- , _affectIndices(false)
- , _positional(false) {
- }
-
- UpdateDriver::~UpdateDriver() {
- clear();
- }
+UpdateDriver::UpdateDriver(const Options& opts)
+ : _replacementMode(false),
+ _indexedFields(NULL),
+ _logOp(opts.logOp),
+ _modOptions(opts.modOptions),
+ _affectIndices(false),
+ _positional(false) {}
- Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
- clear();
+UpdateDriver::~UpdateDriver() {
+ clear();
+}
- // Check if the update expression is a full object replacement.
- if (*updateExpr.firstElementFieldName() != '$') {
- if (multi) {
- return Status(ErrorCodes::FailedToParse,
- "multi update only works with $ operators");
- }
+Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
+ clear();
- // Modifiers expect BSONElements as input. But the input to object replace is, by
- // definition, an object. We wrap the 'updateExpr' as the mod is expecting. Note
- // that the wrapper is temporary so the object replace mod should make a copy of
- // the object.
- unique_ptr<ModifierObjectReplace> mod(new ModifierObjectReplace);
- BSONObj wrapper = BSON( "dummy" << updateExpr );
- Status status = mod->init(wrapper.firstElement(), _modOptions);
- if (!status.isOK()) {
- return status;
- }
-
- _mods.push_back(mod.release());
-
- // Register the fact that this driver will only do full object replacements.
- _replacementMode = true;
+ // Check if the update expression is a full object replacement.
+ if (*updateExpr.firstElementFieldName() != '$') {
+ if (multi) {
+ return Status(ErrorCodes::FailedToParse, "multi update only works with $ operators");
+ }
- return Status::OK();
+ // Modifiers expect BSONElements as input. But the input to object replace is, by
+ // definition, an object. We wrap the 'updateExpr' as the mod is expecting. Note
+ // that the wrapper is temporary so the object replace mod should make a copy of
+ // the object.
+ unique_ptr<ModifierObjectReplace> mod(new ModifierObjectReplace);
+ BSONObj wrapper = BSON("dummy" << updateExpr);
+ Status status = mod->init(wrapper.firstElement(), _modOptions);
+ if (!status.isOK()) {
+ return status;
}
- // The update expression is made of mod operators, that is
- // { <$mod>: {...}, <$mod>: {...}, ... }
- BSONObjIterator outerIter(updateExpr);
- while (outerIter.more()) {
- BSONElement outerModElem = outerIter.next();
-
- // Check whether this is a valid mod type.
- modifiertable::ModifierType modType = modifiertable::getType(outerModElem.fieldName());
- if (modType == modifiertable::MOD_UNKNOWN) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Unknown modifier: " << outerModElem.fieldName());
- }
+ _mods.push_back(mod.release());
- // Check whether there is indeed a list of mods under this modifier.
- if (outerModElem.type() != Object) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Modifiers operate on fields but we found a "
- << typeName(outerModElem.type())
- << " instead. For example: {$mod: {<field>: ...}}"
- << " not {" << outerModElem.toString() << "}");
- }
+ // Register the fact that this driver will only do full object replacements.
+ _replacementMode = true;
- // Check whether there are indeed mods under this modifier.
- if (outerModElem.embeddedObject().isEmpty()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "'" << outerModElem.fieldName()
- << "' is empty. You must specify a field like so: "
- "{" << outerModElem.fieldName() << ": {<field>: ...}}");
- }
+ return Status::OK();
+ }
- BSONObjIterator innerIter(outerModElem.embeddedObject());
- while (innerIter.more()) {
- BSONElement innerModElem = innerIter.next();
+ // The update expression is made of mod operators, that is
+ // { <$mod>: {...}, <$mod>: {...}, ... }
+ BSONObjIterator outerIter(updateExpr);
+ while (outerIter.more()) {
+ BSONElement outerModElem = outerIter.next();
- Status status = addAndParse(modType, innerModElem);
- if (!status.isOK()) {
- return status;
- }
- }
+ // Check whether this is a valid mod type.
+ modifiertable::ModifierType modType = modifiertable::getType(outerModElem.fieldName());
+ if (modType == modifiertable::MOD_UNKNOWN) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Unknown modifier: " << outerModElem.fieldName());
}
- // Register the fact that there will be only $mod's in this driver -- no object
- // replacement.
- _replacementMode = false;
-
- return Status::OK();
- }
+ // Check whether there is indeed a list of mods under this modifier.
+ if (outerModElem.type() != Object) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Modifiers operate on fields but we found a "
+ << typeName(outerModElem.type())
+ << " instead. For example: {$mod: {<field>: ...}}"
+ << " not {" << outerModElem.toString() << "}");
+ }
- inline Status UpdateDriver::addAndParse(const modifiertable::ModifierType type,
- const BSONElement& elem) {
- if (elem.eoo()) {
+ // Check whether there are indeed mods under this modifier.
+ if (outerModElem.embeddedObject().isEmpty()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "'" << elem.fieldName()
- << "' has no value in : " << elem
- << " which is not allowed for any $" << type << " mod.");
+ str::stream() << "'" << outerModElem.fieldName()
+ << "' is empty. You must specify a field like so: "
+ "{" << outerModElem.fieldName() << ": {<field>: ...}}");
}
- unique_ptr<ModifierInterface> mod(modifiertable::makeUpdateMod(type));
- dassert(mod.get());
+ BSONObjIterator innerIter(outerModElem.embeddedObject());
+ while (innerIter.more()) {
+ BSONElement innerModElem = innerIter.next();
- bool positional = false;
- Status status = mod->init(elem, _modOptions, &positional);
- if (!status.isOK()) {
- return status;
+ Status status = addAndParse(modType, innerModElem);
+ if (!status.isOK()) {
+ return status;
+ }
}
+ }
- // If any modifier indicates that it requires a positional match, toggle the
- // _positional flag to true.
- _positional = _positional || positional;
+ // Register the fact that there will be only $mod's in this driver -- no object
+ // replacement.
+ _replacementMode = false;
- _mods.push_back(mod.release());
+ return Status::OK();
+}
- return Status::OK();
+inline Status UpdateDriver::addAndParse(const modifiertable::ModifierType type,
+ const BSONElement& elem) {
+ if (elem.eoo()) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "'" << elem.fieldName() << "' has no value in : " << elem
+ << " which is not allowed for any $" << type << " mod.");
}
- Status UpdateDriver::populateDocumentWithQueryFields(const BSONObj& query,
- const vector<FieldRef*>* immutablePaths,
- mutablebson::Document& doc) const {
- CanonicalQuery* rawCG;
- // We canonicalize the query to collapse $and/$or, and the first arg (ns) is not needed
- // Also, because this is for the upsert case, where we insert a new document if one was
- // not found, the $where clause does not make sense, hence empty WhereCallback.
- Status s = CanonicalQuery::canonicalize("", query, &rawCG, WhereCallbackNoop());
- if (!s.isOK())
- return s;
- unique_ptr<CanonicalQuery> cq(rawCG);
- return populateDocumentWithQueryFields(rawCG, immutablePaths, doc);
- }
+ unique_ptr<ModifierInterface> mod(modifiertable::makeUpdateMod(type));
+ dassert(mod.get());
- Status UpdateDriver::populateDocumentWithQueryFields(const CanonicalQuery* query,
- const vector<FieldRef*>* immutablePathsPtr,
- mutablebson::Document& doc) const {
- EqualityMatches equalities;
- Status status = Status::OK();
+ bool positional = false;
+ Status status = mod->init(elem, _modOptions, &positional);
+ if (!status.isOK()) {
+ return status;
+ }
- if (isDocReplacement()) {
+ // If any modifier indicates that it requires a positional match, toggle the
+ // _positional flag to true.
+ _positional = _positional || positional;
+
+ _mods.push_back(mod.release());
+
+ return Status::OK();
+}
+
+Status UpdateDriver::populateDocumentWithQueryFields(const BSONObj& query,
+ const vector<FieldRef*>* immutablePaths,
+ mutablebson::Document& doc) const {
+ CanonicalQuery* rawCG;
+ // We canonicalize the query to collapse $and/$or, and the first arg (ns) is not needed
+ // Also, because this is for the upsert case, where we insert a new document if one was
+ // not found, the $where clause does not make sense, hence empty WhereCallback.
+ Status s = CanonicalQuery::canonicalize("", query, &rawCG, WhereCallbackNoop());
+ if (!s.isOK())
+ return s;
+ unique_ptr<CanonicalQuery> cq(rawCG);
+ return populateDocumentWithQueryFields(rawCG, immutablePaths, doc);
+}
+
+Status UpdateDriver::populateDocumentWithQueryFields(const CanonicalQuery* query,
+ const vector<FieldRef*>* immutablePathsPtr,
+ mutablebson::Document& doc) const {
+ EqualityMatches equalities;
+ Status status = Status::OK();
+
+ if (isDocReplacement()) {
+ FieldRefSet pathsToExtract;
+
+ // TODO: Refactor update logic, make _id just another immutable field
+ static const FieldRef idPath("_id");
+ static const vector<FieldRef*> emptyImmutablePaths;
+ const vector<FieldRef*>& immutablePaths =
+ immutablePathsPtr ? *immutablePathsPtr : emptyImmutablePaths;
+
+ pathsToExtract.fillFrom(immutablePaths);
+ pathsToExtract.insert(&idPath);
+
+ // Extract only immutable fields from replacement-style
+ status =
+ pathsupport::extractFullEqualityMatches(*query->root(), pathsToExtract, &equalities);
+ } else {
+ // Extract all fields from op-style
+ status = pathsupport::extractEqualityMatches(*query->root(), &equalities);
+ }
- FieldRefSet pathsToExtract;
+ if (!status.isOK())
+ return status;
- // TODO: Refactor update logic, make _id just another immutable field
- static const FieldRef idPath("_id");
- static const vector<FieldRef*> emptyImmutablePaths;
- const vector<FieldRef*>& immutablePaths =
- immutablePathsPtr ? *immutablePathsPtr : emptyImmutablePaths;
+ status = pathsupport::addEqualitiesToDoc(equalities, &doc);
+ return status;
+}
+
+Status UpdateDriver::update(StringData matchedField,
+ mutablebson::Document* doc,
+ BSONObj* logOpRec,
+ FieldRefSet* updatedFields,
+ bool* docWasModified) {
+ // TODO: assert that update() is called at most once in a !_multi case.
+
+ // Use the passed in FieldRefSet
+ FieldRefSet* targetFields = updatedFields;
+
+ // If we didn't get a FieldRefSet* from the caller, allocate storage and use
+ // the unique_ptr for lifecycle management
+ unique_ptr<FieldRefSet> targetFieldScopedPtr;
+ if (!targetFields) {
+ targetFieldScopedPtr.reset(new FieldRefSet());
+ targetFields = targetFieldScopedPtr.get();
+ }
- pathsToExtract.fillFrom(immutablePaths);
- pathsToExtract.insert(&idPath);
+ _affectIndices = (isDocReplacement() && (_indexedFields != NULL));
- // Extract only immutable fields from replacement-style
- status = pathsupport::extractFullEqualityMatches(*query->root(),
- pathsToExtract,
- &equalities);
- }
- else {
- // Extract all fields from op-style
- status = pathsupport::extractEqualityMatches(*query->root(), &equalities);
- }
+ _logDoc.reset();
+ LogBuilder logBuilder(_logDoc.root());
- if (!status.isOK())
+ // Ask each of the mods to type check whether they can operate over the current document
+ // and, if so, to change that document accordingly.
+ for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
+ ModifierInterface::ExecInfo execInfo;
+ Status status = (*it)->prepare(doc->root(), matchedField, &execInfo);
+ if (!status.isOK()) {
return status;
+ }
- status = pathsupport::addEqualitiesToDoc(equalities, &doc);
- return status;
- }
+ // If a mod wants to be applied only if this is an upsert (or only if this is a
+ // strict update), we should respect that. If a mod doesn't care, it would state
+ // it is fine with ANY update context.
+ const bool validContext = (execInfo.context == ModifierInterface::ExecInfo::ANY_CONTEXT ||
+ execInfo.context == _context);
- Status UpdateDriver::update(StringData matchedField,
- mutablebson::Document* doc,
- BSONObj* logOpRec,
- FieldRefSet* updatedFields,
- bool* docWasModified) {
- // TODO: assert that update() is called at most once in a !_multi case.
-
- // Use the passed in FieldRefSet
- FieldRefSet* targetFields = updatedFields;
-
- // If we didn't get a FieldRefSet* from the caller, allocate storage and use
- // the unique_ptr for lifecycle management
- unique_ptr<FieldRefSet> targetFieldScopedPtr;
- if (!targetFields) {
- targetFieldScopedPtr.reset(new FieldRefSet());
- targetFields = targetFieldScopedPtr.get();
+ // Nothing to do if not in a valid context.
+ if (!validContext) {
+ continue;
}
- _affectIndices = (isDocReplacement() && (_indexedFields != NULL));
- _logDoc.reset();
- LogBuilder logBuilder(_logDoc.root());
-
- // Ask each of the mods to type check whether they can operate over the current document
- // and, if so, to change that document accordingly.
- for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
- ModifierInterface::ExecInfo execInfo;
- Status status = (*it)->prepare(doc->root(), matchedField, &execInfo);
- if (!status.isOK()) {
- return status;
+ // Gather which fields this mod is interested in and whether these fields were
+ // "taken" by previous mods. Note that not all mods are multi-field mods. When we
+ // see an empty field, we may stop looking for others.
+ for (int i = 0; i < ModifierInterface::ExecInfo::MAX_NUM_FIELDS; i++) {
+ if (execInfo.fieldRef[i] == 0) {
+ break;
}
- // If a mod wants to be applied only if this is an upsert (or only if this is a
- // strict update), we should respect that. If a mod doesn't care, it would state
- // it is fine with ANY update context.
- const bool validContext = (execInfo.context == ModifierInterface::ExecInfo::ANY_CONTEXT ||
- execInfo.context == _context);
-
- // Nothing to do if not in a valid context.
- if (!validContext) {
- continue;
+ // Record each field being updated but check for conflicts first
+ const FieldRef* other;
+ if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
+ return Status(ErrorCodes::ConflictingUpdateOperators,
+ str::stream() << "Cannot update '" << other->dottedField()
+ << "' and '" << execInfo.fieldRef[i]->dottedField()
+ << "' at the same time");
}
-
- // Gather which fields this mod is interested on and whether these fields were
- // "taken" by previous mods. Note that not all mods are multi-field mods. When we
- // see an empty field, we may stop looking for others.
- for (int i = 0; i < ModifierInterface::ExecInfo::MAX_NUM_FIELDS; i++) {
- if (execInfo.fieldRef[i] == 0) {
- break;
- }
-
- // Record each field being updated but check for conflicts first
- const FieldRef* other;
- if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
- return Status(ErrorCodes::ConflictingUpdateOperators,
- str::stream() << "Cannot update '"
- << other->dottedField()
- << "' and '"
- << execInfo.fieldRef[i]->dottedField()
- << "' at the same time");
- }
-
- // We start with the expectation that a mod will be in-place. But if the mod
- // touched an indexed field and the mod will indeed be executed -- that is, it
- // is not a no-op and it is in a valid context -- then we switch back to a
- // non-in-place mode.
- //
- // TODO: make mightBeIndexed and fieldRef like each other.
- if (!_affectIndices &&
- !execInfo.noOp &&
- _indexedFields &&
- _indexedFields->mightBeIndexed(execInfo.fieldRef[i]->dottedField())) {
- _affectIndices = true;
- doc->disableInPlaceUpdates();
- }
+ // We start with the expectation that a mod will be in-place. But if the mod
+ // touched an indexed field and the mod will indeed be executed -- that is, it
+ // is not a no-op and it is in a valid context -- then we switch back to a
+ // non-in-place mode.
+ //
+ // TODO: make mightBeIndexed and fieldRef like each other.
+ if (!_affectIndices && !execInfo.noOp && _indexedFields &&
+ _indexedFields->mightBeIndexed(execInfo.fieldRef[i]->dottedField())) {
+ _affectIndices = true;
+ doc->disableInPlaceUpdates();
}
+ }
- if (!execInfo.noOp) {
- status = (*it)->apply();
+ if (!execInfo.noOp) {
+ status = (*it)->apply();
- if (docWasModified)
- *docWasModified = true;
+ if (docWasModified)
+ *docWasModified = true;
- if (!status.isOK()) {
- return status;
- }
+ if (!status.isOK()) {
+ return status;
}
+ }
- // If we require a replication oplog entry for this update, go ahead and generate one.
- if (!execInfo.noOp && _logOp && logOpRec) {
- status = (*it)->log(&logBuilder);
- if (!status.isOK()) {
- return status;
- }
+ // If we require a replication oplog entry for this update, go ahead and generate one.
+ if (!execInfo.noOp && _logOp && logOpRec) {
+ status = (*it)->log(&logBuilder);
+ if (!status.isOK()) {
+ return status;
}
-
}
-
- if (_logOp && logOpRec)
- *logOpRec = _logDoc.getObject();
-
- return Status::OK();
}
- size_t UpdateDriver::numMods() const {
- return _mods.size();
+ if (_logOp && logOpRec)
+ *logOpRec = _logDoc.getObject();
+
+ return Status::OK();
+}
+
+size_t UpdateDriver::numMods() const {
+ return _mods.size();
+}
+
+bool UpdateDriver::isDocReplacement() const {
+ return _replacementMode;
+}
+
+bool UpdateDriver::modsAffectIndices() const {
+ return _affectIndices;
+}
+
+void UpdateDriver::refreshIndexKeys(const UpdateIndexData* indexedFields) {
+ _indexedFields = indexedFields;
+}
+
+bool UpdateDriver::logOp() const {
+ return _logOp;
+}
+
+void UpdateDriver::setLogOp(bool logOp) {
+ _logOp = logOp;
+}
+
+ModifierInterface::Options UpdateDriver::modOptions() const {
+ return _modOptions;
+}
+
+void UpdateDriver::setModOptions(ModifierInterface::Options modOpts) {
+ _modOptions = modOpts;
+}
+
+ModifierInterface::ExecInfo::UpdateContext UpdateDriver::context() const {
+ return _context;
+}
+
+void UpdateDriver::setContext(ModifierInterface::ExecInfo::UpdateContext context) {
+ _context = context;
+}
+
+BSONObj UpdateDriver::makeOplogEntryQuery(const BSONObj& doc, bool multi) const {
+ BSONObjBuilder idPattern;
+ BSONElement id;
+ // NOTE: If the matching object lacks an id, we'll log
+ // with the original pattern. This isn't replay-safe.
+ // It might make sense to suppress the log instead
+ // if there's no id.
+ if (doc.getObjectID(id)) {
+ idPattern.append(id);
+ return idPattern.obj();
+ } else {
+ uassert(16980,
+ str::stream() << "Multi-update operations require all documents to "
+ "have an '_id' field. " << doc.toString(false, false),
+ !multi);
+ return doc;
}
-
- bool UpdateDriver::isDocReplacement() const {
- return _replacementMode;
- }
-
- bool UpdateDriver::modsAffectIndices() const {
- return _affectIndices;
- }
-
- void UpdateDriver::refreshIndexKeys(const UpdateIndexData* indexedFields) {
- _indexedFields = indexedFields;
- }
-
- bool UpdateDriver::logOp() const {
- return _logOp;
- }
-
- void UpdateDriver::setLogOp(bool logOp) {
- _logOp = logOp;
- }
-
- ModifierInterface::Options UpdateDriver::modOptions() const {
- return _modOptions;
- }
-
- void UpdateDriver::setModOptions(ModifierInterface::Options modOpts) {
- _modOptions = modOpts;
- }
-
- ModifierInterface::ExecInfo::UpdateContext UpdateDriver::context() const {
- return _context;
- }
-
- void UpdateDriver::setContext(ModifierInterface::ExecInfo::UpdateContext context) {
- _context = context;
- }
-
- BSONObj UpdateDriver::makeOplogEntryQuery(const BSONObj& doc, bool multi) const {
- BSONObjBuilder idPattern;
- BSONElement id;
- // NOTE: If the matching object lacks an id, we'll log
- // with the original pattern. This isn't replay-safe.
- // It might make sense to suppress the log instead
- // if there's no id.
- if ( doc.getObjectID( id ) ) {
- idPattern.append( id );
- return idPattern.obj();
- }
- else {
- uassert( 16980,
- str::stream() << "Multi-update operations require all documents to "
- "have an '_id' field. " << doc.toString(false, false),
- ! multi );
- return doc;
- }
- }
- void UpdateDriver::clear() {
- for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
- delete *it;
- }
- _mods.clear();
- _indexedFields = NULL;
- _replacementMode = false;
- _positional = false;
+}
+void UpdateDriver::clear() {
+ for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
+ delete *it;
}
+ _mods.clear();
+ _indexedFields = NULL;
+ _replacementMode = false;
+ _positional = false;
+}
-} // namespace mongo
+} // namespace mongo
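Taken together, the driver methods above are used in a parse-then-update pattern; a hedged sketch modeled on applyUpdateOperators in update.cpp (the expected result assumes a plain $set with no indexed fields registered):

    UpdateDriver::Options opts;  // logOp defaults to false
    UpdateDriver driver(opts);
    uassertStatusOK(driver.parse(fromjson("{$set: {a: 1}}")));  // op-style, so isDocReplacement() is false

    mutablebson::Document doc(fromjson("{a: 0, b: 2}"), mutablebson::Document::kInPlaceDisabled);
    bool docWasModified = false;
    uassertStatusOK(driver.update(StringData(), &doc, NULL, NULL, &docWasModified));
    // Expected: doc now holds {a: 1, b: 2} and docWasModified is true.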
diff --git a/src/mongo/db/ops/update_driver.h b/src/mongo/db/ops/update_driver.h
index 067c432ebe2..00e3e2eb10f 100644
--- a/src/mongo/db/ops/update_driver.h
+++ b/src/mongo/db/ops/update_driver.h
@@ -43,153 +43,150 @@
namespace mongo {
- class UpdateDriver {
- public:
-
- struct Options;
- UpdateDriver(const Options& opts);
-
- ~UpdateDriver();
-
- /**
- * Returns OK and fills in '_mods' if 'updateExpr' is correct. Otherwise returns an
- * error status with a corresponding description.
- */
- Status parse(const BSONObj& updateExpr, const bool multi = false);
-
- /**
- * Fills in document with any fields in the query which are valid.
- *
- * Valid fields include equality matches like "a":1, or "a.b":false
- *
- * Each valid field will be expanded (from dot notation) and conflicts will be
- * checked for all fields added to the underlying document.
- *
- * Returns Status::OK() if the document can be used. If there are any error or
- * conflicts along the way then those errors will be returned.
- */
- Status populateDocumentWithQueryFields(const BSONObj& query,
- const std::vector<FieldRef*>* immutablePaths,
- mutablebson::Document& doc) const;
-
- Status populateDocumentWithQueryFields(const CanonicalQuery* query,
- const std::vector<FieldRef*>* immutablePaths,
- mutablebson::Document& doc) const;
-
- /**
- * return a BSONObj with the _id field of the doc passed in, or the doc itself.
- * If no _id and multi, error.
- */
- BSONObj makeOplogEntryQuery(const BSONObj& doc, bool multi) const;
-
- /**
- * Returns OK and executes '_mods' over 'doc', generating 'newObj'. If any mod is
- * positional, use 'matchedField' (index of the array item matched). If doc allows
- * mods to be applied in place and no index updating is involved, then the mods may
- * be applied "in place" over 'doc'.
- *
- * If the driver's '_logOp' mode is turned on, and if 'logOpRec' is not NULL, fills in
- * the latter with the oplog entry corresponding to the update. If '_mods' can't be
- * applied, returns an error status with a corresponding description.
- *
- * If a non-NULL updatedField vector* is supplied,
- * then all updated fields will be added to it.
- */
- Status update(StringData matchedField,
- mutablebson::Document* doc,
- BSONObj* logOpRec = NULL,
- FieldRefSet* updatedFields = NULL,
- bool* docWasModified = NULL);
-
- //
- // Accessors
- //
-
- size_t numMods() const;
-
- bool isDocReplacement() const;
-
- bool modsAffectIndices() const;
- void refreshIndexKeys(const UpdateIndexData* indexedFields);
-
- bool logOp() const;
- void setLogOp(bool logOp);
-
- ModifierInterface::Options modOptions() const;
- void setModOptions(ModifierInterface::Options modOpts);
-
- ModifierInterface::ExecInfo::UpdateContext context() const;
- void setContext(ModifierInterface::ExecInfo::UpdateContext context);
-
- mutablebson::Document& getDocument() {
- return _objDoc;
- }
-
- const mutablebson::Document& getDocument() const {
- return _objDoc;
- }
-
- bool needMatchDetails() const {
- return _positional;
- }
-
- private:
-
- /** Resets the state of the class associated with mods (not the error state) */
- void clear();
-
- /** Create the modifier and add it to the back of the modifiers vector */
- inline Status addAndParse(const modifiertable::ModifierType type,
- const BSONElement& elem);
-
- //
- // immutable properties after parsing
- //
-
- // Is there a list of $mod's on '_mods' or is it just full object replacement?
- bool _replacementMode;
-
- // Collection of update mod instances. Owned here.
- std::vector<ModifierInterface*> _mods;
-
- // What are the list of fields in the collection over which the update is going to be
- // applied that participate in indices?
- //
- // NOTE: Owned by the collection's info cache!.
- const UpdateIndexData* _indexedFields;
-
- //
- // mutable properties after parsing
- //
-
- // Should this driver generate an oplog record when it applies the update?
- bool _logOp;
-
- // The options to initiate the mods with
- ModifierInterface::Options _modOptions;
+class UpdateDriver {
+public:
+ struct Options;
+ UpdateDriver(const Options& opts);
+
+ ~UpdateDriver();
+
+ /**
+ * Returns OK and fills in '_mods' if 'updateExpr' is correct. Otherwise returns an
+ * error status with a corresponding description.
+ */
+ Status parse(const BSONObj& updateExpr, const bool multi = false);
+
+ /**
+ * Fills in document with any fields in the query which are valid.
+ *
+ * Valid fields include equality matches like "a":1, or "a.b":false
+ *
+ * Each valid field will be expanded (from dot notation) and conflicts will be
+ * checked for all fields added to the underlying document.
+ *
+ * Returns Status::OK() if the document can be used. If there are any error or
+ * conflicts along the way then those errors will be returned.
+ */
+ Status populateDocumentWithQueryFields(const BSONObj& query,
+ const std::vector<FieldRef*>* immutablePaths,
+ mutablebson::Document& doc) const;
+
+ Status populateDocumentWithQueryFields(const CanonicalQuery* query,
+ const std::vector<FieldRef*>* immutablePaths,
+ mutablebson::Document& doc) const;
+
+ /**
+ * Returns a BSONObj with the _id field of the doc passed in, or the doc itself
+ * if it has no _id. Errors if there is no _id and 'multi' is true.
+ */
+ BSONObj makeOplogEntryQuery(const BSONObj& doc, bool multi) const;
+
+ /**
+ * Returns OK and executes '_mods' over 'doc', generating 'newObj'. If any mod is
+ * positional, use 'matchedField' (index of the array item matched). If doc allows
+ * mods to be applied in place and no index updating is involved, then the mods may
+ * be applied "in place" over 'doc'.
+ *
+ * If the driver's '_logOp' mode is turned on, and if 'logOpRec' is not NULL, fills in
+ * the latter with the oplog entry corresponding to the update. If '_mods' can't be
+ * applied, returns an error status with a corresponding description.
+ *
+ * If a non-NULL 'updatedFields' set is supplied,
+ * then all updated field paths will be added to it.
+ */
+ Status update(StringData matchedField,
+ mutablebson::Document* doc,
+ BSONObj* logOpRec = NULL,
+ FieldRefSet* updatedFields = NULL,
+ bool* docWasModified = NULL);
+
+ //
+ // Accessors
+ //
+
+ size_t numMods() const;
+
+ bool isDocReplacement() const;
+
+ bool modsAffectIndices() const;
+ void refreshIndexKeys(const UpdateIndexData* indexedFields);
+
+ bool logOp() const;
+ void setLogOp(bool logOp);
+
+ ModifierInterface::Options modOptions() const;
+ void setModOptions(ModifierInterface::Options modOpts);
+
+ ModifierInterface::ExecInfo::UpdateContext context() const;
+ void setContext(ModifierInterface::ExecInfo::UpdateContext context);
+
+ mutablebson::Document& getDocument() {
+ return _objDoc;
+ }
+
+ const mutablebson::Document& getDocument() const {
+ return _objDoc;
+ }
+
+ bool needMatchDetails() const {
+ return _positional;
+ }
+
+private:
+ /** Resets the state of the class associated with mods (not the error state) */
+ void clear();
+
+ /** Create the modifier and add it to the back of the modifiers vector */
+ inline Status addAndParse(const modifiertable::ModifierType type, const BSONElement& elem);
+
+ //
+ // immutable properties after parsing
+ //
+
+ // Is there a list of $mod's on '_mods' or is it just full object replacement?
+ bool _replacementMode;
+
+ // Collection of update mod instances. Owned here.
+ std::vector<ModifierInterface*> _mods;
+
+ // What is the list of fields in the collection over which the update is going to be
+ // applied that participate in indices?
+ //
+ // NOTE: Owned by the collection's info cache!
+ const UpdateIndexData* _indexedFields;
+
+ //
+ // mutable properties after parsing
+ //
+
+ // Should this driver generate an oplog record when it applies the update?
+ bool _logOp;
+
+ // The options to initiate the mods with
+ ModifierInterface::Options _modOptions;
+
+ // Are any of the fields mentioned in the mods participating in any index? Is set anew
+ // at each call to update.
+ bool _affectIndices;
- // Are any of the fields mentioned in the mods participating in any index? Is set anew
- // at each call to update.
- bool _affectIndices;
+ // Do any of the mods require positional match details when calling 'prepare'?
+ bool _positional;
- // Do any of the mods require positional match details when calling 'prepare'?
- bool _positional;
+ // Is this update going to be an upsert?
+ ModifierInterface::ExecInfo::UpdateContext _context;
- // Is this update going to be an upsert?
- ModifierInterface::ExecInfo::UpdateContext _context;
+ // The document used to represent or store the object being updated.
+ mutablebson::Document _objDoc;
- // The document used to represent or store the object being updated.
- mutablebson::Document _objDoc;
+ // The document used to build the oplog entry for the update.
+ mutablebson::Document _logDoc;
+};
- // The document used to build the oplog entry for the update.
- mutablebson::Document _logDoc;
- };
+struct UpdateDriver::Options {
+ bool logOp;
+ ModifierInterface::Options modOptions;
- struct UpdateDriver::Options {
- bool logOp;
- ModifierInterface::Options modOptions;
+ Options() : logOp(false), modOptions() {}
+};
- Options() : logOp(false), modOptions() {}
- };
-
-} // namespace mongo
+} // namespace mongo
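For the upsert path, populateDocumentWithQueryFields seeds a starting document from the query's equality matches. A minimal sketch of that usage, in the style of the CreateFromQuery tests that follow (the expected seed follows the op-style extraction rules those tests exercise):

    UpdateDriver::Options opts;
    UpdateDriver driver(opts);
    ASSERT_OK(driver.parse(fromjson("{$set: {x: 1}}")));  // op-style driver: extract all equalities

    mutablebson::Document seed;  // default-constructed, empty document
    ASSERT_OK(driver.populateDocumentWithQueryFields(fromjson("{a: 1, 'b.c': 2}"), NULL, seed));
    // Expected: seed.getObject() is {a: 1, b: {c: 2}}. A replacement-style driver would keep only
    // _id and any immutable paths supplied in place of NULL.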
diff --git a/src/mongo/db/ops/update_driver_test.cpp b/src/mongo/db/ops/update_driver_test.cpp
index 1fd93aa27c1..1a778b231a9 100644
--- a/src/mongo/db/ops/update_driver_test.cpp
+++ b/src/mongo/db/ops/update_driver_test.cpp
@@ -42,362 +42,351 @@
namespace {
- using mongo::BSONObj;
- using mongo::BSONElement;
- using mongo::BSONObjIterator;
- using mongo::FieldRef;
- using mongo::fromjson;
- using mongo::OwnedPointerVector;
- using mongo::UpdateIndexData;
- using mongo::mutablebson::Document;
- using mongo::StringData;
- using mongo::UpdateDriver;
- using mongoutils::str::stream;
-
- TEST(Parse, Normal) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1}}")));
- ASSERT_EQUALS(driver.numMods(), 1U);
- ASSERT_FALSE(driver.isDocReplacement());
- }
-
- TEST(Parse, MultiMods) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1, b:1}}")));
- ASSERT_EQUALS(driver.numMods(), 2U);
- ASSERT_FALSE(driver.isDocReplacement());
- }
-
- TEST(Parse, MixingMods) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$set:{a:1}, $unset:{b:1}}")));
- ASSERT_EQUALS(driver.numMods(), 2U);
- ASSERT_FALSE(driver.isDocReplacement());
- }
-
- TEST(Parse, ObjectReplacment) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{obj: \"obj replacement\"}")));
- ASSERT_TRUE(driver.isDocReplacement());
- }
-
- TEST(Parse, EmptyMod) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:{}}")));
- }
-
- TEST(Parse, WrongMod) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$xyz:{a:1}}")));
- }
-
- TEST(Parse, WrongType) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:[{a:1}]}")));
- }
-
- TEST(Parse, ModsWithLaterObjReplacement) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_NOT_OK(driver.parse(fromjson("{$set:{a:1}, obj: \"obj replacement\"}")));
- }
-
- TEST(Parse, PushAll) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$pushAll:{a:[1,2,3]}}")));
- ASSERT_EQUALS(driver.numMods(), 1U);
- ASSERT_FALSE(driver.isDocReplacement());
- }
-
- TEST(Parse, SetOnInsert) {
- UpdateDriver::Options opts;
- UpdateDriver driver(opts);
- ASSERT_OK(driver.parse(fromjson("{$setOnInsert:{a:1}}")));
- ASSERT_EQUALS(driver.numMods(), 1U);
- ASSERT_FALSE(driver.isDocReplacement());
- }
-
- //
- // Tests of creating a base for an upsert from a query document
- // $or, $and, $all get special handling, as does the _id field
- //
- // NONGOAL: Testing all query parsing and nesting combinations
- //
-
- class CreateFromQueryFixture : public mongo::unittest::Test {
- public:
-
- CreateFromQueryFixture()
- : _driverOps(new UpdateDriver(UpdateDriver::Options())),
- _driverRepl(new UpdateDriver(UpdateDriver::Options())) {
- _driverOps->parse(fromjson("{$set:{'_':1}}"));
- _driverRepl->parse(fromjson("{}"));
- }
-
- Document& doc() {
- return _doc;
- }
-
- UpdateDriver& driverOps() {
- return *_driverOps;
- }
+using mongo::BSONObj;
+using mongo::BSONElement;
+using mongo::BSONObjIterator;
+using mongo::FieldRef;
+using mongo::fromjson;
+using mongo::OwnedPointerVector;
+using mongo::UpdateIndexData;
+using mongo::mutablebson::Document;
+using mongo::StringData;
+using mongo::UpdateDriver;
+using mongoutils::str::stream;
+
+TEST(Parse, Normal) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1}}")));
+ ASSERT_EQUALS(driver.numMods(), 1U);
+ ASSERT_FALSE(driver.isDocReplacement());
+}
+
+TEST(Parse, MultiMods) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1, b:1}}")));
+ ASSERT_EQUALS(driver.numMods(), 2U);
+ ASSERT_FALSE(driver.isDocReplacement());
+}
+
+TEST(Parse, MixingMods) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_OK(driver.parse(fromjson("{$set:{a:1}, $unset:{b:1}}")));
+ ASSERT_EQUALS(driver.numMods(), 2U);
+ ASSERT_FALSE(driver.isDocReplacement());
+}
+
+TEST(Parse, ObjectReplacment) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_OK(driver.parse(fromjson("{obj: \"obj replacement\"}")));
+ ASSERT_TRUE(driver.isDocReplacement());
+}
+
+TEST(Parse, EmptyMod) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_NOT_OK(driver.parse(fromjson("{$set:{}}")));
+}
+
+TEST(Parse, WrongMod) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_NOT_OK(driver.parse(fromjson("{$xyz:{a:1}}")));
+}
+
+TEST(Parse, WrongType) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_NOT_OK(driver.parse(fromjson("{$set:[{a:1}]}")));
+}
+
+TEST(Parse, ModsWithLaterObjReplacement) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_NOT_OK(driver.parse(fromjson("{$set:{a:1}, obj: \"obj replacement\"}")));
+}
+
+TEST(Parse, PushAll) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_OK(driver.parse(fromjson("{$pushAll:{a:[1,2,3]}}")));
+ ASSERT_EQUALS(driver.numMods(), 1U);
+ ASSERT_FALSE(driver.isDocReplacement());
+}
+
+TEST(Parse, SetOnInsert) {
+ UpdateDriver::Options opts;
+ UpdateDriver driver(opts);
+ ASSERT_OK(driver.parse(fromjson("{$setOnInsert:{a:1}}")));
+ ASSERT_EQUALS(driver.numMods(), 1U);
+ ASSERT_FALSE(driver.isDocReplacement());
+}
+
+//
+// Tests of creating a base for an upsert from a query document
+// $or, $and, $all get special handling, as does the _id field
+//
+// NONGOAL: Testing all query parsing and nesting combinations
+//
+
+class CreateFromQueryFixture : public mongo::unittest::Test {
+public:
+ CreateFromQueryFixture()
+ : _driverOps(new UpdateDriver(UpdateDriver::Options())),
+ _driverRepl(new UpdateDriver(UpdateDriver::Options())) {
+ _driverOps->parse(fromjson("{$set:{'_':1}}"));
+ _driverRepl->parse(fromjson("{}"));
+ }
+
+ Document& doc() {
+ return _doc;
+ }
+
+ UpdateDriver& driverOps() {
+ return *_driverOps;
+ }
+
+ UpdateDriver& driverRepl() {
+ return *_driverRepl;
+ }
+
+private:
+ std::unique_ptr<UpdateDriver> _driverOps;
+ std::unique_ptr<UpdateDriver> _driverRepl;
+ Document _doc;
+};
+
+// Make name nicer to report
+typedef CreateFromQueryFixture CreateFromQuery;
+
+static void assertSameFields(const BSONObj& docA, const BSONObj& docB);
- UpdateDriver& driverRepl() {
- return *_driverRepl;
- }
-
- private:
- std::unique_ptr<UpdateDriver> _driverOps;
- std::unique_ptr<UpdateDriver> _driverRepl;
- Document _doc;
- };
-
- // Make name nicer to report
- typedef CreateFromQueryFixture CreateFromQuery;
-
- static void assertSameFields(const BSONObj& docA, const BSONObj& docB);
-
- /**
- * Recursively asserts that two BSONElements contain the same data or sub-elements,
- * ignoring element order.
- */
- static void assertSameElements(const BSONElement& elA, const BSONElement& elB) {
- if (elA.type() != elB.type() || (!elA.isABSONObj() && !elA.valuesEqual(elB))) {
+/**
+ * Recursively asserts that two BSONElements contain the same data or sub-elements,
+ * ignoring element order.
+ */
+static void assertSameElements(const BSONElement& elA, const BSONElement& elB) {
+ if (elA.type() != elB.type() || (!elA.isABSONObj() && !elA.valuesEqual(elB))) {
+ FAIL(stream() << "element " << elA << " not equal to " << elB);
+ } else if (elA.type() == mongo::Array) {
+ std::vector<BSONElement> elsA = elA.Array();
+ std::vector<BSONElement> elsB = elB.Array();
+ if (elsA.size() != elsB.size())
FAIL(stream() << "element " << elA << " not equal to " << elB);
- }
- else if (elA.type() == mongo::Array) {
- std::vector<BSONElement> elsA = elA.Array();
- std::vector<BSONElement> elsB = elB.Array();
- if (elsA.size() != elsB.size())
- FAIL(stream() << "element " << elA << " not equal to " << elB);
-
- std::vector<BSONElement>::iterator arrItA = elsA.begin();
- std::vector<BSONElement>::iterator arrItB = elsB.begin();
- for (; arrItA != elsA.end(); ++arrItA, ++arrItB) {
- assertSameElements(*arrItA, *arrItB);
- }
- }
- else if (elA.type() == mongo::Object) {
- assertSameFields(elA.Obj(), elB.Obj());
- }
- }
-
- /**
- * Recursively asserts that two BSONObjects contain the same elements,
- * ignoring element order.
- */
- static void assertSameFields(const BSONObj& docA, const BSONObj& docB) {
-
- if (docA.nFields() != docB.nFields())
- FAIL(stream() << "document " << docA << " has different fields than " << docB);
- std::map<StringData, BSONElement> docAMap;
- BSONObjIterator itA(docA);
- while (itA.more()) {
- BSONElement elA = itA.next();
- docAMap.insert(std::make_pair(elA.fieldNameStringData(), elA));
+ std::vector<BSONElement>::iterator arrItA = elsA.begin();
+ std::vector<BSONElement>::iterator arrItB = elsB.begin();
+ for (; arrItA != elsA.end(); ++arrItA, ++arrItB) {
+ assertSameElements(*arrItA, *arrItB);
}
-
- BSONObjIterator itB(docB);
- while (itB.more()) {
- BSONElement elB = itB.next();
-
- std::map<StringData, BSONElement>::iterator seenIt = docAMap.find(elB
- .fieldNameStringData());
- if (seenIt == docAMap.end())
- FAIL(stream() << "element " << elB << " not found in " << docA);
-
- BSONElement elA = seenIt->second;
- assertSameElements(elA, elB);
- }
- }
-
- TEST_F(CreateFromQuery, BasicOp) {
- BSONObj query = fromjson("{a:1,b:2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(query, doc().getObject());
- }
-
- TEST_F(CreateFromQuery, BasicOpEq) {
- BSONObj query = fromjson("{a:{$eq:1}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{a:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, BasicOpWithId) {
- BSONObj query = fromjson("{_id:1,a:1,b:2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(query, doc().getObject());
- }
-
- TEST_F(CreateFromQuery, BasicRepl) {
- BSONObj query = fromjson("{a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{}"), doc().getObject());
+ } else if (elA.type() == mongo::Object) {
+ assertSameFields(elA.Obj(), elB.Obj());
}
+}
- TEST_F(CreateFromQuery, BasicReplWithId) {
- BSONObj query = fromjson("{_id:1,a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{_id:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, BasicReplWithIdEq) {
- BSONObj query = fromjson("{_id:{$eq:1},a:1,b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{_id:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, NoRootIdOp) {
- BSONObj query = fromjson("{'_id.a':1,'_id.b':2}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{_id:{a:1,b:2}}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, NoRootIdRepl) {
- BSONObj query = fromjson("{'_id.a':1,'_id.b':2}");
- ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- }
-
- TEST_F(CreateFromQuery, NestedSharedRootOp) {
- BSONObj query = fromjson("{'a.c':1,'a.b':{$eq:2}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{a:{c:1,b:2}}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, OrQueryOp) {
- BSONObj query = fromjson("{$or:[{a:1}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{a:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, OrQueryIdRepl) {
- BSONObj query = fromjson("{$or:[{_id:1}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{_id:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, OrQueryNoExtractOps) {
- BSONObj query = fromjson("{$or:[{a:1}, {b:2}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(BSONObj(), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, OrQueryNoExtractIdRepl) {
- BSONObj query = fromjson("{$or:[{_id:1}, {_id:2}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(BSONObj(), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, AndQueryOp) {
- BSONObj query = fromjson("{$and:[{'a.c':1},{'a.b':{$eq:2}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{a:{c:1,b:2}}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, AndQueryIdRepl) {
- BSONObj query = fromjson("{$and:[{_id:1},{a:{$eq:2}}]}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{_id:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, AllArrayOp) {
- BSONObj query = fromjson("{a:{$all:[1]}}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{a:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, AllArrayIdRepl) {
- BSONObj query = fromjson("{_id:{$all:[1]}, b:2}");
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(fromjson("{_id:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, ConflictFieldsFailOp) {
- BSONObj query = fromjson("{a:1,'a.b':1}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- }
-
- TEST_F(CreateFromQuery, ConflictFieldsFailSameValueOp) {
- BSONObj query = fromjson("{a:{b:1},'a.b':1}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- }
-
- TEST_F(CreateFromQuery, ConflictWithIdRepl) {
- BSONObj query = fromjson("{_id:1,'_id.a':1}");
- ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
- }
-
- TEST_F(CreateFromQuery, ConflictAndQueryOp) {
- BSONObj query = fromjson("{$and:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- }
-
- TEST_F(CreateFromQuery, ConflictAllMultipleValsOp) {
- BSONObj query = fromjson("{a:{$all:[1, 2]}}");
- ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- }
-
- TEST_F(CreateFromQuery, NoConflictOrQueryOp) {
- BSONObj query = fromjson("{$or:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(BSONObj(), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, ImmutableFieldsOp) {
- BSONObj query = fromjson("{$or:[{a:{b:1}},{'a.b':{$eq:1}}]}");
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
- assertSameFields(BSONObj(), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, ShardKeyRepl) {
- BSONObj query = fromjson("{a:{$eq:1}}, b:2}");
- OwnedPointerVector<FieldRef> immutablePaths;
- immutablePaths.push_back(new FieldRef("a"));
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query,
- &immutablePaths.vector(),
- doc()));
- assertSameFields(fromjson("{a:1}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, NestedShardKeyRepl) {
- BSONObj query = fromjson("{a:{$eq:1},'b.c':2},d:2}");
- OwnedPointerVector<FieldRef> immutablePaths;
- immutablePaths.push_back(new FieldRef("a"));
- immutablePaths.push_back(new FieldRef("b.c"));
- ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query,
- &immutablePaths.vector(),
- doc()));
- assertSameFields(fromjson("{a:1,b:{c:2}}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, NestedShardKeyOp) {
- BSONObj query = fromjson("{a:{$eq:1},'b.c':2,d:{$all:[3]}},e:2}");
- OwnedPointerVector<FieldRef> immutablePaths;
- immutablePaths.push_back(new FieldRef("a"));
- immutablePaths.push_back(new FieldRef("b.c"));
- ASSERT_OK(driverOps().populateDocumentWithQueryFields(query,
- &immutablePaths.vector(),
- doc()));
- assertSameFields(fromjson("{a:1,b:{c:2},d:3}"), doc().getObject());
- }
-
- TEST_F(CreateFromQuery, NotFullShardKeyRepl) {
- BSONObj query = fromjson("{a:{$eq:1}, 'b.c':2}, d:2}");
- OwnedPointerVector<FieldRef> immutablePaths;
- immutablePaths.push_back(new FieldRef("a"));
- immutablePaths.push_back(new FieldRef("b"));
- ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(query,
- &immutablePaths.vector(),
- doc()));
- }
-
-} // unnamed namespace
+/**
+ * Recursively asserts that two BSONObjects contain the same elements,
+ * ignoring element order.
+ */
+static void assertSameFields(const BSONObj& docA, const BSONObj& docB) {
+ if (docA.nFields() != docB.nFields())
+ FAIL(stream() << "document " << docA << " has different fields than " << docB);
+
+ std::map<StringData, BSONElement> docAMap;
+ BSONObjIterator itA(docA);
+ while (itA.more()) {
+ BSONElement elA = itA.next();
+ docAMap.insert(std::make_pair(elA.fieldNameStringData(), elA));
+ }
+
+ BSONObjIterator itB(docB);
+ while (itB.more()) {
+ BSONElement elB = itB.next();
+
+ std::map<StringData, BSONElement>::iterator seenIt =
+ docAMap.find(elB.fieldNameStringData());
+ if (seenIt == docAMap.end())
+ FAIL(stream() << "element " << elB << " not found in " << docA);
+
+ BSONElement elA = seenIt->second;
+ assertSameElements(elA, elB);
+ }
+}
+
+TEST_F(CreateFromQuery, BasicOp) {
+ BSONObj query = fromjson("{a:1,b:2}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(query, doc().getObject());
+}
+
+TEST_F(CreateFromQuery, BasicOpEq) {
+ BSONObj query = fromjson("{a:{$eq:1}}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{a:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, BasicOpWithId) {
+ BSONObj query = fromjson("{_id:1,a:1,b:2}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(query, doc().getObject());
+}
+
+TEST_F(CreateFromQuery, BasicRepl) {
+ BSONObj query = fromjson("{a:1,b:2}");
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, BasicReplWithId) {
+ BSONObj query = fromjson("{_id:1,a:1,b:2}");
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{_id:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, BasicReplWithIdEq) {
+ BSONObj query = fromjson("{_id:{$eq:1},a:1,b:2}");
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{_id:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, NoRootIdOp) {
+ BSONObj query = fromjson("{'_id.a':1,'_id.b':2}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{_id:{a:1,b:2}}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, NoRootIdRepl) {
+ BSONObj query = fromjson("{'_id.a':1,'_id.b':2}");
+ ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+}
+
+TEST_F(CreateFromQuery, NestedSharedRootOp) {
+ BSONObj query = fromjson("{'a.c':1,'a.b':{$eq:2}}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{a:{c:1,b:2}}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, OrQueryOp) {
+ BSONObj query = fromjson("{$or:[{a:1}]}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{a:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, OrQueryIdRepl) {
+ BSONObj query = fromjson("{$or:[{_id:1}]}");
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{_id:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, OrQueryNoExtractOps) {
+ BSONObj query = fromjson("{$or:[{a:1}, {b:2}]}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(BSONObj(), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, OrQueryNoExtractIdRepl) {
+ BSONObj query = fromjson("{$or:[{_id:1}, {_id:2}]}");
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(BSONObj(), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, AndQueryOp) {
+ BSONObj query = fromjson("{$and:[{'a.c':1},{'a.b':{$eq:2}}]}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{a:{c:1,b:2}}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, AndQueryIdRepl) {
+ BSONObj query = fromjson("{$and:[{_id:1},{a:{$eq:2}}]}");
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{_id:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, AllArrayOp) {
+ BSONObj query = fromjson("{a:{$all:[1]}}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{a:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, AllArrayIdRepl) {
+ BSONObj query = fromjson("{_id:{$all:[1]}, b:2}");
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(fromjson("{_id:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, ConflictFieldsFailOp) {
+ BSONObj query = fromjson("{a:1,'a.b':1}");
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+}
+
+TEST_F(CreateFromQuery, ConflictFieldsFailSameValueOp) {
+ BSONObj query = fromjson("{a:{b:1},'a.b':1}");
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+}
+
+TEST_F(CreateFromQuery, ConflictWithIdRepl) {
+ BSONObj query = fromjson("{_id:1,'_id.a':1}");
+ ASSERT_NOT_OK(driverRepl().populateDocumentWithQueryFields(query, NULL, doc()));
+}
+
+TEST_F(CreateFromQuery, ConflictAndQueryOp) {
+ BSONObj query = fromjson("{$and:[{a:{b:1}},{'a.b':{$eq:1}}]}");
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+}
+
+TEST_F(CreateFromQuery, ConflictAllMultipleValsOp) {
+ BSONObj query = fromjson("{a:{$all:[1, 2]}}");
+ ASSERT_NOT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+}
+
+TEST_F(CreateFromQuery, NoConflictOrQueryOp) {
+ BSONObj query = fromjson("{$or:[{a:{b:1}},{'a.b':{$eq:1}}]}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(BSONObj(), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, ImmutableFieldsOp) {
+ BSONObj query = fromjson("{$or:[{a:{b:1}},{'a.b':{$eq:1}}]}");
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, NULL, doc()));
+ assertSameFields(BSONObj(), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, ShardKeyRepl) {
+    BSONObj query = fromjson("{a:{$eq:1}, b:2}");
+ OwnedPointerVector<FieldRef> immutablePaths;
+ immutablePaths.push_back(new FieldRef("a"));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, &immutablePaths.vector(), doc()));
+ assertSameFields(fromjson("{a:1}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, NestedShardKeyRepl) {
+    BSONObj query = fromjson("{a:{$eq:1},'b.c':2,d:2}");
+ OwnedPointerVector<FieldRef> immutablePaths;
+ immutablePaths.push_back(new FieldRef("a"));
+ immutablePaths.push_back(new FieldRef("b.c"));
+ ASSERT_OK(driverRepl().populateDocumentWithQueryFields(query, &immutablePaths.vector(), doc()));
+ assertSameFields(fromjson("{a:1,b:{c:2}}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, NestedShardKeyOp) {
+    BSONObj query = fromjson("{a:{$eq:1},'b.c':2,d:{$all:[3]}}");
+ OwnedPointerVector<FieldRef> immutablePaths;
+ immutablePaths.push_back(new FieldRef("a"));
+ immutablePaths.push_back(new FieldRef("b.c"));
+ ASSERT_OK(driverOps().populateDocumentWithQueryFields(query, &immutablePaths.vector(), doc()));
+ assertSameFields(fromjson("{a:1,b:{c:2},d:3}"), doc().getObject());
+}
+
+TEST_F(CreateFromQuery, NotFullShardKeyRepl) {
+    BSONObj query = fromjson("{a:{$eq:1}, 'b.c':2, d:2}");
+ OwnedPointerVector<FieldRef> immutablePaths;
+ immutablePaths.push_back(new FieldRef("a"));
+ immutablePaths.push_back(new FieldRef("b"));
+ ASSERT_NOT_OK(
+ driverRepl().populateDocumentWithQueryFields(query, &immutablePaths.vector(), doc()));
+}
+
+} // unnamed namespace
diff --git a/src/mongo/db/ops/update_lifecycle.h b/src/mongo/db/ops/update_lifecycle.h
index 93811f28cff..9454aef1c1e 100644
--- a/src/mongo/db/ops/update_lifecycle.h
+++ b/src/mongo/db/ops/update_lifecycle.h
@@ -32,42 +32,41 @@
namespace mongo {
- class Collection;
- class FieldRef;
- class OperationContext;
- class UpdateIndexData;
+class Collection;
+class FieldRef;
+class OperationContext;
+class UpdateIndexData;
- class UpdateLifecycle {
- public:
+class UpdateLifecycle {
+public:
+ virtual ~UpdateLifecycle() {}
- virtual ~UpdateLifecycle() {}
+ /**
+ * Update the cached collection pointer that this lifecycle object uses.
+ */
+ virtual void setCollection(Collection* collection) = 0;
- /**
- * Update the cached collection pointer that this lifecycle object uses.
- */
- virtual void setCollection(Collection* collection) = 0;
+ /**
+ * Can the update continue?
+ *
+ * The (only) implementation will check the following:
+ * 1.) Collection still exists
+     * 2.) Shard version has not changed (a change means the query/update is no longer valid)
+ */
+ virtual bool canContinue() const = 0;
- /**
- * Can the update continue?
- *
- * The (only) implementation will check the following:
- * 1.) Collection still exists
- * 2.) Shard version has not changed (indicating that the query/update is not valid
- */
- virtual bool canContinue() const = 0;
+ /**
+ * Return a pointer to any indexes if there is a collection.
+ */
+ virtual const UpdateIndexData* getIndexKeys(OperationContext* opCtx) const = 0;
- /**
- * Return a pointer to any indexes if there is a collection.
- */
- virtual const UpdateIndexData* getIndexKeys( OperationContext* opCtx ) const = 0;
+ /**
+     * Returns the shard keys as immutable fields.
+     * Immutable fields in this case mean that they are required to exist, cannot change values,
+     * and must not be multi-valued (neither inside an array nor an array themselves).
+ */
+ virtual const std::vector<FieldRef*>* getImmutableFields() const = 0;
+};
- /**
- * Returns the shard keys as immutable fields
- * Immutable fields in this case mean that they are required to exist, cannot change values
- * and must not be multi-valued (in an array, or an array)
- */
- virtual const std::vector<FieldRef*>* getImmutableFields() const = 0;
- };
-
-} // namespace mongo
+} // namespace mongo
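
A minimal sketch of how an update path might consult the UpdateLifecycle interface reformatted above. The helper name and control flow are hypothetical; only the virtual methods declared in update_lifecycle.h are assumed.

#include "mongo/db/ops/update_lifecycle.h"

namespace mongo {

// Illustrative only: refresh the lifecycle's cached collection and ask whether
// the update may proceed.
bool refreshAndCheckLifecycle(UpdateLifecycle* lifecycle, Collection* collection) {
    if (!lifecycle)
        return true;  // updates without lifecycle data skip these checks

    lifecycle->setCollection(collection);  // refresh the cached collection pointer
    return lifecycle->canContinue();       // false if the collection vanished or the
                                           // shard version changed underneath us
}

}  // namespace mongo
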
diff --git a/src/mongo/db/ops/update_lifecycle_impl.cpp b/src/mongo/db/ops/update_lifecycle_impl.cpp
index fed352c4fa4..95c15f2d04b 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.cpp
+++ b/src/mongo/db/ops/update_lifecycle_impl.cpp
@@ -36,46 +36,45 @@
#include "mongo/s/d_state.h"
namespace mongo {
- namespace {
- CollectionMetadataPtr getMetadata(const NamespaceString& nsString) {
- if (shardingState.enabled()) {
- return shardingState.getCollectionMetadata(nsString.ns());
- }
-
- return CollectionMetadataPtr();
- }
+namespace {
+CollectionMetadataPtr getMetadata(const NamespaceString& nsString) {
+ if (shardingState.enabled()) {
+ return shardingState.getCollectionMetadata(nsString.ns());
}
- UpdateLifecycleImpl::UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsStr)
- : _nsString(nsStr)
- , _shardVersion((!ignoreVersion && getMetadata(_nsString)) ?
- getMetadata(_nsString)->getShardVersion() :
- ChunkVersion::IGNORED()) {
- }
+ return CollectionMetadataPtr();
+}
+}
- void UpdateLifecycleImpl::setCollection(Collection* collection) {
- _collection = collection;
- }
+UpdateLifecycleImpl::UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsStr)
+ : _nsString(nsStr),
+ _shardVersion((!ignoreVersion && getMetadata(_nsString))
+ ? getMetadata(_nsString)->getShardVersion()
+ : ChunkVersion::IGNORED()) {}
- bool UpdateLifecycleImpl::canContinue() const {
- // Collection needs to exist to continue
- return _collection;
- }
+void UpdateLifecycleImpl::setCollection(Collection* collection) {
+ _collection = collection;
+}
- const UpdateIndexData* UpdateLifecycleImpl::getIndexKeys(OperationContext* opCtx) const {
- if (_collection)
- return &_collection->infoCache()->indexKeys(opCtx);
- return NULL;
- }
+bool UpdateLifecycleImpl::canContinue() const {
+ // Collection needs to exist to continue
+ return _collection;
+}
+
+const UpdateIndexData* UpdateLifecycleImpl::getIndexKeys(OperationContext* opCtx) const {
+ if (_collection)
+ return &_collection->infoCache()->indexKeys(opCtx);
+ return NULL;
+}
- const std::vector<FieldRef*>* UpdateLifecycleImpl::getImmutableFields() const {
- CollectionMetadataPtr metadata = getMetadata(_nsString);
- if (metadata) {
- const std::vector<FieldRef*>& fields = metadata->getKeyPatternFields();
- // Return shard-keys as immutable for the update system.
- return &fields;
- }
- return NULL;
+const std::vector<FieldRef*>* UpdateLifecycleImpl::getImmutableFields() const {
+ CollectionMetadataPtr metadata = getMetadata(_nsString);
+ if (metadata) {
+ const std::vector<FieldRef*>& fields = metadata->getKeyPatternFields();
+ // Return shard-keys as immutable for the update system.
+ return &fields;
}
+ return NULL;
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/ops/update_lifecycle_impl.h b/src/mongo/db/ops/update_lifecycle_impl.h
index c321b43912d..114ebd72ba2 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.h
+++ b/src/mongo/db/ops/update_lifecycle_impl.h
@@ -35,31 +35,30 @@
namespace mongo {
- class UpdateLifecycleImpl : public UpdateLifecycle {
- MONGO_DISALLOW_COPYING(UpdateLifecycleImpl);
+class UpdateLifecycleImpl : public UpdateLifecycle {
+ MONGO_DISALLOW_COPYING(UpdateLifecycleImpl);
- public:
+public:
+ /**
+     * ignoreVersion is for shard version checking and
+     * means that version checks will not be done.
+     *
+     * nsString represents the namespace of the collection being updated.
+ */
+ UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsString);
- /**
- * ignoreVersion is for shard version checking and
- * means that version checks will not be done
- *
- * nsString represents the namespace for the
- */
- UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsString);
+ virtual void setCollection(Collection* collection);
- virtual void setCollection(Collection* collection);
+ virtual bool canContinue() const;
- virtual bool canContinue() const;
+ virtual const UpdateIndexData* getIndexKeys(OperationContext* opCtx) const;
- virtual const UpdateIndexData* getIndexKeys(OperationContext* opCtx) const;
+ virtual const std::vector<FieldRef*>* getImmutableFields() const;
- virtual const std::vector<FieldRef*>* getImmutableFields() const;
-
- private:
- Collection* _collection;
- const NamespaceString& _nsString;
- ChunkVersion _shardVersion;
- };
+private:
+ Collection* _collection;
+ const NamespaceString& _nsString;
+ ChunkVersion _shardVersion;
+};
} /* namespace mongo */
diff --git a/src/mongo/db/ops/update_request.h b/src/mongo/db/ops/update_request.h
index d300a8fc4f2..589be047046 100644
--- a/src/mongo/db/ops/update_request.h
+++ b/src/mongo/db/ops/update_request.h
@@ -36,212 +36,206 @@
namespace mongo {
- namespace str = mongoutils::str;
-
- class FieldRef;
- class UpdateLifecycle;
-
- class UpdateRequest {
- public:
- enum ReturnDocOption {
- // Return no document.
- RETURN_NONE,
-
- // Return the document as it was before the update. If the update results in an insert,
- // no document will be returned.
- RETURN_OLD,
-
- // Return the document as it is after the update.
- RETURN_NEW
- };
- inline UpdateRequest(const NamespaceString& nsString)
- : _nsString(nsString)
- , _god(false)
- , _upsert(false)
- , _multi(false)
- , _fromMigration(false)
- , _lifecycle(NULL)
- , _isExplain(false)
- , _returnDocs(ReturnDocOption::RETURN_NONE)
- , _yieldPolicy(PlanExecutor::YIELD_MANUAL) {}
-
- const NamespaceString& getNamespaceString() const {
- return _nsString;
- }
-
- inline void setQuery(const BSONObj& query) {
- _query = query;
- }
-
- inline const BSONObj& getQuery() const {
- return _query;
- }
-
- inline void setProj(const BSONObj& proj) {
- _proj = proj;
- }
-
- inline const BSONObj& getProj() const {
- return _proj;
- }
-
- inline void setSort(const BSONObj& sort) {
- _sort = sort;
- }
-
- inline const BSONObj& getSort() const {
- return _sort;
- }
-
- inline void setUpdates(const BSONObj& updates) {
- _updates = updates;
- }
-
- inline const BSONObj& getUpdates() const {
- return _updates;
- }
-
- // Please see documentation on the private members matching these names for
- // explanations of the following fields.
-
- inline void setGod(bool value = true) {
- _god = value;
- }
-
- bool isGod() const {
- return _god;
- }
-
- inline void setUpsert(bool value = true) {
- _upsert = value;
- }
-
- bool isUpsert() const {
- return _upsert;
- }
-
- inline void setMulti(bool value = true) {
- _multi = value;
- }
-
- bool isMulti() const {
- return _multi;
- }
-
- inline void setFromMigration(bool value = true) {
- _fromMigration = value;
- }
-
- bool isFromMigration() const {
- return _fromMigration;
- }
-
- inline void setLifecycle(UpdateLifecycle* value) {
- _lifecycle = value;
- }
-
- inline UpdateLifecycle* getLifecycle() const {
- return _lifecycle;
- }
-
- inline void setExplain(bool value = true) {
- _isExplain = value;
- }
-
- inline bool isExplain() const {
- return _isExplain;
- }
-
- inline void setReturnDocs(ReturnDocOption value) {
- _returnDocs = value;
- }
-
- inline bool shouldReturnOldDocs() const {
- return _returnDocs == ReturnDocOption::RETURN_OLD;
- }
-
- inline bool shouldReturnNewDocs() const {
- return _returnDocs == ReturnDocOption::RETURN_NEW;
- }
+namespace str = mongoutils::str;
- inline bool shouldReturnAnyDocs() const {
- return shouldReturnOldDocs() || shouldReturnNewDocs();
- }
+class FieldRef;
+class UpdateLifecycle;
- inline void setYieldPolicy(PlanExecutor::YieldPolicy yieldPolicy) {
- _yieldPolicy = yieldPolicy;
- }
+class UpdateRequest {
+public:
+ enum ReturnDocOption {
+ // Return no document.
+ RETURN_NONE,
- inline PlanExecutor::YieldPolicy getYieldPolicy() const {
- return _yieldPolicy;
- }
+ // Return the document as it was before the update. If the update results in an insert,
+ // no document will be returned.
+ RETURN_OLD,
- const std::string toString() const {
- return str::stream()
- << " query: " << _query
- << " projection: " << _proj
- << " sort: " << _sort
- << " updated: " << _updates
- << " god: " << _god
- << " upsert: " << _upsert
- << " multi: " << _multi
- << " fromMigration: " << _fromMigration
- << " isExplain: " << _isExplain;
- }
- private:
-
- const NamespaceString& _nsString;
+ // Return the document as it is after the update.
+ RETURN_NEW
+ };
+ inline UpdateRequest(const NamespaceString& nsString)
+ : _nsString(nsString),
+ _god(false),
+ _upsert(false),
+ _multi(false),
+ _fromMigration(false),
+ _lifecycle(NULL),
+ _isExplain(false),
+ _returnDocs(ReturnDocOption::RETURN_NONE),
+ _yieldPolicy(PlanExecutor::YIELD_MANUAL) {}
+
+ const NamespaceString& getNamespaceString() const {
+ return _nsString;
+ }
+
+ inline void setQuery(const BSONObj& query) {
+ _query = query;
+ }
+
+ inline const BSONObj& getQuery() const {
+ return _query;
+ }
+
+ inline void setProj(const BSONObj& proj) {
+ _proj = proj;
+ }
+
+ inline const BSONObj& getProj() const {
+ return _proj;
+ }
+
+ inline void setSort(const BSONObj& sort) {
+ _sort = sort;
+ }
+
+ inline const BSONObj& getSort() const {
+ return _sort;
+ }
+
+ inline void setUpdates(const BSONObj& updates) {
+ _updates = updates;
+ }
+
+ inline const BSONObj& getUpdates() const {
+ return _updates;
+ }
- // Contains the query that selects documents to update.
- BSONObj _query;
-
- // Contains the projection information.
- BSONObj _proj;
-
- // Contains the sort order information.
- BSONObj _sort;
+ // Please see documentation on the private members matching these names for
+ // explanations of the following fields.
+
+ inline void setGod(bool value = true) {
+ _god = value;
+ }
- // Contains the modifiers to apply to matched objects, or a replacement document.
- BSONObj _updates;
+ bool isGod() const {
+ return _god;
+ }
- // Flags controlling the update.
-
- // God bypasses _id checking and index generation. It is only used on behalf of system
- // updates, never user updates.
- bool _god;
-
- // True if this should insert if no matching document is found.
- bool _upsert;
-
- // True if this update is allowed to affect more than one document.
- bool _multi;
+ inline void setUpsert(bool value = true) {
+ _upsert = value;
+ }
- // True if this update is on behalf of a chunk migration.
- bool _fromMigration;
-
- // The lifecycle data, and events used during the update request.
- UpdateLifecycle* _lifecycle;
-
- // Whether or not we are requesting an explained update. Explained updates are read-only.
- bool _isExplain;
-
- // Specifies which version of the documents to return, if any.
- //
- // RETURN_NONE (default): Never return any documents, old or new.
- // RETURN_OLD: Return ADVANCED when a matching document is encountered, and the value of
- // the document before it was updated. If there were no matches, return
- // IS_EOF instead (even in case of an upsert).
- // RETURN_NEW: Return ADVANCED when a matching document is encountered, and the value of
- // the document after being updated. If an upsert was specified and it
- // resulted in an insert, return the inserted document.
- //
- // This allows findAndModify to execute an update and retrieve the resulting document
- // without another query before or after the update.
- ReturnDocOption _returnDocs;
-
- // Whether or not the update should yield. Defaults to YIELD_MANUAL.
- PlanExecutor::YieldPolicy _yieldPolicy;
+ bool isUpsert() const {
+ return _upsert;
+ }
- };
+ inline void setMulti(bool value = true) {
+ _multi = value;
+ }
+
+ bool isMulti() const {
+ return _multi;
+ }
+
+ inline void setFromMigration(bool value = true) {
+ _fromMigration = value;
+ }
+
+ bool isFromMigration() const {
+ return _fromMigration;
+ }
+
+ inline void setLifecycle(UpdateLifecycle* value) {
+ _lifecycle = value;
+ }
+
+ inline UpdateLifecycle* getLifecycle() const {
+ return _lifecycle;
+ }
+
+ inline void setExplain(bool value = true) {
+ _isExplain = value;
+ }
+
+ inline bool isExplain() const {
+ return _isExplain;
+ }
+
+ inline void setReturnDocs(ReturnDocOption value) {
+ _returnDocs = value;
+ }
+
+ inline bool shouldReturnOldDocs() const {
+ return _returnDocs == ReturnDocOption::RETURN_OLD;
+ }
+
+ inline bool shouldReturnNewDocs() const {
+ return _returnDocs == ReturnDocOption::RETURN_NEW;
+ }
+
+ inline bool shouldReturnAnyDocs() const {
+ return shouldReturnOldDocs() || shouldReturnNewDocs();
+ }
+
+ inline void setYieldPolicy(PlanExecutor::YieldPolicy yieldPolicy) {
+ _yieldPolicy = yieldPolicy;
+ }
+
+ inline PlanExecutor::YieldPolicy getYieldPolicy() const {
+ return _yieldPolicy;
+ }
+
+ const std::string toString() const {
+ return str::stream() << " query: " << _query << " projection: " << _proj
+ << " sort: " << _sort << " updated: " << _updates << " god: " << _god
+ << " upsert: " << _upsert << " multi: " << _multi
+ << " fromMigration: " << _fromMigration
+ << " isExplain: " << _isExplain;
+ }
+
+private:
+ const NamespaceString& _nsString;
+
+ // Contains the query that selects documents to update.
+ BSONObj _query;
+
+ // Contains the projection information.
+ BSONObj _proj;
+
+ // Contains the sort order information.
+ BSONObj _sort;
+
+ // Contains the modifiers to apply to matched objects, or a replacement document.
+ BSONObj _updates;
+
+ // Flags controlling the update.
+
+ // God bypasses _id checking and index generation. It is only used on behalf of system
+ // updates, never user updates.
+ bool _god;
+
+ // True if this should insert if no matching document is found.
+ bool _upsert;
+
+ // True if this update is allowed to affect more than one document.
+ bool _multi;
+
+ // True if this update is on behalf of a chunk migration.
+ bool _fromMigration;
+
+ // The lifecycle data, and events used during the update request.
+ UpdateLifecycle* _lifecycle;
+
+ // Whether or not we are requesting an explained update. Explained updates are read-only.
+ bool _isExplain;
+
+ // Specifies which version of the documents to return, if any.
+ //
+ // RETURN_NONE (default): Never return any documents, old or new.
+ // RETURN_OLD: Return ADVANCED when a matching document is encountered, and the value of
+ // the document before it was updated. If there were no matches, return
+ // IS_EOF instead (even in case of an upsert).
+ // RETURN_NEW: Return ADVANCED when a matching document is encountered, and the value of
+ // the document after being updated. If an upsert was specified and it
+ // resulted in an insert, return the inserted document.
+ //
+ // This allows findAndModify to execute an update and retrieve the resulting document
+ // without another query before or after the update.
+ ReturnDocOption _returnDocs;
+
+ // Whether or not the update should yield. Defaults to YIELD_MANUAL.
+ PlanExecutor::YieldPolicy _yieldPolicy;
+};
-} // namespace mongo
+} // namespace mongo
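
A short, hedged sketch of building an UpdateRequest with the setters shown above; the query, update document, and helper name are illustrative only, and fromjson is assumed available from the server's JSON helpers.

#include "mongo/db/ops/update_request.h"

namespace mongo {

// Illustrative only: configure an upsert that hands back the post-image,
// the way a findAndModify-style caller might.
void configureExampleRequest(UpdateRequest* request) {
    request->setQuery(fromjson("{_id: 1}"));
    request->setUpdates(fromjson("{$inc: {counter: 1}}"));
    request->setUpsert();                               // insert when nothing matches
    request->setMulti(false);                           // single-document update
    request->setReturnDocs(UpdateRequest::RETURN_NEW);  // return the updated document
}

}  // namespace mongo
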
diff --git a/src/mongo/db/ops/update_result.cpp b/src/mongo/db/ops/update_result.cpp
index 64b33243131..93f48fae668 100644
--- a/src/mongo/db/ops/update_result.cpp
+++ b/src/mongo/db/ops/update_result.cpp
@@ -39,21 +39,20 @@
namespace mongo {
- UpdateResult::UpdateResult(bool existing_,
- bool modifiers_,
- unsigned long long numDocsModified_,
- unsigned long long numMatched_,
- const BSONObj& upsertedObject_)
- : existing(existing_),
- modifiers(modifiers_),
- numDocsModified(numDocsModified_),
- numMatched(numMatched_) {
-
- BSONElement id = upsertedObject_["_id"];
- if ( ! existing && numMatched == 1 && !id.eoo() ) {
- upserted = id.wrap(kUpsertedFieldName);
- }
- LOG(4) << "UpdateResult -- " << toString();
+UpdateResult::UpdateResult(bool existing_,
+ bool modifiers_,
+ unsigned long long numDocsModified_,
+ unsigned long long numMatched_,
+ const BSONObj& upsertedObject_)
+ : existing(existing_),
+ modifiers(modifiers_),
+ numDocsModified(numDocsModified_),
+ numMatched(numMatched_) {
+ BSONElement id = upsertedObject_["_id"];
+ if (!existing && numMatched == 1 && !id.eoo()) {
+ upserted = id.wrap(kUpsertedFieldName);
}
+ LOG(4) << "UpdateResult -- " << toString();
+}
} // namespace mongo
diff --git a/src/mongo/db/ops/update_result.h b/src/mongo/db/ops/update_result.h
index 568da353554..9c1c27c5a93 100644
--- a/src/mongo/db/ops/update_result.h
+++ b/src/mongo/db/ops/update_result.h
@@ -35,40 +35,36 @@
namespace mongo {
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- struct UpdateResult {
+struct UpdateResult {
+ UpdateResult(bool existing_,
+ bool modifiers_,
+ unsigned long long numDocsModified_,
+ unsigned long long numMatched_,
+ const BSONObj& upsertedObject_);
- UpdateResult( bool existing_,
- bool modifiers_,
- unsigned long long numDocsModified_,
- unsigned long long numMatched_,
- const BSONObj& upsertedObject_);
+ // if existing objects were modified
+ const bool existing;
- // if existing objects were modified
- const bool existing;
+ // was this a $ mod
+ const bool modifiers;
- // was this a $ mod
- const bool modifiers;
+ // how many docs updated
+ const long long numDocsModified;
- // how many docs updated
- const long long numDocsModified;
+ // how many docs seen by update
+ const long long numMatched;
- // how many docs seen by update
- const long long numMatched;
+ // if something was upserted, the new _id of the object
+ BSONObj upserted;
- // if something was upserted, the new _id of the object
- BSONObj upserted;
+ const std::string toString() const {
+ return str::stream() << " upserted: " << upserted << " modifiers: " << modifiers
+ << " existing: " << existing << " numDocsModified: " << numDocsModified
+ << " numMatched: " << numMatched;
+ }
+};
- const std::string toString() const {
- return str::stream()
- << " upserted: " << upserted
- << " modifiers: " << modifiers
- << " existing: " << existing
- << " numDocsModified: " << numDocsModified
- << " numMatched: " << numMatched;
- }
- };
-
-} // namespace mongo
+} // namespace mongo
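
A small, hypothetical helper showing how the UpdateResult fields above are typically interpreted; the function names are not part of the tree.

#include "mongo/db/ops/update_result.h"

namespace mongo {

// Illustrative only: 'upserted' is populated in the constructor above only when no
// existing document matched and the inserted document carried an _id.
bool wasUpsert(const UpdateResult& result) {
    return !result.existing && !result.upserted.isEmpty();
}

// Matched-but-unchanged documents count toward numMatched, not numDocsModified.
long long docsModified(const UpdateResult& result) {
    return result.numDocsModified;
}

}  // namespace mongo
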
diff --git a/src/mongo/db/pipeline/accumulator.h b/src/mongo/db/pipeline/accumulator.h
index 7fa6cd1a8a9..fcb4fe922e1 100644
--- a/src/mongo/db/pipeline/accumulator.h
+++ b/src/mongo/db/pipeline/accumulator.h
@@ -38,182 +38,182 @@
#include "mongo/db/pipeline/value.h"
namespace mongo {
- class Accumulator : public RefCountable {
- public:
- Accumulator() = default;
-
- /** Process input and update internal state.
- * merging should be true when processing outputs from getValue(true).
- */
- void process(const Value& input, bool merging) {
- processInternal(input, merging);
- }
-
- /** Marks the end of the evaluate() phase and return accumulated result.
- * toBeMerged should be true when the outputs will be merged by process().
- */
- virtual Value getValue(bool toBeMerged) const = 0;
-
- /// The name of the op as used in a serialization of the pipeline.
- virtual const char* getOpName() const = 0;
-
- int memUsageForSorter() const {
- dassert(_memUsageBytes != 0); // This would mean subclass didn't set it
- return _memUsageBytes;
- }
-
- /// Reset this accumulator to a fresh state ready to receive input.
- virtual void reset() = 0;
-
- protected:
- /// Update subclass's internal state based on input
- virtual void processInternal(const Value& input, bool merging) = 0;
-
- /// subclasses are expected to update this as necessary
- int _memUsageBytes = 0;
- };
+class Accumulator : public RefCountable {
+public:
+ Accumulator() = default;
+ /** Process input and update internal state.
+ * merging should be true when processing outputs from getValue(true).
+ */
+ void process(const Value& input, bool merging) {
+ processInternal(input, merging);
+ }
- class AccumulatorAddToSet final : public Accumulator {
- public:
- AccumulatorAddToSet();
+ /** Marks the end of the evaluate() phase and return accumulated result.
+ * toBeMerged should be true when the outputs will be merged by process().
+ */
+ virtual Value getValue(bool toBeMerged) const = 0;
- void processInternal(const Value& input, bool merging) final;
- Value getValue(bool toBeMerged) const final;
- const char* getOpName() const final;
- void reset() final;
+ /// The name of the op as used in a serialization of the pipeline.
+ virtual const char* getOpName() const = 0;
- static boost::intrusive_ptr<Accumulator> create();
+ int memUsageForSorter() const {
+ dassert(_memUsageBytes != 0); // This would mean subclass didn't set it
+ return _memUsageBytes;
+ }
- private:
- typedef boost::unordered_set<Value, Value::Hash> SetType;
- SetType set;
- };
+ /// Reset this accumulator to a fresh state ready to receive input.
+ virtual void reset() = 0;
+protected:
+ /// Update subclass's internal state based on input
+ virtual void processInternal(const Value& input, bool merging) = 0;
- class AccumulatorFirst final : public Accumulator {
- public:
- AccumulatorFirst();
+ /// subclasses are expected to update this as necessary
+ int _memUsageBytes = 0;
+};
- void processInternal(const Value& input, bool merging) final;
- Value getValue(bool toBeMerged) const final;
- const char* getOpName() const final;
- void reset() final;
- static boost::intrusive_ptr<Accumulator> create();
+class AccumulatorAddToSet final : public Accumulator {
+public:
+ AccumulatorAddToSet();
- private:
- bool _haveFirst;
- Value _first;
- };
+ void processInternal(const Value& input, bool merging) final;
+ Value getValue(bool toBeMerged) const final;
+ const char* getOpName() const final;
+ void reset() final;
+ static boost::intrusive_ptr<Accumulator> create();
- class AccumulatorLast final : public Accumulator {
- public:
- AccumulatorLast();
+private:
+ typedef boost::unordered_set<Value, Value::Hash> SetType;
+ SetType set;
+};
- void processInternal(const Value& input, bool merging) final;
- Value getValue(bool toBeMerged) const final;
- const char* getOpName() const final;
- void reset() final;
- static boost::intrusive_ptr<Accumulator> create();
+class AccumulatorFirst final : public Accumulator {
+public:
+ AccumulatorFirst();
- private:
- Value _last;
- };
+ void processInternal(const Value& input, bool merging) final;
+ Value getValue(bool toBeMerged) const final;
+ const char* getOpName() const final;
+ void reset() final;
+ static boost::intrusive_ptr<Accumulator> create();
- class AccumulatorSum final : public Accumulator {
- public:
- AccumulatorSum();
+private:
+ bool _haveFirst;
+ Value _first;
+};
- void processInternal(const Value& input, bool merging) final;
- Value getValue(bool toBeMerged) const final;
- const char* getOpName() const final;
- void reset() final;
- static boost::intrusive_ptr<Accumulator> create();
+class AccumulatorLast final : public Accumulator {
+public:
+ AccumulatorLast();
+
+ void processInternal(const Value& input, bool merging) final;
+ Value getValue(bool toBeMerged) const final;
+ const char* getOpName() const final;
+ void reset() final;
+
+ static boost::intrusive_ptr<Accumulator> create();
+
+private:
+ Value _last;
+};
- private:
- BSONType totalType;
- long long longTotal;
- double doubleTotal;
- };
+class AccumulatorSum final : public Accumulator {
+public:
+ AccumulatorSum();
- class AccumulatorMinMax final : public Accumulator {
- public:
- enum Sense : int {
- MIN = 1,
- MAX = -1, // Used to "scale" comparison.
- };
+ void processInternal(const Value& input, bool merging) final;
+ Value getValue(bool toBeMerged) const final;
+ const char* getOpName() const final;
+ void reset() final;
- explicit AccumulatorMinMax(Sense sense);
+ static boost::intrusive_ptr<Accumulator> create();
- void processInternal(const Value& input, bool merging) final;
- Value getValue(bool toBeMerged) const final;
- const char* getOpName() const final;
- void reset() final;
+private:
+ BSONType totalType;
+ long long longTotal;
+ double doubleTotal;
+};
- static boost::intrusive_ptr<Accumulator> createMin();
- static boost::intrusive_ptr<Accumulator> createMax();
- private:
- Value _val;
- const Sense _sense;
+class AccumulatorMinMax final : public Accumulator {
+public:
+ enum Sense : int {
+ MIN = 1,
+ MAX = -1, // Used to "scale" comparison.
};
+ explicit AccumulatorMinMax(Sense sense);
- class AccumulatorPush final : public Accumulator {
- public:
- AccumulatorPush();
+ void processInternal(const Value& input, bool merging) final;
+ Value getValue(bool toBeMerged) const final;
+ const char* getOpName() const final;
+ void reset() final;
- void processInternal(const Value& input, bool merging) final;
- Value getValue(bool toBeMerged) const final;
- const char* getOpName() const final;
- void reset() final;
+ static boost::intrusive_ptr<Accumulator> createMin();
+ static boost::intrusive_ptr<Accumulator> createMax();
- static boost::intrusive_ptr<Accumulator> create();
+private:
+ Value _val;
+ const Sense _sense;
+};
- private:
- std::vector<Value> vpValue;
- };
+class AccumulatorPush final : public Accumulator {
+public:
+ AccumulatorPush();
- class AccumulatorAvg final : public Accumulator {
- public:
- AccumulatorAvg();
+ void processInternal(const Value& input, bool merging) final;
+ Value getValue(bool toBeMerged) const final;
+ const char* getOpName() const final;
+ void reset() final;
- void processInternal(const Value& input, bool merging) final;
- Value getValue(bool toBeMerged) const final;
- const char* getOpName() const final;
- void reset() final;
+ static boost::intrusive_ptr<Accumulator> create();
- static boost::intrusive_ptr<Accumulator> create();
+private:
+ std::vector<Value> vpValue;
+};
- private:
- double _total;
- long long _count;
- };
+class AccumulatorAvg final : public Accumulator {
+public:
+ AccumulatorAvg();
- class AccumulatorStdDev final : public Accumulator {
- public:
- explicit AccumulatorStdDev(bool isSamp);
+ void processInternal(const Value& input, bool merging) final;
+ Value getValue(bool toBeMerged) const final;
+ const char* getOpName() const final;
+ void reset() final;
- void processInternal(const Value& input, bool merging) final;
- Value getValue(bool toBeMerged) const final;
- const char* getOpName() const final;
- void reset() final;
+ static boost::intrusive_ptr<Accumulator> create();
- static boost::intrusive_ptr<Accumulator> createSamp();
- static boost::intrusive_ptr<Accumulator> createPop();
+private:
+ double _total;
+ long long _count;
+};
- private:
- const bool _isSamp;
- long long _count;
- double _mean;
- double _m2; // Running sum of squares of delta from mean. Named to match algorithm.
- };
+
+class AccumulatorStdDev final : public Accumulator {
+public:
+ explicit AccumulatorStdDev(bool isSamp);
+
+ void processInternal(const Value& input, bool merging) final;
+ Value getValue(bool toBeMerged) const final;
+ const char* getOpName() const final;
+ void reset() final;
+
+ static boost::intrusive_ptr<Accumulator> createSamp();
+ static boost::intrusive_ptr<Accumulator> createPop();
+
+private:
+ const bool _isSamp;
+ long long _count;
+ double _mean;
+ double _m2; // Running sum of squares of delta from mean. Named to match algorithm.
+};
}
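
A hedged sketch of the two-phase process()/getValue() contract documented in the Accumulator class above, using $sum; the shard/merger split shown here is illustrative, not a copy of any pipeline code.

#include "mongo/db/pipeline/accumulator.h"
#include "mongo/db/pipeline/value.h"

namespace mongo {

// Illustrative only: a shard-side accumulator's partial result, produced with
// getValue(true), is re-consumed by a merger-side accumulator with merging=true.
Value sumAcrossTwoPhases() {
    boost::intrusive_ptr<Accumulator> shardSide = AccumulatorSum::create();
    shardSide->process(Value(1), false /* merging */);
    shardSide->process(Value(2), false /* merging */);

    Value partial = shardSide->getValue(true /* toBeMerged */);

    boost::intrusive_ptr<Accumulator> mergerSide = AccumulatorSum::create();
    mergerSide->process(partial, true /* merging */);
    return mergerSide->getValue(false /* toBeMerged */);  // Value(3)
}

}  // namespace mongo
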
diff --git a/src/mongo/db/pipeline/accumulator_add_to_set.cpp b/src/mongo/db/pipeline/accumulator_add_to_set.cpp
index bd151964ffa..bf2bc183afd 100644
--- a/src/mongo/db/pipeline/accumulator_add_to_set.cpp
+++ b/src/mongo/db/pipeline/accumulator_add_to_set.cpp
@@ -34,53 +34,52 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::vector;
+using boost::intrusive_ptr;
+using std::vector;
- void AccumulatorAddToSet::processInternal(const Value& input, bool merging) {
- if (!merging) {
- if (!input.missing()) {
- bool inserted = set.insert(input).second;
- if (inserted) {
- _memUsageBytes += input.getApproximateSize();
- }
+void AccumulatorAddToSet::processInternal(const Value& input, bool merging) {
+ if (!merging) {
+ if (!input.missing()) {
+ bool inserted = set.insert(input).second;
+ if (inserted) {
+ _memUsageBytes += input.getApproximateSize();
}
}
- else {
- // If we're merging, we need to take apart the arrays we
- // receive and put their elements into the array we are collecting.
- // If we didn't, then we'd get an array of arrays, with one array
- // from each merge source.
- verify(input.getType() == Array);
-
- const vector<Value>& array = input.getArray();
- for (size_t i=0; i < array.size(); i++) {
- bool inserted = set.insert(array[i]).second;
- if (inserted) {
- _memUsageBytes += array[i].getApproximateSize();
- }
+ } else {
+ // If we're merging, we need to take apart the arrays we
+ // receive and put their elements into the array we are collecting.
+ // If we didn't, then we'd get an array of arrays, with one array
+ // from each merge source.
+ verify(input.getType() == Array);
+
+ const vector<Value>& array = input.getArray();
+ for (size_t i = 0; i < array.size(); i++) {
+ bool inserted = set.insert(array[i]).second;
+ if (inserted) {
+ _memUsageBytes += array[i].getApproximateSize();
}
}
}
+}
- Value AccumulatorAddToSet::getValue(bool toBeMerged) const {
- return Value(vector<Value>(set.begin(), set.end()));
- }
+Value AccumulatorAddToSet::getValue(bool toBeMerged) const {
+ return Value(vector<Value>(set.begin(), set.end()));
+}
- AccumulatorAddToSet::AccumulatorAddToSet() {
- _memUsageBytes = sizeof(*this);
- }
+AccumulatorAddToSet::AccumulatorAddToSet() {
+ _memUsageBytes = sizeof(*this);
+}
- void AccumulatorAddToSet::reset() {
- SetType().swap(set);
- _memUsageBytes = sizeof(*this);
- }
+void AccumulatorAddToSet::reset() {
+ SetType().swap(set);
+ _memUsageBytes = sizeof(*this);
+}
- intrusive_ptr<Accumulator> AccumulatorAddToSet::create() {
- return new AccumulatorAddToSet();
- }
+intrusive_ptr<Accumulator> AccumulatorAddToSet::create() {
+ return new AccumulatorAddToSet();
+}
- const char *AccumulatorAddToSet::getOpName() const {
- return "$addToSet";
- }
+const char* AccumulatorAddToSet::getOpName() const {
+ return "$addToSet";
+}
}
diff --git a/src/mongo/db/pipeline/accumulator_avg.cpp b/src/mongo/db/pipeline/accumulator_avg.cpp
index 6378dac13e8..ad027e7709d 100644
--- a/src/mongo/db/pipeline/accumulator_avg.cpp
+++ b/src/mongo/db/pipeline/accumulator_avg.cpp
@@ -35,62 +35,56 @@
namespace mongo {
- using boost::intrusive_ptr;
+using boost::intrusive_ptr;
namespace {
- const char subTotalName[] = "subTotal";
- const char countName[] = "count";
+const char subTotalName[] = "subTotal";
+const char countName[] = "count";
}
- void AccumulatorAvg::processInternal(const Value& input, bool merging) {
- if (!merging) {
- // non numeric types have no impact on average
- if (!input.numeric())
- return;
+void AccumulatorAvg::processInternal(const Value& input, bool merging) {
+ if (!merging) {
+ // non numeric types have no impact on average
+ if (!input.numeric())
+ return;
- _total += input.getDouble();
- _count += 1;
- }
- else {
- // We expect an object that contains both a subtotal and a count.
- // This is what getValue(true) produced below.
- verify(input.getType() == Object);
- _total += input[subTotalName].getDouble();
- _count += input[countName].getLong();
- }
+ _total += input.getDouble();
+ _count += 1;
+ } else {
+ // We expect an object that contains both a subtotal and a count.
+ // This is what getValue(true) produced below.
+ verify(input.getType() == Object);
+ _total += input[subTotalName].getDouble();
+ _count += input[countName].getLong();
}
+}
- intrusive_ptr<Accumulator> AccumulatorAvg::create() {
- return new AccumulatorAvg();
- }
+intrusive_ptr<Accumulator> AccumulatorAvg::create() {
+ return new AccumulatorAvg();
+}
- Value AccumulatorAvg::getValue(bool toBeMerged) const {
- if (!toBeMerged) {
- if (_count == 0)
- return Value(0.0);
+Value AccumulatorAvg::getValue(bool toBeMerged) const {
+ if (!toBeMerged) {
+ if (_count == 0)
+ return Value(0.0);
- return Value(_total / static_cast<double>(_count));
- }
- else {
- return Value(DOC(subTotalName << _total
- << countName << _count));
- }
+ return Value(_total / static_cast<double>(_count));
+ } else {
+ return Value(DOC(subTotalName << _total << countName << _count));
}
+}
- AccumulatorAvg::AccumulatorAvg()
- : _total(0)
- , _count(0)
- {
- // This is a fixed size Accumulator so we never need to update this
- _memUsageBytes = sizeof(*this);
- }
+AccumulatorAvg::AccumulatorAvg() : _total(0), _count(0) {
+ // This is a fixed size Accumulator so we never need to update this
+ _memUsageBytes = sizeof(*this);
+}
- void AccumulatorAvg::reset() {
- _total = 0;
- _count = 0;
- }
+void AccumulatorAvg::reset() {
+ _total = 0;
+ _count = 0;
+}
- const char *AccumulatorAvg::getOpName() const {
- return "$avg";
- }
+const char* AccumulatorAvg::getOpName() const {
+ return "$avg";
+}
}
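
$avg cannot merge bare averages, which is why getValue(true) above emits a {subTotal, count} document that the merging branch re-consumes. A plain C++ sketch of the same arithmetic, independent of the aggregation framework:

// Illustrative only: merge partial averages the way AccumulatorAvg does, by
// carrying (subTotal, count) pairs rather than the averages themselves.
struct PartialAvg {
    double subTotal = 0.0;
    long long count = 0;
};

PartialAvg mergePartials(const PartialAvg& a, const PartialAvg& b) {
    return {a.subTotal + b.subTotal, a.count + b.count};
}

double finalizeAvg(const PartialAvg& p) {
    return p.count == 0 ? 0.0 : p.subTotal / static_cast<double>(p.count);
}
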
diff --git a/src/mongo/db/pipeline/accumulator_first.cpp b/src/mongo/db/pipeline/accumulator_first.cpp
index ed49fe163f1..9425199793f 100644
--- a/src/mongo/db/pipeline/accumulator_first.cpp
+++ b/src/mongo/db/pipeline/accumulator_first.cpp
@@ -33,40 +33,38 @@
namespace mongo {
- using boost::intrusive_ptr;
+using boost::intrusive_ptr;
- void AccumulatorFirst::processInternal(const Value& input, bool merging) {
- /* only remember the first value seen */
- if (!_haveFirst) {
- // can't use pValue.missing() since we want the first value even if missing
- _haveFirst = true;
- _first = input;
- _memUsageBytes = sizeof(*this) + input.getApproximateSize() - sizeof(Value);
- }
+void AccumulatorFirst::processInternal(const Value& input, bool merging) {
+ /* only remember the first value seen */
+ if (!_haveFirst) {
+ // can't use pValue.missing() since we want the first value even if missing
+ _haveFirst = true;
+ _first = input;
+ _memUsageBytes = sizeof(*this) + input.getApproximateSize() - sizeof(Value);
}
+}
- Value AccumulatorFirst::getValue(bool toBeMerged) const {
- return _first;
- }
+Value AccumulatorFirst::getValue(bool toBeMerged) const {
+ return _first;
+}
- AccumulatorFirst::AccumulatorFirst()
- : _haveFirst(false)
- {
- _memUsageBytes = sizeof(*this);
- }
+AccumulatorFirst::AccumulatorFirst() : _haveFirst(false) {
+ _memUsageBytes = sizeof(*this);
+}
- void AccumulatorFirst::reset() {
- _haveFirst = false;
- _first = Value();
- _memUsageBytes = sizeof(*this);
- }
+void AccumulatorFirst::reset() {
+ _haveFirst = false;
+ _first = Value();
+ _memUsageBytes = sizeof(*this);
+}
- intrusive_ptr<Accumulator> AccumulatorFirst::create() {
- return new AccumulatorFirst();
- }
+intrusive_ptr<Accumulator> AccumulatorFirst::create() {
+ return new AccumulatorFirst();
+}
- const char *AccumulatorFirst::getOpName() const {
- return "$first";
- }
+const char* AccumulatorFirst::getOpName() const {
+ return "$first";
+}
}
diff --git a/src/mongo/db/pipeline/accumulator_last.cpp b/src/mongo/db/pipeline/accumulator_last.cpp
index 4b24cf828b7..c9f4b487d1c 100644
--- a/src/mongo/db/pipeline/accumulator_last.cpp
+++ b/src/mongo/db/pipeline/accumulator_last.cpp
@@ -33,32 +33,32 @@
namespace mongo {
- using boost::intrusive_ptr;
+using boost::intrusive_ptr;
- void AccumulatorLast::processInternal(const Value& input, bool merging) {
- /* always remember the last value seen */
- _last = input;
- _memUsageBytes = sizeof(*this) + _last.getApproximateSize() - sizeof(Value);
- }
+void AccumulatorLast::processInternal(const Value& input, bool merging) {
+ /* always remember the last value seen */
+ _last = input;
+ _memUsageBytes = sizeof(*this) + _last.getApproximateSize() - sizeof(Value);
+}
- Value AccumulatorLast::getValue(bool toBeMerged) const {
- return _last;
- }
+Value AccumulatorLast::getValue(bool toBeMerged) const {
+ return _last;
+}
- AccumulatorLast::AccumulatorLast() {
- _memUsageBytes = sizeof(*this);
- }
+AccumulatorLast::AccumulatorLast() {
+ _memUsageBytes = sizeof(*this);
+}
- void AccumulatorLast::reset() {
- _memUsageBytes = sizeof(*this);
- _last = Value();
- }
+void AccumulatorLast::reset() {
+ _memUsageBytes = sizeof(*this);
+ _last = Value();
+}
- intrusive_ptr<Accumulator> AccumulatorLast::create() {
- return new AccumulatorLast();
- }
+intrusive_ptr<Accumulator> AccumulatorLast::create() {
+ return new AccumulatorLast();
+}
- const char *AccumulatorLast::getOpName() const {
- return "$last";
- }
+const char* AccumulatorLast::getOpName() const {
+ return "$last";
+}
}
diff --git a/src/mongo/db/pipeline/accumulator_min_max.cpp b/src/mongo/db/pipeline/accumulator_min_max.cpp
index 8f7d857de76..da4f280f797 100644
--- a/src/mongo/db/pipeline/accumulator_min_max.cpp
+++ b/src/mongo/db/pipeline/accumulator_min_max.cpp
@@ -33,45 +33,44 @@
namespace mongo {
- using boost::intrusive_ptr;
+using boost::intrusive_ptr;
- void AccumulatorMinMax::processInternal(const Value& input, bool merging) {
- // nullish values should have no impact on result
- if (!input.nullish()) {
- /* compare with the current value; swap if appropriate */
- int cmp = Value::compare(_val, input) * _sense;
- if (cmp > 0 || _val.missing()) { // missing is lower than all other values
- _val = input;
- _memUsageBytes = sizeof(*this) + input.getApproximateSize() - sizeof(Value);
- }
+void AccumulatorMinMax::processInternal(const Value& input, bool merging) {
+ // nullish values should have no impact on result
+ if (!input.nullish()) {
+ /* compare with the current value; swap if appropriate */
+ int cmp = Value::compare(_val, input) * _sense;
+ if (cmp > 0 || _val.missing()) { // missing is lower than all other values
+ _val = input;
+ _memUsageBytes = sizeof(*this) + input.getApproximateSize() - sizeof(Value);
}
}
+}
- Value AccumulatorMinMax::getValue(bool toBeMerged) const {
- return _val;
- }
+Value AccumulatorMinMax::getValue(bool toBeMerged) const {
+ return _val;
+}
- AccumulatorMinMax::AccumulatorMinMax(Sense sense) :
- _sense(sense) {
- _memUsageBytes = sizeof(*this);
- }
+AccumulatorMinMax::AccumulatorMinMax(Sense sense) : _sense(sense) {
+ _memUsageBytes = sizeof(*this);
+}
- void AccumulatorMinMax::reset() {
- _val = Value();
- _memUsageBytes = sizeof(*this);
- }
+void AccumulatorMinMax::reset() {
+ _val = Value();
+ _memUsageBytes = sizeof(*this);
+}
- intrusive_ptr<Accumulator> AccumulatorMinMax::createMin() {
- return new AccumulatorMinMax(Sense::MIN);
- }
+intrusive_ptr<Accumulator> AccumulatorMinMax::createMin() {
+ return new AccumulatorMinMax(Sense::MIN);
+}
- intrusive_ptr<Accumulator> AccumulatorMinMax::createMax() {
- return new AccumulatorMinMax(Sense::MAX);
- }
+intrusive_ptr<Accumulator> AccumulatorMinMax::createMax() {
+ return new AccumulatorMinMax(Sense::MAX);
+}
- const char *AccumulatorMinMax::getOpName() const {
- if (_sense == 1)
- return "$min";
- return "$max";
- }
+const char* AccumulatorMinMax::getOpName() const {
+ if (_sense == 1)
+ return "$min";
+ return "$max";
+}
}
diff --git a/src/mongo/db/pipeline/accumulator_push.cpp b/src/mongo/db/pipeline/accumulator_push.cpp
index e7a2b6b5514..b19ec08f71d 100644
--- a/src/mongo/db/pipeline/accumulator_push.cpp
+++ b/src/mongo/db/pipeline/accumulator_push.cpp
@@ -34,50 +34,49 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::vector;
+using boost::intrusive_ptr;
+using std::vector;
- void AccumulatorPush::processInternal(const Value& input, bool merging) {
- if (!merging) {
- if (!input.missing()) {
- vpValue.push_back(input);
- _memUsageBytes += input.getApproximateSize();
- }
+void AccumulatorPush::processInternal(const Value& input, bool merging) {
+ if (!merging) {
+ if (!input.missing()) {
+ vpValue.push_back(input);
+ _memUsageBytes += input.getApproximateSize();
}
- else {
- // If we're merging, we need to take apart the arrays we
- // receive and put their elements into the array we are collecting.
- // If we didn't, then we'd get an array of arrays, with one array
- // from each merge source.
- verify(input.getType() == Array);
-
- const vector<Value>& vec = input.getArray();
- vpValue.insert(vpValue.end(), vec.begin(), vec.end());
+ } else {
+ // If we're merging, we need to take apart the arrays we
+ // receive and put their elements into the array we are collecting.
+ // If we didn't, then we'd get an array of arrays, with one array
+ // from each merge source.
+ verify(input.getType() == Array);
- for (size_t i=0; i < vec.size(); i++) {
- _memUsageBytes += vec[i].getApproximateSize();
- }
+ const vector<Value>& vec = input.getArray();
+ vpValue.insert(vpValue.end(), vec.begin(), vec.end());
+
+ for (size_t i = 0; i < vec.size(); i++) {
+ _memUsageBytes += vec[i].getApproximateSize();
}
}
+}
- Value AccumulatorPush::getValue(bool toBeMerged) const {
- return Value(vpValue);
- }
+Value AccumulatorPush::getValue(bool toBeMerged) const {
+ return Value(vpValue);
+}
- AccumulatorPush::AccumulatorPush() {
- _memUsageBytes = sizeof(*this);
- }
+AccumulatorPush::AccumulatorPush() {
+ _memUsageBytes = sizeof(*this);
+}
- void AccumulatorPush::reset() {
- vector<Value>().swap(vpValue);
- _memUsageBytes = sizeof(*this);
- }
+void AccumulatorPush::reset() {
+ vector<Value>().swap(vpValue);
+ _memUsageBytes = sizeof(*this);
+}
- intrusive_ptr<Accumulator> AccumulatorPush::create() {
- return new AccumulatorPush();
- }
+intrusive_ptr<Accumulator> AccumulatorPush::create() {
+ return new AccumulatorPush();
+}
- const char *AccumulatorPush::getOpName() const {
- return "$push";
- }
+const char* AccumulatorPush::getOpName() const {
+ return "$push";
+}
}
diff --git a/src/mongo/db/pipeline/accumulator_std_dev.cpp b/src/mongo/db/pipeline/accumulator_std_dev.cpp
index 76957c3d112..b51a21bbe6f 100644
--- a/src/mongo/db/pipeline/accumulator_std_dev.cpp
+++ b/src/mongo/db/pipeline/accumulator_std_dev.cpp
@@ -34,84 +34,75 @@
#include "mongo/db/pipeline/value.h"
namespace mongo {
- using boost::intrusive_ptr;
-
- void AccumulatorStdDev::processInternal(const Value& input, bool merging) {
- if (!merging) {
- // non numeric types have no impact on standard deviation
- if (!input.numeric())
- return;
-
- const double val = input.getDouble();
-
- // This is an implementation of the following algorithm:
- // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
- _count += 1;
- const double delta = val - _mean;
- _mean += delta / _count;
- _m2 += delta * (val - _mean);
- }
- else {
- // This is what getValue(true) produced below.
- verify(input.getType() == Object);
- const double m2 = input["m2"].getDouble();
- const double mean = input["mean"].getDouble();
- const long long count = input["count"].getLong();
-
- if (count == 0)
- return; // This partition had no data to contribute.
-
- // This is an implementation of the following algorithm:
- // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
- const double delta = mean - _mean;
- const long long newCount = count + _count;
-
- _mean = ((_count * _mean) + (count * mean)) / newCount;
- _m2 += m2 + (delta * delta * (double(_count) * count / newCount));
- _count = newCount;
- }
+using boost::intrusive_ptr;
+
+void AccumulatorStdDev::processInternal(const Value& input, bool merging) {
+ if (!merging) {
+ // non numeric types have no impact on standard deviation
+ if (!input.numeric())
+ return;
+
+ const double val = input.getDouble();
+
+ // This is an implementation of the following algorithm:
+ // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
+ _count += 1;
+ const double delta = val - _mean;
+ _mean += delta / _count;
+ _m2 += delta * (val - _mean);
+ } else {
+ // This is what getValue(true) produced below.
+ verify(input.getType() == Object);
+ const double m2 = input["m2"].getDouble();
+ const double mean = input["mean"].getDouble();
+ const long long count = input["count"].getLong();
+
+ if (count == 0)
+ return; // This partition had no data to contribute.
+
+ // This is an implementation of the following algorithm:
+ // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
+ const double delta = mean - _mean;
+ const long long newCount = count + _count;
+
+ _mean = ((_count * _mean) + (count * mean)) / newCount;
+ _m2 += m2 + (delta * delta * (double(_count) * count / newCount));
+ _count = newCount;
}
+}
- Value AccumulatorStdDev::getValue(bool toBeMerged) const {
- if (!toBeMerged) {
- const long long adjustedCount = (_isSamp ? _count - 1 : _count);
- if (adjustedCount <= 0)
- return Value(BSONNULL); // standard deviation not well defined in this case
-
- return Value(sqrt(_m2 / adjustedCount));
- }
- else {
- return Value(DOC("m2" << _m2
- << "mean" << _mean
- << "count" << _count));
- }
- }
+Value AccumulatorStdDev::getValue(bool toBeMerged) const {
+ if (!toBeMerged) {
+ const long long adjustedCount = (_isSamp ? _count - 1 : _count);
+ if (adjustedCount <= 0)
+ return Value(BSONNULL); // standard deviation not well defined in this case
- intrusive_ptr<Accumulator> AccumulatorStdDev::createSamp() {
- return new AccumulatorStdDev(true);
+ return Value(sqrt(_m2 / adjustedCount));
+ } else {
+ return Value(DOC("m2" << _m2 << "mean" << _mean << "count" << _count));
}
+}
- intrusive_ptr<Accumulator> AccumulatorStdDev::createPop() {
- return new AccumulatorStdDev(false);
- }
+intrusive_ptr<Accumulator> AccumulatorStdDev::createSamp() {
+ return new AccumulatorStdDev(true);
+}
- AccumulatorStdDev::AccumulatorStdDev(bool isSamp)
- : _isSamp(isSamp)
- , _count(0)
- , _mean(0)
- , _m2(0)
- {
- // This is a fixed size Accumulator so we never need to update this
- _memUsageBytes = sizeof(*this);
- }
+intrusive_ptr<Accumulator> AccumulatorStdDev::createPop() {
+ return new AccumulatorStdDev(false);
+}
- void AccumulatorStdDev::reset() {
- _count = 0;
- _mean = 0;
- _m2 = 0;
- }
+AccumulatorStdDev::AccumulatorStdDev(bool isSamp) : _isSamp(isSamp), _count(0), _mean(0), _m2(0) {
+ // This is a fixed size Accumulator so we never need to update this
+ _memUsageBytes = sizeof(*this);
+}
- const char *AccumulatorStdDev::getOpName() const {
- return (_isSamp ? "$stdDevSamp" : "$stdDevPop");
- }
+void AccumulatorStdDev::reset() {
+ _count = 0;
+ _mean = 0;
+ _m2 = 0;
+}
+
+const char* AccumulatorStdDev::getOpName() const {
+ return (_isSamp ? "$stdDevSamp" : "$stdDevPop");
+}
}
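
The $stdDev accumulators above implement Welford's online algorithm plus the parallel merge step cited in the comments. A standalone sketch of both, free of any aggregation-framework types:

#include <cmath>

// Illustrative only: the same online/parallel variance bookkeeping used by
// AccumulatorStdDev (_count, _mean, _m2).
struct RunningStdDev {
    long long count = 0;
    double mean = 0.0;
    double m2 = 0.0;  // sum of squared deltas from the running mean

    void add(double x) {  // online (Welford) update
        ++count;
        const double delta = x - mean;
        mean += delta / count;
        m2 += delta * (x - mean);
    }

    void merge(const RunningStdDev& other) {  // parallel merge of two partitions
        if (other.count == 0)
            return;
        const double delta = other.mean - mean;
        const long long newCount = count + other.count;
        mean = (count * mean + other.count * other.mean) / newCount;
        m2 += other.m2 + delta * delta * (static_cast<double>(count) * other.count / newCount);
        count = newCount;
    }

    double sampleStdDev() const {  // $stdDevSamp: not defined for fewer than two values
        return count > 1 ? std::sqrt(m2 / (count - 1)) : 0.0;
    }

    double populationStdDev() const {  // $stdDevPop
        return count > 0 ? std::sqrt(m2 / count) : 0.0;
    }
};
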
diff --git a/src/mongo/db/pipeline/accumulator_sum.cpp b/src/mongo/db/pipeline/accumulator_sum.cpp
index 9076a324dfa..4da24904078 100644
--- a/src/mongo/db/pipeline/accumulator_sum.cpp
+++ b/src/mongo/db/pipeline/accumulator_sum.cpp
@@ -33,66 +33,57 @@
namespace mongo {
- using boost::intrusive_ptr;
+using boost::intrusive_ptr;
- void AccumulatorSum::processInternal(const Value& input, bool merging) {
- // do nothing with non numeric types
- if (!input.numeric())
- return;
+void AccumulatorSum::processInternal(const Value& input, bool merging) {
+ // do nothing with non numeric types
+ if (!input.numeric())
+ return;
- // upgrade to the widest type required to hold the result
- totalType = Value::getWidestNumeric(totalType, input.getType());
+ // upgrade to the widest type required to hold the result
+ totalType = Value::getWidestNumeric(totalType, input.getType());
- if (totalType == NumberInt || totalType == NumberLong) {
- long long v = input.coerceToLong();
- longTotal += v;
- doubleTotal += v;
- }
- else if (totalType == NumberDouble) {
- double v = input.coerceToDouble();
- doubleTotal += v;
- }
- else {
- // non numerics should have returned above so we should never get here
- verify(false);
- }
+ if (totalType == NumberInt || totalType == NumberLong) {
+ long long v = input.coerceToLong();
+ longTotal += v;
+ doubleTotal += v;
+ } else if (totalType == NumberDouble) {
+ double v = input.coerceToDouble();
+ doubleTotal += v;
+ } else {
+ // non numerics should have returned above so we should never get here
+ verify(false);
}
+}
- intrusive_ptr<Accumulator> AccumulatorSum::create() {
- return new AccumulatorSum();
- }
+intrusive_ptr<Accumulator> AccumulatorSum::create() {
+ return new AccumulatorSum();
+}
- Value AccumulatorSum::getValue(bool toBeMerged) const {
- if (totalType == NumberLong) {
- return Value(longTotal);
- }
- else if (totalType == NumberDouble) {
- return Value(doubleTotal);
- }
- else if (totalType == NumberInt) {
- return Value::createIntOrLong(longTotal);
- }
- else {
- massert(16000, "$sum resulted in a non-numeric type", false);
- }
+Value AccumulatorSum::getValue(bool toBeMerged) const {
+ if (totalType == NumberLong) {
+ return Value(longTotal);
+ } else if (totalType == NumberDouble) {
+ return Value(doubleTotal);
+ } else if (totalType == NumberInt) {
+ return Value::createIntOrLong(longTotal);
+ } else {
+ massert(16000, "$sum resulted in a non-numeric type", false);
}
+}
- AccumulatorSum::AccumulatorSum()
- : totalType(NumberInt)
- , longTotal(0)
- , doubleTotal(0)
- {
- // This is a fixed size Accumulator so we never need to update this
- _memUsageBytes = sizeof(*this);
- }
+AccumulatorSum::AccumulatorSum() : totalType(NumberInt), longTotal(0), doubleTotal(0) {
+ // This is a fixed size Accumulator so we never need to update this
+ _memUsageBytes = sizeof(*this);
+}
- void AccumulatorSum::reset() {
- totalType = NumberInt;
- longTotal = 0;
- doubleTotal = 0;
- }
+void AccumulatorSum::reset() {
+ totalType = NumberInt;
+ longTotal = 0;
+ doubleTotal = 0;
+}
- const char *AccumulatorSum::getOpName() const {
- return "$sum";
- }
+const char* AccumulatorSum::getOpName() const {
+ return "$sum";
+}
}
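AccumulatorSum above keeps both a long long and a double running total and widens totalType as values arrive, so integer-only input stays exact while any double input switches the result to a double. A standalone sketch of that widening policy, with NumType and widest as hypothetical stand-ins for the real BSONType and Value::getWidestNumeric:

#include <cstdio>

// Hypothetical stand-in for the BSON numeric types the accumulator sees.
enum class NumType { Int, Long, Double };

// Widening rule sketched from the behavior above: any double makes the result
// a double; otherwise a long wins over an int.
NumType widest(NumType a, NumType b) {
    if (a == NumType::Double || b == NumType::Double)
        return NumType::Double;
    if (a == NumType::Long || b == NumType::Long)
        return NumType::Long;
    return NumType::Int;
}

struct SumSketch {
    NumType totalType = NumType::Int;
    long long longTotal = 0;
    double doubleTotal = 0.0;

    void add(double v, NumType t) {
        totalType = widest(totalType, t);
        if (totalType == NumType::Double) {
            doubleTotal += v;
        } else {
            // The integral path updates both totals, as the accumulator does,
            // so a later switch to double does not lose the running sum.
            const long long asLong = static_cast<long long>(v);
            longTotal += asLong;
            doubleTotal += asLong;
        }
    }
};

int main() {
    SumSketch s;
    s.add(1, NumType::Int);
    s.add(2, NumType::Long);
    std::printf("exact long total: %lld\n", s.longTotal);  // 3
    s.add(0.5, NumType::Double);
    std::printf("double total: %f\n", s.doubleTotal);      // 3.5
    return 0;
}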
diff --git a/src/mongo/db/pipeline/dependencies.cpp b/src/mongo/db/pipeline/dependencies.cpp
index 128d46bd703..30ced50e06a 100644
--- a/src/mongo/db/pipeline/dependencies.cpp
+++ b/src/mongo/db/pipeline/dependencies.cpp
@@ -35,146 +35,148 @@
namespace mongo {
- using std::set;
- using std::string;
- using std::vector;
+using std::set;
+using std::string;
+using std::vector;
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- BSONObj DepsTracker::toProjection() const {
- BSONObjBuilder bb;
+BSONObj DepsTracker::toProjection() const {
+ BSONObjBuilder bb;
- if (needTextScore)
- bb.append(Document::metaFieldTextScore, BSON("$meta" << "textScore"));
+ if (needTextScore)
+ bb.append(Document::metaFieldTextScore,
+ BSON("$meta"
+ << "textScore"));
- if (needWholeDocument)
- return bb.obj();
+ if (needWholeDocument)
+ return bb.obj();
- if (fields.empty()) {
- // Projection language lacks good a way to say no fields needed. This fakes it.
- bb.append("_id", 0);
- bb.append("$noFieldsNeeded", 1);
- return bb.obj();
- }
+ if (fields.empty()) {
+        // Projection language lacks a good way to say no fields needed. This fakes it.
+ bb.append("_id", 0);
+ bb.append("$noFieldsNeeded", 1);
+ return bb.obj();
+ }
- bool needId = false;
- string last;
- for (set<string>::const_iterator it(fields.begin()), end(fields.end()); it!=end; ++it) {
- if (str::startsWith(*it, "_id") && (it->size() == 3 || (*it)[3] == '.')) {
- // _id and subfields are handled specially due in part to SERVER-7502
- needId = true;
- continue;
- }
-
- if (!last.empty() && str::startsWith(*it, last)) {
- // we are including a parent of *it so we don't need to include this field
- // explicitly. In fact, due to SERVER-6527 if we included this field, the parent
- // wouldn't be fully included. This logic relies on on set iterators going in
- // lexicographic order so that a string is always directly before of all fields it
- // prefixes.
- continue;
- }
-
- last = *it + '.';
- bb.append(*it, 1);
+ bool needId = false;
+ string last;
+ for (set<string>::const_iterator it(fields.begin()), end(fields.end()); it != end; ++it) {
+ if (str::startsWith(*it, "_id") && (it->size() == 3 || (*it)[3] == '.')) {
+ // _id and subfields are handled specially due in part to SERVER-7502
+ needId = true;
+ continue;
}
- if (needId) // we are explicit either way
- bb.append("_id", 1);
- else
- bb.append("_id", 0);
+ if (!last.empty() && str::startsWith(*it, last)) {
+ // we are including a parent of *it so we don't need to include this field
+ // explicitly. In fact, due to SERVER-6527 if we included this field, the parent
+            // wouldn't be fully included. This logic relies on set iterators going in
+            // lexicographic order so that a string is always directly before all fields it
+ // prefixes.
+ continue;
+ }
- return bb.obj();
+ last = *it + '.';
+ bb.append(*it, 1);
}
- // ParsedDeps::_fields is a simple recursive look-up table. For each field:
- // If the value has type==Bool, the whole field is needed
- // If the value has type==Object, the fields in the subobject are needed
- // All other fields should be missing which means not needed
- boost::optional<ParsedDeps> DepsTracker::toParsedDeps() const {
- MutableDocument md;
+ if (needId) // we are explicit either way
+ bb.append("_id", 1);
+ else
+ bb.append("_id", 0);
- if (needWholeDocument || needTextScore) {
- // can't use ParsedDeps in this case
- return boost::none;
- }
+ return bb.obj();
+}
- string last;
- for (set<string>::const_iterator it(fields.begin()), end(fields.end()); it!=end; ++it) {
- if (!last.empty() && str::startsWith(*it, last)) {
- // we are including a parent of *it so we don't need to include this field
- // explicitly. In fact, if we included this field, the parent wouldn't be fully
- // included. This logic relies on on set iterators going in lexicographic order so
- // that a string is always directly before of all fields it prefixes.
- continue;
- }
- last = *it + '.';
- md.setNestedField(*it, Value(true));
- }
+// ParsedDeps::_fields is a simple recursive look-up table. For each field:
+// If the value has type==Bool, the whole field is needed
+// If the value has type==Object, the fields in the subobject are needed
+// All other fields should be missing which means not needed
+boost::optional<ParsedDeps> DepsTracker::toParsedDeps() const {
+ MutableDocument md;
+
+ if (needWholeDocument || needTextScore) {
+ // can't use ParsedDeps in this case
+ return boost::none;
+ }
- return ParsedDeps(md.freeze());
+ string last;
+ for (set<string>::const_iterator it(fields.begin()), end(fields.end()); it != end; ++it) {
+ if (!last.empty() && str::startsWith(*it, last)) {
+ // we are including a parent of *it so we don't need to include this field
+ // explicitly. In fact, if we included this field, the parent wouldn't be fully
+            // included. This logic relies on set iterators going in lexicographic order so
+            // that a string is always directly before all fields it prefixes.
+ continue;
+ }
+ last = *it + '.';
+ md.setNestedField(*it, Value(true));
}
+ return ParsedDeps(md.freeze());
+}
+
namespace {
- // Mutually recursive with arrayHelper
- Document documentHelper(const BSONObj& bson, const Document& neededFields);
-
- // Handles array-typed values for ParsedDeps::extractFields
- Value arrayHelper(const BSONObj& bson, const Document& neededFields) {
- BSONObjIterator it(bson);
-
- vector<Value> values;
- while (it.more()) {
- BSONElement bsonElement(it.next());
- if (bsonElement.type() == Object) {
- Document sub = documentHelper(bsonElement.embeddedObject(), neededFields);
- values.push_back(Value(sub));
- }
-
- if (bsonElement.type() == Array) {
- values.push_back(arrayHelper(bsonElement.embeddedObject(), neededFields));
- }
+// Mutually recursive with arrayHelper
+Document documentHelper(const BSONObj& bson, const Document& neededFields);
+
+// Handles array-typed values for ParsedDeps::extractFields
+Value arrayHelper(const BSONObj& bson, const Document& neededFields) {
+ BSONObjIterator it(bson);
+
+ vector<Value> values;
+ while (it.more()) {
+ BSONElement bsonElement(it.next());
+ if (bsonElement.type() == Object) {
+ Document sub = documentHelper(bsonElement.embeddedObject(), neededFields);
+ values.push_back(Value(sub));
}
- return Value(std::move(values));
+ if (bsonElement.type() == Array) {
+ values.push_back(arrayHelper(bsonElement.embeddedObject(), neededFields));
+ }
}
- // Handles object-typed values including the top-level for ParsedDeps::extractFields
- Document documentHelper(const BSONObj& bson, const Document& neededFields) {
- MutableDocument md(neededFields.size());
+ return Value(std::move(values));
+}
- BSONObjIterator it(bson);
- while (it.more()) {
- BSONElement bsonElement (it.next());
- StringData fieldName = bsonElement.fieldNameStringData();
- Value isNeeded = neededFields[fieldName];
+// Handles object-typed values including the top-level for ParsedDeps::extractFields
+Document documentHelper(const BSONObj& bson, const Document& neededFields) {
+ MutableDocument md(neededFields.size());
- if (isNeeded.missing())
- continue;
+ BSONObjIterator it(bson);
+ while (it.more()) {
+ BSONElement bsonElement(it.next());
+ StringData fieldName = bsonElement.fieldNameStringData();
+ Value isNeeded = neededFields[fieldName];
- if (isNeeded.getType() == Bool) {
- md.addField(fieldName, Value(bsonElement));
- continue;
- }
+ if (isNeeded.missing())
+ continue;
- dassert(isNeeded.getType() == Object);
+ if (isNeeded.getType() == Bool) {
+ md.addField(fieldName, Value(bsonElement));
+ continue;
+ }
- if (bsonElement.type() == Object) {
- Document sub = documentHelper(bsonElement.embeddedObject(), isNeeded.getDocument());
- md.addField(fieldName, Value(sub));
- }
+ dassert(isNeeded.getType() == Object);
- if (bsonElement.type() == Array) {
- md.addField(fieldName, arrayHelper(bsonElement.embeddedObject(),
- isNeeded.getDocument()));
- }
+ if (bsonElement.type() == Object) {
+ Document sub = documentHelper(bsonElement.embeddedObject(), isNeeded.getDocument());
+ md.addField(fieldName, Value(sub));
}
- return md.freeze();
+ if (bsonElement.type() == Array) {
+ md.addField(fieldName,
+ arrayHelper(bsonElement.embeddedObject(), isNeeded.getDocument()));
+ }
}
-} // namespace
- Document ParsedDeps::extractFields(const BSONObj& input) const {
- return documentHelper(input, _fields);
- }
+ return md.freeze();
+}
+} // namespace
+
+Document ParsedDeps::extractFields(const BSONObj& input) const {
+ return documentHelper(input, _fields);
+}
}
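Both toProjection and toParsedDeps above skip a field whenever one of its parent paths is already included, relying on std::set's lexicographic iteration order so that a parent is always visited directly before the paths it prefixes. A minimal illustration of that prefix-suppression loop over a plain std::set, independent of the BSONObjBuilder machinery:

#include <cstdio>
#include <set>
#include <string>

int main() {
    const std::set<std::string> fields = {"a", "a.b", "b.c", "b.c.d", "e"};

    // Because set iteration is lexicographic, a parent path is always visited
    // directly before the paths it prefixes, so remembering the last emitted
    // path plus '.' is enough to suppress its children.
    std::string last;
    for (const std::string& f : fields) {
        if (!last.empty() && f.compare(0, last.size(), last) == 0)
            continue;  // already covered by an included parent
        last = f + '.';
        std::printf("include: %s\n", f.c_str());  // prints a, b.c, e
    }
    return 0;
}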
diff --git a/src/mongo/db/pipeline/dependencies.h b/src/mongo/db/pipeline/dependencies.h
index 47f8f46c432..b39ff1fcfd3 100644
--- a/src/mongo/db/pipeline/dependencies.h
+++ b/src/mongo/db/pipeline/dependencies.h
@@ -35,43 +35,38 @@
#include "mongo/db/pipeline/document.h"
namespace mongo {
- class ParsedDeps;
+class ParsedDeps;
+
+/**
+ * This struct allows components in an agg pipeline to report what they need from their input.
+ */
+struct DepsTracker {
+ DepsTracker() : needWholeDocument(false), needTextScore(false) {}
/**
- * This struct allows components in an agg pipeline to report what they need from their input.
+ * Returns a projection object covering the dependencies tracked by this class.
*/
- struct DepsTracker {
- DepsTracker()
- : needWholeDocument(false)
- , needTextScore(false)
- {}
-
- /**
- * Returns a projection object covering the dependencies tracked by this class.
- */
- BSONObj toProjection() const;
+ BSONObj toProjection() const;
- boost::optional<ParsedDeps> toParsedDeps() const;
+ boost::optional<ParsedDeps> toParsedDeps() const;
- std::set<std::string> fields; // names of needed fields in dotted notation
- bool needWholeDocument; // if true, ignore fields and assume the whole document is needed
- bool needTextScore;
- };
+ std::set<std::string> fields; // names of needed fields in dotted notation
+ bool needWholeDocument; // if true, ignore fields and assume the whole document is needed
+ bool needTextScore;
+};
- /**
- * This class is designed to quickly extract the needed fields from a BSONObj into a Document.
- * It should only be created by a call to DepsTracker::ParsedDeps
- */
- class ParsedDeps {
- public:
- Document extractFields(const BSONObj& input) const;
+/**
+ * This class is designed to quickly extract the needed fields from a BSONObj into a Document.
+ * It should only be created by a call to DepsTracker::toParsedDeps().
+ */
+class ParsedDeps {
+public:
+ Document extractFields(const BSONObj& input) const;
- private:
- friend struct DepsTracker; // so it can call constructor
- explicit ParsedDeps(const Document& fields)
- : _fields(fields)
- {}
+private:
+ friend struct DepsTracker; // so it can call constructor
+ explicit ParsedDeps(const Document& fields) : _fields(fields) {}
- Document _fields;
- };
+ Document _fields;
+};
}
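ParsedDeps::_fields is the recursive lookup table described in dependencies.cpp: a boolean entry keeps a field whole, an object entry recurses into the sub-document, and a missing entry drops the field. A standalone sketch of that extraction shape, with Node and Dep as hypothetical stand-ins for Document and Value (not types from this diff):

#include <cstdio>
#include <map>
#include <string>

struct Node {
    std::string scalar;                    // leaf payload, if any
    std::map<std::string, Node> children;  // nested fields
};

struct Dep {
    bool whole = false;                   // like a Bool entry: keep the whole field
    std::map<std::string, Dep> children;  // like an Object entry: recurse
};

Node extract(const Node& in, const std::map<std::string, Dep>& deps) {
    Node out;
    for (const auto& kv : in.children) {
        const auto it = deps.find(kv.first);
        if (it == deps.end())
            continue;                                // not mentioned => not needed
        if (it->second.whole)
            out.children[kv.first] = kv.second;      // keep the whole subtree
        else
            out.children[kv.first] = extract(kv.second, it->second.children);
    }
    return out;
}

int main() {
    Node doc;
    doc.children["a"].scalar = "1";
    doc.children["b"].children["c"].scalar = "2";
    doc.children["b"].children["d"].scalar = "3";

    std::map<std::string, Dep> deps;
    deps["b"].children["c"].whole = true;  // only b.c is needed

    const Node trimmed = extract(doc, deps);
    std::printf("a kept: %d, b.c kept: %d, b.d kept: %d\n",
                (int)trimmed.children.count("a"),
                (int)(trimmed.children.count("b") && trimmed.children.at("b").children.count("c")),
                (int)(trimmed.children.count("b") && trimmed.children.at("b").children.count("d")));
    // prints: a kept: 0, b.c kept: 1, b.d kept: 0
    return 0;
}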
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
index 741834d39cf..49efa640277 100644
--- a/src/mongo/db/pipeline/document.cpp
+++ b/src/mongo/db/pipeline/document.cpp
@@ -37,425 +37,412 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
- using namespace mongoutils;
- using boost::intrusive_ptr;
- using std::string;
- using std::vector;
-
- Position DocumentStorage::findField(StringData requested) const {
- int reqSize = requested.size(); // get size calculation out of the way if needed
-
- if (_numFields >= HASH_TAB_MIN) { // hash lookup
- const unsigned bucket = bucketForKey(requested);
-
- Position pos = _hashTab[bucket];
- while (pos.found()) {
- const ValueElement& elem = getField(pos);
- if (elem.nameLen == reqSize
- && memcmp(requested.rawData(), elem._name, reqSize) == 0) {
- return pos;
- }
-
- // possible collision
- pos = elem.nextCollision;
+using namespace mongoutils;
+using boost::intrusive_ptr;
+using std::string;
+using std::vector;
+
+Position DocumentStorage::findField(StringData requested) const {
+ int reqSize = requested.size(); // get size calculation out of the way if needed
+
+ if (_numFields >= HASH_TAB_MIN) { // hash lookup
+ const unsigned bucket = bucketForKey(requested);
+
+ Position pos = _hashTab[bucket];
+ while (pos.found()) {
+ const ValueElement& elem = getField(pos);
+ if (elem.nameLen == reqSize && memcmp(requested.rawData(), elem._name, reqSize) == 0) {
+ return pos;
}
+
+ // possible collision
+ pos = elem.nextCollision;
}
- else { // linear scan
- for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance()) {
- if (it->nameLen == reqSize
- && memcmp(requested.rawData(), it->_name, reqSize) == 0) {
- return it.position();
- }
+ } else { // linear scan
+ for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance()) {
+ if (it->nameLen == reqSize && memcmp(requested.rawData(), it->_name, reqSize) == 0) {
+ return it.position();
}
}
-
- // if we got here, there's no such field
- return Position();
}
- Value& DocumentStorage::appendField(StringData name) {
- Position pos = getNextPosition();
- const int nameSize = name.size();
-
- // these are the same for everyone
- const Position nextCollision;
- const Value value;
-
- // Make room for new field (and padding at end for alignment)
- const unsigned newUsed = ValueElement::align(_usedBytes + sizeof(ValueElement) + nameSize);
- if (_buffer + newUsed > _bufferEnd)
- alloc(newUsed);
- _usedBytes = newUsed;
-
- // Append structure of a ValueElement
- char* dest = _buffer + pos.index; // must be after alloc since it changes _buffer
-#define append(x) memcpy(dest, &(x), sizeof(x)); dest += sizeof(x)
- append(value);
- append(nextCollision);
- append(nameSize);
- name.copyTo( dest, true );
- // Padding for alignment handled above
-#undef append
+ // if we got here, there's no such field
+ return Position();
+}
- // Make sure next field starts where we expect it
- fassert(16486, getField(pos).next()->ptr() == _buffer + _usedBytes);
+Value& DocumentStorage::appendField(StringData name) {
+ Position pos = getNextPosition();
+ const int nameSize = name.size();
+
+ // these are the same for everyone
+ const Position nextCollision;
+ const Value value;
+
+ // Make room for new field (and padding at end for alignment)
+ const unsigned newUsed = ValueElement::align(_usedBytes + sizeof(ValueElement) + nameSize);
+ if (_buffer + newUsed > _bufferEnd)
+ alloc(newUsed);
+ _usedBytes = newUsed;
+
+ // Append structure of a ValueElement
+ char* dest = _buffer + pos.index; // must be after alloc since it changes _buffer
+#define append(x) \
+ memcpy(dest, &(x), sizeof(x)); \
+ dest += sizeof(x)
+ append(value);
+ append(nextCollision);
+ append(nameSize);
+ name.copyTo(dest, true);
+// Padding for alignment handled above
+#undef append
- _numFields++;
+ // Make sure next field starts where we expect it
+ fassert(16486, getField(pos).next()->ptr() == _buffer + _usedBytes);
- if (_numFields > HASH_TAB_MIN) {
- addFieldToHashTable(pos);
- }
- else if (_numFields == HASH_TAB_MIN) {
- // adds all fields to hash table (including the one we just added)
- rehash();
- }
+ _numFields++;
- return getField(pos).val;
+ if (_numFields > HASH_TAB_MIN) {
+ addFieldToHashTable(pos);
+ } else if (_numFields == HASH_TAB_MIN) {
+ // adds all fields to hash table (including the one we just added)
+ rehash();
}
- // Call after adding field to _fields and increasing _numFields
- void DocumentStorage::addFieldToHashTable(Position pos) {
- ValueElement& elem = getField(pos);
- elem.nextCollision = Position();
+ return getField(pos).val;
+}
- const unsigned bucket = bucketForKey(elem.nameSD());
+// Call after adding field to _fields and increasing _numFields
+void DocumentStorage::addFieldToHashTable(Position pos) {
+ ValueElement& elem = getField(pos);
+ elem.nextCollision = Position();
- Position* posPtr = &_hashTab[bucket];
- while (posPtr->found()) {
- // collision: walk links and add new to end
- posPtr = &getField(*posPtr).nextCollision;
- }
- *posPtr = Position(pos.index);
+ const unsigned bucket = bucketForKey(elem.nameSD());
+
+ Position* posPtr = &_hashTab[bucket];
+ while (posPtr->found()) {
+ // collision: walk links and add new to end
+ posPtr = &getField(*posPtr).nextCollision;
}
+ *posPtr = Position(pos.index);
+}
- void DocumentStorage::alloc(unsigned newSize) {
- const bool firstAlloc = !_buffer;
- const bool doingRehash = needRehash();
- const size_t oldCapacity = _bufferEnd - _buffer;
-
- // make new bucket count big enough
- while (needRehash() || hashTabBuckets() < HASH_TAB_INIT_SIZE)
- _hashTabMask = hashTabBuckets()*2 - 1;
-
- // only allocate power-of-two sized space > 128 bytes
- size_t capacity = 128;
- while (capacity < newSize + hashTabBytes())
- capacity *= 2;
-
- uassert(16490, "Tried to make oversized document",
- capacity <= size_t(BufferMaxSize));
-
- std::unique_ptr<char[]> oldBuf(_buffer);
- _buffer = new char[capacity];
- _bufferEnd = _buffer + capacity - hashTabBytes();
-
- if (!firstAlloc) {
- // This just copies the elements
- memcpy(_buffer, oldBuf.get(), _usedBytes);
-
- if (_numFields >= HASH_TAB_MIN) {
- // if we were hashing, deal with the hash table
- if (doingRehash) {
- rehash();
- }
- else {
- // no rehash needed so just slide table down to new position
- memcpy(_hashTab, oldBuf.get() + oldCapacity, hashTabBytes());
- }
+void DocumentStorage::alloc(unsigned newSize) {
+ const bool firstAlloc = !_buffer;
+ const bool doingRehash = needRehash();
+ const size_t oldCapacity = _bufferEnd - _buffer;
+
+ // make new bucket count big enough
+ while (needRehash() || hashTabBuckets() < HASH_TAB_INIT_SIZE)
+ _hashTabMask = hashTabBuckets() * 2 - 1;
+
+ // only allocate power-of-two sized space > 128 bytes
+ size_t capacity = 128;
+ while (capacity < newSize + hashTabBytes())
+ capacity *= 2;
+
+ uassert(16490, "Tried to make oversized document", capacity <= size_t(BufferMaxSize));
+
+ std::unique_ptr<char[]> oldBuf(_buffer);
+ _buffer = new char[capacity];
+ _bufferEnd = _buffer + capacity - hashTabBytes();
+
+ if (!firstAlloc) {
+ // This just copies the elements
+ memcpy(_buffer, oldBuf.get(), _usedBytes);
+
+ if (_numFields >= HASH_TAB_MIN) {
+ // if we were hashing, deal with the hash table
+ if (doingRehash) {
+ rehash();
+ } else {
+ // no rehash needed so just slide table down to new position
+ memcpy(_hashTab, oldBuf.get() + oldCapacity, hashTabBytes());
}
}
}
+}
- void DocumentStorage::reserveFields(size_t expectedFields) {
- fassert(16487, !_buffer);
+void DocumentStorage::reserveFields(size_t expectedFields) {
+ fassert(16487, !_buffer);
- unsigned buckets = HASH_TAB_INIT_SIZE;
- while (buckets < expectedFields)
- buckets *= 2;
- _hashTabMask = buckets - 1;
+ unsigned buckets = HASH_TAB_INIT_SIZE;
+ while (buckets < expectedFields)
+ buckets *= 2;
+ _hashTabMask = buckets - 1;
- // Using expectedFields+1 to allow space for long field names
- const size_t newSize = (expectedFields+1) * ValueElement::align(sizeof(ValueElement));
+ // Using expectedFields+1 to allow space for long field names
+ const size_t newSize = (expectedFields + 1) * ValueElement::align(sizeof(ValueElement));
- uassert(16491, "Tried to make oversized document",
- newSize <= size_t(BufferMaxSize));
+ uassert(16491, "Tried to make oversized document", newSize <= size_t(BufferMaxSize));
+
+ _buffer = new char[newSize + hashTabBytes()];
+ _bufferEnd = _buffer + newSize;
+}
- _buffer = new char[newSize + hashTabBytes()];
- _bufferEnd = _buffer + newSize;
+intrusive_ptr<DocumentStorage> DocumentStorage::clone() const {
+ intrusive_ptr<DocumentStorage> out(new DocumentStorage());
+
+ // Make a copy of the buffer.
+ // It is very important that the positions of each field are the same after cloning.
+ const size_t bufferBytes = (_bufferEnd + hashTabBytes()) - _buffer;
+ out->_buffer = new char[bufferBytes];
+ out->_bufferEnd = out->_buffer + (_bufferEnd - _buffer);
+ memcpy(out->_buffer, _buffer, bufferBytes);
+
+ // Copy remaining fields
+ out->_usedBytes = _usedBytes;
+ out->_numFields = _numFields;
+ out->_hashTabMask = _hashTabMask;
+ out->_hasTextScore = _hasTextScore;
+ out->_textScore = _textScore;
+
+ // Tell values that they have been memcpyed (updates ref counts)
+ for (DocumentStorageIterator it = out->iteratorAll(); !it.atEnd(); it.advance()) {
+ it->val.memcpyed();
}
- intrusive_ptr<DocumentStorage> DocumentStorage::clone() const {
- intrusive_ptr<DocumentStorage> out (new DocumentStorage());
-
- // Make a copy of the buffer.
- // It is very important that the positions of each field are the same after cloning.
- const size_t bufferBytes = (_bufferEnd + hashTabBytes()) - _buffer;
- out->_buffer = new char[bufferBytes];
- out->_bufferEnd = out->_buffer + (_bufferEnd - _buffer);
- memcpy(out->_buffer, _buffer, bufferBytes);
-
- // Copy remaining fields
- out->_usedBytes = _usedBytes;
- out->_numFields = _numFields;
- out->_hashTabMask = _hashTabMask;
- out->_hasTextScore = _hasTextScore;
- out->_textScore = _textScore;
-
- // Tell values that they have been memcpyed (updates ref counts)
- for (DocumentStorageIterator it = out->iteratorAll(); !it.atEnd(); it.advance()) {
- it->val.memcpyed();
- }
+ return out;
+}
- return out;
+DocumentStorage::~DocumentStorage() {
+ std::unique_ptr<char[]> deleteBufferAtScopeEnd(_buffer);
+
+ for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance()) {
+ it->val.~Value(); // explicit destructor call
}
+}
- DocumentStorage::~DocumentStorage() {
- std::unique_ptr<char[]> deleteBufferAtScopeEnd (_buffer);
+Document::Document(const BSONObj& bson) {
+ MutableDocument md(bson.nFields());
- for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance()) {
- it->val.~Value(); // explicit destructor call
- }
+ BSONObjIterator it(bson);
+ while (it.more()) {
+ BSONElement bsonElement(it.next());
+ md.addField(bsonElement.fieldNameStringData(), Value(bsonElement));
}
- Document::Document(const BSONObj& bson) {
- MutableDocument md(bson.nFields());
-
- BSONObjIterator it(bson);
- while(it.more()) {
- BSONElement bsonElement(it.next());
- md.addField(bsonElement.fieldNameStringData(), Value(bsonElement));
- }
+ *this = md.freeze();
+}
- *this = md.freeze();
- }
+BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Document& doc) {
+ BSONObjBuilder subobj(builder.subobjStart());
+ doc.toBson(&subobj);
+ subobj.doneFast();
+ return builder.builder();
+}
- BSONObjBuilder& operator << (BSONObjBuilderValueStream& builder, const Document& doc) {
- BSONObjBuilder subobj(builder.subobjStart());
- doc.toBson(&subobj);
- subobj.doneFast();
- return builder.builder();
+void Document::toBson(BSONObjBuilder* pBuilder) const {
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ *pBuilder << it->nameSD() << it->val;
}
+}
- void Document::toBson(BSONObjBuilder* pBuilder) const {
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- *pBuilder << it->nameSD() << it->val;
- }
- }
+BSONObj Document::toBson() const {
+ BSONObjBuilder bb;
+ toBson(&bb);
+ return bb.obj();
+}
- BSONObj Document::toBson() const {
- BSONObjBuilder bb;
- toBson(&bb);
- return bb.obj();
- }
+const StringData Document::metaFieldTextScore("$textScore", StringData::LiteralTag());
- const StringData Document::metaFieldTextScore("$textScore", StringData::LiteralTag());
+BSONObj Document::toBsonWithMetaData() const {
+ BSONObjBuilder bb;
+ toBson(&bb);
+ if (hasTextScore())
+ bb.append(metaFieldTextScore, getTextScore());
+ return bb.obj();
+}
- BSONObj Document::toBsonWithMetaData() const {
- BSONObjBuilder bb;
- toBson(&bb);
- if (hasTextScore())
- bb.append(metaFieldTextScore, getTextScore());
- return bb.obj();
- }
+Document Document::fromBsonWithMetaData(const BSONObj& bson) {
+ MutableDocument md;
- Document Document::fromBsonWithMetaData(const BSONObj& bson) {
- MutableDocument md;
-
- BSONObjIterator it(bson);
- while(it.more()) {
- BSONElement elem(it.next());
- if (elem.fieldName()[0] == '$') {
- if (elem.fieldNameStringData() == metaFieldTextScore) {
- md.setTextScore(elem.Double());
- continue;
- }
+ BSONObjIterator it(bson);
+ while (it.more()) {
+ BSONElement elem(it.next());
+ if (elem.fieldName()[0] == '$') {
+ if (elem.fieldNameStringData() == metaFieldTextScore) {
+ md.setTextScore(elem.Double());
+ continue;
}
-
- // Note: this will not parse out metadata in embedded documents.
- md.addField(elem.fieldNameStringData(), Value(elem));
}
- return md.freeze();
+ // Note: this will not parse out metadata in embedded documents.
+ md.addField(elem.fieldNameStringData(), Value(elem));
}
- MutableDocument::MutableDocument(size_t expectedFields)
- : _storageHolder(NULL)
- , _storage(_storageHolder)
- {
- if (expectedFields) {
- storage().reserveFields(expectedFields);
- }
- }
+ return md.freeze();
+}
- MutableValue MutableDocument::getNestedFieldHelper(const FieldPath& dottedField,
- size_t level) {
- if (level == dottedField.getPathLength()-1) {
- return getField(dottedField.getFieldName(level));
- }
- else {
- MutableDocument nested (getField(dottedField.getFieldName(level)));
- return nested.getNestedFieldHelper(dottedField, level+1);
- }
+MutableDocument::MutableDocument(size_t expectedFields)
+ : _storageHolder(NULL), _storage(_storageHolder) {
+ if (expectedFields) {
+ storage().reserveFields(expectedFields);
}
+}
- MutableValue MutableDocument::getNestedField(const FieldPath& dottedField) {
- fassert(16601, dottedField.getPathLength());
- return getNestedFieldHelper(dottedField, 0);
+MutableValue MutableDocument::getNestedFieldHelper(const FieldPath& dottedField, size_t level) {
+ if (level == dottedField.getPathLength() - 1) {
+ return getField(dottedField.getFieldName(level));
+ } else {
+ MutableDocument nested(getField(dottedField.getFieldName(level)));
+ return nested.getNestedFieldHelper(dottedField, level + 1);
}
+}
- MutableValue MutableDocument::getNestedFieldHelper(const vector<Position>& positions,
- size_t level) {
- if (level == positions.size()-1) {
- return getField(positions[level]);
- }
- else {
- MutableDocument nested (getField(positions[level]));
- return nested.getNestedFieldHelper(positions, level+1);
- }
- }
+MutableValue MutableDocument::getNestedField(const FieldPath& dottedField) {
+ fassert(16601, dottedField.getPathLength());
+ return getNestedFieldHelper(dottedField, 0);
+}
- MutableValue MutableDocument::getNestedField(const vector<Position>& positions) {
- fassert(16488, !positions.empty());
- return getNestedFieldHelper(positions, 0);
+MutableValue MutableDocument::getNestedFieldHelper(const vector<Position>& positions,
+ size_t level) {
+ if (level == positions.size() - 1) {
+ return getField(positions[level]);
+ } else {
+ MutableDocument nested(getField(positions[level]));
+ return nested.getNestedFieldHelper(positions, level + 1);
}
+}
- static Value getNestedFieldHelper(const Document& doc,
- const FieldPath& fieldNames,
- vector<Position>* positions,
- size_t level) {
+MutableValue MutableDocument::getNestedField(const vector<Position>& positions) {
+ fassert(16488, !positions.empty());
+ return getNestedFieldHelper(positions, 0);
+}
- const string& fieldName = fieldNames.getFieldName(level);
- const Position pos = doc.positionOf(fieldName);
+static Value getNestedFieldHelper(const Document& doc,
+ const FieldPath& fieldNames,
+ vector<Position>* positions,
+ size_t level) {
+ const string& fieldName = fieldNames.getFieldName(level);
+ const Position pos = doc.positionOf(fieldName);
- if (!pos.found())
- return Value();
+ if (!pos.found())
+ return Value();
- if (positions)
- positions->push_back(pos);
+ if (positions)
+ positions->push_back(pos);
- if (level == fieldNames.getPathLength()-1)
- return doc.getField(pos);
+ if (level == fieldNames.getPathLength() - 1)
+ return doc.getField(pos);
- Value val = doc.getField(pos);
- if (val.getType() != Object)
- return Value();
+ Value val = doc.getField(pos);
+ if (val.getType() != Object)
+ return Value();
- return getNestedFieldHelper(val.getDocument(), fieldNames, positions, level+1);
- }
-
- const Value Document::getNestedField(const FieldPath& fieldNames,
- vector<Position>* positions) const {
- fassert(16489, fieldNames.getPathLength());
- return getNestedFieldHelper(*this, fieldNames, positions, 0);
- }
+ return getNestedFieldHelper(val.getDocument(), fieldNames, positions, level + 1);
+}
- size_t Document::getApproximateSize() const {
- if (!_storage)
- return 0; // we've allocated no memory
+const Value Document::getNestedField(const FieldPath& fieldNames,
+ vector<Position>* positions) const {
+ fassert(16489, fieldNames.getPathLength());
+ return getNestedFieldHelper(*this, fieldNames, positions, 0);
+}
- size_t size = sizeof(DocumentStorage);
- size += storage().allocatedBytes();
+size_t Document::getApproximateSize() const {
+ if (!_storage)
+ return 0; // we've allocated no memory
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- size += it->val.getApproximateSize();
- size -= sizeof(Value); // already accounted for above
- }
+ size_t size = sizeof(DocumentStorage);
+ size += storage().allocatedBytes();
- return size;
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ size += it->val.getApproximateSize();
+ size -= sizeof(Value); // already accounted for above
}
- void Document::hash_combine(size_t &seed) const {
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- StringData name = it->nameSD();
- boost::hash_range(seed, name.rawData(), name.rawData() + name.size());
- it->val.hash_combine(seed);
- }
+ return size;
+}
+
+void Document::hash_combine(size_t& seed) const {
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ StringData name = it->nameSD();
+ boost::hash_range(seed, name.rawData(), name.rawData() + name.size());
+ it->val.hash_combine(seed);
}
+}
- int Document::compare(const Document& rL, const Document& rR) {
- DocumentStorageIterator lIt = rL.storage().iterator();
- DocumentStorageIterator rIt = rR.storage().iterator();
+int Document::compare(const Document& rL, const Document& rR) {
+ DocumentStorageIterator lIt = rL.storage().iterator();
+ DocumentStorageIterator rIt = rR.storage().iterator();
- while (true) {
- if (lIt.atEnd()) {
- if (rIt.atEnd())
- return 0; // documents are the same length
+ while (true) {
+ if (lIt.atEnd()) {
+ if (rIt.atEnd())
+ return 0; // documents are the same length
- return -1; // left document is shorter
- }
+ return -1; // left document is shorter
+ }
- if (rIt.atEnd())
- return 1; // right document is shorter
+ if (rIt.atEnd())
+ return 1; // right document is shorter
- const ValueElement& rField = rIt.get();
- const ValueElement& lField = lIt.get();
+ const ValueElement& rField = rIt.get();
+ const ValueElement& lField = lIt.get();
- // For compatibility with BSONObj::woCompare() consider the canonical type of values
- // before considerting their names.
- const int rCType = canonicalizeBSONType(rField.val.getType());
- const int lCType = canonicalizeBSONType(lField.val.getType());
- if (lCType != rCType)
- return lCType < rCType ? -1 : 1;
+ // For compatibility with BSONObj::woCompare() consider the canonical type of values
+        // before considering their names.
+ const int rCType = canonicalizeBSONType(rField.val.getType());
+ const int lCType = canonicalizeBSONType(lField.val.getType());
+ if (lCType != rCType)
+ return lCType < rCType ? -1 : 1;
- const int nameCmp = lField.nameSD().compare(rField.nameSD());
- if (nameCmp)
- return nameCmp; // field names are unequal
+ const int nameCmp = lField.nameSD().compare(rField.nameSD());
+ if (nameCmp)
+ return nameCmp; // field names are unequal
- const int valueCmp = Value::compare(lField.val, rField.val);
- if (valueCmp)
- return valueCmp; // fields are unequal
+ const int valueCmp = Value::compare(lField.val, rField.val);
+ if (valueCmp)
+ return valueCmp; // fields are unequal
- rIt.advance();
- lIt.advance();
- }
+ rIt.advance();
+ lIt.advance();
}
+}
- string Document::toString() const {
- if (empty())
- return "{}";
-
- StringBuilder out;
- const char* prefix = "{";
+string Document::toString() const {
+ if (empty())
+ return "{}";
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- out << prefix << it->nameSD() << ": " << it->val.toString();
- prefix = ", ";
- }
- out << '}';
+ StringBuilder out;
+ const char* prefix = "{";
- return out.str();
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ out << prefix << it->nameSD() << ": " << it->val.toString();
+ prefix = ", ";
}
+ out << '}';
- void Document::serializeForSorter(BufBuilder& buf) const {
- const int numElems = size();
- buf.appendNum(numElems);
+ return out.str();
+}
- for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
- buf.appendStr(it->nameSD(), /*NUL byte*/ true);
- it->val.serializeForSorter(buf);
- }
+void Document::serializeForSorter(BufBuilder& buf) const {
+ const int numElems = size();
+ buf.appendNum(numElems);
- if (hasTextScore()) {
- buf.appendNum(char(1));
- buf.appendNum(getTextScore());
- }
- else {
- buf.appendNum(char(0));
- }
+ for (DocumentStorageIterator it = storage().iterator(); !it.atEnd(); it.advance()) {
+ buf.appendStr(it->nameSD(), /*NUL byte*/ true);
+ it->val.serializeForSorter(buf);
}
- Document Document::deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) {
- const int numElems = buf.read<int>();
- MutableDocument doc(numElems);
- for (int i = 0; i < numElems; i++) {
- StringData name = buf.readCStr();
- doc.addField(name, Value::deserializeForSorter(buf,
- Value::SorterDeserializeSettings()));
- }
-
- if (buf.read<char>()) // hasTextScore
- doc.setTextScore(buf.read<double>());
+ if (hasTextScore()) {
+ buf.appendNum(char(1));
+ buf.appendNum(getTextScore());
+ } else {
+ buf.appendNum(char(0));
+ }
+}
- return doc.freeze();
+Document Document::deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) {
+ const int numElems = buf.read<int>();
+ MutableDocument doc(numElems);
+ for (int i = 0; i < numElems; i++) {
+ StringData name = buf.readCStr();
+ doc.addField(name, Value::deserializeForSorter(buf, Value::SorterDeserializeSettings()));
}
+
+ if (buf.read<char>()) // hasTextScore
+ doc.setTextScore(buf.read<double>());
+
+ return doc.freeze();
+}
}
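Document::compare above orders documents field by field: canonical BSON type first (for BSONObj::woCompare compatibility), then field name, then value, and a document that runs out of fields first sorts before the longer one. A standalone sketch of the same ordering over pre-flattened fields, where FieldView is an illustrative stand-in rather than a type from this diff:

#include <cstdio>
#include <string>
#include <vector>

// One flattened field: a canonical type rank (canonicalizeBSONType in the
// real code), the field name, and the value reduced to a comparable string.
struct FieldView {
    int canonicalType;
    std::string name;
    std::string value;
};

// Walk both documents in field order; compare type, then name, then value.
// Like the real compare, this may return values other than -1, 0, or 1.
int compareDocs(const std::vector<FieldView>& l, const std::vector<FieldView>& r) {
    size_t i = 0;
    while (true) {
        if (i == l.size())
            return i == r.size() ? 0 : -1;  // equal length, or left is shorter
        if (i == r.size())
            return 1;                       // right is shorter
        const FieldView& lf = l[i];
        const FieldView& rf = r[i];
        if (lf.canonicalType != rf.canonicalType)
            return lf.canonicalType < rf.canonicalType ? -1 : 1;
        if (const int nameCmp = lf.name.compare(rf.name))
            return nameCmp;
        if (const int valueCmp = lf.value.compare(rf.value))
            return valueCmp;
        ++i;
    }
}

int main() {
    const std::vector<FieldView> a = {{10, "a", "1"}, {15, "b", "x"}};
    const std::vector<FieldView> b = {{10, "a", "1"}};
    std::printf("%d\n", compareDocs(a, b));  // 1: b is a prefix of a, so a > b
    return 0;
}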
diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h
index 491b9c050d3..5010f69b5fa 100644
--- a/src/mongo/db/pipeline/document.h
+++ b/src/mongo/db/pipeline/document.h
@@ -36,529 +36,580 @@
#include "mongo/bson/util/builder.h"
namespace mongo {
- class BSONObj;
- class FieldIterator;
- class FieldPath;
- class Value;
- class MutableDocument;
+class BSONObj;
+class FieldIterator;
+class FieldPath;
+class Value;
+class MutableDocument;
- /** An internal class that represents the position of a field in a document.
+/** An internal class that represents the position of a field in a document.
+ *
+ * This is a low-level class that you usually don't need to worry about.
+ *
+ * The main use of this class for clients is to allow refetching or
+ * setting a field without looking it up again. It has a default
+ * constructor that represents a field not being in a document. It also
+ * has a method 'bool found()' that tells you if a field was found.
+ *
+ * For more details see document_internal.h
+ */
+class Position;
+
+/** A Document is similar to a BSONObj but with a different in-memory representation.
+ *
+ * A Document can be treated as a const std::map<std::string, const Value> that is
+ * very cheap to copy and is Assignable. Therefore, it is acceptable to
+ * pass and return by Value. Note that the data in a Document is
+ * immutable, but you can replace a Document instance with assignment.
+ *
+ * See Also: Value class in Value.h
+ */
+class Document {
+public:
+ /// Empty Document (does no allocation)
+ Document() {}
+
+ /// Create a new Document deep-converted from the given BSONObj.
+ explicit Document(const BSONObj& bson);
+
+ void swap(Document& rhs) {
+ _storage.swap(rhs._storage);
+ }
+
+ /// Look up a field by key name. Returns Value() if no such field. O(1)
+ const Value operator[](StringData key) const {
+ return getField(key);
+ }
+ const Value getField(StringData key) const {
+ return storage().getField(key);
+ }
+
+ /// Look up a field by Position. See positionOf and getNestedField.
+ const Value operator[](Position pos) const {
+ return getField(pos);
+ }
+ const Value getField(Position pos) const {
+ return storage().getField(pos).val;
+ }
+
+ /** Similar to BSONObj::getFieldDotted, but using FieldPath rather than a dotted string.
+ * If you pass a non-NULL positions vector, you get back a path suitable
+ * to pass to MutableDocument::setNestedField.
+ *
+ * TODO a version that doesn't use FieldPath
+ */
+ const Value getNestedField(const FieldPath& fieldNames,
+ std::vector<Position>* positions = NULL) const;
+
+ /// Number of fields in this document. O(n)
+ size_t size() const {
+ return storage().size();
+ }
+
+ /// True if this document has no fields.
+ bool empty() const {
+ return !_storage || storage().iterator().atEnd();
+ }
+
+ /// Create a new FieldIterator that can be used to examine the Document's fields in order.
+ FieldIterator fieldIterator() const;
+
+ /// Convenience type for dealing with fields. Used by FieldIterator.
+ typedef std::pair<StringData, Value> FieldPair;
+
+ /** Get the approximate storage size of the document and sub-values in bytes.
+ * Note: Some memory may be shared with other Documents or between fields within
+ * a single Document so this can overestimate usage.
+ */
+ size_t getApproximateSize() const;
+
+ /** Compare two documents.
*
- * This is a low-level class that you usually don't need to worry about.
+ * BSON document field order is significant, so this just goes through
+ * the fields in order. The comparison is done in roughly the same way
+ * as strings are compared, but comparing one field at a time instead
+ * of one character at a time.
*
- * The main use of this class for clients is to allow refetching or
- * setting a field without looking it up again. It has a default
- * constructor that represents a field not being in a document. It also
- * has a method 'bool found()' that tells you if a field was found.
+ * Note: This does not consider metadata when comparing documents.
*
- * For more details see document_internal.h
+ * @returns an integer less than zero, zero, or an integer greater than
+ * zero, depending on whether lhs < rhs, lhs == rhs, or lhs > rhs
+ * Warning: may return values other than -1, 0, or 1
*/
- class Position;
+ static int compare(const Document& lhs, const Document& rhs);
- /** A Document is similar to a BSONObj but with a different in-memory representation.
- *
- * A Document can be treated as a const std::map<std::string, const Value> that is
- * very cheap to copy and is Assignable. Therefore, it is acceptable to
- * pass and return by Value. Note that the data in a Document is
- * immutable, but you can replace a Document instance with assignment.
+ std::string toString() const;
+
+ friend std::ostream& operator<<(std::ostream& out, const Document& doc) {
+ return out << doc.toString();
+ }
+
+ /** Calculate a hash value.
*
- * See Also: Value class in Value.h
+ * Meant to be used to create composite hashes suitable for
+ * hashed container classes such as unordered_map.
*/
- class Document {
- public:
+ void hash_combine(size_t& seed) const;
- /// Empty Document (does no allocation)
- Document() {}
-
- /// Create a new Document deep-converted from the given BSONObj.
- explicit Document(const BSONObj& bson);
-
- void swap(Document& rhs) { _storage.swap(rhs._storage); }
-
- /// Look up a field by key name. Returns Value() if no such field. O(1)
- const Value operator[] (StringData key) const { return getField(key); }
- const Value getField(StringData key) const { return storage().getField(key); }
-
- /// Look up a field by Position. See positionOf and getNestedField.
- const Value operator[] (Position pos) const { return getField(pos); }
- const Value getField(Position pos) const { return storage().getField(pos).val; }
-
- /** Similar to BSONObj::getFieldDotted, but using FieldPath rather than a dotted string.
- * If you pass a non-NULL positions vector, you get back a path suitable
- * to pass to MutableDocument::setNestedField.
- *
- * TODO a version that doesn't use FieldPath
- */
- const Value getNestedField(const FieldPath& fieldNames,
- std::vector<Position>* positions=NULL) const;
-
- /// Number of fields in this document. O(n)
- size_t size() const { return storage().size(); }
-
- /// True if this document has no fields.
- bool empty() const { return !_storage || storage().iterator().atEnd(); }
-
- /// Create a new FieldIterator that can be used to examine the Document's fields in order.
- FieldIterator fieldIterator() const;
-
- /// Convenience type for dealing with fields. Used by FieldIterator.
- typedef std::pair<StringData, Value> FieldPair;
-
- /** Get the approximate storage size of the document and sub-values in bytes.
- * Note: Some memory may be shared with other Documents or between fields within
- * a single Document so this can overestimate usage.
- */
- size_t getApproximateSize() const;
-
- /** Compare two documents.
- *
- * BSON document field order is significant, so this just goes through
- * the fields in order. The comparison is done in roughly the same way
- * as strings are compared, but comparing one field at a time instead
- * of one character at a time.
- *
- * Note: This does not consider metadata when comparing documents.
- *
- * @returns an integer less than zero, zero, or an integer greater than
- * zero, depending on whether lhs < rhs, lhs == rhs, or lhs > rhs
- * Warning: may return values other than -1, 0, or 1
- */
- static int compare(const Document& lhs, const Document& rhs);
-
- std::string toString() const;
-
- friend
- std::ostream& operator << (std::ostream& out, const Document& doc) { return out << doc.toString(); }
-
- /** Calculate a hash value.
- *
- * Meant to be used to create composite hashes suitable for
- * hashed container classes such as unordered_map.
- */
- void hash_combine(size_t &seed) const;
-
- /**
- * Add this document to the BSONObj under construction with the given BSONObjBuilder.
- * Does not include metadata.
- */
- void toBson(BSONObjBuilder *pBsonObjBuilder) const;
- BSONObj toBson() const;
-
- /**
- * Like toBson, but includes metadata at the top-level.
- * Output is parseable by fromBsonWithMetaData
- */
- BSONObj toBsonWithMetaData() const;
-
- /**
- * Like Document(BSONObj) but treats top-level fields with special names as metadata.
- * Special field names are available as static constants on this class with names starting
- * with metaField.
- */
- static Document fromBsonWithMetaData(const BSONObj& bson);
-
- // Support BSONObjBuilder and BSONArrayBuilder "stream" API
- friend BSONObjBuilder& operator << (BSONObjBuilderValueStream& builder, const Document& d);
-
- /** Return the abstract Position of a field, suitable to pass to operator[] or getField().
- * This can potentially save time if you need to refer to a field multiple times.
- */
- Position positionOf(StringData fieldName) const { return storage().findField(fieldName); }
-
- /** Clone a document.
- *
- * This should only be called by MutableDocument and tests
- *
- * The new document shares all the fields' values with the original.
- * This is not a deep copy. Only the fields on the top-level document
- * are cloned.
- */
- Document clone() const { return Document(storage().clone().get()); }
-
- static const StringData metaFieldTextScore; // "$textScore"
- bool hasTextScore() const { return storage().hasTextScore(); }
- double getTextScore() const { return storage().getTextScore(); }
-
- /// members for Sorter
- struct SorterDeserializeSettings {}; // unused
- void serializeForSorter(BufBuilder& buf) const;
- static Document deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&);
- int memUsageForSorter() const { return getApproximateSize(); }
- Document getOwned() const { return *this; }
-
- /// only for testing
- const void* getPtr() const { return _storage.get(); }
-
- private:
- friend class FieldIterator;
- friend class ValueStorage;
- friend class MutableDocument;
- friend class MutableValue;
-
- explicit Document(const DocumentStorage* ptr) : _storage(ptr) {};
-
- const DocumentStorage& storage() const {
- return (_storage ? *_storage : DocumentStorage::emptyDoc());
- }
- boost::intrusive_ptr<const DocumentStorage> _storage;
- };
+ /**
+ * Add this document to the BSONObj under construction with the given BSONObjBuilder.
+ * Does not include metadata.
+ */
+ void toBson(BSONObjBuilder* pBsonObjBuilder) const;
+ BSONObj toBson() const;
+
+ /**
+ * Like toBson, but includes metadata at the top-level.
+ * Output is parseable by fromBsonWithMetaData
+ */
+ BSONObj toBsonWithMetaData() const;
+
+ /**
+ * Like Document(BSONObj) but treats top-level fields with special names as metadata.
+ * Special field names are available as static constants on this class with names starting
+ * with metaField.
+ */
+ static Document fromBsonWithMetaData(const BSONObj& bson);
+
+ // Support BSONObjBuilder and BSONArrayBuilder "stream" API
+ friend BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Document& d);
- inline bool operator== (const Document& l, const Document& r) {
- return Document::compare(l, r) == 0;
+ /** Return the abstract Position of a field, suitable to pass to operator[] or getField().
+ * This can potentially save time if you need to refer to a field multiple times.
+ */
+ Position positionOf(StringData fieldName) const {
+ return storage().findField(fieldName);
}
- inline bool operator!= (const Document& l, const Document& r) {
- return Document::compare(l, r) != 0;
+
+ /** Clone a document.
+ *
+ * This should only be called by MutableDocument and tests
+ *
+ * The new document shares all the fields' values with the original.
+ * This is not a deep copy. Only the fields on the top-level document
+ * are cloned.
+ */
+ Document clone() const {
+ return Document(storage().clone().get());
}
- inline bool operator< (const Document& l, const Document& r) {
- return Document::compare(l, r) < 0;
+
+ static const StringData metaFieldTextScore; // "$textScore"
+ bool hasTextScore() const {
+ return storage().hasTextScore();
}
- inline bool operator<= (const Document& l, const Document& r) {
- return Document::compare(l, r) <= 0;
+ double getTextScore() const {
+ return storage().getTextScore();
}
- inline bool operator> (const Document& l, const Document& r) {
- return Document::compare(l, r) > 0;
+
+ /// members for Sorter
+ struct SorterDeserializeSettings {}; // unused
+ void serializeForSorter(BufBuilder& buf) const;
+ static Document deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&);
+ int memUsageForSorter() const {
+ return getApproximateSize();
}
- inline bool operator>= (const Document& l, const Document& r) {
- return Document::compare(l, r) >= 0;
+ Document getOwned() const {
+ return *this;
}
+ /// only for testing
+ const void* getPtr() const {
+ return _storage.get();
+ }
- /** This class is returned by MutableDocument to allow you to modify its values.
- * You are not allowed to hold variables of this type (enforced by the type system).
- */
- class MutableValue {
- public:
- void operator= (const Value& v) { _val = v; }
-
- /** These are designed to allow things like mutDoc["a"]["b"]["c"] = Value(10);
- * It is safe to use even on nonexistent fields.
- */
- MutableValue operator[] (StringData key) { return getField(key); }
- MutableValue operator[] (Position pos) { return getField(pos); }
-
- MutableValue getField(StringData key);
- MutableValue getField(Position pos);
-
- private:
- friend class MutableDocument;
-
- /// can only be constructed or copied by self and friends
- MutableValue(const MutableValue& other): _val(other._val) {}
- explicit MutableValue(Value& val): _val(val) {}
-
- /// Used by MutableDocument(MutableValue)
- const RefCountable*& getDocPtr() {
- if (_val.getType() != Object || _val._storage.genericRCPtr == NULL) {
- // If the current value isn't an object we replace it with a Object-typed Value.
- // Note that we can't just use Document() here because that is a NULL pointer and
- // Value doesn't refcount NULL pointers. This led to a memory leak (SERVER-10554)
- // because MutableDocument::newStorage() would set a non-NULL pointer into the Value
- // without setting the refCounter bit. While allocating a DocumentStorage here could
- // result in an allocation where none is needed, in practice this is only called
- // when we are about to add a field to the sub-document so this just changes where
- // the allocation is done.
- _val = Value(Document(new DocumentStorage()));
- }
-
- return _val._storage.genericRCPtr;
- }
+private:
+ friend class FieldIterator;
+ friend class ValueStorage;
+ friend class MutableDocument;
+ friend class MutableValue;
- MutableValue& operator= (const MutableValue&); // not assignable with another MutableValue
+    explicit Document(const DocumentStorage* ptr) : _storage(ptr) {}
- Value& _val;
- };
+ const DocumentStorage& storage() const {
+ return (_storage ? *_storage : DocumentStorage::emptyDoc());
+ }
+ boost::intrusive_ptr<const DocumentStorage> _storage;
+};
- /** MutableDocument is a Document builder that supports both adding and updating fields.
- *
- * This class fills a similar role to BSONObjBuilder, but allows you to
- * change existing fields and more easily write to sub-Documents.
- *
- * To preserve the immutability of Documents, MutableDocument will
- * shallow-clone its storage on write (COW) if it is shared with any other
- * Documents.
+inline bool operator==(const Document& l, const Document& r) {
+ return Document::compare(l, r) == 0;
+}
+inline bool operator!=(const Document& l, const Document& r) {
+ return Document::compare(l, r) != 0;
+}
+inline bool operator<(const Document& l, const Document& r) {
+ return Document::compare(l, r) < 0;
+}
+inline bool operator<=(const Document& l, const Document& r) {
+ return Document::compare(l, r) <= 0;
+}
+inline bool operator>(const Document& l, const Document& r) {
+ return Document::compare(l, r) > 0;
+}
+inline bool operator>=(const Document& l, const Document& r) {
+ return Document::compare(l, r) >= 0;
+}
+
+
+/** This class is returned by MutableDocument to allow you to modify its values.
+ * You are not allowed to hold variables of this type (enforced by the type system).
+ */
+class MutableValue {
+public:
+ void operator=(const Value& v) {
+ _val = v;
+ }
+
+ /** These are designed to allow things like mutDoc["a"]["b"]["c"] = Value(10);
+ * It is safe to use even on nonexistent fields.
*/
- class MutableDocument {
- MONGO_DISALLOW_COPYING(MutableDocument);
- public:
+ MutableValue operator[](StringData key) {
+ return getField(key);
+ }
+ MutableValue operator[](Position pos) {
+ return getField(pos);
+ }
- /** Create a new empty Document.
- *
- * @param expectedFields a hint at what the number of fields will be, if known.
- * this can be used to increase memory allocation efficiency. There is
- * no impact on correctness if this field over or under estimates.
- *
- * TODO: find some way to convey field-name sizes to make even more efficient
- */
- MutableDocument() :_storageHolder(NULL), _storage(_storageHolder) {}
- explicit MutableDocument(size_t expectedFields);
-
- /// No copy yet. Copy-on-write. See storage()
- explicit MutableDocument(const Document& d) : _storageHolder(NULL)
- , _storage(_storageHolder) {
- reset(d);
+ MutableValue getField(StringData key);
+ MutableValue getField(Position pos);
+
+private:
+ friend class MutableDocument;
+
+ /// can only be constructed or copied by self and friends
+ MutableValue(const MutableValue& other) : _val(other._val) {}
+ explicit MutableValue(Value& val) : _val(val) {}
+
+ /// Used by MutableDocument(MutableValue)
+ const RefCountable*& getDocPtr() {
+ if (_val.getType() != Object || _val._storage.genericRCPtr == NULL) {
+            // If the current value isn't an object, we replace it with an Object-typed Value.
+ // Note that we can't just use Document() here because that is a NULL pointer and
+ // Value doesn't refcount NULL pointers. This led to a memory leak (SERVER-10554)
+ // because MutableDocument::newStorage() would set a non-NULL pointer into the Value
+ // without setting the refCounter bit. While allocating a DocumentStorage here could
+ // result in an allocation where none is needed, in practice this is only called
+ // when we are about to add a field to the sub-document so this just changes where
+ // the allocation is done.
+ _val = Value(Document(new DocumentStorage()));
}
- ~MutableDocument() {
- if (_storageHolder)
- intrusive_ptr_release(_storageHolder);
- }
+ return _val._storage.genericRCPtr;
+ }
- /** Replace the current base Document with the argument
- *
- * All Positions from the passed in Document are valid and refer to the
- * same field in this MutableDocument.
- */
- void reset(const Document& d=Document()) { reset(d._storage.get()); }
-
- /** Add the given field to the Document.
- *
- * BSON documents' fields are ordered; the new Field will be
- * appended to the current list of fields.
- *
- * Unlike getField/setField, addField does not look for a field with the
- * same name and therefore cannot be used to update fields.
- *
- * It is an error to add a field that has the same name as another field.
- *
- * TODO: This is currently allowed but getField only gets first field.
- * Decide what level of support is needed for duplicate fields.
- * If duplicates are not allowed, consider removing this method.
- */
- void addField(StringData fieldName, const Value& val) {
- storage().appendField(fieldName) = val;
- }
+ MutableValue& operator=(const MutableValue&); // not assignable with another MutableValue
- /** Update field by key. If there is no field with that key, add one.
- *
- * If the new value is missing(), the field is logically removed.
- */
- MutableValue operator[] (StringData key) { return getField(key); }
- void setField(StringData key, const Value& val) { getField(key) = val; }
- MutableValue getField(StringData key) {
- return MutableValue(storage().getField(key));
- }
+ Value& _val;
+};
- /// Update field by Position. Must already be a valid Position.
- MutableValue operator[] (Position pos) { return getField(pos); }
- void setField(Position pos, const Value& val) { getField(pos) = val; }
- MutableValue getField(Position pos) {
- return MutableValue(storage().getField(pos).val);
- }
+/** MutableDocument is a Document builder that supports both adding and updating fields.
+ *
+ * This class fills a similar role to BSONObjBuilder, but allows you to
+ * change existing fields and more easily write to sub-Documents.
+ *
+ * To preserve the immutability of Documents, MutableDocument will
+ * shallow-clone its storage on write (COW) if it is shared with any other
+ * Documents.
+ */
+class MutableDocument {
+ MONGO_DISALLOW_COPYING(MutableDocument);
- /// Logically remove a field. Note that memory usage does not decrease.
- void remove(StringData key) { getField(key) = Value(); }
-
- /** Gets/Sets a nested field given a path.
- *
- * All fields along path are created as empty Documents if they don't exist
- * or are any other type.
- */
- MutableValue getNestedField(const FieldPath& dottedField);
- void setNestedField(const FieldPath& dottedField, const Value& val) {
- getNestedField(dottedField) = val;
- }
+public:
+ /** Create a new empty Document.
+ *
+ * @param expectedFields a hint at what the number of fields will be, if known.
+ * this can be used to increase memory allocation efficiency. There is
+ * no impact on correctness if this field over or under estimates.
+ *
+ * TODO: find some way to convey field-name sizes to make even more efficient
+ */
+ MutableDocument() : _storageHolder(NULL), _storage(_storageHolder) {}
+ explicit MutableDocument(size_t expectedFields);
- /// Takes positions vector from Document::getNestedField. All fields in path must exist.
- MutableValue getNestedField(const std::vector<Position>& positions);
- void setNestedField(const std::vector<Position>& positions, const Value& val) {
- getNestedField(positions) = val;
- }
+ /// No copy yet. Copy-on-write. See storage()
+ explicit MutableDocument(const Document& d) : _storageHolder(NULL), _storage(_storageHolder) {
+ reset(d);
+ }
- /**
- * Copies all metadata from source if it has any.
- * Note: does not clear metadata from this.
- */
- void copyMetaDataFrom(const Document& source) {
- storage().copyMetaDataFrom(source.storage());
- }
+ ~MutableDocument() {
+ if (_storageHolder)
+ intrusive_ptr_release(_storageHolder);
+ }
- void setTextScore(double score) { storage().setTextScore(score); }
-
- /** Convert to a read-only document and release reference.
- *
- * Call this to indicate that you are done with this Document and will
- * not be making further changes from this MutableDocument.
- *
- * TODO: there are some optimizations that may make sense at freeze time.
- */
- Document freeze() {
- // This essentially moves _storage into a new Document by way of temp.
- Document ret;
- boost::intrusive_ptr<const DocumentStorage> temp (storagePtr(), /*inc_ref_count=*/false);
- temp.swap(ret._storage);
- _storage = NULL;
- return ret;
- }
+ /** Replace the current base Document with the argument
+ *
+ * All Positions from the passed in Document are valid and refer to the
+ * same field in this MutableDocument.
+ */
+ void reset(const Document& d = Document()) {
+ reset(d._storage.get());
+ }
- /// Used to simplify the common pattern of creating a value of the document.
- Value freezeToValue() {
- return Value(freeze());
- }
+ /** Add the given field to the Document.
+ *
+ * BSON documents' fields are ordered; the new Field will be
+ * appended to the current list of fields.
+ *
+ * Unlike getField/setField, addField does not look for a field with the
+ * same name and therefore cannot be used to update fields.
+ *
+ * It is an error to add a field that has the same name as another field.
+ *
+     * TODO: This is currently allowed but getField only gets the first field.
+ * Decide what level of support is needed for duplicate fields.
+ * If duplicates are not allowed, consider removing this method.
+ */
+ void addField(StringData fieldName, const Value& val) {
+ storage().appendField(fieldName) = val;
+ }
- /** Borrow a readable reference to this Document.
- *
- * Note that unlike freeze(), this indicates intention to continue
- * modifying this document. The returned Document will not observe
- * future changes to this MutableDocument.
- */
- Document peek() {
- return Document(storagePtr());
- }
+ /** Update field by key. If there is no field with that key, add one.
+ *
+ * If the new value is missing(), the field is logically removed.
+ */
+ MutableValue operator[](StringData key) {
+ return getField(key);
+ }
+ void setField(StringData key, const Value& val) {
+ getField(key) = val;
+ }
+ MutableValue getField(StringData key) {
+ return MutableValue(storage().getField(key));
+ }
- private:
- friend class MutableValue; // for access to next constructor
- explicit MutableDocument(MutableValue mv)
- : _storageHolder(NULL)
- , _storage(mv.getDocPtr())
- {}
-
- void reset(const DocumentStorage* ds) {
- if (_storage) intrusive_ptr_release(_storage);
- _storage = ds;
- if (_storage) intrusive_ptr_add_ref(_storage);
- }
+ /// Update field by Position. Must already be a valid Position.
+ MutableValue operator[](Position pos) {
+ return getField(pos);
+ }
+ void setField(Position pos, const Value& val) {
+ getField(pos) = val;
+ }
+ MutableValue getField(Position pos) {
+ return MutableValue(storage().getField(pos).val);
+ }
- // This is split into 3 functions to speed up the fast-path
- DocumentStorage& storage() {
- if (MONGO_unlikely( !_storage ))
- return newStorage();
+ /// Logically remove a field. Note that memory usage does not decrease.
+ void remove(StringData key) {
+ getField(key) = Value();
+ }
- if (MONGO_unlikely( _storage->isShared() ))
- return clonedStorage();
+ /** Gets/Sets a nested field given a path.
+ *
+     * All fields along the path are created as empty Documents if they
+     * don't exist or are of any other type.
+ */
+ MutableValue getNestedField(const FieldPath& dottedField);
+ void setNestedField(const FieldPath& dottedField, const Value& val) {
+ getNestedField(dottedField) = val;
+ }
- // This function exists to ensure this is safe
- return const_cast<DocumentStorage&>(*storagePtr());
- }
- DocumentStorage& newStorage() {
- reset(new DocumentStorage);
- return const_cast<DocumentStorage&>(*storagePtr());
- }
- DocumentStorage& clonedStorage() {
- reset(storagePtr()->clone().get());
- return const_cast<DocumentStorage&>(*storagePtr());
- }
+ /// Takes positions vector from Document::getNestedField. All fields in path must exist.
+ MutableValue getNestedField(const std::vector<Position>& positions);
+ void setNestedField(const std::vector<Position>& positions, const Value& val) {
+ getNestedField(positions) = val;
+ }
- // recursive helpers for same-named public methods
- MutableValue getNestedFieldHelper(const FieldPath& dottedField, size_t level);
- MutableValue getNestedFieldHelper(const std::vector<Position>& positions, size_t level);
+ /**
+ * Copies all metadata from source if it has any.
+ * Note: does not clear metadata from this.
+ */
+ void copyMetaDataFrom(const Document& source) {
+ storage().copyMetaDataFrom(source.storage());
+ }
- // this should only be called by storage methods and peek/freeze
- const DocumentStorage* storagePtr() const {
- dassert(!_storage || typeid(*_storage) == typeid(const DocumentStorage));
- return static_cast<const DocumentStorage*>(_storage);
- }
+ void setTextScore(double score) {
+ storage().setTextScore(score);
+ }
- // These are both const to prevent modifications bypassing storage() method.
- // They always point to NULL or an object with dynamic type DocumentStorage.
- const RefCountable* _storageHolder; // Only used in constructors and destructor
- const RefCountable*& _storage; // references either above member or genericRCPtr in a Value
- };
+ /** Convert to a read-only document and release reference.
+ *
+ * Call this to indicate that you are done with this Document and will
+ * not be making further changes from this MutableDocument.
+ *
+ * TODO: there are some optimizations that may make sense at freeze time.
+ */
+ Document freeze() {
+ // This essentially moves _storage into a new Document by way of temp.
+ Document ret;
+ boost::intrusive_ptr<const DocumentStorage> temp(storagePtr(), /*inc_ref_count=*/false);
+ temp.swap(ret._storage);
+ _storage = NULL;
+ return ret;
+ }
- /// This is the public iterator over a document
- class FieldIterator {
- public:
- explicit FieldIterator(const Document& doc)
- : _doc(doc)
- , _it(_doc.storage().iterator())
- {}
+ /// Used to simplify the common pattern of creating a value of the document.
+ Value freezeToValue() {
+ return Value(freeze());
+ }
- /// Ask if there are more fields to return.
- bool more() const { return !_it.atEnd(); }
+ /** Borrow a readable reference to this Document.
+ *
+ * Note that unlike freeze(), this indicates intention to continue
+ * modifying this document. The returned Document will not observe
+ * future changes to this MutableDocument.
+ */
+ Document peek() {
+ return Document(storagePtr());
+ }
- /// Get next item and advance iterator
- Document::FieldPair next() {
- verify(more());
+private:
+ friend class MutableValue; // for access to next constructor
+ explicit MutableDocument(MutableValue mv) : _storageHolder(NULL), _storage(mv.getDocPtr()) {}
- Document::FieldPair fp (_it->nameSD(), _it->val);
- _it.advance();
- return fp;
- }
+ void reset(const DocumentStorage* ds) {
+ if (_storage)
+ intrusive_ptr_release(_storage);
+ _storage = ds;
+ if (_storage)
+ intrusive_ptr_add_ref(_storage);
+ }
- private:
- // We'll hang on to the original document to ensure we keep its storage alive
- Document _doc;
- DocumentStorageIterator _it;
- };
+ // This is split into 3 functions to speed up the fast-path
+ DocumentStorage& storage() {
+ if (MONGO_unlikely(!_storage))
+ return newStorage();
- /// Macro to create Document literals. Syntax is the same as the BSON("name" << 123) macro.
-#define DOC(fields) ((DocumentStream() << fields).done())
+ if (MONGO_unlikely(_storage->isShared()))
+ return clonedStorage();
- /** Macro to create Array-typed Value literals.
- * Syntax is the same as the BSON_ARRAY(123 << "foo") macro.
- */
-#define DOC_ARRAY(fields) ((ValueArrayStream() << fields).done())
+ // This function exists to ensure this is safe
+ return const_cast<DocumentStorage&>(*storagePtr());
+ }
+ DocumentStorage& newStorage() {
+ reset(new DocumentStorage);
+ return const_cast<DocumentStorage&>(*storagePtr());
+ }
+ DocumentStorage& clonedStorage() {
+ reset(storagePtr()->clone().get());
+ return const_cast<DocumentStorage&>(*storagePtr());
+ }
+ // recursive helpers for same-named public methods
+ MutableValue getNestedFieldHelper(const FieldPath& dottedField, size_t level);
+ MutableValue getNestedFieldHelper(const std::vector<Position>& positions, size_t level);
- // These classes are only for the implementation of the DOC and DOC_ARRAY macros.
- // They should not be used for any other reason.
- class DocumentStream {
- // The stream alternates between DocumentStream taking a fieldname
- // and ValueStream taking a Value.
- class ValueStream {
- public:
- ValueStream(DocumentStream& builder) :builder(builder) {}
+ // this should only be called by storage methods and peek/freeze
+ const DocumentStorage* storagePtr() const {
+ dassert(!_storage || typeid(*_storage) == typeid(const DocumentStorage));
+ return static_cast<const DocumentStorage*>(_storage);
+ }
- DocumentStream& operator << (const Value& val) {
- builder._md[name] = val;
- return builder;
- }
+ // These are both const to prevent modifications bypassing storage() method.
+ // They always point to NULL or an object with dynamic type DocumentStorage.
+ const RefCountable* _storageHolder; // Only used in constructors and destructor
+ const RefCountable*& _storage; // references either above member or genericRCPtr in a Value
+};
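For orientation, a minimal usage sketch of the builder API above (field names and values invented for illustration; not part of this patch):

    // Hypothetical example: build, update, nest, then freeze.
    MutableDocument md(3);            // hint: roughly three top-level fields expected
    md.addField("a", Value(1));       // append a new field
    md["a"] = Value(2);               // update it in place through MutableValue
    md["sub"]["b"] = Value(10);       // nested assignment creates the "sub" sub-document as needed
    md.remove("a");                   // logically removed; the memory is not reclaimed
    Document doc = md.freeze();       // md should not be used for further edits after this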
- /// support anything directly supported by a value constructor
- template <typename T>
- DocumentStream& operator << (const T& val) {
- return *this << Value(val);
- }
+/// This is the public iterator over a document
+class FieldIterator {
+public:
+ explicit FieldIterator(const Document& doc) : _doc(doc), _it(_doc.storage().iterator()) {}
- StringData name;
- DocumentStream& builder;
- };
+ /// Ask if there are more fields to return.
+ bool more() const {
+ return !_it.atEnd();
+ }
- public:
- DocumentStream() :_stream(*this) {}
+ /// Get next item and advance iterator
+ Document::FieldPair next() {
+ verify(more());
- ValueStream& operator << (StringData name) {
- _stream.name = name;
- return _stream;
- }
+ Document::FieldPair fp(_it->nameSD(), _it->val);
+ _it.advance();
+ return fp;
+ }
- Document done() { return _md.freeze(); }
+private:
+ // We'll hang on to the original document to ensure we keep its storage alive
+ Document _doc;
+ DocumentStorageIterator _it;
+};
+
+/// Macro to create Document literals. Syntax is the same as the BSON("name" << 123) macro.
+#define DOC(fields) ((DocumentStream() << fields).done())
+
+/** Macro to create Array-typed Value literals.
+ * Syntax is the same as the BSON_ARRAY(123 << "foo") macro.
+ */
+#define DOC_ARRAY(fields) ((ValueArrayStream() << fields).done())
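A quick sketch of the two macros in use (literal values invented for illustration), mirroring the BSON()/BSON_ARRAY() syntax they are modeled on:

    // Hypothetical literals; DOC alternates field names and values, DOC_ARRAY takes values only.
    Document d = DOC("a" << 1 << "arr" << DOC_ARRAY(1 << "foo" << DOC("b" << 2)));
    Value tags = DOC_ARRAY("x" << "y");   // an Array-typed Value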
- private:
- ValueStream _stream;
- MutableDocument _md;
- };
- class ValueArrayStream {
+// These classes are only for the implementation of the DOC and DOC_ARRAY macros.
+// They should not be used for any other reason.
+class DocumentStream {
+ // The stream alternates between DocumentStream taking a fieldname
+ // and ValueStream taking a Value.
+ class ValueStream {
public:
- ValueArrayStream& operator << (const Value& val) {
- _array.push_back(val);
- return *this;
+ ValueStream(DocumentStream& builder) : builder(builder) {}
+
+ DocumentStream& operator<<(const Value& val) {
+ builder._md[name] = val;
+ return builder;
}
/// support anything directly supported by a value constructor
template <typename T>
- ValueArrayStream& operator << (const T& val) {
+ DocumentStream& operator<<(const T& val) {
return *this << Value(val);
}
- Value done() { return Value(std::move(_array)); }
-
- private:
- std::vector<Value> _array;
+ StringData name;
+ DocumentStream& builder;
};
- inline void swap(mongo::Document& lhs, mongo::Document& rhs) { lhs.swap(rhs); }
+public:
+ DocumentStream() : _stream(*this) {}
-/* ======================= INLINED IMPLEMENTATIONS ========================== */
+ ValueStream& operator<<(StringData name) {
+ _stream.name = name;
+ return _stream;
+ }
- inline FieldIterator Document::fieldIterator() const {
- return FieldIterator(*this);
+ Document done() {
+ return _md.freeze();
}
- inline MutableValue MutableValue::getField(Position pos) {
- return MutableDocument(*this).getField(pos);
+private:
+ ValueStream _stream;
+ MutableDocument _md;
+};
+
+class ValueArrayStream {
+public:
+ ValueArrayStream& operator<<(const Value& val) {
+ _array.push_back(val);
+ return *this;
}
- inline MutableValue MutableValue::getField(StringData key) {
- return MutableDocument(*this).getField(key);
+
+ /// support anything directly supported by a value constructor
+ template <typename T>
+ ValueArrayStream& operator<<(const T& val) {
+ return *this << Value(val);
+ }
+
+ Value done() {
+ return Value(std::move(_array));
}
+
+private:
+ std::vector<Value> _array;
+};
+
+inline void swap(mongo::Document& lhs, mongo::Document& rhs) {
+ lhs.swap(rhs);
+}
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+inline FieldIterator Document::fieldIterator() const {
+ return FieldIterator(*this);
+}
+
+inline MutableValue MutableValue::getField(Position pos) {
+ return MutableDocument(*this).getField(pos);
+}
+inline MutableValue MutableValue::getField(StringData key) {
+ return MutableDocument(*this).getField(key);
+}
}
diff --git a/src/mongo/db/pipeline/document_internal.h b/src/mongo/db/pipeline/document_internal.h
index 93188ffb6f3..fa5988611b5 100644
--- a/src/mongo/db/pipeline/document_internal.h
+++ b/src/mongo/db/pipeline/document_internal.h
@@ -36,300 +36,332 @@
#include "mongo/db/pipeline/value.h"
namespace mongo {
- /** Helper class to make the position in a document abstract
- * Warning: This is NOT guaranteed to be the ordered position.
- * eg. the first field may not be at Position(0)
- */
- class Position {
- public:
- // This represents "not found" similar to std::string::npos
- Position() :index(static_cast<unsigned>(-1)) {}
- bool found() const { return index != Position().index; }
-
- bool operator == (Position rhs) const { return this->index == rhs.index; }
- bool operator != (Position rhs) const { return !(*this == rhs); }
-
- // For debugging and ASSERT_EQUALS in tests.
- template <typename OStream>
- friend OStream& operator<<(OStream& stream, Position p) { return stream << p.index; }
-
- private:
- explicit Position(size_t i) :index(i) {}
- unsigned index;
- friend class DocumentStorage;
- friend class DocumentStorageIterator;
- };
+/** Helper class to make the position in a document abstract
+ * Warning: This is NOT guaranteed to be the ordered position.
+ * e.g. the first field may not be at Position(0)
+ */
+class Position {
+public:
+ // This represents "not found" similar to std::string::npos
+ Position() : index(static_cast<unsigned>(-1)) {}
+ bool found() const {
+ return index != Position().index;
+ }
+
+ bool operator==(Position rhs) const {
+ return this->index == rhs.index;
+ }
+ bool operator!=(Position rhs) const {
+ return !(*this == rhs);
+ }
+
+ // For debugging and ASSERT_EQUALS in tests.
+ template <typename OStream>
+ friend OStream& operator<<(OStream& stream, Position p) {
+ return stream << p.index;
+ }
+
+private:
+ explicit Position(size_t i) : index(i) {}
+ unsigned index;
+ friend class DocumentStorage;
+ friend class DocumentStorageIterator;
+};
#pragma pack(1)
- /** This is how values are stored in the DocumentStorage buffer
- * Internal class. Consumers shouldn't care about this.
- */
- class ValueElement {
- MONGO_DISALLOW_COPYING(ValueElement);
- public:
- Value val;
- Position nextCollision; // Position of next field with same hashBucket
- const int nameLen; // doesn't include '\0'
- const char _name[1]; // pointer to start of name (use nameSD instead)
-
- ValueElement* next() {
- return align(plusBytes(sizeof(ValueElement) + nameLen));
- }
-
- const ValueElement* next() const {
- return align(plusBytes(sizeof(ValueElement) + nameLen));
- }
-
- StringData nameSD() const { return StringData(_name, nameLen); }
-
-
- // helpers for doing pointer arithmetic with this class
- // Note: These don't dereference 'this' so they are safe to use with NULL
- char* ptr() { return reinterpret_cast<char*>(this); }
- const char* ptr() const { return reinterpret_cast<const char*>(this); }
- const ValueElement* plusBytes(size_t bytes) const {
- return reinterpret_cast<const ValueElement*>(ptr() + bytes);
- }
- ValueElement* plusBytes(size_t bytes) {
- return reinterpret_cast<ValueElement*>(ptr() + bytes);
- }
-
- // Round number or pointer up to N-byte boundary. No change if already aligned.
- template <typename T>
- static T align(T size) {
- const intmax_t ALIGNMENT = 8; // must be power of 2 and <= 16 (malloc alignment)
- // Can't use c++ cast because of conversion between intmax_t and both ints and pointers
- return (T)(((intmax_t)(size) + (ALIGNMENT-1)) & ~(ALIGNMENT-1));
- }
-
- private:
- ValueElement(); // this class should never be constructed
- ~ValueElement(); // or destructed
- };
- // Real size is sizeof(ValueElement) + nameLen
+/** This is how values are stored in the DocumentStorage buffer
+ * Internal class. Consumers shouldn't care about this.
+ */
+class ValueElement {
+ MONGO_DISALLOW_COPYING(ValueElement);
+
+public:
+ Value val;
+ Position nextCollision; // Position of next field with same hashBucket
+ const int nameLen; // doesn't include '\0'
+ const char _name[1]; // pointer to start of name (use nameSD instead)
+
+ ValueElement* next() {
+ return align(plusBytes(sizeof(ValueElement) + nameLen));
+ }
+
+ const ValueElement* next() const {
+ return align(plusBytes(sizeof(ValueElement) + nameLen));
+ }
+
+ StringData nameSD() const {
+ return StringData(_name, nameLen);
+ }
+
+
+ // helpers for doing pointer arithmetic with this class
+ // Note: These don't dereference 'this' so they are safe to use with NULL
+ char* ptr() {
+ return reinterpret_cast<char*>(this);
+ }
+ const char* ptr() const {
+ return reinterpret_cast<const char*>(this);
+ }
+ const ValueElement* plusBytes(size_t bytes) const {
+ return reinterpret_cast<const ValueElement*>(ptr() + bytes);
+ }
+ ValueElement* plusBytes(size_t bytes) {
+ return reinterpret_cast<ValueElement*>(ptr() + bytes);
+ }
+
+ // Round number or pointer up to N-byte boundary. No change if already aligned.
+ template <typename T>
+ static T align(T size) {
+ const intmax_t ALIGNMENT = 8; // must be power of 2 and <= 16 (malloc alignment)
+ // Can't use c++ cast because of conversion between intmax_t and both ints and pointers
+ return (T)(((intmax_t)(size) + (ALIGNMENT - 1)) & ~(ALIGNMENT - 1));
+ }
+
+private:
+ ValueElement(); // this class should never be constructed
+ ~ValueElement(); // or destructed
+};
+// Real size is sizeof(ValueElement) + nameLen
#pragma pack()
- BOOST_STATIC_ASSERT(sizeof(ValueElement) == (sizeof(Value) +
- sizeof(Position) +
- sizeof(int) +
- 1));
-
- // This is an internal class for Document. See FieldIterator for the public version.
- class DocumentStorageIterator {
- public:
- // DocumentStorage::iterator() and iteratorAll() are easier to use
- DocumentStorageIterator(const ValueElement* first,
- const ValueElement* end,
- bool includeMissing)
- : _first(first)
- , _it(first)
- , _end(end)
- , _includeMissing(includeMissing) {
- if (!_includeMissing)
- skipMissing();
- }
-
- bool atEnd() const { return _it == _end; }
-
- const ValueElement& get() const { return *_it; }
-
- Position position() const { return Position(_it->ptr() - _first->ptr()); }
-
- void advance() {
+BOOST_STATIC_ASSERT(sizeof(ValueElement) == (sizeof(Value) + sizeof(Position) + sizeof(int) + 1));
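As a standalone illustration of the align() helper above (not code from this patch), rounding up to the 8-byte ALIGNMENT is simply (n + 7) & ~7:

    // A 13-byte span rounds up to the next 8-byte boundary; already-aligned sizes are unchanged.
    BOOST_STATIC_ASSERT(((13 + 7) & ~7) == 16);
    BOOST_STATIC_ASSERT(((16 + 7) & ~7) == 16);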
+
+// This is an internal class for Document. See FieldIterator for the public version.
+class DocumentStorageIterator {
+public:
+ // DocumentStorage::iterator() and iteratorAll() are easier to use
+ DocumentStorageIterator(const ValueElement* first, const ValueElement* end, bool includeMissing)
+ : _first(first), _it(first), _end(end), _includeMissing(includeMissing) {
+ if (!_includeMissing)
+ skipMissing();
+ }
+
+ bool atEnd() const {
+ return _it == _end;
+ }
+
+ const ValueElement& get() const {
+ return *_it;
+ }
+
+ Position position() const {
+ return Position(_it->ptr() - _first->ptr());
+ }
+
+ void advance() {
+ advanceOne();
+ if (!_includeMissing)
+ skipMissing();
+ }
+
+ const ValueElement* operator->() {
+ return _it;
+ }
+ const ValueElement& operator*() {
+ return *_it;
+ }
+
+private:
+ void advanceOne() {
+ _it = _it->next();
+ }
+
+ void skipMissing() {
+ while (!atEnd() && _it->val.missing()) {
advanceOne();
- if (!_includeMissing)
- skipMissing();
- }
-
- const ValueElement* operator-> () { return _it; }
- const ValueElement& operator* () { return *_it; }
-
- private:
- void advanceOne() {
- _it = _it->next();
- }
-
- void skipMissing() {
- while (!atEnd() && _it->val.missing()) {
- advanceOne();
- }
- }
-
- const ValueElement* _first;
- const ValueElement* _it;
- const ValueElement* _end;
- bool _includeMissing;
- };
-
- /// Storage class used by both Document and MutableDocument
- class DocumentStorage : public RefCountable {
- public:
- // Note: default constructor should zero-init to support emptyDoc()
- DocumentStorage() : _buffer(NULL)
- , _bufferEnd(NULL)
- , _usedBytes(0)
- , _numFields(0)
- , _hashTabMask(0)
- , _hasTextScore(false)
- , _textScore(0)
- {}
- ~DocumentStorage();
-
- static const DocumentStorage& emptyDoc() {
- static const char emptyBytes[sizeof(DocumentStorage)] = {0};
- return *reinterpret_cast<const DocumentStorage*>(emptyBytes);
- }
-
- size_t size() const {
- // can't use _numFields because it includes removed Fields
- size_t count = 0;
- for (DocumentStorageIterator it = iterator(); !it.atEnd(); it.advance())
- count++;
- return count;
- }
-
- /// Returns the position of the next field to be inserted
- Position getNextPosition() const { return Position(_usedBytes); }
-
- /// Returns the position of the named field (may be missing) or Position()
- Position findField(StringData name) const;
-
- // Document uses these
- const ValueElement& getField(Position pos) const {
- verify(pos.found());
- return *(_firstElement->plusBytes(pos.index));
- }
- Value getField(StringData name) const {
- Position pos = findField(name);
- if (!pos.found())
- return Value();
- return getField(pos).val;
- }
-
- // MutableDocument uses these
- ValueElement& getField(Position pos) {
- verify(pos.found());
- return *(_firstElement->plusBytes(pos.index));
- }
- Value& getField(StringData name) {
- Position pos = findField(name);
- if (!pos.found())
- return appendField(name); // TODO: find a way to avoid hashing name twice
- return getField(pos).val;
- }
-
- /// Adds a new field with missing Value at the end of the document
- Value& appendField(StringData name);
-
- /** Preallocates space for fields. Use this to attempt to prevent buffer growth.
- * This is only valid to call before anything is added to the document.
- */
- void reserveFields(size_t expectedFields);
-
- /// This skips missing values
- DocumentStorageIterator iterator() const {
- return DocumentStorageIterator(_firstElement, end(), false);
}
+ }
+
+ const ValueElement* _first;
+ const ValueElement* _it;
+ const ValueElement* _end;
+ bool _includeMissing;
+};
+
+/// Storage class used by both Document and MutableDocument
+class DocumentStorage : public RefCountable {
+public:
+ // Note: default constructor should zero-init to support emptyDoc()
+ DocumentStorage()
+ : _buffer(NULL),
+ _bufferEnd(NULL),
+ _usedBytes(0),
+ _numFields(0),
+ _hashTabMask(0),
+ _hasTextScore(false),
+ _textScore(0) {}
+ ~DocumentStorage();
+
+ static const DocumentStorage& emptyDoc() {
+ static const char emptyBytes[sizeof(DocumentStorage)] = {0};
+ return *reinterpret_cast<const DocumentStorage*>(emptyBytes);
+ }
+
+ size_t size() const {
+ // can't use _numFields because it includes removed Fields
+ size_t count = 0;
+ for (DocumentStorageIterator it = iterator(); !it.atEnd(); it.advance())
+ count++;
+ return count;
+ }
+
+ /// Returns the position of the next field to be inserted
+ Position getNextPosition() const {
+ return Position(_usedBytes);
+ }
+
+ /// Returns the position of the named field (may be missing) or Position()
+ Position findField(StringData name) const;
+
+ // Document uses these
+ const ValueElement& getField(Position pos) const {
+ verify(pos.found());
+ return *(_firstElement->plusBytes(pos.index));
+ }
+ Value getField(StringData name) const {
+ Position pos = findField(name);
+ if (!pos.found())
+ return Value();
+ return getField(pos).val;
+ }
+
+ // MutableDocument uses these
+ ValueElement& getField(Position pos) {
+ verify(pos.found());
+ return *(_firstElement->plusBytes(pos.index));
+ }
+ Value& getField(StringData name) {
+ Position pos = findField(name);
+ if (!pos.found())
+ return appendField(name); // TODO: find a way to avoid hashing name twice
+ return getField(pos).val;
+ }
+
+ /// Adds a new field with missing Value at the end of the document
+ Value& appendField(StringData name);
+
+ /** Preallocates space for fields. Use this to attempt to prevent buffer growth.
+ * This is only valid to call before anything is added to the document.
+ */
+ void reserveFields(size_t expectedFields);
- /// This includes missing values
- DocumentStorageIterator iteratorAll() const {
- return DocumentStorageIterator(_firstElement, end(), true);
- }
+ /// This skips missing values
+ DocumentStorageIterator iterator() const {
+ return DocumentStorageIterator(_firstElement, end(), false);
+ }
- /// Shallow copy of this. Caller owns memory.
- boost::intrusive_ptr<DocumentStorage> clone() const;
+ /// This includes missing values
+ DocumentStorageIterator iteratorAll() const {
+ return DocumentStorageIterator(_firstElement, end(), true);
+ }
- size_t allocatedBytes() const {
- return !_buffer ? 0 : (_bufferEnd - _buffer + hashTabBytes());
- }
+ /// Shallow copy of this. Caller owns memory.
+ boost::intrusive_ptr<DocumentStorage> clone() const;
- /**
- * Copies all metadata from source if it has any.
- * Note: does not clear metadata from this.
- */
- void copyMetaDataFrom(const DocumentStorage& source) {
- if (source.hasTextScore()) {
- setTextScore(source.getTextScore());
- }
- }
+ size_t allocatedBytes() const {
+ return !_buffer ? 0 : (_bufferEnd - _buffer + hashTabBytes());
+ }
- bool hasTextScore() const { return _hasTextScore; }
- double getTextScore() const { return _textScore; }
- void setTextScore(double score) {
- _hasTextScore = true;
- _textScore = score;
+ /**
+ * Copies all metadata from source if it has any.
+ * Note: does not clear metadata from this.
+ */
+ void copyMetaDataFrom(const DocumentStorage& source) {
+ if (source.hasTextScore()) {
+ setTextScore(source.getTextScore());
}
+ }
+
+ bool hasTextScore() const {
+ return _hasTextScore;
+ }
+ double getTextScore() const {
+ return _textScore;
+ }
+ void setTextScore(double score) {
+ _hasTextScore = true;
+ _textScore = score;
+ }
+
+private:
+ /// Same as lastElement->next() or firstElement() if empty.
+ const ValueElement* end() const {
+ return _firstElement->plusBytes(_usedBytes);
+ }
+
+ /// Allocates space in _buffer. Copies existing data if there is any.
+ void alloc(unsigned newSize);
+
+ /// Call after adding field to _buffer and increasing _numFields
+ void addFieldToHashTable(Position pos);
+
+ // assumes _hashTabMask is (power of two) - 1
+ unsigned hashTabBuckets() const {
+ return _hashTabMask + 1;
+ }
+ unsigned hashTabBytes() const {
+ return hashTabBuckets() * sizeof(Position);
+ }
+
+ /// rehash on buffer growth if load-factor > .5 (attempt to keep lf < 1 when full)
+ bool needRehash() const {
+ return _numFields * 2 > hashTabBuckets();
+ }
+
+ /// Initialize empty hash table
+ void hashTabInit() {
+ memset(_hashTab, -1, hashTabBytes());
+ }
+
+ static unsigned hashKey(StringData name) {
+ // TODO consider FNV-1a once we have a better benchmark corpus
+ unsigned out;
+ MurmurHash3_x86_32(name.rawData(), name.size(), 0, &out);
+ return out;
+ }
+
+ unsigned bucketForKey(StringData name) const {
+ return hashKey(name) & _hashTabMask;
+ }
+
+ /// Adds all fields to the hash table
+ void rehash() {
+ hashTabInit();
+ for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance())
+ addFieldToHashTable(it.position());
+ }
+
+ enum {
+ HASH_TAB_INIT_SIZE = 8, // must be power of 2
+ HASH_TAB_MIN = 4, // don't hash fields for docs smaller than this
+ // set to 1 to always hash
+ };
- private:
-
- /// Same as lastElement->next() or firstElement() if empty.
- const ValueElement* end() const { return _firstElement->plusBytes(_usedBytes); }
-
- /// Allocates space in _buffer. Copies existing data if there is any.
- void alloc(unsigned newSize);
-
- /// Call after adding field to _buffer and increasing _numFields
- void addFieldToHashTable(Position pos);
-
- // assumes _hashTabMask is (power of two) - 1
- unsigned hashTabBuckets() const { return _hashTabMask + 1; }
- unsigned hashTabBytes() const { return hashTabBuckets() * sizeof(Position); }
-
- /// rehash on buffer growth if load-factor > .5 (attempt to keep lf < 1 when full)
- bool needRehash() const { return _numFields*2 > hashTabBuckets(); }
-
- /// Initialize empty hash table
- void hashTabInit() { memset(_hashTab, -1, hashTabBytes()); }
-
- static unsigned hashKey(StringData name) {
- // TODO consider FNV-1a once we have a better benchmark corpus
- unsigned out;
- MurmurHash3_x86_32(name.rawData(), name.size(), 0, &out);
- return out;
- }
+ // _buffer layout:
+ // -------------------------------------------------------------------------------
+ // | ValueElement1 Name1 | ValueElement2 Name2 | ... FREE SPACE ... | Hash Table |
+ // -------------------------------------------------------------------------------
+ // ^ _buffer and _firstElement point here ^
+ // _bufferEnd and _hashTab point here ^
+ //
+ //
+ // When the buffer grows, the hash table moves to the new end.
+ union {
+ char* _buffer;
+ ValueElement* _firstElement;
+ };
- unsigned bucketForKey(StringData name) const {
- return hashKey(name) & _hashTabMask;
- }
+ union {
+ // pointer to "end" of _buffer element space and start of hash table (same position)
+ char* _bufferEnd;
+ Position* _hashTab; // table lazily initialized once _numFields == HASH_TAB_MIN
+ };
- /// Adds all fields to the hash table
- void rehash() {
- hashTabInit();
- for (DocumentStorageIterator it = iteratorAll(); !it.atEnd(); it.advance())
- addFieldToHashTable(it.position());
- }
+ unsigned _usedBytes; // position where next field would start
+ unsigned _numFields; // this includes removed fields
+ unsigned _hashTabMask; // equal to hashTabBuckets()-1 but used more often
- enum {
- HASH_TAB_INIT_SIZE = 8, // must be power of 2
- HASH_TAB_MIN = 4, // don't hash fields for docs smaller than this
- // set to 1 to always hash
- };
-
- // _buffer layout:
- // -------------------------------------------------------------------------------
- // | ValueElement1 Name1 | ValueElement2 Name2 | ... FREE SPACE ... | Hash Table |
- // -------------------------------------------------------------------------------
- // ^ _buffer and _firstElement point here ^
- // _bufferEnd and _hashTab point here ^
- //
- //
- // When the buffer grows, the hash table moves to the new end.
- union {
- char* _buffer;
- ValueElement* _firstElement;
- };
-
- union {
- // pointer to "end" of _buffer element space and start of hash table (same position)
- char* _bufferEnd;
- Position* _hashTab; // table lazily initialized once _numFields == HASH_TAB_MIN
- };
-
- unsigned _usedBytes; // position where next field would start
- unsigned _numFields; // this includes removed fields
- unsigned _hashTabMask; // equal to hashTabBuckets()-1 but used more often
-
- bool _hasTextScore; // When adding more metadata fields, this should become a bitvector
- double _textScore;
- // When adding a field, make sure to update clone() method
- };
+ bool _hasTextScore; // When adding more metadata fields, this should become a bitvector
+ double _textScore;
+ // When adding a field, make sure to update clone() method
+};
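To make the hash-table sizing above concrete (a standalone sketch with invented numbers, not taken from this change): the bucket count is always a power of two, so _hashTabMask picks a bucket with a single AND, and needRehash() reports true once more than half the buckets are occupied.

    // Hypothetical numbers: 8 buckets (HASH_TAB_INIT_SIZE), so the mask is 7.
    unsigned buckets = 8;
    unsigned mask = buckets - 1;                    // plays the role of _hashTabMask
    unsigned h = 0x9e3779b9u;                       // any hash value, e.g. from hashKey()
    unsigned bucket = h & mask;                     // equivalent to h % buckets for powers of two
    bool mustRehash = (5u /*fields*/ * 2 > buckets);  // true: table is rebuilt on the next buffer growth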
}
diff --git a/src/mongo/db/pipeline/document_source.cpp b/src/mongo/db/pipeline/document_source.cpp
index baa3e486784..57d12a7c85c 100644
--- a/src/mongo/db/pipeline/document_source.cpp
+++ b/src/mongo/db/pipeline/document_source.cpp
@@ -34,42 +34,40 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::vector;
+using boost::intrusive_ptr;
+using std::vector;
- DocumentSource::DocumentSource(const intrusive_ptr<ExpressionContext> &pCtx)
- : pSource(NULL)
- , pExpCtx(pCtx)
- {}
+DocumentSource::DocumentSource(const intrusive_ptr<ExpressionContext>& pCtx)
+ : pSource(NULL), pExpCtx(pCtx) {}
- const char *DocumentSource::getSourceName() const {
- static const char unknown[] = "[UNKNOWN]";
- return unknown;
- }
+const char* DocumentSource::getSourceName() const {
+ static const char unknown[] = "[UNKNOWN]";
+ return unknown;
+}
- void DocumentSource::setSource(DocumentSource *pTheSource) {
- verify(!pSource);
- pSource = pTheSource;
- }
+void DocumentSource::setSource(DocumentSource* pTheSource) {
+ verify(!pSource);
+ pSource = pTheSource;
+}
- bool DocumentSource::coalesce(const intrusive_ptr<DocumentSource> &pNextSource) {
- return false;
- }
+bool DocumentSource::coalesce(const intrusive_ptr<DocumentSource>& pNextSource) {
+ return false;
+}
- intrusive_ptr<DocumentSource> DocumentSource::optimize() {
- return this;
- }
+intrusive_ptr<DocumentSource> DocumentSource::optimize() {
+ return this;
+}
- void DocumentSource::dispose() {
- if ( pSource ) {
- pSource->dispose();
- }
+void DocumentSource::dispose() {
+ if (pSource) {
+ pSource->dispose();
}
+}
- void DocumentSource::serializeToArray(vector<Value>& array, bool explain) const {
- Value entry = serialize(explain);
- if (!entry.missing()) {
- array.push_back(entry);
- }
+void DocumentSource::serializeToArray(vector<Value>& array, bool explain) const {
+ Value entry = serialize(explain);
+ if (!entry.missing()) {
+ array.push_back(entry);
}
}
+}
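For context, this is roughly how the default serializeToArray() above is driven when a whole pipeline is rendered (a sketch with a hypothetical 'stages' container, not taken from this change); stages whose serialize() returns a missing Value contribute nothing:

    // 'stages' is assumed to be a std::vector<boost::intrusive_ptr<DocumentSource>>.
    std::vector<Value> serialized;
    for (size_t i = 0; i < stages.size(); i++) {
        stages[i]->serializeToArray(serialized, /*explain=*/false);
    }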
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index 51f5ae7c3b3..4902f8b4a40 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -51,1062 +51,1082 @@
namespace mongo {
- class Accumulator;
- class Document;
- class Expression;
- class ExpressionFieldPath;
- class ExpressionObject;
- class DocumentSourceLimit;
- class PlanExecutor;
-
- class DocumentSource : public IntrusiveCounterUnsigned {
- public:
- virtual ~DocumentSource() {}
-
- /** Returns the next Document if there is one or boost::none if at EOF.
- * Subclasses must call pExpCtx->checkForInterupt().
- */
- virtual boost::optional<Document> getNext() = 0;
-
- /**
- * Inform the source that it is no longer needed and may release its resources. After
- * dispose() is called the source must still be able to handle iteration requests, but may
- * become eof().
- * NOTE: For proper mutex yielding, dispose() must be called on any DocumentSource that will
- * not be advanced until eof(), see SERVER-6123.
- */
- virtual void dispose();
-
- /**
- Get the source's name.
+class Accumulator;
+class Document;
+class Expression;
+class ExpressionFieldPath;
+class ExpressionObject;
+class DocumentSourceLimit;
+class PlanExecutor;
+
+class DocumentSource : public IntrusiveCounterUnsigned {
+public:
+ virtual ~DocumentSource() {}
+
+ /** Returns the next Document if there is one or boost::none if at EOF.
+     * Subclasses must call pExpCtx->checkForInterrupt().
+ */
+ virtual boost::optional<Document> getNext() = 0;
- @returns the std::string name of the source as a constant string;
- this is static, and there's no need to worry about adopting it
- */
- virtual const char *getSourceName() const;
+ /**
+ * Inform the source that it is no longer needed and may release its resources. After
+ * dispose() is called the source must still be able to handle iteration requests, but may
+ * become eof().
+ * NOTE: For proper mutex yielding, dispose() must be called on any DocumentSource that will
+ * not be advanced until eof(), see SERVER-6123.
+ */
+ virtual void dispose();
- /**
- Set the underlying source this source should use to get Documents
- from.
+ /**
+ Get the source's name.
- It is an error to set the source more than once. This is to
- prevent changing sources once the original source has been started;
- this could break the state maintained by the DocumentSource.
+ @returns the std::string name of the source as a constant string;
+ this is static, and there's no need to worry about adopting it
+ */
+ virtual const char* getSourceName() const;
- This pointer is not reference counted because that has led to
- some circular references. As a result, this doesn't keep
- sources alive, and is only intended to be used temporarily for
- the lifetime of a Pipeline::run().
+ /**
+ Set the underlying source this source should use to get Documents
+ from.
- @param pSource the underlying source to use
- */
- virtual void setSource(DocumentSource *pSource);
+ It is an error to set the source more than once. This is to
+ prevent changing sources once the original source has been started;
+ this could break the state maintained by the DocumentSource.
- /**
- Attempt to coalesce this DocumentSource with its successor in the
- document processing pipeline. If successful, the successor
- DocumentSource should be removed from the pipeline and discarded.
+ This pointer is not reference counted because that has led to
+ some circular references. As a result, this doesn't keep
+ sources alive, and is only intended to be used temporarily for
+ the lifetime of a Pipeline::run().
- If successful, this operation can be applied repeatedly, in an
- attempt to coalesce several sources together.
+ @param pSource the underlying source to use
+ */
+ virtual void setSource(DocumentSource* pSource);
- The default implementation is to do nothing, and return false.
+ /**
+ Attempt to coalesce this DocumentSource with its successor in the
+ document processing pipeline. If successful, the successor
+ DocumentSource should be removed from the pipeline and discarded.
- @param pNextSource the next source in the document processing chain.
- @returns whether or not the attempt to coalesce was successful or not;
- if the attempt was not successful, nothing has been changed
- */
- virtual bool coalesce(const boost::intrusive_ptr<DocumentSource> &pNextSource);
+ If successful, this operation can be applied repeatedly, in an
+ attempt to coalesce several sources together.
- /**
- * Returns an optimized DocumentSource that is semantically equivalent to this one, or
- * nullptr if this stage is a no-op. Implementations are allowed to modify themselves
- * in-place and return a pointer to themselves. For best results, first coalesce compatible
- * sources using coalesce().
- *
- * This is intended for any operations that include expressions, and provides a hook for
- * those to optimize those operations.
- *
- * The default implementation is to do nothing and return yourself.
- */
- virtual boost::intrusive_ptr<DocumentSource> optimize();
+ The default implementation is to do nothing, and return false.
- enum GetDepsReturn {
- NOT_SUPPORTED = 0x0, // The full object and all metadata may be required
- SEE_NEXT = 0x1, // Later stages could need either fields or metadata
- EXHAUSTIVE_FIELDS = 0x2, // Later stages won't need more fields from input
- EXHAUSTIVE_META = 0x4, // Later stages won't need more metadata from input
- EXHAUSTIVE_ALL = EXHAUSTIVE_FIELDS | EXHAUSTIVE_META, // Later stages won't need either
- };
+ @param pNextSource the next source in the document processing chain.
+      @returns whether the attempt to coalesce was successful;
+ if the attempt was not successful, nothing has been changed
+ */
+ virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& pNextSource);
- /**
- * Get the dependencies this operation needs to do its job.
- */
- virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
- return NOT_SUPPORTED;
- }
+ /**
+ * Returns an optimized DocumentSource that is semantically equivalent to this one, or
+ * nullptr if this stage is a no-op. Implementations are allowed to modify themselves
+ * in-place and return a pointer to themselves. For best results, first coalesce compatible
+ * sources using coalesce().
+ *
+ * This is intended for any operations that include expressions, and provides a hook for
+ * those to optimize those operations.
+ *
+ * The default implementation is to do nothing and return yourself.
+ */
+ virtual boost::intrusive_ptr<DocumentSource> optimize();
+
+ enum GetDepsReturn {
+ NOT_SUPPORTED = 0x0, // The full object and all metadata may be required
+ SEE_NEXT = 0x1, // Later stages could need either fields or metadata
+ EXHAUSTIVE_FIELDS = 0x2, // Later stages won't need more fields from input
+ EXHAUSTIVE_META = 0x4, // Later stages won't need more metadata from input
+ EXHAUSTIVE_ALL = EXHAUSTIVE_FIELDS | EXHAUSTIVE_META, // Later stages won't need either
+ };
- /**
- * In the default case, serializes the DocumentSource and adds it to the std::vector<Value>.
- *
- * A subclass may choose to overwrite this, rather than serialize,
- * if it should output multiple stages (eg, $sort sometimes also outputs a $limit).
- */
+ /**
+ * Get the dependencies this operation needs to do its job.
+ */
+ virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
+ return NOT_SUPPORTED;
+ }
- virtual void serializeToArray(std::vector<Value>& array, bool explain = false) const;
+ /**
+ * In the default case, serializes the DocumentSource and adds it to the std::vector<Value>.
+ *
+     * A subclass may choose to override this, rather than serialize,
+     * if it should output multiple stages (e.g., $sort sometimes also outputs a $limit).
+ */
- /// Returns true if doesn't require an input source (most DocumentSources do).
- virtual bool isValidInitialSource() const { return false; }
+ virtual void serializeToArray(std::vector<Value>& array, bool explain = false) const;
- protected:
- /**
- Base constructor.
- */
- DocumentSource(const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+    /// Returns true if this stage doesn't require an input source (most DocumentSources do).
+ virtual bool isValidInitialSource() const {
+ return false;
+ }
- /*
- Most DocumentSources have an underlying source they get their data
- from. This is a convenience for them.
+protected:
+ /**
+ Base constructor.
+ */
+ DocumentSource(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- The default implementation of setSource() sets this; if you don't
- need a source, override that to verify(). The default is to
- verify() if this has already been set.
- */
- DocumentSource *pSource;
+ /*
+ Most DocumentSources have an underlying source they get their data
+ from. This is a convenience for them.
- boost::intrusive_ptr<ExpressionContext> pExpCtx;
+ The default implementation of setSource() sets this; if you don't
+ need a source, override that to verify(). The default is to
+ verify() if this has already been set.
+ */
+ DocumentSource* pSource;
- private:
- /**
- * Create a Value that represents the document source.
- *
- * This is used by the default implementation of serializeToArray() to add this object
- * to a pipeline being serialized. Returning a missing() Value results in no entry
- * being added to the array for this stage (DocumentSource).
- */
- virtual Value serialize(bool explain = false) const = 0;
- };
+ boost::intrusive_ptr<ExpressionContext> pExpCtx;
- /** This class marks DocumentSources that should be split between the merger and the shards.
- * See Pipeline::Optimizations::Sharded::findSplitPoint() for details.
+private:
+ /**
+ * Create a Value that represents the document source.
+ *
+ * This is used by the default implementation of serializeToArray() to add this object
+ * to a pipeline being serialized. Returning a missing() Value results in no entry
+ * being added to the array for this stage (DocumentSource).
*/
- class SplittableDocumentSource {
- public:
- /** returns a source to be run on the shards.
- * if NULL, don't run on shards
- */
- virtual boost::intrusive_ptr<DocumentSource> getShardSource() = 0;
-
- /** returns a source that combines results from shards.
- * if NULL, don't run on merger
- */
- virtual boost::intrusive_ptr<DocumentSource> getMergeSource() = 0;
- protected:
- // It is invalid to delete through a SplittableDocumentSource-typed pointer.
- virtual ~SplittableDocumentSource() {}
- };
+ virtual Value serialize(bool explain = false) const = 0;
+};
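As an orientation aid, here is a hypothetical minimal subclass of the interface above (illustration only, not part of this change); it overrides only getNext(), getSourceName(), and the private serialize(), and lets every other virtual fall back to the defaults documented above:

    // Hypothetical pass-through stage.
    class DocumentSourcePassthrough : public DocumentSource {
    public:
        virtual boost::optional<Document> getNext() {
            pExpCtx->checkForInterrupt();       // per the getNext() contract above
            return pSource->getNext();          // relies on setSource() having been called
        }
        virtual const char* getSourceName() const {
            return "$passthrough";
        }
        static boost::intrusive_ptr<DocumentSource> create(
            const boost::intrusive_ptr<ExpressionContext>& expCtx) {
            return new DocumentSourcePassthrough(expCtx);
        }

    private:
        explicit DocumentSourcePassthrough(const boost::intrusive_ptr<ExpressionContext>& expCtx)
            : DocumentSource(expCtx) {}

        virtual Value serialize(bool explain = false) const {
            return Value(DOC(getSourceName() << 1));
        }
    };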
-
- /** This class marks DocumentSources which need mongod-specific functionality.
- * It causes a MongodInterface to be injected when in a mongod and prevents mongos from
- * merging pipelines containing this stage.
+/** This class marks DocumentSources that should be split between the merger and the shards.
+ * See Pipeline::Optimizations::Sharded::findSplitPoint() for details.
+ */
+class SplittableDocumentSource {
+public:
+ /** returns a source to be run on the shards.
+ * if NULL, don't run on shards
*/
- class DocumentSourceNeedsMongod {
- public:
- // Wraps mongod-specific functions to allow linking into mongos.
- class MongodInterface {
- public:
- virtual ~MongodInterface() {};
-
- /**
- * Always returns a DBDirectClient.
- * Callers must not cache the returned pointer outside the scope of a single function.
- */
- virtual DBClientBase* directClient() = 0;
-
- // Note that in some rare cases this could return a false negative but will never return
- // a false positive. This method will be fixed in the future once it becomes possible to
- // avoid false negatives.
- virtual bool isSharded(const NamespaceString& ns) = 0;
-
- virtual bool isCapped(const NamespaceString& ns) = 0;
-
- /**
- * Inserts 'objs' into 'ns' and returns the "detailed" last error object.
- */
- virtual BSONObj insert(const NamespaceString& ns, const std::vector<BSONObj>& objs) = 0;
-
- // Add new methods as needed.
- };
-
- void injectMongodInterface(std::shared_ptr<MongodInterface> mongod) {
- _mongod = mongod;
- }
+ virtual boost::intrusive_ptr<DocumentSource> getShardSource() = 0;
- protected:
- // It is invalid to delete through a DocumentSourceNeedsMongod-typed pointer.
- virtual ~DocumentSourceNeedsMongod() {}
+ /** returns a source that combines results from shards.
+ * if NULL, don't run on merger
+ */
+ virtual boost::intrusive_ptr<DocumentSource> getMergeSource() = 0;
- // Gives subclasses access to a MongodInterface implementation
- std::shared_ptr<MongodInterface> _mongod;
- };
+protected:
+ // It is invalid to delete through a SplittableDocumentSource-typed pointer.
+ virtual ~SplittableDocumentSource() {}
+};
- class DocumentSourceBsonArray :
- public DocumentSource {
+/** This class marks DocumentSources which need mongod-specific functionality.
+ * It causes a MongodInterface to be injected when in a mongod and prevents mongos from
+ * merging pipelines containing this stage.
+ */
+class DocumentSourceNeedsMongod {
+public:
+ // Wraps mongod-specific functions to allow linking into mongos.
+ class MongodInterface {
public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual Value serialize(bool explain = false) const;
- virtual void setSource(DocumentSource *pSource);
- virtual bool isValidInitialSource() const { return true; }
+ virtual ~MongodInterface(){};
/**
- Create a document source based on a BSON array.
-
- This is usually put at the beginning of a chain of document sources
- in order to fetch data from the database.
-
- CAUTION: the BSON is not read until the source is used. Any
- elements that appear after these documents must not be read until
- this source is exhausted.
-
- @param array the BSON array to treat as a document source
- @param pExpCtx the expression context for the pipeline
- @returns the newly created document source
- */
- static boost::intrusive_ptr<DocumentSourceBsonArray> create(
- const BSONObj& array,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
-
- private:
- DocumentSourceBsonArray(
- const BSONObj& embeddedArray,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ * Always returns a DBDirectClient.
+ * Callers must not cache the returned pointer outside the scope of a single function.
+ */
+ virtual DBClientBase* directClient() = 0;
- BSONObj embeddedObject;
- BSONObjIterator arrayIterator;
- };
+ // Note that in some rare cases this could return a false negative but will never return
+ // a false positive. This method will be fixed in the future once it becomes possible to
+ // avoid false negatives.
+ virtual bool isSharded(const NamespaceString& ns) = 0;
+ virtual bool isCapped(const NamespaceString& ns) = 0;
- class DocumentSourceCommandShards :
- public DocumentSource {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual Value serialize(bool explain = false) const;
- virtual void setSource(DocumentSource *pSource);
- virtual bool isValidInitialSource() const { return true; }
-
- /* convenient shorthand for a commonly used type */
- typedef std::vector<Strategy::CommandResult> ShardOutput;
-
- /** Returns the result arrays from shards using the 2.4 protocol.
- * Call this instead of getNext() if you want access to the raw streams.
- * This method should only be called at most once.
+ /**
+ * Inserts 'objs' into 'ns' and returns the "detailed" last error object.
*/
- std::vector<BSONArray> getArrays();
+ virtual BSONObj insert(const NamespaceString& ns, const std::vector<BSONObj>& objs) = 0;
- /**
- Create a DocumentSource that wraps the output of many shards
+ // Add new methods as needed.
+ };
- @param shardOutput output from the individual shards
- @param pExpCtx the expression context for the pipeline
- @returns the newly created DocumentSource
- */
- static boost::intrusive_ptr<DocumentSourceCommandShards> create(
- const ShardOutput& shardOutput,
- const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+ void injectMongodInterface(std::shared_ptr<MongodInterface> mongod) {
+ _mongod = mongod;
+ }
- private:
- DocumentSourceCommandShards(const ShardOutput& shardOutput,
- const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+protected:
+ // It is invalid to delete through a DocumentSourceNeedsMongod-typed pointer.
+ virtual ~DocumentSourceNeedsMongod() {}
- /**
- Advance to the next document, setting pCurrent appropriately.
+ // Gives subclasses access to a MongodInterface implementation
+ std::shared_ptr<MongodInterface> _mongod;
+};
- Adjusts pCurrent, pBsonSource, and iterator, as needed. On exit,
- pCurrent is the Document to return, or NULL. If NULL, this
- indicates there is nothing more to return.
- */
- void getNextDocument();
-
- bool unstarted;
- bool hasCurrent;
- bool newSource; // set to true for the first item of a new source
- boost::intrusive_ptr<DocumentSourceBsonArray> pBsonSource;
- Document pCurrent;
- ShardOutput::const_iterator iterator;
- ShardOutput::const_iterator listEnd;
- };
+class DocumentSourceBsonArray : public DocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual Value serialize(bool explain = false) const;
+ virtual void setSource(DocumentSource* pSource);
+ virtual bool isValidInitialSource() const {
+ return true;
+ }
/**
- * Constructs and returns Documents from the BSONObj objects produced by a supplied
- * PlanExecutor.
- *
- * An object of this type may only be used by one thread, see SERVER-6123.
+ Create a document source based on a BSON array.
+
+ This is usually put at the beginning of a chain of document sources
+ in order to fetch data from the database.
+
+ CAUTION: the BSON is not read until the source is used. Any
+ elements that appear after these documents must not be read until
+ this source is exhausted.
+
+ @param array the BSON array to treat as a document source
+ @param pExpCtx the expression context for the pipeline
+ @returns the newly created document source
+ */
+ static boost::intrusive_ptr<DocumentSourceBsonArray> create(
+ const BSONObj& array, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+private:
+ DocumentSourceBsonArray(const BSONObj& embeddedArray,
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ BSONObj embeddedObject;
+ BSONObjIterator arrayIterator;
+};
+
+
+class DocumentSourceCommandShards : public DocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual Value serialize(bool explain = false) const;
+ virtual void setSource(DocumentSource* pSource);
+ virtual bool isValidInitialSource() const {
+ return true;
+ }
+
+ /* convenient shorthand for a commonly used type */
+ typedef std::vector<Strategy::CommandResult> ShardOutput;
+
+ /** Returns the result arrays from shards using the 2.4 protocol.
+ * Call this instead of getNext() if you want access to the raw streams.
+ * This method should only be called at most once.
*/
- class DocumentSourceCursor :
- public DocumentSource {
- public:
- // virtuals from DocumentSource
- virtual ~DocumentSourceCursor();
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual Value serialize(bool explain = false) const;
- virtual void setSource(DocumentSource *pSource);
- virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& nextSource);
- virtual bool isValidInitialSource() const { return true; }
- virtual void dispose();
+ std::vector<BSONArray> getArrays();
- /**
- * Create a document source based on a passed-in PlanExecutor.
- *
- * This is usually put at the beginning of a chain of document sources
- * in order to fetch data from the database.
- */
- static boost::intrusive_ptr<DocumentSourceCursor> create(
- const std::string& ns,
- const std::shared_ptr<PlanExecutor>& exec,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ /**
+ Create a DocumentSource that wraps the output of many shards
- /*
- Record the query that was specified for the cursor this wraps, if
- any.
+ @param shardOutput output from the individual shards
+ @param pExpCtx the expression context for the pipeline
+ @returns the newly created DocumentSource
+ */
+ static boost::intrusive_ptr<DocumentSourceCommandShards> create(
+ const ShardOutput& shardOutput, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- This should be captured after any optimizations are applied to
- the pipeline so that it reflects what is really used.
+private:
+ DocumentSourceCommandShards(const ShardOutput& shardOutput,
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- This gets used for explain output.
+ /**
+ Advance to the next document, setting pCurrent appropriately.
- @param pBsonObj the query to record
- */
- void setQuery(const BSONObj& query) { _query = query; }
+ Adjusts pCurrent, pBsonSource, and iterator, as needed. On exit,
+ pCurrent is the Document to return, or NULL. If NULL, this
+ indicates there is nothing more to return.
+ */
+ void getNextDocument();
- /*
- Record the sort that was specified for the cursor this wraps, if
- any.
+ bool unstarted;
+ bool hasCurrent;
+ bool newSource; // set to true for the first item of a new source
+ boost::intrusive_ptr<DocumentSourceBsonArray> pBsonSource;
+ Document pCurrent;
+ ShardOutput::const_iterator iterator;
+ ShardOutput::const_iterator listEnd;
+};
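A hedged sketch of the two consumption modes described above, assuming shardOutput and expCtx were produced elsewhere; getArrays() is an alternative to getNext() and may be called at most once:

    boost::intrusive_ptr<DocumentSourceCommandShards> source =
        DocumentSourceCommandShards::create(shardOutput, expCtx);
    // Either stream the merged documents...
    while (boost::optional<Document> doc = source->getNext()) {
        // process *doc
    }
    // ...or, on a fresh instance, take the raw per-shard result arrays instead:
    // std::vector<BSONArray> raw = source->getArrays();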
- This should be captured after any optimizations are applied to
- the pipeline so that it reflects what is really used.
- This gets used for explain output.
+/**
+ * Constructs and returns Documents from the BSONObj objects produced by a supplied
+ * PlanExecutor.
+ *
+ * An object of this type may only be used by one thread, see SERVER-6123.
+ */
+class DocumentSourceCursor : public DocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceCursor();
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual Value serialize(bool explain = false) const;
+ virtual void setSource(DocumentSource* pSource);
+ virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& nextSource);
+ virtual bool isValidInitialSource() const {
+ return true;
+ }
+ virtual void dispose();
- @param pBsonObj the sort to record
- */
- void setSort(const BSONObj& sort) { _sort = sort; }
+ /**
+ * Create a document source based on a passed-in PlanExecutor.
+ *
+ * This is usually put at the beginning of a chain of document sources
+ * in order to fetch data from the database.
+ */
+ static boost::intrusive_ptr<DocumentSourceCursor> create(
+ const std::string& ns,
+ const std::shared_ptr<PlanExecutor>& exec,
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- /**
- * Informs this object of projection and dependency information.
- *
- * @param projection A projection specification describing the fields needed by the rest of
- * the pipeline.
- * @param deps The output of DepsTracker::toParsedDeps
- */
- void setProjection(const BSONObj& projection, const boost::optional<ParsedDeps>& deps);
+ /*
+ Record the query that was specified for the cursor this wraps, if
+ any.
- /// returns -1 for no limit
- long long getLimit() const;
+ This should be captured after any optimizations are applied to
+ the pipeline so that it reflects what is really used.
- private:
- DocumentSourceCursor(
- const std::string& ns,
- const std::shared_ptr<PlanExecutor>& exec,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ This gets used for explain output.
- void loadBatch();
+    @param query the query to record
+ */
+ void setQuery(const BSONObj& query) {
+ _query = query;
+ }
- std::deque<Document> _currentBatch;
+ /*
+ Record the sort that was specified for the cursor this wraps, if
+ any.
- // BSONObj members must outlive _projection and cursor.
- BSONObj _query;
- BSONObj _sort;
- BSONObj _projection;
- boost::optional<ParsedDeps> _dependencies;
- boost::intrusive_ptr<DocumentSourceLimit> _limit;
- long long _docsAddedToBatches; // for _limit enforcement
+ This should be captured after any optimizations are applied to
+ the pipeline so that it reflects what is really used.
- const std::string _ns;
- std::shared_ptr<PlanExecutor> _exec; // PipelineProxyStage holds a weak_ptr to this.
- };
+ This gets used for explain output.
+    @param sort the sort to record
+ */
+ void setSort(const BSONObj& sort) {
+ _sort = sort;
+ }
- class DocumentSourceGroup : public DocumentSource
- , public SplittableDocumentSource {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual boost::intrusive_ptr<DocumentSource> optimize();
- virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
- virtual void dispose();
- virtual Value serialize(bool explain = false) const;
+ /**
+ * Informs this object of projection and dependency information.
+ *
+ * @param projection A projection specification describing the fields needed by the rest of
+ * the pipeline.
+ * @param deps The output of DepsTracker::toParsedDeps
+ */
+ void setProjection(const BSONObj& projection, const boost::optional<ParsedDeps>& deps);
- static boost::intrusive_ptr<DocumentSourceGroup> create(
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ /// returns -1 for no limit
+ long long getLimit() const;
- /**
- Add an accumulator.
+private:
+ DocumentSourceCursor(const std::string& ns,
+ const std::shared_ptr<PlanExecutor>& exec,
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- Accumulators become fields in the Documents that result from
- grouping. Each unique group document must have it's own
- accumulator; the accumulator factory is used to create that.
+ void loadBatch();
- @param fieldName the name the accumulator result will have in the
- result documents
- @param pAccumulatorFactory used to create the accumulator for the
- group field
- */
- void addAccumulator(const std::string& fieldName,
- boost::intrusive_ptr<Accumulator> (*pAccumulatorFactory)(),
- const boost::intrusive_ptr<Expression> &pExpression);
+ std::deque<Document> _currentBatch;
- /// Tell this source if it is doing a merge from shards. Defaults to false.
- void setDoingMerge(bool doingMerge) { _doingMerge = doingMerge; }
+ // BSONObj members must outlive _projection and cursor.
+ BSONObj _query;
+ BSONObj _sort;
+ BSONObj _projection;
+ boost::optional<ParsedDeps> _dependencies;
+ boost::intrusive_ptr<DocumentSourceLimit> _limit;
+ long long _docsAddedToBatches; // for _limit enforcement
- /**
- Create a grouping DocumentSource from BSON.
+ const std::string _ns;
+ std::shared_ptr<PlanExecutor> _exec; // PipelineProxyStage holds a weak_ptr to this.
+};
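A rough sketch of the intended wiring, assuming a namespace string ns, a PlanExecutor exec, and expCtx obtained elsewhere; setQuery() and setSort() only record what was executed, for explain output, so they should be called after the pipeline has been optimized:

    boost::intrusive_ptr<DocumentSourceCursor> cursor =
        DocumentSourceCursor::create(ns, exec, expCtx);
    cursor->setQuery(BSON("status" << "A"));  // recorded for explain, not re-run
    cursor->setSort(BSON("ts" << -1));
    while (boost::optional<Document> doc = cursor->getNext()) {
        // documents are pulled from the PlanExecutor in batches (see loadBatch())
    }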
- This is a convenience method that uses the above, and operates on
- a BSONElement that has been deteremined to be an Object with an
- element named $group.
- @param pBsonElement the BSONELement that defines the group
- @param pExpCtx the expression context
- @returns the grouping DocumentSource
- */
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+class DocumentSourceGroup : public DocumentSource, public SplittableDocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual boost::intrusive_ptr<DocumentSource> optimize();
+ virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
+ virtual void dispose();
+ virtual Value serialize(bool explain = false) const;
- // Virtuals for SplittableDocumentSource
- virtual boost::intrusive_ptr<DocumentSource> getShardSource();
- virtual boost::intrusive_ptr<DocumentSource> getMergeSource();
+ static boost::intrusive_ptr<DocumentSourceGroup> create(
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- static const char groupName[];
-
- private:
- DocumentSourceGroup(const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
-
- /// Spill groups map to disk and returns an iterator to the file.
- std::shared_ptr<Sorter<Value, Value>::Iterator> spill();
-
- // Only used by spill. Would be function-local if that were legal in C++03.
- class SpillSTLComparator;
+ /**
+ Add an accumulator.
- /*
- Before returning anything, this source must fetch everything from
- the underlying source and group it. populate() is used to do that
- on the first call to any method on this source. The populated
- boolean indicates that this has been done.
- */
- void populate();
- bool populated;
+ Accumulators become fields in the Documents that result from
+    grouping. Each unique group document must have its own
+ accumulator; the accumulator factory is used to create that.
- /**
- * Parses the raw id expression into _idExpressions and possibly _idFieldNames.
- */
- void parseIdExpression(BSONElement groupField, const VariablesParseState& vps);
+ @param fieldName the name the accumulator result will have in the
+ result documents
+ @param pAccumulatorFactory used to create the accumulator for the
+ group field
+ */
+ void addAccumulator(const std::string& fieldName,
+ boost::intrusive_ptr<Accumulator>(*pAccumulatorFactory)(),
+ const boost::intrusive_ptr<Expression>& pExpression);
- /**
- * Computes the internal representation of the group key.
- */
- Value computeId(Variables* vars);
+ /// Tell this source if it is doing a merge from shards. Defaults to false.
+ void setDoingMerge(bool doingMerge) {
+ _doingMerge = doingMerge;
+ }
- /**
- * Converts the internal representation of the group key to the _id shape specified by the
- * user.
- */
- Value expandId(const Value& val);
-
-
- typedef std::vector<boost::intrusive_ptr<Accumulator> > Accumulators;
- typedef boost::unordered_map<Value, Accumulators, Value::Hash> GroupsMap;
- GroupsMap groups;
-
- /*
- The field names for the result documents and the accumulator
- factories for the result documents. The Expressions are the
- common expressions used by each instance of each accumulator
- in order to find the right-hand side of what gets added to the
- accumulator. Note that each of those is the same for each group,
- so we can share them across all groups by adding them to the
- accumulators after we use the factories to make a new set of
- accumulators for each new group.
-
- These three vectors parallel each other.
- */
- std::vector<std::string> vFieldName;
- std::vector<boost::intrusive_ptr<Accumulator> (*)()> vpAccumulatorFactory;
- std::vector<boost::intrusive_ptr<Expression> > vpExpression;
-
-
- Document makeDocument(const Value& id, const Accumulators& accums, bool mergeableOutput);
-
- bool _doingMerge;
- bool _spilled;
- const bool _extSortAllowed;
- const int _maxMemoryUsageBytes;
- std::unique_ptr<Variables> _variables;
- std::vector<std::string> _idFieldNames; // used when id is a document
- std::vector<boost::intrusive_ptr<Expression> > _idExpressions;
-
- // only used when !_spilled
- GroupsMap::iterator groupsIterator;
-
- // only used when _spilled
- std::unique_ptr<Sorter<Value, Value>::Iterator> _sorterIterator;
- std::pair<Value, Value> _firstPartOfNextGroup;
- Value _currentId;
- Accumulators _currentAccumulators;
- };
+ /**
+ Create a grouping DocumentSource from BSON.
+ This is a convenience method that uses the above, and operates on
+    a BSONElement that has been determined to be an Object with an
+ element named $group.
- class DocumentSourceMatch : public DocumentSource {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& nextSource);
- virtual Value serialize(bool explain = false) const;
- virtual boost::intrusive_ptr<DocumentSource> optimize();
- virtual void setSource(DocumentSource* Source);
+    @param pBsonElement the BSONElement that defines the group
+ @param pExpCtx the expression context
+ @returns the grouping DocumentSource
+ */
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- /**
- Create a filter.
+ // Virtuals for SplittableDocumentSource
+ virtual boost::intrusive_ptr<DocumentSource> getShardSource();
+ virtual boost::intrusive_ptr<DocumentSource> getMergeSource();
- @param pBsonElement the raw BSON specification for the filter
- @returns the filter
- */
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pCtx);
-
- /// Returns the query in Matcher syntax.
- BSONObj getQuery() const;
-
- static const char matchName[];
-
- /** Returns the portion of the match that can safely be promoted to before a $redact.
- * If this returns an empty BSONObj, no part of this match may safely be promoted.
- *
- * To be safe to promote, removing a field from a document to be matched must not cause
- * that document to be accepted when it would otherwise be rejected. As an example,
- * {name: {$ne: "bob smith"}} accepts documents without a name field, which means that
- * running this filter before a redact that would remove the name field would leak
- * information. On the other hand, {age: {$gt:5}} is ok because it doesn't accept documents
- * that have had their age field removed.
- */
- BSONObj redactSafePortion() const;
+ static const char groupName[];
- static bool isTextQuery(const BSONObj& query);
- bool isTextQuery() const { return _isTextQuery; }
+private:
+ DocumentSourceGroup(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- private:
- DocumentSourceMatch(const BSONObj &query,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+    /// Spills the groups map to disk and returns an iterator to the file.
+ std::shared_ptr<Sorter<Value, Value>::Iterator> spill();
- std::unique_ptr<Matcher> matcher;
- bool _isTextQuery;
- };
+ // Only used by spill. Would be function-local if that were legal in C++03.
+ class SpillSTLComparator;
- class DocumentSourceMergeCursors :
- public DocumentSource {
- public:
- typedef std::vector<std::pair<ConnectionString, CursorId> > CursorIds;
+ /*
+ Before returning anything, this source must fetch everything from
+ the underlying source and group it. populate() is used to do that
+ on the first call to any method on this source. The populated
+ boolean indicates that this has been done.
+ */
+ void populate();
+ bool populated;
- // virtuals from DocumentSource
- boost::optional<Document> getNext();
- virtual void setSource(DocumentSource *pSource);
- virtual const char *getSourceName() const;
- virtual void dispose();
- virtual Value serialize(bool explain = false) const;
- virtual bool isValidInitialSource() const { return true; }
+ /**
+ * Parses the raw id expression into _idExpressions and possibly _idFieldNames.
+ */
+ void parseIdExpression(BSONElement groupField, const VariablesParseState& vps);
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ /**
+ * Computes the internal representation of the group key.
+ */
+ Value computeId(Variables* vars);
- static boost::intrusive_ptr<DocumentSource> create(
- const CursorIds& cursorIds,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ /**
+ * Converts the internal representation of the group key to the _id shape specified by the
+ * user.
+ */
+ Value expandId(const Value& val);
+
+
+ typedef std::vector<boost::intrusive_ptr<Accumulator>> Accumulators;
+ typedef boost::unordered_map<Value, Accumulators, Value::Hash> GroupsMap;
+ GroupsMap groups;
+
+ /*
+ The field names for the result documents and the accumulator
+ factories for the result documents. The Expressions are the
+ common expressions used by each instance of each accumulator
+ in order to find the right-hand side of what gets added to the
+ accumulator. Note that each of those is the same for each group,
+ so we can share them across all groups by adding them to the
+ accumulators after we use the factories to make a new set of
+ accumulators for each new group.
+
+ These three vectors parallel each other.
+ */
+ std::vector<std::string> vFieldName;
+ std::vector<boost::intrusive_ptr<Accumulator>(*)()> vpAccumulatorFactory;
+ std::vector<boost::intrusive_ptr<Expression>> vpExpression;
+
+
+ Document makeDocument(const Value& id, const Accumulators& accums, bool mergeableOutput);
+
+ bool _doingMerge;
+ bool _spilled;
+ const bool _extSortAllowed;
+ const int _maxMemoryUsageBytes;
+ std::unique_ptr<Variables> _variables;
+ std::vector<std::string> _idFieldNames; // used when id is a document
+ std::vector<boost::intrusive_ptr<Expression>> _idExpressions;
+
+ // only used when !_spilled
+ GroupsMap::iterator groupsIterator;
+
+ // only used when _spilled
+ std::unique_ptr<Sorter<Value, Value>::Iterator> _sorterIterator;
+ std::pair<Value, Value> _firstPartOfNextGroup;
+ Value _currentId;
+ Accumulators _currentAccumulators;
+};
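A concrete illustration of the createFromBson() path described above; the field names are purely illustrative and expCtx is assumed to exist:

    BSONObj spec = BSON("$group" << BSON("_id" << "$status"
                                         << "total" << BSON("$sum" << "$qty")));
    boost::intrusive_ptr<DocumentSource> group =
        DocumentSourceGroup::createFromBson(spec.firstElement(), expCtx);
    // getShardSource()/getMergeSource() provide the split halves for sharded execution.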
+
+
+class DocumentSourceMatch : public DocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& nextSource);
+ virtual Value serialize(bool explain = false) const;
+ virtual boost::intrusive_ptr<DocumentSource> optimize();
+ virtual void setSource(DocumentSource* Source);
- static const char name[];
+ /**
+ Create a filter.
- /** Returns non-owning pointers to cursors managed by this stage.
- * Call this instead of getNext() if you want access to the raw streams.
- * This method should only be called at most once.
- */
- std::vector<DBClientCursor*> getCursors();
+ @param pBsonElement the raw BSON specification for the filter
+ @returns the filter
+ */
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pCtx);
- /**
- * Returns the next object from the cursor, throwing an appropriate exception if the cursor
- * reported an error. This is a better form of DBClientCursor::nextSafe.
- */
- static Document nextSafeFrom(DBClientCursor* cursor);
+ /// Returns the query in Matcher syntax.
+ BSONObj getQuery() const;
- private:
+ static const char matchName[];
- struct CursorAndConnection {
- CursorAndConnection(ConnectionString host, NamespaceString ns, CursorId id);
- ScopedDbConnection connection;
- DBClientCursor cursor;
- };
+ /** Returns the portion of the match that can safely be promoted to before a $redact.
+ * If this returns an empty BSONObj, no part of this match may safely be promoted.
+ *
+ * To be safe to promote, removing a field from a document to be matched must not cause
+ * that document to be accepted when it would otherwise be rejected. As an example,
+ * {name: {$ne: "bob smith"}} accepts documents without a name field, which means that
+ * running this filter before a redact that would remove the name field would leak
+ * information. On the other hand, {age: {$gt:5}} is ok because it doesn't accept documents
+ * that have had their age field removed.
+ */
+ BSONObj redactSafePortion() const;
+
+ static bool isTextQuery(const BSONObj& query);
+ bool isTextQuery() const {
+ return _isTextQuery;
+ }
+
+private:
+ DocumentSourceMatch(const BSONObj& query,
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ std::unique_ptr<Matcher> matcher;
+ bool _isTextQuery;
+};
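A small sketch of the redact-safety rule spelled out above, assuming expCtx: a predicate that rejects documents lacking the field is safe to hoist past $redact, so redactSafePortion() should return it, while a {$ne: ...} style predicate would contribute nothing:

    boost::intrusive_ptr<DocumentSource> stage = DocumentSourceMatch::createFromBson(
        BSON("$match" << BSON("age" << BSON("$gt" << 5))).firstElement(), expCtx);
    BSONObj safe = static_cast<DocumentSourceMatch*>(stage.get())->redactSafePortion();
    // expected: safe is equivalent to {age: {$gt: 5}}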
+
+class DocumentSourceMergeCursors : public DocumentSource {
+public:
+ typedef std::vector<std::pair<ConnectionString, CursorId>> CursorIds;
+
+ // virtuals from DocumentSource
+ boost::optional<Document> getNext();
+ virtual void setSource(DocumentSource* pSource);
+ virtual const char* getSourceName() const;
+ virtual void dispose();
+ virtual Value serialize(bool explain = false) const;
+ virtual bool isValidInitialSource() const {
+ return true;
+ }
+
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ static boost::intrusive_ptr<DocumentSource> create(
+ const CursorIds& cursorIds, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ static const char name[];
+
+ /** Returns non-owning pointers to cursors managed by this stage.
+ * Call this instead of getNext() if you want access to the raw streams.
+ * This method should only be called at most once.
+ */
+ std::vector<DBClientCursor*> getCursors();
- // using list to enable removing arbitrary elements
- typedef std::list<std::shared_ptr<CursorAndConnection> > Cursors;
+ /**
+ * Returns the next object from the cursor, throwing an appropriate exception if the cursor
+ * reported an error. This is a better form of DBClientCursor::nextSafe.
+ */
+ static Document nextSafeFrom(DBClientCursor* cursor);
- DocumentSourceMergeCursors(
- const CursorIds& cursorIds,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+private:
+ struct CursorAndConnection {
+ CursorAndConnection(ConnectionString host, NamespaceString ns, CursorId id);
+ ScopedDbConnection connection;
+ DBClientCursor cursor;
+ };
- // Converts _cursorIds into active _cursors.
- void start();
+ // using list to enable removing arbitrary elements
+ typedef std::list<std::shared_ptr<CursorAndConnection>> Cursors;
- // This is the description of cursors to merge.
- const CursorIds _cursorIds;
+ DocumentSourceMergeCursors(const CursorIds& cursorIds,
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- // These are the actual cursors we are merging. Created lazily.
- Cursors _cursors;
- Cursors::iterator _currentCursor;
+ // Converts _cursorIds into active _cursors.
+ void start();
- bool _unstarted;
- };
+ // This is the description of cursors to merge.
+ const CursorIds _cursorIds;
- class DocumentSourceOut : public DocumentSource
- , public SplittableDocumentSource
- , public DocumentSourceNeedsMongod {
- public:
- // virtuals from DocumentSource
- virtual ~DocumentSourceOut();
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual Value serialize(bool explain = false) const;
- virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
+ // These are the actual cursors we are merging. Created lazily.
+ Cursors _cursors;
+ Cursors::iterator _currentCursor;
- // Virtuals for SplittableDocumentSource
- virtual boost::intrusive_ptr<DocumentSource> getShardSource() { return NULL; }
- virtual boost::intrusive_ptr<DocumentSource> getMergeSource() { return this; }
+ bool _unstarted;
+};
- const NamespaceString& getOutputNs() const { return _outputNs; }
+class DocumentSourceOut : public DocumentSource,
+ public SplittableDocumentSource,
+ public DocumentSourceNeedsMongod {
+public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceOut();
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual Value serialize(bool explain = false) const;
+ virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
- /**
- Create a document source for output and pass-through.
+ // Virtuals for SplittableDocumentSource
+ virtual boost::intrusive_ptr<DocumentSource> getShardSource() {
+ return NULL;
+ }
+ virtual boost::intrusive_ptr<DocumentSource> getMergeSource() {
+ return this;
+ }
- This can be put anywhere in a pipeline and will store content as
- well as pass it on.
+ const NamespaceString& getOutputNs() const {
+ return _outputNs;
+ }
- @param pBsonElement the raw BSON specification for the source
- @param pExpCtx the expression context for the pipeline
- @returns the newly created document source
- */
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ /**
+ Create a document source for output and pass-through.
- static const char outName[];
+ This can be put anywhere in a pipeline and will store content as
+ well as pass it on.
- private:
- DocumentSourceOut(const NamespaceString& outputNs,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ @param pBsonElement the raw BSON specification for the source
+ @param pExpCtx the expression context for the pipeline
+ @returns the newly created document source
+ */
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- // Sets _tempsNs and prepares it to receive data.
- void prepTempCollection();
+ static const char outName[];
- void spill(const std::vector<BSONObj>& toInsert);
+private:
+ DocumentSourceOut(const NamespaceString& outputNs,
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- bool _done;
+ // Sets _tempsNs and prepares it to receive data.
+ void prepTempCollection();
- NamespaceString _tempNs; // output goes here as it is being processed.
- const NamespaceString _outputNs; // output will go here after all data is processed.
- };
+ void spill(const std::vector<BSONObj>& toInsert);
+ bool _done;
- class DocumentSourceProject : public DocumentSource {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual boost::intrusive_ptr<DocumentSource> optimize();
- virtual Value serialize(bool explain = false) const;
+ NamespaceString _tempNs; // output goes here as it is being processed.
+ const NamespaceString _outputNs; // output will go here after all data is processed.
+};
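For concreteness, a hedged sketch of building $out from its usual spec, whose value is simply the target collection name (expCtx assumed):

    BSONObj spec = BSON("$out" << "reportSummaries");
    boost::intrusive_ptr<DocumentSource> out =
        DocumentSourceOut::createFromBson(spec.firstElement(), expCtx);
    // Documents pass through unchanged; they are staged in _tempNs while the
    // pipeline runs and end up in _outputNs once all input has been processed.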
- virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
- /**
- Create a new projection DocumentSource from BSON.
+class DocumentSourceProject : public DocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual boost::intrusive_ptr<DocumentSource> optimize();
+ virtual Value serialize(bool explain = false) const;
- This is a convenience for directly handling BSON, and relies on the
- above methods.
+ virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
- @param pBsonElement the BSONElement with an object named $project
- @param pExpCtx the expression context for the pipeline
- @returns the created projection
- */
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ /**
+ Create a new projection DocumentSource from BSON.
- static const char projectName[];
+ This is a convenience for directly handling BSON, and relies on the
+ above methods.
- /** projection as specified by the user */
- BSONObj getRaw() const { return _raw; }
+ @param pBsonElement the BSONElement with an object named $project
+ @param pExpCtx the expression context for the pipeline
+ @returns the created projection
+ */
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- private:
- DocumentSourceProject(const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
- const boost::intrusive_ptr<ExpressionObject>& exprObj);
+ static const char projectName[];
- // configuration state
- std::unique_ptr<Variables> _variables;
- boost::intrusive_ptr<ExpressionObject> pEO;
- BSONObj _raw;
- };
+ /** projection as specified by the user */
+ BSONObj getRaw() const {
+ return _raw;
+ }
- class DocumentSourceRedact :
- public DocumentSource {
- public:
- virtual boost::optional<Document> getNext();
- virtual const char* getSourceName() const;
- virtual boost::intrusive_ptr<DocumentSource> optimize();
+private:
+ DocumentSourceProject(const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
+ const boost::intrusive_ptr<ExpressionObject>& exprObj);
- static const char redactName[];
+ // configuration state
+ std::unique_ptr<Variables> _variables;
+ boost::intrusive_ptr<ExpressionObject> pEO;
+ BSONObj _raw;
+};
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext>& expCtx);
+class DocumentSourceRedact : public DocumentSource {
+public:
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual boost::intrusive_ptr<DocumentSource> optimize();
- virtual Value serialize(bool explain = false) const;
+ static const char redactName[];
- private:
- DocumentSourceRedact(const boost::intrusive_ptr<ExpressionContext>& expCtx,
- const boost::intrusive_ptr<Expression>& previsit);
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& expCtx);
- // These both work over _variables
- boost::optional<Document> redactObject(); // redacts CURRENT
- Value redactValue(const Value& in);
+ virtual Value serialize(bool explain = false) const;
- Variables::Id _currentId;
- std::unique_ptr<Variables> _variables;
- boost::intrusive_ptr<Expression> _expression;
- };
+private:
+ DocumentSourceRedact(const boost::intrusive_ptr<ExpressionContext>& expCtx,
+ const boost::intrusive_ptr<Expression>& previsit);
- class DocumentSourceSort : public DocumentSource
- , public SplittableDocumentSource {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual void serializeToArray(std::vector<Value>& array, bool explain = false) const;
- virtual bool coalesce(const boost::intrusive_ptr<DocumentSource> &pNextSource);
- virtual void dispose();
+ // These both work over _variables
+ boost::optional<Document> redactObject(); // redacts CURRENT
+ Value redactValue(const Value& in);
- virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
+ Variables::Id _currentId;
+ std::unique_ptr<Variables> _variables;
+ boost::intrusive_ptr<Expression> _expression;
+};
- virtual boost::intrusive_ptr<DocumentSource> getShardSource();
- virtual boost::intrusive_ptr<DocumentSource> getMergeSource();
+class DocumentSourceSort : public DocumentSource, public SplittableDocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual void serializeToArray(std::vector<Value>& array, bool explain = false) const;
+ virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& pNextSource);
+ virtual void dispose();
- /**
- Add sort key field.
+ virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
- Adds a sort key field to the key being built up. A concatenated
- key is built up by calling this repeatedly.
+ virtual boost::intrusive_ptr<DocumentSource> getShardSource();
+ virtual boost::intrusive_ptr<DocumentSource> getMergeSource();
- @param fieldPath the field path to the key component
- @param ascending if true, use the key for an ascending sort,
- otherwise, use it for descending
- */
- void addKey(const std::string &fieldPath, bool ascending);
+ /**
+ Add sort key field.
- /// Write out a Document whose contents are the sort key.
- Document serializeSortKey(bool explain) const;
+ Adds a sort key field to the key being built up. A concatenated
+ key is built up by calling this repeatedly.
- /**
- Create a sorting DocumentSource from BSON.
+ @param fieldPath the field path to the key component
+ @param ascending if true, use the key for an ascending sort,
+ otherwise, use it for descending
+ */
+ void addKey(const std::string& fieldPath, bool ascending);
- This is a convenience method that uses the above, and operates on
- a BSONElement that has been deteremined to be an Object with an
- element named $group.
+ /// Write out a Document whose contents are the sort key.
+ Document serializeSortKey(bool explain) const;
- @param pBsonElement the BSONELement that defines the group
- @param pExpCtx the expression context for the pipeline
- @returns the grouping DocumentSource
- */
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ /**
+ Create a sorting DocumentSource from BSON.
- /// Create a DocumentSourceSort with a given sort and (optional) limit
- static boost::intrusive_ptr<DocumentSourceSort> create(
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx,
- BSONObj sortOrder,
- long long limit=-1);
+ This is a convenience method that uses the above, and operates on
+    a BSONElement that has been determined to be an Object with an
+    element named $sort.
- /// returns -1 for no limit
- long long getLimit() const;
+    @param pBsonElement the BSONElement that defines the sort
+ @param pExpCtx the expression context for the pipeline
+    @returns the sorting DocumentSource
+ */
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- boost::intrusive_ptr<DocumentSourceLimit> getLimitSrc() const { return limitSrc; }
+ /// Create a DocumentSourceSort with a given sort and (optional) limit
+ static boost::intrusive_ptr<DocumentSourceSort> create(
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx,
+ BSONObj sortOrder,
+ long long limit = -1);
- static const char sortName[];
+ /// returns -1 for no limit
+ long long getLimit() const;
- private:
- DocumentSourceSort(const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ boost::intrusive_ptr<DocumentSourceLimit> getLimitSrc() const {
+ return limitSrc;
+ }
- virtual Value serialize(bool explain = false) const {
- verify(false); // should call addToBsonArray instead
- }
+ static const char sortName[];
- /*
- Before returning anything, this source must fetch everything from
- the underlying source and group it. populate() is used to do that
- on the first call to any method on this source. The populated
- boolean indicates that this has been done.
- */
- void populate();
- bool populated;
-
- SortOptions makeSortOptions() const;
-
- // These are used to merge pre-sorted results from a DocumentSourceMergeCursors or a
- // DocumentSourceCommandShards depending on whether we have finished upgrading to 2.6 or
- // not.
- class IteratorFromCursor;
- class IteratorFromBsonArray;
- void populateFromCursors(const std::vector<DBClientCursor*>& cursors);
- void populateFromBsonArrays(const std::vector<BSONArray>& arrays);
-
- /* these two parallel each other */
- typedef std::vector<boost::intrusive_ptr<Expression> > SortKey;
- SortKey vSortKey;
- std::vector<char> vAscending; // used like std::vector<bool> but without specialization
-
- /// Extracts the fields in vSortKey from the Document;
- Value extractKey(const Document& d) const;
-
- /// Compare two Values according to the specified sort key.
- int compare(const Value& lhs, const Value& rhs) const;
-
- typedef Sorter<Value, Document> MySorter;
-
- // For MySorter
- class Comparator {
- public:
- explicit Comparator(const DocumentSourceSort& source): _source(source) {}
- int operator()(const MySorter::Data& lhs, const MySorter::Data& rhs) const {
- return _source.compare(lhs.first, rhs.first);
- }
- private:
- const DocumentSourceSort& _source;
- };
-
- boost::intrusive_ptr<DocumentSourceLimit> limitSrc;
-
- bool _done;
- bool _mergingPresorted;
- std::unique_ptr<MySorter::Iterator> _output;
- };
+private:
+ DocumentSourceSort(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- class DocumentSourceLimit : public DocumentSource
- , public SplittableDocumentSource {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual bool coalesce(const boost::intrusive_ptr<DocumentSource> &pNextSource);
- virtual Value serialize(bool explain = false) const;
-
- virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
- return SEE_NEXT; // This doesn't affect needed fields
- }
+ virtual Value serialize(bool explain = false) const {
+        verify(false);  // should call serializeToArray() instead
+ }
- /**
- Create a new limiting DocumentSource.
+ /*
+ Before returning anything, this source must fetch everything from
+ the underlying source and group it. populate() is used to do that
+ on the first call to any method on this source. The populated
+ boolean indicates that this has been done.
+ */
+ void populate();
+ bool populated;
- @param pExpCtx the expression context for the pipeline
- @returns the DocumentSource
- */
- static boost::intrusive_ptr<DocumentSourceLimit> create(
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx,
- long long limit);
+ SortOptions makeSortOptions() const;
- // Virtuals for SplittableDocumentSource
- // Need to run on rounter. Running on shard as well is an optimization.
- virtual boost::intrusive_ptr<DocumentSource> getShardSource() { return this; }
- virtual boost::intrusive_ptr<DocumentSource> getMergeSource() { return this; }
+ // These are used to merge pre-sorted results from a DocumentSourceMergeCursors or a
+ // DocumentSourceCommandShards depending on whether we have finished upgrading to 2.6 or
+ // not.
+ class IteratorFromCursor;
+ class IteratorFromBsonArray;
+ void populateFromCursors(const std::vector<DBClientCursor*>& cursors);
+ void populateFromBsonArrays(const std::vector<BSONArray>& arrays);
- long long getLimit() const { return limit; }
- void setLimit(long long newLimit) { limit = newLimit; }
+ /* these two parallel each other */
+ typedef std::vector<boost::intrusive_ptr<Expression>> SortKey;
+ SortKey vSortKey;
+ std::vector<char> vAscending; // used like std::vector<bool> but without specialization
- /**
- Create a limiting DocumentSource from BSON.
+    /// Extracts the fields in vSortKey from the Document.
+ Value extractKey(const Document& d) const;
- This is a convenience method that uses the above, and operates on
- a BSONElement that has been deteremined to be an Object with an
- element named $limit.
+ /// Compare two Values according to the specified sort key.
+ int compare(const Value& lhs, const Value& rhs) const;
- @param pBsonElement the BSONELement that defines the limit
- @param pExpCtx the expression context
- @returns the grouping DocumentSource
- */
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ typedef Sorter<Value, Document> MySorter;
- static const char limitName[];
+ // For MySorter
+ class Comparator {
+ public:
+ explicit Comparator(const DocumentSourceSort& source) : _source(source) {}
+ int operator()(const MySorter::Data& lhs, const MySorter::Data& rhs) const {
+ return _source.compare(lhs.first, rhs.first);
+ }
private:
- DocumentSourceLimit(const boost::intrusive_ptr<ExpressionContext> &pExpCtx,
- long long limit);
-
- long long limit;
- long long count;
+ const DocumentSourceSort& _source;
};
- class DocumentSourceSkip : public DocumentSource
- , public SplittableDocumentSource {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual bool coalesce(const boost::intrusive_ptr<DocumentSource> &pNextSource);
- virtual Value serialize(bool explain = false) const;
- virtual boost::intrusive_ptr<DocumentSource> optimize();
-
- virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
- return SEE_NEXT; // This doesn't affect needed fields
- }
-
- /**
- Create a new skipping DocumentSource.
+ boost::intrusive_ptr<DocumentSourceLimit> limitSrc;
- @param pExpCtx the expression context
- @returns the DocumentSource
- */
- static boost::intrusive_ptr<DocumentSourceSkip> create(
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ bool _done;
+ bool _mergingPresorted;
+ std::unique_ptr<MySorter::Iterator> _output;
+};
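A brief sketch of create() with the optional limit, assuming expCtx; the limit is carried alongside the sort (see getLimitSrc()) rather than added as a separate stage here:

    boost::intrusive_ptr<DocumentSourceSort> sort =
        DocumentSourceSort::create(expCtx, BSON("ts" << -1), 10);
    long long limit = sort->getLimit();  // 10 here; -1 would mean no limit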
- // Virtuals for SplittableDocumentSource
- // Need to run on rounter. Can't run on shards.
- virtual boost::intrusive_ptr<DocumentSource> getShardSource() { return NULL; }
- virtual boost::intrusive_ptr<DocumentSource> getMergeSource() { return this; }
+class DocumentSourceLimit : public DocumentSource, public SplittableDocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& pNextSource);
+ virtual Value serialize(bool explain = false) const;
- long long getSkip() const { return _skip; }
- void setSkip(long long newSkip) { _skip = newSkip; }
+ virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
+ return SEE_NEXT; // This doesn't affect needed fields
+ }
- /**
- Create a skipping DocumentSource from BSON.
+ /**
+ Create a new limiting DocumentSource.
- This is a convenience method that uses the above, and operates on
- a BSONElement that has been deteremined to be an Object with an
- element named $skip.
+ @param pExpCtx the expression context for the pipeline
+ @returns the DocumentSource
+ */
+ static boost::intrusive_ptr<DocumentSourceLimit> create(
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx, long long limit);
+
+ // Virtuals for SplittableDocumentSource
+    // Needs to run on the router. Running on a shard as well is an optimization.
+ virtual boost::intrusive_ptr<DocumentSource> getShardSource() {
+ return this;
+ }
+ virtual boost::intrusive_ptr<DocumentSource> getMergeSource() {
+ return this;
+ }
+
+ long long getLimit() const {
+ return limit;
+ }
+ void setLimit(long long newLimit) {
+ limit = newLimit;
+ }
- @param pBsonElement the BSONELement that defines the skip
- @param pExpCtx the expression context
- @returns the grouping DocumentSource
- */
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ /**
+ Create a limiting DocumentSource from BSON.
- static const char skipName[];
+ This is a convenience method that uses the above, and operates on
+    a BSONElement that has been determined to be an Object with an
+ element named $limit.
- private:
- DocumentSourceSkip(const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+    @param pBsonElement the BSONElement that defines the limit
+ @param pExpCtx the expression context
+    @returns the limiting DocumentSource
+ */
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- long long _skip;
- bool _needToSkip;
- };
+ static const char limitName[];
+private:
+ DocumentSourceLimit(const boost::intrusive_ptr<ExpressionContext>& pExpCtx, long long limit);
- class DocumentSourceUnwind :
- public DocumentSource {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual Value serialize(bool explain = false) const;
+ long long limit;
+ long long count;
+};
- virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
+class DocumentSourceSkip : public DocumentSource, public SplittableDocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& pNextSource);
+ virtual Value serialize(bool explain = false) const;
+ virtual boost::intrusive_ptr<DocumentSource> optimize();
- /**
- Create a new projection DocumentSource from BSON.
+ virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
+ return SEE_NEXT; // This doesn't affect needed fields
+ }
- This is a convenience for directly handling BSON, and relies on the
- above methods.
+ /**
+ Create a new skipping DocumentSource.
- @param pBsonElement the BSONElement with an object named $project
- @param pExpCtx the expression context for the pipeline
- @returns the created projection
- */
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ @param pExpCtx the expression context
+ @returns the DocumentSource
+ */
+ static boost::intrusive_ptr<DocumentSourceSkip> create(
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ // Virtuals for SplittableDocumentSource
+    // Needs to run on the router. Can't run on shards.
+ virtual boost::intrusive_ptr<DocumentSource> getShardSource() {
+ return NULL;
+ }
+ virtual boost::intrusive_ptr<DocumentSource> getMergeSource() {
+ return this;
+ }
+
+ long long getSkip() const {
+ return _skip;
+ }
+ void setSkip(long long newSkip) {
+ _skip = newSkip;
+ }
- static const char unwindName[];
+ /**
+ Create a skipping DocumentSource from BSON.
- private:
- DocumentSourceUnwind(const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ This is a convenience method that uses the above, and operates on
+    a BSONElement that has been determined to be an Object with an
+ element named $skip.
- /** Specify the field to unwind. */
- void unwindPath(const FieldPath &fieldPath);
+    @param pBsonElement the BSONElement that defines the skip
+ @param pExpCtx the expression context
+    @returns the skipping DocumentSource
+ */
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- // Configuration state.
- std::unique_ptr<FieldPath> _unwindPath;
+ static const char skipName[];
- // Iteration state.
- class Unwinder;
- std::unique_ptr<Unwinder> _unwinder;
- };
+private:
+ DocumentSourceSkip(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- class DocumentSourceGeoNear : public DocumentSource
- , public SplittableDocumentSource
- , public DocumentSourceNeedsMongod {
- public:
- // virtuals from DocumentSource
- virtual boost::optional<Document> getNext();
- virtual const char *getSourceName() const;
- virtual void setSource(DocumentSource *pSource);
- virtual bool coalesce(const boost::intrusive_ptr<DocumentSource> &pNextSource);
- virtual bool isValidInitialSource() const { return true; }
- virtual Value serialize(bool explain = false) const;
+ long long _skip;
+ bool _needToSkip;
+};
- // Virtuals for SplittableDocumentSource
- virtual boost::intrusive_ptr<DocumentSource> getShardSource();
- virtual boost::intrusive_ptr<DocumentSource> getMergeSource();
- static boost::intrusive_ptr<DocumentSource> createFromBson(
- BSONElement elem,
- const boost::intrusive_ptr<ExpressionContext> &pCtx);
+class DocumentSourceUnwind : public DocumentSource {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual Value serialize(bool explain = false) const;
- static char geoNearName[];
+ virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
- long long getLimit() { return limit; }
+ /**
+    Create a new unwinding DocumentSource from BSON.
- // this should only be used for testing
- static boost::intrusive_ptr<DocumentSourceGeoNear> create(
- const boost::intrusive_ptr<ExpressionContext> &pCtx);
+ This is a convenience for directly handling BSON, and relies on the
+ above methods.
- private:
- DocumentSourceGeoNear(const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
-
- void parseOptions(BSONObj options);
- BSONObj buildGeoNearCmd() const;
- void runCommand();
-
- // These fields describe the command to run.
- // coords and distanceField are required, rest are optional
- BSONObj coords; // "near" option, but near is a reserved keyword on windows
- bool coordsIsArray;
- std::unique_ptr<FieldPath> distanceField; // Using unique_ptr because FieldPath can't be empty
- long long limit;
- double maxDistance;
- double minDistance;
- BSONObj query;
- bool spherical;
- double distanceMultiplier;
- std::unique_ptr<FieldPath> includeLocs;
-
- // these fields are used while processing the results
- BSONObj cmdOutput;
- std::unique_ptr<BSONObjIterator> resultsIterator; // iterator over cmdOutput["results"]
- };
+    @param pBsonElement the BSONElement named $unwind
+ @param pExpCtx the expression context for the pipeline
+    @returns the created DocumentSource
+ */
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ static const char unwindName[];
+
+private:
+ DocumentSourceUnwind(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ /** Specify the field to unwind. */
+ void unwindPath(const FieldPath& fieldPath);
+
+ // Configuration state.
+ std::unique_ptr<FieldPath> _unwindPath;
+
+ // Iteration state.
+ class Unwinder;
+ std::unique_ptr<Unwinder> _unwinder;
+};
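And a matching sketch for $unwind, whose spec value is the field path to unwind (expCtx assumed):

    BSONObj spec = BSON("$unwind" << "$tags");
    boost::intrusive_ptr<DocumentSource> unwind =
        DocumentSourceUnwind::createFromBson(spec.firstElement(), expCtx);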
+
+class DocumentSourceGeoNear : public DocumentSource,
+ public SplittableDocumentSource,
+ public DocumentSourceNeedsMongod {
+public:
+ // virtuals from DocumentSource
+ virtual boost::optional<Document> getNext();
+ virtual const char* getSourceName() const;
+ virtual void setSource(DocumentSource* pSource);
+ virtual bool coalesce(const boost::intrusive_ptr<DocumentSource>& pNextSource);
+ virtual bool isValidInitialSource() const {
+ return true;
+ }
+ virtual Value serialize(bool explain = false) const;
+
+ // Virtuals for SplittableDocumentSource
+ virtual boost::intrusive_ptr<DocumentSource> getShardSource();
+ virtual boost::intrusive_ptr<DocumentSource> getMergeSource();
+
+ static boost::intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& pCtx);
+
+ static char geoNearName[];
+
+ long long getLimit() {
+ return limit;
+ }
+
+ // this should only be used for testing
+ static boost::intrusive_ptr<DocumentSourceGeoNear> create(
+ const boost::intrusive_ptr<ExpressionContext>& pCtx);
+
+private:
+ DocumentSourceGeoNear(const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
+
+ void parseOptions(BSONObj options);
+ BSONObj buildGeoNearCmd() const;
+ void runCommand();
+
+ // These fields describe the command to run.
+ // coords and distanceField are required, rest are optional
+    BSONObj coords;  // "near" option, but near is a reserved keyword on Windows
+ bool coordsIsArray;
+ std::unique_ptr<FieldPath> distanceField; // Using unique_ptr because FieldPath can't be empty
+ long long limit;
+ double maxDistance;
+ double minDistance;
+ BSONObj query;
+ bool spherical;
+ double distanceMultiplier;
+ std::unique_ptr<FieldPath> includeLocs;
+
+ // these fields are used while processing the results
+ BSONObj cmdOutput;
+ std::unique_ptr<BSONObjIterator> resultsIterator; // iterator over cmdOutput["results"]
+};
}
diff --git a/src/mongo/db/pipeline/document_source_bson_array.cpp b/src/mongo/db/pipeline/document_source_bson_array.cpp
index ca3e56e65d1..aa05258e954 100644
--- a/src/mongo/db/pipeline/document_source_bson_array.cpp
+++ b/src/mongo/db/pipeline/document_source_bson_array.cpp
@@ -34,41 +34,35 @@
namespace mongo {
- using boost::intrusive_ptr;
+using boost::intrusive_ptr;
- boost::optional<Document> DocumentSourceBsonArray::getNext() {
- pExpCtx->checkForInterrupt();
+boost::optional<Document> DocumentSourceBsonArray::getNext() {
+ pExpCtx->checkForInterrupt();
- if (!arrayIterator.more())
- return boost::none;
+ if (!arrayIterator.more())
+ return boost::none;
- return Document(arrayIterator.next().Obj());
- }
-
- void DocumentSourceBsonArray::setSource(DocumentSource *pSource) {
- /* this doesn't take a source */
- verify(false);
- }
+ return Document(arrayIterator.next().Obj());
+}
- DocumentSourceBsonArray::DocumentSourceBsonArray(
- const BSONObj& array,
- const intrusive_ptr<ExpressionContext> &pExpCtx)
- : DocumentSource(pExpCtx)
- , embeddedObject(array)
- , arrayIterator(embeddedObject)
- {}
+void DocumentSourceBsonArray::setSource(DocumentSource* pSource) {
+ /* this doesn't take a source */
+ verify(false);
+}
- intrusive_ptr<DocumentSourceBsonArray> DocumentSourceBsonArray::create(
- const BSONObj& array,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
+DocumentSourceBsonArray::DocumentSourceBsonArray(const BSONObj& array,
+ const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx), embeddedObject(array), arrayIterator(embeddedObject) {}
- return new DocumentSourceBsonArray(array, pExpCtx);
- }
+intrusive_ptr<DocumentSourceBsonArray> DocumentSourceBsonArray::create(
+ const BSONObj& array, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ return new DocumentSourceBsonArray(array, pExpCtx);
+}
- Value DocumentSourceBsonArray::serialize(bool explain) const {
- if (explain) {
- return Value(DOC("bsonArray" << Document()));
- }
- return Value();
+Value DocumentSourceBsonArray::serialize(bool explain) const {
+ if (explain) {
+ return Value(DOC("bsonArray" << Document()));
}
+ return Value();
+}
}
diff --git a/src/mongo/db/pipeline/document_source_command_shards.cpp b/src/mongo/db/pipeline/document_source_command_shards.cpp
index 5e32f19881c..548c68d4f64 100644
--- a/src/mongo/db/pipeline/document_source_command_shards.cpp
+++ b/src/mongo/db/pipeline/document_source_command_shards.cpp
@@ -33,97 +33,93 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::vector;
+using boost::intrusive_ptr;
+using std::vector;
- void DocumentSourceCommandShards::setSource(DocumentSource *pSource) {
- /* this doesn't take a source */
- verify(false);
- }
+void DocumentSourceCommandShards::setSource(DocumentSource* pSource) {
+ /* this doesn't take a source */
+ verify(false);
+}
- Value DocumentSourceCommandShards::serialize(bool explain) const {
- // this has no BSON equivalent
- verify(false);
- }
+Value DocumentSourceCommandShards::serialize(bool explain) const {
+ // this has no BSON equivalent
+ verify(false);
+}
- DocumentSourceCommandShards::DocumentSourceCommandShards(
- const ShardOutput& shardOutput,
- const intrusive_ptr<ExpressionContext> &pExpCtx):
- DocumentSource(pExpCtx),
- unstarted(true),
- hasCurrent(false),
- newSource(false),
- pBsonSource(),
- pCurrent(),
- iterator(shardOutput.begin()),
- listEnd(shardOutput.end())
- {}
-
- intrusive_ptr<DocumentSourceCommandShards>
- DocumentSourceCommandShards::create(
- const ShardOutput& shardOutput,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- intrusive_ptr<DocumentSourceCommandShards> pSource(
- new DocumentSourceCommandShards(shardOutput, pExpCtx));
- return pSource;
- }
+DocumentSourceCommandShards::DocumentSourceCommandShards(
+ const ShardOutput& shardOutput, const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx),
+ unstarted(true),
+ hasCurrent(false),
+ newSource(false),
+ pBsonSource(),
+ pCurrent(),
+ iterator(shardOutput.begin()),
+ listEnd(shardOutput.end()) {}
+
+intrusive_ptr<DocumentSourceCommandShards> DocumentSourceCommandShards::create(
+ const ShardOutput& shardOutput, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ intrusive_ptr<DocumentSourceCommandShards> pSource(
+ new DocumentSourceCommandShards(shardOutput, pExpCtx));
+ return pSource;
+}
namespace {
- BSONArray extractResultsArray(const Strategy::CommandResult& result) {
- /* grab the next command result */
- BSONObj resultObj = result.result;
-
- uassert(16390, str::stream() << "sharded pipeline failed on shard " <<
- result.shardTargetId << ": " <<
- resultObj.toString(),
- resultObj["ok"].trueValue());
-
- /* grab the result array out of the shard server's response */
- BSONElement resultArray = resultObj["result"];
- massert(16391, str::stream() << "no result array? shard:" <<
- result.shardTargetId << ": " <<
- resultObj.toString(),
- resultArray.type() == Array);
-
- return BSONArray(resultArray.Obj());
- }
+BSONArray extractResultsArray(const Strategy::CommandResult& result) {
+ /* grab the next command result */
+ BSONObj resultObj = result.result;
+
+ uassert(16390,
+ str::stream() << "sharded pipeline failed on shard " << result.shardTargetId << ": "
+ << resultObj.toString(),
+ resultObj["ok"].trueValue());
+
+ /* grab the result array out of the shard server's response */
+ BSONElement resultArray = resultObj["result"];
+ massert(16391,
+ str::stream() << "no result array? shard:" << result.shardTargetId << ": "
+ << resultObj.toString(),
+ resultArray.type() == Array);
+
+ return BSONArray(resultArray.Obj());
+}
}
- vector<BSONArray> DocumentSourceCommandShards::getArrays() {
- vector<BSONArray> out;
- for (; iterator != listEnd; ++iterator) {
- out.push_back(extractResultsArray(*iterator));
- }
- return out;
+vector<BSONArray> DocumentSourceCommandShards::getArrays() {
+ vector<BSONArray> out;
+ for (; iterator != listEnd; ++iterator) {
+ out.push_back(extractResultsArray(*iterator));
}
+ return out;
+}
- boost::optional<Document> DocumentSourceCommandShards::getNext() {
- pExpCtx->checkForInterrupt();
-
- while(true) {
- if (!pBsonSource.get()) {
- /* if there aren't any more futures, we're done */
- if (iterator == listEnd)
- return boost::none;
+boost::optional<Document> DocumentSourceCommandShards::getNext() {
+ pExpCtx->checkForInterrupt();
- BSONArray resultArray = extractResultsArray(*iterator);
+ while (true) {
+ if (!pBsonSource.get()) {
+ /* if there aren't any more futures, we're done */
+ if (iterator == listEnd)
+ return boost::none;
- // done with error checking, don't need the shard name anymore
- ++iterator;
+ BSONArray resultArray = extractResultsArray(*iterator);
- if (resultArray.isEmpty()){
- // this shard had no results, on to the next one
- continue;
- }
+ // done with error checking, don't need the shard name anymore
+ ++iterator;
- pBsonSource = DocumentSourceBsonArray::create(resultArray, pExpCtx);
+ if (resultArray.isEmpty()) {
+ // this shard had no results, on to the next one
+ continue;
}
- if (boost::optional<Document> out = pBsonSource->getNext())
- return out;
-
- // Source exhausted. Try next.
- pBsonSource.reset();
+ pBsonSource = DocumentSourceBsonArray::create(resultArray, pExpCtx);
}
+
+ if (boost::optional<Document> out = pBsonSource->getNext())
+ return out;
+
+ // Source exhausted. Try next.
+ pBsonSource.reset();
}
}
+}
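
Editor's sketch (not part of the diff above): the reformatted DocumentSourceCommandShards::getNext() walks a list of per-shard replies, skips shards that returned an empty result array, and hands documents out one at a time. The stand-alone C++ below mirrors only that control flow; ShardReply, ShardResultIterator, and the string "documents" are invented stand-ins for illustration and are not MongoDB APIs.

#include <deque>
#include <optional>
#include <string>
#include <vector>

// Hypothetical stand-in for one shard's reply: a shard id plus its result batch.
struct ShardReply {
    std::string shardId;
    std::vector<std::string> results;  // documents, reduced to strings for the sketch
};

// Mirrors the getNext() loop in the diff: advance through shard replies, skipping
// shards that returned nothing, and emit one document at a time.
class ShardResultIterator {
public:
    explicit ShardResultIterator(std::vector<ShardReply> replies)
        : _replies(std::move(replies)), _it(_replies.begin()) {}

    std::optional<std::string> next() {
        while (true) {
            if (_current.empty()) {
                if (_it == _replies.end())
                    return std::nullopt;  // no more shards: done
                _current.assign(_it->results.begin(), _it->results.end());
                ++_it;                    // shard consumed, like ++iterator in the diff
                continue;                 // loop again if this shard's batch was empty
            }
            std::string out = _current.front();
            _current.pop_front();
            return out;
        }
    }

private:
    std::vector<ShardReply> _replies;
    std::vector<ShardReply>::iterator _it;
    std::deque<std::string> _current;
};
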
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index d862663363d..702852f53b2 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -43,179 +43,175 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::shared_ptr;
- using std::string;
+using boost::intrusive_ptr;
+using std::shared_ptr;
+using std::string;
- DocumentSourceCursor::~DocumentSourceCursor() {
- dispose();
- }
-
- const char *DocumentSourceCursor::getSourceName() const {
- return "$cursor";
- }
+DocumentSourceCursor::~DocumentSourceCursor() {
+ dispose();
+}
- boost::optional<Document> DocumentSourceCursor::getNext() {
- pExpCtx->checkForInterrupt();
+const char* DocumentSourceCursor::getSourceName() const {
+ return "$cursor";
+}
- if (_currentBatch.empty()) {
- loadBatch();
+boost::optional<Document> DocumentSourceCursor::getNext() {
+ pExpCtx->checkForInterrupt();
- if (_currentBatch.empty()) // exhausted the cursor
- return boost::none;
- }
+ if (_currentBatch.empty()) {
+ loadBatch();
- Document out = _currentBatch.front();
- _currentBatch.pop_front();
- return out;
+ if (_currentBatch.empty()) // exhausted the cursor
+ return boost::none;
}
- void DocumentSourceCursor::dispose() {
- // Can't call in to PlanExecutor or ClientCursor registries from this function since it
- // will be called when an agg cursor is killed which would cause a deadlock.
- _exec.reset();
- _currentBatch.clear();
- }
-
- void DocumentSourceCursor::loadBatch() {
- if (!_exec) {
- dispose();
- return;
- }
+ Document out = _currentBatch.front();
+ _currentBatch.pop_front();
+ return out;
+}
- // We have already validated the sharding version when we constructed the PlanExecutor
- // so we shouldn't check it again.
- const NamespaceString nss(_ns);
- AutoGetCollectionForRead autoColl(pExpCtx->opCtx, nss);
+void DocumentSourceCursor::dispose() {
+ // Can't call in to PlanExecutor or ClientCursor registries from this function since it
+ // will be called when an agg cursor is killed which would cause a deadlock.
+ _exec.reset();
+ _currentBatch.clear();
+}
- _exec->restoreState(pExpCtx->opCtx);
+void DocumentSourceCursor::loadBatch() {
+ if (!_exec) {
+ dispose();
+ return;
+ }
- int memUsageBytes = 0;
- BSONObj obj;
- PlanExecutor::ExecState state;
- while ((state = _exec->getNext(&obj, NULL)) == PlanExecutor::ADVANCED) {
- if (_dependencies) {
- _currentBatch.push_back(_dependencies->extractFields(obj));
- }
- else {
- _currentBatch.push_back(Document::fromBsonWithMetaData(obj));
- }
+ // We have already validated the sharding version when we constructed the PlanExecutor
+ // so we shouldn't check it again.
+ const NamespaceString nss(_ns);
+ AutoGetCollectionForRead autoColl(pExpCtx->opCtx, nss);
+
+ _exec->restoreState(pExpCtx->opCtx);
+
+ int memUsageBytes = 0;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while ((state = _exec->getNext(&obj, NULL)) == PlanExecutor::ADVANCED) {
+ if (_dependencies) {
+ _currentBatch.push_back(_dependencies->extractFields(obj));
+ } else {
+ _currentBatch.push_back(Document::fromBsonWithMetaData(obj));
+ }
- if (_limit) {
- if (++_docsAddedToBatches == _limit->getLimit()) {
- break;
- }
- verify(_docsAddedToBatches < _limit->getLimit());
+ if (_limit) {
+ if (++_docsAddedToBatches == _limit->getLimit()) {
+ break;
}
+ verify(_docsAddedToBatches < _limit->getLimit());
+ }
- memUsageBytes += _currentBatch.back().getApproximateSize();
+ memUsageBytes += _currentBatch.back().getApproximateSize();
- if (memUsageBytes > MaxBytesToReturnToClientAtOnce) {
- // End this batch and prepare PlanExecutor for yielding.
- _exec->saveState();
- return;
- }
+ if (memUsageBytes > MaxBytesToReturnToClientAtOnce) {
+ // End this batch and prepare PlanExecutor for yielding.
+ _exec->saveState();
+ return;
}
+ }
- // If we got here, there won't be any more documents, so destroy the executor. Can't use
- // dispose since we want to keep the _currentBatch.
- _exec.reset();
-
- uassert(16028, str::stream() << "collection or index disappeared when cursor yielded: "
- << WorkingSetCommon::toStatusString(obj),
- state != PlanExecutor::DEAD);
+ // If we got here, there won't be any more documents, so destroy the executor. Can't use
+ // dispose since we want to keep the _currentBatch.
+ _exec.reset();
- uassert(17285, str::stream() << "cursor encountered an error: "
- << WorkingSetCommon::toStatusString(obj),
- state != PlanExecutor::FAILURE);
+ uassert(16028,
+ str::stream() << "collection or index disappeared when cursor yielded: "
+ << WorkingSetCommon::toStatusString(obj),
+ state != PlanExecutor::DEAD);
- massert(17286, str::stream() << "Unexpected return from PlanExecutor::getNext: " << state,
- state == PlanExecutor::IS_EOF || state == PlanExecutor::ADVANCED);
- }
+ uassert(
+ 17285,
+ str::stream() << "cursor encountered an error: " << WorkingSetCommon::toStatusString(obj),
+ state != PlanExecutor::FAILURE);
- void DocumentSourceCursor::setSource(DocumentSource *pSource) {
- /* this doesn't take a source */
- verify(false);
- }
+ massert(17286,
+ str::stream() << "Unexpected return from PlanExecutor::getNext: " << state,
+ state == PlanExecutor::IS_EOF || state == PlanExecutor::ADVANCED);
+}
- long long DocumentSourceCursor::getLimit() const {
- return _limit ? _limit->getLimit() : -1;
- }
+void DocumentSourceCursor::setSource(DocumentSource* pSource) {
+ /* this doesn't take a source */
+ verify(false);
+}
- bool DocumentSourceCursor::coalesce(const intrusive_ptr<DocumentSource>& nextSource) {
- // Note: Currently we assume the $limit is logically after any $sort or
- // $match. If we ever pull in $match or $sort using this method, we
- // will need to keep track of the order of the sub-stages.
+long long DocumentSourceCursor::getLimit() const {
+ return _limit ? _limit->getLimit() : -1;
+}
- if (!_limit) {
- _limit = dynamic_cast<DocumentSourceLimit*>(nextSource.get());
- return _limit.get(); // false if next is not a $limit
- }
- else {
- return _limit->coalesce(nextSource);
- }
+bool DocumentSourceCursor::coalesce(const intrusive_ptr<DocumentSource>& nextSource) {
+ // Note: Currently we assume the $limit is logically after any $sort or
+ // $match. If we ever pull in $match or $sort using this method, we
+ // will need to keep track of the order of the sub-stages.
- return false;
+ if (!_limit) {
+ _limit = dynamic_cast<DocumentSourceLimit*>(nextSource.get());
+ return _limit.get(); // false if next is not a $limit
+ } else {
+ return _limit->coalesce(nextSource);
}
- Value DocumentSourceCursor::serialize(bool explain) const {
- // we never parse a documentSourceCursor, so we only serialize for explain
- if (!explain)
- return Value();
+ return false;
+}
- // Get planner-level explain info from the underlying PlanExecutor.
- BSONObjBuilder explainBuilder;
- {
- const NamespaceString nss(_ns);
- AutoGetCollectionForRead autoColl(pExpCtx->opCtx, nss);
+Value DocumentSourceCursor::serialize(bool explain) const {
+ // we never parse a documentSourceCursor, so we only serialize for explain
+ if (!explain)
+ return Value();
- massert(17392, "No _exec. Were we disposed before explained?", _exec);
+ // Get planner-level explain info from the underlying PlanExecutor.
+ BSONObjBuilder explainBuilder;
+ {
+ const NamespaceString nss(_ns);
+ AutoGetCollectionForRead autoColl(pExpCtx->opCtx, nss);
- _exec->restoreState(pExpCtx->opCtx);
- Explain::explainStages(_exec.get(), ExplainCommon::QUERY_PLANNER, &explainBuilder);
- _exec->saveState();
- }
+ massert(17392, "No _exec. Were we disposed before explained?", _exec);
- MutableDocument out;
- out["query"] = Value(_query);
+ _exec->restoreState(pExpCtx->opCtx);
+ Explain::explainStages(_exec.get(), ExplainCommon::QUERY_PLANNER, &explainBuilder);
+ _exec->saveState();
+ }
- if (!_sort.isEmpty())
- out["sort"] = Value(_sort);
+ MutableDocument out;
+ out["query"] = Value(_query);
- if (_limit)
- out["limit"] = Value(_limit->getLimit());
+ if (!_sort.isEmpty())
+ out["sort"] = Value(_sort);
- if (!_projection.isEmpty())
- out["fields"] = Value(_projection);
+ if (_limit)
+ out["limit"] = Value(_limit->getLimit());
- // Add explain results from the query system into the agg explain output.
- BSONObj explainObj = explainBuilder.obj();
- invariant(explainObj.hasField("queryPlanner"));
- out["queryPlanner"] = Value(explainObj["queryPlanner"]);
+ if (!_projection.isEmpty())
+ out["fields"] = Value(_projection);
- return Value(DOC(getSourceName() << out.freezeToValue()));
- }
+ // Add explain results from the query system into the agg explain output.
+ BSONObj explainObj = explainBuilder.obj();
+ invariant(explainObj.hasField("queryPlanner"));
+ out["queryPlanner"] = Value(explainObj["queryPlanner"]);
- DocumentSourceCursor::DocumentSourceCursor(const string& ns,
- const std::shared_ptr<PlanExecutor>& exec,
- const intrusive_ptr<ExpressionContext> &pCtx)
- : DocumentSource(pCtx)
- , _docsAddedToBatches(0)
- , _ns(ns)
- , _exec(exec)
- {}
-
- intrusive_ptr<DocumentSourceCursor> DocumentSourceCursor::create(
- const string& ns,
- const std::shared_ptr<PlanExecutor>& exec,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- return new DocumentSourceCursor(ns, exec, pExpCtx);
- }
+ return Value(DOC(getSourceName() << out.freezeToValue()));
+}
- void DocumentSourceCursor::setProjection(
- const BSONObj& projection,
- const boost::optional<ParsedDeps>& deps) {
- _projection = projection;
- _dependencies = deps;
- }
+DocumentSourceCursor::DocumentSourceCursor(const string& ns,
+ const std::shared_ptr<PlanExecutor>& exec,
+ const intrusive_ptr<ExpressionContext>& pCtx)
+ : DocumentSource(pCtx), _docsAddedToBatches(0), _ns(ns), _exec(exec) {}
+
+intrusive_ptr<DocumentSourceCursor> DocumentSourceCursor::create(
+ const string& ns,
+ const std::shared_ptr<PlanExecutor>& exec,
+ const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ return new DocumentSourceCursor(ns, exec, pExpCtx);
+}
+
+void DocumentSourceCursor::setProjection(const BSONObj& projection,
+ const boost::optional<ParsedDeps>& deps) {
+ _projection = projection;
+ _dependencies = deps;
+}
}
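
Editor's sketch (illustrative only): DocumentSourceCursor::loadBatch() above keeps pulling documents until a coalesced $limit is satisfied or the batch's byte budget is exceeded, then saves executor state and returns. The snippet below shows that budgeting logic in isolation; loadBatch, kMaxBatchBytes, and the producer callback are hypothetical names, not the server's interfaces.

#include <cstddef>
#include <deque>
#include <functional>
#include <optional>
#include <string>

// Stand-in for MaxBytesToReturnToClientAtOnce in the diff.
constexpr std::size_t kMaxBatchBytes = 4 * 1024 * 1024;

// Pull rows from a producer until either the row budget (a stand-in for a
// coalesced $limit) or the byte budget is exhausted, then hand the batch back.
std::deque<std::string> loadBatch(const std::function<std::optional<std::string>()>& producer,
                                  std::optional<long long> limit,
                                  long long& docsAlreadyReturned) {
    std::deque<std::string> batch;
    std::size_t bytes = 0;
    while (std::optional<std::string> doc = producer()) {
        batch.push_back(*doc);
        bytes += doc->size();
        if (limit && ++docsAlreadyReturned == *limit)
            break;  // limit satisfied; stop filling the batch early
        if (bytes > kMaxBatchBytes)
            break;  // end this batch and let the caller yield, as saveState() does
    }
    return batch;
}
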
diff --git a/src/mongo/db/pipeline/document_source_geo_near.cpp b/src/mongo/db/pipeline/document_source_geo_near.cpp
index 20ba3e2dadc..c66702480fc 100644
--- a/src/mongo/db/pipeline/document_source_geo_near.cpp
+++ b/src/mongo/db/pipeline/document_source_geo_near.cpp
@@ -36,197 +36,195 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::min;
+using boost::intrusive_ptr;
+using std::min;
- char DocumentSourceGeoNear::geoNearName[] = "$geoNear";
- const char *DocumentSourceGeoNear::getSourceName() const { return geoNearName; }
-
- boost::optional<Document> DocumentSourceGeoNear::getNext() {
- pExpCtx->checkForInterrupt();
+char DocumentSourceGeoNear::geoNearName[] = "$geoNear";
+const char* DocumentSourceGeoNear::getSourceName() const {
+ return geoNearName;
+}
- if (!resultsIterator)
- runCommand();
+boost::optional<Document> DocumentSourceGeoNear::getNext() {
+ pExpCtx->checkForInterrupt();
- if (!resultsIterator->more())
- return boost::none;
+ if (!resultsIterator)
+ runCommand();
- // each result from the geoNear command is wrapped in a wrapper object with "obj",
- // "dis" and maybe "loc" fields. We want to take the object from "obj" and inject the
- // other fields into it.
- Document result (resultsIterator->next().embeddedObject());
- MutableDocument output (result["obj"].getDocument());
- output.setNestedField(*distanceField, result["dis"]);
- if (includeLocs)
- output.setNestedField(*includeLocs, result["loc"]);
+ if (!resultsIterator->more())
+ return boost::none;
- return output.freeze();
- }
+ // each result from the geoNear command is wrapped in a wrapper object with "obj",
+ // "dis" and maybe "loc" fields. We want to take the object from "obj" and inject the
+ // other fields into it.
+ Document result(resultsIterator->next().embeddedObject());
+ MutableDocument output(result["obj"].getDocument());
+ output.setNestedField(*distanceField, result["dis"]);
+ if (includeLocs)
+ output.setNestedField(*includeLocs, result["loc"]);
- void DocumentSourceGeoNear::setSource(DocumentSource*) {
- uasserted(16602, "$geoNear is only allowed as the first pipeline stage");
- }
+ return output.freeze();
+}
- bool DocumentSourceGeoNear::coalesce(const intrusive_ptr<DocumentSource> &pNextSource) {
- DocumentSourceLimit* limitSrc = dynamic_cast<DocumentSourceLimit*>(pNextSource.get());
- if (limitSrc) {
- limit = min(limit, limitSrc->getLimit());
- return true;
- }
+void DocumentSourceGeoNear::setSource(DocumentSource*) {
+ uasserted(16602, "$geoNear is only allowed as the first pipeline stage");
+}
- return false;
+bool DocumentSourceGeoNear::coalesce(const intrusive_ptr<DocumentSource>& pNextSource) {
+ DocumentSourceLimit* limitSrc = dynamic_cast<DocumentSourceLimit*>(pNextSource.get());
+ if (limitSrc) {
+ limit = min(limit, limitSrc->getLimit());
+ return true;
}
- // This command is sent as-is to the shards.
- // On router this becomes a sort by distance (nearest-first) with limit.
- intrusive_ptr<DocumentSource> DocumentSourceGeoNear::getShardSource() { return this; }
- intrusive_ptr<DocumentSource> DocumentSourceGeoNear::getMergeSource() {
- return DocumentSourceSort::create(pExpCtx,
- BSON(distanceField->getPath(false) << 1),
- limit);
- }
+ return false;
+}
- Value DocumentSourceGeoNear::serialize(bool explain) const {
- MutableDocument result;
+// This command is sent as-is to the shards.
+// On router this becomes a sort by distance (nearest-first) with limit.
+intrusive_ptr<DocumentSource> DocumentSourceGeoNear::getShardSource() {
+ return this;
+}
+intrusive_ptr<DocumentSource> DocumentSourceGeoNear::getMergeSource() {
+ return DocumentSourceSort::create(pExpCtx, BSON(distanceField->getPath(false) << 1), limit);
+}
- if (coordsIsArray) {
- result.setField("near", Value(BSONArray(coords)));
- }
- else {
- result.setField("near", Value(coords));
- }
+Value DocumentSourceGeoNear::serialize(bool explain) const {
+ MutableDocument result;
- // not in buildGeoNearCmd
- result.setField("distanceField", Value(distanceField->getPath(false)));
+ if (coordsIsArray) {
+ result.setField("near", Value(BSONArray(coords)));
+ } else {
+ result.setField("near", Value(coords));
+ }
- result.setField("limit", Value(limit));
+ // not in buildGeoNearCmd
+ result.setField("distanceField", Value(distanceField->getPath(false)));
- if (maxDistance > 0)
- result.setField("maxDistance", Value(maxDistance));
+ result.setField("limit", Value(limit));
- if (minDistance > 0)
- result.setField("minDistance", Value(minDistance));
+ if (maxDistance > 0)
+ result.setField("maxDistance", Value(maxDistance));
- result.setField("query", Value(query));
- result.setField("spherical", Value(spherical));
- result.setField("distanceMultiplier", Value(distanceMultiplier));
+ if (minDistance > 0)
+ result.setField("minDistance", Value(minDistance));
- if (includeLocs)
- result.setField("includeLocs", Value(includeLocs->getPath(false)));
+ result.setField("query", Value(query));
+ result.setField("spherical", Value(spherical));
+ result.setField("distanceMultiplier", Value(distanceMultiplier));
- return Value(DOC(getSourceName() << result.freeze()));
- }
+ if (includeLocs)
+ result.setField("includeLocs", Value(includeLocs->getPath(false)));
- BSONObj DocumentSourceGeoNear::buildGeoNearCmd() const {
- // this is very similar to sourceToBson, but slightly different.
- // differences will be noted.
+ return Value(DOC(getSourceName() << result.freeze()));
+}
- BSONObjBuilder geoNear; // not building a subField
+BSONObj DocumentSourceGeoNear::buildGeoNearCmd() const {
+ // this is very similar to sourceToBson, but slightly different.
+ // differences will be noted.
- geoNear.append("geoNear", pExpCtx->ns.coll()); // not in toBson
+ BSONObjBuilder geoNear; // not building a subField
- if (coordsIsArray) {
- geoNear.appendArray("near", coords);
- }
- else {
- geoNear.append("near", coords);
- }
+ geoNear.append("geoNear", pExpCtx->ns.coll()); // not in toBson
- geoNear.append("num", limit); // called limit in toBson
+ if (coordsIsArray) {
+ geoNear.appendArray("near", coords);
+ } else {
+ geoNear.append("near", coords);
+ }
- if (maxDistance > 0)
- geoNear.append("maxDistance", maxDistance);
+ geoNear.append("num", limit); // called limit in toBson
- if (minDistance > 0)
- geoNear.append("minDistance", minDistance);
+ if (maxDistance > 0)
+ geoNear.append("maxDistance", maxDistance);
- geoNear.append("query", query);
- geoNear.append("spherical", spherical);
- geoNear.append("distanceMultiplier", distanceMultiplier);
+ if (minDistance > 0)
+ geoNear.append("minDistance", minDistance);
- if (includeLocs)
- geoNear.append("includeLocs", true); // String in toBson
+ geoNear.append("query", query);
+ geoNear.append("spherical", spherical);
+ geoNear.append("distanceMultiplier", distanceMultiplier);
- return geoNear.obj();
- }
+ if (includeLocs)
+ geoNear.append("includeLocs", true); // String in toBson
- void DocumentSourceGeoNear::runCommand() {
- massert(16603, "Already ran geoNearCommand",
- !resultsIterator);
+ return geoNear.obj();
+}
- bool ok = _mongod->directClient()->runCommand(pExpCtx->ns.db().toString(),
- buildGeoNearCmd(),
- cmdOutput);
- uassert(16604, "geoNear command failed: " + cmdOutput.toString(),
- ok);
+void DocumentSourceGeoNear::runCommand() {
+ massert(16603, "Already ran geoNearCommand", !resultsIterator);
- resultsIterator.reset(new BSONObjIterator(cmdOutput["results"].embeddedObject()));
- }
+ bool ok = _mongod->directClient()->runCommand(
+ pExpCtx->ns.db().toString(), buildGeoNearCmd(), cmdOutput);
+ uassert(16604, "geoNear command failed: " + cmdOutput.toString(), ok);
- intrusive_ptr<DocumentSourceGeoNear> DocumentSourceGeoNear::create(
- const intrusive_ptr<ExpressionContext> &pCtx) {
- return new DocumentSourceGeoNear(pCtx);
- }
+ resultsIterator.reset(new BSONObjIterator(cmdOutput["results"].embeddedObject()));
+}
- intrusive_ptr<DocumentSource> DocumentSourceGeoNear::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pCtx) {
- intrusive_ptr<DocumentSourceGeoNear> out = new DocumentSourceGeoNear(pCtx);
- out->parseOptions(elem.embeddedObjectUserCheck());
- return out;
- }
+intrusive_ptr<DocumentSourceGeoNear> DocumentSourceGeoNear::create(
+ const intrusive_ptr<ExpressionContext>& pCtx) {
+ return new DocumentSourceGeoNear(pCtx);
+}
- void DocumentSourceGeoNear::parseOptions(BSONObj options) {
- // near and distanceField are required
+intrusive_ptr<DocumentSource> DocumentSourceGeoNear::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pCtx) {
+ intrusive_ptr<DocumentSourceGeoNear> out = new DocumentSourceGeoNear(pCtx);
+ out->parseOptions(elem.embeddedObjectUserCheck());
+ return out;
+}
- uassert(16605, "$geoNear requires a 'near' option as an Array",
- options["near"].isABSONObj()); // Array or Object (Object is deprecated)
- coordsIsArray = options["near"].type() == Array;
- coords = options["near"].embeddedObject().getOwned();
+void DocumentSourceGeoNear::parseOptions(BSONObj options) {
+ // near and distanceField are required
- uassert(16606, "$geoNear requires a 'distanceField' option as a String",
- options["distanceField"].type() == String);
- distanceField.reset(new FieldPath(options["distanceField"].str()));
+ uassert(16605,
+ "$geoNear requires a 'near' option as an Array",
+ options["near"].isABSONObj()); // Array or Object (Object is deprecated)
+ coordsIsArray = options["near"].type() == Array;
+ coords = options["near"].embeddedObject().getOwned();
- // remaining fields are optional
+ uassert(16606,
+ "$geoNear requires a 'distanceField' option as a String",
+ options["distanceField"].type() == String);
+ distanceField.reset(new FieldPath(options["distanceField"].str()));
- // num and limit are synonyms
- if (options["limit"].isNumber())
- limit = options["limit"].numberLong();
- if (options["num"].isNumber())
- limit = options["num"].numberLong();
+ // remaining fields are optional
- if (options["maxDistance"].isNumber())
- maxDistance = options["maxDistance"].numberDouble();
+ // num and limit are synonyms
+ if (options["limit"].isNumber())
+ limit = options["limit"].numberLong();
+ if (options["num"].isNumber())
+ limit = options["num"].numberLong();
- if (options["minDistance"].isNumber())
- minDistance = options["minDistance"].numberDouble();
+ if (options["maxDistance"].isNumber())
+ maxDistance = options["maxDistance"].numberDouble();
- if (options["query"].type() == Object)
- query = options["query"].embeddedObject().getOwned();
+ if (options["minDistance"].isNumber())
+ minDistance = options["minDistance"].numberDouble();
- spherical = options["spherical"].trueValue();
+ if (options["query"].type() == Object)
+ query = options["query"].embeddedObject().getOwned();
- if (options["distanceMultiplier"].isNumber())
- distanceMultiplier = options["distanceMultiplier"].numberDouble();
+ spherical = options["spherical"].trueValue();
- if (options.hasField("includeLocs")) {
- uassert(16607, "$geoNear requires that 'includeLocs' option is a String",
- options["includeLocs"].type() == String);
- includeLocs.reset(new FieldPath(options["includeLocs"].str()));
- }
+ if (options["distanceMultiplier"].isNumber())
+ distanceMultiplier = options["distanceMultiplier"].numberDouble();
- if (options.hasField("uniqueDocs"))
- warning() << "ignoring deprecated uniqueDocs option in $geoNear aggregation stage";
+ if (options.hasField("includeLocs")) {
+ uassert(16607,
+ "$geoNear requires that 'includeLocs' option is a String",
+ options["includeLocs"].type() == String);
+ includeLocs.reset(new FieldPath(options["includeLocs"].str()));
}
- DocumentSourceGeoNear::DocumentSourceGeoNear(const intrusive_ptr<ExpressionContext> &pExpCtx)
- : DocumentSource(pExpCtx)
- , coordsIsArray(false)
- , limit(100)
- , maxDistance(-1.0)
- , minDistance(-1.0)
- , spherical(false)
- , distanceMultiplier(1.0)
- {}
+ if (options.hasField("uniqueDocs"))
+ warning() << "ignoring deprecated uniqueDocs option in $geoNear aggregation stage";
+}
+
+DocumentSourceGeoNear::DocumentSourceGeoNear(const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx),
+ coordsIsArray(false),
+ limit(100),
+ maxDistance(-1.0),
+ minDistance(-1.0),
+ spherical(false),
+ distanceMultiplier(1.0) {}
}
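
Editor's sketch (illustrative only): in parseOptions() above, "limit" and "num" are synonyms, and because the "num" check runs after the "limit" check, "num" wins when both are supplied; the stage also defaults the limit to 100 in its constructor. The small function below mirrors just that resolution order; resolveGeoNearLimit and its map-based options argument are invented for illustration and are not MongoDB code.

#include <map>
#include <optional>
#include <string>

// Hypothetical mirror of the limit/num handling in parseOptions(): both keys set
// the same field, and "num" overrides "limit" because it is read second.
long long resolveGeoNearLimit(const std::map<std::string, long long>& options,
                              long long defaultLimit = 100) {
    long long limit = defaultLimit;  // the constructor default in the diff
    if (auto it = options.find("limit"); it != options.end())
        limit = it->second;
    if (auto it = options.find("num"); it != options.end())
        limit = it->second;          // "num" read after "limit", so it wins
    return limit;
}
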
diff --git a/src/mongo/db/pipeline/document_source_group.cpp b/src/mongo/db/pipeline/document_source_group.cpp
index aba16b65e17..487f0809ed0 100644
--- a/src/mongo/db/pipeline/document_source_group.cpp
+++ b/src/mongo/db/pipeline/document_source_group.cpp
@@ -39,596 +39,580 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::shared_ptr;
- using std::pair;
- using std::vector;
+using boost::intrusive_ptr;
+using std::shared_ptr;
+using std::pair;
+using std::vector;
- const char DocumentSourceGroup::groupName[] = "$group";
+const char DocumentSourceGroup::groupName[] = "$group";
- const char *DocumentSourceGroup::getSourceName() const {
- return groupName;
- }
+const char* DocumentSourceGroup::getSourceName() const {
+ return groupName;
+}
- boost::optional<Document> DocumentSourceGroup::getNext() {
- pExpCtx->checkForInterrupt();
+boost::optional<Document> DocumentSourceGroup::getNext() {
+ pExpCtx->checkForInterrupt();
- if (!populated)
- populate();
+ if (!populated)
+ populate();
- if (_spilled) {
- if (!_sorterIterator)
- return boost::none;
+ if (_spilled) {
+ if (!_sorterIterator)
+ return boost::none;
- const size_t numAccumulators = vpAccumulatorFactory.size();
- for (size_t i=0; i < numAccumulators; i++) {
- _currentAccumulators[i]->reset(); // prep accumulators for a new group
- }
+ const size_t numAccumulators = vpAccumulatorFactory.size();
+ for (size_t i = 0; i < numAccumulators; i++) {
+ _currentAccumulators[i]->reset(); // prep accumulators for a new group
+ }
- _currentId = _firstPartOfNextGroup.first;
- while (_currentId == _firstPartOfNextGroup.first) {
- // Inside of this loop, _firstPartOfNextGroup is the current data being processed.
- // At loop exit, it is the first value to be processed in the next group.
+ _currentId = _firstPartOfNextGroup.first;
+ while (_currentId == _firstPartOfNextGroup.first) {
+ // Inside of this loop, _firstPartOfNextGroup is the current data being processed.
+ // At loop exit, it is the first value to be processed in the next group.
- switch (numAccumulators) { // mirrors switch in spill()
- case 0: // no Accumulators so no Values
+ switch (numAccumulators) { // mirrors switch in spill()
+ case 0: // no Accumulators so no Values
break;
- case 1: // single accumulators serialize as a single Value
+ case 1: // single accumulators serialize as a single Value
_currentAccumulators[0]->process(_firstPartOfNextGroup.second,
/*merging=*/true);
break;
- default: { // multiple accumulators serialize as an array
+ default: { // multiple accumulators serialize as an array
const vector<Value>& accumulatorStates =
_firstPartOfNextGroup.second.getArray();
- for (size_t i=0; i < numAccumulators; i++) {
+ for (size_t i = 0; i < numAccumulators; i++) {
_currentAccumulators[i]->process(accumulatorStates[i],
/*merging=*/true);
}
break;
}
- }
-
- if (!_sorterIterator->more()) {
- dispose();
- break;
- }
-
- _firstPartOfNextGroup = _sorterIterator->next();
}
- return makeDocument(_currentId, _currentAccumulators, pExpCtx->inShard);
-
- } else {
- if (groups.empty())
- return boost::none;
-
- Document out = makeDocument(groupsIterator->first,
- groupsIterator->second,
- pExpCtx->inShard);
-
- if (++groupsIterator == groups.end())
+ if (!_sorterIterator->more()) {
dispose();
+ break;
+ }
- return out;
+ _firstPartOfNextGroup = _sorterIterator->next();
}
- }
-
- void DocumentSourceGroup::dispose() {
- // free our resources
- GroupsMap().swap(groups);
- _sorterIterator.reset();
- // make us look done
- groupsIterator = groups.end();
+ return makeDocument(_currentId, _currentAccumulators, pExpCtx->inShard);
- // free our source's resources
- pSource->dispose();
- }
+ } else {
+ if (groups.empty())
+ return boost::none;
- intrusive_ptr<DocumentSource> DocumentSourceGroup::optimize() {
- // TODO if all _idExpressions are ExpressionConstants after optimization, then we know there
- // will only be one group. We should take advantage of that to avoid going through the hash
- // table.
- for (size_t i = 0; i < _idExpressions.size(); i++) {
- _idExpressions[i] = _idExpressions[i]->optimize();
- }
+ Document out =
+ makeDocument(groupsIterator->first, groupsIterator->second, pExpCtx->inShard);
- for (size_t i = 0; i < vFieldName.size(); i++) {
- vpExpression[i] = vpExpression[i]->optimize();
- }
+ if (++groupsIterator == groups.end())
+ dispose();
- return this;
+ return out;
}
+}
- Value DocumentSourceGroup::serialize(bool explain) const {
- MutableDocument insides;
+void DocumentSourceGroup::dispose() {
+ // free our resources
+ GroupsMap().swap(groups);
+ _sorterIterator.reset();
- // add the _id
- if (_idFieldNames.empty()) {
- invariant(_idExpressions.size() == 1);
- insides["_id"] = _idExpressions[0]->serialize(explain);
- }
- else {
- // decomposed document case
- invariant(_idExpressions.size() == _idFieldNames.size());
- MutableDocument md;
- for (size_t i = 0; i < _idExpressions.size(); i++) {
- md[_idFieldNames[i]] = _idExpressions[i]->serialize(explain);
- }
- insides["_id"] = md.freezeToValue();
- }
+ // make us look done
+ groupsIterator = groups.end();
- // add the remaining fields
- const size_t n = vFieldName.size();
- for(size_t i = 0; i < n; ++i) {
- intrusive_ptr<Accumulator> accum = vpAccumulatorFactory[i]();
- insides[vFieldName[i]] =
- Value(DOC(accum->getOpName() << vpExpression[i]->serialize(explain)));
- }
-
- if (_doingMerge) {
- // This makes the output unparsable (with error) on pre 2.6 shards, but it will never
- // be sent to old shards when this flag is true since they can't do a merge anyway.
+ // free our source's resources
+ pSource->dispose();
+}
- insides["$doingMerge"] = Value(true);
- }
+intrusive_ptr<DocumentSource> DocumentSourceGroup::optimize() {
+ // TODO if all _idExpressions are ExpressionConstants after optimization, then we know there
+ // will only be one group. We should take advantage of that to avoid going through the hash
+ // table.
+ for (size_t i = 0; i < _idExpressions.size(); i++) {
+ _idExpressions[i] = _idExpressions[i]->optimize();
+ }
- return Value(DOC(getSourceName() << insides.freeze()));
+ for (size_t i = 0; i < vFieldName.size(); i++) {
+ vpExpression[i] = vpExpression[i]->optimize();
}
- DocumentSource::GetDepsReturn DocumentSourceGroup::getDependencies(DepsTracker* deps) const {
- // add the _id
+ return this;
+}
+
+Value DocumentSourceGroup::serialize(bool explain) const {
+ MutableDocument insides;
+
+ // add the _id
+ if (_idFieldNames.empty()) {
+ invariant(_idExpressions.size() == 1);
+ insides["_id"] = _idExpressions[0]->serialize(explain);
+ } else {
+ // decomposed document case
+ invariant(_idExpressions.size() == _idFieldNames.size());
+ MutableDocument md;
for (size_t i = 0; i < _idExpressions.size(); i++) {
- _idExpressions[i]->addDependencies(deps);
+ md[_idFieldNames[i]] = _idExpressions[i]->serialize(explain);
}
+ insides["_id"] = md.freezeToValue();
+ }
- // add the rest
- const size_t n = vFieldName.size();
- for(size_t i = 0; i < n; ++i) {
- vpExpression[i]->addDependencies(deps);
- }
+ // add the remaining fields
+ const size_t n = vFieldName.size();
+ for (size_t i = 0; i < n; ++i) {
+ intrusive_ptr<Accumulator> accum = vpAccumulatorFactory[i]();
+ insides[vFieldName[i]] =
+ Value(DOC(accum->getOpName() << vpExpression[i]->serialize(explain)));
+ }
+
+ if (_doingMerge) {
+ // This makes the output unparsable (with error) on pre 2.6 shards, but it will never
+ // be sent to old shards when this flag is true since they can't do a merge anyway.
- return EXHAUSTIVE_ALL;
+ insides["$doingMerge"] = Value(true);
}
- intrusive_ptr<DocumentSourceGroup> DocumentSourceGroup::create(
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- intrusive_ptr<DocumentSourceGroup> pSource(
- new DocumentSourceGroup(pExpCtx));
- return pSource;
+ return Value(DOC(getSourceName() << insides.freeze()));
+}
+
+DocumentSource::GetDepsReturn DocumentSourceGroup::getDependencies(DepsTracker* deps) const {
+ // add the _id
+ for (size_t i = 0; i < _idExpressions.size(); i++) {
+ _idExpressions[i]->addDependencies(deps);
}
- DocumentSourceGroup::DocumentSourceGroup(const intrusive_ptr<ExpressionContext>& pExpCtx)
- : DocumentSource(pExpCtx)
- , populated(false)
- , _doingMerge(false)
- , _spilled(false)
- , _extSortAllowed(pExpCtx->extSortAllowed && !pExpCtx->inRouter)
- , _maxMemoryUsageBytes(100*1024*1024)
- {}
-
- void DocumentSourceGroup::addAccumulator(
- const std::string& fieldName,
- intrusive_ptr<Accumulator> (*pAccumulatorFactory)(),
- const intrusive_ptr<Expression> &pExpression) {
- vFieldName.push_back(fieldName);
- vpAccumulatorFactory.push_back(pAccumulatorFactory);
- vpExpression.push_back(pExpression);
+ // add the rest
+ const size_t n = vFieldName.size();
+ for (size_t i = 0; i < n; ++i) {
+ vpExpression[i]->addDependencies(deps);
}
+ return EXHAUSTIVE_ALL;
+}
- struct GroupOpDesc {
- const char* name;
- intrusive_ptr<Accumulator> (*factory)();
- };
+intrusive_ptr<DocumentSourceGroup> DocumentSourceGroup::create(
+ const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ intrusive_ptr<DocumentSourceGroup> pSource(new DocumentSourceGroup(pExpCtx));
+ return pSource;
+}
- static int GroupOpDescCmp(const void *pL, const void *pR) {
- return strcmp(((const GroupOpDesc *)pL)->name,
- ((const GroupOpDesc *)pR)->name);
- }
+DocumentSourceGroup::DocumentSourceGroup(const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx),
+ populated(false),
+ _doingMerge(false),
+ _spilled(false),
+ _extSortAllowed(pExpCtx->extSortAllowed && !pExpCtx->inRouter),
+ _maxMemoryUsageBytes(100 * 1024 * 1024) {}
+
+void DocumentSourceGroup::addAccumulator(const std::string& fieldName,
+ intrusive_ptr<Accumulator>(*pAccumulatorFactory)(),
+ const intrusive_ptr<Expression>& pExpression) {
+ vFieldName.push_back(fieldName);
+ vpAccumulatorFactory.push_back(pAccumulatorFactory);
+ vpExpression.push_back(pExpression);
+}
- /*
- Keep these sorted alphabetically so we can bsearch() them using
- GroupOpDescCmp() above.
- */
- static const GroupOpDesc GroupOpTable[] = {
- {"$addToSet", AccumulatorAddToSet::create},
- {"$avg", AccumulatorAvg::create},
- {"$first", AccumulatorFirst::create},
- {"$last", AccumulatorLast::create},
- {"$max", AccumulatorMinMax::createMax},
- {"$min", AccumulatorMinMax::createMin},
- {"$push", AccumulatorPush::create},
- {"$stdDevPop", AccumulatorStdDev::createPop},
- {"$stdDevSamp", AccumulatorStdDev::createSamp},
- {"$sum", AccumulatorSum::create},
- };
-
- static const size_t NGroupOp = sizeof(GroupOpTable)/sizeof(GroupOpTable[0]);
-
- intrusive_ptr<DocumentSource> DocumentSourceGroup::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- uassert(15947, "a group's fields must be specified in an object",
- elem.type() == Object);
-
- intrusive_ptr<DocumentSourceGroup> pGroup(
- DocumentSourceGroup::create(pExpCtx));
-
- BSONObj groupObj(elem.Obj());
- BSONObjIterator groupIterator(groupObj);
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- while(groupIterator.more()) {
- BSONElement groupField(groupIterator.next());
- const char *pFieldName = groupField.fieldName();
-
- if (str::equals(pFieldName, "_id")) {
- uassert(15948, "a group's _id may only be specified once",
- pGroup->_idExpressions.empty());
- pGroup->parseIdExpression(groupField, vps);
- invariant(!pGroup->_idExpressions.empty());
- }
- else if (str::equals(pFieldName, "$doingMerge")) {
- massert(17030, "$doingMerge should be true if present",
- groupField.Bool());
- pGroup->setDoingMerge(true);
- }
- else {
- /*
- Treat as a projection field with the additional ability to
- add aggregation operators.
- */
- uassert(16414, str::stream() <<
- "the group aggregate field name '" << pFieldName <<
- "' cannot be used because $group's field names cannot contain '.'",
- !str::contains(pFieldName, '.') );
-
- uassert(15950, str::stream() <<
- "the group aggregate field name '" <<
- pFieldName << "' cannot be an operator name",
- pFieldName[0] != '$');
-
- uassert(15951, str::stream() <<
- "the group aggregate field '" << pFieldName <<
- "' must be defined as an expression inside an object",
- groupField.type() == Object);
-
- BSONObj subField(groupField.Obj());
- BSONObjIterator subIterator(subField);
- size_t subCount = 0;
- for(; subIterator.more(); ++subCount) {
- BSONElement subElement(subIterator.next());
-
- /* look for the specified operator */
- GroupOpDesc key;
- key.name = subElement.fieldName();
- const GroupOpDesc *pOp =
- (const GroupOpDesc *)bsearch(
- &key, GroupOpTable, NGroupOp, sizeof(GroupOpDesc),
- GroupOpDescCmp);
-
- uassert(15952, str::stream() << "unknown group operator '" << key.name << "'",
- pOp);
-
- intrusive_ptr<Expression> pGroupExpr;
-
- BSONType elementType = subElement.type();
- if (elementType == Object) {
- Expression::ObjectCtx oCtx(Expression::ObjectCtx::DOCUMENT_OK);
- pGroupExpr = Expression::parseObject(subElement.Obj(), &oCtx, vps);
- }
- else if (elementType == Array) {
- uasserted(15953, str::stream()
- << "aggregating group operators are unary (" << key.name << ")");
- }
- else { /* assume its an atomic single operand */
- pGroupExpr = Expression::parseOperand(subElement, vps);
- }
+struct GroupOpDesc {
+ const char* name;
+ intrusive_ptr<Accumulator>(*factory)();
+};
- pGroup->addAccumulator(pFieldName, pOp->factory, pGroupExpr);
+static int GroupOpDescCmp(const void* pL, const void* pR) {
+ return strcmp(((const GroupOpDesc*)pL)->name, ((const GroupOpDesc*)pR)->name);
+}
+
+/*
+ Keep these sorted alphabetically so we can bsearch() them using
+ GroupOpDescCmp() above.
+*/
+static const GroupOpDesc GroupOpTable[] = {
+ {"$addToSet", AccumulatorAddToSet::create},
+ {"$avg", AccumulatorAvg::create},
+ {"$first", AccumulatorFirst::create},
+ {"$last", AccumulatorLast::create},
+ {"$max", AccumulatorMinMax::createMax},
+ {"$min", AccumulatorMinMax::createMin},
+ {"$push", AccumulatorPush::create},
+ {"$stdDevPop", AccumulatorStdDev::createPop},
+ {"$stdDevSamp", AccumulatorStdDev::createSamp},
+ {"$sum", AccumulatorSum::create},
+};
+
+static const size_t NGroupOp = sizeof(GroupOpTable) / sizeof(GroupOpTable[0]);
+
+intrusive_ptr<DocumentSource> DocumentSourceGroup::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ uassert(15947, "a group's fields must be specified in an object", elem.type() == Object);
+
+ intrusive_ptr<DocumentSourceGroup> pGroup(DocumentSourceGroup::create(pExpCtx));
+
+ BSONObj groupObj(elem.Obj());
+ BSONObjIterator groupIterator(groupObj);
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ while (groupIterator.more()) {
+ BSONElement groupField(groupIterator.next());
+ const char* pFieldName = groupField.fieldName();
+
+ if (str::equals(pFieldName, "_id")) {
+ uassert(
+ 15948, "a group's _id may only be specified once", pGroup->_idExpressions.empty());
+ pGroup->parseIdExpression(groupField, vps);
+ invariant(!pGroup->_idExpressions.empty());
+ } else if (str::equals(pFieldName, "$doingMerge")) {
+ massert(17030, "$doingMerge should be true if present", groupField.Bool());
+
+ pGroup->setDoingMerge(true);
+ } else {
+ /*
+ Treat as a projection field with the additional ability to
+ add aggregation operators.
+ */
+ uassert(
+ 16414,
+ str::stream() << "the group aggregate field name '" << pFieldName
+ << "' cannot be used because $group's field names cannot contain '.'",
+ !str::contains(pFieldName, '.'));
+
+ uassert(15950,
+ str::stream() << "the group aggregate field name '" << pFieldName
+ << "' cannot be an operator name",
+ pFieldName[0] != '$');
+
+ uassert(15951,
+ str::stream() << "the group aggregate field '" << pFieldName
+ << "' must be defined as an expression inside an object",
+ groupField.type() == Object);
+
+ BSONObj subField(groupField.Obj());
+ BSONObjIterator subIterator(subField);
+ size_t subCount = 0;
+ for (; subIterator.more(); ++subCount) {
+ BSONElement subElement(subIterator.next());
+
+ /* look for the specified operator */
+ GroupOpDesc key;
+ key.name = subElement.fieldName();
+ const GroupOpDesc* pOp = (const GroupOpDesc*)bsearch(
+ &key, GroupOpTable, NGroupOp, sizeof(GroupOpDesc), GroupOpDescCmp);
+
+ uassert(15952, str::stream() << "unknown group operator '" << key.name << "'", pOp);
+
+ intrusive_ptr<Expression> pGroupExpr;
+
+ BSONType elementType = subElement.type();
+ if (elementType == Object) {
+ Expression::ObjectCtx oCtx(Expression::ObjectCtx::DOCUMENT_OK);
+ pGroupExpr = Expression::parseObject(subElement.Obj(), &oCtx, vps);
+ } else if (elementType == Array) {
+ uasserted(15953,
+ str::stream() << "aggregating group operators are unary (" << key.name
+ << ")");
+ } else { /* assume its an atomic single operand */
+ pGroupExpr = Expression::parseOperand(subElement, vps);
}
- uassert(15954, str::stream() <<
- "the computed aggregate '" <<
- pFieldName << "' must specify exactly one operator",
- subCount == 1);
+ pGroup->addAccumulator(pFieldName, pOp->factory, pGroupExpr);
}
+
+ uassert(15954,
+ str::stream() << "the computed aggregate '" << pFieldName
+ << "' must specify exactly one operator",
+ subCount == 1);
}
+ }
- uassert(15955, "a group specification must include an _id",
- !pGroup->_idExpressions.empty());
+ uassert(15955, "a group specification must include an _id", !pGroup->_idExpressions.empty());
- pGroup->_variables.reset(new Variables(idGenerator.getIdCount()));
+ pGroup->_variables.reset(new Variables(idGenerator.getIdCount()));
- return pGroup;
- }
+ return pGroup;
+}
- namespace {
- class SorterComparator {
- public:
- typedef pair<Value, Value> Data;
- int operator() (const Data& lhs, const Data& rhs) const {
- return Value::compare(lhs.first, rhs.first);
- }
- };
+namespace {
+class SorterComparator {
+public:
+ typedef pair<Value, Value> Data;
+ int operator()(const Data& lhs, const Data& rhs) const {
+ return Value::compare(lhs.first, rhs.first);
}
+};
+}
- void DocumentSourceGroup::populate() {
- const size_t numAccumulators = vpAccumulatorFactory.size();
- dassert(numAccumulators == vpExpression.size());
-
- // pushed to on spill()
- vector<shared_ptr<Sorter<Value, Value>::Iterator> > sortedFiles;
- int memoryUsageBytes = 0;
-
- // This loop consumes all input from pSource and buckets it based on pIdExpression.
- while (boost::optional<Document> input = pSource->getNext()) {
- if (memoryUsageBytes > _maxMemoryUsageBytes) {
- uassert(16945, "Exceeded memory limit for $group, but didn't allow external sort."
- " Pass allowDiskUse:true to opt in.",
- _extSortAllowed);
- sortedFiles.push_back(spill());
- memoryUsageBytes = 0;
- }
+void DocumentSourceGroup::populate() {
+ const size_t numAccumulators = vpAccumulatorFactory.size();
+ dassert(numAccumulators == vpExpression.size());
+
+ // pushed to on spill()
+ vector<shared_ptr<Sorter<Value, Value>::Iterator>> sortedFiles;
+ int memoryUsageBytes = 0;
+
+ // This loop consumes all input from pSource and buckets it based on pIdExpression.
+ while (boost::optional<Document> input = pSource->getNext()) {
+ if (memoryUsageBytes > _maxMemoryUsageBytes) {
+ uassert(16945,
+ "Exceeded memory limit for $group, but didn't allow external sort."
+ " Pass allowDiskUse:true to opt in.",
+ _extSortAllowed);
+ sortedFiles.push_back(spill());
+ memoryUsageBytes = 0;
+ }
- _variables->setRoot(*input);
+ _variables->setRoot(*input);
- /* get the _id value */
- Value id = computeId(_variables.get());
+ /* get the _id value */
+ Value id = computeId(_variables.get());
- /* treat missing values the same as NULL SERVER-4674 */
- if (id.missing())
- id = Value(BSONNULL);
+ /* treat missing values the same as NULL SERVER-4674 */
+ if (id.missing())
+ id = Value(BSONNULL);
- /*
- Look for the _id value in the map; if it's not there, add a
- new entry with a blank accumulator.
- */
- const size_t oldSize = groups.size();
- vector<intrusive_ptr<Accumulator> >& group = groups[id];
- const bool inserted = groups.size() != oldSize;
+ /*
+ Look for the _id value in the map; if it's not there, add a
+ new entry with a blank accumulator.
+ */
+ const size_t oldSize = groups.size();
+ vector<intrusive_ptr<Accumulator>>& group = groups[id];
+ const bool inserted = groups.size() != oldSize;
- if (inserted) {
- memoryUsageBytes += id.getApproximateSize();
+ if (inserted) {
+ memoryUsageBytes += id.getApproximateSize();
- // Add the accumulators
- group.reserve(numAccumulators);
- for (size_t i = 0; i < numAccumulators; i++) {
- group.push_back(vpAccumulatorFactory[i]());
- }
- } else {
- for (size_t i = 0; i < numAccumulators; i++) {
- // subtract old mem usage. New usage added back after processing.
- memoryUsageBytes -= group[i]->memUsageForSorter();
- }
+ // Add the accumulators
+ group.reserve(numAccumulators);
+ for (size_t i = 0; i < numAccumulators; i++) {
+ group.push_back(vpAccumulatorFactory[i]());
}
-
- /* tickle all the accumulators for the group we found */
- dassert(numAccumulators == group.size());
+ } else {
for (size_t i = 0; i < numAccumulators; i++) {
- group[i]->process(vpExpression[i]->evaluate(_variables.get()), _doingMerge);
- memoryUsageBytes += group[i]->memUsageForSorter();
+ // subtract old mem usage. New usage added back after processing.
+ memoryUsageBytes -= group[i]->memUsageForSorter();
}
+ }
- // We are done with the ROOT document so release it.
- _variables->clearRoot();
-
- DEV {
- // In debug mode, spill every time we have a duplicate id to stress merge logic.
- if (!inserted // is a dup
- && !pExpCtx->inRouter // can't spill to disk in router
- && !_extSortAllowed // don't change behavior when testing external sort
- && sortedFiles.size() < 20 // don't open too many FDs
- ) {
- sortedFiles.push_back(spill());
- }
- }
+ /* tickle all the accumulators for the group we found */
+ dassert(numAccumulators == group.size());
+ for (size_t i = 0; i < numAccumulators; i++) {
+ group[i]->process(vpExpression[i]->evaluate(_variables.get()), _doingMerge);
+ memoryUsageBytes += group[i]->memUsageForSorter();
}
- // These blocks do any final steps necessary to prepare to output results.
- if (!sortedFiles.empty()) {
- _spilled = true;
- if (!groups.empty()) {
+ // We are done with the ROOT document so release it.
+ _variables->clearRoot();
+
+ DEV {
+ // In debug mode, spill every time we have a duplicate id to stress merge logic.
+ if (!inserted // is a dup
+ &&
+ !pExpCtx->inRouter // can't spill to disk in router
+ &&
+ !_extSortAllowed // don't change behavior when testing external sort
+ &&
+ sortedFiles.size() < 20 // don't open too many FDs
+ ) {
sortedFiles.push_back(spill());
}
+ }
+ }
- // We won't be using groups again so free its memory.
- GroupsMap().swap(groups);
+ // These blocks do any final steps necessary to prepare to output results.
+ if (!sortedFiles.empty()) {
+ _spilled = true;
+ if (!groups.empty()) {
+ sortedFiles.push_back(spill());
+ }
- _sorterIterator.reset(
- Sorter<Value,Value>::Iterator::merge(
- sortedFiles, SortOptions(), SorterComparator()));
+ // We won't be using groups again so free its memory.
+ GroupsMap().swap(groups);
- // prepare current to accumulate data
- _currentAccumulators.reserve(numAccumulators);
- for (size_t i = 0; i < numAccumulators; i++) {
- _currentAccumulators.push_back(vpAccumulatorFactory[i]());
- }
+ _sorterIterator.reset(
+ Sorter<Value, Value>::Iterator::merge(sortedFiles, SortOptions(), SorterComparator()));
- verify(_sorterIterator->more()); // we put data in, we should get something out.
- _firstPartOfNextGroup = _sorterIterator->next();
- } else {
- // start the group iterator
- groupsIterator = groups.begin();
+ // prepare current to accumulate data
+ _currentAccumulators.reserve(numAccumulators);
+ for (size_t i = 0; i < numAccumulators; i++) {
+ _currentAccumulators.push_back(vpAccumulatorFactory[i]());
}
- populated = true;
+ verify(_sorterIterator->more()); // we put data in, we should get something out.
+ _firstPartOfNextGroup = _sorterIterator->next();
+ } else {
+ // start the group iterator
+ groupsIterator = groups.begin();
}
- class DocumentSourceGroup::SpillSTLComparator {
- public:
- bool operator() (const GroupsMap::value_type* lhs, const GroupsMap::value_type* rhs) const {
- return Value::compare(lhs->first, rhs->first) < 0;
- }
- };
+ populated = true;
+}
- shared_ptr<Sorter<Value, Value>::Iterator> DocumentSourceGroup::spill() {
- vector<const GroupsMap::value_type*> ptrs; // using pointers to speed sorting
- ptrs.reserve(groups.size());
- for (GroupsMap::const_iterator it=groups.begin(), end=groups.end(); it != end; ++it) {
- ptrs.push_back(&*it);
- }
+class DocumentSourceGroup::SpillSTLComparator {
+public:
+ bool operator()(const GroupsMap::value_type* lhs, const GroupsMap::value_type* rhs) const {
+ return Value::compare(lhs->first, rhs->first) < 0;
+ }
+};
+
+shared_ptr<Sorter<Value, Value>::Iterator> DocumentSourceGroup::spill() {
+ vector<const GroupsMap::value_type*> ptrs; // using pointers to speed sorting
+ ptrs.reserve(groups.size());
+ for (GroupsMap::const_iterator it = groups.begin(), end = groups.end(); it != end; ++it) {
+ ptrs.push_back(&*it);
+ }
- stable_sort(ptrs.begin(), ptrs.end(), SpillSTLComparator());
+ stable_sort(ptrs.begin(), ptrs.end(), SpillSTLComparator());
- SortedFileWriter<Value, Value> writer(SortOptions().TempDir(pExpCtx->tempDir));
- switch (vpAccumulatorFactory.size()) { // same as ptrs[i]->second.size() for all i.
- case 0: // no values, essentially a distinct
- for (size_t i=0; i < ptrs.size(); i++) {
+ SortedFileWriter<Value, Value> writer(SortOptions().TempDir(pExpCtx->tempDir));
+ switch (vpAccumulatorFactory.size()) { // same as ptrs[i]->second.size() for all i.
+ case 0: // no values, essentially a distinct
+ for (size_t i = 0; i < ptrs.size(); i++) {
writer.addAlreadySorted(ptrs[i]->first, Value());
}
break;
- case 1: // just one value, use optimized serialization as single Value
- for (size_t i=0; i < ptrs.size(); i++) {
+ case 1: // just one value, use optimized serialization as single Value
+ for (size_t i = 0; i < ptrs.size(); i++) {
writer.addAlreadySorted(ptrs[i]->first,
ptrs[i]->second[0]->getValue(/*toBeMerged=*/true));
}
break;
- default: // multiple values, serialize as array-typed Value
- for (size_t i=0; i < ptrs.size(); i++) {
+ default: // multiple values, serialize as array-typed Value
+ for (size_t i = 0; i < ptrs.size(); i++) {
vector<Value> accums;
- for (size_t j=0; j < ptrs[i]->second.size(); j++) {
+ for (size_t j = 0; j < ptrs[i]->second.size(); j++) {
accums.push_back(ptrs[i]->second[j]->getValue(/*toBeMerged=*/true));
}
writer.addAlreadySorted(ptrs[i]->first, Value(std::move(accums)));
}
break;
- }
+ }
- groups.clear();
+ groups.clear();
- return shared_ptr<Sorter<Value, Value>::Iterator>(writer.done());
- }
+ return shared_ptr<Sorter<Value, Value>::Iterator>(writer.done());
+}
- void DocumentSourceGroup::parseIdExpression(BSONElement groupField,
- const VariablesParseState& vps) {
- if (groupField.type() == Object && !groupField.Obj().isEmpty()) {
- // {_id: {}} is treated as grouping on a constant, not an expression
+void DocumentSourceGroup::parseIdExpression(BSONElement groupField,
+ const VariablesParseState& vps) {
+ if (groupField.type() == Object && !groupField.Obj().isEmpty()) {
+ // {_id: {}} is treated as grouping on a constant, not an expression
- const BSONObj idKeyObj = groupField.Obj();
- if (idKeyObj.firstElementFieldName()[0] == '$') {
- // grouping on a $op expression
- Expression::ObjectCtx oCtx(0);
- _idExpressions.push_back(Expression::parseObject(idKeyObj, &oCtx, vps));
- }
- else {
- // grouping on an "artificial" object. Rather than create the object for each input
- // in populate(), instead group on the output of the raw expressions. The artificial
- // object will be created at the end in makeDocument() while outputting results.
- BSONForEach(field, idKeyObj) {
- uassert(17390, "$group does not support inclusion-style expressions",
- !field.isNumber() && field.type() != Bool);
-
- _idFieldNames.push_back(field.fieldName());
- _idExpressions.push_back(Expression::parseOperand(field, vps));
- }
+ const BSONObj idKeyObj = groupField.Obj();
+ if (idKeyObj.firstElementFieldName()[0] == '$') {
+ // grouping on a $op expression
+ Expression::ObjectCtx oCtx(0);
+ _idExpressions.push_back(Expression::parseObject(idKeyObj, &oCtx, vps));
+ } else {
+ // grouping on an "artificial" object. Rather than create the object for each input
+ // in populate(), instead group on the output of the raw expressions. The artificial
+ // object will be created at the end in makeDocument() while outputting results.
+ BSONForEach(field, idKeyObj) {
+ uassert(17390,
+ "$group does not support inclusion-style expressions",
+ !field.isNumber() && field.type() != Bool);
+
+ _idFieldNames.push_back(field.fieldName());
+ _idExpressions.push_back(Expression::parseOperand(field, vps));
}
}
- else if (groupField.type() == String && groupField.valuestr()[0] == '$') {
- // grouping on a field path.
- _idExpressions.push_back(ExpressionFieldPath::parse(groupField.str(), vps));
- }
- else {
- // constant id - single group
- _idExpressions.push_back(ExpressionConstant::create(Value(groupField)));
- }
+ } else if (groupField.type() == String && groupField.valuestr()[0] == '$') {
+ // grouping on a field path.
+ _idExpressions.push_back(ExpressionFieldPath::parse(groupField.str(), vps));
+ } else {
+ // constant id - single group
+ _idExpressions.push_back(ExpressionConstant::create(Value(groupField)));
}
+}
- Value DocumentSourceGroup::computeId(Variables* vars) {
- // If only one expression return result directly
- if (_idExpressions.size() == 1)
- return _idExpressions[0]->evaluate(vars);
+Value DocumentSourceGroup::computeId(Variables* vars) {
+ // If only one expression return result directly
+ if (_idExpressions.size() == 1)
+ return _idExpressions[0]->evaluate(vars);
- // Multiple expressions get results wrapped in a vector
- vector<Value> vals;
- vals.reserve(_idExpressions.size());
- for (size_t i = 0; i < _idExpressions.size(); i++) {
- vals.push_back(_idExpressions[i]->evaluate(vars));
- }
- return Value(std::move(vals));
+ // Multiple expressions get results wrapped in a vector
+ vector<Value> vals;
+ vals.reserve(_idExpressions.size());
+ for (size_t i = 0; i < _idExpressions.size(); i++) {
+ vals.push_back(_idExpressions[i]->evaluate(vars));
}
+ return Value(std::move(vals));
+}
- Value DocumentSourceGroup::expandId(const Value& val) {
- // _id doesn't get wrapped in a document
- if (_idFieldNames.empty())
- return val;
-
- // _id is a single-field document containing val
- if (_idFieldNames.size() == 1)
- return Value(DOC(_idFieldNames[0] << val));
-
- // _id is a multi-field document containing the elements of val
- const vector<Value>& vals = val.getArray();
- invariant(_idFieldNames.size() == vals.size());
- MutableDocument md(vals.size());
- for (size_t i = 0; i < vals.size(); i++) {
- md[_idFieldNames[i]] = vals[i];
- }
- return md.freezeToValue();
+Value DocumentSourceGroup::expandId(const Value& val) {
+ // _id doesn't get wrapped in a document
+ if (_idFieldNames.empty())
+ return val;
+
+ // _id is a single-field document containing val
+ if (_idFieldNames.size() == 1)
+ return Value(DOC(_idFieldNames[0] << val));
+
+ // _id is a multi-field document containing the elements of val
+ const vector<Value>& vals = val.getArray();
+ invariant(_idFieldNames.size() == vals.size());
+ MutableDocument md(vals.size());
+ for (size_t i = 0; i < vals.size(); i++) {
+ md[_idFieldNames[i]] = vals[i];
}
+ return md.freezeToValue();
+}
- Document DocumentSourceGroup::makeDocument(const Value& id,
- const Accumulators& accums,
- bool mergeableOutput) {
- const size_t n = vFieldName.size();
- MutableDocument out (1 + n);
-
- /* add the _id field */
- out.addField("_id", expandId(id));
-
- /* add the rest of the fields */
- for(size_t i = 0; i < n; ++i) {
- Value val = accums[i]->getValue(mergeableOutput);
- if (val.missing()) {
- // we return null in this case so return objects are predictable
- out.addField(vFieldName[i], Value(BSONNULL));
- }
- else {
- out.addField(vFieldName[i], val);
- }
+Document DocumentSourceGroup::makeDocument(const Value& id,
+ const Accumulators& accums,
+ bool mergeableOutput) {
+ const size_t n = vFieldName.size();
+ MutableDocument out(1 + n);
+
+ /* add the _id field */
+ out.addField("_id", expandId(id));
+
+ /* add the rest of the fields */
+ for (size_t i = 0; i < n; ++i) {
+ Value val = accums[i]->getValue(mergeableOutput);
+ if (val.missing()) {
+ // we return null in this case so return objects are predictable
+ out.addField(vFieldName[i], Value(BSONNULL));
+ } else {
+ out.addField(vFieldName[i], val);
}
-
- return out.freeze();
}
- intrusive_ptr<DocumentSource> DocumentSourceGroup::getShardSource() {
- return this; // No modifications necessary when on shard
- }
-
- intrusive_ptr<DocumentSource> DocumentSourceGroup::getMergeSource() {
- intrusive_ptr<DocumentSourceGroup> pMerger(DocumentSourceGroup::create(pExpCtx));
- pMerger->setDoingMerge(true);
+ return out.freeze();
+}
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- /* the merger will use the same grouping key */
- pMerger->_idExpressions.push_back(ExpressionFieldPath::parse("$$ROOT._id", vps));
+intrusive_ptr<DocumentSource> DocumentSourceGroup::getShardSource() {
+ return this; // No modifications necessary when on shard
+}
- const size_t n = vFieldName.size();
- for(size_t i = 0; i < n; ++i) {
- /*
- The merger's output field names will be the same, as will the
- accumulator factories. However, for some accumulators, the
- expression to be accumulated will be different. The original
- accumulator may be collecting an expression based on a field
- expression or constant. Here, we accumulate the output of the
- same name from the prior group.
- */
- pMerger->addAccumulator(
- vFieldName[i], vpAccumulatorFactory[i],
- ExpressionFieldPath::parse("$$ROOT." + vFieldName[i], vps));
- }
+intrusive_ptr<DocumentSource> DocumentSourceGroup::getMergeSource() {
+ intrusive_ptr<DocumentSourceGroup> pMerger(DocumentSourceGroup::create(pExpCtx));
+ pMerger->setDoingMerge(true);
+
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ /* the merger will use the same grouping key */
+ pMerger->_idExpressions.push_back(ExpressionFieldPath::parse("$$ROOT._id", vps));
+
+ const size_t n = vFieldName.size();
+ for (size_t i = 0; i < n; ++i) {
+ /*
+ The merger's output field names will be the same, as will the
+ accumulator factories. However, for some accumulators, the
+ expression to be accumulated will be different. The original
+ accumulator may be collecting an expression based on a field
+ expression or constant. Here, we accumulate the output of the
+ same name from the prior group.
+ */
+ pMerger->addAccumulator(vFieldName[i],
+ vpAccumulatorFactory[i],
+ ExpressionFieldPath::parse("$$ROOT." + vFieldName[i], vps));
+ }
- pMerger->_variables.reset(new Variables(idGenerator.getIdCount()));
+ pMerger->_variables.reset(new Variables(idGenerator.getIdCount()));
- return pMerger;
- }
+ return pMerger;
+}
}
#include "mongo/db/sorter/sorter.cpp"
diff --git a/src/mongo/db/pipeline/document_source_limit.cpp b/src/mongo/db/pipeline/document_source_limit.cpp
index 1789b689eda..9729c6bae8f 100644
--- a/src/mongo/db/pipeline/document_source_limit.cpp
+++ b/src/mongo/db/pipeline/document_source_limit.cpp
@@ -37,66 +37,57 @@
namespace mongo {
- using boost::intrusive_ptr;
+using boost::intrusive_ptr;
- const char DocumentSourceLimit::limitName[] = "$limit";
+const char DocumentSourceLimit::limitName[] = "$limit";
- DocumentSourceLimit::DocumentSourceLimit(const intrusive_ptr<ExpressionContext> &pExpCtx,
- long long limit)
- : DocumentSource(pExpCtx)
- , limit(limit)
- , count(0)
- {}
+DocumentSourceLimit::DocumentSourceLimit(const intrusive_ptr<ExpressionContext>& pExpCtx,
+ long long limit)
+ : DocumentSource(pExpCtx), limit(limit), count(0) {}
- const char *DocumentSourceLimit::getSourceName() const {
- return limitName;
- }
-
- bool DocumentSourceLimit::coalesce(
- const intrusive_ptr<DocumentSource> &pNextSource) {
- DocumentSourceLimit *pLimit =
- dynamic_cast<DocumentSourceLimit *>(pNextSource.get());
+const char* DocumentSourceLimit::getSourceName() const {
+ return limitName;
+}
- /* if it's not another $limit, we can't coalesce */
- if (!pLimit)
- return false;
+bool DocumentSourceLimit::coalesce(const intrusive_ptr<DocumentSource>& pNextSource) {
+ DocumentSourceLimit* pLimit = dynamic_cast<DocumentSourceLimit*>(pNextSource.get());
- /* we need to limit by the minimum of the two limits */
- if (pLimit->limit < limit)
- limit = pLimit->limit;
- return true;
- }
+ /* if it's not another $limit, we can't coalesce */
+ if (!pLimit)
+ return false;
- boost::optional<Document> DocumentSourceLimit::getNext() {
- pExpCtx->checkForInterrupt();
+ /* we need to limit by the minimum of the two limits */
+ if (pLimit->limit < limit)
+ limit = pLimit->limit;
+ return true;
+}
- if (++count > limit) {
- pSource->dispose();
- return boost::none;
- }
+boost::optional<Document> DocumentSourceLimit::getNext() {
+ pExpCtx->checkForInterrupt();
- return pSource->getNext();
+ if (++count > limit) {
+ pSource->dispose();
+ return boost::none;
}
- Value DocumentSourceLimit::serialize(bool explain) const {
- return Value(DOC(getSourceName() << limit));
- }
+ return pSource->getNext();
+}
- intrusive_ptr<DocumentSourceLimit> DocumentSourceLimit::create(
- const intrusive_ptr<ExpressionContext> &pExpCtx,
- long long limit) {
- uassert(15958, "the limit must be positive",
- limit > 0);
- return new DocumentSourceLimit(pExpCtx, limit);
- }
+Value DocumentSourceLimit::serialize(bool explain) const {
+ return Value(DOC(getSourceName() << limit));
+}
+
+intrusive_ptr<DocumentSourceLimit> DocumentSourceLimit::create(
+ const intrusive_ptr<ExpressionContext>& pExpCtx, long long limit) {
+ uassert(15958, "the limit must be positive", limit > 0);
+ return new DocumentSourceLimit(pExpCtx, limit);
+}
- intrusive_ptr<DocumentSource> DocumentSourceLimit::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- uassert(15957, "the limit must be specified as a number",
- elem.isNumber());
+intrusive_ptr<DocumentSource> DocumentSourceLimit::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ uassert(15957, "the limit must be specified as a number", elem.isNumber());
- long long limit = elem.numberLong();
- return DocumentSourceLimit::create(pExpCtx, limit);
- }
+ long long limit = elem.numberLong();
+ return DocumentSourceLimit::create(pExpCtx, limit);
+}
}
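
DocumentSourceLimit::coalesce() above folds two adjacent $limit stages into one by keeping the smaller value. A self-contained sketch of that rule; the LimitStage type is invented for illustration:

#include <algorithm>
#include <cassert>

// Invented stand-in for a pipeline stage; only the limit value matters here.
struct LimitStage {
    long long limit;

    // Absorb a following $limit: only min(limit, next.limit) documents can
    // make it through both stages, so keep the smaller value.
    bool coalesce(const LimitStage& next) {
        limit = std::min(limit, next.limit);
        return true;
    }
};

int main() {
    LimitStage first{10};
    LimitStage second{3};
    first.coalesce(second);    // {$limit: 10} followed by {$limit: 3}
    assert(first.limit == 3);  // behaves like a single {$limit: 3}
    return 0;
}
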
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index b6c6e005446..50ef5e95eb5 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -39,121 +39,123 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::string;
- using std::vector;
+using boost::intrusive_ptr;
+using std::string;
+using std::vector;
- const char DocumentSourceMatch::matchName[] = "$match";
+const char DocumentSourceMatch::matchName[] = "$match";
- const char *DocumentSourceMatch::getSourceName() const {
- return matchName;
- }
-
- Value DocumentSourceMatch::serialize(bool explain) const {
- return Value(DOC(getSourceName() << Document(getQuery())));
- }
+const char* DocumentSourceMatch::getSourceName() const {
+ return matchName;
+}
- intrusive_ptr<DocumentSource> DocumentSourceMatch::optimize() {
- return getQuery().isEmpty() ? nullptr : this;
- }
+Value DocumentSourceMatch::serialize(bool explain) const {
+ return Value(DOC(getSourceName() << Document(getQuery())));
+}
- boost::optional<Document> DocumentSourceMatch::getNext() {
- pExpCtx->checkForInterrupt();
+intrusive_ptr<DocumentSource> DocumentSourceMatch::optimize() {
+ return getQuery().isEmpty() ? nullptr : this;
+}
- // The user facing error should have been generated earlier.
- massert(17309, "Should never call getNext on a $match stage with $text clause",
- !_isTextQuery);
+boost::optional<Document> DocumentSourceMatch::getNext() {
+ pExpCtx->checkForInterrupt();
- while (boost::optional<Document> next = pSource->getNext()) {
- // The matcher only takes BSON documents, so we have to make one.
- if (matcher->matches(next->toBson()))
- return next;
- }
+ // The user facing error should have been generated earlier.
+ massert(17309, "Should never call getNext on a $match stage with $text clause", !_isTextQuery);
- // Nothing matched
- return boost::none;
+ while (boost::optional<Document> next = pSource->getNext()) {
+ // The matcher only takes BSON documents, so we have to make one.
+ if (matcher->matches(next->toBson()))
+ return next;
}
- bool DocumentSourceMatch::coalesce(const intrusive_ptr<DocumentSource>& nextSource) {
- DocumentSourceMatch* otherMatch = dynamic_cast<DocumentSourceMatch*>(nextSource.get());
- if (!otherMatch)
- return false;
+ // Nothing matched
+ return boost::none;
+}
- if (otherMatch->_isTextQuery) {
- // Non-initial text queries are disallowed (enforced by setSource below). This prevents
- // "hiding" a non-initial text query by combining it with another match.
- return false;
+bool DocumentSourceMatch::coalesce(const intrusive_ptr<DocumentSource>& nextSource) {
+ DocumentSourceMatch* otherMatch = dynamic_cast<DocumentSourceMatch*>(nextSource.get());
+ if (!otherMatch)
+ return false;
- // The rest of this block is for once we support non-initial text queries.
+ if (otherMatch->_isTextQuery) {
+ // Non-initial text queries are disallowed (enforced by setSource below). This prevents
+ // "hiding" a non-initial text query by combining it with another match.
+ return false;
- if (_isTextQuery) {
- // The score should only come from the last $match. We can't combine since then this
- // match's score would impact otherMatch's.
- return false;
- }
+ // The rest of this block is for once we support non-initial text queries.
- _isTextQuery = true;
+ if (_isTextQuery) {
+ // The score should only come from the last $match. We can't combine since then this
+ // match's score would impact otherMatch's.
+ return false;
}
- // Replace our matcher with the $and of ours and theirs.
- matcher.reset(new Matcher(BSON("$and" << BSON_ARRAY(getQuery()
- << otherMatch->getQuery())),
- MatchExpressionParser::WhereCallback()));
-
- return true;
+ _isTextQuery = true;
}
+ // Replace our matcher with the $and of ours and theirs.
+ matcher.reset(new Matcher(BSON("$and" << BSON_ARRAY(getQuery() << otherMatch->getQuery())),
+ MatchExpressionParser::WhereCallback()));
+
+ return true;
+}
+
namespace {
- // This block contains the functions that make up the implementation of
- // DocumentSourceMatch::redactSafePortion(). They will only be called after
- // the Match expression has been successfully parsed so they can assume that
- // input is well formed.
+// This block contains the functions that make up the implementation of
+// DocumentSourceMatch::redactSafePortion(). They will only be called after
+// the Match expression has been successfully parsed so they can assume that
+// input is well formed.
- bool isAllDigits(StringData str) {
- if (str.empty())
- return false;
+bool isAllDigits(StringData str) {
+ if (str.empty())
+ return false;
- for (size_t i=0; i < str.size(); i++) {
- if (!isdigit(str[i]))
- return false;
- }
- return true;
+ for (size_t i = 0; i < str.size(); i++) {
+ if (!isdigit(str[i]))
+ return false;
}
+ return true;
+}
- bool isFieldnameRedactSafe(StringData fieldName) {
- // Can't have numeric elements in the dotted path since redacting elements from an array
- // would change the indexes.
+bool isFieldnameRedactSafe(StringData fieldName) {
+ // Can't have numeric elements in the dotted path since redacting elements from an array
+ // would change the indexes.
- const size_t dotPos = fieldName.find('.');
- if (dotPos == string::npos)
- return !isAllDigits(fieldName);
+ const size_t dotPos = fieldName.find('.');
+ if (dotPos == string::npos)
+ return !isAllDigits(fieldName);
- const StringData part = fieldName.substr(0, dotPos);
- const StringData rest = fieldName.substr(dotPos + 1);
- return !isAllDigits(part) && isFieldnameRedactSafe(rest);
- }
+ const StringData part = fieldName.substr(0, dotPos);
+ const StringData rest = fieldName.substr(dotPos + 1);
+ return !isAllDigits(part) && isFieldnameRedactSafe(rest);
+}
- bool isTypeRedactSafeInComparison(BSONType type) {
- if (type == Array) return false;
- if (type == Object) return false;
- if (type == jstNULL) return false;
- if (type == Undefined) return false; // Currently a Matcher parse error.
+bool isTypeRedactSafeInComparison(BSONType type) {
+ if (type == Array)
+ return false;
+ if (type == Object)
+ return false;
+ if (type == jstNULL)
+ return false;
+ if (type == Undefined)
+ return false; // Currently a Matcher parse error.
- return true;
- }
+ return true;
+}
- Document redactSafePortionTopLevel(BSONObj query); // mutually recursive with next function
+Document redactSafePortionTopLevel(BSONObj query); // mutually recursive with next function
- // Returns the redact-safe portion of an "inner" match expression. This is the layer like
- // {$gt: 5} which does not include the field name. Returns an empty document if none of the
- // expression can safely be promoted in front of a $redact.
- Document redactSafePortionDollarOps(BSONObj expr) {
- MutableDocument output;
- BSONForEach(field, expr) {
- if (field.fieldName()[0] != '$')
- continue;
+// Returns the redact-safe portion of an "inner" match expression. This is the layer like
+// {$gt: 5} which does not include the field name. Returns an empty document if none of the
+// expression can safely be promoted in front of a $redact.
+Document redactSafePortionDollarOps(BSONObj expr) {
+ MutableDocument output;
+ BSONForEach(field, expr) {
+ if (field.fieldName()[0] != '$')
+ continue;
- switch(BSONObj::MatchType(field.getGtLtOp(BSONObj::Equality))) {
+ switch (BSONObj::MatchType(field.getGtLtOp(BSONObj::Equality))) {
// These are always ok
case BSONObj::opTYPE:
case BSONObj::opREGEX:
@@ -218,7 +220,7 @@ namespace {
}
// These are never allowed
- case BSONObj::Equality: // This actually means unknown
+ case BSONObj::Equality: // This actually means unknown
case BSONObj::opMAX_DISTANCE:
case BSONObj::opNEAR:
case BSONObj::NE:
@@ -228,55 +230,57 @@ namespace {
case BSONObj::opWITHIN:
case BSONObj::opGEO_INTERSECTS:
continue;
- }
}
- return output.freeze();
}
+ return output.freeze();
+}
- // Returns the redact-safe portion of an "outer" match expression. This is the layer like
- // {fieldName: {...}} which does include the field name. Returns an empty document if none of
- // the expression can safely be promoted in front of a $redact.
- Document redactSafePortionTopLevel(BSONObj query) {
- MutableDocument output;
- BSONForEach(field, query) {
- if (field.fieldName()[0] == '$') {
- if (str::equals(field.fieldName(), "$or")) {
- // $or must be all-or-nothing (line $in). Can't include subset of elements.
- vector<Value> okClauses;
- BSONForEach(elem, field.Obj()) {
- Document clause = redactSafePortionTopLevel(elem.Obj());
- if (clause.empty()) {
- okClauses.clear();
- break;
- }
- okClauses.push_back(Value(clause));
- }
-
- if (!okClauses.empty())
- output["$or"] = Value(std::move(okClauses));
- }
- else if (str::equals(field.fieldName(), "$and")) {
- // $and can include subset of elements (like $all).
- vector<Value> okClauses;
- BSONForEach(elem, field.Obj()) {
- Document clause = redactSafePortionTopLevel(elem.Obj());
- if (!clause.empty())
- okClauses.push_back(Value(clause));
+// Returns the redact-safe portion of an "outer" match expression. This is the layer like
+// {fieldName: {...}} which does include the field name. Returns an empty document if none of
+// the expression can safely be promoted in front of a $redact.
+Document redactSafePortionTopLevel(BSONObj query) {
+ MutableDocument output;
+ BSONForEach(field, query) {
+ if (field.fieldName()[0] == '$') {
+ if (str::equals(field.fieldName(), "$or")) {
+                // $or must be all-or-nothing (like $in). Can't include subset of elements.
+ vector<Value> okClauses;
+ BSONForEach(elem, field.Obj()) {
+ Document clause = redactSafePortionTopLevel(elem.Obj());
+ if (clause.empty()) {
+ okClauses.clear();
+ break;
}
- if (!okClauses.empty())
- output["$and"] = Value(std::move(okClauses));
+ okClauses.push_back(Value(clause));
}
- continue;
+ if (!okClauses.empty())
+ output["$or"] = Value(std::move(okClauses));
+ } else if (str::equals(field.fieldName(), "$and")) {
+ // $and can include subset of elements (like $all).
+ vector<Value> okClauses;
+ BSONForEach(elem, field.Obj()) {
+ Document clause = redactSafePortionTopLevel(elem.Obj());
+ if (!clause.empty())
+ okClauses.push_back(Value(clause));
+ }
+ if (!okClauses.empty())
+ output["$and"] = Value(std::move(okClauses));
}
- if (!isFieldnameRedactSafe(field.fieldNameStringData()))
- continue;
+ continue;
+ }
+
+ if (!isFieldnameRedactSafe(field.fieldNameStringData()))
+ continue;
- switch (field.type()) {
- case Array: continue; // exact matches on arrays are never allowed
- case jstNULL: continue; // can't look for missing fields
- case Undefined: continue; // Currently a Matcher parse error.
+ switch (field.type()) {
+ case Array:
+ continue; // exact matches on arrays are never allowed
+ case jstNULL:
+ continue; // can't look for missing fields
+ case Undefined:
+ continue; // Currently a Matcher parse error.
case Object: {
Document sub = redactSafePortionDollarOps(field.Obj());
@@ -290,69 +294,68 @@ namespace {
default:
output[field.fieldNameStringData()] = Value(field);
break;
- }
}
- return output.freeze();
}
+ return output.freeze();
+}
}
- BSONObj DocumentSourceMatch::redactSafePortion() const {
- return redactSafePortionTopLevel(getQuery()).toBson();
- }
+BSONObj DocumentSourceMatch::redactSafePortion() const {
+ return redactSafePortionTopLevel(getQuery()).toBson();
+}
- void DocumentSourceMatch::setSource(DocumentSource* source) {
- uassert(17313, "$match with $text is only allowed as the first pipeline stage",
- !_isTextQuery);
+void DocumentSourceMatch::setSource(DocumentSource* source) {
+ uassert(17313, "$match with $text is only allowed as the first pipeline stage", !_isTextQuery);
- DocumentSource::setSource(source);
- }
+ DocumentSource::setSource(source);
+}
- bool DocumentSourceMatch::isTextQuery(const BSONObj& query) {
- BSONForEach(e, query) {
- const StringData fieldName = e.fieldNameStringData();
- if (fieldName == StringData("$text", StringData::LiteralTag()))
- return true;
+bool DocumentSourceMatch::isTextQuery(const BSONObj& query) {
+ BSONForEach(e, query) {
+ const StringData fieldName = e.fieldNameStringData();
+ if (fieldName == StringData("$text", StringData::LiteralTag()))
+ return true;
- if (e.isABSONObj() && isTextQuery(e.Obj()))
- return true;
- }
- return false;
+ if (e.isABSONObj() && isTextQuery(e.Obj()))
+ return true;
}
+ return false;
+}
- static void uassertNoDisallowedClauses(BSONObj query) {
- BSONForEach(e, query) {
- // can't use the Matcher API because this would segfault the constructor
- uassert(16395, "$where is not allowed inside of a $match aggregation expression",
- ! str::equals(e.fieldName(), "$where"));
- // geo breaks if it is not the first portion of the pipeline
- uassert(16424, "$near is not allowed inside of a $match aggregation expression",
- ! str::equals(e.fieldName(), "$near"));
- uassert(16426, "$nearSphere is not allowed inside of a $match aggregation expression",
- ! str::equals(e.fieldName(), "$nearSphere"));
- if (e.isABSONObj())
- uassertNoDisallowedClauses(e.Obj());
- }
+static void uassertNoDisallowedClauses(BSONObj query) {
+ BSONForEach(e, query) {
+ // can't use the Matcher API because this would segfault the constructor
+ uassert(16395,
+ "$where is not allowed inside of a $match aggregation expression",
+ !str::equals(e.fieldName(), "$where"));
+ // geo breaks if it is not the first portion of the pipeline
+ uassert(16424,
+ "$near is not allowed inside of a $match aggregation expression",
+ !str::equals(e.fieldName(), "$near"));
+ uassert(16426,
+ "$nearSphere is not allowed inside of a $match aggregation expression",
+ !str::equals(e.fieldName(), "$nearSphere"));
+ if (e.isABSONObj())
+ uassertNoDisallowedClauses(e.Obj());
}
+}
- intrusive_ptr<DocumentSource> DocumentSourceMatch::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- uassert(15959, "the match filter must be an expression in an object",
- elem.type() == Object);
+intrusive_ptr<DocumentSource> DocumentSourceMatch::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ uassert(15959, "the match filter must be an expression in an object", elem.type() == Object);
- uassertNoDisallowedClauses(elem.Obj());
+ uassertNoDisallowedClauses(elem.Obj());
- return new DocumentSourceMatch(elem.Obj(), pExpCtx);
- }
+ return new DocumentSourceMatch(elem.Obj(), pExpCtx);
+}
- BSONObj DocumentSourceMatch::getQuery() const {
- return *(matcher->getQuery());
- }
+BSONObj DocumentSourceMatch::getQuery() const {
+ return *(matcher->getQuery());
+}
- DocumentSourceMatch::DocumentSourceMatch(const BSONObj &query,
- const intrusive_ptr<ExpressionContext> &pExpCtx)
- : DocumentSource(pExpCtx),
- matcher(new Matcher(query.getOwned(), MatchExpressionParser::WhereCallback())),
- _isTextQuery(isTextQuery(query))
- {}
+DocumentSourceMatch::DocumentSourceMatch(const BSONObj& query,
+ const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx),
+ matcher(new Matcher(query.getOwned(), MatchExpressionParser::WhereCallback())),
+ _isTextQuery(isTextQuery(query)) {}
}
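
redactSafePortionTopLevel() above applies an asymmetric rule when deciding what can be promoted ahead of $redact: an $and may keep just its redact-safe clauses, while an $or is all-or-nothing. A standalone sketch of that rule, with Clause as an invented stand-in for a parsed predicate:

#include <iostream>
#include <string>
#include <vector>

struct Clause {
    std::string text;
    bool redactSafe;
};

// Keep the redact-safe subset for $and; for $or, one unsafe branch discards all.
std::vector<Clause> safePortion(const std::vector<Clause>& clauses, bool isOr) {
    std::vector<Clause> kept;
    for (const Clause& c : clauses) {
        if (c.redactSafe) {
            kept.push_back(c);
        } else if (isOr) {
            return {};  // $or cannot keep only a subset of its branches
        }
    }
    return kept;
}

int main() {
    const std::vector<Clause> clauses = {{"{a: {$gt: 5}}", true},
                                         {"{b: {$ne: 1}}", false}};
    std::cout << "$and keeps " << safePortion(clauses, false).size() << " clause(s)\n";  // 1
    std::cout << "$or  keeps " << safePortion(clauses, true).size() << " clause(s)\n";   // 0
    return 0;
}
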
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors.cpp b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
index afe924d6a2f..d1e618f35bf 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
@@ -33,146 +33,137 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::make_pair;
- using std::string;
- using std::vector;
+using boost::intrusive_ptr;
+using std::make_pair;
+using std::string;
+using std::vector;
- const char DocumentSourceMergeCursors::name[] = "$mergeCursors";
+const char DocumentSourceMergeCursors::name[] = "$mergeCursors";
- const char* DocumentSourceMergeCursors::getSourceName() const {
- return name;
- }
+const char* DocumentSourceMergeCursors::getSourceName() const {
+ return name;
+}
- void DocumentSourceMergeCursors::setSource(DocumentSource *pSource) {
- /* this doesn't take a source */
- verify(false);
- }
+void DocumentSourceMergeCursors::setSource(DocumentSource* pSource) {
+ /* this doesn't take a source */
+ verify(false);
+}
- DocumentSourceMergeCursors::DocumentSourceMergeCursors(
- const CursorIds& cursorIds,
- const intrusive_ptr<ExpressionContext> &pExpCtx)
- : DocumentSource(pExpCtx)
- , _cursorIds(cursorIds)
- , _unstarted(true)
- {}
-
- intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::create(
- const CursorIds& cursorIds,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- return new DocumentSourceMergeCursors(cursorIds, pExpCtx);
- }
+DocumentSourceMergeCursors::DocumentSourceMergeCursors(
+ const CursorIds& cursorIds, const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx), _cursorIds(cursorIds), _unstarted(true) {}
- intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext>& pExpCtx) {
+intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::create(
+ const CursorIds& cursorIds, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ return new DocumentSourceMergeCursors(cursorIds, pExpCtx);
+}
- massert(17026, string("Expected an Array, but got a ") + typeName(elem.type()),
- elem.type() == Array);
+intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ massert(17026,
+ string("Expected an Array, but got a ") + typeName(elem.type()),
+ elem.type() == Array);
+
+ CursorIds cursorIds;
+ BSONObj array = elem.embeddedObject();
+ BSONForEach(cursor, array) {
+ massert(17027,
+ string("Expected an Object, but got a ") + typeName(cursor.type()),
+ cursor.type() == Object);
+
+ cursorIds.push_back(
+ make_pair(ConnectionString(HostAndPort(cursor["host"].String())), cursor["id"].Long()));
+ }
- CursorIds cursorIds;
- BSONObj array = elem.embeddedObject();
- BSONForEach(cursor, array) {
- massert(17027, string("Expected an Object, but got a ") + typeName(cursor.type()),
- cursor.type() == Object);
+ return new DocumentSourceMergeCursors(cursorIds, pExpCtx);
+}
- cursorIds.push_back(make_pair(ConnectionString(HostAndPort(cursor["host"].String())),
- cursor["id"].Long()));
- }
-
- return new DocumentSourceMergeCursors(cursorIds, pExpCtx);
+Value DocumentSourceMergeCursors::serialize(bool explain) const {
+ vector<Value> cursors;
+ for (size_t i = 0; i < _cursorIds.size(); i++) {
+ cursors.push_back(Value(
+ DOC("host" << Value(_cursorIds[i].first.toString()) << "id" << _cursorIds[i].second)));
}
+ return Value(DOC(getSourceName() << Value(cursors)));
+}
- Value DocumentSourceMergeCursors::serialize(bool explain) const {
- vector<Value> cursors;
- for (size_t i = 0; i < _cursorIds.size(); i++) {
- cursors.push_back(Value(DOC("host" << Value(_cursorIds[i].first.toString())
- << "id" << _cursorIds[i].second)));
- }
- return Value(DOC(getSourceName() << Value(cursors)));
+DocumentSourceMergeCursors::CursorAndConnection::CursorAndConnection(ConnectionString host,
+ NamespaceString ns,
+ CursorId id)
+ : connection(host), cursor(connection.get(), ns, id, 0, 0) {}
+
+vector<DBClientCursor*> DocumentSourceMergeCursors::getCursors() {
+ verify(_unstarted);
+ start();
+ vector<DBClientCursor*> out;
+ for (Cursors::const_iterator it = _cursors.begin(); it != _cursors.end(); ++it) {
+ out.push_back(&((*it)->cursor));
}
- DocumentSourceMergeCursors::CursorAndConnection::CursorAndConnection(
- ConnectionString host,
- NamespaceString ns,
- CursorId id)
- : connection(host)
- , cursor(connection.get(), ns, id, 0, 0)
- {}
+ return out;
+}
- vector<DBClientCursor*> DocumentSourceMergeCursors::getCursors() {
- verify(_unstarted);
- start();
- vector<DBClientCursor*> out;
- for (Cursors::const_iterator it = _cursors.begin(); it !=_cursors.end(); ++it) {
- out.push_back(&((*it)->cursor));
- }
+void DocumentSourceMergeCursors::start() {
+ _unstarted = false;
- return out;
+ // open each cursor and send message asking for a batch
+ for (CursorIds::const_iterator it = _cursorIds.begin(); it != _cursorIds.end(); ++it) {
+ _cursors.push_back(
+ std::make_shared<CursorAndConnection>(it->first, pExpCtx->ns, it->second));
+ verify(_cursors.back()->connection->lazySupported());
+ _cursors.back()->cursor.initLazy(); // shouldn't block
}
- void DocumentSourceMergeCursors::start() {
- _unstarted = false;
-
- // open each cursor and send message asking for a batch
- for (CursorIds::const_iterator it = _cursorIds.begin(); it !=_cursorIds.end(); ++it) {
- _cursors.push_back(std::make_shared<CursorAndConnection>(
- it->first, pExpCtx->ns, it->second));
- verify(_cursors.back()->connection->lazySupported());
- _cursors.back()->cursor.initLazy(); // shouldn't block
- }
-
- // wait for all cursors to return a batch
- // TODO need a way to keep cursors alive if some take longer than 10 minutes.
- for (Cursors::const_iterator it = _cursors.begin(); it !=_cursors.end(); ++it) {
- bool retry = false;
- bool ok = (*it)->cursor.initLazyFinish(retry); // blocks here for first batch
-
- uassert(17028,
- "error reading response from " + _cursors.back()->connection->toString(),
- ok);
- verify(!retry);
- }
+ // wait for all cursors to return a batch
+ // TODO need a way to keep cursors alive if some take longer than 10 minutes.
+ for (Cursors::const_iterator it = _cursors.begin(); it != _cursors.end(); ++it) {
+ bool retry = false;
+ bool ok = (*it)->cursor.initLazyFinish(retry); // blocks here for first batch
- _currentCursor = _cursors.begin();
+ uassert(
+ 17028, "error reading response from " + _cursors.back()->connection->toString(), ok);
+ verify(!retry);
}
- Document DocumentSourceMergeCursors::nextSafeFrom(DBClientCursor* cursor) {
- const BSONObj next = cursor->next();
- if (next.hasField("$err")) {
- const int code = next.hasField("code") ? next["code"].numberInt() : 17029;
- uasserted(code, str::stream() << "Received error in response from "
- << cursor->originalHost()
- << ": " << next);
- }
- return Document::fromBsonWithMetaData(next);
+ _currentCursor = _cursors.begin();
+}
+
+Document DocumentSourceMergeCursors::nextSafeFrom(DBClientCursor* cursor) {
+ const BSONObj next = cursor->next();
+ if (next.hasField("$err")) {
+ const int code = next.hasField("code") ? next["code"].numberInt() : 17029;
+ uasserted(code,
+ str::stream() << "Received error in response from " << cursor->originalHost()
+ << ": " << next);
}
+ return Document::fromBsonWithMetaData(next);
+}
- boost::optional<Document> DocumentSourceMergeCursors::getNext() {
- if (_unstarted)
- start();
+boost::optional<Document> DocumentSourceMergeCursors::getNext() {
+ if (_unstarted)
+ start();
- // purge eof cursors and release their connections
- while (!_cursors.empty() && !(*_currentCursor)->cursor.more()) {
- (*_currentCursor)->connection.done();
- _cursors.erase(_currentCursor);
- _currentCursor = _cursors.begin();
- }
+ // purge eof cursors and release their connections
+ while (!_cursors.empty() && !(*_currentCursor)->cursor.more()) {
+ (*_currentCursor)->connection.done();
+ _cursors.erase(_currentCursor);
+ _currentCursor = _cursors.begin();
+ }
- if (_cursors.empty())
- return boost::none;
+ if (_cursors.empty())
+ return boost::none;
- const Document next = nextSafeFrom(&((*_currentCursor)->cursor));
+ const Document next = nextSafeFrom(&((*_currentCursor)->cursor));
- // advance _currentCursor, wrapping if needed
- if (++_currentCursor == _cursors.end())
- _currentCursor = _cursors.begin();
+ // advance _currentCursor, wrapping if needed
+ if (++_currentCursor == _cursors.end())
+ _currentCursor = _cursors.begin();
- return next;
- }
+ return next;
+}
- void DocumentSourceMergeCursors::dispose() {
- _cursors.clear();
- _currentCursor = _cursors.end();
- }
+void DocumentSourceMergeCursors::dispose() {
+ _cursors.clear();
+ _currentCursor = _cursors.end();
+}
}
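
getNext() above drains the remote cursors round-robin, erasing each exhausted cursor (and releasing its connection) and wrapping the current iterator past the end. The same traversal pattern over plain std::deque queues, with no networking involved:

#include <deque>
#include <iostream>
#include <list>

int main() {
    // Each deque stands in for one remote cursor's remaining batch.
    std::list<std::deque<int>> cursors = {{1, 4}, {2, 5, 6}, {3}};
    auto current = cursors.begin();

    while (!cursors.empty()) {
        // Purge exhausted cursors, restarting from the front like the source does.
        while (!cursors.empty() && current->empty()) {
            cursors.erase(current);
            current = cursors.begin();
        }
        if (cursors.empty())
            break;

        std::cout << current->front() << ' ';  // emit one document from this cursor
        current->pop_front();

        if (++current == cursors.end())  // advance, wrapping if needed
            current = cursors.begin();
    }
    std::cout << '\n';  // prints: 1 2 3 4 5 6
    return 0;
}
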
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index bdb8d1d7055..858df389be8 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -32,171 +32,166 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::vector;
-
- const char DocumentSourceOut::outName[] = "$out";
-
- DocumentSourceOut::~DocumentSourceOut() {
- DESTRUCTOR_GUARD(
- // Make sure we drop the temp collection if anything goes wrong. Errors are ignored
- // here because nothing can be done about them. Additionally, if this fails and the
- // collection is left behind, it will be cleaned up next time the server is started.
- if (_mongod && _tempNs.size())
- _mongod->directClient()->dropCollection(_tempNs.ns());
- )
- }
+using boost::intrusive_ptr;
+using std::vector;
- const char *DocumentSourceOut::getSourceName() const {
- return outName;
- }
+const char DocumentSourceOut::outName[] = "$out";
- static AtomicUInt32 aggOutCounter;
- void DocumentSourceOut::prepTempCollection() {
- verify(_mongod);
- verify(_tempNs.size() == 0);
-
- DBClientBase* conn = _mongod->directClient();
-
- // Fail early by checking before we do any work.
- uassert(17017, str::stream() << "namespace '" << _outputNs.ns()
- << "' is sharded so it can't be used for $out'",
- !_mongod->isSharded(_outputNs));
-
- // cannot $out to capped collection
- uassert(17152, str::stream() << "namespace '" << _outputNs.ns()
- << "' is capped so it can't be used for $out",
- !_mongod->isCapped(_outputNs));
-
- _tempNs = NamespaceString(StringData(str::stream() << _outputNs.db()
- << ".tmp.agg_out."
- << aggOutCounter.addAndFetch(1)
- ));
-
- // Create output collection, copying options from existing collection if any.
- {
- const auto infos = conn->getCollectionInfos(_outputNs.db().toString(),
- BSON("name" << _outputNs.coll()));
- const auto options = infos.empty() ? BSONObj()
- : infos.front().getObjectField("options");
-
- BSONObjBuilder cmd;
- cmd << "create" << _tempNs.coll();
- cmd << "temp" << true;
- cmd.appendElementsUnique(options);
-
- BSONObj info;
- bool ok = conn->runCommand(_outputNs.db().toString(), cmd.done(), info);
- uassert(16994, str::stream() << "failed to create temporary $out collection '"
- << _tempNs.ns() << "': " << info.toString(),
- ok);
- }
+DocumentSourceOut::~DocumentSourceOut() {
+ DESTRUCTOR_GUARD(
+ // Make sure we drop the temp collection if anything goes wrong. Errors are ignored
+ // here because nothing can be done about them. Additionally, if this fails and the
+ // collection is left behind, it will be cleaned up next time the server is started.
+ if (_mongod && _tempNs.size()) _mongod->directClient()->dropCollection(_tempNs.ns());)
+}
- // copy indexes on _outputNs to _tempNs
- const std::list<BSONObj> indexes = conn->getIndexSpecs(_outputNs);
- for (std::list<BSONObj>::const_iterator it = indexes.begin(); it != indexes.end(); ++it) {
- MutableDocument index((Document(*it)));
- index.remove("_id"); // indexes shouldn't have _ids but some existing ones do
- index["ns"] = Value(_tempNs.ns());
-
- BSONObj indexBson = index.freeze().toBson();
- conn->insert(_tempNs.getSystemIndexesCollection(), indexBson);
- BSONObj err = conn->getLastErrorDetailed();
- uassert(16995, str::stream() << "copying index for $out failed."
- << " index: " << indexBson
- << " error: " << err,
- DBClientWithCommands::getLastErrorString(err).empty());
- }
- }
+const char* DocumentSourceOut::getSourceName() const {
+ return outName;
+}
- void DocumentSourceOut::spill(const vector<BSONObj>& toInsert) {
- BSONObj err = _mongod->insert(_tempNs, toInsert);
- uassert(16996, str::stream() << "insert for $out failed: " << err,
- DBClientWithCommands::getLastErrorString(err).empty());
- }
+static AtomicUInt32 aggOutCounter;
+void DocumentSourceOut::prepTempCollection() {
+ verify(_mongod);
+ verify(_tempNs.size() == 0);
- boost::optional<Document> DocumentSourceOut::getNext() {
- pExpCtx->checkForInterrupt();
-
- // make sure we only write out once
- if (_done)
- return boost::none;
- _done = true;
-
- verify(_mongod);
- DBClientBase* conn = _mongod->directClient();
-
- prepTempCollection();
- verify(_tempNs.size() != 0);
-
- vector<BSONObj> bufferedObjects;
- int bufferedBytes = 0;
- while (boost::optional<Document> next = pSource->getNext()) {
- BSONObj toInsert = next->toBson();
- bufferedBytes += toInsert.objsize();
- if (!bufferedObjects.empty() && bufferedBytes > BSONObjMaxUserSize) {
- spill(bufferedObjects);
- bufferedObjects.clear();
- bufferedBytes = toInsert.objsize();
- }
- bufferedObjects.push_back(toInsert);
- }
+ DBClientBase* conn = _mongod->directClient();
- if (!bufferedObjects.empty())
- spill(bufferedObjects);
+ // Fail early by checking before we do any work.
+ uassert(17017,
+ str::stream() << "namespace '" << _outputNs.ns()
+                              << "' is sharded so it can't be used for $out",
+ !_mongod->isSharded(_outputNs));
+
+ // cannot $out to capped collection
+ uassert(17152,
+ str::stream() << "namespace '" << _outputNs.ns()
+ << "' is capped so it can't be used for $out",
+ !_mongod->isCapped(_outputNs));
+
+ _tempNs = NamespaceString(StringData(str::stream() << _outputNs.db() << ".tmp.agg_out."
+ << aggOutCounter.addAndFetch(1)));
- // Checking again to make sure we didn't become sharded while running.
- uassert(17018, str::stream() << "namespace '" << _outputNs.ns()
- << "' became sharded so it can't be used for $out'",
- !_mongod->isSharded(_outputNs));
+ // Create output collection, copying options from existing collection if any.
+ {
+ const auto infos =
+ conn->getCollectionInfos(_outputNs.db().toString(), BSON("name" << _outputNs.coll()));
+ const auto options = infos.empty() ? BSONObj() : infos.front().getObjectField("options");
+
+ BSONObjBuilder cmd;
+ cmd << "create" << _tempNs.coll();
+ cmd << "temp" << true;
+ cmd.appendElementsUnique(options);
- BSONObj rename = BSON("renameCollection" << _tempNs.ns()
- << "to" << _outputNs.ns()
- << "dropTarget" << true
- );
BSONObj info;
- bool ok = conn->runCommand("admin", rename, info);
- uassert(16997, str::stream() << "renameCollection for $out failed: " << info,
+ bool ok = conn->runCommand(_outputNs.db().toString(), cmd.done(), info);
+ uassert(16994,
+ str::stream() << "failed to create temporary $out collection '" << _tempNs.ns()
+ << "': " << info.toString(),
ok);
+ }
+
+ // copy indexes on _outputNs to _tempNs
+ const std::list<BSONObj> indexes = conn->getIndexSpecs(_outputNs);
+ for (std::list<BSONObj>::const_iterator it = indexes.begin(); it != indexes.end(); ++it) {
+ MutableDocument index((Document(*it)));
+ index.remove("_id"); // indexes shouldn't have _ids but some existing ones do
+ index["ns"] = Value(_tempNs.ns());
+
+ BSONObj indexBson = index.freeze().toBson();
+ conn->insert(_tempNs.getSystemIndexesCollection(), indexBson);
+ BSONObj err = conn->getLastErrorDetailed();
+ uassert(16995,
+ str::stream() << "copying index for $out failed."
+ << " index: " << indexBson << " error: " << err,
+ DBClientWithCommands::getLastErrorString(err).empty());
+ }
+}
- // We don't need to drop the temp collection in our destructor if the rename succeeded.
- _tempNs = NamespaceString("");
+void DocumentSourceOut::spill(const vector<BSONObj>& toInsert) {
+ BSONObj err = _mongod->insert(_tempNs, toInsert);
+ uassert(16996,
+ str::stream() << "insert for $out failed: " << err,
+ DBClientWithCommands::getLastErrorString(err).empty());
+}
+
+boost::optional<Document> DocumentSourceOut::getNext() {
+ pExpCtx->checkForInterrupt();
- // This "DocumentSource" doesn't produce output documents. This can change in the future
- // if we support using $out in "tee" mode.
+ // make sure we only write out once
+ if (_done)
return boost::none;
- }
+ _done = true;
- DocumentSourceOut::DocumentSourceOut(const NamespaceString& outputNs,
- const intrusive_ptr<ExpressionContext>& pExpCtx)
- : DocumentSource(pExpCtx)
- , _done(false)
- , _tempNs("") // filled in by prepTempCollection
- , _outputNs(outputNs)
- {}
-
- intrusive_ptr<DocumentSource> DocumentSourceOut::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- uassert(16990, str::stream() << "$out only supports a string argument, not "
- << typeName(elem.type()),
- elem.type() == String);
-
- NamespaceString outputNs(pExpCtx->ns.db().toString() + '.' + elem.str());
- uassert(17385, "Can't $out to special collection: " + elem.str(),
- !outputNs.isSpecial());
- return new DocumentSourceOut(outputNs, pExpCtx);
- }
+ verify(_mongod);
+ DBClientBase* conn = _mongod->directClient();
- Value DocumentSourceOut::serialize(bool explain) const {
- massert(17000, "$out shouldn't have different db than input",
- _outputNs.db() == pExpCtx->ns.db());
+ prepTempCollection();
+ verify(_tempNs.size() != 0);
- return Value(DOC(getSourceName() << _outputNs.coll()));
+ vector<BSONObj> bufferedObjects;
+ int bufferedBytes = 0;
+ while (boost::optional<Document> next = pSource->getNext()) {
+ BSONObj toInsert = next->toBson();
+ bufferedBytes += toInsert.objsize();
+ if (!bufferedObjects.empty() && bufferedBytes > BSONObjMaxUserSize) {
+ spill(bufferedObjects);
+ bufferedObjects.clear();
+ bufferedBytes = toInsert.objsize();
+ }
+ bufferedObjects.push_back(toInsert);
}
- DocumentSource::GetDepsReturn DocumentSourceOut::getDependencies(DepsTracker* deps) const {
- deps->needWholeDocument = true;
- return EXHAUSTIVE_ALL;
- }
+ if (!bufferedObjects.empty())
+ spill(bufferedObjects);
+
+ // Checking again to make sure we didn't become sharded while running.
+ uassert(17018,
+ str::stream() << "namespace '" << _outputNs.ns()
+                              << "' became sharded so it can't be used for $out",
+ !_mongod->isSharded(_outputNs));
+
+ BSONObj rename =
+ BSON("renameCollection" << _tempNs.ns() << "to" << _outputNs.ns() << "dropTarget" << true);
+ BSONObj info;
+ bool ok = conn->runCommand("admin", rename, info);
+ uassert(16997, str::stream() << "renameCollection for $out failed: " << info, ok);
+
+ // We don't need to drop the temp collection in our destructor if the rename succeeded.
+ _tempNs = NamespaceString("");
+
+ // This "DocumentSource" doesn't produce output documents. This can change in the future
+ // if we support using $out in "tee" mode.
+ return boost::none;
+}
+
+DocumentSourceOut::DocumentSourceOut(const NamespaceString& outputNs,
+ const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx),
+ _done(false),
+ _tempNs("") // filled in by prepTempCollection
+ ,
+ _outputNs(outputNs) {}
+
+intrusive_ptr<DocumentSource> DocumentSourceOut::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ uassert(16990,
+ str::stream() << "$out only supports a string argument, not " << typeName(elem.type()),
+ elem.type() == String);
+
+ NamespaceString outputNs(pExpCtx->ns.db().toString() + '.' + elem.str());
+ uassert(17385, "Can't $out to special collection: " + elem.str(), !outputNs.isSpecial());
+ return new DocumentSourceOut(outputNs, pExpCtx);
+}
+
+Value DocumentSourceOut::serialize(bool explain) const {
+ massert(
+ 17000, "$out shouldn't have different db than input", _outputNs.db() == pExpCtx->ns.db());
+
+ return Value(DOC(getSourceName() << _outputNs.coll()));
+}
+
+DocumentSource::GetDepsReturn DocumentSourceOut::getDependencies(DepsTracker* deps) const {
+ deps->needWholeDocument = true;
+ return EXHAUSTIVE_ALL;
+}
}
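
The buffering loop in DocumentSourceOut::getNext() above groups documents into batches by byte size, spilling a batch just before adding the next document would exceed BSONObjMaxUserSize. A standalone sketch of that batching, with strings standing in for BSON documents and a deliberately tiny budget so the flush is visible:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

void spill(const std::vector<std::string>& batch) {
    std::cout << "spilling " << batch.size() << " document(s)\n";
}

int main() {
    const std::size_t maxBatchBytes = 10;  // stands in for BSONObjMaxUserSize
    const std::vector<std::string> docs = {"aaaa", "bbbb", "cccc", "dd"};

    std::vector<std::string> buffered;
    std::size_t bufferedBytes = 0;
    for (const std::string& doc : docs) {
        bufferedBytes += doc.size();
        // Flush the previous batch once adding this document would exceed the
        // budget, then start a new batch containing just this document.
        if (!buffered.empty() && bufferedBytes > maxBatchBytes) {
            spill(buffered);
            buffered.clear();
            bufferedBytes = doc.size();
        }
        buffered.push_back(doc);
    }
    if (!buffered.empty())
        spill(buffered);  // final partial batch
    return 0;
}
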
diff --git a/src/mongo/db/pipeline/document_source_project.cpp b/src/mongo/db/pipeline/document_source_project.cpp
index c652b177fa4..c0bc9aa68a2 100644
--- a/src/mongo/db/pipeline/document_source_project.cpp
+++ b/src/mongo/db/pipeline/document_source_project.cpp
@@ -38,91 +38,85 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::string;
- using std::vector;
-
- const char DocumentSourceProject::projectName[] = "$project";
-
- DocumentSourceProject::DocumentSourceProject(const intrusive_ptr<ExpressionContext>& pExpCtx,
- const intrusive_ptr<ExpressionObject>& exprObj)
- : DocumentSource(pExpCtx)
- , pEO(exprObj)
- { }
-
- const char *DocumentSourceProject::getSourceName() const {
- return projectName;
- }
-
- boost::optional<Document> DocumentSourceProject::getNext() {
- pExpCtx->checkForInterrupt();
-
- boost::optional<Document> input = pSource->getNext();
- if (!input)
- return boost::none;
-
- /* create the result document */
- const size_t sizeHint = pEO->getSizeHint();
- MutableDocument out (sizeHint);
- out.copyMetaDataFrom(*input);
-
- /*
- Use the ExpressionObject to create the base result.
-
- If we're excluding fields at the top level, leave out the _id if
- it is found, because we took care of it above.
- */
- _variables->setRoot(*input);
- pEO->addToDocument(out, *input, _variables.get());
- _variables->clearRoot();
-
- return out.freeze();
- }
-
- intrusive_ptr<DocumentSource> DocumentSourceProject::optimize() {
- intrusive_ptr<Expression> pE(pEO->optimize());
- pEO = boost::dynamic_pointer_cast<ExpressionObject>(pE);
- return this;
- }
-
- Value DocumentSourceProject::serialize(bool explain) const {
- return Value(DOC(getSourceName() << pEO->serialize(explain)));
- }
-
- intrusive_ptr<DocumentSource> DocumentSourceProject::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
-
- /* validate */
- uassert(15969, str::stream() << projectName <<
- " specification must be an object",
- elem.type() == Object);
-
- Expression::ObjectCtx objectCtx(
- Expression::ObjectCtx::DOCUMENT_OK
- | Expression::ObjectCtx::TOP_LEVEL
- | Expression::ObjectCtx::INCLUSION_OK
- );
-
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- intrusive_ptr<Expression> parsed = Expression::parseObject(elem.Obj(), &objectCtx, vps);
- ExpressionObject* exprObj = dynamic_cast<ExpressionObject*>(parsed.get());
- massert(16402, "parseObject() returned wrong type of Expression", exprObj);
- uassert(16403, "$projection requires at least one output field", exprObj->getFieldCount());
-
- intrusive_ptr<DocumentSourceProject> pProject(new DocumentSourceProject(pExpCtx, exprObj));
- pProject->_variables.reset(new Variables(idGenerator.getIdCount()));
-
- BSONObj projectObj = elem.Obj();
- pProject->_raw = projectObj.getOwned();
-
- return pProject;
- }
-
- DocumentSource::GetDepsReturn DocumentSourceProject::getDependencies(DepsTracker* deps) const {
- vector<string> path; // empty == top-level
- pEO->addDependencies(deps, &path);
- return EXHAUSTIVE_FIELDS;
- }
+using boost::intrusive_ptr;
+using std::string;
+using std::vector;
+
+const char DocumentSourceProject::projectName[] = "$project";
+
+DocumentSourceProject::DocumentSourceProject(const intrusive_ptr<ExpressionContext>& pExpCtx,
+ const intrusive_ptr<ExpressionObject>& exprObj)
+ : DocumentSource(pExpCtx), pEO(exprObj) {}
+
+const char* DocumentSourceProject::getSourceName() const {
+ return projectName;
+}
+
+boost::optional<Document> DocumentSourceProject::getNext() {
+ pExpCtx->checkForInterrupt();
+
+ boost::optional<Document> input = pSource->getNext();
+ if (!input)
+ return boost::none;
+
+ /* create the result document */
+ const size_t sizeHint = pEO->getSizeHint();
+ MutableDocument out(sizeHint);
+ out.copyMetaDataFrom(*input);
+
+ /*
+ Use the ExpressionObject to create the base result.
+
+ If we're excluding fields at the top level, leave out the _id if
+ it is found, because we took care of it above.
+ */
+ _variables->setRoot(*input);
+ pEO->addToDocument(out, *input, _variables.get());
+ _variables->clearRoot();
+
+ return out.freeze();
+}
+
+intrusive_ptr<DocumentSource> DocumentSourceProject::optimize() {
+ intrusive_ptr<Expression> pE(pEO->optimize());
+ pEO = boost::dynamic_pointer_cast<ExpressionObject>(pE);
+ return this;
+}
+
+Value DocumentSourceProject::serialize(bool explain) const {
+ return Value(DOC(getSourceName() << pEO->serialize(explain)));
+}
+
+intrusive_ptr<DocumentSource> DocumentSourceProject::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ /* validate */
+ uassert(15969,
+ str::stream() << projectName << " specification must be an object",
+ elem.type() == Object);
+
+ Expression::ObjectCtx objectCtx(Expression::ObjectCtx::DOCUMENT_OK |
+ Expression::ObjectCtx::TOP_LEVEL |
+ Expression::ObjectCtx::INCLUSION_OK);
+
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ intrusive_ptr<Expression> parsed = Expression::parseObject(elem.Obj(), &objectCtx, vps);
+ ExpressionObject* exprObj = dynamic_cast<ExpressionObject*>(parsed.get());
+ massert(16402, "parseObject() returned wrong type of Expression", exprObj);
+ uassert(16403, "$projection requires at least one output field", exprObj->getFieldCount());
+
+ intrusive_ptr<DocumentSourceProject> pProject(new DocumentSourceProject(pExpCtx, exprObj));
+ pProject->_variables.reset(new Variables(idGenerator.getIdCount()));
+
+ BSONObj projectObj = elem.Obj();
+ pProject->_raw = projectObj.getOwned();
+
+ return pProject;
+}
+
+DocumentSource::GetDepsReturn DocumentSourceProject::getDependencies(DepsTracker* deps) const {
+ vector<string> path; // empty == top-level
+ pEO->addDependencies(deps, &path);
+ return EXHAUSTIVE_FIELDS;
+}
}
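
DocumentSourceProject builds each output document from the parsed ExpressionObject. For the simple inclusion case the effect is to copy only the requested top-level fields; a standalone sketch with a flat map standing in for a Document and arbitrary example field names:

#include <iostream>
#include <map>
#include <set>
#include <string>

using Doc = std::map<std::string, std::string>;

// Copy only the requested top-level fields from the input document.
Doc project(const Doc& input, const std::set<std::string>& fields) {
    Doc out;
    for (const std::string& f : fields) {
        auto it = input.find(f);
        if (it != input.end())
            out[f] = it->second;  // include the field only when present
    }
    return out;
}

int main() {
    const Doc doc = {{"_id", "1"}, {"name", "ada"}, {"note", "drop me"}};
    const Doc projected = project(doc, {"_id", "name"});
    for (const auto& kv : projected)
        std::cout << kv.first << ": " << kv.second << "\n";  // _id: 1, name: ada
    return 0;
}
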
diff --git a/src/mongo/db/pipeline/document_source_redact.cpp b/src/mongo/db/pipeline/document_source_redact.cpp
index 98916a0afe0..860dd3a8f73 100644
--- a/src/mongo/db/pipeline/document_source_redact.cpp
+++ b/src/mongo/db/pipeline/document_source_redact.cpp
@@ -39,135 +39,124 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::vector;
+using boost::intrusive_ptr;
+using std::vector;
- const char DocumentSourceRedact::redactName[] = "$redact";
+const char DocumentSourceRedact::redactName[] = "$redact";
- DocumentSourceRedact::DocumentSourceRedact(const intrusive_ptr<ExpressionContext>& expCtx,
- const intrusive_ptr<Expression>& expression)
- : DocumentSource(expCtx)
- , _expression(expression)
- { }
+DocumentSourceRedact::DocumentSourceRedact(const intrusive_ptr<ExpressionContext>& expCtx,
+ const intrusive_ptr<Expression>& expression)
+ : DocumentSource(expCtx), _expression(expression) {}
- const char *DocumentSourceRedact::getSourceName() const {
- return redactName;
- }
+const char* DocumentSourceRedact::getSourceName() const {
+ return redactName;
+}
- static const Value descendVal = Value("descend");
- static const Value pruneVal = Value("prune");
- static const Value keepVal = Value("keep");
+static const Value descendVal = Value("descend");
+static const Value pruneVal = Value("prune");
+static const Value keepVal = Value("keep");
- boost::optional<Document> DocumentSourceRedact::getNext() {
- while (boost::optional<Document> in = pSource->getNext()) {
- _variables->setRoot(*in);
- _variables->setValue(_currentId, Value(*in));
- if (boost::optional<Document> result = redactObject()) {
- return result;
- }
+boost::optional<Document> DocumentSourceRedact::getNext() {
+ while (boost::optional<Document> in = pSource->getNext()) {
+ _variables->setRoot(*in);
+ _variables->setValue(_currentId, Value(*in));
+ if (boost::optional<Document> result = redactObject()) {
+ return result;
}
-
- return boost::none;
}
- Value DocumentSourceRedact::redactValue(const Value& in) {
- const BSONType valueType = in.getType();
- if (valueType == Object) {
- _variables->setValue(_currentId, in);
- const boost::optional<Document> result = redactObject();
- if (result) {
- return Value(*result);
- }
- else {
- return Value();
- }
+ return boost::none;
+}
+
+Value DocumentSourceRedact::redactValue(const Value& in) {
+ const BSONType valueType = in.getType();
+ if (valueType == Object) {
+ _variables->setValue(_currentId, in);
+ const boost::optional<Document> result = redactObject();
+ if (result) {
+ return Value(*result);
+ } else {
+ return Value();
}
- else if (valueType == Array) {
- // TODO dont copy if possible
- vector<Value> newArr;
- const vector<Value>& arr = in.getArray();
- for (size_t i = 0; i < arr.size(); i++) {
- if (arr[i].getType() == Object || arr[i].getType() == Array) {
- const Value toAdd = redactValue(arr[i]) ;
- if (!toAdd.missing()) {
- newArr.push_back(toAdd);
- }
- }
- else {
- newArr.push_back(arr[i]);
+ } else if (valueType == Array) {
+ // TODO dont copy if possible
+ vector<Value> newArr;
+ const vector<Value>& arr = in.getArray();
+ for (size_t i = 0; i < arr.size(); i++) {
+ if (arr[i].getType() == Object || arr[i].getType() == Array) {
+ const Value toAdd = redactValue(arr[i]);
+ if (!toAdd.missing()) {
+ newArr.push_back(toAdd);
}
+ } else {
+ newArr.push_back(arr[i]);
}
- return Value(std::move(newArr));
- }
- else {
- return in;
}
+ return Value(std::move(newArr));
+ } else {
+ return in;
}
+}
- boost::optional<Document> DocumentSourceRedact::redactObject() {
- const Value expressionResult = _expression->evaluate(_variables.get());
-
- if (expressionResult == keepVal) {
- return _variables->getDocument(_currentId);
- }
- else if (expressionResult == pruneVal) {
- return boost::optional<Document>();
- }
- else if (expressionResult == descendVal) {
- const Document in = _variables->getDocument(_currentId);
- MutableDocument out;
- out.copyMetaDataFrom(in);
- FieldIterator fields(in);
- while (fields.more()) {
- const Document::FieldPair field(fields.next());
-
- // This changes CURRENT so don't read from _variables after this
- const Value val = redactValue(field.second);
- if (!val.missing()) {
- out.addField(field.first, val);
- }
+boost::optional<Document> DocumentSourceRedact::redactObject() {
+ const Value expressionResult = _expression->evaluate(_variables.get());
+
+ if (expressionResult == keepVal) {
+ return _variables->getDocument(_currentId);
+ } else if (expressionResult == pruneVal) {
+ return boost::optional<Document>();
+ } else if (expressionResult == descendVal) {
+ const Document in = _variables->getDocument(_currentId);
+ MutableDocument out;
+ out.copyMetaDataFrom(in);
+ FieldIterator fields(in);
+ while (fields.more()) {
+ const Document::FieldPair field(fields.next());
+
+ // This changes CURRENT so don't read from _variables after this
+ const Value val = redactValue(field.second);
+ if (!val.missing()) {
+ out.addField(field.first, val);
}
- return out.freeze();
- }
- else {
- uasserted(17053, str::stream() << "$redact's expression should not return anything "
- << "aside from the variables $$KEEP, $$DESCEND, and "
- << "$$PRUNE, but returned "
- << expressionResult.toString());
}
+ return out.freeze();
+ } else {
+ uasserted(17053,
+ str::stream() << "$redact's expression should not return anything "
+ << "aside from the variables $$KEEP, $$DESCEND, and "
+ << "$$PRUNE, but returned " << expressionResult.toString());
}
+}
- intrusive_ptr<DocumentSource> DocumentSourceRedact::optimize() {
- _expression = _expression->optimize();
- return this;
- }
+intrusive_ptr<DocumentSource> DocumentSourceRedact::optimize() {
+ _expression = _expression->optimize();
+ return this;
+}
- Value DocumentSourceRedact::serialize(bool explain) const {
- return Value(DOC(getSourceName() << _expression.get()->serialize(explain)));
- }
+Value DocumentSourceRedact::serialize(bool explain) const {
+ return Value(DOC(getSourceName() << _expression.get()->serialize(explain)));
+}
- intrusive_ptr<DocumentSource> DocumentSourceRedact::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext>& expCtx) {
-
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- Variables::Id currentId = vps.defineVariable("CURRENT"); // will differ from ROOT
- Variables::Id decendId = vps.defineVariable("DESCEND");
- Variables::Id pruneId = vps.defineVariable("PRUNE");
- Variables::Id keepId = vps.defineVariable("KEEP");
- intrusive_ptr<Expression> expression = Expression::parseOperand(elem, vps);
- intrusive_ptr<DocumentSourceRedact> source = new DocumentSourceRedact(expCtx, expression);
-
- // TODO figure out how much of this belongs in constructor and how much here.
- // Set up variables. Never need to reset DESCEND, PRUNE, or KEEP.
- source->_currentId = currentId;
- source->_variables.reset(new Variables(idGenerator.getIdCount()));
- source->_variables->setValue(decendId, descendVal);
- source->_variables->setValue(pruneId, pruneVal);
- source->_variables->setValue(keepId, keepVal);
-
-
- return source;
- }
+intrusive_ptr<DocumentSource> DocumentSourceRedact::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& expCtx) {
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ Variables::Id currentId = vps.defineVariable("CURRENT"); // will differ from ROOT
+ Variables::Id decendId = vps.defineVariable("DESCEND");
+ Variables::Id pruneId = vps.defineVariable("PRUNE");
+ Variables::Id keepId = vps.defineVariable("KEEP");
+ intrusive_ptr<Expression> expression = Expression::parseOperand(elem, vps);
+ intrusive_ptr<DocumentSourceRedact> source = new DocumentSourceRedact(expCtx, expression);
+
+ // TODO figure out how much of this belongs in constructor and how much here.
+ // Set up variables. Never need to reset DESCEND, PRUNE, or KEEP.
+ source->_currentId = currentId;
+ source->_variables.reset(new Variables(idGenerator.getIdCount()));
+ source->_variables->setValue(decendId, descendVal);
+ source->_variables->setValue(pruneId, pruneVal);
+ source->_variables->setValue(keepId, keepVal);
+
+
+ return source;
+}
}
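
redactObject() above interprets the user expression's result as one of $$KEEP (emit the subtree unchanged), $$PRUNE (drop it), or $$DESCEND (keep this level and recurse into subdocuments and arrays). A standalone sketch of that recursion over a toy nested type, limited to subdocuments; the decide() policy is an invented stand-in for the user's $redact expression:

#include <iostream>
#include <map>
#include <string>

struct Doc {
    std::map<std::string, std::string> scalars;  // leaf fields
    std::map<std::string, Doc> children;         // subdocuments
};

enum class Decision { kKeep, kPrune, kDescend };

// Made-up policy: prune anything holding a "secret" field, keep anything
// explicitly marked "public", otherwise descend into children.
Decision decide(const Doc& d) {
    if (d.scalars.count("secret"))
        return Decision::kPrune;
    if (d.scalars.count("public"))
        return Decision::kKeep;
    return Decision::kDescend;
}

// Returns false when the whole subtree should be dropped.
bool redact(const Doc& in, Doc& out) {
    switch (decide(in)) {
        case Decision::kKeep:
            out = in;      // $$KEEP: emit the whole subtree unchanged
            return true;
        case Decision::kPrune:
            return false;  // $$PRUNE: drop this subtree entirely
        case Decision::kDescend: {
            out.scalars = in.scalars;  // $$DESCEND: keep this level's fields...
            for (const auto& kv : in.children) {
                Doc child;
                if (redact(kv.second, child))  // ...and recurse into subdocuments
                    out.children[kv.first] = child;
            }
            return true;
        }
    }
    return false;
}

int main() {
    Doc root;
    root.scalars["title"] = "report";
    root.children["a"].scalars["public"] = "yes";
    root.children["b"].scalars["secret"] = "xyz";

    Doc result;
    redact(root, result);
    std::cout << "kept " << result.children.size() << " of "
              << root.children.size() << " subdocuments\n";  // kept 1 of 2
    return 0;
}
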
diff --git a/src/mongo/db/pipeline/document_source_skip.cpp b/src/mongo/db/pipeline/document_source_skip.cpp
index 4899565fe99..3a1430b3b98 100644
--- a/src/mongo/db/pipeline/document_source_skip.cpp
+++ b/src/mongo/db/pipeline/document_source_skip.cpp
@@ -37,78 +37,72 @@
namespace mongo {
- using boost::intrusive_ptr;
+using boost::intrusive_ptr;
- const char DocumentSourceSkip::skipName[] = "$skip";
+const char DocumentSourceSkip::skipName[] = "$skip";
- DocumentSourceSkip::DocumentSourceSkip(const intrusive_ptr<ExpressionContext> &pExpCtx):
- DocumentSource(pExpCtx),
- _skip(0),
- _needToSkip(true) {
- }
+DocumentSourceSkip::DocumentSourceSkip(const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx), _skip(0), _needToSkip(true) {}
- const char *DocumentSourceSkip::getSourceName() const {
- return skipName;
- }
+const char* DocumentSourceSkip::getSourceName() const {
+ return skipName;
+}
- bool DocumentSourceSkip::coalesce(
- const intrusive_ptr<DocumentSource> &pNextSource) {
- DocumentSourceSkip *pSkip =
- dynamic_cast<DocumentSourceSkip *>(pNextSource.get());
+bool DocumentSourceSkip::coalesce(const intrusive_ptr<DocumentSource>& pNextSource) {
+ DocumentSourceSkip* pSkip = dynamic_cast<DocumentSourceSkip*>(pNextSource.get());
- /* if it's not another $skip, we can't coalesce */
- if (!pSkip)
- return false;
+ /* if it's not another $skip, we can't coalesce */
+ if (!pSkip)
+ return false;
- /* we need to skip over the sum of the two consecutive $skips */
- _skip += pSkip->_skip;
- return true;
- }
+ /* we need to skip over the sum of the two consecutive $skips */
+ _skip += pSkip->_skip;
+ return true;
+}
- boost::optional<Document> DocumentSourceSkip::getNext() {
- pExpCtx->checkForInterrupt();
+boost::optional<Document> DocumentSourceSkip::getNext() {
+ pExpCtx->checkForInterrupt();
- if (_needToSkip) {
- _needToSkip = false;
- for (long long i=0; i < _skip; i++) {
- if (!pSource->getNext())
- return boost::none;
- }
+ if (_needToSkip) {
+ _needToSkip = false;
+ for (long long i = 0; i < _skip; i++) {
+ if (!pSource->getNext())
+ return boost::none;
}
-
- return pSource->getNext();
}
- Value DocumentSourceSkip::serialize(bool explain) const {
- return Value(DOC(getSourceName() << _skip));
- }
+ return pSource->getNext();
+}
- intrusive_ptr<DocumentSource> DocumentSourceSkip::optimize() {
- return _skip == 0 ? nullptr : this;
- }
+Value DocumentSourceSkip::serialize(bool explain) const {
+ return Value(DOC(getSourceName() << _skip));
+}
- intrusive_ptr<DocumentSourceSkip> DocumentSourceSkip::create(
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- intrusive_ptr<DocumentSourceSkip> pSource(
- new DocumentSourceSkip(pExpCtx));
- return pSource;
- }
+intrusive_ptr<DocumentSource> DocumentSourceSkip::optimize() {
+ return _skip == 0 ? nullptr : this;
+}
- intrusive_ptr<DocumentSource> DocumentSourceSkip::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- uassert(15972, str::stream() << DocumentSourceSkip::skipName <<
- ": the value to skip must be a number",
- elem.isNumber());
+intrusive_ptr<DocumentSourceSkip> DocumentSourceSkip::create(
+ const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ intrusive_ptr<DocumentSourceSkip> pSource(new DocumentSourceSkip(pExpCtx));
+ return pSource;
+}
- intrusive_ptr<DocumentSourceSkip> pSkip(
- DocumentSourceSkip::create(pExpCtx));
+intrusive_ptr<DocumentSource> DocumentSourceSkip::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ uassert(15972,
+ str::stream() << DocumentSourceSkip::skipName
+ << ": the value to skip must be a number",
+ elem.isNumber());
- pSkip->_skip = elem.numberLong();
- uassert(15956, str::stream() << DocumentSourceSkip::skipName <<
- ": the number to skip cannot be negative",
- pSkip->_skip >= 0);
+ intrusive_ptr<DocumentSourceSkip> pSkip(DocumentSourceSkip::create(pExpCtx));
- return pSkip;
- }
+ pSkip->_skip = elem.numberLong();
+ uassert(15956,
+ str::stream() << DocumentSourceSkip::skipName
+ << ": the number to skip cannot be negative",
+ pSkip->_skip >= 0);
+
+ return pSkip;
+}
}
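As a rough illustration of the two behaviours above, here is a self-contained sketch in plain C++ (not the DocumentSource API; the SkipStage type is invented for this example): adjacent skip stages coalesce by summing their counts, and the skipping itself is deferred until the first request for a result.

#include <cassert>
#include <optional>
#include <vector>

struct SkipStage {
    long long skip = 0;
    bool needToSkip = true;
    std::vector<int> input;
    size_t pos = 0;

    // Mirrors coalesce(): absorb the following stage's count into this one.
    void coalesce(const SkipStage& next) {
        skip += next.skip;
    }

    // Mirrors getNext(): discard `skip` items exactly once, then stream the rest.
    std::optional<int> getNext() {
        if (needToSkip) {
            needToSkip = false;
            for (long long i = 0; i < skip && pos < input.size(); i++)
                pos++;
        }
        if (pos >= input.size())
            return std::nullopt;
        return input[pos++];
    }
};

int main() {
    SkipStage first{2, true, {1, 2, 3, 4, 5}};
    SkipStage second{1};
    first.coalesce(second);  // {$skip: 2} followed by {$skip: 1} skips 3 in total
    assert(*first.getNext() == 4);
    assert(*first.getNext() == 5);
    assert(!first.getNext());
    return 0;
}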
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index f4e57d5c8ae..1b7396b8513 100644
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -39,329 +39,324 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::unique_ptr;
- using std::make_pair;
- using std::string;
- using std::vector;
+using boost::intrusive_ptr;
+using std::unique_ptr;
+using std::make_pair;
+using std::string;
+using std::vector;
- const char DocumentSourceSort::sortName[] = "$sort";
+const char DocumentSourceSort::sortName[] = "$sort";
- const char *DocumentSourceSort::getSourceName() const {
- return sortName;
- }
+const char* DocumentSourceSort::getSourceName() const {
+ return sortName;
+}
- boost::optional<Document> DocumentSourceSort::getNext() {
- pExpCtx->checkForInterrupt();
+boost::optional<Document> DocumentSourceSort::getNext() {
+ pExpCtx->checkForInterrupt();
- if (!populated)
- populate();
+ if (!populated)
+ populate();
- if (!_output || !_output->more())
- return boost::none;
+ if (!_output || !_output->more())
+ return boost::none;
- return _output->next().second;
- }
+ return _output->next().second;
+}
- void DocumentSourceSort::serializeToArray(vector<Value>& array, bool explain) const {
- if (explain) { // always one Value for combined $sort + $limit
- array.push_back(Value(DOC(getSourceName() <<
- DOC("sortKey" << serializeSortKey(explain)
- << "mergePresorted" << (_mergingPresorted ? Value(true) : Value())
- << "limit" << (limitSrc ? Value(limitSrc->getLimit()) : Value())))));
- }
- else { // one Value for $sort and maybe a Value for $limit
- MutableDocument inner (serializeSortKey(explain));
- if (_mergingPresorted)
- inner["$mergePresorted"] = Value(true);
- array.push_back(Value(DOC(getSourceName() << inner.freeze())));
-
- if (limitSrc) {
- limitSrc->serializeToArray(array);
- }
+void DocumentSourceSort::serializeToArray(vector<Value>& array, bool explain) const {
+ if (explain) { // always one Value for combined $sort + $limit
+ array.push_back(
+ Value(DOC(getSourceName()
+ << DOC("sortKey" << serializeSortKey(explain) << "mergePresorted"
+ << (_mergingPresorted ? Value(true) : Value()) << "limit"
+ << (limitSrc ? Value(limitSrc->getLimit()) : Value())))));
+ } else { // one Value for $sort and maybe a Value for $limit
+ MutableDocument inner(serializeSortKey(explain));
+ if (_mergingPresorted)
+ inner["$mergePresorted"] = Value(true);
+ array.push_back(Value(DOC(getSourceName() << inner.freeze())));
+
+ if (limitSrc) {
+ limitSrc->serializeToArray(array);
}
}
+}
- void DocumentSourceSort::dispose() {
- _output.reset();
- pSource->dispose();
- }
+void DocumentSourceSort::dispose() {
+ _output.reset();
+ pSource->dispose();
+}
- DocumentSourceSort::DocumentSourceSort(const intrusive_ptr<ExpressionContext> &pExpCtx)
- : DocumentSource(pExpCtx)
- , populated(false)
- , _mergingPresorted(false)
- {}
+DocumentSourceSort::DocumentSourceSort(const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx), populated(false), _mergingPresorted(false) {}
- long long DocumentSourceSort::getLimit() const {
- return limitSrc ? limitSrc->getLimit() : -1;
- }
+long long DocumentSourceSort::getLimit() const {
+ return limitSrc ? limitSrc->getLimit() : -1;
+}
- bool DocumentSourceSort::coalesce(const intrusive_ptr<DocumentSource> &pNextSource) {
- if (!limitSrc) {
- limitSrc = dynamic_cast<DocumentSourceLimit*>(pNextSource.get());
- return limitSrc.get(); // false if next is not a $limit
- }
- else {
- return limitSrc->coalesce(pNextSource);
- }
+bool DocumentSourceSort::coalesce(const intrusive_ptr<DocumentSource>& pNextSource) {
+ if (!limitSrc) {
+ limitSrc = dynamic_cast<DocumentSourceLimit*>(pNextSource.get());
+ return limitSrc.get(); // false if next is not a $limit
+ } else {
+ return limitSrc->coalesce(pNextSource);
}
+}
- void DocumentSourceSort::addKey(const string& fieldPath, bool ascending) {
- VariablesIdGenerator idGenerator;
- VariablesParseState vps(&idGenerator);
- vSortKey.push_back(ExpressionFieldPath::parse("$$ROOT." + fieldPath, vps));
- vAscending.push_back(ascending);
- }
+void DocumentSourceSort::addKey(const string& fieldPath, bool ascending) {
+ VariablesIdGenerator idGenerator;
+ VariablesParseState vps(&idGenerator);
+ vSortKey.push_back(ExpressionFieldPath::parse("$$ROOT." + fieldPath, vps));
+ vAscending.push_back(ascending);
+}
- Document DocumentSourceSort::serializeSortKey(bool explain) const {
- MutableDocument keyObj;
- // add the key fields
- const size_t n = vSortKey.size();
- for(size_t i = 0; i < n; ++i) {
- if (ExpressionFieldPath* efp = dynamic_cast<ExpressionFieldPath*>(vSortKey[i].get())) {
- // ExpressionFieldPath gets special syntax that includes direction
- const FieldPath& withVariable = efp->getFieldPath();
- verify(withVariable.getPathLength() > 1);
- verify(withVariable.getFieldName(0) == "ROOT");
- const string fieldPath = withVariable.tail().getPath(false);
-
- // append a named integer based on the sort order
- keyObj.setField(fieldPath, Value(vAscending[i] ? 1 : -1));
- }
- else {
- // other expressions use a made-up field name
- keyObj[string(str::stream() << "$computed" << i)] = vSortKey[i]->serialize(explain);
- }
+Document DocumentSourceSort::serializeSortKey(bool explain) const {
+ MutableDocument keyObj;
+ // add the key fields
+ const size_t n = vSortKey.size();
+ for (size_t i = 0; i < n; ++i) {
+ if (ExpressionFieldPath* efp = dynamic_cast<ExpressionFieldPath*>(vSortKey[i].get())) {
+ // ExpressionFieldPath gets special syntax that includes direction
+ const FieldPath& withVariable = efp->getFieldPath();
+ verify(withVariable.getPathLength() > 1);
+ verify(withVariable.getFieldName(0) == "ROOT");
+ const string fieldPath = withVariable.tail().getPath(false);
+
+ // append a named integer based on the sort order
+ keyObj.setField(fieldPath, Value(vAscending[i] ? 1 : -1));
+ } else {
+ // other expressions use a made-up field name
+ keyObj[string(str::stream() << "$computed" << i)] = vSortKey[i]->serialize(explain);
}
- return keyObj.freeze();
}
+ return keyObj.freeze();
+}
- DocumentSource::GetDepsReturn DocumentSourceSort::getDependencies(DepsTracker* deps) const {
- for(size_t i = 0; i < vSortKey.size(); ++i) {
- vSortKey[i]->addDependencies(deps);
- }
-
- return SEE_NEXT;
+DocumentSource::GetDepsReturn DocumentSourceSort::getDependencies(DepsTracker* deps) const {
+ for (size_t i = 0; i < vSortKey.size(); ++i) {
+ vSortKey[i]->addDependencies(deps);
}
+ return SEE_NEXT;
+}
- intrusive_ptr<DocumentSource> DocumentSourceSort::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- uassert(15973, str::stream() << " the " <<
- sortName << " key specification must be an object",
- elem.type() == Object);
- return create(pExpCtx, elem.embeddedObject());
- }
+intrusive_ptr<DocumentSource> DocumentSourceSort::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ uassert(15973,
+ str::stream() << " the " << sortName << " key specification must be an object",
+ elem.type() == Object);
- intrusive_ptr<DocumentSourceSort> DocumentSourceSort::create(
- const intrusive_ptr<ExpressionContext> &pExpCtx,
- BSONObj sortOrder,
- long long limit) {
+ return create(pExpCtx, elem.embeddedObject());
+}
- intrusive_ptr<DocumentSourceSort> pSort = new DocumentSourceSort(pExpCtx);
+intrusive_ptr<DocumentSourceSort> DocumentSourceSort::create(
+ const intrusive_ptr<ExpressionContext>& pExpCtx, BSONObj sortOrder, long long limit) {
+ intrusive_ptr<DocumentSourceSort> pSort = new DocumentSourceSort(pExpCtx);
- /* check for then iterate over the sort object */
- BSONForEach(keyField, sortOrder) {
- const char* fieldName = keyField.fieldName();
+ /* check for then iterate over the sort object */
+ BSONForEach(keyField, sortOrder) {
+ const char* fieldName = keyField.fieldName();
- if (str::equals(fieldName, "$mergePresorted")) {
- verify(keyField.Bool());
- pSort->_mergingPresorted = true;
- continue;
- }
+ if (str::equals(fieldName, "$mergePresorted")) {
+ verify(keyField.Bool());
+ pSort->_mergingPresorted = true;
+ continue;
+ }
- if (keyField.type() == Object) {
- // this restriction is due to needing to figure out sort direction
- uassert(17312,
- "the only expression supported by $sort right now is {$meta: 'textScore'}",
- keyField.Obj() == BSON("$meta" << "textScore"));
+ if (keyField.type() == Object) {
+ // this restriction is due to needing to figure out sort direction
+ uassert(17312,
+ "the only expression supported by $sort right now is {$meta: 'textScore'}",
+ keyField.Obj() == BSON("$meta"
+ << "textScore"));
- pSort->vSortKey.push_back(new ExpressionMeta());
- pSort->vAscending.push_back(false); // best scoring documents first
- continue;
- }
-
- uassert(15974,
- "$sort key ordering must be specified using a number or {$meta: 'textScore'}",
- keyField.isNumber());
+ pSort->vSortKey.push_back(new ExpressionMeta());
+ pSort->vAscending.push_back(false); // best scoring documents first
+ continue;
+ }
- int sortOrder = keyField.numberInt();
+ uassert(15974,
+ "$sort key ordering must be specified using a number or {$meta: 'textScore'}",
+ keyField.isNumber());
- uassert(15975, "$sort key ordering must be 1 (for ascending) or -1 (for descending)",
- ((sortOrder == 1) || (sortOrder == -1)));
+ int sortOrder = keyField.numberInt();
- pSort->addKey(fieldName, (sortOrder > 0));
- }
+ uassert(15975,
+ "$sort key ordering must be 1 (for ascending) or -1 (for descending)",
+ ((sortOrder == 1) || (sortOrder == -1)));
- uassert(15976, str::stream() << sortName << " must have at least one sort key",
- !pSort->vSortKey.empty());
+ pSort->addKey(fieldName, (sortOrder > 0));
+ }
- if (limit > 0) {
- bool coalesced = pSort->coalesce(DocumentSourceLimit::create(pExpCtx, limit));
- verify(coalesced); // should always coalesce
- verify(pSort->getLimit() == limit);
- }
+ uassert(15976,
+ str::stream() << sortName << " must have at least one sort key",
+ !pSort->vSortKey.empty());
- return pSort;
+ if (limit > 0) {
+ bool coalesced = pSort->coalesce(DocumentSourceLimit::create(pExpCtx, limit));
+ verify(coalesced); // should always coalesce
+ verify(pSort->getLimit() == limit);
}
- SortOptions DocumentSourceSort::makeSortOptions() const {
- /* make sure we've got a sort key */
- verify(vSortKey.size());
+ return pSort;
+}
- SortOptions opts;
- if (limitSrc)
- opts.limit = limitSrc->getLimit();
+SortOptions DocumentSourceSort::makeSortOptions() const {
+ /* make sure we've got a sort key */
+ verify(vSortKey.size());
- opts.maxMemoryUsageBytes = 100*1024*1024;
- if (pExpCtx->extSortAllowed && !pExpCtx->inRouter) {
- opts.extSortAllowed = true;
- opts.tempDir = pExpCtx->tempDir;
- }
+ SortOptions opts;
+ if (limitSrc)
+ opts.limit = limitSrc->getLimit();
- return opts;
+ opts.maxMemoryUsageBytes = 100 * 1024 * 1024;
+ if (pExpCtx->extSortAllowed && !pExpCtx->inRouter) {
+ opts.extSortAllowed = true;
+ opts.tempDir = pExpCtx->tempDir;
}
- void DocumentSourceSort::populate() {
- if (_mergingPresorted) {
- typedef DocumentSourceMergeCursors DSCursors;
- typedef DocumentSourceCommandShards DSCommands;
- if (DSCursors* castedSource = dynamic_cast<DSCursors*>(pSource)) {
- populateFromCursors(castedSource->getCursors());
- } else if (DSCommands* castedSource = dynamic_cast<DSCommands*>(pSource)) {
- populateFromBsonArrays(castedSource->getArrays());
- } else {
- msgasserted(17196, "can only mergePresorted from MergeCursors and CommandShards");
- }
+ return opts;
+}
+
+void DocumentSourceSort::populate() {
+ if (_mergingPresorted) {
+ typedef DocumentSourceMergeCursors DSCursors;
+ typedef DocumentSourceCommandShards DSCommands;
+ if (DSCursors* castedSource = dynamic_cast<DSCursors*>(pSource)) {
+ populateFromCursors(castedSource->getCursors());
+ } else if (DSCommands* castedSource = dynamic_cast<DSCommands*>(pSource)) {
+ populateFromBsonArrays(castedSource->getArrays());
} else {
- unique_ptr<MySorter> sorter (MySorter::make(makeSortOptions(), Comparator(*this)));
- while (boost::optional<Document> next = pSource->getNext()) {
- sorter->add(extractKey(*next), *next);
- }
- _output.reset(sorter->done());
+ msgasserted(17196, "can only mergePresorted from MergeCursors and CommandShards");
}
- populated = true;
+ } else {
+ unique_ptr<MySorter> sorter(MySorter::make(makeSortOptions(), Comparator(*this)));
+ while (boost::optional<Document> next = pSource->getNext()) {
+ sorter->add(extractKey(*next), *next);
+ }
+ _output.reset(sorter->done());
}
+ populated = true;
+}
- class DocumentSourceSort::IteratorFromCursor : public MySorter::Iterator {
- public:
- IteratorFromCursor(DocumentSourceSort* sorter, DBClientCursor* cursor)
- : _sorter(sorter)
- , _cursor(cursor)
- {}
-
- bool more() { return _cursor->more(); }
- Data next() {
- const Document doc = DocumentSourceMergeCursors::nextSafeFrom(_cursor);
- return make_pair(_sorter->extractKey(doc), doc);
- }
- private:
- DocumentSourceSort* _sorter;
- DBClientCursor* _cursor;
- };
-
- void DocumentSourceSort::populateFromCursors(const vector<DBClientCursor*>& cursors) {
- vector<std::shared_ptr<MySorter::Iterator> > iterators;
- for (size_t i = 0; i < cursors.size(); i++) {
- iterators.push_back(std::make_shared<IteratorFromCursor>(this, cursors[i]));
- }
+class DocumentSourceSort::IteratorFromCursor : public MySorter::Iterator {
+public:
+ IteratorFromCursor(DocumentSourceSort* sorter, DBClientCursor* cursor)
+ : _sorter(sorter), _cursor(cursor) {}
- _output.reset(MySorter::Iterator::merge(iterators, makeSortOptions(), Comparator(*this)));
+ bool more() {
+ return _cursor->more();
+ }
+ Data next() {
+ const Document doc = DocumentSourceMergeCursors::nextSafeFrom(_cursor);
+ return make_pair(_sorter->extractKey(doc), doc);
}
- class DocumentSourceSort::IteratorFromBsonArray : public MySorter::Iterator {
- public:
- IteratorFromBsonArray(DocumentSourceSort* sorter, const BSONArray& array)
- : _sorter(sorter)
- , _iterator(array)
- {}
-
- bool more() { return _iterator.more(); }
- Data next() {
- Document doc(_iterator.next().Obj());
- return make_pair(_sorter->extractKey(doc), doc);
- }
- private:
- DocumentSourceSort* _sorter;
- BSONObjIterator _iterator;
- };
-
- void DocumentSourceSort::populateFromBsonArrays(const vector<BSONArray>& arrays) {
- vector<std::shared_ptr<MySorter::Iterator> > iterators;
- for (size_t i = 0; i < arrays.size(); i++) {
- iterators.push_back(std::make_shared<IteratorFromBsonArray>(this, arrays[i]));
- }
+private:
+ DocumentSourceSort* _sorter;
+ DBClientCursor* _cursor;
+};
- _output.reset(MySorter::Iterator::merge(iterators, makeSortOptions(), Comparator(*this)));
+void DocumentSourceSort::populateFromCursors(const vector<DBClientCursor*>& cursors) {
+ vector<std::shared_ptr<MySorter::Iterator>> iterators;
+ for (size_t i = 0; i < cursors.size(); i++) {
+ iterators.push_back(std::make_shared<IteratorFromCursor>(this, cursors[i]));
}
- Value DocumentSourceSort::extractKey(const Document& d) const {
- Variables vars(0, d);
- if (vSortKey.size() == 1) {
- return vSortKey[0]->evaluate(&vars);
- }
+ _output.reset(MySorter::Iterator::merge(iterators, makeSortOptions(), Comparator(*this)));
+}
- vector<Value> keys;
- keys.reserve(vSortKey.size());
- for (size_t i=0; i < vSortKey.size(); i++) {
- keys.push_back(vSortKey[i]->evaluate(&vars));
- }
- return Value(std::move(keys));
+class DocumentSourceSort::IteratorFromBsonArray : public MySorter::Iterator {
+public:
+ IteratorFromBsonArray(DocumentSourceSort* sorter, const BSONArray& array)
+ : _sorter(sorter), _iterator(array) {}
+
+ bool more() {
+ return _iterator.more();
+ }
+ Data next() {
+ Document doc(_iterator.next().Obj());
+ return make_pair(_sorter->extractKey(doc), doc);
}
- int DocumentSourceSort::compare(const Value& lhs, const Value& rhs) const {
-
- /*
- populate() already checked that there is a non-empty sort key,
- so we shouldn't have to worry about that here.
-
- However, the tricky part is what to do is none of the sort keys are
- present. In this case, consider the document less.
- */
- const size_t n = vSortKey.size();
- if (n == 1) { // simple fast case
- if (vAscending[0])
- return Value::compare(lhs, rhs);
- else
- return -Value::compare(lhs, rhs);
- }
+private:
+ DocumentSourceSort* _sorter;
+ BSONObjIterator _iterator;
+};
- // compound sort
- for (size_t i = 0; i < n; i++) {
- int cmp = Value::compare(lhs[i], rhs[i]);
- if (cmp) {
- /* if necessary, adjust the return value by the key ordering */
- if (!vAscending[i])
- cmp = -cmp;
+void DocumentSourceSort::populateFromBsonArrays(const vector<BSONArray>& arrays) {
+ vector<std::shared_ptr<MySorter::Iterator>> iterators;
+ for (size_t i = 0; i < arrays.size(); i++) {
+ iterators.push_back(std::make_shared<IteratorFromBsonArray>(this, arrays[i]));
+ }
- return cmp;
- }
- }
+ _output.reset(MySorter::Iterator::merge(iterators, makeSortOptions(), Comparator(*this)));
+}
- /*
- If we got here, everything matched (or didn't exist), so we'll
- consider the documents equal for purposes of this sort.
- */
- return 0;
+Value DocumentSourceSort::extractKey(const Document& d) const {
+ Variables vars(0, d);
+ if (vSortKey.size() == 1) {
+ return vSortKey[0]->evaluate(&vars);
}
- intrusive_ptr<DocumentSource> DocumentSourceSort::getShardSource() {
- verify(!_mergingPresorted);
- return this;
+ vector<Value> keys;
+ keys.reserve(vSortKey.size());
+ for (size_t i = 0; i < vSortKey.size(); i++) {
+ keys.push_back(vSortKey[i]->evaluate(&vars));
}
+ return Value(std::move(keys));
+}
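For intuition, a small standalone sketch of the extractKey() shape (plain ints and std::variant rather than mongo::Value; everything here is hypothetical): a single-key sort uses the key's value directly, while a compound sort bundles every key component so the comparator can walk them in order.

#include <cassert>
#include <variant>
#include <vector>

using SortKey = std::variant<int, std::vector<int>>;

SortKey extractKey(const std::vector<int>& keyValues) {
    if (keyValues.size() == 1)
        return keyValues[0];  // fast path: no wrapper array around a single key
    return keyValues;         // compound key: keep every component, in order
}

int main() {
    assert(std::holds_alternative<int>(extractKey({5})));
    assert(std::holds_alternative<std::vector<int>>(extractKey({5, 7})));
    return 0;
}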
+
+int DocumentSourceSort::compare(const Value& lhs, const Value& rhs) const {
+ /*
+ populate() already checked that there is a non-empty sort key,
+ so we shouldn't have to worry about that here.
+
+ However, the tricky part is what to do if none of the sort keys are
+ present. In this case, consider the document less.
+ */
+ const size_t n = vSortKey.size();
+ if (n == 1) { // simple fast case
+ if (vAscending[0])
+ return Value::compare(lhs, rhs);
+ else
+ return -Value::compare(lhs, rhs);
+ }
+
+ // compound sort
+ for (size_t i = 0; i < n; i++) {
+ int cmp = Value::compare(lhs[i], rhs[i]);
+ if (cmp) {
+ /* if necessary, adjust the return value by the key ordering */
+ if (!vAscending[i])
+ cmp = -cmp;
- intrusive_ptr<DocumentSource> DocumentSourceSort::getMergeSource() {
- verify(!_mergingPresorted);
- intrusive_ptr<DocumentSourceSort> other = new DocumentSourceSort(pExpCtx);
- other->vAscending = vAscending;
- other->vSortKey = vSortKey;
- other->limitSrc = limitSrc;
- other->_mergingPresorted = true;
- return other;
+ return cmp;
+ }
}
+
+ /*
+ If we got here, everything matched (or didn't exist), so we'll
+ consider the documents equal for purposes of this sort.
+ */
+ return 0;
+}
+
+intrusive_ptr<DocumentSource> DocumentSourceSort::getShardSource() {
+ verify(!_mergingPresorted);
+ return this;
+}
+
+intrusive_ptr<DocumentSource> DocumentSourceSort::getMergeSource() {
+ verify(!_mergingPresorted);
+ intrusive_ptr<DocumentSourceSort> other = new DocumentSourceSort(pExpCtx);
+ other->vAscending = vAscending;
+ other->vSortKey = vSortKey;
+ other->limitSrc = limitSrc;
+ other->_mergingPresorted = true;
+ return other;
+}
}
#include "mongo/db/sorter/sorter.cpp"
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index 2caf7f95c64..8ec126b967c 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -36,150 +36,141 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::string;
- using std::vector;
-
- /** Helper class to unwind array from a single document. */
- class DocumentSourceUnwind::Unwinder {
- public:
- /** @param unwindPath is the field path to the array to unwind. */
- Unwinder(const FieldPath& unwindPath);
- /** Reset the unwinder to unwind a new document. */
- void resetDocument(const Document& document);
-
- /**
- * @return the next document unwound from the document provided to resetDocument(), using
- * the current value in the array located at the provided unwindPath.
- *
- * Returns boost::none if the array is exhausted.
- */
- boost::optional<Document> getNext();
-
- private:
- // Path to the array to unwind.
- const FieldPath _unwindPath;
-
- Value _inputArray;
- MutableDocument _output;
-
- // Document indexes of the field path components.
- vector<Position> _unwindPathFieldIndexes;
- // Index into the _inputArray to return next.
- size_t _index;
- };
-
- DocumentSourceUnwind::Unwinder::Unwinder(const FieldPath& unwindPath):
- _unwindPath(unwindPath) {
+using boost::intrusive_ptr;
+using std::string;
+using std::vector;
+
+/** Helper class to unwind array from a single document. */
+class DocumentSourceUnwind::Unwinder {
+public:
+ /** @param unwindPath is the field path to the array to unwind. */
+ Unwinder(const FieldPath& unwindPath);
+ /** Reset the unwinder to unwind a new document. */
+ void resetDocument(const Document& document);
+
+ /**
+ * @return the next document unwound from the document provided to resetDocument(), using
+ * the current value in the array located at the provided unwindPath.
+ *
+ * Returns boost::none if the array is exhausted.
+ */
+ boost::optional<Document> getNext();
+
+private:
+ // Path to the array to unwind.
+ const FieldPath _unwindPath;
+
+ Value _inputArray;
+ MutableDocument _output;
+
+ // Document indexes of the field path components.
+ vector<Position> _unwindPathFieldIndexes;
+ // Index into the _inputArray to return next.
+ size_t _index;
+};
+
+DocumentSourceUnwind::Unwinder::Unwinder(const FieldPath& unwindPath) : _unwindPath(unwindPath) {}
+
+void DocumentSourceUnwind::Unwinder::resetDocument(const Document& document) {
+ // Reset document specific attributes.
+ _inputArray = Value();
+ _output.reset(document);
+ _unwindPathFieldIndexes.clear();
+ _index = 0;
+
+ Value pathValue = document.getNestedField(_unwindPath, &_unwindPathFieldIndexes);
+ if (pathValue.nullish()) {
+ // The path does not exist or is null.
+ return;
}
- void DocumentSourceUnwind::Unwinder::resetDocument(const Document& document) {
-
- // Reset document specific attributes.
- _inputArray = Value();
- _output.reset(document);
- _unwindPathFieldIndexes.clear();
- _index = 0;
+ _inputArray = pathValue;
+}
- Value pathValue = document.getNestedField(_unwindPath, &_unwindPathFieldIndexes);
- if (pathValue.nullish()) {
- // The path does not exist or is null.
- return;
- }
+boost::optional<Document> DocumentSourceUnwind::Unwinder::getNext() {
+ if (_inputArray.missing())
+ return boost::none;
- _inputArray = pathValue;
- }
-
- boost::optional<Document> DocumentSourceUnwind::Unwinder::getNext() {
- if (_inputArray.missing())
- return boost::none;
+ // If needed, this will automatically clone all the documents along the
+ // field path so that the end values are not shared across documents
+ // that have come out of this pipeline operator. This is a partial deep
+ // clone. Because the value at the end will be replaced, everything
+ // along the path leading to that will be replaced in order not to share
+ // that change with any other clones (or the original).
- // If needed, this will automatically clone all the documents along the
- // field path so that the end values are not shared across documents
- // that have come out of this pipeline operator. This is a partial deep
- // clone. Because the value at the end will be replaced, everything
- // along the path leading to that will be replaced in order not to share
- // that change with any other clones (or the original).
-
- if (_inputArray.getType() == Array) {
- if (_index == _inputArray.getArrayLength())
- return boost::none;
- _output.setNestedField(_unwindPathFieldIndexes, _inputArray[_index]);
- }
- else if (_index > 0) {
+ if (_inputArray.getType() == Array) {
+ if (_index == _inputArray.getArrayLength())
return boost::none;
- }
- else {
- //_output.setNestedField(_unwindPathFieldIndexes, _inputArray);
- }
- _index++;
- return _output.peek();
+ _output.setNestedField(_unwindPathFieldIndexes, _inputArray[_index]);
+ } else if (_index > 0) {
+ return boost::none;
+ } else {
+ //_output.setNestedField(_unwindPathFieldIndexes, _inputArray);
}
+ _index++;
+ return _output.peek();
+}
- const char DocumentSourceUnwind::unwindName[] = "$unwind";
+const char DocumentSourceUnwind::unwindName[] = "$unwind";
- DocumentSourceUnwind::DocumentSourceUnwind(
- const intrusive_ptr<ExpressionContext> &pExpCtx):
- DocumentSource(pExpCtx) {
- }
+DocumentSourceUnwind::DocumentSourceUnwind(const intrusive_ptr<ExpressionContext>& pExpCtx)
+ : DocumentSource(pExpCtx) {}
- const char *DocumentSourceUnwind::getSourceName() const {
- return unwindName;
- }
-
- boost::optional<Document> DocumentSourceUnwind::getNext() {
- pExpCtx->checkForInterrupt();
+const char* DocumentSourceUnwind::getSourceName() const {
+ return unwindName;
+}
- boost::optional<Document> out = _unwinder->getNext();
- while (!out) {
- // No more elements in array currently being unwound. This will loop if the input
- // document is missing the unwind field or has an empty array.
- boost::optional<Document> input = pSource->getNext();
- if (!input)
- return boost::none; // input exhausted
+boost::optional<Document> DocumentSourceUnwind::getNext() {
+ pExpCtx->checkForInterrupt();
- // Try to extract an output document from the new input document.
- _unwinder->resetDocument(*input);
- out = _unwinder->getNext();
- }
+ boost::optional<Document> out = _unwinder->getNext();
+ while (!out) {
+ // No more elements in array currently being unwound. This will loop if the input
+ // document is missing the unwind field or has an empty array.
+ boost::optional<Document> input = pSource->getNext();
+ if (!input)
+ return boost::none; // input exhausted
- return out;
+ // Try to extract an output document from the new input document.
+ _unwinder->resetDocument(*input);
+ out = _unwinder->getNext();
}
- Value DocumentSourceUnwind::serialize(bool explain) const {
- verify(_unwindPath);
- return Value(DOC(getSourceName() << _unwindPath->getPath(true)));
- }
+ return out;
+}
- DocumentSource::GetDepsReturn DocumentSourceUnwind::getDependencies(DepsTracker* deps) const {
- deps->fields.insert(_unwindPath->getPath(false));
- return SEE_NEXT;
- }
+Value DocumentSourceUnwind::serialize(bool explain) const {
+ verify(_unwindPath);
+ return Value(DOC(getSourceName() << _unwindPath->getPath(true)));
+}
- void DocumentSourceUnwind::unwindPath(const FieldPath &fieldPath) {
- // Can't set more than one unwind path.
- uassert(15979, str::stream() << unwindName << "can't unwind more than one path",
- !_unwindPath);
- // Record the unwind path.
- _unwindPath.reset(new FieldPath(fieldPath));
- _unwinder.reset(new Unwinder(fieldPath));
- }
+DocumentSource::GetDepsReturn DocumentSourceUnwind::getDependencies(DepsTracker* deps) const {
+ deps->fields.insert(_unwindPath->getPath(false));
+ return SEE_NEXT;
+}
- intrusive_ptr<DocumentSource> DocumentSourceUnwind::createFromBson(
- BSONElement elem,
- const intrusive_ptr<ExpressionContext> &pExpCtx) {
- /*
- The value of $unwind should just be a field path.
- */
- uassert(15981, str::stream() << "the " << unwindName <<
- " field path must be specified as a string",
- elem.type() == String);
-
- string prefixedPathString(elem.str());
- string pathString(Expression::removeFieldPrefix(prefixedPathString));
- intrusive_ptr<DocumentSourceUnwind> pUnwind(new DocumentSourceUnwind(pExpCtx));
- pUnwind->unwindPath(FieldPath(pathString));
-
- return pUnwind;
- }
+void DocumentSourceUnwind::unwindPath(const FieldPath& fieldPath) {
+ // Can't set more than one unwind path.
+ uassert(15979, str::stream() << unwindName << " can't unwind more than one path", !_unwindPath);
+ // Record the unwind path.
+ _unwindPath.reset(new FieldPath(fieldPath));
+ _unwinder.reset(new Unwinder(fieldPath));
+}
+
+intrusive_ptr<DocumentSource> DocumentSourceUnwind::createFromBson(
+ BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ /*
+ The value of $unwind should just be a field path.
+ */
+ uassert(15981,
+ str::stream() << "the " << unwindName << " field path must be specified as a string",
+ elem.type() == String);
+
+ string prefixedPathString(elem.str());
+ string pathString(Expression::removeFieldPrefix(prefixedPathString));
+ intrusive_ptr<DocumentSourceUnwind> pUnwind(new DocumentSourceUnwind(pExpCtx));
+ pUnwind->unwindPath(FieldPath(pathString));
+
+ return pUnwind;
+}
}
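A rough standalone analogue of the getNext() loop above, using plain structs instead of Document/Value (the Input/Unwind names are invented): one output per array element, with the loop pulling further inputs whenever a document's array is empty or missing.

#include <cassert>
#include <optional>
#include <vector>

struct Input {
    std::vector<int> arr;  // the field being unwound
};

struct Unwind {
    std::vector<Input> source;
    size_t doc = 0;
    size_t idx = 0;

    std::optional<int> getNext() {
        while (doc < source.size()) {
            const std::vector<int>& arr = source[doc].arr;
            if (idx < arr.size())
                return arr[idx++];
            // This document's array is exhausted (or empty): move to the next input.
            doc++;
            idx = 0;
        }
        return std::nullopt;  // input exhausted
    }
};

int main() {
    Unwind u;
    u.source = {Input{{1, 2}}, Input{}, Input{{3}}};
    assert(*u.getNext() == 1);
    assert(*u.getNext() == 2);
    assert(*u.getNext() == 3);  // the empty middle document produced nothing
    assert(!u.getNext());
    return 0;
}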
diff --git a/src/mongo/db/pipeline/document_value_test.cpp b/src/mongo/db/pipeline/document_value_test.cpp
index 0afffb5b639..544ac629a9b 100644
--- a/src/mongo/db/pipeline/document_value_test.cpp
+++ b/src/mongo/db/pipeline/document_value_test.cpp
@@ -38,1480 +38,1618 @@
namespace DocumentTests {
- using std::endl;
- using std::numeric_limits;
- using std::string;
- using std::vector;
+using std::endl;
+using std::numeric_limits;
+using std::string;
+using std::vector;
+
+mongo::Document::FieldPair getNthField(mongo::Document doc, size_t index) {
+ mongo::FieldIterator it(doc);
+ while (index--) // advance index times
+ it.next();
+ return it.next();
+}
+
+namespace Document {
+
+using mongo::Document;
+
+BSONObj toBson(const Document& document) {
+ return document.toBson();
+}
+
+Document fromBson(BSONObj obj) {
+ return Document(obj);
+}
+
+void assertRoundTrips(const Document& document1) {
+ BSONObj obj1 = toBson(document1);
+ Document document2 = fromBson(obj1);
+ BSONObj obj2 = toBson(document2);
+ ASSERT_EQUALS(obj1, obj2);
+ ASSERT_EQUALS(document1, document2);
+}
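The assertRoundTrips() helper just above checks that converting a Document to BSON and back changes neither representation. A tiny standalone version of the same pattern with ints and strings (no BSON; the helper names are made up for this sketch):

#include <cassert>
#include <string>

std::string toText(int v) {
    return std::to_string(v);
}

int fromText(const std::string& s) {
    return std::stoi(s);
}

void assertRoundTrips(int value1) {
    std::string text1 = toText(value1);
    int value2 = fromText(text1);
    std::string text2 = toText(value2);
    assert(text1 == text2);    // the serialized forms match
    assert(value1 == value2);  // the parsed value matches the original
}

int main() {
    assertRoundTrips(42);
    assertRoundTrips(-7);
    return 0;
}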
+
+/** Create a Document. */
+class Create {
+public:
+ void run() {
+ Document document;
+ ASSERT_EQUALS(0U, document.size());
+ assertRoundTrips(document);
+ }
+};
+
+/** Create a Document from a BSONObj. */
+class CreateFromBsonObj {
+public:
+ void run() {
+ Document document = fromBson(BSONObj());
+ ASSERT_EQUALS(0U, document.size());
+ document = fromBson(BSON("a" << 1 << "b"
+ << "q"));
+ ASSERT_EQUALS(2U, document.size());
+ ASSERT_EQUALS("a", getNthField(document, 0).first.toString());
+ ASSERT_EQUALS(1, getNthField(document, 0).second.getInt());
+ ASSERT_EQUALS("b", getNthField(document, 1).first.toString());
+ ASSERT_EQUALS("q", getNthField(document, 1).second.getString());
+ assertRoundTrips(document);
+ }
+};
+
+/** Add Document fields. */
+class AddField {
+public:
+ void run() {
+ MutableDocument md;
+ md.addField("foo", Value(1));
+ ASSERT_EQUALS(1U, md.peek().size());
+ ASSERT_EQUALS(1, md.peek()["foo"].getInt());
+ md.addField("bar", Value(99));
+ ASSERT_EQUALS(2U, md.peek().size());
+ ASSERT_EQUALS(99, md.peek()["bar"].getInt());
+ // No assertion is triggered by a duplicate field name.
+ md.addField("a", Value(5));
+
+ Document final = md.freeze();
+ ASSERT_EQUALS(3U, final.size());
+ assertRoundTrips(final);
+ }
+};
+
+/** Get Document values. */
+class GetValue {
+public:
+ void run() {
+ Document document = fromBson(BSON("a" << 1 << "b" << 2.2));
+ ASSERT_EQUALS(1, document["a"].getInt());
+ ASSERT_EQUALS(1, document["a"].getInt());
+ ASSERT_EQUALS(2.2, document["b"].getDouble());
+ ASSERT_EQUALS(2.2, document["b"].getDouble());
+ // Missing field.
+ ASSERT(document["c"].missing());
+ ASSERT(document["c"].missing());
+ assertRoundTrips(document);
+ }
+};
+
+/** Set Document fields. */
+class SetField {
+public:
+ void run() {
+ Document original = fromBson(BSON("a" << 1 << "b" << 2.2 << "c" << 99));
+
+ // Initial positions. Used at end of function to make sure nothing moved
+ const Position apos = original.positionOf("a");
+ const Position bpos = original.positionOf("c");
+ const Position cpos = original.positionOf("c");
+
+ MutableDocument md(original);
+
+ // Set the first field.
+ md.setField("a", Value("foo"));
+ ASSERT_EQUALS(3U, md.peek().size());
+ ASSERT_EQUALS("foo", md.peek()["a"].getString());
+ ASSERT_EQUALS("foo", getNthField(md.peek(), 0).second.getString());
+ assertRoundTrips(md.peek());
+ // Set the second field.
+ md["b"] = Value("bar");
+ ASSERT_EQUALS(3U, md.peek().size());
+ ASSERT_EQUALS("bar", md.peek()["b"].getString());
+ ASSERT_EQUALS("bar", getNthField(md.peek(), 1).second.getString());
+ assertRoundTrips(md.peek());
+
+ // Remove the second field.
+ md.setField("b", Value());
+ PRINT(md.peek().toString());
+ ASSERT_EQUALS(2U, md.peek().size());
+ ASSERT(md.peek()["b"].missing());
+ ASSERT_EQUALS("a", getNthField(md.peek(), 0).first.toString());
+ ASSERT_EQUALS("c", getNthField(md.peek(), 1).first.toString());
+ ASSERT_EQUALS(99, md.peek()["c"].getInt());
+ assertRoundTrips(md.peek());
+
+ // Remove the first field.
+ md["a"] = Value();
+ ASSERT_EQUALS(1U, md.peek().size());
+ ASSERT(md.peek()["a"].missing());
+ ASSERT_EQUALS("c", getNthField(md.peek(), 0).first.toString());
+ ASSERT_EQUALS(99, md.peek()["c"].getInt());
+ assertRoundTrips(md.peek());
+
+ // Remove the final field. Verify document is empty.
+ md.remove("c");
+ ASSERT(md.peek().empty());
+ ASSERT_EQUALS(0U, md.peek().size());
+ ASSERT_EQUALS(md.peek(), Document());
+ ASSERT(!FieldIterator(md.peek()).more());
+ ASSERT(md.peek()["c"].missing());
+ assertRoundTrips(md.peek());
+
+ // Set a nested field using []
+ md["x"]["y"]["z"] = Value("nested");
+ ASSERT_EQUALS(md.peek()["x"]["y"]["z"], Value("nested"));
+
+ // Set a nested field using setNestedField
+ FieldPath xxyyzz = string("xx.yy.zz");
+ md.setNestedField(xxyyzz, Value("nested"));
+ ASSERT_EQUALS(md.peek().getNestedField(xxyyzz), Value("nested"));
+
+ // Set nested fields through an existing empty document
+ md["xxx"] = Value(Document());
+ md["xxx"]["yyy"] = Value(Document());
+ FieldPath xxxyyyzzz = string("xxx.yyy.zzz");
+ md.setNestedField(xxxyyyzzz, Value("nested"));
+ ASSERT_EQUALS(md.peek().getNestedField(xxxyyyzzz), Value("nested"));
+
+ // Make sure nothing moved
+ ASSERT_EQUALS(apos, md.peek().positionOf("a"));
+ ASSERT_EQUALS(bpos, md.peek().positionOf("c"));
+ ASSERT_EQUALS(cpos, md.peek().positionOf("c"));
+ ASSERT_EQUALS(Position(), md.peek().positionOf("d"));
+ }
+};
+
+/** Document comparator. */
+class Compare {
+public:
+ void run() {
+ assertComparison(0, BSONObj(), BSONObj());
+ assertComparison(0, BSON("a" << 1), BSON("a" << 1));
+ assertComparison(-1, BSONObj(), BSON("a" << 1));
+ assertComparison(-1, BSON("a" << 1), BSON("c" << 1));
+ assertComparison(0, BSON("a" << 1 << "r" << 2), BSON("a" << 1 << "r" << 2));
+ assertComparison(-1, BSON("a" << 1), BSON("a" << 1 << "r" << 2));
+ assertComparison(0, BSON("a" << 2), BSON("a" << 2));
+ assertComparison(-1, BSON("a" << 1), BSON("a" << 2));
+ assertComparison(-1, BSON("a" << 1 << "b" << 1), BSON("a" << 1 << "b" << 2));
+ // numbers sort before strings
+ assertComparison(-1,
+ BSON("a" << 1),
+ BSON("a"
+ << "foo"));
+ // numbers sort before strings, even if keys compare otherwise
+ assertComparison(-1,
+ BSON("b" << 1),
+ BSON("a"
+ << "foo"));
+ // null before number, even if keys compare otherwise
+ assertComparison(-1, BSON("z" << BSONNULL), BSON("a" << 1));
+ }
- mongo::Document::FieldPair getNthField(mongo::Document doc, size_t index) {
- mongo::FieldIterator it (doc);
- while (index--) // advance index times
- it.next();
- return it.next();
+public:
+ int cmp(const BSONObj& a, const BSONObj& b) {
+ int result = Document::compare(fromBson(a), fromBson(b));
+ return // sign
+ result < 0 ? -1 : result > 0 ? 1 : 0;
+ }
+ void assertComparison(int expectedResult, const BSONObj& a, const BSONObj& b) {
+ ASSERT_EQUALS(expectedResult, cmp(a, b));
+ ASSERT_EQUALS(-expectedResult, cmp(b, a));
+ if (expectedResult == 0) {
+ ASSERT_EQUALS(hash(a), hash(b));
+ }
+ }
+ size_t hash(const BSONObj& obj) {
+ size_t seed = 0x106e1e1;
+ Document(obj).hash_combine(seed);
+ return seed;
}
+};
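The Compare fixture normalizes Document::compare() to its sign, then checks both operand orders plus hash agreement for equal inputs. A small sketch of that testing pattern with plain ints (names here are hypothetical):

#include <cassert>
#include <functional>

int sign(int result) {
    return result < 0 ? -1 : result > 0 ? 1 : 0;
}

void assertComparison(int expected, int a, int b) {
    assert(sign(a - b) == expected);
    assert(sign(b - a) == -expected);  // swapping the operands flips the sign
    if (expected == 0)
        assert(std::hash<int>{}(a) == std::hash<int>{}(b));  // equal values hash alike
}

int main() {
    assertComparison(0, 7, 7);
    assertComparison(-1, 3, 9);
    assertComparison(1, 9, 3);
    return 0;
}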
- namespace Document {
+/** Shallow copy clone of a single field Document. */
+class Clone {
+public:
+ void run() {
+ const Document document = fromBson(BSON("a" << BSON("b" << 1)));
+ MutableDocument cloneOnDemand(document);
- using mongo::Document;
+ // Check equality.
+ ASSERT_EQUALS(document, cloneOnDemand.peek());
+ // Check pointer equality of sub document.
+ ASSERT_EQUALS(document["a"].getDocument().getPtr(),
+ cloneOnDemand.peek()["a"].getDocument().getPtr());
- BSONObj toBson( const Document& document ) {
- return document.toBson();
- }
- Document fromBson( BSONObj obj ) {
- return Document(obj);
- }
+ // Change field in clone and ensure the original document's field is unchanged.
+ cloneOnDemand.setField(StringData("a"), Value(2));
+ ASSERT_EQUALS(Value(1), document.getNestedField(FieldPath("a.b")));
- void assertRoundTrips( const Document& document1 ) {
- BSONObj obj1 = toBson( document1 );
- Document document2 = fromBson( obj1 );
- BSONObj obj2 = toBson( document2 );
- ASSERT_EQUALS( obj1, obj2 );
- ASSERT_EQUALS( document1, document2 );
- }
- /** Create a Document. */
- class Create {
- public:
- void run() {
- Document document;
- ASSERT_EQUALS( 0U, document.size() );
- assertRoundTrips( document );
- }
- };
-
- /** Create a Document from a BSONObj. */
- class CreateFromBsonObj {
- public:
- void run() {
- Document document = fromBson( BSONObj() );
- ASSERT_EQUALS( 0U, document.size() );
- document = fromBson( BSON( "a" << 1 << "b" << "q" ) );
- ASSERT_EQUALS( 2U, document.size() );
- ASSERT_EQUALS( "a", getNthField(document, 0).first.toString() );
- ASSERT_EQUALS( 1, getNthField(document, 0).second.getInt() );
- ASSERT_EQUALS( "b", getNthField(document, 1).first.toString() );
- ASSERT_EQUALS( "q", getNthField(document, 1).second.getString() );
- assertRoundTrips( document );
- }
- };
-
- /** Add Document fields. */
- class AddField {
- public:
- void run() {
- MutableDocument md;
- md.addField( "foo", Value( 1 ) );
- ASSERT_EQUALS( 1U, md.peek().size() );
- ASSERT_EQUALS( 1, md.peek()["foo"].getInt() );
- md.addField( "bar", Value( 99 ) );
- ASSERT_EQUALS( 2U, md.peek().size() );
- ASSERT_EQUALS( 99, md.peek()["bar"].getInt() );
- // No assertion is triggered by a duplicate field name.
- md.addField( "a", Value( 5 ) );
-
- Document final = md.freeze();
- ASSERT_EQUALS( 3U, final.size() );
- assertRoundTrips( final );
- }
- };
-
- /** Get Document values. */
- class GetValue {
- public:
- void run() {
- Document document = fromBson( BSON( "a" << 1 << "b" << 2.2 ) );
- ASSERT_EQUALS( 1, document["a"].getInt() );
- ASSERT_EQUALS( 1, document["a"].getInt() );
- ASSERT_EQUALS( 2.2, document["b"].getDouble() );
- ASSERT_EQUALS( 2.2, document["b"].getDouble() );
- // Missing field.
- ASSERT( document["c"].missing() );
- ASSERT( document["c"].missing() );
- assertRoundTrips( document );
- }
- };
-
- /** Get Document fields. */
- class SetField {
- public:
- void run() {
- Document original = fromBson(BSON("a" << 1 << "b" << 2.2 << "c" << 99));
-
- // Initial positions. Used at end of function to make sure nothing moved
- const Position apos = original.positionOf("a");
- const Position bpos = original.positionOf("c");
- const Position cpos = original.positionOf("c");
-
- MutableDocument md (original);
-
- // Set the first field.
- md.setField( "a" , Value( "foo" ) );
- ASSERT_EQUALS( 3U, md.peek().size() );
- ASSERT_EQUALS( "foo", md.peek()["a"].getString() );
- ASSERT_EQUALS( "foo", getNthField(md.peek(), 0).second.getString() );
- assertRoundTrips( md.peek() );
- // Set the second field.
- md["b"] = Value("bar");
- ASSERT_EQUALS( 3U, md.peek().size() );
- ASSERT_EQUALS( "bar", md.peek()["b"].getString() );
- ASSERT_EQUALS( "bar", getNthField(md.peek(), 1).second.getString() );
- assertRoundTrips( md.peek() );
-
- // Remove the second field.
- md.setField("b", Value());
- PRINT(md.peek().toString());
- ASSERT_EQUALS( 2U, md.peek().size() );
- ASSERT( md.peek()["b"].missing() );
- ASSERT_EQUALS( "a", getNthField(md.peek(), 0 ).first.toString() );
- ASSERT_EQUALS( "c", getNthField(md.peek(), 1 ).first.toString() );
- ASSERT_EQUALS( 99, md.peek()["c"].getInt() );
- assertRoundTrips( md.peek() );
-
- // Remove the first field.
- md["a"] = Value();
- ASSERT_EQUALS( 1U, md.peek().size() );
- ASSERT( md.peek()["a"].missing() );
- ASSERT_EQUALS( "c", getNthField(md.peek(), 0 ).first.toString() );
- ASSERT_EQUALS( 99, md.peek()["c"].getInt() );
- assertRoundTrips( md.peek() );
-
- // Remove the final field. Verify document is empty.
- md.remove("c");
- ASSERT( md.peek().empty() );
- ASSERT_EQUALS( 0U, md.peek().size() );
- ASSERT_EQUALS( md.peek(), Document() );
- ASSERT( !FieldIterator(md.peek()).more() );
- ASSERT( md.peek()["c"].missing() );
- assertRoundTrips( md.peek() );
-
- // Set a nested field using []
- md["x"]["y"]["z"] = Value("nested");
- ASSERT_EQUALS(md.peek()["x"]["y"]["z"], Value("nested"));
-
- // Set a nested field using setNestedField
- FieldPath xxyyzz = string("xx.yy.zz");
- md.setNestedField(xxyyzz, Value("nested"));
- ASSERT_EQUALS(md.peek().getNestedField(xxyyzz), Value("nested") );
-
- // Set a nested fields through an existing empty document
- md["xxx"] = Value(Document());
- md["xxx"]["yyy"] = Value(Document());
- FieldPath xxxyyyzzz = string("xxx.yyy.zzz");
- md.setNestedField(xxxyyyzzz, Value("nested"));
- ASSERT_EQUALS(md.peek().getNestedField(xxxyyyzzz), Value("nested") );
-
- // Make sure nothing moved
- ASSERT_EQUALS(apos, md.peek().positionOf("a"));
- ASSERT_EQUALS(bpos, md.peek().positionOf("c"));
- ASSERT_EQUALS(cpos, md.peek().positionOf("c"));
- ASSERT_EQUALS(Position(), md.peek().positionOf("d"));
- }
- };
-
- /** Document comparator. */
- class Compare {
- public:
- void run() {
- assertComparison( 0, BSONObj(), BSONObj() );
- assertComparison( 0, BSON( "a" << 1 ), BSON( "a" << 1 ) );
- assertComparison( -1, BSONObj(), BSON( "a" << 1 ) );
- assertComparison( -1, BSON( "a" << 1 ), BSON( "c" << 1 ) );
- assertComparison( 0, BSON( "a" << 1 << "r" << 2 ), BSON( "a" << 1 << "r" << 2 ) );
- assertComparison( -1, BSON( "a" << 1 ), BSON( "a" << 1 << "r" << 2 ) );
- assertComparison( 0, BSON( "a" << 2 ), BSON( "a" << 2 ) );
- assertComparison( -1, BSON( "a" << 1 ), BSON( "a" << 2 ) );
- assertComparison( -1, BSON( "a" << 1 << "b" << 1 ), BSON( "a" << 1 << "b" << 2 ) );
- // numbers sort before strings
- assertComparison( -1, BSON( "a" << 1 ), BSON( "a" << "foo" ) );
- // numbers sort before strings, even if keys compare otherwise
- assertComparison( -1, BSON( "b" << 1 ), BSON( "a" << "foo" ) );
- // null before number, even if keys compare otherwise
- assertComparison( -1, BSON( "z" << BSONNULL ), BSON( "a" << 1 ) );
- }
- public:
- int cmp( const BSONObj& a, const BSONObj& b ) {
- int result = Document::compare( fromBson( a ), fromBson( b ) );
- return // sign
- result < 0 ? -1 :
- result > 0 ? 1 :
- 0;
- }
- void assertComparison( int expectedResult, const BSONObj& a, const BSONObj& b ) {
- ASSERT_EQUALS( expectedResult, cmp( a, b ) );
- ASSERT_EQUALS( -expectedResult, cmp( b, a ) );
- if ( expectedResult == 0 ) {
- ASSERT_EQUALS( hash( a ), hash( b ) );
- }
- }
- size_t hash( const BSONObj& obj ) {
- size_t seed = 0x106e1e1;
- Document(obj).hash_combine(seed);
- return seed;
- }
- };
-
- /** Shallow copy clone of a single field Document. */
- class Clone {
- public:
- void run() {
- const Document document = fromBson( BSON( "a" << BSON( "b" << 1 ) ) );
- MutableDocument cloneOnDemand (document);
-
- // Check equality.
- ASSERT_EQUALS(document, cloneOnDemand.peek());
- // Check pointer equality of sub document.
- ASSERT_EQUALS( document["a"].getDocument().getPtr(),
- cloneOnDemand.peek()["a"].getDocument().getPtr() );
-
-
- // Change field in clone and ensure the original document's field is unchanged.
- cloneOnDemand.setField( StringData("a"), Value(2) );
- ASSERT_EQUALS( Value(1), document.getNestedField(FieldPath("a.b")) );
-
-
- // setNestedField and ensure the original document is unchanged.
-
- cloneOnDemand.reset(document);
- vector<Position> path;
- ASSERT_EQUALS( Value(1), document.getNestedField(FieldPath("a.b"), &path) );
-
- cloneOnDemand.setNestedField(path, Value(2));
-
- ASSERT_EQUALS( Value(1), document.getNestedField(FieldPath("a.b")) );
- ASSERT_EQUALS( Value(2), cloneOnDemand.peek().getNestedField(FieldPath("a.b")) );
- ASSERT_EQUALS( DOC( "a" << DOC( "b" << 1 ) ), document );
- ASSERT_EQUALS( DOC( "a" << DOC( "b" << 2 ) ), cloneOnDemand.freeze() );
- }
- };
-
- /** Shallow copy clone of a multi field Document. */
- class CloneMultipleFields {
- public:
- void run() {
- Document document =
- fromBson( fromjson( "{a:1,b:['ra',4],c:{z:1},d:'lal'}" ) );
- Document clonedDocument = document.clone();
- ASSERT_EQUALS(document, clonedDocument);
- }
- };
-
- /** FieldIterator for an empty Document. */
- class FieldIteratorEmpty {
- public:
- void run() {
- FieldIterator iterator ( (Document()) );
- ASSERT( !iterator.more() );
- }
- };
-
- /** FieldIterator for a single field Document. */
- class FieldIteratorSingle {
- public:
- void run() {
- FieldIterator iterator (fromBson( BSON( "a" << 1 ) ));
- ASSERT( iterator.more() );
- Document::FieldPair field = iterator.next();
- ASSERT_EQUALS( "a", field.first.toString() );
- ASSERT_EQUALS( 1, field.second.getInt() );
- ASSERT( !iterator.more() );
- }
- };
-
- /** FieldIterator for a multiple field Document. */
- class FieldIteratorMultiple {
- public:
- void run() {
- FieldIterator iterator (fromBson( BSON( "a" << 1 << "b" << 5.6 << "c" << "z" )));
- ASSERT( iterator.more() );
- Document::FieldPair field = iterator.next();
- ASSERT_EQUALS( "a", field.first.toString() );
- ASSERT_EQUALS( 1, field.second.getInt() );
- ASSERT( iterator.more() );
-
- Document::FieldPair field2 = iterator.next();
- ASSERT_EQUALS( "b", field2.first.toString() );
- ASSERT_EQUALS( 5.6, field2.second.getDouble() );
- ASSERT( iterator.more() );
-
- Document::FieldPair field3 = iterator.next();
- ASSERT_EQUALS( "c", field3.first.toString() );
- ASSERT_EQUALS( "z", field3.second.getString() );
- ASSERT( !iterator.more() );
- }
- };
-
- class AllTypesDoc {
- public:
- void run() {
- // These are listed in order of BSONType with some duplicates
- append("minkey", MINKEY);
- // EOO not valid in middle of BSONObj
- append("double", 1.0);
- append("c-string", "string\0after NUL"); // after NULL is ignored
- append("c++", StringData("string\0after NUL", StringData::LiteralTag()).toString());
- append("StringData", StringData("string\0after NUL", StringData::LiteralTag()));
- append("emptyObj", BSONObj());
- append("filledObj", BSON("a" << 1));
- append("emptyArray", BSON("" << BSONArray()).firstElement());
- append("filledArray", BSON("" << BSON_ARRAY(1 << "a")).firstElement());
- append("binData", BSONBinData("a\0b", 3, BinDataGeneral));
- append("binDataCustom", BSONBinData("a\0b", 3, bdtCustom));
- append("binDataUUID", BSONBinData("123456789\0abcdef", 16, bdtUUID));
- append("undefined", BSONUndefined);
- append("oid", OID());
- append("true", true);
- append("false", false);
- append("date", jsTime());
- append("null", BSONNULL);
- append("regex", BSONRegEx(".*"));
- append("regexFlags", BSONRegEx(".*", "i"));
- append("regexEmpty", BSONRegEx("", ""));
- append("dbref", BSONDBRef("foo", OID()));
- append("code", BSONCode("function() {}"));
- append("codeNul", BSONCode(StringData("var nul = '\0'", StringData::LiteralTag())));
- append("symbol", BSONSymbol("foo"));
- append("symbolNul", BSONSymbol(StringData("f\0o", StringData::LiteralTag())));
- append("codeWScope", BSONCodeWScope("asdf", BSONObj()));
- append("codeWScopeWScope", BSONCodeWScope("asdf", BSON("one" << 1)));
- append("int", 1);
- append("timestamp", Timestamp());
- append("long", 1LL);
- append("very long", 1LL << 40);
- append("maxkey", MAXKEY);
-
- const BSONArray arr = arrBuilder.arr();
-
- // can't use append any more since arrBuilder is done
- objBuilder << "mega array" << arr;
- docBuilder["mega array"] = mongo::Value(values);
-
- const BSONObj obj = objBuilder.obj();
- const Document doc = docBuilder.freeze();
-
- const BSONObj obj2 = toBson(doc);
- const Document doc2 = fromBson(obj);
-
- // logical equality
- ASSERT_EQUALS(obj, obj2);
- ASSERT_EQUALS(doc, doc2);
-
- // binary equality
- ASSERT_EQUALS(obj.objsize(), obj2.objsize());
- ASSERT_EQUALS(memcmp(obj.objdata(), obj2.objdata(), obj.objsize()), 0);
-
- // ensure sorter serialization round-trips correctly
- BufBuilder bb;
- doc.serializeForSorter(bb);
- BufReader reader(bb.buf(), bb.len());
- const Document doc3 = Document::deserializeForSorter(
- reader, Document::SorterDeserializeSettings());
- BSONObj obj3 = toBson(doc3);
- ASSERT_EQUALS(obj.objsize(), obj3.objsize());
- ASSERT_EQUALS(memcmp(obj.objdata(), obj3.objdata(), obj.objsize()), 0);
- }
-
- template <typename T>
- void append(const char* name, const T& thing) {
- objBuilder << name << thing;
- arrBuilder << thing;
- docBuilder[name] = mongo::Value(thing);
- values.push_back(mongo::Value(thing));
- }
-
- vector<mongo::Value> values;
- MutableDocument docBuilder;
- BSONObjBuilder objBuilder;
- BSONArrayBuilder arrBuilder;
- };
- } // namespace Document
-
- namespace Value {
-
- using mongo::Value;
-
- BSONObj toBson( const Value& value ) {
- if (value.missing())
- return BSONObj(); // EOO
-
- BSONObjBuilder bob;
- value.addToBsonObj( &bob, "" );
- return bob.obj();
- }
+ // setNestedField and ensure the original document is unchanged.
- Value fromBson( const BSONObj& obj ) {
- BSONElement element = obj.firstElement();
- return Value( element );
- }
+ cloneOnDemand.reset(document);
+ vector<Position> path;
+ ASSERT_EQUALS(Value(1), document.getNestedField(FieldPath("a.b"), &path));
- void assertRoundTrips( const Value& value1 ) {
- BSONObj obj1 = toBson( value1 );
- Value value2 = fromBson( obj1 );
- BSONObj obj2 = toBson( value2 );
- ASSERT_EQUALS( obj1, obj2 );
- ASSERT_EQUALS(value1, value2);
- ASSERT_EQUALS(value1.getType(), value2.getType());
- }
+ cloneOnDemand.setNestedField(path, Value(2));
- class BSONArrayTest {
- public:
- void run() {
- ASSERT_EQUALS(Value(BSON_ARRAY(1 << 2 << 3)), DOC_ARRAY(1 << 2 << 3));
- ASSERT_EQUALS(Value(BSONArray()), Value(vector<Value>()));
- }
- };
-
- /** Int type. */
- class Int {
- public:
- void run() {
- Value value = Value( 5 );
- ASSERT_EQUALS( 5, value.getInt() );
- ASSERT_EQUALS( 5, value.getLong() );
- ASSERT_EQUALS( 5, value.getDouble() );
- ASSERT_EQUALS( NumberInt, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Long type. */
- class Long {
- public:
- void run() {
- Value value = Value( 99LL );
- ASSERT_EQUALS( 99, value.getLong() );
- ASSERT_EQUALS( 99, value.getDouble() );
- ASSERT_EQUALS( NumberLong, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Double type. */
- class Double {
- public:
- void run() {
- Value value = Value( 5.5 );
- ASSERT_EQUALS( 5.5, value.getDouble() );
- ASSERT_EQUALS( NumberDouble, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** String type. */
- class String {
- public:
- void run() {
- Value value = Value( "foo" );
- ASSERT_EQUALS( "foo", value.getString() );
- ASSERT_EQUALS( mongo::String, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** String with a null character. */
- class StringWithNull {
- public:
- void run() {
- string withNull( "a\0b", 3 );
- BSONObj objWithNull = BSON( "" << withNull );
- ASSERT_EQUALS( withNull, objWithNull[ "" ].str() );
- Value value = fromBson( objWithNull );
- ASSERT_EQUALS( withNull, value.getString() );
- assertRoundTrips( value );
- }
- };
-
- /** Date type. */
- class Date {
- public:
- void run() {
- Value value = Value(Date_t::fromMillisSinceEpoch(999));
- ASSERT_EQUALS( 999, value.getDate() );
- ASSERT_EQUALS( mongo::Date, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Timestamp type. */
- class JSTimestamp {
- public:
- void run() {
- Value value = Value( Timestamp( 777 ) );
- ASSERT( Timestamp( 777 ) == value.getTimestamp() );
- ASSERT_EQUALS( mongo::bsonTimestamp, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Document with no fields. */
- class EmptyDocument {
- public:
- void run() {
- mongo::Document document = mongo::Document();
- Value value = Value( document );
- ASSERT_EQUALS( document.getPtr(), value.getDocument().getPtr() );
- ASSERT_EQUALS( Object, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Document type. */
- class Document {
- public:
- void run() {
- mongo::MutableDocument md;
- md.addField( "a", Value( 5 ) );
- md.addField( "apple", Value( "rrr" ) );
- md.addField( "banana", Value( -.3 ) );
- mongo::Document document = md.freeze();
-
- Value value = Value( document );
- // Check document pointers are equal.
- ASSERT_EQUALS( document.getPtr(), value.getDocument().getPtr() );
- // Check document contents.
- ASSERT_EQUALS( 5, document["a"].getInt() );
- ASSERT_EQUALS( "rrr", document["apple"].getString() );
- ASSERT_EQUALS( -.3, document["banana"].getDouble() );
- ASSERT_EQUALS( Object, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Array with no elements. */
- class EmptyArray {
- public:
- void run() {
- vector<Value> array;
- Value value (array);
- const vector<Value>& array2 = value.getArray();
-
- ASSERT( array2.empty() );
- ASSERT_EQUALS( Array, value.getType() );
- ASSERT_EQUALS( 0U, value.getArrayLength() );
- assertRoundTrips( value );
- }
- };
-
- /** Array type. */
- class Array {
- public:
- void run() {
- vector<Value> array;
- array.push_back( Value( 5 ) );
- array.push_back( Value( "lala" ) );
- array.push_back( Value( 3.14 ) );
- Value value = Value( array );
- const vector<Value>& array2 = value.getArray();
-
- ASSERT( !array2.empty() );
- ASSERT_EQUALS( array2.size(), 3U);
- ASSERT_EQUALS( 5, array2[0].getInt() );
- ASSERT_EQUALS( "lala", array2[1].getString() );
- ASSERT_EQUALS( 3.14, array2[2].getDouble() );
- ASSERT_EQUALS( mongo::Array, value.getType() );
- ASSERT_EQUALS( 3U, value.getArrayLength() );
- assertRoundTrips( value );
- }
- };
-
- /** Oid type. */
- class Oid {
- public:
- void run() {
- Value value =
- fromBson( BSON( "" << OID( "abcdefabcdefabcdefabcdef" ) ) );
- ASSERT_EQUALS( OID( "abcdefabcdefabcdefabcdef" ), value.getOid() );
- ASSERT_EQUALS( jstOID, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Bool type. */
- class Bool {
- public:
- void run() {
- Value value = fromBson( BSON( "" << true ) );
- ASSERT_EQUALS( true, value.getBool() );
- ASSERT_EQUALS( mongo::Bool, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Regex type. */
- class Regex {
- public:
- void run() {
- Value value = fromBson( fromjson( "{'':/abc/}" ) );
- ASSERT_EQUALS( string("abc"), value.getRegex() );
- ASSERT_EQUALS( RegEx, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Symbol type (currently unsupported). */
- class Symbol {
- public:
- void run() {
- Value value (BSONSymbol("FOOBAR"));
- ASSERT_EQUALS( "FOOBAR", value.getSymbol() );
- ASSERT_EQUALS( mongo::Symbol, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Undefined type. */
- class Undefined {
- public:
- void run() {
- Value value = Value(BSONUndefined);
- ASSERT_EQUALS( mongo::Undefined, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** Null type. */
- class Null {
- public:
- void run() {
- Value value = Value(BSONNULL);
- ASSERT_EQUALS( jstNULL, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** True value. */
- class True {
- public:
- void run() {
- Value value = Value(true);
- ASSERT_EQUALS( true, value.getBool() );
- ASSERT_EQUALS( mongo::Bool, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** False value. */
- class False {
- public:
- void run() {
- Value value = Value(false);
- ASSERT_EQUALS( false, value.getBool() );
- ASSERT_EQUALS( mongo::Bool, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** -1 value. */
- class MinusOne {
- public:
- void run() {
- Value value = Value(-1);
- ASSERT_EQUALS( -1, value.getInt() );
- ASSERT_EQUALS( NumberInt, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** 0 value. */
- class Zero {
- public:
- void run() {
- Value value = Value(0);
- ASSERT_EQUALS( 0, value.getInt() );
- ASSERT_EQUALS( NumberInt, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- /** 1 value. */
- class One {
- public:
- void run() {
- Value value = Value(1);
- ASSERT_EQUALS( 1, value.getInt() );
- ASSERT_EQUALS( NumberInt, value.getType() );
- assertRoundTrips( value );
- }
- };
-
- namespace Coerce {
-
- class ToBoolBase {
- public:
- virtual ~ToBoolBase() {
- }
- void run() {
- ASSERT_EQUALS( expected(), value().coerceToBool() );
- }
- protected:
- virtual Value value() = 0;
- virtual bool expected() = 0;
- };
-
- class ToBoolTrue : public ToBoolBase {
- bool expected() { return true; }
- };
-
- class ToBoolFalse : public ToBoolBase {
- bool expected() { return false; }
- };
-
- /** Coerce 0 to bool. */
- class ZeroIntToBool : public ToBoolFalse {
- Value value() { return Value( 0 ); }
- };
-
- /** Coerce -1 to bool. */
- class NonZeroIntToBool : public ToBoolTrue {
- Value value() { return Value( -1 ); }
- };
-
- /** Coerce 0LL to bool. */
- class ZeroLongToBool : public ToBoolFalse {
- Value value() { return Value( 0LL ); }
- };
-
- /** Coerce 5LL to bool. */
- class NonZeroLongToBool : public ToBoolTrue {
- Value value() { return Value( 5LL ); }
- };
-
- /** Coerce 0.0 to bool. */
- class ZeroDoubleToBool : public ToBoolFalse {
- Value value() { return Value( 0 ); }
- };
-
- /** Coerce -1.3 to bool. */
- class NonZeroDoubleToBool : public ToBoolTrue {
- Value value() { return Value( -1.3 ); }
- };
-
- /** Coerce "" to bool. */
- class StringToBool : public ToBoolTrue {
- Value value() { return Value( "" ); }
- };
-
- /** Coerce {} to bool. */
- class ObjectToBool : public ToBoolTrue {
- Value value() {
- return Value( mongo::Document() );
- }
- };
-
- /** Coerce [] to bool. */
- class ArrayToBool : public ToBoolTrue {
- Value value() {
- return Value( vector<Value>() );
- }
- };
-
- /** Coerce Date(0) to bool. */
- class DateToBool : public ToBoolTrue {
- Value value() { return Value(Date_t{}); }
- };
-
- /** Coerce js literal regex to bool. */
- class RegexToBool : public ToBoolTrue {
- Value value() { return fromBson( fromjson( "{''://}" ) ); }
- };
-
- /** Coerce true to bool. */
- class TrueToBool : public ToBoolTrue {
- Value value() { return fromBson( BSON( "" << true ) ); }
- };
-
- /** Coerce false to bool. */
- class FalseToBool : public ToBoolFalse {
- Value value() { return fromBson( BSON( "" << false ) ); }
- };
-
- /** Coerce null to bool. */
- class NullToBool : public ToBoolFalse {
- Value value() { return Value(BSONNULL); }
- };
-
- /** Coerce undefined to bool. */
- class UndefinedToBool : public ToBoolFalse {
- Value value() { return Value(BSONUndefined); }
- };
-
- class ToIntBase {
- public:
- virtual ~ToIntBase() {
- }
- void run() {
- if (asserts())
- ASSERT_THROWS( value().coerceToInt(), UserException );
- else
- ASSERT_EQUALS( expected(), value().coerceToInt() );
- }
- protected:
- virtual Value value() = 0;
- virtual int expected() { return 0; }
- virtual bool asserts() { return false; }
- };
-
- /** Coerce -5 to int. */
- class IntToInt : public ToIntBase {
- Value value() { return Value( -5 ); }
- int expected() { return -5; }
- };
-
- /** Coerce long to int. */
- class LongToInt : public ToIntBase {
- Value value() { return Value( 0xff00000007LL ); }
- int expected() { return 7; }
- };
-
- /** Coerce 9.8 to int. */
- class DoubleToInt : public ToIntBase {
- Value value() { return Value( 9.8 ); }
- int expected() { return 9; }
- };
-
- /** Coerce null to int. */
- class NullToInt : public ToIntBase {
- Value value() { return Value(BSONNULL); }
- bool asserts() { return true; }
- };
-
- /** Coerce undefined to int. */
- class UndefinedToInt : public ToIntBase {
- Value value() { return Value(BSONUndefined); }
- bool asserts() { return true; }
- };
-
- /** Coerce "" to int unsupported. */
- class StringToInt {
- public:
- void run() {
- ASSERT_THROWS( Value( "" ).coerceToInt(), UserException );
- }
- };
-
- class ToLongBase {
- public:
- virtual ~ToLongBase() {
- }
- void run() {
- if (asserts())
- ASSERT_THROWS( value().coerceToLong(), UserException );
- else
- ASSERT_EQUALS( expected(), value().coerceToLong() );
- }
- protected:
- virtual Value value() = 0;
- virtual long long expected() { return 0; }
- virtual bool asserts() { return false; }
- };
-
- /** Coerce -5 to long. */
- class IntToLong : public ToLongBase {
- Value value() { return Value( -5 ); }
- long long expected() { return -5; }
- };
-
- /** Coerce long to long. */
- class LongToLong : public ToLongBase {
- Value value() { return Value( 0xff00000007LL ); }
- long long expected() { return 0xff00000007LL; }
- };
-
- /** Coerce 9.8 to long. */
- class DoubleToLong : public ToLongBase {
- Value value() { return Value( 9.8 ); }
- long long expected() { return 9; }
- };
-
- /** Coerce null to long. */
- class NullToLong : public ToLongBase {
- Value value() { return Value(BSONNULL); }
- bool asserts() { return true; }
- };
-
- /** Coerce undefined to long. */
- class UndefinedToLong : public ToLongBase {
- Value value() { return Value(BSONUndefined); }
- bool asserts() { return true; }
- };
-
- /** Coerce string to long unsupported. */
- class StringToLong {
- public:
- void run() {
- ASSERT_THROWS( Value( "" ).coerceToLong(), UserException );
- }
- };
-
- class ToDoubleBase {
- public:
- virtual ~ToDoubleBase() {
- }
- void run() {
- if (asserts())
- ASSERT_THROWS( value().coerceToDouble(), UserException );
- else
- ASSERT_EQUALS( expected(), value().coerceToDouble() );
- }
- protected:
- virtual Value value() = 0;
- virtual double expected() { return 0; }
- virtual bool asserts() { return false; }
- };
-
- /** Coerce -5 to double. */
- class IntToDouble : public ToDoubleBase {
- Value value() { return Value( -5 ); }
- double expected() { return -5; }
- };
-
- /** Coerce long to double. */
- class LongToDouble : public ToDoubleBase {
- Value value() {
- // A long that cannot be exactly represented as a double.
- return Value( static_cast<double>( 0x8fffffffffffffffLL ) );
- }
- double expected() { return static_cast<double>( 0x8fffffffffffffffLL ); }
- };
-
- /** Coerce double to double. */
- class DoubleToDouble : public ToDoubleBase {
- Value value() { return Value( 9.8 ); }
- double expected() { return 9.8; }
- };
-
- /** Coerce null to double. */
- class NullToDouble : public ToDoubleBase {
- Value value() { return Value(BSONNULL); }
- bool asserts() { return true; }
- };
-
- /** Coerce undefined to double. */
- class UndefinedToDouble : public ToDoubleBase {
- Value value() { return Value(BSONUndefined); }
- bool asserts() { return true; }
- };
-
- /** Coerce string to double unsupported. */
- class StringToDouble {
- public:
- void run() {
- ASSERT_THROWS( Value( "" ).coerceToDouble(), UserException );
- }
- };
-
- class ToDateBase {
- public:
- virtual ~ToDateBase() {
- }
- void run() {
- ASSERT_EQUALS( expected(), value().coerceToDate() );
- }
- protected:
- virtual Value value() = 0;
- virtual long long expected() = 0;
- };
-
- /** Coerce date to date. */
- class DateToDate : public ToDateBase {
- Value value() { return Value(Date_t::fromMillisSinceEpoch(888)); }
- long long expected() { return 888; }
- };
-
- /**
- * Convert timestamp to date. This extracts the time portion of the timestamp, which
- * is different from BSON behavior of interpreting all bytes as a date.
- */
- class TimestampToDate : public ToDateBase {
- Value value() {
- return Value( Timestamp( 777, 666 ) );
- }
- long long expected() { return 777 * 1000; }
- };
-
- /** Coerce string to date unsupported. */
- class StringToDate {
- public:
- void run() {
- ASSERT_THROWS( Value( "" ).coerceToDate(), UserException );
- }
- };
-
- class ToStringBase {
- public:
- virtual ~ToStringBase() {
- }
- void run() {
- ASSERT_EQUALS( expected(), value().coerceToString() );
- }
- protected:
- virtual Value value() = 0;
- virtual string expected() { return ""; }
- };
-
- /** Coerce -0.2 to string. */
- class DoubleToString : public ToStringBase {
- Value value() { return Value( -0.2 ); }
- string expected() { return "-0.2"; }
- };
-
- /** Coerce -4 to string. */
- class IntToString : public ToStringBase {
- Value value() { return Value( -4 ); }
- string expected() { return "-4"; }
- };
-
- /** Coerce 10000LL to string. */
- class LongToString : public ToStringBase {
- Value value() { return Value( 10000LL ); }
- string expected() { return "10000"; }
- };
-
- /** Coerce string to string. */
- class StringToString : public ToStringBase {
- Value value() { return Value( "fO_o" ); }
- string expected() { return "fO_o"; }
- };
-
- /** Coerce timestamp to string. */
- class TimestampToString : public ToStringBase {
- Value value() {
- return Value( Timestamp( 1, 2 ) );
- }
- string expected() { return Timestamp( 1, 2 ).toStringPretty(); }
- };
-
- /** Coerce date to string. */
- class DateToString : public ToStringBase {
- Value value() { return Value(Date_t::fromMillisSinceEpoch(1234567890LL*1000)); }
- string expected() { return "2009-02-13T23:31:30"; } // from js
- };
-
- /** Coerce null to string. */
- class NullToString : public ToStringBase {
- Value value() { return Value(BSONNULL); }
- };
-
- /** Coerce undefined to string. */
- class UndefinedToString : public ToStringBase {
- Value value() { return Value(BSONUndefined); }
- };
-
- /** Coerce document to string unsupported. */
- class DocumentToString {
- public:
- void run() {
- ASSERT_THROWS( Value
- ( mongo::Document() ).coerceToString(),
- UserException );
- }
- };
-
- /** Coerce timestamp to timestamp. */
- class TimestampToTimestamp {
- public:
- void run() {
- Value value = Value( Timestamp( 1010 ) );
- ASSERT( Timestamp( 1010 ) == value.coerceToTimestamp() );
- }
- };
-
- /** Coerce date to timestamp unsupported. */
- class DateToTimestamp {
- public:
- void run() {
- ASSERT_THROWS( Value(Date_t::fromMillisSinceEpoch(1010)).coerceToTimestamp(),
- UserException );
- }
- };
-
- } // namespace Coerce
-
- /** Get the "widest" of two numeric types. */
- class GetWidestNumeric {
- public:
- void run() {
- using mongo::Undefined;
-
- // Numeric types.
- assertWidest( NumberInt, NumberInt, NumberInt );
- assertWidest( NumberLong, NumberInt, NumberLong );
- assertWidest( NumberDouble, NumberInt, NumberDouble );
- assertWidest( NumberLong, NumberLong, NumberLong );
- assertWidest( NumberDouble, NumberLong, NumberDouble );
- assertWidest( NumberDouble, NumberDouble, NumberDouble );
-
- // Missing value and numeric types (result Undefined).
- assertWidest( Undefined, NumberInt, Undefined );
- assertWidest( Undefined, NumberInt, Undefined );
- assertWidest( Undefined, NumberLong, jstNULL );
- assertWidest( Undefined, NumberLong, Undefined );
- assertWidest( Undefined, NumberDouble, jstNULL );
- assertWidest( Undefined, NumberDouble, Undefined );
-
- // Missing value types (result Undefined).
- assertWidest( Undefined, jstNULL, jstNULL );
- assertWidest( Undefined, jstNULL, Undefined );
- assertWidest( Undefined, Undefined, Undefined );
-
- // Other types (result Undefined).
- assertWidest( Undefined, NumberInt, mongo::Bool );
- assertWidest( Undefined, mongo::String, NumberDouble );
- }
- private:
- void assertWidest( BSONType expectedWidest, BSONType a, BSONType b ) {
- ASSERT_EQUALS( expectedWidest, Value::getWidestNumeric( a, b ) );
- ASSERT_EQUALS( expectedWidest, Value::getWidestNumeric( b, a ) );
- }
- };
-
- /** Add a Value to a BSONObj. */
- class AddToBsonObj {
- public:
- void run() {
- BSONObjBuilder bob;
- Value( 4.4 ).addToBsonObj( &bob, "a" );
- Value( 22 ).addToBsonObj( &bob, "b" );
- Value( "astring" ).addToBsonObj( &bob, "c" );
- ASSERT_EQUALS( BSON( "a" << 4.4 << "b" << 22 << "c" << "astring" ), bob.obj() );
- }
- };
-
- /** Add a Value to a BSONArray. */
- class AddToBsonArray {
- public:
- void run() {
- BSONArrayBuilder bab;
- Value( 4.4 ).addToBsonArray( &bab );
- Value( 22 ).addToBsonArray( &bab );
- Value( "astring" ).addToBsonArray( &bab );
- ASSERT_EQUALS( BSON_ARRAY( 4.4 << 22 << "astring" ), bab.arr() );
- }
- };
-
- /** Value comparator. */
- class Compare {
- public:
- void run() {
- BSONObjBuilder undefinedBuilder;
- undefinedBuilder.appendUndefined( "" );
- BSONObj undefined = undefinedBuilder.obj();
-
- // Undefined / null.
- assertComparison( 0, undefined, undefined );
- assertComparison( -1, undefined, BSON( "" << BSONNULL ) );
- assertComparison( 0, BSON( "" << BSONNULL ), BSON( "" << BSONNULL ) );
-
- // Undefined / null with other types.
- assertComparison( -1, undefined, BSON( "" << 1 ) );
- assertComparison( -1, undefined, BSON( "" << "bar" ) );
- assertComparison( -1, BSON( "" << BSONNULL ), BSON( "" << -1 ) );
- assertComparison( -1, BSON( "" << BSONNULL ), BSON( "" << "bar" ) );
-
- // Numeric types.
- assertComparison( 0, 5, 5LL );
- assertComparison( 0, -2, -2.0 );
- assertComparison( 0, 90LL, 90.0 );
- assertComparison( -1, 5, 6LL );
- assertComparison( -1, -2, 2.1 );
- assertComparison( 1, 90LL, 89.999 );
- assertComparison( -1, 90, 90.1 );
- assertComparison( 0, numeric_limits<double>::quiet_NaN(),
- numeric_limits<double>::signaling_NaN() );
- assertComparison( -1, numeric_limits<double>::quiet_NaN(), 5 );
-
- // strings compare between numbers and objects
- assertComparison( 1, "abc", 90 );
- assertComparison( -1, "abc", BSON( "a" << "b" ) );
-
- // String comparison.
- assertComparison( -1, "", "a" );
- assertComparison( 0, "a", "a" );
- assertComparison( -1, "a", "b" );
- assertComparison( -1, "aa", "b" );
- assertComparison( 1, "bb", "b" );
- assertComparison( 1, "bb", "b" );
- assertComparison( 1, "b-", "b" );
- assertComparison( -1, "b-", "ba" );
- // With a null character.
- assertComparison( 1, string( "a\0", 2 ), "a" );
-
- // Object.
- assertComparison( 0, fromjson( "{'':{}}" ), fromjson( "{'':{}}" ) );
- assertComparison( 0, fromjson( "{'':{x:1}}" ), fromjson( "{'':{x:1}}" ) );
- assertComparison( -1, fromjson( "{'':{}}" ), fromjson( "{'':{x:1}}" ) );
- assertComparison( -1, fromjson( "{'':{'z': 1}}" ), fromjson( "{'':{'a': 'a'}}") );
-
- // Array.
- assertComparison( 0, fromjson( "{'':[]}" ), fromjson( "{'':[]}" ) );
- assertComparison( -1, fromjson( "{'':[0]}" ), fromjson( "{'':[1]}" ) );
- assertComparison( -1, fromjson( "{'':[0,0]}" ), fromjson( "{'':[1]}" ) );
- assertComparison( -1, fromjson( "{'':[0]}" ), fromjson( "{'':[0,0]}" ) );
- assertComparison( -1, fromjson( "{'':[0]}" ), fromjson( "{'':['']}" ) );
-
- // OID.
- assertComparison( 0, OID( "abcdefabcdefabcdefabcdef" ),
- OID( "abcdefabcdefabcdefabcdef" ) );
- assertComparison( 1, OID( "abcdefabcdefabcdefabcdef" ),
- OID( "010101010101010101010101" ) );
-
- // Bool.
- assertComparison( 0, true, true );
- assertComparison( 0, false, false );
- assertComparison( 1, true, false );
-
- // Date.
- assertComparison( 0,
- Date_t::fromMillisSinceEpoch( 555 ),
- Date_t::fromMillisSinceEpoch( 555 ) );
- assertComparison( 1,
- Date_t::fromMillisSinceEpoch( 555 ),
- Date_t::fromMillisSinceEpoch( 554 ) );
- // Negative date.
- assertComparison( 1,
- Date_t::fromMillisSinceEpoch( 0 ),
- Date_t::fromMillisSinceEpoch( -1 ) );
-
- // Regex.
- assertComparison( 0, fromjson( "{'':/a/}" ), fromjson( "{'':/a/}" ) );
- assertComparison( -1, fromjson( "{'':/a/}" ), fromjson( "{'':/a/i}" ) );
- assertComparison( -1, fromjson( "{'':/a/}" ), fromjson( "{'':/aa/}" ) );
-
- // Timestamp.
- assertComparison( 0, Timestamp( 1234 ), Timestamp( 1234 ) );
- assertComparison( -1, Timestamp( 4 ), Timestamp( 1234 ) );
-
- // Cross-type comparisons. Listed in order of canonical types.
- assertComparison(-1, Value(mongo::MINKEY), Value());
- assertComparison(0, Value(), Value());
- assertComparison(0, Value(), Value(BSONUndefined));
- assertComparison(-1, Value(BSONUndefined), Value(BSONNULL));
- assertComparison(-1, Value(BSONNULL), Value(1));
- assertComparison(0, Value(1), Value(1LL));
- assertComparison(0, Value(1), Value(1.0));
- assertComparison(-1, Value(1), Value("string"));
- assertComparison(0, Value("string"), Value(BSONSymbol("string")));
- assertComparison(-1, Value("string"), Value(mongo::Document()));
- assertComparison(-1, Value(mongo::Document()), Value(vector<Value>()));
- assertComparison(-1, Value(vector<Value>()), Value(BSONBinData("", 0, MD5Type)));
- assertComparison(-1, Value(BSONBinData("", 0, MD5Type)), Value(mongo::OID()));
- assertComparison(-1, Value(mongo::OID()), Value(false));
- assertComparison(-1, Value(false), Value(Date_t()));
- assertComparison(-1, Value(Date_t()), Value(Timestamp()));
- assertComparison(-1, Value(Timestamp()), Value(BSONRegEx("")));
- assertComparison(-1, Value(BSONRegEx("")), Value(BSONDBRef("", mongo::OID())));
- assertComparison(-1, Value(BSONDBRef("", mongo::OID())), Value(BSONCode("")));
- assertComparison(-1, Value(BSONCode("")), Value(BSONCodeWScope("", BSONObj())));
- assertComparison(-1, Value(BSONCodeWScope("", BSONObj())), Value(mongo::MAXKEY));
- }
- private:
- template<class T,class U>
- void assertComparison( int expectedResult, const T& a, const U& b ) {
- assertComparison( expectedResult, BSON( "" << a ), BSON( "" << b ) );
- }
- void assertComparison( int expectedResult, const Timestamp& a, const Timestamp& b ) {
- BSONObjBuilder first;
- first.append( "", a );
- BSONObjBuilder second;
- second.append( "", b );
- assertComparison( expectedResult, first.obj(), second.obj() );
- }
- int sign(int cmp) {
- if (cmp == 0) return 0;
- else if (cmp < 0) return -1;
- else return 1;
- }
- int cmp( const Value& a, const Value& b ) {
- return sign(Value::compare(a, b));
- }
- void assertComparison( int expectedResult, const BSONObj& a, const BSONObj& b ) {
- assertComparison(expectedResult, fromBson(a), fromBson(b));
- }
- void assertComparison(int expectedResult, const Value& a, const Value& b) {
- mongo::unittest::log() <<
- "testing " << a.toString() << " and " << b.toString() << endl;
- // reflexivity
- ASSERT_EQUALS(0, cmp(a, a));
- ASSERT_EQUALS(0, cmp(b, b));
-
- // symmetry
- ASSERT_EQUALS( expectedResult, cmp( a, b ) );
- ASSERT_EQUALS( -expectedResult, cmp( b, a ) );
-
- if ( expectedResult == 0 ) {
- // equal values must hash equally.
- ASSERT_EQUALS( hash( a ), hash( b ) );
- }
- else {
- // unequal values must hash unequally.
- // (not true in general but we should error if it fails in any of these cases)
- ASSERT_NOT_EQUALS( hash( a ), hash( b ) );
- }
-
- // same as BSON
- ASSERT_EQUALS(expectedResult, sign(toBson(a).firstElement().woCompare(
- toBson(b).firstElement())));
- }
- size_t hash(const Value& v) {
- size_t seed = 0xf00ba6;
- v.hash_combine( seed );
- return seed;
- }
- };
-
- class SubFields {
- public:
- void run() {
- const Value val = fromBson(fromjson(
- "{'': {a: [{x:1, b:[1, {y:1, c:1234, z:1}, 1]}]}}"));
- // ^ this outer object is removed by fromBson
-
- ASSERT(val.getType() == mongo::Object);
-
- ASSERT(val[999].missing());
- ASSERT(val["missing"].missing());
- ASSERT(val["a"].getType() == mongo::Array);
-
- ASSERT(val["a"][999].missing());
- ASSERT(val["a"]["missing"].missing());
- ASSERT(val["a"][0].getType() == mongo::Object);
-
- ASSERT(val["a"][0][999].missing());
- ASSERT(val["a"][0]["missing"].missing());
- ASSERT(val["a"][0]["b"].getType() == mongo::Array);
-
- ASSERT(val["a"][0]["b"][999].missing());
- ASSERT(val["a"][0]["b"]["missing"].missing());
- ASSERT(val["a"][0]["b"][1].getType() == mongo::Object);
-
- ASSERT(val["a"][0]["b"][1][999].missing());
- ASSERT(val["a"][0]["b"][1]["missing"].missing());
- ASSERT(val["a"][0]["b"][1]["c"].getType() == mongo::NumberInt);
- ASSERT_EQUALS(val["a"][0]["b"][1]["c"].getInt(), 1234);
- }
- };
-
-
- class SerializationOfMissingForSorter {
- // Can't be tested in AllTypesDoc since missing values are omitted when adding to BSON.
- public:
- void run() {
- const Value missing;
- const Value arrayOfMissing = Value(vector<Value>(10));
-
- BufBuilder bb;
- missing.serializeForSorter(bb);
- arrayOfMissing.serializeForSorter(bb);
-
- BufReader reader(bb.buf(), bb.len());
- ASSERT_EQUALS(
- missing,
- Value::deserializeForSorter(reader, Value::SorterDeserializeSettings()));
- ASSERT_EQUALS(
- arrayOfMissing,
- Value::deserializeForSorter(reader, Value::SorterDeserializeSettings()));
- }
- };
- } // namespace Value
-
- class All : public Suite {
- public:
- All() : Suite( "document" ) {
- }
- void setupTests() {
- add<Document::Create>();
- add<Document::CreateFromBsonObj>();
- add<Document::AddField>();
- add<Document::GetValue>();
- add<Document::SetField>();
- add<Document::Compare>();
- add<Document::Clone>();
- add<Document::CloneMultipleFields>();
- add<Document::FieldIteratorEmpty>();
- add<Document::FieldIteratorSingle>();
- add<Document::FieldIteratorMultiple>();
- add<Document::AllTypesDoc>();
-
- add<Value::BSONArrayTest>();
- add<Value::Int>();
- add<Value::Long>();
- add<Value::Double>();
- add<Value::String>();
- add<Value::StringWithNull>();
- add<Value::Date>();
- add<Value::JSTimestamp>();
- add<Value::EmptyDocument>();
- add<Value::EmptyArray>();
- add<Value::Array>();
- add<Value::Oid>();
- add<Value::Bool>();
- add<Value::Regex>();
- add<Value::Symbol>();
- add<Value::Undefined>();
- add<Value::Null>();
- add<Value::True>();
- add<Value::False>();
- add<Value::MinusOne>();
- add<Value::Zero>();
- add<Value::One>();
-
- add<Value::Coerce::ZeroIntToBool>();
- add<Value::Coerce::NonZeroIntToBool>();
- add<Value::Coerce::ZeroLongToBool>();
- add<Value::Coerce::NonZeroLongToBool>();
- add<Value::Coerce::ZeroDoubleToBool>();
- add<Value::Coerce::NonZeroDoubleToBool>();
- add<Value::Coerce::StringToBool>();
- add<Value::Coerce::ObjectToBool>();
- add<Value::Coerce::ArrayToBool>();
- add<Value::Coerce::DateToBool>();
- add<Value::Coerce::RegexToBool>();
- add<Value::Coerce::TrueToBool>();
- add<Value::Coerce::FalseToBool>();
- add<Value::Coerce::NullToBool>();
- add<Value::Coerce::UndefinedToBool>();
- add<Value::Coerce::IntToInt>();
- add<Value::Coerce::LongToInt>();
- add<Value::Coerce::DoubleToInt>();
- add<Value::Coerce::NullToInt>();
- add<Value::Coerce::UndefinedToInt>();
- add<Value::Coerce::StringToInt>();
- add<Value::Coerce::IntToLong>();
- add<Value::Coerce::LongToLong>();
- add<Value::Coerce::DoubleToLong>();
- add<Value::Coerce::NullToLong>();
- add<Value::Coerce::UndefinedToLong>();
- add<Value::Coerce::StringToLong>();
- add<Value::Coerce::IntToDouble>();
- add<Value::Coerce::LongToDouble>();
- add<Value::Coerce::DoubleToDouble>();
- add<Value::Coerce::NullToDouble>();
- add<Value::Coerce::UndefinedToDouble>();
- add<Value::Coerce::StringToDouble>();
- add<Value::Coerce::DateToDate>();
- add<Value::Coerce::TimestampToDate>();
- add<Value::Coerce::StringToDate>();
- add<Value::Coerce::DoubleToString>();
- add<Value::Coerce::IntToString>();
- add<Value::Coerce::LongToString>();
- add<Value::Coerce::StringToString>();
- add<Value::Coerce::TimestampToString>();
- add<Value::Coerce::DateToString>();
- add<Value::Coerce::NullToString>();
- add<Value::Coerce::UndefinedToString>();
- add<Value::Coerce::DocumentToString>();
- add<Value::Coerce::TimestampToTimestamp>();
- add<Value::Coerce::DateToTimestamp>();
-
- add<Value::GetWidestNumeric>();
- add<Value::AddToBsonObj>();
- add<Value::AddToBsonArray>();
- add<Value::Compare>();
- add<Value::SubFields>();
- add<Value::SerializationOfMissingForSorter>();
+ ASSERT_EQUALS(Value(1), document.getNestedField(FieldPath("a.b")));
+ ASSERT_EQUALS(Value(2), cloneOnDemand.peek().getNestedField(FieldPath("a.b")));
+ ASSERT_EQUALS(DOC("a" << DOC("b" << 1)), document);
+ ASSERT_EQUALS(DOC("a" << DOC("b" << 2)), cloneOnDemand.freeze());
+ }
+};
+
+/** Shallow copy clone of a multi field Document. */
+class CloneMultipleFields {
+public:
+ void run() {
+ Document document = fromBson(fromjson("{a:1,b:['ra',4],c:{z:1},d:'lal'}"));
+ Document clonedDocument = document.clone();
+ ASSERT_EQUALS(document, clonedDocument);
+ }
+};
+
+/** FieldIterator for an empty Document. */
+class FieldIteratorEmpty {
+public:
+ void run() {
+ FieldIterator iterator((Document()));
+ ASSERT(!iterator.more());
+ }
+};
+
+/** FieldIterator for a single field Document. */
+class FieldIteratorSingle {
+public:
+ void run() {
+ FieldIterator iterator(fromBson(BSON("a" << 1)));
+ ASSERT(iterator.more());
+ Document::FieldPair field = iterator.next();
+ ASSERT_EQUALS("a", field.first.toString());
+ ASSERT_EQUALS(1, field.second.getInt());
+ ASSERT(!iterator.more());
+ }
+};
+
+/** FieldIterator for a multiple field Document. */
+class FieldIteratorMultiple {
+public:
+ void run() {
+ FieldIterator iterator(fromBson(BSON("a" << 1 << "b" << 5.6 << "c"
+ << "z")));
+ ASSERT(iterator.more());
+ Document::FieldPair field = iterator.next();
+ ASSERT_EQUALS("a", field.first.toString());
+ ASSERT_EQUALS(1, field.second.getInt());
+ ASSERT(iterator.more());
+
+ Document::FieldPair field2 = iterator.next();
+ ASSERT_EQUALS("b", field2.first.toString());
+ ASSERT_EQUALS(5.6, field2.second.getDouble());
+ ASSERT(iterator.more());
+
+ Document::FieldPair field3 = iterator.next();
+ ASSERT_EQUALS("c", field3.first.toString());
+ ASSERT_EQUALS("z", field3.second.getString());
+ ASSERT(!iterator.more());
+ }
+};
+
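+/** Builds one Document containing every BSON type and verifies it round-trips through BSON and the sorter format. */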
+class AllTypesDoc {
+public:
+ void run() {
+        // These are listed in order of BSONType, with some duplicates.
+ append("minkey", MINKEY);
+ // EOO not valid in middle of BSONObj
+ append("double", 1.0);
+        append("c-string", "string\0after NUL");  // text after the NUL is ignored for C strings
+ append("c++", StringData("string\0after NUL", StringData::LiteralTag()).toString());
+ append("StringData", StringData("string\0after NUL", StringData::LiteralTag()));
+ append("emptyObj", BSONObj());
+ append("filledObj", BSON("a" << 1));
+ append("emptyArray", BSON("" << BSONArray()).firstElement());
+ append("filledArray", BSON("" << BSON_ARRAY(1 << "a")).firstElement());
+ append("binData", BSONBinData("a\0b", 3, BinDataGeneral));
+ append("binDataCustom", BSONBinData("a\0b", 3, bdtCustom));
+ append("binDataUUID", BSONBinData("123456789\0abcdef", 16, bdtUUID));
+ append("undefined", BSONUndefined);
+ append("oid", OID());
+ append("true", true);
+ append("false", false);
+ append("date", jsTime());
+ append("null", BSONNULL);
+ append("regex", BSONRegEx(".*"));
+ append("regexFlags", BSONRegEx(".*", "i"));
+ append("regexEmpty", BSONRegEx("", ""));
+ append("dbref", BSONDBRef("foo", OID()));
+ append("code", BSONCode("function() {}"));
+ append("codeNul", BSONCode(StringData("var nul = '\0'", StringData::LiteralTag())));
+ append("symbol", BSONSymbol("foo"));
+ append("symbolNul", BSONSymbol(StringData("f\0o", StringData::LiteralTag())));
+ append("codeWScope", BSONCodeWScope("asdf", BSONObj()));
+ append("codeWScopeWScope", BSONCodeWScope("asdf", BSON("one" << 1)));
+ append("int", 1);
+ append("timestamp", Timestamp());
+ append("long", 1LL);
+ append("very long", 1LL << 40);
+ append("maxkey", MAXKEY);
+
+ const BSONArray arr = arrBuilder.arr();
+
+        // Can't use append() any more since arrBuilder was finalized by arr().
+ objBuilder << "mega array" << arr;
+ docBuilder["mega array"] = mongo::Value(values);
+
+ const BSONObj obj = objBuilder.obj();
+ const Document doc = docBuilder.freeze();
+
+ const BSONObj obj2 = toBson(doc);
+ const Document doc2 = fromBson(obj);
+
+ // logical equality
+ ASSERT_EQUALS(obj, obj2);
+ ASSERT_EQUALS(doc, doc2);
+
+ // binary equality
+ ASSERT_EQUALS(obj.objsize(), obj2.objsize());
+ ASSERT_EQUALS(memcmp(obj.objdata(), obj2.objdata(), obj.objsize()), 0);
+
+ // ensure sorter serialization round-trips correctly
+ BufBuilder bb;
+ doc.serializeForSorter(bb);
+ BufReader reader(bb.buf(), bb.len());
+ const Document doc3 =
+ Document::deserializeForSorter(reader, Document::SorterDeserializeSettings());
+ BSONObj obj3 = toBson(doc3);
+ ASSERT_EQUALS(obj.objsize(), obj3.objsize());
+ ASSERT_EQUALS(memcmp(obj.objdata(), obj3.objdata(), obj.objsize()), 0);
+ }
+
+ template <typename T>
+ void append(const char* name, const T& thing) {
+ objBuilder << name << thing;
+ arrBuilder << thing;
+ docBuilder[name] = mongo::Value(thing);
+ values.push_back(mongo::Value(thing));
+ }
+
+ vector<mongo::Value> values;
+ MutableDocument docBuilder;
+ BSONObjBuilder objBuilder;
+ BSONArrayBuilder arrBuilder;
+};
+} // namespace Document
+
+namespace Value {
+
+using mongo::Value;
+
+BSONObj toBson(const Value& value) {
+ if (value.missing())
+ return BSONObj(); // EOO
+
+ BSONObjBuilder bob;
+ value.addToBsonObj(&bob, "");
+ return bob.obj();
+}
+
+Value fromBson(const BSONObj& obj) {
+ BSONElement element = obj.firstElement();
+ return Value(element);
+}
+
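+/** Asserts that a Value survives a round trip through BSON with equal value and type. */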
+void assertRoundTrips(const Value& value1) {
+ BSONObj obj1 = toBson(value1);
+ Value value2 = fromBson(obj1);
+ BSONObj obj2 = toBson(value2);
+ ASSERT_EQUALS(obj1, obj2);
+ ASSERT_EQUALS(value1, value2);
+ ASSERT_EQUALS(value1.getType(), value2.getType());
+}
+
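+/** A Value constructed from a BSONArray equals one constructed from the corresponding vector of Values. */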
+class BSONArrayTest {
+public:
+ void run() {
+ ASSERT_EQUALS(Value(BSON_ARRAY(1 << 2 << 3)), DOC_ARRAY(1 << 2 << 3));
+ ASSERT_EQUALS(Value(BSONArray()), Value(vector<Value>()));
+ }
+};
+
+/** Int type. */
+class Int {
+public:
+ void run() {
+ Value value = Value(5);
+ ASSERT_EQUALS(5, value.getInt());
+ ASSERT_EQUALS(5, value.getLong());
+ ASSERT_EQUALS(5, value.getDouble());
+ ASSERT_EQUALS(NumberInt, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Long type. */
+class Long {
+public:
+ void run() {
+ Value value = Value(99LL);
+ ASSERT_EQUALS(99, value.getLong());
+ ASSERT_EQUALS(99, value.getDouble());
+ ASSERT_EQUALS(NumberLong, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Double type. */
+class Double {
+public:
+ void run() {
+ Value value = Value(5.5);
+ ASSERT_EQUALS(5.5, value.getDouble());
+ ASSERT_EQUALS(NumberDouble, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** String type. */
+class String {
+public:
+ void run() {
+ Value value = Value("foo");
+ ASSERT_EQUALS("foo", value.getString());
+ ASSERT_EQUALS(mongo::String, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** String with a null character. */
+class StringWithNull {
+public:
+ void run() {
+ string withNull("a\0b", 3);
+ BSONObj objWithNull = BSON("" << withNull);
+ ASSERT_EQUALS(withNull, objWithNull[""].str());
+ Value value = fromBson(objWithNull);
+ ASSERT_EQUALS(withNull, value.getString());
+ assertRoundTrips(value);
+ }
+};
+
+/** Date type. */
+class Date {
+public:
+ void run() {
+ Value value = Value(Date_t::fromMillisSinceEpoch(999));
+ ASSERT_EQUALS(999, value.getDate());
+ ASSERT_EQUALS(mongo::Date, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Timestamp type. */
+class JSTimestamp {
+public:
+ void run() {
+ Value value = Value(Timestamp(777));
+ ASSERT(Timestamp(777) == value.getTimestamp());
+ ASSERT_EQUALS(mongo::bsonTimestamp, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Document with no fields. */
+class EmptyDocument {
+public:
+ void run() {
+ mongo::Document document = mongo::Document();
+ Value value = Value(document);
+ ASSERT_EQUALS(document.getPtr(), value.getDocument().getPtr());
+ ASSERT_EQUALS(Object, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Document type. */
+class Document {
+public:
+ void run() {
+ mongo::MutableDocument md;
+ md.addField("a", Value(5));
+ md.addField("apple", Value("rrr"));
+ md.addField("banana", Value(-.3));
+ mongo::Document document = md.freeze();
+
+ Value value = Value(document);
+ // Check document pointers are equal.
+ ASSERT_EQUALS(document.getPtr(), value.getDocument().getPtr());
+ // Check document contents.
+ ASSERT_EQUALS(5, document["a"].getInt());
+ ASSERT_EQUALS("rrr", document["apple"].getString());
+ ASSERT_EQUALS(-.3, document["banana"].getDouble());
+ ASSERT_EQUALS(Object, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Array with no elements. */
+class EmptyArray {
+public:
+ void run() {
+ vector<Value> array;
+ Value value(array);
+ const vector<Value>& array2 = value.getArray();
+
+ ASSERT(array2.empty());
+ ASSERT_EQUALS(Array, value.getType());
+ ASSERT_EQUALS(0U, value.getArrayLength());
+ assertRoundTrips(value);
+ }
+};
+
+/** Array type. */
+class Array {
+public:
+ void run() {
+ vector<Value> array;
+ array.push_back(Value(5));
+ array.push_back(Value("lala"));
+ array.push_back(Value(3.14));
+ Value value = Value(array);
+ const vector<Value>& array2 = value.getArray();
+
+ ASSERT(!array2.empty());
+ ASSERT_EQUALS(array2.size(), 3U);
+ ASSERT_EQUALS(5, array2[0].getInt());
+ ASSERT_EQUALS("lala", array2[1].getString());
+ ASSERT_EQUALS(3.14, array2[2].getDouble());
+ ASSERT_EQUALS(mongo::Array, value.getType());
+ ASSERT_EQUALS(3U, value.getArrayLength());
+ assertRoundTrips(value);
+ }
+};
+
+/** Oid type. */
+class Oid {
+public:
+ void run() {
+ Value value = fromBson(BSON("" << OID("abcdefabcdefabcdefabcdef")));
+ ASSERT_EQUALS(OID("abcdefabcdefabcdefabcdef"), value.getOid());
+ ASSERT_EQUALS(jstOID, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Bool type. */
+class Bool {
+public:
+ void run() {
+ Value value = fromBson(BSON("" << true));
+ ASSERT_EQUALS(true, value.getBool());
+ ASSERT_EQUALS(mongo::Bool, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Regex type. */
+class Regex {
+public:
+ void run() {
+ Value value = fromBson(fromjson("{'':/abc/}"));
+ ASSERT_EQUALS(string("abc"), value.getRegex());
+ ASSERT_EQUALS(RegEx, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Symbol type (currently unsupported). */
+class Symbol {
+public:
+ void run() {
+ Value value(BSONSymbol("FOOBAR"));
+ ASSERT_EQUALS("FOOBAR", value.getSymbol());
+ ASSERT_EQUALS(mongo::Symbol, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Undefined type. */
+class Undefined {
+public:
+ void run() {
+ Value value = Value(BSONUndefined);
+ ASSERT_EQUALS(mongo::Undefined, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** Null type. */
+class Null {
+public:
+ void run() {
+ Value value = Value(BSONNULL);
+ ASSERT_EQUALS(jstNULL, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** True value. */
+class True {
+public:
+ void run() {
+ Value value = Value(true);
+ ASSERT_EQUALS(true, value.getBool());
+ ASSERT_EQUALS(mongo::Bool, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** False value. */
+class False {
+public:
+ void run() {
+ Value value = Value(false);
+ ASSERT_EQUALS(false, value.getBool());
+ ASSERT_EQUALS(mongo::Bool, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** -1 value. */
+class MinusOne {
+public:
+ void run() {
+ Value value = Value(-1);
+ ASSERT_EQUALS(-1, value.getInt());
+ ASSERT_EQUALS(NumberInt, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** 0 value. */
+class Zero {
+public:
+ void run() {
+ Value value = Value(0);
+ ASSERT_EQUALS(0, value.getInt());
+ ASSERT_EQUALS(NumberInt, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
+/** 1 value. */
+class One {
+public:
+ void run() {
+ Value value = Value(1);
+ ASSERT_EQUALS(1, value.getInt());
+ ASSERT_EQUALS(NumberInt, value.getType());
+ assertRoundTrips(value);
+ }
+};
+
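+// Tests for the Value::coerceTo*() conversion helpers.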
+namespace Coerce {
+
+class ToBoolBase {
+public:
+ virtual ~ToBoolBase() {}
+ void run() {
+ ASSERT_EQUALS(expected(), value().coerceToBool());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual bool expected() = 0;
+};
+
+class ToBoolTrue : public ToBoolBase {
+ bool expected() {
+ return true;
+ }
+};
+
+class ToBoolFalse : public ToBoolBase {
+ bool expected() {
+ return false;
+ }
+};
+
+/** Coerce 0 to bool. */
+class ZeroIntToBool : public ToBoolFalse {
+ Value value() {
+ return Value(0);
+ }
+};
+
+/** Coerce -1 to bool. */
+class NonZeroIntToBool : public ToBoolTrue {
+ Value value() {
+ return Value(-1);
+ }
+};
+
+/** Coerce 0LL to bool. */
+class ZeroLongToBool : public ToBoolFalse {
+ Value value() {
+ return Value(0LL);
+ }
+};
+
+/** Coerce 5LL to bool. */
+class NonZeroLongToBool : public ToBoolTrue {
+ Value value() {
+ return Value(5LL);
+ }
+};
+
+/** Coerce 0.0 to bool. */
+class ZeroDoubleToBool : public ToBoolFalse {
+ Value value() {
+ return Value(0);
+ }
+};
+
+/** Coerce -1.3 to bool. */
+class NonZeroDoubleToBool : public ToBoolTrue {
+ Value value() {
+ return Value(-1.3);
+ }
+};
+
+/** Coerce "" to bool. */
+class StringToBool : public ToBoolTrue {
+ Value value() {
+ return Value("");
+ }
+};
+
+/** Coerce {} to bool. */
+class ObjectToBool : public ToBoolTrue {
+ Value value() {
+ return Value(mongo::Document());
+ }
+};
+
+/** Coerce [] to bool. */
+class ArrayToBool : public ToBoolTrue {
+ Value value() {
+ return Value(vector<Value>());
+ }
+};
+
+/** Coerce Date(0) to bool. */
+class DateToBool : public ToBoolTrue {
+ Value value() {
+ return Value(Date_t{});
+ }
+};
+
+/** Coerce js literal regex to bool. */
+class RegexToBool : public ToBoolTrue {
+ Value value() {
+ return fromBson(fromjson("{''://}"));
+ }
+};
+
+/** Coerce true to bool. */
+class TrueToBool : public ToBoolTrue {
+ Value value() {
+ return fromBson(BSON("" << true));
+ }
+};
+
+/** Coerce false to bool. */
+class FalseToBool : public ToBoolFalse {
+ Value value() {
+ return fromBson(BSON("" << false));
+ }
+};
+
+/** Coerce null to bool. */
+class NullToBool : public ToBoolFalse {
+ Value value() {
+ return Value(BSONNULL);
+ }
+};
+
+/** Coerce undefined to bool. */
+class UndefinedToBool : public ToBoolFalse {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+};
+
+class ToIntBase {
+public:
+ virtual ~ToIntBase() {}
+ void run() {
+ if (asserts())
+ ASSERT_THROWS(value().coerceToInt(), UserException);
+ else
+ ASSERT_EQUALS(expected(), value().coerceToInt());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual int expected() {
+ return 0;
+ }
+ virtual bool asserts() {
+ return false;
+ }
+};
+
+/** Coerce -5 to int. */
+class IntToInt : public ToIntBase {
+ Value value() {
+ return Value(-5);
+ }
+ int expected() {
+ return -5;
+ }
+};
+
+/** Coerce long to int. */
+class LongToInt : public ToIntBase {
+ Value value() {
+ return Value(0xff00000007LL);
+ }
+ int expected() {
+ return 7;
+ }
+};
+
+/** Coerce 9.8 to int. */
+class DoubleToInt : public ToIntBase {
+ Value value() {
+ return Value(9.8);
+ }
+ int expected() {
+ return 9;
+ }
+};
+
+/** Coerce null to int. */
+class NullToInt : public ToIntBase {
+ Value value() {
+ return Value(BSONNULL);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce undefined to int. */
+class UndefinedToInt : public ToIntBase {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce "" to int unsupported. */
+class StringToInt {
+public:
+ void run() {
+ ASSERT_THROWS(Value("").coerceToInt(), UserException);
+ }
+};
+
+class ToLongBase {
+public:
+ virtual ~ToLongBase() {}
+ void run() {
+ if (asserts())
+ ASSERT_THROWS(value().coerceToLong(), UserException);
+ else
+ ASSERT_EQUALS(expected(), value().coerceToLong());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual long long expected() {
+ return 0;
+ }
+ virtual bool asserts() {
+ return false;
+ }
+};
+
+/** Coerce -5 to long. */
+class IntToLong : public ToLongBase {
+ Value value() {
+ return Value(-5);
+ }
+ long long expected() {
+ return -5;
+ }
+};
+
+/** Coerce long to long. */
+class LongToLong : public ToLongBase {
+ Value value() {
+ return Value(0xff00000007LL);
+ }
+ long long expected() {
+ return 0xff00000007LL;
+ }
+};
+
+/** Coerce 9.8 to long. */
+class DoubleToLong : public ToLongBase {
+ Value value() {
+ return Value(9.8);
+ }
+ long long expected() {
+ return 9;
+ }
+};
+
+/** Coerce null to long. */
+class NullToLong : public ToLongBase {
+ Value value() {
+ return Value(BSONNULL);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce undefined to long. */
+class UndefinedToLong : public ToLongBase {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce string to long unsupported. */
+class StringToLong {
+public:
+ void run() {
+ ASSERT_THROWS(Value("").coerceToLong(), UserException);
+ }
+};
+
+class ToDoubleBase {
+public:
+ virtual ~ToDoubleBase() {}
+ void run() {
+ if (asserts())
+ ASSERT_THROWS(value().coerceToDouble(), UserException);
+ else
+ ASSERT_EQUALS(expected(), value().coerceToDouble());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual double expected() {
+ return 0;
+ }
+ virtual bool asserts() {
+ return false;
+ }
+};
+
+/** Coerce -5 to double. */
+class IntToDouble : public ToDoubleBase {
+ Value value() {
+ return Value(-5);
+ }
+ double expected() {
+ return -5;
+ }
+};
+
+/** Coerce long to double. */
+class LongToDouble : public ToDoubleBase {
+ Value value() {
+ // A long that cannot be exactly represented as a double.
+ return Value(static_cast<double>(0x8fffffffffffffffLL));
+ }
+ double expected() {
+ return static_cast<double>(0x8fffffffffffffffLL);
+ }
+};
+
+/** Coerce double to double. */
+class DoubleToDouble : public ToDoubleBase {
+ Value value() {
+ return Value(9.8);
+ }
+ double expected() {
+ return 9.8;
+ }
+};
+
+/** Coerce null to double. */
+class NullToDouble : public ToDoubleBase {
+ Value value() {
+ return Value(BSONNULL);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce undefined to double. */
+class UndefinedToDouble : public ToDoubleBase {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+ bool asserts() {
+ return true;
+ }
+};
+
+/** Coerce string to double unsupported. */
+class StringToDouble {
+public:
+ void run() {
+ ASSERT_THROWS(Value("").coerceToDouble(), UserException);
+ }
+};
+
+class ToDateBase {
+public:
+ virtual ~ToDateBase() {}
+ void run() {
+ ASSERT_EQUALS(expected(), value().coerceToDate());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual long long expected() = 0;
+};
+
+/** Coerce date to date. */
+class DateToDate : public ToDateBase {
+ Value value() {
+ return Value(Date_t::fromMillisSinceEpoch(888));
+ }
+ long long expected() {
+ return 888;
+ }
+};
+
+/**
+ * Convert timestamp to date. This extracts the time portion of the timestamp, which
+ * differs from the BSON behavior of interpreting all bytes as a date.
+ */
+class TimestampToDate : public ToDateBase {
+ Value value() {
+ return Value(Timestamp(777, 666));
+ }
+ long long expected() {
+ return 777 * 1000;
+ }
+};
+
+/** Coerce string to date unsupported. */
+class StringToDate {
+public:
+ void run() {
+ ASSERT_THROWS(Value("").coerceToDate(), UserException);
+ }
+};
+
+class ToStringBase {
+public:
+ virtual ~ToStringBase() {}
+ void run() {
+ ASSERT_EQUALS(expected(), value().coerceToString());
+ }
+
+protected:
+ virtual Value value() = 0;
+ virtual string expected() {
+ return "";
+ }
+};
+
+/** Coerce -0.2 to string. */
+class DoubleToString : public ToStringBase {
+ Value value() {
+ return Value(-0.2);
+ }
+ string expected() {
+ return "-0.2";
+ }
+};
+
+/** Coerce -4 to string. */
+class IntToString : public ToStringBase {
+ Value value() {
+ return Value(-4);
+ }
+ string expected() {
+ return "-4";
+ }
+};
+
+/** Coerce 10000LL to string. */
+class LongToString : public ToStringBase {
+ Value value() {
+ return Value(10000LL);
+ }
+ string expected() {
+ return "10000";
+ }
+};
+
+/** Coerce string to string. */
+class StringToString : public ToStringBase {
+ Value value() {
+ return Value("fO_o");
+ }
+ string expected() {
+ return "fO_o";
+ }
+};
+
+/** Coerce timestamp to string. */
+class TimestampToString : public ToStringBase {
+ Value value() {
+ return Value(Timestamp(1, 2));
+ }
+ string expected() {
+ return Timestamp(1, 2).toStringPretty();
+ }
+};
+
+/** Coerce date to string. */
+class DateToString : public ToStringBase {
+ Value value() {
+ return Value(Date_t::fromMillisSinceEpoch(1234567890LL * 1000));
+ }
+ string expected() {
+ return "2009-02-13T23:31:30";
+ } // from js
+};
+
+/** Coerce null to string. */
+class NullToString : public ToStringBase {
+ Value value() {
+ return Value(BSONNULL);
+ }
+};
+
+/** Coerce undefined to string. */
+class UndefinedToString : public ToStringBase {
+ Value value() {
+ return Value(BSONUndefined);
+ }
+};
+
+/** Coerce document to string unsupported. */
+class DocumentToString {
+public:
+ void run() {
+ ASSERT_THROWS(Value(mongo::Document()).coerceToString(), UserException);
+ }
+};
+
+/** Coerce timestamp to timestamp. */
+class TimestampToTimestamp {
+public:
+ void run() {
+ Value value = Value(Timestamp(1010));
+ ASSERT(Timestamp(1010) == value.coerceToTimestamp());
+ }
+};
+
+/** Coerce date to timestamp unsupported. */
+class DateToTimestamp {
+public:
+ void run() {
+ ASSERT_THROWS(Value(Date_t::fromMillisSinceEpoch(1010)).coerceToTimestamp(), UserException);
+ }
+};
+
+} // namespace Coerce
+
+/** Get the "widest" of two numeric types. */
+class GetWidestNumeric {
+public:
+ void run() {
+ using mongo::Undefined;
+
+ // Numeric types.
+ assertWidest(NumberInt, NumberInt, NumberInt);
+ assertWidest(NumberLong, NumberInt, NumberLong);
+ assertWidest(NumberDouble, NumberInt, NumberDouble);
+ assertWidest(NumberLong, NumberLong, NumberLong);
+ assertWidest(NumberDouble, NumberLong, NumberDouble);
+ assertWidest(NumberDouble, NumberDouble, NumberDouble);
+
+ // Missing value and numeric types (result Undefined).
+ assertWidest(Undefined, NumberInt, Undefined);
+ assertWidest(Undefined, NumberInt, Undefined);
+ assertWidest(Undefined, NumberLong, jstNULL);
+ assertWidest(Undefined, NumberLong, Undefined);
+ assertWidest(Undefined, NumberDouble, jstNULL);
+ assertWidest(Undefined, NumberDouble, Undefined);
+
+ // Missing value types (result Undefined).
+ assertWidest(Undefined, jstNULL, jstNULL);
+ assertWidest(Undefined, jstNULL, Undefined);
+ assertWidest(Undefined, Undefined, Undefined);
+
+ // Other types (result Undefined).
+ assertWidest(Undefined, NumberInt, mongo::Bool);
+ assertWidest(Undefined, mongo::String, NumberDouble);
+ }
+
+private:
+ void assertWidest(BSONType expectedWidest, BSONType a, BSONType b) {
+ ASSERT_EQUALS(expectedWidest, Value::getWidestNumeric(a, b));
+ ASSERT_EQUALS(expectedWidest, Value::getWidestNumeric(b, a));
+ }
+};
+
+/** Add a Value to a BSONObj. */
+class AddToBsonObj {
+public:
+ void run() {
+ BSONObjBuilder bob;
+ Value(4.4).addToBsonObj(&bob, "a");
+ Value(22).addToBsonObj(&bob, "b");
+ Value("astring").addToBsonObj(&bob, "c");
+ ASSERT_EQUALS(BSON("a" << 4.4 << "b" << 22 << "c"
+ << "astring"),
+ bob.obj());
+ }
+};
+
+/** Add a Value to a BSONArray. */
+class AddToBsonArray {
+public:
+ void run() {
+ BSONArrayBuilder bab;
+ Value(4.4).addToBsonArray(&bab);
+ Value(22).addToBsonArray(&bab);
+ Value("astring").addToBsonArray(&bab);
+ ASSERT_EQUALS(BSON_ARRAY(4.4 << 22 << "astring"), bab.arr());
+ }
+};
+
+/** Value comparator. */
+class Compare {
+public:
+ void run() {
+ BSONObjBuilder undefinedBuilder;
+ undefinedBuilder.appendUndefined("");
+ BSONObj undefined = undefinedBuilder.obj();
+
+ // Undefined / null.
+ assertComparison(0, undefined, undefined);
+ assertComparison(-1, undefined, BSON("" << BSONNULL));
+ assertComparison(0, BSON("" << BSONNULL), BSON("" << BSONNULL));
+
+ // Undefined / null with other types.
+ assertComparison(-1, undefined, BSON("" << 1));
+ assertComparison(-1,
+ undefined,
+ BSON(""
+ << "bar"));
+ assertComparison(-1, BSON("" << BSONNULL), BSON("" << -1));
+ assertComparison(-1,
+ BSON("" << BSONNULL),
+ BSON(""
+ << "bar"));
+
+ // Numeric types.
+ assertComparison(0, 5, 5LL);
+ assertComparison(0, -2, -2.0);
+ assertComparison(0, 90LL, 90.0);
+ assertComparison(-1, 5, 6LL);
+ assertComparison(-1, -2, 2.1);
+ assertComparison(1, 90LL, 89.999);
+ assertComparison(-1, 90, 90.1);
+ assertComparison(
+ 0, numeric_limits<double>::quiet_NaN(), numeric_limits<double>::signaling_NaN());
+ assertComparison(-1, numeric_limits<double>::quiet_NaN(), 5);
+
+        // Strings sort between numbers and objects.
+ assertComparison(1, "abc", 90);
+ assertComparison(-1,
+ "abc",
+ BSON("a"
+ << "b"));
+
+ // String comparison.
+ assertComparison(-1, "", "a");
+ assertComparison(0, "a", "a");
+ assertComparison(-1, "a", "b");
+ assertComparison(-1, "aa", "b");
+ assertComparison(1, "bb", "b");
+ assertComparison(1, "bb", "b");
+ assertComparison(1, "b-", "b");
+ assertComparison(-1, "b-", "ba");
+ // With a null character.
+ assertComparison(1, string("a\0", 2), "a");
+
+ // Object.
+ assertComparison(0, fromjson("{'':{}}"), fromjson("{'':{}}"));
+ assertComparison(0, fromjson("{'':{x:1}}"), fromjson("{'':{x:1}}"));
+ assertComparison(-1, fromjson("{'':{}}"), fromjson("{'':{x:1}}"));
+ assertComparison(-1, fromjson("{'':{'z': 1}}"), fromjson("{'':{'a': 'a'}}"));
+
+ // Array.
+ assertComparison(0, fromjson("{'':[]}"), fromjson("{'':[]}"));
+ assertComparison(-1, fromjson("{'':[0]}"), fromjson("{'':[1]}"));
+ assertComparison(-1, fromjson("{'':[0,0]}"), fromjson("{'':[1]}"));
+ assertComparison(-1, fromjson("{'':[0]}"), fromjson("{'':[0,0]}"));
+ assertComparison(-1, fromjson("{'':[0]}"), fromjson("{'':['']}"));
+
+ // OID.
+ assertComparison(0, OID("abcdefabcdefabcdefabcdef"), OID("abcdefabcdefabcdefabcdef"));
+ assertComparison(1, OID("abcdefabcdefabcdefabcdef"), OID("010101010101010101010101"));
+
+ // Bool.
+ assertComparison(0, true, true);
+ assertComparison(0, false, false);
+ assertComparison(1, true, false);
+
+ // Date.
+ assertComparison(0, Date_t::fromMillisSinceEpoch(555), Date_t::fromMillisSinceEpoch(555));
+ assertComparison(1, Date_t::fromMillisSinceEpoch(555), Date_t::fromMillisSinceEpoch(554));
+ // Negative date.
+ assertComparison(1, Date_t::fromMillisSinceEpoch(0), Date_t::fromMillisSinceEpoch(-1));
+
+ // Regex.
+ assertComparison(0, fromjson("{'':/a/}"), fromjson("{'':/a/}"));
+ assertComparison(-1, fromjson("{'':/a/}"), fromjson("{'':/a/i}"));
+ assertComparison(-1, fromjson("{'':/a/}"), fromjson("{'':/aa/}"));
+
+ // Timestamp.
+ assertComparison(0, Timestamp(1234), Timestamp(1234));
+ assertComparison(-1, Timestamp(4), Timestamp(1234));
+
+ // Cross-type comparisons. Listed in order of canonical types.
+ assertComparison(-1, Value(mongo::MINKEY), Value());
+ assertComparison(0, Value(), Value());
+ assertComparison(0, Value(), Value(BSONUndefined));
+ assertComparison(-1, Value(BSONUndefined), Value(BSONNULL));
+ assertComparison(-1, Value(BSONNULL), Value(1));
+ assertComparison(0, Value(1), Value(1LL));
+ assertComparison(0, Value(1), Value(1.0));
+ assertComparison(-1, Value(1), Value("string"));
+ assertComparison(0, Value("string"), Value(BSONSymbol("string")));
+ assertComparison(-1, Value("string"), Value(mongo::Document()));
+ assertComparison(-1, Value(mongo::Document()), Value(vector<Value>()));
+ assertComparison(-1, Value(vector<Value>()), Value(BSONBinData("", 0, MD5Type)));
+ assertComparison(-1, Value(BSONBinData("", 0, MD5Type)), Value(mongo::OID()));
+ assertComparison(-1, Value(mongo::OID()), Value(false));
+ assertComparison(-1, Value(false), Value(Date_t()));
+ assertComparison(-1, Value(Date_t()), Value(Timestamp()));
+ assertComparison(-1, Value(Timestamp()), Value(BSONRegEx("")));
+ assertComparison(-1, Value(BSONRegEx("")), Value(BSONDBRef("", mongo::OID())));
+ assertComparison(-1, Value(BSONDBRef("", mongo::OID())), Value(BSONCode("")));
+ assertComparison(-1, Value(BSONCode("")), Value(BSONCodeWScope("", BSONObj())));
+ assertComparison(-1, Value(BSONCodeWScope("", BSONObj())), Value(mongo::MAXKEY));
+ }
+
+private:
+ template <class T, class U>
+ void assertComparison(int expectedResult, const T& a, const U& b) {
+ assertComparison(expectedResult, BSON("" << a), BSON("" << b));
+ }
+ void assertComparison(int expectedResult, const Timestamp& a, const Timestamp& b) {
+ BSONObjBuilder first;
+ first.append("", a);
+ BSONObjBuilder second;
+ second.append("", b);
+ assertComparison(expectedResult, first.obj(), second.obj());
+ }
+ int sign(int cmp) {
+ if (cmp == 0)
+ return 0;
+ else if (cmp < 0)
+ return -1;
+ else
+ return 1;
+ }
+ int cmp(const Value& a, const Value& b) {
+ return sign(Value::compare(a, b));
+ }
+ void assertComparison(int expectedResult, const BSONObj& a, const BSONObj& b) {
+ assertComparison(expectedResult, fromBson(a), fromBson(b));
+ }
+ void assertComparison(int expectedResult, const Value& a, const Value& b) {
+ mongo::unittest::log() << "testing " << a.toString() << " and " << b.toString() << endl;
+ // reflexivity
+ ASSERT_EQUALS(0, cmp(a, a));
+ ASSERT_EQUALS(0, cmp(b, b));
+
+ // symmetry
+ ASSERT_EQUALS(expectedResult, cmp(a, b));
+ ASSERT_EQUALS(-expectedResult, cmp(b, a));
+
+ if (expectedResult == 0) {
+ // equal values must hash equally.
+ ASSERT_EQUALS(hash(a), hash(b));
+ } else {
+ // unequal values must hash unequally.
+ // (not true in general but we should error if it fails in any of these cases)
+ ASSERT_NOT_EQUALS(hash(a), hash(b));
}
- };
- SuiteInstance<All> myall;
+        // Same ordering as BSON element comparison.
+ ASSERT_EQUALS(expectedResult,
+ sign(toBson(a).firstElement().woCompare(toBson(b).firstElement())));
+ }
+ size_t hash(const Value& v) {
+ size_t seed = 0xf00ba6;
+ v.hash_combine(seed);
+ return seed;
+ }
+};
+
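+/** Nested field and array-index access on a Value; absent names and out-of-range indexes are missing. */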
+class SubFields {
+public:
+ void run() {
+ const Value val = fromBson(fromjson("{'': {a: [{x:1, b:[1, {y:1, c:1234, z:1}, 1]}]}}"));
+ // ^ this outer object is removed by fromBson
+
+ ASSERT(val.getType() == mongo::Object);
+
+ ASSERT(val[999].missing());
+ ASSERT(val["missing"].missing());
+ ASSERT(val["a"].getType() == mongo::Array);
+
+ ASSERT(val["a"][999].missing());
+ ASSERT(val["a"]["missing"].missing());
+ ASSERT(val["a"][0].getType() == mongo::Object);
+
+ ASSERT(val["a"][0][999].missing());
+ ASSERT(val["a"][0]["missing"].missing());
+ ASSERT(val["a"][0]["b"].getType() == mongo::Array);
+
+ ASSERT(val["a"][0]["b"][999].missing());
+ ASSERT(val["a"][0]["b"]["missing"].missing());
+ ASSERT(val["a"][0]["b"][1].getType() == mongo::Object);
+
+ ASSERT(val["a"][0]["b"][1][999].missing());
+ ASSERT(val["a"][0]["b"][1]["missing"].missing());
+ ASSERT(val["a"][0]["b"][1]["c"].getType() == mongo::NumberInt);
+ ASSERT_EQUALS(val["a"][0]["b"][1]["c"].getInt(), 1234);
+ }
+};
+
+
+class SerializationOfMissingForSorter {
+ // Can't be tested in AllTypesDoc since missing values are omitted when adding to BSON.
+public:
+ void run() {
+ const Value missing;
+ const Value arrayOfMissing = Value(vector<Value>(10));
+
+ BufBuilder bb;
+ missing.serializeForSorter(bb);
+ arrayOfMissing.serializeForSorter(bb);
+
+ BufReader reader(bb.buf(), bb.len());
+ ASSERT_EQUALS(missing,
+ Value::deserializeForSorter(reader, Value::SorterDeserializeSettings()));
+ ASSERT_EQUALS(arrayOfMissing,
+ Value::deserializeForSorter(reader, Value::SorterDeserializeSettings()));
+ }
+};
+} // namespace Value
+
+class All : public Suite {
+public:
+ All() : Suite("document") {}
+ void setupTests() {
+ add<Document::Create>();
+ add<Document::CreateFromBsonObj>();
+ add<Document::AddField>();
+ add<Document::GetValue>();
+ add<Document::SetField>();
+ add<Document::Compare>();
+ add<Document::Clone>();
+ add<Document::CloneMultipleFields>();
+ add<Document::FieldIteratorEmpty>();
+ add<Document::FieldIteratorSingle>();
+ add<Document::FieldIteratorMultiple>();
+ add<Document::AllTypesDoc>();
+
+ add<Value::BSONArrayTest>();
+ add<Value::Int>();
+ add<Value::Long>();
+ add<Value::Double>();
+ add<Value::String>();
+ add<Value::StringWithNull>();
+ add<Value::Date>();
+ add<Value::JSTimestamp>();
+ add<Value::EmptyDocument>();
+ add<Value::EmptyArray>();
+ add<Value::Array>();
+ add<Value::Oid>();
+ add<Value::Bool>();
+ add<Value::Regex>();
+ add<Value::Symbol>();
+ add<Value::Undefined>();
+ add<Value::Null>();
+ add<Value::True>();
+ add<Value::False>();
+ add<Value::MinusOne>();
+ add<Value::Zero>();
+ add<Value::One>();
+
+ add<Value::Coerce::ZeroIntToBool>();
+ add<Value::Coerce::NonZeroIntToBool>();
+ add<Value::Coerce::ZeroLongToBool>();
+ add<Value::Coerce::NonZeroLongToBool>();
+ add<Value::Coerce::ZeroDoubleToBool>();
+ add<Value::Coerce::NonZeroDoubleToBool>();
+ add<Value::Coerce::StringToBool>();
+ add<Value::Coerce::ObjectToBool>();
+ add<Value::Coerce::ArrayToBool>();
+ add<Value::Coerce::DateToBool>();
+ add<Value::Coerce::RegexToBool>();
+ add<Value::Coerce::TrueToBool>();
+ add<Value::Coerce::FalseToBool>();
+ add<Value::Coerce::NullToBool>();
+ add<Value::Coerce::UndefinedToBool>();
+ add<Value::Coerce::IntToInt>();
+ add<Value::Coerce::LongToInt>();
+ add<Value::Coerce::DoubleToInt>();
+ add<Value::Coerce::NullToInt>();
+ add<Value::Coerce::UndefinedToInt>();
+ add<Value::Coerce::StringToInt>();
+ add<Value::Coerce::IntToLong>();
+ add<Value::Coerce::LongToLong>();
+ add<Value::Coerce::DoubleToLong>();
+ add<Value::Coerce::NullToLong>();
+ add<Value::Coerce::UndefinedToLong>();
+ add<Value::Coerce::StringToLong>();
+ add<Value::Coerce::IntToDouble>();
+ add<Value::Coerce::LongToDouble>();
+ add<Value::Coerce::DoubleToDouble>();
+ add<Value::Coerce::NullToDouble>();
+ add<Value::Coerce::UndefinedToDouble>();
+ add<Value::Coerce::StringToDouble>();
+ add<Value::Coerce::DateToDate>();
+ add<Value::Coerce::TimestampToDate>();
+ add<Value::Coerce::StringToDate>();
+ add<Value::Coerce::DoubleToString>();
+ add<Value::Coerce::IntToString>();
+ add<Value::Coerce::LongToString>();
+ add<Value::Coerce::StringToString>();
+ add<Value::Coerce::TimestampToString>();
+ add<Value::Coerce::DateToString>();
+ add<Value::Coerce::NullToString>();
+ add<Value::Coerce::UndefinedToString>();
+ add<Value::Coerce::DocumentToString>();
+ add<Value::Coerce::TimestampToTimestamp>();
+ add<Value::Coerce::DateToTimestamp>();
+
+ add<Value::GetWidestNumeric>();
+ add<Value::AddToBsonObj>();
+ add<Value::AddToBsonArray>();
+ add<Value::Compare>();
+ add<Value::SubFields>();
+ add<Value::SerializationOfMissingForSorter>();
+ }
+};
+
+SuiteInstance<All> myall;
-} // namespace DocumentTests
+} // namespace DocumentTests
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index 34cbda702a3..8db17f3152a 100644
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -31,7 +31,7 @@
#include "mongo/db/pipeline/expression.h"
#include <boost/algorithm/string.hpp>
-#include <boost/preprocessor/cat.hpp> // like the ## operator but works with __LINE__
+#include <boost/preprocessor/cat.hpp> // like the ## operator but works with __LINE__
#include <cstdio>
#include "mongo/base/init.h"
@@ -44,268 +44,266 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
- using namespace mongoutils;
+using namespace mongoutils;
- using boost::intrusive_ptr;
- using std::set;
- using std::string;
- using std::vector;
+using boost::intrusive_ptr;
+using std::set;
+using std::string;
+using std::vector;
- /// Helper function to easily wrap constants with $const.
- static Value serializeConstant(Value val) {
- return Value(DOC("$const" << val));
- }
-
- void Variables::uassertValidNameForUserWrite(StringData varName) {
- // System variables users allowed to write to (currently just one)
- if (varName == "CURRENT") {
- return;
- }
-
- uassert(16866, "empty variable names are not allowed",
- !varName.empty());
-
- const bool firstCharIsValid = (varName[0] >= 'a' && varName[0] <= 'z')
- || (varName[0] & '\x80') // non-ascii
- ;
-
- uassert(16867, str::stream() <<
- "'" << varName << "' starts with an invalid character for a user variable name",
- firstCharIsValid);
-
- for (size_t i = 1; i < varName.size(); i++) {
- const bool charIsValid = (varName[i] >= 'a' && varName[i] <= 'z')
- || (varName[i] >= 'A' && varName[i] <= 'Z')
- || (varName[i] >= '0' && varName[i] <= '9')
- || (varName[i] == '_')
- || (varName[i] & '\x80') // non-ascii
- ;
+/// Helper function to easily wrap constants with $const.
+static Value serializeConstant(Value val) {
+ return Value(DOC("$const" << val));
+}
- uassert(16868, str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '" << varName[i] << "'",
- charIsValid);
- }
+void Variables::uassertValidNameForUserWrite(StringData varName) {
+ // System variables users are allowed to write to (currently just one)
+ if (varName == "CURRENT") {
+ return;
}
- void Variables::uassertValidNameForUserRead(StringData varName) {
- uassert(16869, "empty variable names are not allowed",
- !varName.empty());
+ uassert(16866, "empty variable names are not allowed", !varName.empty());
- const bool firstCharIsValid = (varName[0] >= 'a' && varName[0] <= 'z')
- || (varName[0] >= 'A' && varName[0] <= 'Z')
- || (varName[0] & '\x80') // non-ascii
- ;
+ const bool firstCharIsValid =
+ (varName[0] >= 'a' && varName[0] <= 'z') || (varName[0] & '\x80') // non-ascii
+ ;
- uassert(16870, str::stream() <<
- "'" << varName << "' starts with an invalid character for a variable name",
- firstCharIsValid);
+ uassert(16867,
+ str::stream() << "'" << varName
+ << "' starts with an invalid character for a user variable name",
+ firstCharIsValid);
- for (size_t i = 1; i < varName.size(); i++) {
- const bool charIsValid = (varName[i] >= 'a' && varName[i] <= 'z')
- || (varName[i] >= 'A' && varName[i] <= 'Z')
- || (varName[i] >= '0' && varName[i] <= '9')
- || (varName[i] == '_')
- || (varName[i] & '\x80') // non-ascii
- ;
+ for (size_t i = 1; i < varName.size(); i++) {
+ const bool charIsValid = (varName[i] >= 'a' && varName[i] <= 'z') ||
+ (varName[i] >= 'A' && varName[i] <= 'Z') || (varName[i] >= '0' && varName[i] <= '9') ||
+ (varName[i] == '_') || (varName[i] & '\x80') // non-ascii
+ ;
- uassert(16871, str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '" << varName[i] << "'",
- charIsValid);
- }
- }
-
- void Variables::setValue(Id id, const Value& value) {
- massert(17199, "can't use Variables::setValue to set ROOT",
- id != ROOT_ID);
-
- verify(id < _numVars);
- _rest[id] = value;
+ uassert(16868,
+ str::stream() << "'" << varName << "' contains an invalid character "
+ << "for a variable name: '" << varName[i] << "'",
+ charIsValid);
}
+}
- Value Variables::getValue(Id id) const {
- if (id == ROOT_ID)
- return Value(_root);
+void Variables::uassertValidNameForUserRead(StringData varName) {
+ uassert(16869, "empty variable names are not allowed", !varName.empty());
- verify(id < _numVars);
- return _rest[id];
- }
+ const bool firstCharIsValid = (varName[0] >= 'a' && varName[0] <= 'z') ||
+ (varName[0] >= 'A' && varName[0] <= 'Z') || (varName[0] & '\x80') // non-ascii
+ ;
- Document Variables::getDocument(Id id) const {
- if (id == ROOT_ID)
- return _root;
+ uassert(16870,
+ str::stream() << "'" << varName
+ << "' starts with an invalid character for a variable name",
+ firstCharIsValid);
- verify(id < _numVars);
- const Value var = _rest[id];
- if (var.getType() == Object)
- return var.getDocument();
+ for (size_t i = 1; i < varName.size(); i++) {
+ const bool charIsValid = (varName[i] >= 'a' && varName[i] <= 'z') ||
+ (varName[i] >= 'A' && varName[i] <= 'Z') || (varName[i] >= '0' && varName[i] <= '9') ||
+ (varName[i] == '_') || (varName[i] & '\x80') // non-ascii
+ ;
- return Document();
+ uassert(16871,
+ str::stream() << "'" << varName << "' contains an invalid character "
+ << "for a variable name: '" << varName[i] << "'",
+ charIsValid);
}
+}
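// Illustrative examples of the naming rules above (a sketch added for clarity, not part
// of the diff): "total" or "myVar2" pass uassertValidNameForUserWrite, since a user
// variable must start with a lowercase or non-ASCII character; "Total" and "_tmp" fail
// with 16867; "CURRENT" is the one system variable users may write to; "ROOT" can be
// read, but VariablesParseState::defineVariable() below refuses to redefine it (17275).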
- Variables::Id VariablesParseState::defineVariable(StringData name) {
- // caller should have validated before hand by using Variables::uassertValidNameForUserWrite
- massert(17275, "Can't redefine ROOT",
- name != "ROOT");
+void Variables::setValue(Id id, const Value& value) {
+ massert(17199, "can't use Variables::setValue to set ROOT", id != ROOT_ID);
- Variables::Id id = _idGenerator->generateId();
- _variables[name] = id;
- return id;
- }
-
- Variables::Id VariablesParseState::getVariable(StringData name) const {
- StringMap<Variables::Id>::const_iterator it = _variables.find(name);
- if (it != _variables.end())
- return it->second;
+ verify(id < _numVars);
+ _rest[id] = value;
+}
- uassert(17276, str::stream() << "Use of undefined variable: " << name,
- name == "ROOT" || name == "CURRENT");
+Value Variables::getValue(Id id) const {
+ if (id == ROOT_ID)
+ return Value(_root);
- return Variables::ROOT_ID;
- }
+ verify(id < _numVars);
+ return _rest[id];
+}
- /* --------------------------- Expression ------------------------------ */
+Document Variables::getDocument(Id id) const {
+ if (id == ROOT_ID)
+ return _root;
- Expression::ObjectCtx::ObjectCtx(int theOptions)
- : options(theOptions)
- {}
+ verify(id < _numVars);
+ const Value var = _rest[id];
+ if (var.getType() == Object)
+ return var.getDocument();
- bool Expression::ObjectCtx::documentOk() const {
- return ((options & DOCUMENT_OK) != 0);
- }
+ return Document();
+}
- bool Expression::ObjectCtx::topLevel() const {
- return ((options & TOP_LEVEL) != 0);
- }
+Variables::Id VariablesParseState::defineVariable(StringData name) {
+ // caller should have validated beforehand by using Variables::uassertValidNameForUserWrite
+ massert(17275, "Can't redefine ROOT", name != "ROOT");
- bool Expression::ObjectCtx::inclusionOk() const {
- return ((options & INCLUSION_OK) != 0);
- }
+ Variables::Id id = _idGenerator->generateId();
+ _variables[name] = id;
+ return id;
+}
- string Expression::removeFieldPrefix(const string &prefixedField) {
- uassert(16419, str::stream()<<"field path must not contain embedded null characters" << prefixedField.find("\0") << "," ,
- prefixedField.find('\0') == string::npos);
+Variables::Id VariablesParseState::getVariable(StringData name) const {
+ StringMap<Variables::Id>::const_iterator it = _variables.find(name);
+ if (it != _variables.end())
+ return it->second;
- const char* pPrefixedField = prefixedField.c_str();
- uassert(15982, str::stream() <<
- "field path references must be prefixed with a '$' ('" <<
- prefixedField << "'", pPrefixedField[0] == '$');
+ uassert(17276,
+ str::stream() << "Use of undefined variable: " << name,
+ name == "ROOT" || name == "CURRENT");
- return string(pPrefixedField + 1);
- }
+ return Variables::ROOT_ID;
+}
- intrusive_ptr<Expression> Expression::parseObject(
- BSONObj obj,
- ObjectCtx* pCtx,
- const VariablesParseState& vps) {
- /*
- An object expression can take any of the following forms:
+/* --------------------------- Expression ------------------------------ */
- f0: {f1: ..., f2: ..., f3: ...}
- f0: {$operator:[operand1, operand2, ...]}
- */
+Expression::ObjectCtx::ObjectCtx(int theOptions) : options(theOptions) {}
- intrusive_ptr<Expression> pExpression; // the result
- intrusive_ptr<ExpressionObject> pExpressionObject; // alt result
- enum { UNKNOWN, NOTOPERATOR, OPERATOR } kind = UNKNOWN;
+bool Expression::ObjectCtx::documentOk() const {
+ return ((options & DOCUMENT_OK) != 0);
+}
- if (obj.isEmpty())
- return ExpressionObject::create();
- BSONObjIterator iter(obj);
+bool Expression::ObjectCtx::topLevel() const {
+ return ((options & TOP_LEVEL) != 0);
+}
- for(size_t fieldCount = 0; iter.more(); ++fieldCount) {
- BSONElement fieldElement(iter.next());
- const char* pFieldName = fieldElement.fieldName();
+bool Expression::ObjectCtx::inclusionOk() const {
+ return ((options & INCLUSION_OK) != 0);
+}
- if (pFieldName[0] == '$') {
- uassert(15983, str::stream() <<
- "the operator must be the only field in a pipeline object (at '"
- << pFieldName << "'",
- fieldCount == 0);
+string Expression::removeFieldPrefix(const string& prefixedField) {
+ uassert(16419,
+ str::stream() << "field path must not contain embedded null characters"
+ << prefixedField.find("\0") << ",",
+ prefixedField.find('\0') == string::npos);
- uassert(16404, "$expressions are not allowed at the top-level of $project",
- !pCtx->topLevel());
+ const char* pPrefixedField = prefixedField.c_str();
+ uassert(15982,
+ str::stream() << "field path references must be prefixed with a '$' ('" << prefixedField
+ << "'",
+ pPrefixedField[0] == '$');
- /* we've determined this "object" is an operator expression */
- kind = OPERATOR;
+ return string(pPrefixedField + 1);
+}
- pExpression = parseExpression(fieldElement, vps);
+intrusive_ptr<Expression> Expression::parseObject(BSONObj obj,
+ ObjectCtx* pCtx,
+ const VariablesParseState& vps) {
+ /*
+ An object expression can take any of the following forms:
+
+ f0: {f1: ..., f2: ..., f3: ...}
+ f0: {$operator:[operand1, operand2, ...]}
+ */
+
+ intrusive_ptr<Expression> pExpression; // the result
+ intrusive_ptr<ExpressionObject> pExpressionObject; // alt result
+ enum { UNKNOWN, NOTOPERATOR, OPERATOR } kind = UNKNOWN;
+
+ if (obj.isEmpty())
+ return ExpressionObject::create();
+ BSONObjIterator iter(obj);
+
+ for (size_t fieldCount = 0; iter.more(); ++fieldCount) {
+ BSONElement fieldElement(iter.next());
+ const char* pFieldName = fieldElement.fieldName();
+
+ if (pFieldName[0] == '$') {
+ uassert(
+ 15983,
+ str::stream() << "the operator must be the only field in a pipeline object (at '"
+ << pFieldName << "'",
+ fieldCount == 0);
+
+ uassert(16404,
+ "$expressions are not allowed at the top-level of $project",
+ !pCtx->topLevel());
+
+ /* we've determined this "object" is an operator expression */
+ kind = OPERATOR;
+
+ pExpression = parseExpression(fieldElement, vps);
+ } else {
+ uassert(15990,
+ str::stream() << "this object is already an operator expression, and can't be "
+ "used as a document expression (at '" << pFieldName << "')",
+ kind != OPERATOR);
+
+ uassert(16405,
+ "dotted field names are only allowed at the top level",
+ pCtx->topLevel() || !str::contains(pFieldName, '.'));
+
+ /* if it's our first time, create the document expression */
+ if (!pExpression.get()) {
+ verify(pCtx->documentOk());
+ // CW TODO error: document not allowed in this context
+
+ pExpressionObject =
+ pCtx->topLevel() ? ExpressionObject::createRoot() : ExpressionObject::create();
+ pExpression = pExpressionObject;
+
+ /* this "object" is not an operator expression */
+ kind = NOTOPERATOR;
}
- else {
- uassert(15990, str::stream() << "this object is already an operator expression, and can't be used as a document expression (at '" <<
- pFieldName << "')",
- kind != OPERATOR);
- uassert(16405, "dotted field names are only allowed at the top level",
- pCtx->topLevel() || !str::contains(pFieldName, '.'));
+ BSONType fieldType = fieldElement.type();
+ string fieldName(pFieldName);
+ switch (fieldType) {
+ case Object: {
+ /* it's a nested document */
+ ObjectCtx oCtx((pCtx->documentOk() ? ObjectCtx::DOCUMENT_OK : 0) |
+ (pCtx->inclusionOk() ? ObjectCtx::INCLUSION_OK : 0));
- /* if it's our first time, create the document expression */
- if (!pExpression.get()) {
- verify(pCtx->documentOk());
- // CW TODO error: document not allowed in this context
-
- pExpressionObject = pCtx->topLevel() ? ExpressionObject::createRoot()
- : ExpressionObject::create();
- pExpression = pExpressionObject;
-
- /* this "object" is not an operator expression */
- kind = NOTOPERATOR;
+ pExpressionObject->addField(fieldName,
+ parseObject(fieldElement.Obj(), &oCtx, vps));
+ break;
}
-
- BSONType fieldType = fieldElement.type();
- string fieldName(pFieldName);
- switch (fieldType){
- case Object: {
- /* it's a nested document */
- ObjectCtx oCtx(
- (pCtx->documentOk() ? ObjectCtx::DOCUMENT_OK : 0)
- | (pCtx->inclusionOk() ? ObjectCtx::INCLUSION_OK : 0));
-
- pExpressionObject->addField(fieldName,
- parseObject(fieldElement.Obj(), &oCtx, vps));
- break;
- }
- case String: {
- /* it's a renamed field */
- // CW TODO could also be a constant
- pExpressionObject->addField(fieldName,
- ExpressionFieldPath::parse(fieldElement.str(),
- vps));
- break;
- }
- case Bool:
- case NumberDouble:
- case NumberLong:
- case NumberInt: {
- /* it's an inclusion specification */
- if (fieldElement.trueValue()) {
- uassert(16420, "field inclusion is not allowed inside of $expressions",
- pCtx->inclusionOk());
- pExpressionObject->includePath(fieldName);
- }
- else {
- uassert(16406,
- "The top-level _id field is the only field currently supported for exclusion",
- pCtx->topLevel() && fieldName == "_id");
- pExpressionObject->excludeId(true);
- }
- break;
+ case String: {
+ /* it's a renamed field */
+ // CW TODO could also be a constant
+ pExpressionObject->addField(
+ fieldName, ExpressionFieldPath::parse(fieldElement.str(), vps));
+ break;
+ }
+ case Bool:
+ case NumberDouble:
+ case NumberLong:
+ case NumberInt: {
+ /* it's an inclusion specification */
+ if (fieldElement.trueValue()) {
+ uassert(16420,
+ "field inclusion is not allowed inside of $expressions",
+ pCtx->inclusionOk());
+ pExpressionObject->includePath(fieldName);
+ } else {
+ uassert(16406,
+ "The top-level _id field is the only field currently supported for "
+ "exclusion",
+ pCtx->topLevel() && fieldName == "_id");
+ pExpressionObject->excludeId(true);
}
- default:
- uassert(15992, str::stream() <<
- "disallowed field type " << typeName(fieldType) <<
- " in object expression (at '" <<
- fieldName << "')", false);
+ break;
}
+ default:
+ uassert(15992,
+ str::stream() << "disallowed field type " << typeName(fieldType)
+ << " in object expression (at '" << fieldName << "')",
+ false);
}
}
-
- return pExpression;
}
+ return pExpression;
+}
+
namespace {
- typedef stdx::function<intrusive_ptr<Expression>(BSONElement, const VariablesParseState&)>
- ExpressionParser;
- StringMap<ExpressionParser> expressionParserMap;
+typedef stdx::function<intrusive_ptr<Expression>(BSONElement, const VariablesParseState&)>
+ ExpressionParser;
+StringMap<ExpressionParser> expressionParserMap;
}
/** Registers an ExpressionParser so it can be called from parseExpression and friends.
@@ -313,2440 +311,2355 @@ namespace {
* As an example, if your expression looks like {"$foo": [1,2,3]} you would add this line:
* REGISTER_EXPRESSION("$foo", ExpressionFoo::parse);
*/
-#define REGISTER_EXPRESSION(key, parserFunc) \
- MONGO_INITIALIZER(BOOST_PP_CAT(addToExpressionParserMap, __LINE__))(InitializerContext*) { \
- /* prevent duplicate expressions */ \
- StringMap<ExpressionParser>::const_iterator op = expressionParserMap.find(key); \
- massert(17064, str::stream() << "Duplicate expression (" << key << ") detected at " \
- << __FILE__ << ":" << __LINE__, \
- op == expressionParserMap.end()); \
- /* register expression */ \
- expressionParserMap[key] = (parserFunc); \
- return Status::OK(); \
- }
-
- intrusive_ptr<Expression> Expression::parseExpression(
- BSONElement exprElement,
- const VariablesParseState& vps) {
-
- /* look for the specified operator */
- const char* opName = exprElement.fieldName();
- StringMap<ExpressionParser>::const_iterator op = expressionParserMap.find(opName);
- uassert(15999, str::stream() << "invalid operator '" << opName << "'",
- op != expressionParserMap.end());
-
- /* make the expression node */
- return op->second(exprElement, vps);
- }
-
- Expression::ExpressionVector ExpressionNary::parseArguments(
- BSONElement exprElement,
- const VariablesParseState& vps) {
-
- ExpressionVector out;
- if (exprElement.type() == Array) {
- BSONForEach(elem, exprElement.Obj()) {
- out.push_back(Expression::parseOperand(elem, vps));
- }
- }
- else { // assume it's an atomic operand
- out.push_back(Expression::parseOperand(exprElement, vps));
- }
+#define REGISTER_EXPRESSION(key, parserFunc) \
+ MONGO_INITIALIZER(BOOST_PP_CAT(addToExpressionParserMap, __LINE__))(InitializerContext*) { \
+ /* prevent duplicate expressions */ \
+ StringMap<ExpressionParser>::const_iterator op = expressionParserMap.find(key); \
+ massert(17064, \
+ str::stream() << "Duplicate expression (" << key << ") detected at " << __FILE__ \
+ << ":" << __LINE__, \
+ op == expressionParserMap.end()); \
+ /* register expression */ \
+ expressionParserMap[key] = (parserFunc); \
+ return Status::OK(); \
+ }
+
+intrusive_ptr<Expression> Expression::parseExpression(BSONElement exprElement,
+ const VariablesParseState& vps) {
+ /* look for the specified operator */
+ const char* opName = exprElement.fieldName();
+ StringMap<ExpressionParser>::const_iterator op = expressionParserMap.find(opName);
+ uassert(15999,
+ str::stream() << "invalid operator '" << opName << "'",
+ op != expressionParserMap.end());
+
+ /* make the expression node */
+ return op->second(exprElement, vps);
+}
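// Dispatch sketch (illustrative, not part of the diff): for an element such as
// {$abs: "$x"}, the field name "$abs" is looked up in expressionParserMap and the
// registered ExpressionAbs::parse is invoked; an unregistered name like "$noSuchOp"
// fails the 15999 "invalid operator" uassert above.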
- return out;
+Expression::ExpressionVector ExpressionNary::parseArguments(BSONElement exprElement,
+ const VariablesParseState& vps) {
+ ExpressionVector out;
+ if (exprElement.type() == Array) {
+ BSONForEach(elem, exprElement.Obj()) {
+ out.push_back(Expression::parseOperand(elem, vps));
+ }
+ } else { // assume it's an atomic operand
+ out.push_back(Expression::parseOperand(exprElement, vps));
}
- intrusive_ptr<Expression> Expression::parseOperand(
- BSONElement exprElement,
- const VariablesParseState& vps) {
+ return out;
+}
- BSONType type = exprElement.type();
+intrusive_ptr<Expression> Expression::parseOperand(BSONElement exprElement,
+ const VariablesParseState& vps) {
+ BSONType type = exprElement.type();
- if (type == String && exprElement.valuestr()[0] == '$') {
- /* if we got here, this is a field path expression */
- return ExpressionFieldPath::parse(exprElement.str(), vps);
- }
- else if (type == Object) {
- ObjectCtx oCtx(ObjectCtx::DOCUMENT_OK);
- return Expression::parseObject(exprElement.Obj(), &oCtx, vps);
- }
- else {
- return ExpressionConstant::parse(exprElement, vps);
- }
+ if (type == String && exprElement.valuestr()[0] == '$') {
+ /* if we got here, this is a field path expression */
+ return ExpressionFieldPath::parse(exprElement.str(), vps);
+ } else if (type == Object) {
+ ObjectCtx oCtx(ObjectCtx::DOCUMENT_OK);
+ return Expression::parseObject(exprElement.Obj(), &oCtx, vps);
+ } else {
+ return ExpressionConstant::parse(exprElement, vps);
}
+}
- /* ----------------------- ExpressionAbs ---------------------------- */
+/* ----------------------- ExpressionAbs ---------------------------- */
- Value ExpressionAbs::evaluateInternal(Variables* vars) const {
- Value val = vpOperand[0]->evaluateInternal(vars);
+Value ExpressionAbs::evaluateInternal(Variables* vars) const {
+ Value val = vpOperand[0]->evaluateInternal(vars);
- if (val.numeric()) {
- BSONType type = val.getType();
- if (type == NumberDouble) {
- return Value(std::abs(val.getDouble()));
- }
- else {
- long long num = val.getLong();
- uassert(28680, "can't take $abs of long long min",
- num != std::numeric_limits<long long>::min());
- long long absVal = std::abs(num);
- return type == NumberLong ? Value(absVal) : Value::createIntOrLong(absVal);
- }
- }
- else if (val.nullish()) {
- return Value(BSONNULL);
- }
- else {
- uasserted(28681, str::stream() << "$abs only supports numeric types, not "
- << typeName(val.getType()));
+ if (val.numeric()) {
+ BSONType type = val.getType();
+ if (type == NumberDouble) {
+ return Value(std::abs(val.getDouble()));
+ } else {
+ long long num = val.getLong();
+ uassert(28680,
+ "can't take $abs of long long min",
+ num != std::numeric_limits<long long>::min());
+ long long absVal = std::abs(num);
+ return type == NumberLong ? Value(absVal) : Value::createIntOrLong(absVal);
}
+ } else if (val.nullish()) {
+ return Value(BSONNULL);
+ } else {
+ uasserted(28681,
+ str::stream() << "$abs only supports numeric types, not "
+ << typeName(val.getType()));
}
+}
- REGISTER_EXPRESSION("$abs", ExpressionAbs::parse);
- const char* ExpressionAbs::getOpName() const {
- return "$abs";
- }
+REGISTER_EXPRESSION("$abs", ExpressionAbs::parse);
+const char* ExpressionAbs::getOpName() const {
+ return "$abs";
+}
- /* ------------------------- ExpressionAdd ----------------------------- */
+/* ------------------------- ExpressionAdd ----------------------------- */
- Value ExpressionAdd::evaluateInternal(Variables* vars) const {
+Value ExpressionAdd::evaluateInternal(Variables* vars) const {
+ /*
+ We'll try to return the narrowest possible result value. To do that
+ without creating intermediate Values, do the arithmetic for double
+ and integral types in parallel, tracking the current narrowest
+ type.
+ */
+ double doubleTotal = 0;
+ long long longTotal = 0;
+ BSONType totalType = NumberInt;
+ bool haveDate = false;
- /*
- We'll try to return the narrowest possible result value. To do that
- without creating intermediate Values, do the arithmetic for double
- and integral types in parallel, tracking the current narrowest
- type.
- */
- double doubleTotal = 0;
- long long longTotal = 0;
- BSONType totalType = NumberInt;
- bool haveDate = false;
-
- const size_t n = vpOperand.size();
- for (size_t i = 0; i < n; ++i) {
- Value val = vpOperand[i]->evaluateInternal(vars);
-
- if (val.numeric()) {
- totalType = Value::getWidestNumeric(totalType, val.getType());
-
- doubleTotal += val.coerceToDouble();
- longTotal += val.coerceToLong();
- }
- else if (val.getType() == Date) {
- uassert(16612, "only one Date allowed in an $add expression",
- !haveDate);
- haveDate = true;
+ const size_t n = vpOperand.size();
+ for (size_t i = 0; i < n; ++i) {
+ Value val = vpOperand[i]->evaluateInternal(vars);
- // We don't manipulate totalType here.
+ if (val.numeric()) {
+ totalType = Value::getWidestNumeric(totalType, val.getType());
- longTotal += val.getDate();
- doubleTotal += val.getDate();
- }
- else if (val.nullish()) {
- return Value(BSONNULL);
- }
- else {
- uasserted(16554, str::stream() << "$add only supports numeric or date types, not "
- << typeName(val.getType()));
- }
- }
+ doubleTotal += val.coerceToDouble();
+ longTotal += val.coerceToLong();
+ } else if (val.getType() == Date) {
+ uassert(16612, "only one Date allowed in an $add expression", !haveDate);
+ haveDate = true;
- if (haveDate) {
- if (totalType == NumberDouble)
- longTotal = static_cast<long long>(doubleTotal);
- return Value(Date_t::fromMillisSinceEpoch(longTotal));
- }
- else if (totalType == NumberLong) {
- return Value(longTotal);
- }
- else if (totalType == NumberDouble) {
- return Value(doubleTotal);
- }
- else if (totalType == NumberInt) {
- return Value::createIntOrLong(longTotal);
- }
- else {
- massert(16417, "$add resulted in a non-numeric type", false);
- }
- }
+ // We don't manipulate totalType here.
- REGISTER_EXPRESSION("$add", ExpressionAdd::parse);
- const char* ExpressionAdd::getOpName() const {
- return "$add";
+ longTotal += val.getDate();
+ doubleTotal += val.getDate();
+ } else if (val.nullish()) {
+ return Value(BSONNULL);
+ } else {
+ uasserted(16554,
+ str::stream() << "$add only supports numeric or date types, not "
+ << typeName(val.getType()));
+ }
+ }
+
+ if (haveDate) {
+ if (totalType == NumberDouble)
+ longTotal = static_cast<long long>(doubleTotal);
+ return Value(Date_t::fromMillisSinceEpoch(longTotal));
+ } else if (totalType == NumberLong) {
+ return Value(longTotal);
+ } else if (totalType == NumberDouble) {
+ return Value(doubleTotal);
+ } else if (totalType == NumberInt) {
+ return Value::createIntOrLong(longTotal);
+ } else {
+ massert(16417, "$add resulted in a non-numeric type", false);
}
+}
- /* ------------------------- ExpressionAllElementsTrue -------------------------- */
+REGISTER_EXPRESSION("$add", ExpressionAdd::parse);
+const char* ExpressionAdd::getOpName() const {
+ return "$add";
+}
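// Worked examples of the widening rules above (illustrative, not part of the diff):
//   {$add: [1, 2]}              -> NumberInt 3     (totalType stays NumberInt)
//   {$add: [1, NumberLong(2)]}  -> NumberLong 3    (getWidestNumeric widens to long)
//   {$add: [1, 2.5]}            -> double 3.5
//   {$add: [<date>, 1000]}      -> Date shifted by 1000 ms; a second Date trips 16612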
- Value ExpressionAllElementsTrue::evaluateInternal(Variables* vars) const {
- const Value arr = vpOperand[0]->evaluateInternal(vars);
- uassert(17040, str::stream() << getOpName() << "'s argument must be an array, but is "
- << typeName(arr.getType()),
- arr.getType() == Array);
- const vector<Value>& array = arr.getArray();
- for (vector<Value>::const_iterator it = array.begin(); it != array.end(); ++it) {
- if (!it->coerceToBool()) {
- return Value(false);
- }
- }
- return Value(true);
- }
+/* ------------------------- ExpressionAllElementsTrue -------------------------- */
- REGISTER_EXPRESSION("$allElementsTrue", ExpressionAllElementsTrue::parse);
- const char* ExpressionAllElementsTrue::getOpName() const {
- return "$allElementsTrue";
+Value ExpressionAllElementsTrue::evaluateInternal(Variables* vars) const {
+ const Value arr = vpOperand[0]->evaluateInternal(vars);
+ uassert(17040,
+ str::stream() << getOpName() << "'s argument must be an array, but is "
+ << typeName(arr.getType()),
+ arr.getType() == Array);
+ const vector<Value>& array = arr.getArray();
+ for (vector<Value>::const_iterator it = array.begin(); it != array.end(); ++it) {
+ if (!it->coerceToBool()) {
+ return Value(false);
+ }
}
+ return Value(true);
+}
- /* ------------------------- ExpressionAnd ----------------------------- */
-
- intrusive_ptr<Expression> ExpressionAnd::optimize() {
- /* optimize the conjunction as much as possible */
- intrusive_ptr<Expression> pE(ExpressionNary::optimize());
+REGISTER_EXPRESSION("$allElementsTrue", ExpressionAllElementsTrue::parse);
+const char* ExpressionAllElementsTrue::getOpName() const {
+ return "$allElementsTrue";
+}
- /* if the result isn't a conjunction, we can't do anything */
- ExpressionAnd *pAnd = dynamic_cast<ExpressionAnd *>(pE.get());
- if (!pAnd)
- return pE;
+/* ------------------------- ExpressionAnd ----------------------------- */
- /*
- Check the last argument on the result; if it's not constant (as
- promised by ExpressionNary::optimize(),) then there's nothing
- we can do.
- */
- const size_t n = pAnd->vpOperand.size();
- // ExpressionNary::optimize() generates an ExpressionConstant for {$and:[]}.
- verify(n > 0);
- intrusive_ptr<Expression> pLast(pAnd->vpOperand[n - 1]);
- const ExpressionConstant *pConst =
- dynamic_cast<ExpressionConstant *>(pLast.get());
- if (!pConst)
- return pE;
+intrusive_ptr<Expression> ExpressionAnd::optimize() {
+ /* optimize the conjunction as much as possible */
+ intrusive_ptr<Expression> pE(ExpressionNary::optimize());
- /*
- Evaluate and coerce the last argument to a boolean. If it's false,
- then we can replace this entire expression.
- */
- bool last = pConst->getValue().coerceToBool();
- if (!last) {
- intrusive_ptr<ExpressionConstant> pFinal(
- ExpressionConstant::create(Value(false)));
- return pFinal;
- }
+ /* if the result isn't a conjunction, we can't do anything */
+ ExpressionAnd* pAnd = dynamic_cast<ExpressionAnd*>(pE.get());
+ if (!pAnd)
+ return pE;
- /*
- If we got here, the final operand was true, so we don't need it
- anymore. If there was only one other operand, we don't need the
- conjunction either. Note we still need to keep the promise that
- the result will be a boolean.
- */
- if (n == 2) {
- intrusive_ptr<Expression> pFinal(
- ExpressionCoerceToBool::create(pAnd->vpOperand[0]));
- return pFinal;
- }
+ /*
+ Check the last argument on the result; if it's not constant (as
+ promised by ExpressionNary::optimize(),) then there's nothing
+ we can do.
+ */
+ const size_t n = pAnd->vpOperand.size();
+ // ExpressionNary::optimize() generates an ExpressionConstant for {$and:[]}.
+ verify(n > 0);
+ intrusive_ptr<Expression> pLast(pAnd->vpOperand[n - 1]);
+ const ExpressionConstant* pConst = dynamic_cast<ExpressionConstant*>(pLast.get());
+ if (!pConst)
+ return pE;
- /*
- Remove the final "true" value, and return the new expression.
+ /*
+ Evaluate and coerce the last argument to a boolean. If it's false,
+ then we can replace this entire expression.
+ */
+ bool last = pConst->getValue().coerceToBool();
+ if (!last) {
+ intrusive_ptr<ExpressionConstant> pFinal(ExpressionConstant::create(Value(false)));
+ return pFinal;
+ }
- CW TODO:
- Note that because of any implicit conversions, we may need to
- apply an implicit boolean conversion.
- */
- pAnd->vpOperand.resize(n - 1);
- return pE;
+ /*
+ If we got here, the final operand was true, so we don't need it
+ anymore. If there was only one other operand, we don't need the
+ conjunction either. Note we still need to keep the promise that
+ the result will be a boolean.
+ */
+ if (n == 2) {
+ intrusive_ptr<Expression> pFinal(ExpressionCoerceToBool::create(pAnd->vpOperand[0]));
+ return pFinal;
}
- Value ExpressionAnd::evaluateInternal(Variables* vars) const {
- const size_t n = vpOperand.size();
- for(size_t i = 0; i < n; ++i) {
- Value pValue(vpOperand[i]->evaluateInternal(vars));
- if (!pValue.coerceToBool())
- return Value(false);
- }
+ /*
+ Remove the final "true" value, and return the new expression.
- return Value(true);
- }
+ CW TODO:
+ Note that because of any implicit conversions, we may need to
+ apply an implicit boolean conversion.
+ */
+ pAnd->vpOperand.resize(n - 1);
+ return pE;
+}
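// Folding examples for the optimization above (illustrative, not part of the diff):
//   {$and: ["$a", false]}       -> the constant false
//   {$and: ["$a", true]}        -> a boolean coercion of "$a" (ExpressionCoerceToBool)
//   {$and: ["$a", "$b", true]}  -> {$and: ["$a", "$b"]} with the trailing true dropped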
- REGISTER_EXPRESSION("$and", ExpressionAnd::parse);
- const char* ExpressionAnd::getOpName() const {
- return "$and";
+Value ExpressionAnd::evaluateInternal(Variables* vars) const {
+ const size_t n = vpOperand.size();
+ for (size_t i = 0; i < n; ++i) {
+ Value pValue(vpOperand[i]->evaluateInternal(vars));
+ if (!pValue.coerceToBool())
+ return Value(false);
}
- /* ------------------------- ExpressionAnyElementTrue -------------------------- */
+ return Value(true);
+}
- Value ExpressionAnyElementTrue::evaluateInternal(Variables* vars) const {
- const Value arr = vpOperand[0]->evaluateInternal(vars);
- uassert(17041, str::stream() << getOpName() << "'s argument must be an array, but is "
- << typeName(arr.getType()),
- arr.getType() == Array);
- const vector<Value>& array = arr.getArray();
- for (vector<Value>::const_iterator it = array.begin(); it != array.end(); ++it) {
- if (it->coerceToBool()) {
- return Value(true);
- }
+REGISTER_EXPRESSION("$and", ExpressionAnd::parse);
+const char* ExpressionAnd::getOpName() const {
+ return "$and";
+}
+
+/* ------------------------- ExpressionAnyElementTrue -------------------------- */
+
+Value ExpressionAnyElementTrue::evaluateInternal(Variables* vars) const {
+ const Value arr = vpOperand[0]->evaluateInternal(vars);
+ uassert(17041,
+ str::stream() << getOpName() << "'s argument must be an array, but is "
+ << typeName(arr.getType()),
+ arr.getType() == Array);
+ const vector<Value>& array = arr.getArray();
+ for (vector<Value>::const_iterator it = array.begin(); it != array.end(); ++it) {
+ if (it->coerceToBool()) {
+ return Value(true);
}
- return Value(false);
}
+ return Value(false);
+}
- REGISTER_EXPRESSION("$anyElementTrue", ExpressionAnyElementTrue::parse);
- const char* ExpressionAnyElementTrue::getOpName() const {
- return "$anyElementTrue";
- }
+REGISTER_EXPRESSION("$anyElementTrue", ExpressionAnyElementTrue::parse);
+const char* ExpressionAnyElementTrue::getOpName() const {
+ return "$anyElementTrue";
+}
- /* ------------------------- ExpressionArrayElemAt -------------------------- */
+/* ------------------------- ExpressionArrayElemAt -------------------------- */
+
+Value ExpressionArrayElemAt::evaluateInternal(Variables* vars) const {
+ const Value array = vpOperand[0]->evaluateInternal(vars);
+ const Value indexArg = vpOperand[1]->evaluateInternal(vars);
+
+ if (array.nullish() || indexArg.nullish()) {
+ return Value(BSONNULL);
+ }
+
+ uassert(28689,
+ str::stream() << getOpName() << "'s first argument must be an array, but is "
+ << typeName(array.getType()),
+ array.getType() == Array);
+ uassert(28690,
+ str::stream() << getOpName() << "'s second argument must be a numeric value,"
+ << " but is " << typeName(indexArg.getType()),
+ indexArg.numeric());
+ uassert(28691,
+ str::stream() << getOpName() << "'s second argument must be representable as"
+ << " a 32-bit integer: " << indexArg.coerceToDouble(),
+ indexArg.integral());
+
+ long long i = indexArg.coerceToLong();
+ if (i < 0 && static_cast<size_t>(std::abs(i)) > array.getArrayLength()) {
+ // Positive indices that are too large are handled automatically by Value.
+ return Value();
+ } else if (i < 0) {
+ // Index from the back of the array.
+ i = array.getArrayLength() + i;
+ }
+ const size_t index = static_cast<size_t>(i);
+ return array[index];
+}
- Value ExpressionArrayElemAt::evaluateInternal(Variables* vars) const {
- const Value array = vpOperand[0]->evaluateInternal(vars);
- const Value indexArg = vpOperand[1]->evaluateInternal(vars);
+REGISTER_EXPRESSION("$arrayElemAt", ExpressionArrayElemAt::parse);
+const char* ExpressionArrayElemAt::getOpName() const {
+ return "$arrayElemAt";
+}
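// Indexing examples for the bounds handling above (illustrative, not part of the diff):
//   {$arrayElemAt: [[10, 20, 30], 1]}   -> 20
//   {$arrayElemAt: [[10, 20, 30], -1]}  -> 30       (negative indexes count from the end)
//   {$arrayElemAt: [[10, 20, 30], -4]}  -> missing  (|i| exceeds the array length)
//   {$arrayElemAt: [[10, 20, 30], 99]}  -> missing  (out-of-range positives handled by Value)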
- if (array.nullish() || indexArg.nullish()) {
- return Value(BSONNULL);
- }
+/* -------------------- ExpressionCoerceToBool ------------------------- */
- uassert(28689, str::stream() << getOpName() << "'s first argument must be an array, but is "
- << typeName(array.getType()),
- array.getType() == Array);
- uassert(28690, str::stream() << getOpName() << "'s second argument must be a numeric value,"
- << " but is " << typeName(indexArg.getType()),
- indexArg.numeric());
- uassert(28691, str::stream() << getOpName() << "'s second argument must be representable as"
- << " a 32-bit integer: " << indexArg.coerceToDouble(),
- indexArg.integral());
-
- long long i = indexArg.coerceToLong();
- if (i < 0 && static_cast<size_t>(std::abs(i)) > array.getArrayLength()) {
- // Positive indices that are too large are handled automatically by Value.
- return Value();
- }
- else if (i < 0) {
- // Index from the back of the array.
- i = array.getArrayLength() + i;
- }
- const size_t index = static_cast<size_t>(i);
- return array[index];
- }
+intrusive_ptr<ExpressionCoerceToBool> ExpressionCoerceToBool::create(
+ const intrusive_ptr<Expression>& pExpression) {
+ intrusive_ptr<ExpressionCoerceToBool> pNew(new ExpressionCoerceToBool(pExpression));
+ return pNew;
+}
- REGISTER_EXPRESSION("$arrayElemAt", ExpressionArrayElemAt::parse);
- const char* ExpressionArrayElemAt::getOpName() const {
- return "$arrayElemAt";
- }
+ExpressionCoerceToBool::ExpressionCoerceToBool(const intrusive_ptr<Expression>& pTheExpression)
+ : Expression(), pExpression(pTheExpression) {}
- /* -------------------- ExpressionCoerceToBool ------------------------- */
+intrusive_ptr<Expression> ExpressionCoerceToBool::optimize() {
+ /* optimize the operand */
+ pExpression = pExpression->optimize();
- intrusive_ptr<ExpressionCoerceToBool> ExpressionCoerceToBool::create(
- const intrusive_ptr<Expression> &pExpression) {
- intrusive_ptr<ExpressionCoerceToBool> pNew(
- new ExpressionCoerceToBool(pExpression));
- return pNew;
- }
+ /* if the operand already produces a boolean, then we don't need this */
+ /* LATER - Expression to support a "typeof" query? */
+ Expression* pE = pExpression.get();
+ if (dynamic_cast<ExpressionAnd*>(pE) || dynamic_cast<ExpressionOr*>(pE) ||
+ dynamic_cast<ExpressionNot*>(pE) || dynamic_cast<ExpressionCoerceToBool*>(pE))
+ return pExpression;
- ExpressionCoerceToBool::ExpressionCoerceToBool(
- const intrusive_ptr<Expression> &pTheExpression):
- Expression(),
- pExpression(pTheExpression) {
- }
+ return intrusive_ptr<Expression>(this);
+}
- intrusive_ptr<Expression> ExpressionCoerceToBool::optimize() {
- /* optimize the operand */
- pExpression = pExpression->optimize();
+void ExpressionCoerceToBool::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ pExpression->addDependencies(deps);
+}
- /* if the operand already produces a boolean, then we don't need this */
- /* LATER - Expression to support a "typeof" query? */
- Expression *pE = pExpression.get();
- if (dynamic_cast<ExpressionAnd *>(pE) ||
- dynamic_cast<ExpressionOr *>(pE) ||
- dynamic_cast<ExpressionNot *>(pE) ||
- dynamic_cast<ExpressionCoerceToBool *>(pE))
- return pExpression;
+Value ExpressionCoerceToBool::evaluateInternal(Variables* vars) const {
+ Value pResult(pExpression->evaluateInternal(vars));
+ bool b = pResult.coerceToBool();
+ if (b)
+ return Value(true);
+ return Value(false);
+}
- return intrusive_ptr<Expression>(this);
- }
+Value ExpressionCoerceToBool::serialize(bool explain) const {
+ // When not explaining, serialize to an $and expression. When parsed, the $and expression
+ // will be optimized back into an ExpressionCoerceToBool.
+ const char* name = explain ? "$coerceToBool" : "$and";
+ return Value(DOC(name << DOC_ARRAY(pExpression->serialize(explain))));
+}
- void ExpressionCoerceToBool::addDependencies(DepsTracker* deps, vector<string>* path) const {
- pExpression->addDependencies(deps);
- }
+/* ----------------------- ExpressionCompare --------------------------- */
+
+REGISTER_EXPRESSION("$cmp",
+ stdx::bind(ExpressionCompare::parse,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ ExpressionCompare::CMP));
+REGISTER_EXPRESSION("$eq",
+ stdx::bind(ExpressionCompare::parse,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ ExpressionCompare::EQ));
+REGISTER_EXPRESSION("$gt",
+ stdx::bind(ExpressionCompare::parse,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ ExpressionCompare::GT));
+REGISTER_EXPRESSION("$gte",
+ stdx::bind(ExpressionCompare::parse,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ ExpressionCompare::GTE));
+REGISTER_EXPRESSION("$lt",
+ stdx::bind(ExpressionCompare::parse,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ ExpressionCompare::LT));
+REGISTER_EXPRESSION("$lte",
+ stdx::bind(ExpressionCompare::parse,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ ExpressionCompare::LTE));
+REGISTER_EXPRESSION("$ne",
+ stdx::bind(ExpressionCompare::parse,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ ExpressionCompare::NE));
+intrusive_ptr<Expression> ExpressionCompare::parse(BSONElement bsonExpr,
+ const VariablesParseState& vps,
+ CmpOp op) {
+ intrusive_ptr<ExpressionCompare> expr = new ExpressionCompare(op);
+ ExpressionVector args = parseArguments(bsonExpr, vps);
+ expr->validateArguments(args);
+ expr->vpOperand = args;
+ return expr;
+}
- Value ExpressionCoerceToBool::evaluateInternal(Variables* vars) const {
- Value pResult(pExpression->evaluateInternal(vars));
- bool b = pResult.coerceToBool();
- if (b)
- return Value(true);
- return Value(false);
- }
-
- Value ExpressionCoerceToBool::serialize(bool explain) const {
- // When not explaining, serialize to an $and expression. When parsed, the $and expression
- // will be optimized back into a ExpressionCoerceToBool.
- const char* name = explain ? "$coerceToBool" : "$and";
- return Value(DOC(name << DOC_ARRAY(pExpression->serialize(explain))));
- }
-
- /* ----------------------- ExpressionCompare --------------------------- */
-
- REGISTER_EXPRESSION("$cmp",
- stdx::bind(ExpressionCompare::parse, stdx::placeholders::_1, stdx::placeholders::_2, ExpressionCompare::CMP));
- REGISTER_EXPRESSION("$eq",
- stdx::bind(ExpressionCompare::parse, stdx::placeholders::_1, stdx::placeholders::_2, ExpressionCompare::EQ));
- REGISTER_EXPRESSION("$gt",
- stdx::bind(ExpressionCompare::parse, stdx::placeholders::_1, stdx::placeholders::_2, ExpressionCompare::GT));
- REGISTER_EXPRESSION("$gte",
- stdx::bind(ExpressionCompare::parse, stdx::placeholders::_1, stdx::placeholders::_2, ExpressionCompare::GTE));
- REGISTER_EXPRESSION("$lt",
- stdx::bind(ExpressionCompare::parse, stdx::placeholders::_1, stdx::placeholders::_2, ExpressionCompare::LT));
- REGISTER_EXPRESSION("$lte",
- stdx::bind(ExpressionCompare::parse, stdx::placeholders::_1, stdx::placeholders::_2, ExpressionCompare::LTE));
- REGISTER_EXPRESSION("$ne",
- stdx::bind(ExpressionCompare::parse, stdx::placeholders::_1, stdx::placeholders::_2, ExpressionCompare::NE));
- intrusive_ptr<Expression> ExpressionCompare::parse(
- BSONElement bsonExpr,
- const VariablesParseState& vps,
- CmpOp op) {
-
- intrusive_ptr<ExpressionCompare> expr = new ExpressionCompare(op);
- ExpressionVector args = parseArguments(bsonExpr, vps);
- expr->validateArguments(args);
- expr->vpOperand = args;
- return expr;
- }
-
- ExpressionCompare::ExpressionCompare(CmpOp theCmpOp)
- : cmpOp(theCmpOp)
- {}
+ExpressionCompare::ExpressionCompare(CmpOp theCmpOp) : cmpOp(theCmpOp) {}
namespace {
- // Lookup table for truth value returns
- struct CmpLookup {
- const bool truthValue[3]; // truth value for -1, 0, 1
- const ExpressionCompare::CmpOp reverse; // reverse(b,a) returns the same as op(a,b)
- const char name[5]; // string name with trailing '\0'
- };
- static const CmpLookup cmpLookup[7] = {
- /* -1 0 1 reverse name */
- /* EQ */ { { false, true, false }, ExpressionCompare::EQ, "$eq" },
- /* NE */ { { true, false, true }, ExpressionCompare::NE, "$ne" },
- /* GT */ { { false, false, true }, ExpressionCompare::LT, "$gt" },
- /* GTE */ { { false, true, true }, ExpressionCompare::LTE, "$gte" },
- /* LT */ { { true, false, false }, ExpressionCompare::GT, "$lt" },
- /* LTE */ { { true, true, false }, ExpressionCompare::GTE, "$lte" },
-
- // CMP is special. Only name is used.
- /* CMP */ { { false, false, false }, ExpressionCompare::CMP, "$cmp" },
- };
-}
-
- Value ExpressionCompare::evaluateInternal(Variables* vars) const {
- Value pLeft(vpOperand[0]->evaluateInternal(vars));
- Value pRight(vpOperand[1]->evaluateInternal(vars));
-
- int cmp = Value::compare(pLeft, pRight);
-
- // Make cmp one of 1, 0, or -1.
- if (cmp == 0) {
- // leave as 0
- } else if (cmp < 0) {
- cmp = -1;
- } else if (cmp > 0) {
- cmp = 1;
- }
+// Lookup table for truth value returns
+struct CmpLookup {
+ const bool truthValue[3]; // truth value for -1, 0, 1
+ const ExpressionCompare::CmpOp reverse; // reverse(b,a) returns the same as op(a,b)
+ const char name[5]; // string name with trailing '\0'
+};
+static const CmpLookup cmpLookup[7] = {
+ /* -1 0 1 reverse name */
+ /* EQ */ {{false, true, false}, ExpressionCompare::EQ, "$eq"},
+ /* NE */ {{true, false, true}, ExpressionCompare::NE, "$ne"},
+ /* GT */ {{false, false, true}, ExpressionCompare::LT, "$gt"},
+ /* GTE */ {{false, true, true}, ExpressionCompare::LTE, "$gte"},
+ /* LT */ {{true, false, false}, ExpressionCompare::GT, "$lt"},
+ /* LTE */ {{true, true, false}, ExpressionCompare::GTE, "$lte"},
+
+ // CMP is special. Only name is used.
+ /* CMP */ {{false, false, false}, ExpressionCompare::CMP, "$cmp"},
+};
+}
- if (cmpOp == CMP)
- return Value(cmp);
+Value ExpressionCompare::evaluateInternal(Variables* vars) const {
+ Value pLeft(vpOperand[0]->evaluateInternal(vars));
+ Value pRight(vpOperand[1]->evaluateInternal(vars));
- bool returnValue = cmpLookup[cmpOp].truthValue[cmp + 1];
- return Value(returnValue);
- }
+ int cmp = Value::compare(pLeft, pRight);
- const char* ExpressionCompare::getOpName() const {
- return cmpLookup[cmpOp].name;
+ // Make cmp one of 1, 0, or -1.
+ if (cmp == 0) {
+ // leave as 0
+ } else if (cmp < 0) {
+ cmp = -1;
+ } else if (cmp > 0) {
+ cmp = 1;
}
- /* ------------------------- ExpressionConcat ----------------------------- */
+ if (cmpOp == CMP)
+ return Value(cmp);
- Value ExpressionConcat::evaluateInternal(Variables* vars) const {
- const size_t n = vpOperand.size();
+ bool returnValue = cmpLookup[cmpOp].truthValue[cmp + 1];
+ return Value(returnValue);
+}
- StringBuilder result;
- for (size_t i = 0; i < n; ++i) {
- Value val = vpOperand[i]->evaluateInternal(vars);
- if (val.nullish())
- return Value(BSONNULL);
+const char* ExpressionCompare::getOpName() const {
+ return cmpLookup[cmpOp].name;
+}
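// Lookup sketch (illustrative, not part of the diff): Value::compare is clamped to
// -1/0/1 and shifted by one, so {$gte: [a, b]} returns cmpLookup[GTE].truthValue[cmp + 1],
// which is true when compare(a, b) is 0 or 1 and false when it is -1; $cmp skips the
// table and returns the clamped integer itself.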
- uassert(16702, str::stream() << "$concat only supports strings, not "
- << typeName(val.getType()),
- val.getType() == String);
+/* ------------------------- ExpressionConcat ----------------------------- */
- result << val.coerceToString();
- }
+Value ExpressionConcat::evaluateInternal(Variables* vars) const {
+ const size_t n = vpOperand.size();
- return Value(result.str());
- }
+ StringBuilder result;
+ for (size_t i = 0; i < n; ++i) {
+ Value val = vpOperand[i]->evaluateInternal(vars);
+ if (val.nullish())
+ return Value(BSONNULL);
+
+ uassert(16702,
+ str::stream() << "$concat only supports strings, not " << typeName(val.getType()),
+ val.getType() == String);
- REGISTER_EXPRESSION("$concat", ExpressionConcat::parse);
- const char* ExpressionConcat::getOpName() const {
- return "$concat";
+ result << val.coerceToString();
}
- /* ------------------------- ExpressionConcatArrays ----------------------------- */
+ return Value(result.str());
+}
- Value ExpressionConcatArrays::evaluateInternal(Variables* vars) const {
- const size_t n = vpOperand.size();
- vector<Value> values;
+REGISTER_EXPRESSION("$concat", ExpressionConcat::parse);
+const char* ExpressionConcat::getOpName() const {
+ return "$concat";
+}
- for (size_t i = 0; i < n; ++i) {
- Value val = vpOperand[i]->evaluateInternal(vars);
- if (val.nullish()) {
- return Value(BSONNULL);
- }
+/* ------------------------- ExpressionConcatArrays ----------------------------- */
- uassert(28664, str::stream() << "$concatArrays only supports arrays, not "
- << typeName(val.getType()),
- val.getType() == Array);
+Value ExpressionConcatArrays::evaluateInternal(Variables* vars) const {
+ const size_t n = vpOperand.size();
+ vector<Value> values;
- const auto& subValues = val.getArray();
- values.insert(values.end(), subValues.begin(), subValues.end());
+ for (size_t i = 0; i < n; ++i) {
+ Value val = vpOperand[i]->evaluateInternal(vars);
+ if (val.nullish()) {
+ return Value(BSONNULL);
}
- return Value(std::move(values));
- }
-
- REGISTER_EXPRESSION("$concatArrays", ExpressionConcatArrays::parse);
- const char* ExpressionConcatArrays::getOpName() const {
- return "$concatArrays";
- }
- /* ----------------------- ExpressionCond ------------------------------ */
+ uassert(28664,
+ str::stream() << "$concatArrays only supports arrays, not "
+ << typeName(val.getType()),
+ val.getType() == Array);
- Value ExpressionCond::evaluateInternal(Variables* vars) const {
- Value pCond(vpOperand[0]->evaluateInternal(vars));
- int idx = pCond.coerceToBool() ? 1 : 2;
- return vpOperand[idx]->evaluateInternal(vars);
+ const auto& subValues = val.getArray();
+ values.insert(values.end(), subValues.begin(), subValues.end());
}
+ return Value(std::move(values));
+}
- intrusive_ptr<Expression> ExpressionCond::parse(
- BSONElement expr,
- const VariablesParseState& vps) {
+REGISTER_EXPRESSION("$concatArrays", ExpressionConcatArrays::parse);
+const char* ExpressionConcatArrays::getOpName() const {
+ return "$concatArrays";
+}
- if (expr.type() != Object) {
- return Base::parse(expr, vps);
- }
- verify(str::equals(expr.fieldName(), "$cond"));
-
- intrusive_ptr<ExpressionCond> ret = new ExpressionCond();
- ret->vpOperand.resize(3);
-
- const BSONObj args = expr.embeddedObject();
- BSONForEach(arg, args) {
- if (str::equals(arg.fieldName(), "if")) {
- ret->vpOperand[0] = parseOperand(arg, vps);
- } else if (str::equals(arg.fieldName(), "then")) {
- ret->vpOperand[1] = parseOperand(arg, vps);
- } else if (str::equals(arg.fieldName(), "else")) {
- ret->vpOperand[2] = parseOperand(arg, vps);
- } else {
- uasserted(17083, str::stream()
- << "Unrecognized parameter to $cond: " << arg.fieldName());
- }
- }
+/* ----------------------- ExpressionCond ------------------------------ */
- uassert(17080, "Missing 'if' parameter to $cond",
- ret->vpOperand[0]);
- uassert(17081, "Missing 'then' parameter to $cond",
- ret->vpOperand[1]);
- uassert(17082, "Missing 'else' parameter to $cond",
- ret->vpOperand[2]);
+Value ExpressionCond::evaluateInternal(Variables* vars) const {
+ Value pCond(vpOperand[0]->evaluateInternal(vars));
+ int idx = pCond.coerceToBool() ? 1 : 2;
+ return vpOperand[idx]->evaluateInternal(vars);
+}
- return ret;
+intrusive_ptr<Expression> ExpressionCond::parse(BSONElement expr, const VariablesParseState& vps) {
+ if (expr.type() != Object) {
+ return Base::parse(expr, vps);
}
+ verify(str::equals(expr.fieldName(), "$cond"));
+
+ intrusive_ptr<ExpressionCond> ret = new ExpressionCond();
+ ret->vpOperand.resize(3);
- REGISTER_EXPRESSION("$cond", ExpressionCond::parse);
- const char* ExpressionCond::getOpName() const {
- return "$cond";
+ const BSONObj args = expr.embeddedObject();
+ BSONForEach(arg, args) {
+ if (str::equals(arg.fieldName(), "if")) {
+ ret->vpOperand[0] = parseOperand(arg, vps);
+ } else if (str::equals(arg.fieldName(), "then")) {
+ ret->vpOperand[1] = parseOperand(arg, vps);
+ } else if (str::equals(arg.fieldName(), "else")) {
+ ret->vpOperand[2] = parseOperand(arg, vps);
+ } else {
+ uasserted(17083,
+ str::stream() << "Unrecognized parameter to $cond: " << arg.fieldName());
+ }
}
- /* ---------------------- ExpressionConstant --------------------------- */
+ uassert(17080, "Missing 'if' parameter to $cond", ret->vpOperand[0]);
+ uassert(17081, "Missing 'then' parameter to $cond", ret->vpOperand[1]);
+ uassert(17082, "Missing 'else' parameter to $cond", ret->vpOperand[2]);
- intrusive_ptr<Expression> ExpressionConstant::parse(
- BSONElement exprElement,
- const VariablesParseState& vps) {
- return new ExpressionConstant(Value(exprElement));
- }
+ return ret;
+}
+REGISTER_EXPRESSION("$cond", ExpressionCond::parse);
+const char* ExpressionCond::getOpName() const {
+ return "$cond";
+}
- intrusive_ptr<ExpressionConstant> ExpressionConstant::create(const Value& pValue) {
- intrusive_ptr<ExpressionConstant> pEC(new ExpressionConstant(pValue));
- return pEC;
- }
+/* ---------------------- ExpressionConstant --------------------------- */
- ExpressionConstant::ExpressionConstant(const Value& pTheValue): pValue(pTheValue) {}
+intrusive_ptr<Expression> ExpressionConstant::parse(BSONElement exprElement,
+ const VariablesParseState& vps) {
+ return new ExpressionConstant(Value(exprElement));
+}
- intrusive_ptr<Expression> ExpressionConstant::optimize() {
- /* nothing to do */
- return intrusive_ptr<Expression>(this);
- }
+intrusive_ptr<ExpressionConstant> ExpressionConstant::create(const Value& pValue) {
+ intrusive_ptr<ExpressionConstant> pEC(new ExpressionConstant(pValue));
+ return pEC;
+}
- void ExpressionConstant::addDependencies(DepsTracker* deps, vector<string>* path) const {
- /* nothing to do */
- }
+ExpressionConstant::ExpressionConstant(const Value& pTheValue) : pValue(pTheValue) {}
- Value ExpressionConstant::evaluateInternal(Variables* vars) const {
- return pValue;
- }
- Value ExpressionConstant::serialize(bool explain) const {
- return serializeConstant(pValue);
- }
+intrusive_ptr<Expression> ExpressionConstant::optimize() {
+ /* nothing to do */
+ return intrusive_ptr<Expression>(this);
+}
- REGISTER_EXPRESSION("$const", ExpressionConstant::parse);
- REGISTER_EXPRESSION("$literal", ExpressionConstant::parse); // alias
- const char* ExpressionConstant::getOpName() const {
- return "$const";
- }
+void ExpressionConstant::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ /* nothing to do */
+}
+
+Value ExpressionConstant::evaluateInternal(Variables* vars) const {
+ return pValue;
+}
+
+Value ExpressionConstant::serialize(bool explain) const {
+ return serializeConstant(pValue);
+}
- /* ---------------------- ExpressionDateToString ----------------------- */
+REGISTER_EXPRESSION("$const", ExpressionConstant::parse);
+REGISTER_EXPRESSION("$literal", ExpressionConstant::parse); // alias
+const char* ExpressionConstant::getOpName() const {
+ return "$const";
+}
- REGISTER_EXPRESSION("$dateToString", ExpressionDateToString::parse);
- intrusive_ptr<Expression> ExpressionDateToString::parse(
- BSONElement expr,
- const VariablesParseState& vps) {
+/* ---------------------- ExpressionDateToString ----------------------- */
- verify(str::equals(expr.fieldName(), "$dateToString"));
+REGISTER_EXPRESSION("$dateToString", ExpressionDateToString::parse);
+intrusive_ptr<Expression> ExpressionDateToString::parse(BSONElement expr,
+ const VariablesParseState& vps) {
+ verify(str::equals(expr.fieldName(), "$dateToString"));
- uassert(18629, "$dateToString only supports an object as its argument",
- expr.type() == Object);
+ uassert(18629, "$dateToString only supports an object as its argument", expr.type() == Object);
- BSONElement formatElem;
- BSONElement dateElem;
- const BSONObj args = expr.embeddedObject();
- BSONForEach(arg, args) {
- if (str::equals(arg.fieldName(), "format")) {
- formatElem = arg;
- } else if (str::equals(arg.fieldName(), "date")) {
- dateElem = arg;
- } else {
- uasserted(18534, str::stream() << "Unrecognized argument to $dateToString: "
- << arg.fieldName());
- }
+ BSONElement formatElem;
+ BSONElement dateElem;
+ const BSONObj args = expr.embeddedObject();
+ BSONForEach(arg, args) {
+ if (str::equals(arg.fieldName(), "format")) {
+ formatElem = arg;
+ } else if (str::equals(arg.fieldName(), "date")) {
+ dateElem = arg;
+ } else {
+ uasserted(18534,
+ str::stream()
+ << "Unrecognized argument to $dateToString: " << arg.fieldName());
}
+ }
- uassert(18627, "Missing 'format' parameter to $dateToString",
- !formatElem.eoo());
- uassert(18628, "Missing 'date' parameter to $dateToString",
- !dateElem.eoo());
-
- uassert(18533, "The 'format' parameter to $dateToString must be a string literal",
- formatElem.type() == String);
+ uassert(18627, "Missing 'format' parameter to $dateToString", !formatElem.eoo());
+ uassert(18628, "Missing 'date' parameter to $dateToString", !dateElem.eoo());
- const string format = formatElem.str();
+ uassert(18533,
+ "The 'format' parameter to $dateToString must be a string literal",
+ formatElem.type() == String);
- validateFormat(format);
+ const string format = formatElem.str();
- return new ExpressionDateToString(format, parseOperand(dateElem, vps));
- }
+ validateFormat(format);
- ExpressionDateToString::ExpressionDateToString(const string& format,
- intrusive_ptr<Expression> date)
- : _format(format)
- , _date(date)
- {}
+ return new ExpressionDateToString(format, parseOperand(dateElem, vps));
+}
- intrusive_ptr<Expression> ExpressionDateToString::optimize() {
- _date = _date->optimize();
- return this;
- }
+ExpressionDateToString::ExpressionDateToString(const string& format, intrusive_ptr<Expression> date)
+ : _format(format), _date(date) {}
- Value ExpressionDateToString::serialize(bool explain) const {
- return Value(DOC("$dateToString" << DOC("format" << _format
- << "date" << _date->serialize(explain)
- )));
- }
+intrusive_ptr<Expression> ExpressionDateToString::optimize() {
+ _date = _date->optimize();
+ return this;
+}
- Value ExpressionDateToString::evaluateInternal(Variables* vars) const {
- const Value date = _date->evaluateInternal(vars);
+Value ExpressionDateToString::serialize(bool explain) const {
+ return Value(
+ DOC("$dateToString" << DOC("format" << _format << "date" << _date->serialize(explain))));
+}
- if (date.nullish()) {
- return Value(BSONNULL);
- }
+Value ExpressionDateToString::evaluateInternal(Variables* vars) const {
+ const Value date = _date->evaluateInternal(vars);
- return Value(formatDate(_format, date.coerceToTm(), date.coerceToDate()));
+ if (date.nullish()) {
+ return Value(BSONNULL);
}
- // verifies that any '%' is followed by a valid format character, and that
- // the format string ends with an even number of '%' symbols
- void ExpressionDateToString::validateFormat(const std::string& format) {
- for (string::const_iterator it = format.begin(); it != format.end(); ++it) {
- if (*it != '%') {
- continue;
- }
+ return Value(formatDate(_format, date.coerceToTm(), date.coerceToDate()));
+}
+
+// verifies that any '%' is followed by a valid format character, and that
+// the format string ends with an even number of '%' symbols
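+// e.g. "%Y-%m-%dT%H:%M:%S.%LZ" and "100%%" pass validation; "%q" or a trailing single "%" are rejected.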
+void ExpressionDateToString::validateFormat(const std::string& format) {
+ for (string::const_iterator it = format.begin(); it != format.end(); ++it) {
+ if (*it != '%') {
+ continue;
+ }
- ++it; // next character must be format modifier
- uassert(18535, "Unmatched '%' at end of $dateToString format string",
- it != format.end());
+ ++it; // next character must be format modifier
+ uassert(18535, "Unmatched '%' at end of $dateToString format string", it != format.end());
- switch (*it) {
+ switch (*it) {
// all of these fall through intentionally
- case '%': case 'Y': case 'm':
- case 'd': case 'H': case 'M':
- case 'S': case 'L': case 'j':
- case 'w': case 'U':
+ case '%':
+ case 'Y':
+ case 'm':
+ case 'd':
+ case 'H':
+ case 'M':
+ case 'S':
+ case 'L':
+ case 'j':
+ case 'w':
+ case 'U':
break;
default:
- uasserted(18536, str::stream() << "Invalid format character '%"
- << *it
- << "' in $dateToString format string");
- }
+ uasserted(18536,
+ str::stream() << "Invalid format character '%" << *it
+ << "' in $dateToString format string");
}
}
+}
- string ExpressionDateToString::formatDate(const string& format,
- const tm& tm,
- const long long date) {
- StringBuilder formatted;
- for (string::const_iterator it = format.begin(); it != format.end(); ++it) {
- if (*it != '%') {
- formatted << *it;
- continue;
- }
+string ExpressionDateToString::formatDate(const string& format,
+ const tm& tm,
+ const long long date) {
+ StringBuilder formatted;
+ for (string::const_iterator it = format.begin(); it != format.end(); ++it) {
+ if (*it != '%') {
+ formatted << *it;
+ continue;
+ }
- ++it; // next character is format modifier
- invariant(it != format.end()); // checked in validateFormat
+ ++it; // next character is format modifier
+ invariant(it != format.end()); // checked in validateFormat
- switch (*it) {
- case '%': // Escaped literal %
+ switch (*it) {
+ case '%': // Escaped literal %
formatted << '%';
break;
- case 'Y': // Year
- {
- const int year = ExpressionYear::extract(tm);
- uassert(18537, str::stream() << "$dateToString is only defined on year 0-9999,"
- << " tried to use year "
- << year,
- (year >= 0) && (year <= 9999));
- insertPadded(formatted, year, 4);
- break;
- }
- case 'm': // Month
+ case 'Y': // Year
+ {
+ const int year = ExpressionYear::extract(tm);
+ uassert(18537,
+ str::stream() << "$dateToString is only defined on year 0-9999,"
+ << " tried to use year " << year,
+ (year >= 0) && (year <= 9999));
+ insertPadded(formatted, year, 4);
+ break;
+ }
+ case 'm': // Month
insertPadded(formatted, ExpressionMonth::extract(tm), 2);
break;
- case 'd': // Day of month
+ case 'd': // Day of month
insertPadded(formatted, ExpressionDayOfMonth::extract(tm), 2);
break;
- case 'H': // Hour
+ case 'H': // Hour
insertPadded(formatted, ExpressionHour::extract(tm), 2);
break;
- case 'M': // Minute
+ case 'M': // Minute
insertPadded(formatted, ExpressionMinute::extract(tm), 2);
break;
- case 'S': // Second
+ case 'S': // Second
insertPadded(formatted, ExpressionSecond::extract(tm), 2);
break;
- case 'L': // Millisecond
+ case 'L': // Millisecond
insertPadded(formatted, ExpressionMillisecond::extract(date), 3);
break;
- case 'j': // Day of year
+ case 'j': // Day of year
insertPadded(formatted, ExpressionDayOfYear::extract(tm), 3);
break;
- case 'w': // Day of week
+ case 'w': // Day of week
insertPadded(formatted, ExpressionDayOfWeek::extract(tm), 1);
break;
- case 'U': // Week
+ case 'U': // Week
insertPadded(formatted, ExpressionWeek::extract(tm), 2);
break;
default:
// Should never happen as format is pre-validated
invariant(false);
- }
}
- return formatted.str();
- }
-
- // Only works with 1 <= spaces <= 4 and 0 <= number <= 9999.
- // If spaces is less than the digit count of number we simply insert the number
- // without padding.
- void ExpressionDateToString::insertPadded(StringBuilder& sb, int number, int width) {
- invariant(width >= 1);
- invariant(width <= 4);
- invariant(number >= 0);
- invariant(number <= 9999);
-
- int digits = 1;
-
- if (number >= 1000) {
- digits = 4;
- } else if (number >= 100) {
- digits = 3;
- } else if (number >= 10) {
- digits = 2;
- }
-
- if (width > digits) {
- sb.write("0000", width - digits);
- }
- sb << number;
}
+ return formatted.str();
+}
- void ExpressionDateToString::addDependencies(DepsTracker* deps, vector<string> *path) const {
- _date->addDependencies(deps);
- }
+// Only works with 1 <= width <= 4 and 0 <= number <= 9999.
+// If width is less than the digit count of number, we simply insert the number
+// without padding.
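+// e.g. insertPadded(sb, 7, 3) appends "007", while insertPadded(sb, 1234, 2) appends "1234".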
+void ExpressionDateToString::insertPadded(StringBuilder& sb, int number, int width) {
+ invariant(width >= 1);
+ invariant(width <= 4);
+ invariant(number >= 0);
+ invariant(number <= 9999);
- /* ---------------------- ExpressionDayOfMonth ------------------------- */
+ int digits = 1;
- Value ExpressionDayOfMonth::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
+ if (number >= 1000) {
+ digits = 4;
+ } else if (number >= 100) {
+ digits = 3;
+ } else if (number >= 10) {
+ digits = 2;
}
- REGISTER_EXPRESSION("$dayOfMonth", ExpressionDayOfMonth::parse);
- const char* ExpressionDayOfMonth::getOpName() const {
- return "$dayOfMonth";
+ if (width > digits) {
+ sb.write("0000", width - digits);
}
+ sb << number;
+}
- /* ------------------------- ExpressionDayOfWeek ----------------------------- */
+void ExpressionDateToString::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ _date->addDependencies(deps);
+}
- Value ExpressionDayOfWeek::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
- }
+/* ---------------------- ExpressionDayOfMonth ------------------------- */
- REGISTER_EXPRESSION("$dayOfWeek", ExpressionDayOfWeek::parse);
- const char* ExpressionDayOfWeek::getOpName() const {
- return "$dayOfWeek";
- }
+Value ExpressionDayOfMonth::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- /* ------------------------- ExpressionDayOfYear ----------------------------- */
+REGISTER_EXPRESSION("$dayOfMonth", ExpressionDayOfMonth::parse);
+const char* ExpressionDayOfMonth::getOpName() const {
+ return "$dayOfMonth";
+}
- Value ExpressionDayOfYear::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
- }
+/* ------------------------- ExpressionDayOfWeek ----------------------------- */
- REGISTER_EXPRESSION("$dayOfYear", ExpressionDayOfYear::parse);
- const char* ExpressionDayOfYear::getOpName() const {
- return "$dayOfYear";
- }
+Value ExpressionDayOfWeek::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- /* ----------------------- ExpressionDivide ---------------------------- */
+REGISTER_EXPRESSION("$dayOfWeek", ExpressionDayOfWeek::parse);
+const char* ExpressionDayOfWeek::getOpName() const {
+ return "$dayOfWeek";
+}
- Value ExpressionDivide::evaluateInternal(Variables* vars) const {
- Value lhs = vpOperand[0]->evaluateInternal(vars);
- Value rhs = vpOperand[1]->evaluateInternal(vars);
+/* ------------------------- ExpressionDayOfYear ----------------------------- */
- if (lhs.numeric() && rhs.numeric()) {
- double numer = lhs.coerceToDouble();
- double denom = rhs.coerceToDouble();
- uassert(16608, "can't $divide by zero",
- denom != 0);
+Value ExpressionDayOfYear::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- return Value(numer / denom);
- }
- else if (lhs.nullish() || rhs.nullish()) {
- return Value(BSONNULL);
- }
- else {
- uasserted(16609, str::stream() << "$divide only supports numeric types, not "
- << typeName(lhs.getType())
- << " and "
- << typeName(rhs.getType()));
- }
- }
+REGISTER_EXPRESSION("$dayOfYear", ExpressionDayOfYear::parse);
+const char* ExpressionDayOfYear::getOpName() const {
+ return "$dayOfYear";
+}
- REGISTER_EXPRESSION("$divide", ExpressionDivide::parse);
- const char* ExpressionDivide::getOpName() const {
- return "$divide";
- }
+/* ----------------------- ExpressionDivide ---------------------------- */
- /* ---------------------- ExpressionObject --------------------------- */
+Value ExpressionDivide::evaluateInternal(Variables* vars) const {
+ Value lhs = vpOperand[0]->evaluateInternal(vars);
+ Value rhs = vpOperand[1]->evaluateInternal(vars);
- intrusive_ptr<ExpressionObject> ExpressionObject::create() {
- return new ExpressionObject(false);
- }
+ if (lhs.numeric() && rhs.numeric()) {
+ double numer = lhs.coerceToDouble();
+ double denom = rhs.coerceToDouble();
+ uassert(16608, "can't $divide by zero", denom != 0);
- intrusive_ptr<ExpressionObject> ExpressionObject::createRoot() {
- return new ExpressionObject(true);
+ return Value(numer / denom);
+ } else if (lhs.nullish() || rhs.nullish()) {
+ return Value(BSONNULL);
+ } else {
+ uasserted(16609,
+ str::stream() << "$divide only supports numeric types, not "
+ << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
}
+}
- ExpressionObject::ExpressionObject(bool atRoot)
- : _excludeId(false)
- , _atRoot(atRoot)
- {}
+REGISTER_EXPRESSION("$divide", ExpressionDivide::parse);
+const char* ExpressionDivide::getOpName() const {
+ return "$divide";
+}
- intrusive_ptr<Expression> ExpressionObject::optimize() {
- for (FieldMap::iterator it(_expressions.begin()); it!=_expressions.end(); ++it) {
- if (it->second)
- it->second = it->second->optimize();
- }
+/* ---------------------- ExpressionObject --------------------------- */
+
+intrusive_ptr<ExpressionObject> ExpressionObject::create() {
+ return new ExpressionObject(false);
+}
- return intrusive_ptr<Expression>(this);
+intrusive_ptr<ExpressionObject> ExpressionObject::createRoot() {
+ return new ExpressionObject(true);
+}
+
+ExpressionObject::ExpressionObject(bool atRoot) : _excludeId(false), _atRoot(atRoot) {}
+
+intrusive_ptr<Expression> ExpressionObject::optimize() {
+ for (FieldMap::iterator it(_expressions.begin()); it != _expressions.end(); ++it) {
+ if (it->second)
+ it->second = it->second->optimize();
}
- bool ExpressionObject::isSimple() {
- for (FieldMap::iterator it(_expressions.begin()); it!=_expressions.end(); ++it) {
- if (it->second && !it->second->isSimple())
- return false;
- }
- return true;
+ return intrusive_ptr<Expression>(this);
+}
+
+bool ExpressionObject::isSimple() {
+ for (FieldMap::iterator it(_expressions.begin()); it != _expressions.end(); ++it) {
+ if (it->second && !it->second->isSimple())
+ return false;
}
+ return true;
+}
- void ExpressionObject::addDependencies(DepsTracker* deps, vector<string>* path) const {
- string pathStr;
- if (path) {
- if (path->empty()) {
- // we are in the top level of a projection so _id is implicit
- if (!_excludeId)
- deps->fields.insert("_id");
- }
- else {
- FieldPath f (*path);
- pathStr = f.getPath(false);
- pathStr += '.';
- }
- }
- else {
- verify(!_excludeId);
+void ExpressionObject::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ string pathStr;
+ if (path) {
+ if (path->empty()) {
+ // we are in the top level of a projection so _id is implicit
+ if (!_excludeId)
+ deps->fields.insert("_id");
+ } else {
+ FieldPath f(*path);
+ pathStr = f.getPath(false);
+ pathStr += '.';
}
+ } else {
+ verify(!_excludeId);
+ }
- for (FieldMap::const_iterator it(_expressions.begin()); it!=_expressions.end(); ++it) {
- if (it->second) {
- if (path) path->push_back(it->first);
- it->second->addDependencies(deps, path);
- if (path) path->pop_back();
- }
- else { // inclusion
- uassert(16407, "inclusion not supported in objects nested in $expressions",
- path);
+ for (FieldMap::const_iterator it(_expressions.begin()); it != _expressions.end(); ++it) {
+ if (it->second) {
+ if (path)
+ path->push_back(it->first);
+ it->second->addDependencies(deps, path);
+ if (path)
+ path->pop_back();
+ } else { // inclusion
+ uassert(16407, "inclusion not supported in objects nested in $expressions", path);
- deps->fields.insert(pathStr + it->first);
- }
+ deps->fields.insert(pathStr + it->first);
}
}
+}
- void ExpressionObject::addToDocument(
- MutableDocument& out,
- const Document& currentDoc,
- Variables* vars
- ) const
- {
- FieldMap::const_iterator end = _expressions.end();
+void ExpressionObject::addToDocument(MutableDocument& out,
+ const Document& currentDoc,
+ Variables* vars) const {
+ FieldMap::const_iterator end = _expressions.end();
- // This is used to mark fields we've done so that we can add the ones we haven't
- set<string> doneFields;
+ // This is used to mark fields we've done so that we can add the ones we haven't
+ set<string> doneFields;
- FieldIterator fields(currentDoc);
- while(fields.more()) {
- Document::FieldPair field (fields.next());
+ FieldIterator fields(currentDoc);
+ while (fields.more()) {
+ Document::FieldPair field(fields.next());
- // TODO don't make a new string here
- const string fieldName = field.first.toString();
- FieldMap::const_iterator exprIter = _expressions.find(fieldName);
+ // TODO don't make a new string here
+ const string fieldName = field.first.toString();
+ FieldMap::const_iterator exprIter = _expressions.find(fieldName);
- // This field is not supposed to be in the output (unless it is _id)
- if (exprIter == end) {
- if (!_excludeId && _atRoot && field.first == "_id") {
- // _id from the root doc is always included (until exclusion is supported)
- // not updating doneFields since "_id" isn't in _expressions
- out.addField(field.first, field.second);
- }
- continue;
+ // This field is not supposed to be in the output (unless it is _id)
+ if (exprIter == end) {
+ if (!_excludeId && _atRoot && field.first == "_id") {
+ // _id from the root doc is always included (until exclusion is supported)
+ // not updating doneFields since "_id" isn't in _expressions
+ out.addField(field.first, field.second);
}
+ continue;
+ }
- // make sure we don't add this field again
- doneFields.insert(exprIter->first);
+ // make sure we don't add this field again
+ doneFields.insert(exprIter->first);
- Expression* expr = exprIter->second.get();
+ Expression* expr = exprIter->second.get();
- if (!expr) {
- // This means pull the matching field from the input document
- out.addField(field.first, field.second);
- continue;
- }
+ if (!expr) {
+ // This means pull the matching field from the input document
+ out.addField(field.first, field.second);
+ continue;
+ }
- ExpressionObject* exprObj = dynamic_cast<ExpressionObject*>(expr);
- BSONType valueType = field.second.getType();
- if ((valueType != Object && valueType != Array) || !exprObj ) {
- // This expression replace the whole field
+ ExpressionObject* exprObj = dynamic_cast<ExpressionObject*>(expr);
+ BSONType valueType = field.second.getType();
+ if ((valueType != Object && valueType != Array) || !exprObj) {
+            // This expression replaces the whole field
- Value pValue(expr->evaluateInternal(vars));
+ Value pValue(expr->evaluateInternal(vars));
- // don't add field if nothing was found in the subobject
- if (exprObj && pValue.getDocument().empty())
- continue;
+ // don't add field if nothing was found in the subobject
+ if (exprObj && pValue.getDocument().empty())
+ continue;
- /*
- Don't add non-existent values (note: different from NULL or Undefined);
- this is consistent with existing selection syntax which doesn't
- force the appearance of non-existent fields.
- */
- if (!pValue.missing())
- out.addField(field.first, pValue);
+ /*
+ Don't add non-existent values (note: different from NULL or Undefined);
+ this is consistent with existing selection syntax which doesn't
+ force the appearance of non-existent fields.
+ */
+ if (!pValue.missing())
+ out.addField(field.first, pValue);
- continue;
- }
+ continue;
+ }
+ /*
+ Check on the type of the input value. If it's an
+ object, just walk down into that recursively, and
+ add it to the result.
+ */
+ if (valueType == Object) {
+ MutableDocument sub(exprObj->getSizeHint());
+ exprObj->addToDocument(sub, field.second.getDocument(), vars);
+ out.addField(field.first, sub.freezeToValue());
+ } else if (valueType == Array) {
/*
- Check on the type of the input value. If it's an
- object, just walk down into that recursively, and
- add it to the result.
+ If it's an array, we have to do the same thing,
+ but to each array element. Then, add the array
+ of results to the current document.
*/
- if (valueType == Object) {
- MutableDocument sub (exprObj->getSizeHint());
- exprObj->addToDocument(sub, field.second.getDocument(), vars);
- out.addField(field.first, sub.freezeToValue());
- }
- else if (valueType == Array) {
- /*
- If it's an array, we have to do the same thing,
- but to each array element. Then, add the array
- of results to the current document.
- */
- vector<Value> result;
- const vector<Value>& input = field.second.getArray();
- for (size_t i=0; i < input.size(); i++) {
- // can't look for a subfield in a non-object value.
- if (input[i].getType() != Object)
- continue;
-
- MutableDocument doc (exprObj->getSizeHint());
- exprObj->addToDocument(doc, input[i].getDocument(), vars);
- result.push_back(doc.freezeToValue());
- }
+ vector<Value> result;
+ const vector<Value>& input = field.second.getArray();
+ for (size_t i = 0; i < input.size(); i++) {
+ // can't look for a subfield in a non-object value.
+ if (input[i].getType() != Object)
+ continue;
- out.addField(field.first, Value(std::move(result)));
+ MutableDocument doc(exprObj->getSizeHint());
+ exprObj->addToDocument(doc, input[i].getDocument(), vars);
+ result.push_back(doc.freezeToValue());
}
- else {
- verify( false );
- }
- }
- if (doneFields.size() == _expressions.size())
- return;
+ out.addField(field.first, Value(std::move(result)));
+ } else {
+ verify(false);
+ }
+ }
- /* add any remaining fields we haven't already taken care of */
- for (vector<string>::const_iterator i(_order.begin()); i!=_order.end(); ++i) {
- FieldMap::const_iterator it = _expressions.find(*i);
- string fieldName(it->first);
+ if (doneFields.size() == _expressions.size())
+ return;
- /* if we've already dealt with this field, above, do nothing */
- if (doneFields.count(fieldName))
- continue;
+ /* add any remaining fields we haven't already taken care of */
+ for (vector<string>::const_iterator i(_order.begin()); i != _order.end(); ++i) {
+ FieldMap::const_iterator it = _expressions.find(*i);
+ string fieldName(it->first);
- // this is a missing inclusion field
- if (!it->second)
- continue;
+ /* if we've already dealt with this field, above, do nothing */
+ if (doneFields.count(fieldName))
+ continue;
- Value pValue(it->second->evaluateInternal(vars));
+ // this is a missing inclusion field
+ if (!it->second)
+ continue;
- /*
- Don't add non-existent values (note: different from NULL or Undefined);
- this is consistent with existing selection syntax which doesn't
- force the appearnance of non-existent fields.
- */
- if (pValue.missing())
- continue;
+ Value pValue(it->second->evaluateInternal(vars));
- // don't add field if nothing was found in the subobject
- if (dynamic_cast<ExpressionObject*>(it->second.get())
- && pValue.getDocument().empty())
- continue;
+ /*
+ Don't add non-existent values (note: different from NULL or Undefined);
+ this is consistent with existing selection syntax which doesn't
+          force the appearance of non-existent fields.
+ */
+ if (pValue.missing())
+ continue;
+ // don't add field if nothing was found in the subobject
+ if (dynamic_cast<ExpressionObject*>(it->second.get()) && pValue.getDocument().empty())
+ continue;
- out.addField(fieldName, pValue);
- }
- }
- size_t ExpressionObject::getSizeHint() const {
- // Note: this can overestimate, but that is better than underestimating
- return _expressions.size() + (_excludeId ? 0 : 1);
+ out.addField(fieldName, pValue);
}
+}
- Document ExpressionObject::evaluateDocument(Variables* vars) const {
- /* create and populate the result */
- MutableDocument out (getSizeHint());
+size_t ExpressionObject::getSizeHint() const {
+ // Note: this can overestimate, but that is better than underestimating
+ return _expressions.size() + (_excludeId ? 0 : 1);
+}
- addToDocument(out,
- Document(), // No inclusion field matching.
- vars);
- return out.freeze();
- }
+Document ExpressionObject::evaluateDocument(Variables* vars) const {
+ /* create and populate the result */
+ MutableDocument out(getSizeHint());
- Value ExpressionObject::evaluateInternal(Variables* vars) const {
- return Value(evaluateDocument(vars));
- }
+ addToDocument(out,
+ Document(), // No inclusion field matching.
+ vars);
+ return out.freeze();
+}
- void ExpressionObject::addField(const FieldPath &fieldPath,
- const intrusive_ptr<Expression> &pExpression) {
- const string fieldPart = fieldPath.getFieldName(0);
- const bool haveExpr = _expressions.count(fieldPart);
+Value ExpressionObject::evaluateInternal(Variables* vars) const {
+ return Value(evaluateDocument(vars));
+}
- intrusive_ptr<Expression>& expr = _expressions[fieldPart]; // inserts if !haveExpr
- intrusive_ptr<ExpressionObject> subObj = dynamic_cast<ExpressionObject*>(expr.get());
+void ExpressionObject::addField(const FieldPath& fieldPath,
+ const intrusive_ptr<Expression>& pExpression) {
+ const string fieldPart = fieldPath.getFieldName(0);
+ const bool haveExpr = _expressions.count(fieldPart);
- if (!haveExpr) {
- _order.push_back(fieldPart);
- }
- else { // we already have an expression or inclusion for this field
- if (fieldPath.getPathLength() == 1) {
- // This expression is for right here
-
- ExpressionObject* newSubObj = dynamic_cast<ExpressionObject*>(pExpression.get());
- uassert(16400, str::stream()
- << "can't add an expression for field " << fieldPart
- << " because there is already an expression for that field"
- << " or one of its sub-fields.",
- subObj && newSubObj); // we can merge them
-
- // Copy everything from the newSubObj to the existing subObj
- // This is for cases like { $project:{ 'b.c':1, b:{ a:1 } } }
- for (vector<string>::const_iterator it (newSubObj->_order.begin());
- it != newSubObj->_order.end();
- ++it) {
- // asserts if any fields are dupes
- subObj->addField(*it, newSubObj->_expressions[*it]);
- }
- return;
- }
- else {
- // This expression is for a subfield
- uassert(16401, str::stream()
- << "can't add an expression for a subfield of " << fieldPart
- << " because there is already an expression that applies to"
- << " the whole field",
- subObj);
- }
- }
+ intrusive_ptr<Expression>& expr = _expressions[fieldPart]; // inserts if !haveExpr
+ intrusive_ptr<ExpressionObject> subObj = dynamic_cast<ExpressionObject*>(expr.get());
+ if (!haveExpr) {
+ _order.push_back(fieldPart);
+ } else { // we already have an expression or inclusion for this field
if (fieldPath.getPathLength() == 1) {
- verify(!haveExpr); // haveExpr case handled above.
- expr = pExpression;
+ // This expression is for right here
+
+ ExpressionObject* newSubObj = dynamic_cast<ExpressionObject*>(pExpression.get());
+ uassert(16400,
+ str::stream() << "can't add an expression for field " << fieldPart
+ << " because there is already an expression for that field"
+ << " or one of its sub-fields.",
+ subObj && newSubObj); // we can merge them
+
+ // Copy everything from the newSubObj to the existing subObj
+ // This is for cases like { $project:{ 'b.c':1, b:{ a:1 } } }
+ for (vector<string>::const_iterator it(newSubObj->_order.begin());
+ it != newSubObj->_order.end();
+ ++it) {
+ // asserts if any fields are dupes
+ subObj->addField(*it, newSubObj->_expressions[*it]);
+ }
return;
+ } else {
+ // This expression is for a subfield
+ uassert(16401,
+ str::stream() << "can't add an expression for a subfield of " << fieldPart
+ << " because there is already an expression that applies to"
+ << " the whole field",
+ subObj);
}
-
- if (!haveExpr)
- expr = subObj = ExpressionObject::create();
-
- subObj->addField(fieldPath.tail(), pExpression);
}
- void ExpressionObject::includePath(const string &theFieldPath) {
- addField(theFieldPath, NULL);
+ if (fieldPath.getPathLength() == 1) {
+ verify(!haveExpr); // haveExpr case handled above.
+ expr = pExpression;
+ return;
}
- Value ExpressionObject::serialize(bool explain) const {
- MutableDocument valBuilder;
- if (_excludeId)
- valBuilder["_id"] = Value(false);
+ if (!haveExpr)
+ expr = subObj = ExpressionObject::create();
- for (vector<string>::const_iterator it(_order.begin()); it!=_order.end(); ++it) {
- string fieldName = *it;
- verify(_expressions.find(fieldName) != _expressions.end());
- intrusive_ptr<Expression> expr = _expressions.find(fieldName)->second;
+ subObj->addField(fieldPath.tail(), pExpression);
+}
- if (!expr) {
- // this is inclusion, not an expression
- valBuilder[fieldName] = Value(true);
- }
- else {
- valBuilder[fieldName] = expr->serialize(explain);
- }
- }
- return valBuilder.freezeToValue();
- }
+void ExpressionObject::includePath(const string& theFieldPath) {
+ addField(theFieldPath, NULL);
+}
- /* --------------------- ExpressionFieldPath --------------------------- */
+Value ExpressionObject::serialize(bool explain) const {
+ MutableDocument valBuilder;
+ if (_excludeId)
+ valBuilder["_id"] = Value(false);
- // this is the old deprecated version only used by tests not using variables
- intrusive_ptr<ExpressionFieldPath> ExpressionFieldPath::create(const string& fieldPath) {
- return new ExpressionFieldPath("CURRENT." + fieldPath, Variables::ROOT_ID);
- }
+ for (vector<string>::const_iterator it(_order.begin()); it != _order.end(); ++it) {
+ string fieldName = *it;
+ verify(_expressions.find(fieldName) != _expressions.end());
+ intrusive_ptr<Expression> expr = _expressions.find(fieldName)->second;
- // this is the new version that supports every syntax
- intrusive_ptr<ExpressionFieldPath> ExpressionFieldPath::parse(
- const string& raw,
- const VariablesParseState& vps) {
+ if (!expr) {
+ // this is inclusion, not an expression
+ valBuilder[fieldName] = Value(true);
+ } else {
+ valBuilder[fieldName] = expr->serialize(explain);
+ }
+ }
+ return valBuilder.freezeToValue();
+}
- uassert(16873, str::stream() << "FieldPath '" << raw << "' doesn't start with $",
- raw.c_str()[0] == '$'); // c_str()[0] is always a valid reference.
+/* --------------------- ExpressionFieldPath --------------------------- */
- uassert(16872, str::stream() << "'$' by itself is not a valid FieldPath",
- raw.size() >= 2); // need at least "$" and either "$" or a field name
+// this is the old deprecated version only used by tests not using variables
+intrusive_ptr<ExpressionFieldPath> ExpressionFieldPath::create(const string& fieldPath) {
+ return new ExpressionFieldPath("CURRENT." + fieldPath, Variables::ROOT_ID);
+}
- if (raw[1] == '$') {
- const StringData rawSD = raw;
- const StringData fieldPath = rawSD.substr(2); // strip off $$
- const StringData varName = fieldPath.substr(0, fieldPath.find('.'));
- Variables::uassertValidNameForUserRead(varName);
- return new ExpressionFieldPath(fieldPath.toString(), vps.getVariable(varName));
- }
- else {
- return new ExpressionFieldPath("CURRENT." + raw.substr(1), // strip the "$" prefix
- vps.getVariable("CURRENT"));
- }
+// this is the new version that supports every syntax
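+// e.g. "$a.b" resolves to CURRENT.a.b, while "$$ROOT.a" reads field "a" of the ROOT variable.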
+intrusive_ptr<ExpressionFieldPath> ExpressionFieldPath::parse(const string& raw,
+ const VariablesParseState& vps) {
+ uassert(16873,
+ str::stream() << "FieldPath '" << raw << "' doesn't start with $",
+ raw.c_str()[0] == '$'); // c_str()[0] is always a valid reference.
+
+ uassert(16872,
+ str::stream() << "'$' by itself is not a valid FieldPath",
+ raw.size() >= 2); // need at least "$" and either "$" or a field name
+
+ if (raw[1] == '$') {
+ const StringData rawSD = raw;
+ const StringData fieldPath = rawSD.substr(2); // strip off $$
+ const StringData varName = fieldPath.substr(0, fieldPath.find('.'));
+ Variables::uassertValidNameForUserRead(varName);
+ return new ExpressionFieldPath(fieldPath.toString(), vps.getVariable(varName));
+ } else {
+ return new ExpressionFieldPath("CURRENT." + raw.substr(1), // strip the "$" prefix
+ vps.getVariable("CURRENT"));
}
+}
- ExpressionFieldPath::ExpressionFieldPath(const string& theFieldPath, Variables::Id variable)
- : _fieldPath(theFieldPath)
- , _variable(variable)
- {}
+ExpressionFieldPath::ExpressionFieldPath(const string& theFieldPath, Variables::Id variable)
+ : _fieldPath(theFieldPath), _variable(variable) {}
- intrusive_ptr<Expression> ExpressionFieldPath::optimize() {
- /* nothing can be done for these */
- return intrusive_ptr<Expression>(this);
- }
+intrusive_ptr<Expression> ExpressionFieldPath::optimize() {
+ /* nothing can be done for these */
+ return intrusive_ptr<Expression>(this);
+}
- void ExpressionFieldPath::addDependencies(DepsTracker* deps, vector<string>* path) const {
- if (_variable == Variables::ROOT_ID) { // includes CURRENT when it is equivalent to ROOT.
- if (_fieldPath.getPathLength() == 1) {
- deps->needWholeDocument = true; // need full doc if just "$$ROOT"
- } else {
- deps->fields.insert(_fieldPath.tail().getPath(false));
- }
+void ExpressionFieldPath::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ if (_variable == Variables::ROOT_ID) { // includes CURRENT when it is equivalent to ROOT.
+ if (_fieldPath.getPathLength() == 1) {
+ deps->needWholeDocument = true; // need full doc if just "$$ROOT"
+ } else {
+ deps->fields.insert(_fieldPath.tail().getPath(false));
}
}
+}
- Value ExpressionFieldPath::evaluatePathArray(size_t index, const Value& input) const {
- dassert(input.getType() == Array);
-
- // Check for remaining path in each element of array
- vector<Value> result;
- const vector<Value>& array = input.getArray();
- for (size_t i=0; i < array.size(); i++) {
- if (array[i].getType() != Object)
- continue;
+Value ExpressionFieldPath::evaluatePathArray(size_t index, const Value& input) const {
+ dassert(input.getType() == Array);
- const Value nested = evaluatePath(index, array[i].getDocument());
- if (!nested.missing())
- result.push_back(nested);
- }
+ // Check for remaining path in each element of array
+ vector<Value> result;
+ const vector<Value>& array = input.getArray();
+ for (size_t i = 0; i < array.size(); i++) {
+ if (array[i].getType() != Object)
+ continue;
- return Value(std::move(result));
+ const Value nested = evaluatePath(index, array[i].getDocument());
+ if (!nested.missing())
+ result.push_back(nested);
}
- Value ExpressionFieldPath::evaluatePath(size_t index, const Document& input) const {
- // Note this function is very hot so it is important that is is well optimized.
- // In particular, all return paths should support RVO.
- /* if we've hit the end of the path, stop */
- if (index == _fieldPath.getPathLength() - 1)
- return input[_fieldPath.getFieldName(index)];
+ return Value(std::move(result));
+}
+Value ExpressionFieldPath::evaluatePath(size_t index, const Document& input) const {
+    // Note this function is very hot so it is important that it is well optimized.
+ // In particular, all return paths should support RVO.
+
+ /* if we've hit the end of the path, stop */
+ if (index == _fieldPath.getPathLength() - 1)
+ return input[_fieldPath.getFieldName(index)];
- // Try to dive deeper
- const Value val = input[_fieldPath.getFieldName(index)];
- switch (val.getType()) {
+ // Try to dive deeper
+ const Value val = input[_fieldPath.getFieldName(index)];
+ switch (val.getType()) {
case Object:
- return evaluatePath(index+1, val.getDocument());
+ return evaluatePath(index + 1, val.getDocument());
case Array:
- return evaluatePathArray(index+1, val);
+ return evaluatePathArray(index + 1, val);
default:
return Value();
- }
}
+}
- Value ExpressionFieldPath::evaluateInternal(Variables* vars) const {
- if (_fieldPath.getPathLength() == 1) // get the whole variable
- return vars->getValue(_variable);
+Value ExpressionFieldPath::evaluateInternal(Variables* vars) const {
+ if (_fieldPath.getPathLength() == 1) // get the whole variable
+ return vars->getValue(_variable);
- if (_variable == Variables::ROOT_ID) {
- // ROOT is always a document so use optimized code path
- return evaluatePath(1, vars->getRoot());
- }
-
- Value var = vars->getValue(_variable);
- switch (var.getType()) {
- case Object: return evaluatePath(1, var.getDocument());
- case Array: return evaluatePathArray(1, var);
- default: return Value();
- }
+ if (_variable == Variables::ROOT_ID) {
+ // ROOT is always a document so use optimized code path
+ return evaluatePath(1, vars->getRoot());
}
- Value ExpressionFieldPath::serialize(bool explain) const {
- if (_fieldPath.getFieldName(0) == "CURRENT" && _fieldPath.getPathLength() > 1) {
- // use short form for "$$CURRENT.foo" but not just "$$CURRENT"
- return Value("$" + _fieldPath.tail().getPath(false));
- }
- else {
- return Value("$$" + _fieldPath.getPath(false));
- }
+ Value var = vars->getValue(_variable);
+ switch (var.getType()) {
+ case Object:
+ return evaluatePath(1, var.getDocument());
+ case Array:
+ return evaluatePathArray(1, var);
+ default:
+ return Value();
}
+}
- /* ------------------------- ExpressionFilter ----------------------------- */
+Value ExpressionFieldPath::serialize(bool explain) const {
+ if (_fieldPath.getFieldName(0) == "CURRENT" && _fieldPath.getPathLength() > 1) {
+ // use short form for "$$CURRENT.foo" but not just "$$CURRENT"
+ return Value("$" + _fieldPath.tail().getPath(false));
+ } else {
+ return Value("$$" + _fieldPath.getPath(false));
+ }
+}
- REGISTER_EXPRESSION("$filter", ExpressionFilter::parse);
- intrusive_ptr<Expression> ExpressionFilter::parse(BSONElement expr,
- const VariablesParseState& vpsIn) {
+/* ------------------------- ExpressionFilter ----------------------------- */
- verify(str::equals(expr.fieldName(), "$filter"));
+REGISTER_EXPRESSION("$filter", ExpressionFilter::parse);
+intrusive_ptr<Expression> ExpressionFilter::parse(BSONElement expr,
+ const VariablesParseState& vpsIn) {
+ verify(str::equals(expr.fieldName(), "$filter"));
- uassert(28646, "$filter only supports an object as its argument",
- expr.type() == Object);
+ uassert(28646, "$filter only supports an object as its argument", expr.type() == Object);
- // "cond" must be parsed after "as" regardless of BSON order.
- BSONElement inputElem;
- BSONElement asElem;
- BSONElement condElem;
- for (auto elem : expr.Obj()) {
- if (str::equals(elem.fieldName(), "input")) {
- inputElem = elem;
- } else if (str::equals(elem.fieldName(), "as")) {
- asElem = elem;
- } else if (str::equals(elem.fieldName(), "cond")) {
- condElem = elem;
- } else {
- uasserted(28647, str::stream()
- << "Unrecognized parameter to $filter: " << elem.fieldName());
- }
+ // "cond" must be parsed after "as" regardless of BSON order.
+ BSONElement inputElem;
+ BSONElement asElem;
+ BSONElement condElem;
+ for (auto elem : expr.Obj()) {
+ if (str::equals(elem.fieldName(), "input")) {
+ inputElem = elem;
+ } else if (str::equals(elem.fieldName(), "as")) {
+ asElem = elem;
+ } else if (str::equals(elem.fieldName(), "cond")) {
+ condElem = elem;
+ } else {
+ uasserted(28647,
+ str::stream() << "Unrecognized parameter to $filter: " << elem.fieldName());
}
+ }
- uassert(28648, "Missing 'input' parameter to $filter",
- !inputElem.eoo());
- uassert(28649, "Missing 'as' parameter to $filter",
- !asElem.eoo());
- uassert(28650, "Missing 'cond' parameter to $filter",
- !condElem.eoo());
+ uassert(28648, "Missing 'input' parameter to $filter", !inputElem.eoo());
+ uassert(28649, "Missing 'as' parameter to $filter", !asElem.eoo());
+ uassert(28650, "Missing 'cond' parameter to $filter", !condElem.eoo());
- // Parse "input", only has outer variables.
- intrusive_ptr<Expression> input = parseOperand(inputElem, vpsIn);
+ // Parse "input", only has outer variables.
+ intrusive_ptr<Expression> input = parseOperand(inputElem, vpsIn);
- // Parse "as".
- VariablesParseState vpsSub(vpsIn); // vpsSub gets our variable, vpsIn doesn't.
- string varName = asElem.str();
- Variables::uassertValidNameForUserWrite(varName);
- Variables::Id varId = vpsSub.defineVariable(varName);
+ // Parse "as".
+ VariablesParseState vpsSub(vpsIn); // vpsSub gets our variable, vpsIn doesn't.
+ string varName = asElem.str();
+ Variables::uassertValidNameForUserWrite(varName);
+ Variables::Id varId = vpsSub.defineVariable(varName);
- // Parse "cond", has access to "as" variable.
- intrusive_ptr<Expression> cond = parseOperand(condElem, vpsSub);
-
- return new ExpressionFilter(std::move(varName), varId, std::move(input), std::move(cond));
- }
+ // Parse "cond", has access to "as" variable.
+ intrusive_ptr<Expression> cond = parseOperand(condElem, vpsSub);
- ExpressionFilter::ExpressionFilter(string varName,
- Variables::Id varId,
- intrusive_ptr<Expression> input,
- intrusive_ptr<Expression> filter)
- : _varName(std::move(varName))
- , _varId(varId)
- , _input(std::move(input))
- , _filter(std::move(filter))
- {}
+ return new ExpressionFilter(std::move(varName), varId, std::move(input), std::move(cond));
+}
- intrusive_ptr<Expression> ExpressionFilter::optimize() {
- // TODO handle when _input is constant.
- _input = _input->optimize();
- _filter = _filter->optimize();
- return this;
- }
+ExpressionFilter::ExpressionFilter(string varName,
+ Variables::Id varId,
+ intrusive_ptr<Expression> input,
+ intrusive_ptr<Expression> filter)
+ : _varName(std::move(varName)),
+ _varId(varId),
+ _input(std::move(input)),
+ _filter(std::move(filter)) {}
+
+intrusive_ptr<Expression> ExpressionFilter::optimize() {
+ // TODO handle when _input is constant.
+ _input = _input->optimize();
+ _filter = _filter->optimize();
+ return this;
+}
- Value ExpressionFilter::serialize(bool explain) const {
- return Value(DOC("$filter" << DOC("input" << _input->serialize(explain)
- << "as" << _varName
- << "cond" << _filter->serialize(explain)
- )));
- }
+Value ExpressionFilter::serialize(bool explain) const {
+ return Value(DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName
+ << "cond" << _filter->serialize(explain))));
+}
- Value ExpressionFilter::evaluateInternal(Variables* vars) const {
- // We are guaranteed at parse time that this isn't using our _varId.
- const Value inputVal = _input->evaluateInternal(vars);
- if (inputVal.nullish())
- return Value(BSONNULL);
+Value ExpressionFilter::evaluateInternal(Variables* vars) const {
+ // We are guaranteed at parse time that this isn't using our _varId.
+ const Value inputVal = _input->evaluateInternal(vars);
+ if (inputVal.nullish())
+ return Value(BSONNULL);
- uassert(28651, str::stream() << "input to $filter must be an Array not "
- << typeName(inputVal.getType()),
- inputVal.getType() == Array);
+ uassert(28651,
+ str::stream() << "input to $filter must be an Array not "
+ << typeName(inputVal.getType()),
+ inputVal.getType() == Array);
- const vector<Value>& input = inputVal.getArray();
+ const vector<Value>& input = inputVal.getArray();
- if (input.empty())
- return inputVal;
+ if (input.empty())
+ return inputVal;
- vector<Value> output;
- for (const auto& elem : input) {
- vars->setValue(_varId, elem);
+ vector<Value> output;
+ for (const auto& elem : input) {
+ vars->setValue(_varId, elem);
- if (_filter->evaluateInternal(vars).coerceToBool()) {
- output.push_back(std::move(elem));
- }
+ if (_filter->evaluateInternal(vars).coerceToBool()) {
+ output.push_back(std::move(elem));
}
-
- return Value(std::move(output));
}
- void ExpressionFilter::addDependencies(DepsTracker* deps, vector<string>* path) const {
- _input->addDependencies(deps);
- _filter->addDependencies(deps);
- }
+ return Value(std::move(output));
+}
- /* ------------------------- ExpressionLet ----------------------------- */
+void ExpressionFilter::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ _input->addDependencies(deps);
+ _filter->addDependencies(deps);
+}
- REGISTER_EXPRESSION("$let", ExpressionLet::parse);
- intrusive_ptr<Expression> ExpressionLet::parse(
- BSONElement expr,
- const VariablesParseState& vpsIn) {
+/* ------------------------- ExpressionLet ----------------------------- */
- verify(str::equals(expr.fieldName(), "$let"));
+REGISTER_EXPRESSION("$let", ExpressionLet::parse);
+intrusive_ptr<Expression> ExpressionLet::parse(BSONElement expr, const VariablesParseState& vpsIn) {
+ verify(str::equals(expr.fieldName(), "$let"));
- uassert(16874, "$let only supports an object as its argument",
- expr.type() == Object);
- const BSONObj args = expr.embeddedObject();
+ uassert(16874, "$let only supports an object as its argument", expr.type() == Object);
+ const BSONObj args = expr.embeddedObject();
- // varsElem must be parsed before inElem regardless of BSON order.
- BSONElement varsElem;
- BSONElement inElem;
- BSONForEach(arg, args) {
- if (str::equals(arg.fieldName(), "vars")) {
- varsElem = arg;
- } else if (str::equals(arg.fieldName(), "in")) {
- inElem = arg;
- } else {
- uasserted(16875, str::stream()
- << "Unrecognized parameter to $let: " << arg.fieldName());
- }
+ // varsElem must be parsed before inElem regardless of BSON order.
+ BSONElement varsElem;
+ BSONElement inElem;
+ BSONForEach(arg, args) {
+ if (str::equals(arg.fieldName(), "vars")) {
+ varsElem = arg;
+ } else if (str::equals(arg.fieldName(), "in")) {
+ inElem = arg;
+ } else {
+ uasserted(16875,
+ str::stream() << "Unrecognized parameter to $let: " << arg.fieldName());
}
+ }
- uassert(16876, "Missing 'vars' parameter to $let",
- !varsElem.eoo());
- uassert(16877, "Missing 'in' parameter to $let",
- !inElem.eoo());
-
- // parse "vars"
- VariablesParseState vpsSub(vpsIn); // vpsSub gets our vars, vpsIn doesn't.
- VariableMap vars;
- BSONForEach(varElem, varsElem.embeddedObjectUserCheck()) {
- const string varName = varElem.fieldName();
- Variables::uassertValidNameForUserWrite(varName);
- Variables::Id id = vpsSub.defineVariable(varName);
-
- vars[id] = NameAndExpression(varName,
- parseOperand(varElem, vpsIn)); // only has outer vars
- }
+ uassert(16876, "Missing 'vars' parameter to $let", !varsElem.eoo());
+ uassert(16877, "Missing 'in' parameter to $let", !inElem.eoo());
- // parse "in"
- intrusive_ptr<Expression> subExpression = parseOperand(inElem, vpsSub); // has our vars
+ // parse "vars"
+ VariablesParseState vpsSub(vpsIn); // vpsSub gets our vars, vpsIn doesn't.
+ VariableMap vars;
+ BSONForEach(varElem, varsElem.embeddedObjectUserCheck()) {
+ const string varName = varElem.fieldName();
+ Variables::uassertValidNameForUserWrite(varName);
+ Variables::Id id = vpsSub.defineVariable(varName);
- return new ExpressionLet(vars, subExpression);
+ vars[id] = NameAndExpression(varName, parseOperand(varElem, vpsIn)); // only has outer vars
}
- ExpressionLet::ExpressionLet(const VariableMap& vars, intrusive_ptr<Expression> subExpression)
- : _variables(vars)
- , _subExpression(subExpression)
- {}
+ // parse "in"
+ intrusive_ptr<Expression> subExpression = parseOperand(inElem, vpsSub); // has our vars
- intrusive_ptr<Expression> ExpressionLet::optimize() {
- if (_variables.empty()) {
- // we aren't binding any variables so just return the subexpression
- return _subExpression->optimize();
- }
+ return new ExpressionLet(vars, subExpression);
+}
- for (VariableMap::iterator it=_variables.begin(), end=_variables.end(); it != end; ++it) {
- it->second.expression = it->second.expression->optimize();
- }
+ExpressionLet::ExpressionLet(const VariableMap& vars, intrusive_ptr<Expression> subExpression)
+ : _variables(vars), _subExpression(subExpression) {}
- // TODO be smarter with constant "variables"
- _subExpression = _subExpression->optimize();
+intrusive_ptr<Expression> ExpressionLet::optimize() {
+ if (_variables.empty()) {
+ // we aren't binding any variables so just return the subexpression
+ return _subExpression->optimize();
+ }
- return this;
+ for (VariableMap::iterator it = _variables.begin(), end = _variables.end(); it != end; ++it) {
+ it->second.expression = it->second.expression->optimize();
}
- Value ExpressionLet::serialize(bool explain) const {
- MutableDocument vars;
- for (VariableMap::const_iterator it=_variables.begin(), end=_variables.end();
- it != end; ++it) {
- vars[it->second.name] = it->second.expression->serialize(explain);
- }
+ // TODO be smarter with constant "variables"
+ _subExpression = _subExpression->optimize();
+
+ return this;
+}
- return Value(DOC("$let" << DOC("vars" << vars.freeze()
- << "in" << _subExpression->serialize(explain))
- ));
+Value ExpressionLet::serialize(bool explain) const {
+ MutableDocument vars;
+ for (VariableMap::const_iterator it = _variables.begin(), end = _variables.end(); it != end;
+ ++it) {
+ vars[it->second.name] = it->second.expression->serialize(explain);
}
- Value ExpressionLet::evaluateInternal(Variables* vars) const {
- for (VariableMap::const_iterator it=_variables.begin(), end=_variables.end();
- it != end; ++it) {
- // It is guaranteed at parse-time that these expressions don't use the variable ids we
- // are setting
- vars->setValue(it->first,
- it->second.expression->evaluateInternal(vars));
- }
+ return Value(
+ DOC("$let" << DOC("vars" << vars.freeze() << "in" << _subExpression->serialize(explain))));
+}
- return _subExpression->evaluateInternal(vars);
+Value ExpressionLet::evaluateInternal(Variables* vars) const {
+ for (VariableMap::const_iterator it = _variables.begin(), end = _variables.end(); it != end;
+ ++it) {
+ // It is guaranteed at parse-time that these expressions don't use the variable ids we
+ // are setting
+ vars->setValue(it->first, it->second.expression->evaluateInternal(vars));
}
- void ExpressionLet::addDependencies(DepsTracker* deps, vector<string>* path) const {
- for (VariableMap::const_iterator it=_variables.begin(), end=_variables.end();
- it != end; ++it) {
- it->second.expression->addDependencies(deps);
- }
+ return _subExpression->evaluateInternal(vars);
+}
- // TODO be smarter when CURRENT is a bound variable
- _subExpression->addDependencies(deps);
+void ExpressionLet::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ for (VariableMap::const_iterator it = _variables.begin(), end = _variables.end(); it != end;
+ ++it) {
+ it->second.expression->addDependencies(deps);
}
+ // TODO be smarter when CURRENT is a bound variable
+ _subExpression->addDependencies(deps);
+}
- /* ------------------------- ExpressionMap ----------------------------- */
- REGISTER_EXPRESSION("$map", ExpressionMap::parse);
- intrusive_ptr<Expression> ExpressionMap::parse(
- BSONElement expr,
- const VariablesParseState& vpsIn) {
+/* ------------------------- ExpressionMap ----------------------------- */
- verify(str::equals(expr.fieldName(), "$map"));
+REGISTER_EXPRESSION("$map", ExpressionMap::parse);
+intrusive_ptr<Expression> ExpressionMap::parse(BSONElement expr, const VariablesParseState& vpsIn) {
+ verify(str::equals(expr.fieldName(), "$map"));
- uassert(16878, "$map only supports an object as its argument",
- expr.type() == Object);
+ uassert(16878, "$map only supports an object as its argument", expr.type() == Object);
- // "in" must be parsed after "as" regardless of BSON order
- BSONElement inputElem;
- BSONElement asElem;
- BSONElement inElem;
- const BSONObj args = expr.embeddedObject();
- BSONForEach(arg, args) {
- if (str::equals(arg.fieldName(), "input")) {
- inputElem = arg;
- } else if (str::equals(arg.fieldName(), "as")) {
- asElem = arg;
- } else if (str::equals(arg.fieldName(), "in")) {
- inElem = arg;
- } else {
- uasserted(16879, str::stream()
- << "Unrecognized parameter to $map: " << arg.fieldName());
- }
+ // "in" must be parsed after "as" regardless of BSON order
+ BSONElement inputElem;
+ BSONElement asElem;
+ BSONElement inElem;
+ const BSONObj args = expr.embeddedObject();
+ BSONForEach(arg, args) {
+ if (str::equals(arg.fieldName(), "input")) {
+ inputElem = arg;
+ } else if (str::equals(arg.fieldName(), "as")) {
+ asElem = arg;
+ } else if (str::equals(arg.fieldName(), "in")) {
+ inElem = arg;
+ } else {
+ uasserted(16879,
+ str::stream() << "Unrecognized parameter to $map: " << arg.fieldName());
}
+ }
- uassert(16880, "Missing 'input' parameter to $map",
- !inputElem.eoo());
- uassert(16881, "Missing 'as' parameter to $map",
- !asElem.eoo());
- uassert(16882, "Missing 'in' parameter to $map",
- !inElem.eoo());
-
- // parse "input"
- intrusive_ptr<Expression> input = parseOperand(inputElem, vpsIn); // only has outer vars
-
- // parse "as"
- VariablesParseState vpsSub(vpsIn); // vpsSub gets our vars, vpsIn doesn't.
- string varName = asElem.str();
- Variables::uassertValidNameForUserWrite(varName);
- Variables::Id varId = vpsSub.defineVariable(varName);
-
- // parse "in"
- intrusive_ptr<Expression> in = parseOperand(inElem, vpsSub); // has access to map variable
+ uassert(16880, "Missing 'input' parameter to $map", !inputElem.eoo());
+ uassert(16881, "Missing 'as' parameter to $map", !asElem.eoo());
+ uassert(16882, "Missing 'in' parameter to $map", !inElem.eoo());
- return new ExpressionMap(varName, varId, input, in);
- }
+ // parse "input"
+ intrusive_ptr<Expression> input = parseOperand(inputElem, vpsIn); // only has outer vars
- ExpressionMap::ExpressionMap(const string& varName,
- Variables::Id varId,
- intrusive_ptr<Expression> input,
- intrusive_ptr<Expression> each)
- : _varName(varName)
- , _varId(varId)
- , _input(input)
- , _each(each)
- {}
+ // parse "as"
+ VariablesParseState vpsSub(vpsIn); // vpsSub gets our vars, vpsIn doesn't.
+ string varName = asElem.str();
+ Variables::uassertValidNameForUserWrite(varName);
+ Variables::Id varId = vpsSub.defineVariable(varName);
- intrusive_ptr<Expression> ExpressionMap::optimize() {
- // TODO handle when _input is constant
- _input = _input->optimize();
- _each = _each->optimize();
- return this;
- }
+ // parse "in"
+ intrusive_ptr<Expression> in = parseOperand(inElem, vpsSub); // has access to map variable
- Value ExpressionMap::serialize(bool explain) const {
- return Value(DOC("$map" << DOC("input" << _input->serialize(explain)
- << "as" << _varName
- << "in" << _each->serialize(explain)
- )));
- }
+ return new ExpressionMap(varName, varId, input, in);
+}
- Value ExpressionMap::evaluateInternal(Variables* vars) const {
- // guaranteed at parse time that this isn't using our _varId
- const Value inputVal = _input->evaluateInternal(vars);
- if (inputVal.nullish())
- return Value(BSONNULL);
+ExpressionMap::ExpressionMap(const string& varName,
+ Variables::Id varId,
+ intrusive_ptr<Expression> input,
+ intrusive_ptr<Expression> each)
+ : _varName(varName), _varId(varId), _input(input), _each(each) {}
+
+intrusive_ptr<Expression> ExpressionMap::optimize() {
+ // TODO handle when _input is constant
+ _input = _input->optimize();
+ _each = _each->optimize();
+ return this;
+}
- uassert(16883, str::stream() << "input to $map must be an Array not "
- << typeName(inputVal.getType()),
- inputVal.getType() == Array);
+Value ExpressionMap::serialize(bool explain) const {
+ return Value(DOC("$map" << DOC("input" << _input->serialize(explain) << "as" << _varName << "in"
+ << _each->serialize(explain))));
+}
- const vector<Value>& input = inputVal.getArray();
+Value ExpressionMap::evaluateInternal(Variables* vars) const {
+ // guaranteed at parse time that this isn't using our _varId
+ const Value inputVal = _input->evaluateInternal(vars);
+ if (inputVal.nullish())
+ return Value(BSONNULL);
- if (input.empty())
- return inputVal;
+ uassert(16883,
+ str::stream() << "input to $map must be an Array not " << typeName(inputVal.getType()),
+ inputVal.getType() == Array);
- vector<Value> output;
- output.reserve(input.size());
- for (size_t i=0; i < input.size(); i++) {
- vars->setValue(_varId, input[i]);
+ const vector<Value>& input = inputVal.getArray();
- Value toInsert = _each->evaluateInternal(vars);
- if (toInsert.missing())
- toInsert = Value(BSONNULL); // can't insert missing values into array
+ if (input.empty())
+ return inputVal;
- output.push_back(toInsert);
- }
+ vector<Value> output;
+ output.reserve(input.size());
+ for (size_t i = 0; i < input.size(); i++) {
+ vars->setValue(_varId, input[i]);
- return Value(std::move(output));
- }
+ Value toInsert = _each->evaluateInternal(vars);
+ if (toInsert.missing())
+ toInsert = Value(BSONNULL); // can't insert missing values into array
- void ExpressionMap::addDependencies(DepsTracker* deps, vector<string>* path) const {
- _input->addDependencies(deps);
- _each->addDependencies(deps);
+ output.push_back(toInsert);
}
- /* ------------------------- ExpressionMeta ----------------------------- */
+ return Value(std::move(output));
+}
- REGISTER_EXPRESSION("$meta", ExpressionMeta::parse);
- intrusive_ptr<Expression> ExpressionMeta::parse(
- BSONElement expr,
- const VariablesParseState& vpsIn) {
+void ExpressionMap::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ _input->addDependencies(deps);
+ _each->addDependencies(deps);
+}
- uassert(17307, "$meta only supports String arguments",
- expr.type() == String);
- uassert(17308, "Unsupported argument to $meta: " + expr.String(),
- expr.String() == "textScore");
+/* ------------------------- ExpressionMeta ----------------------------- */
- return new ExpressionMeta();
- }
+REGISTER_EXPRESSION("$meta", ExpressionMeta::parse);
+intrusive_ptr<Expression> ExpressionMeta::parse(BSONElement expr,
+ const VariablesParseState& vpsIn) {
+ uassert(17307, "$meta only supports String arguments", expr.type() == String);
+ uassert(17308, "Unsupported argument to $meta: " + expr.String(), expr.String() == "textScore");
- Value ExpressionMeta::serialize(bool explain) const {
- return Value(DOC("$meta" << "textScore"));
- }
+ return new ExpressionMeta();
+}
- Value ExpressionMeta::evaluateInternal(Variables* vars) const {
- const Document& root = vars->getRoot();
- return root.hasTextScore()
- ? Value(root.getTextScore())
- : Value();
- }
+Value ExpressionMeta::serialize(bool explain) const {
+ return Value(DOC("$meta"
+ << "textScore"));
+}
- void ExpressionMeta::addDependencies(DepsTracker* deps, vector<string>* path) const {
- deps->needTextScore = true;
- }
+Value ExpressionMeta::evaluateInternal(Variables* vars) const {
+ const Document& root = vars->getRoot();
+ return root.hasTextScore() ? Value(root.getTextScore()) : Value();
+}
- /* ------------------------- ExpressionMillisecond ----------------------------- */
+void ExpressionMeta::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ deps->needTextScore = true;
+}
- Value ExpressionMillisecond::evaluateInternal(Variables* vars) const {
- Value date(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(date.coerceToDate()));
- }
+/* ------------------------- ExpressionMillisecond ----------------------------- */
- int ExpressionMillisecond::extract(const long long date) {
- const int ms = date % 1000LL;
- // adding 1000 since dates before 1970 would have negative ms
- return ms >= 0 ? ms : 1000 + ms;
- }
+Value ExpressionMillisecond::evaluateInternal(Variables* vars) const {
+ Value date(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(date.coerceToDate()));
+}
- REGISTER_EXPRESSION("$millisecond", ExpressionMillisecond::parse);
- const char* ExpressionMillisecond::getOpName() const {
- return "$millisecond";
- }
+int ExpressionMillisecond::extract(const long long date) {
+ const int ms = date % 1000LL;
+ // adding 1000 since dates before 1970 would have negative ms
+ return ms >= 0 ? ms : 1000 + ms;
+}
- /* ------------------------- ExpressionMinute -------------------------- */
+REGISTER_EXPRESSION("$millisecond", ExpressionMillisecond::parse);
+const char* ExpressionMillisecond::getOpName() const {
+ return "$millisecond";
+}
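
The negative-remainder adjustment in ExpressionMillisecond::extract() is easy to sanity-check in isolation. A minimal standalone sketch of the same arithmetic (not the server code, just the rule it applies):

    #include <cassert>

    // date % 1000 is negative for dates before the Unix epoch, so shift the
    // remainder back into the [0, 1000) range.
    static int extractMillis(long long dateMillis) {
        const int ms = static_cast<int>(dateMillis % 1000LL);
        return ms >= 0 ? ms : 1000 + ms;
    }

    int main() {
        assert(extractMillis(1500) == 500);  // 1.5 s after the epoch
        assert(extractMillis(-1) == 999);    // 1 ms before the epoch
        assert(extractMillis(-1500) == 500);
        return 0;
    }
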
- Value ExpressionMinute::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
- }
+/* ------------------------- ExpressionMinute -------------------------- */
- REGISTER_EXPRESSION("$minute", ExpressionMinute::parse);
- const char* ExpressionMinute::getOpName() const {
- return "$minute";
- }
+Value ExpressionMinute::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- /* ----------------------- ExpressionMod ---------------------------- */
+REGISTER_EXPRESSION("$minute", ExpressionMinute::parse);
+const char* ExpressionMinute::getOpName() const {
+ return "$minute";
+}
- Value ExpressionMod::evaluateInternal(Variables* vars) const {
- Value lhs = vpOperand[0]->evaluateInternal(vars);
- Value rhs = vpOperand[1]->evaluateInternal(vars);
+/* ----------------------- ExpressionMod ---------------------------- */
- BSONType leftType = lhs.getType();
- BSONType rightType = rhs.getType();
+Value ExpressionMod::evaluateInternal(Variables* vars) const {
+ Value lhs = vpOperand[0]->evaluateInternal(vars);
+ Value rhs = vpOperand[1]->evaluateInternal(vars);
- if (lhs.numeric() && rhs.numeric()) {
- // ensure we aren't modding by 0
- double right = rhs.coerceToDouble();
+ BSONType leftType = lhs.getType();
+ BSONType rightType = rhs.getType();
- uassert(16610, "can't $mod by 0",
- right != 0);
+ if (lhs.numeric() && rhs.numeric()) {
+ // ensure we aren't modding by 0
+ double right = rhs.coerceToDouble();
- if (leftType == NumberDouble || (rightType == NumberDouble && !rhs.integral())) {
- // Need to do fmod. Integer-valued double case is handled below.
+ uassert(16610, "can't $mod by 0", right != 0);
- double left = lhs.coerceToDouble();
- return Value(fmod(left, right));
- }
- else if (leftType == NumberLong || rightType == NumberLong) {
- // if either is long, return long
- long long left = lhs.coerceToLong();
- long long rightLong = rhs.coerceToLong();
- return Value(left % rightLong);
- }
+ if (leftType == NumberDouble || (rightType == NumberDouble && !rhs.integral())) {
+ // Need to do fmod. Integer-valued double case is handled below.
- // lastly they must both be ints, return int
- int left = lhs.coerceToInt();
- int rightInt = rhs.coerceToInt();
- return Value(left % rightInt);
- }
- else if (lhs.nullish() || rhs.nullish()) {
- return Value(BSONNULL);
- }
- else {
- uasserted(16611, str::stream() << "$mod only supports numeric types, not "
- << typeName(lhs.getType())
- << " and "
- << typeName(rhs.getType()));
+ double left = lhs.coerceToDouble();
+ return Value(fmod(left, right));
+ } else if (leftType == NumberLong || rightType == NumberLong) {
+ // if either is long, return long
+ long long left = lhs.coerceToLong();
+ long long rightLong = rhs.coerceToLong();
+ return Value(left % rightLong);
}
- }
- REGISTER_EXPRESSION("$mod", ExpressionMod::parse);
- const char* ExpressionMod::getOpName() const {
- return "$mod";
+ // lastly they must both be ints, return int
+ int left = lhs.coerceToInt();
+ int rightInt = rhs.coerceToInt();
+ return Value(left % rightInt);
+ } else if (lhs.nullish() || rhs.nullish()) {
+ return Value(BSONNULL);
+ } else {
+ uasserted(16611,
+ str::stream() << "$mod only supports numeric types, not "
+ << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
}
+}
- /* ------------------------ ExpressionMonth ----------------------------- */
+REGISTER_EXPRESSION("$mod", ExpressionMod::parse);
+const char* ExpressionMod::getOpName() const {
+ return "$mod";
+}
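
A quick illustration of the dispatch above, outside the server: the double path uses fmod, while the long and int paths use the C++ % operator, whose truncate-toward-zero behaviour is what the code relies on (illustrative sketch only):

    #include <cmath>
    #include <cstdio>

    int main() {
        std::printf("%g\n", std::fmod(7.5, 2.0));  // 1.5  -> double path
        std::printf("%lld\n", 7LL % 3LL);          // 1    -> long path
        std::printf("%d\n", -7 % 3);               // -1   -> int path, truncates toward zero
        return 0;
    }
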
- Value ExpressionMonth::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
- }
+/* ------------------------ ExpressionMonth ----------------------------- */
- REGISTER_EXPRESSION("$month", ExpressionMonth::parse);
- const char* ExpressionMonth::getOpName() const {
- return "$month";
- }
+Value ExpressionMonth::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- /* ------------------------- ExpressionMultiply ----------------------------- */
+REGISTER_EXPRESSION("$month", ExpressionMonth::parse);
+const char* ExpressionMonth::getOpName() const {
+ return "$month";
+}
- Value ExpressionMultiply::evaluateInternal(Variables* vars) const {
- /*
- We'll try to return the narrowest possible result value. To do that
- without creating intermediate Values, do the arithmetic for double
- and integral types in parallel, tracking the current narrowest
- type.
- */
- double doubleProduct = 1;
- long long longProduct = 1;
- BSONType productType = NumberInt;
-
- const size_t n = vpOperand.size();
- for(size_t i = 0; i < n; ++i) {
- Value val = vpOperand[i]->evaluateInternal(vars);
-
- if (val.numeric()) {
- productType = Value::getWidestNumeric(productType, val.getType());
-
- doubleProduct *= val.coerceToDouble();
- longProduct *= val.coerceToLong();
- }
- else if (val.nullish()) {
- return Value(BSONNULL);
- }
- else {
- uasserted(16555, str::stream() << "$multiply only supports numeric types, not "
- << typeName(val.getType()));
- }
- }
+/* ------------------------- ExpressionMultiply ----------------------------- */
- if (productType == NumberDouble)
- return Value(doubleProduct);
- else if (productType == NumberLong)
- return Value(longProduct);
- else if (productType == NumberInt)
- return Value::createIntOrLong(longProduct);
- else
- massert(16418, "$multiply resulted in a non-numeric type", false);
- }
+Value ExpressionMultiply::evaluateInternal(Variables* vars) const {
+ /*
+ We'll try to return the narrowest possible result value. To do that
+ without creating intermediate Values, do the arithmetic for double
+ and integral types in parallel, tracking the current narrowest
+ type.
+ */
+ double doubleProduct = 1;
+ long long longProduct = 1;
+ BSONType productType = NumberInt;
- REGISTER_EXPRESSION("$multiply", ExpressionMultiply::parse);
- const char* ExpressionMultiply::getOpName() const {
- return "$multiply";
- }
+ const size_t n = vpOperand.size();
+ for (size_t i = 0; i < n; ++i) {
+ Value val = vpOperand[i]->evaluateInternal(vars);
- /* ------------------------- ExpressionHour ----------------------------- */
+ if (val.numeric()) {
+ productType = Value::getWidestNumeric(productType, val.getType());
- Value ExpressionHour::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
+ doubleProduct *= val.coerceToDouble();
+ longProduct *= val.coerceToLong();
+ } else if (val.nullish()) {
+ return Value(BSONNULL);
+ } else {
+ uasserted(16555,
+ str::stream() << "$multiply only supports numeric types, not "
+ << typeName(val.getType()));
+ }
}
- REGISTER_EXPRESSION("$hour", ExpressionHour::parse);
- const char* ExpressionHour::getOpName() const {
- return "$hour";
- }
+ if (productType == NumberDouble)
+ return Value(doubleProduct);
+ else if (productType == NumberLong)
+ return Value(longProduct);
+ else if (productType == NumberInt)
+ return Value::createIntOrLong(longProduct);
+ else
+ massert(16418, "$multiply resulted in a non-numeric type", false);
+}
- /* ----------------------- ExpressionIfNull ---------------------------- */
+REGISTER_EXPRESSION("$multiply", ExpressionMultiply::parse);
+const char* ExpressionMultiply::getOpName() const {
+ return "$multiply";
+}
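
The parallel-products trick above avoids building intermediate Values while still returning the narrowest type that fits the inputs. A self-contained sketch of the same idea, with a plain enum standing in for BSONType (names here are illustrative, not the server's types):

    #include <cstdio>

    enum NumType { Int, Long, Double };  // ordered narrowest to widest

    static NumType widest(NumType a, NumType b) {
        return a > b ? a : b;
    }

    int main() {
        double doubleProduct = 1;
        long long longProduct = 1;
        NumType productType = Int;

        const double inputs[] = {3, 4, 2.5};
        const NumType types[] = {Int, Int, Double};
        for (int i = 0; i < 3; ++i) {
            productType = widest(productType, types[i]);
            doubleProduct *= inputs[i];                        // exact result for the double path
            longProduct *= static_cast<long long>(inputs[i]);  // truncated integral result
        }

        // The widest input was a double, so the double product (30) wins over
        // the truncated integral product (24).
        std::printf("%s %g\n", productType == Double ? "double" : "integral", doubleProduct);
        return 0;
    }
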
- Value ExpressionIfNull::evaluateInternal(Variables* vars) const {
- Value pLeft(vpOperand[0]->evaluateInternal(vars));
- if (!pLeft.nullish())
- return pLeft;
+/* ------------------------- ExpressionHour ----------------------------- */
- Value pRight(vpOperand[1]->evaluateInternal(vars));
- return pRight;
- }
+Value ExpressionHour::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- REGISTER_EXPRESSION("$ifNull", ExpressionIfNull::parse);
- const char* ExpressionIfNull::getOpName() const {
- return "$ifNull";
- }
+REGISTER_EXPRESSION("$hour", ExpressionHour::parse);
+const char* ExpressionHour::getOpName() const {
+ return "$hour";
+}
- /* ------------------------ ExpressionNary ----------------------------- */
+/* ----------------------- ExpressionIfNull ---------------------------- */
- intrusive_ptr<Expression> ExpressionNary::optimize() {
- const size_t n = vpOperand.size();
+Value ExpressionIfNull::evaluateInternal(Variables* vars) const {
+ Value pLeft(vpOperand[0]->evaluateInternal(vars));
+ if (!pLeft.nullish())
+ return pLeft;
- // optimize sub-expressions and count constants
- unsigned constCount = 0;
- for(size_t i = 0; i < n; ++i) {
- intrusive_ptr<Expression> optimized = vpOperand[i]->optimize();
+ Value pRight(vpOperand[1]->evaluateInternal(vars));
+ return pRight;
+}
- // substitute the optimized expression
- vpOperand[i] = optimized;
+REGISTER_EXPRESSION("$ifNull", ExpressionIfNull::parse);
+const char* ExpressionIfNull::getOpName() const {
+ return "$ifNull";
+}
- // check to see if the result was a constant
- if (dynamic_cast<ExpressionConstant*>(optimized.get())) {
- constCount++;
- }
- }
+/* ------------------------ ExpressionNary ----------------------------- */
- // If all the operands are constant, we can replace this expression with a constant. Using
- // an empty Variables since it will never be accessed.
- if (constCount == n) {
- Variables emptyVars;
- Value pResult(evaluateInternal(&emptyVars));
- intrusive_ptr<Expression> pReplacement(
- ExpressionConstant::create(pResult));
- return pReplacement;
- }
+intrusive_ptr<Expression> ExpressionNary::optimize() {
+ const size_t n = vpOperand.size();
- // Remaining optimizations are only for associative and commutative expressions.
- if (!isAssociativeAndCommutative())
- return this;
-
- // Process vpOperand to split it into constant and nonconstant vectors.
- // This can leave vpOperand in an invalid state that is cleaned up after the loop.
- ExpressionVector constExprs;
- ExpressionVector nonConstExprs;
- for(size_t i = 0; i < vpOperand.size(); ++i) { // NOTE: vpOperand grows in loop
- intrusive_ptr<Expression> expr = vpOperand[i];
- if (dynamic_cast<ExpressionConstant*>(expr.get())) {
- constExprs.push_back(expr);
- }
- else {
- // If the child operand is the same type as this, then we can
- // extract its operands and inline them here because we know
- // this is commutative and associative. We detect sameness of
- // the child operator by checking for equality of the opNames
- ExpressionNary* nary = dynamic_cast<ExpressionNary*>(expr.get());
- if (!nary || !str::equals(nary->getOpName(), getOpName())) {
- nonConstExprs.push_back(expr);
- }
- else {
- // same expression, so flatten by adding to vpOperand which
- // will be processed later in this loop.
- vpOperand.insert(vpOperand.end(),
- nary->vpOperand.begin(),
- nary->vpOperand.end());
- }
- }
- }
+ // optimize sub-expressions and count constants
+ unsigned constCount = 0;
+ for (size_t i = 0; i < n; ++i) {
+ intrusive_ptr<Expression> optimized = vpOperand[i]->optimize();
- // collapse all constant expressions (if any)
- Value constValue;
- if (!constExprs.empty()) {
- vpOperand = constExprs;
- Variables emptyVars;
- constValue = evaluateInternal(&emptyVars);
- }
+ // substitute the optimized expression
+ vpOperand[i] = optimized;
- // now set the final expression list with constant (if any) at the end
- vpOperand = nonConstExprs;
- if (!constExprs.empty()) {
- vpOperand.push_back(ExpressionConstant::create(constValue));
+ // check to see if the result was a constant
+ if (dynamic_cast<ExpressionConstant*>(optimized.get())) {
+ constCount++;
}
+ }
- return this;
+ // If all the operands are constant, we can replace this expression with a constant. Using
+ // an empty Variables since it will never be accessed.
+ if (constCount == n) {
+ Variables emptyVars;
+ Value pResult(evaluateInternal(&emptyVars));
+ intrusive_ptr<Expression> pReplacement(ExpressionConstant::create(pResult));
+ return pReplacement;
}
- void ExpressionNary::addDependencies(DepsTracker* deps, vector<string>* path) const {
- for(ExpressionVector::const_iterator i(vpOperand.begin());
- i != vpOperand.end(); ++i) {
- (*i)->addDependencies(deps);
+ // Remaining optimizations are only for associative and commutative expressions.
+ if (!isAssociativeAndCommutative())
+ return this;
+
+ // Process vpOperand to split it into constant and nonconstant vectors.
+ // This can leave vpOperand in an invalid state that is cleaned up after the loop.
+ ExpressionVector constExprs;
+ ExpressionVector nonConstExprs;
+ for (size_t i = 0; i < vpOperand.size(); ++i) { // NOTE: vpOperand grows in loop
+ intrusive_ptr<Expression> expr = vpOperand[i];
+ if (dynamic_cast<ExpressionConstant*>(expr.get())) {
+ constExprs.push_back(expr);
+ } else {
+ // If the child operand is the same type as this, then we can
+ // extract its operands and inline them here because we know
+ // this is commutative and associative. We detect sameness of
+ // the child operator by checking for equality of the opNames
+ ExpressionNary* nary = dynamic_cast<ExpressionNary*>(expr.get());
+ if (!nary || !str::equals(nary->getOpName(), getOpName())) {
+ nonConstExprs.push_back(expr);
+ } else {
+ // same expression, so flatten by adding to vpOperand which
+ // will be processed later in this loop.
+ vpOperand.insert(vpOperand.end(), nary->vpOperand.begin(), nary->vpOperand.end());
+ }
}
}
- void ExpressionNary::addOperand(const intrusive_ptr<Expression>& pExpression) {
- vpOperand.push_back(pExpression);
+ // collapse all constant expressions (if any)
+ Value constValue;
+ if (!constExprs.empty()) {
+ vpOperand = constExprs;
+ Variables emptyVars;
+ constValue = evaluateInternal(&emptyVars);
}
- Value ExpressionNary::serialize(bool explain) const {
- const size_t nOperand = vpOperand.size();
- vector<Value> array;
- /* build up the array */
- for(size_t i = 0; i < nOperand; i++)
- array.push_back(vpOperand[i]->serialize(explain));
+ // now set the final expression list with constant (if any) at the end
+ vpOperand = nonConstExprs;
+ if (!constExprs.empty()) {
+ vpOperand.push_back(ExpressionConstant::create(constValue));
+ }
- return Value(DOC(getOpName() << array));
+ return this;
+}
+
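
To make the refolding in optimize() concrete: for an associative and commutative operator such as $add, nested same-operator children are inlined, the constant operands are evaluated once, and a single constant is appended after the non-constant operands. A standalone sketch of that partition-and-fold step (illustrative, with a tiny struct standing in for ExpressionConstant and field-path operands):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Operand {
        bool isConst;
        long long value;   // meaningful when isConst
        std::string name;  // meaningful otherwise, e.g. "$x"
    };

    int main() {
        // Roughly {$add: [1, "$x", 2, 3]} after nested $add children were flattened.
        std::vector<Operand> ops = {{true, 1, ""}, {false, 0, "$x"}, {true, 2, ""}, {true, 3, ""}};

        std::vector<Operand> nonConst;
        long long folded = 0;  // pretend the operator is $add
        bool sawConst = false;
        for (const Operand& op : ops) {
            if (op.isConst) {
                folded += op.value;
                sawConst = true;
            } else {
                nonConst.push_back(op);
            }
        }
        if (sawConst)
            nonConst.push_back({true, folded, ""});  // one trailing constant, as in optimize()

        for (const Operand& op : nonConst)
            op.isConst ? std::printf("%lld ", op.value) : std::printf("%s ", op.name.c_str());
        std::printf("\n");  // prints: $x 6
        return 0;
    }
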
+void ExpressionNary::addDependencies(DepsTracker* deps, vector<string>* path) const {
+ for (ExpressionVector::const_iterator i(vpOperand.begin()); i != vpOperand.end(); ++i) {
+ (*i)->addDependencies(deps);
}
+}
- /* ------------------------- ExpressionNot ----------------------------- */
+void ExpressionNary::addOperand(const intrusive_ptr<Expression>& pExpression) {
+ vpOperand.push_back(pExpression);
+}
- Value ExpressionNot::evaluateInternal(Variables* vars) const {
- Value pOp(vpOperand[0]->evaluateInternal(vars));
+Value ExpressionNary::serialize(bool explain) const {
+ const size_t nOperand = vpOperand.size();
+ vector<Value> array;
+ /* build up the array */
+ for (size_t i = 0; i < nOperand; i++)
+ array.push_back(vpOperand[i]->serialize(explain));
- bool b = pOp.coerceToBool();
- return Value(!b);
- }
+ return Value(DOC(getOpName() << array));
+}
- REGISTER_EXPRESSION("$not", ExpressionNot::parse);
- const char* ExpressionNot::getOpName() const {
- return "$not";
- }
+/* ------------------------- ExpressionNot ----------------------------- */
- /* -------------------------- ExpressionOr ----------------------------- */
+Value ExpressionNot::evaluateInternal(Variables* vars) const {
+ Value pOp(vpOperand[0]->evaluateInternal(vars));
- Value ExpressionOr::evaluateInternal(Variables* vars) const {
- const size_t n = vpOperand.size();
- for(size_t i = 0; i < n; ++i) {
- Value pValue(vpOperand[i]->evaluateInternal(vars));
- if (pValue.coerceToBool())
- return Value(true);
- }
+ bool b = pOp.coerceToBool();
+ return Value(!b);
+}
- return Value(false);
- }
+REGISTER_EXPRESSION("$not", ExpressionNot::parse);
+const char* ExpressionNot::getOpName() const {
+ return "$not";
+}
- intrusive_ptr<Expression> ExpressionOr::optimize() {
- /* optimize the disjunction as much as possible */
- intrusive_ptr<Expression> pE(ExpressionNary::optimize());
+/* -------------------------- ExpressionOr ----------------------------- */
- /* if the result isn't a disjunction, we can't do anything */
- ExpressionOr *pOr = dynamic_cast<ExpressionOr *>(pE.get());
- if (!pOr)
- return pE;
+Value ExpressionOr::evaluateInternal(Variables* vars) const {
+ const size_t n = vpOperand.size();
+ for (size_t i = 0; i < n; ++i) {
+ Value pValue(vpOperand[i]->evaluateInternal(vars));
+ if (pValue.coerceToBool())
+ return Value(true);
+ }
- /*
- Check the last argument on the result; if it's not constant (as
- promised by ExpressionNary::optimize(),) then there's nothing
- we can do.
- */
- const size_t n = pOr->vpOperand.size();
- // ExpressionNary::optimize() generates an ExpressionConstant for {$or:[]}.
- verify(n > 0);
- intrusive_ptr<Expression> pLast(pOr->vpOperand[n - 1]);
- const ExpressionConstant *pConst =
- dynamic_cast<ExpressionConstant *>(pLast.get());
- if (!pConst)
- return pE;
+ return Value(false);
+}
- /*
- Evaluate and coerce the last argument to a boolean. If it's true,
- then we can replace this entire expression.
- */
- bool last = pConst->getValue().coerceToBool();
- if (last) {
- intrusive_ptr<ExpressionConstant> pFinal(
- ExpressionConstant::create(Value(true)));
- return pFinal;
- }
+intrusive_ptr<Expression> ExpressionOr::optimize() {
+ /* optimize the disjunction as much as possible */
+ intrusive_ptr<Expression> pE(ExpressionNary::optimize());
- /*
- If we got here, the final operand was false, so we don't need it
- anymore. If there was only one other operand, we don't need the
- conjunction either. Note we still need to keep the promise that
- the result will be a boolean.
- */
- if (n == 2) {
- intrusive_ptr<Expression> pFinal(
- ExpressionCoerceToBool::create(pOr->vpOperand[0]));
- return pFinal;
- }
+ /* if the result isn't a disjunction, we can't do anything */
+ ExpressionOr* pOr = dynamic_cast<ExpressionOr*>(pE.get());
+ if (!pOr)
+ return pE;
- /*
- Remove the final "false" value, and return the new expression.
- */
- pOr->vpOperand.resize(n - 1);
+ /*
+ Check the last argument on the result; if it's not constant (as
+      promised by ExpressionNary::optimize()), then there's nothing

+ we can do.
+ */
+ const size_t n = pOr->vpOperand.size();
+ // ExpressionNary::optimize() generates an ExpressionConstant for {$or:[]}.
+ verify(n > 0);
+ intrusive_ptr<Expression> pLast(pOr->vpOperand[n - 1]);
+ const ExpressionConstant* pConst = dynamic_cast<ExpressionConstant*>(pLast.get());
+ if (!pConst)
return pE;
- }
- REGISTER_EXPRESSION("$or", ExpressionOr::parse);
- const char* ExpressionOr::getOpName() const {
- return "$or";
+ /*
+ Evaluate and coerce the last argument to a boolean. If it's true,
+ then we can replace this entire expression.
+ */
+ bool last = pConst->getValue().coerceToBool();
+ if (last) {
+ intrusive_ptr<ExpressionConstant> pFinal(ExpressionConstant::create(Value(true)));
+ return pFinal;
}
- /* ------------------------- ExpressionSecond ----------------------------- */
-
- Value ExpressionSecond::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
+ /*
+ If we got here, the final operand was false, so we don't need it
+ anymore. If there was only one other operand, we don't need the
+ conjunction either. Note we still need to keep the promise that
+ the result will be a boolean.
+ */
+ if (n == 2) {
+ intrusive_ptr<Expression> pFinal(ExpressionCoerceToBool::create(pOr->vpOperand[0]));
+ return pFinal;
}
- REGISTER_EXPRESSION("$second", ExpressionSecond::parse);
- const char* ExpressionSecond::getOpName() const {
- return "$second";
- }
+ /*
+ Remove the final "false" value, and return the new expression.
+ */
+ pOr->vpOperand.resize(n - 1);
+ return pE;
+}
- namespace {
- ValueSet arrayToSet(const Value& val) {
- const vector<Value>& array = val.getArray();
- return ValueSet(array.begin(), array.end());
- }
- }
+REGISTER_EXPRESSION("$or", ExpressionOr::parse);
+const char* ExpressionOr::getOpName() const {
+ return "$or";
+}
- /* ----------------------- ExpressionSetDifference ---------------------------- */
+/* ------------------------- ExpressionSecond ----------------------------- */
- Value ExpressionSetDifference::evaluateInternal(Variables* vars) const {
- const Value lhs = vpOperand[0]->evaluateInternal(vars);
- const Value rhs = vpOperand[1]->evaluateInternal(vars);
+Value ExpressionSecond::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- if (lhs.nullish() || rhs.nullish()) {
- return Value(BSONNULL);
- }
+REGISTER_EXPRESSION("$second", ExpressionSecond::parse);
+const char* ExpressionSecond::getOpName() const {
+ return "$second";
+}
- uassert(17048, str::stream() << "both operands of $setDifference must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
- lhs.getType() == Array);
- uassert(17049, str::stream() << "both operands of $setDifference must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
- rhs.getType() == Array);
+namespace {
+ValueSet arrayToSet(const Value& val) {
+ const vector<Value>& array = val.getArray();
+ return ValueSet(array.begin(), array.end());
+}
+}
- ValueSet rhsSet = arrayToSet(rhs);
- const vector<Value>& lhsArray = lhs.getArray();
- vector<Value> returnVec;
+/* ----------------------- ExpressionSetDifference ---------------------------- */
- for (vector<Value>::const_iterator it = lhsArray.begin(); it != lhsArray.end(); ++it) {
- // rhsSet serves the dual role of filtering out elements that were originally present
- // in RHS and of eleminating duplicates from LHS
- if (rhsSet.insert(*it).second) {
- returnVec.push_back(*it);
- }
- }
- return Value(std::move(returnVec));
- }
+Value ExpressionSetDifference::evaluateInternal(Variables* vars) const {
+ const Value lhs = vpOperand[0]->evaluateInternal(vars);
+ const Value rhs = vpOperand[1]->evaluateInternal(vars);
- REGISTER_EXPRESSION("$setDifference", ExpressionSetDifference::parse);
- const char* ExpressionSetDifference::getOpName() const {
- return "$setDifference";
+ if (lhs.nullish() || rhs.nullish()) {
+ return Value(BSONNULL);
}
- /* ----------------------- ExpressionSetEquals ---------------------------- */
+ uassert(17048,
+ str::stream() << "both operands of $setDifference must be arrays. First "
+ << "argument is of type: " << typeName(lhs.getType()),
+ lhs.getType() == Array);
+ uassert(17049,
+ str::stream() << "both operands of $setDifference must be arrays. Second "
+ << "argument is of type: " << typeName(rhs.getType()),
+ rhs.getType() == Array);
- void ExpressionSetEquals::validateArguments(const ExpressionVector& args) const {
- uassert(17045, str::stream() << "$setEquals needs at least two arguments had: "
- << args.size(),
- args.size() >= 2);
+ ValueSet rhsSet = arrayToSet(rhs);
+ const vector<Value>& lhsArray = lhs.getArray();
+ vector<Value> returnVec;
+
+ for (vector<Value>::const_iterator it = lhsArray.begin(); it != lhsArray.end(); ++it) {
+ // rhsSet serves the dual role of filtering out elements that were originally present
+        // in RHS and of eliminating duplicates from LHS
+ if (rhsSet.insert(*it).second) {
+ returnVec.push_back(*it);
+ }
}
+ return Value(std::move(returnVec));
+}
- Value ExpressionSetEquals::evaluateInternal(Variables* vars) const {
- const size_t n = vpOperand.size();
- std::set<Value> lhs;
+REGISTER_EXPRESSION("$setDifference", ExpressionSetDifference::parse);
+const char* ExpressionSetDifference::getOpName() const {
+ return "$setDifference";
+}
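
The loop above uses one set for two jobs: insert().second is false both for elements already present in the RHS and for LHS duplicates seen earlier, so a single probe filters and de-duplicates at once. A standalone sketch of that trick (illustrative, ints in place of Values):

    #include <cstdio>
    #include <set>
    #include <vector>

    int main() {
        const std::vector<int> lhs = {1, 1, 2, 3, 4};
        const std::vector<int> rhs = {3, 4, 5};

        std::set<int> seen(rhs.begin(), rhs.end());
        std::vector<int> result;
        for (int v : lhs) {
            // false for RHS members and for LHS duplicates alike
            if (seen.insert(v).second)
                result.push_back(v);
        }
        for (int v : result)
            std::printf("%d ", v);  // 1 2
        std::printf("\n");
        return 0;
    }
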
- for (size_t i = 0; i < n; i++) {
- const Value nextEntry = vpOperand[i]->evaluateInternal(vars);
- uassert(17044, str::stream() << "All operands of $setEquals must be arrays. One "
- << "argument is of type: "
- << typeName(nextEntry.getType()),
- nextEntry.getType() == Array);
+/* ----------------------- ExpressionSetEquals ---------------------------- */
- if (i == 0) {
- lhs.insert(nextEntry.getArray().begin(), nextEntry.getArray().end());
- }
- else {
- const std::set<Value> rhs(nextEntry.getArray().begin(), nextEntry.getArray().end());
- if (lhs != rhs) {
- return Value(false);
- }
+void ExpressionSetEquals::validateArguments(const ExpressionVector& args) const {
+ uassert(17045,
+ str::stream() << "$setEquals needs at least two arguments had: " << args.size(),
+ args.size() >= 2);
+}
+
+Value ExpressionSetEquals::evaluateInternal(Variables* vars) const {
+ const size_t n = vpOperand.size();
+ std::set<Value> lhs;
+
+ for (size_t i = 0; i < n; i++) {
+ const Value nextEntry = vpOperand[i]->evaluateInternal(vars);
+ uassert(17044,
+ str::stream() << "All operands of $setEquals must be arrays. One "
+ << "argument is of type: " << typeName(nextEntry.getType()),
+ nextEntry.getType() == Array);
+
+ if (i == 0) {
+ lhs.insert(nextEntry.getArray().begin(), nextEntry.getArray().end());
+ } else {
+ const std::set<Value> rhs(nextEntry.getArray().begin(), nextEntry.getArray().end());
+ if (lhs != rhs) {
+ return Value(false);
}
}
- return Value(true);
}
+ return Value(true);
+}
- REGISTER_EXPRESSION("$setEquals", ExpressionSetEquals::parse);
- const char* ExpressionSetEquals::getOpName() const {
- return "$setEquals";
- }
+REGISTER_EXPRESSION("$setEquals", ExpressionSetEquals::parse);
+const char* ExpressionSetEquals::getOpName() const {
+ return "$setEquals";
+}
- /* ----------------------- ExpressionSetIntersection ---------------------------- */
+/* ----------------------- ExpressionSetIntersection ---------------------------- */
- Value ExpressionSetIntersection::evaluateInternal(Variables* vars) const {
- const size_t n = vpOperand.size();
- ValueSet currentIntersection;
- for (size_t i = 0; i < n; i++) {
- const Value nextEntry = vpOperand[i]->evaluateInternal(vars);
- if (nextEntry.nullish()) {
- return Value(BSONNULL);
- }
- uassert(17047, str::stream() << "All operands of $setIntersection must be arrays. One "
- << "argument is of type: "
- << typeName(nextEntry.getType()),
- nextEntry.getType() == Array);
-
- if (i == 0) {
- currentIntersection.insert(nextEntry.getArray().begin(),
- nextEntry.getArray().end());
+Value ExpressionSetIntersection::evaluateInternal(Variables* vars) const {
+ const size_t n = vpOperand.size();
+ ValueSet currentIntersection;
+ for (size_t i = 0; i < n; i++) {
+ const Value nextEntry = vpOperand[i]->evaluateInternal(vars);
+ if (nextEntry.nullish()) {
+ return Value(BSONNULL);
+ }
+ uassert(17047,
+ str::stream() << "All operands of $setIntersection must be arrays. One "
+ << "argument is of type: " << typeName(nextEntry.getType()),
+ nextEntry.getType() == Array);
+
+ if (i == 0) {
+ currentIntersection.insert(nextEntry.getArray().begin(), nextEntry.getArray().end());
+ } else {
+ ValueSet nextSet = arrayToSet(nextEntry);
+ if (currentIntersection.size() > nextSet.size()) {
+ // to iterate over whichever is the smaller set
+ nextSet.swap(currentIntersection);
}
- else {
- ValueSet nextSet = arrayToSet(nextEntry);
- if (currentIntersection.size() > nextSet.size()) {
- // to iterate over whichever is the smaller set
- nextSet.swap(currentIntersection);
- }
- ValueSet::iterator it = currentIntersection.begin();
- while (it != currentIntersection.end()) {
- if (!nextSet.count(*it)) {
- ValueSet::iterator del = it;
- ++it;
- currentIntersection.erase(del);
- }
- else {
- ++it;
- }
+ ValueSet::iterator it = currentIntersection.begin();
+ while (it != currentIntersection.end()) {
+ if (!nextSet.count(*it)) {
+ ValueSet::iterator del = it;
+ ++it;
+ currentIntersection.erase(del);
+ } else {
+ ++it;
}
}
- if (currentIntersection.empty()) {
- break;
- }
}
- return Value(vector<Value>(currentIntersection.begin(),
- currentIntersection.end()));
+ if (currentIntersection.empty()) {
+ break;
+ }
}
+ return Value(vector<Value>(currentIntersection.begin(), currentIntersection.end()));
+}
- REGISTER_EXPRESSION("$setIntersection", ExpressionSetIntersection::parse);
- const char* ExpressionSetIntersection::getOpName() const {
- return "$setIntersection";
- }
+REGISTER_EXPRESSION("$setIntersection", ExpressionSetIntersection::parse);
+const char* ExpressionSetIntersection::getOpName() const {
+ return "$setIntersection";
+}
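
The swap in the loop above keeps the iteration over whichever of the two sets is smaller, so each pairwise intersection costs O(min(|a|,|b|) * log |b|). A standalone sketch of the same step (illustrative, ints in place of Values):

    #include <cstdio>
    #include <set>

    static std::set<int> intersect(std::set<int> current, std::set<int> next) {
        if (current.size() > next.size())
            current.swap(next);  // iterate over whichever set is smaller
        for (auto it = current.begin(); it != current.end();) {
            if (!next.count(*it))
                it = current.erase(it);
            else
                ++it;
        }
        return current;
    }

    int main() {
        std::set<int> a = {1, 2, 3, 4};
        std::set<int> b = {3, 4, 5};
        for (int v : intersect(a, b))
            std::printf("%d ", v);  // 3 4
        std::printf("\n");
        return 0;
    }
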
- /* ----------------------- ExpressionSetIsSubset ---------------------------- */
+/* ----------------------- ExpressionSetIsSubset ---------------------------- */
namespace {
- Value setIsSubsetHelper(const vector<Value>& lhs, const ValueSet& rhs) {
- // do not shortcircuit when lhs.size() > rhs.size()
- // because lhs can have redundant entries
- for (vector<Value>::const_iterator it = lhs.begin(); it != lhs.end(); ++it) {
- if (!rhs.count(*it)) {
- return Value(false);
- }
+Value setIsSubsetHelper(const vector<Value>& lhs, const ValueSet& rhs) {
+ // do not shortcircuit when lhs.size() > rhs.size()
+ // because lhs can have redundant entries
+ for (vector<Value>::const_iterator it = lhs.begin(); it != lhs.end(); ++it) {
+ if (!rhs.count(*it)) {
+ return Value(false);
}
- return Value(true);
}
+ return Value(true);
+}
}
- Value ExpressionSetIsSubset::evaluateInternal(Variables* vars) const {
- const Value lhs = vpOperand[0]->evaluateInternal(vars);
- const Value rhs = vpOperand[1]->evaluateInternal(vars);
-
- uassert(17046, str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
- lhs.getType() == Array);
- uassert(17042, str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
- rhs.getType() == Array);
+Value ExpressionSetIsSubset::evaluateInternal(Variables* vars) const {
+ const Value lhs = vpOperand[0]->evaluateInternal(vars);
+ const Value rhs = vpOperand[1]->evaluateInternal(vars);
- return setIsSubsetHelper(lhs.getArray(), arrayToSet(rhs));
- }
+ uassert(17046,
+ str::stream() << "both operands of $setIsSubset must be arrays. First "
+ << "argument is of type: " << typeName(lhs.getType()),
+ lhs.getType() == Array);
+ uassert(17042,
+ str::stream() << "both operands of $setIsSubset must be arrays. Second "
+ << "argument is of type: " << typeName(rhs.getType()),
+ rhs.getType() == Array);
- /**
- * This class handles the case where the RHS set is constant.
- *
- * Since it is constant we can construct the hashset once which makes the runtime performance
- * effectively constant with respect to the size of RHS. Large, constant RHS is expected to be a
- * major use case for $redact and this has been verified to improve performance significantly.
- */
- class ExpressionSetIsSubset::Optimized : public ExpressionSetIsSubset {
- public:
- Optimized(const ValueSet& cachedRhsSet, const ExpressionVector& operands)
- : _cachedRhsSet(cachedRhsSet)
- {
- vpOperand = operands;
- }
+ return setIsSubsetHelper(lhs.getArray(), arrayToSet(rhs));
+}
- virtual Value evaluateInternal(Variables* vars) const {
- const Value lhs = vpOperand[0]->evaluateInternal(vars);
+/**
+ * This class handles the case where the RHS set is constant.
+ *
+ * Since it is constant we can construct the hashset once which makes the runtime performance
+ * effectively constant with respect to the size of RHS. Large, constant RHS is expected to be a
+ * major use case for $redact and this has been verified to improve performance significantly.
+ */
+class ExpressionSetIsSubset::Optimized : public ExpressionSetIsSubset {
+public:
+ Optimized(const ValueSet& cachedRhsSet, const ExpressionVector& operands)
+ : _cachedRhsSet(cachedRhsSet) {
+ vpOperand = operands;
+ }
- uassert(17310, str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
- lhs.getType() == Array);
+ virtual Value evaluateInternal(Variables* vars) const {
+ const Value lhs = vpOperand[0]->evaluateInternal(vars);
- return setIsSubsetHelper(lhs.getArray(), _cachedRhsSet);
- }
+ uassert(17310,
+ str::stream() << "both operands of $setIsSubset must be arrays. First "
+ << "argument is of type: " << typeName(lhs.getType()),
+ lhs.getType() == Array);
- private:
- const ValueSet _cachedRhsSet;
- };
+ return setIsSubsetHelper(lhs.getArray(), _cachedRhsSet);
+ }
- intrusive_ptr<Expression> ExpressionSetIsSubset::optimize() {
- // perfore basic optimizations
- intrusive_ptr<Expression> optimized = ExpressionNary::optimize();
+private:
+ const ValueSet _cachedRhsSet;
+};
- // if ExpressionNary::optimize() created a new value, return it directly
- if (optimized.get() != this)
- return optimized;
+intrusive_ptr<Expression> ExpressionSetIsSubset::optimize() {
+    // perform basic optimizations
+ intrusive_ptr<Expression> optimized = ExpressionNary::optimize();
- if (ExpressionConstant* ec = dynamic_cast<ExpressionConstant*>(vpOperand[1].get())) {
- const Value rhs = ec->getValue();
- uassert(17311, str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
- rhs.getType() == Array);
+ // if ExpressionNary::optimize() created a new value, return it directly
+ if (optimized.get() != this)
+ return optimized;
- return new Optimized(arrayToSet(rhs), vpOperand);
- }
+ if (ExpressionConstant* ec = dynamic_cast<ExpressionConstant*>(vpOperand[1].get())) {
+ const Value rhs = ec->getValue();
+ uassert(17311,
+ str::stream() << "both operands of $setIsSubset must be arrays. Second "
+ << "argument is of type: " << typeName(rhs.getType()),
+ rhs.getType() == Array);
- return optimized;
+ return new Optimized(arrayToSet(rhs), vpOperand);
}
- REGISTER_EXPRESSION("$setIsSubset", ExpressionSetIsSubset::parse);
- const char* ExpressionSetIsSubset::getOpName() const {
- return "$setIsSubset";
- }
+ return optimized;
+}
- /* ----------------------- ExpressionSetUnion ---------------------------- */
+REGISTER_EXPRESSION("$setIsSubset", ExpressionSetIsSubset::parse);
+const char* ExpressionSetIsSubset::getOpName() const {
+ return "$setIsSubset";
+}
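
The Optimized subclass above exists because the RHS set is often a large constant; building the set once per pipeline instead of once per document is what makes the check cheap. A standalone sketch of that shape (names are illustrative, not the server's API):

    #include <cstdio>
    #include <set>
    #include <vector>

    class ConstRhsSubsetCheck {
    public:
        explicit ConstRhsSubsetCheck(const std::vector<int>& rhs) : _rhs(rhs.begin(), rhs.end()) {}

        // lhs may contain duplicates, so do not shortcut on size alone.
        bool isSubset(const std::vector<int>& lhs) const {
            for (int v : lhs)
                if (!_rhs.count(v))
                    return false;
            return true;
        }

    private:
        const std::set<int> _rhs;  // built once, reused for every document
    };

    int main() {
        ConstRhsSubsetCheck check({1, 2, 3, 4, 5});
        std::printf("%d %d\n", check.isSubset({2, 2, 4}), check.isSubset({2, 6}));  // 1 0
        return 0;
    }
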
- Value ExpressionSetUnion::evaluateInternal(Variables* vars) const {
- ValueSet unionedSet;
- const size_t n = vpOperand.size();
- for (size_t i = 0; i < n; i++) {
- const Value newEntries = vpOperand[i]->evaluateInternal(vars);
- if (newEntries.nullish()) {
- return Value(BSONNULL);
- }
- uassert(17043, str::stream() << "All operands of $setUnion must be arrays. One argument"
- << " is of type: " << typeName(newEntries.getType()),
- newEntries.getType() == Array);
+/* ----------------------- ExpressionSetUnion ---------------------------- */
- unionedSet.insert(newEntries.getArray().begin(), newEntries.getArray().end());
+Value ExpressionSetUnion::evaluateInternal(Variables* vars) const {
+ ValueSet unionedSet;
+ const size_t n = vpOperand.size();
+ for (size_t i = 0; i < n; i++) {
+ const Value newEntries = vpOperand[i]->evaluateInternal(vars);
+ if (newEntries.nullish()) {
+ return Value(BSONNULL);
}
- return Value(vector<Value>(unionedSet.begin(), unionedSet.end()));
- }
+ uassert(17043,
+ str::stream() << "All operands of $setUnion must be arrays. One argument"
+ << " is of type: " << typeName(newEntries.getType()),
+ newEntries.getType() == Array);
- REGISTER_EXPRESSION("$setUnion", ExpressionSetUnion::parse);
- const char* ExpressionSetUnion::getOpName() const {
- return "$setUnion";
+ unionedSet.insert(newEntries.getArray().begin(), newEntries.getArray().end());
}
+ return Value(vector<Value>(unionedSet.begin(), unionedSet.end()));
+}
- /* ----------------------- ExpressionIsArray ---------------------------- */
-
- Value ExpressionIsArray::evaluateInternal(Variables* vars) const {
- Value argument = vpOperand[0]->evaluateInternal(vars);
- return Value(argument.getType() == Array);
- }
-
- REGISTER_EXPRESSION("$isArray", ExpressionIsArray::parse);
- const char* ExpressionIsArray::getOpName() const {
- return "$isArray";
- }
-
- /* ----------------------- ExpressionSize ---------------------------- */
-
- Value ExpressionSize::evaluateInternal(Variables* vars) const {
- Value array = vpOperand[0]->evaluateInternal(vars);
-
- uassert(17124, str::stream() << "The argument to $size must be an Array, but was of type: "
- << typeName(array.getType()),
- array.getType() == Array);
- return Value::createIntOrLong(array.getArray().size());
- }
+REGISTER_EXPRESSION("$setUnion", ExpressionSetUnion::parse);
+const char* ExpressionSetUnion::getOpName() const {
+ return "$setUnion";
+}
- REGISTER_EXPRESSION("$size", ExpressionSize::parse);
- const char* ExpressionSize::getOpName() const {
- return "$size";
- }
+/* ----------------------- ExpressionIsArray ---------------------------- */
- /* ----------------------- ExpressionStrcasecmp ---------------------------- */
+Value ExpressionIsArray::evaluateInternal(Variables* vars) const {
+ Value argument = vpOperand[0]->evaluateInternal(vars);
+ return Value(argument.getType() == Array);
+}
- Value ExpressionStrcasecmp::evaluateInternal(Variables* vars) const {
- Value pString1(vpOperand[0]->evaluateInternal(vars));
- Value pString2(vpOperand[1]->evaluateInternal(vars));
+REGISTER_EXPRESSION("$isArray", ExpressionIsArray::parse);
+const char* ExpressionIsArray::getOpName() const {
+ return "$isArray";
+}
- /* boost::iequals returns a bool not an int so strings must actually be allocated */
- string str1 = boost::to_upper_copy( pString1.coerceToString() );
- string str2 = boost::to_upper_copy( pString2.coerceToString() );
- int result = str1.compare(str2);
+/* ----------------------- ExpressionSize ---------------------------- */
- if (result == 0)
- return Value(0);
- else if (result > 0)
- return Value(1);
- else
- return Value(-1);
- }
+Value ExpressionSize::evaluateInternal(Variables* vars) const {
+ Value array = vpOperand[0]->evaluateInternal(vars);
- REGISTER_EXPRESSION("$strcasecmp", ExpressionStrcasecmp::parse);
- const char* ExpressionStrcasecmp::getOpName() const {
- return "$strcasecmp";
- }
+ uassert(17124,
+ str::stream() << "The argument to $size must be an Array, but was of type: "
+ << typeName(array.getType()),
+ array.getType() == Array);
+ return Value::createIntOrLong(array.getArray().size());
+}
- /* ----------------------- ExpressionSubstr ---------------------------- */
+REGISTER_EXPRESSION("$size", ExpressionSize::parse);
+const char* ExpressionSize::getOpName() const {
+ return "$size";
+}
- Value ExpressionSubstr::evaluateInternal(Variables* vars) const {
- Value pString(vpOperand[0]->evaluateInternal(vars));
- Value pLower(vpOperand[1]->evaluateInternal(vars));
- Value pLength(vpOperand[2]->evaluateInternal(vars));
+/* ----------------------- ExpressionStrcasecmp ---------------------------- */
- string str = pString.coerceToString();
- uassert(16034, str::stream() << getOpName() <<
- ": starting index must be a numeric type (is BSON type " <<
- typeName(pLower.getType()) << ")",
- (pLower.getType() == NumberInt
- || pLower.getType() == NumberLong
- || pLower.getType() == NumberDouble));
- uassert(16035, str::stream() << getOpName() <<
- ": length must be a numeric type (is BSON type " <<
- typeName(pLength.getType() )<< ")",
- (pLength.getType() == NumberInt
- || pLength.getType() == NumberLong
- || pLength.getType() == NumberDouble));
+Value ExpressionStrcasecmp::evaluateInternal(Variables* vars) const {
+ Value pString1(vpOperand[0]->evaluateInternal(vars));
+ Value pString2(vpOperand[1]->evaluateInternal(vars));
- string::size_type lower = static_cast< string::size_type >( pLower.coerceToLong() );
- string::size_type length = static_cast< string::size_type >( pLength.coerceToLong() );
+ /* boost::iequals returns a bool not an int so strings must actually be allocated */
+ string str1 = boost::to_upper_copy(pString1.coerceToString());
+ string str2 = boost::to_upper_copy(pString2.coerceToString());
+ int result = str1.compare(str2);
- auto isContinuationByte = [](char c){ return ((c & 0xc0) == 0x80); };
+ if (result == 0)
+ return Value(0);
+ else if (result > 0)
+ return Value(1);
+ else
+ return Value(-1);
+}
- uassert(28656, str::stream() << getOpName() <<
- ": Invalid range, starting index is a UTF-8 continuation byte.",
- (lower >= str.length() || !isContinuationByte(str[lower])));
+REGISTER_EXPRESSION("$strcasecmp", ExpressionStrcasecmp::parse);
+const char* ExpressionStrcasecmp::getOpName() const {
+ return "$strcasecmp";
+}
- // Check the byte after the last character we'd return. If it is a continuation byte, that
- // means we're in the middle of a UTF-8 character.
- uassert(28657, str::stream() << getOpName() <<
- ": Invalid range, ending index is in the middle of a UTF-8 character.",
- (lower + length >= str.length() || !isContinuationByte(str[lower + length])));
+/* ----------------------- ExpressionSubstr ---------------------------- */
+
+Value ExpressionSubstr::evaluateInternal(Variables* vars) const {
+ Value pString(vpOperand[0]->evaluateInternal(vars));
+ Value pLower(vpOperand[1]->evaluateInternal(vars));
+ Value pLength(vpOperand[2]->evaluateInternal(vars));
+
+ string str = pString.coerceToString();
+ uassert(16034,
+ str::stream() << getOpName()
+ << ": starting index must be a numeric type (is BSON type "
+ << typeName(pLower.getType()) << ")",
+ (pLower.getType() == NumberInt || pLower.getType() == NumberLong ||
+ pLower.getType() == NumberDouble));
+ uassert(16035,
+ str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
+ << typeName(pLength.getType()) << ")",
+ (pLength.getType() == NumberInt || pLength.getType() == NumberLong ||
+ pLength.getType() == NumberDouble));
+
+ string::size_type lower = static_cast<string::size_type>(pLower.coerceToLong());
+ string::size_type length = static_cast<string::size_type>(pLength.coerceToLong());
+
+ auto isContinuationByte = [](char c) { return ((c & 0xc0) == 0x80); };
+
+ uassert(28656,
+ str::stream() << getOpName()
+ << ": Invalid range, starting index is a UTF-8 continuation byte.",
+ (lower >= str.length() || !isContinuationByte(str[lower])));
+
+ // Check the byte after the last character we'd return. If it is a continuation byte, that
+ // means we're in the middle of a UTF-8 character.
+ uassert(
+ 28657,
+ str::stream() << getOpName()
+ << ": Invalid range, ending index is in the middle of a UTF-8 character.",
+ (lower + length >= str.length() || !isContinuationByte(str[lower + length])));
+
+ if (lower >= str.length()) {
+ // If lower > str.length() then string::substr() will throw out_of_range, so return an
+ // empty string if lower is not a valid string index.
+ return Value("");
+ }
+ return Value(str.substr(lower, length));
+}
- if ( lower >= str.length() ) {
- // If lower > str.length() then string::substr() will throw out_of_range, so return an
- // empty string if lower is not a valid string index.
- return Value("");
- }
- return Value(str.substr(lower, length));
- }
+REGISTER_EXPRESSION("$substr", ExpressionSubstr::parse);
+const char* ExpressionSubstr::getOpName() const {
+ return "$substr";
+}
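
The two range assertions above hinge on one fact: in UTF-8, a continuation byte always has the bit pattern 10xxxxxx, so a start or end index that lands on one would split a multi-byte character. A standalone check of that predicate (illustrative only):

    #include <cassert>
    #include <string>

    static bool isContinuationByte(char c) {
        return (c & 0xc0) == 0x80;
    }

    int main() {
        const std::string s = "a\xc3\xa9z";  // "aéz": 'é' is the two bytes 0xC3 0xA9
        assert(!isContinuationByte(s[0]));   // 'a' starts a character
        assert(!isContinuationByte(s[1]));   // 0xC3 is a lead byte
        assert(isContinuationByte(s[2]));    // 0xA9 would be an invalid split point
        return 0;
    }
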
- REGISTER_EXPRESSION("$substr", ExpressionSubstr::parse);
- const char* ExpressionSubstr::getOpName() const {
- return "$substr";
+/* ----------------------- ExpressionSubtract ---------------------------- */
+
+Value ExpressionSubtract::evaluateInternal(Variables* vars) const {
+ Value lhs = vpOperand[0]->evaluateInternal(vars);
+ Value rhs = vpOperand[1]->evaluateInternal(vars);
+
+ BSONType diffType = Value::getWidestNumeric(rhs.getType(), lhs.getType());
+
+ if (diffType == NumberDouble) {
+ double right = rhs.coerceToDouble();
+ double left = lhs.coerceToDouble();
+ return Value(left - right);
+ } else if (diffType == NumberLong) {
+ long long right = rhs.coerceToLong();
+ long long left = lhs.coerceToLong();
+ return Value(left - right);
+ } else if (diffType == NumberInt) {
+ long long right = rhs.coerceToLong();
+ long long left = lhs.coerceToLong();
+ return Value::createIntOrLong(left - right);
+ } else if (lhs.nullish() || rhs.nullish()) {
+ return Value(BSONNULL);
+ } else if (lhs.getType() == Date) {
+ if (rhs.getType() == Date) {
+ long long timeDelta = lhs.getDate() - rhs.getDate();
+ return Value(timeDelta);
+ } else if (rhs.numeric()) {
+ long long millisSinceEpoch = lhs.getDate() - rhs.coerceToLong();
+ return Value(Date_t::fromMillisSinceEpoch(millisSinceEpoch));
+ } else {
+ uasserted(16613,
+                      str::stream() << "can't $subtract a " << typeName(rhs.getType())
+ << " from a Date");
+ }
+ } else {
+ uasserted(16556,
+                  str::stream() << "can't $subtract a " << typeName(rhs.getType()) << " from a "
+ << typeName(lhs.getType()));
}
+}
- /* ----------------------- ExpressionSubtract ---------------------------- */
-
- Value ExpressionSubtract::evaluateInternal(Variables* vars) const {
- Value lhs = vpOperand[0]->evaluateInternal(vars);
- Value rhs = vpOperand[1]->evaluateInternal(vars);
+REGISTER_EXPRESSION("$subtract", ExpressionSubtract::parse);
+const char* ExpressionSubtract::getOpName() const {
+ return "$subtract";
+}
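
For the Date branches above: subtracting one Date from another yields the difference in milliseconds as a NumberLong, while subtracting a number from a Date yields an earlier Date. A trivial sketch of that arithmetic with plain millisecond counts standing in for Date_t (illustrative only):

    #include <cstdio>

    int main() {
        long long dateA = 1000000;  // millis since epoch, standing in for Date_t
        long long dateB = 400000;
        long long offset = 250000;

        long long deltaMillis = dateA - dateB;   // Date - Date   -> NumberLong
        long long shiftedDate = dateA - offset;  // Date - number -> Date

        std::printf("%lld %lld\n", deltaMillis, shiftedDate);  // 600000 750000
        return 0;
    }
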
- BSONType diffType = Value::getWidestNumeric(rhs.getType(), lhs.getType());
+/* ------------------------- ExpressionToLower ----------------------------- */
- if (diffType == NumberDouble) {
- double right = rhs.coerceToDouble();
- double left = lhs.coerceToDouble();
- return Value(left - right);
- }
- else if (diffType == NumberLong) {
- long long right = rhs.coerceToLong();
- long long left = lhs.coerceToLong();
- return Value(left - right);
- }
- else if (diffType == NumberInt) {
- long long right = rhs.coerceToLong();
- long long left = lhs.coerceToLong();
- return Value::createIntOrLong(left - right);
- }
- else if (lhs.nullish() || rhs.nullish()) {
- return Value(BSONNULL);
- }
- else if (lhs.getType() == Date) {
- if (rhs.getType() == Date) {
- long long timeDelta = lhs.getDate() - rhs.getDate();
- return Value(timeDelta);
- }
- else if (rhs.numeric()) {
- long long millisSinceEpoch = lhs.getDate() - rhs.coerceToLong();
- return Value(Date_t::fromMillisSinceEpoch(millisSinceEpoch));
- }
- else {
- uasserted(16613, str::stream() << "cant $subtract a "
- << typeName(rhs.getType())
- << " from a Date");
- }
- }
- else {
- uasserted(16556, str::stream() << "cant $subtract a"
- << typeName(rhs.getType())
- << " from a "
- << typeName(lhs.getType()));
- }
- }
+Value ExpressionToLower::evaluateInternal(Variables* vars) const {
+ Value pString(vpOperand[0]->evaluateInternal(vars));
+ string str = pString.coerceToString();
+ boost::to_lower(str);
+ return Value(str);
+}
- REGISTER_EXPRESSION("$subtract", ExpressionSubtract::parse);
- const char* ExpressionSubtract::getOpName() const {
- return "$subtract";
- }
+REGISTER_EXPRESSION("$toLower", ExpressionToLower::parse);
+const char* ExpressionToLower::getOpName() const {
+ return "$toLower";
+}
- /* ------------------------- ExpressionToLower ----------------------------- */
+/* ------------------------- ExpressionToUpper -------------------------- */
- Value ExpressionToLower::evaluateInternal(Variables* vars) const {
- Value pString(vpOperand[0]->evaluateInternal(vars));
- string str = pString.coerceToString();
- boost::to_lower(str);
- return Value(str);
- }
+Value ExpressionToUpper::evaluateInternal(Variables* vars) const {
+ Value pString(vpOperand[0]->evaluateInternal(vars));
+ string str(pString.coerceToString());
+ boost::to_upper(str);
+ return Value(str);
+}
- REGISTER_EXPRESSION("$toLower", ExpressionToLower::parse);
- const char* ExpressionToLower::getOpName() const {
- return "$toLower";
- }
+REGISTER_EXPRESSION("$toUpper", ExpressionToUpper::parse);
+const char* ExpressionToUpper::getOpName() const {
+ return "$toUpper";
+}
- /* ------------------------- ExpressionToUpper -------------------------- */
+/* ------------------------- ExpressionWeek ----------------------------- */
- Value ExpressionToUpper::evaluateInternal(Variables* vars) const {
- Value pString(vpOperand[0]->evaluateInternal(vars));
- string str(pString.coerceToString());
- boost::to_upper(str);
- return Value(str);
- }
+Value ExpressionWeek::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- REGISTER_EXPRESSION("$toUpper", ExpressionToUpper::parse);
- const char* ExpressionToUpper::getOpName() const {
- return "$toUpper";
- }
+int ExpressionWeek::extract(const tm& tm) {
+ int dayOfWeek = tm.tm_wday;
+ int dayOfYear = tm.tm_yday;
+ int prevSundayDayOfYear = dayOfYear - dayOfWeek; // may be negative
+ int nextSundayDayOfYear = prevSundayDayOfYear + 7; // must be positive
- /* ------------------------- ExpressionWeek ----------------------------- */
+ // Return the zero based index of the week of the next sunday, equal to the one based index
+ // of the week of the previous sunday, which is to be returned.
+ int nextSundayWeek = nextSundayDayOfYear / 7;
- Value ExpressionWeek::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
+ // Verify that the week calculation is consistent with strftime "%U".
+ DEV {
+ char buf[3];
+ verify(strftime(buf, 3, "%U", &tm));
+ verify(int(str::toUnsigned(buf)) == nextSundayWeek);
}
- int ExpressionWeek::extract(const tm& tm) {
- int dayOfWeek = tm.tm_wday;
- int dayOfYear = tm.tm_yday;
- int prevSundayDayOfYear = dayOfYear - dayOfWeek; // may be negative
- int nextSundayDayOfYear = prevSundayDayOfYear + 7; // must be positive
-
- // Return the zero based index of the week of the next sunday, equal to the one based index
- // of the week of the previous sunday, which is to be returned.
- int nextSundayWeek = nextSundayDayOfYear / 7;
-
- // Verify that the week calculation is consistent with strftime "%U".
- DEV{
- char buf[3];
- verify(strftime(buf,3,"%U",&tm));
- verify(int(str::toUnsigned(buf))==nextSundayWeek);
- }
-
- return nextSundayWeek;
- }
+ return nextSundayWeek;
+}
- REGISTER_EXPRESSION("$week", ExpressionWeek::parse);
- const char* ExpressionWeek::getOpName() const {
- return "$week";
- }
+REGISTER_EXPRESSION("$week", ExpressionWeek::parse);
+const char* ExpressionWeek::getOpName() const {
+ return "$week";
+}
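
The extract() arithmetic above reproduces strftime's "%U" rule: weeks start on Sunday, and days before the year's first Sunday fall in week 0. A standalone sketch of the same calculation, checked against a few known dates (illustrative, and it assumes tm_wday/tm_yday are already correct):

    #include <cassert>

    static int weekOfYear(int dayOfWeek /*0 = Sunday*/, int dayOfYear /*0-based*/) {
        int prevSundayDayOfYear = dayOfYear - dayOfWeek;    // may be negative
        int nextSundayDayOfYear = prevSundayDayOfYear + 7;  // always positive
        return nextSundayDayOfYear / 7;                     // Sunday-based week index
    }

    int main() {
        assert(weekOfYear(4, 0) == 0);  // Jan 1 on a Thursday: still week 0
        assert(weekOfYear(0, 3) == 1);  // the year's first Sunday starts week 1
        assert(weekOfYear(0, 0) == 1);  // Jan 1 on a Sunday is already week 1
        return 0;
    }
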
- /* ------------------------- ExpressionYear ----------------------------- */
+/* ------------------------- ExpressionYear ----------------------------- */
- Value ExpressionYear::evaluateInternal(Variables* vars) const {
- Value pDate(vpOperand[0]->evaluateInternal(vars));
- return Value(extract(pDate.coerceToTm()));
- }
+Value ExpressionYear::evaluateInternal(Variables* vars) const {
+ Value pDate(vpOperand[0]->evaluateInternal(vars));
+ return Value(extract(pDate.coerceToTm()));
+}
- REGISTER_EXPRESSION("$year", ExpressionYear::parse);
- const char* ExpressionYear::getOpName() const {
- return "$year";
- }
+REGISTER_EXPRESSION("$year", ExpressionYear::parse);
+const char* ExpressionYear::getOpName() const {
+ return "$year";
+}
}
diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h
index 258e0d6c034..bec2013e2ff 100644
--- a/src/mongo/db/pipeline/expression.h
+++ b/src/mongo/db/pipeline/expression.h
@@ -42,1032 +42,1047 @@
namespace mongo {
- class BSONArrayBuilder;
- class BSONElement;
- class BSONObjBuilder;
- class DocumentSource;
-
- // TODO: Look into merging with ExpressionContext and possibly ObjectCtx.
- /// The state used as input and working space for Expressions.
- class Variables {
- MONGO_DISALLOW_COPYING(Variables);
- public:
- /**
- * Each unique variable is assigned a unique id of this type
- */
- typedef size_t Id;
-
- // This is only for expressions that use no variables (even ROOT).
- Variables() :_numVars(0) {}
-
- explicit Variables(size_t numVars, const Document& root = Document())
- : _root(root)
- , _rest(numVars == 0 ? NULL : new Value[numVars])
- , _numVars(numVars)
- {}
+class BSONArrayBuilder;
+class BSONElement;
+class BSONObjBuilder;
+class DocumentSource;
- static void uassertValidNameForUserWrite(StringData varName);
- static void uassertValidNameForUserRead(StringData varName);
+// TODO: Look into merging with ExpressionContext and possibly ObjectCtx.
+/// The state used as input and working space for Expressions.
+class Variables {
+ MONGO_DISALLOW_COPYING(Variables);
- static const Id ROOT_ID = Id(-1);
+public:
+ /**
+ * Each unique variable is assigned a unique id of this type
+ */
+ typedef size_t Id;
- /**
- * Use this instead of setValue for setting ROOT
- */
- void setRoot(const Document& root) { _root = root; }
- void clearRoot() { _root = Document(); }
- const Document& getRoot() const { return _root; }
+ // This is only for expressions that use no variables (even ROOT).
+ Variables() : _numVars(0) {}
- void setValue(Id id, const Value& value);
- Value getValue(Id id) const;
+ explicit Variables(size_t numVars, const Document& root = Document())
+ : _root(root), _rest(numVars == 0 ? NULL : new Value[numVars]), _numVars(numVars) {}
- /**
- * returns Document() for non-document values.
- */
- Document getDocument(Id id) const;
+ static void uassertValidNameForUserWrite(StringData varName);
+ static void uassertValidNameForUserRead(StringData varName);
- private:
- Document _root;
- const std::unique_ptr<Value[]> _rest;
- const size_t _numVars;
- };
+ static const Id ROOT_ID = Id(-1);
/**
- * Generates Variables::Ids and keeps track of the number of Ids handed out.
+ * Use this instead of setValue for setting ROOT
*/
- class VariablesIdGenerator {
- public:
- VariablesIdGenerator() : _nextId(0) {}
-
- Variables::Id generateId() { return _nextId++; }
-
- /**
- * Returns the number of Ids handed out by this Generator.
- * Return value is intended to be passed to Variables constructor.
- */
- Variables::Id getIdCount() const { return _nextId; }
+ void setRoot(const Document& root) {
+ _root = root;
+ }
+ void clearRoot() {
+ _root = Document();
+ }
+ const Document& getRoot() const {
+ return _root;
+ }
- private:
- Variables::Id _nextId;
- };
+ void setValue(Id id, const Value& value);
+ Value getValue(Id id) const;
/**
- * This class represents the Variables that are defined in an Expression tree.
- *
- * All copies from a given instance share enough information to ensure unique Ids are assigned
- * and to propagate back to the original instance enough information to correctly construct a
- * Variables instance.
+ * returns Document() for non-document values.
*/
- class VariablesParseState {
- public:
- explicit VariablesParseState(VariablesIdGenerator* idGenerator)
- : _idGenerator(idGenerator)
- {}
+ Document getDocument(Id id) const;
- /**
- * Assigns a named variable a unique Id. This differs from all other variables, even
- * others with the same name.
- *
- * The special variables ROOT and CURRENT are always implicitly defined with CURRENT
- * equivalent to ROOT. If CURRENT is explicitly defined by a call to this function, it
- * breaks that equivalence.
- *
- * NOTE: Name validation is responsibility of caller.
- */
- Variables::Id defineVariable(StringData name);
-
- /**
- * Returns the current Id for a variable. uasserts if the variable isn't defined.
- */
- Variables::Id getVariable(StringData name) const;
+private:
+ Document _root;
+ const std::unique_ptr<Value[]> _rest;
+ const size_t _numVars;
+};
- private:
- StringMap<Variables::Id> _variables;
- VariablesIdGenerator* _idGenerator;
- };
+/**
+ * Generates Variables::Ids and keeps track of the number of Ids handed out.
+ */
+class VariablesIdGenerator {
+public:
+ VariablesIdGenerator() : _nextId(0) {}
- class Expression :
- public IntrusiveCounterUnsigned {
- public:
- virtual ~Expression() {};
+ Variables::Id generateId() {
+ return _nextId++;
+ }
- /*
- Optimize the Expression.
+ /**
+ * Returns the number of Ids handed out by this Generator.
+ * Return value is intended to be passed to Variables constructor.
+ */
+ Variables::Id getIdCount() const {
+ return _nextId;
+ }
- This provides an opportunity to do constant folding, or to
- collapse nested operators that have the same precedence, such as
- $add, $and, or $or.
+private:
+ Variables::Id _nextId;
+};
- The Expression should be replaced with the return value, which may
- or may not be the same object. In the case of constant folding,
- a computed expression may be replaced by a constant.
+/**
+ * This class represents the Variables that are defined in an Expression tree.
+ *
+ * All copies from a given instance share enough information to ensure unique Ids are assigned
+ * and to propagate back to the original instance enough information to correctly construct a
+ * Variables instance.
+ */
+class VariablesParseState {
+public:
+ explicit VariablesParseState(VariablesIdGenerator* idGenerator) : _idGenerator(idGenerator) {}
- @returns the optimized Expression
- */
- virtual boost::intrusive_ptr<Expression> optimize() { return this; }
+ /**
+ * Assigns a named variable a unique Id. The returned Id is distinct from that of all
+ * other variables, even others with the same name.
+ *
+ * The special variables ROOT and CURRENT are always implicitly defined with CURRENT
+ * equivalent to ROOT. If CURRENT is explicitly defined by a call to this function, it
+ * breaks that equivalence.
+ *
+ * NOTE: Name validation is the responsibility of the caller.
+ */
+ Variables::Id defineVariable(StringData name);
- /**
- * Add this expression's field dependencies to the set
- *
- * Expressions are trees, so this is often recursive.
- *
- * @param deps Fully qualified paths to depended-on fields are added to this set.
- * Empty std::string means need full document.
- * @param path path to self if all ancestors are ExpressionObjects.
- * Top-level ExpressionObject gets pointer to empty vector.
- * If any other Expression is an ancestor, or in other cases
- * where {a:1} inclusion objects aren't allowed, they get
- * NULL.
- */
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const = 0;
+ /**
+ * Returns the current Id for a variable. uasserts if the variable isn't defined.
+ */
+ Variables::Id getVariable(StringData name) const;
- /** simple expressions are just inclusion exclusion as supported by ExpressionObject */
- virtual bool isSimple() { return false; }
+private:
+ StringMap<Variables::Id> _variables;
+ VariablesIdGenerator* _idGenerator;
+};
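
For orientation, the three classes above are meant to be wired together across parse time and evaluation time. A minimal, hypothetical sketch (the variable name "low" and the literal values are illustrative, not taken from this change):

    VariablesIdGenerator idGenerator;
    VariablesParseState vps(&idGenerator);
    Variables::Id lowId = vps.defineVariable("low");       // parse time: hand out an Id
    // ... parse subexpressions that refer to "$$low" through vps ...
    Variables vars(idGenerator.getIdCount(), Document());  // runtime storage sized by the Id count
    vars.setValue(lowId, Value(5));
    Value v = vars.getValue(lowId);                        // Value(5)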
+class Expression : public IntrusiveCounterUnsigned {
+public:
+ virtual ~Expression(){};
- /**
- * Serialize the Expression tree recursively.
- * If explain is false, returns a Value parsable by parseOperand().
- */
- virtual Value serialize(bool explain) const = 0;
+ /*
+ Optimize the Expression.
- /// Evaluate expression with specified inputs and return result. (only used by tests)
- Value evaluate(const Document& root) const {
- Variables vars(0, root);
- return evaluate(&vars);
- }
+ This provides an opportunity to do constant folding, or to
+ collapse nested operators that have the same precedence, such as
+ $add, $and, or $or.
- /**
- * Evaluate expression with specified inputs and return result.
- *
- * While vars is non-const, if properly constructed, subexpressions modifications to it
- * should not effect outer expressions due to unique variable Ids.
- */
- Value evaluate(Variables* vars) const { return evaluateInternal(vars); }
+ The Expression should be replaced with the return value, which may
+ or may not be the same object. In the case of constant folding,
+ a computed expression may be replaced by a constant.
- /*
- Utility class for parseObject() below.
+ @returns the optimized Expression
+ */
+ virtual boost::intrusive_ptr<Expression> optimize() {
+ return this;
+ }
- DOCUMENT_OK indicates that it is OK to use a Document in the current
- context.
- */
- class ObjectCtx {
- public:
- ObjectCtx(int options);
- static const int DOCUMENT_OK = 0x0001;
- static const int TOP_LEVEL = 0x0002;
- static const int INCLUSION_OK = 0x0004;
-
- bool documentOk() const;
- bool topLevel() const;
- bool inclusionOk() const;
-
- private:
- int options;
- };
-
- //
- // Diagram of relationship between parse functions when parsing a $op:
- //
- // { someFieldOrArrayIndex: { $op: [ARGS] } }
- // ^ parseExpression on inner $op BSONElement
- // ^ parseObject on BSONObject
- // ^ parseOperand on outer BSONElement wrapping the $op Object
- //
+ /**
+ * Add this expression's field dependencies to the set
+ *
+ * Expressions are trees, so this is often recursive.
+ *
+ * @param deps Fully qualified paths to depended-on fields are added to this set.
+ *              An empty std::string means the full document is needed.
+ * @param path path to self if all ancestors are ExpressionObjects.
+ * Top-level ExpressionObject gets pointer to empty vector.
+ * If any other Expression is an ancestor, or in other cases
+ * where {a:1} inclusion objects aren't allowed, they get
+ * NULL.
+ */
+ virtual void addDependencies(DepsTracker* deps,
+ std::vector<std::string>* path = NULL) const = 0;
- /**
- * Parses a BSON Object that could represent a functional expression or a Document
- * expression.
- */
- static boost::intrusive_ptr<Expression> parseObject(
- BSONObj obj,
- ObjectCtx *pCtx,
- const VariablesParseState& vps);
+ /** Simple expressions are just inclusion/exclusion as supported by ExpressionObject. */
+ virtual bool isSimple() {
+ return false;
+ }
- /**
- * Parses a BSONElement which has already been determined to be functional expression.
- *
- * exprElement should be the only element inside the expression object. That is the
- * field name should be the $op for the expression.
- */
- static boost::intrusive_ptr<Expression> parseExpression(
- BSONElement exprElement,
- const VariablesParseState& vps);
+ /**
+ * Serialize the Expression tree recursively.
+ * If explain is false, returns a Value parsable by parseOperand().
+ */
+ virtual Value serialize(bool explain) const = 0;
- /**
- * Parses a BSONElement which is an operand in an Expression.
- *
- * This is the most generic parser and can parse ExpressionFieldPath, a literal, or a $op.
- * If it is a $op, exprElement should be the outer element whose value is an Object
- * containing the $op.
- */
- static boost::intrusive_ptr<Expression> parseOperand(
- BSONElement exprElement,
- const VariablesParseState& vps);
+ /// Evaluate expression with specified inputs and return result. (only used by tests)
+ Value evaluate(const Document& root) const {
+ Variables vars(0, root);
+ return evaluate(&vars);
+ }
- /*
- Produce a field path std::string with the field prefix removed.
+ /**
+ * Evaluate expression with specified inputs and return result.
+ *
+ * While vars is non-const, if properly constructed, a subexpression's modifications to it
+ * should not affect outer expressions, thanks to unique variable Ids.
+ */
+ Value evaluate(Variables* vars) const {
+ return evaluateInternal(vars);
+ }
- Throws an error if the field prefix is not present.
+ /*
+ Utility class for parseObject() below.
- @param prefixedField the prefixed field
- @returns the field path with the prefix removed
- */
- static std::string removeFieldPrefix(const std::string &prefixedField);
+ DOCUMENT_OK indicates that it is OK to use a Document in the current
+ context.
+ */
+ class ObjectCtx {
+ public:
+ ObjectCtx(int options);
+ static const int DOCUMENT_OK = 0x0001;
+ static const int TOP_LEVEL = 0x0002;
+ static const int INCLUSION_OK = 0x0004;
- /** Evaluate the subclass Expression using the given Variables as context and return result.
- *
- * Should only be called by subclasses, but can't be protected because they need to call
- * this function on each other.
- */
- virtual Value evaluateInternal(Variables* vars) const = 0;
+ bool documentOk() const;
+ bool topLevel() const;
+ bool inclusionOk() const;
- protected:
- typedef std::vector<boost::intrusive_ptr<Expression> > ExpressionVector;
+ private:
+ int options;
};
+ //
+ // Diagram of relationship between parse functions when parsing a $op:
+ //
+ // { someFieldOrArrayIndex: { $op: [ARGS] } }
+ // ^ parseExpression on inner $op BSONElement
+ // ^ parseObject on BSONObject
+ // ^ parseOperand on outer BSONElement wrapping the $op Object
+ //
- /// Inherit from ExpressionVariadic or ExpressionFixedArity instead of directly from this class.
- class ExpressionNary :
- public Expression {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual Value serialize(bool explain) const;
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
-
- /*
- Add an operand to the n-ary expression.
-
- @param pExpression the expression to add
- */
- virtual void addOperand(const boost::intrusive_ptr<Expression> &pExpression);
-
- // TODO split this into two functions
- virtual bool isAssociativeAndCommutative() const { return false; }
+ /**
+ * Parses a BSON Object that could represent a functional expression or a Document
+ * expression.
+ */
+ static boost::intrusive_ptr<Expression> parseObject(BSONObj obj,
+ ObjectCtx* pCtx,
+ const VariablesParseState& vps);
- /*
- Get the name of the operator.
+ /**
+ * Parses a BSONElement which has already been determined to be a functional expression.
+ *
+ * exprElement should be the only element inside the expression object; that is, its
+ * field name should be the $op for the expression.
+ */
+ static boost::intrusive_ptr<Expression> parseExpression(BSONElement exprElement,
+ const VariablesParseState& vps);
- @returns the name of the operator; this std::string belongs to the class
- implementation, and should not be deleted
- and should not
- */
- virtual const char* getOpName() const = 0;
- /// Allow subclasses the opportunity to validate arguments at parse time.
- virtual void validateArguments(const ExpressionVector& args) const {}
+ /**
+ * Parses a BSONElement which is an operand in an Expression.
+ *
+ * This is the most generic parser and can parse ExpressionFieldPath, a literal, or a $op.
+ * If it is a $op, exprElement should be the outer element whose value is an Object
+ * containing the $op.
+ */
+ static boost::intrusive_ptr<Expression> parseOperand(BSONElement exprElement,
+ const VariablesParseState& vps);
- static ExpressionVector parseArguments(
- BSONElement bsonExpr,
- const VariablesParseState& vps);
+ /*
+ Produce a field path std::string with the field prefix removed.
- protected:
- ExpressionNary() {}
+ Throws an error if the field prefix is not present.
- ExpressionVector vpOperand;
- };
+ @param prefixedField the prefixed field
+ @returns the field path with the prefix removed
+ */
+ static std::string removeFieldPrefix(const std::string& prefixedField);
- /// Inherit from ExpressionVariadic or ExpressionFixedArity instead of directly from this class.
- template <typename SubClass>
- class ExpressionNaryBase : public ExpressionNary {
- public:
- static boost::intrusive_ptr<Expression> parse(BSONElement bsonExpr,
- const VariablesParseState& vps) {
- boost::intrusive_ptr<ExpressionNaryBase> expr = new SubClass();
- ExpressionVector args = parseArguments(bsonExpr, vps);
- expr->validateArguments(args);
- expr->vpOperand = args;
- return expr;
- }
- };
+ /** Evaluate the subclass Expression using the given Variables as context and return result.
+ *
+ * Should only be called by subclasses, but can't be protected because they need to call
+ * this function on each other.
+ */
+ virtual Value evaluateInternal(Variables* vars) const = 0;
- /// Inherit from this class if your expression takes a variable number of arguments.
- template <typename SubClass>
- class ExpressionVariadic : public ExpressionNaryBase<SubClass> {
- };
+protected:
+ typedef std::vector<boost::intrusive_ptr<Expression>> ExpressionVector;
+};
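
As a hedged illustration of the parse entry points and evaluate() overloads declared above, a caller could go from BSON to a result roughly as follows; the "result" wrapper object and the constant-folding outcome are assumptions of this sketch, not something this change adds:

    BSONObj spec = BSON("result" << BSON("$add" << BSON_ARRAY(1 << 2)));
    VariablesIdGenerator idGen;
    VariablesParseState vps(&idGen);
    boost::intrusive_ptr<Expression> expr =
        Expression::parseOperand(spec.firstElement(), vps);  // outer element wraps the $op object
    expr = expr->optimize();                                  // may fold {$add: [1, 2]} into a constant
    Variables vars(idGen.getIdCount(), Document());
    Value result = expr->evaluate(&vars);                     // Value(3)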
- /// Inherit from this class if your expression takes a fixed number of arguments.
- template <typename SubClass, int NArgs>
- class ExpressionFixedArity : public ExpressionNaryBase<SubClass> {
- public:
- virtual void validateArguments(const Expression::ExpressionVector& args) const {
- uassert(16020, mongoutils::str::stream()
- << "Expression " << this->getOpName() << " takes exactly " << NArgs
- << " arguments. " << args.size() << " were passed in.",
- args.size() == NArgs);
-
- }
- };
+/// Inherit from ExpressionVariadic or ExpressionFixedArity instead of directly from this class.
+class ExpressionNary : public Expression {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual Value serialize(bool explain) const;
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
- class ExpressionAbs final : public ExpressionFixedArity<ExpressionAbs, 1> {
- Value evaluateInternal(Variables* vars) const final;
- const char* getOpName() const final;
- };
+ /*
+ Add an operand to the n-ary expression.
+ @param pExpression the expression to add
+ */
+ virtual void addOperand(const boost::intrusive_ptr<Expression>& pExpression);
- class ExpressionAdd : public ExpressionVariadic<ExpressionAdd> {
- public:
- // virtuals from Expression
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- virtual bool isAssociativeAndCommutative() const { return true; }
- };
+ // TODO split this into two functions
+ virtual bool isAssociativeAndCommutative() const {
+ return false;
+ }
+ /*
+ Get the name of the operator.
+
+ @returns the name of the operator; this std::string belongs to the class
+ implementation, and should not be deleted.
+ */
+ virtual const char* getOpName() const = 0;
+
+ /// Allow subclasses the opportunity to validate arguments at parse time.
+ virtual void validateArguments(const ExpressionVector& args) const {}
+
+ static ExpressionVector parseArguments(BSONElement bsonExpr, const VariablesParseState& vps);
+
+protected:
+ ExpressionNary() {}
+
+ ExpressionVector vpOperand;
+};
+
+/// Inherit from ExpressionVariadic or ExpressionFixedArity instead of directly from this class.
+template <typename SubClass>
+class ExpressionNaryBase : public ExpressionNary {
+public:
+ static boost::intrusive_ptr<Expression> parse(BSONElement bsonExpr,
+ const VariablesParseState& vps) {
+ boost::intrusive_ptr<ExpressionNaryBase> expr = new SubClass();
+ ExpressionVector args = parseArguments(bsonExpr, vps);
+ expr->validateArguments(args);
+ expr->vpOperand = args;
+ return expr;
+ }
+};
+
+/// Inherit from this class if your expression takes a variable number of arguments.
+template <typename SubClass>
+class ExpressionVariadic : public ExpressionNaryBase<SubClass> {};
+
+/// Inherit from this class if your expression takes a fixed number of arguments.
+template <typename SubClass, int NArgs>
+class ExpressionFixedArity : public ExpressionNaryBase<SubClass> {
+public:
+ virtual void validateArguments(const Expression::ExpressionVector& args) const {
+ uassert(16020,
+ mongoutils::str::stream() << "Expression " << this->getOpName() << " takes exactly "
+ << NArgs << " arguments. " << args.size()
+ << " were passed in.",
+ args.size() == NArgs);
+ }
+};
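
A new operator would typically hook into this hierarchy by inheriting from ExpressionVariadic or ExpressionFixedArity and implementing the two pure virtuals, in the same shape as the concrete classes below; the $foo operator here is purely illustrative and is not added by this change:

    class ExpressionFoo final : public ExpressionFixedArity<ExpressionFoo, 2> {
    public:
        Value evaluateInternal(Variables* vars) const final;  // would compute from vpOperand[0] and vpOperand[1]
        const char* getOpName() const final;                  // would return "$foo"
    };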
- class ExpressionAllElementsTrue : public ExpressionFixedArity<ExpressionAllElementsTrue, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionAbs final : public ExpressionFixedArity<ExpressionAbs, 1> {
+ Value evaluateInternal(Variables* vars) const final;
+ const char* getOpName() const final;
+};
- class ExpressionAnd : public ExpressionVariadic<ExpressionAnd> {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- virtual bool isAssociativeAndCommutative() const { return true; }
- };
+class ExpressionAdd : public ExpressionVariadic<ExpressionAdd> {
+public:
+ // virtuals from Expression
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+ virtual bool isAssociativeAndCommutative() const {
+ return true;
+ }
+};
- class ExpressionAnyElementTrue : public ExpressionFixedArity<ExpressionAnyElementTrue, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionAllElementsTrue : public ExpressionFixedArity<ExpressionAllElementsTrue, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionArrayElemAt final : public ExpressionFixedArity<ExpressionArrayElemAt, 2> {
- public:
- Value evaluateInternal(Variables* vars) const final;
- const char* getOpName() const final;
- };
+class ExpressionAnd : public ExpressionVariadic<ExpressionAnd> {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+ virtual bool isAssociativeAndCommutative() const {
+ return true;
+ }
+};
- class ExpressionCoerceToBool : public Expression {
- public:
- // virtuals from ExpressionNary
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
- virtual Value evaluateInternal(Variables* vars) const;
- virtual Value serialize(bool explain) const;
- static boost::intrusive_ptr<ExpressionCoerceToBool> create(
- const boost::intrusive_ptr<Expression> &pExpression);
+class ExpressionAnyElementTrue : public ExpressionFixedArity<ExpressionAnyElementTrue, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- private:
- ExpressionCoerceToBool(const boost::intrusive_ptr<Expression> &pExpression);
+class ExpressionArrayElemAt final : public ExpressionFixedArity<ExpressionArrayElemAt, 2> {
+public:
+ Value evaluateInternal(Variables* vars) const final;
+ const char* getOpName() const final;
+};
- boost::intrusive_ptr<Expression> pExpression;
- };
+class ExpressionCoerceToBool : public Expression {
+public:
+ // virtuals from ExpressionNary
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual Value serialize(bool explain) const;
- class ExpressionCompare : public ExpressionFixedArity<ExpressionCompare, 2> {
- public:
+ static boost::intrusive_ptr<ExpressionCoerceToBool> create(
+ const boost::intrusive_ptr<Expression>& pExpression);
- /** Enumeration of comparison operators. Any changes to these values require adjustment of
- * the lookup table in the implementation.
- */
- enum CmpOp {
- EQ = 0, // return true for a == b, false otherwise
- NE = 1, // return true for a != b, false otherwise
- GT = 2, // return true for a > b, false otherwise
- GTE = 3, // return true for a >= b, false otherwise
- LT = 4, // return true for a < b, false otherwise
- LTE = 5, // return true for a <= b, false otherwise
- CMP = 6, // return -1, 0, 1 for a < b, a == b, a > b
- };
-
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
-
- static boost::intrusive_ptr<Expression> parse(
- BSONElement bsonExpr,
- const VariablesParseState& vps,
- CmpOp cmpOp);
-
- ExpressionCompare(CmpOp cmpOp);
- private:
- CmpOp cmpOp;
- };
+private:
+ ExpressionCoerceToBool(const boost::intrusive_ptr<Expression>& pExpression);
+ boost::intrusive_ptr<Expression> pExpression;
+};
- class ExpressionConcat : public ExpressionVariadic<ExpressionConcat> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
-
- class ExpressionConcatArrays final : public ExpressionVariadic<ExpressionConcatArrays> {
- public:
- Value evaluateInternal(Variables* vars) const final;
- const char* getOpName() const final;
+class ExpressionCompare : public ExpressionFixedArity<ExpressionCompare, 2> {
+public:
+ /** Enumeration of comparison operators. Any changes to these values require adjustment of
+ * the lookup table in the implementation.
+ */
+ enum CmpOp {
+ EQ = 0, // return true for a == b, false otherwise
+ NE = 1, // return true for a != b, false otherwise
+ GT = 2, // return true for a > b, false otherwise
+ GTE = 3, // return true for a >= b, false otherwise
+ LT = 4, // return true for a < b, false otherwise
+ LTE = 5, // return true for a <= b, false otherwise
+ CMP = 6, // return -1, 0, 1 for a < b, a == b, a > b
};
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- class ExpressionCond : public ExpressionFixedArity<ExpressionCond, 3> {
- typedef ExpressionFixedArity<ExpressionCond, 3> Base;
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
+ static boost::intrusive_ptr<Expression> parse(BSONElement bsonExpr,
+ const VariablesParseState& vps,
+ CmpOp cmpOp);
- static boost::intrusive_ptr<Expression> parse(
- BSONElement expr,
- const VariablesParseState& vps);
- };
+ ExpressionCompare(CmpOp cmpOp);
+private:
+ CmpOp cmpOp;
+};
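
Unlike the other operators, ExpressionCompare::parse is handed the comparison to perform, so one parser can presumably serve $eq, $ne, $gt, $gte, $lt, $lte and $cmp. A hypothetical sketch, with illustrative spec and field names:

    BSONObj cmpSpec = BSON("$gt" << BSON_ARRAY("$qty" << 100));
    VariablesIdGenerator idGen;
    VariablesParseState vps(&idGen);
    boost::intrusive_ptr<Expression> gtExpr =
        ExpressionCompare::parse(cmpSpec.firstElement(), vps, ExpressionCompare::GT);
    // With CMP the result would be -1, 0, or 1 instead of a boolean.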
- class ExpressionConstant : public Expression {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- virtual Value serialize(bool explain) const;
-
- static boost::intrusive_ptr<ExpressionConstant> create(const Value& pValue);
- static boost::intrusive_ptr<Expression> parse(
- BSONElement bsonExpr,
- const VariablesParseState& vps);
-
- /*
- Get the constant value represented by this Expression.
-
- @returns the value
- */
- Value getValue() const;
- private:
- ExpressionConstant(const Value& pValue);
+class ExpressionConcat : public ExpressionVariadic<ExpressionConcat> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- Value pValue;
- };
- class ExpressionDateToString : public Expression {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual Value serialize(bool explain) const;
- virtual Value evaluateInternal(Variables* vars) const;
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
+class ExpressionConcatArrays final : public ExpressionVariadic<ExpressionConcatArrays> {
+public:
+ Value evaluateInternal(Variables* vars) const final;
+ const char* getOpName() const final;
+};
- static boost::intrusive_ptr<Expression> parse(
- BSONElement expr,
- const VariablesParseState& vps);
- private:
- ExpressionDateToString(const std::string& format, // the format string
- boost::intrusive_ptr<Expression> date); // the date to format
+class ExpressionCond : public ExpressionFixedArity<ExpressionCond, 3> {
+ typedef ExpressionFixedArity<ExpressionCond, 3> Base;
- // Will uassert on invalid data
- static void validateFormat(const std::string& format);
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- // Need raw date as tm doesn't have millisecond resolution.
- // Format must be valid.
- static std::string formatDate(const std::string& format,
- const tm& tm,
- const long long date);
+ static boost::intrusive_ptr<Expression> parse(BSONElement expr, const VariablesParseState& vps);
+};
- static void insertPadded(StringBuilder& sb, int number, int spaces);
- const std::string _format;
- boost::intrusive_ptr<Expression> _date;
- };
+class ExpressionConstant : public Expression {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+ virtual Value serialize(bool explain) const;
- class ExpressionDayOfMonth : public ExpressionFixedArity<ExpressionDayOfMonth, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
+ static boost::intrusive_ptr<ExpressionConstant> create(const Value& pValue);
+ static boost::intrusive_ptr<Expression> parse(BSONElement bsonExpr,
+ const VariablesParseState& vps);
- static inline int extract(const tm& tm) { return tm.tm_mday; }
- };
+ /*
+ Get the constant value represented by this Expression.
+ @returns the value
+ */
+ Value getValue() const;
- class ExpressionDayOfWeek : public ExpressionFixedArity<ExpressionDayOfWeek, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
+private:
+ ExpressionConstant(const Value& pValue);
- // MySQL uses 1-7, tm uses 0-6
- static inline int extract(const tm& tm) { return tm.tm_wday + 1; }
- };
+ Value pValue;
+};
+class ExpressionDateToString : public Expression {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual Value serialize(bool explain) const;
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
- class ExpressionDayOfYear : public ExpressionFixedArity<ExpressionDayOfYear, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
+ static boost::intrusive_ptr<Expression> parse(BSONElement expr, const VariablesParseState& vps);
- // MySQL uses 1-366, tm uses 0-365
- static inline int extract(const tm& tm) { return tm.tm_yday + 1; }
- };
+private:
+ ExpressionDateToString(const std::string& format, // the format string
+ boost::intrusive_ptr<Expression> date); // the date to format
+ // Will uassert on invalid data
+ static void validateFormat(const std::string& format);
- class ExpressionDivide : public ExpressionFixedArity<ExpressionDivide, 2> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+ // Need raw date as tm doesn't have millisecond resolution.
+ // Format must be valid.
+ static std::string formatDate(const std::string& format, const tm& tm, const long long date);
+ static void insertPadded(StringBuilder& sb, int number, int spaces);
- class ExpressionFieldPath : public Expression {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
- virtual Value evaluateInternal(Variables* vars) const;
- virtual Value serialize(bool explain) const;
+ const std::string _format;
+ boost::intrusive_ptr<Expression> _date;
+};
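
For context, the specification that reaches ExpressionDateToString::parse follows the documented $dateToString syntax. A hedged sketch using the BSON builder macros, with illustrative field values:

    BSONObj dateToStringSpec = BSON(
        "$dateToString" << BSON("format" << "%Y-%m-%d" << "date" << "$orderDate"));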
- /*
- Create a field path expression using old semantics (rooted off of CURRENT).
+class ExpressionDayOfMonth : public ExpressionFixedArity<ExpressionDayOfMonth, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- // NOTE: this method is deprecated and only used by tests
- // TODO remove this method in favor of parse()
+ static inline int extract(const tm& tm) {
+ return tm.tm_mday;
+ }
+};
- Evaluation will extract the value associated with the given field
- path from the source document.
- @param fieldPath the field path string, without any leading document
- indicator
- @returns the newly created field path expression
- */
- static boost::intrusive_ptr<ExpressionFieldPath> create(const std::string& fieldPath);
+class ExpressionDayOfWeek : public ExpressionFixedArity<ExpressionDayOfWeek, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- /// Like create(), but works with the raw std::string from the user with the "$" prefixes.
- static boost::intrusive_ptr<ExpressionFieldPath> parse(
- const std::string& raw,
- const VariablesParseState& vps);
+ // MySQL uses 1-7, tm uses 0-6
+ static inline int extract(const tm& tm) {
+ return tm.tm_wday + 1;
+ }
+};
- const FieldPath& getFieldPath() const { return _fieldPath; }
- private:
- ExpressionFieldPath(const std::string& fieldPath, Variables::Id variable);
+class ExpressionDayOfYear : public ExpressionFixedArity<ExpressionDayOfYear, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- /*
- Internal implementation of evaluateInternal(), used recursively.
+ // MySQL uses 1-366, tm uses 0-365
+ static inline int extract(const tm& tm) {
+ return tm.tm_yday + 1;
+ }
+};
- The internal implementation doesn't just use a loop because of
- the possibility that we need to skip over an array. If the path
- is "a.b.c", and a is an array, then we fan out from there, and
- traverse "b.c" for each element of a:[...]. This requires that
- a be an array of objects in order to navigate more deeply.
- @param index current path field index to extract
- @param input current document traversed to (not the top-level one)
- @returns the field found; could be an array
- */
- Value evaluatePath(size_t index, const Document& input) const;
+class ExpressionDivide : public ExpressionFixedArity<ExpressionDivide, 2> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- // Helper for evaluatePath to handle Array case
- Value evaluatePathArray(size_t index, const Value& input) const;
- const FieldPath _fieldPath;
- const Variables::Id _variable;
- };
+class ExpressionFieldPath : public Expression {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual Value serialize(bool explain) const;
+ /*
+ Create a field path expression using old semantics (rooted off of CURRENT).
- class ExpressionFilter final : public Expression {
- public:
- // virtuals from Expression
- boost::intrusive_ptr<Expression> optimize() final;
- Value serialize(bool explain) const final;
- Value evaluateInternal(Variables* vars) const final;
- void addDependencies(DepsTracker* deps,
- std::vector<std::string>* path=NULL) const final;
+ // NOTE: this method is deprecated and only used by tests
+ // TODO remove this method in favor of parse()
- static boost::intrusive_ptr<Expression> parse(
- BSONElement expr,
- const VariablesParseState& vps);
+ Evaluation will extract the value associated with the given field
+ path from the source document.
- private:
- ExpressionFilter(std::string varName,
- Variables::Id varId,
- boost::intrusive_ptr<Expression> input,
- boost::intrusive_ptr<Expression> filter);
-
- // The name of the variable to set to each element in the array.
- std::string _varName;
- // The id of the variable to set.
- Variables::Id _varId;
- // The array to iterate over.
- boost::intrusive_ptr<Expression> _input;
- // The expression determining whether each element should be present in the result array.
- boost::intrusive_ptr<Expression> _filter;
- };
-
-
- class ExpressionHour : public ExpressionFixedArity<ExpressionHour, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
-
- static inline int extract(const tm& tm) { return tm.tm_hour; }
- };
+ @param fieldPath the field path string, without any leading document
+ indicator
+ @returns the newly created field path expression
+ */
+ static boost::intrusive_ptr<ExpressionFieldPath> create(const std::string& fieldPath);
+ /// Like create(), but works with the raw std::string from the user with the "$" prefixes.
+ static boost::intrusive_ptr<ExpressionFieldPath> parse(const std::string& raw,
+ const VariablesParseState& vps);
- class ExpressionIfNull : public ExpressionFixedArity<ExpressionIfNull, 2> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+ const FieldPath& getFieldPath() const {
+ return _fieldPath;
+ }
+private:
+ ExpressionFieldPath(const std::string& fieldPath, Variables::Id variable);
- class ExpressionLet : public Expression {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual Value serialize(bool explain) const;
- virtual Value evaluateInternal(Variables* vars) const;
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
+ /*
+ Internal implementation of evaluateInternal(), used recursively.
- static boost::intrusive_ptr<Expression> parse(
- BSONElement expr,
- const VariablesParseState& vps);
+ The internal implementation doesn't just use a loop because of
+ the possibility that we need to skip over an array. If the path
+ is "a.b.c", and a is an array, then we fan out from there, and
+ traverse "b.c" for each element of a:[...]. This requires that
+ a be an array of objects in order to navigate more deeply.
- struct NameAndExpression {
- NameAndExpression() {}
- NameAndExpression(std::string name, boost::intrusive_ptr<Expression> expression)
- : name(name)
- , expression(expression)
- {}
+ @param index current path field index to extract
+ @param input current document traversed to (not the top-level one)
+ @returns the field found; could be an array
+ */
+ Value evaluatePath(size_t index, const Document& input) const;
+
+ // Helper for evaluatePath to handle Array case
+ Value evaluatePathArray(size_t index, const Value& input) const;
+
+ const FieldPath _fieldPath;
+ const Variables::Id _variable;
+};
+
+
+class ExpressionFilter final : public Expression {
+public:
+ // virtuals from Expression
+ boost::intrusive_ptr<Expression> optimize() final;
+ Value serialize(bool explain) const final;
+ Value evaluateInternal(Variables* vars) const final;
+ void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const final;
+
+ static boost::intrusive_ptr<Expression> parse(BSONElement expr, const VariablesParseState& vps);
+
+private:
+ ExpressionFilter(std::string varName,
+ Variables::Id varId,
+ boost::intrusive_ptr<Expression> input,
+ boost::intrusive_ptr<Expression> filter);
+
+ // The name of the variable to set to each element in the array.
+ std::string _varName;
+ // The id of the variable to set.
+ Variables::Id _varId;
+ // The array to iterate over.
+ boost::intrusive_ptr<Expression> _input;
+ // The expression determining whether each element should be present in the result array.
+ boost::intrusive_ptr<Expression> _filter;
+};
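
The private members above mirror the documented $filter syntax (an input array, a bound variable name, and a condition). A hypothetical specification, with illustrative field values, might look like:

    BSONObj filterSpec = BSON(
        "$filter" << BSON("input" << "$items"
                                  << "as" << "item"
                                  << "cond" << BSON("$gte" << BSON_ARRAY("$$item.qty" << 100))));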
+
+
+class ExpressionHour : public ExpressionFixedArity<ExpressionHour, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+
+ static inline int extract(const tm& tm) {
+ return tm.tm_hour;
+ }
+};
- std::string name;
- boost::intrusive_ptr<Expression> expression;
- };
- typedef std::map<Variables::Id, NameAndExpression> VariableMap;
+class ExpressionIfNull : public ExpressionFixedArity<ExpressionIfNull, 2> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- private:
- ExpressionLet(const VariableMap& vars,
- boost::intrusive_ptr<Expression> subExpression);
- VariableMap _variables;
- boost::intrusive_ptr<Expression> _subExpression;
- };
+class ExpressionLet : public Expression {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual Value serialize(bool explain) const;
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
- class ExpressionMap : public Expression {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual Value serialize(bool explain) const;
- virtual Value evaluateInternal(Variables* vars) const;
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
+ static boost::intrusive_ptr<Expression> parse(BSONElement expr, const VariablesParseState& vps);
- static boost::intrusive_ptr<Expression> parse(
- BSONElement expr,
- const VariablesParseState& vps);
+ struct NameAndExpression {
+ NameAndExpression() {}
+ NameAndExpression(std::string name, boost::intrusive_ptr<Expression> expression)
+ : name(name), expression(expression) {}
- private:
- ExpressionMap(const std::string& varName, // name of variable to set
- Variables::Id varId, // id of variable to set
- boost::intrusive_ptr<Expression> input, // yields array to iterate
- boost::intrusive_ptr<Expression> each); // yields results to be added to output array
-
- std::string _varName;
- Variables::Id _varId;
- boost::intrusive_ptr<Expression> _input;
- boost::intrusive_ptr<Expression> _each;
+ std::string name;
+ boost::intrusive_ptr<Expression> expression;
};
- class ExpressionMeta : public Expression {
- public:
- // virtuals from Expression
- virtual Value serialize(bool explain) const;
- virtual Value evaluateInternal(Variables* vars) const;
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
-
- static boost::intrusive_ptr<Expression> parse(
- BSONElement expr,
- const VariablesParseState& vps);
- };
+ typedef std::map<Variables::Id, NameAndExpression> VariableMap;
+
+private:
+ ExpressionLet(const VariableMap& vars, boost::intrusive_ptr<Expression> subExpression);
+
+ VariableMap _variables;
+ boost::intrusive_ptr<Expression> _subExpression;
+};
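
For reference, the VariableMap above is built from the vars sub-document of a $let specification; a hedged sketch of the shape parse() consumes, with illustrative field values:

    BSONObj letSpec = BSON(
        "$let" << BSON("vars" << BSON("total" << BSON("$add" << BSON_ARRAY("$price" << "$tax")))
                              << "in" << "$$total"));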
+
+class ExpressionMap : public Expression {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual Value serialize(bool explain) const;
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
+
+ static boost::intrusive_ptr<Expression> parse(BSONElement expr, const VariablesParseState& vps);
+
+private:
+ ExpressionMap(
+ const std::string& varName, // name of variable to set
+ Variables::Id varId, // id of variable to set
+ boost::intrusive_ptr<Expression> input, // yields array to iterate
+ boost::intrusive_ptr<Expression> each); // yields results to be added to output array
+
+ std::string _varName;
+ Variables::Id _varId;
+ boost::intrusive_ptr<Expression> _input;
+ boost::intrusive_ptr<Expression> _each;
+};
+
+class ExpressionMeta : public Expression {
+public:
+ // virtuals from Expression
+ virtual Value serialize(bool explain) const;
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
+
+ static boost::intrusive_ptr<Expression> parse(BSONElement expr, const VariablesParseState& vps);
+};
+
+class ExpressionMillisecond : public ExpressionFixedArity<ExpressionMillisecond, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+
+ static int extract(const long long date);
+};
+
+
+class ExpressionMinute : public ExpressionFixedArity<ExpressionMinute, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+
+ static int extract(const tm& tm) {
+ return tm.tm_min;
+ }
+};
- class ExpressionMillisecond : public ExpressionFixedArity<ExpressionMillisecond, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- static int extract(const long long date);
- };
+class ExpressionMod : public ExpressionFixedArity<ExpressionMod, 2> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionMinute : public ExpressionFixedArity<ExpressionMinute, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
+class ExpressionMultiply : public ExpressionVariadic<ExpressionMultiply> {
+public:
+ // virtuals from Expression
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+ virtual bool isAssociativeAndCommutative() const {
+ return true;
+ }
+};
- static int extract(const tm& tm) { return tm.tm_min; }
- };
+class ExpressionMonth : public ExpressionFixedArity<ExpressionMonth, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- class ExpressionMod : public ExpressionFixedArity<ExpressionMod, 2> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
-
+ // MySQL uses 1-12, tm uses 0-11
+ static inline int extract(const tm& tm) {
+ return tm.tm_mon + 1;
+ }
+};
- class ExpressionMultiply : public ExpressionVariadic<ExpressionMultiply> {
- public:
- // virtuals from Expression
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- virtual bool isAssociativeAndCommutative() const { return true; }
- };
+class ExpressionNot : public ExpressionFixedArity<ExpressionNot, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionMonth : public ExpressionFixedArity<ExpressionMonth, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- // MySQL uses 1-12, tm uses 0-11
- static inline int extract(const tm& tm) { return tm.tm_mon + 1; }
- };
+class ExpressionObject : public Expression {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual bool isSimple();
+ virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path = NULL) const;
+ /** Only evaluates non-inclusion expressions. For inclusions, use addToDocument(). */
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual Value serialize(bool explain) const;
+ /// like evaluate(), but return a Document instead of a Value-wrapped Document.
+ Document evaluateDocument(Variables* vars) const;
- class ExpressionNot : public ExpressionFixedArity<ExpressionNot, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+ /** Evaluates with inclusions and adds the results to the passed-in MutableDocument
+ *
+ * @param output the MutableDocument to add the evaluated expressions to
+ * @param currentDoc the input Document for this level (for inclusions)
+ * @param vars the variables for use in subexpressions
+ */
+ void addToDocument(MutableDocument& output, const Document& currentDoc, Variables* vars) const;
+ // estimated number of fields that will be output
+ size_t getSizeHint() const;
- class ExpressionObject : public Expression {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual bool isSimple();
- virtual void addDependencies(DepsTracker* deps, std::vector<std::string>* path=NULL) const;
- /** Only evaluates non inclusion expressions. For inclusions, use addToDocument(). */
- virtual Value evaluateInternal(Variables* vars) const;
- virtual Value serialize(bool explain) const;
-
- /// like evaluate(), but return a Document instead of a Value-wrapped Document.
- Document evaluateDocument(Variables* vars) const;
-
- /** Evaluates with inclusions and adds results to passed in Mutable document
- *
- * @param output the MutableDocument to add the evaluated expressions to
- * @param currentDoc the input Document for this level (for inclusions)
- * @param vars the variables for use in subexpressions
- */
- void addToDocument(MutableDocument& ouput,
- const Document& currentDoc,
- Variables* vars
- ) const;
+ /** Create an empty expression.
+ * Until fields are added, this will evaluate to an empty document.
+ */
+ static boost::intrusive_ptr<ExpressionObject> create();
- // estimated number of fields that will be output
- size_t getSizeHint() const;
+ /// Like create but uses special handling of _id for root object of $project.
+ static boost::intrusive_ptr<ExpressionObject> createRoot();
- /** Create an empty expression.
- * Until fields are added, this will evaluate to an empty document.
- */
- static boost::intrusive_ptr<ExpressionObject> create();
+ /*
+ Add a field to the document expression.
- /// Like create but uses special handling of _id for root object of $project.
- static boost::intrusive_ptr<ExpressionObject> createRoot();
+ @param fieldPath the path the evaluated expression will have in the
+ result Document
+ @param pExpression the expression to evaluate to obtain this field's
+ Value in the result Document
+ */
+ void addField(const FieldPath& fieldPath, const boost::intrusive_ptr<Expression>& pExpression);
- /*
- Add a field to the document expression.
+ /*
+ Add a field path to the set of those to be included.
- @param fieldPath the path the evaluated expression will have in the
- result Document
- @param pExpression the expression to evaluate obtain this field's
- Value in the result Document
- */
- void addField(const FieldPath &fieldPath,
- const boost::intrusive_ptr<Expression> &pExpression);
+ Note that including a nested field implies including everything on
+ the path leading down to it.
- /*
- Add a field path to the set of those to be included.
+ @param fieldPath the name of the field to be included
+ */
+ void includePath(const std::string& fieldPath);
- Note that including a nested field implies including everything on
- the path leading down to it.
+ /*
+ Get a count of the added fields.
- @param fieldPath the name of the field to be included
- */
- void includePath(const std::string &fieldPath);
+ @returns how many fields have been added
+ */
+ size_t getFieldCount() const;
- /*
- Get a count of the added fields.
+ /*
+ Specialized BSON conversion that allows for writing out a
+ $project specification. This creates a standalone object, which must
+ be added to a containing object with a name
- @returns how many fields have been added
- */
- size_t getFieldCount() const;
+ @param pBuilder where to write the object to
+ @param requireExpression see Expression::addToBsonObj
+ */
+ void documentToBson(BSONObjBuilder* pBuilder, bool requireExpression) const;
- /*
- Specialized BSON conversion that allows for writing out a
- $project specification. This creates a standalone object, which must
- be added to a containing object with a name
+ /*
+ Visitor abstraction used by emitPaths(). Each path is recorded by
+ calling path().
+ */
+ class PathSink {
+ public:
+ virtual ~PathSink(){};
- @param pBuilder where to write the object to
- @param requireExpression see Expression::addToBsonObj
- */
- void documentToBson(BSONObjBuilder *pBuilder,
- bool requireExpression) const;
+ /**
+ Record a path.
- /*
- Visitor abstraction used by emitPaths(). Each path is recorded by
- calling path().
+ @param path the dotted path string
+ @param include if true, the path is included; if false, the path
+ is excluded
*/
- class PathSink {
- public:
- virtual ~PathSink() {};
-
- /**
- Record a path.
+ virtual void path(const std::string& path, bool include) = 0;
+ };
- @param path the dotted path string
- @param include if true, the path is included; if false, the path
- is excluded
- */
- virtual void path(const std::string &path, bool include) = 0;
- };
+ void excludeId(bool b) {
+ _excludeId = b;
+ }
- void excludeId(bool b) { _excludeId = b; }
+private:
+ ExpressionObject(bool atRoot);
- private:
- ExpressionObject(bool atRoot);
+ // Mapping from fieldname to the Expression that generates its value.
+ // NULL expression means inclusion from source document.
+ typedef std::map<std::string, boost::intrusive_ptr<Expression>> FieldMap;
+ FieldMap _expressions;
- // Mapping from fieldname to the Expression that generates its value.
- // NULL expression means inclusion from source document.
- typedef std::map<std::string, boost::intrusive_ptr<Expression> > FieldMap;
- FieldMap _expressions;
+ // this is used to maintain order for generated fields not in the source document
+ std::vector<std::string> _order;
- // this is used to maintain order for generated fields not in the source document
- std::vector<std::string> _order;
+ bool _excludeId;
+ bool _atRoot;
+};
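
Putting the creation and mutation methods above together, a projection along the lines of {_id: 0, a: 1, total: "$x"} could presumably be assembled like this; the use of the test-only ExpressionFieldPath::create helper is just to keep the sketch self-contained:

    boost::intrusive_ptr<Expression> totalExpr = ExpressionFieldPath::create("x");
    boost::intrusive_ptr<ExpressionObject> proj = ExpressionObject::createRoot();
    proj->excludeId(true);                          // drop _id from the output
    proj->includePath("a");                         // pass "a" through from the input document
    proj->addField(FieldPath("total"), totalExpr);  // computed field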
- bool _excludeId;
- bool _atRoot;
- };
+class ExpressionOr : public ExpressionVariadic<ExpressionOr> {
+public:
+ // virtuals from Expression
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+ virtual bool isAssociativeAndCommutative() const {
+ return true;
+ }
+};
- class ExpressionOr : public ExpressionVariadic<ExpressionOr> {
- public:
- // virtuals from Expression
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- virtual bool isAssociativeAndCommutative() const { return true; }
- };
+class ExpressionSecond : public ExpressionFixedArity<ExpressionSecond, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- class ExpressionSecond : public ExpressionFixedArity<ExpressionSecond, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
+ static inline int extract(const tm& tm) {
+ return tm.tm_sec;
+ }
+};
- static inline int extract(const tm& tm) { return tm.tm_sec; }
- };
+class ExpressionSetDifference : public ExpressionFixedArity<ExpressionSetDifference, 2> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionSetDifference : public ExpressionFixedArity<ExpressionSetDifference, 2> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionSetEquals : public ExpressionVariadic<ExpressionSetEquals> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+ virtual void validateArguments(const ExpressionVector& args) const;
+};
- class ExpressionSetEquals : public ExpressionVariadic<ExpressionSetEquals> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- virtual void validateArguments(const ExpressionVector& args) const;
- };
+class ExpressionSetIntersection : public ExpressionVariadic<ExpressionSetIntersection> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+ virtual bool isAssociativeAndCommutative() const {
+ return true;
+ }
+};
- class ExpressionSetIntersection : public ExpressionVariadic<ExpressionSetIntersection> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- virtual bool isAssociativeAndCommutative() const { return true; }
- };
+class ExpressionSetIsSubset : public ExpressionFixedArity<ExpressionSetIsSubset, 2> {
+public:
+ // virtuals from ExpressionNary
+ virtual boost::intrusive_ptr<Expression> optimize();
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- class ExpressionSetIsSubset : public ExpressionFixedArity<ExpressionSetIsSubset, 2> {
- public:
- // virtuals from ExpressionNary
- virtual boost::intrusive_ptr<Expression> optimize();
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- private:
- class Optimized;
- };
+private:
+ class Optimized;
+};
- class ExpressionSetUnion : public ExpressionVariadic<ExpressionSetUnion> {
- public:
- // virtuals from ExpressionNary
- // virtual intrusive_ptr<Expression> optimize();
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- virtual bool isAssociativeAndCommutative() const { return true; }
- };
+class ExpressionSetUnion : public ExpressionVariadic<ExpressionSetUnion> {
+public:
+ // virtuals from ExpressionNary
+ // virtual intrusive_ptr<Expression> optimize();
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+ virtual bool isAssociativeAndCommutative() const {
+ return true;
+ }
+};
- class ExpressionIsArray : public ExpressionFixedArity<ExpressionIsArray, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionIsArray : public ExpressionFixedArity<ExpressionIsArray, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionSize : public ExpressionFixedArity<ExpressionSize, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionSize : public ExpressionFixedArity<ExpressionSize, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionStrcasecmp : public ExpressionFixedArity<ExpressionStrcasecmp, 2> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionStrcasecmp : public ExpressionFixedArity<ExpressionStrcasecmp, 2> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionSubstr : public ExpressionFixedArity<ExpressionSubstr, 3> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionSubstr : public ExpressionFixedArity<ExpressionSubstr, 3> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionSubtract : public ExpressionFixedArity<ExpressionSubtract, 2> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionSubtract : public ExpressionFixedArity<ExpressionSubtract, 2> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionToLower : public ExpressionFixedArity<ExpressionToLower, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionToLower : public ExpressionFixedArity<ExpressionToLower, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionToUpper : public ExpressionFixedArity<ExpressionToUpper, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
- };
+class ExpressionToUpper : public ExpressionFixedArity<ExpressionToUpper, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
+};
- class ExpressionWeek : public ExpressionFixedArity<ExpressionWeek, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
+class ExpressionWeek : public ExpressionFixedArity<ExpressionWeek, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- static int extract(const tm& tm);
- };
+ static int extract(const tm& tm);
+};
- class ExpressionYear : public ExpressionFixedArity<ExpressionYear, 1> {
- public:
- // virtuals from ExpressionNary
- virtual Value evaluateInternal(Variables* vars) const;
- virtual const char* getOpName() const;
+class ExpressionYear : public ExpressionFixedArity<ExpressionYear, 1> {
+public:
+ // virtuals from ExpressionNary
+ virtual Value evaluateInternal(Variables* vars) const;
+ virtual const char* getOpName() const;
- // tm_year is years since 1990
- static int extract(const tm& tm) { return tm.tm_year + 1900; }
- };
+    // tm_year is years since 1900
+ static int extract(const tm& tm) {
+ return tm.tm_year + 1900;
+ }
+};
}
@@ -1075,11 +1090,11 @@ namespace mongo {
namespace mongo {
- inline Value ExpressionConstant::getValue() const {
- return pValue;
- }
+inline Value ExpressionConstant::getValue() const {
+ return pValue;
+}
- inline size_t ExpressionObject::getFieldCount() const {
- return _expressions.size();
- }
+inline size_t ExpressionObject::getFieldCount() const {
+ return _expressions.size();
+}
}
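
Editor's note: every operator above follows the same pattern, deriving from ExpressionFixedArity via CRTP with its arity as the second template parameter and overriding only evaluateInternal() and getOpName(). A minimal sketch of a hypothetical new one-argument operator written under that convention (the ExpressionTypeOf name is illustrative only, not part of this change):

    class ExpressionTypeOf : public ExpressionFixedArity<ExpressionTypeOf, 1> {
    public:
        // virtuals from ExpressionNary
        virtual Value evaluateInternal(Variables* vars) const;
        virtual const char* getOpName() const;
    };
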
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index 96cce9e4b4f..d4e18dcc582 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -36,34 +36,31 @@
namespace mongo {
- struct ExpressionContext : public IntrusiveCounterUnsigned {
- public:
- ExpressionContext(OperationContext* opCtx, const NamespaceString& ns)
- : ns(ns)
- , opCtx(opCtx)
- {}
+struct ExpressionContext : public IntrusiveCounterUnsigned {
+public:
+ ExpressionContext(OperationContext* opCtx, const NamespaceString& ns) : ns(ns), opCtx(opCtx) {}
- /** Used by a pipeline to check for interrupts so that killOp() works.
- * @throws if the operation has been interrupted
- */
- void checkForInterrupt() {
- if (opCtx && --interruptCounter == 0) { // XXX SERVER-13931 for opCtx check
- // The checkForInterrupt could be expensive, at least in relative terms.
- opCtx->checkForInterrupt();
- interruptCounter = kInterruptCheckPeriod;
- }
+ /** Used by a pipeline to check for interrupts so that killOp() works.
+ * @throws if the operation has been interrupted
+ */
+ void checkForInterrupt() {
+ if (opCtx && --interruptCounter == 0) { // XXX SERVER-13931 for opCtx check
+ // The checkForInterrupt could be expensive, at least in relative terms.
+ opCtx->checkForInterrupt();
+ interruptCounter = kInterruptCheckPeriod;
}
+ }
- bool inShard = false;
- bool inRouter = false;
- bool extSortAllowed = false;
- bool bypassDocumentValidation = false;
+ bool inShard = false;
+ bool inRouter = false;
+ bool extSortAllowed = false;
+ bool bypassDocumentValidation = false;
- NamespaceString ns;
- std::string tempDir; // Defaults to empty to prevent external sorting in mongos.
+ NamespaceString ns;
+ std::string tempDir; // Defaults to empty to prevent external sorting in mongos.
- OperationContext* opCtx;
- static const int kInterruptCheckPeriod = 128;
- int interruptCounter = kInterruptCheckPeriod; // when 0, check interruptStatus
- };
+ OperationContext* opCtx;
+ static const int kInterruptCheckPeriod = 128;
+ int interruptCounter = kInterruptCheckPeriod; // when 0, check interruptStatus
+};
}
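
Editor's note: checkForInterrupt() above amortizes the cost of opCtx->checkForInterrupt() by running it only once every kInterruptCheckPeriod (128) calls. A standalone sketch of the same countdown pattern, with hypothetical names and an assumed std::function callback standing in for the expensive check:

    #include <functional>
    #include <utility>

    class PeriodicCheck {
    public:
        explicit PeriodicCheck(std::function<void()> expensiveCheck)
            : _check(std::move(expensiveCheck)) {}

        // Cheap per-call path; the expensive callback runs once every kPeriod calls.
        void maybeCheck() {
            if (--_counter == 0) {
                _check();
                _counter = kPeriod;
            }
        }

    private:
        static const int kPeriod = 128;
        int _counter = kPeriod;
        std::function<void()> _check;
    };
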
diff --git a/src/mongo/db/pipeline/field_path.cpp b/src/mongo/db/pipeline/field_path.cpp
index 49115178a02..c2c934de41a 100644
--- a/src/mongo/db/pipeline/field_path.cpp
+++ b/src/mongo/db/pipeline/field_path.cpp
@@ -34,87 +34,85 @@
namespace mongo {
- using std::ostream;
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::ostream;
+using std::string;
+using std::stringstream;
+using std::vector;
- using namespace mongoutils;
+using namespace mongoutils;
- const char FieldPath::prefix[] = "$";
+const char FieldPath::prefix[] = "$";
- FieldPath::FieldPath(const vector<string>& fieldPath) {
- massert(16409, "FieldPath cannot be constructed from an empty vector.", !fieldPath.empty());
- vFieldName.reserve(fieldPath.size());
- for(vector<string>::const_iterator i = fieldPath.begin(); i != fieldPath.end(); ++i) {
- pushFieldName(*i);
- }
- verify(getPathLength() > 0);
+FieldPath::FieldPath(const vector<string>& fieldPath) {
+ massert(16409, "FieldPath cannot be constructed from an empty vector.", !fieldPath.empty());
+ vFieldName.reserve(fieldPath.size());
+ for (vector<string>::const_iterator i = fieldPath.begin(); i != fieldPath.end(); ++i) {
+ pushFieldName(*i);
}
+ verify(getPathLength() > 0);
+}
- FieldPath::FieldPath(const string& fieldPath) {
- /*
- The field path could be using dot notation.
- Break the field path up by peeling off successive pieces.
- */
- size_t startpos = 0;
- while(true) {
- /* find the next dot */
- const size_t dotpos = fieldPath.find('.', startpos);
-
- /* if there are no more dots, use the remainder of the string */
- if (dotpos == fieldPath.npos) {
- string lastFieldName = fieldPath.substr(startpos, dotpos);
- pushFieldName(lastFieldName);
- break;
- }
-
- /* use the string up to the dot */
- const size_t length = dotpos - startpos;
- string nextFieldName = fieldPath.substr(startpos, length);
- pushFieldName(nextFieldName);
-
- /* next time, search starting one spot after that */
- startpos = dotpos + 1;
+FieldPath::FieldPath(const string& fieldPath) {
+ /*
+ The field path could be using dot notation.
+ Break the field path up by peeling off successive pieces.
+ */
+ size_t startpos = 0;
+ while (true) {
+ /* find the next dot */
+ const size_t dotpos = fieldPath.find('.', startpos);
+
+ /* if there are no more dots, use the remainder of the string */
+ if (dotpos == fieldPath.npos) {
+ string lastFieldName = fieldPath.substr(startpos, dotpos);
+ pushFieldName(lastFieldName);
+ break;
}
- verify(getPathLength() > 0);
- }
- string FieldPath::getPath(bool fieldPrefix) const {
- stringstream ss;
- writePath(ss, fieldPrefix);
- return ss.str();
+ /* use the string up to the dot */
+ const size_t length = dotpos - startpos;
+ string nextFieldName = fieldPath.substr(startpos, length);
+ pushFieldName(nextFieldName);
+
+ /* next time, search starting one spot after that */
+ startpos = dotpos + 1;
}
+ verify(getPathLength() > 0);
+}
- void FieldPath::writePath(ostream &outStream, bool fieldPrefix) const {
- if (fieldPrefix)
- outStream << prefix;
+string FieldPath::getPath(bool fieldPrefix) const {
+ stringstream ss;
+ writePath(ss, fieldPrefix);
+ return ss.str();
+}
- const size_t n = vFieldName.size();
+void FieldPath::writePath(ostream& outStream, bool fieldPrefix) const {
+ if (fieldPrefix)
+ outStream << prefix;
- verify(n > 0);
- outStream << vFieldName[0];
- for(size_t i = 1; i < n; ++i)
- outStream << '.' << vFieldName[i];
- }
+ const size_t n = vFieldName.size();
- FieldPath FieldPath::tail() const {
- vector<string> allButFirst(vFieldName.begin()+1, vFieldName.end());
- return FieldPath(allButFirst);
- }
+ verify(n > 0);
+ outStream << vFieldName[0];
+ for (size_t i = 1; i < n; ++i)
+ outStream << '.' << vFieldName[i];
+}
- void FieldPath::uassertValidFieldName(const string& fieldName) {
- uassert(15998, "FieldPath field names may not be empty strings.", fieldName.length() > 0);
- uassert(16410, "FieldPath field names may not start with '$'.", fieldName[0] != '$');
- uassert(16411, "FieldPath field names may not contain '\0'.",
- fieldName.find('\0') == string::npos);
- uassert(16412, "FieldPath field names may not contain '.'.",
- !str::contains(fieldName, '.'));
- }
+FieldPath FieldPath::tail() const {
+ vector<string> allButFirst(vFieldName.begin() + 1, vFieldName.end());
+ return FieldPath(allButFirst);
+}
- void FieldPath::pushFieldName(const string& fieldName) {
- uassertValidFieldName(fieldName);
- vFieldName.push_back(fieldName);
- }
+void FieldPath::uassertValidFieldName(const string& fieldName) {
+ uassert(15998, "FieldPath field names may not be empty strings.", fieldName.length() > 0);
+ uassert(16410, "FieldPath field names may not start with '$'.", fieldName[0] != '$');
+ uassert(
+ 16411, "FieldPath field names may not contain '\0'.", fieldName.find('\0') == string::npos);
+ uassert(16412, "FieldPath field names may not contain '.'.", !str::contains(fieldName, '.'));
+}
+void FieldPath::pushFieldName(const string& fieldName) {
+ uassertValidFieldName(fieldName);
+ vFieldName.push_back(fieldName);
+}
}
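
Editor's note: the string constructor above walks the dotted path with std::string::find, peeling off one component per dot and taking the remainder after the last dot. A self-contained sketch of that splitting loop (hypothetical free function, not part of the patch):

    #include <string>
    #include <vector>

    std::vector<std::string> splitDottedPath(const std::string& path) {
        std::vector<std::string> parts;
        size_t startpos = 0;
        while (true) {
            const size_t dotpos = path.find('.', startpos);
            if (dotpos == std::string::npos) {
                parts.push_back(path.substr(startpos));  // remainder after the last dot
                break;
            }
            parts.push_back(path.substr(startpos, dotpos - startpos));
            startpos = dotpos + 1;  // resume just past the dot
        }
        return parts;
    }
    // splitDottedPath("foo.bar.baz") yields {"foo", "bar", "baz"}.
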
diff --git a/src/mongo/db/pipeline/field_path.h b/src/mongo/db/pipeline/field_path.h
index f400ca421ce..84dca905fdb 100644
--- a/src/mongo/db/pipeline/field_path.h
+++ b/src/mongo/db/pipeline/field_path.h
@@ -33,77 +33,76 @@
namespace mongo {
- class FieldPath {
- public:
-
- /**
- * Constructor.
- *
- * @param fieldPath the dotted field path std::string or non empty pre-split vector.
- * The constructed object will have getPathLength() > 0.
- * Uassert if any component field names do not pass validation.
- */
- FieldPath(const std::string& fieldPath);
- FieldPath(const std::vector<std::string>& fieldPath);
-
- /**
- Get the number of path elements in the field path.
-
- @returns the number of path elements
- */
- size_t getPathLength() const;
-
- /**
- Get a particular path element from the path.
-
- @param i the zero based index of the path element.
- @returns the path element
- */
- const std::string& getFieldName(size_t i) const;
-
- /**
- Get the full path.
-
- @param fieldPrefix whether or not to include the field prefix
- @returns the complete field path
- */
- std::string getPath(bool fieldPrefix) const;
-
- /**
- Write the full path.
-
- @param outStream where to write the path to
- @param fieldPrefix whether or not to include the field prefix
- */
- void writePath(std::ostream &outStream, bool fieldPrefix) const;
-
- /**
- Get the prefix string.
-
- @returns the prefix string
- */
- static const char *getPrefix();
-
- static const char prefix[];
-
- /**
- * A FieldPath like this but missing the first element (useful for recursion).
- * Precondition getPathLength() > 1.
- */
- FieldPath tail() const;
-
- private:
- /** Uassert if a field name does not pass validation. */
- static void uassertValidFieldName(const std::string& fieldName);
-
- /**
- * Push a new field name to the back of the vector of names comprising the field path.
- * Uassert if 'fieldName' does not pass validation.
- */
- void pushFieldName(const std::string& fieldName);
-
- std::vector<std::string> vFieldName;
- };
+class FieldPath {
+public:
+ /**
+ * Constructor.
+ *
+     * @param fieldPath the dotted field path std::string or non-empty pre-split vector.
+ * The constructed object will have getPathLength() > 0.
+ * Uassert if any component field names do not pass validation.
+ */
+ FieldPath(const std::string& fieldPath);
+ FieldPath(const std::vector<std::string>& fieldPath);
+
+ /**
+ Get the number of path elements in the field path.
+
+ @returns the number of path elements
+ */
+ size_t getPathLength() const;
+
+ /**
+ Get a particular path element from the path.
+
+ @param i the zero based index of the path element.
+ @returns the path element
+ */
+ const std::string& getFieldName(size_t i) const;
+
+ /**
+ Get the full path.
+
+ @param fieldPrefix whether or not to include the field prefix
+ @returns the complete field path
+ */
+ std::string getPath(bool fieldPrefix) const;
+
+ /**
+ Write the full path.
+
+ @param outStream where to write the path to
+ @param fieldPrefix whether or not to include the field prefix
+ */
+ void writePath(std::ostream& outStream, bool fieldPrefix) const;
+
+ /**
+ Get the prefix string.
+
+ @returns the prefix string
+ */
+ static const char* getPrefix();
+
+ static const char prefix[];
+
+ /**
+ * A FieldPath like this but missing the first element (useful for recursion).
+ * Precondition getPathLength() > 1.
+ */
+ FieldPath tail() const;
+
+private:
+ /** Uassert if a field name does not pass validation. */
+ static void uassertValidFieldName(const std::string& fieldName);
+
+ /**
+ * Push a new field name to the back of the vector of names comprising the field path.
+ * Uassert if 'fieldName' does not pass validation.
+ */
+ void pushFieldName(const std::string& fieldName);
+
+ std::vector<std::string> vFieldName;
+};
}
@@ -111,18 +110,16 @@ namespace mongo {
namespace mongo {
- inline size_t FieldPath::getPathLength() const {
- return vFieldName.size();
- }
-
- inline const std::string& FieldPath::getFieldName(size_t i) const {
- dassert(i < getPathLength());
- return vFieldName[i];
- }
-
- inline const char *FieldPath::getPrefix() {
- return prefix;
- }
+inline size_t FieldPath::getPathLength() const {
+ return vFieldName.size();
+}
+inline const std::string& FieldPath::getFieldName(size_t i) const {
+ dassert(i < getPathLength());
+ return vFieldName[i];
}
+inline const char* FieldPath::getPrefix() {
+ return prefix;
+}
+}
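
Editor's note: putting the interface together, a short usage sketch whose results match the assertions in field_path_test.cpp further down (wrapped in a throwaway function so the snippet stands alone):

    #include "mongo/db/pipeline/field_path.h"

    void fieldPathUsageSketch() {
        mongo::FieldPath path("foo.bar.baz");
        path.getPathLength();        // 3
        path.getFieldName(1);        // "bar"
        path.getPath(false);         // "foo.bar.baz"
        path.getPath(true);          // "$foo.bar.baz"
        path.tail().getPath(false);  // "bar.baz"
    }
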
diff --git a/src/mongo/db/pipeline/field_path_test.cpp b/src/mongo/db/pipeline/field_path_test.cpp
index 052d2321248..63c0216a76d 100644
--- a/src/mongo/db/pipeline/field_path_test.cpp
+++ b/src/mongo/db/pipeline/field_path_test.cpp
@@ -34,220 +34,219 @@
#include "mongo/dbtests/dbtests.h"
namespace mongo {
- using std::string;
- using std::vector;
-
- /** FieldPath constructed from empty string. */
- class Empty {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "" ), UserException );
- }
- };
-
- /** FieldPath constructed from empty vector. */
- class EmptyVector {
- public:
- void run() {
- vector<string> vec;
- ASSERT_THROWS( FieldPath path( vec ), MsgAssertionException );
- }
- };
-
- /** FieldPath constructed from a simple string (without dots). */
- class Simple {
- public:
- void run() {
- FieldPath path( "foo" );
- ASSERT_EQUALS( 1U, path.getPathLength() );
- ASSERT_EQUALS( "foo", path.getFieldName( 0 ) );
- ASSERT_EQUALS( "foo", path.getPath( false ) );
- ASSERT_EQUALS( "$foo", path.getPath( true ) );
- }
- };
-
- /** FieldPath constructed from a single element vector. */
- class SimpleVector {
- public:
- void run() {
- vector<string> vec( 1, "foo" );
- FieldPath path( vec );
- ASSERT_EQUALS( 1U, path.getPathLength() );
- ASSERT_EQUALS( "foo", path.getFieldName( 0 ) );
- ASSERT_EQUALS( "foo", path.getPath( false ) );
- }
- };
-
- /** FieldPath consisting of a '$' character. */
- class DollarSign {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "$" ), UserException );
- }
- };
-
- /** FieldPath with a '$' prefix. */
- class DollarSignPrefix {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "$a" ), UserException );
- }
- };
-
- /** FieldPath constructed from a string with one dot. */
- class Dotted {
- public:
- void run() {
- FieldPath path( "foo.bar" );
- ASSERT_EQUALS( 2U, path.getPathLength() );
- ASSERT_EQUALS( "foo", path.getFieldName( 0 ) );
- ASSERT_EQUALS( "bar", path.getFieldName( 1 ) );
- ASSERT_EQUALS( "foo.bar", path.getPath( false ) );
- ASSERT_EQUALS( "$foo.bar", path.getPath( true ) );
- }
- };
-
- /** FieldPath constructed from a single element vector containing a dot. */
- class VectorWithDot {
- public:
- void run() {
- vector<string> vec( 1, "fo.o" );
- ASSERT_THROWS( FieldPath path( vec ), UserException );
- }
- };
-
- /** FieldPath constructed from a two element vector. */
- class TwoFieldVector {
- public:
- void run() {
- vector<string> vec;
- vec.push_back( "foo" );
- vec.push_back( "bar" );
- FieldPath path( vec );
- ASSERT_EQUALS( 2U, path.getPathLength() );
- ASSERT_EQUALS( "foo.bar", path.getPath( false ) );
- }
- };
-
- /** FieldPath with a '$' prefix in the second field. */
- class DollarSignPrefixSecondField {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "a.$b" ), UserException );
- }
- };
-
- /** FieldPath constructed from a string with two dots. */
- class TwoDotted {
- public:
- void run() {
- FieldPath path( "foo.bar.baz" );
- ASSERT_EQUALS( 3U, path.getPathLength() );
- ASSERT_EQUALS( "foo", path.getFieldName( 0 ) );
- ASSERT_EQUALS( "bar", path.getFieldName( 1 ) );
- ASSERT_EQUALS( "baz", path.getFieldName( 2 ) );
- ASSERT_EQUALS( "foo.bar.baz", path.getPath( false ) );
- }
- };
-
- /** FieldPath constructed from a string ending in a dot. */
- class TerminalDot {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "foo." ), UserException );
- }
- };
-
- /** FieldPath constructed from a string beginning with a dot. */
- class PrefixDot {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( ".foo" ), UserException );
- }
- };
-
- /** FieldPath constructed from a string with adjacent dots. */
- class AdjacentDots {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( "foo..bar" ), UserException );
- }
- };
-
- /** FieldPath constructed from a string with one letter between two dots. */
- class LetterBetweenDots {
- public:
- void run() {
- FieldPath path( "foo.a.bar" );
- ASSERT_EQUALS( 3U, path.getPathLength() );
- ASSERT_EQUALS( "foo.a.bar", path.getPath( false ) );
- }
- };
-
- /** FieldPath containing a null character. */
- class NullCharacter {
- public:
- void run() {
- ASSERT_THROWS( FieldPath path( string( "foo.b\0r", 7 ) ), UserException );
- }
- };
-
- /** FieldPath constructed with a vector containing a null character. */
- class VectorNullCharacter {
- public:
- void run() {
- vector<string> vec;
- vec.push_back( "foo" );
- vec.push_back( string( "b\0r", 3 ) );
- ASSERT_THROWS( FieldPath path( vec ), UserException );
- }
- };
-
- /** Tail of a FieldPath. */
- class Tail {
- public:
- void run() {
- FieldPath path = FieldPath( "foo.bar" ).tail();
- ASSERT_EQUALS( 1U, path.getPathLength() );
- ASSERT_EQUALS( "bar", path.getPath( false ) );
- }
- };
-
- /** Tail of a FieldPath with three fields. */
- class TailThreeFields {
- public:
- void run() {
- FieldPath path = FieldPath( "foo.bar.baz" ).tail();
- ASSERT_EQUALS( 2U, path.getPathLength() );
- ASSERT_EQUALS( "bar.baz", path.getPath( false ) );
- }
- };
-
- class All : public Suite {
- public:
- All() : Suite( "field_path" ) {
- }
- void setupTests() {
- add<Empty>();
- add<EmptyVector>();
- add<Simple>();
- add<SimpleVector>();
- add<DollarSign>();
- add<DollarSignPrefix>();
- add<Dotted>();
- add<VectorWithDot>();
- add<TwoFieldVector>();
- add<DollarSignPrefixSecondField>();
- add<TwoDotted>();
- add<TerminalDot>();
- add<PrefixDot>();
- add<AdjacentDots>();
- add<LetterBetweenDots>();
- add<NullCharacter>();
- add<VectorNullCharacter>();
- add<Tail>();
- add<TailThreeFields>();
- }
- };
- SuiteInstance<All> myall;
-} // namespace mongo
+using std::string;
+using std::vector;
+
+/** FieldPath constructed from empty string. */
+class Empty {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path(""), UserException);
+ }
+};
+
+/** FieldPath constructed from empty vector. */
+class EmptyVector {
+public:
+ void run() {
+ vector<string> vec;
+ ASSERT_THROWS(FieldPath path(vec), MsgAssertionException);
+ }
+};
+
+/** FieldPath constructed from a simple string (without dots). */
+class Simple {
+public:
+ void run() {
+ FieldPath path("foo");
+ ASSERT_EQUALS(1U, path.getPathLength());
+ ASSERT_EQUALS("foo", path.getFieldName(0));
+ ASSERT_EQUALS("foo", path.getPath(false));
+ ASSERT_EQUALS("$foo", path.getPath(true));
+ }
+};
+
+/** FieldPath constructed from a single element vector. */
+class SimpleVector {
+public:
+ void run() {
+ vector<string> vec(1, "foo");
+ FieldPath path(vec);
+ ASSERT_EQUALS(1U, path.getPathLength());
+ ASSERT_EQUALS("foo", path.getFieldName(0));
+ ASSERT_EQUALS("foo", path.getPath(false));
+ }
+};
+
+/** FieldPath consisting of a '$' character. */
+class DollarSign {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("$"), UserException);
+ }
+};
+
+/** FieldPath with a '$' prefix. */
+class DollarSignPrefix {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("$a"), UserException);
+ }
+};
+
+/** FieldPath constructed from a string with one dot. */
+class Dotted {
+public:
+ void run() {
+ FieldPath path("foo.bar");
+ ASSERT_EQUALS(2U, path.getPathLength());
+ ASSERT_EQUALS("foo", path.getFieldName(0));
+ ASSERT_EQUALS("bar", path.getFieldName(1));
+ ASSERT_EQUALS("foo.bar", path.getPath(false));
+ ASSERT_EQUALS("$foo.bar", path.getPath(true));
+ }
+};
+
+/** FieldPath constructed from a single element vector containing a dot. */
+class VectorWithDot {
+public:
+ void run() {
+ vector<string> vec(1, "fo.o");
+ ASSERT_THROWS(FieldPath path(vec), UserException);
+ }
+};
+
+/** FieldPath constructed from a two element vector. */
+class TwoFieldVector {
+public:
+ void run() {
+ vector<string> vec;
+ vec.push_back("foo");
+ vec.push_back("bar");
+ FieldPath path(vec);
+ ASSERT_EQUALS(2U, path.getPathLength());
+ ASSERT_EQUALS("foo.bar", path.getPath(false));
+ }
+};
+
+/** FieldPath with a '$' prefix in the second field. */
+class DollarSignPrefixSecondField {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("a.$b"), UserException);
+ }
+};
+
+/** FieldPath constructed from a string with two dots. */
+class TwoDotted {
+public:
+ void run() {
+ FieldPath path("foo.bar.baz");
+ ASSERT_EQUALS(3U, path.getPathLength());
+ ASSERT_EQUALS("foo", path.getFieldName(0));
+ ASSERT_EQUALS("bar", path.getFieldName(1));
+ ASSERT_EQUALS("baz", path.getFieldName(2));
+ ASSERT_EQUALS("foo.bar.baz", path.getPath(false));
+ }
+};
+
+/** FieldPath constructed from a string ending in a dot. */
+class TerminalDot {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("foo."), UserException);
+ }
+};
+
+/** FieldPath constructed from a string beginning with a dot. */
+class PrefixDot {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path(".foo"), UserException);
+ }
+};
+
+/** FieldPath constructed from a string with adjacent dots. */
+class AdjacentDots {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path("foo..bar"), UserException);
+ }
+};
+
+/** FieldPath constructed from a string with one letter between two dots. */
+class LetterBetweenDots {
+public:
+ void run() {
+ FieldPath path("foo.a.bar");
+ ASSERT_EQUALS(3U, path.getPathLength());
+ ASSERT_EQUALS("foo.a.bar", path.getPath(false));
+ }
+};
+
+/** FieldPath containing a null character. */
+class NullCharacter {
+public:
+ void run() {
+ ASSERT_THROWS(FieldPath path(string("foo.b\0r", 7)), UserException);
+ }
+};
+
+/** FieldPath constructed with a vector containing a null character. */
+class VectorNullCharacter {
+public:
+ void run() {
+ vector<string> vec;
+ vec.push_back("foo");
+ vec.push_back(string("b\0r", 3));
+ ASSERT_THROWS(FieldPath path(vec), UserException);
+ }
+};
+
+/** Tail of a FieldPath. */
+class Tail {
+public:
+ void run() {
+ FieldPath path = FieldPath("foo.bar").tail();
+ ASSERT_EQUALS(1U, path.getPathLength());
+ ASSERT_EQUALS("bar", path.getPath(false));
+ }
+};
+
+/** Tail of a FieldPath with three fields. */
+class TailThreeFields {
+public:
+ void run() {
+ FieldPath path = FieldPath("foo.bar.baz").tail();
+ ASSERT_EQUALS(2U, path.getPathLength());
+ ASSERT_EQUALS("bar.baz", path.getPath(false));
+ }
+};
+
+class All : public Suite {
+public:
+ All() : Suite("field_path") {}
+ void setupTests() {
+ add<Empty>();
+ add<EmptyVector>();
+ add<Simple>();
+ add<SimpleVector>();
+ add<DollarSign>();
+ add<DollarSignPrefix>();
+ add<Dotted>();
+ add<VectorWithDot>();
+ add<TwoFieldVector>();
+ add<DollarSignPrefixSecondField>();
+ add<TwoDotted>();
+ add<TerminalDot>();
+ add<PrefixDot>();
+ add<AdjacentDots>();
+ add<LetterBetweenDots>();
+ add<NullCharacter>();
+ add<VectorNullCharacter>();
+ add<Tail>();
+ add<TailThreeFields>();
+ }
+};
+SuiteInstance<All> myall;
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index bba6bf9615f..9e9427190f1 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -46,597 +46,572 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::endl;
- using std::ostringstream;
- using std::string;
- using std::vector;
-
- const char Pipeline::commandName[] = "aggregate";
- const char Pipeline::pipelineName[] = "pipeline";
- const char Pipeline::explainName[] = "explain";
- const char Pipeline::fromRouterName[] = "fromRouter";
- const char Pipeline::serverPipelineName[] = "serverPipeline";
- const char Pipeline::mongosPipelineName[] = "mongosPipeline";
-
- Pipeline::Pipeline(const intrusive_ptr<ExpressionContext> &pTheCtx):
- explain(false),
- pCtx(pTheCtx) {
- }
-
-
- /* this structure is used to make a lookup table of operators */
- struct StageDesc {
- const char *pName;
- intrusive_ptr<DocumentSource> (*pFactory)(
- BSONElement, const intrusive_ptr<ExpressionContext> &);
- };
-
- /* this table must be in alphabetical order by name for bsearch() */
- static const StageDesc stageDesc[] = {
- {DocumentSourceGeoNear::geoNearName,
- DocumentSourceGeoNear::createFromBson},
- {DocumentSourceGroup::groupName,
- DocumentSourceGroup::createFromBson},
- {DocumentSourceLimit::limitName,
- DocumentSourceLimit::createFromBson},
- {DocumentSourceMatch::matchName,
- DocumentSourceMatch::createFromBson},
- {DocumentSourceMergeCursors::name,
- DocumentSourceMergeCursors::createFromBson},
- {DocumentSourceOut::outName,
- DocumentSourceOut::createFromBson},
- {DocumentSourceProject::projectName,
- DocumentSourceProject::createFromBson},
- {DocumentSourceRedact::redactName,
- DocumentSourceRedact::createFromBson},
- {DocumentSourceSkip::skipName,
- DocumentSourceSkip::createFromBson},
- {DocumentSourceSort::sortName,
- DocumentSourceSort::createFromBson},
- {DocumentSourceUnwind::unwindName,
- DocumentSourceUnwind::createFromBson},
- };
- static const size_t nStageDesc = sizeof(stageDesc) / sizeof(StageDesc);
-
- static int stageDescCmp(const void *pL, const void *pR) {
- return strcmp(((const StageDesc *)pL)->pName,
- ((const StageDesc *)pR)->pName);
- }
-
- intrusive_ptr<Pipeline> Pipeline::parseCommand(string& errmsg,
- const BSONObj& cmdObj,
- const intrusive_ptr<ExpressionContext>& pCtx) {
- intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
- vector<BSONElement> pipeline;
-
- /* gather the specification for the aggregation */
- for(BSONObj::iterator cmdIterator = cmdObj.begin();
- cmdIterator.more(); ) {
- BSONElement cmdElement(cmdIterator.next());
- const char *pFieldName = cmdElement.fieldName();
-
- // ignore top-level fields prefixed with $. They are for the command processor, not us.
- if (pFieldName[0] == '$') {
- continue;
- }
-
- // maxTimeMS is also for the command processor.
- if (pFieldName == LiteParsedQuery::cmdOptionMaxTimeMS) {
- continue;
- }
-
- // ignore cursor options since they are handled externally.
- if (str::equals(pFieldName, "cursor")) {
- continue;
- }
+using boost::intrusive_ptr;
+using std::endl;
+using std::ostringstream;
+using std::string;
+using std::vector;
+
+const char Pipeline::commandName[] = "aggregate";
+const char Pipeline::pipelineName[] = "pipeline";
+const char Pipeline::explainName[] = "explain";
+const char Pipeline::fromRouterName[] = "fromRouter";
+const char Pipeline::serverPipelineName[] = "serverPipeline";
+const char Pipeline::mongosPipelineName[] = "mongosPipeline";
+
+Pipeline::Pipeline(const intrusive_ptr<ExpressionContext>& pTheCtx)
+ : explain(false), pCtx(pTheCtx) {}
+
+
+/* this structure is used to make a lookup table of operators */
+struct StageDesc {
+ const char* pName;
+ intrusive_ptr<DocumentSource>(*pFactory)(BSONElement, const intrusive_ptr<ExpressionContext>&);
+};
+
+/* this table must be in alphabetical order by name for bsearch() */
+static const StageDesc stageDesc[] = {
+ {DocumentSourceGeoNear::geoNearName, DocumentSourceGeoNear::createFromBson},
+ {DocumentSourceGroup::groupName, DocumentSourceGroup::createFromBson},
+ {DocumentSourceLimit::limitName, DocumentSourceLimit::createFromBson},
+ {DocumentSourceMatch::matchName, DocumentSourceMatch::createFromBson},
+ {DocumentSourceMergeCursors::name, DocumentSourceMergeCursors::createFromBson},
+ {DocumentSourceOut::outName, DocumentSourceOut::createFromBson},
+ {DocumentSourceProject::projectName, DocumentSourceProject::createFromBson},
+ {DocumentSourceRedact::redactName, DocumentSourceRedact::createFromBson},
+ {DocumentSourceSkip::skipName, DocumentSourceSkip::createFromBson},
+ {DocumentSourceSort::sortName, DocumentSourceSort::createFromBson},
+ {DocumentSourceUnwind::unwindName, DocumentSourceUnwind::createFromBson},
+};
+static const size_t nStageDesc = sizeof(stageDesc) / sizeof(StageDesc);
+
+static int stageDescCmp(const void* pL, const void* pR) {
+ return strcmp(((const StageDesc*)pL)->pName, ((const StageDesc*)pR)->pName);
+}
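
Editor's note: stage lookup above uses the C library bsearch over the alphabetically ordered stageDesc table, with a comparator that strcmp's the stage names. A minimal standalone sketch of the same technique with a hypothetical two-entry table (names and handlers are illustrative):

    #include <cstdlib>
    #include <cstring>

    struct NamedHandler {
        const char* name;
        int (*handler)();
    };

    static int handleLimit() { return 1; }
    static int handleSkip() { return 2; }

    // The table must stay sorted by name, just as stageDesc[] must, or bsearch misses entries.
    static const NamedHandler kHandlers[] = {{"$limit", handleLimit}, {"$skip", handleSkip}};

    static int namedHandlerCmp(const void* pL, const void* pR) {
        return std::strcmp(static_cast<const NamedHandler*>(pL)->name,
                           static_cast<const NamedHandler*>(pR)->name);
    }

    static const NamedHandler* findHandler(const char* name) {
        NamedHandler key = {name, nullptr};
        return static_cast<const NamedHandler*>(
            std::bsearch(&key,
                         kHandlers,
                         sizeof(kHandlers) / sizeof(kHandlers[0]),
                         sizeof(NamedHandler),
                         namedHandlerCmp));
    }
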
+
+intrusive_ptr<Pipeline> Pipeline::parseCommand(string& errmsg,
+ const BSONObj& cmdObj,
+ const intrusive_ptr<ExpressionContext>& pCtx) {
+ intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
+ vector<BSONElement> pipeline;
+
+ /* gather the specification for the aggregation */
+ for (BSONObj::iterator cmdIterator = cmdObj.begin(); cmdIterator.more();) {
+ BSONElement cmdElement(cmdIterator.next());
+ const char* pFieldName = cmdElement.fieldName();
+
+ // ignore top-level fields prefixed with $. They are for the command processor, not us.
+ if (pFieldName[0] == '$') {
+ continue;
+ }
- /* look for the aggregation command */
- if (!strcmp(pFieldName, commandName)) {
- continue;
- }
+ // maxTimeMS is also for the command processor.
+ if (pFieldName == LiteParsedQuery::cmdOptionMaxTimeMS) {
+ continue;
+ }
- /* check for the collection name */
- if (!strcmp(pFieldName, pipelineName)) {
- pipeline = cmdElement.Array();
- continue;
- }
+ // ignore cursor options since they are handled externally.
+ if (str::equals(pFieldName, "cursor")) {
+ continue;
+ }
- /* check for explain option */
- if (!strcmp(pFieldName, explainName)) {
- pPipeline->explain = cmdElement.Bool();
- continue;
- }
+ /* look for the aggregation command */
+ if (!strcmp(pFieldName, commandName)) {
+ continue;
+ }
- /* if the request came from the router, we're in a shard */
- if (!strcmp(pFieldName, fromRouterName)) {
- pCtx->inShard = cmdElement.Bool();
- continue;
- }
+ /* check for the collection name */
+ if (!strcmp(pFieldName, pipelineName)) {
+ pipeline = cmdElement.Array();
+ continue;
+ }
- if (str::equals(pFieldName, "allowDiskUse")) {
- uassert(16949,
- str::stream() << "allowDiskUse must be a bool, not a "
- << typeName(cmdElement.type()),
- cmdElement.type() == Bool);
- pCtx->extSortAllowed = cmdElement.Bool();
- continue;
- }
+ /* check for explain option */
+ if (!strcmp(pFieldName, explainName)) {
+ pPipeline->explain = cmdElement.Bool();
+ continue;
+ }
- if (pFieldName == bypassDocumentValidationCommandOption()) {
- pCtx->bypassDocumentValidation = cmdElement.trueValue();
- continue;
- }
+ /* if the request came from the router, we're in a shard */
+ if (!strcmp(pFieldName, fromRouterName)) {
+ pCtx->inShard = cmdElement.Bool();
+ continue;
+ }
- /* we didn't recognize a field in the command */
- ostringstream sb;
- sb << "unrecognized field '" << cmdElement.fieldName() << "'";
- errmsg = sb.str();
- return intrusive_ptr<Pipeline>();
+ if (str::equals(pFieldName, "allowDiskUse")) {
+ uassert(16949,
+ str::stream() << "allowDiskUse must be a bool, not a "
+ << typeName(cmdElement.type()),
+ cmdElement.type() == Bool);
+ pCtx->extSortAllowed = cmdElement.Bool();
+ continue;
}
- /*
- If we get here, we've harvested the fields we expect for a pipeline.
-
- Set up the specified document source pipeline.
- */
- SourceContainer& sources = pPipeline->sources; // shorthand
-
- /* iterate over the steps in the pipeline */
- const size_t nSteps = pipeline.size();
- for(size_t iStep = 0; iStep < nSteps; ++iStep) {
- /* pull out the pipeline element as an object */
- BSONElement pipeElement(pipeline[iStep]);
- uassert(15942, str::stream() << "pipeline element " <<
- iStep << " is not an object",
- pipeElement.type() == Object);
- BSONObj bsonObj(pipeElement.Obj());
-
- // Parse a pipeline stage from 'bsonObj'.
- uassert(16435, "A pipeline stage specification object must contain exactly one field.",
- bsonObj.nFields() == 1);
- BSONElement stageSpec = bsonObj.firstElement();
- const char* stageName = stageSpec.fieldName();
-
- // Create a DocumentSource pipeline stage from 'stageSpec'.
- StageDesc key;
- key.pName = stageName;
- const StageDesc* pDesc = (const StageDesc*)
- bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc),
- stageDescCmp);
-
- uassert(16436,
- str::stream() << "Unrecognized pipeline stage name: '" << stageName << "'",
- pDesc);
- intrusive_ptr<DocumentSource> stage = pDesc->pFactory(stageSpec, pCtx);
- verify(stage);
- sources.push_back(stage);
-
- // TODO find a good general way to check stages that must be first syntactically
-
- if (dynamic_cast<DocumentSourceOut*>(stage.get())) {
- uassert(16991, "$out can only be the final stage in the pipeline",
- iStep == nSteps - 1);
- }
+ if (pFieldName == bypassDocumentValidationCommandOption()) {
+ pCtx->bypassDocumentValidation = cmdElement.trueValue();
+ continue;
}
- // The order in which optimizations are applied can have significant impact on the
- // efficiency of the final pipeline. Be Careful!
- Optimizations::Local::moveMatchBeforeSort(pPipeline.get());
- Optimizations::Local::moveSkipAndLimitBeforeProject(pPipeline.get());
- Optimizations::Local::moveLimitBeforeSkip(pPipeline.get());
- Optimizations::Local::coalesceAdjacent(pPipeline.get());
- Optimizations::Local::optimizeEachDocumentSource(pPipeline.get());
- Optimizations::Local::duplicateMatchBeforeInitalRedact(pPipeline.get());
+ /* we didn't recognize a field in the command */
+ ostringstream sb;
+ sb << "unrecognized field '" << cmdElement.fieldName() << "'";
+ errmsg = sb.str();
+ return intrusive_ptr<Pipeline>();
+ }
- return pPipeline;
+ /*
+ If we get here, we've harvested the fields we expect for a pipeline.
+
+ Set up the specified document source pipeline.
+ */
+ SourceContainer& sources = pPipeline->sources; // shorthand
+
+ /* iterate over the steps in the pipeline */
+ const size_t nSteps = pipeline.size();
+ for (size_t iStep = 0; iStep < nSteps; ++iStep) {
+ /* pull out the pipeline element as an object */
+ BSONElement pipeElement(pipeline[iStep]);
+ uassert(15942,
+ str::stream() << "pipeline element " << iStep << " is not an object",
+ pipeElement.type() == Object);
+ BSONObj bsonObj(pipeElement.Obj());
+
+ // Parse a pipeline stage from 'bsonObj'.
+ uassert(16435,
+ "A pipeline stage specification object must contain exactly one field.",
+ bsonObj.nFields() == 1);
+ BSONElement stageSpec = bsonObj.firstElement();
+ const char* stageName = stageSpec.fieldName();
+
+ // Create a DocumentSource pipeline stage from 'stageSpec'.
+ StageDesc key;
+ key.pName = stageName;
+ const StageDesc* pDesc =
+ (const StageDesc*)bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc), stageDescCmp);
+
+ uassert(16436,
+ str::stream() << "Unrecognized pipeline stage name: '" << stageName << "'",
+ pDesc);
+ intrusive_ptr<DocumentSource> stage = pDesc->pFactory(stageSpec, pCtx);
+ verify(stage);
+ sources.push_back(stage);
+
+ // TODO find a good general way to check stages that must be first syntactically
+
+ if (dynamic_cast<DocumentSourceOut*>(stage.get())) {
+ uassert(16991, "$out can only be the final stage in the pipeline", iStep == nSteps - 1);
+ }
}
- void Pipeline::Optimizations::Local::moveMatchBeforeSort(Pipeline* pipeline) {
- // TODO Keep moving matches across multiple sorts as moveLimitBeforeSkip does below.
- // TODO Check sort for limit. Not an issue currently due to order optimizations are applied,
- // but should be fixed.
- SourceContainer& sources = pipeline->sources;
- for (size_t srcn = sources.size(), srci = 1; srci < srcn; ++srci) {
- intrusive_ptr<DocumentSource> &pSource = sources[srci];
- DocumentSourceMatch* match = dynamic_cast<DocumentSourceMatch *>(pSource.get());
- if (match && !match->isTextQuery()) {
- intrusive_ptr<DocumentSource> &pPrevious = sources[srci - 1];
- if (dynamic_cast<DocumentSourceSort *>(pPrevious.get())) {
- /* swap this item with the previous */
- intrusive_ptr<DocumentSource> pTemp(pPrevious);
- pPrevious = pSource;
- pSource = pTemp;
- }
+ // The order in which optimizations are applied can have significant impact on the
+ // efficiency of the final pipeline. Be Careful!
+ Optimizations::Local::moveMatchBeforeSort(pPipeline.get());
+ Optimizations::Local::moveSkipAndLimitBeforeProject(pPipeline.get());
+ Optimizations::Local::moveLimitBeforeSkip(pPipeline.get());
+ Optimizations::Local::coalesceAdjacent(pPipeline.get());
+ Optimizations::Local::optimizeEachDocumentSource(pPipeline.get());
+ Optimizations::Local::duplicateMatchBeforeInitalRedact(pPipeline.get());
+
+ return pPipeline;
+}
+
+void Pipeline::Optimizations::Local::moveMatchBeforeSort(Pipeline* pipeline) {
+ // TODO Keep moving matches across multiple sorts as moveLimitBeforeSkip does below.
+    // TODO Check sort for limit. Not an issue currently due to the order in which optimizations
+    // are applied, but should be fixed.
+ // but should be fixed.
+ SourceContainer& sources = pipeline->sources;
+ for (size_t srcn = sources.size(), srci = 1; srci < srcn; ++srci) {
+ intrusive_ptr<DocumentSource>& pSource = sources[srci];
+ DocumentSourceMatch* match = dynamic_cast<DocumentSourceMatch*>(pSource.get());
+ if (match && !match->isTextQuery()) {
+ intrusive_ptr<DocumentSource>& pPrevious = sources[srci - 1];
+ if (dynamic_cast<DocumentSourceSort*>(pPrevious.get())) {
+ /* swap this item with the previous */
+ intrusive_ptr<DocumentSource> pTemp(pPrevious);
+ pPrevious = pSource;
+ pSource = pTemp;
}
}
}
-
- void Pipeline::Optimizations::Local::moveSkipAndLimitBeforeProject(Pipeline* pipeline) {
- SourceContainer& sources = pipeline->sources;
- if (sources.empty()) return;
-
- for (int i = sources.size() - 1; i >= 1 /* not looking at 0 */; i--) {
- // This optimization only applies when a $project comes before a $skip or $limit.
- auto project = dynamic_cast<DocumentSourceProject*>(sources[i-1].get());
- if (!project) continue;
-
- auto skip = dynamic_cast<DocumentSourceSkip*>(sources[i].get());
- auto limit = dynamic_cast<DocumentSourceLimit*>(sources[i].get());
- if (!(skip || limit)) continue;
-
- swap(sources[i], sources[i-1]);
-
- // Start at back again. This is needed to handle cases with more than 1 $skip or
- // $limit (S means skip, L means limit, P means project)
+}
+
+void Pipeline::Optimizations::Local::moveSkipAndLimitBeforeProject(Pipeline* pipeline) {
+ SourceContainer& sources = pipeline->sources;
+ if (sources.empty())
+ return;
+
+ for (int i = sources.size() - 1; i >= 1 /* not looking at 0 */; i--) {
+ // This optimization only applies when a $project comes before a $skip or $limit.
+ auto project = dynamic_cast<DocumentSourceProject*>(sources[i - 1].get());
+ if (!project)
+ continue;
+
+ auto skip = dynamic_cast<DocumentSourceSkip*>(sources[i].get());
+ auto limit = dynamic_cast<DocumentSourceLimit*>(sources[i].get());
+ if (!(skip || limit))
+ continue;
+
+ swap(sources[i], sources[i - 1]);
+
+ // Start at back again. This is needed to handle cases with more than 1 $skip or
+ // $limit (S means skip, L means limit, P means project)
+ //
+ // These would work without second pass (assuming back to front ordering)
+ // PS -> SP
+ // PL -> LP
+ // PPL -> LPP
+ // PPS -> SPP
+ //
+ // The following cases need a second pass to handle the second skip or limit
+ // PLL -> LLP
+ // PPLL -> LLPP
+ // PLPL -> LLPP
+ i = sources.size(); // decremented before next pass
+ }
+}
+
+void Pipeline::Optimizations::Local::moveLimitBeforeSkip(Pipeline* pipeline) {
+ SourceContainer& sources = pipeline->sources;
+ if (sources.empty())
+ return;
+
+ for (int i = sources.size() - 1; i >= 1 /* not looking at 0 */; i--) {
+ DocumentSourceLimit* limit = dynamic_cast<DocumentSourceLimit*>(sources[i].get());
+ DocumentSourceSkip* skip = dynamic_cast<DocumentSourceSkip*>(sources[i - 1].get());
+ if (limit && skip) {
+ // Increase limit by skip since the skipped docs now pass through the $limit
+ limit->setLimit(limit->getLimit() + skip->getSkip());
+ swap(sources[i], sources[i - 1]);
+
+ // Start at back again. This is needed to handle cases with more than 1 $limit
+ // (S means skip, L means limit)
//
- // These would work without second pass (assuming back to front ordering)
- // PS -> SP
- // PL -> LP
- // PPL -> LPP
- // PPS -> SPP
+ // These two would work without second pass (assuming back to front ordering)
+ // SL -> LS
+ // SSL -> LSS
//
- // The following cases need a second pass to handle the second skip or limit
- // PLL -> LLP
- // PPLL -> LLPP
- // PLPL -> LLPP
- i = sources.size(); // decremented before next pass
+ // The following cases need a second pass to handle the second limit
+ // SLL -> LLS
+ // SSLL -> LLSS
+ // SLSL -> LLSS
+ i = sources.size(); // decremented before next pass
}
}
-
- void Pipeline::Optimizations::Local::moveLimitBeforeSkip(Pipeline* pipeline) {
- SourceContainer& sources = pipeline->sources;
- if (sources.empty())
- return;
-
- for(int i = sources.size() - 1; i >= 1 /* not looking at 0 */; i--) {
- DocumentSourceLimit* limit =
- dynamic_cast<DocumentSourceLimit*>(sources[i].get());
- DocumentSourceSkip* skip =
- dynamic_cast<DocumentSourceSkip*>(sources[i-1].get());
- if (limit && skip) {
- // Increase limit by skip since the skipped docs now pass through the $limit
- limit->setLimit(limit->getLimit() + skip->getSkip());
- swap(sources[i], sources[i-1]);
-
- // Start at back again. This is needed to handle cases with more than 1 $limit
- // (S means skip, L means limit)
- //
- // These two would work without second pass (assuming back to front ordering)
- // SL -> LS
- // SSL -> LSS
- //
- // The following cases need a second pass to handle the second limit
- // SLL -> LLS
- // SSLL -> LLSS
- // SLSL -> LLSS
- i = sources.size(); // decremented before next pass
- }
- }
+}
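
Editor's note, to make the limit adjustment above concrete: with { $skip: 5 } followed by { $limit: 3 }, documents 6 through 8 are returned; after swapping the stages, the limit must grow to 3 + 5 = 8 so that { $limit: 8 } followed by { $skip: 5 } still yields documents 6 through 8.
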
+
+void Pipeline::Optimizations::Local::coalesceAdjacent(Pipeline* pipeline) {
+ SourceContainer& sources = pipeline->sources;
+ if (sources.empty())
+ return;
+
+ // move all sources to a temporary list
+ SourceContainer tempSources;
+ sources.swap(tempSources);
+
+ // move the first one to the final list
+ sources.push_back(tempSources[0]);
+
+ // run through the sources, coalescing them or keeping them
+ for (size_t tempn = tempSources.size(), tempi = 1; tempi < tempn; ++tempi) {
+ // If we can't coalesce the source with the last, then move it
+ // to the final list, and make it the new last. (If we succeeded,
+ // then we're still on the same last, and there's no need to move
+ // or do anything with the source -- the destruction of tempSources
+ // will take care of the rest.)
+ intrusive_ptr<DocumentSource>& pLastSource = sources.back();
+ intrusive_ptr<DocumentSource>& pTemp = tempSources[tempi];
+ verify(pTemp && pLastSource);
+ if (!pLastSource->coalesce(pTemp))
+ sources.push_back(pTemp);
}
-
- void Pipeline::Optimizations::Local::coalesceAdjacent(Pipeline* pipeline) {
- SourceContainer& sources = pipeline->sources;
- if (sources.empty())
- return;
-
- // move all sources to a temporary list
- SourceContainer tempSources;
- sources.swap(tempSources);
-
- // move the first one to the final list
- sources.push_back(tempSources[0]);
-
- // run through the sources, coalescing them or keeping them
- for (size_t tempn = tempSources.size(), tempi = 1; tempi < tempn; ++tempi) {
- // If we can't coalesce the source with the last, then move it
- // to the final list, and make it the new last. (If we succeeded,
- // then we're still on the same last, and there's no need to move
- // or do anything with the source -- the destruction of tempSources
- // will take care of the rest.)
- intrusive_ptr<DocumentSource> &pLastSource = sources.back();
- intrusive_ptr<DocumentSource> &pTemp = tempSources[tempi];
- verify(pTemp && pLastSource);
- if (!pLastSource->coalesce(pTemp))
- sources.push_back(pTemp);
+}
+
+void Pipeline::Optimizations::Local::optimizeEachDocumentSource(Pipeline* pipeline) {
+ SourceContainer& sources = pipeline->sources;
+ SourceContainer newSources;
+ for (SourceContainer::iterator it(sources.begin()); it != sources.end(); ++it) {
+ if (auto out = (*it)->optimize()) {
+ newSources.push_back(std::move(out));
}
}
-
- void Pipeline::Optimizations::Local::optimizeEachDocumentSource(Pipeline* pipeline) {
- SourceContainer& sources = pipeline->sources;
- SourceContainer newSources;
- for (SourceContainer::iterator it(sources.begin()); it != sources.end(); ++it) {
- if (auto out = (*it)->optimize()) {
- newSources.push_back(std::move(out));
+ pipeline->sources = std::move(newSources);
+}
+
+void Pipeline::Optimizations::Local::duplicateMatchBeforeInitalRedact(Pipeline* pipeline) {
+ SourceContainer& sources = pipeline->sources;
+ if (sources.size() >= 2 && dynamic_cast<DocumentSourceRedact*>(sources[0].get())) {
+ if (DocumentSourceMatch* match = dynamic_cast<DocumentSourceMatch*>(sources[1].get())) {
+ const BSONObj redactSafePortion = match->redactSafePortion();
+ if (!redactSafePortion.isEmpty()) {
+ sources.push_front(DocumentSourceMatch::createFromBson(
+ BSON("$match" << redactSafePortion).firstElement(), pipeline->pCtx));
}
}
- pipeline->sources = std::move(newSources);
}
-
- void Pipeline::Optimizations::Local::duplicateMatchBeforeInitalRedact(Pipeline* pipeline) {
- SourceContainer& sources = pipeline->sources;
- if (sources.size() >= 2 && dynamic_cast<DocumentSourceRedact*>(sources[0].get())) {
- if (DocumentSourceMatch* match = dynamic_cast<DocumentSourceMatch*>(sources[1].get())) {
- const BSONObj redactSafePortion = match->redactSafePortion();
- if (!redactSafePortion.isEmpty()) {
- sources.push_front(
- DocumentSourceMatch::createFromBson(
- BSON("$match" << redactSafePortion).firstElement(),
- pipeline->pCtx));
- }
+}
+
+void Pipeline::addRequiredPrivileges(Command* commandTemplate,
+ const string& db,
+ BSONObj cmdObj,
+ vector<Privilege>* out) {
+ ResourcePattern inputResource(commandTemplate->parseResourcePattern(db, cmdObj));
+ uassert(17138,
+ mongoutils::str::stream() << "Invalid input resource, " << inputResource.toString(),
+ inputResource.isExactNamespacePattern());
+
+ out->push_back(Privilege(inputResource, ActionType::find));
+
+ BSONObj pipeline = cmdObj.getObjectField("pipeline");
+ BSONForEach(stageElem, pipeline) {
+ BSONObj stage = stageElem.embeddedObjectUserCheck();
+ if (str::equals(stage.firstElementFieldName(), "$out")) {
+ NamespaceString outputNs(db, stage.firstElement().str());
+ uassert(17139,
+ mongoutils::str::stream() << "Invalid $out target namespace, " << outputNs.ns(),
+ outputNs.isValid());
+
+ ActionSet actions;
+ actions.addAction(ActionType::remove);
+ actions.addAction(ActionType::insert);
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actions.addAction(ActionType::bypassDocumentValidation);
}
+
+ out->push_back(Privilege(ResourcePattern::forExactNamespace(outputNs), actions));
}
}
-
- void Pipeline::addRequiredPrivileges(Command* commandTemplate,
- const string& db,
- BSONObj cmdObj,
- vector<Privilege>* out) {
- ResourcePattern inputResource(commandTemplate->parseResourcePattern(db, cmdObj));
- uassert(17138,
- mongoutils::str::stream() << "Invalid input resource, " << inputResource.toString(),
- inputResource.isExactNamespacePattern());
-
- out->push_back(Privilege(inputResource, ActionType::find));
-
- BSONObj pipeline = cmdObj.getObjectField("pipeline");
- BSONForEach(stageElem, pipeline) {
- BSONObj stage = stageElem.embeddedObjectUserCheck();
- if (str::equals(stage.firstElementFieldName(), "$out")) {
- NamespaceString outputNs(db, stage.firstElement().str());
- uassert(17139,
- mongoutils::str::stream() << "Invalid $out target namespace, " <<
- outputNs.ns(),
- outputNs.isValid());
-
- ActionSet actions;
- actions.addAction(ActionType::remove);
- actions.addAction(ActionType::insert);
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
-
- out->push_back(Privilege(ResourcePattern::forExactNamespace(outputNs), actions));
- }
+}
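
Editor's note, as a concrete illustration of the privilege set built above (collection names hypothetical): an aggregate on db.input whose pipeline ends in { $out: "results" } requires ActionType::find on db.input plus ActionType::remove and ActionType::insert on db.results, with ActionType::bypassDocumentValidation added only when the command sets that option.
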
+
+intrusive_ptr<Pipeline> Pipeline::splitForSharded() {
+ // Create and initialize the shard spec we'll return. We start with an empty pipeline on the
+ // shards and all work being done in the merger. Optimizations can move operations between
+ // the pipelines to be more efficient.
+ intrusive_ptr<Pipeline> shardPipeline(new Pipeline(pCtx));
+ shardPipeline->explain = explain;
+
+ // The order in which optimizations are applied can have significant impact on the
+ // efficiency of the final pipeline. Be Careful!
+ Optimizations::Sharded::findSplitPoint(shardPipeline.get(), this);
+ Optimizations::Sharded::moveFinalUnwindFromShardsToMerger(shardPipeline.get(), this);
+ Optimizations::Sharded::limitFieldsSentFromShardsToMerger(shardPipeline.get(), this);
+
+ return shardPipeline;
+}
+
+void Pipeline::Optimizations::Sharded::findSplitPoint(Pipeline* shardPipe, Pipeline* mergePipe) {
+ while (!mergePipe->sources.empty()) {
+ intrusive_ptr<DocumentSource> current = mergePipe->sources.front();
+ mergePipe->sources.pop_front();
+
+ // Check if this source is splittable
+ SplittableDocumentSource* splittable =
+ dynamic_cast<SplittableDocumentSource*>(current.get());
+
+ if (!splittable) {
+ // move the source from the merger sources to the shard sources
+ shardPipe->sources.push_back(current);
+ } else {
+ // split this source into Merge and Shard sources
+ intrusive_ptr<DocumentSource> shardSource = splittable->getShardSource();
+ intrusive_ptr<DocumentSource> mergeSource = splittable->getMergeSource();
+ if (shardSource)
+ shardPipe->sources.push_back(shardSource);
+ if (mergeSource)
+ mergePipe->sources.push_front(mergeSource);
+
+ break;
}
}
+}
+
+void Pipeline::Optimizations::Sharded::moveFinalUnwindFromShardsToMerger(Pipeline* shardPipe,
+ Pipeline* mergePipe) {
+ while (!shardPipe->sources.empty() &&
+ dynamic_cast<DocumentSourceUnwind*>(shardPipe->sources.back().get())) {
+ mergePipe->sources.push_front(shardPipe->sources.back());
+ shardPipe->sources.pop_back();
+ }
+}
+
+void Pipeline::Optimizations::Sharded::limitFieldsSentFromShardsToMerger(Pipeline* shardPipe,
+ Pipeline* mergePipe) {
+ DepsTracker mergeDeps = mergePipe->getDependencies(shardPipe->getInitialQuery());
+ if (mergeDeps.needWholeDocument)
+ return; // the merge needs all fields, so nothing we can do.
+
+ // Empty project is "special" so if no fields are needed, we just ask for _id instead.
+ if (mergeDeps.fields.empty())
+ mergeDeps.fields.insert("_id");
+
+ // Remove metadata from dependencies since it automatically flows through projection and we
+ // don't want to project it in to the document.
+ mergeDeps.needTextScore = false;
+
+ // HEURISTIC: only apply optimization if none of the shard stages have an exhaustive list of
+ // field dependencies. While this may not be 100% ideal in all cases, it is simple and
+ // avoids the worst cases by ensuring that:
+ // 1) Optimization IS applied when the shards wouldn't have known their exhaustive list of
+ // dependencies. This situation can happen when a $sort is before the first $project or
+ // $group. Without the optimization, the shards would have to reify and transmit full
+ // objects even though only a subset of fields are needed.
+ // 2) Optimization IS NOT applied immediately following a $project or $group since it would
+ // add an unnecessary project (and therefore a deep-copy).
+ for (size_t i = 0; i < shardPipe->sources.size(); i++) {
+ DepsTracker dt; // ignored
+ if (shardPipe->sources[i]->getDependencies(&dt) & DocumentSource::EXHAUSTIVE_FIELDS)
+ return;
+ }
- intrusive_ptr<Pipeline> Pipeline::splitForSharded() {
- // Create and initialize the shard spec we'll return. We start with an empty pipeline on the
- // shards and all work being done in the merger. Optimizations can move operations between
- // the pipelines to be more efficient.
- intrusive_ptr<Pipeline> shardPipeline(new Pipeline(pCtx));
- shardPipeline->explain = explain;
-
- // The order in which optimizations are applied can have significant impact on the
- // efficiency of the final pipeline. Be Careful!
- Optimizations::Sharded::findSplitPoint(shardPipeline.get(), this);
- Optimizations::Sharded::moveFinalUnwindFromShardsToMerger(shardPipeline.get(), this);
- Optimizations::Sharded::limitFieldsSentFromShardsToMerger(shardPipeline.get(), this);
+ // if we get here, add the project.
+ shardPipe->sources.push_back(DocumentSourceProject::createFromBson(
+ BSON("$project" << mergeDeps.toProjection()).firstElement(), shardPipe->pCtx));
+}
- return shardPipeline;
- }
+BSONObj Pipeline::getInitialQuery() const {
+ if (sources.empty())
+ return BSONObj();
- void Pipeline::Optimizations::Sharded::findSplitPoint(Pipeline* shardPipe,
- Pipeline* mergePipe) {
- while (!mergePipe->sources.empty()) {
- intrusive_ptr<DocumentSource> current = mergePipe->sources.front();
- mergePipe->sources.pop_front();
+ /* look for an initial $match */
+ DocumentSourceMatch* match = dynamic_cast<DocumentSourceMatch*>(sources.front().get());
+ if (!match)
+ return BSONObj();
- // Check if this source is splittable
- SplittableDocumentSource* splittable =
- dynamic_cast<SplittableDocumentSource*>(current.get());
+ return match->getQuery();
+}
- if (!splittable){
- // move the source from the merger sources to the shard sources
- shardPipe->sources.push_back(current);
- }
- else {
- // split this source into Merge and Shard sources
- intrusive_ptr<DocumentSource> shardSource = splittable->getShardSource();
- intrusive_ptr<DocumentSource> mergeSource = splittable->getMergeSource();
- if (shardSource) shardPipe->sources.push_back(shardSource);
- if (mergeSource) mergePipe->sources.push_front(mergeSource);
-
- break;
- }
- }
+bool Pipeline::hasOutStage() const {
+ if (sources.empty()) {
+ return false;
}
- void Pipeline::Optimizations::Sharded::moveFinalUnwindFromShardsToMerger(Pipeline* shardPipe,
- Pipeline* mergePipe) {
- while (!shardPipe->sources.empty()
- && dynamic_cast<DocumentSourceUnwind*>(shardPipe->sources.back().get())) {
- mergePipe->sources.push_front(shardPipe->sources.back());
- shardPipe->sources.pop_back();
- }
+ // The $out stage must be the last one in the pipeline, so check if the last stage is $out.
+ return dynamic_cast<DocumentSourceOut*>(sources.back().get());
+}
+
+Document Pipeline::serialize() const {
+ MutableDocument serialized;
+ // create an array out of the pipeline operations
+ vector<Value> array;
+ for (SourceContainer::const_iterator iter(sources.begin()), listEnd(sources.end());
+ iter != listEnd;
+ ++iter) {
+ intrusive_ptr<DocumentSource> pSource(*iter);
+ pSource->serializeToArray(array);
}
- void Pipeline::Optimizations::Sharded::limitFieldsSentFromShardsToMerger(Pipeline* shardPipe,
- Pipeline* mergePipe) {
- DepsTracker mergeDeps = mergePipe->getDependencies(shardPipe->getInitialQuery());
- if (mergeDeps.needWholeDocument)
- return; // the merge needs all fields, so nothing we can do.
-
- // Empty project is "special" so if no fields are needed, we just ask for _id instead.
- if (mergeDeps.fields.empty())
- mergeDeps.fields.insert("_id");
-
- // Remove metadata from dependencies since it automatically flows through projection and we
- // don't want to project it in to the document.
- mergeDeps.needTextScore = false;
-
- // HEURISTIC: only apply optimization if none of the shard stages have an exhaustive list of
- // field dependencies. While this may not be 100% ideal in all cases, it is simple and
- // avoids the worst cases by ensuring that:
- // 1) Optimization IS applied when the shards wouldn't have known their exhaustive list of
- // dependencies. This situation can happen when a $sort is before the first $project or
- // $group. Without the optimization, the shards would have to reify and transmit full
- // objects even though only a subset of fields are needed.
- // 2) Optimization IS NOT applied immediately following a $project or $group since it would
- // add an unnecessary project (and therefore a deep-copy).
- for (size_t i = 0; i < shardPipe->sources.size(); i++) {
- DepsTracker dt; // ignored
- if (shardPipe->sources[i]->getDependencies(&dt) & DocumentSource::EXHAUSTIVE_FIELDS)
- return;
- }
+ // add the top-level items to the command
+ serialized.setField(commandName, Value(pCtx->ns.coll()));
+ serialized.setField(pipelineName, Value(array));
- // if we get here, add the project.
- shardPipe->sources.push_back(
- DocumentSourceProject::createFromBson(
- BSON("$project" << mergeDeps.toProjection()).firstElement(),
- shardPipe->pCtx));
+ if (explain) {
+ serialized.setField(explainName, Value(explain));
}
- BSONObj Pipeline::getInitialQuery() const {
- if (sources.empty())
- return BSONObj();
-
- /* look for an initial $match */
- DocumentSourceMatch* match = dynamic_cast<DocumentSourceMatch*>(sources.front().get());
- if (!match)
- return BSONObj();
-
- return match->getQuery();
+ if (pCtx->extSortAllowed) {
+ serialized.setField("allowDiskUse", Value(true));
}
- bool Pipeline::hasOutStage() const {
- if (sources.empty()) {
- return false;
- }
-
- // The $out stage must be the last one in the pipeline, so check if the last stage is $out.
- return dynamic_cast<DocumentSourceOut*>(sources.back().get());
+ if (pCtx->bypassDocumentValidation) {
+ serialized.setField(bypassDocumentValidationCommandOption(), Value(true));
}
- Document Pipeline::serialize() const {
- MutableDocument serialized;
- // create an array out of the pipeline operations
- vector<Value> array;
- for(SourceContainer::const_iterator iter(sources.begin()),
- listEnd(sources.end());
- iter != listEnd;
- ++iter) {
- intrusive_ptr<DocumentSource> pSource(*iter);
- pSource->serializeToArray(array);
- }
-
- // add the top-level items to the command
- serialized.setField(commandName, Value(pCtx->ns.coll()));
- serialized.setField(pipelineName, Value(array));
-
- if (explain) {
- serialized.setField(explainName, Value(explain));
- }
-
- if (pCtx->extSortAllowed) {
- serialized.setField("allowDiskUse", Value(true));
- }
+ return serialized.freeze();
+}
- if (pCtx->bypassDocumentValidation) {
- serialized.setField(bypassDocumentValidationCommandOption(), Value(true));
- }
+void Pipeline::stitch() {
+ massert(16600, "should not have an empty pipeline", !sources.empty());
- return serialized.freeze();
+ /* chain together the sources we found */
+ DocumentSource* prevSource = sources.front().get();
+ for (SourceContainer::iterator iter(sources.begin() + 1), listEnd(sources.end());
+ iter != listEnd;
+ ++iter) {
+ intrusive_ptr<DocumentSource> pTemp(*iter);
+ pTemp->setSource(prevSource);
+ prevSource = pTemp.get();
}
-
- void Pipeline::stitch() {
- massert(16600, "should not have an empty pipeline",
- !sources.empty());
-
- /* chain together the sources we found */
- DocumentSource* prevSource = sources.front().get();
- for(SourceContainer::iterator iter(sources.begin() + 1),
- listEnd(sources.end());
- iter != listEnd;
- ++iter) {
- intrusive_ptr<DocumentSource> pTemp(*iter);
- pTemp->setSource(prevSource);
- prevSource = pTemp.get();
- }
+}
+
+void Pipeline::run(BSONObjBuilder& result) {
+ // should not get here in the explain case
+ verify(!explain);
+
+ // the array in which the aggregation results reside
+    // can't use subArrayStart() due to error handling
+ BSONArrayBuilder resultArray;
+ DocumentSource* finalSource = sources.back().get();
+ while (boost::optional<Document> next = finalSource->getNext()) {
+ // add the document to the result set
+ BSONObjBuilder documentBuilder(resultArray.subobjStart());
+ next->toBson(&documentBuilder);
+ documentBuilder.doneFast();
+        // assert if the result would be too large; the extra 1KB allows for headers
+ uassert(16389,
+ str::stream() << "aggregation result exceeds maximum document size ("
+ << BSONObjMaxUserSize / (1024 * 1024) << "MB)",
+ resultArray.len() < BSONObjMaxUserSize - 1024);
}
- void Pipeline::run(BSONObjBuilder& result) {
- // should not get here in the explain case
- verify(!explain);
-
- // the array in which the aggregation results reside
- // cant use subArrayStart() due to error handling
- BSONArrayBuilder resultArray;
- DocumentSource* finalSource = sources.back().get();
- while (boost::optional<Document> next = finalSource->getNext()) {
- // add the document to the result set
- BSONObjBuilder documentBuilder (resultArray.subobjStart());
- next->toBson(&documentBuilder);
- documentBuilder.doneFast();
- // object will be too large, assert. the extra 1KB is for headers
- uassert(16389,
- str::stream() << "aggregation result exceeds maximum document size ("
- << BSONObjMaxUserSize / (1024 * 1024) << "MB)",
- resultArray.len() < BSONObjMaxUserSize - 1024);
- }
+ resultArray.done();
+ result.appendArray("result", resultArray.arr());
+}
- resultArray.done();
- result.appendArray("result", resultArray.arr());
+vector<Value> Pipeline::writeExplainOps() const {
+ vector<Value> array;
+ for (SourceContainer::const_iterator it = sources.begin(); it != sources.end(); ++it) {
+ (*it)->serializeToArray(array, /*explain=*/true);
}
-
- vector<Value> Pipeline::writeExplainOps() const {
- vector<Value> array;
- for(SourceContainer::const_iterator it = sources.begin(); it != sources.end(); ++it) {
- (*it)->serializeToArray(array, /*explain=*/true);
+ return array;
+}
+
+void Pipeline::addInitialSource(intrusive_ptr<DocumentSource> source) {
+ sources.push_front(source);
+}
+
+DepsTracker Pipeline::getDependencies(const BSONObj& initialQuery) const {
+ DepsTracker deps;
+ bool knowAllFields = false;
+ bool knowAllMeta = false;
+ for (size_t i = 0; i < sources.size() && !(knowAllFields && knowAllMeta); i++) {
+ DepsTracker localDeps;
+ DocumentSource::GetDepsReturn status = sources[i]->getDependencies(&localDeps);
+
+ if (status == DocumentSource::NOT_SUPPORTED) {
+ // Assume this stage needs everything. We may still know something about our
+ // dependencies if an earlier stage returned either EXHAUSTIVE_FIELDS or
+ // EXHAUSTIVE_META.
+ break;
}
- return array;
- }
- void Pipeline::addInitialSource(intrusive_ptr<DocumentSource> source) {
- sources.push_front(source);
- }
-
- DepsTracker Pipeline::getDependencies(const BSONObj& initialQuery) const {
- DepsTracker deps;
- bool knowAllFields = false;
- bool knowAllMeta = false;
- for (size_t i=0; i < sources.size() && !(knowAllFields && knowAllMeta); i++) {
- DepsTracker localDeps;
- DocumentSource::GetDepsReturn status = sources[i]->getDependencies(&localDeps);
-
- if (status == DocumentSource::NOT_SUPPORTED) {
- // Assume this stage needs everything. We may still know something about our
- // dependencies if an earlier stage returned either EXHAUSTIVE_FIELDS or
- // EXHAUSTIVE_META.
- break;
- }
-
- if (!knowAllFields) {
- deps.fields.insert(localDeps.fields.begin(), localDeps.fields.end());
- if (localDeps.needWholeDocument)
- deps.needWholeDocument = true;
- knowAllFields = status & DocumentSource::EXHAUSTIVE_FIELDS;
- }
-
- if (!knowAllMeta) {
- if (localDeps.needTextScore)
- deps.needTextScore = true;
-
- knowAllMeta = status & DocumentSource::EXHAUSTIVE_META;
- }
+ if (!knowAllFields) {
+ deps.fields.insert(localDeps.fields.begin(), localDeps.fields.end());
+ if (localDeps.needWholeDocument)
+ deps.needWholeDocument = true;
+ knowAllFields = status & DocumentSource::EXHAUSTIVE_FIELDS;
}
- if (!knowAllFields)
- deps.needWholeDocument = true; // don't know all fields we need
-
- // NOTE This code assumes that textScore can only be generated by the initial query.
- if (DocumentSourceMatch::isTextQuery(initialQuery)) {
- // If doing a text query, assume we need the score if we can't prove we don't.
- if (!knowAllMeta)
+ if (!knowAllMeta) {
+ if (localDeps.needTextScore)
deps.needTextScore = true;
+
+ knowAllMeta = status & DocumentSource::EXHAUSTIVE_META;
}
- else {
- // If we aren't doing a text query, then we don't need to ask for the textScore since we
- // know it will be missing anyway.
- deps.needTextScore = false;
- }
+ }
- return deps;
+ if (!knowAllFields)
+ deps.needWholeDocument = true; // don't know all fields we need
+
+    // NOTE: This code assumes that textScore can only be generated by the initial query.
+ if (DocumentSourceMatch::isTextQuery(initialQuery)) {
+ // If doing a text query, assume we need the score if we can't prove we don't.
+ if (!knowAllMeta)
+ deps.needTextScore = true;
+ } else {
+ // If we aren't doing a text query, then we don't need to ask for the textScore since we
+ // know it will be missing anyway.
+ deps.needTextScore = false;
}
-} // namespace mongo
+
+ return deps;
+}
+} // namespace mongo
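As a side note on the dependency-tracking logic in Pipeline::getDependencies() above, the standalone sketch below restates the same accumulation rules with invented stand-ins (the StageDeps struct, DepsFlags enum, and collectDependencies() are hypothetical, not MongoDB types): field requirements are unioned until a stage reports an exhaustive field list, metadata needs are tracked separately, and the text score is only requested when the initial query is a $text query.

#include <set>
#include <string>
#include <vector>

// Hypothetical stand-ins for DepsTracker and DocumentSource::GetDepsReturn.
enum DepsFlags { SEE_NEXT = 0, EXHAUSTIVE_FIELDS = 1, EXHAUSTIVE_META = 2, NOT_SUPPORTED = 4 };

struct StageDeps {
    std::set<std::string> fields;   // fields this stage reads
    bool needWholeDocument = false;
    bool needTextScore = false;
    int status = SEE_NEXT;          // bitwise OR of DepsFlags
};

// Mirrors Pipeline::getDependencies(): union field needs until some stage reports an
// exhaustive field list, and track metadata (text score) needs independently.
StageDeps collectDependencies(const std::vector<StageDeps>& stages, bool isTextQuery) {
    StageDeps deps;
    bool knowAllFields = false;
    bool knowAllMeta = false;
    for (const StageDeps& s : stages) {
        if (knowAllFields && knowAllMeta)
            break;
        if (s.status & NOT_SUPPORTED)
            break;  // this stage may need anything we have not already ruled out
        if (!knowAllFields) {
            deps.fields.insert(s.fields.begin(), s.fields.end());
            deps.needWholeDocument = deps.needWholeDocument || s.needWholeDocument;
            knowAllFields = (s.status & EXHAUSTIVE_FIELDS) != 0;
        }
        if (!knowAllMeta) {
            deps.needTextScore = deps.needTextScore || s.needTextScore;
            knowAllMeta = (s.status & EXHAUSTIVE_META) != 0;
        }
    }
    if (!knowAllFields)
        deps.needWholeDocument = true;  // never proved the field list complete
    // Text score can only come from a $text initial query; otherwise it is never needed.
    deps.needTextScore = isTextQuery ? (deps.needTextScore || !knowAllMeta) : false;
    return deps;
}

A stage reporting NOT_SUPPORTED ends the scan early, which is why the whole document is then requested for the remaining stages.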
diff --git a/src/mongo/db/pipeline/pipeline.h b/src/mongo/db/pipeline/pipeline.h
index c6274fbf17e..0eb9988d6af 100644
--- a/src/mongo/db/pipeline/pipeline.h
+++ b/src/mongo/db/pipeline/pipeline.h
@@ -37,149 +37,155 @@
#include "mongo/util/timer.h"
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class Command;
- struct DepsTracker;
- class DocumentSource;
- struct ExpressionContext;
- class Privilege;
-
- /** mongodb "commands" (sent via db.$cmd.findOne(...))
- subclass to make a command. define a singleton object for it.
- */
- class Pipeline :
- public IntrusiveCounterUnsigned {
+class BSONObj;
+class BSONObjBuilder;
+class Command;
+struct DepsTracker;
+class DocumentSource;
+struct ExpressionContext;
+class Privilege;
+
+/** mongodb "commands" (sent via db.$cmd.findOne(...))
+ subclass to make a command. define a singleton object for it.
+ */
+class Pipeline : public IntrusiveCounterUnsigned {
+public:
+ /**
+ * Create a pipeline from the command.
+ *
+ * @param errmsg where to write errors, if there are any
+ * @param cmdObj the command object sent from the client
+ * @returns the pipeline, if created, otherwise a NULL reference
+ */
+ static boost::intrusive_ptr<Pipeline> parseCommand(
+ std::string& errmsg,
+ const BSONObj& cmdObj,
+ const boost::intrusive_ptr<ExpressionContext>& pCtx);
+
+ /// Helper to implement Command::addRequiredPrivileges
+ static void addRequiredPrivileges(Command* commandTemplate,
+ const std::string& dbname,
+ BSONObj cmdObj,
+ std::vector<Privilege>* out);
+
+ const boost::intrusive_ptr<ExpressionContext>& getContext() const {
+ return pCtx;
+ }
+
+ /**
+ Split the current Pipeline into a Pipeline for each shard, and
+ a Pipeline that combines the results within mongos.
+
+ This permanently alters this pipeline for the merging operation.
+
+ @returns the Spec for the pipeline command that should be sent
+ to the shards
+ */
+ boost::intrusive_ptr<Pipeline> splitForSharded();
+
+ /** If the pipeline starts with a $match, return its BSON predicate.
+ * Returns empty BSON if the first stage isn't $match.
+ */
+ BSONObj getInitialQuery() const;
+
+ /**
+ * Returns true if the pipeline contains a $out stage, and false otherwise.
+ */
+ bool hasOutStage() const;
+
+ /**
+ Write the Pipeline as a BSONObj command. This should be the
+ inverse of parseCommand().
+
+ This is only intended to be used by the shard command obtained
+ from splitForSharded(). Some pipeline operations in the merge
+ process do not have equivalent command forms, and using this on
+ the mongos Pipeline will cause assertions.
+
+      @returns the pipeline serialized as a Document
+ */
+ Document serialize() const;
+
+ /** Stitch together the source pointers (by calling setSource) for each source in sources.
+ * Must be called after optimize and addInitialSource but before trying to get results.
+ */
+ void stitch();
+
+ /**
+ Run the Pipeline on the given source.
+
+ @param result builder to write the result to
+ */
+ void run(BSONObjBuilder& result);
+
+ bool isExplain() const {
+ return explain;
+ }
+
+ /// The initial source is special since it varies between mongos and mongod.
+ void addInitialSource(boost::intrusive_ptr<DocumentSource> source);
+
+ /// The source that represents the output. Returns a non-owning pointer.
+ DocumentSource* output() {
+ invariant(!sources.empty());
+ return sources.back().get();
+ }
+
+ /**
+ * Write the pipeline's operators to a std::vector<Value>, with the
+ * explain flag true (for DocumentSource::serializeToArray()).
+ */
+ std::vector<Value> writeExplainOps() const;
+
+ /**
+ * Returns the dependencies needed by this pipeline.
+ *
+ * initialQuery is used as a fallback for metadata dependency detection. The assumption is
+ * that any metadata produced by the query is needed unless we can prove it isn't.
+ */
+ DepsTracker getDependencies(const BSONObj& initialQuery) const;
+
+ /**
+ The aggregation command name.
+ */
+ static const char commandName[];
+
+ /*
+ PipelineD is a "sister" class that has additional functionality
+ for the Pipeline. It exists because of linkage requirements.
+ Pipeline needs to function in mongod and mongos. PipelineD
+ contains extra functionality required in mongod, and which can't
+ appear in mongos because the required symbols are unavailable
+ for linking there. Consider PipelineD to be an extension of this
+ class for mongod only.
+ */
+ friend class PipelineD;
+
+private:
+ class Optimizations {
public:
- /**
- * Create a pipeline from the command.
- *
- * @param errmsg where to write errors, if there are any
- * @param cmdObj the command object sent from the client
- * @returns the pipeline, if created, otherwise a NULL reference
- */
- static boost::intrusive_ptr<Pipeline> parseCommand(
- std::string& errmsg,
- const BSONObj& cmdObj,
- const boost::intrusive_ptr<ExpressionContext>& pCtx);
-
- /// Helper to implement Command::addRequiredPrivileges
- static void addRequiredPrivileges(Command* commandTemplate,
- const std::string& dbname,
- BSONObj cmdObj,
- std::vector<Privilege>* out);
-
- const boost::intrusive_ptr<ExpressionContext>& getContext() const { return pCtx; }
-
- /**
- Split the current Pipeline into a Pipeline for each shard, and
- a Pipeline that combines the results within mongos.
-
- This permanently alters this pipeline for the merging operation.
-
- @returns the Spec for the pipeline command that should be sent
- to the shards
- */
- boost::intrusive_ptr<Pipeline> splitForSharded();
-
- /** If the pipeline starts with a $match, return its BSON predicate.
- * Returns empty BSON if the first stage isn't $match.
- */
- BSONObj getInitialQuery() const;
-
- /**
- * Returns true if the pipeline contains a $out stage, and false otherwise.
- */
- bool hasOutStage() const;
-
- /**
- Write the Pipeline as a BSONObj command. This should be the
- inverse of parseCommand().
-
- This is only intended to be used by the shard command obtained
- from splitForSharded(). Some pipeline operations in the merge
- process do not have equivalent command forms, and using this on
- the mongos Pipeline will cause assertions.
-
- @param the builder to write the command to
- */
- Document serialize() const;
-
- /** Stitch together the source pointers (by calling setSource) for each source in sources.
- * Must be called after optimize and addInitialSource but before trying to get results.
- */
- void stitch();
-
- /**
- Run the Pipeline on the given source.
-
- @param result builder to write the result to
- */
- void run(BSONObjBuilder& result);
-
- bool isExplain() const { return explain; }
-
- /// The initial source is special since it varies between mongos and mongod.
- void addInitialSource(boost::intrusive_ptr<DocumentSource> source);
-
- /// The source that represents the output. Returns a non-owning pointer.
- DocumentSource* output() { invariant( !sources.empty() ); return sources.back().get(); }
-
- /**
- * Write the pipeline's operators to a std::vector<Value>, with the
- * explain flag true (for DocumentSource::serializeToArray()).
- */
- std::vector<Value> writeExplainOps() const;
-
- /**
- * Returns the dependencies needed by this pipeline.
- *
- * initialQuery is used as a fallback for metadata dependency detection. The assumption is
- * that any metadata produced by the query is needed unless we can prove it isn't.
- */
- DepsTracker getDependencies(const BSONObj& initialQuery) const;
-
- /**
- The aggregation command name.
- */
- static const char commandName[];
-
- /*
- PipelineD is a "sister" class that has additional functionality
- for the Pipeline. It exists because of linkage requirements.
- Pipeline needs to function in mongod and mongos. PipelineD
- contains extra functionality required in mongod, and which can't
- appear in mongos because the required symbols are unavailable
- for linking there. Consider PipelineD to be an extension of this
- class for mongod only.
- */
- friend class PipelineD;
-
- private:
- class Optimizations {
- public:
- // These contain static functions that optimize pipelines in various ways.
- // They are classes rather than namespaces so that they can be friends of Pipeline.
- // Classes are defined in pipeline_optimizations.h.
- class Local;
- class Sharded;
- };
-
- friend class Optimizations::Local;
- friend class Optimizations::Sharded;
-
- static const char pipelineName[];
- static const char explainName[];
- static const char fromRouterName[];
- static const char serverPipelineName[];
- static const char mongosPipelineName[];
-
- Pipeline(const boost::intrusive_ptr<ExpressionContext> &pCtx);
-
- typedef std::deque<boost::intrusive_ptr<DocumentSource> > SourceContainer;
- SourceContainer sources;
- bool explain;
-
- boost::intrusive_ptr<ExpressionContext> pCtx;
+ // These contain static functions that optimize pipelines in various ways.
+ // They are classes rather than namespaces so that they can be friends of Pipeline.
+ // Classes are defined in pipeline_optimizations.h.
+ class Local;
+ class Sharded;
};
-} // namespace mongo
+
+ friend class Optimizations::Local;
+ friend class Optimizations::Sharded;
+
+ static const char pipelineName[];
+ static const char explainName[];
+ static const char fromRouterName[];
+ static const char serverPipelineName[];
+ static const char mongosPipelineName[];
+
+ Pipeline(const boost::intrusive_ptr<ExpressionContext>& pCtx);
+
+ typedef std::deque<boost::intrusive_ptr<DocumentSource>> SourceContainer;
+ SourceContainer sources;
+ bool explain;
+
+ boost::intrusive_ptr<ExpressionContext> pCtx;
+};
+} // namespace mongo
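The header above implies a call order: build the pipeline, addInitialSource(), stitch(), then run() or writeExplainOps(). The self-contained sketch below mirrors that lifecycle with toy types; MockStage, CounterSource, and DoubleStage are invented for illustration and are not MongoDB classes, while the "stitch" and "run" loops follow the shapes of Pipeline::stitch() and Pipeline::run().

#include <deque>
#include <iostream>
#include <memory>
#include <optional>

// Toy stand-in for DocumentSource: a pull-based stage with an upstream source.
struct MockStage {
    virtual ~MockStage() = default;
    virtual std::optional<int> getNext() = 0;
    void setSource(MockStage* s) { source = s; }
    MockStage* source = nullptr;
};

struct CounterSource : MockStage {  // plays the role of the initial cursor source
    int n = 0;
    std::optional<int> getNext() override {
        return n < 3 ? std::optional<int>(n++) : std::nullopt;
    }
};

struct DoubleStage : MockStage {  // plays the role of a transforming stage
    std::optional<int> getNext() override {
        auto next = source->getNext();
        return next ? std::optional<int>(*next * 2) : std::nullopt;
    }
};

int main() {
    std::deque<std::shared_ptr<MockStage>> sources;
    sources.push_back(std::make_shared<CounterSource>());
    sources.push_back(std::make_shared<DoubleStage>());

    // "stitch": wire each stage to its predecessor, as Pipeline::stitch() does.
    for (size_t i = 1; i < sources.size(); ++i)
        sources[i]->setSource(sources[i - 1].get());

    // "run": drain the final stage, as Pipeline::run() drains sources.back().
    while (auto doc = sources.back()->getNext())
        std::cout << *doc << "\n";  // prints 0, 2, 4
}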
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index b6ddfdd7e12..6dbcfe4c812 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -45,219 +45,197 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::shared_ptr;
- using std::string;
+using boost::intrusive_ptr;
+using std::shared_ptr;
+using std::string;
namespace {
- class MongodImplementation final : public DocumentSourceNeedsMongod::MongodInterface {
- public:
- MongodImplementation(const intrusive_ptr<ExpressionContext>& ctx)
- : _ctx(ctx)
- , _client(ctx->opCtx)
- {}
-
- DBClientBase* directClient() final {
- // opCtx may have changed since our last call
- invariant(_ctx->opCtx);
- _client.setOpCtx(_ctx->opCtx);
- return &_client;
- }
+class MongodImplementation final : public DocumentSourceNeedsMongod::MongodInterface {
+public:
+ MongodImplementation(const intrusive_ptr<ExpressionContext>& ctx)
+ : _ctx(ctx), _client(ctx->opCtx) {}
+
+ DBClientBase* directClient() final {
+ // opCtx may have changed since our last call
+ invariant(_ctx->opCtx);
+ _client.setOpCtx(_ctx->opCtx);
+ return &_client;
+ }
- bool isSharded(const NamespaceString& ns) final {
- const ChunkVersion unsharded(0, 0, OID());
- return !(shardingState.getVersion(ns.ns()).isWriteCompatibleWith(unsharded));
- }
+ bool isSharded(const NamespaceString& ns) final {
+ const ChunkVersion unsharded(0, 0, OID());
+ return !(shardingState.getVersion(ns.ns()).isWriteCompatibleWith(unsharded));
+ }
- bool isCapped(const NamespaceString& ns) final {
- AutoGetCollectionForRead ctx(_ctx->opCtx, ns.ns());
- Collection* collection = ctx.getCollection();
- return collection && collection->isCapped();
- }
+ bool isCapped(const NamespaceString& ns) final {
+ AutoGetCollectionForRead ctx(_ctx->opCtx, ns.ns());
+ Collection* collection = ctx.getCollection();
+ return collection && collection->isCapped();
+ }
- BSONObj insert(const NamespaceString& ns, const std::vector<BSONObj>& objs) final {
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (_ctx->bypassDocumentValidation)
- maybeDisableValidation.emplace(_ctx->opCtx);
+ BSONObj insert(const NamespaceString& ns, const std::vector<BSONObj>& objs) final {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (_ctx->bypassDocumentValidation)
+ maybeDisableValidation.emplace(_ctx->opCtx);
- _client.insert(ns.ns(), objs);
- return _client.getLastErrorDetailed();
- }
+ _client.insert(ns.ns(), objs);
+ return _client.getLastErrorDetailed();
+ }
- private:
- intrusive_ptr<ExpressionContext> _ctx;
- DBDirectClient _client;
- };
+private:
+ intrusive_ptr<ExpressionContext> _ctx;
+ DBDirectClient _client;
+};
}
- shared_ptr<PlanExecutor> PipelineD::prepareCursorSource(
- OperationContext* txn,
- Collection* collection,
- const intrusive_ptr<Pipeline>& pPipeline,
- const intrusive_ptr<ExpressionContext>& pExpCtx) {
- // get the full "namespace" name
- const string& fullName = pExpCtx->ns.ns();
-
- // We will be modifying the source vector as we go
- Pipeline::SourceContainer& sources = pPipeline->sources;
-
- // Inject a MongodImplementation to sources that need them.
- for (size_t i = 0; i < sources.size(); i++) {
- DocumentSourceNeedsMongod* needsMongod =
- dynamic_cast<DocumentSourceNeedsMongod*>(sources[i].get());
- if (needsMongod) {
- needsMongod->injectMongodInterface(
- std::make_shared<MongodImplementation>(pExpCtx));
- }
+shared_ptr<PlanExecutor> PipelineD::prepareCursorSource(
+ OperationContext* txn,
+ Collection* collection,
+ const intrusive_ptr<Pipeline>& pPipeline,
+ const intrusive_ptr<ExpressionContext>& pExpCtx) {
+ // get the full "namespace" name
+ const string& fullName = pExpCtx->ns.ns();
+
+ // We will be modifying the source vector as we go
+ Pipeline::SourceContainer& sources = pPipeline->sources;
+
+ // Inject a MongodImplementation to sources that need them.
+ for (size_t i = 0; i < sources.size(); i++) {
+ DocumentSourceNeedsMongod* needsMongod =
+ dynamic_cast<DocumentSourceNeedsMongod*>(sources[i].get());
+ if (needsMongod) {
+ needsMongod->injectMongodInterface(std::make_shared<MongodImplementation>(pExpCtx));
}
+ }
- if (!sources.empty() && sources.front()->isValidInitialSource()) {
- if (dynamic_cast<DocumentSourceMergeCursors*>(sources.front().get())) {
- // Enable the hooks for setting up authentication on the subsequent internal
- // connections we are going to create. This would normally have been done
- // when SetShardVersion was called, but since SetShardVersion is never called
- // on secondaries, this is needed.
- ShardedConnectionInfo::addHook();
- }
- return std::shared_ptr<PlanExecutor>(); // don't need a cursor
+ if (!sources.empty() && sources.front()->isValidInitialSource()) {
+ if (dynamic_cast<DocumentSourceMergeCursors*>(sources.front().get())) {
+ // Enable the hooks for setting up authentication on the subsequent internal
+ // connections we are going to create. This would normally have been done
+ // when SetShardVersion was called, but since SetShardVersion is never called
+ // on secondaries, this is needed.
+ ShardedConnectionInfo::addHook();
}
+ return std::shared_ptr<PlanExecutor>(); // don't need a cursor
+ }
- // Look for an initial match. This works whether we got an initial query or not.
- // If not, it results in a "{}" query, which will be what we want in that case.
- const BSONObj queryObj = pPipeline->getInitialQuery();
- if (!queryObj.isEmpty()) {
- // This will get built in to the Cursor we'll create, so
- // remove the match from the pipeline
- sources.pop_front();
- }
-
- // Find the set of fields in the source documents depended on by this pipeline.
- const DepsTracker deps = pPipeline->getDependencies(queryObj);
-
- // Passing query an empty projection since it is faster to use ParsedDeps::extractFields().
- // This will need to change to support covering indexes (SERVER-12015). There is an
- // exception for textScore since that can only be retrieved by a query projection.
- const BSONObj projectionForQuery = deps.needTextScore ? deps.toProjection() : BSONObj();
-
- /*
- Look for an initial sort; we'll try to add this to the
- Cursor we create. If we're successful in doing that (further down),
- we'll remove the $sort from the pipeline, because the documents
- will already come sorted in the specified order as a result of the
- index scan.
- */
- intrusive_ptr<DocumentSourceSort> sortStage;
- BSONObj sortObj;
- if (!sources.empty()) {
- sortStage = dynamic_cast<DocumentSourceSort*>(sources.front().get());
- if (sortStage) {
- // build the sort key
- sortObj = sortStage->serializeSortKey(/*explain*/false).toBson();
- }
- }
-
- // Create the PlanExecutor.
- //
- // If we try to create a PlanExecutor that includes both the match and the
- // sort, and the two are incompatible wrt the available indexes, then
- // we don't get a PlanExecutor back.
- //
- // So we try to use both first. If that fails, try again, without the
- // sort.
- //
- // If we don't have a sort, jump straight to just creating a PlanExecutor.
- // without the sort.
- //
- // If we are able to incorporate the sort into the PlanExecutor, remove it
- // from the head of the pipeline.
- //
- // LATER - we should be able to find this out before we create the
- // cursor. Either way, we can then apply other optimizations there
- // are tickets for, such as SERVER-4507.
- const size_t runnerOptions = QueryPlannerParams::DEFAULT
- | QueryPlannerParams::INCLUDE_SHARD_FILTER
- | QueryPlannerParams::NO_BLOCKING_SORT
- ;
- std::shared_ptr<PlanExecutor> exec;
- bool sortInRunner = false;
-
- const WhereCallbackReal whereCallback(pExpCtx->opCtx, pExpCtx->ns.db());
+ // Look for an initial match. This works whether we got an initial query or not.
+ // If not, it results in a "{}" query, which will be what we want in that case.
+ const BSONObj queryObj = pPipeline->getInitialQuery();
+ if (!queryObj.isEmpty()) {
+ // This will get built in to the Cursor we'll create, so
+ // remove the match from the pipeline
+ sources.pop_front();
+ }
+ // Find the set of fields in the source documents depended on by this pipeline.
+ const DepsTracker deps = pPipeline->getDependencies(queryObj);
+
+ // Passing query an empty projection since it is faster to use ParsedDeps::extractFields().
+ // This will need to change to support covering indexes (SERVER-12015). There is an
+ // exception for textScore since that can only be retrieved by a query projection.
+ const BSONObj projectionForQuery = deps.needTextScore ? deps.toProjection() : BSONObj();
+
+ /*
+ Look for an initial sort; we'll try to add this to the
+ Cursor we create. If we're successful in doing that (further down),
+ we'll remove the $sort from the pipeline, because the documents
+ will already come sorted in the specified order as a result of the
+ index scan.
+ */
+ intrusive_ptr<DocumentSourceSort> sortStage;
+ BSONObj sortObj;
+ if (!sources.empty()) {
+ sortStage = dynamic_cast<DocumentSourceSort*>(sources.front().get());
if (sortStage) {
- CanonicalQuery* cq;
- Status status =
- CanonicalQuery::canonicalize(pExpCtx->ns,
- queryObj,
- sortObj,
- projectionForQuery,
- &cq,
- whereCallback);
-
- PlanExecutor* rawExec;
- if (status.isOK() && getExecutor(txn,
- collection,
- cq,
- PlanExecutor::YIELD_AUTO,
- &rawExec,
- runnerOptions).isOK()) {
- // success: The PlanExecutor will handle sorting for us using an index.
- exec.reset(rawExec);
- sortInRunner = true;
-
- sources.pop_front();
- if (sortStage->getLimitSrc()) {
- // need to reinsert coalesced $limit after removing $sort
- sources.push_front(sortStage->getLimitSrc());
- }
- }
+ // build the sort key
+ sortObj = sortStage->serializeSortKey(/*explain*/ false).toBson();
}
+ }
- if (!exec.get()) {
- const BSONObj noSort;
- CanonicalQuery* cq;
- uassertStatusOK(
- CanonicalQuery::canonicalize(pExpCtx->ns,
- queryObj,
- noSort,
- projectionForQuery,
- &cq,
- whereCallback));
-
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutor(txn,
- collection,
- cq,
- PlanExecutor::YIELD_AUTO,
- &rawExec,
- runnerOptions));
+ // Create the PlanExecutor.
+ //
+ // If we try to create a PlanExecutor that includes both the match and the
+ // sort, and the two are incompatible wrt the available indexes, then
+ // we don't get a PlanExecutor back.
+ //
+ // So we try to use both first. If that fails, try again, without the
+ // sort.
+ //
+    // If we don't have a sort, jump straight to creating a PlanExecutor without the sort.
+ //
+ // If we are able to incorporate the sort into the PlanExecutor, remove it
+ // from the head of the pipeline.
+ //
+ // LATER - we should be able to find this out before we create the
+ // cursor. Either way, we can then apply other optimizations there
+ // are tickets for, such as SERVER-4507.
+ const size_t runnerOptions = QueryPlannerParams::DEFAULT |
+ QueryPlannerParams::INCLUDE_SHARD_FILTER | QueryPlannerParams::NO_BLOCKING_SORT;
+ std::shared_ptr<PlanExecutor> exec;
+ bool sortInRunner = false;
+
+ const WhereCallbackReal whereCallback(pExpCtx->opCtx, pExpCtx->ns.db());
+
+ if (sortStage) {
+ CanonicalQuery* cq;
+ Status status = CanonicalQuery::canonicalize(
+ pExpCtx->ns, queryObj, sortObj, projectionForQuery, &cq, whereCallback);
+
+ PlanExecutor* rawExec;
+ if (status.isOK() &&
+ getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, runnerOptions)
+ .isOK()) {
+ // success: The PlanExecutor will handle sorting for us using an index.
exec.reset(rawExec);
+ sortInRunner = true;
+
+ sources.pop_front();
+ if (sortStage->getLimitSrc()) {
+ // need to reinsert coalesced $limit after removing $sort
+ sources.push_front(sortStage->getLimitSrc());
+ }
}
+ }
+ if (!exec.get()) {
+ const BSONObj noSort;
+ CanonicalQuery* cq;
+ uassertStatusOK(CanonicalQuery::canonicalize(
+ pExpCtx->ns, queryObj, noSort, projectionForQuery, &cq, whereCallback));
- // DocumentSourceCursor expects a yielding PlanExecutor that has had its state saved. We
- // deregister the PlanExecutor so that it can be registered with ClientCursor.
- exec->deregisterExec();
- exec->saveState();
+ PlanExecutor* rawExec;
+ uassertStatusOK(
+ getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, runnerOptions));
+ exec.reset(rawExec);
+ }
- // Put the PlanExecutor into a DocumentSourceCursor and add it to the front of the pipeline.
- intrusive_ptr<DocumentSourceCursor> pSource =
- DocumentSourceCursor::create(fullName, exec, pExpCtx);
- // Note the query, sort, and projection for explain.
- pSource->setQuery(queryObj);
- if (sortInRunner)
- pSource->setSort(sortObj);
+ // DocumentSourceCursor expects a yielding PlanExecutor that has had its state saved. We
+ // deregister the PlanExecutor so that it can be registered with ClientCursor.
+ exec->deregisterExec();
+ exec->saveState();
- pSource->setProjection(deps.toProjection(), deps.toParsedDeps());
+ // Put the PlanExecutor into a DocumentSourceCursor and add it to the front of the pipeline.
+ intrusive_ptr<DocumentSourceCursor> pSource =
+ DocumentSourceCursor::create(fullName, exec, pExpCtx);
- while (!sources.empty() && pSource->coalesce(sources.front())) {
- sources.pop_front();
- }
+ // Note the query, sort, and projection for explain.
+ pSource->setQuery(queryObj);
+ if (sortInRunner)
+ pSource->setSort(sortObj);
- pPipeline->addInitialSource(pSource);
+ pSource->setProjection(deps.toProjection(), deps.toParsedDeps());
- return exec;
+ while (!sources.empty() && pSource->coalesce(sources.front())) {
+ sources.pop_front();
}
-} // namespace mongo
+ pPipeline->addInitialSource(pSource);
+
+ return exec;
+}
+
+} // namespace mongo
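The planning logic in prepareCursorSource() follows a two-attempt pattern: first try to build an executor that covers both the $match and the $sort (so an index scan can provide the order and the $sort stage can be popped), and if that fails, plan the query alone and leave the $sort in the pipeline. The sketch below illustrates only that fallback shape; Plan, buildPlan(), prepare(), and sortIndexed are invented names, not MongoDB APIs.

#include <memory>
#include <string>

// Hypothetical plan object; stands in for PlanExecutor in this sketch only.
struct Plan {
    std::string description;
};

// Pretend planner: refuses to produce a plan when a sort is requested but no index covers it.
std::unique_ptr<Plan> buildPlan(const std::string& query, const std::string& sort, bool sortIndexed) {
    if (!sort.empty() && !sortIndexed)
        return nullptr;  // no index can provide the sort, so planning "fails"
    return std::make_unique<Plan>(Plan{"query=" + query + (sort.empty() ? "" : " sort=" + sort)});
}

std::unique_ptr<Plan> prepare(const std::string& query,
                              const std::string& sort,
                              bool sortIndexed,
                              bool* sortInRunner) {
    // First attempt: include the sort so an index scan can provide the order.
    if (!sort.empty()) {
        if (auto plan = buildPlan(query, sort, sortIndexed)) {
            *sortInRunner = true;  // the $sort stage could now be popped from the pipeline
            return plan;
        }
    }
    // Fallback: plan the query alone and keep the $sort stage in the pipeline.
    *sortInRunner = false;
    return buildPlan(query, "", sortIndexed);
}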
diff --git a/src/mongo/db/pipeline/pipeline_d.h b/src/mongo/db/pipeline/pipeline_d.h
index 3a818c3ddcd..8866e6bdfe6 100644
--- a/src/mongo/db/pipeline/pipeline_d.h
+++ b/src/mongo/db/pipeline/pipeline_d.h
@@ -32,54 +32,53 @@
#include <memory>
namespace mongo {
- class Collection;
- class DocumentSourceCursor;
- struct ExpressionContext;
- class OperationContext;
- class Pipeline;
- class PlanExecutor;
+class Collection;
+class DocumentSourceCursor;
+struct ExpressionContext;
+class OperationContext;
+class Pipeline;
+class PlanExecutor;
- /*
- PipelineD is an extension of the Pipeline class, but with additional
- material that references symbols that are not available in mongos,
- where the remainder of the Pipeline class also functions. PipelineD
- is a friend of Pipeline so that it can have equal access to Pipeline's
- members.
+/*
+ PipelineD is an extension of the Pipeline class, but with additional
+ material that references symbols that are not available in mongos,
+ where the remainder of the Pipeline class also functions. PipelineD
+ is a friend of Pipeline so that it can have equal access to Pipeline's
+ members.
- See the friend declaration in Pipeline.
+ See the friend declaration in Pipeline.
+ */
+class PipelineD {
+public:
+ /**
+ * Create a Cursor wrapped in a DocumentSourceCursor, which is suitable
+ * to be the first source for a pipeline to begin with. This source
+ * will feed the execution of the pipeline.
+ *
+ * This method looks for early pipeline stages that can be folded into
+ * the underlying cursor, and when a cursor can absorb those, they
+ * are removed from the head of the pipeline. For example, an
+ * early match can be removed and replaced with a Cursor that will
+ * do an index scan.
+ *
+ * The cursor is added to the front of the pipeline's sources.
+ *
+     * Must have an AutoGetCollectionForRead before entering.
+ *
+ * If the returned PlanExecutor is non-null, you are responsible for ensuring
+ * it receives appropriate invalidate and kill messages.
+ *
+ * @param pPipeline the logical "this" for this operation
+ * @param pExpCtx the expression context for this pipeline
*/
- class PipelineD {
- public:
-
- /**
- * Create a Cursor wrapped in a DocumentSourceCursor, which is suitable
- * to be the first source for a pipeline to begin with. This source
- * will feed the execution of the pipeline.
- *
- * This method looks for early pipeline stages that can be folded into
- * the underlying cursor, and when a cursor can absorb those, they
- * are removed from the head of the pipeline. For example, an
- * early match can be removed and replaced with a Cursor that will
- * do an index scan.
- *
- * The cursor is added to the front of the pipeline's sources.
- *
- * Must have a AutoGetCollectionForRead before entering.
- *
- * If the returned PlanExecutor is non-null, you are responsible for ensuring
- * it receives appropriate invalidate and kill messages.
- *
- * @param pPipeline the logical "this" for this operation
- * @param pExpCtx the expression context for this pipeline
- */
- static std::shared_ptr<PlanExecutor> prepareCursorSource(
- OperationContext* txn,
- Collection* collection,
- const boost::intrusive_ptr<Pipeline> &pPipeline,
- const boost::intrusive_ptr<ExpressionContext> &pExpCtx);
+ static std::shared_ptr<PlanExecutor> prepareCursorSource(
+ OperationContext* txn,
+ Collection* collection,
+ const boost::intrusive_ptr<Pipeline>& pPipeline,
+ const boost::intrusive_ptr<ExpressionContext>& pExpCtx);
- private:
- PipelineD(); // does not exist: prevent instantiation
- };
+private:
+ PipelineD(); // does not exist: prevent instantiation
+};
-} // namespace mongo
+} // namespace mongo
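The PipelineD header above is the mongod-only half of the "sister class" arrangement described in pipeline.h: the core class links cleanly everywhere, while a friend helper compiled only into the server binary may touch its private state. A minimal sketch of that pattern follows; Core and CoreD are invented names used purely to show the shape of the friendship, not MongoDB types.

#include <vector>

class Core {
public:
    int size() const {
        return static_cast<int>(stages.size());
    }

private:
    friend class CoreD;  // like `friend class PipelineD;` in pipeline.h
    std::vector<int> stages;
};

// Defined in a separate translation unit linked only into the server binary,
// so Core itself never drags in server-only symbols.
class CoreD {
public:
    static void addInitialStage(Core* core, int stage) {
        core->stages.insert(core->stages.begin(), stage);
    }

private:
    CoreD();  // not instantiable, mirroring PipelineD
};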
diff --git a/src/mongo/db/pipeline/pipeline_optimizations.h b/src/mongo/db/pipeline/pipeline_optimizations.h
index ac4b9e8b697..68763c2ac5c 100644
--- a/src/mongo/db/pipeline/pipeline_optimizations.h
+++ b/src/mongo/db/pipeline/pipeline_optimizations.h
@@ -36,97 +36,97 @@
#include "mongo/db/pipeline/pipeline.h"
namespace mongo {
+/**
+ * This class holds optimizations applied to a single Pipeline.
+ *
+ * Each function has the same signature and takes a Pipeline as an in/out parameter.
+ */
+class Pipeline::Optimizations::Local {
+public:
/**
- * This class holds optimizations applied to a single Pipeline.
+ * Moves matches before any adjacent sort phases.
*
- * Each function has the same signature and takes a Pipeline as an in/out parameter.
+ * This means we sort fewer items. Neither sorts, nor matches (excluding $text)
+ * change the documents in the stream, so this transformation shouldn't affect
+ * the result.
*/
- class Pipeline::Optimizations::Local {
- public:
- /**
- * Moves matches before any adjacent sort phases.
- *
- * This means we sort fewer items. Neither sorts, nor matches (excluding $text)
- * change the documents in the stream, so this transformation shouldn't affect
- * the result.
- */
- static void moveMatchBeforeSort(Pipeline* pipeline);
+ static void moveMatchBeforeSort(Pipeline* pipeline);
- /**
- * Moves skip and limit before any adjacent project phases.
- *
- * While this is performance-neutral on its own, it enables other optimizations
- * such as combining sort and limit.
- */
- static void moveSkipAndLimitBeforeProject(Pipeline* pipeline);
+ /**
+ * Moves skip and limit before any adjacent project phases.
+ *
+ * While this is performance-neutral on its own, it enables other optimizations
+ * such as combining sort and limit.
+ */
+ static void moveSkipAndLimitBeforeProject(Pipeline* pipeline);
- /**
- * Moves limits before any adjacent skip phases.
- *
- * This is more optimal for sharding since currently, we can only split
- * the pipeline at a single source and it is better to limit the results
- * coming from each shard. This also enables other optimizations like
- * coalescing the limit into a sort.
- */
- static void moveLimitBeforeSkip(Pipeline* pipeline);
+ /**
+ * Moves limits before any adjacent skip phases.
+ *
+ * This is more optimal for sharding since currently, we can only split
+ * the pipeline at a single source and it is better to limit the results
+ * coming from each shard. This also enables other optimizations like
+ * coalescing the limit into a sort.
+ */
+ static void moveLimitBeforeSkip(Pipeline* pipeline);
- /**
- * Runs through the DocumentSources, and give each one the opportunity
- * to coalesce with its successor. If successful, remove the successor.
- *
- * This should generally be run after optimizations that reorder stages
- * to be most effective.
- *
- * NOTE: uses the DocumentSource::coalesce() method
- */
- static void coalesceAdjacent(Pipeline* pipeline);
+ /**
+ * Runs through the DocumentSources, and give each one the opportunity
+ * to coalesce with its successor. If successful, remove the successor.
+ *
+ * This should generally be run after optimizations that reorder stages
+ * to be most effective.
+ *
+ * NOTE: uses the DocumentSource::coalesce() method
+ */
+ static void coalesceAdjacent(Pipeline* pipeline);
- /**
- * Gives each DocumentSource the opportunity to optimize itself.
- *
- * NOTE: uses the DocumentSource::optimize() method
- */
- static void optimizeEachDocumentSource(Pipeline* pipeline);
+ /**
+ * Gives each DocumentSource the opportunity to optimize itself.
+ *
+ * NOTE: uses the DocumentSource::optimize() method
+ */
+ static void optimizeEachDocumentSource(Pipeline* pipeline);
- /**
- * Optimizes [$redact, $match] to [$match, $redact, $match] if possible.
- *
- * This gives us the ability to use indexes and reduce the number of
- * BSONObjs converted to Documents.
- */
- static void duplicateMatchBeforeInitalRedact(Pipeline* pipeline);
- };
+ /**
+ * Optimizes [$redact, $match] to [$match, $redact, $match] if possible.
+ *
+ * This gives us the ability to use indexes and reduce the number of
+ * BSONObjs converted to Documents.
+ */
+ static void duplicateMatchBeforeInitalRedact(Pipeline* pipeline);
+};
+/**
+ * This class holds optimizations applied to a shard Pipeline and a merger Pipeline.
+ *
+ * Each function has the same signature and takes two Pipelines, both as an in/out parameters.
+ */
+class Pipeline::Optimizations::Sharded {
+public:
/**
- * This class holds optimizations applied to a shard Pipeline and a merger Pipeline.
+ * Moves everything before a splittable stage to the shards. If there
+ * are no splittable stages, moves everything to the shards.
+ *
+ * It is not safe to call this optimization multiple times.
*
- * Each function has the same signature and takes two Pipelines, both as an in/out parameters.
+ * NOTE: looks for SplittableDocumentSources and uses that API
*/
- class Pipeline::Optimizations::Sharded {
- public:
- /**
- * Moves everything before a splittable stage to the shards. If there
- * are no splittable stages, moves everything to the shards.
- *
- * It is not safe to call this optimization multiple times.
- *
- * NOTE: looks for SplittableDocumentSources and uses that API
- */
- static void findSplitPoint(Pipeline* shardPipe, Pipeline* mergePipe);
+ static void findSplitPoint(Pipeline* shardPipe, Pipeline* mergePipe);
- /**
- * If the final stage on shards is to unwind an array, move that stage to the merger. This
- * cuts down on network traffic and allows us to take advantage of reduced copying in
- * unwind.
- */
- static void moveFinalUnwindFromShardsToMerger(Pipeline* shardPipe, Pipeline* mergePipe);
+ /**
+ * If the final stage on shards is to unwind an array, move that stage to the merger. This
+ * cuts down on network traffic and allows us to take advantage of reduced copying in
+ * unwind.
+ */
+ static void moveFinalUnwindFromShardsToMerger(Pipeline* shardPipe, Pipeline* mergePipe);
- /**
- * Adds a stage to the end of shardPipe explicitly requesting all fields that mergePipe
- * needs. This is only done if it heuristically determines that it is needed. This
- * optimization can reduce the amount of network traffic and can also enable the shards to
- * convert less source BSON into Documents.
- */
- static void limitFieldsSentFromShardsToMerger(Pipeline* shardPipe, Pipeline* mergePipe);
- };
-} // namespace mongo
+ /**
+ * Adds a stage to the end of shardPipe explicitly requesting all fields that mergePipe
+ * needs. This is only done if it heuristically determines that it is needed. This
+ * optimization can reduce the amount of network traffic and can also enable the shards to
+ * convert less source BSON into Documents.
+ */
+ static void limitFieldsSentFromShardsToMerger(Pipeline* shardPipe, Pipeline* mergePipe);
+};
+} // namespace mongo
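To make the first local optimization above concrete, here is a toy version of the "move matches before adjacent sorts" rewrite. It operates on plain stage-name strings invented for demonstration; the real Optimizations::Local::moveMatchBeforeSort() works on DocumentSources, but the reordering idea is the same: a $match placed ahead of a $sort means fewer documents get sorted.

#include <string>
#include <utility>
#include <vector>

// Bubble each "match" stage leftward across any directly preceding "sort" stages.
void moveMatchBeforeSort(std::vector<std::string>& stages) {
    for (size_t i = 1; i < stages.size(); ++i) {
        size_t pos = i;
        while (pos > 0 && stages[pos] == "match" && stages[pos - 1] == "sort") {
            std::swap(stages[pos], stages[pos - 1]);  // sort fewer documents downstream
            --pos;
        }
    }
}

// e.g. {"sort", "match", "project"} becomes {"match", "sort", "project"}.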
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index 0d55a309002..4afc5ccf684 100644
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -41,17 +41,17 @@
#include "mongo/util/mongoutils/str.h"
namespace mongo {
- using namespace mongoutils;
- using boost::intrusive_ptr;
- using std::min;
- using std::numeric_limits;
- using std::ostream;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- void ValueStorage::verifyRefCountingIfShould() const {
- switch (type) {
+using namespace mongoutils;
+using boost::intrusive_ptr;
+using std::min;
+using std::numeric_limits;
+using std::ostream;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+void ValueStorage::verifyRefCountingIfShould() const {
+ switch (type) {
case MinKey:
case MaxKey:
case jstOID:
@@ -76,8 +76,8 @@ namespace mongo {
verify(refCounter == !shortStr);
break;
- case BinData: // TODO this should probably support short-string optimization
- case Array: // TODO this should probably support empty-is-NULL optimization
+ case BinData: // TODO this should probably support short-string optimization
+ case Array: // TODO this should probably support empty-is-NULL optimization
case DBRef:
case CodeWScope:
// the above types always reference external data.
@@ -89,61 +89,60 @@ namespace mongo {
// Objects either hold a NULL ptr or should be ref-counting
verify(refCounter == bool(genericRCPtr));
break;
- }
}
+}
- void ValueStorage::putString(StringData s) {
- // Note: this also stores data portion of BinData
- const size_t sizeNoNUL = s.size();
- if (sizeNoNUL <= sizeof(shortStrStorage)) {
- shortStr = true;
- shortStrSize = s.size();
- s.copyTo(shortStrStorage, false); // no NUL
-
- // All memory is zeroed before this is called.
- // Note this may be past end of shortStrStorage and into nulTerminator
- dassert(shortStrStorage[sizeNoNUL] == '\0');
- }
- else {
- putRefCountable(RCString::create(s));
- }
+void ValueStorage::putString(StringData s) {
+ // Note: this also stores data portion of BinData
+ const size_t sizeNoNUL = s.size();
+ if (sizeNoNUL <= sizeof(shortStrStorage)) {
+ shortStr = true;
+ shortStrSize = s.size();
+ s.copyTo(shortStrStorage, false); // no NUL
+
+ // All memory is zeroed before this is called.
+ // Note this may be past end of shortStrStorage and into nulTerminator
+ dassert(shortStrStorage[sizeNoNUL] == '\0');
+ } else {
+ putRefCountable(RCString::create(s));
}
+}
- void ValueStorage::putDocument(const Document& d) {
- putRefCountable(d._storage);
- }
+void ValueStorage::putDocument(const Document& d) {
+ putRefCountable(d._storage);
+}
- void ValueStorage::putVector(const RCVector* vec) {
- fassert(16485, vec);
- putRefCountable(vec);
- }
+void ValueStorage::putVector(const RCVector* vec) {
+ fassert(16485, vec);
+ putRefCountable(vec);
+}
- void ValueStorage::putRegEx(const BSONRegEx& re) {
- const size_t patternLen = re.pattern.size();
- const size_t flagsLen = re.flags.size();
- const size_t totalLen = patternLen + 1/*middle NUL*/ + flagsLen;
+void ValueStorage::putRegEx(const BSONRegEx& re) {
+ const size_t patternLen = re.pattern.size();
+ const size_t flagsLen = re.flags.size();
+ const size_t totalLen = patternLen + 1 /*middle NUL*/ + flagsLen;
- // Need to copy since putString doesn't support scatter-gather.
- std::unique_ptr<char[]> buf (new char[totalLen]);
- re.pattern.copyTo(buf.get(), true);
- re.flags.copyTo(buf.get() + patternLen + 1, false); // no NUL
- putString(StringData(buf.get(), totalLen));
- }
+ // Need to copy since putString doesn't support scatter-gather.
+ std::unique_ptr<char[]> buf(new char[totalLen]);
+ re.pattern.copyTo(buf.get(), true);
+ re.flags.copyTo(buf.get() + patternLen + 1, false); // no NUL
+ putString(StringData(buf.get(), totalLen));
+}
- Document ValueStorage::getDocument() const {
- if (!genericRCPtr)
- return Document();
+Document ValueStorage::getDocument() const {
+ if (!genericRCPtr)
+ return Document();
- dassert(typeid(*genericRCPtr) == typeid(const DocumentStorage));
- const DocumentStorage* documentPtr = static_cast<const DocumentStorage*>(genericRCPtr);
- return Document(documentPtr);
- }
+ dassert(typeid(*genericRCPtr) == typeid(const DocumentStorage));
+ const DocumentStorage* documentPtr = static_cast<const DocumentStorage*>(genericRCPtr);
+ return Document(documentPtr);
+}
- // not in header because document is fwd declared
- Value::Value(const BSONObj& obj) : _storage(Object, Document(obj)) {}
+// not in header because document is fwd declared
+Value::Value(const BSONObj& obj) : _storage(Object, Document(obj)) {}
- Value::Value(const BSONElement& elem) : _storage(elem.type()) {
- switch(elem.type()) {
+Value::Value(const BSONElement& elem) : _storage(elem.type()) {
+ switch (elem.type()) {
// These are all type-only, no data
case EOO:
case MinKey:
@@ -168,7 +167,7 @@ namespace mongo {
}
case Array: {
- intrusive_ptr<RCVector> vec (new RCVector);
+ intrusive_ptr<RCVector> vec(new RCVector);
BSONForEach(sub, elem.embeddedObject()) {
vec->vec.push_back(Value(sub));
}
@@ -207,7 +206,7 @@ namespace mongo {
break;
case CodeWScope: {
- StringData code (elem.codeWScopeCode(), elem.codeWScopeCodeLen()-1);
+ StringData code(elem.codeWScopeCode(), elem.codeWScopeCodeLen() - 1);
_storage.putCodeWScope(BSONCodeWScope(code, elem.codeWScopeObject()));
break;
}
@@ -222,83 +221,100 @@ namespace mongo {
case DBRef:
_storage.putDBRef(BSONDBRef(elem.dbrefNS(), elem.dbrefOID()));
break;
- }
}
+}
- Value::Value(const BSONArray& arr) : _storage(Array) {
- intrusive_ptr<RCVector> vec (new RCVector);
- BSONForEach(sub, arr) {
- vec->vec.push_back(Value(sub));
- }
- _storage.putVector(vec.get());
+Value::Value(const BSONArray& arr) : _storage(Array) {
+ intrusive_ptr<RCVector> vec(new RCVector);
+ BSONForEach(sub, arr) {
+ vec->vec.push_back(Value(sub));
}
+ _storage.putVector(vec.get());
+}
- Value Value::createIntOrLong(long long longValue) {
- int intValue = longValue;
- if (intValue != longValue) {
- // it is too large to be an int and should remain a long
- return Value(longValue);
- }
-
- // should be an int since all arguments were int and it fits
- return Value(intValue);
+Value Value::createIntOrLong(long long longValue) {
+ int intValue = longValue;
+ if (intValue != longValue) {
+ // it is too large to be an int and should remain a long
+ return Value(longValue);
}
- double Value::getDouble() const {
- BSONType type = getType();
- if (type == NumberInt)
- return _storage.intValue;
- if (type == NumberLong)
- return static_cast< double >( _storage.longValue );
+ // should be an int since all arguments were int and it fits
+ return Value(intValue);
+}
- verify(type == NumberDouble);
- return _storage.doubleValue;
- }
+double Value::getDouble() const {
+ BSONType type = getType();
+ if (type == NumberInt)
+ return _storage.intValue;
+ if (type == NumberLong)
+ return static_cast<double>(_storage.longValue);
- Document Value::getDocument() const {
- verify(getType() == Object);
- return _storage.getDocument();
- }
+ verify(type == NumberDouble);
+ return _storage.doubleValue;
+}
+
+Document Value::getDocument() const {
+ verify(getType() == Object);
+ return _storage.getDocument();
+}
- Value Value::operator[] (size_t index) const {
- if (getType() != Array || index >= getArrayLength())
- return Value();
+Value Value::operator[](size_t index) const {
+ if (getType() != Array || index >= getArrayLength())
+ return Value();
- return getArray()[index];
- }
+ return getArray()[index];
+}
- Value Value::operator[] (StringData name) const {
- if (getType() != Object)
- return Value();
+Value Value::operator[](StringData name) const {
+ if (getType() != Object)
+ return Value();
- return getDocument()[name];
- }
+ return getDocument()[name];
+}
- BSONObjBuilder& operator << (BSONObjBuilderValueStream& builder, const Value& val) {
- switch(val.getType()) {
- case EOO: return builder.builder(); // nothing appended
- case MinKey: return builder << MINKEY;
- case MaxKey: return builder << MAXKEY;
- case jstNULL: return builder << BSONNULL;
- case Undefined: return builder << BSONUndefined;
- case jstOID: return builder << val.getOid();
- case NumberInt: return builder << val.getInt();
- case NumberLong: return builder << val.getLong();
- case NumberDouble: return builder << val.getDouble();
- case String: return builder << val.getStringData();
- case Bool: return builder << val.getBool();
- case Date: return builder << Date_t::fromMillisSinceEpoch(val.getDate());
- case bsonTimestamp: return builder << val.getTimestamp();
- case Object: return builder << val.getDocument();
- case Symbol: return builder << BSONSymbol(val.getStringData());
- case Code: return builder << BSONCode(val.getStringData());
- case RegEx: return builder << BSONRegEx(val.getRegex(), val.getRegexFlags());
+BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Value& val) {
+ switch (val.getType()) {
+ case EOO:
+ return builder.builder(); // nothing appended
+ case MinKey:
+ return builder << MINKEY;
+ case MaxKey:
+ return builder << MAXKEY;
+ case jstNULL:
+ return builder << BSONNULL;
+ case Undefined:
+ return builder << BSONUndefined;
+ case jstOID:
+ return builder << val.getOid();
+ case NumberInt:
+ return builder << val.getInt();
+ case NumberLong:
+ return builder << val.getLong();
+ case NumberDouble:
+ return builder << val.getDouble();
+ case String:
+ return builder << val.getStringData();
+ case Bool:
+ return builder << val.getBool();
+ case Date:
+ return builder << Date_t::fromMillisSinceEpoch(val.getDate());
+ case bsonTimestamp:
+ return builder << val.getTimestamp();
+ case Object:
+ return builder << val.getDocument();
+ case Symbol:
+ return builder << BSONSymbol(val.getStringData());
+ case Code:
+ return builder << BSONCode(val.getStringData());
+ case RegEx:
+ return builder << BSONRegEx(val.getRegex(), val.getRegexFlags());
case DBRef:
return builder << BSONDBRef(val._storage.getDBRef()->ns, val._storage.getDBRef()->oid);
case BinData:
- return builder << BSONBinData(val.getStringData().rawData(), // looking for void*
+ return builder << BSONBinData(val.getStringData().rawData(), // looking for void*
val.getStringData().size(),
val._storage.binDataType());
@@ -310,29 +326,29 @@ namespace mongo {
const vector<Value>& array = val.getArray();
const size_t n = array.size();
BSONArrayBuilder arrayBuilder(builder.subarrayStart());
- for(size_t i = 0; i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
array[i].addToBsonArray(&arrayBuilder);
}
arrayBuilder.doneFast();
return builder.builder();
}
- }
- verify(false);
}
+ verify(false);
+}
- void Value::addToBsonObj(BSONObjBuilder* pBuilder, StringData fieldName) const {
- *pBuilder << fieldName << *this;
- }
+void Value::addToBsonObj(BSONObjBuilder* pBuilder, StringData fieldName) const {
+ *pBuilder << fieldName << *this;
+}
- void Value::addToBsonArray(BSONArrayBuilder* pBuilder) const {
- if (!missing()) { // don't want to increment builder's counter
- *pBuilder << *this;
- }
+void Value::addToBsonArray(BSONArrayBuilder* pBuilder) const {
+ if (!missing()) { // don't want to increment builder's counter
+ *pBuilder << *this;
}
+}
- bool Value::coerceToBool() const {
- // TODO Unify the implementation with BSONElement::trueValue().
- switch(getType()) {
+bool Value::coerceToBool() const {
+ // TODO Unify the implementation with BSONElement::trueValue().
+ switch (getType()) {
case CodeWScope:
case MinKey:
case DBRef:
@@ -354,16 +370,20 @@ namespace mongo {
case Undefined:
return false;
- case Bool: return _storage.boolValue;
- case NumberInt: return _storage.intValue;
- case NumberLong: return _storage.longValue;
- case NumberDouble: return _storage.doubleValue;
- }
- verify(false);
+ case Bool:
+ return _storage.boolValue;
+ case NumberInt:
+ return _storage.intValue;
+ case NumberLong:
+ return _storage.longValue;
+ case NumberDouble:
+ return _storage.doubleValue;
}
+ verify(false);
+}
- int Value::coerceToInt() const {
- switch(getType()) {
+int Value::coerceToInt() const {
+ switch (getType()) {
case NumberInt:
return _storage.intValue;
@@ -374,15 +394,15 @@ namespace mongo {
return static_cast<int>(_storage.doubleValue);
default:
- uassert(16003, str::stream() <<
- "can't convert from BSON type " << typeName(getType()) <<
- " to int",
+ uassert(16003,
+ str::stream() << "can't convert from BSON type " << typeName(getType())
+ << " to int",
false);
- } // switch(getType())
- }
+ } // switch(getType())
+}
- long long Value::coerceToLong() const {
- switch(getType()) {
+long long Value::coerceToLong() const {
+ switch (getType()) {
case NumberLong:
return _storage.longValue;
@@ -393,15 +413,15 @@ namespace mongo {
return static_cast<long long>(_storage.doubleValue);
default:
- uassert(16004, str::stream() <<
- "can't convert from BSON type " << typeName(getType()) <<
- " to long",
+ uassert(16004,
+ str::stream() << "can't convert from BSON type " << typeName(getType())
+ << " to long",
false);
- } // switch(getType())
- }
+ } // switch(getType())
+}
- double Value::coerceToDouble() const {
- switch(getType()) {
+double Value::coerceToDouble() const {
+ switch (getType()) {
case NumberDouble:
return _storage.doubleValue;
@@ -412,15 +432,15 @@ namespace mongo {
return static_cast<double>(_storage.longValue);
default:
- uassert(16005, str::stream() <<
- "can't convert from BSON type " << typeName(getType()) <<
- " to double",
+ uassert(16005,
+ str::stream() << "can't convert from BSON type " << typeName(getType())
+ << " to double",
false);
- } // switch(getType())
- }
+ } // switch(getType())
+}
- long long Value::coerceToDate() const {
- switch(getType()) {
+long long Value::coerceToDate() const {
+ switch (getType()) {
case Date:
return getDate();
@@ -428,65 +448,66 @@ namespace mongo {
return getTimestamp().getSecs() * 1000LL;
default:
- uassert(16006, str::stream() <<
- "can't convert from BSON type " << typeName(getType()) << " to Date",
+ uassert(16006,
+ str::stream() << "can't convert from BSON type " << typeName(getType())
+ << " to Date",
false);
- } // switch(getType())
- }
+ } // switch(getType())
+}
- time_t Value::coerceToTimeT() const {
- long long millis = coerceToDate();
- if (millis < 0) {
- // We want the division below to truncate toward -inf rather than 0
- // eg Dec 31, 1969 23:59:58.001 should be -2 seconds rather than -1
- // This is needed to get the correct values from coerceToTM
- if ( -1999 / 1000 != -2) { // this is implementation defined
- millis -= 1000-1;
- }
+time_t Value::coerceToTimeT() const {
+ long long millis = coerceToDate();
+ if (millis < 0) {
+ // We want the division below to truncate toward -inf rather than 0
+ // eg Dec 31, 1969 23:59:58.001 should be -2 seconds rather than -1
+ // This is needed to get the correct values from coerceToTM
+ if (-1999 / 1000 != -2) { // this is implementation defined
+ millis -= 1000 - 1;
}
- const long long seconds = millis / 1000;
+ }
+ const long long seconds = millis / 1000;
- uassert(16421, "Can't handle date values outside of time_t range",
- seconds >= std::numeric_limits<time_t>::min() &&
- seconds <= std::numeric_limits<time_t>::max());
+ uassert(16421,
+ "Can't handle date values outside of time_t range",
+ seconds >= std::numeric_limits<time_t>::min() &&
+ seconds <= std::numeric_limits<time_t>::max());
- return static_cast<time_t>(seconds);
- }
- tm Value::coerceToTm() const {
- // See implementation in Date_t.
- // Can't reuse that here because it doesn't support times before 1970
- time_t dtime = coerceToTimeT();
- tm out;
-
-#if defined(_WIN32) // Both the argument order and the return values differ
- bool itWorked = gmtime_s(&out, &dtime) == 0;
+ return static_cast<time_t>(seconds);
+}
+tm Value::coerceToTm() const {
+ // See implementation in Date_t.
+ // Can't reuse that here because it doesn't support times before 1970
+ time_t dtime = coerceToTimeT();
+ tm out;
+
+#if defined(_WIN32) // Both the argument order and the return values differ
+ bool itWorked = gmtime_s(&out, &dtime) == 0;
#else
- bool itWorked = gmtime_r(&dtime, &out) != NULL;
+ bool itWorked = gmtime_r(&dtime, &out) != NULL;
#endif
- if (!itWorked) {
- if (dtime < 0) {
- // Windows docs say it doesn't support these, but empirically it seems to work
- uasserted(16422, "gmtime failed - your system doesn't support dates before 1970");
- }
- else {
- uasserted(16423, str::stream() << "gmtime failed to convert time_t of " << dtime);
- }
+ if (!itWorked) {
+ if (dtime < 0) {
+ // Windows docs say it doesn't support these, but empirically it seems to work
+ uasserted(16422, "gmtime failed - your system doesn't support dates before 1970");
+ } else {
+ uasserted(16423, str::stream() << "gmtime failed to convert time_t of " << dtime);
}
-
- return out;
}
- static string tmToISODateString(const tm& time) {
- char buf[128];
- size_t len = strftime(buf, 128, "%Y-%m-%dT%H:%M:%S", &time);
- verify(len > 0);
- verify(len < 128);
- return buf;
- }
+ return out;
+}
+
+static string tmToISODateString(const tm& time) {
+ char buf[128];
+ size_t len = strftime(buf, 128, "%Y-%m-%dT%H:%M:%S", &time);
+ verify(len > 0);
+ verify(len < 128);
+ return buf;
+}
- string Value::coerceToString() const {
- switch(getType()) {
+string Value::coerceToString() const {
+ switch (getType()) {
case NumberDouble:
return str::stream() << _storage.doubleValue;
@@ -513,57 +534,53 @@ namespace mongo {
return "";
default:
- uassert(16007, str::stream() <<
- "can't convert from BSON type " << typeName(getType()) <<
- " to String",
+ uassert(16007,
+ str::stream() << "can't convert from BSON type " << typeName(getType())
+ << " to String",
false);
- } // switch(getType())
- }
+ } // switch(getType())
+}
- Timestamp Value::coerceToTimestamp() const {
- switch(getType()) {
+Timestamp Value::coerceToTimestamp() const {
+ switch (getType()) {
case bsonTimestamp:
return getTimestamp();
default:
- uassert(16378, str::stream() <<
- "can't convert from BSON type " << typeName(getType()) <<
- " to timestamp",
+ uassert(16378,
+ str::stream() << "can't convert from BSON type " << typeName(getType())
+ << " to timestamp",
false);
- } // switch(getType())
- }
+ } // switch(getType())
+}
- // Helper function for Value::compare.
- // Better than l-r for cases where difference > MAX_INT
- template <typename T>
- inline static int cmp(const T& left, const T& right) {
- if (left < right) {
- return -1;
- }
- else if (left == right) {
- return 0;
- }
- else {
- dassert(left > right);
- return 1;
- }
+// Helper function for Value::compare.
+// Better than l-r for cases where difference > MAX_INT
+template <typename T>
+inline static int cmp(const T& left, const T& right) {
+ if (left < right) {
+ return -1;
+ } else if (left == right) {
+ return 0;
+ } else {
+ dassert(left > right);
+ return 1;
}
+}
- int Value::compare(const Value& rL, const Value& rR) {
- // Note, this function needs to behave identically to BSON's compareElementValues().
- // Additionally, any changes here must be replicated in hash_combine().
- BSONType lType = rL.getType();
- BSONType rType = rR.getType();
+int Value::compare(const Value& rL, const Value& rR) {
+ // Note, this function needs to behave identically to BSON's compareElementValues().
+ // Additionally, any changes here must be replicated in hash_combine().
+ BSONType lType = rL.getType();
+ BSONType rType = rR.getType();
- int ret = lType == rType
- ? 0 // fast-path common case
- : cmp(canonicalizeBSONType(lType),
- canonicalizeBSONType(rType));
+ int ret = lType == rType ? 0 // fast-path common case
+ : cmp(canonicalizeBSONType(lType), canonicalizeBSONType(rType));
- if (ret)
- return ret;
+ if (ret)
+ return ret;
- switch(lType) {
+ switch (lType) {
// Order of types is the same as in compareElementValues() to make it easier to verify
// These are valueless types
@@ -577,10 +594,10 @@ namespace mongo {
case Bool:
return rL.getBool() - rR.getBool();
- case bsonTimestamp: // unsigned
+ case bsonTimestamp: // unsigned
return cmp(rL._storage.timestampValue, rR._storage.timestampValue);
- case Date: // signed
+ case Date: // signed
return cmp(rL._storage.dateValue, rR._storage.dateValue);
// Numbers should compare by equivalence even if different types
@@ -588,32 +605,40 @@ namespace mongo {
// All types can precisely represent all NumberInts, so it is safe to simply convert to
// whatever rhs's type is.
switch (rType) {
- case NumberInt: return compareInts(rL._storage.intValue, rR._storage.intValue);
- case NumberLong: return compareLongs(rL._storage.intValue, rR._storage.longValue);
- case NumberDouble: return compareDoubles(rL._storage.intValue, rR._storage.doubleValue);
- default: invariant(false);
+ case NumberInt:
+ return compareInts(rL._storage.intValue, rR._storage.intValue);
+ case NumberLong:
+ return compareLongs(rL._storage.intValue, rR._storage.longValue);
+ case NumberDouble:
+ return compareDoubles(rL._storage.intValue, rR._storage.doubleValue);
+ default:
+ invariant(false);
}
}
case NumberLong: {
switch (rType) {
- case NumberLong: return compareLongs(rL._storage.longValue, rR._storage.longValue);
- case NumberInt: return compareLongs(rL._storage.longValue, rR._storage.intValue);
- case NumberDouble: return compareLongToDouble(rL._storage.longValue,
- rR._storage.doubleValue);
- default: invariant(false);
+ case NumberLong:
+ return compareLongs(rL._storage.longValue, rR._storage.longValue);
+ case NumberInt:
+ return compareLongs(rL._storage.longValue, rR._storage.intValue);
+ case NumberDouble:
+ return compareLongToDouble(rL._storage.longValue, rR._storage.doubleValue);
+ default:
+ invariant(false);
}
}
case NumberDouble: {
switch (rType) {
- case NumberDouble: return compareDoubles(rL._storage.doubleValue,
- rR._storage.doubleValue);
- case NumberInt: return compareDoubles(rL._storage.doubleValue,
- rR._storage.intValue);
- case NumberLong: return compareDoubleToLong(rL._storage.doubleValue,
- rR._storage.longValue);
- default: invariant(false);
+ case NumberDouble:
+ return compareDoubles(rL._storage.doubleValue, rR._storage.doubleValue);
+ case NumberInt:
+ return compareDoubles(rL._storage.doubleValue, rR._storage.intValue);
+ case NumberLong:
+ return compareDoubleToLong(rL._storage.doubleValue, rR._storage.longValue);
+ default:
+ invariant(false);
}
}
@@ -633,14 +658,14 @@ namespace mongo {
const vector<Value>& rArr = rR.getArray();
const size_t elems = std::min(lArr.size(), rArr.size());
- for (size_t i = 0; i < elems; i++ ) {
+ for (size_t i = 0; i < elems; i++) {
// compare the two corresponding elements
ret = Value::compare(lArr[i], rArr[i]);
if (ret)
- return ret; // values are unequal
+ return ret; // values are unequal
}
- // if we get here we are either equal or one is prefix of the other
+ // if we get here we are either equal or one is prefix of the other
return cmp(lArr.size(), rArr.size());
}
@@ -667,7 +692,7 @@ namespace mongo {
return rL.getStringData().compare(rR.getStringData());
}
- case RegEx: // same as String in this impl but keeping order same as compareElementValues
+ case RegEx: // same as String in this impl but keeping order same as compareElementValues
return rL.getStringData().compare(rR.getStringData());
case CodeWScope: {
@@ -680,16 +705,16 @@ namespace mongo {
return l->scope.woCompare(r->scope);
}
- }
- verify(false);
}
+ verify(false);
+}
- void Value::hash_combine(size_t &seed) const {
- BSONType type = getType();
+void Value::hash_combine(size_t& seed) const {
+ BSONType type = getType();
- boost::hash_combine(seed, canonicalizeBSONType(type));
+ boost::hash_combine(seed, canonicalizeBSONType(type));
- switch (type) {
+ switch (type) {
// Order of types is the same as in Value::compare() and compareElementValues().
// These are valueless types
@@ -720,8 +745,7 @@ namespace mongo {
const double dbl = getDouble();
if (std::isnan(dbl)) {
boost::hash_combine(seed, numeric_limits<double>::quiet_NaN());
- }
- else {
+ } else {
boost::hash_combine(seed, dbl);
}
break;
@@ -745,7 +769,7 @@ namespace mongo {
case Array: {
const vector<Value>& vec = getArray();
- for (size_t i=0; i < vec.size(); i++)
+ for (size_t i = 0; i < vec.size(); i++)
vec[i].hash_combine(seed);
break;
}
@@ -775,12 +799,12 @@ namespace mongo {
boost::hash_combine(seed, BSONObj::Hasher()(cws->scope));
break;
}
- }
}
+}
- BSONType Value::getWidestNumeric(BSONType lType, BSONType rType) {
- if (lType == NumberDouble) {
- switch(rType) {
+BSONType Value::getWidestNumeric(BSONType lType, BSONType rType) {
+ if (lType == NumberDouble) {
+ switch (rType) {
case NumberDouble:
case NumberLong:
case NumberInt:
@@ -788,10 +812,9 @@ namespace mongo {
default:
break;
- }
}
- else if (lType == NumberLong) {
- switch(rType) {
+ } else if (lType == NumberLong) {
+ switch (rType) {
case NumberDouble:
return NumberDouble;
@@ -801,10 +824,9 @@ namespace mongo {
default:
break;
- }
}
- else if (lType == NumberInt) {
- switch(rType) {
+ } else if (lType == NumberInt) {
+ switch (rType) {
case NumberDouble:
return NumberDouble;
@@ -816,38 +838,38 @@ namespace mongo {
default:
break;
- }
}
-
- // Reachable, but callers must subsequently err out in this case.
- return Undefined;
}
- bool Value::integral() const {
- switch (getType()) {
+ // Reachable, but callers must subsequently err out in this case.
+ return Undefined;
+}
+
+bool Value::integral() const {
+ switch (getType()) {
case NumberInt:
return true;
case NumberLong:
- return (_storage.longValue <= numeric_limits<int>::max()
- && _storage.longValue >= numeric_limits<int>::min());
+ return (_storage.longValue <= numeric_limits<int>::max() &&
+ _storage.longValue >= numeric_limits<int>::min());
case NumberDouble:
- return (_storage.doubleValue <= numeric_limits<int>::max()
- && _storage.doubleValue >= numeric_limits<int>::min()
- && _storage.doubleValue == static_cast<int>(_storage.doubleValue));
+ return (_storage.doubleValue <= numeric_limits<int>::max() &&
+ _storage.doubleValue >= numeric_limits<int>::min() &&
+ _storage.doubleValue == static_cast<int>(_storage.doubleValue));
default:
return false;
- }
}
+}
- size_t Value::getApproximateSize() const {
- switch(getType()) {
+size_t Value::getApproximateSize() const {
+ switch (getType()) {
case Code:
case RegEx:
case Symbol:
case BinData:
case String:
return sizeof(Value) + (_storage.shortStr
- ? 0 // string stored inline, so no extra mem usage
+ ? 0 // string stored inline, so no extra mem usage
: sizeof(RCString) + _storage.getString().size());
case Object:
@@ -857,15 +879,15 @@ namespace mongo {
size_t size = sizeof(Value);
size += sizeof(RCVector);
const size_t n = getArray().size();
- for(size_t i = 0; i < n; ++i) {
+ for (size_t i = 0; i < n; ++i) {
size += getArray()[i].getApproximateSize();
}
return size;
}
case CodeWScope:
- return sizeof(Value) + sizeof(RCCodeWScope) + _storage.getCodeWScope()->code.size()
- + _storage.getCodeWScope()->scope.objsize();
+ return sizeof(Value) + sizeof(RCCodeWScope) + _storage.getCodeWScope()->code.size() +
+ _storage.getCodeWScope()->scope.objsize();
case DBRef:
return sizeof(Value) + sizeof(RCDBRef) + _storage.getDBRef()->ns.size();
@@ -884,40 +906,57 @@ namespace mongo {
case jstNULL:
case Undefined:
return sizeof(Value);
- }
- verify(false);
}
+ verify(false);
+}
- string Value::toString() const {
- // TODO use StringBuilder when operator << is ready
- stringstream out;
- out << *this;
- return out.str();
- }
+string Value::toString() const {
+ // TODO use StringBuilder when operator << is ready
+ stringstream out;
+ out << *this;
+ return out.str();
+}
- ostream& operator << (ostream& out, const Value& val) {
- switch(val.getType()) {
- case EOO: return out << "MISSING";
- case MinKey: return out << "MinKey";
- case MaxKey: return out << "MaxKey";
- case jstOID: return out << val.getOid();
- case String: return out << '"' << val.getString() << '"';
- case RegEx: return out << '/' << val.getRegex() << '/' << val.getRegexFlags();
- case Symbol: return out << "Symbol(\"" << val.getSymbol() << "\")";
- case Code: return out << "Code(\"" << val.getCode() << "\")";
- case Bool: return out << (val.getBool() ? "true" : "false");
- case NumberDouble: return out << val.getDouble();
- case NumberLong: return out << val.getLong();
- case NumberInt: return out << val.getInt();
- case jstNULL: return out << "null";
- case Undefined: return out << "undefined";
- case Date: return out << tmToISODateString(val.coerceToTm());
- case bsonTimestamp: return out << val.getTimestamp().toString();
- case Object: return out << val.getDocument().toString();
+ostream& operator<<(ostream& out, const Value& val) {
+ switch (val.getType()) {
+ case EOO:
+ return out << "MISSING";
+ case MinKey:
+ return out << "MinKey";
+ case MaxKey:
+ return out << "MaxKey";
+ case jstOID:
+ return out << val.getOid();
+ case String:
+ return out << '"' << val.getString() << '"';
+ case RegEx:
+ return out << '/' << val.getRegex() << '/' << val.getRegexFlags();
+ case Symbol:
+ return out << "Symbol(\"" << val.getSymbol() << "\")";
+ case Code:
+ return out << "Code(\"" << val.getCode() << "\")";
+ case Bool:
+ return out << (val.getBool() ? "true" : "false");
+ case NumberDouble:
+ return out << val.getDouble();
+ case NumberLong:
+ return out << val.getLong();
+ case NumberInt:
+ return out << val.getInt();
+ case jstNULL:
+ return out << "null";
+ case Undefined:
+ return out << "undefined";
+ case Date:
+ return out << tmToISODateString(val.coerceToTm());
+ case bsonTimestamp:
+ return out << val.getTimestamp().toString();
+ case Object:
+ return out << val.getDocument().toString();
case Array: {
out << "[";
const size_t n = val.getArray().size();
- for(size_t i = 0; i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
if (i)
out << ", ";
out << val.getArray()[i];
@@ -928,26 +967,25 @@ namespace mongo {
case CodeWScope:
return out << "CodeWScope(\"" << val._storage.getCodeWScope()->code << "\", "
- << val._storage.getCodeWScope()->scope << ')';
+ << val._storage.getCodeWScope()->scope << ')';
- case BinData:
+ case BinData:
return out << "BinData(" << val._storage.binDataType() << ", \""
- << toHex(val._storage.getString().rawData()
- ,val._storage.getString().size())
- << "\")";
+ << toHex(val._storage.getString().rawData(), val._storage.getString().size())
+ << "\")";
case DBRef:
return out << "DBRef(\"" << val._storage.getDBRef()->ns << "\", "
- << val._storage.getDBRef()->oid << ')';
- }
-
- // Not in default case to trigger better warning if a case is missing
- verify(false);
+ << val._storage.getDBRef()->oid << ')';
}
- void Value::serializeForSorter(BufBuilder& buf) const {
- buf.appendChar(getType());
- switch(getType()) {
+ // Not in default case to trigger better warning if a case is missing
+ verify(false);
+}
+
+void Value::serializeForSorter(BufBuilder& buf) const {
+ buf.appendChar(getType());
+ switch (getType()) {
// type-only types
case EOO:
case MinKey:
@@ -957,13 +995,27 @@ namespace mongo {
break;
// simple types
- case jstOID: buf.appendStruct(_storage.oid); break;
- case NumberInt: buf.appendNum(_storage.intValue); break;
- case NumberLong: buf.appendNum(_storage.longValue); break;
- case NumberDouble: buf.appendNum(_storage.doubleValue); break;
- case Bool: buf.appendChar(_storage.boolValue); break;
- case Date: buf.appendNum(_storage.dateValue); break;
- case bsonTimestamp: buf.appendStruct(getTimestamp()); break;
+ case jstOID:
+ buf.appendStruct(_storage.oid);
+ break;
+ case NumberInt:
+ buf.appendNum(_storage.intValue);
+ break;
+ case NumberLong:
+ buf.appendNum(_storage.longValue);
+ break;
+ case NumberDouble:
+ buf.appendNum(_storage.doubleValue);
+ break;
+ case Bool:
+ buf.appendChar(_storage.boolValue);
+ break;
+ case Date:
+ buf.appendNum(_storage.dateValue);
+ break;
+ case bsonTimestamp:
+ buf.appendStruct(getTimestamp());
+ break;
// types that are like strings
case String:
@@ -1003,7 +1055,7 @@ namespace mongo {
buf.appendStr(cws->code, /*NUL byte*/ false);
cws->scope.serializeForSorter(buf);
break;
- }
+ }
case Array: {
const vector<Value>& array = getArray();
@@ -1013,12 +1065,12 @@ namespace mongo {
array[i].serializeForSorter(buf);
break;
}
- }
}
+}
- Value Value::deserializeForSorter(BufReader& buf, const SorterDeserializeSettings& settings) {
- const BSONType type = BSONType(buf.read<signed char>()); // need sign extension for MinKey
- switch(type) {
+Value Value::deserializeForSorter(BufReader& buf, const SorterDeserializeSettings& settings) {
+ const BSONType type = BSONType(buf.read<signed char>()); // need sign extension for MinKey
+ switch (type) {
// type-only types
case EOO:
case MinKey:
@@ -1028,13 +1080,20 @@ namespace mongo {
return Value(ValueStorage(type));
// simple types
- case jstOID: return Value(OID::from(buf.skip(OID::kOIDSize)));
- case NumberInt: return Value(buf.read<int>());
- case NumberLong: return Value(buf.read<long long>());
- case NumberDouble: return Value(buf.read<double>());
- case Bool: return Value(bool(buf.read<char>()));
- case Date: return Value(Date_t::fromMillisSinceEpoch(buf.read<long long>()));
- case bsonTimestamp: return Value(buf.read<Timestamp>());
+ case jstOID:
+ return Value(OID::from(buf.skip(OID::kOIDSize)));
+ case NumberInt:
+ return Value(buf.read<int>());
+ case NumberLong:
+ return Value(buf.read<long long>());
+ case NumberDouble:
+ return Value(buf.read<double>());
+ case Bool:
+ return Value(bool(buf.read<char>()));
+ case Date:
+ return Value(Date_t::fromMillisSinceEpoch(buf.read<long long>()));
+ case bsonTimestamp:
+ return Value(buf.read<Timestamp>());
// types that are like strings
case String:
@@ -1059,8 +1118,8 @@ namespace mongo {
}
case Object:
- return Value(Document::deserializeForSorter(buf,
- Document::SorterDeserializeSettings()));
+ return Value(
+ Document::deserializeForSorter(buf, Document::SorterDeserializeSettings()));
case DBRef: {
OID oid = OID::from(buf.skip(OID::kOIDSize));
@@ -1073,7 +1132,7 @@ namespace mongo {
const char* str = static_cast<const char*>(buf.skip(size));
BSONObj bson = BSONObj::deserializeForSorter(buf, BSONObj::SorterDeserializeSettings());
return Value(BSONCodeWScope(StringData(str, size), bson));
- }
+ }
case Array: {
const int numElems = buf.read<int>();
@@ -1083,7 +1142,7 @@ namespace mongo {
array.push_back(deserializeForSorter(buf, settings));
return Value(std::move(array));
}
- }
- verify(false);
}
+ verify(false);
+}
}
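
The negative-date branch of Value::coerceToTimeT() above biases the millisecond count so that truncating integer division behaves like floor division before converting to seconds. As a standalone illustration only (not part of this patch; the helper name millisToSeconds is hypothetical), the same arithmetic can be sketched as:

#include <cassert>

// Hypothetical standalone helper (not MongoDB code) mirroring the adjustment
// made in Value::coerceToTimeT() before dividing milliseconds by 1000.
long long millisToSeconds(long long millis) {
    if (millis < 0) {
        if (-1999 / 1000 != -2) {  // true when integer division truncates toward zero
            millis -= 1000 - 1;    // bias the dividend so truncation lands on the floor
        }
    }
    return millis / 1000;
}

int main() {
    assert(millisToSeconds(-1999) == -2);  // Dec 31, 1969 23:59:58.001 -> -2 seconds, not -1
    assert(millisToSeconds(-2000) == -2);  // exact second boundaries are unchanged
    assert(millisToSeconds(1999) == 1);    // positive values truncate as usual
    return 0;
}

With millis = -1999, plain truncation gives -1, but subtracting 999 first yields -2998 / 1000 = -2, the floor value that coerceToTm() needs.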
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index 41201f64873..0852407a84a 100644
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -33,328 +33,338 @@
#include "mongo/platform/unordered_set.h"
namespace mongo {
- class BSONElement;
+class BSONElement;
- /** A variant type that can hold any type of data representable in BSON
+/** A variant type that can hold any type of data representable in BSON
+ *
+ * Small values are stored inline, but some values, such as large strings,
+ * are heap allocated. It has smart pointer capabilities built-in so it is
+ * safe and recommended to pass these around and return them by value.
+ *
+ * Values are immutable, but can be assigned. This means that once you have
+ * a Value, you can be assured that none of the data in that Value will
+ * change. However, if you have a non-const Value you can replace it with
+ * operator=. These rules are the same as BSONObj, and similar to
+ * shared_ptr<const Object> with stronger guarantees of constness. This is
+ * also the same as Java's String type.
+ *
+ * Thread-safety: A single Value instance can be safely shared between
+ * threads as long as there are no writers while other threads are
+ * accessing the object. Any number of threads can read from a Value
+ * concurrently. There are no restrictions on how threads access Value
+ * instances exclusively owned by them, even if they reference the same
+ * storage as Value in other threads.
+ */
+class Value {
+public:
+ /** Construct a Value
*
- * Small values are stored inline, but some values, such as large strings,
- * are heap allocated. It has smart pointer capabilities built-in so it is
- * safe and recommended to pass these around and return them by value.
+ * All types not listed will be rejected rather than converted (see private for why)
*
- * Values are immutable, but can be assigned. This means that once you have
- * a Value, you can be assured that none of the data in that Value will
- * change. However if you have a non-const Value you replace it with
- * operator=. These rules are the same as BSONObj, and similar to
- * shared_ptr<const Object> with stronger guarantees of constness. This is
- * also the same as Java's std::string type.
+ * Note: Currently these are all explicit conversions.
+ * I'm not sure if we want implicit or not.
+ * //TODO decide
+ */
+
+ Value() : _storage() {} // "Missing" value
+ explicit Value(bool value) : _storage(Bool, value) {}
+ explicit Value(int value) : _storage(NumberInt, value) {}
+ explicit Value(long long value) : _storage(NumberLong, value) {}
+ explicit Value(double value) : _storage(NumberDouble, value) {}
+ explicit Value(const Timestamp& value) : _storage(bsonTimestamp, value) {}
+ explicit Value(const OID& value) : _storage(jstOID, value) {}
+ explicit Value(StringData value) : _storage(String, value) {}
+ explicit Value(const std::string& value) : _storage(String, StringData(value)) {}
+ explicit Value(const char* value) : _storage(String, StringData(value)) {}
+ explicit Value(const Document& doc) : _storage(Object, doc) {}
+ explicit Value(const BSONObj& obj);
+ explicit Value(const BSONArray& arr);
+ explicit Value(std::vector<Value> vec) : _storage(Array, new RCVector(std::move(vec))) {}
+ explicit Value(const BSONBinData& bd) : _storage(BinData, bd) {}
+ explicit Value(const BSONRegEx& re) : _storage(RegEx, re) {}
+ explicit Value(const BSONCodeWScope& cws) : _storage(CodeWScope, cws) {}
+ explicit Value(const BSONDBRef& dbref) : _storage(DBRef, dbref) {}
+ explicit Value(const BSONSymbol& sym) : _storage(Symbol, sym.symbol) {}
+ explicit Value(const BSONCode& code) : _storage(Code, code.code) {}
+ explicit Value(const NullLabeler&) : _storage(jstNULL) {} // BSONNull
+ explicit Value(const UndefinedLabeler&) : _storage(Undefined) {} // BSONUndefined
+ explicit Value(const MinKeyLabeler&) : _storage(MinKey) {} // MINKEY
+ explicit Value(const MaxKeyLabeler&) : _storage(MaxKey) {} // MAXKEY
+ explicit Value(const Date_t& date) : _storage(Date, date.toMillisSinceEpoch()) {}
+
+ // TODO: add an unsafe version that can share storage with the BSONElement
+ /// Deep-convert from BSONElement to Value
+ explicit Value(const BSONElement& elem);
+
+ /** Construct a long or integer-valued Value.
*
- * Thread-safety: A single Value instance can be safely shared between
- * threads as long as there are no writers while other threads are
- * accessing the object. Any number of threads can read from a Value
- * concurrently. There are no restrictions on how threads access Value
- * instances exclusively owned by them, even if they reference the same
- * storage as Value in other threads.
+ * Used when performing arithmetic operations with int where the
+ * result may be too large and needs to be stored as long. The Value
+ * will be an int if the value fits, otherwise it will be a long.
+ */
+ static Value createIntOrLong(long long value);
+
+ /** A "missing" value indicates the lack of a Value.
+ * This is similar to undefined/null but should not appear in output to BSON.
+ * Missing Values are returned by Document when accessing non-existent fields.
*/
- class Value {
- public:
- /** Construct a Value
- *
- * All types not listed will be rejected rather than converted (see private for why)
- *
- * Note: Currently these are all explicit conversions.
- * I'm not sure if we want implicit or not.
- * //TODO decide
- */
-
- Value(): _storage() {} // "Missing" value
- explicit Value(bool value) : _storage(Bool, value) {}
- explicit Value(int value) : _storage(NumberInt, value) {}
- explicit Value(long long value) : _storage(NumberLong, value) {}
- explicit Value(double value) : _storage(NumberDouble, value) {}
- explicit Value(const Timestamp& value) : _storage(bsonTimestamp, value) {}
- explicit Value(const OID& value) : _storage(jstOID, value) {}
- explicit Value(StringData value) : _storage(String, value) {}
- explicit Value(const std::string& value) : _storage(String, StringData(value)) {}
- explicit Value(const char* value) : _storage(String, StringData(value)) {}
- explicit Value(const Document& doc) : _storage(Object, doc) {}
- explicit Value(const BSONObj& obj);
- explicit Value(const BSONArray& arr);
- explicit Value(std::vector<Value> vec) : _storage(Array, new RCVector(std::move(vec))) {}
- explicit Value(const BSONBinData& bd) : _storage(BinData, bd) {}
- explicit Value(const BSONRegEx& re) : _storage(RegEx, re) {}
- explicit Value(const BSONCodeWScope& cws) : _storage(CodeWScope, cws) {}
- explicit Value(const BSONDBRef& dbref) : _storage(DBRef, dbref) {}
- explicit Value(const BSONSymbol& sym) : _storage(Symbol, sym.symbol) {}
- explicit Value(const BSONCode& code) : _storage(Code, code.code) {}
- explicit Value(const NullLabeler&) : _storage(jstNULL) {} // BSONNull
- explicit Value(const UndefinedLabeler&) : _storage(Undefined) {} // BSONUndefined
- explicit Value(const MinKeyLabeler&) : _storage(MinKey) {} // MINKEY
- explicit Value(const MaxKeyLabeler&) : _storage(MaxKey) {} // MAXKEY
- explicit Value(const Date_t& date) : _storage(Date, date.toMillisSinceEpoch()) {}
-
- // TODO: add an unsafe version that can share storage with the BSONElement
- /// Deep-convert from BSONElement to Value
- explicit Value(const BSONElement& elem);
-
- /** Construct a long or integer-valued Value.
- *
- * Used when preforming arithmetic operations with int where the
- * result may be too large and need to be stored as long. The Value
- * will be an int if value fits, otherwise it will be a long.
- */
- static Value createIntOrLong(long long value);
-
- /** A "missing" value indicates the lack of a Value.
- * This is similar to undefined/null but should not appear in output to BSON.
- * Missing Values are returned by Document when accessing non-existent fields.
- */
- bool missing() const { return _storage.type == EOO; }
-
- /// true if missing() or type is jstNULL or Undefined
- bool nullish() const {
- return missing()
- || _storage.type == jstNULL
- || _storage.type == Undefined;
- }
+ bool missing() const {
+ return _storage.type == EOO;
+ }
- /// true if type represents a number
- bool numeric() const {
- return _storage.type == NumberDouble
- || _storage.type == NumberLong
- || _storage.type == NumberInt;
- }
+ /// true if missing() or type is jstNULL or Undefined
+ bool nullish() const {
+ return missing() || _storage.type == jstNULL || _storage.type == Undefined;
+ }
- /**
- * Returns true if this value is a numeric type that can be represented as a 32-bit integer,
- * and false otherwise.
- */
- bool integral() const;
-
- /// Get the BSON type of the field.
- BSONType getType() const { return _storage.bsonType(); }
-
- /** Exact type getters.
- * Asserts if the requested value type is not exactly correct.
- * See coerceTo methods below for a more type-flexible alternative.
- */
- double getDouble() const;
- std::string getString() const;
- Document getDocument() const;
- OID getOid() const;
- bool getBool() const;
- long long getDate() const; // in milliseconds
- Timestamp getTimestamp() const;
- const char* getRegex() const;
- const char* getRegexFlags() const;
- std::string getSymbol() const;
- std::string getCode() const;
- int getInt() const;
- long long getLong() const;
- const std::vector<Value>& getArray() const { return _storage.getArray(); }
- size_t getArrayLength() const;
-
- /// Access an element of a subarray. Returns Value() if missing or getType() != Array
- Value operator[] (size_t index) const;
-
- /// Access a field of a subdocument. Returns Value() if missing or getType() != Object
- Value operator[] (StringData name) const;
-
- /// Add this value to the BSON object under construction.
- void addToBsonObj(BSONObjBuilder* pBuilder, StringData fieldName) const;
-
- /// Add this field to the BSON array under construction.
- void addToBsonArray(BSONArrayBuilder* pBuilder) const;
-
- // Support BSONObjBuilder and BSONArrayBuilder "stream" API
- friend BSONObjBuilder& operator << (BSONObjBuilderValueStream& builder, const Value& val);
-
- /** Coerce a value to a bool using BSONElement::trueValue() rules.
- */
- bool coerceToBool() const;
-
- /** Coercion operators to extract values with fuzzy type logic.
- *
- * These currently assert if called on an unconvertible type.
- * TODO: decided how to handle unsupported types.
- */
- std::string coerceToString() const;
- int coerceToInt() const;
- long long coerceToLong() const;
- double coerceToDouble() const;
- Timestamp coerceToTimestamp() const;
- long long coerceToDate() const;
- time_t coerceToTimeT() const;
- tm coerceToTm() const; // broken-out time struct (see man gmtime)
-
-
- /** Compare two Values.
- * @returns an integer less than zero, zero, or an integer greater than
- * zero, depending on whether lhs < rhs, lhs == rhs, or lhs > rhs
- * Warning: may return values other than -1, 0, or 1
- */
- static int compare(const Value& lhs, const Value& rhs);
-
- friend
- bool operator==(const Value& v1, const Value& v2) {
- if (v1._storage.identical(v2._storage)) {
- // Simple case
- return true;
- }
- return (Value::compare(v1, v2) == 0);
- }
-
- friend bool operator!=(const Value& v1, const Value& v2) {
- return !(v1 == v2);
- }
+ /// true if type represents a number
+ bool numeric() const {
+ return _storage.type == NumberDouble || _storage.type == NumberLong ||
+ _storage.type == NumberInt;
+ }
- friend bool operator<(const Value& lhs, const Value& rhs) {
- return (Value::compare(lhs, rhs) < 0);
- }
+ /**
+ * Returns true if this value is a numeric type that can be represented as a 32-bit integer,
+ * and false otherwise.
+ */
+ bool integral() const;
- /// This is for debugging, logging, etc. See getString() for how to extract a string.
- std::string toString() const;
- friend std::ostream& operator << (std::ostream& out, const Value& v);
+ /// Get the BSON type of the field.
+ BSONType getType() const {
+ return _storage.bsonType();
+ }
- void swap(Value& rhs) {
- _storage.swap(rhs._storage);
- }
+ /** Exact type getters.
+ * Asserts if the requested value type is not exactly correct.
+ * See coerceTo methods below for a more type-flexible alternative.
+ */
+ double getDouble() const;
+ std::string getString() const;
+ Document getDocument() const;
+ OID getOid() const;
+ bool getBool() const;
+ long long getDate() const; // in milliseconds
+ Timestamp getTimestamp() const;
+ const char* getRegex() const;
+ const char* getRegexFlags() const;
+ std::string getSymbol() const;
+ std::string getCode() const;
+ int getInt() const;
+ long long getLong() const;
+ const std::vector<Value>& getArray() const {
+ return _storage.getArray();
+ }
+ size_t getArrayLength() const;
- /** Figure out what the widest of two numeric types is.
- *
- * Widest can be thought of as "most capable," or "able to hold the
- * largest or most precise value." The progression is Int, Long, Double.
- */
- static BSONType getWidestNumeric(BSONType lType, BSONType rType);
-
- /// Get the approximate memory size of the value, in bytes. Includes sizeof(Value)
- size_t getApproximateSize() const;
-
- /** Calculate a hash value.
- *
- * Meant to be used to create composite hashes suitable for
- * hashed container classes such as unordered_map<>.
- */
- void hash_combine(size_t& seed) const;
-
- /// struct Hash is defined to enable the use of Values as keys in unordered_map.
- struct Hash : std::unary_function<const Value&, size_t> {
- size_t operator()(const Value& rV) const;
- };
-
- /// Call this after memcpying to update ref counts if needed
- void memcpyed() const { _storage.memcpyed(); }
-
- /// members for Sorter
- struct SorterDeserializeSettings {}; // unused
- void serializeForSorter(BufBuilder& buf) const;
- static Value deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&);
- int memUsageForSorter() const { return getApproximateSize(); }
- Value getOwned() const { return *this; }
-
- private:
- /** This is a "honeypot" to prevent unexpected implicit conversions to the accepted argument
- * types. bool is especially bad since without this it will accept any pointer.
- *
- * Template argument name was chosen to make produced error easier to read.
- */
- template <typename InvalidArgumentType>
- explicit Value(const InvalidArgumentType& invalidArgument);
-
- explicit Value(const ValueStorage& storage) :_storage(storage) {}
-
- // does no type checking
- StringData getStringData() const; // May contain embedded NUL bytes
-
- ValueStorage _storage;
- friend class MutableValue; // gets and sets _storage.genericRCPtr
- };
- BOOST_STATIC_ASSERT(sizeof(Value) == 16);
+ /// Access an element of a subarray. Returns Value() if missing or getType() != Array
+ Value operator[](size_t index) const;
- typedef unordered_set<Value, Value::Hash> ValueSet;
-}
+ /// Access a field of a subdocument. Returns Value() if missing or getType() != Object
+ Value operator[](StringData name) const;
-namespace std {
- // This is used by std::sort and others
- template <>
- inline void swap(mongo::Value& lhs, mongo::Value& rhs) { lhs.swap(rhs); }
-}
+ /// Add this value to the BSON object under construction.
+ void addToBsonObj(BSONObjBuilder* pBuilder, StringData fieldName) const;
-/* ======================= INLINED IMPLEMENTATIONS ========================== */
+ /// Add this field to the BSON array under construction.
+ void addToBsonArray(BSONArrayBuilder* pBuilder) const;
-namespace mongo {
+ // Support BSONObjBuilder and BSONArrayBuilder "stream" API
+ friend BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Value& val);
- inline size_t Value::getArrayLength() const {
- verify(getType() == Array);
- return getArray().size();
- }
+ /** Coerce a value to a bool using BSONElement::trueValue() rules.
+ */
+ bool coerceToBool() const;
- inline size_t Value::Hash::operator()(const Value& v) const {
- size_t seed = 0xf0afbeef;
- v.hash_combine(seed);
- return seed;
- }
+ /** Coercion operators to extract values with fuzzy type logic.
+ *
+ * These currently assert if called on an unconvertible type.
+ * TODO: decide how to handle unsupported types.
+ */
+ std::string coerceToString() const;
+ int coerceToInt() const;
+ long long coerceToLong() const;
+ double coerceToDouble() const;
+ Timestamp coerceToTimestamp() const;
+ long long coerceToDate() const;
+ time_t coerceToTimeT() const;
+ tm coerceToTm() const; // broken-out time struct (see man gmtime)
+
+
+ /** Compare two Values.
+ * @returns an integer less than zero, zero, or an integer greater than
+ * zero, depending on whether lhs < rhs, lhs == rhs, or lhs > rhs
+ * Warning: may return values other than -1, 0, or 1
+ */
+ static int compare(const Value& lhs, const Value& rhs);
- inline StringData Value::getStringData() const {
- return _storage.getString();
+ friend bool operator==(const Value& v1, const Value& v2) {
+ if (v1._storage.identical(v2._storage)) {
+ // Simple case
+ return true;
+ }
+ return (Value::compare(v1, v2) == 0);
}
- inline std::string Value::getString() const {
- verify(getType() == String);
- return _storage.getString().toString();
+ friend bool operator!=(const Value& v1, const Value& v2) {
+ return !(v1 == v2);
}
- inline OID Value::getOid() const {
- verify(getType() == jstOID);
- return OID(_storage.oid);
+ friend bool operator<(const Value& lhs, const Value& rhs) {
+ return (Value::compare(lhs, rhs) < 0);
}
- inline bool Value::getBool() const {
- verify(getType() == Bool);
- return _storage.boolValue;
- }
+ /// This is for debugging, logging, etc. See getString() for how to extract a string.
+ std::string toString() const;
+ friend std::ostream& operator<<(std::ostream& out, const Value& v);
- inline long long Value::getDate() const {
- verify(getType() == Date);
- return _storage.dateValue;
+ void swap(Value& rhs) {
+ _storage.swap(rhs._storage);
}
- inline Timestamp Value::getTimestamp() const {
- verify(getType() == bsonTimestamp);
- return Timestamp(_storage.timestampValue);
- }
+ /** Figure out what the widest of two numeric types is.
+ *
+ * Widest can be thought of as "most capable," or "able to hold the
+ * largest or most precise value." The progression is Int, Long, Double.
+ */
+ static BSONType getWidestNumeric(BSONType lType, BSONType rType);
- inline const char* Value::getRegex() const {
- verify(getType() == RegEx);
- return _storage.getString().rawData(); // this is known to be NUL terminated
- }
- inline const char* Value::getRegexFlags() const {
- verify(getType() == RegEx);
- const char* pattern = _storage.getString().rawData(); // this is known to be NUL terminated
- const char* flags = pattern + strlen(pattern) + 1; // first byte after pattern's NUL
- dassert(flags + strlen(flags) == pattern + _storage.getString().size());
- return flags;
+ /// Get the approximate memory size of the value, in bytes. Includes sizeof(Value)
+ size_t getApproximateSize() const;
+
+ /** Calculate a hash value.
+ *
+ * Meant to be used to create composite hashes suitable for
+ * hashed container classes such as unordered_map<>.
+ */
+ void hash_combine(size_t& seed) const;
+
+ /// struct Hash is defined to enable the use of Values as keys in unordered_map.
+ struct Hash : std::unary_function<const Value&, size_t> {
+ size_t operator()(const Value& rV) const;
+ };
+
+ /// Call this after memcpying to update ref counts if needed
+ void memcpyed() const {
+ _storage.memcpyed();
}
- inline std::string Value::getSymbol() const {
- verify(getType() == Symbol);
- return _storage.getString().toString();
+ /// members for Sorter
+ struct SorterDeserializeSettings {}; // unused
+ void serializeForSorter(BufBuilder& buf) const;
+ static Value deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&);
+ int memUsageForSorter() const {
+ return getApproximateSize();
}
- inline std::string Value::getCode() const {
- verify(getType() == Code);
- return _storage.getString().toString();
+ Value getOwned() const {
+ return *this;
}
- inline int Value::getInt() const {
- verify(getType() == NumberInt);
- return _storage.intValue;
- }
+private:
+ /** This is a "honeypot" to prevent unexpected implicit conversions to the accepted argument
+ * types. bool is especially bad since without this it will accept any pointer.
+ *
+ * Template argument name was chosen to make produced error easier to read.
+ */
+ template <typename InvalidArgumentType>
+ explicit Value(const InvalidArgumentType& invalidArgument);
- inline long long Value::getLong() const {
- BSONType type = getType();
- if (type == NumberInt)
- return _storage.intValue;
+ explicit Value(const ValueStorage& storage) : _storage(storage) {}
- verify(type == NumberLong);
- return _storage.longValue;
- }
+ // does no type checking
+ StringData getStringData() const; // May contain embedded NUL bytes
+
+ ValueStorage _storage;
+ friend class MutableValue; // gets and sets _storage.genericRCPtr
+};
+BOOST_STATIC_ASSERT(sizeof(Value) == 16);
+
+typedef unordered_set<Value, Value::Hash> ValueSet;
+}
+
+namespace std {
+// This is used by std::sort and others
+template <>
+inline void swap(mongo::Value& lhs, mongo::Value& rhs) {
+ lhs.swap(rhs);
+}
+}
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+inline size_t Value::getArrayLength() const {
+ verify(getType() == Array);
+ return getArray().size();
+}
+
+inline size_t Value::Hash::operator()(const Value& v) const {
+ size_t seed = 0xf0afbeef;
+ v.hash_combine(seed);
+ return seed;
+}
+
+inline StringData Value::getStringData() const {
+ return _storage.getString();
+}
+
+inline std::string Value::getString() const {
+ verify(getType() == String);
+ return _storage.getString().toString();
+}
+
+inline OID Value::getOid() const {
+ verify(getType() == jstOID);
+ return OID(_storage.oid);
+}
+
+inline bool Value::getBool() const {
+ verify(getType() == Bool);
+ return _storage.boolValue;
+}
+
+inline long long Value::getDate() const {
+ verify(getType() == Date);
+ return _storage.dateValue;
+}
+
+inline Timestamp Value::getTimestamp() const {
+ verify(getType() == bsonTimestamp);
+ return Timestamp(_storage.timestampValue);
+}
+
+inline const char* Value::getRegex() const {
+ verify(getType() == RegEx);
+ return _storage.getString().rawData(); // this is known to be NUL terminated
+}
+inline const char* Value::getRegexFlags() const {
+ verify(getType() == RegEx);
+ const char* pattern = _storage.getString().rawData(); // this is known to be NUL terminated
+ const char* flags = pattern + strlen(pattern) + 1; // first byte after pattern's NUL
+ dassert(flags + strlen(flags) == pattern + _storage.getString().size());
+ return flags;
+}
+
+inline std::string Value::getSymbol() const {
+ verify(getType() == Symbol);
+ return _storage.getString().toString();
+}
+inline std::string Value::getCode() const {
+ verify(getType() == Code);
+ return _storage.getString().toString();
+}
+
+inline int Value::getInt() const {
+ verify(getType() == NumberInt);
+ return _storage.intValue;
+}
+
+inline long long Value::getLong() const {
+ BSONType type = getType();
+ if (type == NumberInt)
+ return _storage.intValue;
+
+ verify(type == NumberLong);
+ return _storage.longValue;
+}
};
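
As a usage sketch only (not part of this patch), the header above can be exercised roughly as follows. It assumes the mongo pipeline headers and library are available to build against, and the function name valueExample is hypothetical. It shows how numeric Values compare by equivalence across NumberInt and NumberLong, and how Value::Hash lets Values act as keys in a ValueSet.

#include "mongo/db/pipeline/value.h"

namespace mongo {

// Hypothetical demonstration function; returns true when all expectations hold.
bool valueExample() {
    Value anInt(5);         // NumberInt
    Value aLong(5LL);       // NumberLong
    Value aString("five");  // String

    // Numbers compare by equivalence even when the underlying BSON types differ.
    bool numbersEqual = (Value::compare(anInt, aLong) == 0) && (anInt == aLong);

    // Values are immutable; copies share storage via the built-in ref counting.
    Value copy = aString;
    bool sameText = (copy.coerceToString() == "five");

    // Value::Hash is kept consistent with compare(), so a ValueSet treats the
    // int 5 and the long 5 as the same key.
    ValueSet seen;
    seen.insert(anInt);
    bool foundAsLong = (seen.count(aLong) == 1);

    return numbersEqual && sameText && foundAsLong;
}

}  // namespace mongo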
diff --git a/src/mongo/db/pipeline/value_internal.h b/src/mongo/db/pipeline/value_internal.h
index 6cb31c8f635..b5b9f5c77ea 100644
--- a/src/mongo/db/pipeline/value_internal.h
+++ b/src/mongo/db/pipeline/value_internal.h
@@ -41,226 +41,275 @@
namespace mongo {
- class Document;
- class DocumentStorage;
- class Value;
-
- //TODO: a MutableVector, similar to MutableDocument
- /// A heap-allocated reference-counted std::vector
- class RCVector : public RefCountable {
- public:
- RCVector() {}
- RCVector(std::vector<Value> v) :vec(std::move(v)) {}
- std::vector<Value> vec;
- };
-
- class RCCodeWScope : public RefCountable {
- public:
- RCCodeWScope(const std::string& str, BSONObj obj) :code(str), scope(obj.getOwned()) {}
- const std::string code;
- const BSONObj scope; // Not worth converting to Document for now
- };
-
- class RCDBRef : public RefCountable {
- public:
- RCDBRef(const std::string& str, const OID& o) :ns(str), oid(o) {}
- const std::string ns;
- const OID oid;
- };
+class Document;
+class DocumentStorage;
+class Value;
+
+// TODO: a MutableVector, similar to MutableDocument
+/// A heap-allocated reference-counted std::vector
+class RCVector : public RefCountable {
+public:
+ RCVector() {}
+ RCVector(std::vector<Value> v) : vec(std::move(v)) {}
+ std::vector<Value> vec;
+};
+
+class RCCodeWScope : public RefCountable {
+public:
+ RCCodeWScope(const std::string& str, BSONObj obj) : code(str), scope(obj.getOwned()) {}
+ const std::string code;
+ const BSONObj scope; // Not worth converting to Document for now
+};
+
+class RCDBRef : public RefCountable {
+public:
+ RCDBRef(const std::string& str, const OID& o) : ns(str), oid(o) {}
+ const std::string ns;
+ const OID oid;
+};
#pragma pack(1)
- class ValueStorage {
- public:
- // Note: it is important the memory is zeroed out (by calling zero()) at the start of every
- // constructor. Much code relies on every byte being predictably initialized to zero.
-
- // This is a "missing" Value
- ValueStorage() { zero(); type = EOO; }
-
- explicit ValueStorage(BSONType t) { zero(); type = t; }
- ValueStorage(BSONType t, int i) { zero(); type = t; intValue = i; }
- ValueStorage(BSONType t, long long l) { zero(); type = t; longValue = l; }
- ValueStorage(BSONType t, double d) { zero(); type = t; doubleValue = d; }
- ValueStorage(BSONType t, Timestamp r) { zero(); type = t; timestampValue = r.asULL(); }
- ValueStorage(BSONType t, bool b) { zero(); type = t; boolValue = b; }
- ValueStorage(BSONType t, const Document& d) { zero(); type = t; putDocument(d); }
- ValueStorage(BSONType t, const RCVector* a) { zero(); type = t; putVector(a); }
- ValueStorage(BSONType t, StringData s) { zero(); type = t; putString(s); }
- ValueStorage(BSONType t, const BSONBinData& bd) { zero(); type = t; putBinData(bd); }
- ValueStorage(BSONType t, const BSONRegEx& re) { zero(); type = t; putRegEx(re); }
- ValueStorage(BSONType t, const BSONCodeWScope& cs) { zero(); type = t; putCodeWScope(cs); }
- ValueStorage(BSONType t, const BSONDBRef& dbref) { zero(); type = t; putDBRef(dbref); }
-
- ValueStorage(BSONType t, const OID& o) {
- zero();
- type = t;
- memcpy(&oid, o.view().view(), OID::kOIDSize);
- }
-
- ValueStorage(const ValueStorage& rhs) {
- memcpy(this, &rhs, sizeof(*this));
- memcpyed();
- }
-
- ~ValueStorage() {
- DEV verifyRefCountingIfShould();
- if (refCounter)
- intrusive_ptr_release(genericRCPtr);
- DEV memset(this, 0xee, sizeof(*this));
- }
-
- ValueStorage& operator= (ValueStorage rhsCopy) {
- this->swap(rhsCopy);
- return *this;
- }
-
- void swap(ValueStorage& rhs) {
- // Don't need to update ref-counts because they will be the same in the end
- char temp[sizeof(ValueStorage)];
- memcpy(temp, this, sizeof(*this));
- memcpy(this, &rhs, sizeof(*this));
- memcpy(&rhs, temp, sizeof(*this));
+class ValueStorage {
+public:
+ // Note: it is important the memory is zeroed out (by calling zero()) at the start of every
+ // constructor. Much code relies on every byte being predictably initialized to zero.
+
+ // This is a "missing" Value
+ ValueStorage() {
+ zero();
+ type = EOO;
+ }
+
+ explicit ValueStorage(BSONType t) {
+ zero();
+ type = t;
+ }
+ ValueStorage(BSONType t, int i) {
+ zero();
+ type = t;
+ intValue = i;
+ }
+ ValueStorage(BSONType t, long long l) {
+ zero();
+ type = t;
+ longValue = l;
+ }
+ ValueStorage(BSONType t, double d) {
+ zero();
+ type = t;
+ doubleValue = d;
+ }
+ ValueStorage(BSONType t, Timestamp r) {
+ zero();
+ type = t;
+ timestampValue = r.asULL();
+ }
+ ValueStorage(BSONType t, bool b) {
+ zero();
+ type = t;
+ boolValue = b;
+ }
+ ValueStorage(BSONType t, const Document& d) {
+ zero();
+ type = t;
+ putDocument(d);
+ }
+ ValueStorage(BSONType t, const RCVector* a) {
+ zero();
+ type = t;
+ putVector(a);
+ }
+ ValueStorage(BSONType t, StringData s) {
+ zero();
+ type = t;
+ putString(s);
+ }
+ ValueStorage(BSONType t, const BSONBinData& bd) {
+ zero();
+ type = t;
+ putBinData(bd);
+ }
+ ValueStorage(BSONType t, const BSONRegEx& re) {
+ zero();
+ type = t;
+ putRegEx(re);
+ }
+ ValueStorage(BSONType t, const BSONCodeWScope& cs) {
+ zero();
+ type = t;
+ putCodeWScope(cs);
+ }
+ ValueStorage(BSONType t, const BSONDBRef& dbref) {
+ zero();
+ type = t;
+ putDBRef(dbref);
+ }
+
+ ValueStorage(BSONType t, const OID& o) {
+ zero();
+ type = t;
+ memcpy(&oid, o.view().view(), OID::kOIDSize);
+ }
+
+ ValueStorage(const ValueStorage& rhs) {
+ memcpy(this, &rhs, sizeof(*this));
+ memcpyed();
+ }
+
+ ~ValueStorage() {
+ DEV verifyRefCountingIfShould();
+ if (refCounter)
+ intrusive_ptr_release(genericRCPtr);
+ DEV memset(this, 0xee, sizeof(*this));
+ }
+
+ ValueStorage& operator=(ValueStorage rhsCopy) {
+ this->swap(rhsCopy);
+ return *this;
+ }
+
+ void swap(ValueStorage& rhs) {
+ // Don't need to update ref-counts because they will be the same in the end
+ char temp[sizeof(ValueStorage)];
+ memcpy(temp, this, sizeof(*this));
+ memcpy(this, &rhs, sizeof(*this));
+ memcpy(&rhs, temp, sizeof(*this));
+ }
+
+ /// Call this after memcpying to update ref counts if needed
+ void memcpyed() const {
+ DEV verifyRefCountingIfShould();
+ if (refCounter)
+ intrusive_ptr_add_ref(genericRCPtr);
+ }
+
+ /// These are only to be called during Value construction on an empty Value
+ void putString(StringData s);
+ void putVector(const RCVector* v);
+ void putDocument(const Document& d);
+ void putRegEx(const BSONRegEx& re);
+ void putBinData(const BSONBinData& bd) {
+ putRefCountable(RCString::create(StringData(static_cast<const char*>(bd.data), bd.length)));
+ binSubType = bd.type;
+ }
+
+ void putDBRef(const BSONDBRef& dbref) {
+ putRefCountable(new RCDBRef(dbref.ns.toString(), dbref.oid));
+ }
+
+ void putCodeWScope(const BSONCodeWScope& cws) {
+ putRefCountable(new RCCodeWScope(cws.code.toString(), cws.scope));
+ }
+
+ void putRefCountable(boost::intrusive_ptr<const RefCountable> ptr) {
+ genericRCPtr = ptr.get();
+
+ if (genericRCPtr) {
+ intrusive_ptr_add_ref(genericRCPtr);
+ refCounter = true;
}
-
- /// Call this after memcpying to update ref counts if needed
- void memcpyed() const {
- DEV verifyRefCountingIfShould();
- if (refCounter)
- intrusive_ptr_add_ref(genericRCPtr);
- }
-
- /// These are only to be called during Value construction on an empty Value
- void putString(StringData s);
- void putVector(const RCVector* v);
- void putDocument(const Document& d);
- void putRegEx(const BSONRegEx& re);
- void putBinData(const BSONBinData& bd) {
- putRefCountable(
- RCString::create(
- StringData(static_cast<const char*>(bd.data), bd.length)));
- binSubType = bd.type;
- }
-
- void putDBRef(const BSONDBRef& dbref) {
- putRefCountable(new RCDBRef(dbref.ns.toString(), dbref.oid));
- }
-
- void putCodeWScope(const BSONCodeWScope& cws) {
- putRefCountable(new RCCodeWScope(cws.code.toString(), cws.scope));
+ DEV verifyRefCountingIfShould();
+ }
+
+ StringData getString() const {
+ if (shortStr) {
+ return StringData(shortStrStorage, shortStrSize);
+ } else {
+ dassert(typeid(*genericRCPtr) == typeid(const RCString));
+ const RCString* stringPtr = static_cast<const RCString*>(genericRCPtr);
+ return StringData(stringPtr->c_str(), stringPtr->size());
}
-
- void putRefCountable(boost::intrusive_ptr<const RefCountable> ptr) {
- genericRCPtr = ptr.get();
-
- if (genericRCPtr) {
- intrusive_ptr_add_ref(genericRCPtr);
- refCounter = true;
- }
- DEV verifyRefCountingIfShould();
- }
-
- StringData getString() const {
- if (shortStr) {
- return StringData(shortStrStorage, shortStrSize);
- }
- else {
- dassert(typeid(*genericRCPtr) == typeid(const RCString));
- const RCString* stringPtr = static_cast<const RCString*>(genericRCPtr);
- return StringData(stringPtr->c_str(), stringPtr->size());
- }
- }
-
- const std::vector<Value>& getArray() const {
- dassert(typeid(*genericRCPtr) == typeid(const RCVector));
- const RCVector* arrayPtr = static_cast<const RCVector*>(genericRCPtr);
- return arrayPtr->vec;
- }
-
- boost::intrusive_ptr<const RCCodeWScope> getCodeWScope() const {
- dassert(typeid(*genericRCPtr) == typeid(const RCCodeWScope));
- return static_cast<const RCCodeWScope*>(genericRCPtr);
- }
-
- boost::intrusive_ptr<const RCDBRef> getDBRef() const {
- dassert(typeid(*genericRCPtr) == typeid(const RCDBRef));
- return static_cast<const RCDBRef*>(genericRCPtr);
- }
-
- // Document is incomplete here so this can't be inline
- Document getDocument() const;
-
- BSONType bsonType() const {
- return BSONType(type);
- }
-
- BinDataType binDataType() const {
- dassert(type == BinData);
- return BinDataType(binSubType);
- }
-
- void zero() {
- memset(this, 0, sizeof(*this));
- }
-
- // Byte-for-byte identical
- bool identical(const ValueStorage& other) const {
- return (i64[0] == other.i64[0]
- && i64[1] == other.i64[1]);
- }
-
- void verifyRefCountingIfShould() const;
-
- // This data is public because this should only be used by Value which would be a friend
- union {
+ }
+
+ const std::vector<Value>& getArray() const {
+ dassert(typeid(*genericRCPtr) == typeid(const RCVector));
+ const RCVector* arrayPtr = static_cast<const RCVector*>(genericRCPtr);
+ return arrayPtr->vec;
+ }
+
+ boost::intrusive_ptr<const RCCodeWScope> getCodeWScope() const {
+ dassert(typeid(*genericRCPtr) == typeid(const RCCodeWScope));
+ return static_cast<const RCCodeWScope*>(genericRCPtr);
+ }
+
+ boost::intrusive_ptr<const RCDBRef> getDBRef() const {
+ dassert(typeid(*genericRCPtr) == typeid(const RCDBRef));
+ return static_cast<const RCDBRef*>(genericRCPtr);
+ }
+
+ // Document is incomplete here so this can't be inline
+ Document getDocument() const;
+
+ BSONType bsonType() const {
+ return BSONType(type);
+ }
+
+ BinDataType binDataType() const {
+ dassert(type == BinData);
+ return BinDataType(binSubType);
+ }
+
+ void zero() {
+ memset(this, 0, sizeof(*this));
+ }
+
+ // Byte-for-byte identical
+ bool identical(const ValueStorage& other) const {
+ return (i64[0] == other.i64[0] && i64[1] == other.i64[1]);
+ }
+
+ void verifyRefCountingIfShould() const;
+
+ // This data is public because this should only be used by Value which would be a friend
+ union {
+ struct {
+ // byte 1
+ signed char type;
+
+ // byte 2
struct {
- // byte 1
- signed char type;
+ bool refCounter : 1; // true if we need to refCount
+ bool shortStr : 1; // true if we are using short strings
+ // reservedFlags: 6;
+ };
+
+ // bytes 3-16;
+ union {
+ unsigned char oid[12];
- // byte 2
struct {
- bool refCounter : 1; // true if we need to refCount
- bool shortStr : 1; // true if we are using short strings
- // reservedFlags: 6;
+ char shortStrSize; // TODO Consider moving into flags union (4 bits)
+ char shortStrStorage[16 /*total bytes*/ - 3 /*offset*/ - 1 /*NUL byte*/];
+ union {
+ char nulTerminator;
+ };
};
- // bytes 3-16;
- union {
- unsigned char oid[12];
-
- struct {
- char shortStrSize; // TODO Consider moving into flags union (4 bits)
- char shortStrStorage[16/*total bytes*/ - 3/*offset*/ - 1/*NUL byte*/];
- union {
- char nulTerminator;
- };
+ struct {
+ union {
+ unsigned char binSubType;
+ char pad[6];
+ char stringCache[6]; // TODO copy first few bytes of strings in here
};
-
- struct {
- union {
- unsigned char binSubType;
- char pad[6];
- char stringCache[6]; // TODO copy first few bytes of strings in here
- };
- union { // 8 bytes long and 8-byte aligned
- // There should be no pointers to non-const data
- const RefCountable* genericRCPtr;
-
- double doubleValue;
- bool boolValue;
- int intValue;
- long long longValue;
- unsigned long long timestampValue;
- long long dateValue;
- };
+ union { // 8 bytes long and 8-byte aligned
+ // There should be no pointers to non-const data
+ const RefCountable* genericRCPtr;
+
+ double doubleValue;
+ bool boolValue;
+ int intValue;
+ long long longValue;
+ unsigned long long timestampValue;
+ long long dateValue;
};
};
};
-
- // covers the whole ValueStorage
- long long i64[2];
};
+
+ // covers the whole ValueStorage
+ long long i64[2];
};
- BOOST_STATIC_ASSERT(sizeof(ValueStorage) == 16);
+};
+BOOST_STATIC_ASSERT(sizeof(ValueStorage) == 16);
#pragma pack()
-
}
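
The packed ValueStorage above is why getApproximateSize() charges nothing extra for short strings: the type byte, flag bits, size byte, and up to 12 characters all fit in the 16-byte layout, while longer strings fall back to a ref-counted RCString on the heap. Below is a minimal sketch of the observable effect (not part of this patch; shortStringExample is a hypothetical name, and the 12-byte cutoff is inferred from the shortStrStorage size shown above).

#include "mongo/db/pipeline/value.h"

#include <cassert>
#include <string>

namespace mongo {

// Hypothetical check of the inline-string behaviour described above.
void shortStringExample() {
    Value inlined(std::string(12, 'x'));  // fits in shortStrStorage, shortStr flag set
    Value heaped(std::string(13, 'x'));   // too long: stored in a heap-allocated RCString

    // Inline strings add no memory beyond the 16-byte Value itself.
    assert(inlined.getApproximateSize() == sizeof(Value));

    // Heap-backed strings additionally account for the RCString and its characters.
    assert(heaped.getApproximateSize() > sizeof(Value));
}

}  // namespace mongo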
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index fd306d8d5e2..77a44c4b834 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -48,69 +48,63 @@
namespace mongo {
- using std::endl;
- using std::string;
+using std::endl;
+using std::string;
namespace repl {
namespace {
- // todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if it does not,
- // at write time, we can just do an insert, which will be faster.
-
- //The count (of batches) and time spent fetching pages before application
- // -- meaning depends on the prefetch behavior: all, _id index, none, etc.)
- TimerStats prefetchIndexStats;
- ServerStatusMetricField<TimerStats> displayPrefetchIndexPages("repl.preload.indexes",
- &prefetchIndexStats );
- TimerStats prefetchDocStats;
- ServerStatusMetricField<TimerStats> displayPrefetchDocPages("repl.preload.docs",
- &prefetchDocStats );
-
- // page in pages needed for all index lookups on a given object
- void prefetchIndexPages(OperationContext* txn,
- Collection* collection,
- const BackgroundSync::IndexPrefetchConfig& prefetchConfig,
- const BSONObj& obj) {
-
- // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op type?
- // One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for inserts.
- // #3 (per op), a big issue would be "too many knobs".
- switch (prefetchConfig) {
+// TODO / idea: when the prefetcher fetches _id on an upsert, it can check whether the record exists. If it does not,
+// then at write time we can just do an insert, which will be faster.
+
+// The count (of batches) and time spent fetching pages before application
+// -- the meaning depends on the prefetch behavior: all, _id index, none, etc.
+TimerStats prefetchIndexStats;
+ServerStatusMetricField<TimerStats> displayPrefetchIndexPages("repl.preload.indexes",
+ &prefetchIndexStats);
+TimerStats prefetchDocStats;
+ServerStatusMetricField<TimerStats> displayPrefetchDocPages("repl.preload.docs", &prefetchDocStats);
+
+// page in pages needed for all index lookups on a given object
+void prefetchIndexPages(OperationContext* txn,
+ Collection* collection,
+ const BackgroundSync::IndexPrefetchConfig& prefetchConfig,
+ const BSONObj& obj) {
+ // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op type?
+ // One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for inserts.
+ // #3 (per op), a big issue would be "too many knobs".
+ switch (prefetchConfig) {
case BackgroundSync::PREFETCH_NONE:
return;
- case BackgroundSync::PREFETCH_ID_ONLY:
- {
- TimerHolder timer( &prefetchIndexStats);
+ case BackgroundSync::PREFETCH_ID_ONLY: {
+ TimerHolder timer(&prefetchIndexStats);
// on the update op case, the call to prefetchRecordPages will touch the _id index.
// thus perhaps this option isn't very useful?
try {
IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(txn);
- if ( !desc )
+ if (!desc)
return;
- IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
- invariant( iam );
+ IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(desc);
+ invariant(iam);
iam->touch(txn, obj);
- }
- catch (const DBException& e) {
+ } catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
}
break;
}
- case BackgroundSync::PREFETCH_ALL:
- {
+ case BackgroundSync::PREFETCH_ALL: {
// indexCount includes all indexes, including ones
// in the process of being built
IndexCatalog::IndexIterator ii =
- collection->getIndexCatalog()->getIndexIterator( txn, true );
- while ( ii.more() ) {
- TimerHolder timer( &prefetchIndexStats);
+ collection->getIndexCatalog()->getIndexIterator(txn, true);
+ while (ii.more()) {
+ TimerHolder timer(&prefetchIndexStats);
// This will page in all index pages for the given object.
try {
IndexDescriptor* desc = ii.next();
- IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
- verify( iam );
+ IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(desc);
+ verify(iam);
iam->touch(txn, obj);
- }
- catch (const DBException& e) {
+ } catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
}
}
@@ -118,126 +112,115 @@ namespace {
}
default:
fassertFailed(16427);
- }
}
-
- // page in the data pages for a record associated with an object
- void prefetchRecordPages(OperationContext* txn,
- Database* db,
- const char* ns,
- const BSONObj& obj) {
-
- BSONElement _id;
- if( obj.getObjectID(_id) ) {
- TimerHolder timer(&prefetchDocStats);
- BSONObjBuilder builder;
- builder.append(_id);
- BSONObj result;
- try {
- if (Helpers::findById(txn, db, ns, builder.done(), result)) {
- // do we want to use Record::touch() here? it's pretty similar.
- volatile char _dummy_char = '\0';
-
- // Touch the first word on every page in order to fault it into memory
- for (int i = 0; i < result.objsize(); i += g_minOSPageSizeBytes) {
- _dummy_char += *(result.objdata() + i);
- }
- // hit the last page, in case we missed it above
- _dummy_char += *(result.objdata() + result.objsize() - 1);
+}
+
+// page in the data pages for a record associated with an object
+void prefetchRecordPages(OperationContext* txn, Database* db, const char* ns, const BSONObj& obj) {
+ BSONElement _id;
+ if (obj.getObjectID(_id)) {
+ TimerHolder timer(&prefetchDocStats);
+ BSONObjBuilder builder;
+ builder.append(_id);
+ BSONObj result;
+ try {
+ if (Helpers::findById(txn, db, ns, builder.done(), result)) {
+ // do we want to use Record::touch() here? it's pretty similar.
+ volatile char _dummy_char = '\0';
+
+ // Touch the first word on every page in order to fault it into memory
+ for (int i = 0; i < result.objsize(); i += g_minOSPageSizeBytes) {
+ _dummy_char += *(result.objdata() + i);
}
+ // hit the last page, in case we missed it above
+ _dummy_char += *(result.objdata() + result.objsize() - 1);
}
- catch(const DBException& e) {
- LOG(2) << "ignoring exception in prefetchRecordPages(): " << e.what() << endl;
- }
+ } catch (const DBException& e) {
+ LOG(2) << "ignoring exception in prefetchRecordPages(): " << e.what() << endl;
}
}
-} // namespace
-
- // prefetch for an oplog operation
- void prefetchPagesForReplicatedOp(OperationContext* txn,
- Database* db,
- const BSONObj& op) {
- invariant(db);
- const BackgroundSync::IndexPrefetchConfig prefetchConfig =
- BackgroundSync::get()->getIndexPrefetchConfig();
- const char *opField;
- const char *opType = op.getStringField("op");
- switch (*opType) {
- case 'i': // insert
- case 'd': // delete
+}
+} // namespace
+
+// prefetch for an oplog operation
+void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSONObj& op) {
+ invariant(db);
+ const BackgroundSync::IndexPrefetchConfig prefetchConfig =
+ BackgroundSync::get()->getIndexPrefetchConfig();
+ const char* opField;
+ const char* opType = op.getStringField("op");
+ switch (*opType) {
+ case 'i': // insert
+ case 'd': // delete
opField = "o";
break;
- case 'u': // update
+ case 'u': // update
opField = "o2";
break;
default:
// prefetch ignores other ops
return;
- }
-
- BSONObj obj = op.getObjectField(opField);
- const char *ns = op.getStringField("ns");
-
- // This will have to change for engines other than MMAP V1, because they might not have
- // means for directly prefetching pages from the collection. For this purpose, acquire S
- // lock on the database, instead of optimizing with IS.
- Lock::CollectionLock collLock(txn->lockState(), ns, MODE_S);
+ }
- Collection* collection = db->getCollection( ns );
- if (!collection) {
- return;
- }
+ BSONObj obj = op.getObjectField(opField);
+ const char* ns = op.getStringField("ns");
- LOG(4) << "index prefetch for op " << *opType << endl;
+ // This will have to change for engines other than MMAP V1, because they might not have
+ // means for directly prefetching pages from the collection. For this purpose, acquire S
+ // lock on the database, instead of optimizing with IS.
+ Lock::CollectionLock collLock(txn->lockState(), ns, MODE_S);
- // should we prefetch index pages on updates? if the update is in-place and doesn't change
- // indexed values, it is actually slower - a lot slower if there are a dozen indexes or
- // lots of multikeys. possible variations (not all mutually exclusive):
- // 1) current behavior: full prefetch
- // 2) don't do it for updates
- // 3) don't do multikey indexes for updates
- // 4) don't prefetchIndexPages on some heuristic; e.g., if it's an $inc.
- // 5) if not prefetching index pages (#2), we should do it if we are upsertings and it
- // will be an insert. to do that we could do the prefetchRecordPage first and if DNE
- // then we do #1.
- //
- // note that on deletes 'obj' does not have all the keys we would want to prefetch on.
- // a way to achieve that would be to prefetch the record first, and then afterwards do
- // this part.
- //
- prefetchIndexPages(txn, collection, prefetchConfig, obj);
+ Collection* collection = db->getCollection(ns);
+ if (!collection) {
+ return;
+ }
- // do not prefetch the data for inserts; it doesn't exist yet
- //
- // we should consider doing the record prefetch for the delete op case as we hit the record
- // when we delete. note if done we only want to touch the first page.
- //
- // update: do record prefetch.
- if ((*opType == 'u') &&
- // do not prefetch the data for capped collections because
- // they typically do not have an _id index for findById() to use.
- !collection->isCapped()) {
- prefetchRecordPages(txn, db, ns, obj);
- }
+ LOG(4) << "index prefetch for op " << *opType << endl;
+
+ // should we prefetch index pages on updates? if the update is in-place and doesn't change
+ // indexed values, it is actually slower - a lot slower if there are a dozen indexes or
+ // lots of multikeys. possible variations (not all mutually exclusive):
+ // 1) current behavior: full prefetch
+ // 2) don't do it for updates
+ // 3) don't do multikey indexes for updates
+ // 4) don't prefetchIndexPages on some heuristic; e.g., if it's an $inc.
+    //     5) if not prefetching index pages (#2), we should do it if we are upserting and it
+ // will be an insert. to do that we could do the prefetchRecordPage first and if DNE
+ // then we do #1.
+ //
+ // note that on deletes 'obj' does not have all the keys we would want to prefetch on.
+ // a way to achieve that would be to prefetch the record first, and then afterwards do
+ // this part.
+ //
+ prefetchIndexPages(txn, collection, prefetchConfig, obj);
+
+ // do not prefetch the data for inserts; it doesn't exist yet
+ //
+ // we should consider doing the record prefetch for the delete op case as we hit the record
+ // when we delete. note if done we only want to touch the first page.
+ //
+ // update: do record prefetch.
+ if ((*opType == 'u') &&
+ // do not prefetch the data for capped collections because
+ // they typically do not have an _id index for findById() to use.
+ !collection->isCapped()) {
+ prefetchRecordPages(txn, db, ns, obj);
}
+}
- class ReplIndexPrefetch : public ServerParameter {
- public:
- ReplIndexPrefetch()
- : ServerParameter( ServerParameterSet::getGlobal(), "replIndexPrefetch" ) {
- }
+class ReplIndexPrefetch : public ServerParameter {
+public:
+ ReplIndexPrefetch() : ServerParameter(ServerParameterSet::getGlobal(), "replIndexPrefetch") {}
- virtual ~ReplIndexPrefetch() {
- }
+ virtual ~ReplIndexPrefetch() {}
- const char * _value() {
- if (getGlobalReplicationCoordinator()->getReplicationMode() !=
- ReplicationCoordinator::modeReplSet) {
- return "uninitialized";
- }
- BackgroundSync::IndexPrefetchConfig ip =
- BackgroundSync::get()->getIndexPrefetchConfig();
- switch (ip) {
+ const char* _value() {
+ if (getGlobalReplicationCoordinator()->getReplicationMode() !=
+ ReplicationCoordinator::modeReplSet) {
+ return "uninitialized";
+ }
+ BackgroundSync::IndexPrefetchConfig ip = BackgroundSync::get()->getIndexPrefetchConfig();
+ switch (ip) {
case BackgroundSync::PREFETCH_NONE:
return "none";
case BackgroundSync::PREFETCH_ID_ONLY:
@@ -246,44 +229,44 @@ namespace {
return "all";
default:
return "invalid";
- }
- }
-
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const string& name) {
- b.append( name, _value() );
}
+ }
- virtual Status set( const BSONElement& newValueElement ) {
- if (getGlobalReplicationCoordinator()->getReplicationMode() !=
- ReplicationCoordinator::modeReplSet) {
- return Status( ErrorCodes::BadValue, "replication is not enabled" );
- }
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const string& name) {
+ b.append(name, _value());
+ }
- std::string prefetch = newValueElement.valuestrsafe();
- return setFromString( prefetch );
+ virtual Status set(const BSONElement& newValueElement) {
+ if (getGlobalReplicationCoordinator()->getReplicationMode() !=
+ ReplicationCoordinator::modeReplSet) {
+ return Status(ErrorCodes::BadValue, "replication is not enabled");
}
- virtual Status setFromString( const string& prefetch ) {
- log() << "changing replication index prefetch behavior to " << prefetch << endl;
+ std::string prefetch = newValueElement.valuestrsafe();
+ return setFromString(prefetch);
+ }
- BackgroundSync::IndexPrefetchConfig prefetchConfig;
+ virtual Status setFromString(const string& prefetch) {
+ log() << "changing replication index prefetch behavior to " << prefetch << endl;
- if (prefetch == "none")
- prefetchConfig = BackgroundSync::PREFETCH_NONE;
- else if (prefetch == "_id_only")
- prefetchConfig = BackgroundSync::PREFETCH_ID_ONLY;
- else if (prefetch == "all")
- prefetchConfig = BackgroundSync::PREFETCH_ALL;
- else {
- return Status( ErrorCodes::BadValue,
- str::stream() << "unrecognized indexPrefetch setting: " << prefetch );
- }
+ BackgroundSync::IndexPrefetchConfig prefetchConfig;
- BackgroundSync::get()->setIndexPrefetchConfig(prefetchConfig);
- return Status::OK();
+ if (prefetch == "none")
+ prefetchConfig = BackgroundSync::PREFETCH_NONE;
+ else if (prefetch == "_id_only")
+ prefetchConfig = BackgroundSync::PREFETCH_ID_ONLY;
+ else if (prefetch == "all")
+ prefetchConfig = BackgroundSync::PREFETCH_ALL;
+ else {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "unrecognized indexPrefetch setting: " << prefetch);
}
- } replIndexPrefetch;
+ BackgroundSync::get()->setIndexPrefetchConfig(prefetchConfig);
+ return Status::OK();
+ }
+
+} replIndexPrefetch;
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
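
The page-touching loop in prefetchRecordPages() is a general trick: read one byte per OS page so the kernel faults the record into memory before the applier needs it. A standalone sketch, assuming a POSIX sysconf() page-size lookup and an arbitrary in-memory buffer rather than a BSON record:

#include <unistd.h>
#include <cstddef>

void touchPages(const char* data, size_t len) {
    if (len == 0)
        return;
    const size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    volatile char sink = 0;   // volatile so the reads are not optimized away
    for (size_t i = 0; i < len; i += pageSize) {
        sink += data[i];      // fault in the page containing byte i
    }
    sink += data[len - 1];    // make sure the last page is hit too
}
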
diff --git a/src/mongo/db/prefetch.h b/src/mongo/db/prefetch.h
index 97a826092e6..a37c010f22c 100644
--- a/src/mongo/db/prefetch.h
+++ b/src/mongo/db/prefetch.h
@@ -28,14 +28,12 @@
#pragma once
namespace mongo {
- class BSONObj;
- class Database;
- class OperationContext;
+class BSONObj;
+class Database;
+class OperationContext;
namespace repl {
- // page in possible index and/or data pages for an op from the oplog
- void prefetchPagesForReplicatedOp(OperationContext* txn,
- Database* db,
- const BSONObj& op);
-} // namespace repl
-} // namespace mongo
+// page in possible index and/or data pages for an op from the oplog
+void prefetchPagesForReplicatedOp(OperationContext* txn, Database* db, const BSONObj& op);
+} // namespace repl
+} // namespace mongo
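
setFromString() above follows the usual server-parameter pattern: accept a small set of strings, map them onto an enum, and report BadValue for anything else (the parameter is registered as a ServerParameter, so it is normally changed at runtime through setParameter). A minimal sketch of that mapping, using illustrative names rather than the server's types:

#include <iostream>
#include <string>

enum class PrefetchConfig { None, IdOnly, All };

bool parsePrefetchConfig(const std::string& s, PrefetchConfig* out) {
    if (s == "none")
        *out = PrefetchConfig::None;
    else if (s == "_id_only")
        *out = PrefetchConfig::IdOnly;
    else if (s == "all")
        *out = PrefetchConfig::All;
    else
        return false;  // corresponds to the ErrorCodes::BadValue path
    return true;
}

int main() {
    PrefetchConfig cfg;
    std::cout << std::boolalpha << parsePrefetchConfig("_id_only", &cfg) << "\n";  // true
    std::cout << parsePrefetchConfig("bogus", &cfg) << "\n";                       // false
}
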
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 67f753e0591..ac6ba1627d1 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -39,635 +39,615 @@
namespace mongo {
namespace {
- /**
- * Comparator for MatchExpression nodes. Returns an integer less than, equal to, or greater
- * than zero if 'lhs' is less than, equal to, or greater than 'rhs', respectively.
- *
- * Sorts by:
- * 1) operator type (MatchExpression::MatchType)
- * 2) path name (MatchExpression::path())
- * 3) sort order of children
- * 4) number of children (MatchExpression::numChildren())
- *
- * The third item is needed to ensure that match expression trees which should have the same
- * cache key always sort the same way. If you're wondering when the tuple (operator type, path
- * name) could ever be equal, consider this query:
- *
- * {$and:[{$or:[{a:1},{a:2}]},{$or:[{a:1},{b:2}]}]}
- *
- * The two OR nodes would compare as equal in this case were it not for tuple item #3 (sort
- * order of children).
- */
- int matchExpressionComparator(const MatchExpression* lhs, const MatchExpression* rhs) {
- MatchExpression::MatchType lhsMatchType = lhs->matchType();
- MatchExpression::MatchType rhsMatchType = rhs->matchType();
- if (lhsMatchType != rhsMatchType) {
- return lhsMatchType < rhsMatchType ? -1 : 1;
- }
-
- StringData lhsPath = lhs->path();
- StringData rhsPath = rhs->path();
- int pathsCompare = lhsPath.compare(rhsPath);
- if (pathsCompare != 0) {
- return pathsCompare;
- }
+/**
+ * Comparator for MatchExpression nodes. Returns an integer less than, equal to, or greater
+ * than zero if 'lhs' is less than, equal to, or greater than 'rhs', respectively.
+ *
+ * Sorts by:
+ * 1) operator type (MatchExpression::MatchType)
+ * 2) path name (MatchExpression::path())
+ * 3) sort order of children
+ * 4) number of children (MatchExpression::numChildren())
+ *
+ * The third item is needed to ensure that match expression trees which should have the same
+ * cache key always sort the same way. If you're wondering when the tuple (operator type, path
+ * name) could ever be equal, consider this query:
+ *
+ * {$and:[{$or:[{a:1},{a:2}]},{$or:[{a:1},{b:2}]}]}
+ *
+ * The two OR nodes would compare as equal in this case were it not for tuple item #3 (sort
+ * order of children).
+ */
+int matchExpressionComparator(const MatchExpression* lhs, const MatchExpression* rhs) {
+ MatchExpression::MatchType lhsMatchType = lhs->matchType();
+ MatchExpression::MatchType rhsMatchType = rhs->matchType();
+ if (lhsMatchType != rhsMatchType) {
+ return lhsMatchType < rhsMatchType ? -1 : 1;
+ }
- const size_t numChildren = std::min(lhs->numChildren(), rhs->numChildren());
- for (size_t childIdx = 0; childIdx < numChildren; ++childIdx) {
- int childCompare = matchExpressionComparator(lhs->getChild(childIdx),
- rhs->getChild(childIdx));
- if (childCompare != 0) {
- return childCompare;
- }
- }
+ StringData lhsPath = lhs->path();
+ StringData rhsPath = rhs->path();
+ int pathsCompare = lhsPath.compare(rhsPath);
+ if (pathsCompare != 0) {
+ return pathsCompare;
+ }
- if (lhs->numChildren() != rhs->numChildren()) {
- return lhs->numChildren() < rhs->numChildren() ? -1 : 1;
+ const size_t numChildren = std::min(lhs->numChildren(), rhs->numChildren());
+ for (size_t childIdx = 0; childIdx < numChildren; ++childIdx) {
+ int childCompare =
+ matchExpressionComparator(lhs->getChild(childIdx), rhs->getChild(childIdx));
+ if (childCompare != 0) {
+ return childCompare;
}
-
- // They're equal!
- return 0;
}
- bool matchExpressionLessThan(const MatchExpression* lhs, const MatchExpression* rhs) {
- return matchExpressionComparator(lhs, rhs) < 0;
+ if (lhs->numChildren() != rhs->numChildren()) {
+ return lhs->numChildren() < rhs->numChildren() ? -1 : 1;
}
-} // namespace
+ // They're equal!
+ return 0;
+}
- //
- // These all punt to the many-argumented canonicalize below.
- //
+bool matchExpressionLessThan(const MatchExpression* lhs, const MatchExpression* rhs) {
+ return matchExpressionComparator(lhs, rhs) < 0;
+}
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(
- ns, query, emptyObj, emptyObj, 0, 0, out, whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- bool explain,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(ns,
- query,
- emptyObj, // sort
- emptyObj, // projection
- 0, // skip
- 0, // limit
- emptyObj, // hint
- emptyObj, // min
- emptyObj, // max
- false, // snapshot
- explain,
- out,
- whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- long long skip,
- long long limit,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(ns,
- query,
- emptyObj,
- emptyObj,
- skip,
- limit,
- out,
- whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- return CanonicalQuery::canonicalize(ns, query, sort, proj, 0, 0, out, whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(
- ns, query, sort, proj, skip, limit, emptyObj, out, whereCallback);
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- const BSONObj emptyObj;
- return CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- emptyObj, emptyObj,
- false, // snapshot
- false, // explain
- out,
- whereCallback);
+} // namespace
+
+//
+// These all punt to the many-argumented canonicalize below.
+//
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(ns, query, emptyObj, emptyObj, 0, 0, out, whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ bool explain,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(ns,
+ query,
+ emptyObj, // sort
+ emptyObj, // projection
+ 0, // skip
+ 0, // limit
+ emptyObj, // hint
+ emptyObj, // min
+ emptyObj, // max
+ false, // snapshot
+ explain,
+ out,
+ whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ long long skip,
+ long long limit,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(
+ ns, query, emptyObj, emptyObj, skip, limit, out, whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ return CanonicalQuery::canonicalize(ns, query, sort, proj, 0, 0, out, whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(
+ ns, query, sort, proj, skip, limit, emptyObj, out, whereCallback);
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ const BSONObj emptyObj;
+ return CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ emptyObj,
+ emptyObj,
+ false, // snapshot
+ false, // explain
+ out,
+ whereCallback);
+}
+
+//
+// These actually call init() on the CQ.
+//
+
+// static
+Status CanonicalQuery::canonicalize(const QueryMessage& qm,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ // Make LiteParsedQuery.
+ auto lpqStatus = LiteParsedQuery::fromLegacyQueryMessage(qm);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
}
- //
- // These actually call init() on the CQ.
- //
+ return CanonicalQuery::canonicalize(lpqStatus.getValue().release(), out, whereCallback);
+}
- // static
- Status CanonicalQuery::canonicalize(const QueryMessage& qm,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- // Make LiteParsedQuery.
- auto lpqStatus = LiteParsedQuery::fromLegacyQueryMessage(qm);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
+// static
+Status CanonicalQuery::canonicalize(LiteParsedQuery* lpq,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ std::unique_ptr<LiteParsedQuery> autoLpq(lpq);
- return CanonicalQuery::canonicalize(lpqStatus.getValue().release(), out, whereCallback);
+ // Make MatchExpression.
+ StatusWithMatchExpression swme =
+ MatchExpressionParser::parse(autoLpq->getFilter(), whereCallback);
+ if (!swme.isOK()) {
+ return swme.getStatus();
}
- // static
- Status CanonicalQuery::canonicalize(LiteParsedQuery* lpq,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- std::unique_ptr<LiteParsedQuery> autoLpq(lpq);
-
- // Make MatchExpression.
- StatusWithMatchExpression swme = MatchExpressionParser::parse(autoLpq->getFilter(),
- whereCallback);
- if (!swme.isOK()) {
- return swme.getStatus();
- }
+ // Make the CQ we'll hopefully return.
+ std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
- // Make the CQ we'll hopefully return.
- std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
-
- // Takes ownership of lpq and the MatchExpression* in swme.
- Status initStatus = cq->init(autoLpq.release(), whereCallback, swme.getValue());
-
- if (!initStatus.isOK()) { return initStatus; }
- *out = cq.release();
- return Status::OK();
- }
-
- // static
- Status CanonicalQuery::canonicalize(const CanonicalQuery& baseQuery,
- MatchExpression* root,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
-
- // Pass empty sort and projection.
- BSONObj emptyObj;
-
- // 0, 0, 0 is 'ntoskip', 'ntoreturn', and 'queryoptions'
- // false, false is 'snapshot' and 'explain'
- auto lpqStatus = LiteParsedQuery::makeAsOpQuery(baseQuery.ns(),
- 0,
- 0,
- 0,
- baseQuery.getParsed().getFilter(),
- baseQuery.getParsed().getProj(),
- baseQuery.getParsed().getSort(),
- emptyObj,
- emptyObj,
- emptyObj,
- false,
- false);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
+ // Takes ownership of lpq and the MatchExpression* in swme.
+ Status initStatus = cq->init(autoLpq.release(), whereCallback, swme.getValue());
- // Make the CQ we'll hopefully return.
- std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
- Status initStatus = cq->init(lpqStatus.getValue().release(), whereCallback, root->shallowClone());
-
- if (!initStatus.isOK()) { return initStatus; }
- *out = cq.release();
- return Status::OK();
- }
-
- // static
- Status CanonicalQuery::canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot,
- bool explain,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
-
- // Pass empty sort and projection.
- BSONObj emptyObj;
-
- auto lpqStatus = LiteParsedQuery::makeAsOpQuery(ns,
- skip,
- limit,
- 0,
- query,
- proj,
- sort,
- hint,
- minObj,
- maxObj,
- snapshot,
- explain);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
+ if (!initStatus.isOK()) {
+ return initStatus;
+ }
+ *out = cq.release();
+ return Status::OK();
+}
+
+// static
+Status CanonicalQuery::canonicalize(const CanonicalQuery& baseQuery,
+ MatchExpression* root,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ // Pass empty sort and projection.
+ BSONObj emptyObj;
+
+ // 0, 0, 0 is 'ntoskip', 'ntoreturn', and 'queryoptions'
+ // false, false is 'snapshot' and 'explain'
+ auto lpqStatus = LiteParsedQuery::makeAsOpQuery(baseQuery.ns(),
+ 0,
+ 0,
+ 0,
+ baseQuery.getParsed().getFilter(),
+ baseQuery.getParsed().getProj(),
+ baseQuery.getParsed().getSort(),
+ emptyObj,
+ emptyObj,
+ emptyObj,
+ false,
+ false);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
+ }
- auto& lpq = lpqStatus.getValue();
+ // Make the CQ we'll hopefully return.
+ std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
+ Status initStatus =
+ cq->init(lpqStatus.getValue().release(), whereCallback, root->shallowClone());
- // Build a parse tree from the BSONObj in the parsed query.
- StatusWithMatchExpression swme =
- MatchExpressionParser::parse(lpq->getFilter(), whereCallback);
- if (!swme.isOK()) {
- return swme.getStatus();
- }
+ if (!initStatus.isOK()) {
+ return initStatus;
+ }
+ *out = cq.release();
+ return Status::OK();
+}
+
+// static
+Status CanonicalQuery::canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot,
+ bool explain,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ // Pass empty sort and projection.
+ BSONObj emptyObj;
+
+ auto lpqStatus = LiteParsedQuery::makeAsOpQuery(
+ ns, skip, limit, 0, query, proj, sort, hint, minObj, maxObj, snapshot, explain);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
+ }
- // Make the CQ we'll hopefully return.
- std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
- // Takes ownership of lpq and the MatchExpression* in swme.
- Status initStatus = cq->init(lpq.release(), whereCallback, swme.getValue());
+ auto& lpq = lpqStatus.getValue();
- if (!initStatus.isOK()) { return initStatus; }
- *out = cq.release();
- return Status::OK();
+ // Build a parse tree from the BSONObj in the parsed query.
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(lpq->getFilter(), whereCallback);
+ if (!swme.isOK()) {
+ return swme.getStatus();
}
- Status CanonicalQuery::init(LiteParsedQuery* lpq,
- const MatchExpressionParser::WhereCallback& whereCallback,
- MatchExpression* root) {
- _pq.reset(lpq);
+ // Make the CQ we'll hopefully return.
+ std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
+ // Takes ownership of lpq and the MatchExpression* in swme.
+ Status initStatus = cq->init(lpq.release(), whereCallback, swme.getValue());
- // Normalize, sort and validate tree.
- root = normalizeTree(root);
+ if (!initStatus.isOK()) {
+ return initStatus;
+ }
+ *out = cq.release();
+ return Status::OK();
+}
+
+Status CanonicalQuery::init(LiteParsedQuery* lpq,
+ const MatchExpressionParser::WhereCallback& whereCallback,
+ MatchExpression* root) {
+ _pq.reset(lpq);
+
+ // Normalize, sort and validate tree.
+ root = normalizeTree(root);
+
+ sortTree(root);
+ _root.reset(root);
+ Status validStatus = isValid(root, *_pq);
+ if (!validStatus.isOK()) {
+ return validStatus;
+ }
- sortTree(root);
- _root.reset(root);
- Status validStatus = isValid(root, *_pq);
- if (!validStatus.isOK()) {
- return validStatus;
+ // Validate the projection if there is one.
+ if (!_pq->getProj().isEmpty()) {
+ ParsedProjection* pp;
+ Status projStatus = ParsedProjection::make(_pq->getProj(), _root.get(), &pp, whereCallback);
+ if (!projStatus.isOK()) {
+ return projStatus;
}
+ _proj.reset(pp);
+ }
- // Validate the projection if there is one.
- if (!_pq->getProj().isEmpty()) {
- ParsedProjection* pp;
- Status projStatus =
- ParsedProjection::make(_pq->getProj(), _root.get(), &pp, whereCallback);
- if (!projStatus.isOK()) {
- return projStatus;
- }
- _proj.reset(pp);
- }
+ return Status::OK();
+}
- return Status::OK();
- }
+// static
+bool CanonicalQuery::isSimpleIdQuery(const BSONObj& query) {
+ bool hasID = false;
- // static
- bool CanonicalQuery::isSimpleIdQuery(const BSONObj& query) {
- bool hasID = false;
+ BSONObjIterator it(query);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ if (str::equals("_id", elt.fieldName())) {
+ // Verify that the query on _id is a simple equality.
+ hasID = true;
- BSONObjIterator it(query);
- while (it.more()) {
- BSONElement elt = it.next();
- if (str::equals("_id", elt.fieldName() ) ) {
- // Verify that the query on _id is a simple equality.
- hasID = true;
-
- if (elt.type() == Object) {
- // If the value is an object, it can't have a query operator
- // (must be a literal object match).
- if (elt.Obj().firstElementFieldName()[0] == '$') {
- return false;
- }
- }
- else if (!elt.isSimpleType() && BinData != elt.type()) {
- // The _id fild cannot be something like { _id : { $gt : ...
- // But it can be BinData.
+ if (elt.type() == Object) {
+ // If the value is an object, it can't have a query operator
+ // (must be a literal object match).
+ if (elt.Obj().firstElementFieldName()[0] == '$') {
return false;
}
- }
- else if (elt.fieldName()[0] == '$' &&
- (str::equals("$isolated", elt.fieldName())||
- str::equals("$atomic", elt.fieldName()))) {
- // ok, passthrough
- }
- else {
- // If the field is not _id, it must be $isolated/$atomic.
+ } else if (!elt.isSimpleType() && BinData != elt.type()) {
+            // The _id field cannot be something like { _id : { $gt : ...
+ // But it can be BinData.
return false;
}
+ } else if (elt.fieldName()[0] == '$' && (str::equals("$isolated", elt.fieldName()) ||
+ str::equals("$atomic", elt.fieldName()))) {
+ // ok, passthrough
+ } else {
+ // If the field is not _id, it must be $isolated/$atomic.
+ return false;
}
-
- return hasID;
}
- // static
- MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
- // root->isLogical() is true now. We care about AND, OR, and NOT. NOR currently scares us.
- if (MatchExpression::AND == root->matchType() || MatchExpression::OR == root->matchType()) {
- // We could have AND of AND of AND. Make sure we clean up our children before merging
- // them.
- // UNITTEST 11738048
- for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
- (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
- }
-
- // If any of our children are of the same logical operator that we are, we remove the
- // child's children and append them to ourselves after we examine all children.
- std::vector<MatchExpression*> absorbedChildren;
-
- for (size_t i = 0; i < root->numChildren();) {
- MatchExpression* child = root->getChild(i);
- if (child->matchType() == root->matchType()) {
- // AND of an AND or OR of an OR. Absorb child's children into ourself.
- for (size_t j = 0; j < child->numChildren(); ++j) {
- absorbedChildren.push_back(child->getChild(j));
- }
- // TODO(opt): this is possibly n^2-ish
- root->getChildVector()->erase(root->getChildVector()->begin() + i);
- child->getChildVector()->clear();
- // Note that this only works because we cleared the child's children
- delete child;
- // Don't increment 'i' as the current child 'i' used to be child 'i+1'
- }
- else {
- ++i;
+ return hasID;
+}
+
+// static
+MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
+ // root->isLogical() is true now. We care about AND, OR, and NOT. NOR currently scares us.
+ if (MatchExpression::AND == root->matchType() || MatchExpression::OR == root->matchType()) {
+ // We could have AND of AND of AND. Make sure we clean up our children before merging
+ // them.
+ // UNITTEST 11738048
+ for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
+ (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
+ }
+
+ // If any of our children are of the same logical operator that we are, we remove the
+ // child's children and append them to ourselves after we examine all children.
+ std::vector<MatchExpression*> absorbedChildren;
+
+ for (size_t i = 0; i < root->numChildren();) {
+ MatchExpression* child = root->getChild(i);
+ if (child->matchType() == root->matchType()) {
+ // AND of an AND or OR of an OR. Absorb child's children into ourself.
+ for (size_t j = 0; j < child->numChildren(); ++j) {
+ absorbedChildren.push_back(child->getChild(j));
}
- }
-
- root->getChildVector()->insert(root->getChildVector()->end(),
- absorbedChildren.begin(),
- absorbedChildren.end());
-
- // AND of 1 thing is the thing, OR of 1 thing is the thing.
- if (1 == root->numChildren()) {
- MatchExpression* ret = root->getChild(0);
- root->getChildVector()->clear();
- delete root;
- return ret;
- }
- }
- else if (MatchExpression::NOT == root->matchType()) {
- // Normalize the rest of the tree hanging off this NOT node.
- NotMatchExpression* nme = static_cast<NotMatchExpression*>(root);
- MatchExpression* child = nme->releaseChild();
- // normalizeTree(...) takes ownership of 'child', and then
- // transfers ownership of its return value to 'nme'.
- nme->resetChild(normalizeTree(child));
- }
- else if (MatchExpression::ELEM_MATCH_VALUE == root->matchType()) {
- // Just normalize our children.
- for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
- (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
+ // TODO(opt): this is possibly n^2-ish
+ root->getChildVector()->erase(root->getChildVector()->begin() + i);
+ child->getChildVector()->clear();
+ // Note that this only works because we cleared the child's children
+ delete child;
+ // Don't increment 'i' as the current child 'i' used to be child 'i+1'
+ } else {
+ ++i;
}
}
- return root;
+ root->getChildVector()->insert(
+ root->getChildVector()->end(), absorbedChildren.begin(), absorbedChildren.end());
+
+ // AND of 1 thing is the thing, OR of 1 thing is the thing.
+ if (1 == root->numChildren()) {
+ MatchExpression* ret = root->getChild(0);
+ root->getChildVector()->clear();
+ delete root;
+ return ret;
+ }
+ } else if (MatchExpression::NOT == root->matchType()) {
+ // Normalize the rest of the tree hanging off this NOT node.
+ NotMatchExpression* nme = static_cast<NotMatchExpression*>(root);
+ MatchExpression* child = nme->releaseChild();
+ // normalizeTree(...) takes ownership of 'child', and then
+ // transfers ownership of its return value to 'nme'.
+ nme->resetChild(normalizeTree(child));
+ } else if (MatchExpression::ELEM_MATCH_VALUE == root->matchType()) {
+ // Just normalize our children.
+ for (size_t i = 0; i < root->getChildVector()->size(); ++i) {
+ (*root->getChildVector())[i] = normalizeTree(root->getChild(i));
+ }
}
- // static
- void CanonicalQuery::sortTree(MatchExpression* tree) {
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- sortTree(tree->getChild(i));
- }
- std::vector<MatchExpression*>* children = tree->getChildVector();
- if (NULL != children) {
- std::sort(children->begin(), children->end(), matchExpressionLessThan);
- }
+ return root;
+}
+
+// static
+void CanonicalQuery::sortTree(MatchExpression* tree) {
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ sortTree(tree->getChild(i));
+ }
+ std::vector<MatchExpression*>* children = tree->getChildVector();
+ if (NULL != children) {
+ std::sort(children->begin(), children->end(), matchExpressionLessThan);
}
+}
- // static
- size_t CanonicalQuery::countNodes(const MatchExpression* root,
- MatchExpression::MatchType type) {
- size_t sum = 0;
- if (type == root->matchType()) {
- sum = 1;
- }
- for (size_t i = 0; i < root->numChildren(); ++i) {
- sum += countNodes(root->getChild(i), type);
- }
- return sum;
+// static
+size_t CanonicalQuery::countNodes(const MatchExpression* root, MatchExpression::MatchType type) {
+ size_t sum = 0;
+ if (type == root->matchType()) {
+ sum = 1;
}
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ sum += countNodes(root->getChild(i), type);
+ }
+ return sum;
+}
- /**
- * Does 'root' have a subtree of type 'subtreeType' with a node of type 'childType' inside?
- */
- bool hasNodeInSubtree(MatchExpression* root, MatchExpression::MatchType childType,
- MatchExpression::MatchType subtreeType) {
- if (subtreeType == root->matchType()) {
- return QueryPlannerCommon::hasNode(root, childType);
- }
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (hasNodeInSubtree(root->getChild(i), childType, subtreeType)) {
- return true;
- }
+/**
+ * Does 'root' have a subtree of type 'subtreeType' with a node of type 'childType' inside?
+ */
+bool hasNodeInSubtree(MatchExpression* root,
+ MatchExpression::MatchType childType,
+ MatchExpression::MatchType subtreeType) {
+ if (subtreeType == root->matchType()) {
+ return QueryPlannerCommon::hasNode(root, childType);
+ }
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (hasNodeInSubtree(root->getChild(i), childType, subtreeType)) {
+ return true;
}
- return false;
}
+ return false;
+}
- // static
- Status CanonicalQuery::isValid(MatchExpression* root, const LiteParsedQuery& parsed) {
- // Analysis below should be done after squashing the tree to make it clearer.
+// static
+Status CanonicalQuery::isValid(MatchExpression* root, const LiteParsedQuery& parsed) {
+ // Analysis below should be done after squashing the tree to make it clearer.
- // There can only be one TEXT. If there is a TEXT, it cannot appear inside a NOR.
- //
- // Note that the query grammar (as enforced by the MatchExpression parser) forbids TEXT
- // inside of value-expression clauses like NOT, so we don't check those here.
- size_t numText = countNodes(root, MatchExpression::TEXT);
- if (numText > 1) {
- return Status(ErrorCodes::BadValue, "Too many text expressions");
- }
- else if (1 == numText) {
- if (hasNodeInSubtree(root, MatchExpression::TEXT, MatchExpression::NOR)) {
- return Status(ErrorCodes::BadValue, "text expression not allowed in nor");
- }
+ // There can only be one TEXT. If there is a TEXT, it cannot appear inside a NOR.
+ //
+ // Note that the query grammar (as enforced by the MatchExpression parser) forbids TEXT
+ // inside of value-expression clauses like NOT, so we don't check those here.
+ size_t numText = countNodes(root, MatchExpression::TEXT);
+ if (numText > 1) {
+ return Status(ErrorCodes::BadValue, "Too many text expressions");
+ } else if (1 == numText) {
+ if (hasNodeInSubtree(root, MatchExpression::TEXT, MatchExpression::NOR)) {
+ return Status(ErrorCodes::BadValue, "text expression not allowed in nor");
}
+ }
- // There can only be one NEAR. If there is a NEAR, it must be either the root or the root
- // must be an AND and its child must be a NEAR.
- size_t numGeoNear = countNodes(root, MatchExpression::GEO_NEAR);
- if (numGeoNear > 1) {
- return Status(ErrorCodes::BadValue, "Too many geoNear expressions");
- }
- else if (1 == numGeoNear) {
- bool topLevel = false;
- if (MatchExpression::GEO_NEAR == root->matchType()) {
- topLevel = true;
- }
- else if (MatchExpression::AND == root->matchType()) {
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (MatchExpression::GEO_NEAR == root->getChild(i)->matchType()) {
- topLevel = true;
- break;
- }
+ // There can only be one NEAR. If there is a NEAR, it must be either the root or the root
+ // must be an AND and its child must be a NEAR.
+ size_t numGeoNear = countNodes(root, MatchExpression::GEO_NEAR);
+ if (numGeoNear > 1) {
+ return Status(ErrorCodes::BadValue, "Too many geoNear expressions");
+ } else if (1 == numGeoNear) {
+ bool topLevel = false;
+ if (MatchExpression::GEO_NEAR == root->matchType()) {
+ topLevel = true;
+ } else if (MatchExpression::AND == root->matchType()) {
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (MatchExpression::GEO_NEAR == root->getChild(i)->matchType()) {
+ topLevel = true;
+ break;
}
}
- if (!topLevel) {
- return Status(ErrorCodes::BadValue, "geoNear must be top-level expr");
- }
}
-
- // NEAR cannot have a $natural sort or $natural hint.
- if (numGeoNear > 0) {
- BSONObj sortObj = parsed.getSort();
- if (!sortObj["$natural"].eoo()) {
- return Status(ErrorCodes::BadValue,
- "geoNear expression not allowed with $natural sort order");
- }
-
- BSONObj hintObj = parsed.getHint();
- if (!hintObj["$natural"].eoo()) {
- return Status(ErrorCodes::BadValue,
- "geoNear expression not allowed with $natural hint");
- }
+ if (!topLevel) {
+ return Status(ErrorCodes::BadValue, "geoNear must be top-level expr");
}
+ }
- // TEXT and NEAR cannot both be in the query.
- if (numText > 0 && numGeoNear > 0) {
- return Status(ErrorCodes::BadValue, "text and geoNear not allowed in same query");
+ // NEAR cannot have a $natural sort or $natural hint.
+ if (numGeoNear > 0) {
+ BSONObj sortObj = parsed.getSort();
+ if (!sortObj["$natural"].eoo()) {
+ return Status(ErrorCodes::BadValue,
+ "geoNear expression not allowed with $natural sort order");
}
- // TEXT and {$natural: ...} sort order cannot both be in the query.
- if (numText > 0) {
- const BSONObj& sortObj = parsed.getSort();
- BSONObjIterator it(sortObj);
- while (it.more()) {
- BSONElement elt = it.next();
- if (str::equals("$natural", elt.fieldName())) {
- return Status(ErrorCodes::BadValue,
- "text expression not allowed with $natural sort order");
- }
- }
+ BSONObj hintObj = parsed.getHint();
+ if (!hintObj["$natural"].eoo()) {
+ return Status(ErrorCodes::BadValue,
+ "geoNear expression not allowed with $natural hint");
}
+ }
- // TEXT and hint cannot both be in the query.
- if (numText > 0 && !parsed.getHint().isEmpty()) {
- return Status(ErrorCodes::BadValue, "text and hint not allowed in same query");
- }
+ // TEXT and NEAR cannot both be in the query.
+ if (numText > 0 && numGeoNear > 0) {
+ return Status(ErrorCodes::BadValue, "text and geoNear not allowed in same query");
+ }
- // TEXT and snapshot cannot both be in the query.
- if (numText > 0 && parsed.isSnapshot()) {
- return Status(ErrorCodes::BadValue, "text and snapshot not allowed in same query");
+ // TEXT and {$natural: ...} sort order cannot both be in the query.
+ if (numText > 0) {
+ const BSONObj& sortObj = parsed.getSort();
+ BSONObjIterator it(sortObj);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ if (str::equals("$natural", elt.fieldName())) {
+ return Status(ErrorCodes::BadValue,
+ "text expression not allowed with $natural sort order");
+ }
}
+ }
- return Status::OK();
+ // TEXT and hint cannot both be in the query.
+ if (numText > 0 && !parsed.getHint().isEmpty()) {
+ return Status(ErrorCodes::BadValue, "text and hint not allowed in same query");
}
- // static
- // XXX TODO: This does not belong here at all.
- MatchExpression* CanonicalQuery::logicalRewrite(MatchExpression* tree) {
- // Only thing we do is pull an OR up at the root.
- if (MatchExpression::AND != tree->matchType()) {
- return tree;
- }
+ // TEXT and snapshot cannot both be in the query.
+ if (numText > 0 && parsed.isSnapshot()) {
+ return Status(ErrorCodes::BadValue, "text and snapshot not allowed in same query");
+ }
- // We want to bail out ASAP if we have nothing to do here.
- size_t numOrs = 0;
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- if (MatchExpression::OR == tree->getChild(i)->matchType()) {
- ++numOrs;
- }
- }
+ return Status::OK();
+}
- // Only do this for one OR right now.
- if (1 != numOrs) {
- return tree;
- }
+// static
+// XXX TODO: This does not belong here at all.
+MatchExpression* CanonicalQuery::logicalRewrite(MatchExpression* tree) {
+ // Only thing we do is pull an OR up at the root.
+ if (MatchExpression::AND != tree->matchType()) {
+ return tree;
+ }
- // Detach the OR from the root.
- invariant(NULL != tree->getChildVector());
- std::vector<MatchExpression*>& rootChildren = *tree->getChildVector();
- MatchExpression* orChild = NULL;
- for (size_t i = 0; i < rootChildren.size(); ++i) {
- if (MatchExpression::OR == rootChildren[i]->matchType()) {
- orChild = rootChildren[i];
- rootChildren.erase(rootChildren.begin() + i);
- break;
- }
+ // We want to bail out ASAP if we have nothing to do here.
+ size_t numOrs = 0;
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ if (MatchExpression::OR == tree->getChild(i)->matchType()) {
+ ++numOrs;
}
+ }
- // AND the existing root with each or child.
- invariant(NULL != orChild);
- invariant(NULL != orChild->getChildVector());
- std::vector<MatchExpression*>& orChildren = *orChild->getChildVector();
- for (size_t i = 0; i < orChildren.size(); ++i) {
- AndMatchExpression* ama = new AndMatchExpression();
- ama->add(orChildren[i]);
- ama->add(tree->shallowClone());
- orChildren[i] = ama;
- }
- delete tree;
+ // Only do this for one OR right now.
+ if (1 != numOrs) {
+ return tree;
+ }
- // Clean up any consequences from this tomfoolery.
- return normalizeTree(orChild);
+ // Detach the OR from the root.
+ invariant(NULL != tree->getChildVector());
+ std::vector<MatchExpression*>& rootChildren = *tree->getChildVector();
+ MatchExpression* orChild = NULL;
+ for (size_t i = 0; i < rootChildren.size(); ++i) {
+ if (MatchExpression::OR == rootChildren[i]->matchType()) {
+ orChild = rootChildren[i];
+ rootChildren.erase(rootChildren.begin() + i);
+ break;
+ }
}
- std::string CanonicalQuery::toString() const {
- str::stream ss;
- ss << "ns=" << _pq->ns();
+ // AND the existing root with each or child.
+ invariant(NULL != orChild);
+ invariant(NULL != orChild->getChildVector());
+ std::vector<MatchExpression*>& orChildren = *orChild->getChildVector();
+ for (size_t i = 0; i < orChildren.size(); ++i) {
+ AndMatchExpression* ama = new AndMatchExpression();
+ ama->add(orChildren[i]);
+ ama->add(tree->shallowClone());
+ orChildren[i] = ama;
+ }
+ delete tree;
- if (_pq->getBatchSize()) {
- ss << " batchSize=" << *_pq->getBatchSize();
- }
+ // Clean up any consequences from this tomfoolery.
+ return normalizeTree(orChild);
+}
- if (_pq->getLimit()) {
- ss << " limit=" << *_pq->getLimit();
- }
+std::string CanonicalQuery::toString() const {
+ str::stream ss;
+ ss << "ns=" << _pq->ns();
- ss << " skip=" << _pq->getSkip() << "\n";
+ if (_pq->getBatchSize()) {
+ ss << " batchSize=" << *_pq->getBatchSize();
+ }
- // The expression tree puts an endl on for us.
- ss << "Tree: " << _root->toString();
- ss << "Sort: " << _pq->getSort().toString() << '\n';
- ss << "Proj: " << _pq->getProj().toString() << '\n';
- return ss;
+ if (_pq->getLimit()) {
+ ss << " limit=" << *_pq->getLimit();
}
- std::string CanonicalQuery::toStringShort() const {
- str::stream ss;
- ss << "query: " << _pq->getFilter().toString()
- << " sort: " << _pq->getSort().toString()
- << " projection: " << _pq->getProj().toString()
- << " skip: " << _pq->getSkip();
+ ss << " skip=" << _pq->getSkip() << "\n";
- if (_pq->getBatchSize()) {
- ss << " batchSize: " << *_pq->getBatchSize();
- }
+ // The expression tree puts an endl on for us.
+ ss << "Tree: " << _root->toString();
+ ss << "Sort: " << _pq->getSort().toString() << '\n';
+ ss << "Proj: " << _pq->getProj().toString() << '\n';
+ return ss;
+}
- if (_pq->getLimit()) {
- ss << " limit: " << *_pq->getLimit();
- }
+std::string CanonicalQuery::toStringShort() const {
+ str::stream ss;
+ ss << "query: " << _pq->getFilter().toString() << " sort: " << _pq->getSort().toString()
+ << " projection: " << _pq->getProj().toString() << " skip: " << _pq->getSkip();
- return ss;
+ if (_pq->getBatchSize()) {
+ ss << " batchSize: " << *_pq->getBatchSize();
}
+ if (_pq->getLimit()) {
+ ss << " limit: " << *_pq->getLimit();
+ }
+
+ return ss;
+}
+
} // namespace mongo
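
The comparator documented at the top of this file is what makes sortTree() produce a stable canonical order, so that logically identical trees yield the same cache key. A toy illustration (plain structs, not MatchExpression) of the same (type, path, children, child count) ordering and post-order sort:

#include <algorithm>
#include <string>
#include <vector>

struct Node {
    int type;                     // stands in for MatchExpression::MatchType
    std::string path;
    std::vector<Node> children;
};

int compareNodes(const Node& lhs, const Node& rhs) {
    if (lhs.type != rhs.type)
        return lhs.type < rhs.type ? -1 : 1;
    if (int c = lhs.path.compare(rhs.path))
        return c;
    const size_t n = std::min(lhs.children.size(), rhs.children.size());
    for (size_t i = 0; i < n; ++i)
        if (int c = compareNodes(lhs.children[i], rhs.children[i]))
            return c;
    if (lhs.children.size() != rhs.children.size())
        return lhs.children.size() < rhs.children.size() ? -1 : 1;
    return 0;   // equal
}

void sortTree(Node& root) {
    for (Node& child : root.children)
        sortTree(child);          // post-order, like CanonicalQuery::sortTree
    std::sort(root.children.begin(), root.children.end(),
              [](const Node& a, const Node& b) { return compareNodes(a, b) < 0; });
}

Running sortTree over two trees that differ only in the order of their OR/AND children leaves both in the same shape, which is exactly the property the plan cache relies on.
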
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index 365a06e4802..58a2c46f3a8 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -38,188 +38,199 @@
namespace mongo {
- class CanonicalQuery {
- public:
- /**
- * Caller owns the pointer in 'out' if any call to canonicalize returns Status::OK().
- *
- * Used for legacy find through the OP_QUERY message.
- */
- static Status canonicalize(const QueryMessage& qm,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- /**
- * Takes ownership of 'lpq'.
- *
- * Caller owns the pointer in 'out' if any call to canonicalize returns Status::OK().
- *
- * Used for finds using the find command path.
- */
- static Status canonicalize(LiteParsedQuery* lpq,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- /**
- * For testing or for internal clients to use.
- */
-
- /**
- * Used for creating sub-queries from an existing CanonicalQuery.
- *
- * 'root' must be an expression in baseQuery.root().
- *
- * Does not take ownership of 'root'.
- */
- static Status canonicalize(const CanonicalQuery& baseQuery,
- MatchExpression* root,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- bool explain,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- long long skip,
- long long limit,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- static Status canonicalize(const std::string& ns,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot,
- bool explain,
- CanonicalQuery** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- /**
- * Returns true if "query" describes an exact-match query on _id, possibly with
- * the $isolated/$atomic modifier.
- */
- static bool isSimpleIdQuery(const BSONObj& query);
-
- // What namespace is this query over?
- const std::string& ns() const { return _pq->ns(); }
-
- //
- // Accessors for the query
- //
- MatchExpression* root() const { return _root.get(); }
- BSONObj getQueryObj() const { return _pq->getFilter(); }
- const LiteParsedQuery& getParsed() const { return *_pq; }
- const ParsedProjection* getProj() const { return _proj.get(); }
-
- // Debugging
- std::string toString() const;
- std::string toStringShort() const;
-
- /**
- * Validates match expression, checking for certain
- * combinations of operators in match expression and
- * query options in LiteParsedQuery.
- * Since 'root' is derived from 'filter' in LiteParsedQuery,
- * 'filter' is not validated.
- *
- * TODO: Move this to query_validator.cpp
- */
- static Status isValid(MatchExpression* root, const LiteParsedQuery& parsed);
-
- /**
- * Returns the normalized version of the subtree rooted at 'root'.
- *
- * Takes ownership of 'root'.
- */
- static MatchExpression* normalizeTree(MatchExpression* root);
-
- /**
- * Traverses expression tree post-order.
- * Sorts children at each non-leaf node by (MatchType, path(), children, number of children)
- */
- static void sortTree(MatchExpression* tree);
-
- /**
- * Returns a count of 'type' nodes in expression tree.
- */
- static size_t countNodes(const MatchExpression* root, MatchExpression::MatchType type);
-
- /**
- * Takes ownership of 'tree'. Performs some rewriting of the query to a logically
- * equivalent but more digestible form.
- *
- * TODO: This doesn't entirely belong here. Really we'd do this while exploring
- * solutions in an enumeration setting but given the current lack of pruning
- * while exploring the enumeration space we do it here.
- */
- static MatchExpression* logicalRewrite(MatchExpression* tree);
- private:
- // You must go through canonicalize to create a CanonicalQuery.
- CanonicalQuery() { }
-
- /**
- * Takes ownership of 'root' and 'lpq'.
- */
- Status init(LiteParsedQuery* lpq,
- const MatchExpressionParser::WhereCallback& whereCallback,
- MatchExpression* root);
-
- std::unique_ptr<LiteParsedQuery> _pq;
-
- // _root points into _pq->getFilter()
- std::unique_ptr<MatchExpression> _root;
-
- std::unique_ptr<ParsedProjection> _proj;
- };
+class CanonicalQuery {
+public:
+ /**
+ * Caller owns the pointer in 'out' if any call to canonicalize returns Status::OK().
+ *
+ * Used for legacy find through the OP_QUERY message.
+ */
+ static Status canonicalize(const QueryMessage& qm,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ /**
+ * Takes ownership of 'lpq'.
+ *
+ * Caller owns the pointer in 'out' if any call to canonicalize returns Status::OK().
+ *
+ * Used for finds using the find command path.
+ */
+ static Status canonicalize(LiteParsedQuery* lpq,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ /**
+ * For testing or for internal clients to use.
+ */
+
+ /**
+ * Used for creating sub-queries from an existing CanonicalQuery.
+ *
+ * 'root' must be an expression in baseQuery.root().
+ *
+ * Does not take ownership of 'root'.
+ */
+ static Status canonicalize(const CanonicalQuery& baseQuery,
+ MatchExpression* root,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ bool explain,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ long long skip,
+ long long limit,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ static Status canonicalize(const std::string& ns,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot,
+ bool explain,
+ CanonicalQuery** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
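+
+    // A minimal usage sketch (the namespace and filter below are hypothetical values, not
+    // taken from any real caller): canonicalize() hands back a raw pointer that the caller
+    // owns on success, so wrapping it in a unique_ptr right away is the usual pattern.
+    //
+    //     CanonicalQuery* rawCq;
+    //     Status status = CanonicalQuery::canonicalize("test.coll", fromjson("{a: 1}"), &rawCq);
+    //     if (!status.isOK()) {
+    //         return status;
+    //     }
+    //     std::unique_ptr<CanonicalQuery> cq(rawCq);  // caller takes ownership on OK status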
+
+ /**
+ * Returns true if "query" describes an exact-match query on _id, possibly with
+ * the $isolated/$atomic modifier.
+ */
+ static bool isSimpleIdQuery(const BSONObj& query);
+
+ // What namespace is this query over?
+ const std::string& ns() const {
+ return _pq->ns();
+ }
+
+ //
+ // Accessors for the query
+ //
+ MatchExpression* root() const {
+ return _root.get();
+ }
+ BSONObj getQueryObj() const {
+ return _pq->getFilter();
+ }
+ const LiteParsedQuery& getParsed() const {
+ return *_pq;
+ }
+ const ParsedProjection* getProj() const {
+ return _proj.get();
+ }
+
+ // Debugging
+ std::string toString() const;
+ std::string toStringShort() const;
+
+ /**
+ * Validates match expression, checking for certain
+ * combinations of operators in match expression and
+ * query options in LiteParsedQuery.
+ * Since 'root' is derived from 'filter' in LiteParsedQuery,
+ * 'filter' is not validated.
+ *
+ * TODO: Move this to query_validator.cpp
+ */
+ static Status isValid(MatchExpression* root, const LiteParsedQuery& parsed);
+
+ /**
+ * Returns the normalized version of the subtree rooted at 'root'.
+ *
+ * Takes ownership of 'root'.
+ */
+ static MatchExpression* normalizeTree(MatchExpression* root);
+
+ /**
+ * Traverses expression tree post-order.
+ * Sorts children at each non-leaf node by (MatchType, path(), children, number of children)
+ */
+ static void sortTree(MatchExpression* tree);
+
+ /**
+ * Returns a count of 'type' nodes in expression tree.
+ */
+ static size_t countNodes(const MatchExpression* root, MatchExpression::MatchType type);
+
+ /**
+ * Takes ownership of 'tree'. Performs some rewriting of the query to a logically
+ * equivalent but more digestible form.
+ *
+ * TODO: This doesn't entirely belong here. Really we'd do this while exploring
+ * solutions in an enumeration setting but given the current lack of pruning
+ * while exploring the enumeration space we do it here.
+ */
+ static MatchExpression* logicalRewrite(MatchExpression* tree);
+
+private:
+ // You must go through canonicalize to create a CanonicalQuery.
+ CanonicalQuery() {}
+
+ /**
+ * Takes ownership of 'root' and 'lpq'.
+ */
+ Status init(LiteParsedQuery* lpq,
+ const MatchExpressionParser::WhereCallback& whereCallback,
+ MatchExpression* root);
+
+ std::unique_ptr<LiteParsedQuery> _pq;
+
+ // _root points into _pq->getFilter()
+ std::unique_ptr<MatchExpression> _root;
+
+ std::unique_ptr<ParsedProjection> _proj;
+};
} // namespace mongo
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index d125cb692ab..d3fb9a55fd4 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -34,534 +34,507 @@
namespace mongo {
namespace {
- using std::string;
- using std::unique_ptr;
- using unittest::assertGet;
-
- static const char* ns = "somebogusns";
-
- /**
- * Helper function to parse the given BSON object as a MatchExpression, checks the status,
- * and return the MatchExpression*.
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- if (!status.isOK()) {
- mongoutils::str::stream ss;
- ss << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString();
- FAIL(ss);
- }
- return status.getValue();
- }
+using std::string;
+using std::unique_ptr;
+using unittest::assertGet;
- /**
- * Helper function which parses and normalizes 'queryStr', and returns whether the given
- * (expression tree, lite parsed query) tuple passes CanonicalQuery::isValid().
- * Returns Status::OK() if the tuple is valid, else returns an error Status.
- */
- Status isValid(const std::string& queryStr, const LiteParsedQuery& lpqRaw) {
- BSONObj queryObj = fromjson(queryStr);
- std::unique_ptr<MatchExpression> me(
- CanonicalQuery::normalizeTree(parseMatchExpression(queryObj)));
- return CanonicalQuery::isValid(me.get(), lpqRaw);
- }
+static const char* ns = "somebogusns";
- void assertEquivalent(const char* queryStr,
- const MatchExpression* expected,
- const MatchExpression* actual) {
- if (actual->equivalent(expected)) {
- return;
- }
- mongoutils::str::stream ss;
- ss << "Match expressions are not equivalent."
- << "\nOriginal query: " << queryStr
- << "\nExpected: " << expected->toString()
- << "\nActual: " << actual->toString();
- FAIL(ss);
- }
-
- void assertNotEquivalent(const char* queryStr,
- const MatchExpression* expected,
- const MatchExpression* actual) {
- if (!actual->equivalent(expected)) {
- return;
- }
+/**
+ * Helper function that parses the given BSON object as a MatchExpression, checks the status,
+ * and returns the MatchExpression*.
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ if (!status.isOK()) {
mongoutils::str::stream ss;
- ss << "Match expressions are equivalent."
- << "\nOriginal query: " << queryStr
- << "\nExpected: " << expected->toString()
- << "\nActual: " << actual->toString();
+ ss << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString();
FAIL(ss);
}
+ return status.getValue();
+}
-
- TEST(CanonicalQueryTest, IsValidText) {
- // Passes in default values for LiteParsedQuery.
- // Filter inside LiteParsedQuery is not used.
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Valid: regular TEXT.
- ASSERT_OK(isValid("{$text: {$search: 's'}}", *lpq));
-
- // Valid: TEXT inside OR.
- ASSERT_OK(isValid(
- "{$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- "]}",
- *lpq
- ));
-
- // Valid: TEXT outside NOR.
- ASSERT_OK(isValid("{$text: {$search: 's'}, $nor: [{a: 1}, {b: 1}]}", *lpq));
-
- // Invalid: TEXT inside NOR.
- ASSERT_NOT_OK(isValid("{$nor: [{$text: {$search: 's'}}, {a: 1}]}", *lpq));
-
- // Invalid: TEXT inside NOR.
- ASSERT_NOT_OK(isValid(
- "{$nor: ["
- " {$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ]},"
- " {a: 2}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 TEXT.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$text: {$search: 's'}},"
- " {$text: {$search: 't'}}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 TEXT.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ]},"
- " {$or: ["
- " {$text: {$search: 't'}},"
- " {b: 1}"
- " ]}"
- "]}",
- *lpq
- ));
- }
-
- TEST(CanonicalQueryTest, IsValidGeo) {
- // Passes in default values for LiteParsedQuery.
- // Filter inside LiteParsedQuery is not used.
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Valid: regular GEO_NEAR.
- ASSERT_OK(isValid("{a: {$near: [0, 0]}}", *lpq));
-
- // Valid: GEO_NEAR inside nested AND.
- ASSERT_OK(isValid(
- "{$and: ["
- " {$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- " ]},"
- " {c: 1}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: {$near: [0, 0]}}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {a: {$geoNear: [0, 0]}},"
- " {b: {$near: [0, 0]}}"
- "]}",
- *lpq
- ));
-
- // Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- " ]},"
- " {$and: ["
- " {c: {$near: [0, 0]}},"
- " {d: 1}"
- " ]}"
- "]}",
- *lpq
- ));
-
- // Invalid: GEO_NEAR inside NOR.
- ASSERT_NOT_OK(isValid(
- "{$nor: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- "]}",
- *lpq
- ));
-
- // Invalid: GEO_NEAR inside OR.
- ASSERT_NOT_OK(isValid(
- "{$or: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- "]}",
- *lpq
- ));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndGeo) {
- // Passes in default values for LiteParsedQuery.
- // Filter inside LiteParsedQuery is not used.
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$near: [0, 0]}}", *lpq));
-
- // Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$geoNear: [0, 0]}}", *lpq));
-
- // Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ],"
- " b: {$near: [0, 0]}}",
- *lpq
- ));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndNaturalAscending) {
- // Passes in default values for LiteParsedQuery except for sort order.
- // Filter inside LiteParsedQuery is not used.
- BSONObj sort = fromjson("{$natural: 1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- sort,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: TEXT and {$natural: 1} sort order.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndNaturalDescending) {
- // Passes in default values for LiteParsedQuery except for sort order.
- // Filter inside LiteParsedQuery is not used.
- BSONObj sort = fromjson("{$natural: -1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- sort,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: TEXT and {$natural: -1} sort order.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndHint) {
- // Passes in default values for LiteParsedQuery except for hint.
- // Filter inside LiteParsedQuery is not used.
- BSONObj hint = fromjson("{a: 1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- hint,
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: TEXT and {$natural: -1} sort order.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
- }
-
- // SERVER-14366
- TEST(CanonicalQueryTest, IsValidGeoNearNaturalSort) {
- // Passes in default values for LiteParsedQuery except for sort order.
- // Filter inside LiteParsedQuery is not used.
- BSONObj sort = fromjson("{$natural: 1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- sort,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: GEO_NEAR and {$natural: 1} sort order.
- ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}",
- *lpq));
- }
-
- // SERVER-14366
- TEST(CanonicalQueryTest, IsValidGeoNearNaturalHint) {
- // Passes in default values for LiteParsedQuery except for the hint.
- // Filter inside LiteParsedQuery is not used.
- BSONObj hint = fromjson("{$natural: 1}");
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- hint,
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- // Invalid: GEO_NEAR and {$natural: 1} hint.
- ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}",
- *lpq));
- }
-
- TEST(CanonicalQueryTest, IsValidTextAndSnapshot) {
- // Passes in default values for LiteParsedQuery except for snapshot.
- // Filter inside LiteParsedQuery is not used.
- bool snapshot = true;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery(ns,
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- snapshot,
- false))); // explain
-
- // Invalid: TEXT and snapshot.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
- }
-
- //
- // Tests for CanonicalQuery::sortTree
- //
-
- /**
- * Helper function for testing CanonicalQuery::sortTree().
- *
- * Verifies that sorting the expression 'unsortedQueryStr' yields an expression equivalent to
- * the expression 'sortedQueryStr'.
- */
- void testSortTree(const char* unsortedQueryStr, const char* sortedQueryStr) {
- BSONObj unsortedQueryObj = fromjson(unsortedQueryStr);
- unique_ptr<MatchExpression> unsortedQueryExpr(parseMatchExpression(unsortedQueryObj));
-
- BSONObj sortedQueryObj = fromjson(sortedQueryStr);
- unique_ptr<MatchExpression> sortedQueryExpr(parseMatchExpression(sortedQueryObj));
-
- // Sanity check that the unsorted expression is not equivalent to the sorted expression.
- assertNotEquivalent(unsortedQueryStr, unsortedQueryExpr.get(), sortedQueryExpr.get());
-
- // Sanity check that sorting the sorted expression is a no-op.
- {
- unique_ptr<MatchExpression> sortedQueryExprClone(parseMatchExpression(sortedQueryObj));
- CanonicalQuery::sortTree(sortedQueryExprClone.get());
- assertEquivalent(unsortedQueryStr, sortedQueryExpr.get(), sortedQueryExprClone.get());
- }
-
- // Test that sorting the unsorted expression yields the sorted expression.
- CanonicalQuery::sortTree(unsortedQueryExpr.get());
- assertEquivalent(unsortedQueryStr, unsortedQueryExpr.get(), sortedQueryExpr.get());
- }
-
- // Test that an EQ expression sorts before a GT expression.
- TEST(CanonicalQueryTest, SortTreeMatchTypeComparison) {
- testSortTree("{a: {$gt: 1}, a: 1}", "{a: 1, a: {$gt: 1}}");
- }
-
- // Test that an EQ expression on path "a" sorts before an EQ expression on path "b".
- TEST(CanonicalQueryTest, SortTreePathComparison) {
- testSortTree("{b: 1, a: 1}", "{a: 1, b: 1}");
- testSortTree("{'a.b': 1, a: 1}", "{a: 1, 'a.b': 1}");
- testSortTree("{'a.c': 1, 'a.b': 1}", "{'a.b': 1, 'a.c': 1}");
- }
-
- // Test that AND expressions sort according to their first differing child.
- TEST(CanonicalQueryTest, SortTreeChildComparison) {
- testSortTree("{$or: [{a: 1, c: 1}, {a: 1, b: 1}]}", "{$or: [{a: 1, b: 1}, {a: 1, c: 1}]}");
- }
-
- // Test that an AND with 2 children sorts before an AND with 3 children, if the first 2 children
- // are equivalent in both.
- TEST(CanonicalQueryTest, SortTreeNumChildrenComparison) {
- testSortTree("{$or: [{a: 1, b: 1, c: 1}, {a: 1, b: 1}]}",
- "{$or: [{a: 1, b: 1}, {a: 1, b: 1, c: 1}]}");
+/**
+ * Helper function which parses and normalizes 'queryStr', and returns whether the given
+ * (expression tree, lite parsed query) tuple passes CanonicalQuery::isValid().
+ * Returns Status::OK() if the tuple is valid, else returns an error Status.
+ */
+Status isValid(const std::string& queryStr, const LiteParsedQuery& lpqRaw) {
+ BSONObj queryObj = fromjson(queryStr);
+ std::unique_ptr<MatchExpression> me(
+ CanonicalQuery::normalizeTree(parseMatchExpression(queryObj)));
+ return CanonicalQuery::isValid(me.get(), lpqRaw);
+}
+
+void assertEquivalent(const char* queryStr,
+ const MatchExpression* expected,
+ const MatchExpression* actual) {
+ if (actual->equivalent(expected)) {
+ return;
}
-
- //
- // Tests for CanonicalQuery::logicalRewrite
- //
-
- /**
- * Utility function to create a CanonicalQuery
- */
- CanonicalQuery* canonicalize(const char* queryStr) {
- BSONObj queryObj = fromjson(queryStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
- ASSERT_OK(result);
- return cq;
+ mongoutils::str::stream ss;
+ ss << "Match expressions are not equivalent."
+ << "\nOriginal query: " << queryStr << "\nExpected: " << expected->toString()
+ << "\nActual: " << actual->toString();
+ FAIL(ss);
+}
+
+void assertNotEquivalent(const char* queryStr,
+ const MatchExpression* expected,
+ const MatchExpression* actual) {
+ if (!actual->equivalent(expected)) {
+ return;
}
+ mongoutils::str::stream ss;
+ ss << "Match expressions are equivalent."
+ << "\nOriginal query: " << queryStr << "\nExpected: " << expected->toString()
+ << "\nActual: " << actual->toString();
+ FAIL(ss);
+}
+
+
+TEST(CanonicalQueryTest, IsValidText) {
+ // Passes in default values for LiteParsedQuery.
+ // Filter inside LiteParsedQuery is not used.
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Valid: regular TEXT.
+ ASSERT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+
+ // Valid: TEXT inside OR.
+ ASSERT_OK(isValid(
+ "{$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ "]}",
+ *lpq));
+
+ // Valid: TEXT outside NOR.
+ ASSERT_OK(isValid("{$text: {$search: 's'}, $nor: [{a: 1}, {b: 1}]}", *lpq));
+
+ // Invalid: TEXT inside NOR.
+ ASSERT_NOT_OK(isValid("{$nor: [{$text: {$search: 's'}}, {a: 1}]}", *lpq));
+
+ // Invalid: TEXT inside NOR.
+ ASSERT_NOT_OK(isValid(
+ "{$nor: ["
+ " {$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ]},"
+ " {a: 2}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 TEXT.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {$text: {$search: 's'}},"
+ " {$text: {$search: 't'}}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 TEXT.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ]},"
+ " {$or: ["
+ " {$text: {$search: 't'}},"
+ " {b: 1}"
+ " ]}"
+ "]}",
+ *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidGeo) {
+ // Passes in default values for LiteParsedQuery.
+ // Filter inside LiteParsedQuery is not used.
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Valid: regular GEO_NEAR.
+ ASSERT_OK(isValid("{a: {$near: [0, 0]}}", *lpq));
+
+ // Valid: GEO_NEAR inside nested AND.
+ ASSERT_OK(isValid(
+ "{$and: ["
+ " {$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ " ]},"
+ " {c: 1}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 GEO_NEAR.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: {$near: [0, 0]}}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 GEO_NEAR.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {a: {$geoNear: [0, 0]}},"
+ " {b: {$near: [0, 0]}}"
+ "]}",
+ *lpq));
+
+ // Invalid: >1 GEO_NEAR.
+ ASSERT_NOT_OK(isValid(
+ "{$and: ["
+ " {$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ " ]},"
+ " {$and: ["
+ " {c: {$near: [0, 0]}},"
+ " {d: 1}"
+ " ]}"
+ "]}",
+ *lpq));
+
+ // Invalid: GEO_NEAR inside NOR.
+ ASSERT_NOT_OK(isValid(
+ "{$nor: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ "]}",
+ *lpq));
+
+ // Invalid: GEO_NEAR inside OR.
+ ASSERT_NOT_OK(isValid(
+ "{$or: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ "]}",
+ *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndGeo) {
+ // Passes in default values for LiteParsedQuery.
+ // Filter inside LiteParsedQuery is not used.
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: TEXT and GEO_NEAR.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$near: [0, 0]}}", *lpq));
+
+ // Invalid: TEXT and GEO_NEAR.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$geoNear: [0, 0]}}", *lpq));
+
+ // Invalid: TEXT and GEO_NEAR.
+ ASSERT_NOT_OK(isValid(
+ "{$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ],"
+ " b: {$near: [0, 0]}}",
+ *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndNaturalAscending) {
+ // Passes in default values for LiteParsedQuery except for sort order.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj sort = fromjson("{$natural: 1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ sort,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: TEXT and {$natural: 1} sort order.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndNaturalDescending) {
+ // Passes in default values for LiteParsedQuery except for sort order.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj sort = fromjson("{$natural: -1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ sort,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: TEXT and {$natural: -1} sort order.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndHint) {
+ // Passes in default values for LiteParsedQuery except for hint.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj hint = fromjson("{a: 1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ hint,
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+    // Invalid: TEXT and hint.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+}
+
+// SERVER-14366
+TEST(CanonicalQueryTest, IsValidGeoNearNaturalSort) {
+ // Passes in default values for LiteParsedQuery except for sort order.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj sort = fromjson("{$natural: 1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ sort,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: GEO_NEAR and {$natural: 1} sort order.
+ ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}", *lpq));
+}
+
+// SERVER-14366
+TEST(CanonicalQueryTest, IsValidGeoNearNaturalHint) {
+ // Passes in default values for LiteParsedQuery except for the hint.
+ // Filter inside LiteParsedQuery is not used.
+ BSONObj hint = fromjson("{$natural: 1}");
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ hint,
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ // Invalid: GEO_NEAR and {$natural: 1} hint.
+ ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}", *lpq));
+}
+
+TEST(CanonicalQueryTest, IsValidTextAndSnapshot) {
+ // Passes in default values for LiteParsedQuery except for snapshot.
+ // Filter inside LiteParsedQuery is not used.
+ bool snapshot = true;
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery(ns,
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ snapshot,
+ false))); // explain
+
+ // Invalid: TEXT and snapshot.
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+}
+
+//
+// Tests for CanonicalQuery::sortTree
+//
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
+/**
+ * Helper function for testing CanonicalQuery::sortTree().
+ *
+ * Verifies that sorting the expression 'unsortedQueryStr' yields an expression equivalent to
+ * the expression 'sortedQueryStr'.
+ */
+void testSortTree(const char* unsortedQueryStr, const char* sortedQueryStr) {
+ BSONObj unsortedQueryObj = fromjson(unsortedQueryStr);
+ unique_ptr<MatchExpression> unsortedQueryExpr(parseMatchExpression(unsortedQueryObj));
- // Don't do anything with a double OR.
- TEST(CanonicalQueryTest, RewriteNoDoubleOr) {
- string queryStr = "{$or:[{a:1}, {b:1}], $or:[{c:1}, {d:1}], e:1}";
- BSONObj queryObj = fromjson(queryStr);
- unique_ptr<MatchExpression> base(parseMatchExpression(queryObj));
- unique_ptr<MatchExpression> rewrite(CanonicalQuery::logicalRewrite(base->shallowClone()));
- assertEquivalent(queryStr.c_str(), base.get(), rewrite.get());
- }
+ BSONObj sortedQueryObj = fromjson(sortedQueryStr);
+ unique_ptr<MatchExpression> sortedQueryExpr(parseMatchExpression(sortedQueryObj));
- // Do something with a single or.
- TEST(CanonicalQueryTest, RewriteSingleOr) {
- // Rewrite of this...
- string queryStr = "{$or:[{a:1}, {b:1}], e:1}";
- BSONObj queryObj = fromjson(queryStr);
- unique_ptr<MatchExpression> rewrite(CanonicalQuery::logicalRewrite(parseMatchExpression(queryObj)));
-
- // Should look like this.
- string rewriteStr = "{$or:[{a:1, e:1}, {b:1, e:1}]}";
- BSONObj rewriteObj = fromjson(rewriteStr);
- unique_ptr<MatchExpression> base(parseMatchExpression(rewriteObj));
- assertEquivalent(queryStr.c_str(), base.get(), rewrite.get());
- }
+ // Sanity check that the unsorted expression is not equivalent to the sorted expression.
+ assertNotEquivalent(unsortedQueryStr, unsortedQueryExpr.get(), sortedQueryExpr.get());
- /**
- * Test function for CanonicalQuery::normalize.
- */
- void testNormalizeQuery(const char* queryStr, const char* expectedExprStr) {
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
- MatchExpression* me = cq->root();
- BSONObj expectedExprObj = fromjson(expectedExprStr);
- unique_ptr<MatchExpression> expectedExpr(parseMatchExpression(expectedExprObj));
- assertEquivalent(queryStr, expectedExpr.get(), me);
+ // Sanity check that sorting the sorted expression is a no-op.
+ {
+ unique_ptr<MatchExpression> sortedQueryExprClone(parseMatchExpression(sortedQueryObj));
+ CanonicalQuery::sortTree(sortedQueryExprClone.get());
+ assertEquivalent(unsortedQueryStr, sortedQueryExpr.get(), sortedQueryExprClone.get());
}
- TEST(CanonicalQueryTest, NormalizeQuerySort) {
- // Field names
- testNormalizeQuery("{b: 1, a: 1}", "{a: 1, b: 1}");
- // Operator types
- testNormalizeQuery("{a: {$gt: 5}, a: {$lt: 10}}}", "{a: {$lt: 10}, a: {$gt: 5}}");
- // Nested queries
- testNormalizeQuery("{a: {$elemMatch: {c: 1, b:1}}}",
- "{a: {$elemMatch: {b: 1, c:1}}}");
- }
+ // Test that sorting the unsorted expression yields the sorted expression.
+ CanonicalQuery::sortTree(unsortedQueryExpr.get());
+ assertEquivalent(unsortedQueryStr, unsortedQueryExpr.get(), sortedQueryExpr.get());
+}
+
+// Test that an EQ expression sorts before a GT expression.
+TEST(CanonicalQueryTest, SortTreeMatchTypeComparison) {
+ testSortTree("{a: {$gt: 1}, a: 1}", "{a: 1, a: {$gt: 1}}");
+}
+
+// Test that an EQ expression on path "a" sorts before an EQ expression on path "b".
+TEST(CanonicalQueryTest, SortTreePathComparison) {
+ testSortTree("{b: 1, a: 1}", "{a: 1, b: 1}");
+ testSortTree("{'a.b': 1, a: 1}", "{a: 1, 'a.b': 1}");
+ testSortTree("{'a.c': 1, 'a.b': 1}", "{'a.b': 1, 'a.c': 1}");
+}
+
+// Test that AND expressions sort according to their first differing child.
+TEST(CanonicalQueryTest, SortTreeChildComparison) {
+ testSortTree("{$or: [{a: 1, c: 1}, {a: 1, b: 1}]}", "{$or: [{a: 1, b: 1}, {a: 1, c: 1}]}");
+}
+
+// Test that an AND with 2 children sorts before an AND with 3 children, if the first 2 children
+// are equivalent in both.
+TEST(CanonicalQueryTest, SortTreeNumChildrenComparison) {
+ testSortTree("{$or: [{a: 1, b: 1, c: 1}, {a: 1, b: 1}]}",
+ "{$or: [{a: 1, b: 1}, {a: 1, b: 1, c: 1}]}");
+}
+
+//
+// Tests for CanonicalQuery::logicalRewrite
+//
- TEST(CanonicalQueryTest, NormalizeQueryTree) {
- // Single-child $or elimination.
- testNormalizeQuery("{$or: [{b: 1}]}", "{b: 1}");
- // Single-child $and elimination.
- testNormalizeQuery("{$or: [{$and: [{a: 1}]}, {b: 1}]}", "{$or: [{a: 1}, {b: 1}]}");
- // $or absorbs $or children.
- testNormalizeQuery("{$or: [{a: 1}, {$or: [{b: 1}, {$or: [{c: 1}]}]}, {d: 1}]}",
- "{$or: [{a: 1}, {b: 1}, {c: 1}, {d: 1}]}");
- // $and absorbs $and children.
- testNormalizeQuery("{$and: [{$and: [{a: 1}, {b: 1}]}, {c: 1}]}",
- "{$and: [{a: 1}, {b: 1}, {c: 1}]}");
- }
+/**
+ * Utility function to create a CanonicalQuery
+ */
+CanonicalQuery* canonicalize(const char* queryStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr, const char* projStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+// Don't do anything with a double OR.
+TEST(CanonicalQueryTest, RewriteNoDoubleOr) {
+ string queryStr = "{$or:[{a:1}, {b:1}], $or:[{c:1}, {d:1}], e:1}";
+ BSONObj queryObj = fromjson(queryStr);
+ unique_ptr<MatchExpression> base(parseMatchExpression(queryObj));
+ unique_ptr<MatchExpression> rewrite(CanonicalQuery::logicalRewrite(base->shallowClone()));
+ assertEquivalent(queryStr.c_str(), base.get(), rewrite.get());
+}
+
+// Do something with a single OR.
+TEST(CanonicalQueryTest, RewriteSingleOr) {
+ // Rewrite of this...
+ string queryStr = "{$or:[{a:1}, {b:1}], e:1}";
+ BSONObj queryObj = fromjson(queryStr);
+ unique_ptr<MatchExpression> rewrite(
+ CanonicalQuery::logicalRewrite(parseMatchExpression(queryObj)));
+
+ // Should look like this.
+ string rewriteStr = "{$or:[{a:1, e:1}, {b:1, e:1}]}";
+ BSONObj rewriteObj = fromjson(rewriteStr);
+ unique_ptr<MatchExpression> base(parseMatchExpression(rewriteObj));
+ assertEquivalent(queryStr.c_str(), base.get(), rewrite.get());
+}
-} // namespace
-} // namespace mongo
+/**
+ * Test function for CanonicalQuery::normalize.
+ */
+void testNormalizeQuery(const char* queryStr, const char* expectedExprStr) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
+ MatchExpression* me = cq->root();
+ BSONObj expectedExprObj = fromjson(expectedExprStr);
+ unique_ptr<MatchExpression> expectedExpr(parseMatchExpression(expectedExprObj));
+ assertEquivalent(queryStr, expectedExpr.get(), me);
+}
+
+TEST(CanonicalQueryTest, NormalizeQuerySort) {
+ // Field names
+ testNormalizeQuery("{b: 1, a: 1}", "{a: 1, b: 1}");
+ // Operator types
+ testNormalizeQuery("{a: {$gt: 5}, a: {$lt: 10}}}", "{a: {$lt: 10}, a: {$gt: 5}}");
+ // Nested queries
+ testNormalizeQuery("{a: {$elemMatch: {c: 1, b:1}}}", "{a: {$elemMatch: {b: 1, c:1}}}");
+}
+
+TEST(CanonicalQueryTest, NormalizeQueryTree) {
+ // Single-child $or elimination.
+ testNormalizeQuery("{$or: [{b: 1}]}", "{b: 1}");
+ // Single-child $and elimination.
+ testNormalizeQuery("{$or: [{$and: [{a: 1}]}, {b: 1}]}", "{$or: [{a: 1}, {b: 1}]}");
+ // $or absorbs $or children.
+ testNormalizeQuery("{$or: [{a: 1}, {$or: [{b: 1}, {$or: [{c: 1}]}]}, {d: 1}]}",
+ "{$or: [{a: 1}, {b: 1}, {c: 1}, {d: 1}]}");
+ // $and absorbs $and children.
+ testNormalizeQuery("{$and: [{$and: [{a: 1}, {b: 1}]}, {c: 1}]}",
+ "{$and: [{a: 1}, {b: 1}, {c: 1}]}");
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/count_request.cpp b/src/mongo/db/query/count_request.cpp
index a587586590f..8541a5cc12b 100644
--- a/src/mongo/db/query/count_request.cpp
+++ b/src/mongo/db/query/count_request.cpp
@@ -36,96 +36,90 @@
namespace mongo {
namespace {
- const char kCmdName[] = "count";
- const char kQueryField[] = "query";
- const char kLimitField[] = "limit";
- const char kSkipField[] = "skip";
- const char kHintField[] = "hint";
+const char kCmdName[] = "count";
+const char kQueryField[] = "query";
+const char kLimitField[] = "limit";
+const char kSkipField[] = "skip";
+const char kHintField[] = "hint";
-} // namespace
+} // namespace
- CountRequest::CountRequest(const std::string& fullNs, BSONObj query)
- : _fullNs(fullNs),
- _query(query.getOwned()) {
- }
+CountRequest::CountRequest(const std::string& fullNs, BSONObj query)
+ : _fullNs(fullNs), _query(query.getOwned()) {}
- void CountRequest::setHint(BSONObj hint) {
- _hint = hint.getOwned();
- }
+void CountRequest::setHint(BSONObj hint) {
+ _hint = hint.getOwned();
+}
- BSONObj CountRequest::toBSON() const {
- BSONObjBuilder builder;
+BSONObj CountRequest::toBSON() const {
+ BSONObjBuilder builder;
- builder.append(kCmdName, _fullNs);
- builder.append(kQueryField, _query);
+ builder.append(kCmdName, _fullNs);
+ builder.append(kQueryField, _query);
- if (_limit) {
- builder.append(kLimitField, _limit.get());
- }
-
- if (_skip) {
- builder.append(kSkipField, _skip.get());
- }
-
- if (_hint) {
- builder.append(kHintField, _hint.get());
- }
+ if (_limit) {
+ builder.append(kLimitField, _limit.get());
+ }
- return builder.obj();
+ if (_skip) {
+ builder.append(kSkipField, _skip.get());
}
- StatusWith<CountRequest> CountRequest::parseFromBSON(const std::string& dbname,
- const BSONObj& cmdObj) {
+ if (_hint) {
+ builder.append(kHintField, _hint.get());
+ }
- BSONElement firstElt = cmdObj.firstElement();
- const std::string coll = (firstElt.type() == BSONType::String) ? firstElt.str() : "";
+ return builder.obj();
+}
- const std::string ns = str::stream() << dbname << "." << coll;
- if (!nsIsFull(ns)) {
- return Status(ErrorCodes::BadValue, "invalid collection name");
- }
+StatusWith<CountRequest> CountRequest::parseFromBSON(const std::string& dbname,
+ const BSONObj& cmdObj) {
+ BSONElement firstElt = cmdObj.firstElement();
+ const std::string coll = (firstElt.type() == BSONType::String) ? firstElt.str() : "";
- // We don't validate that "query" is a nested object due to SERVER-15456.
- CountRequest request(ns, cmdObj.getObjectField(kQueryField));
+ const std::string ns = str::stream() << dbname << "." << coll;
+ if (!nsIsFull(ns)) {
+ return Status(ErrorCodes::BadValue, "invalid collection name");
+ }
- // Limit
- if (cmdObj[kLimitField].isNumber()) {
- long long limit = cmdObj[kLimitField].numberLong();
+ // We don't validate that "query" is a nested object due to SERVER-15456.
+ CountRequest request(ns, cmdObj.getObjectField(kQueryField));
- // For counts, limit and -limit mean the same thing.
- if (limit < 0) {
- limit = -limit;
- }
+ // Limit
+ if (cmdObj[kLimitField].isNumber()) {
+ long long limit = cmdObj[kLimitField].numberLong();
- request.setLimit(limit);
- }
- else if (cmdObj[kLimitField].ok()) {
- return Status(ErrorCodes::BadValue, "limit value is not a valid number");
+ // For counts, limit and -limit mean the same thing.
+ if (limit < 0) {
+ limit = -limit;
}
- // Skip
- if (cmdObj[kSkipField].isNumber()) {
- long long skip = cmdObj[kSkipField].numberLong();
- if (skip < 0) {
- return Status(ErrorCodes::BadValue, "skip value is negative in count query");
- }
+ request.setLimit(limit);
+ } else if (cmdObj[kLimitField].ok()) {
+ return Status(ErrorCodes::BadValue, "limit value is not a valid number");
+ }
- request.setSkip(skip);
- }
- else if (cmdObj[kSkipField].ok()) {
- return Status(ErrorCodes::BadValue, "skip value is not a valid number");
+ // Skip
+ if (cmdObj[kSkipField].isNumber()) {
+ long long skip = cmdObj[kSkipField].numberLong();
+ if (skip < 0) {
+ return Status(ErrorCodes::BadValue, "skip value is negative in count query");
}
- // Hint
- if (Object == cmdObj[kHintField].type()) {
- request.setHint(cmdObj[kHintField].Obj());
- }
- else if (String == cmdObj[kHintField].type()) {
- const std::string hint = cmdObj.getStringField(kHintField);
- request.setHint(BSON("$hint" << hint));
- }
+ request.setSkip(skip);
+ } else if (cmdObj[kSkipField].ok()) {
+ return Status(ErrorCodes::BadValue, "skip value is not a valid number");
+ }
- return request;
+ // Hint
+ if (Object == cmdObj[kHintField].type()) {
+ request.setHint(cmdObj[kHintField].Obj());
+ } else if (String == cmdObj[kHintField].type()) {
+ const std::string hint = cmdObj.getStringField(kHintField);
+ request.setHint(BSON("$hint" << hint));
}
-} // namespace mongo
+ return request;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/count_request.h b/src/mongo/db/query/count_request.h
index 0e9eb25ee30..42b654a7896 100644
--- a/src/mongo/db/query/count_request.h
+++ b/src/mongo/db/query/count_request.h
@@ -36,59 +36,72 @@
namespace mongo {
- template<typename T> class StatusWith;
+template <typename T>
+class StatusWith;
+/**
+ * A description of a request for a count operation. Copyable.
+ */
+class CountRequest {
+public:
/**
- * A description of a request for a count operation. Copyable.
+     * Constructs a request for the given namespace and query. Limit, skip, and hint
+     * are initially unset.
*/
- class CountRequest {
- public:
-
- /**
- * Construct an empty request.
- */
- CountRequest(const std::string& fullNs, BSONObj query);
-
- const std::string& getNs() const { return _fullNs; }
- const BSONObj getQuery() const { return _query; }
-
- long long getLimit() const { return _limit.value_or(0); }
- void setLimit(long long limit) { _limit = limit; }
-
- long long getSkip() const { return _skip.value_or(0); }
- void setSkip(long long skip) { _skip = skip; }
+ CountRequest(const std::string& fullNs, BSONObj query);
+
+ const std::string& getNs() const {
+ return _fullNs;
+ }
+ const BSONObj getQuery() const {
+ return _query;
+ }
+
+ long long getLimit() const {
+ return _limit.value_or(0);
+ }
+ void setLimit(long long limit) {
+ _limit = limit;
+ }
+
+ long long getSkip() const {
+ return _skip.value_or(0);
+ }
+ void setSkip(long long skip) {
+ _skip = skip;
+ }
+
+ const BSONObj getHint() const {
+ return _hint.value_or(BSONObj());
+ }
+ void setHint(BSONObj hint);
- const BSONObj getHint() const { return _hint.value_or(BSONObj()); }
- void setHint(BSONObj hint);
-
- /**
- * Constructs a BSON representation of this request, which can be used for sending it in
- * commands.
- */
- BSONObj toBSON() const;
+ /**
+ * Constructs a BSON representation of this request, which can be used for sending it in
+ * commands.
+ */
+ BSONObj toBSON() const;
- /**
- * Construct a CountRequest from the command specification and db name.
- */
- static StatusWith<CountRequest> parseFromBSON(const std::string& dbname,
- const BSONObj& cmdObj);
+ /**
+ * Construct a CountRequest from the command specification and db name.
+ */
+ static StatusWith<CountRequest> parseFromBSON(const std::string& dbname, const BSONObj& cmdObj);
- private:
- // Namespace to operate on (e.g. "foo.bar").
- const std::string _fullNs;
+private:
+ // Namespace to operate on (e.g. "foo.bar").
+ const std::string _fullNs;
- // A predicate describing the set of documents to count.
- const BSONObj _query;
+ // A predicate describing the set of documents to count.
+ const BSONObj _query;
- // Optional. An integer limiting the number of documents to count.
- boost::optional<long long> _limit;
+ // Optional. An integer limiting the number of documents to count.
+ boost::optional<long long> _limit;
- // Optional. An integer indicating to not include the first n documents in the count.
- boost::optional<long long> _skip;
+ // Optional. An integer indicating to not include the first n documents in the count.
+ boost::optional<long long> _skip;
- // Optional. Indicates to the query planner that it should generate a count plan using a
- // particular index.
- boost::optional<BSONObj> _hint;
- };
+ // Optional. Indicates to the query planner that it should generate a count plan using a
+ // particular index.
+ boost::optional<BSONObj> _hint;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/count_request_test.cpp b/src/mongo/db/query/count_request_test.cpp
index 4f0546b4180..da68a2ce79f 100644
--- a/src/mongo/db/query/count_request_test.cpp
+++ b/src/mongo/db/query/count_request_test.cpp
@@ -36,98 +36,98 @@
namespace mongo {
namespace {
- TEST(CountRequest, ParseDefaults) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("count" << "TestColl" <<
- "query" << BSON("a" << BSON("$lte" << 10))));
-
- ASSERT_OK(countRequestStatus.getStatus());
-
- const CountRequest& countRequest = countRequestStatus.getValue();
-
- ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
-
- // Defaults
- ASSERT_EQUALS(countRequest.getLimit(), 0);
- ASSERT_EQUALS(countRequest.getSkip(), 0);
- ASSERT(countRequest.getHint().isEmpty());
- }
-
- TEST(CountRequest, ParseComplete) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("count" << "TestColl" <<
- "query" << BSON("a" << BSON("$gte" << 11)) <<
- "limit" << 100 <<
- "skip" << 1000 <<
- "hint" << BSON("b" << 5)));
-
- ASSERT_OK(countRequestStatus.getStatus());
-
- const CountRequest& countRequest = countRequestStatus.getValue();
-
- ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
- ASSERT_EQUALS(countRequest.getLimit(), 100);
- ASSERT_EQUALS(countRequest.getSkip(), 1000);
- ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
- }
-
- TEST(CountRequest, ParseNegativeLimit) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("count" << "TestColl" <<
- "query" << BSON("a" << BSON("$gte" << 11)) <<
- "limit" << -100 <<
- "skip" << 1000 <<
- "hint" << BSON("b" << 5)));
-
- ASSERT_OK(countRequestStatus.getStatus());
-
- const CountRequest& countRequest = countRequestStatus.getValue();
-
- ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
- ASSERT_EQUALS(countRequest.getLimit(), 100);
- ASSERT_EQUALS(countRequest.getSkip(), 1000);
- ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
- }
-
- TEST(CountRequest, FailParseMissingNS) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("query" << BSON("a" << BSON("$gte" << 11))));
-
- ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
- }
-
- TEST(CountRequest, FailParseBadSkipValue) {
- const auto countRequestStatus =
- CountRequest::parseFromBSON("TestDB",
- BSON("count" << "TestColl" <<
- "query" << BSON("a" << BSON("$gte" << 11)) <<
- "skip" << -1000));
-
- ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
- }
-
- TEST(CountRequest, ToBSON) {
- CountRequest countRequest("TestDB.TestColl", BSON("a" << BSON("$gte" << 11)));
- countRequest.setLimit(100);
- countRequest.setSkip(1000);
- countRequest.setHint(BSON("b" << 5));
-
- BSONObj actualObj = countRequest.toBSON();
- BSONObj expectedObj(fromjson("{ count : 'TestDB.TestColl',"
- " query : { a : { '$gte' : 11 } },"
- " limit : 100,"
- " skip : 1000,"
- " hint : { b : 5 } }"));
-
- ASSERT_EQUALS(actualObj, expectedObj);
- }
-
-} // namespace
-} // namespace mongo
+TEST(CountRequest, ParseDefaults) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB",
+ BSON("count"
+ << "TestColl"
+ << "query" << BSON("a" << BSON("$lte" << 10))));
+
+ ASSERT_OK(countRequestStatus.getStatus());
+
+ const CountRequest& countRequest = countRequestStatus.getValue();
+
+ ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
+ ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
+
+ // Defaults
+ ASSERT_EQUALS(countRequest.getLimit(), 0);
+ ASSERT_EQUALS(countRequest.getSkip(), 0);
+ ASSERT(countRequest.getHint().isEmpty());
+}
+
+TEST(CountRequest, ParseComplete) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB",
+ BSON("count"
+ << "TestColl"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
+ << 100 << "skip" << 1000 << "hint" << BSON("b" << 5)));
+
+ ASSERT_OK(countRequestStatus.getStatus());
+
+ const CountRequest& countRequest = countRequestStatus.getValue();
+
+ ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
+ ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
+ ASSERT_EQUALS(countRequest.getLimit(), 100);
+ ASSERT_EQUALS(countRequest.getSkip(), 1000);
+ ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
+}
+
+TEST(CountRequest, ParseNegativeLimit) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB",
+ BSON("count"
+ << "TestColl"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
+ << -100 << "skip" << 1000 << "hint" << BSON("b" << 5)));
+
+ ASSERT_OK(countRequestStatus.getStatus());
+
+ const CountRequest& countRequest = countRequestStatus.getValue();
+
+ ASSERT_EQUALS(countRequest.getNs(), "TestDB.TestColl");
+ ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
+ ASSERT_EQUALS(countRequest.getLimit(), 100);
+ ASSERT_EQUALS(countRequest.getSkip(), 1000);
+ ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
+}
+
+TEST(CountRequest, FailParseMissingNS) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB", BSON("query" << BSON("a" << BSON("$gte" << 11))));
+
+ ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
+}
+
+TEST(CountRequest, FailParseBadSkipValue) {
+ const auto countRequestStatus =
+ CountRequest::parseFromBSON("TestDB",
+ BSON("count"
+ << "TestColl"
+ << "query" << BSON("a" << BSON("$gte" << 11)) << "skip"
+ << -1000));
+
+ ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
+}
+
+TEST(CountRequest, ToBSON) {
+ CountRequest countRequest("TestDB.TestColl", BSON("a" << BSON("$gte" << 11)));
+ countRequest.setLimit(100);
+ countRequest.setSkip(1000);
+ countRequest.setHint(BSON("b" << 5));
+
+ BSONObj actualObj = countRequest.toBSON();
+ BSONObj expectedObj(fromjson(
+ "{ count : 'TestDB.TestColl',"
+ " query : { a : { '$gte' : 11 } },"
+ " limit : 100,"
+ " skip : 1000,"
+ " hint : { b : 5 } }"));
+
+ ASSERT_EQUALS(actualObj, expectedObj);
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/cursor_responses.cpp b/src/mongo/db/query/cursor_responses.cpp
index 1bd6be298e2..9165c498e37 100644
--- a/src/mongo/db/query/cursor_responses.cpp
+++ b/src/mongo/db/query/cursor_responses.cpp
@@ -34,26 +34,26 @@
namespace mongo {
- void appendCursorResponseObject(long long cursorId,
- StringData cursorNamespace,
- BSONArray firstBatch,
- BSONObjBuilder* builder) {
- BSONObjBuilder cursorObj(builder->subobjStart("cursor"));
- cursorObj.append("id", cursorId);
- cursorObj.append("ns", cursorNamespace);
- cursorObj.append("firstBatch", firstBatch);
- cursorObj.done();
- }
+void appendCursorResponseObject(long long cursorId,
+ StringData cursorNamespace,
+ BSONArray firstBatch,
+ BSONObjBuilder* builder) {
+ BSONObjBuilder cursorObj(builder->subobjStart("cursor"));
+ cursorObj.append("id", cursorId);
+ cursorObj.append("ns", cursorNamespace);
+ cursorObj.append("firstBatch", firstBatch);
+ cursorObj.done();
+}
- void appendGetMoreResponseObject(long long cursorId,
- StringData cursorNamespace,
- BSONArray nextBatch,
- BSONObjBuilder* builder) {
- BSONObjBuilder cursorObj(builder->subobjStart("cursor"));
- cursorObj.append("id", cursorId);
- cursorObj.append("ns", cursorNamespace);
- cursorObj.append("nextBatch", nextBatch);
- cursorObj.done();
- }
+void appendGetMoreResponseObject(long long cursorId,
+ StringData cursorNamespace,
+ BSONArray nextBatch,
+ BSONObjBuilder* builder) {
+ BSONObjBuilder cursorObj(builder->subobjStart("cursor"));
+ cursorObj.append("id", cursorId);
+ cursorObj.append("ns", cursorNamespace);
+ cursorObj.append("nextBatch", nextBatch);
+ cursorObj.done();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/cursor_responses.h b/src/mongo/db/query/cursor_responses.h
index 2c9fed3b610..06f2f268e63 100644
--- a/src/mongo/db/query/cursor_responses.h
+++ b/src/mongo/db/query/cursor_responses.h
@@ -30,36 +30,36 @@
namespace mongo {
- class BSONObjBuilder;
- class StringData;
- struct BSONArray;
+class BSONObjBuilder;
+class StringData;
+struct BSONArray;
- /**
- * Builds a cursor response object from the provided cursor identifiers and "firstBatch",
- * and appends the response object to the provided builder under the field name "cursor".
- * If the node is a member of a replSet, also appends the current term, primary, and
- * lastOp information.
- *
- * The response object has the following format:
- * { id: <NumberLong>, ns: <String>, firstBatch: <Array> }.
- */
- void appendCursorResponseObject(long long cursorId,
- StringData cursorNamespace,
- BSONArray firstBatch,
- BSONObjBuilder* builder);
+/**
+ * Builds a cursor response object from the provided cursor identifiers and "firstBatch",
+ * and appends the response object to the provided builder under the field name "cursor".
+ * If the node is a member of a replSet, also appends the current term, primary, and
+ * lastOp information.
+ *
+ * The response object has the following format:
+ * { id: <NumberLong>, ns: <String>, firstBatch: <Array> }.
+ */
+void appendCursorResponseObject(long long cursorId,
+ StringData cursorNamespace,
+ BSONArray firstBatch,
+ BSONObjBuilder* builder);
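+
+// A minimal usage sketch (the namespace and document below are hypothetical values):
+//
+//     BSONObjBuilder reply;
+//     BSONArrayBuilder firstBatch;
+//     firstBatch.append(BSON("_id" << 1));
+//     appendCursorResponseObject(0LL, "test.coll", firstBatch.arr(), &reply);
+//     // 'reply' now holds a "cursor" subobject:
+//     //     {id: 0, ns: "test.coll", firstBatch: [{_id: 1}]}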
- /**
- * Builds a getMore response object from the provided cursor identifiers and "nextBatch",
- * and appends the response object to the provided builder under the field name "cursor".
- * If the node is a member of a replSet, also appends the current term, primary, and
- * lastOp information.
- *
- * The response object has the following format:
- * { id: <NumberLong>, ns: <String>, nextBatch: <Array> }.
- */
- void appendGetMoreResponseObject(long long cursorId,
- StringData cursorNamespace,
- BSONArray nextBatch,
- BSONObjBuilder* builder);
+/**
+ * Builds a getMore response object from the provided cursor identifiers and "nextBatch",
+ * and appends the response object to the provided builder under the field name "cursor".
+ * If the node is a member of a replSet, also appends the current term, primary, and
+ * lastOp information.
+ *
+ * The response object has the following format:
+ * { id: <NumberLong>, ns: <String>, nextBatch: <Array> }.
+ */
+void appendGetMoreResponseObject(long long cursorId,
+ StringData cursorNamespace,
+ BSONArray nextBatch,
+ BSONObjBuilder* builder);
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 709fd07b28b..748e8af3b63 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -46,696 +46,659 @@
namespace {
- using namespace mongo;
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- /**
- * Traverse the tree rooted at 'root', and add all tree nodes into the list 'flattened'.
- */
- void flattenStatsTree(const PlanStageStats* root, vector<const PlanStageStats*>* flattened) {
- flattened->push_back(root);
- for (size_t i = 0; i < root->children.size(); ++i) {
- flattenStatsTree(root->children[i], flattened);
- }
+using namespace mongo;
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+/**
+ * Traverse the tree rooted at 'root', and add all tree nodes into the list 'flattened'.
+ */
+void flattenStatsTree(const PlanStageStats* root, vector<const PlanStageStats*>* flattened) {
+ flattened->push_back(root);
+ for (size_t i = 0; i < root->children.size(); ++i) {
+ flattenStatsTree(root->children[i], flattened);
}
+}
- /**
- * Traverse the tree rooted at 'root', and add all nodes into the list 'flattened'.
- */
- void flattenExecTree(const PlanStage* root, vector<const PlanStage*>* flattened) {
- flattened->push_back(root);
- vector<PlanStage*> children = root->getChildren();
- for (size_t i = 0; i < children.size(); ++i) {
- flattenExecTree(children[i], flattened);
- }
+/**
+ * Traverse the tree rooted at 'root', and add all nodes into the list 'flattened'.
+ */
+void flattenExecTree(const PlanStage* root, vector<const PlanStage*>* flattened) {
+ flattened->push_back(root);
+ vector<PlanStage*> children = root->getChildren();
+ for (size_t i = 0; i < children.size(); ++i) {
+ flattenExecTree(children[i], flattened);
}
+}
- /**
- * Get a pointer to the MultiPlanStage inside the stage tree rooted at 'root'.
- * Returns NULL if there is no MPS.
- */
- MultiPlanStage* getMultiPlanStage(PlanStage* root) {
- if (root->stageType() == STAGE_MULTI_PLAN) {
- MultiPlanStage* mps = static_cast<MultiPlanStage*>(root);
+/**
+ * Get a pointer to the MultiPlanStage inside the stage tree rooted at 'root'.
+ * Returns NULL if there is no MPS.
+ */
+MultiPlanStage* getMultiPlanStage(PlanStage* root) {
+ if (root->stageType() == STAGE_MULTI_PLAN) {
+ MultiPlanStage* mps = static_cast<MultiPlanStage*>(root);
+ return mps;
+ }
+
+ vector<PlanStage*> children = root->getChildren();
+ for (size_t i = 0; i < children.size(); i++) {
+ MultiPlanStage* mps = getMultiPlanStage(children[i]);
+ if (mps != NULL) {
return mps;
}
+ }
- vector<PlanStage*> children = root->getChildren();
- for (size_t i = 0; i < children.size(); i++) {
- MultiPlanStage* mps = getMultiPlanStage(children[i]);
- if (mps != NULL) {
- return mps;
- }
- }
+ return NULL;
+}
- return NULL;
+/**
+ * Given the SpecificStats object for a stage and the type of the stage, returns the
+ * number of index keys examined by the stage.
+ *
+ * This is used for getting the total number of keys examined by a plan. We need
+ * to collect a 'totalKeysExamined' metric for a regular explain (in which case this
+ * gets called from Explain::generateExecStats()) or for the slow query log / profiler
+ * (in which case this gets called from Explain::getSummaryStats()).
+ */
+size_t getKeysExamined(StageType type, const SpecificStats* specific) {
+ if (STAGE_IXSCAN == type) {
+ const IndexScanStats* spec = static_cast<const IndexScanStats*>(specific);
+ return spec->keysExamined;
+ } else if (STAGE_IDHACK == type) {
+ const IDHackStats* spec = static_cast<const IDHackStats*>(specific);
+ return spec->keysExamined;
+ } else if (STAGE_TEXT == type) {
+ const TextStats* spec = static_cast<const TextStats*>(specific);
+ return spec->keysExamined;
+ } else if (STAGE_COUNT_SCAN == type) {
+ const CountScanStats* spec = static_cast<const CountScanStats*>(specific);
+ return spec->keysExamined;
+ } else if (STAGE_DISTINCT_SCAN == type) {
+ const DistinctScanStats* spec = static_cast<const DistinctScanStats*>(specific);
+ return spec->keysExamined;
}
- /**
- * Given the SpecificStats object for a stage and the type of the stage, returns the
- * number of index keys examined by the stage.
- *
- * This is used for getting the total number of keys examined by a plan. We need
- * to collect a 'totalKeysExamined' metric for a regular explain (in which case this
- * gets called from Explain::generateExecStats()) or for the slow query log / profiler
- * (in which case this gets called from Explain::getSummaryStats()).
- */
- size_t getKeysExamined(StageType type, const SpecificStats* specific) {
- if (STAGE_IXSCAN == type) {
- const IndexScanStats* spec = static_cast<const IndexScanStats*>(specific);
- return spec->keysExamined;
- }
- else if (STAGE_IDHACK == type) {
- const IDHackStats* spec = static_cast<const IDHackStats*>(specific);
- return spec->keysExamined;
- }
- else if (STAGE_TEXT == type) {
- const TextStats* spec = static_cast<const TextStats*>(specific);
- return spec->keysExamined;
- }
- else if (STAGE_COUNT_SCAN == type) {
- const CountScanStats* spec = static_cast<const CountScanStats*>(specific);
- return spec->keysExamined;
- }
- else if (STAGE_DISTINCT_SCAN == type) {
- const DistinctScanStats* spec = static_cast<const DistinctScanStats*>(specific);
- return spec->keysExamined;
- }
-
- return 0;
- }
-
- /**
- * Given the SpecificStats object for a stage and the type of the stage, returns the
- * number of documents examined by the stage.
- *
- * This is used for getting the total number of documents examined by a plan. We need
- * to collect a 'totalDocsExamined' metric for a regular explain (in which case this
- * gets called from Explain::generateExecStats()) or for the slow query log / profiler
- * (in which case this gets called from Explain::getSummaryStats()).
- */
- size_t getDocsExamined(StageType type, const SpecificStats* specific) {
- if (STAGE_IDHACK == type) {
- const IDHackStats* spec = static_cast<const IDHackStats*>(specific);
- return spec->docsExamined;
- }
- else if (STAGE_TEXT == type) {
- const TextStats* spec = static_cast<const TextStats*>(specific);
- return spec->fetches;
- }
- else if (STAGE_FETCH == type) {
- const FetchStats* spec = static_cast<const FetchStats*>(specific);
- return spec->docsExamined;
- }
- else if (STAGE_COLLSCAN == type) {
- const CollectionScanStats* spec = static_cast<const CollectionScanStats*>(specific);
- return spec->docsTested;
- }
+ return 0;
+}
- return 0;
+/**
+ * Given the SpecificStats object for a stage and the type of the stage, returns the
+ * number of documents examined by the stage.
+ *
+ * This is used for getting the total number of documents examined by a plan. We need
+ * to collect a 'totalDocsExamined' metric for a regular explain (in which case this
+ * gets called from Explain::generateExecStats()) or for the slow query log / profiler
+ * (in which case this gets called from Explain::getSummaryStats()).
+ */
+size_t getDocsExamined(StageType type, const SpecificStats* specific) {
+ if (STAGE_IDHACK == type) {
+ const IDHackStats* spec = static_cast<const IDHackStats*>(specific);
+ return spec->docsExamined;
+ } else if (STAGE_TEXT == type) {
+ const TextStats* spec = static_cast<const TextStats*>(specific);
+ return spec->fetches;
+ } else if (STAGE_FETCH == type) {
+ const FetchStats* spec = static_cast<const FetchStats*>(specific);
+ return spec->docsExamined;
+ } else if (STAGE_COLLSCAN == type) {
+ const CollectionScanStats* spec = static_cast<const CollectionScanStats*>(specific);
+ return spec->docsTested;
}
- /**
- * Adds to the plan summary string being built by 'ss' for the execution stage 'stage'.
- */
- void addStageSummaryStr(const PlanStage* stage, mongoutils::str::stream& ss) {
- // First add the stage type string.
- const CommonStats* common = stage->getCommonStats();
- ss << common->stageTypeStr;
-
- // Some leaf nodes also provide info about the index they used.
- const SpecificStats* specific = stage->getSpecificStats();
- if (STAGE_COUNT_SCAN == stage->stageType()) {
- const CountScanStats* spec = static_cast<const CountScanStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_DISTINCT_SCAN == stage->stageType()) {
- const DistinctScanStats* spec = static_cast<const DistinctScanStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_GEO_NEAR_2D == stage->stageType()) {
- const NearStats* spec = static_cast<const NearStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == stage->stageType()) {
- const NearStats* spec = static_cast<const NearStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_IXSCAN == stage->stageType()) {
- const IndexScanStats* spec = static_cast<const IndexScanStats*>(specific);
- ss << " " << spec->keyPattern;
- }
- else if (STAGE_TEXT == stage->stageType()) {
- const TextStats* spec = static_cast<const TextStats*>(specific);
- ss << " " << spec->indexPrefix;
- }
+ return 0;
+}
+
+/**
+ * Appends a summary of the execution stage 'stage' to the plan summary string being built in 'ss'.
+ */
+void addStageSummaryStr(const PlanStage* stage, mongoutils::str::stream& ss) {
+ // First add the stage type string.
+ const CommonStats* common = stage->getCommonStats();
+ ss << common->stageTypeStr;
+
+ // Some leaf nodes also provide info about the index they used.
+ const SpecificStats* specific = stage->getSpecificStats();
+ if (STAGE_COUNT_SCAN == stage->stageType()) {
+ const CountScanStats* spec = static_cast<const CountScanStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_DISTINCT_SCAN == stage->stageType()) {
+ const DistinctScanStats* spec = static_cast<const DistinctScanStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_GEO_NEAR_2D == stage->stageType()) {
+ const NearStats* spec = static_cast<const NearStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == stage->stageType()) {
+ const NearStats* spec = static_cast<const NearStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_IXSCAN == stage->stageType()) {
+ const IndexScanStats* spec = static_cast<const IndexScanStats*>(specific);
+ ss << " " << spec->keyPattern;
+ } else if (STAGE_TEXT == stage->stageType()) {
+ const TextStats* spec = static_cast<const TextStats*>(specific);
+ ss << " " << spec->indexPrefix;
}
+}
-} // namespace
+} // namespace
namespace mongo {
- using mongoutils::str::stream;
-
- // static
- void Explain::statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* bob,
- BSONObjBuilder* topLevelBob) {
- invariant(bob);
- invariant(topLevelBob);
-
- // Stop as soon as the BSON object we're building exceeds 10 MB.
- static const int kMaxStatsBSONSize = 10 * 1024 * 1024;
- if (topLevelBob->len() > kMaxStatsBSONSize) {
- bob->append("warning", "stats tree exceeded 10 MB");
- return;
- }
-
- // Stage name.
- bob->append("stage", stats.common.stageTypeStr);
+using mongoutils::str::stream;
+
+// static
+void Explain::statsToBSON(const PlanStageStats& stats,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* bob,
+ BSONObjBuilder* topLevelBob) {
+ invariant(bob);
+ invariant(topLevelBob);
+
+ // Stop as soon as the BSON object we're building exceeds 10 MB.
+ static const int kMaxStatsBSONSize = 10 * 1024 * 1024;
+ if (topLevelBob->len() > kMaxStatsBSONSize) {
+ bob->append("warning", "stats tree exceeded 10 MB");
+ return;
+ }
- // Display the BSON representation of the filter, if there is one.
- if (!stats.common.filter.isEmpty()) {
- bob->append("filter", stats.common.filter);
- }
+ // Stage name.
+ bob->append("stage", stats.common.stageTypeStr);
- // Some top-level exec stats get pulled out of the root stage.
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nReturned", stats.common.advanced);
- bob->appendNumber("executionTimeMillisEstimate", stats.common.executionTimeMillis);
- bob->appendNumber("works", stats.common.works);
- bob->appendNumber("advanced", stats.common.advanced);
- bob->appendNumber("needTime", stats.common.needTime);
- bob->appendNumber("needYield", stats.common.needYield);
- bob->appendNumber("saveState", stats.common.yields);
- bob->appendNumber("restoreState", stats.common.unyields);
- bob->appendNumber("isEOF", stats.common.isEOF);
- bob->appendNumber("invalidates", stats.common.invalidates);
- }
+ // Display the BSON representation of the filter, if there is one.
+ if (!stats.common.filter.isEmpty()) {
+ bob->append("filter", stats.common.filter);
+ }
- // Stage-specific stats
- if (STAGE_AND_HASH == stats.stageType) {
- AndHashStats* spec = static_cast<AndHashStats*>(stats.specific.get());
+ // Some top-level exec stats get pulled out of the root stage.
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nReturned", stats.common.advanced);
+ bob->appendNumber("executionTimeMillisEstimate", stats.common.executionTimeMillis);
+ bob->appendNumber("works", stats.common.works);
+ bob->appendNumber("advanced", stats.common.advanced);
+ bob->appendNumber("needTime", stats.common.needTime);
+ bob->appendNumber("needYield", stats.common.needYield);
+ bob->appendNumber("saveState", stats.common.yields);
+ bob->appendNumber("restoreState", stats.common.unyields);
+ bob->appendNumber("isEOF", stats.common.isEOF);
+ bob->appendNumber("invalidates", stats.common.invalidates);
+ }
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("memUsage", spec->memUsage);
- bob->appendNumber("memLimit", spec->memLimit);
+ // Stage-specific stats
+ if (STAGE_AND_HASH == stats.stageType) {
+ AndHashStats* spec = static_cast<AndHashStats*>(stats.specific.get());
- bob->appendNumber("flaggedButPassed", spec->flaggedButPassed);
- bob->appendNumber("flaggedInProgress", spec->flaggedInProgress);
- for (size_t i = 0; i < spec->mapAfterChild.size(); ++i) {
- bob->appendNumber(string(stream() << "mapAfterChild_" << i),
- spec->mapAfterChild[i]);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("memUsage", spec->memUsage);
+ bob->appendNumber("memLimit", spec->memLimit);
+
+ bob->appendNumber("flaggedButPassed", spec->flaggedButPassed);
+ bob->appendNumber("flaggedInProgress", spec->flaggedInProgress);
+ for (size_t i = 0; i < spec->mapAfterChild.size(); ++i) {
+ bob->appendNumber(string(stream() << "mapAfterChild_" << i),
+ spec->mapAfterChild[i]);
}
}
- else if (STAGE_AND_SORTED == stats.stageType) {
- AndSortedStats* spec = static_cast<AndSortedStats*>(stats.specific.get());
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("flagged", spec->flagged);
- for (size_t i = 0; i < spec->failedAnd.size(); ++i) {
- bob->appendNumber(string(stream() << "failedAnd_" << i),
- spec->failedAnd[i]);
- }
+ } else if (STAGE_AND_SORTED == stats.stageType) {
+ AndSortedStats* spec = static_cast<AndSortedStats*>(stats.specific.get());
+
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("flagged", spec->flagged);
+ for (size_t i = 0; i < spec->failedAnd.size(); ++i) {
+ bob->appendNumber(string(stream() << "failedAnd_" << i), spec->failedAnd[i]);
}
}
- else if (STAGE_COLLSCAN == stats.stageType) {
- CollectionScanStats* spec = static_cast<CollectionScanStats*>(stats.specific.get());
- bob->append("direction", spec->direction > 0 ? "forward" : "backward");
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("docsExamined", spec->docsTested);
- }
+ } else if (STAGE_COLLSCAN == stats.stageType) {
+ CollectionScanStats* spec = static_cast<CollectionScanStats*>(stats.specific.get());
+ bob->append("direction", spec->direction > 0 ? "forward" : "backward");
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("docsExamined", spec->docsTested);
}
- else if (STAGE_COUNT == stats.stageType) {
- CountStats* spec = static_cast<CountStats*>(stats.specific.get());
+ } else if (STAGE_COUNT == stats.stageType) {
+ CountStats* spec = static_cast<CountStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nCounted", spec->nCounted);
- bob->appendNumber("nSkipped", spec->nSkipped);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nCounted", spec->nCounted);
+ bob->appendNumber("nSkipped", spec->nSkipped);
}
- else if (STAGE_COUNT_SCAN == stats.stageType) {
- CountScanStats* spec = static_cast<CountScanStats*>(stats.specific.get());
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("keysExamined", spec->keysExamined);
- }
+ } else if (STAGE_COUNT_SCAN == stats.stageType) {
+ CountScanStats* spec = static_cast<CountScanStats*>(stats.specific.get());
- bob->append("keyPattern", spec->keyPattern);
- bob->append("indexName", spec->indexName);
- bob->appendBool("isMultiKey", spec->isMultiKey);
- bob->appendBool("isUnique", spec->isUnique);
- bob->appendBool("isSparse", spec->isSparse);
- bob->appendBool("isPartial", spec->isPartial);
- bob->append("indexVersion", spec->indexVersion);
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("keysExamined", spec->keysExamined);
}
- else if (STAGE_DELETE == stats.stageType) {
- DeleteStats* spec = static_cast<DeleteStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nWouldDelete", spec->docsDeleted);
- bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
- }
- }
- else if (STAGE_FETCH == stats.stageType) {
- FetchStats* spec = static_cast<FetchStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("docsExamined", spec->docsExamined);
- bob->appendNumber("alreadyHasObj", spec->alreadyHasObj);
- }
- }
- else if (STAGE_GEO_NEAR_2D == stats.stageType
- || STAGE_GEO_NEAR_2DSPHERE == stats.stageType) {
- NearStats* spec = static_cast<NearStats*>(stats.specific.get());
-
- bob->append("keyPattern", spec->keyPattern);
- bob->append("indexName", spec->indexName);
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- BSONArrayBuilder intervalsBob(bob->subarrayStart("searchIntervals"));
- for (vector<IntervalStats>::const_iterator it = spec->intervalStats.begin();
- it != spec->intervalStats.end(); ++it) {
- BSONObjBuilder intervalBob(intervalsBob.subobjStart());
- intervalBob.append("minDistance", it->minDistanceAllowed);
- intervalBob.append("maxDistance", it->maxDistanceAllowed);
- intervalBob.append("maxInclusive", it->inclusiveMaxDistanceAllowed);
- }
- intervalsBob.doneFast();
- }
- }
- else if (STAGE_GROUP == stats.stageType) {
- GroupStats* spec = static_cast<GroupStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nGroups", spec->nGroups);
- }
+ bob->append("keyPattern", spec->keyPattern);
+ bob->append("indexName", spec->indexName);
+ bob->appendBool("isMultiKey", spec->isMultiKey);
+ bob->appendBool("isUnique", spec->isUnique);
+ bob->appendBool("isSparse", spec->isSparse);
+ bob->appendBool("isPartial", spec->isPartial);
+ bob->append("indexVersion", spec->indexVersion);
+ } else if (STAGE_DELETE == stats.stageType) {
+ DeleteStats* spec = static_cast<DeleteStats*>(stats.specific.get());
+
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nWouldDelete", spec->docsDeleted);
+ bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
}
- else if (STAGE_IDHACK == stats.stageType) {
- IDHackStats* spec = static_cast<IDHackStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("keysExamined", spec->keysExamined);
- bob->appendNumber("docsExamined", spec->docsExamined);
- }
+ } else if (STAGE_FETCH == stats.stageType) {
+ FetchStats* spec = static_cast<FetchStats*>(stats.specific.get());
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("docsExamined", spec->docsExamined);
+ bob->appendNumber("alreadyHasObj", spec->alreadyHasObj);
}
- else if (STAGE_IXSCAN == stats.stageType) {
- IndexScanStats* spec = static_cast<IndexScanStats*>(stats.specific.get());
-
- bob->append("keyPattern", spec->keyPattern);
- bob->append("indexName", spec->indexName);
- bob->appendBool("isMultiKey", spec->isMultiKey);
- bob->appendBool("isUnique", spec->isUnique);
- bob->appendBool("isSparse", spec->isSparse);
- bob->appendBool("isPartial", spec->isPartial);
- bob->append("indexVersion", spec->indexVersion);
- bob->append("direction", spec->direction > 0 ? "forward" : "backward");
-
- if ((topLevelBob->len() + spec->indexBounds.objsize()) > kMaxStatsBSONSize) {
- bob->append("warning", "index bounds omitted due to BSON size limit");
- }
- else {
- bob->append("indexBounds", spec->indexBounds);
- }
+ } else if (STAGE_GEO_NEAR_2D == stats.stageType || STAGE_GEO_NEAR_2DSPHERE == stats.stageType) {
+ NearStats* spec = static_cast<NearStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("keysExamined", spec->keysExamined);
- bob->appendNumber("dupsTested", spec->dupsTested);
- bob->appendNumber("dupsDropped", spec->dupsDropped);
- bob->appendNumber("seenInvalidated", spec->seenInvalidated);
- }
- }
- else if (STAGE_OR == stats.stageType) {
- OrStats* spec = static_cast<OrStats*>(stats.specific.get());
+ bob->append("keyPattern", spec->keyPattern);
+ bob->append("indexName", spec->indexName);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("dupsTested", spec->dupsTested);
- bob->appendNumber("dupsDropped", spec->dupsDropped);
- bob->appendNumber("locsForgotten", spec->locsForgotten);
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ BSONArrayBuilder intervalsBob(bob->subarrayStart("searchIntervals"));
+ for (vector<IntervalStats>::const_iterator it = spec->intervalStats.begin();
+ it != spec->intervalStats.end();
+ ++it) {
+ BSONObjBuilder intervalBob(intervalsBob.subobjStart());
+ intervalBob.append("minDistance", it->minDistanceAllowed);
+ intervalBob.append("maxDistance", it->maxDistanceAllowed);
+ intervalBob.append("maxInclusive", it->inclusiveMaxDistanceAllowed);
}
+ intervalsBob.doneFast();
}
- else if (STAGE_LIMIT == stats.stageType) {
- LimitStats* spec = static_cast<LimitStats*>(stats.specific.get());
- bob->appendNumber("limitAmount", spec->limit);
+ } else if (STAGE_GROUP == stats.stageType) {
+ GroupStats* spec = static_cast<GroupStats*>(stats.specific.get());
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nGroups", spec->nGroups);
}
- else if (STAGE_PROJECTION == stats.stageType) {
- ProjectionStats* spec = static_cast<ProjectionStats*>(stats.specific.get());
- bob->append("transformBy", spec->projObj);
+ } else if (STAGE_IDHACK == stats.stageType) {
+ IDHackStats* spec = static_cast<IDHackStats*>(stats.specific.get());
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("keysExamined", spec->keysExamined);
+ bob->appendNumber("docsExamined", spec->docsExamined);
}
- else if (STAGE_SHARDING_FILTER == stats.stageType) {
- ShardingFilterStats* spec = static_cast<ShardingFilterStats*>(stats.specific.get());
+ } else if (STAGE_IXSCAN == stats.stageType) {
+ IndexScanStats* spec = static_cast<IndexScanStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("chunkSkips", spec->chunkSkips);
- }
+ bob->append("keyPattern", spec->keyPattern);
+ bob->append("indexName", spec->indexName);
+ bob->appendBool("isMultiKey", spec->isMultiKey);
+ bob->appendBool("isUnique", spec->isUnique);
+ bob->appendBool("isSparse", spec->isSparse);
+ bob->appendBool("isPartial", spec->isPartial);
+ bob->append("indexVersion", spec->indexVersion);
+ bob->append("direction", spec->direction > 0 ? "forward" : "backward");
+
+ if ((topLevelBob->len() + spec->indexBounds.objsize()) > kMaxStatsBSONSize) {
+ bob->append("warning", "index bounds omitted due to BSON size limit");
+ } else {
+ bob->append("indexBounds", spec->indexBounds);
}
- else if (STAGE_SKIP == stats.stageType) {
- SkipStats* spec = static_cast<SkipStats*>(stats.specific.get());
- bob->appendNumber("skipAmount", spec->skip);
+
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("keysExamined", spec->keysExamined);
+ bob->appendNumber("dupsTested", spec->dupsTested);
+ bob->appendNumber("dupsDropped", spec->dupsDropped);
+ bob->appendNumber("seenInvalidated", spec->seenInvalidated);
}
- else if (STAGE_SORT == stats.stageType) {
- SortStats* spec = static_cast<SortStats*>(stats.specific.get());
- bob->append("sortPattern", spec->sortPattern);
+ } else if (STAGE_OR == stats.stageType) {
+ OrStats* spec = static_cast<OrStats*>(stats.specific.get());
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("memUsage", spec->memUsage);
- bob->appendNumber("memLimit", spec->memLimit);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("dupsTested", spec->dupsTested);
+ bob->appendNumber("dupsDropped", spec->dupsDropped);
+ bob->appendNumber("locsForgotten", spec->locsForgotten);
+ }
+ } else if (STAGE_LIMIT == stats.stageType) {
+ LimitStats* spec = static_cast<LimitStats*>(stats.specific.get());
+ bob->appendNumber("limitAmount", spec->limit);
+ } else if (STAGE_PROJECTION == stats.stageType) {
+ ProjectionStats* spec = static_cast<ProjectionStats*>(stats.specific.get());
+ bob->append("transformBy", spec->projObj);
+ } else if (STAGE_SHARDING_FILTER == stats.stageType) {
+ ShardingFilterStats* spec = static_cast<ShardingFilterStats*>(stats.specific.get());
- if (spec->limit > 0) {
- bob->appendNumber("limitAmount", spec->limit);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("chunkSkips", spec->chunkSkips);
}
- else if (STAGE_SORT_MERGE == stats.stageType) {
- MergeSortStats* spec = static_cast<MergeSortStats*>(stats.specific.get());
- bob->append("sortPattern", spec->sortPattern);
+ } else if (STAGE_SKIP == stats.stageType) {
+ SkipStats* spec = static_cast<SkipStats*>(stats.specific.get());
+ bob->appendNumber("skipAmount", spec->skip);
+ } else if (STAGE_SORT == stats.stageType) {
+ SortStats* spec = static_cast<SortStats*>(stats.specific.get());
+ bob->append("sortPattern", spec->sortPattern);
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("dupsTested", spec->dupsTested);
- bob->appendNumber("dupsDropped", spec->dupsDropped);
- }
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("memUsage", spec->memUsage);
+ bob->appendNumber("memLimit", spec->memLimit);
}
- else if (STAGE_TEXT == stats.stageType) {
- TextStats* spec = static_cast<TextStats*>(stats.specific.get());
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("keysExamined", spec->keysExamined);
- bob->appendNumber("docsExamined", spec->fetches);
- }
- bob->append("indexPrefix", spec->indexPrefix);
- bob->append("indexName", spec->indexName);
- bob->append("parsedTextQuery", spec->parsedTextQuery);
- }
- else if (STAGE_UPDATE == stats.stageType) {
- UpdateStats* spec = static_cast<UpdateStats*>(stats.specific.get());
-
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- bob->appendNumber("nMatched", spec->nMatched);
- bob->appendNumber("nWouldModify", spec->nModified);
- bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
- bob->appendBool("wouldInsert", spec->inserted);
- bob->appendBool("fastmod", spec->fastmod);
- bob->appendBool("fastmodinsert", spec->fastmodinsert);
- }
+ if (spec->limit > 0) {
+ bob->appendNumber("limitAmount", spec->limit);
}
+ } else if (STAGE_SORT_MERGE == stats.stageType) {
+ MergeSortStats* spec = static_cast<MergeSortStats*>(stats.specific.get());
+ bob->append("sortPattern", spec->sortPattern);
- // We're done if there are no children.
- if (stats.children.empty()) {
- return;
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("dupsTested", spec->dupsTested);
+ bob->appendNumber("dupsDropped", spec->dupsDropped);
}
+ } else if (STAGE_TEXT == stats.stageType) {
+ TextStats* spec = static_cast<TextStats*>(stats.specific.get());
- // If there's just one child (a common scenario), avoid making an array. This makes
- // the output more readable by saving a level of nesting. Name the field 'inputStage'
- // rather than 'inputStages'.
- if (1 == stats.children.size()) {
- BSONObjBuilder childBob;
- statsToBSON(*stats.children[0], verbosity, &childBob, topLevelBob);
- bob->append("inputStage", childBob.obj());
- return;
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("keysExamined", spec->keysExamined);
+ bob->appendNumber("docsExamined", spec->fetches);
}
- // There is more than one child. Recursively call statsToBSON(...) on each
- // of them and add them to the 'inputStages' array.
+ bob->append("indexPrefix", spec->indexPrefix);
+ bob->append("indexName", spec->indexName);
+ bob->append("parsedTextQuery", spec->parsedTextQuery);
+ } else if (STAGE_UPDATE == stats.stageType) {
+ UpdateStats* spec = static_cast<UpdateStats*>(stats.specific.get());
- BSONArrayBuilder childrenBob(bob->subarrayStart("inputStages"));
- for (size_t i = 0; i < stats.children.size(); ++i) {
- BSONObjBuilder childBob(childrenBob.subobjStart());
- statsToBSON(*stats.children[i], verbosity, &childBob, topLevelBob);
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ bob->appendNumber("nMatched", spec->nMatched);
+ bob->appendNumber("nWouldModify", spec->nModified);
+ bob->appendNumber("nInvalidateSkips", spec->nInvalidateSkips);
+ bob->appendBool("wouldInsert", spec->inserted);
+ bob->appendBool("fastmod", spec->fastmod);
+ bob->appendBool("fastmodinsert", spec->fastmodinsert);
}
- childrenBob.doneFast();
}
- // static
- BSONObj Explain::statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity) {
- BSONObjBuilder bob;
- statsToBSON(stats, &bob, verbosity);
- return bob.obj();
+ // We're done if there are no children.
+ if (stats.children.empty()) {
+ return;
}
- // static
- void Explain::statsToBSON(const PlanStageStats& stats,
- BSONObjBuilder* bob,
- ExplainCommon::Verbosity verbosity) {
- statsToBSON(stats, verbosity, bob, bob);
+ // If there's just one child (a common scenario), avoid making an array. This makes
+ // the output more readable by saving a level of nesting. Name the field 'inputStage'
+ // rather than 'inputStages'.
+ if (1 == stats.children.size()) {
+ BSONObjBuilder childBob;
+ statsToBSON(*stats.children[0], verbosity, &childBob, topLevelBob);
+ bob->append("inputStage", childBob.obj());
+ return;
}
- // static
- void Explain::generatePlannerInfo(PlanExecutor* exec,
- PlanStageStats* winnerStats,
- const vector<PlanStageStats*>& rejectedStats,
- BSONObjBuilder* out) {
- CanonicalQuery* query = exec->getCanonicalQuery();
-
- BSONObjBuilder plannerBob(out->subobjStart("queryPlanner"));;
-
- plannerBob.append("plannerVersion", QueryPlanner::kPlannerVersion);
- plannerBob.append("namespace", exec->ns());
-
- // Find whether there is an index filter set for the query shape. The 'indexFilterSet'
- // field will always be false in the case of EOF or idhack plans.
- bool indexFilterSet = false;
- if (exec->collection() && exec->getCanonicalQuery()) {
- const CollectionInfoCache* infoCache = exec->collection()->infoCache();
- const QuerySettings* querySettings = infoCache->getQuerySettings();
- PlanCacheKey planCacheKey =
- infoCache->getPlanCache()->computeKey(*exec->getCanonicalQuery());
- AllowedIndices* allowedIndicesRaw;
- if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
- // Found an index filter set on the query shape.
- std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
- indexFilterSet = true;
- }
- }
- plannerBob.append("indexFilterSet", indexFilterSet);
-
- // In general we should have a canonical query, but sometimes we may avoid
- // creating a canonical query as an optimization (specifically, the update system
- // does not canonicalize for idhack updates). In these cases, 'query' is NULL.
- if (NULL != query) {
- BSONObjBuilder parsedQueryBob(plannerBob.subobjStart("parsedQuery"));
- query->root()->toBSON(&parsedQueryBob);
- parsedQueryBob.doneFast();
- }
-
- BSONObjBuilder winningPlanBob(plannerBob.subobjStart("winningPlan"));
- statsToBSON(*winnerStats, &winningPlanBob, ExplainCommon::QUERY_PLANNER);
- winningPlanBob.doneFast();
-
- // Genenerate array of rejected plans.
- BSONArrayBuilder allPlansBob(plannerBob.subarrayStart("rejectedPlans"));
- for (size_t i = 0; i < rejectedStats.size(); i++) {
- BSONObjBuilder childBob(allPlansBob.subobjStart());
- statsToBSON(*rejectedStats[i], &childBob, ExplainCommon::QUERY_PLANNER);
- }
- allPlansBob.doneFast();
+ // There is more than one child. Recursively call statsToBSON(...) on each
+ // of them and add them to the 'inputStages' array.
- plannerBob.doneFast();
+ BSONArrayBuilder childrenBob(bob->subarrayStart("inputStages"));
+ for (size_t i = 0; i < stats.children.size(); ++i) {
+ BSONObjBuilder childBob(childrenBob.subobjStart());
+ statsToBSON(*stats.children[i], verbosity, &childBob, topLevelBob);
}
-
- // static
- void Explain::generateExecStats(PlanStageStats* stats,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out,
- long long totalTimeMillis) {
-
- out->appendNumber("nReturned", stats->common.advanced);
-
- // Time elapsed could might be either precise or approximate.
- if (totalTimeMillis >= 0) {
- out->appendNumber("executionTimeMillis", totalTimeMillis);
+ childrenBob.doneFast();
+}
+
+// static
+BSONObj Explain::statsToBSON(const PlanStageStats& stats, ExplainCommon::Verbosity verbosity) {
+ BSONObjBuilder bob;
+ statsToBSON(stats, &bob, verbosity);
+ return bob.obj();
+}
+
+// static
+void Explain::statsToBSON(const PlanStageStats& stats,
+ BSONObjBuilder* bob,
+ ExplainCommon::Verbosity verbosity) {
+ statsToBSON(stats, verbosity, bob, bob);
+}
+
+// static
+void Explain::generatePlannerInfo(PlanExecutor* exec,
+ PlanStageStats* winnerStats,
+ const vector<PlanStageStats*>& rejectedStats,
+ BSONObjBuilder* out) {
+ CanonicalQuery* query = exec->getCanonicalQuery();
+
+ BSONObjBuilder plannerBob(out->subobjStart("queryPlanner"));
+
+ plannerBob.append("plannerVersion", QueryPlanner::kPlannerVersion);
+ plannerBob.append("namespace", exec->ns());
+
+ // Find whether there is an index filter set for the query shape. The 'indexFilterSet'
+ // field will always be false in the case of EOF or idhack plans.
+ bool indexFilterSet = false;
+ if (exec->collection() && exec->getCanonicalQuery()) {
+ const CollectionInfoCache* infoCache = exec->collection()->infoCache();
+ const QuerySettings* querySettings = infoCache->getQuerySettings();
+ PlanCacheKey planCacheKey =
+ infoCache->getPlanCache()->computeKey(*exec->getCanonicalQuery());
+ AllowedIndices* allowedIndicesRaw;
+ if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
+ // Found an index filter set on the query shape.
+ std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
+ indexFilterSet = true;
}
- else {
- out->appendNumber("executionTimeMillisEstimate", stats->common.executionTimeMillis);
- }
-
- // Flatten the stats tree into a list.
- vector<const PlanStageStats*> statsNodes;
- flattenStatsTree(stats, &statsNodes);
+ }
+ plannerBob.append("indexFilterSet", indexFilterSet);
+
+ // In general we should have a canonical query, but sometimes we may avoid
+ // creating a canonical query as an optimization (specifically, the update system
+ // does not canonicalize for idhack updates). In these cases, 'query' is NULL.
+ if (NULL != query) {
+ BSONObjBuilder parsedQueryBob(plannerBob.subobjStart("parsedQuery"));
+ query->root()->toBSON(&parsedQueryBob);
+ parsedQueryBob.doneFast();
+ }
- // Iterate over all stages in the tree and get the total number of keys/docs examined.
- // These are just aggregations of information already available in the stats tree.
- size_t totalKeysExamined = 0;
- size_t totalDocsExamined = 0;
- for (size_t i = 0; i < statsNodes.size(); ++i) {
+ BSONObjBuilder winningPlanBob(plannerBob.subobjStart("winningPlan"));
+ statsToBSON(*winnerStats, &winningPlanBob, ExplainCommon::QUERY_PLANNER);
+ winningPlanBob.doneFast();
- totalKeysExamined += getKeysExamined(statsNodes[i]->stageType,
- statsNodes[i]->specific.get());
- totalDocsExamined += getDocsExamined(statsNodes[i]->stageType,
- statsNodes[i]->specific.get());
- }
+ // Generate array of rejected plans.
+ BSONArrayBuilder allPlansBob(plannerBob.subarrayStart("rejectedPlans"));
+ for (size_t i = 0; i < rejectedStats.size(); i++) {
+ BSONObjBuilder childBob(allPlansBob.subobjStart());
+ statsToBSON(*rejectedStats[i], &childBob, ExplainCommon::QUERY_PLANNER);
+ }
+ allPlansBob.doneFast();
- out->appendNumber("totalKeysExamined", totalKeysExamined);
- out->appendNumber("totalDocsExamined", totalDocsExamined);
+ plannerBob.doneFast();
+}
- // Add the tree of stages, with individual execution stats for each stage.
- BSONObjBuilder stagesBob(out->subobjStart("executionStages"));
- statsToBSON(*stats, &stagesBob, verbosity);
- stagesBob.doneFast();
+// static
+void Explain::generateExecStats(PlanStageStats* stats,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out,
+ long long totalTimeMillis) {
+ out->appendNumber("nReturned", stats->common.advanced);
+
+ // Time elapsed could be either precise or approximate.
+ if (totalTimeMillis >= 0) {
+ out->appendNumber("executionTimeMillis", totalTimeMillis);
+ } else {
+ out->appendNumber("executionTimeMillisEstimate", stats->common.executionTimeMillis);
}
- // static
- void Explain::generateServerInfo(BSONObjBuilder* out) {
- BSONObjBuilder serverBob(out->subobjStart("serverInfo"));
- out->append("host", getHostNameCached());
- out->appendNumber("port", serverGlobalParams.port);
- out->append("version", versionString);
- out->append("gitVersion", gitVersion());
- serverBob.doneFast();
+ // Flatten the stats tree into a list.
+ vector<const PlanStageStats*> statsNodes;
+ flattenStatsTree(stats, &statsNodes);
+
+ // Iterate over all stages in the tree and get the total number of keys/docs examined.
+ // These are just aggregations of information already available in the stats tree.
+ size_t totalKeysExamined = 0;
+ size_t totalDocsExamined = 0;
+ for (size_t i = 0; i < statsNodes.size(); ++i) {
+ totalKeysExamined +=
+ getKeysExamined(statsNodes[i]->stageType, statsNodes[i]->specific.get());
+ totalDocsExamined +=
+ getDocsExamined(statsNodes[i]->stageType, statsNodes[i]->specific.get());
}
- // static
- void Explain::explainStages(PlanExecutor* exec,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) {
- //
- // Step 1: run the stages as required by the verbosity level.
- //
-
- // Inspect the tree to see if there is a MultiPlanStage.
- MultiPlanStage* mps = getMultiPlanStage(exec->getRootStage());
-
- // Get stats of the winning plan from the trial period, if the verbosity level
- // is high enough and there was a runoff between multiple plans.
- unique_ptr<PlanStageStats> winningStatsTrial;
- if (verbosity >= ExplainCommon::EXEC_ALL_PLANS && NULL != mps) {
- winningStatsTrial.reset(exec->getStats());
- invariant(winningStatsTrial.get());
- }
+ out->appendNumber("totalKeysExamined", totalKeysExamined);
+ out->appendNumber("totalDocsExamined", totalDocsExamined);
+
+ // Add the tree of stages, with individual execution stats for each stage.
+ BSONObjBuilder stagesBob(out->subobjStart("executionStages"));
+ statsToBSON(*stats, &stagesBob, verbosity);
+ stagesBob.doneFast();
+}
+
+// static
+void Explain::generateServerInfo(BSONObjBuilder* out) {
+ BSONObjBuilder serverBob(out->subobjStart("serverInfo"));
+ out->append("host", getHostNameCached());
+ out->appendNumber("port", serverGlobalParams.port);
+ out->append("version", versionString);
+ out->append("gitVersion", gitVersion());
+ serverBob.doneFast();
+}
+
+// static
+void Explain::explainStages(PlanExecutor* exec,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) {
+ //
+ // Step 1: run the stages as required by the verbosity level.
+ //
+
+ // Inspect the tree to see if there is a MultiPlanStage.
+ MultiPlanStage* mps = getMultiPlanStage(exec->getRootStage());
+
+ // Get stats of the winning plan from the trial period, if the verbosity level
+ // is high enough and there was a runoff between multiple plans.
+ unique_ptr<PlanStageStats> winningStatsTrial;
+ if (verbosity >= ExplainCommon::EXEC_ALL_PLANS && NULL != mps) {
+ winningStatsTrial.reset(exec->getStats());
+ invariant(winningStatsTrial.get());
+ }
- // If we need execution stats, then run the plan in order to gather the stats.
- Status executePlanStatus = Status::OK();
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- executePlanStatus = exec->executePlan();
- }
+ // If we need execution stats, then run the plan in order to gather the stats.
+ Status executePlanStatus = Status::OK();
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ executePlanStatus = exec->executePlan();
+ }
- //
- // Step 2: collect plan stats (which also give the structure of the plan tree).
- //
+ //
+ // Step 2: collect plan stats (which also give the structure of the plan tree).
+ //
- // Get stats for the winning plan.
- unique_ptr<PlanStageStats> winningStats(exec->getStats());
+ // Get stats for the winning plan.
+ unique_ptr<PlanStageStats> winningStats(exec->getStats());
- // Get stats for the rejected plans, if more than one plan was considered.
- OwnedPointerVector<PlanStageStats> allPlansStats;
- if (NULL != mps) {
- allPlansStats = mps->generateCandidateStats();
- }
+ // Get stats for the rejected plans, if more than one plan was considered.
+ OwnedPointerVector<PlanStageStats> allPlansStats;
+ if (NULL != mps) {
+ allPlansStats = mps->generateCandidateStats();
+ }
- //
- // Step 3: use the stats trees to produce explain BSON.
- //
+ //
+ // Step 3: use the stats trees to produce explain BSON.
+ //
- if (verbosity >= ExplainCommon::QUERY_PLANNER) {
- generatePlannerInfo(exec, winningStats.get(), allPlansStats.vector(), out);
- }
+ if (verbosity >= ExplainCommon::QUERY_PLANNER) {
+ generatePlannerInfo(exec, winningStats.get(), allPlansStats.vector(), out);
+ }
- if (verbosity >= ExplainCommon::EXEC_STATS) {
- BSONObjBuilder execBob(out->subobjStart("executionStats"));
-
- // If there is an execution error while running the query, the error is reported under
- // the "executionStats" section and the explain as a whole succeeds.
- execBob.append("executionSuccess", executePlanStatus.isOK());
- if (!executePlanStatus.isOK()) {
- execBob.append("errorMessage", executePlanStatus.reason());
- execBob.append("errorCode", executePlanStatus.code());
+ if (verbosity >= ExplainCommon::EXEC_STATS) {
+ BSONObjBuilder execBob(out->subobjStart("executionStats"));
+
+ // If there is an execution error while running the query, the error is reported under
+ // the "executionStats" section and the explain as a whole succeeds.
+ execBob.append("executionSuccess", executePlanStatus.isOK());
+ if (!executePlanStatus.isOK()) {
+ execBob.append("errorMessage", executePlanStatus.reason());
+ execBob.append("errorCode", executePlanStatus.code());
+ }
+
+ // Generate exec stats BSON for the winning plan.
+ OperationContext* opCtx = exec->getOpCtx();
+ long long totalTimeMillis = CurOp::get(opCtx)->elapsedMillis();
+ generateExecStats(winningStats.get(), verbosity, &execBob, totalTimeMillis);
+
+ // Also generate exec stats for all plans, if the verbosity level is high enough.
+ // These stats reflect what happened during the trial period that ranked the plans.
+ if (verbosity >= ExplainCommon::EXEC_ALL_PLANS) {
+ // If we ranked multiple plans against each other, then add stats collected
+ // from the trial period of the winning plan. The "allPlansExecution" section
+ // will contain an apples-to-apples comparison of the winning plan's stats against
+ // all rejected plans' stats collected during the trial period.
+ if (NULL != mps) {
+ invariant(winningStatsTrial.get());
+ allPlansStats.push_back(winningStatsTrial.release());
}
- // Generate exec stats BSON for the winning plan.
- OperationContext* opCtx = exec->getOpCtx();
- long long totalTimeMillis = CurOp::get(opCtx)->elapsedMillis();
- generateExecStats(winningStats.get(), verbosity, &execBob, totalTimeMillis);
-
- // Also generate exec stats for all plans, if the verbosity level is high enough.
- // These stats reflect what happened during the trial period that ranked the plans.
- if (verbosity >= ExplainCommon::EXEC_ALL_PLANS) {
- // If we ranked multiple plans against each other, then add stats collected
- // from the trial period of the winning plan. The "allPlansExecution" section
- // will contain an apples-to-apples comparison of the winning plan's stats against
- // all rejected plans' stats collected during the trial period.
- if (NULL != mps) {
- invariant(winningStatsTrial.get());
- allPlansStats.push_back(winningStatsTrial.release());
- }
-
- BSONArrayBuilder allPlansBob(execBob.subarrayStart("allPlansExecution"));
- for (size_t i = 0; i < allPlansStats.size(); ++i) {
- BSONObjBuilder planBob(allPlansBob.subobjStart());
- generateExecStats(allPlansStats[i], verbosity, &planBob);
- planBob.doneFast();
- }
- allPlansBob.doneFast();
+ BSONArrayBuilder allPlansBob(execBob.subarrayStart("allPlansExecution"));
+ for (size_t i = 0; i < allPlansStats.size(); ++i) {
+ BSONObjBuilder planBob(allPlansBob.subobjStart());
+ generateExecStats(allPlansStats[i], verbosity, &planBob);
+ planBob.doneFast();
}
-
- execBob.doneFast();
+ allPlansBob.doneFast();
}
- generateServerInfo(out);
- }
-
- // static
- std::string Explain::getPlanSummary(const PlanExecutor* exec) {
- return getPlanSummary(exec->getRootStage());
+ execBob.doneFast();
}
- // static
- std::string Explain::getPlanSummary(const PlanStage* root) {
- std::vector<const PlanStage*> stages;
- flattenExecTree(root, &stages);
-
- // Use this stream to build the plan summary string.
- mongoutils::str::stream ss;
- bool seenLeaf = false;
-
- for (size_t i = 0; i < stages.size(); i++) {
- if (stages[i]->getChildren().empty()) {
- // This is a leaf node. Add to the plan summary string accordingly. Unless
- // this is the first leaf we've seen, add a delimiting string first.
- if (seenLeaf) {
- ss << ", ";
- }
- else {
- seenLeaf = true;
- }
- addStageSummaryStr(stages[i], ss);
+ generateServerInfo(out);
+}
+
+// static
+std::string Explain::getPlanSummary(const PlanExecutor* exec) {
+ return getPlanSummary(exec->getRootStage());
+}
+
+// static
+std::string Explain::getPlanSummary(const PlanStage* root) {
+ std::vector<const PlanStage*> stages;
+ flattenExecTree(root, &stages);
+
+ // Use this stream to build the plan summary string.
+ mongoutils::str::stream ss;
+ bool seenLeaf = false;
+
+ for (size_t i = 0; i < stages.size(); i++) {
+ if (stages[i]->getChildren().empty()) {
+ // This is a leaf node. Add to the plan summary string accordingly. Unless
+ // this is the first leaf we've seen, add a delimiting string first.
+ if (seenLeaf) {
+ ss << ", ";
+ } else {
+ seenLeaf = true;
}
+ addStageSummaryStr(stages[i], ss);
}
-
- return ss;
}
- // static
- void Explain::getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut) {
- invariant(NULL != statsOut);
+ return ss;
+}
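For reference, a hedged illustration of the summary strings this function produces (the plans and key patterns are hypothetical, not part of this change): only leaf stages are listed, comma-separated, with index leaves annotated by their key pattern.

    // A FETCH over an IXSCAN on { a: 1 } has a single leaf, so:
    //   Explain::getPlanSummary(root);    // -> "IXSCAN { a: 1 }"
    // An OR plan with two index scan children lists both leaves:
    //   Explain::getPlanSummary(orRoot);  // -> "IXSCAN { a: 1 }, IXSCAN { b: 1 }"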
- PlanStage* root = exec->getRootStage();
+// static
+void Explain::getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut) {
+ invariant(NULL != statsOut);
- // We can get some of the fields we need from the common stats stored in the
- // root stage of the plan tree.
- const CommonStats* common = root->getCommonStats();
- statsOut->nReturned = common->advanced;
- statsOut->executionTimeMillis = common->executionTimeMillis;
+ PlanStage* root = exec->getRootStage();
- // The other fields are aggregations over the stages in the plan tree. We flatten
- // the tree into a list and then compute these aggregations.
- std::vector<const PlanStage*> stages;
- flattenExecTree(root, &stages);
+ // We can get some of the fields we need from the common stats stored in the
+ // root stage of the plan tree.
+ const CommonStats* common = root->getCommonStats();
+ statsOut->nReturned = common->advanced;
+ statsOut->executionTimeMillis = common->executionTimeMillis;
- for (size_t i = 0; i < stages.size(); i++) {
- statsOut->totalKeysExamined += getKeysExamined(stages[i]->stageType(),
- stages[i]->getSpecificStats());
- statsOut->totalDocsExamined += getDocsExamined(stages[i]->stageType(),
- stages[i]->getSpecificStats());
+ // The other fields are aggregations over the stages in the plan tree. We flatten
+ // the tree into a list and then compute these aggregations.
+ std::vector<const PlanStage*> stages;
+ flattenExecTree(root, &stages);
- if (STAGE_IDHACK == stages[i]->stageType()) {
- statsOut->isIdhack = true;
- }
- if (STAGE_SORT == stages[i]->stageType()) {
- statsOut->hasSortStage = true;
- }
+ for (size_t i = 0; i < stages.size(); i++) {
+ statsOut->totalKeysExamined +=
+ getKeysExamined(stages[i]->stageType(), stages[i]->getSpecificStats());
+ statsOut->totalDocsExamined +=
+ getDocsExamined(stages[i]->stageType(), stages[i]->getSpecificStats());
+
+ if (STAGE_IDHACK == stages[i]->stageType()) {
+ statsOut->isIdhack = true;
+ }
+ if (STAGE_SORT == stages[i]->stageType()) {
+ statsOut->hasSortStage = true;
}
}
+}
-} // namespace mongo
+} // namespace mongo
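To make the consumers mentioned above (profiler, slow query log) concrete, here is a minimal sketch of how a caller might use Explain::getSummaryStats(); it assumes a PlanExecutor 'exec' that has already been run, and the logging shown is illustrative rather than part of this change.

    PlanSummaryStats summary;
    Explain::getSummaryStats(exec, &summary);
    if (summary.hasSortStage) {
        // e.g. flag queries that required an in-memory sort
    }
    log() << "keys examined: " << summary.totalKeysExamined
          << ", docs examined: " << summary.totalDocsExamined;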
diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h
index 14a9d1289f5..7a0013294a0 100644
--- a/src/mongo/db/query/explain.h
+++ b/src/mongo/db/query/explain.h
@@ -38,161 +38,160 @@
namespace mongo {
- class Collection;
- class OperationContext;
+class Collection;
+class OperationContext;
+/**
+ * A container for the summary statistics that the profiler, slow query log, and
+ * other non-explain debug mechanisms may want to collect.
+ */
+struct PlanSummaryStats {
+ PlanSummaryStats()
+ : nReturned(0),
+ totalKeysExamined(0),
+ totalDocsExamined(0),
+ executionTimeMillis(0),
+ isIdhack(false),
+ hasSortStage(false) {}
+
+ // The number of results returned by the plan.
+ size_t nReturned;
+
+ // The total number of index keys examined by the plan.
+ size_t totalKeysExamined;
+
+ // The total number of documents examined by the plan.
+ size_t totalDocsExamined;
+
+ // The number of milliseconds spent inside the root stage's work() method.
+ long long executionTimeMillis;
+
+ // Did this plan use the fast path for key-value retrievals on the _id index?
+ bool isIdhack;
+
+ // Did this plan use an in-memory sort stage?
+ bool hasSortStage;
+};
+
+/**
+ * Namespace for the collection of static methods used to generate explain information.
+ */
+class Explain {
+public:
/**
- * A container for the summary statistics that the profiler, slow query log, and
- * other non-explain debug mechanisms may want to collect.
+ * Get explain BSON for the execution stages contained by 'exec'. Use this function if you
+ * have a PlanExecutor and want to convert it into a human readable explain format. Any
+ * operation which has a query component (e.g. find, update, group) can be explained via
+ * this function.
+ *
+ * The explain information is extracted from 'exec' and added to the out-parameter 'out'.
+ *
+ * The explain information is generated with the level of detail specified by 'verbosity'.
+ *
+ * Does not take ownership of its arguments.
+ *
+ * If there is an error during the execution of the query, the error message and code are
+ * added to the "executionStats" section of the explain.
*/
- struct PlanSummaryStats {
+ static void explainStages(PlanExecutor* exec,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out);
- PlanSummaryStats() : nReturned(0),
- totalKeysExamined(0),
- totalDocsExamined(0),
- executionTimeMillis(0),
- isIdhack(false),
- hasSortStage(false) { }
-
- // The number of results returned by the plan.
- size_t nReturned;
+ /**
+ * Converts the stats tree 'stats' into a corresponding BSON object containing
+ * explain information.
+ *
+ * Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
+ * to execution stats verbosity.
+ */
+ static BSONObj statsToBSON(const PlanStageStats& stats,
+ ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
- // The total number of index keys examined by the plan.
- size_t totalKeysExamined;
+ /**
+ * This version of stats tree to BSON conversion returns the result through the
+ * out-parameter 'bob' rather than returning a BSONObj.
+ *
+ * Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
+ * to execution stats verbosity.
+ */
+ static void statsToBSON(const PlanStageStats& stats,
+ BSONObjBuilder* bob,
+ ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
- // The total number of documents examined by the plan.
- size_t totalDocsExamined;
+ /**
+ * Returns a short plan summary std::string describing the leaves of the query plan.
+ */
+ static std::string getPlanSummary(const PlanExecutor* exec);
+ static std::string getPlanSummary(const PlanStage* root);
- // The number of milliseconds spent inside the root stage's work() method.
- long long executionTimeMillis;
+ /**
+ * Fills out 'statsOut' with summary stats using the execution tree contained
+ * in 'exec'.
+ *
+ * The summary stats are consumed by debug mechanisms such as the profiler and
+ * the slow query log.
+ *
+ * This is a lightweight alternative to explainStages(...) above, which is useful
+ * when operations want to request debug information without doing all the work
+ * to generate a full explain.
+ *
+ * Does not take ownership of its arguments.
+ */
+ static void getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut);
- // Did this plan use the fast path for key-value retrievals on the _id index?
- bool isIdhack;
+private:
+ /**
+ * Private helper that does the heavy-lifting for the public statsToBSON(...) functions
+ * declared above.
+ *
+ * Not used except as a helper to the public statsToBSON(...) functions.
+ */
+ static void statsToBSON(const PlanStageStats& stats,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* bob,
+ BSONObjBuilder* topLevelBob);
- // Did this plan use an in-memory sort stage?
- bool hasSortStage;
- };
+ /**
+ * Adds the 'queryPlanner' explain section to the BSON object being built
+ * by 'out'.
+ *
+ * This is a helper for generating explain BSON. It is used by explainStages(...).
+ *
+ * @param exec -- the stage tree for the operation being explained.
+ * @param winnerStats -- the stats tree for the winning plan.
+ * @param rejectedStats -- an array of stats trees, one per rejected plan
+ */
+ static void generatePlannerInfo(PlanExecutor* exec,
+ PlanStageStats* winnerStats,
+ const std::vector<PlanStageStats*>& rejectedStats,
+ BSONObjBuilder* out);
/**
- * Namespace for the collection of static methods used to generate explain information.
+ * Generates the execution stats section for the stats tree 'stats',
+ * adding the resulting BSON to 'out'.
+ *
+ * The 'totalTimeMillis' value passed here will be added to the top level of
+ * the execution stats section, but will not affect the reporting of timing for
+ * individual stages. If 'totalTimeMillis' is not specified, then the default
+ * value of -1 indicates that we should only use the approximate timing information
+ * collected by the stages.
+ *
+ * Stats are generated at the verbosity specified by 'verbosity'.
+ *
+ * This is a helper for generating explain BSON. It is used by explainStages(...).
*/
- class Explain {
- public:
- /**
- * Get explain BSON for the execution stages contained by 'exec'. Use this function if you
- * have a PlanExecutor and want to convert it into a human readable explain format. Any
- * operation which has a query component (e.g. find, update, group) can be explained via
- * this function.
- *
- * The explain information is extracted from 'exec' and added to the out-parameter 'out'.
- *
- * The explain information is generated with the level of detail specified by 'verbosity'.
- *
- * Does not take ownership of its arguments.
- *
- * If there is an error during the execution of the query, the error message and code are
- * added to the "executionStats" section of the explain.
- */
- static void explainStages(PlanExecutor* exec,
+ static void generateExecStats(PlanStageStats* stats,
ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out);
-
- /**
- * Converts the stats tree 'stats' into a corresponding BSON object containing
- * explain information.
- *
- * Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
- * to execution stats verbosity.
- */
- static BSONObj statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
-
- /**
- * This version of stats tree to BSON conversion returns the result through the
- * out-parameter 'bob' rather than returning a BSONObj.
- *
- * Generates the BSON stats at a verbosity specified by 'verbosity'. Defaults
- * to execution stats verbosity.
- */
- static void statsToBSON(const PlanStageStats& stats,
- BSONObjBuilder* bob,
- ExplainCommon::Verbosity verbosity = ExplainCommon::EXEC_STATS);
-
- /**
- * Returns a short plan summary std::string describing the leaves of the query plan.
- */
- static std::string getPlanSummary(const PlanExecutor* exec);
- static std::string getPlanSummary(const PlanStage* root);
-
- /**
- * Fills out 'statsOut' with summary stats using the execution tree contained
- * in 'exec'.
- *
- * The summary stats are consumed by debug mechanisms such as the profiler and
- * the slow query log.
- *
- * This is a lightweight alternative for explainStages(...) above which is useful
- * when operations want to request debug information without doing all the work
- * to generate a full explain.
- *
- * Does not take ownership of its arguments.
- */
- static void getSummaryStats(const PlanExecutor* exec, PlanSummaryStats* statsOut);
-
- private:
- /**
- * Private helper that does the heavy-lifting for the public statsToBSON(...) functions
- * declared above.
- *
- * Not used except as a helper to the public statsToBSON(...) functions.
- */
- static void statsToBSON(const PlanStageStats& stats,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* bob,
- BSONObjBuilder* topLevelBob);
-
- /**
- * Adds the 'queryPlanner' explain section to the BSON object being built
- * by 'out'.
- *
- * This is a helper for generating explain BSON. It is used by explainStages(...).
- *
- * @param exec -- the stage tree for the operation being explained.
- * @param winnerStats -- the stats tree for the winning plan.
- * @param rejectedStats -- an array of stats trees, one per rejected plan
- */
- static void generatePlannerInfo(PlanExecutor* exec,
- PlanStageStats* winnerStats,
- const std::vector<PlanStageStats*>& rejectedStats,
- BSONObjBuilder* out);
-
- /**
- * Generates the execution stats section for the stats tree 'stats',
- * adding the resulting BSON to 'out'.
- *
- * The 'totalTimeMillis' value passed here will be added to the top level of
- * the execution stats section, but will not affect the reporting of timing for
- * individual stages. If 'totalTimeMillis' is not specified, then the default
- * value of -1 indicates that we should only use the approximate timing information
- * collected by the stages.
- *
- * Stats are generated at the verbosity specified by 'verbosity'.
- *
- * This is a helper for generating explain BSON. It is used by explainStages(...).
- */
- static void generateExecStats(PlanStageStats* stats,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out,
- long long totalTimeMillis = -1);
-
- /**
- * Adds the 'serverInfo' explain section to the BSON object being build
- * by 'out'.
- *
- * This is a helper for generating explain BSON. It is used by explainStages(...).
- */
- static void generateServerInfo(BSONObjBuilder* out);
-
- };
-
-} // namespace
+ BSONObjBuilder* out,
+ long long totalTimeMillis = -1);
+
+ /**
+ * Adds the 'serverInfo' explain section to the BSON object being built
+ * by 'out'.
+ *
+ * This is a helper for generating explain BSON. It is used by explainStages(...).
+ */
+ static void generateServerInfo(BSONObjBuilder* out);
+};
+
+} // namespace
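A minimal usage sketch for the public entry point declared above, assuming the caller has already built a PlanExecutor 'exec' for the operation being explained and holds a reply builder 'result' (both names are illustrative):

    BSONObjBuilder explainBob;
    Explain::explainStages(exec, ExplainCommon::EXEC_ALL_PLANS, &explainBob);
    result.appendElements(explainBob.obj());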
diff --git a/src/mongo/db/query/explain_common.cpp b/src/mongo/db/query/explain_common.cpp
index aeef9df95f6..1f049de6cec 100644
--- a/src/mongo/db/query/explain_common.cpp
+++ b/src/mongo/db/query/explain_common.cpp
@@ -34,9 +34,9 @@
namespace mongo {
- // static
- const char* ExplainCommon::verbosityString(ExplainCommon::Verbosity verbosity) {
- switch (verbosity) {
+// static
+const char* ExplainCommon::verbosityString(ExplainCommon::Verbosity verbosity) {
+ switch (verbosity) {
case QUERY_PLANNER:
return "queryPlanner";
case EXEC_STATS:
@@ -46,31 +46,30 @@ namespace mongo {
default:
invariant(0);
return "unknown";
- }
}
+}
- // static
- Status ExplainCommon::parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity) {
- if (Object != cmdObj.firstElement().type()) {
- return Status(ErrorCodes::BadValue, "explain command requires a nested object");
- }
+// static
+Status ExplainCommon::parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity) {
+ if (Object != cmdObj.firstElement().type()) {
+ return Status(ErrorCodes::BadValue, "explain command requires a nested object");
+ }
- *verbosity = ExplainCommon::EXEC_ALL_PLANS;
- if (!cmdObj["verbosity"].eoo()) {
- const char* verbStr = cmdObj["verbosity"].valuestrsafe();
- if (mongoutils::str::equals(verbStr, "queryPlanner")) {
- *verbosity = ExplainCommon::QUERY_PLANNER;
- }
- else if (mongoutils::str::equals(verbStr, "executionStats")) {
- *verbosity = ExplainCommon::EXEC_STATS;
- }
- else if (!mongoutils::str::equals(verbStr, "allPlansExecution")) {
- return Status(ErrorCodes::BadValue, "verbosity string must be one of "
- "{'queryPlanner', 'executionStats', 'allPlansExecution'}");
- }
+ *verbosity = ExplainCommon::EXEC_ALL_PLANS;
+ if (!cmdObj["verbosity"].eoo()) {
+ const char* verbStr = cmdObj["verbosity"].valuestrsafe();
+ if (mongoutils::str::equals(verbStr, "queryPlanner")) {
+ *verbosity = ExplainCommon::QUERY_PLANNER;
+ } else if (mongoutils::str::equals(verbStr, "executionStats")) {
+ *verbosity = ExplainCommon::EXEC_STATS;
+ } else if (!mongoutils::str::equals(verbStr, "allPlansExecution")) {
+ return Status(ErrorCodes::BadValue,
+ "verbosity string must be one of "
+ "{'queryPlanner', 'executionStats', 'allPlansExecution'}");
}
-
- return Status::OK();
}
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace mongo
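A minimal caller-side sketch of the parsing path above, with hypothetical variable names (the explain command implementation itself is not shown in this diff):

    // Assume 'cmdObj' is an explain command document such as
    // { explain: { find: "coll", filter: { ... } }, verbosity: "executionStats" }.
    ExplainCommon::Verbosity verbosity;
    Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
    if (!parseStatus.isOK()) {
        return parseStatus;  // missing nested object or unrecognized verbosity string
    }
    // When no "verbosity" field is present, the default EXEC_ALL_PLANS is kept.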
diff --git a/src/mongo/db/query/explain_common.h b/src/mongo/db/query/explain_common.h
index 05bd975295f..f76c6d348cf 100644
--- a/src/mongo/db/query/explain_common.h
+++ b/src/mongo/db/query/explain_common.h
@@ -33,45 +33,44 @@
namespace mongo {
+/**
+ * Utilities used for explain implementations on both mongod and mongos.
+ */
+class ExplainCommon {
+public:
/**
- * Utilities used for explain implementations on both mongod and mongos.
+ * The various supported verbosity levels for explain. The order is
+ * significant: the enum values are assigned in order of increasing verbosity.
*/
- class ExplainCommon {
- public:
- /**
- * The various supported verbosity levels for explain. The order is
- * significant: the enum values are assigned in order of increasing verbosity.
- */
- enum Verbosity {
- // At all verbosities greater than or equal to QUERY_PLANNER, we display information
- // about the plan selected and alternate rejected plans. Does not include any execution-
- // related info. String alias is "queryPlanner".
- QUERY_PLANNER = 0,
-
- // At all verbosities greater than or equal to EXEC_STATS, we display a section of
- // output containing both overall execution stats, and stats per stage in the
- // execution tree. String alias is "execStats".
- EXEC_STATS = 1,
+ enum Verbosity {
+ // At all verbosities greater than or equal to QUERY_PLANNER, we display information
+ // about the plan selected and alternate rejected plans. Does not include any execution-
+ // related info. String alias is "queryPlanner".
+ QUERY_PLANNER = 0,
- // At this second-highest verbosity level, we generate the execution stats for each
- // rejected plan as well as the winning plan. String alias is "allPlansExecution".
- EXEC_ALL_PLANS = 2,
- };
+ // At all verbosities greater than or equal to EXEC_STATS, we display a section of
+ // output containing both overall execution stats, and stats per stage in the
+        // execution tree. String alias is "executionStats".
+ EXEC_STATS = 1,
- /**
- * Converts an explain verbosity to its string representation.
- */
- static const char* verbosityString(ExplainCommon::Verbosity verbosity);
+ // At this second-highest verbosity level, we generate the execution stats for each
+ // rejected plan as well as the winning plan. String alias is "allPlansExecution".
+ EXEC_ALL_PLANS = 2,
+ };
- /**
- * Does some basic validation of the command BSON, and retrieves the explain verbosity.
- *
- * Returns a non-OK status if parsing fails.
- *
- * On success, populates "verbosity".
- */
- static Status parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity);
+ /**
+ * Converts an explain verbosity to its string representation.
+ */
+ static const char* verbosityString(ExplainCommon::Verbosity verbosity);
- };
+ /**
+ * Does some basic validation of the command BSON, and retrieves the explain verbosity.
+ *
+ * Returns a non-OK status if parsing fails.
+ *
+ * On success, populates "verbosity".
+ */
+ static Status parseCmdBSON(const BSONObj& cmdObj, ExplainCommon::Verbosity* verbosity);
+};
-} // namespace
+} // namespace
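Because the enum values above are assigned in order of increasing verbosity, callers can gate the explain sections with plain comparisons. A hedged sketch (the helper names are hypothetical):

    void appendExplainSections(ExplainCommon::Verbosity verbosity, BSONObjBuilder* out) {
        appendPlannerSection(out);  // queryPlanner info is emitted at every level
        if (verbosity >= ExplainCommon::EXEC_STATS) {
            appendExecutionStatsSection(out);  // overall and per-stage execution stats
        }
        if (verbosity >= ExplainCommon::EXEC_ALL_PLANS) {
            appendAllPlansExecutionSection(out);  // execution stats for rejected plans as well
        }
    }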
diff --git a/src/mongo/db/query/expression_index.cpp b/src/mongo/db/query/expression_index.cpp
index 51cc439da23..67f06314266 100644
--- a/src/mongo/db/query/expression_index.cpp
+++ b/src/mongo/db/query/expression_index.cpp
@@ -39,174 +39,160 @@
namespace mongo {
- using std::set;
-
- BSONObj ExpressionMapping::hash(const BSONElement& value) {
- BSONObjBuilder bob;
- bob.append("", BSONElementHasher::hash64(value, BSONElementHasher::DEFAULT_HASH_SEED));
- return bob.obj();
+using std::set;
+
+BSONObj ExpressionMapping::hash(const BSONElement& value) {
+ BSONObjBuilder bob;
+ bob.append("", BSONElementHasher::hash64(value, BSONElementHasher::DEFAULT_HASH_SEED));
+ return bob.obj();
+}
+
+// For debugging only
+static std::string toCoveringString(const GeoHashConverter& hashConverter,
+ const set<GeoHash>& covering) {
+ string result = "[";
+ for (set<GeoHash>::const_iterator it = covering.begin(); it != covering.end(); ++it) {
+ if (it != covering.begin())
+ result += ", ";
+
+ const GeoHash& geoHash = *it;
+
+ result += hashConverter.unhashToBoxCovering(geoHash).toString();
+ result += " (" + geoHash.toStringHex1() + ")";
}
- // For debugging only
- static std::string toCoveringString(const GeoHashConverter& hashConverter,
- const set<GeoHash>& covering) {
- string result = "[";
- for (set<GeoHash>::const_iterator it = covering.begin(); it != covering.end();
- ++it) {
-
- if (it != covering.begin()) result += ", ";
-
- const GeoHash& geoHash = *it;
-
- result += hashConverter.unhashToBoxCovering(geoHash).toString();
- result += " (" + geoHash.toStringHex1() + ")";
- }
-
- return result + "]";
+ return result + "]";
+}
+
+void ExpressionMapping::cover2d(const R2Region& region,
+ const BSONObj& indexInfoObj,
+ int maxCoveringCells,
+ OrderedIntervalList* oil) {
+ GeoHashConverter::Parameters hashParams;
+ Status paramStatus = GeoHashConverter::parseParameters(indexInfoObj, &hashParams);
+ verify(paramStatus.isOK()); // We validated the parameters when creating the index
+
+ GeoHashConverter hashConverter(hashParams);
+ R2RegionCoverer coverer(&hashConverter);
+ coverer.setMaxLevel(hashConverter.getBits());
+ coverer.setMaxCells(maxCoveringCells);
+
+ // TODO: Maybe slightly optimize by returning results in order
+ vector<GeoHash> unorderedCovering;
+ coverer.getCovering(region, &unorderedCovering);
+ set<GeoHash> covering(unorderedCovering.begin(), unorderedCovering.end());
+
+ for (set<GeoHash>::const_iterator it = covering.begin(); it != covering.end(); ++it) {
+ const GeoHash& geoHash = *it;
+ BSONObjBuilder builder;
+ geoHash.appendHashMin(&builder, "");
+ geoHash.appendHashMax(&builder, "");
+
+ oil->intervals.push_back(IndexBoundsBuilder::makeRangeInterval(builder.obj(), true, true));
}
-
- void ExpressionMapping::cover2d(const R2Region& region,
- const BSONObj& indexInfoObj,
- int maxCoveringCells,
- OrderedIntervalList* oil) {
-
- GeoHashConverter::Parameters hashParams;
- Status paramStatus = GeoHashConverter::parseParameters(indexInfoObj, &hashParams);
- verify(paramStatus.isOK()); // We validated the parameters when creating the index
-
- GeoHashConverter hashConverter(hashParams);
- R2RegionCoverer coverer(&hashConverter);
- coverer.setMaxLevel(hashConverter.getBits());
- coverer.setMaxCells(maxCoveringCells);
-
- // TODO: Maybe slightly optimize by returning results in order
- vector<GeoHash> unorderedCovering;
- coverer.getCovering(region, &unorderedCovering);
- set<GeoHash> covering(unorderedCovering.begin(), unorderedCovering.end());
-
- for (set<GeoHash>::const_iterator it = covering.begin(); it != covering.end();
- ++it) {
-
- const GeoHash& geoHash = *it;
- BSONObjBuilder builder;
- geoHash.appendHashMin(&builder, "");
- geoHash.appendHashMax(&builder, "");
-
- oil->intervals.push_back(IndexBoundsBuilder::makeRangeInterval(builder.obj(),
- true,
- true));
- }
+}
+
+// TODO: what should we really pass in for indexInfoObj?
+void ExpressionMapping::cover2dsphere(const S2Region& region,
+ const BSONObj& indexInfoObj,
+ OrderedIntervalList* oilOut) {
+ int coarsestIndexedLevel;
+ BSONElement ce = indexInfoObj["coarsestIndexedLevel"];
+ if (ce.isNumber()) {
+ coarsestIndexedLevel = ce.numberInt();
+ } else {
+ coarsestIndexedLevel = S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / kRadiusOfEarthInMeters);
}
- // TODO: what should we really pass in for indexInfoObj?
- void ExpressionMapping::cover2dsphere(const S2Region& region,
- const BSONObj& indexInfoObj,
- OrderedIntervalList* oilOut) {
-
- int coarsestIndexedLevel;
- BSONElement ce = indexInfoObj["coarsestIndexedLevel"];
- if (ce.isNumber()) {
- coarsestIndexedLevel = ce.numberInt();
- }
- else {
- coarsestIndexedLevel =
- S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / kRadiusOfEarthInMeters);
+ // The min level of our covering is the level whose cells are the closest match to the
+    // *area* of the region (or the max indexed level, whichever is smaller). The max level
+ // is 4 sizes larger.
+ double edgeLen = sqrt(region.GetRectBound().Area());
+ S2RegionCoverer coverer;
+ coverer.set_min_level(min(coarsestIndexedLevel, 2 + S2::kAvgEdge.GetClosestLevel(edgeLen)));
+ coverer.set_max_level(4 + coverer.min_level());
+
+ std::vector<S2CellId> cover;
+ coverer.GetCovering(region, &cover);
+
+ // Look at the cells we cover and all cells that are within our covering and finer.
+ // Anything with our cover as a strict prefix is contained within the cover and should
+ // be intersection tested.
+ std::set<string> intervalSet;
+ std::set<string> exactSet;
+ for (size_t i = 0; i < cover.size(); ++i) {
+ S2CellId coveredCell = cover[i];
+ intervalSet.insert(coveredCell.toString());
+
+ // Look at the cells that cover us. We want to look at every cell that contains the
+ // covering we would index on if we were to insert the query geometry. We generate
+ // the would-index-with-this-covering and find all the cells strictly containing the
+ // cells in that set, until we hit the coarsest indexed cell. We use equality, not
+ // a prefix match. Why not prefix? Because we've already looked at everything
+ // finer or as fine as our initial covering.
+ //
+ // Say we have a fine point with cell id 212121, we go up one, get 21212, we don't
+ // want to look at cells 21212[not-1] because we know they're not going to intersect
+ // with 212121, but entries inserted with cell value 21212 (no trailing digits) may.
+ // And we've already looked at points with the cell id 211111 from the regex search
+ // created above, so we only want things where the value of the last digit is not
+ // stored (and therefore could be 1).
+
+ while (coveredCell.level() > coarsestIndexedLevel) {
+ // Add the parent cell of the currently covered cell since we aren't at the
+ // coarsest level yet
+ // NOTE: Be careful not to generate cells strictly less than the
+ // coarsestIndexedLevel - this can result in S2 failures when level < 0.
+
+ coveredCell = coveredCell.parent();
+ exactSet.insert(coveredCell.toString());
}
+ }
- // The min level of our covering is the level whose cells are the closest match to the
- // *area* of the region (or the max indexed level, whichever is smaller) The max level
- // is 4 sizes larger.
- double edgeLen = sqrt(region.GetRectBound().Area());
- S2RegionCoverer coverer;
- coverer.set_min_level(min(coarsestIndexedLevel,
- 2 + S2::kAvgEdge.GetClosestLevel(edgeLen)));
- coverer.set_max_level(4 + coverer.min_level());
-
- std::vector<S2CellId> cover;
- coverer.GetCovering(region, &cover);
-
- // Look at the cells we cover and all cells that are within our covering and finer.
- // Anything with our cover as a strict prefix is contained within the cover and should
- // be intersection tested.
- std::set<string> intervalSet;
- std::set<string> exactSet;
- for (size_t i = 0; i < cover.size(); ++i) {
-
- S2CellId coveredCell = cover[i];
- intervalSet.insert(coveredCell.toString());
-
- // Look at the cells that cover us. We want to look at every cell that contains the
- // covering we would index on if we were to insert the query geometry. We generate
- // the would-index-with-this-covering and find all the cells strictly containing the
- // cells in that set, until we hit the coarsest indexed cell. We use equality, not
- // a prefix match. Why not prefix? Because we've already looked at everything
- // finer or as fine as our initial covering.
- //
- // Say we have a fine point with cell id 212121, we go up one, get 21212, we don't
- // want to look at cells 21212[not-1] because we know they're not going to intersect
- // with 212121, but entries inserted with cell value 21212 (no trailing digits) may.
- // And we've already looked at points with the cell id 211111 from the regex search
- // created above, so we only want things where the value of the last digit is not
- // stored (and therefore could be 1).
-
- while (coveredCell.level() > coarsestIndexedLevel) {
-
- // Add the parent cell of the currently covered cell since we aren't at the
- // coarsest level yet
- // NOTE: Be careful not to generate cells strictly less than the
- // coarsestIndexedLevel - this can result in S2 failures when level < 0.
-
- coveredCell = coveredCell.parent();
- exactSet.insert(coveredCell.toString());
- }
+ // We turned the cell IDs into strings which define point intervals or prefixes of
+ // strings we want to look for.
+ std::set<std::string>::iterator exactIt = exactSet.begin();
+ std::set<std::string>::iterator intervalIt = intervalSet.begin();
+ while (exactSet.end() != exactIt && intervalSet.end() != intervalIt) {
+ const std::string& exact = *exactIt;
+ const std::string& ival = *intervalIt;
+ if (exact < ival) {
+ // add exact
+ oilOut->intervals.push_back(IndexBoundsBuilder::makePointInterval(exact));
+ exactIt++;
+ } else {
+ std::string end = ival;
+ end[end.size() - 1]++;
+ oilOut->intervals.push_back(
+ IndexBoundsBuilder::makeRangeInterval(ival, end, true, false));
+ intervalIt++;
}
+ }
- // We turned the cell IDs into strings which define point intervals or prefixes of
- // strings we want to look for.
- std::set<std::string>::iterator exactIt = exactSet.begin();
- std::set<std::string>::iterator intervalIt = intervalSet.begin();
- while (exactSet.end() != exactIt && intervalSet.end() != intervalIt) {
- const std::string& exact = *exactIt;
+ if (exactSet.end() != exactIt) {
+ verify(intervalSet.end() == intervalIt);
+ do {
+ oilOut->intervals.push_back(IndexBoundsBuilder::makePointInterval(*exactIt));
+ exactIt++;
+ } while (exactSet.end() != exactIt);
+ } else if (intervalSet.end() != intervalIt) {
+ verify(exactSet.end() == exactIt);
+ do {
const std::string& ival = *intervalIt;
- if (exact < ival) {
- // add exact
- oilOut->intervals.push_back(IndexBoundsBuilder::makePointInterval(exact));
- exactIt++;
- }
- else {
- std::string end = ival;
- end[end.size() - 1]++;
- oilOut->intervals.push_back(
- IndexBoundsBuilder::makeRangeInterval(ival, end, true, false));
- intervalIt++;
- }
- }
-
- if (exactSet.end() != exactIt) {
- verify(intervalSet.end() == intervalIt);
- do {
- oilOut->intervals.push_back(IndexBoundsBuilder::makePointInterval(*exactIt));
- exactIt++;
- } while (exactSet.end() != exactIt);
- }
- else if (intervalSet.end() != intervalIt) {
- verify(exactSet.end() == exactIt);
- do {
- const std::string& ival = *intervalIt;
- std::string end = ival;
- end[end.size() - 1]++;
- oilOut->intervals.push_back(
- IndexBoundsBuilder::makeRangeInterval(ival, end, true, false));
- intervalIt++;
- } while (intervalSet.end() != intervalIt);
- }
+ std::string end = ival;
+ end[end.size() - 1]++;
+ oilOut->intervals.push_back(
+ IndexBoundsBuilder::makeRangeInterval(ival, end, true, false));
+ intervalIt++;
+ } while (intervalSet.end() != intervalIt);
+ }
- // Make sure that our intervals don't overlap each other and are ordered correctly.
- // This perhaps should only be done in debug mode.
- if (!oilOut->isValidFor(1)) {
- cout << "check your assumptions! OIL = " << oilOut->toString() << std::endl;
- verify(0);
- }
+ // Make sure that our intervals don't overlap each other and are ordered correctly.
+ // This perhaps should only be done in debug mode.
+ if (!oilOut->isValidFor(1)) {
+ cout << "check your assumptions! OIL = " << oilOut->toString() << std::endl;
+ verify(0);
}
+}
} // namespace mongo
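The interval construction in cover2dsphere relies on a small string trick: incrementing the last byte of a cell-id string produces the exclusive upper bound of every key that has that string as a prefix. A standalone sketch of just that trick (plain C++, independent of the S2 types):

    #include <cassert>
    #include <string>

    int main() {
        std::string ival = "212";  // a coarse cell id
        std::string end = ival;
        end[end.size() - 1]++;     // "213", the first string past the prefix range
        // Descendants such as "212121" fall inside the half-open range [ival, end) ...
        assert(ival <= std::string("212121") && std::string("212121") < end);
        // ... while the next sibling cell "213" and everything after it fall outside.
        assert(!(std::string("213") < end));
        return 0;
    }

That half-open range is what makeRangeInterval(ival, end, true, false) encodes above, the booleans apparently meaning start-inclusive and end-exclusive.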
diff --git a/src/mongo/db/query/expression_index.h b/src/mongo/db/query/expression_index.h
index b50c2037e21..910433924ff 100644
--- a/src/mongo/db/query/expression_index.h
+++ b/src/mongo/db/query/expression_index.h
@@ -32,29 +32,28 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/geo/shapes.h"
-#include "mongo/db/query/index_bounds_builder.h" // For OrderedIntervalList
+#include "mongo/db/query/index_bounds_builder.h" // For OrderedIntervalList
namespace mongo {
- /**
- * Functions that compute expression index mappings.
- *
- * TODO: I think we could structure this more generally with respect to planning.
- */
- class ExpressionMapping {
- public:
-
- static BSONObj hash(const BSONElement& value);
-
- static void cover2d(const R2Region& region,
- const BSONObj& indexInfoObj,
- int maxCoveringCells,
- OrderedIntervalList* oil);
-
- // TODO: what should we really pass in for indexInfoObj?
- static void cover2dsphere(const S2Region& region,
- const BSONObj& indexInfoObj,
- OrderedIntervalList* oilOut);
- };
+/**
+ * Functions that compute expression index mappings.
+ *
+ * TODO: I think we could structure this more generally with respect to planning.
+ */
+class ExpressionMapping {
+public:
+ static BSONObj hash(const BSONElement& value);
+
+ static void cover2d(const R2Region& region,
+ const BSONObj& indexInfoObj,
+ int maxCoveringCells,
+ OrderedIntervalList* oil);
+
+ // TODO: what should we really pass in for indexInfoObj?
+ static void cover2dsphere(const S2Region& region,
+ const BSONObj& indexInfoObj,
+ OrderedIntervalList* oilOut);
+};
} // namespace mongo
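For the hash() helper declared here, a hedged usage sketch (the call site is illustrative, not taken from this diff):

    // Hash the value of an equality predicate such as {a: 5}.
    BSONObj query = BSON("a" << 5);
    BSONObj hashedKey = ExpressionMapping::hash(query.firstElement());
    // 'hashedKey' holds a single unnamed field containing the 64-bit hash of the value,
    // computed with BSONElementHasher's default seed; the planner can use it as a point
    // bound against a {a: "hashed"} index.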
diff --git a/src/mongo/db/query/expression_index_knobs.cpp b/src/mongo/db/query/expression_index_knobs.cpp
index 4dd8dfe69c1..94422c3aec1 100644
--- a/src/mongo/db/query/expression_index_knobs.cpp
+++ b/src/mongo/db/query/expression_index_knobs.cpp
@@ -32,8 +32,8 @@
namespace mongo {
- MONGO_EXPORT_SERVER_PARAMETER(internalGeoPredicateQuery2DMaxCoveringCells, int, 16);
+MONGO_EXPORT_SERVER_PARAMETER(internalGeoPredicateQuery2DMaxCoveringCells, int, 16);
- MONGO_EXPORT_SERVER_PARAMETER(internalGeoNearQuery2DMaxCoveringCells, int, 16);
+MONGO_EXPORT_SERVER_PARAMETER(internalGeoNearQuery2DMaxCoveringCells, int, 16);
} // namespace mongo
diff --git a/src/mongo/db/query/expression_index_knobs.h b/src/mongo/db/query/expression_index_knobs.h
index 6dcfbaf2592..c5cfa8169e0 100644
--- a/src/mongo/db/query/expression_index_knobs.h
+++ b/src/mongo/db/query/expression_index_knobs.h
@@ -30,18 +30,18 @@
namespace mongo {
- //
- // Geo Query knobs
- //
+//
+// Geo Query knobs
+//
- /**
- * The maximum number of cells to use for 2D geo query covering for predicate queries
- */
- extern int internalGeoPredicateQuery2DMaxCoveringCells;
+/**
+ * The maximum number of cells to use for 2D geo query covering for predicate queries
+ */
+extern int internalGeoPredicateQuery2DMaxCoveringCells;
- /**
- * The maximum number of cells to use for 2D geo query covering for predicate queries
- */
- extern int internalGeoNearQuery2DMaxCoveringCells;
+/**
+ * The maximum number of cells to use for 2D geo near query covering
+ */
+extern int internalGeoNearQuery2DMaxCoveringCells;
} // namespace mongo
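These knobs are plain extern ints exported as server parameters. A hedged sketch of how the predicate knob would feed the 2D covering code from expression_index.cpp above (the wiring shown is illustrative, not part of this diff):

    #include "mongo/db/query/expression_index.h"
    #include "mongo/db/query/expression_index_knobs.h"

    namespace mongo {
    void coverPredicateRegion(const R2Region& region,
                              const BSONObj& indexInfoObj,
                              OrderedIntervalList* oil) {
        // The server parameter caps how many covering cells (and therefore how many
        // index intervals) a single 2D predicate may generate.
        ExpressionMapping::cover2d(
            region, indexInfoObj, internalGeoPredicateQuery2DMaxCoveringCells, oil);
    }
    }  // namespace mongo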
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index ffb57194ebf..f170dc8ea04 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -66,698 +66,666 @@ using std::unique_ptr;
namespace mongo {
- // The .h for this in find_constants.h.
- const int32_t MaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;
-
- // Failpoint for checking whether we've received a getmore.
- MONGO_FP_DECLARE(failReceivedGetmore);
-
- ScopedRecoveryUnitSwapper::ScopedRecoveryUnitSwapper(ClientCursor* cc, OperationContext* txn)
- : _cc(cc),
- _txn(txn),
- _dismissed(false) {
- // Save this for later. We restore it upon destruction.
- _txn->recoveryUnit()->abandonSnapshot();
- _txnPreviousRecoveryUnit.reset(txn->releaseRecoveryUnit());
-
- // Transfer ownership of the RecoveryUnit from the ClientCursor to the OpCtx.
- RecoveryUnit* ccRecoveryUnit = cc->releaseOwnedRecoveryUnit();
- _txnPreviousRecoveryUnitState = txn->setRecoveryUnit(ccRecoveryUnit,
- OperationContext::kNotInUnitOfWork);
+// The .h for this is in find_constants.h.
+const int32_t MaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;
+
+// Failpoint for checking whether we've received a getmore.
+MONGO_FP_DECLARE(failReceivedGetmore);
+
+ScopedRecoveryUnitSwapper::ScopedRecoveryUnitSwapper(ClientCursor* cc, OperationContext* txn)
+ : _cc(cc), _txn(txn), _dismissed(false) {
+ // Save this for later. We restore it upon destruction.
+ _txn->recoveryUnit()->abandonSnapshot();
+ _txnPreviousRecoveryUnit.reset(txn->releaseRecoveryUnit());
+
+ // Transfer ownership of the RecoveryUnit from the ClientCursor to the OpCtx.
+ RecoveryUnit* ccRecoveryUnit = cc->releaseOwnedRecoveryUnit();
+ _txnPreviousRecoveryUnitState =
+ txn->setRecoveryUnit(ccRecoveryUnit, OperationContext::kNotInUnitOfWork);
+}
+
+void ScopedRecoveryUnitSwapper::dismiss() {
+ _dismissed = true;
+}
+
+ScopedRecoveryUnitSwapper::~ScopedRecoveryUnitSwapper() {
+ _txn->recoveryUnit()->abandonSnapshot();
+
+ if (_dismissed) {
+ // Just clean up the recovery unit which we originally got from the ClientCursor.
+ delete _txn->releaseRecoveryUnit();
+ } else {
+ // Swap the RU back into the ClientCursor for subsequent getMores.
+ _cc->setOwnedRecoveryUnit(_txn->releaseRecoveryUnit());
}
- void ScopedRecoveryUnitSwapper::dismiss() {
- _dismissed = true;
- }
-
- ScopedRecoveryUnitSwapper::~ScopedRecoveryUnitSwapper() {
- _txn->recoveryUnit()->abandonSnapshot();
-
- if (_dismissed) {
- // Just clean up the recovery unit which we originally got from the ClientCursor.
- delete _txn->releaseRecoveryUnit();
- }
- else {
- // Swap the RU back into the ClientCursor for subsequent getMores.
- _cc->setOwnedRecoveryUnit(_txn->releaseRecoveryUnit());
- }
+ _txn->setRecoveryUnit(_txnPreviousRecoveryUnit.release(), _txnPreviousRecoveryUnitState);
+}
- _txn->setRecoveryUnit(_txnPreviousRecoveryUnit.release(), _txnPreviousRecoveryUnitState);
+/**
+ * If ntoreturn is zero, we stop generating additional results as soon as we have either 101
+ * documents or at least 1MB of data. On subsequent getmores, there is no limit on the number
+ * of results; we will stop as soon as we have at least 4 MB of data. The idea is that on a
+ * find() where one doesn't use many results, we don't return much, but once getmore kicks in,
+ * we start pushing significant quantities.
+ *
+ * If ntoreturn is non-zero, then we stop building the first batch once we either have ntoreturn
+ * results, or when the result set exceeds 4 MB.
+ */
+bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered) {
+ if (!pq.getBatchSize()) {
+ return (bytesBuffered > 1024 * 1024) || numDocs >= LiteParsedQuery::kDefaultBatchSize;
}
-
- /**
- * If ntoreturn is zero, we stop generating additional results as soon as we have either 101
- * documents or at least 1MB of data. On subsequent getmores, there is no limit on the number
- * of results; we will stop as soon as we have at least 4 MB of data. The idea is that on a
- * find() where one doesn't use much results, we don't return much, but once getmore kicks in,
- * we start pushing significant quantities.
- *
- * If ntoreturn is non-zero, the we stop building the first batch once we either have ntoreturn
- * results, or when the result set exceeds 4 MB.
- */
- bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered) {
- if (!pq.getBatchSize()) {
- return (bytesBuffered > 1024 * 1024) || numDocs >= LiteParsedQuery::kDefaultBatchSize;
- }
- return numDocs >= *pq.getBatchSize() || bytesBuffered > MaxBytesToReturnToClientAtOnce;
+ return numDocs >= *pq.getBatchSize() || bytesBuffered > MaxBytesToReturnToClientAtOnce;
+}
+
+bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered) {
+ return (ntoreturn && numDocs >= ntoreturn) || (bytesBuffered > MaxBytesToReturnToClientAtOnce);
+}
+
+bool isCursorTailable(const ClientCursor* cursor) {
+ return cursor->queryOptions() & QueryOption_CursorTailable;
+}
+
+bool isCursorAwaitData(const ClientCursor* cursor) {
+ return cursor->queryOptions() & QueryOption_AwaitData;
+}
+
+bool shouldSaveCursor(OperationContext* txn,
+ const Collection* collection,
+ PlanExecutor::ExecState finalState,
+ PlanExecutor* exec) {
+ if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
+ return false;
}
- bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered) {
- return (ntoreturn && numDocs >= ntoreturn)
- || (bytesBuffered > MaxBytesToReturnToClientAtOnce);
+ const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
+ if (!pq.wantMore() && !pq.isTailable()) {
+ return false;
}
- bool isCursorTailable(const ClientCursor* cursor) {
- return cursor->queryOptions() & QueryOption_CursorTailable;
+ if (!pq.isFromFindCommand() && pq.getBatchSize() && *pq.getBatchSize() == 1) {
+ return false;
}
- bool isCursorAwaitData(const ClientCursor* cursor) {
- return cursor->queryOptions() & QueryOption_AwaitData;
+ // We keep a tailable cursor around unless the collection we're tailing has no
+ // records.
+ //
+ // SERVER-13955: we should be able to create a tailable cursor that waits on
+ // an empty collection. Right now we do not keep a cursor if the collection
+ // has zero records.
+ if (pq.isTailable()) {
+ return collection && collection->numRecords(txn) != 0U;
}
- bool shouldSaveCursor(OperationContext* txn,
- const Collection* collection,
- PlanExecutor::ExecState finalState,
- PlanExecutor* exec) {
- if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
- return false;
- }
-
- const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
- if (!pq.wantMore() && !pq.isTailable()) {
- return false;
- }
-
- if (!pq.isFromFindCommand() && pq.getBatchSize() && *pq.getBatchSize() == 1) {
- return false;
- }
+ return !exec->isEOF();
+}
- // We keep a tailable cursor around unless the collection we're tailing has no
- // records.
- //
- // SERVER-13955: we should be able to create a tailable cursor that waits on
- // an empty collection. Right now we do not keep a cursor if the collection
- // has zero records.
- if (pq.isTailable()) {
- return collection && collection->numRecords(txn) != 0U;
- }
+bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
+ PlanExecutor* exec,
+ bool isTailable) {
+ if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
+ return false;
+ }
- return !exec->isEOF();
+ if (isTailable) {
+ return true;
}
- bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
- PlanExecutor* exec,
- bool isTailable) {
- if (PlanExecutor::FAILURE == finalState || PlanExecutor::DEAD == finalState) {
- return false;
- }
+ return !exec->isEOF();
+}
+
+void beginQueryOp(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& queryObj,
+ int ntoreturn,
+ int ntoskip) {
+ auto curop = CurOp::get(txn);
+ curop->debug().ns = nss.ns();
+ curop->debug().query = queryObj;
+ curop->debug().ntoreturn = ntoreturn;
+ curop->debug().ntoskip = ntoskip;
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ curop->setQuery_inlock(queryObj);
+}
+
+void endQueryOp(OperationContext* txn,
+ PlanExecutor* exec,
+ int dbProfilingLevel,
+ int numResults,
+ CursorId cursorId) {
+ auto curop = CurOp::get(txn);
+ invariant(exec);
+
+ // Fill out basic curop query exec properties.
+ curop->debug().nreturned = numResults;
+ curop->debug().cursorid = (0 == cursorId ? -1 : cursorId);
+ curop->debug().cursorExhausted = (0 == cursorId);
+
+ // Fill out curop based on explain summary statistics.
+ PlanSummaryStats summaryStats;
+ Explain::getSummaryStats(exec, &summaryStats);
+ curop->debug().scanAndOrder = summaryStats.hasSortStage;
+ curop->debug().nscanned = summaryStats.totalKeysExamined;
+ curop->debug().nscannedObjects = summaryStats.totalDocsExamined;
+ curop->debug().idhack = summaryStats.isIdhack;
+
+ const logger::LogComponent queryLogComponent = logger::LogComponent::kQuery;
+ const logger::LogSeverity logLevelOne = logger::LogSeverity::Debug(1);
+
+ // Set debug information for consumption by the profiler and slow query log.
+ if (dbProfilingLevel > 0 || curop->elapsedMillis() > serverGlobalParams.slowMS ||
+ logger::globalLogDomain()->shouldLog(queryLogComponent, logLevelOne)) {
+ // Generate plan summary string.
+ curop->debug().planSummary = Explain::getPlanSummary(exec);
+ }
- if (isTailable) {
- return true;
+ // Set debug information for consumption by the profiler only.
+ if (dbProfilingLevel > 0) {
+ // Get BSON stats.
+ unique_ptr<PlanStageStats> execStats(exec->getStats());
+ BSONObjBuilder statsBob;
+ Explain::statsToBSON(*execStats, &statsBob);
+ curop->debug().execStats.set(statsBob.obj());
+
+ // Replace exec stats with plan summary if stats cannot fit into CachedBSONObj.
+ if (curop->debug().execStats.tooBig() && !curop->debug().planSummary.empty()) {
+ BSONObjBuilder bob;
+ bob.append("summary", curop->debug().planSummary.toString());
+ curop->debug().execStats.set(bob.done());
}
-
- return !exec->isEOF();
}
+}
- void beginQueryOp(OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& queryObj,
- int ntoreturn,
- int ntoskip) {
- auto curop = CurOp::get(txn);
- curop->debug().ns = nss.ns();
- curop->debug().query = queryObj;
- curop->debug().ntoreturn = ntoreturn;
- curop->debug().ntoskip = ntoskip;
- stdx::lock_guard<Client> lk(*txn->getClient());
- curop->setQuery_inlock(queryObj);
+/**
+ * Called by db/instance.cpp. This is the getMore entry point.
+ *
+ * pass - when QueryOption_AwaitData is in use, the caller will make repeated calls
+ * when this method returns an empty result, incrementing pass on each call.
+ * Thus, pass == 0 indicates this is the first "attempt" before any 'awaiting'.
+ */
+QueryResult::View getMore(OperationContext* txn,
+ const char* ns,
+ int ntoreturn,
+ long long cursorid,
+ int pass,
+ bool& exhaust,
+ bool* isCursorAuthorized) {
+ CurOp& curop = *CurOp::get(txn);
+
+ // For testing, we may want to fail if we receive a getmore.
+ if (MONGO_FAIL_POINT(failReceivedGetmore)) {
+ invariant(0);
}
- void endQueryOp(OperationContext* txn,
- PlanExecutor* exec,
- int dbProfilingLevel,
- int numResults,
- CursorId cursorId) {
- auto curop = CurOp::get(txn);
- invariant(exec);
-
- // Fill out basic curop query exec properties.
- curop->debug().nreturned = numResults;
- curop->debug().cursorid = (0 == cursorId ? -1 : cursorId);
- curop->debug().cursorExhausted = (0 == cursorId);
-
- // Fill out curop based on explain summary statistics.
- PlanSummaryStats summaryStats;
- Explain::getSummaryStats(exec, &summaryStats);
- curop->debug().scanAndOrder = summaryStats.hasSortStage;
- curop->debug().nscanned = summaryStats.totalKeysExamined;
- curop->debug().nscannedObjects = summaryStats.totalDocsExamined;
- curop->debug().idhack = summaryStats.isIdhack;
-
- const logger::LogComponent queryLogComponent = logger::LogComponent::kQuery;
- const logger::LogSeverity logLevelOne = logger::LogSeverity::Debug(1);
-
- // Set debug information for consumption by the profiler and slow query log.
- if (dbProfilingLevel > 0
- || curop->elapsedMillis() > serverGlobalParams.slowMS
- || logger::globalLogDomain()->shouldLog(queryLogComponent, logLevelOne)) {
- // Generate plan summary string.
- curop->debug().planSummary = Explain::getPlanSummary(exec);
- }
+ exhaust = false;
+
+ const NamespaceString nss(ns);
+
+ // Depending on the type of cursor being operated on, we hold locks for the whole getMore,
+ // or none of the getMore, or part of the getMore. The three cases in detail:
+ //
+ // 1) Normal cursor: we lock with "ctx" and hold it for the whole getMore.
+ // 2) Cursor owned by global cursor manager: we don't lock anything. These cursors don't
+ // own any collection state.
+ // 3) Agg cursor: we lock with "ctx", then release, then relock with "unpinDBLock" and
+ // "unpinCollLock". This is because agg cursors handle locking internally (hence the
+ // release), but the pin and unpin of the cursor must occur under the collection lock.
+ // We don't use our AutoGetCollectionForRead "ctx" to relock, because
+ // AutoGetCollectionForRead checks the sharding version (and we want the relock for the
+ // unpin to succeed even if the sharding version has changed).
+ //
+ // Note that we declare our locks before our ClientCursorPin, in order to ensure that the
+ // pin's destructor is called before the lock destructors (so that the unpin occurs under
+ // the lock).
+ std::unique_ptr<AutoGetCollectionForRead> ctx;
+ std::unique_ptr<Lock::DBLock> unpinDBLock;
+ std::unique_ptr<Lock::CollectionLock> unpinCollLock;
+
+ CursorManager* cursorManager;
+ CursorManager* globalCursorManager = CursorManager::getGlobalCursorManager();
+ if (globalCursorManager->ownsCursorId(cursorid)) {
+ cursorManager = globalCursorManager;
+ } else {
+ ctx.reset(new AutoGetCollectionForRead(txn, nss));
+ Collection* collection = ctx->getCollection();
+ uassert(17356, "collection dropped between getMore calls", collection);
+ cursorManager = collection->getCursorManager();
+ }
- // Set debug information for consumption by the profiler only.
- if (dbProfilingLevel > 0) {
- // Get BSON stats.
- unique_ptr<PlanStageStats> execStats(exec->getStats());
- BSONObjBuilder statsBob;
- Explain::statsToBSON(*execStats, &statsBob);
- curop->debug().execStats.set(statsBob.obj());
-
- // Replace exec stats with plan summary if stats cannot fit into CachedBSONObj.
- if (curop->debug().execStats.tooBig() && !curop->debug().planSummary.empty()) {
- BSONObjBuilder bob;
- bob.append("summary", curop->debug().planSummary.toString());
- curop->debug().execStats.set(bob.done());
+ LOG(5) << "Running getMore, cursorid: " << cursorid << endl;
+
+ // This checks to make sure the operation is allowed on a replicated node. Since we are not
+ // passing in a query object (necessary to check SlaveOK query option), the only state where
+ // reads are allowed is PRIMARY (or master in master/slave). This function uasserts if
+ // reads are not okay.
+ Status status = repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(txn, nss, true);
+ uassertStatusOK(status);
+
+ // A pin performs a CC lookup and if there is a CC, increments the CC's pin value so it
+ // doesn't time out. Also informs ClientCursor that there is somebody actively holding the
+ // CC, so don't delete it.
+ ClientCursorPin ccPin(cursorManager, cursorid);
+ ClientCursor* cc = ccPin.c();
+
+ // If we're not being called from DBDirectClient we want to associate the RecoveryUnit
+ // used to create the execution machinery inside the cursor with our OperationContext.
+ // If we throw or otherwise exit this method in a disorderly fashion, we must ensure
+ // that further calls to getMore won't fail, and that the provided OperationContext
+ // has a valid RecoveryUnit. As such, we use RAII to accomplish this.
+ //
+ // This must be destroyed before the ClientCursor is destroyed.
+ std::unique_ptr<ScopedRecoveryUnitSwapper> ruSwapper;
+
+ // These are set in the QueryResult msg we return.
+ int resultFlags = ResultFlag_AwaitCapable;
+
+ int numResults = 0;
+ int startingResult = 0;
+
+ const int InitialBufSize = 512 + sizeof(QueryResult::Value) + MaxBytesToReturnToClientAtOnce;
+
+ BufBuilder bb(InitialBufSize);
+ bb.skip(sizeof(QueryResult::Value));
+
+ if (NULL == cc) {
+ cursorid = 0;
+ resultFlags = ResultFlag_CursorNotFound;
+ } else {
+ // Check for spoofing of the ns such that it does not match the one originally
+ // there for the cursor.
+ uassert(ErrorCodes::Unauthorized,
+ str::stream() << "Requested getMore on namespace " << ns << ", but cursor "
+ << cursorid << " belongs to namespace " << cc->ns(),
+ ns == cc->ns());
+ *isCursorAuthorized = true;
+
+ // Restore the RecoveryUnit if we need to.
+ if (txn->getClient()->isInDirectClient()) {
+ if (cc->hasRecoveryUnit())
+ invariant(txn->recoveryUnit() == cc->getUnownedRecoveryUnit());
+ } else {
+ if (!cc->hasRecoveryUnit()) {
+ // Start using a new RecoveryUnit
+ cc->setOwnedRecoveryUnit(
+ getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
}
+ // Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
+ ruSwapper.reset(new ScopedRecoveryUnitSwapper(cc, txn));
}
- }
- /**
- * Called by db/instance.cpp. This is the getMore entry point.
- *
- * pass - when QueryOption_AwaitData is in use, the caller will make repeated calls
- * when this method returns an empty result, incrementing pass on each call.
- * Thus, pass == 0 indicates this is the first "attempt" before any 'awaiting'.
- */
- QueryResult::View getMore(OperationContext* txn,
- const char* ns,
- int ntoreturn,
- long long cursorid,
- int pass,
- bool& exhaust,
- bool* isCursorAuthorized) {
-
- CurOp& curop = *CurOp::get(txn);
-
- // For testing, we may want to fail if we receive a getmore.
- if (MONGO_FAIL_POINT(failReceivedGetmore)) {
- invariant(0);
+ // Reset timeout timer on the cursor since the cursor is still in use.
+ cc->setIdleTime(0);
+
+ // If the operation that spawned this cursor had a time limit set, apply leftover
+ // time to this getmore.
+ curop.setMaxTimeMicros(cc->getLeftoverMaxTimeMicros());
+ txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+
+ // Ensure that the original query or command object is available in the slow query log,
+ // profiler, and currentOp.
+ curop.debug().query = cc->getQuery();
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ curop.setQuery_inlock(cc->getQuery());
}
- exhaust = false;
-
- const NamespaceString nss(ns);
-
- // Depending on the type of cursor being operated on, we hold locks for the whole getMore,
- // or none of the getMore, or part of the getMore. The three cases in detail:
- //
- // 1) Normal cursor: we lock with "ctx" and hold it for the whole getMore.
- // 2) Cursor owned by global cursor manager: we don't lock anything. These cursors don't
- // own any collection state.
- // 3) Agg cursor: we lock with "ctx", then release, then relock with "unpinDBLock" and
- // "unpinCollLock". This is because agg cursors handle locking internally (hence the
- // release), but the pin and unpin of the cursor must occur under the collection lock.
- // We don't use our AutoGetCollectionForRead "ctx" to relock, because
- // AutoGetCollectionForRead checks the sharding version (and we want the relock for the
- // unpin to succeed even if the sharding version has changed).
- //
- // Note that we declare our locks before our ClientCursorPin, in order to ensure that the
- // pin's destructor is called before the lock destructors (so that the unpin occurs under
- // the lock).
- std::unique_ptr<AutoGetCollectionForRead> ctx;
- std::unique_ptr<Lock::DBLock> unpinDBLock;
- std::unique_ptr<Lock::CollectionLock> unpinCollLock;
-
- CursorManager* cursorManager;
- CursorManager* globalCursorManager = CursorManager::getGlobalCursorManager();
- if (globalCursorManager->ownsCursorId(cursorid)) {
- cursorManager = globalCursorManager;
+ if (0 == pass) {
+ cc->updateSlaveLocation(txn);
}
- else {
- ctx.reset(new AutoGetCollectionForRead(txn, nss));
- Collection* collection = ctx->getCollection();
- uassert( 17356, "collection dropped between getMore calls", collection );
- cursorManager = collection->getCursorManager();
+
+ if (cc->isAggCursor()) {
+ // Agg cursors handle their own locking internally.
+ ctx.reset(); // unlocks
}
- LOG(5) << "Running getMore, cursorid: " << cursorid << endl;
-
- // This checks to make sure the operation is allowed on a replicated node. Since we are not
- // passing in a query object (necessary to check SlaveOK query option), the only state where
- // reads are allowed is PRIMARY (or master in master/slave). This function uasserts if
- // reads are not okay.
- Status status = repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(
- txn,
- nss,
- true);
- uassertStatusOK(status);
-
- // A pin performs a CC lookup and if there is a CC, increments the CC's pin value so it
- // doesn't time out. Also informs ClientCursor that there is somebody actively holding the
- // CC, so don't delete it.
- ClientCursorPin ccPin(cursorManager, cursorid);
- ClientCursor* cc = ccPin.c();
-
- // If we're not being called from DBDirectClient we want to associate the RecoveryUnit
- // used to create the execution machinery inside the cursor with our OperationContext.
- // If we throw or otherwise exit this method in a disorderly fashion, we must ensure
- // that further calls to getMore won't fail, and that the provided OperationContext
- // has a valid RecoveryUnit. As such, we use RAII to accomplish this.
- //
- // This must be destroyed before the ClientCursor is destroyed.
- std::unique_ptr<ScopedRecoveryUnitSwapper> ruSwapper;
-
- // These are set in the QueryResult msg we return.
- int resultFlags = ResultFlag_AwaitCapable;
-
- int numResults = 0;
- int startingResult = 0;
-
- const int InitialBufSize =
- 512 + sizeof(QueryResult::Value) + MaxBytesToReturnToClientAtOnce;
-
- BufBuilder bb(InitialBufSize);
- bb.skip(sizeof(QueryResult::Value));
+ // If we're replaying the oplog, we save the last time that we read.
+ Timestamp slaveReadTill;
- if (NULL == cc) {
- cursorid = 0;
- resultFlags = ResultFlag_CursorNotFound;
- }
- else {
- // Check for spoofing of the ns such that it does not match the one originally
- // there for the cursor.
- uassert(ErrorCodes::Unauthorized,
- str::stream() << "Requested getMore on namespace " << ns << ", but cursor "
- << cursorid << " belongs to namespace " << cc->ns(),
- ns == cc->ns());
- *isCursorAuthorized = true;
-
- // Restore the RecoveryUnit if we need to.
- if (txn->getClient()->isInDirectClient()) {
- if (cc->hasRecoveryUnit())
- invariant(txn->recoveryUnit() == cc->getUnownedRecoveryUnit());
- }
- else {
- if (!cc->hasRecoveryUnit()) {
- // Start using a new RecoveryUnit
- cc->setOwnedRecoveryUnit(
- getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
+ // What number result are we starting at? Used to fill out the reply.
+ startingResult = cc->pos();
- }
- // Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
- ruSwapper.reset(new ScopedRecoveryUnitSwapper(cc, txn));
- }
+ // What gives us results.
+ PlanExecutor* exec = cc->getExecutor();
+ const int queryOptions = cc->queryOptions();
- // Reset timeout timer on the cursor since the cursor is still in use.
- cc->setIdleTime(0);
+ // Get results out of the executor.
+ exec->restoreState(txn);
- // If the operation that spawned this cursor had a time limit set, apply leftover
- // time to this getmore.
- curop.setMaxTimeMicros(cc->getLeftoverMaxTimeMicros());
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ // Add result to output buffer.
+ bb.appendBuf((void*)obj.objdata(), obj.objsize());
- // Ensure that the original query or command object is available in the slow query log,
- // profiler, and currentOp.
- curop.debug().query = cc->getQuery();
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- curop.setQuery_inlock(cc->getQuery());
- }
+ // Count the result.
+ ++numResults;
- if (0 == pass) {
- cc->updateSlaveLocation(txn);
+ // Possibly note slave's position in the oplog.
+ if (queryOptions & QueryOption_OplogReplay) {
+ BSONElement e = obj["ts"];
+ if (Date == e.type() || bsonTimestamp == e.type()) {
+ slaveReadTill = e.timestamp();
+ }
}
- if (cc->isAggCursor()) {
- // Agg cursors handle their own locking internally.
- ctx.reset(); // unlocks
+ if (enoughForGetMore(ntoreturn, numResults, bb.len())) {
+ break;
}
+ }
- // If we're replaying the oplog, we save the last time that we read.
- Timestamp slaveReadTill;
-
- // What number result are we starting at? Used to fill out the reply.
- startingResult = cc->pos();
-
- // What gives us results.
- PlanExecutor* exec = cc->getExecutor();
- const int queryOptions = cc->queryOptions();
+ if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
+ // Propagate this error to caller.
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "getMore executor error, stats: " << Explain::statsToBSON(*stats);
+ uasserted(17406, "getMore executor error: " + WorkingSetCommon::toStatusString(obj));
+ }
- // Get results out of the executor.
- exec->restoreState(txn);
+ const bool shouldSaveCursor = shouldSaveCursorGetMore(state, exec, isCursorTailable(cc));
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // Add result to output buffer.
- bb.appendBuf((void*)obj.objdata(), obj.objsize());
+ // In order to deregister a cursor, we need to be holding the DB + collection lock and
+ // if the cursor is aggregation, we release these locks.
+ if (cc->isAggCursor()) {
+ invariant(NULL == ctx.get());
+ unpinDBLock.reset(new Lock::DBLock(txn->lockState(), nss.db(), MODE_IS));
+ unpinCollLock.reset(new Lock::CollectionLock(txn->lockState(), nss.ns(), MODE_IS));
+ }
- // Count the result.
- ++numResults;
+ // Our two possible ClientCursorPin cleanup paths are:
+ // 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
+ // 2) If the cursor is going to be saved, we simply let the pin go out of scope. In
+ // this case, the pin's destructor will be invoked, which will call release() on the
+ // pin. Because our ClientCursorPin is declared after our lock is declared, this
+ // will happen under the lock.
+ if (!shouldSaveCursor) {
+ ruSwapper.reset();
+ ccPin.deleteUnderlying();
+
+ // cc is now invalid, as is the executor
+ cursorid = 0;
+ cc = NULL;
+ curop.debug().cursorExhausted = true;
+
+ LOG(5) << "getMore NOT saving client cursor, ended with state "
+ << PlanExecutor::statestr(state) << endl;
+ } else {
+ // Continue caching the ClientCursor.
+ cc->incPos(numResults);
+ exec->saveState();
+ LOG(5) << "getMore saving client cursor ended with state "
+ << PlanExecutor::statestr(state) << endl;
- // Possibly note slave's position in the oplog.
- if (queryOptions & QueryOption_OplogReplay) {
- BSONElement e = obj["ts"];
- if (Date == e.type() || bsonTimestamp == e.type()) {
- slaveReadTill = e.timestamp();
- }
+ if (PlanExecutor::IS_EOF == state && (queryOptions & QueryOption_CursorTailable)) {
+ if (!txn->getClient()->isInDirectClient()) {
+ // Don't stash the RU. Get a new one on the next getMore.
+ ruSwapper->dismiss();
}
- if (enoughForGetMore(ntoreturn, numResults, bb.len())) {
- break;
+ if ((queryOptions & QueryOption_AwaitData) && (numResults == 0) && (pass < 1000)) {
+ // Bubble up to the AwaitData handling code in receivedGetMore which will
+ // try again.
+ return NULL;
}
}
- if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- // Propagate this error to caller.
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "getMore executor error, stats: "
- << Explain::statsToBSON(*stats);
- uasserted(17406, "getMore executor error: " +
- WorkingSetCommon::toStatusString(obj));
+ // Possibly note slave's position in the oplog.
+ if ((queryOptions & QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
+ cc->slaveReadTill(slaveReadTill);
}
- const bool shouldSaveCursor =
- shouldSaveCursorGetMore(state, exec, isCursorTailable(cc));
+ exhaust = (queryOptions & QueryOption_Exhaust);
- // In order to deregister a cursor, we need to be holding the DB + collection lock and
- // if the cursor is aggregation, we release these locks.
- if (cc->isAggCursor()) {
- invariant(NULL == ctx.get());
- unpinDBLock.reset(new Lock::DBLock(txn->lockState(), nss.db(), MODE_IS));
- unpinCollLock.reset(new Lock::CollectionLock(txn->lockState(), nss.ns(), MODE_IS));
- }
+ // If the getmore had a time limit, remaining time is "rolled over" back to the
+ // cursor (for use by future getmore ops).
+ cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
+ }
+ }
- // Our two possible ClientCursorPin cleanup paths are:
- // 1) If the cursor is not going to be saved, we call deleteUnderlying() on the pin.
- // 2) If the cursor is going to be saved, we simply let the pin go out of scope. In
- // this case, the pin's destructor will be invoked, which will call release() on the
- // pin. Because our ClientCursorPin is declared after our lock is declared, this
- // will happen under the lock.
- if (!shouldSaveCursor) {
- ruSwapper.reset();
- ccPin.deleteUnderlying();
-
- // cc is now invalid, as is the executor
- cursorid = 0;
- cc = NULL;
- curop.debug().cursorExhausted = true;
-
- LOG(5) << "getMore NOT saving client cursor, ended with state "
- << PlanExecutor::statestr(state)
- << endl;
- }
- else {
- // Continue caching the ClientCursor.
- cc->incPos(numResults);
- exec->saveState();
- LOG(5) << "getMore saving client cursor ended with state "
- << PlanExecutor::statestr(state)
- << endl;
-
- if (PlanExecutor::IS_EOF == state && (queryOptions & QueryOption_CursorTailable)) {
- if (!txn->getClient()->isInDirectClient()) {
- // Don't stash the RU. Get a new one on the next getMore.
- ruSwapper->dismiss();
- }
-
- if ((queryOptions & QueryOption_AwaitData)
- && (numResults == 0)
- && (pass < 1000)) {
- // Bubble up to the AwaitData handling code in receivedGetMore which will
- // try again.
- return NULL;
- }
- }
+ QueryResult::View qr = bb.buf();
+ qr.msgdata().setLen(bb.len());
+ qr.msgdata().setOperation(opReply);
+ qr.setResultFlags(resultFlags);
+ qr.setCursorId(cursorid);
+ qr.setStartingFrom(startingResult);
+ qr.setNReturned(numResults);
+ bb.decouple();
+ LOG(5) << "getMore returned " << numResults << " results\n";
+ return qr;
+}
+
+std::string runQuery(OperationContext* txn,
+ QueryMessage& q,
+ const NamespaceString& nss,
+ Message& result) {
+ CurOp& curop = *CurOp::get(txn);
+ // Validate the namespace.
+ uassert(16256, str::stream() << "Invalid ns [" << nss.ns() << "]", nss.isValid());
+ invariant(!nss.isCommand());
+
+ // Set curop information.
+ beginQueryOp(txn, nss, q.query, q.ntoreturn, q.ntoskip);
+
+ // Parse the qm into a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq;
+ {
+ CanonicalQuery* cqRaw;
+ Status canonStatus =
+ CanonicalQuery::canonicalize(q, &cqRaw, WhereCallbackReal(txn, nss.db()));
+ if (!canonStatus.isOK()) {
+ uasserted(17287,
+ str::stream() << "Can't canonicalize query: " << canonStatus.toString());
+ }
+ cq.reset(cqRaw);
+ }
+ invariant(cq.get());
+
+ LOG(5) << "Running query:\n" << cq->toString();
+ LOG(2) << "Running query: " << cq->toStringShort();
+
+ // Parse, canonicalize, plan, transcribe, and get a plan executor.
+ AutoGetCollectionForRead ctx(txn, nss);
+ Collection* collection = ctx.getCollection();
+
+ const int dbProfilingLevel =
+ ctx.getDb() ? ctx.getDb()->getProfilingLevel() : serverGlobalParams.defaultProfile;
+
+ // We have a parsed query. Time to get the execution plan for it.
+ std::unique_ptr<PlanExecutor> exec;
+ {
+ PlanExecutor* rawExec;
+ Status execStatus =
+ getExecutorFind(txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
+ uassertStatusOK(execStatus);
+ exec.reset(rawExec);
+ }
+ const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
- // Possibly note slave's position in the oplog.
- if ((queryOptions & QueryOption_OplogReplay) && !slaveReadTill.isNull()) {
- cc->slaveReadTill(slaveReadTill);
- }
+ // If it's actually an explain, do the explain and return rather than falling through
+ // to the normal query execution loop.
+ if (pq.isExplain()) {
+ BufBuilder bb;
+ bb.skip(sizeof(QueryResult::Value));
- exhaust = (queryOptions & QueryOption_Exhaust);
+ BSONObjBuilder explainBob;
+ Explain::explainStages(exec.get(), ExplainCommon::EXEC_ALL_PLANS, &explainBob);
- // If the getmore had a time limit, remaining time is "rolled over" back to the
- // cursor (for use by future getmore ops).
- cc->setLeftoverMaxTimeMicros( curop.getRemainingMaxTimeMicros() );
- }
- }
+ // Add the resulting object to the return buffer.
+ BSONObj explainObj = explainBob.obj();
+ bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
+
+ // TODO: Does this get overwritten/do we really need to set this twice?
+ curop.debug().query = q.query;
+ // Set query result fields.
QueryResult::View qr = bb.buf();
+ bb.decouple();
+ qr.setResultFlagsToOk();
qr.msgdata().setLen(bb.len());
+ curop.debug().responseLength = bb.len();
qr.msgdata().setOperation(opReply);
- qr.setResultFlags(resultFlags);
- qr.setCursorId(cursorid);
- qr.setStartingFrom(startingResult);
- qr.setNReturned(numResults);
- bb.decouple();
- LOG(5) << "getMore returned " << numResults << " results\n";
- return qr;
+ qr.setCursorId(0);
+ qr.setStartingFrom(0);
+ qr.setNReturned(1);
+ result.setData(qr.view2ptr(), true);
+ return "";
}
- std::string runQuery(OperationContext* txn,
- QueryMessage& q,
- const NamespaceString& nss,
- Message &result) {
- CurOp& curop = *CurOp::get(txn);
- // Validate the namespace.
- uassert(16256, str::stream() << "Invalid ns [" << nss.ns() << "]", nss.isValid());
- invariant(!nss.isCommand());
-
- // Set curop information.
- beginQueryOp(txn, nss, q.query, q.ntoreturn, q.ntoskip);
-
- // Parse the qm into a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq;
- {
- CanonicalQuery* cqRaw;
- Status canonStatus = CanonicalQuery::canonicalize(q,
- &cqRaw,
- WhereCallbackReal(txn, nss.db()));
- if (!canonStatus.isOK()) {
- uasserted(17287, str::stream() << "Can't canonicalize query: "
- << canonStatus.toString());
- }
- cq.reset(cqRaw);
- }
- invariant(cq.get());
-
- LOG(5) << "Running query:\n" << cq->toString();
- LOG(2) << "Running query: " << cq->toStringShort();
+ // We freak out later if this changes before we're done with the query.
+ const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
- // Parse, canonicalize, plan, transcribe, and get a plan executor.
- AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = ctx.getCollection();
+ // Handle query option $maxTimeMS (not used with commands).
+ curop.setMaxTimeMicros(static_cast<unsigned long long>(pq.getMaxTimeMS()) * 1000);
+ txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
- const int dbProfilingLevel = ctx.getDb() ? ctx.getDb()->getProfilingLevel() :
- serverGlobalParams.defaultProfile;
+ // uassert if we are not on a primary, and not a secondary with SlaveOk query parameter set.
+ bool slaveOK = pq.isSlaveOk() || pq.hasReadPref();
+ Status serveReadsStatus =
+ repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(txn, nss, slaveOK);
+ uassertStatusOK(serveReadsStatus);
- // We have a parsed query. Time to get the execution plan for it.
- std::unique_ptr<PlanExecutor> exec;
- {
- PlanExecutor* rawExec;
- Status execStatus = getExecutorFind(txn,
- collection,
- nss,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- uassertStatusOK(execStatus);
- exec.reset(rawExec);
- }
- const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
-
- // If it's actually an explain, do the explain and return rather than falling through
- // to the normal query execution loop.
- if (pq.isExplain()) {
- BufBuilder bb;
- bb.skip(sizeof(QueryResult::Value));
-
- BSONObjBuilder explainBob;
- Explain::explainStages(exec.get(), ExplainCommon::EXEC_ALL_PLANS, &explainBob);
-
- // Add the resulting object to the return buffer.
- BSONObj explainObj = explainBob.obj();
- bb.appendBuf((void*)explainObj.objdata(), explainObj.objsize());
-
- // TODO: Does this get overwritten/do we really need to set this twice?
- curop.debug().query = q.query;
-
- // Set query result fields.
- QueryResult::View qr = bb.buf();
- bb.decouple();
- qr.setResultFlagsToOk();
- qr.msgdata().setLen(bb.len());
- curop.debug().responseLength = bb.len();
- qr.msgdata().setOperation(opReply);
- qr.setCursorId(0);
- qr.setStartingFrom(0);
- qr.setNReturned(1);
- result.setData(qr.view2ptr(), true);
- return "";
- }
+ // Run the query.
+ // bb is used to hold query results
+ // this buffer should contain either requested documents per query or
+ // explain information, but not both
+ BufBuilder bb(32768);
+ bb.skip(sizeof(QueryResult::Value));
- // We freak out later if this changes before we're done with the query.
- const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
-
- // Handle query option $maxTimeMS (not used with commands).
- curop.setMaxTimeMicros(static_cast<unsigned long long>(pq.getMaxTimeMS()) * 1000);
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
-
- // uassert if we are not on a primary, and not a secondary with SlaveOk query parameter set.
- bool slaveOK = pq.isSlaveOk() || pq.hasReadPref();
- Status serveReadsStatus = repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(
- txn,
- nss,
- slaveOK);
- uassertStatusOK(serveReadsStatus);
-
- // Run the query.
- // bb is used to hold query results
- // this buffer should contain either requested documents per query or
- // explain information, but not both
- BufBuilder bb(32768);
- bb.skip(sizeof(QueryResult::Value));
+ // How many results have we obtained from the executor?
+ int numResults = 0;
- // How many results have we obtained from the executor?
- int numResults = 0;
+ // If we're replaying the oplog, we save the last time that we read.
+ Timestamp slaveReadTill;
- // If we're replaying the oplog, we save the last time that we read.
- Timestamp slaveReadTill;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ // uint64_t numMisplacedDocs = 0;
- BSONObj obj;
- PlanExecutor::ExecState state;
- // uint64_t numMisplacedDocs = 0;
-
- // Get summary info about which plan the executor is using.
- curop.debug().planSummary = Explain::getPlanSummary(exec.get());
+ // Get summary info about which plan the executor is using.
+ curop.debug().planSummary = Explain::getPlanSummary(exec.get());
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // Add result to output buffer.
- bb.appendBuf((void*)obj.objdata(), obj.objsize());
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ // Add result to output buffer.
+ bb.appendBuf((void*)obj.objdata(), obj.objsize());
- // Count the result.
- ++numResults;
+ // Count the result.
+ ++numResults;
- // Possibly note slave's position in the oplog.
- if (pq.isOplogReplay()) {
- BSONElement e = obj["ts"];
- if (Date == e.type() || bsonTimestamp == e.type()) {
- slaveReadTill = e.timestamp();
- }
- }
-
- if (enoughForFirstBatch(pq, numResults, bb.len())) {
- LOG(5) << "Enough for first batch, wantMore=" << pq.wantMore()
- << " batchSize=" << pq.getBatchSize().value_or(0)
- << " numResults=" << numResults
- << endl;
- break;
+ // Possibly note slave's position in the oplog.
+ if (pq.isOplogReplay()) {
+ BSONElement e = obj["ts"];
+ if (Date == e.type() || bsonTimestamp == e.type()) {
+ slaveReadTill = e.timestamp();
}
}
- // If we cache the executor later, we want to deregister it as it receives notifications
- // anyway by virtue of being cached.
- //
- // If we don't cache the executor later, we are deleting it, so it must be deregistered.
- //
- // So, no matter what, deregister the executor.
- exec->deregisterExec();
-
- // Caller expects exceptions thrown in certain cases.
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "Plan executor error during find: " << PlanExecutor::statestr(state)
- << ", stats: " << Explain::statsToBSON(*stats);
- uasserted(17144, "Executor error: " + WorkingSetCommon::toStatusString(obj));
+ if (enoughForFirstBatch(pq, numResults, bb.len())) {
+ LOG(5) << "Enough for first batch, wantMore=" << pq.wantMore()
+ << " batchSize=" << pq.getBatchSize().value_or(0) << " numResults=" << numResults
+ << endl;
+ break;
}
+ }
- // TODO: Currently, chunk ranges are kept around until all ClientCursors created while the
- // chunk belonged on this node are gone. Separating chunk lifetime management from
- // ClientCursor should allow this check to go away.
- if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
- // if the version changed during the query we might be missing some data and its safe to
- // send this as mongos can resend at this point
- throw SendStaleConfigException(nss.ns(), "version changed during initial query",
- shardingVersionAtStart,
- shardingState.getVersion(nss.ns()));
- }
-
- // Fill out curop based on query results. If we have a cursorid, we will fill out curop with
- // this cursorid later.
- long long ccId = 0;
-
- if (shouldSaveCursor(txn, collection, state, exec.get())) {
- // We won't use the executor until it's getMore'd.
- exec->saveState();
-
- // Allocate a new ClientCursor. We don't have to worry about leaking it as it's
- // inserted into a global map by its ctor.
- ClientCursor* cc = new ClientCursor(collection->getCursorManager(),
- exec.release(),
- nss.ns(),
- pq.getOptions(),
- pq.getFilter());
- ccId = cc->cursorid();
-
- if (txn->getClient()->isInDirectClient()) {
- cc->setUnownedRecoveryUnit(txn->recoveryUnit());
- }
- else if (state == PlanExecutor::IS_EOF && pq.isTailable()) {
- // Don't stash the RU for tailable cursors at EOF, let them get a new RU on their
- // next getMore.
- }
- else {
- // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
- // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
- txn->recoveryUnit()->abandonSnapshot();
- cc->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
- OperationContext::kNotInUnitOfWork)
- == OperationContext::kNotInUnitOfWork);
- }
-
- LOG(5) << "caching executor with cursorid " << ccId
- << " after returning " << numResults << " results" << endl;
-
- // TODO document
- if (pq.isOplogReplay() && !slaveReadTill.isNull()) {
- cc->slaveReadTill(slaveReadTill);
- }
+ // If we cache the executor later, we want to deregister it as it receives notifications
+ // anyway by virtue of being cached.
+ //
+ // If we don't cache the executor later, we are deleting it, so it must be deregistered.
+ //
+ // So, no matter what, deregister the executor.
+ exec->deregisterExec();
+
+ // Caller expects exceptions thrown in certain cases.
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "Plan executor error during find: " << PlanExecutor::statestr(state)
+ << ", stats: " << Explain::statsToBSON(*stats);
+ uasserted(17144, "Executor error: " + WorkingSetCommon::toStatusString(obj));
+ }
- // TODO document
- if (pq.isExhaust()) {
- curop.debug().exhaust = true;
- }
+ // TODO: Currently, chunk ranges are kept around until all ClientCursors created while the
+ // chunk belonged on this node are gone. Separating chunk lifetime management from
+ // ClientCursor should allow this check to go away.
+ if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
+ // If the version changed during the query we might be missing some data, and it's safe to
+ // send this exception as mongos can resend the query at this point.
+ throw SendStaleConfigException(nss.ns(),
+ "version changed during initial query",
+ shardingVersionAtStart,
+ shardingState.getVersion(nss.ns()));
+ }
- cc->setPos(numResults);
+ // Fill out curop based on query results. If we have a cursorid, we will fill out curop with
+ // this cursorid later.
+ long long ccId = 0;
+
+ if (shouldSaveCursor(txn, collection, state, exec.get())) {
+ // We won't use the executor until it's getMore'd.
+ exec->saveState();
+
+ // Allocate a new ClientCursor. We don't have to worry about leaking it as it's
+ // inserted into a global map by its ctor.
+ ClientCursor* cc = new ClientCursor(collection->getCursorManager(),
+ exec.release(),
+ nss.ns(),
+ pq.getOptions(),
+ pq.getFilter());
+ ccId = cc->cursorid();
+
+ if (txn->getClient()->isInDirectClient()) {
+ cc->setUnownedRecoveryUnit(txn->recoveryUnit());
+ } else if (state == PlanExecutor::IS_EOF && pq.isTailable()) {
+ // Don't stash the RU for tailable cursors at EOF, let them get a new RU on their
+ // next getMore.
+ } else {
+ // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
+ // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
+ txn->recoveryUnit()->abandonSnapshot();
+ cc->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
+ OperationContext::kNotInUnitOfWork) ==
+ OperationContext::kNotInUnitOfWork);
+ }
- // If the query had a time limit, remaining time is "rolled over" to the cursor (for
- // use by future getmore ops).
- cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
+ LOG(5) << "caching executor with cursorid " << ccId << " after returning " << numResults
+ << " results" << endl;
- endQueryOp(txn, cc->getExecutor(), dbProfilingLevel, numResults, ccId);
+ // TODO document
+ if (pq.isOplogReplay() && !slaveReadTill.isNull()) {
+ cc->slaveReadTill(slaveReadTill);
}
- else {
- LOG(5) << "Not caching executor but returning " << numResults << " results.\n";
- endQueryOp(txn, exec.get(), dbProfilingLevel, numResults, ccId);
+
+ // TODO document
+ if (pq.isExhaust()) {
+ curop.debug().exhaust = true;
}
- // Add the results from the query into the output buffer.
- result.appendData(bb.buf(), bb.len());
- bb.decouple();
+ cc->setPos(numResults);
- // Fill out the output buffer's header.
- QueryResult::View qr = result.header().view2ptr();
- qr.setCursorId(ccId);
- qr.setResultFlagsToOk();
- qr.msgdata().setOperation(opReply);
- qr.setStartingFrom(0);
- qr.setNReturned(numResults);
+ // If the query had a time limit, remaining time is "rolled over" to the cursor (for
+ // use by future getmore ops).
+ cc->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros());
- // curop.debug().exhaust is set above.
- return curop.debug().exhaust ? nss.ns() : "";
+ endQueryOp(txn, cc->getExecutor(), dbProfilingLevel, numResults, ccId);
+ } else {
+ LOG(5) << "Not caching executor but returning " << numResults << " results.\n";
+ endQueryOp(txn, exec.get(), dbProfilingLevel, numResults, ccId);
}
+ // Add the results from the query into the output buffer.
+ result.appendData(bb.buf(), bb.len());
+ bb.decouple();
+
+ // Fill out the output buffer's header.
+ QueryResult::View qr = result.header().view2ptr();
+ qr.setCursorId(ccId);
+ qr.setResultFlagsToOk();
+ qr.msgdata().setOperation(opReply);
+ qr.setStartingFrom(0);
+ qr.setNReturned(numResults);
+
+ // curop.debug().exhaust is set above.
+ return curop.debug().exhaust ? nss.ns() : "";
+}
+
} // namespace mongo
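The tail of runQuery() above returns the namespace only when the exhaust option was set; an empty string means a normal, single-reply query. A minimal caller sketch, assuming illustrative names ('q', 'nss', and 'reply' are placeholders rather than identifiers from this change):

    Message reply;
    // Signature as declared in find.h below: runQuery(txn, q, nss, reply).
    std::string exhaustNs = runQuery(txn, q, nss, reply);
    if (!exhaustNs.empty()) {
        // Exhaust mode: the network layer keeps streaming getMore batches for this
        // namespace on the same connection instead of waiting for further requests.
    }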
diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h
index 81803b66364..330afc76b90 100644
--- a/src/mongo/db/query/find.h
+++ b/src/mongo/db/query/find.h
@@ -38,132 +38,132 @@
namespace mongo {
- class NamespaceString;
- class OperationContext;
+class NamespaceString;
+class OperationContext;
- class ScopedRecoveryUnitSwapper {
- public:
- ScopedRecoveryUnitSwapper(ClientCursor* cc, OperationContext* txn);
+class ScopedRecoveryUnitSwapper {
+public:
+ ScopedRecoveryUnitSwapper(ClientCursor* cc, OperationContext* txn);
- ~ScopedRecoveryUnitSwapper();
+ ~ScopedRecoveryUnitSwapper();
- /**
- * Dismissing the RU swapper causes it to simply free the recovery unit rather than swapping
- * it back into the ClientCursor.
- */
- void dismiss();
+ /**
+ * Dismissing the RU swapper causes it to simply free the recovery unit rather than swapping
+ * it back into the ClientCursor.
+ */
+ void dismiss();
- private:
- ClientCursor* _cc;
- OperationContext* _txn;
- bool _dismissed;
+private:
+ ClientCursor* _cc;
+ OperationContext* _txn;
+ bool _dismissed;
- std::unique_ptr<RecoveryUnit> _txnPreviousRecoveryUnit;
- OperationContext::RecoveryUnitState _txnPreviousRecoveryUnitState;
- };
+ std::unique_ptr<RecoveryUnit> _txnPreviousRecoveryUnit;
+ OperationContext::RecoveryUnitState _txnPreviousRecoveryUnitState;
+};
- /**
- * Returns true if enough results have been prepared to stop adding more to the first batch.
- *
- * Should be called *after* adding to the result set rather than before.
- */
- bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered);
+/**
+ * Returns true if enough results have been prepared to stop adding more to the first batch.
+ *
+ * Should be called *after* adding to the result set rather than before.
+ */
+bool enoughForFirstBatch(const LiteParsedQuery& pq, int numDocs, int bytesBuffered);
- /**
- * Returns true if enough results have been prepared to stop adding more to a getMore batch.
- *
- * Should be called *after* adding to the result set rather than before.
- */
- bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered);
+/**
+ * Returns true if enough results have been prepared to stop adding more to a getMore batch.
+ *
+ * Should be called *after* adding to the result set rather than before.
+ */
+bool enoughForGetMore(int ntoreturn, int numDocs, int bytesBuffered);
- /**
- * Whether or not the ClientCursor* is tailable.
- */
- bool isCursorTailable(const ClientCursor* cursor);
+/**
+ * Whether or not the ClientCursor* is tailable.
+ */
+bool isCursorTailable(const ClientCursor* cursor);
- /**
- * Whether or not the ClientCursor* has the awaitData flag set.
- */
- bool isCursorAwaitData(const ClientCursor* cursor);
+/**
+ * Whether or not the ClientCursor* has the awaitData flag set.
+ */
+bool isCursorAwaitData(const ClientCursor* cursor);
- /**
- * Returns true if we should keep a cursor around because we're expecting to return more query
- * results.
- *
- * If false, the caller should close the cursor and indicate this to the client by sending back
- * a cursor ID of 0.
- */
- bool shouldSaveCursor(OperationContext* txn,
- const Collection* collection,
- PlanExecutor::ExecState finalState,
- PlanExecutor* exec);
+/**
+ * Returns true if we should keep a cursor around because we're expecting to return more query
+ * results.
+ *
+ * If false, the caller should close the cursor and indicate this to the client by sending back
+ * a cursor ID of 0.
+ */
+bool shouldSaveCursor(OperationContext* txn,
+ const Collection* collection,
+ PlanExecutor::ExecState finalState,
+ PlanExecutor* exec);
- /**
- * Similar to shouldSaveCursor(), but used in getMore to determine whether we should keep
- * the cursor around for additional getMores().
- *
- * If false, the caller should close the cursor and indicate this to the client by sending back
- * a cursor ID of 0.
- */
- bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
- PlanExecutor* exec,
- bool isTailable);
+/**
+ * Similar to shouldSaveCursor(), but used in getMore to determine whether we should keep
+ * the cursor around for additional getMores().
+ *
+ * If false, the caller should close the cursor and indicate this to the client by sending back
+ * a cursor ID of 0.
+ */
+bool shouldSaveCursorGetMore(PlanExecutor::ExecState finalState,
+ PlanExecutor* exec,
+ bool isTailable);
- /**
- * Fills out the CurOp for "txn" with information about this query.
- */
- void beginQueryOp(OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& queryObj,
- int ntoreturn,
- int ntoskip);
+/**
+ * Fills out the CurOp for "txn" with information about this query.
+ */
+void beginQueryOp(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& queryObj,
+ int ntoreturn,
+ int ntoskip);
- /**
- * Fills out CurOp for "txn" with information regarding this query's execution.
- *
- * Uses explain functionality to extract stats from 'exec'.
- *
- * The database profiling level, 'dbProfilingLevel', is used to conditionalize whether or not we
- * do expensive stats gathering.
- */
- void endQueryOp(OperationContext* txn,
- PlanExecutor* exec,
- int dbProfilingLevel,
- int numResults,
- CursorId cursorId);
+/**
+ * Fills out CurOp for "txn" with information regarding this query's execution.
+ *
+ * Uses explain functionality to extract stats from 'exec'.
+ *
+ * The database profiling level, 'dbProfilingLevel', is used to conditionalize whether or not we
+ * do expensive stats gathering.
+ */
+void endQueryOp(OperationContext* txn,
+ PlanExecutor* exec,
+ int dbProfilingLevel,
+ int numResults,
+ CursorId cursorId);
- /**
- * Constructs a PlanExecutor for a query with the oplogReplay option set to true,
- * for the query 'cq' over the collection 'collection'. The PlanExecutor will
- * wrap a singleton OplogStart stage.
- *
- * The oplog start finding hack requires that 'cq' has a $gt or $gte predicate over
- * a field named 'ts'.
- *
- * On success, caller takes ownership of *execOut.
- */
- Status getOplogStartHack(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* cq,
- PlanExecutor** execOut);
+/**
+ * Constructs a PlanExecutor for a query with the oplogReplay option set to true,
+ * for the query 'cq' over the collection 'collection'. The PlanExecutor will
+ * wrap a singleton OplogStart stage.
+ *
+ * The oplog start finding hack requires that 'cq' has a $gt or $gte predicate over
+ * a field named 'ts'.
+ *
+ * On success, caller takes ownership of *execOut.
+ */
+Status getOplogStartHack(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* cq,
+ PlanExecutor** execOut);
- /**
- * Called from the getMore entry point in ops/query.cpp.
- */
- QueryResult::View getMore(OperationContext* txn,
- const char* ns,
- int ntoreturn,
- long long cursorid,
- int pass,
- bool& exhaust,
- bool* isCursorAuthorized);
+/**
+ * Called from the getMore entry point in ops/query.cpp.
+ */
+QueryResult::View getMore(OperationContext* txn,
+ const char* ns,
+ int ntoreturn,
+ long long cursorid,
+ int pass,
+ bool& exhaust,
+ bool* isCursorAuthorized);
- /**
- * Run the query 'q' and place the result in 'result'.
- */
- std::string runQuery(OperationContext* txn,
- QueryMessage& q,
- const NamespaceString& ns,
- Message &result);
+/**
+ * Run the query 'q' and place the result in 'result'.
+ */
+std::string runQuery(OperationContext* txn,
+ QueryMessage& q,
+ const NamespaceString& ns,
+ Message& result);
} // namespace mongo
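The helpers declared above cooperate in the OP_QUERY path; the following condensed sketch simply restates the runQuery() loop shown earlier in this diff (no new behavior, variable names taken from that hunk):

    int numResults = 0;
    BSONObj obj;
    PlanExecutor::ExecState state;
    while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
        bb.appendBuf((void*)obj.objdata(), obj.objsize());
        ++numResults;
        // Checked *after* appending, as the enoughForFirstBatch() comment requires.
        if (enoughForFirstBatch(pq, numResults, bb.len())) {
            break;
        }
    }
    long long ccId = 0;
    if (shouldSaveCursor(txn, collection, state, exec.get())) {
        // Keep the executor alive in a ClientCursor; ccId becomes its cursor id.
    } else {
        // A cursor id of 0 tells the client there is nothing left to fetch.
    }
    endQueryOp(txn, exec.get(), dbProfilingLevel, numResults, ccId);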
diff --git a/src/mongo/db/query/find_and_modify_request.cpp b/src/mongo/db/query/find_and_modify_request.cpp
index 1b960198d1a..2db1c61af80 100644
--- a/src/mongo/db/query/find_and_modify_request.cpp
+++ b/src/mongo/db/query/find_and_modify_request.cpp
@@ -37,175 +37,166 @@
namespace mongo {
namespace {
- const char kCmdName[] = "findAndModify";
- const char kQueryField[] = "query";
- const char kSortField[] = "sort";
- const char kRemoveField[] = "remove";
- const char kUpdateField[] = "update";
- const char kNewField[] = "new";
- const char kFieldProjectionField[] = "fields";
- const char kUpsertField[] = "upsert";
- const char kWriteConcernField[] = "writeConcern";
-
-} // unnamed namespace
-
- FindAndModifyRequest::FindAndModifyRequest(NamespaceString fullNs,
- BSONObj query,
- BSONObj updateObj):
- _ns(std::move(fullNs)),
- _query(query.getOwned()),
- _updateObj(updateObj.getOwned()),
- _isRemove(false) {
- }
+const char kCmdName[] = "findAndModify";
+const char kQueryField[] = "query";
+const char kSortField[] = "sort";
+const char kRemoveField[] = "remove";
+const char kUpdateField[] = "update";
+const char kNewField[] = "new";
+const char kFieldProjectionField[] = "fields";
+const char kUpsertField[] = "upsert";
+const char kWriteConcernField[] = "writeConcern";
+
+} // unnamed namespace
+
+FindAndModifyRequest::FindAndModifyRequest(NamespaceString fullNs, BSONObj query, BSONObj updateObj)
+ : _ns(std::move(fullNs)),
+ _query(query.getOwned()),
+ _updateObj(updateObj.getOwned()),
+ _isRemove(false) {}
+
+FindAndModifyRequest FindAndModifyRequest::makeUpdate(NamespaceString fullNs,
+ BSONObj query,
+ BSONObj updateObj) {
+ return FindAndModifyRequest(fullNs, query, updateObj);
+}
- FindAndModifyRequest FindAndModifyRequest::makeUpdate(NamespaceString fullNs,
- BSONObj query,
- BSONObj updateObj) {
- return FindAndModifyRequest(fullNs, query, updateObj);
- }
+FindAndModifyRequest FindAndModifyRequest::makeRemove(NamespaceString fullNs, BSONObj query) {
+ FindAndModifyRequest request(fullNs, query, BSONObj());
+ request._isRemove = true;
+ return request;
+}
- FindAndModifyRequest FindAndModifyRequest::makeRemove(NamespaceString fullNs,
- BSONObj query) {
- FindAndModifyRequest request(fullNs, query, BSONObj());
- request._isRemove = true;
- return request;
- }
+BSONObj FindAndModifyRequest::toBSON() const {
+ BSONObjBuilder builder;
- BSONObj FindAndModifyRequest::toBSON() const {
- BSONObjBuilder builder;
+ builder.append(kCmdName, _ns.coll());
+ builder.append(kQueryField, _query);
- builder.append(kCmdName, _ns.coll());
- builder.append(kQueryField, _query);
+ if (_isRemove) {
+ builder.append(kRemoveField, true);
+ } else {
+ builder.append(kUpdateField, _updateObj);
- if (_isRemove) {
- builder.append(kRemoveField, true);
+ if (_isUpsert) {
+ builder.append(kUpsertField, _isUpsert.get());
}
- else {
- builder.append(kUpdateField, _updateObj);
+ }
- if (_isUpsert) {
- builder.append(kUpsertField, _isUpsert.get());
- }
- }
+ if (_fieldProjection) {
+ builder.append(kFieldProjectionField, _fieldProjection.get());
+ }
- if (_fieldProjection) {
- builder.append(kFieldProjectionField, _fieldProjection.get());
- }
+ if (_sort) {
+ builder.append(kSortField, _sort.get());
+ }
- if (_sort) {
- builder.append(kSortField, _sort.get());
- }
+ if (_shouldReturnNew) {
+ builder.append(kNewField, _shouldReturnNew.get());
+ }
- if (_shouldReturnNew) {
- builder.append(kNewField, _shouldReturnNew.get());
- }
+ if (_writeConcern) {
+ builder.append(kWriteConcernField, _writeConcern->toBSON());
+ }
- if (_writeConcern) {
- builder.append(kWriteConcernField, _writeConcern->toBSON());
- }
+ return builder.obj();
+}
+
+StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceString fullNs,
+ const BSONObj& cmdObj) {
+ BSONObj query = cmdObj.getObjectField(kQueryField);
+ BSONObj fields = cmdObj.getObjectField(kFieldProjectionField);
+ BSONObj updateObj = cmdObj.getObjectField(kUpdateField);
+ BSONObj sort = cmdObj.getObjectField(kSortField);
+ bool shouldReturnNew = cmdObj[kNewField].trueValue();
+ bool isUpsert = cmdObj[kUpsertField].trueValue();
+ bool isRemove = cmdObj[kRemoveField].trueValue();
+ bool isUpdate = cmdObj.hasField(kUpdateField);
- return builder.obj();
+ if (!isRemove && !isUpdate) {
+ return {ErrorCodes::FailedToParse, "Either an update or remove=true must be specified"};
}
- StatusWith<FindAndModifyRequest> FindAndModifyRequest::parseFromBSON(NamespaceString fullNs,
- const BSONObj& cmdObj) {
- BSONObj query = cmdObj.getObjectField(kQueryField);
- BSONObj fields = cmdObj.getObjectField(kFieldProjectionField);
- BSONObj updateObj = cmdObj.getObjectField(kUpdateField);
- BSONObj sort = cmdObj.getObjectField(kSortField);
- bool shouldReturnNew = cmdObj[kNewField].trueValue();
- bool isUpsert = cmdObj[kUpsertField].trueValue();
- bool isRemove = cmdObj[kRemoveField].trueValue();
- bool isUpdate = cmdObj.hasField(kUpdateField);
-
- if (!isRemove && !isUpdate) {
- return {ErrorCodes::FailedToParse,
- "Either an update or remove=true must be specified"};
+ if (isRemove) {
+ if (isUpdate) {
+ return {ErrorCodes::FailedToParse, "Cannot specify both an update and remove=true"};
}
- if (isRemove) {
- if (isUpdate) {
- return {ErrorCodes::FailedToParse,
- "Cannot specify both an update and remove=true"};
- }
-
- if (isUpsert) {
- return {ErrorCodes::FailedToParse,
- "Cannot specify both upsert=true and remove=true"};
- }
+ if (isUpsert) {
+ return {ErrorCodes::FailedToParse, "Cannot specify both upsert=true and remove=true"};
+ }
- if (shouldReturnNew) {
- return {ErrorCodes::FailedToParse,
+ if (shouldReturnNew) {
+ return {ErrorCodes::FailedToParse,
"Cannot specify both new=true and remove=true;"
" 'remove' always returns the deleted document"};
- }
}
+ }
- FindAndModifyRequest request(std::move(fullNs), query, updateObj);
- request._isRemove = isRemove;
- request.setFieldProjection(fields);
- request.setSort(sort);
-
- if (!isRemove) {
- request.setShouldReturnNew(shouldReturnNew);
- request.setUpsert(isUpsert);
- }
+ FindAndModifyRequest request(std::move(fullNs), query, updateObj);
+ request._isRemove = isRemove;
+ request.setFieldProjection(fields);
+ request.setSort(sort);
- return request;
+ if (!isRemove) {
+ request.setShouldReturnNew(shouldReturnNew);
+ request.setUpsert(isUpsert);
}
- void FindAndModifyRequest::setFieldProjection(BSONObj fields) {
- _fieldProjection = fields.getOwned();
- }
+ return request;
+}
- void FindAndModifyRequest::setSort(BSONObj sort) {
- _sort = sort.getOwned();
- }
+void FindAndModifyRequest::setFieldProjection(BSONObj fields) {
+ _fieldProjection = fields.getOwned();
+}
- void FindAndModifyRequest::setShouldReturnNew(bool shouldReturnNew) {
- dassert(!_isRemove);
- _shouldReturnNew = shouldReturnNew;
- }
+void FindAndModifyRequest::setSort(BSONObj sort) {
+ _sort = sort.getOwned();
+}
- void FindAndModifyRequest::setUpsert(bool upsert) {
- dassert(!_isRemove);
- _isUpsert = upsert;
- }
+void FindAndModifyRequest::setShouldReturnNew(bool shouldReturnNew) {
+ dassert(!_isRemove);
+ _shouldReturnNew = shouldReturnNew;
+}
- void FindAndModifyRequest::setWriteConcern(WriteConcernOptions writeConcern) {
- _writeConcern = std::move(writeConcern);
- }
+void FindAndModifyRequest::setUpsert(bool upsert) {
+ dassert(!_isRemove);
+ _isUpsert = upsert;
+}
- const NamespaceString& FindAndModifyRequest::getNamespaceString() const {
- return _ns;
- }
+void FindAndModifyRequest::setWriteConcern(WriteConcernOptions writeConcern) {
+ _writeConcern = std::move(writeConcern);
+}
- BSONObj FindAndModifyRequest::getQuery() const {
- return _query;
- }
+const NamespaceString& FindAndModifyRequest::getNamespaceString() const {
+ return _ns;
+}
- BSONObj FindAndModifyRequest::getFields() const {
- return _fieldProjection.value_or(BSONObj());
- }
+BSONObj FindAndModifyRequest::getQuery() const {
+ return _query;
+}
- BSONObj FindAndModifyRequest::getUpdateObj() const {
- return _updateObj;
- }
+BSONObj FindAndModifyRequest::getFields() const {
+ return _fieldProjection.value_or(BSONObj());
+}
- BSONObj FindAndModifyRequest::getSort() const {
- return _sort.value_or(BSONObj());
- }
+BSONObj FindAndModifyRequest::getUpdateObj() const {
+ return _updateObj;
+}
- bool FindAndModifyRequest::shouldReturnNew() const {
- return _shouldReturnNew.value_or(false);
- }
+BSONObj FindAndModifyRequest::getSort() const {
+ return _sort.value_or(BSONObj());
+}
- bool FindAndModifyRequest::isUpsert() const {
- return _isUpsert.value_or(false);
- }
+bool FindAndModifyRequest::shouldReturnNew() const {
+ return _shouldReturnNew.value_or(false);
+}
- bool FindAndModifyRequest::isRemove() const {
- return _isRemove;
- }
+bool FindAndModifyRequest::isUpsert() const {
+ return _isUpsert.value_or(false);
+}
+bool FindAndModifyRequest::isRemove() const {
+ return _isRemove;
+}
}
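A short usage sketch for the request class implemented above, mirroring the unit tests later in this diff; the namespace and field names are illustrative only:

    auto request = FindAndModifyRequest::makeUpdate(
        NamespaceString("test.user"), BSON("x" << 1), BSON("y" << 1));
    request.setUpsert(true);
    request.setSort(BSON("z" << -1));
    // Serializes to: { findAndModify: 'user', query: {x:1}, update: {y:1}, upsert: true, sort: {z:-1} }
    BSONObj cmd = request.toBSON();

    // Parsing back preserves the semantics but not necessarily the exact field layout
    // (see the round-trip note in find_and_modify_request.h).
    auto parsed = FindAndModifyRequest::parseFromBSON(NamespaceString("test.user"), cmd);
    invariant(parsed.isOK() && parsed.getValue().isUpsert());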
diff --git a/src/mongo/db/query/find_and_modify_request.h b/src/mongo/db/query/find_and_modify_request.h
index 353869abee6..b4252c30805 100644
--- a/src/mongo/db/query/find_and_modify_request.h
+++ b/src/mongo/db/query/find_and_modify_request.h
@@ -36,126 +36,123 @@
namespace mongo {
- template <typename T> class StatusWith;
+template <typename T>
+class StatusWith;
+
+/**
+ * Represents the user-supplied options to the findAndModify command. Note that this
+ * does not offer round trip preservation. For example, for the case where
+ * output = parseBSON(input).toBSON(), 'output' is not guaranteed to be equal to 'input'.
+ * However, the semantic meaning of 'output' will be the same as 'input'.
+ *
+ * The BSONObj members contained within this struct are owned objects.
+ */
+class FindAndModifyRequest {
+public:
+ /**
+ * Creates a new instance of an 'update' type findAndModify request.
+ */
+ static FindAndModifyRequest makeUpdate(NamespaceString fullNs,
+ BSONObj query,
+ BSONObj updateObj);
/**
- * Represents the user-supplied options to the findAndModify command. Note that this
- * does not offer round trip preservation. For example, for the case where
- * output = parseBSON(input).toBSON(), 'output' is not guaranteed to be equal to 'input'.
- * However, the semantic meaning of 'output' will be the same with 'input'.
+ * Creates a new instance of a 'remove' type findAndModify request.
+ */
+ static FindAndModifyRequest makeRemove(NamespaceString fullNs, BSONObj query);
+
+ /**
+ * Create a new instance of FindAndModifyRequest from a valid BSONObj.
+ * Returns an error if the BSONObj is malformed.
+ * Format:
+ *
+ * {
+ * findAndModify: <collection-name>,
+ * query: <document>,
+ * sort: <document>,
+ * remove: <boolean>,
+ * update: <document>,
+ * new: <boolean>,
+ * fields: <document>,
+ * upsert: <boolean>
+ * }
*
- * The BSONObj members contained within this struct are owned objects.
+ * Note: does not parse the writeConcern field or the findAndModify field.
+ */
+ static StatusWith<FindAndModifyRequest> parseFromBSON(NamespaceString fullNs,
+ const BSONObj& cmdObj);
+
+ /**
+ * Serializes this object into a BSON representation. Fields that are not
+ * set will not be part of the serialized object.
+ */
+ BSONObj toBSON() const;
+
+ const NamespaceString& getNamespaceString() const;
+ BSONObj getQuery() const;
+ BSONObj getFields() const;
+ BSONObj getUpdateObj() const;
+ BSONObj getSort() const;
+ bool shouldReturnNew() const;
+ bool isUpsert() const;
+ bool isRemove() const;
+
+ // Not implemented. Use extractWriteConcern() to get the setting instead.
+ WriteConcernOptions getWriteConcern() const;
+
+ //
+ // Setters for update type request only.
+ //
+
+ /**
+ * If shouldReturnNew is true, the findAndModify response should return the document
+ * after the modification was applied if the query matched a document. Otherwise,
+ * it will return the matched document before the modification.
*/
- class FindAndModifyRequest {
- public:
-
- /**
- * Creates a new instance of an 'update' type findAndModify request.
- */
- static FindAndModifyRequest makeUpdate(NamespaceString fullNs,
- BSONObj query,
- BSONObj updateObj);
-
- /**
- * Creates a new instance of an 'remove' type findAndModify request.
- */
- static FindAndModifyRequest makeRemove(NamespaceString fullNs,
- BSONObj query);
-
- /**
- * Create a new instance of FindAndModifyRequest from a valid BSONObj.
- * Returns an error if the BSONObj is malformed.
- * Format:
- *
- * {
- * findAndModify: <collection-name>,
- * query: <document>,
- * sort: <document>,
- * remove: <boolean>,
- * update: <document>,
- * new: <boolean>,
- * fields: <document>,
- * upsert: <boolean>
- * }
- *
- * Note: does not parse the writeConcern field or the findAndModify field.
- */
- static StatusWith<FindAndModifyRequest> parseFromBSON(NamespaceString fullNs,
- const BSONObj& cmdObj);
-
- /**
- * Serializes this object into a BSON representation. Fields that are not
- * set will not be part of the the serialized object.
- */
- BSONObj toBSON() const;
-
- const NamespaceString& getNamespaceString() const;
- BSONObj getQuery() const;
- BSONObj getFields() const;
- BSONObj getUpdateObj() const;
- BSONObj getSort() const;
- bool shouldReturnNew() const;
- bool isUpsert() const;
- bool isRemove() const;
-
- // Not implemented. Use extractWriteConcern() to get the setting instead.
- WriteConcernOptions getWriteConcern() const;
-
- //
- // Setters for update type request only.
- //
-
- /**
- * If shouldReturnNew is new, the findAndModify response should return the document
- * after the modification was applied if the query matched a document. Otherwise,
- * it will return the matched document before the modification.
- */
- void setShouldReturnNew(bool shouldReturnNew);
-
- void setUpsert(bool upsert);
-
- //
- // Setters for optional parameters
- //
-
- /**
- * Specifies the field to project on the matched document.
- */
- void setFieldProjection(BSONObj fields);
-
- /**
- * Sets the sort order for the query. In cases where the query yields multiple matches,
- * only the first document based on the sort order will be modified/removed.
- */
- void setSort(BSONObj sort);
-
- /**
- * Sets the write concern for this request.
- */
- void setWriteConcern(WriteConcernOptions writeConcern);
-
- private:
- /**
- * Creates a new FindAndModifyRequest with the required fields.
- */
- FindAndModifyRequest(NamespaceString fullNs,
- BSONObj query,
- BSONObj updateObj);
-
- // Required fields
- const NamespaceString _ns;
- const BSONObj _query;
-
- // Required for updates
- const BSONObj _updateObj;
-
- boost::optional<bool> _isUpsert;
- boost::optional<BSONObj> _fieldProjection;
- boost::optional<BSONObj> _sort;
- boost::optional<bool> _shouldReturnNew;
- boost::optional<WriteConcernOptions> _writeConcern;
-
- // Flag used internally to differentiate whether this is an update or remove type request.
- bool _isRemove;
- };
+ void setShouldReturnNew(bool shouldReturnNew);
+
+ void setUpsert(bool upsert);
+
+ //
+ // Setters for optional parameters
+ //
+
+ /**
+ * Specifies the field to project on the matched document.
+ */
+ void setFieldProjection(BSONObj fields);
+
+ /**
+ * Sets the sort order for the query. In cases where the query yields multiple matches,
+ * only the first document based on the sort order will be modified/removed.
+ */
+ void setSort(BSONObj sort);
+
+ /**
+ * Sets the write concern for this request.
+ */
+ void setWriteConcern(WriteConcernOptions writeConcern);
+
+private:
+ /**
+ * Creates a new FindAndModifyRequest with the required fields.
+ */
+ FindAndModifyRequest(NamespaceString fullNs, BSONObj query, BSONObj updateObj);
+
+ // Required fields
+ const NamespaceString _ns;
+ const BSONObj _query;
+
+ // Required for updates
+ const BSONObj _updateObj;
+
+ boost::optional<bool> _isUpsert;
+ boost::optional<BSONObj> _fieldProjection;
+ boost::optional<BSONObj> _sort;
+ boost::optional<bool> _shouldReturnNew;
+ boost::optional<WriteConcernOptions> _writeConcern;
+
+ // Flag used internally to differentiate whether this is an update or remove type request.
+ bool _isRemove;
+};
}
diff --git a/src/mongo/db/query/find_and_modify_request_test.cpp b/src/mongo/db/query/find_and_modify_request_test.cpp
index bde2c48ac2e..27490715e02 100644
--- a/src/mongo/db/query/find_and_modify_request_test.cpp
+++ b/src/mongo/db/query/find_and_modify_request_test.cpp
@@ -35,173 +35,155 @@
namespace mongo {
namespace {
- TEST(FindAndModifyRequest, BasicUpdate) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
-
- BSONObj expectedObj(fromjson(R"json({
+TEST(FindAndModifyRequest, BasicUpdate) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithUpsert) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setUpsert(true);
+TEST(FindAndModifyRequest, UpdateWithUpsert) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setUpsert(true);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
upsert: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithUpsertFalse) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setUpsert(false);
+TEST(FindAndModifyRequest, UpdateWithUpsertFalse) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setUpsert(false);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
upsert: false
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithProjection) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- const BSONObj field(BSON("z" << 1));
+TEST(FindAndModifyRequest, UpdateWithProjection) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ const BSONObj field(BSON("z" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setFieldProjection(field);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setFieldProjection(field);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
fields: { z: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithNewTrue) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
+TEST(FindAndModifyRequest, UpdateWithNewTrue) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setShouldReturnNew(true);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setShouldReturnNew(true);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
new: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithNewFalse) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
+TEST(FindAndModifyRequest, UpdateWithNewFalse) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setShouldReturnNew(false);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setShouldReturnNew(false);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
new: false
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithSort) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- const BSONObj sort(BSON("z" << -1));
+TEST(FindAndModifyRequest, UpdateWithSort) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ const BSONObj sort(BSON("z" << -1));
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setSort(sort);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setSort(sort);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
sort: { z: -1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, UpdateWithWriteConcern) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
+TEST(FindAndModifyRequest, UpdateWithWriteConcern) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setWriteConcern(writeConcern);
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setWriteConcern(writeConcern);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
-
- TEST(FindAndModifyRequest, UpdateWithFullSpec) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj update(BSON("y" << 1));
- const BSONObj sort(BSON("z" << -1));
- const BSONObj field(BSON("x" << 1 << "y" << 1));
- const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
-
- auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"),
- query,
- update);
- request.setFieldProjection(field);
- request.setShouldReturnNew(true);
- request.setSort(sort);
- request.setWriteConcern(writeConcern);
- request.setUpsert(true);
-
- BSONObj expectedObj(fromjson(R"json({
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
+
+TEST(FindAndModifyRequest, UpdateWithFullSpec) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj update(BSON("y" << 1));
+ const BSONObj sort(BSON("z" << -1));
+ const BSONObj field(BSON("x" << 1 << "y" << 1));
+ const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
+
+ auto request = FindAndModifyRequest::makeUpdate(NamespaceString("test.user"), query, update);
+ request.setFieldProjection(field);
+ request.setShouldReturnNew(true);
+ request.setSort(sort);
+ request.setWriteConcern(writeConcern);
+ request.setUpsert(true);
+
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
@@ -212,85 +194,85 @@ namespace {
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, BasicRemove) {
- const BSONObj query(BSON("x" << 1));
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+TEST(FindAndModifyRequest, BasicRemove) {
+ const BSONObj query(BSON("x" << 1));
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, RemoveWithProjection) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj field(BSON("z" << 1));
+TEST(FindAndModifyRequest, RemoveWithProjection) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj field(BSON("z" << 1));
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- request.setFieldProjection(field);
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+ request.setFieldProjection(field);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
fields: { z: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, RemoveWithSort) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj sort(BSON("z" << -1));
+TEST(FindAndModifyRequest, RemoveWithSort) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj sort(BSON("z" << -1));
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- request.setSort(sort);
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+ request.setSort(sort);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
sort: { z: -1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, RemoveWithWriteConcern) {
- const BSONObj query(BSON("x" << 1));
- const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
+TEST(FindAndModifyRequest, RemoveWithWriteConcern) {
+ const BSONObj query(BSON("x" << 1));
+ const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- request.setWriteConcern(writeConcern);
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+ request.setWriteConcern(writeConcern);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, RemoveWithFullSpec) {
- const BSONObj query(BSON("x" << 1));
- const BSONObj sort(BSON("z" << -1));
- const BSONObj field(BSON("x" << 1 << "y" << 1));
- const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
+TEST(FindAndModifyRequest, RemoveWithFullSpec) {
+ const BSONObj query(BSON("x" << 1));
+ const BSONObj sort(BSON("z" << -1));
+ const BSONObj field(BSON("x" << 1 << "y" << 1));
+ const WriteConcernOptions writeConcern(2, WriteConcernOptions::FSYNC, 150);
- auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
- request.setFieldProjection(field);
- request.setSort(sort);
- request.setWriteConcern(writeConcern);
+ auto request = FindAndModifyRequest::makeRemove(NamespaceString("test.user"), query);
+ request.setFieldProjection(field);
+ request.setSort(sort);
+ request.setWriteConcern(writeConcern);
- BSONObj expectedObj(fromjson(R"json({
+ BSONObj expectedObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
@@ -299,31 +281,31 @@ namespace {
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
- }
+ ASSERT_EQUALS(expectedObj, request.toBSON());
+}
- TEST(FindAndModifyRequest, ParseWithUpdateOnlyRequiredFields) {
- BSONObj cmdObj(fromjson(R"json({
+TEST(FindAndModifyRequest, ParseWithUpdateOnlyRequiredFields) {
+ BSONObj cmdObj(fromjson(R"json({
query: { x: 1 },
update: { y: 1 }
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_OK(parseStatus.getStatus());
-
- auto request = parseStatus.getValue();
- ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
- ASSERT_EQUALS(false, request.isUpsert());
- ASSERT_EQUALS(false, request.isRemove());
- ASSERT_EQUALS(BSONObj(), request.getFields());
- ASSERT_EQUALS(BSONObj(), request.getSort());
- ASSERT_EQUALS(false, request.shouldReturnNew());
- }
-
- TEST(FindAndModifyRequest, ParseWithUpdateFullSpec) {
- BSONObj cmdObj(fromjson(R"json({
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_OK(parseStatus.getStatus());
+
+ auto request = parseStatus.getValue();
+ ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
+ ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
+ ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
+ ASSERT_EQUALS(false, request.isUpsert());
+ ASSERT_EQUALS(false, request.isRemove());
+ ASSERT_EQUALS(BSONObj(), request.getFields());
+ ASSERT_EQUALS(BSONObj(), request.getSort());
+ ASSERT_EQUALS(false, request.shouldReturnNew());
+}
+
+TEST(FindAndModifyRequest, ParseWithUpdateFullSpec) {
+ BSONObj cmdObj(fromjson(R"json({
query: { x: 1 },
update: { y: 1 },
upsert: true,
@@ -332,42 +314,42 @@ namespace {
new: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_OK(parseStatus.getStatus());
-
- auto request = parseStatus.getValue();
- ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
- ASSERT_EQUALS(true, request.isUpsert());
- ASSERT_EQUALS(false, request.isRemove());
- ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
- ASSERT_EQUALS(BSON("z" << -1), request.getSort());
- ASSERT_EQUALS(true, request.shouldReturnNew());
- }
-
- TEST(FindAndModifyRequest, ParseWithRemoveOnlyRequiredFields) {
- BSONObj cmdObj(fromjson(R"json({
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_OK(parseStatus.getStatus());
+
+ auto request = parseStatus.getValue();
+ ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
+ ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
+ ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
+ ASSERT_EQUALS(true, request.isUpsert());
+ ASSERT_EQUALS(false, request.isRemove());
+ ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
+ ASSERT_EQUALS(BSON("z" << -1), request.getSort());
+ ASSERT_EQUALS(true, request.shouldReturnNew());
+}
+
+TEST(FindAndModifyRequest, ParseWithRemoveOnlyRequiredFields) {
+ BSONObj cmdObj(fromjson(R"json({
query: { x: 1 },
remove: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_OK(parseStatus.getStatus());
-
- auto request = parseStatus.getValue();
- ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
- ASSERT_EQUALS(false, request.isUpsert());
- ASSERT_EQUALS(true, request.isRemove());
- ASSERT_EQUALS(BSONObj(), request.getFields());
- ASSERT_EQUALS(BSONObj(), request.getSort());
- ASSERT_EQUALS(false, request.shouldReturnNew());
- }
-
- TEST(FindAndModifyRequest, ParseWithRemoveFullSpec) {
- BSONObj cmdObj(fromjson(R"json({
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_OK(parseStatus.getStatus());
+
+ auto request = parseStatus.getValue();
+ ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
+ ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
+ ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
+ ASSERT_EQUALS(false, request.isUpsert());
+ ASSERT_EQUALS(true, request.isRemove());
+ ASSERT_EQUALS(BSONObj(), request.getFields());
+ ASSERT_EQUALS(BSONObj(), request.getSort());
+ ASSERT_EQUALS(false, request.shouldReturnNew());
+}
+
+TEST(FindAndModifyRequest, ParseWithRemoveFullSpec) {
+ BSONObj cmdObj(fromjson(R"json({
query: { x: 1 },
remove: true,
fields: { x: 1, y: 1 },
@@ -375,65 +357,65 @@ namespace {
new: false
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_OK(parseStatus.getStatus());
-
- auto request = parseStatus.getValue();
- ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
- ASSERT_EQUALS(false, request.isUpsert());
- ASSERT_EQUALS(true, request.isRemove());
- ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
- ASSERT_EQUALS(BSON("z" << -1), request.getSort());
- ASSERT_EQUALS(false, request.shouldReturnNew());
- }
-
- TEST(FindAndModifyRequest, ParseWithIncompleteSpec) {
- BSONObj cmdObj(fromjson(R"json({
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_OK(parseStatus.getStatus());
+
+ auto request = parseStatus.getValue();
+ ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
+ ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
+ ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
+ ASSERT_EQUALS(false, request.isUpsert());
+ ASSERT_EQUALS(true, request.isRemove());
+ ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
+ ASSERT_EQUALS(BSON("z" << -1), request.getSort());
+ ASSERT_EQUALS(false, request.shouldReturnNew());
+}
+
+TEST(FindAndModifyRequest, ParseWithIncompleteSpec) {
+ BSONObj cmdObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 }
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_NOT_OK(parseStatus.getStatus());
- }
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_NOT_OK(parseStatus.getStatus());
+}
- TEST(FindAndModifyRequest, ParseWithAmbiguousUpdateRemove) {
- BSONObj cmdObj(fromjson(R"json({
+TEST(FindAndModifyRequest, ParseWithAmbiguousUpdateRemove) {
+ BSONObj cmdObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
update: { y: 1 },
remove: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_NOT_OK(parseStatus.getStatus());
- }
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_NOT_OK(parseStatus.getStatus());
+}
- TEST(FindAndModifyRequest, ParseWithRemovePlusUpsert) {
- BSONObj cmdObj(fromjson(R"json({
+TEST(FindAndModifyRequest, ParseWithRemovePlusUpsert) {
+ BSONObj cmdObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
upsert: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_NOT_OK(parseStatus.getStatus());
- }
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_NOT_OK(parseStatus.getStatus());
+}
- TEST(FindAndModifyRequest, ParseWithRemoveAndReturnNew) {
- BSONObj cmdObj(fromjson(R"json({
+TEST(FindAndModifyRequest, ParseWithRemoveAndReturnNew) {
+ BSONObj cmdObj(fromjson(R"json({
findAndModify: 'user',
query: { x: 1 },
remove: true,
new: true
})json"));
- auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
- ASSERT_NOT_OK(parseStatus.getStatus());
- }
+ auto parseStatus = FindAndModifyRequest::parseFromBSON(NamespaceString("a.b"), cmdObj);
+ ASSERT_NOT_OK(parseStatus.getStatus());
+}
-} // unnamed namespace
-} // namespace mongo
+} // unnamed namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/find_constants.h b/src/mongo/db/query/find_constants.h
index e6333798682..68b515a0499 100644
--- a/src/mongo/db/query/find_constants.h
+++ b/src/mongo/db/query/find_constants.h
@@ -28,8 +28,8 @@
namespace mongo {
- // We cut off further objects once we cross this threshold; thus, you might get
- // a little bit more than this, it is a threshold rather than a limit.
- extern const int32_t MaxBytesToReturnToClientAtOnce;
+// We cut off further objects once we cross this threshold; thus, you might get
+// a little bit more than this; it is a threshold rather than a limit.
+extern const int32_t MaxBytesToReturnToClientAtOnce;
} // namespace mongo
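To illustrate the "threshold rather than a limit" remark above: the size check runs after each document is appended, so the final batch can overshoot the constant by up to one document. A sketch under that assumption ('hasMore' and 'nextDoc' are stand-ins, not helpers from this codebase):

    while (hasMore()) {
        BSONObj obj = nextDoc();
        bb.appendBuf(obj.objdata(), obj.objsize());
        if (bb.len() > MaxBytesToReturnToClientAtOnce) {
            break;  // Threshold crossed on this append; the document is still returned.
        }
    }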
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 0f5044273ba..189910bbae1 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -80,1439 +80,1381 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::vector;
-
- // static
- void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
- std::vector<IndexEntry>* indexEntries) {
- invariant(indexEntries);
-
- // Filter index entries
- // Check BSON objects in AllowedIndices::_indexKeyPatterns against IndexEntry::keyPattern.
- // Removes IndexEntrys that do not match _indexKeyPatterns.
- std::vector<IndexEntry> temp;
- for (std::vector<IndexEntry>::const_iterator i = indexEntries->begin();
- i != indexEntries->end(); ++i) {
- const IndexEntry& indexEntry = *i;
- for (std::vector<BSONObj>::const_iterator j = allowedIndices.indexKeyPatterns.begin();
- j != allowedIndices.indexKeyPatterns.end(); ++j) {
- const BSONObj& index = *j;
- // Copy index entry to temp vector if found in query settings.
- if (0 == indexEntry.keyPattern.woCompare(index)) {
- temp.push_back(indexEntry);
- break;
- }
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::vector;
+
+// static
+void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
+ std::vector<IndexEntry>* indexEntries) {
+ invariant(indexEntries);
+
+ // Filter index entries.
+ // Check BSON objects in AllowedIndices::_indexKeyPatterns against IndexEntry::keyPattern.
+ // Remove IndexEntry objects that do not match _indexKeyPatterns.
+ std::vector<IndexEntry> temp;
+ for (std::vector<IndexEntry>::const_iterator i = indexEntries->begin();
+ i != indexEntries->end();
+ ++i) {
+ const IndexEntry& indexEntry = *i;
+ for (std::vector<BSONObj>::const_iterator j = allowedIndices.indexKeyPatterns.begin();
+ j != allowedIndices.indexKeyPatterns.end();
+ ++j) {
+ const BSONObj& index = *j;
+ // Copy index entry to temp vector if found in query settings.
+ if (0 == indexEntry.keyPattern.woCompare(index)) {
+ temp.push_back(indexEntry);
+ break;
}
}
-
- // Update results.
- temp.swap(*indexEntries);
}
- namespace {
- // The body is below in the "count hack" section but getExecutor calls it.
- bool turnIxscanIntoCount(QuerySolution* soln);
-
- bool filteredIndexBad(const MatchExpression* filter, CanonicalQuery* query) {
- if (!filter)
- return false;
+ // Update results.
+ temp.swap(*indexEntries);
+}
- MatchExpression* queryPredicates = query->root();
- if (!queryPredicates) {
- // Index is filtered, but query has none.
- // Impossible to use index.
- return true;
- }
+namespace {
+// The body is below in the "count hack" section but getExecutor calls it.
+bool turnIxscanIntoCount(QuerySolution* soln);
- return !expression::isSubsetOf(queryPredicates, filter);
- }
- } // namespace
-
-
- void fillOutPlannerParams(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* canonicalQuery,
- QueryPlannerParams* plannerParams) {
- // If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn,
- false);
- while (ii.more()) {
- const IndexDescriptor* desc = ii.next();
-
- IndexCatalogEntry* ice = ii.catalogEntry(desc);
- if (filteredIndexBad(ice->getFilterExpression(), canonicalQuery)) {
- continue;
- }
+bool filteredIndexBad(const MatchExpression* filter, CanonicalQuery* query) {
+ if (!filter)
+ return false;
- plannerParams->indices.push_back(IndexEntry(desc->keyPattern(),
- desc->getAccessMethodName(),
- desc->isMultikey(txn),
- desc->isSparse(),
- desc->unique(),
- desc->indexName(),
- ice->getFilterExpression(),
- desc->infoObj()));
- }
+ MatchExpression* queryPredicates = query->root();
+ if (!queryPredicates) {
+        // The index is filtered, but the query has no predicates.
+        // Impossible to use the index.
+ return true;
+ }
- // If query supports index filters, filter params.indices by indices in query settings.
- QuerySettings* querySettings = collection->infoCache()->getQuerySettings();
- AllowedIndices* allowedIndicesRaw;
- PlanCacheKey planCacheKey =
- collection->infoCache()->getPlanCache()->computeKey(*canonicalQuery);
-
- // Filter index catalog if index filters are specified for query.
- // Also, signal to planner that application hint should be ignored.
- if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
- std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
- filterAllowedIndexEntries(*allowedIndices, &plannerParams->indices);
- plannerParams->indexFiltersApplied = true;
- }
+ return !expression::isSubsetOf(queryPredicates, filter);
+}
+} // namespace
- // We will not output collection scans unless there are no indexed solutions. NO_TABLE_SCAN
- // overrides this behavior by not outputting a collscan even if there are no indexed
- // solutions.
- if (storageGlobalParams.noTableScan) {
- const string& ns = canonicalQuery->ns();
- // There are certain cases where we ignore this restriction:
- bool ignore = canonicalQuery->getQueryObj().isEmpty()
- || (string::npos != ns.find(".system."))
- || (0 == ns.find("local."));
- if (!ignore) {
- plannerParams->options |= QueryPlannerParams::NO_TABLE_SCAN;
- }
- }
- // If the caller wants a shard filter, make sure we're actually sharded.
- if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- CollectionMetadataPtr collMetadata =
- shardingState.getCollectionMetadata(canonicalQuery->ns());
+void fillOutPlannerParams(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* canonicalQuery,
+ QueryPlannerParams* plannerParams) {
+ // If it's not NULL, we may have indices. Access the catalog and fill out IndexEntry(s)
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn, false);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
- if (collMetadata) {
- plannerParams->shardKey = collMetadata->getKeyPattern();
- }
- else {
- // If there's no metadata don't bother w/the shard filter since we won't know what
- // the key pattern is anyway...
- plannerParams->options &= ~QueryPlannerParams::INCLUDE_SHARD_FILTER;
- }
+ IndexCatalogEntry* ice = ii.catalogEntry(desc);
+ if (filteredIndexBad(ice->getFilterExpression(), canonicalQuery)) {
+ continue;
}
- if (internalQueryPlannerEnableIndexIntersection) {
- plannerParams->options |= QueryPlannerParams::INDEX_INTERSECTION;
- }
+ plannerParams->indices.push_back(IndexEntry(desc->keyPattern(),
+ desc->getAccessMethodName(),
+ desc->isMultikey(txn),
+ desc->isSparse(),
+ desc->unique(),
+ desc->indexName(),
+ ice->getFilterExpression(),
+ desc->infoObj()));
+ }
- plannerParams->options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ // If query supports index filters, filter params.indices by indices in query settings.
+ QuerySettings* querySettings = collection->infoCache()->getQuerySettings();
+ AllowedIndices* allowedIndicesRaw;
+ PlanCacheKey planCacheKey =
+ collection->infoCache()->getPlanCache()->computeKey(*canonicalQuery);
+
+ // Filter index catalog if index filters are specified for query.
+ // Also, signal to planner that application hint should be ignored.
+ if (querySettings->getAllowedIndices(planCacheKey, &allowedIndicesRaw)) {
+ std::unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
+ filterAllowedIndexEntries(*allowedIndices, &plannerParams->indices);
+ plannerParams->indexFiltersApplied = true;
+ }
- // Doc-level locking storage engines cannot answer predicates implicitly via exact index
- // bounds for index intersection plans, as this can lead to spurious matches.
- //
- // Such storage engines do not use the invalidation framework, and therefore
- // have no need for KEEP_MUTATIONS.
- if (supportsDocLocking()) {
- plannerParams->options |= QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ // We will not output collection scans unless there are no indexed solutions. NO_TABLE_SCAN
+ // overrides this behavior by not outputting a collscan even if there are no indexed
+ // solutions.
+ if (storageGlobalParams.noTableScan) {
+ const string& ns = canonicalQuery->ns();
+ // There are certain cases where we ignore this restriction:
+ bool ignore = canonicalQuery->getQueryObj().isEmpty() ||
+ (string::npos != ns.find(".system.")) || (0 == ns.find("local."));
+ if (!ignore) {
+ plannerParams->options |= QueryPlannerParams::NO_TABLE_SCAN;
}
- else {
- plannerParams->options |= QueryPlannerParams::KEEP_MUTATIONS;
+ }
+
+ // If the caller wants a shard filter, make sure we're actually sharded.
+ if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ CollectionMetadataPtr collMetadata =
+ shardingState.getCollectionMetadata(canonicalQuery->ns());
+
+ if (collMetadata) {
+ plannerParams->shardKey = collMetadata->getKeyPattern();
+ } else {
+ // If there's no metadata don't bother w/the shard filter since we won't know what
+ // the key pattern is anyway...
+ plannerParams->options &= ~QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
}
- namespace {
-
- /**
- * Build an execution tree for the query described in 'canonicalQuery'. Does not take
- * ownership of arguments.
- *
- * If an execution tree could be created, then returns Status::OK() and sets 'rootOut' to
- * the root of the constructed execution tree, and sets 'querySolutionOut' to the associated
- * query solution (if applicable) or NULL.
- *
- * If an execution tree could not be created, returns a Status indicating why and sets both
- * 'rootOut' and 'querySolutionOut' to NULL.
- */
- Status prepareExecution(OperationContext* opCtx,
- Collection* collection,
- WorkingSet* ws,
- CanonicalQuery* canonicalQuery,
- size_t plannerOptions,
- PlanStage** rootOut,
- QuerySolution** querySolutionOut) {
- invariant(canonicalQuery);
- *rootOut = NULL;
- *querySolutionOut = NULL;
-
- // This can happen as we're called by internal clients as well.
- if (NULL == collection) {
- const string& ns = canonicalQuery->ns();
- LOG(2) << "Collection " << ns << " does not exist."
- << " Using EOF plan: " << canonicalQuery->toStringShort();
- *rootOut = new EOFStage();
- return Status::OK();
- }
+ if (internalQueryPlannerEnableIndexIntersection) {
+ plannerParams->options |= QueryPlannerParams::INDEX_INTERSECTION;
+ }
- // Fill out the planning params. We use these for both cached solutions and non-cached.
- QueryPlannerParams plannerParams;
- plannerParams.options = plannerOptions;
- fillOutPlannerParams(opCtx, collection, canonicalQuery, &plannerParams);
+ plannerParams->options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+
+ // Doc-level locking storage engines cannot answer predicates implicitly via exact index
+ // bounds for index intersection plans, as this can lead to spurious matches.
+ //
+ // Such storage engines do not use the invalidation framework, and therefore
+ // have no need for KEEP_MUTATIONS.
+ if (supportsDocLocking()) {
+ plannerParams->options |= QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ } else {
+ plannerParams->options |= QueryPlannerParams::KEEP_MUTATIONS;
+ }
+}
- // If we have an _id index we can use an idhack plan.
- if (IDHackStage::supportsQuery(*canonicalQuery) &&
- collection->getIndexCatalog()->findIdIndex(opCtx)) {
+namespace {
- LOG(2) << "Using idhack: " << canonicalQuery->toStringShort();
+/**
+ * Build an execution tree for the query described in 'canonicalQuery'. Does not take
+ * ownership of arguments.
+ *
+ * If an execution tree could be created, then returns Status::OK() and sets 'rootOut' to
+ * the root of the constructed execution tree, and sets 'querySolutionOut' to the associated
+ * query solution (if applicable) or NULL.
+ *
+ * If an execution tree could not be created, returns a Status indicating why and sets both
+ * 'rootOut' and 'querySolutionOut' to NULL.
+ */
+Status prepareExecution(OperationContext* opCtx,
+ Collection* collection,
+ WorkingSet* ws,
+ CanonicalQuery* canonicalQuery,
+ size_t plannerOptions,
+ PlanStage** rootOut,
+ QuerySolution** querySolutionOut) {
+ invariant(canonicalQuery);
+ *rootOut = NULL;
+ *querySolutionOut = NULL;
+
+ // This can happen as we're called by internal clients as well.
+ if (NULL == collection) {
+ const string& ns = canonicalQuery->ns();
+ LOG(2) << "Collection " << ns << " does not exist."
+ << " Using EOF plan: " << canonicalQuery->toStringShort();
+ *rootOut = new EOFStage();
+ return Status::OK();
+ }
- *rootOut = new IDHackStage(opCtx, collection, canonicalQuery, ws);
+ // Fill out the planning params. We use these for both cached solutions and non-cached.
+ QueryPlannerParams plannerParams;
+ plannerParams.options = plannerOptions;
+ fillOutPlannerParams(opCtx, collection, canonicalQuery, &plannerParams);
- // Might have to filter out orphaned docs.
- if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- *rootOut =
- new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()),
- ws, *rootOut);
- }
+ // If we have an _id index we can use an idhack plan.
+ if (IDHackStage::supportsQuery(*canonicalQuery) &&
+ collection->getIndexCatalog()->findIdIndex(opCtx)) {
+ LOG(2) << "Using idhack: " << canonicalQuery->toStringShort();
- // There might be a projection. The idhack stage will always fetch the full
- // document, so we don't support covered projections. However, we might use the
- // simple inclusion fast path.
- if (NULL != canonicalQuery->getProj()) {
- ProjectionStageParams params(WhereCallbackReal(opCtx, collection->ns().db()));
- params.projObj = canonicalQuery->getProj()->getProjObj();
-
- // Stuff the right data into the params depending on what proj impl we use.
- if (canonicalQuery->getProj()->requiresDocument()
- || canonicalQuery->getProj()->wantIndexKey()) {
- params.fullExpression = canonicalQuery->root();
- params.projImpl = ProjectionStageParams::NO_FAST_PATH;
- }
- else {
- params.projImpl = ProjectionStageParams::SIMPLE_DOC;
- }
+ *rootOut = new IDHackStage(opCtx, collection, canonicalQuery, ws);
- *rootOut = new ProjectionStage(params, ws, *rootOut);
- }
+ // Might have to filter out orphaned docs.
+ if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ *rootOut = new ShardFilterStage(
+ shardingState.getCollectionMetadata(collection->ns()), ws, *rootOut);
+ }
- return Status::OK();
+ // There might be a projection. The idhack stage will always fetch the full
+ // document, so we don't support covered projections. However, we might use the
+ // simple inclusion fast path.
+ if (NULL != canonicalQuery->getProj()) {
+ ProjectionStageParams params(WhereCallbackReal(opCtx, collection->ns().db()));
+ params.projObj = canonicalQuery->getProj()->getProjObj();
+
+ // Stuff the right data into the params depending on what proj impl we use.
+ if (canonicalQuery->getProj()->requiresDocument() ||
+ canonicalQuery->getProj()->wantIndexKey()) {
+ params.fullExpression = canonicalQuery->root();
+ params.projImpl = ProjectionStageParams::NO_FAST_PATH;
+ } else {
+ params.projImpl = ProjectionStageParams::SIMPLE_DOC;
}
- // Tailable: If the query requests tailable the collection must be capped.
- if (canonicalQuery->getParsed().isTailable()) {
- if (!collection->isCapped()) {
- return Status(ErrorCodes::BadValue,
- "error processing query: " + canonicalQuery->toString() +
- " tailable cursor requested on non capped collection");
- }
+ *rootOut = new ProjectionStage(params, ws, *rootOut);
+ }
- // If a sort is specified it must be equal to expectedSort.
- const BSONObj expectedSort = BSON("$natural" << 1);
- const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
- if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
- return Status(ErrorCodes::BadValue,
- "error processing query: " + canonicalQuery->toString() +
- " invalid sort specified for tailable cursor: "
- + actualSort.toString());
- }
- }
+ return Status::OK();
+ }
- // Try to look up a cached solution for the query.
- CachedSolution* rawCS;
- if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
- collection->infoCache()->getPlanCache()->get(*canonicalQuery, &rawCS).isOK()) {
- // We have a CachedSolution. Have the planner turn it into a QuerySolution.
- std::unique_ptr<CachedSolution> cs(rawCS);
- QuerySolution *qs;
- Status status = QueryPlanner::planFromCache(*canonicalQuery, plannerParams, *cs,
- &qs);
-
- if (status.isOK()) {
- verify(StageBuilder::build(opCtx, collection, *qs, ws, rootOut));
- if ((plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT)
- && turnIxscanIntoCount(qs)) {
-
- LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(*rootOut);
- }
+ // Tailable: If the query requests tailable the collection must be capped.
+ if (canonicalQuery->getParsed().isTailable()) {
+ if (!collection->isCapped()) {
+ return Status(ErrorCodes::BadValue,
+ "error processing query: " + canonicalQuery->toString() +
+ " tailable cursor requested on non capped collection");
+ }
- // Add a CachedPlanStage on top of the previous root.
- //
- // 'decisionWorks' is used to determine whether the existing cache entry should
- // be evicted, and the query replanned.
- //
- // Takes ownership of '*rootOut'.
- *rootOut = new CachedPlanStage(opCtx,
- collection,
- ws,
- canonicalQuery,
- plannerParams,
- cs->decisionWorks,
- *rootOut);
- *querySolutionOut = qs;
- return Status::OK();
- }
+ // If a sort is specified it must be equal to expectedSort.
+ const BSONObj expectedSort = BSON("$natural" << 1);
+ const BSONObj& actualSort = canonicalQuery->getParsed().getSort();
+ if (!actualSort.isEmpty() && !(actualSort == expectedSort)) {
+ return Status(ErrorCodes::BadValue,
+ "error processing query: " + canonicalQuery->toString() +
+ " invalid sort specified for tailable cursor: " +
+ actualSort.toString());
+ }
+ }
+
+ // Try to look up a cached solution for the query.
+ CachedSolution* rawCS;
+ if (PlanCache::shouldCacheQuery(*canonicalQuery) &&
+ collection->infoCache()->getPlanCache()->get(*canonicalQuery, &rawCS).isOK()) {
+ // We have a CachedSolution. Have the planner turn it into a QuerySolution.
+ std::unique_ptr<CachedSolution> cs(rawCS);
+ QuerySolution* qs;
+ Status status = QueryPlanner::planFromCache(*canonicalQuery, plannerParams, *cs, &qs);
+
+ if (status.isOK()) {
+ verify(StageBuilder::build(opCtx, collection, *qs, ws, rootOut));
+ if ((plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) &&
+ turnIxscanIntoCount(qs)) {
+ LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
+ << ", planSummary: " << Explain::getPlanSummary(*rootOut);
}
- if (internalQueryPlanOrChildrenIndependently
- && SubplanStage::canUseSubplanning(*canonicalQuery)) {
+ // Add a CachedPlanStage on top of the previous root.
+ //
+ // 'decisionWorks' is used to determine whether the existing cache entry should
+ // be evicted, and the query replanned.
+ //
+ // Takes ownership of '*rootOut'.
+ *rootOut = new CachedPlanStage(
+ opCtx, collection, ws, canonicalQuery, plannerParams, cs->decisionWorks, *rootOut);
+ *querySolutionOut = qs;
+ return Status::OK();
+ }
+ }
- LOG(2) << "Running query as sub-queries: " << canonicalQuery->toStringShort();
+ if (internalQueryPlanOrChildrenIndependently &&
+ SubplanStage::canUseSubplanning(*canonicalQuery)) {
+ LOG(2) << "Running query as sub-queries: " << canonicalQuery->toStringShort();
- *rootOut = new SubplanStage(opCtx, collection, ws, plannerParams, canonicalQuery);
- return Status::OK();
- }
+ *rootOut = new SubplanStage(opCtx, collection, ws, plannerParams, canonicalQuery);
+ return Status::OK();
+ }
- vector<QuerySolution*> solutions;
- Status status = QueryPlanner::plan(*canonicalQuery, plannerParams, &solutions);
- if (!status.isOK()) {
- return Status(ErrorCodes::BadValue,
- "error processing query: " + canonicalQuery->toString() +
- " planner returned error: " + status.reason());
- }
+ vector<QuerySolution*> solutions;
+ Status status = QueryPlanner::plan(*canonicalQuery, plannerParams, &solutions);
+ if (!status.isOK()) {
+ return Status(ErrorCodes::BadValue,
+ "error processing query: " + canonicalQuery->toString() +
+ " planner returned error: " + status.reason());
+ }
- // We cannot figure out how to answer the query. Perhaps it requires an index
- // we do not have?
- if (0 == solutions.size()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "error processing query: "
- << canonicalQuery->toString()
- << " No query solutions");
- }
+ // We cannot figure out how to answer the query. Perhaps it requires an index
+ // we do not have?
+ if (0 == solutions.size()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "error processing query: " << canonicalQuery->toString()
+ << " No query solutions");
+ }
- // See if one of our solutions is a fast count hack in disguise.
- if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
- for (size_t i = 0; i < solutions.size(); ++i) {
- if (turnIxscanIntoCount(solutions[i])) {
- // Great, we can use solutions[i]. Clean up the other QuerySolution(s).
- for (size_t j = 0; j < solutions.size(); ++j) {
- if (j != i) {
- delete solutions[j];
- }
- }
-
- // We're not going to cache anything that's fast count.
- verify(StageBuilder::build(opCtx, collection, *solutions[i], ws, rootOut));
-
- LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(*rootOut);
-
- *querySolutionOut = solutions[i];
- return Status::OK();
+ // See if one of our solutions is a fast count hack in disguise.
+ if (plannerParams.options & QueryPlannerParams::PRIVATE_IS_COUNT) {
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ if (turnIxscanIntoCount(solutions[i])) {
+ // Great, we can use solutions[i]. Clean up the other QuerySolution(s).
+ for (size_t j = 0; j < solutions.size(); ++j) {
+ if (j != i) {
+ delete solutions[j];
}
}
- }
- if (1 == solutions.size()) {
- // Only one possible plan. Run it. Build the stages from the solution.
- verify(StageBuilder::build(opCtx, collection, *solutions[0], ws, rootOut));
+ // We're not going to cache anything that's fast count.
+ verify(StageBuilder::build(opCtx, collection, *solutions[i], ws, rootOut));
- LOG(2) << "Only one plan is available; it will be run but will not be cached. "
- << canonicalQuery->toStringShort()
+ LOG(2) << "Using fast count: " << canonicalQuery->toStringShort()
<< ", planSummary: " << Explain::getPlanSummary(*rootOut);
- *querySolutionOut = solutions[0];
+ *querySolutionOut = solutions[i];
return Status::OK();
}
- else {
- // Many solutions. Create a MultiPlanStage to pick the best, update the cache,
- // and so on. The working set will be shared by all candidate plans.
- MultiPlanStage* multiPlanStage = new MultiPlanStage(opCtx, collection, canonicalQuery);
-
- for (size_t ix = 0; ix < solutions.size(); ++ix) {
- if (solutions[ix]->cacheData.get()) {
- solutions[ix]->cacheData->indexFilterApplied =
- plannerParams.indexFiltersApplied;
- }
+ }
+ }
- // version of StageBuild::build when WorkingSet is shared
- PlanStage* nextPlanRoot;
- verify(StageBuilder::build(opCtx, collection, *solutions[ix], ws,
- &nextPlanRoot));
+ if (1 == solutions.size()) {
+ // Only one possible plan. Run it. Build the stages from the solution.
+ verify(StageBuilder::build(opCtx, collection, *solutions[0], ws, rootOut));
- // Owns none of the arguments
- multiPlanStage->addPlan(solutions[ix], nextPlanRoot, ws);
- }
+ LOG(2) << "Only one plan is available; it will be run but will not be cached. "
+ << canonicalQuery->toStringShort()
+ << ", planSummary: " << Explain::getPlanSummary(*rootOut);
- *rootOut = multiPlanStage;
- return Status::OK();
+ *querySolutionOut = solutions[0];
+ return Status::OK();
+ } else {
+ // Many solutions. Create a MultiPlanStage to pick the best, update the cache,
+ // and so on. The working set will be shared by all candidate plans.
+ MultiPlanStage* multiPlanStage = new MultiPlanStage(opCtx, collection, canonicalQuery);
+
+ for (size_t ix = 0; ix < solutions.size(); ++ix) {
+ if (solutions[ix]->cacheData.get()) {
+ solutions[ix]->cacheData->indexFilterApplied = plannerParams.indexFiltersApplied;
}
- }
- } // namespace
+            // version of StageBuilder::build when WorkingSet is shared
+ PlanStage* nextPlanRoot;
+ verify(StageBuilder::build(opCtx, collection, *solutions[ix], ws, &nextPlanRoot));
- Status getExecutor(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* rawCanonicalQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions) {
- unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
- Status status = prepareExecution(txn, collection, ws.get(), canonicalQuery.get(),
- plannerOptions, &root, &querySolution);
- if (!status.isOK()) {
- return status;
+ // Owns none of the arguments
+ multiPlanStage->addPlan(solutions[ix], nextPlanRoot, ws);
}
- invariant(root);
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null.
- return PlanExecutor::make(txn, ws.release(), root, querySolution, canonicalQuery.release(),
- collection, yieldPolicy, out);
- }
- Status getExecutor(OperationContext* txn,
- Collection* collection,
- const std::string& ns,
- const BSONObj& unparsedQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions) {
- if (!collection) {
- LOG(2) << "Collection " << ns << " does not exist."
- << " Using EOF stage: " << unparsedQuery.toString();
- EOFStage* eofStage = new EOFStage();
- WorkingSet* ws = new WorkingSet();
- return PlanExecutor::make(txn, ws, eofStage, ns, yieldPolicy, out);
- }
+ *rootOut = multiPlanStage;
+ return Status::OK();
+ }
+}
- if (!CanonicalQuery::isSimpleIdQuery(unparsedQuery) ||
- !collection->getIndexCatalog()->findIdIndex(txn)) {
+} // namespace
- const WhereCallbackReal whereCallback(txn, collection->ns().db());
- CanonicalQuery* cq;
- Status status = CanonicalQuery::canonicalize(collection->ns(), unparsedQuery, &cq,
- whereCallback);
- if (!status.isOK())
- return status;
+Status getExecutor(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out,
+ size_t plannerOptions) {
+ unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ PlanStage* root;
+ QuerySolution* querySolution;
+ Status status = prepareExecution(
+ txn, collection, ws.get(), canonicalQuery.get(), plannerOptions, &root, &querySolution);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(root);
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be null.
+ return PlanExecutor::make(txn,
+ ws.release(),
+ root,
+ querySolution,
+ canonicalQuery.release(),
+ collection,
+ yieldPolicy,
+ out);
+}
+
+Status getExecutor(OperationContext* txn,
+ Collection* collection,
+ const std::string& ns,
+ const BSONObj& unparsedQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out,
+ size_t plannerOptions) {
+ if (!collection) {
+ LOG(2) << "Collection " << ns << " does not exist."
+ << " Using EOF stage: " << unparsedQuery.toString();
+ EOFStage* eofStage = new EOFStage();
+ WorkingSet* ws = new WorkingSet();
+ return PlanExecutor::make(txn, ws, eofStage, ns, yieldPolicy, out);
+ }
- // Takes ownership of 'cq'.
- return getExecutor(txn, collection, cq, yieldPolicy, out, plannerOptions);
- }
+ if (!CanonicalQuery::isSimpleIdQuery(unparsedQuery) ||
+ !collection->getIndexCatalog()->findIdIndex(txn)) {
+ const WhereCallbackReal whereCallback(txn, collection->ns().db());
+ CanonicalQuery* cq;
+ Status status =
+ CanonicalQuery::canonicalize(collection->ns(), unparsedQuery, &cq, whereCallback);
+ if (!status.isOK())
+ return status;
- LOG(2) << "Using idhack: " << unparsedQuery.toString();
+ // Takes ownership of 'cq'.
+ return getExecutor(txn, collection, cq, yieldPolicy, out, plannerOptions);
+ }
- WorkingSet* ws = new WorkingSet();
- PlanStage* root = new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws);
+ LOG(2) << "Using idhack: " << unparsedQuery.toString();
- // Might have to filter out orphaned docs.
- if (plannerOptions & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- root = new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()), ws,
- root);
- }
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* root = new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws);
- return PlanExecutor::make(txn, ws, root, collection, yieldPolicy, out);
+ // Might have to filter out orphaned docs.
+ if (plannerOptions & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ root =
+ new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()), ws, root);
}
- //
- // Find
- //
+ return PlanExecutor::make(txn, ws, root, collection, yieldPolicy, out);
+}
+
+//
+// Find
+//
namespace {
- /**
- * Returns true if 'me' is a GTE or GE predicate over the "ts" field.
- * Such predicates can be used for the oplog start hack.
- */
- bool isOplogTsPred(const mongo::MatchExpression* me) {
- if (mongo::MatchExpression::GT != me->matchType()
- && mongo::MatchExpression::GTE != me->matchType()) {
- return false;
- }
+/**
+ * Returns true if 'me' is a GT or GTE predicate over the "ts" field.
+ * Such predicates can be used for the oplog start hack.
+ */
+bool isOplogTsPred(const mongo::MatchExpression* me) {
+ if (mongo::MatchExpression::GT != me->matchType() &&
+ mongo::MatchExpression::GTE != me->matchType()) {
+ return false;
+ }
- return mongoutils::str::equals(me->path().rawData(), "ts");
- }
-
- mongo::BSONElement extractOplogTsOptime(const mongo::MatchExpression* me) {
- invariant(isOplogTsPred(me));
- return static_cast<const mongo::ComparisonMatchExpression*>(me)->getData();
- }
-
- Status getOplogStartHack(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* cq,
- PlanExecutor** execOut) {
- invariant(collection);
- invariant(cq);
- unique_ptr<CanonicalQuery> autoCq(cq);
-
- // A query can only do oplog start finding if it has a top-level $gt or $gte predicate over
- // the "ts" field (the operation's timestamp). Find that predicate and pass it to
- // the OplogStart stage.
- MatchExpression* tsExpr = NULL;
- if (MatchExpression::AND == cq->root()->matchType()) {
- // The query has an AND at the top-level. See if any of the children
- // of the AND are $gt or $gte predicates over 'ts'.
- for (size_t i = 0; i < cq->root()->numChildren(); ++i) {
- MatchExpression* me = cq->root()->getChild(i);
- if (isOplogTsPred(me)) {
- tsExpr = me;
- break;
- }
+ return mongoutils::str::equals(me->path().rawData(), "ts");
+}
+
+mongo::BSONElement extractOplogTsOptime(const mongo::MatchExpression* me) {
+ invariant(isOplogTsPred(me));
+ return static_cast<const mongo::ComparisonMatchExpression*>(me)->getData();
+}
+
+Status getOplogStartHack(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* cq,
+ PlanExecutor** execOut) {
+ invariant(collection);
+ invariant(cq);
+ unique_ptr<CanonicalQuery> autoCq(cq);
+
+ // A query can only do oplog start finding if it has a top-level $gt or $gte predicate over
+ // the "ts" field (the operation's timestamp). Find that predicate and pass it to
+ // the OplogStart stage.
+ MatchExpression* tsExpr = NULL;
+ if (MatchExpression::AND == cq->root()->matchType()) {
+ // The query has an AND at the top-level. See if any of the children
+ // of the AND are $gt or $gte predicates over 'ts'.
+ for (size_t i = 0; i < cq->root()->numChildren(); ++i) {
+ MatchExpression* me = cq->root()->getChild(i);
+ if (isOplogTsPred(me)) {
+ tsExpr = me;
+ break;
}
}
- else if (isOplogTsPred(cq->root())) {
- // The root of the tree is a $gt or $gte predicate over 'ts'.
- tsExpr = cq->root();
- }
+ } else if (isOplogTsPred(cq->root())) {
+ // The root of the tree is a $gt or $gte predicate over 'ts'.
+ tsExpr = cq->root();
+ }
- if (NULL == tsExpr) {
- return Status(ErrorCodes::OplogOperationUnsupported,
- "OplogReplay query does not contain top-level "
- "$gt or $gte over the 'ts' field.");
- }
+ if (NULL == tsExpr) {
+ return Status(ErrorCodes::OplogOperationUnsupported,
+ "OplogReplay query does not contain top-level "
+ "$gt or $gte over the 'ts' field.");
+ }
- boost::optional<RecordId> startLoc = boost::none;
+ boost::optional<RecordId> startLoc = boost::none;
- // See if the RecordStore supports the oplogStartHack
- const BSONElement tsElem = extractOplogTsOptime(tsExpr);
- if (tsElem.type() == bsonTimestamp) {
- StatusWith<RecordId> goal = oploghack::keyForOptime(tsElem.timestamp());
- if (goal.isOK()) {
- startLoc = collection->getRecordStore()->oplogStartHack(txn, goal.getValue());
- }
+ // See if the RecordStore supports the oplogStartHack
+ const BSONElement tsElem = extractOplogTsOptime(tsExpr);
+ if (tsElem.type() == bsonTimestamp) {
+ StatusWith<RecordId> goal = oploghack::keyForOptime(tsElem.timestamp());
+ if (goal.isOK()) {
+ startLoc = collection->getRecordStore()->oplogStartHack(txn, goal.getValue());
}
+ }
- if (startLoc) {
- LOG(3) << "Using direct oplog seek";
+ if (startLoc) {
+ LOG(3) << "Using direct oplog seek";
+ } else {
+ LOG(3) << "Using OplogStart stage";
+
+ // Fallback to trying the OplogStart stage.
+ WorkingSet* oplogws = new WorkingSet();
+ OplogStart* stage = new OplogStart(txn, collection, tsExpr, oplogws);
+ PlanExecutor* rawExec;
+
+ // Takes ownership of oplogws and stage.
+ Status execStatus =
+ PlanExecutor::make(txn, oplogws, stage, collection, PlanExecutor::YIELD_AUTO, &rawExec);
+ invariant(execStatus.isOK());
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // The stage returns a RecordId of where to start.
+ startLoc = RecordId();
+ PlanExecutor::ExecState state = exec->getNext(NULL, startLoc.get_ptr());
+
+ // This is normal. The start of the oplog is the beginning of the collection.
+ if (PlanExecutor::IS_EOF == state) {
+ return getExecutor(
+ txn, collection, autoCq.release(), PlanExecutor::YIELD_AUTO, execOut);
}
- else {
- LOG(3) << "Using OplogStart stage";
-
- // Fallback to trying the OplogStart stage.
- WorkingSet* oplogws = new WorkingSet();
- OplogStart* stage = new OplogStart(txn, collection, tsExpr, oplogws);
- PlanExecutor* rawExec;
-
- // Takes ownership of oplogws and stage.
- Status execStatus = PlanExecutor::make(txn, oplogws, stage, collection,
- PlanExecutor::YIELD_AUTO, &rawExec);
- invariant(execStatus.isOK());
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // The stage returns a RecordId of where to start.
- startLoc = RecordId();
- PlanExecutor::ExecState state = exec->getNext(NULL, startLoc.get_ptr());
-
- // This is normal. The start of the oplog is the beginning of the collection.
- if (PlanExecutor::IS_EOF == state) {
- return getExecutor(txn, collection, autoCq.release(), PlanExecutor::YIELD_AUTO,
- execOut);
- }
- // This is not normal. An error was encountered.
- if (PlanExecutor::ADVANCED != state) {
- return Status(ErrorCodes::InternalError,
- "quick oplog start location had error...?");
- }
+ // This is not normal. An error was encountered.
+ if (PlanExecutor::ADVANCED != state) {
+ return Status(ErrorCodes::InternalError, "quick oplog start location had error...?");
}
+ }
- // Build our collection scan...
- CollectionScanParams params;
- params.collection = collection;
- params.start = *startLoc;
- params.direction = CollectionScanParams::FORWARD;
- params.tailable = cq->getParsed().isTailable();
+ // Build our collection scan...
+ CollectionScanParams params;
+ params.collection = collection;
+ params.start = *startLoc;
+ params.direction = CollectionScanParams::FORWARD;
+ params.tailable = cq->getParsed().isTailable();
- WorkingSet* ws = new WorkingSet();
- CollectionScan* cs = new CollectionScan(txn, params, ws, cq->root());
- // Takes ownership of 'ws', 'cs', and 'cq'.
- return PlanExecutor::make(txn, ws, cs, autoCq.release(), collection,
- PlanExecutor::YIELD_AUTO, execOut);
- }
+ WorkingSet* ws = new WorkingSet();
+ CollectionScan* cs = new CollectionScan(txn, params, ws, cq->root());
+ // Takes ownership of 'ws', 'cs', and 'cq'.
+ return PlanExecutor::make(
+ txn, ws, cs, autoCq.release(), collection, PlanExecutor::YIELD_AUTO, execOut);
+}
-} // namespace
+} // namespace
- Status getExecutorFind(OperationContext* txn,
- Collection* collection,
- const NamespaceString& nss,
- CanonicalQuery* rawCanonicalQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- std::unique_ptr<CanonicalQuery> cq(rawCanonicalQuery);
- if (NULL != collection && cq->getParsed().isOplogReplay()) {
- return getOplogStartHack(txn, collection, cq.release(), out);
- }
+Status getExecutorFind(OperationContext* txn,
+ Collection* collection,
+ const NamespaceString& nss,
+ CanonicalQuery* rawCanonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ std::unique_ptr<CanonicalQuery> cq(rawCanonicalQuery);
+ if (NULL != collection && cq->getParsed().isOplogReplay()) {
+ return getOplogStartHack(txn, collection, cq.release(), out);
+ }
- size_t options = QueryPlannerParams::DEFAULT;
- if (shardingState.needCollectionMetadata(txn->getClient(), nss.ns())) {
- options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
- }
- return getExecutor(txn, collection, cq.release(), PlanExecutor::YIELD_AUTO, out, options);
+ size_t options = QueryPlannerParams::DEFAULT;
+ if (shardingState.needCollectionMetadata(txn->getClient(), nss.ns())) {
+ options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
+ return getExecutor(txn, collection, cq.release(), PlanExecutor::YIELD_AUTO, out, options);
+}
namespace {
- /**
- * Wrap the specified 'root' plan stage in a ProjectionStage. Does not take ownership of any
- * arguments other than root.
- *
- * If the projection was valid, then return Status::OK() with a pointer to the newly created
- * ProjectionStage. Otherwise, return a status indicating the error reason.
- */
- StatusWith<std::unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
- const NamespaceString& nsString,
- CanonicalQuery* cq,
- const BSONObj& proj,
- bool allowPositional,
- WorkingSet* ws,
- std::unique_ptr<PlanStage> root) {
- invariant(!proj.isEmpty());
-
- ParsedProjection* rawParsedProj;
- Status ppStatus = ParsedProjection::make(proj.getOwned(), cq->root(), &rawParsedProj);
- if (!ppStatus.isOK()) {
- return ppStatus;
- }
- std::unique_ptr<ParsedProjection> pp(rawParsedProj);
-
- // ProjectionExec requires the MatchDetails from the query expression when the projection
- // uses the positional operator. Since the query may no longer match the newly-updated
- // document, we forbid this case.
- if (!allowPositional && pp->requiresMatchDetails()) {
- return {ErrorCodes::BadValue,
- "cannot use a positional projection and return the new document"};
- }
-
- ProjectionStageParams params(WhereCallbackReal(txn, nsString.db()));
- params.projObj = proj;
- params.fullExpression = cq->root();
- return {stdx::make_unique<ProjectionStage>(params, ws, root.release())};
+/**
+ * Wrap the specified 'root' plan stage in a ProjectionStage. Does not take ownership of any
+ * arguments other than root.
+ *
+ * If the projection was valid, then return Status::OK() with a pointer to the newly created
+ * ProjectionStage. Otherwise, return a status indicating the error reason.
+ */
+StatusWith<std::unique_ptr<PlanStage>> applyProjection(OperationContext* txn,
+ const NamespaceString& nsString,
+ CanonicalQuery* cq,
+ const BSONObj& proj,
+ bool allowPositional,
+ WorkingSet* ws,
+ std::unique_ptr<PlanStage> root) {
+ invariant(!proj.isEmpty());
+
+ ParsedProjection* rawParsedProj;
+ Status ppStatus = ParsedProjection::make(proj.getOwned(), cq->root(), &rawParsedProj);
+ if (!ppStatus.isOK()) {
+ return ppStatus;
+ }
+ std::unique_ptr<ParsedProjection> pp(rawParsedProj);
+
+ // ProjectionExec requires the MatchDetails from the query expression when the projection
+ // uses the positional operator. Since the query may no longer match the newly-updated
+ // document, we forbid this case.
+ if (!allowPositional && pp->requiresMatchDetails()) {
+ return {ErrorCodes::BadValue,
+ "cannot use a positional projection and return the new document"};
}
-} // namespace
+ ProjectionStageParams params(WhereCallbackReal(txn, nsString.db()));
+ params.projObj = proj;
+ params.fullExpression = cq->root();
+ return {stdx::make_unique<ProjectionStage>(params, ws, root.release())};
+}
- //
- // Delete
- //
+} // namespace
- Status getExecutorDelete(OperationContext* txn,
- Collection* collection,
- ParsedDelete* parsedDelete,
- PlanExecutor** execOut) {
- const DeleteRequest* request = parsedDelete->getRequest();
-
- const NamespaceString& nss(request->getNamespaceString());
- if (!request->isGod()) {
- if (nss.isSystem()) {
- uassert(12050,
- "cannot delete from system namespace",
- legalClientSystemNS(nss.ns(), true));
- }
- if (nss.ns().find('$') != string::npos) {
- log() << "cannot delete from collection with reserved $ in name: " << nss << endl;
- uasserted(10100, "cannot delete from collection with reserved $ in name");
- }
+//
+// Delete
+//
+
+Status getExecutorDelete(OperationContext* txn,
+ Collection* collection,
+ ParsedDelete* parsedDelete,
+ PlanExecutor** execOut) {
+ const DeleteRequest* request = parsedDelete->getRequest();
+
+ const NamespaceString& nss(request->getNamespaceString());
+ if (!request->isGod()) {
+ if (nss.isSystem()) {
+ uassert(
+ 12050, "cannot delete from system namespace", legalClientSystemNS(nss.ns(), true));
}
-
- if (collection && collection->isCapped()) {
- return Status(ErrorCodes::IllegalOperation,
- str::stream() << "cannot remove from a capped collection: " << nss.ns());
+ if (nss.ns().find('$') != string::npos) {
+ log() << "cannot delete from collection with reserved $ in name: " << nss << endl;
+ uasserted(10100, "cannot delete from collection with reserved $ in name");
}
+ }
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while removing from " << nss.ns());
- }
+ if (collection && collection->isCapped()) {
+ return Status(ErrorCodes::IllegalOperation,
+ str::stream() << "cannot remove from a capped collection: " << nss.ns());
+ }
- DeleteStageParams deleteStageParams;
- deleteStageParams.isMulti = request->isMulti();
- deleteStageParams.fromMigrate = request->isFromMigrate();
- deleteStageParams.isExplain = request->isExplain();
- deleteStageParams.returnDeleted = request->shouldReturnDeleted();
-
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanExecutor::YieldPolicy policy = parsedDelete->canYield() ? PlanExecutor::YIELD_AUTO :
- PlanExecutor::YIELD_MANUAL;
-
- if (!parsedDelete->hasParsedQuery()) {
- // This is the idhack fast-path for getting a PlanExecutor without doing the work
- // to create a CanonicalQuery.
- const BSONObj& unparsedQuery = request->getQuery();
-
- if (!collection) {
- // Treat collections that do not exist as empty collections. Note that the explain
- // reporting machinery always assumes that the root stage for a delete operation is
- // a DeleteStage, so in this case we put a DeleteStage on top of an EOFStage.
- LOG(2) << "Collection " << nss.ns() << " does not exist."
- << " Using EOF stage: " << unparsedQuery.toString();
- DeleteStage* deleteStage = new DeleteStage(txn, deleteStageParams, ws.get(), NULL,
- new EOFStage());
- return PlanExecutor::make(txn, ws.release(), deleteStage, nss.ns(), policy,
- execOut);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss);
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while removing from " << nss.ns());
+ }
- if (CanonicalQuery::isSimpleIdQuery(unparsedQuery)
- && collection->getIndexCatalog()->findIdIndex(txn)
- && request->getProj().isEmpty()) {
- LOG(2) << "Using idhack: " << unparsedQuery.toString();
-
- PlanStage* idHackStage = new IDHackStage(txn,
- collection,
- unparsedQuery["_id"].wrap(),
- ws.get());
- DeleteStage* root = new DeleteStage(txn, deleteStageParams, ws.get(), collection,
- idHackStage);
- return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
- }
+ DeleteStageParams deleteStageParams;
+ deleteStageParams.isMulti = request->isMulti();
+ deleteStageParams.fromMigrate = request->isFromMigrate();
+ deleteStageParams.isExplain = request->isExplain();
+ deleteStageParams.returnDeleted = request->shouldReturnDeleted();
- // If we're here then we don't have a parsed query, but we're also not eligible for
- // the idhack fast path. We need to force canonicalization now.
- Status cqStatus = parsedDelete->parseQueryToCQ();
- if (!cqStatus.isOK()) {
- return cqStatus;
- }
- }
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ PlanExecutor::YieldPolicy policy =
+ parsedDelete->canYield() ? PlanExecutor::YIELD_AUTO : PlanExecutor::YIELD_MANUAL;
- // This is the regular path for when we have a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq(parsedDelete->releaseParsedQuery());
+ if (!parsedDelete->hasParsedQuery()) {
+ // This is the idhack fast-path for getting a PlanExecutor without doing the work
+ // to create a CanonicalQuery.
+ const BSONObj& unparsedQuery = request->getQuery();
- PlanStage* rawRoot;
- QuerySolution* rawQuerySolution;
- const size_t defaultPlannerOptions = 0;
- Status status = prepareExecution(txn, collection, ws.get(), cq.get(),
- defaultPlannerOptions, &rawRoot, &rawQuerySolution);
- if (!status.isOK()) {
- return status;
- }
- invariant(rawRoot);
- std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
- deleteStageParams.canonicalQuery = cq.get();
-
- rawRoot = new DeleteStage(txn, deleteStageParams, ws.get(), collection, rawRoot);
- std::unique_ptr<PlanStage> root(rawRoot);
-
- if (!request->getProj().isEmpty()) {
- invariant(request->shouldReturnDeleted());
-
- const bool allowPositional = true;
- StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(txn,
- nss,
- cq.get(),
- request->getProj(),
- allowPositional,
- ws.get(),
- std::move(root));
- if (!projStatus.isOK()) {
- return projStatus.getStatus();
- }
- root = std::move(projStatus.getValue());
+ if (!collection) {
+ // Treat collections that do not exist as empty collections. Note that the explain
+ // reporting machinery always assumes that the root stage for a delete operation is
+ // a DeleteStage, so in this case we put a DeleteStage on top of an EOFStage.
+ LOG(2) << "Collection " << nss.ns() << " does not exist."
+ << " Using EOF stage: " << unparsedQuery.toString();
+ DeleteStage* deleteStage =
+ new DeleteStage(txn, deleteStageParams, ws.get(), NULL, new EOFStage());
+ return PlanExecutor::make(txn, ws.release(), deleteStage, nss.ns(), policy, execOut);
}
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null.
- return PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- querySolution.release(),
- cq.release(),
- collection,
- policy,
- execOut);
- }
+ if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
+ collection->getIndexCatalog()->findIdIndex(txn) && request->getProj().isEmpty()) {
+ LOG(2) << "Using idhack: " << unparsedQuery.toString();
- //
- // Update
- //
+ PlanStage* idHackStage =
+ new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
+ DeleteStage* root =
+ new DeleteStage(txn, deleteStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
+ }
- namespace {
-
- // TODO: Make this a function on NamespaceString, or make it cleaner.
- inline void validateUpdate(const char* ns ,
- const BSONObj& updateobj,
- const BSONObj& patternOrig) {
- uassert(10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0);
- if (strstr(ns, ".system.")) {
- /* dm: it's very important that system.indexes is never updated as IndexDetails
- has pointers into it */
- uassert(10156,
- str::stream() << "cannot update system collection: "
- << ns << " q: " << patternOrig << " u: " << updateobj,
- legalClientSystemNS(ns , true));
- }
+ // If we're here then we don't have a parsed query, but we're also not eligible for
+ // the idhack fast path. We need to force canonicalization now.
+ Status cqStatus = parsedDelete->parseQueryToCQ();
+ if (!cqStatus.isOK()) {
+ return cqStatus;
}
+ }
- } // namespace
+ // This is the regular path for when we have a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq(parsedDelete->releaseParsedQuery());
- Status getExecutorUpdate(OperationContext* txn,
- Collection* collection,
- ParsedUpdate* parsedUpdate,
- OpDebug* opDebug,
- PlanExecutor** execOut) {
- const UpdateRequest* request = parsedUpdate->getRequest();
- UpdateDriver* driver = parsedUpdate->getDriver();
+ PlanStage* rawRoot;
+ QuerySolution* rawQuerySolution;
+ const size_t defaultPlannerOptions = 0;
+ Status status = prepareExecution(
+ txn, collection, ws.get(), cq.get(), defaultPlannerOptions, &rawRoot, &rawQuerySolution);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(rawRoot);
+ std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
+ deleteStageParams.canonicalQuery = cq.get();
- const NamespaceString& nsString = request->getNamespaceString();
- UpdateLifecycle* lifecycle = request->getLifecycle();
+ rawRoot = new DeleteStage(txn, deleteStageParams, ws.get(), collection, rawRoot);
+ std::unique_ptr<PlanStage> root(rawRoot);
- validateUpdate(nsString.ns().c_str(), request->getUpdates(), request->getQuery());
+ if (!request->getProj().isEmpty()) {
+ invariant(request->shouldReturnDeleted());
- // If there is no collection and this is an upsert, callers are supposed to create
- // the collection prior to calling this method. Explain, however, will never do
- // collection or database creation.
- if (!collection && request->isUpsert()) {
- invariant(request->isExplain());
+ const bool allowPositional = true;
+ StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(
+ txn, nss, cq.get(), request->getProj(), allowPositional, ws.get(), std::move(root));
+ if (!projStatus.isOK()) {
+ return projStatus.getStatus();
}
+ root = std::move(projStatus.getValue());
+ }
- // TODO: This seems a bit circuitious.
- opDebug->updateobj = request->getUpdates();
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be null.
+ return PlanExecutor::make(txn,
+ ws.release(),
+ root.release(),
+ querySolution.release(),
+ cq.release(),
+ collection,
+ policy,
+ execOut);
+}
+
+//
+// Update
+//
- // If this is a user-issued update, then we want to return an error: you cannot perform
- // writes on a secondary. If this is an update to a secondary from the replication system,
- // however, then we make an exception and let the write proceed. In this case,
- // shouldCallLogOp() will be false.
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
+namespace {
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while performing update on "
- << nsString.ns());
- }
+// TODO: Make this a function on NamespaceString, or make it cleaner.
+inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONObj& patternOrig) {
+ uassert(10155, "cannot update reserved $ collection", strchr(ns, '$') == 0);
+ if (strstr(ns, ".system.")) {
+ /* dm: it's very important that system.indexes is never updated as IndexDetails
+ has pointers into it */
+ uassert(10156,
+ str::stream() << "cannot update system collection: " << ns << " q: " << patternOrig
+ << " u: " << updateobj,
+ legalClientSystemNS(ns, true));
+ }
+}
- if (lifecycle) {
- lifecycle->setCollection(collection);
- driver->refreshIndexKeys(lifecycle->getIndexKeys(txn));
- }
+} // namespace
- PlanExecutor::YieldPolicy policy = parsedUpdate->canYield() ? PlanExecutor::YIELD_AUTO :
- PlanExecutor::YIELD_MANUAL;
-
- unique_ptr<WorkingSet> ws(new WorkingSet());
- UpdateStageParams updateStageParams(request, driver, opDebug);
-
- if (!parsedUpdate->hasParsedQuery()) {
- // This is the idhack fast-path for getting a PlanExecutor without doing the work
- // to create a CanonicalQuery.
- const BSONObj& unparsedQuery = request->getQuery();
-
- if (!collection) {
- // Treat collections that do not exist as empty collections. Note that the explain
- // reporting machinery always assumes that the root stage for an update operation is
- // an UpdateStage, so in this case we put an UpdateStage on top of an EOFStage.
- LOG(2) << "Collection " << nsString.ns() << " does not exist."
- << " Using EOF stage: " << unparsedQuery.toString();
- UpdateStage* updateStage = new UpdateStage(txn, updateStageParams, ws.get(),
- collection, new EOFStage());
- return PlanExecutor::make(txn, ws.release(), updateStage, nsString.ns(),
- policy, execOut);
- }
+Status getExecutorUpdate(OperationContext* txn,
+ Collection* collection,
+ ParsedUpdate* parsedUpdate,
+ OpDebug* opDebug,
+ PlanExecutor** execOut) {
+ const UpdateRequest* request = parsedUpdate->getRequest();
+ UpdateDriver* driver = parsedUpdate->getDriver();
- if (CanonicalQuery::isSimpleIdQuery(unparsedQuery)
- && collection->getIndexCatalog()->findIdIndex(txn)
- && request->getProj().isEmpty()) {
+ const NamespaceString& nsString = request->getNamespaceString();
+ UpdateLifecycle* lifecycle = request->getLifecycle();
- LOG(2) << "Using idhack: " << unparsedQuery.toString();
+ validateUpdate(nsString.ns().c_str(), request->getUpdates(), request->getQuery());
- PlanStage* idHackStage = new IDHackStage(txn,
- collection,
- unparsedQuery["_id"].wrap(),
- ws.get());
- UpdateStage* root = new UpdateStage(txn, updateStageParams, ws.get(), collection,
- idHackStage);
- return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
- }
+ // If there is no collection and this is an upsert, callers are supposed to create
+ // the collection prior to calling this method. Explain, however, will never do
+ // collection or database creation.
+ if (!collection && request->isUpsert()) {
+ invariant(request->isExplain());
+ }
- // If we're here then we don't have a parsed query, but we're also not eligible for
- // the idhack fast path. We need to force canonicalization now.
- Status cqStatus = parsedUpdate->parseQueryToCQ();
- if (!cqStatus.isOK()) {
- return cqStatus;
- }
- }
+    // TODO: This seems a bit circuitous.
+ opDebug->updateobj = request->getUpdates();
- // This is the regular path for when we have a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq(parsedUpdate->releaseParsedQuery());
+ // If this is a user-issued update, then we want to return an error: you cannot perform
+ // writes on a secondary. If this is an update to a secondary from the replication system,
+ // however, then we make an exception and let the write proceed. In this case,
+ // shouldCallLogOp() will be false.
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString);
- PlanStage* rawRoot;
- QuerySolution* rawQuerySolution;
- const size_t defaultPlannerOptions = 0;
- Status status = prepareExecution(txn, collection, ws.get(), cq.get(),
- defaultPlannerOptions, &rawRoot, &rawQuerySolution);
- if (!status.isOK()) {
- return status;
- }
- invariant(rawRoot);
- std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
- updateStageParams.canonicalQuery = cq.get();
-
- rawRoot = new UpdateStage(txn, updateStageParams, ws.get(), collection, rawRoot);
- std::unique_ptr<PlanStage> root(rawRoot);
-
- if (!request->getProj().isEmpty()) {
- invariant(request->shouldReturnAnyDocs());
-
- // If the plan stage is to return the newly-updated version of the documents, then it
- // is invalid to use a positional projection because the query expression need not
- // match the array element after the update has been applied.
- const bool allowPositional = request->shouldReturnOldDocs();
- StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(txn,
- nsString,
- cq.get(),
- request->getProj(),
- allowPositional,
- ws.get(),
- std::move(root));
- if (!projStatus.isOK()) {
- return projStatus.getStatus();
- }
- root = std::move(projStatus.getValue());
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while performing update on " << nsString.ns());
+ }
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- querySolution.release(),
- cq.release(),
- collection,
- policy,
- execOut);
+ if (lifecycle) {
+ lifecycle->setCollection(collection);
+ driver->refreshIndexKeys(lifecycle->getIndexKeys(txn));
}
- //
- // Group
- //
+ PlanExecutor::YieldPolicy policy =
+ parsedUpdate->canYield() ? PlanExecutor::YIELD_AUTO : PlanExecutor::YIELD_MANUAL;
- Status getExecutorGroup(OperationContext* txn,
- Collection* collection,
- const GroupRequest& request,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut) {
- if (!globalScriptEngine) {
- return Status(ErrorCodes::BadValue, "server-side JavaScript execution is disabled");
- }
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ UpdateStageParams updateStageParams(request, driver, opDebug);
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
+ if (!parsedUpdate->hasParsedQuery()) {
+ // This is the idhack fast-path for getting a PlanExecutor without doing the work
+ // to create a CanonicalQuery.
+ const BSONObj& unparsedQuery = request->getQuery();
if (!collection) {
- // Treat collections that do not exist as empty collections. Note that the explain
- // reporting machinery always assumes that the root stage for a group operation is a
- // GroupStage, so in this case we put a GroupStage on top of an EOFStage.
- root = new GroupStage(txn, request, ws.get(), new EOFStage());
- return PlanExecutor::make(txn, ws.release(), root, request.ns, yieldPolicy, execOut);
+ // Treat collections that do not exist as empty collections. Note that the explain
+ // reporting machinery always assumes that the root stage for an update operation is
+ // an UpdateStage, so in this case we put an UpdateStage on top of an EOFStage.
+ LOG(2) << "Collection " << nsString.ns() << " does not exist."
+ << " Using EOF stage: " << unparsedQuery.toString();
+ UpdateStage* updateStage =
+ new UpdateStage(txn, updateStageParams, ws.get(), collection, new EOFStage());
+ return PlanExecutor::make(
+ txn, ws.release(), updateStage, nsString.ns(), policy, execOut);
}
- const NamespaceString nss(request.ns);
- const WhereCallbackReal whereCallback(txn, nss.db());
- CanonicalQuery* rawCanonicalQuery;
- Status canonicalizeStatus = CanonicalQuery::canonicalize(request.ns,
- request.query,
- request.explain,
- &rawCanonicalQuery,
- whereCallback);
- if (!canonicalizeStatus.isOK()) {
- return canonicalizeStatus;
- }
- unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
+ if (CanonicalQuery::isSimpleIdQuery(unparsedQuery) &&
+ collection->getIndexCatalog()->findIdIndex(txn) && request->getProj().isEmpty()) {
+ LOG(2) << "Using idhack: " << unparsedQuery.toString();
- const size_t defaultPlannerOptions = 0;
- Status status = prepareExecution(txn, collection, ws.get(), canonicalQuery.get(),
- defaultPlannerOptions, &root, &querySolution);
- if (!status.isOK()) {
- return status;
+ PlanStage* idHackStage =
+ new IDHackStage(txn, collection, unparsedQuery["_id"].wrap(), ws.get());
+ UpdateStage* root =
+ new UpdateStage(txn, updateStageParams, ws.get(), collection, idHackStage);
+ return PlanExecutor::make(txn, ws.release(), root, collection, policy, execOut);
}
- invariant(root);
- root = new GroupStage(txn, request, ws.get(), root);
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be null. Takes ownership of all args other than 'collection'.
- return PlanExecutor::make(txn,
- ws.release(),
- root,
- querySolution,
- canonicalQuery.release(),
- collection,
- yieldPolicy,
- execOut);
+ // If we're here then we don't have a parsed query, but we're also not eligible for
+ // the idhack fast path. We need to force canonicalization now.
+ Status cqStatus = parsedUpdate->parseQueryToCQ();
+ if (!cqStatus.isOK()) {
+ return cqStatus;
+ }
}
- //
- // Count hack
- //
+ // This is the regular path for when we have a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq(parsedUpdate->releaseParsedQuery());
- namespace {
+ PlanStage* rawRoot;
+ QuerySolution* rawQuerySolution;
+ const size_t defaultPlannerOptions = 0;
+ Status status = prepareExecution(
+ txn, collection, ws.get(), cq.get(), defaultPlannerOptions, &rawRoot, &rawQuerySolution);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(rawRoot);
+ std::unique_ptr<QuerySolution> querySolution(rawQuerySolution);
+ updateStageParams.canonicalQuery = cq.get();
+
+ rawRoot = new UpdateStage(txn, updateStageParams, ws.get(), collection, rawRoot);
+ std::unique_ptr<PlanStage> root(rawRoot);
+
+ if (!request->getProj().isEmpty()) {
+ invariant(request->shouldReturnAnyDocs());
+
+ // If the plan stage is to return the newly-updated version of the documents, then it
+ // is invalid to use a positional projection because the query expression need not
+ // match the array element after the update has been applied.
+ const bool allowPositional = request->shouldReturnOldDocs();
+ StatusWith<std::unique_ptr<PlanStage>> projStatus = applyProjection(txn,
+ nsString,
+ cq.get(),
+ request->getProj(),
+ allowPositional,
+ ws.get(),
+ std::move(root));
+ if (!projStatus.isOK()) {
+ return projStatus.getStatus();
+ }
+ root = std::move(projStatus.getValue());
+ }
- /**
- * Returns 'true' if the provided solution 'soln' can be rewritten to use
- * a fast counting stage. Mutates the tree in 'soln->root'.
- *
- * Otherwise, returns 'false'.
- */
- bool turnIxscanIntoCount(QuerySolution* soln) {
- QuerySolutionNode* root = soln->root.get();
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be null. Takes ownership of all args other than 'collection' and 'txn'
+ return PlanExecutor::make(txn,
+ ws.release(),
+ root.release(),
+ querySolution.release(),
+ cq.release(),
+ collection,
+ policy,
+ execOut);
+}
+
+//
+// Group
+//
+
+Status getExecutorGroup(OperationContext* txn,
+ Collection* collection,
+ const GroupRequest& request,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** execOut) {
+ if (!globalScriptEngine) {
+ return Status(ErrorCodes::BadValue, "server-side JavaScript execution is disabled");
+ }
- // Root should be a fetch w/o any filters.
- if (STAGE_FETCH != root->getType()) {
- return false;
- }
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ PlanStage* root;
+ QuerySolution* querySolution;
- if (NULL != root->filter.get()) {
- return false;
- }
+ if (!collection) {
+ // Treat collections that do not exist as empty collections. Note that the explain
+ // reporting machinery always assumes that the root stage for a group operation is a
+ // GroupStage, so in this case we put a GroupStage on top of an EOFStage.
+ root = new GroupStage(txn, request, ws.get(), new EOFStage());
+ return PlanExecutor::make(txn, ws.release(), root, request.ns, yieldPolicy, execOut);
+ }
- // Child should be an ixscan.
- if (STAGE_IXSCAN != root->children[0]->getType()) {
- return false;
- }
+ const NamespaceString nss(request.ns);
+ const WhereCallbackReal whereCallback(txn, nss.db());
+ CanonicalQuery* rawCanonicalQuery;
+ Status canonicalizeStatus = CanonicalQuery::canonicalize(
+ request.ns, request.query, request.explain, &rawCanonicalQuery, whereCallback);
+ if (!canonicalizeStatus.isOK()) {
+ return canonicalizeStatus;
+ }
+ unique_ptr<CanonicalQuery> canonicalQuery(rawCanonicalQuery);
+
+ const size_t defaultPlannerOptions = 0;
+ Status status = prepareExecution(txn,
+ collection,
+ ws.get(),
+ canonicalQuery.get(),
+ defaultPlannerOptions,
+ &root,
+ &querySolution);
+ if (!status.isOK()) {
+ return status;
+ }
+ invariant(root);
+
+ root = new GroupStage(txn, request, ws.get(), root);
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be null. Takes ownership of all args other than 'collection'.
+ return PlanExecutor::make(txn,
+ ws.release(),
+ root,
+ querySolution,
+ canonicalQuery.release(),
+ collection,
+ yieldPolicy,
+ execOut);
+}
+
+//
+// Count hack
+//
- IndexScanNode* isn = static_cast<IndexScanNode*>(root->children[0]);
+namespace {
- // No filters allowed and side-stepping isSimpleRange for now. TODO: do we ever see
- // isSimpleRange here? because we could well use it. I just don't think we ever do see
- // it.
+/**
+ * Returns 'true' if the provided solution 'soln' can be rewritten to use
+ * a fast counting stage. Mutates the tree in 'soln->root'.
+ *
+ * Otherwise, returns 'false'.
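+ *
+ * For example (illustrative), a count whose winning plan is FETCH -> IXSCAN over a single
+ * index interval, such as {a: {$gte: 1, $lte: 5}} on index {a: 1}, is rewritten here into a
+ * single CountNode over that interval.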
+ */
+bool turnIxscanIntoCount(QuerySolution* soln) {
+ QuerySolutionNode* root = soln->root.get();
- if (NULL != isn->filter.get() || isn->bounds.isSimpleRange) {
- return false;
- }
+ // Root should be a fetch w/o any filters.
+ if (STAGE_FETCH != root->getType()) {
+ return false;
+ }
- // Make sure the bounds are OK.
- BSONObj startKey;
- bool startKeyInclusive;
- BSONObj endKey;
- bool endKeyInclusive;
-
- if (!IndexBoundsBuilder::isSingleInterval( isn->bounds,
- &startKey,
- &startKeyInclusive,
- &endKey,
- &endKeyInclusive )) {
- return false;
- }
+ if (NULL != root->filter.get()) {
+ return false;
+ }
- // Make the count node that we replace the fetch + ixscan with.
- CountNode* cn = new CountNode();
- cn->indexKeyPattern = isn->indexKeyPattern;
- cn->startKey = startKey;
- cn->startKeyInclusive = startKeyInclusive;
- cn->endKey = endKey;
- cn->endKeyInclusive = endKeyInclusive;
- // Takes ownership of 'cn' and deletes the old root.
- soln->root.reset(cn);
- return true;
- }
+ // Child should be an ixscan.
+ if (STAGE_IXSCAN != root->children[0]->getType()) {
+ return false;
+ }
- /**
- * Returns true if indices contains an index that can be
- * used with DistinctNode. Sets indexOut to the array index
- * of PlannerParams::indices.
- * Look for the index for the fewest fields.
- * Criteria for suitable index is that the index cannot be special
- * (geo, hashed, text, ...).
- *
- * Multikey indices are not suitable for DistinctNode when the projection
- * is on an array element. Arrays are flattened in a multikey index which
- * makes it impossible for the distinct scan stage (plan stage generated from
- * DistinctNode) to select the requested element by array index.
- *
- * Multikey indices cannot be used for the fast distinct hack if the field is dotted.
- * Currently the solution generated for the distinct hack includes a projection stage and
- * the projection stage cannot be covered with a dotted field.
- */
- bool getDistinctNodeIndex(const std::vector<IndexEntry>& indices,
- const std::string& field, size_t* indexOut) {
- invariant(indexOut);
- bool isDottedField = str::contains(field, '.');
- int minFields = std::numeric_limits<int>::max();
- for (size_t i = 0; i < indices.size(); ++i) {
- // Skip special indices.
- if (!IndexNames::findPluginName(indices[i].keyPattern).empty()) {
- continue;
- }
- // Skip multikey indices if we are projecting on a dotted field.
- if (indices[i].multikey && isDottedField) {
- continue;
- }
- int nFields = indices[i].keyPattern.nFields();
- // Pick the index with the lowest number of fields.
- if (nFields < minFields) {
- minFields = nFields;
- *indexOut = i;
- }
- }
- return minFields != std::numeric_limits<int>::max();
- }
+ IndexScanNode* isn = static_cast<IndexScanNode*>(root->children[0]);
- /**
- * Checks dotted field for a projection and truncates the
- * field name if we could be projecting on an array element.
- * Sets 'isIDOut' to true if the projection is on a sub document of _id.
- * For example, _id.a.2, _id.b.c.
- */
- std::string getProjectedDottedField(const std::string& field, bool* isIDOut) {
- // Check if field contains an array index.
- std::vector<std::string> res;
- mongo::splitStringDelim(field, &res, '.');
-
- // Since we could exit early from the loop,
- // we should check _id here and set '*isIDOut' accordingly.
- *isIDOut = ("_id" == res[0]);
-
- // Skip the first dotted component. If the field starts
- // with a number, the number cannot be an array index.
- int arrayIndex = 0;
- for (size_t i = 1; i < res.size(); ++i) {
- if (mongo::parseNumberFromStringWithBase(res[i], 10, &arrayIndex).isOK()) {
- // Array indices cannot be negative numbers (this is not $slice).
- // Negative numbers are allowed as field names.
- if (arrayIndex >= 0) {
- // Generate prefix of field up to (but not including) array index.
- std::vector<std::string> prefixStrings(res);
- prefixStrings.resize(i);
- // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined string
- // to the end of projectedField.
- std::string projectedField;
- mongo::joinStringDelim(prefixStrings, &projectedField, '.');
- return projectedField;
- }
- }
- }
+    // No filters allowed, and we side-step isSimpleRange for now. TODO: do we ever see
+    // isSimpleRange here? We could make use of it, but it does not appear to come up in
+    // practice.
- return field;
- }
+ if (NULL != isn->filter.get() || isn->bounds.isSimpleRange) {
+ return false;
+ }
- /**
- * Creates a projection spec for a distinct command from the requested field.
- * In most cases, the projection spec will be {_id: 0, key: 1}.
- * The exceptions are:
- * 1) When the requested field is '_id', the projection spec will {_id: 1}.
- * 2) When the requested field could be an array element (eg. a.0),
- * the projected field will be the prefix of the field up to the array element.
- * For example, a.b.2 => {_id: 0, 'a.b': 1}
- * Note that we can't use a $slice projection because the distinct command filters
- * the results from the executor using the dotted field name. Using $slice will
- * re-order the documents in the array in the results.
- */
- BSONObj getDistinctProjection(const std::string& field) {
- std::string projectedField(field);
-
- bool isID = false;
- if ("_id" == field) {
- isID = true;
- }
- else if (str::contains(field, '.')) {
- projectedField = getProjectedDottedField(field, &isID);
- }
- BSONObjBuilder bob;
- if (!isID) {
- bob.append("_id", 0);
- }
- bob.append(projectedField, 1);
- return bob.obj();
- }
+ // Make sure the bounds are OK.
+ BSONObj startKey;
+ bool startKeyInclusive;
+ BSONObj endKey;
+ bool endKeyInclusive;
- } // namespace
+ if (!IndexBoundsBuilder::isSingleInterval(
+ isn->bounds, &startKey, &startKeyInclusive, &endKey, &endKeyInclusive)) {
+ return false;
+ }
- Status getExecutorCount(OperationContext* txn,
- Collection* collection,
- const CountRequest& request,
- bool explain,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut) {
+ // Make the count node that we replace the fetch + ixscan with.
+ CountNode* cn = new CountNode();
+ cn->indexKeyPattern = isn->indexKeyPattern;
+ cn->startKey = startKey;
+ cn->startKeyInclusive = startKeyInclusive;
+ cn->endKey = endKey;
+ cn->endKeyInclusive = endKeyInclusive;
+ // Takes ownership of 'cn' and deletes the old root.
+ soln->root.reset(cn);
+ return true;
+}
- unique_ptr<WorkingSet> ws(new WorkingSet());
- PlanStage* root;
- QuerySolution* querySolution;
-
- // If collection exists and the query is empty, no additional canonicalization is needed.
- // If the query is empty, then we can determine the count by just asking the collection
- // for its number of records. This is implemented by the CountStage, and we don't need
- // to create a child for the count stage in this case.
- //
- // If there is a hint, then we can't use a trival count plan as described above.
- if (collection && request.getQuery().isEmpty() && request.getHint().isEmpty()) {
- root = new CountStage(txn, collection, request, ws.get(), NULL);
- return PlanExecutor::make(txn,
- ws.release(),
- root,
- request.getNs(),
- yieldPolicy,
- execOut);
+/**
+ * Returns true if indices contains an index that can be
+ * used with DistinctNode. Sets 'indexOut' to the array index
+ * into QueryPlannerParams::indices.
+ * Looks for the index with the fewest fields.
+ * A suitable index cannot be special
+ * (geo, hashed, text, ...).
+ *
+ * Multikey indices are not suitable for DistinctNode when the projection
+ * is on an array element. Arrays are flattened in a multikey index which
+ * makes it impossible for the distinct scan stage (plan stage generated from
+ * DistinctNode) to select the requested element by array index.
+ *
+ * Multikey indices cannot be used for the fast distinct hack if the field is dotted.
+ * Currently the solution generated for the distinct hack includes a projection stage and
+ * the projection stage cannot be covered with a dotted field.
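+ *
+ * For example (illustrative), given candidate indices {a: 1} and {a: 1, b: 1} for the
+ * distinct field "a", the single-field index {a: 1} is preferred since it has fewer fields.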
+ */
+bool getDistinctNodeIndex(const std::vector<IndexEntry>& indices,
+ const std::string& field,
+ size_t* indexOut) {
+ invariant(indexOut);
+ bool isDottedField = str::contains(field, '.');
+ int minFields = std::numeric_limits<int>::max();
+ for (size_t i = 0; i < indices.size(); ++i) {
+ // Skip special indices.
+ if (!IndexNames::findPluginName(indices[i].keyPattern).empty()) {
+ continue;
}
-
- unique_ptr<CanonicalQuery> cq;
- if (!request.getQuery().isEmpty() || !request.getHint().isEmpty()) {
- // If query or hint is not empty, canonicalize the query before working with collection.
- typedef MatchExpressionParser::WhereCallback WhereCallback;
- CanonicalQuery* rawCq = NULL;
- Status canonStatus = CanonicalQuery::canonicalize(
- request.getNs(),
- request.getQuery(),
- BSONObj(), // sort
- BSONObj(), // projection
- 0, // skip
- 0, // limit
- request.getHint(),
- BSONObj(), // min
- BSONObj(), // max
- false, // snapshot
- explain,
- &rawCq,
- collection ?
- static_cast<const WhereCallback&>(WhereCallbackReal(txn,
- collection->ns().db())) :
- static_cast<const WhereCallback&>(WhereCallbackNoop()));
- if (!canonStatus.isOK()) {
- return canonStatus;
- }
- cq.reset(rawCq);
+ // Skip multikey indices if we are projecting on a dotted field.
+ if (indices[i].multikey && isDottedField) {
+ continue;
+ }
+ int nFields = indices[i].keyPattern.nFields();
+ // Pick the index with the lowest number of fields.
+ if (nFields < minFields) {
+ minFields = nFields;
+ *indexOut = i;
}
+ }
+ return minFields != std::numeric_limits<int>::max();
+}
- if (!collection) {
- // Treat collections that do not exist as empty collections. Note that the explain
- // reporting machinery always assumes that the root stage for a count operation is
- // a CountStage, so in this case we put a CountStage on top of an EOFStage.
- root = new CountStage(txn, collection, request, ws.get(), new EOFStage());
- return PlanExecutor::make(txn,
- ws.release(),
- root,
- request.getNs(),
- yieldPolicy,
- execOut);
+/**
+ * Checks dotted field for a projection and truncates the
+ * field name if we could be projecting on an array element.
+ * Sets 'isIDOut' to true if the projection is on a sub document of _id.
+ * For example, _id.a.2, _id.b.c.
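+ * Illustratively, "a.b.2" is truncated to "a.b" (a possible array index follows), while
+ * "_id.b.c" is returned unchanged with '*isIDOut' set to true.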
+ */
+std::string getProjectedDottedField(const std::string& field, bool* isIDOut) {
+ // Check if field contains an array index.
+ std::vector<std::string> res;
+ mongo::splitStringDelim(field, &res, '.');
+
+ // Since we could exit early from the loop,
+ // we should check _id here and set '*isIDOut' accordingly.
+ *isIDOut = ("_id" == res[0]);
+
+ // Skip the first dotted component. If the field starts
+ // with a number, the number cannot be an array index.
+ int arrayIndex = 0;
+ for (size_t i = 1; i < res.size(); ++i) {
+ if (mongo::parseNumberFromStringWithBase(res[i], 10, &arrayIndex).isOK()) {
+ // Array indices cannot be negative numbers (this is not $slice).
+ // Negative numbers are allowed as field names.
+ if (arrayIndex >= 0) {
+ // Generate prefix of field up to (but not including) array index.
+ std::vector<std::string> prefixStrings(res);
+ prefixStrings.resize(i);
+                // Use a fresh string: joinStringDelim() appends the joined string to the
+                // end of projectedField rather than overwriting it.
+ std::string projectedField;
+ mongo::joinStringDelim(prefixStrings, &projectedField, '.');
+ return projectedField;
+ }
}
+ }
- invariant(cq.get());
+ return field;
+}
- const size_t plannerOptions = QueryPlannerParams::PRIVATE_IS_COUNT;
- Status prepStatus = prepareExecution(txn, collection, ws.get(), cq.get(), plannerOptions,
- &root, &querySolution);
- if (!prepStatus.isOK()) {
- return prepStatus;
- }
- invariant(root);
-
- // Make a CountStage to be the new root.
- root = new CountStage(txn, collection, request, ws.get(), root);
- // We must have a tree of stages in order to have a valid plan executor, but the query
- // solution may be NULL. Takes ownership of all args other than 'collection' and 'txn'
- return PlanExecutor::make(txn,
- ws.release(),
- root,
- querySolution,
- cq.release(),
- collection,
- yieldPolicy,
- execOut);
+/**
+ * Creates a projection spec for a distinct command from the requested field.
+ * In most cases, the projection spec will be {_id: 0, key: 1}.
+ * The exceptions are:
+ * 1) When the requested field is '_id', the projection spec will be {_id: 1}.
+ * 2) When the requested field could be an array element (eg. a.0),
+ * the projected field will be the prefix of the field up to the array element.
+ * For example, a.b.2 => {_id: 0, 'a.b': 1}
+ * Note that we can't use a $slice projection because the distinct command filters
+ * the results from the executor using the dotted field name. Using $slice will
+ * re-order the documents in the array in the results.
+ */
+BSONObj getDistinctProjection(const std::string& field) {
+ std::string projectedField(field);
+
+ bool isID = false;
+ if ("_id" == field) {
+ isID = true;
+ } else if (str::contains(field, '.')) {
+ projectedField = getProjectedDottedField(field, &isID);
+ }
+ BSONObjBuilder bob;
+ if (!isID) {
+ bob.append("_id", 0);
}
+ bob.append(projectedField, 1);
+ return bob.obj();
+}
+} // namespace
+
+Status getExecutorCount(OperationContext* txn,
+ Collection* collection,
+ const CountRequest& request,
+ bool explain,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** execOut) {
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ PlanStage* root;
+ QuerySolution* querySolution;
+
+ // If collection exists and the query is empty, no additional canonicalization is needed.
+ // If the query is empty, then we can determine the count by just asking the collection
+ // for its number of records. This is implemented by the CountStage, and we don't need
+ // to create a child for the count stage in this case.
//
- // Distinct hack
- //
+    // If there is a hint, then we can't use a trivial count plan as described above.
+ if (collection && request.getQuery().isEmpty() && request.getHint().isEmpty()) {
+ root = new CountStage(txn, collection, request, ws.get(), NULL);
+ return PlanExecutor::make(txn, ws.release(), root, request.getNs(), yieldPolicy, execOut);
+ }
- bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const string& field) {
- QuerySolutionNode* root = soln->root.get();
+ unique_ptr<CanonicalQuery> cq;
+ if (!request.getQuery().isEmpty() || !request.getHint().isEmpty()) {
+ // If query or hint is not empty, canonicalize the query before working with collection.
+ typedef MatchExpressionParser::WhereCallback WhereCallback;
+ CanonicalQuery* rawCq = NULL;
+ Status canonStatus = CanonicalQuery::canonicalize(
+ request.getNs(),
+ request.getQuery(),
+ BSONObj(), // sort
+ BSONObj(), // projection
+ 0, // skip
+ 0, // limit
+ request.getHint(),
+ BSONObj(), // min
+ BSONObj(), // max
+ false, // snapshot
+ explain,
+ &rawCq,
+ collection
+ ? static_cast<const WhereCallback&>(WhereCallbackReal(txn, collection->ns().db()))
+ : static_cast<const WhereCallback&>(WhereCallbackNoop()));
+ if (!canonStatus.isOK()) {
+ return canonStatus;
+ }
+ cq.reset(rawCq);
+ }
- // We're looking for a project on top of an ixscan.
- if (STAGE_PROJECTION == root->getType() && (STAGE_IXSCAN == root->children[0]->getType())) {
- IndexScanNode* isn = static_cast<IndexScanNode*>(root->children[0]);
+ if (!collection) {
+ // Treat collections that do not exist as empty collections. Note that the explain
+ // reporting machinery always assumes that the root stage for a count operation is
+ // a CountStage, so in this case we put a CountStage on top of an EOFStage.
+ root = new CountStage(txn, collection, request, ws.get(), new EOFStage());
+ return PlanExecutor::make(txn, ws.release(), root, request.getNs(), yieldPolicy, execOut);
+ }
- // An additional filter must be applied to the data in the key, so we can't just skip
- // all the keys with a given value; we must examine every one to find the one that (may)
- // pass the filter.
- if (NULL != isn->filter.get()) {
- return false;
- }
+ invariant(cq.get());
- // We only set this when we have special query modifiers (.max() or .min()) or other
- // special cases. Don't want to handle the interactions between those and distinct.
- // Don't think this will ever really be true but if it somehow is, just ignore this
- // soln.
- if (isn->bounds.isSimpleRange) {
- return false;
- }
+ const size_t plannerOptions = QueryPlannerParams::PRIVATE_IS_COUNT;
+ Status prepStatus = prepareExecution(
+ txn, collection, ws.get(), cq.get(), plannerOptions, &root, &querySolution);
+ if (!prepStatus.isOK()) {
+ return prepStatus;
+ }
+ invariant(root);
+
+ // Make a CountStage to be the new root.
+ root = new CountStage(txn, collection, request, ws.get(), root);
+ // We must have a tree of stages in order to have a valid plan executor, but the query
+ // solution may be NULL. Takes ownership of all args other than 'collection' and 'txn'
+ return PlanExecutor::make(
+ txn, ws.release(), root, querySolution, cq.release(), collection, yieldPolicy, execOut);
+}
+
+//
+// Distinct hack
+//
+
+bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const string& field) {
+ QuerySolutionNode* root = soln->root.get();
+
+ // We're looking for a project on top of an ixscan.
+ if (STAGE_PROJECTION == root->getType() && (STAGE_IXSCAN == root->children[0]->getType())) {
+ IndexScanNode* isn = static_cast<IndexScanNode*>(root->children[0]);
+
+ // An additional filter must be applied to the data in the key, so we can't just skip
+ // all the keys with a given value; we must examine every one to find the one that (may)
+ // pass the filter.
+ if (NULL != isn->filter.get()) {
+ return false;
+ }
- // Make a new DistinctNode. We swap this for the ixscan in the provided solution.
- DistinctNode* dn = new DistinctNode();
- dn->indexKeyPattern = isn->indexKeyPattern;
- dn->direction = isn->direction;
- dn->bounds = isn->bounds;
-
- // Figure out which field we're skipping to the next value of. TODO: We currently only
- // try to distinct-hack when there is an index prefixed by the field we're distinct-ing
- // over. Consider removing this code if we stick with that policy.
- dn->fieldNo = 0;
- BSONObjIterator it(isn->indexKeyPattern);
- while (it.more()) {
- if (field == it.next().fieldName()) {
- break;
- }
- dn->fieldNo++;
- }
+ // We only set this when we have special query modifiers (.max() or .min()) or other
+ // special cases. Don't want to handle the interactions between those and distinct.
+ // Don't think this will ever really be true but if it somehow is, just ignore this
+ // soln.
+ if (isn->bounds.isSimpleRange) {
+ return false;
+ }
- // Delete the old index scan, set the child of project to the fast distinct scan.
- delete root->children[0];
- root->children[0] = dn;
- return true;
+ // Make a new DistinctNode. We swap this for the ixscan in the provided solution.
+ DistinctNode* dn = new DistinctNode();
+ dn->indexKeyPattern = isn->indexKeyPattern;
+ dn->direction = isn->direction;
+ dn->bounds = isn->bounds;
+
+ // Figure out which field we're skipping to the next value of. TODO: We currently only
+ // try to distinct-hack when there is an index prefixed by the field we're distinct-ing
+ // over. Consider removing this code if we stick with that policy.
+ dn->fieldNo = 0;
+ BSONObjIterator it(isn->indexKeyPattern);
+ while (it.more()) {
+ if (field == it.next().fieldName()) {
+ break;
+ }
+ dn->fieldNo++;
}
- return false;
+ // Delete the old index scan, set the child of project to the fast distinct scan.
+ delete root->children[0];
+ root->children[0] = dn;
+ return true;
}
- Status getExecutorDistinct(OperationContext* txn,
- Collection* collection,
- const BSONObj& query,
- const std::string& field,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- // This should'a been checked by the distinct command.
- invariant(collection);
-
- // TODO: check for idhack here?
-
- // When can we do a fast distinct hack?
- // 1. There is a plan with just one leaf and that leaf is an ixscan.
- // 2. The ixscan indexes the field we're interested in.
- // 2a: We are correct if the index contains the field but for now we look for prefix.
- // 3. The query is covered/no fetch.
- //
- // We go through normal planning (with limited parameters) to see if we can produce
- // a soln with the above properties.
-
- QueryPlannerParams plannerParams;
- plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- // TODO Need to check if query is compatible with any partial indexes. SERVER-17854.
- IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn,false);
- while (ii.more()) {
- const IndexDescriptor* desc = ii.next();
- // The distinct hack can work if any field is in the index but it's not always clear
- // if it's a win unless it's the first field.
- if (desc->keyPattern().firstElement().fieldName() == field) {
- plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
- desc->getAccessMethodName(),
- desc->isMultikey(txn),
- desc->isSparse(),
- desc->unique(),
- desc->indexName(),
- NULL,
- desc->infoObj()));
- }
- }
+ return false;
+}
- const WhereCallbackReal whereCallback(txn, collection->ns().db());
+Status getExecutorDistinct(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& query,
+ const std::string& field,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+    // This should have been checked by the distinct command.
+ invariant(collection);
- // If there are no suitable indices for the distinct hack bail out now into regular planning
- // with no projection.
- if (plannerParams.indices.empty()) {
- CanonicalQuery* cq;
- Status status = CanonicalQuery::canonicalize(
- collection->ns().ns(), query, &cq, whereCallback);
- if (!status.isOK()) {
- return status;
- }
+ // TODO: check for idhack here?
- // Takes ownership of 'cq'.
- return getExecutor(txn, collection, cq, yieldPolicy, out);
+ // When can we do a fast distinct hack?
+ // 1. There is a plan with just one leaf and that leaf is an ixscan.
+ // 2. The ixscan indexes the field we're interested in.
+    // 2a: We are correct if the index contains the field, but for now we only look for a prefix.
+ // 3. The query is covered/no fetch.
+ //
+ // We go through normal planning (with limited parameters) to see if we can produce
+ // a soln with the above properties.
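+    //
+    // For example (illustrative), distinct on field "a" with query {a: {$gt: 5}} and index
+    // {a: 1, b: 1} can yield a covered ixscan plan whose ixscan we then swap for a
+    // DistinctNode.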
+
+ QueryPlannerParams plannerParams;
+ plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ // TODO Need to check if query is compatible with any partial indexes. SERVER-17854.
+ IndexCatalog::IndexIterator ii = collection->getIndexCatalog()->getIndexIterator(txn, false);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
+ // The distinct hack can work if any field is in the index but it's not always clear
+ // if it's a win unless it's the first field.
+ if (desc->keyPattern().firstElement().fieldName() == field) {
+ plannerParams.indices.push_back(IndexEntry(desc->keyPattern(),
+ desc->getAccessMethodName(),
+ desc->isMultikey(txn),
+ desc->isSparse(),
+ desc->unique(),
+ desc->indexName(),
+ NULL,
+ desc->infoObj()));
}
+ }
- //
- // If we're here, we have an index prefixed by the field we're distinct-ing over.
- //
-
- // Applying a projection allows the planner to try to give us covered plans that we can turn
- // into the projection hack. getDistinctProjection deals with .find() projection semantics
- // (ie _id:1 being implied by default).
- BSONObj projection = getDistinctProjection(field);
+ const WhereCallbackReal whereCallback(txn, collection->ns().db());
- // Apply a projection of the key. Empty BSONObj() is for the sort.
+ // If there are no suitable indices for the distinct hack bail out now into regular planning
+ // with no projection.
+ if (plannerParams.indices.empty()) {
CanonicalQuery* cq;
- Status status = CanonicalQuery::canonicalize(collection->ns().ns(),
- query,
- BSONObj(),
- projection,
- &cq,
- whereCallback);
+ Status status =
+ CanonicalQuery::canonicalize(collection->ns().ns(), query, &cq, whereCallback);
if (!status.isOK()) {
return status;
}
- unique_ptr<CanonicalQuery> autoCq(cq);
+ // Takes ownership of 'cq'.
+ return getExecutor(txn, collection, cq, yieldPolicy, out);
+ }
- // If there's no query, we can just distinct-scan one of the indices.
- // Not every index in plannerParams.indices may be suitable. Refer to
- // getDistinctNodeIndex().
- size_t distinctNodeIndex = 0;
- if (query.isEmpty() &&
- getDistinctNodeIndex(plannerParams.indices, field, &distinctNodeIndex)) {
- DistinctNode* dn = new DistinctNode();
- dn->indexKeyPattern = plannerParams.indices[distinctNodeIndex].keyPattern;
- dn->direction = 1;
- IndexBoundsBuilder::allValuesBounds(dn->indexKeyPattern, &dn->bounds);
- dn->fieldNo = 0;
+ //
+ // If we're here, we have an index prefixed by the field we're distinct-ing over.
+ //
- QueryPlannerParams params;
+ // Applying a projection allows the planner to try to give us covered plans that we can turn
+ // into the projection hack. getDistinctProjection deals with .find() projection semantics
+    // (i.e. _id:1 being implied by default).
+ BSONObj projection = getDistinctProjection(field);
+
+ // Apply a projection of the key. Empty BSONObj() is for the sort.
+ CanonicalQuery* cq;
+ Status status = CanonicalQuery::canonicalize(
+ collection->ns().ns(), query, BSONObj(), projection, &cq, whereCallback);
+ if (!status.isOK()) {
+ return status;
+ }
- // Takes ownership of 'dn'.
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn);
- invariant(soln);
+ unique_ptr<CanonicalQuery> autoCq(cq);
- WorkingSet* ws = new WorkingSet();
- PlanStage* root;
- verify(StageBuilder::build(txn, collection, *soln, ws, &root));
+ // If there's no query, we can just distinct-scan one of the indices.
+ // Not every index in plannerParams.indices may be suitable. Refer to
+ // getDistinctNodeIndex().
+ size_t distinctNodeIndex = 0;
+ if (query.isEmpty() && getDistinctNodeIndex(plannerParams.indices, field, &distinctNodeIndex)) {
+ DistinctNode* dn = new DistinctNode();
+ dn->indexKeyPattern = plannerParams.indices[distinctNodeIndex].keyPattern;
+ dn->direction = 1;
+ IndexBoundsBuilder::allValuesBounds(dn->indexKeyPattern, &dn->bounds);
+ dn->fieldNo = 0;
- LOG(2) << "Using fast distinct: " << cq->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(root);
+ QueryPlannerParams params;
- // Takes ownership of its arguments (except for 'collection').
- return PlanExecutor::make(txn, ws, root, soln, autoCq.release(), collection,
- yieldPolicy, out);
- }
+ // Takes ownership of 'dn'.
+ QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(*cq, params, dn);
+ invariant(soln);
- // See if we can answer the query in a fast-distinct compatible fashion.
- vector<QuerySolution*> solutions;
- status = QueryPlanner::plan(*cq, plannerParams, &solutions);
- if (!status.isOK()) {
- return getExecutor(txn, collection, autoCq.release(), yieldPolicy, out);
- }
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* root;
+ verify(StageBuilder::build(txn, collection, *soln, ws, &root));
- // We look for a solution that has an ixscan we can turn into a distinctixscan
- for (size_t i = 0; i < solutions.size(); ++i) {
- if (turnIxscanIntoDistinctIxscan(solutions[i], field)) {
- // Great, we can use solutions[i]. Clean up the other QuerySolution(s).
- for (size_t j = 0; j < solutions.size(); ++j) {
- if (j != i) {
- delete solutions[j];
- }
- }
+ LOG(2) << "Using fast distinct: " << cq->toStringShort()
+ << ", planSummary: " << Explain::getPlanSummary(root);
- // Build and return the SSR over solutions[i].
- WorkingSet* ws = new WorkingSet();
- PlanStage* root;
- verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
+ // Takes ownership of its arguments (except for 'collection').
+ return PlanExecutor::make(
+ txn, ws, root, soln, autoCq.release(), collection, yieldPolicy, out);
+ }
- LOG(2) << "Using fast distinct: " << cq->toStringShort()
- << ", planSummary: " << Explain::getPlanSummary(root);
+ // See if we can answer the query in a fast-distinct compatible fashion.
+ vector<QuerySolution*> solutions;
+ status = QueryPlanner::plan(*cq, plannerParams, &solutions);
+ if (!status.isOK()) {
+ return getExecutor(txn, collection, autoCq.release(), yieldPolicy, out);
+ }
- // Takes ownership of 'ws', 'root', 'solutions[i]', and 'autoCq'.
- return PlanExecutor::make(txn, ws, root, solutions[i], autoCq.release(),
- collection, yieldPolicy, out);
+    // We look for a solution that has an ixscan that we can turn into a distinct ixscan.
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ if (turnIxscanIntoDistinctIxscan(solutions[i], field)) {
+ // Great, we can use solutions[i]. Clean up the other QuerySolution(s).
+ for (size_t j = 0; j < solutions.size(); ++j) {
+ if (j != i) {
+ delete solutions[j];
+ }
}
- }
- // If we're here, the planner made a soln with the restricted index set but we couldn't
- // translate any of them into a distinct-compatible soln. So, delete the solutions and just
- // go through normal planning.
- for (size_t i = 0; i < solutions.size(); ++i) {
- delete solutions[i];
- }
+            // Build and return the plan executor over solutions[i].
+ WorkingSet* ws = new WorkingSet();
+ PlanStage* root;
+ verify(StageBuilder::build(txn, collection, *solutions[i], ws, &root));
- // We drop the projection from the 'cq'. Unfortunately this is not trivial.
- status = CanonicalQuery::canonicalize(collection->ns().ns(), query, &cq, whereCallback);
- if (!status.isOK()) {
- return status;
+ LOG(2) << "Using fast distinct: " << cq->toStringShort()
+ << ", planSummary: " << Explain::getPlanSummary(root);
+
+ // Takes ownership of 'ws', 'root', 'solutions[i]', and 'autoCq'.
+ return PlanExecutor::make(
+ txn, ws, root, solutions[i], autoCq.release(), collection, yieldPolicy, out);
}
+ }
- autoCq.reset(cq);
+ // If we're here, the planner made a soln with the restricted index set but we couldn't
+ // translate any of them into a distinct-compatible soln. So, delete the solutions and just
+ // go through normal planning.
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ delete solutions[i];
+ }
- // Takes ownership of 'autoCq'.
- return getExecutor(txn, collection, autoCq.release(), yieldPolicy, out);
+ // We drop the projection from the 'cq'. Unfortunately this is not trivial.
+ status = CanonicalQuery::canonicalize(collection->ns().ns(), query, &cq, whereCallback);
+ if (!status.isOK()) {
+ return status;
}
+ autoCq.reset(cq);
+
+ // Takes ownership of 'autoCq'.
+ return getExecutor(txn, collection, autoCq.release(), yieldPolicy, out);
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index c92f67a00ca..24d99ecc791 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -39,171 +39,171 @@
namespace mongo {
- class Collection;
- class CountRequest;
-
- struct GroupRequest;
-
- /**
- * Filter indexes retrieved from index catalog by
- * allowed indices in query settings.
- * Used by getExecutor().
- * This function is public to facilitate testing.
- */
- void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
- std::vector<IndexEntry>* indexEntries);
-
- /**
- * Fill out the provided 'plannerParams' for the 'canonicalQuery' operating on the collection
- * 'collection'. Exposed for testing.
- */
- void fillOutPlannerParams(OperationContext* txn,
- Collection* collection,
- CanonicalQuery* canonicalQuery,
- QueryPlannerParams* plannerParams);
-
- /**
- * Get a plan executor for a query. Takes ownership of 'rawCanonicalQuery'.
- *
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutor(OperationContext* txn,
+class Collection;
+class CountRequest;
+
+struct GroupRequest;
+
+/**
+ * Filter indexes retrieved from index catalog by
+ * allowed indices in query settings.
+ * Used by getExecutor().
+ * This function is public to facilitate testing.
+ */
+void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
+ std::vector<IndexEntry>* indexEntries);
+
+/**
+ * Fill out the provided 'plannerParams' for the 'canonicalQuery' operating on the collection
+ * 'collection'. Exposed for testing.
+ */
+void fillOutPlannerParams(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* canonicalQuery,
+ QueryPlannerParams* plannerParams);
+
+/**
+ * Get a plan executor for a query. Takes ownership of 'rawCanonicalQuery'.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK()
+ * and populates *out with the PlanExecutor.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutor(OperationContext* txn,
+ Collection* collection,
+ CanonicalQuery* rawCanonicalQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out,
+ size_t plannerOptions = 0);
+
+/**
+ * Get a plan executor for a query. This differs from the getExecutor(...) function
+ * above in that the above requires a non-NULL canonical query, whereas this
+ * function can retrieve a plan executor from the raw query object.
+ *
+ * Used to support idhack updates that do not create a canonical query.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK()
+ * and populates *out with the PlanExecutor.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutor(OperationContext* txn,
+ Collection* collection,
+ const std::string& ns,
+ const BSONObj& unparsedQuery,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** out,
+ size_t plannerOptions = 0);
+
+/**
+ * Get a plan executor for a .find() operation. Takes ownership of 'rawCanonicalQuery'.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK()
+ * and populates *out with the PlanExecutor.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutorFind(OperationContext* txn,
Collection* collection,
+ const NamespaceString& nss,
CanonicalQuery* rawCanonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions = 0);
-
- /**
- * Get a plan executor for query. This differs from the getExecutor(...) function
- * above in that the above requires a non-NULL canonical query, whereas this
- * function can retrieve a plan executor from the raw query object.
- *
- * Used to support idhack updates that do not create a canonical query.
- *
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutor(OperationContext* txn,
- Collection* collection,
- const std::string& ns,
- const BSONObj& unparsedQuery,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out,
- size_t plannerOptions = 0);
-
- /**
- * Get a plan executor for a .find() operation. Takes ownership of 'rawCanonicalQuery'.
- *
- * If the query is valid and an executor could be created, returns Status::OK()
- * and populates *out with the PlanExecutor.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutorFind(OperationContext* txn,
+ PlanExecutor** out);
+
+/**
+ * If possible, turn the provided QuerySolution into a QuerySolution that uses a DistinctNode
+ * to provide results for the distinct command.
+ *
+ * If the provided solution could be mutated successfully, returns true, otherwise returns
+ * false.
+ */
+bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const std::string& field);
+
+/**
+ * Get an executor for a query executing as part of a distinct command.
+ *
+ * Distinct is unique in that it doesn't care about getting all the results; it just wants all
+ * possible values of a certain field. As such, we can skip lots of data in certain cases (see
+ * body of method for detail).
+ */
+Status getExecutorDistinct(OperationContext* txn,
Collection* collection,
- const NamespaceString& nss,
- CanonicalQuery* rawCanonicalQuery,
+ const BSONObj& query,
+ const std::string& field,
PlanExecutor::YieldPolicy yieldPolicy,
PlanExecutor** out);
- /**
- * If possible, turn the provided QuerySolution into a QuerySolution that uses a DistinctNode
- * to provide results for the distinct command.
- *
- * If the provided solution could be mutated successfully, returns true, otherwise returns
- * false.
- */
- bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, const std::string& field);
-
- /*
- * Get an executor for a query executing as part of a distinct command.
- *
- * Distinct is unique in that it doesn't care about getting all the results; it just wants all
- * possible values of a certain field. As such, we can skip lots of data in certain cases (see
- * body of method for detail).
- */
- Status getExecutorDistinct(OperationContext* txn,
- Collection* collection,
- const BSONObj& query,
- const std::string& field,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /*
- * Get a PlanExecutor for a query executing as part of a count command.
- *
- * Count doesn't care about actually examining its results; it just wants to walk through them.
- * As such, with certain covered queries, we can skip the overhead of fetching etc. when
- * executing a count.
- */
- Status getExecutorCount(OperationContext* txn,
- Collection* collection,
- const CountRequest& request,
- bool explain,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut);
-
- /**
- * Get a PlanExecutor for a delete operation. 'parsedDelete' describes the query predicate
- * and delete flags like 'isMulti'. The caller must hold the appropriate MODE_X or MODE_IX
- * locks, and must not release these locks until after the returned PlanExecutor is deleted.
- *
- * The returned PlanExecutor will yield if and only if parsedDelete->canYield().
- *
- * Does not take ownership of its arguments.
- *
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *execOut with the PlanExecutor. The caller takes ownership of *execOut.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutorDelete(OperationContext* txn,
- Collection* collection,
- ParsedDelete* parsedDelete,
- PlanExecutor** execOut);
-
- /**
- * Get a PlanExecutor for an update operation. 'parsedUpdate' describes the query predicate
- * and update modifiers. The caller must hold the appropriate MODE_X or MODE_IX locks prior
- * to calling this function, and must not release these locks until after the returned
- * PlanExecutor is deleted.
- *
- * The returned PlanExecutor will yield if and only if parsedUpdate->canYield().
- *
- * Does not take ownership of its arguments.
- *
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *out with the PlanExecutor. The caller takes ownership of *execOut.
- *
- * If the query cannot be executed, returns a Status indicating why.
- */
- Status getExecutorUpdate(OperationContext* txn,
- Collection* collection,
- ParsedUpdate* parsedUpdate,
- OpDebug* opDebug,
- PlanExecutor** execOut);
-
- /**
- * Get a PlanExecutor for a group operation. 'rawCanonicalQuery' describes the predicate for
- * the documents to be grouped.
- *
- * Takes ownership of 'rawCanonicalQuery'. Does not take ownership of other args.
- *
- * If the query is valid and an executor could be created, returns Status::OK() and populates
- * *out with the PlanExecutor.
- *
- * If an executor could not be created, returns a Status indicating why.
- */
- Status getExecutorGroup(OperationContext* txn,
- Collection* collection,
- const GroupRequest& request,
- PlanExecutor::YieldPolicy yieldPolicy,
- PlanExecutor** execOut);
+/**
+ * Get a PlanExecutor for a query executing as part of a count command.
+ *
+ * Count doesn't care about actually examining its results; it just wants to walk through them.
+ * As such, with certain covered queries, we can skip the overhead of fetching etc. when
+ * executing a count.
+ */
+Status getExecutorCount(OperationContext* txn,
+ Collection* collection,
+ const CountRequest& request,
+ bool explain,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** execOut);
+
+/**
+ * Get a PlanExecutor for a delete operation. 'parsedDelete' describes the query predicate
+ * and delete flags like 'isMulti'. The caller must hold the appropriate MODE_X or MODE_IX
+ * locks, and must not release these locks until after the returned PlanExecutor is deleted.
+ *
+ * The returned PlanExecutor will yield if and only if parsedDelete->canYield().
+ *
+ * Does not take ownership of its arguments.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK() and populates
+ * *execOut with the PlanExecutor. The caller takes ownership of *execOut.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutorDelete(OperationContext* txn,
+ Collection* collection,
+ ParsedDelete* parsedDelete,
+ PlanExecutor** execOut);
+
+/**
+ * Get a PlanExecutor for an update operation. 'parsedUpdate' describes the query predicate
+ * and update modifiers. The caller must hold the appropriate MODE_X or MODE_IX locks prior
+ * to calling this function, and must not release these locks until after the returned
+ * PlanExecutor is deleted.
+ *
+ * The returned PlanExecutor will yield if and only if parsedUpdate->canYield().
+ *
+ * Does not take ownership of its arguments.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK() and populates
+ * *execOut with the PlanExecutor. The caller takes ownership of *execOut.
+ *
+ * If the query cannot be executed, returns a Status indicating why.
+ */
+Status getExecutorUpdate(OperationContext* txn,
+ Collection* collection,
+ ParsedUpdate* parsedUpdate,
+ OpDebug* opDebug,
+ PlanExecutor** execOut);
+
+/**
+ * Get a PlanExecutor for a group operation. 'request' describes the predicate for
+ * the documents to be grouped and the other group parameters.
+ *
+ * Does not take ownership of its arguments.
+ *
+ * If the query is valid and an executor could be created, returns Status::OK() and populates
+ * *execOut with the PlanExecutor.
+ *
+ * If an executor could not be created, returns a Status indicating why.
+ */
+Status getExecutorGroup(OperationContext* txn,
+ Collection* collection,
+ const GroupRequest& request,
+ PlanExecutor::YieldPolicy yieldPolicy,
+ PlanExecutor** execOut);
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index 7ec251bb6ee..fdc04609df8 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -41,105 +41,102 @@ using namespace mongo;
namespace {
- using std::unique_ptr;
-
- static const char* ns = "somebogusns";
-
- /**
- * Utility functions to create a CanonicalQuery
- */
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
+using std::unique_ptr;
- //
- // get_executor tests
- //
-
- //
- // filterAllowedIndexEntries
- //
-
- /**
- * Test function to check filterAllowedIndexEntries
- */
- void testAllowedIndices(const char* hintKeyPatterns[],
- const char* indexCatalogKeyPatterns[],
- const char* expectedFilteredKeyPatterns[]) {
- PlanCache planCache;
- QuerySettings querySettings;
- AllowedIndices *allowedIndicesRaw;
-
- // getAllowedIndices should return false when query shape is not yet in query settings.
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}"));
- PlanCacheKey key = planCache.computeKey(*cq);
- ASSERT_FALSE(querySettings.getAllowedIndices(key, &allowedIndicesRaw));
-
- // Add entry to query settings.
- std::vector<BSONObj> indexKeyPatterns;
- for (int i=0; hintKeyPatterns[i] != NULL; ++i) {
- indexKeyPatterns.push_back(fromjson(hintKeyPatterns[i]));
- }
- querySettings.setAllowedIndices(*cq, key, indexKeyPatterns);
-
- // Index entry vector should contain 1 entry after filtering.
- ASSERT_TRUE(querySettings.getAllowedIndices(key, &allowedIndicesRaw));
- ASSERT_FALSE(key.empty());
- ASSERT(NULL != allowedIndicesRaw);
- unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
-
- // Indexes from index catalog.
- std::vector<IndexEntry> indexEntries;
- for (int i=0; indexCatalogKeyPatterns[i] != NULL; ++i) {
- indexEntries.push_back(IndexEntry(fromjson(indexCatalogKeyPatterns[i])));
- }
-
- // Apply filter in allowed indices.
- filterAllowedIndexEntries(*allowedIndices, &indexEntries);
- size_t numExpected = 0;
- while (expectedFilteredKeyPatterns[numExpected] != NULL) {
- ASSERT_LESS_THAN(numExpected, indexEntries.size());
- ASSERT_EQUALS(indexEntries[numExpected].keyPattern,
- fromjson(expectedFilteredKeyPatterns[numExpected]));
- numExpected++;
- }
- ASSERT_EQUALS(indexEntries.size(), numExpected);
- }
-
- // Use of index filters to select compound index over single key index.
- TEST(GetExecutorTest, GetAllowedIndices) {
- const char* hintKeyPatterns[] = {"{a: 1, b: 1}", NULL};
- const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}", NULL};
- const char* expectedFilteredKeyPatterns[] = {"{a: 1, b: 1}", NULL};
- testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
- }
+static const char* ns = "somebogusns";
+
+/**
+ * Utility function to create a CanonicalQuery.
+ */
+CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr, const char* projStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+//
+// get_executor tests
+//
+
+//
+// filterAllowedIndexEntries
+//
- // Setting index filter referring to non-existent indexes
- // will effectively disregard the index catalog and
- // result in the planner generating a collection scan.
- TEST(GetExecutorTest, GetAllowedIndicesNonExistentIndexKeyPatterns) {
- const char* hintKeyPatterns[] = {"{nosuchfield: 1}", NULL};
- const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}", NULL};
- const char* expectedFilteredKeyPatterns[] = {NULL};
- testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+/**
+ * Test function to check filterAllowedIndexEntries
+ */
+void testAllowedIndices(const char* hintKeyPatterns[],
+ const char* indexCatalogKeyPatterns[],
+ const char* expectedFilteredKeyPatterns[]) {
+ PlanCache planCache;
+ QuerySettings querySettings;
+ AllowedIndices* allowedIndicesRaw;
+
+ // getAllowedIndices should return false when query shape is not yet in query settings.
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}"));
+ PlanCacheKey key = planCache.computeKey(*cq);
+ ASSERT_FALSE(querySettings.getAllowedIndices(key, &allowedIndicesRaw));
+
+ // Add entry to query settings.
+ std::vector<BSONObj> indexKeyPatterns;
+ for (int i = 0; hintKeyPatterns[i] != NULL; ++i) {
+ indexKeyPatterns.push_back(fromjson(hintKeyPatterns[i]));
+ }
+ querySettings.setAllowedIndices(*cq, key, indexKeyPatterns);
+
+ // Index entry vector should contain 1 entry after filtering.
+ ASSERT_TRUE(querySettings.getAllowedIndices(key, &allowedIndicesRaw));
+ ASSERT_FALSE(key.empty());
+ ASSERT(NULL != allowedIndicesRaw);
+ unique_ptr<AllowedIndices> allowedIndices(allowedIndicesRaw);
+
+ // Indexes from index catalog.
+ std::vector<IndexEntry> indexEntries;
+ for (int i = 0; indexCatalogKeyPatterns[i] != NULL; ++i) {
+ indexEntries.push_back(IndexEntry(fromjson(indexCatalogKeyPatterns[i])));
}
- // This test case shows how to force query execution to use
- // an index that orders items in descending order.
- TEST(GetExecutorTest, GetAllowedIndicesDescendingOrder) {
- const char* hintKeyPatterns[] = {"{a: -1}", NULL};
- const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: -1}", NULL};
- const char* expectedFilteredKeyPatterns[] = {"{a: -1}", NULL};
- testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+ // Apply filter in allowed indices.
+ filterAllowedIndexEntries(*allowedIndices, &indexEntries);
+ size_t numExpected = 0;
+ while (expectedFilteredKeyPatterns[numExpected] != NULL) {
+ ASSERT_LESS_THAN(numExpected, indexEntries.size());
+ ASSERT_EQUALS(indexEntries[numExpected].keyPattern,
+ fromjson(expectedFilteredKeyPatterns[numExpected]));
+ numExpected++;
}
+ ASSERT_EQUALS(indexEntries.size(), numExpected);
+}
+
+// Use of index filters to select compound index over single key index.
+TEST(GetExecutorTest, GetAllowedIndices) {
+ const char* hintKeyPatterns[] = {"{a: 1, b: 1}", NULL};
+ const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}", NULL};
+ const char* expectedFilteredKeyPatterns[] = {"{a: 1, b: 1}", NULL};
+ testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+}
+
+// Setting index filter referring to non-existent indexes
+// will effectively disregard the index catalog and
+// result in the planner generating a collection scan.
+TEST(GetExecutorTest, GetAllowedIndicesNonExistentIndexKeyPatterns) {
+ const char* hintKeyPatterns[] = {"{nosuchfield: 1}", NULL};
+ const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}", NULL};
+ const char* expectedFilteredKeyPatterns[] = {NULL};
+ testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+}
+
+// This test case shows how to force query execution to use
+// an index that orders items in descending order.
+TEST(GetExecutorTest, GetAllowedIndicesDescendingOrder) {
+ const char* hintKeyPatterns[] = {"{a: -1}", NULL};
+ const char* indexCatalogKeyPatterns[] = {"{a: 1}", "{a: -1}", NULL};
+ const char* expectedFilteredKeyPatterns[] = {"{a: -1}", NULL};
+ testAllowedIndices(hintKeyPatterns, indexCatalogKeyPatterns, expectedFilteredKeyPatterns);
+}
} // namespace
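For readers following the tests above: the index-filter mechanism they exercise boils down to intersecting the index catalog with the allowed key patterns. The following is a minimal, self-contained C++ sketch of that filtering step; it uses plain strings in place of IndexEntry/BSONObj key patterns and a std::set in place of AllowedIndices, so it illustrates the idea rather than the real filterAllowedIndexEntries() implementation.

// Simplified illustration (not MongoDB code) of applying an index filter to a catalog.
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
    // Key patterns named by the index filter (the "hint" patterns in the tests above).
    const std::set<std::string> allowed = {"{a: 1, b: 1}"};
    // Key patterns present in the index catalog.
    std::vector<std::string> catalog = {"{a: 1}", "{a: 1, b: 1}", "{a: 1, c: 1}"};

    // Drop every catalog entry whose key pattern is not in the allowed set.
    catalog.erase(std::remove_if(catalog.begin(),
                                 catalog.end(),
                                 [&](const std::string& kp) { return allowed.count(kp) == 0; }),
                  catalog.end());

    for (const std::string& kp : catalog) {
        std::cout << kp << std::endl;  // prints only "{a: 1, b: 1}"
    }
    return 0;
}

If the allowed set names no existing key pattern (the non-existent-index test above), the filtered catalog comes out empty, which is why the planner ends up generating a collection scan.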
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index 678276f8467..bb3054af79e 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -40,113 +40,104 @@
namespace mongo {
- GetMoreRequest::GetMoreRequest()
- : cursorid(0),
- batchSize(0) { }
-
- GetMoreRequest::GetMoreRequest(const std::string& fullns,
- CursorId id,
- boost::optional<int> sizeOfBatch)
- : nss(fullns),
- cursorid(id),
- batchSize(sizeOfBatch) { }
-
- Status GetMoreRequest::isValid() const {
- if (!nss.isValid()) {
- return Status(ErrorCodes::BadValue, str::stream()
- << "Invalid namespace for getMore: " << nss.ns());
- }
+GetMoreRequest::GetMoreRequest() : cursorid(0), batchSize(0) {}
- if (cursorid == 0) {
- return Status(ErrorCodes::BadValue, "Cursor id for getMore must be non-zero");
- }
+GetMoreRequest::GetMoreRequest(const std::string& fullns,
+ CursorId id,
+ boost::optional<int> sizeOfBatch)
+ : nss(fullns), cursorid(id), batchSize(sizeOfBatch) {}
- if (batchSize && *batchSize <= 0) {
- return Status(ErrorCodes::BadValue, str::stream()
- << "Batch size for getMore must be positive, "
- << "but received: " << *batchSize);
- }
-
- return Status::OK();
+Status GetMoreRequest::isValid() const {
+ if (!nss.isValid()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Invalid namespace for getMore: " << nss.ns());
}
- // static
- std::string GetMoreRequest::parseNs(const std::string& dbname, const BSONObj& cmdObj) {
- BSONElement collElt = cmdObj["collection"];
- const std::string coll = (collElt.type() == BSONType::String) ? collElt.String()
- : "";
+ if (cursorid == 0) {
+ return Status(ErrorCodes::BadValue, "Cursor id for getMore must be non-zero");
+ }
- return str::stream() << dbname << "." << coll;
+ if (batchSize && *batchSize <= 0) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Batch size for getMore must be positive, "
+ << "but received: " << *batchSize);
}
- // static
- StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbname,
- const BSONObj& cmdObj) {
- invariant(!dbname.empty());
+ return Status::OK();
+}
- // Required fields.
- boost::optional<CursorId> cursorid;
- boost::optional<std::string> fullns;
+// static
+std::string GetMoreRequest::parseNs(const std::string& dbname, const BSONObj& cmdObj) {
+ BSONElement collElt = cmdObj["collection"];
+ const std::string coll = (collElt.type() == BSONType::String) ? collElt.String() : "";
- // Optional field.
- boost::optional<int> batchSize;
+ return str::stream() << dbname << "." << coll;
+}
- for (BSONElement el : cmdObj) {
- const char* fieldName = el.fieldName();
- if (str::equals(fieldName, "getMore")) {
- if (el.type() != BSONType::NumberLong) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'getMore' must be of type long in: " << cmdObj};
- }
+// static
+StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbname,
+ const BSONObj& cmdObj) {
+ invariant(!dbname.empty());
- cursorid = el.Long();
- }
- else if (str::equals(fieldName, "collection")) {
- if (el.type() != BSONType::String) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'collection' must be of type string in: "
- << cmdObj};
- }
-
- fullns = parseNs(dbname, cmdObj);
- }
- else if (str::equals(fieldName, "batchSize")) {
- if (!el.isNumber()) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "Field 'batchSize' must be a number in: " << cmdObj};
- }
+ // Required fields.
+ boost::optional<CursorId> cursorid;
+ boost::optional<std::string> fullns;
- batchSize = el.numberInt();
+ // Optional field.
+ boost::optional<int> batchSize;
+
+ for (BSONElement el : cmdObj) {
+ const char* fieldName = el.fieldName();
+ if (str::equals(fieldName, "getMore")) {
+ if (el.type() != BSONType::NumberLong) {
+ return {ErrorCodes::TypeMismatch,
+ str::stream() << "Field 'getMore' must be of type long in: " << cmdObj};
}
- else if (str::equals(fieldName, "maxTimeMS")) {
- // maxTimeMS is parsed by the command handling code, so we don't repeat the parsing
- // here.
- continue;
+
+ cursorid = el.Long();
+ } else if (str::equals(fieldName, "collection")) {
+ if (el.type() != BSONType::String) {
+ return {ErrorCodes::TypeMismatch,
+ str::stream()
+ << "Field 'collection' must be of type string in: " << cmdObj};
}
- else if (!str::startsWith(fieldName, "$")) {
- return {ErrorCodes::FailedToParse,
- str::stream() << "Failed to parse: " << cmdObj << ". "
- << "Unrecognized field '" << fieldName << "'."};
+
+ fullns = parseNs(dbname, cmdObj);
+ } else if (str::equals(fieldName, "batchSize")) {
+ if (!el.isNumber()) {
+ return {ErrorCodes::TypeMismatch,
+ str::stream() << "Field 'batchSize' must be a number in: " << cmdObj};
}
- }
- if (!cursorid) {
+ batchSize = el.numberInt();
+ } else if (str::equals(fieldName, "maxTimeMS")) {
+ // maxTimeMS is parsed by the command handling code, so we don't repeat the parsing
+ // here.
+ continue;
+ } else if (!str::startsWith(fieldName, "$")) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field 'getMore' missing in: " << cmdObj};
+ str::stream() << "Failed to parse: " << cmdObj << ". "
+ << "Unrecognized field '" << fieldName << "'."};
}
+ }
- if (!fullns) {
- return {ErrorCodes::FailedToParse,
- str::stream() << "Field 'collection' missing in: " << cmdObj};
- }
+ if (!cursorid) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "Field 'getMore' missing in: " << cmdObj};
+ }
- GetMoreRequest request(*fullns, *cursorid, batchSize);
- Status validStatus = request.isValid();
- if (!validStatus.isOK()) {
- return validStatus;
- }
+ if (!fullns) {
+ return {ErrorCodes::FailedToParse,
+ str::stream() << "Field 'collection' missing in: " << cmdObj};
+ }
- return request;
+ GetMoreRequest request(*fullns, *cursorid, batchSize);
+ Status validStatus = request.isValid();
+ if (!validStatus.isOK()) {
+ return validStatus;
}
-} // namespace mongo
+ return request;
+}
+
+} // namespace mongo
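To make the validation rules above concrete (getMore requires a non-zero cursor id, a string collection name, and an optional but strictly positive batchSize), here is a small standalone sketch; it substitutes std::optional and plain strings for boost::optional, Status, and BSONObj, so it approximates isValid() rather than reproducing it.

// Simplified stand-in for GetMoreRequest::isValid(): mirrors the checks in the hunk above.
#include <iostream>
#include <optional>
#include <string>

std::string validateGetMore(const std::string& ns,
                            long long cursorid,
                            std::optional<int> batchSize) {
    if (ns.empty()) {
        // The real code checks NamespaceString::isValid(); emptiness is a rough proxy here.
        return "Invalid namespace for getMore: " + ns;
    }
    if (cursorid == 0) {
        return "Cursor id for getMore must be non-zero";
    }
    if (batchSize && *batchSize <= 0) {
        return "Batch size for getMore must be positive, but received: " +
            std::to_string(*batchSize);
    }
    return "OK";
}

int main() {
    std::cout << validateGetMore("db.coll", 123, 200) << "\n";         // OK
    std::cout << validateGetMore("db.coll", 0, std::nullopt) << "\n";  // rejected: zero cursor id
    std::cout << validateGetMore("db.coll", 123, 0) << "\n";           // rejected: batch size <= 0
    return 0;
}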
diff --git a/src/mongo/db/query/getmore_request.h b/src/mongo/db/query/getmore_request.h
index 2d30f96ed75..c967408d008 100644
--- a/src/mongo/db/query/getmore_request.h
+++ b/src/mongo/db/query/getmore_request.h
@@ -38,38 +38,38 @@
namespace mongo {
- struct GetMoreRequest {
- /**
- * Construct an empty request.
- */
- GetMoreRequest();
+struct GetMoreRequest {
+ /**
+ * Construct an empty request.
+ */
+ GetMoreRequest();
- /**
- * Construct a GetMoreRequest from the command specification and db name.
- */
- static StatusWith<GetMoreRequest> parseFromBSON(const std::string& dbname,
- const BSONObj& cmdObj);
+ /**
+ * Construct a GetMoreRequest from the command specification and db name.
+ */
+ static StatusWith<GetMoreRequest> parseFromBSON(const std::string& dbname,
+ const BSONObj& cmdObj);
- static std::string parseNs(const std::string& dbname, const BSONObj& cmdObj);
+ static std::string parseNs(const std::string& dbname, const BSONObj& cmdObj);
- const NamespaceString nss;
- const CursorId cursorid;
+ const NamespaceString nss;
+ const CursorId cursorid;
- // The batch size is optional. If not provided, we will put as many documents into the batch
- // as fit within the byte limit.
- const boost::optional<int> batchSize;
+ // The batch size is optional. If not provided, we will put as many documents into the batch
+ // as fit within the byte limit.
+ const boost::optional<int> batchSize;
- private:
- /**
- * Construct from parsed BSON
- */
- GetMoreRequest(const std::string& fullns, CursorId id, boost::optional<int> batch);
+private:
+ /**
+ * Construct from parsed BSON
+ */
+ GetMoreRequest(const std::string& fullns, CursorId id, boost::optional<int> batch);
- /**
- * Returns a non-OK status if there are semantic errors in the parsed request
- * (e.g. a negative batchSize).
- */
- Status isValid() const;
- };
+ /**
+ * Returns a non-OK status if there are semantic errors in the parsed request
+ * (e.g. a negative batchSize).
+ */
+ Status isValid() const;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp
index bd2f8c8b242..abb50b693f6 100644
--- a/src/mongo/db/query/getmore_request_test.cpp
+++ b/src/mongo/db/query/getmore_request_test.cpp
@@ -37,138 +37,152 @@
namespace {
- using namespace mongo;
-
- TEST(GetMoreRequestTest, parseFromBSONEmptyCommandObject) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db", BSONObj());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONCursorIdNotNumeric) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << "not a number" <<
- "collection" << "coll"));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << "not a number" <<
- "collection" << 123));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONMissingCollection) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123)));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONCollectionNotString) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << 456));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONBatchSizeNotInteger) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) <<
- "collection" << "coll" <<
- "batchSize" << "not a number"));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONInvalidCursorId) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(0) << "collection" << "coll"));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONNegativeCursorId) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(-123) << "collection" << "coll"));
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(-123), result.getValue().cursorid);
- ASSERT_FALSE(result.getValue().batchSize);
- }
-
- TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) <<
- "collection" << "coll" <<
- "unknown_field" << 1));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "batchSize" << -1));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "batchSize" << 0));
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
- }
-
- TEST(GetMoreRequestTest, parseFromBSONNoBatchSize) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll"));
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
- ASSERT_FALSE(result.getValue().batchSize);
- }
-
- TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "batchSize" << 200));
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
- ASSERT(result.getValue().batchSize);
- ASSERT_EQUALS(200, *result.getValue().batchSize);
- }
-
- TEST(GetMoreRequestTest, parseFromBSONIgnoreDollarPrefixedFields) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "$foo" << "bar"));
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
- }
-
- TEST(GetMoreRequestTest, parseFromBSONIgnoreMaxTimeMS) {
- StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
- "db",
- BSON("getMore" << CursorId(123) << "collection" << "coll" << "maxTimeMS" << 100));
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
- ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
- }
-
-} // namespace
+using namespace mongo;
+
+TEST(GetMoreRequestTest, parseFromBSONEmptyCommandObject) {
+ StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db", BSONObj());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONCursorIdNotNumeric) {
+ StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
+ BSON("getMore"
+ << "not a number"
+ << "collection"
+ << "coll"));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
+ StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
+ BSON("getMore"
+ << "not a number"
+ << "collection" << 123));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONMissingCollection) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db", BSON("getMore" << CursorId(123)));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONCollectionNotString) {
+ StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON(
+ "db", BSON("getMore" << CursorId(123) << "collection" << 456));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONBatchSizeNotInteger) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "batchSize"
+ << "not a number"));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONInvalidCursorId) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(0) << "collection"
+ << "coll"));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONNegativeCursorId) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(-123) << "collection"
+ << "coll"));
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(-123), result.getValue().cursorid);
+ ASSERT_FALSE(result.getValue().batchSize);
+}
+
+TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "unknown_field" << 1));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "batchSize" << -1));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "batchSize" << 0));
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
+
+TEST(GetMoreRequestTest, parseFromBSONNoBatchSize) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"));
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
+ ASSERT_FALSE(result.getValue().batchSize);
+}
+
+TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "batchSize" << 200));
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
+ ASSERT(result.getValue().batchSize);
+ ASSERT_EQUALS(200, *result.getValue().batchSize);
+}
+
+TEST(GetMoreRequestTest, parseFromBSONIgnoreDollarPrefixedFields) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "$foo"
+ << "bar"));
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
+}
+
+TEST(GetMoreRequestTest, parseFromBSONIgnoreMaxTimeMS) {
+ StatusWith<GetMoreRequest> result =
+ GetMoreRequest::parseFromBSON("db",
+ BSON("getMore" << CursorId(123) << "collection"
+ << "coll"
+ << "maxTimeMS" << 100));
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
+ ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
+}
+
+} // namespace
diff --git a/src/mongo/db/query/index_bounds.cpp b/src/mongo/db/query/index_bounds.cpp
index f7592994e29..dafd6e7fce8 100644
--- a/src/mongo/db/query/index_bounds.cpp
+++ b/src/mongo/db/query/index_bounds.cpp
@@ -33,520 +33,529 @@
namespace mongo {
- using std::string;
- using std::vector;
+using std::string;
+using std::vector;
- namespace {
+namespace {
- // Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
- int sgn(int i) {
- if (i == 0)
- return 0;
- return i > 0 ? 1 : -1;
- }
-
- /**
- * Returns BEHIND if the key is behind the interval.
- * Returns WITHIN if the key is within the interval.
- * Returns AHEAD if the key is ahead the interval.
- *
- * All directions are oriented along 'direction'.
- */
- IndexBoundsChecker::Location intervalCmp(const Interval& interval, const BSONElement& key,
- const int expectedDirection) {
- int cmp = sgn(key.woCompare(interval.start, false));
- bool startOK = (cmp == expectedDirection) || (cmp == 0 && interval.startInclusive);
- if (!startOK) { return IndexBoundsChecker::BEHIND; }
-
- cmp = sgn(key.woCompare(interval.end, false));
- bool endOK = (cmp == -expectedDirection) || (cmp == 0 && interval.endInclusive);
- if (!endOK) { return IndexBoundsChecker::AHEAD; }
-
- return IndexBoundsChecker::WITHIN;
- }
+// Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
+int sgn(int i) {
+ if (i == 0)
+ return 0;
+ return i > 0 ? 1 : -1;
+}
- } // namespace
-
- // For debugging.
- size_t IndexBounds::size() const {
- return fields.size();
+/**
+ * Returns BEHIND if the key is behind the interval.
+ * Returns WITHIN if the key is within the interval.
+ * Returns AHEAD if the key is ahead of the interval.
+ *
+ * All directions are oriented along 'direction'.
+ */
+IndexBoundsChecker::Location intervalCmp(const Interval& interval,
+ const BSONElement& key,
+ const int expectedDirection) {
+ int cmp = sgn(key.woCompare(interval.start, false));
+ bool startOK = (cmp == expectedDirection) || (cmp == 0 && interval.startInclusive);
+ if (!startOK) {
+ return IndexBoundsChecker::BEHIND;
}
- string IndexBounds::getFieldName(size_t i) const {
- return i < size() ? fields[i].name : "";
+ cmp = sgn(key.woCompare(interval.end, false));
+ bool endOK = (cmp == -expectedDirection) || (cmp == 0 && interval.endInclusive);
+ if (!endOK) {
+ return IndexBoundsChecker::AHEAD;
}
- size_t IndexBounds::getNumIntervals(size_t i) const {
- return i < size() ? fields[i].intervals.size() : 0;
- }
+ return IndexBoundsChecker::WITHIN;
+}
- Interval IndexBounds::getInterval(size_t i, size_t j) const {
- if (i < size() && j < fields[i].intervals.size()) {
- return fields[i].intervals[j];
- }
- else {
- return Interval();
- }
- }
+} // namespace
- string OrderedIntervalList::toString() const {
- mongoutils::str::stream ss;
- ss << "['" << name << "']: ";
- for (size_t j = 0; j < intervals.size(); ++j) {
- ss << intervals[j].toString();
- if (j < intervals.size() - 1) {
- ss << ", ";
- }
- }
- return ss;
- }
+// For debugging.
+size_t IndexBounds::size() const {
+ return fields.size();
+}
- // static
- void OrderedIntervalList::complement() {
- BSONObjBuilder minBob;
- minBob.appendMinKey("");
- BSONObj minObj = minBob.obj();
-
- // We complement by scanning the entire range of BSON values
- // from MinKey to MaxKey. The value from which we must begin
- // the next complemented interval is kept in 'curBoundary'.
- BSONElement curBoundary = minObj.firstElement();
-
- // If 'curInclusive' is true, then 'curBoundary' is
- // included in one of the original intervals, and hence
- // should not be included in the complement (and vice-versa
- // if 'curInclusive' is false).
- bool curInclusive = false;
-
- // We will build up a list of intervals that represents
- // the inversion of those in the OIL.
- vector<Interval> newIntervals;
- for (size_t j = 0; j < intervals.size(); ++j) {
- Interval curInt = intervals[j];
- if (0 != curInt.start.woCompare(curBoundary) ||
- (!curInclusive && !curInt.startInclusive)) {
- // Make a new interval from 'curBoundary' to
- // the start of 'curInterval'.
- BSONObjBuilder intBob;
- intBob.append(curBoundary);
- intBob.append(curInt.start);
- Interval newInt(intBob.obj(), !curInclusive, !curInt.startInclusive);
- newIntervals.push_back(newInt);
- }
+string IndexBounds::getFieldName(size_t i) const {
+ return i < size() ? fields[i].name : "";
+}
- // Reset the boundary for the next iteration.
- curBoundary = curInt.end;
- curInclusive = curInt.endInclusive;
- }
+size_t IndexBounds::getNumIntervals(size_t i) const {
+ return i < size() ? fields[i].intervals.size() : 0;
+}
- // We may have to add a final interval which ends in MaxKey.
- BSONObjBuilder maxBob;
- maxBob.appendMaxKey("");
- BSONObj maxObj = maxBob.obj();
- BSONElement maxKey = maxObj.firstElement();
- if (0 != maxKey.woCompare(curBoundary) || !curInclusive) {
+Interval IndexBounds::getInterval(size_t i, size_t j) const {
+ if (i < size() && j < fields[i].intervals.size()) {
+ return fields[i].intervals[j];
+ } else {
+ return Interval();
+ }
+}
+
+string OrderedIntervalList::toString() const {
+ mongoutils::str::stream ss;
+ ss << "['" << name << "']: ";
+ for (size_t j = 0; j < intervals.size(); ++j) {
+ ss << intervals[j].toString();
+ if (j < intervals.size() - 1) {
+ ss << ", ";
+ }
+ }
+ return ss;
+}
+
+// static
+void OrderedIntervalList::complement() {
+ BSONObjBuilder minBob;
+ minBob.appendMinKey("");
+ BSONObj minObj = minBob.obj();
+
+ // We complement by scanning the entire range of BSON values
+ // from MinKey to MaxKey. The value from which we must begin
+ // the next complemented interval is kept in 'curBoundary'.
+ BSONElement curBoundary = minObj.firstElement();
+
+ // If 'curInclusive' is true, then 'curBoundary' is
+ // included in one of the original intervals, and hence
+ // should not be included in the complement (and vice-versa
+ // if 'curInclusive' is false).
+ bool curInclusive = false;
+
+ // We will build up a list of intervals that represents
+ // the inversion of those in the OIL.
+ vector<Interval> newIntervals;
+ for (size_t j = 0; j < intervals.size(); ++j) {
+ Interval curInt = intervals[j];
+ if (0 != curInt.start.woCompare(curBoundary) || (!curInclusive && !curInt.startInclusive)) {
+ // Make a new interval from 'curBoundary' to
+ // the start of 'curInterval'.
BSONObjBuilder intBob;
intBob.append(curBoundary);
- intBob.append(maxKey);
- Interval newInt(intBob.obj(), !curInclusive, true);
+ intBob.append(curInt.start);
+ Interval newInt(intBob.obj(), !curInclusive, !curInt.startInclusive);
newIntervals.push_back(newInt);
}
- // Replace the old list of intervals with the new one.
- intervals.clear();
- intervals.insert(intervals.end(), newIntervals.begin(), newIntervals.end());
+ // Reset the boundary for the next iteration.
+ curBoundary = curInt.end;
+ curInclusive = curInt.endInclusive;
}
- string IndexBounds::toString() const {
- mongoutils::str::stream ss;
- if (isSimpleRange) {
- ss << "[" << startKey.toString() << ", ";
- if (endKey.isEmpty()) {
+ // We may have to add a final interval which ends in MaxKey.
+ BSONObjBuilder maxBob;
+ maxBob.appendMaxKey("");
+ BSONObj maxObj = maxBob.obj();
+ BSONElement maxKey = maxObj.firstElement();
+ if (0 != maxKey.woCompare(curBoundary) || !curInclusive) {
+ BSONObjBuilder intBob;
+ intBob.append(curBoundary);
+ intBob.append(maxKey);
+ Interval newInt(intBob.obj(), !curInclusive, true);
+ newIntervals.push_back(newInt);
+ }
+
+ // Replace the old list of intervals with the new one.
+ intervals.clear();
+ intervals.insert(intervals.end(), newIntervals.begin(), newIntervals.end());
+}
+
+string IndexBounds::toString() const {
+ mongoutils::str::stream ss;
+ if (isSimpleRange) {
+ ss << "[" << startKey.toString() << ", ";
+ if (endKey.isEmpty()) {
+ ss << "]";
+ } else {
+ ss << endKey.toString();
+ if (endKeyInclusive) {
ss << "]";
+ } else {
+ ss << ")";
}
- else {
- ss << endKey.toString();
- if (endKeyInclusive) {
- ss << "]";
- }
- else {
- ss << ")";
- }
- }
- return ss;
}
- for (size_t i = 0; i < fields.size(); ++i) {
- if (i > 0) {
- ss << ", ";
- }
- ss << "field #" << i << fields[i].toString();
- }
-
return ss;
}
+ for (size_t i = 0; i < fields.size(); ++i) {
+ if (i > 0) {
+ ss << ", ";
+ }
+ ss << "field #" << i << fields[i].toString();
+ }
- BSONObj IndexBounds::toBSON() const {
- BSONObjBuilder bob;
- vector<OrderedIntervalList>::const_iterator itField;
- for (itField = fields.begin(); itField != fields.end(); ++itField) {
- BSONArrayBuilder fieldBuilder(bob.subarrayStart(itField->name));
-
- vector<Interval>::const_iterator itInterval;
- for (itInterval = itField->intervals.begin()
- ; itInterval != itField->intervals.end()
- ; ++itInterval) {
- std::string intervalStr = itInterval->toString();
-
- // Insulate against hitting BSON size limit.
- if ((bob.len() + (int)intervalStr.size()) > BSONObjMaxUserSize) {
- fieldBuilder.append("warning: bounds truncated due to BSON size limit");
- fieldBuilder.doneFast();
- return bob.obj();
- }
-
- fieldBuilder.append(intervalStr);
+ return ss;
+}
+
+BSONObj IndexBounds::toBSON() const {
+ BSONObjBuilder bob;
+ vector<OrderedIntervalList>::const_iterator itField;
+ for (itField = fields.begin(); itField != fields.end(); ++itField) {
+ BSONArrayBuilder fieldBuilder(bob.subarrayStart(itField->name));
+
+ vector<Interval>::const_iterator itInterval;
+ for (itInterval = itField->intervals.begin(); itInterval != itField->intervals.end();
+ ++itInterval) {
+ std::string intervalStr = itInterval->toString();
+
+ // Insulate against hitting BSON size limit.
+ if ((bob.len() + (int)intervalStr.size()) > BSONObjMaxUserSize) {
+ fieldBuilder.append("warning: bounds truncated due to BSON size limit");
+ fieldBuilder.doneFast();
+ return bob.obj();
}
- fieldBuilder.doneFast();
+ fieldBuilder.append(intervalStr);
}
- return bob.obj();
+ fieldBuilder.doneFast();
}
- //
- // Validity checking for bounds
- //
+ return bob.obj();
+}
- bool OrderedIntervalList::isValidFor(int expectedOrientation) const {
- // Make sure each interval's start is oriented correctly with respect to its end.
- for (size_t j = 0; j < intervals.size(); ++j) {
- // false means don't consider field name.
- int cmp = sgn(intervals[j].end.woCompare(intervals[j].start, false));
+//
+// Validity checking for bounds
+//
- if (cmp == 0 && intervals[j].startInclusive
- && intervals[j].endInclusive) { continue; }
+bool OrderedIntervalList::isValidFor(int expectedOrientation) const {
+ // Make sure each interval's start is oriented correctly with respect to its end.
+ for (size_t j = 0; j < intervals.size(); ++j) {
+ // false means don't consider field name.
+ int cmp = sgn(intervals[j].end.woCompare(intervals[j].start, false));
- if (cmp != expectedOrientation) {
- return false;
- }
+ if (cmp == 0 && intervals[j].startInclusive && intervals[j].endInclusive) {
+ continue;
}
- // Make sure each interval is oriented correctly with respect to its neighbors.
- for (size_t j = 1; j < intervals.size(); ++j) {
- int cmp = sgn(intervals[j].start.woCompare(intervals[j - 1].end, false));
-
- // TODO: We could care if the end of one interval is the start of another. The bounds
- // are still valid but they're a bit sloppy; they could have been combined to form one
- // interval if either of them is inclusive.
- if (0 == cmp) { continue; }
-
- if (cmp != expectedOrientation) {
- return false;
- }
+ if (cmp != expectedOrientation) {
+ return false;
}
- return true;
}
- bool IndexBounds::isValidFor(const BSONObj& keyPattern, int direction) {
- if (isSimpleRange) {
- return direction == sgn(endKey.woCompare(startKey, keyPattern, false));
+ // Make sure each interval is oriented correctly with respect to its neighbors.
+ for (size_t j = 1; j < intervals.size(); ++j) {
+ int cmp = sgn(intervals[j].start.woCompare(intervals[j - 1].end, false));
+
+ // TODO: We could care if the end of one interval is the start of another. The bounds
+ // are still valid but they're a bit sloppy; they could have been combined to form one
+ // interval if either of them is inclusive.
+ if (0 == cmp) {
+ continue;
}
- BSONObjIterator it(keyPattern);
+ if (cmp != expectedOrientation) {
+ return false;
+ }
+ }
+ return true;
+}
- for (size_t i = 0; i < fields.size(); ++i) {
- // We expect a bound for each field in the index.
- if (!it.more()) { return false; }
- BSONElement elt = it.next();
+bool IndexBounds::isValidFor(const BSONObj& keyPattern, int direction) {
+ if (isSimpleRange) {
+ return direction == sgn(endKey.woCompare(startKey, keyPattern, false));
+ }
- const OrderedIntervalList& field = fields[i];
+ BSONObjIterator it(keyPattern);
- // Make sure the names match up.
- if (field.name != elt.fieldName()) { return false; }
+ for (size_t i = 0; i < fields.size(); ++i) {
+ // We expect a bound for each field in the index.
+ if (!it.more()) {
+ return false;
+ }
+ BSONElement elt = it.next();
- // Special indices are all inserted increasing. elt.number() will return 0 if it's
- // not a number. Special indices are strings, not numbers.
- int expectedOrientation = direction * ((elt.number() >= 0) ? 1 : -1);
+ const OrderedIntervalList& field = fields[i];
- if (!field.isValidFor(expectedOrientation)) {
- return false;
- }
+ // Make sure the names match up.
+ if (field.name != elt.fieldName()) {
+ return false;
}
- return !it.more();
+ // Special indices are all inserted increasing. elt.number() will return 0 if it's
+ // not a number. Special indices are strings, not numbers.
+ int expectedOrientation = direction * ((elt.number() >= 0) ? 1 : -1);
+
+ if (!field.isValidFor(expectedOrientation)) {
+ return false;
+ }
}
- //
- // Iteration over index bounds
- //
+ return !it.more();
+}
+
+//
+// Iteration over index bounds
+//
+
+IndexBoundsChecker::IndexBoundsChecker(const IndexBounds* bounds,
+ const BSONObj& keyPattern,
+ int scanDirection)
+ : _bounds(bounds), _curInterval(bounds->fields.size(), 0) {
+ BSONObjIterator it(keyPattern);
+ while (it.more()) {
+ int indexDirection = it.next().number() >= 0 ? 1 : -1;
+ _expectedDirection.push_back(indexDirection * scanDirection);
+ }
+}
- IndexBoundsChecker::IndexBoundsChecker(const IndexBounds* bounds, const BSONObj& keyPattern,
- int scanDirection)
- : _bounds(bounds), _curInterval(bounds->fields.size(), 0) {
+bool IndexBoundsChecker::getStartSeekPoint(IndexSeekPoint* out) {
+ out->prefixLen = 0;
+ out->prefixExclusive = false;
+ out->keySuffix.resize(_bounds->fields.size());
+ out->suffixInclusive.resize(_bounds->fields.size());
- BSONObjIterator it(keyPattern);
- while (it.more()) {
- int indexDirection = it.next().number() >= 0 ? 1 : -1;
- _expectedDirection.push_back(indexDirection * scanDirection);
+ for (size_t i = 0; i < _bounds->fields.size(); ++i) {
+ if (0 == _bounds->fields[i].intervals.size()) {
+ return false;
}
+ out->keySuffix[i] = &_bounds->fields[i].intervals[0].start;
+ out->suffixInclusive[i] = _bounds->fields[i].intervals[0].startInclusive;
}
- bool IndexBoundsChecker::getStartSeekPoint(IndexSeekPoint* out) {
- out->prefixLen = 0;
- out->prefixExclusive = false;
- out->keySuffix.resize(_bounds->fields.size());
- out->suffixInclusive.resize(_bounds->fields.size());
+ return true;
+}
+
+bool IndexBoundsChecker::findLeftmostProblem(const vector<BSONElement>& keyValues,
+ size_t* where,
+ Location* what) {
+ // For each field in the index key, see if it's in the interval it should be.
+ for (size_t i = 0; i < _curInterval.size(); ++i) {
+ const OrderedIntervalList& field = _bounds->fields[i];
+ const Interval& currentInterval = field.intervals[_curInterval[i]];
+ Location cmp = intervalCmp(currentInterval, keyValues[i], _expectedDirection[i]);
+
+ // If it's not in the interval we think it is...
+ if (0 != cmp) {
+ *where = i;
+ *what = cmp;
+ return true;
+ }
+ }
- for (size_t i = 0; i < _bounds->fields.size(); ++i) {
- if (0 == _bounds->fields[i].intervals.size()) {
- return false;
- }
- out->keySuffix[i] = &_bounds->fields[i].intervals[0].start;
- out->suffixInclusive[i] = _bounds->fields[i].intervals[0].startInclusive;
+ return false;
+}
+
+bool IndexBoundsChecker::spaceLeftToAdvance(size_t fieldsToCheck,
+ const vector<BSONElement>& keyValues) {
+ // Check end conditions. Since we need to move the keys before
+ // firstNonContainedField forward, let's make sure that those fields are not at the
+ // end of their bounds.
+ for (size_t i = 0; i < fieldsToCheck; ++i) {
+ // Field 'i' isn't at its last interval. There's possibly a key we could move forward
+ // to, either in the current interval or the next one.
+ if (_curInterval[i] != _bounds->fields[i].intervals.size() - 1) {
+ return true;
}
- return true;
- }
+ // Field 'i' is at its last interval.
+ const Interval& ival = _bounds->fields[i].intervals[_curInterval[i]];
- bool IndexBoundsChecker::findLeftmostProblem(const vector<BSONElement>& keyValues,
- size_t* where,
- Location* what) {
- // For each field in the index key, see if it's in the interval it should be.
- for (size_t i = 0; i < _curInterval.size(); ++i) {
- const OrderedIntervalList& field = _bounds->fields[i];
- const Interval& currentInterval = field.intervals[_curInterval[i]];
- Location cmp = intervalCmp(currentInterval, keyValues[i], _expectedDirection[i]);
-
- // If it's not in the interval we think it is...
- if (0 != cmp) {
- *where = i;
- *what = cmp;
- return true;
- }
+ // We're OK if it's an open interval. There are an infinite number of keys between any
+ // key and the end point...
+ if (!ival.endInclusive) {
+ return true;
}
- return false;
+ // If it's a closed interval, we're fine so long as we haven't hit the end point of
+ // the interval.
+ if (-_expectedDirection[i] == sgn(keyValues[i].woCompare(ival.end, false))) {
+ return true;
+ }
}
- bool IndexBoundsChecker::spaceLeftToAdvance(size_t fieldsToCheck,
- const vector<BSONElement>& keyValues) {
- // Check end conditions. Since we need to move the keys before
- // firstNonContainedField forward, let's make sure that those fields are not at the
- // end of their bounds.
- for (size_t i = 0; i < fieldsToCheck; ++i) {
- // Field 'i' isn't at its last interval. There's possibly a key we could move forward
- // to, either in the current interval or the next one.
- if (_curInterval[i] != _bounds->fields[i].intervals.size() - 1) {
- return true;
- }
+ return false;
+}
+
+bool IndexBoundsChecker::isValidKey(const BSONObj& key) {
+ BSONObjIterator it(key);
+ size_t curOil = 0;
+ while (it.more()) {
+ BSONElement elt = it.next();
+ size_t whichInterval;
+ Location loc = findIntervalForField(
+ elt, _bounds->fields[curOil], _expectedDirection[curOil], &whichInterval);
+ if (WITHIN != loc) {
+ return false;
+ }
+ ++curOil;
+ }
+ return true;
+}
+
+IndexBoundsChecker::KeyState IndexBoundsChecker::checkKey(const BSONObj& key, IndexSeekPoint* out) {
+ verify(_curInterval.size() > 0);
+ out->keySuffix.resize(_curInterval.size());
+ out->suffixInclusive.resize(_curInterval.size());
+
+ // It's useful later to go from a field number to the value for that field. Store these.
+ // TODO: on optimization pass, populate the vector as-needed and keep the vector around as a
+ // member variable
+ vector<BSONElement> keyValues;
+ BSONObjIterator keyIt(key);
+ while (keyIt.more()) {
+ keyValues.push_back(keyIt.next());
+ }
+ verify(keyValues.size() == _curInterval.size());
- // Field 'i' is at its last interval.
- const Interval& ival = _bounds->fields[i].intervals[_curInterval[i]];
+ size_t firstNonContainedField;
+ Location orientation;
- // We're OK if it's an open interval. There are an infinite number of keys between any
- // key and the end point...
- if (!ival.endInclusive) {
- return true;
- }
+ if (!findLeftmostProblem(keyValues, &firstNonContainedField, &orientation)) {
+ // All fields in the index are within the current interval. Caller can use the key.
+ return VALID;
+ }
- // If it's a closed interval, we're fine so long as we haven't hit the end point of
- // the interval.
- if (-_expectedDirection[i] == sgn(keyValues[i].woCompare(ival.end, false))) {
- return true;
- }
+ // Field number 'firstNonContainedField' of the index key is before its current interval.
+ if (BEHIND == orientation) {
+ // It's behind our current interval, but our current interval could be wrong. Start all
+ // intervals from firstNonContainedField to the right over...
+ for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
+ _curInterval[i] = 0;
}
- return false;
- }
-
- bool IndexBoundsChecker::isValidKey(const BSONObj& key) {
- BSONObjIterator it(key);
- size_t curOil = 0;
- while (it.more()) {
- BSONElement elt = it.next();
- size_t whichInterval;
- Location loc = findIntervalForField(elt, _bounds->fields[curOil], _expectedDirection[curOil], &whichInterval);
- if (WITHIN != loc) {
- return false;
- }
- ++curOil;
+ // ...and try again. This call modifies 'orientation', so we may check its value again
+ // in the clause below if field number 'firstNonContainedField' isn't in its first
+ // interval.
+ if (!findLeftmostProblem(keyValues, &firstNonContainedField, &orientation)) {
+ return VALID;
}
- return true;
}
- IndexBoundsChecker::KeyState IndexBoundsChecker::checkKey(const BSONObj& key,
- IndexSeekPoint* out) {
- verify(_curInterval.size() > 0);
- out->keySuffix.resize(_curInterval.size());
- out->suffixInclusive.resize(_curInterval.size());
-
- // It's useful later to go from a field number to the value for that field. Store these.
- // TODO: on optimization pass, populate the vector as-needed and keep the vector around as a
- // member variable
- vector<BSONElement> keyValues;
- BSONObjIterator keyIt(key);
- while (keyIt.more()) {
- keyValues.push_back(keyIt.next());
+ // Field number 'firstNonContainedField' of the index key is before all current intervals.
+ if (BEHIND == orientation) {
+ // Tell the caller to move forward to the start of the current interval.
+ out->keyPrefix = key.getOwned();
+ out->prefixLen = firstNonContainedField;
+ out->prefixExclusive = false;
+
+ for (size_t j = firstNonContainedField; j < _curInterval.size(); ++j) {
+ const OrderedIntervalList& oil = _bounds->fields[j];
+ out->keySuffix[j] = &oil.intervals[_curInterval[j]].start;
+ out->suffixInclusive[j] = oil.intervals[_curInterval[j]].startInclusive;
}
- verify(keyValues.size() == _curInterval.size());
- size_t firstNonContainedField;
- Location orientation;
+ return MUST_ADVANCE;
+ }
- if (!findLeftmostProblem(keyValues, &firstNonContainedField, &orientation)) {
- // All fields in the index are within the current interval. Caller can use the key.
- return VALID;
- }
+ verify(AHEAD == orientation);
+
+ // Field number 'firstNonContainedField' of the index key is after interval we think it's
+ // in. Fields 0 through 'firstNonContained-1' are within their current intervals and we can
+ // ignore them.
+ while (firstNonContainedField < _curInterval.size()) {
+ // Find the interval that contains our field.
+ size_t newIntervalForField;
+
+ Location where = findIntervalForField(keyValues[firstNonContainedField],
+ _bounds->fields[firstNonContainedField],
+ _expectedDirection[firstNonContainedField],
+ &newIntervalForField);
+
+ if (WITHIN == where) {
+ // Found a new interval for field firstNonContainedField. Move our internal choice
+ // of interval to that.
+ _curInterval[firstNonContainedField] = newIntervalForField;
+ // Let's find valid intervals for fields to the right.
+ ++firstNonContainedField;
+ } else if (BEHIND == where) {
+ // firstNonContained field is between the intervals (newIntervalForField-1) and
+ // newIntervalForField. We have to tell the caller to move forward until he at
+ // least hits our new current interval.
+ _curInterval[firstNonContainedField] = newIntervalForField;
+
+ // All other fields to the right start at their first interval.
+ for (size_t i = firstNonContainedField + 1; i < _curInterval.size(); ++i) {
+ _curInterval[i] = 0;
+ }
- // Field number 'firstNonContainedField' of the index key is before its current interval.
- if (BEHIND == orientation) {
- // It's behind our current interval, but our current interval could be wrong. Start all
- // intervals from firstNonContainedField to the right over...
+ out->keyPrefix = key.getOwned();
+ out->prefixLen = firstNonContainedField;
+ out->prefixExclusive = false;
for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
- _curInterval[i] = 0;
+ const OrderedIntervalList& oil = _bounds->fields[i];
+ out->keySuffix[i] = &oil.intervals[_curInterval[i]].start;
+ out->suffixInclusive[i] = oil.intervals[_curInterval[i]].startInclusive;
}
- // ...and try again. This call modifies 'orientation', so we may check its value again
- // in the clause below if field number 'firstNonContainedField' isn't in its first
- // interval.
- if (!findLeftmostProblem(keyValues, &firstNonContainedField, &orientation)) {
- return VALID;
+ return MUST_ADVANCE;
+ } else {
+ verify(AHEAD == where);
+ // Field number 'firstNonContainedField' cannot possibly be placed into an interval,
+ // as it is already past its last possible interval. The caller must move forward
+ // to a key with a greater value for the previous field.
+
+ // If all fields to the left have hit the end of their intervals, we can't ask them
+ // to move forward and we should stop iterating.
+ if (!spaceLeftToAdvance(firstNonContainedField, keyValues)) {
+ return DONE;
}
- }
- // Field number 'firstNonContainedField' of the index key is before all current intervals.
- if (BEHIND == orientation) {
- // Tell the caller to move forward to the start of the current interval.
out->keyPrefix = key.getOwned();
out->prefixLen = firstNonContainedField;
- out->prefixExclusive = false;
+ out->prefixExclusive = true;
- for (size_t j = firstNonContainedField; j < _curInterval.size(); ++j) {
- const OrderedIntervalList& oil = _bounds->fields[j];
- out->keySuffix[j] = &oil.intervals[_curInterval[j]].start;
- out->suffixInclusive[j] = oil.intervals[_curInterval[j]].startInclusive;
+ for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
+ _curInterval[i] = 0;
}
+ // If movePastKeyElts is true, we don't examine any fields after the keyEltsToUse
+ // fields of the key. As such we don't populate the out/incOut.
return MUST_ADVANCE;
}
-
- verify(AHEAD == orientation);
-
- // Field number 'firstNonContainedField' of the index key is after interval we think it's
- // in. Fields 0 through 'firstNonContained-1' are within their current intervals and we can
- // ignore them.
- while (firstNonContainedField < _curInterval.size()) {
- // Find the interval that contains our field.
- size_t newIntervalForField;
-
- Location where = findIntervalForField(keyValues[firstNonContainedField],
- _bounds->fields[firstNonContainedField],
- _expectedDirection[firstNonContainedField],
- &newIntervalForField);
-
- if (WITHIN == where) {
- // Found a new interval for field firstNonContainedField. Move our internal choice
- // of interval to that.
- _curInterval[firstNonContainedField] = newIntervalForField;
- // Let's find valid intervals for fields to the right.
- ++firstNonContainedField;
- }
- else if (BEHIND == where) {
- // firstNonContained field is between the intervals (newIntervalForField-1) and
- // newIntervalForField. We have to tell the caller to move forward until he at
- // least hits our new current interval.
- _curInterval[firstNonContainedField] = newIntervalForField;
-
- // All other fields to the right start at their first interval.
- for (size_t i = firstNonContainedField + 1; i < _curInterval.size(); ++i) {
- _curInterval[i] = 0;
- }
-
- out->keyPrefix = key.getOwned();
- out->prefixLen = firstNonContainedField;
- out->prefixExclusive = false;
- for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
- const OrderedIntervalList& oil = _bounds->fields[i];
- out->keySuffix[i] = &oil.intervals[_curInterval[i]].start;
- out->suffixInclusive[i] = oil.intervals[_curInterval[i]].startInclusive;
- }
-
- return MUST_ADVANCE;
- }
- else {
- verify (AHEAD == where);
- // Field number 'firstNonContainedField' cannot possibly be placed into an interval,
- // as it is already past its last possible interval. The caller must move forward
- // to a key with a greater value for the previous field.
-
- // If all fields to the left have hit the end of their intervals, we can't ask them
- // to move forward and we should stop iterating.
- if (!spaceLeftToAdvance(firstNonContainedField, keyValues)) {
- return DONE;
- }
-
- out->keyPrefix = key.getOwned();
- out->prefixLen = firstNonContainedField;
- out->prefixExclusive = true;
-
- for (size_t i = firstNonContainedField; i < _curInterval.size(); ++i) {
- _curInterval[i] = 0;
- }
-
- // If movePastKeyElts is true, we don't examine any fields after the keyEltsToUse
- // fields of the key. As such we don't populate the out/incOut.
- return MUST_ADVANCE;
- }
- }
-
- verify(firstNonContainedField == _curInterval.size());
- return VALID;
}
- namespace {
-
- /**
- * Returns true if key (first member of pair) is AHEAD of interval
- * along 'direction' (second member of pair).
- */
- bool isKeyAheadOfInterval(const Interval& interval,
- const std::pair<BSONElement, int>& keyAndDirection) {
- const BSONElement& elt = keyAndDirection.first;
- int expectedDirection = keyAndDirection.second;
- IndexBoundsChecker::Location where = intervalCmp(interval, elt, expectedDirection);
- return IndexBoundsChecker::AHEAD == where;
- }
+ verify(firstNonContainedField == _curInterval.size());
+ return VALID;
+}
- } // namespace
-
- // static
- IndexBoundsChecker::Location IndexBoundsChecker::findIntervalForField(const BSONElement& elt,
- const OrderedIntervalList& oil, const int expectedDirection, size_t* newIntervalIndex) {
- // Binary search for interval.
- // Intervals are ordered in the same direction as our keys.
- // Key behind all intervals: [BEHIND, ..., BEHIND]
- // Key ahead of all intervals: [AHEAD, ..., AHEAD]
- // Key within one interval: [AHEAD, ..., WITHIN, BEHIND, ...]
- // Key not in any inteval: [AHEAD, ..., AHEAD, BEHIND, ...]
-
- // Find left-most BEHIND/WITHIN interval.
- vector<Interval>::const_iterator i =
- std::lower_bound(oil.intervals.begin(), oil.intervals.end(),
- std::make_pair(elt, expectedDirection), isKeyAheadOfInterval);
-
- // Key ahead of all intervals.
- if (i == oil.intervals.end()) {
- return AHEAD;
- }
+namespace {
- // Found either interval containing key or left-most BEHIND interval.
- *newIntervalIndex = std::distance(oil.intervals.begin(), i);
+/**
+ * Returns true if key (first member of pair) is AHEAD of interval
+ * along 'direction' (second member of pair).
+ */
+bool isKeyAheadOfInterval(const Interval& interval,
+ const std::pair<BSONElement, int>& keyAndDirection) {
+ const BSONElement& elt = keyAndDirection.first;
+ int expectedDirection = keyAndDirection.second;
+ IndexBoundsChecker::Location where = intervalCmp(interval, elt, expectedDirection);
+ return IndexBoundsChecker::AHEAD == where;
+}
+
+} // namespace
+
+// static
+IndexBoundsChecker::Location IndexBoundsChecker::findIntervalForField(
+ const BSONElement& elt,
+ const OrderedIntervalList& oil,
+ const int expectedDirection,
+ size_t* newIntervalIndex) {
+ // Binary search for interval.
+ // Intervals are ordered in the same direction as our keys.
+ // Key behind all intervals: [BEHIND, ..., BEHIND]
+ // Key ahead of all intervals: [AHEAD, ..., AHEAD]
+ // Key within one interval: [AHEAD, ..., WITHIN, BEHIND, ...]
+ // Key not in any interval: [AHEAD, ..., AHEAD, BEHIND, ...]
+
+ // Find left-most BEHIND/WITHIN interval.
+ vector<Interval>::const_iterator i = std::lower_bound(oil.intervals.begin(),
+ oil.intervals.end(),
+ std::make_pair(elt, expectedDirection),
+ isKeyAheadOfInterval);
+
+ // Key ahead of all intervals.
+ if (i == oil.intervals.end()) {
+ return AHEAD;
+ }
- // Additional check to determine if interval contains key.
- Location where = intervalCmp(*i, elt, expectedDirection);
- invariant(BEHIND == where || WITHIN == where);
+ // Found either interval containing key or left-most BEHIND interval.
+ *newIntervalIndex = std::distance(oil.intervals.begin(), i);
- return where;
- }
+ // Additional check to determine if interval contains key.
+ Location where = intervalCmp(*i, elt, expectedDirection);
+ invariant(BEHIND == where || WITHIN == where);
+
+ return where;
+}
} // namespace mongo
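The complement() routine above scans from MinKey to MaxKey, emitting the gaps between consecutive intervals with inverted inclusivity. Below is a self-contained sketch of the same walk using int endpoints (with +/-1000 standing in for MinKey/MaxKey); it is an illustration of the algorithm, not the MongoDB Interval/OrderedIntervalList types.

// Simplified illustration (not MongoDB code) of OrderedIntervalList::complement().
#include <iostream>
#include <vector>

struct Ival {
    int start, end;
    bool startInclusive, endInclusive;
};

std::vector<Ival> complement(const std::vector<Ival>& in) {
    const int kMin = -1000, kMax = 1000;  // stand-ins for MinKey/MaxKey
    int curBoundary = kMin;
    bool curInclusive = false;
    std::vector<Ival> out;
    for (const Ival& cur : in) {
        // Emit the gap between the previous boundary and this interval's start, if any.
        if (cur.start != curBoundary || (!curInclusive && !cur.startInclusive)) {
            out.push_back({curBoundary, cur.start, !curInclusive, !cur.startInclusive});
        }
        curBoundary = cur.end;
        curInclusive = cur.endInclusive;
    }
    // Final gap up to MaxKey, if the last interval didn't reach it.
    if (curBoundary != kMax || !curInclusive) {
        out.push_back({curBoundary, kMax, !curInclusive, true});
    }
    return out;
}

int main() {
    // Complement of [3, 6), [8, 10].
    std::vector<Ival> oil = {{3, 6, true, false}, {8, 10, true, true}};
    for (const Ival& i : complement(oil)) {
        std::cout << (i.startInclusive ? '[' : '(') << i.start << ", " << i.end
                  << (i.endInclusive ? ']' : ')') << "\n";
    }
    return 0;
}

Run on the OIL [3, 6), [8, 10], it prints [-1000, 3), [6, 8), (10, 1000], matching the complement example in the header comment that follows.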
diff --git a/src/mongo/db/query/index_bounds.h b/src/mongo/db/query/index_bounds.h
index d1e140f3932..f70f75def8b 100644
--- a/src/mongo/db/query/index_bounds.h
+++ b/src/mongo/db/query/index_bounds.h
@@ -37,201 +37,204 @@
namespace mongo {
+/**
+ * An ordered list of intervals for one field.
+ */
+struct OrderedIntervalList {
+ OrderedIntervalList() {}
+ OrderedIntervalList(const std::string& n) : name(n) {}
+
+ // Must be ordered according to the index order.
+ std::vector<Interval> intervals;
+
+ // TODO: We could drop this. Only used in IndexBounds::isValidFor.
+ std::string name;
+
+ bool isValidFor(int expectedOrientation) const;
+ std::string toString() const;
+
/**
- * An ordered list of intervals for one field.
+ * Complements the OIL. Used by the index bounds builder in order
+ * to create index bounds for $not predicates.
+ *
+ * Assumes the OIL is increasing, and therefore must be called prior to
+ * alignBounds(...).
+ *
+ * Example:
+ * The complement of [3, 6), [8, 10] is [MinKey, 3), [6, 8), (10, MaxKey],
+ * where this OIL has direction==1.
*/
- struct OrderedIntervalList {
- OrderedIntervalList() { }
- OrderedIntervalList(const std::string& n) : name(n) { }
-
- // Must be ordered according to the index order.
- std::vector<Interval> intervals;
-
- // TODO: We could drop this. Only used in IndexBounds::isValidFor.
- std::string name;
-
- bool isValidFor(int expectedOrientation) const;
- std::string toString() const;
-
- /**
- * Complements the OIL. Used by the index bounds builder in order
- * to create index bounds for $not predicates.
- *
- * Assumes the OIL is increasing, and therefore must be called prior to
- * alignBounds(...).
- *
- * Example:
- * The complement of [3, 6), [8, 10] is [MinKey, 3), [6, 8), (20, MaxKey],
- * where this OIL has direction==1.
- */
- void complement();
- };
+ void complement();
+};
+
+/**
+ * Tied to an index. Permissible values for all fields in the index. Requires the index to
+ * interpret. Previously known as FieldRangeVector.
+ */
+struct IndexBounds {
+ IndexBounds() : isSimpleRange(false), endKeyInclusive(false) {}
+
+ // For each indexed field, the values that the field is allowed to take on.
+ std::vector<OrderedIntervalList> fields;
+
+ // Debugging check.
+ // We must have as many fields as the key pattern does.
+ // The fields must be oriented in the direction we'd encounter them given the indexing
+ // direction (the value of the field in keyPattern) and index traversal direction provided
+ // by 'direction'.
+ //
+ // An example: [7, 20]
+ // We can traverse this forward if indexed ascending
+ // We can traverse this backwards if indexed descending.
+ bool isValidFor(const BSONObj& keyPattern, int direction);
+
+ // Methods below used for debugging purpose only. Do not use outside testing code.
+ size_t size() const;
+ std::string getFieldName(size_t i) const;
+ size_t getNumIntervals(size_t i) const;
+ Interval getInterval(size_t i, size_t j) const;
+ std::string toString() const;
/**
- * Tied to an index. Permissible values for all fields in the index. Requires the index to
- * interpret. Previously known as FieldRangeVector.
+ * BSON format for explain. The format is an array of strings for each field.
+ * Each string represents an interval. The strings use "[" and "]" if the interval
+ * bounds are inclusive, and "(" / ")" if exclusive.
+ *
+ * Ex.
+ * {a: ["[1, 1]", "(3, 10)"], b: ["[Infinity, 10)"] }
*/
- struct IndexBounds {
- IndexBounds() : isSimpleRange(false), endKeyInclusive(false) { }
-
- // For each indexed field, the values that the field is allowed to take on.
- std::vector<OrderedIntervalList> fields;
-
- // Debugging check.
- // We must have as many fields the key pattern does.
- // The fields must be oriented in the direction we'd encounter them given the indexing
- // direction (the value of the field in keyPattern) and index traversal direction provided
- // by 'direction'.
- //
- // An example: [7, 20]
- // We can traverse this forward if indexed ascending
- // We can traverse this backwards if indexed descending.
- bool isValidFor(const BSONObj& keyPattern, int direction);
-
- // Methods below used for debugging purpose only. Do not use outside testing code.
- size_t size() const;
- std::string getFieldName(size_t i) const;
- size_t getNumIntervals(size_t i) const;
- Interval getInterval(size_t i, size_t j) const;
- std::string toString() const;
-
- /**
- * BSON format for explain. The format is an array of strings for each field.
- * Each string represents an interval. The strings use "[" and "]" if the interval
- * bounds are inclusive, and "(" / ")" if exclusive.
- *
- * Ex.
- * {a: ["[1, 1]", "(3, 10)"], b: ["[Infinity, 10)"] }
- */
- BSONObj toBSON() const;
-
- // TODO: we use this for max/min scan. Consider migrating that.
- bool isSimpleRange;
- BSONObj startKey;
- BSONObj endKey;
- bool endKeyInclusive;
+ BSONObj toBSON() const;
+
+ // TODO: we use this for max/min scan. Consider migrating that.
+ bool isSimpleRange;
+ BSONObj startKey;
+ BSONObj endKey;
+ bool endKeyInclusive;
+};
+
+/**
+ * A helper used by IndexScan to navigate an index.
+ */
+class IndexBoundsChecker {
+public:
+ /**
+ * keyPattern is the index that we're iterating over.
+ * bounds are the bounds we're allowed to iterate over.
+ * direction is the direction we're moving over the index, 1 or -1.
+ *
+ * Bounds not owned by us.
+ */
+ IndexBoundsChecker(const IndexBounds* bounds, const BSONObj& keyPattern, int direction);
+
+
+ /**
+     * Get the IndexSeekPoint that we should start with.
+     *
+     * Returns false if there are no possible index entries that match the bounds. In this case
+     * there is no valid start point to seek to, so 'out' will not be filled out and the caller
+     * should emit no results.
+ */
+ bool getStartSeekPoint(IndexSeekPoint* out);
+
+ /**
+ * The states of a key from an index scan. See checkKey below.
+ */
+ enum KeyState {
+ VALID,
+ MUST_ADVANCE,
+ DONE,
};
/**
- * A helper used by IndexScan to navigate an index.
+ * Is 'key' a valid key? Note that this differs from checkKey, which assumes that it
+ * receives keys in sorted order.
+ */
+ bool isValidKey(const BSONObj& key);
+
+ /**
+ * This function checks if the key is within the bounds we're iterating over and updates any
+ * internal state required to efficiently determine if the key is within our bounds.
+ *
+ * Possible outcomes:
+ *
+ * 1. The key is in our bounds. Returns VALID. Caller can use the data associated with the
+ * key.
+ *
+ * 2. The key is not in our bounds but has not exceeded the maximum value in our bounds.
+ * Returns MUST_ADVANCE. Caller must advance to the query provided in the out parameters
+ * and call checkKey again.
+ *
+ * 3. The key is past our bounds. Returns DONE. No further keys will satisfy the bounds
+ * and the caller should stop.
+ *
+     * The out parameter 'query' must be non-NULL; it is only filled out when we return
+     * MUST_ADVANCE.
+     *
+     * In parameters:
+     * currentKey is the index key.
*/
- class IndexBoundsChecker {
- public:
- /**
- * keyPattern is the index that we're iterating over.
- * bounds are the bounds we're allowed to iterate over.
- * direction is the direction we're moving over the index, 1 or -1.
- *
- * Bounds not owned by us.
- */
- IndexBoundsChecker(const IndexBounds* bounds, const BSONObj& keyPattern, int direction);
-
-
- /**
- * Get the IndexSeekPoint that we should with.
- *
- * Returns false if there are no possible index entries that match the bounds. In this case
- * there is no valid start point to seek to so out will not be filled out and the caller
- * should emit no results.
- */
- bool getStartSeekPoint(IndexSeekPoint* out);
-
- /**
- * The states of a key from an index scan. See checkKey below.
- */
- enum KeyState {
- VALID,
- MUST_ADVANCE,
- DONE,
- };
-
- /**
- * Is 'key' a valid key? Note that this differs from checkKey, which assumes that it
- * receives keys in sorted order.
- */
- bool isValidKey(const BSONObj& key);
-
- /**
- * This function checks if the key is within the bounds we're iterating over and updates any
- * internal state required to efficiently determine if the key is within our bounds.
- *
- * Possible outcomes:
- *
- * 1. The key is in our bounds. Returns VALID. Caller can use the data associated with the
- * key.
- *
- * 2. The key is not in our bounds but has not exceeded the maximum value in our bounds.
- * Returns MUST_ADVANCE. Caller must advance to the query provided in the out parameters
- * and call checkKey again.
- *
- * 3. The key is past our bounds. Returns DONE. No further keys will satisfy the bounds
- * and the caller should stop.
- *
- * keyEltsToUse, movePastKeyElts, out, and incOut must all be non-NULL.
- * out and incOut must already be resized to have as many elements as the key has fields.
- *
- * In parameters:
- * currentKey is the index key.
- *
- * Out parameter only valid if we return MUST_ADVANCE.
- */
- KeyState checkKey(const BSONObj& currentKey, IndexSeekPoint* query);
-
- /**
- * Relative position of a key to an interval.
- * Exposed for testing only.
- */
- enum Location {
- BEHIND = -1,
- WITHIN = 0,
- AHEAD = 1,
- };
-
- /**
- * If 'elt' is in any interval, return WITHIN and set 'newIntervalIndex' to the index of the
- * interval in the ordered interval list.
- *
- * If 'elt' is not in any interval but could be advanced to be in one, return BEHIND and set
- * 'newIntervalIndex' to the index of the interval that 'elt' could be advanced to.
- *
- * If 'elt' cannot be advanced to any interval, return AHEAD.
- *
- * Exposed for testing only.
- *
- * TODO(efficiency): Start search from a given index.
- */
- static Location findIntervalForField(const BSONElement &elt, const OrderedIntervalList& oil,
- const int expectedDirection, size_t* newIntervalIndex);
-
- private:
- /**
- * Find the first field in the key that isn't within the interval we think it is. Returns
- * false if every field is in the interval we think it is. Returns true and populates out
- * parameters if a field isn't in the interval we think it is.
- *
- * Out parameters set if we return true:
- * 'where' is the leftmost field that isn't in the interval we think it is.
- * 'what' is the orientation of the field with respect to that interval.
- */
- bool findLeftmostProblem(const std::vector<BSONElement>& keyValues, size_t* where,
- Location* what);
-
- /**
- * Returns true if it's possible to advance any of the first 'fieldsToCheck' fields of the
- * index key and still be within valid index bounds.
- *
- * keyValues are the elements of the index key in order.
- */
- bool spaceLeftToAdvance(size_t fieldsToCheck, const std::vector<BSONElement>& keyValues);
-
- // The actual bounds. Must outlive this object. Not owned by us.
- const IndexBounds* _bounds;
-
- // For each field, which interval are we currently in?
- std::vector<size_t> _curInterval;
-
- // Direction of scan * direction of indexing.
- std::vector<int> _expectedDirection;
+ KeyState checkKey(const BSONObj& currentKey, IndexSeekPoint* query);
+
+ /**
+ * Relative position of a key to an interval.
+ * Exposed for testing only.
+ */
+ enum Location {
+ BEHIND = -1,
+ WITHIN = 0,
+ AHEAD = 1,
};
+ /**
+ * If 'elt' is in any interval, return WITHIN and set 'newIntervalIndex' to the index of the
+ * interval in the ordered interval list.
+ *
+ * If 'elt' is not in any interval but could be advanced to be in one, return BEHIND and set
+ * 'newIntervalIndex' to the index of the interval that 'elt' could be advanced to.
+ *
+ * If 'elt' cannot be advanced to any interval, return AHEAD.
+ *
+ * Exposed for testing only.
+ *
+ * TODO(efficiency): Start search from a given index.
+ */
+ static Location findIntervalForField(const BSONElement& elt,
+ const OrderedIntervalList& oil,
+ const int expectedDirection,
+ size_t* newIntervalIndex);
+
+private:
+ /**
+ * Find the first field in the key that isn't within the interval we think it is. Returns
+ * false if every field is in the interval we think it is. Returns true and populates out
+ * parameters if a field isn't in the interval we think it is.
+ *
+ * Out parameters set if we return true:
+ * 'where' is the leftmost field that isn't in the interval we think it is.
+ * 'what' is the orientation of the field with respect to that interval.
+ */
+ bool findLeftmostProblem(const std::vector<BSONElement>& keyValues,
+ size_t* where,
+ Location* what);
+
+ /**
+ * Returns true if it's possible to advance any of the first 'fieldsToCheck' fields of the
+ * index key and still be within valid index bounds.
+ *
+ * keyValues are the elements of the index key in order.
+ */
+ bool spaceLeftToAdvance(size_t fieldsToCheck, const std::vector<BSONElement>& keyValues);
+
+ // The actual bounds. Must outlive this object. Not owned by us.
+ const IndexBounds* _bounds;
+
+ // For each field, which interval are we currently in?
+ std::vector<size_t> _curInterval;
+
+ // Direction of scan * direction of indexing.
+ std::vector<int> _expectedDirection;
+};
+
} // namespace mongo
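
For orientation, the getStartSeekPoint()/checkKey() contract declared above is easiest to see
as a driver loop. The sketch below is illustrative only and is not part of this patch;
'IndexCursor' and its seekTo()/getNext() methods are hypothetical stand-ins for whatever index
access method the IndexScan stage actually drives.

    // Sketch only: IndexCursor, seekTo(), and getNext() are hypothetical stand-ins.
    void scanWithBounds(IndexBounds& bounds, const BSONObj& keyPattern, int direction,
                        IndexCursor* cursor) {
        invariant(bounds.isValidFor(keyPattern, direction));
        IndexBoundsChecker checker(&bounds, keyPattern, direction);

        IndexSeekPoint seekPoint;
        if (!checker.getStartSeekPoint(&seekPoint)) {
            return;  // No index entry can possibly be within the bounds; emit nothing.
        }
        cursor->seekTo(seekPoint);  // hypothetical: the next getNext() yields the first key
                                    // at or after the seek point

        BSONObj key;
        while (cursor->getNext(&key)) {  // hypothetical: yields keys in index order
            switch (checker.checkKey(key, &seekPoint)) {
                case IndexBoundsChecker::VALID:
                    // 'key' is within the bounds; hand the entry to the caller (omitted here).
                    break;
                case IndexBoundsChecker::MUST_ADVANCE:
                    // 'key' is outside the bounds, but a later key may be inside them.
                    // Skip ahead to the seek point that checkKey filled out.
                    cursor->seekTo(seekPoint);
                    break;
                case IndexBoundsChecker::DONE:
                    // No remaining key can be within the bounds.
                    return;
            }
        }
    }

The MUST_ADVANCE case is what lets a scan skip whole runs of keys that cannot fall inside the
bounds, rather than examining every entry between the start and end keys.
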
diff --git a/src/mongo/db/query/index_bounds_builder.cpp b/src/mongo/db/query/index_bounds_builder.cpp
index 8df448edcf5..92bff9f8d39 100644
--- a/src/mongo/db/query/index_bounds_builder.cpp
+++ b/src/mongo/db/query/index_bounds_builder.cpp
@@ -48,33 +48,32 @@
namespace mongo {
- string IndexBoundsBuilder::simpleRegex(const char* regex, const char* flags,
- BoundsTightness* tightnessOut) {
- string r = "";
- *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED;
-
- bool multilineOK;
- if ( regex[0] == '\\' && regex[1] == 'A') {
- multilineOK = true;
- regex += 2;
- }
- else if (regex[0] == '^') {
- multilineOK = false;
- regex += 1;
- }
- else {
- return r;
- }
+string IndexBoundsBuilder::simpleRegex(const char* regex,
+ const char* flags,
+ BoundsTightness* tightnessOut) {
+ string r = "";
+ *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED;
+
+ bool multilineOK;
+ if (regex[0] == '\\' && regex[1] == 'A') {
+ multilineOK = true;
+ regex += 2;
+ } else if (regex[0] == '^') {
+ multilineOK = false;
+ regex += 1;
+ } else {
+ return r;
+ }
- // A regex with the "|" character is never considered a simple regular expression.
- if (StringData(regex).find('|') != std::string::npos) {
- return "";
- }
+ // A regex with the "|" character is never considered a simple regular expression.
+ if (StringData(regex).find('|') != std::string::npos) {
+ return "";
+ }
- bool extended = false;
- while (*flags) {
- switch (*(flags++)) {
- case 'm': // multiline
+ bool extended = false;
+ while (*flags) {
+ switch (*(flags++)) {
+ case 'm': // multiline
if (multilineOK)
continue;
else
@@ -83,964 +82,912 @@ namespace mongo {
// Single-line mode specified. This just changes the behavior of the '.'
// character to match every character instead of every character except '\n'.
continue;
- case 'x': // extended
+ case 'x': // extended
extended = true;
break;
default:
- return r; // cant use index
- }
+                return r;  // can't use index
}
+ }
- mongoutils::str::stream ss;
+ mongoutils::str::stream ss;
- while(*regex) {
- char c = *(regex++);
+ while (*regex) {
+ char c = *(regex++);
- // We should have bailed out early above if '|' is in the regex.
- invariant(c != '|');
+ // We should have bailed out early above if '|' is in the regex.
+ invariant(c != '|');
- if ( c == '*' || c == '?' ) {
- // These are the only two symbols that make the last char optional
- r = ss;
- r = r.substr( 0 , r.size() - 1 );
- return r; //breaking here fails with /^a?/
- }
- else if (c == '\\') {
- c = *(regex++);
- if (c == 'Q'){
- // \Q...\E quotes everything inside
- while (*regex) {
- c = (*regex++);
- if (c == '\\' && (*regex == 'E')){
- regex++; //skip the 'E'
- break; // go back to start of outer loop
- }
- else {
- ss << c; // character should match itself
- }
+ if (c == '*' || c == '?') {
+ // These are the only two symbols that make the last char optional
+ r = ss;
+ r = r.substr(0, r.size() - 1);
+ return r; // breaking here fails with /^a?/
+ } else if (c == '\\') {
+ c = *(regex++);
+ if (c == 'Q') {
+ // \Q...\E quotes everything inside
+ while (*regex) {
+ c = (*regex++);
+ if (c == '\\' && (*regex == 'E')) {
+ regex++; // skip the 'E'
+ break; // go back to start of outer loop
+ } else {
+ ss << c; // character should match itself
}
}
- else if ((c >= 'A' && c <= 'Z') ||
- (c >= 'a' && c <= 'z') ||
- (c >= '0' && c <= '0') ||
- (c == '\0')) {
- // don't know what to do with these
- r = ss;
- break;
- }
- else {
- // slash followed by non-alphanumeric represents the following char
- ss << c;
- }
- }
- else if (strchr("^$.[()+{", c)) {
- // list of "metacharacters" from man pcrepattern
+        } else if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') ||
+                   (c == '\0')) {
+ // don't know what to do with these
r = ss;
break;
- }
- else if (extended && c == '#') {
- // comment
- r = ss;
- break;
- }
- else if (extended && isspace(c)) {
- continue;
- }
- else {
- // self-matching char
+ } else {
+ // slash followed by non-alphanumeric represents the following char
ss << c;
}
- }
-
- if ( r.empty() && *regex == 0 ) {
+ } else if (strchr("^$.[()+{", c)) {
+ // list of "metacharacters" from man pcrepattern
+ r = ss;
+ break;
+ } else if (extended && c == '#') {
+ // comment
r = ss;
- *tightnessOut = r.empty() ? IndexBoundsBuilder::INEXACT_COVERED : IndexBoundsBuilder::EXACT;
+ break;
+ } else if (extended && isspace(c)) {
+ continue;
+ } else {
+ // self-matching char
+ ss << c;
}
-
- return r;
}
-
- // static
- void IndexBoundsBuilder::allValuesForField(const BSONElement& elt, OrderedIntervalList* out) {
- // ARGH, BSONValue would make this shorter.
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendMaxKey("");
- out->name = elt.fieldName();
- out->intervals.push_back(makeRangeInterval(bob.obj(), true, true));
+ if (r.empty() && *regex == 0) {
+ r = ss;
+ *tightnessOut = r.empty() ? IndexBoundsBuilder::INEXACT_COVERED : IndexBoundsBuilder::EXACT;
}
- Interval IndexBoundsBuilder::allValues() {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendMaxKey("");
- return makeRangeInterval(bob.obj(), true, true);
+ return r;
+}
+
+
+// static
+void IndexBoundsBuilder::allValuesForField(const BSONElement& elt, OrderedIntervalList* out) {
+ // ARGH, BSONValue would make this shorter.
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendMaxKey("");
+ out->name = elt.fieldName();
+ out->intervals.push_back(makeRangeInterval(bob.obj(), true, true));
+}
+
+Interval IndexBoundsBuilder::allValues() {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendMaxKey("");
+ return makeRangeInterval(bob.obj(), true, true);
+}
+
+bool IntervalComparison(const Interval& lhs, const Interval& rhs) {
+ int wo = lhs.start.woCompare(rhs.start, false);
+ if (0 != wo) {
+ return wo < 0;
}
- bool IntervalComparison(const Interval& lhs, const Interval& rhs) {
- int wo = lhs.start.woCompare(rhs.start, false);
- if (0 != wo) {
- return wo < 0;
- }
-
- // The start and end are equal.
- // Strict weak requires irreflexivity which implies that equivalence returns false.
- if (lhs.startInclusive == rhs.startInclusive) { return false; }
-
- // Put the bound that's inclusive to the left.
- return lhs.startInclusive;
+ // The start and end are equal.
+ // Strict weak requires irreflexivity which implies that equivalence returns false.
+ if (lhs.startInclusive == rhs.startInclusive) {
+ return false;
}
- // static
- void IndexBoundsBuilder::translateAndIntersect(const MatchExpression* expr,
- const BSONElement& elt,
- const IndexEntry& index,
- OrderedIntervalList* oilOut,
- BoundsTightness* tightnessOut) {
- OrderedIntervalList arg;
- translate(expr, elt, index, &arg, tightnessOut);
-
- // translate outputs arg in sorted order. intersectize assumes that its arguments are
- // sorted.
- intersectize(arg, oilOut);
- }
+ // Put the bound that's inclusive to the left.
+ return lhs.startInclusive;
+}
- // static
- void IndexBoundsBuilder::translateAndUnion(const MatchExpression* expr,
+// static
+void IndexBoundsBuilder::translateAndIntersect(const MatchExpression* expr,
const BSONElement& elt,
const IndexEntry& index,
OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut) {
- OrderedIntervalList arg;
- translate(expr, elt, index, &arg, tightnessOut);
-
- // Append the new intervals to oilOut.
- oilOut->intervals.insert(oilOut->intervals.end(), arg.intervals.begin(),
- arg.intervals.end());
-
- // Union the appended intervals with the existing ones.
- unionize(oilOut);
+ OrderedIntervalList arg;
+ translate(expr, elt, index, &arg, tightnessOut);
+
+ // translate outputs arg in sorted order. intersectize assumes that its arguments are
+ // sorted.
+ intersectize(arg, oilOut);
+}
+
+// static
+void IndexBoundsBuilder::translateAndUnion(const MatchExpression* expr,
+ const BSONElement& elt,
+ const IndexEntry& index,
+ OrderedIntervalList* oilOut,
+ BoundsTightness* tightnessOut) {
+ OrderedIntervalList arg;
+ translate(expr, elt, index, &arg, tightnessOut);
+
+ // Append the new intervals to oilOut.
+ oilOut->intervals.insert(oilOut->intervals.end(), arg.intervals.begin(), arg.intervals.end());
+
+ // Union the appended intervals with the existing ones.
+ unionize(oilOut);
+}
+
+bool typeMatch(const BSONObj& obj) {
+ BSONObjIterator it(obj);
+ verify(it.more());
+ BSONElement first = it.next();
+ verify(it.more());
+ BSONElement second = it.next();
+ return first.canonicalType() == second.canonicalType();
+}
+
+// static
+void IndexBoundsBuilder::translate(const MatchExpression* expr,
+ const BSONElement& elt,
+ const IndexEntry& index,
+ OrderedIntervalList* oilOut,
+ BoundsTightness* tightnessOut) {
+ // We expect that the OIL we are constructing starts out empty.
+ invariant(oilOut->intervals.empty());
+
+ oilOut->name = elt.fieldName();
+
+ bool isHashed = false;
+ if (mongoutils::str::equals("hashed", elt.valuestrsafe())) {
+ isHashed = true;
}
- bool typeMatch(const BSONObj& obj) {
- BSONObjIterator it(obj);
- verify(it.more());
- BSONElement first = it.next();
- verify(it.more());
- BSONElement second = it.next();
- return first.canonicalType() == second.canonicalType();
+ if (isHashed) {
+ verify(MatchExpression::EQ == expr->matchType() ||
+ MatchExpression::MATCH_IN == expr->matchType());
}
- // static
- void IndexBoundsBuilder::translate(const MatchExpression* expr,
- const BSONElement& elt,
- const IndexEntry& index,
- OrderedIntervalList* oilOut,
- BoundsTightness* tightnessOut) {
- // We expect that the OIL we are constructing starts out empty.
- invariant(oilOut->intervals.empty());
+ if (MatchExpression::ELEM_MATCH_VALUE == expr->matchType()) {
+ OrderedIntervalList acc;
+ translate(expr->getChild(0), elt, index, &acc, tightnessOut);
- oilOut->name = elt.fieldName();
-
- bool isHashed = false;
- if (mongoutils::str::equals("hashed", elt.valuestrsafe())) {
- isHashed = true;
+ for (size_t i = 1; i < expr->numChildren(); ++i) {
+ OrderedIntervalList next;
+ BoundsTightness tightness;
+ translate(expr->getChild(i), elt, index, &next, &tightness);
+ intersectize(next, &acc);
}
- if (isHashed) {
- verify(MatchExpression::EQ == expr->matchType()
- || MatchExpression::MATCH_IN == expr->matchType());
+ for (size_t i = 0; i < acc.intervals.size(); ++i) {
+ oilOut->intervals.push_back(acc.intervals[i]);
}
- if (MatchExpression::ELEM_MATCH_VALUE == expr->matchType()) {
- OrderedIntervalList acc;
- translate(expr->getChild(0), elt, index, &acc, tightnessOut);
-
- for (size_t i = 1; i < expr->numChildren(); ++i) {
- OrderedIntervalList next;
- BoundsTightness tightness;
- translate(expr->getChild(i), elt, index, &next, &tightness);
- intersectize(next, &acc);
- }
-
- for (size_t i = 0; i < acc.intervals.size(); ++i) {
- oilOut->intervals.push_back(acc.intervals[i]);
- }
+ if (!oilOut->intervals.empty()) {
+ std::sort(oilOut->intervals.begin(), oilOut->intervals.end(), IntervalComparison);
+ }
- if (!oilOut->intervals.empty()) {
- std::sort(oilOut->intervals.begin(), oilOut->intervals.end(), IntervalComparison);
- }
+ // $elemMatch value requires an array.
+ // Scalars and directly nested objects are not matched with $elemMatch.
+ // We can't tell if a multi-key index key is derived from an array field.
+ // Therefore, a fetch is required.
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else if (MatchExpression::NOT == expr->matchType()) {
+ // A NOT is indexed by virtue of its child. If we're here then the NOT's child
+ // must be a kind of node for which we can index negations. It can't be things like
+ // $mod, $regex, or $type.
+ MatchExpression* child = expr->getChild(0);
+
+ // If we have a NOT -> EXISTS, we must handle separately.
+ if (MatchExpression::EXISTS == child->matchType()) {
+ // We should never try to use a sparse index for $exists:false.
+ invariant(!index.sparse);
+ BSONObjBuilder bob;
+ bob.appendNull("");
+ bob.appendNull("");
+ BSONObj dataObj = bob.obj();
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
- // $elemMatch value requires an array.
- // Scalars and directly nested objects are not matched with $elemMatch.
- // We can't tell if a multi-key index key is derived from an array field.
- // Therefore, a fetch is required.
*tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ return;
}
- else if (MatchExpression::NOT == expr->matchType()) {
- // A NOT is indexed by virtue of its child. If we're here then the NOT's child
- // must be a kind of node for which we can index negations. It can't be things like
- // $mod, $regex, or $type.
- MatchExpression* child = expr->getChild(0);
-
- // If we have a NOT -> EXISTS, we must handle separately.
- if (MatchExpression::EXISTS == child->matchType()) {
- // We should never try to use a sparse index for $exists:false.
- invariant(!index.sparse);
- BSONObjBuilder bob;
- bob.appendNull("");
- bob.appendNull("");
- BSONObj dataObj = bob.obj();
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- return;
- }
+ translate(child, elt, index, oilOut, tightnessOut);
+ oilOut->complement();
- translate(child, elt, index, oilOut, tightnessOut);
- oilOut->complement();
-
- // If the index is multikey, it doesn't matter what the tightness
- // of the child is, we must return INEXACT_FETCH. Consider a multikey
- // index on 'a' with document {a: [1, 2, 3]} and query {a: {$ne: 3}}.
- // If we treated the bounds [MinKey, 3), (3, MaxKey] as exact, then
- // we would erroneously return the document!
- if (index.multikey) {
- *tightnessOut = INEXACT_FETCH;
- }
+ // If the index is multikey, it doesn't matter what the tightness
+ // of the child is, we must return INEXACT_FETCH. Consider a multikey
+ // index on 'a' with document {a: [1, 2, 3]} and query {a: {$ne: 3}}.
+ // If we treated the bounds [MinKey, 3), (3, MaxKey] as exact, then
+ // we would erroneously return the document!
+ if (index.multikey) {
+ *tightnessOut = INEXACT_FETCH;
}
- else if (MatchExpression::EXISTS == expr->matchType()) {
- // We only handle the {$exists:true} case, as {$exists:false}
- // will have been translated to {$not:{ $exists:true }}.
- //
- // Documents with a missing value are stored *as if* they were
- // explicitly given the value 'null'. Given:
- // X = { b : 1 }
- // Y = { a : null, b : 1 }
- // X and Y look identical from within a standard index on { a : 1 }.
- // HOWEVER a sparse index on { a : 1 } will treat X and Y differently,
- // storing Y and not storing X.
- //
- // We can safely use an index in the following cases:
- // {a:{ $exists:true }} - normal index helps, but we must still fetch
- // {a:{ $exists:true }} - sparse index is exact
- // {a:{ $exists:false }} - normal index requires a fetch
- // {a:{ $exists:false }} - sparse indexes cannot be used at all.
- //
- // Noted in SERVER-12869, in case this ever changes some day.
- if (index.sparse) {
- oilOut->intervals.push_back(allValues());
- // A sparse, compound index on { a:1, b:1 } will include entries
- // for all of the following documents:
- // { a:1 }, { b:1 }, { a:1, b:1 }
- // So we must use INEXACT bounds in this case.
- if ( 1 < index.keyPattern.nFields() ) {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- }
- else {
- oilOut->intervals.push_back(allValues());
+ } else if (MatchExpression::EXISTS == expr->matchType()) {
+ // We only handle the {$exists:true} case, as {$exists:false}
+ // will have been translated to {$not:{ $exists:true }}.
+ //
+ // Documents with a missing value are stored *as if* they were
+ // explicitly given the value 'null'. Given:
+ // X = { b : 1 }
+ // Y = { a : null, b : 1 }
+ // X and Y look identical from within a standard index on { a : 1 }.
+ // HOWEVER a sparse index on { a : 1 } will treat X and Y differently,
+ // storing Y and not storing X.
+ //
+ // We can safely use an index in the following cases:
+ // {a:{ $exists:true }} - normal index helps, but we must still fetch
+ // {a:{ $exists:true }} - sparse index is exact
+ // {a:{ $exists:false }} - normal index requires a fetch
+ // {a:{ $exists:false }} - sparse indexes cannot be used at all.
+ //
+ // Noted in SERVER-12869, in case this ever changes some day.
+ if (index.sparse) {
+ oilOut->intervals.push_back(allValues());
+ // A sparse, compound index on { a:1, b:1 } will include entries
+ // for all of the following documents:
+ // { a:1 }, { b:1 }, { a:1, b:1 }
+ // So we must use INEXACT bounds in this case.
+ if (1 < index.keyPattern.nFields()) {
*tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- }
- else if (MatchExpression::EQ == expr->matchType()) {
- const EqualityMatchExpression* node = static_cast<const EqualityMatchExpression*>(expr);
- translateEquality(node->getData(), isHashed, oilOut, tightnessOut);
- }
- else if (MatchExpression::LTE == expr->matchType()) {
- const LTEMatchExpression* node = static_cast<const LTEMatchExpression*>(expr);
- BSONElement dataElt = node->getData();
-
- // Everything is <= MaxKey.
- if (MaxKey == dataElt.type()) {
- oilOut->intervals.push_back(allValues());
+ } else {
*tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
-
- // Only NaN is <= NaN.
- if (std::isnan(dataElt.numberDouble())) {
- double nan = dataElt.numberDouble();
- oilOut->intervals.push_back(makePointInterval(nan));
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
-
- BSONObjBuilder bob;
- // Use -infinity for one-sided numerical bounds
- if (dataElt.isNumber()) {
- bob.appendNumber("", -std::numeric_limits<double>::infinity());
- }
- else {
- bob.appendMinForType("", dataElt.type());
- }
- bob.appendAs(dataElt, "");
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- oilOut->intervals.push_back(makeRangeInterval(dataObj, typeMatch(dataObj), true));
-
- if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
}
+ } else {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
}
- else if (MatchExpression::LT == expr->matchType()) {
- const LTMatchExpression* node = static_cast<const LTMatchExpression*>(expr);
- BSONElement dataElt = node->getData();
+ } else if (MatchExpression::EQ == expr->matchType()) {
+ const EqualityMatchExpression* node = static_cast<const EqualityMatchExpression*>(expr);
+ translateEquality(node->getData(), isHashed, oilOut, tightnessOut);
+ } else if (MatchExpression::LTE == expr->matchType()) {
+ const LTEMatchExpression* node = static_cast<const LTEMatchExpression*>(expr);
+ BSONElement dataElt = node->getData();
- // Everything is <= MaxKey.
- if (MaxKey == dataElt.type()) {
- oilOut->intervals.push_back(allValues());
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
-
- // Nothing is < NaN.
- if (std::isnan(dataElt.numberDouble())) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ // Everything is <= MaxKey.
+ if (MaxKey == dataElt.type()) {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
+ }
- BSONObjBuilder bob;
- // Use -infinity for one-sided numerical bounds
- if (dataElt.isNumber()) {
- bob.appendNumber("", -std::numeric_limits<double>::infinity());
- }
- else {
- bob.appendMinForType("", dataElt.type());
- }
- bob.appendAs(dataElt, "");
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- Interval interval = makeRangeInterval(dataObj, typeMatch(dataObj), false);
+ // Only NaN is <= NaN.
+ if (std::isnan(dataElt.numberDouble())) {
+ double nan = dataElt.numberDouble();
+ oilOut->intervals.push_back(makePointInterval(nan));
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
+ }
- // If the operand to LT is equal to the lower bound X, the interval [X, X) is invalid
- // and should not be added to the bounds.
- if (!interval.isNull()) {
- oilOut->intervals.push_back(interval);
- }
+ BSONObjBuilder bob;
+ // Use -infinity for one-sided numerical bounds
+ if (dataElt.isNumber()) {
+ bob.appendNumber("", -std::numeric_limits<double>::infinity());
+ } else {
+ bob.appendMinForType("", dataElt.type());
+ }
+ bob.appendAs(dataElt, "");
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, typeMatch(dataObj), true));
+
+ if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ }
+ } else if (MatchExpression::LT == expr->matchType()) {
+ const LTMatchExpression* node = static_cast<const LTMatchExpression*>(expr);
+ BSONElement dataElt = node->getData();
- if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
+ // Everything is <= MaxKey.
+ if (MaxKey == dataElt.type()) {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
}
- else if (MatchExpression::GT == expr->matchType()) {
- const GTMatchExpression* node = static_cast<const GTMatchExpression*>(expr);
- BSONElement dataElt = node->getData();
- // Everything is > MinKey.
- if (MinKey == dataElt.type()) {
- oilOut->intervals.push_back(allValues());
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ // Nothing is < NaN.
+ if (std::isnan(dataElt.numberDouble())) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
+ }
- // Nothing is > NaN.
- if (std::isnan(dataElt.numberDouble())) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ BSONObjBuilder bob;
+ // Use -infinity for one-sided numerical bounds
+ if (dataElt.isNumber()) {
+ bob.appendNumber("", -std::numeric_limits<double>::infinity());
+ } else {
+ bob.appendMinForType("", dataElt.type());
+ }
+ bob.appendAs(dataElt, "");
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ Interval interval = makeRangeInterval(dataObj, typeMatch(dataObj), false);
- BSONObjBuilder bob;
- bob.appendAs(node->getData(), "");
- if (dataElt.isNumber()) {
- bob.appendNumber("", std::numeric_limits<double>::infinity());
- }
- else {
- bob.appendMaxForType("", dataElt.type());
- }
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- Interval interval = makeRangeInterval(dataObj, false, typeMatch(dataObj));
+ // If the operand to LT is equal to the lower bound X, the interval [X, X) is invalid
+ // and should not be added to the bounds.
+ if (!interval.isNull()) {
+ oilOut->intervals.push_back(interval);
+ }
- // If the operand to GT is equal to the upper bound X, the interval (X, X] is invalid
- // and should not be added to the bounds.
- if (!interval.isNull()) {
- oilOut->intervals.push_back(interval);
- }
+ if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ }
+ } else if (MatchExpression::GT == expr->matchType()) {
+ const GTMatchExpression* node = static_cast<const GTMatchExpression*>(expr);
+ BSONElement dataElt = node->getData();
- if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
+ // Everything is > MinKey.
+ if (MinKey == dataElt.type()) {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
}
- else if (MatchExpression::GTE == expr->matchType()) {
- const GTEMatchExpression* node = static_cast<const GTEMatchExpression*>(expr);
- BSONElement dataElt = node->getData();
- // Everything is >= MinKey.
- if (MinKey == dataElt.type()) {
- oilOut->intervals.push_back(allValues());
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ // Nothing is > NaN.
+ if (std::isnan(dataElt.numberDouble())) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
+ }
- // Only NaN is >= NaN.
- if (std::isnan(dataElt.numberDouble())) {
- double nan = dataElt.numberDouble();
- oilOut->intervals.push_back(makePointInterval(nan));
- *tightnessOut = IndexBoundsBuilder::EXACT;
- return;
- }
+ BSONObjBuilder bob;
+ bob.appendAs(node->getData(), "");
+ if (dataElt.isNumber()) {
+ bob.appendNumber("", std::numeric_limits<double>::infinity());
+ } else {
+ bob.appendMaxForType("", dataElt.type());
+ }
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ Interval interval = makeRangeInterval(dataObj, false, typeMatch(dataObj));
- BSONObjBuilder bob;
- bob.appendAs(dataElt, "");
- if (dataElt.isNumber()) {
- bob.appendNumber("", std::numeric_limits<double>::infinity());
- }
- else {
- bob.appendMaxForType("", dataElt.type());
- }
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
+ // If the operand to GT is equal to the upper bound X, the interval (X, X] is invalid
+ // and should not be added to the bounds.
+ if (!interval.isNull()) {
+ oilOut->intervals.push_back(interval);
+ }
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, typeMatch(dataObj)));
- if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
+ if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
}
- else if (MatchExpression::REGEX == expr->matchType()) {
- const RegexMatchExpression* rme = static_cast<const RegexMatchExpression*>(expr);
- translateRegex(rme, oilOut, tightnessOut);
+ } else if (MatchExpression::GTE == expr->matchType()) {
+ const GTEMatchExpression* node = static_cast<const GTEMatchExpression*>(expr);
+ BSONElement dataElt = node->getData();
+
+ // Everything is >= MinKey.
+ if (MinKey == dataElt.type()) {
+ oilOut->intervals.push_back(allValues());
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
}
- else if (MatchExpression::MOD == expr->matchType()) {
- BSONObjBuilder bob;
- bob.appendMinForType("", NumberDouble);
- bob.appendMaxForType("", NumberDouble);
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
- *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED;
+
+ // Only NaN is >= NaN.
+ if (std::isnan(dataElt.numberDouble())) {
+ double nan = dataElt.numberDouble();
+ oilOut->intervals.push_back(makePointInterval(nan));
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ return;
}
- else if (MatchExpression::TYPE_OPERATOR == expr->matchType()) {
- const TypeMatchExpression* tme = static_cast<const TypeMatchExpression*>(expr);
- BSONObjBuilder bob;
- bob.appendMinForType("", tme->getData());
- bob.appendMaxForType("", tme->getData());
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+
+ BSONObjBuilder bob;
+ bob.appendAs(dataElt, "");
+ if (dataElt.isNumber()) {
+ bob.appendNumber("", std::numeric_limits<double>::infinity());
+ } else {
+ bob.appendMaxForType("", dataElt.type());
}
- else if (MatchExpression::MATCH_IN == expr->matchType()) {
- const InMatchExpression* ime = static_cast<const InMatchExpression*>(expr);
- const ArrayFilterEntries& afr = ime->getData();
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, typeMatch(dataObj)));
+ if (dataElt.isSimpleType() || dataElt.type() == BSONType::BinData) {
*tightnessOut = IndexBoundsBuilder::EXACT;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ }
+ } else if (MatchExpression::REGEX == expr->matchType()) {
+ const RegexMatchExpression* rme = static_cast<const RegexMatchExpression*>(expr);
+ translateRegex(rme, oilOut, tightnessOut);
+ } else if (MatchExpression::MOD == expr->matchType()) {
+ BSONObjBuilder bob;
+ bob.appendMinForType("", NumberDouble);
+ bob.appendMaxForType("", NumberDouble);
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
+ *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED;
+ } else if (MatchExpression::TYPE_OPERATOR == expr->matchType()) {
+ const TypeMatchExpression* tme = static_cast<const TypeMatchExpression*>(expr);
+ BSONObjBuilder bob;
+ bob.appendMinForType("", tme->getData());
+ bob.appendMaxForType("", tme->getData());
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, true));
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else if (MatchExpression::MATCH_IN == expr->matchType()) {
+ const InMatchExpression* ime = static_cast<const InMatchExpression*>(expr);
+ const ArrayFilterEntries& afr = ime->getData();
- // Create our various intervals.
+ *tightnessOut = IndexBoundsBuilder::EXACT;
- IndexBoundsBuilder::BoundsTightness tightness;
- for (BSONElementSet::iterator it = afr.equalities().begin();
- it != afr.equalities().end(); ++it) {
- translateEquality(*it, isHashed, oilOut, &tightness);
- if (tightness != IndexBoundsBuilder::EXACT) {
- *tightnessOut = tightness;
- }
- }
+ // Create our various intervals.
- for (size_t i = 0; i < afr.numRegexes(); ++i) {
- translateRegex(afr.regex(i), oilOut, &tightness);
- if (tightness != IndexBoundsBuilder::EXACT) {
- *tightnessOut = tightness;
- }
+ IndexBoundsBuilder::BoundsTightness tightness;
+ for (BSONElementSet::iterator it = afr.equalities().begin(); it != afr.equalities().end();
+ ++it) {
+ translateEquality(*it, isHashed, oilOut, &tightness);
+ if (tightness != IndexBoundsBuilder::EXACT) {
+ *tightnessOut = tightness;
}
+ }
- if (afr.hasNull()) {
- // A null index key does not always match a null query value so we must fetch the
- // doc and run a full comparison. See SERVER-4529.
- // TODO: Do we already set the tightnessOut by calling translateEquality?
- *tightnessOut = INEXACT_FETCH;
+ for (size_t i = 0; i < afr.numRegexes(); ++i) {
+ translateRegex(afr.regex(i), oilOut, &tightness);
+ if (tightness != IndexBoundsBuilder::EXACT) {
+ *tightnessOut = tightness;
}
+ }
- if (afr.hasEmptyArray()) {
- // Empty arrays are indexed as undefined.
- BSONObjBuilder undefinedBob;
- undefinedBob.appendUndefined("");
- oilOut->intervals.push_back(makePointInterval(undefinedBob.obj()));
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
+ if (afr.hasNull()) {
+ // A null index key does not always match a null query value so we must fetch the
+ // doc and run a full comparison. See SERVER-4529.
+ // TODO: Do we already set the tightnessOut by calling translateEquality?
+ *tightnessOut = INEXACT_FETCH;
+ }
- unionize(oilOut);
+ if (afr.hasEmptyArray()) {
+ // Empty arrays are indexed as undefined.
+ BSONObjBuilder undefinedBob;
+ undefinedBob.appendUndefined("");
+ oilOut->intervals.push_back(makePointInterval(undefinedBob.obj()));
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
}
- else if (MatchExpression::GEO == expr->matchType()) {
- const GeoMatchExpression* gme = static_cast<const GeoMatchExpression*>(expr);
+ unionize(oilOut);
+ } else if (MatchExpression::GEO == expr->matchType()) {
+ const GeoMatchExpression* gme = static_cast<const GeoMatchExpression*>(expr);
- if (mongoutils::str::equals("2dsphere", elt.valuestrsafe())) {
- verify(gme->getGeoExpression().getGeometry().hasS2Region());
- const S2Region& region = gme->getGeoExpression().getGeometry().getS2Region();
- ExpressionMapping::cover2dsphere(region, index.infoObj, oilOut);
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- else if (mongoutils::str::equals("2d", elt.valuestrsafe())) {
- verify(gme->getGeoExpression().getGeometry().hasR2Region());
- const R2Region& region = gme->getGeoExpression().getGeometry().getR2Region();
+ if (mongoutils::str::equals("2dsphere", elt.valuestrsafe())) {
+ verify(gme->getGeoExpression().getGeometry().hasS2Region());
+ const S2Region& region = gme->getGeoExpression().getGeometry().getS2Region();
+ ExpressionMapping::cover2dsphere(region, index.infoObj, oilOut);
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else if (mongoutils::str::equals("2d", elt.valuestrsafe())) {
+ verify(gme->getGeoExpression().getGeometry().hasR2Region());
+ const R2Region& region = gme->getGeoExpression().getGeometry().getR2Region();
- ExpressionMapping::cover2d(region,
- index.infoObj,
- internalGeoPredicateQuery2DMaxCoveringCells,
- oilOut);
+ ExpressionMapping::cover2d(
+ region, index.infoObj, internalGeoPredicateQuery2DMaxCoveringCells, oilOut);
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- else {
- warning() << "Planner error trying to build geo bounds for " << elt.toString()
- << " index element.";
- verify(0);
- }
- }
- else {
- warning() << "Planner error, trying to build bounds for expression: "
- << expr->toString() << endl;
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else {
+ warning() << "Planner error trying to build geo bounds for " << elt.toString()
+ << " index element.";
verify(0);
}
+ } else {
+ warning() << "Planner error, trying to build bounds for expression: " << expr->toString()
+ << endl;
+ verify(0);
}
-
- // static
- Interval IndexBoundsBuilder::makeRangeInterval(const BSONObj& obj, bool startInclusive,
- bool endInclusive) {
- Interval ret;
- ret._intervalData = obj;
- ret.startInclusive = startInclusive;
- ret.endInclusive = endInclusive;
- BSONObjIterator it(obj);
- verify(it.more());
- ret.start = it.next();
- verify(it.more());
- ret.end = it.next();
- return ret;
- }
-
- // static
- void IndexBoundsBuilder::intersectize(const OrderedIntervalList& arg,
- OrderedIntervalList* oilOut) {
- verify(arg.name == oilOut->name);
-
- size_t argidx = 0;
- const vector<Interval>& argiv = arg.intervals;
-
- size_t ividx = 0;
- vector<Interval>& iv = oilOut->intervals;
-
- vector<Interval> result;
-
- while (argidx < argiv.size() && ividx < iv.size()) {
- Interval::IntervalComparison cmp = argiv[argidx].compare(iv[ividx]);
-
- verify(Interval::INTERVAL_UNKNOWN != cmp);
-
- if (cmp == Interval::INTERVAL_PRECEDES
- || cmp == Interval::INTERVAL_PRECEDES_COULD_UNION) {
- // argiv is before iv. move argiv forward.
+}
+
+// static
+Interval IndexBoundsBuilder::makeRangeInterval(const BSONObj& obj,
+ bool startInclusive,
+ bool endInclusive) {
+ Interval ret;
+ ret._intervalData = obj;
+ ret.startInclusive = startInclusive;
+ ret.endInclusive = endInclusive;
+ BSONObjIterator it(obj);
+ verify(it.more());
+ ret.start = it.next();
+ verify(it.more());
+ ret.end = it.next();
+ return ret;
+}
+
+// static
+void IndexBoundsBuilder::intersectize(const OrderedIntervalList& arg, OrderedIntervalList* oilOut) {
+ verify(arg.name == oilOut->name);
+
+ size_t argidx = 0;
+ const vector<Interval>& argiv = arg.intervals;
+
+ size_t ividx = 0;
+ vector<Interval>& iv = oilOut->intervals;
+
+ vector<Interval> result;
+
+ while (argidx < argiv.size() && ividx < iv.size()) {
+ Interval::IntervalComparison cmp = argiv[argidx].compare(iv[ividx]);
+
+ verify(Interval::INTERVAL_UNKNOWN != cmp);
+
+ if (cmp == Interval::INTERVAL_PRECEDES || cmp == Interval::INTERVAL_PRECEDES_COULD_UNION) {
+ // argiv is before iv. move argiv forward.
+ ++argidx;
+ } else if (cmp == Interval::INTERVAL_SUCCEEDS) {
+ // iv is before argiv. move iv forward.
+ ++ividx;
+ } else {
+ // argiv[argidx] (cmpresults) iv[ividx]
+ Interval newInt = argiv[argidx];
+ newInt.intersect(iv[ividx], cmp);
+ result.push_back(newInt);
+
+ if (Interval::INTERVAL_EQUALS == cmp) {
++argidx;
- }
- else if (cmp == Interval::INTERVAL_SUCCEEDS) {
- // iv is before argiv. move iv forward.
++ividx;
- }
- else {
- // argiv[argidx] (cmpresults) iv[ividx]
- Interval newInt = argiv[argidx];
- newInt.intersect(iv[ividx], cmp);
- result.push_back(newInt);
-
- if (Interval::INTERVAL_EQUALS == cmp) {
- ++argidx;
- ++ividx;
- }
- else if (Interval::INTERVAL_WITHIN == cmp) {
- ++argidx;
- }
- else if (Interval::INTERVAL_CONTAINS == cmp) {
- ++ividx;
- }
- else if (Interval::INTERVAL_OVERLAPS_BEFORE == cmp) {
- ++argidx;
- }
- else if (Interval::INTERVAL_OVERLAPS_AFTER == cmp) {
- ++ividx;
- }
- else {
- verify(0);
- }
+ } else if (Interval::INTERVAL_WITHIN == cmp) {
+ ++argidx;
+ } else if (Interval::INTERVAL_CONTAINS == cmp) {
+ ++ividx;
+ } else if (Interval::INTERVAL_OVERLAPS_BEFORE == cmp) {
+ ++argidx;
+ } else if (Interval::INTERVAL_OVERLAPS_AFTER == cmp) {
+ ++ividx;
+ } else {
+ verify(0);
}
}
-
- oilOut->intervals.swap(result);
}
- // static
- void IndexBoundsBuilder::unionize(OrderedIntervalList* oilOut) {
- vector<Interval>& iv = oilOut->intervals;
-
- // This can happen.
- if (iv.empty()) { return; }
+ oilOut->intervals.swap(result);
+}
- // Step 1: sort.
- std::sort(iv.begin(), iv.end(), IntervalComparison);
+// static
+void IndexBoundsBuilder::unionize(OrderedIntervalList* oilOut) {
+ vector<Interval>& iv = oilOut->intervals;
- // Step 2: Walk through and merge.
- size_t i = 0;
- while (i < iv.size() - 1) {
- // Compare i with i + 1.
- Interval::IntervalComparison cmp = iv[i].compare(iv[i + 1]);
-
- // This means our sort didn't work.
- verify(Interval::INTERVAL_SUCCEEDS != cmp);
-
- // Intervals are correctly ordered.
- if (Interval::INTERVAL_PRECEDES == cmp) {
- // We can move to the next pair.
- ++i;
- }
- else if (Interval::INTERVAL_EQUALS == cmp || Interval::INTERVAL_WITHIN == cmp) {
- // Interval 'i' is equal to i+1, or is contained within i+1.
- // Remove interval i and don't move to the next value of 'i'.
- iv.erase(iv.begin() + i);
- }
- else if (Interval::INTERVAL_CONTAINS == cmp) {
- // Interval 'i' contains i+1, remove i+1 and don't move to the next value of 'i'.
- iv.erase(iv.begin() + i + 1);
- }
- else if (Interval::INTERVAL_OVERLAPS_BEFORE == cmp
- || Interval::INTERVAL_PRECEDES_COULD_UNION == cmp) {
- // We want to merge intervals i and i+1.
- // Interval 'i' starts before interval 'i+1'.
- BSONObjBuilder bob;
- bob.appendAs(iv[i].start, "");
- bob.appendAs(iv[i + 1].end, "");
- BSONObj data = bob.obj();
- bool startInclusive = iv[i].startInclusive;
- bool endInclusive = iv[i + 1].endInclusive;
- iv.erase(iv.begin() + i);
- // iv[i] is now the former iv[i + 1]
- iv[i] = makeRangeInterval(data, startInclusive, endInclusive);
- // Don't increment 'i'.
- }
- }
- }
-
- // static
- Interval IndexBoundsBuilder::makeRangeInterval(const string& start, const string& end,
- bool startInclusive, bool endInclusive) {
- BSONObjBuilder bob;
- bob.append("", start);
- bob.append("", end);
- return makeRangeInterval(bob.obj(), startInclusive, endInclusive);
+    // The interval list can legitimately be empty; nothing to merge.
+ if (iv.empty()) {
+ return;
}
- // static
- Interval IndexBoundsBuilder::makePointInterval(const BSONObj& obj) {
- Interval ret;
- ret._intervalData = obj;
- ret.startInclusive = ret.endInclusive = true;
- ret.start = ret.end = obj.firstElement();
- return ret;
- }
-
- // static
- Interval IndexBoundsBuilder::makePointInterval(const string& str) {
- BSONObjBuilder bob;
- bob.append("", str);
- return makePointInterval(bob.obj());
- }
-
- // static
- Interval IndexBoundsBuilder::makePointInterval(double d) {
- BSONObjBuilder bob;
- bob.append("", d);
- return makePointInterval(bob.obj());
- }
-
- // static
- BSONObj IndexBoundsBuilder::objFromElement(const BSONElement& elt) {
- BSONObjBuilder bob;
- bob.appendAs(elt, "");
- return bob.obj();
- }
-
- // static
- void IndexBoundsBuilder::reverseInterval(Interval* ival) {
- BSONElement tmp = ival->start;
- ival->start = ival->end;
- ival->end = tmp;
-
- bool tmpInc = ival->startInclusive;
- ival->startInclusive = ival->endInclusive;
- ival->endInclusive = tmpInc;
- }
-
- // static
- void IndexBoundsBuilder::translateRegex(const RegexMatchExpression* rme,
- OrderedIntervalList* oilOut, BoundsTightness* tightnessOut) {
-
- const string start = simpleRegex(rme->getString().c_str(), rme->getFlags().c_str(), tightnessOut);
-
- // Note that 'tightnessOut' is set by simpleRegex above.
- if (!start.empty()) {
- string end = start;
- end[end.size() - 1]++;
- oilOut->intervals.push_back(makeRangeInterval(start, end, true, false));
- }
- else {
+ // Step 1: sort.
+ std::sort(iv.begin(), iv.end(), IntervalComparison);
+
+ // Step 2: Walk through and merge.
+ size_t i = 0;
+ while (i < iv.size() - 1) {
+ // Compare i with i + 1.
+ Interval::IntervalComparison cmp = iv[i].compare(iv[i + 1]);
+
+ // This means our sort didn't work.
+ verify(Interval::INTERVAL_SUCCEEDS != cmp);
+
+ // Intervals are correctly ordered.
+ if (Interval::INTERVAL_PRECEDES == cmp) {
+ // We can move to the next pair.
+ ++i;
+ } else if (Interval::INTERVAL_EQUALS == cmp || Interval::INTERVAL_WITHIN == cmp) {
+ // Interval 'i' is equal to i+1, or is contained within i+1.
+ // Remove interval i and don't move to the next value of 'i'.
+ iv.erase(iv.begin() + i);
+ } else if (Interval::INTERVAL_CONTAINS == cmp) {
+ // Interval 'i' contains i+1, remove i+1 and don't move to the next value of 'i'.
+ iv.erase(iv.begin() + i + 1);
+ } else if (Interval::INTERVAL_OVERLAPS_BEFORE == cmp ||
+ Interval::INTERVAL_PRECEDES_COULD_UNION == cmp) {
+ // We want to merge intervals i and i+1.
+ // Interval 'i' starts before interval 'i+1'.
BSONObjBuilder bob;
- bob.appendMinForType("", String);
- bob.appendMaxForType("", String);
- BSONObj dataObj = bob.obj();
- verify(dataObj.isOwned());
- oilOut->intervals.push_back(makeRangeInterval(dataObj, true, false));
+ bob.appendAs(iv[i].start, "");
+ bob.appendAs(iv[i + 1].end, "");
+ BSONObj data = bob.obj();
+ bool startInclusive = iv[i].startInclusive;
+ bool endInclusive = iv[i + 1].endInclusive;
+ iv.erase(iv.begin() + i);
+ // iv[i] is now the former iv[i + 1]
+ iv[i] = makeRangeInterval(data, startInclusive, endInclusive);
+ // Don't increment 'i'.
}
-
- // Regexes are after strings.
+ }
+}
+
+// static
+Interval IndexBoundsBuilder::makeRangeInterval(const string& start,
+ const string& end,
+ bool startInclusive,
+ bool endInclusive) {
+ BSONObjBuilder bob;
+ bob.append("", start);
+ bob.append("", end);
+ return makeRangeInterval(bob.obj(), startInclusive, endInclusive);
+}
+
+// static
+Interval IndexBoundsBuilder::makePointInterval(const BSONObj& obj) {
+ Interval ret;
+ ret._intervalData = obj;
+ ret.startInclusive = ret.endInclusive = true;
+ ret.start = ret.end = obj.firstElement();
+ return ret;
+}
+
+// static
+Interval IndexBoundsBuilder::makePointInterval(const string& str) {
+ BSONObjBuilder bob;
+ bob.append("", str);
+ return makePointInterval(bob.obj());
+}
+
+// static
+Interval IndexBoundsBuilder::makePointInterval(double d) {
+ BSONObjBuilder bob;
+ bob.append("", d);
+ return makePointInterval(bob.obj());
+}
+
+// static
+BSONObj IndexBoundsBuilder::objFromElement(const BSONElement& elt) {
+ BSONObjBuilder bob;
+ bob.appendAs(elt, "");
+ return bob.obj();
+}
+
+// static
+void IndexBoundsBuilder::reverseInterval(Interval* ival) {
+ BSONElement tmp = ival->start;
+ ival->start = ival->end;
+ ival->end = tmp;
+
+ bool tmpInc = ival->startInclusive;
+ ival->startInclusive = ival->endInclusive;
+ ival->endInclusive = tmpInc;
+}
+
+// static
+void IndexBoundsBuilder::translateRegex(const RegexMatchExpression* rme,
+ OrderedIntervalList* oilOut,
+ BoundsTightness* tightnessOut) {
+ const string start =
+ simpleRegex(rme->getString().c_str(), rme->getFlags().c_str(), tightnessOut);
+
+ // Note that 'tightnessOut' is set by simpleRegex above.
+ if (!start.empty()) {
+ string end = start;
+ end[end.size() - 1]++;
+ oilOut->intervals.push_back(makeRangeInterval(start, end, true, false));
+ } else {
BSONObjBuilder bob;
- bob.appendRegex("", rme->getString(), rme->getFlags());
- oilOut->intervals.push_back(makePointInterval(bob.obj()));
+ bob.appendMinForType("", String);
+ bob.appendMaxForType("", String);
+ BSONObj dataObj = bob.obj();
+ verify(dataObj.isOwned());
+ oilOut->intervals.push_back(makeRangeInterval(dataObj, true, false));
}
- // static
- void IndexBoundsBuilder::translateEquality(const BSONElement& data, bool isHashed,
- OrderedIntervalList* oil, BoundsTightness* tightnessOut) {
- // We have to copy the data out of the parse tree and stuff it into the index
- // bounds. BSONValue will be useful here.
- if (Array != data.type()) {
- BSONObj dataObj;
- if (isHashed) {
- dataObj = ExpressionMapping::hash(data);
- }
- else {
- dataObj = objFromElement(data);
- }
+ // Regexes are after strings.
+ BSONObjBuilder bob;
+ bob.appendRegex("", rme->getString(), rme->getFlags());
+ oilOut->intervals.push_back(makePointInterval(bob.obj()));
+}
- verify(dataObj.isOwned());
- oil->intervals.push_back(makePointInterval(dataObj));
-
- if (dataObj.firstElement().isNull() || isHashed) {
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
- }
- else {
- *tightnessOut = IndexBoundsBuilder::EXACT;
- }
- return;
+// static
+void IndexBoundsBuilder::translateEquality(const BSONElement& data,
+ bool isHashed,
+ OrderedIntervalList* oil,
+ BoundsTightness* tightnessOut) {
+ // We have to copy the data out of the parse tree and stuff it into the index
+ // bounds. BSONValue will be useful here.
+ if (Array != data.type()) {
+ BSONObj dataObj;
+ if (isHashed) {
+ dataObj = ExpressionMapping::hash(data);
+ } else {
+ dataObj = objFromElement(data);
}
- // If we're here, Array == data.type().
- //
- // Using arrays with hashed indices is currently not supported, so we don't have to worry
- // about that case.
- //
- // Arrays are indexed by either:
- //
- // 1. the first element if there is one. Note that using the first is arbitrary; we could
- // just as well use any array element.). If the query is {a: [1, 2, 3]}, for example, then
- // using the bounds [1, 1] for the multikey index will pick up every document containing the
- // array [1, 2, 3].
- //
- // 2. undefined if the array is empty.
- //
- // Also, arrays are indexed by:
- //
- // 3. the full array if it's inside of another array. We check for this so that the query
- // {a: [1, 2, 3]} will match documents like {a: [[1, 2, 3], 4, 5]}.
-
- // Case 3.
- oil->intervals.push_back(makePointInterval(objFromElement(data)));
+ verify(dataObj.isOwned());
+ oil->intervals.push_back(makePointInterval(dataObj));
- if (data.Obj().isEmpty()) {
- // Case 2.
- BSONObjBuilder undefinedBob;
- undefinedBob.appendUndefined("");
- oil->intervals.push_back(makePointInterval(undefinedBob.obj()));
- }
- else {
- // Case 1.
- BSONElement firstEl = data.Obj().firstElement();
- oil->intervals.push_back(makePointInterval(objFromElement(firstEl)));
+ if (dataObj.firstElement().isNull() || isHashed) {
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ } else {
+ *tightnessOut = IndexBoundsBuilder::EXACT;
}
+ return;
+ }
- std::sort(oil->intervals.begin(), oil->intervals.end(), IntervalComparison);
- *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+ // If we're here, Array == data.type().
+ //
+ // Using arrays with hashed indices is currently not supported, so we don't have to worry
+ // about that case.
+ //
+ // Arrays are indexed by either:
+ //
+    // 1. the first element if there is one. (Note that using the first is arbitrary; we could
+    // just as well use any array element.) If the query is {a: [1, 2, 3]}, for example, then
+ // using the bounds [1, 1] for the multikey index will pick up every document containing the
+ // array [1, 2, 3].
+ //
+ // 2. undefined if the array is empty.
+ //
+ // Also, arrays are indexed by:
+ //
+ // 3. the full array if it's inside of another array. We check for this so that the query
+ // {a: [1, 2, 3]} will match documents like {a: [[1, 2, 3], 4, 5]}.
+
+ // Case 3.
+ oil->intervals.push_back(makePointInterval(objFromElement(data)));
+
+ if (data.Obj().isEmpty()) {
+ // Case 2.
+ BSONObjBuilder undefinedBob;
+ undefinedBob.appendUndefined("");
+ oil->intervals.push_back(makePointInterval(undefinedBob.obj()));
+ } else {
+ // Case 1.
+ BSONElement firstEl = data.Obj().firstElement();
+ oil->intervals.push_back(makePointInterval(objFromElement(firstEl)));
}
- // static
- void IndexBoundsBuilder::allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds) {
- bounds->fields.resize(keyPattern.nFields());
+ std::sort(oil->intervals.begin(), oil->intervals.end(), IntervalComparison);
+ *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;
+}
- BSONObjIterator it(keyPattern);
- int field = 0;
- while (it.more()) {
- IndexBoundsBuilder::allValuesForField(it.next(), &bounds->fields[field]);
- ++field;
- }
+// static
+void IndexBoundsBuilder::allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds) {
+ bounds->fields.resize(keyPattern.nFields());
- alignBounds(bounds, keyPattern);
+ BSONObjIterator it(keyPattern);
+ int field = 0;
+ while (it.more()) {
+ IndexBoundsBuilder::allValuesForField(it.next(), &bounds->fields[field]);
+ ++field;
}
- // static
- void IndexBoundsBuilder::alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir) {
- BSONObjIterator it(kp);
- size_t oilIdx = 0;
- while (it.more()) {
- BSONElement elt = it.next();
- int direction = (elt.numberInt() >= 0) ? 1 : -1;
- direction *= scanDir;
- if (-1 == direction) {
- vector<Interval>& iv = bounds->fields[oilIdx].intervals;
- // Step 1: reverse the list.
- std::reverse(iv.begin(), iv.end());
- // Step 2: reverse each interval.
- for (size_t i = 0; i < iv.size(); ++i) {
- iv[i].reverse();
- }
- }
- ++oilIdx;
- }
-
- if (!bounds->isValidFor(kp, scanDir)) {
- log() << "INVALID BOUNDS: " << bounds->toString() << endl
- << "kp = " << kp.toString() << endl
- << "scanDir = " << scanDir << endl;
- verify(0);
- }
+ alignBounds(bounds, keyPattern);
+}
+
+// static
+void IndexBoundsBuilder::alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir) {
+ BSONObjIterator it(kp);
+ size_t oilIdx = 0;
+ while (it.more()) {
+ BSONElement elt = it.next();
+ int direction = (elt.numberInt() >= 0) ? 1 : -1;
+ direction *= scanDir;
+ if (-1 == direction) {
+ vector<Interval>& iv = bounds->fields[oilIdx].intervals;
+ // Step 1: reverse the list.
+ std::reverse(iv.begin(), iv.end());
+ // Step 2: reverse each interval.
+ for (size_t i = 0; i < iv.size(); ++i) {
+ iv[i].reverse();
+ }
+ }
+ ++oilIdx;
}
- // static
- bool IndexBoundsBuilder::isSingleInterval(const IndexBounds& bounds,
- BSONObj* startKey,
- bool* startKeyInclusive,
- BSONObj* endKey,
- bool* endKeyInclusive) {
- // We build our start/end keys as we go.
- BSONObjBuilder startBob;
- BSONObjBuilder endBob;
-
- // The start and end keys are inclusive unless we have a non-point interval, in which case
- // we take the inclusivity from there.
- *startKeyInclusive = true;
- *endKeyInclusive = true;
-
- size_t fieldNo = 0;
-
- // First, we skip over point intervals.
- for (; fieldNo < bounds.fields.size(); ++fieldNo) {
- const OrderedIntervalList& oil = bounds.fields[fieldNo];
- // A point interval requires just one interval...
- if (1 != oil.intervals.size()) {
- break;
- }
- if (!oil.intervals[0].isPoint()) {
- break;
- }
- // Since it's a point, start == end.
- startBob.append(oil.intervals[0].start);
- endBob.append(oil.intervals[0].end);
- }
-
- if (fieldNo >= bounds.fields.size()) {
- // All our intervals are points. We count for all values of one field.
- *startKey = startBob.obj();
- *endKey = endBob.obj();
- return true;
- }
-
- // After point intervals we can have exactly one non-point interval.
- const OrderedIntervalList& nonPoint = bounds.fields[fieldNo];
- if (1 != nonPoint.intervals.size()) {
- return false;
- }
+ if (!bounds->isValidFor(kp, scanDir)) {
+ log() << "INVALID BOUNDS: " << bounds->toString() << endl
+ << "kp = " << kp.toString() << endl
+ << "scanDir = " << scanDir << endl;
+ verify(0);
+ }
+}
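
A hedged illustration of the alignment step, with a made-up descending key and values: for each key whose pattern direction times scanDir is negative, the interval list is reversed and each interval's endpoints are swapped so the bounds agree with a backward scan.

// Illustrative only: point bounds on the descending key {a: -1}, built in ascending order.
IndexBounds bounds;
bounds.fields.resize(1);
bounds.fields[0].name = "a";
bounds.fields[0].intervals.push_back(IndexBoundsBuilder::makePointInterval(1.0));
bounds.fields[0].intervals.push_back(IndexBoundsBuilder::makePointInterval(5.0));

// After alignment the list reads [ [5, 5], [1, 1] ], which is the order
// bounds.isValidFor(BSON("a" << -1), 1) expects for this key pattern.
IndexBoundsBuilder::alignBounds(&bounds, BSON("a" << -1));
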
+
+// static
+bool IndexBoundsBuilder::isSingleInterval(const IndexBounds& bounds,
+ BSONObj* startKey,
+ bool* startKeyInclusive,
+ BSONObj* endKey,
+ bool* endKeyInclusive) {
+ // We build our start/end keys as we go.
+ BSONObjBuilder startBob;
+ BSONObjBuilder endBob;
+
+ // The start and end keys are inclusive unless we have a non-point interval, in which case
+ // we take the inclusivity from there.
+ *startKeyInclusive = true;
+ *endKeyInclusive = true;
+
+ size_t fieldNo = 0;
+
+ // First, we skip over point intervals.
+ for (; fieldNo < bounds.fields.size(); ++fieldNo) {
+ const OrderedIntervalList& oil = bounds.fields[fieldNo];
+ // A point interval requires just one interval...
+ if (1 != oil.intervals.size()) {
+ break;
+ }
+ if (!oil.intervals[0].isPoint()) {
+ break;
+ }
+ // Since it's a point, start == end.
+ startBob.append(oil.intervals[0].start);
+ endBob.append(oil.intervals[0].end);
+ }
- // Add the non-point interval to our builder and set the inclusivity from it.
- startBob.append(nonPoint.intervals[0].start);
- *startKeyInclusive = nonPoint.intervals[0].startInclusive;
- endBob.append(nonPoint.intervals[0].end);
- *endKeyInclusive = nonPoint.intervals[0].endInclusive;
-
- ++fieldNo;
-
- // Get some "all values" intervals for comparison's sake.
- // TODO: make static?
- Interval minMax = IndexBoundsBuilder::allValues();
- Interval maxMin = minMax;
- maxMin.reverse();
-
- // And after the non-point interval we can have any number of "all values" intervals.
- for (; fieldNo < bounds.fields.size(); ++fieldNo) {
- const OrderedIntervalList& oil = bounds.fields[fieldNo];
- // "All Values" is just one point.
- if (1 != oil.intervals.size()) {
- break;
- }
+ if (fieldNo >= bounds.fields.size()) {
+ // All our intervals are points. We count for all values of one field.
+ *startKey = startBob.obj();
+ *endKey = endBob.obj();
+ return true;
+ }
- // Must be min->max or max->min.
- if (oil.intervals[0].equals(minMax)) {
- // As an example for the logic below, consider the index {a:1, b:1} and a count for
- // {a: {$gt: 2}}. Our start key isn't inclusive (as it's $gt: 2) and looks like
- // {"":2} so far. If we move to the key greater than {"":2, "": MaxKey} we will get
- // the first value of 'a' that is greater than 2.
- if (!*startKeyInclusive) {
- startBob.appendMaxKey("");
- }
- else {
- // In this case, consider the index {a:1, b:1} and a count for {a:{$gte: 2}}.
- // We want to look at all values where a is 2, so our start key is {"":2,
- // "":MinKey}.
- startBob.appendMinKey("");
- }
+ // After point intervals we can have exactly one non-point interval.
+ const OrderedIntervalList& nonPoint = bounds.fields[fieldNo];
+ if (1 != nonPoint.intervals.size()) {
+ return false;
+ }
- // Same deal as above. Consider the index {a:1, b:1} and a count for {a: {$lt: 2}}.
- // Our end key isn't inclusive as ($lt: 2) and looks like {"":2} so far. We can't
- // look at any values where a is 2 so we have to stop at {"":2, "": MinKey} as
- // that's the smallest key where a is still 2.
- if (!*endKeyInclusive) {
- endBob.appendMinKey("");
- }
- else {
- endBob.appendMaxKey("");
- }
- }
- else if (oil.intervals[0].equals(maxMin)) {
- // The reasoning here is the same as above but with the directions reversed.
- if (!*startKeyInclusive) {
- startBob.appendMinKey("");
- }
- else {
- startBob.appendMaxKey("");
- }
- if (!*endKeyInclusive) {
- endBob.appendMaxKey("");
- }
- else {
- endBob.appendMinKey("");
- }
- }
- else {
- // No dice.
- break;
- }
+ // Add the non-point interval to our builder and set the inclusivity from it.
+ startBob.append(nonPoint.intervals[0].start);
+ *startKeyInclusive = nonPoint.intervals[0].startInclusive;
+ endBob.append(nonPoint.intervals[0].end);
+ *endKeyInclusive = nonPoint.intervals[0].endInclusive;
+
+ ++fieldNo;
+
+ // Get some "all values" intervals for comparison's sake.
+ // TODO: make static?
+ Interval minMax = IndexBoundsBuilder::allValues();
+ Interval maxMin = minMax;
+ maxMin.reverse();
+
+ // And after the non-point interval we can have any number of "all values" intervals.
+ for (; fieldNo < bounds.fields.size(); ++fieldNo) {
+ const OrderedIntervalList& oil = bounds.fields[fieldNo];
+ // "All Values" is just one point.
+ if (1 != oil.intervals.size()) {
+ break;
+ }
+
+ // Must be min->max or max->min.
+ if (oil.intervals[0].equals(minMax)) {
+ // As an example for the logic below, consider the index {a:1, b:1} and a count for
+ // {a: {$gt: 2}}. Our start key isn't inclusive (as it's $gt: 2) and looks like
+ // {"":2} so far. If we move to the key greater than {"":2, "": MaxKey} we will get
+ // the first value of 'a' that is greater than 2.
+ if (!*startKeyInclusive) {
+ startBob.appendMaxKey("");
+ } else {
+ // In this case, consider the index {a:1, b:1} and a count for {a:{$gte: 2}}.
+ // We want to look at all values where a is 2, so our start key is {"":2,
+ // "":MinKey}.
+ startBob.appendMinKey("");
+ }
+
+ // Same deal as above. Consider the index {a:1, b:1} and a count for {a: {$lt: 2}}.
+ // Our end key isn't inclusive as ($lt: 2) and looks like {"":2} so far. We can't
+ // look at any values where a is 2 so we have to stop at {"":2, "": MinKey} as
+ // that's the smallest key where a is still 2.
+ if (!*endKeyInclusive) {
+ endBob.appendMinKey("");
+ } else {
+ endBob.appendMaxKey("");
+ }
+ } else if (oil.intervals[0].equals(maxMin)) {
+ // The reasoning here is the same as above but with the directions reversed.
+ if (!*startKeyInclusive) {
+ startBob.appendMinKey("");
+ } else {
+ startBob.appendMaxKey("");
+ }
+ if (!*endKeyInclusive) {
+ endBob.appendMaxKey("");
+ } else {
+ endBob.appendMinKey("");
+ }
+ } else {
+ // No dice.
+ break;
}
+ }
- if (fieldNo >= bounds.fields.size()) {
- *startKey = startBob.obj();
- *endKey = endBob.obj();
- return true;
- }
- else {
- return false;
- }
+ if (fieldNo >= bounds.fields.size()) {
+ *startKey = startBob.obj();
+ *endKey = endBob.obj();
+ return true;
+ } else {
+ return false;
}
+}
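
The worked example from the comments above, as a hedged sketch with the bounds constructed by hand rather than via translate(): over the index {a: 1, b: 1}, the bounds for {a: {$gt: 2}} are an exclusive range on 'a' followed by an all-values interval on 'b', and they collapse to one contiguous key range.

// Illustrative only: bounds equivalent to {a: {$gt: 2}} over the index {a: 1, b: 1}.
IndexBounds bounds;
bounds.fields.resize(2);
bounds.fields[0].name = "a";
bounds.fields[0].intervals.push_back(IndexBoundsBuilder::makeRangeInterval(
    fromjson("{'': 2, '': Infinity}"), false /* startInclusive */, true /* endInclusive */));
bounds.fields[1].name = "b";
bounds.fields[1].intervals.push_back(IndexBoundsBuilder::allValues());

BSONObj startKey, endKey;
bool startKeyInclusive, endKeyInclusive;
bool single = IndexBoundsBuilder::isSingleInterval(
    bounds, &startKey, &startKeyInclusive, &endKey, &endKeyInclusive);
// single == true. Because the 'a' range excludes its start, startKey is
// {"": 2, "": MaxKey} with startKeyInclusive == false; endKey is
// {"": Infinity, "": MaxKey} with endKeyInclusive == true, as reasoned above.
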
} // namespace mongo
diff --git a/src/mongo/db/query/index_bounds_builder.h b/src/mongo/db/query/index_bounds_builder.h
index 094e8a4dbc1..785f78feb6d 100644
--- a/src/mongo/db/query/index_bounds_builder.h
+++ b/src/mongo/db/query/index_bounds_builder.h
@@ -36,161 +36,158 @@
namespace mongo {
+/**
+ * Translates expressions over fields into bounds on an index.
+ */
+class IndexBoundsBuilder {
+public:
+ /**
+ * Describes various degrees of precision with which predicates can be evaluated based
+ * on the index bounds.
+ *
+ * The integer values of the enum are significant, and are assigned in order of
+ * increasing tightness. These values are used when we need to do comparison between two
+ * BoundsTightness values. Such comparisons can answer questions such as "Does predicate
+ * X have tighter or looser bounds than predicate Y?".
+ */
+ enum BoundsTightness {
+ // Index bounds are inexact, and a fetch is required.
+ INEXACT_FETCH = 0,
+
+ // Index bounds are inexact, but no fetch is required
+ INEXACT_COVERED = 1,
+
+ // Index bounds are exact.
+ EXACT = 2
+ };
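
A small hedged example of the ordering property described above; the variable names are illustrative, not existing planner code.

// Keep the loosest tightness seen so far: a smaller enum value means looser bounds.
IndexBoundsBuilder::BoundsTightness tightest = IndexBoundsBuilder::EXACT;
IndexBoundsBuilder::BoundsTightness current = IndexBoundsBuilder::INEXACT_FETCH;
if (current < tightest) {
    tightest = current;  // INEXACT_FETCH (0) is looser than EXACT (2).
}
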
+
+ /**
+     * Populate the provided O.I.L. with one interval that goes from MinKey to MaxKey (or vice-versa
+ * depending on the index direction).
+ */
+ static void allValuesForField(const BSONElement& elt, OrderedIntervalList* out);
+
/**
- * Translates expressions over fields into bounds on an index.
+ * Turn the MatchExpression in 'expr' into a set of index bounds. The field that 'expr' is
+ * concerned with is indexed according to the keypattern element 'elt' from index 'index'.
+ *
+ * If 'expr' is elemMatch, the index tag is affixed to a child.
+ *
+ * The expression must be a predicate over one field. That is, expr->isLeaf() or
+ * expr->isArray() must be true, and expr->isLogical() must be false.
*/
- class IndexBoundsBuilder {
- public:
- /**
- * Describes various degrees of precision with which predicates can be evaluated based
- * on the index bounds.
- *
- * The integer values of the enum are significant, and are assigned in order of
- * increasing tightness. These values are used when we need to do comparison between two
- * BoundsTightness values. Such comparisons can answer questions such as "Does predicate
- * X have tighter or looser bounds than predicate Y?".
- */
- enum BoundsTightness {
- // Index bounds are inexact, and a fetch is required.
- INEXACT_FETCH = 0,
-
- // Index bounds are inexact, but no fetch is required
- INEXACT_COVERED = 1,
-
- // Index bounds are exact.
- EXACT = 2
- };
-
- /**
- * Populate the provided O.I.L. with one interval goes from MinKey to MaxKey (or vice-versa
- * depending on the index direction).
- */
- static void allValuesForField(const BSONElement& elt, OrderedIntervalList* out);
-
- /**
- * Turn the MatchExpression in 'expr' into a set of index bounds. The field that 'expr' is
- * concerned with is indexed according to the keypattern element 'elt' from index 'index'.
- *
- * If 'expr' is elemMatch, the index tag is affixed to a child.
- *
- * The expression must be a predicate over one field. That is, expr->isLeaf() or
- * expr->isArray() must be true, and expr->isLogical() must be false.
- */
- static void translate(const MatchExpression* expr,
- const BSONElement& elt,
- const IndexEntry& index,
- OrderedIntervalList* oilOut,
- BoundsTightness* tightnessOut);
-
- /**
- * Creates bounds for 'expr' (indexed according to 'elt'). Intersects those bounds
- * with the bounds in oilOut, which is an in/out parameter.
- */
- static void translateAndIntersect(const MatchExpression* expr,
- const BSONElement& elt,
- const IndexEntry& index,
- OrderedIntervalList* oilOut,
- BoundsTightness* tightnessOut);
-
- /**
- * Creates bounds for 'expr' (indexed according to 'elt'). Unions those bounds
- * with the bounds in oilOut, which is an in/out parameter.
- */
- static void translateAndUnion(const MatchExpression* expr,
+ static void translate(const MatchExpression* expr,
+ const BSONElement& elt,
+ const IndexEntry& index,
+ OrderedIntervalList* oilOut,
+ BoundsTightness* tightnessOut);
+
+ /**
+ * Creates bounds for 'expr' (indexed according to 'elt'). Intersects those bounds
+ * with the bounds in oilOut, which is an in/out parameter.
+ */
+ static void translateAndIntersect(const MatchExpression* expr,
const BSONElement& elt,
const IndexEntry& index,
OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut);
- /**
- * Make a range interval from the provided object.
- * The object must have exactly two fields. The first field is the start, the second the
- * end.
- * The two inclusive flags indicate whether or not the start/end fields are included in the
- * interval (closed interval if included, open if not).
- */
- static Interval makeRangeInterval(const BSONObj& obj,
- bool startInclusive,
- bool endInclusive);
-
- static Interval makeRangeInterval(const std::string& start,
- const std::string& end,
- bool startInclusive,
- bool endInclusive);
-
- /**
- * Make a point interval from the provided object.
- * The object must have exactly one field which is the value of the point interval.
- */
- static Interval makePointInterval(const BSONObj& obj);
- static Interval makePointInterval(const std::string& str);
- static Interval makePointInterval(double d);
-
- /**
- * Since we have no BSONValue we must make an object that's a copy of a piece of another
- * object.
- */
- static BSONObj objFromElement(const BSONElement& elt);
-
- /**
- * Swap start/end in the provided interval.
- */
- static void reverseInterval(Interval* ival);
-
- /**
- * Copied almost verbatim from db/queryutil.cpp.
- *
- * returns a std::string that when used as a matcher, would match a super set of regex()
- *
- * returns "" for complex regular expressions
- *
- * used to optimize queries in some simple regex cases that start with '^'
- */
- static std::string simpleRegex(const char* regex,
- const char* flags,
+ /**
+ * Creates bounds for 'expr' (indexed according to 'elt'). Unions those bounds
+ * with the bounds in oilOut, which is an in/out parameter.
+ */
+ static void translateAndUnion(const MatchExpression* expr,
+ const BSONElement& elt,
+ const IndexEntry& index,
+ OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut);
- /**
- * Returns an Interval from minKey to maxKey
- */
- static Interval allValues();
+ /**
+ * Make a range interval from the provided object.
+ * The object must have exactly two fields. The first field is the start, the second the
+ * end.
+ * The two inclusive flags indicate whether or not the start/end fields are included in the
+ * interval (closed interval if included, open if not).
+ */
+ static Interval makeRangeInterval(const BSONObj& obj, bool startInclusive, bool endInclusive);
+
+ static Interval makeRangeInterval(const std::string& start,
+ const std::string& end,
+ bool startInclusive,
+ bool endInclusive);
+
+ /**
+ * Make a point interval from the provided object.
+ * The object must have exactly one field which is the value of the point interval.
+ */
+ static Interval makePointInterval(const BSONObj& obj);
+ static Interval makePointInterval(const std::string& str);
+ static Interval makePointInterval(double d);
+
+ /**
+ * Since we have no BSONValue we must make an object that's a copy of a piece of another
+ * object.
+ */
+ static BSONObj objFromElement(const BSONElement& elt);
+
+ /**
+ * Swap start/end in the provided interval.
+ */
+ static void reverseInterval(Interval* ival);
- static void translateRegex(const RegexMatchExpression* rme,
- OrderedIntervalList* oil,
+ /**
+ * Copied almost verbatim from db/queryutil.cpp.
+ *
+     * returns a std::string that, when used as a matcher, would match a superset of regex()
+ *
+ * returns "" for complex regular expressions
+ *
+ * used to optimize queries in some simple regex cases that start with '^'
+ */
+ static std::string simpleRegex(const char* regex,
+ const char* flags,
BoundsTightness* tightnessOut);
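
A hedged usage sketch matching the SimpleRegexTest cases later in this change: an anchored pattern with no options yields an exact prefix, while an option such as 'm' can defeat the '^' anchor and leave only covered, inexact bounds.

// Sketch only; expected values taken from the SimpleRegexTest cases below.
IndexBoundsBuilder::BoundsTightness tightness;
std::string prefix = IndexBoundsBuilder::simpleRegex("^foo", "", &tightness);
// prefix == "foo", tightness == IndexBoundsBuilder::EXACT
prefix = IndexBoundsBuilder::simpleRegex("^foo", "m", &tightness);
// prefix == "", tightness == IndexBoundsBuilder::INEXACT_COVERED
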
- static void translateEquality(const BSONElement& data,
- bool isHashed,
- OrderedIntervalList* oil,
- BoundsTightness* tightnessOut);
+ /**
+ * Returns an Interval from minKey to maxKey
+ */
+ static Interval allValues();
- static void unionize(OrderedIntervalList* oilOut);
- static void intersectize(const OrderedIntervalList& arg,
- OrderedIntervalList* oilOut);
-
- /**
- * Fills out 'bounds' with the bounds for an index scan over all values of the
- * index described by 'keyPattern' in the default forward direction.
- */
- static void allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds);
-
- /**
- * Assumes each OIL in 'bounds' is increasing.
- *
- * Aligns OILs (and bounds) according to the 'kp' direction * the scanDir.
- */
- static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir = 1);
-
- /**
- * Returns 'true' if the bounds 'bounds' can be represented as one interval between
- * 'startKey' and 'endKey'. Inclusivity of each bound is set through the relevant
- * (name)KeyInclusive parameter. Returns 'false' if otherwise.
- */
- static bool isSingleInterval(const IndexBounds& bounds,
- BSONObj* startKey,
- bool* startKeyInclusive,
- BSONObj* endKey,
- bool* endKeyInclusive);
- };
+ static void translateRegex(const RegexMatchExpression* rme,
+ OrderedIntervalList* oil,
+ BoundsTightness* tightnessOut);
+
+ static void translateEquality(const BSONElement& data,
+ bool isHashed,
+ OrderedIntervalList* oil,
+ BoundsTightness* tightnessOut);
+
+ static void unionize(OrderedIntervalList* oilOut);
+ static void intersectize(const OrderedIntervalList& arg, OrderedIntervalList* oilOut);
+
+ /**
+ * Fills out 'bounds' with the bounds for an index scan over all values of the
+ * index described by 'keyPattern' in the default forward direction.
+ */
+ static void allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds);
+
+ /**
+ * Assumes each OIL in 'bounds' is increasing.
+ *
+ * Aligns OILs (and bounds) according to the 'kp' direction * the scanDir.
+ */
+ static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir = 1);
+
+ /**
+ * Returns 'true' if the bounds 'bounds' can be represented as one interval between
+ * 'startKey' and 'endKey'. Inclusivity of each bound is set through the relevant
+     * (name)KeyInclusive parameter. Returns 'false' otherwise.
+ */
+ static bool isSingleInterval(const IndexBounds& bounds,
+ BSONObj* startKey,
+ bool* startKeyInclusive,
+ BSONObj* endKey,
+ bool* endKeyInclusive);
+};
} // namespace mongo
diff --git a/src/mongo/db/query/index_bounds_builder_test.cpp b/src/mongo/db/query/index_bounds_builder_test.cpp
index 1b349b0515a..cf8129c016d 100644
--- a/src/mongo/db/query/index_bounds_builder_test.cpp
+++ b/src/mongo/db/query/index_bounds_builder_test.cpp
@@ -42,1353 +42,1373 @@ using namespace mongo;
namespace {
- using std::unique_ptr;
- using std::numeric_limits;
- using std::string;
- using std::vector;
-
- double numberMin = -numeric_limits<double>::max();
- double numberMax = numeric_limits<double>::max();
- double negativeInfinity = -numeric_limits<double>::infinity();
- double positiveInfinity = numeric_limits<double>::infinity();
-
- /**
- * Utility function to create MatchExpression
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- ASSERT_TRUE(status.isOK());
- MatchExpression* expr(status.getValue());
- return expr;
- }
+using std::unique_ptr;
+using std::numeric_limits;
+using std::string;
+using std::vector;
- /**
- * Given a list of queries in 'toUnion', translate into index bounds and return
- * the union of these bounds in the out-parameter 'oilOut'.
- */
- void testTranslateAndUnion(const vector<BSONObj>& toUnion, OrderedIntervalList* oilOut,
- IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+double numberMin = -numeric_limits<double>::max();
+double numberMax = numeric_limits<double>::max();
+double negativeInfinity = -numeric_limits<double>::infinity();
+double positiveInfinity = numeric_limits<double>::infinity();
- IndexEntry testIndex = IndexEntry(BSONObj());
-
- for (vector<BSONObj>::const_iterator it = toUnion.begin();
- it != toUnion.end();
- ++it) {
- unique_ptr<MatchExpression> expr(parseMatchExpression(*it));
- BSONElement elt = it->firstElement();
- if (toUnion.begin() == it) {
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- else {
- IndexBoundsBuilder::translateAndUnion(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- }
- }
+/**
+ * Utility function to create MatchExpression
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ ASSERT_TRUE(status.isOK());
+ MatchExpression* expr(status.getValue());
+ return expr;
+}
- /**
- * Given a list of queries in 'toUnion', translate into index bounds and return
- * the intersection of these bounds in the out-parameter 'oilOut'.
- */
- void testTranslateAndIntersect(const vector<BSONObj>& toIntersect, OrderedIntervalList* oilOut,
- IndexBoundsBuilder::BoundsTightness* tightnessOut) {
-
- IndexEntry testIndex = IndexEntry(BSONObj());
-
- for (vector<BSONObj>::const_iterator it = toIntersect.begin();
- it != toIntersect.end();
- ++it) {
- unique_ptr<MatchExpression> expr(parseMatchExpression(*it));
- BSONElement elt = it->firstElement();
- if (toIntersect.begin() == it) {
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- else {
- IndexBoundsBuilder::translateAndIntersect(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
+/**
+ * Given a list of queries in 'toUnion', translate into index bounds and return
+ * the union of these bounds in the out-parameter 'oilOut'.
+ */
+void testTranslateAndUnion(const vector<BSONObj>& toUnion,
+ OrderedIntervalList* oilOut,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+
+ for (vector<BSONObj>::const_iterator it = toUnion.begin(); it != toUnion.end(); ++it) {
+ unique_ptr<MatchExpression> expr(parseMatchExpression(*it));
+ BSONElement elt = it->firstElement();
+ if (toUnion.begin() == it) {
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
+ } else {
+ IndexBoundsBuilder::translateAndUnion(expr.get(), elt, testIndex, oilOut, tightnessOut);
}
}
+}
- /**
- * 'constraints' is a vector of BSONObj's representing match expressions, where
- * each filter is paired with a boolean. If the boolean is true, then the filter's
- * index bounds should be intersected with the other constraints; if false, then
- * they should be unioned. The resulting bounds are returned in the
- * out-parameter 'oilOut'.
- */
- void testTranslate(const vector< std::pair<BSONObj, bool> >& constraints,
- OrderedIntervalList* oilOut,
- IndexBoundsBuilder::BoundsTightness* tightnessOut) {
-
- IndexEntry testIndex = IndexEntry(BSONObj());
-
- for (vector< std::pair<BSONObj, bool> >::const_iterator it = constraints.begin();
- it != constraints.end();
- ++it) {
- BSONObj obj = it->first;
- bool isIntersect = it->second;
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- if (constraints.begin() == it) {
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- else if (isIntersect) {
- IndexBoundsBuilder::translateAndIntersect(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
- else {
- IndexBoundsBuilder::translateAndUnion(expr.get(), elt, testIndex, oilOut, tightnessOut);
- }
+/**
+ * Given a list of queries in 'toIntersect', translate into index bounds and return
+ * the intersection of these bounds in the out-parameter 'oilOut'.
+ */
+void testTranslateAndIntersect(const vector<BSONObj>& toIntersect,
+ OrderedIntervalList* oilOut,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+
+ for (vector<BSONObj>::const_iterator it = toIntersect.begin(); it != toIntersect.end(); ++it) {
+ unique_ptr<MatchExpression> expr(parseMatchExpression(*it));
+ BSONElement elt = it->firstElement();
+ if (toIntersect.begin() == it) {
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
+ } else {
+ IndexBoundsBuilder::translateAndIntersect(
+ expr.get(), elt, testIndex, oilOut, tightnessOut);
}
}
+}
- /**
- * run isSingleInterval and return the result to calling test.
- */
- bool testSingleInterval(IndexBounds bounds) {
- BSONObj startKey;
- bool startKeyIn;
- BSONObj endKey;
- bool endKeyIn;
- return IndexBoundsBuilder::isSingleInterval( bounds,
- &startKey,
- &startKeyIn,
- &endKey,
- &endKeyIn );
- }
-
- //
- // $elemMatch value
- // Example: {a: {$elemMatch: {$gt: 2}}}
- //
-
- TEST(IndexBoundsBuilderTest, TranslateElemMatchValue) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- // Bounds generated should be the same as the embedded expression
- // except for the tightness.
- BSONObj obj = fromjson("{a: {$elemMatch: {$gt: 2}}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 2, '': Infinity}"), false, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- //
- // Comparison operators ($lte, $lt, $gt, $gte, $eq)
- //
-
- TEST(IndexBoundsBuilderTest, TranslateLteNumber) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lte: 1}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': 1}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLteNumberMin) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$lte" << numberMin));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << negativeInfinity << "" << numberMin), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLteNegativeInfinity) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lte: -Infinity}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': -Infinity}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtNumber) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lt: 1}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': 1}"), true, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtNumberMin) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$lt" << numberMin));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << negativeInfinity << "" << numberMin), true, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtNegativeInfinity) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lt: -Infinity}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtDate) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << LT << Date_t::fromMillisSinceEpoch(5000));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': true, '': new Date(5000)}"), false, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtNumber) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: 1}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': Infinity}"), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtNumberMax) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$gt" << numberMax));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << numberMax << "" << positiveInfinity), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtPositiveInfinity) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: Infinity}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGteNumber) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gte: 1}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': Infinity}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGteNumberMax) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$gte" << numberMax));
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << numberMax << "" << positiveInfinity), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtePositiveInfinity) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gte: Infinity}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': Infinity, '': Infinity}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtString) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: 'abc'}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 'abc', '': {}}"), false, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateEqualNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: NaN}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lt: NaN}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLteNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lte: NaN}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: NaN}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGteNan) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gte: NaN}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateEqual) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << 4);
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 4, '': 4}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateArrayEqualBasic) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: [1, 2, 3]}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': [1, 2, 3], '': [1, 2, 3]}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateIn) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$in: [8, 44, -1, -3]}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 4U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -3, '': -3}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': -1, '': -1}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[2].compare(
- Interval(fromjson("{'': 8, '': 8}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[3].compare(
- Interval(fromjson("{'': 44, '': 44}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateInArray) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$in: [[1], 2]}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 3U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': 2, '': 2}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[2].compare(
- Interval(fromjson("{'': [1], '': [1]}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLteBinData) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lte: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA',"
- "$type: '00'}}}");
- std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQ(oil.name, "a");
- ASSERT_EQ(oil.intervals.size(), 1U);
- ASSERT_EQ(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': {$binary: '', $type: '00'},"
- "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
- true, true)));
- ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateLtBinData) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$lt: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA',"
- "$type: '00'}}}");
- std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQ(oil.name, "a");
- ASSERT_EQ(oil.intervals.size(), 1U);
- ASSERT_EQ(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': {$binary: '', $type: '00'},"
- "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
- true, false)));
- ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGtBinData) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gt: {$binary: '////////////////////////////',"
- "$type: '00'}}}");
- std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQ(oil.name, "a");
- ASSERT_EQ(oil.intervals.size(), 1U);
- ASSERT_EQ(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': {$binary: '////////////////////////////', $type: '00'},"
- "'': ObjectId('000000000000000000000000')}"),
- false, false)));
- ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, TranslateGteBinData) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$gte: {$binary: '////////////////////////////',"
- "$type: '00'}}}");
- std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQ(oil.name, "a");
- ASSERT_EQ(oil.intervals.size(), 1U);
- ASSERT_EQ(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': {$binary: '////////////////////////////', $type: '00'},"
- "'': ObjectId('000000000000000000000000')}"),
- true, false)));
- ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
- }
-
- //
- // $exists tests
- //
-
- TEST(IndexBoundsBuilderTest, ExistsTrue) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$exists: true}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
- oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- TEST(IndexBoundsBuilderTest, ExistsFalse) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$exists: false}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': null, '': null}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
- }
-
- TEST(IndexBoundsBuilderTest, ExistsTrueSparse) {
- IndexEntry testIndex = IndexEntry(BSONObj(),
- false, // multikey
- true, // sparse
- false, // unique
- "exists_true_sparse",
- nullptr, // filterExpr
- BSONObj());
- BSONObj obj = fromjson("{a: {$exists: true}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
- oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- //
- // Union tests
- //
-
- TEST(IndexBoundsBuilderTest, UnionTwoLt) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toUnion;
- toUnion.push_back(fromjson("{a: {$lt: 1}}"));
- toUnion.push_back(fromjson("{a: {$lt: 5}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndUnion(toUnion, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': 5}"), true, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, UnionDupEq) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toUnion;
- toUnion.push_back(fromjson("{a: 1}"));
- toUnion.push_back(fromjson("{a: 5}"));
- toUnion.push_back(fromjson("{a: 1}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndUnion(toUnion, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': 5, '': 5}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, UnionGtLt) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toUnion;
- toUnion.push_back(fromjson("{a: {$gt: 1}}"));
- toUnion.push_back(fromjson("{a: {$lt: 3}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndUnion(toUnion, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': Infinity}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, UnionTwoEmptyRanges) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector< std::pair<BSONObj, bool> > constraints;
- constraints.push_back(std::make_pair(fromjson("{a: {$gt: 1}}"), true));
- constraints.push_back(std::make_pair(fromjson("{a: {$lte: 0}}"), true));
- constraints.push_back(std::make_pair(fromjson("{a: {$in:[]}}"), false));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslate(constraints, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- }
-
- //
- // Intersection tests
- //
-
- TEST(IndexBoundsBuilderTest, IntersectTwoLt) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$lt: 1}}"));
- toIntersect.push_back(fromjson("{a: {$lt: 5}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': -Infinity, '': 1}"), true, false)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectEqGte) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: 1}}"));
- toIntersect.push_back(fromjson("{a: {$gte: 1}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectGtLte) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gt: 0}}"));
- toIntersect.push_back(fromjson("{a: {$lte: 10}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 0, '': 10}"), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectGtIn) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gt: 4}}"));
- toIntersect.push_back(fromjson("{a: {$in: [1,2,3,4,5,6]}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 5, '': 5}"), true, true)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': 6, '': 6}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectionIsPointInterval) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gte: 1}}"));
- toIntersect.push_back(fromjson("{a: {$lte: 1}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 1, '': 1}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectFullyContained) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gt: 5}}"));
- toIntersect.push_back(fromjson("{a: {$lt: 15}}"));
- toIntersect.push_back(fromjson("{a: {$gte: 6}}"));
- toIntersect.push_back(fromjson("{a: {$lte: 13}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 6, '': 13}"), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, EmptyIntersection) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: 1}}"));
- toIntersect.push_back(fromjson("{a: {$gte: 2}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- }
-
- //
- // $mod
- //
-
- TEST(IndexBoundsBuilderTest, TranslateMod) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$mod: [2, 0]}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << numberMin << "" << numberMax), true, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- //
- // Test simpleRegex
- //
-
- TEST(SimpleRegexTest, RootedLine) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^foo", "", &tightness);
- ASSERT_EQUALS(prefix, "foo");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedString) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "", &tightness);
- ASSERT_EQUALS(prefix, "foo");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedOptionalFirstChar) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^f?oo", "", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedOptionalSecondChar) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^fz?oo", "", &tightness);
- ASSERT_EQUALS(prefix, "f");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedMultiline) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^foo", "m", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedStringMultiline) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "m", &tightness);
- ASSERT_EQUALS(prefix, "foo");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedCaseInsensitiveMulti) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "mi", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedComplex) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "\\Af \t\vo\n\ro \\ \\# #comment", "mx", &tightness);
- ASSERT_EQUALS(prefix, "foo #");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedLiteral) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qasdf\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "asdf");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralWithExtra) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qasdf\\E.*", "", &tightness);
- ASSERT_EQUALS(prefix, "asdf");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, RootedLiteralNoEnd) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qasdf", "", &tightness);
- ASSERT_EQUALS(prefix, "asdf");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralBackslash) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qasdf\\\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "asdf\\");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralDotStar) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qas.*df\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "as.*df");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralNestedEscape) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qas\\Q[df\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "as\\Q[df");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(SimpleRegexTest, RootedLiteralNestedEscapeEnd) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^\\Qas\\E\\\\E\\Q$df\\E", "", &tightness);
- ASSERT_EQUALS(prefix, "as\\E$df");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- // A regular expression with the "|" character is not considered simple. See SERVER-15235.
- TEST(SimpleRegexTest, PipeCharacterDisallowed) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^(a(a|$)|b", "", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(SimpleRegexTest, PipeCharacterDisallowed2) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "^(a(a|$)|^b", "", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- // SERVER-9035
- TEST(SimpleRegexTest, RootedSingleLineMode) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("^foo", "s", &tightness);
- ASSERT_EQUALS(prefix, "foo");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- // SERVER-9035
- TEST(SimpleRegexTest, NonRootedSingleLineMode) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex("foo", "s", &tightness);
- ASSERT_EQUALS(prefix, "");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- // SERVER-9035
- TEST(SimpleRegexTest, RootedComplexSingleLineMode) {
- IndexBoundsBuilder::BoundsTightness tightness;
- string prefix = IndexBoundsBuilder::simpleRegex(
- "\\Af \t\vo\n\ro \\ \\# #comment", "msx", &tightness);
- ASSERT_EQUALS(prefix, "foo #");
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- //
- // Regex bounds
- //
-
- TEST(IndexBoundsBuilderTest, SimpleNonPrefixRegex) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: /foo/}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': '', '': {}}"), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': /foo/, '': /foo/}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(IndexBoundsBuilderTest, NonSimpleRegexWithPipe) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: /^foo.*|bar/}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': '', '': {}}"), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': /^foo.*|bar/, '': /^foo.*|bar/}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_COVERED);
- }
-
- TEST(IndexBoundsBuilderTest, SimpleRegexSingleLineMode) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: /^foo/s}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 'foo', '': 'fop'}"), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': /^foo/s, '': /^foo/s}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, SimplePrefixRegex) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: /^foo/}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(fromjson("{'': 'foo', '': 'fop'}"), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(fromjson("{'': /^foo/, '': /^foo/}"), true, true)));
- ASSERT(tightness == IndexBoundsBuilder::EXACT);
- }
-
- //
- // isSingleInterval
- //
-
- TEST(IndexBoundsBuilderTest, SingleFieldEqualityInterval) {
- // Equality on a single field is a single interval.
- OrderedIntervalList oil("a");
- IndexBounds bounds;
- oil.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
- bounds.fields.push_back(oil);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, SingleIntervalSingleFieldInterval) {
- // Single interval on a single field is a single interval.
- OrderedIntervalList oil("a");
- IndexBounds bounds;
- oil.intervals.push_back(Interval(fromjson("{ '':5, '':Infinity }"), true, true));
- bounds.fields.push_back(oil);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, MultipleIntervalsSingleFieldInterval) {
- // Multiple intervals on a single field is not a single interval.
- OrderedIntervalList oil("a");
- IndexBounds bounds;
- oil.intervals.push_back(Interval(fromjson( "{ '':4, '':5 }" ), true, true));
- oil.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil);
- ASSERT(!testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualityTwoFieldsInterval) {
- // Equality on two fields is a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
- oil_b.intervals.push_back(Interval(BSON("" << 6 << "" << 6), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualityFirstFieldSingleIntervalSecondFieldInterval) {
- // Equality on first field and single interval on second field
- // is a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':6, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, SingleIntervalFirstAndSecondFieldsInterval) {
- // Single interval on first field and single interval on second field is
- // not a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(fromjson( "{ '':-Infinity, '':5 }" ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':6, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(!testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, MultipleIntervalsTwoFieldsInterval) {
- // Multiple intervals on two fields is not a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 4 << "" << 4 ), true, true));
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(BSON( "" << 7 << "" << 7 ), true, true));
- oil_b.intervals.push_back(Interval(BSON( "" << 8 << "" << 8 ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(!testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, MissingSecondFieldInterval) {
- // when second field is not specified, still a compound single interval
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(IndexBoundsBuilder::allValues());
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualityTwoFieldsIntervalThirdInterval) {
- // Equality on first two fields and single interval on third is a
- // compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(BSON( "" << 6 << "" << 6 ), true, true));
- oil_c.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualitySingleIntervalMissingInterval) {
- // Equality, then Single Interval, then missing is a compound single interval
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualitySingleMissingMissingInterval) {
- // Equality, then single interval, then missing, then missing,
- // is a compound single interval
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- OrderedIntervalList oil_d("d");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
- oil_d.intervals.push_back(IndexBoundsBuilder::allValues());
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- bounds.fields.push_back(oil_d);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualitySingleMissingMissingMixedInterval) {
- // Equality, then single interval, then missing, then missing, with mixed order
- // fields is a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- OrderedIntervalList oil_d("d");
- IndexBounds bounds;
- Interval allValues = IndexBoundsBuilder::allValues();
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- oil_c.intervals.push_back(allValues);
- IndexBoundsBuilder::reverseInterval(&allValues);
- oil_d.intervals.push_back(allValues);
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- bounds.fields.push_back(oil_d);
- ASSERT(testSingleInterval(bounds));
- }
-
- TEST(IndexBoundsBuilderTest, EqualitySingleMissingSingleInterval) {
- // Equality, then single interval, then missing, then single interval is not
- // a compound single interval.
- OrderedIntervalList oil_a("a");
- OrderedIntervalList oil_b("b");
- OrderedIntervalList oil_c("c");
- OrderedIntervalList oil_d("d");
- IndexBounds bounds;
- oil_a.intervals.push_back(Interval(BSON( "" << 5 << "" << 5 ), true, true));
- oil_b.intervals.push_back(Interval(fromjson( "{ '':7, '':Infinity }" ), true, true));
- oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
- oil_d.intervals.push_back(Interval(fromjson( "{ '':1, '':Infinity }" ), true, true));
- bounds.fields.push_back(oil_a);
- bounds.fields.push_back(oil_b);
- bounds.fields.push_back(oil_c);
- bounds.fields.push_back(oil_d);
- ASSERT(!testSingleInterval(bounds));
- }
-
- //
- // Complementing bounds for negations
- //
-
- /**
- * Get a BSONObj which represents the interval from
- * MinKey to 'end'.
- */
- BSONObj minKeyIntObj(int end) {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendNumber("", end);
- return bob.obj();
- }
-
- /**
- * Get a BSONObj which represents the interval from
- * 'start' to MaxKey.
- */
- BSONObj maxKeyIntObj(int start) {
- BSONObjBuilder bob;
- bob.appendNumber("", start);
- bob.appendMaxKey("");
- return bob.obj();
- }
-
- // Expected oil: [MinKey, 3), (3, MaxKey]
- TEST(IndexBoundsBuilderTest, SimpleNE) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = BSON("a" << BSON("$ne" << 3));
+/**
+ * 'constraints' is a vector of BSONObjs representing match expressions, where
+ * each filter is paired with a boolean. If the boolean is true, then the filter's
+ * index bounds should be intersected with the other constraints; if false, then
+ * they should be unioned. The resulting bounds are returned in the
+ * out-parameter 'oilOut'.
+ */
+void testTranslate(const vector<std::pair<BSONObj, bool>>& constraints,
+ OrderedIntervalList* oilOut,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+
+ for (vector<std::pair<BSONObj, bool>>::const_iterator it = constraints.begin();
+ it != constraints.end();
+ ++it) {
+ BSONObj obj = it->first;
+ bool isIntersect = it->second;
unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
BSONElement elt = obj.firstElement();
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(minKeyIntObj(3), true, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(maxKeyIntObj(3), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, IntersectWithNE) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toIntersect;
- toIntersect.push_back(fromjson("{a: {$gt: 1}}"));
- toIntersect.push_back(fromjson("{a: {$ne: 2}}}"));
- toIntersect.push_back(fromjson("{a: {$lte: 6}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndIntersect(toIntersect, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << 1 << "" << 2), false, false)));
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(BSON("" << 2 << "" << 6), false, true)));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
- }
-
- TEST(IndexBoundsBuilderTest, UnionizeWithNE) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- vector<BSONObj> toUnionize;
- toUnionize.push_back(fromjson("{a: {$ne: 3}}"));
- toUnionize.push_back(fromjson("{a: {$ne: 4}}}"));
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- testTranslateAndUnion(toUnionize, &oil, &tightness);
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- IndexBoundsBuilder::allValues()));
- ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+ if (constraints.begin() == it) {
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, oilOut, tightnessOut);
+ } else if (isIntersect) {
+ IndexBoundsBuilder::translateAndIntersect(
+ expr.get(), elt, testIndex, oilOut, tightnessOut);
+ } else {
+ IndexBoundsBuilder::translateAndUnion(expr.get(), elt, testIndex, oilOut, tightnessOut);
+ }
}
+}
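+
+// Illustrative usage of testTranslate() above (editor's sketch, not part of the
+// original test file): intersect {$gt: 1} with {$lte: 0}, then union in {$in: []}.
+// The boolean paired with the first constraint is ignored because translate()
+// seeds the bounds; see the UnionTwoEmptyRanges test below for the in-tree usage.
+//
+//     vector<std::pair<BSONObj, bool>> constraints;
+//     constraints.push_back(std::make_pair(fromjson("{a: {$gt: 1}}"), true));
+//     constraints.push_back(std::make_pair(fromjson("{a: {$lte: 0}}"), true));
+//     constraints.push_back(std::make_pair(fromjson("{a: {$in: []}}"), false));
+//     OrderedIntervalList oil;
+//     IndexBoundsBuilder::BoundsTightness tightness;
+//     testTranslate(constraints, &oil, &tightness);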
- // Test $type bounds for Code BSON type.
- TEST(IndexBoundsBuilderTest, CodeTypeBounds) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$type: 13}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
-
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
-
- // Build the expected interval.
- BSONObjBuilder bob;
- bob.appendCode("", "");
- bob.appendCodeWScope("", "", BSONObj());
- BSONObj expectedInterval = bob.obj();
-
- // Check the output of translate().
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(expectedInterval, true, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
- }
+/**
+ * Run isSingleInterval() and return the result to the calling test.
+ */
+bool testSingleInterval(IndexBounds bounds) {
+ BSONObj startKey;
+ bool startKeyIn;
+ BSONObj endKey;
+ bool endKeyIn;
+ return IndexBoundsBuilder::isSingleInterval(bounds, &startKey, &startKeyIn, &endKey, &endKeyIn);
+}
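+
+// Editor's note (not part of the original file): when isSingleInterval() returns
+// true it also fills the out-parameters with the compound start/end keys and their
+// inclusivity. For the single-field equality bounds in SingleFieldEqualityInterval
+// below, one would expect startKey and endKey both equal to BSON("" << 5), with
+// startKeyIn and endKeyIn both true.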
+
+//
+// $elemMatch value
+// Example: {a: {$elemMatch: {$gt: 2}}}
+//
+
+TEST(IndexBoundsBuilderTest, TranslateElemMatchValue) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ // Bounds generated should be the same as the embedded expression
+ // except for the tightness.
+ BSONObj obj = fromjson("{a: {$elemMatch: {$gt: 2}}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 2, '': Infinity}"), false, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+//
+// Comparison operators ($lte, $lt, $gt, $gte, $eq)
+//
+
+TEST(IndexBoundsBuilderTest, TranslateLteNumber) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lte: 1}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': 1}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLteNumberMin) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$lte" << numberMin));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(BSON("" << negativeInfinity << "" << numberMin), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLteNegativeInfinity) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lte: -Infinity}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': -Infinity}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtNumber) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lt: 1}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': 1}"), true, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtNumberMin) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$lt" << numberMin));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(BSON("" << negativeInfinity << "" << numberMin), true, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtNegativeInfinity) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lt: -Infinity}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtDate) {
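+    // In BSON's canonical type order, Boolean immediately precedes Date, so the
+    // exclusive lower bound of a {$lt: <date>} interval is the value true.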
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << LT << Date_t::fromMillisSinceEpoch(5000));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson("{'': true, '': new Date(5000)}"), false, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtNumber) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gt: 1}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': Infinity}"), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtNumberMax) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$gt" << numberMax));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(BSON("" << numberMax << "" << positiveInfinity), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtPositiveInfinity) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gt: Infinity}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGteNumber) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gte: 1}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': Infinity}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGteNumberMax) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$gte" << numberMax));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(BSON("" << numberMax << "" << positiveInfinity), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtePositiveInfinity) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gte: Infinity}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': Infinity, '': Infinity}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtString) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gt: 'abc'}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 'abc', '': {}}"), false, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
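+
+// NaN ordering: in BSON comparisons NaN is the smallest number and is equal only
+// to itself, so $lt and $gt against NaN produce empty bounds, while $eq, $lte and
+// $gte produce the point interval [NaN, NaN].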
+
+TEST(IndexBoundsBuilderTest, TranslateEqualNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: NaN}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lt: NaN}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLteNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$lte: NaN}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gt: NaN}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGteNan) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$gte: NaN}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': NaN, '': NaN}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateEqual) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << 4);
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 4, '': 4}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateArrayEqualBasic) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: [1, 2, 3]}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': [1, 2, 3], '': [1, 2, 3]}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateIn) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$in: [8, 44, -1, -3]}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 4U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -3, '': -3}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': -1, '': -1}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[2].compare(Interval(fromjson("{'': 8, '': 8}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[3].compare(Interval(fromjson("{'': 44, '': 44}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateInArray) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$in: [[1], 2]}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 3U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': 2, '': 2}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[2].compare(Interval(fromjson("{'': [1], '': [1]}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLteBinData) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson(
+ "{a: {$lte: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA',"
+ "$type: '00'}}}");
+ std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQ(oil.name, "a");
+ ASSERT_EQ(oil.intervals.size(), 1U);
+ ASSERT_EQ(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson(
+ "{'': {$binary: '', $type: '00'},"
+ "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
+ true,
+ true)));
+ ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateLtBinData) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson(
+ "{a: {$lt: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA',"
+ "$type: '00'}}}");
+ std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQ(oil.name, "a");
+ ASSERT_EQ(oil.intervals.size(), 1U);
+ ASSERT_EQ(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson(
+ "{'': {$binary: '', $type: '00'},"
+ "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
+ true,
+ false)));
+ ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGtBinData) {
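+    // BinData is immediately followed by ObjectId in the canonical type order, so
+    // the exclusive upper bound is the smallest ObjectId.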
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson(
+ "{a: {$gt: {$binary: '////////////////////////////',"
+ "$type: '00'}}}");
+ std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQ(oil.name, "a");
+ ASSERT_EQ(oil.intervals.size(), 1U);
+ ASSERT_EQ(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson(
+ "{'': {$binary: '////////////////////////////', $type: '00'},"
+ "'': ObjectId('000000000000000000000000')}"),
+ false,
+ false)));
+ ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, TranslateGteBinData) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson(
+ "{a: {$gte: {$binary: '////////////////////////////',"
+ "$type: '00'}}}");
+ std::unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQ(oil.name, "a");
+ ASSERT_EQ(oil.intervals.size(), 1U);
+ ASSERT_EQ(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(
+ Interval(fromjson(
+ "{'': {$binary: '////////////////////////////', $type: '00'},"
+ "'': ObjectId('000000000000000000000000')}"),
+ true,
+ false)));
+ ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
+}
+
+//
+// $exists tests
+//
+
+TEST(IndexBoundsBuilderTest, ExistsTrue) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$exists: true}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+TEST(IndexBoundsBuilderTest, ExistsFalse) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$exists: false}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': null, '': null}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+TEST(IndexBoundsBuilderTest, ExistsTrueSparse) {
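+    // A sparse index only contains entries for documents that have the indexed
+    // field, so {$exists: true} is answered exactly by the index with no fetch.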
+ IndexEntry testIndex = IndexEntry(BSONObj(),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "exists_true_sparse",
+ nullptr, // filterExpr
+ BSONObj());
+ BSONObj obj = fromjson("{a: {$exists: true}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+//
+// Union tests
+//
+
+TEST(IndexBoundsBuilderTest, UnionTwoLt) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toUnion;
+ toUnion.push_back(fromjson("{a: {$lt: 1}}"));
+ toUnion.push_back(fromjson("{a: {$lt: 5}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndUnion(toUnion, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': 5}"), true, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, UnionDupEq) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toUnion;
+ toUnion.push_back(fromjson("{a: 1}"));
+ toUnion.push_back(fromjson("{a: 5}"));
+ toUnion.push_back(fromjson("{a: 1}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndUnion(toUnion, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': 5, '': 5}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, UnionGtLt) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toUnion;
+ toUnion.push_back(fromjson("{a: {$gt: 1}}"));
+ toUnion.push_back(fromjson("{a: {$lt: 3}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndUnion(toUnion, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': Infinity}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, UnionTwoEmptyRanges) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<std::pair<BSONObj, bool>> constraints;
+ constraints.push_back(std::make_pair(fromjson("{a: {$gt: 1}}"), true));
+ constraints.push_back(std::make_pair(fromjson("{a: {$lte: 0}}"), true));
+ constraints.push_back(std::make_pair(fromjson("{a: {$in:[]}}"), false));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslate(constraints, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+}
+
+//
+// Intersection tests
+//
+
+TEST(IndexBoundsBuilderTest, IntersectTwoLt) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$lt: 1}}"));
+ toIntersect.push_back(fromjson("{a: {$lt: 5}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': -Infinity, '': 1}"), true, false)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectEqGte) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+    toIntersect.push_back(fromjson("{a: 1}"));
+ toIntersect.push_back(fromjson("{a: {$gte: 1}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectGtLte) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gt: 0}}"));
+ toIntersect.push_back(fromjson("{a: {$lte: 10}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 0, '': 10}"), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectGtIn) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gt: 4}}"));
+ toIntersect.push_back(fromjson("{a: {$in: [1,2,3,4,5,6]}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 5, '': 5}"), true, true)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': 6, '': 6}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectionIsPointInterval) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gte: 1}}"));
+ toIntersect.push_back(fromjson("{a: {$lte: 1}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 1, '': 1}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, IntersectFullyContained) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gt: 5}}"));
+ toIntersect.push_back(fromjson("{a: {$lt: 15}}"));
+ toIntersect.push_back(fromjson("{a: {$gte: 6}}"));
+ toIntersect.push_back(fromjson("{a: {$lte: 13}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 6, '': 13}"), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, EmptyIntersection) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+    toIntersect.push_back(fromjson("{a: 1}"));
+ toIntersect.push_back(fromjson("{a: {$gte: 2}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+}
+
+//
+// $mod
+//
+
+TEST(IndexBoundsBuilderTest, TranslateMod) {
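+    // $mod applies only to numeric values, so the bounds span the full numeric
+    // range [numberMin, numberMax]; the predicate can still be re-checked against
+    // the index key itself, hence INEXACT_COVERED rather than INEXACT_FETCH.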
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$mod: [2, 0]}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(BSON("" << numberMin << "" << numberMax), true, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+//
+// Test simpleRegex
+//
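+
+// Editor's summary (not part of the original file): simpleRegex() attempts to
+// extract a literal prefix from a rooted regular expression and reports, via the
+// tightness out-parameter, whether the prefix bounds alone are EXACT or the regex
+// must still be applied (INEXACT_COVERED). An empty prefix means no usable prefix
+// could be extracted.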
+
+TEST(SimpleRegexTest, RootedLine) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^foo", "", &tightness);
+ ASSERT_EQUALS(prefix, "foo");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedString) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "", &tightness);
+ ASSERT_EQUALS(prefix, "foo");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedOptionalFirstChar) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^f?oo", "", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedOptionalSecondChar) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^fz?oo", "", &tightness);
+ ASSERT_EQUALS(prefix, "f");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedMultiline) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^foo", "m", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedStringMultiline) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "m", &tightness);
+ ASSERT_EQUALS(prefix, "foo");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedCaseInsensitiveMulti) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("\\Afoo", "mi", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedComplex) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix =
+ IndexBoundsBuilder::simpleRegex("\\Af \t\vo\n\ro \\ \\# #comment", "mx", &tightness);
+ ASSERT_EQUALS(prefix, "foo #");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedLiteral) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qasdf\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "asdf");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralWithExtra) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qasdf\\E.*", "", &tightness);
+ ASSERT_EQUALS(prefix, "asdf");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, RootedLiteralNoEnd) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qasdf", "", &tightness);
+ ASSERT_EQUALS(prefix, "asdf");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralBackslash) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qasdf\\\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "asdf\\");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralDotStar) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qas.*df\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "as.*df");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralNestedEscape) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qas\\Q[df\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "as\\Q[df");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+TEST(SimpleRegexTest, RootedLiteralNestedEscapeEnd) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^\\Qas\\E\\\\E\\Q$df\\E", "", &tightness);
+ ASSERT_EQUALS(prefix, "as\\E$df");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+// A regular expression with the "|" character is not considered simple. See SERVER-15235.
+TEST(SimpleRegexTest, PipeCharacterDisallowed) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^(a(a|$)|b", "", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(SimpleRegexTest, PipeCharacterDisallowed2) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^(a(a|$)|^b", "", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+// SERVER-9035
+TEST(SimpleRegexTest, RootedSingleLineMode) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("^foo", "s", &tightness);
+ ASSERT_EQUALS(prefix, "foo");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+// SERVER-9035
+TEST(SimpleRegexTest, NonRootedSingleLineMode) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix = IndexBoundsBuilder::simpleRegex("foo", "s", &tightness);
+ ASSERT_EQUALS(prefix, "");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+// SERVER-9035
+TEST(SimpleRegexTest, RootedComplexSingleLineMode) {
+ IndexBoundsBuilder::BoundsTightness tightness;
+ string prefix =
+ IndexBoundsBuilder::simpleRegex("\\Af \t\vo\n\ro \\ \\# #comment", "msx", &tightness);
+ ASSERT_EQUALS(prefix, "foo #");
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+//
+// Regex bounds
+//
+
+TEST(IndexBoundsBuilderTest, SimpleNonPrefixRegex) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: /foo/}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': '', '': {}}"), true, false)));
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': /foo/, '': /foo/}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(IndexBoundsBuilderTest, NonSimpleRegexWithPipe) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: /^foo.*|bar/}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': '', '': {}}"), true, false)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(
+ Interval(fromjson("{'': /^foo.*|bar/, '': /^foo.*|bar/}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_COVERED);
+}
+
+TEST(IndexBoundsBuilderTest, SimpleRegexSingleLineMode) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: /^foo/s}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 'foo', '': 'fop'}"), true, false)));
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': /^foo/s, '': /^foo/s}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::EXACT);
+}
+
+TEST(IndexBoundsBuilderTest, SimplePrefixRegex) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: /^foo/}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
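+ // The rooted prefix "foo" translates to the string range ['foo', 'fop') plus the regex point interval, and the bounds are EXACT.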
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(fromjson("{'': 'foo', '': 'fop'}"), true, false)));
+ ASSERT_EQUALS(
+ Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(fromjson("{'': /^foo/, '': /^foo/}"), true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::EXACT);
+}
+
+//
+// isSingleInterval
+//
+
+TEST(IndexBoundsBuilderTest, SingleFieldEqualityInterval) {
+ // Equality on a single field is a single interval.
+ OrderedIntervalList oil("a");
+ IndexBounds bounds;
+ oil.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ bounds.fields.push_back(oil);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, SingleIntervalSingleFieldInterval) {
+ // Single interval on a single field is a single interval.
+ OrderedIntervalList oil("a");
+ IndexBounds bounds;
+ oil.intervals.push_back(Interval(fromjson("{ '':5, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, MultipleIntervalsSingleFieldInterval) {
+ // Multiple intervals on a single field is not a single interval.
+ OrderedIntervalList oil("a");
+ IndexBounds bounds;
+ oil.intervals.push_back(Interval(fromjson("{ '':4, '':5 }"), true, true));
+ oil.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil);
+ ASSERT(!testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualityTwoFieldsInterval) {
+ // Equality on two fields is a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(BSON("" << 6 << "" << 6), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualityFirstFieldSingleIntervalSecondFieldInterval) {
+ // Equality on first field and single interval on second field
+ // is a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':6, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, SingleIntervalFirstAndSecondFieldsInterval) {
+ // Single interval on first field and single interval on second field is
+ // not a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(fromjson("{ '':-Infinity, '':5 }"), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':6, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(!testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, MultipleIntervalsTwoFieldsInterval) {
+ // Multiple intervals on two fields is not a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 4 << "" << 4), true, true));
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(BSON("" << 7 << "" << 7), true, true));
+ oil_b.intervals.push_back(Interval(BSON("" << 8 << "" << 8), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(!testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, MissingSecondFieldInterval) {
+ // When the second field is not specified, the bounds still form a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(IndexBoundsBuilder::allValues());
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualityTwoFieldsIntervalThirdInterval) {
+ // Equality on first two fields and single interval on third is a
+ // compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(BSON("" << 6 << "" << 6), true, true));
+ oil_c.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualitySingleIntervalMissingInterval) {
+ // Equality, then single interval, then missing is a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualitySingleMissingMissingInterval) {
+ // Equality, then single interval, then missing, then missing,
+ // is a compound single interval
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ OrderedIntervalList oil_d("d");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
+ oil_d.intervals.push_back(IndexBoundsBuilder::allValues());
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ bounds.fields.push_back(oil_d);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualitySingleMissingMissingMixedInterval) {
+ // Equality, then single interval, then missing, then missing, with mixed order
+ // fields is a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ OrderedIntervalList oil_d("d");
+ IndexBounds bounds;
+ Interval allValues = IndexBoundsBuilder::allValues();
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ oil_c.intervals.push_back(allValues);
+ IndexBoundsBuilder::reverseInterval(&allValues);
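+ // Reverse the all-values interval for 'd' so the trailing fields have mixed orderings.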
+ oil_d.intervals.push_back(allValues);
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ bounds.fields.push_back(oil_d);
+ ASSERT(testSingleInterval(bounds));
+}
+
+TEST(IndexBoundsBuilderTest, EqualitySingleMissingSingleInterval) {
+ // Equality, then single interval, then missing, then single interval is not
+ // a compound single interval.
+ OrderedIntervalList oil_a("a");
+ OrderedIntervalList oil_b("b");
+ OrderedIntervalList oil_c("c");
+ OrderedIntervalList oil_d("d");
+ IndexBounds bounds;
+ oil_a.intervals.push_back(Interval(BSON("" << 5 << "" << 5), true, true));
+ oil_b.intervals.push_back(Interval(fromjson("{ '':7, '':Infinity }"), true, true));
+ oil_c.intervals.push_back(IndexBoundsBuilder::allValues());
+ oil_d.intervals.push_back(Interval(fromjson("{ '':1, '':Infinity }"), true, true));
+ bounds.fields.push_back(oil_a);
+ bounds.fields.push_back(oil_b);
+ bounds.fields.push_back(oil_c);
+ bounds.fields.push_back(oil_d);
+ ASSERT(!testSingleInterval(bounds));
+}
+
+//
+// Complementing bounds for negations
+//
- // Test $type bounds for Code With Scoped BSON type.
- TEST(IndexBoundsBuilderTest, CodeWithScopeTypeBounds) {
- IndexEntry testIndex = IndexEntry(BSONObj());
- BSONObj obj = fromjson("{a: {$type: 15}}");
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- BSONElement elt = obj.firstElement();
+/**
+ * Get a BSONObj which represents the interval from
+ * MinKey to 'end'.
+ */
+BSONObj minKeyIntObj(int end) {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendNumber("", end);
+ return bob.obj();
+}
- OrderedIntervalList oil;
- IndexBoundsBuilder::BoundsTightness tightness;
- IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
-
- // Build the expected interval.
- BSONObjBuilder bob;
- bob.appendCodeWScope("", "", BSONObj());
- bob.appendMaxKey("");
- BSONObj expectedInterval = bob.obj();
-
- // Check the output of translate().
- ASSERT_EQUALS(oil.name, "a");
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(expectedInterval, true, true)));
- ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
- }
+/**
+ * Get a BSONObj which represents the interval from
+ * 'start' to MaxKey.
+ */
+BSONObj maxKeyIntObj(int start) {
+ BSONObjBuilder bob;
+ bob.appendNumber("", start);
+ bob.appendMaxKey("");
+ return bob.obj();
+}
+
+// Expected oil: [MinKey, 3), (3, MaxKey]
+TEST(IndexBoundsBuilderTest, SimpleNE) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = BSON("a" << BSON("$ne" << 3));
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(minKeyIntObj(3), true, false)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(maxKeyIntObj(3), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
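+// Expected oil: (1, 2), (2, 6]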
+TEST(IndexBoundsBuilderTest, IntersectWithNE) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toIntersect;
+ toIntersect.push_back(fromjson("{a: {$gt: 1}}"));
+ toIntersect.push_back(fromjson("{a: {$ne: 2}}}"));
+ toIntersect.push_back(fromjson("{a: {$lte: 6}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndIntersect(toIntersect, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(BSON("" << 1 << "" << 2), false, false)));
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(BSON("" << 2 << "" << 6), false, true)));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
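+// Expected oil: [MinKey, MaxKey]; the union of {$ne: 3} and {$ne: 4} matches every value.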
+TEST(IndexBoundsBuilderTest, UnionizeWithNE) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ vector<BSONObj> toUnionize;
+ toUnionize.push_back(fromjson("{a: {$ne: 3}}"));
+ toUnionize.push_back(fromjson("{a: {$ne: 4}}}"));
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ testTranslateAndUnion(toUnionize, &oil, &tightness);
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(IndexBoundsBuilder::allValues()));
+ ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
+}
+
+// Test $type bounds for Code BSON type.
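+// The bounds span [Code(""), CodeWScope("", {})], which can include values of the neighboring type, so a fetch is still required.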
+TEST(IndexBoundsBuilderTest, CodeTypeBounds) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$type: 13}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+
+ // Build the expected interval.
+ BSONObjBuilder bob;
+ bob.appendCode("", "");
+ bob.appendCodeWScope("", "", BSONObj());
+ BSONObj expectedInterval = bob.obj();
+
+ // Check the output of translate().
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(expectedInterval, true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
+}
+
+// Test $type bounds for the Code With Scope BSON type.
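+// The bounds span [CodeWScope("", {}), MaxKey], which over-covers, so a fetch is still required.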
+TEST(IndexBoundsBuilderTest, CodeWithScopeTypeBounds) {
+ IndexEntry testIndex = IndexEntry(BSONObj());
+ BSONObj obj = fromjson("{a: {$type: 15}}");
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ BSONElement elt = obj.firstElement();
+
+ OrderedIntervalList oil;
+ IndexBoundsBuilder::BoundsTightness tightness;
+ IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
+
+ // Build the expected interval.
+ BSONObjBuilder bob;
+ bob.appendCodeWScope("", "", BSONObj());
+ bob.appendMaxKey("");
+ BSONObj expectedInterval = bob.obj();
+
+ // Check the output of translate().
+ ASSERT_EQUALS(oil.name, "a");
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(expectedInterval, true, true)));
+ ASSERT(tightness == IndexBoundsBuilder::INEXACT_FETCH);
+}
} // namespace
diff --git a/src/mongo/db/query/index_bounds_test.cpp b/src/mongo/db/query/index_bounds_test.cpp
index d5cc470b3af..d1613ca0c94 100644
--- a/src/mongo/db/query/index_bounds_test.cpp
+++ b/src/mongo/db/query/index_bounds_test.cpp
@@ -42,659 +42,661 @@ using namespace mongo;
namespace {
- //
- // Validation
- //
-
- TEST(IndexBoundsTest, ValidBasic) {
- OrderedIntervalList list("foo");
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
-
- // Go forwards with data indexed forwards.
- ASSERT(bounds.isValidFor(BSON("foo" << 1), 1));
- // Go backwards with data indexed backwards.
- ASSERT(bounds.isValidFor(BSON("foo" << -1), -1));
- // Bounds are not oriented along the direction of traversal.
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), -1));
-
- // Bounds must match the index exactly.
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1 << "bar" << 1), 1));
- ASSERT_FALSE(bounds.isValidFor(BSON("bar" << 1), 1));
- }
-
- TEST(IndexBoundsTest, ValidTwoFields) {
- OrderedIntervalList list("foo");
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
-
- // Let's add another field
- OrderedIntervalList otherList("bar");
- otherList.intervals.push_back(Interval(BSON("" << 0 << "" << 3), true, true));
- bounds.fields.push_back(otherList);
-
- // These are OK.
- ASSERT(bounds.isValidFor(BSON("foo" << 1 << "bar" << 1), 1));
- ASSERT(bounds.isValidFor(BSON("foo" << -1 << "bar" << -1), -1));
-
- // Direction(s) don't match.
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1 << "bar" << 1), -1));
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1 << "bar" << -1), -1));
-
- // Index doesn't match.
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
- ASSERT_FALSE(bounds.isValidFor(BSON("bar" << 1 << "foo" << 1), 1));
- }
-
- TEST(IndexBoundsTest, ValidIntervalsInOrder) {
- OrderedIntervalList list("foo");
- // Whether navigated forward or backward, there's no valid ordering for these two intervals.
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- list.intervals.push_back(Interval(BSON("" << 0 << "" << 5), true, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1), 1));
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), -1));
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1), -1));
- }
-
- TEST(IndexBoundsTest, ValidNoOverlappingIntervals) {
- OrderedIntervalList list("foo");
- // overlapping intervals not allowed.
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- list.intervals.push_back(Interval(BSON("" << 19 << "" << 25), true, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
- ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
- }
-
- TEST(IndexBoundsTest, ValidOverlapOnlyWhenBothOpen) {
- OrderedIntervalList list("foo");
- list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, false));
- list.intervals.push_back(Interval(BSON("" << 20 << "" << 25), false, true));
- IndexBounds bounds;
- bounds.fields.push_back(list);
- ASSERT(bounds.isValidFor(BSON("foo" << 1), 1));
- }
-
- //
- // Tests for OrderedIntervalList::complement()
- //
-
- /**
- * Get a BSONObj which represents the interval from
- * MinKey to 'end'.
- */
- BSONObj minKeyIntObj(int end) {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendNumber("", end);
- return bob.obj();
- }
-
- /**
- * Get a BSONObj which represents the interval from
- * 'start' to MaxKey.
- */
- BSONObj maxKeyIntObj(int start) {
- BSONObjBuilder bob;
- bob.appendNumber("", start);
- bob.appendMaxKey("");
- return bob.obj();
- }
-
- /**
- * Get a BSONObj which represents the interval
- * [MinKey, MaxKey].
- */
- BSONObj allValues() {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- bob.appendMaxKey("");
- return bob.obj();
- }
-
- /**
- * Test that if we complement the OIL twice,
- * we get back the original OIL.
- */
- void testDoubleComplement(const OrderedIntervalList* oil) {
- OrderedIntervalList clone;
- for (size_t i = 0; i < oil->intervals.size(); ++i) {
- clone.intervals.push_back(oil->intervals[i]);
- }
-
- clone.complement();
- clone.complement();
-
- ASSERT_EQUALS(oil->intervals.size(), clone.intervals.size());
- for (size_t i = 0; i < oil->intervals.size(); ++i) {
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
- oil->intervals[i].compare(clone.intervals[i]));
- }
- }
-
- // Complement of empty is [MinKey, MaxKey]
- TEST(IndexBoundsTest, ComplementEmptyOil) {
- OrderedIntervalList oil;
- testDoubleComplement(&oil);
- oil.complement();
- ASSERT_EQUALS(oil.intervals.size(), 1U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(allValues(), true, true)));
- }
-
- // Complement of [MinKey, MaxKey] is empty
- TEST(IndexBoundsTest, ComplementAllValues) {
- OrderedIntervalList oil;
- oil.intervals.push_back(Interval(allValues(), true, true));
- testDoubleComplement(&oil);
- oil.complement();
- ASSERT_EQUALS(oil.intervals.size(), 0U);
- }
-
- // Complement of [MinKey, 3), [5, MaxKey) is
- // [3, 5), [MaxKey, MaxKey].
- TEST(IndexBoundsTest, ComplementRanges) {
- OrderedIntervalList oil;
- oil.intervals.push_back(Interval(minKeyIntObj(3), true, false));
- oil.intervals.push_back(Interval(maxKeyIntObj(5), true, false));
- testDoubleComplement(&oil);
- oil.complement();
- ASSERT_EQUALS(oil.intervals.size(), 2U);
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(BSON("" << 3 << "" << 5), true, false)));
-
- // Make the interval [MaxKey, MaxKey].
- BSONObjBuilder bob;
- bob.appendMaxKey("");
- bob.appendMaxKey("");
- BSONObj maxKeyInt = bob.obj();
-
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(maxKeyInt, true, true)));
- }
-
- // Complement of (MinKey, 3), (3, MaxKey) is
- // [MinKey, MinKey], [3, 3], [MaxKey, MaxKey].
- TEST(IndexBoundsTest, ComplementRanges2) {
- OrderedIntervalList oil;
- oil.intervals.push_back(Interval(minKeyIntObj(3), false, false));
- oil.intervals.push_back(Interval(maxKeyIntObj(3), false, false));
- testDoubleComplement(&oil);
- oil.complement();
- ASSERT_EQUALS(oil.intervals.size(), 3U);
-
- // First interval is [MinKey, MinKey]
- BSONObjBuilder minBob;
- minBob.appendMinKey("");
- minBob.appendMinKey("");
- BSONObj minObj = minBob.obj();
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[0].compare(
- Interval(minObj, true, true)));
-
- // Second is [3, 3]
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[1].compare(
- Interval(BSON("" << 3 << "" << 3), true, true)));
-
- // Third is [MaxKey, MaxKey]
- BSONObjBuilder maxBob;
- maxBob.appendMaxKey("");
- maxBob.appendMaxKey("");
- BSONObj maxObj = maxBob.obj();
- ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil.intervals[2].compare(
- Interval(maxObj, true, true)));
- }
-
- //
- // Iteration over
- //
-
- TEST(IndexBoundsCheckerTest, StartKey) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- it.getStartSeekPoint(&seekPoint);
-
- ASSERT_EQUALS(seekPoint.keySuffix[0]->numberInt(), 7);
- ASSERT_EQUALS(seekPoint.suffixInclusive[0], true);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
- }
-
- TEST(IndexBoundsCheckerTest, CheckEnd) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // Second field moves past the end, but we're not done, since there's still an interval in
- // the previous field that the key hasn't advanced to.
- state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT(seekPoint.prefixExclusive);
-
- // The next index key is in the second interval for 'foo' and there is a valid interval for
- // 'bar'.
- state = it.checkKey(BSON("" << 22 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // The next index key is very close to the end of the open interval for foo, and it's past
- // the interval for 'bar'. Since the interval for foo is open, we are asked to move
- // forward, since we possibly could.
- state = it.checkKey(BSON("" << 29.9 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT(seekPoint.prefixExclusive);
- }
-
- TEST(IndexBoundsCheckerTest, MoveIntervalForwardToNextInterval) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // "foo" moves between two intervals.
- state = it.checkKey(BSON("" << 20.5 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 0);
- // Should be told to move exactly to the next interval's beginning.
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[0]->numberInt(), 21);
- ASSERT_EQUALS(seekPoint.suffixInclusive[0], true);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
- }
+//
+// Validation
+//
+
+TEST(IndexBoundsTest, ValidBasic) {
+ OrderedIntervalList list("foo");
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+
+ // Go forwards with data indexed forwards.
+ ASSERT(bounds.isValidFor(BSON("foo" << 1), 1));
+ // Go backwards with data indexed backwards.
+ ASSERT(bounds.isValidFor(BSON("foo" << -1), -1));
+ // Bounds are not oriented along the direction of traversal.
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), -1));
+
+ // Bounds must match the index exactly.
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1 << "bar" << 1), 1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("bar" << 1), 1));
+}
+
+TEST(IndexBoundsTest, ValidTwoFields) {
+ OrderedIntervalList list("foo");
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+
+ // Let's add another field
+ OrderedIntervalList otherList("bar");
+ otherList.intervals.push_back(Interval(BSON("" << 0 << "" << 3), true, true));
+ bounds.fields.push_back(otherList);
+
+ // These are OK.
+ ASSERT(bounds.isValidFor(BSON("foo" << 1 << "bar" << 1), 1));
+ ASSERT(bounds.isValidFor(BSON("foo" << -1 << "bar" << -1), -1));
+
+ // Direction(s) don't match.
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1 << "bar" << 1), -1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1 << "bar" << -1), -1));
+
+ // Index doesn't match.
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("bar" << 1 << "foo" << 1), 1));
+}
+
+TEST(IndexBoundsTest, ValidIntervalsInOrder) {
+ OrderedIntervalList list("foo");
+ // Whether navigated forward or backward, there's no valid ordering for these two intervals.
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ list.intervals.push_back(Interval(BSON("" << 0 << "" << 5), true, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1), 1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), -1));
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << -1), -1));
+}
+
+TEST(IndexBoundsTest, ValidNoOverlappingIntervals) {
+ OrderedIntervalList list("foo");
+ // Overlapping intervals are not allowed.
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ list.intervals.push_back(Interval(BSON("" << 19 << "" << 25), true, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+ ASSERT_FALSE(bounds.isValidFor(BSON("foo" << 1), 1));
+}
+
+TEST(IndexBoundsTest, ValidOverlapOnlyWhenBothOpen) {
+ OrderedIntervalList list("foo");
+ list.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, false));
+ list.intervals.push_back(Interval(BSON("" << 20 << "" << 25), false, true));
+ IndexBounds bounds;
+ bounds.fields.push_back(list);
+ ASSERT(bounds.isValidFor(BSON("foo" << 1), 1));
+}
+
+//
+// Tests for OrderedIntervalList::complement()
+//
- TEST(IndexBoundsCheckerTest, MoveIntervalForwardManyIntervals) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
- fooList.intervals.push_back(Interval(BSON("" << 31 << "" << 40), true, false));
- fooList.intervals.push_back(Interval(BSON("" << 41 << "" << 50), true, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 7), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+/**
+ * Get a BSONObj which represents the interval from
+ * MinKey to 'end'.
+ */
+BSONObj minKeyIntObj(int end) {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendNumber("", end);
+ return bob.obj();
+}
- // "foo" moves forward a few intervals.
- state = it.checkKey(BSON("" << 42), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
- }
+/**
+ * Get a BSONObj which represents the interval from
+ * 'start' to MaxKey.
+ */
+BSONObj maxKeyIntObj(int start) {
+ BSONObjBuilder bob;
+ bob.appendNumber("", start);
+ bob.appendMaxKey("");
+ return bob.obj();
+}
- TEST(IndexBoundsCheckerTest, SimpleCheckKey) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, true));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // The rightmost key is past the range. We should be told to move past the key before the
- // one whose interval we exhausted.
- state = it.checkKey(BSON("" << 7 << "" << 5.00001), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, true);
-
- // Move a little forward, but note that the rightmost key isn't in the interval yet.
- state = it.checkKey(BSON("" << 7.2 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
-
- // Move to the edge of both intervals, 20,5
- state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // And a little beyond.
- state = it.checkKey(BSON("" << 20 << "" << 5.1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::DONE);
- }
+/**
+ * Get a BSONObj which represents the interval
+ * [MinKey, MaxKey].
+ */
+BSONObj allValues() {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ bob.appendMaxKey("");
+ return bob.obj();
+}
- TEST(IndexBoundsCheckerTest, FirstKeyMovedIsOKSecondKeyMustMove) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 0 << "" << 9), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 10 << "" << 20), true, true));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, true));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
- IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 0 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // First key moves to next interval, second key needs to be advanced.
- state = it.checkKey(BSON("" << 10 << "" << -1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+/**
+ * Test that if we complement the OIL twice,
+ * we get back the original OIL.
+ */
+void testDoubleComplement(const OrderedIntervalList* oil) {
+ OrderedIntervalList clone;
+ for (size_t i = 0; i < oil->intervals.size(); ++i) {
+ clone.intervals.push_back(oil->intervals[i]);
}
- TEST(IndexBoundsCheckerTest, SecondIntervalMustRewind) {
- OrderedIntervalList first("first");
- first.intervals.push_back(Interval(BSON("" << 25 << "" << 30), true, true));
-
- OrderedIntervalList second("second");
- second.intervals.push_back(Interval(BSON("" << 0 << "" << 0), true, true));
- second.intervals.push_back(Interval(BSON("" << 9 << "" << 9), true, true));
-
- IndexBounds bounds;
- bounds.fields.push_back(first);
- bounds.fields.push_back(second);
-
- BSONObj idx = BSON("first" << 1 << "second" << 1);
- ASSERT(bounds.isValidFor(idx, 1));
- IndexBoundsChecker it(&bounds, idx, 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- state = it.checkKey(BSON("" << 25 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- state = it.checkKey(BSON("" << 25 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 9);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], true);
-
- state = it.checkKey(BSON("" << 25 << "" << 9), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+ clone.complement();
+ clone.complement();
- // First key moved forward. The second key moved back to a valid state but it's behind
- // the interval that the checker thought it was in.
- state = it.checkKey(BSON("" << 26 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+ ASSERT_EQUALS(oil->intervals.size(), clone.intervals.size());
+ for (size_t i = 0; i < oil->intervals.size(); ++i) {
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS, oil->intervals[i].compare(clone.intervals[i]));
}
+}
+
+// Complement of empty is [MinKey, MaxKey]
+TEST(IndexBoundsTest, ComplementEmptyOil) {
+ OrderedIntervalList oil;
+ testDoubleComplement(&oil);
+ oil.complement();
+ ASSERT_EQUALS(oil.intervals.size(), 1U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(allValues(), true, true)));
+}
+
+// Complement of [MinKey, MaxKey] is empty
+TEST(IndexBoundsTest, ComplementAllValues) {
+ OrderedIntervalList oil;
+ oil.intervals.push_back(Interval(allValues(), true, true));
+ testDoubleComplement(&oil);
+ oil.complement();
+ ASSERT_EQUALS(oil.intervals.size(), 0U);
+}
+
+// Complement of [MinKey, 3), [5, MaxKey) is
+// [3, 5), [MaxKey, MaxKey].
+TEST(IndexBoundsTest, ComplementRanges) {
+ OrderedIntervalList oil;
+ oil.intervals.push_back(Interval(minKeyIntObj(3), true, false));
+ oil.intervals.push_back(Interval(maxKeyIntObj(5), true, false));
+ testDoubleComplement(&oil);
+ oil.complement();
+ ASSERT_EQUALS(oil.intervals.size(), 2U);
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(BSON("" << 3 << "" << 5), true, false)));
+
+ // Make the interval [MaxKey, MaxKey].
+ BSONObjBuilder bob;
+ bob.appendMaxKey("");
+ bob.appendMaxKey("");
+ BSONObj maxKeyInt = bob.obj();
+
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(maxKeyInt, true, true)));
+}
+
+// Complement of (MinKey, 3), (3, MaxKey) is
+// [MinKey, MinKey], [3, 3], [MaxKey, MaxKey].
+TEST(IndexBoundsTest, ComplementRanges2) {
+ OrderedIntervalList oil;
+ oil.intervals.push_back(Interval(minKeyIntObj(3), false, false));
+ oil.intervals.push_back(Interval(maxKeyIntObj(3), false, false));
+ testDoubleComplement(&oil);
+ oil.complement();
+ ASSERT_EQUALS(oil.intervals.size(), 3U);
+
+ // First interval is [MinKey, MinKey]
+ BSONObjBuilder minBob;
+ minBob.appendMinKey("");
+ minBob.appendMinKey("");
+ BSONObj minObj = minBob.obj();
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[0].compare(Interval(minObj, true, true)));
+
+ // Second is [3, 3]
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[1].compare(Interval(BSON("" << 3 << "" << 3), true, true)));
+
+ // Third is [MaxKey, MaxKey]
+ BSONObjBuilder maxBob;
+ maxBob.appendMaxKey("");
+ maxBob.appendMaxKey("");
+ BSONObj maxObj = maxBob.obj();
+ ASSERT_EQUALS(Interval::INTERVAL_EQUALS,
+ oil.intervals[2].compare(Interval(maxObj, true, true)));
+}
+
+//
+// Iteration over the bounds with IndexBoundsChecker
+//
+
+TEST(IndexBoundsCheckerTest, StartKey) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ it.getStartSeekPoint(&seekPoint);
+
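+ // The start seek point is the lower bound of each field's first interval: foo at 7 (inclusive), bar at 0 (exclusive).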
+ ASSERT_EQUALS(seekPoint.keySuffix[0]->numberInt(), 7);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[0], true);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+}
+
+TEST(IndexBoundsCheckerTest, CheckEnd) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // Second field moves past the end, but we're not done, since there's still an interval in
+ // the previous field that the key hasn't advanced to.
+ state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT(seekPoint.prefixExclusive);
+
+ // The next index key is in the second interval for 'foo' and there is a valid interval for
+ // 'bar'.
+ state = it.checkKey(BSON("" << 22 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // The next index key is very close to the end of the open interval for foo, and it's past
+ // the interval for 'bar'. Since the interval for foo is open, we are asked to move
+ // forward, since we possibly could.
+ state = it.checkKey(BSON("" << 29.9 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT(seekPoint.prefixExclusive);
+}
+
+TEST(IndexBoundsCheckerTest, MoveIntervalForwardToNextInterval) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // "foo" moves between two intervals.
+ state = it.checkKey(BSON("" << 20.5 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 0);
+ // Should be told to move exactly to the next interval's beginning.
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[0]->numberInt(), 21);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[0], true);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+}
+
+TEST(IndexBoundsCheckerTest, MoveIntervalForwardManyIntervals) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 21 << "" << 30), true, false));
+ fooList.intervals.push_back(Interval(BSON("" << 31 << "" << 40), true, false));
+ fooList.intervals.push_back(Interval(BSON("" << 41 << "" << 50), true, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 7), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // "foo" moves forward a few intervals.
+ state = it.checkKey(BSON("" << 42), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+}
+
+TEST(IndexBoundsCheckerTest, SimpleCheckKey) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 7 << "" << 20), true, true));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, true));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 7 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // The rightmost key is past the range. We should be told to move past the key before the
+ // one whose interval we exhausted.
+ state = it.checkKey(BSON("" << 7 << "" << 5.00001), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, true);
+
+ // Move a little forward, but note that the rightmost key isn't in the interval yet.
+ state = it.checkKey(BSON("" << 7.2 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+
+ // Move to the edge of both intervals, 20,5
+ state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // And a little beyond.
+ state = it.checkKey(BSON("" << 20 << "" << 5.1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::DONE);
+}
+
+TEST(IndexBoundsCheckerTest, FirstKeyMovedIsOKSecondKeyMustMove) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 0 << "" << 9), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 10 << "" << 20), true, true));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, true));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+ IndexBoundsChecker it(&bounds, BSON("foo" << 1 << "bar" << 1), 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 0 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // First key moves to next interval, second key needs to be advanced.
+ state = it.checkKey(BSON("" << 10 << "" << -1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 0);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], false);
+}
+
+TEST(IndexBoundsCheckerTest, SecondIntervalMustRewind) {
+ OrderedIntervalList first("first");
+ first.intervals.push_back(Interval(BSON("" << 25 << "" << 30), true, true));
+
+ OrderedIntervalList second("second");
+ second.intervals.push_back(Interval(BSON("" << 0 << "" << 0), true, true));
+ second.intervals.push_back(Interval(BSON("" << 9 << "" << 9), true, true));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(first);
+ bounds.fields.push_back(second);
+
+ BSONObj idx = BSON("first" << 1 << "second" << 1);
+ ASSERT(bounds.isValidFor(idx, 1));
+ IndexBoundsChecker it(&bounds, idx, 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ state = it.checkKey(BSON("" << 25 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ state = it.checkKey(BSON("" << 25 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 9);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], true);
+
+ state = it.checkKey(BSON("" << 25 << "" << 9), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // First key moved forward. The second key moved back to a valid state but it's behind
+ // the interval that the checker thought it was in.
+ state = it.checkKey(BSON("" << 26 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+}
+
+TEST(IndexBoundsCheckerTest, SimpleCheckKeyBackwards) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 20 << "" << 7), true, true));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 5 << "" << 0), true, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+
+ BSONObj idx = BSON("foo" << -1 << "bar" << -1);
+ ASSERT(bounds.isValidFor(idx, 1));
+ IndexBoundsChecker it(&bounds, idx, 1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // The rightmost key is past the range. We should be told to move past the key before the
+ // one whose interval we exhausted.
+ state = it.checkKey(BSON("" << 20 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, true);
+
+ // Move a little forward, but note that the rightmost key isn't in the interval yet.
+ state = it.checkKey(BSON("" << 19 << "" << 6), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT_EQUALS(seekPoint.prefixExclusive, false);
+ ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 5);
+ ASSERT_EQUALS(seekPoint.suffixInclusive[1], true);
+
+ // Move to the edge of both intervals
+ state = it.checkKey(BSON("" << 7 << "" << 0.01), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // And a little beyond.
+ state = it.checkKey(BSON("" << 7 << "" << 0), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::DONE);
+}
+
+TEST(IndexBoundsCheckerTest, CheckEndBackwards) {
+ OrderedIntervalList fooList("foo");
+ fooList.intervals.push_back(Interval(BSON("" << 30 << "" << 21), true, true));
+ fooList.intervals.push_back(Interval(BSON("" << 20 << "" << 7), true, false));
+
+ OrderedIntervalList barList("bar");
+ barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
+
+ IndexBounds bounds;
+ bounds.fields.push_back(fooList);
+ bounds.fields.push_back(barList);
+
+ BSONObj idx = BSON("foo" << 1 << "bar" << -1);
+ ASSERT(bounds.isValidFor(idx, -1));
+ IndexBoundsChecker it(&bounds, idx, -1);
+
+ IndexSeekPoint seekPoint;
+ IndexBoundsChecker::KeyState state;
+
+ // Start at something in our range.
+ state = it.checkKey(BSON("" << 30 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // Second field moves past the end, but we're not done, since there's still an interval in
+ // the previous field that the key hasn't advanced to.
+ state = it.checkKey(BSON("" << 30 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT(seekPoint.prefixExclusive);
+
+ // The next index key is in the second interval for 'foo' and there is a valid interval for
+ // 'bar'.
+ state = it.checkKey(BSON("" << 20 << "" << 1), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
+
+ // The next index key is very close to the end of the open interval for foo, and it's past
+ // the interval for 'bar'. Since the interval for foo is open, we are asked to move
+ // forward, since we possibly could.
+ state = it.checkKey(BSON("" << 7.001 << "" << 5), &seekPoint);
+ ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
+ ASSERT_EQUALS(seekPoint.prefixLen, 1);
+ ASSERT(seekPoint.prefixExclusive);
+}
+
+//
+// IndexBoundsChecker::findIntervalForField
+//
- TEST(IndexBoundsCheckerTest, SimpleCheckKeyBackwards) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 20 << "" << 7), true, true));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 5 << "" << 0), true, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
-
- BSONObj idx = BSON("foo" << -1 << "bar" << -1);
- ASSERT(bounds.isValidFor(idx, 1));
- IndexBoundsChecker it(&bounds, idx, 1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 20 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // The rightmost key is past the range. We should be told to move past the key before the
- // one whose interval we exhausted.
- state = it.checkKey(BSON("" << 20 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, true);
-
- // Move a little forward, but note that the rightmost key isn't in the interval yet.
- state = it.checkKey(BSON("" << 19 << "" << 6), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT_EQUALS(seekPoint.prefixExclusive, false);
- ASSERT_EQUALS(seekPoint.keySuffix[1]->numberInt(), 5);
- ASSERT_EQUALS(seekPoint.suffixInclusive[1], true);
-
- // Move to the edge of both intervals
- state = it.checkKey(BSON("" << 7 << "" << 0.01), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // And a little beyond.
- state = it.checkKey(BSON("" << 7 << "" << 0), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::DONE);
- }
-
- TEST(IndexBoundsCheckerTest, CheckEndBackwards) {
- OrderedIntervalList fooList("foo");
- fooList.intervals.push_back(Interval(BSON("" << 30 << "" << 21), true, true));
- fooList.intervals.push_back(Interval(BSON("" << 20 << "" << 7), true, false));
-
- OrderedIntervalList barList("bar");
- barList.intervals.push_back(Interval(BSON("" << 0 << "" << 5), false, false));
-
- IndexBounds bounds;
- bounds.fields.push_back(fooList);
- bounds.fields.push_back(barList);
-
- BSONObj idx = BSON("foo" << 1 << "bar" << -1);
- ASSERT(bounds.isValidFor(idx, -1));
- IndexBoundsChecker it(&bounds, idx, -1);
-
- IndexSeekPoint seekPoint;
- IndexBoundsChecker::KeyState state;
-
- // Start at something in our range.
- state = it.checkKey(BSON("" << 30 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // Second field moves past the end, but we're not done, since there's still an interval in
- // the previous field that the key hasn't advanced to.
- state = it.checkKey(BSON("" << 30 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT(seekPoint.prefixExclusive);
-
- // The next index key is in the second interval for 'foo' and there is a valid interval for
- // 'bar'.
- state = it.checkKey(BSON("" << 20 << "" << 1), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::VALID);
-
- // The next index key is very close to the end of the open interval for foo, and it's past
- // the interval for 'bar'. Since the interval for foo is open, we are asked to move
- // forward, since we possibly could.
- state = it.checkKey(BSON("" << 7.001 << "" << 5), &seekPoint);
- ASSERT_EQUALS(state, IndexBoundsChecker::MUST_ADVANCE);
- ASSERT_EQUALS(seekPoint.prefixLen, 1);
- ASSERT(seekPoint.prefixExclusive);
+/**
+ * Returns string representation of IndexBoundsChecker::Location.
+ */
+std::string toString(IndexBoundsChecker::Location location) {
+ switch (location) {
+ case IndexBoundsChecker::BEHIND:
+ return "BEHIND";
+ case IndexBoundsChecker::WITHIN:
+ return "WITHIN";
+ case IndexBoundsChecker::AHEAD:
+ return "AHEAD";
}
+ invariant(0);
+}
- //
- // IndexBoundsChecker::findIntervalForField
- //
-
- /**
- * Returns string representation of IndexBoundsChecker::Location.
- */
- std::string toString(IndexBoundsChecker::Location location) {
- switch(location) {
- case IndexBoundsChecker::BEHIND: return "BEHIND";
- case IndexBoundsChecker::WITHIN: return "WITHIN";
- case IndexBoundsChecker::AHEAD: return "AHEAD";
- }
- invariant(0);
+/**
+ * Test function for findIntervalForField.
+ * Constructs a list of point intervals from 'points' and searches for 'key'
+ * using findIntervalForField(). Verifies expected location and index (if expectedLocation
+ * is BEHIND or WITHIN).
+ * 'points' is provided in BSON format: {points: [pt1, pt2, pt3, ...]}
+ */
+void testFindIntervalForField(int key,
+ const BSONObj& pointsObj,
+ const int expectedDirection,
+ IndexBoundsChecker::Location expectedLocation,
+ size_t expectedIntervalIndex) {
+ // Create key BSONElement.
+ BSONObj keyObj = BSON("" << key);
+ BSONElement keyElt = keyObj.firstElement();
+
+ // Construct point intervals.
+ OrderedIntervalList oil("foo");
+ BSONObjIterator i(pointsObj.getObjectField("points"));
+ while (i.more()) {
+ BSONElement e = i.next();
+ int j = e.numberInt();
+ oil.intervals.push_back(Interval(BSON("" << j << "" << j), true, true));
}
-
- /**
- * Test function for findIntervalForField.
- * Constructs a list of point intervals from 'points' and searches for 'key'
- * using findIntervalForField(). Verifies expected location and index (if expectedLocation
- * is BEHIND or WITHIN).
- * 'points' is provided in BSON format: {points: [pt1, pt2, pt4, ...]
- */
- void testFindIntervalForField(int key, const BSONObj& pointsObj, const int expectedDirection,
- IndexBoundsChecker::Location expectedLocation,
- size_t expectedIntervalIndex) {
- // Create key BSONElement.
- BSONObj keyObj = BSON("" << key);
- BSONElement keyElt = keyObj.firstElement();
-
- // Construct point intervals.
- OrderedIntervalList oil("foo");
- BSONObjIterator i(pointsObj.getObjectField("points"));
- while (i.more()) {
- BSONElement e = i.next();
- int j = e.numberInt();
- oil.intervals.push_back(Interval(BSON("" << j << "" << j), true, true));
- }
- size_t intervalIndex = 0;
- IndexBoundsChecker::Location location =
- IndexBoundsChecker::findIntervalForField(keyElt, oil, expectedDirection, &intervalIndex);
- if (expectedLocation != location) {
- mongoutils::str::stream ss;
- ss << "Unexpected location from findIntervalForField: key=" << keyElt
- << "; intervals=" << oil.toString() << "; direction=" << expectedDirection
- << ". Expected: " << toString(expectedLocation)
- << ". Actual: " << toString(location);
- FAIL(ss);
- }
- // Check interval index if location is BEHIND or WITHIN.
- if ((IndexBoundsChecker::BEHIND == expectedLocation ||
- IndexBoundsChecker::WITHIN == expectedLocation) &&
- expectedIntervalIndex != intervalIndex) {
- mongoutils::str::stream ss;
- ss << "Unexpected interval index from findIntervalForField: key=" << keyElt
- << "; intervals=" << oil.toString() << "; direction=" << expectedDirection
- << "; location= " << toString(location)
- << ". Expected: " << expectedIntervalIndex
- << ". Actual: " << intervalIndex;
- FAIL(ss);
- }
+ size_t intervalIndex = 0;
+ IndexBoundsChecker::Location location =
+ IndexBoundsChecker::findIntervalForField(keyElt, oil, expectedDirection, &intervalIndex);
+ if (expectedLocation != location) {
+ mongoutils::str::stream ss;
+ ss << "Unexpected location from findIntervalForField: key=" << keyElt
+ << "; intervals=" << oil.toString() << "; direction=" << expectedDirection
+ << ". Expected: " << toString(expectedLocation) << ". Actual: " << toString(location);
+ FAIL(ss);
}
-
- TEST(IndexBoundsCheckerTest, FindIntervalForField) {
- // No intervals
- BSONObj pointsObj = fromjson("{points: []}");
- testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
- testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
-
- // One interval
- pointsObj = fromjson("{points: [5]}");
- testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
-
- // One interval - reverse direction
- pointsObj = fromjson("{points: [5]}");
- testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
-
- // Two intervals
- // Verifies off-by-one handling in upper bound of binary search.
- pointsObj = fromjson("{points: [5, 7]}");
- testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::BEHIND, 1U);
- testFindIntervalForField(7, pointsObj, 1, IndexBoundsChecker::WITHIN, 1U);
- testFindIntervalForField(8, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
-
- // Two intervals - reverse direction
- // Verifies off-by-one handling in upper bound of binary search.
- pointsObj = fromjson("{points: [7, 5]}");
- testFindIntervalForField(8, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(7, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 1U);
- testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 1U);
- testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
-
- // Multiple intervals - odd number of intervals.
- pointsObj = fromjson("{points: [1, 3, 5, 7, 9]}");
- testFindIntervalForField(0, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(1, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(2, pointsObj, 1, IndexBoundsChecker::BEHIND, 1U);
- testFindIntervalForField(3, pointsObj, 1, IndexBoundsChecker::WITHIN, 1U);
- testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 2U);
- testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 2U);
- testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::BEHIND, 3U);
- testFindIntervalForField(7, pointsObj, 1, IndexBoundsChecker::WITHIN, 3U);
- testFindIntervalForField(8, pointsObj, 1, IndexBoundsChecker::BEHIND, 4U);
- testFindIntervalForField(9, pointsObj, 1, IndexBoundsChecker::WITHIN, 4U);
- testFindIntervalForField(10, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
-
- // Multiple intervals - even number of intervals, reverse direction
- // Interval order has to match direction.
- pointsObj = fromjson("{points: [7, 5, 3, 1]}");
- testFindIntervalForField(8, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
- testFindIntervalForField(7, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
- testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 1U);
- testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 1U);
- testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::BEHIND, 2U);
- testFindIntervalForField(3, pointsObj, -1, IndexBoundsChecker::WITHIN, 2U);
- testFindIntervalForField(2, pointsObj, -1, IndexBoundsChecker::BEHIND, 3U);
- testFindIntervalForField(1, pointsObj, -1, IndexBoundsChecker::WITHIN, 3U);
- testFindIntervalForField(0, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+ // Check interval index if location is BEHIND or WITHIN.
+ if ((IndexBoundsChecker::BEHIND == expectedLocation ||
+ IndexBoundsChecker::WITHIN == expectedLocation) &&
+ expectedIntervalIndex != intervalIndex) {
+ mongoutils::str::stream ss;
+ ss << "Unexpected interval index from findIntervalForField: key=" << keyElt
+ << "; intervals=" << oil.toString() << "; direction=" << expectedDirection
+ << "; location= " << toString(location) << ". Expected: " << expectedIntervalIndex
+ << ". Actual: " << intervalIndex;
+ FAIL(ss);
}
+}
+
+TEST(IndexBoundsCheckerTest, FindIntervalForField) {
+ // No intervals
+ BSONObj pointsObj = fromjson("{points: []}");
+ testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
+ testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+
+ // One interval
+ pointsObj = fromjson("{points: [5]}");
+ testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
+
+ // One interval - reverse direction
+ pointsObj = fromjson("{points: [5]}");
+ testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+
+ // Two intervals
+ // Verifies off-by-one handling in upper bound of binary search.
+ pointsObj = fromjson("{points: [5, 7]}");
+ testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::BEHIND, 1U);
+ testFindIntervalForField(7, pointsObj, 1, IndexBoundsChecker::WITHIN, 1U);
+ testFindIntervalForField(8, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
+
+ // Two intervals - reverse direction
+ // Verifies off-by-one handling in upper bound of binary search.
+ pointsObj = fromjson("{points: [7, 5]}");
+ testFindIntervalForField(8, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(7, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 1U);
+ testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 1U);
+ testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+
+ // Multiple intervals - odd number of intervals.
+ pointsObj = fromjson("{points: [1, 3, 5, 7, 9]}");
+ testFindIntervalForField(0, pointsObj, 1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(1, pointsObj, 1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(2, pointsObj, 1, IndexBoundsChecker::BEHIND, 1U);
+ testFindIntervalForField(3, pointsObj, 1, IndexBoundsChecker::WITHIN, 1U);
+ testFindIntervalForField(4, pointsObj, 1, IndexBoundsChecker::BEHIND, 2U);
+ testFindIntervalForField(5, pointsObj, 1, IndexBoundsChecker::WITHIN, 2U);
+ testFindIntervalForField(6, pointsObj, 1, IndexBoundsChecker::BEHIND, 3U);
+ testFindIntervalForField(7, pointsObj, 1, IndexBoundsChecker::WITHIN, 3U);
+ testFindIntervalForField(8, pointsObj, 1, IndexBoundsChecker::BEHIND, 4U);
+ testFindIntervalForField(9, pointsObj, 1, IndexBoundsChecker::WITHIN, 4U);
+ testFindIntervalForField(10, pointsObj, 1, IndexBoundsChecker::AHEAD, 0U);
+
+ // Multiple intervals - even number of intervals, reverse direction
+ // Interval order has to match direction.
+ pointsObj = fromjson("{points: [7, 5, 3, 1]}");
+ testFindIntervalForField(8, pointsObj, -1, IndexBoundsChecker::BEHIND, 0U);
+ testFindIntervalForField(7, pointsObj, -1, IndexBoundsChecker::WITHIN, 0U);
+ testFindIntervalForField(6, pointsObj, -1, IndexBoundsChecker::BEHIND, 1U);
+ testFindIntervalForField(5, pointsObj, -1, IndexBoundsChecker::WITHIN, 1U);
+ testFindIntervalForField(4, pointsObj, -1, IndexBoundsChecker::BEHIND, 2U);
+ testFindIntervalForField(3, pointsObj, -1, IndexBoundsChecker::WITHIN, 2U);
+ testFindIntervalForField(2, pointsObj, -1, IndexBoundsChecker::BEHIND, 3U);
+ testFindIntervalForField(1, pointsObj, -1, IndexBoundsChecker::WITHIN, 3U);
+ testFindIntervalForField(0, pointsObj, -1, IndexBoundsChecker::AHEAD, 0U);
+}
} // namespace
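
For orientation, the BEHIND/WITHIN/AHEAD classification the tests above exercise can be modeled outside MongoDB with plain integers. The sketch below is an illustration only, not the implementation under test: it uses a linear scan where IndexBoundsChecker::findIntervalForField binary-searches an OrderedIntervalList, and it assumes point intervals already ordered to match 'direction', as in the test fixtures.

#include <cstddef>
#include <vector>

enum class Location { BEHIND, WITHIN, AHEAD };

// Classifies 'key' against point intervals ordered to match 'direction'
// (ascending for direction == 1, descending for direction == -1).
Location findIntervalForPoint(int key,
                              const std::vector<int>& points,
                              int direction,
                              std::size_t* indexOut) {
    for (std::size_t i = 0; i < points.size(); ++i) {
        if (key == points[i]) {
            *indexOut = i;  // Key lies inside interval i: WITHIN.
            return Location::WITHIN;
        }
        const bool beforeInterval = (direction == 1) ? (key < points[i]) : (key > points[i]);
        if (beforeInterval) {
            *indexOut = i;  // Key falls before interval i: BEHIND, and i is the next candidate.
            return Location::BEHIND;
        }
    }
    return Location::AHEAD;  // Key is past every interval (also covers the empty list).
}

Running this against the tables in FindIntervalForField above (for example key 6 against points [5, 7] with direction 1) reproduces the expected locations and interval indexes.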
diff --git a/src/mongo/db/query/index_entry.cpp b/src/mongo/db/query/index_entry.cpp
index 78a0d1efc4a..13153465387 100644
--- a/src/mongo/db/query/index_entry.cpp
+++ b/src/mongo/db/query/index_entry.cpp
@@ -34,33 +34,33 @@
namespace mongo {
- std::string IndexEntry::toString() const {
- StringBuilder sb;
- sb << "kp: " << keyPattern;
+std::string IndexEntry::toString() const {
+ StringBuilder sb;
+ sb << "kp: " << keyPattern;
- if (multikey) {
- sb << " multikey";
- }
-
- if (sparse) {
- sb << " sparse";
- }
+ if (multikey) {
+ sb << " multikey";
+ }
- if (unique) {
- sb << " unique";
- }
+ if (sparse) {
+ sb << " sparse";
+ }
- sb << " name: '" << name << "'";
+ if (unique) {
+ sb << " unique";
+ }
- if (filterExpr) {
- sb << " filterExpr: " << filterExpr->toString();
- }
+ sb << " name: '" << name << "'";
- if (!infoObj.isEmpty()) {
- sb << " io: " << infoObj;
- }
+ if (filterExpr) {
+ sb << " filterExpr: " << filterExpr->toString();
+ }
- return sb.str();
+ if (!infoObj.isEmpty()) {
+ sb << " io: " << infoObj;
}
+ return sb.str();
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/index_entry.h b/src/mongo/db/query/index_entry.h
index c27be6bb3bd..9a785abef8b 100644
--- a/src/mongo/db/query/index_entry.h
+++ b/src/mongo/db/query/index_entry.h
@@ -36,90 +36,87 @@
namespace mongo {
- class MatchExpression;
+class MatchExpression;
+
+/**
+ * This name sucks, but every name involving 'index' is used somewhere.
+ */
+struct IndexEntry {
+ /**
+ * Use this constructor if you're making an IndexEntry from the catalog.
+ */
+ IndexEntry(const BSONObj& kp,
+ const std::string& accessMethod,
+ bool mk,
+ bool sp,
+ bool unq,
+ const std::string& n,
+ const MatchExpression* fe,
+ const BSONObj& io)
+ : keyPattern(kp),
+ multikey(mk),
+ sparse(sp),
+ unique(unq),
+ name(n),
+ filterExpr(fe),
+ infoObj(io) {
+ type = IndexNames::nameToType(accessMethod);
+ }
/**
- * This name sucks, but every name involving 'index' is used somewhere.
+ * For testing purposes only.
*/
- struct IndexEntry {
- /**
- * Use this constructor if you're making an IndexEntry from the catalog.
- */
- IndexEntry(const BSONObj& kp,
- const std::string& accessMethod,
- bool mk,
- bool sp,
- bool unq,
- const std::string& n,
- const MatchExpression* fe,
- const BSONObj& io)
- : keyPattern(kp),
- multikey(mk),
- sparse(sp),
- unique(unq),
- name(n),
- filterExpr(fe),
- infoObj(io) {
-
- type = IndexNames::nameToType(accessMethod);
- }
-
- /**
- * For testing purposes only.
- */
- IndexEntry(const BSONObj& kp,
- bool mk,
- bool sp,
- bool unq,
- const std::string& n,
- const MatchExpression* fe,
- const BSONObj& io)
- : keyPattern(kp),
- multikey(mk),
- sparse(sp),
- unique(unq),
- name(n),
- filterExpr(fe),
- infoObj(io) {
-
- type = IndexNames::nameToType(IndexNames::findPluginName(keyPattern));
- }
-
- /**
- * For testing purposes only.
- */
- IndexEntry(const BSONObj& kp)
- : keyPattern(kp),
- multikey(false),
- sparse(false),
- unique(false),
- name("test_foo"),
- filterExpr(nullptr),
- infoObj(BSONObj()) {
-
- type = IndexNames::nameToType(IndexNames::findPluginName(keyPattern));
- }
-
- BSONObj keyPattern;
-
- bool multikey;
-
- bool sparse;
-
- bool unique;
-
- std::string name;
-
- const MatchExpression* filterExpr;
-
- // Geo indices have extra parameters. We need those available to plan correctly.
- BSONObj infoObj;
-
- // What type of index is this? (What access method can we use on the index described
- // by the keyPattern?)
- IndexType type;
-
- std::string toString() const;
- };
+ IndexEntry(const BSONObj& kp,
+ bool mk,
+ bool sp,
+ bool unq,
+ const std::string& n,
+ const MatchExpression* fe,
+ const BSONObj& io)
+ : keyPattern(kp),
+ multikey(mk),
+ sparse(sp),
+ unique(unq),
+ name(n),
+ filterExpr(fe),
+ infoObj(io) {
+ type = IndexNames::nameToType(IndexNames::findPluginName(keyPattern));
+ }
+
+ /**
+ * For testing purposes only.
+ */
+ IndexEntry(const BSONObj& kp)
+ : keyPattern(kp),
+ multikey(false),
+ sparse(false),
+ unique(false),
+ name("test_foo"),
+ filterExpr(nullptr),
+ infoObj(BSONObj()) {
+ type = IndexNames::nameToType(IndexNames::findPluginName(keyPattern));
+ }
+
+ BSONObj keyPattern;
+
+ bool multikey;
+
+ bool sparse;
+
+ bool unique;
+
+ std::string name;
+
+ const MatchExpression* filterExpr;
+
+ // Geo indices have extra parameters. We need those available to plan correctly.
+ BSONObj infoObj;
+
+ // What type of index is this? (What access method can we use on the index described
+ // by the keyPattern?)
+ IndexType type;
+
+ std::string toString() const;
+};
} // namespace mongo
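
As a usage note, the two test-only constructors above can be exercised along the lines of the hedged sketch below. It assumes the BSON() builder macro is in scope and that the translation unit links the query library; the index name shown is made up for illustration.

#include <iostream>

#include "mongo/db/query/index_entry.h"

void indexEntrySketch() {
    // Key-pattern-only constructor: multikey/sparse/unique default to false and the
    // index type is inferred from the key pattern's plugin name.
    mongo::IndexEntry simple(BSON("a" << 1));

    // Full test-only constructor: an explicitly multikey, sparse entry with no
    // filter expression and an empty infoObj.
    mongo::IndexEntry detailed(BSON("a" << 1 << "b" << -1),
                               true,   // multikey
                               true,   // sparse
                               false,  // unique
                               "a_1_b_-1",
                               nullptr,  // filterExpr
                               mongo::BSONObj());

    std::cout << simple.toString() << '\n' << detailed.toString() << '\n';
}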
diff --git a/src/mongo/db/query/index_tag.cpp b/src/mongo/db/query/index_tag.cpp
index 7b81a23f556..c1dceec6b02 100644
--- a/src/mongo/db/query/index_tag.cpp
+++ b/src/mongo/db/query/index_tag.cpp
@@ -35,81 +35,79 @@
namespace mongo {
- // TODO: Move out of the enumerator and into the planner.
-
- const size_t IndexTag::kNoIndex = std::numeric_limits<size_t>::max();
-
- void tagForSort(MatchExpression* tree) {
- if (!Indexability::nodeCanUseIndexOnOwnField(tree)) {
- size_t myTagValue = IndexTag::kNoIndex;
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- MatchExpression* child = tree->getChild(i);
- tagForSort(child);
- IndexTag* childTag = static_cast<IndexTag*>(child->getTag());
- if (NULL != childTag) {
- myTagValue = std::min(myTagValue, childTag->index);
- }
- }
- if (myTagValue != IndexTag::kNoIndex) {
- tree->setTag(new IndexTag(myTagValue));
+// TODO: Move out of the enumerator and into the planner.
+
+const size_t IndexTag::kNoIndex = std::numeric_limits<size_t>::max();
+
+void tagForSort(MatchExpression* tree) {
+ if (!Indexability::nodeCanUseIndexOnOwnField(tree)) {
+ size_t myTagValue = IndexTag::kNoIndex;
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ MatchExpression* child = tree->getChild(i);
+ tagForSort(child);
+ IndexTag* childTag = static_cast<IndexTag*>(child->getTag());
+ if (NULL != childTag) {
+ myTagValue = std::min(myTagValue, childTag->index);
}
}
+ if (myTagValue != IndexTag::kNoIndex) {
+ tree->setTag(new IndexTag(myTagValue));
+ }
}
+}
- bool TagComparison(const MatchExpression* lhs, const MatchExpression* rhs) {
- IndexTag* lhsTag = static_cast<IndexTag*>(lhs->getTag());
- size_t lhsValue = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->index;
- size_t lhsPos = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->pos;
+bool TagComparison(const MatchExpression* lhs, const MatchExpression* rhs) {
+ IndexTag* lhsTag = static_cast<IndexTag*>(lhs->getTag());
+ size_t lhsValue = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->index;
+ size_t lhsPos = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->pos;
- IndexTag* rhsTag = static_cast<IndexTag*>(rhs->getTag());
- size_t rhsValue = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->index;
- size_t rhsPos = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->pos;
+ IndexTag* rhsTag = static_cast<IndexTag*>(rhs->getTag());
+ size_t rhsValue = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->index;
+ size_t rhsPos = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->pos;
- // First, order on indices.
- if (lhsValue != rhsValue) {
- // This relies on kNoIndex being larger than every other possible index.
- return lhsValue < rhsValue;
- }
-
- // Next, order so that if there's a GEO_NEAR it's first.
- if (MatchExpression::GEO_NEAR == lhs->matchType()) {
- return true;
- }
- else if (MatchExpression::GEO_NEAR == rhs->matchType()) {
- return false;
- }
+ // First, order on indices.
+ if (lhsValue != rhsValue) {
+ // This relies on kNoIndex being larger than every other possible index.
+ return lhsValue < rhsValue;
+ }
- // Ditto text.
- if (MatchExpression::TEXT == lhs->matchType()) {
- return true;
- }
- else if (MatchExpression::TEXT == rhs->matchType()) {
- return false;
- }
+ // Next, order so that if there's a GEO_NEAR it's first.
+ if (MatchExpression::GEO_NEAR == lhs->matchType()) {
+ return true;
+ } else if (MatchExpression::GEO_NEAR == rhs->matchType()) {
+ return false;
+ }
- // Next, order so that the first field of a compound index appears first.
- if (lhsPos != rhsPos) {
- return lhsPos < rhsPos;
- }
+ // Ditto text.
+ if (MatchExpression::TEXT == lhs->matchType()) {
+ return true;
+ } else if (MatchExpression::TEXT == rhs->matchType()) {
+ return false;
+ }
- // Next, order on fields.
- int cmp = lhs->path().compare(rhs->path());
- if (0 != cmp) {
- return 0;
- }
+ // Next, order so that the first field of a compound index appears first.
+ if (lhsPos != rhsPos) {
+ return lhsPos < rhsPos;
+ }
- // Finally, order on expression type.
- return lhs->matchType() < rhs->matchType();
+ // Next, order on fields.
+ int cmp = lhs->path().compare(rhs->path());
+ if (0 != cmp) {
+ return 0;
}
- void sortUsingTags(MatchExpression* tree) {
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- sortUsingTags(tree->getChild(i));
- }
- std::vector<MatchExpression*>* children = tree->getChildVector();
- if (NULL != children) {
- std::sort(children->begin(), children->end(), TagComparison);
- }
+ // Finally, order on expression type.
+ return lhs->matchType() < rhs->matchType();
+}
+
+void sortUsingTags(MatchExpression* tree) {
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ sortUsingTags(tree->getChild(i));
+ }
+ std::vector<MatchExpression*>* children = tree->getChildVector();
+ if (NULL != children) {
+ std::sort(children->begin(), children->end(), TagComparison);
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/index_tag.h b/src/mongo/db/query/index_tag.h
index 9fb115c818d..3980aa72f1b 100644
--- a/src/mongo/db/query/index_tag.h
+++ b/src/mongo/db/query/index_tag.h
@@ -35,98 +35,98 @@
namespace mongo {
- // output from enumerator to query planner
- class IndexTag : public MatchExpression::TagData {
- public:
- static const size_t kNoIndex;
-
- IndexTag() : index(kNoIndex), pos(0) {}
- IndexTag(size_t i) : index(i), pos(0) { }
- IndexTag(size_t i, size_t p) : index(i), pos(p) { }
-
- virtual ~IndexTag() { }
-
- virtual void debugString(StringBuilder* builder) const {
- *builder << " || Selected Index #" << index << " pos " << pos;
+// output from enumerator to query planner
+class IndexTag : public MatchExpression::TagData {
+public:
+ static const size_t kNoIndex;
+
+ IndexTag() : index(kNoIndex), pos(0) {}
+ IndexTag(size_t i) : index(i), pos(0) {}
+ IndexTag(size_t i, size_t p) : index(i), pos(p) {}
+
+ virtual ~IndexTag() {}
+
+ virtual void debugString(StringBuilder* builder) const {
+ *builder << " || Selected Index #" << index << " pos " << pos;
+ }
+
+ virtual MatchExpression::TagData* clone() const {
+ return new IndexTag(index, pos);
+ }
+
+ // What index should we try to use for this leaf?
+ size_t index;
+
+ // What position are we in the index? (Compound.)
+ size_t pos;
+};
+
+// used internally
+class RelevantTag : public MatchExpression::TagData {
+public:
+ RelevantTag() : elemMatchExpr(NULL), pathPrefix("") {}
+
+ std::vector<size_t> first;
+ std::vector<size_t> notFirst;
+
+ // We don't know the full path from a node unless we keep notes as we traverse from the
+ // root. We do this once and store it.
+ // TODO: Do a FieldRef / StringData pass.
+ // TODO: We might want this inside of the MatchExpression.
+ std::string path;
+
+ // Points to the innermost containing $elemMatch. If this tag is
+ // attached to an expression not contained in an $elemMatch, then
+ // 'elemMatchExpr' is NULL. Not owned here.
+ MatchExpression* elemMatchExpr;
+
+ // If not contained inside an elemMatch, 'pathPrefix' contains the
+ // part of 'path' prior to the first dot. For example, if 'path' is
+ // "a.b.c", then 'pathPrefix' is "a". If 'path' is just "a", then
+ // 'pathPrefix' is also "a".
+ //
+ // If tagging a predicate contained in an $elemMatch, 'pathPrefix'
+ // holds the prefix of the path *inside* the $elemMatch. If this
+ // tags predicate {a: {$elemMatch: {"b.c": {$gt: 1}}}}, then
+ // 'pathPrefix' is "b".
+ //
+ // Used by the plan enumerator to make sure that we never
+ // compound two predicates sharing a path prefix.
+ std::string pathPrefix;
+
+ virtual void debugString(StringBuilder* builder) const {
+ *builder << " || First: ";
+ for (size_t i = 0; i < first.size(); ++i) {
+ *builder << first[i] << " ";
}
-
- virtual MatchExpression::TagData* clone() const {
- return new IndexTag(index, pos);
+ *builder << "notFirst: ";
+ for (size_t i = 0; i < notFirst.size(); ++i) {
+ *builder << notFirst[i] << " ";
}
+ *builder << "full path: " << path;
+ }
- // What index should we try to use for this leaf?
- size_t index;
-
- // What position are we in the index? (Compound.)
- size_t pos;
- };
-
- // used internally
- class RelevantTag : public MatchExpression::TagData {
- public:
- RelevantTag() : elemMatchExpr(NULL), pathPrefix("") { }
-
- std::vector<size_t> first;
- std::vector<size_t> notFirst;
-
- // We don't know the full path from a node unless we keep notes as we traverse from the
- // root. We do this once and store it.
- // TODO: Do a FieldRef / StringData pass.
- // TODO: We might want this inside of the MatchExpression.
- std::string path;
-
- // Points to the innermost containing $elemMatch. If this tag is
- // attached to an expression not contained in an $elemMatch, then
- // 'elemMatchExpr' is NULL. Not owned here.
- MatchExpression* elemMatchExpr;
-
- // If not contained inside an elemMatch, 'pathPrefix' contains the
- // part of 'path' prior to the first dot. For example, if 'path' is
- // "a.b.c", then 'pathPrefix' is "a". If 'path' is just "a", then
- // 'pathPrefix' is also "a".
- //
- // If tagging a predicate contained in an $elemMatch, 'pathPrefix'
- // holds the prefix of the path *inside* the $elemMatch. If this
- // tags predicate {a: {$elemMatch: {"b.c": {$gt: 1}}}}, then
- // 'pathPrefix' is "b".
- //
- // Used by the plan enumerator to make sure that we never
- // compound two predicates sharing a path prefix.
- std::string pathPrefix;
-
- virtual void debugString(StringBuilder* builder) const {
- *builder << " || First: ";
- for (size_t i = 0; i < first.size(); ++i) {
- *builder << first[i] << " ";
- }
- *builder << "notFirst: ";
- for (size_t i = 0; i < notFirst.size(); ++i) {
- *builder << notFirst[i] << " ";
- }
- *builder << "full path: " << path;
- }
+ virtual MatchExpression::TagData* clone() const {
+ RelevantTag* ret = new RelevantTag();
+ ret->first = first;
+ ret->notFirst = notFirst;
+ return ret;
+ }
+};
- virtual MatchExpression::TagData* clone() const {
- RelevantTag* ret = new RelevantTag();
- ret->first = first;
- ret->notFirst = notFirst;
- return ret;
- }
- };
-
- /**
- * Tags each node of the tree with the lowest numbered index that the sub-tree rooted at that
- * node uses.
- *
- * Nodes that satisfy Indexability::nodeCanUseIndexOnOwnField are already tagged if there
- * exists an index that that node can use.
- */
- void tagForSort(MatchExpression* tree);
-
- /**
- * Sorts the tree using its IndexTag(s). Nodes that use the same index are adjacent to one
- * another.
- */
- void sortUsingTags(MatchExpression* tree);
-
-} // namespace mongo
+/**
+ * Tags each node of the tree with the lowest numbered index that the sub-tree rooted at that
+ * node uses.
+ *
+ * Nodes that satisfy Indexability::nodeCanUseIndexOnOwnField are already tagged if there
+ * exists an index that that node can use.
+ */
+void tagForSort(MatchExpression* tree);
+
+/**
+ * Sorts the tree using its IndexTag(s). Nodes that use the same index are adjacent to one
+ * another.
+ */
+void sortUsingTags(MatchExpression* tree);
+
+} // namespace mongo
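
For context, a hedged sketch of how these pieces fit together: the enumerator attaches an IndexTag to a leaf it has matched to an index, and the planner then normalizes the tree with tagForSort() and sortUsingTags(). Here 'leaf' and 'root' stand for already-parsed MatchExpression nodes owned by the caller; constructing them is out of scope.

#include "mongo/db/query/index_tag.h"

void tagAndNormalize(mongo::MatchExpression* leaf, mongo::MatchExpression* root) {
    // Mark 'leaf' as answerable by index #0, at position 1 of that compound index.
    // The tag is handed over the same way tagForSort() does it above.
    leaf->setTag(new mongo::IndexTag(0, 1));

    // Propagate the lowest usable index number up the tree, then reorder children so
    // that nodes using the same index end up adjacent to one another.
    mongo::tagForSort(root);
    mongo::sortUsingTags(root);
}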
diff --git a/src/mongo/db/query/indexability.h b/src/mongo/db/query/indexability.h
index f1f25ced1c6..a68bf3f328a 100644
--- a/src/mongo/db/query/indexability.h
+++ b/src/mongo/db/query/indexability.h
@@ -32,127 +32,123 @@
namespace mongo {
+/**
+ * Logic for how indices can be used with an expression.
+ */
+class Indexability {
+public:
/**
- * Logic for how indices can be used with an expression.
+ * Is an index over me->path() useful?
+ * This is the same thing as being sargable, if you have a RDBMS background.
*/
- class Indexability {
- public:
- /**
- * Is an index over me->path() useful?
- * This is the same thing as being sargable, if you have a RDBMS background.
- */
- static bool nodeCanUseIndexOnOwnField(const MatchExpression* me) {
- if (me->path().empty()) {
- return false;
- }
-
- if (arrayUsesIndexOnOwnField(me)) {
- return true;
- }
+ static bool nodeCanUseIndexOnOwnField(const MatchExpression* me) {
+ if (me->path().empty()) {
+ return false;
+ }
- return isIndexOnOwnFieldTypeNode(me);
+ if (arrayUsesIndexOnOwnField(me)) {
+ return true;
}
- /**
- * This array operator doesn't have any children with fields and can use an index.
- *
- * Example: a: {$elemMatch: {$gte: 1, $lte: 1}}.
- */
- static bool arrayUsesIndexOnOwnField(const MatchExpression* me) {
- if (!me->isArray()) {
- return false;
- }
+ return isIndexOnOwnFieldTypeNode(me);
+ }
- if (MatchExpression::ELEM_MATCH_VALUE != me->matchType()) {
- return false;
- }
+ /**
+ * This array operator doesn't have any children with fields and can use an index.
+ *
+ * Example: a: {$elemMatch: {$gte: 1, $lte: 1}}.
+ */
+ static bool arrayUsesIndexOnOwnField(const MatchExpression* me) {
+ if (!me->isArray()) {
+ return false;
+ }
+
+ if (MatchExpression::ELEM_MATCH_VALUE != me->matchType()) {
+ return false;
+ }
- // We have an ELEM_MATCH_VALUE expression. In order to be
- // considered "indexable" all children of the ELEM_MATCH_VALUE
- // must be "indexable" type expressions as well.
- for (size_t i = 0; i < me->numChildren(); i++) {
- MatchExpression* child = me->getChild(i);
-
- // Special case for NOT: If the child is a NOT, then it's the thing below
- // the NOT that we care about.
- if (MatchExpression::NOT == child->matchType()) {
- MatchExpression* notChild = child->getChild(0);
-
- if (MatchExpression::MOD == notChild->matchType() ||
- MatchExpression::REGEX == notChild->matchType() ||
- MatchExpression::TYPE_OPERATOR == notChild->matchType()) {
- // We can't index negations of this kind of expression node.
- return false;
- }
-
- // It's the child of the NOT that we check for indexability.
- if (!isIndexOnOwnFieldTypeNode(notChild)) {
- return false;
- }
-
- // Special handling for NOT has already been done; don't fall through.
- continue;
+ // We have an ELEM_MATCH_VALUE expression. In order to be
+ // considered "indexable" all children of the ELEM_MATCH_VALUE
+ // must be "indexable" type expressions as well.
+ for (size_t i = 0; i < me->numChildren(); i++) {
+ MatchExpression* child = me->getChild(i);
+
+ // Special case for NOT: If the child is a NOT, then it's the thing below
+ // the NOT that we care about.
+ if (MatchExpression::NOT == child->matchType()) {
+ MatchExpression* notChild = child->getChild(0);
+
+ if (MatchExpression::MOD == notChild->matchType() ||
+ MatchExpression::REGEX == notChild->matchType() ||
+ MatchExpression::TYPE_OPERATOR == notChild->matchType()) {
+ // We can't index negations of this kind of expression node.
+ return false;
}
- if (!isIndexOnOwnFieldTypeNode(child)) {
+ // It's the child of the NOT that we check for indexability.
+ if (!isIndexOnOwnFieldTypeNode(notChild)) {
return false;
}
+
+ // Special handling for NOT has already been done; don't fall through.
+ continue;
}
- // The entire ELEM_MATCH_VALUE is indexable since every one of its children
- // is indexable.
- return true;
+ if (!isIndexOnOwnFieldTypeNode(child)) {
+ return false;
+ }
}
- /**
- * Certain array operators require that the field for that operator is prepended
- * to all fields in that operator's children.
- *
- * Example: a: {$elemMatch: {b:1, c:1}}.
- */
- static bool arrayUsesIndexOnChildren(const MatchExpression* me) {
- return me->isArray() && MatchExpression::ELEM_MATCH_OBJECT == me->matchType();
- }
+ // The entire ELEM_MATCH_VALUE is indexable since every one of its children
+ // is indexable.
+ return true;
+ }
- /**
- * Returns true if 'me' is a NOT, and the child of the NOT can use
- * an index on its own field.
- */
- static bool isBoundsGeneratingNot(const MatchExpression* me) {
- return MatchExpression::NOT == me->matchType() &&
- nodeCanUseIndexOnOwnField(me->getChild(0));
- }
+ /**
+ * Certain array operators require that the field for that operator is prepended
+ * to all fields in that operator's children.
+ *
+ * Example: a: {$elemMatch: {b:1, c:1}}.
+ */
+ static bool arrayUsesIndexOnChildren(const MatchExpression* me) {
+ return me->isArray() && MatchExpression::ELEM_MATCH_OBJECT == me->matchType();
+ }
- /**
- * Returns true if either 'me' is a bounds generating NOT,
- * or 'me' can use an index on its own field.
- */
- static bool isBoundsGenerating(const MatchExpression* me) {
- return isBoundsGeneratingNot(me) || nodeCanUseIndexOnOwnField(me);
- }
+ /**
+ * Returns true if 'me' is a NOT, and the child of the NOT can use
+ * an index on its own field.
+ */
+ static bool isBoundsGeneratingNot(const MatchExpression* me) {
+ return MatchExpression::NOT == me->matchType() &&
+ nodeCanUseIndexOnOwnField(me->getChild(0));
+ }
- private:
- /**
- * Returns true if 'me' is "sargable" but is not a negation and
- * is not an array node such as ELEM_MATCH_VALUE.
- *
- * Used as a helper for nodeCanUseIndexOnOwnField().
- */
- static bool isIndexOnOwnFieldTypeNode(const MatchExpression* me) {
- return me->matchType() == MatchExpression::LTE
- || me->matchType() == MatchExpression::LT
- || me->matchType() == MatchExpression::EQ
- || me->matchType() == MatchExpression::GT
- || me->matchType() == MatchExpression::GTE
- || me->matchType() == MatchExpression::REGEX
- || me->matchType() == MatchExpression::MOD
- || me->matchType() == MatchExpression::MATCH_IN
- || me->matchType() == MatchExpression::TYPE_OPERATOR
- || me->matchType() == MatchExpression::GEO
- || me->matchType() == MatchExpression::GEO_NEAR
- || me->matchType() == MatchExpression::EXISTS
- || me->matchType() == MatchExpression::TEXT;
- }
- };
+ /**
+ * Returns true if either 'me' is a bounds generating NOT,
+ * or 'me' can use an index on its own field.
+ */
+ static bool isBoundsGenerating(const MatchExpression* me) {
+ return isBoundsGeneratingNot(me) || nodeCanUseIndexOnOwnField(me);
+ }
+
+private:
+ /**
+ * Returns true if 'me' is "sargable" but is not a negation and
+ * is not an array node such as ELEM_MATCH_VALUE.
+ *
+ * Used as a helper for nodeCanUseIndexOnOwnField().
+ */
+ static bool isIndexOnOwnFieldTypeNode(const MatchExpression* me) {
+ return me->matchType() == MatchExpression::LTE || me->matchType() == MatchExpression::LT ||
+ me->matchType() == MatchExpression::EQ || me->matchType() == MatchExpression::GT ||
+ me->matchType() == MatchExpression::GTE || me->matchType() == MatchExpression::REGEX ||
+ me->matchType() == MatchExpression::MOD ||
+ me->matchType() == MatchExpression::MATCH_IN ||
+ me->matchType() == MatchExpression::TYPE_OPERATOR ||
+ me->matchType() == MatchExpression::GEO ||
+ me->matchType() == MatchExpression::GEO_NEAR ||
+ me->matchType() == MatchExpression::EXISTS || me->matchType() == MatchExpression::TEXT;
+ }
+};
} // namespace mongo
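
A short hedged sketch of the three entry points above as a planner might call them; 'pred' is assumed to be an already-parsed predicate such as {a: {$gt: 5}} or {a: {$elemMatch: {b: 1}}}.

#include "mongo/db/query/indexability.h"

void classifyPredicate(const mongo::MatchExpression* pred) {
    // True for sargable leaves on their own path, including indexable
    // $elemMatch-value expressions (e.g. {a: {$gt: 5}}).
    bool ownField = mongo::Indexability::nodeCanUseIndexOnOwnField(pred);

    // True for {a: {$elemMatch: {b: 1, c: 1}}}-style nodes, whose children need the
    // outer field prepended before an index can be chosen.
    bool onChildren = mongo::Indexability::arrayUsesIndexOnChildren(pred);

    // True either for a bounds-generating NOT (e.g. {a: {$ne: 3}}) or for a node
    // that can use an index on its own field.
    bool boundsGenerating = mongo::Indexability::isBoundsGenerating(pred);

    (void)ownField;
    (void)onChildren;
    (void)boundsGenerating;
}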
diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp
index 9a38020d00a..fe44395f021 100644
--- a/src/mongo/db/query/internal_plans.cpp
+++ b/src/mongo/db/query/internal_plans.cpp
@@ -40,91 +40,80 @@
namespace mongo {
- // static
- PlanExecutor* InternalPlanner::collectionScan(OperationContext* txn,
- StringData ns,
- Collection* collection,
- const Direction direction,
- const RecordId startLoc) {
- WorkingSet* ws = new WorkingSet();
-
- if (NULL == collection) {
- EOFStage* eof = new EOFStage();
- PlanExecutor* exec;
- // Takes ownership of 'ws' and 'eof'.
- Status execStatus = PlanExecutor::make(txn,
- ws,
- eof,
- ns.toString(),
- PlanExecutor::YIELD_MANUAL,
- &exec);
- invariant(execStatus.isOK());
- return exec;
- }
-
- invariant( ns == collection->ns().ns() );
-
- CollectionScanParams params;
- params.collection = collection;
- params.start = startLoc;
-
- if (FORWARD == direction) {
- params.direction = CollectionScanParams::FORWARD;
- }
- else {
- params.direction = CollectionScanParams::BACKWARD;
- }
-
- CollectionScan* cs = new CollectionScan(txn, params, ws, NULL);
+// static
+PlanExecutor* InternalPlanner::collectionScan(OperationContext* txn,
+ StringData ns,
+ Collection* collection,
+ const Direction direction,
+ const RecordId startLoc) {
+ WorkingSet* ws = new WorkingSet();
+
+ if (NULL == collection) {
+ EOFStage* eof = new EOFStage();
PlanExecutor* exec;
- // Takes ownership of 'ws' and 'cs'.
- Status execStatus = PlanExecutor::make(txn,
- ws,
- cs,
- collection,
- PlanExecutor::YIELD_MANUAL,
- &exec);
+ // Takes ownership of 'ws' and 'eof'.
+ Status execStatus =
+ PlanExecutor::make(txn, ws, eof, ns.toString(), PlanExecutor::YIELD_MANUAL, &exec);
invariant(execStatus.isOK());
return exec;
}
- // static
- PlanExecutor* InternalPlanner::indexScan(OperationContext* txn,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey, const BSONObj& endKey,
- bool endKeyInclusive, Direction direction,
- int options) {
- invariant(collection);
- invariant(descriptor);
-
- IndexScanParams params;
- params.descriptor = descriptor;
- params.direction = direction;
- params.bounds.isSimpleRange = true;
- params.bounds.startKey = startKey;
- params.bounds.endKey = endKey;
- params.bounds.endKeyInclusive = endKeyInclusive;
-
- WorkingSet* ws = new WorkingSet();
- IndexScan* ix = new IndexScan(txn, params, ws, NULL);
-
- PlanStage* root = ix;
-
- if (IXSCAN_FETCH & options) {
- root = new FetchStage(txn, ws, root, NULL, collection);
- }
+ invariant(ns == collection->ns().ns());
- PlanExecutor* exec;
- // Takes ownership of 'ws' and 'root'.
- Status execStatus = PlanExecutor::make(txn,
- ws,
- root,
- collection,
- PlanExecutor::YIELD_MANUAL,
- &exec);
- invariant(execStatus.isOK());
- return exec;
+ CollectionScanParams params;
+ params.collection = collection;
+ params.start = startLoc;
+
+ if (FORWARD == direction) {
+ params.direction = CollectionScanParams::FORWARD;
+ } else {
+ params.direction = CollectionScanParams::BACKWARD;
}
+ CollectionScan* cs = new CollectionScan(txn, params, ws, NULL);
+ PlanExecutor* exec;
+ // Takes ownership of 'ws' and 'cs'.
+ Status execStatus =
+ PlanExecutor::make(txn, ws, cs, collection, PlanExecutor::YIELD_MANUAL, &exec);
+ invariant(execStatus.isOK());
+ return exec;
+}
+
+// static
+PlanExecutor* InternalPlanner::indexScan(OperationContext* txn,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ bool endKeyInclusive,
+ Direction direction,
+ int options) {
+ invariant(collection);
+ invariant(descriptor);
+
+ IndexScanParams params;
+ params.descriptor = descriptor;
+ params.direction = direction;
+ params.bounds.isSimpleRange = true;
+ params.bounds.startKey = startKey;
+ params.bounds.endKey = endKey;
+ params.bounds.endKeyInclusive = endKeyInclusive;
+
+ WorkingSet* ws = new WorkingSet();
+ IndexScan* ix = new IndexScan(txn, params, ws, NULL);
+
+ PlanStage* root = ix;
+
+ if (IXSCAN_FETCH & options) {
+ root = new FetchStage(txn, ws, root, NULL, collection);
+ }
+
+ PlanExecutor* exec;
+ // Takes ownership of 'ws' and 'root'.
+ Status execStatus =
+ PlanExecutor::make(txn, ws, root, collection, PlanExecutor::YIELD_MANUAL, &exec);
+ invariant(execStatus.isOK());
+ return exec;
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index d9e763828ca..3b21e3a4f1e 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -33,52 +33,53 @@
namespace mongo {
- class BSONObj;
- class Collection;
- class IndexDescriptor;
- class OperationContext;
- class PlanExecutor;
+class BSONObj;
+class Collection;
+class IndexDescriptor;
+class OperationContext;
+class PlanExecutor;
- /**
- * The internal planner is a one-stop shop for "off-the-shelf" plans. Most internal procedures
- * that do not require advanced queries could be served by plans already in here.
- */
- class InternalPlanner {
- public:
- enum Direction {
- FORWARD = 1,
- BACKWARD = -1,
- };
-
- enum IndexScanOptions {
- // The client is interested in the default outputs of an index scan: BSONObj of the key,
- // RecordId of the record that's indexed. The client does its own fetching if required.
- IXSCAN_DEFAULT = 0,
+/**
+ * The internal planner is a one-stop shop for "off-the-shelf" plans. Most internal procedures
+ * that do not require advanced queries could be served by plans already in here.
+ */
+class InternalPlanner {
+public:
+ enum Direction {
+ FORWARD = 1,
+ BACKWARD = -1,
+ };
- // The client wants the fetched object and the RecordId that refers to it. Delegating
- // the fetch to the runner allows fetching outside of a lock.
- IXSCAN_FETCH = 1,
- };
+ enum IndexScanOptions {
+ // The client is interested in the default outputs of an index scan: BSONObj of the key,
+ // RecordId of the record that's indexed. The client does its own fetching if required.
+ IXSCAN_DEFAULT = 0,
- /**
- * Return a collection scan. Caller owns pointer.
- */
- static PlanExecutor* collectionScan(OperationContext* txn,
- StringData ns,
- Collection* collection,
- const Direction direction = FORWARD,
- const RecordId startLoc = RecordId());
+ // The client wants the fetched object and the RecordId that refers to it. Delegating
+ // the fetch to the runner allows fetching outside of a lock.
+ IXSCAN_FETCH = 1,
+ };
- /**
- * Return an index scan. Caller owns returned pointer.
- */
- static PlanExecutor* indexScan(OperationContext* txn,
- const Collection* collection,
- const IndexDescriptor* descriptor,
- const BSONObj& startKey, const BSONObj& endKey,
- bool endKeyInclusive, Direction direction = FORWARD,
- int options = 0);
+ /**
+ * Return a collection scan. Caller owns pointer.
+ */
+ static PlanExecutor* collectionScan(OperationContext* txn,
+ StringData ns,
+ Collection* collection,
+ const Direction direction = FORWARD,
+ const RecordId startLoc = RecordId());
- };
+ /**
+ * Return an index scan. Caller owns returned pointer.
+ */
+ static PlanExecutor* indexScan(OperationContext* txn,
+ const Collection* collection,
+ const IndexDescriptor* descriptor,
+ const BSONObj& startKey,
+ const BSONObj& endKey,
+ bool endKeyInclusive,
+ Direction direction = FORWARD,
+ int options = 0);
+};
} // namespace mongo
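
As a usage sketch only: a forward scan over a collection via the internal planner, taking ownership of the returned executor as the contract above requires. It assumes PlanExecutor's usual getNext(BSONObj*, RecordId*) / ADVANCED iteration interface and the plan_executor.h header, neither of which is part of this diff.

#include <memory>

#include "mongo/db/query/internal_plans.h"
#include "mongo/db/query/plan_executor.h"  // assumed location of PlanExecutor

void scanEverything(mongo::OperationContext* txn,
                    mongo::StringData ns,
                    mongo::Collection* collection) {
    // The caller owns the returned PlanExecutor; wrap it for exception safety.
    std::unique_ptr<mongo::PlanExecutor> exec(
        mongo::InternalPlanner::collectionScan(txn, ns, collection,
                                               mongo::InternalPlanner::FORWARD));

    mongo::BSONObj obj;
    while (mongo::PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
        // ... consume 'obj' ...
    }
}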
diff --git a/src/mongo/db/query/interval.cpp b/src/mongo/db/query/interval.cpp
index cdbb7abbb6d..2f35dc2eccb 100644
--- a/src/mongo/db/query/interval.cpp
+++ b/src/mongo/db/query/interval.cpp
@@ -30,167 +30,164 @@
namespace mongo {
- using std::string;
-
- Interval::Interval()
- : _intervalData(BSONObj()), start(BSONElement()), startInclusive(false), end(BSONElement()),
- endInclusive(false) { }
-
- Interval::Interval(BSONObj base, bool si, bool ei) {
- init(base, si, ei);
- }
-
- void Interval::init(BSONObj base, bool si, bool ei) {
- verify(base.nFields() >= 2);
-
- _intervalData = base.getOwned();
- BSONObjIterator it(_intervalData);
- start = it.next();
- end = it.next();
- startInclusive = si;
- endInclusive = ei;
+using std::string;
+
+Interval::Interval()
+ : _intervalData(BSONObj()),
+ start(BSONElement()),
+ startInclusive(false),
+ end(BSONElement()),
+ endInclusive(false) {}
+
+Interval::Interval(BSONObj base, bool si, bool ei) {
+ init(base, si, ei);
+}
+
+void Interval::init(BSONObj base, bool si, bool ei) {
+ verify(base.nFields() >= 2);
+
+ _intervalData = base.getOwned();
+ BSONObjIterator it(_intervalData);
+ start = it.next();
+ end = it.next();
+ startInclusive = si;
+ endInclusive = ei;
+}
+
+bool Interval::isEmpty() const {
+ return _intervalData.nFields() == 0;
+}
+
+bool Interval::isPoint() const {
+ return startInclusive && endInclusive && 0 == start.woCompare(end, false);
+}
+
+bool Interval::isNull() const {
+ return (!startInclusive || !endInclusive) && 0 == start.woCompare(end, false);
+}
+
+//
+// Comparison
+//
+
+bool Interval::equals(const Interval& other) const {
+ if (this->startInclusive != other.startInclusive) {
+ return false;
}
- bool Interval::isEmpty() const {
- return _intervalData.nFields() == 0;
+ if (this->endInclusive != other.endInclusive) {
+ return false;
}
- bool Interval::isPoint() const {
- return startInclusive && endInclusive && 0 == start.woCompare(end, false);
+ int res = this->start.woCompare(other.start, false);
+ if (res != 0) {
+ return false;
}
- bool Interval::isNull() const {
- return (!startInclusive || !endInclusive) && 0 == start.woCompare(end, false);
+ res = this->end.woCompare(other.end, false);
+ if (res != 0) {
+ return false;
}
- //
- // Comparison
- //
-
- bool Interval::equals(const Interval& other) const {
- if (this->startInclusive != other.startInclusive) {
- return false;
- }
+ return true;
+}
- if (this->endInclusive != other.endInclusive) {
- return false;
- }
+bool Interval::intersects(const Interval& other) const {
+ int res = this->start.woCompare(other.end, false);
+ if (res > 0) {
+ return false;
+ } else if (res == 0 && (!this->startInclusive || !other.endInclusive)) {
+ return false;
+ }
- int res = this->start.woCompare(other.start, false);
- if (res != 0) {
- return false;
- }
+ res = other.start.woCompare(this->end, false);
+ if (res > 0) {
+ return false;
+ } else if (res == 0 && (!other.startInclusive || !this->endInclusive)) {
+ return false;
+ }
- res = this->end.woCompare(other.end, false);
- if (res != 0) {
- return false;
- }
+ return true;
+}
- return true;
+bool Interval::within(const Interval& other) const {
+ int res = this->start.woCompare(other.start, false);
+ if (res < 0) {
+ return false;
+ } else if (res == 0 && this->startInclusive && !other.startInclusive) {
+ return false;
}
- bool Interval::intersects(const Interval& other) const {
- int res = this->start.woCompare(other.end, false);
- if (res > 0) {
- return false;
- }
- else if (res == 0 && (!this->startInclusive || !other.endInclusive)) {
- return false;
- }
+ res = this->end.woCompare(other.end, false);
+ if (res > 0) {
+ return false;
+ } else if (res == 0 && this->endInclusive && !other.endInclusive) {
+ return false;
+ }
- res = other.start.woCompare(this->end, false);
- if (res > 0) {
- return false;
- }
- else if (res == 0 && (!other.startInclusive || !this->endInclusive)) {
- return false;
- }
+ return true;
+}
+/** Returns true if the start of 'this' comes before the start of 'other'. */
+bool Interval::precedes(const Interval& other) const {
+ int res = this->start.woCompare(other.start, false);
+ if (res < 0) {
+ return true;
+ } else if (res == 0 && this->startInclusive && !other.startInclusive) {
return true;
}
+ return false;
+}
- bool Interval::within(const Interval& other) const {
- int res = this->start.woCompare(other.start, false);
- if (res < 0) {
- return false;
- }
- else if (res == 0 && this->startInclusive && !other.startInclusive) {
- return false;
- }
-
- res = this->end.woCompare(other.end, false);
- if (res > 0) {
- return false;
- }
- else if (res == 0 && this->endInclusive && !other.endInclusive) {
- return false;
- }
- return true;
- }
+Interval::IntervalComparison Interval::compare(const Interval& other) const {
+ //
+ // Intersect cases
+ //
- /** Returns true if the start of comes before the start of other */
- bool Interval::precedes(const Interval& other) const {
- int res = this->start.woCompare(other.start, false);
- if (res < 0) {
- return true;
+ if (this->intersects(other)) {
+ if (this->equals(other)) {
+ return INTERVAL_EQUALS;
}
- else if (res == 0 && this->startInclusive && !other.startInclusive) {
- return true;
+ if (this->within(other)) {
+ return INTERVAL_WITHIN;
}
- return false;
- }
-
-
- Interval::IntervalComparison Interval::compare(const Interval& other) const {
- //
- // Intersect cases
- //
-
- if (this->intersects(other)) {
- if (this->equals(other)) {
- return INTERVAL_EQUALS;
- }
- if (this->within(other)) {
- return INTERVAL_WITHIN;
- }
- if (other.within(*this)) {
- return INTERVAL_CONTAINS;
- }
- if (this->precedes(other)) {
- return INTERVAL_OVERLAPS_BEFORE;
- }
- return INTERVAL_OVERLAPS_AFTER;
+ if (other.within(*this)) {
+ return INTERVAL_CONTAINS;
}
-
- //
- // Non-intersect cases
- //
-
if (this->precedes(other)) {
- // It's not possible for both endInclusive and other.startInclusive to be true because
- // the bounds would intersect. Refer to section on "Intersect cases" above.
- if ((endInclusive || other.startInclusive) && 0 == end.woCompare(other.start, false)) {
- return INTERVAL_PRECEDES_COULD_UNION;
- }
- return INTERVAL_PRECEDES;
+ return INTERVAL_OVERLAPS_BEFORE;
}
-
- return INTERVAL_SUCCEEDS;
+ return INTERVAL_OVERLAPS_AFTER;
}
//
- // Mutation: Union and Intersection
+ // Non-intersect cases
//
- void Interval::intersect(const Interval& other, IntervalComparison cmp) {
- if (cmp == INTERVAL_UNKNOWN) {
- cmp = this->compare(other);
+ if (this->precedes(other)) {
+ // It's not possible for both endInclusive and other.startInclusive to be true because
+ // the bounds would intersect. Refer to section on "Intersect cases" above.
+ if ((endInclusive || other.startInclusive) && 0 == end.woCompare(other.start, false)) {
+ return INTERVAL_PRECEDES_COULD_UNION;
}
+ return INTERVAL_PRECEDES;
+ }
- BSONObjBuilder builder;
- switch (cmp) {
+ return INTERVAL_SUCCEEDS;
+}
+//
+// Mutation: Union and Intersection
+//
+
+void Interval::intersect(const Interval& other, IntervalComparison cmp) {
+ if (cmp == INTERVAL_UNKNOWN) {
+ cmp = this->compare(other);
+ }
+
+ BSONObjBuilder builder;
+ switch (cmp) {
case INTERVAL_EQUALS:
case INTERVAL_WITHIN:
break;
@@ -220,17 +217,16 @@ namespace mongo {
default:
verify(false);
- }
}
+}
- void Interval::combine(const Interval& other, IntervalComparison cmp) {
- if (cmp == INTERVAL_UNKNOWN) {
- cmp = this->compare(other);
- }
-
- BSONObjBuilder builder;
- switch (cmp) {
+void Interval::combine(const Interval& other, IntervalComparison cmp) {
+ if (cmp == INTERVAL_UNKNOWN) {
+ cmp = this->compare(other);
+ }
+ BSONObjBuilder builder;
+ switch (cmp) {
case INTERVAL_EQUALS:
case INTERVAL_CONTAINS:
break;
@@ -257,62 +253,62 @@ namespace mongo {
default:
verify(false);
- }
- }
-
- void Interval::reverse() {
- std::swap(start, end);
- std::swap(startInclusive, endInclusive);
}
+}
- //
- // Debug info
- //
+void Interval::reverse() {
+ std::swap(start, end);
+ std::swap(startInclusive, endInclusive);
+}
- // static
- string Interval::cmpstr(IntervalComparison c) {
- if (c == INTERVAL_EQUALS) {
- return "INTERVAL_EQUALS";
- }
+//
+// Debug info
+//
- // 'this' contains the other interval.
- if (c == INTERVAL_CONTAINS) {
- return "INTERVAL_CONTAINS";
- }
+// static
+string Interval::cmpstr(IntervalComparison c) {
+ if (c == INTERVAL_EQUALS) {
+ return "INTERVAL_EQUALS";
+ }
- // 'this' is contained by the other interval.
- if (c == INTERVAL_WITHIN) {
- return "INTERVAL_WITHIN";
- }
+ // 'this' contains the other interval.
+ if (c == INTERVAL_CONTAINS) {
+ return "INTERVAL_CONTAINS";
+ }
- // The two intervals intersect and 'this' is before the other interval.
- if (c == INTERVAL_OVERLAPS_BEFORE) {
- return "INTERVAL_OVERLAPS_BEFORE";
- }
+ // 'this' is contained by the other interval.
+ if (c == INTERVAL_WITHIN) {
+ return "INTERVAL_WITHIN";
+ }
- // The two intervals intersect and 'this is after the other interval.
- if (c == INTERVAL_OVERLAPS_AFTER) {
- return "INTERVAL_OVERLAPS_AFTER";
- }
+ // The two intervals intersect and 'this' is before the other interval.
+ if (c == INTERVAL_OVERLAPS_BEFORE) {
+ return "INTERVAL_OVERLAPS_BEFORE";
+ }
- // There is no intersection.
- if (c == INTERVAL_PRECEDES) {
- return "INTERVAL_PRECEDES";
- }
+    // The two intervals intersect and 'this' is after the other interval.
+ if (c == INTERVAL_OVERLAPS_AFTER) {
+ return "INTERVAL_OVERLAPS_AFTER";
+ }
- if (c == INTERVAL_PRECEDES_COULD_UNION) {
- return "INTERVAL_PRECEDES_COULD_UNION";
- }
+ // There is no intersection.
+ if (c == INTERVAL_PRECEDES) {
+ return "INTERVAL_PRECEDES";
+ }
- if (c == INTERVAL_SUCCEEDS) {
- return "INTERVAL_SUCCEEDS";
- }
+ if (c == INTERVAL_PRECEDES_COULD_UNION) {
+ return "INTERVAL_PRECEDES_COULD_UNION";
+ }
- if (c == INTERVAL_UNKNOWN) {
- return "INTERVAL_UNKNOWN";
- }
+ if (c == INTERVAL_SUCCEEDS) {
+ return "INTERVAL_SUCCEEDS";
+ }
- return "NO IDEA DUDE";
+ if (c == INTERVAL_UNKNOWN) {
+ return "INTERVAL_UNKNOWN";
}
-} // namespace mongo
+ return "NO IDEA DUDE";
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/interval.h b/src/mongo/db/query/interval.h
index e4f82da8088..88309d33e05 100644
--- a/src/mongo/db/query/interval.h
+++ b/src/mongo/db/query/interval.h
@@ -33,164 +33,161 @@
namespace mongo {
- /** A range of values for one field. */
- struct Interval {
-
- // No BSONValue means we have to keep a BSONObj and pointers (BSONElement) into it.
- // 'start' may not point at the first field in _intervalData.
- // 'end' may not point at the last field in _intervalData.
- // 'start' and 'end' may point at the same field.
- BSONObj _intervalData;
-
- // Start and End must be ordered according to the index order.
- BSONElement start;
- bool startInclusive;
-
- BSONElement end;
- bool endInclusive;
-
- /** Creates an empty interval */
- Interval();
-
- std::string toString() const {
- mongoutils::str::stream ss;
- if (startInclusive) {
- ss << "[";
- }
- else {
- ss << "(";
- }
- // false means omit the field name
- ss << start.toString(false);
- ss << ", ";
- ss << end.toString(false);
- if (endInclusive) {
- ss << "]";
- }
- else {
- ss << ")";
- }
- return ss;
+/** A range of values for one field. */
+struct Interval {
+ // No BSONValue means we have to keep a BSONObj and pointers (BSONElement) into it.
+ // 'start' may not point at the first field in _intervalData.
+ // 'end' may not point at the last field in _intervalData.
+ // 'start' and 'end' may point at the same field.
+ BSONObj _intervalData;
+
+ // Start and End must be ordered according to the index order.
+ BSONElement start;
+ bool startInclusive;
+
+ BSONElement end;
+ bool endInclusive;
+
+ /** Creates an empty interval */
+ Interval();
+
+ std::string toString() const {
+ mongoutils::str::stream ss;
+ if (startInclusive) {
+ ss << "[";
+ } else {
+ ss << "(";
}
+ // false means omit the field name
+ ss << start.toString(false);
+ ss << ", ";
+ ss << end.toString(false);
+ if (endInclusive) {
+ ss << "]";
+ } else {
+ ss << ")";
+ }
+ return ss;
+ }
- /**
- * Creates an interval that starts at the first field of 'base' and ends at the second
- * field of 'base'. (In other words, 'base' is a bsonobj with at least two elements, of
- * which we don't care about field names.)
- *
- * The interval's extremities are closed or not depending on whether
- * 'start'/'endIncluded' are true or not.
- */
- Interval(BSONObj base, bool startIncluded, bool endIncluded);
-
- /** Sets the current interval to the given values (see constructor) */
- void init(BSONObj base, bool startIncluded, bool endIncluded);
-
- /**
- * Returns true if an empty-constructed interval hasn't been init()-ialized yet
- */
- bool isEmpty() const;
-
- /**
- * Does this interval represent exactly one point?
- */
- bool isPoint() const;
-
- /**
- * Returns true if start is same as end and interval is open at either end
- */
- bool isNull() const;
-
+ /**
+ * Creates an interval that starts at the first field of 'base' and ends at the second
+ * field of 'base'. (In other words, 'base' is a bsonobj with at least two elements, of
+ * which we don't care about field names.)
+ *
+ * The interval's extremities are closed or not depending on whether
+     * 'startIncluded'/'endIncluded' are true or not.
+ */
+ Interval(BSONObj base, bool startIncluded, bool endIncluded);
+
+ /** Sets the current interval to the given values (see constructor) */
+ void init(BSONObj base, bool startIncluded, bool endIncluded);
+
+ /**
+ * Returns true if an empty-constructed interval hasn't been init()-ialized yet
+ */
+ bool isEmpty() const;
+
+ /**
+ * Does this interval represent exactly one point?
+ */
+ bool isPoint() const;
+
+ /**
+ * Returns true if start is same as end and interval is open at either end
+ */
+ bool isNull() const;
+
+ //
+ // Comparison with other intervals
+ //
+
+ /**
+ * Returns true if 'this' is the same interval as 'other'
+ */
+ bool equals(const Interval& other) const;
+
+ /**
+ * Returns true if 'this' overlaps with 'other', false otherwise.
+ */
+ bool intersects(const Interval& rhs) const;
+
+ /**
+ * Returns true if 'this' is within 'other', false otherwise.
+ */
+ bool within(const Interval& other) const;
+
+ /**
+ * Returns true if 'this' is located before 'other', false otherwise.
+ */
+ bool precedes(const Interval& other) const;
+
+ /** Returns how 'this' compares to 'other' */
+ enum IntervalComparison {
//
- // Comparison with other intervals
+ // There is some intersection.
//
- /**
- * Returns true if 'this' is the same interval as 'other'
- */
- bool equals(const Interval& other) const;
-
- /**
- * Returns true if 'this' overlaps with 'other', false otherwise.
- */
- bool intersects(const Interval& rhs) const;
+ // The two intervals are *exactly* equal.
+ INTERVAL_EQUALS,
- /**
- * Returns true if 'this' is within 'other', false otherwise.
- */
- bool within(const Interval& other) const;
+ // 'this' contains the other interval.
+ INTERVAL_CONTAINS,
- /**
- * Returns true if 'this' is located before 'other', false otherwise.
- */
- bool precedes(const Interval& other) const;
+ // 'this' is contained by the other interval.
+ INTERVAL_WITHIN,
- /** Returns how 'this' compares to 'other' */
- enum IntervalComparison {
- //
- // There is some intersection.
- //
+ // The two intervals intersect and 'this' is before the other interval.
+ INTERVAL_OVERLAPS_BEFORE,
- // The two intervals are *exactly* equal.
- INTERVAL_EQUALS,
+        // The two intervals intersect and 'this' is after the other interval.
+ INTERVAL_OVERLAPS_AFTER,
- // 'this' contains the other interval.
- INTERVAL_CONTAINS,
-
- // 'this' is contained by the other interval.
- INTERVAL_WITHIN,
-
- // The two intervals intersect and 'this' is before the other interval.
- INTERVAL_OVERLAPS_BEFORE,
+ //
+ // There is no intersection.
+ //
- // The two intervals intersect and 'this is after the other interval.
- INTERVAL_OVERLAPS_AFTER,
+ INTERVAL_PRECEDES,
- //
- // There is no intersection.
- //
+ // This happens if we have [a,b) [b,c]
+ INTERVAL_PRECEDES_COULD_UNION,
- INTERVAL_PRECEDES,
+ INTERVAL_SUCCEEDS,
- // This happens if we have [a,b) [b,c]
- INTERVAL_PRECEDES_COULD_UNION,
+ INTERVAL_UNKNOWN
+ };
- INTERVAL_SUCCEEDS,
+ IntervalComparison compare(const Interval& other) const;
- INTERVAL_UNKNOWN
- };
+ /**
+ * toString for IntervalComparison
+ */
+ static std::string cmpstr(IntervalComparison c);
- IntervalComparison compare(const Interval& other) const;
+ //
+ // Mutation of intervals
+ //
- /**
- * toString for IntervalComparison
- */
- static std::string cmpstr(IntervalComparison c);
+ /**
+ * Swap start and end points of interval.
+ */
+ void reverse();
- //
- // Mutation of intervals
- //
+ /**
+ * Updates 'this' with the intersection of 'this' and 'other'. If 'this' and 'other'
+ * have been compare()d before, that result can be optionally passed in 'cmp'
+ */
+ void intersect(const Interval& other, IntervalComparison cmp = INTERVAL_UNKNOWN);
- /**
- * Swap start and end points of interval.
- */
- void reverse();
-
- /**
- * Updates 'this' with the intersection of 'this' and 'other'. If 'this' and 'other'
- * have been compare()d before, that result can be optionally passed in 'cmp'
- */
- void intersect(const Interval& other, IntervalComparison cmp = INTERVAL_UNKNOWN);
-
- /**
- * Updates 'this" with the union of 'this' and 'other'. If 'this' and 'other' have
- * been compare()d before, that result can be optionaly passed in 'cmp'.
- */
- void combine(const Interval& other, IntervalComparison cmp = INTERVAL_UNKNOWN);
- };
+ /**
+ * Updates 'this" with the union of 'this' and 'other'. If 'this' and 'other' have
+ * been compare()d before, that result can be optionaly passed in 'cmp'.
+ */
+ void combine(const Interval& other, IntervalComparison cmp = INTERVAL_UNKNOWN);
+};
- inline bool operator==(const Interval& lhs, const Interval& rhs) {
- return lhs.compare(rhs) == Interval::INTERVAL_EQUALS;
- }
+inline bool operator==(const Interval& lhs, const Interval& rhs) {
+ return lhs.compare(rhs) == Interval::INTERVAL_EQUALS;
+}
-} // namespace mongo
+} // namespace mongo
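The interval.h hunk above is formatting-only: compare(), intersect(), and combine() keep the semantics spelled out in their comments. A minimal sketch of how that API reads at a call site, using only the declarations shown above (the function name and the extra include are illustrative assumptions, not part of this change):

#include "mongo/db/query/interval.h"

namespace {

// Illustrative only: exercises the Interval declarations from the hunk above.
// Assumes the usual mongo BSON headers so the BSON() macro is available.
void intervalUsageSketch() {
    // 'a' = [10, 20] (both endpoints inclusive), 'b' = [15, 25].
    mongo::Interval a(BSON("" << 10 << "" << 20), true, true);
    mongo::Interval b(BSON("" << 15 << "" << 25), true, true);

    // The intervals intersect and 'a' starts first.
    mongo::Interval::IntervalComparison cmp = a.compare(b);
    // cmp == mongo::Interval::INTERVAL_OVERLAPS_BEFORE

    // A previously computed comparison may be passed to intersect()/combine()
    // to avoid recomputing it; 'a' becomes [15, 20] here.
    a.intersect(b, cmp);
}

}  // namespace

The interval_test.cpp hunk that follows reindents the unit tests covering exactly these comparison, intersection, and union cases.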
diff --git a/src/mongo/db/query/interval_test.cpp b/src/mongo/db/query/interval_test.cpp
index 62c4b815e53..d9e829a254b 100644
--- a/src/mongo/db/query/interval_test.cpp
+++ b/src/mongo/db/query/interval_test.cpp
@@ -33,264 +33,264 @@
namespace {
- using mongo::BSONObj;
- using mongo::Interval;
+using mongo::BSONObj;
+using mongo::Interval;
- //
- // Comparison
- //
+//
+// Comparison
+//
- TEST(Comparison, Equality) {
- Interval a(BSON("" << 0 << "" << 10), true, true);
- ASSERT_EQUALS(a.compare(a), Interval::INTERVAL_EQUALS);
+TEST(Comparison, Equality) {
+ Interval a(BSON("" << 0 << "" << 10), true, true);
+ ASSERT_EQUALS(a.compare(a), Interval::INTERVAL_EQUALS);
- Interval b(BSON("" << 0 << "" << 10), true, false);
- ASSERT_NOT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
+ Interval b(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_NOT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
- Interval c(BSON("" << 0 << "" << 10), false, true);
- ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_EQUALS);
+ Interval c(BSON("" << 0 << "" << 10), false, true);
+ ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_EQUALS);
- Interval d(BSON("" << 0 << "" << 11), true, true);
- ASSERT_NOT_EQUALS(a.compare(d), Interval::INTERVAL_EQUALS);
+ Interval d(BSON("" << 0 << "" << 11), true, true);
+ ASSERT_NOT_EQUALS(a.compare(d), Interval::INTERVAL_EQUALS);
- Interval e(BSON("" << 1 << "" << 10), true, true);
- ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_EQUALS);
- }
+ Interval e(BSON("" << 1 << "" << 10), true, true);
+ ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_EQUALS);
+}
- TEST(Comparison, Contains) {
- Interval a(BSON("" << 0 << "" << 10), true, true);
- Interval b(BSON("" << 1 << "" << 9), true, true);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_CONTAINS);
+TEST(Comparison, Contains) {
+ Interval a(BSON("" << 0 << "" << 10), true, true);
+ Interval b(BSON("" << 1 << "" << 9), true, true);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_CONTAINS);
- Interval c(BSON("" << 0 << "" << 10), true, false);
- ASSERT_EQUALS(a.compare(c), Interval::INTERVAL_CONTAINS);
+ Interval c(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_EQUALS(a.compare(c), Interval::INTERVAL_CONTAINS);
- Interval d(BSON("" << 0 << "" << 10), false, true);
- ASSERT_EQUALS(a.compare(d), Interval::INTERVAL_CONTAINS);
+ Interval d(BSON("" << 0 << "" << 10), false, true);
+ ASSERT_EQUALS(a.compare(d), Interval::INTERVAL_CONTAINS);
- Interval e(BSON("" << 0 << "" << 11), false, true);
- ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_CONTAINS);
- }
+ Interval e(BSON("" << 0 << "" << 11), false, true);
+ ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_CONTAINS);
+}
- TEST(Comparison, Within) {
- Interval a(BSON("" << 0 << "" << 10), true, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_WITHIN);
+TEST(Comparison, Within) {
+ Interval a(BSON("" << 0 << "" << 10), true, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_WITHIN);
- Interval b(BSON("" << 1 << "" << 9), true, true);
- ASSERT_EQUALS(b.compare(a), Interval::INTERVAL_WITHIN);
+ Interval b(BSON("" << 1 << "" << 9), true, true);
+ ASSERT_EQUALS(b.compare(a), Interval::INTERVAL_WITHIN);
- Interval c(BSON("" << 0 << "" << 10), true, false);
- ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_WITHIN);
+ Interval c(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_WITHIN);
- Interval d(BSON("" << 0 << "" << 10), false, true);
- ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_WITHIN);
+ Interval d(BSON("" << 0 << "" << 10), false, true);
+ ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_WITHIN);
- Interval e(BSON("" << 0 << "" << 11), false, true);
- ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_CONTAINS);
- }
+ Interval e(BSON("" << 0 << "" << 11), false, true);
+ ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_CONTAINS);
+}
- TEST(Comparison, OverlapsBefore) {
- Interval a(BSON("" << 1 << "" << 9), true, false);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_OVERLAPS_BEFORE);
+TEST(Comparison, OverlapsBefore) {
+ Interval a(BSON("" << 1 << "" << 9), true, false);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval b(BSON("" << 1 << "" << 9), false, true);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_OVERLAPS_BEFORE);
+ Interval b(BSON("" << 1 << "" << 9), false, true);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval c(BSON("" << 1 << "" << 9), false, false);
- ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_OVERLAPS_BEFORE);
+ Interval c(BSON("" << 1 << "" << 9), false, false);
+ ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval d(BSON("" << 2 << "" << 10), true, true);
- ASSERT_EQUALS(a.compare(d), Interval::INTERVAL_OVERLAPS_BEFORE);
+ Interval d(BSON("" << 2 << "" << 10), true, true);
+ ASSERT_EQUALS(a.compare(d), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval e(BSON("" << 0 << "" << 9), true, false);
- ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_OVERLAPS_BEFORE);
+ Interval e(BSON("" << 0 << "" << 9), true, false);
+ ASSERT_NOT_EQUALS(a.compare(e), Interval::INTERVAL_OVERLAPS_BEFORE);
- Interval f(BSON("" << 0 << "" << 8), true, false);
- ASSERT_NOT_EQUALS(a.compare(f), Interval::INTERVAL_OVERLAPS_BEFORE);
- }
+ Interval f(BSON("" << 0 << "" << 8), true, false);
+ ASSERT_NOT_EQUALS(a.compare(f), Interval::INTERVAL_OVERLAPS_BEFORE);
+}
- TEST(Comparison, OverlapsAfter) {
- Interval a(BSON("" << 1 << "" << 9), false, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_OVERLAPS_AFTER);
+TEST(Comparison, OverlapsAfter) {
+ Interval a(BSON("" << 1 << "" << 9), false, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_OVERLAPS_AFTER);
- Interval b(BSON("" << 1 << "" << 9), true, false);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_OVERLAPS_AFTER);
+ Interval b(BSON("" << 1 << "" << 9), true, false);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_OVERLAPS_AFTER);
- Interval c(BSON("" << 1 << "" << 9), true, true);
- ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_OVERLAPS_AFTER);
+ Interval c(BSON("" << 1 << "" << 9), true, true);
+ ASSERT_NOT_EQUALS(a.compare(c), Interval::INTERVAL_OVERLAPS_AFTER);
- Interval d(BSON("" << 2 << "" << 10), true, true);
- ASSERT_NOT_EQUALS(a.compare(d), Interval::INTERVAL_OVERLAPS_AFTER);
+ Interval d(BSON("" << 2 << "" << 10), true, true);
+ ASSERT_NOT_EQUALS(a.compare(d), Interval::INTERVAL_OVERLAPS_AFTER);
- Interval e(BSON("" << 0 << "" << 9), true, false);
- ASSERT_EQUALS(a.compare(e), Interval::INTERVAL_OVERLAPS_AFTER);
- }
+ Interval e(BSON("" << 0 << "" << 9), true, false);
+ ASSERT_EQUALS(a.compare(e), Interval::INTERVAL_OVERLAPS_AFTER);
+}
- TEST(Comparison, Precedes) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_PRECEDES);
+TEST(Comparison, Precedes) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_PRECEDES);
- Interval b(BSON("" << 0 << "" << 10), true, true);
- ASSERT_NOT_EQUALS(b.compare(a), Interval::INTERVAL_PRECEDES);
+ Interval b(BSON("" << 0 << "" << 10), true, true);
+ ASSERT_NOT_EQUALS(b.compare(a), Interval::INTERVAL_PRECEDES);
- Interval c(BSON("" << 0 << "" << 10), true, false);
- ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_PRECEDES_COULD_UNION);
+ Interval c(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_PRECEDES_COULD_UNION);
- Interval d(BSON("" << 0 << "" << 9), true, true);
- ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_PRECEDES);
+ Interval d(BSON("" << 0 << "" << 9), true, true);
+ ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_PRECEDES);
- Interval e(BSON("" << 5 << "" << 15), true, true);
- ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_PRECEDES);
+ Interval e(BSON("" << 5 << "" << 15), true, true);
+ ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_PRECEDES);
- Interval f(BSON("" << 5 << "" << 20), true, false);
- ASSERT_NOT_EQUALS(f.compare(a), Interval::INTERVAL_PRECEDES);
- }
+ Interval f(BSON("" << 5 << "" << 20), true, false);
+ ASSERT_NOT_EQUALS(f.compare(a), Interval::INTERVAL_PRECEDES);
+}
- TEST(Comparison, PrecedesCouldUnion) {
- Interval a(BSON("" << 10 << "" << 20), false, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_PRECEDES);
+TEST(Comparison, PrecedesCouldUnion) {
+ Interval a(BSON("" << 10 << "" << 20), false, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_PRECEDES);
- Interval b(BSON("" << 0 << "" << 10), true, false);
- ASSERT_EQUALS(b.compare(a), Interval::INTERVAL_PRECEDES);
+ Interval b(BSON("" << 0 << "" << 10), true, false);
+ ASSERT_EQUALS(b.compare(a), Interval::INTERVAL_PRECEDES);
- Interval c(BSON("" << 0 << "" << 10), true, true);
- ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_PRECEDES_COULD_UNION);
- }
+ Interval c(BSON("" << 0 << "" << 10), true, true);
+ ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_PRECEDES_COULD_UNION);
+}
- TEST(Comparison, Succeds) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_SUCCEEDS);
+TEST(Comparison, Succeds) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ ASSERT_NOT_EQUALS(a.compare(a), Interval::INTERVAL_SUCCEEDS);
- Interval b(BSON("" << 20 << "" << 30), true, true);
- ASSERT_NOT_EQUALS(b.compare(a), Interval::INTERVAL_SUCCEEDS);
-
- Interval c(BSON("" << 20 << "" << 30), false, true);
- ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_SUCCEEDS);
-
- Interval d(BSON("" << 21 << "" << 30), true, true);
- ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_SUCCEEDS);
-
- Interval e(BSON("" << 15 << "" << 30), true, true);
- ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_SUCCEEDS);
- }
-
- //
- // intersection
- //
-
- TEST(Intersection, Equals) {
- BSONObj itv = BSON("" << 10 << "" << 20);
- Interval a(itv, true, true);
- a.intersect(a);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, Contains) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- BSONObj itv = BSON("" << 11 << "" << 19);
- Interval b(itv, true, true);
- a.intersect(b);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, Within) {
- BSONObj itv = BSON("" << 10 << "" << 20);
- Interval a(itv, true, true);
- Interval b(BSON("" << 9 << "" << 21), true, true);
- a.intersect(b);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, OverlapsBefore) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 15 << "" << 25), true, true);
- a.intersect(b);
-
- BSONObj itv = BSON("" << 15 << "" << 20);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, OverlapsAfter) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 5 << "" << 15), true, true);
- a.intersect(b);
-
- BSONObj itv = BSON("" << 10 << "" << 15);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Intersection, Procedes) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 0 << "" << 5), true, true);
- a.intersect(b);
-
- ASSERT_TRUE(a.isEmpty());
- }
-
- TEST(Intersection, Succeds) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 25 << "" << 30), true, true);
- a.intersect(b);
-
- ASSERT_TRUE(a.isEmpty());
- }
-
- //
- // combine (union)
- //
-
- TEST(Union, Equals) {
- BSONObj itv = BSON("" << 10 << "" << 20);
- Interval a(itv, true, true);
- a.combine(a);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, Contains) {
- BSONObj itv = BSON("" << 10 << "" << 20);
- Interval a(itv, true, true);
- Interval b(BSON("" << 11 << "" << 19), true, true);
- a.combine(b);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, Within) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 9 << "" << 21), true, true);
- a.combine(b);
- ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, OverlapsBefore) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 15 << "" << 25), true, true);
- a.combine(b);
- BSONObj itv = BSON("" << 10 << "" << 25);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, OverlapsAfter) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 5 << "" << 15), true, true);
- a.combine(b);
- BSONObj itv = BSON("" << 5 << "" << 20);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, Precedes) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 20 << "" << 30), true, true);
- a.combine(b);
- BSONObj itv = BSON("" << 10 << "" << 30);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
- TEST(Union, Succeds) {
- Interval a(BSON("" << 10 << "" << 20), true, true);
- Interval b(BSON("" << 0 << "" << 5), true, true);
- a.combine(b);
- BSONObj itv = BSON("" << 0 << "" << 20);
- ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
- }
-
-} // unnamed namespace
+ Interval b(BSON("" << 20 << "" << 30), true, true);
+ ASSERT_NOT_EQUALS(b.compare(a), Interval::INTERVAL_SUCCEEDS);
+
+ Interval c(BSON("" << 20 << "" << 30), false, true);
+ ASSERT_EQUALS(c.compare(a), Interval::INTERVAL_SUCCEEDS);
+
+ Interval d(BSON("" << 21 << "" << 30), true, true);
+ ASSERT_EQUALS(d.compare(a), Interval::INTERVAL_SUCCEEDS);
+
+ Interval e(BSON("" << 15 << "" << 30), true, true);
+ ASSERT_NOT_EQUALS(e.compare(a), Interval::INTERVAL_SUCCEEDS);
+}
+
+//
+// intersection
+//
+
+TEST(Intersection, Equals) {
+ BSONObj itv = BSON("" << 10 << "" << 20);
+ Interval a(itv, true, true);
+ a.intersect(a);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, Contains) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ BSONObj itv = BSON("" << 11 << "" << 19);
+ Interval b(itv, true, true);
+ a.intersect(b);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, Within) {
+ BSONObj itv = BSON("" << 10 << "" << 20);
+ Interval a(itv, true, true);
+ Interval b(BSON("" << 9 << "" << 21), true, true);
+ a.intersect(b);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, OverlapsBefore) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 15 << "" << 25), true, true);
+ a.intersect(b);
+
+ BSONObj itv = BSON("" << 15 << "" << 20);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, OverlapsAfter) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 5 << "" << 15), true, true);
+ a.intersect(b);
+
+ BSONObj itv = BSON("" << 10 << "" << 15);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Intersection, Procedes) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 0 << "" << 5), true, true);
+ a.intersect(b);
+
+ ASSERT_TRUE(a.isEmpty());
+}
+
+TEST(Intersection, Succeds) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 25 << "" << 30), true, true);
+ a.intersect(b);
+
+ ASSERT_TRUE(a.isEmpty());
+}
+
+//
+// combine (union)
+//
+
+TEST(Union, Equals) {
+ BSONObj itv = BSON("" << 10 << "" << 20);
+ Interval a(itv, true, true);
+ a.combine(a);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, Contains) {
+ BSONObj itv = BSON("" << 10 << "" << 20);
+ Interval a(itv, true, true);
+ Interval b(BSON("" << 11 << "" << 19), true, true);
+ a.combine(b);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, Within) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 9 << "" << 21), true, true);
+ a.combine(b);
+ ASSERT_EQUALS(a.compare(b), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, OverlapsBefore) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 15 << "" << 25), true, true);
+ a.combine(b);
+ BSONObj itv = BSON("" << 10 << "" << 25);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, OverlapsAfter) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 5 << "" << 15), true, true);
+ a.combine(b);
+ BSONObj itv = BSON("" << 5 << "" << 20);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, Precedes) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 20 << "" << 30), true, true);
+ a.combine(b);
+ BSONObj itv = BSON("" << 10 << "" << 30);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+TEST(Union, Succeds) {
+ Interval a(BSON("" << 10 << "" << 20), true, true);
+ Interval b(BSON("" << 0 << "" << 5), true, true);
+ a.combine(b);
+ BSONObj itv = BSON("" << 0 << "" << 20);
+ ASSERT_EQUALS(a.compare(Interval(itv, true, true)), Interval::INTERVAL_EQUALS);
+}
+
+} // unnamed namespace
diff --git a/src/mongo/db/query/lite_parsed_query.cpp b/src/mongo/db/query/lite_parsed_query.cpp
index 1b2ce795eb0..6b8a25abe48 100644
--- a/src/mongo/db/query/lite_parsed_query.cpp
+++ b/src/mongo/db/query/lite_parsed_query.cpp
@@ -41,922 +41,884 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
- const string LiteParsedQuery::cmdOptionMaxTimeMS("maxTimeMS");
- const string LiteParsedQuery::queryOptionMaxTimeMS("$maxTimeMS");
+const string LiteParsedQuery::cmdOptionMaxTimeMS("maxTimeMS");
+const string LiteParsedQuery::queryOptionMaxTimeMS("$maxTimeMS");
- const string LiteParsedQuery::metaTextScore("textScore");
- const string LiteParsedQuery::metaGeoNearDistance("geoNearDistance");
- const string LiteParsedQuery::metaGeoNearPoint("geoNearPoint");
- const string LiteParsedQuery::metaRecordId("recordId");
- const string LiteParsedQuery::metaIndexKey("indexKey");
+const string LiteParsedQuery::metaTextScore("textScore");
+const string LiteParsedQuery::metaGeoNearDistance("geoNearDistance");
+const string LiteParsedQuery::metaGeoNearPoint("geoNearPoint");
+const string LiteParsedQuery::metaRecordId("recordId");
+const string LiteParsedQuery::metaIndexKey("indexKey");
- const int LiteParsedQuery::kDefaultBatchSize = 101;
+const int LiteParsedQuery::kDefaultBatchSize = 101;
namespace {
- Status checkFieldType(const BSONElement& el, BSONType type) {
- if (type != el.type()) {
- str::stream ss;
- ss << "Failed to parse: " << el.toString() << ". "
- << "'" << el.fieldName() << "' field must be of BSON type "
- << typeName(type) << ".";
- return Status(ErrorCodes::FailedToParse, ss);
- }
+Status checkFieldType(const BSONElement& el, BSONType type) {
+ if (type != el.type()) {
+ str::stream ss;
+ ss << "Failed to parse: " << el.toString() << ". "
+ << "'" << el.fieldName() << "' field must be of BSON type " << typeName(type) << ".";
+ return Status(ErrorCodes::FailedToParse, ss);
+ }
- return Status::OK();
- }
-
- // Find command field names.
- const char kCmdName[] = "find";
- const char kFilterField[] = "filter";
- const char kProjectionField[] = "projection";
- const char kSortField[] = "sort";
- const char kHintField[] = "hint";
- const char kSkipField[] = "skip";
- const char kLimitField[] = "limit";
- const char kBatchSizeField[] = "batchSize";
- const char kSingleBatchField[] = "singleBatch";
- const char kCommentField[] = "comment";
- const char kMaxScanField[] = "maxScan";
- const char kMaxField[] = "max";
- const char kMinField[] = "min";
- const char kReturnKeyField[] = "returnKey";
- const char kShowRecordIdField[] = "showRecordId";
- const char kSnapshotField[] = "snapshot";
- const char kTailableField[] = "tailable";
- const char kOplogReplayField[] = "oplogReplay";
- const char kNoCursorTimeoutField[] = "noCursorTimeout";
- const char kAwaitDataField[] = "awaitData";
- const char kPartialField[] = "partial";
-
-} // namespace
-
- // static
- StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(
- const NamespaceString& nss,
- const BSONObj& cmdObj,
- bool isExplain) {
-
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
- pq->_ns = nss.ns();
- pq->_fromCommand = true;
- pq->_explain = isExplain;
-
- // Parse the command BSON by looping through one element at a time.
- BSONObjIterator it(cmdObj);
- while (it.more()) {
- BSONElement el = it.next();
- const char* fieldName = el.fieldName();
- if (str::equals(fieldName, kCmdName)) {
- Status status = checkFieldType(el, String);
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+}
+
+// Find command field names.
+const char kCmdName[] = "find";
+const char kFilterField[] = "filter";
+const char kProjectionField[] = "projection";
+const char kSortField[] = "sort";
+const char kHintField[] = "hint";
+const char kSkipField[] = "skip";
+const char kLimitField[] = "limit";
+const char kBatchSizeField[] = "batchSize";
+const char kSingleBatchField[] = "singleBatch";
+const char kCommentField[] = "comment";
+const char kMaxScanField[] = "maxScan";
+const char kMaxField[] = "max";
+const char kMinField[] = "min";
+const char kReturnKeyField[] = "returnKey";
+const char kShowRecordIdField[] = "showRecordId";
+const char kSnapshotField[] = "snapshot";
+const char kTailableField[] = "tailable";
+const char kOplogReplayField[] = "oplogReplay";
+const char kNoCursorTimeoutField[] = "noCursorTimeout";
+const char kAwaitDataField[] = "awaitData";
+const char kPartialField[] = "partial";
+
+} // namespace
+
+// static
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(
+ const NamespaceString& nss, const BSONObj& cmdObj, bool isExplain) {
+ unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
+ pq->_ns = nss.ns();
+ pq->_fromCommand = true;
+ pq->_explain = isExplain;
+
+ // Parse the command BSON by looping through one element at a time.
+ BSONObjIterator it(cmdObj);
+ while (it.more()) {
+ BSONElement el = it.next();
+ const char* fieldName = el.fieldName();
+ if (str::equals(fieldName, kCmdName)) {
+ Status status = checkFieldType(el, String);
+ if (!status.isOK()) {
+ return status;
+ }
+ } else if (str::equals(fieldName, kFilterField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ pq->_filter = el.Obj().getOwned();
+ } else if (str::equals(fieldName, kProjectionField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ pq->_proj = el.Obj().getOwned();
+ } else if (str::equals(fieldName, kSortField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ // Sort document normalization.
+ BSONObj sort = el.Obj().getOwned();
+ if (!isValidSortOrder(sort)) {
+ return Status(ErrorCodes::BadValue, "bad sort specification");
+ }
+
+ pq->_sort = sort;
+ } else if (str::equals(fieldName, kHintField)) {
+ BSONObj hintObj;
+ if (Object == el.type()) {
+ hintObj = cmdObj["hint"].Obj().getOwned();
+ } else if (String == el.type()) {
+ hintObj = el.wrap("$hint");
+ } else {
+ return Status(ErrorCodes::FailedToParse,
+ "hint must be either a string or nested object");
}
- else if (str::equals(fieldName, kFilterField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- pq->_filter = el.Obj().getOwned();
+ pq->_hint = hintObj;
+ } else if (str::equals(fieldName, kSkipField)) {
+ if (!el.isNumber()) {
+ str::stream ss;
+ ss << "Failed to parse: " << cmdObj.toString() << ". "
+ << "'skip' field must be numeric.";
+ return Status(ErrorCodes::FailedToParse, ss);
}
- else if (str::equals(fieldName, kProjectionField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- pq->_proj = el.Obj().getOwned();
+ int skip = el.numberInt();
+ if (skip < 0) {
+ return Status(ErrorCodes::BadValue, "skip value must be non-negative");
}
- else if (str::equals(fieldName, kSortField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- // Sort document normalization.
- BSONObj sort = el.Obj().getOwned();
- if (!isValidSortOrder(sort)) {
- return Status(ErrorCodes::BadValue, "bad sort specification");
- }
-
- pq->_sort = sort;
+ pq->_skip = skip;
+ } else if (str::equals(fieldName, kLimitField)) {
+ if (!el.isNumber()) {
+ str::stream ss;
+ ss << "Failed to parse: " << cmdObj.toString() << ". "
+ << "'limit' field must be numeric.";
+ return Status(ErrorCodes::FailedToParse, ss);
}
- else if (str::equals(fieldName, kHintField)) {
- BSONObj hintObj;
- if (Object == el.type()) {
- hintObj = cmdObj["hint"].Obj().getOwned();
- }
- else if (String == el.type()) {
- hintObj = el.wrap("$hint");
- }
- else {
- return Status(ErrorCodes::FailedToParse,
- "hint must be either a string or nested object");
- }
- pq->_hint = hintObj;
+ int limit = el.numberInt();
+ if (limit <= 0) {
+ return Status(ErrorCodes::BadValue, "limit value must be positive");
}
- else if (str::equals(fieldName, kSkipField)) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'skip' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
- int skip = el.numberInt();
- if (skip < 0) {
- return Status(ErrorCodes::BadValue, "skip value must be non-negative");
- }
-
- pq->_skip = skip;
+ pq->_limit = limit;
+ } else if (str::equals(fieldName, kBatchSizeField)) {
+ if (!el.isNumber()) {
+ str::stream ss;
+ ss << "Failed to parse: " << cmdObj.toString() << ". "
+ << "'batchSize' field must be numeric.";
+ return Status(ErrorCodes::FailedToParse, ss);
}
- else if (str::equals(fieldName, kLimitField)) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'limit' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
- int limit = el.numberInt();
- if (limit <= 0) {
- return Status(ErrorCodes::BadValue, "limit value must be positive");
- }
-
- pq->_limit = limit;
+ int batchSize = el.numberInt();
+ if (batchSize < 0) {
+ return Status(ErrorCodes::BadValue, "batchSize value must be non-negative");
}
- else if (str::equals(fieldName, kBatchSizeField)) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'batchSize' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
-
- int batchSize = el.numberInt();
- if (batchSize < 0) {
- return Status(ErrorCodes::BadValue, "batchSize value must be non-negative");
- }
- pq->_batchSize = batchSize;
+ pq->_batchSize = batchSize;
+ } else if (str::equals(fieldName, kSingleBatchField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kSingleBatchField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_wantMore = !el.boolean();
+ pq->_wantMore = !el.boolean();
+ } else if (str::equals(fieldName, kCommentField)) {
+ Status status = checkFieldType(el, String);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kCommentField)) {
- Status status = checkFieldType(el, String);
- if (!status.isOK()) {
- return status;
- }
- pq->_comment = el.str();
+ pq->_comment = el.str();
+ } else if (str::equals(fieldName, kMaxScanField)) {
+ if (!el.isNumber()) {
+ str::stream ss;
+ ss << "Failed to parse: " << cmdObj.toString() << ". "
+ << "'maxScan' field must be numeric.";
+ return Status(ErrorCodes::FailedToParse, ss);
}
- else if (str::equals(fieldName, kMaxScanField)) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'maxScan' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
- int maxScan = el.numberInt();
- if (maxScan < 0) {
- return Status(ErrorCodes::BadValue, "maxScan value must be non-negative");
- }
-
- pq->_maxScan = maxScan;
+ int maxScan = el.numberInt();
+ if (maxScan < 0) {
+ return Status(ErrorCodes::BadValue, "maxScan value must be non-negative");
}
- else if (str::equals(fieldName, cmdOptionMaxTimeMS.c_str())) {
- StatusWith<int> maxTimeMS = parseMaxTimeMS(el);
- if (!maxTimeMS.isOK()) {
- return maxTimeMS.getStatus();
- }
- pq->_maxTimeMS = maxTimeMS.getValue();
+ pq->_maxScan = maxScan;
+ } else if (str::equals(fieldName, cmdOptionMaxTimeMS.c_str())) {
+ StatusWith<int> maxTimeMS = parseMaxTimeMS(el);
+ if (!maxTimeMS.isOK()) {
+ return maxTimeMS.getStatus();
}
- else if (str::equals(fieldName, kMinField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- pq->_min = el.Obj().getOwned();
+ pq->_maxTimeMS = maxTimeMS.getValue();
+ } else if (str::equals(fieldName, kMinField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kMaxField)) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- pq->_max = el.Obj().getOwned();
+ pq->_min = el.Obj().getOwned();
+ } else if (str::equals(fieldName, kMaxField)) {
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kReturnKeyField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_returnKey = el.boolean();
+ pq->_max = el.Obj().getOwned();
+ } else if (str::equals(fieldName, kReturnKeyField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kShowRecordIdField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_showRecordId = el.boolean();
+ pq->_returnKey = el.boolean();
+ } else if (str::equals(fieldName, kShowRecordIdField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kSnapshotField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_snapshot = el.boolean();
+ pq->_showRecordId = el.boolean();
+ } else if (str::equals(fieldName, kSnapshotField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, "$readPreference")) {
- pq->_hasReadPref = true;
- }
- else if (str::equals(fieldName, kTailableField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_tailable = el.boolean();
+ pq->_snapshot = el.boolean();
+ } else if (str::equals(fieldName, "$readPreference")) {
+ pq->_hasReadPref = true;
+ } else if (str::equals(fieldName, kTailableField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, "slaveOk")) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_slaveOk = el.boolean();
+ pq->_tailable = el.boolean();
+ } else if (str::equals(fieldName, "slaveOk")) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kOplogReplayField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_oplogReplay = el.boolean();
+ pq->_slaveOk = el.boolean();
+ } else if (str::equals(fieldName, kOplogReplayField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kNoCursorTimeoutField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_noCursorTimeout = el.boolean();
+ pq->_oplogReplay = el.boolean();
+ } else if (str::equals(fieldName, kNoCursorTimeoutField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kAwaitDataField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_awaitData = el.boolean();
+ pq->_noCursorTimeout = el.boolean();
+ } else if (str::equals(fieldName, kAwaitDataField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else if (str::equals(fieldName, kPartialField)) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- pq->_partial = el.boolean();
- }
- else if (str::equals(fieldName, "options")) {
- // 3.0.x versions of the shell may generate an explain of a find command with an
- // 'options' field. We accept this only if the 'options' field is empty so that
- // the shell's explain implementation is forwards compatible.
- //
- // TODO: Remove for 3.4.
- if (!pq->isExplain()) {
- return Status(ErrorCodes::FailedToParse,
- "Field 'options' is only allowed for explain.");
- }
-
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- BSONObj optionsObj = el.Obj();
- if (!optionsObj.isEmpty()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Failed to parse options: "
- << optionsObj.toString() << ". "
- << "You may need to update your shell or driver.");
- }
- }
- else if (str::equals(fieldName,
- repl::ReadAfterOpTimeArgs::kRootFieldName.c_str())) {
- // read after optime parsing is handled elsewhere.
- continue;
+ pq->_awaitData = el.boolean();
+ } else if (str::equals(fieldName, kPartialField)) {
+ Status status = checkFieldType(el, Bool);
+ if (!status.isOK()) {
+ return status;
}
- else {
+
+ pq->_partial = el.boolean();
+ } else if (str::equals(fieldName, "options")) {
+ // 3.0.x versions of the shell may generate an explain of a find command with an
+ // 'options' field. We accept this only if the 'options' field is empty so that
+ // the shell's explain implementation is forwards compatible.
+ //
+ // TODO: Remove for 3.4.
+ if (!pq->isExplain()) {
return Status(ErrorCodes::FailedToParse,
- str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
- << "Unrecognized field '" << fieldName << "'.");
+ "Field 'options' is only allowed for explain.");
}
- }
- pq->addMetaProjection();
+ Status status = checkFieldType(el, Object);
+ if (!status.isOK()) {
+ return status;
+ }
- Status validateStatus = pq->validateFindCmd();
- if (!validateStatus.isOK()) {
- return validateStatus;
+ BSONObj optionsObj = el.Obj();
+ if (!optionsObj.isEmpty()) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Failed to parse options: " << optionsObj.toString()
+ << ". "
+ << "You may need to update your shell or driver.");
+ }
+ } else if (str::equals(fieldName, repl::ReadAfterOpTimeArgs::kRootFieldName.c_str())) {
+ // read after optime parsing is handled elsewhere.
+ continue;
+ } else {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
+ << "Unrecognized field '" << fieldName << "'.");
}
+ }
- return std::move(pq);
- }
-
- // static
- StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsOpQuery(const string& ns,
- int ntoskip,
- int ntoreturn,
- int queryOptions,
- const BSONObj& query,
- const BSONObj& proj,
- const BSONObj& sort,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot,
- bool explain) {
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
- pq->_sort = sort.getOwned();
- pq->_hint = hint.getOwned();
- pq->_min = minObj.getOwned();
- pq->_max = maxObj.getOwned();
- pq->_snapshot = snapshot;
- pq->_explain = explain;
-
- Status status = pq->init(ns, ntoskip, ntoreturn, queryOptions, query, proj, false);
- if (!status.isOK()) {
- return status;
- }
+ pq->addMetaProjection();
- return std::move(pq);
+ Status validateStatus = pq->validateFindCmd();
+ if (!validateStatus.isOK()) {
+ return validateStatus;
}
- // static
- StatusWith<unique_ptr<LiteParsedQuery>>
- LiteParsedQuery::makeAsFindCmd(const NamespaceString& ns,
- const BSONObj& query,
- boost::optional<int> limit) {
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
+ return std::move(pq);
+}
+
+// static
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsOpQuery(const string& ns,
+ int ntoskip,
+ int ntoreturn,
+ int queryOptions,
+ const BSONObj& query,
+ const BSONObj& proj,
+ const BSONObj& sort,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot,
+ bool explain) {
+ unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
+ pq->_sort = sort.getOwned();
+ pq->_hint = hint.getOwned();
+ pq->_min = minObj.getOwned();
+ pq->_max = maxObj.getOwned();
+ pq->_snapshot = snapshot;
+ pq->_explain = explain;
+
+ Status status = pq->init(ns, ntoskip, ntoreturn, queryOptions, query, proj, false);
+ if (!status.isOK()) {
+ return status;
+ }
- pq->_fromCommand = true;
- pq->_ns = ns.ns();
- pq->_filter = query.getOwned();
+ return std::move(pq);
+}
- if (limit) {
- if (limit <= 0) {
- return Status(ErrorCodes::BadValue, "limit value must be positive");
- }
-
- pq->_limit = std::move(limit);
- }
+// static
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeAsFindCmd(const NamespaceString& ns,
+ const BSONObj& query,
+ boost::optional<int> limit) {
+ unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
- pq->addMetaProjection();
+ pq->_fromCommand = true;
+ pq->_ns = ns.ns();
+ pq->_filter = query.getOwned();
- Status validateStatus = pq->validateFindCmd();
- if (!validateStatus.isOK()) {
- return validateStatus;
+ if (limit) {
+ if (limit <= 0) {
+ return Status(ErrorCodes::BadValue, "limit value must be positive");
}
- return std::move(pq);
+ pq->_limit = std::move(limit);
}
- BSONObj LiteParsedQuery::asFindCommand() const {
- BSONObjBuilder bob;
+ pq->addMetaProjection();
- const NamespaceString nss(_ns);
- bob.append(kCmdName, nss.coll());
+ Status validateStatus = pq->validateFindCmd();
+ if (!validateStatus.isOK()) {
+ return validateStatus;
+ }
- if (!_filter.isEmpty()) {
- bob.append(kFilterField, _filter);
- }
+ return std::move(pq);
+}
- if (!_proj.isEmpty()) {
- bob.append(kProjectionField, _proj);
- }
+BSONObj LiteParsedQuery::asFindCommand() const {
+ BSONObjBuilder bob;
- if (!_sort.isEmpty()) {
- bob.append(kSortField, _sort);
- }
+ const NamespaceString nss(_ns);
+ bob.append(kCmdName, nss.coll());
- if (!_hint.isEmpty()) {
- bob.append(kHintField, _hint);
- }
+ if (!_filter.isEmpty()) {
+ bob.append(kFilterField, _filter);
+ }
- if (_skip > 0) {
- bob.append(kSkipField, _skip);
- }
+ if (!_proj.isEmpty()) {
+ bob.append(kProjectionField, _proj);
+ }
- if (_limit) {
- bob.append(kLimitField, *_limit);
- }
+ if (!_sort.isEmpty()) {
+ bob.append(kSortField, _sort);
+ }
- if (_batchSize) {
- bob.append(kBatchSizeField, *_batchSize);
- }
+ if (!_hint.isEmpty()) {
+ bob.append(kHintField, _hint);
+ }
- if (!_wantMore) {
- bob.append(kSingleBatchField, true);
- }
+ if (_skip > 0) {
+ bob.append(kSkipField, _skip);
+ }
- if (!_comment.empty()) {
- bob.append(kCommentField, _comment);
- }
+ if (_limit) {
+ bob.append(kLimitField, *_limit);
+ }
- if (_maxScan > 0) {
- bob.append(kMaxScanField, _maxScan);
- }
+ if (_batchSize) {
+ bob.append(kBatchSizeField, *_batchSize);
+ }
- if (_maxTimeMS > 0) {
- bob.append(cmdOptionMaxTimeMS, _maxTimeMS);
- }
+ if (!_wantMore) {
+ bob.append(kSingleBatchField, true);
+ }
- if (!_max.isEmpty()) {
- bob.append(kMaxField, _max);
- }
+ if (!_comment.empty()) {
+ bob.append(kCommentField, _comment);
+ }
- if (!_min.isEmpty()) {
- bob.append(kMinField, _min);
- }
+ if (_maxScan > 0) {
+ bob.append(kMaxScanField, _maxScan);
+ }
- if (_returnKey) {
- bob.append(kReturnKeyField, true);
- }
+ if (_maxTimeMS > 0) {
+ bob.append(cmdOptionMaxTimeMS, _maxTimeMS);
+ }
- if (_showRecordId) {
- bob.append(kShowRecordIdField, true);
- }
+ if (!_max.isEmpty()) {
+ bob.append(kMaxField, _max);
+ }
- if (_snapshot) {
- bob.append(kSnapshotField, true);
- }
+ if (!_min.isEmpty()) {
+ bob.append(kMinField, _min);
+ }
- if (_tailable) {
- bob.append(kTailableField, true);
- }
+ if (_returnKey) {
+ bob.append(kReturnKeyField, true);
+ }
- if (_oplogReplay) {
- bob.append(kOplogReplayField, true);
- }
+ if (_showRecordId) {
+ bob.append(kShowRecordIdField, true);
+ }
- if (_noCursorTimeout) {
- bob.append(kNoCursorTimeoutField, true);
- }
+ if (_snapshot) {
+ bob.append(kSnapshotField, true);
+ }
- if (_awaitData) {
- bob.append(kAwaitDataField, true);
- }
+ if (_tailable) {
+ bob.append(kTailableField, true);
+ }
- if (_partial) {
- bob.append(kPartialField, true);
- }
+ if (_oplogReplay) {
+ bob.append(kOplogReplayField, true);
+ }
- return bob.obj();
+ if (_noCursorTimeout) {
+ bob.append(kNoCursorTimeoutField, true);
}
- void LiteParsedQuery::addReturnKeyMetaProj() {
- BSONObjBuilder projBob;
- projBob.appendElements(_proj);
- // We use $$ because it's never going to show up in a user's projection.
- // The exact text doesn't matter.
- BSONObj indexKey = BSON("$$" <<
- BSON("$meta" << LiteParsedQuery::metaIndexKey));
- projBob.append(indexKey.firstElement());
- _proj = projBob.obj();
+ if (_awaitData) {
+ bob.append(kAwaitDataField, true);
}
- void LiteParsedQuery::addShowRecordIdMetaProj() {
- BSONObjBuilder projBob;
- projBob.appendElements(_proj);
- BSONObj metaRecordId = BSON("$recordId" <<
- BSON("$meta" << LiteParsedQuery::metaRecordId));
- projBob.append(metaRecordId.firstElement());
- _proj = projBob.obj();
+ if (_partial) {
+ bob.append(kPartialField, true);
}
- Status LiteParsedQuery::validate() const {
- // Min and Max objects must have the same fields.
- if (!_min.isEmpty() && !_max.isEmpty()) {
- if (!_min.isFieldNamePrefixOf(_max) ||
- (_min.nFields() != _max.nFields())) {
- return Status(ErrorCodes::BadValue, "min and max must have the same field names");
- }
+ return bob.obj();
+}
+
+void LiteParsedQuery::addReturnKeyMetaProj() {
+ BSONObjBuilder projBob;
+ projBob.appendElements(_proj);
+ // We use $$ because it's never going to show up in a user's projection.
+ // The exact text doesn't matter.
+ BSONObj indexKey = BSON("$$" << BSON("$meta" << LiteParsedQuery::metaIndexKey));
+ projBob.append(indexKey.firstElement());
+ _proj = projBob.obj();
+}
+
+void LiteParsedQuery::addShowRecordIdMetaProj() {
+ BSONObjBuilder projBob;
+ projBob.appendElements(_proj);
+ BSONObj metaRecordId = BSON("$recordId" << BSON("$meta" << LiteParsedQuery::metaRecordId));
+ projBob.append(metaRecordId.firstElement());
+ _proj = projBob.obj();
+}
+
+Status LiteParsedQuery::validate() const {
+ // Min and Max objects must have the same fields.
+ if (!_min.isEmpty() && !_max.isEmpty()) {
+ if (!_min.isFieldNamePrefixOf(_max) || (_min.nFields() != _max.nFields())) {
+ return Status(ErrorCodes::BadValue, "min and max must have the same field names");
}
+ }
- // Can't combine a normal sort and a $meta projection on the same field.
- BSONObjIterator projIt(_proj);
- while (projIt.more()) {
- BSONElement projElt = projIt.next();
- if (isTextScoreMeta(projElt)) {
- BSONElement sortElt = _sort[projElt.fieldName()];
- if (!sortElt.eoo() && !isTextScoreMeta(sortElt)) {
- return Status(ErrorCodes::BadValue,
- "can't have a non-$meta sort on a $meta projection");
- }
+ // Can't combine a normal sort and a $meta projection on the same field.
+ BSONObjIterator projIt(_proj);
+ while (projIt.more()) {
+ BSONElement projElt = projIt.next();
+ if (isTextScoreMeta(projElt)) {
+ BSONElement sortElt = _sort[projElt.fieldName()];
+ if (!sortElt.eoo() && !isTextScoreMeta(sortElt)) {
+ return Status(ErrorCodes::BadValue,
+ "can't have a non-$meta sort on a $meta projection");
}
}
+ }
- // All fields with a $meta sort must have a corresponding $meta projection.
- BSONObjIterator sortIt(_sort);
- while (sortIt.more()) {
- BSONElement sortElt = sortIt.next();
- if (isTextScoreMeta(sortElt)) {
- BSONElement projElt = _proj[sortElt.fieldName()];
- if (projElt.eoo() || !isTextScoreMeta(projElt)) {
- return Status(ErrorCodes::BadValue,
- "must have $meta projection for all $meta sort keys");
- }
+ // All fields with a $meta sort must have a corresponding $meta projection.
+ BSONObjIterator sortIt(_sort);
+ while (sortIt.more()) {
+ BSONElement sortElt = sortIt.next();
+ if (isTextScoreMeta(sortElt)) {
+ BSONElement projElt = _proj[sortElt.fieldName()];
+ if (projElt.eoo() || !isTextScoreMeta(projElt)) {
+ return Status(ErrorCodes::BadValue,
+ "must have $meta projection for all $meta sort keys");
}
}
+ }
- if (_snapshot) {
- if (!_sort.isEmpty()) {
- return Status(ErrorCodes::BadValue, "E12001 can't use sort with $snapshot");
- }
- if (!_hint.isEmpty()) {
- return Status(ErrorCodes::BadValue, "E12002 can't use hint with $snapshot");
- }
+ if (_snapshot) {
+ if (!_sort.isEmpty()) {
+ return Status(ErrorCodes::BadValue, "E12001 can't use sort with $snapshot");
+ }
+ if (!_hint.isEmpty()) {
+ return Status(ErrorCodes::BadValue, "E12002 can't use hint with $snapshot");
}
-
- return Status::OK();
}
- // static
- StatusWith<int> LiteParsedQuery::parseMaxTimeMSCommand(const BSONObj& cmdObj) {
- return parseMaxTimeMS(cmdObj[cmdOptionMaxTimeMS]);
+ return Status::OK();
+}
+
+// static
+StatusWith<int> LiteParsedQuery::parseMaxTimeMSCommand(const BSONObj& cmdObj) {
+ return parseMaxTimeMS(cmdObj[cmdOptionMaxTimeMS]);
+}
+
+// static
+StatusWith<int> LiteParsedQuery::parseMaxTimeMSQuery(const BSONObj& queryObj) {
+ return parseMaxTimeMS(queryObj[queryOptionMaxTimeMS]);
+}
+
+// static
+StatusWith<int> LiteParsedQuery::parseMaxTimeMS(const BSONElement& maxTimeMSElt) {
+ if (!maxTimeMSElt.eoo() && !maxTimeMSElt.isNumber()) {
+ return StatusWith<int>(
+ ErrorCodes::BadValue,
+ (StringBuilder() << maxTimeMSElt.fieldNameStringData() << " must be a number").str());
}
-
- // static
- StatusWith<int> LiteParsedQuery::parseMaxTimeMSQuery(const BSONObj& queryObj) {
- return parseMaxTimeMS(queryObj[queryOptionMaxTimeMS]);
+ long long maxTimeMSLongLong = maxTimeMSElt.safeNumberLong(); // returns 0 on EOO
+ if (maxTimeMSLongLong < 0 || maxTimeMSLongLong > INT_MAX) {
+ return StatusWith<int>(
+ ErrorCodes::BadValue,
+ (StringBuilder() << maxTimeMSElt.fieldNameStringData() << " is out of range").str());
}
-
- // static
- StatusWith<int> LiteParsedQuery::parseMaxTimeMS(const BSONElement& maxTimeMSElt) {
- if (!maxTimeMSElt.eoo() && !maxTimeMSElt.isNumber()) {
- return StatusWith<int>(ErrorCodes::BadValue,
- (StringBuilder()
- << maxTimeMSElt.fieldNameStringData()
- << " must be a number").str());
- }
- long long maxTimeMSLongLong = maxTimeMSElt.safeNumberLong(); // returns 0 on EOO
- if (maxTimeMSLongLong < 0 || maxTimeMSLongLong > INT_MAX) {
- return StatusWith<int>(ErrorCodes::BadValue,
- (StringBuilder()
- << maxTimeMSElt.fieldNameStringData()
- << " is out of range").str());
- }
- double maxTimeMSDouble = maxTimeMSElt.numberDouble();
- if (maxTimeMSElt.type() == mongo::NumberDouble
- && floor(maxTimeMSDouble) != maxTimeMSDouble) {
- return StatusWith<int>(ErrorCodes::BadValue,
- (StringBuilder()
- << maxTimeMSElt.fieldNameStringData()
- << " has non-integral value").str());
- }
- return StatusWith<int>(static_cast<int>(maxTimeMSLongLong));
+ double maxTimeMSDouble = maxTimeMSElt.numberDouble();
+ if (maxTimeMSElt.type() == mongo::NumberDouble && floor(maxTimeMSDouble) != maxTimeMSDouble) {
+ return StatusWith<int>(ErrorCodes::BadValue,
+ (StringBuilder() << maxTimeMSElt.fieldNameStringData()
+ << " has non-integral value").str());
}
+ return StatusWith<int>(static_cast<int>(maxTimeMSLongLong));
+}
- // static
- bool LiteParsedQuery::isTextScoreMeta(BSONElement elt) {
- // elt must be foo: {$meta: "textScore"}
- if (mongo::Object != elt.type()) {
- return false;
- }
- BSONObj metaObj = elt.Obj();
- BSONObjIterator metaIt(metaObj);
- // must have exactly 1 element
- if (!metaIt.more()) {
- return false;
- }
- BSONElement metaElt = metaIt.next();
- if (!str::equals("$meta", metaElt.fieldName())) {
- return false;
- }
- if (mongo::String != metaElt.type()) {
- return false;
- }
- if (LiteParsedQuery::metaTextScore != metaElt.valuestr()) {
- return false;
- }
- // must have exactly 1 element
- if (metaIt.more()) {
- return false;
- }
- return true;
+// static
+bool LiteParsedQuery::isTextScoreMeta(BSONElement elt) {
+ // elt must be foo: {$meta: "textScore"}
+ if (mongo::Object != elt.type()) {
+ return false;
}
+ BSONObj metaObj = elt.Obj();
+ BSONObjIterator metaIt(metaObj);
+ // must have exactly 1 element
+ if (!metaIt.more()) {
+ return false;
+ }
+ BSONElement metaElt = metaIt.next();
+ if (!str::equals("$meta", metaElt.fieldName())) {
+ return false;
+ }
+ if (mongo::String != metaElt.type()) {
+ return false;
+ }
+ if (LiteParsedQuery::metaTextScore != metaElt.valuestr()) {
+ return false;
+ }
+ // must have exactly 1 element
+ if (metaIt.more()) {
+ return false;
+ }
+ return true;
+}
- // static
- bool LiteParsedQuery::isRecordIdMeta(BSONElement elt) {
- // elt must be foo: {$meta: "recordId"}
- if (mongo::Object != elt.type()) {
- return false;
- }
- BSONObj metaObj = elt.Obj();
- BSONObjIterator metaIt(metaObj);
- // must have exactly 1 element
- if (!metaIt.more()) {
- return false;
- }
- BSONElement metaElt = metaIt.next();
- if (!str::equals("$meta", metaElt.fieldName())) {
- return false;
- }
- if (mongo::String != metaElt.type()) {
+// static
+bool LiteParsedQuery::isRecordIdMeta(BSONElement elt) {
+ // elt must be foo: {$meta: "recordId"}
+ if (mongo::Object != elt.type()) {
+ return false;
+ }
+ BSONObj metaObj = elt.Obj();
+ BSONObjIterator metaIt(metaObj);
+ // must have exactly 1 element
+ if (!metaIt.more()) {
+ return false;
+ }
+ BSONElement metaElt = metaIt.next();
+ if (!str::equals("$meta", metaElt.fieldName())) {
+ return false;
+ }
+ if (mongo::String != metaElt.type()) {
+ return false;
+ }
+ if (LiteParsedQuery::metaRecordId != metaElt.valuestr()) {
+ return false;
+ }
+ // must have exactly 1 element
+ if (metaIt.more()) {
+ return false;
+ }
+ return true;
+}
+
+// static
+bool LiteParsedQuery::isValidSortOrder(const BSONObj& sortObj) {
+ BSONObjIterator i(sortObj);
+ while (i.more()) {
+ BSONElement e = i.next();
+ // fieldNameSize() includes NULL terminator. For empty field name,
+ // we should be checking for 1 instead of 0.
+ if (1 == e.fieldNameSize()) {
return false;
}
- if (LiteParsedQuery::metaRecordId != metaElt.valuestr()) {
- return false;
+ if (isTextScoreMeta(e)) {
+ continue;
}
- // must have exactly 1 element
- if (metaIt.more()) {
+ long long n = e.safeNumberLong();
+ if (!(e.isNumber() && (n == -1LL || n == 1LL))) {
return false;
}
- return true;
}
-
- // static
- bool LiteParsedQuery::isValidSortOrder(const BSONObj& sortObj) {
- BSONObjIterator i(sortObj);
- while (i.more()) {
- BSONElement e = i.next();
- // fieldNameSize() includes NULL terminator. For empty field name,
- // we should be checking for 1 instead of 0.
- if (1 == e.fieldNameSize()) {
- return false;
- }
- if (isTextScoreMeta(e)) {
- continue;
- }
- long long n = e.safeNumberLong();
- if (!(e.isNumber() && (n == -1LL || n == 1LL))) {
- return false;
- }
- }
- return true;
- }
-
- // static
- bool LiteParsedQuery::isQueryIsolated(const BSONObj& query) {
- BSONObjIterator iter(query);
- while (iter.more()) {
- BSONElement elt = iter.next();
- if (str::equals(elt.fieldName(), "$isolated") && elt.trueValue())
- return true;
- if (str::equals(elt.fieldName(), "$atomic") && elt.trueValue())
- return true;
- }
- return false;
+ return true;
+}
+
+// static
+bool LiteParsedQuery::isQueryIsolated(const BSONObj& query) {
+ BSONObjIterator iter(query);
+ while (iter.more()) {
+ BSONElement elt = iter.next();
+ if (str::equals(elt.fieldName(), "$isolated") && elt.trueValue())
+ return true;
+ if (str::equals(elt.fieldName(), "$atomic") && elt.trueValue())
+ return true;
}
-
- //
- // Old LiteParsedQuery parsing code: SOON TO BE DEPRECATED.
- //
-
- // static
- StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::fromLegacyQueryMessage(
- const QueryMessage& qm) {
-
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
-
- Status status = pq->init(qm.ns,
- qm.ntoskip,
- qm.ntoreturn,
- qm.queryOptions,
- qm.query,
- qm.fields,
- true);
- if (!status.isOK()) {
- return status;
- }
-
- return std::move(pq);
+ return false;
+}
+
+//
+// Old LiteParsedQuery parsing code: SOON TO BE DEPRECATED.
+//
+
+// static
+StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::fromLegacyQueryMessage(
+ const QueryMessage& qm) {
+ unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery());
+
+ Status status =
+ pq->init(qm.ns, qm.ntoskip, qm.ntoreturn, qm.queryOptions, qm.query, qm.fields, true);
+ if (!status.isOK()) {
+ return status;
}
- Status LiteParsedQuery::init(const string& ns, int ntoskip, int ntoreturn, int queryOptions,
- const BSONObj& queryObj, const BSONObj& proj,
- bool fromQueryMessage) {
- _ns = ns;
- _skip = ntoskip;
- _proj = proj.getOwned();
+ return std::move(pq);
+}
+
+Status LiteParsedQuery::init(const string& ns,
+ int ntoskip,
+ int ntoreturn,
+ int queryOptions,
+ const BSONObj& queryObj,
+ const BSONObj& proj,
+ bool fromQueryMessage) {
+ _ns = ns;
+ _skip = ntoskip;
+ _proj = proj.getOwned();
+
+ if (ntoreturn) {
+ _batchSize = ntoreturn;
+ }
- if (ntoreturn) {
- _batchSize = ntoreturn;
- }
+ // Initialize flags passed as 'queryOptions' bit vector.
+ initFromInt(queryOptions);
- // Initialize flags passed as 'queryOptions' bit vector.
- initFromInt(queryOptions);
+ if (_skip < 0) {
+ return Status(ErrorCodes::BadValue, "bad skip value in query");
+ }
- if (_skip < 0) {
- return Status(ErrorCodes::BadValue, "bad skip value in query");
+ if (_batchSize && *_batchSize < 0) {
+ if (*_batchSize == std::numeric_limits<int>::min()) {
+ // _batchSize is negative but can't be negated.
+ return Status(ErrorCodes::BadValue, "bad limit value in query");
}
- if (_batchSize && *_batchSize < 0) {
- if (*_batchSize == std::numeric_limits<int>::min()) {
- // _batchSize is negative but can't be negated.
- return Status(ErrorCodes::BadValue, "bad limit value in query");
- }
+ // A negative number indicates that the cursor should be closed after the first batch.
+ _wantMore = false;
+ _batchSize = -*_batchSize;
+ }
- // A negative number indicates that the cursor should be closed after the first batch.
- _wantMore = false;
- _batchSize = -*_batchSize;
+ if (fromQueryMessage) {
+ BSONElement queryField = queryObj["query"];
+ if (!queryField.isABSONObj()) {
+ queryField = queryObj["$query"];
}
-
- if (fromQueryMessage) {
- BSONElement queryField = queryObj["query"];
- if (!queryField.isABSONObj()) { queryField = queryObj["$query"]; }
- if (queryField.isABSONObj()) {
- _filter = queryField.embeddedObject().getOwned();
- Status status = initFullQuery(queryObj);
- if (!status.isOK()) { return status; }
- }
- else {
- _filter = queryObj.getOwned();
+ if (queryField.isABSONObj()) {
+ _filter = queryField.embeddedObject().getOwned();
+ Status status = initFullQuery(queryObj);
+ if (!status.isOK()) {
+ return status;
}
- }
- else {
- // This is the debugging code path.
+ } else {
_filter = queryObj.getOwned();
}
-
- _hasReadPref = queryObj.hasField("$readPreference");
-
- if (!isValidSortOrder(_sort)) {
- return Status(ErrorCodes::BadValue, "bad sort specification");
- }
-
- return validate();
+ } else {
+ // This is the debugging code path.
+ _filter = queryObj.getOwned();
}
- Status LiteParsedQuery::initFullQuery(const BSONObj& top) {
- BSONObjIterator i(top);
-
- while (i.more()) {
- BSONElement e = i.next();
- const char* name = e.fieldName();
+ _hasReadPref = queryObj.hasField("$readPreference");
- if (0 == strcmp("$orderby", name) || 0 == strcmp("orderby", name)) {
- if (Object == e.type()) {
- _sort = e.embeddedObject().getOwned();
- }
- else if (Array == e.type()) {
- _sort = e.embeddedObject();
-
- // TODO: Is this ever used? I don't think so.
- // Quote:
- // This is for languages whose "objects" are not well ordered (JSON is well
- // ordered).
- // [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
- // note: this is slow, but that is ok as order will have very few pieces
- BSONObjBuilder b;
- char p[2] = "0";
-
- while (1) {
- BSONObj j = _sort.getObjectField(p);
- if (j.isEmpty()) { break; }
- BSONElement e = j.firstElement();
- if (e.eoo()) {
- return Status(ErrorCodes::BadValue, "bad order array");
- }
- if (!e.isNumber()) {
- return Status(ErrorCodes::BadValue, "bad order array [2]");
- }
- b.append(e);
- (*p)++;
- if (!(*p <= '9')) {
- return Status(ErrorCodes::BadValue, "too many ordering elements");
- }
- }
+ if (!isValidSortOrder(_sort)) {
+ return Status(ErrorCodes::BadValue, "bad sort specification");
+ }
- _sort = b.obj();
- }
- else {
- return Status(ErrorCodes::BadValue, "sort must be object or array");
- }
- }
- else if ('$' == *name) {
- name++;
- if (str::equals("explain", name)) {
- // Won't throw.
- _explain = e.trueValue();
- }
- else if (str::equals("snapshot", name)) {
- // Won't throw.
- _snapshot = e.trueValue();
- }
- else if (str::equals("min", name)) {
- if (!e.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "$min must be a BSONObj");
+ return validate();
+}
+
+Status LiteParsedQuery::initFullQuery(const BSONObj& top) {
+ BSONObjIterator i(top);
+
+ while (i.more()) {
+ BSONElement e = i.next();
+ const char* name = e.fieldName();
+
+ if (0 == strcmp("$orderby", name) || 0 == strcmp("orderby", name)) {
+ if (Object == e.type()) {
+ _sort = e.embeddedObject().getOwned();
+ } else if (Array == e.type()) {
+ _sort = e.embeddedObject();
+
+ // TODO: Is this ever used? I don't think so.
+ // Quote:
+ // This is for languages whose "objects" are not well ordered (JSON is well
+ // ordered).
+ // [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
+ // note: this is slow, but that is ok as order will have very few pieces
+ BSONObjBuilder b;
+ char p[2] = "0";
+
+ while (1) {
+ BSONObj j = _sort.getObjectField(p);
+ if (j.isEmpty()) {
+ break;
}
- _min = e.embeddedObject().getOwned();
- }
- else if (str::equals("max", name)) {
- if (!e.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "$max must be a BSONObj");
- }
- _max = e.embeddedObject().getOwned();
- }
- else if (str::equals("hint", name)) {
- if (e.isABSONObj()) {
- _hint = e.embeddedObject().getOwned();
- }
- else if (String == e.type()) {
- _hint = e.wrap();
+ BSONElement e = j.firstElement();
+ if (e.eoo()) {
+ return Status(ErrorCodes::BadValue, "bad order array");
}
- else {
- return Status(ErrorCodes::BadValue,
- "$hint must be either a string or nested object");
+ if (!e.isNumber()) {
+ return Status(ErrorCodes::BadValue, "bad order array [2]");
}
- }
- else if (str::equals("returnKey", name)) {
- // Won't throw.
- if (e.trueValue()) {
- _returnKey = true;
- addReturnKeyMetaProj();
- }
- }
- else if (str::equals("maxScan", name)) {
- // Won't throw.
- _maxScan = e.numberInt();
- }
- else if (str::equals("showDiskLoc", name)) {
- // Won't throw.
- if (e.trueValue()) {
- _showRecordId = true;
- addShowRecordIdMetaProj();
+ b.append(e);
+ (*p)++;
+ if (!(*p <= '9')) {
+ return Status(ErrorCodes::BadValue, "too many ordering elements");
}
}
- else if (str::equals("maxTimeMS", name)) {
- StatusWith<int> maxTimeMS = parseMaxTimeMS(e);
- if (!maxTimeMS.isOK()) {
- return maxTimeMS.getStatus();
- }
- _maxTimeMS = maxTimeMS.getValue();
+
+ _sort = b.obj();
+ } else {
+ return Status(ErrorCodes::BadValue, "sort must be object or array");
+ }
+ } else if ('$' == *name) {
+ name++;
+ if (str::equals("explain", name)) {
+ // Won't throw.
+ _explain = e.trueValue();
+ } else if (str::equals("snapshot", name)) {
+ // Won't throw.
+ _snapshot = e.trueValue();
+ } else if (str::equals("min", name)) {
+ if (!e.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "$min must be a BSONObj");
+ }
+ _min = e.embeddedObject().getOwned();
+ } else if (str::equals("max", name)) {
+ if (!e.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "$max must be a BSONObj");
+ }
+ _max = e.embeddedObject().getOwned();
+ } else if (str::equals("hint", name)) {
+ if (e.isABSONObj()) {
+ _hint = e.embeddedObject().getOwned();
+ } else if (String == e.type()) {
+ _hint = e.wrap();
+ } else {
+ return Status(ErrorCodes::BadValue,
+ "$hint must be either a string or nested object");
+ }
+ } else if (str::equals("returnKey", name)) {
+ // Won't throw.
+ if (e.trueValue()) {
+ _returnKey = true;
+ addReturnKeyMetaProj();
+ }
+ } else if (str::equals("maxScan", name)) {
+ // Won't throw.
+ _maxScan = e.numberInt();
+ } else if (str::equals("showDiskLoc", name)) {
+ // Won't throw.
+ if (e.trueValue()) {
+ _showRecordId = true;
+ addShowRecordIdMetaProj();
+ }
+ } else if (str::equals("maxTimeMS", name)) {
+ StatusWith<int> maxTimeMS = parseMaxTimeMS(e);
+ if (!maxTimeMS.isOK()) {
+ return maxTimeMS.getStatus();
}
+ _maxTimeMS = maxTimeMS.getValue();
}
}
-
- return Status::OK();
}
- int LiteParsedQuery::getOptions() const {
- int options = 0;
- if (_tailable) { options |= QueryOption_CursorTailable; }
- if (_slaveOk) { options |= QueryOption_SlaveOk; }
- if (_oplogReplay) { options |= QueryOption_OplogReplay; }
- if (_noCursorTimeout) { options |= QueryOption_NoCursorTimeout; }
- if (_awaitData) { options |= QueryOption_AwaitData; }
- if (_exhaust) { options |= QueryOption_Exhaust; }
- if (_partial) { options |= QueryOption_PartialResults; }
- return options;
- }
+ return Status::OK();
+}
- void LiteParsedQuery::initFromInt(int options) {
- _tailable = (options & QueryOption_CursorTailable) != 0;
- _slaveOk = (options & QueryOption_SlaveOk) != 0;
- _oplogReplay = (options & QueryOption_OplogReplay) != 0;
- _noCursorTimeout = (options & QueryOption_NoCursorTimeout) != 0;
- _awaitData = (options & QueryOption_AwaitData) != 0;
- _exhaust = (options & QueryOption_Exhaust) != 0;
- _partial = (options & QueryOption_PartialResults) != 0;
+int LiteParsedQuery::getOptions() const {
+ int options = 0;
+ if (_tailable) {
+ options |= QueryOption_CursorTailable;
}
-
- void LiteParsedQuery::addMetaProjection() {
- // We might need to update the projection object with a $meta projection.
- if (returnKey()) {
- addReturnKeyMetaProj();
- }
-
- if (showRecordId()) {
- addShowRecordIdMetaProj();
- }
+ if (_slaveOk) {
+ options |= QueryOption_SlaveOk;
+ }
+ if (_oplogReplay) {
+ options |= QueryOption_OplogReplay;
+ }
+ if (_noCursorTimeout) {
+ options |= QueryOption_NoCursorTimeout;
+ }
+ if (_awaitData) {
+ options |= QueryOption_AwaitData;
+ }
+ if (_exhaust) {
+ options |= QueryOption_Exhaust;
+ }
+ if (_partial) {
+ options |= QueryOption_PartialResults;
+ }
+ return options;
+}
+
+void LiteParsedQuery::initFromInt(int options) {
+ _tailable = (options & QueryOption_CursorTailable) != 0;
+ _slaveOk = (options & QueryOption_SlaveOk) != 0;
+ _oplogReplay = (options & QueryOption_OplogReplay) != 0;
+ _noCursorTimeout = (options & QueryOption_NoCursorTimeout) != 0;
+ _awaitData = (options & QueryOption_AwaitData) != 0;
+ _exhaust = (options & QueryOption_Exhaust) != 0;
+ _partial = (options & QueryOption_PartialResults) != 0;
+}
+
+void LiteParsedQuery::addMetaProjection() {
+ // We might need to update the projection object with a $meta projection.
+ if (returnKey()) {
+ addReturnKeyMetaProj();
}
- Status LiteParsedQuery::validateFindCmd() {
- if (isAwaitData() && !isTailable()) {
- return Status(ErrorCodes::BadValue, "Cannot set awaitData without tailable");
- }
+ if (showRecordId()) {
+ addShowRecordIdMetaProj();
+ }
+}
- return validate();
+Status LiteParsedQuery::validateFindCmd() {
+ if (isAwaitData() && !isTailable()) {
+ return Status(ErrorCodes::BadValue, "Cannot set awaitData without tailable");
}
-} // namespace mongo
+ return validate();
+}
+
+} // namespace mongo
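
Note on the hunk above: the reformatted LiteParsedQuery::init() maps the legacy OP_QUERY ntoreturn onto _batchSize and _wantMore — a non-zero value becomes the batch size, and a negative value requests a single batch, with INT_MIN rejected because it cannot be negated. The following is a minimal standalone sketch of that mapping, not MongoDB code; the ParsedLimits/parseNtoreturn names and the use of std::optional (rather than boost::optional, as in the tree) are purely illustrative.

#include <cstdlib>
#include <iostream>
#include <limits>
#include <optional>
#include <string>

struct ParsedLimits {
    std::optional<int> batchSize;  // unset when ntoreturn == 0
    bool wantMore = true;          // false => close the cursor after the first batch
};

// Returns std::nullopt for the one unrepresentable input, INT_MIN, which cannot be
// negated (this mirrors the "bad limit value in query" error in init() above).
std::optional<ParsedLimits> parseNtoreturn(int ntoreturn) {
    ParsedLimits out;
    if (ntoreturn != 0) {
        out.batchSize = ntoreturn;
    }
    if (out.batchSize && *out.batchSize < 0) {
        if (*out.batchSize == std::numeric_limits<int>::min()) {
            return std::nullopt;
        }
        out.wantMore = false;             // negative ntoreturn => single batch
        out.batchSize = -*out.batchSize;  // store the magnitude as the batch size
    }
    return out;
}

int main() {
    for (int n : {0, 6, -6}) {
        const auto r = parseNtoreturn(n);
        std::cout << "ntoreturn=" << n << " -> batchSize="
                  << (r->batchSize ? std::to_string(*r->batchSize) : std::string("none"))
                  << " wantMore=" << std::boolalpha << r->wantMore << "\n";
    }
    return EXIT_SUCCESS;
}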
diff --git a/src/mongo/db/query/lite_parsed_query.h b/src/mongo/db/query/lite_parsed_query.h
index de34ea0ffd6..f0744eeb782 100644
--- a/src/mongo/db/query/lite_parsed_query.h
+++ b/src/mongo/db/query/lite_parsed_query.h
@@ -35,245 +35,304 @@
namespace mongo {
- class NamespaceString;
- class QueryMessage;
- class Status;
- template<typename T> class StatusWith;
+class NamespaceString;
+class QueryMessage;
+class Status;
+template <typename T>
+class StatusWith;
+
+/**
+ * Parses the QueryMessage or find command received from the user and makes the various fields
+ * more easily accessible.
+ */
+class LiteParsedQuery {
+public:
+ /**
+ * Parses a find command object, 'cmdObj'. Caller must indicate whether this lite
+ * parsed query is an explained query via 'isExplain'.
+ *
+ * Returns a heap allocated LiteParsedQuery on success or an error if 'cmdObj' is not well
+ * formed.
+ */
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> makeFromFindCommand(
+ const NamespaceString& nss, const BSONObj& cmdObj, bool isExplain);
+
+ /**
+ * Constructs a LiteParsedQuery object as though it is from a legacy QueryMessage.
+ */
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> makeAsOpQuery(const std::string& ns,
+ int ntoskip,
+ int ntoreturn,
+ int queryoptions,
+ const BSONObj& query,
+ const BSONObj& proj,
+ const BSONObj& sort,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot,
+ bool explain);
+
+ /**
+ * Constructs a LiteParsedQuery object that can be used to serialize to a find command
+ * BSON object.
+ */
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> makeAsFindCmd(const NamespaceString& ns,
+ const BSONObj& query,
+ boost::optional<int> limit);
+
+ /**
+ * Converts this LPQ into a find command.
+ */
+ BSONObj asFindCommand() const;
/**
- * Parses the QueryMessage or find command received from the user and makes the various fields
- * more easily accessible.
+ * Helper functions to parse maxTimeMS from a command object. Returns the contained value,
+ * or an error on parsing failure. When passed an EOO-type element, returns 0 (special value
+ * for "allow to run indefinitely").
*/
- class LiteParsedQuery {
- public:
- /**
- * Parses a find command object, 'cmdObj'. Caller must indicate whether or not this lite
- * parsed query is an explained query or not via 'isExplain'.
- *
- * Returns a heap allocated LiteParsedQuery on success or an error if 'cmdObj' is not well
- * formed.
- */
- static StatusWith<std::unique_ptr<LiteParsedQuery>>
- makeFromFindCommand(const NamespaceString& nss, const BSONObj& cmdObj, bool isExplain);
-
- /**
- * Constructs a LiteParseQuery object as though it is from a legacy QueryMessage.
- */
- static StatusWith<std::unique_ptr<LiteParsedQuery>> makeAsOpQuery(const std::string& ns,
- int ntoskip,
- int ntoreturn,
- int queryoptions,
- const BSONObj& query,
- const BSONObj& proj,
- const BSONObj& sort,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot,
- bool explain);
-
- /**
- * Constructs a LiteParseQuery object that can be used to serialize to find command
- * BSON object.
- */
- static StatusWith<std::unique_ptr<LiteParsedQuery>>
- makeAsFindCmd(const NamespaceString& ns,
- const BSONObj& query,
- boost::optional<int> limit);
-
- /**
- * Converts this LPQ into a find command.
- */
- BSONObj asFindCommand() const;
-
- /**
- * Helper functions to parse maxTimeMS from a command object. Returns the contained value,
- * or an error on parsing fail. When passed an EOO-type element, returns 0 (special value
- * for "allow to run indefinitely").
- */
- static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj);
-
- /**
- * Same as parseMaxTimeMSCommand, but for a query object.
- */
- static StatusWith<int> parseMaxTimeMSQuery(const BSONObj& queryObj);
-
- /**
- * Helper function to identify text search sort key
- * Example: {a: {$meta: "textScore"}}
- */
- static bool isTextScoreMeta(BSONElement elt);
-
- /**
- * Helper function to identify recordId projection.
- *
- * Example: {a: {$meta: "recordId"}}.
- */
- static bool isRecordIdMeta(BSONElement elt);
-
- /**
- * Helper function to validate a sort object.
- * Returns true if each element satisfies one of:
- * 1. a number with value 1
- * 2. a number with value -1
- * 3. isTextScoreMeta
- */
- static bool isValidSortOrder(const BSONObj& sortObj);
-
- /**
- * Returns true if the query described by "query" should execute
- * at an elevated level of isolation (i.e., $isolated was specified).
- */
- static bool isQueryIsolated(const BSONObj& query);
-
- // Names of the maxTimeMS command and query option.
- static const std::string cmdOptionMaxTimeMS;
- static const std::string queryOptionMaxTimeMS;
-
- // Names of the $meta projection values.
- static const std::string metaTextScore;
- static const std::string metaGeoNearDistance;
- static const std::string metaGeoNearPoint;
- static const std::string metaRecordId;
- static const std::string metaIndexKey;
-
- const std::string& ns() const { return _ns; }
-
- const BSONObj& getFilter() const { return _filter; }
- const BSONObj& getProj() const { return _proj; }
- const BSONObj& getSort() const { return _sort; }
- const BSONObj& getHint() const { return _hint; }
-
- static const int kDefaultBatchSize;
-
- int getSkip() const { return _skip; }
- boost::optional<int> getLimit() const { return _limit; }
- boost::optional<int> getBatchSize() const { return _batchSize; }
- bool wantMore() const { return _wantMore; }
-
- bool isFromFindCommand() const { return _fromCommand; }
- bool isExplain() const { return _explain; }
-
- const std::string& getComment() const { return _comment; }
-
- int getMaxScan() const { return _maxScan; }
- int getMaxTimeMS() const { return _maxTimeMS; }
-
- const BSONObj& getMin() const { return _min; }
- const BSONObj& getMax() const { return _max; }
-
- bool returnKey() const { return _returnKey; }
- bool showRecordId() const { return _showRecordId; }
- bool isSnapshot() const { return _snapshot; }
- bool hasReadPref() const { return _hasReadPref; }
-
- bool isTailable() const { return _tailable; }
- bool isSlaveOk() const { return _slaveOk; }
- bool isOplogReplay() const { return _oplogReplay; }
- bool isNoCursorTimeout() const { return _noCursorTimeout; }
- bool isAwaitData() const { return _awaitData; }
- bool isExhaust() const { return _exhaust; }
- bool isPartial() const { return _partial; }
-
- /**
- * Return options as a bit vector.
- */
- int getOptions() const;
-
- //
- // Old parsing code: SOON TO BE DEPRECATED.
- //
-
- /**
- * Parse the provided QueryMessage and return a heap constructed LiteParsedQuery, which
- * represents it or an error.
- */
- static StatusWith<std::unique_ptr<LiteParsedQuery>> fromLegacyQueryMessage(
- const QueryMessage& qm);
-
- private:
- LiteParsedQuery() = default;
-
- /**
- * Parsing code calls this after construction of the LPQ is complete. There are additional
- * semantic properties that must be checked even if "lexically" the parse is OK.
- */
- Status validate() const;
-
- Status init(const std::string& ns, int ntoskip, int ntoreturn, int queryOptions,
- const BSONObj& queryObj, const BSONObj& proj, bool fromQueryMessage);
-
- Status initFullQuery(const BSONObj& top);
-
- static StatusWith<int> parseMaxTimeMS(const BSONElement& maxTimeMSElt);
-
- /**
- * Updates the projection object with a $meta projection for the returnKey option.
- */
- void addReturnKeyMetaProj();
-
- /**
- * Updates the projection object with a $meta projection for the showRecordId option.
- */
- void addShowRecordIdMetaProj();
-
- /**
- * Initializes options based on the value of the 'options' bit vector.
- *
- * This contains flags such as tailable, exhaust, and noCursorTimeout.
- */
- void initFromInt(int options);
-
- /**
- * Add the meta projection to this object if needed.
- */
- void addMetaProjection();
-
- /**
- * Returns OK if this is valid in the find command context.
- */
- Status validateFindCmd();
-
- std::string _ns;
-
- BSONObj _filter;
- BSONObj _proj;
- BSONObj _sort;
- // The hint provided, if any. If the hint was by index key pattern, the value of '_hint' is
- // the key pattern hinted. If the hint was by index name, the value of '_hint' is
- // {$hint: <String>}, where <String> is the index name hinted.
- BSONObj _hint;
-
- int _skip = 0;
- bool _wantMore = true;
-
- boost::optional<int> _limit;
- boost::optional<int> _batchSize;
-
- bool _fromCommand = false;
- bool _explain = false;
-
- std::string _comment;
-
- int _maxScan = 0;
- int _maxTimeMS = 0;
-
- BSONObj _min;
- BSONObj _max;
-
- bool _returnKey = false;
- bool _showRecordId = false;
- bool _snapshot = false;
- bool _hasReadPref = false;
-
- // Options that can be specified in the OP_QUERY 'flags' header.
- bool _tailable = false;
- bool _slaveOk = false;
- bool _oplogReplay = false;
- bool _noCursorTimeout = false;
- bool _awaitData = false;
- bool _exhaust = false;
- bool _partial = false;
- };
-
-} // namespace mongo
+ static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj);
+
+ /**
+ * Same as parseMaxTimeMSCommand, but for a query object.
+ */
+ static StatusWith<int> parseMaxTimeMSQuery(const BSONObj& queryObj);
+
+ /**
+ * Helper function to identify text search sort key
+ * Example: {a: {$meta: "textScore"}}
+ */
+ static bool isTextScoreMeta(BSONElement elt);
+
+ /**
+ * Helper function to identify recordId projection.
+ *
+ * Example: {a: {$meta: "recordId"}}.
+ */
+ static bool isRecordIdMeta(BSONElement elt);
+
+ /**
+ * Helper function to validate a sort object.
+ * Returns true if each element satisfies one of:
+ * 1. a number with value 1
+ * 2. a number with value -1
+ * 3. isTextScoreMeta
+ */
+ static bool isValidSortOrder(const BSONObj& sortObj);
+
+ /**
+ * Returns true if the query described by "query" should execute
+ * at an elevated level of isolation (i.e., $isolated was specified).
+ */
+ static bool isQueryIsolated(const BSONObj& query);
+
+ // Names of the maxTimeMS command and query option.
+ static const std::string cmdOptionMaxTimeMS;
+ static const std::string queryOptionMaxTimeMS;
+
+ // Names of the $meta projection values.
+ static const std::string metaTextScore;
+ static const std::string metaGeoNearDistance;
+ static const std::string metaGeoNearPoint;
+ static const std::string metaRecordId;
+ static const std::string metaIndexKey;
+
+ const std::string& ns() const {
+ return _ns;
+ }
+
+ const BSONObj& getFilter() const {
+ return _filter;
+ }
+ const BSONObj& getProj() const {
+ return _proj;
+ }
+ const BSONObj& getSort() const {
+ return _sort;
+ }
+ const BSONObj& getHint() const {
+ return _hint;
+ }
+
+ static const int kDefaultBatchSize;
+
+ int getSkip() const {
+ return _skip;
+ }
+ boost::optional<int> getLimit() const {
+ return _limit;
+ }
+ boost::optional<int> getBatchSize() const {
+ return _batchSize;
+ }
+ bool wantMore() const {
+ return _wantMore;
+ }
+
+ bool isFromFindCommand() const {
+ return _fromCommand;
+ }
+ bool isExplain() const {
+ return _explain;
+ }
+
+ const std::string& getComment() const {
+ return _comment;
+ }
+
+ int getMaxScan() const {
+ return _maxScan;
+ }
+ int getMaxTimeMS() const {
+ return _maxTimeMS;
+ }
+
+ const BSONObj& getMin() const {
+ return _min;
+ }
+ const BSONObj& getMax() const {
+ return _max;
+ }
+
+ bool returnKey() const {
+ return _returnKey;
+ }
+ bool showRecordId() const {
+ return _showRecordId;
+ }
+ bool isSnapshot() const {
+ return _snapshot;
+ }
+ bool hasReadPref() const {
+ return _hasReadPref;
+ }
+
+ bool isTailable() const {
+ return _tailable;
+ }
+ bool isSlaveOk() const {
+ return _slaveOk;
+ }
+ bool isOplogReplay() const {
+ return _oplogReplay;
+ }
+ bool isNoCursorTimeout() const {
+ return _noCursorTimeout;
+ }
+ bool isAwaitData() const {
+ return _awaitData;
+ }
+ bool isExhaust() const {
+ return _exhaust;
+ }
+ bool isPartial() const {
+ return _partial;
+ }
+
+ /**
+ * Return options as a bit vector.
+ */
+ int getOptions() const;
+
+ //
+ // Old parsing code: SOON TO BE DEPRECATED.
+ //
+
+ /**
+ * Parses the provided QueryMessage and returns a heap-constructed LiteParsedQuery
+ * representing it, or an error.
+ */
+ static StatusWith<std::unique_ptr<LiteParsedQuery>> fromLegacyQueryMessage(
+ const QueryMessage& qm);
+
+private:
+ LiteParsedQuery() = default;
+
+ /**
+ * Parsing code calls this after construction of the LPQ is complete. There are additional
+ * semantic properties that must be checked even if "lexically" the parse is OK.
+ */
+ Status validate() const;
+
+ Status init(const std::string& ns,
+ int ntoskip,
+ int ntoreturn,
+ int queryOptions,
+ const BSONObj& queryObj,
+ const BSONObj& proj,
+ bool fromQueryMessage);
+
+ Status initFullQuery(const BSONObj& top);
+
+ static StatusWith<int> parseMaxTimeMS(const BSONElement& maxTimeMSElt);
+
+ /**
+ * Updates the projection object with a $meta projection for the returnKey option.
+ */
+ void addReturnKeyMetaProj();
+
+ /**
+ * Updates the projection object with a $meta projection for the showRecordId option.
+ */
+ void addShowRecordIdMetaProj();
+
+ /**
+ * Initializes options based on the value of the 'options' bit vector.
+ *
+ * This contains flags such as tailable, exhaust, and noCursorTimeout.
+ */
+ void initFromInt(int options);
+
+ /**
+ * Add the meta projection to this object if needed.
+ */
+ void addMetaProjection();
+
+ /**
+ * Returns OK if this is valid in the find command context.
+ */
+ Status validateFindCmd();
+
+ std::string _ns;
+
+ BSONObj _filter;
+ BSONObj _proj;
+ BSONObj _sort;
+ // The hint provided, if any. If the hint was by index key pattern, the value of '_hint' is
+ // the key pattern hinted. If the hint was by index name, the value of '_hint' is
+ // {$hint: <String>}, where <String> is the index name hinted.
+ BSONObj _hint;
+
+ int _skip = 0;
+ bool _wantMore = true;
+
+ boost::optional<int> _limit;
+ boost::optional<int> _batchSize;
+
+ bool _fromCommand = false;
+ bool _explain = false;
+
+ std::string _comment;
+
+ int _maxScan = 0;
+ int _maxTimeMS = 0;
+
+ BSONObj _min;
+ BSONObj _max;
+
+ bool _returnKey = false;
+ bool _showRecordId = false;
+ bool _snapshot = false;
+ bool _hasReadPref = false;
+
+ // Options that can be specified in the OP_QUERY 'flags' header.
+ bool _tailable = false;
+ bool _slaveOk = false;
+ bool _oplogReplay = false;
+ bool _noCursorTimeout = false;
+ bool _awaitData = false;
+ bool _exhaust = false;
+ bool _partial = false;
+};
+
+} // namespace mongo
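
Note on the header above: the getOptions()/initFromInt() pair packs and unpacks the OP_QUERY 'flags' word, one bit per cursor option (tailable, slaveOk, oplogReplay, noCursorTimeout, awaitData, exhaust, partial). The sketch below shows such a round trip; it is not MongoDB code, the QueryFlags type and k* constants are invented for the example, and while the bit positions follow the usual OP_QUERY layout (bit 0 reserved), treat the exact values as assumptions.

#include <cassert>
#include <cstdint>

// Illustrative bit positions only; bit 0 of the OP_QUERY flags word is reserved.
enum QueryOption : std::uint32_t {
    kTailable        = 1u << 1,
    kSlaveOk         = 1u << 2,
    kOplogReplay     = 1u << 3,
    kNoCursorTimeout = 1u << 4,
    kAwaitData       = 1u << 5,
    kExhaust         = 1u << 6,
    kPartialResults  = 1u << 7,
};

struct QueryFlags {
    bool tailable = false, slaveOk = false, oplogReplay = false, noCursorTimeout = false;
    bool awaitData = false, exhaust = false, partial = false;

    // Analogue of initFromInt(): unpack the bit vector into individual booleans.
    static QueryFlags fromBits(std::uint32_t bits) {
        QueryFlags f;
        f.tailable = (bits & kTailable) != 0;
        f.slaveOk = (bits & kSlaveOk) != 0;
        f.oplogReplay = (bits & kOplogReplay) != 0;
        f.noCursorTimeout = (bits & kNoCursorTimeout) != 0;
        f.awaitData = (bits & kAwaitData) != 0;
        f.exhaust = (bits & kExhaust) != 0;
        f.partial = (bits & kPartialResults) != 0;
        return f;
    }

    // Analogue of getOptions(): pack the booleans back into a bit vector.
    std::uint32_t toBits() const {
        std::uint32_t bits = 0;
        if (tailable)        bits |= kTailable;
        if (slaveOk)         bits |= kSlaveOk;
        if (oplogReplay)     bits |= kOplogReplay;
        if (noCursorTimeout) bits |= kNoCursorTimeout;
        if (awaitData)       bits |= kAwaitData;
        if (exhaust)         bits |= kExhaust;
        if (partial)         bits |= kPartialResults;
        return bits;
    }
};

int main() {
    const std::uint32_t wire = kTailable | kAwaitData;  // e.g. a tailable awaitData cursor
    const QueryFlags f = QueryFlags::fromBits(wire);
    assert(f.tailable && f.awaitData && !f.exhaust);
    assert(f.toBits() == wire);  // the encoding round-trips
    return 0;
}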
diff --git a/src/mongo/db/query/lite_parsed_query_test.cpp b/src/mongo/db/query/lite_parsed_query_test.cpp
index eeae7786d6a..e395ca2c87a 100644
--- a/src/mongo/db/query/lite_parsed_query_test.cpp
+++ b/src/mongo/db/query/lite_parsed_query_test.cpp
@@ -39,956 +39,986 @@
namespace mongo {
namespace {
- using std::unique_ptr;
- using unittest::assertGet;
-
- TEST(LiteParsedQueryTest, InitSortOrder) {
- ASSERT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+using std::unique_ptr;
+using unittest::assertGet;
+
+TEST(LiteParsedQueryTest, InitSortOrder) {
+ ASSERT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 1,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ fromjson("{a: 1}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, InitSortOrderString) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
0,
1,
0,
BSONObj(),
BSONObj(),
+ fromjson("{a: \"\"}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, GetFilter) {
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 5,
+ 6,
+ 9,
+ BSON("x" << 5),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ ASSERT_EQUALS(BSON("x" << 5), lpq->getFilter());
+}
+
+TEST(LiteParsedQueryTest, NumToReturn) {
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 5,
+ 6,
+ 9,
+ BSON("x" << 5),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ ASSERT_EQUALS(6, lpq->getBatchSize());
+ ASSERT(lpq->wantMore());
+}
+
+TEST(LiteParsedQueryTest, NumToReturnNegative) {
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 5,
+ -6,
+ 9,
+ BSON("x" << 5),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+
+ ASSERT_EQUALS(6, lpq->getBatchSize());
+ ASSERT(!lpq->wantMore());
+}
+
+TEST(LiteParsedQueryTest, MinFieldsNotPrefixOfMax) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
fromjson("{a: 1}"),
+ fromjson("{b: 1}"),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, MinFieldsMoreThanMax) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
BSONObj(),
BSONObj(),
BSONObj(),
+ fromjson("{a: 1, b: 1}"),
+ fromjson("{a: 1}"),
false, // snapshot
false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, InitSortOrderString) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 1,
- 0,
- BSONObj(),
- BSONObj(),
- fromjson("{a: \"\"}"),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, GetFilter) {
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 5,
- 6,
- 9,
- BSON("x" << 5),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- ASSERT_EQUALS(BSON("x" << 5 ), lpq->getFilter());
- }
-
- TEST(LiteParsedQueryTest, NumToReturn) {
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 5,
- 6,
- 9,
- BSON("x" << 5),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- ASSERT_EQUALS(6, lpq->getBatchSize());
- ASSERT(lpq->wantMore());
- }
-
- TEST(LiteParsedQueryTest, NumToReturnNegative) {
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 5,
- -6,
- 9,
- BSON("x" << 5),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
-
- ASSERT_EQUALS(6, lpq->getBatchSize());
- ASSERT(!lpq->wantMore());
- }
-
- TEST(LiteParsedQueryTest, MinFieldsNotPrefixOfMax) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- fromjson("{a: 1}"),
- fromjson("{b: 1}"),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, MinFieldsMoreThanMax) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- fromjson("{a: 1, b: 1}"),
- fromjson("{a: 1}"),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, MinFieldsLessThanMax) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- fromjson("{a: 1}"),
- fromjson("{a: 1, b: 1}"),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- // Helper function which returns the Status of creating a LiteParsedQuery object with the given
- // parameters.
- void assertLiteParsedQuerySuccess(const BSONObj& query,
- const BSONObj& proj,
- const BSONObj& sort) {
-
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- query,
- proj,
- sort,
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
- }
-
- //
- // Test compatibility of various projection and sort objects.
- //
-
- TEST(LiteParsedQueryTest, ValidSortProj) {
- assertLiteParsedQuerySuccess(BSONObj(),
- fromjson("{a: 1}"),
- fromjson("{a: 1}"));
-
- assertLiteParsedQuerySuccess(BSONObj(),
- fromjson("{a: {$meta: \"textScore\"}}"),
- fromjson("{a: {$meta: \"textScore\"}}"));
- }
-
- TEST(LiteParsedQueryTest, ForbidNonMetaSortOnFieldWithMetaProject) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- fromjson("{a: {$meta: \"textScore\"}}"),
- fromjson("{a: 1}"),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false) // explain
- .getStatus());
-
- assertLiteParsedQuerySuccess(BSONObj(),
- fromjson("{a: {$meta: \"textScore\"}}"),
- fromjson("{b: 1}"));
- }
-
- TEST(LiteParsedQueryTest, ForbidMetaSortOnFieldWithoutMetaProject) {
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- fromjson("{a: 1}"),
- fromjson("{a: {$meta: \"textScore\"}}"),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false) // explain
- .getStatus());
-
- ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
- 0,
- 0,
- 0,
- BSONObj(),
- fromjson("{b: 1}"),
- fromjson("{a: {$meta: \"textScore\"}}"),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false) // explain
- .getStatus());
- }
-
- TEST(LiteParsedQueryTest, MakeFindCmd) {
- auto result = LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"),
- BSON("x" << 1),
- 2);
- ASSERT_OK(result.getStatus());
-
- auto&& lpq = result.getValue();
- ASSERT_EQUALS("test.ns", lpq->ns());
- ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
- ASSERT_EQUALS(2, lpq->getLimit());
-
- ASSERT_EQUALS(BSONObj(), lpq->getProj());
- ASSERT_EQUALS(BSONObj(), lpq->getSort());
- ASSERT_EQUALS(BSONObj(), lpq->getHint());
- ASSERT_EQUALS(BSONObj(), lpq->getMin());
- ASSERT_EQUALS(BSONObj(), lpq->getMax());
-
- ASSERT_EQUALS(0, lpq->getSkip());
- ASSERT_EQUALS(0, lpq->getMaxScan());
- ASSERT_EQUALS(0, lpq->getMaxTimeMS());
- ASSERT_EQUALS(0, lpq->getOptions());
-
- ASSERT_FALSE(lpq->getBatchSize());
-
- ASSERT_TRUE(lpq->isFromFindCommand());
- ASSERT_FALSE(lpq->isExplain());
- ASSERT_FALSE(lpq->returnKey());
- ASSERT_FALSE(lpq->showRecordId());
- ASSERT_FALSE(lpq->isSnapshot());
- ASSERT_FALSE(lpq->hasReadPref());
- ASSERT_FALSE(lpq->isTailable());
- ASSERT_FALSE(lpq->isSlaveOk());
- ASSERT_FALSE(lpq->isOplogReplay());
- ASSERT_FALSE(lpq->isNoCursorTimeout());
- ASSERT_FALSE(lpq->isAwaitData());
- ASSERT_FALSE(lpq->isExhaust());
- ASSERT_FALSE(lpq->isPartial());
- }
-
- TEST(LiteParsedQueryTest, MakeFindCmdNoLimit) {
- auto result = LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"),
- BSON("x" << 1),
- boost::none);
- ASSERT_OK(result.getStatus());
-
- auto&& lpq = result.getValue();
- ASSERT_EQUALS("test.ns", lpq->ns());
- ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
-
- ASSERT_EQUALS(BSONObj(), lpq->getProj());
- ASSERT_EQUALS(BSONObj(), lpq->getSort());
- ASSERT_EQUALS(BSONObj(), lpq->getHint());
- ASSERT_EQUALS(BSONObj(), lpq->getMin());
- ASSERT_EQUALS(BSONObj(), lpq->getMax());
-
- ASSERT_EQUALS(0, lpq->getSkip());
- ASSERT_EQUALS(0, lpq->getMaxScan());
- ASSERT_EQUALS(0, lpq->getMaxTimeMS());
- ASSERT_EQUALS(0, lpq->getOptions());
-
- ASSERT_FALSE(lpq->getBatchSize());
- ASSERT_FALSE(lpq->getLimit());
-
- ASSERT_TRUE(lpq->isFromFindCommand());
- ASSERT_FALSE(lpq->isExplain());
- ASSERT_FALSE(lpq->returnKey());
- ASSERT_FALSE(lpq->showRecordId());
- ASSERT_FALSE(lpq->isSnapshot());
- ASSERT_FALSE(lpq->hasReadPref());
- ASSERT_FALSE(lpq->isTailable());
- ASSERT_FALSE(lpq->isSlaveOk());
- ASSERT_FALSE(lpq->isOplogReplay());
- ASSERT_FALSE(lpq->isNoCursorTimeout());
- ASSERT_FALSE(lpq->isAwaitData());
- ASSERT_FALSE(lpq->isExhaust());
- ASSERT_FALSE(lpq->isPartial());
- }
-
- TEST(LiteParsedQueryTest, MakeFindCmdBadLimit) {
- auto status = LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"),
- BSON("x" << 1),
- 0).getStatus();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
- }
-
- //
- // Text meta BSON element validation
- //
-
- bool isFirstElementTextScoreMeta(const char* sortStr) {
- BSONObj sortObj = fromjson(sortStr);
- BSONElement elt = sortObj.firstElement();
- bool result = LiteParsedQuery::isTextScoreMeta(elt);
- return result;
- }
-
- // Check validation of $meta expressions
- TEST(LiteParsedQueryTest, IsTextScoreMeta) {
- // Valid textScore meta sort
- ASSERT(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\"}}"));
-
- // Invalid textScore meta sorts
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: 1}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"image\"}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$world: \"textScore\"}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\", b: 1}}"));
- }
-
- //
- // Sort order validation
- // In a valid sort order, each element satisfies one of:
- // 1. a number with value 1
- // 2. a number with value -1
- // 3. isTextScoreMeta
- //
-
- TEST(LiteParsedQueryTest, ValidateSortOrder) {
- // Valid sorts
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: 1}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: -1}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\"}}")));
-
- // Invalid sorts
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 100}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 0}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -100}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: Infinity}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -Infinity}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: true}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: false}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: null}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {b: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: []}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: [1, 2, 3]}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"\"}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"bb\"}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"image\"}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$world: \"textScore\"}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\","
- " b: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': 1}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': -1}")));
- }
-
- //
- // Tests for parsing a lite parsed query from a command BSON object.
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandBasic) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 3},"
- "sort: {a: 1},"
- "projection: {_id: 0, a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandWithOptions) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 3},"
- "sort: {a: 1},"
- "projection: {_id: 0, a: 1},"
- "showRecordId: true,"
- "maxScan: 1000}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Make sure the values from the command BSON are reflected in the LPQ.
- ASSERT(lpq->showRecordId());
- ASSERT_EQUALS(1000, lpq->getMaxScan());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandHintAsString) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "hint: 'foo_1'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- BSONObj hintObj = lpq->getHint();
- ASSERT_EQUALS(BSON("$hint" << "foo_1"), hintObj);
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandValidSortProj) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "projection: {a: 1},"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandValidSortProjMeta) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {a: {$meta: 'textScore'}}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandAllFlagsTrue) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "tailable: true,"
- "slaveOk: true,"
- "oplogReplay: true,"
- "noCursorTimeout: true,"
- "awaitData: true,"
- "partial: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Test that all the flags got set to true.
- ASSERT(lpq->isTailable());
- ASSERT(lpq->isSlaveOk());
- ASSERT(lpq->isOplogReplay());
- ASSERT(lpq->isNoCursorTimeout());
- ASSERT(lpq->isAwaitData());
- ASSERT(lpq->isPartial());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandCommentWithValidMinMax) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "comment: 'the comment',"
- "min: {a: 1},"
- "max: {a: 2}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT_EQUALS("the comment", lpq->getComment());
- BSONObj expectedMin = BSON("a" << 1);
- ASSERT_EQUALS(0, expectedMin.woCompare(lpq->getMin()));
- BSONObj expectedMax = BSON("a" << 2);
- ASSERT_EQUALS(0, expectedMax.woCompare(lpq->getMax()));
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandAllNonOptionFields) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "sort: {b: 1},"
- "projection: {c: 1},"
- "hint: {d: 1},"
- "limit: 3,"
- "skip: 5,"
- "batchSize: 90,"
- "singleBatch: false}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Check the values inside the LPQ.
- BSONObj expectedQuery = BSON("a" << 1);
- ASSERT_EQUALS(0, expectedQuery.woCompare(lpq->getFilter()));
- BSONObj expectedSort = BSON("b" << 1);
- ASSERT_EQUALS(0, expectedSort.woCompare(lpq->getSort()));
- BSONObj expectedProj = BSON("c" << 1);
- ASSERT_EQUALS(0, expectedProj.woCompare(lpq->getProj()));
- BSONObj expectedHint = BSON("d" << 1);
- ASSERT_EQUALS(0, expectedHint.woCompare(lpq->getHint()));
- ASSERT_EQUALS(3, lpq->getLimit());
- ASSERT_EQUALS(5, lpq->getSkip());
- ASSERT_EQUALS(90, lpq->getBatchSize());
- ASSERT(lpq->wantMore());
- }
-
- //
- // Parsing errors where a field has the wrong type.
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandQueryWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSortWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "sort: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandProjWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "projection: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSkipWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "skip: '5',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandLimitWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "limit: '5',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSingleBatchWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "singleBatch: 'false',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandCommentWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "comment: 1}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandMaxScanWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "maxScan: true,"
- "comment: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandMaxTimeMSWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "maxTimeMS: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandMaxWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "max: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandMinWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "min: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandReturnKeyWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "returnKey: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
-
- TEST(LiteParsedQueryTest, ParseFromCommandShowRecordIdWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "showRecordId: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSnapshotWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "snapshot: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandTailableWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "tailable: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSlaveOkWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "slaveOk: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandOplogReplayWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "oplogReplay: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandNoCursorTimeoutWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "noCursorTimeout: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandAwaitDataWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "tailable: true,"
- "awaitData: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandExhaustWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "exhaust: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandPartialWrongType) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "filter: {a: 1},"
- "exhaust: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- //
- // Parsing errors where a field has the right type but a bad value.
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandNegativeSkipError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "skip: -3,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandNegativeLimitError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "limit: -3,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandNegativeBatchSizeError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "batchSize: -10,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandBatchSizeZero) {
- BSONObj cmdObj = fromjson("{find: 'testns', batchSize: 0}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT(lpq->getBatchSize());
- ASSERT_EQ(0, lpq->getBatchSize());
-
- ASSERT(!lpq->getLimit());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandDefaultBatchSize) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT(!lpq->getBatchSize());
- ASSERT(!lpq->getLimit());
- }
-
- //
- // Errors checked in LiteParsedQuery::validate().
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandMinMaxDifferentFieldsError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "min: {a: 3},"
- "max: {b: 4}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusSortError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "sort: {a: 3},"
- "snapshot: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusHintError) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "snapshot: true,"
- "hint: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandForbidNonMetaSortOnFieldWithMetaProject) {
- BSONObj cmdObj;
-
- cmdObj = fromjson("{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-
- cmdObj = fromjson("{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {b: 1}}");
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandForbidMetaSortOnFieldWithoutMetaProject) {
- BSONObj cmdObj;
-
- cmdObj = fromjson("{find: 'testns',"
- "projection: {a: 1},"
- "sort: {a: {$meta: 'textScore'}}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-
- cmdObj = fromjson("{find: 'testns',"
- "projection: {b: 1},"
- "sort: {a: {$meta: 'textScore'}}}");
- result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandForbidExhaust) {
- BSONObj cmdObj = fromjson("{find: 'testns', exhaust: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandIsFromFindCommand) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT(lpq->isFromFindCommand());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandNotFromFindCommand) {
- std::unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeAsOpQuery("testns",
- 5,
- 6,
- 9,
- BSON( "x" << 5 ),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- BSONObj(),
- false, // snapshot
- false))); // explain
- ASSERT(!lpq->isFromFindCommand());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandAwaitDataButNotTailable) {
- const NamespaceString nss("test.testns");
- BSONObj cmdObj = fromjson("{find: 'testns', awaitData: true}");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseCommandFirstFieldNotString) {
- BSONObj cmdObj = fromjson("{find: 1}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, DefaultQueryParametersCorrect) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
-
- const NamespaceString nss("test.testns");
- std::unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, false)));
-
- ASSERT_EQUALS(0, lpq->getSkip());
- ASSERT_EQUALS(true, lpq->wantMore());
- ASSERT_EQUALS(true, lpq->isFromFindCommand());
- ASSERT_EQUALS(false, lpq->isExplain());
- ASSERT_EQUALS(0, lpq->getMaxScan());
- ASSERT_EQUALS(0, lpq->getMaxTimeMS());
- ASSERT_EQUALS(false, lpq->returnKey());
- ASSERT_EQUALS(false, lpq->showRecordId());
- ASSERT_EQUALS(false, lpq->isSnapshot());
- ASSERT_EQUALS(false, lpq->hasReadPref());
- ASSERT_EQUALS(false, lpq->isTailable());
- ASSERT_EQUALS(false, lpq->isSlaveOk());
- ASSERT_EQUALS(false, lpq->isOplogReplay());
- ASSERT_EQUALS(false, lpq->isNoCursorTimeout());
- ASSERT_EQUALS(false, lpq->isAwaitData());
- ASSERT_EQUALS(false, lpq->isExhaust());
- ASSERT_EQUALS(false, lpq->isPartial());
- }
-
- //
- // Extra fields cause the parse to fail.
- //
-
- TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraField) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "snapshot: true,"
- "foo: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
- TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraOption) {
- BSONObj cmdObj = fromjson("{find: 'testns',"
- "snapshot: true,"
- "foo: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
- }
-
-} // namespace mongo
-} // namespace
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, MinFieldsLessThanMax) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ fromjson("{a: 1}"),
+ fromjson("{a: 1, b: 1}"),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+// Helper function which asserts that creating a LiteParsedQuery object with the given
+// parameters succeeds.
+void assertLiteParsedQuerySuccess(const BSONObj& query, const BSONObj& proj, const BSONObj& sort) {
+ unique_ptr<LiteParsedQuery> lpq(assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ query,
+ proj,
+ sort,
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+}
+
+//
+// Test compatibility of various projection and sort objects.
+//
+
+TEST(LiteParsedQueryTest, ValidSortProj) {
+ assertLiteParsedQuerySuccess(BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
+
+ assertLiteParsedQuerySuccess(BSONObj(),
+ fromjson("{a: {$meta: \"textScore\"}}"),
+ fromjson("{a: {$meta: \"textScore\"}}"));
+}
+
+TEST(LiteParsedQueryTest, ForbidNonMetaSortOnFieldWithMetaProject) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: {$meta: \"textScore\"}}"),
+ fromjson("{a: 1}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+
+ assertLiteParsedQuerySuccess(
+ BSONObj(), fromjson("{a: {$meta: \"textScore\"}}"), fromjson("{b: 1}"));
+}
+
+TEST(LiteParsedQueryTest, ForbidMetaSortOnFieldWithoutMetaProject) {
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 1}"),
+ fromjson("{a: {$meta: \"textScore\"}}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+
+ ASSERT_NOT_OK(LiteParsedQuery::makeAsOpQuery("testns",
+ 0,
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{b: 1}"),
+ fromjson("{a: {$meta: \"textScore\"}}"),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false) // explain
+ .getStatus());
+}
+
+TEST(LiteParsedQueryTest, MakeFindCmd) {
+ auto result = LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"), BSON("x" << 1), 2);
+ ASSERT_OK(result.getStatus());
+
+ auto&& lpq = result.getValue();
+ ASSERT_EQUALS("test.ns", lpq->ns());
+ ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
+ ASSERT_EQUALS(2, lpq->getLimit());
+
+ ASSERT_EQUALS(BSONObj(), lpq->getProj());
+ ASSERT_EQUALS(BSONObj(), lpq->getSort());
+ ASSERT_EQUALS(BSONObj(), lpq->getHint());
+ ASSERT_EQUALS(BSONObj(), lpq->getMin());
+ ASSERT_EQUALS(BSONObj(), lpq->getMax());
+
+ ASSERT_EQUALS(0, lpq->getSkip());
+ ASSERT_EQUALS(0, lpq->getMaxScan());
+ ASSERT_EQUALS(0, lpq->getMaxTimeMS());
+ ASSERT_EQUALS(0, lpq->getOptions());
+
+ ASSERT_FALSE(lpq->getBatchSize());
+
+ ASSERT_TRUE(lpq->isFromFindCommand());
+ ASSERT_FALSE(lpq->isExplain());
+ ASSERT_FALSE(lpq->returnKey());
+ ASSERT_FALSE(lpq->showRecordId());
+ ASSERT_FALSE(lpq->isSnapshot());
+ ASSERT_FALSE(lpq->hasReadPref());
+ ASSERT_FALSE(lpq->isTailable());
+ ASSERT_FALSE(lpq->isSlaveOk());
+ ASSERT_FALSE(lpq->isOplogReplay());
+ ASSERT_FALSE(lpq->isNoCursorTimeout());
+ ASSERT_FALSE(lpq->isAwaitData());
+ ASSERT_FALSE(lpq->isExhaust());
+ ASSERT_FALSE(lpq->isPartial());
+}
+
+TEST(LiteParsedQueryTest, MakeFindCmdNoLimit) {
+ auto result =
+ LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"), BSON("x" << 1), boost::none);
+ ASSERT_OK(result.getStatus());
+
+ auto&& lpq = result.getValue();
+ ASSERT_EQUALS("test.ns", lpq->ns());
+ ASSERT_EQUALS(BSON("x" << 1), lpq->getFilter());
+
+ ASSERT_EQUALS(BSONObj(), lpq->getProj());
+ ASSERT_EQUALS(BSONObj(), lpq->getSort());
+ ASSERT_EQUALS(BSONObj(), lpq->getHint());
+ ASSERT_EQUALS(BSONObj(), lpq->getMin());
+ ASSERT_EQUALS(BSONObj(), lpq->getMax());
+
+ ASSERT_EQUALS(0, lpq->getSkip());
+ ASSERT_EQUALS(0, lpq->getMaxScan());
+ ASSERT_EQUALS(0, lpq->getMaxTimeMS());
+ ASSERT_EQUALS(0, lpq->getOptions());
+
+ ASSERT_FALSE(lpq->getBatchSize());
+ ASSERT_FALSE(lpq->getLimit());
+
+ ASSERT_TRUE(lpq->isFromFindCommand());
+ ASSERT_FALSE(lpq->isExplain());
+ ASSERT_FALSE(lpq->returnKey());
+ ASSERT_FALSE(lpq->showRecordId());
+ ASSERT_FALSE(lpq->isSnapshot());
+ ASSERT_FALSE(lpq->hasReadPref());
+ ASSERT_FALSE(lpq->isTailable());
+ ASSERT_FALSE(lpq->isSlaveOk());
+ ASSERT_FALSE(lpq->isOplogReplay());
+ ASSERT_FALSE(lpq->isNoCursorTimeout());
+ ASSERT_FALSE(lpq->isAwaitData());
+ ASSERT_FALSE(lpq->isExhaust());
+ ASSERT_FALSE(lpq->isPartial());
+}
+
+TEST(LiteParsedQueryTest, MakeFindCmdBadLimit) {
+ auto status =
+ LiteParsedQuery::makeAsFindCmd(NamespaceString("test.ns"), BSON("x" << 1), 0).getStatus();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
+}
+
+//
+// Text meta BSON element validation
+//
+
+bool isFirstElementTextScoreMeta(const char* sortStr) {
+ BSONObj sortObj = fromjson(sortStr);
+ BSONElement elt = sortObj.firstElement();
+ bool result = LiteParsedQuery::isTextScoreMeta(elt);
+ return result;
+}
+
+// Check validation of $meta expressions
+TEST(LiteParsedQueryTest, IsTextScoreMeta) {
+ // Valid textScore meta sort
+ ASSERT(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\"}}"));
+
+ // Invalid textScore meta sorts
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: 1}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"image\"}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$world: \"textScore\"}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\", b: 1}}"));
+}
+
+//
+// Sort order validation
+// In a valid sort order, each element satisfies one of:
+// 1. a number with value 1
+// 2. a number with value -1
+// 3. isTextScoreMeta
+//
+
+TEST(LiteParsedQueryTest, ValidateSortOrder) {
+ // Valid sorts
+ ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{}")));
+ ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: 1}")));
+ ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: -1}")));
+ ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\"}}")));
+
+ // Invalid sorts
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 100}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 0}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -100}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: Infinity}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -Infinity}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: true}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: false}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: null}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {b: 1}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: []}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: [1, 2, 3]}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"\"}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"bb\"}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: 1}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"image\"}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$world: \"textScore\"}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson(
+ "{a: {$meta: \"textScore\","
+ " b: 1}}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': 1}")));
+ ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': -1}")));
+}
+
+//
+// Tests for parsing a lite parsed query from a command BSON object.
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandBasic) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 3},"
+ "sort: {a: 1},"
+ "projection: {_id: 0, a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandWithOptions) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 3},"
+ "sort: {a: 1},"
+ "projection: {_id: 0, a: 1},"
+ "showRecordId: true,"
+        "maxScan: 1000}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Make sure the values from the command BSON are reflected in the LPQ.
+ ASSERT(lpq->showRecordId());
+ ASSERT_EQUALS(1000, lpq->getMaxScan());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandHintAsString) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "hint: 'foo_1'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ BSONObj hintObj = lpq->getHint();
+ ASSERT_EQUALS(BSON("$hint"
+ << "foo_1"),
+ hintObj);
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandValidSortProj) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: 1},"
+ "sort: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandValidSortProjMeta) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandAllFlagsTrue) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "tailable: true,"
+ "slaveOk: true,"
+ "oplogReplay: true,"
+ "noCursorTimeout: true,"
+ "awaitData: true,"
+ "partial: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Test that all the flags got set to true.
+ ASSERT(lpq->isTailable());
+ ASSERT(lpq->isSlaveOk());
+ ASSERT(lpq->isOplogReplay());
+ ASSERT(lpq->isNoCursorTimeout());
+ ASSERT(lpq->isAwaitData());
+ ASSERT(lpq->isPartial());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandCommentWithValidMinMax) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "comment: 'the comment',"
+ "min: {a: 1},"
+ "max: {a: 2}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS("the comment", lpq->getComment());
+ BSONObj expectedMin = BSON("a" << 1);
+ ASSERT_EQUALS(0, expectedMin.woCompare(lpq->getMin()));
+ BSONObj expectedMax = BSON("a" << 2);
+ ASSERT_EQUALS(0, expectedMax.woCompare(lpq->getMax()));
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandAllNonOptionFields) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "sort: {b: 1},"
+ "projection: {c: 1},"
+ "hint: {d: 1},"
+ "limit: 3,"
+ "skip: 5,"
+ "batchSize: 90,"
+ "singleBatch: false}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Check the values inside the LPQ.
+ BSONObj expectedQuery = BSON("a" << 1);
+ ASSERT_EQUALS(0, expectedQuery.woCompare(lpq->getFilter()));
+ BSONObj expectedSort = BSON("b" << 1);
+ ASSERT_EQUALS(0, expectedSort.woCompare(lpq->getSort()));
+ BSONObj expectedProj = BSON("c" << 1);
+ ASSERT_EQUALS(0, expectedProj.woCompare(lpq->getProj()));
+ BSONObj expectedHint = BSON("d" << 1);
+ ASSERT_EQUALS(0, expectedHint.woCompare(lpq->getHint()));
+ ASSERT_EQUALS(3, lpq->getLimit());
+ ASSERT_EQUALS(5, lpq->getSkip());
+ ASSERT_EQUALS(90, lpq->getBatchSize());
+ ASSERT(lpq->wantMore());
+}
+
+//
+// Parsing errors where a field has the wrong type.
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandQueryWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSortWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "sort: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandProjWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "projection: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSkipWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "skip: '5',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandLimitWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "limit: '5',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSingleBatchWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "singleBatch: 'false',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandCommentWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "comment: 1}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandMaxScanWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "maxScan: true,"
+ "comment: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandMaxTimeMSWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "maxTimeMS: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandMaxWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "max: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandMinWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "min: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandReturnKeyWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "returnKey: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandShowRecordIdWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "showRecordId: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSnapshotWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "snapshot: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandTailableWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "tailable: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSlaveOkWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "slaveOk: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandOplogReplayWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "oplogReplay: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandNoCursorTimeoutWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "noCursorTimeout: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandAwaitDataWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "tailable: true,"
+ "awaitData: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandExhaustWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "exhaust: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandPartialWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+        "partial: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+//
+// Parsing errors where a field has the right type but a bad value.
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandNegativeSkipError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "skip: -3,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandNegativeLimitError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "limit: -3,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandNegativeBatchSizeError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "batchSize: -10,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandBatchSizeZero) {
+ BSONObj cmdObj = fromjson("{find: 'testns', batchSize: 0}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT(lpq->getBatchSize());
+ ASSERT_EQ(0, lpq->getBatchSize());
+
+ ASSERT(!lpq->getLimit());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandDefaultBatchSize) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT(!lpq->getBatchSize());
+ ASSERT(!lpq->getLimit());
+}
+
+//
+// Errors checked in LiteParsedQuery::validate().
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandMinMaxDifferentFieldsError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "min: {a: 3},"
+ "max: {b: 4}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusSortError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "sort: {a: 3},"
+ "snapshot: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusHintError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "hint: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandForbidNonMetaSortOnFieldWithMetaProject) {
+ BSONObj cmdObj;
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {b: 1}}");
+ ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandForbidMetaSortOnFieldWithoutMetaProject) {
+ BSONObj cmdObj;
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: 1},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {b: 1},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandForbidExhaust) {
+ BSONObj cmdObj = fromjson("{find: 'testns', exhaust: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandIsFromFindCommand) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT(lpq->isFromFindCommand());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandNotFromFindCommand) {
+ std::unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeAsOpQuery("testns",
+ 5,
+ 6,
+ 9,
+ BSON("x" << 5),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ false, // snapshot
+ false))); // explain
+ ASSERT(!lpq->isFromFindCommand());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandAwaitDataButNotTailable) {
+ const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', awaitData: true}");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseCommandFirstFieldNotString) {
+ BSONObj cmdObj = fromjson("{find: 1}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, DefaultQueryParametersCorrect) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+
+ const NamespaceString nss("test.testns");
+ std::unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, false)));
+
+ ASSERT_EQUALS(0, lpq->getSkip());
+ ASSERT_EQUALS(true, lpq->wantMore());
+ ASSERT_EQUALS(true, lpq->isFromFindCommand());
+ ASSERT_EQUALS(false, lpq->isExplain());
+ ASSERT_EQUALS(0, lpq->getMaxScan());
+ ASSERT_EQUALS(0, lpq->getMaxTimeMS());
+ ASSERT_EQUALS(false, lpq->returnKey());
+ ASSERT_EQUALS(false, lpq->showRecordId());
+ ASSERT_EQUALS(false, lpq->isSnapshot());
+ ASSERT_EQUALS(false, lpq->hasReadPref());
+ ASSERT_EQUALS(false, lpq->isTailable());
+ ASSERT_EQUALS(false, lpq->isSlaveOk());
+ ASSERT_EQUALS(false, lpq->isOplogReplay());
+ ASSERT_EQUALS(false, lpq->isNoCursorTimeout());
+ ASSERT_EQUALS(false, lpq->isAwaitData());
+ ASSERT_EQUALS(false, lpq->isExhaust());
+ ASSERT_EQUALS(false, lpq->isPartial());
+}
+
+//
+// Extra fields cause the parse to fail.
+//
+
+TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraField) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "foo: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraOption) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "foo: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+} // namespace mongo
+} // namespace
diff --git a/src/mongo/db/query/lru_key_value.h b/src/mongo/db/query/lru_key_value.h
index 8215c029375..7553ced9020 100644
--- a/src/mongo/db/query/lru_key_value.h
+++ b/src/mongo/db/query/lru_key_value.h
@@ -37,186 +37,192 @@
namespace mongo {
+/**
+ * A key-value store structure with a least recently used (LRU) replacement
+ * policy. The number of entries allowed in the kv-store is set as a constant
+ * upon construction.
+ *
+ * Caveat:
+ * This kv-store is NOT thread safe! The client to this utility is responsible
+ * for protecting concurrent access to the LRU store if used in a threaded
+ * context.
+ *
+ * Implemented as a doubly-linked list (std::list) with a hash map
+ * (boost::unordered_map) for quickly locating the kv-store entries. The
+ * add(), get(), and remove() operations are all O(1).
+ *
+ * The keys of generic type K map to values of type V*. The V*
+ * pointers are owned by the kv-store.
+ *
+ * TODO: We could move this into the util/ directory and do any cleanup necessary to make it
+ * fully general.
+ */
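+/*
+ * A minimal usage sketch of LRUKeyValue (illustrative only; it assumes
+ * single-threaded use and heap-allocated values owned by the store):
+ *
+ *   LRUKeyValue<int, int> store(2);                 // room for two entries
+ *   store.add(1, new int(10));                      // store takes ownership
+ *   int* out = NULL;
+ *   if (store.get(1, &out).isOK()) {
+ *       // 'out' is valid here and remains owned by the store.
+ *   }
+ *   store.add(2, new int(20));
+ *   std::unique_ptr<int> evicted = store.add(3, new int(30));
+ *   // The store was already full, so the least recently used entry (key 1)
+ *   // is evicted and handed back through add()'s return value.
+ */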
+template <class K, class V>
+class LRUKeyValue {
+public:
+ LRUKeyValue(size_t maxSize) : _maxSize(maxSize), _currentSize(0){};
+
+ ~LRUKeyValue() {
+ clear();
+ }
+
+ typedef std::pair<K, V*> KVListEntry;
+
+ typedef std::list<KVListEntry> KVList;
+ typedef typename KVList::iterator KVListIt;
+ typedef typename KVList::const_iterator KVListConstIt;
+
+ typedef boost::unordered_map<K, KVListIt> KVMap;
+ typedef typename KVMap::const_iterator KVMapConstIt;
+
/**
- * A key-value store structure with a least recently used (LRU) replacement
- * policy. The number of entries allowed in the kv-store is set as a constant
- * upon construction.
+ * Add an (K, V*) pair to the store, where 'key' can
+ * be used to retrieve value 'entry' from the store.
*
- * Caveat:
- * This kv-store is NOT thread safe! The client to this utility is responsible
- * for protecting concurrent access to the LRU store if used in a threaded
- * context.
+ * Takes ownership of 'entry'.
*
- * Implemented as a doubly-linked list (std::list) with a hash map
- * (boost::unordered_map) for quickly locating the kv-store entries. The
- * add(), get(), and remove() operations are all O(1).
+ * If 'key' already exists in the kv-store, 'entry' will
+ * simply replace what is already there.
*
- * The keys of generic type K map to values of type V*. The V*
- * pointers are owned by the kv-store.
+ * The least recently used entry is evicted if the
+ * kv-store is full prior to the add() operation.
*
- * TODO: We could move this into the util/ directory and do any cleanup necessary to make it
- * fully general.
+ * If an entry is evicted, it will be returned in
+ * a unique_ptr for the caller to use before disposing.
*/
- template<class K, class V>
- class LRUKeyValue {
- public:
- LRUKeyValue(size_t maxSize) : _maxSize(maxSize), _currentSize(0) { };
-
- ~LRUKeyValue() {
- clear();
+ std::unique_ptr<V> add(const K& key, V* entry) {
+ // If the key already exists, delete it first.
+ KVMapConstIt i = _kvMap.find(key);
+ if (i != _kvMap.end()) {
+ KVListIt found = i->second;
+ delete found->second;
+ _kvMap.erase(i);
+ _kvList.erase(found);
+ _currentSize--;
}
- typedef std::pair<K, V*> KVListEntry;
-
- typedef std::list<KVListEntry> KVList;
- typedef typename KVList::iterator KVListIt;
- typedef typename KVList::const_iterator KVListConstIt;
-
- typedef boost::unordered_map<K, KVListIt> KVMap;
- typedef typename KVMap::const_iterator KVMapConstIt;
-
- /**
- * Add an (K, V*) pair to the store, where 'key' can
- * be used to retrieve value 'entry' from the store.
- *
- * Takes ownership of 'entry'.
- *
- * If 'key' already exists in the kv-store, 'entry' will
- * simply replace what is already there.
- *
- * The least recently used entry is evicted if the
- * kv-store is full prior to the add() operation.
- *
- * If an entry is evicted, it will be returned in
- * an unique_ptr for the caller to use before disposing.
- */
- std::unique_ptr<V> add(const K& key, V* entry) {
- // If the key already exists, delete it first.
- KVMapConstIt i = _kvMap.find(key);
- if (i != _kvMap.end()) {
- KVListIt found = i->second;
- delete found->second;
- _kvMap.erase(i);
- _kvList.erase(found);
- _currentSize--;
- }
-
- _kvList.push_front(std::make_pair(key, entry));
- _kvMap[key] = _kvList.begin();
- _currentSize++;
-
- // If the store has grown beyond its allowed size,
- // evict the least recently used entry.
- if (_currentSize > _maxSize) {
- V* evictedEntry = _kvList.back().second;
- invariant(evictedEntry);
-
- _kvMap.erase(_kvList.back().first);
- _kvList.pop_back();
- _currentSize--;
- invariant(_currentSize == _maxSize);
-
- // Pass ownership of evicted entry to caller.
- // If caller chooses to ignore this unique_ptr,
- // the evicted entry will be deleted automatically.
- return std::unique_ptr<V>(evictedEntry);
- }
- return std::unique_ptr<V>();
- }
+ _kvList.push_front(std::make_pair(key, entry));
+ _kvMap[key] = _kvList.begin();
+ _currentSize++;
- /**
- * Retrieve the value associated with 'key' from
- * the kv-store. The value is returned through the
- * out-parameter 'entryOut'.
- *
- * The kv-store retains ownership of 'entryOut', so
- * it should not be deleted by the caller.
- *
- * As a side effect, the retrieved entry is promoted
- * to the most recently used.
- */
- Status get(const K& key, V** entryOut) const {
- KVMapConstIt i = _kvMap.find(key);
- if (i == _kvMap.end()) {
- return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
- }
- KVListIt found = i->second;
- V* foundEntry = found->second;
+ // If the store has grown beyond its allowed size,
+ // evict the least recently used entry.
+ if (_currentSize > _maxSize) {
+ V* evictedEntry = _kvList.back().second;
+ invariant(evictedEntry);
- // Promote the kv-store entry to the front of the list.
- // It is now the most recently used.
- _kvMap.erase(i);
- _kvList.erase(found);
- _kvList.push_front(std::make_pair(key, foundEntry));
- _kvMap[key] = _kvList.begin();
+ _kvMap.erase(_kvList.back().first);
+ _kvList.pop_back();
+ _currentSize--;
+ invariant(_currentSize == _maxSize);
- *entryOut = foundEntry;
- return Status::OK();
+ // Pass ownership of evicted entry to caller.
+ // If caller chooses to ignore this unique_ptr,
+ // the evicted entry will be deleted automatically.
+ return std::unique_ptr<V>(evictedEntry);
}
+ return std::unique_ptr<V>();
+ }
- /**
- * Remove the kv-store entry keyed by 'key'.
- */
- Status remove(const K& key) {
- KVMapConstIt i = _kvMap.find(key);
- if (i == _kvMap.end()) {
- return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
- }
- KVListIt found = i->second;
- delete found->second;
- _kvMap.erase(i);
- _kvList.erase(found);
- _currentSize--;
- return Status::OK();
+ /**
+ * Retrieve the value associated with 'key' from
+ * the kv-store. The value is returned through the
+ * out-parameter 'entryOut'.
+ *
+ * The kv-store retains ownership of 'entryOut', so
+ * it should not be deleted by the caller.
+ *
+ * As a side effect, the retrieved entry is promoted
+ * to the most recently used.
+ */
+ Status get(const K& key, V** entryOut) const {
+ KVMapConstIt i = _kvMap.find(key);
+ if (i == _kvMap.end()) {
+ return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
}
+ KVListIt found = i->second;
+ V* foundEntry = found->second;
+
+ // Promote the kv-store entry to the front of the list.
+ // It is now the most recently used.
+ _kvMap.erase(i);
+ _kvList.erase(found);
+ _kvList.push_front(std::make_pair(key, foundEntry));
+ _kvMap[key] = _kvList.begin();
+
+ *entryOut = foundEntry;
+ return Status::OK();
+ }
- /**
- * Deletes all entries in the kv-store.
- */
- void clear() {
- for (KVListIt i = _kvList.begin(); i != _kvList.end(); i++) {
- delete i->second;
- }
- _kvList.clear();
- _kvMap.clear();
- _currentSize = 0;
+ /**
+ * Remove the kv-store entry keyed by 'key'.
+ */
+ Status remove(const K& key) {
+ KVMapConstIt i = _kvMap.find(key);
+ if (i == _kvMap.end()) {
+ return Status(ErrorCodes::NoSuchKey, "no such key in LRU key-value store");
}
+ KVListIt found = i->second;
+ delete found->second;
+ _kvMap.erase(i);
+ _kvList.erase(found);
+ _currentSize--;
+ return Status::OK();
+ }
- /**
- * Returns true if entry is found in the kv-store.
- */
- bool hasKey(const K& key) const {
- return _kvMap.find(key) != _kvMap.end();
+ /**
+ * Deletes all entries in the kv-store.
+ */
+ void clear() {
+ for (KVListIt i = _kvList.begin(); i != _kvList.end(); i++) {
+ delete i->second;
}
+ _kvList.clear();
+ _kvMap.clear();
+ _currentSize = 0;
+ }
- /**
- * Returns the number of entries currently in the kv-store.
- */
- size_t size() const { return _currentSize; }
+ /**
+ * Returns true if entry is found in the kv-store.
+ */
+ bool hasKey(const K& key) const {
+ return _kvMap.find(key) != _kvMap.end();
+ }
- /**
- * TODO: The kv-store should implement its own iterator. Calling through to the underlying
- * iterator exposes the internals, and forces the caller to make a horrible type
- * declaration.
- */
- KVListConstIt begin() const { return _kvList.begin(); }
+ /**
+ * Returns the number of entries currently in the kv-store.
+ */
+ size_t size() const {
+ return _currentSize;
+ }
+
+ /**
+ * TODO: The kv-store should implement its own iterator. Calling through to the underlying
+ * iterator exposes the internals, and forces the caller to make a horrible type
+ * declaration.
+ */
+ KVListConstIt begin() const {
+ return _kvList.begin();
+ }
- KVListConstIt end() const { return _kvList.end(); }
+ KVListConstIt end() const {
+ return _kvList.end();
+ }
- private:
- // The maximum allowable number of entries in the kv-store.
- const size_t _maxSize;
+private:
+ // The maximum allowable number of entries in the kv-store.
+ const size_t _maxSize;
- // The number of entries currently in the kv-store.
- size_t _currentSize;
+ // The number of entries currently in the kv-store.
+ size_t _currentSize;
- // (K, V*) pairs are stored in this std::list. They are sorted in order
- // of use, where the front is the most recently used and the back is the
- // least recently used.
- mutable KVList _kvList;
+ // (K, V*) pairs are stored in this std::list. They are sorted in order
+ // of use, where the front is the most recently used and the back is the
+ // least recently used.
+ mutable KVList _kvList;
- // Maps from a key to the corresponding std::list entry.
- mutable KVMap _kvMap;
- };
+ // Maps from a key to the corresponding std::list entry.
+ mutable KVMap _kvMap;
+};
} // namespace mongo
diff --git a/src/mongo/db/query/lru_key_value_test.cpp b/src/mongo/db/query/lru_key_value_test.cpp
index da9c9c73f34..e77fcec0bc9 100644
--- a/src/mongo/db/query/lru_key_value_test.cpp
+++ b/src/mongo/db/query/lru_key_value_test.cpp
@@ -35,163 +35,163 @@ using namespace mongo;
namespace {
- //
- // Convenience functions
- //
-
- void assertInKVStore(LRUKeyValue<int, int>& cache, int key, int value) {
- int* cachedValue = NULL;
- ASSERT_TRUE(cache.hasKey(key));
- Status s = cache.get(key, &cachedValue);
- ASSERT_OK(s);
- ASSERT_EQUALS(*cachedValue, value);
- }
+//
+// Convenience functions
+//
+
+void assertInKVStore(LRUKeyValue<int, int>& cache, int key, int value) {
+ int* cachedValue = NULL;
+ ASSERT_TRUE(cache.hasKey(key));
+ Status s = cache.get(key, &cachedValue);
+ ASSERT_OK(s);
+ ASSERT_EQUALS(*cachedValue, value);
+}
+
+void assertNotInKVStore(LRUKeyValue<int, int>& cache, int key) {
+ int* cachedValue = NULL;
+ ASSERT_FALSE(cache.hasKey(key));
+ Status s = cache.get(key, &cachedValue);
+ ASSERT_NOT_OK(s);
+}
- void assertNotInKVStore(LRUKeyValue<int, int>& cache, int key) {
- int* cachedValue = NULL;
- ASSERT_FALSE(cache.hasKey(key));
- Status s = cache.get(key, &cachedValue);
- ASSERT_NOT_OK(s);
- }
+/**
+ * Test that we can add an entry and get it back out.
+ */
+TEST(LRUKeyValueTest, BasicAddGet) {
+ LRUKeyValue<int, int> cache(100);
+ cache.add(1, new int(2));
+ assertInKVStore(cache, 1, 2);
+}
- /**
- * Test that we can add an entry and get it back out.
- */
- TEST(LRUKeyValueTest, BasicAddGet) {
- LRUKeyValue<int, int> cache(100);
- cache.add(1, new int(2));
- assertInKVStore(cache, 1, 2);
- }
+/**
+ * A kv-store with a max size of 0 isn't too useful, but test
+ * that at the very least we don't blow up.
+ */
+TEST(LRUKeyValueTest, SizeZeroCache) {
+ LRUKeyValue<int, int> cache(0);
+ cache.add(1, new int(2));
+ assertNotInKVStore(cache, 1);
+}
- /**
- * A kv-store with a max size of 0 isn't too useful, but test
- * that at the very least we don't blow up.
- */
- TEST(LRUKeyValueTest, SizeZeroCache) {
- LRUKeyValue<int, int> cache(0);
- cache.add(1, new int(2));
- assertNotInKVStore(cache, 1);
- }
+/**
+ * Make sure eviction and promotion work properly with
+ * a kv-store of size 1.
+ */
+TEST(LRUKeyValueTest, SizeOneCache) {
+ LRUKeyValue<int, int> cache(1);
+ cache.add(0, new int(0));
+ assertInKVStore(cache, 0, 0);
- /**
- * Make sure eviction and promotion work properly with
- * a kv-store of size 1.
- */
- TEST(LRUKeyValueTest, SizeOneCache) {
- LRUKeyValue<int, int> cache(1);
- cache.add(0, new int(0));
- assertInKVStore(cache, 0, 0);
-
- // Second entry should immediately evict the first.
- cache.add(1, new int(1));
- assertNotInKVStore(cache, 0);
- assertInKVStore(cache, 1, 1);
+ // Second entry should immediately evict the first.
+ cache.add(1, new int(1));
+ assertNotInKVStore(cache, 0);
+ assertInKVStore(cache, 1, 1);
+}
+
+/**
+ * Fill up a size 10 kv-store with 10 entries. Call get()
+ * on every entry except for one. Then call add() and
+ * make sure that the proper entry got evicted.
+ */
+TEST(LRUKeyValueTest, EvictionTest) {
+ int maxSize = 10;
+ LRUKeyValue<int, int> cache(maxSize);
+ for (int i = 0; i < maxSize; ++i) {
+ std::unique_ptr<int> evicted = cache.add(i, new int(i));
+ ASSERT(NULL == evicted.get());
}
+ ASSERT_EQUALS(cache.size(), (size_t)maxSize);
- /**
- * Fill up a size 10 kv-store with 10 entries. Call get()
- * on every entry except for one. Then call add() and
- * make sure that the proper entry got evicted.
- */
- TEST(LRUKeyValueTest, EvictionTest) {
- int maxSize = 10;
- LRUKeyValue<int, int> cache(maxSize);
- for (int i = 0; i < maxSize; ++i) {
- std::unique_ptr<int> evicted = cache.add(i, new int(i));
- ASSERT(NULL == evicted.get());
+ // Call get() on all but one key.
+ int evictKey = 5;
+ for (int i = 0; i < maxSize; ++i) {
+ if (i == evictKey) {
+ continue;
}
- ASSERT_EQUALS(cache.size(), (size_t)maxSize);
+ assertInKVStore(cache, i, i);
+ }
- // Call get() on all but one key.
- int evictKey = 5;
- for (int i = 0; i < maxSize; ++i) {
- if (i == evictKey) { continue; }
+ // Adding another entry causes an eviction.
+ std::unique_ptr<int> evicted = cache.add(maxSize + 1, new int(maxSize + 1));
+ ASSERT_EQUALS(cache.size(), (size_t)maxSize);
+ ASSERT(NULL != evicted.get());
+ ASSERT_EQUALS(*evicted, evictKey);
+
+ // Check that the least recently accessed has been evicted.
+ for (int i = 0; i < maxSize; ++i) {
+ if (i == evictKey) {
+ assertNotInKVStore(cache, evictKey);
+ } else {
assertInKVStore(cache, i, i);
}
-
- // Adding another entry causes an eviction.
- std::unique_ptr<int> evicted = cache.add(maxSize + 1, new int(maxSize + 1));
- ASSERT_EQUALS(cache.size(), (size_t)maxSize);
- ASSERT(NULL != evicted.get());
- ASSERT_EQUALS(*evicted, evictKey);
-
- // Check that the least recently accessed has been evicted.
- for (int i = 0; i < maxSize; ++i) {
- if (i == evictKey) {
- assertNotInKVStore(cache, evictKey);
- }
- else {
- assertInKVStore(cache, i, i);
- }
- }
}
+}
- /**
- * Fill up a size 10 kv-store with 10 entries. Call get()
- * on a single entry to promote it to most recently
- * accessed. Then cause 9 evictions and make sure only
- * the entry on which we called get() remains.
- */
- TEST(LRUKeyValueTest, PromotionTest) {
- int maxSize = 10;
- LRUKeyValue<int, int> cache(maxSize);
- for (int i = 0; i < maxSize; ++i) {
- std::unique_ptr<int> evicted = cache.add(i, new int(i));
- ASSERT(NULL == evicted.get());
- }
- ASSERT_EQUALS(cache.size(), (size_t)maxSize);
+/**
+ * Fill up a size 10 kv-store with 10 entries. Call get()
+ * on a single entry to promote it to most recently
+ * accessed. Then cause 9 evictions and make sure only
+ * the entry on which we called get() remains.
+ */
+TEST(LRUKeyValueTest, PromotionTest) {
+ int maxSize = 10;
+ LRUKeyValue<int, int> cache(maxSize);
+ for (int i = 0; i < maxSize; ++i) {
+ std::unique_ptr<int> evicted = cache.add(i, new int(i));
+ ASSERT(NULL == evicted.get());
+ }
+ ASSERT_EQUALS(cache.size(), (size_t)maxSize);
- // Call get() on a particular key.
- int promoteKey = 5;
- assertInKVStore(cache, promoteKey, promoteKey);
+ // Call get() on a particular key.
+ int promoteKey = 5;
+ assertInKVStore(cache, promoteKey, promoteKey);
- // Evict all but one of the original entries.
- for (int i = maxSize; i < (maxSize + maxSize - 1); ++i) {
- std::unique_ptr<int> evicted = cache.add(i, new int(i));
- ASSERT(NULL != evicted.get());
- }
- ASSERT_EQUALS(cache.size(), (size_t)maxSize);
-
- // Check that the promoteKey has not been evicted.
- for (int i = 0; i < maxSize; ++i) {
- if (i == promoteKey) {
- assertInKVStore(cache, promoteKey, promoteKey);
- }
- else {
- assertNotInKVStore(cache, i);
- }
+ // Evict all but one of the original entries.
+ for (int i = maxSize; i < (maxSize + maxSize - 1); ++i) {
+ std::unique_ptr<int> evicted = cache.add(i, new int(i));
+ ASSERT(NULL != evicted.get());
+ }
+ ASSERT_EQUALS(cache.size(), (size_t)maxSize);
+
+ // Check that the promoteKey has not been evicted.
+ for (int i = 0; i < maxSize; ++i) {
+ if (i == promoteKey) {
+ assertInKVStore(cache, promoteKey, promoteKey);
+ } else {
+ assertNotInKVStore(cache, i);
}
}
+}
- /**
- * Test that calling add() with a key that already exists
- * in the kv-store deletes the existing entry.
- */
- TEST(LRUKeyValueTest, ReplaceKeyTest) {
- LRUKeyValue<int, int> cache(10);
- cache.add(4, new int(4));
- assertInKVStore(cache, 4, 4);
- cache.add(4, new int(5));
- assertInKVStore(cache, 4, 5);
- }
+/**
+ * Test that calling add() with a key that already exists
+ * in the kv-store deletes the existing entry.
+ */
+TEST(LRUKeyValueTest, ReplaceKeyTest) {
+ LRUKeyValue<int, int> cache(10);
+ cache.add(4, new int(4));
+ assertInKVStore(cache, 4, 4);
+ cache.add(4, new int(5));
+ assertInKVStore(cache, 4, 5);
+}
- /**
- * Test iteration over the kv-store.
- */
- TEST(LRUKeyValueTest, IterationTest) {
- LRUKeyValue<int, int> cache(2);
- cache.add(1, new int(1));
- cache.add(2, new int(2));
-
- typedef std::list< std::pair<int, int*> >::const_iterator CacheIterator;
- CacheIterator i = cache.begin();
- ASSERT_EQUALS(i->first, 2);
- ASSERT_EQUALS(*i->second, 2);
- ++i;
- ASSERT_EQUALS(i->first, 1);
- ASSERT_EQUALS(*i->second, 1);
- ++i;
- ASSERT(i == cache.end());
- }
+/**
+ * Test iteration over the kv-store.
+ */
+TEST(LRUKeyValueTest, IterationTest) {
+ LRUKeyValue<int, int> cache(2);
+ cache.add(1, new int(1));
+ cache.add(2, new int(2));
+
+ typedef std::list<std::pair<int, int*>>::const_iterator CacheIterator;
+ CacheIterator i = cache.begin();
+ ASSERT_EQUALS(i->first, 2);
+ ASSERT_EQUALS(*i->second, 2);
+ ++i;
+ ASSERT_EQUALS(i->first, 1);
+ ASSERT_EQUALS(*i->second, 1);
+ ++i;
+ ASSERT(i == cache.end());
+}
} // namespace
diff --git a/src/mongo/db/query/parsed_projection.cpp b/src/mongo/db/query/parsed_projection.cpp
index 7552475d5f1..4ebcff2d1a3 100644
--- a/src/mongo/db/query/parsed_projection.cpp
+++ b/src/mongo/db/query/parsed_projection.cpp
@@ -32,296 +32,283 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- /**
- * Parses the projection 'spec' and checks its validity with respect to the query 'query'.
- * Puts covering information into 'out'.
- *
- * Does not take ownership of 'query'.
- *
- * Returns Status::OK() if it's a valid spec.
- * Returns a Status indicating how it's invalid otherwise.
- */
- // static
- Status ParsedProjection::make(const BSONObj& spec,
- const MatchExpression* const query,
- ParsedProjection** out,
- const MatchExpressionParser::WhereCallback& whereCallback) {
- // Are we including or excluding fields? Values:
- // -1 when we haven't initialized it.
- // 1 when we're including
- // 0 when we're excluding.
- int include_exclude = -1;
-
- // If any of these are 'true' the projection isn't covered.
- bool include = true;
- bool hasNonSimple = false;
- bool hasDottedField = false;
-
- bool includeID = true;
-
- bool hasIndexKeyProjection = false;
-
- bool wantGeoNearPoint = false;
- bool wantGeoNearDistance = false;
-
- // Until we see a positional or elemMatch operator we're normal.
- ArrayOpType arrayOpType = ARRAY_OP_NORMAL;
-
- BSONObjIterator it(spec);
- while (it.more()) {
- BSONElement e = it.next();
-
- if (!e.isNumber() && !e.isBoolean()) {
- hasNonSimple = true;
- }
-
- if (Object == e.type()) {
- BSONObj obj = e.embeddedObject();
- if (1 != obj.nFields()) {
- return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
- }
-
- BSONElement e2 = obj.firstElement();
- if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
- if (e2.isNumber()) {
- // This is A-OK.
- }
- else if (e2.type() == Array) {
- BSONObj arr = e2.embeddedObject();
- if (2 != arr.nFields()) {
- return Status(ErrorCodes::BadValue, "$slice array wrong size");
- }
-
- BSONObjIterator it(arr);
- // Skip over 'skip'.
- it.next();
- int limit = it.next().numberInt();
- if (limit <= 0) {
- return Status(ErrorCodes::BadValue, "$slice limit must be positive");
- }
- }
- else {
- return Status(ErrorCodes::BadValue,
- "$slice only supports numbers and [skip, limit] arrays");
- }
- }
- else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
- // Validate $elemMatch arguments and dependencies.
- if (Object != e2.type()) {
- return Status(ErrorCodes::BadValue,
- "elemMatch: Invalid argument, object required.");
- }
+using std::unique_ptr;
+using std::string;
- if (ARRAY_OP_POSITIONAL == arrayOpType) {
- return Status(ErrorCodes::BadValue,
- "Cannot specify positional operator and $elemMatch.");
- }
-
- if (mongoutils::str::contains(e.fieldName(), '.')) {
- return Status(ErrorCodes::BadValue,
- "Cannot use $elemMatch projection on a nested field.");
- }
-
- arrayOpType = ARRAY_OP_ELEM_MATCH;
-
- // Create a MatchExpression for the elemMatch.
- BSONObj elemMatchObj = e.wrap();
- verify(elemMatchObj.isOwned());
-
- // TODO: Is there a faster way of validating the elemMatchObj?
- StatusWithMatchExpression swme = MatchExpressionParser::parse(elemMatchObj,
- whereCallback);
- if (!swme.isOK()) {
- return swme.getStatus();
- }
- delete swme.getValue();
- }
- else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
- // Field for meta must be top level. We can relax this at some point.
- if (mongoutils::str::contains(e.fieldName(), '.')) {
- return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
- }
+/**
+ * Parses the projection 'spec' and checks its validity with respect to the query 'query'.
+ * Puts covering information into 'out'.
+ *
+ * Does not take ownership of 'query'.
+ *
+ * Returns Status::OK() if it's a valid spec.
+ * Returns a Status indicating how it's invalid otherwise.
+ */
+// static
+Status ParsedProjection::make(const BSONObj& spec,
+ const MatchExpression* const query,
+ ParsedProjection** out,
+ const MatchExpressionParser::WhereCallback& whereCallback) {
+ // Are we including or excluding fields? Values:
+ // -1 when we haven't initialized it.
+ // 1 when we're including
+ // 0 when we're excluding.
+ int include_exclude = -1;
+
+ // If any of these are 'true' the projection isn't covered.
+ bool include = true;
+ bool hasNonSimple = false;
+ bool hasDottedField = false;
+
+ bool includeID = true;
+
+ bool hasIndexKeyProjection = false;
+
+ bool wantGeoNearPoint = false;
+ bool wantGeoNearDistance = false;
+
+ // Until we see a positional or elemMatch operator we're normal.
+ ArrayOpType arrayOpType = ARRAY_OP_NORMAL;
+
+ BSONObjIterator it(spec);
+ while (it.more()) {
+ BSONElement e = it.next();
+
+ if (!e.isNumber() && !e.isBoolean()) {
+ hasNonSimple = true;
+ }
- // Make sure the argument to $meta is something we recognize.
- // e.g. {x: {$meta: "textScore"}}
- if (String != e2.type()) {
- return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
- }
+ if (Object == e.type()) {
+ BSONObj obj = e.embeddedObject();
+ if (1 != obj.nFields()) {
+ return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
+ }
- if (e2.valuestr() != LiteParsedQuery::metaTextScore
- && e2.valuestr() != LiteParsedQuery::metaRecordId
- && e2.valuestr() != LiteParsedQuery::metaIndexKey
- && e2.valuestr() != LiteParsedQuery::metaGeoNearDistance
- && e2.valuestr() != LiteParsedQuery::metaGeoNearPoint) {
- return Status(ErrorCodes::BadValue,
- "unsupported $meta operator: " + e2.str());
+ BSONElement e2 = obj.firstElement();
+ if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
+ if (e2.isNumber()) {
+ // This is A-OK.
+ } else if (e2.type() == Array) {
+ BSONObj arr = e2.embeddedObject();
+ if (2 != arr.nFields()) {
+ return Status(ErrorCodes::BadValue, "$slice array wrong size");
}
- // This clobbers everything else.
- if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
- hasIndexKeyProjection = true;
- }
- else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
- wantGeoNearDistance = true;
+ BSONObjIterator it(arr);
+ // Skip over 'skip'.
+ it.next();
+ int limit = it.next().numberInt();
+ if (limit <= 0) {
+ return Status(ErrorCodes::BadValue, "$slice limit must be positive");
}
- else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
- wantGeoNearPoint = true;
- }
- }
- else {
+ } else {
return Status(ErrorCodes::BadValue,
- string("Unsupported projection option: ") + e.toString());
+ "$slice only supports numbers and [skip, limit] arrays");
}
- }
- else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
- includeID = false;
- }
- else {
- // Projections of dotted fields aren't covered.
- if (mongoutils::str::contains(e.fieldName(), '.')) {
- hasDottedField = true;
+ } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
+ // Validate $elemMatch arguments and dependencies.
+ if (Object != e2.type()) {
+ return Status(ErrorCodes::BadValue,
+ "elemMatch: Invalid argument, object required.");
}
- // Validate input.
- if (include_exclude == -1) {
- // If we haven't specified an include/exclude, initialize include_exclude.
- // We expect further include/excludes to match it.
- include_exclude = e.trueValue();
- include = !e.trueValue();
- }
- else if (static_cast<bool>(include_exclude) != e.trueValue()) {
- // Make sure that the incl./excl. matches the previous.
+ if (ARRAY_OP_POSITIONAL == arrayOpType) {
return Status(ErrorCodes::BadValue,
- "Projection cannot have a mix of inclusion and exclusion.");
+ "Cannot specify positional operator and $elemMatch.");
}
- }
-
- if (_isPositionalOperator(e.fieldName())) {
- // Validate the positional op.
- if (!e.trueValue()) {
+ if (mongoutils::str::contains(e.fieldName(), '.')) {
return Status(ErrorCodes::BadValue,
- "Cannot exclude array elements with the positional operator.");
+ "Cannot use $elemMatch projection on a nested field.");
}
- if (ARRAY_OP_POSITIONAL == arrayOpType) {
- return Status(ErrorCodes::BadValue,
- "Cannot specify more than one positional proj. per query.");
+ arrayOpType = ARRAY_OP_ELEM_MATCH;
+
+ // Create a MatchExpression for the elemMatch.
+ BSONObj elemMatchObj = e.wrap();
+ verify(elemMatchObj.isOwned());
+
+ // TODO: Is there a faster way of validating the elemMatchObj?
+ StatusWithMatchExpression swme =
+ MatchExpressionParser::parse(elemMatchObj, whereCallback);
+ if (!swme.isOK()) {
+ return swme.getStatus();
+ }
+ delete swme.getValue();
+ } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
+ // Field for meta must be top level. We can relax this at some point.
+ if (mongoutils::str::contains(e.fieldName(), '.')) {
+ return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
}
- if (ARRAY_OP_ELEM_MATCH == arrayOpType) {
- return Status(ErrorCodes::BadValue,
- "Cannot specify positional operator and $elemMatch.");
+ // Make sure the argument to $meta is something we recognize.
+ // e.g. {x: {$meta: "textScore"}}
+ if (String != e2.type()) {
+ return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
}
- std::string after = mongoutils::str::after(e.fieldName(), ".$");
- if (mongoutils::str::contains(after, ".$")) {
- mongoutils::str::stream ss;
- ss << "Positional projection '" << e.fieldName() << "' contains "
- << "the positional operator more than once.";
- return Status(ErrorCodes::BadValue, ss);
+ if (e2.valuestr() != LiteParsedQuery::metaTextScore &&
+ e2.valuestr() != LiteParsedQuery::metaRecordId &&
+ e2.valuestr() != LiteParsedQuery::metaIndexKey &&
+ e2.valuestr() != LiteParsedQuery::metaGeoNearDistance &&
+ e2.valuestr() != LiteParsedQuery::metaGeoNearPoint) {
+ return Status(ErrorCodes::BadValue, "unsupported $meta operator: " + e2.str());
}
- std::string matchfield = mongoutils::str::before(e.fieldName(), '.');
- if (!_hasPositionalOperatorMatch(query, matchfield)) {
- mongoutils::str::stream ss;
- ss << "Positional projection '" << e.fieldName() << "' does not "
- << "match the query document.";
- return Status(ErrorCodes::BadValue, ss);
+ // This clobbers everything else.
+ if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
+ hasIndexKeyProjection = true;
+ } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
+ wantGeoNearDistance = true;
+ } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
+ wantGeoNearPoint = true;
}
+ } else {
+ return Status(ErrorCodes::BadValue,
+ string("Unsupported projection option: ") + e.toString());
+ }
+ } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
+ includeID = false;
+ } else {
+ // Projections of dotted fields aren't covered.
+ if (mongoutils::str::contains(e.fieldName(), '.')) {
+ hasDottedField = true;
+ }
- arrayOpType = ARRAY_OP_POSITIONAL;
+ // Validate input.
+ if (include_exclude == -1) {
+ // If we haven't specified an include/exclude, initialize include_exclude.
+ // We expect further include/excludes to match it.
+ include_exclude = e.trueValue();
+ include = !e.trueValue();
+ } else if (static_cast<bool>(include_exclude) != e.trueValue()) {
+ // Make sure that the incl./excl. matches the previous.
+ return Status(ErrorCodes::BadValue,
+ "Projection cannot have a mix of inclusion and exclusion.");
}
}
- // Fill out the returned obj.
- unique_ptr<ParsedProjection> pp(new ParsedProjection());
-
- // The positional operator uses the MatchDetails from the query
- // expression to know which array element was matched.
- pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;
- // Save the raw spec. It should be owned by the LiteParsedQuery.
- verify(spec.isOwned());
- pp->_source = spec;
- pp->_returnKey = hasIndexKeyProjection;
+ if (_isPositionalOperator(e.fieldName())) {
+ // Validate the positional op.
+ if (!e.trueValue()) {
+ return Status(ErrorCodes::BadValue,
+ "Cannot exclude array elements with the positional operator.");
+ }
- // Dotted fields aren't covered, non-simple require match details, and as for include, "if
- // we default to including then we can't use an index because we don't know what we're
- // missing."
- pp->_requiresDocument = include || hasNonSimple || hasDottedField;
+ if (ARRAY_OP_POSITIONAL == arrayOpType) {
+ return Status(ErrorCodes::BadValue,
+ "Cannot specify more than one positional proj. per query.");
+ }
- // Add geoNear projections.
- pp->_wantGeoNearPoint = wantGeoNearPoint;
- pp->_wantGeoNearDistance = wantGeoNearDistance;
+ if (ARRAY_OP_ELEM_MATCH == arrayOpType) {
+ return Status(ErrorCodes::BadValue,
+ "Cannot specify positional operator and $elemMatch.");
+ }
- // If it's possible to compute the projection in a covered fashion, populate _requiredFields
- // so the planner can perform projection analysis.
- if (!pp->_requiresDocument) {
- if (includeID) {
- pp->_requiredFields.push_back("_id");
+ std::string after = mongoutils::str::after(e.fieldName(), ".$");
+ if (mongoutils::str::contains(after, ".$")) {
+ mongoutils::str::stream ss;
+ ss << "Positional projection '" << e.fieldName() << "' contains "
+ << "the positional operator more than once.";
+ return Status(ErrorCodes::BadValue, ss);
}
- // The only way we could be here is if spec is only simple non-dotted-field projections.
- // Therefore we can iterate over spec to get the fields required.
- BSONObjIterator srcIt(spec);
- while (srcIt.more()) {
- BSONElement elt = srcIt.next();
- // We've already handled the _id field before entering this loop.
- if (includeID && mongoutils::str::equals(elt.fieldName(), "_id")) {
- continue;
- }
- if (elt.trueValue()) {
- pp->_requiredFields.push_back(elt.fieldName());
- }
+ std::string matchfield = mongoutils::str::before(e.fieldName(), '.');
+ if (!_hasPositionalOperatorMatch(query, matchfield)) {
+ mongoutils::str::stream ss;
+ ss << "Positional projection '" << e.fieldName() << "' does not "
+ << "match the query document.";
+ return Status(ErrorCodes::BadValue, ss);
}
+
+ arrayOpType = ARRAY_OP_POSITIONAL;
}
+ }
+
+ // Fill out the returned obj.
+ unique_ptr<ParsedProjection> pp(new ParsedProjection());
+
+ // The positional operator uses the MatchDetails from the query
+ // expression to know which array element was matched.
+ pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;
+
+ // Save the raw spec. It should be owned by the LiteParsedQuery.
+ verify(spec.isOwned());
+ pp->_source = spec;
+ pp->_returnKey = hasIndexKeyProjection;
- // returnKey clobbers everything.
- if (hasIndexKeyProjection) {
- pp->_requiresDocument = false;
+    // Dotted fields aren't covered, non-simple projections require match details, and as for
+    // 'include': "if we default to including then we can't use an index because we don't know
+    // what we're missing."
+ pp->_requiresDocument = include || hasNonSimple || hasDottedField;
+
+ // Add geoNear projections.
+ pp->_wantGeoNearPoint = wantGeoNearPoint;
+ pp->_wantGeoNearDistance = wantGeoNearDistance;
+
+ // If it's possible to compute the projection in a covered fashion, populate _requiredFields
+ // so the planner can perform projection analysis.
+ if (!pp->_requiresDocument) {
+ if (includeID) {
+ pp->_requiredFields.push_back("_id");
}
- *out = pp.release();
- return Status::OK();
+ // The only way we could be here is if spec is only simple non-dotted-field projections.
+ // Therefore we can iterate over spec to get the fields required.
+ BSONObjIterator srcIt(spec);
+ while (srcIt.more()) {
+ BSONElement elt = srcIt.next();
+ // We've already handled the _id field before entering this loop.
+ if (includeID && mongoutils::str::equals(elt.fieldName(), "_id")) {
+ continue;
+ }
+ if (elt.trueValue()) {
+ pp->_requiredFields.push_back(elt.fieldName());
+ }
+ }
}
- // static
- bool ParsedProjection::_isPositionalOperator(const char* fieldName) {
- return mongoutils::str::contains(fieldName, ".$") &&
- !mongoutils::str::contains(fieldName, ".$ref") &&
- !mongoutils::str::contains(fieldName, ".$id") &&
- !mongoutils::str::contains(fieldName, ".$db");
-
+ // returnKey clobbers everything.
+ if (hasIndexKeyProjection) {
+ pp->_requiresDocument = false;
}
- // static
- bool ParsedProjection::_hasPositionalOperatorMatch(const MatchExpression* const query,
- const std::string& matchfield) {
- if (query->isLogical()) {
- for (unsigned int i = 0; i < query->numChildren(); ++i) {
- if (_hasPositionalOperatorMatch(query->getChild(i), matchfield)) {
- return true;
- }
+ *out = pp.release();
+ return Status::OK();
+}
+
+// static
+bool ParsedProjection::_isPositionalOperator(const char* fieldName) {
+ return mongoutils::str::contains(fieldName, ".$") &&
+ !mongoutils::str::contains(fieldName, ".$ref") &&
+ !mongoutils::str::contains(fieldName, ".$id") &&
+ !mongoutils::str::contains(fieldName, ".$db");
+}
+
+// static
+bool ParsedProjection::_hasPositionalOperatorMatch(const MatchExpression* const query,
+ const std::string& matchfield) {
+ if (query->isLogical()) {
+ for (unsigned int i = 0; i < query->numChildren(); ++i) {
+ if (_hasPositionalOperatorMatch(query->getChild(i), matchfield)) {
+ return true;
}
}
- else {
- StringData queryPath = query->path();
- const char* pathRawData = queryPath.rawData();
- // We have to make a distinction between match expressions that are
- // initialized with an empty field/path name "" and match expressions
- // for which the path is not meaningful (eg. $where and the internal
- // expression type ALWAYS_FALSE).
- if (!pathRawData) {
- return false;
- }
- std::string pathPrefix = mongoutils::str::before(pathRawData, '.');
- return pathPrefix == matchfield;
+ } else {
+ StringData queryPath = query->path();
+ const char* pathRawData = queryPath.rawData();
+ // We have to make a distinction between match expressions that are
+ // initialized with an empty field/path name "" and match expressions
+ // for which the path is not meaningful (eg. $where and the internal
+ // expression type ALWAYS_FALSE).
+ if (!pathRawData) {
+ return false;
}
- return false;
+ std::string pathPrefix = mongoutils::str::before(pathRawData, '.');
+ return pathPrefix == matchfield;
}
+ return false;
+}
} // namespace mongo
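The positional-operator checks above can be exercised directly through ParsedProjection::make. A minimal sketch (not part of this patch), assuming the same helpers the unit tests below rely on (fromjson, MatchExpressionParser::parse) and mirroring two of those test cases:

    StatusWithMatchExpression swme = MatchExpressionParser::parse(fromjson("{a: 1}"));
    invariant(swme.isOK());
    std::unique_ptr<MatchExpression> expr(swme.getValue());

    ParsedProjection* out = NULL;
    // The query constrains 'a', so the positional projection {'a.$': 1} is accepted.
    invariant(ParsedProjection::make(fromjson("{'a.$': 1}"), expr.get(), &out).isOK());
    std::unique_ptr<ParsedProjection> parsed(out);

    // The query does not constrain 'b', so {'b.$': 1} is rejected with BadValue.
    out = NULL;
    invariant(!ParsedProjection::make(fromjson("{'b.$': 1}"), expr.get(), &out).isOK());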
diff --git a/src/mongo/db/query/parsed_projection.h b/src/mongo/db/query/parsed_projection.h
index 3fa40fe2ca8..b135b5f47ed 100644
--- a/src/mongo/db/query/parsed_projection.h
+++ b/src/mongo/db/query/parsed_projection.h
@@ -32,112 +32,112 @@
namespace mongo {
- class ParsedProjection {
- public:
- // TODO: this is duplicated in here and in the proj exec code. When we have
- // ProjectionExpression we can remove dups.
- enum ArrayOpType {
- ARRAY_OP_NORMAL = 0,
- ARRAY_OP_ELEM_MATCH,
- ARRAY_OP_POSITIONAL
- };
-
- /**
- * Parses the projection 'spec' and checks its validity with respect to the query 'query'.
- * Puts covering information into 'out'.
- *
- * Returns Status::OK() if it's a valid spec.
- * Returns a Status indicating how it's invalid otherwise.
- */
- static Status make(const BSONObj& spec,
- const MatchExpression* const query,
- ParsedProjection** out,
- const MatchExpressionParser::WhereCallback& whereCallback =
- MatchExpressionParser::WhereCallback());
-
- /**
- * Returns true if the projection requires match details from the query,
- * and false otherwise.
- */
- bool requiresMatchDetails() const { return _requiresMatchDetails; }
-
- /**
- * Is the full document required to compute this projection?
- */
- bool requiresDocument() const { return _requiresDocument; }
-
- /**
- * If requiresDocument() == false, what fields are required to compute
- * the projection?
- */
- const std::vector<std::string>& getRequiredFields() const {
- return _requiredFields;
- }
-
- /**
- * Get the raw BSONObj proj spec obj
- */
- const BSONObj& getProjObj() const {
- return _source;
- }
-
- /**
- * Does the projection want geoNear metadata? If so any geoNear stage should include them.
- */
- bool wantGeoNearDistance() const {
- return _wantGeoNearDistance;
- }
-
- bool wantGeoNearPoint() const {
- return _wantGeoNearPoint;
- }
-
- bool wantIndexKey() const {
- return _returnKey;
- }
-
- private:
- /**
- * Must go through ::make
- */
- ParsedProjection()
- : _requiresMatchDetails(false),
- _requiresDocument(true),
- _wantGeoNearDistance(false),
- _wantGeoNearPoint(false),
- _returnKey(false) { }
-
- /**
- * Returns true if field name refers to a positional projection.
- */
- static bool _isPositionalOperator(const char* fieldName);
-
- /**
- * Returns true if the MatchExpression 'query' queries against
- * the field named by 'matchfield'. This deeply traverses logical
- * nodes in the matchfield and returns true if any of the children
- * have the field (so if 'query' is {$and: [{a: 1}, {b: 1}]} and
- * 'matchfield' is "b", the return value is true).
- *
- * Does not take ownership of 'query'.
- */
- static bool _hasPositionalOperatorMatch(const MatchExpression* const query,
- const std::string& matchfield);
-
- // TODO: stringdata?
- std::vector<std::string> _requiredFields;
-
- bool _requiresMatchDetails;
-
- bool _requiresDocument;
-
- BSONObj _source;
-
- bool _wantGeoNearDistance;
-
- bool _wantGeoNearPoint;
-
- bool _returnKey;
- };
+class ParsedProjection {
+public:
+ // TODO: this is duplicated in here and in the proj exec code. When we have
+ // ProjectionExpression we can remove dups.
+ enum ArrayOpType { ARRAY_OP_NORMAL = 0, ARRAY_OP_ELEM_MATCH, ARRAY_OP_POSITIONAL };
+
+ /**
+ * Parses the projection 'spec' and checks its validity with respect to the query 'query'.
+ * Puts covering information into 'out'.
+ *
+ * Returns Status::OK() if it's a valid spec.
+ * Returns a Status indicating how it's invalid otherwise.
+ */
+ static Status make(const BSONObj& spec,
+ const MatchExpression* const query,
+ ParsedProjection** out,
+ const MatchExpressionParser::WhereCallback& whereCallback =
+ MatchExpressionParser::WhereCallback());
+
+ /**
+ * Returns true if the projection requires match details from the query,
+ * and false otherwise.
+ */
+ bool requiresMatchDetails() const {
+ return _requiresMatchDetails;
+ }
+
+ /**
+ * Is the full document required to compute this projection?
+ */
+ bool requiresDocument() const {
+ return _requiresDocument;
+ }
+
+ /**
+ * If requiresDocument() == false, what fields are required to compute
+ * the projection?
+ */
+ const std::vector<std::string>& getRequiredFields() const {
+ return _requiredFields;
+ }
+
+ /**
+ * Get the raw BSONObj proj spec obj
+ */
+ const BSONObj& getProjObj() const {
+ return _source;
+ }
+
+ /**
+     * Does the projection want geoNear metadata? If so, any geoNear stage should include them.
+ */
+ bool wantGeoNearDistance() const {
+ return _wantGeoNearDistance;
+ }
+
+ bool wantGeoNearPoint() const {
+ return _wantGeoNearPoint;
+ }
+
+ bool wantIndexKey() const {
+ return _returnKey;
+ }
+
+private:
+ /**
+ * Must go through ::make
+ */
+ ParsedProjection()
+ : _requiresMatchDetails(false),
+ _requiresDocument(true),
+ _wantGeoNearDistance(false),
+ _wantGeoNearPoint(false),
+ _returnKey(false) {}
+
+ /**
+ * Returns true if field name refers to a positional projection.
+ */
+ static bool _isPositionalOperator(const char* fieldName);
+
+ /**
+ * Returns true if the MatchExpression 'query' queries against
+ * the field named by 'matchfield'. This deeply traverses logical
+ * nodes in the matchfield and returns true if any of the children
+ * have the field (so if 'query' is {$and: [{a: 1}, {b: 1}]} and
+ * 'matchfield' is "b", the return value is true).
+ *
+ * Does not take ownership of 'query'.
+ */
+ static bool _hasPositionalOperatorMatch(const MatchExpression* const query,
+ const std::string& matchfield);
+
+ // TODO: stringdata?
+ std::vector<std::string> _requiredFields;
+
+ bool _requiresMatchDetails;
+
+ bool _requiresDocument;
+
+ BSONObj _source;
+
+ bool _wantGeoNearDistance;
+
+ bool _wantGeoNearPoint;
+
+ bool _returnKey;
+};
} // namespace mongo
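requiresDocument() and getRequiredFields() are what the planner consults when deciding whether a projection can be computed from index keys alone. A rough usage sketch (not part of this patch; it borrows the createParsedProjection helper defined in the test file below):

    // A simple inclusion projection that suppresses _id is covered: the full
    // document is not needed and only "a" has to be supplied by the plan.
    unique_ptr<ParsedProjection> covered(createParsedProjection("{}", "{_id: 0, a: 1}"));
    invariant(!covered->requiresDocument());
    invariant(covered->getRequiredFields() == std::vector<std::string>{"a"});

    // With _id implicitly included, "_id" appears in the required fields as well.
    unique_ptr<ParsedProjection> withId(createParsedProjection("{}", "{a: 1}"));
    invariant(withId->getRequiredFields().size() == 2U);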
diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp
index c669490f35a..9128575ea95 100644
--- a/src/mongo/db/query/parsed_projection_test.cpp
+++ b/src/mongo/db/query/parsed_projection_test.cpp
@@ -35,182 +35,180 @@
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
-
- using namespace mongo;
-
- //
- // creation function
- //
-
- unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const BSONObj& projObj) {
- StatusWithMatchExpression swme = MatchExpressionParser::parse(query);
- ASSERT(swme.isOK());
- std::unique_ptr<MatchExpression> queryMatchExpr(swme.getValue());
- ParsedProjection* out = NULL;
- Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
- if (!status.isOK()) {
- FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj
- << " (query: " << query << "): " << status.toString());
- }
- ASSERT(out);
- return unique_ptr<ParsedProjection>(out);
+using std::unique_ptr;
+using std::string;
+using std::vector;
+
+using namespace mongo;
+
+//
+// creation function
+//
+
+unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const BSONObj& projObj) {
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(query);
+ ASSERT(swme.isOK());
+ std::unique_ptr<MatchExpression> queryMatchExpr(swme.getValue());
+ ParsedProjection* out = NULL;
+ Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
+ if (!status.isOK()) {
+ FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj
+ << " (query: " << query << "): " << status.toString());
}
-
- unique_ptr<ParsedProjection> createParsedProjection(const char* queryStr, const char* projStr) {
- BSONObj query = fromjson(queryStr);
- BSONObj projObj = fromjson(projStr);
- return createParsedProjection(query, projObj);
- }
-
- //
- // Failure to create a parsed projection is expected
- //
-
- void assertInvalidProjection(const char* queryStr, const char* projStr) {
- BSONObj query = fromjson(queryStr);
- BSONObj projObj = fromjson(projStr);
- StatusWithMatchExpression swme = MatchExpressionParser::parse(query);
- ASSERT(swme.isOK());
- std::unique_ptr<MatchExpression> queryMatchExpr(swme.getValue());
- ParsedProjection* out = NULL;
- Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
- std::unique_ptr<ParsedProjection> destroy(out);
- ASSERT(!status.isOK());
- }
-
- // canonical_query.cpp will invoke ParsedProjection::make only when
- // the projection spec is non-empty. This test case is included for
- // completeness and do not reflect actual usage.
- TEST(ParsedProjectionTest, MakeId) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{}"));
- ASSERT(parsedProj->requiresDocument());
- }
-
- TEST(ParsedProjectionTest, MakeEmpty) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0}"));
- ASSERT(parsedProj->requiresDocument());
- }
-
- TEST(ParsedProjectionTest, MakeSingleField) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{a: 1}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 2U);
- ASSERT_EQUALS(fields[0], "_id");
- ASSERT_EQUALS(fields[1], "a");
- }
-
- TEST(ParsedProjectionTest, MakeSingleFieldCovered) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0, a: 1}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 1U);
- ASSERT_EQUALS(fields[0], "a");
- }
-
- TEST(ParsedProjectionTest, MakeSingleFieldIDCovered) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 1}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 1U);
- ASSERT_EQUALS(fields[0], "_id");
- }
-
- // boolean support is undocumented
- TEST(ParsedProjectionTest, MakeSingleFieldCoveredBoolean) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0, a: true}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 1U);
- ASSERT_EQUALS(fields[0], "a");
- }
-
- // boolean support is undocumented
- TEST(ParsedProjectionTest, MakeSingleFieldCoveredIdBoolean) {
- unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: false, a: 1}"));
- ASSERT(!parsedProj->requiresDocument());
- const vector<string>& fields = parsedProj->getRequiredFields();
- ASSERT_EQUALS(fields.size(), 1U);
- ASSERT_EQUALS(fields[0], "a");
- }
-
- //
- // Positional operator validation
- //
-
- TEST(ParsedProjectionTest, InvalidPositionalOperatorProjections) {
- assertInvalidProjection("{}", "{'a.$': 1}");
- assertInvalidProjection("{a: 1}", "{'b.$': 1}");
- assertInvalidProjection("{a: 1}", "{'a.$': 0}");
- assertInvalidProjection("{a: 1}", "{'a.$.d.$': 1}");
- assertInvalidProjection("{a: 1}", "{'a.$.$': 1}");
- assertInvalidProjection("{a: 1}", "{'a.$.$': 1}");
- assertInvalidProjection("{a: 1, b: 1, c: 1}", "{'abc.$': 1}");
- assertInvalidProjection("{$or: [{a: 1}, {$or: [{b: 1}, {c: 1}]}]}", "{'d.$': 1}");
- assertInvalidProjection("{a: [1, 2, 3]}", "{'.$': 1}");
- }
-
- TEST(ParsedProjectionTest, ValidPositionalOperatorProjections) {
- createParsedProjection("{a: 1}", "{'a.$': 1}");
- createParsedProjection("{a: 1}", "{'a.foo.bar.$': 1}");
- createParsedProjection("{a: 1}", "{'a.foo.bar.$.x.y': 1}");
- createParsedProjection("{'a.b.c': 1}", "{'a.b.c.$': 1}");
- createParsedProjection("{'a.b.c': 1}", "{'a.e.f.$': 1}");
- createParsedProjection("{a: {b: 1}}", "{'a.$': 1}");
- createParsedProjection("{a: 1, b: 1}}", "{'a.$': 1}");
- createParsedProjection("{a: 1, b: 1}}", "{'b.$': 1}");
- createParsedProjection("{$and: [{a: 1}, {b: 1}]}", "{'a.$': 1}");
- createParsedProjection("{$and: [{a: 1}, {b: 1}]}", "{'b.$': 1}");
- createParsedProjection("{$or: [{a: 1}, {b: 1}]}", "{'a.$': 1}");
- createParsedProjection("{$or: [{a: 1}, {b: 1}]}", "{'b.$': 1}");
- createParsedProjection("{$and: [{$or: [{a: 1}, {$and: [{b: 1}, {c: 1}]}]}]}",
- "{'c.d.f.$': 1}");
- // Fields with empty name can be projected using the positional $ operator.
- createParsedProjection("{'': [1, 2, 3]}", "{'.$': 1}");
- }
-
- // Some match expressions (eg. $where) do not override MatchExpression::path()
- // In this test case, we use an internal match expression implementation ALWAYS_FALSE
- // to achieve the same effect.
- // Projection parser should handle this the same way as an empty path.
- TEST(ParsedProjectionTest, InvalidPositionalProjectionDefaultPathMatchExpression) {
- unique_ptr<MatchExpression> queryMatchExpr(new FalseMatchExpression());
- ASSERT(NULL == queryMatchExpr->path().rawData());
-
- ParsedProjection* out = NULL;
- BSONObj projObj = fromjson("{'a.$': 1}");
- Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
- ASSERT(!status.isOK());
- std::unique_ptr<ParsedProjection> destroy(out);
-
- // Projecting onto empty field should fail.
- BSONObj emptyFieldProjObj = fromjson("{'.$': 1}");
- status = ParsedProjection::make(emptyFieldProjObj, queryMatchExpr.get(), &out);
- ASSERT(!status.isOK());
- }
-
- //
- // DBRef projections
- //
-
- TEST(ParsedProjectionTest, DBRefProjections) {
- // non-dotted
- createParsedProjection(BSONObj(), BSON( "$ref" << 1));
- createParsedProjection(BSONObj(), BSON( "$id" << 1));
- createParsedProjection(BSONObj(), BSON( "$ref" << 1));
- // dotted before
- createParsedProjection("{}", "{'a.$ref': 1}");
- createParsedProjection("{}", "{'a.$id': 1}");
- createParsedProjection("{}", "{'a.$db': 1}");
- // dotted after
- createParsedProjection("{}", "{'$id.a': 1}");
- // position operator on $id
- // $ref and $db hold the collection and database names respectively,
- // so these fields cannot be arrays.
- createParsedProjection("{'a.$id': {$elemMatch: {x: 1}}}", "{'a.$id.$': 1}");
-
- }
-} // unnamed namespace
+ ASSERT(out);
+ return unique_ptr<ParsedProjection>(out);
+}
+
+unique_ptr<ParsedProjection> createParsedProjection(const char* queryStr, const char* projStr) {
+ BSONObj query = fromjson(queryStr);
+ BSONObj projObj = fromjson(projStr);
+ return createParsedProjection(query, projObj);
+}
+
+//
+// Failure to create a parsed projection is expected
+//
+
+void assertInvalidProjection(const char* queryStr, const char* projStr) {
+ BSONObj query = fromjson(queryStr);
+ BSONObj projObj = fromjson(projStr);
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(query);
+ ASSERT(swme.isOK());
+ std::unique_ptr<MatchExpression> queryMatchExpr(swme.getValue());
+ ParsedProjection* out = NULL;
+ Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
+ std::unique_ptr<ParsedProjection> destroy(out);
+ ASSERT(!status.isOK());
+}
+
+// canonical_query.cpp will invoke ParsedProjection::make only when
+// the projection spec is non-empty. This test case is included for
+// completeness and does not reflect actual usage.
+TEST(ParsedProjectionTest, MakeId) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{}"));
+ ASSERT(parsedProj->requiresDocument());
+}
+
+TEST(ParsedProjectionTest, MakeEmpty) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0}"));
+ ASSERT(parsedProj->requiresDocument());
+}
+
+TEST(ParsedProjectionTest, MakeSingleField) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{a: 1}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 2U);
+ ASSERT_EQUALS(fields[0], "_id");
+ ASSERT_EQUALS(fields[1], "a");
+}
+
+TEST(ParsedProjectionTest, MakeSingleFieldCovered) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0, a: 1}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 1U);
+ ASSERT_EQUALS(fields[0], "a");
+}
+
+TEST(ParsedProjectionTest, MakeSingleFieldIDCovered) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 1}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 1U);
+ ASSERT_EQUALS(fields[0], "_id");
+}
+
+// boolean support is undocumented
+TEST(ParsedProjectionTest, MakeSingleFieldCoveredBoolean) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: 0, a: true}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 1U);
+ ASSERT_EQUALS(fields[0], "a");
+}
+
+// boolean support is undocumented
+TEST(ParsedProjectionTest, MakeSingleFieldCoveredIdBoolean) {
+ unique_ptr<ParsedProjection> parsedProj(createParsedProjection("{}", "{_id: false, a: 1}"));
+ ASSERT(!parsedProj->requiresDocument());
+ const vector<string>& fields = parsedProj->getRequiredFields();
+ ASSERT_EQUALS(fields.size(), 1U);
+ ASSERT_EQUALS(fields[0], "a");
+}
+
+//
+// Positional operator validation
+//
+
+TEST(ParsedProjectionTest, InvalidPositionalOperatorProjections) {
+ assertInvalidProjection("{}", "{'a.$': 1}");
+ assertInvalidProjection("{a: 1}", "{'b.$': 1}");
+ assertInvalidProjection("{a: 1}", "{'a.$': 0}");
+ assertInvalidProjection("{a: 1}", "{'a.$.d.$': 1}");
+ assertInvalidProjection("{a: 1}", "{'a.$.$': 1}");
+ assertInvalidProjection("{a: 1}", "{'a.$.$': 1}");
+ assertInvalidProjection("{a: 1, b: 1, c: 1}", "{'abc.$': 1}");
+ assertInvalidProjection("{$or: [{a: 1}, {$or: [{b: 1}, {c: 1}]}]}", "{'d.$': 1}");
+ assertInvalidProjection("{a: [1, 2, 3]}", "{'.$': 1}");
+}
+
+TEST(ParsedProjectionTest, ValidPositionalOperatorProjections) {
+ createParsedProjection("{a: 1}", "{'a.$': 1}");
+ createParsedProjection("{a: 1}", "{'a.foo.bar.$': 1}");
+ createParsedProjection("{a: 1}", "{'a.foo.bar.$.x.y': 1}");
+ createParsedProjection("{'a.b.c': 1}", "{'a.b.c.$': 1}");
+ createParsedProjection("{'a.b.c': 1}", "{'a.e.f.$': 1}");
+ createParsedProjection("{a: {b: 1}}", "{'a.$': 1}");
+ createParsedProjection("{a: 1, b: 1}}", "{'a.$': 1}");
+ createParsedProjection("{a: 1, b: 1}}", "{'b.$': 1}");
+ createParsedProjection("{$and: [{a: 1}, {b: 1}]}", "{'a.$': 1}");
+ createParsedProjection("{$and: [{a: 1}, {b: 1}]}", "{'b.$': 1}");
+ createParsedProjection("{$or: [{a: 1}, {b: 1}]}", "{'a.$': 1}");
+ createParsedProjection("{$or: [{a: 1}, {b: 1}]}", "{'b.$': 1}");
+ createParsedProjection("{$and: [{$or: [{a: 1}, {$and: [{b: 1}, {c: 1}]}]}]}", "{'c.d.f.$': 1}");
+ // Fields with empty name can be projected using the positional $ operator.
+ createParsedProjection("{'': [1, 2, 3]}", "{'.$': 1}");
+}
+
+// Some match expressions (e.g. $where) do not override MatchExpression::path().
+// In this test case, we use an internal match expression implementation, ALWAYS_FALSE,
+// to achieve the same effect.
+// The projection parser should handle this the same way as an empty path.
+TEST(ParsedProjectionTest, InvalidPositionalProjectionDefaultPathMatchExpression) {
+ unique_ptr<MatchExpression> queryMatchExpr(new FalseMatchExpression());
+ ASSERT(NULL == queryMatchExpr->path().rawData());
+
+ ParsedProjection* out = NULL;
+ BSONObj projObj = fromjson("{'a.$': 1}");
+ Status status = ParsedProjection::make(projObj, queryMatchExpr.get(), &out);
+ ASSERT(!status.isOK());
+ std::unique_ptr<ParsedProjection> destroy(out);
+
+ // Projecting onto empty field should fail.
+ BSONObj emptyFieldProjObj = fromjson("{'.$': 1}");
+ status = ParsedProjection::make(emptyFieldProjObj, queryMatchExpr.get(), &out);
+ ASSERT(!status.isOK());
+}
+
+//
+// DBRef projections
+//
+
+TEST(ParsedProjectionTest, DBRefProjections) {
+ // non-dotted
+ createParsedProjection(BSONObj(), BSON("$ref" << 1));
+ createParsedProjection(BSONObj(), BSON("$id" << 1));
+ createParsedProjection(BSONObj(), BSON("$ref" << 1));
+ // dotted before
+ createParsedProjection("{}", "{'a.$ref': 1}");
+ createParsedProjection("{}", "{'a.$id': 1}");
+ createParsedProjection("{}", "{'a.$db': 1}");
+ // dotted after
+ createParsedProjection("{}", "{'$id.a': 1}");
+ // position operator on $id
+ // $ref and $db hold the collection and database names respectively,
+ // so these fields cannot be arrays.
+ createParsedProjection("{'a.$id': {$elemMatch: {x: 1}}}", "{'a.$id.$': 1}");
+}
+} // unnamed namespace
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index b79c2e8f659..f97a81c0f01 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -37,7 +37,7 @@
#include <memory>
#include "boost/thread/locks.hpp"
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
+#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
#include "mongo/db/matcher/expression_array.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/query/plan_ranker.h"
@@ -50,23 +50,23 @@
namespace mongo {
namespace {
- // Delimiters for cache key encoding.
- const char kEncodeDiscriminatorsBegin = '<';
- const char kEncodeDiscriminatorsEnd = '>';
- const char kEncodeChildrenBegin = '[';
- const char kEncodeChildrenEnd = ']';
- const char kEncodeChildrenSeparator = ',';
- const char kEncodeSortSection = '~';
- const char kEncodeProjectionSection = '|';
-
- /**
- * Encode user-provided string. Cache key delimiters seen in the
- * user string are escaped with a backslash.
- */
- void encodeUserString(StringData s, StringBuilder* keyBuilder) {
- for (size_t i = 0; i < s.size(); ++i) {
- char c = s[i];
- switch (c) {
+// Delimiters for cache key encoding.
+const char kEncodeDiscriminatorsBegin = '<';
+const char kEncodeDiscriminatorsEnd = '>';
+const char kEncodeChildrenBegin = '[';
+const char kEncodeChildrenEnd = ']';
+const char kEncodeChildrenSeparator = ',';
+const char kEncodeSortSection = '~';
+const char kEncodeProjectionSection = '|';
+
+/**
+ * Encode user-provided string. Cache key delimiters seen in the
+ * user string are escaped with a backslash.
+ */
+void encodeUserString(StringData s, StringBuilder* keyBuilder) {
+ for (size_t i = 0; i < s.size(); ++i) {
+ char c = s[i];
+ switch (c) {
case kEncodeDiscriminatorsBegin:
case kEncodeDiscriminatorsEnd:
case kEncodeChildrenBegin:
@@ -75,637 +75,680 @@ namespace {
case kEncodeSortSection:
case kEncodeProjectionSection:
case '\\':
- *keyBuilder << '\\';
- // Fall through to default case.
+ *keyBuilder << '\\';
+ // Fall through to default case.
default:
*keyBuilder << c;
- }
}
}
+}
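A standalone re-sketch of that escaping rule (an illustration only, not the helper above), showing why a '|' inside a user-supplied field name cannot be mistaken for the projection-section marker:

    #include <sstream>
    #include <string>

    // Any character that doubles as a cache-key delimiter (mirroring the
    // constants above), plus the backslash itself, is prefixed with a
    // backslash before being appended.
    std::string escapeSketch(const std::string& s) {
        static const std::string delimiters = "<>[],~|\\";
        std::ostringstream out;
        for (char c : s) {
            if (delimiters.find(c) != std::string::npos)
                out << '\\';
            out << c;
        }
        return out.str();
    }
    // escapeSketch("a|b") yields "a\|b".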
- /**
- * 2-character encoding of MatchExpression::MatchType.
- */
- const char* encodeMatchType(MatchExpression::MatchType mt) {
- switch(mt) {
- case MatchExpression::AND: return "an"; break;
- case MatchExpression::OR: return "or"; break;
- case MatchExpression::NOR: return "nr"; break;
- case MatchExpression::NOT: return "nt"; break;
- case MatchExpression::ELEM_MATCH_OBJECT: return "eo"; break;
- case MatchExpression::ELEM_MATCH_VALUE: return "ev"; break;
- case MatchExpression::SIZE: return "sz"; break;
- case MatchExpression::LTE: return "le"; break;
- case MatchExpression::LT: return "lt"; break;
- case MatchExpression::EQ: return "eq"; break;
- case MatchExpression::GT: return "gt"; break;
- case MatchExpression::GTE: return "ge"; break;
- case MatchExpression::REGEX: return "re"; break;
- case MatchExpression::MOD: return "mo"; break;
- case MatchExpression::EXISTS: return "ex"; break;
- case MatchExpression::MATCH_IN: return "in"; break;
- case MatchExpression::TYPE_OPERATOR: return "ty"; break;
- case MatchExpression::GEO: return "go"; break;
- case MatchExpression::WHERE: return "wh"; break;
- case MatchExpression::ATOMIC: return "at"; break;
- case MatchExpression::ALWAYS_FALSE: return "af"; break;
- case MatchExpression::GEO_NEAR: return "gn"; break;
- case MatchExpression::TEXT: return "te"; break;
- default: verify(0); return "";
- }
+/**
+ * 2-character encoding of MatchExpression::MatchType.
+ */
+const char* encodeMatchType(MatchExpression::MatchType mt) {
+ switch (mt) {
+ case MatchExpression::AND:
+ return "an";
+ break;
+ case MatchExpression::OR:
+ return "or";
+ break;
+ case MatchExpression::NOR:
+ return "nr";
+ break;
+ case MatchExpression::NOT:
+ return "nt";
+ break;
+ case MatchExpression::ELEM_MATCH_OBJECT:
+ return "eo";
+ break;
+ case MatchExpression::ELEM_MATCH_VALUE:
+ return "ev";
+ break;
+ case MatchExpression::SIZE:
+ return "sz";
+ break;
+ case MatchExpression::LTE:
+ return "le";
+ break;
+ case MatchExpression::LT:
+ return "lt";
+ break;
+ case MatchExpression::EQ:
+ return "eq";
+ break;
+ case MatchExpression::GT:
+ return "gt";
+ break;
+ case MatchExpression::GTE:
+ return "ge";
+ break;
+ case MatchExpression::REGEX:
+ return "re";
+ break;
+ case MatchExpression::MOD:
+ return "mo";
+ break;
+ case MatchExpression::EXISTS:
+ return "ex";
+ break;
+ case MatchExpression::MATCH_IN:
+ return "in";
+ break;
+ case MatchExpression::TYPE_OPERATOR:
+ return "ty";
+ break;
+ case MatchExpression::GEO:
+ return "go";
+ break;
+ case MatchExpression::WHERE:
+ return "wh";
+ break;
+ case MatchExpression::ATOMIC:
+ return "at";
+ break;
+ case MatchExpression::ALWAYS_FALSE:
+ return "af";
+ break;
+ case MatchExpression::GEO_NEAR:
+ return "gn";
+ break;
+ case MatchExpression::TEXT:
+ return "te";
+ break;
+ default:
+ verify(0);
+ return "";
}
+}
- /**
- * Encodes GEO match expression.
- * Encoding includes:
- * - type of geo query (within/intersect/near)
- * - geometry type
- * - CRS (flat or spherical)
- */
- void encodeGeoMatchExpression(const GeoMatchExpression* tree, StringBuilder* keyBuilder) {
- const GeoExpression& geoQuery = tree->getGeoExpression();
-
- // Type of geo query.
- switch (geoQuery.getPred()) {
- case GeoExpression::WITHIN: *keyBuilder << "wi"; break;
- case GeoExpression::INTERSECT: *keyBuilder << "in"; break;
- case GeoExpression::INVALID: *keyBuilder << "id"; break;
- }
+/**
+ * Encodes GEO match expression.
+ * Encoding includes:
+ * - type of geo query (within/intersect/near)
+ * - geometry type
+ * - CRS (flat or spherical)
+ */
+void encodeGeoMatchExpression(const GeoMatchExpression* tree, StringBuilder* keyBuilder) {
+ const GeoExpression& geoQuery = tree->getGeoExpression();
+
+ // Type of geo query.
+ switch (geoQuery.getPred()) {
+ case GeoExpression::WITHIN:
+ *keyBuilder << "wi";
+ break;
+ case GeoExpression::INTERSECT:
+ *keyBuilder << "in";
+ break;
+ case GeoExpression::INVALID:
+ *keyBuilder << "id";
+ break;
+ }
- // Geometry type.
- // Only one of the shared_ptrs in GeoContainer may be non-NULL.
- *keyBuilder << geoQuery.getGeometry().getDebugType();
+ // Geometry type.
+ // Only one of the shared_ptrs in GeoContainer may be non-NULL.
+ *keyBuilder << geoQuery.getGeometry().getDebugType();
- // CRS (flat or spherical)
- if (FLAT == geoQuery.getGeometry().getNativeCRS()) {
+ // CRS (flat or spherical)
+ if (FLAT == geoQuery.getGeometry().getNativeCRS()) {
+ *keyBuilder << "fl";
+ } else if (SPHERE == geoQuery.getGeometry().getNativeCRS()) {
+ *keyBuilder << "sp";
+ } else if (STRICT_SPHERE == geoQuery.getGeometry().getNativeCRS()) {
+ *keyBuilder << "ss";
+ } else {
+ error() << "unknown CRS type " << (int)geoQuery.getGeometry().getNativeCRS()
+ << " in geometry of type " << geoQuery.getGeometry().getDebugType();
+ invariant(false);
+ }
+}
+
+/**
+ * Encodes GEO_NEAR match expression.
+ * Encode:
+ * - isNearSphere
+ * - CRS (flat or spherical)
+ */
+void encodeGeoNearMatchExpression(const GeoNearMatchExpression* tree, StringBuilder* keyBuilder) {
+ const GeoNearExpression& nearQuery = tree->getData();
+
+ // isNearSphere
+ *keyBuilder << (nearQuery.isNearSphere ? "ns" : "nr");
+
+ // CRS (flat or spherical or strict-winding spherical)
+ switch (nearQuery.centroid->crs) {
+ case FLAT:
*keyBuilder << "fl";
- }
- else if (SPHERE == geoQuery.getGeometry().getNativeCRS()) {
+ break;
+ case SPHERE:
*keyBuilder << "sp";
- }
- else if (STRICT_SPHERE == geoQuery.getGeometry().getNativeCRS()) {
+ break;
+ case STRICT_SPHERE:
*keyBuilder << "ss";
- }
- else {
- error() << "unknown CRS type " << (int)geoQuery.getGeometry().getNativeCRS()
- << " in geometry of type " << geoQuery.getGeometry().getDebugType();
- invariant(false);
- }
- }
-
- /**
- * Encodes GEO_NEAR match expression.
- * Encode:
- * - isNearSphere
- * - CRS (flat or spherical)
- */
- void encodeGeoNearMatchExpression(const GeoNearMatchExpression* tree,
- StringBuilder* keyBuilder) {
- const GeoNearExpression& nearQuery = tree->getData();
-
- // isNearSphere
- *keyBuilder << (nearQuery.isNearSphere ? "ns" : "nr");
-
- // CRS (flat or spherical or strict-winding spherical)
- switch (nearQuery.centroid->crs) {
- case FLAT: *keyBuilder << "fl"; break;
- case SPHERE: *keyBuilder << "sp"; break;
- case STRICT_SPHERE: *keyBuilder << "ss"; break;
+ break;
case UNSET:
error() << "unknown CRS type " << (int)nearQuery.centroid->crs
<< " in point geometry for near query";
invariant(false);
break;
- }
}
+}
} // namespace
- //
- // Cache-related functions for CanonicalQuery
- //
+//
+// Cache-related functions for CanonicalQuery
+//
- bool PlanCache::shouldCacheQuery(const CanonicalQuery& query) {
- const LiteParsedQuery& lpq = query.getParsed();
- const MatchExpression* expr = query.root();
+bool PlanCache::shouldCacheQuery(const CanonicalQuery& query) {
+ const LiteParsedQuery& lpq = query.getParsed();
+ const MatchExpression* expr = query.root();
- // Collection scan
- // No sort order requested
- if (lpq.getSort().isEmpty() &&
- expr->matchType() == MatchExpression::AND && expr->numChildren() == 0) {
- return false;
- }
-
- // Hint provided
- if (!lpq.getHint().isEmpty()) {
- return false;
- }
-
- // Min provided
- // Min queries are a special case of hinted queries.
- if (!lpq.getMin().isEmpty()) {
- return false;
- }
-
- // Max provided
- // Similar to min, max queries are a special case of hinted queries.
- if (!lpq.getMax().isEmpty()) {
- return false;
- }
+ // Collection scan
+ // No sort order requested
+ if (lpq.getSort().isEmpty() && expr->matchType() == MatchExpression::AND &&
+ expr->numChildren() == 0) {
+ return false;
+ }
- // Explain queries are not-cacheable. This is primarily because of
- // the need to generate current and accurate information in allPlans.
- // If the explain report is generated by the cached plan runner using
- // stale information from the cache for the losing plans, allPlans would
- // simply be wrong.
- if (lpq.isExplain()) {
- return false;
- }
+ // Hint provided
+ if (!lpq.getHint().isEmpty()) {
+ return false;
+ }
- // Tailable cursors won't get cached, just turn into collscans.
- if (query.getParsed().isTailable()) {
- return false;
- }
+ // Min provided
+ // Min queries are a special case of hinted queries.
+ if (!lpq.getMin().isEmpty()) {
+ return false;
+ }
- // Snapshot is really a hint.
- if (query.getParsed().isSnapshot()) {
- return false;
- }
+ // Max provided
+ // Similar to min, max queries are a special case of hinted queries.
+ if (!lpq.getMax().isEmpty()) {
+ return false;
+ }
- return true;
- }
-
- //
- // CachedSolution
- //
-
- CachedSolution::CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry)
- : plannerData(entry.plannerData.size()),
- key(key),
- query(entry.query.getOwned()),
- sort(entry.sort.getOwned()),
- projection(entry.projection.getOwned()),
- decisionWorks(entry.decision->stats[0]->common.works) {
- // CachedSolution should not having any references into
- // cache entry. All relevant data should be cloned/copied.
- for (size_t i = 0; i < entry.plannerData.size(); ++i) {
- verify(entry.plannerData[i]);
- plannerData[i] = entry.plannerData[i]->clone();
- }
+    // Explain queries are not cacheable. This is primarily because of
+ // the need to generate current and accurate information in allPlans.
+ // If the explain report is generated by the cached plan runner using
+ // stale information from the cache for the losing plans, allPlans would
+ // simply be wrong.
+ if (lpq.isExplain()) {
+ return false;
}
- CachedSolution::~CachedSolution() {
- for (std::vector<SolutionCacheData*>::const_iterator i = plannerData.begin();
- i != plannerData.end(); ++i) {
- SolutionCacheData* scd = *i;
- delete scd;
- }
+ // Tailable cursors won't get cached, just turn into collscans.
+ if (query.getParsed().isTailable()) {
+ return false;
}
- //
- // PlanCacheEntry
- //
+ // Snapshot is really a hint.
+ if (query.getParsed().isSnapshot()) {
+ return false;
+ }
- PlanCacheEntry::PlanCacheEntry(const std::vector<QuerySolution*>& solutions,
- PlanRankingDecision* why)
- : plannerData(solutions.size()),
- decision(why) {
- invariant(why);
+ return true;
+}
- // The caller of this constructor is responsible for ensuring
- // that the QuerySolution 's' has valid cacheData. If there's no
- // data to cache you shouldn't be trying to construct a PlanCacheEntry.
+//
+// CachedSolution
+//
- // Copy the solution's cache data into the plan cache entry.
- for (size_t i = 0; i < solutions.size(); ++i) {
- invariant(solutions[i]->cacheData.get());
- plannerData[i] = solutions[i]->cacheData->clone();
- }
+CachedSolution::CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry)
+ : plannerData(entry.plannerData.size()),
+ key(key),
+ query(entry.query.getOwned()),
+ sort(entry.sort.getOwned()),
+ projection(entry.projection.getOwned()),
+ decisionWorks(entry.decision->stats[0]->common.works) {
+    // CachedSolution should not have any references into the
+    // cache entry. All relevant data should be cloned/copied.
+ for (size_t i = 0; i < entry.plannerData.size(); ++i) {
+ verify(entry.plannerData[i]);
+ plannerData[i] = entry.plannerData[i]->clone();
}
+}
- PlanCacheEntry::~PlanCacheEntry() {
- for (size_t i = 0; i < feedback.size(); ++i) {
- delete feedback[i];
- }
- for (size_t i = 0; i < plannerData.size(); ++i) {
- delete plannerData[i];
- }
+CachedSolution::~CachedSolution() {
+ for (std::vector<SolutionCacheData*>::const_iterator i = plannerData.begin();
+ i != plannerData.end();
+ ++i) {
+ SolutionCacheData* scd = *i;
+ delete scd;
}
+}
- PlanCacheEntry* PlanCacheEntry::clone() const {
- OwnedPointerVector<QuerySolution> solutions;
- for (size_t i = 0; i < plannerData.size(); ++i) {
- QuerySolution* qs = new QuerySolution();
- qs->cacheData.reset(plannerData[i]->clone());
- solutions.mutableVector().push_back(qs);
- }
- PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone());
-
- // Copy query shape.
- entry->query = query.getOwned();
- entry->sort = sort.getOwned();
- entry->projection = projection.getOwned();
-
- // Copy performance stats.
- for (size_t i = 0; i < feedback.size(); ++i) {
- PlanCacheEntryFeedback* fb = new PlanCacheEntryFeedback();
- fb->stats.reset(feedback[i]->stats->clone());
- fb->score = feedback[i]->score;
- entry->feedback.push_back(fb);
- }
- return entry;
- }
+//
+// PlanCacheEntry
+//
- std::string PlanCacheEntry::toString() const {
- return str::stream()
- << "(query: " << query.toString()
- << ";sort: " << sort.toString()
- << ";projection: " << projection.toString()
- << ";solutions: " << plannerData.size()
- << ")";
- }
+PlanCacheEntry::PlanCacheEntry(const std::vector<QuerySolution*>& solutions,
+ PlanRankingDecision* why)
+ : plannerData(solutions.size()), decision(why) {
+ invariant(why);
- std::string CachedSolution::toString() const {
- return str::stream() << "key: " << key << '\n';
+ // The caller of this constructor is responsible for ensuring
+ // that the QuerySolution 's' has valid cacheData. If there's no
+ // data to cache you shouldn't be trying to construct a PlanCacheEntry.
+
+ // Copy the solution's cache data into the plan cache entry.
+ for (size_t i = 0; i < solutions.size(); ++i) {
+ invariant(solutions[i]->cacheData.get());
+ plannerData[i] = solutions[i]->cacheData->clone();
}
+}
- //
- // PlanCacheIndexTree
- //
+PlanCacheEntry::~PlanCacheEntry() {
+ for (size_t i = 0; i < feedback.size(); ++i) {
+ delete feedback[i];
+ }
+ for (size_t i = 0; i < plannerData.size(); ++i) {
+ delete plannerData[i];
+ }
+}
- void PlanCacheIndexTree::setIndexEntry(const IndexEntry& ie) {
- entry.reset(new IndexEntry(ie));
+PlanCacheEntry* PlanCacheEntry::clone() const {
+ OwnedPointerVector<QuerySolution> solutions;
+ for (size_t i = 0; i < plannerData.size(); ++i) {
+ QuerySolution* qs = new QuerySolution();
+ qs->cacheData.reset(plannerData[i]->clone());
+ solutions.mutableVector().push_back(qs);
}
+ PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone());
- PlanCacheIndexTree* PlanCacheIndexTree::clone() const {
- PlanCacheIndexTree* root = new PlanCacheIndexTree();
- if (NULL != entry.get()) {
- root->index_pos = index_pos;
- root->setIndexEntry(*entry.get());
- }
+ // Copy query shape.
+ entry->query = query.getOwned();
+ entry->sort = sort.getOwned();
+ entry->projection = projection.getOwned();
+ // Copy performance stats.
+ for (size_t i = 0; i < feedback.size(); ++i) {
+ PlanCacheEntryFeedback* fb = new PlanCacheEntryFeedback();
+ fb->stats.reset(feedback[i]->stats->clone());
+ fb->score = feedback[i]->score;
+ entry->feedback.push_back(fb);
+ }
+ return entry;
+}
+
+std::string PlanCacheEntry::toString() const {
+ return str::stream() << "(query: " << query.toString() << ";sort: " << sort.toString()
+ << ";projection: " << projection.toString()
+ << ";solutions: " << plannerData.size() << ")";
+}
+
+std::string CachedSolution::toString() const {
+ return str::stream() << "key: " << key << '\n';
+}
+
+//
+// PlanCacheIndexTree
+//
+
+void PlanCacheIndexTree::setIndexEntry(const IndexEntry& ie) {
+ entry.reset(new IndexEntry(ie));
+}
+
+PlanCacheIndexTree* PlanCacheIndexTree::clone() const {
+ PlanCacheIndexTree* root = new PlanCacheIndexTree();
+ if (NULL != entry.get()) {
+ root->index_pos = index_pos;
+ root->setIndexEntry(*entry.get());
+ }
+
+ for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
+ it != children.end();
+ ++it) {
+ PlanCacheIndexTree* clonedChild = (*it)->clone();
+ root->children.push_back(clonedChild);
+ }
+ return root;
+}
+
+std::string PlanCacheIndexTree::toString(int indents) const {
+ StringBuilder result;
+ if (!children.empty()) {
+ result << std::string(3 * indents, '-') << "Node\n";
+ int newIndent = indents + 1;
for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
- it != children.end(); ++it) {
- PlanCacheIndexTree* clonedChild = (*it)->clone();
- root->children.push_back(clonedChild);
- }
- return root;
- }
-
- std::string PlanCacheIndexTree::toString(int indents) const {
- StringBuilder result;
- if (!children.empty()) {
- result << std::string(3 * indents, '-') << "Node\n";
- int newIndent = indents + 1;
- for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
- it != children.end(); ++it) {
- result << (*it)->toString(newIndent);
- }
- return result.str();
- }
- else {
- result << std::string(3 * indents, '-') << "Leaf ";
- if (NULL != entry.get()) {
- result << entry->keyPattern.toString() << ", pos: " << index_pos;
- }
- result << '\n';
+ it != children.end();
+ ++it) {
+ result << (*it)->toString(newIndent);
}
return result.str();
+ } else {
+ result << std::string(3 * indents, '-') << "Leaf ";
+ if (NULL != entry.get()) {
+ result << entry->keyPattern.toString() << ", pos: " << index_pos;
+ }
+ result << '\n';
}
+ return result.str();
+}
- //
- // SolutionCacheData
- //
+//
+// SolutionCacheData
+//
- SolutionCacheData* SolutionCacheData::clone() const {
- SolutionCacheData* other = new SolutionCacheData();
- if (NULL != this->tree.get()) {
- // 'tree' could be NULL if the cached solution
- // is a collection scan.
- other->tree.reset(this->tree->clone());
- }
- other->solnType = this->solnType;
- other->wholeIXSolnDir = this->wholeIXSolnDir;
- other->indexFilterApplied = this->indexFilterApplied;
- return other;
+SolutionCacheData* SolutionCacheData::clone() const {
+ SolutionCacheData* other = new SolutionCacheData();
+ if (NULL != this->tree.get()) {
+ // 'tree' could be NULL if the cached solution
+ // is a collection scan.
+ other->tree.reset(this->tree->clone());
}
+ other->solnType = this->solnType;
+ other->wholeIXSolnDir = this->wholeIXSolnDir;
+ other->indexFilterApplied = this->indexFilterApplied;
+ return other;
+}
- std::string SolutionCacheData::toString() const {
- switch (this->solnType) {
+std::string SolutionCacheData::toString() const {
+ switch (this->solnType) {
case WHOLE_IXSCAN_SOLN:
verify(this->tree.get());
- return str::stream()
- << "(whole index scan solution: "
- << "dir=" << this->wholeIXSolnDir << "; "
- << "tree=" << this->tree->toString()
- << ")";
+ return str::stream() << "(whole index scan solution: "
+ << "dir=" << this->wholeIXSolnDir << "; "
+ << "tree=" << this->tree->toString() << ")";
case COLLSCAN_SOLN:
return "(collection scan)";
case USE_INDEX_TAGS_SOLN:
verify(this->tree.get());
- return str::stream()
- << "(index-tagged expression tree: "
- << "tree=" << this->tree->toString()
- << ")";
- }
- MONGO_UNREACHABLE;
+ return str::stream() << "(index-tagged expression tree: "
+ << "tree=" << this->tree->toString() << ")";
}
+ MONGO_UNREACHABLE;
+}
- //
- // PlanCache
- //
+//
+// PlanCache
+//
- PlanCache::PlanCache() : _cache(internalQueryCacheSize) { }
+PlanCache::PlanCache() : _cache(internalQueryCacheSize) {}
- PlanCache::PlanCache(const std::string& ns) : _cache(internalQueryCacheSize), _ns(ns) { }
+PlanCache::PlanCache(const std::string& ns) : _cache(internalQueryCacheSize), _ns(ns) {}
- PlanCache::~PlanCache() { }
+PlanCache::~PlanCache() {}
- /**
- * Traverses expression tree pre-order.
- * Appends an encoding of each node's match type and path name
- * to the output stream.
- */
- void PlanCache::encodeKeyForMatch(const MatchExpression* tree,
- StringBuilder* keyBuilder) const {
- // Encode match type and path.
- *keyBuilder << encodeMatchType(tree->matchType());
+/**
+ * Traverses expression tree pre-order.
+ * Appends an encoding of each node's match type and path name
+ * to the output stream.
+ */
+void PlanCache::encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) const {
+ // Encode match type and path.
+ *keyBuilder << encodeMatchType(tree->matchType());
- encodeUserString(tree->path(), keyBuilder);
+ encodeUserString(tree->path(), keyBuilder);
- // GEO and GEO_NEAR require additional encoding.
- if (MatchExpression::GEO == tree->matchType()) {
- encodeGeoMatchExpression(static_cast<const GeoMatchExpression*>(tree), keyBuilder);
- }
- else if (MatchExpression::GEO_NEAR == tree->matchType()) {
- encodeGeoNearMatchExpression(static_cast<const GeoNearMatchExpression*>(tree),
- keyBuilder);
+ // GEO and GEO_NEAR require additional encoding.
+ if (MatchExpression::GEO == tree->matchType()) {
+ encodeGeoMatchExpression(static_cast<const GeoMatchExpression*>(tree), keyBuilder);
+ } else if (MatchExpression::GEO_NEAR == tree->matchType()) {
+ encodeGeoNearMatchExpression(static_cast<const GeoNearMatchExpression*>(tree), keyBuilder);
+ }
+
+ // Encode indexability.
+ const IndexabilityDiscriminators& discriminators =
+ _indexabilityState.getDiscriminators(tree->path());
+ if (!discriminators.empty()) {
+ *keyBuilder << kEncodeDiscriminatorsBegin;
+ // For each discriminator on this path, append the character '0' or '1'.
+ for (const IndexabilityDiscriminator& discriminator : discriminators) {
+ *keyBuilder << discriminator(tree);
}
+ *keyBuilder << kEncodeDiscriminatorsEnd;
+ }
- // Encode indexability.
- const IndexabilityDiscriminators& discriminators =
- _indexabilityState.getDiscriminators(tree->path());
- if (!discriminators.empty()) {
- *keyBuilder << kEncodeDiscriminatorsBegin;
- // For each discriminator on this path, append the character '0' or '1'.
- for (const IndexabilityDiscriminator& discriminator : discriminators) {
- *keyBuilder << discriminator(tree);
- }
- *keyBuilder << kEncodeDiscriminatorsEnd;
+ // Traverse child nodes.
+ // Enclose children in [].
+ if (tree->numChildren() > 0) {
+ *keyBuilder << kEncodeChildrenBegin;
+ }
+ // Use comma to separate children encoding.
+ for (size_t i = 0; i < tree->numChildren(); ++i) {
+ if (i > 0) {
+ *keyBuilder << kEncodeChildrenSeparator;
}
+ encodeKeyForMatch(tree->getChild(i), keyBuilder);
+ }
+ if (tree->numChildren() > 0) {
+ *keyBuilder << kEncodeChildrenEnd;
+ }
+}
+
+/**
+ * Encodes sort order into cache key.
+ * Sort order is normalized because it is provided by
+ * LiteParsedQuery.
+ */
+void PlanCache::encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const {
+ if (sortObj.isEmpty()) {
+ return;
+ }
+
+ *keyBuilder << kEncodeSortSection;
- // Traverse child nodes.
- // Enclose children in [].
- if (tree->numChildren() > 0) {
- *keyBuilder << kEncodeChildrenBegin;
+ BSONObjIterator it(sortObj);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ // $meta text score
+ if (LiteParsedQuery::isTextScoreMeta(elt)) {
+ *keyBuilder << "t";
}
- // Use comma to separate children encoding.
- for (size_t i = 0; i < tree->numChildren(); ++i) {
- if (i > 0) {
- *keyBuilder << kEncodeChildrenSeparator;
- }
- encodeKeyForMatch(tree->getChild(i), keyBuilder);
+ // Ascending
+ else if (elt.numberInt() == 1) {
+ *keyBuilder << "a";
}
- if (tree->numChildren() > 0) {
- *keyBuilder << kEncodeChildrenEnd;
+ // Descending
+ else {
+ *keyBuilder << "d";
}
- }
+ encodeUserString(elt.fieldName(), keyBuilder);
- /**
- * Encodes sort order into cache key.
- * Sort order is normalized because it provided by
- * LiteParsedQuery.
- */
- void PlanCache::encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const {
- if (sortObj.isEmpty()) {
- return;
+ // Sort argument separator
+ if (it.more()) {
+ *keyBuilder << ",";
}
+ }
+}
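To make the encoding concrete, here is a standalone re-sketch of the sort section (an illustration only, not the PlanCache member above; the $meta text score case and delimiter escaping are left out): the sort {a: 1, b: -1} encodes as "~aa,db".

    #include <sstream>
    #include <string>
    #include <utility>
    #include <vector>

    // '~' section marker, then for each component a direction character
    // ('a' ascending, 'd' descending) followed by the field name, with ','
    // separating components.
    std::string encodeSortSketch(const std::vector<std::pair<std::string, int>>& sort) {
        std::ostringstream out;
        if (sort.empty())
            return out.str();
        out << '~';
        for (size_t i = 0; i < sort.size(); ++i) {
            out << (sort[i].second == 1 ? 'a' : 'd') << sort[i].first;
            if (i + 1 < sort.size())
                out << ',';
        }
        return out.str();
    }
    // encodeSortSketch({{"a", 1}, {"b", -1}}) == "~aa,db"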
- *keyBuilder << kEncodeSortSection;
-
- BSONObjIterator it(sortObj);
- while (it.more()) {
- BSONElement elt = it.next();
- // $meta text score
- if (LiteParsedQuery::isTextScoreMeta(elt)) {
- *keyBuilder << "t";
- }
- // Ascending
- else if (elt.numberInt() == 1) {
- *keyBuilder << "a";
- }
- // Descending
- else {
- *keyBuilder << "d";
- }
- encodeUserString(elt.fieldName(), keyBuilder);
-
- // Sort argument separator
- if (it.more()) {
- *keyBuilder << ",";
- }
- }
+/**
+ * Encodes parsed projection into cache key.
+ * Does a simple toString() on each projected field
+ * in the BSON object.
+ * Orders the encoded elements in the projection by field name.
+ * This handles all the special projection types ($meta, $elemMatch, etc.)
+ */
+void PlanCache::encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const {
+ if (projObj.isEmpty()) {
+ return;
}
- /**
- * Encodes parsed projection into cache key.
- * Does a simple toString() on each projected field
- * in the BSON object.
- * Orders the encoded elements in the projection by field name.
- * This handles all the special projection types ($meta, $elemMatch, etc.)
- */
- void PlanCache::encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const {
- if (projObj.isEmpty()) {
- return;
- }
+ *keyBuilder << kEncodeProjectionSection;
- *keyBuilder << kEncodeProjectionSection;
+ // Sorts the BSON elements by field name using a map.
+ std::map<StringData, BSONElement> elements;
- // Sorts the BSON elements by field name using a map.
- std::map<StringData, BSONElement> elements;
+ BSONObjIterator it(projObj);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ StringData fieldName = elt.fieldNameStringData();
+ elements[fieldName] = elt;
+ }
- BSONObjIterator it(projObj);
- while (it.more()) {
- BSONElement elt = it.next();
- StringData fieldName = elt.fieldNameStringData();
- elements[fieldName] = elt;
- }
+ // Read elements in order of field name
+ for (std::map<StringData, BSONElement>::const_iterator i = elements.begin();
+ i != elements.end();
+ ++i) {
+ const BSONElement& elt = (*i).second;
- // Read elements in order of field name
- for (std::map<StringData, BSONElement>::const_iterator i = elements.begin();
- i != elements.end(); ++i) {
- const BSONElement& elt = (*i).second;
-
- if (elt.isSimpleType()) {
- // For inclusion/exclusion projections, we encode as "i" or "e".
- *keyBuilder << (elt.trueValue() ? "i" : "e");
- }
- else {
- // For projection operators, we use the verbatim string encoding of the element.
- encodeUserString(elt.toString(false, // includeFieldName
- false), // full
- keyBuilder);
- }
-
- encodeUserString(elt.fieldName(), keyBuilder);
+ if (elt.isSimpleType()) {
+ // For inclusion/exclusion projections, we encode as "i" or "e".
+ *keyBuilder << (elt.trueValue() ? "i" : "e");
+ } else {
+ // For projection operators, we use the verbatim string encoding of the element.
+ encodeUserString(elt.toString(false, // includeFieldName
+ false), // full
+ keyBuilder);
}
- }
- Status PlanCache::add(const CanonicalQuery& query,
- const std::vector<QuerySolution*>& solns,
- PlanRankingDecision* why) {
- invariant(why);
+ encodeUserString(elt.fieldName(), keyBuilder);
+ }
+}
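The projection section can be re-sketched the same way (an illustration only; projection operators such as $meta and $elemMatch, and delimiter escaping, are left out): fields are visited in field-name order, each prefixed by 'i' for inclusion or 'e' for exclusion.

    #include <map>
    #include <sstream>
    #include <string>

    // '|' section marker, then for each field in field-name order an 'i'
    // (include) or 'e' (exclude) followed by the field name.
    std::string encodeProjSketch(const std::map<std::string, bool>& proj) {
        std::ostringstream out;
        if (proj.empty())
            return out.str();
        out << '|';
        for (const auto& field : proj) {
            out << (field.second ? 'i' : 'e') << field.first;
        }
        return out.str();
    }
    // encodeProjSketch({{"_id", false}, {"a", true}}) == "|e_idia"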
- if (solns.empty()) {
- return Status(ErrorCodes::BadValue, "no solutions provided");
- }
+Status PlanCache::add(const CanonicalQuery& query,
+ const std::vector<QuerySolution*>& solns,
+ PlanRankingDecision* why) {
+ invariant(why);
- if (why->stats.size() != solns.size()) {
- return Status(ErrorCodes::BadValue,
- "number of stats in decision must match solutions");
- }
+ if (solns.empty()) {
+ return Status(ErrorCodes::BadValue, "no solutions provided");
+ }
- if (why->scores.size() != solns.size()) {
- return Status(ErrorCodes::BadValue,
- "number of scores in decision must match solutions");
- }
+ if (why->stats.size() != solns.size()) {
+ return Status(ErrorCodes::BadValue, "number of stats in decision must match solutions");
+ }
- if (why->candidateOrder.size() != solns.size()) {
- return Status(ErrorCodes::BadValue,
- "candidate ordering entries in decision must match solutions");
- }
+ if (why->scores.size() != solns.size()) {
+ return Status(ErrorCodes::BadValue, "number of scores in decision must match solutions");
+ }
- PlanCacheEntry* entry = new PlanCacheEntry(solns, why);
- const LiteParsedQuery& pq = query.getParsed();
- entry->query = pq.getFilter().getOwned();
- entry->sort = pq.getSort().getOwned();
- entry->projection = pq.getProj().getOwned();
+ if (why->candidateOrder.size() != solns.size()) {
+ return Status(ErrorCodes::BadValue,
+ "candidate ordering entries in decision must match solutions");
+ }
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(computeKey(query), entry);
+ PlanCacheEntry* entry = new PlanCacheEntry(solns, why);
+ const LiteParsedQuery& pq = query.getParsed();
+ entry->query = pq.getFilter().getOwned();
+ entry->sort = pq.getSort().getOwned();
+ entry->projection = pq.getProj().getOwned();
- if (NULL != evictedEntry.get()) {
- LOG(1) << _ns << ": plan cache maximum size exceeded - "
- << "removed least recently used entry "
- << evictedEntry->toString();
- }
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(computeKey(query), entry);
- return Status::OK();
+ if (NULL != evictedEntry.get()) {
+ LOG(1) << _ns << ": plan cache maximum size exceeded - "
+ << "removed least recently used entry " << evictedEntry->toString();
}
- Status PlanCache::get(const CanonicalQuery& query, CachedSolution** crOut) const {
- PlanCacheKey key = computeKey(query);
- verify(crOut);
+ return Status::OK();
+}
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- PlanCacheEntry* entry;
- Status cacheStatus = _cache.get(key, &entry);
- if (!cacheStatus.isOK()) {
- return cacheStatus;
- }
- invariant(entry);
+Status PlanCache::get(const CanonicalQuery& query, CachedSolution** crOut) const {
+ PlanCacheKey key = computeKey(query);
+ verify(crOut);
- *crOut = new CachedSolution(key, *entry);
-
- return Status::OK();
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ PlanCacheEntry* entry;
+ Status cacheStatus = _cache.get(key, &entry);
+ if (!cacheStatus.isOK()) {
+ return cacheStatus;
}
+ invariant(entry);
- Status PlanCache::feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback) {
- if (NULL == feedback) {
- return Status(ErrorCodes::BadValue, "feedback is NULL");
- }
- std::unique_ptr<PlanCacheEntryFeedback> autoFeedback(feedback);
- PlanCacheKey ck = computeKey(cq);
-
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- PlanCacheEntry* entry;
- Status cacheStatus = _cache.get(ck, &entry);
- if (!cacheStatus.isOK()) {
- return cacheStatus;
- }
- invariant(entry);
+ *crOut = new CachedSolution(key, *entry);
- // We store up to a constant number of feedback entries.
- if (entry->feedback.size() < size_t(internalQueryCacheFeedbacksStored)) {
- entry->feedback.push_back(autoFeedback.release());
- }
+ return Status::OK();
+}
- return Status::OK();
+Status PlanCache::feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback) {
+ if (NULL == feedback) {
+ return Status(ErrorCodes::BadValue, "feedback is NULL");
}
+ std::unique_ptr<PlanCacheEntryFeedback> autoFeedback(feedback);
+ PlanCacheKey ck = computeKey(cq);
- Status PlanCache::remove(const CanonicalQuery& canonicalQuery) {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- return _cache.remove(computeKey(canonicalQuery));
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ PlanCacheEntry* entry;
+ Status cacheStatus = _cache.get(ck, &entry);
+ if (!cacheStatus.isOK()) {
+ return cacheStatus;
}
+ invariant(entry);
- void PlanCache::clear() {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- _cache.clear();
- _writeOperations.store(0);
+ // We store up to a constant number of feedback entries.
+ if (entry->feedback.size() < size_t(internalQueryCacheFeedbacksStored)) {
+ entry->feedback.push_back(autoFeedback.release());
}
- PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
- StringBuilder keyBuilder;
- encodeKeyForMatch(cq.root(), &keyBuilder);
- encodeKeyForSort(cq.getParsed().getSort(), &keyBuilder);
- encodeKeyForProj(cq.getParsed().getProj(), &keyBuilder);
- return keyBuilder.str();
- }
+ return Status::OK();
+}
- Status PlanCache::getEntry(const CanonicalQuery& query, PlanCacheEntry** entryOut) const {
- PlanCacheKey key = computeKey(query);
- verify(entryOut);
+Status PlanCache::remove(const CanonicalQuery& canonicalQuery) {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ return _cache.remove(computeKey(canonicalQuery));
+}
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- PlanCacheEntry* entry;
- Status cacheStatus = _cache.get(key, &entry);
- if (!cacheStatus.isOK()) {
- return cacheStatus;
- }
- invariant(entry);
+void PlanCache::clear() {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ _cache.clear();
+ _writeOperations.store(0);
+}
- *entryOut = entry->clone();
+PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
+ StringBuilder keyBuilder;
+ encodeKeyForMatch(cq.root(), &keyBuilder);
+ encodeKeyForSort(cq.getParsed().getSort(), &keyBuilder);
+ encodeKeyForProj(cq.getParsed().getProj(), &keyBuilder);
+ return keyBuilder.str();
+}
- return Status::OK();
+Status PlanCache::getEntry(const CanonicalQuery& query, PlanCacheEntry** entryOut) const {
+ PlanCacheKey key = computeKey(query);
+ verify(entryOut);
+
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ PlanCacheEntry* entry;
+ Status cacheStatus = _cache.get(key, &entry);
+ if (!cacheStatus.isOK()) {
+ return cacheStatus;
}
+ invariant(entry);
- std::vector<PlanCacheEntry*> PlanCache::getAllEntries() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- std::vector<PlanCacheEntry*> entries;
- typedef std::list< std::pair<PlanCacheKey, PlanCacheEntry*> >::const_iterator ConstIterator;
- for (ConstIterator i = _cache.begin(); i != _cache.end(); i++) {
- PlanCacheEntry* entry = i->second;
- entries.push_back(entry->clone());
- }
+ *entryOut = entry->clone();
- return entries;
- }
+ return Status::OK();
+}
- bool PlanCache::contains(const CanonicalQuery& cq) const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- return _cache.hasKey(computeKey(cq));
+std::vector<PlanCacheEntry*> PlanCache::getAllEntries() const {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ std::vector<PlanCacheEntry*> entries;
+ typedef std::list<std::pair<PlanCacheKey, PlanCacheEntry*>>::const_iterator ConstIterator;
+ for (ConstIterator i = _cache.begin(); i != _cache.end(); i++) {
+ PlanCacheEntry* entry = i->second;
+ entries.push_back(entry->clone());
}
- size_t PlanCache::size() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
- return _cache.size();
- }
+ return entries;
+}
- void PlanCache::notifyOfWriteOp() {
- // It's fine to clear the cache multiple times if multiple threads
- // increment the counter to kPlanCacheMaxWriteOperations or greater.
- if (_writeOperations.addAndFetch(1) < internalQueryCacheWriteOpsBetweenFlush) {
- return;
- }
+bool PlanCache::contains(const CanonicalQuery& cq) const {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ return _cache.hasKey(computeKey(cq));
+}
- LOG(1) << _ns << ": clearing collection plan cache - "
- << internalQueryCacheWriteOpsBetweenFlush
- << " write operations detected since last refresh.";
- clear();
- }
+size_t PlanCache::size() const {
+ stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
+ return _cache.size();
+}
- void PlanCache::notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries) {
- _indexabilityState.updateDiscriminators(indexEntries);
+void PlanCache::notifyOfWriteOp() {
+ // It's fine to clear the cache multiple times if multiple threads
+ // increment the counter to kPlanCacheMaxWriteOperations or greater.
+ if (_writeOperations.addAndFetch(1) < internalQueryCacheWriteOpsBetweenFlush) {
+ return;
}
+ LOG(1) << _ns << ": clearing collection plan cache - " << internalQueryCacheWriteOpsBetweenFlush
+ << " write operations detected since last refresh.";
+ clear();
+}
+
+void PlanCache::notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries) {
+ _indexabilityState.updateDiscriminators(indexEntries);
+}
+
} // namespace mongo
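
The add()/get() pair above defines the cache's ownership contract: add() takes ownership of the PlanRankingDecision, while get() hands the caller an owned CachedSolution copy. A minimal sketch of a caller driving that cycle, assuming the canonical query and ranked solutions already exist; the helper names here are illustrative and not part of this change:

    #include "mongo/db/query/plan_cache.h"

    namespace mongo {

    // Record the winner after multi-planning. Callers are expected to have checked
    // shouldCacheQuery() already; add() takes ownership of 'why' and expects the best
    // plan to be the first element of 'rankedSolns'.
    Status cacheWinningPlan(PlanCache* planCache,
                            const CanonicalQuery& cq,
                            const std::vector<QuerySolution*>& rankedSolns,
                            PlanRankingDecision* why) {
        invariant(PlanCache::shouldCacheQuery(cq));
        return planCache->add(cq, rankedSolns, why);
    }

    // Shortcut planning on a later run. get() populates an owned CachedSolution on success.
    bool tryPlanFromCache(const PlanCache& planCache, const CanonicalQuery& cq) {
        CachedSolution* rawCachedSolution;
        if (!planCache.get(cq, &rawCachedSolution).isOK()) {
            return false;  // no entry for this query shape
        }
        std::unique_ptr<CachedSolution> cachedSolution(rawCachedSolution);  // caller owns '*crOut'
        // cachedSolution->plannerData can now be fed back to the query planner.
        return true;
    }

    }  // namespace mongo
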
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 3bc1e474365..974d827e31f 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -42,379 +42,381 @@
namespace mongo {
- // A PlanCacheKey is a string-ified version of a query's predicate/projection/sort.
- typedef std::string PlanCacheKey;
+// A PlanCacheKey is a string-ified version of a query's predicate/projection/sort.
+typedef std::string PlanCacheKey;
- struct PlanRankingDecision;
- struct QuerySolution;
- struct QuerySolutionNode;
+struct PlanRankingDecision;
+struct QuerySolution;
+struct QuerySolutionNode;
+
+/**
+ * When the CachedPlanStage runs a cached query, it can provide feedback to the cache. This
+ * feedback is available to anyone who retrieves that query in the future.
+ */
+struct PlanCacheEntryFeedback {
+ // How well did the cached plan perform?
+ std::unique_ptr<PlanStageStats> stats;
+
+ // The "goodness" score produced by the plan ranker
+ // corresponding to 'stats'.
+ double score;
+};
+
+// TODO: Replace with opaque type.
+typedef std::string PlanID;
+
+/**
+ * A PlanCacheIndexTree is the meaty component of the data
+ * stored in SolutionCacheData. It is a tree structure with
+ * index tags that indicates to the access planner which indices
+ * it should try to use.
+ *
+ * How a PlanCacheIndexTree is created:
+ * The query planner tags a match expression with indices. It
+ * then uses the tagged tree to create a PlanCacheIndexTree,
+ * using QueryPlanner::cacheDataFromTaggedTree. The PlanCacheIndexTree
+ * is isomorphic to the tagged match expression, and has matching
+ * index tags.
+ *
+ * How a PlanCacheIndexTree is used:
+ * When the query planner is planning from the cache, it uses
+ * the PlanCacheIndexTree retrieved from the cache in order to
+ * recreate index assignments. Specifically, a raw MatchExpression
+ * is tagged according to the index tags in the PlanCacheIndexTree.
+ * This is done by QueryPlanner::tagAccordingToCache.
+ */
+struct PlanCacheIndexTree {
+ PlanCacheIndexTree() : entry(nullptr), index_pos(0) {}
+
+ ~PlanCacheIndexTree() {
+ for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
+ it != children.end();
+ ++it) {
+ delete *it;
+ }
+ }
/**
- * When the CachedPlanStage runs a cached query, it can provide feedback to the cache. This
- * feedback is available to anyone who retrieves that query in the future.
+ * Clone 'ie' and set 'this->entry' to be the clone.
*/
- struct PlanCacheEntryFeedback {
- // How well did the cached plan perform?
- std::unique_ptr<PlanStageStats> stats;
-
- // The "goodness" score produced by the plan ranker
- // corresponding to 'stats'.
- double score;
- };
+ void setIndexEntry(const IndexEntry& ie);
- // TODO: Replace with opaque type.
- typedef std::string PlanID;
+ /**
+ * Make a deep copy.
+ */
+ PlanCacheIndexTree* clone() const;
/**
- * A PlanCacheIndexTree is the meaty component of the data
- * stored in SolutionCacheData. It is a tree structure with
- * index tags that indicates to the access planner which indices
- * it should try to use.
- *
- * How a PlanCacheIndexTree is created:
- * The query planner tags a match expression with indices. It
- * then uses the tagged tree to create a PlanCacheIndexTree,
- * using QueryPlanner::cacheDataFromTaggedTree. The PlanCacheIndexTree
- * is isomorphic to the tagged match expression, and has matching
- * index tags.
- *
- * How a PlanCacheIndexTree is used:
- * When the query planner is planning from the cache, it uses
- * the PlanCacheIndexTree retrieved from the cache in order to
- * recreate index assignments. Specifically, a raw MatchExpression
- * is tagged according to the index tags in the PlanCacheIndexTree.
- * This is done by QueryPlanner::tagAccordingToCache.
+ * For debugging.
*/
- struct PlanCacheIndexTree {
- PlanCacheIndexTree() : entry(nullptr), index_pos(0) { }
-
- ~PlanCacheIndexTree() {
- for (std::vector<PlanCacheIndexTree*>::const_iterator it = children.begin();
- it != children.end(); ++it) {
- delete *it;
- }
- }
+ std::string toString(int indents = 0) const;
+
+ // Children owned here.
+ std::vector<PlanCacheIndexTree*> children;
+
+ // Owned here.
+ std::unique_ptr<IndexEntry> entry;
+
+ size_t index_pos;
+};
- /**
- * Clone 'ie' and set 'this->entry' to be the clone.
- */
- void setIndexEntry(const IndexEntry& ie);
+/**
+ * Data stored inside a QuerySolution which can subsequently be
+ * used to create a cache entry. When this data is retrieved
+ * from the cache, it is sufficient to reconstruct the original
+ * QuerySolution.
+ */
+struct SolutionCacheData {
+ SolutionCacheData()
+ : tree(nullptr),
+ solnType(USE_INDEX_TAGS_SOLN),
+ wholeIXSolnDir(1),
+ indexFilterApplied(false) {}
+
+ // Make a deep copy.
+ SolutionCacheData* clone() const;
+
+ // For debugging.
+ std::string toString() const;
+
+ // Owned here. If 'wholeIXSoln' is false, then 'tree'
+ // can be used to tag an isomorphic match expression. If 'wholeIXSoln'
+ // is true, then 'tree' is used to store the relevant IndexEntry.
+ // If 'collscanSoln' is true, then 'tree' should be NULL.
+ std::unique_ptr<PlanCacheIndexTree> tree;
+
+ enum SolutionType {
+ // Indicates that the plan should use
+ // the index as a proxy for a collection
+ // scan (e.g. using index to provide sort).
+ WHOLE_IXSCAN_SOLN,
+
+ // The cached plan is a collection scan.
+ COLLSCAN_SOLN,
+
+ // Build the solution by using 'tree'
+ // to tag the match expression.
+ USE_INDEX_TAGS_SOLN
+ } solnType;
+
+ // The direction of the index scan used as
+ // a proxy for a collection scan. Used only
+ // for WHOLE_IXSCAN_SOLN.
+ int wholeIXSolnDir;
+
+ // True if index filter was applied.
+ bool indexFilterApplied;
+};
+
+class PlanCacheEntry;
+
+/**
+ * Information returned from a get(...) query.
+ */
+class CachedSolution {
+private:
+ MONGO_DISALLOW_COPYING(CachedSolution);
- /**
- * Make a deep copy.
- */
- PlanCacheIndexTree* clone() const;
+public:
+ CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry);
+ ~CachedSolution();
- /**
- * For debugging.
- */
- std::string toString(int indents = 0) const;
+ // Owned here.
+ std::vector<SolutionCacheData*> plannerData;
- // Children owned here.
- std::vector<PlanCacheIndexTree*> children;
+ // Key used to provide feedback on the entry.
+ PlanCacheKey key;
- // Owned here.
- std::unique_ptr<IndexEntry> entry;
+ // For debugging.
+ std::string toString() const;
- size_t index_pos;
- };
+ // We are extracting just enough information from the canonical
+ // query. We could clone the canonical query but the following
+ // items are all that is displayed to the user.
+ BSONObj query;
+ BSONObj sort;
+ BSONObj projection;
+ // The number of work cycles taken to decide on a winning plan when the plan was first
+ // cached.
+ size_t decisionWorks;
+};
+
+/**
+ * Used by the cache to track entries and their performance over time.
+ * Also used by the plan cache commands to display plan cache state.
+ */
+class PlanCacheEntry {
+private:
+ MONGO_DISALLOW_COPYING(PlanCacheEntry);
+
+public:
/**
- * Data stored inside a QuerySolution which can subsequently be
- * used to create a cache entry. When this data is retrieved
- * from the cache, it is sufficient to reconstruct the original
- * QuerySolution.
+ * Create a new PlanCacheEntry.
+ * Grabs any planner-specific data required from the solutions.
+ * Takes ownership of the PlanRankingDecision that placed the plan in the cache.
*/
- struct SolutionCacheData {
- SolutionCacheData() :
- tree(nullptr),
- solnType(USE_INDEX_TAGS_SOLN),
- wholeIXSolnDir(1),
- indexFilterApplied(false) {
- }
+ PlanCacheEntry(const std::vector<QuerySolution*>& solutions, PlanRankingDecision* why);
+
+ ~PlanCacheEntry();
+
+ /**
+ * Make a deep copy.
+ */
+ PlanCacheEntry* clone() const;
+
+ // For debugging.
+ std::string toString() const;
+
+ //
+ // Planner data
+ //
+
+ // Data provided to the planner to allow it to recreate the solutions this entry
+ // represents. Each SolutionCacheData is fully owned here, so in order to return
+ // it from the cache a deep copy is made and returned inside CachedSolution.
+ std::vector<SolutionCacheData*> plannerData;
+
+ // TODO: Do we really want to just hold a copy of the CanonicalQuery? For now we just
+ // extract the data we need.
+ //
+ // Used by the plan cache commands to display an example query
+ // of the appropriate shape.
+ BSONObj query;
+ BSONObj sort;
+ BSONObj projection;
+
+ //
+ // Performance stats
+ //
+
+ // Information that went into picking the winning plan and also why
+ // the other plans lost.
+ std::unique_ptr<PlanRankingDecision> decision;
+
+ // Annotations from cached runs. The CachedPlanStage provides these stats about its
+ // runs when they complete.
+ std::vector<PlanCacheEntryFeedback*> feedback;
+};
+
+/**
+ * Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution)
+ * mapping, the cache contains information on why that mapping was made and statistics on the
+ * cache entry's actual performance on subsequent runs.
+ *
+ */
+class PlanCache {
+private:
+ MONGO_DISALLOW_COPYING(PlanCache);
+
+public:
+ /**
+ * We don't want to cache every possible query. This function
+ * encapsulates the criteria for what makes a canonical query
+ * suitable for lookup/inclusion in the cache.
+ */
+ static bool shouldCacheQuery(const CanonicalQuery& query);
+
+ /**
+ * If omitted, namespace set to empty string.
+ */
+ PlanCache();
+
+ PlanCache(const std::string& ns);
- // Make a deep copy.
- SolutionCacheData* clone() const;
+ ~PlanCache();
+
+ /**
+ * Record solutions for query. Best plan is first element in list.
+ * Each query in the cache will have more than 1 plan because we only
+ * add queries which are considered by the multi plan runner (which happens
+ * only when the query planner generates multiple candidate plans).
+ *
+ * Takes ownership of 'why'.
+ *
+ * If the mapping was added successfully, returns Status::OK().
+ * If the mapping already existed or some other error occurred, returns another Status.
+ */
+ Status add(const CanonicalQuery& query,
+ const std::vector<QuerySolution*>& solns,
+ PlanRankingDecision* why);
- // For debugging.
- std::string toString() const;
+ /**
+ * Look up the cached data access for the provided 'query'. Used by the query planner
+ * to shortcut planning.
+ *
+ * If there is no entry in the cache for the 'query', returns an error Status.
+ *
+ * If there is an entry in the cache, populates 'crOut' and returns Status::OK(). Caller
+ * owns '*crOut'.
+ */
+ Status get(const CanonicalQuery& query, CachedSolution** crOut) const;
- // Owned here. If 'wholeIXSoln' is false, then 'tree'
- // can be used to tag an isomorphic match expression. If 'wholeIXSoln'
- // is true, then 'tree' is used to store the relevant IndexEntry.
- // If 'collscanSoln' is true, then 'tree' should be NULL.
- std::unique_ptr<PlanCacheIndexTree> tree;
+ /**
+ * When the CachedPlanStage runs a plan out of the cache, we want to record data about the
+ * plan's performance. The CachedPlanStage calls feedback(...) after executing the cached
+ * plan for a trial period in order to do this.
+ *
+ * Cache takes ownership of 'feedback'.
+ *
+ * If the entry corresponding to 'cq' isn't in the cache anymore, the feedback is ignored
+ * and an error Status is returned.
+ *
+ * If the entry corresponding to 'cq' still exists, 'feedback' is added to the run
+ * statistics about the plan. Status::OK() is returned.
+ */
+ Status feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback);
- enum SolutionType {
- // Indicates that the plan should use
- // the index as a proxy for a collection
- // scan (e.g. using index to provide sort).
- WHOLE_IXSCAN_SOLN,
+ /**
+ * Remove the entry corresponding to 'canonicalQuery' from the cache. Returns Status::OK() if the plan
+ * was present and removed and an error status otherwise.
+ */
+ Status remove(const CanonicalQuery& canonicalQuery);
- // The cached plan is a collection scan.
- COLLSCAN_SOLN,
+ /**
+ * Remove *all* cached plans. Does not clear index information.
+ */
+ void clear();
- // Build the solution by using 'tree'
- // to tag the match expression.
- USE_INDEX_TAGS_SOLN
- } solnType;
+ /**
+ * Get the cache key corresponding to the given canonical query. The query need not already
+ * be cached.
+ *
+ * This is provided in the public API simply as a convenience for consumers who need some
+ * description of query shape (e.g. index filters).
+ *
+ * Callers must hold the collection lock when calling this method.
+ */
+ PlanCacheKey computeKey(const CanonicalQuery&) const;
- // The direction of the index scan used as
- // a proxy for a collection scan. Used only
- // for WHOLE_IXSCAN_SOLN.
- int wholeIXSolnDir;
+ /**
+ * Returns a copy of a cache entry.
+ * Used by planCacheListPlans to display plan details.
+ *
+ * If there is no entry in the cache for the 'query', returns an error Status.
+ *
+ * If there is an entry in the cache, populates 'entryOut' and returns Status::OK(). Caller
+ * owns '*entryOut'.
+ */
+ Status getEntry(const CanonicalQuery& cq, PlanCacheEntry** entryOut) const;
- // True if index filter was applied.
- bool indexFilterApplied;
- };
+ /**
+ * Returns a vector of all cache entries.
+ * Caller owns the result vector and is responsible for cleaning up
+ * the cache entry copies.
+ * Used by planCacheListQueryShapes and index_filter_commands_test.cpp.
+ */
+ std::vector<PlanCacheEntry*> getAllEntries() const;
- class PlanCacheEntry;
+ /**
+ * Returns true if there is an entry in the cache for the 'query'.
+ * Internally calls hasKey() on the LRU cache.
+ */
+ bool contains(const CanonicalQuery& cq) const;
/**
- * Information returned from a get(...) query.
+ * Returns number of entries in cache.
+ * Used for testing.
*/
- class CachedSolution {
- private:
- MONGO_DISALLOW_COPYING(CachedSolution);
- public:
- CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry);
- ~CachedSolution();
-
- // Owned here.
- std::vector<SolutionCacheData*> plannerData;
-
- // Key used to provide feedback on the entry.
- PlanCacheKey key;
-
- // For debugging.
- std::string toString() const;
-
- // We are extracting just enough information from the canonical
- // query. We could clone the canonical query but the following
- // items are all that is displayed to the user.
- BSONObj query;
- BSONObj sort;
- BSONObj projection;
-
- // The number of work cycles taken to decide on a winning plan when the plan was first
- // cached.
- size_t decisionWorks;
- };
+ size_t size() const;
/**
- * Used by the cache to track entries and their performance over time.
- * Also used by the plan cache commands to display plan cache state.
+ * You must notify the cache if you are doing writes, as query plan utility will change.
+ * Cache is flushed after every 1000 notifications.
*/
- class PlanCacheEntry {
- private:
- MONGO_DISALLOW_COPYING(PlanCacheEntry);
- public:
- /**
- * Create a new PlanCacheEntry.
- * Grabs any planner-specific data required from the solutions.
- * Takes ownership of the PlanRankingDecision that placed the plan in the cache.
- */
- PlanCacheEntry(const std::vector<QuerySolution*>& solutions,
- PlanRankingDecision* why);
-
- ~PlanCacheEntry();
-
- /**
- * Make a deep copy.
- */
- PlanCacheEntry* clone() const;
-
- // For debugging.
- std::string toString() const;
-
- //
- // Planner data
- //
-
- // Data provided to the planner to allow it to recreate the solutions this entry
- // represents. Each SolutionCacheData is fully owned here, so in order to return
- // it from the cache a deep copy is made and returned inside CachedSolution.
- std::vector<SolutionCacheData*> plannerData;
-
- // TODO: Do we really want to just hold a copy of the CanonicalQuery? For now we just
- // extract the data we need.
- //
- // Used by the plan cache commands to display an example query
- // of the appropriate shape.
- BSONObj query;
- BSONObj sort;
- BSONObj projection;
-
- //
- // Performance stats
- //
-
- // Information that went into picking the winning plan and also why
- // the other plans lost.
- std::unique_ptr<PlanRankingDecision> decision;
-
- // Annotations from cached runs. The CachedPlanStage provides these stats about its
- // runs when they complete.
- std::vector<PlanCacheEntryFeedback*> feedback;
- };
+ void notifyOfWriteOp();
/**
- * Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution)
- * mapping, the cache contains information on why that mapping was made and statistics on the
- * cache entry's actual performance on subsequent runs.
+ * Updates internal state kept about the collection's indexes. Must be called when the set
+ * of indexes on the associated collection have changed.
*
+ * Callers must hold the collection lock in exclusive mode when calling this method.
*/
- class PlanCache {
- private:
- MONGO_DISALLOW_COPYING(PlanCache);
- public:
- /**
- * We don't want to cache every possible query. This function
- * encapsulates the criteria for what makes a canonical query
- * suitable for lookup/inclusion in the cache.
- */
- static bool shouldCacheQuery(const CanonicalQuery& query);
-
- /**
- * If omitted, namespace set to empty string.
- */
- PlanCache();
-
- PlanCache(const std::string& ns);
-
- ~PlanCache();
-
- /**
- * Record solutions for query. Best plan is first element in list.
- * Each query in the cache will have more than 1 plan because we only
- * add queries which are considered by the multi plan runner (which happens
- * only when the query planner generates multiple candidate plans).
- *
- * Takes ownership of 'why'.
- *
- * If the mapping was added successfully, returns Status::OK().
- * If the mapping already existed or some other error occurred, returns another Status.
- */
- Status add(const CanonicalQuery& query,
- const std::vector<QuerySolution*>& solns,
- PlanRankingDecision* why);
-
- /**
- * Look up the cached data access for the provided 'query'. Used by the query planner
- * to shortcut planning.
- *
- * If there is no entry in the cache for the 'query', returns an error Status.
- *
- * If there is an entry in the cache, populates 'crOut' and returns Status::OK(). Caller
- * owns '*crOut'.
- */
- Status get(const CanonicalQuery& query, CachedSolution** crOut) const;
-
- /**
- * When the CachedPlanStage runs a plan out of the cache, we want to record data about the
- * plan's performance. The CachedPlanStage calls feedback(...) after executing the cached
- * plan for a trial period in order to do this.
- *
- * Cache takes ownership of 'feedback'.
- *
- * If the entry corresponding to 'cq' isn't in the cache anymore, the feedback is ignored
- * and an error Status is returned.
- *
- * If the entry corresponding to 'cq' still exists, 'feedback' is added to the run
- * statistics about the plan. Status::OK() is returned.
- */
- Status feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback);
-
- /**
- * Remove the entry corresponding to 'ck' from the cache. Returns Status::OK() if the plan
- * was present and removed and an error status otherwise.
- */
- Status remove(const CanonicalQuery& canonicalQuery);
-
- /**
- * Remove *all* cached plans. Does not clear index information.
- */
- void clear();
-
- /**
- * Get the cache key corresponding to the given canonical query. The query need not already
- * be cached.
- *
- * This is provided in the public API simply as a convenience for consumers who need some
- * description of query shape (e.g. index filters).
- *
- * Callers must hold the collection lock when calling this method.
- */
- PlanCacheKey computeKey(const CanonicalQuery&) const;
-
- /**
- * Returns a copy of a cache entry.
- * Used by planCacheListPlans to display plan details.
- *
- * If there is no entry in the cache for the 'query', returns an error Status.
- *
- * If there is an entry in the cache, populates 'entryOut' and returns Status::OK(). Caller
- * owns '*entryOut'.
- */
- Status getEntry(const CanonicalQuery& cq, PlanCacheEntry** entryOut) const;
-
- /**
- * Returns a vector of all cache entries.
- * Caller owns the result vector and is responsible for cleaning up
- * the cache entry copies.
- * Used by planCacheListQueryShapes and index_filter_commands_test.cpp.
- */
- std::vector<PlanCacheEntry*> getAllEntries() const;
-
- /**
- * Returns true if there is an entry in the cache for the 'query'.
- * Internally calls hasKey() on the LRU cache.
- */
- bool contains(const CanonicalQuery& cq) const;
-
- /**
- * Returns number of entries in cache.
- * Used for testing.
- */
- size_t size() const;
-
- /**
- * You must notify the cache if you are doing writes, as query plan utility will change.
- * Cache is flushed after every 1000 notifications.
- */
- void notifyOfWriteOp();
-
- /**
- * Updates internal state kept about the collection's indexes. Must be called when the set
- * of indexes on the associated collection have changed.
- *
- * Callers must hold the collection lock in exclusive mode when calling this method.
- */
- void notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries);
-
- private:
- void encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) const;
- void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const;
- void encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const;
-
- LRUKeyValue<PlanCacheKey, PlanCacheEntry> _cache;
-
- // Protects _cache.
- mutable stdx::mutex _cacheMutex;
-
- // Counter for write notifications since initialization or last clear() invocation. Starts
- // at 0.
- AtomicInt32 _writeOperations;
-
- // Full namespace of collection.
- std::string _ns;
-
- // Holds computed information about the collection's indexes. Used for generating plan
- // cache keys.
- //
- // Concurrent access is synchronized by the collection lock. Multiple concurrent readers
- // are allowed.
- PlanCacheIndexabilityState _indexabilityState;
- };
+ void notifyOfIndexEntries(const std::vector<IndexEntry>& indexEntries);
+
+private:
+ void encodeKeyForMatch(const MatchExpression* tree, StringBuilder* keyBuilder) const;
+ void encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const;
+ void encodeKeyForProj(const BSONObj& projObj, StringBuilder* keyBuilder) const;
+
+ LRUKeyValue<PlanCacheKey, PlanCacheEntry> _cache;
+
+ // Protects _cache.
+ mutable stdx::mutex _cacheMutex;
+
+ // Counter for write notifications since initialization or last clear() invocation. Starts
+ // at 0.
+ AtomicInt32 _writeOperations;
+
+ // Full namespace of collection.
+ std::string _ns;
+
+ // Holds computed information about the collection's indexes. Used for generating plan
+ // cache keys.
+ //
+ // Concurrent access is synchronized by the collection lock. Multiple concurrent readers
+ // are allowed.
+ PlanCacheIndexabilityState _indexabilityState;
+};
} // namespace mongo
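
Note that getEntry() and getAllEntries() above return heap-allocated clones, so the caller is responsible for deleting them. A short sketch of that contract, assuming an already-populated cache; everything other than the PlanCache/PlanCacheEntry API is illustrative:

    #include <memory>
    #include <string>

    #include "mongo/db/query/plan_cache.h"

    namespace mongo {

    // Build a debug dump of every cached shape. getAllEntries() returns deep copies,
    // so each PlanCacheEntry* must be freed by this caller.
    std::string dumpPlanCache(const PlanCache& planCache) {
        std::string out;
        for (PlanCacheEntry* raw : planCache.getAllEntries()) {
            std::unique_ptr<PlanCacheEntry> entry(raw);
            out += entry->toString();  // debugging representation, per the header
            out += '\n';
        }
        return out;
    }

    }  // namespace mongo
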
diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp
index 24e60bbd7b7..a6e0fa32e3f 100644
--- a/src/mongo/db/query/plan_cache_indexability.cpp
+++ b/src/mongo/db/query/plan_cache_indexability.cpp
@@ -41,67 +41,61 @@
namespace mongo {
- void PlanCacheIndexabilityState::processSparseIndex(const BSONObj& keyPattern) {
- for (BSONElement elem : keyPattern) {
- _pathDiscriminatorsMap[elem.fieldNameStringData()].push_back(
- [] (const MatchExpression* queryExpr) {
- if (queryExpr->matchType() == MatchExpression::EQ) {
- const auto* queryExprEquality =
- static_cast<const EqualityMatchExpression*>(queryExpr);
- return !queryExprEquality->getData().isNull();
- }
- else if (queryExpr->matchType() == MatchExpression::MATCH_IN) {
- const auto* queryExprIn =
- static_cast<const InMatchExpression*>(queryExpr);
- return !queryExprIn->getData().hasNull();
- }
- else {
- return true;
- }
+void PlanCacheIndexabilityState::processSparseIndex(const BSONObj& keyPattern) {
+ for (BSONElement elem : keyPattern) {
+ _pathDiscriminatorsMap[elem.fieldNameStringData()].push_back(
+ [](const MatchExpression* queryExpr) {
+ if (queryExpr->matchType() == MatchExpression::EQ) {
+ const auto* queryExprEquality =
+ static_cast<const EqualityMatchExpression*>(queryExpr);
+ return !queryExprEquality->getData().isNull();
+ } else if (queryExpr->matchType() == MatchExpression::MATCH_IN) {
+ const auto* queryExprIn = static_cast<const InMatchExpression*>(queryExpr);
+ return !queryExprIn->getData().hasNull();
+ } else {
+ return true;
}
- );
- }
+ });
}
+}
- void PlanCacheIndexabilityState::processPartialIndex(const MatchExpression* filterExpr) {
- invariant(filterExpr);
- for (size_t i = 0; i < filterExpr->numChildren(); ++i) {
- processPartialIndex(filterExpr->getChild(i));
- }
- if (!filterExpr->isLogical()) {
- _pathDiscriminatorsMap[filterExpr->path()].push_back(
- [filterExpr] (const MatchExpression* queryExpr) {
- return expression::isSubsetOf(queryExpr, filterExpr);
- }
- );
- }
+void PlanCacheIndexabilityState::processPartialIndex(const MatchExpression* filterExpr) {
+ invariant(filterExpr);
+ for (size_t i = 0; i < filterExpr->numChildren(); ++i) {
+ processPartialIndex(filterExpr->getChild(i));
}
+ if (!filterExpr->isLogical()) {
+ _pathDiscriminatorsMap[filterExpr->path()].push_back(
+ [filterExpr](const MatchExpression* queryExpr) {
+ return expression::isSubsetOf(queryExpr, filterExpr);
+ });
+ }
+}
namespace {
- const IndexabilityDiscriminators emptyDiscriminators;
+const IndexabilityDiscriminators emptyDiscriminators;
} // namespace
- const IndexabilityDiscriminators& PlanCacheIndexabilityState::getDiscriminators(
- StringData path) const {
- PathDiscriminatorsMap::const_iterator it = _pathDiscriminatorsMap.find(path);
- if (it == _pathDiscriminatorsMap.end()) {
- return emptyDiscriminators;
- }
- return it->second;
+const IndexabilityDiscriminators& PlanCacheIndexabilityState::getDiscriminators(
+ StringData path) const {
+ PathDiscriminatorsMap::const_iterator it = _pathDiscriminatorsMap.find(path);
+ if (it == _pathDiscriminatorsMap.end()) {
+ return emptyDiscriminators;
}
+ return it->second;
+}
- void PlanCacheIndexabilityState::updateDiscriminators(
- const std::vector<IndexEntry>& indexEntries) {
- _pathDiscriminatorsMap = PathDiscriminatorsMap();
+void PlanCacheIndexabilityState::updateDiscriminators(const std::vector<IndexEntry>& indexEntries) {
+ _pathDiscriminatorsMap = PathDiscriminatorsMap();
- for (const IndexEntry& idx : indexEntries) {
- if (idx.sparse) {
- processSparseIndex(idx.keyPattern);
- }
- if (idx.filterExpr) {
- processPartialIndex(idx.filterExpr);
- }
+ for (const IndexEntry& idx : indexEntries) {
+ if (idx.sparse) {
+ processSparseIndex(idx.keyPattern);
+ }
+ if (idx.filterExpr) {
+ processPartialIndex(idx.filterExpr);
}
}
+}
} // namespace mongo
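
The lambdas registered above are the entire discriminator mechanism: the sparse-index lambda returns false only for equality or $in predicates that can match null. A minimal sketch of exercising one directly, mirroring the SparseIndexSimple test added later in this change (parseMatchExpression() is the helper defined in that test file):

    void sparseDiscriminatorSketch() {
        PlanCacheIndexabilityState state;
        state.updateDiscriminators({IndexEntry(BSON("a" << 1),
                                               false,    // multikey
                                               true,     // sparse
                                               false,    // unique
                                               "",       // name
                                               nullptr,  // filterExpr
                                               BSONObj())});

        const IndexabilityDiscriminators& discs = state.getDiscriminators("a");
        // {a: 1} is compatible with a sparse index on "a"; {a: null} is not, so the
        // discriminator classifies the two predicates as different shapes.
        invariant(discs[0](parseMatchExpression(BSON("a" << 1)).get()));          // non-null equality
        invariant(!discs[0](parseMatchExpression(BSON("a" << BSONNULL)).get()));  // null equality
    }
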
diff --git a/src/mongo/db/query/plan_cache_indexability.h b/src/mongo/db/query/plan_cache_indexability.h
index 6d4d4b30012..03278b06929 100644
--- a/src/mongo/db/query/plan_cache_indexability.h
+++ b/src/mongo/db/query/plan_cache_indexability.h
@@ -36,64 +36,65 @@
namespace mongo {
- class BSONObj;
- class MatchExpression;
- struct IndexEntry;
+class BSONObj;
+class MatchExpression;
+struct IndexEntry;
- using IndexabilityDiscriminator = stdx::function<bool(const MatchExpression* me)>;
- using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>;
+using IndexabilityDiscriminator = stdx::function<bool(const MatchExpression* me)>;
+using IndexabilityDiscriminators = std::vector<IndexabilityDiscriminator>;
+
+/**
+ * PlanCacheIndexabilityState holds a set of "indexability discriminators" for certain paths.
+ * An indexability discriminator is a binary predicate function, used to classify match
+ * expressions based on the data values in the expression.
+ */
+class PlanCacheIndexabilityState {
+ MONGO_DISALLOW_COPYING(PlanCacheIndexabilityState);
+
+public:
+ PlanCacheIndexabilityState() = default;
/**
- * PlanCacheIndexabilityState holds a set of "indexability discriminators" for certain paths.
- * An indexability discriminator is a binary predicate function, used to classify match
- * expressions based on the data values in the expression.
+ * Gets the set of discriminators associated with 'path'. Returns an empty set if no
+ * discriminators are registered for 'path'.
+ *
+ * The object returned by reference is valid until the next call to updateDiscriminators()
+ * or until destruction of 'this', whichever is first.
*/
- class PlanCacheIndexabilityState {
- MONGO_DISALLOW_COPYING(PlanCacheIndexabilityState);
- public:
- PlanCacheIndexabilityState() = default;
-
- /**
- * Gets the set of discriminators associated with 'path'. Returns an empty set if no
- * discriminators are registered for 'path'.
- *
- * The object returned by reference is valid until the next call to updateDiscriminators()
- * or until destruction of 'this', whichever is first.
- */
- const IndexabilityDiscriminators& getDiscriminators(StringData path) const;
+ const IndexabilityDiscriminators& getDiscriminators(StringData path) const;
- /**
- * Clears discriminators for all paths, and regenerate them from 'indexEntries'.
- */
- void updateDiscriminators(const std::vector<IndexEntry>& indexEntries);
+ /**
+ * Clears discriminators for all paths and regenerates them from 'indexEntries'.
+ */
+ void updateDiscriminators(const std::vector<IndexEntry>& indexEntries);
- private:
- /**
- * Adds sparse index discriminators for the sparse index with the given key pattern to
- * '_pathDiscriminatorsMap'.
- *
- * A sparse index discriminator distinguishes equality matches to null from other expression
- * types. For example, this allows the predicate {a: 1} to be considered of a different
- * shape from the predicate {a: null}, if there is a sparse index defined with "a" as an
- * element of the key pattern. The former predicate is compatibile with this index, but the
- * latter is not compatible.
- */
- void processSparseIndex(const BSONObj& keyPattern);
+private:
+ /**
+ * Adds sparse index discriminators for the sparse index with the given key pattern to
+ * '_pathDiscriminatorsMap'.
+ *
+ * A sparse index discriminator distinguishes equality matches to null from other expression
+ * types. For example, this allows the predicate {a: 1} to be considered of a different
+ * shape from the predicate {a: null}, if there is a sparse index defined with "a" as an
+ * element of the key pattern. The former predicate is compatible with this index, but the
+ * latter is not compatible.
+ */
+ void processSparseIndex(const BSONObj& keyPattern);
- /**
- * Adds partial index discriminators for the partial index with the given filter expression
- * to '_pathDiscriminatorsMap'.
- *
- * A partial index discriminator distinguishes expressions that match a given partial index
- * predicate from expressions that don't match the partial index predicate. For example,
- * this allows the predicate {a: {$gt: 5}} to be considered a different shape than the
- * predicate {a: {$gt: -5}}, if there is a partial index defined with document filter {a:
- * {$gt: 0}}. The former is compatible with this index, but the latter is not compatible.
- */
- void processPartialIndex(const MatchExpression* filterExpr);
+ /**
+ * Adds partial index discriminators for the partial index with the given filter expression
+ * to '_pathDiscriminatorsMap'.
+ *
+ * A partial index discriminator distinguishes expressions that match a given partial index
+ * predicate from expressions that don't match the partial index predicate. For example,
+ * this allows the predicate {a: {$gt: 5}} to be considered a different shape than the
+ * predicate {a: {$gt: -5}}, if there is a partial index defined with document filter {a:
+ * {$gt: 0}}. The former is compatible with this index, but the latter is not compatible.
+ */
+ void processPartialIndex(const MatchExpression* filterExpr);
- using PathDiscriminatorsMap = StringMap<IndexabilityDiscriminators>;
- PathDiscriminatorsMap _pathDiscriminatorsMap;
- };
+ using PathDiscriminatorsMap = StringMap<IndexabilityDiscriminators>;
+ PathDiscriminatorsMap _pathDiscriminatorsMap;
+};
} // namespace mongo
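
The plan_cache.h changes above note that the indexability state is "used for generating plan cache keys", but the computeKey() in this diff still encodes only the match/sort/projection. The following is therefore only a hypothetical sketch of how a key encoder could fold discriminator verdicts into the key for a given predicate path; encodeIndexability() is an invented name, not something added by this change:

    // Hypothetical helper: append one "0"/"1" per discriminator verdict for 'node',
    // so that predicates the discriminators tell apart end up with different keys.
    void encodeIndexability(const MatchExpression* node,
                            const PlanCacheIndexabilityState& indexabilityState,
                            StringBuilder* keyBuilder) {
        const IndexabilityDiscriminators& discriminators =
            indexabilityState.getDiscriminators(node->path());
        if (discriminators.empty()) {
            return;
        }
        *keyBuilder << "<";
        for (const IndexabilityDiscriminator& discriminator : discriminators) {
            *keyBuilder << (discriminator(node) ? "1" : "0");
        }
        *keyBuilder << ">";
    }
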
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index 01abda9e525..a5a0ca38e89 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -34,181 +34,179 @@
namespace mongo {
namespace {
- std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString());
- }
- return std::unique_ptr<MatchExpression>(status.getValue());
+std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ if (!status.isOK()) {
+ FAIL(str::stream() << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString());
}
-
- // Test sparse index discriminators for a simple sparse index.
- TEST(PlanCacheIndexabilityTest, SparseIndexSimple) {
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- true, // sparse
- false, // unique
- "", // name
- nullptr, // filterExpr
- BSONObj())});
-
+ return std::unique_ptr<MatchExpression>(status.getValue());
+}
+
+// Test sparse index discriminators for a simple sparse index.
+TEST(PlanCacheIndexabilityTest, SparseIndexSimple) {
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "", // name
+ nullptr, // filterExpr
+ BSONObj())});
+
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a");
+ ASSERT_EQ(1U, discriminators.size());
+
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get()));
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(1)))).get()));
+ ASSERT_EQ(false,
+ disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(BSONNULL)))).get()));
+}
+
+// Test sparse index discriminators for a compound sparse index.
+TEST(PlanCacheIndexabilityTest, SparseIndexCompound) {
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1 << "b" << 1),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "", // name
+ nullptr, // filterExpr
+ BSONObj())});
+
+ {
const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a");
ASSERT_EQ(1U, discriminators.size());
const IndexabilityDiscriminator& disc = discriminators[0];
ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get()));
ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get()));
- ASSERT_EQ(true,
- disc(parseMatchExpression(BSON("a" << BSON("$in" << BSON_ARRAY(1)))).get()));
- ASSERT_EQ(false,
- disc(parseMatchExpression(BSON("a" <<
- BSON("$in" << BSON_ARRAY(BSONNULL)))).get()));
}
- // Test sparse index discriminators for a compound sparse index.
- TEST(PlanCacheIndexabilityTest, SparseIndexCompound) {
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1 << "b" << 1),
- false, // multikey
- true, // sparse
- false, // unique
- "", // name
- nullptr, // filterExpr
- BSONObj())});
-
- {
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("a");
- ASSERT_EQ(1U, discriminators.size());
-
- const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("a" << 1)).get()));
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("a" << BSONNULL)).get()));
- }
-
- {
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("b");
- ASSERT_EQ(1U, discriminators.size());
-
- const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("b" << 1)).get()));
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("b" << BSONNULL)).get()));
- }
- }
-
- // Test partial index discriminators for an index with a simple filter.
- TEST(PlanCacheIndexabilityTest, PartialIndexSimple) {
- BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
- std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr.get(),
- BSONObj())});
-
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
+ {
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("b");
ASSERT_EQ(1U, discriminators.size());
const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << BSON("$gt" << -5))).get()));
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << BSON("$gt" << 5))).get()));
-
- ASSERT(state.getDiscriminators("a").empty());
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("b" << 1)).get()));
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("b" << BSONNULL)).get()));
}
-
- // Test partial index discriminators for an index where the filter expression is an AND.
- TEST(PlanCacheIndexabilityTest, PartialIndexAnd) {
- BSONObj filterObj = BSON("f" << 1 << "g" << 1);
- std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr.get(),
- BSONObj())});
-
- {
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
- ASSERT_EQ(1U, discriminators.size());
-
- const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << 0)).get()));
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << 1)).get()));
- }
-
- {
- const IndexabilityDiscriminators& discriminators = state.getDiscriminators("g");
- ASSERT_EQ(1U, discriminators.size());
-
- const IndexabilityDiscriminator& disc = discriminators[0];
- ASSERT_EQ(false, disc(parseMatchExpression(BSON("g" << 0)).get()));
- ASSERT_EQ(true, disc(parseMatchExpression(BSON("g" << 1)).get()));
- }
-
- ASSERT(state.getDiscriminators("a").empty());
- }
-
- // Test partial index discriminators where there are multiple partial indexes.
- TEST(PlanCacheIndexabilityTest, MultiplePartialIndexes) {
- BSONObj filterObj1 = BSON("f" << 1);
- std::unique_ptr<MatchExpression> filterExpr1(parseMatchExpression(filterObj1));
-
- BSONObj filterObj2 = BSON("f" << 2);
- std::unique_ptr<MatchExpression> filterExpr2(parseMatchExpression(filterObj2));
-
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr1.get(),
- BSONObj()),
- IndexEntry(BSON("b" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr2.get(),
- BSONObj())});
-
+}
+
+// Test partial index discriminators for an index with a simple filter.
+TEST(PlanCacheIndexabilityTest, PartialIndexSimple) {
+ BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
+ std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr.get(),
+ BSONObj())});
+
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
+ ASSERT_EQ(1U, discriminators.size());
+
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << BSON("$gt" << -5))).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << BSON("$gt" << 5))).get()));
+
+ ASSERT(state.getDiscriminators("a").empty());
+}
+
+// Test partial index discriminators for an index where the filter expression is an AND.
+TEST(PlanCacheIndexabilityTest, PartialIndexAnd) {
+ BSONObj filterObj = BSON("f" << 1 << "g" << 1);
+ std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr.get(),
+ BSONObj())});
+
+ {
const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
- ASSERT_EQ(2U, discriminators.size());
-
- const IndexabilityDiscriminator& disc1 = discriminators[0];
- const IndexabilityDiscriminator& disc2 = discriminators[1];
-
- ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
- ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
+ ASSERT_EQ(1U, discriminators.size());
- ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 1)).get()),
- disc2(parseMatchExpression(BSON("f" << 1)).get()));
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("f" << 0)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("f" << 1)).get()));
+ }
- ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 2)).get()),
- disc2(parseMatchExpression(BSON("f" << 2)).get()));
+ {
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("g");
+ ASSERT_EQ(1U, discriminators.size());
- ASSERT(state.getDiscriminators("a").empty());
- ASSERT(state.getDiscriminators("b").empty());
+ const IndexabilityDiscriminator& disc = discriminators[0];
+ ASSERT_EQ(false, disc(parseMatchExpression(BSON("g" << 0)).get()));
+ ASSERT_EQ(true, disc(parseMatchExpression(BSON("g" << 1)).get()));
}
- // Test that no discriminators are generated for a regular index.
- TEST(PlanCacheIndexabilityTest, IndexNeitherSparseNorPartial) {
- PlanCacheIndexabilityState state;
- state.updateDiscriminators({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- nullptr,
- BSONObj())});
- ASSERT(state.getDiscriminators("a").empty());
- }
+ ASSERT(state.getDiscriminators("a").empty());
+}
+
+// Test partial index discriminators where there are multiple partial indexes.
+TEST(PlanCacheIndexabilityTest, MultiplePartialIndexes) {
+ BSONObj filterObj1 = BSON("f" << 1);
+ std::unique_ptr<MatchExpression> filterExpr1(parseMatchExpression(filterObj1));
+
+ BSONObj filterObj2 = BSON("f" << 2);
+ std::unique_ptr<MatchExpression> filterExpr2(parseMatchExpression(filterObj2));
+
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr1.get(),
+ BSONObj()),
+ IndexEntry(BSON("b" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr2.get(),
+ BSONObj())});
+
+ const IndexabilityDiscriminators& discriminators = state.getDiscriminators("f");
+ ASSERT_EQ(2U, discriminators.size());
+
+ const IndexabilityDiscriminator& disc1 = discriminators[0];
+ const IndexabilityDiscriminator& disc2 = discriminators[1];
+
+ ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
+ ASSERT_EQ(false, disc1(parseMatchExpression(BSON("f" << 0)).get()));
+
+ ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 1)).get()),
+ disc2(parseMatchExpression(BSON("f" << 1)).get()));
+
+ ASSERT_NOT_EQUALS(disc1(parseMatchExpression(BSON("f" << 2)).get()),
+ disc2(parseMatchExpression(BSON("f" << 2)).get()));
+
+ ASSERT(state.getDiscriminators("a").empty());
+ ASSERT(state.getDiscriminators("b").empty());
+}
+
+// Test that no discriminators are generated for a regular index.
+TEST(PlanCacheIndexabilityTest, IndexNeitherSparseNorPartial) {
+ PlanCacheIndexabilityState state;
+ state.updateDiscriminators({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ nullptr,
+ BSONObj())});
+ ASSERT(state.getDiscriminators("a").empty());
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 78b9bdf959c..da15528d243 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -50,1169 +50,1218 @@ using namespace mongo;
namespace {
- using std::string;
- using std::unique_ptr;
- using std::vector;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- static const char* ns = "somebogusns";
+static const char* ns = "somebogusns";
- /**
- * Utility functions to create a CanonicalQuery
- */
- CanonicalQuery* canonicalize(const BSONObj& queryObj) {
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr) {
- BSONObj queryObj = fromjson(queryStr);
- return canonicalize(queryObj);
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr,
- long long skip, long long limit,
- const char* hintStr,
- const char* minStr, const char* maxStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- BSONObj hintObj = fromjson(hintStr);
- BSONObj minObj = fromjson(minStr);
- BSONObj maxObj = fromjson(maxStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- skip, limit,
- hintObj,
- minObj, maxObj,
- false, // snapshot
- false, // explain
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr,
- const char* projStr,
- long long skip, long long limit,
- const char* hintStr,
- const char* minStr, const char* maxStr,
- bool snapshot,
- bool explain) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projObj = fromjson(projStr);
- BSONObj hintObj = fromjson(hintStr);
- BSONObj minObj = fromjson(minStr);
- BSONObj maxObj = fromjson(maxStr);
- CanonicalQuery* cq;
- Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj,
- projObj,
- skip, limit,
- hintObj,
- minObj, maxObj,
- snapshot,
- explain,
- &cq);
- ASSERT_OK(result);
- return cq;
- }
-
- /**
- * Utility function to create MatchExpression
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- if (!status.isOK()) {
- str::stream ss;
- ss << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString();
- FAIL(ss);
- }
- MatchExpression* expr(status.getValue());
- return expr;
- }
+/**
+ * Utility functions to create a CanonicalQuery
+ */
+CanonicalQuery* canonicalize(const BSONObj& queryObj) {
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ return canonicalize(queryObj);
+}
+
+CanonicalQuery* canonicalize(const char* queryStr, const char* sortStr, const char* projStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ BSONObj hintObj = fromjson(hintStr);
+ BSONObj minObj = fromjson(minStr);
+ BSONObj maxObj = fromjson(maxStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns,
+ queryObj,
+ sortObj,
+ projObj,
+ skip,
+ limit,
+ hintObj,
+ minObj,
+ maxObj,
+ false, // snapshot
+ false, // explain
+ &cq);
+ ASSERT_OK(result);
+ return cq;
+}
+
+CanonicalQuery* canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr,
+ bool snapshot,
+ bool explain) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projObj = fromjson(projStr);
+ BSONObj hintObj = fromjson(hintStr);
+ BSONObj minObj = fromjson(minStr);
+ BSONObj maxObj = fromjson(maxStr);
+ CanonicalQuery* cq;
+ Status result = CanonicalQuery::canonicalize(ns,
+ queryObj,
+ sortObj,
+ projObj,
+ skip,
+ limit,
+ hintObj,
+ minObj,
+ maxObj,
+ snapshot,
+ explain,
+ &cq);
+ ASSERT_OK(result);
+ return cq;
+}
- void assertEquivalent(const char* queryStr, const MatchExpression* expected, const MatchExpression* actual) {
- if (actual->equivalent(expected)) {
- return;
- }
+/**
+ * Utility function to create MatchExpression
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ if (!status.isOK()) {
str::stream ss;
- ss << "Match expressions are not equivalent."
- << "\nOriginal query: " << queryStr
- << "\nExpected: " << expected->toString()
- << "\nActual: " << actual->toString();
+ ss << "failed to parse query: " << obj.toString()
+ << ". Reason: " << status.getStatus().toString();
FAIL(ss);
}
-
- //
- // Tests for CachedSolution
- //
-
- /**
- * Generator for vector of QuerySolution shared pointers.
- */
- struct GenerateQuerySolution {
- QuerySolution* operator()() const {
- unique_ptr<QuerySolution> qs(new QuerySolution());
- qs->cacheData.reset(new SolutionCacheData());
- qs->cacheData->solnType = SolutionCacheData::COLLSCAN_SOLN;
- qs->cacheData->tree.reset(new PlanCacheIndexTree());
- return qs.release();
- }
- };
-
- /**
- * Utility function to create a PlanRankingDecision
- */
- PlanRankingDecision* createDecision(size_t numPlans) {
- unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
- for (size_t i = 0; i < numPlans; ++i) {
- CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
- stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
- why->scores.push_back(0U);
- why->candidateOrder.push_back(i);
- }
- return why.release();
+ MatchExpression* expr(status.getValue());
+ return expr;
+}
+
+void assertEquivalent(const char* queryStr,
+ const MatchExpression* expected,
+ const MatchExpression* actual) {
+ if (actual->equivalent(expected)) {
+ return;
}
+ str::stream ss;
+ ss << "Match expressions are not equivalent."
+ << "\nOriginal query: " << queryStr << "\nExpected: " << expected->toString()
+ << "\nActual: " << actual->toString();
+ FAIL(ss);
+}
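+
+// A minimal usage sketch of the two helpers above (illustrative only; the
+// predicates are hypothetical): both strings parse to an equality on 'a',
+// so assertEquivalent() succeeds.
+//
+//     unique_ptr<MatchExpression> expected(parseMatchExpression(fromjson("{a: 1}")));
+//     unique_ptr<MatchExpression> actual(parseMatchExpression(fromjson("{a: {$eq: 1}}")));
+//     assertEquivalent("{a: 1}", expected.get(), actual.get());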
- /**
- * Test functions for shouldCacheQuery
- * Use these functions to assert which categories
- * of canonicalized queries are suitable for inclusion
- * in the planner cache.
- */
- void assertShouldCacheQuery(const CanonicalQuery& query) {
- if (PlanCache::shouldCacheQuery(query)) {
- return;
- }
- str::stream ss;
- ss << "Canonical query should be cacheable: " << query.toString();
- FAIL(ss);
- }
+//
+// Tests for CachedSolution
+//
- void assertShouldNotCacheQuery(const CanonicalQuery& query) {
- if (!PlanCache::shouldCacheQuery(query)) {
- return;
- }
- str::stream ss;
- ss << "Canonical query should not be cacheable: " << query.toString();
- FAIL(ss);
- }
-
- void assertShouldNotCacheQuery(const BSONObj& query) {
- unique_ptr<CanonicalQuery> cq(canonicalize(query));
- assertShouldNotCacheQuery(*cq);
+/**
+ * Generator for a vector of QuerySolution pointers (the caller owns the returned pointers).
+ */
+struct GenerateQuerySolution {
+ QuerySolution* operator()() const {
+ unique_ptr<QuerySolution> qs(new QuerySolution());
+ qs->cacheData.reset(new SolutionCacheData());
+ qs->cacheData->solnType = SolutionCacheData::COLLSCAN_SOLN;
+ qs->cacheData->tree.reset(new PlanCacheIndexTree());
+ return qs.release();
}
+};
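+
+// Illustrative sketch of how the generator functor might be used (hypothetical,
+// assuming <algorithm> is available): fill a vector with raw QuerySolution
+// pointers that the caller then owns and must delete.
+//
+//     std::vector<QuerySolution*> generated(3);
+//     std::generate(generated.begin(), generated.end(), GenerateQuerySolution());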
- void assertShouldNotCacheQuery(const char* queryStr) {
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
- assertShouldNotCacheQuery(*cq);
+/**
+ * Utility function to create a PlanRankingDecision
+ */
+PlanRankingDecision* createDecision(size_t numPlans) {
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ for (size_t i = 0; i < numPlans; ++i) {
+ CommonStats common("COLLSCAN");
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ stats->specific.reset(new CollectionScanStats());
+ why->stats.mutableVector().push_back(stats.release());
+ why->scores.push_back(0U);
+ why->candidateOrder.push_back(i);
}
+ return why.release();
+}
- /**
- * Cacheable queries
- * These queries will be added to the cache with run-time statistics
- * and can be managed with the cache DB commands.
- */
-
- TEST(PlanCacheTest, ShouldCacheQueryBasic) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- assertShouldCacheQuery(*cq);
+/**
+ * Test functions for shouldCacheQuery
+ * Use these functions to assert which categories
+ * of canonicalized queries are suitable for inclusion
+ * in the planner cache.
+ */
+void assertShouldCacheQuery(const CanonicalQuery& query) {
+ if (PlanCache::shouldCacheQuery(query)) {
+ return;
}
-
- TEST(PlanCacheTest, ShouldCacheQuerySort) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}"));
- assertShouldCacheQuery(*cq);
+ str::stream ss;
+ ss << "Canonical query should be cacheable: " << query.toString();
+ FAIL(ss);
+}
+
+void assertShouldNotCacheQuery(const CanonicalQuery& query) {
+ if (!PlanCache::shouldCacheQuery(query)) {
+ return;
}
+ str::stream ss;
+ ss << "Canonical query should not be cacheable: " << query.toString();
+ FAIL(ss);
+}
- /*
- * Non-cacheable queries.
- * These queries will be sent through the planning process everytime.
- */
-
- /**
- * Collection scan
- * This should normally be handled by the IDHack runner.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryCollectionScan) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{}"));
- assertShouldNotCacheQuery(*cq);
- }
+void assertShouldNotCacheQuery(const BSONObj& query) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(query));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Hint
- * A hinted query implies strong user preference for a particular index.
- * Therefore, not much point in caching.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithHint) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1, b: 1}",
- "{}", "{}"));
- assertShouldNotCacheQuery(*cq);
- }
+void assertShouldNotCacheQuery(const char* queryStr) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Min queries are a specialized case of hinted queries
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithMin) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{a: 100}", "{}"));
- assertShouldNotCacheQuery(*cq);
- }
+/**
+ * Cacheable queries
+ * These queries will be added to the cache with run-time statistics
+ * and can be managed with the cache DB commands.
+ */
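+
+// For reference, the "cache DB commands" mentioned above are assumed to be the
+// plan cache commands of this era, e.g. (mongo shell):
+//
+//     db.runCommand({planCacheListQueryShapes: "collection"})
+//     db.runCommand({planCacheClear: "collection", query: {a: 1}})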
- /**
- * Max queries are non-cacheable for the same reasons as min queries.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{}", "{a: 100}"));
- assertShouldNotCacheQuery(*cq);
- }
+TEST(PlanCacheTest, ShouldCacheQueryBasic) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ assertShouldCacheQuery(*cq);
+}
- /**
- * $geoWithin queries with legacy coordinates are cacheable as long as
- * the planner is able to come up with a cacheable solution.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
- assertShouldCacheQuery(*cq);
- }
+TEST(PlanCacheTest, ShouldCacheQuerySort) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}"));
+ assertShouldCacheQuery(*cq);
+}
- /**
- * $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
- assertShouldCacheQuery(*cq);
- }
+/*
+ * Non-cacheable queries.
+ * These queries will be sent through the planning process every time.
+ */
- /**
- * $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
- "coordinates: [[[0, 0], [0, 90], "
- "[90, 0], [0, 0]]]}}}},"
- "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Collection scan
+ * This should normally be handled by the IDHack runner.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryCollectionScan) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
- */
- TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoIntersects: "
- "{$geometry: {type: 'Point', coordinates: "
- "[10.0, 10.0]}}}}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Hint
+ * A hinted query implies strong user preference for a particular index.
+ * Therefore, not much point in caching.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithHint) {
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1, b: 1}", "{}", "{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * $geoNear queries are cacheable because we are able to distinguish
- * between flat and spherical queries.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: {$geoNear: {$geometry: {type: 'Point',"
- "coordinates: [0,0]}, $maxDistance:100}}}"));
- assertShouldCacheQuery(*cq);
- }
+/**
+ * Min queries are a specialized case of hinted queries
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithMin) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}", "{a: 100}", "{}"));
+ assertShouldNotCacheQuery(*cq);
+}
- /**
- * Explain queries are not-cacheable because of allPlans cannot
- * be accurately generated from stale cached stats in the plan cache for
- * non-winning plans.
- */
- TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}",
- "{}", "{}", // min, max
- false, // snapshot
- true // explain
- ));
- const LiteParsedQuery& pq = cq->getParsed();
- ASSERT_TRUE(pq.isExplain());
- assertShouldNotCacheQuery(*cq);
- }
+/**
+ * Max queries are non-cacheable for the same reasons as min queries.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}", "{}", "{}", 0, 0, "{}", "{}", "{a: 100}"));
+ assertShouldNotCacheQuery(*cq);
+}
- // Adding an empty vector of query solutions should fail.
- TEST(PlanCacheTest, AddEmptySolutions) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- std::vector<QuerySolution*> solns;
- std::unique_ptr<PlanRankingDecision> decision(createDecision(1U));
- ASSERT_NOT_OK(planCache.add(*cq, solns, decision.get()));
- }
+/**
+ * $geoWithin queries with legacy coordinates are cacheable as long as
+ * the planner is able to come up with a cacheable solution.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
+ assertShouldCacheQuery(*cq);
+}
- TEST(PlanCacheTest, AddValidSolution) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
+/**
+ * $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ assertShouldCacheQuery(*cq);
+}
- // Check if key is in cache before and after add().
- ASSERT_FALSE(planCache.contains(*cq));
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+/**
+ * $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
+ "coordinates: [[[0, 0], [0, 90], "
+ "[90, 0], [0, 0]]]}}}},"
+ "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
+ assertShouldCacheQuery(*cq);
+}
- ASSERT_TRUE(planCache.contains(*cq));
- ASSERT_EQUALS(planCache.size(), 1U);
- }
+/**
+ * $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoIntersects: "
+ "{$geometry: {type: 'Point', coordinates: "
+ "[10.0, 10.0]}}}}"));
+ assertShouldCacheQuery(*cq);
+}
- TEST(PlanCacheTest, NotifyOfWriteOp) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- ASSERT_EQUALS(planCache.size(), 1U);
-
- // First (N - 1) write ops should have no effect on cache contents.
- for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
- planCache.notifyOfWriteOp();
- }
- ASSERT_EQUALS(planCache.size(), 1U);
+/**
+ * $geoNear queries are cacheable because we are able to distinguish
+ * between flat and spherical queries.
+ */
+TEST(PlanCacheTest, ShouldCacheQueryWithGeoNear) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(
+ "{a: {$geoNear: {$geometry: {type: 'Point',"
+ "coordinates: [0,0]}, $maxDistance:100}}}"));
+ assertShouldCacheQuery(*cq);
+}
- // N-th notification will cause cache to be cleared.
- planCache.notifyOfWriteOp();
- ASSERT_EQUALS(planCache.size(), 0U);
-
- // Clearing the cache should reset the internal write
- // operation counter.
- // Repopulate cache. Write (N - 1) times.
- // Clear cache.
- // Add cache entry again.
- // After clearing and adding a new entry, the next write operation should not
- // clear the cache.
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
- planCache.notifyOfWriteOp();
- }
- ASSERT_EQUALS(planCache.size(), 1U);
- planCache.clear();
- ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
- // Notification after clearing will not flush cache.
+/**
+ * Explain queries are not cacheable because allPlans cannot be
+ * accurately generated from stale cached stats in the plan cache for
+ * non-winning plans.
+ */
+TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}",
+ "{}",
+ "{}",
+ 0,
+ 0,
+ "{}",
+ "{}",
+ "{}", // min, max
+ false, // snapshot
+ true // explain
+ ));
+ const LiteParsedQuery& pq = cq->getParsed();
+ ASSERT_TRUE(pq.isExplain());
+ assertShouldNotCacheQuery(*cq);
+}
+
+// Adding an empty vector of query solutions should fail.
+TEST(PlanCacheTest, AddEmptySolutions) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ std::vector<QuerySolution*> solns;
+ std::unique_ptr<PlanRankingDecision> decision(createDecision(1U));
+ ASSERT_NOT_OK(planCache.add(*cq, solns, decision.get()));
+}
+
+TEST(PlanCacheTest, AddValidSolution) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ QuerySolution qs;
+ qs.cacheData.reset(new SolutionCacheData());
+ qs.cacheData->tree.reset(new PlanCacheIndexTree());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+
+ // Check if key is in cache before and after add().
+ ASSERT_FALSE(planCache.contains(*cq));
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+
+ ASSERT_TRUE(planCache.contains(*cq));
+ ASSERT_EQUALS(planCache.size(), 1U);
+}
+
+TEST(PlanCacheTest, NotifyOfWriteOp) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
+ QuerySolution qs;
+ qs.cacheData.reset(new SolutionCacheData());
+ qs.cacheData->tree.reset(new PlanCacheIndexTree());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ ASSERT_EQUALS(planCache.size(), 1U);
+
+ // First (N - 1) write ops should have no effect on cache contents.
+ for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
planCache.notifyOfWriteOp();
- ASSERT_EQUALS(planCache.size(), 1U);
}
-
- /**
- * Each test in the CachePlanSelectionTest suite goes through
- * the following flow:
- *
- * 1) Run QueryPlanner::plan on the query, with specified indices
- * available. This simulates the case in which we failed to plan from
- * the plan cache, and fell back on selecting a plan ourselves. The
- * enumerator will run, and cache data will be stashed into each solution
- * that it generates.
- *
- * 2) Use firstMatchingSolution to select one of the solutions generated
- * by QueryPlanner::plan. This simulates the multi plan runner picking
- * the "best solution".
- *
- * 3) The cache data stashed inside the "best solution" is used to
- * make a CachedSolution which looks exactly like the data structure that
- * would be returned from the cache. This simulates a plan cache hit.
- *
- * 4) Call QueryPlanner::planFromCache, passing it the CachedSolution.
- * This exercises the code which is able to map from a CachedSolution to
- * a full-blown QuerySolution. Finally, assert that the query solution
- * recovered from the cache is identical to the original "best solution".
- */
- class CachePlanSelectionTest : public mongo::unittest::Test {
- protected:
- void setUp() {
- cq = NULL;
- params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
- addIndex(BSON("_id" << 1));
- }
-
- void tearDown() {
- delete cq;
-
- for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
- delete *it;
- }
- }
-
- void addIndex(BSONObj keyPattern, bool multikey = false) {
- // The first false means not multikey.
- // The second false means not sparse.
- // The third arg is the index name and I am egotistical.
- // The NULL means no filter expression.
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- false,
- false,
- "hari_king_of_the_stove",
- NULL,
- BSONObj()));
- }
-
- void addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- sparse,
- false,
- "note_to_self_dont_break_build",
- NULL,
- BSONObj()));
- }
-
- //
- // Execute planner.
- //
-
- void runQuery(BSONObj query) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
- }
-
- void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj) {
- runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
- }
-
- void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
- }
-
- void runQueryHint(const BSONObj& query, const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
- }
-
- void runQuerySortProjSkipLimit(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit) {
- runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
- }
-
- void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
- }
-
- void runQueryHintMinMax(const BSONObj& query, const BSONObj& hint,
- const BSONObj& minObj, const BSONObj& maxObj) {
-
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
- }
-
- void runQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit,
- const BSONObj& hint) {
- runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
- }
-
- void runQuerySnapshot(const BSONObj& query) {
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(),
- BSONObj(), true);
- }
-
- void runQueryFull(const BSONObj& query,
- const BSONObj& sort, const BSONObj& proj,
- long long skip, long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot) {
-
- // Clean up any previous state from a call to runQueryFull
- delete cq;
- cq = NULL;
-
- for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
- delete *it;
- }
-
- solns.clear();
-
-
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- minObj, maxObj, snapshot,
- false, // explain
- &cq);
- if (!s.isOK()) { cq = NULL; }
- ASSERT_OK(s);
- s = QueryPlanner::plan(*cq, params, &solns);
- ASSERT_OK(s);
- }
-
- //
- // Solution introspection.
- //
-
- void dumpSolutions(str::stream& ost) const {
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- ost << (*it)->toString() << '\n';
- }
- }
-
- /**
- * Returns number of generated solutions matching JSON.
- */
- size_t numSolutionMatches(const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- size_t matches = 0;
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolutionNode* root = (*it)->root.get();
- if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
- ++matches;
- }
- }
- return matches;
- }
-
- /**
- * Verifies that the solution tree represented in json by 'solnJson' is
- * one of the solutions generated by QueryPlanner.
- *
- * The number of expected matches, 'numMatches', could be greater than
- * 1 if solutions differ only by the pattern of index tags on a filter.
- */
- void assertSolutionExists(const string& solnJson, size_t numMatches = 1) const {
- size_t matches = numSolutionMatches(solnJson);
- if (numMatches == matches) {
- return;
- }
- str::stream ss;
- ss << "expected " << numMatches << " matches for solution " << solnJson
- << " but got " << matches
- << " instead. all solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
- }
-
- /**
- * Plan 'query' from the cache. A mock cache entry is created using
- * the cacheData stored inside the QuerySolution 'soln'.
- *
- * Does not take ownership of 'soln'.
- */
- QuerySolution* planQueryFromCache(const BSONObj& query, const QuerySolution& soln) const {
- return planQueryFromCache(query, BSONObj(), BSONObj(), soln);
- }
-
- /**
- * Plan 'query' from the cache with sort order 'sort' and
- * projection 'proj'. A mock cache entry is created using
- * the cacheData stored inside the QuerySolution 'soln'.
- *
- * Does not take ownership of 'soln'.
- */
- QuerySolution* planQueryFromCache(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- const QuerySolution& soln) const {
- CanonicalQuery* cq;
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, &cq);
- ASSERT_OK(s);
- unique_ptr<CanonicalQuery> scopedCq(cq);
- cq = NULL;
-
- // Create a CachedSolution the long way..
- // QuerySolution -> PlanCacheEntry -> CachedSolution
- QuerySolution qs;
- qs.cacheData.reset(soln.cacheData->clone());
- std::vector<QuerySolution*> solutions;
- solutions.push_back(&qs);
- PlanCacheEntry entry(solutions, createDecision(1U));
- CachedSolution cachedSoln(ck, entry);
-
- QuerySolution *out;
- s = QueryPlanner::planFromCache(*scopedCq.get(), params, cachedSoln, &out);
- ASSERT_OK(s);
-
- return out;
- }
-
- /**
- * @param solnJson -- a json representation of a query solution.
- *
- * Returns the first solution matching 'solnJson', or fails if
- * no match is found.
- */
- QuerySolution* firstMatchingSolution(const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- for (vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolutionNode* root = (*it)->root.get();
- if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
- return *it;
- }
- }
-
- str::stream ss;
- ss << "Could not find a match for solution " << solnJson
- << " All solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
-
- return NULL;
- }
-
- /**
- * Assert that the QuerySolution 'trueSoln' matches the JSON-based representation
- * of the solution in 'solnJson'.
- *
- * Relies on solutionMatches() -- see query_planner_test_lib.h
- */
- void assertSolutionMatches(QuerySolution* trueSoln, const string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- if (!QueryPlannerTestLib::solutionMatches(testSoln, trueSoln->root.get())) {
- str::stream ss;
- ss << "Expected solution " << solnJson << " did not match true solution: "
- << trueSoln->toString() << '\n';
- FAIL(ss);
- }
- }
-
- /**
- * Overloaded so that it is not necessary to specificy sort and project.
- */
- void assertPlanCacheRecoversSolution(const BSONObj& query, const string& solnJson) {
- assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), solnJson);
- }
-
- /**
- * First, the solution matching 'solnJson' is retrieved from the vector
- * of solutions generated by QueryPlanner::plan. This solution is
- * then passed into planQueryFromCache(). Asserts that the solution
- * generated by QueryPlanner::planFromCache matches 'solnJson'.
- *
- * Must be called after calling one of the runQuery* methods.
- *
- * Together, 'query', 'sort', and 'proj' should specify the query which
- * was previously run using one of the runQuery* methods.
- */
- void assertPlanCacheRecoversSolution(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- const string& solnJson) {
- QuerySolution* bestSoln = firstMatchingSolution(solnJson);
- QuerySolution* planSoln = planQueryFromCache(query, sort, proj, *bestSoln);
- assertSolutionMatches(planSoln, solnJson);
- delete planSoln;
- }
-
- /**
- * Check that the solution will not be cached. The planner will store
- * cache data inside non-cachable solutions, but will not do so for
- * non-cachable solutions. Therefore, we just have to check that
- * cache data is NULL.
- */
- void assertNotCached(const string& solnJson) {
- QuerySolution* bestSoln = firstMatchingSolution(solnJson);
- ASSERT(NULL != bestSoln);
- ASSERT(NULL == bestSoln->cacheData.get());
- }
-
- static const PlanCacheKey ck;
-
- BSONObj queryObj;
- CanonicalQuery* cq;
- QueryPlannerParams params;
- vector<QuerySolution*> solns;
- };
-
- const PlanCacheKey CachePlanSelectionTest::ck = "mock_cache_key";
-
- //
- // Equality
- //
-
- TEST_F(CachePlanSelectionTest, EqualityIndexScan) {
- addIndex(BSON("x" << 1));
- runQuery(BSON("x" << 5));
-
- assertPlanCacheRecoversSolution(BSON("x" << 5),
- "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(CachePlanSelectionTest, EqualityIndexScanWithTrailingFields) {
- addIndex(BSON("x" << 1 << "y" << 1));
- runQuery(BSON("x" << 5));
-
- assertPlanCacheRecoversSolution(BSON("x" << 5),
- "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
+ ASSERT_EQUALS(planCache.size(), 1U);
+
+ // N-th notification will cause cache to be cleared.
+ planCache.notifyOfWriteOp();
+ ASSERT_EQUALS(planCache.size(), 0U);
+
+ // Clearing the cache should reset the internal write
+ // operation counter.
+ // Repopulate cache. Write (N - 1) times.
+ // Clear cache.
+ // Add cache entry again.
+ // After clearing and adding a new entry, the next write operation should not
+ // clear the cache.
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ for (int i = 0; i < (internalQueryCacheWriteOpsBetweenFlush - 1); ++i) {
+ planCache.notifyOfWriteOp();
}
+ ASSERT_EQUALS(planCache.size(), 1U);
+ planCache.clear();
+ ASSERT_OK(planCache.add(*cq, solns, createDecision(1U)));
+ // Notification after clearing will not flush cache.
+ planCache.notifyOfWriteOp();
+ ASSERT_EQUALS(planCache.size(), 1U);
+}
- //
- // Geo
- //
-
- TEST_F(CachePlanSelectionTest, Basic2DSphereNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- BSONObj query;
-
- query = fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- query = fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+/**
+ * Each test in the CachePlanSelectionTest suite goes through
+ * the following flow:
+ *
+ * 1) Run QueryPlanner::plan on the query, with specified indices
+ * available. This simulates the case in which we failed to plan from
+ * the plan cache, and fell back on selecting a plan ourselves. The
+ * enumerator will run, and cache data will be stashed into each solution
+ * that it generates.
+ *
+ * 2) Use firstMatchingSolution to select one of the solutions generated
+ * by QueryPlanner::plan. This simulates the multi plan runner picking
+ * the "best solution".
+ *
+ * 3) The cache data stashed inside the "best solution" is used to
+ * make a CachedSolution which looks exactly like the data structure that
+ * would be returned from the cache. This simulates a plan cache hit.
+ *
+ * 4) Call QueryPlanner::planFromCache, passing it the CachedSolution.
+ * This exercises the code which is able to map from a CachedSolution to
+ * a full-blown QuerySolution. Finally, assert that the query solution
+ * recovered from the cache is identical to the original "best solution".
+ */
+class CachePlanSelectionTest : public mongo::unittest::Test {
+protected:
+ void setUp() {
+ cq = NULL;
+ params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
+ addIndex(BSON("_id" << 1));
}
- TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNear) {
- addIndex(BSON("a" << "2dsphere"));
- BSONObj query;
+ void tearDown() {
+ delete cq;
- query = fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
-
- query = fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
- }
-
- TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNearReverseCompound) {
- addIndex(BSON("x" << 1));
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- BSONObj query = fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+ for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
+ delete *it;
+ }
}
- TEST_F(CachePlanSelectionTest, TwoDSphereNoGeoPred) {
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- runQuery(BSON("x" << 1));
- assertPlanCacheRecoversSolution(BSON("x" << 1),
- "{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+ void addIndex(BSONObj keyPattern, bool multikey = false) {
+ // The first false means not multikey.
+ // The second false means not sparse.
+ // The third arg is the index name and I am egotistical.
+ // The NULL means no filter expression.
+ params.indices.push_back(IndexEntry(
+ keyPattern, multikey, false, false, "hari_king_of_the_stove", NULL, BSONObj()));
}
- TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- addIndex(BSON("b" << "2dsphere"));
- BSONObj query = fromjson("{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}");
-
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+ void addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
+ params.indices.push_back(IndexEntry(
+ keyPattern, multikey, sparse, false, "note_to_self_dont_break_build", NULL, BSONObj()));
}
//
- // tree operations
+ // Execute planner.
//
- TEST_F(CachePlanSelectionTest, TwoPredicatesAnding) {
- addIndex(BSON("x" << 1));
- BSONObj query = fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
+ void runQuery(BSONObj query) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
}
- TEST_F(CachePlanSelectionTest, SimpleOr) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{a: 20}, {a: 21}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {a:1}}}}}");
+ void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj) {
+ runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
}
- TEST_F(CachePlanSelectionTest, OrWithAndChild) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {b: 7}, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}]}}}}");
+ void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
}
- TEST_F(CachePlanSelectionTest, AndWithUnindexedOrChild) {
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{a:20, $or: [{b:1}, {c:7}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void runQueryHint(const BSONObj& query, const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
}
-
- TEST_F(CachePlanSelectionTest, AndWithOrWithOneIndex) {
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1));
- BSONObj query = fromjson("{$or: [{b:1}, {c:7}], a:20}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
}
- //
- // Sort orders
- //
-
- // SERVER-1205.
- TEST_F(CachePlanSelectionTest, MergeSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- BSONObj sort = BSON("c" << 1);
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
+ void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
}
- // SERVER-1205 as well.
- TEST_F(CachePlanSelectionTest, NoMergeSortIfNoSortWanted) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- runQuerySortProj(query, BSONObj(), BSONObj());
-
- assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(),
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
- "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
- }
-
- // Disabled: SERVER-10801.
- /*
- TEST_F(CachePlanSelectionTest, SortOnGeoQuery) {
- addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
- "coordinates: [[[1, 1], [1, 90], [180, 90], "
- "[180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
- }
- */
-
- // SERVER-9257
- TEST_F(CachePlanSelectionTest, CompoundGeoNoGeoPredicate) {
- addIndex(BSON("creationDate" << 1 << "foo.bar" << "2dsphere"));
- BSONObj query = fromjson("{creationDate: {$gt: 7}}");
- BSONObj sort = fromjson("{creationDate: 1}");
- runQuerySortProj(query, sort, BSONObj());
-
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
- }
-
- TEST_F(CachePlanSelectionTest, ReverseScanForSort) {
- addIndex(BSON("_id" << 1));
- runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
- assertPlanCacheRecoversSolution(BSONObj(), fromjson("{_id: -1}"), BSONObj(),
- "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {_id: 1}}}}}");
+ void runQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
}
- //
- // Caching collection scans.
- //
-
- TEST_F(CachePlanSelectionTest, CollscanNoUsefulIndices) {
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("c" << 1));
- runQuery(BSON("b" << 4));
- assertPlanCacheRecoversSolution(BSON("b" << 4),
- "{cscan: {filter: {b: 4}, dir: 1}}");
+ void runQuerySortProjSkipLimitHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint) {
+ runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
}
- TEST_F(CachePlanSelectionTest, CollscanOrWithoutEnoughIndices) {
- addIndex(BSON("a" << 1));
- BSONObj query =fromjson("{$or: [{a: 20}, {b: 21}]}");
- runQuery(query);
- assertPlanCacheRecoversSolution(query,
- "{cscan: {filter: {$or:[{a:20},{b:21}]}, dir: 1}}");
+ void runQuerySnapshot(const BSONObj& query) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(), BSONObj(), true);
}
- TEST_F(CachePlanSelectionTest, CollscanMergeSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
- BSONObj sort = BSON("c" << 1);
- runQuerySortProj(query, sort, BSONObj());
+ void runQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot) {
+ // Clean up any previous state from a call to runQueryFull
+ delete cq;
+ cq = NULL;
+
+ for (vector<QuerySolution*>::iterator it = solns.begin(); it != solns.end(); ++it) {
+ delete *it;
+ }
- assertPlanCacheRecoversSolution(query, sort, BSONObj(),
- "{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ solns.clear();
+
+
+ Status s = CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ minObj,
+ maxObj,
+ snapshot,
+ false, // explain
+ &cq);
+ if (!s.isOK()) {
+ cq = NULL;
+ }
+ ASSERT_OK(s);
+ s = QueryPlanner::plan(*cq, params, &solns);
+ ASSERT_OK(s);
}
//
- // Check queries that, at least for now, are not cached.
+ // Solution introspection.
//
- TEST_F(CachePlanSelectionTest, GeoNear2DNotCached) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
- assertNotCached("{geoNear2d: {a: '2d'}}");
- }
-
- TEST_F(CachePlanSelectionTest, MinNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ void dumpSolutions(str::stream& ost) const {
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ ost << (*it)->toString() << '\n';
+ }
}
- TEST_F(CachePlanSelectionTest, MaxNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ /**
+ * Returns number of generated solutions matching JSON.
+ */
+ size_t numSolutionMatches(const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ size_t matches = 0;
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolutionNode* root = (*it)->root.get();
+ if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
+ ++matches;
+ }
+ }
+ return matches;
}
- TEST_F(CachePlanSelectionTest, NaturalHintNotCached) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
- assertNotCached("{sort: {pattern: {b: 1}, limit: 0, node: "
- "{cscan: {filter: {a: 1}, dir: 1}}}}");
+ /**
+ * Verifies that the solution tree represented in json by 'solnJson' is
+ * one of the solutions generated by QueryPlanner.
+ *
+ * The number of expected matches, 'numMatches', could be greater than
+ * 1 if solutions differ only by the pattern of index tags on a filter.
+ */
+ void assertSolutionExists(const string& solnJson, size_t numMatches = 1) const {
+ size_t matches = numSolutionMatches(solnJson);
+ if (numMatches == matches) {
+ return;
+ }
+ str::stream ss;
+ ss << "expected " << numMatches << " matches for solution " << solnJson << " but got "
+ << matches << " instead. all solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
}
- TEST_F(CachePlanSelectionTest, HintValidNotCached) {
- addIndex(BSON("a" << 1));
- runQueryHint(BSONObj(), fromjson("{a: 1}"));
- assertNotCached("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ /**
+ * Plan 'query' from the cache. A mock cache entry is created using
+ * the cacheData stored inside the QuerySolution 'soln'.
+ *
+ * Does not take ownership of 'soln'.
+ */
+ QuerySolution* planQueryFromCache(const BSONObj& query, const QuerySolution& soln) const {
+ return planQueryFromCache(query, BSONObj(), BSONObj(), soln);
}
- //
- // Queries using '2d' indices are not cached.
- //
+ /**
+ * Plan 'query' from the cache with sort order 'sort' and
+ * projection 'proj'. A mock cache entry is created using
+ * the cacheData stored inside the QuerySolution 'soln'.
+ *
+ * Does not take ownership of 'soln'.
+ */
+ QuerySolution* planQueryFromCache(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ const QuerySolution& soln) const {
+ CanonicalQuery* cq;
+ Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, &cq);
+ ASSERT_OK(s);
+ unique_ptr<CanonicalQuery> scopedCq(cq);
+ cq = NULL;
- TEST_F(CachePlanSelectionTest, Basic2DNonNearNotCached) {
- addIndex(BSON("a" << "2d"));
- BSONObj query;
-
- // Polygon
- query = fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Center
- query = fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Centersphere
- query = fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Within box.
- query = fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}");
- runQuery(query);
- assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
- }
+ // Create a CachedSolution the long way..
+ // QuerySolution -> PlanCacheEntry -> CachedSolution
+ QuerySolution qs;
+ qs.cacheData.reset(soln.cacheData->clone());
+ std::vector<QuerySolution*> solutions;
+ solutions.push_back(&qs);
+ PlanCacheEntry entry(solutions, createDecision(1U));
+ CachedSolution cachedSoln(ck, entry);
- TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) {
- addIndex(BSON("a" << "2d"));
- addIndex(BSON("b" << "2d"));
- BSONObj query = fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}");
+ QuerySolution* out;
+ s = QueryPlanner::planFromCache(*scopedCq.get(), params, cachedSoln, &out);
+ ASSERT_OK(s);
- runQuery(query);
- assertNotCached("{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
+ return out;
}
/**
- * Test functions for computeKey. Cache keys are intentionally obfuscated and are
- * meaningful only within the current lifetime of the server process. Users should treat plan
- * cache keys as opaque.
+ * @param solnJson -- a json representation of a query solution.
+ *
+ * Returns the first solution matching 'solnJson', or fails if
+ * no match is found.
*/
- void testComputeKey(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- const char *expectedStr) {
- PlanCache planCache;
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr));
- PlanCacheKey key = planCache.computeKey(*cq);
- PlanCacheKey expectedKey(expectedStr);
- if (key == expectedKey) {
- return;
+ QuerySolution* firstMatchingSolution(const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ for (vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolutionNode* root = (*it)->root.get();
+ if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
+ return *it;
+ }
}
+
str::stream ss;
- ss << "Unexpected plan cache key. Expected: " << expectedKey << ". Actual: " << key
- << ". Query: " << cq->toString();
+ ss << "Could not find a match for solution " << solnJson
+ << " All solutions generated: " << '\n';
+ dumpSolutions(ss);
FAIL(ss);
- }
- TEST(PlanCacheTest, ComputeKey) {
- // Generated cache keys should be treated as opaque to the user.
-
- // No sorts
- testComputeKey("{}", "{}", "{}", "an");
- testComputeKey("{$or: [{a: 1}, {b: 2}]}", "{}", "{}", "or[eqa,eqb]");
- testComputeKey("{$or: [{a: 1}, {b: 1}, {c: 1}], d: 1}", "{}", "{}",
- "an[or[eqa,eqb,eqc],eqd]");
- testComputeKey("{$or: [{a: 1}, {b: 1}], c: 1, d: 1}", "{}", "{}",
- "an[or[eqa,eqb],eqc,eqd]");
- testComputeKey("{a: 1, b: 1, c: 1}", "{}", "{}", "an[eqa,eqb,eqc]");
- testComputeKey("{a: 1, beqc: 1}", "{}", "{}", "an[eqa,eqbeqc]");
- testComputeKey("{ap1a: 1}", "{}", "{}", "eqap1a");
- testComputeKey("{aab: 1}", "{}", "{}", "eqaab");
-
- // With sort
- testComputeKey("{}", "{a: 1}", "{}", "an~aa");
- testComputeKey("{}", "{a: -1}", "{}", "an~da");
- testComputeKey("{}", "{a: {$meta: 'textScore'}}", "{a: {$meta: 'textScore'}}",
- "an~ta|{ $meta: \"textScore\" }a");
- testComputeKey("{a: 1}", "{b: 1}", "{}", "eqa~ab");
-
- // With projection
- testComputeKey("{}", "{}", "{a: 1}", "an|ia");
- testComputeKey("{}", "{}", "{a: -1}", "an|ia");
- testComputeKey("{}", "{}", "{a: -1.0}", "an|ia");
- testComputeKey("{}", "{}", "{a: true}", "an|ia");
- testComputeKey("{}", "{}", "{a: 0}", "an|ea");
- testComputeKey("{}", "{}", "{a: false}", "an|ea");
- testComputeKey("{}", "{}", "{a: 99}", "an|ia");
- testComputeKey("{}", "{}", "{a: 'foo'}", "an|ia");
- testComputeKey("{}", "{}", "{a: {$slice: [3, 5]}}", "an|{ $slice: \\[ 3\\, 5 \\] }a");
- testComputeKey("{}", "{}", "{a: {$elemMatch: {x: 2}}}",
- "an|{ $elemMatch: { x: 2 } }a");
- testComputeKey("{}", "{}", "{a: ObjectId('507f191e810c19729de860ea')}",
- "an|ia");
- testComputeKey("{a: 1}", "{}", "{'a.$': 1}", "eqa|ia.$");
- testComputeKey("{a: 1}", "{}", "{a: 1}", "eqa|ia");
-
- // Projection should be order-insensitive
- testComputeKey("{}", "{}", "{a: 1, b: 1}", "an|iaib");
- testComputeKey("{}", "{}", "{b: 1, a: 1}", "an|iaib");
-
- // With or-elimination and projection
- testComputeKey("{$or: [{a: 1}]}", "{}", "{_id: 0, a: 1}", "eqa|e_idia");
- testComputeKey("{$or: [{a: 1}]}", "{}", "{'a.$': 1}", "eqa|ia.$");
+ return NULL;
}
- // Delimiters found in user field names or non-standard projection field values
- // must be escaped.
- TEST(PlanCacheTest, ComputeKeyEscaped) {
- // Field name in query.
- testComputeKey("{'a,[]~|<>': 1}", "{}", "{}", "eqa\\,\\[\\]\\~\\|\\<\\>");
-
- // Field name in sort.
- testComputeKey("{}", "{'a,[]~|<>': 1}", "{}", "an~aa\\,\\[\\]\\~\\|\\<\\>");
-
- // Field name in projection.
- testComputeKey("{}", "{}", "{'a,[]~|<>': 1}", "an|ia\\,\\[\\]\\~\\|\\<\\>");
-
- // Value in projection.
- testComputeKey("{}", "{}", "{a: 'foo,[]~|<>'}", "an|ia");
+ /**
+ * Assert that the QuerySolution 'trueSoln' matches the JSON-based representation
+ * of the solution in 'solnJson'.
+ *
+ * Relies on solutionMatches() -- see query_planner_test_lib.h
+ */
+ void assertSolutionMatches(QuerySolution* trueSoln, const string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ if (!QueryPlannerTestLib::solutionMatches(testSoln, trueSoln->root.get())) {
+ str::stream ss;
+ ss << "Expected solution " << solnJson
+ << " did not match true solution: " << trueSoln->toString() << '\n';
+ FAIL(ss);
+ }
}
- // Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
- // not be the same.
- TEST(PlanCacheTest, ComputeKeyGeoWithin) {
- PlanCache planCache;
-
- // Legacy coordinates.
- unique_ptr<CanonicalQuery> cqLegacy(canonicalize("{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
- // GeoJSON coordinates.
- unique_ptr<CanonicalQuery> cqNew(canonicalize("{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy),
- planCache.computeKey(*cqNew));
+ /**
+ * Overloaded so that it is not necessary to specify sort and project.
+ */
+ void assertPlanCacheRecoversSolution(const BSONObj& query, const string& solnJson) {
+ assertPlanCacheRecoversSolution(query, BSONObj(), BSONObj(), solnJson);
}
- // GEO_NEAR cache keys should include information on geometry and CRS in addition
- // to the match type and field name.
- TEST(PlanCacheTest, ComputeKeyGeoNear) {
- testComputeKey("{a: {$near: [0,0], $maxDistance:0.3 }}", "{}", "{}", "gnanrfl");
- testComputeKey("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}", "{}", "{}", "gnanssp");
- testComputeKey("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}", "{}", "{}", "gnanrsp");
+ /**
+ * First, the solution matching 'solnJson' is retrieved from the vector
+ * of solutions generated by QueryPlanner::plan. This solution is
+ * then passed into planQueryFromCache(). Asserts that the solution
+ * generated by QueryPlanner::planFromCache matches 'solnJson'.
+ *
+ * Must be called after calling one of the runQuery* methods.
+ *
+ * Together, 'query', 'sort', and 'proj' should specify the query which
+ * was previously run using one of the runQuery* methods.
+ */
+ void assertPlanCacheRecoversSolution(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ const string& solnJson) {
+ QuerySolution* bestSoln = firstMatchingSolution(solnJson);
+ QuerySolution* planSoln = planQueryFromCache(query, sort, proj, *bestSoln);
+ assertSolutionMatches(planSoln, solnJson);
+ delete planSoln;
}
- // When a sparse index is present, computeKey() should generate different keys depending on
- // whether or not the predicates in the given query can use the index.
- TEST(PlanCacheTest, ComputeKeySparseIndex) {
- PlanCache planCache;
- planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
- false, // multikey
- true, // sparse
- false, // unique
- "", // name
- nullptr, // filterExpr
- BSONObj())});
-
- unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}}"));
- unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}}"));
- unique_ptr<CanonicalQuery> cqEqNull(canonicalize("{a: null}}"));
-
- // 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
- // index.
- ASSERT_EQ(planCache.computeKey(*cqEqNumber), planCache.computeKey(*cqEqString));
-
- // 'cqEqNull' gets a different key, since it is not compatible with this index.
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqEqNull), planCache.computeKey(*cqEqNumber));
+ /**
+ * Check that the solution will not be cached. The planner will store
+ * cache data inside cacheable solutions, but will not do so for
+ * non-cacheable solutions. Therefore, we just have to check that
+ * cache data is NULL.
+ */
+ void assertNotCached(const string& solnJson) {
+ QuerySolution* bestSoln = firstMatchingSolution(solnJson);
+ ASSERT(NULL != bestSoln);
+ ASSERT(NULL == bestSoln->cacheData.get());
}
- // When a partial index is present, computeKey() should generate different keys depending on
- // whether or not the predicates in the given query "match" the predicates in the partial index
- // filter.
- TEST(PlanCacheTest, ComputeKeyPartialIndex) {
- BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
- unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
-
- PlanCache planCache;
- planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
- false, // multikey
- false, // sparse
- false, // unique
- "", // name
- filterExpr.get(),
- BSONObj())});
-
- unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
- unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
- unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
-
- // 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
- ASSERT_EQ(planCache.computeKey(*cqGtZero), planCache.computeKey(*cqGtFive));
-
- // 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
- ASSERT_NOT_EQUALS(planCache.computeKey(*cqGtNegativeFive), planCache.computeKey(*cqGtZero));
+ static const PlanCacheKey ck;
+
+ BSONObj queryObj;
+ CanonicalQuery* cq;
+ QueryPlannerParams params;
+ vector<QuerySolution*> solns;
+};
+
+const PlanCacheKey CachePlanSelectionTest::ck = "mock_cache_key";
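+
+// Schematic outline of the flow each CachePlanSelectionTest below follows,
+// using the fixture helpers (illustrative only; assertPlanCacheRecoversSolution
+// bundles the steps after runQuery()):
+//
+//     addIndex(BSON("x" << 1));
+//     runQuery(BSON("x" << 5));                                     // plan with QueryPlanner::plan
+//     QuerySolution* best = firstMatchingSolution(solnJson);        // pick the "best" solution
+//     QuerySolution* replanned = planQueryFromCache(query, *best);  // replan from a mock cache entry
+//     assertSolutionMatches(replanned, solnJson);                   // verify the round trip
+//     delete replanned;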
+
+//
+// Equality
+//
+
+TEST_F(CachePlanSelectionTest, EqualityIndexScan) {
+ addIndex(BSON("x" << 1));
+ runQuery(BSON("x" << 5));
+
+ assertPlanCacheRecoversSolution(BSON("x" << 5),
+ "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, EqualityIndexScanWithTrailingFields) {
+ addIndex(BSON("x" << 1 << "y" << 1));
+ runQuery(BSON("x" << 5));
+
+ assertPlanCacheRecoversSolution(
+ BSON("x" << 5), "{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
+}
+
+//
+// Geo
+//
+
+TEST_F(CachePlanSelectionTest, Basic2DSphereNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ BSONObj query;
+
+ query = fromjson(
+ "{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ query = fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ BSONObj query;
+
+ query = fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
+
+ query = fromjson(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {a: '2dsphere'}}");
+}
+
+TEST_F(CachePlanSelectionTest, Basic2DSphereGeoNearReverseCompound) {
+ addIndex(BSON("x" << 1));
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ BSONObj query = fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+}
+
+TEST_F(CachePlanSelectionTest, TwoDSphereNoGeoPred) {
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ runQuery(BSON("x" << 1));
+ assertPlanCacheRecoversSolution(BSON("x" << 1),
+ "{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, Or2DSphereNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ addIndex(BSON("b"
+ << "2dsphere"));
+ BSONObj query = fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}");
+
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query,
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+}
+
+//
+// tree operations
+//
+
+TEST_F(CachePlanSelectionTest, TwoPredicatesAnding) {
+ addIndex(BSON("x" << 1));
+ BSONObj query = fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query, "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, SimpleOr) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {a: 21}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(
+ query, "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {a:1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, OrWithAndChild) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {b: 7}, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}]}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, AndWithUnindexedOrChild) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{a:20, $or: [{b:1}, {c:7}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+
+TEST_F(CachePlanSelectionTest, AndWithOrWithOneIndex) {
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{b:1}, {c:7}], a:20}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query,
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+//
+// Sort orders
+//
+
+// SERVER-1205.
+TEST_F(CachePlanSelectionTest, MergeSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ BSONObj sort = BSON("c" << 1);
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query,
+ sort,
+ BSONObj(),
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// SERVER-1205 as well.
+TEST_F(CachePlanSelectionTest, NoMergeSortIfNoSortWanted) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ runQuerySortProj(query, BSONObj(), BSONObj());
+
+ assertPlanCacheRecoversSolution(query,
+ BSONObj(),
+ BSONObj(),
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
+ "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// Disabled: SERVER-10801.
+/*
+TEST_F(CachePlanSelectionTest, SortOnGeoQuery) {
+ addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
+ "coordinates: [[[1, 1], [1, 90], [180, 90], "
+ "[180, 1], [1, 1]]]}}}}");
+ BSONObj sort = fromjson("{timestamp: -1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(query, sort, BSONObj(),
+ "{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
+}
+*/
+
+// SERVER-9257
+TEST_F(CachePlanSelectionTest, CompoundGeoNoGeoPredicate) {
+ addIndex(BSON("creationDate" << 1 << "foo.bar"
+ << "2dsphere"));
+ BSONObj query = fromjson("{creationDate: {$gt: 7}}");
+ BSONObj sort = fromjson("{creationDate: 1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query,
+ sort,
+ BSONObj(),
+ "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, ReverseScanForSort) {
+ addIndex(BSON("_id" << 1));
+ runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
+ assertPlanCacheRecoversSolution(
+ BSONObj(),
+ fromjson("{_id: -1}"),
+ BSONObj(),
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {_id: 1}}}}}");
+}
+
+//
+// Caching collection scans.
+//
+
+TEST_F(CachePlanSelectionTest, CollscanNoUsefulIndices) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("c" << 1));
+ runQuery(BSON("b" << 4));
+ assertPlanCacheRecoversSolution(BSON("b" << 4), "{cscan: {filter: {b: 4}, dir: 1}}");
+}
+
+TEST_F(CachePlanSelectionTest, CollscanOrWithoutEnoughIndices) {
+ addIndex(BSON("a" << 1));
+ BSONObj query = fromjson("{$or: [{a: 20}, {b: 21}]}");
+ runQuery(query);
+ assertPlanCacheRecoversSolution(query, "{cscan: {filter: {$or:[{a:20},{b:21}]}, dir: 1}}");
+}
+
+TEST_F(CachePlanSelectionTest, CollscanMergeSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ BSONObj query = fromjson("{$or: [{a:1}, {b:1}]}");
+ BSONObj sort = BSON("c" << 1);
+ runQuerySortProj(query, sort, BSONObj());
+
+ assertPlanCacheRecoversSolution(
+ query, sort, BSONObj(), "{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+}
+
+//
+// Check queries that, at least for now, are not cached.
+//
+
+TEST_F(CachePlanSelectionTest, GeoNear2DNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
+ assertNotCached("{geoNear2d: {a: '2d'}}");
+}
+
+TEST_F(CachePlanSelectionTest, MinNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, MaxNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, NaturalHintNotCached) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
+ assertNotCached(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{cscan: {filter: {a: 1}, dir: 1}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, HintValidNotCached) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(BSONObj(), fromjson("{a: 1}"));
+ assertNotCached(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+//
+// Queries using '2d' indices are not cached.
+//
+
+TEST_F(CachePlanSelectionTest, Basic2DNonNearNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ BSONObj query;
+
+ // Polygon
+ query = fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Center
+ query = fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Centersphere
+ query = fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Within box.
+ query = fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}");
+ runQuery(query);
+ assertNotCached("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+}
+
+TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) {
+ addIndex(BSON("a"
+ << "2d"));
+ addIndex(BSON("b"
+ << "2d"));
+ BSONObj query = fromjson(
+ "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}");
+
+ runQuery(query);
+ assertNotCached(
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
+}
+
+/**
+ * Test functions for computeKey. Cache keys are intentionally obfuscated and are
+ * meaningful only within the current lifetime of the server process. Users should treat plan
+ * cache keys as opaque.
+ */
+void testComputeKey(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ const char* expectedStr) {
+ PlanCache planCache;
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr, sortStr, projStr));
+ PlanCacheKey key = planCache.computeKey(*cq);
+ PlanCacheKey expectedKey(expectedStr);
+ if (key == expectedKey) {
+ return;
}
+ str::stream ss;
+ ss << "Unexpected plan cache key. Expected: " << expectedKey << ". Actual: " << key
+ << ". Query: " << cq->toString();
+ FAIL(ss);
+}
+
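+// Informal reading of the expected keys below, inferred from the assertions themselves
+// (the encoding is internal and subject to change): the match expression is encoded
+// first ("an" for the root AND, "eq<field>" for an equality, "or[...]" for a contained
+// $or), "~" introduces the sort spec ("a" ascending, "d" descending, "t" for a
+// textScore meta sort), and "|" introduces the projection ("i<field>" for an
+// inclusion, "e<field>" for an exclusion).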
+TEST(PlanCacheTest, ComputeKey) {
+ // Generated cache keys should be treated as opaque to the user.
+
+ // No sorts
+ testComputeKey("{}", "{}", "{}", "an");
+ testComputeKey("{$or: [{a: 1}, {b: 2}]}", "{}", "{}", "or[eqa,eqb]");
+ testComputeKey("{$or: [{a: 1}, {b: 1}, {c: 1}], d: 1}", "{}", "{}", "an[or[eqa,eqb,eqc],eqd]");
+ testComputeKey("{$or: [{a: 1}, {b: 1}], c: 1, d: 1}", "{}", "{}", "an[or[eqa,eqb],eqc,eqd]");
+ testComputeKey("{a: 1, b: 1, c: 1}", "{}", "{}", "an[eqa,eqb,eqc]");
+ testComputeKey("{a: 1, beqc: 1}", "{}", "{}", "an[eqa,eqbeqc]");
+ testComputeKey("{ap1a: 1}", "{}", "{}", "eqap1a");
+ testComputeKey("{aab: 1}", "{}", "{}", "eqaab");
+
+ // With sort
+ testComputeKey("{}", "{a: 1}", "{}", "an~aa");
+ testComputeKey("{}", "{a: -1}", "{}", "an~da");
+ testComputeKey("{}",
+ "{a: {$meta: 'textScore'}}",
+ "{a: {$meta: 'textScore'}}",
+ "an~ta|{ $meta: \"textScore\" }a");
+ testComputeKey("{a: 1}", "{b: 1}", "{}", "eqa~ab");
+
+ // With projection
+ testComputeKey("{}", "{}", "{a: 1}", "an|ia");
+ testComputeKey("{}", "{}", "{a: -1}", "an|ia");
+ testComputeKey("{}", "{}", "{a: -1.0}", "an|ia");
+ testComputeKey("{}", "{}", "{a: true}", "an|ia");
+ testComputeKey("{}", "{}", "{a: 0}", "an|ea");
+ testComputeKey("{}", "{}", "{a: false}", "an|ea");
+ testComputeKey("{}", "{}", "{a: 99}", "an|ia");
+ testComputeKey("{}", "{}", "{a: 'foo'}", "an|ia");
+ testComputeKey("{}", "{}", "{a: {$slice: [3, 5]}}", "an|{ $slice: \\[ 3\\, 5 \\] }a");
+ testComputeKey("{}", "{}", "{a: {$elemMatch: {x: 2}}}", "an|{ $elemMatch: { x: 2 } }a");
+ testComputeKey("{}", "{}", "{a: ObjectId('507f191e810c19729de860ea')}", "an|ia");
+ testComputeKey("{a: 1}", "{}", "{'a.$': 1}", "eqa|ia.$");
+ testComputeKey("{a: 1}", "{}", "{a: 1}", "eqa|ia");
+
+ // Projection should be order-insensitive
+ testComputeKey("{}", "{}", "{a: 1, b: 1}", "an|iaib");
+ testComputeKey("{}", "{}", "{b: 1, a: 1}", "an|iaib");
+
+ // With or-elimination and projection
+ testComputeKey("{$or: [{a: 1}]}", "{}", "{_id: 0, a: 1}", "eqa|e_idia");
+ testComputeKey("{$or: [{a: 1}]}", "{}", "{'a.$': 1}", "eqa|ia.$");
+}
+
+// Delimiters found in user field names or non-standard projection field values
+// must be escaped.
+TEST(PlanCacheTest, ComputeKeyEscaped) {
+ // Field name in query.
+ testComputeKey("{'a,[]~|<>': 1}", "{}", "{}", "eqa\\,\\[\\]\\~\\|\\<\\>");
+
+ // Field name in sort.
+ testComputeKey("{}", "{'a,[]~|<>': 1}", "{}", "an~aa\\,\\[\\]\\~\\|\\<\\>");
+
+ // Field name in projection.
+ testComputeKey("{}", "{}", "{'a,[]~|<>': 1}", "an|ia\\,\\[\\]\\~\\|\\<\\>");
+
+ // Value in projection.
+ testComputeKey("{}", "{}", "{a: 'foo,[]~|<>'}", "an|ia");
+}
+
+// Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
+// not be the same.
+TEST(PlanCacheTest, ComputeKeyGeoWithin) {
+ PlanCache planCache;
+
+ // Legacy coordinates.
+ unique_ptr<CanonicalQuery> cqLegacy(canonicalize(
+ "{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
+ // GeoJSON coordinates.
+ unique_ptr<CanonicalQuery> cqNew(canonicalize(
+ "{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy), planCache.computeKey(*cqNew));
+}
+
+// GEO_NEAR cache keys should include information on geometry and CRS in addition
+// to the match type and field name.
+TEST(PlanCacheTest, ComputeKeyGeoNear) {
+ testComputeKey("{a: {$near: [0,0], $maxDistance:0.3 }}", "{}", "{}", "gnanrfl");
+ testComputeKey("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}", "{}", "{}", "gnanssp");
+ testComputeKey(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}",
+ "{}",
+ "{}",
+ "gnanrsp");
+}
+
+// When a sparse index is present, computeKey() should generate different keys depending on
+// whether or not the predicates in the given query can use the index.
+TEST(PlanCacheTest, ComputeKeySparseIndex) {
+ PlanCache planCache;
+ planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ true, // sparse
+ false, // unique
+ "", // name
+ nullptr, // filterExpr
+ BSONObj())});
+
+ unique_ptr<CanonicalQuery> cqEqNumber(canonicalize("{a: 0}"));
+ unique_ptr<CanonicalQuery> cqEqString(canonicalize("{a: 'x'}"));
+ unique_ptr<CanonicalQuery> cqEqNull(canonicalize("{a: null}"));
+
+ // 'cqEqNumber' and 'cqEqString' get the same key, since both are compatible with this
+ // index.
+ ASSERT_EQ(planCache.computeKey(*cqEqNumber), planCache.computeKey(*cqEqString));
+
+ // 'cqEqNull' gets a different key, since it is not compatible with this index.
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqEqNull), planCache.computeKey(*cqEqNumber));
+}
+
+// When a partial index is present, computeKey() should generate different keys depending on
+// whether or not the predicates in the given query "match" the predicates in the partial index
+// filter.
+TEST(PlanCacheTest, ComputeKeyPartialIndex) {
+ BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
+ unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
+
+ PlanCache planCache;
+ planCache.notifyOfIndexEntries({IndexEntry(BSON("a" << 1),
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "", // name
+ filterExpr.get(),
+ BSONObj())});
+
+ unique_ptr<CanonicalQuery> cqGtNegativeFive(canonicalize("{f: {$gt: -5}}"));
+ unique_ptr<CanonicalQuery> cqGtZero(canonicalize("{f: {$gt: 0}}"));
+ unique_ptr<CanonicalQuery> cqGtFive(canonicalize("{f: {$gt: 5}}"));
+
+ // 'cqGtZero' and 'cqGtFive' get the same key, since both are compatible with this index.
+ ASSERT_EQ(planCache.computeKey(*cqGtZero), planCache.computeKey(*cqGtFive));
+
+ // 'cqGtNegativeFive' gets a different key, since it is not compatible with this index.
+ ASSERT_NOT_EQUALS(planCache.computeKey(*cqGtNegativeFive), planCache.computeKey(*cqGtZero));
+}
} // namespace
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index ef1f23997d0..20049790c0b 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -38,1229 +38,1206 @@
namespace {
- using namespace mongo;
- using std::unique_ptr;
- using std::endl;
- using std::set;
- using std::string;
- using std::vector;
-
- std::string getPathPrefix(std::string path) {
- if (mongoutils::str::contains(path, '.')) {
- return mongoutils::str::before(path, '.');
- }
- else {
- return path;
- }
+using namespace mongo;
+using std::unique_ptr;
+using std::endl;
+using std::set;
+using std::string;
+using std::vector;
+
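+// Returns the portion of 'path' before the first '.', e.g. "a.b.c" yields "a";
+// a path without a dot is returned unchanged.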
+std::string getPathPrefix(std::string path) {
+ if (mongoutils::str::contains(path, '.')) {
+ return mongoutils::str::before(path, '.');
+ } else {
+ return path;
}
+}
- /**
- * Returns true if either 'node' or a descendent of 'node'
- * is a predicate that is required to use an index.
- */
- bool expressionRequiresIndex(const MatchExpression* node) {
- return CanonicalQuery::countNodes(node, MatchExpression::GEO_NEAR) > 0
- || CanonicalQuery::countNodes(node, MatchExpression::TEXT) > 0;
- }
+/**
+ * Returns true if either 'node' or a descendent of 'node'
+ * is a predicate that is required to use an index.
+ */
+bool expressionRequiresIndex(const MatchExpression* node) {
+ return CanonicalQuery::countNodes(node, MatchExpression::GEO_NEAR) > 0 ||
+ CanonicalQuery::countNodes(node, MatchExpression::TEXT) > 0;
+}
-} // namespace
+} // namespace
namespace mongo {
- PlanEnumerator::PlanEnumerator(const PlanEnumeratorParams& params)
- : _root(params.root),
- _indices(params.indices),
- _ixisect(params.intersect),
- _orLimit(params.maxSolutionsPerOr),
- _intersectLimit(params.maxIntersectPerAnd) { }
-
- PlanEnumerator::~PlanEnumerator() {
- typedef unordered_map<MemoID, NodeAssignment*> MemoMap;
- for (MemoMap::iterator it = _memo.begin(); it != _memo.end(); ++it) {
- delete it->second;
- }
+PlanEnumerator::PlanEnumerator(const PlanEnumeratorParams& params)
+ : _root(params.root),
+ _indices(params.indices),
+ _ixisect(params.intersect),
+ _orLimit(params.maxSolutionsPerOr),
+ _intersectLimit(params.maxIntersectPerAnd) {}
+
+PlanEnumerator::~PlanEnumerator() {
+ typedef unordered_map<MemoID, NodeAssignment*> MemoMap;
+ for (MemoMap::iterator it = _memo.begin(); it != _memo.end(); ++it) {
+ delete it->second;
}
+}
+
+Status PlanEnumerator::init() {
+ // Fill out our memo structure from the tagged _root.
+ _done = !prepMemo(_root, PrepMemoContext());
+
+ // Dump the tags. We replace them with IndexTag instances.
+ _root->resetTag();
- Status PlanEnumerator::init() {
- // Fill out our memo structure from the tagged _root.
- _done = !prepMemo(_root, PrepMemoContext());
+ return Status::OK();
+}
- // Dump the tags. We replace them with IndexTag instances.
- _root->resetTag();
+std::string PlanEnumerator::dumpMemo() {
+ mongoutils::str::stream ss;
- return Status::OK();
+ // Note that this needs to be kept in sync with allocateAssignment which assigns memo IDs.
+ for (size_t i = 1; i < _memo.size(); ++i) {
+ ss << "[Node #" << i << "]: " << _memo[i]->toString() << "\n";
}
+ return ss;
+}
- std::string PlanEnumerator::dumpMemo() {
+string PlanEnumerator::NodeAssignment::toString() const {
+ if (NULL != pred) {
mongoutils::str::stream ss;
-
- // Note that this needs to be kept in sync with allocateAssignment which assigns memo IDs.
- for (size_t i = 1; i < _memo.size(); ++i) {
- ss << "[Node #" << i << "]: " << _memo[i]->toString() << "\n";
+ ss << "predicate\n";
+ ss << "\tfirst indices: [";
+ for (size_t i = 0; i < pred->first.size(); ++i) {
+ ss << pred->first[i];
+ if (i < pred->first.size() - 1)
+ ss << ", ";
}
+ ss << "]\n";
+ ss << "\tpred: " << pred->expr->toString();
+ ss << "\tindexToAssign: " << pred->indexToAssign;
return ss;
- }
-
- string PlanEnumerator::NodeAssignment::toString() const {
- if (NULL != pred) {
- mongoutils::str::stream ss;
- ss << "predicate\n";
- ss << "\tfirst indices: [";
- for (size_t i = 0; i < pred->first.size(); ++i) {
- ss << pred->first[i];
- if (i < pred->first.size() - 1)
- ss << ", ";
+ } else if (NULL != andAssignment) {
+ mongoutils::str::stream ss;
+ ss << "AND enumstate counter " << andAssignment->counter;
+ for (size_t i = 0; i < andAssignment->choices.size(); ++i) {
+ ss << "\n\tchoice " << i << ":\n";
+ const AndEnumerableState& state = andAssignment->choices[i];
+ ss << "\t\tsubnodes: ";
+ for (size_t j = 0; j < state.subnodesToIndex.size(); ++j) {
+ ss << state.subnodesToIndex[j] << " ";
}
- ss << "]\n";
- ss << "\tpred: " << pred->expr->toString();
- ss << "\tindexToAssign: " << pred->indexToAssign;
- return ss;
- }
- else if (NULL != andAssignment) {
- mongoutils::str::stream ss;
- ss << "AND enumstate counter " << andAssignment->counter;
- for (size_t i = 0; i < andAssignment->choices.size(); ++i) {
- ss << "\n\tchoice " << i << ":\n";
- const AndEnumerableState& state = andAssignment->choices[i];
- ss << "\t\tsubnodes: ";
- for (size_t j = 0; j < state.subnodesToIndex.size(); ++j) {
- ss << state.subnodesToIndex[j] << " ";
- }
- ss << '\n';
- for (size_t j = 0; j < state.assignments.size(); ++j) {
- const OneIndexAssignment& oie = state.assignments[j];
- ss << "\t\tidx[" << oie.index << "]\n";
-
- for (size_t k = 0; k < oie.preds.size(); ++k) {
- ss << "\t\t\tpos " << oie.positions[k]
- << " pred " << oie.preds[k]->toString();
- }
+ ss << '\n';
+ for (size_t j = 0; j < state.assignments.size(); ++j) {
+ const OneIndexAssignment& oie = state.assignments[j];
+ ss << "\t\tidx[" << oie.index << "]\n";
+
+ for (size_t k = 0; k < oie.preds.size(); ++k) {
+ ss << "\t\t\tpos " << oie.positions[k] << " pred " << oie.preds[k]->toString();
}
}
- return ss;
}
- else if (NULL != arrayAssignment) {
- mongoutils::str::stream ss;
- ss << "ARRAY SUBNODES enumstate " << arrayAssignment->counter << "/ ONE OF: [ ";
- for (size_t i = 0; i < arrayAssignment->subnodes.size(); ++i) {
- ss << arrayAssignment->subnodes[i] << " ";
- }
- ss << "]";
- return ss;
+ return ss;
+ } else if (NULL != arrayAssignment) {
+ mongoutils::str::stream ss;
+ ss << "ARRAY SUBNODES enumstate " << arrayAssignment->counter << "/ ONE OF: [ ";
+ for (size_t i = 0; i < arrayAssignment->subnodes.size(); ++i) {
+ ss << arrayAssignment->subnodes[i] << " ";
}
- else {
- verify(NULL != orAssignment);
- mongoutils::str::stream ss;
- ss << "ALL OF: [ ";
- for (size_t i = 0; i < orAssignment->subnodes.size(); ++i) {
- ss << orAssignment->subnodes[i] << " ";
- }
- ss << "]";
- return ss;
+ ss << "]";
+ return ss;
+ } else {
+ verify(NULL != orAssignment);
+ mongoutils::str::stream ss;
+ ss << "ALL OF: [ ";
+ for (size_t i = 0; i < orAssignment->subnodes.size(); ++i) {
+ ss << orAssignment->subnodes[i] << " ";
}
+ ss << "]";
+ return ss;
}
+}
- PlanEnumerator::MemoID PlanEnumerator::memoIDForNode(MatchExpression* node) {
- unordered_map<MatchExpression*, MemoID>::iterator it = _nodeToId.find(node);
+PlanEnumerator::MemoID PlanEnumerator::memoIDForNode(MatchExpression* node) {
+ unordered_map<MatchExpression*, MemoID>::iterator it = _nodeToId.find(node);
- if (_nodeToId.end() == it) {
- error() << "Trying to look up memo entry for node, none found.";
- invariant(0);
- }
-
- return it->second;
+ if (_nodeToId.end() == it) {
+ error() << "Trying to look up memo entry for node, none found.";
+ invariant(0);
}
- bool PlanEnumerator::getNext(MatchExpression** tree) {
- if (_done) { return false; }
-
- // Tag with our first solution.
- tagMemo(memoIDForNode(_root));
+ return it->second;
+}
- *tree = _root->shallowClone();
- tagForSort(*tree);
- sortUsingTags(*tree);
-
- _root->resetTag();
- LOG(5) << "Enumerator: memo just before moving:" << endl << dumpMemo();
- _done = nextMemo(memoIDForNode(_root));
- return true;
+bool PlanEnumerator::getNext(MatchExpression** tree) {
+ if (_done) {
+ return false;
}
- //
- // Structure creation
- //
+ // Tag with our first solution.
+ tagMemo(memoIDForNode(_root));
+
+ *tree = _root->shallowClone();
+ tagForSort(*tree);
+ sortUsingTags(*tree);
+
+ _root->resetTag();
+ LOG(5) << "Enumerator: memo just before moving:" << endl
+ << dumpMemo();
+ _done = nextMemo(memoIDForNode(_root));
+ return true;
+}
+
+//
+// Structure creation
+//
+
+void PlanEnumerator::allocateAssignment(MatchExpression* expr,
+ NodeAssignment** assign,
+ MemoID* id) {
+ // We start at 1 so that the lookup of any entries not explicitly allocated
+ // will refer to an invalid memo slot.
+ size_t newID = _memo.size() + 1;
+
+ // Shouldn't be anything there already.
+ verify(_nodeToId.end() == _nodeToId.find(expr));
+ _nodeToId[expr] = newID;
+ verify(_memo.end() == _memo.find(newID));
+ NodeAssignment* newAssignment = new NodeAssignment();
+ _memo[newID] = newAssignment;
+ *assign = newAssignment;
+ *id = newID;
+}
+
+bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) {
+ PrepMemoContext childContext;
+ childContext.elemMatchExpr = context.elemMatchExpr;
+ if (Indexability::nodeCanUseIndexOnOwnField(node)) {
+ // We only get here if our parent is an OR, an array operator, or we're the root.
+
+ // If we have no index tag there are no indices we can use.
+ if (NULL == node->getTag()) {
+ return false;
+ }
- void PlanEnumerator::allocateAssignment(MatchExpression* expr,
- NodeAssignment** assign,
- MemoID* id) {
- // We start at 1 so that the lookup of any entries not explicitly allocated
- // will refer to an invalid memo slot.
- size_t newID = _memo.size() + 1;
-
- // Shouldn't be anything there already.
- verify(_nodeToId.end() == _nodeToId.find(expr));
- _nodeToId[expr] = newID;
- verify(_memo.end() == _memo.find(newID));
- NodeAssignment* newAssignment = new NodeAssignment();
- _memo[newID] = newAssignment;
- *assign = newAssignment;
- *id = newID;
- }
+ RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
+ // In order to definitely use an index it must be prefixed with our field.
+ // We don't consider notFirst indices here because we must be AND-related to a node
+ // that uses the first spot in that index, and we currently do not know that
+ // unless we're in an AND node.
+ if (0 == rt->first.size()) {
+ return false;
+ }
- bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) {
- PrepMemoContext childContext;
- childContext.elemMatchExpr = context.elemMatchExpr;
- if (Indexability::nodeCanUseIndexOnOwnField(node)) {
- // We only get here if our parent is an OR, an array operator, or we're the root.
-
- // If we have no index tag there are no indices we can use.
- if (NULL == node->getTag()) { return false; }
-
- RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
- // In order to definitely use an index it must be prefixed with our field.
- // We don't consider notFirst indices here because we must be AND-related to a node
- // that uses the first spot in that index, and we currently do not know that
- // unless we're in an AND node.
- if (0 == rt->first.size()) { return false; }
-
- // We know we can use an index, so grab a memo spot.
- size_t myMemoID;
- NodeAssignment* assign;
- allocateAssignment(node, &assign, &myMemoID);
-
- assign->pred.reset(new PredicateAssignment());
- assign->pred->expr = node;
- assign->pred->first.swap(rt->first);
- return true;
+ // We know we can use an index, so grab a memo spot.
+ size_t myMemoID;
+ NodeAssignment* assign;
+ allocateAssignment(node, &assign, &myMemoID);
+
+ assign->pred.reset(new PredicateAssignment());
+ assign->pred->expr = node;
+ assign->pred->first.swap(rt->first);
+ return true;
+ } else if (Indexability::isBoundsGeneratingNot(node)) {
+ bool childIndexable = prepMemo(node->getChild(0), childContext);
+ // If the child isn't indexable then bail out now.
+ if (!childIndexable) {
+ return false;
}
- else if (Indexability::isBoundsGeneratingNot(node)) {
- bool childIndexable = prepMemo(node->getChild(0), childContext);
- // If the child isn't indexable then bail out now.
- if (!childIndexable) {
+
+ // Our parent node, if any exists, will expect a memo entry keyed on 'node'. As such we
+ // have the node ID for 'node' just point to the memo created for the child that
+ // actually generates the bounds.
+ size_t myMemoID;
+ NodeAssignment* assign;
+ allocateAssignment(node, &assign, &myMemoID);
+ OrAssignment* orAssignment = new OrAssignment();
+ orAssignment->subnodes.push_back(memoIDForNode(node->getChild(0)));
+ assign->orAssignment.reset(orAssignment);
+ return true;
+ } else if (MatchExpression::OR == node->matchType()) {
+ // For an OR to be indexed, all its children must be indexed.
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ if (!prepMemo(node->getChild(i), childContext)) {
return false;
}
-
- // Our parent node, if any exists, will expect a memo entry keyed on 'node'. As such we
- // have the node ID for 'node' just point to the memo created for the child that
- // actually generates the bounds.
- size_t myMemoID;
- NodeAssignment* assign;
- allocateAssignment(node, &assign, &myMemoID);
- OrAssignment* orAssignment = new OrAssignment();
- orAssignment->subnodes.push_back(memoIDForNode(node->getChild(0)));
- assign->orAssignment.reset(orAssignment);
- return true;
}
- else if (MatchExpression::OR == node->matchType()) {
- // For an OR to be indexed, all its children must be indexed.
- for (size_t i = 0; i < node->numChildren(); ++i) {
- if (!prepMemo(node->getChild(i), childContext)) {
- return false;
- }
- }
- // If we're here we're fully indexed and can be in the memo.
- size_t myMemoID;
- NodeAssignment* assign;
- allocateAssignment(node, &assign, &myMemoID);
+ // If we're here we're fully indexed and can be in the memo.
+ size_t myMemoID;
+ NodeAssignment* assign;
+ allocateAssignment(node, &assign, &myMemoID);
- OrAssignment* orAssignment = new OrAssignment();
- for (size_t i = 0; i < node->numChildren(); ++i) {
- orAssignment->subnodes.push_back(memoIDForNode(node->getChild(i)));
- }
- assign->orAssignment.reset(orAssignment);
- return true;
+ OrAssignment* orAssignment = new OrAssignment();
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ orAssignment->subnodes.push_back(memoIDForNode(node->getChild(i)));
}
- else if (Indexability::arrayUsesIndexOnChildren(node)) {
- // Add each of our children as a subnode. We enumerate through each subnode one at a
- // time until it's exhausted then we move on.
- unique_ptr<ArrayAssignment> aa(new ArrayAssignment());
+ assign->orAssignment.reset(orAssignment);
+ return true;
+ } else if (Indexability::arrayUsesIndexOnChildren(node)) {
+ // Add each of our children as a subnode. We enumerate through each subnode one at a
+ // time until it's exhausted then we move on.
+ unique_ptr<ArrayAssignment> aa(new ArrayAssignment());
- if (MatchExpression::ELEM_MATCH_OBJECT == node->matchType()) {
- childContext.elemMatchExpr = node;
- }
+ if (MatchExpression::ELEM_MATCH_OBJECT == node->matchType()) {
+ childContext.elemMatchExpr = node;
+ }
- // For an OR to be indexed, all its children must be indexed.
- for (size_t i = 0; i < node->numChildren(); ++i) {
- if (prepMemo(node->getChild(i), childContext)) {
- aa->subnodes.push_back(memoIDForNode(node->getChild(i)));
- }
+ // Add each child that can use an index as a subnode of this array operator.
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ if (prepMemo(node->getChild(i), childContext)) {
+ aa->subnodes.push_back(memoIDForNode(node->getChild(i)));
}
+ }
- if (0 == aa->subnodes.size()) { return false; }
-
- size_t myMemoID;
- NodeAssignment* assign;
- allocateAssignment(node, &assign, &myMemoID);
-
- assign->arrayAssignment.reset(aa.release());
- return true;
+ if (0 == aa->subnodes.size()) {
+ return false;
}
- else if (MatchExpression::AND == node->matchType()) {
- // Map from idx id to children that have a pred over it.
-
- // TODO: The index intersection logic could be simplified if we could iterate over these
- // maps in a known order. Currently when iterating over these maps we have to impose an
- // ordering on each individual pair of indices in order to make sure that the
- // enumeration results are order-independent. See SERVER-12196.
- IndexToPredMap idxToFirst;
- IndexToPredMap idxToNotFirst;
-
- // Children that aren't predicates, and which do not necessarily need
- // to use an index.
- vector<MemoID> subnodes;
-
- // Children that aren't predicates, but which *must* use an index.
- // (e.g. an OR which contains a TEXT child).
- vector<MemoID> mandatorySubnodes;
-
- // A list of predicates contained in the subtree rooted at 'node'
- // obtained by traversing deeply through $and and $elemMatch children.
- vector<MatchExpression*> indexedPreds;
-
- // Partition the childen into the children that aren't predicates which may or may
- // not be indexed ('subnodes'), children that aren't predicates which must use the
- // index ('mandatorySubnodes'). and children that are predicates ('indexedPreds').
- //
- // We have to get the subnodes with mandatory assignments rather than adding the
- // mandatory preds to 'indexedPreds'. Adding the mandatory preds directly to
- // 'indexedPreds' would lead to problems such as pulling a predicate beneath an OR
- // into a set joined by an AND.
- if (!partitionPreds(node, childContext, &indexedPreds,
- &subnodes, &mandatorySubnodes)) {
- return false;
- }
- if (mandatorySubnodes.size() > 1) {
- return false;
- }
+ size_t myMemoID;
+ NodeAssignment* assign;
+ allocateAssignment(node, &assign, &myMemoID);
- // There can only be one mandatory predicate (at most one $text, at most one
- // $geoNear, can't combine $text/$geoNear).
- MatchExpression* mandatoryPred = NULL;
+ assign->arrayAssignment.reset(aa.release());
+ return true;
+ } else if (MatchExpression::AND == node->matchType()) {
+ // Map from idx id to children that have a pred over it.
+
+ // TODO: The index intersection logic could be simplified if we could iterate over these
+ // maps in a known order. Currently when iterating over these maps we have to impose an
+ // ordering on each individual pair of indices in order to make sure that the
+ // enumeration results are order-independent. See SERVER-12196.
+ IndexToPredMap idxToFirst;
+ IndexToPredMap idxToNotFirst;
+
+ // Children that aren't predicates, and which do not necessarily need
+ // to use an index.
+ vector<MemoID> subnodes;
+
+ // Children that aren't predicates, but which *must* use an index.
+ // (e.g. an OR which contains a TEXT child).
+ vector<MemoID> mandatorySubnodes;
+
+ // A list of predicates contained in the subtree rooted at 'node'
+ // obtained by traversing deeply through $and and $elemMatch children.
+ vector<MatchExpression*> indexedPreds;
+
+ // Partition the children into the children that aren't predicates which may or may
+ // not be indexed ('subnodes'), children that aren't predicates which must use the
+ // index ('mandatorySubnodes'), and children that are predicates ('indexedPreds').
+ //
+ // We have to get the subnodes with mandatory assignments rather than adding the
+ // mandatory preds to 'indexedPreds'. Adding the mandatory preds directly to
+ // 'indexedPreds' would lead to problems such as pulling a predicate beneath an OR
+ // into a set joined by an AND.
+ if (!partitionPreds(node, childContext, &indexedPreds, &subnodes, &mandatorySubnodes)) {
+ return false;
+ }
- // There could be multiple indices which we could use to satisfy the mandatory
- // predicate. Keep the set of such indices. Currently only one text index is
- // allowed per collection, but there could be multiple 2d or 2dsphere indices
- // available to answer a $geoNear predicate.
- set<IndexID> mandatoryIndices;
+ if (mandatorySubnodes.size() > 1) {
+ return false;
+ }
- // Go through 'indexedPreds' and add the predicates to the
- // 'idxToFirst' and 'idxToNotFirst' maps.
- for (size_t i = 0; i < indexedPreds.size(); ++i) {
- MatchExpression* child = indexedPreds[i];
+ // There can only be one mandatory predicate (at most one $text, at most one
+ // $geoNear, can't combine $text/$geoNear).
+ MatchExpression* mandatoryPred = NULL;
- invariant(Indexability::nodeCanUseIndexOnOwnField(child));
+ // There could be multiple indices which we could use to satisfy the mandatory
+ // predicate. Keep the set of such indices. Currently only one text index is
+ // allowed per collection, but there could be multiple 2d or 2dsphere indices
+ // available to answer a $geoNear predicate.
+ set<IndexID> mandatoryIndices;
- RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
+ // Go through 'indexedPreds' and add the predicates to the
+ // 'idxToFirst' and 'idxToNotFirst' maps.
+ for (size_t i = 0; i < indexedPreds.size(); ++i) {
+ MatchExpression* child = indexedPreds[i];
- if (expressionRequiresIndex(child)) {
- // 'child' is a predicate which *must* be tagged with an index.
- // This should include only TEXT and GEO_NEAR preds.
+ invariant(Indexability::nodeCanUseIndexOnOwnField(child));
- // We expect either 0 or 1 mandatory predicates.
- invariant(NULL == mandatoryPred);
+ RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
- // Mandatory predicates are TEXT or GEO_NEAR.
- invariant(MatchExpression::TEXT == child->matchType() ||
- MatchExpression::GEO_NEAR == child->matchType());
+ if (expressionRequiresIndex(child)) {
+ // 'child' is a predicate which *must* be tagged with an index.
+ // This should include only TEXT and GEO_NEAR preds.
- // The mandatory predicate must have a corresponding "mandatory index".
- invariant(rt->first.size() != 0 || rt->notFirst.size() != 0);
+ // We expect either 0 or 1 mandatory predicates.
+ invariant(NULL == mandatoryPred);
- mandatoryPred = child;
+ // Mandatory predicates are TEXT or GEO_NEAR.
+ invariant(MatchExpression::TEXT == child->matchType() ||
+ MatchExpression::GEO_NEAR == child->matchType());
- // Find all of the indices that could be used to satisfy the pred,
- // and add them to the 'mandatoryIndices' set.
- mandatoryIndices.insert(rt->first.begin(), rt->first.end());
- mandatoryIndices.insert(rt->notFirst.begin(), rt->notFirst.end());
- }
+ // The mandatory predicate must have a corresponding "mandatory index".
+ invariant(rt->first.size() != 0 || rt->notFirst.size() != 0);
- for (size_t j = 0; j < rt->first.size(); ++j) {
- idxToFirst[rt->first[j]].push_back(child);
- }
+ mandatoryPred = child;
- for (size_t j = 0 ; j< rt->notFirst.size(); ++j) {
- idxToNotFirst[rt->notFirst[j]].push_back(child);
- }
+ // Find all of the indices that could be used to satisfy the pred,
+ // and add them to the 'mandatoryIndices' set.
+ mandatoryIndices.insert(rt->first.begin(), rt->first.end());
+ mandatoryIndices.insert(rt->notFirst.begin(), rt->notFirst.end());
}
- // If none of our children can use indices, bail out.
- if (idxToFirst.empty()
- && (subnodes.size() == 0)
- && (mandatorySubnodes.size() == 0)) {
- return false;
+ for (size_t j = 0; j < rt->first.size(); ++j) {
+ idxToFirst[rt->first[j]].push_back(child);
}
- // At least one child can use an index, so we can create a memo entry.
- AndAssignment* andAssignment = new AndAssignment();
-
- size_t myMemoID;
- NodeAssignment* nodeAssignment;
- allocateAssignment(node, &nodeAssignment, &myMemoID);
- // Takes ownership.
- nodeAssignment->andAssignment.reset(andAssignment);
-
- // Predicates which must use an index might be buried inside
- // a subnode. Handle that case here.
- if (1 == mandatorySubnodes.size()) {
- AndEnumerableState aes;
- aes.subnodesToIndex.push_back(mandatorySubnodes[0]);
- andAssignment->choices.push_back(aes);
- return true;
+ for (size_t j = 0; j < rt->notFirst.size(); ++j) {
+ idxToNotFirst[rt->notFirst[j]].push_back(child);
}
+ }
- if (NULL != mandatoryPred) {
- // We must have at least one index which can be used to answer 'mandatoryPred'.
- invariant(!mandatoryIndices.empty());
- return enumerateMandatoryIndex(idxToFirst, idxToNotFirst, mandatoryPred,
- mandatoryIndices, andAssignment);
- }
+ // If none of our children can use indices, bail out.
+ if (idxToFirst.empty() && (subnodes.size() == 0) && (mandatorySubnodes.size() == 0)) {
+ return false;
+ }
- enumerateOneIndex(idxToFirst, idxToNotFirst, subnodes, andAssignment);
+ // At least one child can use an index, so we can create a memo entry.
+ AndAssignment* andAssignment = new AndAssignment();
- if (_ixisect) {
- enumerateAndIntersect(idxToFirst, idxToNotFirst, subnodes, andAssignment);
- }
+ size_t myMemoID;
+ NodeAssignment* nodeAssignment;
+ allocateAssignment(node, &nodeAssignment, &myMemoID);
+ // Takes ownership.
+ nodeAssignment->andAssignment.reset(andAssignment);
+ // Predicates which must use an index might be buried inside
+ // a subnode. Handle that case here.
+ if (1 == mandatorySubnodes.size()) {
+ AndEnumerableState aes;
+ aes.subnodesToIndex.push_back(mandatorySubnodes[0]);
+ andAssignment->choices.push_back(aes);
return true;
}
- // Don't know what the node is at this point.
- return false;
+ if (NULL != mandatoryPred) {
+ // We must have at least one index which can be used to answer 'mandatoryPred'.
+ invariant(!mandatoryIndices.empty());
+ return enumerateMandatoryIndex(
+ idxToFirst, idxToNotFirst, mandatoryPred, mandatoryIndices, andAssignment);
+ }
+
+ enumerateOneIndex(idxToFirst, idxToNotFirst, subnodes, andAssignment);
+
+ if (_ixisect) {
+ enumerateAndIntersect(idxToFirst, idxToNotFirst, subnodes, andAssignment);
+ }
+
+ return true;
}
- bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- MatchExpression* mandatoryPred,
- const set<IndexID>& mandatoryIndices,
- AndAssignment* andAssignment) {
- // Generate index assignments for each index in 'mandatoryIndices'. We
- // must assign 'mandatoryPred' to one of these indices, but we try all
- // possibilities in 'mandatoryIndices' because some might be better than
- // others for this query.
- for (set<IndexID>::const_iterator indexIt = mandatoryIndices.begin();
- indexIt != mandatoryIndices.end();
- ++indexIt) {
-
- // We have a predicate which *must* be tagged to use an index.
- // Get the index entry for the index it should use.
- const IndexEntry& thisIndex = (*_indices)[*indexIt];
-
- // Only text, 2d, and 2dsphere index types should be able to satisfy
- // mandatory predicates.
- invariant(INDEX_TEXT == thisIndex.type ||
- INDEX_2D == thisIndex.type ||
- INDEX_2DSPHERE == thisIndex.type);
-
- OneIndexAssignment indexAssign;
- indexAssign.index = *indexIt;
-
- IndexToPredMap::const_iterator it = idxToFirst.find(*indexIt);
- if (idxToFirst.end() == it) {
- // We don't have any predicate to assign to the leading field of this index.
- // This means that we cannot generate a solution using this index, so we
- // just move on to the next index.
- continue;
- }
+ // Don't know what the node is at this point.
+ return false;
+}
+
+bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ MatchExpression* mandatoryPred,
+ const set<IndexID>& mandatoryIndices,
+ AndAssignment* andAssignment) {
+ // Generate index assignments for each index in 'mandatoryIndices'. We
+ // must assign 'mandatoryPred' to one of these indices, but we try all
+ // possibilities in 'mandatoryIndices' because some might be better than
+ // others for this query.
+ for (set<IndexID>::const_iterator indexIt = mandatoryIndices.begin();
+ indexIt != mandatoryIndices.end();
+ ++indexIt) {
+ // We have a predicate which *must* be tagged to use an index.
+ // Get the index entry for the index it should use.
+ const IndexEntry& thisIndex = (*_indices)[*indexIt];
+
+ // Only text, 2d, and 2dsphere index types should be able to satisfy
+ // mandatory predicates.
+ invariant(INDEX_TEXT == thisIndex.type || INDEX_2D == thisIndex.type ||
+ INDEX_2DSPHERE == thisIndex.type);
+
+ OneIndexAssignment indexAssign;
+ indexAssign.index = *indexIt;
+
+ IndexToPredMap::const_iterator it = idxToFirst.find(*indexIt);
+ if (idxToFirst.end() == it) {
+ // We don't have any predicate to assign to the leading field of this index.
+ // This means that we cannot generate a solution using this index, so we
+ // just move on to the next index.
+ continue;
+ }
- const vector<MatchExpression*>& predsOverLeadingField = it->second;
-
- if (thisIndex.multikey) {
- // Special handling for multikey mandatory indices.
- if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
- predsOverLeadingField.end(),
- mandatoryPred)) {
- // The mandatory predicate is over the first field of the index. Assign
- // it now.
- indexAssign.preds.push_back(mandatoryPred);
- indexAssign.positions.push_back(0);
- }
- else {
- // The mandatory pred is notFirst. Assign an arbitrary predicate
- // over the first position.
- invariant(!predsOverLeadingField.empty());
- indexAssign.preds.push_back(predsOverLeadingField[0]);
- indexAssign.positions.push_back(0);
-
- // Assign the mandatory predicate at the matching position in the compound
- // index. We do this in order to ensure that the mandatory predicate (and not
- // some other predicate over the same position in the compound index) gets
- // assigned.
- //
- // The bad thing that could happen otherwise: A non-mandatory predicate gets
- // chosen by getMultikeyCompoundablePreds(...) instead of 'mandatoryPred'.
- // We would then fail to assign the mandatory predicate, and hence generate
- // a bad data access plan.
- //
- // The mandatory predicate is assigned by calling compound(...) because
- // compound(...) has logic for matching up a predicate with the proper
- // position in the compound index.
- vector<MatchExpression*> mandatoryToCompound;
- mandatoryToCompound.push_back(mandatoryPred);
- compound(mandatoryToCompound, thisIndex, &indexAssign);
-
- // At this point we have assigned a predicate over the leading field and
- // we have assigned the mandatory predicate to a trailing field.
- //
- // Ex:
- // Say we have index {a: 1, b: 1, c: "2dsphere", d: 1}. Also suppose that
- // there is a $near predicate over "c", with additional predicates over
- // "a", "b", "c", and "d". We will have assigned the $near predicate at
- // position 2 and a predicate with path "a" at position 0.
- }
+ const vector<MatchExpression*>& predsOverLeadingField = it->second;
- // Compound remaining predicates in a multikey-safe way.
- IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
- if (compIt != idxToNotFirst.end()) {
- const vector<MatchExpression*>& couldCompound = compIt->second;
- vector<MatchExpression*> tryCompound;
+ if (thisIndex.multikey) {
+ // Special handling for multikey mandatory indices.
+ if (predsOverLeadingField.end() != std::find(predsOverLeadingField.begin(),
+ predsOverLeadingField.end(),
+ mandatoryPred)) {
+ // The mandatory predicate is over the first field of the index. Assign
+ // it now.
+ indexAssign.preds.push_back(mandatoryPred);
+ indexAssign.positions.push_back(0);
+ } else {
+ // The mandatory pred is notFirst. Assign an arbitrary predicate
+ // over the first position.
+ invariant(!predsOverLeadingField.empty());
+ indexAssign.preds.push_back(predsOverLeadingField[0]);
+ indexAssign.positions.push_back(0);
- getMultikeyCompoundablePreds(indexAssign.preds, couldCompound, &tryCompound);
- if (tryCompound.size()) {
- compound(tryCompound, thisIndex, &indexAssign);
- }
- }
+ // Assign the mandatory predicate at the matching position in the compound
+ // index. We do this in order to ensure that the mandatory predicate (and not
+ // some other predicate over the same position in the compound index) gets
+ // assigned.
+ //
+ // The bad thing that could happen otherwise: A non-mandatory predicate gets
+ // chosen by getMultikeyCompoundablePreds(...) instead of 'mandatoryPred'.
+ // We would then fail to assign the mandatory predicate, and hence generate
+ // a bad data access plan.
+ //
+ // The mandatory predicate is assigned by calling compound(...) because
+ // compound(...) has logic for matching up a predicate with the proper
+ // position in the compound index.
+ vector<MatchExpression*> mandatoryToCompound;
+ mandatoryToCompound.push_back(mandatoryPred);
+ compound(mandatoryToCompound, thisIndex, &indexAssign);
+
+ // At this point we have assigned a predicate over the leading field and
+ // we have assigned the mandatory predicate to a trailing field.
+ //
+ // Ex:
+ // Say we have index {a: 1, b: 1, c: "2dsphere", d: 1}. Also suppose that
+ // there is a $near predicate over "c", with additional predicates over
+ // "a", "b", "c", and "d". We will have assigned the $near predicate at
+ // position 2 and a predicate with path "a" at position 0.
}
- else {
- // For non-multikey, we don't have to do anything too special.
- // Just assign all "first" predicates and try to compound like usual.
- indexAssign.preds = it->second;
-
- // Since everything in assign.preds prefixes the index, they all go
- // at position '0' in the index, the first position.
- indexAssign.positions.resize(indexAssign.preds.size(), 0);
-
- // And now we begin compound analysis.
-
- // Find everything that could use assign.index but isn't a pred over
- // the first field of that index.
- IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
- if (compIt != idxToNotFirst.end()) {
- compound(compIt->second, thisIndex, &indexAssign);
+
+ // Compound remaining predicates in a multikey-safe way.
+ IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
+ if (compIt != idxToNotFirst.end()) {
+ const vector<MatchExpression*>& couldCompound = compIt->second;
+ vector<MatchExpression*> tryCompound;
+
+ getMultikeyCompoundablePreds(indexAssign.preds, couldCompound, &tryCompound);
+ if (tryCompound.size()) {
+ compound(tryCompound, thisIndex, &indexAssign);
}
}
+ } else {
+ // For non-multikey, we don't have to do anything too special.
+ // Just assign all "first" predicates and try to compound like usual.
+ indexAssign.preds = it->second;
- // The mandatory predicate must be assigned.
- invariant(indexAssign.preds.end() != std::find(indexAssign.preds.begin(),
- indexAssign.preds.end(),
- mandatoryPred));
+ // Since everything in assign.preds prefixes the index, they all go
+ // at position '0' in the index, the first position.
+ indexAssign.positions.resize(indexAssign.preds.size(), 0);
- // Output the assignments for this index.
- AndEnumerableState state;
- state.assignments.push_back(indexAssign);
- andAssignment->choices.push_back(state);
+ // And now we begin compound analysis.
+
+ // Find everything that could use assign.index but isn't a pred over
+ // the first field of that index.
+ IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
+ if (compIt != idxToNotFirst.end()) {
+ compound(compIt->second, thisIndex, &indexAssign);
+ }
}
- return andAssignment->choices.size() > 0;
- }
+ // The mandatory predicate must be assigned.
+ invariant(indexAssign.preds.end() !=
+ std::find(indexAssign.preds.begin(), indexAssign.preds.end(), mandatoryPred));
- void PlanEnumerator::enumerateOneIndex(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- const vector<MemoID>& subnodes,
- AndAssignment* andAssignment) {
- // In the simplest case, an AndAssignment picks indices like a PredicateAssignment. To
- // be indexed we must only pick one index
- //
- // Complications:
- //
- // Some of our child predicates cannot be answered without an index. As such, the
- // indices that those predicates require must always be outputted. We store these
- // mandatory index assignments in 'mandatoryIndices'.
- //
- // Some of our children may not be predicates. We may have ORs (or array operators) as
- // children. If one of these subtrees provides an index, the AND is indexed. We store
- // these subtree choices in 'subnodes'.
- //
- // With the above two cases out of the way, we can focus on the remaining case: what to
- // do with our children that are leaf predicates.
- //
- // Guiding principles for index assignment to leaf predicates:
- //
- // 1. If we assign an index to {x:{$gt: 5}} we should assign the same index to
- // {x:{$lt: 50}}. That is, an index assignment should include all predicates
- // over its leading field.
- //
- // 2. If we have the index {a:1, b:1} and we assign it to {a: 5} we should assign it
- // to {b:7}, since with a predicate over the first field of the compound index,
- // the second field can be bounded as well. We may only assign indices to predicates
- // if all fields to the left of the index field are constrained.
+ // Output the assignments for this index.
+ AndEnumerableState state;
+ state.assignments.push_back(indexAssign);
+ andAssignment->choices.push_back(state);
+ }
- // First, add the state of using each subnode.
- for (size_t i = 0; i < subnodes.size(); ++i) {
- AndEnumerableState aes;
- aes.subnodesToIndex.push_back(subnodes[i]);
- andAssignment->choices.push_back(aes);
- }
+ return andAssignment->choices.size() > 0;
+}
- // For each FIRST, we assign nodes to it.
- for (IndexToPredMap::const_iterator it = idxToFirst.begin(); it != idxToFirst.end(); ++it) {
- // The assignment we're filling out.
- OneIndexAssignment indexAssign;
-
- // This is the index we assign to.
- indexAssign.index = it->first;
-
- const IndexEntry& thisIndex = (*_indices)[it->first];
-
- // If the index is multikey, we only assign one pred to it. We also skip
- // compounding. TODO: is this also true for 2d and 2dsphere indices? can they be
- // multikey but still compoundable?
- if (thisIndex.multikey) {
- // TODO: could pick better pred than first but not too worried since we should
- // really be isecting indices here. Just take the first pred. We don't assign
- // any other preds to this index. The planner will intersect the preds and this
- // enumeration strategy is just one index at a time.
- indexAssign.preds.push_back(it->second[0]);
- indexAssign.positions.push_back(0);
+void PlanEnumerator::enumerateOneIndex(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ const vector<MemoID>& subnodes,
+ AndAssignment* andAssignment) {
+ // In the simplest case, an AndAssignment picks indices like a PredicateAssignment. To
+ // be indexed we must only pick one index.
+ //
+ // Complications:
+ //
+ // Some of our child predicates cannot be answered without an index. As such, the
+ // indices that those predicates require must always be outputted. We store these
+ // mandatory index assignments in 'mandatoryIndices'.
+ //
+ // Some of our children may not be predicates. We may have ORs (or array operators) as
+ // children. If one of these subtrees provides an index, the AND is indexed. We store
+ // these subtree choices in 'subnodes'.
+ //
+ // With the above two cases out of the way, we can focus on the remaining case: what to
+ // do with our children that are leaf predicates.
+ //
+ // Guiding principles for index assignment to leaf predicates:
+ //
+ // 1. If we assign an index to {x:{$gt: 5}} we should assign the same index to
+ // {x:{$lt: 50}}. That is, an index assignment should include all predicates
+ // over its leading field.
+ //
+ // 2. If we have the index {a:1, b:1} and we assign it to {a: 5} we should assign it
+ // to {b:7}, since with a predicate over the first field of the compound index,
+ // the second field can be bounded as well. We may only assign indices to predicates
+ // if all fields to the left of the index field are constrained.
+
+ // First, add the state of using each subnode.
+ for (size_t i = 0; i < subnodes.size(); ++i) {
+ AndEnumerableState aes;
+ aes.subnodesToIndex.push_back(subnodes[i]);
+ andAssignment->choices.push_back(aes);
+ }
- // If there are any preds that could possibly be compounded with this
- // index...
- IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
- if (compIt != idxToNotFirst.end()) {
- const vector<MatchExpression*>& couldCompound = compIt->second;
- vector<MatchExpression*> tryCompound;
-
- // ...select the predicates that are safe to compound and try to
- // compound them.
- getMultikeyCompoundablePreds(indexAssign.preds, couldCompound, &tryCompound);
- if (tryCompound.size()) {
- compound(tryCompound, thisIndex, &indexAssign);
- }
+ // For each FIRST, we assign nodes to it.
+ for (IndexToPredMap::const_iterator it = idxToFirst.begin(); it != idxToFirst.end(); ++it) {
+ // The assignment we're filling out.
+ OneIndexAssignment indexAssign;
+
+ // This is the index we assign to.
+ indexAssign.index = it->first;
+
+ const IndexEntry& thisIndex = (*_indices)[it->first];
+
+ // If the index is multikey, we only assign one pred to it. We also skip
+ // compounding. TODO: is this also true for 2d and 2dsphere indices? can they be
+ // multikey but still compoundable?
+ if (thisIndex.multikey) {
+ // TODO: could pick better pred than first but not too worried since we should
+ // really be isecting indices here. Just take the first pred. We don't assign
+ // any other preds to this index. The planner will intersect the preds and this
+ // enumeration strategy is just one index at a time.
+ indexAssign.preds.push_back(it->second[0]);
+ indexAssign.positions.push_back(0);
+
+ // If there are any preds that could possibly be compounded with this
+ // index...
+ IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
+ if (compIt != idxToNotFirst.end()) {
+ const vector<MatchExpression*>& couldCompound = compIt->second;
+ vector<MatchExpression*> tryCompound;
+
+ // ...select the predicates that are safe to compound and try to
+ // compound them.
+ getMultikeyCompoundablePreds(indexAssign.preds, couldCompound, &tryCompound);
+ if (tryCompound.size()) {
+ compound(tryCompound, thisIndex, &indexAssign);
}
}
- else {
- // The index isn't multikey. Assign all preds to it. The planner will
- // intersect the bounds.
- indexAssign.preds = it->second;
-
- // Since everything in assign.preds prefixes the index, they all go
- // at position '0' in the index, the first position.
- indexAssign.positions.resize(indexAssign.preds.size(), 0);
-
- // Find everything that could use assign.index but isn't a pred over
- // the first field of that index.
- IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
- if (compIt != idxToNotFirst.end()) {
- compound(compIt->second, thisIndex, &indexAssign);
- }
+ } else {
+ // The index isn't multikey. Assign all preds to it. The planner will
+ // intersect the bounds.
+ indexAssign.preds = it->second;
+
+ // Since everything in assign.preds prefixes the index, they all go
+ // at position '0' in the index, the first position.
+ indexAssign.positions.resize(indexAssign.preds.size(), 0);
+
+ // Find everything that could use assign.index but isn't a pred over
+ // the first field of that index.
+ IndexToPredMap::const_iterator compIt = idxToNotFirst.find(indexAssign.index);
+ if (compIt != idxToNotFirst.end()) {
+ compound(compIt->second, thisIndex, &indexAssign);
}
+ }
+ AndEnumerableState state;
+ state.assignments.push_back(indexAssign);
+ andAssignment->choices.push_back(state);
+ }
+}
+
+void PlanEnumerator::enumerateAndIntersect(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ const vector<MemoID>& subnodes,
+ AndAssignment* andAssignment) {
+ // Hardcoded "look at all members of the power set of size 2" search,
+ // a.k.a. "consider all pairs of indices".
+ //
+ // For each unordered pair of indices do the following:
+ // 0. Impose an ordering (idx1, idx2) using the key patterns.
+ // (*See note below.)
+ // 1. Assign predicates which prefix idx1 to idx1.
+ // 2. Add assigned predicates to a set of predicates---the "already
+ // assigned set".
+ // 3. Assign predicates which prefix idx2 to idx2, as long as they haven't
+ // been assigned to idx1 already. Add newly assigned predicates to
+ // the "already assigned set".
+ // 4. Try to assign predicates to idx1 by compounding.
+ // 5. Add any predicates assigned to idx1 by compounding to the
+ // "already assigned set".
+ // 6. Try to assign predicates to idx2 by compounding.
+ // 7. Determine if we have already assigned all predicates in
+ // the "already assigned set" to a single index. If so, then
+ // don't generate an ixisect solution, as compounding will
+ // be better. Otherwise, output the ixisect assignments.
+ //
+ // *NOTE on ordering. Suppose we have two indices A and B, and a
+ // predicate P1 which is over the prefix of both indices A and B.
+ // If we order the indices (A, B) then P1 will get assigned to A,
+ // but if we order the indices (B, A) then P1 will get assigned to
+ // B. In order to make sure that we get the same result for the unordered
+ // pair {A, B} we have to begin by imposing an ordering. As a more concrete
+ // example, if we have indices {x: 1, y: 1} and {x: 1, z: 1} with predicate
+ // {x: 3}, we want to make sure that {x: 3} gets assigned to the same index
+ // irrespective of ordering.
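+ //
+ // As a sketch of these steps (illustrative values): with indices
+ // {a: 1, c: 1} and {b: 1, c: 1} and query {a: 2, b: 3, c: 4}, steps 1-3
+ // assign a==2 to the first index and b==3 to the second. Step 4 compounds
+ // c==4 onto {a: 1, c: 1}; step 6 then skips c==4 for the second index
+ // because it is already in the "already assigned set". Since no single
+ // index received all three predicates, step 7 outputs the intersection.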
+
+ size_t sizeBefore = andAssignment->choices.size();
+
+ for (IndexToPredMap::const_iterator firstIt = idxToFirst.begin(); firstIt != idxToFirst.end();
+ ++firstIt) {
+ const IndexEntry& oneIndex = (*_indices)[firstIt->first];
+
+ // 'oneAssign' is used to assign indices and subnodes or to
+ // make assignments for the first index when it's multikey.
+ // It is NOT used in the inner loop that considers pairs of
+ // indices.
+ OneIndexAssignment oneAssign;
+ oneAssign.index = firstIt->first;
+ oneAssign.preds = firstIt->second;
+ // Since everything in assign.preds prefixes the index, they all go
+ // at position '0' in the index, the first position.
+ oneAssign.positions.resize(oneAssign.preds.size(), 0);
+
+ // We create a scan per predicate so if we have >1 predicate we'll already
+ // have at least 2 scans (one predicate per scan as the planner can't
+ // intersect bounds when the index is multikey), so we stop here.
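+ // For instance (illustrative), {a: {$all: [1, 2, 3]}} against a multikey
+ // index {a: 1} produces three single-predicate scans to intersect; anything
+ // beyond kMaxSelfIntersections preds is simply dropped below.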
+ if (oneIndex.multikey && oneAssign.preds.size() > 1) {
+ // One could imagine an enormous auto-generated $all query with too many clauses to
+ // have an ixscan per clause.
+ static const size_t kMaxSelfIntersections = 10;
+ if (oneAssign.preds.size() > kMaxSelfIntersections) {
+ // Only take the first kMaxSelfIntersections preds.
+ oneAssign.preds.resize(kMaxSelfIntersections);
+ oneAssign.positions.resize(kMaxSelfIntersections);
+ }
AndEnumerableState state;
- state.assignments.push_back(indexAssign);
+ state.assignments.push_back(oneAssign);
andAssignment->choices.push_back(state);
+ continue;
}
- }
- void PlanEnumerator::enumerateAndIntersect(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- const vector<MemoID>& subnodes,
- AndAssignment* andAssignment) {
- // Hardcoded "look at all members of the power set of size 2" search,
- // a.k.a. "consider all pairs of indices".
- //
- // For each unordered pair of indices do the following:
- // 0. Impose an ordering (idx1, idx2) using the key patterns.
- // (*See note below.)
- // 1. Assign predicates which prefix idx1 to idx1.
- // 2. Add assigned predicates to a set of predicates---the "already
- // assigned set".
- // 3. Assign predicates which prefix idx2 to idx2, as long as they
- // been assigned to idx1 already. Add newly assigned predicates to
- // the "already assigned set".
- // 4. Try to assign predicates to idx1 by compounding.
- // 5. Add any predicates assigned to idx1 by compounding to the
- // "already assigned set",
- // 6. Try to assign predicates to idx2 by compounding.
- // 7. Determine if we have already assigned all predicates in
- // the "already assigned set" to a single index. If so, then
- // don't generate an ixisect solution, as compounding will
- // be better. Otherwise, output the ixisect assignments.
- //
- // *NOTE on ordering. Suppose we have two indices A and B, and a
- // predicate P1 which is over the prefix of both indices A and B.
- // If we order the indices (A, B) then P1 will get assigned to A,
- // but if we order the indices (B, A) then P1 will get assigned to
- // B. In order to make sure that we get the same result for the unordered
- // pair {A, B} we have to begin by imposing an ordering. As a more concrete
- // example, if we have indices {x: 1, y: 1} and {x: 1, z: 1} with predicate
- // {x: 3}, we want to make sure that {x: 3} gets assigned to the same index
- // irrespective of ordering.
-
- size_t sizeBefore = andAssignment->choices.size();
-
- for (IndexToPredMap::const_iterator firstIt = idxToFirst.begin();
- firstIt != idxToFirst.end(); ++firstIt) {
-
- const IndexEntry& oneIndex = (*_indices)[firstIt->first];
-
- // 'oneAssign' is used to assign indices and subnodes or to
- // make assignments for the first index when it's multikey.
- // It is NOT used in the inner loop that considers pairs of
- // indices.
- OneIndexAssignment oneAssign;
- oneAssign.index = firstIt->first;
- oneAssign.preds = firstIt->second;
- // Since everything in assign.preds prefixes the index, they all go
- // at position '0' in the index, the first position.
- oneAssign.positions.resize(oneAssign.preds.size(), 0);
-
- // We create a scan per predicate so if we have >1 predicate we'll already
- // have at least 2 scans (one predicate per scan as the planner can't
- // intersect bounds when the index is multikey), so we stop here.
- if (oneIndex.multikey && oneAssign.preds.size() > 1) {
- // One could imagine an enormous auto-generated $all query with too many clauses to
- // have an ixscan per clause.
- static const size_t kMaxSelfIntersections = 10;
- if (oneAssign.preds.size() > kMaxSelfIntersections) {
- // Only take the first kMaxSelfIntersections preds.
- oneAssign.preds.resize(kMaxSelfIntersections);
- oneAssign.positions.resize(kMaxSelfIntersections);
- }
- AndEnumerableState state;
- state.assignments.push_back(oneAssign);
- andAssignment->choices.push_back(state);
- continue;
+ // Output (subnode, firstAssign) pairs.
+ for (size_t i = 0; i < subnodes.size(); ++i) {
+ AndEnumerableState indexAndSubnode;
+ indexAndSubnode.assignments.push_back(oneAssign);
+ indexAndSubnode.subnodesToIndex.push_back(subnodes[i]);
+ andAssignment->choices.push_back(indexAndSubnode);
+ // Limit n^2.
+ if (andAssignment->choices.size() - sizeBefore > _intersectLimit) {
+ return;
}
+ }
- // Output (subnode, firstAssign) pairs.
- for (size_t i = 0; i < subnodes.size(); ++i) {
- AndEnumerableState indexAndSubnode;
- indexAndSubnode.assignments.push_back(oneAssign);
- indexAndSubnode.subnodesToIndex.push_back(subnodes[i]);
- andAssignment->choices.push_back(indexAndSubnode);
- // Limit n^2.
- if (andAssignment->choices.size() - sizeBefore > _intersectLimit) {
- return;
- }
+ // Start looking at all other indices to find one that we want to bundle
+ // with firstAssign.
+ IndexToPredMap::const_iterator secondIt = firstIt;
+ secondIt++;
+ for (; secondIt != idxToFirst.end(); secondIt++) {
+ const IndexEntry& firstIndex = (*_indices)[firstIt->first];
+ const IndexEntry& secondIndex = (*_indices)[secondIt->first];
+
+ // Limit n^2.
+ if (andAssignment->choices.size() - sizeBefore > _intersectLimit) {
+ return;
}
- // Start looking at all other indices to find one that we want to bundle
- // with firstAssign.
- IndexToPredMap::const_iterator secondIt = firstIt;
- secondIt++;
- for (; secondIt != idxToFirst.end(); secondIt++) {
- const IndexEntry& firstIndex = (*_indices)[secondIt->first];
- const IndexEntry& secondIndex = (*_indices)[secondIt->first];
-
- // Limit n^2.
- if (andAssignment->choices.size() - sizeBefore > _intersectLimit) {
- return;
- }
+ // If the other index we're considering is multikey with >1 pred, we don't
+ // want to have it as an additional assignment. Eventually the outer loop
+ // will reach the current value of secondIt and will assign every pred for
+ // this mapping to that index on its own.
+ if (secondIndex.multikey && secondIt->second.size() > 1) {
+ continue;
+ }
- // If the other index we're considering is multikey with >1 pred, we don't
- // want to have it as an additional assignment. Eventually, it1 will be
- // equal to the current value of secondIt and we'll assign every pred for
- // this mapping to the index.
- if (secondIndex.multikey && secondIt->second.size() > 1) {
- continue;
- }
+ //
+ // Step #0:
+ // Impose an ordering (idx1, idx2) using the key patterns.
+ //
+ IndexToPredMap::const_iterator it1, it2;
+ int ordering = firstIndex.keyPattern.woCompare(secondIndex.keyPattern);
+ it1 = (ordering > 0) ? firstIt : secondIt;
+ it2 = (ordering > 0) ? secondIt : firstIt;
+ const IndexEntry& ie1 = (*_indices)[it1->first];
+ const IndexEntry& ie2 = (*_indices)[it2->first];
- //
- // Step #0:
- // Impose an ordering (idx1, idx2) using the key patterns.
- //
- IndexToPredMap::const_iterator it1, it2;
- int ordering = firstIndex.keyPattern.woCompare(secondIndex.keyPattern);
- it1 = (ordering > 0) ? firstIt : secondIt;
- it2 = (ordering > 0) ? secondIt : firstIt;
- const IndexEntry& ie1 = (*_indices)[it1->first];
- const IndexEntry& ie2 = (*_indices)[it2->first];
+ //
+ // Step #1:
+ // Assign predicates which prefix firstIndex to firstAssign.
+ //
+ OneIndexAssignment firstAssign;
+ firstAssign.index = it1->first;
+ firstAssign.preds = it1->second;
+ // Since everything in assign.preds prefixes the index, they all go
+ // at position '0' in the index, the first position.
+ firstAssign.positions.resize(firstAssign.preds.size(), 0);
- //
- // Step #1:
- // Assign predicates which prefix firstIndex to firstAssign.
- //
- OneIndexAssignment firstAssign;
- firstAssign.index = it1->first;
- firstAssign.preds = it1->second;
- // Since everything in assign.preds prefixes the index, they all go
- // at position '0' in the index, the first position.
- firstAssign.positions.resize(firstAssign.preds.size(), 0);
-
- // We keep track of what preds are assigned to indices either because they
- // prefix the index or have been assigned through compounding. We make sure
- // that these predicates DO NOT become additional index assignments.
- // Example: what if firstAssign is the index (x, y) and we're trying to
- // compound? We want to make sure not to compound if the predicate is
- // already assigned to index y.
- set<MatchExpression*> predsAssigned;
+ // We keep track of what preds are assigned to indices either because they
+ // prefix the index or have been assigned through compounding. We make sure
+ // that these predicates DO NOT become additional index assignments.
+ // Example: what if firstAssign is the index (x, y) and we're trying to
+ // compound? We want to make sure not to compound if the predicate is
+ // already assigned to index y.
+ set<MatchExpression*> predsAssigned;
- //
- // Step #2:
- // Add indices assigned in 'firstAssign' to 'predsAssigned'.
- //
- for (size_t i = 0; i < firstAssign.preds.size(); ++i) {
- predsAssigned.insert(firstAssign.preds[i]);
- }
+ //
+ // Step #2:
+ // Add indices assigned in 'firstAssign' to 'predsAssigned'.
+ //
+ for (size_t i = 0; i < firstAssign.preds.size(); ++i) {
+ predsAssigned.insert(firstAssign.preds[i]);
+ }
- //
- // Step #3:
- // Assign predicates which prefix secondIndex to secondAssign and
- // have not already been assigned to firstAssign. Any newly
- // assigned predicates are added to 'predsAssigned'.
- //
- OneIndexAssignment secondAssign;
- secondAssign.index = it2->first;
- const vector<MatchExpression*>& preds = it2->second;
- for (size_t i = 0; i < preds.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(preds[i])) {
- secondAssign.preds.push_back(preds[i]);
- secondAssign.positions.push_back(0);
- predsAssigned.insert(preds[i]);
- }
+ //
+ // Step #3:
+ // Assign predicates which prefix secondIndex to secondAssign and
+ // have not already been assigned to firstAssign. Any newly
+ // assigned predicates are added to 'predsAssigned'.
+ //
+ OneIndexAssignment secondAssign;
+ secondAssign.index = it2->first;
+ const vector<MatchExpression*>& preds = it2->second;
+ for (size_t i = 0; i < preds.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(preds[i])) {
+ secondAssign.preds.push_back(preds[i]);
+ secondAssign.positions.push_back(0);
+ predsAssigned.insert(preds[i]);
}
+ }
- // Every predicate that would use this index is already assigned in
- // firstAssign.
- if (0 == secondAssign.preds.size()) { continue; }
+ // Every predicate that would use this index is already assigned in
+ // firstAssign.
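+ // For example (illustrative), with indices {a: 1, b: 1} and {a: 1, c: 1}
+ // and the lone predicate {a: 2}, the second index has nothing left to
+ // cover once a==2 goes to the first, so this pair is skipped.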
+ if (0 == secondAssign.preds.size()) {
+ continue;
+ }
- //
- // Step #4:
- // Compound on firstAssign, if applicable.
- //
- IndexToPredMap::const_iterator firstIndexCompound =
- idxToNotFirst.find(firstAssign.index);
-
- // Can't compound with multikey indices.
- if (!ie1.multikey && firstIndexCompound != idxToNotFirst.end()) {
- // We must remove any elements of 'predsAssigned' from consideration.
- vector<MatchExpression*> tryCompound;
- const vector<MatchExpression*>& couldCompound
- = firstIndexCompound->second;
- for (size_t i = 0; i < couldCompound.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(couldCompound[i])) {
- tryCompound.push_back(couldCompound[i]);
- }
- }
- if (tryCompound.size()) {
- compound(tryCompound, ie1, &firstAssign);
+ //
+ // Step #4:
+ // Compound on firstAssign, if applicable.
+ //
+ IndexToPredMap::const_iterator firstIndexCompound =
+ idxToNotFirst.find(firstAssign.index);
+
+ // Can't compound with multikey indices.
+ if (!ie1.multikey && firstIndexCompound != idxToNotFirst.end()) {
+ // We must remove any elements of 'predsAssigned' from consideration.
+ vector<MatchExpression*> tryCompound;
+ const vector<MatchExpression*>& couldCompound = firstIndexCompound->second;
+ for (size_t i = 0; i < couldCompound.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(couldCompound[i])) {
+ tryCompound.push_back(couldCompound[i]);
}
}
-
- //
- // Step #5:
- // Make sure predicates assigned by compounding in step #4 do not get
- // assigned again.
- //
- for (size_t i = 0; i < firstAssign.preds.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(firstAssign.preds[i])) {
- predsAssigned.insert(firstAssign.preds[i]);
- }
+ if (tryCompound.size()) {
+ compound(tryCompound, ie1, &firstAssign);
}
+ }
- //
- // Step #6:
- // Compound on firstAssign, if applicable.
- //
- IndexToPredMap::const_iterator secondIndexCompound =
- idxToNotFirst.find(secondAssign.index);
-
- if (!ie2.multikey && secondIndexCompound != idxToNotFirst.end()) {
- // We must remove any elements of 'predsAssigned' from consideration.
- vector<MatchExpression*> tryCompound;
- const vector<MatchExpression*>& couldCompound
- = secondIndexCompound->second;
- for (size_t i = 0; i < couldCompound.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(couldCompound[i])) {
- tryCompound.push_back(couldCompound[i]);
- }
- }
- if (tryCompound.size()) {
- compound(tryCompound, ie2, &secondAssign);
- }
+ //
+ // Step #5:
+ // Make sure predicates assigned by compounding in step #4 do not get
+ // assigned again.
+ //
+ for (size_t i = 0; i < firstAssign.preds.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(firstAssign.preds[i])) {
+ predsAssigned.insert(firstAssign.preds[i]);
}
+ }
- // Add predicates in 'secondAssign' to the set of all assigned predicates.
- for (size_t i = 0; i < secondAssign.preds.size(); ++i) {
- if (predsAssigned.end() == predsAssigned.find(secondAssign.preds[i])) {
- predsAssigned.insert(secondAssign.preds[i]);
+ //
+ // Step #6:
+ // Compound on secondAssign, if applicable.
+ //
+ IndexToPredMap::const_iterator secondIndexCompound =
+ idxToNotFirst.find(secondAssign.index);
+
+ if (!ie2.multikey && secondIndexCompound != idxToNotFirst.end()) {
+ // We must remove any elements of 'predsAssigned' from consideration.
+ vector<MatchExpression*> tryCompound;
+ const vector<MatchExpression*>& couldCompound = secondIndexCompound->second;
+ for (size_t i = 0; i < couldCompound.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(couldCompound[i])) {
+ tryCompound.push_back(couldCompound[i]);
}
}
-
- //
- // Step #7:
- // Make sure we haven't already assigned this set of predicates by compounding.
- // If we have, then bail out for this pair of indices.
- //
- if (alreadyCompounded(predsAssigned, andAssignment)) {
- // There is no need to add either 'firstAssign' or 'secondAssign'
- // to 'andAssignment' in this case because we have already performed
- // assignments to single indices in enumerateOneIndex(...).
- continue;
+ if (tryCompound.size()) {
+ compound(tryCompound, ie2, &secondAssign);
}
+ }
- // We're done with this particular pair of indices; output
- // the resulting assignments.
- AndEnumerableState state;
- state.assignments.push_back(firstAssign);
- state.assignments.push_back(secondAssign);
- andAssignment->choices.push_back(state);
+ // Add predicates in 'secondAssign' to the set of all assigned predicates.
+ for (size_t i = 0; i < secondAssign.preds.size(); ++i) {
+ if (predsAssigned.end() == predsAssigned.find(secondAssign.preds[i])) {
+ predsAssigned.insert(secondAssign.preds[i]);
+ }
}
- }
- // TODO: Do we just want one subnode at a time? We can use far more than 2 indices at once
- // doing this very easily. If we want to restrict the # of indices the children use, when
- // we memoize the subtree above we can restrict it to 1 index at a time. This can get
- // tricky if we want both an intersection and a 1-index memo entry, since our state change
- // is simple and we don't traverse the memo in any targeted way. Should also verify that
- // having a one-to-many mapping of MatchExpression to MemoID doesn't break anything. This
- // approach errors on the side of "too much indexing."
- for (size_t i = 0; i < subnodes.size(); ++i) {
- for (size_t j = i + 1; j < subnodes.size(); ++j) {
- AndEnumerableState state;
- state.subnodesToIndex.push_back(subnodes[i]);
- state.subnodesToIndex.push_back(subnodes[j]);
- andAssignment->choices.push_back(state);
+ //
+ // Step #7:
+ // Make sure we haven't already assigned this set of predicates by compounding.
+ // If we have, then bail out for this pair of indices.
+ //
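+ // For example (illustrative), with indices {a: 1}, {b: 1}, and {a: 1, b: 1}
+ // and query {a: 2, b: 2}, intersecting {a: 1} with {b: 1} assigns the same
+ // two preds that were already compounded onto {a: 1, b: 1} in
+ // enumerateOneIndex(...), so we skip the pair rather than emit a redundant
+ // ixisect plan.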
+ if (alreadyCompounded(predsAssigned, andAssignment)) {
+ // There is no need to add either 'firstAssign' or 'secondAssign'
+ // to 'andAssignment' in this case because we have already performed
+ // assignments to single indices in enumerateOneIndex(...).
+ continue;
}
+
+ // We're done with this particular pair of indices; output
+ // the resulting assignments.
+ AndEnumerableState state;
+ state.assignments.push_back(firstAssign);
+ state.assignments.push_back(secondAssign);
+ andAssignment->choices.push_back(state);
}
}
- bool PlanEnumerator::partitionPreds(MatchExpression* node,
- PrepMemoContext context,
- vector<MatchExpression*>* indexOut,
- vector<MemoID>* subnodesOut,
- vector<MemoID>* mandatorySubnodes) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- MatchExpression* child = node->getChild(i);
- if (Indexability::nodeCanUseIndexOnOwnField(child)) {
- RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
- if (NULL != context.elemMatchExpr) {
- // If we're in an $elemMatch context, store the
- // innermost parent $elemMatch, as well as the
- // inner path prefix.
- rt->elemMatchExpr = context.elemMatchExpr;
- rt->pathPrefix = getPathPrefix(child->path().toString());
- }
- else {
- // We're not an $elemMatch context, so we should store
- // the prefix of the full path.
- rt->pathPrefix = getPathPrefix(rt->path);
- }
-
- // Output this as a pred that can use the index.
- indexOut->push_back(child);
- }
- else if (Indexability::isBoundsGeneratingNot(child)) {
- partitionPreds(child, context, indexOut, subnodesOut, mandatorySubnodes);
- }
- else if (MatchExpression::ELEM_MATCH_OBJECT == child->matchType()) {
- PrepMemoContext childContext;
- childContext.elemMatchExpr = child;
- partitionPreds(child, childContext, indexOut, subnodesOut, mandatorySubnodes);
- }
- else if (MatchExpression::AND == child->matchType()) {
- partitionPreds(child, context, indexOut, subnodesOut, mandatorySubnodes);
+ // TODO: Do we just want one subnode at a time? We can use far more than 2 indices at once
+ // doing this very easily. If we want to restrict the # of indices the children use, when
+ // we memoize the subtree above we can restrict it to 1 index at a time. This can get
+ // tricky if we want both an intersection and a 1-index memo entry, since our state change
+ // is simple and we don't traverse the memo in any targeted way. Should also verify that
+ // having a one-to-many mapping of MatchExpression to MemoID doesn't break anything. This
+ // approach errors on the side of "too much indexing."
+ for (size_t i = 0; i < subnodes.size(); ++i) {
+ for (size_t j = i + 1; j < subnodes.size(); ++j) {
+ AndEnumerableState state;
+ state.subnodesToIndex.push_back(subnodes[i]);
+ state.subnodesToIndex.push_back(subnodes[j]);
+ andAssignment->choices.push_back(state);
+ }
+ }
+}
+
+bool PlanEnumerator::partitionPreds(MatchExpression* node,
+ PrepMemoContext context,
+ vector<MatchExpression*>* indexOut,
+ vector<MemoID>* subnodesOut,
+ vector<MemoID>* mandatorySubnodes) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ MatchExpression* child = node->getChild(i);
+ if (Indexability::nodeCanUseIndexOnOwnField(child)) {
+ RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
+ if (NULL != context.elemMatchExpr) {
+ // If we're in an $elemMatch context, store the
+ // innermost parent $elemMatch, as well as the
+ // inner path prefix.
+ rt->elemMatchExpr = context.elemMatchExpr;
+ rt->pathPrefix = getPathPrefix(child->path().toString());
+ } else {
+ // We're not in an $elemMatch context, so we should store
+ // the prefix of the full path.
+ rt->pathPrefix = getPathPrefix(rt->path);
}
- else {
- bool mandatory = expressionRequiresIndex(child);
- // Recursively prepMemo for the subnode. We fall through
- // to this case for logical nodes other than AND (e.g. OR).
- if (prepMemo(child, context)) {
- size_t childID = memoIDForNode(child);
-
- // Output the subnode.
- if (mandatory) {
- mandatorySubnodes->push_back(childID);
- }
- else {
- subnodesOut->push_back(childID);
- }
- }
- else if (mandatory) {
- // The subnode is mandatory but cannot be indexed. This means
- // that the entire AND cannot be indexed either.
- return false;
+ // Output this as a pred that can use the index.
+ indexOut->push_back(child);
+ } else if (Indexability::isBoundsGeneratingNot(child)) {
+ partitionPreds(child, context, indexOut, subnodesOut, mandatorySubnodes);
+ } else if (MatchExpression::ELEM_MATCH_OBJECT == child->matchType()) {
+ PrepMemoContext childContext;
+ childContext.elemMatchExpr = child;
+ partitionPreds(child, childContext, indexOut, subnodesOut, mandatorySubnodes);
+ } else if (MatchExpression::AND == child->matchType()) {
+ partitionPreds(child, context, indexOut, subnodesOut, mandatorySubnodes);
+ } else {
+ bool mandatory = expressionRequiresIndex(child);
+
+ // Recursively prepMemo for the subnode. We fall through
+ // to this case for logical nodes other than AND (e.g. OR).
+ if (prepMemo(child, context)) {
+ size_t childID = memoIDForNode(child);
+
+ // Output the subnode.
+ if (mandatory) {
+ mandatorySubnodes->push_back(childID);
+ } else {
+ subnodesOut->push_back(childID);
}
+ } else if (mandatory) {
+ // The subnode is mandatory but cannot be indexed. This means
+ // that the entire AND cannot be indexed either.
+ return false;
}
}
-
- return true;
}
- void PlanEnumerator::getMultikeyCompoundablePreds(const vector<MatchExpression*>& assigned,
- const vector<MatchExpression*>& couldCompound,
- vector<MatchExpression*>* out) {
- // Map from a particular $elemMatch expression to the set of prefixes
- // used so far by the predicates inside the $elemMatch. For example,
- // {a: {$elemMatch: {b: 1, c: 2}}} would map to the set {'b', 'c'} at
- // the end of this function's execution.
- //
- // NULL maps to the set of prefixes used so far outside of an $elemMatch
- // context.
- //
- // As we iterate over the available indexed predicates, we keep track
- // of the used prefixes both inside and outside of an $elemMatch context.
- unordered_map<MatchExpression*, set<string> > used;
-
- // Initialize 'used' with the starting predicates in 'assigned'. Begin by
- // initializing the top-level scope with the prefix of the full path.
- for (size_t i = 0; i < assigned.size(); i++) {
- const MatchExpression* assignedPred = assigned[i];
- invariant(NULL != assignedPred->getTag());
- RelevantTag* usedRt = static_cast<RelevantTag*>(assignedPred->getTag());
- set<string> usedPrefixes;
- usedPrefixes.insert(getPathPrefix(usedRt->path));
- used[NULL] = usedPrefixes;
-
- // If 'assigned' is a predicate inside an $elemMatch, we have to
- // add the prefix not only to the top-level context, but also to the
- // the $elemMatch context. For example, if 'assigned' is {a: {$elemMatch: {b: 1}}},
- // then we will have already added "a" to the set for NULL. We now
- // also need to add "b" to the set for the $elemMatch.
- if (NULL != usedRt->elemMatchExpr) {
- set<string> elemMatchUsed;
- // Whereas getPathPrefix(usedRt->path) is the prefix of the full path,
- // usedRt->pathPrefix contains the prefix of the portion of the
- // path that is inside the $elemMatch. These two prefixes are the same
- // in the top-level context, but here must be different because 'usedRt'
- // is in an $elemMatch context.
- elemMatchUsed.insert(usedRt->pathPrefix);
- used[usedRt->elemMatchExpr] = elemMatchUsed;
- }
- }
+ return true;
+}
- for (size_t i = 0; i < couldCompound.size(); ++i) {
- invariant(Indexability::nodeCanUseIndexOnOwnField(couldCompound[i]));
- RelevantTag* rt = static_cast<RelevantTag*>(couldCompound[i]->getTag());
-
- if (used.end() == used.find(rt->elemMatchExpr)) {
- // This is a new $elemMatch that we haven't seen before.
- invariant(used.end() != used.find(NULL));
- set<string>& topLevelUsed = used.find(NULL)->second;
-
- // If the top-level path prefix of the $elemMatch hasn't been
- // used yet, couldCompound[i] is safe to compound.
- if (topLevelUsed.end() == topLevelUsed.find(getPathPrefix(rt->path))) {
- topLevelUsed.insert(getPathPrefix(rt->path));
- set<string> usedPrefixes;
- usedPrefixes.insert(rt->pathPrefix);
- used[rt->elemMatchExpr] = usedPrefixes;
-
- // Output the predicate.
- out->push_back(couldCompound[i]);
- }
+void PlanEnumerator::getMultikeyCompoundablePreds(const vector<MatchExpression*>& assigned,
+ const vector<MatchExpression*>& couldCompound,
+ vector<MatchExpression*>* out) {
+ // Map from a particular $elemMatch expression to the set of prefixes
+ // used so far by the predicates inside the $elemMatch. For example,
+ // {a: {$elemMatch: {b: 1, c: 2}}} would map to the set {'b', 'c'} at
+ // the end of this function's execution.
+ //
+ // NULL maps to the set of prefixes used so far outside of an $elemMatch
+ // context.
+ //
+ // As we iterate over the available indexed predicates, we keep track
+ // of the used prefixes both inside and outside of an $elemMatch context.
+ unordered_map<MatchExpression*, set<string>> used;
+
+ // Initialize 'used' with the starting predicates in 'assigned'. Begin by
+ // initializing the top-level scope with the prefix of the full path.
+ for (size_t i = 0; i < assigned.size(); i++) {
+ const MatchExpression* assignedPred = assigned[i];
+ invariant(NULL != assignedPred->getTag());
+ RelevantTag* usedRt = static_cast<RelevantTag*>(assignedPred->getTag());
+ set<string> usedPrefixes;
+ usedPrefixes.insert(getPathPrefix(usedRt->path));
+ used[NULL] = usedPrefixes;
+
+ // If 'assigned' is a predicate inside an $elemMatch, we have to
+ // add the prefix not only to the top-level context, but also to
+ // the $elemMatch context. For example, if 'assigned' is {a: {$elemMatch: {b: 1}}},
+ // then we will have already added "a" to the set for NULL. We now
+ // also need to add "b" to the set for the $elemMatch.
+ if (NULL != usedRt->elemMatchExpr) {
+ set<string> elemMatchUsed;
+ // Whereas getPathPrefix(usedRt->path) is the prefix of the full path,
+ // usedRt->pathPrefix contains the prefix of the portion of the
+ // path that is inside the $elemMatch. These two prefixes are the same
+ // in the top-level context, but here must be different because 'usedRt'
+ // is in an $elemMatch context.
+ elemMatchUsed.insert(usedRt->pathPrefix);
+ used[usedRt->elemMatchExpr] = elemMatchUsed;
+ }
+ }
+ for (size_t i = 0; i < couldCompound.size(); ++i) {
+ invariant(Indexability::nodeCanUseIndexOnOwnField(couldCompound[i]));
+ RelevantTag* rt = static_cast<RelevantTag*>(couldCompound[i]->getTag());
+
+ if (used.end() == used.find(rt->elemMatchExpr)) {
+ // This is a new $elemMatch that we haven't seen before.
+ invariant(used.end() != used.find(NULL));
+ set<string>& topLevelUsed = used.find(NULL)->second;
+
+ // If the top-level path prefix of the $elemMatch hasn't been
+ // used yet, couldCompound[i] is safe to compound.
+ if (topLevelUsed.end() == topLevelUsed.find(getPathPrefix(rt->path))) {
+ topLevelUsed.insert(getPathPrefix(rt->path));
+ set<string> usedPrefixes;
+ usedPrefixes.insert(rt->pathPrefix);
+ used[rt->elemMatchExpr] = usedPrefixes;
+
+ // Output the predicate.
+ out->push_back(couldCompound[i]);
}
- else {
- // We've seen this $elemMatch before, or the predicate is
- // top-level (not in an $elemMatch context). If the prefix stored
- // in the tag has not been used yet, then couldCompound[i] is
- // safe to compound.
- set<string>& usedPrefixes = used.find(rt->elemMatchExpr)->second;
- if (usedPrefixes.end() == usedPrefixes.find(rt->pathPrefix)) {
- usedPrefixes.insert(rt->pathPrefix);
-
- // Output the predicate.
- out->push_back(couldCompound[i]);
- }
+
+ } else {
+ // We've seen this $elemMatch before, or the predicate is
+ // top-level (not in an $elemMatch context). If the prefix stored
+ // in the tag has not been used yet, then couldCompound[i] is
+ // safe to compound.
+ set<string>& usedPrefixes = used.find(rt->elemMatchExpr)->second;
+ if (usedPrefixes.end() == usedPrefixes.find(rt->pathPrefix)) {
+ usedPrefixes.insert(rt->pathPrefix);
+
+ // Output the predicate.
+ out->push_back(couldCompound[i]);
}
}
}
+}
- bool PlanEnumerator::alreadyCompounded(const set<MatchExpression*>& ixisectAssigned,
- const AndAssignment* andAssignment) {
- for (size_t i = 0; i < andAssignment->choices.size(); ++i) {
- const AndEnumerableState& state = andAssignment->choices[i];
+bool PlanEnumerator::alreadyCompounded(const set<MatchExpression*>& ixisectAssigned,
+ const AndAssignment* andAssignment) {
+ for (size_t i = 0; i < andAssignment->choices.size(); ++i) {
+ const AndEnumerableState& state = andAssignment->choices[i];
- // We cannot have assigned this set of predicates already by
- // compounding unless this is an assignment to a single index.
- if (state.assignments.size() != 1) {
- continue;
- }
+ // We cannot have assigned this set of predicates already by
+ // compounding unless this is an assignment to a single index.
+ if (state.assignments.size() != 1) {
+ continue;
+ }
- // If the set of preds in 'ixisectAssigned' is a subset of 'oneAssign.preds',
- // then all the preds can be used by compounding on a single index.
- const OneIndexAssignment& oneAssign = state.assignments[0];
+ // If the set of preds in 'ixisectAssigned' is a subset of 'oneAssign.preds',
+ // then all the preds can be used by compounding on a single index.
+ const OneIndexAssignment& oneAssign = state.assignments[0];
- // If 'ixisectAssigned' is larger than 'oneAssign.preds', then
- // it can't be a subset.
- if (ixisectAssigned.size() > oneAssign.preds.size()) {
- continue;
- }
-
- // Check for subset by counting the number of elements in 'oneAssign.preds'
- // that are contained in 'ixisectAssigned'. The elements of both 'oneAssign.preds'
- // and 'ixisectAssigned' are unique (no repeated elements).
- size_t count = 0;
- for (size_t j = 0; j < oneAssign.preds.size(); ++j) {
- if (ixisectAssigned.end() != ixisectAssigned.find(oneAssign.preds[j])) {
- ++count;
- }
- }
+ // If 'ixisectAssigned' is larger than 'oneAssign.preds', then
+ // it can't be a subset.
+ if (ixisectAssigned.size() > oneAssign.preds.size()) {
+ continue;
+ }
- if (ixisectAssigned.size() == count) {
- return true;
+ // Check for subset by counting the number of elements in 'oneAssign.preds'
+ // that are contained in 'ixisectAssigned'. The elements of both 'oneAssign.preds'
+ // and 'ixisectAssigned' are unique (no repeated elements).
+ size_t count = 0;
+ for (size_t j = 0; j < oneAssign.preds.size(); ++j) {
+ if (ixisectAssigned.end() != ixisectAssigned.find(oneAssign.preds[j])) {
+ ++count;
}
+ }
- // We cannot assign the preds by compounding on 'oneAssign'.
- // Move on to the next index.
+ if (ixisectAssigned.size() == count) {
+ return true;
}
- return false;
+ // We cannot assign the preds by compounding on 'oneAssign'.
+ // Move on to the next index.
}
- void PlanEnumerator::compound(const vector<MatchExpression*>& tryCompound,
- const IndexEntry& thisIndex,
- OneIndexAssignment* assign) {
- // Let's try to match up the expressions in 'compExprs' with the
- // fields in the index key pattern.
- BSONObjIterator kpIt(thisIndex.keyPattern);
-
- // Skip the first elt as it's already assigned.
- kpIt.next();
-
- // When we compound we store the field number that the predicate
- // goes over in order to avoid having to iterate again and compare
- // field names.
- size_t posInIdx = 0;
-
- while (kpIt.more()) {
- BSONElement keyElt = kpIt.next();
- ++posInIdx;
-
- // Go through 'tryCompound' to see if there is a compoundable
- // predicate for 'keyElt'. If there is nothing to compound, then
- // simply move on to the next field in the compound index. We
- // do not enforce that fields are assigned contiguously from
- // right to left, i.e. for compound index {a: 1, b: 1, c: 1}
- // it is okay to compound predicates over "a" and "c", skipping "b".
- for (size_t j = 0; j < tryCompound.size(); ++j) {
- MatchExpression* maybe = tryCompound[j];
- // Sigh we grab the full path from the relevant tag.
- RelevantTag* rt = static_cast<RelevantTag*>(maybe->getTag());
- if (keyElt.fieldName() == rt->path) {
- // preds and positions are parallel arrays.
- assign->preds.push_back(maybe);
- assign->positions.push_back(posInIdx);
- }
+ return false;
+}
+
+void PlanEnumerator::compound(const vector<MatchExpression*>& tryCompound,
+ const IndexEntry& thisIndex,
+ OneIndexAssignment* assign) {
+ // Let's try to match up the expressions in 'compExprs' with the
+ // fields in the index key pattern.
+ BSONObjIterator kpIt(thisIndex.keyPattern);
+
+ // Skip the first elt as it's already assigned.
+ kpIt.next();
+
+ // When we compound we store the field number that the predicate
+ // goes over in order to avoid having to iterate again and compare
+ // field names.
+ size_t posInIdx = 0;
+
+ while (kpIt.more()) {
+ BSONElement keyElt = kpIt.next();
+ ++posInIdx;
+
+ // Go through 'tryCompound' to see if there is a compoundable
+ // predicate for 'keyElt'. If there is nothing to compound, then
+ // simply move on to the next field in the compound index. We
+ // do not enforce that fields are assigned contiguously from
+ // right to left, i.e. for compound index {a: 1, b: 1, c: 1}
+ // it is okay to compound predicates over "a" and "c", skipping "b".
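+ // For instance (illustrative), compounding a pred over "c" onto
+ // {a: 1, b: 1, c: 1} records position 2 in the parallel 'positions' array.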
+ for (size_t j = 0; j < tryCompound.size(); ++j) {
+ MatchExpression* maybe = tryCompound[j];
+ // Sigh we grab the full path from the relevant tag.
+ RelevantTag* rt = static_cast<RelevantTag*>(maybe->getTag());
+ if (keyElt.fieldName() == rt->path) {
+ // preds and positions are parallel arrays.
+ assign->preds.push_back(maybe);
+ assign->positions.push_back(posInIdx);
}
}
}
+}
+
+//
+// Structure navigation
+//
+
+void PlanEnumerator::tagMemo(size_t id) {
+ LOG(5) << "Tagging memoID " << id << endl;
+ NodeAssignment* assign = _memo[id];
+ verify(NULL != assign);
+
+ if (NULL != assign->pred) {
+ PredicateAssignment* pa = assign->pred.get();
+ verify(NULL == pa->expr->getTag());
+ verify(pa->indexToAssign < pa->first.size());
+ pa->expr->setTag(new IndexTag(pa->first[pa->indexToAssign]));
+ } else if (NULL != assign->orAssignment) {
+ OrAssignment* oa = assign->orAssignment.get();
+ for (size_t i = 0; i < oa->subnodes.size(); ++i) {
+ tagMemo(oa->subnodes[i]);
+ }
+ } else if (NULL != assign->arrayAssignment) {
+ ArrayAssignment* aa = assign->arrayAssignment.get();
+ tagMemo(aa->subnodes[aa->counter]);
+ } else if (NULL != assign->andAssignment) {
+ AndAssignment* aa = assign->andAssignment.get();
+ verify(aa->counter < aa->choices.size());
- //
- // Structure navigation
- //
-
- void PlanEnumerator::tagMemo(size_t id) {
- LOG(5) << "Tagging memoID " << id << endl;
- NodeAssignment* assign = _memo[id];
- verify(NULL != assign);
+ const AndEnumerableState& aes = aa->choices[aa->counter];
- if (NULL != assign->pred) {
- PredicateAssignment* pa = assign->pred.get();
- verify(NULL == pa->expr->getTag());
- verify(pa->indexToAssign < pa->first.size());
- pa->expr->setTag(new IndexTag(pa->first[pa->indexToAssign]));
- }
- else if (NULL != assign->orAssignment) {
- OrAssignment* oa = assign->orAssignment.get();
- for (size_t i = 0; i < oa->subnodes.size(); ++i) {
- tagMemo(oa->subnodes[i]);
- }
- }
- else if (NULL != assign->arrayAssignment) {
- ArrayAssignment* aa = assign->arrayAssignment.get();
- tagMemo(aa->subnodes[aa->counter]);
+ for (size_t j = 0; j < aes.subnodesToIndex.size(); ++j) {
+ tagMemo(aes.subnodesToIndex[j]);
}
- else if (NULL != assign->andAssignment) {
- AndAssignment* aa = assign->andAssignment.get();
- verify(aa->counter < aa->choices.size());
- const AndEnumerableState& aes = aa->choices[aa->counter];
+ for (size_t i = 0; i < aes.assignments.size(); ++i) {
+ const OneIndexAssignment& assign = aes.assignments[i];
- for (size_t j = 0; j < aes.subnodesToIndex.size(); ++j) {
- tagMemo(aes.subnodesToIndex[j]);
+ for (size_t j = 0; j < assign.preds.size(); ++j) {
+ MatchExpression* pred = assign.preds[j];
+ verify(NULL == pred->getTag());
+ pred->setTag(new IndexTag(assign.index, assign.positions[j]));
}
-
- for (size_t i = 0; i < aes.assignments.size(); ++i) {
- const OneIndexAssignment& assign = aes.assignments[i];
-
- for (size_t j = 0; j < assign.preds.size(); ++j) {
- MatchExpression* pred = assign.preds[j];
- verify(NULL == pred->getTag());
- pred->setTag(new IndexTag(assign.index, assign.positions[j]));
- }
- }
- }
- else {
- verify(0);
}
+ } else {
+ verify(0);
}
+}
- bool PlanEnumerator::nextMemo(size_t id) {
- NodeAssignment* assign = _memo[id];
- verify(NULL != assign);
+bool PlanEnumerator::nextMemo(size_t id) {
+ NodeAssignment* assign = _memo[id];
+ verify(NULL != assign);
- if (NULL != assign->pred) {
- PredicateAssignment* pa = assign->pred.get();
- pa->indexToAssign++;
- if (pa->indexToAssign >= pa->first.size()) {
- pa->indexToAssign = 0;
- return true;
- }
- return false;
+ if (NULL != assign->pred) {
+ PredicateAssignment* pa = assign->pred.get();
+ pa->indexToAssign++;
+ if (pa->indexToAssign >= pa->first.size()) {
+ pa->indexToAssign = 0;
+ return true;
}
- else if (NULL != assign->orAssignment) {
- OrAssignment* oa = assign->orAssignment.get();
-
- // Limit the number of OR enumerations
- oa->counter++;
- if (oa->counter >= _orLimit) {
- return true;
- }
+ return false;
+ } else if (NULL != assign->orAssignment) {
+ OrAssignment* oa = assign->orAssignment.get();
- // OR just walks through telling its children to
- // move forward.
- for (size_t i = 0; i < oa->subnodes.size(); ++i) {
- // If there's no carry, we just stop. If there's a carry, we move the next child
- // forward.
- if (!nextMemo(oa->subnodes[i])) {
- return false;
- }
- }
- // If we're here, the last subnode had a carry, therefore the OR has a carry.
+ // Limit the number of OR enumerations
+ oa->counter++;
+ if (oa->counter >= _orLimit) {
return true;
}
- else if (NULL != assign->arrayAssignment) {
- ArrayAssignment* aa = assign->arrayAssignment.get();
- // moving to next on current subnode is OK
- if (!nextMemo(aa->subnodes[aa->counter])) { return false; }
- // Move to next subnode.
- ++aa->counter;
- if (aa->counter < aa->subnodes.size()) {
+
+ // OR just walks through telling its children to
+ // move forward.
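+ // This behaves like incrementing a multi-digit counter: a child that wraps
+ // back to its first state carries into the next child.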
+ for (size_t i = 0; i < oa->subnodes.size(); ++i) {
+ // If there's no carry, we just stop. If there's a carry, we move the next child
+ // forward.
+ if (!nextMemo(oa->subnodes[i])) {
return false;
}
- aa->counter = 0;
- return true;
}
- else if (NULL != assign->andAssignment) {
- AndAssignment* aa = assign->andAssignment.get();
-
- // One of our subnodes might have to move on to its next enumeration state.
- const AndEnumerableState& aes = aa->choices[aa->counter];
- for (size_t i = 0; i < aes.subnodesToIndex.size(); ++i) {
- if (!nextMemo(aes.subnodesToIndex[i])) {
- return false;
- }
- }
+ // If we're here, the last subnode had a carry, therefore the OR has a carry.
+ return true;
+ } else if (NULL != assign->arrayAssignment) {
+ ArrayAssignment* aa = assign->arrayAssignment.get();
+ // moving to next on current subnode is OK
+ if (!nextMemo(aa->subnodes[aa->counter])) {
+ return false;
+ }
+ // Move to next subnode.
+ ++aa->counter;
+ if (aa->counter < aa->subnodes.size()) {
+ return false;
+ }
+ aa->counter = 0;
+ return true;
+ } else if (NULL != assign->andAssignment) {
+ AndAssignment* aa = assign->andAssignment.get();
- // None of the subnodes had another enumeration state, so we move on to the
- // next top-level choice.
- ++aa->counter;
- if (aa->counter < aa->choices.size()) {
+ // One of our subnodes might have to move on to its next enumeration state.
+ const AndEnumerableState& aes = aa->choices[aa->counter];
+ for (size_t i = 0; i < aes.subnodesToIndex.size(); ++i) {
+ if (!nextMemo(aes.subnodesToIndex[i])) {
return false;
}
- aa->counter = 0;
- return true;
}
- // This shouldn't happen.
- verify(0);
- return false;
+ // None of the subnodes had another enumeration state, so we move on to the
+ // next top-level choice.
+ ++aa->counter;
+ if (aa->counter < aa->choices.size()) {
+ return false;
+ }
+ aa->counter = 0;
+ return true;
}
-} // namespace mongo
+ // This shouldn't happen.
+ verify(0);
+ return false;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/plan_enumerator.h b/src/mongo/db/query/plan_enumerator.h
index 39f0d697145..f12dde08897 100644
--- a/src/mongo/db/query/plan_enumerator.h
+++ b/src/mongo/db/query/plan_enumerator.h
@@ -39,409 +39,409 @@
namespace mongo {
- struct PlanEnumeratorParams {
+struct PlanEnumeratorParams {
+ PlanEnumeratorParams()
+ : intersect(false),
+ maxSolutionsPerOr(internalQueryEnumerationMaxOrSolutions),
+ maxIntersectPerAnd(internalQueryEnumerationMaxIntersectPerAnd) {}
- PlanEnumeratorParams() : intersect(false),
- maxSolutionsPerOr(internalQueryEnumerationMaxOrSolutions),
- maxIntersectPerAnd(internalQueryEnumerationMaxIntersectPerAnd) { }
+ // Do we provide solutions that use more indices than the minimum required to provide
+ // an indexed solution?
+ bool intersect;
- // Do we provide solutions that use more indices than the minimum required to provide
- // an indexed solution?
- bool intersect;
+ // Not owned here.
+ MatchExpression* root;
- // Not owned here.
- MatchExpression* root;
+ // Not owned here.
+ const std::vector<IndexEntry>* indices;
+
+ // How many plans are we willing to output from an OR? We currently consider
+ // all possible OR plans, which means the product of the number of possibilities
+ // for each clause of the OR. This could grow disastrously large.
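+ // For example (illustrative), an OR with three clauses that each admit four
+ // index assignments would, unchecked, yield 4 * 4 * 4 = 64 candidate plans.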
+ size_t maxSolutionsPerOr;
+
+ // How many intersect plans are we willing to output from an AND? Given that we pursue an
+ // all-pairs approach, we could wind up creating a lot of enumeration possibilities for
+ // certain inputs.
+ size_t maxIntersectPerAnd;
+};
+
+/**
+ * Provides elements from the power set of possible indices to use. Uses the available
+ * predicate information to make better decisions about what indices are best.
+ */
+class PlanEnumerator {
+ MONGO_DISALLOW_COPYING(PlanEnumerator);
+
+public:
+ /**
+ * Constructs an enumerator for the query specified in 'root' which is tagged with
+ * RelevantTag(s). The index patterns mentioned in the tags are described by 'indices'.
+ *
+ * Does not take ownership of any arguments. They must outlive any calls to getNext(...).
+ */
+ PlanEnumerator(const PlanEnumeratorParams& params);
+
+ ~PlanEnumerator();
+
+ /**
+ * Returns OK and performs a sanity check on the input parameters and prepares the
+ * internal state so that getNext() can be called. Returns an error status with a
+ * description if the sanity check failed.
+ */
+ Status init();
+
+ /**
+ * Outputs a possible plan. Leaves in the plan are tagged with an index to use.
+ * Returns true if a plan was outputted, false if no more plans will be outputted.
+ *
+ * 'tree' is set to point to the query tree. A QueryAssignment is built from this tree.
+ * Caller owns the pointer. Note that 'tree' itself points into data owned by the
+ * provided CanonicalQuery.
+ *
+ * Nodes in 'tree' are tagged with indices that should be used to answer the tagged nodes.
+ * Only nodes that have a field name (isLogical() == false) will be tagged.
+ */
+ bool getNext(MatchExpression** tree);
+
+private:
+ //
+ // Memoization strategy
+ //
+
+
+ // Everything is really a size_t but it's far more readable to impose a type via typedef.
+
+ // An ID we use to index into _memo. An entry in _memo is a NodeAssignment.
+ typedef size_t MemoID;
+
+ // An index in _indices.
+ typedef size_t IndexID;
+
+ // The position of a field in a possibly compound index.
+ typedef size_t IndexPosition;
+
+ struct PrepMemoContext {
+ PrepMemoContext() : elemMatchExpr(NULL) {}
+ MatchExpression* elemMatchExpr;
+ };
+
+ /**
+ * Traverses the match expression and generates the memo structure from it.
+ * Returns true if the provided node uses an index, false otherwise.
+ */
+ bool prepMemo(MatchExpression* node, PrepMemoContext context);
+
+ /**
+ * Traverses the memo structure and annotates the tree with IndexTags for the chosen
+ * indices.
+ */
+ void tagMemo(MemoID id);
+
+ /**
+ * Move to the next enumeration state. Each assignment stores its own enumeration state.
+ * See the various ____Assignment classes below for details on enumeration state.
+ *
+ * Returns true if the memo subtree with root 'node' has no further enumeration states. In
+ * this case, that subtree restarts its enumeration at the beginning state. This implies
+ * that the parent of node should move to the next state. If 'node' is the root of the
+ * tree, we are done with enumeration.
+ *
+ * The return of this function can be thought of like a 'carry' in addition.
+ *
+ * Returns false if the memo subtree has moved to the next state.
+ */
+ bool nextMemo(MemoID id);
+ /**
+ * A short word on the memo structure.
+ *
+ * The PlanEnumerator is interested in matching predicates and indices. Predicates
+ * are leaf nodes in the parse tree. {x:5}, {x: {$geoWithin:...}} are both predicates.
+ *
+ * When we have simple predicates, like {x:5}, the task is easy: any indices prefixed
+ * with 'x' can be used to answer the predicate. This is where the PredicateAssignment
+ * is used.
+ *
+ * With logical operators, things are more complicated. Let's start with OR, the simplest.
+ * Since the output of an OR is the union of its results, each of its children must be
+ * indexed for the entire OR to be indexed. If each subtree of an OR is indexable, the
+ * OR is as well.
+ *
+ * For an AND to be indexed, only one of its children must be indexed. AND is an
+ * intersection of its children, so each of its children describes a superset of the
+ * produced results.
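+ *
+ * For example (illustrative), {$or: [{a: 1}, {b: 1}]} is indexable only if
+ * both 'a' and 'b' are indexed, whereas the AND {a: 1, b: 1} is indexable
+ * if either field has an index.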
+ */
+
+ struct PredicateAssignment {
+ PredicateAssignment() : indexToAssign(0) {}
+
+ std::vector<IndexID> first;
// Not owned here.
- const std::vector<IndexEntry>* indices;
+ MatchExpression* expr;
+
+ // Enumeration state. An indexed predicate's possible states are the indices that the
+ // predicate can directly use (the 'first' indices). As such this value ranges from 0
+ // to first.size()-1 inclusive.
+ size_t indexToAssign;
+ };
+
+ struct OrAssignment {
+ OrAssignment() : counter(0) {}
+
+ // Each child of an OR must be indexed for the OR to be indexed. When an OR moves to a
+ // subsequent state it just asks all its children to move their states forward.
- // How many plans are we willing to ouput from an OR? We currently consider
- // all possibly OR plans, which means the product of the number of possibilities
- // for each clause of the OR. This could grow disastrously large.
- size_t maxSolutionsPerOr;
+ // Must use all of subnodes.
+ std::vector<MemoID> subnodes;
- // How many intersect plans are we willing to output from an AND? Given that we pursue an
- // all-pairs approach, we could wind up creating a lot of enumeration possibilities for
- // certain inputs.
- size_t maxIntersectPerAnd;
+ // The number of OR states that we've enumerated so far.
+ size_t counter;
};
+ // This is used by AndAssignment and is not an actual assignment.
+ struct OneIndexAssignment {
+ // 'preds[i]' uses index 'index' at position 'positions[i]'.
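+ // e.g., for index {a: 1, b: 1} (illustrative), a pred compounded over 'b'
+ // would get position 1.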
+ std::vector<MatchExpression*> preds;
+ std::vector<IndexPosition> positions;
+ IndexID index;
+ };
+
+ struct AndEnumerableState {
+ std::vector<OneIndexAssignment> assignments;
+ std::vector<MemoID> subnodesToIndex;
+ };
+
+ struct AndAssignment {
+ AndAssignment() : counter(0) {}
+
+ std::vector<AndEnumerableState> choices;
+
+ // We're on the counter-th member of state.
+ size_t counter;
+ };
+
+ struct ArrayAssignment {
+ ArrayAssignment() : counter(0) {}
+ std::vector<MemoID> subnodes;
+ size_t counter;
+ };
+
+ /**
+ * Associates indices with predicates.
+ */
+ struct NodeAssignment {
+ std::unique_ptr<PredicateAssignment> pred;
+ std::unique_ptr<OrAssignment> orAssignment;
+ std::unique_ptr<AndAssignment> andAssignment;
+ std::unique_ptr<ArrayAssignment> arrayAssignment;
+ std::string toString() const;
+ };
+
+ /**
+ * Allocates a NodeAssignment and associates it with the provided 'expr'.
+ *
+ * The unique MemoID of the new assignment is outputted in '*id'.
+ * The out parameter '*slot' points to the newly allocated NodeAssignment.
+ */
+ void allocateAssignment(MatchExpression* expr, NodeAssignment** slot, MemoID* id);
+
+ /**
+ * Predicates inside $elemMatch's that are semantically "$and of $and"
+ * predicates are not rewritten to the top-level during normalization.
+ * However, we would like to make predicates inside $elemMatch available
+ * for combining index bounds with the top-level $and predicates.
+ *
+ * This function deeply traverses $and and $elemMatch expressions of
+ * the tree rooted at 'node', adding all preds that can use an index
+ * to the output vector 'indexOut'. At the same time, $elemMatch
+ * context information is stashed in the tags so that we don't lose
+ * information due to flattening.
+ *
+ * Nodes that cannot be deeply traversed are returned via the output
+ * vectors 'subnodesOut' and 'mandatorySubnodes'. Subnodes are "mandatory"
+ * if they *must* use an index (TEXT and GEO).
+ *
+ * Does not take ownership of arguments.
+ *
+ * Returns false if the AND cannot be indexed. Otherwise returns true.
+ */
+ bool partitionPreds(MatchExpression* node,
+ PrepMemoContext context,
+ std::vector<MatchExpression*>* indexOut,
+ std::vector<MemoID>* subnodesOut,
+ std::vector<MemoID>* mandatorySubnodes);
+
+ /**
+ * Finds a set of predicates that can be safely compounded with the set
+ * of predicates in 'assigned', under the assumption that we are assigning
+ * predicates to a compound, multikey index.
+ *
+ * The list of candidate predicates that we could compound is passed
+ * in 'couldCompound'. A subset of these predicates that is safe to
+ * combine by compounding is returned in the out-parameter 'out'.
+ *
+ * Does not take ownership of its arguments.
+ *
+ * The rules for when to compound for multikey indices are reasonably
+ * complex, and are dependent on the structure of $elemMatch's used
+ * in the query. Ignoring $elemMatch for the time being, the rule is this:
+ *
+ * "Any set of predicates for which no two predicates share a path
+ * prefix can be compounded."
+ *
+ * Suppose we have predicates over paths 'a.b' and 'a.c'. These cannot
+ * be compounded because they share the prefix 'a'. Similarly, the bounds
+ * for 'a' and 'a.b' cannot be compounded (in the case of multikey index
+ * {a: 1, 'a.b': 1}). You *can* compound predicates over the paths 'a.b.c',
+ * 'd', and 'e.b.c', because there is no shared prefix.
+ *
+ * The rules are different in the presence of $elemMatch. For $elemMatch
+ * {a: {$elemMatch: {<pred1>, ..., <predN>}}}, we are allowed to compound
+ * bounds for pred1 through predN, even though these predicates share the
+ * path prefix 'a'. However, we still cannot compound in the case of
+ * {a: {$elemMatch: {'b.c': {$gt: 1}, 'b.d': 5}}} because 'b.c' and 'b.d'
+ * share a prefix. In other words, what matters inside an $elemMatch is not
+ * the absolute prefix, but rather the "relative prefix" after the shared
+ * $elemMatch part of the path.
+ *
+ * A few more examples:
+ * 1) {'a.b': {$elemMatch: {c: {$gt: 1}, d: 5}}}. In this case, we can
+ * compound, because the $elemMatch is applied to the shared part of
+ * the path 'a.b'.
+ *
+ * 2) {'a.b': 1, a: {$elemMatch: {b: {$gt: 0}}}}. We cannot combine the
+ * bounds here because the prefix 'a' is shared by two predicates which
+ * are not joined together by an $elemMatch.
+ *
+ * NOTE:
+ * Usually 'assigned' has just one predicate. However, in order to support
+ * mandatory predicate assignment (TEXT and GEO_NEAR), we allow multiple
+ * already-assigned predicates to be passed. If a mandatory predicate is over
+ * a trailing field in a multikey compound index, then we assign both a predicate
+ * over the leading field as well as the mandatory predicate prior to calling
+ * this function.
+ *
+ * Ex:
+ * Say we have index {a: 1, b: 1, c: "2dsphere", d: 1} as well as a $near
+ * predicate and a $within predicate over "c". The $near predicate is mandatory
+ * and must be assigned. The $within predicate is not mandatory. Furthermore,
+ * it cannot be assigned in addition to the $near predicate because the index
+ * is multikey.
+ *
+ * In this case the enumerator must assign the $near predicate, and pass it in
+ * in 'assigned'. Otherwise it would be possible to assign the $within predicate,
+ * and then not assign the $near because the $within is already assigned (and
+ * has the same path).
+ */
+ void getMultikeyCompoundablePreds(const std::vector<MatchExpression*>& assigned,
+ const std::vector<MatchExpression*>& couldCompound,
+ std::vector<MatchExpression*>* out);
+
+ /**
+ * 'andAssignment' contains assignments that we've already committed to outputting,
+ * including both single index assignments and ixisect assignments.
+ *
+ * 'ixisectAssigned' is a set of predicates that we are about to add to 'andAssignment'
+ * as an index intersection assignment.
+ *
+ * Returns true if a single index assignment which is already in 'andAssignment'
+ * contains a superset of the predicates in 'ixisectAssigned'. This means that we
+ * can assign the same preds to a compound index rather than using index intersection.
+ *
+ * Ex.
+ * Suppose we have indices {a: 1}, {b: 1}, and {a: 1, b: 1} with query
+ * {a: 2, b: 2}. When we try to intersect {a: 1} and {b: 1} the predicates
+ * a==2 and b==2 will get assigned to respective indices. But then we will
+ * call this function with ixisectAssigned equal to the set {'a==2', 'b==2'},
+ * and notice that we have already assigned this same set of predicates to
+ * the single index {a: 1, b: 1} via compounding.
+ */
+ bool alreadyCompounded(const std::set<MatchExpression*>& ixisectAssigned,
+ const AndAssignment* andAssignment);
+ /**
+ * Output index intersection assignments inside of an AND node.
+ */
+ typedef unordered_map<IndexID, std::vector<MatchExpression*>> IndexToPredMap;
+
/**
- * Provides elements from the power set of possible indices to use. Uses the available
- * predicate information to make better decisions about what indices are best.
+ * Generate index intersection assignments given the predicate/index structure in idxToFirst
+ * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments in
+ * 'andAssignment'.
*/
- class PlanEnumerator {
- MONGO_DISALLOW_COPYING(PlanEnumerator);
- public:
- /**
- * Constructs an enumerator for the query specified in 'root' which is tagged with
- * RelevantTag(s). The index patterns mentioned in the tags are described by 'indices'.
- *
- * Does not take ownership of any arguments. They must outlive any calls to getNext(...).
- */
- PlanEnumerator(const PlanEnumeratorParams& params);
-
- ~PlanEnumerator();
-
- /**
- * Returns OK and performs a sanity check on the input parameters and prepares the
- * internal state so that getNext() can be called. Returns an error status with a
- * description if the sanity check failed.
- */
- Status init();
-
- /**
- * Outputs a possible plan. Leaves in the plan are tagged with an index to use.
- * Returns true if a plan was outputted, false if no more plans will be outputted.
- *
- * 'tree' is set to point to the query tree. A QueryAssignment is built from this tree.
- * Caller owns the pointer. Note that 'tree' itself points into data owned by the
- * provided CanonicalQuery.
- *
- * Nodes in 'tree' are tagged with indices that should be used to answer the tagged nodes.
- * Only nodes that have a field name (isLogical() == false) will be tagged.
- */
- bool getNext(MatchExpression** tree);
-
- private:
-
- //
- // Memoization strategy
- //
-
-
- // Everything is really a size_t but it's far more readable to impose a type via typedef.
-
- // An ID we use to index into _memo. An entry in _memo is a NodeAssignment.
- typedef size_t MemoID;
-
- // An index in _indices.
- typedef size_t IndexID;
-
- // The position of a field in a possibly compound index.
- typedef size_t IndexPosition;
-
- struct PrepMemoContext {
- PrepMemoContext() : elemMatchExpr(NULL) { }
- MatchExpression* elemMatchExpr;
- };
-
- /**
- * Traverses the match expression and generates the memo structure from it.
- * Returns true if the provided node uses an index, false otherwise.
- */
- bool prepMemo(MatchExpression* node, PrepMemoContext context);
-
- /**
- * Traverses the memo structure and annotates the tree with IndexTags for the chosen
- * indices.
- */
- void tagMemo(MemoID id);
-
- /**
- * Move to the next enumeration state. Each assignment stores its own enumeration state.
- * See the various ____Assignment classes below for details on enumeration state.
- *
- * Returns true if the memo subtree with root 'node' has no further enumeration states. In
- * this case, that subtree restarts its enumeration at the beginning state. This implies
- * that the parent of node should move to the next state. If 'node' is the root of the
- * tree, we are done with enumeration.
- *
- * The return of this function can be thought of like a 'carry' in addition.
- *
- * Returns false if the memo subtree has moved to the next state.
- */
- bool nextMemo(MemoID id);
-
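The 'carry' behaviour of nextMemo() can be pictured with a standalone mixed-radix counter; this is only an analogy for the enumeration-state bookkeeping, not the memo structure itself:

    #include <iostream>
    #include <vector>

    // Node i has limits[i] enumeration states; states[i] is its current state.
    // Advancing past the last state wraps the node back to its first state and
    // reports a carry, which tells the caller to advance the next node as well.
    bool nextState(std::vector<size_t>& states, const std::vector<size_t>& limits, size_t i) {
        states[i]++;
        if (states[i] < limits[i]) {
            return false;  // moved to a fresh state, no carry
        }
        states[i] = 0;  // wrapped around; restart this node and carry
        return true;
    }

    int main() {
        const std::vector<size_t> limits = {2, 3};  // node 0 has 2 states, node 1 has 3
        std::vector<size_t> states = {0, 0};
        bool done = false;
        while (!done) {
            std::cout << states[0] << "," << states[1] << "\n";
            size_t i = 0;
            while (i < states.size() && nextState(states, limits, i)) {
                ++i;  // the carry propagates, like carrying a digit in addition
            }
            done = (i == states.size());  // carry out of the last node: enumeration is complete
        }
        return 0;
    }
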
- /**
- * A short word on the memo structure.
- *
- * The PlanEnumerator is interested in matching predicates and indices. Predicates
- * are leaf nodes in the parse tree. {x:5}, {x: {$geoWithin:...}} are both predicates.
- *
- * When we have simple predicates, like {x:5}, the task is easy: any indices prefixed
- * with 'x' can be used to answer the predicate. This is where the PredicateAssignment
- * is used.
- *
- * With logical operators, things are more complicated. Let's start with OR, the simplest.
- * Since the output of an OR is the union of its results, each of its children must be
- * indexed for the entire OR to be indexed. If each subtree of an OR is indexable, the
- * OR is as well.
- *
- * For an AND to be indexed, only one of its children must be indexed. AND is an
- * intersection of its children, so each of its children describes a superset of the
- * produced results.
- */
-
- struct PredicateAssignment {
- PredicateAssignment() : indexToAssign(0) { }
-
- std::vector<IndexID> first;
- // Not owned here.
- MatchExpression* expr;
-
- // Enumeration state. An indexed predicate's possible states are the indices that the
- // predicate can directly use (the 'first' indices). As such this value ranges from 0
- // to first.size()-1 inclusive.
- size_t indexToAssign;
- };
-
- struct OrAssignment {
- OrAssignment() : counter(0) { }
-
- // Each child of an OR must be indexed for the OR to be indexed. When an OR moves to a
- // subsequent state it just asks all its children to move their states forward.
-
- // Must use all of subnodes.
- std::vector<MemoID> subnodes;
-
- // The number of OR states that we've enumerated so far.
- size_t counter;
- };
-
- // This is used by AndAssignment and is not an actual assignment.
- struct OneIndexAssignment {
-            // 'preds[i]' uses index 'index' at position 'positions[i]'
- std::vector<MatchExpression*> preds;
- std::vector<IndexPosition> positions;
- IndexID index;
- };
-
- struct AndEnumerableState {
- std::vector<OneIndexAssignment> assignments;
- std::vector<MemoID> subnodesToIndex;
- };
-
- struct AndAssignment {
- AndAssignment() : counter(0) { }
-
- std::vector<AndEnumerableState> choices;
-
- // We're on the counter-th member of state.
- size_t counter;
- };
-
- struct ArrayAssignment {
- ArrayAssignment() : counter(0) { }
- std::vector<MemoID> subnodes;
- size_t counter;
- };
-
- /**
- * Associates indices with predicates.
- */
- struct NodeAssignment {
- std::unique_ptr<PredicateAssignment> pred;
- std::unique_ptr<OrAssignment> orAssignment;
- std::unique_ptr<AndAssignment> andAssignment;
- std::unique_ptr<ArrayAssignment> arrayAssignment;
- std::string toString() const;
- };
-
- /**
- * Allocates a NodeAssignment and associates it with the provided 'expr'.
- *
- * The unique MemoID of the new assignment is outputted in '*id'.
- * The out parameter '*slot' points to the newly allocated NodeAssignment.
- */
- void allocateAssignment(MatchExpression* expr, NodeAssignment** slot, MemoID* id);
-
- /**
- * Predicates inside $elemMatch's that are semantically "$and of $and"
- * predicates are not rewritten to the top-level during normalization.
- * However, we would like to make predicates inside $elemMatch available
- * for combining index bounds with the top-level $and predicates.
- *
- * This function deeply traverses $and and $elemMatch expressions of
- * the tree rooted at 'node', adding all preds that can use an index
- * to the output vector 'indexOut'. At the same time, $elemMatch
- * context information is stashed in the tags so that we don't lose
- * information due to flattening.
- *
- * Nodes that cannot be deeply traversed are returned via the output
- * vectors 'subnodesOut' and 'mandatorySubnodes'. Subnodes are "mandatory"
- * if they *must* use an index (TEXT and GEO).
- *
- * Does not take ownership of arguments.
- *
- * Returns false if the AND cannot be indexed. Otherwise returns true.
- */
- bool partitionPreds(MatchExpression* node,
- PrepMemoContext context,
- std::vector<MatchExpression*>* indexOut,
- std::vector<MemoID>* subnodesOut,
- std::vector<MemoID>* mandatorySubnodes);
-
- /**
- * Finds a set of predicates that can be safely compounded with the set
- * of predicates in 'assigned', under the assumption that we are assigning
- * predicates to a compound, multikey index.
- *
- * The list of candidate predicates that we could compound is passed
- * in 'couldCompound'. A subset of these predicates that is safe to
- * combine by compounding is returned in the out-parameter 'out'.
- *
- * Does not take ownership of its arguments.
- *
- * The rules for when to compound for multikey indices are reasonably
- * complex, and are dependent on the structure of $elemMatch's used
- * in the query. Ignoring $elemMatch for the time being, the rule is this:
- *
- * "Any set of predicates for which no two predicates share a path
- * prefix can be compounded."
- *
- * Suppose we have predicates over paths 'a.b' and 'a.c'. These cannot
- * be compounded because they share the prefix 'a'. Similarly, the bounds
- * for 'a' and 'a.b' cannot be compounded (in the case of multikey index
- * {a: 1, 'a.b': 1}). You *can* compound predicates over the paths 'a.b.c',
- * 'd', and 'e.b.c', because there is no shared prefix.
- *
- * The rules are different in the presence of $elemMatch. For $elemMatch
- * {a: {$elemMatch: {<pred1>, ..., <predN>}}}, we are allowed to compound
- * bounds for pred1 through predN, even though these predicates share the
- * path prefix 'a'. However, we still cannot compound in the case of
- * {a: {$elemMatch: {'b.c': {$gt: 1}, 'b.d': 5}}} because 'b.c' and 'b.d'
- * share a prefix. In other words, what matters inside an $elemMatch is not
- * the absolute prefix, but rather the "relative prefix" after the shared
- * $elemMatch part of the path.
- *
- * A few more examples:
- * 1) {'a.b': {$elemMatch: {c: {$gt: 1}, d: 5}}}. In this case, we can
- * compound, because the $elemMatch is applied to the shared part of
- * the path 'a.b'.
- *
- * 2) {'a.b': 1, a: {$elemMatch: {b: {$gt: 0}}}}. We cannot combine the
- * bounds here because the prefix 'a' is shared by two predicates which
- * are not joined together by an $elemMatch.
- *
- * NOTE:
- * Usually 'assigned' has just one predicate. However, in order to support
- * mandatory predicate assignment (TEXT and GEO_NEAR), we allow multiple
- * already-assigned predicates to be passed. If a mandatory predicate is over
- * a trailing field in a multikey compound index, then we assign both a predicate
- * over the leading field as well as the mandatory predicate prior to calling
- * this function.
- *
- * Ex:
- * Say we have index {a: 1, b: 1, c: "2dsphere", d: 1} as well as a $near
- * predicate and a $within predicate over "c". The $near predicate is mandatory
- * and must be assigned. The $within predicate is not mandatory. Furthermore,
- * it cannot be assigned in addition to the $near predicate because the index
- * is multikey.
- *
- * In this case the enumerator must assign the $near predicate, and pass it in
- * in 'assigned'. Otherwise it would be possible to assign the $within predicate,
- * and then not assign the $near because the $within is already assigned (and
- * has the same path).
- */
- void getMultikeyCompoundablePreds(const std::vector<MatchExpression*>& assigned,
- const std::vector<MatchExpression*>& couldCompound,
- std::vector<MatchExpression*>* out);
-
- /**
- * 'andAssignment' contains assignments that we've already committed to outputting,
- * including both single index assignments and ixisect assignments.
- *
- * 'ixisectAssigned' is a set of predicates that we are about to add to 'andAssignment'
- * as an index intersection assignment.
- *
-     * Returns true if a single index assignment which is already in 'andAssignment'
- * contains a superset of the predicates in 'ixisectAssigned'. This means that we
- * can assign the same preds to a compound index rather than using index intersection.
- *
- * Ex.
- * Suppose we have indices {a: 1}, {b: 1}, and {a: 1, b: 1} with query
- * {a: 2, b: 2}. When we try to intersect {a: 1} and {b: 1} the predicates
- * a==2 and b==2 will get assigned to respective indices. But then we will
- * call this function with ixisectAssigned equal to the set {'a==2', 'b==2'},
- * and notice that we have already assigned this same set of predicates to
- * the single index {a: 1, b: 1} via compounding.
- */
- bool alreadyCompounded(const std::set<MatchExpression*>& ixisectAssigned,
- const AndAssignment* andAssignment);
- /**
- * Output index intersection assignments inside of an AND node.
- */
- typedef unordered_map<IndexID, std::vector<MatchExpression*> > IndexToPredMap;
-
- /**
- * Generate index intersection assignments given the predicate/index structure in idxToFirst
- * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments in
- * 'andAssignment'.
- */
- void enumerateAndIntersect(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- const std::vector<MemoID>& subnodes,
- AndAssignment* andAssignment);
-
- /**
- * Generate one-index-at-once assignments given the predicate/index structure in idxToFirst
- * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments into
- * 'andAssignment'.
- */
- void enumerateOneIndex(const IndexToPredMap& idxToFirst,
+ void enumerateAndIntersect(const IndexToPredMap& idxToFirst,
const IndexToPredMap& idxToNotFirst,
const std::vector<MemoID>& subnodes,
AndAssignment* andAssignment);
- /**
- * Generate single-index assignments for queries which contain mandatory
- * predicates (TEXT and GEO_NEAR, which are required to use a compatible index).
- * Outputs these assignments into 'andAssignment'.
- *
- * Returns true if it generated at least one assignment, and false if no assignment
- * of 'mandatoryPred' is possible.
- */
- bool enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
- const IndexToPredMap& idxToNotFirst,
- MatchExpression* mandatoryPred,
- const std::set<IndexID>& mandatoryIndices,
- AndAssignment* andAssignment);
-
- /**
- * Try to assign predicates in 'tryCompound' to 'thisIndex' as compound assignments.
- * Output the assignments in 'assign'.
- */
- void compound(const std::vector<MatchExpression*>& tryCompound,
- const IndexEntry& thisIndex,
- OneIndexAssignment* assign);
-
- /**
- * Return the memo entry for 'node'. Does some sanity checking to ensure that a memo entry
- * actually exists.
- */
- MemoID memoIDForNode(MatchExpression* node);
-
- std::string dumpMemo();
-
- // Map from expression to its MemoID.
- unordered_map<MatchExpression*, MemoID> _nodeToId;
-
- // Map from MemoID to its precomputed solution info.
- unordered_map<MemoID, NodeAssignment*> _memo;
-
- // If true, there are no further enumeration states, and getNext should return false.
- // We could be _done immediately after init if we're unable to output an indexed plan.
- bool _done;
-
- //
- // Data used by all enumeration strategies
- //
-
- // Match expression we're planning for. Not owned by us.
- MatchExpression* _root;
-
- // Indices we're allowed to enumerate with. Not owned here.
- const std::vector<IndexEntry>* _indices;
-
- // Do we output >1 index per AND (index intersection)?
- bool _ixisect;
-
- // How many enumerations are we willing to produce from each OR?
- size_t _orLimit;
-
- // How many things do we want from each AND?
- size_t _intersectLimit;
- };
+ /**
+ * Generate one-index-at-once assignments given the predicate/index structure in idxToFirst
+ * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments into
+ * 'andAssignment'.
+ */
+ void enumerateOneIndex(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ const std::vector<MemoID>& subnodes,
+ AndAssignment* andAssignment);
+
+ /**
+ * Generate single-index assignments for queries which contain mandatory
+ * predicates (TEXT and GEO_NEAR, which are required to use a compatible index).
+ * Outputs these assignments into 'andAssignment'.
+ *
+ * Returns true if it generated at least one assignment, and false if no assignment
+ * of 'mandatoryPred' is possible.
+ */
+ bool enumerateMandatoryIndex(const IndexToPredMap& idxToFirst,
+ const IndexToPredMap& idxToNotFirst,
+ MatchExpression* mandatoryPred,
+ const std::set<IndexID>& mandatoryIndices,
+ AndAssignment* andAssignment);
+
+ /**
+ * Try to assign predicates in 'tryCompound' to 'thisIndex' as compound assignments.
+ * Output the assignments in 'assign'.
+ */
+ void compound(const std::vector<MatchExpression*>& tryCompound,
+ const IndexEntry& thisIndex,
+ OneIndexAssignment* assign);
+
+ /**
+ * Return the memo entry for 'node'. Does some sanity checking to ensure that a memo entry
+ * actually exists.
+ */
+ MemoID memoIDForNode(MatchExpression* node);
+
+ std::string dumpMemo();
+
+ // Map from expression to its MemoID.
+ unordered_map<MatchExpression*, MemoID> _nodeToId;
+
+ // Map from MemoID to its precomputed solution info.
+ unordered_map<MemoID, NodeAssignment*> _memo;
+
+ // If true, there are no further enumeration states, and getNext should return false.
+ // We could be _done immediately after init if we're unable to output an indexed plan.
+ bool _done;
+
+ //
+ // Data used by all enumeration strategies
+ //
+
+ // Match expression we're planning for. Not owned by us.
+ MatchExpression* _root;
+
+ // Indices we're allowed to enumerate with. Not owned here.
+ const std::vector<IndexEntry>* _indices;
+
+ // Do we output >1 index per AND (index intersection)?
+ bool _ixisect;
+
+ // How many enumerations are we willing to produce from each OR?
+ size_t _orLimit;
+
+ // How many things do we want from each AND?
+ size_t _intersectLimit;
+};
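For reference, a hedged sketch of how a caller is expected to drive this class, based on the init()/getNext() contract documented above; it assumes the in-tree headers, an already-populated PlanEnumeratorParams, and an enclosing helper that returns Status, so it is not buildable on its own:

    PlanEnumerator enumerator(params);
    Status initStatus = enumerator.init();
    if (!initStatus.isOK()) {
        return initStatus;  // the sanity check on the inputs failed
    }

    MatchExpression* taggedTree = NULL;
    while (enumerator.getNext(&taggedTree)) {
        // 'taggedTree' now has IndexTags on its indexable leaves; hand it to the
        // plan-building layer, then loop for the next enumeration state.
    }
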
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 24b01bb704e..f234948fe50 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -48,513 +48,498 @@
namespace mongo {
- using std::shared_ptr;
- using std::string;
- using std::vector;
-
- namespace {
-
- /**
- * Retrieves the first stage of a given type from the plan tree, or NULL
- * if no such stage is found.
- */
- PlanStage* getStageByType(PlanStage* root, StageType type) {
- if (root->stageType() == type) {
- return root;
- }
+using std::shared_ptr;
+using std::string;
+using std::vector;
- vector<PlanStage*> children = root->getChildren();
- for (size_t i = 0; i < children.size(); i++) {
- PlanStage* result = getStageByType(children[i], type);
- if (result) {
- return result;
- }
- }
-
- return NULL;
- }
+namespace {
+/**
+ * Retrieves the first stage of a given type from the plan tree, or NULL
+ * if no such stage is found.
+ */
+PlanStage* getStageByType(PlanStage* root, StageType type) {
+ if (root->stageType() == type) {
+ return root;
}
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, collection, "", yieldPolicy, out);
+ vector<PlanStage*> children = root->getChildren();
+ for (size_t i = 0; i < children.size(); i++) {
+ PlanStage* result = getStageByType(children[i], type);
+ if (result) {
+ return result;
+ }
}
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, NULL, ns, yieldPolicy, out);
+ return NULL;
+}
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, collection, "", yieldPolicy, out);
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ const std::string& ns,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ return PlanExecutor::make(opCtx, ws, rt, NULL, NULL, NULL, ns, yieldPolicy, out);
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ return PlanExecutor::make(opCtx, ws, rt, NULL, cq, collection, "", yieldPolicy, out);
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ return PlanExecutor::make(opCtx, ws, rt, qs, cq, collection, "", yieldPolicy, out);
+}
+
+// static
+Status PlanExecutor::make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ const std::string& ns,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out) {
+ std::unique_ptr<PlanExecutor> exec(new PlanExecutor(opCtx, ws, rt, qs, cq, collection, ns));
+
+ // Perform plan selection, if necessary.
+ Status status = exec->pickBestPlan(yieldPolicy);
+ if (!status.isOK()) {
+ return status;
}
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, NULL, cq, collection, "", yieldPolicy, out);
+ *out = exec.release();
+ return Status::OK();
+}
+
+PlanExecutor::PlanExecutor(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ const std::string& ns)
+ : _opCtx(opCtx),
+ _collection(collection),
+ _cq(cq),
+ _workingSet(ws),
+ _qs(qs),
+ _root(rt),
+ _ns(ns),
+ _yieldPolicy(new PlanYieldPolicy(this, YIELD_MANUAL)) {
+ // We may still need to initialize _ns from either _collection or _cq.
+ if (!_ns.empty()) {
+ // We already have an _ns set, so there's nothing more to do.
+ return;
}
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- return PlanExecutor::make(opCtx, ws, rt, qs, cq, collection, "", yieldPolicy, out);
+ if (NULL != _collection) {
+ _ns = _collection->ns().ns();
+ } else {
+ invariant(NULL != _cq.get());
+ _ns = _cq->getParsed().ns();
}
-
- // static
- Status PlanExecutor::make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out) {
- std::unique_ptr<PlanExecutor> exec(new PlanExecutor(opCtx, ws, rt, qs, cq, collection, ns));
-
- // Perform plan selection, if necessary.
- Status status = exec->pickBestPlan(yieldPolicy);
- if (!status.isOK()) {
- return status;
- }
-
- *out = exec.release();
- return Status::OK();
+}
+
+Status PlanExecutor::pickBestPlan(YieldPolicy policy) {
+ // For YIELD_AUTO, this will both set an auto yield policy on the PlanExecutor and
+ // register it to receive notifications.
+ this->setYieldPolicy(policy);
+
+ // First check if we need to do subplanning.
+ PlanStage* foundStage = getStageByType(_root.get(), STAGE_SUBPLAN);
+ if (foundStage) {
+ SubplanStage* subplan = static_cast<SubplanStage*>(foundStage);
+ return subplan->pickBestPlan(_yieldPolicy.get());
}
- PlanExecutor::PlanExecutor(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns)
- : _opCtx(opCtx),
- _collection(collection),
- _cq(cq),
- _workingSet(ws),
- _qs(qs),
- _root(rt),
- _ns(ns),
- _yieldPolicy(new PlanYieldPolicy(this, YIELD_MANUAL)) {
- // We may still need to initialize _ns from either _collection or _cq.
- if (!_ns.empty()) {
- // We already have an _ns set, so there's nothing more to do.
- return;
- }
+ // If we didn't have to do subplanning, we might still have to do regular
+ // multi plan selection...
+ foundStage = getStageByType(_root.get(), STAGE_MULTI_PLAN);
+ if (foundStage) {
+ MultiPlanStage* mps = static_cast<MultiPlanStage*>(foundStage);
+ return mps->pickBestPlan(_yieldPolicy.get());
+ }
- if (NULL != _collection) {
- _ns = _collection->ns().ns();
- }
- else {
- invariant(NULL != _cq.get());
- _ns = _cq->getParsed().ns();
- }
+ // ...or, we might have to run a plan from the cache for a trial period, falling back on
+ // regular planning if the cached plan performs poorly.
+ foundStage = getStageByType(_root.get(), STAGE_CACHED_PLAN);
+ if (foundStage) {
+ CachedPlanStage* cachedPlan = static_cast<CachedPlanStage*>(foundStage);
+ return cachedPlan->pickBestPlan(_yieldPolicy.get());
}
- Status PlanExecutor::pickBestPlan(YieldPolicy policy) {
- // For YIELD_AUTO, this will both set an auto yield policy on the PlanExecutor and
- // register it to receive notifications.
- this->setYieldPolicy(policy);
+ // Either we chose a plan, or no plan selection was required. In both cases,
+ // our work has been successfully completed.
+ return Status::OK();
+}
+
+PlanExecutor::~PlanExecutor() {}
+
+// static
+std::string PlanExecutor::statestr(ExecState s) {
+ if (PlanExecutor::ADVANCED == s) {
+ return "ADVANCED";
+ } else if (PlanExecutor::IS_EOF == s) {
+ return "IS_EOF";
+ } else if (PlanExecutor::DEAD == s) {
+ return "DEAD";
+ } else {
+ verify(PlanExecutor::FAILURE == s);
+ return "FAILURE";
+ }
+}
- // First check if we need to do subplanning.
- PlanStage* foundStage = getStageByType(_root.get(), STAGE_SUBPLAN);
- if (foundStage) {
- SubplanStage* subplan = static_cast<SubplanStage*>(foundStage);
- return subplan->pickBestPlan(_yieldPolicy.get());
- }
+WorkingSet* PlanExecutor::getWorkingSet() const {
+ return _workingSet.get();
+}
- // If we didn't have to do subplanning, we might still have to do regular
- // multi plan selection...
- foundStage = getStageByType(_root.get(), STAGE_MULTI_PLAN);
- if (foundStage) {
- MultiPlanStage* mps = static_cast<MultiPlanStage*>(foundStage);
- return mps->pickBestPlan(_yieldPolicy.get());
- }
+PlanStage* PlanExecutor::getRootStage() const {
+ return _root.get();
+}
- // ...or, we might have to run a plan from the cache for a trial period, falling back on
- // regular planning if the cached plan performs poorly.
- foundStage = getStageByType(_root.get(), STAGE_CACHED_PLAN);
- if (foundStage) {
- CachedPlanStage* cachedPlan = static_cast<CachedPlanStage*>(foundStage);
- return cachedPlan->pickBestPlan(_yieldPolicy.get());
- }
+CanonicalQuery* PlanExecutor::getCanonicalQuery() const {
+ return _cq.get();
+}
- // Either we chose a plan, or no plan selection was required. In both cases,
- // our work has been successfully completed.
- return Status::OK();
- }
+PlanStageStats* PlanExecutor::getStats() const {
+ return _root->getStats();
+}
- PlanExecutor::~PlanExecutor() { }
+const Collection* PlanExecutor::collection() const {
+ return _collection;
+}
- // static
- std::string PlanExecutor::statestr(ExecState s) {
- if (PlanExecutor::ADVANCED == s) {
- return "ADVANCED";
- }
- else if (PlanExecutor::IS_EOF == s) {
- return "IS_EOF";
- }
- else if (PlanExecutor::DEAD == s) {
- return "DEAD";
- }
- else {
- verify(PlanExecutor::FAILURE == s);
- return "FAILURE";
- }
- }
+OperationContext* PlanExecutor::getOpCtx() const {
+ return _opCtx;
+}
- WorkingSet* PlanExecutor::getWorkingSet() const {
- return _workingSet.get();
+void PlanExecutor::saveState() {
+ if (!killed()) {
+ _root->saveState();
}
- PlanStage* PlanExecutor::getRootStage() const {
- return _root.get();
+ // Doc-locking storage engines drop their transactional context after saving state.
+ // The query stages inside this stage tree might buffer record ids (e.g. text, geoNear,
+ // mergeSort, sort) which are no longer protected by the storage engine's transactional
+ // boundaries. Force-fetch the documents for any such record ids so that we have our
+ // own copy in the working set.
+ if (supportsDocLocking()) {
+ WorkingSetCommon::prepareForSnapshotChange(_workingSet.get());
}
- CanonicalQuery* PlanExecutor::getCanonicalQuery() const {
- return _cq.get();
- }
+ _opCtx = NULL;
+}
- PlanStageStats* PlanExecutor::getStats() const {
- return _root->getStats();
- }
+bool PlanExecutor::restoreState(OperationContext* opCtx) {
+ try {
+ return restoreStateWithoutRetrying(opCtx);
+ } catch (const WriteConflictException& wce) {
+ if (!_yieldPolicy->allowedToYield())
+ throw;
- const Collection* PlanExecutor::collection() const {
- return _collection;
+ // Handles retries by calling restoreStateWithoutRetrying() in a loop.
+ return _yieldPolicy->yield(NULL);
}
+}
- OperationContext* PlanExecutor::getOpCtx() const {
- return _opCtx;
- }
+bool PlanExecutor::restoreStateWithoutRetrying(OperationContext* opCtx) {
+ invariant(NULL == _opCtx);
+ invariant(opCtx);
- void PlanExecutor::saveState() {
- if (!killed()) {
- _root->saveState();
- }
+ _opCtx = opCtx;
- // Doc-locking storage engines drop their transactional context after saving state.
- // The query stages inside this stage tree might buffer record ids (e.g. text, geoNear,
- // mergeSort, sort) which are no longer protected by the storage engine's transactional
- // boundaries. Force-fetch the documents for any such record ids so that we have our
- // own copy in the working set.
- if (supportsDocLocking()) {
- WorkingSetCommon::prepareForSnapshotChange(_workingSet.get());
- }
+ // We're restoring after a yield or getMore now. If we're a yielding plan executor, reset
+    // the yield timer in order to prevent it from yielding again right away.
+ _yieldPolicy->resetTimer();
- _opCtx = NULL;
+ if (!killed()) {
+ _root->restoreState(opCtx);
}
- bool PlanExecutor::restoreState(OperationContext* opCtx) {
- try {
- return restoreStateWithoutRetrying(opCtx);
- }
- catch (const WriteConflictException& wce) {
- if (!_yieldPolicy->allowedToYield())
- throw;
+ return !killed();
+}
- // Handles retries by calling restoreStateWithoutRetrying() in a loop.
- return _yieldPolicy->yield(NULL);
- }
+void PlanExecutor::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
+ if (!killed()) {
+ _root->invalidate(txn, dl, type);
}
+}
- bool PlanExecutor::restoreStateWithoutRetrying(OperationContext* opCtx) {
- invariant(NULL == _opCtx);
- invariant(opCtx);
+PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, RecordId* dlOut) {
+ Snapshotted<BSONObj> snapshotted;
+ ExecState state = getNextSnapshotted(objOut ? &snapshotted : NULL, dlOut);
- _opCtx = opCtx;
-
- // We're restoring after a yield or getMore now. If we're a yielding plan executor, reset
- // the yield timer in order to prevent from yielding again right away.
- _yieldPolicy->resetTimer();
-
- if (!killed()) {
- _root->restoreState(opCtx);
- }
-
- return !killed();
- }
-
- void PlanExecutor::invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type) {
- if (!killed()) { _root->invalidate(txn, dl, type); }
+ if (objOut) {
+ *objOut = snapshotted.value();
}
- PlanExecutor::ExecState PlanExecutor::getNext(BSONObj* objOut, RecordId* dlOut) {
- Snapshotted<BSONObj> snapshotted;
- ExecState state = getNextSnapshotted(objOut ? &snapshotted : NULL, dlOut);
-
- if (objOut) {
- *objOut = snapshotted.value();
+ return state;
+}
+
+PlanExecutor::ExecState PlanExecutor::getNextSnapshotted(Snapshotted<BSONObj>* objOut,
+ RecordId* dlOut) {
+ if (killed()) {
+ if (NULL != objOut) {
+ Status status(ErrorCodes::OperationFailed,
+ str::stream() << "Operation aborted because: " << *_killReason);
+ *objOut = Snapshotted<BSONObj>(SnapshotId(),
+ WorkingSetCommon::buildMemberStatusObject(status));
}
-
- return state;
+ return PlanExecutor::DEAD;
}
- PlanExecutor::ExecState PlanExecutor::getNextSnapshotted(Snapshotted<BSONObj>* objOut,
- RecordId* dlOut) {
- if (killed()) {
- if (NULL != objOut) {
- Status status(ErrorCodes::OperationFailed,
- str::stream() << "Operation aborted because: " << *_killReason);
- *objOut = Snapshotted<BSONObj>(SnapshotId(),
- WorkingSetCommon::buildMemberStatusObject(status));
- }
- return PlanExecutor::DEAD;
- }
-
- if (!_stash.empty()) {
- invariant(objOut && !dlOut);
- *objOut = {SnapshotId(), _stash.front()};
- _stash.pop();
- return PlanExecutor::ADVANCED;
- }
+ if (!_stash.empty()) {
+ invariant(objOut && !dlOut);
+ *objOut = {SnapshotId(), _stash.front()};
+ _stash.pop();
+ return PlanExecutor::ADVANCED;
+ }
- // When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
- // to use to pull the record into memory. We take ownership of the RecordFetcher here,
- // deleting it after we've had a chance to do the fetch. For timing-based yields, we
- // just pass a NULL fetcher.
- std::unique_ptr<RecordFetcher> fetcher;
-
- // Incremented on every writeConflict, reset to 0 on any successful call to _root->work.
- size_t writeConflictsInARow = 0;
-
- for (;;) {
- // These are the conditions which can cause us to yield:
- // 1) The yield policy's timer elapsed, or
- // 2) some stage requested a yield due to a document fetch, or
- // 3) we need to yield and retry due to a WriteConflictException.
- // In all cases, the actual yielding happens here.
- if (_yieldPolicy->shouldYield()) {
- _yieldPolicy->yield(fetcher.get());
-
- if (killed()) {
- if (NULL != objOut) {
- Status status(ErrorCodes::OperationFailed,
- str::stream() << "Operation aborted because: "
- << *_killReason);
- *objOut = Snapshotted<BSONObj>(
- SnapshotId(),
- WorkingSetCommon::buildMemberStatusObject(status));
- }
- return PlanExecutor::DEAD;
+ // When a stage requests a yield for document fetch, it gives us back a RecordFetcher*
+ // to use to pull the record into memory. We take ownership of the RecordFetcher here,
+ // deleting it after we've had a chance to do the fetch. For timing-based yields, we
+ // just pass a NULL fetcher.
+ std::unique_ptr<RecordFetcher> fetcher;
+
+ // Incremented on every writeConflict, reset to 0 on any successful call to _root->work.
+ size_t writeConflictsInARow = 0;
+
+ for (;;) {
+ // These are the conditions which can cause us to yield:
+ // 1) The yield policy's timer elapsed, or
+ // 2) some stage requested a yield due to a document fetch, or
+ // 3) we need to yield and retry due to a WriteConflictException.
+ // In all cases, the actual yielding happens here.
+ if (_yieldPolicy->shouldYield()) {
+ _yieldPolicy->yield(fetcher.get());
+
+ if (killed()) {
+ if (NULL != objOut) {
+ Status status(ErrorCodes::OperationFailed,
+ str::stream() << "Operation aborted because: " << *_killReason);
+ *objOut = Snapshotted<BSONObj>(
+ SnapshotId(), WorkingSetCommon::buildMemberStatusObject(status));
}
+ return PlanExecutor::DEAD;
}
+ }
- // We're done using the fetcher, so it should be freed. We don't want to
- // use the same RecordFetcher twice.
- fetcher.reset();
-
- WorkingSetID id = WorkingSet::INVALID_ID;
- PlanStage::StageState code = _root->work(&id);
+ // We're done using the fetcher, so it should be freed. We don't want to
+ // use the same RecordFetcher twice.
+ fetcher.reset();
- if (code != PlanStage::NEED_YIELD)
- writeConflictsInARow = 0;
+ WorkingSetID id = WorkingSet::INVALID_ID;
+ PlanStage::StageState code = _root->work(&id);
- if (PlanStage::ADVANCED == code) {
- // Fast count.
- if (WorkingSet::INVALID_ID == id) {
- invariant(NULL == objOut);
- invariant(NULL == dlOut);
- return PlanExecutor::ADVANCED;
- }
+ if (code != PlanStage::NEED_YIELD)
+ writeConflictsInARow = 0;
- WorkingSetMember* member = _workingSet->get(id);
- bool hasRequestedData = true;
+ if (PlanStage::ADVANCED == code) {
+ // Fast count.
+ if (WorkingSet::INVALID_ID == id) {
+ invariant(NULL == objOut);
+ invariant(NULL == dlOut);
+ return PlanExecutor::ADVANCED;
+ }
- if (NULL != objOut) {
- if (WorkingSetMember::LOC_AND_IDX == member->state) {
- if (1 != member->keyData.size()) {
- _workingSet->free(id);
- hasRequestedData = false;
- }
- else {
- // TODO: currently snapshot ids are only associated with documents, and
- // not with index keys.
- *objOut = Snapshotted<BSONObj>(SnapshotId(),
- member->keyData[0].keyData);
- }
- }
- else if (member->hasObj()) {
- *objOut = member->obj;
- }
- else {
- _workingSet->free(id);
- hasRequestedData = false;
- }
- }
+ WorkingSetMember* member = _workingSet->get(id);
+ bool hasRequestedData = true;
- if (NULL != dlOut) {
- if (member->hasLoc()) {
- *dlOut = member->loc;
- }
- else {
+ if (NULL != objOut) {
+ if (WorkingSetMember::LOC_AND_IDX == member->state) {
+ if (1 != member->keyData.size()) {
_workingSet->free(id);
hasRequestedData = false;
+ } else {
+ // TODO: currently snapshot ids are only associated with documents, and
+ // not with index keys.
+ *objOut = Snapshotted<BSONObj>(SnapshotId(), member->keyData[0].keyData);
}
- }
-
- if (hasRequestedData) {
+ } else if (member->hasObj()) {
+ *objOut = member->obj;
+ } else {
_workingSet->free(id);
- return PlanExecutor::ADVANCED;
+ hasRequestedData = false;
}
- // This result didn't have the data the caller wanted, try again.
}
- else if (PlanStage::NEED_YIELD == code) {
- if (id == WorkingSet::INVALID_ID) {
- if (!_yieldPolicy->allowedToYield()) throw WriteConflictException();
- CurOp::get(_opCtx)->debug().writeConflicts++;
- writeConflictsInARow++;
- WriteConflictException::logAndBackoff(writeConflictsInARow,
- "plan execution",
- _collection->ns().ns());
+ if (NULL != dlOut) {
+ if (member->hasLoc()) {
+ *dlOut = member->loc;
+ } else {
+ _workingSet->free(id);
+ hasRequestedData = false;
}
- else {
- WorkingSetMember* member = _workingSet->get(id);
- invariant(member->hasFetcher());
- // Transfer ownership of the fetcher. Next time around the loop a yield will
- // happen.
- fetcher.reset(member->releaseFetcher());
- }
-
- // If we're allowed to, we will yield next time through the loop.
- if (_yieldPolicy->allowedToYield()) _yieldPolicy->forceYield();
}
- else if (PlanStage::NEED_TIME == code) {
- // Fall through to yield check at end of large conditional.
+
+ if (hasRequestedData) {
+ _workingSet->free(id);
+ return PlanExecutor::ADVANCED;
}
- else if (PlanStage::IS_EOF == code) {
- return PlanExecutor::IS_EOF;
+ // This result didn't have the data the caller wanted, try again.
+ } else if (PlanStage::NEED_YIELD == code) {
+ if (id == WorkingSet::INVALID_ID) {
+ if (!_yieldPolicy->allowedToYield())
+ throw WriteConflictException();
+ CurOp::get(_opCtx)->debug().writeConflicts++;
+ writeConflictsInARow++;
+ WriteConflictException::logAndBackoff(
+ writeConflictsInARow, "plan execution", _collection->ns().ns());
+
+ } else {
+ WorkingSetMember* member = _workingSet->get(id);
+ invariant(member->hasFetcher());
+ // Transfer ownership of the fetcher. Next time around the loop a yield will
+ // happen.
+ fetcher.reset(member->releaseFetcher());
}
- else {
- invariant(PlanStage::DEAD == code || PlanStage::FAILURE == code);
- if (NULL != objOut) {
- BSONObj statusObj;
- WorkingSetCommon::getStatusMemberObject(*_workingSet, id, &statusObj);
- *objOut = Snapshotted<BSONObj>(SnapshotId(), statusObj);
- }
+ // If we're allowed to, we will yield next time through the loop.
+ if (_yieldPolicy->allowedToYield())
+ _yieldPolicy->forceYield();
+ } else if (PlanStage::NEED_TIME == code) {
+ // Fall through to yield check at end of large conditional.
+ } else if (PlanStage::IS_EOF == code) {
+ return PlanExecutor::IS_EOF;
+ } else {
+ invariant(PlanStage::DEAD == code || PlanStage::FAILURE == code);
- return (PlanStage::DEAD == code) ? PlanExecutor::DEAD : PlanExecutor::FAILURE;
+ if (NULL != objOut) {
+ BSONObj statusObj;
+ WorkingSetCommon::getStatusMemberObject(*_workingSet, id, &statusObj);
+ *objOut = Snapshotted<BSONObj>(SnapshotId(), statusObj);
}
- }
- }
-
- bool PlanExecutor::isEOF() {
- return killed() || (_stash.empty() && _root->isEOF());
- }
- void PlanExecutor::registerExec() {
- _safety.reset(new ScopedExecutorRegistration(this));
- }
-
- void PlanExecutor::deregisterExec() {
- _safety.reset();
+ return (PlanStage::DEAD == code) ? PlanExecutor::DEAD : PlanExecutor::FAILURE;
+ }
}
-
- void PlanExecutor::kill(std::string reason) {
- _killReason = std::move(reason);
- _collection = NULL;
-
- // XXX: PlanExecutor is designed to wrap a single execution tree. In the case of
- // aggregation queries, PlanExecutor wraps a proxy stage responsible for pulling results
- // from an aggregation pipeline. The aggregation pipeline pulls results from yet another
- // PlanExecutor. Such nested PlanExecutors require us to manually propagate kill() to
- // the "inner" executor. This is bad, and hopefully can be fixed down the line with the
- // unification of agg and query.
- //
- // The CachedPlanStage is another special case. It needs to update the plan cache from
- // its destructor. It needs to know whether it has been killed so that it can avoid
- // touching a potentially invalid plan cache in this case.
- //
- // TODO: get rid of this code block.
- {
- PlanStage* foundStage = getStageByType(_root.get(), STAGE_PIPELINE_PROXY);
- if (foundStage) {
- PipelineProxyStage* proxyStage = static_cast<PipelineProxyStage*>(foundStage);
- shared_ptr<PlanExecutor> childExec = proxyStage->getChildExecutor();
- if (childExec) {
- childExec->kill(*_killReason);
- }
+}
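A toy standalone model of the writeConflictsInARow accounting used in the loop above; the simulated work and the backoff schedule are assumptions for the example (the real policy lives in WriteConflictException::logAndBackoff):

    #include <chrono>
    #include <iostream>
    #include <thread>

    int main() {
        int writeConflictsInARow = 0;
        for (int attempt = 0; attempt < 9; ++attempt) {
            const bool conflicted = (attempt % 3 != 2);  // pretend every third attempt succeeds
            if (!conflicted) {
                writeConflictsInARow = 0;  // any successful unit of work resets the streak
                std::cout << "work succeeded\n";
                continue;
            }
            ++writeConflictsInARow;  // consecutive conflicts back off more and more
            std::cout << "write conflict #" << writeConflictsInARow << ", backing off\n";
            std::this_thread::sleep_for(std::chrono::milliseconds(10 * writeConflictsInARow));
        }
        return 0;
    }
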
+
+bool PlanExecutor::isEOF() {
+ return killed() || (_stash.empty() && _root->isEOF());
+}
+
+void PlanExecutor::registerExec() {
+ _safety.reset(new ScopedExecutorRegistration(this));
+}
+
+void PlanExecutor::deregisterExec() {
+ _safety.reset();
+}
+
+void PlanExecutor::kill(std::string reason) {
+ _killReason = std::move(reason);
+ _collection = NULL;
+
+ // XXX: PlanExecutor is designed to wrap a single execution tree. In the case of
+ // aggregation queries, PlanExecutor wraps a proxy stage responsible for pulling results
+ // from an aggregation pipeline. The aggregation pipeline pulls results from yet another
+ // PlanExecutor. Such nested PlanExecutors require us to manually propagate kill() to
+ // the "inner" executor. This is bad, and hopefully can be fixed down the line with the
+ // unification of agg and query.
+ //
+ // The CachedPlanStage is another special case. It needs to update the plan cache from
+ // its destructor. It needs to know whether it has been killed so that it can avoid
+ // touching a potentially invalid plan cache in this case.
+ //
+ // TODO: get rid of this code block.
+ {
+ PlanStage* foundStage = getStageByType(_root.get(), STAGE_PIPELINE_PROXY);
+ if (foundStage) {
+ PipelineProxyStage* proxyStage = static_cast<PipelineProxyStage*>(foundStage);
+ shared_ptr<PlanExecutor> childExec = proxyStage->getChildExecutor();
+ if (childExec) {
+ childExec->kill(*_killReason);
}
}
}
+}
- Status PlanExecutor::executePlan() {
- BSONObj obj;
- PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
- while (PlanExecutor::ADVANCED == state) {
- state = this->getNext(&obj, NULL);
- }
-
- if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
- return Status(ErrorCodes::OperationFailed,
- str::stream() << "Exec error: " << WorkingSetCommon::toStatusString(obj)
- << ", state: " << PlanExecutor::statestr(state));
- }
-
- invariant(PlanExecutor::IS_EOF == state);
- return Status::OK();
+Status PlanExecutor::executePlan() {
+ BSONObj obj;
+ PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
+ while (PlanExecutor::ADVANCED == state) {
+ state = this->getNext(&obj, NULL);
}
- const string& PlanExecutor::ns() {
- return _ns;
+ if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
+ return Status(ErrorCodes::OperationFailed,
+ str::stream() << "Exec error: " << WorkingSetCommon::toStatusString(obj)
+ << ", state: " << PlanExecutor::statestr(state));
}
- void PlanExecutor::setYieldPolicy(YieldPolicy policy, bool registerExecutor) {
- _yieldPolicy->setPolicy(policy);
- if (PlanExecutor::YIELD_AUTO == policy) {
- // Runners that yield automatically generally need to be registered so that
- // after yielding, they receive notifications of events like deletions and
- // index drops. The only exception is that a few PlanExecutors get registered
- // by ClientCursor instead of being registered here. This is unneeded if we only do
- // partial "yields" for WriteConflict retrying.
- if (registerExecutor) {
- this->registerExec();
- }
+ invariant(PlanExecutor::IS_EOF == state);
+ return Status::OK();
+}
+
+const string& PlanExecutor::ns() {
+ return _ns;
+}
+
+void PlanExecutor::setYieldPolicy(YieldPolicy policy, bool registerExecutor) {
+ _yieldPolicy->setPolicy(policy);
+ if (PlanExecutor::YIELD_AUTO == policy) {
+ // Runners that yield automatically generally need to be registered so that
+ // after yielding, they receive notifications of events like deletions and
+ // index drops. The only exception is that a few PlanExecutors get registered
+ // by ClientCursor instead of being registered here. This is unneeded if we only do
+ // partial "yields" for WriteConflict retrying.
+ if (registerExecutor) {
+ this->registerExec();
}
}
-
- void PlanExecutor::enqueue(const BSONObj& obj) {
- _stash.push(obj.getOwned());
- }
-
- //
- // ScopedExecutorRegistration
- //
-
- PlanExecutor::ScopedExecutorRegistration::ScopedExecutorRegistration(PlanExecutor* exec)
- : _exec(exec) {
- // Collection can be null for an EOFStage plan, or other places where registration
- // is not needed.
- if (_exec->collection()) {
- _exec->collection()->getCursorManager()->registerExecutor(exec);
- }
+}
+
+void PlanExecutor::enqueue(const BSONObj& obj) {
+ _stash.push(obj.getOwned());
+}
+
+//
+// ScopedExecutorRegistration
+//
+
+PlanExecutor::ScopedExecutorRegistration::ScopedExecutorRegistration(PlanExecutor* exec)
+ : _exec(exec) {
+ // Collection can be null for an EOFStage plan, or other places where registration
+ // is not needed.
+ if (_exec->collection()) {
+ _exec->collection()->getCursorManager()->registerExecutor(exec);
}
+}
- PlanExecutor::ScopedExecutorRegistration::~ScopedExecutorRegistration() {
- if (_exec->collection()) {
- _exec->collection()->getCursorManager()->deregisterExecutor(_exec);
- }
+PlanExecutor::ScopedExecutorRegistration::~ScopedExecutorRegistration() {
+ if (_exec->collection()) {
+ _exec->collection()->getCursorManager()->deregisterExecutor(_exec);
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h
index c3e6fdc94c7..34611eba38f 100644
--- a/src/mongo/db/query/plan_executor.h
+++ b/src/mongo/db/query/plan_executor.h
@@ -38,413 +38,414 @@
namespace mongo {
- class BSONObj;
- class Collection;
- class RecordId;
- class PlanStage;
- class PlanExecutor;
- struct PlanStageStats;
- class PlanYieldPolicy;
- class WorkingSet;
+class BSONObj;
+class Collection;
+class RecordId;
+class PlanStage;
+class PlanExecutor;
+struct PlanStageStats;
+class PlanYieldPolicy;
+class WorkingSet;
+
+/**
+ * A PlanExecutor is the abstraction that knows how to crank a tree of stages into execution.
+ * The executor is usually part of a larger abstraction that is interacting with the cache
+ * and/or the query optimizer.
+ *
+ * Executes a plan. Calls work() on a plan until a result is produced. Stops when the plan is
+ * EOF or if the plan errors.
+ */
+class PlanExecutor {
+public:
+ enum ExecState {
+ // We successfully populated the out parameter.
+ ADVANCED,
+
+ // We're EOF. We won't return any more results (edge case exception: capped+tailable).
+ IS_EOF,
+
+ // We were killed. This is a special failure case in which we cannot rely on the
+ // collection or database to still be valid.
+ // If the underlying PlanStage has any information on the error, it will be available in
+ // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
+ // details from the output BSON object.
+ DEAD,
+
+ // getNext was asked for data it cannot provide, or the underlying PlanStage had an
+ // unrecoverable error.
+ // If the underlying PlanStage has any information on the error, it will be available in
+ // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
+ // details from the output BSON object.
+ FAILURE,
+ };
/**
- * A PlanExecutor is the abstraction that knows how to crank a tree of stages into execution.
- * The executor is usually part of a larger abstraction that is interacting with the cache
- * and/or the query optimizer.
- *
- * Executes a plan. Calls work() on a plan until a result is produced. Stops when the plan is
- * EOF or if the plan errors.
+ * The yielding policy of the plan executor. By default, an executor does not yield itself
+ * (YIELD_MANUAL).
*/
- class PlanExecutor {
- public:
-
- enum ExecState {
- // We successfully populated the out parameter.
- ADVANCED,
-
- // We're EOF. We won't return any more results (edge case exception: capped+tailable).
- IS_EOF,
-
- // We were killed. This is a special failure case in which we cannot rely on the
- // collection or database to still be valid.
- // If the underlying PlanStage has any information on the error, it will be available in
- // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
- // details from the output BSON object.
- DEAD,
-
- // getNext was asked for data it cannot provide, or the underlying PlanStage had an
- // unrecoverable error.
- // If the underlying PlanStage has any information on the error, it will be available in
- // the objOut parameter. Call WorkingSetCommon::toStatusString() to retrieve the error
- // details from the output BSON object.
- FAILURE,
- };
-
- /**
- * The yielding policy of the plan executor. By default, an executor does not yield itself
- * (YIELD_MANUAL).
- */
- enum YieldPolicy {
- // Any call to getNext() may yield. In particular, the executor may be killed during any
- // call to getNext(). If this occurs, getNext() will return DEAD. Additionally, this
- // will handle all WriteConflictExceptions that occur while processing the query.
- YIELD_AUTO,
-
- // This will handle WriteConflictExceptions that occur while processing the query, but
- // will not yield locks. abandonSnapshot() will be called if a WriteConflictException
- // occurs so callers must be prepared to get a new snapshot.
- WRITE_CONFLICT_RETRY_ONLY,
-
- // Owner must yield manually if yields are requested. How to yield yourself:
- //
- // 0. Let's say you have PlanExecutor* exec.
- //
- // 1. Register your PlanExecutor with ClientCursor. Registered executors are informed
- // about RecordId deletions and namespace invalidation, as well as other important
- // events. Do this by calling registerExec() on the executor. Alternatively, this can
- // be done per-yield (as described below).
- //
- // 2. Construct a PlanYieldPolicy 'policy', passing 'exec' to the constructor.
- //
- // 3. Call PlanYieldPolicy::yield() on 'policy'. If your PlanExecutor is not yet
- // registered (because you want to register on a per-yield basis), then pass
- // 'true' to yield().
- //
- // 4. The call to yield() returns a boolean indicating whether or not 'exec' is
- // still alove. If it is false, then 'exec' was killed during the yield and is
- // no longer valid.
- //
- // It is not possible to handle WriteConflictExceptions in this mode without restarting
- // the query.
- YIELD_MANUAL,
- };
-
+ enum YieldPolicy {
+ // Any call to getNext() may yield. In particular, the executor may be killed during any
+ // call to getNext(). If this occurs, getNext() will return DEAD. Additionally, this
+ // will handle all WriteConflictExceptions that occur while processing the query.
+ YIELD_AUTO,
+
+ // This will handle WriteConflictExceptions that occur while processing the query, but
+ // will not yield locks. abandonSnapshot() will be called if a WriteConflictException
+ // occurs so callers must be prepared to get a new snapshot.
+ WRITE_CONFLICT_RETRY_ONLY,
+
+ // Owner must yield manually if yields are requested. How to yield yourself:
//
- // Factory methods.
+ // 0. Let's say you have PlanExecutor* exec.
//
- // On success, return a new PlanExecutor, owned by the caller, through 'out'.
+ // 1. Register your PlanExecutor with ClientCursor. Registered executors are informed
+ // about RecordId deletions and namespace invalidation, as well as other important
+ // events. Do this by calling registerExec() on the executor. Alternatively, this can
+ // be done per-yield (as described below).
//
- // Passing YIELD_AUTO to any of these factories will construct a yielding executor which
- // may yield in the following circumstances:
- // 1) During plan selection inside the call to make().
- // 2) On any call to getNext().
- // 3) While executing the plan inside executePlan().
+ // 2. Construct a PlanYieldPolicy 'policy', passing 'exec' to the constructor.
//
- // The executor will also be automatically registered to receive notifications in the
- // case of YIELD_AUTO, so no further calls to registerExec() or setYieldPolicy() are
- // necessary.
+ // 3. Call PlanYieldPolicy::yield() on 'policy'. If your PlanExecutor is not yet
+ // registered (because you want to register on a per-yield basis), then pass
+ // 'true' to yield().
//
+ // 4. The call to yield() returns a boolean indicating whether or not 'exec' is
+    //       still alive. If it is false, then 'exec' was killed during the yield and is
+ // no longer valid.
+ //
+ // It is not possible to handle WriteConflictExceptions in this mode without restarting
+ // the query.
+ YIELD_MANUAL,
+ };
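A hedged sketch of the YIELD_MANUAL recipe from the numbered steps above; it assumes the in-tree PlanExecutor and PlanYieldPolicy headers (the yield() argument is taken from plan_executor.cpp) and is not meant to compile outside the server tree:

    void yieldManually(PlanExecutor* exec) {                       // step 0: an executor you own
        exec->registerExec();                                      // step 1: receive invalidations
        PlanYieldPolicy policy(exec, PlanExecutor::YIELD_MANUAL);  // step 2
        const bool stillAlive = policy.yield(NULL);                // step 3: give up locks for a while
        if (!stillAlive) {
            // step 4: 'exec' was killed during the yield and must not be used again.
        }
    }
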
- /**
- * Used when there is no canonical query and no query solution.
- *
- * Right now this is only for idhack updates which neither canonicalize
- * nor go through normal planning.
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /**
- * Used when we have a NULL collection and no canonical query. In this case,
- * we need to explicitly pass a namespace to the plan executor.
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /**
- * Used when there is a canonical query but no query solution (e.g. idhack
- * queries, queries against a NULL collection, queries using the subplan stage).
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /**
- * The constructor for the normal case, when you have both a canonical query
- * and a query solution.
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- ~PlanExecutor();
+ //
+ // Factory methods.
+ //
+ // On success, return a new PlanExecutor, owned by the caller, through 'out'.
+ //
+ // Passing YIELD_AUTO to any of these factories will construct a yielding executor which
+ // may yield in the following circumstances:
+ // 1) During plan selection inside the call to make().
+ // 2) On any call to getNext().
+ // 3) While executing the plan inside executePlan().
+ //
+ // The executor will also be automatically registered to receive notifications in the
+ // case of YIELD_AUTO, so no further calls to registerExec() or setYieldPolicy() are
+ // necessary.
+ //
- //
- // Accessors
- //
+ /**
+ * Used when there is no canonical query and no query solution.
+ *
+ * Right now this is only for idhack updates which neither canonicalize
+ * nor go through normal planning.
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
- /**
- * Get the working set used by this executor, without transferring ownership.
- */
- WorkingSet* getWorkingSet() const;
-
- /**
- * Get the stage tree wrapped by this executor, without transferring ownership.
- */
- PlanStage* getRootStage() const;
-
- /**
- * Get the query that this executor is executing, without transferring ownership.
- */
- CanonicalQuery* getCanonicalQuery() const;
-
- /**
- * The collection in which this executor is working.
- */
- const Collection* collection() const;
-
- /**
- * Return the NS that the query is running over.
- */
- const std::string& ns();
-
- /**
- * Return the OperationContext that the plan is currently executing within.
- */
- OperationContext* getOpCtx() const;
-
- /**
- * Generates a tree of stats objects with a separate lifetime from the execution
- * stage tree wrapped by this PlanExecutor. The caller owns the returned pointer.
- *
- * This is OK even if we were killed.
- */
- PlanStageStats* getStats() const;
+ /**
+ * Used when we have a NULL collection and no canonical query. In this case,
+ * we need to explicitly pass a namespace to the plan executor.
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ const std::string& ns,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
- //
- // Methods that just pass down to the PlanStage tree.
- //
+ /**
+ * Used when there is a canonical query but no query solution (e.g. idhack
+ * queries, queries against a NULL collection, queries using the subplan stage).
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
- /**
- * Save any state required to either
- * 1. hibernate waiting for a getMore, or
- * 2. yield the lock (on applicable storage engines) to allow writes to proceed.
- */
- void saveState();
-
- /**
- * Restores the state saved by a saveState() call.
- *
- * Returns true if the state was successfully restored and the execution tree can be
- * work()'d.
- *
- * If allowed, will yield and retry if a WriteConflictException is encountered.
- *
- * Returns false otherwise. The execution tree cannot be worked and should be deleted.
- */
- bool restoreState(OperationContext* opCtx);
-
- /**
- * Same as restoreState but without the logic to retry if a WriteConflictException is
- * thrown.
- *
- * This is only public for PlanYieldPolicy. DO NOT CALL ANYWHERE ELSE.
- */
- bool restoreStateWithoutRetrying(OperationContext* opCtx);
+ /**
+ * The constructor for the normal case, when you have both a canonical query
+ * and a query solution.
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
- //
- // Running Support
- //
+ ~PlanExecutor();
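A hedged usage sketch of the factory and getNext() contract documented above, written as if inside a helper that returns Status; it assumes the in-tree headers and that opCtx, ws, root, querySolution, canonicalQuery and collection were built by the caller, so it is a sketch rather than compilable standalone code:

    PlanExecutor* rawExec;
    Status status = PlanExecutor::make(
        opCtx, ws, root, querySolution, canonicalQuery, collection, PlanExecutor::YIELD_AUTO, &rawExec);
    if (!status.isOK()) {
        return status;  // with YIELD_AUTO, plan selection inside make() may fail or be killed
    }
    std::unique_ptr<PlanExecutor> exec(rawExec);  // on success the caller owns the executor

    BSONObj obj;
    PlanExecutor::ExecState state;
    while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
        // consume 'obj'; with YIELD_AUTO this call may yield, and may return DEAD if killed
    }
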
- /**
- * Return the next result from the underlying execution tree.
- *
- * For read operations, objOut or dlOut are populated with another query result.
- *
- * For write operations, the return depends on the particulars of the write stage.
- *
- * If a YIELD_AUTO policy is set, then this method may yield.
- */
- ExecState getNextSnapshotted(Snapshotted<BSONObj>* objOut, RecordId* dlOut);
- ExecState getNext(BSONObj* objOut, RecordId* dlOut);
-
- /**
- * Returns 'true' if the plan is done producing results (or writing), 'false' otherwise.
- *
- * Tailable cursors are a possible exception to this: they may have further results even if
- * isEOF() returns true.
- */
- bool isEOF();
-
- /**
- * Execute the plan to completion, throwing out the results. Used when you want to work the
- * underlying tree without getting results back.
- *
- * If a YIELD_AUTO policy is set on this executor, then this will automatically yield.
- */
- Status executePlan();
+ //
+ // Accessors
+ //
- //
- // Concurrency-related methods.
- //
+ /**
+ * Get the working set used by this executor, without transferring ownership.
+ */
+ WorkingSet* getWorkingSet() const;
+
+ /**
+ * Get the stage tree wrapped by this executor, without transferring ownership.
+ */
+ PlanStage* getRootStage() const;
- /**
- * Register this plan executor with the collection cursor manager so that it
- * receives notifications for events that happen while yielding any locks.
- *
- * Deregistration happens automatically when this plan executor is destroyed.
- */
- void registerExec();
-
- /**
- * Unregister this PlanExecutor. Normally you want the PlanExecutor to be registered
- * for its lifetime, and you shouldn't have to call this explicitly.
- */
- void deregisterExec();
-
- /**
- * If we're yielding locks, the database we're operating over or any collection we're
- * relying on may be dropped. When this happens all cursors and plan executors on that
- * database and collection are killed or deleted in some fashion. Callers must specify
- * the 'reason' for why this executor is being killed.
- */
- void kill(std::string reason);
-
- /**
- * If we're yielding locks, writes may occur to documents that we rely on to keep valid
- * state. As such, if the plan yields, it must be notified of relevant writes so that
- * we can ensure that it doesn't crash if we try to access invalid state.
- */
- void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
-
- /**
- * Helper method to aid in displaying an ExecState for debug or other recreational purposes.
- */
- static std::string statestr(ExecState s);
-
- /**
- * Change the yield policy of the PlanExecutor to 'policy'. If 'registerExecutor' is true,
- * and the yield policy is YIELD_AUTO, then the plan executor gets registered to receive
- * notifications of events from other threads.
- *
- * Everybody who sets the policy to YIELD_AUTO really wants to call registerExec()
- * immediately after EXCEPT commands that create cursors...so we expose the ability to
- * register (or not) here, rather than require all users to have yet another RAII object.
- * Only cursor-creating things like find.cpp set registerExecutor to false.
- */
- void setYieldPolicy(YieldPolicy policy, bool registerExecutor = true);
-
- /**
- * Stash the BSONObj so that it gets returned from the PlanExecutor on a later call to
- * getNext().
- *
- * Enqueued documents are returned in FIFO order. The queued results are exhausted before
- * generating further results from the underlying query plan.
- *
- * Subsequent calls to getNext() must request the BSONObj and *not* the RecordId.
- *
- * If used in combination with getNextSnapshotted(), then the SnapshotId associated with
- * 'obj' will be null when 'obj' is dequeued.
- */
- void enqueue(const BSONObj& obj);
-
- private:
- /**
- * RAII approach to ensuring that plan executors are deregistered.
- *
- * While retrieving the first batch of results, runQuery manually registers the executor
- * with ClientCursor. Certain query execution paths, namely $where, can throw an exception.
- * If we fail to deregister the executor, we will call invalidate/kill on the
- * still-registered-yet-deleted executor.
- *
- * For any subsequent calls to getMore, the executor is already registered with ClientCursor
- * by virtue of being cached, so this exception-proofing is not required.
- */
- struct ScopedExecutorRegistration {
- ScopedExecutorRegistration(PlanExecutor* exec);
- ~ScopedExecutorRegistration();
-
- PlanExecutor* const _exec;
- };
-
- /**
- * New PlanExecutor instances are created with the static make() methods above.
- */
- PlanExecutor(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns);
-
- /**
- * Public factory methods delegate to this private factory to do their work.
- */
- static Status make(OperationContext* opCtx,
- WorkingSet* ws,
- PlanStage* rt,
- QuerySolution* qs,
- CanonicalQuery* cq,
- const Collection* collection,
- const std::string& ns,
- YieldPolicy yieldPolicy,
- PlanExecutor** out);
-
- /**
- * Clients of PlanExecutor expect that on receiving a new instance from one of the make()
- * factory methods, plan selection has already been completed. In order to enforce this
- * property, this function is called to do plan selection prior to returning the new
- * PlanExecutor.
- *
- * If the tree contains plan selection stages, such as MultiPlanStage or SubplanStage,
- * this calls into their underlying plan selection facilities. Otherwise, does nothing.
- *
- * If a YIELD_AUTO policy is set then locks are yielded during plan selection.
- */
- Status pickBestPlan(YieldPolicy policy);
-
- bool killed() { return static_cast<bool>(_killReason); };
-
- // The OperationContext that we're executing within. We need this in order to release
- // locks.
- OperationContext* _opCtx;
-
- // Collection over which this plan executor runs. Used to resolve record ids retrieved by
- // the plan stages. The collection must not be destroyed while there are active plans.
- const Collection* _collection;
-
- std::unique_ptr<CanonicalQuery> _cq;
- std::unique_ptr<WorkingSet> _workingSet;
- std::unique_ptr<QuerySolution> _qs;
- std::unique_ptr<PlanStage> _root;
-
- // Deregisters this executor when it is destroyed.
- std::unique_ptr<ScopedExecutorRegistration> _safety;
-
- // What namespace are we operating over?
- std::string _ns;
-
- // If _killReason has a value, then we have been killed and the value represents the reason
- // for the kill.
- boost::optional<std::string> _killReason;
-
- // This is used to handle automatic yielding when allowed by the YieldPolicy. Never NULL.
- // TODO make this a non-pointer member. This requires some header shuffling so that this
- // file includes plan_yield_policy.h rather than the other way around.
- const std::unique_ptr<PlanYieldPolicy> _yieldPolicy;
-
- // A stash of results generated by this plan that the user of the PlanExecutor didn't want
- // to consume yet. We empty the queue before retrieving further results from the plan
- // stages.
- std::queue<BSONObj> _stash;
+ /**
+ * Get the query that this executor is executing, without transferring ownership.
+ */
+ CanonicalQuery* getCanonicalQuery() const;
+
+ /**
+ * The collection in which this executor is working.
+ */
+ const Collection* collection() const;
+
+ /**
+ * Return the NS that the query is running over.
+ */
+ const std::string& ns();
+
+ /**
+ * Return the OperationContext that the plan is currently executing within.
+ */
+ OperationContext* getOpCtx() const;
+
+ /**
+ * Generates a tree of stats objects with a separate lifetime from the execution
+ * stage tree wrapped by this PlanExecutor. The caller owns the returned pointer.
+ *
+ * This is OK even if we were killed.
+ */
+ PlanStageStats* getStats() const;
+
+ //
+ // Methods that just pass down to the PlanStage tree.
+ //
+
+ /**
+ * Save any state required to either
+ * 1. hibernate waiting for a getMore, or
+ * 2. yield the lock (on applicable storage engines) to allow writes to proceed.
+ */
+ void saveState();
+
+ /**
+ * Restores the state saved by a saveState() call.
+ *
+ * Returns true if the state was successfully restored and the execution tree can be
+ * work()'d.
+ *
+ * If allowed, will yield and retry if a WriteConflictException is encountered.
+ *
+ * Returns false otherwise. The execution tree cannot be worked and should be deleted.
+ */
+ bool restoreState(OperationContext* opCtx);
+
+ /**
+ * Same as restoreState but without the logic to retry if a WriteConflictException is
+ * thrown.
+ *
+ * This is only public for PlanYieldPolicy. DO NOT CALL ANYWHERE ELSE.
+ */
+ bool restoreStateWithoutRetrying(OperationContext* opCtx);
+
+ //
+ // Running Support
+ //
+
+ /**
+ * Return the next result from the underlying execution tree.
+ *
+ * For read operations, objOut or dlOut are populated with another query result.
+ *
+ * For write operations, the return depends on the particulars of the write stage.
+ *
+ * If a YIELD_AUTO policy is set, then this method may yield.
+ */
+ ExecState getNextSnapshotted(Snapshotted<BSONObj>* objOut, RecordId* dlOut);
+ ExecState getNext(BSONObj* objOut, RecordId* dlOut);
+
+ /**
+ * Returns 'true' if the plan is done producing results (or writing), 'false' otherwise.
+ *
+ * Tailable cursors are a possible exception to this: they may have further results even if
+ * isEOF() returns true.
+ */
+ bool isEOF();
+
+ /**
+ * Execute the plan to completion, throwing out the results. Used when you want to work the
+ * underlying tree without getting results back.
+ *
+ * If a YIELD_AUTO policy is set on this executor, then this will automatically yield.
+ */
+ Status executePlan();
+
+ //
+ // Concurrency-related methods.
+ //
+
+ /**
+ * Register this plan executor with the collection cursor manager so that it
+ * receives notifications for events that happen while yielding any locks.
+ *
+ * Deregistration happens automatically when this plan executor is destroyed.
+ */
+ void registerExec();
+
+ /**
+ * Unregister this PlanExecutor. Normally you want the PlanExecutor to be registered
+ * for its lifetime, and you shouldn't have to call this explicitly.
+ */
+ void deregisterExec();
+
+ /**
+ * If we're yielding locks, the database we're operating over or any collection we're
+ * relying on may be dropped. When this happens all cursors and plan executors on that
+ * database and collection are killed or deleted in some fashion. Callers must specify
+ * the 'reason' for why this executor is being killed.
+ */
+ void kill(std::string reason);
+
+ /**
+ * If we're yielding locks, writes may occur to documents that we rely on to keep valid
+ * state. As such, if the plan yields, it must be notified of relevant writes so that
+ * we can ensure that it doesn't crash if we try to access invalid state.
+ */
+ void invalidate(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ /**
+ * Helper method to aid in displaying an ExecState for debug or other recreational purposes.
+ */
+ static std::string statestr(ExecState s);
+
+ /**
+ * Change the yield policy of the PlanExecutor to 'policy'. If 'registerExecutor' is true,
+ * and the yield policy is YIELD_AUTO, then the plan executor gets registered to receive
+ * notifications of events from other threads.
+ *
+ * Everybody who sets the policy to YIELD_AUTO really wants to call registerExec()
+ * immediately after EXCEPT commands that create cursors...so we expose the ability to
+ * register (or not) here, rather than require all users to have yet another RAII object.
+ * Only cursor-creating things like find.cpp set registerExecutor to false.
+ */
+ void setYieldPolicy(YieldPolicy policy, bool registerExecutor = true);
+
+ /**
+ * Stash the BSONObj so that it gets returned from the PlanExecutor on a later call to
+ * getNext().
+ *
+ * Enqueued documents are returned in FIFO order. The queued results are exhausted before
+ * generating further results from the underlying query plan.
+ *
+ * Subsequent calls to getNext() must request the BSONObj and *not* the RecordId.
+ *
+ * If used in combination with getNextSnapshotted(), then the SnapshotId associated with
+ * 'obj' will be null when 'obj' is dequeued.
+ */
+ void enqueue(const BSONObj& obj);
+
+private:
+ /**
+ * RAII approach to ensuring that plan executors are deregistered.
+ *
+ * While retrieving the first batch of results, runQuery manually registers the executor
+ * with ClientCursor. Certain query execution paths, namely $where, can throw an exception.
+ * If we fail to deregister the executor, we will call invalidate/kill on the
+ * still-registered-yet-deleted executor.
+ *
+ * For any subsequent calls to getMore, the executor is already registered with ClientCursor
+ * by virtue of being cached, so this exception-proofing is not required.
+ */
+ struct ScopedExecutorRegistration {
+ ScopedExecutorRegistration(PlanExecutor* exec);
+ ~ScopedExecutorRegistration();
+
+ PlanExecutor* const _exec;
+ };
+
+ /**
+ * New PlanExecutor instances are created with the static make() methods above.
+ */
+ PlanExecutor(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ const std::string& ns);
+
+ /**
+ * Public factory methods delegate to this private factory to do their work.
+ */
+ static Status make(OperationContext* opCtx,
+ WorkingSet* ws,
+ PlanStage* rt,
+ QuerySolution* qs,
+ CanonicalQuery* cq,
+ const Collection* collection,
+ const std::string& ns,
+ YieldPolicy yieldPolicy,
+ PlanExecutor** out);
+
+ /**
+ * Clients of PlanExecutor expect that on receiving a new instance from one of the make()
+ * factory methods, plan selection has already been completed. In order to enforce this
+ * property, this function is called to do plan selection prior to returning the new
+ * PlanExecutor.
+ *
+ * If the tree contains plan selection stages, such as MultiPlanStage or SubplanStage,
+ * this calls into their underlying plan selection facilities. Otherwise, does nothing.
+ *
+ * If a YIELD_AUTO policy is set then locks are yielded during plan selection.
+ */
+ Status pickBestPlan(YieldPolicy policy);
+
+ bool killed() {
+ return static_cast<bool>(_killReason);
+    };
+ // The OperationContext that we're executing within. We need this in order to release
+ // locks.
+ OperationContext* _opCtx;
+
+ // Collection over which this plan executor runs. Used to resolve record ids retrieved by
+ // the plan stages. The collection must not be destroyed while there are active plans.
+ const Collection* _collection;
+
+ std::unique_ptr<CanonicalQuery> _cq;
+ std::unique_ptr<WorkingSet> _workingSet;
+ std::unique_ptr<QuerySolution> _qs;
+ std::unique_ptr<PlanStage> _root;
+
+ // Deregisters this executor when it is destroyed.
+ std::unique_ptr<ScopedExecutorRegistration> _safety;
+
+ // What namespace are we operating over?
+ std::string _ns;
+
+ // If _killReason has a value, then we have been killed and the value represents the reason
+ // for the kill.
+ boost::optional<std::string> _killReason;
+
+ // This is used to handle automatic yielding when allowed by the YieldPolicy. Never NULL.
+ // TODO make this a non-pointer member. This requires some header shuffling so that this
+ // file includes plan_yield_policy.h rather than the other way around.
+ const std::unique_ptr<PlanYieldPolicy> _yieldPolicy;
+
+ // A stash of results generated by this plan that the user of the PlanExecutor didn't want
+ // to consume yet. We empty the queue before retrieving further results from the plan
+ // stages.
+ std::queue<BSONObj> _stash;
+};
+
} // namespace mongo
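Note: the PlanExecutor interface above is consumed through one of the static make() overloads followed by a getNext() loop. A minimal usage sketch based on the declarations shown (ownership of 'ws', 'root' and 'cq', the surrounding lock/setup code, and the error handling are assumptions, not taken from this diff):

    PlanExecutor* rawExec;
    Status status = PlanExecutor::make(
        txn, ws.release(), root.release(), cq.release(), collection,
        PlanExecutor::YIELD_AUTO, &rawExec);
    if (!status.isOK()) {
        return status;  // plan selection or setup failed
    }
    std::unique_ptr<PlanExecutor> exec(rawExec);

    BSONObj obj;
    PlanExecutor::ExecState state;
    while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
        // consume 'obj'
    }
    // IS_EOF is normal completion; DEAD/FAILURE mean the plan was killed or hit an error.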
diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp
index 5a2672bd526..cd2fbde6b03 100644
--- a/src/mongo/db/query/plan_ranker.cpp
+++ b/src/mongo/db/query/plan_ranker.cpp
@@ -48,220 +48,208 @@
namespace {
- /**
- * Comparator for (scores, candidateIndex) in pickBestPlan().
- */
- bool scoreComparator(const std::pair<double, size_t>& lhs,
- const std::pair<double, size_t>& rhs) {
- // Just compare score in lhs.first and rhs.first;
- // Ignore candidate array index in lhs.second and rhs.second.
- return lhs.first > rhs.first;
- }
+/**
+ * Comparator for (scores, candidateIndex) in pickBestPlan().
+ */
+bool scoreComparator(const std::pair<double, size_t>& lhs, const std::pair<double, size_t>& rhs) {
+ // Just compare score in lhs.first and rhs.first;
+ // Ignore candidate array index in lhs.second and rhs.second.
+ return lhs.first > rhs.first;
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::endl;
- using std::vector;
-
- // static
- size_t PlanRanker::pickBestPlan(const vector<CandidatePlan>& candidates,
- PlanRankingDecision* why) {
- invariant(!candidates.empty());
- invariant(why);
-
- // A plan that hits EOF is automatically scored above
- // its peers. If multiple plans hit EOF during the same
- // set of round-robin calls to work(), then all such plans
- // receive the bonus.
- double eofBonus = 1.0;
-
- // Each plan will have a stat tree.
- vector<PlanStageStats*> statTrees;
-
- // Get stat trees from each plan.
- // Copy stats trees instead of transferring ownership
- // because multi plan runner will need its own stats
- // trees for explain.
- for (size_t i = 0; i < candidates.size(); ++i) {
- statTrees.push_back(candidates[i].root->getStats());
- }
+using std::endl;
+using std::vector;
+
+// static
+size_t PlanRanker::pickBestPlan(const vector<CandidatePlan>& candidates, PlanRankingDecision* why) {
+ invariant(!candidates.empty());
+ invariant(why);
+
+ // A plan that hits EOF is automatically scored above
+ // its peers. If multiple plans hit EOF during the same
+ // set of round-robin calls to work(), then all such plans
+ // receive the bonus.
+ double eofBonus = 1.0;
+
+ // Each plan will have a stat tree.
+ vector<PlanStageStats*> statTrees;
+
+ // Get stat trees from each plan.
+ // Copy stats trees instead of transferring ownership
+ // because multi plan runner will need its own stats
+ // trees for explain.
+ for (size_t i = 0; i < candidates.size(); ++i) {
+ statTrees.push_back(candidates[i].root->getStats());
+ }
- // Holds (score, candidateInndex).
- // Used to derive scores and candidate ordering.
- vector<std::pair<double, size_t> > scoresAndCandidateindices;
-
- // Compute score for each tree. Record the best.
- for (size_t i = 0; i < statTrees.size(); ++i) {
- LOG(5) << "Scoring plan " << i << ":" << endl
- << candidates[i].solution->toString() << "Stats:\n"
- << Explain::statsToBSON(*statTrees[i]).jsonString(Strict, true);
- LOG(2) << "Scoring query plan: "
- << Explain::getPlanSummary(candidates[i].root)
- << " planHitEOF=" << statTrees[i]->common.isEOF;
-
- double score = scoreTree(statTrees[i]);
- LOG(5) << "score = " << score << endl;
- if (statTrees[i]->common.isEOF) {
- LOG(5) << "Adding +" << eofBonus << " EOF bonus to score." << endl;
- score += 1;
- }
- scoresAndCandidateindices.push_back(std::make_pair(score, i));
+    // Holds (score, candidateIndex).
+ // Used to derive scores and candidate ordering.
+ vector<std::pair<double, size_t>> scoresAndCandidateindices;
+
+ // Compute score for each tree. Record the best.
+ for (size_t i = 0; i < statTrees.size(); ++i) {
+ LOG(5) << "Scoring plan " << i << ":" << endl
+ << candidates[i].solution->toString() << "Stats:\n"
+ << Explain::statsToBSON(*statTrees[i]).jsonString(Strict, true);
+ LOG(2) << "Scoring query plan: " << Explain::getPlanSummary(candidates[i].root)
+ << " planHitEOF=" << statTrees[i]->common.isEOF;
+
+ double score = scoreTree(statTrees[i]);
+ LOG(5) << "score = " << score << endl;
+ if (statTrees[i]->common.isEOF) {
+ LOG(5) << "Adding +" << eofBonus << " EOF bonus to score." << endl;
+ score += 1;
}
+ scoresAndCandidateindices.push_back(std::make_pair(score, i));
+ }
- // Sort (scores, candidateIndex). Get best child and populate candidate ordering.
- std::stable_sort(scoresAndCandidateindices.begin(), scoresAndCandidateindices.end(),
- scoreComparator);
-
- // Update results in 'why'
- // Stats and scores in 'why' are sorted in descending order by score.
- why->stats.clear();
- why->scores.clear();
- why->candidateOrder.clear();
- for (size_t i = 0; i < scoresAndCandidateindices.size(); ++i) {
- double score = scoresAndCandidateindices[i].first;
- size_t candidateIndex = scoresAndCandidateindices[i].second;
-
- // We shouldn't cache the scores with the EOF bonus included,
- // as this is just a tie-breaking measure for plan selection.
- // Plans not run through the multi plan runner will not receive
- // the bonus.
- //
- // An example of a bad thing that could happen if we stored scores
- // with the EOF bonus included:
- //
- // Let's say Plan A hits EOF, is the highest ranking plan, and gets
- // cached as such. On subsequent runs it will not receive the bonus.
- // Eventually the plan cache feedback mechanism will evict the cache
- // entry---the scores will appear to have fallen due to the missing
- // EOF bonus.
- //
- // This begs the question, why don't we include the EOF bonus in
- // scoring of cached plans as well? The problem here is that the cached
- // plan runner always runs plans to completion before scoring. Queries
- // that don't get the bonus in the multi plan runner might get the bonus
- // after being run from the plan cache.
- if (statTrees[candidateIndex]->common.isEOF) {
- score -= eofBonus;
- }
-
- why->stats.mutableVector().push_back(statTrees[candidateIndex]);
- why->scores.push_back(score);
- why->candidateOrder.push_back(candidateIndex);
+ // Sort (scores, candidateIndex). Get best child and populate candidate ordering.
+ std::stable_sort(
+ scoresAndCandidateindices.begin(), scoresAndCandidateindices.end(), scoreComparator);
+
+ // Update results in 'why'
+ // Stats and scores in 'why' are sorted in descending order by score.
+ why->stats.clear();
+ why->scores.clear();
+ why->candidateOrder.clear();
+ for (size_t i = 0; i < scoresAndCandidateindices.size(); ++i) {
+ double score = scoresAndCandidateindices[i].first;
+ size_t candidateIndex = scoresAndCandidateindices[i].second;
+
+ // We shouldn't cache the scores with the EOF bonus included,
+ // as this is just a tie-breaking measure for plan selection.
+ // Plans not run through the multi plan runner will not receive
+ // the bonus.
+ //
+ // An example of a bad thing that could happen if we stored scores
+ // with the EOF bonus included:
+ //
+ // Let's say Plan A hits EOF, is the highest ranking plan, and gets
+ // cached as such. On subsequent runs it will not receive the bonus.
+ // Eventually the plan cache feedback mechanism will evict the cache
+ // entry---the scores will appear to have fallen due to the missing
+ // EOF bonus.
+ //
+ // This begs the question, why don't we include the EOF bonus in
+ // scoring of cached plans as well? The problem here is that the cached
+ // plan runner always runs plans to completion before scoring. Queries
+ // that don't get the bonus in the multi plan runner might get the bonus
+ // after being run from the plan cache.
+ if (statTrees[candidateIndex]->common.isEOF) {
+ score -= eofBonus;
}
- size_t bestChild = scoresAndCandidateindices[0].second;
- return bestChild;
+ why->stats.mutableVector().push_back(statTrees[candidateIndex]);
+ why->scores.push_back(score);
+ why->candidateOrder.push_back(candidateIndex);
}
- // TODO: Move this out. This is a signal for ranking but will become its own complicated
- // stats-collecting beast.
- double computeSelectivity(const PlanStageStats* stats) {
- if (STAGE_IXSCAN == stats->stageType) {
- IndexScanStats* iss = static_cast<IndexScanStats*>(stats->specific.get());
- return iss->keyPattern.nFields();
- }
- else {
- double sum = 0;
- for (size_t i = 0; i < stats->children.size(); ++i) {
- sum += computeSelectivity(stats->children[i]);
- }
- return sum;
+ size_t bestChild = scoresAndCandidateindices[0].second;
+ return bestChild;
+}
+
+// TODO: Move this out. This is a signal for ranking but will become its own complicated
+// stats-collecting beast.
+double computeSelectivity(const PlanStageStats* stats) {
+ if (STAGE_IXSCAN == stats->stageType) {
+ IndexScanStats* iss = static_cast<IndexScanStats*>(stats->specific.get());
+ return iss->keyPattern.nFields();
+ } else {
+ double sum = 0;
+ for (size_t i = 0; i < stats->children.size(); ++i) {
+ sum += computeSelectivity(stats->children[i]);
}
+ return sum;
}
+}
- bool hasStage(const StageType type, const PlanStageStats* stats) {
- if (type == stats->stageType) {
+bool hasStage(const StageType type, const PlanStageStats* stats) {
+ if (type == stats->stageType) {
+ return true;
+ }
+ for (size_t i = 0; i < stats->children.size(); ++i) {
+ if (hasStage(type, stats->children[i])) {
return true;
}
- for (size_t i = 0; i < stats->children.size(); ++i) {
- if (hasStage(type, stats->children[i])) {
- return true;
- }
- }
- return false;
+ }
+ return false;
+}
+
+// static
+double PlanRanker::scoreTree(const PlanStageStats* stats) {
+ // We start all scores at 1. Our "no plan selected" score is 0 and we want all plans to
+ // be greater than that.
+ double baseScore = 1;
+
+ // How many "units of work" did the plan perform. Each call to work(...)
+ // counts as one unit.
+ size_t workUnits = stats->common.works;
+
+ // How much did a plan produce?
+ // Range: [0, 1]
+ double productivity =
+ static_cast<double>(stats->common.advanced) / static_cast<double>(workUnits);
+
+ // Just enough to break a tie. Must be small enough to ensure that a more productive
+ // plan doesn't lose to a less productive plan due to tie breaking.
+ static const double epsilon = std::min(1.0 / static_cast<double>(10 * workUnits), 1e-4);
+
+ // We prefer covered projections.
+ //
+ // We only do this when we have a projection stage because we have so many jstests that
+ // check bounds even when a collscan plan is just as good as the ixscan'd plan :(
+ double noFetchBonus = epsilon;
+ if (hasStage(STAGE_PROJECTION, stats) && hasStage(STAGE_FETCH, stats)) {
+ noFetchBonus = 0;
}
- // static
- double PlanRanker::scoreTree(const PlanStageStats* stats) {
- // We start all scores at 1. Our "no plan selected" score is 0 and we want all plans to
- // be greater than that.
- double baseScore = 1;
+ // In the case of ties, prefer solutions without a blocking sort
+ // to solutions with a blocking sort.
+ double noSortBonus = epsilon;
+ if (hasStage(STAGE_SORT, stats)) {
+ noSortBonus = 0;
+ }
- // How many "units of work" did the plan perform. Each call to work(...)
- // counts as one unit.
- size_t workUnits = stats->common.works;
+ // In the case of ties, prefer single index solutions to ixisect. Index
+ // intersection solutions are often slower than single-index solutions
+ // because they require examining a superset of index keys that would be
+ // examined by a single index scan.
+ //
+ // On the other hand, index intersection solutions examine the same
+    // number of documents or fewer. In the case that index intersection
+ // allows us to examine fewer documents, the penalty given to ixisect
+ // can be made up via the no fetch bonus.
+ double noIxisectBonus = epsilon;
+ if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) {
+ noIxisectBonus = 0;
+ }
- // How much did a plan produce?
- // Range: [0, 1]
- double productivity = static_cast<double>(stats->common.advanced)
- / static_cast<double>(workUnits);
+ double tieBreakers = noFetchBonus + noSortBonus + noIxisectBonus;
+ double score = baseScore + productivity + tieBreakers;
- // Just enough to break a tie. Must be small enough to ensure that a more productive
- // plan doesn't lose to a less productive plan due to tie breaking.
- static const double epsilon = std::min(1.0 / static_cast<double>(10 * workUnits), 1e-4);
+ mongoutils::str::stream ss;
+ ss << "score(" << score << ") = baseScore(" << baseScore << ")"
+ << " + productivity((" << stats->common.advanced << " advanced)/(" << stats->common.works
+ << " works) = " << productivity << ")"
+ << " + tieBreakers(" << noFetchBonus << " noFetchBonus + " << noSortBonus
+ << " noSortBonus + " << noIxisectBonus << " noIxisectBonus = " << tieBreakers << ")";
+ std::string scoreStr = ss;
+ LOG(2) << scoreStr;
- // We prefer covered projections.
- //
- // We only do this when we have a projection stage because we have so many jstests that
- // check bounds even when a collscan plan is just as good as the ixscan'd plan :(
- double noFetchBonus = epsilon;
- if (hasStage(STAGE_PROJECTION, stats) && hasStage(STAGE_FETCH, stats)) {
- noFetchBonus = 0;
- }
-
- // In the case of ties, prefer solutions without a blocking sort
- // to solutions with a blocking sort.
- double noSortBonus = epsilon;
- if (hasStage(STAGE_SORT, stats)) {
- noSortBonus = 0;
- }
-
- // In the case of ties, prefer single index solutions to ixisect. Index
- // intersection solutions are often slower than single-index solutions
- // because they require examining a superset of index keys that would be
- // examined by a single index scan.
- //
- // On the other hand, index intersection solutions examine the same
- // number or fewer of documents. In the case that index intersection
- // allows us to examine fewer documents, the penalty given to ixisect
- // can be made up via the no fetch bonus.
- double noIxisectBonus = epsilon;
+ if (internalQueryForceIntersectionPlans) {
if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) {
- noIxisectBonus = 0;
- }
-
- double tieBreakers = noFetchBonus + noSortBonus + noIxisectBonus;
- double score = baseScore + productivity + tieBreakers;
-
- mongoutils::str::stream ss;
- ss << "score(" << score << ") = baseScore(" << baseScore << ")"
- << " + productivity((" << stats->common.advanced
- << " advanced)/("
- << stats->common.works
- << " works) = "
- << productivity << ")"
- << " + tieBreakers(" << noFetchBonus
- << " noFetchBonus + "
- << noSortBonus
- << " noSortBonus + "
- << noIxisectBonus
- << " noIxisectBonus = "
- << tieBreakers << ")";
- std::string scoreStr = ss;
- LOG(2) << scoreStr;
-
- if (internalQueryForceIntersectionPlans) {
- if (hasStage(STAGE_AND_HASH, stats) || hasStage(STAGE_AND_SORTED, stats)) {
- // The boost should be >2.001 to make absolutely sure the ixisect plan will win due
- // to the combination of 1) productivity, 2) eof bonus, and 3) no ixisect bonus.
- score += 3;
- LOG(5) << "Score boosted to " << score << " due to intersection forcing." << endl;
- }
+ // The boost should be >2.001 to make absolutely sure the ixisect plan will win due
+ // to the combination of 1) productivity, 2) eof bonus, and 3) no ixisect bonus.
+ score += 3;
+ LOG(5) << "Score boosted to " << score << " due to intersection forcing." << endl;
}
-
- return score;
}
+ return score;
+}
+
} // namespace mongo
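Note: the scoring arithmetic above reduces to score = baseScore + productivity + tieBreakers. A small worked example with hypothetical stats (50 results advanced over 100 work() calls, and no fetch, blocking-sort, or index-intersection stages), following the reformatted code:

    double workUnits = 100;                                   // stats->common.works
    double productivity = 50.0 / workUnits;                   // advanced / works = 0.5
    double epsilon = std::min(1.0 / (10 * workUnits), 1e-4);  // 1e-4
    double tieBreakers = epsilon + epsilon + epsilon;         // noFetch + noSort + noIxisect = 3e-4
    double score = 1.0 + productivity + tieBreakers;          // baseScore 1 -> 1.5003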
diff --git a/src/mongo/db/query/plan_ranker.h b/src/mongo/db/query/plan_ranker.h
index ed41c0c1c5a..653fb332f12 100644
--- a/src/mongo/db/query/plan_ranker.h
+++ b/src/mongo/db/query/plan_ranker.h
@@ -39,86 +39,85 @@
namespace mongo {
- struct CandidatePlan;
- struct PlanRankingDecision;
+struct CandidatePlan;
+struct PlanRankingDecision;
+/**
+ * Ranks 2 or more plans.
+ */
+class PlanRanker {
+public:
/**
- * Ranks 2 or more plans.
+ * Returns index in 'candidates' of which plan is best.
+ * Populates 'why' with information relevant to how each plan fared in the ranking process.
+ * Caller owns pointers in 'why'.
+ * 'candidateOrder' holds indices into candidates ordered by score (winner in first element).
*/
- class PlanRanker {
- public:
- /**
- * Returns index in 'candidates' of which plan is best.
- * Populates 'why' with information relevant to how each plan fared in the ranking process.
- * Caller owns pointers in 'why'.
- * 'candidateOrder' holds indices into candidates ordered by score (winner in first element).
- */
- static size_t pickBestPlan(const std::vector<CandidatePlan>& candidates,
- PlanRankingDecision* why);
-
- /**
- * Assign the stats tree a 'goodness' score. The higher the score, the better
- * the plan. The exact value isn't meaningful except for imposing a ranking.
- */
- static double scoreTree(const PlanStageStats* stats);
- };
+ static size_t pickBestPlan(const std::vector<CandidatePlan>& candidates,
+ PlanRankingDecision* why);
/**
- * A container holding one to-be-ranked plan and its associated/relevant data.
- * Does not own any of its pointers.
+ * Assign the stats tree a 'goodness' score. The higher the score, the better
+ * the plan. The exact value isn't meaningful except for imposing a ranking.
*/
- struct CandidatePlan {
- CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w)
- : solution(s), root(r), ws(w), failed(false) { }
+ static double scoreTree(const PlanStageStats* stats);
+};
+
+/**
+ * A container holding one to-be-ranked plan and its associated/relevant data.
+ * Does not own any of its pointers.
+ */
+struct CandidatePlan {
+ CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w)
+ : solution(s), root(r), ws(w), failed(false) {}
- QuerySolution* solution;
- PlanStage* root;
- WorkingSet* ws;
+ QuerySolution* solution;
+ PlanStage* root;
+ WorkingSet* ws;
- // Any results produced during the plan's execution prior to ranking are retained here.
- std::list<WorkingSetID> results;
+ // Any results produced during the plan's execution prior to ranking are retained here.
+ std::list<WorkingSetID> results;
- bool failed;
- };
+ bool failed;
+};
+
+/**
+ * Information about why a plan was picked to be the best. Data here is placed into the cache
+ * and used to compare expected performance with actual.
+ */
+struct PlanRankingDecision {
+ PlanRankingDecision() {}
/**
- * Information about why a plan was picked to be the best. Data here is placed into the cache
- * and used to compare expected performance with actual.
+ * Make a deep copy.
*/
- struct PlanRankingDecision {
-
- PlanRankingDecision() { }
-
- /**
- * Make a deep copy.
- */
- PlanRankingDecision* clone() const {
- PlanRankingDecision* decision = new PlanRankingDecision();
- for (size_t i = 0; i < stats.size(); ++i) {
- PlanStageStats* s = stats.vector()[i];
- invariant(s);
- decision->stats.mutableVector().push_back(s->clone());
- }
- decision->scores = scores;
- decision->candidateOrder = candidateOrder;
- return decision;
+ PlanRankingDecision* clone() const {
+ PlanRankingDecision* decision = new PlanRankingDecision();
+ for (size_t i = 0; i < stats.size(); ++i) {
+ PlanStageStats* s = stats.vector()[i];
+ invariant(s);
+ decision->stats.mutableVector().push_back(s->clone());
}
-
- // Stats of all plans sorted in descending order by score.
- // Owned by us.
- OwnedPointerVector<PlanStageStats> stats;
-
- // The "goodness" score corresponding to 'stats'.
- // Sorted in descending order.
- std::vector<double> scores;
-
- // Ordering of original plans in descending of score.
- // Filled in by PlanRanker::pickBestPlan(candidates, ...)
- // so that candidates[candidateOrder[0]] refers to the best plan
- // with corresponding cores[0] and stats[0]. Runner-up would be
- // candidates[candidateOrder[1]] followed by
- // candidates[candidateOrder[2]], ...
- std::vector<size_t> candidateOrder;
- };
+ decision->scores = scores;
+ decision->candidateOrder = candidateOrder;
+ return decision;
+ }
+
+ // Stats of all plans sorted in descending order by score.
+ // Owned by us.
+ OwnedPointerVector<PlanStageStats> stats;
+
+ // The "goodness" score corresponding to 'stats'.
+ // Sorted in descending order.
+ std::vector<double> scores;
+
+    // Ordering of original plans in descending order of score.
+ // Filled in by PlanRanker::pickBestPlan(candidates, ...)
+ // so that candidates[candidateOrder[0]] refers to the best plan
+    // with corresponding scores[0] and stats[0]. Runner-up would be
+ // candidates[candidateOrder[1]] followed by
+ // candidates[candidateOrder[2]], ...
+ std::vector<size_t> candidateOrder;
+};
} // namespace mongo
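Note: a sketch of how the structs above fit together when ranking candidates; 'buildCandidates' is a hypothetical setup helper, only PlanRanker, CandidatePlan and PlanRankingDecision come from this header:

    std::vector<CandidatePlan> candidates = buildCandidates();  // hypothetical setup
    PlanRankingDecision why;
    size_t bestIdx = PlanRanker::pickBestPlan(candidates, &why);

    // 'why' is now sorted by score: candidateOrder[0] == bestIdx, and
    // scores[0]/stats[0] describe the winning plan.
    const CandidatePlan& winner = candidates[why.candidateOrder[0]];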
diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp
index 67eb574ef5c..45d996893a9 100644
--- a/src/mongo/db/query/plan_yield_policy.cpp
+++ b/src/mongo/db/query/plan_yield_policy.cpp
@@ -38,75 +38,73 @@
namespace mongo {
- PlanYieldPolicy::PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy)
- : _policy(policy),
- _forceYield(false),
- _elapsedTracker(internalQueryExecYieldIterations, internalQueryExecYieldPeriodMS),
- _planYielding(exec) { }
-
- bool PlanYieldPolicy::shouldYield() {
- if (!allowedToYield()) return false;
- invariant(!_planYielding->getOpCtx()->lockState()->inAWriteUnitOfWork());
- if (_forceYield) return true;
- return _elapsedTracker.intervalHasElapsed();
- }
-
- void PlanYieldPolicy::resetTimer() {
- _elapsedTracker.resetLastTime();
- }
-
- bool PlanYieldPolicy::yield(RecordFetcher* fetcher) {
- invariant(_planYielding);
- invariant(allowedToYield());
-
- _forceYield = false;
+PlanYieldPolicy::PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy)
+ : _policy(policy),
+ _forceYield(false),
+ _elapsedTracker(internalQueryExecYieldIterations, internalQueryExecYieldPeriodMS),
+ _planYielding(exec) {}
+
+bool PlanYieldPolicy::shouldYield() {
+ if (!allowedToYield())
+ return false;
+ invariant(!_planYielding->getOpCtx()->lockState()->inAWriteUnitOfWork());
+ if (_forceYield)
+ return true;
+ return _elapsedTracker.intervalHasElapsed();
+}
+
+void PlanYieldPolicy::resetTimer() {
+ _elapsedTracker.resetLastTime();
+}
+
+bool PlanYieldPolicy::yield(RecordFetcher* fetcher) {
+ invariant(_planYielding);
+ invariant(allowedToYield());
+
+ _forceYield = false;
+
+ OperationContext* opCtx = _planYielding->getOpCtx();
+ invariant(opCtx);
+ invariant(!opCtx->lockState()->inAWriteUnitOfWork());
+
+ // Can't use MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END since we need to call saveState
+    // before resetting the transaction.
+ for (int attempt = 1; true; attempt++) {
+ try {
+ // All YIELD_AUTO plans will get here eventually when the elapsed tracker triggers
+ // that it's time to yield. Whether or not we will actually yield, we need to check
+ // if this operation has been interrupted. Throws if the interrupt flag is set.
+ if (_policy == PlanExecutor::YIELD_AUTO) {
+ opCtx->checkForInterrupt();
+ }
- OperationContext* opCtx = _planYielding->getOpCtx();
- invariant(opCtx);
- invariant(!opCtx->lockState()->inAWriteUnitOfWork());
+ // No need to yield if the collection is NULL.
+ if (NULL == _planYielding->collection()) {
+ return true;
+ }
- // Can't use MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END since we need to call saveState
- // before reseting the transaction.
- for (int attempt = 1; true; attempt++) {
try {
- // All YIELD_AUTO plans will get here eventually when the elapsed tracker triggers
- // that it's time to yield. Whether or not we will actually yield, we need to check
- // if this operation has been interrupted. Throws if the interrupt flag is set.
- if (_policy == PlanExecutor::YIELD_AUTO) {
- opCtx->checkForInterrupt();
- }
-
- // No need to yield if the collection is NULL.
- if (NULL == _planYielding->collection()) {
- return true;
- }
-
- try {
- _planYielding->saveState();
- }
- catch (const WriteConflictException& wce) {
- invariant(!"WriteConflictException not allowed in saveState");
- }
-
- if (_policy == PlanExecutor::WRITE_CONFLICT_RETRY_ONLY) {
- // Just reset the snapshot. Leave all LockManager locks alone.
- opCtx->recoveryUnit()->abandonSnapshot();
- }
- else {
- // Release and reacquire locks.
- QueryYield::yieldAllLocks(opCtx, fetcher);
- }
-
- return _planYielding->restoreStateWithoutRetrying(opCtx);
+ _planYielding->saveState();
+ } catch (const WriteConflictException& wce) {
+ invariant(!"WriteConflictException not allowed in saveState");
}
- catch (const WriteConflictException& wce) {
- CurOp::get(opCtx)->debug().writeConflicts++;
- WriteConflictException::logAndBackoff(attempt,
- "plan execution restoreState",
- _planYielding->collection()->ns().ns());
- // retry
+
+ if (_policy == PlanExecutor::WRITE_CONFLICT_RETRY_ONLY) {
+ // Just reset the snapshot. Leave all LockManager locks alone.
+ opCtx->recoveryUnit()->abandonSnapshot();
+ } else {
+ // Release and reacquire locks.
+ QueryYield::yieldAllLocks(opCtx, fetcher);
}
+
+ return _planYielding->restoreStateWithoutRetrying(opCtx);
+ } catch (const WriteConflictException& wce) {
+ CurOp::get(opCtx)->debug().writeConflicts++;
+ WriteConflictException::logAndBackoff(
+ attempt, "plan execution restoreState", _planYielding->collection()->ns().ns());
+ // retry
}
}
+}
-} // namespace mongo
+} // namespace mongo
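Note: a sketch of the calling pattern the implementation above expects from its executor (names match the header that follows; the surrounding work loop and the 'fetcher' pointer are assumed):

    if (yieldPolicy->shouldYield()) {
        // yield() saves state, gives up locks (or merely abandons the snapshot under
        // WRITE_CONFLICT_RETRY_ONLY), and restores state afterwards.
        if (!yieldPolicy->yield(fetcher /* may be NULL */)) {
            return PlanExecutor::DEAD;  // executor was killed while yielded
        }
    }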
diff --git a/src/mongo/db/query/plan_yield_policy.h b/src/mongo/db/query/plan_yield_policy.h
index 8dc5d45447e..0de47c608cd 100644
--- a/src/mongo/db/query/plan_yield_policy.h
+++ b/src/mongo/db/query/plan_yield_policy.h
@@ -34,64 +34,67 @@
namespace mongo {
- class RecordFetcher;
-
- class PlanYieldPolicy {
- public:
- /**
- * If policy == WRITE_CONFLICT_RETRY_ONLY, shouldYield will only return true after
- * forceYield has been called, and yield will only abandonSnapshot without releasing any
- * locks.
- */
- PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy);
-
- /**
- * Used by YIELD_AUTO plan executors in order to check whether it is time to yield.
- * PlanExecutors give up their locks periodically in order to be fair to other
- * threads.
- */
- bool shouldYield();
-
- /**
- * Resets the yield timer so that we wait for a while before yielding again.
- */
- void resetTimer();
-
- /**
- * Used to cause a plan executor to give up locks and go to sleep. The PlanExecutor
- * must *not* be in saved state. Handles calls to save/restore state internally.
- *
- * If 'fetcher' is non-NULL, then we are yielding because the storage engine told us
- * that we will page fault on this record. We use 'fetcher' to retrieve the record
- * after we give up our locks.
- *
- * Returns true if the executor was restored successfully and is still alive. Returns false
- * if the executor got killed during yield.
- */
- bool yield(RecordFetcher* fetcher = NULL);
-
- /**
- * All calls to shouldYield will return true until the next call to yield.
- */
- void forceYield() {
- dassert(allowedToYield());
- _forceYield = true;
- }
-
- bool allowedToYield() const { return _policy != PlanExecutor::YIELD_MANUAL; }
-
- void setPolicy(PlanExecutor::YieldPolicy policy) { _policy = policy; }
-
- private:
- PlanExecutor::YieldPolicy _policy;
-
- bool _forceYield;
- ElapsedTracker _elapsedTracker;
-
- // The plan executor which this yield policy is responsible for yielding. Must
- // not outlive the plan executor.
- PlanExecutor* const _planYielding;
- };
-
-} // namespace mongo
-
+class RecordFetcher;
+
+class PlanYieldPolicy {
+public:
+ /**
+ * If policy == WRITE_CONFLICT_RETRY_ONLY, shouldYield will only return true after
+ * forceYield has been called, and yield will only abandonSnapshot without releasing any
+ * locks.
+ */
+ PlanYieldPolicy(PlanExecutor* exec, PlanExecutor::YieldPolicy policy);
+
+ /**
+ * Used by YIELD_AUTO plan executors in order to check whether it is time to yield.
+ * PlanExecutors give up their locks periodically in order to be fair to other
+ * threads.
+ */
+ bool shouldYield();
+
+ /**
+ * Resets the yield timer so that we wait for a while before yielding again.
+ */
+ void resetTimer();
+
+ /**
+ * Used to cause a plan executor to give up locks and go to sleep. The PlanExecutor
+ * must *not* be in saved state. Handles calls to save/restore state internally.
+ *
+ * If 'fetcher' is non-NULL, then we are yielding because the storage engine told us
+ * that we will page fault on this record. We use 'fetcher' to retrieve the record
+ * after we give up our locks.
+ *
+ * Returns true if the executor was restored successfully and is still alive. Returns false
+ * if the executor got killed during yield.
+ */
+ bool yield(RecordFetcher* fetcher = NULL);
+
+ /**
+ * All calls to shouldYield will return true until the next call to yield.
+ */
+ void forceYield() {
+ dassert(allowedToYield());
+ _forceYield = true;
+ }
+
+ bool allowedToYield() const {
+ return _policy != PlanExecutor::YIELD_MANUAL;
+ }
+
+ void setPolicy(PlanExecutor::YieldPolicy policy) {
+ _policy = policy;
+ }
+
+private:
+ PlanExecutor::YieldPolicy _policy;
+
+ bool _forceYield;
+ ElapsedTracker _elapsedTracker;
+
+ // The plan executor which this yield policy is responsible for yielding. Must
+ // not outlive the plan executor.
+ PlanExecutor* const _planYielding;
+};
+
+} // namespace mongo
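Note: a hedged sketch of the WRITE_CONFLICT_RETRY_ONLY pattern described above, as a write stage might use it; 'doWrite' is a hypothetical helper and not part of this interface:

    try {
        doWrite(txn);  // hypothetical; may throw WriteConflictException
    } catch (const WriteConflictException&) {
        yieldPolicy->forceYield();   // the next shouldYield() call returns true
        if (!yieldPolicy->yield()) {
            // killed while the snapshot was being reset; stop this plan
        }
        // otherwise retry the write on the next work() call
    }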
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index e0f714716b8..1c059933f17 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -46,1305 +46,1250 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- /**
- * Text node functors.
- */
- bool isTextNode(const QuerySolutionNode* node) {
- return STAGE_TEXT == node->getType();
- }
+/**
+ * Text node functors.
+ */
+bool isTextNode(const QuerySolutionNode* node) {
+ return STAGE_TEXT == node->getType();
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::unique_ptr;
- using std::vector;
+using std::unique_ptr;
+using std::vector;
- // static
- QuerySolutionNode* QueryPlannerAccess::makeCollectionScan(const CanonicalQuery& query,
- bool tailable,
- const QueryPlannerParams& params) {
- // Make the (only) node, a collection scan.
- CollectionScanNode* csn = new CollectionScanNode();
- csn->name = query.ns();
- csn->filter.reset(query.root()->shallowClone());
- csn->tailable = tailable;
- csn->maxScan = query.getParsed().getMaxScan();
-
- // If the hint is {$natural: +-1} this changes the direction of the collection scan.
- if (!query.getParsed().getHint().isEmpty()) {
- BSONElement natural = query.getParsed().getHint().getFieldDotted("$natural");
- if (!natural.eoo()) {
- csn->direction = natural.numberInt() >= 0 ? 1 : -1;
- }
+// static
+QuerySolutionNode* QueryPlannerAccess::makeCollectionScan(const CanonicalQuery& query,
+ bool tailable,
+ const QueryPlannerParams& params) {
+ // Make the (only) node, a collection scan.
+ CollectionScanNode* csn = new CollectionScanNode();
+ csn->name = query.ns();
+ csn->filter.reset(query.root()->shallowClone());
+ csn->tailable = tailable;
+ csn->maxScan = query.getParsed().getMaxScan();
+
+ // If the hint is {$natural: +-1} this changes the direction of the collection scan.
+ if (!query.getParsed().getHint().isEmpty()) {
+ BSONElement natural = query.getParsed().getHint().getFieldDotted("$natural");
+ if (!natural.eoo()) {
+ csn->direction = natural.numberInt() >= 0 ? 1 : -1;
}
+ }
- // The sort can specify $natural as well. The sort direction should override the hint
- // direction if both are specified.
- const BSONObj& sortObj = query.getParsed().getSort();
- if (!sortObj.isEmpty()) {
- BSONElement natural = sortObj.getFieldDotted("$natural");
- if (!natural.eoo()) {
- csn->direction = natural.numberInt() >= 0 ? 1 : -1;
- }
+ // The sort can specify $natural as well. The sort direction should override the hint
+ // direction if both are specified.
+ const BSONObj& sortObj = query.getParsed().getSort();
+ if (!sortObj.isEmpty()) {
+ BSONElement natural = sortObj.getFieldDotted("$natural");
+ if (!natural.eoo()) {
+ csn->direction = natural.numberInt() >= 0 ? 1 : -1;
}
-
- return csn;
}
- // static
- QuerySolutionNode* QueryPlannerAccess::makeLeafNode(const CanonicalQuery& query,
- const IndexEntry& index,
- size_t pos,
- MatchExpression* expr,
- IndexBoundsBuilder::BoundsTightness* tightnessOut) {
- // We're guaranteed that all GEO_NEARs are first. This slightly violates the "sort index
- // predicates by their position in the compound index" rule but GEO_NEAR isn't an ixscan.
- // This saves our bacon when we have {foo: 1, bar: "2dsphere"} and the predicate on bar is a
- // $near. If we didn't get the GEO_NEAR first we'd create an IndexScanNode and later cast
- // it to a GeoNear2DSphereNode
- //
- // This should gracefully deal with the case where we have a pred over foo but no geo clause
- // over bar. In that case there is no GEO_NEAR to appear first and it's treated like a
- // straight ixscan.
-
- if (MatchExpression::GEO_NEAR == expr->matchType()) {
- // We must not keep the expression node around.
- *tightnessOut = IndexBoundsBuilder::EXACT;
- GeoNearMatchExpression* nearExpr = static_cast<GeoNearMatchExpression*>(expr);
-
- BSONElement elt = index.keyPattern.firstElement();
- bool indexIs2D = (String == elt.type() && "2d" == elt.String());
-
- if (indexIs2D) {
- GeoNear2DNode* ret = new GeoNear2DNode();
- ret->indexKeyPattern = index.keyPattern;
- ret->nq = &nearExpr->getData();
- ret->baseBounds.fields.resize(index.keyPattern.nFields());
- if (NULL != query.getProj()) {
- ret->addPointMeta = query.getProj()->wantGeoNearPoint();
- ret->addDistMeta = query.getProj()->wantGeoNearDistance();
- }
-
- return ret;
- }
- else {
- GeoNear2DSphereNode* ret = new GeoNear2DSphereNode();
- ret->indexKeyPattern = index.keyPattern;
- ret->nq = &nearExpr->getData();
- ret->baseBounds.fields.resize(index.keyPattern.nFields());
- if (NULL != query.getProj()) {
- ret->addPointMeta = query.getProj()->wantGeoNearPoint();
- ret->addDistMeta = query.getProj()->wantGeoNearDistance();
- }
- return ret;
- }
- }
- else if (MatchExpression::TEXT == expr->matchType()) {
- // We must not keep the expression node around.
- *tightnessOut = IndexBoundsBuilder::EXACT;
- TextMatchExpression* textExpr = static_cast<TextMatchExpression*>(expr);
- TextNode* ret = new TextNode();
+ return csn;
+}
+
+// static
+QuerySolutionNode* QueryPlannerAccess::makeLeafNode(
+ const CanonicalQuery& query,
+ const IndexEntry& index,
+ size_t pos,
+ MatchExpression* expr,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut) {
+ // We're guaranteed that all GEO_NEARs are first. This slightly violates the "sort index
+ // predicates by their position in the compound index" rule but GEO_NEAR isn't an ixscan.
+ // This saves our bacon when we have {foo: 1, bar: "2dsphere"} and the predicate on bar is a
+ // $near. If we didn't get the GEO_NEAR first we'd create an IndexScanNode and later cast
+ // it to a GeoNear2DSphereNode
+ //
+ // This should gracefully deal with the case where we have a pred over foo but no geo clause
+ // over bar. In that case there is no GEO_NEAR to appear first and it's treated like a
+ // straight ixscan.
+
+ if (MatchExpression::GEO_NEAR == expr->matchType()) {
+ // We must not keep the expression node around.
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ GeoNearMatchExpression* nearExpr = static_cast<GeoNearMatchExpression*>(expr);
+
+ BSONElement elt = index.keyPattern.firstElement();
+ bool indexIs2D = (String == elt.type() && "2d" == elt.String());
+
+ if (indexIs2D) {
+ GeoNear2DNode* ret = new GeoNear2DNode();
ret->indexKeyPattern = index.keyPattern;
- ret->query = textExpr->getQuery();
- ret->language = textExpr->getLanguage();
- ret->caseSensitive = textExpr->getCaseSensitive();
- return ret;
- }
- else {
- // Note that indexKeyPattern.firstElement().fieldName() may not equal expr->path()
- // because expr might be inside an array operator that provides a path prefix.
- IndexScanNode* isn = new IndexScanNode();
- isn->indexKeyPattern = index.keyPattern;
- isn->indexIsMultiKey = index.multikey;
- isn->bounds.fields.resize(index.keyPattern.nFields());
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
-
- // Get the ixtag->pos-th element of the index key pattern.
- // TODO: cache this instead/with ixtag->pos?
- BSONObjIterator it(index.keyPattern);
- BSONElement keyElt = it.next();
- for (size_t i = 0; i < pos; ++i) {
- verify(it.more());
- keyElt = it.next();
+ ret->nq = &nearExpr->getData();
+ ret->baseBounds.fields.resize(index.keyPattern.nFields());
+ if (NULL != query.getProj()) {
+ ret->addPointMeta = query.getProj()->wantGeoNearPoint();
+ ret->addDistMeta = query.getProj()->wantGeoNearDistance();
}
- verify(!keyElt.eoo());
-
- IndexBoundsBuilder::translate(expr, keyElt, index, &isn->bounds.fields[pos],
- tightnessOut);
- return isn;
+ return ret;
+ } else {
+ GeoNear2DSphereNode* ret = new GeoNear2DSphereNode();
+ ret->indexKeyPattern = index.keyPattern;
+ ret->nq = &nearExpr->getData();
+ ret->baseBounds.fields.resize(index.keyPattern.nFields());
+ if (NULL != query.getProj()) {
+ ret->addPointMeta = query.getProj()->wantGeoNearPoint();
+ ret->addDistMeta = query.getProj()->wantGeoNearDistance();
+ }
+ return ret;
}
- }
+ } else if (MatchExpression::TEXT == expr->matchType()) {
+ // We must not keep the expression node around.
+ *tightnessOut = IndexBoundsBuilder::EXACT;
+ TextMatchExpression* textExpr = static_cast<TextMatchExpression*>(expr);
+ TextNode* ret = new TextNode();
+ ret->indexKeyPattern = index.keyPattern;
+ ret->query = textExpr->getQuery();
+ ret->language = textExpr->getLanguage();
+ ret->caseSensitive = textExpr->getCaseSensitive();
+ return ret;
+ } else {
+ // Note that indexKeyPattern.firstElement().fieldName() may not equal expr->path()
+ // because expr might be inside an array operator that provides a path prefix.
+ IndexScanNode* isn = new IndexScanNode();
+ isn->indexKeyPattern = index.keyPattern;
+ isn->indexIsMultiKey = index.multikey;
+ isn->bounds.fields.resize(index.keyPattern.nFields());
+ isn->maxScan = query.getParsed().getMaxScan();
+ isn->addKeyMetadata = query.getParsed().returnKey();
- bool QueryPlannerAccess::shouldMergeWithLeaf(const MatchExpression* expr,
- const ScanBuildingState& scanState) {
- const QuerySolutionNode* node = scanState.currentScan.get();
- if (NULL == node || NULL == expr) {
- return false;
+ // Get the ixtag->pos-th element of the index key pattern.
+ // TODO: cache this instead/with ixtag->pos?
+ BSONObjIterator it(index.keyPattern);
+ BSONElement keyElt = it.next();
+ for (size_t i = 0; i < pos; ++i) {
+ verify(it.more());
+ keyElt = it.next();
}
+ verify(!keyElt.eoo());
- if (NULL == scanState.ixtag) {
- return false;
- }
+ IndexBoundsBuilder::translate(expr, keyElt, index, &isn->bounds.fields[pos], tightnessOut);
- if (scanState.currentIndexNumber != scanState.ixtag->index) {
- return false;
- }
+ return isn;
+ }
+}
- size_t pos = scanState.ixtag->pos;
- const IndexEntry& index = scanState.indices[scanState.currentIndexNumber];
- const MatchExpression::MatchType mergeType = scanState.root->matchType();
+bool QueryPlannerAccess::shouldMergeWithLeaf(const MatchExpression* expr,
+ const ScanBuildingState& scanState) {
+ const QuerySolutionNode* node = scanState.currentScan.get();
+ if (NULL == node || NULL == expr) {
+ return false;
+ }
- const StageType type = node->getType();
- const MatchExpression::MatchType exprType = expr->matchType();
+ if (NULL == scanState.ixtag) {
+ return false;
+ }
- //
- // First handle special solution tree leaf types. In general, normal index bounds
- // building is not used for special leaf types, and hence we cannot merge leaves.
- //
- // This rule is always true for OR, but there are exceptions for AND.
- // Specifically, we can often merge a predicate with a special leaf type
- // by adding a filter to the special leaf type.
- //
+ if (scanState.currentIndexNumber != scanState.ixtag->index) {
+ return false;
+ }
- if (STAGE_TEXT == type) {
- // Currently only one text predicate is allowed, but to be safe, make sure that we
- // do not try to merge two text predicates.
- return MatchExpression::AND == mergeType
- && MatchExpression::TEXT != exprType;
- }
+ size_t pos = scanState.ixtag->pos;
+ const IndexEntry& index = scanState.indices[scanState.currentIndexNumber];
+ const MatchExpression::MatchType mergeType = scanState.root->matchType();
+
+ const StageType type = node->getType();
+ const MatchExpression::MatchType exprType = expr->matchType();
+
+ //
+ // First handle special solution tree leaf types. In general, normal index bounds
+ // building is not used for special leaf types, and hence we cannot merge leaves.
+ //
+ // This rule is always true for OR, but there are exceptions for AND.
+ // Specifically, we can often merge a predicate with a special leaf type
+ // by adding a filter to the special leaf type.
+ //
+
+ if (STAGE_TEXT == type) {
+ // Currently only one text predicate is allowed, but to be safe, make sure that we
+ // do not try to merge two text predicates.
+ return MatchExpression::AND == mergeType && MatchExpression::TEXT != exprType;
+ }
- if (STAGE_GEO_NEAR_2D == type || STAGE_GEO_NEAR_2DSPHERE == type) {
- // Currently only one GEO_NEAR is allowed, but to be safe, make sure that we
- // do not try to merge two GEO_NEAR predicates.
- return MatchExpression::AND == mergeType
- && MatchExpression::GEO_NEAR != exprType;
- }
+ if (STAGE_GEO_NEAR_2D == type || STAGE_GEO_NEAR_2DSPHERE == type) {
+ // Currently only one GEO_NEAR is allowed, but to be safe, make sure that we
+ // do not try to merge two GEO_NEAR predicates.
+ return MatchExpression::AND == mergeType && MatchExpression::GEO_NEAR != exprType;
+ }
- //
- // If we're here, then we're done checking for special leaf nodes, and the leaf
- // must be a regular index scan.
- //
+ //
+ // If we're here, then we're done checking for special leaf nodes, and the leaf
+ // must be a regular index scan.
+ //
- invariant(type == STAGE_IXSCAN);
- const IndexScanNode* scan = static_cast<const IndexScanNode*>(node);
- const IndexBounds* boundsToFillOut = &scan->bounds;
+ invariant(type == STAGE_IXSCAN);
+ const IndexScanNode* scan = static_cast<const IndexScanNode*>(node);
+ const IndexBounds* boundsToFillOut = &scan->bounds;
- if (boundsToFillOut->fields[pos].name.empty()) {
- // The bounds will be compounded. This is OK because the
- // plan enumerator told us that it is OK.
+ if (boundsToFillOut->fields[pos].name.empty()) {
+ // The bounds will be compounded. This is OK because the
+ // plan enumerator told us that it is OK.
+ return true;
+ } else {
+ if (MatchExpression::AND == mergeType) {
+ // The bounds will be intersected. This is OK provided
+ // that the index is NOT multikey.
+ return !index.multikey;
+ } else {
+ // The bounds will be unionized.
return true;
}
- else {
- if (MatchExpression::AND == mergeType) {
- // The bounds will be intersected. This is OK provided
- // that the index is NOT multikey.
- return !index.multikey;
- }
- else {
- // The bounds will be unionized.
- return true;
- }
- }
+ }
+}
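The multikey check above is the heart of the AND case: intersecting bounds on one field is only safe when no arrays are involved, because two predicates may be satisfied by different array elements. A minimal standalone sketch of that failure mode (plain C++ with std containers, not MongoDB's MatchExpression or IndexBounds types; the document values are made up for illustration):

// Standalone sketch: why bounds intersection is unsafe on a multikey index.
// With a = [1, 10], the query {a: {$gt: 5, $lt: 7}} matches the document
// because *different* array elements satisfy each predicate, but the
// intersected interval (5, 7) contains none of the index keys 1 and 10, so an
// index scan over the intersected bounds would miss the document.
#include <algorithm>
#include <iostream>
#include <vector>

int main() {
    const std::vector<int> a = {1, 10};  // multikey values for field 'a'

    // Match semantics over an array: each predicate may be satisfied by a
    // different element.
    const bool docMatches =
        std::any_of(a.begin(), a.end(), [](int v) { return v > 5; }) &&
        std::any_of(a.begin(), a.end(), [](int v) { return v < 7; });

    // Index scan over the intersected bounds (5, 7): looks for a single key
    // satisfying both predicates at once.
    const bool intersectedBoundsHit =
        std::any_of(a.begin(), a.end(), [](int v) { return v > 5 && v < 7; });

    std::cout << "document matches: " << docMatches << "\n"                   // 1
              << "intersected bounds hit: " << intersectedBoundsHit << "\n";  // 0
    return 0;
}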
+
+void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr, ScanBuildingState* scanState) {
+ QuerySolutionNode* node = scanState->currentScan.get();
+ invariant(NULL != node);
+
+ const MatchExpression::MatchType mergeType = scanState->root->matchType();
+ size_t pos = scanState->ixtag->pos;
+ const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
+ const StageType type = node->getType();
+ // Text data is covered, but not exactly. Text covering is unlike any other covering
+ // so we deal with it in addFilterToSolutionNode.
+ if (STAGE_TEXT == type) {
+ scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
+ return;
}
- void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr,
- ScanBuildingState* scanState) {
- QuerySolutionNode* node = scanState->currentScan.get();
- invariant(NULL != node);
+ IndexBounds* boundsToFillOut = NULL;
- const MatchExpression::MatchType mergeType = scanState->root->matchType();
- size_t pos = scanState->ixtag->pos;
- const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
+ if (STAGE_GEO_NEAR_2D == type) {
+ invariant(INDEX_2D == index.type);
- const StageType type = node->getType();
+ // 2D indexes are weird - the "2d" field stores a normally-indexed BinData field, but
+ // additional array fields are *not* exploded into multi-keys - they are stored directly
+ // as arrays in the index. Also, no matter what the index expression, the "2d" field is
+ // always first.
+ // This means that we can only generically accumulate bounds for 2D indexes over the
+ // first "2d" field (pos == 0) - MatchExpressions over other fields in the 2D index may
+ // be covered (can be evaluated using only the 2D index key). The additional fields
+ // must not affect the index scan bounds, since they are not stored in an
+ // IndexScan-compatible format.
- // Text data is covered, but not exactly. Text covering is unlike any other covering
- // so we deal with it in addFilterToSolutionNode.
- if (STAGE_TEXT == type) {
+ if (pos > 0) {
+ // Marking this field as covered allows the planner to accumulate a MatchExpression
+ // over the returned 2D index keys instead of adding to the index bounds.
scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
return;
}
- IndexBounds* boundsToFillOut = NULL;
-
- if (STAGE_GEO_NEAR_2D == type) {
-
- invariant(INDEX_2D == index.type);
-
- // 2D indexes are weird - the "2d" field stores a normally-indexed BinData field, but
- // additional array fields are *not* exploded into multi-keys - they are stored directly
- // as arrays in the index. Also, no matter what the index expression, the "2d" field is
- // always first.
- // This means that we can only generically accumulate bounds for 2D indexes over the
- // first "2d" field (pos == 0) - MatchExpressions over other fields in the 2D index may
- // be covered (can be evaluated using only the 2D index key). The additional fields
- // must not affect the index scan bounds, since they are not stored in an
- // IndexScan-compatible format.
-
- if (pos > 0) {
- // Marking this field as covered allows the planner to accumulate a MatchExpression
- // over the returned 2D index keys instead of adding to the index bounds.
- scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
- return;
- }
+ // We may have other $geoPredicates on a near index - generate bounds for these
+ GeoNear2DNode* gn = static_cast<GeoNear2DNode*>(node);
+ boundsToFillOut = &gn->baseBounds;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == type) {
+ GeoNear2DSphereNode* gn = static_cast<GeoNear2DSphereNode*>(node);
+ boundsToFillOut = &gn->baseBounds;
+ } else {
+ verify(type == STAGE_IXSCAN);
+ IndexScanNode* scan = static_cast<IndexScanNode*>(node);
- // We may have other $geoPredicates on a near index - generate bounds for these
- GeoNear2DNode* gn = static_cast<GeoNear2DNode*>(node);
- boundsToFillOut = &gn->baseBounds;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == type) {
- GeoNear2DSphereNode* gn = static_cast<GeoNear2DSphereNode*>(node);
- boundsToFillOut = &gn->baseBounds;
+ // See STAGE_GEO_NEAR_2D above - 2D indexes can only accumulate scan bounds over the
+ // first "2d" field (pos == 0)
+ if (INDEX_2D == index.type && pos > 0) {
+ scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
+ return;
}
- else {
- verify(type == STAGE_IXSCAN);
- IndexScanNode* scan = static_cast<IndexScanNode*>(node);
-
- // See STAGE_GEO_NEAR_2D above - 2D indexes can only accumulate scan bounds over the
- // first "2d" field (pos == 0)
- if (INDEX_2D == index.type && pos > 0) {
- scanState->tightness = IndexBoundsBuilder::INEXACT_COVERED;
- return;
- }
- boundsToFillOut = &scan->bounds;
- }
+ boundsToFillOut = &scan->bounds;
+ }
- // Get the ixtag->pos-th element of the index key pattern.
- // TODO: cache this instead/with ixtag->pos?
- BSONObjIterator it(index.keyPattern);
- BSONElement keyElt = it.next();
- for (size_t i = 0; i < pos; ++i) {
- verify(it.more());
- keyElt = it.next();
- }
- verify(!keyElt.eoo());
- scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ // Get the ixtag->pos-th element of the index key pattern.
+ // TODO: cache this instead/with ixtag->pos?
+ BSONObjIterator it(index.keyPattern);
+ BSONElement keyElt = it.next();
+ for (size_t i = 0; i < pos; ++i) {
+ verify(it.more());
+ keyElt = it.next();
+ }
+ verify(!keyElt.eoo());
+ scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
- verify(boundsToFillOut->fields.size() > pos);
+ verify(boundsToFillOut->fields.size() > pos);
- OrderedIntervalList* oil = &boundsToFillOut->fields[pos];
+ OrderedIntervalList* oil = &boundsToFillOut->fields[pos];
- if (boundsToFillOut->fields[pos].name.empty()) {
- IndexBoundsBuilder::translate(expr, keyElt, index, oil, &scanState->tightness);
- }
- else {
- if (MatchExpression::AND == mergeType) {
- IndexBoundsBuilder::translateAndIntersect(expr, keyElt, index, oil,
- &scanState->tightness);
- }
- else {
- verify(MatchExpression::OR == mergeType);
- IndexBoundsBuilder::translateAndUnion(expr, keyElt, index, oil,
- &scanState->tightness);
- }
+ if (boundsToFillOut->fields[pos].name.empty()) {
+ IndexBoundsBuilder::translate(expr, keyElt, index, oil, &scanState->tightness);
+ } else {
+ if (MatchExpression::AND == mergeType) {
+ IndexBoundsBuilder::translateAndIntersect(
+ expr, keyElt, index, oil, &scanState->tightness);
+ } else {
+ verify(MatchExpression::OR == mergeType);
+ IndexBoundsBuilder::translateAndUnion(expr, keyElt, index, oil, &scanState->tightness);
}
}
+}
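A simplified sketch of the merge step above: once a key field already has bounds, an AND parent intersects the new predicate's interval into them and an OR parent unions it. The Interval type, the intersect and unionHull helpers, and the single-interval representation are illustrative simplifications, not the IndexBoundsBuilder API.

// Standalone sketch of intersect-vs-union merging of bounds for one key field,
// assuming closed integer intervals.
#include <algorithm>
#include <iostream>
#include <utility>

using Interval = std::pair<int, int>;  // [lo, hi], assumed non-empty

Interval intersect(Interval a, Interval b) {
    return {std::max(a.first, b.first), std::min(a.second, b.second)};
}

Interval unionHull(Interval a, Interval b) {
    // Simplification: the real planner keeps a list of disjoint intervals;
    // here we just take the convex hull for illustration.
    return {std::min(a.first, b.first), std::max(a.second, b.second)};
}

int main() {
    Interval existing{0, 10};  // bounds already translated for this key field
    Interval incoming{5, 20};  // bounds for the predicate being merged

    Interval andBounds = intersect(existing, incoming);  // [5, 10]
    Interval orBounds = unionHull(existing, incoming);   // [0, 20]

    std::cout << "AND: [" << andBounds.first << ", " << andBounds.second << "]\n"
              << "OR:  [" << orBounds.first << ", " << orBounds.second << "]\n";
    return 0;
}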
+
+// static
+void QueryPlannerAccess::finishTextNode(QuerySolutionNode* node, const IndexEntry& index) {
+ TextNode* tn = static_cast<TextNode*>(node);
+
+ // Figure out what positions are prefix positions. We build an index key prefix from
+ // the predicates over the text index prefix keys.
+ // For example, say keyPattern = { a: 1, _fts: "text", _ftsx: 1, b: 1 }
+ // prefixEnd should be 1.
+ size_t prefixEnd = 0;
+ BSONObjIterator it(tn->indexKeyPattern);
+ // Count how many prefix terms we have.
+ while (it.more()) {
+            // We know that the only key pattern field with a type of String is the _fts field
+ // which is immediately after all prefix fields.
+ if (String == it.next().type()) {
+ break;
+ }
+ ++prefixEnd;
+ }
- // static
- void QueryPlannerAccess::finishTextNode(QuerySolutionNode* node, const IndexEntry& index) {
- TextNode* tn = static_cast<TextNode*>(node);
-
- // Figure out what positions are prefix positions. We build an index key prefix from
- // the predicates over the text index prefix keys.
- // For example, say keyPattern = { a: 1, _fts: "text", _ftsx: 1, b: 1 }
- // prefixEnd should be 1.
- size_t prefixEnd = 0;
- BSONObjIterator it(tn->indexKeyPattern);
- // Count how many prefix terms we have.
- while (it.more()) {
- // We know that the only key pattern with a type of String is the _fts field
- // which is immediately after all prefix fields.
- if (String == it.next().type()) {
- break;
+ // If there's no prefix, the filter is already on the node and the index prefix is null.
+ // We can just return.
+ if (!prefixEnd) {
+ return;
+ }
+
+ // We can't create a text stage if there aren't EQ predicates on its prefix terms. So
+ // if we've made it this far, we should have collected the prefix predicates in the
+ // filter.
+ invariant(NULL != tn->filter.get());
+ MatchExpression* textFilterMe = tn->filter.get();
+
+ BSONObjBuilder prefixBob;
+
+ if (MatchExpression::AND != textFilterMe->matchType()) {
+ // Only one prefix term.
+ invariant(1 == prefixEnd);
+ // Sanity check: must be an EQ.
+ invariant(MatchExpression::EQ == textFilterMe->matchType());
+
+ EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(textFilterMe);
+ prefixBob.append(eqExpr->getData());
+ tn->filter.reset();
+ } else {
+ invariant(MatchExpression::AND == textFilterMe->matchType());
+
+ // Indexed by the keyPattern position index assignment. We want to add
+ // prefixes in order but we must order them first.
+ vector<MatchExpression*> prefixExprs(prefixEnd, NULL);
+
+ AndMatchExpression* amExpr = static_cast<AndMatchExpression*>(textFilterMe);
+ invariant(amExpr->numChildren() >= prefixEnd);
+
+ // Look through the AND children. The prefix children we want to
+ // stash in prefixExprs.
+ size_t curChild = 0;
+ while (curChild < amExpr->numChildren()) {
+ MatchExpression* child = amExpr->getChild(curChild);
+ IndexTag* ixtag = static_cast<IndexTag*>(child->getTag());
+ invariant(NULL != ixtag);
+ // Skip this child if it's not part of a prefix, or if we've already assigned a
+ // predicate to this prefix position.
+ if (ixtag->pos >= prefixEnd || prefixExprs[ixtag->pos] != NULL) {
+ ++curChild;
+ continue;
}
- ++prefixEnd;
+ // prefixExprs takes ownership of 'child'.
+ prefixExprs[ixtag->pos] = child;
+ amExpr->getChildVector()->erase(amExpr->getChildVector()->begin() + curChild);
+ // Don't increment curChild.
+ }
+
+ // Go through the prefix equalities in order and create an index prefix out of them.
+ for (size_t i = 0; i < prefixExprs.size(); ++i) {
+ MatchExpression* prefixMe = prefixExprs[i];
+ invariant(NULL != prefixMe);
+ invariant(MatchExpression::EQ == prefixMe->matchType());
+ EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(prefixMe);
+ prefixBob.append(eqExpr->getData());
+ // We removed this from the AND expression that owned it, so we must clean it
+ // up ourselves.
+ delete prefixMe;
}
- // If there's no prefix, the filter is already on the node and the index prefix is null.
- // We can just return.
- if (!prefixEnd) {
- return;
+ // Clear out an empty $and.
+ if (0 == amExpr->numChildren()) {
+ tn->filter.reset();
+ } else if (1 == amExpr->numChildren()) {
+ // Clear out unsightly only child of $and
+ MatchExpression* child = amExpr->getChild(0);
+ amExpr->getChildVector()->clear();
+ // Deletes current filter which is amExpr.
+ tn->filter.reset(child);
}
+ }
- // We can't create a text stage if there aren't EQ predicates on its prefix terms. So
- // if we've made it this far, we should have collected the prefix predicates in the
- // filter.
- invariant(NULL != tn->filter.get());
- MatchExpression* textFilterMe = tn->filter.get();
-
- BSONObjBuilder prefixBob;
-
- if (MatchExpression::AND != textFilterMe->matchType()) {
- // Only one prefix term.
- invariant(1 == prefixEnd);
- // Sanity check: must be an EQ.
- invariant(MatchExpression::EQ == textFilterMe->matchType());
+ tn->indexPrefix = prefixBob.obj();
+}
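A standalone sketch of the prefix-counting loop above, using plain std::pair entries instead of BSON elements; the literal "text" value stands in for the check that the element's BSON type is String:

// Standalone sketch: counting the prefix fields of a text index key pattern.
// The first string-valued element ("text") marks the end of the equality
// prefix, mirroring the loop over tn->indexKeyPattern above.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    // { a: 1, _fts: "text", _ftsx: 1, b: 1 }  ->  prefixEnd == 1
    const std::vector<std::pair<std::string, std::string>> keyPattern = {
        {"a", "1"}, {"_fts", "text"}, {"_ftsx", "1"}, {"b", "1"}};

    std::size_t prefixEnd = 0;
    for (const auto& elt : keyPattern) {
        if (elt.second == "text") {
            break;  // stands in for "value has BSON type String"
        }
        ++prefixEnd;
    }

    std::cout << "prefixEnd = " << prefixEnd << "\n";  // 1
    return 0;
}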
- EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(textFilterMe);
- prefixBob.append(eqExpr->getData());
- tn->filter.reset();
+// static
+bool QueryPlannerAccess::orNeedsFetch(const ScanBuildingState* scanState) {
+ if (scanState->loosestBounds == IndexBoundsBuilder::EXACT) {
+ return false;
+ } else if (scanState->loosestBounds == IndexBoundsBuilder::INEXACT_FETCH) {
+ return true;
+ } else {
+ invariant(scanState->loosestBounds == IndexBoundsBuilder::INEXACT_COVERED);
+ const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
+ return index.multikey;
+ }
+}
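The same decision restated as a standalone sketch (the Tightness enum here is a stand-in for IndexBoundsBuilder::BoundsTightness): exact bounds never force a fetch, inexact-fetch bounds always do, and inexact-covered bounds do only when the index is multikey.

// Standalone sketch of the orNeedsFetch decision above.
#include <iostream>

enum class Tightness { kExact, kInexactCovered, kInexactFetch };

bool orNeedsFetch(Tightness loosest, bool multikey) {
    switch (loosest) {
        case Tightness::kExact:
            return false;
        case Tightness::kInexactFetch:
            return true;
        case Tightness::kInexactCovered:
            return multikey;
    }
    return true;  // unreachable; keeps compilers happy
}

int main() {
    std::cout << orNeedsFetch(Tightness::kInexactCovered, /*multikey=*/false)  // 0
              << orNeedsFetch(Tightness::kInexactCovered, /*multikey=*/true)   // 1
              << "\n";
    return 0;
}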
+
+// static
+void QueryPlannerAccess::finishAndOutputLeaf(ScanBuildingState* scanState,
+ vector<QuerySolutionNode*>* out) {
+ finishLeafNode(scanState->currentScan.get(), scanState->indices[scanState->currentIndexNumber]);
+
+ if (MatchExpression::OR == scanState->root->matchType()) {
+ if (orNeedsFetch(scanState)) {
+ // In order to correctly evaluate the predicates for this index, we have to
+ // fetch the full documents. Add a fetch node above the index scan whose filter
+ // includes *all* of the predicates used to generate the ixscan.
+ FetchNode* fetch = new FetchNode();
+ // Takes ownership.
+ fetch->filter.reset(scanState->curOr.release());
+ // Takes ownership.
+ fetch->children.push_back(scanState->currentScan.release());
+
+ scanState->currentScan.reset(fetch);
+ } else if (scanState->loosestBounds == IndexBoundsBuilder::INEXACT_COVERED) {
+            // This is an OR; at least one of the predicates used to generate 'currentScan'
+ // is inexact covered, but none is inexact fetch. This means that we can put
+ // these predicates, joined by an $or, as filters on the index scan. This avoids
+ // a fetch and allows the predicates to be covered by the index.
+ //
+ // Ex.
+ // Say we have index {a: 1} and query {$or: [{a: /foo/}, {a: /bar/}]}.
+ // The entire query, {$or: [{a: /foo/}, {a: /bar/}]}, should be a filter
+ // in the index scan stage itself.
+ scanState->currentScan->filter.reset(scanState->curOr.release());
}
- else {
- invariant(MatchExpression::AND == textFilterMe->matchType());
-
- // Indexed by the keyPattern position index assignment. We want to add
- // prefixes in order but we must order them first.
- vector<MatchExpression*> prefixExprs(prefixEnd, NULL);
-
- AndMatchExpression* amExpr = static_cast<AndMatchExpression*>(textFilterMe);
- invariant(amExpr->numChildren() >= prefixEnd);
-
- // Look through the AND children. The prefix children we want to
- // stash in prefixExprs.
- size_t curChild = 0;
- while (curChild < amExpr->numChildren()) {
- MatchExpression* child = amExpr->getChild(curChild);
- IndexTag* ixtag = static_cast<IndexTag*>(child->getTag());
- invariant(NULL != ixtag);
- // Skip this child if it's not part of a prefix, or if we've already assigned a
- // predicate to this prefix position.
- if (ixtag->pos >= prefixEnd || prefixExprs[ixtag->pos] != NULL) {
- ++curChild;
- continue;
- }
- // prefixExprs takes ownership of 'child'.
- prefixExprs[ixtag->pos] = child;
- amExpr->getChildVector()->erase(amExpr->getChildVector()->begin() + curChild);
- // Don't increment curChild.
- }
+ }
- // Go through the prefix equalities in order and create an index prefix out of them.
- for (size_t i = 0; i < prefixExprs.size(); ++i) {
- MatchExpression* prefixMe = prefixExprs[i];
- invariant(NULL != prefixMe);
- invariant(MatchExpression::EQ == prefixMe->matchType());
- EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(prefixMe);
- prefixBob.append(eqExpr->getData());
- // We removed this from the AND expression that owned it, so we must clean it
- // up ourselves.
- delete prefixMe;
- }
+ out->push_back(scanState->currentScan.release());
+}
- // Clear out an empty $and.
- if (0 == amExpr->numChildren()) {
- tn->filter.reset();
- }
- else if (1 == amExpr->numChildren()) {
- // Clear out unsightly only child of $and
- MatchExpression* child = amExpr->getChild(0);
- amExpr->getChildVector()->clear();
- // Deletes current filter which is amExpr.
- tn->filter.reset(child);
- }
- }
+// static
+void QueryPlannerAccess::finishLeafNode(QuerySolutionNode* node, const IndexEntry& index) {
+ const StageType type = node->getType();
- tn->indexPrefix = prefixBob.obj();
+ if (STAGE_TEXT == type) {
+ finishTextNode(node, index);
+ return;
}
- // static
- bool QueryPlannerAccess::orNeedsFetch(const ScanBuildingState* scanState) {
- if (scanState->loosestBounds == IndexBoundsBuilder::EXACT) {
- return false;
- }
- else if (scanState->loosestBounds == IndexBoundsBuilder::INEXACT_FETCH) {
- return true;
- }
- else {
- invariant(scanState->loosestBounds == IndexBoundsBuilder::INEXACT_COVERED);
- const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
- return index.multikey;
- }
+ IndexBounds* bounds = NULL;
+
+ if (STAGE_GEO_NEAR_2D == type) {
+ GeoNear2DNode* gnode = static_cast<GeoNear2DNode*>(node);
+ bounds = &gnode->baseBounds;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == type) {
+ GeoNear2DSphereNode* gnode = static_cast<GeoNear2DSphereNode*>(node);
+ bounds = &gnode->baseBounds;
+ } else {
+ verify(type == STAGE_IXSCAN);
+ IndexScanNode* scan = static_cast<IndexScanNode*>(node);
+ bounds = &scan->bounds;
}
- // static
- void QueryPlannerAccess::finishAndOutputLeaf(ScanBuildingState* scanState,
- vector<QuerySolutionNode*>* out) {
- finishLeafNode(scanState->currentScan.get(),
- scanState->indices[scanState->currentIndexNumber]);
-
- if (MatchExpression::OR == scanState->root->matchType()) {
- if (orNeedsFetch(scanState)) {
- // In order to correctly evaluate the predicates for this index, we have to
- // fetch the full documents. Add a fetch node above the index scan whose filter
- // includes *all* of the predicates used to generate the ixscan.
- FetchNode* fetch = new FetchNode();
- // Takes ownership.
- fetch->filter.reset(scanState->curOr.release());
- // Takes ownership.
- fetch->children.push_back(scanState->currentScan.release());
-
- scanState->currentScan.reset(fetch);
- }
- else if (scanState->loosestBounds == IndexBoundsBuilder::INEXACT_COVERED) {
- // This an OR, at least one of the predicates used to generate 'currentScan'
- // is inexact covered, but none is inexact fetch. This means that we can put
- // these predicates, joined by an $or, as filters on the index scan. This avoids
- // a fetch and allows the predicates to be covered by the index.
- //
- // Ex.
- // Say we have index {a: 1} and query {$or: [{a: /foo/}, {a: /bar/}]}.
- // The entire query, {$or: [{a: /foo/}, {a: /bar/}]}, should be a filter
- // in the index scan stage itself.
- scanState->currentScan->filter.reset(scanState->curOr.release());
- }
+ // Find the first field in the scan's bounds that was not filled out.
+ // TODO: could cache this.
+ size_t firstEmptyField = 0;
+ for (firstEmptyField = 0; firstEmptyField < bounds->fields.size(); ++firstEmptyField) {
+ if ("" == bounds->fields[firstEmptyField].name) {
+ verify(bounds->fields[firstEmptyField].intervals.empty());
+ break;
}
-
- out->push_back(scanState->currentScan.release());
}
- // static
- void QueryPlannerAccess::finishLeafNode(QuerySolutionNode* node, const IndexEntry& index) {
- const StageType type = node->getType();
+ // All fields are filled out with bounds, nothing to do.
+ if (firstEmptyField == bounds->fields.size()) {
+ IndexBoundsBuilder::alignBounds(bounds, index.keyPattern);
+ return;
+ }
- if (STAGE_TEXT == type) {
- finishTextNode(node, index);
- return;
- }
+ // Skip ahead to the firstEmptyField-th element, where we begin filling in bounds.
+ BSONObjIterator it(index.keyPattern);
+ for (size_t i = 0; i < firstEmptyField; ++i) {
+ verify(it.more());
+ it.next();
+ }
- IndexBounds* bounds = NULL;
+ // For each field in the key...
+ while (it.more()) {
+ BSONElement kpElt = it.next();
+ // There may be filled-in fields to the right of the firstEmptyField.
+ // Example:
+ // The index {loc:"2dsphere", x:1}
+ // With a predicate over x and a near search over loc.
+ if ("" == bounds->fields[firstEmptyField].name) {
+ verify(bounds->fields[firstEmptyField].intervals.empty());
+ // ...build the "all values" interval.
+ IndexBoundsBuilder::allValuesForField(kpElt, &bounds->fields[firstEmptyField]);
+ }
+ ++firstEmptyField;
+ }
- if (STAGE_GEO_NEAR_2D == type) {
- GeoNear2DNode* gnode = static_cast<GeoNear2DNode*>(node);
- bounds = &gnode->baseBounds;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == type) {
- GeoNear2DSphereNode* gnode = static_cast<GeoNear2DSphereNode*>(node);
- bounds = &gnode->baseBounds;
+    // Make sure that the length of the key is the length of the bounds we started with.
+ verify(firstEmptyField == bounds->fields.size());
+
+ // We create bounds assuming a forward direction but can easily reverse bounds to align
+ // according to our desired direction.
+ IndexBoundsBuilder::alignBounds(bounds, index.keyPattern);
+}
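A simplified sketch of what finishLeafNode does for key fields that received no bounds: assuming an index such as {a: 1, b: 1} with a predicate only on 'a', each trailing field gets an "all values" interval. The FieldBounds struct and string-valued intervals are illustrative stand-ins, not the real IndexBounds or OrderedIntervalList types.

// Standalone sketch: fill unconstrained trailing key fields with "all values".
#include <iostream>
#include <string>
#include <vector>

struct FieldBounds {
    std::string name;      // empty means "not filled out yet"
    std::string interval;  // human-readable stand-in for an interval list
};

int main() {
    const std::vector<std::string> keyPattern = {"a", "b"};
    std::vector<FieldBounds> bounds = {{"a", "[5, 5]"}, {"", ""}};

    for (std::size_t i = 0; i < keyPattern.size(); ++i) {
        if (bounds[i].name.empty()) {
            bounds[i] = {keyPattern[i], "[MinKey, MaxKey]"};  // all values
        }
    }

    for (const auto& f : bounds) {
        std::cout << f.name << ": " << f.interval << "\n";
    }
    return 0;
}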
+
+// static
+void QueryPlannerAccess::findElemMatchChildren(const MatchExpression* node,
+ vector<MatchExpression*>* out,
+ vector<MatchExpression*>* subnodesOut) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ MatchExpression* child = node->getChild(i);
+ if (Indexability::isBoundsGenerating(child) && NULL != child->getTag()) {
+ out->push_back(child);
+ } else if (MatchExpression::AND == child->matchType() ||
+ Indexability::arrayUsesIndexOnChildren(child)) {
+ findElemMatchChildren(child, out, subnodesOut);
+ } else if (NULL != child->getTag()) {
+ subnodesOut->push_back(child);
}
- else {
- verify(type == STAGE_IXSCAN);
- IndexScanNode* scan = static_cast<IndexScanNode*>(node);
- bounds = &scan->bounds;
- }
-
- // Find the first field in the scan's bounds that was not filled out.
- // TODO: could cache this.
- size_t firstEmptyField = 0;
- for (firstEmptyField = 0; firstEmptyField < bounds->fields.size(); ++firstEmptyField) {
- if ("" == bounds->fields[firstEmptyField].name) {
- verify(bounds->fields[firstEmptyField].intervals.empty());
- break;
+ }
+}
+
+// static
+bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out) {
+ // Initialize the ScanBuildingState.
+ ScanBuildingState scanState(root, inArrayOperator, indices);
+
+ while (scanState.curChild < root->numChildren()) {
+ MatchExpression* child = root->getChild(scanState.curChild);
+
+ // If there is no tag, it's not using an index. We've sorted our children such that the
+ // children with tags are first, so we stop now.
+ if (NULL == child->getTag()) {
+ break;
+ }
+
+ scanState.ixtag = static_cast<IndexTag*>(child->getTag());
+ // If there's a tag it must be valid.
+ verify(IndexTag::kNoIndex != scanState.ixtag->index);
+
+ // If the child can't use an index on its own field (and the child is not a negation
+ // of a bounds-generating expression), then it's indexed by virtue of one of
+ // its children having an index.
+ //
+        // NOTE: If the child is logical, it could possibly collapse into a single ixscan. We
+ // ignore this for now.
+ if (!Indexability::isBoundsGenerating(child)) {
+ // If we're here, then the child is indexed by virtue of its children.
+ // In most cases this means that we recursively build indexed data
+ // access on 'child'.
+ if (!processIndexScansSubnode(query, &scanState, params, out)) {
+ return false;
}
+ continue;
}
- // All fields are filled out with bounds, nothing to do.
- if (firstEmptyField == bounds->fields.size()) {
- IndexBoundsBuilder::alignBounds(bounds, index.keyPattern);
- return;
- }
+ // If we're here, we now know that 'child' can use an index directly and the index is
+ // over the child's field.
- // Skip ahead to the firstEmptyField-th element, where we begin filling in bounds.
- BSONObjIterator it(index.keyPattern);
- for (size_t i = 0; i < firstEmptyField; ++i) {
- verify(it.more());
- it.next();
+ // If 'child' is a NOT, then the tag we're interested in is on the NOT's
+ // child node.
+ if (MatchExpression::NOT == child->matchType()) {
+ scanState.ixtag = static_cast<IndexTag*>(child->getChild(0)->getTag());
+ invariant(IndexTag::kNoIndex != scanState.ixtag->index);
}
- // For each field in the key...
- while (it.more()) {
- BSONElement kpElt = it.next();
- // There may be filled-in fields to the right of the firstEmptyField.
- // Example:
- // The index {loc:"2dsphere", x:1}
- // With a predicate over x and a near search over loc.
- if ("" == bounds->fields[firstEmptyField].name) {
- verify(bounds->fields[firstEmptyField].intervals.empty());
- // ...build the "all values" interval.
- IndexBoundsBuilder::allValuesForField(kpElt,
- &bounds->fields[firstEmptyField]);
+ // If the child we're looking at uses a different index than the current index scan, add
+ // the current index scan to the output as we're done with it. The index scan created
+ // by the child then becomes our new current index scan. Note that the current scan
+ // could be NULL, in which case we don't output it. The rest of the logic is identical.
+ //
+ // If the child uses the same index as the current index scan, we may be able to merge
+ // the bounds for the two scans.
+ //
+ // Guiding principle: must the values we're testing come from the same array in the
+ // document? If so, we can combine bounds (via intersection or compounding). If not,
+ // we can't.
+ //
+ // If the index is NOT multikey, it's always semantically correct to combine bounds,
+ // as there are no arrays to worry about.
+ //
+ // If the index is multikey, there are arrays of values. There are several
+ // complications in the multikey case that have to be obeyed both by the enumerator
+ // and here as we try to merge predicates into query solution leaves. The hairy
+ // details of these rules are documented near the top of planner_access.h.
+ if (shouldMergeWithLeaf(child, scanState)) {
+ // The child uses the same index we're currently building a scan for. Merge
+ // the bounds and filters.
+ verify(scanState.currentIndexNumber == scanState.ixtag->index);
+ scanState.tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ mergeWithLeafNode(child, &scanState);
+ handleFilter(&scanState);
+ } else {
+ if (NULL != scanState.currentScan.get()) {
+                // Output the current scan before starting to construct a new one.
+ finishAndOutputLeaf(&scanState, out);
+ } else {
+ verify(IndexTag::kNoIndex == scanState.currentIndexNumber);
}
- ++firstEmptyField;
- }
- // Make sure that the length of the key is the length of the bounds we started.
- verify(firstEmptyField == bounds->fields.size());
+ // Reset state before producing a new leaf.
+ scanState.resetForNextScan(scanState.ixtag);
- // We create bounds assuming a forward direction but can easily reverse bounds to align
- // according to our desired direction.
- IndexBoundsBuilder::alignBounds(bounds, index.keyPattern);
- }
+ scanState.currentScan.reset(makeLeafNode(query,
+ indices[scanState.currentIndexNumber],
+ scanState.ixtag->pos,
+ child,
+ &scanState.tightness));
- // static
- void QueryPlannerAccess::findElemMatchChildren(const MatchExpression* node,
- vector<MatchExpression*>* out,
- vector<MatchExpression*>* subnodesOut) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- MatchExpression* child = node->getChild(i);
- if (Indexability::isBoundsGenerating(child) &&
- NULL != child->getTag()) {
- out->push_back(child);
- }
- else if (MatchExpression::AND == child->matchType() ||
- Indexability::arrayUsesIndexOnChildren(child)) {
- findElemMatchChildren(child, out, subnodesOut);
- }
- else if (NULL != child->getTag()) {
- subnodesOut->push_back(child);
- }
+ handleFilter(&scanState);
}
}
- // static
- bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out) {
- // Initialize the ScanBuildingState.
- ScanBuildingState scanState(root, inArrayOperator, indices);
-
- while (scanState.curChild < root->numChildren()) {
- MatchExpression* child = root->getChild(scanState.curChild);
-
- // If there is no tag, it's not using an index. We've sorted our children such that the
- // children with tags are first, so we stop now.
- if (NULL == child->getTag()) { break; }
-
- scanState.ixtag = static_cast<IndexTag*>(child->getTag());
- // If there's a tag it must be valid.
- verify(IndexTag::kNoIndex != scanState.ixtag->index);
-
- // If the child can't use an index on its own field (and the child is not a negation
- // of a bounds-generating expression), then it's indexed by virtue of one of
- // its children having an index.
- //
- // NOTE: If the child is logical, it could possibly collapse into a single ixscan. we
- // ignore this for now.
- if (!Indexability::isBoundsGenerating(child)) {
- // If we're here, then the child is indexed by virtue of its children.
- // In most cases this means that we recursively build indexed data
- // access on 'child'.
- if (!processIndexScansSubnode(query, &scanState, params, out)) {
- return false;
- }
- continue;
- }
-
- // If we're here, we now know that 'child' can use an index directly and the index is
- // over the child's field.
-
- // If 'child' is a NOT, then the tag we're interested in is on the NOT's
- // child node.
- if (MatchExpression::NOT == child->matchType()) {
- scanState.ixtag = static_cast<IndexTag*>(child->getChild(0)->getTag());
- invariant(IndexTag::kNoIndex != scanState.ixtag->index);
- }
-
- // If the child we're looking at uses a different index than the current index scan, add
- // the current index scan to the output as we're done with it. The index scan created
- // by the child then becomes our new current index scan. Note that the current scan
- // could be NULL, in which case we don't output it. The rest of the logic is identical.
- //
- // If the child uses the same index as the current index scan, we may be able to merge
- // the bounds for the two scans.
- //
- // Guiding principle: must the values we're testing come from the same array in the
- // document? If so, we can combine bounds (via intersection or compounding). If not,
- // we can't.
- //
- // If the index is NOT multikey, it's always semantically correct to combine bounds,
- // as there are no arrays to worry about.
- //
- // If the index is multikey, there are arrays of values. There are several
- // complications in the multikey case that have to be obeyed both by the enumerator
- // and here as we try to merge predicates into query solution leaves. The hairy
- // details of these rules are documented near the top of planner_access.h.
- if (shouldMergeWithLeaf(child, scanState)) {
- // The child uses the same index we're currently building a scan for. Merge
- // the bounds and filters.
- verify(scanState.currentIndexNumber == scanState.ixtag->index);
- scanState.tightness = IndexBoundsBuilder::INEXACT_FETCH;
- mergeWithLeafNode(child, &scanState);
- handleFilter(&scanState);
- }
- else {
- if (NULL != scanState.currentScan.get()) {
- // Output the current scan before starting to construct a new out.
- finishAndOutputLeaf(&scanState, out);
- }
- else {
- verify(IndexTag::kNoIndex == scanState.currentIndexNumber);
- }
-
- // Reset state before producing a new leaf.
- scanState.resetForNextScan(scanState.ixtag);
-
- scanState.currentScan.reset(makeLeafNode(query,
- indices[scanState.currentIndexNumber],
- scanState.ixtag->pos, child,
- &scanState.tightness));
+ // Output the scan we're done with, if it exists.
+ if (NULL != scanState.currentScan.get()) {
+ finishAndOutputLeaf(&scanState, out);
+ }
- handleFilter(&scanState);
+ return true;
+}
+
+// static
+bool QueryPlannerAccess::processIndexScansElemMatch(const CanonicalQuery& query,
+ ScanBuildingState* scanState,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out) {
+ MatchExpression* root = scanState->root;
+ MatchExpression* child = root->getChild(scanState->curChild);
+ const vector<IndexEntry>& indices = scanState->indices;
+
+ // We have an AND with an ELEM_MATCH_OBJECT child. The plan enumerator produces
+ // index taggings which indicate that we should try to compound with
+ // predicates retrieved from inside the subtree rooted at the ELEM_MATCH.
+ // In order to obey the enumerator's tagging, we need to retrieve these
+ // predicates from inside the $elemMatch, and try to merge them with
+ // the current index scan.
+
+ // Contains tagged predicates from inside the tree rooted at 'child'
+ // which are logically part of the AND.
+ vector<MatchExpression*> emChildren;
+
+ // Contains tagged nodes that are not logically part of the AND and
+ // cannot use the index directly (e.g. OR nodes which are tagged to
+ // be indexed).
+ vector<MatchExpression*> emSubnodes;
+
+ // Populate 'emChildren' and 'emSubnodes'.
+ findElemMatchChildren(child, &emChildren, &emSubnodes);
+
+ // Recursively build data access for the nodes inside 'emSubnodes'.
+ for (size_t i = 0; i < emSubnodes.size(); ++i) {
+ MatchExpression* subnode = emSubnodes[i];
+
+ if (!Indexability::isBoundsGenerating(subnode)) {
+ // Must pass true for 'inArrayOperator' because the subnode is
+ // beneath an ELEM_MATCH_OBJECT.
+ QuerySolutionNode* childSolution =
+ buildIndexedDataAccess(query, subnode, true, indices, params);
+
+ // buildIndexedDataAccess(...) returns NULL in error conditions, when
+ // it is unable to construct a query solution from a tagged match
+ // expression tree. If we are unable to construct a solution according
+ // to the instructions from the enumerator, then we bail out early
+ // (by returning false) rather than continuing on and potentially
+ // constructing an invalid solution tree.
+ if (NULL == childSolution) {
+ return false;
}
- }
- // Output the scan we're done with, if it exists.
- if (NULL != scanState.currentScan.get()) {
- finishAndOutputLeaf(&scanState, out);
+ // Output the resulting solution tree.
+ out->push_back(childSolution);
}
-
- return true;
}
- // static
- bool QueryPlannerAccess::processIndexScansElemMatch(const CanonicalQuery& query,
- ScanBuildingState* scanState,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out) {
- MatchExpression* root = scanState->root;
- MatchExpression* child = root->getChild(scanState->curChild);
- const vector<IndexEntry>& indices = scanState->indices;
-
- // We have an AND with an ELEM_MATCH_OBJECT child. The plan enumerator produces
- // index taggings which indicate that we should try to compound with
- // predicates retrieved from inside the subtree rooted at the ELEM_MATCH.
- // In order to obey the enumerator's tagging, we need to retrieve these
- // predicates from inside the $elemMatch, and try to merge them with
- // the current index scan.
-
- // Contains tagged predicates from inside the tree rooted at 'child'
- // which are logically part of the AND.
- vector<MatchExpression*> emChildren;
-
- // Contains tagged nodes that are not logically part of the AND and
- // cannot use the index directly (e.g. OR nodes which are tagged to
- // be indexed).
- vector<MatchExpression*> emSubnodes;
-
- // Populate 'emChildren' and 'emSubnodes'.
- findElemMatchChildren(child, &emChildren, &emSubnodes);
-
- // Recursively build data access for the nodes inside 'emSubnodes'.
- for (size_t i = 0; i < emSubnodes.size(); ++i) {
- MatchExpression* subnode = emSubnodes[i];
-
- if (!Indexability::isBoundsGenerating(subnode)) {
- // Must pass true for 'inArrayOperator' because the subnode is
- // beneath an ELEM_MATCH_OBJECT.
- QuerySolutionNode* childSolution = buildIndexedDataAccess(query,
- subnode,
- true,
- indices,
- params);
-
- // buildIndexedDataAccess(...) returns NULL in error conditions, when
- // it is unable to construct a query solution from a tagged match
- // expression tree. If we are unable to construct a solution according
- // to the instructions from the enumerator, then we bail out early
- // (by returning false) rather than continuing on and potentially
- // constructing an invalid solution tree.
- if (NULL == childSolution) { return false; }
-
- // Output the resulting solution tree.
- out->push_back(childSolution);
- }
- }
-
- // For each predicate in 'emChildren', try to merge it with the current index scan.
- //
- // This loop is similar to that in processIndexScans(...), except it does not call into
- // handleFilters(...). Instead, we leave the entire $elemMatch filter intact. This way,
- // the complete $elemMatch expression will be affixed as a filter later on.
- for (size_t i = 0; i < emChildren.size(); ++i) {
- MatchExpression* emChild = emChildren[i];
- invariant(NULL != emChild->getTag());
- scanState->ixtag = static_cast<IndexTag*>(emChild->getTag());
-
- // If 'emChild' is a NOT, then the tag we're interested in is on the NOT's
- // child node.
- if (MatchExpression::NOT == emChild->matchType()) {
- invariant(NULL != emChild->getChild(0)->getTag());
- scanState->ixtag = static_cast<IndexTag*>(emChild->getChild(0)->getTag());
- invariant(IndexTag::kNoIndex != scanState->ixtag->index);
+ // For each predicate in 'emChildren', try to merge it with the current index scan.
+ //
+ // This loop is similar to that in processIndexScans(...), except it does not call into
+    // handleFilter(...). Instead, we leave the entire $elemMatch filter intact. This way,
+ // the complete $elemMatch expression will be affixed as a filter later on.
+ for (size_t i = 0; i < emChildren.size(); ++i) {
+ MatchExpression* emChild = emChildren[i];
+ invariant(NULL != emChild->getTag());
+ scanState->ixtag = static_cast<IndexTag*>(emChild->getTag());
+
+ // If 'emChild' is a NOT, then the tag we're interested in is on the NOT's
+ // child node.
+ if (MatchExpression::NOT == emChild->matchType()) {
+ invariant(NULL != emChild->getChild(0)->getTag());
+ scanState->ixtag = static_cast<IndexTag*>(emChild->getChild(0)->getTag());
+ invariant(IndexTag::kNoIndex != scanState->ixtag->index);
+ }
+
+ if (shouldMergeWithLeaf(emChild, *scanState)) {
+ // The child uses the same index we're currently building a scan for. Merge
+ // the bounds and filters.
+ verify(scanState->currentIndexNumber == scanState->ixtag->index);
+
+ scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ mergeWithLeafNode(emChild, scanState);
+ } else {
+ if (NULL != scanState->currentScan.get()) {
+ finishAndOutputLeaf(scanState, out);
+ } else {
+ verify(IndexTag::kNoIndex == scanState->currentIndexNumber);
}
- if (shouldMergeWithLeaf(emChild, *scanState)) {
- // The child uses the same index we're currently building a scan for. Merge
- // the bounds and filters.
- verify(scanState->currentIndexNumber == scanState->ixtag->index);
+ scanState->currentIndexNumber = scanState->ixtag->index;
- scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
- mergeWithLeafNode(emChild, scanState);
- }
- else {
- if (NULL != scanState->currentScan.get()) {
- finishAndOutputLeaf(scanState, out);
- }
- else {
- verify(IndexTag::kNoIndex == scanState->currentIndexNumber);
- }
-
- scanState->currentIndexNumber = scanState->ixtag->index;
-
- scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
- scanState->currentScan.reset(makeLeafNode(query, indices[scanState->currentIndexNumber],
- scanState->ixtag->pos,
- emChild, &scanState->tightness));
- }
+ scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ scanState->currentScan.reset(makeLeafNode(query,
+ indices[scanState->currentIndexNumber],
+ scanState->ixtag->pos,
+ emChild,
+ &scanState->tightness));
}
+ }
- // We're done processing the $elemMatch child. We leave it hanging off
- // it's AND parent so that it will be affixed as a filter later on,
- // and move on to the next child of the AND.
+ // We're done processing the $elemMatch child. We leave it hanging off
+    // its AND parent so that it will be affixed as a filter later on,
+ // and move on to the next child of the AND.
+ ++scanState->curChild;
+ return true;
+}
+
+// static
+bool QueryPlannerAccess::processIndexScansSubnode(const CanonicalQuery& query,
+ ScanBuildingState* scanState,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out) {
+ MatchExpression* root = scanState->root;
+ MatchExpression* child = root->getChild(scanState->curChild);
+ const vector<IndexEntry>& indices = scanState->indices;
+ bool inArrayOperator = scanState->inArrayOperator;
+
+ if (MatchExpression::AND == root->matchType() &&
+ MatchExpression::ELEM_MATCH_OBJECT == child->matchType()) {
+ return processIndexScansElemMatch(query, scanState, params, out);
+ } else if (!inArrayOperator) {
+ // The logical sub-tree is responsible for fully evaluating itself. Any
+ // required filters or fetches are already hung on it. As such, we remove the
+ // filter branch from our tree. buildIndexedDataAccess takes ownership of the
+ // child.
+ root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
+ // The curChild of today is the curChild+1 of yesterday.
+ } else {
++scanState->curChild;
- return true;
}
- // static
- bool QueryPlannerAccess::processIndexScansSubnode(const CanonicalQuery& query,
- ScanBuildingState* scanState,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out) {
- MatchExpression* root = scanState->root;
- MatchExpression* child = root->getChild(scanState->curChild);
- const vector<IndexEntry>& indices = scanState->indices;
- bool inArrayOperator = scanState->inArrayOperator;
-
- if (MatchExpression::AND == root->matchType() &&
- MatchExpression::ELEM_MATCH_OBJECT == child->matchType()) {
- return processIndexScansElemMatch(query, scanState, params, out);
- }
- else if (!inArrayOperator) {
- // The logical sub-tree is responsible for fully evaluating itself. Any
- // required filters or fetches are already hung on it. As such, we remove the
- // filter branch from our tree. buildIndexedDataAccess takes ownership of the
- // child.
- root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
- // The curChild of today is the curChild+1 of yesterday.
- }
- else {
- ++scanState->curChild;
- }
-
- // If inArrayOperator: takes ownership of child, which is OK, since we detached
- // child from root.
- QuerySolutionNode* childSolution = buildIndexedDataAccess(query,
- child,
- inArrayOperator,
- indices,
- params);
- if (NULL == childSolution) { return false; }
- out->push_back(childSolution);
- return true;
+ // If inArrayOperator: takes ownership of child, which is OK, since we detached
+ // child from root.
+ QuerySolutionNode* childSolution =
+ buildIndexedDataAccess(query, child, inArrayOperator, indices, params);
+ if (NULL == childSolution) {
+ return false;
+ }
+ out->push_back(childSolution);
+ return true;
+}
+
+// static
+QuerySolutionNode* QueryPlannerAccess::buildIndexedAnd(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const vector<IndexEntry>& indices,
+ const QueryPlannerParams& params) {
+ unique_ptr<MatchExpression> autoRoot;
+ if (!inArrayOperator) {
+ autoRoot.reset(root);
}
- // static
- QuerySolutionNode* QueryPlannerAccess::buildIndexedAnd(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const vector<IndexEntry>& indices,
- const QueryPlannerParams& params) {
- unique_ptr<MatchExpression> autoRoot;
- if (!inArrayOperator) {
- autoRoot.reset(root);
- }
-
- // If we are not allowed to trim for ixisect, then clone the match expression before
- // passing it to processIndexScans(), which may do the trimming. If we end up with
- // an index intersection solution, then we use our copy of the match expression to be
- // sure that the FETCH stage will recheck the entire predicate.
- //
- // XXX: This block is a hack to accommodate the storage layer concurrency model.
- std::unique_ptr<MatchExpression> clonedRoot;
- if (params.options & QueryPlannerParams::CANNOT_TRIM_IXISECT) {
- clonedRoot.reset(root->shallowClone());
- }
-
- vector<QuerySolutionNode*> ixscanNodes;
- if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) {
- return NULL;
- }
-
- //
- // Process all non-indexed predicates. We hang these above the AND with a fetch and
- // filter.
- //
-
- // This is the node we're about to return.
- QuerySolutionNode* andResult;
+ // If we are not allowed to trim for ixisect, then clone the match expression before
+ // passing it to processIndexScans(), which may do the trimming. If we end up with
+ // an index intersection solution, then we use our copy of the match expression to be
+ // sure that the FETCH stage will recheck the entire predicate.
+ //
+ // XXX: This block is a hack to accommodate the storage layer concurrency model.
+ std::unique_ptr<MatchExpression> clonedRoot;
+ if (params.options & QueryPlannerParams::CANNOT_TRIM_IXISECT) {
+ clonedRoot.reset(root->shallowClone());
+ }
- // We must use an index for at least one child of the AND. We shouldn't be here if this
- // isn't the case.
- verify(ixscanNodes.size() >= 1);
+ vector<QuerySolutionNode*> ixscanNodes;
+ if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) {
+ return NULL;
+ }
- // Short-circuit: an AND of one child is just the child.
- if (ixscanNodes.size() == 1) {
- andResult = ixscanNodes[0];
+ //
+ // Process all non-indexed predicates. We hang these above the AND with a fetch and
+ // filter.
+ //
+
+ // This is the node we're about to return.
+ QuerySolutionNode* andResult;
+
+ // We must use an index for at least one child of the AND. We shouldn't be here if this
+ // isn't the case.
+ verify(ixscanNodes.size() >= 1);
+
+ // Short-circuit: an AND of one child is just the child.
+ if (ixscanNodes.size() == 1) {
+ andResult = ixscanNodes[0];
+ } else {
+ // Figure out if we want AndHashNode or AndSortedNode.
+ bool allSortedByDiskLoc = true;
+ for (size_t i = 0; i < ixscanNodes.size(); ++i) {
+ if (!ixscanNodes[i]->sortedByDiskLoc()) {
+ allSortedByDiskLoc = false;
+ break;
+ }
}
- else {
- // Figure out if we want AndHashNode or AndSortedNode.
- bool allSortedByDiskLoc = true;
- for (size_t i = 0; i < ixscanNodes.size(); ++i) {
- if (!ixscanNodes[i]->sortedByDiskLoc()) {
- allSortedByDiskLoc = false;
+ if (allSortedByDiskLoc) {
+ AndSortedNode* asn = new AndSortedNode();
+ asn->children.swap(ixscanNodes);
+ andResult = asn;
+ } else if (internalQueryPlannerEnableHashIntersection) {
+ AndHashNode* ahn = new AndHashNode();
+ ahn->children.swap(ixscanNodes);
+ andResult = ahn;
+ // The AndHashNode provides the sort order of its last child. If any of the
+ // possible subnodes of AndHashNode provides the sort order we care about, we put
+ // that one last.
+ for (size_t i = 0; i < ahn->children.size(); ++i) {
+ ahn->children[i]->computeProperties();
+ const BSONObjSet& sorts = ahn->children[i]->getSort();
+ if (sorts.end() != sorts.find(query.getParsed().getSort())) {
+ std::swap(ahn->children[i], ahn->children.back());
break;
}
}
- if (allSortedByDiskLoc) {
- AndSortedNode* asn = new AndSortedNode();
- asn->children.swap(ixscanNodes);
- andResult = asn;
- }
- else if (internalQueryPlannerEnableHashIntersection) {
- AndHashNode* ahn = new AndHashNode();
- ahn->children.swap(ixscanNodes);
- andResult = ahn;
- // The AndHashNode provides the sort order of its last child. If any of the
- // possible subnodes of AndHashNode provides the sort order we care about, we put
- // that one last.
- for (size_t i = 0; i < ahn->children.size(); ++i) {
- ahn->children[i]->computeProperties();
- const BSONObjSet& sorts = ahn->children[i]->getSort();
- if (sorts.end() != sorts.find(query.getParsed().getSort())) {
- std::swap(ahn->children[i], ahn->children.back());
- break;
- }
- }
- }
- else {
- // We can't use sort-based intersection, and hash-based intersection is disabled.
- // Clean up the index scans and bail out by returning NULL.
- LOG(5) << "Can't build index intersection solution: "
- << "AND_SORTED is not possible and AND_HASH is disabled.";
-
- for (size_t i = 0; i < ixscanNodes.size(); i++) {
- delete ixscanNodes[i];
- }
- return NULL;
- }
- }
-
- // Don't bother doing any kind of fetch analysis lite if we're doing it anyway above us.
- if (inArrayOperator) {
- return andResult;
- }
-
- // XXX: This block is a hack to accommodate the storage layer concurrency model.
- if ((params.options & QueryPlannerParams::CANNOT_TRIM_IXISECT) &&
- (andResult->getType() == STAGE_AND_HASH || andResult->getType() == STAGE_AND_SORTED)) {
- // We got an index intersection solution, and we aren't allowed to answer predicates
- // using the index. We add a fetch with the entire filter.
- invariant(clonedRoot.get());
- FetchNode* fetch = new FetchNode();
- fetch->filter.reset(clonedRoot.release());
- // Takes ownership of 'andResult'.
- fetch->children.push_back(andResult);
- return fetch;
- }
-
- // If there are any nodes still attached to the AND, we can't answer them using the
- // index, so we put a fetch with filter.
- if (root->numChildren() > 0) {
- FetchNode* fetch = new FetchNode();
- verify(NULL != autoRoot.get());
- if (autoRoot->numChildren() == 1) {
- // An $and of one thing is that thing.
- MatchExpression* child = autoRoot->getChild(0);
- autoRoot->getChildVector()->clear();
- // Takes ownership.
- fetch->filter.reset(child);
- // 'autoRoot' will delete the empty $and.
+ } else {
+ // We can't use sort-based intersection, and hash-based intersection is disabled.
+ // Clean up the index scans and bail out by returning NULL.
+ LOG(5) << "Can't build index intersection solution: "
+ << "AND_SORTED is not possible and AND_HASH is disabled.";
+
+ for (size_t i = 0; i < ixscanNodes.size(); i++) {
+ delete ixscanNodes[i];
}
- else { // root->numChildren() > 1
- // Takes ownership.
- fetch->filter.reset(autoRoot.release());
- }
- // takes ownership
- fetch->children.push_back(andResult);
- andResult = fetch;
- }
- else {
- // root has no children, let autoRoot get rid of it when it goes out of scope.
+ return NULL;
}
+ }
+ // Don't bother doing any kind of fetch analysis lite if we're doing it anyway above us.
+ if (inArrayOperator) {
return andResult;
}
- // static
- QuerySolutionNode* QueryPlannerAccess::buildIndexedOr(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const vector<IndexEntry>& indices,
- const QueryPlannerParams& params) {
- unique_ptr<MatchExpression> autoRoot;
- if (!inArrayOperator) {
- autoRoot.reset(root);
- }
+ // XXX: This block is a hack to accommodate the storage layer concurrency model.
+ if ((params.options & QueryPlannerParams::CANNOT_TRIM_IXISECT) &&
+ (andResult->getType() == STAGE_AND_HASH || andResult->getType() == STAGE_AND_SORTED)) {
+ // We got an index intersection solution, and we aren't allowed to answer predicates
+ // using the index. We add a fetch with the entire filter.
+ invariant(clonedRoot.get());
+ FetchNode* fetch = new FetchNode();
+ fetch->filter.reset(clonedRoot.release());
+ // Takes ownership of 'andResult'.
+ fetch->children.push_back(andResult);
+ return fetch;
+ }
- vector<QuerySolutionNode*> ixscanNodes;
- if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) {
- return NULL;
- }
+ // If there are any nodes still attached to the AND, we can't answer them using the
+ // index, so we put a fetch with filter.
+ if (root->numChildren() > 0) {
+ FetchNode* fetch = new FetchNode();
+ verify(NULL != autoRoot.get());
+ if (autoRoot->numChildren() == 1) {
+ // An $and of one thing is that thing.
+ MatchExpression* child = autoRoot->getChild(0);
+ autoRoot->getChildVector()->clear();
+ // Takes ownership.
+ fetch->filter.reset(child);
+ // 'autoRoot' will delete the empty $and.
+ } else { // root->numChildren() > 1
+ // Takes ownership.
+ fetch->filter.reset(autoRoot.release());
+ }
+ // takes ownership
+ fetch->children.push_back(andResult);
+ andResult = fetch;
+ } else {
+ // root has no children, let autoRoot get rid of it when it goes out of scope.
+ }
- // Unlike an AND, an OR cannot have filters hanging off of it. We stop processing
- // when any of our children lack index tags. If a node lacks an index tag it cannot
- // be answered via an index.
- if (!inArrayOperator && 0 != root->numChildren()) {
- warning() << "planner OR error, non-indexed child of OR.";
- // We won't enumerate an OR without indices for each child, so this isn't an issue, even
- // if we have an AND with an OR child -- we won't get here unless the OR is fully
- // indexed.
- return NULL;
- }
+ return andResult;
+}
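A standalone sketch of the intersection-strategy choice in buildIndexedAnd: AND_SORTED when every child scan is sorted by disk location, otherwise AND_HASH if hash intersection is enabled, otherwise no intersection plan. The AndStrategy enum and chooseStrategy helper are illustrative names, not planner API.

// Standalone sketch of choosing between sort-based and hash-based index
// intersection for an AND of index scans.
#include <iostream>
#include <vector>

enum class AndStrategy { kSorted, kHash, kNone };

AndStrategy chooseStrategy(const std::vector<bool>& childSortedByDiskLoc,
                           bool hashIntersectionEnabled) {
    bool allSorted = true;
    for (bool sorted : childSortedByDiskLoc) {
        if (!sorted) {
            allSorted = false;
            break;
        }
    }
    if (allSorted) {
        return AndStrategy::kSorted;
    }
    return hashIntersectionEnabled ? AndStrategy::kHash : AndStrategy::kNone;
}

int main() {
    std::cout << static_cast<int>(chooseStrategy({true, true}, true)) << "\n";    // 0: AND_SORTED
    std::cout << static_cast<int>(chooseStrategy({true, false}, true)) << "\n";   // 1: AND_HASH
    std::cout << static_cast<int>(chooseStrategy({true, false}, false)) << "\n";  // 2: no plan
    return 0;
}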
+
+// static
+QuerySolutionNode* QueryPlannerAccess::buildIndexedOr(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const vector<IndexEntry>& indices,
+ const QueryPlannerParams& params) {
+ unique_ptr<MatchExpression> autoRoot;
+ if (!inArrayOperator) {
+ autoRoot.reset(root);
+ }
- QuerySolutionNode* orResult = NULL;
+ vector<QuerySolutionNode*> ixscanNodes;
+ if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) {
+ return NULL;
+ }
- // An OR of one node is just that node.
- if (1 == ixscanNodes.size()) {
- orResult = ixscanNodes[0];
- }
- else {
- bool shouldMergeSort = false;
-
- if (!query.getParsed().getSort().isEmpty()) {
- const BSONObj& desiredSort = query.getParsed().getSort();
-
- // If there exists a sort order that is present in each child, we can merge them and
- // maintain that sort order / those sort orders.
- ixscanNodes[0]->computeProperties();
- BSONObjSet sharedSortOrders = ixscanNodes[0]->getSort();
-
- if (!sharedSortOrders.empty()) {
- for (size_t i = 1; i < ixscanNodes.size(); ++i) {
- ixscanNodes[i]->computeProperties();
- BSONObjSet isect;
- set_intersection(sharedSortOrders.begin(),
- sharedSortOrders.end(),
- ixscanNodes[i]->getSort().begin(),
- ixscanNodes[i]->getSort().end(),
- std::inserter(isect, isect.end()),
- BSONObjCmp());
- sharedSortOrders = isect;
- if (sharedSortOrders.empty()) {
- break;
- }
+ // Unlike an AND, an OR cannot have filters hanging off of it. We stop processing
+ // when any of our children lack index tags. If a node lacks an index tag it cannot
+ // be answered via an index.
+ if (!inArrayOperator && 0 != root->numChildren()) {
+ warning() << "planner OR error, non-indexed child of OR.";
+ // We won't enumerate an OR without indices for each child, so this isn't an issue, even
+ // if we have an AND with an OR child -- we won't get here unless the OR is fully
+ // indexed.
+ return NULL;
+ }
+
+ QuerySolutionNode* orResult = NULL;
+
+ // An OR of one node is just that node.
+ if (1 == ixscanNodes.size()) {
+ orResult = ixscanNodes[0];
+ } else {
+ bool shouldMergeSort = false;
+
+ if (!query.getParsed().getSort().isEmpty()) {
+ const BSONObj& desiredSort = query.getParsed().getSort();
+
+ // If there exists a sort order that is present in each child, we can merge them and
+ // maintain that sort order / those sort orders.
+ ixscanNodes[0]->computeProperties();
+ BSONObjSet sharedSortOrders = ixscanNodes[0]->getSort();
+
+ if (!sharedSortOrders.empty()) {
+ for (size_t i = 1; i < ixscanNodes.size(); ++i) {
+ ixscanNodes[i]->computeProperties();
+ BSONObjSet isect;
+ set_intersection(sharedSortOrders.begin(),
+ sharedSortOrders.end(),
+ ixscanNodes[i]->getSort().begin(),
+ ixscanNodes[i]->getSort().end(),
+ std::inserter(isect, isect.end()),
+ BSONObjCmp());
+ sharedSortOrders = isect;
+ if (sharedSortOrders.empty()) {
+ break;
}
}
-
- // TODO: If we're looking for the reverse of one of these sort orders we could
- // possibly reverse the ixscan nodes.
- shouldMergeSort = (sharedSortOrders.end() != sharedSortOrders.find(desiredSort));
}
- if (shouldMergeSort) {
- MergeSortNode* msn = new MergeSortNode();
- msn->sort = query.getParsed().getSort();
- msn->children.swap(ixscanNodes);
- orResult = msn;
- }
- else {
- OrNode* orn = new OrNode();
- orn->children.swap(ixscanNodes);
- orResult = orn;
- }
+ // TODO: If we're looking for the reverse of one of these sort orders we could
+ // possibly reverse the ixscan nodes.
+ shouldMergeSort = (sharedSortOrders.end() != sharedSortOrders.find(desiredSort));
}
- // Evaluate text nodes first to ensure that text scores are available.
- // Move text nodes to front of vector.
- std::stable_partition(orResult->children.begin(), orResult->children.end(), isTextNode);
-
- // OR must have an index for each child, so we should have detached all children from
- // 'root', and there's nothing useful to do with an empty or MatchExpression. We let it die
- // via autoRoot.
-
- return orResult;
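+        // Merge-sorting the children lets the OR preserve the desired sort order; otherwise we
+        // fall back to a plain OrNode.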
+ if (shouldMergeSort) {
+ MergeSortNode* msn = new MergeSortNode();
+ msn->sort = query.getParsed().getSort();
+ msn->children.swap(ixscanNodes);
+ orResult = msn;
+ } else {
+ OrNode* orn = new OrNode();
+ orn->children.swap(ixscanNodes);
+ orResult = orn;
+ }
}
- // static
- QuerySolutionNode* QueryPlannerAccess::buildIndexedDataAccess(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const vector<IndexEntry>& indices,
- const QueryPlannerParams& params) {
- if (root->isLogical() && !Indexability::isBoundsGeneratingNot(root)) {
- if (MatchExpression::AND == root->matchType()) {
- // Takes ownership of root.
- return buildIndexedAnd(query, root, inArrayOperator, indices, params);
- }
- else if (MatchExpression::OR == root->matchType()) {
- // Takes ownership of root.
- return buildIndexedOr(query, root, inArrayOperator, indices, params);
- }
- else {
- // Can't do anything with negated logical nodes index-wise.
- if (!inArrayOperator) {
- delete root;
- }
- return NULL;
- }
- }
- else {
- unique_ptr<MatchExpression> autoRoot;
- if (!inArrayOperator) {
- autoRoot.reset(root);
- }
+ // Evaluate text nodes first to ensure that text scores are available.
+ // Move text nodes to front of vector.
+ std::stable_partition(orResult->children.begin(), orResult->children.end(), isTextNode);
- // isArray or isLeaf is true. Either way, it's over one field, and the bounds builder
- // deals with it.
- if (NULL == root->getTag()) {
- // No index to use here, not in the context of logical operator, so we're SOL.
- return NULL;
- }
- else if (Indexability::isBoundsGenerating(root)) {
- // Make an index scan over the tagged index #.
- IndexTag* tag = static_cast<IndexTag*>(root->getTag());
-
- IndexBoundsBuilder::BoundsTightness tightness = IndexBoundsBuilder::EXACT;
- QuerySolutionNode* soln = makeLeafNode(query, indices[tag->index], tag->pos,
- root, &tightness);
- verify(NULL != soln);
- finishLeafNode(soln, indices[tag->index]);
-
- if (inArrayOperator) {
- return soln;
- }
+ // OR must have an index for each child, so we should have detached all children from
+ // 'root', and there's nothing useful to do with an empty or MatchExpression. We let it die
+ // via autoRoot.
- // If the bounds are exact, the set of documents that satisfy the predicate is
- // exactly equal to the set of documents that the scan provides.
- //
- // If the bounds are not exact, the set of documents returned from the scan is a
- // superset of documents that satisfy the predicate, and we must check the
- // predicate.
+ return orResult;
+}
- if (tightness == IndexBoundsBuilder::EXACT) {
- return soln;
- }
- else if (tightness == IndexBoundsBuilder::INEXACT_COVERED
- && !indices[tag->index].multikey) {
- verify(NULL == soln->filter.get());
- soln->filter.reset(autoRoot.release());
- return soln;
- }
- else {
- FetchNode* fetch = new FetchNode();
- verify(NULL != autoRoot.get());
- fetch->filter.reset(autoRoot.release());
- fetch->children.push_back(soln);
- return fetch;
- }
+// static
+QuerySolutionNode* QueryPlannerAccess::buildIndexedDataAccess(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const vector<IndexEntry>& indices,
+ const QueryPlannerParams& params) {
+ if (root->isLogical() && !Indexability::isBoundsGeneratingNot(root)) {
+ if (MatchExpression::AND == root->matchType()) {
+ // Takes ownership of root.
+ return buildIndexedAnd(query, root, inArrayOperator, indices, params);
+ } else if (MatchExpression::OR == root->matchType()) {
+ // Takes ownership of root.
+ return buildIndexedOr(query, root, inArrayOperator, indices, params);
+ } else {
+ // Can't do anything with negated logical nodes index-wise.
+ if (!inArrayOperator) {
+ delete root;
}
- else if (Indexability::arrayUsesIndexOnChildren(root)) {
- QuerySolutionNode* solution = NULL;
-
- invariant(MatchExpression::ELEM_MATCH_OBJECT);
- // The child is an AND.
- invariant(1 == root->numChildren());
- solution = buildIndexedDataAccess(query, root->getChild(0), true, indices, params);
- if (NULL == solution) {
- return NULL;
- }
+ return NULL;
+ }
+ } else {
+ unique_ptr<MatchExpression> autoRoot;
+ if (!inArrayOperator) {
+ autoRoot.reset(root);
+ }
- // There may be an array operator above us.
- if (inArrayOperator) { return solution; }
+ // isArray or isLeaf is true. Either way, it's over one field, and the bounds builder
+ // deals with it.
+ if (NULL == root->getTag()) {
+            // No index to use here, and we're not under a logical operator, so we're SOL.
+ return NULL;
+ } else if (Indexability::isBoundsGenerating(root)) {
+ // Make an index scan over the tagged index #.
+ IndexTag* tag = static_cast<IndexTag*>(root->getTag());
+
+ IndexBoundsBuilder::BoundsTightness tightness = IndexBoundsBuilder::EXACT;
+ QuerySolutionNode* soln =
+ makeLeafNode(query, indices[tag->index], tag->pos, root, &tightness);
+ verify(NULL != soln);
+ finishLeafNode(soln, indices[tag->index]);
+
+ if (inArrayOperator) {
+ return soln;
+ }
+ // If the bounds are exact, the set of documents that satisfy the predicate is
+ // exactly equal to the set of documents that the scan provides.
+ //
+ // If the bounds are not exact, the set of documents returned from the scan is a
+ // superset of documents that satisfy the predicate, and we must check the
+ // predicate.
+
+ if (tightness == IndexBoundsBuilder::EXACT) {
+ return soln;
+ } else if (tightness == IndexBoundsBuilder::INEXACT_COVERED &&
+ !indices[tag->index].multikey) {
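+                // The index key carries enough information to evaluate the predicate, so affix
+                // it as the scan's filter rather than forcing a fetch.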
+ verify(NULL == soln->filter.get());
+ soln->filter.reset(autoRoot.release());
+ return soln;
+ } else {
FetchNode* fetch = new FetchNode();
- // Takes ownership of 'root'.
verify(NULL != autoRoot.get());
fetch->filter.reset(autoRoot.release());
- fetch->children.push_back(solution);
+ fetch->children.push_back(soln);
return fetch;
}
- }
+ } else if (Indexability::arrayUsesIndexOnChildren(root)) {
+ QuerySolutionNode* solution = NULL;
+
+            invariant(MatchExpression::ELEM_MATCH_OBJECT == root->matchType());
+ // The child is an AND.
+ invariant(1 == root->numChildren());
+ solution = buildIndexedDataAccess(query, root->getChild(0), true, indices, params);
+ if (NULL == solution) {
+ return NULL;
+ }
- if (!inArrayOperator) {
- delete root;
- }
+ // There may be an array operator above us.
+ if (inArrayOperator) {
+ return solution;
+ }
- return NULL;
+ FetchNode* fetch = new FetchNode();
+ // Takes ownership of 'root'.
+ verify(NULL != autoRoot.get());
+ fetch->filter.reset(autoRoot.release());
+ fetch->children.push_back(solution);
+ return fetch;
+ }
}
- QuerySolutionNode* QueryPlannerAccess::scanWholeIndex(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- int direction) {
- QuerySolutionNode* solnRoot = NULL;
-
- // Build an ixscan over the id index, use it, and return it.
- IndexScanNode* isn = new IndexScanNode();
- isn->indexKeyPattern = index.keyPattern;
- isn->indexIsMultiKey = index.multikey;
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
+ if (!inArrayOperator) {
+ delete root;
+ }
- IndexBoundsBuilder::allValuesBounds(index.keyPattern, &isn->bounds);
+ return NULL;
+}
- if (-1 == direction) {
- QueryPlannerCommon::reverseScans(isn);
- isn->direction = -1;
- }
+QuerySolutionNode* QueryPlannerAccess::scanWholeIndex(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ int direction) {
+ QuerySolutionNode* solnRoot = NULL;
- MatchExpression* filter = query.root()->shallowClone();
+    // Build an ixscan over the provided index, use it, and return it.
+ IndexScanNode* isn = new IndexScanNode();
+ isn->indexKeyPattern = index.keyPattern;
+ isn->indexIsMultiKey = index.multikey;
+ isn->maxScan = query.getParsed().getMaxScan();
+ isn->addKeyMetadata = query.getParsed().returnKey();
- // If it's find({}) remove the no-op root.
- if (MatchExpression::AND == filter->matchType() && (0 == filter->numChildren())) {
- delete filter;
- solnRoot = isn;
- }
- else {
- // TODO: We may not need to do the fetch if the predicates in root are covered. But
- // for now it's safe (though *maybe* slower).
- FetchNode* fetch = new FetchNode();
- fetch->filter.reset(filter);
- fetch->children.push_back(isn);
- solnRoot = fetch;
- }
+ IndexBoundsBuilder::allValuesBounds(index.keyPattern, &isn->bounds);
- return solnRoot;
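+    // A backward scan was requested: reverse the bounds and record the direction on the node.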
+ if (-1 == direction) {
+ QueryPlannerCommon::reverseScans(isn);
+ isn->direction = -1;
}
- // static
- void QueryPlannerAccess::addFilterToSolutionNode(QuerySolutionNode* node,
- MatchExpression* match,
- MatchExpression::MatchType type) {
- if (NULL == node->filter) {
- node->filter.reset(match);
- }
- else if (type == node->filter->matchType()) {
- // The 'node' already has either an AND or OR filter that matches 'type'. Add 'match' as
- // another branch of the filter.
- ListOfMatchExpression* listFilter =
- static_cast<ListOfMatchExpression*>(node->filter.get());
- listFilter->add(match);
- }
- else {
- // The 'node' already has a filter that does not match 'type'. If 'type' is AND, then
- // combine 'match' with the existing filter by adding an AND. If 'type' is OR, combine
- // by adding an OR node.
- ListOfMatchExpression* listFilter;
- if (MatchExpression::AND == type) {
- listFilter = new AndMatchExpression();
- }
- else {
- verify(MatchExpression::OR == type);
- listFilter = new OrMatchExpression();
- }
- MatchExpression* oldFilter = node->filter->shallowClone();
- listFilter->add(oldFilter);
- listFilter->add(match);
- node->filter.reset(listFilter);
- }
+ MatchExpression* filter = query.root()->shallowClone();
+
+ // If it's find({}) remove the no-op root.
+ if (MatchExpression::AND == filter->matchType() && (0 == filter->numChildren())) {
+ delete filter;
+ solnRoot = isn;
+ } else {
+ // TODO: We may not need to do the fetch if the predicates in root are covered. But
+ // for now it's safe (though *maybe* slower).
+ FetchNode* fetch = new FetchNode();
+ fetch->filter.reset(filter);
+ fetch->children.push_back(isn);
+ solnRoot = fetch;
}
- // static
- void QueryPlannerAccess::handleFilter(ScanBuildingState* scanState) {
- if (MatchExpression::OR == scanState->root->matchType()) {
- handleFilterOr(scanState);
- }
- else if (MatchExpression::AND == scanState->root->matchType()) {
- handleFilterAnd(scanState);
- }
- else {
- // We must be building leaves for either and AND or an OR.
- invariant(0);
- }
+ return solnRoot;
+}
+
+// static
+void QueryPlannerAccess::addFilterToSolutionNode(QuerySolutionNode* node,
+ MatchExpression* match,
+ MatchExpression::MatchType type) {
+ if (NULL == node->filter) {
+ node->filter.reset(match);
+ } else if (type == node->filter->matchType()) {
+ // The 'node' already has either an AND or OR filter that matches 'type'. Add 'match' as
+ // another branch of the filter.
+ ListOfMatchExpression* listFilter = static_cast<ListOfMatchExpression*>(node->filter.get());
+ listFilter->add(match);
+ } else {
+ // The 'node' already has a filter that does not match 'type'. If 'type' is AND, then
+ // combine 'match' with the existing filter by adding an AND. If 'type' is OR, combine
+ // by adding an OR node.
+ ListOfMatchExpression* listFilter;
+ if (MatchExpression::AND == type) {
+ listFilter = new AndMatchExpression();
+ } else {
+ verify(MatchExpression::OR == type);
+ listFilter = new OrMatchExpression();
+ }
+ MatchExpression* oldFilter = node->filter->shallowClone();
+ listFilter->add(oldFilter);
+ listFilter->add(match);
+ node->filter.reset(listFilter);
+ }
+}
+
+// static
+void QueryPlannerAccess::handleFilter(ScanBuildingState* scanState) {
+ if (MatchExpression::OR == scanState->root->matchType()) {
+ handleFilterOr(scanState);
+ } else if (MatchExpression::AND == scanState->root->matchType()) {
+ handleFilterAnd(scanState);
+ } else {
+        // We must be building leaves for either an AND or an OR.
+ invariant(0);
}
+}
- // static
- void QueryPlannerAccess::handleFilterOr(ScanBuildingState* scanState) {
- MatchExpression* root = scanState->root;
- MatchExpression* child = root->getChild(scanState->curChild);
+// static
+void QueryPlannerAccess::handleFilterOr(ScanBuildingState* scanState) {
+ MatchExpression* root = scanState->root;
+ MatchExpression* child = root->getChild(scanState->curChild);
- if (scanState->inArrayOperator) {
- // We're inside an array operator. The entire array operator expression
- // should always be affixed as a filter. We keep 'curChild' in the $and
- // for affixing later.
- ++scanState->curChild;
+ if (scanState->inArrayOperator) {
+ // We're inside an array operator. The entire array operator expression
+ // should always be affixed as a filter. We keep 'curChild' in the $and
+ // for affixing later.
+ ++scanState->curChild;
+ } else {
+ if (scanState->tightness < scanState->loosestBounds) {
+ scanState->loosestBounds = scanState->tightness;
}
- else {
- if (scanState->tightness < scanState->loosestBounds) {
- scanState->loosestBounds = scanState->tightness;
- }
- // Detach 'child' and add it to 'curOr'.
- root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
- scanState->curOr->getChildVector()->push_back(child);
- }
+ // Detach 'child' and add it to 'curOr'.
+ root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
+ scanState->curOr->getChildVector()->push_back(child);
}
-
- // static
- void QueryPlannerAccess::handleFilterAnd(ScanBuildingState* scanState) {
- MatchExpression* root = scanState->root;
- MatchExpression* child = root->getChild(scanState->curChild);
- const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
-
- if (scanState->inArrayOperator) {
- // We're inside an array operator. The entire array operator expression
- // should always be affixed as a filter. We keep 'curChild' in the $and
- // for affixing later.
- ++scanState->curChild;
- }
- else if (scanState->tightness == IndexBoundsBuilder::EXACT) {
- root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
- delete child;
- }
- else if (scanState->tightness == IndexBoundsBuilder::INEXACT_COVERED
- && (INDEX_TEXT == index.type || !index.multikey)) {
- // The bounds are not exact, but the information needed to
- // evaluate the predicate is in the index key. Remove the
- // MatchExpression from its parent and attach it to the filter
- // of the index scan we're building.
- //
- // We can only use this optimization if the index is NOT multikey.
- // Suppose that we had the multikey index {x: 1} and a document
- // {x: ["a", "b"]}. Now if we query for {x: /b/} the filter might
- // ever only be applied to the index key "a". We'd incorrectly
- // conclude that the document does not match the query :( so we
- // gotta stick to non-multikey indices.
- root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
-
- addFilterToSolutionNode(scanState->currentScan.get(), child, root->matchType());
- }
- else {
- // We keep curChild in the AND for affixing later.
- ++scanState->curChild;
- }
+}
+
+// static
+void QueryPlannerAccess::handleFilterAnd(ScanBuildingState* scanState) {
+ MatchExpression* root = scanState->root;
+ MatchExpression* child = root->getChild(scanState->curChild);
+ const IndexEntry& index = scanState->indices[scanState->currentIndexNumber];
+
+ if (scanState->inArrayOperator) {
+ // We're inside an array operator. The entire array operator expression
+ // should always be affixed as a filter. We keep 'curChild' in the $and
+ // for affixing later.
+ ++scanState->curChild;
+ } else if (scanState->tightness == IndexBoundsBuilder::EXACT) {
+ root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
+ delete child;
+ } else if (scanState->tightness == IndexBoundsBuilder::INEXACT_COVERED &&
+ (INDEX_TEXT == index.type || !index.multikey)) {
+ // The bounds are not exact, but the information needed to
+ // evaluate the predicate is in the index key. Remove the
+ // MatchExpression from its parent and attach it to the filter
+ // of the index scan we're building.
+ //
+ // We can only use this optimization if the index is NOT multikey.
+ // Suppose that we had the multikey index {x: 1} and a document
+        // {x: ["a", "b"]}. Now if we query for {x: /b/} the filter might
+        // only ever be applied to the index key "a". We'd incorrectly
+ // conclude that the document does not match the query :( so we
+ // gotta stick to non-multikey indices.
+ root->getChildVector()->erase(root->getChildVector()->begin() + scanState->curChild);
+
+ addFilterToSolutionNode(scanState->currentScan.get(), child, root->matchType());
+ } else {
+ // We keep curChild in the AND for affixing later.
+ ++scanState->curChild;
}
-
- QuerySolutionNode* QueryPlannerAccess::makeIndexScan(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- const BSONObj& startKey,
- const BSONObj& endKey) {
- QuerySolutionNode* solnRoot = NULL;
-
- // Build an ixscan over the id index, use it, and return it.
- IndexScanNode* isn = new IndexScanNode();
- isn->indexKeyPattern = index.keyPattern;
- isn->indexIsMultiKey = index.multikey;
- isn->direction = 1;
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
- isn->bounds.isSimpleRange = true;
- isn->bounds.startKey = startKey;
- isn->bounds.endKey = endKey;
- isn->bounds.endKeyInclusive = false;
-
- MatchExpression* filter = query.root()->shallowClone();
-
- // If it's find({}) remove the no-op root.
- if (MatchExpression::AND == filter->matchType() && (0 == filter->numChildren())) {
- delete filter;
- solnRoot = isn;
- }
- else {
- // TODO: We may not need to do the fetch if the predicates in root are covered. But
- // for now it's safe (though *maybe* slower).
- FetchNode* fetch = new FetchNode();
- fetch->filter.reset(filter);
- fetch->children.push_back(isn);
- solnRoot = fetch;
- }
-
- return solnRoot;
+}
+
+QuerySolutionNode* QueryPlannerAccess::makeIndexScan(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ const BSONObj& startKey,
+ const BSONObj& endKey) {
+ QuerySolutionNode* solnRoot = NULL;
+
+    // Build an ixscan over the provided index, use it, and return it.
+ IndexScanNode* isn = new IndexScanNode();
+ isn->indexKeyPattern = index.keyPattern;
+ isn->indexIsMultiKey = index.multikey;
+ isn->direction = 1;
+ isn->maxScan = query.getParsed().getMaxScan();
+ isn->addKeyMetadata = query.getParsed().returnKey();
+ isn->bounds.isSimpleRange = true;
+ isn->bounds.startKey = startKey;
+ isn->bounds.endKey = endKey;
+ isn->bounds.endKeyInclusive = false;
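+    // The end key is exclusive, so the scan covers the half-open range [startKey, endKey)
+    // documented in the header.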
+
+ MatchExpression* filter = query.root()->shallowClone();
+
+ // If it's find({}) remove the no-op root.
+ if (MatchExpression::AND == filter->matchType() && (0 == filter->numChildren())) {
+ delete filter;
+ solnRoot = isn;
+ } else {
+ // TODO: We may not need to do the fetch if the predicates in root are covered. But
+ // for now it's safe (though *maybe* slower).
+ FetchNode* fetch = new FetchNode();
+ fetch->filter.reset(filter);
+ fetch->children.push_back(isn);
+ solnRoot = fetch;
}
+ return solnRoot;
+}
+
} // namespace mongo
diff --git a/src/mongo/db/query/planner_access.h b/src/mongo/db/query/planner_access.h
index 433e2259bb6..55a05ff5161 100644
--- a/src/mongo/db/query/planner_access.h
+++ b/src/mongo/db/query/planner_access.h
@@ -35,376 +35,375 @@
namespace mongo {
- /**
- * MULTIKEY INDEX BOUNDS RULES
- *
- * 1. In general for a multikey index, we cannot intersect bounds
- * even if the index is not compound.
- * Example:
- * Let's say we have the document {a: [5, 7]}.
- * This document satisfies the query {$and: [ {a: 5}, {a: 7} ] }
- * For the index {a:1} we have the keys {"": 5} and {"": 7}.
- * Each child of the AND is tagged with the index {a: 1}
- * The interval for the {a: 5} branch is [5, 5]. It is exact.
- * The interval for the {a: 7} branch is [7, 7]. It is exact.
- * The intersection of the intervals is {}.
- * If we scan over {}, the intersection of the intervals, we will retrieve nothing.
- *
- * 2. In general for a multikey compound index, we *can* compound the bounds.
- * For example, if we have multikey index {a: 1, b: 1} and query {a: 2, b: 3},
- * we can use the bounds {a: [[2, 2]], b: [[3, 3]]}.
- *
- * 3. Despite rule #2, if fields in the compound index share a prefix, then it
- * is not safe to compound the bounds. We can only specify bounds for the first
- * field.
- * Example:
- * Let's say we have the document {a: [ {b: 3}, {c: 4} ] }
- * This document satisfies the query {'a.b': 3, 'a.c': 4}.
- * For the index {'a.b': 1, 'a.c': 1} we have the keys {"": 3, "": null} and
- * {"": null, "": 4}.
- * Let's use the aforementioned index to answer the query.
- * The bounds for 'a.b' are [3,3], and the bounds for 'a.c' are [4,4].
- * If we combine the bounds, we would only look at keys {"": 3, "":4 }.
- * Therefore we wouldn't look at the document's keys in the index.
- * Therefore we don't combine bounds.
- *
- * 4. There is an exception to rule #1, and that is when we're evaluating
- * an $elemMatch.
- * Example:
- * Let's say that we have the same document from (1), {a: [5, 7]}.
- * This document satisfies {a: {$lte: 5, $gte: 7}}, but it does not
- * satisfy {a: {$elemMatch: {$lte: 5, $gte: 7}}}. The $elemMatch indicates
- * that we are allowed to intersect the bounds, which means that we will
- * scan over the empty interval {} and retrieve nothing. This is the
- * expected result because there is no entry in the array "a" that
- * simultaneously satisfies the predicates a<=5 and a>=7.
- *
- * 5. There is also an exception to rule #3, and that is when we're evaluating
- * an $elemMatch. The bounds can be compounded for predicates that share a prefix
- * so long as the shared prefix is the path for which there is an $elemMatch.
- * Example:
- * Suppose we have the same document from (3), {a: [{b: 3}, {c: 4}]}. As discussed
- * above, we cannot compound the index bounds for query {'a.b': 1, 'a.c': 1}.
- * However, for the query {a: {$elemMatch: {b: 1, c: 1}} we can compound the
- * bounds because the $elemMatch is applied to the shared prefix "a".
- */
+/**
+ * MULTIKEY INDEX BOUNDS RULES
+ *
+ * 1. In general for a multikey index, we cannot intersect bounds
+ * even if the index is not compound.
+ * Example:
+ * Let's say we have the document {a: [5, 7]}.
+ * This document satisfies the query {$and: [ {a: 5}, {a: 7} ] }
+ * For the index {a:1} we have the keys {"": 5} and {"": 7}.
+ * Each child of the AND is tagged with the index {a: 1}
+ * The interval for the {a: 5} branch is [5, 5]. It is exact.
+ * The interval for the {a: 7} branch is [7, 7]. It is exact.
+ * The intersection of the intervals is {}.
+ * If we scan over {}, the intersection of the intervals, we will retrieve nothing.
+ *
+ * 2. In general for a multikey compound index, we *can* compound the bounds.
+ * For example, if we have multikey index {a: 1, b: 1} and query {a: 2, b: 3},
+ * we can use the bounds {a: [[2, 2]], b: [[3, 3]]}.
+ *
+ * 3. Despite rule #2, if fields in the compound index share a prefix, then it
+ * is not safe to compound the bounds. We can only specify bounds for the first
+ * field.
+ * Example:
+ * Let's say we have the document {a: [ {b: 3}, {c: 4} ] }
+ * This document satisfies the query {'a.b': 3, 'a.c': 4}.
+ * For the index {'a.b': 1, 'a.c': 1} we have the keys {"": 3, "": null} and
+ * {"": null, "": 4}.
+ * Let's use the aforementioned index to answer the query.
+ * The bounds for 'a.b' are [3,3], and the bounds for 'a.c' are [4,4].
+ * If we combine the bounds, we would only look at keys {"": 3, "":4 }.
+ * Therefore we wouldn't look at the document's keys in the index.
+ * Therefore we don't combine bounds.
+ *
+ * 4. There is an exception to rule #1, and that is when we're evaluating
+ * an $elemMatch.
+ * Example:
+ * Let's say that we have the same document from (1), {a: [5, 7]}.
+ * This document satisfies {a: {$lte: 5, $gte: 7}}, but it does not
+ * satisfy {a: {$elemMatch: {$lte: 5, $gte: 7}}}. The $elemMatch indicates
+ * that we are allowed to intersect the bounds, which means that we will
+ * scan over the empty interval {} and retrieve nothing. This is the
+ * expected result because there is no entry in the array "a" that
+ * simultaneously satisfies the predicates a<=5 and a>=7.
+ *
+ * 5. There is also an exception to rule #3, and that is when we're evaluating
+ * an $elemMatch. The bounds can be compounded for predicates that share a prefix
+ * so long as the shared prefix is the path for which there is an $elemMatch.
+ * Example:
+ * Suppose we have the same document from (3), {a: [{b: 3}, {c: 4}]}. As discussed
+ * above, we cannot compound the index bounds for query {'a.b': 1, 'a.c': 1}.
+ * However, for the query {a: {$elemMatch: {b: 1, c: 1}} we can compound the
+ * bounds because the $elemMatch is applied to the shared prefix "a".
+ */
+/**
+ * Methods for creating a QuerySolutionNode tree that accesses the data required by the query.
+ */
+class QueryPlannerAccess {
+public:
/**
- * Methods for creating a QuerySolutionNode tree that accesses the data required by the query.
+ * Building the leaves (i.e. the index scans) is done by looping through
+ * predicates one at a time. During the process, there is a fair amount of state
+ * information to keep track of, which we consolidate into this data structure.
*/
- class QueryPlannerAccess {
- public:
+ struct ScanBuildingState {
+ ScanBuildingState(MatchExpression* theRoot,
+ bool inArrayOp,
+ const std::vector<IndexEntry>& indexList)
+ : root(theRoot),
+ inArrayOperator(inArrayOp),
+ indices(indexList),
+ currentScan(nullptr),
+ curChild(0),
+ currentIndexNumber(IndexTag::kNoIndex),
+ ixtag(NULL),
+ tightness(IndexBoundsBuilder::INEXACT_FETCH),
+ curOr(nullptr),
+ loosestBounds(IndexBoundsBuilder::EXACT) {}
+
/**
- * Building the leaves (i.e. the index scans) is done by looping through
- * predicates one at a time. During the process, there is a fair amount of state
- * information to keep track of, which we consolidate into this data structure.
+ * Reset the scan building state in preparation for building a new scan.
+ *
+         * This should always be called prior to allocating a new 'currentScan'.
*/
- struct ScanBuildingState {
-
- ScanBuildingState(MatchExpression* theRoot,
- bool inArrayOp,
- const std::vector<IndexEntry>& indexList)
- : root(theRoot),
- inArrayOperator(inArrayOp),
- indices(indexList),
- currentScan(nullptr),
- curChild(0),
- currentIndexNumber(IndexTag::kNoIndex),
- ixtag(NULL),
- tightness(IndexBoundsBuilder::INEXACT_FETCH),
- curOr(nullptr),
- loosestBounds(IndexBoundsBuilder::EXACT) {
+ void resetForNextScan(IndexTag* newTag) {
+ currentScan.reset(NULL);
+ currentIndexNumber = newTag->index;
+ tightness = IndexBoundsBuilder::INEXACT_FETCH;
+ loosestBounds = IndexBoundsBuilder::EXACT;
+
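+            // For $or roots, start a fresh collection of the predicates answered by the new
+            // scan; they may be affixed to the scan as a filter later if the bounds are inexact.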
+ if (MatchExpression::OR == root->matchType()) {
+ curOr.reset(new OrMatchExpression());
}
+ }
- /**
- * Reset the scan building state in preparation for building a new scan.
- *
- * This always should be called prior to allocating a new 'currentScan'.
- */
- void resetForNextScan(IndexTag* newTag) {
- currentScan.reset(NULL);
- currentIndexNumber = newTag->index;
- tightness = IndexBoundsBuilder::INEXACT_FETCH;
- loosestBounds = IndexBoundsBuilder::EXACT;
-
- if (MatchExpression::OR == root->matchType()) {
- curOr.reset(new OrMatchExpression());
- }
- }
+ // The root of the MatchExpression tree for which we are currently building index
+ // scans. Should be either an AND node or an OR node.
+ MatchExpression* root;
- // The root of the MatchExpression tree for which we are currently building index
- // scans. Should be either an AND node or an OR node.
- MatchExpression* root;
-
- // Are we inside an array operator such as $elemMatch or $all?
- bool inArrayOperator;
-
- // A list of relevant indices which 'root' may be tagged to use.
- const std::vector<IndexEntry>& indices;
-
- // The index access node that we are currently constructing. We may merge
- // multiple tagged predicates into a single index scan.
- std::unique_ptr<QuerySolutionNode> currentScan;
-
- // An index into the child vector of 'root'. Indicates the child MatchExpression
- // for which we are currently either constructing a new scan or which we are about
- // to merge with 'currentScan'.
- size_t curChild;
-
- // An index into the 'indices', so that 'indices[currentIndexNumber]' gives the
- // index used by 'currentScan'. If there is no currentScan, this should be set
- // to 'IndexTag::kNoIndex'.
- size_t currentIndexNumber;
-
- // The tag on 'curChild'.
- IndexTag* ixtag;
-
- // Whether the bounds for predicate 'curChild' are exact, inexact and covered by
- // the index, or inexact with a fetch required.
- IndexBoundsBuilder::BoundsTightness tightness;
-
- // If 'root' is an $or, the child predicates which are tagged with the same index are
- // detached from the original root and added here. 'curOr' may be attached as a filter
- // later on, or ignored and cleaned up by the unique_ptr.
- std::unique_ptr<MatchExpression> curOr;
-
- // The values of BoundsTightness range from loosest to tightest in this order:
- //
- // INEXACT_FETCH < INEXACT_COVERED < EXACT
- //
- // 'loosestBounds' stores the smallest of these three values encountered so far for
- // the current scan. If at least one of the child predicates assigned to the current
- // index is INEXACT_FETCH, then 'loosestBounds' is INEXACT_FETCH. If at least one of
- // the child predicates assigned to the current index is INEXACT_COVERED but none are
- // INEXACT_FETCH, then 'loosestBounds' is INEXACT_COVERED.
- IndexBoundsBuilder::BoundsTightness loosestBounds;
-
- private:
- // Default constructor is not allowed.
- ScanBuildingState();
- };
+ // Are we inside an array operator such as $elemMatch or $all?
+ bool inArrayOperator;
- /**
- * Return a CollectionScanNode that scans as requested in 'query'.
- */
- static QuerySolutionNode* makeCollectionScan(const CanonicalQuery& query,
- bool tailable,
- const QueryPlannerParams& params);
+ // A list of relevant indices which 'root' may be tagged to use.
+ const std::vector<IndexEntry>& indices;
- /**
- * Return a plan that uses the provided index as a proxy for a collection scan.
- */
- static QuerySolutionNode* scanWholeIndex(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- int direction = 1);
+ // The index access node that we are currently constructing. We may merge
+ // multiple tagged predicates into a single index scan.
+ std::unique_ptr<QuerySolutionNode> currentScan;
- /**
- * Return a plan that scans the provided index from [startKey to endKey).
- */
- static QuerySolutionNode* makeIndexScan(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- const BSONObj& startKey,
- const BSONObj& endKey);
+ // An index into the child vector of 'root'. Indicates the child MatchExpression
+ // for which we are currently either constructing a new scan or which we are about
+ // to merge with 'currentScan'.
+ size_t curChild;
+ // An index into the 'indices', so that 'indices[currentIndexNumber]' gives the
+ // index used by 'currentScan'. If there is no currentScan, this should be set
+ // to 'IndexTag::kNoIndex'.
+ size_t currentIndexNumber;
+
+ // The tag on 'curChild'.
+ IndexTag* ixtag;
+
+ // Whether the bounds for predicate 'curChild' are exact, inexact and covered by
+ // the index, or inexact with a fetch required.
+ IndexBoundsBuilder::BoundsTightness tightness;
+
+ // If 'root' is an $or, the child predicates which are tagged with the same index are
+ // detached from the original root and added here. 'curOr' may be attached as a filter
+ // later on, or ignored and cleaned up by the unique_ptr.
+ std::unique_ptr<MatchExpression> curOr;
+
+ // The values of BoundsTightness range from loosest to tightest in this order:
//
- // Indexed Data Access methods.
- //
- // The inArrayOperator flag deserves some attention. It is set when we're processing a
- // child of an MatchExpression::ELEM_MATCH_OBJECT.
- //
- // When true, the following behavior changes for all methods below that take it as an argument:
- // 0. No deletion of MatchExpression(s). In fact,
- // 1. No mutation of the MatchExpression at all. We need the tree as-is in order to perform
- // a filter on the entire tree.
- // 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess
- // who set the value of inArrayOperator to true.
- // 3. No compound indices are used and no bounds are combined. These are incorrect in the context
- // of these operators.
+ // INEXACT_FETCH < INEXACT_COVERED < EXACT
//
+ // 'loosestBounds' stores the smallest of these three values encountered so far for
+ // the current scan. If at least one of the child predicates assigned to the current
+ // index is INEXACT_FETCH, then 'loosestBounds' is INEXACT_FETCH. If at least one of
+ // the child predicates assigned to the current index is INEXACT_COVERED but none are
+ // INEXACT_FETCH, then 'loosestBounds' is INEXACT_COVERED.
+ IndexBoundsBuilder::BoundsTightness loosestBounds;
+
+ private:
+ // Default constructor is not allowed.
+ ScanBuildingState();
+ };
- /**
- * If 'inArrayOperator' is false, takes ownership of 'root'.
- */
- static QuerySolutionNode* buildIndexedDataAccess(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params);
+ /**
+ * Return a CollectionScanNode that scans as requested in 'query'.
+ */
+ static QuerySolutionNode* makeCollectionScan(const CanonicalQuery& query,
+ bool tailable,
+ const QueryPlannerParams& params);
- /**
- * Takes ownership of 'root'.
- */
- static QuerySolutionNode* buildIndexedAnd(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params);
+ /**
+ * Return a plan that uses the provided index as a proxy for a collection scan.
+ */
+ static QuerySolutionNode* scanWholeIndex(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ int direction = 1);
- /**
- * Takes ownership of 'root'.
- */
- static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params);
+ /**
+ * Return a plan that scans the provided index from [startKey to endKey).
+ */
+ static QuerySolutionNode* makeIndexScan(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ const BSONObj& startKey,
+ const BSONObj& endKey);
+
+ //
+ // Indexed Data Access methods.
+ //
+ // The inArrayOperator flag deserves some attention. It is set when we're processing a
+// child of a MatchExpression::ELEM_MATCH_OBJECT.
+ //
+ // When true, the following behavior changes for all methods below that take it as an argument:
+ // 0. No deletion of MatchExpression(s). In fact,
+ // 1. No mutation of the MatchExpression at all. We need the tree as-is in order to perform
+ // a filter on the entire tree.
+ // 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess
+ // who set the value of inArrayOperator to true.
+ // 3. No compound indices are used and no bounds are combined. These are incorrect in the context
+ // of these operators.
+ //
- /**
- * Traverses the tree rooted at the $elemMatch expression 'node',
- * finding all predicates that can use an index directly and returning
- * them in the out-parameter vector 'out'.
- *
- * Traverses only through AND and ELEM_MATCH_OBJECT nodes.
- *
- * Other nodes (i.e. nodes which cannot use an index directly, and which are
- * neither AND nor ELEM_MATCH_OBJECT) are returned in 'subnodesOut' if they are
- * tagged to use an index.
- */
- static void findElemMatchChildren(const MatchExpression* node,
- std::vector<MatchExpression*>* out,
- std::vector<MatchExpression*>* subnodesOut);
+ /**
+ * If 'inArrayOperator' is false, takes ownership of 'root'.
+ */
+ static QuerySolutionNode* buildIndexedDataAccess(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params);
- /**
- * Helper used by buildIndexedAnd and buildIndexedOr.
- *
- * The children of AND and OR nodes are sorted by the index that the subtree rooted at
- * that node uses. Child nodes that use the same index are adjacent to one another to
- * facilitate grouping of index scans. As such, the processing for AND and OR is
- * almost identical.
- *
- * See tagForSort and sortUsingTags in index_tag.h for details on ordering the children
- * of OR and AND.
- *
- * Does not take ownership of 'root' but may remove children from it.
- */
- static bool processIndexScans(const CanonicalQuery& query,
- MatchExpression* root,
- bool inArrayOperator,
- const std::vector<IndexEntry>& indices,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out);
+ /**
+ * Takes ownership of 'root'.
+ */
+ static QuerySolutionNode* buildIndexedAnd(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params);
- /**
- * Used by processIndexScans(...) in order to recursively build a data access
- * plan for a "subnode", a node in the MatchExpression tree which is indexed by
- * virtue of its children.
- *
- * The resulting scans are outputted in the out-parameter 'out'.
- */
- static bool processIndexScansSubnode(const CanonicalQuery& query,
- ScanBuildingState* scanState,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out);
+ /**
+ * Takes ownership of 'root'.
+ */
+ static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params);
- /**
- * Used by processIndexScansSubnode(...) to build the leaves of the solution tree for an
- * ELEM_MATCH_OBJECT node beneath an AND.
- *
- * The resulting scans are outputted in the out-parameter 'out'.
- */
- static bool processIndexScansElemMatch(const CanonicalQuery& query,
- ScanBuildingState* scanState,
- const QueryPlannerParams& params,
- std::vector<QuerySolutionNode*>* out);
+ /**
+ * Traverses the tree rooted at the $elemMatch expression 'node',
+ * finding all predicates that can use an index directly and returning
+ * them in the out-parameter vector 'out'.
+ *
+ * Traverses only through AND and ELEM_MATCH_OBJECT nodes.
+ *
+ * Other nodes (i.e. nodes which cannot use an index directly, and which are
+ * neither AND nor ELEM_MATCH_OBJECT) are returned in 'subnodesOut' if they are
+ * tagged to use an index.
+ */
+ static void findElemMatchChildren(const MatchExpression* node,
+ std::vector<MatchExpression*>* out,
+ std::vector<MatchExpression*>* subnodesOut);
- //
- // Helpers for creating an index scan.
- //
+ /**
+ * Helper used by buildIndexedAnd and buildIndexedOr.
+ *
+ * The children of AND and OR nodes are sorted by the index that the subtree rooted at
+ * that node uses. Child nodes that use the same index are adjacent to one another to
+ * facilitate grouping of index scans. As such, the processing for AND and OR is
+ * almost identical.
+ *
+ * See tagForSort and sortUsingTags in index_tag.h for details on ordering the children
+ * of OR and AND.
+ *
+ * Does not take ownership of 'root' but may remove children from it.
+ */
+ static bool processIndexScans(const CanonicalQuery& query,
+ MatchExpression* root,
+ bool inArrayOperator,
+ const std::vector<IndexEntry>& indices,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out);
- /**
- * Create a new data access node.
- *
- * If the node is an index scan, the bounds for 'expr' are computed and placed into the
- * first field's OIL position. The rest of the OILs are allocated but uninitialized.
- *
- * If the node is a geo node, grab the geo data from 'expr' and stuff it into the
- * geo solution node of the appropriate type.
- */
- static QuerySolutionNode* makeLeafNode(const CanonicalQuery& query,
- const IndexEntry& index,
- size_t pos,
- MatchExpression* expr,
- IndexBoundsBuilder::BoundsTightness* tightnessOut);
+ /**
+ * Used by processIndexScans(...) in order to recursively build a data access
+ * plan for a "subnode", a node in the MatchExpression tree which is indexed by
+ * virtue of its children.
+ *
+ * The resulting scans are outputted in the out-parameter 'out'.
+ */
+ static bool processIndexScansSubnode(const CanonicalQuery& query,
+ ScanBuildingState* scanState,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out);
- /**
- * Merge the predicate 'expr' with the leaf node 'node'.
- */
- static void mergeWithLeafNode(MatchExpression* expr, ScanBuildingState* scanState);
+ /**
+ * Used by processIndexScansSubnode(...) to build the leaves of the solution tree for an
+ * ELEM_MATCH_OBJECT node beneath an AND.
+ *
+ * The resulting scans are outputted in the out-parameter 'out'.
+ */
+ static bool processIndexScansElemMatch(const CanonicalQuery& query,
+ ScanBuildingState* scanState,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolutionNode*>* out);
- /**
- * Determines whether it is safe to merge the expression 'expr' with
- * the leaf node of the query solution contained in 'scanState'.
- *
- * Does not take ownership of its arguments.
- */
- static bool shouldMergeWithLeaf(const MatchExpression* expr,
- const ScanBuildingState& scanState);
+ //
+ // Helpers for creating an index scan.
+ //
- /**
- * If index scan (regular or expression index), fill in any bounds that are missing in
- * 'node' with the "all values for this field" interval.
- *
- * If geo, do nothing.
- * If text, punt to finishTextNode.
- */
- static void finishLeafNode(QuerySolutionNode* node, const IndexEntry& index);
+ /**
+ * Create a new data access node.
+ *
+ * If the node is an index scan, the bounds for 'expr' are computed and placed into the
+ * first field's OIL position. The rest of the OILs are allocated but uninitialized.
+ *
+ * If the node is a geo node, grab the geo data from 'expr' and stuff it into the
+ * geo solution node of the appropriate type.
+ */
+ static QuerySolutionNode* makeLeafNode(const CanonicalQuery& query,
+ const IndexEntry& index,
+ size_t pos,
+ MatchExpression* expr,
+ IndexBoundsBuilder::BoundsTightness* tightnessOut);
- /**
- * Fills in any missing bounds by calling finishLeafNode(...) for the scan contained in
- * 'scanState'. The resulting scan is outputted in the out-parameter 'out', transferring
- * ownership in the process.
- *
- * If 'scanState' is building an index scan for OR-related predicates, filters
- * may be affixed to the scan as necessary.
- */
- static void finishAndOutputLeaf(ScanBuildingState* scanState,
- std::vector<QuerySolutionNode*>* out);
+ /**
+ * Merge the predicate 'expr' with the leaf node 'node'.
+ */
+ static void mergeWithLeafNode(MatchExpression* expr, ScanBuildingState* scanState);
- /**
- * Returns true if the current scan in 'scanState' requires a FetchNode.
- */
- static bool orNeedsFetch(const ScanBuildingState* scanState);
+ /**
+ * Determines whether it is safe to merge the expression 'expr' with
+ * the leaf node of the query solution contained in 'scanState'.
+ *
+ * Does not take ownership of its arguments.
+ */
+ static bool shouldMergeWithLeaf(const MatchExpression* expr,
+ const ScanBuildingState& scanState);
- static void finishTextNode(QuerySolutionNode* node, const IndexEntry& index);
+ /**
+ * If index scan (regular or expression index), fill in any bounds that are missing in
+ * 'node' with the "all values for this field" interval.
+ *
+ * If geo, do nothing.
+ * If text, punt to finishTextNode.
+ */
+ static void finishLeafNode(QuerySolutionNode* node, const IndexEntry& index);
- /**
- * Add the filter 'match' to the query solution node 'node'. Takes
- * ownership of 'match'.
- *
- * The MatchType, 'type', indicates whether 'match' is a child of an
- * AND or an OR match expression.
- */
- static void addFilterToSolutionNode(QuerySolutionNode* node, MatchExpression* match,
- MatchExpression::MatchType type);
+ /**
+ * Fills in any missing bounds by calling finishLeafNode(...) for the scan contained in
+ * 'scanState'. The resulting scan is outputted in the out-parameter 'out', transferring
+ * ownership in the process.
+ *
+ * If 'scanState' is building an index scan for OR-related predicates, filters
+ * may be affixed to the scan as necessary.
+ */
+ static void finishAndOutputLeaf(ScanBuildingState* scanState,
+ std::vector<QuerySolutionNode*>* out);
- /**
- * Once a predicate is merged into the current scan, there are a few things we might
- * want to do with the filter:
- * 1) Detach the filter from its parent and delete it because the predicate is
- * answered by exact index bounds.
- * 2) Leave the filter alone so that it can be affixed as part of a fetch node later.
- * 3) Detach the filter from its parent and attach it directly to an index scan node.
- * We can sometimes due this for INEXACT_COVERED predicates which are not answered exactly
- * by the bounds, but can be answered by examing the data in the index key.
- * 4) Detach the filter from its parent and attach it as a child of a separate
- * MatchExpression tree. This is done for proper handling of inexact bounds for $or
- * queries.
- *
- * This executes one of the four options above, according to the data in 'scanState'.
- */
- static void handleFilter(ScanBuildingState* scanState);
+ /**
+ * Returns true if the current scan in 'scanState' requires a FetchNode.
+ */
+ static bool orNeedsFetch(const ScanBuildingState* scanState);
- /**
- * Implements handleFilter(...) for OR queries.
- */
- static void handleFilterAnd(ScanBuildingState* scanState);
+ static void finishTextNode(QuerySolutionNode* node, const IndexEntry& index);
- /**
- * Implements handleFilter(...) for AND queries.
- */
- static void handleFilterOr(ScanBuildingState* scanState);
- };
+ /**
+ * Add the filter 'match' to the query solution node 'node'. Takes
+ * ownership of 'match'.
+ *
+ * The MatchType, 'type', indicates whether 'match' is a child of an
+ * AND or an OR match expression.
+ */
+ static void addFilterToSolutionNode(QuerySolutionNode* node,
+ MatchExpression* match,
+ MatchExpression::MatchType type);
+
+ /**
+ * Once a predicate is merged into the current scan, there are a few things we might
+ * want to do with the filter:
+ * 1) Detach the filter from its parent and delete it because the predicate is
+ * answered by exact index bounds.
+ * 2) Leave the filter alone so that it can be affixed as part of a fetch node later.
+ * 3) Detach the filter from its parent and attach it directly to an index scan node.
+     * We can sometimes do this for INEXACT_COVERED predicates which are not answered exactly
+     * by the bounds, but can be answered by examining the data in the index key.
+ * 4) Detach the filter from its parent and attach it as a child of a separate
+ * MatchExpression tree. This is done for proper handling of inexact bounds for $or
+ * queries.
+ *
+ * This executes one of the four options above, according to the data in 'scanState'.
+ */
+ static void handleFilter(ScanBuildingState* scanState);
+
+ /**
+     * Implements handleFilter(...) for AND queries.
+ */
+ static void handleFilterAnd(ScanBuildingState* scanState);
+
+ /**
+     * Implements handleFilter(...) for OR queries.
+ */
+ static void handleFilterOr(ScanBuildingState* scanState);
+};
} // namespace mongo
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index eee3093b98a..4026d62572a 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -39,733 +39,713 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::vector;
- //
- // Helpers for bounds explosion AKA quick-and-dirty SERVER-1205.
- //
+//
+// Helpers for bounds explosion AKA quick-and-dirty SERVER-1205.
+//
- namespace {
+namespace {
- /**
- * Walk the tree 'root' and output all leaf nodes into 'leafNodes'.
- */
- void getLeafNodes(QuerySolutionNode* root, vector<QuerySolutionNode*>* leafNodes) {
- if (0 == root->children.size()) {
- leafNodes->push_back(root);
- }
- else {
- for (size_t i = 0; i < root->children.size(); ++i) {
- getLeafNodes(root->children[i], leafNodes);
- }
- }
+/**
+ * Walk the tree 'root' and output all leaf nodes into 'leafNodes'.
+ */
+void getLeafNodes(QuerySolutionNode* root, vector<QuerySolutionNode*>* leafNodes) {
+ if (0 == root->children.size()) {
+ leafNodes->push_back(root);
+ } else {
+ for (size_t i = 0; i < root->children.size(); ++i) {
+ getLeafNodes(root->children[i], leafNodes);
}
+ }
+}
- /**
- * Returns true if every interval in 'oil' is a point, false otherwise.
- */
- bool isUnionOfPoints(const OrderedIntervalList& oil) {
- // We can't explode if there are empty bounds. Don't consider the
- // oil a union of points if there are no intervals.
- if (0 == oil.intervals.size()) {
- return false;
- }
+/**
+ * Returns true if every interval in 'oil' is a point, false otherwise.
+ */
+bool isUnionOfPoints(const OrderedIntervalList& oil) {
+ // We can't explode if there are empty bounds. Don't consider the
+ // oil a union of points if there are no intervals.
+ if (0 == oil.intervals.size()) {
+ return false;
+ }
- for (size_t i = 0; i < oil.intervals.size(); ++i) {
- if (!oil.intervals[i].isPoint()) {
- return false;
- }
- }
+ for (size_t i = 0; i < oil.intervals.size(); ++i) {
+ if (!oil.intervals[i].isPoint()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * Should we try to expand the index scan(s) in 'solnRoot' to pull out an indexed sort?
+ *
+ * Returns the node which should be replaced by the merge sort of exploded scans
+ * in the out-parameter 'toReplace'.
+ */
+bool structureOKForExplode(QuerySolutionNode* solnRoot, QuerySolutionNode** toReplace) {
+ // For now we only explode if we *know* we will pull the sort out. We can look at
+ // more structure (or just explode and recalculate properties and see what happens)
+ // but for now we just explode if it's a sure bet.
+ //
+ // TODO: Can also try exploding if root is AND_HASH (last child dictates order.),
+ // or other less obvious cases...
+ if (STAGE_IXSCAN == solnRoot->getType()) {
+ *toReplace = solnRoot;
+ return true;
+ }
+ if (STAGE_FETCH == solnRoot->getType()) {
+ if (STAGE_IXSCAN == solnRoot->children[0]->getType()) {
+ *toReplace = solnRoot->children[0];
return true;
}
+ }
- /**
- * Should we try to expand the index scan(s) in 'solnRoot' to pull out an indexed sort?
- *
- * Returns the node which should be replaced by the merge sort of exploded scans
- * in the out-parameter 'toReplace'.
- */
- bool structureOKForExplode(QuerySolutionNode* solnRoot, QuerySolutionNode** toReplace) {
- // For now we only explode if we *know* we will pull the sort out. We can look at
- // more structure (or just explode and recalculate properties and see what happens)
- // but for now we just explode if it's a sure bet.
- //
- // TODO: Can also try exploding if root is AND_HASH (last child dictates order.),
- // or other less obvious cases...
- if (STAGE_IXSCAN == solnRoot->getType()) {
- *toReplace = solnRoot;
- return true;
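+    // An OR can be exploded only when every child is itself a plain index scan.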
+ if (STAGE_OR == solnRoot->getType()) {
+ for (size_t i = 0; i < solnRoot->children.size(); ++i) {
+ if (STAGE_IXSCAN != solnRoot->children[i]->getType()) {
+ return false;
}
+ }
+ *toReplace = solnRoot;
+ return true;
+ }
- if (STAGE_FETCH == solnRoot->getType()) {
- if (STAGE_IXSCAN == solnRoot->children[0]->getType()) {
- *toReplace = solnRoot->children[0];
- return true;
- }
- }
+ return false;
+}
- if (STAGE_OR == solnRoot->getType()) {
- for (size_t i = 0; i < solnRoot->children.size(); ++i) {
- if (STAGE_IXSCAN != solnRoot->children[i]->getType()) {
- return false;
- }
- }
- *toReplace = solnRoot;
- return true;
- }
+// vectors of vectors can be > > annoying.
+typedef vector<Interval> PointPrefix;
- return false;
- }
+/**
+ * The first 'fieldsToExplode' fields of 'bounds' are points. Compute the Cartesian product
+ * of those fields and place it in 'prefixOut'.
+ */
+void makeCartesianProduct(const IndexBounds& bounds,
+ size_t fieldsToExplode,
+ vector<PointPrefix>* prefixOut) {
+ vector<PointPrefix> prefixForScans;
+
+ // We dump the Cartesian product of bounds into prefixForScans, starting w/the first
+ // field's points.
+ verify(fieldsToExplode >= 1);
+ const OrderedIntervalList& firstOil = bounds.fields[0];
+ verify(firstOil.intervals.size() >= 1);
+ for (size_t i = 0; i < firstOil.intervals.size(); ++i) {
+ const Interval& ival = firstOil.intervals[i];
+ verify(ival.isPoint());
+ PointPrefix pfix;
+ pfix.push_back(ival);
+ prefixForScans.push_back(pfix);
+ }
- // vectors of vectors can be > > annoying.
- typedef vector<Interval> PointPrefix;
-
- /**
- * The first 'fieldsToExplode' fields of 'bounds' are points. Compute the Cartesian product
- * of those fields and place it in 'prefixOut'.
- */
- void makeCartesianProduct(const IndexBounds& bounds,
- size_t fieldsToExplode,
- vector<PointPrefix>* prefixOut) {
-
- vector<PointPrefix> prefixForScans;
-
- // We dump the Cartesian product of bounds into prefixForScans, starting w/the first
- // field's points.
- verify(fieldsToExplode >= 1);
- const OrderedIntervalList& firstOil = bounds.fields[0];
- verify(firstOil.intervals.size() >= 1);
- for (size_t i = 0; i < firstOil.intervals.size(); ++i) {
- const Interval& ival = firstOil.intervals[i];
- verify(ival.isPoint());
- PointPrefix pfix;
+ // For each subsequent field...
+ for (size_t i = 1; i < fieldsToExplode; ++i) {
+ vector<PointPrefix> newPrefixForScans;
+ const OrderedIntervalList& oil = bounds.fields[i];
+ verify(oil.intervals.size() >= 1);
+ // For each point interval in that field (all ivals must be points)...
+ for (size_t j = 0; j < oil.intervals.size(); ++j) {
+ const Interval& ival = oil.intervals[j];
+ verify(ival.isPoint());
+ // Make a new scan by appending it to all scans in prefixForScans.
+ for (size_t k = 0; k < prefixForScans.size(); ++k) {
+ PointPrefix pfix = prefixForScans[k];
pfix.push_back(ival);
- prefixForScans.push_back(pfix);
+ newPrefixForScans.push_back(pfix);
}
-
- // For each subsequent field...
- for (size_t i = 1; i < fieldsToExplode; ++i) {
- vector<PointPrefix> newPrefixForScans;
- const OrderedIntervalList& oil = bounds.fields[i];
- verify(oil.intervals.size() >= 1);
- // For each point interval in that field (all ivals must be points)...
- for (size_t j = 0; j < oil.intervals.size(); ++j) {
- const Interval& ival = oil.intervals[j];
- verify(ival.isPoint());
- // Make a new scan by appending it to all scans in prefixForScans.
- for (size_t k = 0; k < prefixForScans.size(); ++k) {
- PointPrefix pfix = prefixForScans[k];
- pfix.push_back(ival);
- newPrefixForScans.push_back(pfix);
- }
- }
- // And update prefixForScans.
- newPrefixForScans.swap(prefixForScans);
- }
-
- prefixOut->swap(prefixForScans);
}
+ // And update prefixForScans.
+ newPrefixForScans.swap(prefixForScans);
+ }
- /**
- * Take the provided index scan node 'isn'. Returns a list of index scans which are
- * logically equivalent to 'isn' if joined by a MergeSort through the out-parameter
- * 'explosionResult'. These index scan instances are owned by the caller.
- *
- * fieldsToExplode is a count of how many fields in the scan's bounds are the union of point
- * intervals. This is computed beforehand and provided as a small optimization.
- *
- * Example:
- *
- * For the query find({a: {$in: [1,2]}}).sort({b: 1}) using the index {a:1, b:1}:
- * 'isn' will be scan with bounds a:[[1,1],[2,2]] & b: [MinKey, MaxKey]
- * 'sort' will be {b: 1}
- * 'fieldsToExplode' will be 1 (as only one field isUnionOfPoints).
- *
- * On return, 'explosionResult' will contain the following two scans:
- * a:[[1,1]], b:[MinKey, MaxKey]
- * a:[[2,2]], b:[MinKey, MaxKey]
- */
- void explodeScan(IndexScanNode* isn,
- const BSONObj& sort,
- size_t fieldsToExplode,
- vector<QuerySolutionNode*>* explosionResult) {
-
- // Turn the compact bounds in 'isn' into a bunch of points...
- vector<PointPrefix> prefixForScans;
- makeCartesianProduct(isn->bounds, fieldsToExplode, &prefixForScans);
-
- for (size_t i = 0; i < prefixForScans.size(); ++i) {
- const PointPrefix& prefix = prefixForScans[i];
- verify(prefix.size() == fieldsToExplode);
-
- // Copy boring fields into new child.
- IndexScanNode* child = new IndexScanNode();
- child->indexKeyPattern = isn->indexKeyPattern;
- child->direction = isn->direction;
- child->maxScan = isn->maxScan;
- child->addKeyMetadata = isn->addKeyMetadata;
- child->indexIsMultiKey = isn->indexIsMultiKey;
-
- // Copy the filter, if there is one.
- if (isn->filter.get()) {
- child->filter.reset(isn->filter->shallowClone());
- }
+ prefixOut->swap(prefixForScans);
+}
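
To make the Cartesian-product expansion above concrete, here is a minimal standalone sketch, not part of the patch, that performs the same expansion over plain integers instead of Interval objects; every name in it is illustrative only.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // Point values for two leading fields, e.g. {a: {$in: [1, 2]}, b: {$in: [3, 4]}}.
        std::vector<std::vector<int>> fields = {{1, 2}, {3, 4}};

        // Seed the product with the first field's points, as the code above does.
        std::vector<std::vector<int>> prefixForScans;
        for (int p : fields[0]) {
            prefixForScans.push_back({p});
        }

        // For each later field, append every point to every existing prefix.
        for (std::size_t i = 1; i < fields.size(); ++i) {
            std::vector<std::vector<int>> newPrefixForScans;
            for (int p : fields[i]) {
                for (const std::vector<int>& prefix : prefixForScans) {
                    std::vector<int> extended = prefix;
                    extended.push_back(p);
                    newPrefixForScans.push_back(extended);
                }
            }
            newPrefixForScans.swap(prefixForScans);
        }

        // Prints the four prefixes [1,3] [2,3] [1,4] [2,4], one exploded scan each.
        for (const std::vector<int>& prefix : prefixForScans) {
            std::cout << "[" << prefix[0] << "," << prefix[1] << "] ";
        }
        std::cout << "\n";
        return 0;
    }
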
- // Create child bounds.
- child->bounds.fields.resize(isn->bounds.fields.size());
- for (size_t j = 0; j < fieldsToExplode; ++j) {
- child->bounds.fields[j].intervals.push_back(prefix[j]);
- child->bounds.fields[j].name = isn->bounds.fields[j].name;
- }
- for (size_t j = fieldsToExplode; j < isn->bounds.fields.size(); ++j) {
- child->bounds.fields[j] = isn->bounds.fields[j];
- }
- explosionResult->push_back(child);
- }
+/**
+ * Takes the provided index scan node 'isn' and returns, through the out-parameter
+ * 'explosionResult', a list of index scans that are logically equivalent to 'isn'
+ * when joined by a MergeSort. These index scan instances are owned by the caller.
+ *
+ * fieldsToExplode is a count of how many fields in the scan's bounds are the union of point
+ * intervals. This is computed beforehand and provided as a small optimization.
+ *
+ * Example:
+ *
+ * For the query find({a: {$in: [1,2]}}).sort({b: 1}) using the index {a:1, b:1}:
+ * 'isn' will be a scan with bounds a:[[1,1],[2,2]] & b: [MinKey, MaxKey]
+ * 'sort' will be {b: 1}
+ * 'fieldsToExplode' will be 1 (as only one field isUnionOfPoints).
+ *
+ * On return, 'explosionResult' will contain the following two scans:
+ * a:[[1,1]], b:[MinKey, MaxKey]
+ * a:[[2,2]], b:[MinKey, MaxKey]
+ */
+void explodeScan(IndexScanNode* isn,
+ const BSONObj& sort,
+ size_t fieldsToExplode,
+ vector<QuerySolutionNode*>* explosionResult) {
+ // Turn the compact bounds in 'isn' into a bunch of points...
+ vector<PointPrefix> prefixForScans;
+ makeCartesianProduct(isn->bounds, fieldsToExplode, &prefixForScans);
+
+ for (size_t i = 0; i < prefixForScans.size(); ++i) {
+ const PointPrefix& prefix = prefixForScans[i];
+ verify(prefix.size() == fieldsToExplode);
+
+ // Copy boring fields into new child.
+ IndexScanNode* child = new IndexScanNode();
+ child->indexKeyPattern = isn->indexKeyPattern;
+ child->direction = isn->direction;
+ child->maxScan = isn->maxScan;
+ child->addKeyMetadata = isn->addKeyMetadata;
+ child->indexIsMultiKey = isn->indexIsMultiKey;
+
+ // Copy the filter, if there is one.
+ if (isn->filter.get()) {
+ child->filter.reset(isn->filter->shallowClone());
}
- /**
- * In the tree '*root', replace 'oldNode' with 'newNode'.
- */
- void replaceNodeInTree(QuerySolutionNode** root,
- QuerySolutionNode* oldNode,
- QuerySolutionNode* newNode) {
- if (*root == oldNode) {
- *root = newNode;
- }
- else {
- for (size_t i = 0 ; i < (*root)->children.size(); ++i) {
- replaceNodeInTree(&(*root)->children[i], oldNode, newNode);
- }
- }
+ // Create child bounds.
+ child->bounds.fields.resize(isn->bounds.fields.size());
+ for (size_t j = 0; j < fieldsToExplode; ++j) {
+ child->bounds.fields[j].intervals.push_back(prefix[j]);
+ child->bounds.fields[j].name = isn->bounds.fields[j].name;
}
-
- bool hasNode(QuerySolutionNode* root, StageType type) {
- if (type == root->getType()) {
- return true;
- }
-
- for (size_t i = 0; i < root->children.size(); ++i) {
- if (hasNode(root->children[i], type)) {
- return true;
- }
- }
-
- return false;
+ for (size_t j = fieldsToExplode; j < isn->bounds.fields.size(); ++j) {
+ child->bounds.fields[j] = isn->bounds.fields[j];
}
+ explosionResult->push_back(child);
+ }
+}
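
The doc comment above promises that the exploded scans can be re-joined by a merge sort. The self-contained sketch below, which is illustrative only and not part of the patch, shows why that recovers the {b: 1} order for find({a: {$in: [1, 2]}}).sort({b: 1}): each child scan pins 'a' to a point, so its output is already ordered by 'b', and an N-way merge yields the global order without a blocking sort.

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <queue>
    #include <tuple>
    #include <vector>

    int main() {
        // 'b' values streamed by the a==1 child scan and the a==2 child scan;
        // each child is individually sorted because its 'a' bound is a point.
        std::vector<std::vector<int>> childOutputs = {{1, 4, 9}, {2, 3, 10}};

        // A toy MergeSortNode: min-heap of (value, child index, position in child).
        using Entry = std::tuple<int, std::size_t, std::size_t>;
        std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry>> heap;
        for (std::size_t c = 0; c < childOutputs.size(); ++c) {
            heap.emplace(childOutputs[c][0], c, 0);
        }

        while (!heap.empty()) {
            auto [value, child, pos] = heap.top();
            heap.pop();
            std::cout << value << " ";  // prints 1 2 3 4 9 10
            if (pos + 1 < childOutputs[child].size()) {
                heap.emplace(childOutputs[child][pos + 1], child, pos + 1);
            }
        }
        std::cout << "\n";
        return 0;
    }
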
- } // namespace
-
- // static
- BSONObj QueryPlannerAnalysis::getSortPattern(const BSONObj& indexKeyPattern) {
- BSONObjBuilder sortBob;
- BSONObjIterator kpIt(indexKeyPattern);
- while (kpIt.more()) {
- BSONElement elt = kpIt.next();
- if (elt.type() == mongo::String) {
- break;
- }
- long long val = elt.safeNumberLong();
- int sortOrder = val >= 0 ? 1 : -1;
- sortBob.append(elt.fieldName(), sortOrder);
+/**
+ * In the tree '*root', replace 'oldNode' with 'newNode'.
+ */
+void replaceNodeInTree(QuerySolutionNode** root,
+ QuerySolutionNode* oldNode,
+ QuerySolutionNode* newNode) {
+ if (*root == oldNode) {
+ *root = newNode;
+ } else {
+ for (size_t i = 0; i < (*root)->children.size(); ++i) {
+ replaceNodeInTree(&(*root)->children[i], oldNode, newNode);
}
- return sortBob.obj();
}
+}
- // static
- bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode** solnRoot) {
- vector<QuerySolutionNode*> leafNodes;
+bool hasNode(QuerySolutionNode* root, StageType type) {
+ if (type == root->getType()) {
+ return true;
+ }
- QuerySolutionNode* toReplace;
- if (!structureOKForExplode(*solnRoot, &toReplace)) {
- return false;
+ for (size_t i = 0; i < root->children.size(); ++i) {
+ if (hasNode(root->children[i], type)) {
+ return true;
}
+ }
- getLeafNodes(*solnRoot, &leafNodes);
+ return false;
+}
- const BSONObj& desiredSort = query.getParsed().getSort();
+} // namespace
- // How many scan leaves will result from our expansion?
- size_t totalNumScans = 0;
+// static
+BSONObj QueryPlannerAnalysis::getSortPattern(const BSONObj& indexKeyPattern) {
+ BSONObjBuilder sortBob;
+ BSONObjIterator kpIt(indexKeyPattern);
+ while (kpIt.more()) {
+ BSONElement elt = kpIt.next();
+ if (elt.type() == mongo::String) {
+ break;
+ }
+ long long val = elt.safeNumberLong();
+ int sortOrder = val >= 0 ? 1 : -1;
+ sortBob.append(elt.fieldName(), sortOrder);
+ }
+ return sortBob.obj();
+}
+
+// static
+bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode** solnRoot) {
+ vector<QuerySolutionNode*> leafNodes;
+
+ QuerySolutionNode* toReplace;
+ if (!structureOKForExplode(*solnRoot, &toReplace)) {
+ return false;
+ }
- // The value of entry i is how many scans we want to blow up for leafNodes[i].
- // We calculate this in the loop below and might as well reuse it if we blow up
- // that scan.
- vector<size_t> fieldsToExplode;
+ getLeafNodes(*solnRoot, &leafNodes);
- // The sort order we're looking for has to possibly be provided by each of the index scans
- // upon explosion.
- for (size_t i = 0; i < leafNodes.size(); ++i) {
- // We can do this because structureOKForExplode is only true if the leaves are index
- // scans.
- IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
- const IndexBounds& bounds = isn->bounds;
+ const BSONObj& desiredSort = query.getParsed().getSort();
- // Not a point interval prefix, can't try to rewrite.
- if (bounds.isSimpleRange) {
- return false;
- }
+ // How many scan leaves will result from our expansion?
+ size_t totalNumScans = 0;
- // How many scans will we create if we blow up this ixscan?
- size_t numScans = 1;
+ // The value of entry i is how many scans we want to blow up for leafNodes[i].
+ // We calculate this in the loop below and might as well reuse it if we blow up
+ // that scan.
+ vector<size_t> fieldsToExplode;
- // Skip every field that is a union of point intervals and build the resulting sort
- // order from the remaining fields.
- BSONObjIterator kpIt(isn->indexKeyPattern);
- size_t boundsIdx = 0;
- while (kpIt.more()) {
- const OrderedIntervalList& oil = bounds.fields[boundsIdx];
- if (!isUnionOfPoints(oil)) {
- break;
- }
- numScans *= oil.intervals.size();
- kpIt.next();
- ++boundsIdx;
- }
-
- // There's no sort order left to gain by exploding. Just go home. TODO: verify nothing
- // clever we can do here.
- if (!kpIt.more()) {
- return false;
- }
+ // Each of the index scans must be able to provide the sort order we're looking for
+ // once it has been exploded.
+ for (size_t i = 0; i < leafNodes.size(); ++i) {
+ // We can do this because structureOKForExplode is only true if the leaves are index
+ // scans.
+ IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
+ const IndexBounds& bounds = isn->bounds;
- // Only explode if there's at least one field to explode for this scan.
- if (0 == boundsIdx) {
- return false;
- }
+ // Not a point interval prefix, can't try to rewrite.
+ if (bounds.isSimpleRange) {
+ return false;
+ }
- // The rest of the fields define the sort order we could obtain by exploding
- // the bounds.
- BSONObjBuilder resultingSortBob;
- while (kpIt.more()) {
- resultingSortBob.append(kpIt.next());
- }
+ // How many scans will we create if we blow up this ixscan?
+ size_t numScans = 1;
- // See if it's the order we're looking for.
- BSONObj possibleSort = resultingSortBob.obj();
- if (!desiredSort.isPrefixOf(possibleSort)) {
- // We can't get the sort order from the index scan. See if we can
- // get the sort by reversing the scan.
- BSONObj reversePossibleSort = QueryPlannerCommon::reverseSortObj(possibleSort);
- if (!desiredSort.isPrefixOf(reversePossibleSort)) {
- // Can't get the sort order from the reversed index scan either. Give up.
- return false;
- }
- else {
- // We can get the sort order we need if we reverse the scan.
- QueryPlannerCommon::reverseScans(isn);
- }
+ // Skip every field that is a union of point intervals and build the resulting sort
+ // order from the remaining fields.
+ BSONObjIterator kpIt(isn->indexKeyPattern);
+ size_t boundsIdx = 0;
+ while (kpIt.more()) {
+ const OrderedIntervalList& oil = bounds.fields[boundsIdx];
+ if (!isUnionOfPoints(oil)) {
+ break;
}
+ numScans *= oil.intervals.size();
+ kpIt.next();
+ ++boundsIdx;
+ }
- // Do some bookkeeping to see how many ixscans we'll create total.
- totalNumScans += numScans;
-
- // And for this scan how many fields we expand.
- fieldsToExplode.push_back(boundsIdx);
+ // There's no sort order left to gain by exploding. Just go home. TODO: verify nothing
+ // clever we can do here.
+ if (!kpIt.more()) {
+ return false;
}
- // Too many ixscans spoil the performance.
- if (totalNumScans > (size_t)internalQueryMaxScansToExplode) {
- LOG(5) << "Could expand ixscans to pull out sort order but resulting scan count"
- << "(" << totalNumScans << ") is too high.";
+ // Only explode if there's at least one field to explode for this scan.
+ if (0 == boundsIdx) {
return false;
}
- // If we're here, we can (probably? depends on how restrictive the structure check is)
- // get our sort order via ixscan blow-up.
- MergeSortNode* merge = new MergeSortNode();
- merge->sort = desiredSort;
- for (size_t i = 0; i < leafNodes.size(); ++i) {
- IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
- explodeScan(isn, desiredSort, fieldsToExplode[i], &merge->children);
+ // The rest of the fields define the sort order we could obtain by exploding
+ // the bounds.
+ BSONObjBuilder resultingSortBob;
+ while (kpIt.more()) {
+ resultingSortBob.append(kpIt.next());
+ }
+
+ // See if it's the order we're looking for.
+ BSONObj possibleSort = resultingSortBob.obj();
+ if (!desiredSort.isPrefixOf(possibleSort)) {
+ // We can't get the sort order from the index scan. See if we can
+ // get the sort by reversing the scan.
+ BSONObj reversePossibleSort = QueryPlannerCommon::reverseSortObj(possibleSort);
+ if (!desiredSort.isPrefixOf(reversePossibleSort)) {
+ // Can't get the sort order from the reversed index scan either. Give up.
+ return false;
+ } else {
+ // We can get the sort order we need if we reverse the scan.
+ QueryPlannerCommon::reverseScans(isn);
+ }
}
- merge->computeProperties();
+ // Do some bookkeeping to see how many ixscans we'll create total.
+ totalNumScans += numScans;
- // Replace 'toReplace' with the new merge sort node.
- replaceNodeInTree(solnRoot, toReplace, merge);
- // And get rid of the node that got replaced.
- delete toReplace;
+ // And for this scan how many fields we expand.
+ fieldsToExplode.push_back(boundsIdx);
+ }
- return true;
+ // Too many ixscans spoil the performance.
+ if (totalNumScans > (size_t)internalQueryMaxScansToExplode) {
+ LOG(5) << "Could expand ixscans to pull out sort order but resulting scan count"
+ << "(" << totalNumScans << ") is too high.";
+ return false;
}
- // static
- QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode* solnRoot,
- bool* blockingSortOut) {
- *blockingSortOut = false;
+ // If we're here, we can (probably? depends on how restrictive the structure check is)
+ // get our sort order via ixscan blow-up.
+ MergeSortNode* merge = new MergeSortNode();
+ merge->sort = desiredSort;
+ for (size_t i = 0; i < leafNodes.size(); ++i) {
+ IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
+ explodeScan(isn, desiredSort, fieldsToExplode[i], &merge->children);
+ }
- const LiteParsedQuery& lpq = query.getParsed();
- const BSONObj& sortObj = lpq.getSort();
+ merge->computeProperties();
- if (sortObj.isEmpty()) {
- return solnRoot;
- }
+ // Replace 'toReplace' with the new merge sort node.
+ replaceNodeInTree(solnRoot, toReplace, merge);
+ // And get rid of the node that got replaced.
+ delete toReplace;
- // TODO: We could check sortObj for any projections other than :1 and :-1
- // and short-cut some of this.
+ return true;
+}
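
The totalNumScans bookkeeping above is simply the product of the point counts on each prefix field, compared against internalQueryMaxScansToExplode. A tiny standalone sketch of that arithmetic follows; it is not part of the patch, and the cap used is a stand-in value rather than the server's actual default.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // Hypothetical query: {a: {$in: <10 values>}, b: {$in: <30 values>}} with a
        // sort on a later index field; each prefix field contributes its point count.
        std::vector<std::size_t> pointIntervalsPerField = {10, 30};
        const std::size_t maxScansToExplode = 200;  // stand-in for the server knob

        std::size_t totalNumScans = 1;
        for (std::size_t n : pointIntervalsPerField) {
            totalNumScans *= n;
        }

        if (totalNumScans > maxScansToExplode) {
            // 300 > 200: keep the single scan and fall back to a blocking sort.
            std::cout << totalNumScans << " scans would be too many\n";
        } else {
            std::cout << "explode into " << totalNumScans << " scans plus a merge sort\n";
        }
        return 0;
    }
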
- // If the sort is $natural, we ignore it, assuming that the caller has detected that and
- // outputted a collscan to satisfy the desired order.
- BSONElement natural = sortObj.getFieldDotted("$natural");
- if (!natural.eoo()) {
- return solnRoot;
- }
+// static
+QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode* solnRoot,
+ bool* blockingSortOut) {
+ *blockingSortOut = false;
- // See if solnRoot gives us the sort. If so, we're done.
- BSONObjSet sorts = solnRoot->getSort();
+ const LiteParsedQuery& lpq = query.getParsed();
+ const BSONObj& sortObj = lpq.getSort();
- // If the sort we want is in the set of sort orders provided already, bail out.
- if (sorts.end() != sorts.find(sortObj)) {
- return solnRoot;
- }
+ if (sortObj.isEmpty()) {
+ return solnRoot;
+ }
- // Sort is not provided. See if we provide the reverse of our sort pattern.
- // If so, we can reverse the scan direction(s).
- BSONObj reverseSort = QueryPlannerCommon::reverseSortObj(sortObj);
- if (sorts.end() != sorts.find(reverseSort)) {
- QueryPlannerCommon::reverseScans(solnRoot);
- LOG(5) << "Reversing ixscan to provide sort. Result: "
- << solnRoot->toString() << endl;
- return solnRoot;
- }
+ // TODO: We could check sortObj for any projections other than :1 and :-1
+ // and short-cut some of this.
- // Sort not provided, can't reverse scans to get the sort. One last trick: We can "explode"
- // index scans over point intervals to an OR of sub-scans in order to pull out a sort.
- // Let's try this.
- if (explodeForSort(query, params, &solnRoot)) {
- return solnRoot;
- }
+ // If the sort is $natural, we ignore it, assuming that the caller has detected that and
+ // outputted a collscan to satisfy the desired order.
+ BSONElement natural = sortObj.getFieldDotted("$natural");
+ if (!natural.eoo()) {
+ return solnRoot;
+ }
- // If we're here, we need to add a sort stage.
+ // See if solnRoot gives us the sort. If so, we're done.
+ BSONObjSet sorts = solnRoot->getSort();
- // If we're not allowed to put a blocking sort in, bail out.
- if (params.options & QueryPlannerParams::NO_BLOCKING_SORT) {
- delete solnRoot;
- return NULL;
- }
+ // If the sort we want is in the set of sort orders provided already, bail out.
+ if (sorts.end() != sorts.find(sortObj)) {
+ return solnRoot;
+ }
- // Add a fetch stage so we have the full object when we hit the sort stage. TODO: Can we
- // pull the values that we sort by out of the key and if so in what cases? Perhaps we can
- // avoid a fetch.
- if (!solnRoot->fetched()) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
- }
+ // Sort is not provided. See if we provide the reverse of our sort pattern.
+ // If so, we can reverse the scan direction(s).
+ BSONObj reverseSort = QueryPlannerCommon::reverseSortObj(sortObj);
+ if (sorts.end() != sorts.find(reverseSort)) {
+ QueryPlannerCommon::reverseScans(solnRoot);
+ LOG(5) << "Reversing ixscan to provide sort. Result: " << solnRoot->toString() << endl;
+ return solnRoot;
+ }
- // And build the full sort stage.
- SortNode* sort = new SortNode();
- sort->pattern = sortObj;
- sort->query = lpq.getFilter();
- sort->children.push_back(solnRoot);
- solnRoot = sort;
- // When setting the limit on the sort, we need to consider both
- // the limit N and skip count M. The sort should return an ordered list
- // N + M items so that the skip stage can discard the first M results.
- if (lpq.getLimit()) {
- // We have a true limit. The limit can be combined with the SORT stage.
- sort->limit = static_cast<size_t>(*lpq.getLimit()) + static_cast<size_t>(lpq.getSkip());
- }
- else if (!lpq.isFromFindCommand() && lpq.getBatchSize()) {
- // We have an ntoreturn specified by an OP_QUERY style find. This is used
- // by clients to mean both batchSize and limit.
- //
- // Overflow here would be bad and could cause a nonsense limit. Cast
- // skip and limit values to unsigned ints to make sure that the
- // sum is never stored as signed. (See SERVER-13537).
- sort->limit = static_cast<size_t>(*lpq.getBatchSize()) +
- static_cast<size_t>(lpq.getSkip());
-
- // This is a SORT with a limit. The wire protocol has a single quantity
- // called "numToReturn" which could mean either limit or batchSize.
- // We have no idea what the client intended. One way to handle the ambiguity
- // of a limited OR stage is to use the SPLIT_LIMITED_SORT hack.
- //
- // If wantMore is false (meaning that 'ntoreturn' was initially passed to
- // the server as a negative value), then we treat numToReturn as a limit.
- // Since there is no limit-batchSize ambiguity in this case, we do not use the
- // SPLIT_LIMITED_SORT hack.
- //
- // If numToReturn is really a limit, then we want to add a limit to this
- // SORT stage, and hence perform a topK.
- //
- // If numToReturn is really a batchSize, then we want to perform a regular
- // blocking sort.
- //
- // Since we don't know which to use, just join the two options with an OR,
- // with the topK first. If the client wants a limit, they'll get the efficiency
- // of topK. If they want a batchSize, the other OR branch will deliver the missing
- // results. The OR stage handles deduping.
- if (lpq.wantMore()
- && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
- // If we're here then the SPLIT_LIMITED_SORT hack is turned on,
- // and the query is of a type that allows the hack.
- //
- // Not allowed for geo or text, because we assume elsewhere that those
- // stages appear just once.
- OrNode* orn = new OrNode();
- orn->children.push_back(sort);
- SortNode* sortClone = static_cast<SortNode*>(sort->clone());
- sortClone->limit = 0;
- orn->children.push_back(sortClone);
- solnRoot = orn;
- }
- }
- else {
- sort->limit = 0;
- }
+ // Sort not provided, can't reverse scans to get the sort. One last trick: We can "explode"
+ // index scans over point intervals to an OR of sub-scans in order to pull out a sort.
+ // Let's try this.
+ if (explodeForSort(query, params, &solnRoot)) {
+ return solnRoot;
+ }
- *blockingSortOut = true;
+ // If we're here, we need to add a sort stage.
- return solnRoot;
+ // If we're not allowed to put a blocking sort in, bail out.
+ if (params.options & QueryPlannerParams::NO_BLOCKING_SORT) {
+ delete solnRoot;
+ return NULL;
}
- // static
- QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode* solnRoot) {
- unique_ptr<QuerySolution> soln(new QuerySolution());
- soln->filterData = query.getQueryObj();
- soln->indexFilterApplied = params.indexFiltersApplied;
+ // Add a fetch stage so we have the full object when we hit the sort stage. TODO: Can we
+ // pull the values that we sort by out of the key and if so in what cases? Perhaps we can
+ // avoid a fetch.
+ if (!solnRoot->fetched()) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
+ }
- solnRoot->computeProperties();
+ // And build the full sort stage.
+ SortNode* sort = new SortNode();
+ sort->pattern = sortObj;
+ sort->query = lpq.getFilter();
+ sort->children.push_back(solnRoot);
+ solnRoot = sort;
+ // When setting the limit on the sort, we need to consider both
+ // the limit N and skip count M. The sort should return an ordered list
+ // N + M items so that the skip stage can discard the first M results.
+ if (lpq.getLimit()) {
+ // We have a true limit. The limit can be combined with the SORT stage.
+ sort->limit = static_cast<size_t>(*lpq.getLimit()) + static_cast<size_t>(lpq.getSkip());
+ } else if (!lpq.isFromFindCommand() && lpq.getBatchSize()) {
+ // We have an ntoreturn specified by an OP_QUERY style find. This is used
+ // by clients to mean both batchSize and limit.
+ //
+ // Overflow here would be bad and could cause a nonsense limit. Cast
+ // skip and limit values to unsigned ints to make sure that the
+ // sum is never stored as signed. (See SERVER-13537).
+ sort->limit = static_cast<size_t>(*lpq.getBatchSize()) + static_cast<size_t>(lpq.getSkip());
+
+ // This is a SORT with a limit. The wire protocol has a single quantity
+ // called "numToReturn" which could mean either limit or batchSize.
+ // We have no idea what the client intended. One way to handle the ambiguity
+ // of a limited OR stage is to use the SPLIT_LIMITED_SORT hack.
+ //
+ // If wantMore is false (meaning that 'ntoreturn' was initially passed to
+ // the server as a negative value), then we treat numToReturn as a limit.
+ // Since there is no limit-batchSize ambiguity in this case, we do not use the
+ // SPLIT_LIMITED_SORT hack.
+ //
+ // If numToReturn is really a limit, then we want to add a limit to this
+ // SORT stage, and hence perform a topK.
+ //
+ // If numToReturn is really a batchSize, then we want to perform a regular
+ // blocking sort.
+ //
+ // Since we don't know which to use, just join the two options with an OR,
+ // with the topK first. If the client wants a limit, they'll get the efficiency
+ // of topK. If they want a batchSize, the other OR branch will deliver the missing
+ // results. The OR stage handles deduping.
+ if (lpq.wantMore() && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
+ // If we're here then the SPLIT_LIMITED_SORT hack is turned on,
+ // and the query is of a type that allows the hack.
+ //
+ // Not allowed for geo or text, because we assume elsewhere that those
+ // stages appear just once.
+ OrNode* orn = new OrNode();
+ orn->children.push_back(sort);
+ SortNode* sortClone = static_cast<SortNode*>(sort->clone());
+ sortClone->limit = 0;
+ orn->children.push_back(sortClone);
+ solnRoot = orn;
+ }
+ } else {
+ sort->limit = 0;
+ }
- // solnRoot finds all our results. Let's see what transformations we must perform to the
- // data.
+ *blockingSortOut = true;
- // If we're answering a query on a sharded system, we need to drop documents that aren't
- // logically part of our shard.
- if (params.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ return solnRoot;
+}
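
The limit-plus-skip arithmetic in analyzeSort is easy to gloss over, so here is a minimal sketch of it, illustrative only and not taken from the patch: with limit 10 and skip 5 the blocking sort must retain 15 documents.

    #include <cstddef>
    #include <iostream>

    int main() {
        const std::size_t limit = 10;  // hypothetical query limit (N)
        const std::size_t skip = 5;    // hypothetical query skip (M)

        // Mirrors sort->limit = limit + skip: the blocking sort keeps N + M
        // documents so the later skip stage can discard the first M of them.
        const std::size_t sortLimit = limit + skip;

        std::cout << "sort retains " << sortLimit << " docs, skip drops " << skip
                  << ", client receives " << limit << "\n";
        return 0;
    }
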
- if (!solnRoot->fetched()) {
+// static
+QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode* solnRoot) {
+ unique_ptr<QuerySolution> soln(new QuerySolution());
+ soln->filterData = query.getQueryObj();
+ soln->indexFilterApplied = params.indexFiltersApplied;
- // See if we need to fetch information for our shard key.
- // NOTE: Solution nodes only list ordinary, non-transformed index keys for now
+ solnRoot->computeProperties();
- bool fetch = false;
- BSONObjIterator it(params.shardKey);
- while (it.more()) {
- BSONElement nextEl = it.next();
- if (!solnRoot->hasField(nextEl.fieldName())) {
- fetch = true;
- break;
- }
- }
+ // solnRoot finds all our results. Let's see what transformations we must perform to the
+ // data.
- if (fetch) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
+ // If we're answering a query on a sharded system, we need to drop documents that aren't
+ // logically part of our shard.
+ if (params.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ if (!solnRoot->fetched()) {
+ // See if we need to fetch information for our shard key.
+ // NOTE: Solution nodes only list ordinary, non-transformed index keys for now
+
+ bool fetch = false;
+ BSONObjIterator it(params.shardKey);
+ while (it.more()) {
+ BSONElement nextEl = it.next();
+ if (!solnRoot->hasField(nextEl.fieldName())) {
+ fetch = true;
+ break;
}
}
- ShardingFilterNode* sfn = new ShardingFilterNode();
- sfn->children.push_back(solnRoot);
- solnRoot = sfn;
+ if (fetch) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
+ }
}
- bool hasSortStage = false;
- solnRoot = analyzeSort(query, params, solnRoot, &hasSortStage);
+ ShardingFilterNode* sfn = new ShardingFilterNode();
+ sfn->children.push_back(solnRoot);
+ solnRoot = sfn;
+ }
- // This can happen if we need to create a blocking sort stage and we're not allowed to.
- if (NULL == solnRoot) { return NULL; }
+ bool hasSortStage = false;
+ solnRoot = analyzeSort(query, params, solnRoot, &hasSortStage);
- // A solution can be blocking if it has a blocking sort stage or
- // a hashed AND stage.
- bool hasAndHashStage = hasNode(solnRoot, STAGE_AND_HASH);
- soln->hasBlockingStage = hasSortStage || hasAndHashStage;
+ // This can happen if we need to create a blocking sort stage and we're not allowed to.
+ if (NULL == solnRoot) {
+ return NULL;
+ }
- const LiteParsedQuery& lpq = query.getParsed();
+ // A solution can be blocking if it has a blocking sort stage or
+ // a hashed AND stage.
+ bool hasAndHashStage = hasNode(solnRoot, STAGE_AND_HASH);
+ soln->hasBlockingStage = hasSortStage || hasAndHashStage;
- // If we can (and should), add the keep mutations stage.
+ const LiteParsedQuery& lpq = query.getParsed();
- // We cannot keep mutated documents if:
- //
- // 1. The query requires an index to evaluate the predicate ($text). We can't tell whether
- // or not the doc actually satisfies the $text predicate since we can't evaluate a
- // text MatchExpression.
- //
- // 2. The query implies a sort ($geoNear). It would be rather expensive and hacky to merge
- // the document at the right place.
- //
- // 3. There is an index-provided sort. Ditto above comment about merging.
- //
- // TODO: do we want some kind of pre-planning step where we look for certain nodes and cache
- // them? We do lookups in the tree a few times. This may not matter as most trees are
- // shallow in terms of query nodes.
- bool cannotKeepFlagged = hasNode(solnRoot, STAGE_TEXT)
- || hasNode(solnRoot, STAGE_GEO_NEAR_2D)
- || hasNode(solnRoot, STAGE_GEO_NEAR_2DSPHERE)
- || (!lpq.getSort().isEmpty() && !hasSortStage);
-
- // Only these stages can produce flagged results. A stage has to hold state past one call
- // to work(...) in order to possibly flag a result.
- bool couldProduceFlagged = hasAndHashStage
- || hasNode(solnRoot, STAGE_AND_SORTED)
- || hasNode(solnRoot, STAGE_FETCH);
-
- bool shouldAddMutation = !cannotKeepFlagged && couldProduceFlagged;
-
- if (shouldAddMutation && (params.options & QueryPlannerParams::KEEP_MUTATIONS)) {
- KeepMutationsNode* keep = new KeepMutationsNode();
-
- // We must run the entire expression tree to make sure the document is still valid.
- keep->filter.reset(query.root()->shallowClone());
-
- if (STAGE_SORT == solnRoot->getType()) {
- // We want to insert the invalidated results before the sort stage, if there is one.
- verify(1 == solnRoot->children.size());
- keep->children.push_back(solnRoot->children[0]);
- solnRoot->children[0] = keep;
- }
- else {
- keep->children.push_back(solnRoot);
- solnRoot = keep;
- }
+ // If we can (and should), add the keep mutations stage.
+
+ // We cannot keep mutated documents if:
+ //
+ // 1. The query requires an index to evaluate the predicate ($text). We can't tell whether
+ // or not the doc actually satisfies the $text predicate since we can't evaluate a
+ // text MatchExpression.
+ //
+ // 2. The query implies a sort ($geoNear). It would be rather expensive and hacky to merge
+ // the document at the right place.
+ //
+ // 3. There is an index-provided sort. Ditto above comment about merging.
+ //
+ // TODO: do we want some kind of pre-planning step where we look for certain nodes and cache
+ // them? We do lookups in the tree a few times. This may not matter as most trees are
+ // shallow in terms of query nodes.
+ bool cannotKeepFlagged = hasNode(solnRoot, STAGE_TEXT) ||
+ hasNode(solnRoot, STAGE_GEO_NEAR_2D) || hasNode(solnRoot, STAGE_GEO_NEAR_2DSPHERE) ||
+ (!lpq.getSort().isEmpty() && !hasSortStage);
+
+ // Only these stages can produce flagged results. A stage has to hold state past one call
+ // to work(...) in order to possibly flag a result.
+ bool couldProduceFlagged =
+ hasAndHashStage || hasNode(solnRoot, STAGE_AND_SORTED) || hasNode(solnRoot, STAGE_FETCH);
+
+ bool shouldAddMutation = !cannotKeepFlagged && couldProduceFlagged;
+
+ if (shouldAddMutation && (params.options & QueryPlannerParams::KEEP_MUTATIONS)) {
+ KeepMutationsNode* keep = new KeepMutationsNode();
+
+ // We must run the entire expression tree to make sure the document is still valid.
+ keep->filter.reset(query.root()->shallowClone());
+
+ if (STAGE_SORT == solnRoot->getType()) {
+ // We want to insert the invalidated results before the sort stage, if there is one.
+ verify(1 == solnRoot->children.size());
+ keep->children.push_back(solnRoot->children[0]);
+ solnRoot->children[0] = keep;
+ } else {
+ keep->children.push_back(solnRoot);
+ solnRoot = keep;
}
+ }
- // Project the results.
- if (NULL != query.getProj()) {
- LOG(5) << "PROJECTION: fetched status: " << solnRoot->fetched() << endl;
- LOG(5) << "PROJECTION: Current plan is:\n" << solnRoot->toString() << endl;
-
- ProjectionNode::ProjectionType projType = ProjectionNode::DEFAULT;
- BSONObj coveredKeyObj;
-
- if (query.getProj()->requiresDocument()) {
- LOG(5) << "PROJECTION: claims to require doc adding fetch.\n";
- // If the projection requires the entire document, somebody must fetch.
- if (!solnRoot->fetched()) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
- }
+ // Project the results.
+ if (NULL != query.getProj()) {
+ LOG(5) << "PROJECTION: fetched status: " << solnRoot->fetched() << endl;
+ LOG(5) << "PROJECTION: Current plan is:\n" << solnRoot->toString() << endl;
+
+ ProjectionNode::ProjectionType projType = ProjectionNode::DEFAULT;
+ BSONObj coveredKeyObj;
+
+ if (query.getProj()->requiresDocument()) {
+ LOG(5) << "PROJECTION: claims to require doc adding fetch.\n";
+ // If the projection requires the entire document, somebody must fetch.
+ if (!solnRoot->fetched()) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
}
- else if (!query.getProj()->wantIndexKey()) {
- // The only way we're here is if it's a simple projection. That is, we can pick out
- // the fields we want to include and they're not dotted. So we want to execute the
- // projection in the fast-path simple fashion. Just don't know which fast path yet.
- LOG(5) << "PROJECTION: requires fields\n";
- const vector<string>& fields = query.getProj()->getRequiredFields();
- bool covered = true;
- for (size_t i = 0; i < fields.size(); ++i) {
- if (!solnRoot->hasField(fields[i])) {
- LOG(5) << "PROJECTION: not covered due to field "
- << fields[i] << endl;
- covered = false;
- break;
- }
+ } else if (!query.getProj()->wantIndexKey()) {
+ // The only way we're here is if it's a simple projection. That is, we can pick out
+ // the fields we want to include and they're not dotted. So we want to execute the
+ // projection in the fast-path simple fashion. Just don't know which fast path yet.
+ LOG(5) << "PROJECTION: requires fields\n";
+ const vector<string>& fields = query.getProj()->getRequiredFields();
+ bool covered = true;
+ for (size_t i = 0; i < fields.size(); ++i) {
+ if (!solnRoot->hasField(fields[i])) {
+ LOG(5) << "PROJECTION: not covered due to field " << fields[i] << endl;
+ covered = false;
+ break;
}
+ }
- LOG(5) << "PROJECTION: is covered?: = " << covered << endl;
+ LOG(5) << "PROJECTION: is covered?: = " << covered << endl;
- // If any field is missing from the list of fields the projection wants,
- // a fetch is required.
- if (!covered) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
+ // If any field is missing from the list of fields the projection wants,
+ // a fetch is required.
+ if (!covered) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
- // It's simple but we'll have the full document and we should just iterate
- // over that.
+ // It's simple but we'll have the full document and we should just iterate
+ // over that.
+ projType = ProjectionNode::SIMPLE_DOC;
+ LOG(5) << "PROJECTION: not covered, fetching.";
+ } else {
+ if (solnRoot->fetched()) {
+ // Fetched implies hasObj() so let's run with that.
projType = ProjectionNode::SIMPLE_DOC;
- LOG(5) << "PROJECTION: not covered, fetching.";
- }
- else {
- if (solnRoot->fetched()) {
- // Fetched implies hasObj() so let's run with that.
- projType = ProjectionNode::SIMPLE_DOC;
- LOG(5) << "PROJECTION: covered via FETCH, using SIMPLE_DOC fast path";
- }
- else {
- // If we're here we're not fetched so we're covered. Let's see if we can
- // get out of using the default projType. If there's only one leaf
- // underneath and it's giving us index data we can use the faster covered
- // impl.
- vector<QuerySolutionNode*> leafNodes;
- getLeafNodes(solnRoot, &leafNodes);
-
- if (1 == leafNodes.size()) {
- // Both the IXSCAN and DISTINCT stages provide covered key data.
- if (STAGE_IXSCAN == leafNodes[0]->getType()) {
- projType = ProjectionNode::COVERED_ONE_INDEX;
- IndexScanNode* ixn = static_cast<IndexScanNode*>(leafNodes[0]);
- coveredKeyObj = ixn->indexKeyPattern;
- LOG(5) << "PROJECTION: covered via IXSCAN, using COVERED fast path";
- }
- else if (STAGE_DISTINCT_SCAN == leafNodes[0]->getType()) {
- projType = ProjectionNode::COVERED_ONE_INDEX;
- DistinctNode* dn = static_cast<DistinctNode*>(leafNodes[0]);
- coveredKeyObj = dn->indexKeyPattern;
- LOG(5) << "PROJECTION: covered via DISTINCT, using COVERED fast path";
- }
+ LOG(5) << "PROJECTION: covered via FETCH, using SIMPLE_DOC fast path";
+ } else {
+ // If we're here we're not fetched so we're covered. Let's see if we can
+ // get out of using the default projType. If there's only one leaf
+ // underneath and it's giving us index data we can use the faster covered
+ // impl.
+ vector<QuerySolutionNode*> leafNodes;
+ getLeafNodes(solnRoot, &leafNodes);
+
+ if (1 == leafNodes.size()) {
+ // Both the IXSCAN and DISTINCT stages provide covered key data.
+ if (STAGE_IXSCAN == leafNodes[0]->getType()) {
+ projType = ProjectionNode::COVERED_ONE_INDEX;
+ IndexScanNode* ixn = static_cast<IndexScanNode*>(leafNodes[0]);
+ coveredKeyObj = ixn->indexKeyPattern;
+ LOG(5) << "PROJECTION: covered via IXSCAN, using COVERED fast path";
+ } else if (STAGE_DISTINCT_SCAN == leafNodes[0]->getType()) {
+ projType = ProjectionNode::COVERED_ONE_INDEX;
+ DistinctNode* dn = static_cast<DistinctNode*>(leafNodes[0]);
+ coveredKeyObj = dn->indexKeyPattern;
+ LOG(5) << "PROJECTION: covered via DISTINCT, using COVERED fast path";
}
}
}
}
-
- // We now know we have whatever data is required for the projection.
- ProjectionNode* projNode = new ProjectionNode();
- projNode->children.push_back(solnRoot);
- projNode->fullExpression = query.root();
- projNode->projection = lpq.getProj();
- projNode->projType = projType;
- projNode->coveredKeyObj = coveredKeyObj;
- solnRoot = projNode;
- }
- else {
- // If there's no projection, we must fetch, as the user wants the entire doc.
- if (!solnRoot->fetched()) {
- FetchNode* fetch = new FetchNode();
- fetch->children.push_back(solnRoot);
- solnRoot = fetch;
- }
}
- if (0 != lpq.getSkip()) {
- SkipNode* skip = new SkipNode();
- skip->skip = lpq.getSkip();
- skip->children.push_back(solnRoot);
- solnRoot = skip;
+ // We now know we have whatever data is required for the projection.
+ ProjectionNode* projNode = new ProjectionNode();
+ projNode->children.push_back(solnRoot);
+ projNode->fullExpression = query.root();
+ projNode->projection = lpq.getProj();
+ projNode->projType = projType;
+ projNode->coveredKeyObj = coveredKeyObj;
+ solnRoot = projNode;
+ } else {
+ // If there's no projection, we must fetch, as the user wants the entire doc.
+ if (!solnRoot->fetched()) {
+ FetchNode* fetch = new FetchNode();
+ fetch->children.push_back(solnRoot);
+ solnRoot = fetch;
}
+ }
- // When there is both a blocking sort and a limit, the limit will
- // be enforced by the blocking sort.
- // Otherwise, we need to limit the results in the case of a hard limit
- // (ie. limit in raw query is negative)
- if (!hasSortStage) {
- // We don't have a sort stage. This means that, if there is a limit, we will have
- // to enforce it ourselves since it's not handled inside SORT.
- if (lpq.getLimit()) {
- LimitNode* limit = new LimitNode();
- limit->limit = *lpq.getLimit();
- limit->children.push_back(solnRoot);
- solnRoot = limit;
- }
- else if (!lpq.isFromFindCommand() && lpq.getBatchSize() && !lpq.wantMore()) {
- // We have a "legacy limit", i.e. a negative ntoreturn value from an OP_QUERY style
- // find.
- LimitNode* limit = new LimitNode();
- limit->limit = *lpq.getBatchSize();
- limit->children.push_back(solnRoot);
- solnRoot = limit;
- }
- }
+ if (0 != lpq.getSkip()) {
+ SkipNode* skip = new SkipNode();
+ skip->skip = lpq.getSkip();
+ skip->children.push_back(solnRoot);
+ solnRoot = skip;
+ }
- soln->root.reset(solnRoot);
- return soln.release();
+ // When there is both a blocking sort and a limit, the limit will
+ // be enforced by the blocking sort.
+ // Otherwise, we need to limit the results in the case of a hard limit
+ // (i.e. the limit in the raw query is negative)
+ if (!hasSortStage) {
+ // We don't have a sort stage. This means that, if there is a limit, we will have
+ // to enforce it ourselves since it's not handled inside SORT.
+ if (lpq.getLimit()) {
+ LimitNode* limit = new LimitNode();
+ limit->limit = *lpq.getLimit();
+ limit->children.push_back(solnRoot);
+ solnRoot = limit;
+ } else if (!lpq.isFromFindCommand() && lpq.getBatchSize() && !lpq.wantMore()) {
+ // We have a "legacy limit", i.e. a negative ntoreturn value from an OP_QUERY style
+ // find.
+ LimitNode* limit = new LimitNode();
+ limit->limit = *lpq.getBatchSize();
+ limit->children.push_back(solnRoot);
+ solnRoot = limit;
+ }
}
+ soln->root.reset(solnRoot);
+ return soln.release();
+}
+
} // namespace mongo
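
analyzeDataAccess builds its result bottom-up, wrapping the incoming plan with one stage per concern; the last stage attached becomes the root. The sketch below is illustrative only and assumes a covered, unsorted query with a skip and a limit; it shows the resulting nesting order.

    #include <iostream>
    #include <string>

    int main() {
        // Start from the data-access root handed to analyzeDataAccess.
        std::string plan = "IXSCAN";

        // Each wrap mirrors one "solnRoot = newNode" assignment in the function:
        // projection (or fetch), then skip, then limit. A blocking SORT, when
        // present, absorbs the limit instead, so it is omitted here.
        for (const char* stage : {"PROJECTION", "SKIP", "LIMIT"}) {
            plan = std::string(stage) + " -> " + plan;
        }

        std::cout << plan << "\n";  // LIMIT -> SKIP -> PROJECTION -> IXSCAN
        return 0;
    }
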
diff --git a/src/mongo/db/query/planner_analysis.h b/src/mongo/db/query/planner_analysis.h
index b7591bb31b5..6cbb48df136 100644
--- a/src/mongo/db/query/planner_analysis.h
+++ b/src/mongo/db/query/planner_analysis.h
@@ -34,77 +34,77 @@
namespace mongo {
- class Collection;
+class Collection;
- class QueryPlannerAnalysis {
- public:
- /**
- * Takes an index key pattern and returns an object describing the "maximal sort" that this
- * index can provide. Returned object is in normalized sort form (all elements have value 1
- * or -1).
- *
- * Examples:
- * - {a: 1, b: -1} => {a: 1, b: -1}
- * - {a: true} => {a: 1}
- * - {a: "hashed"} => {}
- * - {a: 1, b: "text", c: 1} => {a: 1}
- */
- static BSONObj getSortPattern(const BSONObj& indexKeyPattern);
+class QueryPlannerAnalysis {
+public:
+ /**
+ * Takes an index key pattern and returns an object describing the "maximal sort" that this
+ * index can provide. Returned object is in normalized sort form (all elements have value 1
+ * or -1).
+ *
+ * Examples:
+ * - {a: 1, b: -1} => {a: 1, b: -1}
+ * - {a: true} => {a: 1}
+ * - {a: "hashed"} => {}
+ * - {a: 1, b: "text", c: 1} => {a: 1}
+ */
+ static BSONObj getSortPattern(const BSONObj& indexKeyPattern);
- /**
- * In brief: performs sort and covering analysis.
- *
- * The solution rooted at 'solnRoot' provides data for the query, whether through some
- * configuration of indices or through a collection scan. Additional stages may be required
- * to perform sorting, projection, or other operations that are independent of the source
- * of the data. These stages are added atop 'solnRoot'.
- *
- * 'taggedRoot' is a copy of the parse tree. Nodes in 'solnRoot' may point into it.
- *
- * Takes ownership of 'solnRoot' and 'taggedRoot'.
- *
- * Returns NULL if a solution cannot be constructed given the requirements in 'params'.
- *
- * Caller owns the returned QuerySolution.
- */
- static QuerySolution* analyzeDataAccess(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode* solnRoot);
+ /**
+ * In brief: performs sort and covering analysis.
+ *
+ * The solution rooted at 'solnRoot' provides data for the query, whether through some
+ * configuration of indices or through a collection scan. Additional stages may be required
+ * to perform sorting, projection, or other operations that are independent of the source
+ * of the data. These stages are added atop 'solnRoot'.
+ *
+ * 'taggedRoot' is a copy of the parse tree. Nodes in 'solnRoot' may point into it.
+ *
+ * Takes ownership of 'solnRoot' and 'taggedRoot'.
+ *
+ * Returns NULL if a solution cannot be constructed given the requirements in 'params'.
+ *
+ * Caller owns the returned QuerySolution.
+ */
+ static QuerySolution* analyzeDataAccess(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode* solnRoot);
- /**
- * Sort the results, if there is a sort required.
- */
- static QuerySolutionNode* analyzeSort(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode* solnRoot,
- bool* blockingSortOut);
+ /**
+ * Sort the results, if there is a sort required.
+ */
+ static QuerySolutionNode* analyzeSort(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode* solnRoot,
+ bool* blockingSortOut);
- /**
- * Internal helper function used by analyzeSort.
- *
- * Rewrites an index scan over many point intervals as an OR of many index scans in order to
- * obtain an indexed sort. For full details, see SERVER-1205.
- *
- * Here is an example:
- *
- * Consider the query find({a: {$in: [1,2]}}).sort({b: 1}) with using the index {a:1, b:1}.
- *
- * Our default solution will be to construct one index scan with the bounds a:[[1,1],[2,2]]
- * and b: [MinKey, MaxKey].
- *
- * However, this is logically equivalent to the union of the following scans:
- * a:[1,1], b:[MinKey, MaxKey]
- * a:[2,2], b:[MinKey, MaxKey]
- *
- * Since the bounds on 'a' are a point, each scan provides the sort order {b:1} in addition
- * to {a:1, b:1}.
- *
- * If we union these scans with a merge sort instead of a normal hashing OR, we can preserve
- * the sort order that each scan provides.
- */
- static bool explodeForSort(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- QuerySolutionNode** solnRoot);
- };
+ /**
+ * Internal helper function used by analyzeSort.
+ *
+ * Rewrites an index scan over many point intervals as an OR of many index scans in order to
+ * obtain an indexed sort. For full details, see SERVER-1205.
+ *
+ * Here is an example:
+ *
+ * Consider the query find({a: {$in: [1,2]}}).sort({b: 1}) using the index {a:1, b:1}.
+ *
+ * Our default solution will be to construct one index scan with the bounds a:[[1,1],[2,2]]
+ * and b: [MinKey, MaxKey].
+ *
+ * However, this is logically equivalent to the union of the following scans:
+ * a:[1,1], b:[MinKey, MaxKey]
+ * a:[2,2], b:[MinKey, MaxKey]
+ *
+ * Since the bounds on 'a' are a point, each scan provides the sort order {b:1} in addition
+ * to {a:1, b:1}.
+ *
+ * If we union these scans with a merge sort instead of a normal hashing OR, we can preserve
+ * the sort order that each scan provides.
+ */
+ static bool explodeForSort(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ QuerySolutionNode** solnRoot);
+};
} // namespace mongo
diff --git a/src/mongo/db/query/planner_analysis_test.cpp b/src/mongo/db/query/planner_analysis_test.cpp
index 18fc86d6f1d..8849e67af5a 100644
--- a/src/mongo/db/query/planner_analysis_test.cpp
+++ b/src/mongo/db/query/planner_analysis_test.cpp
@@ -36,130 +36,118 @@ using namespace mongo;
namespace {
- TEST(QueryPlannerAnalysis, GetSortPatternBasic) {
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1}")));
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: -1}")));
- ASSERT_EQUALS(fromjson("{a: -1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 1}")));
- ASSERT_EQUALS(fromjson("{a: -1, b: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: -1}")));
- }
-
- TEST(QueryPlannerAnalysis, GetSortPatternOtherElements) {
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 0}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 100}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: Infinity}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: true}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: false}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: []}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: {}}")));
-
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -100}")));
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -Infinity}")));
-
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{}")));
- }
-
- TEST(QueryPlannerAnalysis, GetSortPatternSpecialIndexTypes) {
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'hashed'}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'text'}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: '2dsphere'}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: ''}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'foo'}")));
-
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: '2dsphere'}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere'}")));
-
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text', c: 1}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere',"
- " c: 1}")));
-
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text',"
- " d: 1}")));
- }
-
- // Test the generation of sort orders provided by an index scan done by
- // IndexScanNode::computeProperties().
- TEST(QueryPlannerAnalysis, IxscanSortOrdersBasic) {
- IndexScanNode ixscan;
- ixscan.indexKeyPattern = fromjson("{a: 1, b: 1, c: 1, d: 1, e: 1}");
-
- // Bounds are {a: [[1,1]], b: [[2,2]], c: [[3,3]], d: [[1,5]], e:[[1,1],[2,2]]},
- // all inclusive.
- OrderedIntervalList oil1("a");
- oil1.intervals.push_back(Interval(fromjson("{'': 1, '': 1}"), true, true));
- ixscan.bounds.fields.push_back(oil1);
-
- OrderedIntervalList oil2("b");
- oil2.intervals.push_back(Interval(fromjson("{'': 2, '': 2}"), true, true));
- ixscan.bounds.fields.push_back(oil2);
-
- OrderedIntervalList oil3("c");
- oil3.intervals.push_back(Interval(fromjson("{'': 3, '': 3}"), true, true));
- ixscan.bounds.fields.push_back(oil3);
-
- OrderedIntervalList oil4("d");
- oil4.intervals.push_back(Interval(fromjson("{'': 1, '': 5}"), true, true));
- ixscan.bounds.fields.push_back(oil4);
-
- OrderedIntervalList oil5("e");
- oil5.intervals.push_back(Interval(fromjson("{'': 1, '': 1}"), true, true));
- oil5.intervals.push_back(Interval(fromjson("{'': 2, '': 2}"), true, true));
- ixscan.bounds.fields.push_back(oil5);
-
- // Compute and retrieve the set of sorts.
- ixscan.computeProperties();
- const BSONObjSet& sorts = ixscan.getSort();
-
- // One possible sort is the index key pattern.
- ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1, d: 1, e: 1}")) != sorts.end());
-
- // All prefixes of the key pattern.
- ASSERT(sorts.find(fromjson("{a: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{a: 1, b: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1, d: 1}")) != sorts.end());
-
- // Additional sorts considered due to point intervals on 'a', 'b', and 'c'.
- ASSERT(sorts.find(fromjson("{b: 1, c: 1, d: 1, e: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{c: 1, d: 1, e: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{d: 1, e: 1}")) != sorts.end());
- ASSERT(sorts.find(fromjson("{d: 1}")) != sorts.end());
-
- // There should be 9 total sorts: make sure no other ones snuck their way in.
- ASSERT_EQ(9U, sorts.size());
- }
+TEST(QueryPlannerAnalysis, GetSortPatternBasic) {
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1}")));
+ ASSERT_EQUALS(fromjson("{a: -1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1}")));
+ ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1}")));
+ ASSERT_EQUALS(fromjson("{a: 1, b: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: -1}")));
+ ASSERT_EQUALS(fromjson("{a: -1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 1}")));
+ ASSERT_EQUALS(fromjson("{a: -1, b: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: -1}")));
+}
+
+TEST(QueryPlannerAnalysis, GetSortPatternOtherElements) {
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 0}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 100}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: Infinity}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: true}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: false}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: []}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: {}}")));
+
+ ASSERT_EQUALS(fromjson("{a: -1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: -100}")));
+ ASSERT_EQUALS(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -Infinity}")));
+
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{}")));
+}
+
+TEST(QueryPlannerAnalysis, GetSortPatternSpecialIndexTypes) {
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'hashed'}")));
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'text'}")));
+ ASSERT_EQUALS(fromjson("{}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: '2dsphere'}")));
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: ''}")));
+ ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'foo'}")));
+
+ ASSERT_EQUALS(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 'text'}")));
+ ASSERT_EQUALS(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: '2dsphere'}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text'}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere'}")));
+
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text', c: 1}")));
+ ASSERT_EQUALS(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson(
+ "{a: 1, b: '2dsphere',"
+ " c: 1}")));
+
+ ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text'}")));
+ ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson(
+ "{a: 1, b: 1, c: 'text',"
+ " d: 1}")));
+}
+
+// Test the generation of sort orders provided by an index scan done by
+// IndexScanNode::computeProperties().
+TEST(QueryPlannerAnalysis, IxscanSortOrdersBasic) {
+ IndexScanNode ixscan;
+ ixscan.indexKeyPattern = fromjson("{a: 1, b: 1, c: 1, d: 1, e: 1}");
+
+ // Bounds are {a: [[1,1]], b: [[2,2]], c: [[3,3]], d: [[1,5]], e:[[1,1],[2,2]]},
+ // all inclusive.
+ OrderedIntervalList oil1("a");
+ oil1.intervals.push_back(Interval(fromjson("{'': 1, '': 1}"), true, true));
+ ixscan.bounds.fields.push_back(oil1);
+
+ OrderedIntervalList oil2("b");
+ oil2.intervals.push_back(Interval(fromjson("{'': 2, '': 2}"), true, true));
+ ixscan.bounds.fields.push_back(oil2);
+
+ OrderedIntervalList oil3("c");
+ oil3.intervals.push_back(Interval(fromjson("{'': 3, '': 3}"), true, true));
+ ixscan.bounds.fields.push_back(oil3);
+
+ OrderedIntervalList oil4("d");
+ oil4.intervals.push_back(Interval(fromjson("{'': 1, '': 5}"), true, true));
+ ixscan.bounds.fields.push_back(oil4);
+
+ OrderedIntervalList oil5("e");
+ oil5.intervals.push_back(Interval(fromjson("{'': 1, '': 1}"), true, true));
+ oil5.intervals.push_back(Interval(fromjson("{'': 2, '': 2}"), true, true));
+ ixscan.bounds.fields.push_back(oil5);
+
+ // Compute and retrieve the set of sorts.
+ ixscan.computeProperties();
+ const BSONObjSet& sorts = ixscan.getSort();
+
+ // One possible sort is the index key pattern.
+ ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1, d: 1, e: 1}")) != sorts.end());
+
+ // All prefixes of the key pattern.
+ ASSERT(sorts.find(fromjson("{a: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{a: 1, b: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{a: 1, b: 1, c: 1, d: 1}")) != sorts.end());
+
+ // Additional sorts considered due to point intervals on 'a', 'b', and 'c'.
+ ASSERT(sorts.find(fromjson("{b: 1, c: 1, d: 1, e: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{c: 1, d: 1, e: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{d: 1, e: 1}")) != sorts.end());
+ ASSERT(sorts.find(fromjson("{d: 1}")) != sorts.end());
+
+ // There should be 9 total sorts: make sure no other ones snuck their way in.
+ ASSERT_EQ(9U, sorts.size());
+}
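
For reference, the expected count of 9 in the final assertion breaks down as 1 (the full key pattern) + 4 (its proper prefixes {a}, {a,b}, {a,b,c}, {a,b,c,d}) + 4 (the orders obtained by dropping leading point-bound fields: {b,c,d,e}, {c,d,e}, {d,e}, {d}).
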
} // namespace
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index f883b5468be..f1f133c8ee5 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -44,689 +44,650 @@
namespace mongo {
- static double fieldWithDefault(const BSONObj& infoObj, const string& name, double def) {
- BSONElement e = infoObj[name];
- if (e.isNumber()) { return e.numberDouble(); }
- return def;
+static double fieldWithDefault(const BSONObj& infoObj, const string& name, double def) {
+ BSONElement e = infoObj[name];
+ if (e.isNumber()) {
+ return e.numberDouble();
}
+ return def;
+}
- /**
- * 2d indices don't handle wrapping so we can't use them for queries that wrap.
- */
- static bool twoDWontWrap(const Circle& circle, const IndexEntry& index) {
-
- GeoHashConverter::Parameters hashParams;
- Status paramStatus = GeoHashConverter::parseParameters(index.infoObj, &hashParams);
- verify(paramStatus.isOK()); // we validated the params on index creation
-
- GeoHashConverter conv(hashParams);
-
- // FYI: old code used flat not spherical error.
- double yscandist = rad2deg(circle.radius) + conv.getErrorSphere();
- double xscandist = computeXScanDistance(circle.center.y, yscandist);
- bool ret = circle.center.x + xscandist < 180
- && circle.center.x - xscandist > -180
- && circle.center.y + yscandist < 90
- && circle.center.y - yscandist > -90;
- return ret;
+/**
+ * 2d indices don't handle wrapping so we can't use them for queries that wrap.
+ */
+static bool twoDWontWrap(const Circle& circle, const IndexEntry& index) {
+ GeoHashConverter::Parameters hashParams;
+ Status paramStatus = GeoHashConverter::parseParameters(index.infoObj, &hashParams);
+ verify(paramStatus.isOK()); // we validated the params on index creation
+
+ GeoHashConverter conv(hashParams);
+
+    // FYI: the old code used the flat error, not the spherical error.
+ double yscandist = rad2deg(circle.radius) + conv.getErrorSphere();
+ double xscandist = computeXScanDistance(circle.center.y, yscandist);
+ bool ret = circle.center.x + xscandist < 180 && circle.center.x - xscandist > -180 &&
+ circle.center.y + yscandist < 90 && circle.center.y - yscandist > -90;
+ return ret;
+}
+
+// static
+void QueryPlannerIXSelect::getFields(const MatchExpression* node,
+ string prefix,
+ unordered_set<string>* out) {
+ // Do not traverse tree beyond a NOR negation node
+ MatchExpression::MatchType exprtype = node->matchType();
+ if (exprtype == MatchExpression::NOR) {
+ return;
}
- // static
- void QueryPlannerIXSelect::getFields(const MatchExpression* node,
- string prefix,
- unordered_set<string>* out) {
- // Do not traverse tree beyond a NOR negation node
- MatchExpression::MatchType exprtype = node->matchType();
- if (exprtype == MatchExpression::NOR) {
- return;
- }
-
- // Leaf nodes with a path and some array operators.
- if (Indexability::nodeCanUseIndexOnOwnField(node)) {
- out->insert(prefix + node->path().toString());
- }
- else if (Indexability::arrayUsesIndexOnChildren(node)) {
- // If the array uses an index on its children, it's something like
- // {foo : {$elemMatch: { bar: 1}}}, in which case the predicate is really over
- // foo.bar.
- //
- // When we have {foo: {$all: [{$elemMatch: {a:1}}], the path of the embedded elemMatch
- // is empty. We don't want to append a dot in that case as the field would be foo..a.
- if (!node->path().empty()) {
- prefix += node->path().toString() + ".";
- }
+ // Leaf nodes with a path and some array operators.
+ if (Indexability::nodeCanUseIndexOnOwnField(node)) {
+ out->insert(prefix + node->path().toString());
+ } else if (Indexability::arrayUsesIndexOnChildren(node)) {
+ // If the array uses an index on its children, it's something like
+ // {foo : {$elemMatch: { bar: 1}}}, in which case the predicate is really over
+ // foo.bar.
+ //
+        // When we have {foo: {$all: [{$elemMatch: {a: 1}}]}}, the path of the embedded elemMatch
+ // is empty. We don't want to append a dot in that case as the field would be foo..a.
+ if (!node->path().empty()) {
+ prefix += node->path().toString() + ".";
+ }
- for (size_t i = 0; i < node->numChildren(); ++i) {
- getFields(node->getChild(i), prefix, out);
- }
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ getFields(node->getChild(i), prefix, out);
}
- else if (node->isLogical()) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- getFields(node->getChild(i), prefix, out);
- }
+ } else if (node->isLogical()) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ getFields(node->getChild(i), prefix, out);
}
}
-
- // static
- void QueryPlannerIXSelect::findRelevantIndices(const unordered_set<string>& fields,
- const vector<IndexEntry>& allIndices,
- vector<IndexEntry>* out) {
- for (size_t i = 0; i < allIndices.size(); ++i) {
- BSONObjIterator it(allIndices[i].keyPattern);
- verify(it.more());
- BSONElement elt = it.next();
- if (fields.end() != fields.find(elt.fieldName())) {
- out->push_back(allIndices[i]);
- }
+}
+
+// static
+void QueryPlannerIXSelect::findRelevantIndices(const unordered_set<string>& fields,
+ const vector<IndexEntry>& allIndices,
+ vector<IndexEntry>* out) {
+ for (size_t i = 0; i < allIndices.size(); ++i) {
+ BSONObjIterator it(allIndices[i].keyPattern);
+ verify(it.more());
+ BSONElement elt = it.next();
+ if (fields.end() != fields.find(elt.fieldName())) {
+ out->push_back(allIndices[i]);
}
}
+}
+
+// static
+bool QueryPlannerIXSelect::compatible(const BSONElement& elt,
+ const IndexEntry& index,
+ MatchExpression* node) {
+ // Historically one could create indices with any particular value for the index spec,
+ // including values that now indicate a special index. As such we have to make sure the
+ // index type wasn't overridden before we pay attention to the string in the index key
+ // pattern element.
+ //
+ // e.g. long ago we could have created an index {a: "2dsphere"} and it would
+ // be treated as a btree index by an ancient version of MongoDB. To try to run
+ // 2dsphere queries over it would be folly.
+ string indexedFieldType;
+ if (String != elt.type() || (INDEX_BTREE == index.type)) {
+ indexedFieldType = "";
+ } else {
+ indexedFieldType = elt.String();
+ }
- // static
- bool QueryPlannerIXSelect::compatible(const BSONElement& elt,
- const IndexEntry& index,
- MatchExpression* node) {
- // Historically one could create indices with any particular value for the index spec,
- // including values that now indicate a special index. As such we have to make sure the
- // index type wasn't overridden before we pay attention to the string in the index key
- // pattern element.
- //
- // e.g. long ago we could have created an index {a: "2dsphere"} and it would
- // be treated as a btree index by an ancient version of MongoDB. To try to run
- // 2dsphere queries over it would be folly.
- string indexedFieldType;
- if (String != elt.type() || (INDEX_BTREE == index.type)) {
- indexedFieldType = "";
- }
- else {
- indexedFieldType = elt.String();
- }
-
- // We know elt.fieldname() == node->path().
- MatchExpression::MatchType exprtype = node->matchType();
-
- if (indexedFieldType.empty()) {
- // Can't check for null w/a sparse index.
- if (exprtype == MatchExpression::EQ && index.sparse) {
- const EqualityMatchExpression* expr
- = static_cast<const EqualityMatchExpression*>(node);
- if (expr->getData().isNull()) {
- return false;
- }
- }
+    // We know elt.fieldName() == node->path().
+ MatchExpression::MatchType exprtype = node->matchType();
- // Can't check for $in w/ null element w/a sparse index.
- if (exprtype == MatchExpression::MATCH_IN && index.sparse) {
- const InMatchExpression* expr = static_cast<const InMatchExpression*>(node);
- if (expr->getData().hasNull()) {
- return false;
- }
+ if (indexedFieldType.empty()) {
+ // Can't check for null w/a sparse index.
+ if (exprtype == MatchExpression::EQ && index.sparse) {
+ const EqualityMatchExpression* expr = static_cast<const EqualityMatchExpression*>(node);
+ if (expr->getData().isNull()) {
+ return false;
}
+ }
- // We can't use a btree-indexed field for geo expressions.
- if (exprtype == MatchExpression::GEO || exprtype == MatchExpression::GEO_NEAR) {
+ // Can't check for $in w/ null element w/a sparse index.
+ if (exprtype == MatchExpression::MATCH_IN && index.sparse) {
+ const InMatchExpression* expr = static_cast<const InMatchExpression*>(node);
+ if (expr->getData().hasNull()) {
return false;
}
+ }
- // There are restrictions on when we can use the index if
- // the expression is a NOT.
- if (exprtype == MatchExpression::NOT) {
- // Don't allow indexed NOT on special index types such as geo or text indices.
- if (INDEX_BTREE != index.type) {
- return false;
- }
-
- // Prevent negated preds from using sparse indices. Doing so would cause us to
- // miss documents which do not contain the indexed fields.
- if (index.sparse) {
- return false;
- }
-
- // Can't index negations of MOD, REGEX, TYPE_OPERATOR, or ELEM_MATCH_VALUE.
- MatchExpression::MatchType childtype = node->getChild(0)->matchType();
- if (MatchExpression::REGEX == childtype ||
- MatchExpression::MOD == childtype ||
- MatchExpression::TYPE_OPERATOR == childtype ||
- MatchExpression::ELEM_MATCH_VALUE == childtype) {
- return false;
- }
+ // We can't use a btree-indexed field for geo expressions.
+ if (exprtype == MatchExpression::GEO || exprtype == MatchExpression::GEO_NEAR) {
+ return false;
+ }
- // If it's a negated $in, it can't have any REGEX's inside.
- if (MatchExpression::MATCH_IN == childtype) {
- InMatchExpression* ime = static_cast<InMatchExpression*>(node->getChild(0));
- if (ime->getData().numRegexes() != 0) {
- return false;
- }
- }
+ // There are restrictions on when we can use the index if
+ // the expression is a NOT.
+ if (exprtype == MatchExpression::NOT) {
+ // Don't allow indexed NOT on special index types such as geo or text indices.
+ if (INDEX_BTREE != index.type) {
+ return false;
}
- // We can only index EQ using text indices. This is an artificial limitation imposed by
- // FTSSpec::getIndexPrefix() which will fail if there is not an EQ predicate on each
- // index prefix field of the text index.
- //
- // Example for key pattern {a: 1, b: "text"}:
- // - Allowed: node = {a: 7}
- // - Not allowed: node = {a: {$gt: 7}}
-
- if (INDEX_TEXT != index.type) {
- return true;
+ // Prevent negated preds from using sparse indices. Doing so would cause us to
+ // miss documents which do not contain the indexed fields.
+ if (index.sparse) {
+ return false;
}
- // If we're here we know it's a text index. Equalities are OK anywhere in a text index.
- if (MatchExpression::EQ == exprtype) {
- return true;
+ // Can't index negations of MOD, REGEX, TYPE_OPERATOR, or ELEM_MATCH_VALUE.
+ MatchExpression::MatchType childtype = node->getChild(0)->matchType();
+ if (MatchExpression::REGEX == childtype || MatchExpression::MOD == childtype ||
+ MatchExpression::TYPE_OPERATOR == childtype ||
+ MatchExpression::ELEM_MATCH_VALUE == childtype) {
+ return false;
}
- // Not-equalities can only go in a suffix field of an index kp. We look through the key
- // pattern to see if the field we're looking at now appears as a prefix. If so, we
- // can't use this index for it.
- BSONObjIterator specIt(index.keyPattern);
- while (specIt.more()) {
- BSONElement elt = specIt.next();
- // We hit the dividing mark between prefix and suffix, so whatever field we're
- // looking at is a suffix, since it appears *after* the dividing mark between the
- // two. As such, we can use the index.
- if (String == elt.type()) {
- return true;
- }
-
- // If we're here, we're still looking at prefix elements. We know that exprtype
- // isn't EQ so we can't use this index.
- if (node->path() == elt.fieldNameStringData()) {
+ // If it's a negated $in, it can't have any REGEX's inside.
+ if (MatchExpression::MATCH_IN == childtype) {
+ InMatchExpression* ime = static_cast<InMatchExpression*>(node->getChild(0));
+ if (ime->getData().numRegexes() != 0) {
return false;
}
}
+ }
- // NOTE: This shouldn't be reached. Text index implies there is a separator implies we
- // will always hit the 'return true' above.
- invariant(0);
+ // We can only index EQ using text indices. This is an artificial limitation imposed by
+ // FTSSpec::getIndexPrefix() which will fail if there is not an EQ predicate on each
+ // index prefix field of the text index.
+ //
+ // Example for key pattern {a: 1, b: "text"}:
+ // - Allowed: node = {a: 7}
+ // - Not allowed: node = {a: {$gt: 7}}
+
+ if (INDEX_TEXT != index.type) {
return true;
}
- else if (IndexNames::HASHED == indexedFieldType) {
- return exprtype == MatchExpression::MATCH_IN || exprtype == MatchExpression::EQ;
+
+ // If we're here we know it's a text index. Equalities are OK anywhere in a text index.
+ if (MatchExpression::EQ == exprtype) {
+ return true;
}
- else if (IndexNames::GEO_2DSPHERE == indexedFieldType) {
- if (exprtype == MatchExpression::GEO) {
- // within or intersect.
- GeoMatchExpression* gme = static_cast<GeoMatchExpression*>(node);
- const GeoExpression& gq = gme->getGeoExpression();
- const GeometryContainer& gc = gq.getGeometry();
- return gc.hasS2Region();
+
+ // Not-equalities can only go in a suffix field of an index kp. We look through the key
+ // pattern to see if the field we're looking at now appears as a prefix. If so, we
+ // can't use this index for it.
+ BSONObjIterator specIt(index.keyPattern);
+ while (specIt.more()) {
+ BSONElement elt = specIt.next();
+ // We hit the dividing mark between prefix and suffix, so whatever field we're
+ // looking at is a suffix, since it appears *after* the dividing mark between the
+ // two. As such, we can use the index.
+ if (String == elt.type()) {
+ return true;
}
- else if (exprtype == MatchExpression::GEO_NEAR) {
- GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(node);
- // Make sure the near query is compatible with 2dsphere.
- return gnme->getData().centroid->crs == SPHERE;
+
+ // If we're here, we're still looking at prefix elements. We know that exprtype
+ // isn't EQ so we can't use this index.
+ if (node->path() == elt.fieldNameStringData()) {
+ return false;
}
- return false;
}
- else if (IndexNames::GEO_2D == indexedFieldType) {
- if (exprtype == MatchExpression::GEO_NEAR) {
- GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(node);
- // Make sure the near query is compatible with 2d index
- return gnme->getData().centroid->crs == FLAT || !gnme->getData().isWrappingQuery;
+
+        // NOTE: This shouldn't be reached. A text index implies there is a separator, which
+        // implies we will always hit the 'return true' above.
+ invariant(0);
+ return true;
+ } else if (IndexNames::HASHED == indexedFieldType) {
+ return exprtype == MatchExpression::MATCH_IN || exprtype == MatchExpression::EQ;
+ } else if (IndexNames::GEO_2DSPHERE == indexedFieldType) {
+ if (exprtype == MatchExpression::GEO) {
+ // within or intersect.
+ GeoMatchExpression* gme = static_cast<GeoMatchExpression*>(node);
+ const GeoExpression& gq = gme->getGeoExpression();
+ const GeometryContainer& gc = gq.getGeometry();
+ return gc.hasS2Region();
+ } else if (exprtype == MatchExpression::GEO_NEAR) {
+ GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(node);
+ // Make sure the near query is compatible with 2dsphere.
+ return gnme->getData().centroid->crs == SPHERE;
+ }
+ return false;
+ } else if (IndexNames::GEO_2D == indexedFieldType) {
+ if (exprtype == MatchExpression::GEO_NEAR) {
+ GeoNearMatchExpression* gnme = static_cast<GeoNearMatchExpression*>(node);
+ // Make sure the near query is compatible with 2d index
+ return gnme->getData().centroid->crs == FLAT || !gnme->getData().isWrappingQuery;
+ } else if (exprtype == MatchExpression::GEO) {
+ // 2d only supports within.
+ GeoMatchExpression* gme = static_cast<GeoMatchExpression*>(node);
+ const GeoExpression& gq = gme->getGeoExpression();
+ if (GeoExpression::WITHIN != gq.getPred()) {
+ return false;
}
- else if (exprtype == MatchExpression::GEO) {
- // 2d only supports within.
- GeoMatchExpression* gme = static_cast<GeoMatchExpression*>(node);
- const GeoExpression& gq = gme->getGeoExpression();
- if (GeoExpression::WITHIN != gq.getPred()) {
- return false;
- }
- const GeometryContainer& gc = gq.getGeometry();
+ const GeometryContainer& gc = gq.getGeometry();
- // 2d indices require an R2 covering
- if (gc.hasR2Region()) {
- return true;
- }
+ // 2d indices require an R2 covering
+ if (gc.hasR2Region()) {
+ return true;
+ }
- const CapWithCRS* cap = gc.getCapGeometryHack();
+ const CapWithCRS* cap = gc.getCapGeometryHack();
- // 2d indices can answer centerSphere queries.
- if (NULL == cap) {
- return false;
- }
+ // 2d indices can answer centerSphere queries.
+ if (NULL == cap) {
+ return false;
+ }
- verify(SPHERE == cap->crs);
- const Circle& circle = cap->circle;
+ verify(SPHERE == cap->crs);
+ const Circle& circle = cap->circle;
- // No wrapping around the edge of the world is allowed in 2d centerSphere.
- return twoDWontWrap(circle, index);
- }
- return false;
- }
- else if (IndexNames::TEXT == indexedFieldType) {
- return (exprtype == MatchExpression::TEXT);
- }
- else if (IndexNames::GEO_HAYSTACK == indexedFieldType) {
- return false;
- }
- else {
- warning() << "Unknown indexing for node " << node->toString()
- << " and field " << elt.toString() << endl;
- verify(0);
+ // No wrapping around the edge of the world is allowed in 2d centerSphere.
+ return twoDWontWrap(circle, index);
}
+ return false;
+ } else if (IndexNames::TEXT == indexedFieldType) {
+ return (exprtype == MatchExpression::TEXT);
+ } else if (IndexNames::GEO_HAYSTACK == indexedFieldType) {
+ return false;
+ } else {
+ warning() << "Unknown indexing for node " << node->toString() << " and field "
+ << elt.toString() << endl;
+ verify(0);
+ }
+}
+
+// static
+void QueryPlannerIXSelect::rateIndices(MatchExpression* node,
+ string prefix,
+ const vector<IndexEntry>& indices) {
+ // Do not traverse tree beyond logical NOR node
+ MatchExpression::MatchType exprtype = node->matchType();
+ if (exprtype == MatchExpression::NOR) {
+ return;
}
- // static
- void QueryPlannerIXSelect::rateIndices(MatchExpression* node,
- string prefix,
- const vector<IndexEntry>& indices) {
- // Do not traverse tree beyond logical NOR node
- MatchExpression::MatchType exprtype = node->matchType();
- if (exprtype == MatchExpression::NOR) {
- return;
- }
-
- // Every indexable node is tagged even when no compatible index is
- // available.
- if (Indexability::isBoundsGenerating(node)) {
- string fullPath;
- if (MatchExpression::NOT == node->matchType()) {
- fullPath = prefix + node->getChild(0)->path().toString();
- }
- else {
- fullPath = prefix + node->path().toString();
- }
+ // Every indexable node is tagged even when no compatible index is
+ // available.
+ if (Indexability::isBoundsGenerating(node)) {
+ string fullPath;
+ if (MatchExpression::NOT == node->matchType()) {
+ fullPath = prefix + node->getChild(0)->path().toString();
+ } else {
+ fullPath = prefix + node->path().toString();
+ }
- verify(NULL == node->getTag());
- RelevantTag* rt = new RelevantTag();
- node->setTag(rt);
- rt->path = fullPath;
+ verify(NULL == node->getTag());
+ RelevantTag* rt = new RelevantTag();
+ node->setTag(rt);
+ rt->path = fullPath;
- // TODO: This is slow, with all the string compares.
- for (size_t i = 0; i < indices.size(); ++i) {
- BSONObjIterator it(indices[i].keyPattern);
- BSONElement elt = it.next();
+ // TODO: This is slow, with all the string compares.
+ for (size_t i = 0; i < indices.size(); ++i) {
+ BSONObjIterator it(indices[i].keyPattern);
+ BSONElement elt = it.next();
+ if (elt.fieldName() == fullPath && compatible(elt, indices[i], node)) {
+ rt->first.push_back(i);
+ }
+ while (it.more()) {
+ elt = it.next();
if (elt.fieldName() == fullPath && compatible(elt, indices[i], node)) {
- rt->first.push_back(i);
- }
- while (it.more()) {
- elt = it.next();
- if (elt.fieldName() == fullPath && compatible(elt, indices[i], node)) {
- rt->notFirst.push_back(i);
- }
+ rt->notFirst.push_back(i);
}
}
+ }
- // If this is a NOT, we have to clone the tag and attach
- // it to the NOT's child.
- if (MatchExpression::NOT == node->matchType()) {
- RelevantTag* childRt = static_cast<RelevantTag*>(rt->clone());
- childRt->path = rt->path;
- node->getChild(0)->setTag(childRt);
- }
+ // If this is a NOT, we have to clone the tag and attach
+ // it to the NOT's child.
+ if (MatchExpression::NOT == node->matchType()) {
+ RelevantTag* childRt = static_cast<RelevantTag*>(rt->clone());
+ childRt->path = rt->path;
+ node->getChild(0)->setTag(childRt);
}
- else if (Indexability::arrayUsesIndexOnChildren(node)) {
- // See comment in getFields about all/elemMatch and paths.
- if (!node->path().empty()) {
- prefix += node->path().toString() + ".";
- }
- for (size_t i = 0; i < node->numChildren(); ++i) {
- rateIndices(node->getChild(i), prefix, indices);
- }
+ } else if (Indexability::arrayUsesIndexOnChildren(node)) {
+ // See comment in getFields about all/elemMatch and paths.
+ if (!node->path().empty()) {
+ prefix += node->path().toString() + ".";
}
- else if (node->isLogical()) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- rateIndices(node->getChild(i), prefix, indices);
- }
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ rateIndices(node->getChild(i), prefix, indices);
+ }
+ } else if (node->isLogical()) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ rateIndices(node->getChild(i), prefix, indices);
}
}
+}
- // static
- void QueryPlannerIXSelect::stripInvalidAssignments(MatchExpression* node,
- const vector<IndexEntry>& indices) {
+// static
+void QueryPlannerIXSelect::stripInvalidAssignments(MatchExpression* node,
+ const vector<IndexEntry>& indices) {
+ stripInvalidAssignmentsToTextIndexes(node, indices);
- stripInvalidAssignmentsToTextIndexes(node, indices);
+ if (MatchExpression::GEO != node->matchType() &&
+ MatchExpression::GEO_NEAR != node->matchType()) {
+ stripInvalidAssignmentsTo2dsphereIndices(node, indices);
+ }
+}
- if (MatchExpression::GEO != node->matchType() &&
- MatchExpression::GEO_NEAR != node->matchType()) {
+namespace {
- stripInvalidAssignmentsTo2dsphereIndices(node, indices);
- }
+/**
+ * For every node in the subtree rooted at 'node' that has a RelevantTag, removes index
+ * assignments from that tag.
+ *
+ * Used as a helper for stripUnneededAssignments().
+ */
+void clearAssignments(MatchExpression* node) {
+ if (node->getTag()) {
+ RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
+ rt->first.clear();
+ rt->notFirst.clear();
}
- namespace {
-
- /**
- * For every node in the subtree rooted at 'node' that has a RelevantTag, removes index
- * assignments from that tag.
- *
- * Used as a helper for stripUnneededAssignments().
- */
- void clearAssignments(MatchExpression* node) {
- if (node->getTag()) {
- RelevantTag* rt = static_cast<RelevantTag*>(node->getTag());
- rt->first.clear();
- rt->notFirst.clear();
- }
+ for (size_t i = 0; i < node->numChildren(); i++) {
+ clearAssignments(node->getChild(i));
+ }
+}
- for (size_t i = 0; i < node->numChildren(); i++) {
- clearAssignments(node->getChild(i));
+} // namespace
+
+// static
+void QueryPlannerIXSelect::stripUnneededAssignments(MatchExpression* node,
+ const std::vector<IndexEntry>& indices) {
+ if (MatchExpression::AND == node->matchType()) {
+ for (size_t i = 0; i < node->numChildren(); i++) {
+ MatchExpression* child = node->getChild(i);
+
+ if (MatchExpression::EQ != child->matchType()) {
+ continue;
}
- }
- } // namespace
+ if (!child->getTag()) {
+ continue;
+ }
- // static
- void QueryPlannerIXSelect::stripUnneededAssignments(MatchExpression* node,
- const std::vector<IndexEntry>& indices) {
- if (MatchExpression::AND == node->matchType()) {
- for (size_t i = 0; i < node->numChildren(); i++) {
- MatchExpression* child = node->getChild(i);
+            // We found an EQ child of an AND which is tagged.
+ RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
- if (MatchExpression::EQ != child->matchType()) {
- continue;
- }
+ // Look through all of the indices for which this predicate can be answered with
+ // the leading field of the index.
+ for (std::vector<size_t>::const_iterator i = rt->first.begin(); i != rt->first.end();
+ ++i) {
+ size_t index = *i;
- if (!child->getTag()) {
- continue;
- }
+ if (indices[index].unique && 1 == indices[index].keyPattern.nFields()) {
+ // Found an EQ predicate which can use a single-field unique index.
+ // Clear assignments from the entire tree, and add back a single assignment
+ // for 'child' to the unique index.
+ clearAssignments(node);
+ RelevantTag* newRt = static_cast<RelevantTag*>(child->getTag());
+ newRt->first.push_back(index);
- // We found a EQ child of an AND which is tagged.
- RelevantTag* rt = static_cast<RelevantTag*>(child->getTag());
-
- // Look through all of the indices for which this predicate can be answered with
- // the leading field of the index.
- for (std::vector<size_t>::const_iterator i = rt->first.begin();
- i != rt->first.end(); ++i) {
- size_t index = *i;
-
- if (indices[index].unique && 1 == indices[index].keyPattern.nFields()) {
- // Found an EQ predicate which can use a single-field unique index.
- // Clear assignments from the entire tree, and add back a single assignment
- // for 'child' to the unique index.
- clearAssignments(node);
- RelevantTag* newRt = static_cast<RelevantTag*>(child->getTag());
- newRt->first.push_back(index);
-
- // Tag state has been reset in the entire subtree at 'root'; nothing
- // else for us to do.
- return;
- }
+ // Tag state has been reset in the entire subtree at 'root'; nothing
+ // else for us to do.
+ return;
}
}
}
+ }
- for (size_t i = 0; i < node->numChildren(); i++) {
- stripUnneededAssignments(node->getChild(i), indices);
- }
+ for (size_t i = 0; i < node->numChildren(); i++) {
+ stripUnneededAssignments(node->getChild(i), indices);
}
+}
- //
- // Helpers used by stripInvalidAssignments
- //
+//
+// Helpers used by stripInvalidAssignments
+//
- /**
- * Remove 'idx' from the RelevantTag lists for 'node'. 'node' must be a leaf.
- */
- static void removeIndexRelevantTag(MatchExpression* node, size_t idx) {
- RelevantTag* tag = static_cast<RelevantTag*>(node->getTag());
- verify(tag);
- vector<size_t>::iterator firstIt = std::find(tag->first.begin(),
- tag->first.end(),
- idx);
- if (firstIt != tag->first.end()) {
- tag->first.erase(firstIt);
- }
-
- vector<size_t>::iterator notFirstIt = std::find(tag->notFirst.begin(),
- tag->notFirst.end(),
- idx);
- if (notFirstIt != tag->notFirst.end()) {
- tag->notFirst.erase(notFirstIt);
- }
+/**
+ * Remove 'idx' from the RelevantTag lists for 'node'. 'node' must be a leaf.
+ */
+static void removeIndexRelevantTag(MatchExpression* node, size_t idx) {
+ RelevantTag* tag = static_cast<RelevantTag*>(node->getTag());
+ verify(tag);
+ vector<size_t>::iterator firstIt = std::find(tag->first.begin(), tag->first.end(), idx);
+ if (firstIt != tag->first.end()) {
+ tag->first.erase(firstIt);
}
- //
- // Text index quirks
- //
-
- /**
- * Traverse the subtree rooted at 'node' to remove invalid RelevantTag assignments to text index
- * 'idx', which has prefix paths 'prefixPaths'.
- */
- static void stripInvalidAssignmentsToTextIndex(MatchExpression* node,
- size_t idx,
- const unordered_set<StringData, StringData::Hasher>& prefixPaths) {
+ vector<size_t>::iterator notFirstIt =
+ std::find(tag->notFirst.begin(), tag->notFirst.end(), idx);
+ if (notFirstIt != tag->notFirst.end()) {
+ tag->notFirst.erase(notFirstIt);
+ }
+}
- // If we're here, there are prefixPaths and node is either:
- // 1. a text pred which we can't use as we have nothing over its prefix, or
- // 2. a non-text pred which we can't use as we don't have a text pred AND-related.
- if (Indexability::nodeCanUseIndexOnOwnField(node)) {
- removeIndexRelevantTag(node, idx);
- return;
- }
+//
+// Text index quirks
+//
- // Do not traverse tree beyond negation node.
- if (node->matchType() == MatchExpression::NOT
- || node->matchType() == MatchExpression::NOR) {
+/**
+ * Traverse the subtree rooted at 'node' to remove invalid RelevantTag assignments to text index
+ * 'idx', which has prefix paths 'prefixPaths'.
+ */
+static void stripInvalidAssignmentsToTextIndex(
+ MatchExpression* node,
+ size_t idx,
+ const unordered_set<StringData, StringData::Hasher>& prefixPaths) {
+ // If we're here, there are prefixPaths and node is either:
+ // 1. a text pred which we can't use as we have nothing over its prefix, or
+    // 2. a non-text pred which we can't use as there is no AND-related text pred.
+ if (Indexability::nodeCanUseIndexOnOwnField(node)) {
+ removeIndexRelevantTag(node, idx);
+ return;
+ }
- return;
- }
+ // Do not traverse tree beyond negation node.
+ if (node->matchType() == MatchExpression::NOT || node->matchType() == MatchExpression::NOR) {
+ return;
+ }
- // For anything to use a text index with prefixes, we require that:
- // 1. The text pred exists in an AND,
- // 2. The non-text preds that use the text index's prefixes are also in that AND.
+ // For anything to use a text index with prefixes, we require that:
+ // 1. The text pred exists in an AND,
+ // 2. The non-text preds that use the text index's prefixes are also in that AND.
- if (node->matchType() != MatchExpression::AND) {
- // It's an OR or some kind of array operator.
- for (size_t i = 0; i < node->numChildren(); ++i) {
- stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
- }
- return;
+ if (node->matchType() != MatchExpression::AND) {
+ // It's an OR or some kind of array operator.
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
}
+ return;
+ }
- // If we're here, we're an AND. Determine whether the children satisfy the index prefix for
- // the text index.
- invariant(node->matchType() == MatchExpression::AND);
+ // If we're here, we're an AND. Determine whether the children satisfy the index prefix for
+ // the text index.
+ invariant(node->matchType() == MatchExpression::AND);
- bool hasText = false;
+ bool hasText = false;
- // The AND must have an EQ predicate for each prefix path. When we encounter a child with a
- // tag we remove it from childrenPrefixPaths. All children exist if this set is empty at
- // the end.
- unordered_set<StringData, StringData::Hasher> childrenPrefixPaths = prefixPaths;
+    // The AND must have an EQ predicate for each prefix path. When we encounter a tagged
+    // child we remove its path from childrenPrefixPaths. All prefix paths are covered if
+    // this set is empty at the end.
+ unordered_set<StringData, StringData::Hasher> childrenPrefixPaths = prefixPaths;
- for (size_t i = 0; i < node->numChildren(); ++i) {
- MatchExpression* child = node->getChild(i);
- RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ MatchExpression* child = node->getChild(i);
+ RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());
- if (NULL == tag) {
- // 'child' could be a logical operator. Maybe there are some assignments hiding
- // inside.
- stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
- continue;
- }
+ if (NULL == tag) {
+ // 'child' could be a logical operator. Maybe there are some assignments hiding
+ // inside.
+ stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
+ continue;
+ }
- bool inFirst = tag->first.end() != std::find(tag->first.begin(),
- tag->first.end(),
- idx);
+ bool inFirst = tag->first.end() != std::find(tag->first.begin(), tag->first.end(), idx);
- bool inNotFirst = tag->notFirst.end() != std::find(tag->notFirst.begin(),
- tag->notFirst.end(),
- idx);
+ bool inNotFirst =
+ tag->notFirst.end() != std::find(tag->notFirst.begin(), tag->notFirst.end(), idx);
- if (inFirst || inNotFirst) {
- // Great! 'child' was assigned to our index.
- if (child->matchType() == MatchExpression::TEXT) {
- hasText = true;
- }
- else {
- childrenPrefixPaths.erase(child->path());
- // One fewer prefix we're looking for, possibly. Note that we could have a
- // suffix assignment on the index and wind up here. In this case the erase
- // above won't do anything since a suffix isn't a prefix.
- }
- }
- else {
- // Recurse on the children to ensure that they're not hiding any assignments
- // to idx.
- stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
+ if (inFirst || inNotFirst) {
+ // Great! 'child' was assigned to our index.
+ if (child->matchType() == MatchExpression::TEXT) {
+ hasText = true;
+ } else {
+ childrenPrefixPaths.erase(child->path());
+ // One fewer prefix we're looking for, possibly. Note that we could have a
+ // suffix assignment on the index and wind up here. In this case the erase
+ // above won't do anything since a suffix isn't a prefix.
}
+ } else {
+ // Recurse on the children to ensure that they're not hiding any assignments
+ // to idx.
+ stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths);
}
+ }
- // Our prereqs for using the text index were not satisfied so we remove the assignments from
- // all children of the AND.
- if (!hasText || !childrenPrefixPaths.empty()) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
- }
+ // Our prereqs for using the text index were not satisfied so we remove the assignments from
+ // all children of the AND.
+ if (!hasText || !childrenPrefixPaths.empty()) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ stripInvalidAssignmentsToTextIndex(node->getChild(i), idx, prefixPaths);
}
}
+}
- // static
- void QueryPlannerIXSelect::stripInvalidAssignmentsToTextIndexes(
- MatchExpression* node,
- const vector<IndexEntry>& indices) {
+// static
+void QueryPlannerIXSelect::stripInvalidAssignmentsToTextIndexes(MatchExpression* node,
+ const vector<IndexEntry>& indices) {
+ for (size_t i = 0; i < indices.size(); ++i) {
+ const IndexEntry& index = indices[i];
- for (size_t i = 0; i < indices.size(); ++i) {
- const IndexEntry& index = indices[i];
+ // We only care about text indices.
+ if (INDEX_TEXT != index.type) {
+ continue;
+ }
- // We only care about text indices.
- if (INDEX_TEXT != index.type) {
- continue;
- }
+ // Gather the set of paths that comprise the index prefix for this text index.
+ // Each of those paths must have an equality assignment, otherwise we can't assign
+ // *anything* to this index.
+ unordered_set<StringData, StringData::Hasher> textIndexPrefixPaths;
+ BSONObjIterator it(index.keyPattern);
- // Gather the set of paths that comprise the index prefix for this text index.
- // Each of those paths must have an equality assignment, otherwise we can't assign
- // *anything* to this index.
- unordered_set<StringData, StringData::Hasher> textIndexPrefixPaths;
- BSONObjIterator it(index.keyPattern);
-
- // We stop when we see the first string in the key pattern. We know that
- // the prefix precedes "text".
- for (BSONElement elt = it.next(); elt.type() != String; elt = it.next()) {
- textIndexPrefixPaths.insert(elt.fieldName());
- verify(it.more());
- }
+ // We stop when we see the first string in the key pattern. We know that
+ // the prefix precedes "text".
+ for (BSONElement elt = it.next(); elt.type() != String; elt = it.next()) {
+ textIndexPrefixPaths.insert(elt.fieldName());
+ verify(it.more());
+ }
- // If the index prefix is non-empty, remove invalid assignments to it.
- if (!textIndexPrefixPaths.empty()) {
- stripInvalidAssignmentsToTextIndex(node, i, textIndexPrefixPaths);
- }
+ // If the index prefix is non-empty, remove invalid assignments to it.
+ if (!textIndexPrefixPaths.empty()) {
+ stripInvalidAssignmentsToTextIndex(node, i, textIndexPrefixPaths);
}
}
+}
+
+//
+// 2dsphere V2 sparse quirks
+//
+
+static void stripInvalidAssignmentsTo2dsphereIndex(MatchExpression* node, size_t idx) {
+ if (Indexability::nodeCanUseIndexOnOwnField(node) &&
+ MatchExpression::GEO != node->matchType() &&
+ MatchExpression::GEO_NEAR != node->matchType()) {
+ // We found a non-geo predicate tagged to use a V2 2dsphere index which is not
+        // AND-related to a geo predicate that can use the index.
+ removeIndexRelevantTag(node, idx);
+ return;
+ }
- //
- // 2dsphere V2 sparse quirks
- //
+ const MatchExpression::MatchType nodeType = node->matchType();
- static void stripInvalidAssignmentsTo2dsphereIndex(MatchExpression* node, size_t idx) {
+ // Don't bother peeking inside of negations.
+ if (MatchExpression::NOT == nodeType || MatchExpression::NOR == nodeType) {
+ return;
+ }
- if (Indexability::nodeCanUseIndexOnOwnField(node)
- && MatchExpression::GEO != node->matchType()
- && MatchExpression::GEO_NEAR != node->matchType()) {
- // We found a non-geo predicate tagged to use a V2 2dsphere index which is not
- // and-related to a geo predicate that can use the index.
- removeIndexRelevantTag(node, idx);
- return;
+ if (MatchExpression::AND != nodeType) {
+ // It's an OR or some kind of array operator.
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
}
+ return;
+ }
- const MatchExpression::MatchType nodeType = node->matchType();
+ bool hasGeoField = false;
- // Don't bother peeking inside of negations.
- if (MatchExpression::NOT == nodeType || MatchExpression::NOR == nodeType) {
- return;
- }
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ MatchExpression* child = node->getChild(i);
+ RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());
- if (MatchExpression::AND != nodeType) {
- // It's an OR or some kind of array operator.
- for (size_t i = 0; i < node->numChildren(); ++i) {
- stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
- }
- return;
+ if (NULL == tag) {
+ // 'child' could be a logical operator. Maybe there are some assignments hiding
+ // inside.
+ stripInvalidAssignmentsTo2dsphereIndex(child, idx);
+ continue;
}
- bool hasGeoField = false;
+ bool inFirst = tag->first.end() != std::find(tag->first.begin(), tag->first.end(), idx);
- for (size_t i = 0; i < node->numChildren(); ++i) {
- MatchExpression* child = node->getChild(i);
- RelevantTag* tag = static_cast<RelevantTag*>(child->getTag());
+ bool inNotFirst =
+ tag->notFirst.end() != std::find(tag->notFirst.begin(), tag->notFirst.end(), idx);
- if (NULL == tag) {
- // 'child' could be a logical operator. Maybe there are some assignments hiding
- // inside.
- stripInvalidAssignmentsTo2dsphereIndex(child, idx);
- continue;
- }
-
- bool inFirst = tag->first.end() != std::find(tag->first.begin(),
- tag->first.end(),
- idx);
-
- bool inNotFirst = tag->notFirst.end() != std::find(tag->notFirst.begin(),
- tag->notFirst.end(),
- idx);
-
- // If there is an index assignment...
- if (inFirst || inNotFirst) {
- // And it's a geo predicate...
- if (MatchExpression::GEO == child->matchType() ||
- MatchExpression::GEO_NEAR == child->matchType()) {
-
- hasGeoField = true;
- }
- }
- else {
- // Recurse on the children to ensure that they're not hiding any assignments
- // to idx.
- stripInvalidAssignmentsTo2dsphereIndex(child, idx);
+ // If there is an index assignment...
+ if (inFirst || inNotFirst) {
+ // And it's a geo predicate...
+ if (MatchExpression::GEO == child->matchType() ||
+ MatchExpression::GEO_NEAR == child->matchType()) {
+ hasGeoField = true;
}
+ } else {
+ // Recurse on the children to ensure that they're not hiding any assignments
+ // to idx.
+ stripInvalidAssignmentsTo2dsphereIndex(child, idx);
}
+ }
- // If there isn't a geo predicate our results aren't a subset of what's in the geo index, so
- // if we use the index we'll miss results.
- if (!hasGeoField) {
- for (size_t i = 0; i < node->numChildren(); ++i) {
- stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
- }
+ // If there isn't a geo predicate our results aren't a subset of what's in the geo index, so
+ // if we use the index we'll miss results.
+ if (!hasGeoField) {
+ for (size_t i = 0; i < node->numChildren(); ++i) {
+ stripInvalidAssignmentsTo2dsphereIndex(node->getChild(i), idx);
}
}
+}
- // static
- void QueryPlannerIXSelect::stripInvalidAssignmentsTo2dsphereIndices(
- MatchExpression* node,
- const vector<IndexEntry>& indices) {
-
- for (size_t i = 0; i < indices.size(); ++i) {
- const IndexEntry& index = indices[i];
+// static
+void QueryPlannerIXSelect::stripInvalidAssignmentsTo2dsphereIndices(
+ MatchExpression* node, const vector<IndexEntry>& indices) {
+ for (size_t i = 0; i < indices.size(); ++i) {
+ const IndexEntry& index = indices[i];
- // We only worry about 2dsphere indices.
- if (INDEX_2DSPHERE != index.type) {
- continue;
- }
+ // We only worry about 2dsphere indices.
+ if (INDEX_2DSPHERE != index.type) {
+ continue;
+ }
- // They also have to be V2. Both ignore the sparse flag but V1 is
- // never-sparse, V2 geo-sparse.
- BSONElement elt = index.infoObj["2dsphereIndexVersion"];
- if (elt.eoo()) {
- continue;
- }
- if (!elt.isNumber()) {
- continue;
- }
- if (2 != elt.numberInt()) {
- continue;
- }
+        // They also have to be V2. Both versions ignore the sparse flag, but V1 is
+        // never sparse while V2 is geo-sparse.
+ BSONElement elt = index.infoObj["2dsphereIndexVersion"];
+ if (elt.eoo()) {
+ continue;
+ }
+ if (!elt.isNumber()) {
+ continue;
+ }
+ if (2 != elt.numberInt()) {
+ continue;
+ }
- // If every field is geo don't bother doing anything.
- bool allFieldsGeo = true;
- BSONObjIterator it(index.keyPattern);
- while (it.more()) {
- BSONElement elt = it.next();
- if (String != elt.type()) {
- allFieldsGeo = false;
- break;
- }
- }
- if (allFieldsGeo) {
- continue;
+ // If every field is geo don't bother doing anything.
+ bool allFieldsGeo = true;
+ BSONObjIterator it(index.keyPattern);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ if (String != elt.type()) {
+ allFieldsGeo = false;
+ break;
}
-
- // Remove bad assignments from this index.
- stripInvalidAssignmentsTo2dsphereIndex(node, i);
}
+ if (allFieldsGeo) {
+ continue;
+ }
+
+ // Remove bad assignments from this index.
+ stripInvalidAssignmentsTo2dsphereIndex(node, i);
}
+}
} // namespace mongo
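
As a worked illustration of the prefix rule that stripInvalidAssignmentsToTextIndex() enforces above (an AND may keep its assignments to a compound text index only if it contains the TEXT predicate plus an equality predicate on every index-prefix path), here is a minimal standalone sketch. The Pred structure and function name are hypothetical simplifications, not the planner's real data model.

// text_prefix_check_sketch.cpp -- standalone sketch of the text-index prefix check.
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Pred {
    std::string path;
    bool isEquality;  // e.g. {a: 7}
    bool isText;      // the $text predicate
};

// Returns true if an AND over 'preds' may use a text index whose prefix paths
// are 'prefixPaths' (e.g. {"a", "b"} for the index {a: 1, b: 1, c: "text"}).
bool andCanUseTextIndex(const std::vector<Pred>& preds,
                        const std::set<std::string>& prefixPaths) {
    bool hasText = false;
    std::set<std::string> missing = prefixPaths;
    for (const Pred& pred : preds) {
        if (pred.isText) {
            hasText = true;
        } else if (pred.isEquality) {
            missing.erase(pred.path);  // one fewer prefix path left to cover
        }
    }
    return hasText && missing.empty();
}

int main() {
    const std::set<std::string> prefix = {"a", "b"};
    // {a: 1, b: 2, $text: ...} satisfies the prefix; prints 1.
    std::cout << andCanUseTextIndex(
                     {{"a", true, false}, {"b", true, false}, {"", false, true}}, prefix)
              << "\n";
    // {a: {$gt: 1}, b: 2, $text: ...} does not (no equality on 'a'); prints 0.
    std::cout << andCanUseTextIndex(
                     {{"a", false, false}, {"b", true, false}, {"", false, true}}, prefix)
              << "\n";
    return 0;
}
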
diff --git a/src/mongo/db/query/planner_ixselect.h b/src/mongo/db/query/planner_ixselect.h
index ad5912222a1..bbef9748d3a 100644
--- a/src/mongo/db/query/planner_ixselect.h
+++ b/src/mongo/db/query/planner_ixselect.h
@@ -34,151 +34,149 @@
namespace mongo {
+/**
+ * Methods for determining what fields and predicates can use indices.
+ */
+class QueryPlannerIXSelect {
+public:
/**
- * Methods for determining what fields and predicates can use indices.
+ * Return all the fields in the tree rooted at 'node' that we can use an index on
+ * in order to answer the query.
+ *
+ * The 'prefix' argument is a path prefix to be prepended to any fields mentioned in
+ * predicates encountered. Some array operators specify a path prefix.
*/
- class QueryPlannerIXSelect {
- public:
- /**
- * Return all the fields in the tree rooted at 'node' that we can use an index on
- * in order to answer the query.
- *
- * The 'prefix' argument is a path prefix to be prepended to any fields mentioned in
- * predicates encountered. Some array operators specify a path prefix.
- */
- static void getFields(const MatchExpression* node,
- std::string prefix,
- unordered_set<std::string>* out);
+ static void getFields(const MatchExpression* node,
+ std::string prefix,
+ unordered_set<std::string>* out);
- /**
- * Find all indices prefixed by fields we have predicates over. Only these indices are
- * useful in answering the query.
- */
- static void findRelevantIndices(const unordered_set<std::string>& fields,
- const std::vector<IndexEntry>& indices,
- std::vector<IndexEntry>* out);
+ /**
+ * Find all indices prefixed by fields we have predicates over. Only these indices are
+ * useful in answering the query.
+ */
+ static void findRelevantIndices(const unordered_set<std::string>& fields,
+ const std::vector<IndexEntry>& indices,
+ std::vector<IndexEntry>* out);
- /**
- * Return true if the index key pattern field 'elt' (which belongs to 'index') can be used
- * to answer the predicate 'node'.
- *
- * For example, {field: "hashed"} can only be used with sets of equalities.
- * {field: "2d"} can only be used with some geo predicates.
- * {field: "2dsphere"} can only be used with some other geo predicates.
- */
- static bool compatible(const BSONElement& elt,
- const IndexEntry& index,
- MatchExpression* node);
+ /**
+ * Return true if the index key pattern field 'elt' (which belongs to 'index') can be used
+ * to answer the predicate 'node'.
+ *
+ * For example, {field: "hashed"} can only be used with sets of equalities.
+ * {field: "2d"} can only be used with some geo predicates.
+ * {field: "2dsphere"} can only be used with some other geo predicates.
+ */
+ static bool compatible(const BSONElement& elt, const IndexEntry& index, MatchExpression* node);
+
+ /**
+ * Determine how useful all of our relevant 'indices' are to all predicates in the subtree
+ * rooted at 'node'. Affixes a RelevantTag to all predicate nodes which can use an index.
+ *
+ * 'prefix' is a path prefix that should be prepended to any path (certain array operators
+ * imply a path prefix).
+ *
+ * For an index to be useful to a predicate, the index must be compatible (see above).
+ *
+ * If an index is prefixed by the predicate's path, it's always useful.
+ *
+ * If an index is compound but not prefixed by a predicate's path, it's only useful if
+ * there exists another predicate that 1. will use that index and 2. is related to the
+ * original predicate by having an AND as a parent.
+ */
+ static void rateIndices(MatchExpression* node,
+ std::string prefix,
+ const std::vector<IndexEntry>& indices);
- /**
- * Determine how useful all of our relevant 'indices' are to all predicates in the subtree
- * rooted at 'node'. Affixes a RelevantTag to all predicate nodes which can use an index.
- *
- * 'prefix' is a path prefix that should be prepended to any path (certain array operators
- * imply a path prefix).
- *
- * For an index to be useful to a predicate, the index must be compatible (see above).
- *
- * If an index is prefixed by the predicate's path, it's always useful.
- *
- * If an index is compound but not prefixed by a predicate's path, it's only useful if
- * there exists another predicate that 1. will use that index and 2. is related to the
- * original predicate by having an AND as a parent.
- */
- static void rateIndices(MatchExpression* node,
- std::string prefix,
- const std::vector<IndexEntry>& indices);
+ /**
+ * Amend the RelevantTag lists for all predicates in the subtree rooted at 'node' to remove
+ * invalid assignments to text and geo indices.
+ *
+ * See the body of this function and the specific stripInvalidAssignments functions for details.
+ */
+ static void stripInvalidAssignments(MatchExpression* node,
+ const std::vector<IndexEntry>& indices);
- /**
- * Amend the RelevantTag lists for all predicates in the subtree rooted at 'node' to remove
- * invalid assignments to text and geo indices.
- *
- * See the body of this function and the specific stripInvalidAssignments functions for details.
- */
- static void stripInvalidAssignments(MatchExpression* node,
- const std::vector<IndexEntry>& indices);
+ /**
+ * In some special cases, we can strip most of the index assignments from the tree early
+ * on. Specifically, if we find an AND which has a child tagged for equality over a
+ * single-field unique index, then all other predicate-to-index assignments can be
+ * stripped off the subtree rooted at 'node'.
+ *
+ * This is used to ensure that we always favor key-value lookup plans over any
+ * more complex plan.
+ *
+ * Example:
+ * Suppose you have match expression OR (AND (a==1, b==2), AND (c==3, d==4)).
+ * There are indices on fields, 'a', 'b', 'c', and 'd'. The index on 'd' is
+ * the only unique index.
+ *
+ * This code will find that the subtree AND (c==3, d==4) can be answered by
+ * looking up the value of 'd' in the unique index. Since no better plan than
+ * a single key lookup is ever available, all assignments in this subtree
+ * are stripped, except for the assignment of d==4 to the unique 'd' index.
+ *
+ * Stripping the assignment for 'c' causes the planner to generate just two
+ * possible plans:
+ * 1) an OR of an index scan over 'a' and an index scan over 'd'
+ * 2) an OR of an index scan over 'b' and an index scan over 'd'
+ */
+ static void stripUnneededAssignments(MatchExpression* node,
+ const std::vector<IndexEntry>& indices);
- /**
- * In some special cases, we can strip most of the index assignments from the tree early
- * on. Specifically, if we find an AND which has a child tagged for equality over a
- * single-field unique index, then all other predicate-to-index assignments can be
- * stripped off the subtree rooted at 'node'.
- *
- * This is used to ensure that we always favor key-value lookup plans over any
- * more complex plan.
- *
- * Example:
- * Suppose you have match expression OR (AND (a==1, b==2), AND (c==3, d==4)).
- * There are indices on fields, 'a', 'b', 'c', and 'd'. The index on 'd' is
- * the only unique index.
- *
- * This code will find that the subtree AND (c==3, d==4) can be answered by
- * looking up the value of 'd' in the unique index. Since no better plan than
- * a single key lookup is ever available, all assignments in this subtree
- * are stripped, except for the assignment of d==4 to the unique 'd' index.
- *
- * Stripping the assignment for 'c' causes the planner to generate just two
- * possible plans:
- * 1) an OR of an index scan over 'a' and an index scan over 'd'
- * 2) an OR of an index scan over 'b' and an index scan over 'd'
- */
- static void stripUnneededAssignments(MatchExpression* node,
- const std::vector<IndexEntry>& indices);
+private:
+ /**
+ * Amend the RelevantTag lists for all predicates in the subtree rooted at 'node' to remove
+ * invalid assignments to text indexes.
+ *
+ * A predicate on a field from a compound text index with a non-empty index prefix
+ * (e.g. pred {a: 1, b: 1} on index {a: 1, b: 1, c: "text"}) is only considered valid to
+ * assign to the text index if it is a direct child of an AND with the following properties:
+ * - it has a TEXT child
+ * - for every index prefix component, it has an EQ child on that component's path
+ *
+ * Note that compatible() enforces the precondition that only EQ nodes are considered
+ * relevant to text index prefixes.
+ * If there is a relevant compound text index with a non-empty "index prefix" (e.g. the
+ * prefix {a: 1, b: 1} for the index {a: 1, b: 1, c: "text"}), amend the RelevantTag(s)
+ * created above to remove assignments to the text index where the query does not have
+ * predicates over each indexed field of the prefix.
+ *
+ * This is necessary because text indices do not obey the normal rules of sparseness, in
+ * that they generate no index keys for documents without indexable text data in at least
+ * one text field (in fact, text indices ignore the sparse option entirely). For example,
+ * given the text index {a: 1, b: 1, c: "text"}:
+ *
+ * - Document {a: 1, b: 6, c: "hello world"} generates 2 index keys
+ * - Document {a: 1, b: 7, c: {d: 1}} generates 0 index keys
+ * - Document {a: 1, b: 8} generates 0 index keys
+ *
+ * As a result, the query {a: 1} *cannot* be satisfied by the text index {a: 1, b: 1, c:
+ * "text"}, since documents without indexed text data would not be returned by the query.
+ * rateIndices() above will eagerly annotate the pred {a: 1} as relevant to the text index;
+ * those annotations get removed here.
+ */
+ static void stripInvalidAssignmentsToTextIndexes(MatchExpression* node,
+ const std::vector<IndexEntry>& indices);
- private:
- /**
- * Amend the RelevantTag lists for all predicates in the subtree rooted at 'node' to remove
- * invalid assignments to text indexes.
- *
- * A predicate on a field from a compound text index with a non-empty index prefix
- * (e.g. pred {a: 1, b: 1} on index {a: 1, b: 1, c: "text"}) is only considered valid to
- * assign to the text index if it is a direct child of an AND with the following properties:
- * - it has a TEXT child
- * - for every index prefix component, it has an EQ child on that component's path
- *
- * Note that compatible() enforces the precondition that only EQ nodes are considered
- * relevant to text index prefixes.
- * If there is a relevant compound text index with a non-empty "index prefix" (e.g. the
- * prefix {a: 1, b: 1} for the index {a: 1, b: 1, c: "text"}), amend the RelevantTag(s)
- * created above to remove assignments to the text index where the query does not have
- * predicates over each indexed field of the prefix.
- *
- * This is necessary because text indices do not obey the normal rules of sparseness, in
- * that they generate no index keys for documents without indexable text data in at least
- * one text field (in fact, text indices ignore the sparse option entirely). For example,
- * given the text index {a: 1, b: 1, c: "text"}:
- *
- * - Document {a: 1, b: 6, c: "hello world"} generates 2 index keys
- * - Document {a: 1, b: 7, c: {d: 1}} generates 0 index keys
- * - Document {a: 1, b: 8} generates 0 index keys
- *
- * As a result, the query {a: 1} *cannot* be satisfied by the text index {a: 1, b: 1, c:
- * "text"}, since documents without indexed text data would not be returned by the query.
- * rateIndices() above will eagerly annotate the pred {a: 1} as relevant to the text index;
- * those annotations get removed here.
- */
- static void stripInvalidAssignmentsToTextIndexes(MatchExpression* node,
+ /**
+ * For V1 2dsphere indices we ignore the sparse option. As such we can use an index
+ * like {nongeo: 1, geo: "2dsphere"} to answer queries only involving nongeo.
+ *
+     * V2 2dsphere indices also ignore the sparse flag, but their indexing behavior differs
+     * from V1. If all of the geo fields are missing from the document we do not index
+ * it. As such we cannot use V2 sparse indices unless we have a predicate over a geo
+ * field.
+ *
+ * 2dsphere indices V2 are "geo-sparse." That is, if there aren't any geo-indexed fields in
+ * a document it won't be indexed. As such we can't use an index like {foo:1, geo:
+ * "2dsphere"} to answer a query on 'foo' if the index is V2 as it will not contain the
+ * document {foo:1}.
+ *
+ * We *can* use it to answer a query on 'foo' if the predicate on 'foo' is AND-related to a
+ * predicate on every geo field in the index.
+ */
+ static void stripInvalidAssignmentsTo2dsphereIndices(MatchExpression* node,
const std::vector<IndexEntry>& indices);
-
- /**
- * For V1 2dsphere indices we ignore the sparse option. As such we can use an index
- * like {nongeo: 1, geo: "2dsphere"} to answer queries only involving nongeo.
- *
- * For V2 2dsphere indices also ignore the sparse flag but indexing behavior as compared to
- * V1 is different. If all of the geo fields are missing from the document we do not index
- * it. As such we cannot use V2 sparse indices unless we have a predicate over a geo
- * field.
- *
- * 2dsphere indices V2 are "geo-sparse." That is, if there aren't any geo-indexed fields in
- * a document it won't be indexed. As such we can't use an index like {foo:1, geo:
- * "2dsphere"} to answer a query on 'foo' if the index is V2 as it will not contain the
- * document {foo:1}.
- *
- * We *can* use it to answer a query on 'foo' if the predicate on 'foo' is AND-related to a
- * predicate on every geo field in the index.
- */
- static void stripInvalidAssignmentsTo2dsphereIndices(MatchExpression* node,
- const std::vector<IndexEntry>& indices);
- };
+};
} // namespace mongo
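
The stripUnneededAssignments() comment above describes a shortcut: once an AND has an equality child that can use a single-field unique index, no other plan can beat the resulting point lookup, so every other assignment in the subtree can be dropped. The sketch below illustrates that decision with simplified, hypothetical structures; it is not the planner's real representation.

// unique_eq_shortcut_sketch.cpp -- standalone sketch of the unique-equality shortcut.
#include <iostream>
#include <string>
#include <vector>

struct IndexEntrySketch {
    std::string field;
    bool unique;  // single-field unique index
};

struct AndChild {
    std::string path;
    bool isEquality;
    std::vector<size_t> candidateIndexes;  // positions into 'indexes' this child could use
};

// Returns the position of the index to keep exclusively, or -1 if the shortcut
// does not apply to this AND.
int uniqueEqualityShortcut(const std::vector<AndChild>& children,
                           const std::vector<IndexEntrySketch>& indexes) {
    for (const AndChild& child : children) {
        if (!child.isEquality) {
            continue;
        }
        for (size_t idx : child.candidateIndexes) {
            if (indexes[idx].unique) {
                return static_cast<int>(idx);  // point lookup wins; strip everything else
            }
        }
    }
    return -1;
}

int main() {
    const std::vector<IndexEntrySketch> indexes = {{"c", false}, {"d", true}};
    // AND(c == 3, d == 4): the d == 4 child can use the unique index on 'd'; prints 1.
    const std::vector<AndChild> andChildren = {{"c", true, {0}}, {"d", true, {1}}};
    std::cout << uniqueEqualityShortcut(andChildren, indexes) << "\n";
    return 0;
}
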
diff --git a/src/mongo/db/query/planner_ixselect_test.cpp b/src/mongo/db/query/planner_ixselect_test.cpp
index ba1f11e93d0..555a9db11ea 100644
--- a/src/mongo/db/query/planner_ixselect_test.cpp
+++ b/src/mongo/db/query/planner_ixselect_test.cpp
@@ -43,227 +43,230 @@ using namespace mongo;
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::vector;
- /**
- * Utility function to create MatchExpression
- */
- MatchExpression* parseMatchExpression(const BSONObj& obj) {
- StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
- ASSERT_TRUE(status.isOK());
- MatchExpression* expr(status.getValue());
- return expr;
- }
+/**
+ * Utility function to create MatchExpression
+ */
+MatchExpression* parseMatchExpression(const BSONObj& obj) {
+ StatusWithMatchExpression status = MatchExpressionParser::parse(obj);
+ ASSERT_TRUE(status.isOK());
+ MatchExpression* expr(status.getValue());
+ return expr;
+}
- /**
- * Utility function to join elements in iterator range with comma
- */
- template <typename Iter> string toString(Iter begin, Iter end) {
- mongoutils::str::stream ss;
- ss << "[";
- for (Iter i = begin; i != end; i++) {
- if (i != begin) {
- ss << " ";
- }
- ss << *i;
+/**
+ * Utility function to join elements in iterator range with comma
+ * Utility function to join elements in an iterator range into a bracketed, space-separated string
+template <typename Iter>
+string toString(Iter begin, Iter end) {
+ mongoutils::str::stream ss;
+ ss << "[";
+ for (Iter i = begin; i != end; i++) {
+ if (i != begin) {
+ ss << " ";
}
- ss << "]";
- return ss;
+ ss << *i;
}
+ ss << "]";
+ return ss;
+}
- /**
- * Test function for getFields()
- * Parses query string to obtain MatchExpression which is passed together with prefix
- * to QueryPlannerIXSelect::getFields()
- * Results are compared with expected fields (parsed from expectedFieldsStr)
- */
- void testGetFields(const char* query, const char* prefix, const char* expectedFieldsStr) {
- BSONObj obj = fromjson(query);
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- unordered_set<string> fields;
- QueryPlannerIXSelect::getFields(expr.get(), prefix, &fields);
-
- // Verify results
- // First, check that results contain a superset of expected fields.
- vector<string> expectedFields = StringSplitter::split(expectedFieldsStr, ",");
- for (vector<string>::const_iterator i = expectedFields.begin(); i != expectedFields.end();
- i++) {
- if (fields.find(*i) == fields.end()) {
- mongoutils::str::stream ss;
- ss << "getFields(query=" << query << ", prefix=" << prefix << "): unable to find "
- << *i << " in result: " << toString(fields.begin(), fields.end());
- FAIL(ss);
- }
- }
+/**
+ * Test function for getFields()
+ * Parses query string to obtain MatchExpression which is passed together with prefix
+ * to QueryPlannerIXSelect::getFields()
+ * Results are compared with expected fields (parsed from expectedFieldsStr)
+ */
+void testGetFields(const char* query, const char* prefix, const char* expectedFieldsStr) {
+ BSONObj obj = fromjson(query);
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+ unordered_set<string> fields;
+ QueryPlannerIXSelect::getFields(expr.get(), prefix, &fields);
- // Next, confirm that results do not contain any unexpected fields.
- if (fields.size() != expectedFields.size()) {
+ // Verify results
+ // First, check that results contain a superset of expected fields.
+ vector<string> expectedFields = StringSplitter::split(expectedFieldsStr, ",");
+ for (vector<string>::const_iterator i = expectedFields.begin(); i != expectedFields.end();
+ i++) {
+ if (fields.find(*i) == fields.end()) {
mongoutils::str::stream ss;
- ss << "getFields(query=" << query << ", prefix=" << prefix
- << "): unexpected fields in result. expected: "
- << toString(expectedFields.begin(), expectedFields.end())
- << ". actual: " << toString(fields.begin(), fields.end());
+ ss << "getFields(query=" << query << ", prefix=" << prefix << "): unable to find " << *i
+ << " in result: " << toString(fields.begin(), fields.end());
FAIL(ss);
}
}
- /**
- * Basic test cases for getFields()
- * Includes logical operators
- */
- TEST(QueryPlannerIXSelectTest, GetFieldsBasic) {
- // Arguments to test function: query, prefix, comma-delimited list of expected fields
- testGetFields("{}", "", "");
- testGetFields("{a: 1}", "", "a");
- testGetFields("{a: 1}", "c.", "c.a");
- testGetFields("{a: 1, b: 1}", "", "a,b");
- testGetFields("{a: {$in: [1]}}", "", "a");
- testGetFields("{$or: [{a: 1}, {b: 1}]}", "", "a,b");
+ // Next, confirm that results do not contain any unexpected fields.
+ if (fields.size() != expectedFields.size()) {
+ mongoutils::str::stream ss;
+ ss << "getFields(query=" << query << ", prefix=" << prefix
+ << "): unexpected fields in result. expected: "
+ << toString(expectedFields.begin(), expectedFields.end())
+ << ". actual: " << toString(fields.begin(), fields.end());
+ FAIL(ss);
}
+}
- /**
- * Array test cases for getFields
- */
- TEST(QueryPlannerIXSelectTest, GetFieldsArray) {
- testGetFields("{a: {$elemMatch: {b: 1}}}", "", "a.b");
- testGetFields("{a: {$all: [{$elemMatch: {b: 1}}]}}", "", "a.b");
- }
+/**
+ * Basic test cases for getFields()
+ * Includes logical operators
+ */
+TEST(QueryPlannerIXSelectTest, GetFieldsBasic) {
+ // Arguments to test function: query, prefix, comma-delimited list of expected fields
+ testGetFields("{}", "", "");
+ testGetFields("{a: 1}", "", "a");
+ testGetFields("{a: 1}", "c.", "c.a");
+ testGetFields("{a: 1, b: 1}", "", "a,b");
+ testGetFields("{a: {$in: [1]}}", "", "a");
+ testGetFields("{$or: [{a: 1}, {b: 1}]}", "", "a,b");
+}
- /**
- * Negation test cases for getFields()
- * $ne, $nin, $nor
- */
- TEST(QueryPlannerIXSelectTest, GetFieldsNegation) {
- testGetFields("{a: {$ne: 1}}", "", "a");
- testGetFields("{a: {$nin: [1]}}", "", "a");
- testGetFields("{$nor: [{a: 1}, {b: 1}]}", "", "");
- testGetFields("{$and: [{a: 1}, {a: {$ne: 2}}]}", "", "a");
- }
+/**
+ * Array test cases for getFields
+ */
+TEST(QueryPlannerIXSelectTest, GetFieldsArray) {
+ testGetFields("{a: {$elemMatch: {b: 1}}}", "", "a.b");
+ testGetFields("{a: {$all: [{$elemMatch: {b: 1}}]}}", "", "a.b");
+}
- /**
- * Array negation test cases for getFields
- */
- TEST(QueryPlannerIXSelectTest, GetFieldsArrayNegation) {
- testGetFields("{a: {$elemMatch: {b: {$ne: 1}}}}", "", "a.b");
- testGetFields("{a: {$all: [{$elemMatch: {b: {$ne: 1}}}]}}", "", "a.b");
- }
+/**
+ * Negation test cases for getFields()
+ * $ne, $nin, $nor
+ */
+TEST(QueryPlannerIXSelectTest, GetFieldsNegation) {
+ testGetFields("{a: {$ne: 1}}", "", "a");
+ testGetFields("{a: {$nin: [1]}}", "", "a");
+ testGetFields("{$nor: [{a: 1}, {b: 1}]}", "", "");
+ testGetFields("{$and: [{a: 1}, {a: {$ne: 2}}]}", "", "a");
+}
- /**
- * Performs a pre-order traversal of expression tree. Validates
- * that all tagged nodes contain an instance of RelevantTag.
- */
- void findRelevantTaggedNodePaths(MatchExpression* root, vector<string>* paths) {
- MatchExpression::TagData* tag = root->getTag();
- if (tag) {
- StringBuilder buf;
- tag->debugString(&buf);
- RelevantTag* r = dynamic_cast<RelevantTag*>(tag);
- if (!r) {
- mongoutils::str::stream ss;
- ss << "tag is not instance of RelevantTag. tree: " << root->toString()
- << "; tag: " << buf.str();
- FAIL(ss);
- }
- paths->push_back(r->path);
- }
- for (size_t i = 0; i < root->numChildren(); ++i) {
- findRelevantTaggedNodePaths(root->getChild(i), paths);
+/**
+ * Array negation test cases for getFields
+ */
+TEST(QueryPlannerIXSelectTest, GetFieldsArrayNegation) {
+ testGetFields("{a: {$elemMatch: {b: {$ne: 1}}}}", "", "a.b");
+ testGetFields("{a: {$all: [{$elemMatch: {b: {$ne: 1}}}]}}", "", "a.b");
+}
+
+/**
+ * Performs a pre-order traversal of expression tree. Validates
+ * that all tagged nodes contain an instance of RelevantTag.
+ */
+void findRelevantTaggedNodePaths(MatchExpression* root, vector<string>* paths) {
+ MatchExpression::TagData* tag = root->getTag();
+ if (tag) {
+ StringBuilder buf;
+ tag->debugString(&buf);
+ RelevantTag* r = dynamic_cast<RelevantTag*>(tag);
+ if (!r) {
+ mongoutils::str::stream ss;
+ ss << "tag is not instance of RelevantTag. tree: " << root->toString()
+ << "; tag: " << buf.str();
+ FAIL(ss);
}
+ paths->push_back(r->path);
+ }
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ findRelevantTaggedNodePaths(root->getChild(i), paths);
}
-
- /**
- * Parses a MatchExpression from query string and passes that along with
- * prefix to rateIndices.
- * Verifies results against list of expected paths.
- * For now, we're only interested in which nodes are tagged.
- * In future, we may expand this test function to include
- * validate which indices are assigned to a node.
- */
- void testRateIndicesTaggedNodePaths(const char* query, const char* prefix,
- const char* expectedPathsStr) {
- // Parse and rate query. Some of the nodes in the rated tree
- // will be tagged after the rating process.
- BSONObj obj = fromjson(query);
- unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
+}
- // Currently, we tag every indexable node even when no compatible
- // index is available. Hence, it is fine to pass an empty vector of
- // indices to rateIndices().
- vector<IndexEntry> indices;
- QueryPlannerIXSelect::rateIndices(expr.get(), prefix, indices);
+/**
+ * Parses a MatchExpression from query string and passes that along with
+ * prefix to rateIndices.
+ * Verifies results against list of expected paths.
+ * For now, we're only interested in which nodes are tagged.
+ * In the future, we may expand this test function to also
+ * validate which indices are assigned to a node.
+ */
+void testRateIndicesTaggedNodePaths(const char* query,
+ const char* prefix,
+ const char* expectedPathsStr) {
+ // Parse and rate query. Some of the nodes in the rated tree
+ // will be tagged after the rating process.
+ BSONObj obj = fromjson(query);
+ unique_ptr<MatchExpression> expr(parseMatchExpression(obj));
- // Retrieve a list of paths embedded in
- // tagged nodes.
- vector<string> paths;
- findRelevantTaggedNodePaths(expr.get(), &paths);
+ // Currently, we tag every indexable node even when no compatible
+ // index is available. Hence, it is fine to pass an empty vector of
+ // indices to rateIndices().
+ vector<IndexEntry> indices;
+ QueryPlannerIXSelect::rateIndices(expr.get(), prefix, indices);
- // Compare with expected list of paths.
- // First verify number of paths retrieved.
- vector<string> expectedPaths = StringSplitter::split(expectedPathsStr, ",");
- if (paths.size() != expectedPaths.size()) {
- mongoutils::str::stream ss;
- ss << "rateIndices(query=" << query << ", prefix=" << prefix
- << "): unexpected number of tagged nodes found. expected: "
- << toString(expectedPaths.begin(), expectedPaths.end()) << ". actual: "
- << toString(paths.begin(), paths.end());
- FAIL(ss);
- }
+ // Retrieve a list of paths embedded in
+ // tagged nodes.
+ vector<string> paths;
+ findRelevantTaggedNodePaths(expr.get(), &paths);
- // Next, check that value and order of each element match between the two lists.
- for (vector<string>::const_iterator i = paths.begin(), j = expectedPaths.begin();
- i != paths.end(); i++, j++) {
- if (*i == *j) {
- continue;
- }
- mongoutils::str::stream ss;
- ss << "rateIndices(query=" << query << ", prefix=" << prefix
- << "): unexpected path found. expected: " << *j << " "
- << toString(expectedPaths.begin(), expectedPaths.end()) << ". actual: "
- << *i << " " << toString(paths.begin(), paths.end());
- FAIL(ss);
- }
+ // Compare with expected list of paths.
+ // First verify number of paths retrieved.
+ vector<string> expectedPaths = StringSplitter::split(expectedPathsStr, ",");
+ if (paths.size() != expectedPaths.size()) {
+ mongoutils::str::stream ss;
+ ss << "rateIndices(query=" << query << ", prefix=" << prefix
+ << "): unexpected number of tagged nodes found. expected: "
+ << toString(expectedPaths.begin(), expectedPaths.end())
+ << ". actual: " << toString(paths.begin(), paths.end());
+ FAIL(ss);
}
- /**
- * Basic test cases for rateIndices().
- * Includes logical operators.
- */
- TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathsBasic) {
- // Test arguments: query, prefix, comma-delimited list of expected paths
- testRateIndicesTaggedNodePaths("{}", "", "");
- testRateIndicesTaggedNodePaths("{a: 1}", "", "a");
- testRateIndicesTaggedNodePaths("{a: 1}", "c.", "c.a");
- testRateIndicesTaggedNodePaths("{a: 1, b: 1}", "", "a,b");
- testRateIndicesTaggedNodePaths("{a: {$in: [1]}}", "", "a");
- testRateIndicesTaggedNodePaths("{$or: [{a: 1}, {b: 1}]}", "", "a,b");
+ // Next, check that value and order of each element match between the two lists.
+ for (vector<string>::const_iterator i = paths.begin(), j = expectedPaths.begin();
+ i != paths.end();
+ i++, j++) {
+ if (*i == *j) {
+ continue;
+ }
+ mongoutils::str::stream ss;
+ ss << "rateIndices(query=" << query << ", prefix=" << prefix
+ << "): unexpected path found. expected: " << *j << " "
+ << toString(expectedPaths.begin(), expectedPaths.end()) << ". actual: " << *i << " "
+ << toString(paths.begin(), paths.end());
+ FAIL(ss);
}
+}
- /**
- * Array test cases for rateIndices().
- */
- TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathArray) {
- testRateIndicesTaggedNodePaths("{a: {$elemMatch: {b: 1}}}", "", "a.b");
- testRateIndicesTaggedNodePaths("{a: {$all: [{$elemMatch: {b: 1}}]}}", "", "a.b");
- }
+/**
+ * Basic test cases for rateIndices().
+ * Includes logical operators.
+ */
+TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathsBasic) {
+ // Test arguments: query, prefix, comma-delimited list of expected paths
+ testRateIndicesTaggedNodePaths("{}", "", "");
+ testRateIndicesTaggedNodePaths("{a: 1}", "", "a");
+ testRateIndicesTaggedNodePaths("{a: 1}", "c.", "c.a");
+ testRateIndicesTaggedNodePaths("{a: 1, b: 1}", "", "a,b");
+ testRateIndicesTaggedNodePaths("{a: {$in: [1]}}", "", "a");
+ testRateIndicesTaggedNodePaths("{$or: [{a: 1}, {b: 1}]}", "", "a,b");
+}
- /**
- * Negation test cases for rateIndices().
- */
- TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathsNegation) {
- testRateIndicesTaggedNodePaths("{a: {$ne: 1}}", "", "a,a");
- testRateIndicesTaggedNodePaths("{a: {$nin: [1]}}", "", "a,a");
- testRateIndicesTaggedNodePaths("{$nor: [{a: 1}, {b: 1}]}", "", "");
- testRateIndicesTaggedNodePaths("{$and: [{a: 1}, {a: {$ne: 2}}]}", "", "a,a,a");
- }
+/**
+ * Array test cases for rateIndices().
+ */
+TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathArray) {
+ testRateIndicesTaggedNodePaths("{a: {$elemMatch: {b: 1}}}", "", "a.b");
+ testRateIndicesTaggedNodePaths("{a: {$all: [{$elemMatch: {b: 1}}]}}", "", "a.b");
+}
- /**
- * Array negation test cases for rateIndices().
- */
- TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathArrayNegation) {
- testRateIndicesTaggedNodePaths("{a: {$elemMatch: {b: {$ne: 1}}}}", "", "a.b,a.b");
- testRateIndicesTaggedNodePaths("{a: {$all: [{$elemMatch: {b: {$ne: 1}}}]}}", "", "a.b,a.b");
- }
+/**
+ * Negation test cases for rateIndices().
+ */
+TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathsNegation) {
+ testRateIndicesTaggedNodePaths("{a: {$ne: 1}}", "", "a,a");
+ testRateIndicesTaggedNodePaths("{a: {$nin: [1]}}", "", "a,a");
+ testRateIndicesTaggedNodePaths("{$nor: [{a: 1}, {b: 1}]}", "", "");
+ testRateIndicesTaggedNodePaths("{$and: [{a: 1}, {a: {$ne: 2}}]}", "", "a,a,a");
+}
+
+/**
+ * Array negation test cases for rateIndices().
+ */
+TEST(QueryPlannerIXSelectTest, RateIndicesTaggedNodePathArrayNegation) {
+ testRateIndicesTaggedNodePaths("{a: {$elemMatch: {b: {$ne: 1}}}}", "", "a.b,a.b");
+ testRateIndicesTaggedNodePaths("{a: {$all: [{$elemMatch: {b: {$ne: 1}}}]}}", "", "a.b,a.b");
+}
} // namespace
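The verification strategy used by testGetFields() above (check that every expected field is present, then check that the sizes match) amounts to a set-equality test. A small standalone sketch of the same two-step check, using hypothetical names unrelated to the MongoDB tree:

#include <cassert>
#include <string>
#include <unordered_set>
#include <vector>

// Returns true iff 'actual' contains exactly the elements listed in 'expected':
// step 1 proves every expected element is present, step 2 rules out unexpected extras.
bool matchesExpected(const std::unordered_set<std::string>& actual,
                     const std::vector<std::string>& expected) {
    for (const std::string& e : expected) {
        if (actual.find(e) == actual.end())
            return false;  // an expected field is missing
    }
    return actual.size() == expected.size();  // equal sizes => no extra fields
}

int main() {
    std::unordered_set<std::string> fields = {"a", "b"};
    assert(matchesExpected(fields, {"a", "b"}));
    assert(!matchesExpected(fields, {"a"}));            // "b" would be an unexpected extra
    assert(!matchesExpected(fields, {"a", "b", "c"}));  // "c" is missing from the results
    return 0;
}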
diff --git a/src/mongo/db/query/query_knobs.cpp b/src/mongo/db/query/query_knobs.cpp
index fc8ade631c5..c73759cebd4 100644
--- a/src/mongo/db/query/query_knobs.cpp
+++ b/src/mongo/db/query/query_knobs.cpp
@@ -32,40 +32,40 @@
namespace mongo {
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationWorks, int, 10000);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationWorks, int, 10000);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationCollFraction, double, 0.3);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationCollFraction, double, 0.3);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationMaxResults, int, 101);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanEvaluationMaxResults, int, 101);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheSize, int, 5000);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheSize, int, 5000);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheFeedbacksStored, int, 20);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheFeedbacksStored, int, 20);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheEvictionRatio, double, 10.0);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheEvictionRatio, double, 10.0);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheWriteOpsBetweenFlush, int, 1000);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryCacheWriteOpsBetweenFlush, int, 1000);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerMaxIndexedSolutions, int, 64);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerMaxIndexedSolutions, int, 64);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxOrSolutions, int, 10);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxOrSolutions, int, 10);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxIntersectPerAnd, int, 3);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryEnumerationMaxIntersectPerAnd, int, 3);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryForceIntersectionPlans, bool, false);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryForceIntersectionPlans, bool, false);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerEnableIndexIntersection, bool, true);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerEnableIndexIntersection, bool, true);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerEnableHashIntersection, bool, false);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlannerEnableHashIntersection, bool, false);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanOrChildrenIndependently, bool, true);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryPlanOrChildrenIndependently, bool, true);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryMaxScansToExplode, int, 200);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryMaxScansToExplode, int, 200);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecMaxBlockingSortBytes, int, 32 * 1024 * 1024);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecMaxBlockingSortBytes, int, 32 * 1024 * 1024);
- // Yield every 128 cycles or 10ms.
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecYieldIterations, int, 128);
- MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecYieldPeriodMS, int, 10);
+// Yield every 128 cycles or 10ms.
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecYieldIterations, int, 128);
+MONGO_EXPORT_SERVER_PARAMETER(internalQueryExecYieldPeriodMS, int, 10);
} // namespace mongo
diff --git a/src/mongo/db/query/query_knobs.h b/src/mongo/db/query/query_knobs.h
index f2b775593d0..c82d6828a29 100644
--- a/src/mongo/db/query/query_knobs.h
+++ b/src/mongo/db/query/query_knobs.h
@@ -30,78 +30,78 @@
namespace mongo {
- //
- // multi-plan ranking
- //
+//
+// multi-plan ranking
+//
- // Max number of times we call work() on plans before comparing them,
- // for small collections.
- extern int internalQueryPlanEvaluationWorks;
+// Max number of times we call work() on plans before comparing them,
+// for small collections.
+extern int internalQueryPlanEvaluationWorks;
- // For large collections, the number times we work() candidate plans is
- // taken as this fraction of the collection size.
- extern double internalQueryPlanEvaluationCollFraction;
+// For large collections, the number of times we work() candidate plans is
+// taken as this fraction of the collection size.
+extern double internalQueryPlanEvaluationCollFraction;
- // Stop working plans once a plan returns this many results.
- extern int internalQueryPlanEvaluationMaxResults;
+// Stop working plans once a plan returns this many results.
+extern int internalQueryPlanEvaluationMaxResults;
- // Do we give a big ranking bonus to intersection plans?
- extern bool internalQueryForceIntersectionPlans;
+// Do we give a big ranking bonus to intersection plans?
+extern bool internalQueryForceIntersectionPlans;
- // Do we have ixisect on at all?
- extern bool internalQueryPlannerEnableIndexIntersection;
+// Do we have ixisect on at all?
+extern bool internalQueryPlannerEnableIndexIntersection;
- // Do we use hash-based intersection for rooted $and queries?
- extern bool internalQueryPlannerEnableHashIntersection;
+// Do we use hash-based intersection for rooted $and queries?
+extern bool internalQueryPlannerEnableHashIntersection;
- //
- // plan cache
- //
+//
+// plan cache
+//
- // How many entries in the cache?
- extern int internalQueryCacheSize;
+// How many entries in the cache?
+extern int internalQueryCacheSize;
- // How many feedback entries do we collect before possibly evicting from the cache based on bad
- // performance?
- extern int internalQueryCacheFeedbacksStored;
+// How many feedback entries do we collect before possibly evicting from the cache based on bad
+// performance?
+extern int internalQueryCacheFeedbacksStored;
- // How many times more works must we perform in order to justify plan cache eviction
- // and replanning?
- extern double internalQueryCacheEvictionRatio;
+// How many times more works must we perform in order to justify plan cache eviction
+// and replanning?
+extern double internalQueryCacheEvictionRatio;
- // How many write ops should we allow in a collection before tossing all cache entries?
- extern int internalQueryCacheWriteOpsBetweenFlush;
+// How many write ops should we allow in a collection before tossing all cache entries?
+extern int internalQueryCacheWriteOpsBetweenFlush;
- //
- // Planning and enumeration.
- //
+//
+// Planning and enumeration.
+//
- // How many indexed solutions will QueryPlanner::plan output?
- extern int internalQueryPlannerMaxIndexedSolutions;
+// How many indexed solutions will QueryPlanner::plan output?
+extern int internalQueryPlannerMaxIndexedSolutions;
- // How many solutions will the enumerator consider at each OR?
- extern int internalQueryEnumerationMaxOrSolutions;
+// How many solutions will the enumerator consider at each OR?
+extern int internalQueryEnumerationMaxOrSolutions;
- // How many intersections will the enumerator consider at each AND?
- extern int internalQueryEnumerationMaxIntersectPerAnd;
+// How many intersections will the enumerator consider at each AND?
+extern int internalQueryEnumerationMaxIntersectPerAnd;
- // Do we want to plan each child of the OR independently?
- extern bool internalQueryPlanOrChildrenIndependently;
+// Do we want to plan each child of the OR independently?
+extern bool internalQueryPlanOrChildrenIndependently;
- // How many index scans are we willing to produce in order to obtain a sort order
- // during explodeForSort?
- extern int internalQueryMaxScansToExplode;
+// How many index scans are we willing to produce in order to obtain a sort order
+// during explodeForSort?
+extern int internalQueryMaxScansToExplode;
- //
- // Query execution.
- //
+//
+// Query execution.
+//
- extern int internalQueryExecMaxBlockingSortBytes;
+extern int internalQueryExecMaxBlockingSortBytes;
- // Yield after this many "should yield?" checks.
- extern int internalQueryExecYieldIterations;
+// Yield after this many "should yield?" checks.
+extern int internalQueryExecYieldIterations;
- // Yield if it's been at least this many milliseconds since we last yielded.
- extern int internalQueryExecYieldPeriodMS;
+// Yield if it's been at least this many milliseconds since we last yielded.
+extern int internalQueryExecYieldPeriodMS;
} // namespace mongo
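Each knob above pairs an extern declaration in this header with a MONGO_EXPORT_SERVER_PARAMETER definition in query_knobs.cpp, so consumers read the current value at the point of use. A minimal single-file sketch of that pattern with hypothetical names (it does not model the macro's registration of the variable as a settable server parameter):

#include <iostream>

// Would live in a header: every consumer sees the declaration.
extern int internalDemoMaxResults;

// Would live in the matching .cpp: the single definition with its default value.
int internalDemoMaxResults = 101;

// Consumers read the knob each time they run, so a changed value takes effect immediately.
bool shouldStopCollectingResults(int resultsSoFar) {
    return resultsSoFar >= internalDemoMaxResults;
}

int main() {
    std::cout << shouldStopCollectingResults(50) << "\n";  // 0: below the default of 101
    internalDemoMaxResults = 25;                           // e.g. lowered at runtime
    std::cout << shouldStopCollectingResults(50) << "\n";  // 1: now above the threshold
    return 0;
}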
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 852b705c532..6b98e0fae79 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -34,7 +34,7 @@
#include <vector>
-#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
+#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/query/canonical_query.h"
@@ -49,880 +49,855 @@
namespace mongo {
- using std::unique_ptr;
- using std::numeric_limits;
-
- // Copied verbatim from db/index.h
- static bool isIdIndex( const BSONObj &pattern ) {
- BSONObjIterator i(pattern);
- BSONElement e = i.next();
- //_id index must have form exactly {_id : 1} or {_id : -1}.
- //Allows an index of form {_id : "hashed"} to exist but
- //do not consider it to be the primary _id index
- if(! ( strcmp(e.fieldName(), "_id") == 0
- && (e.numberInt() == 1 || e.numberInt() == -1)))
- return false;
- return i.next().eoo();
- }
+using std::unique_ptr;
+using std::numeric_limits;
+
+// Copied verbatim from db/index.h
+static bool isIdIndex(const BSONObj& pattern) {
+ BSONObjIterator i(pattern);
+ BSONElement e = i.next();
+ //_id index must have form exactly {_id : 1} or {_id : -1}.
+ // Allows an index of form {_id : "hashed"} to exist but
+ // do not consider it to be the primary _id index
+ if (!(strcmp(e.fieldName(), "_id") == 0 && (e.numberInt() == 1 || e.numberInt() == -1)))
+ return false;
+ return i.next().eoo();
+}
- static bool is2DIndex(const BSONObj& pattern) {
- BSONObjIterator it(pattern);
- while (it.more()) {
- BSONElement e = it.next();
- if (String == e.type() && str::equals("2d", e.valuestr())) {
- return true;
- }
+static bool is2DIndex(const BSONObj& pattern) {
+ BSONObjIterator it(pattern);
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (String == e.type() && str::equals("2d", e.valuestr())) {
+ return true;
}
- return false;
}
+ return false;
+}
- string optionString(size_t options) {
- mongoutils::str::stream ss;
+string optionString(size_t options) {
+ mongoutils::str::stream ss;
- // These options are all currently mutually exclusive.
- if (QueryPlannerParams::DEFAULT == options) {
- ss << "DEFAULT ";
- }
- if (options & QueryPlannerParams::NO_TABLE_SCAN) {
- ss << "NO_TABLE_SCAN ";
- }
- if (options & QueryPlannerParams::INCLUDE_COLLSCAN) {
- ss << "INCLUDE_COLLSCAN ";
- }
- if (options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- ss << "INCLUDE_SHARD_FILTER ";
- }
- if (options & QueryPlannerParams::NO_BLOCKING_SORT) {
- ss << "NO_BLOCKING_SORT ";
- }
- if (options & QueryPlannerParams::INDEX_INTERSECTION) {
- ss << "INDEX_INTERSECTION ";
- }
- if (options & QueryPlannerParams::KEEP_MUTATIONS) {
- ss << "KEEP_MUTATIONS";
- }
-
- return ss;
+ // These options are all currently mutually exclusive.
+ if (QueryPlannerParams::DEFAULT == options) {
+ ss << "DEFAULT ";
}
-
- static BSONObj getKeyFromQuery(const BSONObj& keyPattern, const BSONObj& query) {
- return query.extractFieldsUnDotted(keyPattern);
+ if (options & QueryPlannerParams::NO_TABLE_SCAN) {
+ ss << "NO_TABLE_SCAN ";
+ }
+ if (options & QueryPlannerParams::INCLUDE_COLLSCAN) {
+ ss << "INCLUDE_COLLSCAN ";
+ }
+ if (options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
+ ss << "INCLUDE_SHARD_FILTER ";
+ }
+ if (options & QueryPlannerParams::NO_BLOCKING_SORT) {
+ ss << "NO_BLOCKING_SORT ";
+ }
+ if (options & QueryPlannerParams::INDEX_INTERSECTION) {
+ ss << "INDEX_INTERSECTION ";
+ }
+ if (options & QueryPlannerParams::KEEP_MUTATIONS) {
+ ss << "KEEP_MUTATIONS";
}
- static bool indexCompatibleMaxMin(const BSONObj& obj, const BSONObj& keyPattern) {
- BSONObjIterator kpIt(keyPattern);
- BSONObjIterator objIt(obj);
+ return ss;
+}
- for (;;) {
- // Every element up to this point has matched so the KP matches
- if (!kpIt.more() && !objIt.more()) {
- return true;
- }
+static BSONObj getKeyFromQuery(const BSONObj& keyPattern, const BSONObj& query) {
+ return query.extractFieldsUnDotted(keyPattern);
+}
- // If only one iterator is done, it's not a match.
- if (!kpIt.more() || !objIt.more()) {
- return false;
- }
+static bool indexCompatibleMaxMin(const BSONObj& obj, const BSONObj& keyPattern) {
+ BSONObjIterator kpIt(keyPattern);
+ BSONObjIterator objIt(obj);
- // Field names must match and be in the same order.
- BSONElement kpElt = kpIt.next();
- BSONElement objElt = objIt.next();
- if (!mongoutils::str::equals(kpElt.fieldName(), objElt.fieldName())) {
- return false;
- }
+ for (;;) {
+ // Every element up to this point has matched so the KP matches
+ if (!kpIt.more() && !objIt.more()) {
+ return true;
}
- }
- static BSONObj stripFieldNames(const BSONObj& obj) {
- BSONObjIterator it(obj);
- BSONObjBuilder bob;
- while (it.more()) {
- bob.appendAs(it.next(), "");
- }
- return bob.obj();
- }
-
- /**
- * "Finishes" the min object for the $min query option by filling in an empty object with
- * MinKey/MaxKey and stripping field names.
- *
- * In the case that 'minObj' is empty, we "finish" it by filling in either MinKey or MaxKey
- * instead. Choosing whether to use MinKey or MaxKey is done by comparing against 'maxObj'.
- * For instance, suppose 'minObj' is empty, 'maxObj' is { a: 3 }, and the key pattern is
- * { a: -1 }. According to the key pattern ordering, { a: 3 } < MinKey. This means that the
- * proper resulting bounds are
- *
- * start: { '': MaxKey }, end: { '': 3 }
- *
- * as opposed to
- *
- * start: { '': MinKey }, end: { '': 3 }
- *
- * Suppose instead that the key pattern is { a: 1 }, with the same 'minObj' and 'maxObj'
- * (that is, an empty object and { a: 3 } respectively). In this case, { a: 3 } > MinKey,
- * which means that we use range [{'': MinKey}, {'': 3}]. The proper 'minObj' in this case is
- * MinKey, whereas in the previous example it was MaxKey.
- *
- * If 'minObj' is non-empty, then all we do is strip its field names (because index keys always
- * have empty field names).
- */
- static BSONObj finishMinObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
- BSONObjBuilder bob;
- bob.appendMinKey("");
- BSONObj minKey = bob.obj();
-
- if (minObj.isEmpty()) {
- if (0 > minKey.woCompare(maxObj, kp, false)) {
- BSONObjBuilder minKeyBuilder;
- minKeyBuilder.appendMinKey("");
- return minKeyBuilder.obj();
- }
- else {
- BSONObjBuilder maxKeyBuilder;
- maxKeyBuilder.appendMaxKey("");
- return maxKeyBuilder.obj();
- }
- }
- else {
- return stripFieldNames(minObj);
+ // If only one iterator is done, it's not a match.
+ if (!kpIt.more() || !objIt.more()) {
+ return false;
}
- }
- /**
- * "Finishes" the max object for the $max query option by filling in an empty object with
- * MinKey/MaxKey and stripping field names.
- *
- * See comment for finishMinObj() for why we need both 'minObj' and 'maxObj'.
- */
- static BSONObj finishMaxObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
- BSONObjBuilder bob;
- bob.appendMaxKey("");
- BSONObj maxKey = bob.obj();
-
- if (maxObj.isEmpty()) {
- if (0 < maxKey.woCompare(minObj, kp, false)) {
- BSONObjBuilder maxKeyBuilder;
- maxKeyBuilder.appendMaxKey("");
- return maxKeyBuilder.obj();
- }
- else {
- BSONObjBuilder minKeyBuilder;
- minKeyBuilder.appendMinKey("");
- return minKeyBuilder.obj();
- }
- }
- else {
- return stripFieldNames(maxObj);
+ // Field names must match and be in the same order.
+ BSONElement kpElt = kpIt.next();
+ BSONElement objElt = objIt.next();
+ if (!mongoutils::str::equals(kpElt.fieldName(), objElt.fieldName())) {
+ return false;
}
}
+}
- QuerySolution* buildCollscanSoln(const CanonicalQuery& query,
- bool tailable,
- const QueryPlannerParams& params) {
-
- QuerySolutionNode* solnRoot = QueryPlannerAccess::makeCollectionScan(query, tailable, params);
- return QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+static BSONObj stripFieldNames(const BSONObj& obj) {
+ BSONObjIterator it(obj);
+ BSONObjBuilder bob;
+ while (it.more()) {
+ bob.appendAs(it.next(), "");
}
+ return bob.obj();
+}
- QuerySolution* buildWholeIXSoln(const IndexEntry& index,
- const CanonicalQuery& query,
- const QueryPlannerParams& params,
- int direction = 1) {
-
- QuerySolutionNode* solnRoot = QueryPlannerAccess::scanWholeIndex(index, query, params, direction);
- return QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+/**
+ * "Finishes" the min object for the $min query option by filling in an empty object with
+ * MinKey/MaxKey and stripping field names.
+ *
+ * In the case that 'minObj' is empty, we "finish" it by filling in either MinKey or MaxKey
+ * instead. Choosing whether to use MinKey or MaxKey is done by comparing against 'maxObj'.
+ * For instance, suppose 'minObj' is empty, 'maxObj' is { a: 3 }, and the key pattern is
+ * { a: -1 }. According to the key pattern ordering, { a: 3 } < MinKey. This means that the
+ * proper resulting bounds are
+ *
+ * start: { '': MaxKey }, end: { '': 3 }
+ *
+ * as opposed to
+ *
+ * start: { '': MinKey }, end: { '': 3 }
+ *
+ * Suppose instead that the key pattern is { a: 1 }, with the same 'minObj' and 'maxObj'
+ * (that is, an empty object and { a: 3 } respectively). In this case, { a: 3 } > MinKey,
+ * which means that we use range [{'': MinKey}, {'': 3}]. The proper 'minObj' in this case is
+ * MinKey, whereas in the previous example it was MaxKey.
+ *
+ * If 'minObj' is non-empty, then all we do is strip its field names (because index keys always
+ * have empty field names).
+ */
+static BSONObj finishMinObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
+ BSONObjBuilder bob;
+ bob.appendMinKey("");
+ BSONObj minKey = bob.obj();
+
+ if (minObj.isEmpty()) {
+ if (0 > minKey.woCompare(maxObj, kp, false)) {
+ BSONObjBuilder minKeyBuilder;
+ minKeyBuilder.appendMinKey("");
+ return minKeyBuilder.obj();
+ } else {
+ BSONObjBuilder maxKeyBuilder;
+ maxKeyBuilder.appendMaxKey("");
+ return maxKeyBuilder.obj();
+ }
+ } else {
+ return stripFieldNames(minObj);
}
+}
- bool providesSort(const CanonicalQuery& query, const BSONObj& kp) {
- return query.getParsed().getSort().isPrefixOf(kp);
+/**
+ * "Finishes" the max object for the $max query option by filling in an empty object with
+ * MinKey/MaxKey and stripping field names.
+ *
+ * See comment for finishMinObj() for why we need both 'minObj' and 'maxObj'.
+ */
+static BSONObj finishMaxObj(const BSONObj& kp, const BSONObj& minObj, const BSONObj& maxObj) {
+ BSONObjBuilder bob;
+ bob.appendMaxKey("");
+ BSONObj maxKey = bob.obj();
+
+ if (maxObj.isEmpty()) {
+ if (0 < maxKey.woCompare(minObj, kp, false)) {
+ BSONObjBuilder maxKeyBuilder;
+ maxKeyBuilder.appendMaxKey("");
+ return maxKeyBuilder.obj();
+ } else {
+ BSONObjBuilder minKeyBuilder;
+ minKeyBuilder.appendMinKey("");
+ return minKeyBuilder.obj();
+ }
+ } else {
+ return stripFieldNames(maxObj);
+ }
+}
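The endpoint choice described in the finishMinObj() comment can be reduced to a sign calculation. The sketch below is a hypothetical standalone model (MinKey/MaxKey as -inf/+inf, the key pattern as a +1/-1 direction), not the BSON implementation: under a descending key pattern an empty min must become MaxKey, since that is the endpoint that precedes the max bound in index order.

#include <cassert>
#include <limits>

// Sketch only: direction is +1 for a key pattern like {a: 1} and -1 for {a: -1}.
double finishMinSketch(int direction, bool minEmpty, double minVal, double maxVal) {
    const double kMinKey = -std::numeric_limits<double>::infinity();
    const double kMaxKey = std::numeric_limits<double>::infinity();
    if (!minEmpty)
        return minVal;  // non-empty min is used as-is (the real code only strips field names)
    // Emulate woCompare under the key pattern: natural comparison times the direction sign.
    int cmp = direction * (kMinKey < maxVal ? -1 : 1);
    return cmp < 0 ? kMinKey : kMaxKey;  // pick the endpoint that sorts before maxVal
}

int main() {
    const double inf = std::numeric_limits<double>::infinity();
    // Key pattern {a: 1}, empty min, max {a: 3}: the scan starts at MinKey.
    assert(finishMinSketch(+1, true, 0, 3) == -inf);
    // Key pattern {a: -1}, empty min, max {a: 3}: the scan starts at MaxKey instead.
    assert(finishMinSketch(-1, true, 0, 3) == inf);
    return 0;
}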
+
+QuerySolution* buildCollscanSoln(const CanonicalQuery& query,
+ bool tailable,
+ const QueryPlannerParams& params) {
+ QuerySolutionNode* solnRoot = QueryPlannerAccess::makeCollectionScan(query, tailable, params);
+ return QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+}
+
+QuerySolution* buildWholeIXSoln(const IndexEntry& index,
+ const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ int direction = 1) {
+ QuerySolutionNode* solnRoot =
+ QueryPlannerAccess::scanWholeIndex(index, query, params, direction);
+ return QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+}
+
+bool providesSort(const CanonicalQuery& query, const BSONObj& kp) {
+ return query.getParsed().getSort().isPrefixOf(kp);
+}
+
+// static
+const int QueryPlanner::kPlannerVersion = 1;
+
+Status QueryPlanner::cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
+ const vector<IndexEntry>& relevantIndices,
+ PlanCacheIndexTree** out) {
+ // On any early return, the out-parameter must contain NULL.
+ *out = NULL;
+
+ if (NULL == taggedTree) {
+ return Status(ErrorCodes::BadValue, "Cannot produce cache data: tree is NULL.");
}
- // static
- const int QueryPlanner::kPlannerVersion = 1;
+ unique_ptr<PlanCacheIndexTree> indexTree(new PlanCacheIndexTree());
- Status QueryPlanner::cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
- const vector<IndexEntry>& relevantIndices,
- PlanCacheIndexTree** out) {
- // On any early return, the out-parameter must contain NULL.
- *out = NULL;
+ if (NULL != taggedTree->getTag()) {
+ IndexTag* itag = static_cast<IndexTag*>(taggedTree->getTag());
+ if (itag->index >= relevantIndices.size()) {
+ mongoutils::str::stream ss;
+ ss << "Index number is " << itag->index << " but there are only "
+ << relevantIndices.size() << " relevant indices.";
+ return Status(ErrorCodes::BadValue, ss);
+ }
- if (NULL == taggedTree) {
- return Status(ErrorCodes::BadValue, "Cannot produce cache data: tree is NULL.");
+ // Make sure not to cache solutions which use '2d' indices.
+ // A 2d index that doesn't wrap on one query may wrap on another, so we have to
+ // check that the index is OK with the predicate. The only thing we have to do
+ // this for is 2d. For now it's easier to move ahead if we don't cache 2d.
+ //
+ // TODO: revisit with a post-cached-index-assignment compatibility check
+ if (is2DIndex(relevantIndices[itag->index].keyPattern)) {
+ return Status(ErrorCodes::BadValue, "can't cache '2d' index");
}
- unique_ptr<PlanCacheIndexTree> indexTree(new PlanCacheIndexTree());
+ IndexEntry* ientry = new IndexEntry(relevantIndices[itag->index]);
+ indexTree->entry.reset(ientry);
+ indexTree->index_pos = itag->pos;
+ }
- if (NULL != taggedTree->getTag()) {
- IndexTag* itag = static_cast<IndexTag*>(taggedTree->getTag());
- if (itag->index >= relevantIndices.size()) {
- mongoutils::str::stream ss;
- ss << "Index number is " << itag->index
- << " but there are only " << relevantIndices.size()
- << " relevant indices.";
- return Status(ErrorCodes::BadValue, ss);
- }
+ for (size_t i = 0; i < taggedTree->numChildren(); ++i) {
+ MatchExpression* taggedChild = taggedTree->getChild(i);
+ PlanCacheIndexTree* indexTreeChild;
+ Status s = cacheDataFromTaggedTree(taggedChild, relevantIndices, &indexTreeChild);
+ if (!s.isOK()) {
+ return s;
+ }
+ indexTree->children.push_back(indexTreeChild);
+ }
- // Make sure not to cache solutions which use '2d' indices.
- // A 2d index that doesn't wrap on one query may wrap on another, so we have to
- // check that the index is OK with the predicate. The only thing we have to do
- // this for is 2d. For now it's easier to move ahead if we don't cache 2d.
- //
- // TODO: revisit with a post-cached-index-assignment compatibility check
- if (is2DIndex(relevantIndices[itag->index].keyPattern)) {
- return Status(ErrorCodes::BadValue, "can't cache '2d' index");
- }
+ *out = indexTree.release();
+ return Status::OK();
+}
- IndexEntry* ientry = new IndexEntry(relevantIndices[itag->index]);
- indexTree->entry.reset(ientry);
- indexTree->index_pos = itag->pos;
- }
+// static
+Status QueryPlanner::tagAccordingToCache(MatchExpression* filter,
+ const PlanCacheIndexTree* const indexTree,
+ const map<BSONObj, size_t>& indexMap) {
+ if (NULL == filter) {
+ return Status(ErrorCodes::BadValue, "Cannot tag tree: filter is NULL.");
+ }
+ if (NULL == indexTree) {
+ return Status(ErrorCodes::BadValue, "Cannot tag tree: indexTree is NULL.");
+ }
- for (size_t i = 0; i < taggedTree->numChildren(); ++i) {
- MatchExpression* taggedChild = taggedTree->getChild(i);
- PlanCacheIndexTree* indexTreeChild;
- Status s = cacheDataFromTaggedTree(taggedChild, relevantIndices, &indexTreeChild);
- if (!s.isOK()) {
- return s;
- }
- indexTree->children.push_back(indexTreeChild);
- }
+ // We're tagging the tree here, so it shouldn't have
+ // any tags hanging off yet.
+ verify(NULL == filter->getTag());
- *out = indexTree.release();
- return Status::OK();
+ if (filter->numChildren() != indexTree->children.size()) {
+ mongoutils::str::stream ss;
+ ss << "Cache topology and query did not match: "
+ << "query has " << filter->numChildren() << " children "
+ << "and cache has " << indexTree->children.size() << " children.";
+ return Status(ErrorCodes::BadValue, ss);
}
- // static
- Status QueryPlanner::tagAccordingToCache(MatchExpression* filter,
- const PlanCacheIndexTree* const indexTree,
- const map<BSONObj, size_t>& indexMap) {
- if (NULL == filter) {
- return Status(ErrorCodes::BadValue, "Cannot tag tree: filter is NULL.");
- }
- if (NULL == indexTree) {
- return Status(ErrorCodes::BadValue, "Cannot tag tree: indexTree is NULL.");
+ // Continue the depth-first tree traversal.
+ for (size_t i = 0; i < filter->numChildren(); ++i) {
+ Status s = tagAccordingToCache(filter->getChild(i), indexTree->children[i], indexMap);
+ if (!s.isOK()) {
+ return s;
}
+ }
- // We're tagging the tree here, so it shouldn't have
- // any tags hanging off yet.
- verify(NULL == filter->getTag());
-
- if (filter->numChildren() != indexTree->children.size()) {
+ if (NULL != indexTree->entry.get()) {
+ map<BSONObj, size_t>::const_iterator got = indexMap.find(indexTree->entry->keyPattern);
+ if (got == indexMap.end()) {
mongoutils::str::stream ss;
- ss << "Cache topology and query did not match: "
- << "query has " << filter->numChildren() << " children "
- << "and cache has " << indexTree->children.size() << " children.";
+ ss << "Did not find index with keyPattern: " << indexTree->entry->keyPattern.toString();
return Status(ErrorCodes::BadValue, ss);
}
-
- // Continue the depth-first tree traversal.
- for (size_t i = 0; i < filter->numChildren(); ++i) {
- Status s = tagAccordingToCache(filter->getChild(i), indexTree->children[i], indexMap);
- if (!s.isOK()) {
- return s;
- }
- }
-
- if (NULL != indexTree->entry.get()) {
- map<BSONObj, size_t>::const_iterator got = indexMap.find(indexTree->entry->keyPattern);
- if (got == indexMap.end()) {
- mongoutils::str::stream ss;
- ss << "Did not find index with keyPattern: " << indexTree->entry->keyPattern.toString();
- return Status(ErrorCodes::BadValue, ss);
- }
- filter->setTag(new IndexTag(got->second, indexTree->index_pos));
- }
-
- return Status::OK();
+ filter->setTag(new IndexTag(got->second, indexTree->index_pos));
}
- // static
- Status QueryPlanner::planFromCache(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- const CachedSolution& cachedSoln,
- QuerySolution** out) {
- invariant(!cachedSoln.plannerData.empty());
- invariant(out);
+ return Status::OK();
+}
- // A query not suitable for caching should not have made its way into the cache.
- invariant(PlanCache::shouldCacheQuery(query));
+// static
+Status QueryPlanner::planFromCache(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ const CachedSolution& cachedSoln,
+ QuerySolution** out) {
+ invariant(!cachedSoln.plannerData.empty());
+ invariant(out);
- // Look up winning solution in cached solution's array.
- const SolutionCacheData& winnerCacheData = *cachedSoln.plannerData[0];
+ // A query not suitable for caching should not have made its way into the cache.
+ invariant(PlanCache::shouldCacheQuery(query));
- if (SolutionCacheData::WHOLE_IXSCAN_SOLN == winnerCacheData.solnType) {
- // The solution can be constructed by a scan over the entire index.
- QuerySolution* soln = buildWholeIXSoln(*winnerCacheData.tree->entry,
- query,
- params,
- winnerCacheData.wholeIXSolnDir);
- if (soln == NULL) {
- return Status(ErrorCodes::BadValue,
- "plan cache error: soln that uses index to provide sort");
- }
- else {
- *out = soln;
- return Status::OK();
- }
+ // Look up winning solution in cached solution's array.
+ const SolutionCacheData& winnerCacheData = *cachedSoln.plannerData[0];
+
+ if (SolutionCacheData::WHOLE_IXSCAN_SOLN == winnerCacheData.solnType) {
+ // The solution can be constructed by a scan over the entire index.
+ QuerySolution* soln = buildWholeIXSoln(
+ *winnerCacheData.tree->entry, query, params, winnerCacheData.wholeIXSolnDir);
+ if (soln == NULL) {
+ return Status(ErrorCodes::BadValue,
+ "plan cache error: soln that uses index to provide sort");
+ } else {
+ *out = soln;
+ return Status::OK();
}
- else if (SolutionCacheData::COLLSCAN_SOLN == winnerCacheData.solnType) {
- // The cached solution is a collection scan. We don't cache collscans
- // with tailable==true, hence the false below.
- QuerySolution* soln = buildCollscanSoln(query, false, params);
- if (soln == NULL) {
- return Status(ErrorCodes::BadValue, "plan cache error: collection scan soln");
- }
- else {
- *out = soln;
- return Status::OK();
- }
+ } else if (SolutionCacheData::COLLSCAN_SOLN == winnerCacheData.solnType) {
+ // The cached solution is a collection scan. We don't cache collscans
+ // with tailable==true, hence the false below.
+ QuerySolution* soln = buildCollscanSoln(query, false, params);
+ if (soln == NULL) {
+ return Status(ErrorCodes::BadValue, "plan cache error: collection scan soln");
+ } else {
+ *out = soln;
+ return Status::OK();
}
+ }
- // SolutionCacheData::USE_TAGS_SOLN == cacheData->solnType
- // If we're here then this is neither the whole index scan or collection scan
- // cases, and we proceed by using the PlanCacheIndexTree to tag the query tree.
+ // SolutionCacheData::USE_TAGS_SOLN == cacheData->solnType
+    // If we're here then this is neither the whole index scan nor the collection scan
+    // case, and we proceed by using the PlanCacheIndexTree to tag the query tree.
+
+ // Create a copy of the expression tree. We use cachedSoln to annotate this with indices.
+ MatchExpression* clone = query.root()->shallowClone();
+
+ LOG(5) << "Tagging the match expression according to cache data: " << endl
+ << "Filter:" << endl
+ << clone->toString() << "Cache data:" << endl
+ << winnerCacheData.toString();
+
+ // Map from index name to index number.
+ // TODO: can we assume that the index numbering has the same lifetime
+ // as the cache state?
+ map<BSONObj, size_t> indexMap;
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ const IndexEntry& ie = params.indices[i];
+ indexMap[ie.keyPattern] = i;
+ LOG(5) << "Index " << i << ": " << ie.keyPattern.toString() << endl;
+ }
- // Create a copy of the expression tree. We use cachedSoln to annotate this with indices.
- MatchExpression* clone = query.root()->shallowClone();
+ Status s = tagAccordingToCache(clone, winnerCacheData.tree.get(), indexMap);
+ if (!s.isOK()) {
+ return s;
+ }
- LOG(5) << "Tagging the match expression according to cache data: " << endl
- << "Filter:" << endl << clone->toString()
- << "Cache data:" << endl << winnerCacheData.toString();
+ // The planner requires a defined sort order.
+ sortUsingTags(clone);
- // Map from index name to index number.
- // TODO: can we assume that the index numbering has the same lifetime
- // as the cache state?
- map<BSONObj, size_t> indexMap;
- for (size_t i = 0; i < params.indices.size(); ++i) {
- const IndexEntry& ie = params.indices[i];
- indexMap[ie.keyPattern] = i;
- LOG(5) << "Index " << i << ": " << ie.keyPattern.toString() << endl;
- }
+ LOG(5) << "Tagged tree:" << endl
+ << clone->toString();
- Status s = tagAccordingToCache(clone, winnerCacheData.tree.get(), indexMap);
- if (!s.isOK()) {
- return s;
- }
+ // Use the cached index assignments to build solnRoot. Takes ownership of clone.
+ QuerySolutionNode* solnRoot =
+ QueryPlannerAccess::buildIndexedDataAccess(query, clone, false, params.indices, params);
- // The planner requires a defined sort order.
- sortUsingTags(clone);
+ if (!solnRoot) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Failed to create data access plan from cache. Query: "
+ << query.toStringShort());
+ }
- LOG(5) << "Tagged tree:" << endl << clone->toString();
+ // Takes ownership of 'solnRoot'.
+ QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+ if (!soln) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Failed to analyze plan from cache. Query: " << query.toStringShort());
+ }
- // Use the cached index assignments to build solnRoot. Takes ownership of clone.
- QuerySolutionNode* solnRoot =
- QueryPlannerAccess::buildIndexedDataAccess(query, clone, false, params.indices, params);
+ LOG(5) << "Planner: solution constructed from the cache:\n" << soln->toString();
+ *out = soln;
+ return Status::OK();
+}
+
+// static
+Status QueryPlanner::plan(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolution*>* out) {
+ LOG(5) << "Beginning planning..." << endl
+ << "=============================" << endl
+ << "Options = " << optionString(params.options) << endl
+ << "Canonical query:" << endl
+ << query.toString() << "=============================" << endl;
+
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ LOG(5) << "Index " << i << " is " << params.indices[i].toString() << endl;
+ }
- if (!solnRoot) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to create data access plan from cache. Query: "
- << query.toStringShort());
- }
+ bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);
- // Takes ownership of 'solnRoot'.
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
- if (!soln) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Failed to analyze plan from cache. Query: "
- << query.toStringShort());
+ // If the query requests a tailable cursor, the only solution is a collscan + filter with
+ // tailable set on the collscan. TODO: This is a policy departure. Previously I think you
+ // could ask for a tailable cursor and it just tried to give you one. Now, we fail if we
+ // can't provide one. Is this what we want?
+ if (query.getParsed().isTailable()) {
+ if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) && canTableScan) {
+ QuerySolution* soln = buildCollscanSoln(query, true, params);
+ if (NULL != soln) {
+ out->push_back(soln);
+ }
}
-
- LOG(5) << "Planner: solution constructed from the cache:\n" << soln->toString();
- *out = soln;
return Status::OK();
}
- // static
- Status QueryPlanner::plan(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- std::vector<QuerySolution*>* out) {
-
- LOG(5) << "Beginning planning..." << endl
- << "=============================" << endl
- << "Options = " << optionString(params.options) << endl
- << "Canonical query:" << endl << query.toString()
- << "=============================" << endl;
-
- for (size_t i = 0; i < params.indices.size(); ++i) {
- LOG(5) << "Index " << i << " is " << params.indices[i].toString() << endl;
- }
-
- bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);
-
- // If the query requests a tailable cursor, the only solution is a collscan + filter with
- // tailable set on the collscan. TODO: This is a policy departure. Previously I think you
- // could ask for a tailable cursor and it just tried to give you one. Now, we fail if we
- // can't provide one. Is this what we want?
- if (query.getParsed().isTailable()) {
- if (!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
- && canTableScan) {
- QuerySolution* soln = buildCollscanSoln(query, true, params);
+ // The hint or sort can be $natural: 1. If this happens, output a collscan. If both
+ // a $natural hint and a $natural sort are specified, then the direction of the collscan
+ // is determined by the sign of the sort (not the sign of the hint).
+ if (!query.getParsed().getHint().isEmpty() || !query.getParsed().getSort().isEmpty()) {
+ BSONObj hintObj = query.getParsed().getHint();
+ BSONObj sortObj = query.getParsed().getSort();
+ BSONElement naturalHint = hintObj.getFieldDotted("$natural");
+ BSONElement naturalSort = sortObj.getFieldDotted("$natural");
+
+ // A hint overrides a $natural sort. This means that we don't force a table
+ // scan if there is a $natural sort with a non-$natural hint.
+ if (!naturalHint.eoo() || (!naturalSort.eoo() && hintObj.isEmpty())) {
+ LOG(5) << "Forcing a table scan due to hinted $natural\n";
+ // min/max are incompatible with $natural.
+ if (canTableScan && query.getParsed().getMin().isEmpty() &&
+ query.getParsed().getMax().isEmpty()) {
+ QuerySolution* soln = buildCollscanSoln(query, false, params);
if (NULL != soln) {
out->push_back(soln);
}
}
return Status::OK();
}
+ }
- // The hint or sort can be $natural: 1. If this happens, output a collscan. If both
- // a $natural hint and a $natural sort are specified, then the direction of the collscan
- // is determined by the sign of the sort (not the sign of the hint).
- if (!query.getParsed().getHint().isEmpty() || !query.getParsed().getSort().isEmpty()) {
- BSONObj hintObj = query.getParsed().getHint();
- BSONObj sortObj = query.getParsed().getSort();
- BSONElement naturalHint = hintObj.getFieldDotted("$natural");
- BSONElement naturalSort = sortObj.getFieldDotted("$natural");
-
- // A hint overrides a $natural sort. This means that we don't force a table
- // scan if there is a $natural sort with a non-$natural hint.
- if (!naturalHint.eoo() || (!naturalSort.eoo() && hintObj.isEmpty())) {
- LOG(5) << "Forcing a table scan due to hinted $natural\n";
- // min/max are incompatible with $natural.
- if (canTableScan && query.getParsed().getMin().isEmpty()
- && query.getParsed().getMax().isEmpty()) {
- QuerySolution* soln = buildCollscanSoln(query, false, params);
- if (NULL != soln) {
- out->push_back(soln);
- }
- }
- return Status::OK();
- }
- }
+ // Figure out what fields we care about.
+ unordered_set<string> fields;
+ QueryPlannerIXSelect::getFields(query.root(), "", &fields);
- // Figure out what fields we care about.
- unordered_set<string> fields;
- QueryPlannerIXSelect::getFields(query.root(), "", &fields);
+ for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
+ LOG(5) << "Predicate over field '" << *it << "'" << endl;
+ }
- for (unordered_set<string>::const_iterator it = fields.begin(); it != fields.end(); ++it) {
- LOG(5) << "Predicate over field '" << *it << "'" << endl;
- }
+ // Filter our indices so we only look at indices that are over our predicates.
+ vector<IndexEntry> relevantIndices;
- // Filter our indices so we only look at indices that are over our predicates.
- vector<IndexEntry> relevantIndices;
+ // Hints require us to only consider the hinted index.
+ // If index filters in the query settings were used to override
+ // the allowed indices for planning, we should not use the hinted index
+ // requested in the query.
+ BSONObj hintIndex;
+ if (!params.indexFiltersApplied) {
+ hintIndex = query.getParsed().getHint();
+ }
- // Hints require us to only consider the hinted index.
- // If index filters in the query settings were used to override
- // the allowed indices for planning, we should not use the hinted index
- // requested in the query.
- BSONObj hintIndex;
- if (!params.indexFiltersApplied) {
- hintIndex = query.getParsed().getHint();
+ // Snapshot is a form of a hint. If snapshot is set, try to use _id index to make a real
+ // plan. If that fails, just scan the _id index.
+ if (query.getParsed().isSnapshot()) {
+ // Find the ID index in indexKeyPatterns. It's our hint.
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ if (isIdIndex(params.indices[i].keyPattern)) {
+ hintIndex = params.indices[i].keyPattern;
+ break;
+ }
}
+ }
- // Snapshot is a form of a hint. If snapshot is set, try to use _id index to make a real
- // plan. If that fails, just scan the _id index.
- if (query.getParsed().isSnapshot()) {
- // Find the ID index in indexKeyPatterns. It's our hint.
+ size_t hintIndexNumber = numeric_limits<size_t>::max();
+
+ if (hintIndex.isEmpty()) {
+ QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
+ } else {
+ // Sigh. If the hint is specified it might be using the index name.
+ BSONElement firstHintElt = hintIndex.firstElement();
+ if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
+ string hintName = firstHintElt.String();
for (size_t i = 0; i < params.indices.size(); ++i) {
- if (isIdIndex(params.indices[i].keyPattern)) {
+ if (params.indices[i].name == hintName) {
+ LOG(5) << "Hint by name specified, restricting indices to "
+ << params.indices[i].keyPattern.toString() << endl;
+ relevantIndices.clear();
+ relevantIndices.push_back(params.indices[i]);
+ hintIndexNumber = i;
hintIndex = params.indices[i].keyPattern;
break;
}
}
- }
-
- size_t hintIndexNumber = numeric_limits<size_t>::max();
-
- if (hintIndex.isEmpty()) {
- QueryPlannerIXSelect::findRelevantIndices(fields, params.indices, &relevantIndices);
- }
- else {
- // Sigh. If the hint is specified it might be using the index name.
- BSONElement firstHintElt = hintIndex.firstElement();
- if (str::equals("$hint", firstHintElt.fieldName()) && String == firstHintElt.type()) {
- string hintName = firstHintElt.String();
- for (size_t i = 0; i < params.indices.size(); ++i) {
- if (params.indices[i].name == hintName) {
- LOG(5) << "Hint by name specified, restricting indices to "
- << params.indices[i].keyPattern.toString() << endl;
- relevantIndices.clear();
- relevantIndices.push_back(params.indices[i]);
- hintIndexNumber = i;
- hintIndex = params.indices[i].keyPattern;
- break;
- }
- }
- }
- else {
- for (size_t i = 0; i < params.indices.size(); ++i) {
- if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
- relevantIndices.clear();
- relevantIndices.push_back(params.indices[i]);
- LOG(5) << "Hint specified, restricting indices to " << hintIndex.toString()
- << endl;
- hintIndexNumber = i;
- break;
- }
+ } else {
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ if (0 == params.indices[i].keyPattern.woCompare(hintIndex)) {
+ relevantIndices.clear();
+ relevantIndices.push_back(params.indices[i]);
+ LOG(5) << "Hint specified, restricting indices to " << hintIndex.toString()
+ << endl;
+ hintIndexNumber = i;
+ break;
}
}
-
- if (hintIndexNumber == numeric_limits<size_t>::max()) {
- return Status(ErrorCodes::BadValue, "bad hint");
- }
}
- // Deal with the .min() and .max() query options. If either exist we can only use an index
- // that matches the object inside.
- if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
- BSONObj minObj = query.getParsed().getMin();
- BSONObj maxObj = query.getParsed().getMax();
-
- // The unfinished siblings of these objects may not be proper index keys because they
- // may be empty objects or have field names. When an index is picked to use for the
- // min/max query, these "finished" objects will always be valid index keys for the
- // index's key pattern.
- BSONObj finishedMinObj;
- BSONObj finishedMaxObj;
-
- // This is the index into params.indices[...] that we use.
- size_t idxNo = numeric_limits<size_t>::max();
-
- // If there's an index hinted we need to be able to use it.
- if (!hintIndex.isEmpty()) {
- if (!minObj.isEmpty() && !indexCompatibleMaxMin(minObj, hintIndex)) {
- LOG(5) << "Minobj doesn't work with hint";
- return Status(ErrorCodes::BadValue,
- "hint provided does not work with min query");
- }
+ if (hintIndexNumber == numeric_limits<size_t>::max()) {
+ return Status(ErrorCodes::BadValue, "bad hint");
+ }
+ }
- if (!maxObj.isEmpty() && !indexCompatibleMaxMin(maxObj, hintIndex)) {
- LOG(5) << "Maxobj doesn't work with hint";
- return Status(ErrorCodes::BadValue,
- "hint provided does not work with max query");
- }
+ // Deal with the .min() and .max() query options. If either exist we can only use an index
+ // that matches the object inside.
+ if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
+ BSONObj minObj = query.getParsed().getMin();
+ BSONObj maxObj = query.getParsed().getMax();
- const BSONObj& kp = params.indices[hintIndexNumber].keyPattern;
- finishedMinObj = finishMinObj(kp, minObj, maxObj);
- finishedMaxObj = finishMaxObj(kp, minObj, maxObj);
+ // The unfinished siblings of these objects may not be proper index keys because they
+ // may be empty objects or have field names. When an index is picked to use for the
+ // min/max query, these "finished" objects will always be valid index keys for the
+ // index's key pattern.
+ BSONObj finishedMinObj;
+ BSONObj finishedMaxObj;
- // The min must be less than the max for the hinted index ordering.
- if (0 <= finishedMinObj.woCompare(finishedMaxObj, kp, false)) {
- LOG(5) << "Minobj/Maxobj don't work with hint";
- return Status(ErrorCodes::BadValue,
- "hint provided does not work with min/max query");
- }
+ // This is the index into params.indices[...] that we use.
+ size_t idxNo = numeric_limits<size_t>::max();
- idxNo = hintIndexNumber;
+ // If there's an index hinted we need to be able to use it.
+ if (!hintIndex.isEmpty()) {
+ if (!minObj.isEmpty() && !indexCompatibleMaxMin(minObj, hintIndex)) {
+ LOG(5) << "Minobj doesn't work with hint";
+ return Status(ErrorCodes::BadValue, "hint provided does not work with min query");
}
- else {
- // No hinted index, look for one that is compatible (has same field names and
- // ordering thereof).
- for (size_t i = 0; i < params.indices.size(); ++i) {
- const BSONObj& kp = params.indices[i].keyPattern;
-
- BSONObj toUse = minObj.isEmpty() ? maxObj : minObj;
- if (indexCompatibleMaxMin(toUse, kp)) {
- // In order to be fully compatible, the min has to be less than the max
- // according to the index key pattern ordering. The first step in verifying
- // this is "finish" the min and max by replacing empty objects and stripping
- // field names.
- finishedMinObj = finishMinObj(kp, minObj, maxObj);
- finishedMaxObj = finishMaxObj(kp, minObj, maxObj);
-
- // Now we have the final min and max. This index is only relevant for
- // the min/max query if min < max.
- if (0 >= finishedMinObj.woCompare(finishedMaxObj, kp, false)) {
- // Found a relevant index.
- idxNo = i;
- break;
- }
-
- // This index is not relevant; move on to the next.
- }
- }
+
+ if (!maxObj.isEmpty() && !indexCompatibleMaxMin(maxObj, hintIndex)) {
+ LOG(5) << "Maxobj doesn't work with hint";
+ return Status(ErrorCodes::BadValue, "hint provided does not work with max query");
}
- if (idxNo == numeric_limits<size_t>::max()) {
- LOG(5) << "Can't find relevant index to use for max/min query";
- // Can't find an index to use, bail out.
+ const BSONObj& kp = params.indices[hintIndexNumber].keyPattern;
+ finishedMinObj = finishMinObj(kp, minObj, maxObj);
+ finishedMaxObj = finishMaxObj(kp, minObj, maxObj);
+
+ // The min must be less than the max for the hinted index ordering.
+ if (0 <= finishedMinObj.woCompare(finishedMaxObj, kp, false)) {
+ LOG(5) << "Minobj/Maxobj don't work with hint";
return Status(ErrorCodes::BadValue,
- "unable to find relevant index for max/min query");
+ "hint provided does not work with min/max query");
}
- LOG(5) << "Max/min query using index " << params.indices[idxNo].toString() << endl;
-
- // Make our scan and output.
- QuerySolutionNode* solnRoot = QueryPlannerAccess::makeIndexScan(params.indices[idxNo],
- query,
- params,
- finishedMinObj,
- finishedMaxObj);
+ idxNo = hintIndexNumber;
+ } else {
+ // No hinted index, look for one that is compatible (has same field names and
+ // ordering thereof).
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ const BSONObj& kp = params.indices[i].keyPattern;
+
+ BSONObj toUse = minObj.isEmpty() ? maxObj : minObj;
+ if (indexCompatibleMaxMin(toUse, kp)) {
+ // In order to be fully compatible, the min has to be less than the max
+ // according to the index key pattern ordering. The first step in verifying
+ // this is "finish" the min and max by replacing empty objects and stripping
+ // field names.
+ finishedMinObj = finishMinObj(kp, minObj, maxObj);
+ finishedMaxObj = finishMaxObj(kp, minObj, maxObj);
+
+ // Now we have the final min and max. This index is only relevant for
+ // the min/max query if min < max.
+ if (0 >= finishedMinObj.woCompare(finishedMaxObj, kp, false)) {
+ // Found a relevant index.
+ idxNo = i;
+ break;
+ }
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
- if (NULL != soln) {
- out->push_back(soln);
+ // This index is not relevant; move on to the next.
+ }
}
+ }
- return Status::OK();
+ if (idxNo == numeric_limits<size_t>::max()) {
+ LOG(5) << "Can't find relevant index to use for max/min query";
+ // Can't find an index to use, bail out.
+ return Status(ErrorCodes::BadValue, "unable to find relevant index for max/min query");
}
- for (size_t i = 0; i < relevantIndices.size(); ++i) {
- LOG(2) << "Relevant index " << i << " is " << relevantIndices[i].toString() << endl;
+ LOG(5) << "Max/min query using index " << params.indices[idxNo].toString() << endl;
+
+ // Make our scan and output.
+ QuerySolutionNode* solnRoot = QueryPlannerAccess::makeIndexScan(
+ params.indices[idxNo], query, params, finishedMinObj, finishedMaxObj);
+
+ QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+ if (NULL != soln) {
+ out->push_back(soln);
}
- // Figure out how useful each index is to each predicate.
- QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);
- QueryPlannerIXSelect::stripInvalidAssignments(query.root(), relevantIndices);
+ return Status::OK();
+ }
+
+ for (size_t i = 0; i < relevantIndices.size(); ++i) {
+ LOG(2) << "Relevant index " << i << " is " << relevantIndices[i].toString() << endl;
+ }
- // Unless we have GEO_NEAR, TEXT, or a projection, we may be able to apply an optimization
- // in which we strip unnecessary index assignments.
- //
- // Disallowed with projection because assignment to a non-unique index can allow the plan
- // to be covered.
- //
- // TEXT and GEO_NEAR are special because they require the use of a text/geo index in order
- // to be evaluated correctly. Stripping these "mandatory assignments" is therefore invalid.
- if (query.getParsed().getProj().isEmpty()
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
- QueryPlannerIXSelect::stripUnneededAssignments(query.root(), relevantIndices);
+ // Figure out how useful each index is to each predicate.
+ QueryPlannerIXSelect::rateIndices(query.root(), "", relevantIndices);
+ QueryPlannerIXSelect::stripInvalidAssignments(query.root(), relevantIndices);
+
+ // Unless we have GEO_NEAR, TEXT, or a projection, we may be able to apply an optimization
+ // in which we strip unnecessary index assignments.
+ //
+ // Disallowed with projection because assignment to a non-unique index can allow the plan
+ // to be covered.
+ //
+ // TEXT and GEO_NEAR are special because they require the use of a text/geo index in order
+ // to be evaluated correctly. Stripping these "mandatory assignments" is therefore invalid.
+ if (query.getParsed().getProj().isEmpty() &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
+ QueryPlannerIXSelect::stripUnneededAssignments(query.root(), relevantIndices);
+ }
+
+ // query.root() is now annotated with RelevantTag(s).
+ LOG(5) << "Rated tree:" << endl
+ << query.root()->toString();
+
+ // If there is a GEO_NEAR it must have an index it can use directly.
+ MatchExpression* gnNode = NULL;
+ if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
+ // No index for GEO_NEAR? No query.
+ RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
+ if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
+ LOG(5) << "Unable to find index for $geoNear query." << endl;
+ // Don't leave tags on query tree.
+ query.root()->resetTag();
+ return Status(ErrorCodes::BadValue, "unable to find index for $geoNear query");
}
- // query.root() is now annotated with RelevantTag(s).
- LOG(5) << "Rated tree:" << endl << query.root()->toString();
-
- // If there is a GEO_NEAR it must have an index it can use directly.
- MatchExpression* gnNode = NULL;
- if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) {
- // No index for GEO_NEAR? No query.
- RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag());
- if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
- LOG(5) << "Unable to find index for $geoNear query." << endl;
- // Don't leave tags on query tree.
- query.root()->resetTag();
- return Status(ErrorCodes::BadValue, "unable to find index for $geoNear query");
+ LOG(5) << "Rated tree after geonear processing:" << query.root()->toString();
+ }
+
+ // Likewise, if there is a TEXT it must have an index it can use directly.
+ MatchExpression* textNode = NULL;
+ if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
+ RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());
+
+ // Exactly one text index required for TEXT. We need to check this explicitly because
+ // the text stage can't be built if no text index exists or there is an ambiguity as to
+ // which one to use.
+ size_t textIndexCount = 0;
+ for (size_t i = 0; i < params.indices.size(); i++) {
+ if (INDEX_TEXT == params.indices[i].type) {
+ textIndexCount++;
}
+ }
+ if (textIndexCount != 1) {
+ // Don't leave tags on query tree.
+ query.root()->resetTag();
+ return Status(ErrorCodes::BadValue, "need exactly one text index for $text query");
+ }
- LOG(5) << "Rated tree after geonear processing:" << query.root()->toString();
+ // Error if the text node is tagged with zero indices.
+ if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
+ // Don't leave tags on query tree.
+ query.root()->resetTag();
+ return Status(ErrorCodes::BadValue,
+ "failed to use text index to satisfy $text query (if text index is "
+ "compound, are equality predicates given for all prefix fields?)");
}
- // Likewise, if there is a TEXT it must have an index it can use directly.
- MatchExpression* textNode = NULL;
- if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) {
- RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag());
-
- // Exactly one text index required for TEXT. We need to check this explicitly because
- // the text stage can't be built if no text index exists or there is an ambiguity as to
- // which one to use.
- size_t textIndexCount = 0;
- for (size_t i = 0; i < params.indices.size(); i++) {
- if (INDEX_TEXT == params.indices[i].type) {
- textIndexCount++;
- }
- }
- if (textIndexCount != 1) {
- // Don't leave tags on query tree.
- query.root()->resetTag();
- return Status(ErrorCodes::BadValue, "need exactly one text index for $text query");
- }
+ // At this point, we know that there is only one text index and that the TEXT node is
+ // assigned to it.
+ invariant(1 == tag->first.size() + tag->notFirst.size());
- // Error if the text node is tagged with zero indices.
- if (0 == tag->first.size() && 0 == tag->notFirst.size()) {
- // Don't leave tags on query tree.
- query.root()->resetTag();
- return Status(ErrorCodes::BadValue,
- "failed to use text index to satisfy $text query (if text index is "
- "compound, are equality predicates given for all prefix fields?)");
- }
+ LOG(5) << "Rated tree after text processing:" << query.root()->toString();
+ }
- // At this point, we know that there is only one text index and that the TEXT node is
- // assigned to it.
- invariant(1 == tag->first.size() + tag->notFirst.size());
+ // If we have any relevant indices, we try to create indexed plans.
+ if (0 < relevantIndices.size()) {
+ // The enumerator spits out trees tagged with IndexTag(s).
+ PlanEnumeratorParams enumParams;
+ enumParams.intersect = params.options & QueryPlannerParams::INDEX_INTERSECTION;
+ enumParams.root = query.root();
+ enumParams.indices = &relevantIndices;
- LOG(5) << "Rated tree after text processing:" << query.root()->toString();
- }
+ PlanEnumerator isp(enumParams);
+ isp.init();
- // If we have any relevant indices, we try to create indexed plans.
- if (0 < relevantIndices.size()) {
- // The enumerator spits out trees tagged with IndexTag(s).
- PlanEnumeratorParams enumParams;
- enumParams.intersect = params.options & QueryPlannerParams::INDEX_INTERSECTION;
- enumParams.root = query.root();
- enumParams.indices = &relevantIndices;
-
- PlanEnumerator isp(enumParams);
- isp.init();
-
- MatchExpression* rawTree;
- while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
- LOG(5) << "About to build solntree from tagged tree:" << endl
- << rawTree->toString();
-
- // The tagged tree produced by the plan enumerator is not guaranteed
- // to be canonically sorted. In order to be compatible with the cached
- // data, sort the tagged tree according to CanonicalQuery ordering.
- std::unique_ptr<MatchExpression> clone(rawTree->shallowClone());
- CanonicalQuery::sortTree(clone.get());
-
- PlanCacheIndexTree* cacheData;
- Status indexTreeStatus = cacheDataFromTaggedTree(clone.get(), relevantIndices, &cacheData);
- if (!indexTreeStatus.isOK()) {
- LOG(5) << "Query is not cachable: " << indexTreeStatus.reason() << endl;
- }
- unique_ptr<PlanCacheIndexTree> autoData(cacheData);
+ MatchExpression* rawTree;
+ while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
+ LOG(5) << "About to build solntree from tagged tree:" << endl
+ << rawTree->toString();
- // This can fail if enumeration makes a mistake.
- QuerySolutionNode* solnRoot =
- QueryPlannerAccess::buildIndexedDataAccess(query, rawTree, false,
- relevantIndices, params);
+ // The tagged tree produced by the plan enumerator is not guaranteed
+ // to be canonically sorted. In order to be compatible with the cached
+ // data, sort the tagged tree according to CanonicalQuery ordering.
+ std::unique_ptr<MatchExpression> clone(rawTree->shallowClone());
+ CanonicalQuery::sortTree(clone.get());
- if (NULL == solnRoot) { continue; }
+ PlanCacheIndexTree* cacheData;
+ Status indexTreeStatus =
+ cacheDataFromTaggedTree(clone.get(), relevantIndices, &cacheData);
+ if (!indexTreeStatus.isOK()) {
+ LOG(5) << "Query is not cachable: " << indexTreeStatus.reason() << endl;
+ }
+ unique_ptr<PlanCacheIndexTree> autoData(cacheData);
- QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query,
- params,
- solnRoot);
- if (NULL != soln) {
- LOG(5) << "Planner: adding solution:" << endl << soln->toString();
- if (indexTreeStatus.isOK()) {
- SolutionCacheData* scd = new SolutionCacheData();
- scd->tree.reset(autoData.release());
- soln->cacheData.reset(scd);
- }
- out->push_back(soln);
+ // This can fail if enumeration makes a mistake.
+ QuerySolutionNode* solnRoot = QueryPlannerAccess::buildIndexedDataAccess(
+ query, rawTree, false, relevantIndices, params);
+
+ if (NULL == solnRoot) {
+ continue;
+ }
+
+ QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
+ if (NULL != soln) {
+ LOG(5) << "Planner: adding solution:" << endl
+ << soln->toString();
+ if (indexTreeStatus.isOK()) {
+ SolutionCacheData* scd = new SolutionCacheData();
+ scd->tree.reset(autoData.release());
+ soln->cacheData.reset(scd);
}
+ out->push_back(soln);
}
}
+ }
- // Don't leave tags on query tree.
- query.root()->resetTag();
-
- LOG(5) << "Planner: outputted " << out->size() << " indexed solutions.\n";
-
- // Produce legible error message for failed OR planning with a TEXT child.
- // TODO: support collection scan for non-TEXT children of OR.
- if (out->size() == 0 && textNode != NULL &&
- MatchExpression::OR == query.root()->matchType()) {
- MatchExpression* root = query.root();
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (textNode == root->getChild(i)) {
- return Status(ErrorCodes::BadValue,
- "Failed to produce a solution for TEXT under OR - "
- "other non-TEXT clauses under OR have to be indexed as well.");
- }
+ // Don't leave tags on query tree.
+ query.root()->resetTag();
+
+ LOG(5) << "Planner: outputted " << out->size() << " indexed solutions.\n";
+
+ // Produce legible error message for failed OR planning with a TEXT child.
+ // TODO: support collection scan for non-TEXT children of OR.
+ if (out->size() == 0 && textNode != NULL && MatchExpression::OR == query.root()->matchType()) {
+ MatchExpression* root = query.root();
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (textNode == root->getChild(i)) {
+ return Status(ErrorCodes::BadValue,
+ "Failed to produce a solution for TEXT under OR - "
+ "other non-TEXT clauses under OR have to be indexed as well.");
}
}
+ }
- // An index was hinted. If there are any solutions, they use the hinted index. If not, we
- // scan the entire index to provide results and output that as our plan. This is the
- // desired behavior when an index is hinted that is not relevant to the query.
- if (!hintIndex.isEmpty()) {
- if (0 == out->size()) {
- QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber],
- query, params);
- verify(NULL != soln);
- LOG(5) << "Planner: outputting soln that uses hinted index as scan." << endl;
- out->push_back(soln);
+ // An index was hinted. If there are any solutions, they use the hinted index. If not, we
+ // scan the entire index to provide results and output that as our plan. This is the
+ // desired behavior when an index is hinted that is not relevant to the query.
+ if (!hintIndex.isEmpty()) {
+ if (0 == out->size()) {
+ QuerySolution* soln = buildWholeIXSoln(params.indices[hintIndexNumber], query, params);
+ verify(NULL != soln);
+ LOG(5) << "Planner: outputting soln that uses hinted index as scan." << endl;
+ out->push_back(soln);
+ }
+ return Status::OK();
+ }
+
+ // If a sort order is requested, there may be an index that provides it, even if that
+ // index is not over any predicates in the query.
+ //
+ if (!query.getParsed().getSort().isEmpty() &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
+ // See if we have a sort provided from an index already.
+ // This is implied by the presence of a non-blocking solution.
+ bool usingIndexToSort = false;
+ for (size_t i = 0; i < out->size(); ++i) {
+ QuerySolution* soln = (*out)[i];
+ if (!soln->hasBlockingStage) {
+ usingIndexToSort = true;
+ break;
}
- return Status::OK();
}
- // If a sort order is requested, there may be an index that provides it, even if that
- // index is not over any predicates in the query.
- //
- if (!query.getParsed().getSort().isEmpty()
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
-
- // See if we have a sort provided from an index already.
- // This is implied by the presence of a non-blocking solution.
- bool usingIndexToSort = false;
- for (size_t i = 0; i < out->size(); ++i) {
- QuerySolution* soln = (*out)[i];
- if (!soln->hasBlockingStage) {
- usingIndexToSort = true;
- break;
+ if (!usingIndexToSort) {
+ for (size_t i = 0; i < params.indices.size(); ++i) {
+ const IndexEntry& index = params.indices[i];
+ // Only regular (non-plugin) indexes can be used to provide a sort.
+ if (index.type != INDEX_BTREE) {
+ continue;
+ }
+ // Only non-sparse indexes can be used to provide a sort.
+ if (index.sparse) {
+ continue;
}
- }
- if (!usingIndexToSort) {
- for (size_t i = 0; i < params.indices.size(); ++i) {
- const IndexEntry& index = params.indices[i];
- // Only regular (non-plugin) indexes can be used to provide a sort.
- if (index.type != INDEX_BTREE) {
- continue;
- }
- // Only non-sparse indexes can be used to provide a sort.
- if (index.sparse) {
- continue;
- }
+ // TODO: Sparse indexes can't normally provide a sort, because non-indexed
+ // documents could potentially be missing from the result set. However, if the
+ // query predicate can be used to guarantee that all documents to be returned
+ // are indexed, then the index should be able to provide the sort.
+ //
+ // For example:
+ // - Sparse index {a: 1, b: 1} should be able to provide a sort for
+ // find({b: 1}).sort({a: 1}). SERVER-13908.
+ // - Index {a: 1, b: "2dsphere"} (which is "geo-sparse", if
+ // 2dsphereIndexVersion=2) should be able to provide a sort for
+ // find({b: GEO}).sort({a:1}). SERVER-10801.
+
+ const BSONObj kp = QueryPlannerAnalysis::getSortPattern(index.keyPattern);
+ if (providesSort(query, kp)) {
+ LOG(5) << "Planner: outputting soln that uses index to provide sort." << endl;
+ QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params);
+ if (NULL != soln) {
+ PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
+ indexTree->setIndexEntry(params.indices[i]);
+ SolutionCacheData* scd = new SolutionCacheData();
+ scd->tree.reset(indexTree);
+ scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
+ scd->wholeIXSolnDir = 1;
- // TODO: Sparse indexes can't normally provide a sort, because non-indexed
- // documents could potentially be missing from the result set. However, if the
- // query predicate can be used to guarantee that all documents to be returned
- // are indexed, then the index should be able to provide the sort.
- //
- // For example:
- // - Sparse index {a: 1, b: 1} should be able to provide a sort for
- // find({b: 1}).sort({a: 1}). SERVER-13908.
- // - Index {a: 1, b: "2dsphere"} (which is "geo-sparse", if
- // 2dsphereIndexVersion=2) should be able to provide a sort for
- // find({b: GEO}).sort({a:1}). SERVER-10801.
-
- const BSONObj kp = QueryPlannerAnalysis::getSortPattern(index.keyPattern);
- if (providesSort(query, kp)) {
- LOG(5) << "Planner: outputting soln that uses index to provide sort."
- << endl;
- QuerySolution* soln = buildWholeIXSoln(params.indices[i],
- query, params);
- if (NULL != soln) {
- PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
- indexTree->setIndexEntry(params.indices[i]);
- SolutionCacheData* scd = new SolutionCacheData();
- scd->tree.reset(indexTree);
- scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
- scd->wholeIXSolnDir = 1;
-
- soln->cacheData.reset(scd);
- out->push_back(soln);
- break;
- }
+ soln->cacheData.reset(scd);
+ out->push_back(soln);
+ break;
}
- if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
- LOG(5) << "Planner: outputting soln that uses (reverse) index "
- << "to provide sort." << endl;
- QuerySolution* soln = buildWholeIXSoln(params.indices[i], query,
- params, -1);
- if (NULL != soln) {
- PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
- indexTree->setIndexEntry(params.indices[i]);
- SolutionCacheData* scd = new SolutionCacheData();
- scd->tree.reset(indexTree);
- scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
- scd->wholeIXSolnDir = -1;
-
- soln->cacheData.reset(scd);
- out->push_back(soln);
- break;
- }
+ }
+ if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) {
+ LOG(5) << "Planner: outputting soln that uses (reverse) index "
+ << "to provide sort." << endl;
+ QuerySolution* soln = buildWholeIXSoln(params.indices[i], query, params, -1);
+ if (NULL != soln) {
+ PlanCacheIndexTree* indexTree = new PlanCacheIndexTree();
+ indexTree->setIndexEntry(params.indices[i]);
+ SolutionCacheData* scd = new SolutionCacheData();
+ scd->tree.reset(indexTree);
+ scd->solnType = SolutionCacheData::WHOLE_IXSCAN_SOLN;
+ scd->wholeIXSolnDir = -1;
+
+ soln->cacheData.reset(scd);
+ out->push_back(soln);
+ break;
}
}
}
}
+ }
- // geoNear and text queries *require* an index.
- // Also, if a hint is specified it indicates that we MUST use it.
- bool possibleToCollscan = !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)
- && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
- && hintIndex.isEmpty();
-
- // The caller can explicitly ask for a collscan.
- bool collscanRequested = (params.options & QueryPlannerParams::INCLUDE_COLLSCAN);
-
- // No indexed plans? We must provide a collscan if possible or else we can't run the query.
- bool collscanNeeded = (0 == out->size() && canTableScan);
-
- if (possibleToCollscan && (collscanRequested || collscanNeeded)) {
- QuerySolution* collscan = buildCollscanSoln(query, false, params);
- if (NULL != collscan) {
- SolutionCacheData* scd = new SolutionCacheData();
- scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
- collscan->cacheData.reset(scd);
- out->push_back(collscan);
- LOG(5) << "Planner: outputting a collscan:" << endl
- << collscan->toString();
- }
- }
+ // geoNear and text queries *require* an index.
+ // Also, if a hint is specified it indicates that we MUST use it.
+ bool possibleToCollscan =
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
+ !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) && hintIndex.isEmpty();
- return Status::OK();
+ // The caller can explicitly ask for a collscan.
+ bool collscanRequested = (params.options & QueryPlannerParams::INCLUDE_COLLSCAN);
+
+ // No indexed plans? We must provide a collscan if possible or else we can't run the query.
+ bool collscanNeeded = (0 == out->size() && canTableScan);
+
+ if (possibleToCollscan && (collscanRequested || collscanNeeded)) {
+ QuerySolution* collscan = buildCollscanSoln(query, false, params);
+ if (NULL != collscan) {
+ SolutionCacheData* scd = new SolutionCacheData();
+ scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
+ collscan->cacheData.reset(scd);
+ out->push_back(collscan);
+ LOG(5) << "Planner: outputting a collscan:" << endl
+ << collscan->toString();
+ }
}
+ return Status::OK();
+}
+
} // namespace mongo
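The reformatted QueryPlanner::plan() above returns its candidate plans through 'out', and, per the header below, the caller owns every pointer placed there. A minimal sketch of a caller follows, assuming the usual mongo headers are available; the helper name planAndKeepFirst is hypothetical and not part of this change:

// Sketch only: drive QueryPlanner::plan() and honor the "caller owns pointers
// in *out" contract. Assumes mongo's Status/CanonicalQuery/QueryPlannerParams
// headers are included; planAndKeepFirst is a hypothetical helper name.
#include <memory>
#include <vector>

Status planAndKeepFirst(const CanonicalQuery& query,
                        const QueryPlannerParams& params,
                        std::unique_ptr<QuerySolution>* best) {
    std::vector<QuerySolution*> solutions;
    Status status = QueryPlanner::plan(query, params, &solutions);
    if (!status.isOK()) {
        return status;  // e.g. "bad hint" or "unable to find index for $geoNear query"
    }
    // The caller owns every pointer in 'solutions': keep the first, free the rest.
    for (size_t i = 0; i < solutions.size(); ++i) {
        if (0 == i) {
            best->reset(solutions[i]);
        } else {
            delete solutions[i];
        }
    }
    return Status::OK();
}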
diff --git a/src/mongo/db/query/query_planner.h b/src/mongo/db/query/query_planner.h
index 80d41bef1e8..0b33b109b4e 100644
--- a/src/mongo/db/query/query_planner.h
+++ b/src/mongo/db/query/query_planner.h
@@ -34,84 +34,84 @@
namespace mongo {
- class CachedSolution;
- class Collection;
+class CachedSolution;
+class Collection;
+
+/**
+ * QueryPlanner's job is to provide an entry point to the query planning and optimization
+ * process.
+ */
+class QueryPlanner {
+public:
+ // Identifies the version of the query planner module. Reported in explain.
+ static const int kPlannerVersion;
/**
- * QueryPlanner's job is to provide an entry point to the query planning and optimization
- * process.
+ * Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the
+ * indices and other data in 'params' to plan with.
+ *
+ * Caller owns pointers in *out.
*/
- class QueryPlanner {
- public:
- // Identifies the version of the query planner module. Reported in explain.
- static const int kPlannerVersion;
+ static Status plan(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ std::vector<QuerySolution*>* out);
- /**
- * Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the
- * indices and other data in 'params' to plan with.
- *
- * Caller owns pointers in *out.
- */
- static Status plan(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- std::vector<QuerySolution*>* out);
-
- /**
- * Attempt to generate a query solution, given data retrieved
- * from the plan cache.
- *
- * @param query -- query for which we are generating a plan
- * @param params -- planning parameters
- * @param cachedSoln -- the CachedSolution retrieved from the plan cache.
- * @param out -- an out-parameter which will be filled in with the solution
- * generated from the cache data
- *
- * On success, the caller is responsible for deleting *out.
- */
- static Status planFromCache(const CanonicalQuery& query,
- const QueryPlannerParams& params,
- const CachedSolution& cachedSoln,
- QuerySolution** out);
+ /**
+ * Attempt to generate a query solution, given data retrieved
+ * from the plan cache.
+ *
+ * @param query -- query for which we are generating a plan
+ * @param params -- planning parameters
+ * @param cachedSoln -- the CachedSolution retrieved from the plan cache.
+ * @param out -- an out-parameter which will be filled in with the solution
+ * generated from the cache data
+ *
+ * On success, the caller is responsible for deleting *out.
+ */
+ static Status planFromCache(const CanonicalQuery& query,
+ const QueryPlannerParams& params,
+ const CachedSolution& cachedSoln,
+ QuerySolution** out);
- /**
- * Used to generated the index tag tree that will be inserted
- * into the plan cache. This data gets stashed inside a QuerySolution
- * until it can be inserted into the cache proper.
- *
- * @param taggedTree -- a MatchExpression with index tags that has been
- * produced by the enumerator.
- * @param relevantIndices -- a list of the index entries used to tag
- * the tree (i.e. index numbers in the tags refer to entries in this vector)
- *
- * On success, a new tagged tree is returned through the out-parameter 'out'.
- * The caller has ownership of both taggedTree and *out.
- *
- * On failure, 'out' is set to NULL.
- */
- static Status cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
- const std::vector<IndexEntry>& relevantIndices,
- PlanCacheIndexTree** out);
+ /**
+     * Used to generate the index tag tree that will be inserted
+ * into the plan cache. This data gets stashed inside a QuerySolution
+ * until it can be inserted into the cache proper.
+ *
+ * @param taggedTree -- a MatchExpression with index tags that has been
+ * produced by the enumerator.
+ * @param relevantIndices -- a list of the index entries used to tag
+ * the tree (i.e. index numbers in the tags refer to entries in this vector)
+ *
+ * On success, a new tagged tree is returned through the out-parameter 'out'.
+ * The caller has ownership of both taggedTree and *out.
+ *
+ * On failure, 'out' is set to NULL.
+ */
+ static Status cacheDataFromTaggedTree(const MatchExpression* const taggedTree,
+ const std::vector<IndexEntry>& relevantIndices,
+ PlanCacheIndexTree** out);
- /**
- * @param filter -- an untagged MatchExpression
- * @param indexTree -- a tree structure retrieved from the
- * cache with index tags that indicates how 'filter' should
- * be tagged.
- * @param indexMap -- needed in order to put the proper index
- * numbers inside the index tags
- *
- * On success, 'filter' is mutated so that it has all the
- * index tags needed in order for the access planner to recreate
- * the cached plan.
- *
- * On failure, the tag state attached to the nodes of 'filter'
- * is invalid. Planning from the cache should be aborted.
- *
- * Does not take ownership of either filter or indexTree.
- */
- static Status tagAccordingToCache(MatchExpression* filter,
- const PlanCacheIndexTree* const indexTree,
- const std::map<BSONObj, size_t>& indexMap);
- };
+ /**
+ * @param filter -- an untagged MatchExpression
+ * @param indexTree -- a tree structure retrieved from the
+ * cache with index tags that indicates how 'filter' should
+ * be tagged.
+ * @param indexMap -- needed in order to put the proper index
+ * numbers inside the index tags
+ *
+ * On success, 'filter' is mutated so that it has all the
+ * index tags needed in order for the access planner to recreate
+ * the cached plan.
+ *
+ * On failure, the tag state attached to the nodes of 'filter'
+ * is invalid. Planning from the cache should be aborted.
+ *
+ * Does not take ownership of either filter or indexTree.
+ */
+ static Status tagAccordingToCache(MatchExpression* filter,
+ const PlanCacheIndexTree* const indexTree,
+ const std::map<BSONObj, size_t>& indexMap);
+};
} // namespace mongo
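The cache entry points declared above follow the same ownership rule: on success the caller must delete *out. A minimal sketch of replaying a cached plan follows; the wrapper name replayCachedPlan is hypothetical:

// Sketch only: plan from the cache via QueryPlanner::planFromCache().
// 'replayCachedPlan' is a hypothetical name; on success the caller is
// responsible for deleting *out, so it is moved into a unique_ptr here.
#include <memory>

Status replayCachedPlan(const CanonicalQuery& query,
                        const QueryPlannerParams& params,
                        const CachedSolution& cachedSoln,
                        std::unique_ptr<QuerySolution>* result) {
    QuerySolution* raw = NULL;
    Status status = QueryPlanner::planFromCache(query, params, cachedSoln, &raw);
    if (!status.isOK()) {
        // On failure any tag state left on the filter is invalid (see
        // tagAccordingToCache() above), so planning from the cache is aborted.
        return status;
    }
    result->reset(raw);
    return Status::OK();
}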
diff --git a/src/mongo/db/query/query_planner_array_test.cpp b/src/mongo/db/query/query_planner_array_test.cpp
index 09d2506fae4..e4c3a69dca1 100644
--- a/src/mongo/db/query/query_planner_array_test.cpp
+++ b/src/mongo/db/query/query_planner_array_test.cpp
@@ -35,921 +35,991 @@
namespace {
- using namespace mongo;
-
- TEST_F(QueryPlannerTest, ElemMatchOneField) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a : {$elemMatch: {b:1}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a:{$elemMatch:{b:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:1}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchTwoFields) {
- addIndex(BSON("a.b" << 1));
- addIndex(BSON("a.c" << 1));
- runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a:{$elemMatch:{b:1,c:1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.c': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicAllElemMatch) {
- addIndex(BSON("foo.a" << 1));
- addIndex(BSON("foo.b" << 1));
- runQuery(fromjson("{foo: {$all: [ {$elemMatch: {a:1, b:1}}, {$elemMatch: {a:2, b:2}}]}}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$all:"
- "[{$elemMatch:{a:1,b:1}},{$elemMatch:{a:2,b:2}}]}}}}");
-
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'foo.a': 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'foo.b': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicAllElemMatch2) {
- // true means multikey
- addIndex(BSON("a.x" << 1), true);
-
- runQuery(fromjson("{a: {$all: [{$elemMatch: {x: 3}}, {$elemMatch: {y: 5}}]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$all:[{$elemMatch:{x:3}},{$elemMatch:{y:5}}]}},"
- "node: {ixscan: {pattern: {'a.x': 1},"
- "bounds: {'a.x': [[3,3,true,true]]}}}}}");
- }
-
- // SERVER-16256
- TEST_F(QueryPlannerTest, AllElemMatchCompound) {
- // true means multikey
- addIndex(BSON("d" << 1 << "a.b" << 1 << "a.c" << 1), true);
-
- runQuery(fromjson("{d: 1, a: {$all: [{$elemMatch: {b: 2, c: 2}},"
- "{$elemMatch: {b: 3, c: 3}}]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{a: {$elemMatch: {b: 2, c: 2}}},"
- "{a: {$elemMatch: {b: 3, c: 3}}}]},"
- "node: {ixscan: {filter: null, pattern: {d:1,'a.b':1,'a.c':1},"
- "bounds: {d: [[1,1,true,true]],"
- "'a.b': [[2,2,true,true]],"
- "'a.c': [[2,2,true,true]]}}}}}");
- }
-
- // SERVER-13677
- TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild) {
- addIndex(BSON("a.b.c.d" << 1));
- runQuery(fromjson("{z: 1, 'a.b': {$elemMatch: {c: {$all: [{$elemMatch: {d: 0}}]}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c.d': 1}}}}}");
- }
-
- // SERVER-13677
- TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild2) {
- // true means multikey
- addIndex(BSON("a.b.c.d" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {c: {$all: "
- "[{$elemMatch: {d: {$gt: 1, $lt: 3}}}]}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c.d': 1}, "
- "bounds: {'a.b.c.d': [[-Infinity,3,true,false]]}}}}}");
- }
-
- // SERVER-13677
- TEST_F(QueryPlannerTest, ElemMatchWithAllChild) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1), true);
- runQuery(fromjson("{z: 1, 'a.b': {$elemMatch: {c: {$all: [4, 5, 6]}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, "
- "bounds: {'a.b.c': [[4,4,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchValueMatch) {
- addIndex(BSON("foo" << 1));
- addIndex(BSON("foo" << 1 << "bar" << 1));
- runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $lt: 10}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$lt:10}}}}}");
- assertSolutionExists("{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
- "{ixscan: {filter: null, pattern: {foo: 1}}}}}");
- assertSolutionExists("{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
- "{ixscan: {filter: null, pattern: {foo: 1, bar: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchValueIndexability) {
- addIndex(BSON("foo" << 1));
-
- // An ELEM_MATCH_VALUE can be indexed if all of its child predicates
- // are "index bounds generating".
- runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $lt: 10}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$lt:10}}}}}");
- assertSolutionExists("{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
- "{ixscan: {filter: null, pattern: {foo: 1}}}}}");
-
- // We cannot build index bounds for the $size predicate. This means that the
- // ELEM_MATCH_VALUE is not indexable, and we get no indexed solutions.
- runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $size: 10}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$size:10}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchNested) {
- addIndex(BSON("a.b.c" << 1));
- runQuery(fromjson("{ a:{ $elemMatch:{ b:{ $elemMatch:{ c:{ $gte:1, $lte:1 } } } } }}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoElemMatchNested) {
- addIndex(BSON("a.d.e" << 1));
- addIndex(BSON("a.b.c" << 1));
- runQuery(fromjson("{ a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },"
- "b:{ $elemMatch:{ c:{ $gte:1 } } } } } }"));
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.d.e': 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ElemMatchCompoundTwoFields) {
- addIndex(BSON("a.b" << 1 << "a.c" << 1));
- runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1, 'a.c': 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ArrayEquality) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a : [1, 2, 3]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a:[1,2,3]}}}");
- assertSolutionExists("{fetch: {filter: {a:[1,2,3]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- // SERVER-13664
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedAnd) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:{$gte:2,$lt: 4},c:25}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, "
- "bounds: {'a.b': [[-Infinity,4,true,false]], "
- "'a.c': [[25,25,true,true]]}}}}}");
- }
-
- // SERVER-13664
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedOr) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- // true means multikey
- addIndex(BSON("a.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 3}, {c: 4}]}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{$or:[{b:3},{c:4}]}}}, "
- "node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}, "
- "{ixscan: {filter: null, pattern: {'a.c': 1}}}]}}}}");
- }
-
- // SERVER-13664
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegex) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a: {$elemMatch: {b: /foo/}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:/foo/}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- // SERVER-14180
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegexAnd) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a: {$elemMatch: {b: /foo/}}, z: 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:/foo/}}, z:1}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- // SERVER-14180
- TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegexAnd2) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a: {$elemMatch: {b: /foo/, b: 3}}, z: 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:/foo/,b:3}}, z:1}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- // $not can appear as a value operator inside of an elemMatch (value). We shouldn't crash if we
- // see it.
- TEST_F(QueryPlannerTest, ElemMatchWithNotInside) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a: {$elemMatch: {$not: {$gte: 6}}}}"));
- }
-
- // SERVER-14625: Make sure we construct bounds properly for $elemMatch object with a
- // negation inside.
- TEST_F(QueryPlannerTest, ElemMatchWithNotInside2) {
- addIndex(BSON("a.b" << 1 << "a.c" << 1));
- runQuery(fromjson("{d: 1, a: {$elemMatch: {c: {$ne: 3}, b: 4}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {d: 1, a: {$elemMatch: {c: {$ne: 3}, b: 4}}}, node:"
- "{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, bounds:"
- "{'a.b': [[4,4,true,true]],"
- " 'a.c': [['MinKey',3,true,false],"
- "[3,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-13789
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOr) {
- addIndex(BSON("bar.baz" << 1));
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2}]}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{foo:1},"
- "{bar:{$elemMatch:{$or:[{baz:2}]}}}]}, "
- "node: {ixscan: {pattern: {'bar.baz': 1}, "
- "bounds: {'bar.baz': [[2,2,true,true]]}}}}}");
- }
-
- // SERVER-13789
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOrMultiplePreds) {
- addIndex(BSON("bar.baz" << 1));
- addIndex(BSON("bar.z" << 1));
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2}, {z: 3}]}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{foo:1},"
- "{bar:{$elemMatch:{$or:[{baz:2},{z:3}]}}}]}, "
- "node: {or: {nodes: ["
- "{ixscan: {pattern: {'bar.baz': 1}, "
- "bounds: {'bar.baz': [[2,2,true,true]]}}},"
- "{ixscan: {pattern: {'bar.z': 1}, "
- "bounds: {'bar.z': [[3,3,true,true]]}}}]}}}}");
- }
-
- // SERVER-13789: Ensure that we properly compound in the multikey case when an
- // $or is beneath an $elemMatch.
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOrMultikey) {
- // true means multikey
- addIndex(BSON("bar.baz" << 1 << "bar.z" << 1), true);
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2, z: 3}]}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{foo:1},"
- "{bar: {$elemMatch: {$or: [{$and: [{baz:2}, {z:3}]}]}}}]},"
- "node: {ixscan: {pattern: {'bar.baz': 1, 'bar.z': 1}, "
- "bounds: {'bar.baz': [[2,2,true,true]],"
- "'bar.z': [[3,3,true,true]]}}}}}");
- }
-
- // SERVER-13789: Right now we don't index $nor, but make sure that the planner
- // doesn't get confused by a $nor beneath an $elemMatch.
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedNor) {
- addIndex(BSON("bar.baz" << 1));
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$nor: [{baz: 2}, {baz: 3}]}}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // SERVER-13789
- TEST_F(QueryPlannerTest, ElemMatchIndexedNestedNE) {
- addIndex(BSON("bar.baz" << 1));
- runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {baz: {$ne: 2}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{foo:1},"
- "{bar:{$elemMatch:{baz:{$ne:2}}}}]}, "
- "node: {ixscan: {pattern: {'bar.baz': 1}, "
- "bounds: {'bar.baz': [['MinKey',2,true,false], "
- "[2,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-13789: Make sure we properly handle an $or below $elemMatch that is not
- // tagged by the enumerator to use an index.
- TEST_F(QueryPlannerTest, ElemMatchNestedOrNotIndexed) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{c: 1, a: {$elemMatch: {b: 3, $or: [{c: 4}, {c: 5}]}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[3,3,true,true]]}}}}}");
- }
-
- // The index bounds can be compounded because the index is not multikey.
- TEST_F(QueryPlannerTest, CompoundBoundsElemMatchNotMultikey) {
- addIndex(BSON("a.x" << 1 << "a.b.c" << 1));
- runQuery(fromjson("{'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:{$elemMatch:{c:{$gte:1}}}}}}, "
- "node: {ixscan: {pattern: {'a.x':1, 'a.b.c':1}, bounds: "
- "{'a.x': [[1,1,true,true]], "
- " 'a.b.c': [[1,Infinity,true,true]]}}}}}");
- }
-
- // The index bounds cannot be compounded because the predicates over 'a.x' and
- // 'a.b.c' 1) share the prefix "a", and 2) are not conjoined by an $elemMatch
- // over the prefix "a".
- TEST_F(QueryPlannerTest, CompoundMultikeyBoundsElemMatch) {
- // true means multikey
- addIndex(BSON("a.x" << 1 << "a.b.c" << 1), true);
- runQuery(fromjson("{'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.x':1, 'a.b.c':1}, bounds: "
- "{'a.x': [[1,1,true,true]], "
- " 'a.b.c': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // The index bounds cannot be intersected because the index is multikey.
- // The bounds could be intersected if there was an $elemMatch applied to path
- // "a.b.c". However, the $elemMatch is applied to the path "a.b" rather than
- // the full path of the indexed field.
- TEST_F(QueryPlannerTest, MultikeyNestedElemMatch) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $lte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
- "{'a.b.c': [[-Infinity, 1, true, true]]}}}}}");
- }
-
- // The index bounds cannot be intersected because the index is multikey.
- // The bounds could be intersected if there was an $elemMatch applied to path
- // "a.b.c". However, the $elemMatch is applied to the path "a.b" rather than
- // the full path of the indexed field.
- TEST_F(QueryPlannerTest, MultikeyNestedElemMatchIn) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $in:[2]}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
- "{'a.b.c': [[1, Infinity, true, true]]}}}}}");
- }
-
- // The bounds can be compounded because the index is not multikey.
- TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
- addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1));
- runQuery(fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
- "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
- "{'a.d.e': [[-Infinity, 1, true, true]],"
- "'a.b.c': [[1, Infinity, true, true]]}}}}}");
- }
-
- // The bounds cannot be compounded. Although there is an $elemMatch over the
- // shared path prefix 'a', the predicates must be conjoined by the same $elemMatch,
- // without nested $elemMatch's intervening. The bounds could be compounded if
- // the query were rewritten as {a: {$elemMatch: {'d.e': {$lte: 1}, 'b.c': {$gte: 1}}}}.
- TEST_F(QueryPlannerTest, MultikeyTwoNestedElemMatchBounds) {
- // true means multikey
- addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
- "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
- "{'a.d.e': [[-Infinity, 1, true, true]],"
- "'a.b.c': [['MinKey', 'MaxKey', true, true]]}}}}}");
- }
-
- // Bounds can be intersected for a multikey index when the predicates are
- // joined by an $elemMatch over the full path of the index field.
- TEST_F(QueryPlannerTest, MultikeyElemMatchValue) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {$gte: 1, $lte: 1}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[1, 1, true, true]]}}}}}");
- }
-
- // We can intersect the bounds for all three predicates because
- // the index is not multikey.
- TEST_F(QueryPlannerTest, ElemMatchInterectBoundsNotMultikey) {
- addIndex(BSON("a.b" << 1));
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
- "'a.b': {$in: [2,5]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[2, 2, true, true]]}}}}}");
- }
-
- // Bounds can be intersected for a multikey index when the predicates are
- // joined by an $elemMatch over the full path of the index field. The bounds
- // from the $in predicate are not intersected with the bounds from the
-    // remaining two predicates because the $in is not joined to the other
- // predicates with an $elemMatch.
- TEST_F(QueryPlannerTest, ElemMatchInterectBoundsMultikey) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
- "'a.b': {$in: [2,5]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[1, 4, true, true]]}}}}}");
- }
-
- // Bounds can be intersected because the predicates are joined by an
- // $elemMatch over the path "a.b.c", the full path of the multikey
- // index field.
- TEST_F(QueryPlannerTest, MultikeyNestedElemMatchValue) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {'b.c': {$elemMatch: {$gte: 1, $lte: 1}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
- "{'a.b.c': [[1, 1, true, true]]}}}}}");
- }
-
- // Bounds cannot be compounded for a multikey compound index when
- // the predicates share a prefix (and there is no $elemMatch).
- TEST_F(QueryPlannerTest, MultikeySharedPrefixNoElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.b': 1, 'a.c': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Bounds can be compounded because there is an $elemMatch applied to the
- // shared prefix "a".
- TEST_F(QueryPlannerTest, MultikeySharedPrefixElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], 'a.c': [[1,1,true,true]]}}}}}");
- }
-
- // Bounds cannot be compounded for the multikey index even though there is an
- // $elemMatch, because the $elemMatch does not join the two predicates. This
-    // query is semantically identical to {'a.b': 1, 'a.c': 1}.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixElemMatchNotShared) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.b': 1, a: {$elemMatch: {c: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Bounds cannot be compounded for the multikey index even though there are
- // $elemMatch's, because there is not an $elemMatch which joins the two
-    // predicates. This query is semantically identical to {'a.b': 1, 'a.c': 1}.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixTwoElemMatches) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{$and: [{a: {$elemMatch: {b: 1}}}, {a: {$elemMatch: {c: 1}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Bounds for the predicates joined by the $elemMatch over the shared prefix
- // "a" can be combined. However, the predicate 'a.b'==1 cannot also be combined
- // given that it is outside of the $elemMatch.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.b': 1, a: {$elemMatch: {b: {$gt: 0}, c: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[0,Infinity,false,true]], "
- " 'a.c': [[1,1,true,true]]}}}}}");
- }
-
- // Bounds for the predicates joined by the $elemMatch over the shared prefix
- // "a" can be combined. However, the predicate outside the $elemMatch
- // cannot also be combined.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch2) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, 'a.b': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [[1,1,true,true]]}}}}}");
- }
-
- // Bounds for the predicates joined by the $elemMatch over the shared prefix
- // "a" can be combined. However, the predicate outside the $elemMatch
- // cannot also be combined.
- TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch3) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.c': 2, a: {$elemMatch: {b: 1, c: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
- "{'a.b': [[1,1,true,true]], "
- " 'a.c': [[1,1,true,true]]}}}}}");
- }
-
- // There are two sets of fields that share a prefix: {'a.b', 'a.c'} and
- // {'d.e', 'd.f'}. Since the index is multikey, we can only use the bounds from
- // one member of each of these sets.
- TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesBasic) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
- runQuery(fromjson("{'a.b': 1, 'a.c': 1, 'd.e': 1, 'd.f': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
- "bounds: {'a.b':[[1,1,true,true]], "
- " 'a.c':[['MinKey','MaxKey',true,true]], "
- " 'd.e':[[1,1,true,true]], "
- " 'd.f':[['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // All bounds can be combined. Although 'a.b' and 'a.c' share prefix 'a', the
- // relevant predicates are joined by an $elemMatch on 'a'. Similarly, predicates
- // over 'd.e' and 'd.f' are joined by an $elemMatch on 'd'.
- TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesTwoElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, d: {$elemMatch: {e: 1, f: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and: [{a: {$elemMatch: {b: 1, c: 1}}},"
- "{d: {$elemMatch: {e: 1, f: 1}}}]},"
- "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
- "bounds: {'a.b':[[1,1,true,true]], "
- " 'a.c':[[1,1,true,true]], "
- " 'd.e':[[1,1,true,true]], "
- " 'd.f':[[1,1,true,true]]}}}}}");
- }
-
- // Bounds for 'a.b' and 'a.c' can be combined because of the $elemMatch on 'a'.
- // Since predicates on 'd.e' and 'd.f' have no $elemMatch, we use the bounds
- // for only one of the two.
- TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesOneElemMatch) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, 'd.e': 1, 'd.f': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and:[{a:{$elemMatch:{b:1,c:1}}}, {'d.f':1}]},"
- "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
- "bounds: {'a.b':[[1,1,true,true]], "
- " 'a.c':[[1,1,true,true]], "
- " 'd.e':[[1,1,true,true]], "
- " 'd.f':[['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Bounds for 'd.e' and 'd.f' can be combined because of the $elemMatch on 'd'.
- // Since predicates on 'a.b' and 'a.c' have no $elemMatch, we use the bounds
- // for only one of the two.
- TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesOneElemMatch2) {
- // true means multikey
- addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
- runQuery(fromjson("{'a.b': 1, 'a.c': 1, d: {$elemMatch: {e: 1, f: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and:[{d:{$elemMatch:{e:1,f:1}}}, {'a.c':1}]},"
- "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
- "bounds: {'a.b':[[1,1,true,true]], "
- " 'a.c':[['MinKey','MaxKey',true,true]], "
- " 'd.e':[[1,1,true,true]], "
- " 'd.f':[[1,1,true,true]]}}}}}");
- }
-
- // The bounds cannot be compounded because 'a.b.x' and 'a.b.y' share prefix
- // 'a.b' (and there is no $elemMatch).
- TEST_F(QueryPlannerTest, MultikeyDoubleDottedNoElemMatch) {
- // true means multikey
- addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
- runQuery(fromjson("{'a.b.y': 1, 'a.b.x': 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
- "{'a.b.x': [[1,1,true,true]], "
- " 'a.b.y': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // The bounds can be compounded because the predicates are joined by an
- // $elemMatch on the shared prefix "a.b".
- TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatch) {
- // true means multikey
- addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {x: 1, y: 1}}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
- "{'a.b.x': [[1,1,true,true]], "
- " 'a.b.y': [[1,1,true,true]]}}}}}");
- }
-
- // The bounds cannot be compounded. Although there is an $elemMatch that appears
- // to join the predicates, the path to which the $elemMatch is applied is "a".
- // Therefore, the predicates contained in the $elemMatch are over "b.x" and "b.y".
- // They cannot be compounded due to shared prefix "b".
- TEST_F(QueryPlannerTest, MultikeyDoubleDottedUnhelpfulElemMatch) {
- // true means multikey
- addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {'b.x': 1, 'b.y': 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
- "{'a.b.x': [[1,1,true,true]], "
- " 'a.b.y': [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // The bounds can be compounded because the predicates are joined by an
- // $elemMatch on the shared prefix "a.b".
- TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatchOnDotted) {
- // true means multikey
- addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {x: 1, y: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
- "{'a.b.x': [[1,1,true,true]], "
- " 'a.b.y': [[1,1,true,true]]}}}}}");
- }
-
- // This one is subtle. Say we compound the bounds for predicates over "a.b.c" and
- // "a.b.d". This is okay because of the predicate over the shared prefix "a.b".
- // It might seem like we can do the same for the $elemMatch over shared prefix "a.e",
- // thus combining all bounds. But in fact, we can't combine any more bounds because
- // we have already used prefix "a". In other words, this query is like having predicates
- // over "a.b" and "a.e", so we can only use bounds from one of the two.
- TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1 << "a.e.f" << 1 << "a.b.d" << 1 << "a.e.g" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {c: 1, d: 1}}, "
- "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c':1,'a.e.f':1,'a.b.d':1,'a.e.g':1},"
- "bounds: {'a.b.c':[[1,1,true,true]], "
- " 'a.e.f':[['MinKey','MaxKey',true,true]], "
- " 'a.b.d':[[1,1,true,true]], "
- " 'a.e.g':[['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // Similar to MultikeyComplexDoubleDotted above.
- TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted2) {
- // true means multikey
- addIndex(BSON("a.b.c" << 1 << "a.e.c" << 1 << "a.b.d" << 1 << "a.e.d" << 1), true);
- runQuery(fromjson("{'a.b': {$elemMatch: {c: 1, d: 1}}, "
- "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c':1,'a.e.c':1,'a.b.d':1,'a.e.d':1},"
- "bounds: {'a.b.c':[[1,1,true,true]], "
- " 'a.e.c':[['MinKey','MaxKey',true,true]], "
- " 'a.b.d':[[1,1,true,true]], "
- " 'a.e.d':[['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- // SERVER-13422: check that we plan $elemMatch object correctly with
- // index intersection.
- TEST_F(QueryPlannerTest, ElemMatchIndexIntersection) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("shortId" << 1));
- // true means multikey
- addIndex(BSON("a.b.startDate" << 1), true);
- addIndex(BSON("a.b.endDate" << 1), true);
-
- runQuery(fromjson("{shortId: 3, 'a.b': {$elemMatch: {startDate: {$lte: 3},"
- "endDate: {$gt: 6}}}}"));
-
- assertNumSolutions(6U);
-
- // 3 single index solutions.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {shortId: 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.startDate': 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.endDate': 1}}}}}");
-
- // 3 index intersection solutions. The last one has to intersect two
- // predicates within the $elemMatch object.
- assertSolutionExists("{fetch: {node: {andHash: {nodes: ["
- "{ixscan: {pattern: {shortId: 1}}},"
- "{ixscan: {pattern: {'a.b.startDate': 1}}}]}}}}");
- assertSolutionExists("{fetch: {node: {andHash: {nodes: ["
- "{ixscan: {pattern: {shortId: 1}}},"
- "{ixscan: {pattern: {'a.b.endDate': 1}}}]}}}}");
- assertSolutionExists("{fetch: {node: {andHash: {nodes: ["
- "{ixscan: {pattern: {'a.b.startDate': 1}}},"
- "{ixscan: {pattern: {'a.b.endDate': 1}}}]}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, NegationBelowElemMatchValue) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
-
- runQuery(fromjson("{a: {$elemMatch: {$ne: 2}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{$ne:2}}}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}, bounds: "
- "{a: [['MinKey',2,true,false], [2,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, AndWithNegationBelowElemMatchValue) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
- addIndex(BSON("b" << 1), true);
-
- runQuery(fromjson("{b: 10, a: {$elemMatch: {$not: {$gt: 4}}}}"));
-
- // One solution using index on 'b' and one using index on 'a'.
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {b: 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: {a: "
- "[['MinKey',4,true,true],[Infinity,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, AndWithNegationBelowElemMatchValue2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
-
- runQuery(fromjson("{b: 10, a: {$elemMatch: {$not: {$gt: 4}, $gt: 2}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: "
- "{a: [[2, 4, false, true]]}}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, NegationBelowElemMatchValueBelowElemMatchObject) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
-
- runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {$ne: 4}}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.b': 1}, bounds: "
- "{'a.b': [['MinKey',4,true,false],[4,'MaxKey',false,true]]}}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, NegationBelowElemMatchValueBelowOrBelowAnd) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
- addIndex(BSON("b" << 1));
-
- runQuery(fromjson("{c: 3, $or: [{a: {$elemMatch: {$ne: 4, $ne: 3}}}, {b: 5}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {c:3}, node: {or: {nodes: ["
- "{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: "
- "{a: [['MinKey',3,true,false],"
- "[3,4,false,false],"
- "[4,'MaxKey',false,true]]}}}}}, "
- "{ixscan: {filter: null, pattern: {b: 1}, bounds: "
- "{b: [[5,5,true,true]]}}}]}}}}");
- }
-
- // SERVER-14718
- TEST_F(QueryPlannerTest, CantIndexNegationBelowElemMatchValue) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- // true means multikey
- addIndex(BSON("a" << 1), true);
-
- runQuery(fromjson("{a: {$elemMatch: {$not: {$mod: [2, 0]}}}}"));
-
- // There are no indexed solutions, because negations of $mod are not indexable.
- assertNumSolutions(0);
- }
-
- /**
- * Index bounds constraints on a field should not be intersected
- * if the index is multikey.
- */
- TEST_F(QueryPlannerTest, MultikeyTwoConstraintsSameField) {
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: {$gt: 0, $lt: 5}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {filter: {$and: [{a: {$lt: 5}}, {a: {$gt: 0}}]}, dir: 1}}");
-
- std::vector<std::string> alternates;
- alternates.push_back("{fetch: {filter: {a: {$lt: 5}}, node: {ixscan: {filter: null, "
- "pattern: {a: 1}, bounds: {a: [[0, Infinity, false, true]]}}}}}");
- alternates.push_back("{fetch: {filter: {a: {$gt: 0}}, node: {ixscan: {filter: null, "
- "pattern: {a: 1}, bounds: {a: [[-Infinity, 5, true, false]]}}}}}");
- assertHasOneSolutionOf(alternates);
- }
-
- /**
- * Constraints on fields with a shared parent should not be intersected
- * if the index is multikey.
- */
- TEST_F(QueryPlannerTest, MultikeyTwoConstraintsDifferentFields) {
- addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson("{'a.b': 2, 'a.c': 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {filter: {$and: [{'a.b': 2}, {'a.c': 3}]}, dir: 1}}");
-
- std::vector<std::string> alternates;
- alternates.push_back("{fetch: {filter: {'a.c': 3}, node: {ixscan: {filter: null, "
- "pattern: {'a.b': 1, 'a.c': 1}, bounds: "
- "{'a.b': [[2,2,true,true]], "
- " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
- alternates.push_back("{fetch: {filter: {'a.b': 2}, node: {ixscan: {filter: null, "
- "pattern: {'a.b': 1, 'a.c': 1}, bounds: "
- "{'a.b': [['MinKey','MaxKey',true,true]], "
- " 'a.c': [[3,3,true,true]]}}}}}");
- assertHasOneSolutionOf(alternates);
- }
+using namespace mongo;
+
+TEST_F(QueryPlannerTest, ElemMatchOneField) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{a : {$elemMatch: {b:1}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a:{$elemMatch:{b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:1}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchTwoFields) {
+ addIndex(BSON("a.b" << 1));
+ addIndex(BSON("a.c" << 1));
+ runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a:{$elemMatch:{b:1,c:1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'a.c': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicAllElemMatch) {
+ addIndex(BSON("foo.a" << 1));
+ addIndex(BSON("foo.b" << 1));
+ runQuery(fromjson("{foo: {$all: [ {$elemMatch: {a:1, b:1}}, {$elemMatch: {a:2, b:2}}]}}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{cscan: {dir: 1, filter: {foo:{$all:"
+ "[{$elemMatch:{a:1,b:1}},{$elemMatch:{a:2,b:2}}]}}}}");
+
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'foo.a': 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {'foo.b': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicAllElemMatch2) {
+ // true means multikey
+ addIndex(BSON("a.x" << 1), true);
+
+ runQuery(fromjson("{a: {$all: [{$elemMatch: {x: 3}}, {$elemMatch: {y: 5}}]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$all:[{$elemMatch:{x:3}},{$elemMatch:{y:5}}]}},"
+ "node: {ixscan: {pattern: {'a.x': 1},"
+ "bounds: {'a.x': [[3,3,true,true]]}}}}}");
+}
+
+// SERVER-16256
+TEST_F(QueryPlannerTest, AllElemMatchCompound) {
+ // true means multikey
+ addIndex(BSON("d" << 1 << "a.b" << 1 << "a.c" << 1), true);
+
+ runQuery(fromjson(
+ "{d: 1, a: {$all: [{$elemMatch: {b: 2, c: 2}},"
+ "{$elemMatch: {b: 3, c: 3}}]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{a: {$elemMatch: {b: 2, c: 2}}},"
+ "{a: {$elemMatch: {b: 3, c: 3}}}]},"
+ "node: {ixscan: {filter: null, pattern: {d:1,'a.b':1,'a.c':1},"
+ "bounds: {d: [[1,1,true,true]],"
+ "'a.b': [[2,2,true,true]],"
+ "'a.c': [[2,2,true,true]]}}}}}");
+}
+
+// SERVER-13677
+TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild) {
+ addIndex(BSON("a.b.c.d" << 1));
+ runQuery(fromjson("{z: 1, 'a.b': {$elemMatch: {c: {$all: [{$elemMatch: {d: 0}}]}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c.d': 1}}}}}");
+}
+
+// SERVER-13677
+TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild2) {
+ // true means multikey
+ addIndex(BSON("a.b.c.d" << 1), true);
+ runQuery(fromjson(
+ "{'a.b': {$elemMatch: {c: {$all: "
+ "[{$elemMatch: {d: {$gt: 1, $lt: 3}}}]}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c.d': 1}, "
+ "bounds: {'a.b.c.d': [[-Infinity,3,true,false]]}}}}}");
+}
+
+// SERVER-13677
+TEST_F(QueryPlannerTest, ElemMatchWithAllChild) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1), true);
+ runQuery(fromjson("{z: 1, 'a.b': {$elemMatch: {c: {$all: [4, 5, 6]}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, "
+ "bounds: {'a.b.c': [[4,4,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchValueMatch) {
+ addIndex(BSON("foo" << 1));
+ addIndex(BSON("foo" << 1 << "bar" << 1));
+ runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $lt: 10}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$lt:10}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
+ "{ixscan: {filter: null, pattern: {foo: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
+ "{ixscan: {filter: null, pattern: {foo: 1, bar: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchValueIndexability) {
+ addIndex(BSON("foo" << 1));
+
+ // An ELEM_MATCH_VALUE can be indexed if all of its child predicates
+ // are "index bounds generating".
+ runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $lt: 10}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$lt:10}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {foo: {$elemMatch: {$gt: 5, $lt: 10}}}, node: "
+ "{ixscan: {filter: null, pattern: {foo: 1}}}}}");
+
+ // We cannot build index bounds for the $size predicate. This means that the
+ // ELEM_MATCH_VALUE is not indexable, and we get no indexed solutions.
+ runQuery(fromjson("{foo: {$elemMatch: {$gt: 5, $size: 10}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {foo:{$elemMatch:{$gt:5,$size:10}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchNested) {
+ addIndex(BSON("a.b.c" << 1));
+ runQuery(fromjson("{ a:{ $elemMatch:{ b:{ $elemMatch:{ c:{ $gte:1, $lte:1 } } } } }}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoElemMatchNested) {
+ addIndex(BSON("a.d.e" << 1));
+ addIndex(BSON("a.b.c" << 1));
+ runQuery(fromjson(
+ "{ a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },"
+ "b:{ $elemMatch:{ c:{ $gte:1 } } } } } }"));
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.d.e': 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ElemMatchCompoundTwoFields) {
+ addIndex(BSON("a.b" << 1 << "a.c" << 1));
+ runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': 1, 'a.c': 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ArrayEquality) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a : [1, 2, 3]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a:[1,2,3]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:[1,2,3]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+// SERVER-13664
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedAnd) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:{$gte:2,$lt: 4},c:25}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, "
+ "bounds: {'a.b': [[-Infinity,4,true,false]], "
+ "'a.c': [[25,25,true,true]]}}}}}");
+}
+
+// SERVER-13664
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedOr) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ // true means multikey
+ addIndex(BSON("a.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 3}, {c: 4}]}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{$or:[{b:3},{c:4}]}}}, "
+ "node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}, "
+ "{ixscan: {filter: null, pattern: {'a.c': 1}}}]}}}}");
+}
+
+// SERVER-13664
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegex) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{a: {$elemMatch: {b: /foo/}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:/foo/}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+// SERVER-14180
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegexAnd) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{a: {$elemMatch: {b: /foo/}}, z: 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:/foo/}}, z:1}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+// SERVER-14180
+TEST_F(QueryPlannerTest, ElemMatchEmbeddedRegexAnd2) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{a: {$elemMatch: {b: /foo/, b: 3}}, z: 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:/foo/,b:3}}, z:1}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+// $not can appear as a value operator inside of an elemMatch (value). We shouldn't crash if we
+// see it.
+TEST_F(QueryPlannerTest, ElemMatchWithNotInside) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a: {$elemMatch: {$not: {$gte: 6}}}}"));
+}
+
+// SERVER-14625: Make sure we construct bounds properly for $elemMatch object with a
+// negation inside.
+TEST_F(QueryPlannerTest, ElemMatchWithNotInside2) {
+ addIndex(BSON("a.b" << 1 << "a.c" << 1));
+ runQuery(fromjson("{d: 1, a: {$elemMatch: {c: {$ne: 3}, b: 4}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1, a: {$elemMatch: {c: {$ne: 3}, b: 4}}}, node:"
+ "{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, bounds:"
+ "{'a.b': [[4,4,true,true]],"
+ " 'a.c': [['MinKey',3,true,false],"
+ "[3,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-13789
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOr) {
+ addIndex(BSON("bar.baz" << 1));
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2}]}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{foo:1},"
+ "{bar:{$elemMatch:{$or:[{baz:2}]}}}]}, "
+ "node: {ixscan: {pattern: {'bar.baz': 1}, "
+ "bounds: {'bar.baz': [[2,2,true,true]]}}}}}");
+}
+
+// SERVER-13789
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOrMultiplePreds) {
+ addIndex(BSON("bar.baz" << 1));
+ addIndex(BSON("bar.z" << 1));
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2}, {z: 3}]}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{foo:1},"
+ "{bar:{$elemMatch:{$or:[{baz:2},{z:3}]}}}]}, "
+ "node: {or: {nodes: ["
+ "{ixscan: {pattern: {'bar.baz': 1}, "
+ "bounds: {'bar.baz': [[2,2,true,true]]}}},"
+ "{ixscan: {pattern: {'bar.z': 1}, "
+ "bounds: {'bar.z': [[3,3,true,true]]}}}]}}}}");
+}
+
+// SERVER-13789: Ensure that we properly compound in the multikey case when an
+// $or is beneath an $elemMatch.
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedOrMultikey) {
+ // true means multikey
+ addIndex(BSON("bar.baz" << 1 << "bar.z" << 1), true);
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$or: [{baz: 2, z: 3}]}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{foo:1},"
+ "{bar: {$elemMatch: {$or: [{$and: [{baz:2}, {z:3}]}]}}}]},"
+ "node: {ixscan: {pattern: {'bar.baz': 1, 'bar.z': 1}, "
+ "bounds: {'bar.baz': [[2,2,true,true]],"
+ "'bar.z': [[3,3,true,true]]}}}}}");
+}
+
+// SERVER-13789: Right now we don't index $nor, but make sure that the planner
+// doesn't get confused by a $nor beneath an $elemMatch.
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedNor) {
+ addIndex(BSON("bar.baz" << 1));
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {$nor: [{baz: 2}, {baz: 3}]}}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// SERVER-13789
+TEST_F(QueryPlannerTest, ElemMatchIndexedNestedNE) {
+ addIndex(BSON("bar.baz" << 1));
+ runQuery(fromjson("{foo: 1, $and: [{bar: {$elemMatch: {baz: {$ne: 2}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{foo:1},"
+ "{bar:{$elemMatch:{baz:{$ne:2}}}}]}, "
+ "node: {ixscan: {pattern: {'bar.baz': 1}, "
+ "bounds: {'bar.baz': [['MinKey',2,true,false], "
+ "[2,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-13789: Make sure we properly handle an $or below $elemMatch that is not
+// tagged by the enumerator to use an index.
+TEST_F(QueryPlannerTest, ElemMatchNestedOrNotIndexed) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson("{c: 1, a: {$elemMatch: {b: 3, $or: [{c: 4}, {c: 5}]}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[3,3,true,true]]}}}}}");
+}
+
+// The index bounds can be compounded because the index is not multikey.
+TEST_F(QueryPlannerTest, CompoundBoundsElemMatchNotMultikey) {
+ addIndex(BSON("a.x" << 1 << "a.b.c" << 1));
+ runQuery(fromjson("{'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:{$elemMatch:{c:{$gte:1}}}}}}, "
+ "node: {ixscan: {pattern: {'a.x':1, 'a.b.c':1}, bounds: "
+ "{'a.x': [[1,1,true,true]], "
+ " 'a.b.c': [[1,Infinity,true,true]]}}}}}");
+}
+
+// The index bounds cannot be compounded because the predicates over 'a.x' and
+// 'a.b.c' 1) share the prefix "a", and 2) are not conjoined by an $elemMatch
+// over the prefix "a".
+TEST_F(QueryPlannerTest, CompoundMultikeyBoundsElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.x" << 1 << "a.b.c" << 1), true);
+ runQuery(fromjson("{'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.x':1, 'a.b.c':1}, bounds: "
+ "{'a.x': [[1,1,true,true]], "
+ " 'a.b.c': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// The index bounds cannot be intersected because the index is multikey.
+// The bounds could be intersected if there was an $elemMatch applied to path
+// "a.b.c". However, the $elemMatch is applied to the path "a.b" rather than
+// the full path of the indexed field.
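+// For example, {a: [{b: [{c: [0, 2]}]}]} matches: within the inner $elemMatch
+// the $gte is satisfied by 2 and the $lte by 0, so intersecting the bounds
+// down to [1, 1] would miss the document's index keys (0 and 2).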
+TEST_F(QueryPlannerTest, MultikeyNestedElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $lte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
+ "{'a.b.c': [[-Infinity, 1, true, true]]}}}}}");
+}
+
+// The index bounds cannot be intersected because the index is multikey.
+// The bounds could be intersected if there was an $elemMatch applied to path
+// "a.b.c". However, the $elemMatch is applied to the path "a.b" rather than
+// the full path of the indexed field.
+TEST_F(QueryPlannerTest, MultikeyNestedElemMatchIn) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $in:[2]}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
+ "{'a.b.c': [[1, Infinity, true, true]]}}}}}");
+}
+
+// The bounds can be compounded because the index is not multikey.
+TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
+ addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1));
+ runQuery(fromjson(
+ "{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
+ "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
+ "{'a.d.e': [[-Infinity, 1, true, true]],"
+ "'a.b.c': [[1, Infinity, true, true]]}}}}}");
+}
+
+// The bounds cannot be compounded. Although there is an $elemMatch over the
+// shared path prefix 'a', the predicates must be conjoined by the same $elemMatch,
+// without nested $elemMatch's intervening. The bounds could be compounded if
+// the query were rewritten as {a: {$elemMatch: {'d.e': {$lte: 1}, 'b.c': {$gte: 1}}}}.
+TEST_F(QueryPlannerTest, MultikeyTwoNestedElemMatchBounds) {
+ // true means multikey
+ addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1), true);
+ runQuery(fromjson(
+ "{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
+ "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.d.e': 1, 'a.b.c': 1}, bounds: "
+ "{'a.d.e': [[-Infinity, 1, true, true]],"
+ "'a.b.c': [['MinKey', 'MaxKey', true, true]]}}}}}");
+}
+
+// Bounds can be intersected for a multikey index when the predicates are
+// joined by an $elemMatch over the full path of the index field.
+TEST_F(QueryPlannerTest, MultikeyElemMatchValue) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson("{'a.b': {$elemMatch: {$gte: 1, $lte: 1}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[1, 1, true, true]]}}}}}");
+}
+
+// We can intersect the bounds for all three predicates because
+// the index is not multikey.
+TEST_F(QueryPlannerTest, ElemMatchInterectBoundsNotMultikey) {
+ addIndex(BSON("a.b" << 1));
+ runQuery(fromjson(
+ "{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
+ "'a.b': {$in: [2,5]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[2, 2, true, true]]}}}}}");
+}
+
+// Bounds can be intersected for a multikey index when the predicates are
+// joined by an $elemMatch over the full path of the index field. The bounds
+// from the $in predicate are not intersected with the bounds from the
+// remaining two predicates because the $in is not joined to the other
+// predicates with an $elemMatch.
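+// For example, {a: [{b: [3, 5]}]} matches: 3 satisfies the $elemMatch bounds
+// [1, 4] and 5 satisfies the $in, so intersecting with the $in bounds as well
+// would incorrectly exclude the document.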
+TEST_F(QueryPlannerTest, ElemMatchInterectBoundsMultikey) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson(
+ "{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
+ "'a.b': {$in: [2,5]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[1, 4, true, true]]}}}}}");
+}
+
+// Bounds can be intersected because the predicates are joined by an
+// $elemMatch over the path "a.b.c", the full path of the multikey
+// index field.
+TEST_F(QueryPlannerTest, MultikeyNestedElemMatchValue) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {'b.c': {$elemMatch: {$gte: 1, $lte: 1}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c': 1}, bounds: "
+ "{'a.b.c': [[1, 1, true, true]]}}}}}");
+}
+
+// Bounds cannot be compounded for a multikey compound index when
+// the predicates share a prefix (and there is no $elemMatch).
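+// A multikey index on {'a.b': 1, 'a.c': 1} generates one key per element of
+// 'a', so a matching document such as {a: [{b: 1}, {c: 1}]} produces only the
+// keys (1, null) and (null, 1); compounded bounds of [[1,1]] on both fields
+// would miss both keys.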
+TEST_F(QueryPlannerTest, MultikeySharedPrefixNoElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.b': 1, 'a.c': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Bounds can be compounded because there is an $elemMatch applied to the
+// shared prefix "a".
+TEST_F(QueryPlannerTest, MultikeySharedPrefixElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], 'a.c': [[1,1,true,true]]}}}}}");
+}
+
+// Bounds cannot be compounded for the multikey index even though there is an
+// $elemMatch, because the $elemMatch does not join the two predicates. This
+// query is semantically identical to {'a.b': 1, 'a.c': 1}.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixElemMatchNotShared) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.b': 1, a: {$elemMatch: {c: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Bounds cannot be compounded for the multikey index even though there are
+// $elemMatch's, because there is not an $elemMatch which joins the two
+// predicates. This query is semantically identical to {'a.b': 1, 'a.c': 1}.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixTwoElemMatches) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{$and: [{a: {$elemMatch: {b: 1}}}, {a: {$elemMatch: {c: 1}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Bounds for the predicates joined by the $elemMatch over the shared prefix
+// "a" can be combined. However, the predicate 'a.b'==1 cannot also be combined
+// given that it is outside of the $elemMatch.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.b': 1, a: {$elemMatch: {b: {$gt: 0}, c: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[0,Infinity,false,true]], "
+ " 'a.c': [[1,1,true,true]]}}}}}");
+}
+
+// Bounds for the predicates joined by the $elemMatch over the shared prefix
+// "a" can be combined. However, the predicate outside the $elemMatch
+// cannot also be combined.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch2) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, 'a.b': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [[1,1,true,true]]}}}}}");
+}
+
+// Bounds for the predicates joined by the $elemMatch over the shared prefix
+// "a" can be combined. However, the predicate outside the $elemMatch
+// cannot also be combined.
+TEST_F(QueryPlannerTest, MultikeySharedPrefixNoIntersectOutsideElemMatch3) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.c': 2, a: {$elemMatch: {b: 1, c: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1}, bounds: "
+ "{'a.b': [[1,1,true,true]], "
+ " 'a.c': [[1,1,true,true]]}}}}}");
+}
+
+// There are two sets of fields that share a prefix: {'a.b', 'a.c'} and
+// {'d.e', 'd.f'}. Since the index is multikey, we can only use the bounds from
+// one member of each of these sets.
+TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesBasic) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
+ runQuery(fromjson("{'a.b': 1, 'a.c': 1, 'd.e': 1, 'd.f': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
+ "bounds: {'a.b':[[1,1,true,true]], "
+ " 'a.c':[['MinKey','MaxKey',true,true]], "
+ " 'd.e':[[1,1,true,true]], "
+ " 'd.f':[['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// All bounds can be combined. Although 'a.b' and 'a.c' share prefix 'a', the
+// relevant predicates are joined by an $elemMatch on 'a'. Similarly, predicates
+// over 'd.e' and 'd.f' are joined by an $elemMatch on 'd'.
+TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesTwoElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, d: {$elemMatch: {e: 1, f: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{a: {$elemMatch: {b: 1, c: 1}}},"
+ "{d: {$elemMatch: {e: 1, f: 1}}}]},"
+ "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
+ "bounds: {'a.b':[[1,1,true,true]], "
+ " 'a.c':[[1,1,true,true]], "
+ " 'd.e':[[1,1,true,true]], "
+ " 'd.f':[[1,1,true,true]]}}}}}");
+}
+
+// Bounds for 'a.b' and 'a.c' can be combined because of the $elemMatch on 'a'.
+// Since predicates on 'd.e' and 'd.f' have no $elemMatch, we use the bounds
+// for only one of the two.
+TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesOneElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: 1, c: 1}}, 'd.e': 1, 'd.f': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{a:{$elemMatch:{b:1,c:1}}}, {'d.f':1}]},"
+ "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
+ "bounds: {'a.b':[[1,1,true,true]], "
+ " 'a.c':[[1,1,true,true]], "
+ " 'd.e':[[1,1,true,true]], "
+ " 'd.f':[['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Bounds for 'd.e' and 'd.f' can be combined because of the $elemMatch on 'd'.
+// Since predicates on 'a.b' and 'a.c' have no $elemMatch, we use the bounds
+// for only one of the two.
+TEST_F(QueryPlannerTest, MultikeyTwoSharedPrefixesOneElemMatch2) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1 << "a.c" << 1 << "d.e" << 1 << "d.f" << 1), true);
+ runQuery(fromjson("{'a.b': 1, 'a.c': 1, d: {$elemMatch: {e: 1, f: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{d:{$elemMatch:{e:1,f:1}}}, {'a.c':1}]},"
+ "node: {ixscan: {pattern: {'a.b':1,'a.c':1,'d.e':1,'d.f':1},"
+ "bounds: {'a.b':[[1,1,true,true]], "
+ " 'a.c':[['MinKey','MaxKey',true,true]], "
+ " 'd.e':[[1,1,true,true]], "
+ " 'd.f':[[1,1,true,true]]}}}}}");
+}
+
+// The bounds cannot be compounded because 'a.b.x' and 'a.b.y' share prefix
+// 'a.b' (and there is no $elemMatch).
+TEST_F(QueryPlannerTest, MultikeyDoubleDottedNoElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
+ runQuery(fromjson("{'a.b.y': 1, 'a.b.x': 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
+ "{'a.b.x': [[1,1,true,true]], "
+ " 'a.b.y': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// The bounds can be compounded because the predicates are joined by an
+// $elemMatch on the shared prefix "a.b".
+TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {x: 1, y: 1}}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
+ "{'a.b.x': [[1,1,true,true]], "
+ " 'a.b.y': [[1,1,true,true]]}}}}}");
+}
+
+// The bounds cannot be compounded. Although there is an $elemMatch that appears
+// to join the predicates, the path to which the $elemMatch is applied is "a".
+// Therefore, the predicates contained in the $elemMatch are over "b.x" and "b.y".
+// They cannot be compounded due to shared prefix "b".
+TEST_F(QueryPlannerTest, MultikeyDoubleDottedUnhelpfulElemMatch) {
+ // true means multikey
+ addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {'b.x': 1, 'b.y': 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
+ "{'a.b.x': [[1,1,true,true]], "
+ " 'a.b.y': [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// The bounds can be compounded because the predicates are joined by an
+// $elemMatch on the shared prefix "a.b".
+TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatchOnDotted) {
+ // true means multikey
+ addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);
+ runQuery(fromjson("{'a.b': {$elemMatch: {x: 1, y: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
+ "{'a.b.x': [[1,1,true,true]], "
+ " 'a.b.y': [[1,1,true,true]]}}}}}");
+}
+
+// This one is subtle. Say we compound the bounds for predicates over "a.b.c" and
+// "a.b.d". This is okay because of the predicate over the shared prefix "a.b".
+// It might seem like we can do the same for the $elemMatch over shared prefix "a.e",
+// thus combining all bounds. But in fact, we can't combine any more bounds because
+// we have already used prefix "a". In other words, this query is like having predicates
+// over "a.b" and "a.e", so we can only use bounds from one of the two.
+TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1 << "a.e.f" << 1 << "a.b.d" << 1 << "a.e.g" << 1), true);
+ runQuery(fromjson(
+ "{'a.b': {$elemMatch: {c: 1, d: 1}}, "
+ "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c':1,'a.e.f':1,'a.b.d':1,'a.e.g':1},"
+ "bounds: {'a.b.c':[[1,1,true,true]], "
+ " 'a.e.f':[['MinKey','MaxKey',true,true]], "
+ " 'a.b.d':[[1,1,true,true]], "
+ " 'a.e.g':[['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// Similar to MultikeyComplexDoubleDotted above.
+TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted2) {
+ // true means multikey
+ addIndex(BSON("a.b.c" << 1 << "a.e.c" << 1 << "a.b.d" << 1 << "a.e.d" << 1), true);
+ runQuery(fromjson(
+ "{'a.b': {$elemMatch: {c: 1, d: 1}}, "
+ "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {'a.b.c':1,'a.e.c':1,'a.b.d':1,'a.e.d':1},"
+ "bounds: {'a.b.c':[[1,1,true,true]], "
+ " 'a.e.c':[['MinKey','MaxKey',true,true]], "
+ " 'a.b.d':[[1,1,true,true]], "
+ " 'a.e.d':[['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+// SERVER-13422: check that we plan $elemMatch object correctly with
+// index intersection.
+TEST_F(QueryPlannerTest, ElemMatchIndexIntersection) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("shortId" << 1));
+ // true means multikey
+ addIndex(BSON("a.b.startDate" << 1), true);
+ addIndex(BSON("a.b.endDate" << 1), true);
+
+ runQuery(fromjson(
+ "{shortId: 3, 'a.b': {$elemMatch: {startDate: {$lte: 3},"
+ "endDate: {$gt: 6}}}}"));
+
+ assertNumSolutions(6U);
+
+ // 3 single index solutions.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {shortId: 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.startDate': 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b.endDate': 1}}}}}");
+
+ // 3 index intersection solutions. The last one has to intersect two
+ // predicates within the $elemMatch object.
+ assertSolutionExists(
+ "{fetch: {node: {andHash: {nodes: ["
+ "{ixscan: {pattern: {shortId: 1}}},"
+ "{ixscan: {pattern: {'a.b.startDate': 1}}}]}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {andHash: {nodes: ["
+ "{ixscan: {pattern: {shortId: 1}}},"
+ "{ixscan: {pattern: {'a.b.endDate': 1}}}]}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {andHash: {nodes: ["
+ "{ixscan: {pattern: {'a.b.startDate': 1}}},"
+ "{ixscan: {pattern: {'a.b.endDate': 1}}}]}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, NegationBelowElemMatchValue) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+
+ runQuery(fromjson("{a: {$elemMatch: {$ne: 2}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{$ne:2}}}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}, bounds: "
+ "{a: [['MinKey',2,true,false], [2,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, AndWithNegationBelowElemMatchValue) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ addIndex(BSON("b" << 1), true);
+
+ runQuery(fromjson("{b: 10, a: {$elemMatch: {$not: {$gt: 4}}}}"));
+
+ // One solution using index on 'b' and one using index on 'a'.
+ assertNumSolutions(2U);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {b: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: {a: "
+ "[['MinKey',4,true,true],[Infinity,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, AndWithNegationBelowElemMatchValue2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+
+ runQuery(fromjson("{b: 10, a: {$elemMatch: {$not: {$gt: 4}, $gt: 2}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: "
+ "{a: [[2, 4, false, true]]}}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, NegationBelowElemMatchValueBelowElemMatchObject) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+
+ runQuery(fromjson("{a: {$elemMatch: {b: {$elemMatch: {$ne: 4}}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter: null, pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [['MinKey',4,true,false],[4,'MaxKey',false,true]]}}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, NegationBelowElemMatchValueBelowOrBelowAnd) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{c: 3, $or: [{a: {$elemMatch: {$ne: 4, $ne: 3}}}, {b: 5}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {c:3}, node: {or: {nodes: ["
+ "{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}, bounds: "
+ "{a: [['MinKey',3,true,false],"
+ "[3,4,false,false],"
+ "[4,'MaxKey',false,true]]}}}}}, "
+ "{ixscan: {filter: null, pattern: {b: 1}, bounds: "
+ "{b: [[5,5,true,true]]}}}]}}}}");
+}
+
+// SERVER-14718
+TEST_F(QueryPlannerTest, CantIndexNegationBelowElemMatchValue) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+
+ runQuery(fromjson("{a: {$elemMatch: {$not: {$mod: [2, 0]}}}}"));
+
+ // There are no indexed solutions, because negations of $mod are not indexable.
+ assertNumSolutions(0);
+}
+
+/**
+ * Index bounds constraints on a field should not be intersected
+ * if the index is multikey.
+ */
+TEST_F(QueryPlannerTest, MultikeyTwoConstraintsSameField) {
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: {$gt: 0, $lt: 5}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {filter: {$and: [{a: {$lt: 5}}, {a: {$gt: 0}}]}, dir: 1}}");
+
+ std::vector<std::string> alternates;
+ alternates.push_back(
+ "{fetch: {filter: {a: {$lt: 5}}, node: {ixscan: {filter: null, "
+ "pattern: {a: 1}, bounds: {a: [[0, Infinity, false, true]]}}}}}");
+ alternates.push_back(
+ "{fetch: {filter: {a: {$gt: 0}}, node: {ixscan: {filter: null, "
+ "pattern: {a: 1}, bounds: {a: [[-Infinity, 5, true, false]]}}}}}");
+ assertHasOneSolutionOf(alternates);
+}
+
+/**
+ * Constraints on fields with a shared parent should not be intersected
+ * if the index is multikey.
+ */
+TEST_F(QueryPlannerTest, MultikeyTwoConstraintsDifferentFields) {
+ addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
+ runQuery(fromjson("{'a.b': 2, 'a.c': 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {filter: {$and: [{'a.b': 2}, {'a.c': 3}]}, dir: 1}}");
+
+ std::vector<std::string> alternates;
+ alternates.push_back(
+ "{fetch: {filter: {'a.c': 3}, node: {ixscan: {filter: null, "
+ "pattern: {'a.b': 1, 'a.c': 1}, bounds: "
+ "{'a.b': [[2,2,true,true]], "
+ " 'a.c': [['MinKey','MaxKey',true,true]]}}}}}");
+ alternates.push_back(
+ "{fetch: {filter: {'a.b': 2}, node: {ixscan: {filter: null, "
+ "pattern: {'a.b': 1, 'a.c': 1}, bounds: "
+ "{'a.b': [['MinKey','MaxKey',true,true]], "
+ " 'a.c': [[3,3,true,true]]}}}}}");
+ assertHasOneSolutionOf(alternates);
+}
} // namespace
diff --git a/src/mongo/db/query/query_planner_common.cpp b/src/mongo/db/query/query_planner_common.cpp
index e7d5e207959..2ae977a825e 100644
--- a/src/mongo/db/query/query_planner_common.cpp
+++ b/src/mongo/db/query/query_planner_common.cpp
@@ -36,52 +36,49 @@
namespace mongo {
- void QueryPlannerCommon::reverseScans(QuerySolutionNode* node) {
- StageType type = node->getType();
+void QueryPlannerCommon::reverseScans(QuerySolutionNode* node) {
+ StageType type = node->getType();
- if (STAGE_IXSCAN == type) {
- IndexScanNode* isn = static_cast<IndexScanNode*>(node);
- isn->direction *= -1;
+ if (STAGE_IXSCAN == type) {
+ IndexScanNode* isn = static_cast<IndexScanNode*>(node);
+ isn->direction *= -1;
- if (isn->bounds.isSimpleRange) {
- std::swap(isn->bounds.startKey, isn->bounds.endKey);
- // XXX: Not having a startKeyInclusive means that if we reverse a max/min query
- // we have different results with and without the reverse...
- isn->bounds.endKeyInclusive = true;
- }
- else {
- for (size_t i = 0; i < isn->bounds.fields.size(); ++i) {
- std::vector<Interval>& iv = isn->bounds.fields[i].intervals;
- // Step 1: reverse the list.
- std::reverse(iv.begin(), iv.end());
- // Step 2: reverse each interval.
- for (size_t j = 0; j < iv.size(); ++j) {
- iv[j].reverse();
- }
+ if (isn->bounds.isSimpleRange) {
+ std::swap(isn->bounds.startKey, isn->bounds.endKey);
+ // XXX: Not having a startKeyInclusive means that if we reverse a max/min query
+ // we have different results with and without the reverse...
+ isn->bounds.endKeyInclusive = true;
+ } else {
+ for (size_t i = 0; i < isn->bounds.fields.size(); ++i) {
+ std::vector<Interval>& iv = isn->bounds.fields[i].intervals;
+ // Step 1: reverse the list.
+ std::reverse(iv.begin(), iv.end());
+ // Step 2: reverse each interval.
+ for (size_t j = 0; j < iv.size(); ++j) {
+ iv[j].reverse();
}
}
-
- if (!isn->bounds.isValidFor(isn->indexKeyPattern, isn->direction)) {
- LOG(5) << "Invalid bounds: " << isn->bounds.toString() << std::endl;
- invariant(0);
- }
-
- // TODO: we can just negate every value in the already computed properties.
- isn->computeProperties();
- }
- else if (STAGE_SORT_MERGE == type) {
- // reverse direction of comparison for merge
- MergeSortNode* msn = static_cast<MergeSortNode*>(node);
- msn->sort = reverseSortObj(msn->sort);
- }
- else {
- invariant(STAGE_SORT != type);
- // This shouldn't be here...
}
- for (size_t i = 0; i < node->children.size(); ++i) {
- reverseScans(node->children[i]);
+ if (!isn->bounds.isValidFor(isn->indexKeyPattern, isn->direction)) {
+ LOG(5) << "Invalid bounds: " << isn->bounds.toString() << std::endl;
+ invariant(0);
}
+
+ // TODO: we can just negate every value in the already computed properties.
+ isn->computeProperties();
+ } else if (STAGE_SORT_MERGE == type) {
+ // reverse direction of comparison for merge
+ MergeSortNode* msn = static_cast<MergeSortNode*>(node);
+ msn->sort = reverseSortObj(msn->sort);
+ } else {
+ invariant(STAGE_SORT != type);
+ // This shouldn't be here...
+ }
+
+ for (size_t i = 0; i < node->children.size(); ++i) {
+ reverseScans(node->children[i]);
}
+}
} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_common.h b/src/mongo/db/query/query_planner_common.h
index 848e0f00ba1..4fdf8f30815 100644
--- a/src/mongo/db/query/query_planner_common.h
+++ b/src/mongo/db/query/query_planner_common.h
@@ -34,53 +34,53 @@
namespace mongo {
+/**
+ * Methods used by several parts of the planning process.
+ */
+class QueryPlannerCommon {
+public:
/**
- * Methods used by several parts of the planning process.
+ * Does the tree rooted at 'root' have a node with matchType 'type'?
+ *
+ * If 'out' is not NULL, sets 'out' to the first node of type 'type' encountered.
*/
- class QueryPlannerCommon {
- public:
- /**
- * Does the tree rooted at 'root' have a node with matchType 'type'?
- *
- * If 'out' is not NULL, sets 'out' to the first node of type 'type' encountered.
- */
- static bool hasNode(MatchExpression* root, MatchExpression::MatchType type,
- MatchExpression** out = NULL) {
- if (type == root->matchType()) {
- if (NULL != out) {
- *out = root;
- }
- return true;
+ static bool hasNode(MatchExpression* root,
+ MatchExpression::MatchType type,
+ MatchExpression** out = NULL) {
+ if (type == root->matchType()) {
+ if (NULL != out) {
+ *out = root;
}
-
- for (size_t i = 0; i < root->numChildren(); ++i) {
- if (hasNode(root->getChild(i), type, out)) {
- return true;
- }
- }
- return false;
+ return true;
}
- /**
- * Assumes the provided BSONObj is of the form {field1: -+1, ..., field2: -+1}
- * Returns a BSONObj with the values negated.
- */
- static BSONObj reverseSortObj(const BSONObj& sortObj) {
- BSONObjBuilder reverseBob;
- BSONObjIterator it(sortObj);
- while (it.more()) {
- BSONElement elt = it.next();
- reverseBob.append(elt.fieldName(), elt.numberInt() * -1);
+ for (size_t i = 0; i < root->numChildren(); ++i) {
+ if (hasNode(root->getChild(i), type, out)) {
+ return true;
}
- return reverseBob.obj();
}
+ return false;
+ }
- /**
- * Traverses the tree rooted at 'node'. For every STAGE_IXSCAN encountered, reverse
- * the scan direction and index bounds.
- */
- static void reverseScans(QuerySolutionNode* node);
+ /**
+ * Assumes the provided BSONObj is of the form {field1: -+1, ..., field2: -+1}
+ * Returns a BSONObj with the values negated.
+ */
+ static BSONObj reverseSortObj(const BSONObj& sortObj) {
+ BSONObjBuilder reverseBob;
+ BSONObjIterator it(sortObj);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ reverseBob.append(elt.fieldName(), elt.numberInt() * -1);
+ }
+ return reverseBob.obj();
+ }
- };
+ /**
+ * Traverses the tree rooted at 'node'. For every STAGE_IXSCAN encountered, reverse
+ * the scan direction and index bounds.
+ */
+ static void reverseScans(QuerySolutionNode* node);
+};
} // namespace mongo
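
The reverseSortObj() helper shown above simply negates each value of a sort specification, so {a: 1, b: -1} becomes {a: -1, b: 1}. A minimal sketch of the same idea, using an ordered vector of (field, direction) pairs as a stand-in for BSONObj (the container choice is illustrative, not MongoDB's API):

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    using SortSpec = std::vector<std::pair<std::string, int>>;

    // Negate every direction in the sort spec while preserving field order,
    // just as reverseSortObj() does for a BSON sort pattern.
    SortSpec reverseSortSpec(const SortSpec& spec) {
        SortSpec reversed;
        for (const auto& field : spec) {
            reversed.emplace_back(field.first, field.second * -1);
        }
        return reversed;
    }

    int main() {
        SortSpec spec = {{"a", 1}, {"b", -1}};
        for (const auto& field : reverseSortSpec(spec)) {
            std::cout << field.first << ": " << field.second << "\n";  // a: -1, then b: 1
        }
        return 0;
    }
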
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index ca405bad41d..11fc175d2ac 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -35,703 +35,845 @@
namespace {
- using namespace mongo;
-
- TEST_F(QueryPlannerTest, Basic2DNonNear) {
- // 2d can answer: within poly, within center, within centersphere, within box.
- // And it can use an index (or not) for each of them. As such, 2 solns expected.
- addIndex(BSON("a" << "2d"));
-
- // Polygon
- runQuery(fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Center
- runQuery(fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Centersphere
- runQuery(fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // Within box.
- runQuery(fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
-
- // TODO: test that we *don't* annotate for things we shouldn't.
- }
-
- TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("loc" << "2dsphere"));
-
- runQuery(fromjson("{loc:{$near:{$geometry:{type:'Point',"
- "coordinates : [-81.513743,28.369947] },"
- " $maxDistance :100}},a: 'mouse'}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {loc: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, Basic2DCompound) {
- addIndex(BSON("loc" << "2d" << "a" << 1));
-
- runQuery(fromjson("{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
- "a: 'mouse' }"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {loc : '2d', a: 1},"
- "filter: {a: 'mouse'},"
- "bounds: {loc: []," // Ignored since complex
- " a: [['MinKey','MaxKey',true,true]]}"
- "}}}}");
- }
-
- TEST_F(QueryPlannerTest, Multikey2DSphereCompound) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1), true);
- addIndex(BSON("loc" << "2dsphere"), true);
-
- runQuery(fromjson("{loc:{$near:{$geometry:{type:'Point',"
- "coordinates : [-81.513743,28.369947] },"
- " $maxDistance :100}},a: 'mouse'}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {loc: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, Basic2DSphereNonNear) {
- // 2dsphere can do: within+geometry, intersects+geometry
- addIndex(BSON("a" << "2dsphere"));
-
- runQuery(fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- runQuery(fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- // TODO: test that we *don't* annotate for things we shouldn't.
- }
-
- TEST_F(QueryPlannerTest, Multikey2DSphereNonNear) {
- // 2dsphere can do: within+geometry, intersects+geometry
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
-
- runQuery(fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- runQuery(fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
-
- // TODO: test that we *don't* annotate for things we shouldn't.
- }
-
- TEST_F(QueryPlannerTest, Basic2DGeoNear) {
- // Can only do near + old point.
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2d: {a: '2d'}}");
- }
-
- TEST_F(QueryPlannerTest, Basic2DSphereGeoNear) {
- // Can do nearSphere + old point, near + new point.
- addIndex(BSON("a" << "2dsphere"));
-
- runQuery(fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
-
- runQuery(fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, Multikey2DSphereGeoNear) {
- // Can do nearSphere + old point, near + new point.
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
-
- runQuery(fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
-
- runQuery(fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, Basic2DSphereGeoNearReverseCompound) {
- addIndex(BSON("x" << 1));
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- runQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, Multikey2DSphereGeoNearReverseCompound) {
- addIndex(BSON("x" << 1), true);
- addIndex(BSON("x" << 1 << "a" << "2dsphere"), true);
- runQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, NearNoIndex) {
- addIndex(BSON("x" << 1));
- runInvalidQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereNoGeoPred) {
- addIndex(BSON("x" << 1 << "a" << "2dsphere"));
- runQuery(fromjson("{x:1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereNoGeoPredMultikey) {
- addIndex(BSON("x" << 1 << "a" << "2dsphere"), true);
- runQuery(fromjson("{x:1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
- }
-
- // SERVER-14723
- TEST_F(QueryPlannerTest, GeoNearMultipleRelevantIndicesButOnlyOneCompatible) {
- addIndex(BSON("a" << "2dsphere"));
- addIndex(BSON("b" << 1 << "a" << "2dsphere"));
-
- runQuery(fromjson("{a: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0,0]}}},"
- " b: {$exists: false}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- // SERVER-3984, $or 2d index
- TEST_F(QueryPlannerTest, Or2DNonNear) {
- addIndex(BSON("a" << "2d"));
- addIndex(BSON("b" << "2d"));
- runQuery(fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
- }
-
- // SERVER-3984, $or 2d index
- TEST_F(QueryPlannerTest, Or2DSameFieldNonNear) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
- }
-
- // SERVER-3984, $or 2dsphere index
- TEST_F(QueryPlannerTest, Or2DSphereNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- addIndex(BSON("b" << "2dsphere"));
- runQuery(fromjson("{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
- }
-
- // SERVER-3984, $or 2dsphere index
- TEST_F(QueryPlannerTest, Or2DSphereNonNearMultikey) {
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
- addIndex(BSON("b" << "2dsphere"), true);
- runQuery(fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: "
- "[{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
- "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
- }
-
- TEST_F(QueryPlannerTest, And2DSameFieldNonNear) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- // Bounds of the two 2d geo predicates are combined into
- // a single index scan.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, And2DWith2DNearSameField) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $near : [ 5, 5 ] } } ]}"));
-
- // GEO_NEAR must use the index, and GEO predicate becomes a filter.
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: { node : { geoNear2d: {a: '2d'} } } }");
- }
-
- TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- runQuery(fromjson("{$and: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- // Bounds of the two 2dsphere geo predicates are combined into
- // a single index scan.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNearMultikey) {
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
- runQuery(fromjson("{$and: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- // Bounds of the two 2dsphere geo predicates are combined into
- // a single index scan.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, And2DSphereWithNearSameField) {
- addIndex(BSON("a" << "2dsphere"));
- runQuery(fromjson("{$and: [{a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- "{a: {$near: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
-
- // GEO_NEAR must use the index, and GEO predicate becomes a filter.
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, And2DSphereWithNearSameFieldMultikey) {
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
- runQuery(fromjson("{$and: [{a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- "{a: {$near: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
-
- // GEO_NEAR must use the index, and GEO predicate becomes a filter.
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNear) {
- addIndex(BSON("a" << "2dsphere"));
- runQuery(fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNearMultikey) {
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
- runQuery(fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNear) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << "2dsphere"), true);
- runQuery(fromjson("{a: {$gte: 0}, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{geoNear2dsphere: {a: 1, b: '2dsphere'}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearFetchRequired) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << "2dsphere"), true);
- runQuery(fromjson("{a: {$gte: 0, $lt: 5}, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:{$gte:0}}, node: "
- "{geoNear2dsphere: {a: 1, b: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleIndices) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << "2dsphere"), true);
- addIndex(BSON("c" << 1 << "b" << "2dsphere"), true);
- runQuery(fromjson("{a: {$gte: 0}, c: 3, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {c:3}, node: "
- "{geoNear2dsphere: {a: 1, b: '2dsphere'}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$gte:0}}, node: "
- "{geoNear2dsphere: {c: 1, b: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleLeadingFields) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << "2dsphere"), true);
- runQuery(fromjson("{a: {$lt: 5, $gt: 1}, b: 6, c: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:{$gt:1}}, node: "
- "{geoNear2dsphere: {a: 1, b: 1, c: '2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleGeoPreds) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << "2dsphere"), true);
- runQuery(fromjson("{a: 1, b: 6, $and: ["
- "{c: {$near: {$geometry: {type: 'Point', coordinates: [2, 2]}}}},"
- "{c: {$geoWithin: {$box: [ [1, 1], [3, 3] ] } } } ] }"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a:1, b:1, c:'2dsphere'}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << "2dsphere" << "c" << 1 << "d" << 1), true);
- runQuery(fromjson("{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
- "b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {d:{$gt:1},c:{$gte:0}}, node: "
- "{geoNear2dsphere: {a: 1, b: '2dsphere', c: 1, d: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
- // true means multikey
- addIndex(BSON("a" << "2d" << "b" << 1), true);
- runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: { filter : {b:{$gte: 0}}, node: "
- "{geoNear2d: {a: '2d', b: 1} } } }");
- }
-
- // SERVER-9257
- TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicate) {
- addIndex(BSON("creationDate" << 1 << "foo.bar" << "2dsphere"));
- runQuerySortProj(fromjson("{creationDate: { $gt: 7}}"),
- fromjson("{creationDate: 1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {creationDate: 1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
- }
-
- // SERVER-9257
- TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicateMultikey) {
- // true means multikey
- addIndex(BSON("creationDate" << 1 << "foo.bar" << "2dsphere"), true);
- runQuerySortProj(fromjson("{creationDate: { $gt: 7}}"),
- fromjson("{creationDate: 1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {creationDate: 1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
- }
-
- // Test that a 2dsphere index can satisfy a whole index scan solution if the query has a GEO
- // predicate on at least one of the indexed geo fields.
- // Currently fails. Tracked by SERVER-10801.
- /*
- TEST_F(QueryPlannerTest, SortOnGeoQuery) {
- addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
- runQuerySortProj(query, sort, BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortOnGeoQueryMultikey) {
- // true means multikey
- addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"), true);
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
- "coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
- runQuerySortProj(query, sort, BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: "
- "{timestamp: -1, position: '2dsphere'}}}}}");
- }
- */
-
-
- //
- // Sort
- //
-
- TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSort) {
- addIndex(BSON("x" << "2dsphere"));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSortWithIndexablePred) {
- addIndex(BSON("x" << "2dsphere"));
- runQuerySortProj(fromjson("{x: {$geoIntersects: {$geometry: {type: 'Point',"
- " coordinates: [0, 0]}}}}"),
- BSON("x" << 1),
- BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {x: '2dsphere'}}}}}}}");
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{cscan: {dir: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseCompoundGeoIndexToProvideSortIfNoGeoPred) {
- addIndex(BSON("x" << 1 << "y" << "2dsphere"));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CanUseCompoundGeoIndexToProvideSortWithGeoPred) {
- addIndex(BSON("x" << 1 << "y" << "2dsphere"));
- runQuerySortProj(fromjson("{x: 1, y: {$geoIntersects: {$geometry: {type: 'Point',"
- " coordinates: [0, 0]}}}}"),
- BSON("x" << 1),
- BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{fetch: {node: "
- "{ixscan: {pattern: {x: 1, y: '2dsphere'}}}}}");
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{cscan: {dir: 1}}}}");
- }
-
- //
- // Negation
- //
-
- //
- // 2D geo negation
- // The filter b != 1 is embedded in the geoNear2d node.
+using namespace mongo;
+
+TEST_F(QueryPlannerTest, Basic2DNonNear) {
+ // 2d can answer: within poly, within center, within centersphere, within box.
+ // And it can use an index (or not) for each of them. As such, 2 solns expected.
+ addIndex(BSON("a"
+ << "2d"));
+
+ // Polygon
+ runQuery(fromjson("{a : { $within: { $polygon : [[0,0], [2,0], [4,0]] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Center
+ runQuery(fromjson("{a : { $within : { $center : [[ 5, 5 ], 7 ] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Centersphere
+ runQuery(fromjson("{a : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // Within box.
+ runQuery(fromjson("{a : {$within: {$box : [[0,0],[9,9]]}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+
+ // TODO: test that we *don't* annotate for things we shouldn't.
+}
+
+TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("loc"
+ << "2dsphere"));
+
+ runQuery(fromjson(
+ "{loc:{$near:{$geometry:{type:'Point',"
+ "coordinates : [-81.513743,28.369947] },"
+ " $maxDistance :100}},a: 'mouse'}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {loc: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, Basic2DCompound) {
+ addIndex(BSON("loc"
+ << "2d"
+ << "a" << 1));
+
+ runQuery(fromjson(
+ "{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
+ "a: 'mouse' }"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {loc : '2d', a: 1},"
+ "filter: {a: 'mouse'},"
+ "bounds: {loc: []," // Ignored since complex
+ " a: [['MinKey','MaxKey',true,true]]}"
+ "}}}}");
+}
+
+TEST_F(QueryPlannerTest, Multikey2DSphereCompound) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1), true);
+ addIndex(BSON("loc"
+ << "2dsphere"),
+ true);
+
+ runQuery(fromjson(
+ "{loc:{$near:{$geometry:{type:'Point',"
+ "coordinates : [-81.513743,28.369947] },"
+ " $maxDistance :100}},a: 'mouse'}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {loc: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, Basic2DSphereNonNear) {
+ // 2dsphere can do: within+geometry, intersects+geometry
+ addIndex(BSON("a"
+ << "2dsphere"));
+
+ runQuery(fromjson(
+ "{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ runQuery(fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ // TODO: test that we *don't* annotate for things we shouldn't.
+}
+
+TEST_F(QueryPlannerTest, Multikey2DSphereNonNear) {
+ // 2dsphere can do: within+geometry, intersects+geometry
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+
+ runQuery(fromjson(
+ "{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ runQuery(fromjson("{a : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+
+ // TODO: test that we *don't* annotate for things we shouldn't.
+}
+
+TEST_F(QueryPlannerTest, Basic2DGeoNear) {
// Can only do near + old point.
- //
- TEST_F(QueryPlannerTest, Negation2DGeoNear) {
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{$and: [{a: {$near: [0, 0], $maxDistance: 0.3}}, {b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: { geoNear2d: {a: '2d'} } } }");
- }
-
- //
- // 2DSphere geo negation
- // Filter is embedded in a separate fetch node.
- //
- TEST_F(QueryPlannerTest, Negation2DSphereGeoNear) {
- // Can do nearSphere + old point, near + new point.
- addIndex(BSON("a" << "2dsphere"));
-
- runQuery(fromjson("{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
- "{b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
-
- runQuery(fromjson("{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
- "coordinates: [0, 0]},"
- "$maxDistance: 100}}},"
- "{b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- //
- // 2DSphere geo negation
- // Filter is embedded in a separate fetch node.
- //
- TEST_F(QueryPlannerTest, Negation2DSphereGeoNearMultikey) {
- // Can do nearSphere + old point, near + new point.
- // true means multikey
- addIndex(BSON("a" << "2dsphere"), true);
-
- runQuery(fromjson("{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
- "{b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
-
- runQuery(fromjson("{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
- "coordinates: [0, 0]},"
- "$maxDistance: 100}}},"
- "{b: {$ne: 1}}]}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
- }
-
- //
- // 2dsphere V2 sparse indices, SERVER-9639
- //
-
- // Basic usage of a sparse 2dsphere index. V1 ignores the sparse field. We can use any prefix
- // of the index as every document is indexed.
- TEST_F(QueryPlannerTest, TwoDSphereSparseV1) {
- // Create a V1 index.
- addIndex(BSON("nonGeo" << 1 << "geo" << "2dsphere"),
- BSON("2dsphereIndexVersion" << 1));
-
- // Can use the index for this.
- runQuery(fromjson("{nonGeo: 7}"));
- assertNumSolutions(2);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {nonGeo: 1, geo: '2dsphere'}}}}}");
- }
-
- // V2 is "geo sparse" and removes the nonGeo assignment.
- TEST_F(QueryPlannerTest, TwoDSphereSparseV2CantUse) {
- // Create a V2 index.
- addIndex(BSON("nonGeo" << 1 << "geo" << "2dsphere"),
- BSON("2dsphereIndexVersion" << 2));
-
- // Can't use the index prefix here as it's a V2 index and we have no geo pred.
- runQuery(fromjson("{nonGeo: 7}"));
- assertNumSolutions(1);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereSparseOnePred) {
- // Create a V2 index.
- addIndex(BSON("geo" << "2dsphere"),
- BSON("2dsphereIndexVersion" << 2));
-
- // We can use the index here as we have a geo pred.
- runQuery(fromjson("{geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}}"));
- assertNumSolutions(2);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // V2 is geo-sparse and the planner removes the nonGeo assignment when there's no geo pred
- TEST_F(QueryPlannerTest, TwoDSphereSparseV2TwoPreds) {
- addIndex(BSON("nonGeo" << 1 << "geo" << "2dsphere" << "geo2" << "2dsphere"),
- BSON("2dsphereIndexVersion" << 2));
-
- // Non-geo preds can only use a collscan.
- runQuery(fromjson("{nonGeo: 7}"));
- assertNumSolutions(1);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // One geo pred so we can use the index.
- runQuery(fromjson("{nonGeo: 7, geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}}"));
- ASSERT_EQUALS(getNumSolutions(), 2U);
-
- // Two geo preds, so we can use the index still.
- runQuery(fromjson("{nonGeo: 7, geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] }},"
- " geo2 : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] }}}"));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- }
-
- TEST_F(QueryPlannerTest, TwoDNearCompound) {
- addIndex(BSON("geo" << "2dsphere" << "nongeo" << 1),
- BSON("2dsphereIndexVersion" << 2));
- runQuery(fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereSparseV2BelowOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("geo1" << "2dsphere" << "a" << 1 << "b" << 1),
- BSON("2dsphereIndexVersion" << 2));
- addIndex(BSON("geo2" << "2dsphere" << "a" << 1 << "b" << 1),
- BSON("2dsphereIndexVersion" << 2));
-
- runQuery(fromjson("{a: 4, b: 5, $or: ["
- "{geo1: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}},"
- "{geo2: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a: 4, b: 5}, node: {or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {geo1:'2dsphere',a:1,b:1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {geo2:'2dsphere',a:1,b:1}}}}}"
- "]}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoDSphereSparseV2BelowElemMatch) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a.b" << "2dsphere" << "a.c" << 1),
- BSON("2dsphereIndexVersion" << 2));
-
- runQuery(fromjson("{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
- "c: {$gt: 3}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': '2dsphere', 'a.c': 1}}}}}");
- }
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2d: {a: '2d'}}");
+}
+
+TEST_F(QueryPlannerTest, Basic2DSphereGeoNear) {
+ // Can do nearSphere + old point, near + new point.
+ addIndex(BSON("a"
+ << "2dsphere"));
+
+ runQuery(fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
+
+ runQuery(fromjson(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, Multikey2DSphereGeoNear) {
+ // Can do nearSphere + old point, near + new point.
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+
+ runQuery(fromjson("{a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
+
+ runQuery(fromjson(
+ "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {a: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, Basic2DSphereGeoNearReverseCompound) {
+ addIndex(BSON("x" << 1));
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ runQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, Multikey2DSphereGeoNearReverseCompound) {
+ addIndex(BSON("x" << 1), true);
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {x: 1, a: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, NearNoIndex) {
+ addIndex(BSON("x" << 1));
+ runInvalidQuery(fromjson("{x:1, a: {$nearSphere: [0,0], $maxDistance: 0.31 }}"));
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereNoGeoPred) {
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"));
+ runQuery(fromjson("{x:1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereNoGeoPredMultikey) {
+ addIndex(BSON("x" << 1 << "a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson("{x:1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1, a: '2dsphere'}}}}}");
+}
+
+// SERVER-14723
+TEST_F(QueryPlannerTest, GeoNearMultipleRelevantIndicesButOnlyOneCompatible) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ addIndex(BSON("b" << 1 << "a"
+ << "2dsphere"));
+
+ runQuery(fromjson(
+ "{a: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0,0]}}},"
+ " b: {$exists: false}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+// SERVER-3984, $or 2d index
+TEST_F(QueryPlannerTest, Or2DNonNear) {
+ addIndex(BSON("a"
+ << "2d"));
+ addIndex(BSON("b"
+ << "2d"));
+ runQuery(fromjson(
+ "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2d'}}}}}]}}");
+}
+
+// SERVER-3984, $or 2d index
+TEST_F(QueryPlannerTest, Or2DSameFieldNonNear) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson(
+ "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+}
+
+// SERVER-3984, $or 2dsphere index
+TEST_F(QueryPlannerTest, Or2DSphereNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ addIndex(BSON("b"
+ << "2dsphere"));
+ runQuery(fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: [{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+}
+
+// SERVER-3984, $or 2dsphere index
+TEST_F(QueryPlannerTest, Or2DSphereNonNearMultikey) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+ addIndex(BSON("b"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: "
+ "[{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {b: '2dsphere'}}}}}]}}");
+}
+
+TEST_F(QueryPlannerTest, And2DSameFieldNonNear) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson(
+ "{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ // Bounds of the two 2d geo predicates are combined into
+ // a single index scan.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2d'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, And2DWith2DNearSameField) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson(
+ "{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $near : [ 5, 5 ] } } ]}"));
+
+ // GEO_NEAR must use the index, and GEO predicate becomes a filter.
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: { node : { geoNear2d: {a: '2d'} } } }");
+}
+
+TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ runQuery(fromjson(
+ "{$and: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ // Bounds of the two 2dsphere geo predicates are combined into
+ // a single index scan.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNearMultikey) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{$and: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ // Bounds of the two 2dsphere geo predicates are combined into
+ // a single index scan.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, And2DSphereWithNearSameField) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ runQuery(fromjson(
+ "{$and: [{a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ "{a: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
+
+ // GEO_NEAR must use the index, and GEO predicate becomes a filter.
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, And2DSphereWithNearSameFieldMultikey) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{$and: [{a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ "{a: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
+
+ // GEO_NEAR must use the index, and GEO predicate becomes a filter.
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNear) {
+ addIndex(BSON("a"
+ << "2dsphere"));
+ runQuery(fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNearMultikey) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNear) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: {$gte: 0}, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{geoNear2dsphere: {a: 1, b: '2dsphere'}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearFetchRequired) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: {$gte: 0, $lt: 5}, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gte:0}}, node: "
+ "{geoNear2dsphere: {a: 1, b: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleIndices) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b"
+ << "2dsphere"),
+ true);
+ addIndex(BSON("c" << 1 << "b"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: {$gte: 0}, c: 3, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {c:3}, node: "
+ "{geoNear2dsphere: {a: 1, b: '2dsphere'}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gte:0}}, node: "
+ "{geoNear2dsphere: {c: 1, b: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleLeadingFields) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1 << "c"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: {$lt: 5, $gt: 1}, b: 6, c: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:1}}, node: "
+ "{geoNear2dsphere: {a: 1, b: 1, c: '2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleGeoPreds) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1 << "c"
+ << "2dsphere"),
+ true);
+ runQuery(fromjson(
+ "{a: 1, b: 6, $and: ["
+ "{c: {$near: {$geometry: {type: 'Point', coordinates: [2, 2]}}}},"
+ "{c: {$geoWithin: {$box: [ [1, 1], [3, 3] ] } } } ] }"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a:1, b:1, c:'2dsphere'}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b"
+ << "2dsphere"
+ << "c" << 1 << "d" << 1),
+ true);
+ runQuery(fromjson(
+ "{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
+ "b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {d:{$gt:1},c:{$gte:0}}, node: "
+ "{geoNear2dsphere: {a: 1, b: '2dsphere', c: 1, d: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
+ // true means multikey
+ addIndex(BSON("a"
+ << "2d"
+ << "b" << 1),
+ true);
+ runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: { filter : {b:{$gte: 0}}, node: "
+ "{geoNear2d: {a: '2d', b: 1} } } }");
+}
+
+// SERVER-9257
+TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicate) {
+ addIndex(BSON("creationDate" << 1 << "foo.bar"
+ << "2dsphere"));
+ runQuerySortProj(
+ fromjson("{creationDate: { $gt: 7}}"), fromjson("{creationDate: 1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {creationDate: 1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
+}
+
+// SERVER-9257
+TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicateMultikey) {
+ // true means multikey
+ addIndex(BSON("creationDate" << 1 << "foo.bar"
+ << "2dsphere"),
+ true);
+ runQuerySortProj(
+ fromjson("{creationDate: { $gt: 7}}"), fromjson("{creationDate: 1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {creationDate: 1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {creationDate: 1, 'foo.bar': '2dsphere'}}}}}");
+}
+
+// Test that a 2dsphere index can satisfy a whole index scan solution if the query has a GEO
+// predicate on at least one of the indexed geo fields.
+// Currently fails. Tracked by SERVER-10801.
+/*
+TEST_F(QueryPlannerTest, SortOnGeoQuery) {
+ addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
+ BSONObj sort = fromjson("{timestamp: -1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortOnGeoQueryMultikey) {
+ // true means multikey
+ addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"), true);
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", "
+ "coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
+ BSONObj sort = fromjson("{timestamp: -1}");
+ runQuerySortProj(query, sort, BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: "
+ "{timestamp: -1, position: '2dsphere'}}}}}");
+}
+*/
+
+
+//
+// Sort
+//
+
+TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSort) {
+ addIndex(BSON("x"
+ << "2dsphere"));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSortWithIndexablePred) {
+ addIndex(BSON("x"
+ << "2dsphere"));
+ runQuerySortProj(fromjson(
+ "{x: {$geoIntersects: {$geometry: {type: 'Point',"
+ " coordinates: [0, 0]}}}}"),
+ BSON("x" << 1),
+ BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {x: '2dsphere'}}}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{cscan: {dir: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseCompoundGeoIndexToProvideSortIfNoGeoPred) {
+ addIndex(BSON("x" << 1 << "y"
+ << "2dsphere"));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CanUseCompoundGeoIndexToProvideSortWithGeoPred) {
+ addIndex(BSON("x" << 1 << "y"
+ << "2dsphere"));
+ runQuerySortProj(fromjson(
+ "{x: 1, y: {$geoIntersects: {$geometry: {type: 'Point',"
+ " coordinates: [0, 0]}}}}"),
+ BSON("x" << 1),
+ BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{fetch: {node: "
+ "{ixscan: {pattern: {x: 1, y: '2dsphere'}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{cscan: {dir: 1}}}}");
+}
+
+//
+// Negation
+//
+
+//
+// 2D geo negation
+// The filter b != 1 is embedded in the geoNear2d node.
+// Can only do near + old point.
+//
+TEST_F(QueryPlannerTest, Negation2DGeoNear) {
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{$and: [{a: {$near: [0, 0], $maxDistance: 0.3}}, {b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: { geoNear2d: {a: '2d'} } } }");
+}
+
+//
+// 2DSphere geo negation
+// Filter is embedded in a separate fetch node.
+//
+TEST_F(QueryPlannerTest, Negation2DSphereGeoNear) {
+ // Can do nearSphere + old point, near + new point.
+ addIndex(BSON("a"
+ << "2dsphere"));
+
+ runQuery(fromjson(
+ "{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
+ "{b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+
+ runQuery(fromjson(
+ "{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
+ "coordinates: [0, 0]},"
+ "$maxDistance: 100}}},"
+ "{b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+//
+// 2DSphere geo negation
+// Filter is embedded in a separate fetch node.
+//
+TEST_F(QueryPlannerTest, Negation2DSphereGeoNearMultikey) {
+ // Can do nearSphere + old point, near + new point.
+ // true means multikey
+ addIndex(BSON("a"
+ << "2dsphere"),
+ true);
+
+ runQuery(fromjson(
+ "{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
+ "{b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+
+ runQuery(fromjson(
+ "{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
+ "coordinates: [0, 0]},"
+ "$maxDistance: 100}}},"
+ "{b: {$ne: 1}}]}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {geoNear2dsphere: {a: '2dsphere'}}}}");
+}
+
+//
+// 2dsphere V2 sparse indices, SERVER-9639
+//
+
+// Basic usage of a sparse 2dsphere index. V1 ignores the sparse field. We can use any prefix
+// of the index as every document is indexed.
+TEST_F(QueryPlannerTest, TwoDSphereSparseV1) {
+ // Create a V1 index.
+ addIndex(BSON("nonGeo" << 1 << "geo"
+ << "2dsphere"),
+ BSON("2dsphereIndexVersion" << 1));
+
+ // Can use the index for this.
+ runQuery(fromjson("{nonGeo: 7}"));
+ assertNumSolutions(2);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {nonGeo: 1, geo: '2dsphere'}}}}}");
+}
+
+// V2 is "geo sparse" and removes the nonGeo assignment.
+TEST_F(QueryPlannerTest, TwoDSphereSparseV2CantUse) {
+ // Create a V2 index.
+ addIndex(BSON("nonGeo" << 1 << "geo"
+ << "2dsphere"),
+ BSON("2dsphereIndexVersion" << 2));
+
+ // Can't use the index prefix here as it's a V2 index and we have no geo pred.
+ runQuery(fromjson("{nonGeo: 7}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereSparseOnePred) {
+ // Create a V2 index.
+ addIndex(BSON("geo"
+ << "2dsphere"),
+ BSON("2dsphereIndexVersion" << 2));
+
+ // We can use the index here as we have a geo pred.
+ runQuery(fromjson("{geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}}"));
+ assertNumSolutions(2);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// V2 is geo-sparse and the planner removes the nonGeo assignment when there's no geo pred
+TEST_F(QueryPlannerTest, TwoDSphereSparseV2TwoPreds) {
+ addIndex(BSON("nonGeo" << 1 << "geo"
+ << "2dsphere"
+ << "geo2"
+ << "2dsphere"),
+ BSON("2dsphereIndexVersion" << 2));
+
+ // Non-geo preds can only use a collscan.
+ runQuery(fromjson("{nonGeo: 7}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // One geo pred so we can use the index.
+ runQuery(
+ fromjson("{nonGeo: 7, geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] } }}}"));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+
+ // Two geo preds, so we can use the index still.
+ runQuery(fromjson(
+ "{nonGeo: 7, geo : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] }},"
+ " geo2 : { $geoWithin : { $centerSphere : [[ 10, 20 ], 0.01 ] }}}"));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+}
+
+TEST_F(QueryPlannerTest, TwoDNearCompound) {
+ addIndex(BSON("geo"
+ << "2dsphere"
+ << "nongeo" << 1),
+ BSON("2dsphereIndexVersion" << 2));
+ runQuery(fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereSparseV2BelowOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("geo1"
+ << "2dsphere"
+ << "a" << 1 << "b" << 1),
+ BSON("2dsphereIndexVersion" << 2));
+ addIndex(BSON("geo2"
+ << "2dsphere"
+ << "a" << 1 << "b" << 1),
+ BSON("2dsphereIndexVersion" << 2));
+
+ runQuery(fromjson(
+ "{a: 4, b: 5, $or: ["
+ "{geo1: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}},"
+ "{geo2: {$geoWithin: {$centerSphere: [[10, 20], 0.01]}}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a: 4, b: 5}, node: {or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {geo1:'2dsphere',a:1,b:1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {geo2:'2dsphere',a:1,b:1}}}}}"
+ "]}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoDSphereSparseV2BelowElemMatch) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a.b"
+ << "2dsphere"
+ << "a.c" << 1),
+ BSON("2dsphereIndexVersion" << 2));
+
+ runQuery(fromjson(
+ "{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
+ "c: {$gt: 3}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {'a.b': '2dsphere', 'a.c': 1}}}}}");
+}
} // namespace
diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h
index aef16655184..9e0b1a32fb3 100644
--- a/src/mongo/db/query/query_planner_params.h
+++ b/src/mongo/db/query/query_planner_params.h
@@ -36,76 +36,76 @@
namespace mongo {
- struct QueryPlannerParams {
-
- QueryPlannerParams() : options(DEFAULT),
- indexFiltersApplied(false),
- maxIndexedSolutions(internalQueryPlannerMaxIndexedSolutions) { }
-
- enum Options {
- // You probably want to set this.
- DEFAULT = 0,
-
- // Set this if you don't want a table scan.
- // See http://docs.mongodb.org/manual/reference/parameters/
- NO_TABLE_SCAN = 1,
-
- // Set this if you *always* want a collscan outputted, even if there's an ixscan. This
- // makes ranking less accurate, especially in the presence of blocking stages.
- INCLUDE_COLLSCAN = 1 << 1,
-
- // Set this if you're running on a sharded cluster. We'll add a "drop all docs that
- // shouldn't be on this shard" stage before projection.
- //
- // In order to set this, you must check
- // shardingState.needCollectionMetadata(current_namespace) in the same lock that you use
- // to build the query executor. You must also wrap the PlanExecutor in a ClientCursor
- // within the same lock. See the comment on ShardFilterStage for details.
- INCLUDE_SHARD_FILTER = 1 << 2,
-
- // Set this if you don't want any plans with a blocking sort stage. All sorts must be
- // provided by an index.
- NO_BLOCKING_SORT = 1 << 3,
-
- // Set this if you want to turn on index intersection.
- INDEX_INTERSECTION = 1 << 4,
-
- // Set this if you want to try to keep documents deleted or mutated during the execution
- // of the query in the query results.
- KEEP_MUTATIONS = 1 << 5,
-
- // Nobody should set this above the getExecutor interface. Internal flag set as a hint
- // to the planner that the caller is actually the count command.
- PRIVATE_IS_COUNT = 1 << 6,
-
- // Set this if you want to handle batchSize properly with sort(). If limits on SORT
- // stages are always actually limits, then this should be left off. If they are
- // sometimes to be interpreted as batchSize, then this should be turned on.
- SPLIT_LIMITED_SORT = 1 << 7,
-
- // Set this to prevent the planner from generating plans which answer a predicate
- // implicitly via exact index bounds for index intersection solutions.
- CANNOT_TRIM_IXISECT = 1 << 8,
- };
-
- // See Options enum above.
- size_t options;
-
- // What indices are available for planning?
- std::vector<IndexEntry> indices;
-
- // What's our shard key? If INCLUDE_SHARD_FILTER is set we will create a shard filtering
- // stage. If we know the shard key, we can perform covering analysis instead of always
- // forcing a fetch.
- BSONObj shardKey;
-
- // Were index filters applied to indices?
- bool indexFiltersApplied;
-
- // What's the max number of indexed solutions we want to output? It's expensive to compare
- // plans via the MultiPlanStage, and the set of possible plans is very large for certain
- // index+query combinations.
- size_t maxIndexedSolutions;
+struct QueryPlannerParams {
+ QueryPlannerParams()
+ : options(DEFAULT),
+ indexFiltersApplied(false),
+ maxIndexedSolutions(internalQueryPlannerMaxIndexedSolutions) {}
+
+ enum Options {
+ // You probably want to set this.
+ DEFAULT = 0,
+
+ // Set this if you don't want a table scan.
+ // See http://docs.mongodb.org/manual/reference/parameters/
+ NO_TABLE_SCAN = 1,
+
+ // Set this if you *always* want a collscan outputted, even if there's an ixscan. This
+ // makes ranking less accurate, especially in the presence of blocking stages.
+ INCLUDE_COLLSCAN = 1 << 1,
+
+ // Set this if you're running on a sharded cluster. We'll add a "drop all docs that
+ // shouldn't be on this shard" stage before projection.
+ //
+ // In order to set this, you must check
+ // shardingState.needCollectionMetadata(current_namespace) in the same lock that you use
+ // to build the query executor. You must also wrap the PlanExecutor in a ClientCursor
+ // within the same lock. See the comment on ShardFilterStage for details.
+ INCLUDE_SHARD_FILTER = 1 << 2,
+
+ // Set this if you don't want any plans with a blocking sort stage. All sorts must be
+ // provided by an index.
+ NO_BLOCKING_SORT = 1 << 3,
+
+ // Set this if you want to turn on index intersection.
+ INDEX_INTERSECTION = 1 << 4,
+
+ // Set this if you want to try to keep documents deleted or mutated during the execution
+ // of the query in the query results.
+ KEEP_MUTATIONS = 1 << 5,
+
+ // Nobody should set this above the getExecutor interface. Internal flag set as a hint
+ // to the planner that the caller is actually the count command.
+ PRIVATE_IS_COUNT = 1 << 6,
+
+ // Set this if you want to handle batchSize properly with sort(). If limits on SORT
+ // stages are always actually limits, then this should be left off. If they are
+ // sometimes to be interpreted as batchSize, then this should be turned on.
+ SPLIT_LIMITED_SORT = 1 << 7,
+
+ // Set this to prevent the planner from generating plans which answer a predicate
+ // implicitly via exact index bounds for index intersection solutions.
+ CANNOT_TRIM_IXISECT = 1 << 8,
};
+ // See Options enum above.
+ size_t options;
+
+ // What indices are available for planning?
+ std::vector<IndexEntry> indices;
+
+ // What's our shard key? If INCLUDE_SHARD_FILTER is set we will create a shard filtering
+ // stage. If we know the shard key, we can perform covering analysis instead of always
+ // forcing a fetch.
+ BSONObj shardKey;
+
+ // Were index filters applied to indices?
+ bool indexFiltersApplied;
+
+ // What's the max number of indexed solutions we want to output? It's expensive to compare
+ // plans via the MultiPlanStage, and the set of possible plans is very large for certain
+ // index+query combinations.
+ size_t maxIndexedSolutions;
+};
+
} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index e1abb290262..2c0ca9167a5 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -39,3443 +39,3778 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- //
- // Equality
- //
+//
+// Equality
+//
- TEST_F(QueryPlannerTest, EqualityIndexScan) {
- addIndex(BSON("x" << 1));
+TEST_F(QueryPlannerTest, EqualityIndexScan) {
+ addIndex(BSON("x" << 1));
- runQuery(BSON("x" << 5));
+ runQuery(BSON("x" << 5));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, EqualityIndexScanWithTrailingFields) {
- addIndex(BSON("x" << 1 << "y" << 1));
-
- runQuery(BSON("x" << 5));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
- }
-
- //
- // indexFilterApplied
- // Check that index filter flag is passed from planner params
- // to generated query solution.
- //
-
- TEST_F(QueryPlannerTest, IndexFilterAppliedDefault) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << 5));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
-
- // Check indexFilterApplied in query solutions;
- for (std::vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolution* soln = *it;
- ASSERT_FALSE(soln->indexFilterApplied);
- }
- }
-
- TEST_F(QueryPlannerTest, IndexFilterAppliedTrue) {
- params.indexFiltersApplied = true;
-
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << 5));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
-
- // Check indexFilterApplied in query solutions;
- for (std::vector<QuerySolution*>::const_iterator it = solns.begin();
- it != solns.end();
- ++it) {
- QuerySolution* soln = *it;
- ASSERT_EQUALS(params.indexFiltersApplied, soln->indexFilterApplied);
- }
- }
-
- //
- // <
- //
-
- TEST_F(QueryPlannerTest, LessThan) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << BSON("$lt" << 5)));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: {$lt: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- //
- // <=
- //
-
- TEST_F(QueryPlannerTest, LessThanEqual) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << BSON("$lte" << 5)));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: {$lte: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- //
- // >
- //
-
- TEST_F(QueryPlannerTest, GreaterThan) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << BSON("$gt" << 5)));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: {$gt: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- //
- // >=
- //
-
- TEST_F(QueryPlannerTest, GreaterThanEqual) {
- addIndex(BSON("x" << 1));
-
- runQuery(BSON("x" << BSON("$gte" << 5)));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: {$gte: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- //
- // Mod
- //
-
- TEST_F(QueryPlannerTest, Mod) {
- addIndex(BSON("a" << 1));
-
- runQuery(fromjson("{a: {$mod: [2, 0]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: {$mod: [2, 0]}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: {a: {$mod: [2, 0]}}, pattern: {a: 1}}}}}");
- }
-
- //
- // Exists
- //
-
- TEST_F(QueryPlannerTest, ExistsTrue) {
- addIndex(BSON("x" << 1));
-
- runQuery(fromjson("{x: {$exists: true}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsFalse) {
- addIndex(BSON("x" << 1));
-
- runQuery(fromjson("{x: {$exists: false}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsTrueSparseIndex) {
- addIndex(BSON("x" << 1), false, true);
-
- runQuery(fromjson("{x: {$exists: true}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsFalseSparseIndex) {
- addIndex(BSON("x" << 1), false, true);
-
- runQuery(fromjson("{x: {$exists: false}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsTrueOnUnindexedField) {
- addIndex(BSON("x" << 1));
-
- runQuery(fromjson("{x: 1, y: {$exists: true}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsFalseOnUnindexedField) {
- addIndex(BSON("x" << 1));
-
- runQuery(fromjson("{x: 1, y: {$exists: false}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsTrueSparseIndexOnOtherField) {
- addIndex(BSON("x" << 1), false, true);
-
- runQuery(fromjson("{x: 1, y: {$exists: true}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsFalseSparseIndexOnOtherField) {
- addIndex(BSON("x" << 1), false, true);
-
- runQuery(fromjson("{x: 1, y: {$exists: false}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsBounds) {
- addIndex(BSON("b" << 1));
-
- runQuery(fromjson("{b: {$exists: true}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: true}}, node: "
- "{ixscan: {pattern: {b: 1}, bounds: "
- "{b: [['MinKey', 'MaxKey', true, true]]}}}}}");
-
- // This ends up being a double negation, which we currently don't index.
- runQuery(fromjson("{b: {$not: {$exists: false}}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- runQuery(fromjson("{b: {$exists: false}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{ixscan: {pattern: {b: 1}, bounds: "
- "{b: [[null, null, true, true]]}}}}}");
-
- runQuery(fromjson("{b: {$not: {$exists: true}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{ixscan: {pattern: {b: 1}, bounds: "
- "{b: [[null, null, true, true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ExistsBoundsCompound) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuery(fromjson("{a: 1, b: {$exists: true}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: true}}, node: "
- "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
-
- // This ends up being a double negation, which we currently don't index.
- runQuery(fromjson("{a: 1, b: {$not: {$exists: false}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
-
- runQuery(fromjson("{a: 1, b: {$exists: false}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
-
- runQuery(fromjson("{a: 1, b: {$not: {$exists: true}}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b: {$exists: false}}, node: "
- "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
- }
-
- //
- // skip and limit
- //
-
- TEST_F(QueryPlannerTest, BasicSkipNoIndex) {
- addIndex(BSON("a" << 1));
-
- runQuerySkipLimit(BSON("x" << 5), 3, 0);
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{skip: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicSkipWithIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySkipLimit(BSON("a" << 5), 8, 0);
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{skip: {n: 8, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{skip: {n: 8, node: {fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicLimitNoIndex) {
- addIndex(BSON("a" << 1));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
+}
- runQuerySkipLimit(BSON("x" << 5), 0, -3);
+TEST_F(QueryPlannerTest, EqualityIndexScanWithTrailingFields) {
+ addIndex(BSON("x" << 1 << "y" << 1));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{limit: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
- }
+ runQuery(BSON("x" << 5));
- TEST_F(QueryPlannerTest, BasicSoftLimitNoIndex) {
- addIndex(BSON("a" << 1));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1, y: 1}}}}}");
+}
- runQuerySkipLimit(BSON("x" << 5), 0, 3);
+//
+// indexFilterApplied
+// Check that index filter flag is passed from planner params
+// to generated query solution.
+//
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
- }
+TEST_F(QueryPlannerTest, IndexFilterAppliedDefault) {
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, BasicLimitWithIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(BSON("x" << 5));
- runQuerySkipLimit(BSON("a" << 5), 0, -5);
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{limit: {n: 5, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{limit: {n: 5, node: {fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
+ // Check indexFilterApplied in query solutions;
+ for (std::vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolution* soln = *it;
+ ASSERT_FALSE(soln->indexFilterApplied);
}
+}
- TEST_F(QueryPlannerTest, BasicSoftLimitWithIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySkipLimit(BSON("a" << 5), 0, 5);
+TEST_F(QueryPlannerTest, IndexFilterAppliedTrue) {
+ params.indexFiltersApplied = true;
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: 5}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
- }
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, SkipAndLimit) {
- addIndex(BSON("x" << 1));
+ runQuery(BSON("x" << 5));
- runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, -2);
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{limit: {n: 2, node: {skip: {n: 7, node: "
- "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}}}");
- assertSolutionExists("{limit: {n: 2, node: {skip: {n: 7, node: {fetch: "
- "{filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}}}}}");
+ // Check indexFilterApplied in query solutions;
+ for (std::vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolution* soln = *it;
+ ASSERT_EQUALS(params.indexFiltersApplied, soln->indexFilterApplied);
}
+}
- TEST_F(QueryPlannerTest, SkipAndSoftLimit) {
- addIndex(BSON("x" << 1));
+//
+// <
+//
- runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, 2);
+TEST_F(QueryPlannerTest, LessThan) {
+ addIndex(BSON("x" << 1));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{skip: {n: 7, node: "
- "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}");
- assertSolutionExists("{skip: {n: 7, node: {fetch: "
- "{filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}}}");
- }
+ runQuery(BSON("x" << BSON("$lt" << 5)));
- //
- // tree operations
- //
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: {$lt: 5}}}}");
+ assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, TwoPredicatesAnding) {
- addIndex(BSON("x" << 1));
+//
+// <=
+//
- runQuery(fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }"));
+TEST_F(QueryPlannerTest, LessThanEqual) {
+ addIndex(BSON("x" << 1));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
+ runQuery(BSON("x" << BSON("$lte" << 5)));
- TEST_F(QueryPlannerTest, SimpleOr) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: 20}, {a: 21}]}"));
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: {$lte: 5}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 20}, {a: 21}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a:1}}}}}");
- }
+//
+// >
+//
- TEST_F(QueryPlannerTest, OrWithoutEnoughIndices) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: 20}, {b: 21}]}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 20}, {b: 21}]}}}");
- }
+TEST_F(QueryPlannerTest, GreaterThan) {
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, OrWithAndChild) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}"));
+ runQuery(BSON("x" << BSON("$gt" << 5)));
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {b: 7}, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}]}}}}");
- }
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: {$gt: 5}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, AndWithUnindexedOrChild) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a:20, $or: [{b:1}, {c:7}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Logical rewrite means we could get one of these two outcomes:
- size_t matches = 0;
- matches += numSolutionMatches("{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- matches += numSolutionMatches("{or: {filter: null, nodes: ["
- "{fetch: {filter: {b:1}, node: {"
- "ixscan: {filter: null, pattern: {a:1}}}}},"
- "{fetch: {filter: {c:7}, node: {"
- "ixscan: {filter: null, pattern: {a:1}}}}}]}}");
- ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
- }
+//
+// >=
+//
+TEST_F(QueryPlannerTest, GreaterThanEqual) {
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, AndWithOrWithOneIndex) {
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{b:1}, {c:7}], a:20}"));
-
- // Logical rewrite gives us at least one of these:
- assertSolutionExists("{cscan: {dir: 1}}");
- size_t matches = 0;
- matches += numSolutionMatches("{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- matches += numSolutionMatches("{or: {filter: null, nodes: ["
- "{fetch: {filter: {b:1}, node: {"
- "ixscan: {filter: null, pattern: {a:1}}}}},"
- "{fetch: {filter: {c:7}, node: {"
- "ixscan: {filter: null, pattern: {a:1}}}}}]}}");
- ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
- }
+ runQuery(BSON("x" << BSON("$gte" << 5)));
- //
- // Additional $or tests
- //
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: {$gte: 5}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, OrCollapsesToSingleScan) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:2}}, {a:{$gt:0}}]}"));
+//
+// Mod
+//
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[0,Infinity,false,true]]}}}}}");
- }
+TEST_F(QueryPlannerTest, Mod) {
+ addIndex(BSON("a" << 1));
- TEST_F(QueryPlannerTest, OrCollapsesToSingleScan2) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$lt:2}}, {a:{$lt:4}}]}"));
+ runQuery(fromjson("{a: {$mod: [2, 0]}}"));
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[-Infinity,4,true,false]]}}}}}");
- }
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: {$mod: [2, 0]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: {a: {$mod: [2, 0]}}, pattern: {a: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, OrCollapsesToSingleScan3) {
- addIndex(BSON("a" << 1));
- runQueryHint(fromjson("{$or: [{a:1},{a:3}]}"), fromjson("{a:1}"));
+//
+// Exists
+//
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[1,1,true,true], [3,3,true,true]]}}}}}");
- }
+TEST_F(QueryPlannerTest, ExistsTrue) {
+ addIndex(BSON("x" << 1));
- TEST_F(QueryPlannerTest, OrOnlyOneBranchCanUseIndex) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:1}, {b:2}]}"));
+ runQuery(fromjson("{x: {$exists: true}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
- TEST_F(QueryPlannerTest, OrOnlyOneBranchCanUseIndexHinted) {
- addIndex(BSON("a" << 1));
- runQueryHint(fromjson("{$or: [{a:1}, {b:2}]}"), fromjson("{a:1}"));
+TEST_F(QueryPlannerTest, ExistsFalse) {
+ addIndex(BSON("x" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {$or:[{a:1},{b:2}]}, node: {ixscan: "
- "{pattern: {a:1}, bounds: "
- "{a: [['MinKey','MaxKey',true,true]]}}}}}");
- }
+ runQuery(fromjson("{x: {$exists: false}}"));
- TEST_F(QueryPlannerTest, OrNaturalHint) {
- addIndex(BSON("a" << 1));
- runQueryHint(fromjson("{$or: [{a:1}, {a:3}]}"), fromjson("{$natural:1}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
+TEST_F(QueryPlannerTest, ExistsTrueSparseIndex) {
+ addIndex(BSON("x" << 1), false, true);
- // SERVER-13714. A non-top-level indexable negation exposed a bug in plan enumeration.
- TEST_F(QueryPlannerTest, NonTopLevelIndexedNegation) {
- addIndex(BSON("state" << 1));
- addIndex(BSON("is_draft" << 1));
- addIndex(BSON("published_date" << 1));
- addIndex(BSON("newsroom_id" << 1));
-
- BSONObj queryObj = fromjson("{$and:[{$or:[{is_draft:false},{creator_id:1}]},"
- "{$or:[{state:3,is_draft:false},"
- "{published_date:{$ne:null}}]},"
- "{newsroom_id:{$in:[1]}}]}");
- runQuery(queryObj);
- }
-
- TEST_F(QueryPlannerTest, NonTopLevelIndexedNegationMinQuery) {
- addIndex(BSON("state" << 1));
- addIndex(BSON("is_draft" << 1));
- addIndex(BSON("published_date" << 1));
-
- // This is the min query to reproduce SERVER-13714
- BSONObj queryObj = fromjson("{$or:[{state:1, is_draft:1}, {published_date:{$ne: 1}}]}");
- runQuery(queryObj);
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:5}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a:1}, bounds: {a: [[2,10,false,false]]}}}, "
- "{ixscan: {pattern: {a:1}, bounds: "
- "{a: [[0,5,false,false]]}}}]}}}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd2) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:15}}, {a:{$gt:20}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a:1}, bounds: {a: [[2,10,false,false]]}}}, "
- "{ixscan: {pattern: {a:1}, bounds: {a: [[0,15,false,false]]}}}, "
- "{ixscan: {pattern: {a:1}, bounds: "
- "{a: [[20,Infinity,false,true]]}}}]}}}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd3) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:1,$lt:5},b:6}, {a:3,b:{$gt:0,$lt:10}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {b:6}, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[1,5,false,false]]}}}}}, "
- "{fetch: {filter: {$and:[{b:{$lt:10}},{b:{$gt:0}}]}, node: "
- "{ixscan: {pattern: {a:1}, bounds: {a:[[3,3,true,true]]}}}}}]}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd4) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
- "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
- "bounds: {a: [[1,5,false,false]], b: [[0,3,false,false]]}}}}}, "
- "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
- "{ixscan: {pattern: {a:1,b:1}, "
- " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd5) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$or: [{a:{$gt:1,$lt:5}, c:6}, "
- "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
- "bounds: {a: [[1,5,false,false]], "
- "b: [['MinKey','MaxKey',true,true]]}}}}}, "
- "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
- "{ixscan: {pattern: {a:1,b:1}, "
- " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
- }
-
- // SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
- TEST_F(QueryPlannerTest, OrOfAnd6) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$or: [{a:{$in:[1]},b:{$in:[1]}}, {a:{$in:[1,5]},b:{$in:[1,5]}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true]], b: [[1,1,true,true]]}}}, "
- "{ixscan: {pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true], [5,5,true,true]], "
- " b: [[1,1,true,true], [5,5,true,true]]}}}]}}}}");
- }
-
- // SERVER-13960: properly handle $or with a mix of exact and inexact predicates.
- TEST_F(QueryPlannerTest, OrInexactWithExact) {
- addIndex(BSON("name" << 1));
- runQuery(fromjson("{$or: [{name: 'thomas'}, {name: /^alexand(er|ra)/}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {filter:"
- "{$or: [{name: 'thomas'}, {name: /^alexand(er|ra)/}]},"
- "pattern: {name: 1}}}}}");
- }
-
- // SERVER-13960: multiple indices, each with an inexact covered predicate.
- TEST_F(QueryPlannerTest, OrInexactWithExact2) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{$or: [{a: 'foo'}, {a: /bar/}, {b: 'foo'}, {b: /bar/}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {or: {nodes: ["
- "{ixscan: {filter: {$or:[{a:'foo'},{a:/bar/}]},"
- "pattern: {a: 1}}},"
- "{ixscan: {filter: {$or:[{b:'foo'},{b:/bar/}]},"
- "pattern: {b: 1}}}]}}}}");
- }
-
- // SERVER-13960: an exact, inexact covered, and inexact fetch predicate.
- TEST_F(QueryPlannerTest, OrAllThreeTightnesses) {
- addIndex(BSON("names" << 1));
- runQuery(fromjson("{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: "
- "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}, "
- "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
- }
-
- // SERVER-13960: two inexact fetch predicates.
- TEST_F(QueryPlannerTest, OrTwoInexactFetch) {
- // true means multikey
- addIndex(BSON("names" << 1), true);
- runQuery(fromjson("{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: "
- "{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}, "
- "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
- }
-
- // SERVER-13960: multikey with exact and inexact covered predicates.
- TEST_F(QueryPlannerTest, OrInexactCoveredMultikey) {
- // true means multikey
- addIndex(BSON("names" << 1), true);
- runQuery(fromjson("{$or: [{names: 'dave'}, {names: /joe/}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$or: [{names: 'dave'}, {names: /joe/}]}, "
- "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
- }
-
- // SERVER-13960: $elemMatch object with $or.
- TEST_F(QueryPlannerTest, OrElemMatchObject) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{$or: [{a: {$elemMatch: {b: {$lte: 1}}}},"
- "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {a:{$elemMatch:{b:{$gte:4}}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}},"
- "{fetch: {filter: {a:{$elemMatch:{b:{$lte:1}}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}]}}");
- }
-
- // SERVER-13960: $elemMatch object inside an $or, below an AND.
- TEST_F(QueryPlannerTest, OrElemMatchObjectBeneathAnd) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{$or: [{'a.b': 0, a: {$elemMatch: {b: {$lte: 1}}}},"
- "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {$and:[{a:{$elemMatch:{b:{$lte:1}}}},{'a.b':0}]},"
- "node: {ixscan: {filter: null, pattern: {'a.b': 1}, "
- "bounds: {'a.b': [[-Infinity,1,true,true]]}}}}},"
- "{fetch: {filter: {a:{$elemMatch:{b:{$gte:4}}}}, node: "
- "{ixscan: {filter: null, pattern: {'a.b': 1},"
- "bounds: {'a.b': [[4,Infinity,true,true]]}}}}}]}}");
- }
-
- // SERVER-13960: $or below $elemMatch with an inexact covered predicate.
- TEST_F(QueryPlannerTest, OrBelowElemMatchInexactCovered) {
- // true means multikey
- addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}},"
- "node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
- }
-
- // SERVER-13960: $in with exact and inexact covered predicates.
- TEST_F(QueryPlannerTest, OrWithExactAndInexact) {
- addIndex(BSON("name" << 1));
- runQuery(fromjson("{name: {$in: ['thomas', /^alexand(er|ra)/]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: {name: {$in: ['thomas', /^alexand(er|ra)/]}}, "
- "pattern: {name: 1}}}}}");
- }
-
- // SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates.
- TEST_F(QueryPlannerTest, OrWithExactAndInexact2) {
- addIndex(BSON("name" << 1));
- runQuery(fromjson("{$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
- "{name: {$exists: false}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
- "{name: {$exists: false}}]}, "
- "node: {ixscan: {filter: null, pattern: {name: 1}}}}}");
- }
-
- // SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates
- // over two indices.
- TEST_F(QueryPlannerTest, OrWithExactAndInexact3) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{$or: [{a: {$in: [/z/, /x/]}}, {a: 'w'},"
- "{b: {$exists: false}}, {b: {$in: ['p']}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: {$or:[{a:{$in:[/z/, /x/]}}, {a:'w'}]}, "
- "pattern: {a: 1}}}, "
- "{fetch: {filter: {$or:[{b:{$exists:false}}, {b:{$in:['p']}}]},"
- "node: {ixscan: {filter: null, pattern: {b: 1}}}}}]}}}}");
- }
-
- //
- // Min/Max
- //
-
- TEST_F(QueryPlannerTest, MinValid) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MinWithoutIndex) {
- runInvalidQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
- }
-
- TEST_F(QueryPlannerTest, MinBadHint) {
- addIndex(BSON("b" << 1));
- runInvalidQueryHintMinMax(BSONObj(), fromjson("{b: 1}"), fromjson("{a: 1}"), BSONObj());
- }
-
- TEST_F(QueryPlannerTest, MaxValid) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MinMaxSameValue) {
- addIndex(BSON("a" << 1));
- runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxWithoutIndex) {
- runInvalidQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
- }
-
- TEST_F(QueryPlannerTest, MaxBadHint) {
- addIndex(BSON("b" << 1));
- runInvalidQueryHintMinMax(BSONObj(), fromjson("{b: 1}"), BSONObj(), fromjson("{a: 1}"));
- }
-
- TEST_F(QueryPlannerTest, MaxMinSort) {
- addIndex(BSON("a" << 1));
-
- // Run an empty query, sort {a: 1}, max/min arguments.
- runQueryFull(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 2}"), fromjson("{a: 8}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxMinReverseSort) {
- addIndex(BSON("a" << 1));
-
- // Run an empty query, sort {a: -1}, max/min arguments.
- runQueryFull(BSONObj(), fromjson("{a: -1}"), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 2}"), fromjson("{a: 8}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: -1, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxMinReverseIndexDir) {
- addIndex(BSON("a" << -1));
-
- // Because the index is descending, the min is numerically larger than the max.
- runQueryFull(BSONObj(), fromjson("{a: -1}"), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 8}"), fromjson("{a: 2}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: -1}}}}}");
- }
+ runQuery(fromjson("{x: {$exists: true}}"));
- TEST_F(QueryPlannerTest, MaxMinReverseIndexDirSort) {
- addIndex(BSON("a" << -1));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsFalseSparseIndex) {
+ addIndex(BSON("x" << 1), false, true);
+
+ runQuery(fromjson("{x: {$exists: false}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsTrueOnUnindexedField) {
+ addIndex(BSON("x" << 1));
+
+ runQuery(fromjson("{x: 1, y: {$exists: true}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsFalseOnUnindexedField) {
+ addIndex(BSON("x" << 1));
+
+ runQuery(fromjson("{x: 1, y: {$exists: false}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsTrueSparseIndexOnOtherField) {
+ addIndex(BSON("x" << 1), false, true);
+
+ runQuery(fromjson("{x: 1, y: {$exists: true}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsFalseSparseIndexOnOtherField) {
+ addIndex(BSON("x" << 1), false, true);
+
+ runQuery(fromjson("{x: 1, y: {$exists: false}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsBounds) {
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{b: {$exists: true}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: true}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [['MinKey', 'MaxKey', true, true]]}}}}}");
+
+ // This ends up being a double negation, which we currently don't index.
+ runQuery(fromjson("{b: {$not: {$exists: false}}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
- // Min/max specifies a forward scan with bounds [{a: 8}, {a: 2}]. Asking for
- // an ascending sort reverses the direction of the scan to [{a: 2}, {a: 8}].
- runQueryFull(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 8}"), fromjson("{a: 2}"), false);
+ runQuery(fromjson("{b: {$exists: false}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [[null, null, true, true]]}}}}}");
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: -1,"
- "pattern: {a: -1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxMinNoMatchingIndexDir) {
- addIndex(BSON("a" << -1));
- runInvalidQueryHintMinMax(BSONObj(), fromjson("{a: 2}"), BSONObj(), fromjson("{a: 8}"));
- }
-
- TEST_F(QueryPlannerTest, MaxMinSelectCorrectlyOrderedIndex) {
- // There are both ascending and descending indices on 'a'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << -1));
-
- // The ordering of min and max means that we *must* use the descending index.
- runQueryFull(BSONObj(), BSONObj(), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 8}"), fromjson("{a: 2}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: -1}}}}}");
-
- // If we switch the ordering, then we use the ascending index.
- // The ordering of min and max means that we *must* use the descending index.
- runQueryFull(BSONObj(), BSONObj(), BSONObj(), 0, 0, BSONObj(),
- fromjson("{a: 2}"), fromjson("{a: 8}"), false);
-
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, MaxMinBadHintSelectsReverseIndex) {
- // There are both ascending and descending indices on 'a'.
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << -1));
-
- // A query hinting on {a: 1} is bad if min is {a: 8} and {a: 2} because this
- // min/max pairing requires a descending index.
- runInvalidQueryFull(BSONObj(), BSONObj(), BSONObj(), 0, 0, fromjson("{a: 1}"),
- fromjson("{a: 8}"), fromjson("{a: 2}"), false);
- }
-
-
- //
- // $snapshot
- //
-
- TEST_F(QueryPlannerTest, Snapshot) {
- addIndex(BSON("a" << 1));
- runQuerySnapshot(fromjson("{a: {$gt: 0}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:{$gt:0}}, node: "
- "{ixscan: {filter: null, pattern: {_id: 1}}}}}");
- }
-
- //
- // Tree operations that require simple tree rewriting.
- //
-
- TEST_F(QueryPlannerTest, AndOfAnd) {
- addIndex(BSON("x" << 1));
- runQuery(fromjson("{$and: [ {$and: [ {x: 2.5}]}, {x: {$gt: 1}}, {x: {$lt: 3}} ] }"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- //
- // Logically equivalent queries
- //
-
- TEST_F(QueryPlannerTest, EquivalentAndsOne) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$and: [{a: 1}, {b: {$all: [10, 20]}}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{a:1},{b:10},{b:20}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, EquivalentAndsTwo) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{$and: [{a: 1, b: 10}, {a: 1, b: 20}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{a:1},{a:1},{b:10},{b:20}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- //
- // Covering
- //
-
- TEST_F(QueryPlannerTest, BasicCovering) {
- addIndex(BSON("x" << 1));
- // query, sort, proj
- runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{_id: 0, x: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, x: 1}, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, x: 1}, node: "
- "{cscan: {dir: 1, filter: {x:{$gt:1}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, DottedFieldCovering) {
- addIndex(BSON("a.b" << 1));
- runQuerySortProj(fromjson("{'a.b': 5}"), BSONObj(), fromjson("{_id: 0, 'a.b': 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, 'a.b': 1}, node: "
- "{cscan: {dir: 1, filter: {'a.b': 5}}}}}");
- // SERVER-2104
- //assertSolutionExists("{proj: {spec: {_id: 0, 'a.b': 1}, node: {'a.b': 1}}}");
- }
-
- TEST_F(QueryPlannerTest, IdCovering) {
- runQuerySortProj(fromjson("{_id: {$gt: 10}}"), BSONObj(), fromjson("{_id: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 1}, node: "
- "{cscan: {dir: 1, filter: {_id: {$gt: 10}}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 1}, node: {ixscan: "
- "{filter: null, pattern: {_id: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ProjNonCovering) {
- addIndex(BSON("x" << 1));
- runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{x: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {x: 1}, node: {cscan: "
- "{dir: 1, filter: {x: {$gt: 1}}}}}}");
- assertSolutionExists("{proj: {spec: {x: 1}, node: {fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {x: 1}}}}}}}");
- }
-
- //
- // Basic sort
- //
-
- TEST_F(QueryPlannerTest, BasicSort) {
- addIndex(BSON("x" << 1));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSort) {
- addIndex(BSON("x" << "hashed"));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
- addIndex(BSON("x" << "hashed"));
- runQuerySortProj(BSON("x" << BSON("$in" << BSON_ARRAY(0 << 1))), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {x: 'hashed'}}}}}}}");
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: "
- "{cscan: {dir: 1, filter: {x: {$in: [0, 1]}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
- addIndex(BSON("x" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicSortWithIndexablePred) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortProj(fromjson("{ a : 5 }"), BSON("b" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, "
- "node: {fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}}}");
- assertSolutionExists("{fetch: {filter: {a: 5}, node: {ixscan: "
- "{filter: null, pattern: {b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BasicSortBooleanIndexKeyPattern) {
- addIndex(BSON("a" << true));
- runQuerySortProj(fromjson("{ a : 5 }"), BSON("a" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: true}}}}}");
- }
-
- // SERVER-14070
- TEST_F(QueryPlannerTest, CompoundIndexWithEqualityPredicatesProvidesSort) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{a: 1, b: 1}"), fromjson("{b: 1}"), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null,"
- "pattern: {a: 1, b: 1}, "
- "bounds: {a:[[1,1,true,true]], b:[[1,1,true,true]]}}}}}");
- }
-
- //
- // Sort with limit and/or skip
- //
-
- TEST_F(QueryPlannerTest, SortLimit) {
- // Negative limit indicates hard limit - see lite_parsed_query.cpp
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, -3);
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 3, "
- "node: {cscan: {dir: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortSkip) {
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 0);
- assertNumSolutions(1U);
- // If only skip is provided, do not limit sort.
- assertSolutionExists("{skip: {n: 2, node: "
- "{sort: {pattern: {a: 1}, limit: 0, "
- "node: {cscan: {dir: 1}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortSkipLimit) {
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, -3);
- assertNumSolutions(1U);
- // Limit in sort node should be adjusted by skip count
- assertSolutionExists("{skip: {n: 2, node: "
- "{sort: {pattern: {a: 1}, limit: 5, "
- "node: {cscan: {dir: 1}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortSoftLimit) {
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 3);
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 3, "
- "node: {cscan: {dir: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortSkipSoftLimit) {
- runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 3);
- assertNumSolutions(1U);
- assertSolutionExists("{skip: {n: 2, node: "
- "{sort: {pattern: {a: 1}, limit: 5, "
- "node: {cscan: {dir: 1}}}}}}");
- }
-
- //
- // Sort elimination
- //
-
- TEST_F(QueryPlannerTest, BasicSortElim) {
- addIndex(BSON("x" << 1));
- // query, sort, proj
- runQuerySortProj(fromjson("{ x : {$gt: 1}}"), fromjson("{x: 1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {x: {$gt: 1}}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SortElimCompound) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{ a : 5 }"), BSON("b" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- // SERVER-13611: test that sort elimination still works if there are
- // trailing fields in the index.
- TEST_F(QueryPlannerTest, SortElimTrailingFields) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuerySortProj(fromjson("{a: 5}"), BSON("b" << 1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1, c: 1}}}}}");
- }
-
- // Sort elimination with trailing fields where the sort direction is descending.
- TEST_F(QueryPlannerTest, SortElimTrailingFieldsReverse) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProj(fromjson("{a: 5, b: 6}"), BSON("c" << -1), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {c: -1}, limit: 0, "
- "node: {cscan: {dir: 1, filter: {a: 5, b: 6}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, dir: -1, pattern: {a: 1, b: 1, c: 1, d: 1}}}}}");
- }
-
- //
- // Basic compound
- //
-
- TEST_F(QueryPlannerTest, BasicCompound) {
- addIndex(BSON("x" << 1 << "y" << 1));
- runQuery(fromjson("{ x : 5, y: 10}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1, y: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundMissingField) {
- addIndex(BSON("x" << 1 << "y" << 1 << "z" << 1));
- runQuery(fromjson("{ x : 5, z: 10}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {x: 1, y: 1, z: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundFieldsOrder) {
- addIndex(BSON("x" << 1 << "y" << 1 << "z" << 1));
- runQuery(fromjson("{ x : 5, z: 10, y:1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1, y: 1, z: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantUseCompound) {
- addIndex(BSON("x" << 1 << "y" << 1));
- runQuery(fromjson("{ y: 10}"));
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {y: 10}}}");
- }
-
- //
- // $in
- //
-
- TEST_F(QueryPlannerTest, InBasic) {
- addIndex(fromjson("{a: 1}"));
- runQuery(fromjson("{a: {$in: [1, 2]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: {$in: [1, 2]}}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1}}}}}");
- }
-
- // Logically equivalent to the preceding $in query.
- // Indexed solution should be the same.
- TEST_F(QueryPlannerTest, InBasicOrEquivalent) {
- addIndex(fromjson("{a: 1}"));
- runQuery(fromjson("{$or: [{a: 1}, {a: 2}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 1}, {a: 2}]}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, InSparseIndex) {
- addIndex(fromjson("{a: 1}"),
- false, // multikey
- true); // sparse
- runQuery(fromjson("{a: {$in: [null]}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: {$in: [null]}}}}");
- }
-
- TEST_F(QueryPlannerTest, InCompoundIndexFirst) {
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuery(fromjson("{a: {$in: [1, 2]}, b: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {b: 3, a: {$in: [1, 2]}}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
-
- // Logically equivalent to the preceding $in query.
- // Indexed solution should be the same.
- // Currently fails - pre-requisite to SERVER-12024
- /*
- TEST_F(QueryPlannerTest, InCompoundIndexFirstOrEquivalent) {
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuery(fromjson("{$and: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
- */
-
- TEST_F(QueryPlannerTest, InCompoundIndexLast) {
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuery(fromjson("{a: 3, b: {$in: [1, 2]}}"));
-
- assertNumSolutions(2U);
- // TODO: update filter in cscan solution when SERVER-12024 is implemented
- assertSolutionExists("{cscan: {dir: 1, filter: {a: 3, b: {$in: [1, 2]}}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
-
- // Logically equivalent to the preceding $in query.
- // Indexed solution should be the same.
- // Currently fails - pre-requisite to SERVER-12024
- /*
- TEST_F(QueryPlannerTest, InCompoundIndexLastOrEquivalent) {
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuery(fromjson("{$and: [{a: 3}, {$or: [{b: 1}, {b: 2}]}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{a: 3}, {$or: [{b: 1}, {b: 2}]}]}}}");
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
- */
-
- // SERVER-1205
- TEST_F(QueryPlannerTest, InWithSort) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"),
- BSON("b" << 1), BSONObj(), 0, 1);
-
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 1, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
- }
-
- // SERVER-1205
- TEST_F(QueryPlannerTest, InWithoutSort) {
- addIndex(BSON("a" << 1 << "b" << 1));
- // No sort means we don't bother to blow up the bounds.
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSONObj(), BSONObj(), 0, 1);
-
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
- }
-
- // SERVER-1205
- TEST_F(QueryPlannerTest, ManyInWithSort) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b:{$in:[1,2]}, c:{$in:[1,2]}}"),
- BSON("d" << 1), BSONObj(), 0, 1);
-
- assertSolutionExists("{sort: {pattern: {d: 1}, limit: 1, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
- "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}]}}}}");
- }
-
- // SERVER-1205
- TEST_F(QueryPlannerTest, TooManyToExplode) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1,2,3,4,5,6]},"
- "b:{$in:[1,2,3,4,5,6,7,8]},"
- "c:{$in:[1,2,3,4,5,6,7,8]}}"),
- BSON("d" << 1), BSONObj(), 0, 1);
-
- // We cap the # of ixscans we're willing to create.
- assertNumSolutions(2);
- assertSolutionExists("{sort: {pattern: {d: 1}, limit: 1, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {d: 1}, limit: 1, node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CantExplodeMetaSort) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << "text"));
- runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"),
- fromjson("{c: {$meta: 'textScore'}}"),
- fromjson("{c: {$meta: 'textScore'}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {c:{$meta:'textScore'}}, node: "
- "{sort: {pattern: {c:{$meta:'textScore'}}, limit: 0, node: "
- "{cscan: {filter: {a:{$in:[1,2]},b:{$in:[3,4]}}, dir: 1}}}}}}");
- }
-
- // SERVER-13618: test that exploding scans for sort works even
- // if we must reverse the scan direction.
- TEST_F(QueryPlannerTest, ExplodeMustReverseScans) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"),
- BSON("c" << -1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}}]}}}}");
- }
-
- // SERVER-13618
- TEST_F(QueryPlannerTest, ExplodeMustReverseScans2) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << -1));
- runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"),
- BSON("c" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a:1, b:1, c:-1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:-1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:-1}}},"
- "{ixscan: {pattern: {a:1, b:1, c:-1}}}]}}}}");
- }
-
- // SERVER-13752: don't try to explode if the ordered interval list for
- // the leading field of the compound index is empty.
- TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{a: {$in: []}}"), BSON("b" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}}}");
- }
-
- // SERVER-13752
- TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds2) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuerySortProj(fromjson("{a: {$gt: 3, $lt: 0}}"), BSON("b" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: "
- "{fetch: {node: {ixscan: {pattern: {a:1,b:1,c:1}}}}}}}");
- }
-
- // SERVER-13754: exploding an $or
- TEST_F(QueryPlannerTest, ExplodeOrForSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
-
- runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}, {b: 2}]}"),
- BSON("c" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {bounds: {a: [[1,1,true,true]], "
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {a:1, c:1}}},"
- "{ixscan: {bounds: {a: [[2,2,true,true]], "
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {a:1, c:1}}},"
- "{ixscan: {bounds: {b: [[2,2,true,true]], "
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {b:1, c:1}}}]}}}}");
- }
-
- // SERVER-13754: exploding an $or
- TEST_F(QueryPlannerTest, ExplodeOrForSort2) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- addIndex(BSON("d" << 1 << "c" << 1));
-
- runQuerySortProj(fromjson("{$or: [{a: 1, b: {$in: [1, 2]}}, {d: 3}]}"),
- BSON("c" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {bounds: {a: [[1,1,true,true]], b: [[1,1,true,true]],"
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {a:1, b:1, c:1}}},"
- "{ixscan: {bounds: {a: [[1,1,true,true]], b: [[2,2,true,true]],"
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {a:1, b:1, c:1}}},"
- "{ixscan: {bounds: {d: [[3,3,true,true]], "
- "c: [['MinKey','MaxKey',true,true]]},"
- "pattern: {d:1, c:1}}}]}}}}");
- }
-
- // SERVER-13754: an $or that can't be exploded, because one clause of the
-    // $or doesn't provide the sort, even after explosion.
- TEST_F(QueryPlannerTest, CantExplodeOrForSort) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- addIndex(BSON("d" << 1 << "c" << 1));
-
- runQuerySortProj(fromjson("{$or: [{a: {$in: [1, 2]}}, {d: 3}]}"),
- BSON("c" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: "
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a: 1, b: 1, c: 1}}},"
- "{ixscan: {pattern: {d: 1, c: 1}}}]}}}}}}");
- }
-
- // SERVER-15286: Make sure that at least the explodeForSort() path bails out
-    // when it finds no fields with a union of point intervals to explode.
- // We could convert this into a MERGE_SORT plan, but we don't yet do this
- // optimization.
- TEST_F(QueryPlannerTest, CantExplodeOrForSort2) {
- addIndex(BSON("a" << 1));
-
- runQuerySortProj(fromjson("{$or: [{a: {$gt: 1, $lt: 3}}, {a: {$gt: 6, $lt: 10}}]}"),
- BSON("a" << -1),
- BSONObj());
-
- assertNumSolutions(3U);
- assertSolutionExists("{sort: {pattern: {a: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}");
- assertSolutionExists("{sort: {pattern: {a: -1}, limit: 0, node: "
- "{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {pattern: {a: 1}, bounds: "
- "{a: [[1,3,false,false]]}}},"
- "{ixscan: {pattern: {a: 1}, bounds: "
- "{a: [[6,10,false,false]]}}}]}}}}}}");
- }
-
- // SERVER-13754: too many scans in an $or explosion.
- TEST_F(QueryPlannerTest, TooManyToExplodeOr) {
- addIndex(BSON("a" << 1 << "e" << 1));
- addIndex(BSON("b" << 1 << "e" << 1));
- addIndex(BSON("c" << 1 << "e" << 1));
- addIndex(BSON("d" << 1 << "e" << 1));
- runQuerySortProj(fromjson("{$or: [{a: {$in: [1,2,3,4,5,6]},"
- "b: {$in: [1,2,3,4,5,6]}},"
- "{c: {$in: [1,2,3,4,5,6]},"
- "d: {$in: [1,2,3,4,5,6]}}]}"),
- BSON("e" << 1), BSONObj());
-
- // We cap the # of ixscans we're willing to create, so we don't get explosion. Instead
- // we get 5 different solutions which all use a blocking sort.
- assertNumSolutions(5U);
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: "
- "{or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {a: 1, e: 1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {c: 1, e: 1}}}}}]}}}}");
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: "
- "{or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {b: 1, e: 1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {c: 1, e: 1}}}}}]}}}}");
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: "
- "{or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {a: 1, e: 1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {d: 1, e: 1}}}}}]}}}}");
- assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: "
- "{or: {nodes: ["
- "{fetch: {node: {ixscan: {pattern: {b: 1, e: 1}}}}},"
- "{fetch: {node: {ixscan: {pattern: {d: 1, e: 1}}}}}]}}}}");
- }
-
- // SERVER-15696: Make sure explodeForSort copies filters on IXSCAN stages to all of the
- // scans resulting from the explode. Regex is the easiest way to have the planner create
- // an index scan which filters using the index key.
- TEST_F(QueryPlannerTest, ExplodeIxscanWithFilter) {
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySortProj(fromjson("{$and: [{b: {$regex: 'foo', $options: 'i'}},"
- "{a: {$in: [1, 2]}}]}"),
- BSON("b" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a:1, b:1},"
- "filter: {b: {$regex: 'foo', $options: 'i'}}}},"
- "{ixscan: {pattern: {a:1, b:1},"
- "filter: {b: {$regex: 'foo', $options: 'i'}}}}]}}}}");
-    }
-
- TEST_F(QueryPlannerTest, InWithSortAndLimitTrailingField) {
- addIndex(BSON("a" << 1 << "b" << -1 << "c" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b: {$gte: 0}}"),
- fromjson("{b: -1}"),
- BSONObj(), // no projection
- 0, // no skip
- -1); // .limit(1)
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b:-1}, limit: 1, "
- "node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{limit: {n: 1, node: {fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a:1,b:-1,c:1}}}, "
- " {ixscan: {pattern: {a:1,b:-1,c:1}}}]}}}}}}");
- }
-
- //
- // Multiple solutions
- //
-
- TEST_F(QueryPlannerTest, TwoPlans) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuery(fromjson("{a:1, b:{$gt:2,$lt:2}}"));
-
- // 2 indexed solns and one non-indexed
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{b:{$lt:2}},{a:1},{b:{$gt:2}}]}}}");
- assertSolutionExists("{fetch: {filter: {$and:[{b:{$lt:2}},{b:{$gt:2}}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoPlansElemMatch) {
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("arr.x" << 1 << "a" << 1));
-
- runQuery(fromjson("{arr: { $elemMatch : { x : 5 , y : 5 } },"
- " a : 55 , b : { $in : [ 1 , 5 , 8 ] } }"));
-
- // 2 indexed solns and one non-indexed
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[55,55,true,true]], b: [[1,1,true,true], "
- "[5,5,true,true], [8,8,true,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {$and: [{arr:{$elemMatch:{x:5,y:5}}},"
- "{b:{$in:[1,5,8]}}]}, "
- "node: {ixscan: {pattern: {'arr.x':1,a:1}, bounds: "
- "{'arr.x': [[5,5,true,true]], 'a':[[55,55,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundAndNonCompoundIndices) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("a" << 1 << "b" << 1), true);
- runQuery(fromjson("{a: 1, b: {$gt: 2, $lt: 2}}"));
-
- ASSERT_EQUALS(getNumSolutions(), 3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and:[{b:{$lt:2}},{b:{$gt:2}}]}, node: "
- "{ixscan: {pattern: {a:1}, bounds: {a: [[1,1,true,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {b:{$gt:2}}, node: "
- "{ixscan: {pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true]], b: [[-Infinity,2,true,false]]}}}}}");
- }
-
- //
- // Sort orders
- //
-
- // SERVER-1205.
- TEST_F(QueryPlannerTest, MergeSort) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
- runQuerySortProj(fromjson("{$or: [{a:1}, {b:1}]}"), fromjson("{c:1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
- }
-
- // SERVER-1205 as well.
- TEST_F(QueryPlannerTest, NoMergeSortIfNoSortWanted) {
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1 << "c" << 1));
- runQuerySortProj(fromjson("{$or: [{a:1}, {b:1}]}"), BSONObj(), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a:1}, {b:1}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
- "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
- }
-
- // Basic "keep sort in mind with an OR"
- TEST_F(QueryPlannerTest, MergeSortEvenIfSameIndex) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{$or: [{a:1}, {a:7}]}"), fromjson("{b:1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- // TODO the second solution should be mergeSort rather than just sort
- }
-
- TEST_F(QueryPlannerTest, ReverseScanForSort) {
- addIndex(BSON("_id" << 1));
- runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{sort: {pattern: {_id: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {_id: 1}}}}}");
- }
-
- //
- // Hint tests
- //
-
- TEST_F(QueryPlannerTest, NaturalHint) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: "
- "{cscan: {filter: {a: 1}, dir: 1}}}}");
- }
-
- // Test $natural sort and its interaction with $natural hint.
- TEST_F(QueryPlannerTest, NaturalSortAndHint) {
- addIndex(BSON("x" << 1));
-
- // Non-empty query, -1 sort, no hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Non-empty query, 1 sort, no hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << 1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Non-empty query, -1 sort, -1 hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1),
- BSON("$natural" << -1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Non-empty query, 1 sort, -1 hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << 1),
- BSON("$natural" << -1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Non-empty query, -1 sort, 1 hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1),
- BSON("$natural" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Non-empty query, 1 sort, 1 hint.
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << 1),
- BSON("$natural" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Empty query, -1 sort, no hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Empty query, 1 sort, no hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Empty query, -1 sort, -1 hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSON("$natural" << -1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Empty query, 1 sort, -1 hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSON("$natural" << -1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
-
- // Empty query, -1 sort, 1 hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSON("$natural" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: -1}}");
-
- // Empty query, 1 sort, 1 hint.
- runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSON("$natural" << 1));
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, HintOverridesNaturalSort) {
- addIndex(BSON("x" << 1));
- runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSON("x" << 1));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {x:{$exists:true}}, node: "
- "{ixscan: {filter: null, pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintValid) {
- addIndex(BSON("a" << 1));
- runQueryHint(BSONObj(), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintValidWithPredicate) {
- addIndex(BSON("a" << 1));
- runQueryHint(fromjson("{a: {$gt: 1}}"), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintValidWithSort) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortHint(fromjson("{a: 100, b: 200}"), fromjson("{b: 1}"), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: "
- "{fetch: {filter: {b: 200}, "
- "node: {ixscan: {filter: null, pattern: {a: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintElemMatch) {
- // true means multikey
- addIndex(fromjson("{'a.b': 1}"), true);
- runQueryHint(fromjson("{'a.b': 1, a: {$elemMatch: {b: 2}}}"), fromjson("{'a.b': 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {$and: [{a:{$elemMatch:{b:2}}}, {'a.b': 1}]}, "
- "node: {ixscan: {filter: null, pattern: {'a.b': 1}, bounds: "
- "{'a.b': [[2, 2, true, true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintInvalid) {
- addIndex(BSON("a" << 1));
- runInvalidQueryHint(BSONObj(), fromjson("{b: 1}"));
- }
-
- //
- // Sparse indices, SERVER-8067
- // Each index in this block of tests is sparse.
- //
-
- TEST_F(QueryPlannerTest, SparseIndexIgnoreForSort) {
- addIndex(fromjson("{a: 1}"), false, true);
- runQuerySortProj(BSONObj(), fromjson("{a: 1}"), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- }
-
- TEST_F(QueryPlannerTest, SparseIndexHintForSort) {
- addIndex(fromjson("{a: 1}"), false, true);
- runQuerySortHint(BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SparseIndexPreferCompoundIndexForSort) {
- addIndex(fromjson("{a: 1}"), false, true);
- addIndex(fromjson("{a: 1, b: 1}"));
- runQuerySortProj(BSONObj(), fromjson("{a: 1}"), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, SparseIndexForQuery) {
- addIndex(fromjson("{a: 1}"), false, true);
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: 1}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}");
- }
-
- //
- // Regex
- //
-
- TEST_F(QueryPlannerTest, PrefixRegex) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a: /^foo/}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: /^foo/}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, PrefixRegexCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: /^foo/}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {a: /^foo/}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegex) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a: /foo/}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {a: /foo/}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: {a: /foo/}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: /foo/}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {a: /foo/}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {a: /foo/}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexAnd) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: /foo/, b: 2}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{b: 2}, {a: /foo/}]}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: {a: /foo/}, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexAndCovering) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{a: /foo/, b: 2}"), BSONObj(),
- fromjson("{_id: 0, a: 1, b: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
- "{cscan: {dir: 1, filter: {$and: [{b: 2}, {a: /foo/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
- "{ixscan: {filter: {a: /foo/}, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexOrCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: /0/}, {a: /1/}]}"), BSONObj(),
- fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {$or: [{a: /0/}, {a: /1/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {$or: [{a: /0/}, {a: /1/}]}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexInCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: {$in: [/foo/, /bar/]}}"), BSONObj(),
- fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {a:{$in:[/foo/,/bar/]}}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {a:{$in:[/foo/,/bar/]}}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoRegexCompoundIndexCovering) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson("{a: /0/, b: /1/}"), BSONObj(),
- fromjson("{_id: 0, a: 1, b: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
- "{cscan: {dir: 1, filter: {$and:[{a:/0/},{b:/1/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
- "{ixscan: {filter: {$and:[{a:/0/},{b:/1/}]}, pattern: {a: 1, b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoRegexSameFieldCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$and: [{a: /0/}, {a: /1/}]}"), BSONObj(),
- fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {$and:[{a:/0/},{a:/1/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {$and:[{a:/0/},{a:/1/}]}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ThreeRegexSameFieldCovering) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$and: [{a: /0/}, {a: /1/}, {a: /2/}]}"), BSONObj(),
- fromjson("{_id: 0, a: 1}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{cscan: {dir: 1, filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NonPrefixRegexMultikey) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: /foo/}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {filter: {a: /foo/}, dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a: /foo/}, node: {ixscan: "
- "{pattern: {a: 1}, filter: null}}}}");
- }
-
- TEST_F(QueryPlannerTest, ThreeRegexSameFieldMultikey) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{$and: [{a: /0/}, {a: /1/}, {a: /2/}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 2U);
- assertSolutionExists("{cscan: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, dir: 1}}");
- assertSolutionExists("{fetch: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, node: {ixscan: "
- "{pattern: {a: 1}, filter: null}}}}");
- }
-
- //
- // Negation
- //
-
- TEST_F(QueryPlannerTest, NegationIndexForSort) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: {$ne: 1}}"), fromjson("{a: 1}"), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, "
- "bounds: {a: [['MinKey',1,true,false], "
- "[1,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationTopLevel) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: {$ne: 1}}"), BSONObj(), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [['MinKey',1,true,false], "
- "[1,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationOr) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, NegationOrNotIn) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: 1}, {b: {$nin: [1]}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, NegationAndIndexOnEquality) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$and: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1},"
- "bounds: {a: [[1,1,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationAndIndexOnEqualityAndNegationBranches) {
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortProj(fromjson("{$and: [{a: 1}, {b: 2}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, "
- "bounds: {a: [[1,1,true,true]]}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {b: 1}, "
- "bounds: {b: [[2,2,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationAndIndexOnInequality) {
- addIndex(BSON("b" << 1));
- runQuerySortProj(fromjson("{$and: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: {ixscan: {pattern: {b:1}, "
- "bounds: {b: [['MinKey',1,true,false], "
- "[1,'MaxKey',false,true]]}}}}}");
- }
-
- // Negated regexes don't use the index.
- TEST_F(QueryPlannerTest, NegationRegexPrefix) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: /^a/}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated mods don't use the index
- TEST_F(QueryPlannerTest, NegationMod) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$mod: [2, 1]}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated $type doesn't use the index
- TEST_F(QueryPlannerTest, NegationTypeOperator) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$type: 16}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated $elemMatch value won't use the index
- TEST_F(QueryPlannerTest, NegationElemMatchValue) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$elemMatch: {$gt: 3, $lt: 10}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated $elemMatch object won't use the index
- TEST_F(QueryPlannerTest, NegationElemMatchObject) {
- addIndex(BSON("i.j" << 1));
- runQuery(fromjson("{i: {$not: {$elemMatch: {j: 1}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Negated $elemMatch object won't use the index
- TEST_F(QueryPlannerTest, NegationElemMatchObject2) {
- addIndex(BSON("i.j" << 1));
- runQuery(fromjson("{i: {$not: {$elemMatch: {j: {$ne: 1}}}}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // If there is a negation that can't use the index,
- // ANDed with a predicate that can use the index, then
- // we can still use the index for the latter predicate.
- TEST_F(QueryPlannerTest, NegationRegexWithIndexablePred) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{$and: [{i: {$not: /o/}}, {i: 2}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [[2,2,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegationCantUseSparseIndex) {
- // false means not multikey, true means sparse
- addIndex(BSON("i" << 1), false, true);
- runQuery(fromjson("{i: {$ne: 4}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- TEST_F(QueryPlannerTest, NegationCantUseSparseIndex2) {
- // false means not multikey, true means sparse
- addIndex(BSON("i" << 1 << "j" << 1), false, true);
- runQuery(fromjson("{i: 4, j: {$ne: 5}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {i:1,j:1}, bounds: "
- "{i: [[4,4,true,true]], j: [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatedRangeStrGT) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$gt: 'a'}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey','a',true,true], "
- "[{},'MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatedRangeStrGTE) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$gte: 'a'}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey','a',true,false], "
- "[{},'MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatedRangeIntGT) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$gt: 5}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey',5,true,true], "
- "[Infinity,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatedRangeIntGTE) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{i: {$not: {$gte: 5}}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey',5,true,false], "
- "[Infinity,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, TwoNegatedRanges) {
- addIndex(BSON("i" << 1));
- runQuery(fromjson("{$and: [{i: {$not: {$lte: 'b'}}}, "
- "{i: {$not: {$gte: 'f'}}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
- "bounds: {i: [['MinKey','',true,false], "
- "['b','f',false,false], "
- "[{},'MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, AndWithNestedNE) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a: {$gt: -1, $lt: 1, $ne: 0}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [[-1,0,false,false], "
- "[0,1,false,false]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NegatePredOnCompoundIndex) {
- addIndex(BSON("x" << 1 << "a" << 1));
- runQuery(fromjson("{x: 1, a: {$ne: 1}, b: {$ne: 2}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {x:1,a:1}, bounds: "
- "{x: [[1,1,true,true]], "
- "a: [['MinKey',1,true,false], [1,'MaxKey',false,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NEOnMultikeyIndex) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: {$ne: 3}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$ne:3}}, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [['MinKey',3,true,false],"
- "[3,'MaxKey',false,true]]}}}}}");
- }
-
- // In general, a negated $nin can make use of an index.
- TEST_F(QueryPlannerTest, NinUsesMultikeyIndex) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: {$nin: [4, 10]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {a:{$nin:[4,10]}}, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [['MinKey',4,true,false],"
- "[4,10,false,false],"
- "[10,'MaxKey',false,true]]}}}}}");
- }
-
- // But it can't if the $nin contains a regex because regex bounds can't
- // be complemented.
- TEST_F(QueryPlannerTest, NinCantUseMultikeyIndex) {
- // true means multikey
- addIndex(BSON("a" << 1), true);
- runQuery(fromjson("{a: {$nin: [4, /foobar/]}}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- //
- // Multikey indices
- //
-
- //
- // Index bounds related tests
- //
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsLastFieldMissing) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuery(fromjson("{a: 5, b: {$gt: 7}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c: 1}, bounds: "
- "{a: [[5,5,true,true]], b: [[7,Infinity,false,true]], "
- " c: [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsMiddleFieldMissing) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuery(fromjson("{a: 1, c: {$lt: 3}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c: 1}, bounds: "
- "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]], "
- " c: [[-Infinity,3,true,false]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsRangeAndEquality) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: {$gt: 8}, b: 6}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
- "{a: [[8,Infinity,false,true]], b:[[6,6,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsEqualityThenIn) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: 5, b: {$in: [2,6,11]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, pattern: "
- "{a: 1, b: 1}, bounds: {a: [[5,5,true,true]], "
- "b:[[2,2,true,true],[6,6,true,true],[11,11,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsStringBounds) {
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: {$gt: 'foo'}, b: {$gte: 'bar'}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, pattern: "
- "{a: 1, b: 1}, bounds: {a: [['foo',{},false,false]], "
- "b:[['bar',{},true,false]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, IndexBoundsAndWithNestedOr) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$and: [{a: 1, $or: [{a: 2}, {a: 3}]}]}"));
-
-        // Given that the index over 'a' isn't multikey, we ideally wouldn't generate any
-        // solutions, since the query then describes an empty set. Any solutions below are
-        // "this is how it currently works" rather than "this is how it should work."
-
- // It's kind of iffy to look for indexed solutions so we don't...
- size_t matches = 0;
- matches += numSolutionMatches("{cscan: {dir: 1, filter: "
- "{$or: [{a: 2, a:1}, {a: 3, a:1}]}}}");
- matches += numSolutionMatches("{cscan: {dir: 1, filter: "
- "{$and: [{$or: [{a: 2}, {a: 3}]}, {a: 1}]}}}");
- ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
- }
-
- TEST_F(QueryPlannerTest, IndexBoundsIndexedSort) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("a" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {a:1}, limit: 0, node: "
- "{cscan: {filter: {$or:[{a:1},{a:2}]}, dir: 1}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, "
- "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, IndexBoundsUnindexedSort) {
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("b" << 1), BSONObj());
-
- assertNumSolutions(2U);
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: "
- "{cscan: {filter: {$or:[{a:1},{a:2}]}, dir: 1}}}}");
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {fetch: "
- "{filter: null, node: {ixscan: {filter: null, "
- "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, IndexBoundsUnindexedSortHint) {
- addIndex(BSON("a" << 1));
- runQuerySortHint(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("b" << 1), BSON("a" << 1));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {fetch: "
- "{filter: null, node: {ixscan: {filter: null, "
- "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CompoundIndexBoundsIntersectRanges) {
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- addIndex(BSON("a" << 1 << "c" << 1));
- runQuery(fromjson("{a: {$gt: 1, $lt: 10}, c: {$gt: 1, $lt: 10}}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1,b:1,c:1}, "
- "bounds: {a: [[1,10,false,false]], "
- "b: [['MinKey','MaxKey',true,true]], "
- "c: [[1,10,false,false]]}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1,c:1}, "
- "bounds: {a: [[1,10,false,false]], "
- "c: [[1,10,false,false]]}}}}}");
- }
-
-    // Test that the planner properly takes the union of the index bounds for two
-    // negation predicates (SERVER-13890).
- TEST_F(QueryPlannerTest, IndexBoundsOrOfNegations) {
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: {$ne: 3}}, {a: {$ne: 4}}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
- "bounds: {a: [['MinKey','MaxKey',true,true]]}}}}}");
- }
-
- TEST_F(QueryPlannerTest, BoundsTypeMinKeyMaxKey) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
-
- runQuery(fromjson("{a: {$type: -1}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
- "{a: [['MinKey','MinKey',true,true]]}}}}}");
-
- runQuery(fromjson("{a: {$type: 127}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
- "{a: [['MaxKey','MaxKey',true,true]]}}}}}");
- }
-
- //
- // Tests related to building index bounds for multikey
- // indices, combined with compound and $elemMatch
- //
-
- // SERVER-12475: make sure that we compound bounds, even
- // for a multikey index.
- TEST_F(QueryPlannerTest, CompoundMultikeyBounds) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1), true);
- runQuery(fromjson("{a: 1, b: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {filter: {$and:[{a:1},{b:3}]}, dir: 1}}");
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: {filter: null, "
- "pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true]], b: [[3,3,true,true]]}}}}}");
- }
-
- // Make sure that we compound bounds but do not intersect bounds
- // for a compound multikey index.
- TEST_F(QueryPlannerTest, CompoundMultikeyBoundsNoIntersect) {
- // true means multikey
- addIndex(BSON("a" << 1 << "b" << 1), true);
- runQuery(fromjson("{a: 1, b: {$gt: 3, $lte: 5}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{cscan: {dir: 1}}");
- assertSolutionExists("{fetch: {filter: {b:{$gt:3}}, node: {ixscan: {filter: null, "
- "pattern: {a:1,b:1}, bounds: "
- "{a: [[1,1,true,true]], b: [[-Infinity,5,true,true]]}}}}}");
- }
-
- //
- // QueryPlannerParams option tests
- //
-
- TEST_F(QueryPlannerTest, NoBlockingSortsAllowedTest) {
- params.options = QueryPlannerParams::NO_BLOCKING_SORT;
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
- assertNumSolutions(0U);
-
- addIndex(BSON("x" << 1));
-
- runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NoTableScanBasic) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- runQuery(BSONObj());
- assertNumSolutions(0U);
-
- addIndex(BSON("x" << 1));
-
- runQuery(BSONObj());
- assertNumSolutions(0U);
-
- runQuery(fromjson("{x: {$gte: 0}}"));
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, NoTableScanOrWithAndChild) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}"));
-
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {b: 7}, node: {ixscan: "
- "{filter: null, pattern: {a: 1}}}}}]}}}}");
- }
-
- //
- // Index Intersection.
- //
- // We don't exhaustively check all plans here. Instead we check that there exists an
- // intersection plan. The blending of >1 index plans and ==1 index plans is under development
-    // but we want to make sure that we create a >1 index plan when we should.
- //
-
- TEST_F(QueryPlannerTest, IntersectBasicTwoPred) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{a:1, b:{$gt: 1}}"));
-
- assertSolutionExists("{fetch: {filter: null, node: {andHash: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompound) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1 << "c" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{a:1, b:1, c:1}"));
-
- // There's an andSorted not andHash because the two seeks are point intervals.
- assertSolutionExists("{fetch: {filter: null, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1, c:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- // SERVER-12196
- TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompoundMatchesIdxOrder1) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuery(fromjson("{a:1, b:1}"));
-
- assertNumSolutions(3U);
-
- assertSolutionExists("{fetch: {filter: {b:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- // SERVER-12196
- TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompoundMatchesIdxOrder2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1));
- runQuery(fromjson("{a:1, b:1}"));
-
- assertNumSolutions(3U);
-
- assertSolutionExists("{fetch: {filter: {b:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectManySelfIntersections) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- // True means multikey.
- addIndex(BSON("a" << 1), true);
-
- // This one goes to 11.
- runQuery(fromjson("{a:1, a:2, a:3, a:4, a:5, a:6, a:7, a:8, a:9, a:10, a:11}"));
-
- // But this one only goes to 10.
- assertSolutionExists("{fetch: {filter: {a:11}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}}," // 1
- "{ixscan: {filter: null, pattern: {a:1}}}," // 2
- "{ixscan: {filter: null, pattern: {a:1}}}," // 3
- "{ixscan: {filter: null, pattern: {a:1}}}," // 4
- "{ixscan: {filter: null, pattern: {a:1}}}," // 5
- "{ixscan: {filter: null, pattern: {a:1}}}," // 6
- "{ixscan: {filter: null, pattern: {a:1}}}," // 7
- "{ixscan: {filter: null, pattern: {a:1}}}," // 8
- "{ixscan: {filter: null, pattern: {a:1}}}," // 9
- "{ixscan: {filter: null, pattern: {a:1}}}]}}}}"); // 10
- }
-
- TEST_F(QueryPlannerTest, IntersectSubtreeNodes) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1));
-
- runQuery(fromjson("{$or: [{a: 1}, {b: 1}], $or: [{c:1}, {d:1}]}"));
- assertSolutionExists("{fetch: {filter: null, node: {andHash: {nodes: ["
- "{or: {nodes: [{ixscan:{filter:null, pattern:{a:1}}},"
- "{ixscan:{filter:null, pattern:{b:1}}}]}},"
- "{or: {nodes: [{ixscan:{filter:null, pattern:{c:1}}},"
- "{ixscan:{filter:null, pattern:{d:1}}}]}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectSubtreeAndPred) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- runQuery(fromjson("{a: 1, $or: [{b:1}, {c:1}]}"));
-
-        // This can be rewritten to $or: [{a:1, b:1}, {a:1, c:1}]. We don't look for the various
- // single $or solutions as that's tested elsewhere. We look for the intersect solution,
- // where each AND inside of the root OR is an and_sorted.
- size_t matches = 0;
- matches += numSolutionMatches("{fetch: {filter: null, node: {or: {nodes: ["
- "{andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {'a':1}}},"
- "{ixscan: {filter: null, pattern: {'b':1}}}]}},"
- "{andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {'a':1}}},"
- "{ixscan: {filter: null, pattern: {'c':1}}}]}}]}}}}");
- matches += numSolutionMatches("{fetch: {filter: null, node: {andHash: {nodes:["
- "{or: {nodes: [{ixscan:{filter:null, pattern:{b:1}}},"
- "{ixscan:{filter:null, pattern:{c:1}}}]}},"
- "{ixscan:{filter: null, pattern:{a:1}}}]}}}}");
- ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
- }
-
- TEST_F(QueryPlannerTest, IntersectElemMatch) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a.b" << 1));
- addIndex(BSON("a.c" << 1));
- runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
- assertSolutionExists("{fetch: {filter: {a:{$elemMatch:{b:1, c:1}}},"
- "node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {'a.b':1}}},"
- "{ixscan: {filter: null, pattern: {'a.c':1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectSortFromAndHash) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- runQuerySortProj(fromjson("{a: 1, b:{$gt: 1}}"), fromjson("{b:1}"), BSONObj());
-
- // This provides the sort.
- assertSolutionExists("{fetch: {filter: null, node: {andHash: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
-
- // Rearrange the preds, shouldn't matter.
- runQuerySortProj(fromjson("{b: 1, a:{$lt: 7}}"), fromjson("{b:1}"), BSONObj());
- assertSolutionExists("{fetch: {filter: null, node: {andHash: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, IntersectCanBeVeryBig) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1));
- runQuery(fromjson("{$or: [{ 'a' : null, 'b' : 94, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 98, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 1, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 2, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 7, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
-
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
- }
-
- // Ensure that disabling AND_HASH intersection works properly.
- TEST_F(QueryPlannerTest, IntersectDisableAndHash) {
- bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection;
-
- // Turn index intersection on but disable hash-based intersection.
- internalQueryPlannerEnableHashIntersection = false;
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- runQuery(fromjson("{a: {$gt: 1}, b: 1, c: 1}"));
-
- // We should do an AND_SORT intersection of {b: 1} and {c: 1}, but no AND_HASH plans.
- assertNumSolutions(4U);
- assertSolutionExists("{fetch: {filter: {b: 1, c: 1}, node: {ixscan: "
- "{pattern: {a: 1}, bounds: {a: [[1,Infinity,false,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$gt:1},c:1}, node: {ixscan: "
- "{pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$gt:1},b:1}, node: {ixscan: "
- "{pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}}");
- assertSolutionExists("{fetch: {filter: {a:{$gt:1}}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {b:1}}},"
- "{ixscan: {filter: null, pattern: {c:1}}}]}}}}");
-
-        // Restore the old value of the hash intersection switch.
- internalQueryPlannerEnableHashIntersection = oldEnableHashIntersection;
- }
-
- //
- // Index intersection cases for SERVER-12825: make sure that
- // we don't generate an ixisect plan if a compound index is
- // available instead.
- //
-
- // SERVER-12825
- TEST_F(QueryPlannerTest, IntersectCompoundInsteadBasic) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson("{a: 1, b: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {b:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
- }
-
- // SERVER-12825
- TEST_F(QueryPlannerTest, IntersectCompoundInsteadThreeCompoundIndices) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("c" << 1 << "d" << 1));
- addIndex(BSON("a" << 1 << "c" << -1 << "b" << -1 << "d" << 1));
- runQuery(fromjson("{a: 1, b: 1, c: 1, d: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {$and: [{c:1},{d:1}]}, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
- assertSolutionExists("{fetch: {filter: {$and:[{a:1},{b:1}]}, node: "
- "{ixscan: {filter: null, pattern: {c:1,d:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a:1,c:-1,b:-1,d:1}}}}}");
- }
-
- // SERVER-12825
- TEST_F(QueryPlannerTest, IntersectCompoundInsteadUnusedField) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuery(fromjson("{a: 1, b: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {b:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1,c:1}}}}}");
- }
-
- // SERVER-12825
- TEST_F(QueryPlannerTest, IntersectCompoundInsteadUnusedField2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("c" << 1 << "d" << 1));
- addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQuery(fromjson("{a: 1, c: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {c:1}, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {c:1,d:1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: "
- "{ixscan: {filter: null, pattern: {a:1,b:1,c:1}}}}}");
- }
-
- //
-    // Test that we add a KeepMutations when we should, and that we don't add one when we shouldn't.
- //
-
- // Collection scan doesn't keep any state, so it can't produce flagged data.
- TEST_F(QueryPlannerTest, NoMutationsForCollscan) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- runQuery(fromjson(""));
- assertSolutionExists("{cscan: {dir: 1}}");
- }
-
- // Collscan + sort doesn't produce flagged data either.
- TEST_F(QueryPlannerTest, NoMutationsForSort) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- runQuerySortProj(fromjson(""), fromjson("{a:1}"), BSONObj());
- assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
- }
-
- // An index scan + fetch requires a keep node as it can flag data. Also make sure we put it in
- // the right place, under the sort.
- TEST_F(QueryPlannerTest, MutationsFromFetch) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- addIndex(BSON("a" << 1));
- runQuerySortProj(fromjson("{a: 5}"), fromjson("{b:1}"), BSONObj());
- assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {keep: {node: "
- "{fetch: {node: {ixscan: {pattern: {a:1}}}}}}}}}");
- }
-
- // Index scan w/covering doesn't require a keep node as there's no fetch.
- TEST_F(QueryPlannerTest, NoFetchNoKeep) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- addIndex(BSON("x" << 1));
- // query, sort, proj
- runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{_id: 0, x: 1}"));
-
- // cscan is a soln but we override the params that say to include it.
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{proj: {spec: {_id: 0, x: 1}, node: {ixscan: "
- "{filter: null, pattern: {x: 1}}}}}");
- }
-
- // No keep with geoNear.
- TEST_F(QueryPlannerTest, NoKeepWithGeoNear) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- addIndex(BSON("a" << "2d"));
- runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
- ASSERT_EQUALS(getNumSolutions(), 1U);
- assertSolutionExists("{geoNear2d: {a: '2d'}}");
- }
-
- // No keep when we have an indexed sort.
- TEST_F(QueryPlannerTest, NoKeepWithIndexedSort) {
- params.options = QueryPlannerParams::KEEP_MUTATIONS;
- addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"),
- BSON("b" << 1), BSONObj(), 0, 1);
-
- // cscan solution exists but we didn't turn on the "always include a collscan."
- assertNumSolutions(1);
- assertSolutionExists("{fetch: {node: {mergeSort: {nodes: "
- "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
- }
-
- // Make sure a top-level $or hits the limiting number
- // of solutions that we are willing to consider.
- TEST_F(QueryPlannerTest, OrEnumerationLimit) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- // 6 $or clauses, each with 2 indexed predicates
- // means 2^6 = 64 possibilities. We should hit the limit.
- runQuery(fromjson("{$or: [{a: 1, b: 1},"
- "{a: 2, b: 2},"
- "{a: 3, b: 3},"
- "{a: 4, b: 4},"
- "{a: 5, b: 5},"
- "{a: 6, b: 6}]}"));
-
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
- }
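The cap asserted on the line above is the same internalQueryEnumerationMaxOrSolutions knob the test already reads; a minimal sketch of the counting argument in the comment (the values are illustrative, not read from the server):

    // Two indexed choices ({a: 1} or {b: 1}) per clause, six clauses:
    const size_t orClauses = 6;
    const size_t candidateAssignments = size_t(1) << orClauses;  // 2^6 = 64, above the cap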
-
- TEST_F(QueryPlannerTest, OrEnumerationLimit2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1));
-
- // 3 $or clauses, and a few other preds. Each $or clause can
- // generate up to the max number of allowed $or enumerations.
- runQuery(fromjson("{$or: [{a: 1, b: 1, c: 1, d: 1},"
- "{a: 2, b: 2, c: 2, d: 2},"
- "{a: 3, b: 3, c: 3, d: 3}]}"));
-
- assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
- }
-
- // SERVER-13104: test that we properly enumerate all solutions for nested $or.
- TEST_F(QueryPlannerTest, EnumerateNestedOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- runQuery(fromjson("{d: 1, $or: [{a: 1, b: 1}, {c: 1}]}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
- "{ixscan: {pattern: {c: 1}}}]}}}}");
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
- "{ixscan: {pattern: {c: 1}}}]}}}}");
- }
-
- // SERVER-13104: test that we properly enumerate all solutions for nested $or.
- TEST_F(QueryPlannerTest, EnumerateNestedOr2) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1));
- addIndex(BSON("e" << 1));
- addIndex(BSON("f" << 1));
-
- runQuery(fromjson("{a: 1, b: 1, $or: [{c: 1, d: 1}, {e: 1, f: 1}]}"));
-
- assertNumSolutions(6U);
-
- // Four possibilities from indexing the $or.
- assertSolutionExists("{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {d: 1}, node: {ixscan: {pattern: {c: 1}}}}},"
- "{fetch: {filter: {f: 1}, node: {ixscan: {pattern: {e: 1}}}}}"
- "]}}}}");
- assertSolutionExists("{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}},"
- "{fetch: {filter: {f: 1}, node: {ixscan: {pattern: {e: 1}}}}}"
- "]}}}}");
- assertSolutionExists("{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {d: 1}, node: {ixscan: {pattern: {c: 1}}}}},"
- "{fetch: {filter: {e: 1}, node: {ixscan: {pattern: {f: 1}}}}}"
- "]}}}}");
- assertSolutionExists("{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}},"
- "{fetch: {filter: {e: 1}, node: {ixscan: {pattern: {f: 1}}}}}"
- "]}}}}");
-
- // Two possibilities from outside the $or.
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {b: 1}}}}}");
- }
-
- //
- // Test the "split limited sort stages" hack.
- //
-
- TEST_F(QueryPlannerTest, SplitLimitedSort) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQuerySortProjSkipLimit(fromjson("{a: 1}"), fromjson("{b: 1}"),
- BSONObj(), 0, 3);
-
- assertNumSolutions(2U);
- // First solution has no blocking stage; no need to split.
- assertSolutionExists("{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}");
- // Second solution has a blocking sort with a limit: it gets split and
- // joined with an OR stage.
- assertSolutionExists("{or: {nodes: ["
- "{sort: {pattern: {b: 1}, limit: 3, node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}, "
- "{sort: {pattern: {b: 1}, limit: 0, node: "
- "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}]}}");
- }
-
- // The same query run as a find command with a limit should not require the "split limited sort"
- // hack.
- TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommand) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, limit: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{limit: {n: 3, node: {fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}}}");
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 3, node: {fetch: {filter: null,"
- "node: {ixscan: {pattern: {a: 1}}}}}}}");
- }
-
- // Same query run as a find command with a batchSize rather than a limit should not require
- // the "split limited sort" hack, and should not have any limit represented inside the plan.
- TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommandBatchSize) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, batchSize: 3}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {a: 1}, node: {ixscan: "
- "{filter: null, pattern: {b: 1}}}}}");
- assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {fetch: {filter: null,"
- "node: {ixscan: {pattern: {a: 1}}}}}}}");
- }
-
- //
- // Test shard filter query planning
- //
-
- TEST_F(QueryPlannerTest, ShardFilterCollScan) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1));
-
- runQuery(fromjson("{b: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sharding_filter: {node: "
- "{cscan: {dir: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterBasicIndex) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQuery(fromjson("{b: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{sharding_filter: {node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {b: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterBasicCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1));
-
- runQuery(fromjson("{a: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: "
- "{sharding_filter: {node: "
- "{ixscan: {pattern: {a: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterBasicProjCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, type: 'coveredIndex', node: "
- "{sharding_filter: {node: "
- "{ixscan: {pattern: {a: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterCompoundProjCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1 << "b" << 1);
- addIndex(BSON("a" << 1 << "b" << 1));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, b: 1 }, type: 'coveredIndex', node: "
- "{sharding_filter: {node: "
- "{ixscan: {pattern: {a: 1, b: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterNestedProjNotCovered) {
- // Nested projections can't be covered currently, though the shard key filter shouldn't need
- // to fetch.
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1 << "b.c" << 1);
- addIndex(BSON("a" << 1 << "b.c" << 1));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id: 0, a: 1, 'b.c': 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1, 'b.c': 1 }, type: 'default', node: "
- "{fetch: {node: "
- "{sharding_filter: {node: "
- "{ixscan: {pattern: {a: 1, 'b.c': 1}}}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterHashProjNotCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << "hashed");
- addIndex(BSON("a" << "hashed"));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0,a: 1}, type: 'simple', node: "
- "{sharding_filter : {node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {a: 'hashed'}}}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterKeyPrefixIndexCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << 1);
- addIndex(BSON("a" << 1 << "b" << 1 << "_id" << 1));
-
- runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{a : 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {a: 1}, type: 'coveredIndex', node: "
- "{sharding_filter : {node: "
- "{ixscan: {pattern: {a: 1, b: 1, _id: 1}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, ShardFilterNoIndexNotCovered) {
- params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
- params.shardKey = BSON("a" << "hashed");
- addIndex(BSON("b" << 1));
-
- runQuerySortProj(fromjson("{b: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{proj: {spec: {_id: 0,a: 1}, type: 'simple', node: "
- "{sharding_filter : {node: "
- "{fetch: {node: "
- "{ixscan: {pattern: {b: 1}}}}}}}}}");
- }
-
- TEST_F(QueryPlannerTest, CannotTrimIxisectParam) {
- params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
- params.options |= QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
-
- runQuery(fromjson("{a: 1, b: 1, c: 1}"));
-
- assertNumSolutions(3U);
- assertSolutionExists("{fetch: {filter: {b: 1, c: 1}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- assertSolutionExists("{fetch: {filter: {a: 1, c: 1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}");
- assertSolutionExists("{fetch: {filter: {a:1,b:1,c:1}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, CannotTrimIxisectParamBeneathOr) {
- params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
- params.options |= QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- runQuery(fromjson("{d: 1, $or: [{a: 1}, {b: 1, c: 1}]}"));
-
- assertNumSolutions(3U);
-
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {c: 1}, node: {ixscan: {filter: null,"
- "pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
- "{ixscan: {filter: null, pattern: {a: 1},"
- "bounds: {a: [[1,1,true,true]]}}}]}}}}");
-
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {b: 1}, node: {ixscan: {filter: null,"
- "pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}},"
- "{ixscan: {filter: null, pattern: {a: 1},"
- "bounds: {a: [[1,1,true,true]]}}}]}}}}");
-
- assertSolutionExists("{fetch: {filter: {d: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {b: 1, c: 1}, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {b: 1}}},"
- "{ixscan: {filter: null, pattern: {c: 1}}}]}}}},"
- "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, CannotTrimIxisectAndHashWithOrChild) {
- params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
- params.options |= QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
-
- runQuery(fromjson("{c: 1, $or: [{a: 1}, {b: 1, d: 1}]}"));
-
- assertNumSolutions(3U);
-
- assertSolutionExists("{fetch: {filter: {c: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {d: 1}, node: {ixscan: {filter: null,"
- "pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
- "{ixscan: {filter: null, pattern: {a: 1},"
- "bounds: {a: [[1,1,true,true]]}}}]}}}}");
-
- assertSolutionExists("{fetch: {filter: {$or:[{b:1,d:1},{a:1}]}, node:"
- "{ixscan: {filter: null, pattern: {c: 1}}}}}");
-
- assertSolutionExists("{fetch: {filter: {c:1,$or:[{a:1},{b:1,d:1}]}, node:{andHash:{nodes:["
- "{or: {nodes: ["
- "{fetch: {filter: {d:1}, node: {ixscan: {pattern: {b: 1}}}}},"
- "{ixscan: {filter: null, pattern: {a: 1}}}]}},"
- "{ixscan: {filter: null, pattern: {c: 1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, CannotTrimIxisectParamSelfIntersection) {
- params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
- params.options = QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- // true means multikey
- addIndex(BSON("a" << 1), true);
-
- runQuery(fromjson("{a: {$all: [1, 2, 3]}}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {$and: [{a:2}, {a:3}]}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- assertSolutionExists("{fetch: {filter: null, node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1},"
- "bounds: {a: [[1,1,true,true]]}}},"
- "{ixscan: {filter: null, pattern: {a:1},"
- "bounds: {a: [[2,2,true,true]]}}},"
- "{ixscan: {filter: null, pattern: {a:1},"
- "bounds: {a: [[3,3,true,true]]}}}]}}}}");
- }
-
-
- // If a lookup against a unique index is available as a possible plan, then the planner
- // should not generate other possibilities.
- TEST_F(QueryPlannerTest, UniqueIndexLookup) {
- params.options = QueryPlannerParams::INDEX_INTERSECTION;
- params.options |= QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQuery(fromjson("{a: 1, b: 1}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a: 1}, node: "
- "{ixscan: {filter: null, pattern: {b: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, HintOnNonUniqueIndex) {
- params.options = QueryPlannerParams::INDEX_INTERSECTION;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQueryHint(fromjson("{a: 1, b: 1}"), BSON("a" << 1));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {b: 1}, node: "
- "{ixscan: {filter: null, pattern: {a: 1}}}}}");
- }
-
- TEST_F(QueryPlannerTest, UniqueIndexLookupBelowOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQuery(fromjson("{$or: [{a: 1, b: 1}, {c: 1, d: 1}]}"));
-
- // Only two plans because we throw out plans for the right branch of the $or that do not
- // use equality over the unique index.
- assertNumSolutions(2U);
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}]}}");
- assertSolutionExists("{or: {nodes: ["
- "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}]}}");
- }
-
- TEST_F(QueryPlannerTest, UniqueIndexLookupBelowOrBelowAnd) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("c" << 1));
- addIndex(BSON("d" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQuery(fromjson("{e: 1, $or: [{a: 1, b: 1}, {c: 1, d: 1}]}"));
-
- // Only two plans because we throw out plans for the right branch of the $or that do not
- // use equality over the unique index.
- assertNumSolutions(2U);
- assertSolutionExists("{fetch: {filter: {e: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}"
- "]}}}}");
- assertSolutionExists("{fetch: {filter: {e: 1}, node: {or: {nodes: ["
- "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
- "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}"
- "]}}}}");
- }
-
- TEST_F(QueryPlannerTest, CoveredOrUniqueIndexLookup) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
-
- addIndex(BSON("a" << 1 << "b" << 1));
- addIndex(BSON("a" << 1),
- false, // multikey
- false, // sparse,
- true); // unique
-
- runQuerySortProj(fromjson("{a: 1, b: 1}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
-
- assertNumSolutions(2U);
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}}}}");
- assertSolutionExists("{proj: {spec: {_id: 0, a: 1}, node: "
- "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
- }
-
- //
- // Test bad input to query planner helpers.
- //
-
- TEST(BadInputTest, CacheDataFromTaggedTree) {
- PlanCacheIndexTree* indexTree;
-
- // Null match expression.
- std::vector<IndexEntry> relevantIndices;
- Status s = QueryPlanner::cacheDataFromTaggedTree(NULL, relevantIndices, &indexTree);
- ASSERT_NOT_OK(s);
- ASSERT(NULL == indexTree);
-
- // No relevant index matching the index tag.
- relevantIndices.push_back(IndexEntry(BSON("a" << 1)));
-
- CanonicalQuery *cq;
- Status cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
- ASSERT_OK(cqStatus);
- std::unique_ptr<CanonicalQuery> scopedCq(cq);
- scopedCq->root()->setTag(new IndexTag(1));
-
- s = QueryPlanner::cacheDataFromTaggedTree(scopedCq->root(), relevantIndices, &indexTree);
- ASSERT_NOT_OK(s);
- ASSERT(NULL == indexTree);
- }
-
- TEST(BadInputTest, TagAccordingToCache) {
- CanonicalQuery *cq;
- Status cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
- ASSERT_OK(cqStatus);
- std::unique_ptr<CanonicalQuery> scopedCq(cq);
-
- std::unique_ptr<PlanCacheIndexTree> indexTree(new PlanCacheIndexTree());
- indexTree->setIndexEntry(IndexEntry(BSON("a" << 1)));
-
- std::map<BSONObj, size_t> indexMap;
-
- // Null filter.
- Status s = QueryPlanner::tagAccordingToCache(NULL, indexTree.get(), indexMap);
- ASSERT_NOT_OK(s);
-
- // Null indexTree.
- s = QueryPlanner::tagAccordingToCache(scopedCq->root(), NULL, indexMap);
- ASSERT_NOT_OK(s);
-
- // Index not found.
- s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
- ASSERT_NOT_OK(s);
-
- // Index found once added to the map.
- indexMap[BSON("a" << 1)] = 0;
- s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
- ASSERT_OK(s);
-
- // Regenerate canonical query in order to clear tags.
- cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
- ASSERT_OK(cqStatus);
- scopedCq.reset(cq);
-
- // Mismatched tree topology.
- PlanCacheIndexTree* child = new PlanCacheIndexTree();
- child->setIndexEntry(IndexEntry(BSON("a" << 1)));
- indexTree->children.push_back(child);
- s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
- ASSERT_NOT_OK(s);
- }
+ runQuery(fromjson("{b: {$not: {$exists: true}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {b: 1}, bounds: "
+ "{b: [[null, null, true, true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ExistsBoundsCompound) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a: 1, b: {$exists: true}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: true}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
+
+ // This ends up being a double negation, which we currently don't index.
+ runQuery(fromjson("{a: 1, b: {$not: {$exists: false}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]]}}}}}");
+
+ runQuery(fromjson("{a: 1, b: {$exists: false}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
+
+ runQuery(fromjson("{a: 1, b: {$not: {$exists: true}}}"));
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b: {$exists: false}}, node: "
+ "{ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[null,null,true,true]]}}}}}");
+}
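A note on the bounds notation used throughout these expectations: each interval reads [low, high, lowInclusive, highInclusive]. The two $exists forms above then map to different intervals on 'b' (a descriptive sketch of the assertions, not planner API):

    // {b: {$exists: true}}  cannot be proven from the key alone, so the scan keeps the
    // full range on b and the predicate stays in the filter:
    //     b: [['MinKey', 'MaxKey', true, true]]
    // {b: {$exists: false}} can use a point interval, because missing fields are
    // indexed as null:
    //     b: [[null, null, true, true]]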
+
+//
+// skip and limit
+//
+
+TEST_F(QueryPlannerTest, BasicSkipNoIndex) {
+ addIndex(BSON("a" << 1));
+
+ runQuerySkipLimit(BSON("x" << 5), 3, 0);
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{skip: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSkipWithIndex) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySkipLimit(BSON("a" << 5), 8, 0);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{skip: {n: 8, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{skip: {n: 8, node: {fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicLimitNoIndex) {
+ addIndex(BSON("a" << 1));
+
+ runQuerySkipLimit(BSON("x" << 5), 0, -3);
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{limit: {n: 3, node: {cscan: {dir: 1, filter: {x: 5}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSoftLimitNoIndex) {
+ addIndex(BSON("a" << 1));
+
+ runQuerySkipLimit(BSON("x" << 5), 0, 3);
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {x: 5}}}");
+}
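The sign of the limit argument matters for the rest of this group. Judging from how runQuerySkipLimit is used here, and from the "Negative limit indicates hard limit" comment later in this section, the convention is roughly:

    // runQuerySkipLimit(query, skip, limit)
    //   limit < 0  -> hard limit of |limit|: the plan must contain a LIMIT stage
    //   limit > 0  -> soft limit (ntoreturn): no LIMIT stage is required
    //   limit == 0 -> no limit at all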
+
+TEST_F(QueryPlannerTest, BasicLimitWithIndex) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySkipLimit(BSON("a" << 5), 0, -5);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{limit: {n: 5, node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{limit: {n: 5, node: {fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSoftLimitWithIndex) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySkipLimit(BSON("a" << 5), 0, 5);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: 5}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SkipAndLimit) {
+ addIndex(BSON("x" << 1));
+
+ runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, -2);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{limit: {n: 2, node: {skip: {n: 7, node: "
+ "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}}}");
+ assertSolutionExists(
+ "{limit: {n: 2, node: {skip: {n: 7, node: {fetch: "
+ "{filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SkipAndSoftLimit) {
+ addIndex(BSON("x" << 1));
+
+ runQuerySkipLimit(BSON("x" << BSON("$lte" << 4)), 7, 2);
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{skip: {n: 7, node: "
+ "{cscan: {dir: 1, filter: {x: {$lte: 4}}}}}}");
+ assertSolutionExists(
+ "{skip: {n: 7, node: {fetch: "
+ "{filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}}}");
+}
+
+//
+// tree operations
+//
+
+TEST_F(QueryPlannerTest, TwoPredicatesAnding) {
+ addIndex(BSON("x" << 1));
+
+ runQuery(fromjson("{$and: [ {x: {$gt: 1}}, {x: {$lt: 3}} ] }"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SimpleOr) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: 20}, {a: 21}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 20}, {a: 21}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a:1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrWithoutEnoughIndices) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: 20}, {b: 21}]}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 20}, {b: 21}]}}}");
+}
+
+TEST_F(QueryPlannerTest, OrWithAndChild) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {b: 7}, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, AndWithUnindexedOrChild) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a:20, $or: [{b:1}, {c:7}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Logical rewrite means we could get one of these two outcomes:
+ size_t matches = 0;
+ matches += numSolutionMatches(
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ matches += numSolutionMatches(
+ "{or: {filter: null, nodes: ["
+ "{fetch: {filter: {b:1}, node: {"
+ "ixscan: {filter: null, pattern: {a:1}}}}},"
+ "{fetch: {filter: {c:7}, node: {"
+ "ixscan: {filter: null, pattern: {a:1}}}}}]}}");
+ ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
+}
+
+
+TEST_F(QueryPlannerTest, AndWithOrWithOneIndex) {
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{b:1}, {c:7}], a:20}"));
+
+ // Logical rewrite gives us at least one of these:
+ assertSolutionExists("{cscan: {dir: 1}}");
+ size_t matches = 0;
+ matches += numSolutionMatches(
+ "{fetch: {filter: {$or: [{b: 1}, {c: 7}]}, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+ matches += numSolutionMatches(
+ "{or: {filter: null, nodes: ["
+ "{fetch: {filter: {b:1}, node: {"
+ "ixscan: {filter: null, pattern: {a:1}}}}},"
+ "{fetch: {filter: {c:7}, node: {"
+ "ixscan: {filter: null, pattern: {a:1}}}}}]}}");
+ ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
+}
+
+//
+// Additional $or tests
+//
+
+TEST_F(QueryPlannerTest, OrCollapsesToSingleScan) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$gt:2}}, {a:{$gt:0}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[0,Infinity,false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrCollapsesToSingleScan2) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$lt:2}}, {a:{$lt:4}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[-Infinity,4,true,false]]}}}}}");
+}
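The single-interval bounds asserted by these two tests are simply the unions of the overlapping branches:

    // {$gt: 2} OR {$gt: 0}  ->  a: [[0, Infinity, false, true]]
    // {$lt: 2} OR {$lt: 4}  ->  a: [[-Infinity, 4, true, false]]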
+
+TEST_F(QueryPlannerTest, OrCollapsesToSingleScan3) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(fromjson("{$or: [{a:1},{a:3}]}"), fromjson("{a:1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[1,1,true,true], [3,3,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrOnlyOneBranchCanUseIndex) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:1}, {b:2}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, OrOnlyOneBranchCanUseIndexHinted) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(fromjson("{$or: [{a:1}, {b:2}]}"), fromjson("{a:1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {$or:[{a:1},{b:2}]}, node: {ixscan: "
+ "{pattern: {a:1}, bounds: "
+ "{a: [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, OrNaturalHint) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(fromjson("{$or: [{a:1}, {a:3}]}"), fromjson("{$natural:1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// SERVER-13714. A non-top-level indexable negation exposed a bug in plan enumeration.
+TEST_F(QueryPlannerTest, NonTopLevelIndexedNegation) {
+ addIndex(BSON("state" << 1));
+ addIndex(BSON("is_draft" << 1));
+ addIndex(BSON("published_date" << 1));
+ addIndex(BSON("newsroom_id" << 1));
+
+ BSONObj queryObj = fromjson(
+ "{$and:[{$or:[{is_draft:false},{creator_id:1}]},"
+ "{$or:[{state:3,is_draft:false},"
+ "{published_date:{$ne:null}}]},"
+ "{newsroom_id:{$in:[1]}}]}");
+ runQuery(queryObj);
+}
+
+TEST_F(QueryPlannerTest, NonTopLevelIndexedNegationMinQuery) {
+ addIndex(BSON("state" << 1));
+ addIndex(BSON("is_draft" << 1));
+ addIndex(BSON("published_date" << 1));
+
+ // This is the minimal query needed to reproduce SERVER-13714.
+ BSONObj queryObj = fromjson("{$or:[{state:1, is_draft:1}, {published_date:{$ne: 1}}]}");
+ runQuery(queryObj);
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:5}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a:1}, bounds: {a: [[2,10,false,false]]}}}, "
+ "{ixscan: {pattern: {a:1}, bounds: "
+ "{a: [[0,5,false,false]]}}}]}}}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd2) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$gt:2,$lt:10}}, {a:{$gt:0,$lt:15}}, {a:{$gt:20}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a:1}, bounds: {a: [[2,10,false,false]]}}}, "
+ "{ixscan: {pattern: {a:1}, bounds: {a: [[0,15,false,false]]}}}, "
+ "{ixscan: {pattern: {a:1}, bounds: "
+ "{a: [[20,Infinity,false,true]]}}}]}}}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd3) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a:{$gt:1,$lt:5},b:6}, {a:3,b:{$gt:0,$lt:10}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {b:6}, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[1,5,false,false]]}}}}}, "
+ "{fetch: {filter: {$and:[{b:{$lt:10}},{b:{$gt:0}}]}, node: "
+ "{ixscan: {pattern: {a:1}, bounds: {a:[[3,3,true,true]]}}}}}]}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd4) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson(
+ "{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
+ "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
+ "bounds: {a: [[1,5,false,false]], b: [[0,3,false,false]]}}}}}, "
+ "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
+ "{ixscan: {pattern: {a:1,b:1}, "
+ " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd5) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson(
+ "{$or: [{a:{$gt:1,$lt:5}, c:6}, "
+ "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {c:6}, node: {ixscan: {pattern: {a:1,b:1}, "
+ "bounds: {a: [[1,5,false,false]], "
+ "b: [['MinKey','MaxKey',true,true]]}}}}}, "
+ "{fetch: {filter: {$and:[{c:{$lt:10}},{c:{$gt:0}}]}, node: "
+ "{ixscan: {pattern: {a:1,b:1}, "
+ " bounds: {a:[[3,3,true,true]], b:[[1,2,false,false]]}}}}}]}}");
+}
+
+// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
+TEST_F(QueryPlannerTest, OrOfAnd6) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{$or: [{a:{$in:[1]},b:{$in:[1]}}, {a:{$in:[1,5]},b:{$in:[1,5]}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[1,1,true,true]]}}}, "
+ "{ixscan: {pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true], [5,5,true,true]], "
+ " b: [[1,1,true,true], [5,5,true,true]]}}}]}}}}");
+}
+
+// SERVER-13960: properly handle $or with a mix of exact and inexact predicates.
+TEST_F(QueryPlannerTest, OrInexactWithExact) {
+ addIndex(BSON("name" << 1));
+ runQuery(fromjson("{$or: [{name: 'thomas'}, {name: /^alexand(er|ra)/}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter:"
+ "{$or: [{name: 'thomas'}, {name: /^alexand(er|ra)/}]},"
+ "pattern: {name: 1}}}}}");
+}
+
+// SERVER-13960: multiple indices, each with an inexact covered predicate.
+TEST_F(QueryPlannerTest, OrInexactWithExact2) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson("{$or: [{a: 'foo'}, {a: /bar/}, {b: 'foo'}, {b: /bar/}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {or: {nodes: ["
+ "{ixscan: {filter: {$or:[{a:'foo'},{a:/bar/}]},"
+ "pattern: {a: 1}}},"
+ "{ixscan: {filter: {$or:[{b:'foo'},{b:/bar/}]},"
+ "pattern: {b: 1}}}]}}}}");
+}
+
+// SERVER-13960: an exact, inexact covered, and inexact fetch predicate.
+TEST_F(QueryPlannerTest, OrAllThreeTightnesses) {
+ addIndex(BSON("names" << 1));
+ runQuery(fromjson(
+ "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: "
+ "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}, "
+ "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
+}
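The SERVER-13960 tests hinge on how tightly the index bounds enforce each predicate, which is what decides where the residual filter lands in the expected plans (the labels below are descriptive only, not planner API):

    // exact            {names: 'frank'}                        bounds alone prove it; no filter needed
    // inexact covered  {names: /^al(ice)|(ex)/}                checkable from the index key, so it can sit on the ixscan
    // inexact fetch    {names: {$elemMatch: {$eq: 'thomas'}}}  needs the whole document, which is why the
    //                                                          entire $or is pushed up into the FETCH filter above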
+
+// SERVER-13960: two inexact fetch predicates.
+TEST_F(QueryPlannerTest, OrTwoInexactFetch) {
+ // true means multikey
+ addIndex(BSON("names" << 1), true);
+ runQuery(fromjson(
+ "{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: "
+ "{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}, "
+ "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
+}
+
+// SERVER-13960: multikey with exact and inexact covered predicates.
+TEST_F(QueryPlannerTest, OrInexactCoveredMultikey) {
+ // true means multikey
+ addIndex(BSON("names" << 1), true);
+ runQuery(fromjson("{$or: [{names: 'dave'}, {names: /joe/}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$or: [{names: 'dave'}, {names: /joe/}]}, "
+ "node: {ixscan: {filter: null, pattern: {names: 1}}}}}");
+}
+
+// SERVER-13960: $elemMatch object with $or.
+TEST_F(QueryPlannerTest, OrElemMatchObject) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson(
+ "{$or: [{a: {$elemMatch: {b: {$lte: 1}}}},"
+ "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {a:{$elemMatch:{b:{$gte:4}}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}},"
+ "{fetch: {filter: {a:{$elemMatch:{b:{$lte:1}}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1}}}}}]}}");
+}
+
+// SERVER-13960: $elemMatch object inside an $or, below an AND.
+TEST_F(QueryPlannerTest, OrElemMatchObjectBeneathAnd) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson(
+ "{$or: [{'a.b': 0, a: {$elemMatch: {b: {$lte: 1}}}},"
+ "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {$and:[{a:{$elemMatch:{b:{$lte:1}}}},{'a.b':0}]},"
+ "node: {ixscan: {filter: null, pattern: {'a.b': 1}, "
+ "bounds: {'a.b': [[-Infinity,1,true,true]]}}}}},"
+ "{fetch: {filter: {a:{$elemMatch:{b:{$gte:4}}}}, node: "
+ "{ixscan: {filter: null, pattern: {'a.b': 1},"
+ "bounds: {'a.b': [[4,Infinity,true,true]]}}}}}]}}");
+}
+
+// SERVER-13960: $or below $elemMatch with an inexact covered predicate.
+TEST_F(QueryPlannerTest, OrBelowElemMatchInexactCovered) {
+ // true means multikey
+ addIndex(BSON("a.b" << 1), true);
+ runQuery(fromjson("{a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: {$elemMatch: {$or: [{b: 'x'}, {b: /z/}]}}},"
+ "node: {ixscan: {filter: null, pattern: {'a.b': 1}}}}}");
+}
+
+// SERVER-13960: $in with exact and inexact covered predicates.
+TEST_F(QueryPlannerTest, OrWithExactAndInexact) {
+ addIndex(BSON("name" << 1));
+ runQuery(fromjson("{name: {$in: ['thomas', /^alexand(er|ra)/]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: {name: {$in: ['thomas', /^alexand(er|ra)/]}}, "
+ "pattern: {name: 1}}}}}");
+}
+
+// SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates.
+TEST_F(QueryPlannerTest, OrWithExactAndInexact2) {
+ addIndex(BSON("name" << 1));
+ runQuery(fromjson(
+ "{$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
+ "{name: {$exists: false}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
+ "{name: {$exists: false}}]}, "
+ "node: {ixscan: {filter: null, pattern: {name: 1}}}}}");
+}
+
+// SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates
+// over two indices.
+TEST_F(QueryPlannerTest, OrWithExactAndInexact3) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson(
+ "{$or: [{a: {$in: [/z/, /x/]}}, {a: 'w'},"
+ "{b: {$exists: false}}, {b: {$in: ['p']}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: {$or:[{a:{$in:[/z/, /x/]}}, {a:'w'}]}, "
+ "pattern: {a: 1}}}, "
+ "{fetch: {filter: {$or:[{b:{$exists:false}}, {b:{$in:['p']}}]},"
+ "node: {ixscan: {filter: null, pattern: {b: 1}}}}}]}}}}");
+}
+
+//
+// Min/Max
+//
+
+TEST_F(QueryPlannerTest, MinValid) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MinWithoutIndex) {
+ runInvalidQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), BSONObj());
+}
+
+TEST_F(QueryPlannerTest, MinBadHint) {
+ addIndex(BSON("b" << 1));
+ runInvalidQueryHintMinMax(BSONObj(), fromjson("{b: 1}"), fromjson("{a: 1}"), BSONObj());
+}
+
+TEST_F(QueryPlannerTest, MaxValid) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MinMaxSameValue) {
+ addIndex(BSON("a" << 1));
+ runQueryHintMinMax(BSONObj(), BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxWithoutIndex) {
+ runInvalidQueryHintMinMax(BSONObj(), BSONObj(), BSONObj(), fromjson("{a: 1}"));
+}
+
+TEST_F(QueryPlannerTest, MaxBadHint) {
+ addIndex(BSON("b" << 1));
+ runInvalidQueryHintMinMax(BSONObj(), fromjson("{b: 1}"), BSONObj(), fromjson("{a: 1}"));
+}
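The argument order of these helpers is easiest to read off the calls themselves; it is inferred here from MinValid and MinBadHint above, so treat the sketch as illustrative:

    // runQueryHintMinMax(query, hint, min, max)          planning is expected to succeed
    // runInvalidQueryHintMinMax(query, hint, min, max)   planning is expected to fail, e.g. when no
    //                                                    index matches min/max (MinWithoutIndex) or the
    //                                                    hinted index cannot honor them (MinBadHint)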
+
+TEST_F(QueryPlannerTest, MaxMinSort) {
+ addIndex(BSON("a" << 1));
+
+ // Run an empty query, sort {a: 1}, max/min arguments.
+ runQueryFull(BSONObj(),
+ fromjson("{a: 1}"),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 2}"),
+ fromjson("{a: 8}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
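The long runQueryFull argument lists in this group are easier to follow with the parameter order spelled out. Inferring it from the comment above and the values passed, with the trailing flag presumably being snapshot, a sketch:

    // runQueryFull(query, sort, proj, skip, limit, hint, min, max, snapshot)
    //   here: empty query, sort {a: 1}, no proj/skip/limit/hint, min {a: 2}, max {a: 8}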
+
+TEST_F(QueryPlannerTest, MaxMinReverseSort) {
+ addIndex(BSON("a" << 1));
+
+ // Run an empty query, sort {a: -1}, max/min arguments.
+ runQueryFull(BSONObj(),
+ fromjson("{a: -1}"),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 2}"),
+ fromjson("{a: 8}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: -1, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinReverseIndexDir) {
+ addIndex(BSON("a" << -1));
+
+ // Because the index is descending, the min is numerically larger than the max.
+ runQueryFull(BSONObj(),
+ fromjson("{a: -1}"),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 8}"),
+ fromjson("{a: 2}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: -1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinReverseIndexDirSort) {
+ addIndex(BSON("a" << -1));
+
+ // Min/max specifies a forward scan with bounds [{a: 8}, {a: 2}]. Asking for
+ // an ascending sort reverses the direction of the scan to [{a: 2}, {a: 8}].
+ runQueryFull(BSONObj(),
+ fromjson("{a: 1}"),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 8}"),
+ fromjson("{a: 2}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {filter: null, dir: -1,"
+ "pattern: {a: -1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinNoMatchingIndexDir) {
+ addIndex(BSON("a" << -1));
+ runInvalidQueryHintMinMax(BSONObj(), fromjson("{a: 2}"), BSONObj(), fromjson("{a: 8}"));
+}
+
+TEST_F(QueryPlannerTest, MaxMinSelectCorrectlyOrderedIndex) {
+ // There are both ascending and descending indices on 'a'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << -1));
+
+ // The ordering of min and max means that we *must* use the descending index.
+ runQueryFull(BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 8}"),
+ fromjson("{a: 2}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: -1}}}}}");
+
+ // If we switch the ordering of min and max, then we must use the
+ // ascending index instead.
+ runQueryFull(BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ 0,
+ 0,
+ BSONObj(),
+ fromjson("{a: 2}"),
+ fromjson("{a: 8}"),
+ false);
+
+ assertNumSolutions(1);
+ assertSolutionExists("{fetch: {node: {ixscan: {filter: null, dir: 1, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, MaxMinBadHintSelectsReverseIndex) {
+ // There are both ascending and descending indices on 'a'.
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << -1));
+
+ // A query hinting on {a: 1} is bad if min is {a: 8} and max is {a: 2}, because
+ // this min/max pairing requires a descending index.
+ runInvalidQueryFull(BSONObj(),
+ BSONObj(),
+ BSONObj(),
+ 0,
+ 0,
+ fromjson("{a: 1}"),
+ fromjson("{a: 8}"),
+ fromjson("{a: 2}"),
+ false);
+}
+
+
+//
+// $snapshot
+//
+
+TEST_F(QueryPlannerTest, Snapshot) {
+ addIndex(BSON("a" << 1));
+ runQuerySnapshot(fromjson("{a: {$gt: 0}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:0}}, node: "
+ "{ixscan: {filter: null, pattern: {_id: 1}}}}}");
+}
+
+//
+// Tree operations that require simple tree rewriting.
+//
+
+TEST_F(QueryPlannerTest, AndOfAnd) {
+ addIndex(BSON("x" << 1));
+ runQuery(fromjson("{$and: [ {$and: [ {x: 2.5}]}, {x: {$gt: 1}}, {x: {$lt: 3}} ] }"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+//
+// Logically equivalent queries
+//
+
+TEST_F(QueryPlannerTest, EquivalentAndsOne) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{$and: [{a: 1}, {b: {$all: [10, 20]}}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{a:1},{b:10},{b:20}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, EquivalentAndsTwo) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{$and: [{a: 1, b: 10}, {a: 1, b: 20}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{a:1},{a:1},{b:10},{b:20}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+//
+// Covering
+//
+
+TEST_F(QueryPlannerTest, BasicCovering) {
+ addIndex(BSON("x" << 1));
+ // query, sort, proj
+ runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{_id: 0, x: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, x: 1}, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, x: 1}, node: "
+ "{cscan: {dir: 1, filter: {x:{$gt:1}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, DottedFieldCovering) {
+ addIndex(BSON("a.b" << 1));
+ runQuerySortProj(fromjson("{'a.b': 5}"), BSONObj(), fromjson("{_id: 0, 'a.b': 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, 'a.b': 1}, node: "
+ "{cscan: {dir: 1, filter: {'a.b': 5}}}}}");
+ // SERVER-2104
+ // assertSolutionExists("{proj: {spec: {_id: 0, 'a.b': 1}, node: {'a.b': 1}}}");
+}
+
+TEST_F(QueryPlannerTest, IdCovering) {
+ runQuerySortProj(fromjson("{_id: {$gt: 10}}"), BSONObj(), fromjson("{_id: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 1}, node: "
+ "{cscan: {dir: 1, filter: {_id: {$gt: 10}}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 1}, node: {ixscan: "
+ "{filter: null, pattern: {_id: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ProjNonCovering) {
+ addIndex(BSON("x" << 1));
+ runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{x: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {x: 1}, node: {cscan: "
+ "{dir: 1, filter: {x: {$gt: 1}}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {x: 1}, node: {fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {x: 1}}}}}}}");
+}
+
+//
+// Basic sort
+//
+
+TEST_F(QueryPlannerTest, BasicSort) {
+ addIndex(BSON("x" << 1));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSort) {
+ addIndex(BSON("x"
+ << "hashed"));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
+ addIndex(BSON("x"
+ << "hashed"));
+ runQuerySortProj(BSON("x" << BSON("$in" << BSON_ARRAY(0 << 1))), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {x: 'hashed'}}}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, node: "
+ "{cscan: {dir: 1, filter: {x: {$in: [0, 1]}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
+ addIndex(BSON("x" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSortWithIndexablePred) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortProj(fromjson("{ a : 5 }"), BSON("b" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, "
+ "node: {fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 5}, node: {ixscan: "
+ "{filter: null, pattern: {b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BasicSortBooleanIndexKeyPattern) {
+ addIndex(BSON("a" << true));
+ runQuerySortProj(fromjson("{ a : 5 }"), BSON("a" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {a: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: true}}}}}");
+}
+
+// SERVER-14070
+TEST_F(QueryPlannerTest, CompoundIndexWithEqualityPredicatesProvidesSort) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{a: 1, b: 1}"), fromjson("{b: 1}"), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null,"
+ "pattern: {a: 1, b: 1}, "
+ "bounds: {a:[[1,1,true,true]], b:[[1,1,true,true]]}}}}}");
+}
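No SORT stage appears here because the point bounds on 'a' mean every key the scan produces shares the same leading value, so the output is already ordered by 'b' even though {b: 1} is not a prefix of the index pattern. A contrasting sketch under the same reasoning (hypothetical, in the style of this file):

    // If 'a' were a range rather than an equality, the index could presumably no longer
    // provide the {b: 1} sort and a blocking SORT would reappear:
    //   runQuerySortProj(fromjson("{a: {$gt: 0}, b: 1}"), fromjson("{b: 1}"), BSONObj());
    //   -> expect a {sort: {pattern: {b: 1}, ...}} solution rather than a bare ixscan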
+
+//
+// Sort with limit and/or skip
+//
+
+TEST_F(QueryPlannerTest, SortLimit) {
+ // Negative limit indicates hard limit - see lite_parsed_query.cpp
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, -3);
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {a: 1}, limit: 3, "
+ "node: {cscan: {dir: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortSkip) {
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 0);
+ assertNumSolutions(1U);
+ // If only skip is provided, do not limit sort.
+ assertSolutionExists(
+ "{skip: {n: 2, node: "
+ "{sort: {pattern: {a: 1}, limit: 0, "
+ "node: {cscan: {dir: 1}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortSkipLimit) {
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, -3);
+ assertNumSolutions(1U);
+ // Limit in sort node should be adjusted by skip count
+ assertSolutionExists(
+ "{skip: {n: 2, node: "
+ "{sort: {pattern: {a: 1}, limit: 5, "
+ "node: {cscan: {dir: 1}}}}}}");
+}
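The limit: 5 in the expected sort node is the skip added to the hard limit; a minimal sketch of the arithmetic:

    // The SORT must retain skip + limit documents so that the SKIP stage above it
    // can discard 2 and still leave 3 to return.
    const long long skip = 2;
    const long long hardLimit = 3;
    const long long sortLimit = skip + hardLimit;  // 5, as asserted above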
+
+TEST_F(QueryPlannerTest, SortSoftLimit) {
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, 3);
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {a: 1}, limit: 3, "
+ "node: {cscan: {dir: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortSkipSoftLimit) {
+ runQuerySortProjSkipLimit(BSONObj(), fromjson("{a: 1}"), BSONObj(), 2, 3);
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{skip: {n: 2, node: "
+ "{sort: {pattern: {a: 1}, limit: 5, "
+ "node: {cscan: {dir: 1}}}}}}");
+}
+
+//
+// Sort elimination
+//
+
+TEST_F(QueryPlannerTest, BasicSortElim) {
+ addIndex(BSON("x" << 1));
+ // query, sort, proj
+ runQuerySortProj(fromjson("{ x : {$gt: 1}}"), fromjson("{x: 1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {x: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {x: {$gt: 1}}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SortElimCompound) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{ a : 5 }"), BSON("b" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+// SERVER-13611: test that sort elimination still works if there are
+// trailing fields in the index.
+TEST_F(QueryPlannerTest, SortElimTrailingFields) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuerySortProj(fromjson("{a: 5}"), BSON("b" << 1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1, c: 1}}}}}");
+}
+
+// Sort elimination with trailing fields where the sort direction is descending.
+TEST_F(QueryPlannerTest, SortElimTrailingFieldsReverse) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
+ runQuerySortProj(fromjson("{a: 5, b: 6}"), BSON("c" << -1), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{sort: {pattern: {c: -1}, limit: 0, "
+ "node: {cscan: {dir: 1, filter: {a: 5, b: 6}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, dir: -1, pattern: {a: 1, b: 1, c: 1, d: 1}}}}}");
+}
+
+//
+// Basic compound
+//
+
+TEST_F(QueryPlannerTest, BasicCompound) {
+ addIndex(BSON("x" << 1 << "y" << 1));
+ runQuery(fromjson("{ x : 5, y: 10}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1, y: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundMissingField) {
+ addIndex(BSON("x" << 1 << "y" << 1 << "z" << 1));
+ runQuery(fromjson("{ x : 5, z: 10}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {x: 1, y: 1, z: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundFieldsOrder) {
+ addIndex(BSON("x" << 1 << "y" << 1 << "z" << 1));
+ runQuery(fromjson("{ x : 5, z: 10, y:1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1, y: 1, z: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantUseCompound) {
+ addIndex(BSON("x" << 1 << "y" << 1));
+ runQuery(fromjson("{ y: 10}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {y: 10}}}");
+}
+
+//
+// $in
+//
+
+TEST_F(QueryPlannerTest, InBasic) {
+ addIndex(fromjson("{a: 1}"));
+ runQuery(fromjson("{a: {$in: [1, 2]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: {$in: [1, 2]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1}}}}}");
+}
+
+// Logically equivalent to the preceding $in query.
+// Indexed solution should be the same.
+TEST_F(QueryPlannerTest, InBasicOrEquivalent) {
+ addIndex(fromjson("{a: 1}"));
+ runQuery(fromjson("{$or: [{a: 1}, {a: 2}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a: 1}, {a: 2}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, InSparseIndex) {
+ addIndex(fromjson("{a: 1}"),
+ false, // multikey
+ true); // sparse
+ runQuery(fromjson("{a: {$in: [null]}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: {$in: [null]}}}}");
+}
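Only the collection scan survives here because a sparse index omits exactly the documents this predicate has to find:

    // {a: {$in: [null]}} matches documents where 'a' is null or missing entirely; a sparse
    // {a: 1} index has no entries for the missing case, so using it would drop results,
    // and the planner falls back to the collection scan.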
+
+TEST_F(QueryPlannerTest, InCompoundIndexFirst) {
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuery(fromjson("{a: {$in: [1, 2]}, b: 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {b: 3, a: {$in: [1, 2]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+
+// Logically equivalent to the preceding $in query.
+// Indexed solution should be the same.
+// Currently fails - pre-requisite to SERVER-12024
+/*
+TEST_F(QueryPlannerTest, InCompoundIndexFirstOrEquivalent) {
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuery(fromjson("{$and: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}}}");
+ assertSolutionExists("{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+*/
+
+TEST_F(QueryPlannerTest, InCompoundIndexLast) {
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuery(fromjson("{a: 3, b: {$in: [1, 2]}}"));
+
+ assertNumSolutions(2U);
+ // TODO: update filter in cscan solution when SERVER-12024 is implemented
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: 3, b: {$in: [1, 2]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+
+// Logically equivalent to the preceding $in query.
+// Indexed solution should be the same.
+// Currently fails - pre-requisite to SERVER-12024
+/*
+TEST_F(QueryPlannerTest, InCompoundIndexLastOrEquivalent) {
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuery(fromjson("{$and: [{a: 3}, {$or: [{b: 1}, {b: 2}]}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{a: 3}, {$or: [{b: 1}, {b: 2}]}]}}}");
+ assertSolutionExists("{fetch: {filter: null, "
+ "node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+*/
+
+// SERVER-1205
+TEST_F(QueryPlannerTest, InWithSort) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSON("b" << 1), BSONObj(), 0, 1);
+
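+    // Explode-for-sort: the $in on 'a' is split into one point-interval scan per
+    // value (a=1 and a=2) over {a: 1, b: 1}; each scan is already ordered by 'b',
+    // so a mergeSort of the two provides the sort without a blocking SORT stage.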
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 1, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
+}
+
+// SERVER-1205
+TEST_F(QueryPlannerTest, InWithoutSort) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ // No sort means we don't bother to blow up the bounds.
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSONObj(), BSONObj(), 0, 1);
+
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}");
+}
+
+// SERVER-1205
+TEST_F(QueryPlannerTest, ManyInWithSort) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b:{$in:[1,2]}, c:{$in:[1,2]}}"),
+ BSON("d" << 1),
+ BSONObj(),
+ 0,
+ 1);
+
+ assertSolutionExists(
+ "{sort: {pattern: {d: 1}, limit: 1, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}]}}}}");
+}
+
+// SERVER-1205
+TEST_F(QueryPlannerTest, TooManyToExplode) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
+ runQuerySortProjSkipLimit(fromjson(
+ "{a: {$in: [1,2,3,4,5,6]},"
+ "b:{$in:[1,2,3,4,5,6,7,8]},"
+ "c:{$in:[1,2,3,4,5,6,7,8]}}"),
+ BSON("d" << 1),
+ BSONObj(),
+ 0,
+ 1);
+
+ // We cap the # of ixscans we're willing to create.
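+    // Exploding here would need one scan per (a, b, c) point combination,
+    // i.e. 6 * 8 * 8 = 384 scans, which is over the cap.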
+    assertNumSolutions(2U);
+ assertSolutionExists(
+ "{sort: {pattern: {d: 1}, limit: 1, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {d: 1}, limit: 1, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c:1, d:1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CantExplodeMetaSort) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c"
+ << "text"));
+ runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"),
+ fromjson("{c: {$meta: 'textScore'}}"),
+ fromjson("{c: {$meta: 'textScore'}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {c:{$meta:'textScore'}}, node: "
+ "{sort: {pattern: {c:{$meta:'textScore'}}, limit: 0, node: "
+ "{cscan: {filter: {a:{$in:[1,2]},b:{$in:[3,4]}}, dir: 1}}}}}}");
+}
+
+// SERVER-13618: test that exploding scans for sort works even
+// if we must reverse the scan direction.
+TEST_F(QueryPlannerTest, ExplodeMustReverseScans) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
+ runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"), BSON("c" << -1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:1, d:1}}}]}}}}");
+}
+
+// SERVER-13618
+TEST_F(QueryPlannerTest, ExplodeMustReverseScans2) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << -1));
+ runQuerySortProj(fromjson("{a: {$in: [1, 2]}, b: {$in: [3, 4]}}"), BSON("c" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a:1, b:1, c:-1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:-1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:-1}}},"
+ "{ixscan: {pattern: {a:1, b:1, c:-1}}}]}}}}");
+}
+
+// SERVER-13752: don't try to explode if the ordered interval list for
+// the leading field of the compound index is empty.
+TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{a: {$in: []}}"), BSON("b" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}}}}}}}");
+}
+
+// SERVER-13752
+TEST_F(QueryPlannerTest, CantExplodeWithEmptyBounds2) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuerySortProj(fromjson("{a: {$gt: 3, $lt: 0}}"), BSON("b" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {b:1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: "
+ "{fetch: {node: {ixscan: {pattern: {a:1,b:1,c:1}}}}}}}");
+}
+
+// SERVER-13754: exploding an $or
+TEST_F(QueryPlannerTest, ExplodeOrForSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}, {b: 2}]}"), BSON("c" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {bounds: {a: [[1,1,true,true]], "
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {a:1, c:1}}},"
+ "{ixscan: {bounds: {a: [[2,2,true,true]], "
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {a:1, c:1}}},"
+ "{ixscan: {bounds: {b: [[2,2,true,true]], "
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {b:1, c:1}}}]}}}}");
+}
+
+// SERVER-13754: exploding an $or
+TEST_F(QueryPlannerTest, ExplodeOrForSort2) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ addIndex(BSON("d" << 1 << "c" << 1));
+
+ runQuerySortProj(
+ fromjson("{$or: [{a: 1, b: {$in: [1, 2]}}, {d: 3}]}"), BSON("c" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {bounds: {a: [[1,1,true,true]], b: [[1,1,true,true]],"
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {a:1, b:1, c:1}}},"
+ "{ixscan: {bounds: {a: [[1,1,true,true]], b: [[2,2,true,true]],"
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {a:1, b:1, c:1}}},"
+ "{ixscan: {bounds: {d: [[3,3,true,true]], "
+ "c: [['MinKey','MaxKey',true,true]]},"
+ "pattern: {d:1, c:1}}}]}}}}");
+}
+
+// SERVER-13754: an $or that can't be exploded, because one clause of the
+// $or doesn't provide the sort, even after explosion: exploding the $in on
+// 'a' still leaves 'b' unconstrained in {a: 1, b: 1, c: 1}, so the exploded
+// scans are not ordered by 'c'.
+TEST_F(QueryPlannerTest, CantExplodeOrForSort) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ addIndex(BSON("d" << 1 << "c" << 1));
+
+ runQuerySortProj(fromjson("{$or: [{a: {$in: [1, 2]}}, {d: 3}]}"), BSON("c" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {c: 1}, limit: 0, node: "
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a: 1, b: 1, c: 1}}},"
+ "{ixscan: {pattern: {d: 1, c: 1}}}]}}}}}}");
+}
+
+// SERVER-15286: Make sure that at least the explodeForSort() path bails out
+// when it finds no union of point intervals to explode: the $gt/$lt ranges
+// below produce interval (not point) bounds on 'a'. We could convert this
+// into a MERGE_SORT plan, but we don't yet do this optimization.
+TEST_F(QueryPlannerTest, CantExplodeOrForSort2) {
+ addIndex(BSON("a" << 1));
+
+ runQuerySortProj(fromjson("{$or: [{a: {$gt: 1, $lt: 3}}, {a: {$gt: 6, $lt: 10}}]}"),
+ BSON("a" << -1),
+ BSONObj());
+
+ assertNumSolutions(3U);
+ assertSolutionExists("{sort: {pattern: {a: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {a: -1}, limit: 0, node: "
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {pattern: {a: 1}, bounds: "
+ "{a: [[1,3,false,false]]}}},"
+ "{ixscan: {pattern: {a: 1}, bounds: "
+ "{a: [[6,10,false,false]]}}}]}}}}}}");
+}
+
+// SERVER-13754: too many scans in an $or explosion.
+TEST_F(QueryPlannerTest, TooManyToExplodeOr) {
+ addIndex(BSON("a" << 1 << "e" << 1));
+ addIndex(BSON("b" << 1 << "e" << 1));
+ addIndex(BSON("c" << 1 << "e" << 1));
+ addIndex(BSON("d" << 1 << "e" << 1));
+ runQuerySortProj(fromjson(
+ "{$or: [{a: {$in: [1,2,3,4,5,6]},"
+ "b: {$in: [1,2,3,4,5,6]}},"
+ "{c: {$in: [1,2,3,4,5,6]},"
+ "d: {$in: [1,2,3,4,5,6]}}]}"),
+ BSON("e" << 1),
+ BSONObj());
+
+ // We cap the # of ixscans we're willing to create, so we don't get explosion. Instead
+ // we get 5 different solutions which all use a blocking sort.
+ assertNumSolutions(5U);
+ assertSolutionExists("{sort: {pattern: {e: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {e: 1}, limit: 0, node: "
+ "{or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {a: 1, e: 1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {c: 1, e: 1}}}}}]}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {e: 1}, limit: 0, node: "
+ "{or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {b: 1, e: 1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {c: 1, e: 1}}}}}]}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {e: 1}, limit: 0, node: "
+ "{or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {a: 1, e: 1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {d: 1, e: 1}}}}}]}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {e: 1}, limit: 0, node: "
+ "{or: {nodes: ["
+ "{fetch: {node: {ixscan: {pattern: {b: 1, e: 1}}}}},"
+ "{fetch: {node: {ixscan: {pattern: {d: 1, e: 1}}}}}]}}}}");
+}
+
+// SERVER-15696: Make sure explodeForSort copies filters on IXSCAN stages to all of the
+// scans resulting from the explode. Regex is the easiest way to have the planner create
+// an index scan which filters using the index key.
+TEST_F(QueryPlannerTest, ExplodeIxscanWithFilter) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySortProj(fromjson(
+ "{$and: [{b: {$regex: 'foo', $options: 'i'}},"
+ "{a: {$in: [1, 2]}}]}"),
+ BSON("b" << 1),
+ BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a:1, b:1},"
+ "filter: {b: {$regex: 'foo', $options: 'i'}}}},"
+ "{ixscan: {pattern: {a:1, b:1},"
+ "filter: {b: {$regex: 'foo', $options: 'i'}}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, InWithSortAndLimitTrailingField) {
+ addIndex(BSON("a" << 1 << "b" << -1 << "c" << 1));
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}, b: {$gte: 0}}"),
+ fromjson("{b: -1}"),
+ BSONObj(), // no projection
+ 0, // no skip
+ -1); // .limit(1)
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{sort: {pattern: {b:-1}, limit: 1, "
+ "node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{limit: {n: 1, node: {fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a:1,b:-1,c:1}}}, "
+ " {ixscan: {pattern: {a:1,b:-1,c:1}}}]}}}}}}");
+}
+
+//
+// Multiple solutions
+//
+
+TEST_F(QueryPlannerTest, TwoPlans) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a:1, b:{$gt:2,$lt:2}}"));
+
+ // 2 indexed solns and one non-indexed
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and:[{b:{$lt:2}},{a:1},{b:{$gt:2}}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{b:{$lt:2}},{b:{$gt:2}}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoPlansElemMatch) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("arr.x" << 1 << "a" << 1));
+
+ runQuery(fromjson(
+ "{arr: { $elemMatch : { x : 5 , y : 5 } },"
+ " a : 55 , b : { $in : [ 1 , 5 , 8 ] } }"));
+
+ // 2 indexed solns and one non-indexed
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[55,55,true,true]], b: [[1,1,true,true], "
+ "[5,5,true,true], [8,8,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{arr:{$elemMatch:{x:5,y:5}}},"
+ "{b:{$in:[1,5,8]}}]}, "
+ "node: {ixscan: {pattern: {'arr.x':1,a:1}, bounds: "
+ "{'arr.x': [[5,5,true,true]], 'a':[[55,55,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundAndNonCompoundIndices) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1), true);
+ runQuery(fromjson("{a: 1, b: {$gt: 2, $lt: 2}}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{b:{$lt:2}},{b:{$gt:2}}]}, node: "
+ "{ixscan: {pattern: {a:1}, bounds: {a: [[1,1,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {b:{$gt:2}}, node: "
+ "{ixscan: {pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[-Infinity,2,true,false]]}}}}}");
+}
+
+//
+// Sort orders
+//
+
+// SERVER-1205.
+TEST_F(QueryPlannerTest, MergeSort) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+ runQuerySortProj(fromjson("{$or: [{a:1}, {b:1}]}"), fromjson("{c:1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {c: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, c: 1}}}, {ixscan: {pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// SERVER-1205 as well.
+TEST_F(QueryPlannerTest, NoMergeSortIfNoSortWanted) {
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1 << "c" << 1));
+ runQuerySortProj(fromjson("{$or: [{a:1}, {b:1}]}"), BSONObj(), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$or: [{a:1}, {b:1}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1, c: 1}}}, "
+ "{ixscan: {filter: null, pattern: {b: 1, c: 1}}}]}}}}");
+}
+
+// Basic "keep sort in mind with an OR"
+TEST_F(QueryPlannerTest, MergeSortEvenIfSameIndex) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{$or: [{a:1}, {a:7}]}"), fromjson("{b:1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {b: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ // TODO the second solution should be mergeSort rather than just sort
+}
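+
+// A hedged sketch of what the TODO above anticipates, modeled on InWithSort:
+// once same-index $or branches are merge-sorted, the indexed solution should
+// look roughly like the assertion below. The test name is hypothetical and the
+// expected plan has not been verified against the planner, so it stays
+// commented out like the other pending tests in this file.
+/*
+TEST_F(QueryPlannerTest, MergeSortEvenIfSameIndexExpected) {
+    addIndex(BSON("a" << 1 << "b" << 1));
+    runQuerySortProj(fromjson("{$or: [{a:1}, {a:7}]}"), fromjson("{b:1}"), BSONObj());
+
+    assertSolutionExists(
+        "{fetch: {node: {mergeSort: {nodes: "
+        "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
+}
+*/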
+
+TEST_F(QueryPlannerTest, ReverseScanForSort) {
+ addIndex(BSON("_id" << 1));
+ runQuerySortProj(BSONObj(), fromjson("{_id: -1}"), BSONObj());
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{sort: {pattern: {_id: -1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {_id: 1}}}}}");
+}
+
+//
+// Hint tests
+//
+
+TEST_F(QueryPlannerTest, NaturalHint) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortHint(BSON("a" << 1), BSON("b" << 1), BSON("$natural" << 1));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{cscan: {filter: {a: 1}, dir: 1}}}}");
+}
+
+// Test $natural sort and its interaction with $natural hint.
+TEST_F(QueryPlannerTest, NaturalSortAndHint) {
+ addIndex(BSON("x" << 1));
+
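+    // In every case below the collection scan direction follows the $natural
+    // sort; a conflicting $natural hint does not flip it.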
+ // Non-empty query, -1 sort, no hint.
+ runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Non-empty query, 1 sort, no hint.
+ runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << 1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Non-empty query, -1 sort, -1 hint.
+ runQuerySortHint(
+ fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSON("$natural" << -1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Non-empty query, 1 sort, -1 hint.
+ runQuerySortHint(
+ fromjson("{x: {$exists: true}}"), BSON("$natural" << 1), BSON("$natural" << -1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Non-empty query, -1 sort, 1 hint.
+ runQuerySortHint(
+ fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSON("$natural" << 1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Non-empty query, 1 sort, 1 hint.
+ runQuerySortHint(
+ fromjson("{x: {$exists: true}}"), BSON("$natural" << 1), BSON("$natural" << 1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Empty query, -1 sort, no hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Empty query, 1 sort, no hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Empty query, -1 sort, -1 hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSON("$natural" << -1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Empty query, 1 sort, -1 hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSON("$natural" << -1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+
+ // Empty query, -1 sort, 1 hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << -1), BSON("$natural" << 1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: -1}}");
+
+ // Empty query, 1 sort, 1 hint.
+ runQuerySortHint(BSONObj(), BSON("$natural" << 1), BSON("$natural" << 1));
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, HintOverridesNaturalSort) {
+ addIndex(BSON("x" << 1));
+ runQuerySortHint(fromjson("{x: {$exists: true}}"), BSON("$natural" << -1), BSON("x" << 1));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {x:{$exists:true}}, node: "
+ "{ixscan: {filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintValid) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(BSONObj(), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintValidWithPredicate) {
+ addIndex(BSON("a" << 1));
+ runQueryHint(fromjson("{a: {$gt: 1}}"), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintValidWithSort) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortHint(fromjson("{a: 100, b: 200}"), fromjson("{b: 1}"), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{fetch: {filter: {b: 200}, "
+ "node: {ixscan: {filter: null, pattern: {a: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintElemMatch) {
+ // true means multikey
+ addIndex(fromjson("{'a.b': 1}"), true);
+ runQueryHint(fromjson("{'a.b': 1, a: {$elemMatch: {b: 2}}}"), fromjson("{'a.b': 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{a:{$elemMatch:{b:2}}}, {'a.b': 1}]}, "
+ "node: {ixscan: {filter: null, pattern: {'a.b': 1}, bounds: "
+ "{'a.b': [[2, 2, true, true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintInvalid) {
+ addIndex(BSON("a" << 1));
+ runInvalidQueryHint(BSONObj(), fromjson("{b: 1}"));
+}
+
+//
+// Sparse indices, SERVER-8067
+// Each index in this block of tests is sparse.
+//
+
+TEST_F(QueryPlannerTest, SparseIndexIgnoreForSort) {
+ addIndex(fromjson("{a: 1}"), false, true);
+ runQuerySortProj(BSONObj(), fromjson("{a: 1}"), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+}
+
+TEST_F(QueryPlannerTest, SparseIndexHintForSort) {
+ addIndex(fromjson("{a: 1}"), false, true);
+ runQuerySortHint(BSONObj(), fromjson("{a: 1}"), fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SparseIndexPreferCompoundIndexForSort) {
+ addIndex(fromjson("{a: 1}"), false, true);
+ addIndex(fromjson("{a: 1, b: 1}"));
+ runQuerySortProj(BSONObj(), fromjson("{a: 1}"), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, SparseIndexForQuery) {
+ addIndex(fromjson("{a: 1}"), false, true);
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: 1}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}");
+}
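+
+// A hedged sketch following the same reasoning as InSparseIndex above: a plain
+// equality-to-null predicate should likewise be unable to use the sparse index,
+// since documents missing 'a' match null but are absent from the index. The
+// test name is hypothetical and the expectation has not been verified against
+// the planner, so it is left commented out.
+/*
+TEST_F(QueryPlannerTest, SparseIndexCantSupportEqualityToNull) {
+    addIndex(fromjson("{a: 1}"), false, true);
+    runQuerySortProj(fromjson("{a: null}"), BSONObj(), BSONObj());
+
+    assertNumSolutions(1U);
+    assertSolutionExists("{cscan: {dir: 1, filter: {a: null}}}");
+}
+*/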
+
+//
+// Regex
+//
+
+TEST_F(QueryPlannerTest, PrefixRegex) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a: /^foo/}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: /^foo/}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, PrefixRegexCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: /^foo/}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {a: /^foo/}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegex) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a: /foo/}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {a: /foo/}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: {a: /foo/}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: /foo/}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {a: /foo/}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {a: /foo/}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexAnd) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: /foo/, b: 2}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {dir: 1, filter: {$and: [{b: 2}, {a: /foo/}]}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: {a: /foo/}, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexAndCovering) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{a: /foo/, b: 2}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
+ "{cscan: {dir: 1, filter: {$and: [{b: 2}, {a: /foo/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
+ "{ixscan: {filter: {a: /foo/}, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexOrCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(
+ fromjson("{$or: [{a: /0/}, {a: /1/}]}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {$or: [{a: /0/}, {a: /1/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {$or: [{a: /0/}, {a: /1/}]}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexInCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: {$in: [/foo/, /bar/]}}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {a:{$in:[/foo/,/bar/]}}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {a:{$in:[/foo/,/bar/]}}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoRegexCompoundIndexCovering) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProj(fromjson("{a: /0/, b: /1/}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
+ "{cscan: {dir: 1, filter: {$and:[{a:/0/},{b:/1/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1}, node: "
+ "{ixscan: {filter: {$and:[{a:/0/},{b:/1/}]}, pattern: {a: 1, b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoRegexSameFieldCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(
+ fromjson("{$and: [{a: /0/}, {a: /1/}]}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {$and:[{a:/0/},{a:/1/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {$and:[{a:/0/},{a:/1/}]}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ThreeRegexSameFieldCovering) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(
+ fromjson("{$and: [{a: /0/}, {a: /1/}, {a: /2/}]}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{cscan: {dir: 1, filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NonPrefixRegexMultikey) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: /foo/}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {filter: {a: /foo/}, dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: /foo/}, node: {ixscan: "
+ "{pattern: {a: 1}, filter: null}}}}");
+}
+
+TEST_F(QueryPlannerTest, ThreeRegexSameFieldMultikey) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{$and: [{a: /0/}, {a: /1/}, {a: /2/}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 2U);
+ assertSolutionExists("{cscan: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{a:/0/},{a:/1/},{a:/2/}]}, node: {ixscan: "
+ "{pattern: {a: 1}, filter: null}}}}");
+}
+
+//
+// Negation
+//
+
+TEST_F(QueryPlannerTest, NegationIndexForSort) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: {$ne: 1}}"), fromjson("{a: 1}"), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}, "
+ "bounds: {a: [['MinKey',1,true,false], "
+ "[1,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationTopLevel) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: {$ne: 1}}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [['MinKey',1,true,false], "
+ "[1,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationOr) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, NegationOrNotIn) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {b: {$nin: [1]}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, NegationAndIndexOnEquality) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$and: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1},"
+ "bounds: {a: [[1,1,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationAndIndexOnEqualityAndNegationBranches) {
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortProj(fromjson("{$and: [{a: 1}, {b: 2}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}, "
+ "bounds: {a: [[1,1,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {b: 1}, "
+ "bounds: {b: [[2,2,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationAndIndexOnInequality) {
+ addIndex(BSON("b" << 1));
+ runQuerySortProj(fromjson("{$and: [{a: 1}, {b: {$ne: 1}}]}"), BSONObj(), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: {ixscan: {pattern: {b:1}, "
+ "bounds: {b: [['MinKey',1,true,false], "
+ "[1,'MaxKey',false,true]]}}}}}");
+}
+
+// Negated regexes don't use the index.
+TEST_F(QueryPlannerTest, NegationRegexPrefix) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: /^a/}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated mods don't use the index
+TEST_F(QueryPlannerTest, NegationMod) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$mod: [2, 1]}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated $type doesn't use the index
+TEST_F(QueryPlannerTest, NegationTypeOperator) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$type: 16}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated $elemMatch value won't use the index
+TEST_F(QueryPlannerTest, NegationElemMatchValue) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$elemMatch: {$gt: 3, $lt: 10}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated $elemMatch object won't use the index
+TEST_F(QueryPlannerTest, NegationElemMatchObject) {
+ addIndex(BSON("i.j" << 1));
+ runQuery(fromjson("{i: {$not: {$elemMatch: {j: 1}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Negated $elemMatch object won't use the index
+TEST_F(QueryPlannerTest, NegationElemMatchObject2) {
+ addIndex(BSON("i.j" << 1));
+ runQuery(fromjson("{i: {$not: {$elemMatch: {j: {$ne: 1}}}}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// If there is a negation that can't use the index,
+// ANDed with a predicate that can use the index, then
+// we can still use the index for the latter predicate.
+TEST_F(QueryPlannerTest, NegationRegexWithIndexablePred) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{$and: [{i: {$not: /o/}}, {i: 2}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [[2,2,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegationCantUseSparseIndex) {
+ // false means not multikey, true means sparse
+ addIndex(BSON("i" << 1), false, true);
+ runQuery(fromjson("{i: {$ne: 4}}"));
+
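+    // Documents missing 'i' satisfy {i: {$ne: 4}} but have no entry in the
+    // sparse index, so the negation can't be answered from the index.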
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+TEST_F(QueryPlannerTest, NegationCantUseSparseIndex2) {
+ // false means not multikey, true means sparse
+ addIndex(BSON("i" << 1 << "j" << 1), false, true);
+ runQuery(fromjson("{i: 4, j: {$ne: 5}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {i:1,j:1}, bounds: "
+ "{i: [[4,4,true,true]], j: [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatedRangeStrGT) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$gt: 'a'}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
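+    // $gt: 'a' is type-bracketed to the string interval ('a', {}), where the
+    // empty object marks the end of the string type; its complement is
+    // [MinKey, 'a'] plus [{}, MaxKey], which is what the bounds below show.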
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey','a',true,true], "
+ "[{},'MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatedRangeStrGTE) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$gte: 'a'}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey','a',true,false], "
+ "[{},'MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatedRangeIntGT) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$gt: 5}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey',5,true,true], "
+ "[Infinity,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatedRangeIntGTE) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson("{i: {$not: {$gte: 5}}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey',5,true,false], "
+ "[Infinity,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, TwoNegatedRanges) {
+ addIndex(BSON("i" << 1));
+ runQuery(fromjson(
+ "{$and: [{i: {$not: {$lte: 'b'}}}, "
+ "{i: {$not: {$gte: 'f'}}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {i:1}, "
+ "bounds: {i: [['MinKey','',true,false], "
+ "['b','f',false,false], "
+ "[{},'MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, AndWithNestedNE) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a: {$gt: -1, $lt: 1, $ne: 0}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [[-1,0,false,false], "
+ "[0,1,false,false]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NegatePredOnCompoundIndex) {
+ addIndex(BSON("x" << 1 << "a" << 1));
+ runQuery(fromjson("{x: 1, a: {$ne: 1}, b: {$ne: 2}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {x:1,a:1}, bounds: "
+ "{x: [[1,1,true,true]], "
+ "a: [['MinKey',1,true,false], [1,'MaxKey',false,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NEOnMultikeyIndex) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: {$ne: 3}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$ne:3}}, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [['MinKey',3,true,false],"
+ "[3,'MaxKey',false,true]]}}}}}");
+}
+
+// In general, a negated $nin can make use of an index.
+TEST_F(QueryPlannerTest, NinUsesMultikeyIndex) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: {$nin: [4, 10]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$nin:[4,10]}}, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [['MinKey',4,true,false],"
+ "[4,10,false,false],"
+ "[10,'MaxKey',false,true]]}}}}}");
+}
+
+// But it can't if the $nin contains a regex because regex bounds can't
+// be complemented.
+TEST_F(QueryPlannerTest, NinCantUseMultikeyIndex) {
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+ runQuery(fromjson("{a: {$nin: [4, /foobar/]}}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+//
+// Multikey indices
+//
+
+//
+// Index bounds related tests
+//
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsLastFieldMissing) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuery(fromjson("{a: 5, b: {$gt: 7}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c: 1}, bounds: "
+ "{a: [[5,5,true,true]], b: [[7,Infinity,false,true]], "
+ " c: [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsMiddleFieldMissing) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuery(fromjson("{a: 1, c: {$lt: 3}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1, c: 1}, bounds: "
+ "{a: [[1,1,true,true]], b: [['MinKey','MaxKey',true,true]], "
+ " c: [[-Infinity,3,true,false]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsRangeAndEquality) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: {$gt: 8}, b: 6}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1, b: 1}, bounds: "
+ "{a: [[8,Infinity,false,true]], b:[[6,6,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsEqualityThenIn) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: 5, b: {$in: [2,6,11]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: "
+ "{a: 1, b: 1}, bounds: {a: [[5,5,true,true]], "
+ "b:[[2,2,true,true],[6,6,true,true],[11,11,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsStringBounds) {
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: {$gt: 'foo'}, b: {$gte: 'bar'}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, pattern: "
+ "{a: 1, b: 1}, bounds: {a: [['foo',{},false,false]], "
+ "b:[['bar',{},true,false]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, IndexBoundsAndWithNestedOr) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$and: [{a: 1, $or: [{a: 2}, {a: 3}]}]}"));
+
+    // Given that the index over 'a' isn't multikey, we ideally won't generate any
+    // indexed solutions, since the query describes an empty set. Any solutions
+    // below are "this is how it currently works" instead of "this is how it should work."
+
+ // It's kind of iffy to look for indexed solutions so we don't...
+ size_t matches = 0;
+ matches += numSolutionMatches(
+ "{cscan: {dir: 1, filter: "
+ "{$or: [{a: 2, a:1}, {a: 3, a:1}]}}}");
+ matches += numSolutionMatches(
+ "{cscan: {dir: 1, filter: "
+ "{$and: [{$or: [{a: 2}, {a: 3}]}, {a: 1}]}}}");
+ ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
+}
+
+TEST_F(QueryPlannerTest, IndexBoundsIndexedSort) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("a" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{sort: {pattern: {a:1}, limit: 0, node: "
+ "{cscan: {filter: {$or:[{a:1},{a:2}]}, dir: 1}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, "
+ "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, IndexBoundsUnindexedSort) {
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("b" << 1), BSONObj());
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: "
+ "{cscan: {filter: {$or:[{a:1},{a:2}]}, dir: 1}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: {fetch: "
+ "{filter: null, node: {ixscan: {filter: null, "
+ "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, IndexBoundsUnindexedSortHint) {
+ addIndex(BSON("a" << 1));
+ runQuerySortHint(fromjson("{$or: [{a: 1}, {a: 2}]}"), BSON("b" << 1), BSON("a" << 1));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: {fetch: "
+ "{filter: null, node: {ixscan: {filter: null, "
+ "pattern: {a:1}, bounds: {a: [[1,1,true,true], [2,2,true,true]]}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, CompoundIndexBoundsIntersectRanges) {
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ addIndex(BSON("a" << 1 << "c" << 1));
+ runQuery(fromjson("{a: {$gt: 1, $lt: 10}, c: {$gt: 1, $lt: 10}}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists("{cscan: {dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1,b:1,c:1}, "
+ "bounds: {a: [[1,10,false,false]], "
+ "b: [['MinKey','MaxKey',true,true]], "
+ "c: [[1,10,false,false]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1,c:1}, "
+ "bounds: {a: [[1,10,false,false]], "
+ "c: [[1,10,false,false]]}}}}}");
+}
+
+// Test that planner properly unionizes the index bounds for two negation
+// predicates (SERVER-13890).
+TEST_F(QueryPlannerTest, IndexBoundsOrOfNegations) {
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: {$ne: 3}}, {a: {$ne: 4}}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
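+    // The union of the two complements, [MinKey,3) U (3,MaxKey] and
+    // [MinKey,4) U (4,MaxKey], covers everything, so the bounds collapse to a
+    // single [MinKey, MaxKey] interval.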
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {pattern: {a:1}, "
+ "bounds: {a: [['MinKey','MaxKey',true,true]]}}}}}");
+}
+
+TEST_F(QueryPlannerTest, BoundsTypeMinKeyMaxKey) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+
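+    // BSON type code -1 is MinKey and 127 is MaxKey, so each $type predicate
+    // maps to a single-point interval on that value.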
+ runQuery(fromjson("{a: {$type: -1}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
+ "{a: [['MinKey','MinKey',true,true]]}}}}}");
+
+ runQuery(fromjson("{a: {$type: 127}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: {ixscan: {pattern: {a: 1}, bounds:"
+ "{a: [['MaxKey','MaxKey',true,true]]}}}}}");
+}
+
+//
+// Tests related to building index bounds for multikey
+// indices, combined with compound and $elemMatch
+//
+
+// SERVER-12475: make sure that we compound bounds, even
+// for a multikey index.
+TEST_F(QueryPlannerTest, CompoundMultikeyBounds) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1), true);
+ runQuery(fromjson("{a: 1, b: 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {filter: {$and:[{a:1},{b:3}]}, dir: 1}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: {filter: null, "
+ "pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[3,3,true,true]]}}}}}");
+}
+
+// Make sure that we compound bounds but do not intersect bounds
+// for a compound multikey index.
+TEST_F(QueryPlannerTest, CompoundMultikeyBoundsNoIntersect) {
+ // true means multikey
+ addIndex(BSON("a" << 1 << "b" << 1), true);
+ runQuery(fromjson("{a: 1, b: {$gt: 3, $lte: 5}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists("{cscan: {dir: 1}}");
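+    // With a multikey index, $gt: 3 and $lte: 5 could be satisfied by different
+    // array elements, so their bounds are not intersected: only $lte: 5 is used
+    // for the scan and $gt: 3 is re-checked in the fetch filter.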
+ assertSolutionExists(
+ "{fetch: {filter: {b:{$gt:3}}, node: {ixscan: {filter: null, "
+ "pattern: {a:1,b:1}, bounds: "
+ "{a: [[1,1,true,true]], b: [[-Infinity,5,true,true]]}}}}}");
+}
+
+//
+// QueryPlannerParams option tests
+//
+
+TEST_F(QueryPlannerTest, NoBlockingSortsAllowedTest) {
+ params.options = QueryPlannerParams::NO_BLOCKING_SORT;
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+ assertNumSolutions(0U);
+
+ addIndex(BSON("x" << 1));
+
+ runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NoTableScanBasic) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ runQuery(BSONObj());
+ assertNumSolutions(0U);
+
+ addIndex(BSON("x" << 1));
+
+ runQuery(BSONObj());
+ assertNumSolutions(0U);
+
+ runQuery(fromjson("{x: {$gte: 0}}"));
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, NoTableScanOrWithAndChild) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{$or: [{a: 20}, {$and: [{a:1}, {b:7}]}]}"));
+
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {b: 7}, node: {ixscan: "
+ "{filter: null, pattern: {a: 1}}}}}]}}}}");
+}
+
+//
+// Index Intersection.
+//
+// We don't exhaustively check all plans here. Instead we check that there exists an
+// intersection plan. The blending of >1 index plans and ==1 index plans is under development
+// but we want to make sure that we create an >1 index plan when we should.
+//
+
+TEST_F(QueryPlannerTest, IntersectBasicTwoPred) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson("{a:1, b:{$gt: 1}}"));
+
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andHash: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompound) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1 << "c" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson("{a:1, b:1, c:1}"));
+
+ // There's an andSorted not andHash because the two seeks are point intervals.
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1, c:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+// SERVER-12196
+TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompoundMatchesIdxOrder1) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuery(fromjson("{a:1, b:1}"));
+
+ assertNumSolutions(3U);
+
+ assertSolutionExists(
+ "{fetch: {filter: {b:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+// SERVER-12196
+TEST_F(QueryPlannerTest, IntersectBasicTwoPredCompoundMatchesIdxOrder2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1));
+ runQuery(fromjson("{a:1, b:1}"));
+
+ assertNumSolutions(3U);
+
+ assertSolutionExists(
+ "{fetch: {filter: {b:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectManySelfIntersections) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ // True means multikey.
+ addIndex(BSON("a" << 1), true);
+
+ // This one goes to 11.
+ runQuery(fromjson("{a:1, a:2, a:3, a:4, a:5, a:6, a:7, a:8, a:9, a:10, a:11}"));
+
+ // But this one only goes to 10.
+ assertSolutionExists(
+ "{fetch: {filter: {a:11}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 1
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 2
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 3
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 4
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 5
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 6
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 7
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 8
+ "{ixscan: {filter: null, pattern: {a:1}}}," // 9
+ "{ixscan: {filter: null, pattern: {a:1}}}]}}}}"); // 10
+}
+
+TEST_F(QueryPlannerTest, IntersectSubtreeNodes) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1));
+
+ runQuery(fromjson("{$or: [{a: 1}, {b: 1}], $or: [{c:1}, {d:1}]}"));
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andHash: {nodes: ["
+ "{or: {nodes: [{ixscan:{filter:null, pattern:{a:1}}},"
+ "{ixscan:{filter:null, pattern:{b:1}}}]}},"
+ "{or: {nodes: [{ixscan:{filter:null, pattern:{c:1}}},"
+ "{ixscan:{filter:null, pattern:{d:1}}}]}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectSubtreeAndPred) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ runQuery(fromjson("{a: 1, $or: [{b:1}, {c:1}]}"));
+
+    // This can be rewritten to $or: [{a:1, b:1}, {a:1, c:1}]. We don't look for the various
+    // single $or solutions as that's tested elsewhere. We look for the intersect solution,
+    // where each AND inside of the root OR is an and_sorted.
+ size_t matches = 0;
+ matches += numSolutionMatches(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {'a':1}}},"
+ "{ixscan: {filter: null, pattern: {'b':1}}}]}},"
+ "{andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {'a':1}}},"
+ "{ixscan: {filter: null, pattern: {'c':1}}}]}}]}}}}");
+ matches += numSolutionMatches(
+ "{fetch: {filter: null, node: {andHash: {nodes:["
+ "{or: {nodes: [{ixscan:{filter:null, pattern:{b:1}}},"
+ "{ixscan:{filter:null, pattern:{c:1}}}]}},"
+ "{ixscan:{filter: null, pattern:{a:1}}}]}}}}");
+ ASSERT_GREATER_THAN_OR_EQUALS(matches, 1U);
+}
+
+TEST_F(QueryPlannerTest, IntersectElemMatch) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a.b" << 1));
+ addIndex(BSON("a.c" << 1));
+ runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}"));
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$elemMatch:{b:1, c:1}}},"
+ "node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {'a.b':1}}},"
+ "{ixscan: {filter: null, pattern: {'a.c':1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectSortFromAndHash) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ runQuerySortProj(fromjson("{a: 1, b:{$gt: 1}}"), fromjson("{b:1}"), BSONObj());
+
+ // This provides the sort.
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andHash: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+
+ // Rearrange the preds, shouldn't matter.
+ runQuerySortProj(fromjson("{b: 1, a:{$lt: 7}}"), fromjson("{b:1}"), BSONObj());
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andHash: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, IntersectCanBeVeryBig) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1));
+ runQuery(fromjson(
+ "{$or: [{ 'a' : null, 'b' : 94, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 98, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 1, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 2, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 7, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
+
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+}
+
+// Ensure that disabling AND_HASH intersection works properly.
+TEST_F(QueryPlannerTest, IntersectDisableAndHash) {
+ bool oldEnableHashIntersection = internalQueryPlannerEnableHashIntersection;
+
+ // Turn index intersection on but disable hash-based intersection.
+ internalQueryPlannerEnableHashIntersection = false;
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ runQuery(fromjson("{a: {$gt: 1}, b: 1, c: 1}"));
+
+ // We should do an AND_SORT intersection of {b: 1} and {c: 1}, but no AND_HASH plans.
+ assertNumSolutions(4U);
+ assertSolutionExists(
+ "{fetch: {filter: {b: 1, c: 1}, node: {ixscan: "
+ "{pattern: {a: 1}, bounds: {a: [[1,Infinity,false,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:1},c:1}, node: {ixscan: "
+ "{pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:1},b:1}, node: {ixscan: "
+ "{pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:{$gt:1}}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {b:1}}},"
+ "{ixscan: {filter: null, pattern: {c:1}}}]}}}}");
+
+    // Restore the old value of the hash intersection switch.
+ internalQueryPlannerEnableHashIntersection = oldEnableHashIntersection;
+}
+
+//
+// Index intersection cases for SERVER-12825: make sure that
+// we don't generate an ixisect plan if a compound index is
+// available instead.
+//
+
+// SERVER-12825
+TEST_F(QueryPlannerTest, IntersectCompoundInsteadBasic) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuery(fromjson("{a: 1, b: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {b:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
+}
+
+// SERVER-12825
+TEST_F(QueryPlannerTest, IntersectCompoundInsteadThreeCompoundIndices) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("c" << 1 << "d" << 1));
+ addIndex(BSON("a" << 1 << "c" << -1 << "b" << -1 << "d" << 1));
+ runQuery(fromjson("{a: 1, b: 1, c: 1, d: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{c:1},{d:1}]}, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {$and:[{a:1},{b:1}]}, node: "
+ "{ixscan: {filter: null, pattern: {c:1,d:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a:1,c:-1,b:-1,d:1}}}}}");
+}
+
+// SERVER-12825
+TEST_F(QueryPlannerTest, IntersectCompoundInsteadUnusedField) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuery(fromjson("{a: 1, b: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {b:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1,c:1}}}}}");
+}
+
+// SERVER-12825
+TEST_F(QueryPlannerTest, IntersectCompoundInsteadUnusedField2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("c" << 1 << "d" << 1));
+ addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
+ runQuery(fromjson("{a: 1, c: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {c:1}, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {c:1,d:1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: "
+ "{ixscan: {filter: null, pattern: {a:1,b:1,c:1}}}}}");
+}
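+
+// In the SERVER-12825 cases above, the compound index already answers every indexed predicate
+// in the query, so the planner is expected to surface it as a plain ixscan rather than build an
+// AND_SORTED/AND_HASH intersection of the narrower indexes; the assertNumSolutions and
+// assertSolutionExists pairs together verify that no ixisect plan appears among the enumerated
+// solutions.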
+
+//
+// Test that we add a KeepMutations when we should and we don't add one when we shouldn't.
+//
+
+// Collection scan doesn't keep any state, so it can't produce flagged data.
+TEST_F(QueryPlannerTest, NoMutationsForCollscan) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ runQuery(fromjson(""));
+ assertSolutionExists("{cscan: {dir: 1}}");
+}
+
+// Collscan + sort doesn't produce flagged data either.
+TEST_F(QueryPlannerTest, NoMutationsForSort) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ runQuerySortProj(fromjson(""), fromjson("{a:1}"), BSONObj());
+ assertSolutionExists("{sort: {pattern: {a: 1}, limit: 0, node: {cscan: {dir: 1}}}}");
+}
+
+// An index scan + fetch requires a keep node as it can flag data. Also make sure we put it in
+// the right place, under the sort.
+TEST_F(QueryPlannerTest, MutationsFromFetch) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ addIndex(BSON("a" << 1));
+ runQuerySortProj(fromjson("{a: 5}"), fromjson("{b:1}"), BSONObj());
+ assertSolutionExists(
+ "{sort: {pattern: {b:1}, limit: 0, node: {keep: {node: "
+ "{fetch: {node: {ixscan: {pattern: {a:1}}}}}}}}}");
+}
+
+// Index scan w/covering doesn't require a keep node as there's no fetch.
+TEST_F(QueryPlannerTest, NoFetchNoKeep) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ addIndex(BSON("x" << 1));
+ // query, sort, proj
+ runQuerySortProj(fromjson("{ x : {$gt: 1}}"), BSONObj(), fromjson("{_id: 0, x: 1}"));
+
+ // cscan is a soln but we override the params that say to include it.
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, x: 1}, node: {ixscan: "
+ "{filter: null, pattern: {x: 1}}}}}");
+}
+
+// No keep with geoNear.
+TEST_F(QueryPlannerTest, NoKeepWithGeoNear) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ addIndex(BSON("a"
+ << "2d"));
+ runQuery(fromjson("{a: {$near: [0,0], $maxDistance:0.3 }}"));
+ ASSERT_EQUALS(getNumSolutions(), 1U);
+ assertSolutionExists("{geoNear2d: {a: '2d'}}");
+}
+
+// No keep when we have an indexed sort.
+TEST_F(QueryPlannerTest, NoKeepWithIndexedSort) {
+ params.options = QueryPlannerParams::KEEP_MUTATIONS;
+ addIndex(BSON("a" << 1 << "b" << 1));
+ runQuerySortProjSkipLimit(fromjson("{a: {$in: [1, 2]}}"), BSON("b" << 1), BSONObj(), 0, 1);
+
+    // cscan solution exists but we didn't turn on the "always include a collscan" option.
+ assertNumSolutions(1);
+ assertSolutionExists(
+ "{fetch: {node: {mergeSort: {nodes: "
+ "[{ixscan: {pattern: {a: 1, b: 1}}}, {ixscan: {pattern: {a: 1, b: 1}}}]}}}}");
+}
+
+// Make sure a top-level $or hits the limiting number
+// of solutions that we are willing to consider.
+TEST_F(QueryPlannerTest, OrEnumerationLimit) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ // 6 $or clauses, each with 2 indexed predicates
+ // means 2^6 = 64 possibilities. We should hit the limit.
+ runQuery(fromjson(
+ "{$or: [{a: 1, b: 1},"
+ "{a: 2, b: 2},"
+ "{a: 3, b: 3},"
+ "{a: 4, b: 4},"
+ "{a: 5, b: 5},"
+ "{a: 6, b: 6}]}"));
+
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+}
+
+TEST_F(QueryPlannerTest, OrEnumerationLimit2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1));
+
+ // 3 $or clauses, and a few other preds. Each $or clause can
+ // generate up to the max number of allowed $or enumerations.
+ runQuery(fromjson(
+ "{$or: [{a: 1, b: 1, c: 1, d: 1},"
+ "{a: 2, b: 2, c: 2, d: 2},"
+ "{a: 3, b: 3, c: 3, d: 3}]}"));
+
+ assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
+}
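+
+// For reference: in OrEnumerationLimit each of the 6 clauses can be answered by either the
+// {a: 1} or the {b: 1} index, giving 2^6 = 64 candidate index assignments, while in
+// OrEnumerationLimit2 each of the 3 clauses has 4 usable single-field indexes (on the order of
+// 4^3 combinations), so both runs are expected to stop at exactly
+// internalQueryEnumerationMaxOrSolutions plans. A hypothetical sketch of shrinking the search
+// space by lowering the knob around the query, mirroring the save/restore pattern used for the
+// hash-intersection knob elsewhere in this file (not part of this change):
+//
+//     const auto oldMax = internalQueryEnumerationMaxOrSolutions;
+//     internalQueryEnumerationMaxOrSolutions = 3;
+//     runQuery(...);  // the same $or query as above
+//     assertNumSolutions(3U);
+//     internalQueryEnumerationMaxOrSolutions = oldMax;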
+
+// SERVER-13104: test that we properly enumerate all solutions for nested $or.
+TEST_F(QueryPlannerTest, EnumerateNestedOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ runQuery(fromjson("{d: 1, $or: [{a: 1, b: 1}, {c: 1}]}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
+ "{ixscan: {pattern: {c: 1}}}]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
+ "{ixscan: {pattern: {c: 1}}}]}}}}");
+}
+
+// SERVER-13104: test that we properly enumerate all solutions for nested $or.
+TEST_F(QueryPlannerTest, EnumerateNestedOr2) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1));
+ addIndex(BSON("e" << 1));
+ addIndex(BSON("f" << 1));
+
+ runQuery(fromjson("{a: 1, b: 1, $or: [{c: 1, d: 1}, {e: 1, f: 1}]}"));
+
+ assertNumSolutions(6U);
+
+ // Four possibilities from indexing the $or.
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {d: 1}, node: {ixscan: {pattern: {c: 1}}}}},"
+ "{fetch: {filter: {f: 1}, node: {ixscan: {pattern: {e: 1}}}}}"
+ "]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}},"
+ "{fetch: {filter: {f: 1}, node: {ixscan: {pattern: {e: 1}}}}}"
+ "]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {d: 1}, node: {ixscan: {pattern: {c: 1}}}}},"
+ "{fetch: {filter: {e: 1}, node: {ixscan: {pattern: {f: 1}}}}}"
+ "]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, b: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}},"
+ "{fetch: {filter: {e: 1}, node: {ixscan: {pattern: {f: 1}}}}}"
+ "]}}}}");
+
+    // Two possibilities from outside the $or.
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: 1}}}}}");
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {b: 1}}}}}");
+}
+
+//
+// Test the "split limited sort stages" hack.
+//
+
+TEST_F(QueryPlannerTest, SplitLimitedSort) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQuerySortProjSkipLimit(fromjson("{a: 1}"), fromjson("{b: 1}"), BSONObj(), 0, 3);
+
+ assertNumSolutions(2U);
+ // First solution has no blocking stage; no need to split.
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}");
+ // Second solution has a blocking sort with a limit: it gets split and
+ // joined with an OR stage.
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{sort: {pattern: {b: 1}, limit: 3, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}, "
+ "{sort: {pattern: {b: 1}, limit: 0, node: "
+ "{fetch: {node: {ixscan: {pattern: {a: 1}}}}}}}]}}");
+}
+
+// The same query run as a find command with a limit should not require the "split limited sort"
+// hack.
+TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommand) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, limit: 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{limit: {n: 3, node: {fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 3, node: {fetch: {filter: null,"
+ "node: {ixscan: {pattern: {a: 1}}}}}}}");
+}
+
+// Same query run as a find command with a batchSize rather than a limit should not require
+// the "split limited sort" hack, and should not have any limit represented inside the plan.
+TEST_F(QueryPlannerTest, NoSplitLimitedSortAsCommandBatchSize) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ params.options |= QueryPlannerParams::SPLIT_LIMITED_SORT;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQueryAsCommand(fromjson("{find: 'testns', filter: {a: 1}, sort: {b: 1}, batchSize: 3}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1}, node: {ixscan: "
+ "{filter: null, pattern: {b: 1}}}}}");
+ assertSolutionExists(
+ "{sort: {pattern: {b: 1}, limit: 0, node: {fetch: {filter: null,"
+ "node: {ixscan: {pattern: {a: 1}}}}}}}");
+}
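+
+// Taken together, the three tests above show that the split only applies to the OP_QUERY-style
+// path, where a limited blocking sort is duplicated into a limit-k sort and an unlimited sort
+// joined by an OR (presumably because ntoreturn cannot distinguish a limit from a batch size).
+// When the same query arrives as a find command, the limit is modelled directly, either as a
+// LIMIT stage over the non-blocking plan or as a limited sort, and a batchSize leaves no limit
+// in the plan at all, so no splitting is needed.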
+
+//
+// Test shard filter query planning
+//
+
+TEST_F(QueryPlannerTest, ShardFilterCollScan) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1));
+
+ runQuery(fromjson("{b: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sharding_filter: {node: "
+ "{cscan: {dir: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterBasicIndex) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{b: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{sharding_filter: {node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {b: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterBasicCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1));
+
+ runQuery(fromjson("{a: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {node: "
+ "{sharding_filter: {node: "
+ "{ixscan: {pattern: {a: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterBasicProjCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, type: 'coveredIndex', node: "
+ "{sharding_filter: {node: "
+ "{ixscan: {pattern: {a: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterCompoundProjCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1 << "b" << 1);
+ addIndex(BSON("a" << 1 << "b" << 1));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id: 0, a: 1, b: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, b: 1 }, type: 'coveredIndex', node: "
+ "{sharding_filter: {node: "
+ "{ixscan: {pattern: {a: 1, b: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterNestedProjNotCovered) {
+ // Nested projections can't be covered currently, though the shard key filter shouldn't need
+ // to fetch.
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1 << "b.c" << 1);
+ addIndex(BSON("a" << 1 << "b.c" << 1));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id: 0, a: 1, 'b.c': 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1, 'b.c': 1 }, type: 'default', node: "
+ "{fetch: {node: "
+ "{sharding_filter: {node: "
+ "{ixscan: {pattern: {a: 1, 'b.c': 1}}}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterHashProjNotCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a"
+ << "hashed");
+ addIndex(BSON("a"
+ << "hashed"));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0,a: 1}, type: 'simple', node: "
+ "{sharding_filter : {node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {a: 'hashed'}}}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterKeyPrefixIndexCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a" << 1);
+ addIndex(BSON("a" << 1 << "b" << 1 << "_id" << 1));
+
+ runQuerySortProj(fromjson("{a: 1}"), BSONObj(), fromjson("{a : 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {a: 1}, type: 'coveredIndex', node: "
+ "{sharding_filter : {node: "
+ "{ixscan: {pattern: {a: 1, b: 1, _id: 1}}}}}}}");
+}
+
+TEST_F(QueryPlannerTest, ShardFilterNoIndexNotCovered) {
+ params.options = QueryPlannerParams::INCLUDE_SHARD_FILTER;
+ params.shardKey = BSON("a"
+ << "hashed");
+ addIndex(BSON("b" << 1));
+
+ runQuerySortProj(fromjson("{b: 1}"), BSONObj(), fromjson("{_id : 0, a : 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0,a: 1}, type: 'simple', node: "
+ "{sharding_filter : {node: "
+ "{fetch: {node: "
+ "{ixscan: {pattern: {b: 1}}}}}}}}}");
+}
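+
+// A summary of the SHARDING_FILTER placement exercised above: when the scanned index contains
+// every shard-key field, the filter sits directly on top of the ixscan (so covered projections
+// stay covered and no extra fetch is forced); when it does not, for example with a hashed shard
+// key or a predicate using an index that omits the shard key, the filter sits above the FETCH
+// that materializes the document; with no usable index at all, it simply wraps the collection
+// scan.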
+
+TEST_F(QueryPlannerTest, CannotTrimIxisectParam) {
+ params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ params.options |= QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+
+ runQuery(fromjson("{a: 1, b: 1, c: 1}"));
+
+ assertNumSolutions(3U);
+ assertSolutionExists(
+ "{fetch: {filter: {b: 1, c: 1}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1, c: 1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {a:1,b:1,c:1}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, CannotTrimIxisectParamBeneathOr) {
+ params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ params.options |= QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ runQuery(fromjson("{d: 1, $or: [{a: 1}, {b: 1, c: 1}]}"));
+
+ assertNumSolutions(3U);
+
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {c: 1}, node: {ixscan: {filter: null,"
+ "pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1},"
+ "bounds: {a: [[1,1,true,true]]}}}]}}}}");
+
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {b: 1}, node: {ixscan: {filter: null,"
+ "pattern: {c: 1}, bounds: {c: [[1,1,true,true]]}}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1},"
+ "bounds: {a: [[1,1,true,true]]}}}]}}}}");
+
+ assertSolutionExists(
+ "{fetch: {filter: {d: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {b: 1, c: 1}, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {b: 1}}},"
+ "{ixscan: {filter: null, pattern: {c: 1}}}]}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, CannotTrimIxisectAndHashWithOrChild) {
+ params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ params.options |= QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+
+ runQuery(fromjson("{c: 1, $or: [{a: 1}, {b: 1, d: 1}]}"));
+
+ assertNumSolutions(3U);
+
+ assertSolutionExists(
+ "{fetch: {filter: {c: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {d: 1}, node: {ixscan: {filter: null,"
+ "pattern: {b: 1}, bounds: {b: [[1,1,true,true]]}}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1},"
+ "bounds: {a: [[1,1,true,true]]}}}]}}}}");
+
+ assertSolutionExists(
+ "{fetch: {filter: {$or:[{b:1,d:1},{a:1}]}, node:"
+ "{ixscan: {filter: null, pattern: {c: 1}}}}}");
+
+ assertSolutionExists(
+ "{fetch: {filter: {c:1,$or:[{a:1},{b:1,d:1}]}, node:{andHash:{nodes:["
+ "{or: {nodes: ["
+ "{fetch: {filter: {d:1}, node: {ixscan: {pattern: {b: 1}}}}},"
+ "{ixscan: {filter: null, pattern: {a: 1}}}]}},"
+ "{ixscan: {filter: null, pattern: {c: 1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, CannotTrimIxisectParamSelfIntersection) {
+ params.options = QueryPlannerParams::CANNOT_TRIM_IXISECT;
+ params.options = QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ // true means multikey
+ addIndex(BSON("a" << 1), true);
+
+ runQuery(fromjson("{a: {$all: [1, 2, 3]}}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {$and: [{a:2}, {a:3}]}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1},"
+ "bounds: {a: [[1,1,true,true]]}}},"
+ "{ixscan: {filter: null, pattern: {a:1},"
+ "bounds: {a: [[2,2,true,true]]}}},"
+ "{ixscan: {filter: null, pattern: {a:1},"
+ "bounds: {a: [[3,3,true,true]]}}}]}}}}");
+}
+
+
+// If a lookup against a unique index is available as a possible plan, then the planner
+// should not generate other possibilities.
+TEST_F(QueryPlannerTest, UniqueIndexLookup) {
+ params.options = QueryPlannerParams::INDEX_INTERSECTION;
+ params.options |= QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1),
+ false, // multikey
+ false, // sparse,
+ true); // unique
+
+ runQuery(fromjson("{a: 1, b: 1}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a: 1}, node: "
+ "{ixscan: {filter: null, pattern: {b: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, HintOnNonUniqueIndex) {
+ params.options = QueryPlannerParams::INDEX_INTERSECTION;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1),
+ false, // multikey
+ false, // sparse,
+ true); // unique
+
+ runQueryHint(fromjson("{a: 1, b: 1}"), BSON("a" << 1));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {b: 1}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1}}}}}");
+}
+
+TEST_F(QueryPlannerTest, UniqueIndexLookupBelowOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1),
+ false, // multikey
+ false, // sparse,
+ true); // unique
+
+ runQuery(fromjson("{$or: [{a: 1, b: 1}, {c: 1, d: 1}]}"));
+
+ // Only two plans because we throw out plans for the right branch of the $or that do not
+ // use equality over the unique index.
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}]}}");
+ assertSolutionExists(
+ "{or: {nodes: ["
+ "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}]}}");
+}
+
+TEST_F(QueryPlannerTest, UniqueIndexLookupBelowOrBelowAnd) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("c" << 1));
+ addIndex(BSON("d" << 1),
+ false, // multikey
+ false, // sparse,
+ true); // unique
+
+ runQuery(fromjson("{e: 1, $or: [{a: 1, b: 1}, {c: 1, d: 1}]}"));
+
+ // Only two plans because we throw out plans for the right branch of the $or that do not
+ // use equality over the unique index.
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{fetch: {filter: {e: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {a: 1}, node: {ixscan: {pattern: {b: 1}}}}},"
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}"
+ "]}}}}");
+ assertSolutionExists(
+ "{fetch: {filter: {e: 1}, node: {or: {nodes: ["
+ "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}},"
+ "{fetch: {filter: {c: 1}, node: {ixscan: {pattern: {d: 1}}}}}"
+ "]}}}}");
+}
+
+TEST_F(QueryPlannerTest, CoveredOrUniqueIndexLookup) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+
+ addIndex(BSON("a" << 1 << "b" << 1));
+ addIndex(BSON("a" << 1),
+ false, // multikey
+ false, // sparse,
+ true); // unique
+
+ runQuerySortProj(fromjson("{a: 1, b: 1}"), BSONObj(), fromjson("{_id: 0, a: 1}"));
+
+ assertNumSolutions(2U);
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{fetch: {filter: {b: 1}, node: {ixscan: {pattern: {a: 1}}}}}}}");
+ assertSolutionExists(
+ "{proj: {spec: {_id: 0, a: 1}, node: "
+ "{ixscan: {filter: null, pattern: {a: 1, b: 1}}}}}");
+}
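+
+// Summary of the unique-index behaviour above: when a branch of the query can be answered by an
+// equality lookup on a unique index, the planner keeps only that assignment for the branch, both
+// at the top level and underneath $or. An explicit hint still wins (HintOnNonUniqueIndex), and a
+// covering compound index is still enumerated alongside the unique lookup
+// (CoveredOrUniqueIndexLookup).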
+
+//
+// Test bad input to query planner helpers.
+//
+
+TEST(BadInputTest, CacheDataFromTaggedTree) {
+ PlanCacheIndexTree* indexTree;
+
+ // Null match expression.
+ std::vector<IndexEntry> relevantIndices;
+ Status s = QueryPlanner::cacheDataFromTaggedTree(NULL, relevantIndices, &indexTree);
+ ASSERT_NOT_OK(s);
+ ASSERT(NULL == indexTree);
+
+ // No relevant index matching the index tag.
+ relevantIndices.push_back(IndexEntry(BSON("a" << 1)));
+
+ CanonicalQuery* cq;
+ Status cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
+ ASSERT_OK(cqStatus);
+ std::unique_ptr<CanonicalQuery> scopedCq(cq);
+ scopedCq->root()->setTag(new IndexTag(1));
+
+ s = QueryPlanner::cacheDataFromTaggedTree(scopedCq->root(), relevantIndices, &indexTree);
+ ASSERT_NOT_OK(s);
+ ASSERT(NULL == indexTree);
+}
+
+TEST(BadInputTest, TagAccordingToCache) {
+ CanonicalQuery* cq;
+ Status cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
+ ASSERT_OK(cqStatus);
+ std::unique_ptr<CanonicalQuery> scopedCq(cq);
+
+ std::unique_ptr<PlanCacheIndexTree> indexTree(new PlanCacheIndexTree());
+ indexTree->setIndexEntry(IndexEntry(BSON("a" << 1)));
+
+ std::map<BSONObj, size_t> indexMap;
+
+ // Null filter.
+ Status s = QueryPlanner::tagAccordingToCache(NULL, indexTree.get(), indexMap);
+ ASSERT_NOT_OK(s);
+
+ // Null indexTree.
+ s = QueryPlanner::tagAccordingToCache(scopedCq->root(), NULL, indexMap);
+ ASSERT_NOT_OK(s);
+
+ // Index not found.
+ s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
+ ASSERT_NOT_OK(s);
+
+ // Index found once added to the map.
+ indexMap[BSON("a" << 1)] = 0;
+ s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
+ ASSERT_OK(s);
+
+ // Regenerate canonical query in order to clear tags.
+ cqStatus = CanonicalQuery::canonicalize("ns", BSON("a" << 3), &cq);
+ ASSERT_OK(cqStatus);
+ scopedCq.reset(cq);
+
+ // Mismatched tree topology.
+ PlanCacheIndexTree* child = new PlanCacheIndexTree();
+ child->setIndexEntry(IndexEntry(BSON("a" << 1)));
+ indexTree->children.push_back(child);
+ s = QueryPlanner::tagAccordingToCache(scopedCq->root(), indexTree.get(), indexMap);
+ ASSERT_NOT_OK(s);
+}
} // namespace
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index c6824169ae7..1b876c5296b 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -41,290 +41,299 @@
namespace mongo {
- using unittest::assertGet;
-
- const char* QueryPlannerTest::ns = "somebogus.ns";
-
- void QueryPlannerTest::setUp() {
- internalQueryPlannerEnableHashIntersection = true;
- params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
- addIndex(BSON("_id" << 1));
- }
-
- void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- false, // sparse
- false, // unique
- "hari_king_of_the_stove",
- NULL, // filterExpr
- BSONObj()));
- }
-
- void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- sparse,
- false, // unique
- "note_to_self_dont_break_build",
- NULL, // filterExpr
- BSONObj()));
- }
-
- void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey, bool sparse, bool unique) {
- params.indices.push_back(IndexEntry(keyPattern,
- multikey,
- sparse,
- unique,
- "sql_query_walks_into_bar_and_says_can_i_join_you?",
- NULL, // filterExpr
- BSONObj()));
- }
-
- void QueryPlannerTest::addIndex(BSONObj keyPattern, BSONObj infoObj) {
- params.indices.push_back(IndexEntry(keyPattern,
- false, // multikey
- false, // sparse
- false, // unique
- "foo",
- NULL, // filterExpr
- infoObj));
- }
-
- void QueryPlannerTest::runQuery(BSONObj query) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
- }
-
- void QueryPlannerTest::runQuerySortProj(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj) {
- runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
- }
-
- void QueryPlannerTest::runQuerySkipLimit(const BSONObj& query,
- long long skip,
- long long limit) {
- runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
- }
-
- void QueryPlannerTest::runQueryHint(const BSONObj& query, const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
- }
-
- void QueryPlannerTest::runQuerySortProjSkipLimit(const BSONObj& query,
+using unittest::assertGet;
+
+const char* QueryPlannerTest::ns = "somebogus.ns";
+
+void QueryPlannerTest::setUp() {
+ internalQueryPlannerEnableHashIntersection = true;
+ params.options = QueryPlannerParams::INCLUDE_COLLSCAN;
+ addIndex(BSON("_id" << 1));
+}
+
+void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey) {
+ params.indices.push_back(IndexEntry(keyPattern,
+ multikey,
+ false, // sparse
+ false, // unique
+ "hari_king_of_the_stove",
+ NULL, // filterExpr
+ BSONObj()));
+}
+
+void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey, bool sparse) {
+ params.indices.push_back(IndexEntry(keyPattern,
+ multikey,
+ sparse,
+ false, // unique
+ "note_to_self_dont_break_build",
+ NULL, // filterExpr
+ BSONObj()));
+}
+
+void QueryPlannerTest::addIndex(BSONObj keyPattern, bool multikey, bool sparse, bool unique) {
+ params.indices.push_back(IndexEntry(keyPattern,
+ multikey,
+ sparse,
+ unique,
+ "sql_query_walks_into_bar_and_says_can_i_join_you?",
+ NULL, // filterExpr
+ BSONObj()));
+}
+
+void QueryPlannerTest::addIndex(BSONObj keyPattern, BSONObj infoObj) {
+ params.indices.push_back(IndexEntry(keyPattern,
+ false, // multikey
+ false, // sparse
+ false, // unique
+ "foo",
+ NULL, // filterExpr
+ infoObj));
+}
+
+void QueryPlannerTest::runQuery(BSONObj query) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
+}
+
+void QueryPlannerTest::runQuerySortProj(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj) {
+ runQuerySortProjSkipLimit(query, sort, proj, 0, 0);
+}
+
+void QueryPlannerTest::runQuerySkipLimit(const BSONObj& query, long long skip, long long limit) {
+ runQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), skip, limit);
+}
+
+void QueryPlannerTest::runQueryHint(const BSONObj& query, const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
+}
+
+void QueryPlannerTest::runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
+}
+
+void QueryPlannerTest::runQuerySortHint(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& hint) {
+ runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
+}
+
+void QueryPlannerTest::runQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
+}
+
+void QueryPlannerTest::runQuerySortProjSkipLimitHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long limit) {
- runQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
- }
-
- void QueryPlannerTest::runQuerySortHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& hint) {
- runQuerySortProjSkipLimitHint(query, sort, BSONObj(), 0, 0, hint);
- }
-
- void QueryPlannerTest::runQueryHintMinMax(const BSONObj& query,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj) {
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
- }
-
- void QueryPlannerTest::runQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint) {
- runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
- }
-
- void QueryPlannerTest::runQuerySnapshot(const BSONObj& query) {
- runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(),
- BSONObj(), true);
- }
-
- void QueryPlannerTest::runQueryFull(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot) {
- // Clean up any previous state from a call to runQueryFull
- solns.clear();
-
- {
- CanonicalQuery* rawCq;
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- minObj, maxObj, snapshot,
- false, // explain
- &rawCq);
- ASSERT_OK(s);
- cq.reset(rawCq);
- }
-
- ASSERT_OK(QueryPlanner::plan(*cq, params, &solns.mutableVector()));
+ long long limit,
+ const BSONObj& hint) {
+ runQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
+}
+
+void QueryPlannerTest::runQuerySnapshot(const BSONObj& query) {
+ runQueryFull(query, BSONObj(), BSONObj(), 0, 0, BSONObj(), BSONObj(), BSONObj(), true);
+}
+
+void QueryPlannerTest::runQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot) {
+ // Clean up any previous state from a call to runQueryFull
+ solns.clear();
+
+ {
+ CanonicalQuery* rawCq;
+ Status s = CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ minObj,
+ maxObj,
+ snapshot,
+ false, // explain
+ &rawCq);
+ ASSERT_OK(s);
+ cq.reset(rawCq);
}
- void QueryPlannerTest::runInvalidQuery(const BSONObj& query) {
- runInvalidQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
- }
+ ASSERT_OK(QueryPlanner::plan(*cq, params, &solns.mutableVector()));
+}
- void QueryPlannerTest::runInvalidQuerySortProj(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj) {
- runInvalidQuerySortProjSkipLimit(query, sort, proj, 0, 0);
- }
+void QueryPlannerTest::runInvalidQuery(const BSONObj& query) {
+ runInvalidQuerySortProjSkipLimit(query, BSONObj(), BSONObj(), 0, 0);
+}
- void QueryPlannerTest::runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+void QueryPlannerTest::runInvalidQuerySortProj(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj) {
+ runInvalidQuerySortProjSkipLimit(query, sort, proj, 0, 0);
+}
+
+void QueryPlannerTest::runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit) {
+ runInvalidQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
+}
+
+void QueryPlannerTest::runInvalidQueryHint(const BSONObj& query, const BSONObj& hint) {
+ runInvalidQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
+}
+
+void QueryPlannerTest::runInvalidQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj) {
+ runInvalidQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
+}
+
+void QueryPlannerTest::runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long limit) {
- runInvalidQuerySortProjSkipLimitHint(query, sort, proj, skip, limit, BSONObj());
+ long long limit,
+ const BSONObj& hint) {
+ runInvalidQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
+}
+
+void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot) {
+ solns.clear();
+
+ {
+ CanonicalQuery* rawCq;
+ Status s = CanonicalQuery::canonicalize(ns,
+ query,
+ sort,
+ proj,
+ skip,
+ limit,
+ hint,
+ minObj,
+ maxObj,
+ snapshot,
+ false, // explain
+ &rawCq);
+ ASSERT_OK(s);
+ cq.reset(rawCq);
}
- void QueryPlannerTest::runInvalidQueryHint(const BSONObj& query, const BSONObj& hint) {
- runInvalidQuerySortProjSkipLimitHint(query, BSONObj(), BSONObj(), 0, 0, hint);
- }
+ Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
+ ASSERT_NOT_OK(s);
+}
- void QueryPlannerTest::runInvalidQueryHintMinMax(const BSONObj& query,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj) {
- runInvalidQueryFull(query, BSONObj(), BSONObj(), 0, 0, hint, minObj, maxObj, false);
- }
+void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
+ solns.clear();
- void QueryPlannerTest::runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint) {
- runInvalidQueryFull(query, sort, proj, skip, limit, hint, BSONObj(), BSONObj(), false);
- }
+ const NamespaceString nss(ns);
+ invariant(nss.isValid());
- void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot) {
- solns.clear();
-
- {
- CanonicalQuery* rawCq;
- Status s = CanonicalQuery::canonicalize(ns, query, sort, proj, skip, limit, hint,
- minObj, maxObj, snapshot,
- false, // explain
- &rawCq);
- ASSERT_OK(s);
- cq.reset(rawCq);
- }
+ const bool isExplain = false;
+ std::unique_ptr<LiteParsedQuery> lpq(
+ assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
- Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
- ASSERT_NOT_OK(s);
- }
+ CanonicalQuery* rawCq;
+ WhereCallbackNoop whereCallback;
+ Status canonStatus = CanonicalQuery::canonicalize(lpq.release(), &rawCq, whereCallback);
+ ASSERT_OK(canonStatus);
+ cq.reset(rawCq);
- void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
- solns.clear();
+ Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
+ ASSERT_OK(s);
+}
- const NamespaceString nss(ns);
- invariant(nss.isValid());
+size_t QueryPlannerTest::getNumSolutions() const {
+ return solns.size();
+}
- const bool isExplain = false;
- std::unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+void QueryPlannerTest::dumpSolutions() const {
+ mongoutils::str::stream ost;
+ dumpSolutions(ost);
+ log() << std::string(ost);
+}
- CanonicalQuery* rawCq;
- WhereCallbackNoop whereCallback;
- Status canonStatus = CanonicalQuery::canonicalize(lpq.release(), &rawCq, whereCallback);
- ASSERT_OK(canonStatus);
- cq.reset(rawCq);
-
- Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
- ASSERT_OK(s);
- }
-
- size_t QueryPlannerTest::getNumSolutions() const {
- return solns.size();
- }
-
- void QueryPlannerTest::dumpSolutions() const {
- mongoutils::str::stream ost;
- dumpSolutions(ost);
- log() << std::string(ost);
+void QueryPlannerTest::dumpSolutions(mongoutils::str::stream& ost) const {
+ for (auto&& soln : solns) {
+ ost << soln->toString() << '\n';
}
+}
- void QueryPlannerTest::dumpSolutions(mongoutils::str::stream& ost) const {
- for (auto&& soln : solns) {
- ost << soln->toString() << '\n';
- }
+void QueryPlannerTest::assertNumSolutions(size_t expectSolutions) const {
+ if (getNumSolutions() == expectSolutions) {
+ return;
}
-
- void QueryPlannerTest::assertNumSolutions(size_t expectSolutions) const {
- if (getNumSolutions() == expectSolutions) {
- return;
+ mongoutils::str::stream ss;
+ ss << "expected " << expectSolutions << " solutions but got " << getNumSolutions()
+ << " instead. solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
+}
+
+size_t QueryPlannerTest::numSolutionMatches(const std::string& solnJson) const {
+ BSONObj testSoln = fromjson(solnJson);
+ size_t matches = 0;
+ for (auto&& soln : solns) {
+ QuerySolutionNode* root = soln->root.get();
+ if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
+ ++matches;
}
- mongoutils::str::stream ss;
- ss << "expected " << expectSolutions << " solutions but got " << getNumSolutions()
- << " instead. solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
}
+ return matches;
+}
- size_t QueryPlannerTest::numSolutionMatches(const std::string& solnJson) const {
- BSONObj testSoln = fromjson(solnJson);
- size_t matches = 0;
- for (auto&& soln : solns) {
- QuerySolutionNode* root = soln->root.get();
- if (QueryPlannerTestLib::solutionMatches(testSoln, root)) {
- ++matches;
- }
- }
- return matches;
+void QueryPlannerTest::assertSolutionExists(const std::string& solnJson, size_t numMatches) const {
+ size_t matches = numSolutionMatches(solnJson);
+ if (numMatches == matches) {
+ return;
}
-
- void QueryPlannerTest::assertSolutionExists(const std::string& solnJson,
- size_t numMatches) const {
- size_t matches = numSolutionMatches(solnJson);
- if (numMatches == matches) {
- return;
+ mongoutils::str::stream ss;
+ ss << "expected " << numMatches << " matches for solution " << solnJson << " but got "
+ << matches << " instead. all solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
+}
+
+void QueryPlannerTest::assertHasOneSolutionOf(const std::vector<std::string>& solnStrs) const {
+ size_t matches = 0;
+ for (std::vector<std::string>::const_iterator it = solnStrs.begin(); it != solnStrs.end();
+ ++it) {
+ if (1U == numSolutionMatches(*it)) {
+ ++matches;
}
- mongoutils::str::stream ss;
- ss << "expected " << numMatches << " matches for solution " << solnJson
- << " but got " << matches
- << " instead. all solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
}
-
- void QueryPlannerTest::assertHasOneSolutionOf(const std::vector<std::string>& solnStrs) const {
- size_t matches = 0;
- for (std::vector<std::string>::const_iterator it = solnStrs.begin();
- it != solnStrs.end();
- ++it) {
- if (1U == numSolutionMatches(*it)) {
- ++matches;
- }
- }
- if (1U == matches) {
- return;
- }
- mongoutils::str::stream ss;
- ss << "assertHasOneSolutionOf expected one matching solution"
- << " but got " << matches
- << " instead. all solutions generated: " << '\n';
- dumpSolutions(ss);
- FAIL(ss);
+ if (1U == matches) {
+ return;
}
-
-} // namespace mongo
+ mongoutils::str::stream ss;
+ ss << "assertHasOneSolutionOf expected one matching solution"
+ << " but got " << matches << " instead. all solutions generated: " << '\n';
+ dumpSolutions(ss);
+ FAIL(ss);
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index e8b1901785a..843ed949e1a 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -41,155 +41,153 @@
namespace mongo {
- class QueryPlannerTest : public mongo::unittest::Test {
- protected:
- void setUp();
+class QueryPlannerTest : public mongo::unittest::Test {
+protected:
+ void setUp();
- //
- // Build up test.
- //
+ //
+ // Build up test.
+ //
- void addIndex(BSONObj keyPattern, bool multikey = false);
+ void addIndex(BSONObj keyPattern, bool multikey = false);
- void addIndex(BSONObj keyPattern, bool multikey, bool sparse);
+ void addIndex(BSONObj keyPattern, bool multikey, bool sparse);
- void addIndex(BSONObj keyPattern, bool multikey, bool sparse, bool unique);
+ void addIndex(BSONObj keyPattern, bool multikey, bool sparse, bool unique);
- void addIndex(BSONObj keyPattern, BSONObj infoObj);
+ void addIndex(BSONObj keyPattern, BSONObj infoObj);
- //
- // Execute planner.
- //
+ //
+ // Execute planner.
+ //
- void runQuery(BSONObj query);
+ void runQuery(BSONObj query);
- void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj);
+ void runQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj);
- void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit);
+ void runQuerySkipLimit(const BSONObj& query, long long skip, long long limit);
- void runQueryHint(const BSONObj& query, const BSONObj& hint);
+ void runQueryHint(const BSONObj& query, const BSONObj& hint);
- void runQuerySortProjSkipLimit(const BSONObj& query,
+ void runQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit);
+
+ void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint);
+
+ void runQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj);
+
+ void runQuerySortProjSkipLimitHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long limit);
+ long long limit,
+ const BSONObj& hint);
- void runQuerySortHint(const BSONObj& query, const BSONObj& sort, const BSONObj& hint);
+ void runQuerySnapshot(const BSONObj& query);
- void runQueryHintMinMax(const BSONObj& query,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj);
+ void runQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot);
- void runQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint);
+ //
+ // Same as runQuery* functions except we expect a failed status from the planning stage.
+ //
- void runQuerySnapshot(const BSONObj& query);
+ void runInvalidQuery(const BSONObj& query);
- void runQueryFull(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot);
+ void runInvalidQuerySortProj(const BSONObj& query, const BSONObj& sort, const BSONObj& proj);
- //
- // Same as runQuery* functions except we expect a failed status from the planning stage.
- //
+ void runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit);
- void runInvalidQuery(const BSONObj& query);
+ void runInvalidQueryHint(const BSONObj& query, const BSONObj& hint);
- void runInvalidQuerySortProj(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj);
+ void runInvalidQueryHintMinMax(const BSONObj& query,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj);
- void runInvalidQuerySortProjSkipLimit(const BSONObj& query,
+ void runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip,
- long long limit);
-
- void runInvalidQueryHint(const BSONObj& query, const BSONObj& hint);
-
- void runInvalidQueryHintMinMax(const BSONObj& query,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj);
-
- void runInvalidQuerySortProjSkipLimitHint(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint);
-
- void runInvalidQueryFull(const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& proj,
- long long skip,
- long long limit,
- const BSONObj& hint,
- const BSONObj& minObj,
- const BSONObj& maxObj,
- bool snapshot);
-
- /**
-         * The other runQuery* methods run the query as though it is an OP_QUERY style find. This
- * version goes through find command parsing, and will be planned like a find command.
- */
- void runQueryAsCommand(const BSONObj& cmdObj);
-
- //
- // Introspect solutions.
- //
-
- size_t getNumSolutions() const;
-
- void dumpSolutions() const;
-
- void dumpSolutions(mongoutils::str::stream& ost) const;
-
- /**
-         * Checks the number of solutions. Generates an assertion message
-         * containing a solution dump if applicable.
- */
- void assertNumSolutions(size_t expectSolutions) const;
-
- size_t numSolutionMatches(const std::string& solnJson) const;
-
- /**
- * Verifies that the solution tree represented in json by 'solnJson' is
- * one of the solutions generated by QueryPlanner.
- *
- * The number of expected matches, 'numMatches', could be greater than
- * 1 if solutions differ only by the pattern of index tags on a filter.
- */
- void assertSolutionExists(const std::string& solnJson, size_t numMatches = 1) const;
-
- /**
- * Given a vector of string-based solution tree representations 'solnStrs',
- * verifies that the query planner generated exactly one of these solutions.
- */
- void assertHasOneSolutionOf(const std::vector<std::string>& solnStrs) const;
-
- //
- // Data members.
- //
-
- static const char* ns;
-
- BSONObj queryObj;
- std::unique_ptr<CanonicalQuery> cq;
- QueryPlannerParams params;
- OwnedPointerVector<QuerySolution> solns;
- };
-
-} // namespace mongo
+ long long limit,
+ const BSONObj& hint);
+
+ void runInvalidQueryFull(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& proj,
+ long long skip,
+ long long limit,
+ const BSONObj& hint,
+ const BSONObj& minObj,
+ const BSONObj& maxObj,
+ bool snapshot);
+
+ /**
+     * The other runQuery* methods run the query as though it is an OP_QUERY style find. This
+ * version goes through find command parsing, and will be planned like a find command.
+ */
+ void runQueryAsCommand(const BSONObj& cmdObj);
+
+ //
+ // Introspect solutions.
+ //
+
+ size_t getNumSolutions() const;
+
+ void dumpSolutions() const;
+
+ void dumpSolutions(mongoutils::str::stream& ost) const;
+
+ /**
+     * Checks the number of solutions. Generates an assertion message
+     * containing a solution dump if applicable.
+ */
+ void assertNumSolutions(size_t expectSolutions) const;
+
+ size_t numSolutionMatches(const std::string& solnJson) const;
+
+ /**
+ * Verifies that the solution tree represented in json by 'solnJson' is
+ * one of the solutions generated by QueryPlanner.
+ *
+ * The number of expected matches, 'numMatches', could be greater than
+ * 1 if solutions differ only by the pattern of index tags on a filter.
+ */
+ void assertSolutionExists(const std::string& solnJson, size_t numMatches = 1) const;
+
+ /**
+ * Given a vector of string-based solution tree representations 'solnStrs',
+ * verifies that the query planner generated exactly one of these solutions.
+ */
+ void assertHasOneSolutionOf(const std::vector<std::string>& solnStrs) const;
+
+ //
+ // Data members.
+ //
+
+ static const char* ns;
+
+ BSONObj queryObj;
+ std::unique_ptr<CanonicalQuery> cq;
+ QueryPlannerParams params;
+ OwnedPointerVector<QuerySolution> solns;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index d81fc0f27e9..6ff080a767f 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -43,465 +43,520 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- using std::string;
+using std::string;
- bool filterMatches(const BSONObj& testFilter,
- const QuerySolutionNode* trueFilterNode) {
- if (NULL == trueFilterNode->filter) { return false; }
- StatusWithMatchExpression swme = MatchExpressionParser::parse(testFilter);
- if (!swme.isOK()) {
- return false;
- }
- const std::unique_ptr<MatchExpression> root(swme.getValue());
- CanonicalQuery::sortTree(root.get());
- std::unique_ptr<MatchExpression> trueFilter(trueFilterNode->filter->shallowClone());
- CanonicalQuery::sortTree(trueFilter.get());
- return trueFilter->equivalent(root.get());
+bool filterMatches(const BSONObj& testFilter, const QuerySolutionNode* trueFilterNode) {
+ if (NULL == trueFilterNode->filter) {
+ return false;
}
-
- void appendIntervalBound(BSONObjBuilder& bob, BSONElement& el) {
- if (el.type() == String) {
- std::string data = el.String();
- if (data == "MaxKey") {
- bob.appendMaxKey("");
- }
- else if (data == "MinKey") {
- bob.appendMinKey("");
- }
- else {
- bob.appendAs(el, "");
- }
- }
- else {
+ StatusWithMatchExpression swme = MatchExpressionParser::parse(testFilter);
+ if (!swme.isOK()) {
+ return false;
+ }
+ const std::unique_ptr<MatchExpression> root(swme.getValue());
+ CanonicalQuery::sortTree(root.get());
+ std::unique_ptr<MatchExpression> trueFilter(trueFilterNode->filter->shallowClone());
+ CanonicalQuery::sortTree(trueFilter.get());
+ return trueFilter->equivalent(root.get());
+}
+
+void appendIntervalBound(BSONObjBuilder& bob, BSONElement& el) {
+ if (el.type() == String) {
+ std::string data = el.String();
+ if (data == "MaxKey") {
+ bob.appendMaxKey("");
+ } else if (data == "MinKey") {
+ bob.appendMinKey("");
+ } else {
bob.appendAs(el, "");
}
+ } else {
+ bob.appendAs(el, "");
}
+}
- bool intervalMatches(const BSONObj& testInt, const Interval trueInt) {
- BSONObjIterator it(testInt);
- if (!it.more()) { return false; }
- BSONElement low = it.next();
- if (!it.more()) { return false; }
- BSONElement high = it.next();
- if (!it.more()) { return false; }
- bool startInclusive = it.next().Bool();
- if (!it.more()) { return false; }
- bool endInclusive = it.next().Bool();
- if (it.more()) { return false; }
-
- BSONObjBuilder bob;
- appendIntervalBound(bob, low);
- appendIntervalBound(bob, high);
- Interval toCompare(bob.obj(), startInclusive, endInclusive);
-
- return Interval::INTERVAL_EQUALS == trueInt.compare(toCompare);
+bool intervalMatches(const BSONObj& testInt, const Interval trueInt) {
+ BSONObjIterator it(testInt);
+ if (!it.more()) {
+ return false;
+ }
+ BSONElement low = it.next();
+ if (!it.more()) {
+ return false;
+ }
+ BSONElement high = it.next();
+ if (!it.more()) {
+ return false;
+ }
+ bool startInclusive = it.next().Bool();
+ if (!it.more()) {
+ return false;
+ }
+ bool endInclusive = it.next().Bool();
+ if (it.more()) {
+ return false;
}
- /**
- * Returns whether the BSON representation of the index bounds in
- * 'testBounds' matches 'trueBounds'.
- *
- * 'testBounds' should be of the following format:
- * {<field 1>: <oil 1>, <field 2>: <oil 2>, ...}
- * Each ordered interval list (e.g. <oil 1>) is an array of arrays of
- * the format:
- * [[<low 1>,<high 1>,<lowInclusive 1>,<highInclusive 1>], ...]
- *
- * For example,
- * {a: [[1,2,true,false], [3,4,false,true]], b: [[-Infinity, Infinity]]}
- * Means that the index bounds on field 'a' consist of the two intervals
- * [1, 2) and (3, 4] and the index bounds on field 'b' are [-Infinity, Infinity].
- */
- bool boundsMatch(const BSONObj& testBounds, const IndexBounds trueBounds) {
- // Iterate over the fields on which we have index bounds.
- BSONObjIterator fieldIt(testBounds);
- int fieldItCount = 0;
- while (fieldIt.more()) {
- BSONElement arrEl = fieldIt.next();
- if (arrEl.type() != Array) {
+ BSONObjBuilder bob;
+ appendIntervalBound(bob, low);
+ appendIntervalBound(bob, high);
+ Interval toCompare(bob.obj(), startInclusive, endInclusive);
+
+ return Interval::INTERVAL_EQUALS == trueInt.compare(toCompare);
+}
+
+/**
+ * Returns whether the BSON representation of the index bounds in
+ * 'testBounds' matches 'trueBounds'.
+ *
+ * 'testBounds' should be of the following format:
+ * {<field 1>: <oil 1>, <field 2>: <oil 2>, ...}
+ * Each ordered interval list (e.g. <oil 1>) is an array of arrays of
+ * the format:
+ * [[<low 1>,<high 1>,<lowInclusive 1>,<highInclusive 1>], ...]
+ *
+ * For example,
+ * {a: [[1,2,true,false], [3,4,false,true]], b: [[-Infinity, Infinity]]}
+ * Means that the index bounds on field 'a' consist of the two intervals
+ * [1, 2) and (3, 4] and the index bounds on field 'b' are [-Infinity, Infinity].
+ */
+bool boundsMatch(const BSONObj& testBounds, const IndexBounds trueBounds) {
+ // Iterate over the fields on which we have index bounds.
+ BSONObjIterator fieldIt(testBounds);
+ int fieldItCount = 0;
+ while (fieldIt.more()) {
+ BSONElement arrEl = fieldIt.next();
+ if (arrEl.type() != Array) {
+ return false;
+ }
+ // Iterate over an ordered interval list for
+ // a particular field.
+ BSONObjIterator oilIt(arrEl.Obj());
+ int oilItCount = 0;
+ while (oilIt.more()) {
+ BSONElement intervalEl = oilIt.next();
+ if (intervalEl.type() != Array) {
return false;
}
- // Iterate over an ordered interval list for
- // a particular field.
- BSONObjIterator oilIt(arrEl.Obj());
- int oilItCount = 0;
- while (oilIt.more()) {
- BSONElement intervalEl = oilIt.next();
- if (intervalEl.type() != Array) {
- return false;
- }
- Interval trueInt = trueBounds.getInterval(fieldItCount, oilItCount);
- if (!intervalMatches(intervalEl.Obj(), trueInt)) {
- return false;
- }
- ++oilItCount;
+ Interval trueInt = trueBounds.getInterval(fieldItCount, oilItCount);
+ if (!intervalMatches(intervalEl.Obj(), trueInt)) {
+ return false;
}
- ++fieldItCount;
+ ++oilItCount;
}
-
- return true;
+ ++fieldItCount;
}
-} // namespace
+ return true;
+}
+
+} // namespace
namespace mongo {
- /**
- * Looks in the children stored in the 'nodes' field of 'testSoln'
-     * to see if they match the 'children' field of 'trueSoln'.
- *
- * This does an unordered comparison, i.e. childrenMatch returns
- * true as long as the set of subtrees in testSoln's 'nodes' matches
- * the set of subtrees in trueSoln's 'children' vector.
- */
- static bool childrenMatch(const BSONObj& testSoln, const QuerySolutionNode* trueSoln) {
- BSONElement children = testSoln["nodes"];
- if (children.eoo() || !children.isABSONObj()) { return false; }
-
- // The order of the children array in testSoln might not match
- // the order in trueSoln, so we have to check all combos with
- // these nested loops.
- BSONObjIterator i(children.Obj());
- while (i.more()) {
- BSONElement child = i.next();
- if (child.eoo() || !child.isABSONObj()) { return false; }
-
- // try to match against one of the QuerySolutionNode's children
- bool found = false;
- for (size_t j = 0; j < trueSoln->children.size(); ++j) {
- if (QueryPlannerTestLib::solutionMatches(child.Obj(), trueSoln->children[j])) {
- found = true;
- break;
- }
- }
+/**
+ * Looks in the children stored in the 'nodes' field of 'testSoln'
+ * to see if they match the 'children' field of 'trueSoln'.
+ *
+ * This does an unordered comparison, i.e. childrenMatch returns
+ * true as long as the set of subtrees in testSoln's 'nodes' matches
+ * the set of subtrees in trueSoln's 'children' vector.
+ */
+static bool childrenMatch(const BSONObj& testSoln, const QuerySolutionNode* trueSoln) {
+ BSONElement children = testSoln["nodes"];
+ if (children.eoo() || !children.isABSONObj()) {
+ return false;
+ }
- // we couldn't match child
- if (!found) { return false; }
+ // The order of the children array in testSoln might not match
+ // the order in trueSoln, so we have to check all combos with
+ // these nested loops.
+ BSONObjIterator i(children.Obj());
+ while (i.more()) {
+ BSONElement child = i.next();
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- return true;
+ // try to match against one of the QuerySolutionNode's children
+ bool found = false;
+ for (size_t j = 0; j < trueSoln->children.size(); ++j) {
+ if (QueryPlannerTestLib::solutionMatches(child.Obj(), trueSoln->children[j])) {
+ found = true;
+ break;
+ }
+ }
+
+ // we couldn't match child
+ if (!found) {
+ return false;
+ }
}
- // static
- bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
- const QuerySolutionNode* trueSoln) {
- //
- // leaf nodes
- //
- if (STAGE_COLLSCAN == trueSoln->getType()) {
- const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(trueSoln);
- BSONElement el = testSoln["cscan"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj csObj = el.Obj();
-
- BSONElement dir = csObj["dir"];
- if (dir.eoo() || !dir.isNumber()) { return false; }
- if (dir.numberInt() != csn->direction) { return false; }
-
- BSONElement filter = csObj["filter"];
- if (filter.eoo()) {
- return true;
- }
- else if (filter.isNull()) {
- return NULL == csn->filter;
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- return filterMatches(filter.Obj(), trueSoln);
+ return true;
+}
+
+// static
+bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
+ const QuerySolutionNode* trueSoln) {
+ //
+ // leaf nodes
+ //
+ if (STAGE_COLLSCAN == trueSoln->getType()) {
+ const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(trueSoln);
+ BSONElement el = testSoln["cscan"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
}
- else if (STAGE_IXSCAN == trueSoln->getType()) {
- const IndexScanNode* ixn = static_cast<const IndexScanNode*>(trueSoln);
- BSONElement el = testSoln["ixscan"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj ixscanObj = el.Obj();
+ BSONObj csObj = el.Obj();
- BSONElement pattern = ixscanObj["pattern"];
- if (pattern.eoo() || !pattern.isABSONObj()) { return false; }
- if (pattern.Obj() != ixn->indexKeyPattern) { return false; }
+ BSONElement dir = csObj["dir"];
+ if (dir.eoo() || !dir.isNumber()) {
+ return false;
+ }
+ if (dir.numberInt() != csn->direction) {
+ return false;
+ }
- BSONElement bounds = ixscanObj["bounds"];
- if (!bounds.eoo()) {
- if (!bounds.isABSONObj()) {
- return false;
- }
- else if (!boundsMatch(bounds.Obj(), ixn->bounds)) {
- return false;
- }
- }
+ BSONElement filter = csObj["filter"];
+ if (filter.eoo()) {
+ return true;
+ } else if (filter.isNull()) {
+ return NULL == csn->filter;
+ } else if (!filter.isABSONObj()) {
+ return false;
+ }
+ return filterMatches(filter.Obj(), trueSoln);
+ } else if (STAGE_IXSCAN == trueSoln->getType()) {
+ const IndexScanNode* ixn = static_cast<const IndexScanNode*>(trueSoln);
+ BSONElement el = testSoln["ixscan"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj ixscanObj = el.Obj();
- BSONElement dir = ixscanObj["dir"];
- if (!dir.eoo() && NumberInt == dir.type()) {
- if (dir.numberInt() != ixn->direction) {
- return false;
- }
- }
+ BSONElement pattern = ixscanObj["pattern"];
+ if (pattern.eoo() || !pattern.isABSONObj()) {
+ return false;
+ }
+ if (pattern.Obj() != ixn->indexKeyPattern) {
+ return false;
+ }
- BSONElement filter = ixscanObj["filter"];
- if (filter.eoo()) {
- return true;
- }
- else if (filter.isNull()) {
- return NULL == ixn->filter;
+ BSONElement bounds = ixscanObj["bounds"];
+ if (!bounds.eoo()) {
+ if (!bounds.isABSONObj()) {
+ return false;
+ } else if (!boundsMatch(bounds.Obj(), ixn->bounds)) {
+ return false;
}
- else if (!filter.isABSONObj()) {
+ }
+
+ BSONElement dir = ixscanObj["dir"];
+ if (!dir.eoo() && NumberInt == dir.type()) {
+ if (dir.numberInt() != ixn->direction) {
return false;
}
- return filterMatches(filter.Obj(), trueSoln);
- }
- else if (STAGE_GEO_NEAR_2D == trueSoln->getType()) {
- const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(trueSoln);
- BSONElement el = testSoln["geoNear2d"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj geoObj = el.Obj();
- return geoObj == node->indexKeyPattern;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == trueSoln->getType()) {
- const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(trueSoln);
- BSONElement el = testSoln["geoNear2dsphere"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj geoObj = el.Obj();
- return geoObj == node->indexKeyPattern;
- }
- else if (STAGE_TEXT == trueSoln->getType()) {
- // {text: {search: "somestr", language: "something", filter: {blah: 1}}}
- const TextNode* node = static_cast<const TextNode*>(trueSoln);
- BSONElement el = testSoln["text"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj textObj = el.Obj();
-
- BSONElement searchElt = textObj["search"];
- if (!searchElt.eoo()) {
- if (searchElt.String() != node->query) {
- return false;
- }
+ }
+
+ BSONElement filter = ixscanObj["filter"];
+ if (filter.eoo()) {
+ return true;
+ } else if (filter.isNull()) {
+ return NULL == ixn->filter;
+ } else if (!filter.isABSONObj()) {
+ return false;
+ }
+ return filterMatches(filter.Obj(), trueSoln);
+ } else if (STAGE_GEO_NEAR_2D == trueSoln->getType()) {
+ const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(trueSoln);
+ BSONElement el = testSoln["geoNear2d"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj geoObj = el.Obj();
+ return geoObj == node->indexKeyPattern;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == trueSoln->getType()) {
+ const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(trueSoln);
+ BSONElement el = testSoln["geoNear2dsphere"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj geoObj = el.Obj();
+ return geoObj == node->indexKeyPattern;
+ } else if (STAGE_TEXT == trueSoln->getType()) {
+ // {text: {search: "somestr", language: "something", filter: {blah: 1}}}
+ const TextNode* node = static_cast<const TextNode*>(trueSoln);
+ BSONElement el = testSoln["text"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj textObj = el.Obj();
+
+ BSONElement searchElt = textObj["search"];
+ if (!searchElt.eoo()) {
+ if (searchElt.String() != node->query) {
+ return false;
}
+ }
- BSONElement languageElt = textObj["language"];
- if (!languageElt.eoo()) {
- if (languageElt.String() != node->language) {
- return false;
- }
+ BSONElement languageElt = textObj["language"];
+ if (!languageElt.eoo()) {
+ if (languageElt.String() != node->language) {
+ return false;
}
+ }
- BSONElement caseSensitiveElt = textObj["caseSensitive"];
- if (!caseSensitiveElt.eoo()) {
- if (caseSensitiveElt.trueValue() != node->caseSensitive) {
- return false;
- }
+ BSONElement caseSensitiveElt = textObj["caseSensitive"];
+ if (!caseSensitiveElt.eoo()) {
+ if (caseSensitiveElt.trueValue() != node->caseSensitive) {
+ return false;
}
+ }
- BSONElement indexPrefix = textObj["prefix"];
- if (!indexPrefix.eoo()) {
- if (!indexPrefix.isABSONObj()) {
- return false;
- }
+ BSONElement indexPrefix = textObj["prefix"];
+ if (!indexPrefix.eoo()) {
+ if (!indexPrefix.isABSONObj()) {
+ return false;
+ }
- if (0 != indexPrefix.Obj().woCompare(node->indexPrefix)) {
- return false;
- }
+ if (0 != indexPrefix.Obj().woCompare(node->indexPrefix)) {
+ return false;
}
+ }
- BSONElement filter = textObj["filter"];
- if (!filter.eoo()) {
- if (filter.isNull()) {
- if (NULL != node->filter) { return false; }
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- else if (!filterMatches(filter.Obj(), trueSoln)) {
+ BSONElement filter = textObj["filter"];
+ if (!filter.eoo()) {
+ if (filter.isNull()) {
+ if (NULL != node->filter) {
return false;
}
+ } else if (!filter.isABSONObj()) {
+ return false;
+ } else if (!filterMatches(filter.Obj(), trueSoln)) {
+ return false;
}
-
- return true;
}
- //
- // internal nodes
- //
- if (STAGE_FETCH == trueSoln->getType()) {
- const FetchNode* fn = static_cast<const FetchNode*>(trueSoln);
+ return true;
+ }
- BSONElement el = testSoln["fetch"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj fetchObj = el.Obj();
+ //
+ // internal nodes
+ //
+ if (STAGE_FETCH == trueSoln->getType()) {
+ const FetchNode* fn = static_cast<const FetchNode*>(trueSoln);
- BSONElement filter = fetchObj["filter"];
- if (!filter.eoo()) {
- if (filter.isNull()) {
- if (NULL != fn->filter) { return false; }
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- else if (!filterMatches(filter.Obj(), trueSoln)) {
- return false;
- }
- }
+ BSONElement el = testSoln["fetch"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj fetchObj = el.Obj();
- BSONElement child = fetchObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
- return solutionMatches(child.Obj(), fn->children[0]);
- }
- else if (STAGE_OR == trueSoln->getType()) {
- const OrNode * orn = static_cast<const OrNode*>(trueSoln);
- BSONElement el = testSoln["or"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj orObj = el.Obj();
- return childrenMatch(orObj, orn);
- }
- else if (STAGE_AND_HASH == trueSoln->getType()) {
- const AndHashNode* ahn = static_cast<const AndHashNode*>(trueSoln);
- BSONElement el = testSoln["andHash"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj andHashObj = el.Obj();
-
- BSONElement filter = andHashObj["filter"];
- if (!filter.eoo()) {
- if (filter.isNull()) {
- if (NULL != ahn->filter) { return false; }
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- else if (!filterMatches(filter.Obj(), trueSoln)) {
+ BSONElement filter = fetchObj["filter"];
+ if (!filter.eoo()) {
+ if (filter.isNull()) {
+ if (NULL != fn->filter) {
return false;
}
+ } else if (!filter.isABSONObj()) {
+ return false;
+ } else if (!filterMatches(filter.Obj(), trueSoln)) {
+ return false;
}
+ }
- return childrenMatch(andHashObj, ahn);
+ BSONElement child = fetchObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
+ }
+ return solutionMatches(child.Obj(), fn->children[0]);
+ } else if (STAGE_OR == trueSoln->getType()) {
+ const OrNode* orn = static_cast<const OrNode*>(trueSoln);
+ BSONElement el = testSoln["or"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj orObj = el.Obj();
+ return childrenMatch(orObj, orn);
+ } else if (STAGE_AND_HASH == trueSoln->getType()) {
+ const AndHashNode* ahn = static_cast<const AndHashNode*>(trueSoln);
+ BSONElement el = testSoln["andHash"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
}
- else if (STAGE_AND_SORTED == trueSoln->getType()) {
- const AndSortedNode* asn = static_cast<const AndSortedNode*>(trueSoln);
- BSONElement el = testSoln["andSorted"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj andSortedObj = el.Obj();
+ BSONObj andHashObj = el.Obj();
- BSONElement filter = andSortedObj["filter"];
- if (!filter.eoo()) {
- if (filter.isNull()) {
- if (NULL != asn->filter) { return false; }
- }
- else if (!filter.isABSONObj()) {
- return false;
- }
- else if (!filterMatches(filter.Obj(), trueSoln)) {
+ BSONElement filter = andHashObj["filter"];
+ if (!filter.eoo()) {
+ if (filter.isNull()) {
+ if (NULL != ahn->filter) {
return false;
}
+ } else if (!filter.isABSONObj()) {
+ return false;
+ } else if (!filterMatches(filter.Obj(), trueSoln)) {
+ return false;
}
-
- return childrenMatch(andSortedObj, asn);
}
- else if (STAGE_PROJECTION == trueSoln->getType()) {
- const ProjectionNode* pn = static_cast<const ProjectionNode*>(trueSoln);
- BSONElement el = testSoln["proj"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj projObj = el.Obj();
+ return childrenMatch(andHashObj, ahn);
+ } else if (STAGE_AND_SORTED == trueSoln->getType()) {
+ const AndSortedNode* asn = static_cast<const AndSortedNode*>(trueSoln);
+ BSONElement el = testSoln["andSorted"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj andSortedObj = el.Obj();
- BSONElement projType = projObj["type"];
- if (!projType.eoo()) {
- string projTypeStr = projType.str();
- if (!((pn->projType == ProjectionNode::DEFAULT && projTypeStr == "default") ||
- (pn->projType == ProjectionNode::SIMPLE_DOC && projTypeStr == "simple") ||
- (pn->projType == ProjectionNode::COVERED_ONE_INDEX &&
- projTypeStr == "coveredIndex"))) {
+ BSONElement filter = andSortedObj["filter"];
+ if (!filter.eoo()) {
+ if (filter.isNull()) {
+ if (NULL != asn->filter) {
return false;
}
+ } else if (!filter.isABSONObj()) {
+ return false;
+ } else if (!filterMatches(filter.Obj(), trueSoln)) {
+ return false;
}
+ }
- BSONElement spec = projObj["spec"];
- if (spec.eoo() || !spec.isABSONObj()) { return false; }
- BSONElement child = projObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ return childrenMatch(andSortedObj, asn);
+ } else if (STAGE_PROJECTION == trueSoln->getType()) {
+ const ProjectionNode* pn = static_cast<const ProjectionNode*>(trueSoln);
- return (spec.Obj() == pn->projection)
- && solutionMatches(child.Obj(), pn->children[0]);
+ BSONElement el = testSoln["proj"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj projObj = el.Obj();
+
+ BSONElement projType = projObj["type"];
+ if (!projType.eoo()) {
+ string projTypeStr = projType.str();
+ if (!((pn->projType == ProjectionNode::DEFAULT && projTypeStr == "default") ||
+ (pn->projType == ProjectionNode::SIMPLE_DOC && projTypeStr == "simple") ||
+ (pn->projType == ProjectionNode::COVERED_ONE_INDEX &&
+ projTypeStr == "coveredIndex"))) {
+ return false;
+ }
}
- else if (STAGE_SORT == trueSoln->getType()) {
- const SortNode* sn = static_cast<const SortNode*>(trueSoln);
- BSONElement el = testSoln["sort"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj sortObj = el.Obj();
- BSONElement patternEl = sortObj["pattern"];
- if (patternEl.eoo() || !patternEl.isABSONObj()) { return false; }
- BSONElement limitEl = sortObj["limit"];
- if (!limitEl.isNumber()) { return false; }
- BSONElement child = sortObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ BSONElement spec = projObj["spec"];
+ if (spec.eoo() || !spec.isABSONObj()) {
+ return false;
+ }
+ BSONElement child = projObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
+ }
- size_t expectedLimit = limitEl.numberInt();
- return (patternEl.Obj() == sn->pattern)
- && (expectedLimit == sn->limit)
- && solutionMatches(child.Obj(), sn->children[0]);
+ return (spec.Obj() == pn->projection) && solutionMatches(child.Obj(), pn->children[0]);
+ } else if (STAGE_SORT == trueSoln->getType()) {
+ const SortNode* sn = static_cast<const SortNode*>(trueSoln);
+ BSONElement el = testSoln["sort"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj sortObj = el.Obj();
+
+ BSONElement patternEl = sortObj["pattern"];
+ if (patternEl.eoo() || !patternEl.isABSONObj()) {
+ return false;
}
- else if (STAGE_SORT_MERGE == trueSoln->getType()) {
- const MergeSortNode* msn = static_cast<const MergeSortNode*>(trueSoln);
- BSONElement el = testSoln["mergeSort"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj mergeSortObj = el.Obj();
- return childrenMatch(mergeSortObj, msn);
+ BSONElement limitEl = sortObj["limit"];
+ if (!limitEl.isNumber()) {
+ return false;
+ }
+ BSONElement child = sortObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- else if (STAGE_SKIP == trueSoln->getType()) {
- const SkipNode* sn = static_cast<const SkipNode*>(trueSoln);
- BSONElement el = testSoln["skip"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj sortObj = el.Obj();
- BSONElement skipEl = sortObj["n"];
- if (!skipEl.isNumber()) { return false; }
- BSONElement child = sortObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ size_t expectedLimit = limitEl.numberInt();
+ return (patternEl.Obj() == sn->pattern) && (expectedLimit == sn->limit) &&
+ solutionMatches(child.Obj(), sn->children[0]);
+ } else if (STAGE_SORT_MERGE == trueSoln->getType()) {
+ const MergeSortNode* msn = static_cast<const MergeSortNode*>(trueSoln);
+ BSONElement el = testSoln["mergeSort"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj mergeSortObj = el.Obj();
+ return childrenMatch(mergeSortObj, msn);
+ } else if (STAGE_SKIP == trueSoln->getType()) {
+ const SkipNode* sn = static_cast<const SkipNode*>(trueSoln);
+ BSONElement el = testSoln["skip"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj sortObj = el.Obj();
- return (skipEl.numberInt() == sn->skip)
- && solutionMatches(child.Obj(), sn->children[0]);
+ BSONElement skipEl = sortObj["n"];
+ if (!skipEl.isNumber()) {
+ return false;
+ }
+ BSONElement child = sortObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- else if (STAGE_LIMIT == trueSoln->getType()) {
- const LimitNode* ln = static_cast<const LimitNode*>(trueSoln);
- BSONElement el = testSoln["limit"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj sortObj = el.Obj();
- BSONElement limitEl = sortObj["n"];
- if (!limitEl.isNumber()) { return false; }
- BSONElement child = sortObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ return (skipEl.numberInt() == sn->skip) && solutionMatches(child.Obj(), sn->children[0]);
+ } else if (STAGE_LIMIT == trueSoln->getType()) {
+ const LimitNode* ln = static_cast<const LimitNode*>(trueSoln);
+ BSONElement el = testSoln["limit"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj sortObj = el.Obj();
- return (limitEl.numberInt() == ln->limit)
- && solutionMatches(child.Obj(), ln->children[0]);
+ BSONElement limitEl = sortObj["n"];
+ if (!limitEl.isNumber()) {
+ return false;
+ }
+ BSONElement child = sortObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- else if (STAGE_KEEP_MUTATIONS == trueSoln->getType()) {
- const KeepMutationsNode* kn = static_cast<const KeepMutationsNode*>(trueSoln);
- BSONElement el = testSoln["keep"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj keepObj = el.Obj();
+ return (limitEl.numberInt() == ln->limit) && solutionMatches(child.Obj(), ln->children[0]);
+ } else if (STAGE_KEEP_MUTATIONS == trueSoln->getType()) {
+ const KeepMutationsNode* kn = static_cast<const KeepMutationsNode*>(trueSoln);
- // Doesn't have any parameters really.
- BSONElement child = keepObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ BSONElement el = testSoln["keep"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj keepObj = el.Obj();
- return solutionMatches(child.Obj(), kn->children[0]);
+ // Doesn't have any parameters really.
+ BSONElement child = keepObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- else if (STAGE_SHARDING_FILTER == trueSoln->getType()) {
- const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(trueSoln);
- BSONElement el = testSoln["sharding_filter"];
- if (el.eoo() || !el.isABSONObj()) { return false; }
- BSONObj keepObj = el.Obj();
+ return solutionMatches(child.Obj(), kn->children[0]);
+ } else if (STAGE_SHARDING_FILTER == trueSoln->getType()) {
+ const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(trueSoln);
- BSONElement child = keepObj["node"];
- if (child.eoo() || !child.isABSONObj()) { return false; }
+ BSONElement el = testSoln["sharding_filter"];
+ if (el.eoo() || !el.isABSONObj()) {
+ return false;
+ }
+ BSONObj keepObj = el.Obj();
- return solutionMatches(child.Obj(), fn->children[0]);
+ BSONElement child = keepObj["node"];
+ if (child.eoo() || !child.isABSONObj()) {
+ return false;
}
- return false;
+ return solutionMatches(child.Obj(), fn->children[0]);
}
+ return false;
+}
+
} // namespace mongo
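
A rough, hedged illustration of the BSON "solution description" mini-language that solutionMatches() parses above: the stage name is the outer key, stage parameters sit inside it, and child stages hang off "node" (single child) or "nodes" (unordered set). The concrete field names and bounds below are invented for the example:

    // leaf stages
    fromjson("{cscan: {dir: 1, filter: {a: 1}}}");
    fromjson("{ixscan: {pattern: {a: 1}, bounds: {a: [[1, 1, true, true]]}, dir: 1}}");
    // internal stages wrap one child under 'node' ...
    fromjson("{fetch: {filter: null, node: {ixscan: {pattern: {a: 1}}}}}");
    // ... or several children under 'nodes', matched in any order by childrenMatch()
    fromjson("{or: {nodes: [{ixscan: {pattern: {a: 1}}}, {ixscan: {pattern: {b: 1}}}]}}");
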
diff --git a/src/mongo/db/query/query_planner_test_lib.h b/src/mongo/db/query/query_planner_test_lib.h
index f1d6e267512..0a1931d1828 100644
--- a/src/mongo/db/query/query_planner_test_lib.h
+++ b/src/mongo/db/query/query_planner_test_lib.h
@@ -41,20 +41,20 @@
namespace mongo {
- class QueryPlannerTestLib {
- public:
- /**
- * @param testSoln -- a BSON representation of a query solution
- * @param trueSoln -- the root node of a query solution tree
- *
- * Returns true if the BSON representation matches the actual
- * tree, otherwise returns false.
- */
- static bool solutionMatches(const BSONObj& testSoln, const QuerySolutionNode* trueSoln);
+class QueryPlannerTestLib {
+public:
+ /**
+ * @param testSoln -- a BSON representation of a query solution
+ * @param trueSoln -- the root node of a query solution tree
+ *
+ * Returns true if the BSON representation matches the actual
+ * tree, otherwise returns false.
+ */
+ static bool solutionMatches(const BSONObj& testSoln, const QuerySolutionNode* trueSoln);
- static bool solutionMatches(const std::string& testSoln, const QuerySolutionNode* trueSoln) {
- return solutionMatches(fromjson(testSoln), trueSoln);
- }
- };
+ static bool solutionMatches(const std::string& testSoln, const QuerySolutionNode* trueSoln) {
+ return solutionMatches(fromjson(testSoln), trueSoln);
+ }
+};
} // namespace mongo
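
A minimal usage sketch of the std::string overload declared above (the helper name here is hypothetical; in the test files that follow this path is normally reached through the QueryPlannerTest fixture's assertSolutionExists(), and the QuerySolutionNode* is whatever root the planner produced):

    void assertIsFetchOverIxscan(const QuerySolutionNode* root) {
        ASSERT(QueryPlannerTestLib::solutionMatches(
            "{fetch: {filter: null, node: {ixscan: {pattern: {a: 1}}}}}", root));
    }
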
diff --git a/src/mongo/db/query/query_planner_text_test.cpp b/src/mongo/db/query/query_planner_text_test.cpp
index c1cbc292d94..4512536768b 100644
--- a/src/mongo/db/query/query_planner_text_test.cpp
+++ b/src/mongo/db/query/query_planner_text_test.cpp
@@ -35,309 +35,371 @@
namespace {
- using namespace mongo;
-
- //
- // Text
- // Creating an FTS index {a:1, b:"text", c:1} actually
- // creates an index with spec {a:1, _fts: "text", _ftsx: 1, c:1}.
- // So, the latter is what we pass in to the planner.
- //
- // PS. You can also do {a:1, b:"text", d:"text", c:1} and it will create an index with the same
- // key pattern.
- //
-
- // Basic test that it works.
- TEST_F(QueryPlannerTest, SimpleText) {
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$text: {$search: 'blah'}}"));
-
- assertNumSolutions(1);
- assertSolutionExists("{text: {search: 'blah'}}");
- }
-
- // If you create an index {a:1, b: "text"} you can't use it for queries on just 'a'.
- TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{a:1}"));
-
- // No table scans allowed so there is no solution.
- assertNumSolutions(0);
- }
-
- // But if you create an index {a:1, b:"text"} you can use it if it has a pred on 'a'
- // and a text query.
- TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
-
- runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
- assertSolutionExists("{text: {prefix: {a:1}, search: 'blah'}}}}");
-
- // TODO: Do we want to $or a collection scan with a text search?
- // runQuery(fromjson("{$or: [{b:1}, {a:1, $text: {$search: 'blah'}}]}"));
- // assertNumSolutions(1);
-
- runQuery(fromjson("{$or: [{_id:1}, {a:1, $text: {$search: 'blah'}}]}"));
- assertNumSolutions(1);
- }
-
- // But the prefixes must be points.
- TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
-
- runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
-
- runInvalidQuery(fromjson("{$or: [{a:1}, {$text: {$search: 'blah'}}]}"));
- }
-
- // There can be more than one prefix, but they all require points.
- TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "b" << 1 << "_fts" << "text" << "_ftsx" << 1));
-
- // Both points.
- runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
- assertSolutionExists("{text: {prefix: {a:1, b:1}, search: 'blah'}}");
- assertNumSolutions(1);
-
- // Missing a.
- runInvalidQuery(fromjson("{b:1, $text:{$search: 'blah'}}"));
-
- // Missing b.
- runInvalidQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
-
- // a is not a point
- runInvalidQuery(fromjson("{a:{$gt: 1}, b:1, $text:{$search: 'blah'}}"));
-
- // b is not a point
- runInvalidQuery(fromjson("{a:1, b:{$gt: 1}, $text:{$search: 'blah'}}"));
- }
-
- // And, suffixes. They're optional and don't need to be points.
- TEST_F(QueryPlannerTest, SuffixOptional) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1 << "b" << 1));
-
- runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
- assertSolutionExists("{text: {prefix: {a:1}, search: 'blah'}}}}");
-
- runQuery(fromjson("{a:1, b:{$gt: 7}, $text:{$search: 'blah'}}"));
- assertSolutionExists("{text: {prefix: {a:1}, filter: {b: {$gt: 7}}, search: 'blah'}}}}");
- assertNumSolutions(1);
- }
-
- TEST_F(QueryPlannerTest, RemoveFromSubtree) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1 << "b" << 1));
-
- runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
-
- assertSolutionExists("{fetch: {filter: {$or:[{a:1},{b:7}]},"
- "node: {text: {prefix: {a:1}, search: 'blah'}}}}");
- }
-
- // Text is quite often multikey. None of the prefixes can be arrays, and suffixes are indexed
- // as-is, so we should compound even if it's multikey.
- TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "b" << 1 << "_fts" << "text" << "_ftsx" << 1), true);
-
- // Both points.
- runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
- assertSolutionExists("{text: {prefix: {a:1, b:1}, search: 'blah'}}");
- }
-
- TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1 << "b" << 1));
-
- // 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
- // index to satisfy it w/o the text query.
- runInvalidQuery(fromjson("{a:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
- }
-
- TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1 << "b" << 1));
-
- runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
- assertNumSolutions(1);
- }
-
- TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{text: {prefix: {a:3}, search: 'foo'}}");
- }
-
- // SERVER-15639: Test that predicates on index prefix fields which are not assigned to the index
- // prefix are correctly included in the solution node filter.
- TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPrefix) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{text: {prefix: {a: 1}, search: 'foo', filter: {a: 2}}}");
- }
-
- // SERVER-13039: Test that we don't generate invalid solutions when the TEXT node
- // is buried beneath a logical node.
- TEST_F(QueryPlannerTest, TextInsideOrBasic) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:0}, node: {or: {nodes: ["
- "{text: {search: 'foo'}}, "
- "{ixscan: {filter: null, pattern: {_id: 1}}}]}}}}");
- }
-
- // SERVER-13039
- TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$and: [{$or: [{a: 3}, {a: 4}]}, "
- "{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {$or: [{a: 3}, {a: 4}]}, node: "
- "{or: {nodes: ["
- "{text: {search: 'foo'}}, "
- "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
- }
-
- // SERVER-13039
- TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
- "{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}, bounds: "
- "{a: [[2,Infinity,false,true]]}}}, "
- "{fetch: {filter: {a:{$gt:3}}, node: "
- "{text: {search: 'foo'}}}}]}}}}");
- }
-
- // SERVER-13039
- TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("b" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{a: 1, $or: [{a:2}, {b:2}, "
- "{a: 1, $text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:1}, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}}, "
- "{fetch: {filter: {a:1}, node: {text: {search: 'foo'}}}}, "
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
- }
-
- // SERVER-13039
- TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
- "{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
- "{a: 6}]}], "
- "a: 5}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {a:5}, node: {or: {nodes: ["
- "{ixscan: {filter: null, pattern: {a: 1}}}, "
- "{fetch: {filter: {a:{$gt:3}}, node: {or: {nodes: ["
- "{text: {search: 'foo'}}, "
- "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}]}}}}");
- }
-
- // If only one branch of the $or can be indexed, then no indexed
- // solutions are generated, even if one branch is $text.
- TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
-
- assertNumSolutions(0);
- }
-
- // If the unindexable $or is not the one containing the $text predicate,
- // then we should still be able to generate an indexed solution.
- TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
- params.options = QueryPlannerParams::NO_TABLE_SCAN;
- addIndex(BSON("a" << 1));
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$and: [{$or: [{a: 1}, {b: 1}]}, "
- "{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: {$or:[{a:1},{b:1}]}, node: {or: {nodes: ["
- "{text: {search: 'foo'}}, "
- "{ixscan: {filter: null, pattern: {a:1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
-
- // Mandatory text index is used, and geo predicate becomes a filter.
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {node: {text: {search: 'foo'}}}}");
- }
-
- // SERVER-13960: $text beneath $or with exact predicates.
- TEST_F(QueryPlannerTest, OrTextExact) {
- addIndex(BSON("pre" << 1 << "_fts" << "text" << "_ftsx" << 1));
- addIndex(BSON("other" << 1));
- runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{text: {search: 'dave', prefix: {pre: 3}}},"
- "{ixscan: {filter: null, pattern: {other: 1}}}]}}}}");
- }
-
- // SERVER-13960: $text beneath $or with an inexact covered predicate.
- TEST_F(QueryPlannerTest, OrTextInexactCovered) {
- addIndex(BSON("pre" << 1 << "_fts" << "text" << "_ftsx" << 1));
- addIndex(BSON("other" << 1));
- runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
-
- assertNumSolutions(1U);
- assertSolutionExists("{fetch: {filter: null, node: {or: {nodes: ["
- "{text: {search: 'dave', prefix: {pre: 3}}},"
- "{ixscan: {filter: {$or: [{other: /bar/}]}, "
- "pattern: {other: 1}}}]}}}}");
- }
-
- TEST_F(QueryPlannerTest, TextCaseSensitive) {
- addIndex(BSON("_fts" << "text" << "_ftsx" << 1));
- runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
-
- assertNumSolutions(1);
- assertSolutionExists("{text: {search: 'blah', caseSensitive: true}}");
- }
+using namespace mongo;
+
+//
+// Text
+// Creating an FTS index {a:1, b:"text", c:1} actually
+// creates an index with spec {a:1, _fts: "text", _ftsx: 1, c:1}.
+// So, the latter is what we pass in to the planner.
+//
+// PS. You can also do {a:1, b:"text", d:"text", c:1} and it will create an index with the same
+// key pattern.
+//
+
+// Basic test that it works.
+TEST_F(QueryPlannerTest, SimpleText) {
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{$text: {$search: 'blah'}}"));
+
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {search: 'blah'}}");
+}
+
+// If you create an index {a:1, b: "text"} you can't use it for queries on just 'a'.
+TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{a:1}"));
+
+ // No table scans allowed so there is no solution.
+ assertNumSolutions(0);
+}
+
+// But if you create an index {a:1, b:"text"} you can use it if it has a pred on 'a'
+// and a text query.
+TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+
+ runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {prefix: {a:1}, search: 'blah'}}}}");
+
+ // TODO: Do we want to $or a collection scan with a text search?
+ // runQuery(fromjson("{$or: [{b:1}, {a:1, $text: {$search: 'blah'}}]}"));
+ // assertNumSolutions(1);
+
+ runQuery(fromjson("{$or: [{_id:1}, {a:1, $text: {$search: 'blah'}}]}"));
+ assertNumSolutions(1);
+}
+
+// But the prefixes must be points.
+TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
+
+ runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
+
+ runInvalidQuery(fromjson("{$or: [{a:1}, {$text: {$search: 'blah'}}]}"));
+}
+
+// There can be more than one prefix, but they all require points.
+TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+
+ // Both points.
+ runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
+ assertSolutionExists("{text: {prefix: {a:1, b:1}, search: 'blah'}}");
+ assertNumSolutions(1);
+
+ // Missing a.
+ runInvalidQuery(fromjson("{b:1, $text:{$search: 'blah'}}"));
+
+ // Missing b.
+ runInvalidQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
+
+ // a is not a point
+ runInvalidQuery(fromjson("{a:{$gt: 1}, b:1, $text:{$search: 'blah'}}"));
+
+ // b is not a point
+ runInvalidQuery(fromjson("{a:1, b:{$gt: 1}, $text:{$search: 'blah'}}"));
+}
+
+// And, suffixes. They're optional and don't need to be points.
+TEST_F(QueryPlannerTest, SuffixOptional) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {prefix: {a:1}, search: 'blah'}}}}");
+
+ runQuery(fromjson("{a:1, b:{$gt: 7}, $text:{$search: 'blah'}}"));
+ assertSolutionExists("{text: {prefix: {a:1}, filter: {b: {$gt: 7}}, search: 'blah'}}}}");
+ assertNumSolutions(1);
+}
+
+TEST_F(QueryPlannerTest, RemoveFromSubtree) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1 << "b" << 1));
+
+ runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+
+ assertSolutionExists(
+ "{fetch: {filter: {$or:[{a:1},{b:7}]},"
+ "node: {text: {prefix: {a:1}, search: 'blah'}}}}");
+}
+
+// Text is quite often multikey. None of the prefixes can be arrays, and suffixes are indexed
+// as-is, so we should compound even if it's multikey.
+TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1),
+ true);
+
+ // Both points.
+ runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {prefix: {a:1, b:1}, search: 'blah'}}");
+}
+
+TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1 << "b" << 1));
+
+ // 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
+ // index to satisfy it w/o the text query.
+ runInvalidQuery(fromjson("{a:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
+}
+
+TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1 << "b" << 1));
+
+ runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
+ assertNumSolutions(1);
+}
+
+TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{text: {prefix: {a:3}, search: 'foo'}}");
+}
+
+// SERVER-15639: Test that predicates on index prefix fields which are not assigned to the index
+// prefix are correctly included in the solution node filter.
+TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPrefix) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists("{text: {prefix: {a: 1}, search: 'foo', filter: {a: 2}}}");
+}
+
+// SERVER-13039: Test that we don't generate invalid solutions when the TEXT node
+// is buried beneath a logical node.
+TEST_F(QueryPlannerTest, TextInsideOrBasic) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:0}, node: {or: {nodes: ["
+ "{text: {search: 'foo'}}, "
+ "{ixscan: {filter: null, pattern: {_id: 1}}}]}}}}");
+}
+
+// SERVER-13039
+TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$and: [{$or: [{a: 3}, {a: 4}]}, "
+ "{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {$or: [{a: 3}, {a: 4}]}, node: "
+ "{or: {nodes: ["
+ "{text: {search: 'foo'}}, "
+ "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}");
+}
+
+// SERVER-13039
+TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$or: [{a: {$gt: 1, $gt: 2}}, "
+ "{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}, bounds: "
+ "{a: [[2,Infinity,false,true]]}}}, "
+ "{fetch: {filter: {a:{$gt:3}}, node: "
+ "{text: {search: 'foo'}}}}]}}}}");
+}
+
+// SERVER-13039
+TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("b" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{a: 1, $or: [{a:2}, {b:2}, "
+ "{a: 1, $text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:1}, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}}, "
+ "{fetch: {filter: {a:1}, node: {text: {search: 'foo'}}}}, "
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
+}
+
+// SERVER-13039
+TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$or: [{a: {$gt: 1, $gt: 2}}, "
+ "{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
+ "{a: 6}]}], "
+ "a: 5}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {a:5}, node: {or: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a: 1}}}, "
+ "{fetch: {filter: {a:{$gt:3}}, node: {or: {nodes: ["
+ "{text: {search: 'foo'}}, "
+ "{ixscan: {filter: null, pattern: {a: 1}}}]}}}}]}}}}");
+}
+
+// If only one branch of the $or can be indexed, then no indexed
+// solutions are generated, even if one branch is $text.
+TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
+
+ assertNumSolutions(0);
+}
+
+// If the unindexable $or is not the one containing the $text predicate,
+// then we should still be able to generate an indexed solution.
+TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
+ params.options = QueryPlannerParams::NO_TABLE_SCAN;
+ addIndex(BSON("a" << 1));
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$and: [{$or: [{a: 1}, {b: 1}]}, "
+ "{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: {$or:[{a:1},{b:1}]}, node: {or: {nodes: ["
+ "{text: {search: 'foo'}}, "
+ "{ixscan: {filter: null, pattern: {a:1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson(
+ "{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
+
+ // Mandatory text index is used, and geo predicate becomes a filter.
+ assertNumSolutions(1U);
+ assertSolutionExists("{fetch: {node: {text: {search: 'foo'}}}}");
+}
+
+// SERVER-13960: $text beneath $or with exact predicates.
+TEST_F(QueryPlannerTest, OrTextExact) {
+ addIndex(BSON("pre" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ addIndex(BSON("other" << 1));
+ runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{text: {search: 'dave', prefix: {pre: 3}}},"
+ "{ixscan: {filter: null, pattern: {other: 1}}}]}}}}");
+}
+
+// SERVER-13960: $text beneath $or with an inexact covered predicate.
+TEST_F(QueryPlannerTest, OrTextInexactCovered) {
+ addIndex(BSON("pre" << 1 << "_fts"
+ << "text"
+ << "_ftsx" << 1));
+ addIndex(BSON("other" << 1));
+ runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
+
+ assertNumSolutions(1U);
+ assertSolutionExists(
+ "{fetch: {filter: null, node: {or: {nodes: ["
+ "{text: {search: 'dave', prefix: {pre: 3}}},"
+ "{ixscan: {filter: {$or: [{other: /bar/}]}, "
+ "pattern: {other: 1}}}]}}}}");
+}
+
+TEST_F(QueryPlannerTest, TextCaseSensitive) {
+ addIndex(BSON("_fts"
+ << "text"
+ << "_ftsx" << 1));
+ runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
+
+ assertNumSolutions(1);
+ assertSolutionExists("{text: {search: 'blah', caseSensitive: true}}");
+}
} // namespace
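
A side-by-side sketch of the key-pattern translation described in the comment at the top of this test file (the field names a/b/c are placeholders): a text index declared over {a: 1, b: "text", c: 1} is stored with the text component folded into _fts/_ftsx, which is why these tests hand the already-translated pattern to addIndex():

    // what the user declares when creating the index
    BSONObj declared = BSON("a" << 1 << "b" << "text" << "c" << 1);
    // what is actually stored, and what the tests above pass to addIndex()
    BSONObj stored = BSON("a" << 1 << "_fts" << "text" << "_ftsx" << 1 << "c" << 1);
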
diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp
index c6b2f34fcb8..487f34ebcf9 100644
--- a/src/mongo/db/query/query_settings.cpp
+++ b/src/mongo/db/query/query_settings.cpp
@@ -32,132 +32,137 @@
namespace mongo {
- using std::vector;
-
- //
- // HintOverride
- //
-
- AllowedIndices::AllowedIndices(const std::vector<BSONObj>& indexKeyPatterns) {
- for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
- i != indexKeyPatterns.end(); ++i) {
- const BSONObj& indexKeyPattern = *i;
- this->indexKeyPatterns.push_back(indexKeyPattern.getOwned());
- }
+using std::vector;
+
+//
+// HintOverride
+//
+
+AllowedIndices::AllowedIndices(const std::vector<BSONObj>& indexKeyPatterns) {
+ for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
+ i != indexKeyPatterns.end();
+ ++i) {
+ const BSONObj& indexKeyPattern = *i;
+ this->indexKeyPatterns.push_back(indexKeyPattern.getOwned());
}
-
- AllowedIndices::~AllowedIndices() { }
-
- //
- // AllowedIndexEntry
- //
-
- AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query, const BSONObj& sort,
- const BSONObj& projection,
- const std::vector<BSONObj>& indexKeyPatterns)
- : query(query.getOwned()),
- sort(sort.getOwned()),
- projection(projection.getOwned()) {
- for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
- i != indexKeyPatterns.end(); ++i) {
- const BSONObj& indexKeyPattern = *i;
- this->indexKeyPatterns.push_back(indexKeyPattern.getOwned());
- }
- }
-
- AllowedIndexEntry::~AllowedIndexEntry() { }
-
- AllowedIndexEntry* AllowedIndexEntry::clone() const {
- AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexKeyPatterns);
- return entry;
+}
+
+AllowedIndices::~AllowedIndices() {}
+
+//
+// AllowedIndexEntry
+//
+
+AllowedIndexEntry::AllowedIndexEntry(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection,
+ const std::vector<BSONObj>& indexKeyPatterns)
+ : query(query.getOwned()), sort(sort.getOwned()), projection(projection.getOwned()) {
+ for (std::vector<BSONObj>::const_iterator i = indexKeyPatterns.begin();
+ i != indexKeyPatterns.end();
+ ++i) {
+ const BSONObj& indexKeyPattern = *i;
+ this->indexKeyPatterns.push_back(indexKeyPattern.getOwned());
}
+}
- //
- // QuerySettings
- //
-
- QuerySettings::QuerySettings() { }
-
- QuerySettings::~QuerySettings() {
- _clear();
- }
+AllowedIndexEntry::~AllowedIndexEntry() {}
- bool QuerySettings::getAllowedIndices(const PlanCacheKey& key,
- AllowedIndices** allowedIndicesOut) const {
- invariant(allowedIndicesOut);
+AllowedIndexEntry* AllowedIndexEntry::clone() const {
+ AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexKeyPatterns);
+ return entry;
+}
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- AllowedIndexEntryMap::const_iterator cacheIter = _allowedIndexEntryMap.find(key);
+//
+// QuerySettings
+//
- // Nothing to do if key does not exist in query settings.
- if (cacheIter == _allowedIndexEntryMap.end()) {
- *allowedIndicesOut = NULL;
- return false;
- }
+QuerySettings::QuerySettings() {}
- AllowedIndexEntry* entry = cacheIter->second;
+QuerySettings::~QuerySettings() {
+ _clear();
+}
- // Create a AllowedIndices from entry.
- *allowedIndicesOut = new AllowedIndices(entry->indexKeyPatterns);
+bool QuerySettings::getAllowedIndices(const PlanCacheKey& key,
+ AllowedIndices** allowedIndicesOut) const {
+ invariant(allowedIndicesOut);
- return true;
- }
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ AllowedIndexEntryMap::const_iterator cacheIter = _allowedIndexEntryMap.find(key);
- std::vector<AllowedIndexEntry*> QuerySettings::getAllAllowedIndices() const {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- vector<AllowedIndexEntry*> entries;
- for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin(); i != _allowedIndexEntryMap.end(); ++i) {
- AllowedIndexEntry* entry = i->second;
- entries.push_back(entry->clone());
- }
- return entries;
+ // Nothing to do if key does not exist in query settings.
+ if (cacheIter == _allowedIndexEntryMap.end()) {
+ *allowedIndicesOut = NULL;
+ return false;
}
- void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
- const PlanCacheKey& key,
- const std::vector<BSONObj>& indexes) {
- const LiteParsedQuery& lpq = canonicalQuery.getParsed();
- const BSONObj& query = lpq.getFilter();
- const BSONObj& sort = lpq.getSort();
- const BSONObj& projection = lpq.getProj();
- AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexes);
-
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
- // Replace existing entry.
- if (i != _allowedIndexEntryMap.end()) {
- AllowedIndexEntry* entry = i->second;
- delete entry;
- }
- _allowedIndexEntryMap[key] = entry;
- }
+ AllowedIndexEntry* entry = cacheIter->second;
- void QuerySettings::removeAllowedIndices(const PlanCacheKey& key) {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
+    // Create an AllowedIndices from entry.
+ *allowedIndicesOut = new AllowedIndices(entry->indexKeyPatterns);
- // Nothing to do if key does not exist in query settings.
- if (i == _allowedIndexEntryMap.end()) {
- return;
- }
+ return true;
+}
- // Free up resources and delete entry.
+std::vector<AllowedIndexEntry*> QuerySettings::getAllAllowedIndices() const {
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ vector<AllowedIndexEntry*> entries;
+ for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin();
+ i != _allowedIndexEntryMap.end();
+ ++i) {
+ AllowedIndexEntry* entry = i->second;
+ entries.push_back(entry->clone());
+ }
+ return entries;
+}
+
+void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
+ const PlanCacheKey& key,
+ const std::vector<BSONObj>& indexes) {
+ const LiteParsedQuery& lpq = canonicalQuery.getParsed();
+ const BSONObj& query = lpq.getFilter();
+ const BSONObj& sort = lpq.getSort();
+ const BSONObj& projection = lpq.getProj();
+ AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexes);
+
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
+ // Replace existing entry.
+ if (i != _allowedIndexEntryMap.end()) {
AllowedIndexEntry* entry = i->second;
- _allowedIndexEntryMap.erase(i);
delete entry;
}
+ _allowedIndexEntryMap[key] = entry;
+}
- void QuerySettings::clearAllowedIndices() {
- stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
- _clear();
+void QuerySettings::removeAllowedIndices(const PlanCacheKey& key) {
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ AllowedIndexEntryMap::iterator i = _allowedIndexEntryMap.find(key);
+
+ // Nothing to do if key does not exist in query settings.
+ if (i == _allowedIndexEntryMap.end()) {
+ return;
}
- void QuerySettings::_clear() {
- for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin(); i != _allowedIndexEntryMap.end(); ++i) {
- AllowedIndexEntry* entry = i->second;
- delete entry;
- }
- _allowedIndexEntryMap.clear();
+ // Free up resources and delete entry.
+ AllowedIndexEntry* entry = i->second;
+ _allowedIndexEntryMap.erase(i);
+ delete entry;
+}
+
+void QuerySettings::clearAllowedIndices() {
+ stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
+ _clear();
+}
+
+void QuerySettings::_clear() {
+ for (AllowedIndexEntryMap::const_iterator i = _allowedIndexEntryMap.begin();
+ i != _allowedIndexEntryMap.end();
+ ++i) {
+ AllowedIndexEntry* entry = i->second;
+ delete entry;
}
+ _allowedIndexEntryMap.clear();
+}
} // namespace mongo
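
A hedged usage sketch of the lookup path above (the helper name is invented; per the header that follows, the caller owns the AllowedIndices returned through getAllowedIndices() and must delete it):

    // Returns true if an index filter with at least one key pattern is set for 'key'.
    bool hasIndexFilter(const QuerySettings& settings, const PlanCacheKey& key) {
        AllowedIndices* allowed = NULL;
        if (!settings.getAllowedIndices(key, &allowed)) {
            return false;  // no filter registered for this query shape
        }
        bool nonEmpty = !allowed->indexKeyPatterns.empty();
        delete allowed;  // caller-owned, see query_settings.h
        return nonEmpty;
    }
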
diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h
index 29449167580..e1125320471 100644
--- a/src/mongo/db/query/query_settings.h
+++ b/src/mongo/db/query/query_settings.h
@@ -41,108 +41,111 @@
namespace mongo {
+/**
+ * Holds allowed indices.
+ */
+class AllowedIndices {
+private:
+ MONGO_DISALLOW_COPYING(AllowedIndices);
+
+public:
+ AllowedIndices(const std::vector<BSONObj>& indexKeyPatterns);
+ ~AllowedIndices();
+
+ // These are the index key patterns that
+ // we will use to override the indexes retrieved from
+ // the index catalog.
+ std::vector<BSONObj> indexKeyPatterns;
+};
+
+/**
+ * Value type for query settings.
+ * Holds:
+ * query shape (query, sort, projection)
+ * vector of index specs
+ */
+class AllowedIndexEntry {
+private:
+ MONGO_DISALLOW_COPYING(AllowedIndexEntry);
+
+public:
+ AllowedIndexEntry(const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection,
+ const std::vector<BSONObj>& indexKeyPatterns);
+ ~AllowedIndexEntry();
+ AllowedIndexEntry* clone() const;
+
+    // 'query', 'sort' and 'projection' collectively
+ // represent the query shape that we are storing hint overrides for.
+ BSONObj query;
+ BSONObj sort;
+ BSONObj projection;
+
+ // These are the index key patterns that
+ // we will use to override the indexes retrieved from
+ // the index catalog.
+ std::vector<BSONObj> indexKeyPatterns;
+};
+
+/**
+ * Holds the index filters in a collection.
+ */
+class QuerySettings {
+private:
+ MONGO_DISALLOW_COPYING(QuerySettings);
+
+public:
+ QuerySettings();
+
+ ~QuerySettings();
+
/**
- * Holds allowed indices.
+ * Returns true and fills out allowedIndicesOut if a hint is set in the query settings
+ * for the query.
+ * Returns false and sets allowedIndicesOut to NULL otherwise.
+ * Caller owns AllowedIndices.
*/
- class AllowedIndices {
- private:
- MONGO_DISALLOW_COPYING(AllowedIndices);
- public:
- AllowedIndices(const std::vector<BSONObj>& indexKeyPatterns);
- ~AllowedIndices();
-
- // These are the index key patterns that
- // we will use to override the indexes retrieved from
- // the index catalog.
- std::vector<BSONObj> indexKeyPatterns;
- };
+ bool getAllowedIndices(const PlanCacheKey& query, AllowedIndices** allowedIndicesOut) const;
/**
- * Value type for query settings.
- * Holds:
- * query shape (query, sort, projection)
- * vector of index specs
+     * Returns copies of all overrides for the collection.
+ * Caller owns overrides in vector.
*/
- class AllowedIndexEntry {
- private:
- MONGO_DISALLOW_COPYING(AllowedIndexEntry);
- public:
- AllowedIndexEntry(const BSONObj& query, const BSONObj& sort,
- const BSONObj& projection,
- const std::vector<BSONObj>& indexKeyPatterns);
- ~AllowedIndexEntry();
- AllowedIndexEntry* clone() const;
-
- // _query, _sort and _projection collectively
- // represent the query shape that we are storing hint overrides for.
- BSONObj query;
- BSONObj sort;
- BSONObj projection;
-
- // These are the index key patterns that
- // we will use to override the indexes retrieved from
- // the index catalog.
- std::vector<BSONObj> indexKeyPatterns;
- };
+ std::vector<AllowedIndexEntry*> getAllAllowedIndices() const;
+
+ /**
+ * Adds or replaces entry in query settings.
+ * If existing entry is found for the same key,
+ * frees resources for existing entry before replacing.
+ */
+ void setAllowedIndices(const CanonicalQuery& canonicalQuery,
+ const PlanCacheKey& key,
+ const std::vector<BSONObj>& indexes);
+
+ /**
+ * Removes single entry from query settings. No effect if query shape is not found.
+ */
+ void removeAllowedIndices(const PlanCacheKey& canonicalQuery);
+
+ /**
+ * Clears all allowed indices from query settings.
+ */
+ void clearAllowedIndices();
+
+private:
+ /**
+ * Clears entries without acquiring mutex.
+ */
+ void _clear();
+
+ typedef unordered_map<PlanCacheKey, AllowedIndexEntry*> AllowedIndexEntryMap;
+ AllowedIndexEntryMap _allowedIndexEntryMap;
/**
- * Holds the index filters in a collection.
+ * Protects data in query settings.
*/
- class QuerySettings {
- private:
- MONGO_DISALLOW_COPYING(QuerySettings);
- public:
- QuerySettings();
-
- ~QuerySettings();
-
- /**
- * Returns true and fills out allowedIndicesOut if a hint is set in the query settings
- * for the query.
- * Returns false and sets allowedIndicesOut to NULL otherwise.
- * Caller owns AllowedIndices.
- */
- bool getAllowedIndices(const PlanCacheKey& query,
- AllowedIndices** allowedIndicesOut) const;
-
- /**
- * Returns copies all overrides for the collection..
- * Caller owns overrides in vector.
- */
- std::vector<AllowedIndexEntry*> getAllAllowedIndices() const;
-
- /**
- * Adds or replaces entry in query settings.
- * If existing entry is found for the same key,
- * frees resources for existing entry before replacing.
- */
- void setAllowedIndices(const CanonicalQuery& canonicalQuery,
- const PlanCacheKey& key,
- const std::vector<BSONObj>& indexes);
-
- /**
- * Removes single entry from query settings. No effect if query shape is not found.
- */
- void removeAllowedIndices(const PlanCacheKey& canonicalQuery);
-
- /**
- * Clears all allowed indices from query settings.
- */
- void clearAllowedIndices();
-
- private:
- /**
- * Clears entries without acquiring mutex.
- */
- void _clear();
-
- typedef unordered_map<PlanCacheKey, AllowedIndexEntry*> AllowedIndexEntryMap;
- AllowedIndexEntryMap _allowedIndexEntryMap;
-
- /**
- * Protects data in query settings.
- */
- mutable stdx::mutex _mutex;
- };
+ mutable stdx::mutex _mutex;
+};
} // namespace mongo
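(Editorial sketch, not part of the patch.) The reformatted QuerySettings interface above keeps its ownership contract: getAllowedIndices() hands back a caller-owned AllowedIndices*, and setAllowedIndices() copies the supplied key patterns. A minimal consumer might look like the following; the function name applyIndexFilterSketch and the {a: 1} key pattern are illustrative only, and a CanonicalQuery, its PlanCacheKey, and the relevant MongoDB headers are assumed to already be available.

    #include <memory>
    #include <vector>

    // Sketch only: exercises the ownership contract documented above.
    void applyIndexFilterSketch(mongo::QuerySettings& settings,
                                const mongo::CanonicalQuery& cq,
                                const mongo::PlanCacheKey& key) {
        // Restrict this query shape to the (illustrative) index {a: 1}.
        std::vector<mongo::BSONObj> keyPatterns;
        keyPatterns.push_back(BSON("a" << 1));
        settings.setAllowedIndices(cq, key, keyPatterns);

        // Planner-side lookup: per the header comment, the caller owns the result.
        mongo::AllowedIndices* rawAllowed = NULL;
        if (settings.getAllowedIndices(key, &rawAllowed)) {
            std::unique_ptr<mongo::AllowedIndices> allowed(rawAllowed);
            // allowed->indexKeyPatterns now overrides the catalog's candidate indexes.
        }

        // Removing the filter for this shape is a no-op if none was set.
        settings.removeAllowedIndices(key);
    }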
diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp
index 18e66ccc503..85b0cb7933c 100644
--- a/src/mongo/db/query/query_solution.cpp
+++ b/src/mongo/db/query/query_solution.cpp
@@ -35,840 +35,842 @@
namespace mongo {
- using std::set;
-
- string QuerySolutionNode::toString() const {
- mongoutils::str::stream ss;
- appendToString(&ss, 0);
- return ss;
- }
-
- // static
- void QuerySolutionNode::addIndent(mongoutils::str::stream* ss, int level) {
- for (int i = 0; i < level; ++i) {
- *ss << "---";
- }
- }
-
- void QuerySolutionNode::addCommon(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent + 1);
- *ss << "fetched = " << fetched() << '\n';
- addIndent(ss, indent + 1);
- *ss << "sortedByDiskLoc = " << sortedByDiskLoc() << '\n';
+using std::set;
+
+string QuerySolutionNode::toString() const {
+ mongoutils::str::stream ss;
+ appendToString(&ss, 0);
+ return ss;
+}
+
+// static
+void QuerySolutionNode::addIndent(mongoutils::str::stream* ss, int level) {
+ for (int i = 0; i < level; ++i) {
+ *ss << "---";
+ }
+}
+
+void QuerySolutionNode::addCommon(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent + 1);
+ *ss << "fetched = " << fetched() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "sortedByDiskLoc = " << sortedByDiskLoc() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "getSort = [";
+ for (BSONObjSet::const_iterator it = getSort().begin(); it != getSort().end(); it++) {
+ *ss << it->toString() << ", ";
+ }
+ *ss << "]" << '\n';
+}
+
+//
+// TextNode
+//
+
+void TextNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "TEXT\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "query = " << query << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "language = " << language << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "caseSensitive= " << caseSensitive << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "indexPrefix = " << indexPrefix.toString() << '\n';
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "getSort = [";
- for (BSONObjSet::const_iterator it = getSort().begin(); it != getSort().end(); it++) {
- *ss << it->toString() << ", ";
- }
- *ss << "]" << '\n';
+ *ss << " filter = " << filter->toString();
}
+ addCommon(ss, indent);
+}
- //
- // TextNode
- //
+QuerySolutionNode* TextNode::clone() const {
+ TextNode* copy = new TextNode();
+ cloneBaseData(copy);
- void TextNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "TEXT\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
- addIndent(ss, indent + 1);
- *ss << "query = " << query << '\n';
- addIndent(ss, indent + 1);
- *ss << "language = " << language << '\n';
- addIndent(ss, indent + 1);
- *ss << "caseSensitive= " << caseSensitive << '\n';
- addIndent(ss, indent + 1);
- *ss << "indexPrefix = " << indexPrefix.toString() << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString();
- }
- addCommon(ss, indent);
- }
-
- QuerySolutionNode* TextNode::clone() const {
- TextNode* copy = new TextNode();
- cloneBaseData(copy);
+ copy->_sort = this->_sort;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->query = this->query;
+ copy->language = this->language;
+ copy->caseSensitive = this->caseSensitive;
+ copy->indexPrefix = this->indexPrefix;
- copy->_sort = this->_sort;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->query = this->query;
- copy->language = this->language;
- copy->caseSensitive = this->caseSensitive;
- copy->indexPrefix = this->indexPrefix;
+ return copy;
+}
- return copy;
- }
-
- //
- // CollectionScanNode
- //
+//
+// CollectionScanNode
+//
- CollectionScanNode::CollectionScanNode() : tailable(false), direction(1), maxScan(0) { }
+CollectionScanNode::CollectionScanNode() : tailable(false), direction(1), maxScan(0) {}
- void CollectionScanNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "COLLSCAN\n";
+void CollectionScanNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "COLLSCAN\n";
+ addIndent(ss, indent + 1);
+ *ss << "ns = " << name << '\n';
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "ns = " << name << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << "filter = " << filter->toString();
- }
- addCommon(ss, indent);
+ *ss << "filter = " << filter->toString();
}
+ addCommon(ss, indent);
+}
- QuerySolutionNode* CollectionScanNode::clone() const {
- CollectionScanNode* copy = new CollectionScanNode();
- cloneBaseData(copy);
+QuerySolutionNode* CollectionScanNode::clone() const {
+ CollectionScanNode* copy = new CollectionScanNode();
+ cloneBaseData(copy);
- copy->_sort = this->_sort;
- copy->name = this->name;
- copy->tailable = this->tailable;
- copy->direction = this->direction;
- copy->maxScan = this->maxScan;
+ copy->_sort = this->_sort;
+ copy->name = this->name;
+ copy->tailable = this->tailable;
+ copy->direction = this->direction;
+ copy->maxScan = this->maxScan;
- return copy;
- }
+ return copy;
+}
- //
- // AndHashNode
- //
+//
+// AndHashNode
+//
- AndHashNode::AndHashNode() { }
+AndHashNode::AndHashNode() {}
- AndHashNode::~AndHashNode() { }
+AndHashNode::~AndHashNode() {}
- void AndHashNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "AND_HASH\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString() << '\n';
- }
- addCommon(ss, indent);
- for (size_t i = 0; i < children.size(); ++i) {
- addIndent(ss, indent + 1);
- *ss << "Child " << i << ":\n";
- children[i]->appendToString(ss, indent + 1);
- }
+void AndHashNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "AND_HASH\n";
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ *ss << " filter = " << filter->toString() << '\n';
+ }
+ addCommon(ss, indent);
+ for (size_t i = 0; i < children.size(); ++i) {
+ addIndent(ss, indent + 1);
+ *ss << "Child " << i << ":\n";
+ children[i]->appendToString(ss, indent + 1);
}
+}
- bool AndHashNode::fetched() const {
- // Any WSM output from this stage came from all children stages. If any child provides
- // fetched data, we merge that fetched data into the WSM we output.
- for (size_t i = 0; i < children.size(); ++i) {
- if (children[i]->fetched()) {
- return true;
- }
+bool AndHashNode::fetched() const {
+ // Any WSM output from this stage came from all children stages. If any child provides
+ // fetched data, we merge that fetched data into the WSM we output.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (children[i]->fetched()) {
+ return true;
}
- return false;
}
+ return false;
+}
- bool AndHashNode::hasField(const string& field) const {
- // Any WSM output from this stage came from all children stages. Therefore we have all
- // fields covered in our children.
- for (size_t i = 0; i < children.size(); ++i) {
- if (children[i]->hasField(field)) {
- return true;
- }
+bool AndHashNode::hasField(const string& field) const {
+ // Any WSM output from this stage came from all children stages. Therefore we have all
+ // fields covered in our children.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (children[i]->hasField(field)) {
+ return true;
}
- return false;
}
+ return false;
+}
- QuerySolutionNode* AndHashNode::clone() const {
- AndHashNode* copy = new AndHashNode();
- cloneBaseData(copy);
+QuerySolutionNode* AndHashNode::clone() const {
+ AndHashNode* copy = new AndHashNode();
+ cloneBaseData(copy);
- copy->_sort = this->_sort;
+ copy->_sort = this->_sort;
- return copy;
- }
+ return copy;
+}
- //
- // AndSortedNode
- //
+//
+// AndSortedNode
+//
- AndSortedNode::AndSortedNode() { }
+AndSortedNode::AndSortedNode() {}
- AndSortedNode::~AndSortedNode() { }
+AndSortedNode::~AndSortedNode() {}
- void AndSortedNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "AND_SORTED\n";
- addCommon(ss, indent);
- for (size_t i = 0; i < children.size(); ++i) {
- addIndent(ss, indent + 1);
- *ss << "Child " << i << ":\n";
- children[i]->appendToString(ss, indent + 1);
- }
+void AndSortedNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "AND_SORTED\n";
+ addCommon(ss, indent);
+ for (size_t i = 0; i < children.size(); ++i) {
+ addIndent(ss, indent + 1);
+ *ss << "Child " << i << ":\n";
+ children[i]->appendToString(ss, indent + 1);
}
+}
- bool AndSortedNode::fetched() const {
- // Any WSM output from this stage came from all children stages. If any child provides
- // fetched data, we merge that fetched data into the WSM we output.
- for (size_t i = 0; i < children.size(); ++i) {
- if (children[i]->fetched()) {
- return true;
- }
+bool AndSortedNode::fetched() const {
+ // Any WSM output from this stage came from all children stages. If any child provides
+ // fetched data, we merge that fetched data into the WSM we output.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (children[i]->fetched()) {
+ return true;
}
- return false;
}
+ return false;
+}
- bool AndSortedNode::hasField(const string& field) const {
- // Any WSM output from this stage came from all children stages. Therefore we have all
- // fields covered in our children.
- for (size_t i = 0; i < children.size(); ++i) {
- if (children[i]->hasField(field)) {
- return true;
- }
+bool AndSortedNode::hasField(const string& field) const {
+ // Any WSM output from this stage came from all children stages. Therefore we have all
+ // fields covered in our children.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (children[i]->hasField(field)) {
+ return true;
}
- return false;
}
+ return false;
+}
- QuerySolutionNode* AndSortedNode::clone() const {
- AndSortedNode* copy = new AndSortedNode();
- cloneBaseData(copy);
+QuerySolutionNode* AndSortedNode::clone() const {
+ AndSortedNode* copy = new AndSortedNode();
+ cloneBaseData(copy);
- copy->_sort = this->_sort;
+ copy->_sort = this->_sort;
- return copy;
- }
+ return copy;
+}
- //
- // OrNode
- //
-
- OrNode::OrNode() : dedup(true) { }
+//
+// OrNode
+//
- OrNode::~OrNode() { }
+OrNode::OrNode() : dedup(true) {}
- void OrNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "OR\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString() << '\n';
- }
- addCommon(ss, indent);
- for (size_t i = 0; i < children.size(); ++i) {
- addIndent(ss, indent + 1);
- *ss << "Child " << i << ":\n";
- children[i]->appendToString(ss, indent + 2);
- *ss << '\n';
- }
- }
+OrNode::~OrNode() {}
- bool OrNode::fetched() const {
- // Any WSM output from this stage came exactly one child stage. Given that we don't know
- // what child stage it came from, we require that all children provide fetched data in order
- // to guarantee that our output is fetched.
- for (size_t i = 0; i < children.size(); ++i) {
- if (!children[i]->fetched()) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Any WSM output from this stage came from exactly one child stage. Therefore, if
- * we want to guarantee that any output has a certain field, all of our children must
- * have that field.
- */
- bool OrNode::hasField(const string& field) const {
- for (size_t i = 0; i < children.size(); ++i) {
- if (!children[i]->hasField(field)) {
- return false;
- }
- }
- return true;
+void OrNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "OR\n";
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ *ss << " filter = " << filter->toString() << '\n';
}
-
- QuerySolutionNode* OrNode::clone() const {
- OrNode* copy = new OrNode();
- cloneBaseData(copy);
-
- copy->_sort = this->_sort;
- copy->dedup = this->dedup;
-
- return copy;
+ addCommon(ss, indent);
+ for (size_t i = 0; i < children.size(); ++i) {
+ addIndent(ss, indent + 1);
+ *ss << "Child " << i << ":\n";
+ children[i]->appendToString(ss, indent + 2);
+ *ss << '\n';
}
+}
- //
- // MergeSortNode
- //
-
- MergeSortNode::MergeSortNode() : dedup(true) { }
-
- MergeSortNode::~MergeSortNode() { }
-
- void MergeSortNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "MERGE_SORT\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString() << '\n';
- }
- addCommon(ss, indent);
- for (size_t i = 0; i < children.size(); ++i) {
- addIndent(ss, indent + 1);
- *ss << "Child " << i << ":\n";
- children[i]->appendToString(ss, indent + 2);
- *ss << '\n';
+bool OrNode::fetched() const {
+    // Any WSM output from this stage came from exactly one child stage. Given that we don't know
+ // what child stage it came from, we require that all children provide fetched data in order
+ // to guarantee that our output is fetched.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (!children[i]->fetched()) {
+ return false;
}
}
+ return true;
+}
- bool MergeSortNode::fetched() const {
- // Any WSM output from this stage came exactly one child stage. Given that we don't know
- // what child stage it came from, we require that all children provide fetched data in order
- // to guarantee that our output is fetched.
- for (size_t i = 0; i < children.size(); ++i) {
- if (!children[i]->fetched()) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Any WSM output from this stage came from exactly one child stage. Therefore, if
- * we want to guarantee that any output has a certain field, all of our children must
- * have that field.
- */
- bool MergeSortNode::hasField(const string& field) const {
- for (size_t i = 0; i < children.size(); ++i) {
- if (!children[i]->hasField(field)) {
- return false;
- }
+/**
+ * Any WSM output from this stage came from exactly one child stage. Therefore, if
+ * we want to guarantee that any output has a certain field, all of our children must
+ * have that field.
+ */
+bool OrNode::hasField(const string& field) const {
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (!children[i]->hasField(field)) {
+ return false;
}
- return true;
}
+ return true;
+}
- QuerySolutionNode* MergeSortNode::clone() const {
- MergeSortNode* copy = new MergeSortNode();
- cloneBaseData(copy);
+QuerySolutionNode* OrNode::clone() const {
+ OrNode* copy = new OrNode();
+ cloneBaseData(copy);
- copy->_sorts = this->_sorts;
- copy->dedup = this->dedup;
- copy->sort = this->sort;
+ copy->_sort = this->_sort;
+ copy->dedup = this->dedup;
- return copy;
- }
-
- //
- // FetchNode
- //
-
- FetchNode::FetchNode() { }
-
- void FetchNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "FETCH\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- StringBuilder sb;
- *ss << "filter:\n";
- filter->debugString(sb, indent + 2);
- *ss << sb.str();
- }
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
+ return copy;
+}
- QuerySolutionNode* FetchNode::clone() const {
- FetchNode* copy = new FetchNode();
- cloneBaseData(copy);
+//
+// MergeSortNode
+//
- copy->_sorts = this->_sorts;
+MergeSortNode::MergeSortNode() : dedup(true) {}
- return copy;
- }
+MergeSortNode::~MergeSortNode() {}
- //
- // IndexScanNode
- //
-
- IndexScanNode::IndexScanNode()
- : indexIsMultiKey(false), direction(1), maxScan(0), addKeyMetadata(false) { }
-
- void IndexScanNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "IXSCAN\n";
+void MergeSortNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "MERGE_SORT\n";
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << "filter = " << filter->toString();
- }
- addIndent(ss, indent + 1);
- *ss << "direction = " << direction << '\n';
- addIndent(ss, indent + 1);
- *ss << "bounds = " << bounds.toString() << '\n';
- addCommon(ss, indent);
+ *ss << " filter = " << filter->toString() << '\n';
}
-
- bool IndexScanNode::hasField(const string& field) const {
- // There is no covering in a multikey index because you don't know whether or not the field
- // in the key was extracted from an array in the original document.
- if (indexIsMultiKey) { return false; }
-
- // Custom index access methods may return non-exact key data - this function is currently
- // used for covering exact key data only.
- if (IndexNames::BTREE != IndexNames::findPluginName(indexKeyPattern)) { return false; }
-
- BSONObjIterator it(indexKeyPattern);
- while (it.more()) {
- if (field == it.next().fieldName()) {
- return true;
- }
- }
- return false;
+ addCommon(ss, indent);
+ for (size_t i = 0; i < children.size(); ++i) {
+ addIndent(ss, indent + 1);
+ *ss << "Child " << i << ":\n";
+ children[i]->appendToString(ss, indent + 2);
+ *ss << '\n';
}
+}
- bool IndexScanNode::sortedByDiskLoc() const {
- // Indices use RecordId as an additional key after the actual index key.
- // Therefore, if we're only examining one index key, the output is sorted
- // by RecordId.
-
- // If it's a simple range query, it's easy to determine if the range is a point.
- if (bounds.isSimpleRange) {
- return 0 == bounds.startKey.woCompare(bounds.endKey, indexKeyPattern);
- }
-
- // If it's a more complex bounds query, we make sure that each field is a point.
- for (size_t i = 0; i < bounds.fields.size(); ++i) {
- const OrderedIntervalList& oil = bounds.fields[i];
- if (1 != oil.intervals.size()) {
- return false;
- }
- const Interval& interval = oil.intervals[0];
- if (0 != interval.start.woCompare(interval.end, false)) {
- return false;
- }
+bool MergeSortNode::fetched() const {
+    // Any WSM output from this stage came from exactly one child stage. Given that we don't know
+ // what child stage it came from, we require that all children provide fetched data in order
+ // to guarantee that our output is fetched.
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (!children[i]->fetched()) {
+ return false;
}
-
- return true;
}
+ return true;
+}
- void IndexScanNode::computeProperties() {
- _sorts.clear();
-
- BSONObj sortPattern = QueryPlannerAnalysis::getSortPattern(indexKeyPattern);
- if (direction == -1) {
- sortPattern = QueryPlannerCommon::reverseSortObj(sortPattern);
- }
-
- _sorts.insert(sortPattern);
-
- const int nFields = sortPattern.nFields();
- if (nFields > 1) {
- // We're sorted not only by sortPattern but also by all prefixes of it.
- for (int i = 0; i < nFields; ++i) {
- // Make obj out of fields [0,i]
- BSONObjIterator it(sortPattern);
- BSONObjBuilder prefixBob;
- for (int j = 0; j <= i; ++j) {
- prefixBob.append(it.next());
- }
- _sorts.insert(prefixBob.obj());
- }
- }
-
- // If we are using the index {a:1, b:1} to answer the predicate {a: 10}, it's sorted
- // both by the index key pattern and by the pattern {b: 1}.
-
- // See if there are any fields with equalities for bounds. We can drop these
- // from any sort orders created.
- set<string> equalityFields;
- if (!bounds.isSimpleRange) {
- // Figure out how many fields are point intervals.
- for (size_t i = 0; i < bounds.fields.size(); ++i) {
- const OrderedIntervalList& oil = bounds.fields[i];
- if (oil.intervals.size() != 1) {
- continue;
- }
- const Interval& ival = oil.intervals[0];
- if (!ival.isPoint()) {
- continue;
- }
- equalityFields.insert(oil.name);
- }
- }
-
- if (equalityFields.empty()) {
- return;
- }
-
- // TODO: Each field in equalityFields could be dropped from the sort order since it is
- // a point interval. The full set of sort orders is as follows:
- // For each sort in _sorts:
- // For each drop in powerset(equalityFields):
- // Remove fields in 'drop' from 'sort' and add resulting sort to output.
- //
- // Since this involves a powerset, we don't generate the full set of possibilities.
- // Instead, we generate sort orders by removing possible contiguous prefixes of equality
- // predicates. For example, if the key pattern is {a: 1, b: 1, c: 1, d: 1, e: 1}
- // and and there are equality predicates on 'a', 'b', and 'c', then here we add the sort
- // orders {b: 1, c: 1, d: 1, e: 1} and {c: 1, d: 1, e: 1}. (We also end up adding
- // {d: 1, e: 1} and {d: 1}, but this is done later on.)
- BSONObjIterator it(sortPattern);
- BSONObjBuilder suffixBob;
- while (it.more()) {
- BSONElement elt = it.next();
- // TODO: string slowness. fix when bounds are stringdata not string.
- if (equalityFields.end() == equalityFields.find(string(elt.fieldName()))) {
- suffixBob.append(elt);
- // This field isn't a point interval, can't drop.
- break;
- }
-
- // We add the sort obtained by dropping 'elt' and all preceding elements from the index
- // key pattern.
- BSONObjIterator droppedPrefixIt = it;
- BSONObjBuilder droppedPrefixBob;
- while (droppedPrefixIt.more()) {
- droppedPrefixBob.append(droppedPrefixIt.next());
- }
- _sorts.insert(droppedPrefixBob.obj());
- }
-
- while (it.more()) {
- suffixBob.append(it.next());
- }
-
- // We've found the suffix following the contiguous prefix of equality fields.
- // Ex. For index {a: 1, b: 1, c: 1, d: 1} and query {a: 3, b: 5}, this suffix
- // of the key pattern is {c: 1, d: 1}.
- //
- // Now we have to add all prefixes of this suffix as possible sort orders.
- // Ex. Continuing the example from above, we have to include sort orders
- // {c: 1} and {c: 1, d: 1}.
- BSONObj filterPointsObj = suffixBob.obj();
- for (int i = 0; i < filterPointsObj.nFields(); ++i) {
- // Make obj out of fields [0,i]
- BSONObjIterator it(filterPointsObj);
- BSONObjBuilder prefixBob;
- for (int j = 0; j <= i; ++j) {
- prefixBob.append(it.next());
- }
- _sorts.insert(prefixBob.obj());
+/**
+ * Any WSM output from this stage came from exactly one child stage. Therefore, if
+ * we want to guarantee that any output has a certain field, all of our children must
+ * have that field.
+ */
+bool MergeSortNode::hasField(const string& field) const {
+ for (size_t i = 0; i < children.size(); ++i) {
+ if (!children[i]->hasField(field)) {
+ return false;
}
}
+ return true;
+}
- QuerySolutionNode* IndexScanNode::clone() const {
- IndexScanNode* copy = new IndexScanNode();
- cloneBaseData(copy);
+QuerySolutionNode* MergeSortNode::clone() const {
+ MergeSortNode* copy = new MergeSortNode();
+ cloneBaseData(copy);
- copy->_sorts = this->_sorts;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->indexIsMultiKey = this->indexIsMultiKey;
- copy->direction = this->direction;
- copy->maxScan = this->maxScan;
- copy->addKeyMetadata = this->addKeyMetadata;
- copy->bounds = this->bounds;
+ copy->_sorts = this->_sorts;
+ copy->dedup = this->dedup;
+ copy->sort = this->sort;
- return copy;
- }
-
- //
- // ProjectionNode
- //
-
- void ProjectionNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "PROJ\n";
- addIndent(ss, indent + 1);
- *ss << "proj = " << projection.toString() << '\n';
- addIndent(ss, indent + 1);
- if (DEFAULT == projType) {
- *ss << "type = DEFAULT\n";
- }
- else if (COVERED_ONE_INDEX == projType) {
- *ss << "type = COVERED_ONE_INDEX\n";
- }
- else {
- invariant(SIMPLE_DOC == projType);
- *ss << "type = SIMPLE_DOC\n";
- }
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
-
- QuerySolutionNode* ProjectionNode::clone() const {
- ProjectionNode* copy = new ProjectionNode();
- cloneBaseData(copy);
+ return copy;
+}
- copy->_sorts = this->_sorts;
- copy->fullExpression = this->fullExpression;
+//
+// FetchNode
+//
- // This MatchExpression* is owned by the canonical query, not by the
- // ProjectionNode. Just copying the pointer is fine.
- copy->projection = this->projection;
+FetchNode::FetchNode() {}
- return copy;
- }
-
- //
- // SortNode
- //
-
- void SortNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "SORT\n";
- addIndent(ss, indent + 1);
- *ss << "pattern = " << pattern.toString() << '\n';
- addIndent(ss, indent + 1);
- *ss << "query for bounds = " << query.toString() << '\n';
- addIndent(ss, indent + 1);
- *ss << "limit = " << limit << '\n';
- addCommon(ss, indent);
+void FetchNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "FETCH\n";
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
+ StringBuilder sb;
+ *ss << "filter:\n";
+ filter->debugString(sb, indent + 2);
+ *ss << sb.str();
}
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
- QuerySolutionNode* SortNode::clone() const {
- SortNode* copy = new SortNode();
- cloneBaseData(copy);
-
- copy->_sorts = this->_sorts;
- copy->pattern = this->pattern;
- copy->query = this->query;
- copy->limit = this->limit;
-
- return copy;
- }
-
- //
- // LimitNode
- //
-
+QuerySolutionNode* FetchNode::clone() const {
+ FetchNode* copy = new FetchNode();
+ cloneBaseData(copy);
- void LimitNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "LIMIT\n";
- addIndent(ss, indent + 1);
- *ss << "limit = " << limit << '\n';
- addIndent(ss, indent + 1);
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
+ copy->_sorts = this->_sorts;
- QuerySolutionNode* LimitNode::clone() const {
- LimitNode* copy = new LimitNode();
- cloneBaseData(copy);
+ return copy;
+}
- copy->limit = this->limit;
+//
+// IndexScanNode
+//
- return copy;
- }
-
- //
- // SkipNode
- //
+IndexScanNode::IndexScanNode()
+ : indexIsMultiKey(false), direction(1), maxScan(0), addKeyMetadata(false) {}
- void SkipNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "SKIP\n";
- addIndent(ss, indent + 1);
- *ss << "skip= " << skip << '\n';
- addCommon(ss, indent);
+void IndexScanNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "IXSCAN\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern << '\n';
+ if (NULL != filter) {
addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
+ *ss << "filter = " << filter->toString();
}
+ addIndent(ss, indent + 1);
+ *ss << "direction = " << direction << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "bounds = " << bounds.toString() << '\n';
+ addCommon(ss, indent);
+}
- QuerySolutionNode* SkipNode::clone() const {
- SkipNode* copy = new SkipNode();
- cloneBaseData(copy);
-
- copy->skip = this->skip;
-
- return copy;
- }
-
- //
- // GeoNear2DNode
- //
-
- void GeoNear2DNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "GEO_NEAR_2D\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
- addCommon(ss, indent);
- *ss << "nearQuery = " << nq->toString() << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString();
- }
+bool IndexScanNode::hasField(const string& field) const {
+ // There is no covering in a multikey index because you don't know whether or not the field
+ // in the key was extracted from an array in the original document.
+ if (indexIsMultiKey) {
+ return false;
}
- QuerySolutionNode* GeoNear2DNode::clone() const {
- GeoNear2DNode* copy = new GeoNear2DNode();
- cloneBaseData(copy);
-
- copy->_sorts = this->_sorts;
- copy->nq = this->nq;
- copy->baseBounds = this->baseBounds;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->addPointMeta = this->addPointMeta;
- copy->addDistMeta = this->addDistMeta;
-
- return copy;
+ // Custom index access methods may return non-exact key data - this function is currently
+ // used for covering exact key data only.
+ if (IndexNames::BTREE != IndexNames::findPluginName(indexKeyPattern)) {
+ return false;
}
- //
- // GeoNear2DSphereNode
- //
-
- void GeoNear2DSphereNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "GEO_NEAR_2DSPHERE\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
- addCommon(ss, indent);
- *ss << "baseBounds = " << baseBounds.toString() << '\n';
- addIndent(ss, indent + 1);
- *ss << "nearQuery = " << nq->toString() << '\n';
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- *ss << " filter = " << filter->toString();
+ BSONObjIterator it(indexKeyPattern);
+ while (it.more()) {
+ if (field == it.next().fieldName()) {
+ return true;
}
}
+ return false;
+}
- QuerySolutionNode* GeoNear2DSphereNode::clone() const {
- GeoNear2DSphereNode* copy = new GeoNear2DSphereNode();
- cloneBaseData(copy);
-
- copy->_sorts = this->_sorts;
- copy->nq = this->nq;
- copy->baseBounds = this->baseBounds;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->addPointMeta = this->addPointMeta;
- copy->addDistMeta = this->addDistMeta;
+bool IndexScanNode::sortedByDiskLoc() const {
+ // Indices use RecordId as an additional key after the actual index key.
+ // Therefore, if we're only examining one index key, the output is sorted
+ // by RecordId.
- return copy;
+ // If it's a simple range query, it's easy to determine if the range is a point.
+ if (bounds.isSimpleRange) {
+ return 0 == bounds.startKey.woCompare(bounds.endKey, indexKeyPattern);
}
- //
- // ShardingFilterNode
- //
-
- void ShardingFilterNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "SHARDING_FILTER\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- StringBuilder sb;
- *ss << "filter:\n";
- filter->debugString(sb, indent + 2);
- *ss << sb.str();
+ // If it's a more complex bounds query, we make sure that each field is a point.
+ for (size_t i = 0; i < bounds.fields.size(); ++i) {
+ const OrderedIntervalList& oil = bounds.fields[i];
+ if (1 != oil.intervals.size()) {
+ return false;
}
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
-
- QuerySolutionNode* ShardingFilterNode::clone() const {
- ShardingFilterNode* copy = new ShardingFilterNode();
- cloneBaseData(copy);
- return copy;
- }
-
- //
- // KeepMutationsNode
- //
-
- void KeepMutationsNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "KEEP_MUTATIONS\n";
- if (NULL != filter) {
- addIndent(ss, indent + 1);
- StringBuilder sb;
- *ss << "filter:\n";
- filter->debugString(sb, indent + 2);
- *ss << sb.str();
+ const Interval& interval = oil.intervals[0];
+ if (0 != interval.start.woCompare(interval.end, false)) {
+ return false;
}
- addCommon(ss, indent);
- addIndent(ss, indent + 1);
- *ss << "Child:" << '\n';
- children[0]->appendToString(ss, indent + 2);
- }
-
- QuerySolutionNode* KeepMutationsNode::clone() const {
- KeepMutationsNode* copy = new KeepMutationsNode();
- cloneBaseData(copy);
-
- copy->sorts = this->sorts;
-
- return copy;
- }
-
- //
- // DistinctNode
- //
-
- void DistinctNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "DISTINCT\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern << '\n';
- addIndent(ss, indent + 1);
- *ss << "direction = " << direction << '\n';
- addIndent(ss, indent + 1);
- *ss << "bounds = " << bounds.toString() << '\n';
}
- QuerySolutionNode* DistinctNode::clone() const {
- DistinctNode* copy = new DistinctNode();
- cloneBaseData(copy);
+ return true;
+}
- copy->sorts = this->sorts;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->direction = this->direction;
- copy->bounds = this->bounds;
- copy->fieldNo = this->fieldNo;
+void IndexScanNode::computeProperties() {
+ _sorts.clear();
- return copy;
+ BSONObj sortPattern = QueryPlannerAnalysis::getSortPattern(indexKeyPattern);
+ if (direction == -1) {
+ sortPattern = QueryPlannerCommon::reverseSortObj(sortPattern);
}
- //
- // CountNode
- //
+ _sorts.insert(sortPattern);
- void CountNode::appendToString(mongoutils::str::stream* ss, int indent) const {
- addIndent(ss, indent);
- *ss << "COUNT\n";
- addIndent(ss, indent + 1);
- *ss << "keyPattern = " << indexKeyPattern << '\n';
- addIndent(ss, indent + 1);
- *ss << "startKey = " << startKey << '\n';
- addIndent(ss, indent + 1);
- *ss << "endKey = " << endKey << '\n';
+ const int nFields = sortPattern.nFields();
+ if (nFields > 1) {
+ // We're sorted not only by sortPattern but also by all prefixes of it.
+ for (int i = 0; i < nFields; ++i) {
+ // Make obj out of fields [0,i]
+ BSONObjIterator it(sortPattern);
+ BSONObjBuilder prefixBob;
+ for (int j = 0; j <= i; ++j) {
+ prefixBob.append(it.next());
+ }
+ _sorts.insert(prefixBob.obj());
+ }
}
- QuerySolutionNode* CountNode::clone() const {
- CountNode* copy = new CountNode();
- cloneBaseData(copy);
-
- copy->sorts = this->sorts;
- copy->indexKeyPattern = this->indexKeyPattern;
- copy->startKey = this->startKey;
- copy->startKeyInclusive = this->startKeyInclusive;
- copy->endKey = this->endKey;
- copy->endKeyInclusive = this->endKeyInclusive;
+ // If we are using the index {a:1, b:1} to answer the predicate {a: 10}, it's sorted
+ // both by the index key pattern and by the pattern {b: 1}.
- return copy;
- }
+ // See if there are any fields with equalities for bounds. We can drop these
+ // from any sort orders created.
+ set<string> equalityFields;
+ if (!bounds.isSimpleRange) {
+ // Figure out how many fields are point intervals.
+ for (size_t i = 0; i < bounds.fields.size(); ++i) {
+ const OrderedIntervalList& oil = bounds.fields[i];
+ if (oil.intervals.size() != 1) {
+ continue;
+ }
+ const Interval& ival = oil.intervals[0];
+ if (!ival.isPoint()) {
+ continue;
+ }
+ equalityFields.insert(oil.name);
+ }
+ }
+
+ if (equalityFields.empty()) {
+ return;
+ }
+
+ // TODO: Each field in equalityFields could be dropped from the sort order since it is
+ // a point interval. The full set of sort orders is as follows:
+ // For each sort in _sorts:
+ // For each drop in powerset(equalityFields):
+ // Remove fields in 'drop' from 'sort' and add resulting sort to output.
+ //
+ // Since this involves a powerset, we don't generate the full set of possibilities.
+ // Instead, we generate sort orders by removing possible contiguous prefixes of equality
+ // predicates. For example, if the key pattern is {a: 1, b: 1, c: 1, d: 1, e: 1}
+    // and there are equality predicates on 'a', 'b', and 'c', then here we add the sort
+ // orders {b: 1, c: 1, d: 1, e: 1} and {c: 1, d: 1, e: 1}. (We also end up adding
+ // {d: 1, e: 1} and {d: 1}, but this is done later on.)
+ BSONObjIterator it(sortPattern);
+ BSONObjBuilder suffixBob;
+ while (it.more()) {
+ BSONElement elt = it.next();
+ // TODO: string slowness. fix when bounds are stringdata not string.
+ if (equalityFields.end() == equalityFields.find(string(elt.fieldName()))) {
+ suffixBob.append(elt);
+ // This field isn't a point interval, can't drop.
+ break;
+ }
+
+ // We add the sort obtained by dropping 'elt' and all preceding elements from the index
+ // key pattern.
+ BSONObjIterator droppedPrefixIt = it;
+ BSONObjBuilder droppedPrefixBob;
+ while (droppedPrefixIt.more()) {
+ droppedPrefixBob.append(droppedPrefixIt.next());
+ }
+ _sorts.insert(droppedPrefixBob.obj());
+ }
+
+ while (it.more()) {
+ suffixBob.append(it.next());
+ }
+
+ // We've found the suffix following the contiguous prefix of equality fields.
+ // Ex. For index {a: 1, b: 1, c: 1, d: 1} and query {a: 3, b: 5}, this suffix
+ // of the key pattern is {c: 1, d: 1}.
+ //
+ // Now we have to add all prefixes of this suffix as possible sort orders.
+ // Ex. Continuing the example from above, we have to include sort orders
+ // {c: 1} and {c: 1, d: 1}.
+ BSONObj filterPointsObj = suffixBob.obj();
+ for (int i = 0; i < filterPointsObj.nFields(); ++i) {
+ // Make obj out of fields [0,i]
+ BSONObjIterator it(filterPointsObj);
+ BSONObjBuilder prefixBob;
+ for (int j = 0; j <= i; ++j) {
+ prefixBob.append(it.next());
+ }
+ _sorts.insert(prefixBob.obj());
+ }
+}
+
+QuerySolutionNode* IndexScanNode::clone() const {
+ IndexScanNode* copy = new IndexScanNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->indexIsMultiKey = this->indexIsMultiKey;
+ copy->direction = this->direction;
+ copy->maxScan = this->maxScan;
+ copy->addKeyMetadata = this->addKeyMetadata;
+ copy->bounds = this->bounds;
+
+ return copy;
+}
+
+//
+// ProjectionNode
+//
+
+void ProjectionNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "PROJ\n";
+ addIndent(ss, indent + 1);
+ *ss << "proj = " << projection.toString() << '\n';
+ addIndent(ss, indent + 1);
+ if (DEFAULT == projType) {
+ *ss << "type = DEFAULT\n";
+ } else if (COVERED_ONE_INDEX == projType) {
+ *ss << "type = COVERED_ONE_INDEX\n";
+ } else {
+ invariant(SIMPLE_DOC == projType);
+ *ss << "type = SIMPLE_DOC\n";
+ }
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* ProjectionNode::clone() const {
+ ProjectionNode* copy = new ProjectionNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->fullExpression = this->fullExpression;
+
+ // This MatchExpression* is owned by the canonical query, not by the
+ // ProjectionNode. Just copying the pointer is fine.
+ copy->projection = this->projection;
+
+ return copy;
+}
+
+//
+// SortNode
+//
+
+void SortNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "SORT\n";
+ addIndent(ss, indent + 1);
+ *ss << "pattern = " << pattern.toString() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "query for bounds = " << query.toString() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "limit = " << limit << '\n';
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* SortNode::clone() const {
+ SortNode* copy = new SortNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->pattern = this->pattern;
+ copy->query = this->query;
+ copy->limit = this->limit;
+
+ return copy;
+}
+
+//
+// LimitNode
+//
+
+
+void LimitNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "LIMIT\n";
+ addIndent(ss, indent + 1);
+ *ss << "limit = " << limit << '\n';
+ addIndent(ss, indent + 1);
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* LimitNode::clone() const {
+ LimitNode* copy = new LimitNode();
+ cloneBaseData(copy);
+
+ copy->limit = this->limit;
+
+ return copy;
+}
+
+//
+// SkipNode
+//
+
+void SkipNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "SKIP\n";
+ addIndent(ss, indent + 1);
+ *ss << "skip= " << skip << '\n';
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* SkipNode::clone() const {
+ SkipNode* copy = new SkipNode();
+ cloneBaseData(copy);
+
+ copy->skip = this->skip;
+
+ return copy;
+}
+
+//
+// GeoNear2DNode
+//
+
+void GeoNear2DNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "GEO_NEAR_2D\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
+ addCommon(ss, indent);
+ *ss << "nearQuery = " << nq->toString() << '\n';
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ *ss << " filter = " << filter->toString();
+ }
+}
+
+QuerySolutionNode* GeoNear2DNode::clone() const {
+ GeoNear2DNode* copy = new GeoNear2DNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->nq = this->nq;
+ copy->baseBounds = this->baseBounds;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->addPointMeta = this->addPointMeta;
+ copy->addDistMeta = this->addDistMeta;
+
+ return copy;
+}
+
+//
+// GeoNear2DSphereNode
+//
+
+void GeoNear2DSphereNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "GEO_NEAR_2DSPHERE\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern.toString() << '\n';
+ addCommon(ss, indent);
+ *ss << "baseBounds = " << baseBounds.toString() << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "nearQuery = " << nq->toString() << '\n';
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ *ss << " filter = " << filter->toString();
+ }
+}
+
+QuerySolutionNode* GeoNear2DSphereNode::clone() const {
+ GeoNear2DSphereNode* copy = new GeoNear2DSphereNode();
+ cloneBaseData(copy);
+
+ copy->_sorts = this->_sorts;
+ copy->nq = this->nq;
+ copy->baseBounds = this->baseBounds;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->addPointMeta = this->addPointMeta;
+ copy->addDistMeta = this->addDistMeta;
+
+ return copy;
+}
+
+//
+// ShardingFilterNode
+//
+
+void ShardingFilterNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "SHARDING_FILTER\n";
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ StringBuilder sb;
+ *ss << "filter:\n";
+ filter->debugString(sb, indent + 2);
+ *ss << sb.str();
+ }
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* ShardingFilterNode::clone() const {
+ ShardingFilterNode* copy = new ShardingFilterNode();
+ cloneBaseData(copy);
+ return copy;
+}
+
+//
+// KeepMutationsNode
+//
+
+void KeepMutationsNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "KEEP_MUTATIONS\n";
+ if (NULL != filter) {
+ addIndent(ss, indent + 1);
+ StringBuilder sb;
+ *ss << "filter:\n";
+ filter->debugString(sb, indent + 2);
+ *ss << sb.str();
+ }
+ addCommon(ss, indent);
+ addIndent(ss, indent + 1);
+ *ss << "Child:" << '\n';
+ children[0]->appendToString(ss, indent + 2);
+}
+
+QuerySolutionNode* KeepMutationsNode::clone() const {
+ KeepMutationsNode* copy = new KeepMutationsNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+
+ return copy;
+}
+
+//
+// DistinctNode
+//
+
+void DistinctNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "DISTINCT\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "direction = " << direction << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "bounds = " << bounds.toString() << '\n';
+}
+
+QuerySolutionNode* DistinctNode::clone() const {
+ DistinctNode* copy = new DistinctNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->direction = this->direction;
+ copy->bounds = this->bounds;
+ copy->fieldNo = this->fieldNo;
+
+ return copy;
+}
+
+//
+// CountNode
+//
+
+void CountNode::appendToString(mongoutils::str::stream* ss, int indent) const {
+ addIndent(ss, indent);
+ *ss << "COUNT\n";
+ addIndent(ss, indent + 1);
+ *ss << "keyPattern = " << indexKeyPattern << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "startKey = " << startKey << '\n';
+ addIndent(ss, indent + 1);
+ *ss << "endKey = " << endKey << '\n';
+}
+
+QuerySolutionNode* CountNode::clone() const {
+ CountNode* copy = new CountNode();
+ cloneBaseData(copy);
+
+ copy->sorts = this->sorts;
+ copy->indexKeyPattern = this->indexKeyPattern;
+ copy->startKey = this->startKey;
+ copy->startKeyInclusive = this->startKeyInclusive;
+ copy->endKey = this->endKey;
+ copy->endKeyInclusive = this->endKeyInclusive;
+
+ return copy;
+}
} // namespace mongo
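(Editorial note, not part of the patch.) As a worked example of the IndexScanNode::computeProperties() logic above: for a forward scan over the key pattern {a: 1, b: 1, c: 1} with a point interval on 'a' and a range on 'b', the node first records the sorts {a: 1}, {a: 1, b: 1}, and {a: 1, b: 1, c: 1}; because 'a' is a contiguous prefix of equality predicates, it also records the dropped-prefix order {b: 1, c: 1} and its prefix {b: 1}. A sort on {b: 1} or {b: 1, c: 1} can therefore be provided by this index scan without a blocking SORT stage.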
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index 45a4b24ecea..cd8cbbbd25a 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -38,694 +38,847 @@
namespace mongo {
- using mongo::fts::FTSQuery;
+using mongo::fts::FTSQuery;
- class GeoNearExpression;
+class GeoNearExpression;
- /**
- * This is an abstract representation of a query plan. It can be transcribed into a tree of
- * PlanStages, which can then be handed to a PlanRunner for execution.
- */
- struct QuerySolutionNode {
- QuerySolutionNode() { }
- virtual ~QuerySolutionNode() {
- for (size_t i = 0; i < children.size(); ++i) {
- delete children[i];
- }
+/**
+ * This is an abstract representation of a query plan. It can be transcribed into a tree of
+ * PlanStages, which can then be handed to a PlanRunner for execution.
+ */
+struct QuerySolutionNode {
+ QuerySolutionNode() {}
+ virtual ~QuerySolutionNode() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ delete children[i];
}
+ }
- /**
- * Return a std::string representation of this node and any children.
- */
- std::string toString() const;
-
- /**
- * What stage should this be transcribed to? See stage_types.h.
- */
- virtual StageType getType() const = 0;
+ /**
+ * Return a std::string representation of this node and any children.
+ */
+ std::string toString() const;
- /**
- * Internal function called by toString()
- *
- * TODO: Consider outputting into a BSONObj or builder thereof.
- */
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const = 0;
+ /**
+ * What stage should this be transcribed to? See stage_types.h.
+ */
+ virtual StageType getType() const = 0;
- //
- // Computed properties
- //
+ /**
+ * Internal function called by toString()
+ *
+ * TODO: Consider outputting into a BSONObj or builder thereof.
+ */
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const = 0;
- /**
- * Must be called before any properties are examined.
- */
- virtual void computeProperties() {
- for (size_t i = 0; i < children.size(); ++i) {
- children[i]->computeProperties();
- }
- }
+ //
+ // Computed properties
+ //
- /**
- * If true, one of these are true:
- * 1. All outputs are already fetched, or
- * 2. There is a projection in place and a fetch is not required.
- *
- * If false, a fetch needs to be placed above the root in order to provide results.
- *
- * Usage: To determine if every possible result that might reach the root
- * will be fully-fetched or not. We don't want any surplus fetches.
- */
- virtual bool fetched() const = 0;
-
- /**
- * Returns true if the tree rooted at this node provides data with the field name 'field'.
- * This data can come from any of the types of the WSM.
- *
- * Usage: If an index-only plan has all the fields we're interested in, we don't
- * have to fetch to show results with those fields.
- *
- * TODO: 'field' is probably more appropriate as a FieldRef or string.
- */
- virtual bool hasField(const std::string& field) const = 0;
-
- /**
- * Returns true if the tree rooted at this node provides data that is sorted by the
- * its location on disk.
- *
- * Usage: If all the children of an STAGE_AND_HASH have this property, we can compute the
- * AND faster by replacing the STAGE_AND_HASH with STAGE_AND_SORTED.
- */
- virtual bool sortedByDiskLoc() const = 0;
-
- /**
- * Return a BSONObjSet representing the possible sort orders of the data stream from this
- * node. If the data is not sorted in any particular fashion, returns an empty set.
- *
- * Usage:
- * 1. If our plan gives us a sort order, we don't have to add a sort stage.
- * 2. If all the children of an OR have the same sort order, we can maintain that
- * sort order with a STAGE_SORT_MERGE instead of STAGE_OR.
- */
- virtual const BSONObjSet& getSort() const = 0;
-
- /**
- * Make a deep copy.
- */
- virtual QuerySolutionNode* clone() const = 0;
-
- /**
- * Copy base query solution data from 'this' to 'other'.
- */
- void cloneBaseData(QuerySolutionNode* other) const {
- for (size_t i = 0; i < this->children.size(); i++) {
- other->children.push_back(this->children[i]->clone());
- }
- if (NULL != this->filter) {
- other->filter.reset(this->filter->shallowClone());
- }
+ /**
+ * Must be called before any properties are examined.
+ */
+ virtual void computeProperties() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ children[i]->computeProperties();
}
+ }
- // These are owned here.
- std::vector<QuerySolutionNode*> children;
-
- // If a stage has a non-NULL filter all values outputted from that stage must pass that
- // filter.
- std::unique_ptr<MatchExpression> filter;
-
- protected:
- /**
- * Formatting helper used by toString().
- */
- static void addIndent(mongoutils::str::stream* ss, int level);
-
- /**
- * Every solution node has properties and this adds the debug info for the
- * properties.
- */
- void addCommon(mongoutils::str::stream* ss, int indent) const;
+ /**
+     * If true, one of these is true:
+ * 1. All outputs are already fetched, or
+ * 2. There is a projection in place and a fetch is not required.
+ *
+ * If false, a fetch needs to be placed above the root in order to provide results.
+ *
+ * Usage: To determine if every possible result that might reach the root
+ * will be fully-fetched or not. We don't want any surplus fetches.
+ */
+ virtual bool fetched() const = 0;
- private:
- MONGO_DISALLOW_COPYING(QuerySolutionNode);
- };
+ /**
+ * Returns true if the tree rooted at this node provides data with the field name 'field'.
+ * This data can come from any of the types of the WSM.
+ *
+ * Usage: If an index-only plan has all the fields we're interested in, we don't
+ * have to fetch to show results with those fields.
+ *
+ * TODO: 'field' is probably more appropriate as a FieldRef or string.
+ */
+ virtual bool hasField(const std::string& field) const = 0;
/**
- * A QuerySolution must be entirely self-contained and own everything inside of it.
+     * Returns true if the tree rooted at this node provides data that is sorted by
+     * its location on disk.
*
- * A tree of stages may be built from a QuerySolution. The QuerySolution must outlive the tree
- * of stages.
+     * Usage: If all the children of a STAGE_AND_HASH have this property, we can compute the
+ * AND faster by replacing the STAGE_AND_HASH with STAGE_AND_SORTED.
*/
- struct QuerySolution {
- QuerySolution() : hasBlockingStage(false), indexFilterApplied(false) { }
+ virtual bool sortedByDiskLoc() const = 0;
- // Owned here.
- std::unique_ptr<QuerySolutionNode> root;
+ /**
+ * Return a BSONObjSet representing the possible sort orders of the data stream from this
+ * node. If the data is not sorted in any particular fashion, returns an empty set.
+ *
+ * Usage:
+ * 1. If our plan gives us a sort order, we don't have to add a sort stage.
+ * 2. If all the children of an OR have the same sort order, we can maintain that
+ * sort order with a STAGE_SORT_MERGE instead of STAGE_OR.
+ */
+ virtual const BSONObjSet& getSort() const = 0;
- // Any filters in root or below point into this object. Must be owned.
- BSONObj filterData;
+ /**
+ * Make a deep copy.
+ */
+ virtual QuerySolutionNode* clone() const = 0;
- // There are two known scenarios in which a query solution might potentially block:
- //
- // Sort stage:
- // If the solution has a sort stage, the sort wasn't provided by an index, so we might want
- // to scan an index to provide that sort in a non-blocking fashion.
- //
- // Hashed AND stage:
- // The hashed AND stage buffers data from multiple index scans and could block. In that case,
- // we would want to fall back on an alternate non-blocking solution.
- bool hasBlockingStage;
-
- // Runner executing this solution might be interested in knowing
- // if the planning process for this solution was based on filtered indices.
- bool indexFilterApplied;
-
- // Owned here. Used by the plan cache.
- std::unique_ptr<SolutionCacheData> cacheData;
-
- /**
- * Output a human-readable std::string representing the plan.
- */
- std::string toString() {
- if (NULL == root) {
- return "empty query solution";
- }
-
- mongoutils::str::stream ss;
- root->appendToString(&ss, 0);
- return ss;
+ /**
+ * Copy base query solution data from 'this' to 'other'.
+ */
+ void cloneBaseData(QuerySolutionNode* other) const {
+ for (size_t i = 0; i < this->children.size(); i++) {
+ other->children.push_back(this->children[i]->clone());
}
- private:
- MONGO_DISALLOW_COPYING(QuerySolution);
- };
-
- struct TextNode : public QuerySolutionNode {
- TextNode() { }
- virtual ~TextNode() { }
-
- virtual StageType getType() const { return STAGE_TEXT; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- // Text's return is LOC_AND_UNOWNED_OBJ or LOC_AND_OWNED_OBJ so it's fetched and has all
- // fields.
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return _sort; }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sort;
-
- BSONObj indexKeyPattern;
- std::string query;
- std::string language;
- bool caseSensitive;
-
- // "Prefix" fields of a text index can handle equality predicates. We group them with the
- // text node while creating the text leaf node and convert them into a BSONObj index prefix
- // when we finish the text leaf node.
- BSONObj indexPrefix;
- };
-
- struct CollectionScanNode : public QuerySolutionNode {
- CollectionScanNode();
- virtual ~CollectionScanNode() { }
-
- virtual StageType getType() const { return STAGE_COLLSCAN; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return _sort; }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sort;
-
- // Name of the namespace.
- std::string name;
-
- // Should we make a tailable cursor?
- bool tailable;
-
- int direction;
-
- // maxScan option to .find() limits how many docs we look at.
- int maxScan;
- };
-
- struct AndHashNode : public QuerySolutionNode {
- AndHashNode();
- virtual ~AndHashNode();
-
- virtual StageType getType() const { return STAGE_AND_HASH; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const;
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return children.back()->getSort(); }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sort;
- };
-
- struct AndSortedNode : public QuerySolutionNode {
- AndSortedNode();
- virtual ~AndSortedNode();
-
- virtual StageType getType() const { return STAGE_AND_SORTED; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const;
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const { return true; }
- const BSONObjSet& getSort() const { return _sort; }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sort;
- };
-
- struct OrNode : public QuerySolutionNode {
- OrNode();
- virtual ~OrNode();
-
- virtual StageType getType() const { return STAGE_OR; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const;
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const {
- // Even if our children are sorted by their diskloc or other fields, we don't maintain
- // any order on the output.
- return false;
+ if (NULL != this->filter) {
+ other->filter.reset(this->filter->shallowClone());
}
- const BSONObjSet& getSort() const { return _sort; }
-
- QuerySolutionNode* clone() const;
+ }
- BSONObjSet _sort;
-
- bool dedup;
- };
+ // These are owned here.
+ std::vector<QuerySolutionNode*> children;
- struct MergeSortNode : public QuerySolutionNode {
- MergeSortNode();
- virtual ~MergeSortNode();
+ // If a stage has a non-NULL filter all values outputted from that stage must pass that
+ // filter.
+ std::unique_ptr<MatchExpression> filter;
- virtual StageType getType() const { return STAGE_SORT_MERGE; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const;
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const { return false; }
-
- const BSONObjSet& getSort() const { return _sorts; }
-
- QuerySolutionNode* clone() const;
-
- virtual void computeProperties() {
- for (size_t i = 0; i < children.size(); ++i) {
- children[i]->computeProperties();
- }
- _sorts.clear();
- _sorts.insert(sort);
- }
-
- BSONObjSet _sorts;
-
- BSONObj sort;
- bool dedup;
- };
-
- struct FetchNode : public QuerySolutionNode {
- FetchNode();
- virtual ~FetchNode() { }
-
- virtual StageType getType() const { return STAGE_FETCH; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
- const BSONObjSet& getSort() const { return children[0]->getSort(); }
-
- QuerySolutionNode* clone() const;
-
- BSONObjSet _sorts;
- };
-
- struct IndexScanNode : public QuerySolutionNode {
- IndexScanNode();
- virtual ~IndexScanNode() { }
-
- virtual void computeProperties();
-
- virtual StageType getType() const { return STAGE_IXSCAN; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return false; }
- bool hasField(const std::string& field) const;
- bool sortedByDiskLoc() const;
- const BSONObjSet& getSort() const { return _sorts; }
-
- QuerySolutionNode* clone() const;
+protected:
+ /**
+ * Formatting helper used by toString().
+ */
+ static void addIndent(mongoutils::str::stream* ss, int level);
- BSONObjSet _sorts;
+ /**
+ * Every solution node has properties and this adds the debug info for the
+ * properties.
+ */
+ void addCommon(mongoutils::str::stream* ss, int indent) const;
- BSONObj indexKeyPattern;
- bool indexIsMultiKey;
+private:
+ MONGO_DISALLOW_COPYING(QuerySolutionNode);
+};
- int direction;
+/**
+ * A QuerySolution must be entirely self-contained and own everything inside of it.
+ *
+ * A tree of stages may be built from a QuerySolution. The QuerySolution must outlive the tree
+ * of stages.
+ */
+struct QuerySolution {
+ QuerySolution() : hasBlockingStage(false), indexFilterApplied(false) {}
- // maxScan option to .find() limits how many docs we look at.
- int maxScan;
+ // Owned here.
+ std::unique_ptr<QuerySolutionNode> root;
- // If there's a 'returnKey' projection we add key metadata.
- bool addKeyMetadata;
+ // Any filters in root or below point into this object. Must be owned.
+ BSONObj filterData;
- // BIG NOTE:
- // If you use simple bounds, we'll use whatever index access method the keypattern implies.
- // If you use the complex bounds, we force Btree access.
- // The complex bounds require Btree access.
- IndexBounds bounds;
- };
+ // There are two known scenarios in which a query solution might potentially block:
+ //
+ // Sort stage:
+ // If the solution has a sort stage, the sort wasn't provided by an index, so we might want
+ // to scan an index to provide that sort in a non-blocking fashion.
+ //
+ // Hashed AND stage:
+ // The hashed AND stage buffers data from multiple index scans and could block. In that case,
+ // we would want to fall back on an alternate non-blocking solution.
+ bool hasBlockingStage;
- struct ProjectionNode : public QuerySolutionNode {
- /**
- * We have a few implementations of the projection functionality. The most general
- * implementation 'DEFAULT' is much slower than the fast-path implementations
- * below. We only really have all the information available to choose a projection
- * implementation at planning time.
- */
- enum ProjectionType {
- // This is the most general implementation of the projection functionality. It handles
- // every case.
- DEFAULT,
-
- // This is a fast-path for when the projection is fully covered by one index.
- COVERED_ONE_INDEX,
-
- // This is a fast-path for when the projection only has inclusions on non-dotted fields.
- SIMPLE_DOC,
- };
-
- ProjectionNode() : fullExpression(NULL), projType(DEFAULT) { }
-
- virtual ~ProjectionNode() { }
-
- virtual StageType getType() const { return STAGE_PROJECTION; }
-
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- /**
- * Data from the projection node is considered fetch iff the child provides fetched data.
- */
- bool fetched() const { return children[0]->fetched(); }
-
- bool hasField(const std::string& field) const {
- // TODO: Returning false isn't always the right answer -- we may either be including
- // certain fields, or we may be dropping fields (in which case hasField returns true).
- //
- // Given that projection sits on top of everything else in .find() it doesn't matter
- // what we do here.
- return false;
- }
+ // Runner executing this solution might be interested in knowing
+ // if the planning process for this solution was based on filtered indices.
+ bool indexFilterApplied;
- bool sortedByDiskLoc() const {
- // Projections destroy the RecordId. By returning true here, this kind of implies that a
- // fetch could still be done upstream.
- //
- // Perhaps this should be false to not imply that there *is* a RecordId? Kind of a
- // corner case.
- return children[0]->sortedByDiskLoc();
- }
+ // Owned here. Used by the plan cache.
+ std::unique_ptr<SolutionCacheData> cacheData;
- const BSONObjSet& getSort() const {
- // TODO: If we're applying a projection that maintains sort order, the prefix of the
- // sort order we project is the sort order.
- return _sorts;
+ /**
+ * Output a human-readable std::string representing the plan.
+ */
+ std::string toString() {
+ if (NULL == root) {
+ return "empty query solution";
}
- QuerySolutionNode* clone() const;
+ mongoutils::str::stream ss;
+ root->appendToString(&ss, 0);
+ return ss;
+ }
+
+private:
+ MONGO_DISALLOW_COPYING(QuerySolution);
+};
+
+struct TextNode : public QuerySolutionNode {
+ TextNode() {}
+ virtual ~TextNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_TEXT;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ // Text's return is LOC_AND_UNOWNED_OBJ or LOC_AND_OWNED_OBJ so it's fetched and has all
+ // fields.
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sort;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sort;
+
+ BSONObj indexKeyPattern;
+ std::string query;
+ std::string language;
+ bool caseSensitive;
+
+ // "Prefix" fields of a text index can handle equality predicates. We group them with the
+ // text node while creating the text leaf node and convert them into a BSONObj index prefix
+ // when we finish the text leaf node.
+ BSONObj indexPrefix;
+};
+
+struct CollectionScanNode : public QuerySolutionNode {
+ CollectionScanNode();
+ virtual ~CollectionScanNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_COLLSCAN;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sort;
+ }
+
+ QuerySolutionNode* clone() const;
- BSONObjSet _sorts;
+ BSONObjSet _sort;
+
+ // Name of the namespace.
+ std::string name;
+
+ // Should we make a tailable cursor?
+ bool tailable;
+
+ int direction;
+
+ // maxScan option to .find() limits how many docs we look at.
+ int maxScan;
+};
+
+struct AndHashNode : public QuerySolutionNode {
+ AndHashNode();
+ virtual ~AndHashNode();
+
+ virtual StageType getType() const {
+ return STAGE_AND_HASH;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const;
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return children.back()->getSort();
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sort;
+};
+
+struct AndSortedNode : public QuerySolutionNode {
+ AndSortedNode();
+ virtual ~AndSortedNode();
+
+ virtual StageType getType() const {
+ return STAGE_AND_SORTED;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const;
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const {
+ return true;
+ }
+ const BSONObjSet& getSort() const {
+ return _sort;
+ }
- // The full query tree. Needed when we have positional operators.
- // Owned in the CanonicalQuery, not here.
- MatchExpression* fullExpression;
+ QuerySolutionNode* clone() const;
- // Given that we don't yet have a MatchExpression analogue for the expression language, we
- // use a BSONObj.
- BSONObj projection;
+ BSONObjSet _sort;
+};
- // What implementation of the projection algorithm should we use?
- ProjectionType projType;
+struct OrNode : public QuerySolutionNode {
+ OrNode();
+ virtual ~OrNode();
- // Only meaningful if projType == COVERED_ONE_INDEX. This is the key pattern of the index
- // supplying our covered data. We can pre-compute which fields to include and cache that
- // data for later if we know we only have one index.
- BSONObj coveredKeyObj;
- };
-
- struct SortNode : public QuerySolutionNode {
- SortNode() : limit(0) { }
- virtual ~SortNode() { }
+ virtual StageType getType() const {
+ return STAGE_OR;
+ }
- virtual StageType getType() const { return STAGE_SORT; }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+ bool fetched() const;
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const {
+ // Even if our children are sorted by their diskloc or other fields, we don't maintain
+ // any order on the output.
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sort;
+ }
- bool fetched() const { return children[0]->fetched(); }
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
- bool sortedByDiskLoc() const { return false; }
+ QuerySolutionNode* clone() const;
- const BSONObjSet& getSort() const { return _sorts; }
+ BSONObjSet _sort;
- QuerySolutionNode* clone() const;
+ bool dedup;
+};
- virtual void computeProperties() {
- for (size_t i = 0; i < children.size(); ++i) {
- children[i]->computeProperties();
- }
- _sorts.clear();
- _sorts.insert(pattern);
- }
+struct MergeSortNode : public QuerySolutionNode {
+ MergeSortNode();
+ virtual ~MergeSortNode();
- BSONObjSet _sorts;
+ virtual StageType getType() const {
+ return STAGE_SORT_MERGE;
+ }
- BSONObj pattern;
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
- BSONObj query;
-
- // Sum of both limit and skip count in the parsed query.
- size_t limit;
- };
+ bool fetched() const;
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const {
+ return false;
+ }
- struct LimitNode : public QuerySolutionNode {
- LimitNode() { }
- virtual ~LimitNode() { }
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
- virtual StageType getType() const { return STAGE_LIMIT; }
+ QuerySolutionNode* clone() const;
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return children[0]->fetched(); }
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
- bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
- const BSONObjSet& getSort() const { return children[0]->getSort(); }
-
- QuerySolutionNode* clone() const;
-
- int limit;
- };
-
- struct SkipNode : public QuerySolutionNode {
- SkipNode() { }
- virtual ~SkipNode() { }
-
- virtual StageType getType() const { return STAGE_SKIP; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return children[0]->fetched(); }
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
- bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
- const BSONObjSet& getSort() const { return children[0]->getSort(); }
-
- QuerySolutionNode* clone() const;
+ virtual void computeProperties() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ children[i]->computeProperties();
+ }
+ _sorts.clear();
+ _sorts.insert(sort);
+ }
- int skip;
- };
+ BSONObjSet _sorts;
- // This is a standalone stage.
- struct GeoNear2DNode : public QuerySolutionNode {
- GeoNear2DNode() : addPointMeta(false), addDistMeta(false) { }
- virtual ~GeoNear2DNode() { }
+ BSONObj sort;
+ bool dedup;
+};
- virtual StageType getType() const { return STAGE_GEO_NEAR_2D; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+struct FetchNode : public QuerySolutionNode {
+ FetchNode();
+ virtual ~FetchNode() {}
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return _sorts; }
+ virtual StageType getType() const {
+ return STAGE_FETCH;
+ }
- QuerySolutionNode* clone() const;
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
- BSONObjSet _sorts;
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return children[0]->sortedByDiskLoc();
+ }
+ const BSONObjSet& getSort() const {
+ return children[0]->getSort();
+ }
- // Not owned here
- const GeoNearExpression* nq;
- IndexBounds baseBounds;
+ QuerySolutionNode* clone() const;
- BSONObj indexKeyPattern;
- bool addPointMeta;
- bool addDistMeta;
- };
+ BSONObjSet _sorts;
+};
- // This is actually its own standalone stage.
- struct GeoNear2DSphereNode : public QuerySolutionNode {
- GeoNear2DSphereNode() : addPointMeta(false), addDistMeta(false) { }
- virtual ~GeoNear2DSphereNode() { }
+struct IndexScanNode : public QuerySolutionNode {
+ IndexScanNode();
+ virtual ~IndexScanNode() {}
- virtual StageType getType() const { return STAGE_GEO_NEAR_2DSPHERE; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+ virtual void computeProperties();
- bool fetched() const { return true; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return _sorts; }
+ virtual StageType getType() const {
+ return STAGE_IXSCAN;
+ }
- QuerySolutionNode* clone() const;
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
- BSONObjSet _sorts;
+ bool fetched() const {
+ return false;
+ }
+ bool hasField(const std::string& field) const;
+ bool sortedByDiskLoc() const;
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
- // Not owned here
- const GeoNearExpression* nq;
- IndexBounds baseBounds;
+ QuerySolutionNode* clone() const;
- BSONObj indexKeyPattern;
- bool addPointMeta;
- bool addDistMeta;
- };
+ BSONObjSet _sorts;
- //
- // Internal nodes used to provide functionality
- //
+ BSONObj indexKeyPattern;
+ bool indexIsMultiKey;
- /**
- * If we're answering a query on a sharded cluster, docs must be checked against the shard key
- * to ensure that we don't return data that shouldn't be there. This must be done prior to
- * projection, and in fact should be done as early as possible to avoid propagating stale data
- * through the pipeline.
- */
- struct ShardingFilterNode : public QuerySolutionNode {
- ShardingFilterNode() { }
- virtual ~ShardingFilterNode() { }
+ int direction;
- virtual StageType getType() const { return STAGE_SHARDING_FILTER; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+ // maxScan option to .find() limits how many docs we look at.
+ int maxScan;
- bool fetched() const { return children[0]->fetched(); }
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
- bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
- const BSONObjSet& getSort() const { return children[0]->getSort(); }
+ // If there's a 'returnKey' projection we add key metadata.
+ bool addKeyMetadata;
- QuerySolutionNode* clone() const;
- };
+ // BIG NOTE:
+ // If you use simple bounds, we'll use whatever index access method the keypattern implies.
+ // If you use the complex bounds, we force Btree access.
+ // The complex bounds require Btree access.
+ IndexBounds bounds;
+};
+struct ProjectionNode : public QuerySolutionNode {
/**
- * If documents mutate or are deleted during a query, we can (in some cases) fetch them
- * and still return them. This stage merges documents that have been mutated or deleted
- * into the query result stream.
+ * We have a few implementations of the projection functionality. The most general
+ * implementation 'DEFAULT' is much slower than the fast-path implementations
+ * below. We only really have all the information available to choose a projection
+ * implementation at planning time.
*/
- struct KeepMutationsNode : public QuerySolutionNode {
- KeepMutationsNode() { }
- virtual ~KeepMutationsNode() { }
-
- virtual StageType getType() const { return STAGE_KEEP_MUTATIONS; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- // Any flagged results are OWNED_OBJ and therefore we're covered if our child is.
- bool fetched() const { return children[0]->fetched(); }
-
- // Any flagged results are OWNED_OBJ and as such they'll have any field we need.
- bool hasField(const std::string& field) const { return children[0]->hasField(field); }
+ enum ProjectionType {
+ // This is the most general implementation of the projection functionality. It handles
+ // every case.
+ DEFAULT,
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return sorts; }
+ // This is a fast-path for when the projection is fully covered by one index.
+ COVERED_ONE_INDEX,
- QuerySolutionNode* clone() const;
-
- // Since we merge in flagged results we have no sort order.
- BSONObjSet sorts;
+ // This is a fast-path for when the projection only has inclusions on non-dotted fields.
+ SIMPLE_DOC,
};
- /**
- * Distinct queries only want one value for a given field. We run an index scan but
- * *always* skip over the current key to the next key.
- */
- struct DistinctNode : public QuerySolutionNode {
- DistinctNode() { }
- virtual ~DistinctNode() { }
-
- virtual StageType getType() const { return STAGE_DISTINCT_SCAN; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- // This stage is created "on top" of normal planning and as such the properties
- // below don't really matter.
- bool fetched() const { return false; }
- bool hasField(const std::string& field) const { return !indexKeyPattern[field].eoo(); }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return sorts; }
+ ProjectionNode() : fullExpression(NULL), projType(DEFAULT) {}
- QuerySolutionNode* clone() const;
+ virtual ~ProjectionNode() {}
- BSONObjSet sorts;
+ virtual StageType getType() const {
+ return STAGE_PROJECTION;
+ }
- BSONObj indexKeyPattern;
- int direction;
- IndexBounds bounds;
- // We are distinct-ing over the 'fieldNo'-th field of 'indexKeyPattern'.
- int fieldNo;
- };
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
/**
- * Some count queries reduce to counting how many keys are between two entries in a
- * Btree.
+ * Data from the projection node is considered fetch iff the child provides fetched data.
*/
- struct CountNode : public QuerySolutionNode {
- CountNode() { }
- virtual ~CountNode() { }
-
- virtual StageType getType() const { return STAGE_COUNT_SCAN; }
- virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
-
- bool fetched() const { return false; }
- bool hasField(const std::string& field) const { return true; }
- bool sortedByDiskLoc() const { return false; }
- const BSONObjSet& getSort() const { return sorts; }
+ bool fetched() const {
+ return children[0]->fetched();
+ }
- QuerySolutionNode* clone() const;
+ bool hasField(const std::string& field) const {
+ // TODO: Returning false isn't always the right answer -- we may either be including
+ // certain fields, or we may be dropping fields (in which case hasField returns true).
+ //
+ // Given that projection sits on top of everything else in .find() it doesn't matter
+ // what we do here.
+ return false;
+ }
+
+ bool sortedByDiskLoc() const {
+ // Projections destroy the RecordId. By returning true here, this kind of implies that a
+ // fetch could still be done upstream.
+ //
+ // Perhaps this should be false to not imply that there *is* a RecordId? Kind of a
+ // corner case.
+ return children[0]->sortedByDiskLoc();
+ }
+
+ const BSONObjSet& getSort() const {
+ // TODO: If we're applying a projection that maintains sort order, the prefix of the
+ // sort order we project is the sort order.
+ return _sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sorts;
+
+ // The full query tree. Needed when we have positional operators.
+ // Owned in the CanonicalQuery, not here.
+ MatchExpression* fullExpression;
+
+ // Given that we don't yet have a MatchExpression analogue for the expression language, we
+ // use a BSONObj.
+ BSONObj projection;
+
+ // What implementation of the projection algorithm should we use?
+ ProjectionType projType;
+
+ // Only meaningful if projType == COVERED_ONE_INDEX. This is the key pattern of the index
+ // supplying our covered data. We can pre-compute which fields to include and cache that
+ // data for later if we know we only have one index.
+ BSONObj coveredKeyObj;
+};
+
+struct SortNode : public QuerySolutionNode {
+ SortNode() : limit(0) {}
+ virtual ~SortNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_SORT;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ virtual void computeProperties() {
+ for (size_t i = 0; i < children.size(); ++i) {
+ children[i]->computeProperties();
+ }
+ _sorts.clear();
+ _sorts.insert(pattern);
+ }
+
+ BSONObjSet _sorts;
+
+ BSONObj pattern;
+
+ BSONObj query;
+
+ // Sum of both limit and skip count in the parsed query.
+ size_t limit;
+};
+
+struct LimitNode : public QuerySolutionNode {
+ LimitNode() {}
+ virtual ~LimitNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_LIMIT;
+ }
+
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+ bool sortedByDiskLoc() const {
+ return children[0]->sortedByDiskLoc();
+ }
+ const BSONObjSet& getSort() const {
+ return children[0]->getSort();
+ }
+
+ QuerySolutionNode* clone() const;
+
+ int limit;
+};
+
+struct SkipNode : public QuerySolutionNode {
+ SkipNode() {}
+ virtual ~SkipNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_SKIP;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+ bool sortedByDiskLoc() const {
+ return children[0]->sortedByDiskLoc();
+ }
+ const BSONObjSet& getSort() const {
+ return children[0]->getSort();
+ }
+
+ QuerySolutionNode* clone() const;
+
+ int skip;
+};
+
+// This is a standalone stage.
+struct GeoNear2DNode : public QuerySolutionNode {
+ GeoNear2DNode() : addPointMeta(false), addDistMeta(false) {}
+ virtual ~GeoNear2DNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_GEO_NEAR_2D;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sorts;
+
+ // Not owned here
+ const GeoNearExpression* nq;
+ IndexBounds baseBounds;
+
+ BSONObj indexKeyPattern;
+ bool addPointMeta;
+ bool addDistMeta;
+};
+
+// This is actually its own standalone stage.
+struct GeoNear2DSphereNode : public QuerySolutionNode {
+ GeoNear2DSphereNode() : addPointMeta(false), addDistMeta(false) {}
+ virtual ~GeoNear2DSphereNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_GEO_NEAR_2DSPHERE;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return true;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return _sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet _sorts;
+
+ // Not owned here
+ const GeoNearExpression* nq;
+ IndexBounds baseBounds;
+
+ BSONObj indexKeyPattern;
+ bool addPointMeta;
+ bool addDistMeta;
+};
+
+//
+// Internal nodes used to provide functionality
+//
- BSONObjSet sorts;
+/**
+ * If we're answering a query on a sharded cluster, docs must be checked against the shard key
+ * to ensure that we don't return data that shouldn't be there. This must be done prior to
+ * projection, and in fact should be done as early as possible to avoid propagating stale data
+ * through the pipeline.
+ */
+struct ShardingFilterNode : public QuerySolutionNode {
+ ShardingFilterNode() {}
+ virtual ~ShardingFilterNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_SHARDING_FILTER;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+ bool sortedByDiskLoc() const {
+ return children[0]->sortedByDiskLoc();
+ }
+ const BSONObjSet& getSort() const {
+ return children[0]->getSort();
+ }
+
+ QuerySolutionNode* clone() const;
+};
- BSONObj indexKeyPattern;
+/**
+ * If documents mutate or are deleted during a query, we can (in some cases) fetch them
+ * and still return them. This stage merges documents that have been mutated or deleted
+ * into the query result stream.
+ */
+struct KeepMutationsNode : public QuerySolutionNode {
+ KeepMutationsNode() {}
+ virtual ~KeepMutationsNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_KEEP_MUTATIONS;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ // Any flagged results are OWNED_OBJ and therefore we're covered if our child is.
+ bool fetched() const {
+ return children[0]->fetched();
+ }
+
+ // Any flagged results are OWNED_OBJ and as such they'll have any field we need.
+ bool hasField(const std::string& field) const {
+ return children[0]->hasField(field);
+ }
+
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ // Since we merge in flagged results we have no sort order.
+ BSONObjSet sorts;
+};
- BSONObj startKey;
- bool startKeyInclusive;
+/**
+ * Distinct queries only want one value for a given field. We run an index scan but
+ * *always* skip over the current key to the next key.
+ */
+struct DistinctNode : public QuerySolutionNode {
+ DistinctNode() {}
+ virtual ~DistinctNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_DISTINCT_SCAN;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ // This stage is created "on top" of normal planning and as such the properties
+ // below don't really matter.
+ bool fetched() const {
+ return false;
+ }
+ bool hasField(const std::string& field) const {
+ return !indexKeyPattern[field].eoo();
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet sorts;
+
+ BSONObj indexKeyPattern;
+ int direction;
+ IndexBounds bounds;
+ // We are distinct-ing over the 'fieldNo'-th field of 'indexKeyPattern'.
+ int fieldNo;
+};
- BSONObj endKey;
- bool endKeyInclusive;
- };
+/**
+ * Some count queries reduce to counting how many keys are between two entries in a
+ * Btree.
+ */
+struct CountNode : public QuerySolutionNode {
+ CountNode() {}
+ virtual ~CountNode() {}
+
+ virtual StageType getType() const {
+ return STAGE_COUNT_SCAN;
+ }
+ virtual void appendToString(mongoutils::str::stream* ss, int indent) const;
+
+ bool fetched() const {
+ return false;
+ }
+ bool hasField(const std::string& field) const {
+ return true;
+ }
+ bool sortedByDiskLoc() const {
+ return false;
+ }
+ const BSONObjSet& getSort() const {
+ return sorts;
+ }
+
+ QuerySolutionNode* clone() const;
+
+ BSONObjSet sorts;
+
+ BSONObj indexKeyPattern;
+
+ BSONObj startKey;
+ bool startKeyInclusive;
+
+ BSONObj endKey;
+ bool endKeyInclusive;
+};
} // namespace mongo
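
For orientation, here is a minimal sketch (not part of the patch) of how a caller might walk the QuerySolutionNode tree declared above, using only members visible in this header: getType(), the owned children vector, and the optional filter. The countNodes helper and the include path are assumptions for illustration, not MongoDB APIs.

// Illustrative sketch only -- not part of this change.
// Assumes the QuerySolutionNode declaration above is visible (header path assumed).
#include "mongo/db/query/query_solution.h"

namespace {
// Hypothetical helper: recursively count the nodes in a solution tree.
size_t countNodes(const mongo::QuerySolutionNode* node) {
    if (NULL == node) {
        return 0;
    }
    size_t total = 1;
    for (size_t i = 0; i < node->children.size(); ++i) {
        total += countNodes(node->children[i]);  // children are owned by their parent
    }
    return total;
}
}  // namespace
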
diff --git a/src/mongo/db/query/query_yield.cpp b/src/mongo/db/query/query_yield.cpp
index 5bd1733f0c6..4e0d463a83e 100644
--- a/src/mongo/db/query/query_yield.cpp
+++ b/src/mongo/db/query/query_yield.cpp
@@ -36,40 +36,40 @@
namespace mongo {
- // static
- void QueryYield::yieldAllLocks(OperationContext* txn, RecordFetcher* fetcher) {
- // Things have to happen here in a specific order:
- // 1) Tell the RecordFetcher to do any setup which needs to happen inside locks
- // 2) Release lock mgr locks
- // 3) Go to sleep
- // 4) Touch the record we're yielding on, if there is one (RecordFetcher::fetch)
- // 5) Reacquire lock mgr locks
+// static
+void QueryYield::yieldAllLocks(OperationContext* txn, RecordFetcher* fetcher) {
+ // Things have to happen here in a specific order:
+ // 1) Tell the RecordFetcher to do any setup which needs to happen inside locks
+ // 2) Release lock mgr locks
+ // 3) Go to sleep
+ // 4) Touch the record we're yielding on, if there is one (RecordFetcher::fetch)
+ // 5) Reacquire lock mgr locks
- Locker* locker = txn->lockState();
+ Locker* locker = txn->lockState();
- Locker::LockSnapshot snapshot;
+ Locker::LockSnapshot snapshot;
- if (fetcher) {
- fetcher->setup();
- }
-
- // Nothing was unlocked, just return, yielding is pointless.
- if (!locker->saveLockStateAndUnlock(&snapshot)) {
- return;
- }
+ if (fetcher) {
+ fetcher->setup();
+ }
- // Top-level locks are freed, release any potential low-level (storage engine-specific
- // locks). If we are yielding, we are at a safe place to do so.
- txn->recoveryUnit()->abandonSnapshot();
+ // Nothing was unlocked, just return, yielding is pointless.
+ if (!locker->saveLockStateAndUnlock(&snapshot)) {
+ return;
+ }
- // Track the number of yields in CurOp.
- CurOp::get(txn)->yielded();
+ // Top-level locks are freed, release any potential low-level (storage engine-specific
+ // locks). If we are yielding, we are at a safe place to do so.
+ txn->recoveryUnit()->abandonSnapshot();
- if (fetcher) {
- fetcher->fetch();
- }
+ // Track the number of yields in CurOp.
+ CurOp::get(txn)->yielded();
- locker->restoreLockState(snapshot);
+ if (fetcher) {
+ fetcher->fetch();
}
-} // namespace mongo
+ locker->restoreLockState(snapshot);
+}
+
+} // namespace mongo
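
As a usage note (not part of the patch): a caller that wants to give up its locks at a yield point would invoke the helper above roughly as follows. The maybeYield wrapper is hypothetical; passing a null RecordFetcher is allowed, in which case only the unlock/relock sequence runs.

// Illustrative sketch only -- not part of this change.
#include "mongo/db/query/query_yield.h"

namespace {
// Hypothetical wrapper around the yield helper defined above.
void maybeYield(mongo::OperationContext* txn, mongo::RecordFetcher* fetcher) {
    // fetcher may be NULL; yieldAllLocks() then skips the setup()/fetch() steps
    // and only releases and reacquires the lock manager locks.
    mongo::QueryYield::yieldAllLocks(txn, fetcher);
}
}  // namespace
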
diff --git a/src/mongo/db/query/query_yield.h b/src/mongo/db/query/query_yield.h
index 14f018368af..a6db563d195 100644
--- a/src/mongo/db/query/query_yield.h
+++ b/src/mongo/db/query/query_yield.h
@@ -30,24 +30,23 @@
namespace mongo {
- class OperationContext;
- class RecordFetcher;
+class OperationContext;
+class RecordFetcher;
+/**
+ * See the documentation for yieldAllLocks(...).
+ */
+class QueryYield {
+ QueryYield();
+
+public:
/**
- * See the documentation for yieldAllLocks(...).
+ * If not in a nested context, unlocks all locks, suggests to the operating system to
+ * switch to another thread, and then reacquires all locks.
+ *
+ * If in a nested context (eg DBDirectClient), does nothing.
*/
- class QueryYield {
- QueryYield();
-
- public:
-
- /**
- * If not in a nested context, unlocks all locks, suggests to the operating system to
- * switch to another thread, and then reacquires all locks.
- *
- * If in a nested context (eg DBDirectClient), does nothing.
- */
- static void yieldAllLocks(OperationContext* txn, RecordFetcher* fetcher);
- };
+ static void yieldAllLocks(OperationContext* txn, RecordFetcher* fetcher);
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index ba6a439dba4..c5a922a6aaf 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -55,294 +55,297 @@
namespace mongo {
- using std::unique_ptr;
-
- PlanStage* buildStages(OperationContext* txn,
- Collection* collection,
- const QuerySolution& qsol,
- const QuerySolutionNode* root,
- WorkingSet* ws) {
- if (STAGE_COLLSCAN == root->getType()) {
- const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root);
- CollectionScanParams params;
- params.collection = collection;
- params.tailable = csn->tailable;
- params.direction = (csn->direction == 1) ? CollectionScanParams::FORWARD
- : CollectionScanParams::BACKWARD;
- params.maxScan = csn->maxScan;
- return new CollectionScan(txn, params, ws, csn->filter.get());
+using std::unique_ptr;
+
+PlanStage* buildStages(OperationContext* txn,
+ Collection* collection,
+ const QuerySolution& qsol,
+ const QuerySolutionNode* root,
+ WorkingSet* ws) {
+ if (STAGE_COLLSCAN == root->getType()) {
+ const CollectionScanNode* csn = static_cast<const CollectionScanNode*>(root);
+ CollectionScanParams params;
+ params.collection = collection;
+ params.tailable = csn->tailable;
+ params.direction =
+ (csn->direction == 1) ? CollectionScanParams::FORWARD : CollectionScanParams::BACKWARD;
+ params.maxScan = csn->maxScan;
+ return new CollectionScan(txn, params, ws, csn->filter.get());
+ } else if (STAGE_IXSCAN == root->getType()) {
+ const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
+
+ if (NULL == collection) {
+ warning() << "Can't ixscan null namespace";
+ return NULL;
}
- else if (STAGE_IXSCAN == root->getType()) {
- const IndexScanNode* ixn = static_cast<const IndexScanNode*>(root);
-
- if (NULL == collection) {
- warning() << "Can't ixscan null namespace";
- return NULL;
- }
- IndexScanParams params;
+ IndexScanParams params;
- params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern( txn, ixn->indexKeyPattern );
- if ( params.descriptor == NULL ) {
- warning() << "Can't find index " << ixn->indexKeyPattern.toString()
- << "in namespace " << collection->ns() << endl;
- return NULL;
- }
+ params.descriptor =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, ixn->indexKeyPattern);
+ if (params.descriptor == NULL) {
+ warning() << "Can't find index " << ixn->indexKeyPattern.toString() << "in namespace "
+ << collection->ns() << endl;
+ return NULL;
+ }
- params.bounds = ixn->bounds;
- params.direction = ixn->direction;
- params.maxScan = ixn->maxScan;
- params.addKeyMetadata = ixn->addKeyMetadata;
- return new IndexScan(txn, params, ws, ixn->filter.get());
+ params.bounds = ixn->bounds;
+ params.direction = ixn->direction;
+ params.maxScan = ixn->maxScan;
+ params.addKeyMetadata = ixn->addKeyMetadata;
+ return new IndexScan(txn, params, ws, ixn->filter.get());
+ } else if (STAGE_FETCH == root->getType()) {
+ const FetchNode* fn = static_cast<const FetchNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_FETCH == root->getType()) {
- const FetchNode* fn = static_cast<const FetchNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new FetchStage(txn, ws, childStage, fn->filter.get(), collection);
+ return new FetchStage(txn, ws, childStage, fn->filter.get(), collection);
+ } else if (STAGE_SORT == root->getType()) {
+ const SortNode* sn = static_cast<const SortNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_SORT == root->getType()) {
- const SortNode* sn = static_cast<const SortNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
- if (NULL == childStage) { return NULL; }
- SortStageParams params;
- params.collection = collection;
- params.pattern = sn->pattern;
- params.query = sn->query;
- params.limit = sn->limit;
- return new SortStage(params, ws, childStage);
+ SortStageParams params;
+ params.collection = collection;
+ params.pattern = sn->pattern;
+ params.query = sn->query;
+ params.limit = sn->limit;
+ return new SortStage(params, ws, childStage);
+ } else if (STAGE_PROJECTION == root->getType()) {
+ const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, pn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_PROJECTION == root->getType()) {
- const ProjectionNode* pn = static_cast<const ProjectionNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, pn->children[0], ws);
- if (NULL == childStage) { return NULL; }
-
- ProjectionStageParams params(WhereCallbackReal(txn, collection->ns().db()));
- params.projObj = pn->projection;
-
- // Stuff the right data into the params depending on what proj impl we use.
- if (ProjectionNode::DEFAULT == pn->projType) {
- params.fullExpression = pn->fullExpression;
- params.projImpl = ProjectionStageParams::NO_FAST_PATH;
- }
- else if (ProjectionNode::COVERED_ONE_INDEX == pn->projType) {
- params.projImpl = ProjectionStageParams::COVERED_ONE_INDEX;
- params.coveredKeyObj = pn->coveredKeyObj;
- invariant(!pn->coveredKeyObj.isEmpty());
- }
- else {
- invariant(ProjectionNode::SIMPLE_DOC == pn->projType);
- params.projImpl = ProjectionStageParams::SIMPLE_DOC;
- }
- return new ProjectionStage(params, ws, childStage);
+ ProjectionStageParams params(WhereCallbackReal(txn, collection->ns().db()));
+ params.projObj = pn->projection;
+
+ // Stuff the right data into the params depending on what proj impl we use.
+ if (ProjectionNode::DEFAULT == pn->projType) {
+ params.fullExpression = pn->fullExpression;
+ params.projImpl = ProjectionStageParams::NO_FAST_PATH;
+ } else if (ProjectionNode::COVERED_ONE_INDEX == pn->projType) {
+ params.projImpl = ProjectionStageParams::COVERED_ONE_INDEX;
+ params.coveredKeyObj = pn->coveredKeyObj;
+ invariant(!pn->coveredKeyObj.isEmpty());
+ } else {
+ invariant(ProjectionNode::SIMPLE_DOC == pn->projType);
+ params.projImpl = ProjectionStageParams::SIMPLE_DOC;
}
- else if (STAGE_LIMIT == root->getType()) {
- const LimitNode* ln = static_cast<const LimitNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, ln->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new LimitStage(ln->limit, ws, childStage);
- }
- else if (STAGE_SKIP == root->getType()) {
- const SkipNode* sn = static_cast<const SkipNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new SkipStage(sn->skip, ws, childStage);
+
+ return new ProjectionStage(params, ws, childStage);
+ } else if (STAGE_LIMIT == root->getType()) {
+ const LimitNode* ln = static_cast<const LimitNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, ln->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_AND_HASH == root->getType()) {
- const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
- unique_ptr<AndHashStage> ret(new AndHashStage(ws, collection));
- for (size_t i = 0; i < ahn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, qsol, ahn->children[i], ws);
- if (NULL == childStage) { return NULL; }
- ret->addChild(childStage);
- }
- return ret.release();
+ return new LimitStage(ln->limit, ws, childStage);
+ } else if (STAGE_SKIP == root->getType()) {
+ const SkipNode* sn = static_cast<const SkipNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, sn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_OR == root->getType()) {
- const OrNode * orn = static_cast<const OrNode*>(root);
- unique_ptr<OrStage> ret(new OrStage(ws, orn->dedup, orn->filter.get()));
- for (size_t i = 0; i < orn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, qsol, orn->children[i], ws);
- if (NULL == childStage) { return NULL; }
- ret->addChild(childStage);
+ return new SkipStage(sn->skip, ws, childStage);
+ } else if (STAGE_AND_HASH == root->getType()) {
+ const AndHashNode* ahn = static_cast<const AndHashNode*>(root);
+ unique_ptr<AndHashStage> ret(new AndHashStage(ws, collection));
+ for (size_t i = 0; i < ahn->children.size(); ++i) {
+ PlanStage* childStage = buildStages(txn, collection, qsol, ahn->children[i], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- return ret.release();
+ ret->addChild(childStage);
}
- else if (STAGE_AND_SORTED == root->getType()) {
- const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
- unique_ptr<AndSortedStage> ret(new AndSortedStage(ws, collection));
- for (size_t i = 0; i < asn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, qsol, asn->children[i], ws);
- if (NULL == childStage) { return NULL; }
- ret->addChild(childStage);
+ return ret.release();
+ } else if (STAGE_OR == root->getType()) {
+ const OrNode* orn = static_cast<const OrNode*>(root);
+ unique_ptr<OrStage> ret(new OrStage(ws, orn->dedup, orn->filter.get()));
+ for (size_t i = 0; i < orn->children.size(); ++i) {
+ PlanStage* childStage = buildStages(txn, collection, qsol, orn->children[i], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- return ret.release();
+ ret->addChild(childStage);
}
- else if (STAGE_SORT_MERGE == root->getType()) {
- const MergeSortNode* msn = static_cast<const MergeSortNode*>(root);
- MergeSortStageParams params;
- params.dedup = msn->dedup;
- params.pattern = msn->sort;
- unique_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
- for (size_t i = 0; i < msn->children.size(); ++i) {
- PlanStage* childStage = buildStages(txn, collection, qsol, msn->children[i], ws);
- if (NULL == childStage) { return NULL; }
- ret->addChild(childStage);
+ return ret.release();
+ } else if (STAGE_AND_SORTED == root->getType()) {
+ const AndSortedNode* asn = static_cast<const AndSortedNode*>(root);
+ unique_ptr<AndSortedStage> ret(new AndSortedStage(ws, collection));
+ for (size_t i = 0; i < asn->children.size(); ++i) {
+ PlanStage* childStage = buildStages(txn, collection, qsol, asn->children[i], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- return ret.release();
+ ret->addChild(childStage);
}
- else if (STAGE_GEO_NEAR_2D == root->getType()) {
- const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(root);
-
- GeoNearParams params;
- params.nearQuery = node->nq;
- params.baseBounds = node->baseBounds;
- params.filter = node->filter.get();
- params.addPointMeta = node->addPointMeta;
- params.addDistMeta = node->addDistMeta;
-
- IndexDescriptor* twoDIndex = collection->getIndexCatalog()->findIndexByKeyPattern(txn,
- node->indexKeyPattern);
-
- if (twoDIndex == NULL) {
- warning() << "Can't find 2D index " << node->indexKeyPattern.toString()
- << "in namespace " << collection->ns() << endl;
+ return ret.release();
+ } else if (STAGE_SORT_MERGE == root->getType()) {
+ const MergeSortNode* msn = static_cast<const MergeSortNode*>(root);
+ MergeSortStageParams params;
+ params.dedup = msn->dedup;
+ params.pattern = msn->sort;
+ unique_ptr<MergeSortStage> ret(new MergeSortStage(params, ws, collection));
+ for (size_t i = 0; i < msn->children.size(); ++i) {
+ PlanStage* childStage = buildStages(txn, collection, qsol, msn->children[i], ws);
+ if (NULL == childStage) {
return NULL;
}
+ ret->addChild(childStage);
+ }
+ return ret.release();
+ } else if (STAGE_GEO_NEAR_2D == root->getType()) {
+ const GeoNear2DNode* node = static_cast<const GeoNear2DNode*>(root);
+
+ GeoNearParams params;
+ params.nearQuery = node->nq;
+ params.baseBounds = node->baseBounds;
+ params.filter = node->filter.get();
+ params.addPointMeta = node->addPointMeta;
+ params.addDistMeta = node->addDistMeta;
+
+ IndexDescriptor* twoDIndex =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, node->indexKeyPattern);
+
+ if (twoDIndex == NULL) {
+ warning() << "Can't find 2D index " << node->indexKeyPattern.toString()
+ << "in namespace " << collection->ns() << endl;
+ return NULL;
+ }
- GeoNear2DStage* nearStage = new GeoNear2DStage(params, txn, ws, collection, twoDIndex);
+ GeoNear2DStage* nearStage = new GeoNear2DStage(params, txn, ws, collection, twoDIndex);
- return nearStage;
- }
- else if (STAGE_GEO_NEAR_2DSPHERE == root->getType()) {
- const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(root);
-
- GeoNearParams params;
- params.nearQuery = node->nq;
- params.baseBounds = node->baseBounds;
- params.filter = node->filter.get();
- params.addPointMeta = node->addPointMeta;
- params.addDistMeta = node->addDistMeta;
-
- IndexDescriptor* s2Index = collection->getIndexCatalog()->findIndexByKeyPattern(txn,
- node->indexKeyPattern);
-
- if (s2Index == NULL) {
- warning() << "Can't find 2DSphere index " << node->indexKeyPattern.toString()
- << "in namespace " << collection->ns() << endl;
- return NULL;
- }
+ return nearStage;
+ } else if (STAGE_GEO_NEAR_2DSPHERE == root->getType()) {
+ const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(root);
- return new GeoNear2DSphereStage(params, txn, ws, collection, s2Index);
+ GeoNearParams params;
+ params.nearQuery = node->nq;
+ params.baseBounds = node->baseBounds;
+ params.filter = node->filter.get();
+ params.addPointMeta = node->addPointMeta;
+ params.addDistMeta = node->addDistMeta;
+
+ IndexDescriptor* s2Index =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, node->indexKeyPattern);
+
+ if (s2Index == NULL) {
+ warning() << "Can't find 2DSphere index " << node->indexKeyPattern.toString()
+ << "in namespace " << collection->ns() << endl;
+ return NULL;
}
- else if (STAGE_TEXT == root->getType()) {
- const TextNode* node = static_cast<const TextNode*>(root);
- if (NULL == collection) {
- warning() << "Null collection for text";
- return NULL;
- }
- vector<IndexDescriptor*> idxMatches;
- collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
- if (1 != idxMatches.size()) {
- warning() << "No text index, or more than one text index";
- return NULL;
- }
- IndexDescriptor* index = idxMatches[0];
- const FTSAccessMethod* fam =
- static_cast<FTSAccessMethod*>( collection->getIndexCatalog()->getIndex( index ) );
- TextStageParams params(fam->getSpec());
-
- //params.collection = collection;
- params.index = index;
- params.spec = fam->getSpec();
- params.indexPrefix = node->indexPrefix;
-
- const std::string& language = ("" == node->language
- ? fam->getSpec().defaultLanguage().str()
- : node->language);
-
- Status parseStatus = params.query.parse(node->query,
- language,
- node->caseSensitive,
- fam->getSpec().getTextIndexVersion());
- if (!parseStatus.isOK()) {
- warning() << "Can't parse text search query";
- return NULL;
- }
+ return new GeoNear2DSphereStage(params, txn, ws, collection, s2Index);
+ } else if (STAGE_TEXT == root->getType()) {
+ const TextNode* node = static_cast<const TextNode*>(root);
- return new TextStage(txn, params, ws, node->filter.get());
+ if (NULL == collection) {
+ warning() << "Null collection for text";
+ return NULL;
}
- else if (STAGE_SHARDING_FILTER == root->getType()) {
- const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new ShardFilterStage(shardingState.getCollectionMetadata(collection->ns()),
- ws, childStage);
+ vector<IndexDescriptor*> idxMatches;
+ collection->getIndexCatalog()->findIndexByType(txn, "text", idxMatches);
+ if (1 != idxMatches.size()) {
+ warning() << "No text index, or more than one text index";
+ return NULL;
}
- else if (STAGE_KEEP_MUTATIONS == root->getType()) {
- const KeepMutationsNode* km = static_cast<const KeepMutationsNode*>(root);
- PlanStage* childStage = buildStages(txn, collection, qsol, km->children[0], ws);
- if (NULL == childStage) { return NULL; }
- return new KeepMutationsStage(km->filter.get(), ws, childStage);
+ IndexDescriptor* index = idxMatches[0];
+ const FTSAccessMethod* fam =
+ static_cast<FTSAccessMethod*>(collection->getIndexCatalog()->getIndex(index));
+ TextStageParams params(fam->getSpec());
+
+ // params.collection = collection;
+ params.index = index;
+ params.spec = fam->getSpec();
+ params.indexPrefix = node->indexPrefix;
+
+ const std::string& language =
+ ("" == node->language ? fam->getSpec().defaultLanguage().str() : node->language);
+
+ Status parseStatus = params.query.parse(
+ node->query, language, node->caseSensitive, fam->getSpec().getTextIndexVersion());
+ if (!parseStatus.isOK()) {
+ warning() << "Can't parse text search query";
+ return NULL;
}
- else if (STAGE_DISTINCT_SCAN == root->getType()) {
- const DistinctNode* dn = static_cast<const DistinctNode*>(root);
- if (NULL == collection) {
- warning() << "Can't distinct-scan null namespace";
- return NULL;
- }
-
- DistinctParams params;
-
- params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern(txn, dn->indexKeyPattern);
- params.direction = dn->direction;
- params.bounds = dn->bounds;
- params.fieldNo = dn->fieldNo;
- return new DistinctScan(txn, params, ws);
+ return new TextStage(txn, params, ws, node->filter.get());
+ } else if (STAGE_SHARDING_FILTER == root->getType()) {
+ const ShardingFilterNode* fn = static_cast<const ShardingFilterNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, fn->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
}
- else if (STAGE_COUNT_SCAN == root->getType()) {
- const CountNode* cn = static_cast<const CountNode*>(root);
+ return new ShardFilterStage(
+ shardingState.getCollectionMetadata(collection->ns()), ws, childStage);
+ } else if (STAGE_KEEP_MUTATIONS == root->getType()) {
+ const KeepMutationsNode* km = static_cast<const KeepMutationsNode*>(root);
+ PlanStage* childStage = buildStages(txn, collection, qsol, km->children[0], ws);
+ if (NULL == childStage) {
+ return NULL;
+ }
+ return new KeepMutationsStage(km->filter.get(), ws, childStage);
+ } else if (STAGE_DISTINCT_SCAN == root->getType()) {
+ const DistinctNode* dn = static_cast<const DistinctNode*>(root);
- if (NULL == collection) {
- warning() << "Can't fast-count null namespace (collection null)";
- return NULL;
- }
+ if (NULL == collection) {
+ warning() << "Can't distinct-scan null namespace";
+ return NULL;
+ }
- CountScanParams params;
+ DistinctParams params;
- params.descriptor =
- collection->getIndexCatalog()->findIndexByKeyPattern(txn, cn->indexKeyPattern);
- params.startKey = cn->startKey;
- params.startKeyInclusive = cn->startKeyInclusive;
- params.endKey = cn->endKey;
- params.endKeyInclusive = cn->endKeyInclusive;
+ params.descriptor =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, dn->indexKeyPattern);
+ params.direction = dn->direction;
+ params.bounds = dn->bounds;
+ params.fieldNo = dn->fieldNo;
+ return new DistinctScan(txn, params, ws);
+ } else if (STAGE_COUNT_SCAN == root->getType()) {
+ const CountNode* cn = static_cast<const CountNode*>(root);
- return new CountScan(txn, params, ws);
- }
- else {
- mongoutils::str::stream ss;
- root->appendToString(&ss, 0);
- string nodeStr(ss);
- warning() << "Can't build exec tree for node " << nodeStr << endl;
+ if (NULL == collection) {
+ warning() << "Can't fast-count null namespace (collection null)";
return NULL;
}
- }
- // static (this one is used for Cached and MultiPlanStage)
- bool StageBuilder::build(OperationContext* txn,
- Collection* collection,
- const QuerySolution& solution,
- WorkingSet* wsIn,
- PlanStage** rootOut) {
- if (NULL == wsIn || NULL == rootOut) { return false; }
- QuerySolutionNode* solutionNode = solution.root.get();
- if (NULL == solutionNode) { return false; }
- return NULL != (*rootOut = buildStages(txn, collection, solution, solutionNode, wsIn));
+ CountScanParams params;
+
+ params.descriptor =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, cn->indexKeyPattern);
+ params.startKey = cn->startKey;
+ params.startKeyInclusive = cn->startKeyInclusive;
+ params.endKey = cn->endKey;
+ params.endKeyInclusive = cn->endKeyInclusive;
+
+ return new CountScan(txn, params, ws);
+ } else {
+ mongoutils::str::stream ss;
+ root->appendToString(&ss, 0);
+ string nodeStr(ss);
+ warning() << "Can't build exec tree for node " << nodeStr << endl;
+ return NULL;
+ }
+}
+
+// static (this one is used for Cached and MultiPlanStage)
+bool StageBuilder::build(OperationContext* txn,
+ Collection* collection,
+ const QuerySolution& solution,
+ WorkingSet* wsIn,
+ PlanStage** rootOut) {
+ if (NULL == wsIn || NULL == rootOut) {
+ return false;
+ }
+ QuerySolutionNode* solutionNode = solution.root.get();
+ if (NULL == solutionNode) {
+ return false;
}
+ return NULL != (*rootOut = buildStages(txn, collection, solution, solutionNode, wsIn));
+}
} // namespace mongo
diff --git a/src/mongo/db/query/stage_builder.h b/src/mongo/db/query/stage_builder.h
index c9c88e800bd..c490b9a974d 100644
--- a/src/mongo/db/query/stage_builder.h
+++ b/src/mongo/db/query/stage_builder.h
@@ -34,26 +34,26 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
+/**
+ * The StageBuilder converts a QuerySolution to an executable tree of PlanStage(s).
+ */
+class StageBuilder {
+public:
/**
- * The StageBuilder converts a QuerySolution to an executable tree of PlanStage(s).
+ * Turns 'solution' into an executable tree of PlanStage(s).
+ *
+ * Returns true if the PlanStage tree was built successfully. The root of the tree is in
+ * *rootOut and the WorkingSet that the tree uses is in wsIn.
+ *
+ * Returns false otherwise. *rootOut and *wsOut are invalid.
*/
- class StageBuilder {
- public:
- /**
- * Turns 'solution' into an executable tree of PlanStage(s).
- *
- * Returns true if the PlanStage tree was built successfully. The root of the tree is in
- * *rootOut and the WorkingSet that the tree uses is in wsIn.
- *
- * Returns false otherwise. *rootOut and *wsOut are invalid.
- */
- static bool build(OperationContext* txn,
- Collection* collection,
- const QuerySolution& solution,
- WorkingSet* wsIn,
- PlanStage** rootOut);
- };
+ static bool build(OperationContext* txn,
+ Collection* collection,
+ const QuerySolution& solution,
+ WorkingSet* wsIn,
+ PlanStage** rootOut);
+};
} // namespace mongo
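
As a usage sketch (not part of the patch) of the contract documented above: build() returns true and fills *rootOut on success, and the WorkingSet passed in is the one the resulting tree uses. The buildExecTree wrapper below is hypothetical.

// Illustrative sketch only -- not part of this change.
#include "mongo/db/query/stage_builder.h"

namespace {
// Hypothetical wrapper: turn a QuerySolution into a PlanStage tree, or NULL on failure.
mongo::PlanStage* buildExecTree(mongo::OperationContext* txn,
                                mongo::Collection* collection,
                                const mongo::QuerySolution& solution,
                                mongo::WorkingSet* ws) {
    mongo::PlanStage* root = NULL;
    if (!mongo::StageBuilder::build(txn, collection, solution, ws, &root)) {
        return NULL;  // on failure, *rootOut is invalid per the comment above
    }
    return root;
}
}  // namespace
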
diff --git a/src/mongo/db/query/stage_types.h b/src/mongo/db/query/stage_types.h
index 273da805283..5a4981dc81b 100644
--- a/src/mongo/db/query/stage_types.h
+++ b/src/mongo/db/query/stage_types.h
@@ -30,72 +30,72 @@
namespace mongo {
- /**
- * These map to implementations of the PlanStage interface, all of which live in db/exec/
- */
- enum StageType {
- STAGE_AND_HASH,
- STAGE_AND_SORTED,
- STAGE_CACHED_PLAN,
- STAGE_COLLSCAN,
+/**
+ * These map to implementations of the PlanStage interface, all of which live in db/exec/
+ */
+enum StageType {
+ STAGE_AND_HASH,
+ STAGE_AND_SORTED,
+ STAGE_CACHED_PLAN,
+ STAGE_COLLSCAN,
- // This stage sits at the root of the query tree and counts up the number of results
- // returned by its child.
- STAGE_COUNT,
+ // This stage sits at the root of the query tree and counts up the number of results
+ // returned by its child.
+ STAGE_COUNT,
- // If we're running a .count(), the query is fully covered by one ixscan, and the ixscan is
- // from one key to another, we can just skip through the keys without bothering to examine
- // them.
- STAGE_COUNT_SCAN,
+ // If we're running a .count(), the query is fully covered by one ixscan, and the ixscan is
+ // from one key to another, we can just skip through the keys without bothering to examine
+ // them.
+ STAGE_COUNT_SCAN,
- STAGE_DELETE,
+ STAGE_DELETE,
- // If we're running a distinct, we only care about one value for each key. The distinct
- // scan stage is an ixscan with some key-skipping behvaior that only distinct uses.
- STAGE_DISTINCT_SCAN,
+ // If we're running a distinct, we only care about one value for each key. The distinct
+    // scan stage is an ixscan with some key-skipping behavior that only distinct uses.
+ STAGE_DISTINCT_SCAN,
- // Dummy stage used for receiving notifications of deletions during chunk migration.
- STAGE_NOTIFY_DELETE,
+ // Dummy stage used for receiving notifications of deletions during chunk migration.
+ STAGE_NOTIFY_DELETE,
- STAGE_EOF,
+ STAGE_EOF,
- // This is more of an "internal-only" stage where we try to keep docs that were mutated
- // during query execution.
- STAGE_KEEP_MUTATIONS,
+ // This is more of an "internal-only" stage where we try to keep docs that were mutated
+ // during query execution.
+ STAGE_KEEP_MUTATIONS,
- STAGE_FETCH,
+ STAGE_FETCH,
- // The two $geoNear impls imply a fetch+sort and must be stages.
- STAGE_GEO_NEAR_2D,
- STAGE_GEO_NEAR_2DSPHERE,
+ // The two $geoNear impls imply a fetch+sort and must be stages.
+ STAGE_GEO_NEAR_2D,
+ STAGE_GEO_NEAR_2DSPHERE,
- STAGE_GROUP,
+ STAGE_GROUP,
- STAGE_IDHACK,
- STAGE_IXSCAN,
- STAGE_LIMIT,
+ STAGE_IDHACK,
+ STAGE_IXSCAN,
+ STAGE_LIMIT,
- // Implements parallelCollectionScan.
- STAGE_MULTI_ITERATOR,
+ // Implements parallelCollectionScan.
+ STAGE_MULTI_ITERATOR,
- STAGE_MULTI_PLAN,
- STAGE_OPLOG_START,
- STAGE_OR,
- STAGE_PROJECTION,
+ STAGE_MULTI_PLAN,
+ STAGE_OPLOG_START,
+ STAGE_OR,
+ STAGE_PROJECTION,
- // Stage for running aggregation pipelines.
- STAGE_PIPELINE_PROXY,
+ // Stage for running aggregation pipelines.
+ STAGE_PIPELINE_PROXY,
- STAGE_QUEUED_DATA,
- STAGE_SHARDING_FILTER,
- STAGE_SKIP,
- STAGE_SORT,
- STAGE_SORT_MERGE,
- STAGE_SUBPLAN,
- STAGE_TEXT,
- STAGE_UNKNOWN,
+ STAGE_QUEUED_DATA,
+ STAGE_SHARDING_FILTER,
+ STAGE_SKIP,
+ STAGE_SORT,
+ STAGE_SORT_MERGE,
+ STAGE_SUBPLAN,
+ STAGE_TEXT,
+ STAGE_UNKNOWN,
- STAGE_UPDATE,
- };
+ STAGE_UPDATE,
+};
} // namespace mongo
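
A tag enum like StageType above is typically paired with a small name lookup so diagnostics (explain-style output, log messages) can print the stage kind. The sketch below is illustrative only: it uses a subset of the values, and the stageTypeName() helper is not part of the MongoDB API.

// Illustrative only: mapping a stage tag to a printable name.
#include <iostream>

enum StageType { STAGE_COLLSCAN, STAGE_COUNT_SCAN, STAGE_FETCH, STAGE_IXSCAN, STAGE_UNKNOWN };

const char* stageTypeName(StageType t) {
    switch (t) {
        case STAGE_COLLSCAN:
            return "COLLSCAN";
        case STAGE_COUNT_SCAN:
            return "COUNT_SCAN";
        case STAGE_FETCH:
            return "FETCH";
        case STAGE_IXSCAN:
            return "IXSCAN";
        default:
            return "UNKNOWN";
    }
}

int main() {
    std::cout << stageTypeName(STAGE_COUNT_SCAN) << "\n";  // prints COUNT_SCAN
    return 0;
}
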
diff --git a/src/mongo/db/range_arithmetic.cpp b/src/mongo/db/range_arithmetic.cpp
index fae99baf1f0..603dcd324f5 100644
--- a/src/mongo/db/range_arithmetic.cpp
+++ b/src/mongo/db/range_arithmetic.cpp
@@ -32,109 +32,106 @@
namespace mongo {
- using std::make_pair;
- using std::pair;
- using std::string;
- using std::stringstream;
-
- bool rangeContains( const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper,
- const BSONObj& point )
- {
- return point.woCompare( inclusiveLower ) >= 0 && point.woCompare( exclusiveUpper ) < 0;
- }
-
- bool rangeOverlaps( const BSONObj& inclusiveLower1,
- const BSONObj& exclusiveUpper1,
- const BSONObj& inclusiveLower2,
- const BSONObj& exclusiveUpper2 )
- {
- return ( exclusiveUpper1.woCompare( inclusiveLower2 ) > 0 )
- && ( exclusiveUpper2.woCompare( inclusiveLower1 ) > 0 );
- }
-
- int compareRanges( const BSONObj& rangeMin1,
- const BSONObj& rangeMax1,
- const BSONObj& rangeMin2,
- const BSONObj& rangeMax2 )
- {
- const int minCmp = rangeMin1.woCompare( rangeMin2 );
- if ( minCmp != 0 ) return minCmp;
- return rangeMax1.woCompare( rangeMax2 );
- }
-
- // Represents the start and end of an overlap of a tested range
- typedef pair<RangeMap::const_iterator, RangeMap::const_iterator> OverlapBounds;
-
- // Internal-only, shared functionality
- OverlapBounds rangeMapOverlapBounds( const RangeMap& ranges,
- const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper ) {
-
- // Returns the first chunk with a min key that is >= lower bound - the previous chunk
- // might overlap.
- RangeMap::const_iterator low = ranges.lower_bound( inclusiveLower );
+using std::make_pair;
+using std::pair;
+using std::string;
+using std::stringstream;
+
+bool rangeContains(const BSONObj& inclusiveLower,
+ const BSONObj& exclusiveUpper,
+ const BSONObj& point) {
+ return point.woCompare(inclusiveLower) >= 0 && point.woCompare(exclusiveUpper) < 0;
+}
- // See if the previous chunk overlaps our range, not clear from just min key
- if ( low != ranges.begin() ) {
+bool rangeOverlaps(const BSONObj& inclusiveLower1,
+ const BSONObj& exclusiveUpper1,
+ const BSONObj& inclusiveLower2,
+ const BSONObj& exclusiveUpper2) {
+ return (exclusiveUpper1.woCompare(inclusiveLower2) > 0) &&
+ (exclusiveUpper2.woCompare(inclusiveLower1) > 0);
+}
- RangeMap::const_iterator next = low;
- --low;
+int compareRanges(const BSONObj& rangeMin1,
+ const BSONObj& rangeMax1,
+ const BSONObj& rangeMin2,
+ const BSONObj& rangeMax2) {
+ const int minCmp = rangeMin1.woCompare(rangeMin2);
+ if (minCmp != 0)
+ return minCmp;
+ return rangeMax1.woCompare(rangeMax2);
+}
- // If the previous range's max value is lte our min value
- if ( low->second.woCompare( inclusiveLower ) < 1 ) {
- low = next;
- }
+// Represents the start and end of an overlap of a tested range
+typedef pair<RangeMap::const_iterator, RangeMap::const_iterator> OverlapBounds;
+
+// Internal-only, shared functionality
+OverlapBounds rangeMapOverlapBounds(const RangeMap& ranges,
+ const BSONObj& inclusiveLower,
+ const BSONObj& exclusiveUpper) {
+ // Returns the first chunk with a min key that is >= lower bound - the previous chunk
+ // might overlap.
+ RangeMap::const_iterator low = ranges.lower_bound(inclusiveLower);
+
+ // See if the previous chunk overlaps our range, not clear from just min key
+ if (low != ranges.begin()) {
+ RangeMap::const_iterator next = low;
+ --low;
+
+ // If the previous range's max value is lte our min value
+ if (low->second.woCompare(inclusiveLower) < 1) {
+ low = next;
}
+ }
- // Returns the first chunk with a max key that is >= upper bound - implies the
- // chunk does not overlap upper bound
- RangeMap::const_iterator high = ranges.lower_bound( exclusiveUpper );
+ // Returns the first chunk with a max key that is >= upper bound - implies the
+ // chunk does not overlap upper bound
+ RangeMap::const_iterator high = ranges.lower_bound(exclusiveUpper);
- return OverlapBounds( low, high );
- }
+ return OverlapBounds(low, high);
+}
- void getRangeMapOverlap( const RangeMap& ranges,
- const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper,
- RangeVector* overlap ) {
- overlap->clear();
- OverlapBounds bounds = rangeMapOverlapBounds( ranges, inclusiveLower, exclusiveUpper );
- for ( RangeMap::const_iterator it = bounds.first; it != bounds.second; ++it ) {
- overlap->push_back( make_pair( it->first, it->second ) );
- }
+void getRangeMapOverlap(const RangeMap& ranges,
+ const BSONObj& inclusiveLower,
+ const BSONObj& exclusiveUpper,
+ RangeVector* overlap) {
+ overlap->clear();
+ OverlapBounds bounds = rangeMapOverlapBounds(ranges, inclusiveLower, exclusiveUpper);
+ for (RangeMap::const_iterator it = bounds.first; it != bounds.second; ++it) {
+ overlap->push_back(make_pair(it->first, it->second));
}
+}
- bool rangeMapOverlaps( const RangeMap& ranges,
- const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper ) {
- OverlapBounds bounds = rangeMapOverlapBounds( ranges, inclusiveLower, exclusiveUpper );
- return bounds.first != bounds.second;
- }
+bool rangeMapOverlaps(const RangeMap& ranges,
+ const BSONObj& inclusiveLower,
+ const BSONObj& exclusiveUpper) {
+ OverlapBounds bounds = rangeMapOverlapBounds(ranges, inclusiveLower, exclusiveUpper);
+ return bounds.first != bounds.second;
+}
- bool rangeMapContains( const RangeMap& ranges,
- const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper ) {
- OverlapBounds bounds = rangeMapOverlapBounds( ranges, inclusiveLower, exclusiveUpper );
- if ( bounds.first == ranges.end() ) return false;
+bool rangeMapContains(const RangeMap& ranges,
+ const BSONObj& inclusiveLower,
+ const BSONObj& exclusiveUpper) {
+ OverlapBounds bounds = rangeMapOverlapBounds(ranges, inclusiveLower, exclusiveUpper);
+ if (bounds.first == ranges.end())
+ return false;
- return bounds.first->first.woCompare( inclusiveLower ) == 0
- && bounds.first->second.woCompare( exclusiveUpper ) == 0;
- }
+ return bounds.first->first.woCompare(inclusiveLower) == 0 &&
+ bounds.first->second.woCompare(exclusiveUpper) == 0;
+}
- string rangeToString( const BSONObj& inclusiveLower, const BSONObj& exclusiveUpper ) {
- stringstream ss;
- ss << "[" << inclusiveLower.toString() << ", " << exclusiveUpper.toString() << ")";
- return ss.str();
- }
+string rangeToString(const BSONObj& inclusiveLower, const BSONObj& exclusiveUpper) {
+ stringstream ss;
+ ss << "[" << inclusiveLower.toString() << ", " << exclusiveUpper.toString() << ")";
+ return ss.str();
+}
- string overlapToString( RangeVector overlap ) {
- stringstream ss;
- for ( RangeVector::const_iterator it = overlap.begin(); it != overlap.end(); ++it ) {
- if ( it != overlap.begin() ) ss << ", ";
- ss << rangeToString( it->first, it->second );
- }
- return ss.str();
+string overlapToString(RangeVector overlap) {
+ stringstream ss;
+ for (RangeVector::const_iterator it = overlap.begin(); it != overlap.end(); ++it) {
+ if (it != overlap.begin())
+ ss << ", ";
+ ss << rangeToString(it->first, it->second);
}
-
+ return ss.str();
+}
}
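
The lower_bound-then-step-back trick in rangeMapOverlapBounds() above is easier to see with plain integer keys. A minimal standalone sketch, assuming int start/end pairs in place of BSONObj keys and the same invariant of a map of non-overlapping, half-open ranges:

// Standalone sketch of the rangeMapOverlapBounds() idea with int keys instead of
// BSONObj: the map stores disjoint half-open ranges as start -> exclusive end.
// lower_bound() finds the first range starting at or after the query's lower bound;
// the range just before it may still overlap, so peek back one entry and keep it
// only if its end extends past the query's lower bound.
#include <iostream>
#include <map>
#include <utility>

typedef std::map<int, int> RangeMap;  // start -> exclusive end, no overlaps stored
typedef std::pair<RangeMap::const_iterator, RangeMap::const_iterator> OverlapBounds;

OverlapBounds rangeMapOverlapBounds(const RangeMap& ranges, int lower, int upper) {
    RangeMap::const_iterator low = ranges.lower_bound(lower);
    if (low != ranges.begin()) {
        RangeMap::const_iterator prev = low;
        --prev;
        if (prev->second > lower)  // previous range ends after our lower bound: it overlaps
            low = prev;
    }
    // The first range starting at or after 'upper' cannot overlap [lower, upper).
    RangeMap::const_iterator high = ranges.lower_bound(upper);
    return OverlapBounds(low, high);
}

int main() {
    RangeMap ranges{{100, 200}, {200, 300}, {300, 400}};
    OverlapBounds bounds = rangeMapOverlapBounds(ranges, 150, 250);
    for (RangeMap::const_iterator it = bounds.first; it != bounds.second; ++it)
        std::cout << "[" << it->first << ", " << it->second << ") ";
    std::cout << "\n";  // prints [100, 200) [200, 300)
    return 0;
}

Querying [150, 250) against {100..200, 200..300, 300..400} yields the two entries [100, 200) and [200, 300), matching the RangeMapOverlapPartial test further down.
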
diff --git a/src/mongo/db/range_arithmetic.h b/src/mongo/db/range_arithmetic.h
index d13683be70b..0032fc5b996 100644
--- a/src/mongo/db/range_arithmetic.h
+++ b/src/mongo/db/range_arithmetic.h
@@ -36,122 +36,117 @@
namespace mongo {
- /**
- * A KeyRange represents a range over keys of documents in a namespace, qualified by a
- * key pattern which defines the documents that are in the key range.
- *
- * There may be many different expressions to generate the same key fields from a document - the
- * keyPattern tells us these expressions.
- *
- * Ex:
- * DocA : { field : "aaaa" }
- * DocB : { field : "bbb" }
- * DocC : { field : "ccccc" }
- *
- * keyPattern : { field : 1 }
- * minKey : { field : "aaaa" } : Id(DocA)
- * maxKey : { field : "ccccc" } : Id(DocB)
- *
- * contains Id(DocB)
- *
- * keyPattern : { field : "numberofletters" }
- * minKey : { field : 4 } : numberofletters(DocA)
- * maxKey : { field : 5 } : numberofletters(DocC)
- *
- * does not contain numberofletters(DocB)
- */
- struct KeyRange {
-
- KeyRange( const std::string& ns,
- const BSONObj& minKey,
- const BSONObj& maxKey,
- const BSONObj& keyPattern ) :
- ns( ns ), minKey( minKey ), maxKey( maxKey ), keyPattern( keyPattern )
- {
- }
-
- KeyRange() {}
-
- std::string ns;
- BSONObj minKey;
- BSONObj maxKey;
- BSONObj keyPattern;
- };
-
- /**
- * Returns true if the point is within the range [inclusiveLower, exclusiveUpper).
- */
- bool rangeContains( const BSONObj& inclusiveLower,
+/**
+ * A KeyRange represents a range over keys of documents in a namespace, qualified by a
+ * key pattern which defines the documents that are in the key range.
+ *
+ * There may be many different expressions to generate the same key fields from a document - the
+ * keyPattern tells us these expressions.
+ *
+ * Ex:
+ * DocA : { field : "aaaa" }
+ * DocB : { field : "bbb" }
+ * DocC : { field : "ccccc" }
+ *
+ * keyPattern : { field : 1 }
+ * minKey : { field : "aaaa" } : Id(DocA)
+ * maxKey : { field : "ccccc" } : Id(DocC)
+ *
+ * contains Id(DocB)
+ *
+ * keyPattern : { field : "numberofletters" }
+ * minKey : { field : 4 } : numberofletters(DocA)
+ * maxKey : { field : 5 } : numberofletters(DocC)
+ *
+ * does not contain numberofletters(DocB)
+ */
+struct KeyRange {
+ KeyRange(const std::string& ns,
+ const BSONObj& minKey,
+ const BSONObj& maxKey,
+ const BSONObj& keyPattern)
+ : ns(ns), minKey(minKey), maxKey(maxKey), keyPattern(keyPattern) {}
+
+ KeyRange() {}
+
+ std::string ns;
+ BSONObj minKey;
+ BSONObj maxKey;
+ BSONObj keyPattern;
+};
+
+/**
+ * Returns true if the point is within the range [inclusiveLower, exclusiveUpper).
+ */
+bool rangeContains(const BSONObj& inclusiveLower,
+ const BSONObj& exclusiveUpper,
+ const BSONObj& point);
+
+/**
+ * Returns true if the bounds specified by [inclusiveLower1, exclusiveUpper1)
+ * intersects with the bounds [inclusiveLower2, exclusiveUpper2).
+ */
+bool rangeOverlaps(const BSONObj& inclusiveLower1,
+ const BSONObj& exclusiveUpper1,
+ const BSONObj& inclusiveLower2,
+ const BSONObj& exclusiveUpper2);
+
+/**
+ * Returns -1 if first range is less than the second range, 0 if equal and 1 if
+ * greater. The ordering is based on comparing both the min first and then uses
+ * the max as the tie breaker.
+ */
+int compareRanges(const BSONObj& rangeMin1,
+ const BSONObj& rangeMax1,
+ const BSONObj& rangeMin2,
+ const BSONObj& rangeMax2);
+
+/**
+ * A RangeMap is a mapping of a BSON range from lower->upper (lower maps to upper), using
+ * standard BSON woCompare. Upper bound is exclusive.
+ *
+ * NOTE: For overlap testing to work correctly, there must be no overlaps present in the map
+ * itself.
+ */
+typedef std::map<BSONObj, BSONObj, BSONObjCmp> RangeMap;
+
+/**
+ * A RangeVector is a list of [lower,upper) ranges.
+ */
+typedef std::vector<std::pair<BSONObj, BSONObj>> RangeVector;
+
+/**
+ * Returns the overlap of a range [inclusiveLower, exclusiveUpper) with the provided range map
+ * as a vector of ranges from the map.
+ */
+void getRangeMapOverlap(const RangeMap& ranges,
+ const BSONObj& inclusiveLower,
const BSONObj& exclusiveUpper,
- const BSONObj& point );
-
- /**
- * Returns true if the bounds specified by [inclusiveLower1, exclusiveUpper1)
- * intersects with the bounds [inclusiveLower2, exclusiveUpper2).
- */
- bool rangeOverlaps( const BSONObj& inclusiveLower1,
- const BSONObj& exclusiveUpper1,
- const BSONObj& inclusiveLower2,
- const BSONObj& exclusiveUpper2 );
-
- /**
- * Returns -1 if first range is less than the second range, 0 if equal and 1 if
- * greater. The ordering is based on comparing both the min first and then uses
- * the max as the tie breaker.
- */
- int compareRanges( const BSONObj& rangeMin1,
- const BSONObj& rangeMax1,
- const BSONObj& rangeMin2,
- const BSONObj& rangeMax2 );
-
- /**
- * A RangeMap is a mapping of a BSON range from lower->upper (lower maps to upper), using
- * standard BSON woCompare. Upper bound is exclusive.
- *
- * NOTE: For overlap testing to work correctly, there may be no overlaps present in the map
- * itself.
- */
- typedef std::map<BSONObj, BSONObj, BSONObjCmp> RangeMap;
-
- /**
- * A RangeVector is a list of [lower,upper) ranges.
- */
- typedef std::vector<std::pair<BSONObj,BSONObj> > RangeVector;
-
- /**
- * Returns the overlap of a range [inclusiveLower, exclusiveUpper) with the provided range map
- * as a vector of ranges from the map.
- */
- void getRangeMapOverlap( const RangeMap& ranges,
- const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper,
- RangeVector* vector );
-
- /**
- * Returns true if the provided range map has ranges which overlap the provided range
- * [inclusiveLower, exclusiveUpper).
- */
- bool rangeMapOverlaps( const RangeMap& ranges,
- const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper );
-
- /**
- * Returns true if the provided range map exactly contains the provided range
- * [inclusiveLower, exclusiveUpper).
- */
- bool rangeMapContains( const RangeMap& ranges,
- const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper );
-
- /**
- * std::string representation of [inclusiveLower, exclusiveUpper)
- */
- std::string rangeToString( const BSONObj& inclusiveLower,
- const BSONObj& exclusiveUpper );
-
- /**
- * std::string representation of overlapping ranges as a list "[range1),[range2),..."
- */
- std::string overlapToString( RangeVector overlap );
+ RangeVector* vector);
+
+/**
+ * Returns true if the provided range map has ranges which overlap the provided range
+ * [inclusiveLower, exclusiveUpper).
+ */
+bool rangeMapOverlaps(const RangeMap& ranges,
+ const BSONObj& inclusiveLower,
+ const BSONObj& exclusiveUpper);
+
+/**
+ * Returns true if the provided range map exactly contains the provided range
+ * [inclusiveLower, exclusiveUpper).
+ */
+bool rangeMapContains(const RangeMap& ranges,
+ const BSONObj& inclusiveLower,
+ const BSONObj& exclusiveUpper);
+/**
+ * std::string representation of [inclusiveLower, exclusiveUpper)
+ */
+std::string rangeToString(const BSONObj& inclusiveLower, const BSONObj& exclusiveUpper);
+
+/**
+ * std::string representation of overlapping ranges as a list "[range1),[range2),..."
+ */
+std::string overlapToString(RangeVector overlap);
}
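
The half-open [inclusiveLower, exclusiveUpper) convention used throughout these declarations reduces to two comparisons each. A minimal sketch with ints standing in for BSONObj keys (the real functions compare with woCompare):

// Half-open range predicates: a point is in [lower, upper) when lower <= point < upper,
// and two half-open ranges overlap exactly when each one ends after the other begins.
#include <cassert>

bool rangeContains(int inclusiveLower, int exclusiveUpper, int point) {
    return point >= inclusiveLower && point < exclusiveUpper;
}

bool rangeOverlaps(int lower1, int upper1, int lower2, int upper2) {
    return upper1 > lower2 && upper2 > lower1;
}

int main() {
    assert(rangeContains(100, 200, 100));         // lower bound is inclusive
    assert(!rangeContains(100, 200, 200));        // upper bound is exclusive
    assert(rangeOverlaps(100, 200, 150, 250));    // partial overlap
    assert(!rangeOverlaps(100, 200, 200, 240));   // touching at 200 is not an overlap
    return 0;
}
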
diff --git a/src/mongo/db/range_arithmetic_test.cpp b/src/mongo/db/range_arithmetic_test.cpp
index 79632689bb7..074e9efe0cb 100644
--- a/src/mongo/db/range_arithmetic_test.cpp
+++ b/src/mongo/db/range_arithmetic_test.cpp
@@ -31,131 +31,124 @@
namespace {
- using mongo::MINKEY;
- using mongo::MAXKEY;
- using mongo::rangeOverlaps;
- using mongo::rangeMapOverlaps;
- using mongo::RangeMap;
- using mongo::RangeVector;
- using std::make_pair;
-
- TEST(BSONRange, SmallerLowerRangeNonSubset) {
- ASSERT_TRUE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 50), BSON("x" << 200)));
- ASSERT_TRUE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 60), BSON("x" << 199)));
-
- ASSERT_FALSE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 70), BSON("x" << 99)));
- ASSERT_FALSE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 80), BSON("x" << 100)));
- }
-
- TEST(BSONRange, BiggerUpperRangeNonSubset) {
- ASSERT_TRUE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 150), BSON("x" << 200)));
- ASSERT_TRUE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 160), BSON("x" << 201)));
- ASSERT_TRUE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 170), BSON("x" << 220)));
-
- ASSERT_FALSE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 200), BSON("x" << 240)));
- }
-
- TEST(BSONRange, RangeIsSubsetOfOther) {
- ASSERT_TRUE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 70), BSON("x" << 300)));
- ASSERT_TRUE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 140), BSON("x" << 180)));
- }
-
- TEST(BSONRange, EqualRange) {
- ASSERT_TRUE(rangeOverlaps(BSON("x" << 100), BSON("x" << 200),
- BSON("x" << 100), BSON("x" << 200)));
- }
-
- TEST(RangeMap, RangeMapOverlap) {
-
- RangeMap rangeMap;
- rangeMap.insert( make_pair( BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
- rangeMap.insert( make_pair( BSON( "x" << 200 ), BSON( "x" << 300 ) ) );
- rangeMap.insert( make_pair( BSON( "x" << 300 ), BSON( "x" << 400 ) ) );
-
- RangeVector overlap;
- getRangeMapOverlap( rangeMap, BSON( "x" << 50 ), BSON( "x" << 350 ), &overlap );
+using mongo::MINKEY;
+using mongo::MAXKEY;
+using mongo::rangeOverlaps;
+using mongo::rangeMapOverlaps;
+using mongo::RangeMap;
+using mongo::RangeVector;
+using std::make_pair;
+
+TEST(BSONRange, SmallerLowerRangeNonSubset) {
+ ASSERT_TRUE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 50), BSON("x" << 200)));
+ ASSERT_TRUE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 60), BSON("x" << 199)));
+
+ ASSERT_FALSE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 70), BSON("x" << 99)));
+ ASSERT_FALSE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 80), BSON("x" << 100)));
+}
- ASSERT( !overlap.empty() );
- ASSERT_EQUALS( overlap.size(), 3u );
- }
+TEST(BSONRange, BiggerUpperRangeNonSubset) {
+ ASSERT_TRUE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 150), BSON("x" << 200)));
+ ASSERT_TRUE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 160), BSON("x" << 201)));
+ ASSERT_TRUE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 170), BSON("x" << 220)));
- TEST(RangeMap, RangeMapOverlapPartial) {
+ ASSERT_FALSE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 200), BSON("x" << 240)));
+}
- RangeMap rangeMap;
- rangeMap.insert( make_pair( BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
- rangeMap.insert( make_pair( BSON( "x" << 200 ), BSON( "x" << 300 ) ) );
+TEST(BSONRange, RangeIsSubsetOfOther) {
+ ASSERT_TRUE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 70), BSON("x" << 300)));
+ ASSERT_TRUE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 140), BSON("x" << 180)));
+}
- RangeVector overlap;
- getRangeMapOverlap( rangeMap, BSON( "x" << 150 ), BSON( "x" << 250 ), &overlap );
+TEST(BSONRange, EqualRange) {
+ ASSERT_TRUE(
+ rangeOverlaps(BSON("x" << 100), BSON("x" << 200), BSON("x" << 100), BSON("x" << 200)));
+}
- ASSERT( !overlap.empty() );
- ASSERT_EQUALS( overlap.size(), 2u );
- }
+TEST(RangeMap, RangeMapOverlap) {
+ RangeMap rangeMap;
+ rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
+ rangeMap.insert(make_pair(BSON("x" << 200), BSON("x" << 300)));
+ rangeMap.insert(make_pair(BSON("x" << 300), BSON("x" << 400)));
- TEST(RangeMap, RangeMapOverlapInner) {
+ RangeVector overlap;
+ getRangeMapOverlap(rangeMap, BSON("x" << 50), BSON("x" << 350), &overlap);
- RangeMap rangeMap;
- rangeMap.insert( make_pair( BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
+ ASSERT(!overlap.empty());
+ ASSERT_EQUALS(overlap.size(), 3u);
+}
- RangeVector overlap;
- getRangeMapOverlap( rangeMap, BSON( "x" << 125 ), BSON( "x" << 150 ), &overlap );
+TEST(RangeMap, RangeMapOverlapPartial) {
+ RangeMap rangeMap;
+ rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
+ rangeMap.insert(make_pair(BSON("x" << 200), BSON("x" << 300)));
- ASSERT( !overlap.empty() );
- ASSERT_EQUALS( overlap.size(), 1u );
- }
+ RangeVector overlap;
+ getRangeMapOverlap(rangeMap, BSON("x" << 150), BSON("x" << 250), &overlap);
- TEST(RangeMap, RangeMapNoOverlap) {
+ ASSERT(!overlap.empty());
+ ASSERT_EQUALS(overlap.size(), 2u);
+}
- RangeMap rangeMap;
- rangeMap.insert( make_pair( BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
- rangeMap.insert( make_pair( BSON( "x" << 300 ), BSON( "x" << 400 ) ) );
+TEST(RangeMap, RangeMapOverlapInner) {
+ RangeMap rangeMap;
+ rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
- RangeVector overlap;
- getRangeMapOverlap( rangeMap, BSON( "x" << 200 ), BSON( "x" << 300 ), &overlap );
+ RangeVector overlap;
+ getRangeMapOverlap(rangeMap, BSON("x" << 125), BSON("x" << 150), &overlap);
- ASSERT( overlap.empty() );
- }
+ ASSERT(!overlap.empty());
+ ASSERT_EQUALS(overlap.size(), 1u);
+}
- TEST(RangeMap, RangeMapOverlaps) {
+TEST(RangeMap, RangeMapNoOverlap) {
+ RangeMap rangeMap;
+ rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
+ rangeMap.insert(make_pair(BSON("x" << 300), BSON("x" << 400)));
- RangeMap rangeMap;
- rangeMap.insert( make_pair( BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
+ RangeVector overlap;
+ getRangeMapOverlap(rangeMap, BSON("x" << 200), BSON("x" << 300), &overlap);
- ASSERT( rangeMapOverlaps( rangeMap, BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
- ASSERT( rangeMapOverlaps( rangeMap, BSON( "x" << 99 ), BSON( "x" << 200 ) ) );
- ASSERT( rangeMapOverlaps( rangeMap, BSON( "x" << 100 ), BSON( "x" << 201 ) ) );
- ASSERT( rangeMapOverlaps( rangeMap, BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
- ASSERT( !rangeMapOverlaps( rangeMap, BSON( "x" << 99 ), BSON( "x" << 100 ) ) );
- ASSERT( !rangeMapOverlaps( rangeMap, BSON( "x" << 200 ), BSON( "x" << 201 ) ) );
- }
+ ASSERT(overlap.empty());
+}
- TEST(RangeMap, RangeMapContains) {
+TEST(RangeMap, RangeMapOverlaps) {
+ RangeMap rangeMap;
+ rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
- RangeMap rangeMap;
- rangeMap.insert( make_pair( BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
+ ASSERT(rangeMapOverlaps(rangeMap, BSON("x" << 100), BSON("x" << 200)));
+ ASSERT(rangeMapOverlaps(rangeMap, BSON("x" << 99), BSON("x" << 200)));
+ ASSERT(rangeMapOverlaps(rangeMap, BSON("x" << 100), BSON("x" << 201)));
+ ASSERT(rangeMapOverlaps(rangeMap, BSON("x" << 100), BSON("x" << 200)));
+ ASSERT(!rangeMapOverlaps(rangeMap, BSON("x" << 99), BSON("x" << 100)));
+ ASSERT(!rangeMapOverlaps(rangeMap, BSON("x" << 200), BSON("x" << 201)));
+}
- ASSERT( rangeMapContains( rangeMap, BSON( "x" << 100 ), BSON( "x" << 200 ) ) );
- ASSERT( !rangeMapContains( rangeMap, BSON( "x" << 99 ), BSON( "x" << 200 ) ) );
- ASSERT( !rangeMapContains( rangeMap, BSON( "x" << 100 ), BSON( "x" << 201 ) ) );
- }
+TEST(RangeMap, RangeMapContains) {
+ RangeMap rangeMap;
+ rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
- TEST(RangeMap, RangeMapContainsMinMax) {
+ ASSERT(rangeMapContains(rangeMap, BSON("x" << 100), BSON("x" << 200)));
+ ASSERT(!rangeMapContains(rangeMap, BSON("x" << 99), BSON("x" << 200)));
+ ASSERT(!rangeMapContains(rangeMap, BSON("x" << 100), BSON("x" << 201)));
+}
- RangeMap rangeMap;
- rangeMap.insert( make_pair( BSON( "x" << MINKEY ), BSON( "x" << MAXKEY ) ) );
+TEST(RangeMap, RangeMapContainsMinMax) {
+ RangeMap rangeMap;
+ rangeMap.insert(make_pair(BSON("x" << MINKEY), BSON("x" << MAXKEY)));
- ASSERT( rangeMapContains( rangeMap, BSON( "x" << MINKEY ), BSON( "x" << MAXKEY ) ) );
- ASSERT( !rangeMapContains( rangeMap, BSON( "x" << 1 ), BSON( "x" << MAXKEY ) ) );
- ASSERT( !rangeMapContains( rangeMap, BSON( "x" << MINKEY ), BSON( "x" << 1 ) ) );
- }
+ ASSERT(rangeMapContains(rangeMap, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
+ ASSERT(!rangeMapContains(rangeMap, BSON("x" << 1), BSON("x" << MAXKEY)));
+ ASSERT(!rangeMapContains(rangeMap, BSON("x" << MINKEY), BSON("x" << 1)));
+}
}
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index 771aa9074f7..036f8e7c4d4 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -53,349 +53,300 @@ using std::pair;
using std::string;
namespace {
- const long int kNotEmptyTimeoutMillis = 200;
- const long long int kMaxCursorCheckIntervalMillis = 500;
- const size_t kDeleteJobsHistory = 10; // entries
-
- /**
- * Removes an element from the container that holds a pointer type, and deletes the
- * pointer as well. Returns true if the element was found.
- */
- template <typename ContainerType, typename ContainerElementType>
- bool deletePtrElement(ContainerType* container, ContainerElementType elem) {
- typename ContainerType::iterator iter = container->find(elem);
-
- if (iter == container->end()) {
- return false;
- }
+const long int kNotEmptyTimeoutMillis = 200;
+const long long int kMaxCursorCheckIntervalMillis = 500;
+const size_t kDeleteJobsHistory = 10; // entries
- delete *iter;
- container->erase(iter);
- return true;
+/**
+ * Removes an element from the container that holds a pointer type, and deletes the
+ * pointer as well. Returns true if the element was found.
+ */
+template <typename ContainerType, typename ContainerElementType>
+bool deletePtrElement(ContainerType* container, ContainerElementType elem) {
+ typename ContainerType::iterator iter = container->find(elem);
+
+ if (iter == container->end()) {
+ return false;
}
+
+ delete *iter;
+ container->erase(iter);
+ return true;
+}
}
namespace mongo {
- namespace duration = boost::posix_time;
-
- static void logCursorsWaiting(RangeDeleteEntry* entry) {
+namespace duration = boost::posix_time;
- // We always log the first cursors waiting message (so we have cursor ids in the logs).
- // After 15 minutes (the cursor timeout period), we start logging additional messages at
- // a 1 minute interval.
- static const auto kLogCursorsThreshold = stdx::chrono::minutes{15};
- static const auto kLogCursorsInterval = stdx::chrono::minutes{1};
+static void logCursorsWaiting(RangeDeleteEntry* entry) {
+ // We always log the first cursors waiting message (so we have cursor ids in the logs).
+ // After 15 minutes (the cursor timeout period), we start logging additional messages at
+ // a 1 minute interval.
+ static const auto kLogCursorsThreshold = stdx::chrono::minutes{15};
+ static const auto kLogCursorsInterval = stdx::chrono::minutes{1};
- Date_t currentTime = jsTime();
- Milliseconds elapsedMillisSinceQueued{0};
+ Date_t currentTime = jsTime();
+ Milliseconds elapsedMillisSinceQueued{0};
- // We always log the first message when lastLoggedTime == 0
- if (entry->lastLoggedTS != Date_t()) {
+ // We always log the first message when lastLoggedTime == 0
+ if (entry->lastLoggedTS != Date_t()) {
+ if (currentTime > entry->stats.queueStartTS)
+ elapsedMillisSinceQueued = currentTime - entry->stats.queueStartTS;
- if (currentTime > entry->stats.queueStartTS)
- elapsedMillisSinceQueued = currentTime - entry->stats.queueStartTS;
-
- // Not logging, threshold not passed
- if (elapsedMillisSinceQueued < kLogCursorsThreshold)
- return;
+ // Not logging, threshold not passed
+ if (elapsedMillisSinceQueued < kLogCursorsThreshold)
+ return;
- Milliseconds elapsedMillisSinceLog{0};
- if (currentTime > entry->lastLoggedTS)
- elapsedMillisSinceLog = currentTime - entry->lastLoggedTS;
+ Milliseconds elapsedMillisSinceLog{0};
+ if (currentTime > entry->lastLoggedTS)
+ elapsedMillisSinceLog = currentTime - entry->lastLoggedTS;
- // Not logging, logged a short time ago
- if (elapsedMillisSinceLog < kLogCursorsInterval)
- return;
- }
-
- str::stream cursorList;
- for (std::set<CursorId>::const_iterator it = entry->cursorsToWait.begin();
- it != entry->cursorsToWait.end(); ++it) {
- if (it != entry->cursorsToWait.begin())
- cursorList << ", ";
- cursorList << *it;
- }
-
- log() << "waiting for open cursors before removing range "
- << "[" << entry->options.range.minKey << ", " << entry->options.range.maxKey << ") "
- << "in " << entry->options.range.ns
- << (entry->lastLoggedTS == Date_t() ?
- string("") :
- string(str::stream() << ", elapsed secs: " <<
- durationCount<Seconds>(elapsedMillisSinceQueued)))
- << ", cursor ids: [" << string(cursorList) << "]";
+ // Not logging, logged a short time ago
+ if (elapsedMillisSinceLog < kLogCursorsInterval)
+ return;
+ }
- entry->lastLoggedTS = currentTime;
+ str::stream cursorList;
+ for (std::set<CursorId>::const_iterator it = entry->cursorsToWait.begin();
+ it != entry->cursorsToWait.end();
+ ++it) {
+ if (it != entry->cursorsToWait.begin())
+ cursorList << ", ";
+ cursorList << *it;
}
- struct RangeDeleter::NSMinMax {
- NSMinMax(std::string ns, const BSONObj min, const BSONObj max):
- ns(ns), min(min), max(max) {
- }
+ log() << "waiting for open cursors before removing range "
+ << "[" << entry->options.range.minKey << ", " << entry->options.range.maxKey << ") "
+ << "in " << entry->options.range.ns
+ << (entry->lastLoggedTS == Date_t()
+ ? string("")
+ : string(str::stream() << ", elapsed secs: "
+ << durationCount<Seconds>(elapsedMillisSinceQueued)))
+ << ", cursor ids: [" << string(cursorList) << "]";
- std::string ns;
+ entry->lastLoggedTS = currentTime;
+}
- // Inclusive lower range.
- BSONObj min;
+struct RangeDeleter::NSMinMax {
+ NSMinMax(std::string ns, const BSONObj min, const BSONObj max) : ns(ns), min(min), max(max) {}
- // Exclusive upper range.
- BSONObj max;
- };
+ std::string ns;
- bool RangeDeleter::NSMinMaxCmp::operator()(
- const NSMinMax* lhs, const NSMinMax* rhs) const {
- const int nsComp = lhs->ns.compare(rhs->ns);
+ // Inclusive lower range.
+ BSONObj min;
- if (nsComp < 0) {
- return true;
- }
+ // Exclusive upper range.
+ BSONObj max;
+};
- if (nsComp > 0) {
- return false;
- }
+bool RangeDeleter::NSMinMaxCmp::operator()(const NSMinMax* lhs, const NSMinMax* rhs) const {
+ const int nsComp = lhs->ns.compare(rhs->ns);
- return compareRanges(lhs->min, lhs->max, rhs->min, rhs->max) < 0;
+ if (nsComp < 0) {
+ return true;
}
- RangeDeleter::RangeDeleter(RangeDeleterEnv* env):
- _env(env), // ownership xfer
- _stopRequested(false),
- _deletesInProgress(0) {
+ if (nsComp > 0) {
+ return false;
}
- RangeDeleter::~RangeDeleter() {
- for(TaskList::iterator it = _notReadyQueue.begin();
- it != _notReadyQueue.end();
- ++it) {
- delete (*it);
- }
-
- for(TaskList::iterator it = _taskQueue.begin();
- it != _taskQueue.end();
- ++it) {
- delete (*it);
- }
-
- for(NSMinMaxSet::iterator it = _deleteSet.begin();
- it != _deleteSet.end();
- ++it) {
- delete (*it);
- }
+ return compareRanges(lhs->min, lhs->max, rhs->min, rhs->max) < 0;
+}
- for(std::deque<DeleteJobStats*>::iterator it = _statsHistory.begin();
- it != _statsHistory.end();
- ++it) {
- delete (*it);
- }
+RangeDeleter::RangeDeleter(RangeDeleterEnv* env)
+ : _env(env), // ownership xfer
+ _stopRequested(false),
+ _deletesInProgress(0) {}
+RangeDeleter::~RangeDeleter() {
+ for (TaskList::iterator it = _notReadyQueue.begin(); it != _notReadyQueue.end(); ++it) {
+ delete (*it);
}
- void RangeDeleter::startWorkers() {
- if (!_worker) {
- _worker.reset(new stdx::thread(stdx::bind(&RangeDeleter::doWork, this)));
- }
+ for (TaskList::iterator it = _taskQueue.begin(); it != _taskQueue.end(); ++it) {
+ delete (*it);
}
- void RangeDeleter::stopWorkers() {
- {
- stdx::lock_guard<stdx::mutex> sl(_stopMutex);
- _stopRequested = true;
- }
+ for (NSMinMaxSet::iterator it = _deleteSet.begin(); it != _deleteSet.end(); ++it) {
+ delete (*it);
+ }
- if (_worker) {
- _worker->join();
- }
+ for (std::deque<DeleteJobStats*>::iterator it = _statsHistory.begin();
+ it != _statsHistory.end();
+ ++it) {
+ delete (*it);
+ }
+}
- stdx::unique_lock<stdx::mutex> sl(_queueMutex);
- while (_deletesInProgress > 0) {
- _nothingInProgressCV.wait(sl);
- }
+void RangeDeleter::startWorkers() {
+ if (!_worker) {
+ _worker.reset(new stdx::thread(stdx::bind(&RangeDeleter::doWork, this)));
}
+}
- bool RangeDeleter::queueDelete(OperationContext* txn,
- const RangeDeleterOptions& options,
- Notification* notifyDone,
- std::string* errMsg) {
- string dummy;
- if (errMsg == NULL) errMsg = &dummy;
+void RangeDeleter::stopWorkers() {
+ {
+ stdx::lock_guard<stdx::mutex> sl(_stopMutex);
+ _stopRequested = true;
+ }
- const string& ns(options.range.ns);
- const BSONObj& min(options.range.minKey);
- const BSONObj& max(options.range.maxKey);
+ if (_worker) {
+ _worker->join();
+ }
- unique_ptr<RangeDeleteEntry> toDelete(
- new RangeDeleteEntry(options));
- toDelete->notifyDone = notifyDone;
+ stdx::unique_lock<stdx::mutex> sl(_queueMutex);
+ while (_deletesInProgress > 0) {
+ _nothingInProgressCV.wait(sl);
+ }
+}
- {
- stdx::lock_guard<stdx::mutex> sl(_queueMutex);
- if (_stopRequested) {
- *errMsg = "deleter is already stopped.";
- return false;
- }
+bool RangeDeleter::queueDelete(OperationContext* txn,
+ const RangeDeleterOptions& options,
+ Notification* notifyDone,
+ std::string* errMsg) {
+ string dummy;
+ if (errMsg == NULL)
+ errMsg = &dummy;
- if (!canEnqueue_inlock(ns, min, max, errMsg)) {
- return false;
- }
+ const string& ns(options.range.ns);
+ const BSONObj& min(options.range.minKey);
+ const BSONObj& max(options.range.maxKey);
- _deleteSet.insert(new NSMinMax(ns, min.getOwned(), max.getOwned()));
- }
+ unique_ptr<RangeDeleteEntry> toDelete(new RangeDeleteEntry(options));
+ toDelete->notifyDone = notifyDone;
- if (options.waitForOpenCursors) {
- _env->getCursorIds(txn, ns, &toDelete->cursorsToWait);
+ {
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
+ if (_stopRequested) {
+ *errMsg = "deleter is already stopped.";
+ return false;
}
- toDelete->stats.queueStartTS = jsTime();
-
- if (!toDelete->cursorsToWait.empty())
- logCursorsWaiting(toDelete.get());
-
- {
- stdx::lock_guard<stdx::mutex> sl(_queueMutex);
-
- if (toDelete->cursorsToWait.empty()) {
- toDelete->stats.queueEndTS = jsTime();
- _taskQueue.push_back(toDelete.release());
- _taskQueueNotEmptyCV.notify_one();
- }
- else {
- _notReadyQueue.push_back(toDelete.release());
- }
+ if (!canEnqueue_inlock(ns, min, max, errMsg)) {
+ return false;
}
- return true;
+ _deleteSet.insert(new NSMinMax(ns, min.getOwned(), max.getOwned()));
}
-namespace {
- const int kWTimeoutMillis = 60 * 60 * 1000;
-
- bool _waitForMajority(OperationContext* txn, std::string* errMsg) {
- const WriteConcernOptions writeConcern(WriteConcernOptions::kMajority,
- WriteConcernOptions::NONE,
- kWTimeoutMillis);
-
- repl::ReplicationCoordinator::StatusAndDuration replStatus =
- repl::getGlobalReplicationCoordinator()->awaitReplicationOfLastOpForClient(
- txn, writeConcern);
- Milliseconds elapsedTime = replStatus.duration;
- if (replStatus.status.code() == ErrorCodes::ExceededTimeLimit) {
- *errMsg = str::stream() << "rangeDeleter timed out after "
- << durationCount<Seconds>(elapsedTime)
- << " seconds while waiting"
- " for deletions to be replicated to majority nodes";
- log() << *errMsg;
- }
- else if (replStatus.status.code() == ErrorCodes::NotMaster) {
- *errMsg = str::stream() << "rangeDeleter no longer PRIMARY after "
- << durationCount<Seconds>(elapsedTime)
- << " seconds while waiting"
- " for deletions to be replicated to majority nodes";
- }
- else {
- LOG(elapsedTime < Seconds(30) ? 1 : 0)
- << "rangeDeleter took " << durationCount<Seconds>(elapsedTime) << " seconds "
- << " waiting for deletes to be replicated to majority nodes";
-
- fassert(18512, replStatus.status);
- }
-
- return replStatus.status.isOK();
+ if (options.waitForOpenCursors) {
+ _env->getCursorIds(txn, ns, &toDelete->cursorsToWait);
}
-}
-
- bool RangeDeleter::deleteNow(OperationContext* txn,
- const RangeDeleterOptions& options,
- string* errMsg) {
- if (stopRequested()) {
- *errMsg = "deleter is already stopped.";
- return false;
- }
- string dummy;
- if (errMsg == NULL) errMsg = &dummy;
+ toDelete->stats.queueStartTS = jsTime();
- const string& ns(options.range.ns);
- const BSONObj& min(options.range.minKey);
- const BSONObj& max(options.range.maxKey);
+ if (!toDelete->cursorsToWait.empty())
+ logCursorsWaiting(toDelete.get());
- NSMinMax deleteRange(ns, min, max);
- {
- stdx::lock_guard<stdx::mutex> sl(_queueMutex);
- if (!canEnqueue_inlock(ns, min, max, errMsg)) {
- return false;
- }
-
- _deleteSet.insert(&deleteRange);
+ {
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
- // Note: count for pending deletes is an integral part of the shutdown story.
- // Therefore, to simplify things, there is no "pending" state for deletes in
- // deleteNow, the state transition is simply inProgress -> done.
- _deletesInProgress++;
+ if (toDelete->cursorsToWait.empty()) {
+ toDelete->stats.queueEndTS = jsTime();
+ _taskQueue.push_back(toDelete.release());
+ _taskQueueNotEmptyCV.notify_one();
+ } else {
+ _notReadyQueue.push_back(toDelete.release());
}
+ }
- set<CursorId> cursorsToWait;
- if (options.waitForOpenCursors) {
- _env->getCursorIds(txn, ns, &cursorsToWait);
- }
+ return true;
+}
- long long checkIntervalMillis = 5;
+namespace {
+const int kWTimeoutMillis = 60 * 60 * 1000;
+
+bool _waitForMajority(OperationContext* txn, std::string* errMsg) {
+ const WriteConcernOptions writeConcern(
+ WriteConcernOptions::kMajority, WriteConcernOptions::NONE, kWTimeoutMillis);
+
+ repl::ReplicationCoordinator::StatusAndDuration replStatus =
+ repl::getGlobalReplicationCoordinator()->awaitReplicationOfLastOpForClient(txn,
+ writeConcern);
+ Milliseconds elapsedTime = replStatus.duration;
+ if (replStatus.status.code() == ErrorCodes::ExceededTimeLimit) {
+ *errMsg = str::stream() << "rangeDeleter timed out after "
+ << durationCount<Seconds>(elapsedTime)
+ << " seconds while waiting"
+ " for deletions to be replicated to majority nodes";
+ log() << *errMsg;
+ } else if (replStatus.status.code() == ErrorCodes::NotMaster) {
+ *errMsg = str::stream() << "rangeDeleter no longer PRIMARY after "
+ << durationCount<Seconds>(elapsedTime)
+ << " seconds while waiting"
+ " for deletions to be replicated to majority nodes";
+ } else {
+ LOG(elapsedTime < Seconds(30) ? 1 : 0)
+ << "rangeDeleter took " << durationCount<Seconds>(elapsedTime) << " seconds "
+ << " waiting for deletes to be replicated to majority nodes";
+
+ fassert(18512, replStatus.status);
+ }
- RangeDeleteEntry taskDetails(options);
- taskDetails.stats.queueStartTS = jsTime();
+ return replStatus.status.isOK();
+}
+}
- for (; !cursorsToWait.empty(); sleepmillis(checkIntervalMillis)) {
+bool RangeDeleter::deleteNow(OperationContext* txn,
+ const RangeDeleterOptions& options,
+ string* errMsg) {
+ if (stopRequested()) {
+ *errMsg = "deleter is already stopped.";
+ return false;
+ }
- logCursorsWaiting(&taskDetails);
+ string dummy;
+ if (errMsg == NULL)
+ errMsg = &dummy;
- set<CursorId> cursorsNow;
- _env->getCursorIds(txn, ns, &cursorsNow);
+ const string& ns(options.range.ns);
+ const BSONObj& min(options.range.minKey);
+ const BSONObj& max(options.range.maxKey);
- set<CursorId> cursorsLeft;
- std::set_intersection(cursorsToWait.begin(),
- cursorsToWait.end(),
- cursorsNow.begin(),
- cursorsNow.end(),
- std::inserter(cursorsLeft, cursorsLeft.end()));
+ NSMinMax deleteRange(ns, min, max);
+ {
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
+ if (!canEnqueue_inlock(ns, min, max, errMsg)) {
+ return false;
+ }
- cursorsToWait.swap(cursorsLeft);
+ _deleteSet.insert(&deleteRange);
- if (stopRequested()) {
- *errMsg = "deleter was stopped.";
+ // Note: count for pending deletes is an integral part of the shutdown story.
+ // Therefore, to simplify things, there is no "pending" state for deletes in
+ // deleteNow, the state transition is simply inProgress -> done.
+ _deletesInProgress++;
+ }
- stdx::lock_guard<stdx::mutex> sl(_queueMutex);
- _deleteSet.erase(&deleteRange);
+ set<CursorId> cursorsToWait;
+ if (options.waitForOpenCursors) {
+ _env->getCursorIds(txn, ns, &cursorsToWait);
+ }
- _deletesInProgress--;
+ long long checkIntervalMillis = 5;
- if (_deletesInProgress == 0) {
- _nothingInProgressCV.notify_one();
- }
+ RangeDeleteEntry taskDetails(options);
+ taskDetails.stats.queueStartTS = jsTime();
- return false;
- }
+ for (; !cursorsToWait.empty(); sleepmillis(checkIntervalMillis)) {
+ logCursorsWaiting(&taskDetails);
- if (checkIntervalMillis < kMaxCursorCheckIntervalMillis) {
- checkIntervalMillis *= 2;
- }
- }
- taskDetails.stats.queueEndTS = jsTime();
+ set<CursorId> cursorsNow;
+ _env->getCursorIds(txn, ns, &cursorsNow);
- taskDetails.stats.deleteStartTS = jsTime();
- bool result = _env->deleteRange(txn,
- taskDetails,
- &taskDetails.stats.deletedDocCount,
- errMsg);
+ set<CursorId> cursorsLeft;
+ std::set_intersection(cursorsToWait.begin(),
+ cursorsToWait.end(),
+ cursorsNow.begin(),
+ cursorsNow.end(),
+ std::inserter(cursorsLeft, cursorsLeft.end()));
- taskDetails.stats.deleteEndTS = jsTime();
+ cursorsToWait.swap(cursorsLeft);
- if (result) {
- taskDetails.stats.waitForReplStartTS = jsTime();
- result = _waitForMajority(txn, errMsg);
- taskDetails.stats.waitForReplEndTS = jsTime();
- }
+ if (stopRequested()) {
+ *errMsg = "deleter was stopped.";
- {
stdx::lock_guard<stdx::mutex> sl(_queueMutex);
_deleteSet.erase(&deleteRange);
@@ -404,229 +355,244 @@ namespace {
if (_deletesInProgress == 0) {
_nothingInProgressCV.notify_one();
}
+
+ return false;
}
- recordDelStats(new DeleteJobStats(taskDetails.stats));
- return result;
+ if (checkIntervalMillis < kMaxCursorCheckIntervalMillis) {
+ checkIntervalMillis *= 2;
+ }
}
+ taskDetails.stats.queueEndTS = jsTime();
- void RangeDeleter::getStatsHistory(std::vector<DeleteJobStats*>* stats) const {
- stats->clear();
- stats->reserve(kDeleteJobsHistory);
+ taskDetails.stats.deleteStartTS = jsTime();
+ bool result = _env->deleteRange(txn, taskDetails, &taskDetails.stats.deletedDocCount, errMsg);
- stdx::lock_guard<stdx::mutex> sl(_statsHistoryMutex);
- for (std::deque<DeleteJobStats*>::const_iterator it = _statsHistory.begin();
- it != _statsHistory.end(); ++it) {
- stats->push_back(new DeleteJobStats(**it));
- }
+ taskDetails.stats.deleteEndTS = jsTime();
+
+ if (result) {
+ taskDetails.stats.waitForReplStartTS = jsTime();
+ result = _waitForMajority(txn, errMsg);
+ taskDetails.stats.waitForReplEndTS = jsTime();
}
- BSONObj RangeDeleter::toBSON() const {
+ {
stdx::lock_guard<stdx::mutex> sl(_queueMutex);
+ _deleteSet.erase(&deleteRange);
- BSONObjBuilder builder;
+ _deletesInProgress--;
- BSONArrayBuilder notReadyBuilder(builder.subarrayStart("notReady"));
- for (TaskList::const_iterator iter = _notReadyQueue.begin();
- iter != _notReadyQueue.end(); ++iter) {
- notReadyBuilder.append((*iter)->toBSON());
+ if (_deletesInProgress == 0) {
+ _nothingInProgressCV.notify_one();
}
- notReadyBuilder.doneFast();
+ }
- BSONArrayBuilder readyBuilder(builder.subarrayStart("ready"));
- for (TaskList::const_iterator iter = _taskQueue.begin();
- iter != _taskQueue.end(); ++iter) {
- readyBuilder.append((*iter)->toBSON());
- }
- readyBuilder.doneFast();
+ recordDelStats(new DeleteJobStats(taskDetails.stats));
+ return result;
+}
+
+void RangeDeleter::getStatsHistory(std::vector<DeleteJobStats*>* stats) const {
+ stats->clear();
+ stats->reserve(kDeleteJobsHistory);
- return builder.obj();
+ stdx::lock_guard<stdx::mutex> sl(_statsHistoryMutex);
+ for (std::deque<DeleteJobStats*>::const_iterator it = _statsHistory.begin();
+ it != _statsHistory.end();
+ ++it) {
+ stats->push_back(new DeleteJobStats(**it));
}
+}
- void RangeDeleter::doWork() {
- Client::initThreadIfNotAlready("RangeDeleter");
- Client* client = &cc();
+BSONObj RangeDeleter::toBSON() const {
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
- while (!inShutdown() && !stopRequested()) {
- string errMsg;
+ BSONObjBuilder builder;
- RangeDeleteEntry* nextTask = NULL;
+ BSONArrayBuilder notReadyBuilder(builder.subarrayStart("notReady"));
+ for (TaskList::const_iterator iter = _notReadyQueue.begin(); iter != _notReadyQueue.end();
+ ++iter) {
+ notReadyBuilder.append((*iter)->toBSON());
+ }
+ notReadyBuilder.doneFast();
- {
- stdx::unique_lock<stdx::mutex> sl(_queueMutex);
- while (_taskQueue.empty()) {
- _taskQueueNotEmptyCV.timed_wait(
- sl, duration::milliseconds(kNotEmptyTimeoutMillis));
+ BSONArrayBuilder readyBuilder(builder.subarrayStart("ready"));
+ for (TaskList::const_iterator iter = _taskQueue.begin(); iter != _taskQueue.end(); ++iter) {
+ readyBuilder.append((*iter)->toBSON());
+ }
+ readyBuilder.doneFast();
- if (stopRequested()) {
- log() << "stopping range deleter worker" << endl;
- return;
- }
+ return builder.obj();
+}
- if (_taskQueue.empty()) {
- // Try to check if some deletes are ready and move them to the
- // ready queue.
-
- TaskList::iterator iter = _notReadyQueue.begin();
- while (iter != _notReadyQueue.end()) {
- RangeDeleteEntry* entry = *iter;
-
- set<CursorId> cursorsNow;
- if (entry->options.waitForOpenCursors) {
- auto txn = client->makeOperationContext();
- _env->getCursorIds(txn.get(),
- entry->options.range.ns,
- &cursorsNow);
- }
-
- set<CursorId> cursorsLeft;
- std::set_intersection(entry->cursorsToWait.begin(),
- entry->cursorsToWait.end(),
- cursorsNow.begin(),
- cursorsNow.end(),
- std::inserter(cursorsLeft,
- cursorsLeft.end()));
-
- entry->cursorsToWait.swap(cursorsLeft);
-
- if (entry->cursorsToWait.empty()) {
- (*iter)->stats.queueEndTS = jsTime();
- _taskQueue.push_back(*iter);
- _taskQueueNotEmptyCV.notify_one();
- iter = _notReadyQueue.erase(iter);
- }
- else {
- logCursorsWaiting(entry);
- ++iter;
- }
- }
- }
- }
+void RangeDeleter::doWork() {
+ Client::initThreadIfNotAlready("RangeDeleter");
+ Client* client = &cc();
+
+ while (!inShutdown() && !stopRequested()) {
+ string errMsg;
+
+ RangeDeleteEntry* nextTask = NULL;
+
+ {
+ stdx::unique_lock<stdx::mutex> sl(_queueMutex);
+ while (_taskQueue.empty()) {
+ _taskQueueNotEmptyCV.timed_wait(sl, duration::milliseconds(kNotEmptyTimeoutMillis));
if (stopRequested()) {
log() << "stopping range deleter worker" << endl;
return;
}
- nextTask = _taskQueue.front();
- _taskQueue.pop_front();
+ if (_taskQueue.empty()) {
+ // Try to check if some deletes are ready and move them to the
+ // ready queue.
- _deletesInProgress++;
- }
+ TaskList::iterator iter = _notReadyQueue.begin();
+ while (iter != _notReadyQueue.end()) {
+ RangeDeleteEntry* entry = *iter;
- {
- auto txn = client->makeOperationContext();
- nextTask->stats.deleteStartTS = jsTime();
- bool delResult = _env->deleteRange(txn.get(),
- *nextTask,
- &nextTask->stats.deletedDocCount,
- &errMsg);
- nextTask->stats.deleteEndTS = jsTime();
-
- if (delResult) {
- nextTask->stats.waitForReplStartTS = jsTime();
+ set<CursorId> cursorsNow;
+ if (entry->options.waitForOpenCursors) {
+ auto txn = client->makeOperationContext();
+ _env->getCursorIds(txn.get(), entry->options.range.ns, &cursorsNow);
+ }
- if (!_waitForMajority(txn.get(), &errMsg)) {
- warning() << "Error encountered while waiting for replication: " << errMsg;
+ set<CursorId> cursorsLeft;
+ std::set_intersection(entry->cursorsToWait.begin(),
+ entry->cursorsToWait.end(),
+ cursorsNow.begin(),
+ cursorsNow.end(),
+ std::inserter(cursorsLeft, cursorsLeft.end()));
+
+ entry->cursorsToWait.swap(cursorsLeft);
+
+ if (entry->cursorsToWait.empty()) {
+ (*iter)->stats.queueEndTS = jsTime();
+ _taskQueue.push_back(*iter);
+ _taskQueueNotEmptyCV.notify_one();
+ iter = _notReadyQueue.erase(iter);
+ } else {
+ logCursorsWaiting(entry);
+ ++iter;
+ }
}
-
- nextTask->stats.waitForReplEndTS = jsTime();
- }
- else {
- warning() << "Error encountered while trying to delete range: "
- << errMsg << endl;
}
}
- {
- stdx::lock_guard<stdx::mutex> sl(_queueMutex);
+ if (stopRequested()) {
+ log() << "stopping range deleter worker" << endl;
+ return;
+ }
- NSMinMax setEntry(nextTask->options.range.ns,
- nextTask->options.range.minKey,
- nextTask->options.range.maxKey);
- deletePtrElement(&_deleteSet, &setEntry);
- _deletesInProgress--;
+ nextTask = _taskQueue.front();
+ _taskQueue.pop_front();
+
+ _deletesInProgress++;
+ }
+
+ {
+ auto txn = client->makeOperationContext();
+ nextTask->stats.deleteStartTS = jsTime();
+ bool delResult =
+ _env->deleteRange(txn.get(), *nextTask, &nextTask->stats.deletedDocCount, &errMsg);
+ nextTask->stats.deleteEndTS = jsTime();
- if (nextTask->notifyDone) {
- nextTask->notifyDone->notifyOne();
+ if (delResult) {
+ nextTask->stats.waitForReplStartTS = jsTime();
+
+ if (!_waitForMajority(txn.get(), &errMsg)) {
+ warning() << "Error encountered while waiting for replication: " << errMsg;
}
- }
- recordDelStats(new DeleteJobStats(nextTask->stats));
- delete nextTask;
- nextTask = NULL;
+ nextTask->stats.waitForReplEndTS = jsTime();
+ } else {
+ warning() << "Error encountered while trying to delete range: " << errMsg << endl;
+ }
}
- }
- bool RangeDeleter::canEnqueue_inlock(StringData ns,
- const BSONObj& min,
- const BSONObj& max,
- string* errMsg) const {
+ {
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
+
+ NSMinMax setEntry(nextTask->options.range.ns,
+ nextTask->options.range.minKey,
+ nextTask->options.range.maxKey);
+ deletePtrElement(&_deleteSet, &setEntry);
+ _deletesInProgress--;
- NSMinMax toDelete(ns.toString(), min, max);
- if (_deleteSet.count(&toDelete) > 0) {
- *errMsg = str::stream() << "ns: " << ns << ", min: " << min << ", max: " << max
- << " is already being processed for deletion.";
- return false;
+ if (nextTask->notifyDone) {
+ nextTask->notifyDone->notifyOne();
+ }
}
- return true;
+ recordDelStats(new DeleteJobStats(nextTask->stats));
+ delete nextTask;
+ nextTask = NULL;
}
+}
- bool RangeDeleter::stopRequested() const {
- stdx::lock_guard<stdx::mutex> sl(_stopMutex);
- return _stopRequested;
+bool RangeDeleter::canEnqueue_inlock(StringData ns,
+ const BSONObj& min,
+ const BSONObj& max,
+ string* errMsg) const {
+ NSMinMax toDelete(ns.toString(), min, max);
+ if (_deleteSet.count(&toDelete) > 0) {
+ *errMsg = str::stream() << "ns: " << ns << ", min: " << min << ", max: " << max
+ << " is already being processed for deletion.";
+ return false;
}
- size_t RangeDeleter::getTotalDeletes() const {
- stdx::lock_guard<stdx::mutex> sl(_queueMutex);
- return _deleteSet.size();
- }
+ return true;
+}
- size_t RangeDeleter::getPendingDeletes() const {
- stdx::lock_guard<stdx::mutex> sl(_queueMutex);
- return _notReadyQueue.size() + _taskQueue.size();
- }
+bool RangeDeleter::stopRequested() const {
+ stdx::lock_guard<stdx::mutex> sl(_stopMutex);
+ return _stopRequested;
+}
- size_t RangeDeleter::getDeletesInProgress() const {
- stdx::lock_guard<stdx::mutex> sl(_queueMutex);
- return _deletesInProgress;
- }
+size_t RangeDeleter::getTotalDeletes() const {
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
+ return _deleteSet.size();
+}
- void RangeDeleter::recordDelStats(DeleteJobStats* newStat) {
- stdx::lock_guard<stdx::mutex> sl(_statsHistoryMutex);
- if (_statsHistory.size() == kDeleteJobsHistory) {
- delete _statsHistory.front();
- _statsHistory.pop_front();
- }
+size_t RangeDeleter::getPendingDeletes() const {
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
+ return _notReadyQueue.size() + _taskQueue.size();
+}
+
+size_t RangeDeleter::getDeletesInProgress() const {
+ stdx::lock_guard<stdx::mutex> sl(_queueMutex);
+ return _deletesInProgress;
+}
- _statsHistory.push_back(newStat);
+void RangeDeleter::recordDelStats(DeleteJobStats* newStat) {
+ stdx::lock_guard<stdx::mutex> sl(_statsHistoryMutex);
+ if (_statsHistory.size() == kDeleteJobsHistory) {
+ delete _statsHistory.front();
+ _statsHistory.pop_front();
}
- RangeDeleteEntry::RangeDeleteEntry(const RangeDeleterOptions& options)
- : options(options), notifyDone(NULL) {}
+ _statsHistory.push_back(newStat);
+}
- BSONObj RangeDeleteEntry::toBSON() const {
- BSONObjBuilder builder;
- builder.append("ns", options.range.ns);
- builder.append("min", options.range.minKey);
- builder.append("max", options.range.maxKey);
- BSONArrayBuilder cursorBuilder(builder.subarrayStart("cursors"));
+RangeDeleteEntry::RangeDeleteEntry(const RangeDeleterOptions& options)
+ : options(options), notifyDone(NULL) {}
- for (std::set<CursorId>::const_iterator it = cursorsToWait.begin();
- it != cursorsToWait.end(); ++it) {
- cursorBuilder.append((long long)*it);
- }
- cursorBuilder.doneFast();
+BSONObj RangeDeleteEntry::toBSON() const {
+ BSONObjBuilder builder;
+ builder.append("ns", options.range.ns);
+ builder.append("min", options.range.minKey);
+ builder.append("max", options.range.maxKey);
+ BSONArrayBuilder cursorBuilder(builder.subarrayStart("cursors"));
- return builder.done().copy();
+ for (std::set<CursorId>::const_iterator it = cursorsToWait.begin(); it != cursorsToWait.end();
+ ++it) {
+ cursorBuilder.append((long long)*it);
}
+ cursorBuilder.doneFast();
- RangeDeleterOptions::RangeDeleterOptions(const KeyRange& range)
- : range(range),
- fromMigrate(false),
- onlyRemoveOrphanedDocs(false),
- waitForOpenCursors(false) {
- }
+ return builder.done().copy();
+}
+RangeDeleterOptions::RangeDeleterOptions(const KeyRange& range)
+ : range(range), fromMigrate(false), onlyRemoveOrphanedDocs(false), waitForOpenCursors(false) {}
}
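
The cursor-draining loop in deleteNow() above keeps only the cursors that are both still open and in the original wait set, and doubles its polling interval up to kMaxCursorCheckIntervalMillis. A standalone sketch of that shape, with int cursor IDs and a simulated environment standing in for getCursorIds() and the sleep between polls:

// Sketch of the wait-for-open-cursors loop: intersect the wait set with the cursors
// still open, and back off exponentially (capped) between checks. The "still open"
// set here shrinks artificially; the real code re-queries the environment each pass.
#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>

int main() {
    std::set<int> cursorsToWait{11, 22, 33};
    std::set<int> stillOpen{11, 22, 33};

    const long long kMaxCheckIntervalMillis = 500;
    long long checkIntervalMillis = 5;

    while (!cursorsToWait.empty()) {
        // Simulate one cursor closing per poll; the real loop calls getCursorIds().
        stillOpen.erase(stillOpen.begin());

        std::set<int> cursorsLeft;
        std::set_intersection(cursorsToWait.begin(),
                              cursorsToWait.end(),
                              stillOpen.begin(),
                              stillOpen.end(),
                              std::inserter(cursorsLeft, cursorsLeft.end()));
        cursorsToWait.swap(cursorsLeft);

        std::cout << "waiting on " << cursorsToWait.size() << " cursor(s), next check in "
                  << checkIntervalMillis << "ms\n";

        if (checkIntervalMillis < kMaxCheckIntervalMillis)
            checkIntervalMillis *= 2;  // exponential backoff, capped by the constant above
    }
    return 0;
}
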
diff --git a/src/mongo/db/range_deleter.h b/src/mongo/db/range_deleter.h
index eaf8fa35e1d..f58625f8083 100644
--- a/src/mongo/db/range_deleter.h
+++ b/src/mongo/db/range_deleter.h
@@ -47,289 +47,286 @@
namespace mongo {
- class OperationContext;
- struct DeleteJobStats;
- struct RangeDeleteEntry;
- struct RangeDeleterEnv;
- struct RangeDeleterOptions;
+class OperationContext;
+struct DeleteJobStats;
+struct RangeDeleteEntry;
+struct RangeDeleterEnv;
+struct RangeDeleterOptions;
+
+/**
+ * Class for deleting documents for a given namespace and range. It contains a queue of
+ * jobs to be deleted. Deletions can be "immediate", in which case they are going to be put
+ * in front of the queue and acted on promptly, or "lazy", in which case they are acted
+ * upon when they reach the head of the queue.
+ *
+ * Threading assumptions:
+ *
+ * This class has (currently) one worker thread attacking the queue, one
+ * job at a time. If we want an immediate deletion, that job is going to
+ * be performed on the thread that is requesting it.
+ *
+ * All calls regarding deletion are synchronized.
+ *
+ * Life cycle:
+ * RangeDeleter* deleter = new RangeDeleter(new ...);
+ * deleter->startWorkers();
+ * ...
+ * getGlobalServiceContext()->killAllOperations(); // stop all deletes
+ * deleter->stopWorkers();
+ * delete deleter;
+ */
+class RangeDeleter {
+ MONGO_DISALLOW_COPYING(RangeDeleter);
+
+public:
+ /**
+ * Creates a new deleter and uses an environment object to delegate external logic like
+ * data deletion. Takes ownership of the environment.
+ */
+ explicit RangeDeleter(RangeDeleterEnv* env);
+
+ /**
+ * Destroys this deleter. Must make sure that no threads are working on this queue. Use
+ * stopWorkers to stop the internal workers, it is an error not to do so.
+ */
+ ~RangeDeleter();
+
+ //
+ // Thread management methods
+ //
+
+ /**
+ * Starts the background thread to work on this queue. Does nothing if the worker
+ * thread is already active.
+ *
+ * This call is _not_ thread safe and must be issued before any other call.
+ */
+ void startWorkers();
/**
- * Class for deleting documents for a given namespace and range. It contains a queue of
- * jobs to be deleted. Deletions can be "immediate", in which case they are going to be put
- * in front of the queue and acted on promptly, or "lazy", in which they would be acted
- * upon when they get to the head of the queue.
+ * Stops the background thread working on this queue. This will block if there are
+ * tasks that are being deleted, but will leave the pending tasks in the queue.
*
- * Threading assumptions:
+ * Steps:
+ * 1. Stop accepting new queued deletes.
+ * 2. Stop all idle workers.
+ * 3. Wait for all threads to finish any task that is in progress (but see note
+ * below).
*
- * This class has (currently) one worker thread attacking the queue, one
- * job at a time. If we want an immediate deletion, that job is going to
- * be performed on the thread that is requesting it.
+ * Note:
*
- * All calls regarding deletion are synchronized.
+ * + restarting this deleter with startWorkers after stopping it is not supported.
*
- * Life cycle:
- * RangeDeleter* deleter = new RangeDeleter(new ...);
- * deleter->startWorkers();
- * ...
- * getGlobalServiceContext()->killAllOperations(); // stop all deletes
- * deleter->stopWorkers();
- * delete deleter;
+ * + the worker thread could be running a call in the environment. The thread is
+ * only going to be returned when the environment decides so. In production,
+ * KillCurrentOp::killAll can be used to get the thread back from the environment.
*/
- class RangeDeleter {
- MONGO_DISALLOW_COPYING(RangeDeleter);
- public:
-
- /**
- * Creates a new deleter and uses an environment object to delegate external logic like
- * data deletion. Takes ownership of the environment.
- */
- explicit RangeDeleter(RangeDeleterEnv* env);
-
- /**
- * Destroys this deleter. Must make sure that no threads are working on this queue. Use
- * stopWorkers to stop the internal workers, it is an error not to do so.
- */
- ~RangeDeleter();
-
- //
- // Thread management methods
- //
-
- /**
- * Starts the background thread to work on this queue. Does nothing if the worker
- * thread is already active.
- *
- * This call is _not_ thread safe and must be issued before any other call.
- */
- void startWorkers();
-
- /**
- * Stops the background thread working on this queue. This will block if there are
- * tasks that are being deleted, but will leave the pending tasks in the queue.
- *
- * Steps:
- * 1. Stop accepting new queued deletes.
- * 2. Stop all idle workers.
- * 3. Waits for all threads to finish any task that is in progress (but see note
- * below).
- *
- * Note:
- *
- * + restarting this deleter with startWorkers after stopping it is not supported.
- *
- * + the worker thread could be running a call in the environment. The thread is
- * only going to be returned when the environment decides so. In production,
- * KillCurrentOp::killAll can be used to get the thread back from the environment.
- */
- void stopWorkers();
-
- //
- // Queue manipulation methods - can be called by anyone.
- //
-
- /**
- * Adds a new delete to the queue.
- *
- * If notifyDone is not NULL, it will be signaled after the delete is completed.
- * Note that this will happen only if the delete was actually queued.
- *
- * Returns true if the task is queued and false If the given range is blacklisted,
- * is already queued, or stopWorkers() was called.
- */
- bool queueDelete(OperationContext* txn,
- const RangeDeleterOptions& options,
- Notification* notifyDone,
- std::string* errMsg);
-
- /**
- * Removes the documents specified by the range. Unlike queueTask, this call
- * blocks and the deletion is performed by the current thread.
- *
- * Returns true if the deletion was performed. False if the range is blacklisted,
- * was already queued, or stopWorkers() was called.
- */
- bool deleteNow(OperationContext* txn,
- const RangeDeleterOptions& options,
- std::string* errMsg);
-
- //
- // Introspection methods
- //
-
- // Note: original contents of stats will be cleared. Caller owns the returned stats.
- void getStatsHistory(std::vector<DeleteJobStats*>* stats) const;
-
- size_t getTotalDeletes() const;
- size_t getPendingDeletes() const;
- size_t getDeletesInProgress() const;
-
- //
- // Methods meant to be only used for testing. Should be treated like private
- // methods.
- //
-
- /** Returns a BSON representation of the queue contents. For debugging only. */
- BSONObj toBSON() const;
-
- private:
- // Ownership is transferred to here.
- void recordDelStats(DeleteJobStats* newStat);
-
-
- struct NSMinMax;
-
- struct NSMinMaxCmp {
- bool operator()(const NSMinMax* lhs, const NSMinMax* rhs) const;
- };
-
- typedef std::deque<RangeDeleteEntry*> TaskList; // owned here
-
- typedef std::set<NSMinMax*, NSMinMaxCmp> NSMinMaxSet; // owned here
-
- /** Body of the worker thread */
- void doWork();
-
- /** Returns true if the range doesn't intersect with one other range */
- bool canEnqueue_inlock(StringData ns,
- const BSONObj& min,
- const BSONObj& max,
- std::string* errMsg) const;
-
- /** Returns true if stopWorkers() was called. This call is synchronized. */
- bool stopRequested() const;
-
- std::unique_ptr<RangeDeleterEnv> _env;
-
- // Initially not active. Must be started explicitly.
- std::unique_ptr<stdx::thread> _worker;
-
- // Protects _stopRequested.
- mutable stdx::mutex _stopMutex;
-
- // If set, no other delete taks should be accepted.
- bool _stopRequested;
-
- // No delete is in progress. Used to make sure that there is no activity
- // in this deleter, and therefore is safe to destroy it. Must be used in
- // conjunction with _stopRequested.
- stdx::condition_variable _nothingInProgressCV;
-
- // Protects all the data structure below this.
- mutable stdx::mutex _queueMutex;
-
- // _taskQueue has a task ready to work on.
- stdx::condition_variable _taskQueueNotEmptyCV;
-
- // Queue for storing the list of ranges that have cursors pending on it.
- //
- // Note: pointer life cycle is not handled here.
- TaskList _notReadyQueue;
-
- // Queue for storing the list of ranges that are ready to be removed.
- //
- // Note: pointer life cycle is not handled here.
- TaskList _taskQueue;
-
- // Set of all deletes - deletes waiting for cursors, waiting to be acted upon
- // and in progress. Includes both queued and immediate deletes.
- //
- // queued delete life cycle: new @ queuedDelete, delete @ doWork
- // deleteNow life cycle: deleteNow stack variable
- NSMinMaxSet _deleteSet;
+ void stopWorkers();
- // Keeps track of number of tasks that are in progress, including the inline deletes.
- size_t _deletesInProgress;
-
- // Protects _statsHistory
- mutable stdx::mutex _statsHistoryMutex;
- std::deque<DeleteJobStats*> _statsHistory;
- };
+ //
+ // Queue manipulation methods - can be called by anyone.
+ //
+ /**
+ * Adds a new delete to the queue.
+ *
+ * If notifyDone is not NULL, it will be signaled after the delete is completed.
+ * Note that this will happen only if the delete was actually queued.
+ *
+ * Returns true if the task is queued and false if the given range is blacklisted,
+ * is already queued, or stopWorkers() was called.
+ */
+ bool queueDelete(OperationContext* txn,
+ const RangeDeleterOptions& options,
+ Notification* notifyDone,
+ std::string* errMsg);
/**
- * Simple class for storing statistics for the RangeDeleter.
+ * Removes the documents specified by the range. Unlike queueTask, this call
+ * blocks and the deletion is performed by the current thread.
+ *
+ * Returns true if the deletion was performed. False if the range is blacklisted,
+ * was already queued, or stopWorkers() was called.
*/
- struct DeleteJobStats {
- Date_t queueStartTS;
- Date_t queueEndTS;
- Date_t deleteStartTS;
- Date_t deleteEndTS;
- Date_t waitForReplStartTS;
- Date_t waitForReplEndTS;
-
- long long int deletedDocCount;
-
- DeleteJobStats(): deletedDocCount(0) {
- }
- };
+ bool deleteNow(OperationContext* txn, const RangeDeleterOptions& options, std::string* errMsg);
+
+ //
+ // Introspection methods
+ //
- struct RangeDeleterOptions {
- RangeDeleterOptions(const KeyRange& range);
+ // Note: original contents of stats will be cleared. Caller owns the returned stats.
+ void getStatsHistory(std::vector<DeleteJobStats*>* stats) const;
- const KeyRange range;
+ size_t getTotalDeletes() const;
+ size_t getPendingDeletes() const;
+ size_t getDeletesInProgress() const;
- WriteConcernOptions writeConcern;
- std::string removeSaverReason;
- bool fromMigrate;
- bool onlyRemoveOrphanedDocs;
- bool waitForOpenCursors;
+ //
+ // Methods meant to be only used for testing. Should be treated like private
+ // methods.
+ //
+
+ /** Returns a BSON representation of the queue contents. For debugging only. */
+ BSONObj toBSON() const;
+
+private:
+ // Ownership of newStat is transferred here.
+ void recordDelStats(DeleteJobStats* newStat);
+
+
+ struct NSMinMax;
+
+ struct NSMinMaxCmp {
+ bool operator()(const NSMinMax* lhs, const NSMinMax* rhs) const;
};
- /**
- * For internal use only.
- */
- struct RangeDeleteEntry {
- RangeDeleteEntry(const RangeDeleterOptions& options);
+ typedef std::deque<RangeDeleteEntry*> TaskList; // owned here
- const RangeDeleterOptions options;
+ typedef std::set<NSMinMax*, NSMinMaxCmp> NSMinMaxSet; // owned here
- // Sets of cursors to wait to close until this can be ready
- // for deletion.
- std::set<CursorId> cursorsToWait;
+ /** Body of the worker thread */
+ void doWork();
- // Not owned here.
- // Important invariant: Can only be set and used by one thread.
- Notification* notifyDone;
+ /** Returns true if the range doesn't intersect with any other range */
+ bool canEnqueue_inlock(StringData ns,
+ const BSONObj& min,
+ const BSONObj& max,
+ std::string* errMsg) const;
- // Time since the last time we reported this object.
- Date_t lastLoggedTS;
+ /** Returns true if stopWorkers() was called. This call is synchronized. */
+ bool stopRequested() const;
- DeleteJobStats stats;
+ std::unique_ptr<RangeDeleterEnv> _env;
- // For debugging only
- BSONObj toBSON() const;
- };
+ // Initially not active. Must be started explicitly.
+ std::unique_ptr<stdx::thread> _worker;
+
+ // Protects _stopRequested.
+ mutable stdx::mutex _stopMutex;
+
+ // If set, no other delete tasks should be accepted.
+ bool _stopRequested;
+
+ // No delete is in progress. Used to make sure that there is no activity
+ // in this deleter, and therefore is safe to destroy it. Must be used in
+ // conjunction with _stopRequested.
+ stdx::condition_variable _nothingInProgressCV;
+
+ // Protects all the data structure below this.
+ mutable stdx::mutex _queueMutex;
+
+ // _taskQueue has a task ready to work on.
+ stdx::condition_variable _taskQueueNotEmptyCV;
+
+ // Queue for storing the list of ranges that have cursors pending on them.
+ //
+ // Note: pointer life cycle is not handled here.
+ TaskList _notReadyQueue;
+
+ // Queue for storing the list of ranges that are ready to be removed.
+ //
+ // Note: pointer life cycle is not handled here.
+ TaskList _taskQueue;
+
+ // Set of all deletes - deletes waiting for cursors, waiting to be acted upon
+ // and in progress. Includes both queued and immediate deletes.
+ //
+ // queued delete life cycle: new @ queuedDelete, delete @ doWork
+ // deleteNow life cycle: deleteNow stack variable
+ NSMinMaxSet _deleteSet;
+
+ // Keeps track of number of tasks that are in progress, including the inline deletes.
+ size_t _deletesInProgress;
+
+ // Protects _statsHistory
+ mutable stdx::mutex _statsHistoryMutex;
+ std::deque<DeleteJobStats*> _statsHistory;
+};
+
+
+/**
+ * Simple class for storing statistics for the RangeDeleter.
+ */
+struct DeleteJobStats {
+ Date_t queueStartTS;
+ Date_t queueEndTS;
+ Date_t deleteStartTS;
+ Date_t deleteEndTS;
+ Date_t waitForReplStartTS;
+ Date_t waitForReplEndTS;
+
+ long long int deletedDocCount;
+
+ DeleteJobStats() : deletedDocCount(0) {}
+};
+
+struct RangeDeleterOptions {
+ RangeDeleterOptions(const KeyRange& range);
+
+ const KeyRange range;
+
+ WriteConcernOptions writeConcern;
+ std::string removeSaverReason;
+ bool fromMigrate;
+ bool onlyRemoveOrphanedDocs;
+ bool waitForOpenCursors;
+};
+
+/**
+ * For internal use only.
+ */
+struct RangeDeleteEntry {
+ RangeDeleteEntry(const RangeDeleterOptions& options);
+
+ const RangeDeleterOptions options;
+
+ // Set of cursors that must close before this entry can be ready
+ // for deletion.
+ std::set<CursorId> cursorsToWait;
+
+ // Not owned here.
+ // Important invariant: Can only be set and used by one thread.
+ Notification* notifyDone;
+
+ // Time at which this object was last reported/logged.
+ Date_t lastLoggedTS;
+
+ DeleteJobStats stats;
+
+ // For debugging only
+ BSONObj toBSON() const;
+};
+
+/**
+ * Class for encapsulating logic used by the RangeDeleter class to perform its tasks.
+ */
+struct RangeDeleterEnv {
+ virtual ~RangeDeleterEnv() {}
/**
- * Class for encapsulating logic used by the RangeDeleter class to perform its tasks.
+ * Deletes the documents from the given range. This method should be
+ * responsible for making sure that the proper contexts are setup
+ * to be able to perform deletions.
+ *
+ * Must be a synchronous call. Docs should be deleted after call ends.
+ * Must not throw exceptions.
*/
- struct RangeDeleterEnv {
- virtual ~RangeDeleterEnv() {}
-
- /**
- * Deletes the documents from the given range. This method should be
- * responsible for making sure that the proper contexts are setup
- * to be able to perform deletions.
- *
- * Must be a synchronous call. Docs should be deleted after call ends.
- * Must not throw Exceptions.
- */
- virtual bool deleteRange(OperationContext* txn,
- const RangeDeleteEntry& taskDetails,
- long long int* deletedDocs,
- std::string* errMsg) = 0;
-
- /**
- * Gets the list of open cursors on a given namespace. The openCursors is an
- * output parameter that will contain all the cursors open after this is called.
- * Assume that openCursors is empty when passed in.
- *
- * Must be a synchronous call. CursorIds should be populated after call.
- * Must not throw exception.
- */
- virtual void getCursorIds(OperationContext* txn,
- StringData ns,
- std::set<CursorId>* openCursors) = 0;
- };
+ virtual bool deleteRange(OperationContext* txn,
+ const RangeDeleteEntry& taskDetails,
+ long long int* deletedDocs,
+ std::string* errMsg) = 0;
+
+ /**
+ * Gets the list of open cursors on a given namespace. The openCursors is an
+ * output parameter that will contain all the cursors open after this is called.
+ * Assume that openCursors is empty when passed in.
+ *
+ * Must be a synchronous call. CursorIds should be populated after call.
+ * Must not throw exceptions.
+ */
+ virtual void getCursorIds(OperationContext* txn,
+ StringData ns,
+ std::set<CursorId>* openCursors) = 0;
+};
-} // namespace mongo
+} // namespace mongo
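
As a usage sketch (not part of the patch, and assuming workers are already started and a real OperationContext is at hand), the queueDelete() path documented in this header looks roughly like this; the namespace and bounds are invented:

    #include "mongo/db/range_deleter.h"

    // Sketch only: queue a lazy delete and block until the worker finishes it.
    void queueOrphanCleanup(mongo::OperationContext* txn, mongo::RangeDeleter* deleter) {
        mongo::RangeDeleterOptions options(
            mongo::KeyRange("test.user", BSON("x" << 0), BSON("x" << 10), BSON("x" << 1)));
        options.onlyRemoveOrphanedDocs = true;
        options.waitForOpenCursors = true;

        mongo::Notification notifyDone;
        std::string errMsg;
        if (!deleter->queueDelete(txn, options, &notifyDone, &errMsg)) {
            // Blacklisted range, duplicate request, or stopWorkers() was already called.
            return;
        }
        notifyDone.waitToBeNotified();  // Signaled once the queued delete completes.
    }
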
diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp
index 69f2cd86d33..b9971f54b03 100644
--- a/src/mongo/db/range_deleter_db_env.cpp
+++ b/src/mongo/db/range_deleter_db_env.cpp
@@ -46,102 +46,87 @@
namespace mongo {
- using std::endl;
- using std::string;
-
- /**
- * Outline of the delete process:
- * 1. Initialize the client for this thread if there is no client. This is for the worker
- * threads that are attached to any of the threads servicing client requests.
- * 2. Grant this thread authorization to perform deletes.
- * 3. Temporarily enable mode to bypass shard version checks. TODO: Replace this hack.
- * 4. Setup callback to save deletes to moveChunk directory (only if moveParanoia is true).
- * 5. Delete range.
- * 6. Wait until the majority of the secondaries catch up.
- */
- bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
- const RangeDeleteEntry& taskDetails,
- long long int* deletedDocs,
- std::string* errMsg) {
- const string ns(taskDetails.options.range.ns);
- const BSONObj inclusiveLower(taskDetails.options.range.minKey);
- const BSONObj exclusiveUpper(taskDetails.options.range.maxKey);
- const BSONObj keyPattern(taskDetails.options.range.keyPattern);
- const WriteConcernOptions writeConcern(taskDetails.options.writeConcern);
- const bool fromMigrate = taskDetails.options.fromMigrate;
- const bool onlyRemoveOrphans = taskDetails.options.onlyRemoveOrphanedDocs;
-
- Client::initThreadIfNotAlready("RangeDeleter");
-
- *deletedDocs = 0;
- ShardForceVersionOkModeBlock forceVersion(txn->getClient());
- {
- Helpers::RemoveSaver removeSaver("moveChunk",
- ns,
- taskDetails.options.removeSaverReason);
- Helpers::RemoveSaver* removeSaverPtr = NULL;
- if (serverGlobalParams.moveParanoia &&
- !taskDetails.options.removeSaverReason.empty()) {
- removeSaverPtr = &removeSaver;
- }
+using std::endl;
+using std::string;
- // log the opId so the user can use it to cancel the delete using killOp.
- unsigned int opId = txn->getOpID();
- log() << "Deleter starting delete for: " << ns
- << " from " << inclusiveLower
- << " -> " << exclusiveUpper
- << ", with opId: " << opId
- << endl;
-
- try {
- *deletedDocs =
- Helpers::removeRange(txn,
- KeyRange(ns,
- inclusiveLower,
- exclusiveUpper,
- keyPattern),
- false, /*maxInclusive*/
- writeConcern,
- removeSaverPtr,
- fromMigrate,
- onlyRemoveOrphans);
-
- if (*deletedDocs < 0) {
- *errMsg = "collection or index dropped before data could be cleaned";
- warning() << *errMsg << endl;
-
- return false;
- }
-
- log() << "rangeDeleter deleted " << *deletedDocs
- << " documents for " << ns
- << " from " << inclusiveLower
- << " -> " << exclusiveUpper
- << endl;
- }
- catch (const DBException& ex) {
- *errMsg = str::stream() << "Error encountered while deleting range: "
- << "ns" << ns
- << " from " << inclusiveLower
- << " -> " << exclusiveUpper
- << ", cause by:" << causedBy(ex);
+/**
+ * Outline of the delete process:
+ * 1. Initialize the client for this thread if there is no client. This is for the worker
+ * threads that are attached to any of the threads servicing client requests.
+ * 2. Grant this thread authorization to perform deletes.
+ * 3. Temporarily enable mode to bypass shard version checks. TODO: Replace this hack.
+ * 4. Setup callback to save deletes to moveChunk directory (only if moveParanoia is true).
+ * 5. Delete range.
+ * 6. Wait until the majority of the secondaries catch up.
+ */
+bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
+ const RangeDeleteEntry& taskDetails,
+ long long int* deletedDocs,
+ std::string* errMsg) {
+ const string ns(taskDetails.options.range.ns);
+ const BSONObj inclusiveLower(taskDetails.options.range.minKey);
+ const BSONObj exclusiveUpper(taskDetails.options.range.maxKey);
+ const BSONObj keyPattern(taskDetails.options.range.keyPattern);
+ const WriteConcernOptions writeConcern(taskDetails.options.writeConcern);
+ const bool fromMigrate = taskDetails.options.fromMigrate;
+ const bool onlyRemoveOrphans = taskDetails.options.onlyRemoveOrphanedDocs;
+
+ Client::initThreadIfNotAlready("RangeDeleter");
+
+ *deletedDocs = 0;
+ ShardForceVersionOkModeBlock forceVersion(txn->getClient());
+ {
+ Helpers::RemoveSaver removeSaver("moveChunk", ns, taskDetails.options.removeSaverReason);
+ Helpers::RemoveSaver* removeSaverPtr = NULL;
+ if (serverGlobalParams.moveParanoia && !taskDetails.options.removeSaverReason.empty()) {
+ removeSaverPtr = &removeSaver;
+ }
+
+ // log the opId so the user can use it to cancel the delete using killOp.
+ unsigned int opId = txn->getOpID();
+ log() << "Deleter starting delete for: " << ns << " from " << inclusiveLower << " -> "
+ << exclusiveUpper << ", with opId: " << opId << endl;
+
+ try {
+ *deletedDocs =
+ Helpers::removeRange(txn,
+ KeyRange(ns, inclusiveLower, exclusiveUpper, keyPattern),
+ false, /*maxInclusive*/
+ writeConcern,
+ removeSaverPtr,
+ fromMigrate,
+ onlyRemoveOrphans);
+
+ if (*deletedDocs < 0) {
+ *errMsg = "collection or index dropped before data could be cleaned";
+ warning() << *errMsg << endl;
return false;
}
- }
- return true;
- }
+ log() << "rangeDeleter deleted " << *deletedDocs << " documents for " << ns << " from "
+ << inclusiveLower << " -> " << exclusiveUpper << endl;
+ } catch (const DBException& ex) {
+ *errMsg = str::stream() << "Error encountered while deleting range: "
+ << "ns" << ns << " from " << inclusiveLower << " -> "
+ << exclusiveUpper << ", cause by:" << causedBy(ex);
- void RangeDeleterDBEnv::getCursorIds(OperationContext* txn,
- StringData ns,
- std::set<CursorId>* openCursors) {
- AutoGetCollectionForRead ctx(txn, ns.toString());
- Collection* collection = ctx.getCollection();
- if (!collection) {
- return;
+ return false;
}
+ }
+
+ return true;
+}
- collection->getCursorManager()->getCursorIds( openCursors );
+void RangeDeleterDBEnv::getCursorIds(OperationContext* txn,
+ StringData ns,
+ std::set<CursorId>* openCursors) {
+ AutoGetCollectionForRead ctx(txn, ns.toString());
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ return;
}
+
+ collection->getCursorManager()->getCursorIds(openCursors);
+}
}
diff --git a/src/mongo/db/range_deleter_db_env.h b/src/mongo/db/range_deleter_db_env.h
index 176399511d5..0bca8c6618c 100644
--- a/src/mongo/db/range_deleter_db_env.h
+++ b/src/mongo/db/range_deleter_db_env.h
@@ -32,35 +32,34 @@
namespace mongo {
+/**
+ * This class implements the deleter methods to be used for a shard.
+ */
+struct RangeDeleterDBEnv : public RangeDeleterEnv {
/**
- * This class implements the deleter methods to be used for a shard.
+ * Deletes the documents from the given range synchronously.
+ *
+ * The keyPattern will be used to determine the right index to use to perform
+ * the deletion and it can be a prefix of an existing index. Caller is responsible
+ * for making sure that both min and max are prefixes of keyPattern.
+ *
+ * Note that secondaryThrottle will be ignored if current process is not part
+ * of a replica set.
+ *
+ * deletedDocs will contain the number of docs deleted if the deletion was successful.
+ *
+ * Does not throw Exceptions.
*/
- struct RangeDeleterDBEnv : public RangeDeleterEnv {
-
- /**
- * Deletes the documents from the given range synchronously.
- *
- * The keyPattern will be used to determine the right index to use to perform
- * the deletion and it can be a prefix of an existing index. Caller is responsible
- * of making sure that both min and max is a prefix of keyPattern.
- *
- * Note that secondaryThrottle will be ignored if current process is not part
- * of a replica set.
- *
- * docsDeleted would contain the number of docs deleted if the deletion was successful.
- *
- * Does not throw Exceptions.
- */
- virtual bool deleteRange(OperationContext* txn,
- const RangeDeleteEntry& taskDetails,
- long long int* deletedDocs,
- std::string* errMsg);
+ virtual bool deleteRange(OperationContext* txn,
+ const RangeDeleteEntry& taskDetails,
+ long long int* deletedDocs,
+ std::string* errMsg);
- /**
- * Gets the list of open cursors on a given namespace.
- */
- virtual void getCursorIds(OperationContext* txn,
- StringData ns,
- std::set<CursorId>* openCursors);
- };
+ /**
+ * Gets the list of open cursors on a given namespace.
+ */
+ virtual void getCursorIds(OperationContext* txn,
+ StringData ns,
+ std::set<CursorId>* openCursors);
+};
}
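
Since RangeDeleterDBEnv is just one implementation of the RangeDeleterEnv interface, a minimal alternative backend only needs the two pure virtuals; a sketch (not part of the patch) of a no-op environment, useful as a starting point:

    #include "mongo/db/range_deleter.h"

    // Sketch only: an environment that deletes nothing and reports no open cursors.
    struct NoopRangeDeleterEnv : public mongo::RangeDeleterEnv {
        virtual bool deleteRange(mongo::OperationContext* txn,
                                 const mongo::RangeDeleteEntry& taskDetails,
                                 long long int* deletedDocs,
                                 std::string* errMsg) {
            *deletedDocs = 0;  // Pretend the range was already empty.
            return true;
        }

        virtual void getCursorIds(mongo::OperationContext* txn,
                                  mongo::StringData ns,
                                  std::set<mongo::CursorId>* openCursors) {
            // Leave openCursors empty so queued deletes never wait on cursors.
        }
    };
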
diff --git a/src/mongo/db/range_deleter_mock_env.cpp b/src/mongo/db/range_deleter_mock_env.cpp
index f831c746ceb..1ee01d6a19a 100644
--- a/src/mongo/db/range_deleter_mock_env.cpp
+++ b/src/mongo/db/range_deleter_mock_env.cpp
@@ -34,125 +34,119 @@
namespace mongo {
- using std::set;
- using std::string;
+using std::set;
+using std::string;
- bool DeletedRangeCmp::operator()(const DeletedRange& lhs,
- const DeletedRange& rhs) const {
- const int nsComp = lhs.ns.compare(rhs.ns);
+bool DeletedRangeCmp::operator()(const DeletedRange& lhs, const DeletedRange& rhs) const {
+ const int nsComp = lhs.ns.compare(rhs.ns);
- if (nsComp < 0) {
- return true;
- }
-
- if (nsComp > 0) {
- return false;
- }
-
- return compareRanges(lhs.min, lhs.max, rhs.min, rhs.max) < 0;
+ if (nsComp < 0) {
+ return true;
}
- RangeDeleterMockEnv::RangeDeleterMockEnv():
- _pauseDelete(false),
- _pausedCount(0),
- _getCursorsCallCount(0) {
-
- setGlobalServiceContext(stdx::make_unique<ServiceContextNoop>());
+ if (nsComp > 0) {
+ return false;
}
- void RangeDeleterMockEnv::addCursorId(StringData ns, CursorId id) {
- stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
- _cursorMap[ns.toString()].insert(id);
- }
+ return compareRanges(lhs.min, lhs.max, rhs.min, rhs.max) < 0;
+}
- void RangeDeleterMockEnv::removeCursorId(StringData ns, CursorId id) {
- stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
- _cursorMap[ns.toString()].erase(id);
- }
+RangeDeleterMockEnv::RangeDeleterMockEnv()
+ : _pauseDelete(false), _pausedCount(0), _getCursorsCallCount(0) {
+ setGlobalServiceContext(stdx::make_unique<ServiceContextNoop>());
+}
- void RangeDeleterMockEnv::pauseDeletes() {
- stdx::lock_guard<stdx::mutex> sl(_pauseDeleteMutex);
- _pauseDelete = true;
- }
+void RangeDeleterMockEnv::addCursorId(StringData ns, CursorId id) {
+ stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
+ _cursorMap[ns.toString()].insert(id);
+}
- void RangeDeleterMockEnv::resumeOneDelete() {
- stdx::lock_guard<stdx::mutex> sl(_pauseDeleteMutex);
- _pauseDelete = false;
- _pausedCV.notify_one();
- }
+void RangeDeleterMockEnv::removeCursorId(StringData ns, CursorId id) {
+ stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
+ _cursorMap[ns.toString()].erase(id);
+}
- void RangeDeleterMockEnv::waitForNthGetCursor(uint64_t nthCall) {
- stdx::unique_lock<stdx::mutex> sl(_envStatMutex);
- while (_getCursorsCallCount < nthCall) {
- _cursorsCallCountUpdatedCV.wait(sl);
- }
- }
+void RangeDeleterMockEnv::pauseDeletes() {
+ stdx::lock_guard<stdx::mutex> sl(_pauseDeleteMutex);
+ _pauseDelete = true;
+}
- void RangeDeleterMockEnv::waitForNthPausedDelete(uint64_t nthPause) {
- stdx::unique_lock<stdx::mutex> sl(_pauseDeleteMutex);
- while(_pausedCount < nthPause) {
- _pausedDeleteChangeCV.wait(sl);
- }
- }
+void RangeDeleterMockEnv::resumeOneDelete() {
+ stdx::lock_guard<stdx::mutex> sl(_pauseDeleteMutex);
+ _pauseDelete = false;
+ _pausedCV.notify_one();
+}
- bool RangeDeleterMockEnv::deleteOccured() const {
- stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
- return !_deleteList.empty();
+void RangeDeleterMockEnv::waitForNthGetCursor(uint64_t nthCall) {
+ stdx::unique_lock<stdx::mutex> sl(_envStatMutex);
+ while (_getCursorsCallCount < nthCall) {
+ _cursorsCallCountUpdatedCV.wait(sl);
}
+}
- DeletedRange RangeDeleterMockEnv::getLastDelete() const {
- stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
- return _deleteList.back();
+void RangeDeleterMockEnv::waitForNthPausedDelete(uint64_t nthPause) {
+ stdx::unique_lock<stdx::mutex> sl(_pauseDeleteMutex);
+ while (_pausedCount < nthPause) {
+ _pausedDeleteChangeCV.wait(sl);
}
+}
- bool RangeDeleterMockEnv::deleteRange(OperationContext* txn,
- const RangeDeleteEntry& taskDetails,
- long long int* deletedDocs,
- string* errMsg) {
+bool RangeDeleterMockEnv::deleteOccured() const {
+ stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
+ return !_deleteList.empty();
+}
- {
- stdx::unique_lock<stdx::mutex> sl(_pauseDeleteMutex);
- bool wasInitiallyPaused = _pauseDelete;
+DeletedRange RangeDeleterMockEnv::getLastDelete() const {
+ stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
+ return _deleteList.back();
+}
- if (_pauseDelete) {
- _pausedCount++;
- _pausedDeleteChangeCV.notify_one();
- }
+bool RangeDeleterMockEnv::deleteRange(OperationContext* txn,
+ const RangeDeleteEntry& taskDetails,
+ long long int* deletedDocs,
+ string* errMsg) {
+ {
+ stdx::unique_lock<stdx::mutex> sl(_pauseDeleteMutex);
+ bool wasInitiallyPaused = _pauseDelete;
- while (_pauseDelete) {
- _pausedCV.wait(sl);
- }
+ if (_pauseDelete) {
+ _pausedCount++;
+ _pausedDeleteChangeCV.notify_one();
+ }
- _pauseDelete = wasInitiallyPaused;
+ while (_pauseDelete) {
+ _pausedCV.wait(sl);
}
- {
- stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
+ _pauseDelete = wasInitiallyPaused;
+ }
- DeletedRange entry;
- entry.ns = taskDetails.options.range.ns;
- entry.min = taskDetails.options.range.minKey.getOwned();
- entry.max = taskDetails.options.range.maxKey.getOwned();
- entry.shardKeyPattern = taskDetails.options.range.keyPattern.getOwned();
+ {
+ stdx::lock_guard<stdx::mutex> sl(_deleteListMutex);
- _deleteList.push_back(entry);
- }
+ DeletedRange entry;
+ entry.ns = taskDetails.options.range.ns;
+ entry.min = taskDetails.options.range.minKey.getOwned();
+ entry.max = taskDetails.options.range.maxKey.getOwned();
+ entry.shardKeyPattern = taskDetails.options.range.keyPattern.getOwned();
- return true;
+ _deleteList.push_back(entry);
}
- void RangeDeleterMockEnv::getCursorIds(
- OperationContext* txn, StringData ns, set<CursorId>* in) {
- {
- stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
- const set<CursorId>& _cursors = _cursorMap[ns.toString()];
- std::copy(_cursors.begin(), _cursors.end(), inserter(*in, in->begin()));
- }
+ return true;
+}
- {
- stdx::lock_guard<stdx::mutex> sl(_envStatMutex);
- _getCursorsCallCount++;
- _cursorsCallCountUpdatedCV.notify_one();
- }
+void RangeDeleterMockEnv::getCursorIds(OperationContext* txn, StringData ns, set<CursorId>* in) {
+ {
+ stdx::lock_guard<stdx::mutex> sl(_cursorMapMutex);
+ const set<CursorId>& _cursors = _cursorMap[ns.toString()];
+ std::copy(_cursors.begin(), _cursors.end(), inserter(*in, in->begin()));
}
+
+ {
+ stdx::lock_guard<stdx::mutex> sl(_envStatMutex);
+ _getCursorsCallCount++;
+ _cursorsCallCountUpdatedCV.notify_one();
+ }
+}
}
diff --git a/src/mongo/db/range_deleter_mock_env.h b/src/mongo/db/range_deleter_mock_env.h
index bba49a360e4..307dbe2bd53 100644
--- a/src/mongo/db/range_deleter_mock_env.h
+++ b/src/mongo/db/range_deleter_mock_env.h
@@ -37,131 +37,131 @@
namespace mongo {
- struct DeletedRange {
- std::string ns;
- BSONObj min;
- BSONObj max;
- BSONObj shardKeyPattern;
- };
+struct DeletedRange {
+ std::string ns;
+ BSONObj min;
+ BSONObj max;
+ BSONObj shardKeyPattern;
+};
+
+/**
+ * Comparator function object compatible with set.
+ */
+struct DeletedRangeCmp {
+ bool operator()(const DeletedRange& lhs, const DeletedRange& rhs) const;
+};
+
+/**
+ * Mock environment for RangeDeleter with knobs for pausing/resuming
+ * deletes, setting open cursors IDs per namespace and the ability to
+ * record the history of deletes performed through this environment.
+ */
+class RangeDeleterMockEnv : public mongo::RangeDeleterEnv {
+public:
+ RangeDeleterMockEnv();
+
+ //
+ // Environment modification methods.
+ //
/**
- * Comparator function object compatible with set.
+ * Adds an id to the current set of cursors in the given namespace.
*/
- struct DeletedRangeCmp {
- bool operator()(const DeletedRange& lhs, const DeletedRange& rhs) const;
- };
+ void addCursorId(StringData ns, CursorId id);
+
+ /**
+ * Removes the id from the set of open cursors in the given namespace.
+ */
+ void removeCursorId(StringData ns, CursorId id);
+
+ //
+ // Environment synchronization methods.
+ //
/**
- * Mock environment for RangeDeleter with knobs for pausing/resuming
- * deletes, setting open cursors IDs per namespace and the ability to
- * record the history of deletes performed through this environment.
+ * Blocks all new deletes from proceeding.
*/
- class RangeDeleterMockEnv: public mongo::RangeDeleterEnv {
- public:
- RangeDeleterMockEnv();
-
- //
- // Environment modification methods.
- //
-
- /**
- * Adds an id to the current set of cursors in the given namespace.
- */
- void addCursorId(StringData ns, CursorId id);
-
- /**
- * Removes the id from the set of open cursors in the given namespace.
- */
- void removeCursorId(StringData ns, CursorId id);
-
- //
- // Environment synchronization methods.
- //
-
- /**
- * Blocks all new deletes from proceeding.
- */
- void pauseDeletes();
-
- /**
- * Unblocks one paused delete.
- */
- void resumeOneDelete();
-
- /**
- * Blocks until the getCursor method was called and terminated at least the
- * specified number of times for the entire lifetime of this deleter.
- */
- void waitForNthGetCursor(uint64_t nthCall);
-
- /**
- * Blocks until the deleteRange method was called and at the same time paused
- * at least the specified number of times for the entire lifetime of this deleter.
- */
- void waitForNthPausedDelete(uint64_t nthPause);
-
- //
- // Environment introspection methods.
- //
-
- /**
- * Returns true if deleteRange was called at least once.
- */
- bool deleteOccured() const;
-
- /**
- * Returns the last delete. Undefined if deleteOccured is false.
- */
- DeletedRange getLastDelete() const;
-
- //
- // Environment methods.
- //
-
- /**
- * Basic implementation of delete that matches the signature for
- * RangeDeleterEnv::deleteRange. This does not actually perform the delete
- * but simply keeps a record of it. Can also be paused by pauseDeletes and
- * resumed with resumeDeletes.
- */
- bool deleteRange(OperationContext* txn,
- const RangeDeleteEntry& taskDetails,
- long long int* deletedDocs,
- std::string* errMsg);
-
- /**
- * Basic implementation of gathering open cursors that matches the signature for
- * RangeDeleterEnv::getCursorIds. The cursors returned can be modified with
- * the setCursorId and clearCursorMap methods.
- */
- void getCursorIds(OperationContext* txn, StringData ns, std::set<CursorId>* in);
-
- private:
- // mutex acquisition ordering:
- // _envStatMutex -> _pauseDeleteMutex -> _deleteListMutex -> _cursorMapMutex
-
- mutable stdx::mutex _deleteListMutex;
- std::vector<DeletedRange> _deleteList;
-
- stdx::mutex _cursorMapMutex;
- std::map<std::string, std::set<CursorId> > _cursorMap;
-
- // Protects _pauseDelete & _pausedCount
- stdx::mutex _pauseDeleteMutex;
- stdx::condition_variable _pausedCV;
- bool _pauseDelete;
-
- // Number of times a delete gets paused.
- uint64_t _pausedCount;
- // _pausedCount < nthPause (used by waitForNthPausedDelete)
- stdx::condition_variable _pausedDeleteChangeCV;
-
- // Protects all variables below this line.
- stdx::mutex _envStatMutex;
-
- // Keeps track of the number of times getCursorIds was called.
- uint64_t _getCursorsCallCount;
- // _getCursorsCallCount < nthCall (used by waitForNthGetCursor)
- stdx::condition_variable _cursorsCallCountUpdatedCV;
- };
+ void pauseDeletes();
+
+ /**
+ * Unblocks one paused delete.
+ */
+ void resumeOneDelete();
+
+ /**
+ * Blocks until the getCursorIds method has been called and has returned at least the
+ * specified number of times over the entire lifetime of this deleter.
+ */
+ void waitForNthGetCursor(uint64_t nthCall);
+
+ /**
+ * Blocks until the deleteRange method has been called and paused
+ * at least the specified number of times over the entire lifetime of this deleter.
+ */
+ void waitForNthPausedDelete(uint64_t nthPause);
+
+ //
+ // Environment introspection methods.
+ //
+
+ /**
+ * Returns true if deleteRange was called at least once.
+ */
+ bool deleteOccured() const;
+
+ /**
+ * Returns the last delete. Undefined if deleteOccured is false.
+ */
+ DeletedRange getLastDelete() const;
+
+ //
+ // Environment methods.
+ //
+
+ /**
+ * Basic implementation of delete that matches the signature for
+ * RangeDeleterEnv::deleteRange. This does not actually perform the delete
+ * but simply keeps a record of it. Can also be paused by pauseDeletes and
+ * resumed with resumeDeletes.
+ */
+ bool deleteRange(OperationContext* txn,
+ const RangeDeleteEntry& taskDetails,
+ long long int* deletedDocs,
+ std::string* errMsg);
+
+ /**
+ * Basic implementation of gathering open cursors that matches the signature for
+ * RangeDeleterEnv::getCursorIds. The cursors returned can be modified with
+ * the addCursorId and removeCursorId methods.
+ */
+ void getCursorIds(OperationContext* txn, StringData ns, std::set<CursorId>* in);
+
+private:
+ // mutex acquisition ordering:
+ // _envStatMutex -> _pauseDeleteMutex -> _deleteListMutex -> _cursorMapMutex
+
+ mutable stdx::mutex _deleteListMutex;
+ std::vector<DeletedRange> _deleteList;
+
+ stdx::mutex _cursorMapMutex;
+ std::map<std::string, std::set<CursorId>> _cursorMap;
+
+ // Protects _pauseDelete & _pausedCount
+ stdx::mutex _pauseDeleteMutex;
+ stdx::condition_variable _pausedCV;
+ bool _pauseDelete;
+
+ // Number of times a delete gets paused.
+ uint64_t _pausedCount;
+ // _pausedCount < nthPause (used by waitForNthPausedDelete)
+ stdx::condition_variable _pausedDeleteChangeCV;
+
+ // Protects all variables below this line.
+ stdx::mutex _envStatMutex;
+
+ // Keeps track of the number of times getCursorIds was called.
+ uint64_t _getCursorsCallCount;
+ // _getCursorsCallCount < nthCall (used by waitForNthGetCursor)
+ stdx::condition_variable _cursorsCallCountUpdatedCV;
+};
}
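
The pause/resume knobs above are easiest to see end to end; a sketch (not part of the patch) following the pattern of the unit tests further down, with an invented namespace, a NULL txn as in those tests, and the tests' ReplicationCoordinatorMock setup omitted for brevity:

    #include "mongo/db/range_deleter.h"
    #include "mongo/db/range_deleter_mock_env.h"

    // Sketch only: park one queued delete, then release it.
    void pauseResumeExample() {
        mongo::RangeDeleterMockEnv* env = new mongo::RangeDeleterMockEnv();
        mongo::RangeDeleter deleter(env);  // The deleter takes ownership of env.
        deleter.startWorkers();

        env->pauseDeletes();  // Any deleteRange issued from here on blocks.

        mongo::Notification notifyDone;
        mongo::RangeDeleterOptions options(
            mongo::KeyRange("test.user", BSON("x" << 0), BSON("x" << 10), BSON("x" << 1)));
        deleter.queueDelete(NULL /* txn */, options, &notifyDone, NULL /* errMsg */);

        env->waitForNthPausedDelete(1u);  // The worker picked up the task and is parked.
        env->resumeOneDelete();           // Unblock exactly one delete.
        notifyDone.waitToBeNotified();

        deleter.stopWorkers();
    }
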
diff --git a/src/mongo/db/range_deleter_service.cpp b/src/mongo/db/range_deleter_service.cpp
index a5393230df9..bbe7dc72680 100644
--- a/src/mongo/db/range_deleter_service.cpp
+++ b/src/mongo/db/range_deleter_service.cpp
@@ -33,17 +33,17 @@
namespace {
- mongo::RangeDeleter* _deleter = NULL;
+mongo::RangeDeleter* _deleter = NULL;
}
namespace mongo {
- MONGO_INITIALIZER(RangeDeleterInit)(InitializerContext* context) {
- _deleter = new RangeDeleter(new RangeDeleterDBEnv);
- return Status::OK();
- }
+MONGO_INITIALIZER(RangeDeleterInit)(InitializerContext* context) {
+ _deleter = new RangeDeleter(new RangeDeleterDBEnv);
+ return Status::OK();
+}
- RangeDeleter* getDeleter() {
- return _deleter;
- }
+RangeDeleter* getDeleter() {
+ return _deleter;
+}
}
diff --git a/src/mongo/db/range_deleter_service.h b/src/mongo/db/range_deleter_service.h
index 39adac860dd..d654e870350 100644
--- a/src/mongo/db/range_deleter_service.h
+++ b/src/mongo/db/range_deleter_service.h
@@ -32,8 +32,8 @@
namespace mongo {
- /**
- * Gets the global instance of the deleter and starts it.
- */
- RangeDeleter* getDeleter();
+/**
+ * Gets the global instance of the deleter and starts it.
+ */
+RangeDeleter* getDeleter();
}
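
A sketch (not part of the patch) of how the global instance registered by the MONGO_INITIALIZER above might be used from a hypothetical call site; startWorkers() is a no-op if the worker is already running:

    #include "mongo/db/range_deleter_service.h"

    // Sketch only: make sure the global deleter is running and inspect its backlog.
    size_t startAndCountPendingDeletes() {
        mongo::RangeDeleter* deleter = mongo::getDeleter();
        deleter->startWorkers();
        return deleter->getPendingDeletes();
    }
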
diff --git a/src/mongo/db/range_deleter_test.cpp b/src/mongo/db/range_deleter_test.cpp
index 9ec9e1fc1c3..782b7235d60 100644
--- a/src/mongo/db/range_deleter_test.cpp
+++ b/src/mongo/db/range_deleter_test.cpp
@@ -41,369 +41,324 @@
namespace {
- using std::string;
+using std::string;
- using mongo::BSONObj;
- using mongo::CursorId;
- using mongo::DeletedRange;
- using mongo::FieldParser;
- using mongo::KeyRange;
- using mongo::Notification;
- using mongo::RangeDeleter;
- using mongo::RangeDeleterMockEnv;
- using mongo::RangeDeleterOptions;
- using mongo::OperationContext;
+using mongo::BSONObj;
+using mongo::CursorId;
+using mongo::DeletedRange;
+using mongo::FieldParser;
+using mongo::KeyRange;
+using mongo::Notification;
+using mongo::RangeDeleter;
+using mongo::RangeDeleterMockEnv;
+using mongo::RangeDeleterOptions;
+using mongo::OperationContext;
- namespace stdx = mongo::stdx;
+namespace stdx = mongo::stdx;
- OperationContext* const noTxn = NULL; // MockEnv doesn't need txn XXX SERVER-13931
+OperationContext* const noTxn = NULL; // MockEnv doesn't need txn XXX SERVER-13931
- // Capped sleep interval is 640 mSec, Nyquist frequency is 1280 mSec => round up to 2 sec.
- const int MAX_IMMEDIATE_DELETE_WAIT_SECS = 2;
+// Capped sleep interval is 640 mSec, Nyquist frequency is 1280 mSec => round up to 2 sec.
+const int MAX_IMMEDIATE_DELETE_WAIT_SECS = 2;
- const mongo::repl::ReplSettings replSettings;
+const mongo::repl::ReplSettings replSettings;
- // Should not be able to queue deletes if deleter workers were not started.
- TEST(QueueDelete, CantAfterStop) {
- RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
- RangeDeleter deleter(env);
+// Should not be able to queue deletes if deleter workers were not started.
+TEST(QueueDelete, CantAfterStop) {
+ RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
+ RangeDeleter deleter(env);
- std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
- new mongo::repl::ReplicationCoordinatorMock(replSettings));
+ std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
+ new mongo::repl::ReplicationCoordinatorMock(replSettings));
- mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(),
- std::move(mock));
+ mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(), std::move(mock));
- deleter.startWorkers();
- deleter.stopWorkers();
+ deleter.startWorkers();
+ deleter.stopWorkers();
- string errMsg;
- ASSERT_FALSE(deleter.queueDelete(noTxn,
- RangeDeleterOptions(KeyRange("test.user",
- BSON("x" << 120),
- BSON("x" << 200),
- BSON("x" << 1))),
- NULL /* notifier not needed */,
- &errMsg));
- ASSERT_FALSE(errMsg.empty());
- ASSERT_FALSE(env->deleteOccured());
+ string errMsg;
+ ASSERT_FALSE(
+ deleter.queueDelete(noTxn,
+ RangeDeleterOptions(KeyRange(
+ "test.user", BSON("x" << 120), BSON("x" << 200), BSON("x" << 1))),
+ NULL /* notifier not needed */,
+ &errMsg));
+ ASSERT_FALSE(errMsg.empty());
+ ASSERT_FALSE(env->deleteOccured());
+}
- }
+// Should not start delete if the set of cursors that were open when the
+// delete was queued is still open.
+TEST(QueuedDelete, ShouldWaitCursor) {
+ const string ns("test.user");
- // Should not start delete if the set of cursors that were open when the
- // delete was queued is still open.
- TEST(QueuedDelete, ShouldWaitCursor) {
- const string ns("test.user");
+ RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
+ RangeDeleter deleter(env);
- RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
- RangeDeleter deleter(env);
+ std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
+ new mongo::repl::ReplicationCoordinatorMock(replSettings));
- std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
- new mongo::repl::ReplicationCoordinatorMock(replSettings));
+ mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(), std::move(mock));
- mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(),
- std::move(mock));
+ deleter.startWorkers();
- deleter.startWorkers();
+ env->addCursorId(ns, 345);
- env->addCursorId(ns, 345);
+ Notification notifyDone;
+ RangeDeleterOptions deleterOptions(
+ KeyRange(ns, BSON("x" << 0), BSON("x" << 10), BSON("x" << 1)));
+ deleterOptions.waitForOpenCursors = true;
- Notification notifyDone;
- RangeDeleterOptions deleterOptions(KeyRange(ns,
- BSON("x" << 0),
- BSON("x" << 10),
- BSON("x" << 1)));
- deleterOptions.waitForOpenCursors = true;
+ ASSERT_TRUE(
+ deleter.queueDelete(noTxn, deleterOptions, &notifyDone, NULL /* errMsg not needed */));
- ASSERT_TRUE(deleter.queueDelete(noTxn,
- deleterOptions,
- &notifyDone,
- NULL /* errMsg not needed */));
+ env->waitForNthGetCursor(1u);
- env->waitForNthGetCursor(1u);
+ ASSERT_EQUALS(1U, deleter.getPendingDeletes());
+ ASSERT_FALSE(env->deleteOccured());
- ASSERT_EQUALS(1U, deleter.getPendingDeletes());
- ASSERT_FALSE(env->deleteOccured());
+ // Set the open cursors to a totally different set of cursor IDs.
+ env->addCursorId(ns, 200);
+ env->removeCursorId(ns, 345);
- // Set the open cursors to a totally different sets of cursorIDs.
- env->addCursorId(ns, 200);
- env->removeCursorId(ns, 345);
+ notifyDone.waitToBeNotified();
- notifyDone.waitToBeNotified();
+ ASSERT_TRUE(env->deleteOccured());
+ const DeletedRange deletedChunk(env->getLastDelete());
- ASSERT_TRUE(env->deleteOccured());
- const DeletedRange deletedChunk(env->getLastDelete());
+ ASSERT_EQUALS(ns, deletedChunk.ns);
+ ASSERT_TRUE(deletedChunk.min.equal(BSON("x" << 0)));
+ ASSERT_TRUE(deletedChunk.max.equal(BSON("x" << 10)));
- ASSERT_EQUALS(ns, deletedChunk.ns);
- ASSERT_TRUE(deletedChunk.min.equal(BSON("x" << 0)));
- ASSERT_TRUE(deletedChunk.max.equal(BSON("x" << 10)));
+ deleter.stopWorkers();
+}
- deleter.stopWorkers();
+// Should terminate when stop is requested.
+TEST(QueuedDelete, StopWhileWaitingCursor) {
+ const string ns("test.user");
- }
+ RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
+ RangeDeleter deleter(env);
- // Should terminate when stop is requested.
- TEST(QueuedDelete, StopWhileWaitingCursor) {
- const string ns("test.user");
+ std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
+ new mongo::repl::ReplicationCoordinatorMock(replSettings));
- RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
- RangeDeleter deleter(env);
+ mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(), std::move(mock));
- std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
- new mongo::repl::ReplicationCoordinatorMock(replSettings));
+ deleter.startWorkers();
- mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(),
- std::move(mock));
+ env->addCursorId(ns, 345);
- deleter.startWorkers();
+ Notification notifyDone;
+ RangeDeleterOptions deleterOptions(
+ KeyRange(ns, BSON("x" << 0), BSON("x" << 10), BSON("x" << 1)));
+ deleterOptions.waitForOpenCursors = true;
+ ASSERT_TRUE(
+ deleter.queueDelete(noTxn, deleterOptions, &notifyDone, NULL /* errMsg not needed */));
- env->addCursorId(ns, 345);
- Notification notifyDone;
- RangeDeleterOptions deleterOptions(KeyRange(ns,
- BSON("x" << 0),
- BSON("x" << 10),
- BSON("x" << 1)));
- deleterOptions.waitForOpenCursors = true;
- ASSERT_TRUE(deleter.queueDelete(noTxn,
- deleterOptions,
- &notifyDone,
- NULL /* errMsg not needed */));
+ env->waitForNthGetCursor(1u);
+ deleter.stopWorkers();
+ ASSERT_FALSE(env->deleteOccured());
+}
- env->waitForNthGetCursor(1u);
+static void rangeDeleterDeleteNow(RangeDeleter* deleter,
+ OperationContext* txn,
+ const RangeDeleterOptions& deleterOptions,
+ std::string* errMsg) {
+ deleter->deleteNow(txn, deleterOptions, errMsg);
+}
- deleter.stopWorkers();
- ASSERT_FALSE(env->deleteOccured());
+// Should not start delete if the set of cursors that were open when the
+// deleteNow method is called is still open.
+TEST(ImmediateDelete, ShouldWaitCursor) {
+ const string ns("test.user");
- }
+ RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
+ RangeDeleter deleter(env);
- static void rangeDeleterDeleteNow(RangeDeleter* deleter,
- OperationContext* txn,
- const RangeDeleterOptions& deleterOptions,
- std::string* errMsg) {
- deleter->deleteNow(txn, deleterOptions, errMsg);
- }
+ std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
+ new mongo::repl::ReplicationCoordinatorMock(replSettings));
- // Should not start delete if the set of cursors that were open when the
- // deleteNow method is called is still open.
- TEST(ImmediateDelete, ShouldWaitCursor) {
- const string ns("test.user");
+ mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(), std::move(mock));
- RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
- RangeDeleter deleter(env);
+ deleter.startWorkers();
- std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
- new mongo::repl::ReplicationCoordinatorMock(replSettings));
+ env->addCursorId(ns, 345);
- mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(),
- std::move(mock));
+ string errMsg;
+ RangeDeleterOptions deleterOption(
+ KeyRange(ns, BSON("x" << 0), BSON("x" << 10), BSON("x" << 1)));
+ deleterOption.waitForOpenCursors = true;
+ stdx::thread deleterThread = stdx::thread(
+ mongo::stdx::bind(rangeDeleterDeleteNow, &deleter, noTxn, deleterOption, &errMsg));
- deleter.startWorkers();
+ env->waitForNthGetCursor(1u);
- env->addCursorId(ns, 345);
+ // Note: immediate deletes have no pending state; they go directly to inProgress
+ // even while waiting for cursors.
+ ASSERT_EQUALS(1U, deleter.getDeletesInProgress());
- string errMsg;
- RangeDeleterOptions deleterOption(KeyRange(ns,
- BSON("x" << 0),
- BSON("x" << 10),
- BSON("x" << 1)));
- deleterOption.waitForOpenCursors = true;
- stdx::thread deleterThread = stdx::thread(mongo::stdx::bind(
- rangeDeleterDeleteNow,
- &deleter,
- noTxn,
- deleterOption,
- &errMsg));
+ ASSERT_FALSE(env->deleteOccured());
- env->waitForNthGetCursor(1u);
+ // Set the open cursors to a totally different set of cursor IDs.
+ env->addCursorId(ns, 200);
+ env->removeCursorId(ns, 345);
- // Note: immediate deletes has no pending state, it goes directly to inProgress
- // even while waiting for cursors.
- ASSERT_EQUALS(1U, deleter.getDeletesInProgress());
+ ASSERT_TRUE(
+ deleterThread.timed_join(boost::posix_time::seconds(MAX_IMMEDIATE_DELETE_WAIT_SECS)));
- ASSERT_FALSE(env->deleteOccured());
+ ASSERT_TRUE(env->deleteOccured());
+ const DeletedRange deletedChunk(env->getLastDelete());
- // Set the open cursors to a totally different sets of cursorIDs.
- env->addCursorId(ns, 200);
- env->removeCursorId(ns, 345);
+ ASSERT_EQUALS(ns, deletedChunk.ns);
+ ASSERT_TRUE(deletedChunk.min.equal(BSON("x" << 0)));
+ ASSERT_TRUE(deletedChunk.max.equal(BSON("x" << 10)));
+ ASSERT_TRUE(deletedChunk.shardKeyPattern.equal(BSON("x" << 1)));
- ASSERT_TRUE(deleterThread.timed_join(
- boost::posix_time::seconds(MAX_IMMEDIATE_DELETE_WAIT_SECS)));
+ deleter.stopWorkers();
+}
- ASSERT_TRUE(env->deleteOccured());
- const DeletedRange deletedChunk(env->getLastDelete());
+// Should terminate when stop is requested.
+TEST(ImmediateDelete, StopWhileWaitingCursor) {
+ const string ns("test.user");
- ASSERT_EQUALS(ns, deletedChunk.ns);
- ASSERT_TRUE(deletedChunk.min.equal(BSON("x" << 0)));
- ASSERT_TRUE(deletedChunk.max.equal(BSON("x" << 10)));
- ASSERT_TRUE(deletedChunk.shardKeyPattern.equal(BSON("x" << 1)));
+ RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
+ RangeDeleter deleter(env);
- deleter.stopWorkers();
+ std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
+ new mongo::repl::ReplicationCoordinatorMock(replSettings));
- }
+ mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(), std::move(mock));
- // Should terminate when stop is requested.
- TEST(ImmediateDelete, StopWhileWaitingCursor) {
- const string ns("test.user");
+ deleter.startWorkers();
- RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
- RangeDeleter deleter(env);
+ env->addCursorId(ns, 345);
- std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
- new mongo::repl::ReplicationCoordinatorMock(replSettings));
+ string errMsg;
+ RangeDeleterOptions deleterOption(
+ KeyRange(ns, BSON("x" << 0), BSON("x" << 10), BSON("x" << 1)));
+ deleterOption.waitForOpenCursors = true;
+ stdx::thread deleterThread = stdx::thread(
+ mongo::stdx::bind(rangeDeleterDeleteNow, &deleter, noTxn, deleterOption, &errMsg));
- mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(),
- std::move(mock));
+ env->waitForNthGetCursor(1u);
- deleter.startWorkers();
-
- env->addCursorId(ns, 345);
-
- string errMsg;
- RangeDeleterOptions deleterOption(KeyRange(ns,
- BSON("x" << 0),
- BSON("x" << 10),
- BSON("x" << 1)));
- deleterOption.waitForOpenCursors = true;
- stdx::thread deleterThread = stdx::thread(mongo::stdx::bind(
- rangeDeleterDeleteNow,
- &deleter,
- noTxn,
- deleterOption,
- &errMsg));
+ // Note: immediate deletes have no pending state; they go directly to inProgress
+ // even while waiting for cursors.
+ ASSERT_EQUALS(1U, deleter.getDeletesInProgress());
- env->waitForNthGetCursor(1u);
-
- // Note: immediate deletes has no pending state, it goes directly to inProgress
- // even while waiting for cursors.
- ASSERT_EQUALS(1U, deleter.getDeletesInProgress());
-
- ASSERT_FALSE(env->deleteOccured());
-
- deleter.stopWorkers();
+ ASSERT_FALSE(env->deleteOccured());
- ASSERT_TRUE(deleterThread.timed_join(
- boost::posix_time::seconds(MAX_IMMEDIATE_DELETE_WAIT_SECS)));
+ deleter.stopWorkers();
- ASSERT_FALSE(env->deleteOccured());
+ ASSERT_TRUE(
+ deleterThread.timed_join(boost::posix_time::seconds(MAX_IMMEDIATE_DELETE_WAIT_SECS)));
- }
+ ASSERT_FALSE(env->deleteOccured());
+}
- // Tests the interaction of multiple deletes queued with different states.
- // Starts by adding a new delete task, waits for the worker to work on it,
- // and then adds 2 more task, one of which is ready to be deleted, while the
- // other one is waiting for an open cursor. The test then makes sure that the
- // deletes are performed in the right order.
- TEST(MixedDeletes, MultipleDeletes) {
- const string blockedNS("foo.bar");
- const string ns("test.user");
+// Tests the interaction of multiple deletes queued with different states.
+// Starts by adding a new delete task, waits for the worker to work on it,
+// and then adds 2 more tasks, one of which is ready to be deleted, while the
+// other one is waiting for an open cursor. The test then makes sure that the
+// deletes are performed in the right order.
+TEST(MixedDeletes, MultipleDeletes) {
+ const string blockedNS("foo.bar");
+ const string ns("test.user");
- RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
- RangeDeleter deleter(env);
-
- std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
- new mongo::repl::ReplicationCoordinatorMock(replSettings));
-
- mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(),
- std::move(mock));
+ RangeDeleterMockEnv* env = new RangeDeleterMockEnv();
+ RangeDeleter deleter(env);
- deleter.startWorkers();
+ std::unique_ptr<mongo::repl::ReplicationCoordinatorMock> mock(
+ new mongo::repl::ReplicationCoordinatorMock(replSettings));
- env->addCursorId(blockedNS, 345);
- env->pauseDeletes();
+ mongo::repl::ReplicationCoordinator::set(mongo::getGlobalServiceContext(), std::move(mock));
- Notification notifyDone1;
- RangeDeleterOptions deleterOption1(KeyRange(ns,
- BSON("x" << 10),
- BSON("x" << 20),
- BSON("x" << 1)));
- deleterOption1.waitForOpenCursors = true;
- ASSERT_TRUE(deleter.queueDelete(noTxn,
- deleterOption1,
- &notifyDone1,
- NULL /* don't care errMsg */));
+ deleter.startWorkers();
- env->waitForNthPausedDelete(1u);
+ env->addCursorId(blockedNS, 345);
+ env->pauseDeletes();
- // Make sure that the delete is already in progress before proceeding.
- ASSERT_EQUALS(1U, deleter.getDeletesInProgress());
+ Notification notifyDone1;
+ RangeDeleterOptions deleterOption1(
+ KeyRange(ns, BSON("x" << 10), BSON("x" << 20), BSON("x" << 1)));
+ deleterOption1.waitForOpenCursors = true;
+ ASSERT_TRUE(
+ deleter.queueDelete(noTxn, deleterOption1, &notifyDone1, NULL /* don't care errMsg */));
- Notification notifyDone2;
- RangeDeleterOptions deleterOption2(KeyRange(blockedNS,
- BSON("x" << 20),
- BSON("x" << 30),
- BSON("x" << 1)));
- deleterOption2.waitForOpenCursors = true;
- ASSERT_TRUE(deleter.queueDelete(noTxn,
- deleterOption2,
- &notifyDone2,
- NULL /* don't care errMsg */));
+ env->waitForNthPausedDelete(1u);
- Notification notifyDone3;
- RangeDeleterOptions deleterOption3(KeyRange(ns,
- BSON("x" << 30),
- BSON("x" << 40),
- BSON("x" << 1)));
- deleterOption3.waitForOpenCursors = true;
- ASSERT_TRUE(deleter.queueDelete(noTxn,
- deleterOption3,
- &notifyDone3,
- NULL /* don't care errMsg */));
-
- // Now, the setup is:
- // { x: 10 } => { x: 20 } in progress.
- // { x: 20 } => { x: 30 } waiting for cursor id 345.
- // { x: 30 } => { x: 40 } waiting to be picked up by worker.
-
- // Make sure that the current state matches the setup.
- ASSERT_EQUALS(3U, deleter.getTotalDeletes());
- ASSERT_EQUALS(2U, deleter.getPendingDeletes());
- ASSERT_EQUALS(1U, deleter.getDeletesInProgress());
-
- // Let the first delete proceed.
- env->resumeOneDelete();
- notifyDone1.waitToBeNotified();
-
- ASSERT_TRUE(env->deleteOccured());
-
- // { x: 10 } => { x: 20 } should be the first one since it is already in
- // progress before the others are queued.
- DeletedRange deleted1(env->getLastDelete());
+ // Make sure that the delete is already in progress before proceeding.
+ ASSERT_EQUALS(1U, deleter.getDeletesInProgress());
- ASSERT_EQUALS(ns, deleted1.ns);
- ASSERT_TRUE(deleted1.min.equal(BSON("x" << 10)));
- ASSERT_TRUE(deleted1.max.equal(BSON("x" << 20)));
- ASSERT_TRUE(deleted1.shardKeyPattern.equal(BSON("x" << 1)));
-
- // Let the second delete proceed.
- env->resumeOneDelete();
- notifyDone3.waitToBeNotified();
-
- DeletedRange deleted2(env->getLastDelete());
-
- // { x: 30 } => { x: 40 } should be next since there are still
- // cursors open for blockedNS.
-
- ASSERT_EQUALS(ns, deleted2.ns);
- ASSERT_TRUE(deleted2.min.equal(BSON("x" << 30)));
- ASSERT_TRUE(deleted2.max.equal(BSON("x" << 40)));
- ASSERT_TRUE(deleted2.shardKeyPattern.equal(BSON("x" << 1)));
-
- env->removeCursorId(blockedNS, 345);
- // Let the last delete proceed.
- env->resumeOneDelete();
- notifyDone2.waitToBeNotified();
-
- DeletedRange deleted3(env->getLastDelete());
-
- ASSERT_EQUALS(blockedNS, deleted3.ns);
- ASSERT_TRUE(deleted3.min.equal(BSON("x" << 20)));
- ASSERT_TRUE(deleted3.max.equal(BSON("x" << 30)));
- ASSERT_TRUE(deleted3.shardKeyPattern.equal(BSON("x" << 1)));
-
- deleter.stopWorkers();
-
- }
-
-} // unnamed namespace
+ Notification notifyDone2;
+ RangeDeleterOptions deleterOption2(
+ KeyRange(blockedNS, BSON("x" << 20), BSON("x" << 30), BSON("x" << 1)));
+ deleterOption2.waitForOpenCursors = true;
+ ASSERT_TRUE(
+ deleter.queueDelete(noTxn, deleterOption2, &notifyDone2, NULL /* don't care errMsg */));
+
+ Notification notifyDone3;
+ RangeDeleterOptions deleterOption3(
+ KeyRange(ns, BSON("x" << 30), BSON("x" << 40), BSON("x" << 1)));
+ deleterOption3.waitForOpenCursors = true;
+ ASSERT_TRUE(
+ deleter.queueDelete(noTxn, deleterOption3, &notifyDone3, NULL /* don't care errMsg */));
+
+ // Now, the setup is:
+ // { x: 10 } => { x: 20 } in progress.
+ // { x: 20 } => { x: 30 } waiting for cursor id 345.
+ // { x: 30 } => { x: 40 } waiting to be picked up by worker.
+
+ // Make sure that the current state matches the setup.
+ ASSERT_EQUALS(3U, deleter.getTotalDeletes());
+ ASSERT_EQUALS(2U, deleter.getPendingDeletes());
+ ASSERT_EQUALS(1U, deleter.getDeletesInProgress());
+
+ // Let the first delete proceed.
+ env->resumeOneDelete();
+ notifyDone1.waitToBeNotified();
+
+ ASSERT_TRUE(env->deleteOccured());
+
+ // { x: 10 } => { x: 20 } should be the first one since it is already in
+ // progress before the others are queued.
+ DeletedRange deleted1(env->getLastDelete());
+
+ ASSERT_EQUALS(ns, deleted1.ns);
+ ASSERT_TRUE(deleted1.min.equal(BSON("x" << 10)));
+ ASSERT_TRUE(deleted1.max.equal(BSON("x" << 20)));
+ ASSERT_TRUE(deleted1.shardKeyPattern.equal(BSON("x" << 1)));
+
+ // Let the second delete proceed.
+ env->resumeOneDelete();
+ notifyDone3.waitToBeNotified();
+
+ DeletedRange deleted2(env->getLastDelete());
+
+ // { x: 30 } => { x: 40 } should be next since there are still
+ // cursors open for blockedNS.
+
+ ASSERT_EQUALS(ns, deleted2.ns);
+ ASSERT_TRUE(deleted2.min.equal(BSON("x" << 30)));
+ ASSERT_TRUE(deleted2.max.equal(BSON("x" << 40)));
+ ASSERT_TRUE(deleted2.shardKeyPattern.equal(BSON("x" << 1)));
+
+ env->removeCursorId(blockedNS, 345);
+ // Let the last delete proceed.
+ env->resumeOneDelete();
+ notifyDone2.waitToBeNotified();
+
+ DeletedRange deleted3(env->getLastDelete());
+
+ ASSERT_EQUALS(blockedNS, deleted3.ns);
+ ASSERT_TRUE(deleted3.min.equal(BSON("x" << 20)));
+ ASSERT_TRUE(deleted3.max.equal(BSON("x" << 30)));
+ ASSERT_TRUE(deleted3.shardKeyPattern.equal(BSON("x" << 1)));
+
+ deleter.stopWorkers();
+}
+
+} // unnamed namespace
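
Note: the reformatted test above relies on the RangeDeleter queueing API; a condensed sketch of
that pattern, lifted out of the test context (names such as 'deleter', 'noTxn' and 'ns' are the
test fixtures shown above, not new API), is:

    // Queue an asynchronous delete of [{x: 10}, {x: 20}) and block until it completes.
    Notification notifyDone;
    RangeDeleterOptions options(KeyRange(ns, BSON("x" << 10), BSON("x" << 20), BSON("x" << 1)));
    options.waitForOpenCursors = true;  // hold the range while cursors on 'ns' remain open
    ASSERT_TRUE(deleter.queueDelete(noTxn, options, &notifyDone, NULL /* errMsg not checked */));
    notifyDone.waitToBeNotified();
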
diff --git a/src/mongo/db/range_preserver.h b/src/mongo/db/range_preserver.h
index 818d848914e..5b7b655b66f 100644
--- a/src/mongo/db/range_preserver.h
+++ b/src/mongo/db/range_preserver.h
@@ -32,40 +32,40 @@
namespace mongo {
+/**
+ * A RangePreserver prevents the RangeDeleter from removing any new data ranges in a collection.
+ * Previously queued ranges may still be deleted but the documents in those ranges will be
+ * filtered by CollectionMetadata::belongsToMe.
+ *
+ * TODO(greg/hk): Currently, creating a ClientCursor is how we accomplish this. This should
+ * change.
+ */
+class RangePreserver {
+public:
/**
- * A RangePreserver prevents the RangeDeleter from removing any new data ranges in a collection.
- * Previously queued ranges may still be deleted but the documents in those ranges will be
- * filtered by CollectionMetadata::belongsToMe.
- *
- * TODO(greg/hk): Currently, creating a ClientCursor is how we accomplish this. This should
- * change.
+ * Sharding uses the set of active cursor IDs as the current state. We add a dummy
+ * ClientCursor, which creates an additional cursor ID. The cursor ID lasts as long as this
+ * object does. The ClientCursorPin guarantees that the underlying ClientCursor is not
+ * deleted until this object goes out of scope.
*/
- class RangePreserver {
- public:
- /**
- * Sharding uses the set of active cursor IDs as the current state. We add a dummy
- * ClientCursor, which creates an additional cursor ID. The cursor ID lasts as long as this
- * object does. The ClientCursorPin guarantees that the underlying ClientCursor is not
- * deleted until this object goes out of scope.
- */
- RangePreserver(const Collection* collection) {
- // Empty collections don't have any data we need to preserve
- if (collection) {
- // Not a memory leak. Cached in a static structure by CC's ctor.
- ClientCursor* cc = new ClientCursor(collection);
+ RangePreserver(const Collection* collection) {
+ // Empty collections don't have any data we need to preserve
+ if (collection) {
+ // Not a memory leak. Cached in a static structure by CC's ctor.
+ ClientCursor* cc = new ClientCursor(collection);
- // Pin keeps the CC from being deleted while it's in scope. We delete it ourselves.
- _pin.reset(new ClientCursorPin(collection->getCursorManager(), cc->cursorid()));
- }
+ // Pin keeps the CC from being deleted while it's in scope. We delete it ourselves.
+ _pin.reset(new ClientCursorPin(collection->getCursorManager(), cc->cursorid()));
}
+ }
- ~RangePreserver() {
- if (_pin)
- _pin->deleteUnderlying();
- }
+ ~RangePreserver() {
+ if (_pin)
+ _pin->deleteUnderlying();
+ }
- private:
- std::unique_ptr<ClientCursorPin> _pin;
- };
+private:
+ std::unique_ptr<ClientCursorPin> _pin;
+};
} // namespace mongo
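
Note: RangePreserver above is purely scope-based; a minimal, hypothetical caller (assuming a
valid Collection* is already in hand) would look like:

    void scanWithoutRangeDeletion(const mongo::Collection* collection) {
        // While 'preserver' is alive, the RangeDeleter will not remove newly queued ranges;
        // previously queued ranges are filtered via CollectionMetadata::belongsToMe.
        mongo::RangePreserver preserver(collection);
        // ... perform the long-running read here ...
    }  // 'preserver' is destroyed; the dummy ClientCursor pinned in the ctor is deleted.
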
diff --git a/src/mongo/db/record_id.h b/src/mongo/db/record_id.h
index 58514b652ee..cd7e8025b90 100644
--- a/src/mongo/db/record_id.h
+++ b/src/mongo/db/record_id.h
@@ -40,103 +40,127 @@
namespace mongo {
+/**
+ * The key that uniquely identifies a Record in a Collection or RecordStore.
+ */
+class RecordId {
+public:
/**
- * The key that uniquely identifies a Record in a Collection or RecordStore.
+ * Constructs a Null RecordId.
*/
- class RecordId {
- public:
-
- /**
- * Constructs a Null RecordId.
- */
- RecordId() : _repr(kNullRepr) {}
+ RecordId() : _repr(kNullRepr) {}
- explicit RecordId(int64_t repr) : _repr(repr) {}
+ explicit RecordId(int64_t repr) : _repr(repr) {}
- /**
- * Construct a RecordId from two halves.
- * TODO consider removing.
- */
- RecordId(int high, int low) : _repr((uint64_t(high) << 32) | uint32_t(low)) {}
+ /**
+ * Construct a RecordId from two halves.
+ * TODO consider removing.
+ */
+ RecordId(int high, int low) : _repr((uint64_t(high) << 32) | uint32_t(low)) {}
- /**
- * A RecordId that compares less than all ids that represent documents in a collection.
- */
- static RecordId min() { return RecordId(kMinRepr); }
+ /**
+ * A RecordId that compares less than all ids that represent documents in a collection.
+ */
+ static RecordId min() {
+ return RecordId(kMinRepr);
+ }
- /**
- * A RecordId that compares greater than all ids that represent documents in a collection.
- */
- static RecordId max() { return RecordId(kMaxRepr); }
+ /**
+ * A RecordId that compares greater than all ids that represent documents in a collection.
+ */
+ static RecordId max() {
+ return RecordId(kMaxRepr);
+ }
- bool isNull() const { return _repr == 0; }
+ bool isNull() const {
+ return _repr == 0;
+ }
- int64_t repr() const { return _repr; }
+ int64_t repr() const {
+ return _repr;
+ }
- /**
- * Normal RecordIds are the only ones valid for representing Records. All RecordIds outside
- * of this range are sentinel values.
- */
- bool isNormal() const { return _repr > 0 && _repr < kMaxRepr; }
+ /**
+ * Normal RecordIds are the only ones valid for representing Records. All RecordIds outside
+ * of this range are sentinel values.
+ */
+ bool isNormal() const {
+ return _repr > 0 && _repr < kMaxRepr;
+ }
- int compare(RecordId rhs) const {
- return _repr == rhs._repr ? 0 :
- _repr < rhs._repr ? -1 : 1;
- }
+ int compare(RecordId rhs) const {
+ return _repr == rhs._repr ? 0 : _repr < rhs._repr ? -1 : 1;
+ }
- /**
- * Hash value for this RecordId. The hash implementation may be modified, and its behavior
- * may differ across platforms. Hash values should not be persisted.
- */
- struct Hasher {
- size_t operator()(RecordId rid) const {
- size_t hash = 0;
- // TODO consider better hashes
- boost::hash_combine(hash, rid.repr());
- return hash;
- }
- };
-
- /// members for Sorter
- struct SorterDeserializeSettings {}; // unused
- void serializeForSorter(BufBuilder& buf) const { buf.appendStruct(_repr); }
- static RecordId deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) {
- return RecordId(buf.read<int64_t>());
+ /**
+ * Hash value for this RecordId. The hash implementation may be modified, and its behavior
+ * may differ across platforms. Hash values should not be persisted.
+ */
+ struct Hasher {
+ size_t operator()(RecordId rid) const {
+ size_t hash = 0;
+ // TODO consider better hashes
+ boost::hash_combine(hash, rid.repr());
+ return hash;
}
- int memUsageForSorter() const { return sizeof(RecordId); }
- RecordId getOwned() const { return *this; }
-
- private:
- static const int64_t kMaxRepr = LLONG_MAX;
- static const int64_t kNullRepr = 0;
- static const int64_t kMinRepr = LLONG_MIN;
-
- int64_t _repr;
};
- inline bool operator==(RecordId lhs, RecordId rhs) { return lhs.repr() == rhs.repr(); }
- inline bool operator!=(RecordId lhs, RecordId rhs) { return lhs.repr() != rhs.repr(); }
- inline bool operator< (RecordId lhs, RecordId rhs) { return lhs.repr() < rhs.repr(); }
- inline bool operator<=(RecordId lhs, RecordId rhs) { return lhs.repr() <= rhs.repr(); }
- inline bool operator> (RecordId lhs, RecordId rhs) { return lhs.repr() > rhs.repr(); }
- inline bool operator>=(RecordId lhs, RecordId rhs) { return lhs.repr() >= rhs.repr(); }
-
- inline StringBuilder& operator<<( StringBuilder& stream, const RecordId& id ) {
- return stream << "RecordId(" << id.repr() << ')';
+ /// members for Sorter
+ struct SorterDeserializeSettings {}; // unused
+ void serializeForSorter(BufBuilder& buf) const {
+ buf.appendStruct(_repr);
}
-
- inline std::ostream& operator<<( std::ostream& stream, const RecordId& id ) {
- return stream << "RecordId(" << id.repr() << ')';
+ static RecordId deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) {
+ return RecordId(buf.read<int64_t>());
}
-
- inline std::ostream& operator<<( std::ostream& stream, const boost::optional<RecordId>& id ) {
- return stream << "RecordId(" << (id ? id.get().repr() : 0) << ')';
+ int memUsageForSorter() const {
+ return sizeof(RecordId);
}
-
- inline logger::LogstreamBuilder& operator<<(logger::LogstreamBuilder& stream,
- const RecordId& id) {
- stream.stream() << id;
- return stream;
+ RecordId getOwned() const {
+ return *this;
}
-} // namespace mongo
+private:
+ static const int64_t kMaxRepr = LLONG_MAX;
+ static const int64_t kNullRepr = 0;
+ static const int64_t kMinRepr = LLONG_MIN;
+
+ int64_t _repr;
+};
+
+inline bool operator==(RecordId lhs, RecordId rhs) {
+ return lhs.repr() == rhs.repr();
+}
+inline bool operator!=(RecordId lhs, RecordId rhs) {
+ return lhs.repr() != rhs.repr();
+}
+inline bool operator<(RecordId lhs, RecordId rhs) {
+ return lhs.repr() < rhs.repr();
+}
+inline bool operator<=(RecordId lhs, RecordId rhs) {
+ return lhs.repr() <= rhs.repr();
+}
+inline bool operator>(RecordId lhs, RecordId rhs) {
+ return lhs.repr() > rhs.repr();
+}
+inline bool operator>=(RecordId lhs, RecordId rhs) {
+ return lhs.repr() >= rhs.repr();
+}
+
+inline StringBuilder& operator<<(StringBuilder& stream, const RecordId& id) {
+ return stream << "RecordId(" << id.repr() << ')';
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const RecordId& id) {
+ return stream << "RecordId(" << id.repr() << ')';
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const boost::optional<RecordId>& id) {
+ return stream << "RecordId(" << (id ? id.get().repr() : 0) << ')';
+}
+
+inline logger::LogstreamBuilder& operator<<(logger::LogstreamBuilder& stream, const RecordId& id) {
+ stream.stream() << id;
+ return stream;
+}
+} // namespace mongo
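
Note: all of the relational operators above delegate to repr(); a small self-contained sketch of
the sentinel semantics (illustrative only, not part of the change) is:

    #include <cassert>
    #include "mongo/db/record_id.h"

    void recordIdExample() {
        mongo::RecordId nullId;        // default-constructed: the Null id, repr() == 0
        mongo::RecordId normal(1, 2);  // two 32-bit halves packed into a single int64_t
        assert(nullId.isNull());
        assert(normal.isNormal());     // 0 < repr() < kMaxRepr
        assert(mongo::RecordId::min() < normal && normal < mongo::RecordId::max());
        assert(normal.compare(normal) == 0);  // compare() returns -1, 0, or 1
    }
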
diff --git a/src/mongo/db/record_id_test.cpp b/src/mongo/db/record_id_test.cpp
index efc6d9ca5ea..389549bccc6 100644
--- a/src/mongo/db/record_id_test.cpp
+++ b/src/mongo/db/record_id_test.cpp
@@ -35,34 +35,34 @@
namespace mongo {
namespace {
- TEST( RecordId, HashEqual ) {
- RecordId locA( 1, 2 );
- RecordId locB;
- locB = locA;
- ASSERT_EQUALS( locA, locB );
- RecordId::Hasher hasher;
- ASSERT_EQUALS( hasher( locA ), hasher( locB ) );
- }
+TEST(RecordId, HashEqual) {
+ RecordId locA(1, 2);
+ RecordId locB;
+ locB = locA;
+ ASSERT_EQUALS(locA, locB);
+ RecordId::Hasher hasher;
+ ASSERT_EQUALS(hasher(locA), hasher(locB));
+}
- TEST( RecordId, HashNotEqual ) {
- RecordId original( 1, 2 );
- RecordId diffFile( 10, 2 );
- RecordId diffOfs( 1, 20 );
- RecordId diffBoth( 10, 20 );
- RecordId reversed( 2, 1 );
- ASSERT_NOT_EQUALS( original, diffFile );
- ASSERT_NOT_EQUALS( original, diffOfs );
- ASSERT_NOT_EQUALS( original, diffBoth );
- ASSERT_NOT_EQUALS( original, reversed );
-
- // Unequal DiskLocs need not produce unequal hashes. But unequal hashes are likely, and
- // assumed here for sanity checking of the custom hash implementation.
- RecordId::Hasher hasher;
- ASSERT_NOT_EQUALS( hasher( original ), hasher( diffFile ) );
- ASSERT_NOT_EQUALS( hasher( original ), hasher( diffOfs ) );
- ASSERT_NOT_EQUALS( hasher( original ), hasher( diffBoth ) );
- ASSERT_NOT_EQUALS( hasher( original ), hasher( reversed ) );
- }
-
-} // namespace
-} // namespace mongo
+TEST(RecordId, HashNotEqual) {
+ RecordId original(1, 2);
+ RecordId diffFile(10, 2);
+ RecordId diffOfs(1, 20);
+ RecordId diffBoth(10, 20);
+ RecordId reversed(2, 1);
+ ASSERT_NOT_EQUALS(original, diffFile);
+ ASSERT_NOT_EQUALS(original, diffOfs);
+ ASSERT_NOT_EQUALS(original, diffBoth);
+ ASSERT_NOT_EQUALS(original, reversed);
+
+ // Unequal DiskLocs need not produce unequal hashes. But unequal hashes are likely, and
+ // assumed here for sanity checking of the custom hash implementation.
+ RecordId::Hasher hasher;
+ ASSERT_NOT_EQUALS(hasher(original), hasher(diffFile));
+ ASSERT_NOT_EQUALS(hasher(original), hasher(diffOfs));
+ ASSERT_NOT_EQUALS(hasher(original), hasher(diffBoth));
+ ASSERT_NOT_EQUALS(hasher(original), hasher(reversed));
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 80963f123b7..a3de8953291 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -51,197 +51,198 @@
namespace mongo {
- using std::endl;
- using std::string;
+using std::endl;
+using std::string;
namespace {
- Status rebuildIndexesOnCollection(OperationContext* txn,
- DatabaseCatalogEntry* dbce,
- const std::string& collectionName) {
-
- CollectionCatalogEntry* cce = dbce->getCollectionCatalogEntry(collectionName);
-
- std::vector<string> indexNames;
- std::vector<BSONObj> indexSpecs;
- {
- // Fetch all indexes
- cce->getAllIndexes( txn, &indexNames );
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- const string& name = indexNames[i];
- BSONObj spec = cce->getIndexSpec( txn, name );
- indexSpecs.push_back(spec.removeField("v").getOwned());
-
- const BSONObj key = spec.getObjectField("key");
- const Status keyStatus = validateKeyPattern(key);
- if (!keyStatus.isOK()) {
- return Status(ErrorCodes::CannotCreateIndex, str::stream()
+Status rebuildIndexesOnCollection(OperationContext* txn,
+ DatabaseCatalogEntry* dbce,
+ const std::string& collectionName) {
+ CollectionCatalogEntry* cce = dbce->getCollectionCatalogEntry(collectionName);
+
+ std::vector<string> indexNames;
+ std::vector<BSONObj> indexSpecs;
+ {
+ // Fetch all indexes
+ cce->getAllIndexes(txn, &indexNames);
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ const string& name = indexNames[i];
+ BSONObj spec = cce->getIndexSpec(txn, name);
+ indexSpecs.push_back(spec.removeField("v").getOwned());
+
+ const BSONObj key = spec.getObjectField("key");
+ const Status keyStatus = validateKeyPattern(key);
+ if (!keyStatus.isOK()) {
+ return Status(
+ ErrorCodes::CannotCreateIndex,
+ str::stream()
<< "Cannot rebuild index " << spec << ": " << keyStatus.reason()
<< " For more info see http://dochub.mongodb.org/core/index-validation");
- }
}
}
+ }
- // Skip the rest if there are no indexes to rebuild.
- if (indexSpecs.empty()) return Status::OK();
-
- std::unique_ptr<Collection> collection;
- std::unique_ptr<MultiIndexBlock> indexer;
- {
- // These steps are combined into a single WUOW to ensure there are no commits without
- // the indexes.
- // 1) Drop all indexes.
- // 2) Open the Collection
- // 3) Start the index build process.
-
- WriteUnitOfWork wuow(txn);
-
- { // 1
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- Status s = cce->removeIndex(txn, indexNames[i]);
- if (!s.isOK()) return s;
- }
- }
+ // Skip the rest if there are no indexes to rebuild.
+ if (indexSpecs.empty())
+ return Status::OK();
- // Indexes must be dropped before we open the Collection otherwise we could attempt to
- // open a bad index and fail.
- // TODO see if MultiIndexBlock can be made to work without a Collection.
- const StringData ns = cce->ns().ns();
- collection.reset(new Collection(txn, ns, cce, dbce->getRecordStore(ns), dbce));
-
- indexer.reset(new MultiIndexBlock(txn, collection.get()));
- Status status = indexer->init(indexSpecs);
- if (!status.isOK()) {
- // The WUOW will handle cleanup, so the indexer shouldn't do its own.
- indexer->abortWithoutCleanup();
- return status;
+ std::unique_ptr<Collection> collection;
+ std::unique_ptr<MultiIndexBlock> indexer;
+ {
+ // These steps are combined into a single WUOW to ensure there are no commits without
+ // the indexes.
+ // 1) Drop all indexes.
+ // 2) Open the Collection
+ // 3) Start the index build process.
+
+ WriteUnitOfWork wuow(txn);
+
+ { // 1
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ Status s = cce->removeIndex(txn, indexNames[i]);
+ if (!s.isOK())
+ return s;
}
-
- wuow.commit();
}
- // Iterate all records in the collection. Delete them if they aren't valid BSON. Index them
- // if they are.
-
- long long numRecords = 0;
- long long dataSize = 0;
-
- RecordStore* rs = collection->getRecordStore();
- auto cursor = rs->getCursor(txn);
- while (auto record = cursor->next()) {
- RecordId id = record->id;
- RecordData& data = record->data;
-
- Status status = validateBSON(data.data(), data.size());
- if (!status.isOK()) {
- log() << "Invalid BSON detected at " << id << ": " << status << ". Deleting.";
- cursor->savePositioned(); // 'data' is no longer valid.
- {
- WriteUnitOfWork wunit(txn);
- rs->deleteRecord(txn, id);
- wunit.commit();
- }
- cursor->restore(txn);
- continue;
- }
+ // Indexes must be dropped before we open the Collection otherwise we could attempt to
+ // open a bad index and fail.
+ // TODO see if MultiIndexBlock can be made to work without a Collection.
+ const StringData ns = cce->ns().ns();
+ collection.reset(new Collection(txn, ns, cce, dbce->getRecordStore(ns), dbce));
+
+ indexer.reset(new MultiIndexBlock(txn, collection.get()));
+ Status status = indexer->init(indexSpecs);
+ if (!status.isOK()) {
+ // The WUOW will handle cleanup, so the indexer shouldn't do its own.
+ indexer->abortWithoutCleanup();
+ return status;
+ }
- numRecords++;
- dataSize += data.size();
+ wuow.commit();
+ }
- // Now index the record.
- // TODO SERVER-14812 add a mode that drops duplicates rather than failing
- WriteUnitOfWork wunit(txn);
- status = indexer->insert(data.releaseToBson(), id);
- if (!status.isOK()) return status;
- wunit.commit();
+ // Iterate all records in the collection. Delete them if they aren't valid BSON. Index them
+ // if they are.
+
+ long long numRecords = 0;
+ long long dataSize = 0;
+
+ RecordStore* rs = collection->getRecordStore();
+ auto cursor = rs->getCursor(txn);
+ while (auto record = cursor->next()) {
+ RecordId id = record->id;
+ RecordData& data = record->data;
+
+ Status status = validateBSON(data.data(), data.size());
+ if (!status.isOK()) {
+ log() << "Invalid BSON detected at " << id << ": " << status << ". Deleting.";
+ cursor->savePositioned(); // 'data' is no longer valid.
+ {
+ WriteUnitOfWork wunit(txn);
+ rs->deleteRecord(txn, id);
+ wunit.commit();
+ }
+ cursor->restore(txn);
+ continue;
}
- Status status = indexer->doneInserting();
- if (!status.isOK()) return status;
+ numRecords++;
+ dataSize += data.size();
- {
- WriteUnitOfWork wunit(txn);
- indexer->commit();
- rs->updateStatsAfterRepair(txn, numRecords, dataSize);
- wunit.commit();
- }
+ // Now index the record.
+ // TODO SERVER-14812 add a mode that drops duplicates rather than failing
+ WriteUnitOfWork wunit(txn);
+ status = indexer->insert(data.releaseToBson(), id);
+ if (!status.isOK())
+ return status;
+ wunit.commit();
+ }
- return Status::OK();
+ Status status = indexer->doneInserting();
+ if (!status.isOK())
+ return status;
+
+ {
+ WriteUnitOfWork wunit(txn);
+ indexer->commit();
+ rs->updateStatsAfterRepair(txn, numRecords, dataSize);
+ wunit.commit();
}
-} // namespace
- Status repairDatabase(OperationContext* txn,
- StorageEngine* engine,
- const std::string& dbName,
- bool preserveClonedFilesOnFailure,
- bool backupOriginalFiles) {
+ return Status::OK();
+}
+} // namespace
- DisableDocumentValidation validationDisabler(txn);
+Status repairDatabase(OperationContext* txn,
+ StorageEngine* engine,
+ const std::string& dbName,
+ bool preserveClonedFilesOnFailure,
+ bool backupOriginalFiles) {
+ DisableDocumentValidation validationDisabler(txn);
- // We must hold some form of lock here
- invariant(txn->lockState()->isLocked());
- invariant( dbName.find( '.' ) == string::npos );
+ // We must hold some form of lock here
+ invariant(txn->lockState()->isLocked());
+ invariant(dbName.find('.') == string::npos);
- log() << "repairDatabase " << dbName << endl;
+ log() << "repairDatabase " << dbName << endl;
- BackgroundOperation::assertNoBgOpInProgForDb(dbName);
+ BackgroundOperation::assertNoBgOpInProgForDb(dbName);
- txn->checkForInterrupt();
+ txn->checkForInterrupt();
- if (engine->isMmapV1()) {
- // MMAPv1 is a layering violation so it implements its own repairDatabase.
- return static_cast<MMAPV1Engine*>(engine)->repairDatabase(txn,
- dbName,
- preserveClonedFilesOnFailure,
- backupOriginalFiles);
- }
+ if (engine->isMmapV1()) {
+ // MMAPv1 is a layering violation so it implements its own repairDatabase.
+ return static_cast<MMAPV1Engine*>(engine)
+ ->repairDatabase(txn, dbName, preserveClonedFilesOnFailure, backupOriginalFiles);
+ }
- // These are MMAPv1 specific
- if ( preserveClonedFilesOnFailure ) {
- return Status( ErrorCodes::BadValue, "preserveClonedFilesOnFailure not supported" );
- }
- if ( backupOriginalFiles ) {
- return Status( ErrorCodes::BadValue, "backupOriginalFiles not supported" );
- }
+ // These are MMAPv1 specific
+ if (preserveClonedFilesOnFailure) {
+ return Status(ErrorCodes::BadValue, "preserveClonedFilesOnFailure not supported");
+ }
+ if (backupOriginalFiles) {
+ return Status(ErrorCodes::BadValue, "backupOriginalFiles not supported");
+ }
- // Close the db to invalidate all current users and caches.
- dbHolder().close(txn, dbName);
- // Open the db after everything finishes
- class OpenDbInDestructor {
- public:
- OpenDbInDestructor(OperationContext* txn, const std::string& db) :
- _dbName(db)
- , _txn(txn)
- {}
- ~OpenDbInDestructor() {
- dbHolder().openDb(_txn, _dbName);
- }
- private:
- const std::string& _dbName;
- OperationContext* _txn;
- } dbOpener(txn, dbName);
- DatabaseCatalogEntry* dbce = engine->getDatabaseCatalogEntry(txn, dbName);
+ // Close the db to invalidate all current users and caches.
+ dbHolder().close(txn, dbName);
+ // Open the db after everything finishes
+ class OpenDbInDestructor {
+ public:
+ OpenDbInDestructor(OperationContext* txn, const std::string& db) : _dbName(db), _txn(txn) {}
+ ~OpenDbInDestructor() {
+ dbHolder().openDb(_txn, _dbName);
+ }
- std::list<std::string> colls;
- dbce->getCollectionNamespaces(&colls);
+ private:
+ const std::string& _dbName;
+ OperationContext* _txn;
+ } dbOpener(txn, dbName);
+ DatabaseCatalogEntry* dbce = engine->getDatabaseCatalogEntry(txn, dbName);
- for (std::list<std::string>::const_iterator it = colls.begin(); it != colls.end(); ++it) {
- // Don't check for interrupt after starting to repair a collection otherwise we can
- // leave data in an inconsistent state. Interrupting between collections is ok, however.
- txn->checkForInterrupt();
+ std::list<std::string> colls;
+ dbce->getCollectionNamespaces(&colls);
- log() << "Repairing collection " << *it;
+ for (std::list<std::string>::const_iterator it = colls.begin(); it != colls.end(); ++it) {
+ // Don't check for interrupt after starting to repair a collection otherwise we can
+ // leave data in an inconsistent state. Interrupting between collections is ok, however.
+ txn->checkForInterrupt();
- Status status = engine->repairRecordStore(txn, *it);
- if (!status.isOK()) return status;
+ log() << "Repairing collection " << *it;
- status = rebuildIndexesOnCollection(txn, dbce, *it);
- if (!status.isOK()) return status;
+ Status status = engine->repairRecordStore(txn, *it);
+ if (!status.isOK())
+ return status;
- // TODO: uncomment once SERVER-16869
- // engine->flushAllFiles(true);
- }
+ status = rebuildIndexesOnCollection(txn, dbce, *it);
+ if (!status.isOK())
+ return status;
- return Status::OK();
+ // TODO: uncomment once SERVER-16869
+ // engine->flushAllFiles(true);
}
-}
+ return Status::OK();
+}
+}
diff --git a/src/mongo/db/repair_database.h b/src/mongo/db/repair_database.h
index 17fb58274aa..c0945c0ed43 100644
--- a/src/mongo/db/repair_database.h
+++ b/src/mongo/db/repair_database.h
@@ -31,20 +31,19 @@
#include <string>
namespace mongo {
- class OperationContext;
- class Status;
- class StorageEngine;
- class StringData;
+class OperationContext;
+class Status;
+class StorageEngine;
+class StringData;
- /**
- * Repairs a database using a storage engine-specific, best-effort process.
- * Some data may be lost or modified in the process but the output will
- * be structurally valid on successful return.
- */
- Status repairDatabase(OperationContext* txn,
- StorageEngine* engine,
- const std::string& dbName,
- bool preserveClonedFilesOnFailure = false,
- bool backupOriginalFiles = false);
+/**
+ * Repairs a database using a storage engine-specific, best-effort process.
+ * Some data may be lost or modified in the process but the output will
+ * be structurally valid on successful return.
+ */
+Status repairDatabase(OperationContext* txn,
+ StorageEngine* engine,
+ const std::string& dbName,
+ bool preserveClonedFilesOnFailure = false,
+ bool backupOriginalFiles = false);
}
-
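
Note: repairDatabase() asserts that the caller already holds a lock and that 'dbName' contains
no '.', so a minimal (hypothetical) call-site sketch is:

    // 'txn' and 'engine' are assumed to be valid; the caller must already hold some lock
    // (see the invariant at the top of repairDatabase() in repair_database.cpp).
    mongo::Status repairOneDatabase(mongo::OperationContext* txn,
                                    mongo::StorageEngine* engine,
                                    const std::string& dbName) {
        // Non-MMAPv1 engines reject the two optional flags, so leave them at their defaults.
        return mongo::repairDatabase(txn, engine, dbName);
    }
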
diff --git a/src/mongo/db/repl/applier.cpp b/src/mongo/db/repl/applier.cpp
index f1d6942e87a..9000e0ebf65 100644
--- a/src/mongo/db/repl/applier.cpp
+++ b/src/mongo/db/repl/applier.cpp
@@ -40,199 +40,187 @@
namespace mongo {
namespace repl {
- Applier::Applier(ReplicationExecutor* executor,
- const Operations& operations,
- const ApplyOperationFn& applyOperation,
- const CallbackFn& onCompletion)
- : _executor(executor),
- _operations(operations),
- _applyOperation(applyOperation),
- _onCompletion(onCompletion),
- _active(false) {
-
- uassert(ErrorCodes::BadValue, "null replication executor", executor);
- uassert(ErrorCodes::BadValue, "empty list of operations", !operations.empty());
- uassert(ErrorCodes::FailedToParse,
- str::stream() << "last operation missing 'ts' field: " << operations.back(),
- operations.back().hasField("ts"));
- uassert(ErrorCodes::TypeMismatch,
- str::stream() << "'ts' in last operation not a timestamp: " << operations.back(),
- BSONType::bsonTimestamp == operations.back().getField("ts").type());
- uassert(ErrorCodes::BadValue, "apply operation function cannot be null", applyOperation);
- uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
+Applier::Applier(ReplicationExecutor* executor,
+ const Operations& operations,
+ const ApplyOperationFn& applyOperation,
+ const CallbackFn& onCompletion)
+ : _executor(executor),
+ _operations(operations),
+ _applyOperation(applyOperation),
+ _onCompletion(onCompletion),
+ _active(false) {
+ uassert(ErrorCodes::BadValue, "null replication executor", executor);
+ uassert(ErrorCodes::BadValue, "empty list of operations", !operations.empty());
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "last operation missing 'ts' field: " << operations.back(),
+ operations.back().hasField("ts"));
+ uassert(ErrorCodes::TypeMismatch,
+ str::stream() << "'ts' in last operation not a timestamp: " << operations.back(),
+ BSONType::bsonTimestamp == operations.back().getField("ts").type());
+ uassert(ErrorCodes::BadValue, "apply operation function cannot be null", applyOperation);
+ uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
+}
+
+Applier::~Applier() {
+ DESTRUCTOR_GUARD(cancel(); wait(););
+}
+
+std::string Applier::getDiagnosticString() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ str::stream output;
+ output << "Applier";
+ output << " executor: " << _executor->getDiagnosticString();
+ output << " active: " << _active;
+ return output;
+}
+
+bool Applier::isActive() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _active;
+}
+
+Status Applier::start() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+
+ if (_active) {
+ return Status(ErrorCodes::IllegalOperation, "applier already started");
}
- Applier::~Applier() {
- DESTRUCTOR_GUARD(
- cancel();
- wait();
- );
+ auto scheduleResult =
+ _executor->scheduleDBWork(stdx::bind(&Applier::_callback, this, stdx::placeholders::_1));
+ if (!scheduleResult.isOK()) {
+ return scheduleResult.getStatus();
}
- std::string Applier::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- str::stream output;
- output << "Applier";
- output << " executor: " << _executor->getDiagnosticString();
- output << " active: " << _active;
- return output;
- }
+ _active = true;
+ _dbWorkCallbackHandle = scheduleResult.getValue();
- bool Applier::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _active;
- }
+ return Status::OK();
+}
- Status Applier::start() {
+void Applier::cancel() {
+ ReplicationExecutor::CallbackHandle dbWorkCallbackHandle;
+ {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (_active) {
- return Status(ErrorCodes::IllegalOperation, "applier already started");
+ if (!_active) {
+ return;
}
- auto scheduleResult = _executor->scheduleDBWork(
- stdx::bind(&Applier::_callback, this, stdx::placeholders::_1));
- if (!scheduleResult.isOK()) {
- return scheduleResult.getStatus();
- }
+ dbWorkCallbackHandle = _dbWorkCallbackHandle;
+ }
- _active = true;
- _dbWorkCallbackHandle = scheduleResult.getValue();
+ if (dbWorkCallbackHandle.isValid()) {
+ _executor->cancel(dbWorkCallbackHandle);
+ }
+}
+
+void Applier::wait() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
- return Status::OK();
+ while (_active) {
+ _condition.wait(lk);
}
+}
- void Applier::cancel() {
- ReplicationExecutor::CallbackHandle dbWorkCallbackHandle;
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+void Applier::_callback(const ReplicationExecutor::CallbackArgs& cbd) {
+ if (!cbd.status.isOK()) {
+ _finishCallback(cbd.status, _operations);
+ return;
+ }
- if (!_active) {
- return;
- }
+ invariant(cbd.txn);
- dbWorkCallbackHandle = _dbWorkCallbackHandle;
- }
+ // Refer to multiSyncApply() and multiInitialSyncApply() in sync_tail.cpp.
+ cbd.txn->setReplicatedWrites(false);
- if (dbWorkCallbackHandle.isValid()) {
- _executor->cancel(dbWorkCallbackHandle);
- }
- }
+ // allow us to get through the magic barrier
+ cbd.txn->lockState()->setIsBatchWriter(true);
- void Applier::wait() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ Status applyStatus(ErrorCodes::InternalError, "not mutated");
- while (_active) {
- _condition.wait(lk);
+ invariant(!_operations.empty());
+ for (auto i = _operations.cbegin(); i != _operations.cend(); ++i) {
+ try {
+ applyStatus = _applyOperation(cbd.txn, *i);
+ } catch (...) {
+ applyStatus = exceptionToStatus();
}
- }
-
- void Applier::_callback(const ReplicationExecutor::CallbackArgs& cbd) {
- if (!cbd.status.isOK()) {
- _finishCallback(cbd.status, _operations);
+ if (!applyStatus.isOK()) {
+ // 'i' points to last operation that was not applied.
+ _finishCallback(applyStatus, Operations(i, _operations.cend()));
return;
}
-
- invariant(cbd.txn);
-
- // Refer to multiSyncApply() and multiInitialSyncApply() in sync_tail.cpp.
- cbd.txn->setReplicatedWrites(false);
-
- // allow us to get through the magic barrier
- cbd.txn->lockState()->setIsBatchWriter(true);
-
- Status applyStatus(ErrorCodes::InternalError, "not mutated");
-
- invariant(!_operations.empty());
- for (auto i = _operations.cbegin(); i != _operations.cend(); ++i) {
- try {
- applyStatus = _applyOperation(cbd.txn, *i);
- }
- catch (...) {
- applyStatus = exceptionToStatus();
- }
- if (!applyStatus.isOK()) {
- // 'i' points to last operation that was not applied.
- _finishCallback(applyStatus, Operations(i, _operations.cend()));
- return;
- }
- }
- _finishCallback(_operations.back().getField("ts").timestamp(), Operations());
}
+ _finishCallback(_operations.back().getField("ts").timestamp(), Operations());
+}
- void Applier::_finishCallback(const StatusWith<Timestamp>& result,
- const Operations& operations) {
- _onCompletion(result, operations);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _active = false;
- _condition.notify_all();
- }
+void Applier::_finishCallback(const StatusWith<Timestamp>& result, const Operations& operations) {
+ _onCompletion(result, operations);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _active = false;
+ _condition.notify_all();
+}
namespace {
- void pauseBeforeCompletion(
- const StatusWith<Timestamp>& result,
- const Applier::Operations& operationsOnCompletion,
- const PauseDataReplicatorFn& pauseDataReplicator,
- const Applier::CallbackFn& onCompletion) {
-
- if (result.isOK()) {
- pauseDataReplicator();
+void pauseBeforeCompletion(const StatusWith<Timestamp>& result,
+ const Applier::Operations& operationsOnCompletion,
+ const PauseDataReplicatorFn& pauseDataReplicator,
+ const Applier::CallbackFn& onCompletion) {
+ if (result.isOK()) {
+ pauseDataReplicator();
+ }
+ onCompletion(result, operationsOnCompletion);
+};
+
+} // namespace
+
+StatusWith<std::pair<std::unique_ptr<Applier>, Applier::Operations>> applyUntilAndPause(
+ ReplicationExecutor* executor,
+ const Applier::Operations& operations,
+ const Applier::ApplyOperationFn& applyOperation,
+ const Timestamp& lastTimestampToApply,
+ const PauseDataReplicatorFn& pauseDataReplicator,
+ const Applier::CallbackFn& onCompletion) {
+ try {
+ auto comp = [](const BSONObj& left, const BSONObj& right) {
+ uassert(ErrorCodes::FailedToParse,
+                    str::stream() << "Operation missing 'ts' field: " << left,
+ left.hasField("ts"));
+ uassert(ErrorCodes::FailedToParse,
+                    str::stream() << "Operation missing 'ts' field: " << right,
+ right.hasField("ts"));
+ return left["ts"].timestamp() < right["ts"].timestamp();
+ };
+ auto wrapped = BSON("ts" << lastTimestampToApply);
+ auto i = std::lower_bound(operations.cbegin(), operations.cend(), wrapped, comp);
+ bool found = i != operations.cend() && !comp(wrapped, *i);
+ auto j = found ? i + 1 : i;
+ Applier::Operations operationsInRange(operations.cbegin(), j);
+ Applier::Operations operationsNotInRange(j, operations.cend());
+ if (!found) {
+ return std::make_pair(std::unique_ptr<Applier>(new Applier(
+ executor, operationsInRange, applyOperation, onCompletion)),
+ operationsNotInRange);
}
- onCompletion(result, operationsOnCompletion);
- };
-
-} // namespace
-
- StatusWith<std::pair<std::unique_ptr<Applier>, Applier::Operations> > applyUntilAndPause(
- ReplicationExecutor* executor,
- const Applier::Operations& operations,
- const Applier::ApplyOperationFn& applyOperation,
- const Timestamp& lastTimestampToApply,
- const PauseDataReplicatorFn& pauseDataReplicator,
- const Applier::CallbackFn& onCompletion) {
- try {
- auto comp = [](const BSONObj& left, const BSONObj& right) {
- uassert(ErrorCodes::FailedToParse,
- str::stream() << "Operation missing 'ts' field': " << left,
- left.hasField("ts"));
- uassert(ErrorCodes::FailedToParse,
- str::stream() << "Operation missing 'ts' field': " << right,
- right.hasField("ts"));
- return left["ts"].timestamp() < right["ts"].timestamp();
- };
- auto wrapped = BSON("ts" << lastTimestampToApply);
- auto i = std::lower_bound(operations.cbegin(), operations.cend(), wrapped, comp);
- bool found = i != operations.cend() && !comp(wrapped, *i);
- auto j = found ? i+1 : i;
- Applier::Operations operationsInRange(operations.cbegin(), j);
- Applier::Operations operationsNotInRange(j, operations.cend());
- if (!found) {
- return std::make_pair(
- std::unique_ptr<Applier>(
- new Applier(executor, operationsInRange, applyOperation, onCompletion)),
- operationsNotInRange);
- }
-
- return std::make_pair(
- std::unique_ptr<Applier>(new Applier(
- executor,
- operationsInRange,
- applyOperation,
- stdx::bind(pauseBeforeCompletion,
- stdx::placeholders::_1,
- stdx::placeholders::_2,
- pauseDataReplicator,
- onCompletion))),
- operationsNotInRange);
- }
- catch (...) {
- return exceptionToStatus();
- }
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
+ return std::make_pair(
+ std::unique_ptr<Applier>(new Applier(executor,
+ operationsInRange,
+ applyOperation,
+ stdx::bind(pauseBeforeCompletion,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ pauseDataReplicator,
+ onCompletion))),
+ operationsNotInRange);
+ } catch (...) {
+ return exceptionToStatus();
}
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/applier.h b/src/mongo/db/repl/applier.h
index 7be023683da..a8e48c0edb9 100644
--- a/src/mongo/db/repl/applier.h
+++ b/src/mongo/db/repl/applier.h
@@ -45,129 +45,128 @@
namespace mongo {
namespace repl {
- class Applier {
- MONGO_DISALLOW_COPYING(Applier);
- public:
-
- /**
- * Operations sorted by timestamp in ascending order.
- */
- using Operations = std::vector<BSONObj>;
-
- /**
- * Callback function to report final status of applying operations along with
- * list of operations (if applicable) that were not successfully applied.
- * On success, returns the timestamp of the last operation applied together with an empty
- * list of operations.
- */
- using CallbackFn = stdx::function<void (const StatusWith<Timestamp>&, const Operations&)>;
-
- /**
- * Type of function to to apply a single operation. In production, this function
- * would have the same outcome as calling SyncTail::syncApply() ('convertUpdatesToUpserts'
- * value will be embedded in the function implementation).
- */
- using ApplyOperationFn = stdx::function<Status (OperationContext*, const BSONObj&)>;
-
- /**
- * Creates Applier in inactive state.
- *
- * Accepts list of oplog entries to apply in 'operations'.
- *
- * The callback function will be invoked (after schedule()) when the applied has
- * successfully applied all the operations or encountered a failure. Failures may occur if
- * we failed to apply an operation; or if the underlying scheduled work item
- * on the replication executor was canceled.
- *
- * It is an error for 'operations' to be empty but individual oplog entries
- * contained in 'operations' are not validated.
- */
- Applier(ReplicationExecutor* executor,
- const Operations& operations,
- const ApplyOperationFn& applyOperation,
- const CallbackFn& onCompletion);
-
- /**
- * Blocks while applier is active.
- */
- virtual ~Applier();
-
- /**
- * Returns diagnostic information.
- */
- std::string getDiagnosticString() const;
-
- /**
- * Returns true if the applier has been started (but has not completed).
- */
- bool isActive() const;
-
- /**
- * Starts applier by scheduling initial db work to be run by the executor.
- */
- Status start();
-
- /**
- * Cancels current db work request.
- * Returns immediately if applier is not active.
- *
- * Callback function may be invoked with an ErrorCodes::CallbackCanceled status.
- */
- void cancel();
-
- /**
- * Waits for active database worker to complete.
- * Returns immediately if applier is not active.
- */
- void wait();
-
- private:
-
- /**
- * DB worker callback function - applies all operations.
- */
- void _callback(const ReplicationExecutor::CallbackArgs& cbd);
- void _finishCallback(const StatusWith<Timestamp>& result, const Operations& operations);
-
- // Not owned by us.
- ReplicationExecutor* _executor;
-
- Operations _operations;
- ApplyOperationFn _applyOperation;
- CallbackFn _onCompletion;
-
- // Protects member data of this Applier.
- mutable stdx::mutex _mutex;
-
- stdx::condition_variable _condition;
-
- // _active is true when Applier is scheduled to be run by the executor.
- bool _active;
-
- ReplicationExecutor::CallbackHandle _dbWorkCallbackHandle;
- };
+class Applier {
+ MONGO_DISALLOW_COPYING(Applier);
+public:
+ /**
+ * Operations sorted by timestamp in ascending order.
+ */
+ using Operations = std::vector<BSONObj>;
+
+ /**
+ * Callback function to report final status of applying operations along with
+ * list of operations (if applicable) that were not successfully applied.
+ * On success, returns the timestamp of the last operation applied together with an empty
+ * list of operations.
+ */
+ using CallbackFn = stdx::function<void(const StatusWith<Timestamp>&, const Operations&)>;
+
+ /**
+     * Type of function to apply a single operation. In production, this function
+ * would have the same outcome as calling SyncTail::syncApply() ('convertUpdatesToUpserts'
+ * value will be embedded in the function implementation).
+ */
+ using ApplyOperationFn = stdx::function<Status(OperationContext*, const BSONObj&)>;
+
+ /**
+ * Creates Applier in inactive state.
+ *
+ * Accepts list of oplog entries to apply in 'operations'.
+ *
+     * The callback function will be invoked (after schedule()) when the applier has
+     * successfully applied all the operations or encountered a failure. Failures may occur if
+     * we failed to apply an operation, or if the underlying scheduled work item
+ * on the replication executor was canceled.
+ *
+     * It is an error for 'operations' to be empty, but the individual oplog entries
+     * contained in 'operations' are not validated.
+ */
+ Applier(ReplicationExecutor* executor,
+ const Operations& operations,
+ const ApplyOperationFn& applyOperation,
+ const CallbackFn& onCompletion);
+
+ /**
+ * Blocks while applier is active.
+ */
+ virtual ~Applier();
+
+ /**
+ * Returns diagnostic information.
+ */
+ std::string getDiagnosticString() const;
+
+ /**
+ * Returns true if the applier has been started (but has not completed).
+ */
+ bool isActive() const;
/**
- * Applies operations (sorted by timestamp) up to and including 'lastTimestampToApply'.
- * If 'lastTimestampToApply' is found in 'operations':
- * - The applier will be given a subset of 'operations' (includes 'lastTimestampToApply').
- * - On success, the applier will invoke the 'pause' function just before reporting
- * completion status.
- * Otherwise, all entries in 'operations' before 'lastTimestampToApply' will be forwarded to
- * the applier and the 'pause' function will be ignored.
- * If the applier is successfully created, returns the applier and a list of operations that
- * are skipped (operations with 'ts' field value after 'lastTimestampToApply).
+ * Starts applier by scheduling initial db work to be run by the executor.
*/
- using PauseDataReplicatorFn = stdx::function<void ()>;
-
- StatusWith<std::pair<std::unique_ptr<Applier>, Applier::Operations> > applyUntilAndPause(
- ReplicationExecutor* executor,
- const Applier::Operations& operations,
- const Applier::ApplyOperationFn& applyOperation,
- const Timestamp& lastTimestampToApply,
- const PauseDataReplicatorFn& pauseDataReplicator,
- const Applier::CallbackFn& onCompletion);
-
-} // namespace repl
-} // namespace mongo
+ Status start();
+
+ /**
+ * Cancels current db work request.
+ * Returns immediately if applier is not active.
+ *
+ * Callback function may be invoked with an ErrorCodes::CallbackCanceled status.
+ */
+ void cancel();
+
+ /**
+ * Waits for active database worker to complete.
+ * Returns immediately if applier is not active.
+ */
+ void wait();
+
+private:
+ /**
+ * DB worker callback function - applies all operations.
+ */
+ void _callback(const ReplicationExecutor::CallbackArgs& cbd);
+ void _finishCallback(const StatusWith<Timestamp>& result, const Operations& operations);
+
+ // Not owned by us.
+ ReplicationExecutor* _executor;
+
+ Operations _operations;
+ ApplyOperationFn _applyOperation;
+ CallbackFn _onCompletion;
+
+ // Protects member data of this Applier.
+ mutable stdx::mutex _mutex;
+
+ stdx::condition_variable _condition;
+
+ // _active is true when Applier is scheduled to be run by the executor.
+ bool _active;
+
+ ReplicationExecutor::CallbackHandle _dbWorkCallbackHandle;
+};
+
+
+/**
+ * Applies operations (sorted by timestamp) up to and including 'lastTimestampToApply'.
+ * If 'lastTimestampToApply' is found in 'operations':
+ * - The applier will be given a subset of 'operations' (includes 'lastTimestampToApply').
+ * - On success, the applier will invoke the 'pause' function just before reporting
+ * completion status.
+ * Otherwise, all entries in 'operations' before 'lastTimestampToApply' will be forwarded to
+ * the applier and the 'pause' function will be ignored.
+ * If the applier is successfully created, returns the applier and a list of operations that
+ * are skipped (operations with 'ts' field value after 'lastTimestampToApply').
+ */
+using PauseDataReplicatorFn = stdx::function<void()>;
+
+StatusWith<std::pair<std::unique_ptr<Applier>, Applier::Operations>> applyUntilAndPause(
+ ReplicationExecutor* executor,
+ const Applier::Operations& operations,
+ const Applier::ApplyOperationFn& applyOperation,
+ const Timestamp& lastTimestampToApply,
+ const PauseDataReplicatorFn& pauseDataReplicator,
+ const Applier::CallbackFn& onCompletion);
+
+} // namespace repl
+} // namespace mongo
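
Note: the Applier lifecycle documented above is construct -> start() -> wait(), with cancel()
optional; a condensed caller sketch (the apply and completion callbacks are stubbed out, and
'applyBatch' is an illustrative name, not part of the patch) is:

    using namespace mongo;
    using namespace mongo::repl;

    Status applyBatch(ReplicationExecutor* executor, const Applier::Operations& ops) {
        // 'ops' must be non-empty and its last entry must carry a 'ts' timestamp field.
        Applier applier(executor,
                        ops,
                        [](OperationContext* txn, const BSONObj& op) { return Status::OK(); },
                        [](const StatusWith<Timestamp>& result,
                           const Applier::Operations& notApplied) {
                            // On success 'notApplied' is empty and 'result' holds the last 'ts'.
                        });
        Status status = applier.start();  // schedules the DB work on the executor
        if (!status.isOK())
            return status;
        applier.wait();  // blocks until the completion callback has run
        return Status::OK();
    }
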
diff --git a/src/mongo/db/repl/applier_test.cpp b/src/mongo/db/repl/applier_test.cpp
index f876a884605..5138d2561c6 100644
--- a/src/mongo/db/repl/applier_test.cpp
+++ b/src/mongo/db/repl/applier_test.cpp
@@ -42,624 +42,609 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
-
- using Operations = Applier::Operations;
-
- class ApplierTest : public ReplicationExecutorTest {
- public:
-
- Applier* getApplier() const;
-
- protected:
-
- void setUp() override;
- void tearDown() override;
-
- /**
- * Test function to check behavior when we fail to apply one of the operations.
- */
- void _testApplyOperationFailed(size_t opIndex, stdx::function<Status ()> fail);
-
- std::unique_ptr<Applier> _applier;
- std::unique_ptr<unittest::Barrier> _barrier;
- };
-
- void ApplierTest::setUp() {
- ReplicationExecutorTest::setUp();
- launchExecutorThread();
- auto apply = [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); };
- _applier.reset(new Applier(&getExecutor(),
- {BSON("ts" << Timestamp(Seconds(123), 0))},
- apply,
- [this](const StatusWith<Timestamp>&, const Operations&) {
- if (_barrier.get()) {
- _barrier->countDownAndWait();
- }
- }));
- }
-
- void ApplierTest::tearDown() {
- ReplicationExecutorTest::tearDown();
- _applier.reset();
- _barrier.reset();
- }
-
- Applier* ApplierTest::getApplier() const {
- return _applier.get();
- }
-
- TEST_F(ApplierTest, InvalidConstruction) {
- const Operations operations{BSON("ts" << Timestamp(Seconds(123), 0))};
- auto apply = [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); };
- auto callback = [](const StatusWith<Timestamp>& status, const Operations& operations) { };
-
- // Null executor.
- ASSERT_THROWS_CODE(
- Applier(nullptr, operations, apply, callback),
- UserException,
- ErrorCodes::BadValue);
-
- // Empty list of operations.
- ASSERT_THROWS_CODE(
- Applier(&getExecutor(), {}, apply, callback),
- UserException,
- ErrorCodes::BadValue);
-
- // Last operation missing timestamp field.
- ASSERT_THROWS_CODE(
- Applier(&getExecutor(), {BSONObj()}, apply, callback),
- UserException,
- ErrorCodes::FailedToParse);
-
- // "ts" field in last operation not a timestamp.
- ASSERT_THROWS_CODE(
- Applier(&getExecutor(), {BSON("ts" << 99)}, apply, callback),
- UserException,
- ErrorCodes::TypeMismatch);
-
- // Invalid apply operation function.
- ASSERT_THROWS_CODE(
- Applier(&getExecutor(), operations, Applier::ApplyOperationFn(), callback),
- UserException,
- ErrorCodes::BadValue);
-
- // Invalid callback function.
- ASSERT_THROWS_CODE(
- Applier(&getExecutor(), operations, apply, Applier::CallbackFn()),
- UserException,
- ErrorCodes::BadValue);
- }
-
- TEST_F(ApplierTest, GetDiagnosticString) {
- ASSERT_FALSE(getApplier()->getDiagnosticString().empty());
- }
-
- TEST_F(ApplierTest, IsActiveAfterStart) {
- // Use a barrier to ensure that the callback blocks while
- // we check isActive().
- _barrier.reset(new unittest::Barrier(2U));
- ASSERT_FALSE(getApplier()->isActive());
- ASSERT_OK(getApplier()->start());
- ASSERT_TRUE(getApplier()->isActive());
- _barrier->countDownAndWait();
- }
-
- TEST_F(ApplierTest, StartWhenActive) {
- // Use a barrier to ensure that the callback blocks while
- // we check isActive().
- _barrier.reset(new unittest::Barrier(2U));
- ASSERT_OK(getApplier()->start());
- ASSERT_TRUE(getApplier()->isActive());
- ASSERT_NOT_OK(getApplier()->start());
- ASSERT_TRUE(getApplier()->isActive());
- _barrier->countDownAndWait();
- }
-
- TEST_F(ApplierTest, CancelWithoutStart) {
- ASSERT_FALSE(getApplier()->isActive());
- getApplier()->cancel();
- ASSERT_FALSE(getApplier()->isActive());
- }
-
- TEST_F(ApplierTest, WaitWithoutStart) {
- ASSERT_FALSE(getApplier()->isActive());
- getApplier()->wait();
- ASSERT_FALSE(getApplier()->isActive());
- }
-
- TEST_F(ApplierTest, ShutdownBeforeStart) {
- getExecutor().shutdown();
- ASSERT_NOT_OK(getApplier()->start());
- ASSERT_FALSE(getApplier()->isActive());
- }
-
- TEST_F(ApplierTest, CancelBeforeStartingDBWork) {
- // Schedule a blocking DB work item before the applier to allow us to cancel the applier
- // work item before the executor runs it.
- unittest::Barrier barrier(2U);
- using CallbackData = ReplicationExecutor::CallbackArgs;
- getExecutor().scheduleDBWork([&](const CallbackData& cbd) {
- barrier.countDownAndWait(); // generation 0
- });
- const BSONObj operation = BSON("ts" << Timestamp(Seconds(123), 0));
- stdx::mutex mutex;
- StatusWith<Timestamp> result = getDetectableErrorStatus();
- Applier::Operations operations;
- _applier.reset(new Applier(
- &getExecutor(),
- {operation},
- [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
- [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- result = theResult;
- operations = theOperations;
- }));
-
- getApplier()->start();
- getApplier()->cancel();
- ASSERT_TRUE(getApplier()->isActive());
-
- barrier.countDownAndWait(); // generation 0
-
- getApplier()->wait();
- ASSERT_FALSE(getApplier()->isActive());
-
- stdx::lock_guard<stdx::mutex> lock(mutex);
+using namespace mongo;
+using namespace mongo::repl;
+
+using Operations = Applier::Operations;
+
+class ApplierTest : public ReplicationExecutorTest {
+public:
+ Applier* getApplier() const;
+
+protected:
+ void setUp() override;
+ void tearDown() override;
+
+ /**
+ * Test function to check behavior when we fail to apply one of the operations.
+ */
+ void _testApplyOperationFailed(size_t opIndex, stdx::function<Status()> fail);
+
+ std::unique_ptr<Applier> _applier;
+ std::unique_ptr<unittest::Barrier> _barrier;
+};
+
+void ApplierTest::setUp() {
+ ReplicationExecutorTest::setUp();
+ launchExecutorThread();
+ auto apply = [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); };
+ _applier.reset(new Applier(&getExecutor(),
+ {BSON("ts" << Timestamp(Seconds(123), 0))},
+ apply,
+ [this](const StatusWith<Timestamp>&, const Operations&) {
+ if (_barrier.get()) {
+ _barrier->countDownAndWait();
+ }
+ }));
+}
+
+void ApplierTest::tearDown() {
+ ReplicationExecutorTest::tearDown();
+ _applier.reset();
+ _barrier.reset();
+}
+
+Applier* ApplierTest::getApplier() const {
+ return _applier.get();
+}
+
+TEST_F(ApplierTest, InvalidConstruction) {
+ const Operations operations{BSON("ts" << Timestamp(Seconds(123), 0))};
+ auto apply = [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); };
+ auto callback = [](const StatusWith<Timestamp>& status, const Operations& operations) {};
+
+ // Null executor.
+ ASSERT_THROWS_CODE(
+ Applier(nullptr, operations, apply, callback), UserException, ErrorCodes::BadValue);
+
+ // Empty list of operations.
+ ASSERT_THROWS_CODE(
+ Applier(&getExecutor(), {}, apply, callback), UserException, ErrorCodes::BadValue);
+
+ // Last operation missing timestamp field.
+ ASSERT_THROWS_CODE(Applier(&getExecutor(), {BSONObj()}, apply, callback),
+ UserException,
+ ErrorCodes::FailedToParse);
+
+ // "ts" field in last operation not a timestamp.
+ ASSERT_THROWS_CODE(Applier(&getExecutor(), {BSON("ts" << 99)}, apply, callback),
+ UserException,
+ ErrorCodes::TypeMismatch);
+
+ // Invalid apply operation function.
+ ASSERT_THROWS_CODE(Applier(&getExecutor(), operations, Applier::ApplyOperationFn(), callback),
+ UserException,
+ ErrorCodes::BadValue);
+
+ // Invalid callback function.
+ ASSERT_THROWS_CODE(Applier(&getExecutor(), operations, apply, Applier::CallbackFn()),
+ UserException,
+ ErrorCodes::BadValue);
+}
+
+TEST_F(ApplierTest, GetDiagnosticString) {
+ ASSERT_FALSE(getApplier()->getDiagnosticString().empty());
+}
+
+TEST_F(ApplierTest, IsActiveAfterStart) {
+ // Use a barrier to ensure that the callback blocks while
+ // we check isActive().
+ _barrier.reset(new unittest::Barrier(2U));
+ ASSERT_FALSE(getApplier()->isActive());
+ ASSERT_OK(getApplier()->start());
+ ASSERT_TRUE(getApplier()->isActive());
+ _barrier->countDownAndWait();
+}
+
+TEST_F(ApplierTest, StartWhenActive) {
+ // Use a barrier to ensure that the callback blocks while
+ // we check isActive().
+ _barrier.reset(new unittest::Barrier(2U));
+ ASSERT_OK(getApplier()->start());
+ ASSERT_TRUE(getApplier()->isActive());
+ ASSERT_NOT_OK(getApplier()->start());
+ ASSERT_TRUE(getApplier()->isActive());
+ _barrier->countDownAndWait();
+}
+
+TEST_F(ApplierTest, CancelWithoutStart) {
+ ASSERT_FALSE(getApplier()->isActive());
+ getApplier()->cancel();
+ ASSERT_FALSE(getApplier()->isActive());
+}
+
+TEST_F(ApplierTest, WaitWithoutStart) {
+ ASSERT_FALSE(getApplier()->isActive());
+ getApplier()->wait();
+ ASSERT_FALSE(getApplier()->isActive());
+}
+
+TEST_F(ApplierTest, ShutdownBeforeStart) {
+ getExecutor().shutdown();
+ ASSERT_NOT_OK(getApplier()->start());
+ ASSERT_FALSE(getApplier()->isActive());
+}
+
+TEST_F(ApplierTest, CancelBeforeStartingDBWork) {
+ // Schedule a blocking DB work item before the applier to allow us to cancel the applier
+ // work item before the executor runs it.
+ unittest::Barrier barrier(2U);
+ using CallbackData = ReplicationExecutor::CallbackArgs;
+ getExecutor().scheduleDBWork([&](const CallbackData& cbd) {
+ barrier.countDownAndWait(); // generation 0
+ });
+ const BSONObj operation = BSON("ts" << Timestamp(Seconds(123), 0));
+ stdx::mutex mutex;
+ StatusWith<Timestamp> result = getDetectableErrorStatus();
+ Applier::Operations operations;
+ _applier.reset(
+ new Applier(&getExecutor(),
+ {operation},
+ [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
+ [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ result = theResult;
+ operations = theOperations;
+ }));
+
+ getApplier()->start();
+ getApplier()->cancel();
+ ASSERT_TRUE(getApplier()->isActive());
+
+ barrier.countDownAndWait(); // generation 0
+
+ getApplier()->wait();
+ ASSERT_FALSE(getApplier()->isActive());
+
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, result.getStatus().code());
+ ASSERT_EQUALS(1U, operations.size());
+ ASSERT_EQUALS(operation, operations.front());
+}
+
+TEST_F(ApplierTest, DestroyBeforeStartingDBWork) {
+ // Schedule a blocking DB work item before the applier to allow us to destroy the applier
+ // before the executor runs the work item.
+ unittest::Barrier barrier(2U);
+ using CallbackData = ReplicationExecutor::CallbackArgs;
+ getExecutor().scheduleDBWork([&](const CallbackData& cbd) {
+ barrier.countDownAndWait(); // generation 0
+ // Give the main thread a head start in invoking the applier destructor.
+ sleepmillis(1);
+ });
+ const BSONObj operation = BSON("ts" << Timestamp(Seconds(123), 0));
+ stdx::mutex mutex;
+ StatusWith<Timestamp> result = getDetectableErrorStatus();
+ Applier::Operations operations;
+ _applier.reset(
+ new Applier(&getExecutor(),
+ {operation},
+ [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
+ [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ result = theResult;
+ operations = theOperations;
+ }));
+
+ getApplier()->start();
+ ASSERT_TRUE(getApplier()->isActive());
+
+ barrier.countDownAndWait(); // generation 0
+
+ // It is possible the executor may have invoked the callback before we
+ // destroy the applier. Therefore both OK and CallbackCanceled are acceptable
+ // statuses.
+ _applier.reset();
+
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ if (result.isOK()) {
+ ASSERT_TRUE(operations.empty());
+ } else {
ASSERT_EQUALS(ErrorCodes::CallbackCanceled, result.getStatus().code());
ASSERT_EQUALS(1U, operations.size());
ASSERT_EQUALS(operation, operations.front());
}
-
- TEST_F(ApplierTest, DestroyBeforeStartingDBWork) {
- // Schedule a blocking DB work item before the applier to allow us to destroy the applier
- // before the executor runs the work item.
- unittest::Barrier barrier(2U);
- using CallbackData = ReplicationExecutor::CallbackArgs;
- getExecutor().scheduleDBWork([&](const CallbackData& cbd) {
- barrier.countDownAndWait(); // generation 0
- // Give the main thread a head start in invoking the applier destructor.
- sleepmillis(1);
- });
- const BSONObj operation = BSON("ts" << Timestamp(Seconds(123), 0));
- stdx::mutex mutex;
- StatusWith<Timestamp> result = getDetectableErrorStatus();
- Applier::Operations operations;
- _applier.reset(new Applier(
- &getExecutor(),
- {operation},
- [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
- [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- result = theResult;
- operations = theOperations;
- }));
-
- getApplier()->start();
- ASSERT_TRUE(getApplier()->isActive());
-
- barrier.countDownAndWait(); // generation 0
-
- // It is possible the executor may have invoked the callback before we
- // destroy the applier. Therefore both OK and CallbackCanceled are acceptable
- // statuses.
- _applier.reset();
-
- stdx::lock_guard<stdx::mutex> lock(mutex);
- if (result.isOK()) {
- ASSERT_TRUE(operations.empty());
- }
- else {
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, result.getStatus().code());
- ASSERT_EQUALS(1U, operations.size());
- ASSERT_EQUALS(operation, operations.front());
- }
- }
-
- TEST_F(ApplierTest, WaitForCompletion) {
- const Timestamp timestamp(Seconds(123), 0);
- stdx::mutex mutex;
- StatusWith<Timestamp> result = getDetectableErrorStatus();
- Applier::Operations operations;
- _applier.reset(new Applier(
- &getExecutor(),
- {BSON("ts" << timestamp)},
- [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
- [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- result = theResult;
- operations = theOperations;
- }));
-
- getApplier()->start();
- getApplier()->wait();
- ASSERT_FALSE(getApplier()->isActive());
-
- stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(timestamp, result.getValue());
- ASSERT_TRUE(operations.empty());
- }
-
- TEST_F(ApplierTest, DestroyShouldBlockUntilInactive) {
- const Timestamp timestamp(Seconds(123), 0);
- unittest::Barrier barrier(2U);
- stdx::mutex mutex;
- StatusWith<Timestamp> result = getDetectableErrorStatus();
- Applier::Operations operations;
- _applier.reset(new Applier(
- &getExecutor(),
- {BSON("ts" << timestamp)},
- [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
- [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- result = theResult;
- operations = theOperations;
- barrier.countDownAndWait();
- }));
-
- getApplier()->start();
- barrier.countDownAndWait();
- _applier.reset();
-
+}
+
+TEST_F(ApplierTest, WaitForCompletion) {
+ const Timestamp timestamp(Seconds(123), 0);
+ stdx::mutex mutex;
+ StatusWith<Timestamp> result = getDetectableErrorStatus();
+ Applier::Operations operations;
+ _applier.reset(
+ new Applier(&getExecutor(),
+ {BSON("ts" << timestamp)},
+ [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
+ [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ result = theResult;
+ operations = theOperations;
+ }));
+
+ getApplier()->start();
+ getApplier()->wait();
+ ASSERT_FALSE(getApplier()->isActive());
+
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(timestamp, result.getValue());
+ ASSERT_TRUE(operations.empty());
+}
+
+TEST_F(ApplierTest, DestroyShouldBlockUntilInactive) {
+ const Timestamp timestamp(Seconds(123), 0);
+ unittest::Barrier barrier(2U);
+ stdx::mutex mutex;
+ StatusWith<Timestamp> result = getDetectableErrorStatus();
+ Applier::Operations operations;
+ _applier.reset(
+ new Applier(&getExecutor(),
+ {BSON("ts" << timestamp)},
+ [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
+ [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ result = theResult;
+ operations = theOperations;
+ barrier.countDownAndWait();
+ }));
+
+ getApplier()->start();
+ barrier.countDownAndWait();
+ _applier.reset();
+
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(timestamp, result.getValue());
+ ASSERT_TRUE(operations.empty());
+}
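The two destruction tests above (DestroyBeforeStartingDBWork and DestroyShouldBlockUntilInactive) pin down the same contract: tearing down the applier must wait for any in-flight completion callback instead of racing it. Below is a minimal sketch of that contract using only the standard library (std::thread and std::condition_variable rather than the ReplicationExecutor and stdx wrappers used here); the class name and members are hypothetical, not MongoDB code.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>

class BlockingWorker {
public:
    explicit BlockingWorker(std::function<void()> task) : _task(std::move(task)) {}

    // Destruction blocks until the background task (if started) has finished.
    ~BlockingWorker() {
        wait();
    }

    void start() {
        std::lock_guard<std::mutex> lk(_mutex);
        if (_active) {
            return;  // Already running; a real Applier::start() would return an error Status here.
        }
        _active = true;
        _thread = std::thread([this] {
            _task();
            std::lock_guard<std::mutex> innerLk(_mutex);
            _active = false;
            _cond.notify_all();
        });
    }

    void wait() {
        std::unique_lock<std::mutex> lk(_mutex);
        _cond.wait(lk, [this] { return !_active; });
        lk.unlock();
        if (_thread.joinable()) {
            _thread.join();
        }
    }

private:
    std::function<void()> _task;
    std::thread _thread;
    mutable std::mutex _mutex;
    std::condition_variable _cond;
    bool _active = false;
};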
+
+TEST_F(ApplierTest, ApplyOperationSuccessful) {
+    // Bogus operation codes.
+ Applier::Operations operationsToApply{
+ BSON("op"
+ << "a"
+ << "ts" << Timestamp(Seconds(123), 0)),
+ BSON("op"
+ << "b"
+ << "ts" << Timestamp(Seconds(456), 0)),
+ BSON("op"
+ << "c"
+ << "ts" << Timestamp(Seconds(789), 0)),
+ };
+ stdx::mutex mutex;
+ StatusWith<Timestamp> result = getDetectableErrorStatus();
+ bool areWritesReplicationOnOperationContext = true;
+ bool isLockBatchWriter = false;
+ Applier::Operations operationsApplied;
+ Applier::Operations operationsOnCompletion;
+ auto apply = [&](OperationContext* txn, const BSONObj& operation) {
stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(timestamp, result.getValue());
- ASSERT_TRUE(operations.empty());
- }
-
- TEST_F(ApplierTest, ApplyOperationSuccessful) {
- // Bogus operations codes.
- Applier::Operations operationsToApply{
- BSON("op" << "a" << "ts" << Timestamp(Seconds(123), 0)),
- BSON("op" << "b" << "ts" << Timestamp(Seconds(456), 0)),
- BSON("op" << "c" << "ts" << Timestamp(Seconds(789), 0)),
- };
- stdx::mutex mutex;
- StatusWith<Timestamp> result = getDetectableErrorStatus();
- bool areWritesReplicationOnOperationContext = true;
- bool isLockBatchWriter = false;
- Applier::Operations operationsApplied;
- Applier::Operations operationsOnCompletion;
- auto apply = [&](OperationContext* txn, const BSONObj& operation) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- areWritesReplicationOnOperationContext = txn->writesAreReplicated();
- isLockBatchWriter = txn->lockState()->isBatchWriter();
- operationsApplied.push_back(operation);
- return Status::OK();
- };
- auto callback = [&](const StatusWith<Timestamp>& theResult,
- const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- result = theResult;
- operationsOnCompletion = theOperations;
- };
-
- _applier.reset(new Applier(&getExecutor(), operationsToApply, apply, callback));
- _applier->start();
- _applier->wait();
-
+ areWritesReplicationOnOperationContext = txn->writesAreReplicated();
+ isLockBatchWriter = txn->lockState()->isBatchWriter();
+ operationsApplied.push_back(operation);
+ return Status::OK();
+ };
+ auto callback = [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_FALSE(areWritesReplicationOnOperationContext);
- ASSERT_TRUE(isLockBatchWriter);
- ASSERT_EQUALS(operationsToApply.size(), operationsApplied.size());
- ASSERT_EQUALS(operationsToApply[0], operationsApplied[0]);
- ASSERT_EQUALS(operationsToApply[1], operationsApplied[1]);
- ASSERT_EQUALS(operationsToApply[2], operationsApplied[2]);
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(operationsToApply[2]["ts"].timestamp(), result.getValue());
- ASSERT_TRUE(operationsOnCompletion.empty());
- }
-
- void ApplierTest::_testApplyOperationFailed(size_t opIndex, stdx::function<Status ()> fail) {
- // Bogus operations codes.
- Applier::Operations operationsToApply{
- BSON("op" << "a" << "ts" << Timestamp(Seconds(123), 0)),
- BSON("op" << "b" << "ts" << Timestamp(Seconds(456), 0)),
- BSON("op" << "c" << "ts" << Timestamp(Seconds(789), 0)),
- };
- stdx::mutex mutex;
- StatusWith<Timestamp> result = getDetectableErrorStatus();
- Applier::Operations operationsApplied;
- Applier::Operations operationsOnCompletion;
- auto apply = [&](OperationContext* txn, const BSONObj& operation) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- if (operationsApplied.size() == opIndex) {
- return fail();
- }
- operationsApplied.push_back(operation);
- return Status::OK();
- };
- auto callback = [&](const StatusWith<Timestamp>& theResult,
- const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- result = theResult;
- operationsOnCompletion = theOperations;
- };
-
- _applier.reset(new Applier(&getExecutor(), operationsToApply, apply, callback));
- _applier->start();
- _applier->wait();
+ result = theResult;
+ operationsOnCompletion = theOperations;
+ };
+ _applier.reset(new Applier(&getExecutor(), operationsToApply, apply, callback));
+ _applier->start();
+ _applier->wait();
+
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_FALSE(areWritesReplicationOnOperationContext);
+ ASSERT_TRUE(isLockBatchWriter);
+ ASSERT_EQUALS(operationsToApply.size(), operationsApplied.size());
+ ASSERT_EQUALS(operationsToApply[0], operationsApplied[0]);
+ ASSERT_EQUALS(operationsToApply[1], operationsApplied[1]);
+ ASSERT_EQUALS(operationsToApply[2], operationsApplied[2]);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(operationsToApply[2]["ts"].timestamp(), result.getValue());
+ ASSERT_TRUE(operationsOnCompletion.empty());
+}
+
+void ApplierTest::_testApplyOperationFailed(size_t opIndex, stdx::function<Status()> fail) {
+    // Bogus operation codes.
+ Applier::Operations operationsToApply{
+ BSON("op"
+ << "a"
+ << "ts" << Timestamp(Seconds(123), 0)),
+ BSON("op"
+ << "b"
+ << "ts" << Timestamp(Seconds(456), 0)),
+ BSON("op"
+ << "c"
+ << "ts" << Timestamp(Seconds(789), 0)),
+ };
+ stdx::mutex mutex;
+ StatusWith<Timestamp> result = getDetectableErrorStatus();
+ Applier::Operations operationsApplied;
+ Applier::Operations operationsOnCompletion;
+ auto apply = [&](OperationContext* txn, const BSONObj& operation) {
stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_EQUALS(opIndex, operationsApplied.size());
- size_t i = 0;
- for (const auto& operation : operationsApplied) {
- ASSERT_EQUALS(operationsToApply[i], operation);
- i++;
+ if (operationsApplied.size() == opIndex) {
+ return fail();
}
- ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
- ASSERT_EQUALS(operationsToApply.size() - opIndex, operationsOnCompletion.size());
- ASSERT_EQUALS(opIndex, i);
- for (const auto& operation : operationsOnCompletion) {
- ASSERT_EQUALS(operationsToApply[i], operation);
- i++;
- }
- }
-
- TEST_F(ApplierTest, ApplyOperationFailedOnFirstOperation) {
- _testApplyOperationFailed(0U, []() {
- return Status(ErrorCodes::OperationFailed, "");
- });
- }
-
- TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnFirstOperation) {
- _testApplyOperationFailed(0U, []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
- }
-
- TEST_F(ApplierTest, ApplyOperationFailedOnSecondOperation) {
- _testApplyOperationFailed(1U, []() {
- return Status(ErrorCodes::OperationFailed, "");
- });
- }
-
- TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnSecondOperation) {
- _testApplyOperationFailed(1U, []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
- }
-
- TEST_F(ApplierTest, ApplyOperationFailedOnLastOperation) {
- _testApplyOperationFailed(2U, []() {
- return Status(ErrorCodes::OperationFailed, "");
- });
- }
-
- TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnLastOperation) {
- _testApplyOperationFailed(2U, []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
- }
+ operationsApplied.push_back(operation);
+ return Status::OK();
+ };
+ auto callback = [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ result = theResult;
+ operationsOnCompletion = theOperations;
+ };
- class ApplyUntilAndPauseTest : public ApplierTest {};
-
- TEST_F(ApplyUntilAndPauseTest, EmptyOperations) {
- auto result =
- applyUntilAndPause(
- &getExecutor(),
- {},
- [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
- Timestamp(Seconds(123), 0),
- [] {},
- [](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {});
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
- }
+ _applier.reset(new Applier(&getExecutor(), operationsToApply, apply, callback));
+ _applier->start();
+ _applier->wait();
- TEST_F(ApplyUntilAndPauseTest, NoOperationsInRange) {
- auto result =
- applyUntilAndPause(
- &getExecutor(),
- {
- BSON("ts" << Timestamp(Seconds(456), 0)),
- BSON("ts" << Timestamp(Seconds(789), 0)),
- },
- [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
- Timestamp(Seconds(123), 0),
- [] {},
- [](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {});
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_EQUALS(opIndex, operationsApplied.size());
+ size_t i = 0;
+ for (const auto& operation : operationsApplied) {
+ ASSERT_EQUALS(operationsToApply[i], operation);
+ i++;
}
-
- TEST_F(ApplyUntilAndPauseTest, OperationMissingTimestampField) {
- auto result =
- applyUntilAndPause(
- &getExecutor(),
- {BSONObj()},
- [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
- Timestamp(Seconds(123), 0),
- [] {},
- [](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {});
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
+ ASSERT_EQUALS(operationsToApply.size() - opIndex, operationsOnCompletion.size());
+ ASSERT_EQUALS(opIndex, i);
+ for (const auto& operation : operationsOnCompletion) {
+ ASSERT_EQUALS(operationsToApply[i], operation);
+ i++;
}
+}
+
+TEST_F(ApplierTest, ApplyOperationFailedOnFirstOperation) {
+ _testApplyOperationFailed(0U, []() { return Status(ErrorCodes::OperationFailed, ""); });
+}
+
+TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnFirstOperation) {
+ _testApplyOperationFailed(0U,
+ []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
+}
+
+TEST_F(ApplierTest, ApplyOperationFailedOnSecondOperation) {
+ _testApplyOperationFailed(1U, []() { return Status(ErrorCodes::OperationFailed, ""); });
+}
+
+TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnSecondOperation) {
+ _testApplyOperationFailed(1U,
+ []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
+}
+
+TEST_F(ApplierTest, ApplyOperationFailedOnLastOperation) {
+ _testApplyOperationFailed(2U, []() { return Status(ErrorCodes::OperationFailed, ""); });
+}
+
+TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnLastOperation) {
+ _testApplyOperationFailed(2U,
+ []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
+}
+
+class ApplyUntilAndPauseTest : public ApplierTest {};
+
+TEST_F(ApplyUntilAndPauseTest, EmptyOperations) {
+ auto result = applyUntilAndPause(
+ &getExecutor(),
+ {},
+ [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
+ Timestamp(Seconds(123), 0),
+ [] {},
+ [](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {});
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
+
+TEST_F(ApplyUntilAndPauseTest, NoOperationsInRange) {
+ auto result = applyUntilAndPause(
+ &getExecutor(),
+ {
+ BSON("ts" << Timestamp(Seconds(456), 0)), BSON("ts" << Timestamp(Seconds(789), 0)),
+ },
+ [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
+ Timestamp(Seconds(123), 0),
+ [] {},
+ [](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {});
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
+
+TEST_F(ApplyUntilAndPauseTest, OperationMissingTimestampField) {
+ auto result = applyUntilAndPause(
+ &getExecutor(),
+ {BSONObj()},
+ [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); },
+ Timestamp(Seconds(123), 0),
+ [] {},
+ [](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {});
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
+
+TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseSingleOperation) {
+ Timestamp ts(Seconds(123), 0);
+ const Operations operationsToApply{BSON("ts" << ts)};
+ stdx::mutex mutex;
+ StatusWith<Timestamp> completionResult = getDetectableErrorStatus();
+ bool pauseCalled = false;
+ Applier::Operations operationsOnCompletion;
+ auto apply = [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); };
+ auto pause = [&] {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ pauseCalled = true;
+ };
+ auto callback = [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ completionResult = theResult;
+ operationsOnCompletion = theOperations;
+ };
- TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseSingleOperation) {
- Timestamp ts(Seconds(123), 0);
- const Operations operationsToApply{BSON("ts" << ts)};
- stdx::mutex mutex;
- StatusWith<Timestamp> completionResult = getDetectableErrorStatus();
- bool pauseCalled = false;
- Applier::Operations operationsOnCompletion;
- auto apply = [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); };
- auto pause = [&] {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- pauseCalled = true;
- };
- auto callback = [&](const StatusWith<Timestamp>& theResult,
- const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- completionResult = theResult;
- operationsOnCompletion = theOperations;
- };
-
- auto result =
- applyUntilAndPause(&getExecutor(), operationsToApply, apply, ts, pause, callback);
- ASSERT_OK(result.getStatus());
- _applier = std::move(result.getValue().first);
- ASSERT_TRUE(_applier);
-
- const Applier::Operations& operationsDiscarded = result.getValue().second;
- ASSERT_TRUE(operationsDiscarded.empty());
-
- _applier->start();
- _applier->wait();
-
+ auto result = applyUntilAndPause(&getExecutor(), operationsToApply, apply, ts, pause, callback);
+ ASSERT_OK(result.getStatus());
+ _applier = std::move(result.getValue().first);
+ ASSERT_TRUE(_applier);
+
+ const Applier::Operations& operationsDiscarded = result.getValue().second;
+ ASSERT_TRUE(operationsDiscarded.empty());
+
+ _applier->start();
+ _applier->wait();
+
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_TRUE(pauseCalled);
+ ASSERT_OK(completionResult.getStatus());
+ ASSERT_EQUALS(ts, completionResult.getValue());
+ ASSERT_TRUE(operationsOnCompletion.empty());
+}
+
+TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseSingleOperationTimestampNotInOperations) {
+ Timestamp ts(Seconds(123), 0);
+ const Operations operationsToApply{BSON("ts" << ts)};
+ stdx::mutex mutex;
+ StatusWith<Timestamp> completionResult = getDetectableErrorStatus();
+ bool pauseCalled = false;
+ Applier::Operations operationsOnCompletion;
+ auto apply = [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); };
+ auto pause = [&] {
stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_TRUE(pauseCalled);
- ASSERT_OK(completionResult.getStatus());
- ASSERT_EQUALS(ts, completionResult.getValue());
- ASSERT_TRUE(operationsOnCompletion.empty());
- }
+ pauseCalled = true;
+ };
+ auto callback = [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ completionResult = theResult;
+ operationsOnCompletion = theOperations;
+ };
- TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseSingleOperationTimestampNotInOperations) {
- Timestamp ts(Seconds(123), 0);
- const Operations operationsToApply{BSON("ts" << ts)};
- stdx::mutex mutex;
- StatusWith<Timestamp> completionResult = getDetectableErrorStatus();
- bool pauseCalled = false;
- Applier::Operations operationsOnCompletion;
- auto apply = [](OperationContext* txn, const BSONObj& operation) { return Status::OK(); };
- auto pause = [&] {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- pauseCalled = true;
- };
- auto callback = [&](const StatusWith<Timestamp>& theResult,
- const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- completionResult = theResult;
- operationsOnCompletion = theOperations;
- };
-
- Timestamp ts2(Seconds(456), 0);
- auto result =
- applyUntilAndPause(&getExecutor(), operationsToApply, apply, ts2, pause, callback);
- ASSERT_OK(result.getStatus());
- _applier = std::move(result.getValue().first);
- ASSERT_TRUE(_applier);
-
- const Applier::Operations& operationsDiscarded = result.getValue().second;
- ASSERT_TRUE(operationsDiscarded.empty());
-
- _applier->start();
- _applier->wait();
+ Timestamp ts2(Seconds(456), 0);
+ auto result =
+ applyUntilAndPause(&getExecutor(), operationsToApply, apply, ts2, pause, callback);
+ ASSERT_OK(result.getStatus());
+ _applier = std::move(result.getValue().first);
+ ASSERT_TRUE(_applier);
+
+ const Applier::Operations& operationsDiscarded = result.getValue().second;
+ ASSERT_TRUE(operationsDiscarded.empty());
+
+ _applier->start();
+ _applier->wait();
+
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_FALSE(pauseCalled);
+ ASSERT_OK(completionResult.getStatus());
+ ASSERT_EQUALS(ts, completionResult.getValue());
+ ASSERT_TRUE(operationsOnCompletion.empty());
+}
+
+TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseSingleOperationAppliedFailed) {
+ Timestamp ts(Seconds(123), 0);
+ const Operations operationsToApply{BSON("ts" << ts)};
+ stdx::mutex mutex;
+ StatusWith<Timestamp> completionResult = getDetectableErrorStatus();
+ bool pauseCalled = false;
+ Applier::Operations operationsOnCompletion;
+ auto apply = [](OperationContext* txn, const BSONObj& operation) {
+ return Status(ErrorCodes::OperationFailed, "");
+ };
+ auto pause = [&] {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ pauseCalled = true;
+ };
+ auto callback = [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ completionResult = theResult;
+ operationsOnCompletion = theOperations;
+ };
+ auto result = applyUntilAndPause(&getExecutor(), operationsToApply, apply, ts, pause, callback);
+ ASSERT_OK(result.getStatus());
+ _applier = std::move(result.getValue().first);
+ ASSERT_TRUE(_applier);
+
+ const Applier::Operations& operationsDiscarded = result.getValue().second;
+ ASSERT_TRUE(operationsDiscarded.empty());
+
+ _applier->start();
+ _applier->wait();
+
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_FALSE(pauseCalled);
+ ASSERT_NOT_OK(completionResult.getStatus());
+ ASSERT_FALSE(operationsOnCompletion.empty());
+}
+
+void _testApplyUntilAndPauseDiscardOperations(ReplicationExecutor* executor,
+ const Timestamp& ts,
+ bool expectedPauseCalled) {
+ Applier::Operations operationsToApply{
+ BSON("op"
+ << "a"
+ << "ts" << Timestamp(Seconds(123), 0)),
+ BSON("op"
+ << "b"
+ << "ts" << Timestamp(Seconds(456), 0)),
+ BSON("op"
+ << "c"
+ << "ts" << Timestamp(Seconds(789), 0)),
+ };
+ stdx::mutex mutex;
+ StatusWith<Timestamp> completionResult = ApplyUntilAndPauseTest::getDetectableErrorStatus();
+ bool pauseCalled = false;
+ Applier::Operations operationsApplied;
+ Applier::Operations operationsOnCompletion;
+ auto apply = [&](OperationContext* txn, const BSONObj& operation) {
stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_FALSE(pauseCalled);
- ASSERT_OK(completionResult.getStatus());
- ASSERT_EQUALS(ts, completionResult.getValue());
- ASSERT_TRUE(operationsOnCompletion.empty());
- }
+ operationsApplied.push_back(operation);
+ return Status::OK();
+ };
+ auto pause = [&] {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ pauseCalled = true;
+ };
+ auto callback = [&](const StatusWith<Timestamp>& theResult, const Operations& theOperations) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ completionResult = theResult;
+ operationsOnCompletion = theOperations;
+ };
- TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseSingleOperationAppliedFailed) {
- Timestamp ts(Seconds(123), 0);
- const Operations operationsToApply{BSON("ts" << ts)};
- stdx::mutex mutex;
- StatusWith<Timestamp> completionResult = getDetectableErrorStatus();
- bool pauseCalled = false;
- Applier::Operations operationsOnCompletion;
- auto apply = [](OperationContext* txn, const BSONObj& operation) {
- return Status(ErrorCodes::OperationFailed, "");
- };
- auto pause = [&] {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- pauseCalled = true;
- };
- auto callback = [&](const StatusWith<Timestamp>& theResult,
- const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- completionResult = theResult;
- operationsOnCompletion = theOperations;
- };
-
- auto result =
- applyUntilAndPause(&getExecutor(), operationsToApply, apply, ts, pause, callback);
- ASSERT_OK(result.getStatus());
- _applier = std::move(result.getValue().first);
- ASSERT_TRUE(_applier);
-
- const Applier::Operations& operationsDiscarded = result.getValue().second;
- ASSERT_TRUE(operationsDiscarded.empty());
-
- _applier->start();
- _applier->wait();
+ auto result = applyUntilAndPause(executor, operationsToApply, apply, ts, pause, callback);
+ ASSERT_OK(result.getStatus());
+ ASSERT_TRUE(result.getValue().first);
+ Applier& applier = *result.getValue().first;
- stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_FALSE(pauseCalled);
- ASSERT_NOT_OK(completionResult.getStatus());
- ASSERT_FALSE(operationsOnCompletion.empty());
- }
+ const Applier::Operations& operationsDiscarded = result.getValue().second;
+ ASSERT_EQUALS(1U, operationsDiscarded.size());
+ ASSERT_EQUALS(operationsToApply[2], operationsDiscarded[0]);
- void _testApplyUntilAndPauseDiscardOperations(ReplicationExecutor* executor,
- const Timestamp& ts,
- bool expectedPauseCalled) {
-
- Applier::Operations operationsToApply{
- BSON("op" << "a" << "ts" << Timestamp(Seconds(123), 0)),
- BSON("op" << "b" << "ts" << Timestamp(Seconds(456), 0)),
- BSON("op" << "c" << "ts" << Timestamp(Seconds(789), 0)),
- };
- stdx::mutex mutex;
- StatusWith<Timestamp> completionResult =
- ApplyUntilAndPauseTest::getDetectableErrorStatus();
- bool pauseCalled = false;
- Applier::Operations operationsApplied;
- Applier::Operations operationsOnCompletion;
- auto apply = [&](OperationContext* txn, const BSONObj& operation) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- operationsApplied.push_back(operation);
- return Status::OK();
- };
- auto pause = [&] {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- pauseCalled = true;
- };
- auto callback = [&](const StatusWith<Timestamp>& theResult,
- const Operations& theOperations) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- completionResult = theResult;
- operationsOnCompletion = theOperations;
- };
-
- auto result =
- applyUntilAndPause(executor, operationsToApply, apply, ts, pause, callback);
- ASSERT_OK(result.getStatus());
- ASSERT_TRUE(result.getValue().first);
- Applier& applier = *result.getValue().first;
-
- const Applier::Operations& operationsDiscarded = result.getValue().second;
- ASSERT_EQUALS(1U, operationsDiscarded.size());
- ASSERT_EQUALS(operationsToApply[2], operationsDiscarded[0]);
-
- applier.start();
- applier.wait();
+ applier.start();
+ applier.wait();
- stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_EQUALS(2U, operationsApplied.size());
- ASSERT_EQUALS(operationsToApply[0], operationsApplied[0]);
- ASSERT_EQUALS(operationsToApply[1], operationsApplied[1]);
- ASSERT_EQUALS(expectedPauseCalled, pauseCalled);
- ASSERT_OK(completionResult.getStatus());
- ASSERT_TRUE(operationsOnCompletion.empty());
- }
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_EQUALS(2U, operationsApplied.size());
+ ASSERT_EQUALS(operationsToApply[0], operationsApplied[0]);
+ ASSERT_EQUALS(operationsToApply[1], operationsApplied[1]);
+ ASSERT_EQUALS(expectedPauseCalled, pauseCalled);
+ ASSERT_OK(completionResult.getStatus());
+ ASSERT_TRUE(operationsOnCompletion.empty());
+}
- TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseDiscardOperationsTimestampInOperations) {
- _testApplyUntilAndPauseDiscardOperations(&getExecutor(),
- Timestamp(Seconds(456), 0),
- true);
- }
+TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseDiscardOperationsTimestampInOperations) {
+ _testApplyUntilAndPauseDiscardOperations(&getExecutor(), Timestamp(Seconds(456), 0), true);
+}
- TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseDiscardOperationsTimestampNotInOperations) {
- _testApplyUntilAndPauseDiscardOperations(&getExecutor(),
- Timestamp(Seconds(500), 0),
- false);
- }
+TEST_F(ApplyUntilAndPauseTest, ApplyUntilAndPauseDiscardOperationsTimestampNotInOperations) {
+ _testApplyUntilAndPauseDiscardOperations(&getExecutor(), Timestamp(Seconds(500), 0), false);
+}
-} // namespace
+} // namespace
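Several of the tests above coordinate the test thread and the executor's database-worker thread through unittest::Barrier and countDownAndWait(). For readers unfamiliar with that helper, the following is a self-contained sketch of the same two-party rendezvous built on the standard library; SimpleBarrier is a hypothetical name, not the mongo class.

#include <condition_variable>
#include <cstddef>
#include <mutex>

class SimpleBarrier {
public:
    explicit SimpleBarrier(std::size_t threadCount)
        : _threadCount(threadCount), _remaining(threadCount) {}

    // Blocks until 'threadCount' threads have called this function, then
    // resets itself so the barrier can be reused for the next generation.
    void countDownAndWait() {
        std::unique_lock<std::mutex> lk(_mutex);
        const std::size_t arrivedInGeneration = _generation;
        if (--_remaining == 0) {
            ++_generation;
            _remaining = _threadCount;
            _cond.notify_all();
            return;
        }
        _cond.wait(lk, [&] { return _generation != arrivedInGeneration; });
    }

private:
    std::mutex _mutex;
    std::condition_variable _cond;
    const std::size_t _threadCount;
    std::size_t _remaining;
    std::size_t _generation = 0;
};

// Usage mirroring the tests: both sides call countDownAndWait() and neither
// proceeds until the other has arrived.
//   SimpleBarrier barrier(2U);
//   std::thread worker([&] { barrier.countDownAndWait(); });
//   barrier.countDownAndWait();
//   worker.join();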
diff --git a/src/mongo/db/repl/base_cloner.h b/src/mongo/db/repl/base_cloner.h
index 8d6b8be8928..c6de09f8d31 100644
--- a/src/mongo/db/repl/base_cloner.h
+++ b/src/mongo/db/repl/base_cloner.h
@@ -34,51 +34,49 @@
namespace mongo {
namespace repl {
+/**
+ * Used by cloner test fixture to centralize life cycle testing.
+ *
+ * Life cycle interface for collection and database cloners.
+ */
+class BaseCloner {
+public:
/**
- * Used by cloner test fixture to centralize life cycle testing.
- *
- * Life cycle interface for collection and database cloners.
+ * Callback function to report final status of cloning.
*/
- class BaseCloner {
- public:
-
- /**
- * Callback function to report final status of cloning.
- */
- using CallbackFn = stdx::function<void (const Status&)>;
+ using CallbackFn = stdx::function<void(const Status&)>;
- virtual ~BaseCloner() { }
+ virtual ~BaseCloner() {}
- /**
- * Returns diagnostic information.
- */
- virtual std::string getDiagnosticString() const = 0;
-
- /**
- * Returns true if the cloner has been started (but has not completed).
- */
- virtual bool isActive() const = 0;
+ /**
+ * Returns diagnostic information.
+ */
+ virtual std::string getDiagnosticString() const = 0;
- /**
- * Starts cloning by scheduling initial command to be run by the executor.
- */
- virtual Status start() = 0;
+ /**
+ * Returns true if the cloner has been started (but has not completed).
+ */
+ virtual bool isActive() const = 0;
- /**
- * Cancels current remote command request.
- * Returns immediately if cloner is not active.
- *
- * Callback function may be invoked with an ErrorCodes::CallbackCanceled status.
- */
- virtual void cancel() = 0;
+ /**
+ * Starts cloning by scheduling initial command to be run by the executor.
+ */
+ virtual Status start() = 0;
- /**
- * Waits for active remote commands and database worker to complete.
- * Returns immediately if cloner is not active.
- */
- virtual void wait() = 0;
+ /**
+ * Cancels current remote command request.
+ * Returns immediately if cloner is not active.
+ *
+ * Callback function may be invoked with an ErrorCodes::CallbackCanceled status.
+ */
+ virtual void cancel() = 0;
- };
+ /**
+ * Waits for active remote commands and database worker to complete.
+ * Returns immediately if cloner is not active.
+ */
+ virtual void wait() = 0;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
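The interface above is deliberately small: a cloner exposes a diagnostic string plus the start/cancel/wait/isActive life cycle that the test fixture's testLifeCycle() exercises. As an illustration of that contract only (NoopCloner is hypothetical, and it uses bool and std::mutex in place of Status and the stdx wrappers), a trivial conforming stub could look like this:

#include <mutex>
#include <string>

class NoopCloner {
public:
    std::string getDiagnosticString() const {
        return "NoopCloner: nothing to clone";
    }

    bool isActive() const {
        std::lock_guard<std::mutex> lk(_mutex);
        return _active;
    }

    // Returns false (standing in for a not-OK Status) if already started.
    bool start() {
        std::lock_guard<std::mutex> lk(_mutex);
        if (_active) {
            return false;
        }
        _active = true;
        return true;
    }

    // Returns immediately when the cloner is not active, as the contract requires.
    void cancel() {
        std::lock_guard<std::mutex> lk(_mutex);
        _active = false;
    }

    void wait() {
        // This stub schedules no remote commands or database work, so there is
        // nothing to wait for; a real cloner would block here until completion.
    }

private:
    mutable std::mutex _mutex;
    bool _active = false;
};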
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index a8df9deb6ba..2f6ca23ab63 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -38,224 +38,215 @@
namespace mongo {
namespace repl {
- const HostAndPort BaseClonerTest::target("localhost", -1);
- const NamespaceString BaseClonerTest::nss("db.coll");
- const BSONObj BaseClonerTest::idIndexSpec =
- BSON("v" << 1 << "key" << BSON("_id" << 1) << "name" << "_id_" << "ns" << nss.ns());
-
- // static
- BSONObj BaseClonerTest::createCursorResponse(CursorId cursorId,
- const std::string& ns,
- const BSONArray& docs,
- const char* batchFieldName) {
- return BSON("cursor" << BSON("id" << cursorId <<
- "ns" << ns <<
- batchFieldName << docs) <<
- "ok" << 1);
- }
-
- // static
- BSONObj BaseClonerTest::createCursorResponse(CursorId cursorId,
- const BSONArray& docs,
- const char* batchFieldName) {
- return createCursorResponse(cursorId, nss.toString(), docs, batchFieldName);
- }
-
- // static
- BSONObj BaseClonerTest::createCursorResponse(CursorId cursorId,
- const BSONArray& docs) {
- return createCursorResponse(cursorId, docs, "firstBatch");
- }
-
- // static
- BSONObj BaseClonerTest::createListCollectionsResponse(CursorId cursorId,
- const BSONArray& colls,
- const char* fieldName) {
- return createCursorResponse(cursorId, "test.$cmd.listCollections.coll", colls, fieldName);
- }
-
- // static
- BSONObj BaseClonerTest::createListCollectionsResponse(CursorId cursorId,
- const BSONArray& colls) {
- return createListCollectionsResponse(cursorId, colls, "firstBatch");
- }
-
- // static
- BSONObj BaseClonerTest::createListIndexesResponse(CursorId cursorId,
- const BSONArray& specs,
- const char* batchFieldName) {
- return createCursorResponse(cursorId, "test.$cmd.listIndexes.coll", specs, batchFieldName);
- }
-
- // static
- BSONObj BaseClonerTest::createListIndexesResponse(CursorId cursorId,
- const BSONArray& specs) {
- return createListIndexesResponse(cursorId, specs, "firstBatch");
- }
-
- BaseClonerTest::BaseClonerTest()
- : _mutex(),
- _setStatusCondition(),
- _status(getDetectableErrorStatus()) { }
-
- void BaseClonerTest::setUp() {
- ReplicationExecutorTest::setUp();
- clear();
- launchExecutorThread();
- storageInterface.reset(new ClonerStorageInterfaceMock());
- }
-
- void BaseClonerTest::tearDown() {
- ReplicationExecutorTest::tearDown();
- storageInterface.reset();
- }
-
- void BaseClonerTest::clear() {
- _status = getDetectableErrorStatus();
- }
-
- void BaseClonerTest::setStatus(const Status& status) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _status = status;
- _setStatusCondition.notify_all();
- }
-
- const Status& BaseClonerTest::getStatus() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- return _status;
- }
-
- void BaseClonerTest::scheduleNetworkResponse(NetworkOperationIterator noi,
- const BSONObj& obj) {
- auto net = getNet();
- Milliseconds millis(0);
- RemoteCommandResponse response(obj, millis);
- ReplicationExecutor::ResponseStatus responseStatus(response);
- net->scheduleResponse(noi, net->now(), responseStatus);
- }
-
- void BaseClonerTest::scheduleNetworkResponse(NetworkOperationIterator noi,
- ErrorCodes::Error code,
- const std::string& reason) {
- auto net = getNet();
- ReplicationExecutor::ResponseStatus responseStatus(code, reason);
- net->scheduleResponse(noi, net->now(), responseStatus);
- }
-
- void BaseClonerTest::scheduleNetworkResponse(const BSONObj& obj) {
- ASSERT_TRUE(getNet()->hasReadyRequests());
- scheduleNetworkResponse(getNet()->getNextReadyRequest(), obj);
- }
-
- void BaseClonerTest::scheduleNetworkResponse(ErrorCodes::Error code,
- const std::string& reason) {
- ASSERT_TRUE(getNet()->hasReadyRequests());
- scheduleNetworkResponse(getNet()->getNextReadyRequest(), code, reason);
- }
-
- void BaseClonerTest::processNetworkResponse(const BSONObj& obj) {
- scheduleNetworkResponse(obj);
- finishProcessingNetworkResponse();
- }
-
- void BaseClonerTest::processNetworkResponse(ErrorCodes::Error code,
- const std::string& reason) {
- scheduleNetworkResponse(code, reason);
- finishProcessingNetworkResponse();
- }
-
- void BaseClonerTest::finishProcessingNetworkResponse() {
- clear();
- getNet()->runReadyNetworkOperations();
- }
-
- void BaseClonerTest::testLifeCycle() {
- // GetDiagnosticString
- ASSERT_FALSE(getCloner()->getDiagnosticString().empty());
-
- // IsActiveAfterStart
- ASSERT_FALSE(getCloner()->isActive());
- ASSERT_OK(getCloner()->start());
- ASSERT_TRUE(getCloner()->isActive());
- tearDown();
-
- // StartWhenActive
- setUp();
- ASSERT_OK(getCloner()->start());
- ASSERT_TRUE(getCloner()->isActive());
- ASSERT_NOT_OK(getCloner()->start());
- ASSERT_TRUE(getCloner()->isActive());
- tearDown();
-
- // CancelWithoutStart
- setUp();
- ASSERT_FALSE(getCloner()->isActive());
- getCloner()->cancel();
- ASSERT_FALSE(getCloner()->isActive());
- tearDown();
-
- // WaitWithoutStart
- setUp();
- ASSERT_FALSE(getCloner()->isActive());
- getCloner()->wait();
- ASSERT_FALSE(getCloner()->isActive());
- tearDown();
-
- // ShutdownBeforeStart
- setUp();
- getExecutor().shutdown();
- ASSERT_NOT_OK(getCloner()->start());
- ASSERT_FALSE(getCloner()->isActive());
- tearDown();
-
- // StartAndCancel
- setUp();
- ASSERT_OK(getCloner()->start());
- scheduleNetworkResponse(BSON("ok" << 1));
- getCloner()->cancel();
- finishProcessingNetworkResponse();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
- ASSERT_FALSE(getCloner()->isActive());
- tearDown();
-
- // StartButShutdown
- setUp();
- ASSERT_OK(getCloner()->start());
- scheduleNetworkResponse(BSON("ok" << 1));
- getExecutor().shutdown();
- // Network interface should not deliver mock response to callback.
- finishProcessingNetworkResponse();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
- ASSERT_FALSE(getCloner()->isActive());
- }
-
- Status ClonerStorageInterfaceMock::beginCollection(OperationContext* txn,
- const NamespaceString& nss,
- const CollectionOptions& options,
- const std::vector<BSONObj>& specs) {
- return beginCollectionFn ? beginCollectionFn(txn, nss, options, specs) : Status::OK();
- }
-
- Status ClonerStorageInterfaceMock::insertDocuments(OperationContext* txn,
- const NamespaceString& nss,
- const std::vector<BSONObj>& docs) {
- return insertDocumentsFn ? insertDocumentsFn(txn, nss, docs) : Status::OK();
- }
-
- Status ClonerStorageInterfaceMock::commitCollection(OperationContext* txn,
- const NamespaceString& nss) {
- return Status::OK();
- }
-
- Status ClonerStorageInterfaceMock::insertMissingDoc(OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& doc) {
- return Status::OK();
- }
-
- Status ClonerStorageInterfaceMock::dropUserDatabases(OperationContext* txn) {
- return dropUserDatabasesFn ? dropUserDatabasesFn(txn) : Status::OK();
- }
-
-} // namespace repl
-} // namespace mongo
+const HostAndPort BaseClonerTest::target("localhost", -1);
+const NamespaceString BaseClonerTest::nss("db.coll");
+const BSONObj BaseClonerTest::idIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
+ << "_id_"
+ << "ns" << nss.ns());
+
+// static
+BSONObj BaseClonerTest::createCursorResponse(CursorId cursorId,
+ const std::string& ns,
+ const BSONArray& docs,
+ const char* batchFieldName) {
+ return BSON("cursor" << BSON("id" << cursorId << "ns" << ns << batchFieldName << docs) << "ok"
+ << 1);
+}
+
+// static
+BSONObj BaseClonerTest::createCursorResponse(CursorId cursorId,
+ const BSONArray& docs,
+ const char* batchFieldName) {
+ return createCursorResponse(cursorId, nss.toString(), docs, batchFieldName);
+}
+
+// static
+BSONObj BaseClonerTest::createCursorResponse(CursorId cursorId, const BSONArray& docs) {
+ return createCursorResponse(cursorId, docs, "firstBatch");
+}
+
+// static
+BSONObj BaseClonerTest::createListCollectionsResponse(CursorId cursorId,
+ const BSONArray& colls,
+ const char* fieldName) {
+ return createCursorResponse(cursorId, "test.$cmd.listCollections.coll", colls, fieldName);
+}
+
+// static
+BSONObj BaseClonerTest::createListCollectionsResponse(CursorId cursorId, const BSONArray& colls) {
+ return createListCollectionsResponse(cursorId, colls, "firstBatch");
+}
+
+// static
+BSONObj BaseClonerTest::createListIndexesResponse(CursorId cursorId,
+ const BSONArray& specs,
+ const char* batchFieldName) {
+ return createCursorResponse(cursorId, "test.$cmd.listIndexes.coll", specs, batchFieldName);
+}
+
+// static
+BSONObj BaseClonerTest::createListIndexesResponse(CursorId cursorId, const BSONArray& specs) {
+ return createListIndexesResponse(cursorId, specs, "firstBatch");
+}
+
+BaseClonerTest::BaseClonerTest()
+ : _mutex(), _setStatusCondition(), _status(getDetectableErrorStatus()) {}
+
+void BaseClonerTest::setUp() {
+ ReplicationExecutorTest::setUp();
+ clear();
+ launchExecutorThread();
+ storageInterface.reset(new ClonerStorageInterfaceMock());
+}
+
+void BaseClonerTest::tearDown() {
+ ReplicationExecutorTest::tearDown();
+ storageInterface.reset();
+}
+
+void BaseClonerTest::clear() {
+ _status = getDetectableErrorStatus();
+}
+
+void BaseClonerTest::setStatus(const Status& status) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _status = status;
+ _setStatusCondition.notify_all();
+}
+
+const Status& BaseClonerTest::getStatus() const {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ return _status;
+}
+
+void BaseClonerTest::scheduleNetworkResponse(NetworkOperationIterator noi, const BSONObj& obj) {
+ auto net = getNet();
+ Milliseconds millis(0);
+ RemoteCommandResponse response(obj, millis);
+ ReplicationExecutor::ResponseStatus responseStatus(response);
+ net->scheduleResponse(noi, net->now(), responseStatus);
+}
+
+void BaseClonerTest::scheduleNetworkResponse(NetworkOperationIterator noi,
+ ErrorCodes::Error code,
+ const std::string& reason) {
+ auto net = getNet();
+ ReplicationExecutor::ResponseStatus responseStatus(code, reason);
+ net->scheduleResponse(noi, net->now(), responseStatus);
+}
+
+void BaseClonerTest::scheduleNetworkResponse(const BSONObj& obj) {
+ ASSERT_TRUE(getNet()->hasReadyRequests());
+ scheduleNetworkResponse(getNet()->getNextReadyRequest(), obj);
+}
+
+void BaseClonerTest::scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason) {
+ ASSERT_TRUE(getNet()->hasReadyRequests());
+ scheduleNetworkResponse(getNet()->getNextReadyRequest(), code, reason);
+}
+
+void BaseClonerTest::processNetworkResponse(const BSONObj& obj) {
+ scheduleNetworkResponse(obj);
+ finishProcessingNetworkResponse();
+}
+
+void BaseClonerTest::processNetworkResponse(ErrorCodes::Error code, const std::string& reason) {
+ scheduleNetworkResponse(code, reason);
+ finishProcessingNetworkResponse();
+}
+
+void BaseClonerTest::finishProcessingNetworkResponse() {
+ clear();
+ getNet()->runReadyNetworkOperations();
+}
+
+void BaseClonerTest::testLifeCycle() {
+ // GetDiagnosticString
+ ASSERT_FALSE(getCloner()->getDiagnosticString().empty());
+
+ // IsActiveAfterStart
+ ASSERT_FALSE(getCloner()->isActive());
+ ASSERT_OK(getCloner()->start());
+ ASSERT_TRUE(getCloner()->isActive());
+ tearDown();
+
+ // StartWhenActive
+ setUp();
+ ASSERT_OK(getCloner()->start());
+ ASSERT_TRUE(getCloner()->isActive());
+ ASSERT_NOT_OK(getCloner()->start());
+ ASSERT_TRUE(getCloner()->isActive());
+ tearDown();
+
+ // CancelWithoutStart
+ setUp();
+ ASSERT_FALSE(getCloner()->isActive());
+ getCloner()->cancel();
+ ASSERT_FALSE(getCloner()->isActive());
+ tearDown();
+
+ // WaitWithoutStart
+ setUp();
+ ASSERT_FALSE(getCloner()->isActive());
+ getCloner()->wait();
+ ASSERT_FALSE(getCloner()->isActive());
+ tearDown();
+
+ // ShutdownBeforeStart
+ setUp();
+ getExecutor().shutdown();
+ ASSERT_NOT_OK(getCloner()->start());
+ ASSERT_FALSE(getCloner()->isActive());
+ tearDown();
+
+ // StartAndCancel
+ setUp();
+ ASSERT_OK(getCloner()->start());
+ scheduleNetworkResponse(BSON("ok" << 1));
+ getCloner()->cancel();
+ finishProcessingNetworkResponse();
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
+ ASSERT_FALSE(getCloner()->isActive());
+ tearDown();
+
+ // StartButShutdown
+ setUp();
+ ASSERT_OK(getCloner()->start());
+ scheduleNetworkResponse(BSON("ok" << 1));
+ getExecutor().shutdown();
+ // Network interface should not deliver mock response to callback.
+ finishProcessingNetworkResponse();
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
+ ASSERT_FALSE(getCloner()->isActive());
+}
+
+Status ClonerStorageInterfaceMock::beginCollection(OperationContext* txn,
+ const NamespaceString& nss,
+ const CollectionOptions& options,
+ const std::vector<BSONObj>& specs) {
+ return beginCollectionFn ? beginCollectionFn(txn, nss, options, specs) : Status::OK();
+}
+
+Status ClonerStorageInterfaceMock::insertDocuments(OperationContext* txn,
+ const NamespaceString& nss,
+ const std::vector<BSONObj>& docs) {
+ return insertDocumentsFn ? insertDocumentsFn(txn, nss, docs) : Status::OK();
+}
+
+Status ClonerStorageInterfaceMock::commitCollection(OperationContext* txn,
+ const NamespaceString& nss) {
+ return Status::OK();
+}
+
+Status ClonerStorageInterfaceMock::insertMissingDoc(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& doc) {
+ return Status::OK();
+}
+
+Status ClonerStorageInterfaceMock::dropUserDatabases(OperationContext* txn) {
+ return dropUserDatabasesFn ? dropUserDatabasesFn(txn) : Status::OK();
+}
+
+} // namespace repl
+} // namespace mongo
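The createCursorResponse family above assembles the same document shape that cursor-returning commands (find, listCollections, listIndexes) produce on the wire, which is what lets a test feed canned batches to a cloner through the mock network. A small illustration, assuming the fixture's declarations are in scope (the document names and values are arbitrary):

    const BSONArray docs = BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2));
    const BSONObj reply = BaseClonerTest::createCursorResponse(0LL, "db.coll", docs, "firstBatch");
    // 'reply' is equivalent to:
    //   { cursor: { id: 0, ns: "db.coll", firstBatch: [ { _id: 1 }, { _id: 2 } ] }, ok: 1 }
    // Passing it to scheduleNetworkResponse()/processNetworkResponse() satisfies a
    // cloner's outstanding cursor-establishing command with those two documents.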
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.h b/src/mongo/db/repl/base_cloner_test_fixture.h
index a7e5f68e448..cab5c517916 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.h
+++ b/src/mongo/db/repl/base_cloner_test_fixture.h
@@ -44,133 +44,126 @@
namespace mongo {
- struct CollectionOptions;
- class OperationContext;
+struct CollectionOptions;
+class OperationContext;
namespace repl {
- class BaseCloner;
- class ClonerStorageInterfaceMock;
-
- class BaseClonerTest : public ReplicationExecutorTest {
- public:
- typedef executor::NetworkInterfaceMock::NetworkOperationIterator NetworkOperationIterator;
-
- /**
- * Creates a cursor response with given array of documents.
- */
- static BSONObj createCursorResponse(CursorId cursorId,
- const std::string& ns,
- const BSONArray& docs,
- const char* batchFieldName);
-
- static BSONObj createCursorResponse(CursorId cursorId,
- const BSONArray& docs,
- const char* batchFieldName);
-
- static BSONObj createCursorResponse(CursorId cursorId,
- const BSONArray& docs);
-
- /**
- * Creates a listCollections response with given array of index specs.
- */
- static BSONObj createListCollectionsResponse(CursorId cursorId,
- const BSONArray& colls,
- const char* batchFieldName);
-
- static BSONObj createListCollectionsResponse(CursorId cursorId, const BSONArray& colls);
-
- /**
- * Creates a listIndexes response with given array of index specs.
- */
- static BSONObj createListIndexesResponse(CursorId cursorId,
- const BSONArray& specs,
- const char* batchFieldName);
+class BaseCloner;
+class ClonerStorageInterfaceMock;
+
+class BaseClonerTest : public ReplicationExecutorTest {
+public:
+ typedef executor::NetworkInterfaceMock::NetworkOperationIterator NetworkOperationIterator;
+
+ /**
+ * Creates a cursor response with given array of documents.
+ */
+ static BSONObj createCursorResponse(CursorId cursorId,
+ const std::string& ns,
+ const BSONArray& docs,
+ const char* batchFieldName);
- static BSONObj createListIndexesResponse(CursorId cursorId, const BSONArray& specs);
+ static BSONObj createCursorResponse(CursorId cursorId,
+ const BSONArray& docs,
+ const char* batchFieldName);
+
+ static BSONObj createCursorResponse(CursorId cursorId, const BSONArray& docs);
+
+ /**
+ * Creates a listCollections response with given array of index specs.
+ */
+ static BSONObj createListCollectionsResponse(CursorId cursorId,
+ const BSONArray& colls,
+ const char* batchFieldName);
- static const HostAndPort target;
- static const NamespaceString nss;
- static const BSONObj idIndexSpec;
+ static BSONObj createListCollectionsResponse(CursorId cursorId, const BSONArray& colls);
- BaseClonerTest();
+ /**
+ * Creates a listIndexes response with given array of index specs.
+ */
+ static BSONObj createListIndexesResponse(CursorId cursorId,
+ const BSONArray& specs,
+ const char* batchFieldName);
- virtual void clear();
+ static BSONObj createListIndexesResponse(CursorId cursorId, const BSONArray& specs);
- void setStatus(const Status& status);
- const Status& getStatus() const;
+ static const HostAndPort target;
+ static const NamespaceString nss;
+ static const BSONObj idIndexSpec;
- void scheduleNetworkResponse(NetworkOperationIterator noi,
- const BSONObj& obj);
- void scheduleNetworkResponse(NetworkOperationIterator noi,
- ErrorCodes::Error code, const std::string& reason);
- void scheduleNetworkResponse(const BSONObj& obj);
- void scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason);
- void processNetworkResponse(const BSONObj& obj);
- void processNetworkResponse(ErrorCodes::Error code, const std::string& reason);
- void finishProcessingNetworkResponse();
+ BaseClonerTest();
- /**
- * Tests life cycle functionality.
- */
- virtual BaseCloner* getCloner() const = 0;
- void testLifeCycle();
+ virtual void clear();
- protected:
+ void setStatus(const Status& status);
+ const Status& getStatus() const;
- void setUp() override;
- void tearDown() override;
+ void scheduleNetworkResponse(NetworkOperationIterator noi, const BSONObj& obj);
+ void scheduleNetworkResponse(NetworkOperationIterator noi,
+ ErrorCodes::Error code,
+ const std::string& reason);
+ void scheduleNetworkResponse(const BSONObj& obj);
+ void scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason);
+ void processNetworkResponse(const BSONObj& obj);
+ void processNetworkResponse(ErrorCodes::Error code, const std::string& reason);
+ void finishProcessingNetworkResponse();
- std::unique_ptr<ClonerStorageInterfaceMock> storageInterface;
+ /**
+ * Tests life cycle functionality.
+ */
+ virtual BaseCloner* getCloner() const = 0;
+ void testLifeCycle();
- private:
+protected:
+ void setUp() override;
+ void tearDown() override;
- // Protects member data of this base cloner fixture.
- mutable stdx::mutex _mutex;
+ std::unique_ptr<ClonerStorageInterfaceMock> storageInterface;
- stdx::condition_variable _setStatusCondition;
+private:
+ // Protects member data of this base cloner fixture.
+ mutable stdx::mutex _mutex;
- Status _status;
+ stdx::condition_variable _setStatusCondition;
- };
+ Status _status;
+};
- class ClonerStorageInterfaceMock : public CollectionCloner::StorageInterface {
- public:
- using InsertCollectionFn = stdx::function<Status (OperationContext*,
- const NamespaceString&,
- const std::vector<BSONObj>&)>;
- using BeginCollectionFn = stdx::function<Status (OperationContext*,
- const NamespaceString&,
- const CollectionOptions&,
- const std::vector<BSONObj>&)>;
- using InsertMissingDocFn = stdx::function<Status (OperationContext*,
- const NamespaceString&,
- const BSONObj&)>;
- using DropUserDatabases = stdx::function<Status (OperationContext*)>;
+class ClonerStorageInterfaceMock : public CollectionCloner::StorageInterface {
+public:
+ using InsertCollectionFn = stdx::function<Status(
+ OperationContext*, const NamespaceString&, const std::vector<BSONObj>&)>;
+ using BeginCollectionFn = stdx::function<Status(OperationContext*,
+ const NamespaceString&,
+ const CollectionOptions&,
+ const std::vector<BSONObj>&)>;
+ using InsertMissingDocFn =
+ stdx::function<Status(OperationContext*, const NamespaceString&, const BSONObj&)>;
+ using DropUserDatabases = stdx::function<Status(OperationContext*)>;
- Status beginCollection(OperationContext* txn,
- const NamespaceString& nss,
- const CollectionOptions& options,
- const std::vector<BSONObj>& specs) override;
+ Status beginCollection(OperationContext* txn,
+ const NamespaceString& nss,
+ const CollectionOptions& options,
+ const std::vector<BSONObj>& specs) override;
- Status insertDocuments(OperationContext* txn,
- const NamespaceString& nss,
- const std::vector<BSONObj>& docs) override;
+ Status insertDocuments(OperationContext* txn,
+ const NamespaceString& nss,
+ const std::vector<BSONObj>& docs) override;
- Status commitCollection(OperationContext* txn,
- const NamespaceString& nss) override;
+ Status commitCollection(OperationContext* txn, const NamespaceString& nss) override;
- Status insertMissingDoc(OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& doc) override;
+ Status insertMissingDoc(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& doc) override;
- Status dropUserDatabases(OperationContext* txn);
+ Status dropUserDatabases(OperationContext* txn);
- BeginCollectionFn beginCollectionFn;
- InsertCollectionFn insertDocumentsFn;
- InsertMissingDocFn insertMissingDocFn;
- DropUserDatabases dropUserDatabasesFn;
- };
+ BeginCollectionFn beginCollectionFn;
+ InsertCollectionFn insertDocumentsFn;
+ InsertMissingDocFn insertMissingDocFn;
+ DropUserDatabases dropUserDatabasesFn;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
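ClonerStorageInterfaceMock is a hook-based mock: each method delegates to the corresponding *Fn member when one is installed and otherwise succeeds, as the .cpp above shows. A brief sketch of how a test might capture the documents a cloner tries to insert (assuming this header is included; the names are illustrative):

    ClonerStorageInterfaceMock storage;
    std::vector<BSONObj> collected;
    storage.insertDocumentsFn = [&](OperationContext* txn,
                                    const NamespaceString& nss,
                                    const std::vector<BSONObj>& docs) {
        // Record everything the cloner asks us to store, then report success.
        collected.insert(collected.end(), docs.begin(), docs.end());
        return Status::OK();
    };
    // Hooks that are left unset (beginCollectionFn, dropUserDatabasesFn, ...) keep
    // their default behavior of returning Status::OK().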
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 5317ab36305..5c84a724b94 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -55,523 +55,510 @@
namespace mongo {
- using std::string;
+using std::string;
namespace repl {
namespace {
- const char hashFieldName[] = "h";
- int SleepToAllowBatchingMillis = 2;
- const int BatchIsSmallish = 40000; // bytes
-} // namespace
-
- MONGO_FP_DECLARE(rsBgSyncProduce);
-
- BackgroundSync* BackgroundSync::s_instance = 0;
- stdx::mutex BackgroundSync::s_mutex;
-
- //The number and time spent reading batches off the network
- static TimerStats getmoreReplStats;
- static ServerStatusMetricField<TimerStats> displayBatchesRecieved(
- "repl.network.getmores",
- &getmoreReplStats );
- //The oplog entries read via the oplog reader
- static Counter64 opsReadStats;
- static ServerStatusMetricField<Counter64> displayOpsRead( "repl.network.ops",
- &opsReadStats );
- //The bytes read via the oplog reader
- static Counter64 networkByteStats;
- static ServerStatusMetricField<Counter64> displayBytesRead( "repl.network.bytes",
- &networkByteStats );
-
- //The count of items in the buffer
- static Counter64 bufferCountGauge;
- static ServerStatusMetricField<Counter64> displayBufferCount( "repl.buffer.count",
- &bufferCountGauge );
- //The size (bytes) of items in the buffer
- static Counter64 bufferSizeGauge;
- static ServerStatusMetricField<Counter64> displayBufferSize( "repl.buffer.sizeBytes",
- &bufferSizeGauge );
- //The max size (bytes) of the buffer
- static int bufferMaxSizeGauge = 256*1024*1024;
- static ServerStatusMetricField<int> displayBufferMaxSize( "repl.buffer.maxSizeBytes",
- &bufferMaxSizeGauge );
-
-
- BackgroundSyncInterface::~BackgroundSyncInterface() {}
-
- size_t getSize(const BSONObj& o) {
- // SERVER-9808 Avoid Fortify complaint about implicit signed->unsigned conversion
- return static_cast<size_t>(o.objsize());
+const char hashFieldName[] = "h";
+int SleepToAllowBatchingMillis = 2;
+const int BatchIsSmallish = 40000; // bytes
+} // namespace
+
+MONGO_FP_DECLARE(rsBgSyncProduce);
+
+BackgroundSync* BackgroundSync::s_instance = 0;
+stdx::mutex BackgroundSync::s_mutex;
+
+// The number and time spent reading batches off the network
+static TimerStats getmoreReplStats;
+static ServerStatusMetricField<TimerStats> displayBatchesRecieved("repl.network.getmores",
+ &getmoreReplStats);
+// The oplog entries read via the oplog reader
+static Counter64 opsReadStats;
+static ServerStatusMetricField<Counter64> displayOpsRead("repl.network.ops", &opsReadStats);
+// The bytes read via the oplog reader
+static Counter64 networkByteStats;
+static ServerStatusMetricField<Counter64> displayBytesRead("repl.network.bytes", &networkByteStats);
+
+// The count of items in the buffer
+static Counter64 bufferCountGauge;
+static ServerStatusMetricField<Counter64> displayBufferCount("repl.buffer.count",
+ &bufferCountGauge);
+// The size (bytes) of items in the buffer
+static Counter64 bufferSizeGauge;
+static ServerStatusMetricField<Counter64> displayBufferSize("repl.buffer.sizeBytes",
+ &bufferSizeGauge);
+// The max size (bytes) of the buffer
+static int bufferMaxSizeGauge = 256 * 1024 * 1024;
+static ServerStatusMetricField<int> displayBufferMaxSize("repl.buffer.maxSizeBytes",
+ &bufferMaxSizeGauge);
+
+
+BackgroundSyncInterface::~BackgroundSyncInterface() {}
+
+size_t getSize(const BSONObj& o) {
+ // SERVER-9808 Avoid Fortify complaint about implicit signed->unsigned conversion
+ return static_cast<size_t>(o.objsize());
+}
+
+BackgroundSync::BackgroundSync()
+ : _buffer(bufferMaxSizeGauge, &getSize),
+ _lastOpTimeFetched(Timestamp(std::numeric_limits<int>::max(), 0),
+ std::numeric_limits<long long>::max()),
+ _lastAppliedHash(0),
+ _lastFetchedHash(0),
+ _pause(true),
+ _appliedBuffer(true),
+ _replCoord(getGlobalReplicationCoordinator()),
+ _initialSyncRequestedFlag(false),
+ _indexPrefetchConfig(PREFETCH_ALL) {}
+
+BackgroundSync* BackgroundSync::get() {
+ stdx::unique_lock<stdx::mutex> lock(s_mutex);
+ if (s_instance == NULL && !inShutdown()) {
+ s_instance = new BackgroundSync();
}
+ return s_instance;
+}
- BackgroundSync::BackgroundSync() : _buffer(bufferMaxSizeGauge, &getSize),
- _lastOpTimeFetched(
- Timestamp(std::numeric_limits<int>::max(), 0),
- std::numeric_limits<long long>::max()),
- _lastAppliedHash(0),
- _lastFetchedHash(0),
- _pause(true),
- _appliedBuffer(true),
- _replCoord(getGlobalReplicationCoordinator()),
- _initialSyncRequestedFlag(false),
- _indexPrefetchConfig(PREFETCH_ALL) {
- }
+void BackgroundSync::shutdown() {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
- BackgroundSync* BackgroundSync::get() {
- stdx::unique_lock<stdx::mutex> lock(s_mutex);
- if (s_instance == NULL && !inShutdown()) {
- s_instance = new BackgroundSync();
- }
- return s_instance;
- }
+ // Clear the buffer in case the producerThread is waiting in push() due to a full queue.
+ invariant(inShutdown());
+ _buffer.clear();
+ _pause = true;
- void BackgroundSync::shutdown() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+ // Wake up producerThread so it notices that we're in shutdown
+ _appliedBufferCondition.notify_all();
+ _pausedCondition.notify_all();
+}
- // Clear the buffer in case the producerThread is waiting in push() due to a full queue.
- invariant(inShutdown());
- _buffer.clear();
- _pause = true;
+void BackgroundSync::notify(OperationContext* txn) {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
- // Wake up producerThread so it notices that we're in shutdown
+ // If all ops in the buffer have been applied, unblock waitForRepl (if it's waiting)
+ if (_buffer.empty()) {
+ _appliedBuffer = true;
_appliedBufferCondition.notify_all();
- _pausedCondition.notify_all();
}
+}
- void BackgroundSync::notify(OperationContext* txn) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
+void BackgroundSync::producerThread() {
+ Client::initThread("rsBackgroundSync");
+ AuthorizationSession::get(cc())->grantInternalAuthorization();
- // If all ops in the buffer have been applied, unblock waitForRepl (if it's waiting)
- if (_buffer.empty()) {
- _appliedBuffer = true;
- _appliedBufferCondition.notify_all();
+ while (!inShutdown()) {
+ try {
+ _producerThread();
+ } catch (const DBException& e) {
+ std::string msg(str::stream() << "sync producer problem: " << e.toString());
+ error() << msg;
+ _replCoord->setMyHeartbeatMessage(msg);
+ } catch (const std::exception& e2) {
+ severe() << "sync producer exception: " << e2.what();
+ fassertFailed(28546);
}
}
+}
- void BackgroundSync::producerThread() {
- Client::initThread("rsBackgroundSync");
- AuthorizationSession::get(cc())->grantInternalAuthorization();
-
- while (!inShutdown()) {
- try {
- _producerThread();
- }
- catch (const DBException& e) {
- std::string msg(str::stream() << "sync producer problem: " << e.toString());
- error() << msg;
- _replCoord->setMyHeartbeatMessage(msg);
- }
- catch (const std::exception& e2) {
- severe() << "sync producer exception: " << e2.what();
- fassertFailed(28546);
- }
+void BackgroundSync::_producerThread() {
+ const MemberState state = _replCoord->getMemberState();
+ // we want to pause when the state changes to primary
+ if (_replCoord->isWaitingForApplierToDrain() || state.primary()) {
+ if (!_pause) {
+ stop();
}
+ sleepsecs(1);
+ return;
}
- void BackgroundSync::_producerThread() {
- const MemberState state = _replCoord->getMemberState();
- // we want to pause when the state changes to primary
- if (_replCoord->isWaitingForApplierToDrain() || state.primary()) {
- if (!_pause) {
- stop();
- }
- sleepsecs(1);
- return;
- }
-
- // TODO(spencer): Use a condition variable to await loading a config.
- if (state.startup()) {
- // Wait for a config to be loaded
- sleepsecs(1);
- return;
- }
-
- // We need to wait until initial sync has started.
- if (_replCoord->getMyLastOptime().isNull()) {
- sleepsecs(1);
- return;
- }
- // we want to unpause when we're no longer primary
- // start() also loads _lastOpTimeFetched, which we know is set from the "if"
- OperationContextImpl txn;
- if (_pause) {
- start(&txn);
- }
+ // TODO(spencer): Use a condition variable to await loading a config.
+ if (state.startup()) {
+ // Wait for a config to be loaded
+ sleepsecs(1);
+ return;
+ }
- produce(&txn);
+ // We need to wait until initial sync has started.
+ if (_replCoord->getMyLastOptime().isNull()) {
+ sleepsecs(1);
+ return;
+ }
+ // we want to unpause when we're no longer primary
+ // start() also loads _lastOpTimeFetched, which we know is set from the "if"
+ OperationContextImpl txn;
+ if (_pause) {
+ start(&txn);
}
- void BackgroundSync::produce(OperationContext* txn) {
- // this oplog reader does not do a handshake because we don't want the server it's syncing
- // from to track how far it has synced
- {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- if (_lastOpTimeFetched.isNull()) {
- // then we're initial syncing and we're still waiting for this to be set
- lock.unlock();
- sleepsecs(1);
- // if there is no one to sync from
- return;
- }
+ produce(&txn);
+}
- // Wait until we've applied the ops we have before we choose a sync target
- while (!_appliedBuffer && !inShutdownStrict()) {
- _appliedBufferCondition.wait(lock);
- }
- if (inShutdownStrict()) {
- return;
- }
+void BackgroundSync::produce(OperationContext* txn) {
+ // this oplog reader does not do a handshake because we don't want the server it's syncing
+ // from to track how far it has synced
+ {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ if (_lastOpTimeFetched.isNull()) {
+ // then we're initial syncing and we're still waiting for this to be set
+ lock.unlock();
+ sleepsecs(1);
+ // if there is no one to sync from
+ return;
}
- while (MONGO_FAIL_POINT(rsBgSyncProduce)) {
- sleepmillis(0);
+ // Wait until we've applied the ops we have before we choose a sync target
+ while (!_appliedBuffer && !inShutdownStrict()) {
+ _appliedBufferCondition.wait(lock);
}
-
-
- // find a target to sync from the last optime fetched
- OpTime lastOpTimeFetched;
- {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- lastOpTimeFetched = _lastOpTimeFetched;
- _syncSourceHost = HostAndPort();
+ if (inShutdownStrict()) {
+ return;
}
- _syncSourceReader.resetConnection();
- _syncSourceReader.connectToSyncSource(txn, lastOpTimeFetched, _replCoord);
+ }
- {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- // no server found
- if (_syncSourceReader.getHost().empty()) {
- lock.unlock();
- sleepsecs(1);
- // if there is no one to sync from
- return;
- }
- lastOpTimeFetched = _lastOpTimeFetched;
- _syncSourceHost = _syncSourceReader.getHost();
- _replCoord->signalUpstreamUpdater();
- }
+ while (MONGO_FAIL_POINT(rsBgSyncProduce)) {
+ sleepmillis(0);
+ }
- _syncSourceReader.tailingQueryGTE(rsOplogName.c_str(), lastOpTimeFetched.getTimestamp());
- // if target cut connections between connecting and querying (for
- // example, because it stepped down) we might not have a cursor
- if (!_syncSourceReader.haveCursor()) {
- return;
- }
+ // find a target to sync from the last optime fetched
+ OpTime lastOpTimeFetched;
+ {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ lastOpTimeFetched = _lastOpTimeFetched;
+ _syncSourceHost = HostAndPort();
+ }
+ _syncSourceReader.resetConnection();
+ _syncSourceReader.connectToSyncSource(txn, lastOpTimeFetched, _replCoord);
- if (_rollbackIfNeeded(txn, _syncSourceReader)) {
- stop();
+ {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ // no server found
+ if (_syncSourceReader.getHost().empty()) {
+ lock.unlock();
+ sleepsecs(1);
+ // if there is no one to sync from
return;
}
+ lastOpTimeFetched = _lastOpTimeFetched;
+ _syncSourceHost = _syncSourceReader.getHost();
+ _replCoord->signalUpstreamUpdater();
+ }
- while (!inShutdown()) {
- if (!_syncSourceReader.moreInCurrentBatch()) {
- // Check some things periodically
- // (whenever we run out of items in the
- // current cursor batch)
-
- int bs = _syncSourceReader.currentBatchMessageSize();
- if( bs > 0 && bs < BatchIsSmallish ) {
- // on a very low latency network, if we don't wait a little, we'll be
- // getting ops to write almost one at a time. this will both be expensive
- // for the upstream server as well as potentially defeating our parallel
- // application of batches on the secondary.
- //
- // the inference here is basically if the batch is really small, we are
- // "caught up".
- //
- sleepmillis(SleepToAllowBatchingMillis);
- }
-
- // If we are transitioning to primary state, we need to leave
- // this loop in order to go into bgsync-pause mode.
- if (_replCoord->isWaitingForApplierToDrain() ||
- _replCoord->getMemberState().primary()) {
- return;
- }
-
- // re-evaluate quality of sync target
- if (shouldChangeSyncSource()) {
- return;
- }
+ _syncSourceReader.tailingQueryGTE(rsOplogName.c_str(), lastOpTimeFetched.getTimestamp());
- {
- //record time for each getmore
- TimerHolder batchTimer(&getmoreReplStats);
-
- // This calls receiveMore() on the oplogreader cursor.
- // It can wait up to five seconds for more data.
- _syncSourceReader.more();
- }
- networkByteStats.increment(_syncSourceReader.currentBatchMessageSize());
-
- if (!_syncSourceReader.moreInCurrentBatch()) {
- // If there is still no data from upstream, check a few more things
- // and then loop back for another pass at getting more data
- {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- if (_pause) {
- return;
- }
- }
+ // if target cut connections between connecting and querying (for
+ // example, because it stepped down) we might not have a cursor
+ if (!_syncSourceReader.haveCursor()) {
+ return;
+ }
- _syncSourceReader.tailCheck();
- if( !_syncSourceReader.haveCursor() ) {
- LOG(1) << "replSet end syncTail pass";
- return;
- }
+ if (_rollbackIfNeeded(txn, _syncSourceReader)) {
+ stop();
+ return;
+ }
- continue;
- }
+ while (!inShutdown()) {
+ if (!_syncSourceReader.moreInCurrentBatch()) {
+ // Check some things periodically
+ // (whenever we run out of items in the
+ // current cursor batch)
+
+ int bs = _syncSourceReader.currentBatchMessageSize();
+ if (bs > 0 && bs < BatchIsSmallish) {
+ // on a very low latency network, if we don't wait a little, we'll be
+ // getting ops to write almost one at a time. this will both be expensive
+ // for the upstream server as well as potentially defeating our parallel
+ // application of batches on the secondary.
+ //
+ // the inference here is basically if the batch is really small, we are
+ // "caught up".
+ //
+ sleepmillis(SleepToAllowBatchingMillis);
}
// If we are transitioning to primary state, we need to leave
// this loop in order to go into bgsync-pause mode.
if (_replCoord->isWaitingForApplierToDrain() ||
_replCoord->getMemberState().primary()) {
- LOG(1) << "waiting for draining or we are primary, not adding more ops to buffer";
return;
}
- // At this point, we are guaranteed to have at least one thing to read out
- // of the oplogreader cursor.
- BSONObj o = _syncSourceReader.nextSafe().getOwned();
- opsReadStats.increment();
+ // re-evaluate quality of sync target
+ if (shouldChangeSyncSource()) {
+ return;
+ }
{
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- _appliedBuffer = false;
- }
+ // record time for each getmore
+ TimerHolder batchTimer(&getmoreReplStats);
- OCCASIONALLY {
- LOG(2) << "bgsync buffer has " << _buffer.size() << " bytes";
+ // This calls receiveMore() on the oplogreader cursor.
+ // It can wait up to five seconds for more data.
+ _syncSourceReader.more();
}
+ networkByteStats.increment(_syncSourceReader.currentBatchMessageSize());
- bufferCountGauge.increment();
- bufferSizeGauge.increment(getSize(o));
- _buffer.push(o);
+ if (!_syncSourceReader.moreInCurrentBatch()) {
+ // If there is still no data from upstream, check a few more things
+ // and then loop back for another pass at getting more data
+ {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ if (_pause) {
+ return;
+ }
+ }
- {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- _lastFetchedHash = o["h"].numberLong();
- _lastOpTimeFetched = extractOpTime(o);
- LOG(3) << "lastOpTimeFetched: " << _lastOpTimeFetched;
+ _syncSourceReader.tailCheck();
+ if (!_syncSourceReader.haveCursor()) {
+ LOG(1) << "replSet end syncTail pass";
+ return;
+ }
+
+ continue;
}
}
- }
- bool BackgroundSync::shouldChangeSyncSource() {
- // is it even still around?
- if (getSyncTarget().empty() || _syncSourceReader.getHost().empty()) {
- return true;
+ // If we are transitioning to primary state, we need to leave
+ // this loop in order to go into bgsync-pause mode.
+ if (_replCoord->isWaitingForApplierToDrain() || _replCoord->getMemberState().primary()) {
+ LOG(1) << "waiting for draining or we are primary, not adding more ops to buffer";
+ return;
}
- // check other members: is any member's optime more than MaxSyncSourceLag seconds
- // ahead of the current sync source?
- return _replCoord->shouldChangeSyncSource(_syncSourceReader.getHost());
- }
-
-
- bool BackgroundSync::peek(BSONObj* op) {
- return _buffer.peek(*op);
- }
-
- void BackgroundSync::waitForMore() {
- BSONObj op;
- // Block for one second before timing out.
- // Ignore the value of the op we peeked at.
- _buffer.blockingPeek(op, 1);
- }
-
- void BackgroundSync::consume() {
- // this is just to get the op off the queue, it's been peeked at
- // and queued for application already
- BSONObj op = _buffer.blockingPop();
- bufferCountGauge.decrement(1);
- bufferSizeGauge.decrement(getSize(op));
- }
+ // At this point, we are guaranteed to have at least one thing to read out
+ // of the oplogreader cursor.
+ BSONObj o = _syncSourceReader.nextSafe().getOwned();
+ opsReadStats.increment();
- bool BackgroundSync::_rollbackIfNeeded(OperationContext* txn, OplogReader& r) {
- string hn = r.conn()->getServerAddress();
-
- // Abort only when syncRollback detects we are in an unrecoverable state.
- // In other cases, we log the message contained in the error status and retry later.
- auto fassertRollbackStatusNoTrace = [](int msgid, const Status& status) {
- if (status.isOK()) {
- return;
- }
- if (ErrorCodes::UnrecoverableRollbackError == status.code()) {
- fassertNoTrace(msgid, status);
- }
- warning() << "rollback cannot proceed at this time (retrying later): "
- << status;
- };
-
- if (!r.more()) {
- try {
- BSONObj theirLastOp = r.getLastOp(rsOplogName.c_str());
- if (theirLastOp.isEmpty()) {
- error() << "empty query result from " << hn << " oplog";
- sleepsecs(2);
- return true;
- }
- OpTime theirOpTime = extractOpTime(theirLastOp);
- if (theirOpTime < _lastOpTimeFetched) {
- log() << "we are ahead of the sync source, will try to roll back";
- fassertRollbackStatusNoTrace(
- 28656,
- syncRollback(txn,
- _replCoord->getMyLastOptime(),
- OplogInterfaceLocal(txn, rsOplogName),
- RollbackSourceImpl(r.conn(), rsOplogName),
- _replCoord));
-
- return true;
- }
- /* we're not ahead? maybe our new query got fresher data. best to come back and try again */
- log() << "syncTail condition 1";
- sleepsecs(1);
- }
- catch(DBException& e) {
- error() << "querying " << hn << ' ' << e.toString();
- sleepsecs(2);
- }
- return true;
+ {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ _appliedBuffer = false;
}
- BSONObj o = r.nextSafe();
- OpTime opTime = extractOpTime(o);
- long long hash = o["h"].numberLong();
- if ( opTime != _lastOpTimeFetched || hash != _lastFetchedHash ) {
- log() << "our last op time fetched: " << _lastOpTimeFetched;
- log() << "source's GTE: " << opTime;
- fassertRollbackStatusNoTrace(
- 28657,
- syncRollback(txn,
- _replCoord->getMyLastOptime(),
- OplogInterfaceLocal(txn, rsOplogName),
- RollbackSourceImpl(r.conn(), rsOplogName),
- _replCoord));
- return true;
+ OCCASIONALLY {
+ LOG(2) << "bgsync buffer has " << _buffer.size() << " bytes";
}
- return false;
- }
-
- HostAndPort BackgroundSync::getSyncTarget() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- return _syncSourceHost;
- }
+ bufferCountGauge.increment();
+ bufferSizeGauge.increment(getSize(o));
+ _buffer.push(o);
- void BackgroundSync::clearSyncTarget() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- _syncSourceHost = HostAndPort();
+ {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ _lastFetchedHash = o["h"].numberLong();
+ _lastOpTimeFetched = extractOpTime(o);
+ LOG(3) << "lastOpTimeFetched: " << _lastOpTimeFetched;
+ }
}
+}
- void BackgroundSync::stop() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
-
- _pause = true;
- _syncSourceHost = HostAndPort();
- _lastOpTimeFetched = OpTime();
- _lastFetchedHash = 0;
- _appliedBufferCondition.notify_all();
- _pausedCondition.notify_all();
+bool BackgroundSync::shouldChangeSyncSource() {
+ // is it even still around?
+ if (getSyncTarget().empty() || _syncSourceReader.getHost().empty()) {
+ return true;
}
- void BackgroundSync::start(OperationContext* txn) {
- massert(16235, "going to start syncing, but buffer is not empty", _buffer.empty());
-
- long long updatedLastAppliedHash = _readLastAppliedHash(txn);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _pause = false;
-
- // reset _last fields with current oplog data
- _lastAppliedHash = updatedLastAppliedHash;
- _lastOpTimeFetched = _replCoord->getMyLastOptime();
- _lastFetchedHash = _lastAppliedHash;
-
- LOG(1) << "bgsync fetch queue set to: " << _lastOpTimeFetched <<
- " " << _lastFetchedHash;
- }
+ // check other members: is any member's optime more than MaxSyncSourceLag seconds
+ // ahead of the current sync source?
+ return _replCoord->shouldChangeSyncSource(_syncSourceReader.getHost());
+}
+
+
+bool BackgroundSync::peek(BSONObj* op) {
+ return _buffer.peek(*op);
+}
+
+void BackgroundSync::waitForMore() {
+ BSONObj op;
+ // Block for one second before timing out.
+ // Ignore the value of the op we peeked at.
+ _buffer.blockingPeek(op, 1);
+}
+
+void BackgroundSync::consume() {
+ // this is just to get the op off the queue, it's been peeked at
+ // and queued for application already
+ BSONObj op = _buffer.blockingPop();
+ bufferCountGauge.decrement(1);
+ bufferSizeGauge.decrement(getSize(op));
+}
+
+bool BackgroundSync::_rollbackIfNeeded(OperationContext* txn, OplogReader& r) {
+ string hn = r.conn()->getServerAddress();
+
+ // Abort only when syncRollback detects we are in an unrecoverable state.
+ // In other cases, we log the message contained in the error status and retry later.
+ auto fassertRollbackStatusNoTrace = [](int msgid, const Status& status) {
+ if (status.isOK()) {
+ return;
+ }
+ if (ErrorCodes::UnrecoverableRollbackError == status.code()) {
+ fassertNoTrace(msgid, status);
+ }
+ warning() << "rollback cannot proceed at this time (retrying later): " << status;
+ };
- void BackgroundSync::waitUntilPaused() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- while (!_pause) {
- _pausedCondition.wait(lock);
+ if (!r.more()) {
+ try {
+ BSONObj theirLastOp = r.getLastOp(rsOplogName.c_str());
+ if (theirLastOp.isEmpty()) {
+ error() << "empty query result from " << hn << " oplog";
+ sleepsecs(2);
+ return true;
+ }
+ OpTime theirOpTime = extractOpTime(theirLastOp);
+ if (theirOpTime < _lastOpTimeFetched) {
+ log() << "we are ahead of the sync source, will try to roll back";
+ fassertRollbackStatusNoTrace(28656,
+ syncRollback(txn,
+ _replCoord->getMyLastOptime(),
+ OplogInterfaceLocal(txn, rsOplogName),
+ RollbackSourceImpl(r.conn(), rsOplogName),
+ _replCoord));
+
+ return true;
+ }
+ /* we're not ahead? maybe our new query got fresher data. best to come back and try again */
+ log() << "syncTail condition 1";
+ sleepsecs(1);
+ } catch (DBException& e) {
+ error() << "querying " << hn << ' ' << e.toString();
+ sleepsecs(2);
}
+ return true;
}
- long long BackgroundSync::getLastAppliedHash() const {
- stdx::lock_guard<stdx::mutex> lck(_mutex);
- return _lastAppliedHash;
+ BSONObj o = r.nextSafe();
+ OpTime opTime = extractOpTime(o);
+ long long hash = o["h"].numberLong();
+ if (opTime != _lastOpTimeFetched || hash != _lastFetchedHash) {
+ log() << "our last op time fetched: " << _lastOpTimeFetched;
+ log() << "source's GTE: " << opTime;
+ fassertRollbackStatusNoTrace(28657,
+ syncRollback(txn,
+ _replCoord->getMyLastOptime(),
+ OplogInterfaceLocal(txn, rsOplogName),
+ RollbackSourceImpl(r.conn(), rsOplogName),
+ _replCoord));
+ return true;
}
- void BackgroundSync::clearBuffer() {
- _buffer.clear();
+ return false;
+}
+
+HostAndPort BackgroundSync::getSyncTarget() {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ return _syncSourceHost;
+}
+
+void BackgroundSync::clearSyncTarget() {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ _syncSourceHost = HostAndPort();
+}
+
+void BackgroundSync::stop() {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+
+ _pause = true;
+ _syncSourceHost = HostAndPort();
+ _lastOpTimeFetched = OpTime();
+ _lastFetchedHash = 0;
+ _appliedBufferCondition.notify_all();
+ _pausedCondition.notify_all();
+}
+
+void BackgroundSync::start(OperationContext* txn) {
+ massert(16235, "going to start syncing, but buffer is not empty", _buffer.empty());
+
+ long long updatedLastAppliedHash = _readLastAppliedHash(txn);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _pause = false;
+
+ // reset _last fields with current oplog data
+ _lastAppliedHash = updatedLastAppliedHash;
+ _lastOpTimeFetched = _replCoord->getMyLastOptime();
+ _lastFetchedHash = _lastAppliedHash;
+
+ LOG(1) << "bgsync fetch queue set to: " << _lastOpTimeFetched << " " << _lastFetchedHash;
+}
+
+void BackgroundSync::waitUntilPaused() {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ while (!_pause) {
+ _pausedCondition.wait(lock);
}
-
- void BackgroundSync::setLastAppliedHash(long long newHash) {
- stdx::lock_guard<stdx::mutex> lck(_mutex);
- _lastAppliedHash = newHash;
+}
+
+long long BackgroundSync::getLastAppliedHash() const {
+ stdx::lock_guard<stdx::mutex> lck(_mutex);
+ return _lastAppliedHash;
+}
+
+void BackgroundSync::clearBuffer() {
+ _buffer.clear();
+}
+
+void BackgroundSync::setLastAppliedHash(long long newHash) {
+ stdx::lock_guard<stdx::mutex> lck(_mutex);
+ _lastAppliedHash = newHash;
+}
+
+void BackgroundSync::loadLastAppliedHash(OperationContext* txn) {
+ long long result = _readLastAppliedHash(txn);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _lastAppliedHash = result;
+}
+
+long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
+ BSONObj oplogEntry;
+ try {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), "local", MODE_X);
+ bool success = Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry);
+ if (!success) {
+ // This can happen when we are to do an initial sync. lastHash will be set
+ // after the initial sync is complete.
+ return 0;
+ }
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "readLastAppliedHash", rsOplogName);
+ } catch (const DBException& ex) {
+ severe() << "Problem reading " << rsOplogName << ": " << ex.toStatus();
+ fassertFailed(18904);
}
-
- void BackgroundSync::loadLastAppliedHash(OperationContext* txn) {
- long long result = _readLastAppliedHash(txn);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _lastAppliedHash = result;
+ BSONElement hashElement = oplogEntry[hashFieldName];
+ if (hashElement.eoo()) {
+ severe() << "Most recent entry in " << rsOplogName << " missing \"" << hashFieldName
+ << "\" field";
+ fassertFailed(18902);
}
-
- long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
- BSONObj oplogEntry;
- try {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), "local", MODE_X);
- bool success = Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry);
- if (!success) {
- // This can happen when we are to do an initial sync. lastHash will be set
- // after the initial sync is complete.
- return 0;
- }
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "readLastAppliedHash", rsOplogName);
- }
- catch (const DBException& ex) {
- severe() << "Problem reading " << rsOplogName << ": " << ex.toStatus();
- fassertFailed(18904);
- }
- BSONElement hashElement = oplogEntry[hashFieldName];
- if (hashElement.eoo()) {
- severe() << "Most recent entry in " << rsOplogName << " missing \"" << hashFieldName <<
- "\" field";
- fassertFailed(18902);
- }
- if (hashElement.type() != NumberLong) {
- severe() << "Expected type of \"" << hashFieldName << "\" in most recent " <<
- rsOplogName << " entry to have type NumberLong, but found " <<
- typeName(hashElement.type());
- fassertFailed(18903);
- }
- return hashElement.safeNumberLong();
+ if (hashElement.type() != NumberLong) {
+ severe() << "Expected type of \"" << hashFieldName << "\" in most recent " << rsOplogName
+ << " entry to have type NumberLong, but found " << typeName(hashElement.type());
+ fassertFailed(18903);
}
+ return hashElement.safeNumberLong();
+}
- bool BackgroundSync::getInitialSyncRequestedFlag() {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncMutex);
- return _initialSyncRequestedFlag;
- }
+bool BackgroundSync::getInitialSyncRequestedFlag() {
+ stdx::lock_guard<stdx::mutex> lock(_initialSyncMutex);
+ return _initialSyncRequestedFlag;
+}
- void BackgroundSync::setInitialSyncRequestedFlag(bool value) {
- stdx::lock_guard<stdx::mutex> lock(_initialSyncMutex);
- _initialSyncRequestedFlag = value;
- }
+void BackgroundSync::setInitialSyncRequestedFlag(bool value) {
+ stdx::lock_guard<stdx::mutex> lock(_initialSyncMutex);
+ _initialSyncRequestedFlag = value;
+}
- void BackgroundSync::pushTestOpToBuffer(const BSONObj& op) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- _buffer.push(op);
- }
+void BackgroundSync::pushTestOpToBuffer(const BSONObj& op) {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ _buffer.push(op);
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
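
The small-batch backoff that produce() describes above (sleep briefly when a getmore returns well under BatchIsSmallish bytes, so the upstream node can hand back fuller batches) can be read in isolation. The following is a minimal standalone sketch, not MongoDB code: the two constants are copied from this file, while maybePauseForBatching is a hypothetical helper name used only for illustration.

    #include <chrono>
    #include <thread>

    namespace {
    const int BatchIsSmallish = 40000;         // bytes, as defined in bgsync.cpp
    const int SleepToAllowBatchingMillis = 2;  // as defined in bgsync.cpp
    }  // namespace

    // Hypothetical helper: pause before the next getmore when the batch we just
    // read was tiny, trading a couple of milliseconds of latency for fewer,
    // larger batches from the sync source.
    inline void maybePauseForBatching(int currentBatchBytes) {
        if (currentBatchBytes > 0 && currentBatchBytes < BatchIsSmallish) {
            std::this_thread::sleep_for(
                std::chrono::milliseconds(SleepToAllowBatchingMillis));
        }
    }
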
diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h
index d1e7e7ea692..8fe0c9e2b02 100644
--- a/src/mongo/db/repl/bgsync.h
+++ b/src/mongo/db/repl/bgsync.h
@@ -37,158 +37,156 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
- class Member;
- class ReplicationCoordinator;
+class Member;
+class ReplicationCoordinator;
- // This interface exists to facilitate easier testing;
- // the test infrastructure implements these functions with stubs.
- class BackgroundSyncInterface {
- public:
- virtual ~BackgroundSyncInterface();
+// This interface exists to facilitate easier testing;
+// the test infrastructure implements these functions with stubs.
+class BackgroundSyncInterface {
+public:
+ virtual ~BackgroundSyncInterface();
- // Gets the head of the buffer, but does not remove it.
- // Returns true if an element was present at the head;
- // false if the queue was empty.
- virtual bool peek(BSONObj* op) = 0;
+ // Gets the head of the buffer, but does not remove it.
+ // Returns true if an element was present at the head;
+ // false if the queue was empty.
+ virtual bool peek(BSONObj* op) = 0;
- // Deletes objects in the queue;
- // called by sync thread after it has applied an op
- virtual void consume() = 0;
+ // Deletes objects in the queue;
+ // called by sync thread after it has applied an op
+ virtual void consume() = 0;
- // wait up to 1 second for more ops to appear
- virtual void waitForMore() = 0;
- };
+ // wait up to 1 second for more ops to appear
+ virtual void waitForMore() = 0;
+};
- /**
- * Lock order:
- * 1. rslock
- * 2. rwlock
- * 3. BackgroundSync::_mutex
- */
- class BackgroundSync : public BackgroundSyncInterface {
- public:
- // Allow index prefetching to be turned on/off
- enum IndexPrefetchConfig {
- PREFETCH_NONE=0, PREFETCH_ID_ONLY=1, PREFETCH_ALL=2
- };
+/**
+ * Lock order:
+ * 1. rslock
+ * 2. rwlock
+ * 3. BackgroundSync::_mutex
+ */
+class BackgroundSync : public BackgroundSyncInterface {
+public:
+ // Allow index prefetching to be turned on/off
+ enum IndexPrefetchConfig { PREFETCH_NONE = 0, PREFETCH_ID_ONLY = 1, PREFETCH_ALL = 2 };
- static BackgroundSync* get();
+ static BackgroundSync* get();
- // stop syncing (when this node becomes a primary, e.g.)
- void stop();
+ // stop syncing (when this node becomes a primary, e.g.)
+ void stop();
- void shutdown();
- void notify(OperationContext* txn);
+ void shutdown();
+ void notify(OperationContext* txn);
- // Blocks until _pause becomes true from a call to stop() or shutdown()
- void waitUntilPaused();
+ // Blocks until _pause becomes true from a call to stop() or shutdown()
+ void waitUntilPaused();
- virtual ~BackgroundSync() {}
+ virtual ~BackgroundSync() {}
- // starts the producer thread
- void producerThread();
- // starts the sync target notifying thread
- void notifierThread();
+ // starts the producer thread
+ void producerThread();
+ // starts the sync target notifying thread
+ void notifierThread();
- HostAndPort getSyncTarget();
+ HostAndPort getSyncTarget();
- // Interface implementation
+ // Interface implementation
- virtual bool peek(BSONObj* op);
- virtual void consume();
- virtual void clearSyncTarget();
- virtual void waitForMore();
+ virtual bool peek(BSONObj* op);
+ virtual void consume();
+ virtual void clearSyncTarget();
+ virtual void waitForMore();
- // For monitoring
- BSONObj getCounters();
+ // For monitoring
+ BSONObj getCounters();
- long long getLastAppliedHash() const;
- void setLastAppliedHash(long long oldH);
- void loadLastAppliedHash(OperationContext* txn);
+ long long getLastAppliedHash() const;
+ void setLastAppliedHash(long long oldH);
+ void loadLastAppliedHash(OperationContext* txn);
- // Clears any fetched and buffered oplog entries.
- void clearBuffer();
+ // Clears any fetched and buffered oplog entries.
+ void clearBuffer();
- bool getInitialSyncRequestedFlag();
- void setInitialSyncRequestedFlag(bool value);
+ bool getInitialSyncRequestedFlag();
+ void setInitialSyncRequestedFlag(bool value);
- void setIndexPrefetchConfig(const IndexPrefetchConfig cfg) {
- _indexPrefetchConfig = cfg;
- }
+ void setIndexPrefetchConfig(const IndexPrefetchConfig cfg) {
+ _indexPrefetchConfig = cfg;
+ }
- IndexPrefetchConfig getIndexPrefetchConfig() {
- return _indexPrefetchConfig;
- }
+ IndexPrefetchConfig getIndexPrefetchConfig() {
+ return _indexPrefetchConfig;
+ }
- // Testing related stuff
- void pushTestOpToBuffer(const BSONObj& op);
- private:
- static BackgroundSync *s_instance;
- // protects creation of s_instance
- static stdx::mutex s_mutex;
+ // Testing related stuff
+ void pushTestOpToBuffer(const BSONObj& op);
- // Production thread
- BlockingQueue<BSONObj> _buffer;
- OplogReader _syncSourceReader;
+private:
+ static BackgroundSync* s_instance;
+ // protects creation of s_instance
+ static stdx::mutex s_mutex;
- // _mutex protects all of the class variables except _syncSourceReader and _buffer
- mutable stdx::mutex _mutex;
+ // Production thread
+ BlockingQueue<BSONObj> _buffer;
+ OplogReader _syncSourceReader;
- OpTime _lastOpTimeFetched;
+ // _mutex protects all of the class variables except _syncSourceReader and _buffer
+ mutable stdx::mutex _mutex;
- // lastAppliedHash is used to generate a new hash for the following op, when primary.
- long long _lastAppliedHash;
- // lastFetchedHash is used to match ops to determine if we need to rollback, when
- // a secondary.
- long long _lastFetchedHash;
+ OpTime _lastOpTimeFetched;
- // if produce thread should be running
- bool _pause;
- stdx::condition_variable _pausedCondition;
- bool _appliedBuffer;
- stdx::condition_variable _appliedBufferCondition;
+ // lastAppliedHash is used to generate a new hash for the following op, when primary.
+ long long _lastAppliedHash;
+ // lastFetchedHash is used to match ops to determine if we need to rollback, when
+ // a secondary.
+ long long _lastFetchedHash;
- HostAndPort _syncSourceHost;
+ // if produce thread should be running
+ bool _pause;
+ stdx::condition_variable _pausedCondition;
+ bool _appliedBuffer;
+ stdx::condition_variable _appliedBufferCondition;
- BackgroundSync();
- BackgroundSync(const BackgroundSync& s);
- BackgroundSync operator=(const BackgroundSync& s);
+ HostAndPort _syncSourceHost;
- // Production thread
- void _producerThread();
- // Adds elements to the list, up to maxSize.
- void produce(OperationContext* txn);
- // Checks the criteria for rolling back and executes a rollback if warranted.
- bool _rollbackIfNeeded(OperationContext* txn, OplogReader& r);
+ BackgroundSync();
+ BackgroundSync(const BackgroundSync& s);
+ BackgroundSync operator=(const BackgroundSync& s);
- // Evaluate if the current sync target is still good
- bool shouldChangeSyncSource();
+ // Production thread
+ void _producerThread();
+ // Adds elements to the list, up to maxSize.
+ void produce(OperationContext* txn);
+ // Checks the criteria for rolling back and executes a rollback if warranted.
+ bool _rollbackIfNeeded(OperationContext* txn, OplogReader& r);
- // restart syncing
- void start(OperationContext* txn);
+ // Evaluate if the current sync target is still good
+ bool shouldChangeSyncSource();
- long long _readLastAppliedHash(OperationContext* txn);
+ // restart syncing
+ void start(OperationContext* txn);
- // A pointer to the replication coordinator running the show.
- ReplicationCoordinator* _replCoord;
+ long long _readLastAppliedHash(OperationContext* txn);
- // bool for indicating resync need on this node and the mutex that protects it
- // The resync command sets this flag; the Applier thread observes and clears it.
- bool _initialSyncRequestedFlag;
- stdx::mutex _initialSyncMutex;
+ // A pointer to the replication coordinator running the show.
+ ReplicationCoordinator* _replCoord;
- // This setting affects the Applier prefetcher behavior.
- IndexPrefetchConfig _indexPrefetchConfig;
+ // bool for indicating resync need on this node and the mutex that protects it
+ // The resync command sets this flag; the Applier thread observes and clears it.
+ bool _initialSyncRequestedFlag;
+ stdx::mutex _initialSyncMutex;
- };
+ // This setting affects the Applier prefetcher behavior.
+ IndexPrefetchConfig _indexPrefetchConfig;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
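
The interface comments above pin down a specific contract: peek() looks at the head of the buffer without removing it, consume() removes an op only after the sync thread has applied it, and waitForMore() blocks for up to one second. A minimal consumer loop written against that contract might look like the sketch below; it is illustrative only, and applyOperation is a hypothetical stand-in rather than the real applier.

    // Sketch only, not MongoDB's applier: one way a consumer could drive the
    // peek()/consume()/waitForMore() contract documented in the header above.
    void drainOneOpAtATime(BackgroundSyncInterface* bgsync,
                           bool (*applyOperation)(const BSONObj&)) {
        BSONObj op;
        for (;;) {
            if (!bgsync->peek(&op)) {
                // Nothing buffered; block for up to a second waiting for more.
                bgsync->waitForMore();
                continue;
            }
            if (applyOperation(op)) {
                // Remove the op only after it has actually been applied.
                bgsync->consume();
            }
        }
    }
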
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.cpp b/src/mongo/db/repl/check_quorum_for_config_change.cpp
index 5e9899897ed..437dca34701 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change.cpp
@@ -45,256 +45,247 @@
namespace mongo {
namespace repl {
- QuorumChecker::QuorumChecker(const ReplicaSetConfig* rsConfig, int myIndex)
- : _rsConfig(rsConfig),
- _myIndex(myIndex),
- _numResponses(1), // We "responded" to ourself already.
- _numElectable(0),
- _vetoStatus(Status::OK()),
- _finalStatus(ErrorCodes::CallbackCanceled, "Quorum check canceled") {
-
- invariant(myIndex < _rsConfig->getNumMembers());
- const MemberConfig& myConfig = _rsConfig->getMemberAt(_myIndex);
-
- if (myConfig.isVoter()) {
- _voters.push_back(myConfig.getHostAndPort());
- }
- if (myConfig.isElectable()) {
- _numElectable = 1;
- }
-
- if (hasReceivedSufficientResponses()) {
- _onQuorumCheckComplete();
- }
+QuorumChecker::QuorumChecker(const ReplicaSetConfig* rsConfig, int myIndex)
+ : _rsConfig(rsConfig),
+ _myIndex(myIndex),
+ _numResponses(1), // We "responded" to ourself already.
+ _numElectable(0),
+ _vetoStatus(Status::OK()),
+ _finalStatus(ErrorCodes::CallbackCanceled, "Quorum check canceled") {
+ invariant(myIndex < _rsConfig->getNumMembers());
+ const MemberConfig& myConfig = _rsConfig->getMemberAt(_myIndex);
+
+ if (myConfig.isVoter()) {
+ _voters.push_back(myConfig.getHostAndPort());
+ }
+ if (myConfig.isElectable()) {
+ _numElectable = 1;
}
- QuorumChecker::~QuorumChecker() {}
+ if (hasReceivedSufficientResponses()) {
+ _onQuorumCheckComplete();
+ }
+}
- std::vector<RemoteCommandRequest> QuorumChecker::getRequests() const {
- const bool isInitialConfig = _rsConfig->getConfigVersion() == 1;
- const MemberConfig& myConfig = _rsConfig->getMemberAt(_myIndex);
+QuorumChecker::~QuorumChecker() {}
- std::vector<RemoteCommandRequest> requests;
- if (hasReceivedSufficientResponses()) {
- return requests;
- }
+std::vector<RemoteCommandRequest> QuorumChecker::getRequests() const {
+ const bool isInitialConfig = _rsConfig->getConfigVersion() == 1;
+ const MemberConfig& myConfig = _rsConfig->getMemberAt(_myIndex);
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setSetName(_rsConfig->getReplSetName());
- hbArgs.setProtocolVersion(1);
- hbArgs.setConfigVersion(_rsConfig->getConfigVersion());
- hbArgs.setCheckEmpty(isInitialConfig);
- hbArgs.setSenderHost(myConfig.getHostAndPort());
- hbArgs.setSenderId(myConfig.getId());
- const BSONObj hbRequest = hbArgs.toBSON();
+ std::vector<RemoteCommandRequest> requests;
+ if (hasReceivedSufficientResponses()) {
+ return requests;
+ }
- // Send a bunch of heartbeat requests.
- // Schedule an operation when a "sufficient" number of them have completed, and use that
- // to compute the quorum check results.
- // Wait for the "completion" callback to finish, and then it's OK to return the results.
- for (int i = 0; i < _rsConfig->getNumMembers(); ++i) {
- if (_myIndex == i) {
- // No need to check self for liveness or unreadiness.
- continue;
- }
- requests.push_back(RemoteCommandRequest(
- _rsConfig->getMemberAt(i).getHostAndPort(),
- "admin",
- hbRequest,
- _rsConfig->getHeartbeatTimeoutPeriodMillis()));
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setSetName(_rsConfig->getReplSetName());
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setConfigVersion(_rsConfig->getConfigVersion());
+ hbArgs.setCheckEmpty(isInitialConfig);
+ hbArgs.setSenderHost(myConfig.getHostAndPort());
+ hbArgs.setSenderId(myConfig.getId());
+ const BSONObj hbRequest = hbArgs.toBSON();
+
+ // Send a bunch of heartbeat requests.
+ // Schedule an operation when a "sufficient" number of them have completed, and use that
+ // to compute the quorum check results.
+ // Wait for the "completion" callback to finish, and then it's OK to return the results.
+ for (int i = 0; i < _rsConfig->getNumMembers(); ++i) {
+ if (_myIndex == i) {
+ // No need to check self for liveness or unreadiness.
+ continue;
}
-
- return requests;
+ requests.push_back(RemoteCommandRequest(_rsConfig->getMemberAt(i).getHostAndPort(),
+ "admin",
+ hbRequest,
+ _rsConfig->getHeartbeatTimeoutPeriodMillis()));
}
- void QuorumChecker::processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response) {
+ return requests;
+}
- _tabulateHeartbeatResponse(request, response);
- if (hasReceivedSufficientResponses()) {
- _onQuorumCheckComplete();
- }
+void QuorumChecker::processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response) {
+ _tabulateHeartbeatResponse(request, response);
+ if (hasReceivedSufficientResponses()) {
+ _onQuorumCheckComplete();
}
+}
- void QuorumChecker::_onQuorumCheckComplete() {
- if (!_vetoStatus.isOK()) {
- _finalStatus = _vetoStatus;
- return;
+void QuorumChecker::_onQuorumCheckComplete() {
+ if (!_vetoStatus.isOK()) {
+ _finalStatus = _vetoStatus;
+ return;
+ }
+ if (_rsConfig->getConfigVersion() == 1 && !_badResponses.empty()) {
+ str::stream message;
+ message << "replSetInitiate quorum check failed because not all proposed set members "
+ "responded affirmatively: ";
+ for (std::vector<std::pair<HostAndPort, Status>>::const_iterator it = _badResponses.begin();
+ it != _badResponses.end();
+ ++it) {
+ if (it != _badResponses.begin()) {
+ message << ", ";
+ }
+ message << it->first.toString() << " failed with " << it->second.reason();
}
- if (_rsConfig->getConfigVersion() == 1 && !_badResponses.empty()) {
- str::stream message;
- message << "replSetInitiate quorum check failed because not all proposed set members "
- "responded affirmatively: ";
- for (std::vector<std::pair<HostAndPort, Status> >::const_iterator it =
- _badResponses.begin();
- it != _badResponses.end();
- ++it) {
+ _finalStatus = Status(ErrorCodes::NodeNotFound, message);
+ return;
+ }
+ if (_numElectable == 0) {
+ _finalStatus = Status(ErrorCodes::NodeNotFound,
+ "Quorum check failed because no "
+ "electable nodes responded; at least one required for config");
+ return;
+ }
+ if (int(_voters.size()) < _rsConfig->getMajorityVoteCount()) {
+ str::stream message;
+ message << "Quorum check failed because not enough voting nodes responded; required "
+ << _rsConfig->getMajorityVoteCount() << " but ";
+
+ if (_voters.size() == 0) {
+ message << "none responded";
+ } else {
+ message << "only the following " << _voters.size()
+ << " voting nodes responded: " << _voters.front().toString();
+ for (size_t i = 1; i < _voters.size(); ++i) {
+ message << ", " << _voters[i].toString();
+ }
+ }
+ if (!_badResponses.empty()) {
+ message << "; the following nodes did not respond affirmatively: ";
+ for (std::vector<std::pair<HostAndPort, Status>>::const_iterator it =
+ _badResponses.begin();
+ it != _badResponses.end();
+ ++it) {
if (it != _badResponses.begin()) {
message << ", ";
}
message << it->first.toString() << " failed with " << it->second.reason();
}
- _finalStatus = Status(ErrorCodes::NodeNotFound, message);
- return;
}
- if (_numElectable == 0) {
- _finalStatus = Status(
- ErrorCodes::NodeNotFound, "Quorum check failed because no "
- "electable nodes responded; at least one required for config");
- return;
- }
- if (int(_voters.size()) < _rsConfig->getMajorityVoteCount()) {
- str::stream message;
- message << "Quorum check failed because not enough voting nodes responded; required " <<
- _rsConfig->getMajorityVoteCount() << " but ";
-
- if (_voters.size() == 0) {
- message << "none responded";
- }
- else {
- message << "only the following " << _voters.size() <<
- " voting nodes responded: " << _voters.front().toString();
- for (size_t i = 1; i < _voters.size(); ++i) {
- message << ", " << _voters[i].toString();
- }
- }
- if (!_badResponses.empty()) {
- message << "; the following nodes did not respond affirmatively: ";
- for (std::vector<std::pair<HostAndPort, Status> >::const_iterator it =
- _badResponses.begin();
- it != _badResponses.end();
- ++it) {
- if (it != _badResponses.begin()) {
- message << ", ";
- }
- message << it->first.toString() << " failed with " << it->second.reason();
- }
- }
- _finalStatus = Status(ErrorCodes::NodeNotFound, message);
- return;
- }
- _finalStatus = Status::OK();
+ _finalStatus = Status(ErrorCodes::NodeNotFound, message);
+ return;
+ }
+ _finalStatus = Status::OK();
+}
+
+void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response) {
+ ++_numResponses;
+ if (!response.isOK()) {
+ warning() << "Failed to complete heartbeat request to " << request.target << "; "
+ << response.getStatus();
+ _badResponses.push_back(std::make_pair(request.target, response.getStatus()));
+ return;
}
- void QuorumChecker::_tabulateHeartbeatResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response) {
+ BSONObj resBSON = response.getValue().data;
+ ReplSetHeartbeatResponse hbResp;
+ Status hbStatus = hbResp.initialize(resBSON, 0);
- ++_numResponses;
- if (!response.isOK()) {
- warning() << "Failed to complete heartbeat request to " << request.target <<
- "; " << response.getStatus();
- _badResponses.push_back(std::make_pair(request.target, response.getStatus()));
- return;
- }
+ if (hbStatus.code() == ErrorCodes::InconsistentReplicaSetNames) {
+ std::string message = str::stream() << "Our set name did not match that of "
+ << request.target.toString();
+ _vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
+ warning() << message;
+ return;
+ }
- BSONObj resBSON = response.getValue().data;
- ReplSetHeartbeatResponse hbResp;
- Status hbStatus = hbResp.initialize(resBSON, 0);
+ if (!hbStatus.isOK() && hbStatus != ErrorCodes::InvalidReplicaSetConfig) {
+ warning() << "Got error (" << hbStatus << ") response on heartbeat request to "
+ << request.target << "; " << hbResp;
+ _badResponses.push_back(std::make_pair(request.target, hbStatus));
+ return;
+ }
- if (hbStatus.code() == ErrorCodes::InconsistentReplicaSetNames) {
- std::string message = str::stream() << "Our set name did not match that of " <<
- request.target.toString();
+ if (!hbResp.getReplicaSetName().empty()) {
+ if (hbResp.getConfigVersion() >= _rsConfig->getConfigVersion()) {
+ std::string message = str::stream()
+ << "Our config version of " << _rsConfig->getConfigVersion()
+ << " is no larger than the version on " << request.target.toString()
+ << ", which is " << hbResp.getConfigVersion();
_vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
warning() << message;
return;
}
+ }
- if (!hbStatus.isOK() && hbStatus != ErrorCodes::InvalidReplicaSetConfig) {
- warning() << "Got error (" << hbStatus
- << ") response on heartbeat request to " << request.target
- << "; " << hbResp;
- _badResponses.push_back(std::make_pair(request.target, hbStatus));
- return;
- }
-
- if (!hbResp.getReplicaSetName().empty()) {
- if (hbResp.getConfigVersion() >= _rsConfig->getConfigVersion()) {
- std::string message = str::stream() << "Our config version of " <<
- _rsConfig->getConfigVersion() <<
- " is no larger than the version on " << request.target.toString() <<
- ", which is " << hbResp.getConfigVersion();
- _vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
- warning() << message;
- return;
- }
- }
-
- const bool isInitialConfig = _rsConfig->getConfigVersion() == 1;
- if (isInitialConfig && hbResp.hasData()) {
- std::string message = str::stream() << "'" << request.target.toString()
- << "' has data already, cannot initiate set.";
- _vetoStatus = Status(ErrorCodes::CannotInitializeNodeWithData, message);
- warning() << message;
- return;
- }
-
- for (int i = 0; i < _rsConfig->getNumMembers(); ++i) {
- const MemberConfig& memberConfig = _rsConfig->getMemberAt(i);
- if (memberConfig.getHostAndPort() != request.target) {
- continue;
- }
- if (memberConfig.isElectable()) {
- ++_numElectable;
- }
- if (memberConfig.isVoter()) {
- _voters.push_back(request.target);
- }
- return;
- }
- invariant(false);
+ const bool isInitialConfig = _rsConfig->getConfigVersion() == 1;
+ if (isInitialConfig && hbResp.hasData()) {
+ std::string message = str::stream() << "'" << request.target.toString()
+ << "' has data already, cannot initiate set.";
+ _vetoStatus = Status(ErrorCodes::CannotInitializeNodeWithData, message);
+ warning() << message;
+ return;
}
- bool QuorumChecker::hasReceivedSufficientResponses() const {
- if (!_vetoStatus.isOK() || _numResponses == _rsConfig->getNumMembers()) {
- // Vetoed or everybody has responded. All done.
- return true;
- }
- if (_rsConfig->getConfigVersion() == 1) {
- // Have not received responses from every member, and the proposed config
- // version is 1 (initial configuration). Keep waiting.
- return false;
+ for (int i = 0; i < _rsConfig->getNumMembers(); ++i) {
+ const MemberConfig& memberConfig = _rsConfig->getMemberAt(i);
+ if (memberConfig.getHostAndPort() != request.target) {
+ continue;
}
- if (_numElectable == 0) {
- // Have not heard from at least one electable node. Keep waiting.
- return false;
+ if (memberConfig.isElectable()) {
+ ++_numElectable;
}
- if (int(_voters.size()) < _rsConfig->getMajorityVoteCount()) {
- // Have not heard from a majority of voters. Keep waiting.
- return false;
+ if (memberConfig.isVoter()) {
+ _voters.push_back(request.target);
}
+ return;
+ }
+ invariant(false);
+}
- // Have heard from a majority of voters and one electable node. All done.
+bool QuorumChecker::hasReceivedSufficientResponses() const {
+ if (!_vetoStatus.isOK() || _numResponses == _rsConfig->getNumMembers()) {
+ // Vetoed or everybody has responded. All done.
return true;
}
-
- Status checkQuorumGeneral(ReplicationExecutor* executor,
- const ReplicaSetConfig& rsConfig,
- const int myIndex) {
- QuorumChecker checker(&rsConfig, myIndex);
- ScatterGatherRunner runner(&checker);
- Status status = runner.run(executor);
- if (!status.isOK()) {
- return status;
- }
-
- return checker.getFinalStatus();
+ if (_rsConfig->getConfigVersion() == 1) {
+ // Have not received responses from every member, and the proposed config
+ // version is 1 (initial configuration). Keep waiting.
+ return false;
}
-
- Status checkQuorumForInitiate(ReplicationExecutor* executor,
- const ReplicaSetConfig& rsConfig,
- const int myIndex) {
- invariant(rsConfig.getConfigVersion() == 1);
- return checkQuorumGeneral(executor, rsConfig, myIndex);
+ if (_numElectable == 0) {
+ // Have not heard from at least one electable node. Keep waiting.
+ return false;
+ }
+ if (int(_voters.size()) < _rsConfig->getMajorityVoteCount()) {
+ // Have not heard from a majority of voters. Keep waiting.
+ return false;
}
- Status checkQuorumForReconfig(ReplicationExecutor* executor,
- const ReplicaSetConfig& rsConfig,
- const int myIndex) {
- invariant(rsConfig.getConfigVersion() > 1);
- return checkQuorumGeneral(executor, rsConfig, myIndex);
+ // Have heard from a majority of voters and one electable node. All done.
+ return true;
+}
+
+Status checkQuorumGeneral(ReplicationExecutor* executor,
+ const ReplicaSetConfig& rsConfig,
+ const int myIndex) {
+ QuorumChecker checker(&rsConfig, myIndex);
+ ScatterGatherRunner runner(&checker);
+ Status status = runner.run(executor);
+ if (!status.isOK()) {
+ return status;
}
+ return checker.getFinalStatus();
+}
+
+Status checkQuorumForInitiate(ReplicationExecutor* executor,
+ const ReplicaSetConfig& rsConfig,
+ const int myIndex) {
+ invariant(rsConfig.getConfigVersion() == 1);
+ return checkQuorumGeneral(executor, rsConfig, myIndex);
+}
+
+Status checkQuorumForReconfig(ReplicationExecutor* executor,
+ const ReplicaSetConfig& rsConfig,
+ const int myIndex) {
+ invariant(rsConfig.getConfigVersion() > 1);
+ return checkQuorumGeneral(executor, rsConfig, myIndex);
+}
+
} // namespace repl
} // namespace mongo
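
The body of hasReceivedSufficientResponses() above is the whole stopping rule for the scatter-gather pass: stop early on a veto or once every member has answered, keep waiting for everyone during replSetInitiate, and otherwise stop as soon as a majority of voters and at least one electable node have answered. Restated over plain counters, as a sketch with invented names rather than MongoDB code:

    // Standalone restatement of the stopping rule in
    // QuorumChecker::hasReceivedSufficientResponses() above; the struct and
    // function names here are made up for illustration.
    struct QuorumTally {
        int numMembers;         // members in the proposed config
        int numResponses;       // responses processed so far (including self)
        int numVoters;          // voting nodes that answered affirmatively
        int numElectable;       // electable nodes that answered affirmatively
        int majorityVoteCount;  // majority threshold for the voting members
        bool vetoed;            // a response already forced the check to fail
        bool initialConfig;     // proposed config version == 1 (replSetInitiate)
    };

    inline bool hasSufficientResponses(const QuorumTally& t) {
        if (t.vetoed || t.numResponses == t.numMembers) {
            return true;  // vetoed, or everybody has responded
        }
        if (t.initialConfig) {
            return false;  // initiate requires a response from every member
        }
        if (t.numElectable == 0) {
            return false;  // need at least one electable responder
        }
        return t.numVoters >= t.majorityVoteCount;  // and a majority of voters
    }
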
diff --git a/src/mongo/db/repl/check_quorum_for_config_change.h b/src/mongo/db/repl/check_quorum_for_config_change.h
index c38a49106be..5d33c01f4c1 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change.h
+++ b/src/mongo/db/repl/check_quorum_for_config_change.h
@@ -35,116 +35,118 @@
namespace mongo {
namespace repl {
- class ReplicaSetConfig;
+class ReplicaSetConfig;
+/**
+ * Quorum checking state machine.
+ *
+ * Usage: Construct a QuorumChecker, pass in a pointer to the configuration for which you're
+ * checking quorum, and the integer index of the member config representing the "executing"
+ * node. Use ScatterGatherRunner or otherwise execute a scatter-gather procedure as described
+ * in the class comment for the ScatterGatherAlgorithm class. After
+ * hasReceivedSufficientResponses() returns true, you may call getFinalStatus() to get the
+ * result of the quorum check.
+ */
+class QuorumChecker : public ScatterGatherAlgorithm {
+ MONGO_DISALLOW_COPYING(QuorumChecker);
+
+public:
/**
- * Quorum checking state machine.
+ * Constructs a QuorumChecker that is used to confirm that sufficient nodes are up to accept
+ * "rsConfig". "myIndex" is the index of the local node, which is assumed to be up.
*
- * Usage: Construct a QuorumChecker, pass in a pointer to the configuration for which you're
- * checking quorum, and the integer index of the member config representing the "executing"
- * node. Use ScatterGatherRunner or otherwise execute a scatter-gather procedure as described
- * in the class comment for the ScatterGatherAlgorithm class. After
- * hasReceivedSufficientResponses() returns true, you may call getFinalStatus() to get the
- * result of the quorum check.
+ * "rsConfig" must stay in scope until QuorumChecker's destructor completes.
*/
- class QuorumChecker : public ScatterGatherAlgorithm {
- MONGO_DISALLOW_COPYING(QuorumChecker);
- public:
- /**
- * Constructs a QuorumChecker that is used to confirm that sufficient nodes are up to accept
- * "rsConfig". "myIndex" is the index of the local node, which is assumed to be up.
- *
- * "rsConfig" must stay in scope until QuorumChecker's destructor completes.
- */
- QuorumChecker(const ReplicaSetConfig* rsConfig, int myIndex);
- virtual ~QuorumChecker();
-
- virtual std::vector<RemoteCommandRequest> getRequests() const;
- virtual void processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response);
-
- virtual bool hasReceivedSufficientResponses() const;
-
- Status getFinalStatus() const { return _finalStatus; }
-
- private:
- /**
- * Callback that executes after _haveReceivedSufficientReplies() becomes true.
- *
- * Computes the quorum result based on responses received so far, stores it into
- * _finalStatus, and enables QuorumChecker::run() to return.
- */
- void _onQuorumCheckComplete();
-
- /**
- * Updates the QuorumChecker state based on the data from a single heartbeat response.
- */
- void _tabulateHeartbeatResponse(const RemoteCommandRequest& request,
- const ResponseStatus& response);
-
- // Pointer to the replica set configuration for which we're checking quorum.
- const ReplicaSetConfig* const _rsConfig;
-
- // Index of the local node's member configuration in _rsConfig.
- const int _myIndex;
-
- // List of voting nodes that have responded affirmatively.
- std::vector<HostAndPort> _voters;
-
- // List of nodes with bad responses and the bad response status they returned.
- std::vector<std::pair<HostAndPort, Status> > _badResponses;
-
- // Total number of responses and timeouts processed.
- int _numResponses;
-
- // Number of electable nodes that have responded affirmatively.
- int _numElectable;
-
- // Set to a non-OK status if a response from a remote node indicates
- // that the quorum check should definitely fail, such as because of
- // a replica set name mismatch.
- Status _vetoStatus;
-
- // Final status of the quorum check, returned by run().
- Status _finalStatus;
- };
+ QuorumChecker(const ReplicaSetConfig* rsConfig, int myIndex);
+ virtual ~QuorumChecker();
+
+ virtual std::vector<RemoteCommandRequest> getRequests() const;
+ virtual void processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response);
+
+ virtual bool hasReceivedSufficientResponses() const;
+
+ Status getFinalStatus() const {
+ return _finalStatus;
+ }
+private:
/**
- * Performs a quorum call to determine if a sufficient number of nodes are up
- * to initiate a replica set with configuration "rsConfig".
+ * Callback that executes after _haveReceivedSufficientReplies() becomes true.
*
- * "myIndex" is the index of this node's member configuration in "rsConfig".
- * "executor" is the event loop in which to schedule network/aysnchronous processing.
- *
- * For purposes of initiate, a quorum is only met if all of the following conditions
- * are met:
- * - All nodes respond.
- * - No nodes other than the node running the quorum check have data.
- * - No nodes are already joined to a replica set.
- * - No node reports a replica set name other than the one in "rsConfig".
+ * Computes the quorum result based on responses received so far, stores it into
+ * _finalStatus, and enables QuorumChecker::run() to return.
*/
- Status checkQuorumForInitiate(ReplicationExecutor* executor,
- const ReplicaSetConfig& rsConfig,
- const int myIndex);
+ void _onQuorumCheckComplete();
/**
- * Performs a quorum call to determine if a sufficient number of nodes are up
- * to replace the current replica set configuration with "rsConfig".
- *
- * "myIndex" is the index of this node's member configuration in "rsConfig".
- * "executor" is the event loop in which to schedule network/aysnchronous processing.
- *
- * For purposes of reconfig, a quorum is only met if all of the following conditions
- * are met:
- * - A majority of voting nodes respond.
- * - At least one electable node responds.
- * - No responding node reports a replica set name other than the one in "rsConfig".
- * - All responding nodes report a config version less than the one in "rsConfig".
+ * Updates the QuorumChecker state based on the data from a single heartbeat response.
*/
- Status checkQuorumForReconfig(ReplicationExecutor* executor,
- const ReplicaSetConfig& rsConfig,
- const int myIndex);
+ void _tabulateHeartbeatResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response);
+
+ // Pointer to the replica set configuration for which we're checking quorum.
+ const ReplicaSetConfig* const _rsConfig;
+
+ // Index of the local node's member configuration in _rsConfig.
+ const int _myIndex;
+
+ // List of voting nodes that have responded affirmatively.
+ std::vector<HostAndPort> _voters;
+
+ // List of nodes with bad responses and the bad response status they returned.
+ std::vector<std::pair<HostAndPort, Status>> _badResponses;
+
+ // Total number of responses and timeouts processed.
+ int _numResponses;
+
+ // Number of electable nodes that have responded affirmatively.
+ int _numElectable;
+
+ // Set to a non-OK status if a response from a remote node indicates
+ // that the quorum check should definitely fail, such as because of
+ // a replica set name mismatch.
+ Status _vetoStatus;
+
+ // Final status of the quorum check, returned by run().
+ Status _finalStatus;
+};
+
+/**
+ * Performs a quorum call to determine if a sufficient number of nodes are up
+ * to initiate a replica set with configuration "rsConfig".
+ *
+ * "myIndex" is the index of this node's member configuration in "rsConfig".
+ * "executor" is the event loop in which to schedule network/aysnchronous processing.
+ *
+ * For purposes of initiate, a quorum is only met if all of the following conditions
+ * are met:
+ * - All nodes respond.
+ * - No nodes other than the node running the quorum check have data.
+ * - No nodes are already joined to a replica set.
+ * - No node reports a replica set name other than the one in "rsConfig".
+ */
+Status checkQuorumForInitiate(ReplicationExecutor* executor,
+ const ReplicaSetConfig& rsConfig,
+ const int myIndex);
+
+/**
+ * Performs a quorum call to determine if a sufficient number of nodes are up
+ * to replace the current replica set configuration with "rsConfig".
+ *
+ * "myIndex" is the index of this node's member configuration in "rsConfig".
+ * "executor" is the event loop in which to schedule network/aysnchronous processing.
+ *
+ * For purposes of reconfig, a quorum is only met if all of the following conditions
+ * are met:
+ * - A majority of voting nodes respond.
+ * - At least one electable node responds.
+ * - No responding node reports a replica set name other than the one in "rsConfig".
+ * - All responding nodes report a config version less than the one in "rsConfig".
+ */
+Status checkQuorumForReconfig(ReplicationExecutor* executor,
+ const ReplicaSetConfig& rsConfig,
+ const int myIndex);
} // namespace repl
} // namespace mongo
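A rough usage sketch of the two functions declared above (illustrative only, not part of the patch; "executor" is assumed to be a running ReplicationExecutor and "rsConfig" a validated ReplicaSetConfig, as in the tests that follow):

    // Hypothetical caller, sketched against the declarations above.
    Status runQuorumCheck(ReplicationExecutor* executor,
                          const ReplicaSetConfig& rsConfig,
                          int myIndex,
                          bool isInitiate) {
        // Initiate demands that every proposed member respond and be empty;
        // reconfig only needs a majority of voters plus one electable responder.
        return isInitiate ? checkQuorumForInitiate(executor, rsConfig, myIndex)
                          : checkQuorumForReconfig(executor, rsConfig, myIndex);
    }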
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 8f0e01ec00b..7b3d869b47b 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -45,756 +45,770 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
-#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) << \
- #STATUS ".reason() == " << s_.reason(); \
+#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) \
+ << #STATUS ".reason() == " << s_.reason(); \
} while (false)
-#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) << \
- #STATUS ".reason() == " << s_.reason(); \
+#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) \
+ << #STATUS ".reason() == " << s_.reason(); \
} while (false)
namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
-
- class CheckQuorumTest : public mongo::unittest::Test {
- protected:
- CheckQuorumTest();
-
- void startQuorumCheck(const ReplicaSetConfig& config, int myIndex);
- Status waitForQuorumCheck();
- bool isQuorumCheckDone();
-
- NetworkInterfaceMock* _net;
- StorageInterfaceMock* _storage;
- std::unique_ptr<ReplicationExecutor> _executor;
-
- private:
- void setUp();
- void tearDown();
-
- void _runQuorumCheck(const ReplicaSetConfig& config, int myIndex);
- virtual Status _runQuorumCheckImpl(const ReplicaSetConfig& config, int myIndex) = 0;
-
- std::unique_ptr<stdx::thread> _executorThread;
- std::unique_ptr<stdx::thread> _quorumCheckThread;
- Status _quorumCheckStatus;
- stdx::mutex _mutex;
- bool _isQuorumCheckDone;
- };
-
- CheckQuorumTest::CheckQuorumTest() :
- _quorumCheckStatus(ErrorCodes::InternalError, "Not executed") {
- }
-
- void CheckQuorumTest::setUp() {
- _net = new NetworkInterfaceMock;
- _storage = new StorageInterfaceMock;
- _executor.reset(new ReplicationExecutor(_net, _storage, 1 /* prng */ ));
- _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run,
- _executor.get())));
- }
-
- void CheckQuorumTest::tearDown() {
- _executor->shutdown();
- _executorThread->join();
- }
-
- void CheckQuorumTest::startQuorumCheck(const ReplicaSetConfig& config, int myIndex) {
- ASSERT_FALSE(_quorumCheckThread);
- _isQuorumCheckDone = false;
- _quorumCheckThread.reset(new stdx::thread(stdx::bind(&CheckQuorumTest::_runQuorumCheck,
- this,
- config,
- myIndex)));
- }
-
- Status CheckQuorumTest::waitForQuorumCheck() {
- ASSERT_TRUE(_quorumCheckThread);
- _quorumCheckThread->join();
- return _quorumCheckStatus;
+using executor::NetworkInterfaceMock;
+
+class CheckQuorumTest : public mongo::unittest::Test {
+protected:
+ CheckQuorumTest();
+
+ void startQuorumCheck(const ReplicaSetConfig& config, int myIndex);
+ Status waitForQuorumCheck();
+ bool isQuorumCheckDone();
+
+ NetworkInterfaceMock* _net;
+ StorageInterfaceMock* _storage;
+ std::unique_ptr<ReplicationExecutor> _executor;
+
+private:
+ void setUp();
+ void tearDown();
+
+ void _runQuorumCheck(const ReplicaSetConfig& config, int myIndex);
+ virtual Status _runQuorumCheckImpl(const ReplicaSetConfig& config, int myIndex) = 0;
+
+ std::unique_ptr<stdx::thread> _executorThread;
+ std::unique_ptr<stdx::thread> _quorumCheckThread;
+ Status _quorumCheckStatus;
+ stdx::mutex _mutex;
+ bool _isQuorumCheckDone;
+};
+
+CheckQuorumTest::CheckQuorumTest()
+ : _quorumCheckStatus(ErrorCodes::InternalError, "Not executed") {}
+
+void CheckQuorumTest::setUp() {
+ _net = new NetworkInterfaceMock;
+ _storage = new StorageInterfaceMock;
+ _executor.reset(new ReplicationExecutor(_net, _storage, 1 /* prng */));
+ _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run, _executor.get())));
+}
+
+void CheckQuorumTest::tearDown() {
+ _executor->shutdown();
+ _executorThread->join();
+}
+
+void CheckQuorumTest::startQuorumCheck(const ReplicaSetConfig& config, int myIndex) {
+ ASSERT_FALSE(_quorumCheckThread);
+ _isQuorumCheckDone = false;
+ _quorumCheckThread.reset(
+ new stdx::thread(stdx::bind(&CheckQuorumTest::_runQuorumCheck, this, config, myIndex)));
+}
+
+Status CheckQuorumTest::waitForQuorumCheck() {
+ ASSERT_TRUE(_quorumCheckThread);
+ _quorumCheckThread->join();
+ return _quorumCheckStatus;
+}
+
+bool CheckQuorumTest::isQuorumCheckDone() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _isQuorumCheckDone;
+}
+
+void CheckQuorumTest::_runQuorumCheck(const ReplicaSetConfig& config, int myIndex) {
+ _quorumCheckStatus = _runQuorumCheckImpl(config, myIndex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _isQuorumCheckDone = true;
+}
+
+class CheckQuorumForInitiate : public CheckQuorumTest {
+private:
+ virtual Status _runQuorumCheckImpl(const ReplicaSetConfig& config, int myIndex) {
+ return checkQuorumForInitiate(_executor.get(), config, myIndex);
}
+};
- bool CheckQuorumTest::isQuorumCheckDone() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _isQuorumCheckDone;
+class CheckQuorumForReconfig : public CheckQuorumTest {
+protected:
+ virtual Status _runQuorumCheckImpl(const ReplicaSetConfig& config, int myIndex) {
+ return checkQuorumForReconfig(_executor.get(), config, myIndex);
}
-
- void CheckQuorumTest::_runQuorumCheck(const ReplicaSetConfig& config, int myIndex) {
- _quorumCheckStatus = _runQuorumCheckImpl(config, myIndex);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _isQuorumCheckDone = true;
- }
-
- class CheckQuorumForInitiate : public CheckQuorumTest {
- private:
- virtual Status _runQuorumCheckImpl(const ReplicaSetConfig& config, int myIndex) {
- return checkQuorumForInitiate(_executor.get(), config, myIndex);
- }
- };
-
- class CheckQuorumForReconfig : public CheckQuorumTest {
- protected:
- virtual Status _runQuorumCheckImpl(const ReplicaSetConfig& config, int myIndex) {
- return checkQuorumForReconfig(_executor.get(), config, myIndex);
- }
- };
-
- ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(configBson));
- ASSERT_OK(config.validate());
- return config;
+};
+
+ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(configBson));
+ ASSERT_OK(config.validate());
+ return config;
+}
+
+TEST_F(CheckQuorumForInitiate, ValidSingleNodeSet) {
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
+ startQuorumCheck(config, 0);
+ ASSERT_OK(waitForQuorumCheck());
+}
+
+TEST_F(CheckQuorumForInitiate, QuorumCheckCanceledByShutdown) {
+ _executor->shutdown();
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
+ startQuorumCheck(config, 0);
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, waitForQuorumCheck());
+}
+
+TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
+ // In this test, "we" are host "h3:1". All other nodes time out on
+ // their heartbeat request, and so the quorum check for initiate
+ // will fail because some members were unavailable.
+ ReplicaSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1") << BSON("_id" << 5 << "host"
+ << "h5:1"))));
+ startQuorumCheck(config, 2);
+ _net->enterNetwork();
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = config.getNumMembers() - 1;
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ _net->scheduleResponse(_net->getNextReadyRequest(),
+ startDate + Milliseconds(10),
+ ResponseStatus(ErrorCodes::NoSuchKey, "No reply"));
}
-
- TEST_F(CheckQuorumForInitiate, ValidSingleNodeSet) {
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1"))));
- startQuorumCheck(config, 0);
- ASSERT_OK(waitForQuorumCheck());
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
+ ASSERT_REASON_CONTAINS(
+ status, "replSetInitiate quorum check failed because not all proposed set members");
+ ASSERT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_REASON_CONTAINS(status, "h2:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
+ ASSERT_REASON_CONTAINS(status, "h4:1");
+ ASSERT_REASON_CONTAINS(status, "h5:1");
+}
+
+const BSONObj makeHeartbeatRequest(const ReplicaSetConfig& rsConfig, int myConfigIndex) {
+ const MemberConfig& myConfig = rsConfig.getMemberAt(myConfigIndex);
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setSetName(rsConfig.getReplSetName());
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setConfigVersion(rsConfig.getConfigVersion());
+ hbArgs.setCheckEmpty(rsConfig.getConfigVersion() == 1);
+ hbArgs.setSenderHost(myConfig.getHostAndPort());
+ hbArgs.setSenderId(myConfig.getId());
+ return hbArgs.toBSON();
+}
+
+TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
+ // In this test, "we" are host "h3:1". All nodes respond successfully to their heartbeat
+ // requests, and the quorum check succeeds.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1") << BSON("_id" << 5 << "host"
+ << "h5:1"))));
+ const int myConfigIndex = 2;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(8))));
}
-
- TEST_F(CheckQuorumForInitiate, QuorumCheckCanceledByShutdown) {
- _executor->shutdown();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1"))));
- startQuorumCheck(config, 0);
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, waitForQuorumCheck());
- }
-
- TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
- // In this test, "we" are host "h3:1". All other nodes time out on
- // their heartbeat request, and so the quorum check for initiate
- // will fail because some members were unavailable.
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1") <<
- BSON("_id" << 5 << "host" << "h5:1"))));
- startQuorumCheck(config, 2);
- _net->enterNetwork();
- const Date_t startDate = _net->now();
- const int numCommandsExpected = config.getNumMembers() - 1;
- for (int i = 0; i < numCommandsExpected; ++i) {
- _net->scheduleResponse(_net->getNextReadyRequest(),
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_OK(waitForQuorumCheck());
+}
+
+TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
+ // In this test, "we" are host "h3:1". All nodes except "h2:1" respond
+ // successfully to their heartbeat requests, but quorum check fails because
+ // all nodes must be available for initiate. This is so even though "h2"
+ // is neither voting nor electable.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1"
+ << "priority" << 0 << "votes" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3:1") << BSON("_id" << 4 << "host"
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1") << BSON("_id" << 6 << "host"
+ << "h6:1"))));
+ const int myConfigIndex = 2;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h2", 1)) {
+ _net->scheduleResponse(noi,
startDate + Milliseconds(10),
- ResponseStatus(ErrorCodes::NoSuchKey, "No reply"));
+ ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
+ } else {
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
- ASSERT_REASON_CONTAINS(
- status, "replSetInitiate quorum check failed because not all proposed set members");
- ASSERT_REASON_CONTAINS(status, "h1:1");
- ASSERT_REASON_CONTAINS(status, "h2:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
- ASSERT_REASON_CONTAINS(status, "h4:1");
- ASSERT_REASON_CONTAINS(status, "h5:1");
}
-
- const BSONObj makeHeartbeatRequest(const ReplicaSetConfig& rsConfig, int myConfigIndex) {
- const MemberConfig& myConfig = rsConfig.getMemberAt(myConfigIndex);
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setSetName(rsConfig.getReplSetName());
- hbArgs.setProtocolVersion(1);
- hbArgs.setConfigVersion(rsConfig.getConfigVersion());
- hbArgs.setCheckEmpty(rsConfig.getConfigVersion() == 1);
- hbArgs.setSenderHost(myConfig.getHostAndPort());
- hbArgs.setSenderId(myConfig.getId());
- return hbArgs.toBSON();
- }
-
- TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
- // In this test, "we" are host "h3:1". All nodes respond successfully to their heartbeat
- // requests, and the quorum check succeeds.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1") <<
- BSON("_id" << 5 << "host" << "h5:1"))));
- const int myConfigIndex = 2;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
+ ASSERT_REASON_CONTAINS(
+ status, "replSetInitiate quorum check failed because not all proposed set members");
+ ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_REASON_CONTAINS(status, "h2:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h5:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h6:1");
+}
+
+TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
+ // In this test, "we" are host "h3:1". All nodes respond
+ // successfully to their heartbeat requests, but quorum check fails because
+ // "h4" declares that the requested replica set name was not what it expected.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1") << BSON("_id" << 5 << "host"
+ << "h5:1"))));
+ const int myConfigIndex = 2;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h4", 1)) {
_net->scheduleResponse(noi,
startDate + Milliseconds(10),
ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1), Milliseconds(8))));
+ BSON("ok" << 0 << "mismatch" << true), Milliseconds(8))));
+ } else {
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_OK(waitForQuorumCheck());
}
-
- TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
- // In this test, "we" are host "h3:1". All nodes except "h2:1" respond
- // successfully to their heartbeat requests, but quorum check fails because
- // all nodes must be available for initiate. This is so even though "h2"
- // is neither voting nor electable.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1" <<
- "priority" << 0 << "votes" << 0) <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1") <<
- BSON("_id" << 5 << "host" << "h5:1") <<
- BSON("_id" << 6 << "host" << "h6:1"))));
- const int myConfigIndex = 2;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h2", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
- }
- else {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1), Milliseconds(8))));
- }
- }
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
- ASSERT_REASON_CONTAINS(
- status, "replSetInitiate quorum check failed because not all proposed set members");
- ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
- ASSERT_REASON_CONTAINS(status, "h2:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h5:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h6:1");
- }
-
- TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
- // In this test, "we" are host "h3:1". All nodes respond
- // successfully to their heartbeat requests, but quorum check fails because
- // "h4" declares that the requested replica set name was not what it expected.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1") <<
- BSON("_id" << 5 << "host" << "h5:1"))));
- const int myConfigIndex = 2;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h4", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 0 << "mismatch" << true),
- Milliseconds(8))));
- }
- else {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1), Milliseconds(8))));
- }
- }
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
- ASSERT_REASON_CONTAINS(
- status, "Our set name did not match");
- ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
- ASSERT_REASON_CONTAINS(status, "h4:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h5:1");
- }
-
- TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
- // In this test, "we" are host "h3:1". All nodes respond
- // successfully to their heartbeat requests, but quorum check fails because
- // "h5" declares that it is already initialized.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1") <<
- BSON("_id" << 5 << "host" << "h5:1"))));
- const int myConfigIndex = 2;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h5", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 0 <<
- "set" << "rs0" <<
- "v" << 1),
- Milliseconds(8))));
- }
- else {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1), Milliseconds(8))));
- }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
+ ASSERT_REASON_CONTAINS(status, "Our set name did not match");
+ ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
+ ASSERT_REASON_CONTAINS(status, "h4:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h5:1");
+}
+
+TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
+ // In this test, "we" are host "h3:1". All nodes respond
+ // successfully to their heartbeat requests, but quorum check fails because
+ // "h5" declares that it is already initialized.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1") << BSON("_id" << 5 << "host"
+ << "h5:1"))));
+ const int myConfigIndex = 2;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h5", 1)) {
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
+ << "rs0"
+ << "v" << 1),
+ Milliseconds(8))));
+ } else {
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
- ASSERT_REASON_CONTAINS(
- status, "Our config version of");
- ASSERT_REASON_CONTAINS(
- status, "is no larger than the version");
- ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
- ASSERT_REASON_CONTAINS(status, "h5:1");
}
-
- TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespondent) {
- // In this test, "we" are host "h3:1". Only node "h5" responds before the test completes,
- // and quorum check fails because "h5" declares that it is already initialized.
- //
- // Compare to QuorumCheckFailedDueToInitializedNode, above.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1") <<
- BSON("_id" << 5 << "host" << "h5:1"))));
- const int myConfigIndex = 2;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h5", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 0 <<
- "set" << "rs0" <<
- "v" << 1),
- Milliseconds(8))));
- }
- else {
- _net->blackHole(noi);
- }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
+ ASSERT_REASON_CONTAINS(status, "Our config version of");
+ ASSERT_REASON_CONTAINS(status, "is no larger than the version");
+ ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
+ ASSERT_REASON_CONTAINS(status, "h5:1");
+}
+
+TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespondent) {
+ // In this test, "we" are host "h3:1". Only node "h5" responds before the test completes,
+ // and quorum check fails because "h5" declares that it is already initialized.
+ //
+ // Compare to QuorumCheckFailedDueToInitializedNode, above.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1") << BSON("_id" << 5 << "host"
+ << "h5:1"))));
+ const int myConfigIndex = 2;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h5", 1)) {
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
+ << "rs0"
+ << "v" << 1),
+ Milliseconds(8))));
+ } else {
+ _net->blackHole(noi);
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
- ASSERT_REASON_CONTAINS(
- status, "Our config version of");
- ASSERT_REASON_CONTAINS(
- status, "is no larger than the version");
- ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
- ASSERT_REASON_CONTAINS(status, "h5:1");
}
-
- TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToNodeWithData) {
- // In this test, "we" are host "h3:1". Only node "h5" responds before the test completes,
- // and quorum check fails because "h5" declares that it has data already.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1") <<
- BSON("_id" << 5 << "host" << "h5:1"))));
- const int myConfigIndex = 2;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- ReplSetHeartbeatResponse hbResp;
- hbResp.setConfigVersion(0);
- hbResp.noteHasData();
- if (request.target == HostAndPort("h5", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- hbResp.toBSON(false),
- Milliseconds(8))));
- }
- else {
- _net->blackHole(noi);
- }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
+ ASSERT_REASON_CONTAINS(status, "Our config version of");
+ ASSERT_REASON_CONTAINS(status, "is no larger than the version");
+ ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
+ ASSERT_REASON_CONTAINS(status, "h5:1");
+}
+
+TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToNodeWithData) {
+ // In this test, "we" are host "h3:1". Only node "h5" responds before the test completes,
+ // and quorum check fails because "h5" declares that it has data already.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1") << BSON("_id" << 5 << "host"
+ << "h5:1"))));
+ const int myConfigIndex = 2;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setConfigVersion(0);
+ hbResp.noteHasData();
+ if (request.target == HostAndPort("h5", 1)) {
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(hbResp.toBSON(false), Milliseconds(8))));
+ } else {
+ _net->blackHole(noi);
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::CannotInitializeNodeWithData, status);
- ASSERT_REASON_CONTAINS(
- status, "has data already");
- ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
- ASSERT_REASON_CONTAINS(status, "h5:1");
}
- TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
- // In this test, "we" are host "h3:1". The request to "h2" does not arrive before the end
- // of the test, and the request to "h1" comes back indicating a higher config version.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1"))));
- const int myConfigIndex = 2;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h1", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 0 <<
- "set" << "rs0" <<
- "v" << 5),
- Milliseconds(8))));
- }
- else {
- _net->blackHole(noi);
- }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::CannotInitializeNodeWithData, status);
+ ASSERT_REASON_CONTAINS(status, "has data already");
+ ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
+ ASSERT_REASON_CONTAINS(status, "h5:1");
+}
+TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
+ // In this test, "we" are host "h3:1". The request to "h2" does not arrive before the end
+ // of the test, and the request to "h1" comes back indicating a higher config version.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1"))));
+ const int myConfigIndex = 2;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h1", 1)) {
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
+ << "rs0"
+ << "v" << 5),
+ Milliseconds(8))));
+ } else {
+ _net->blackHole(noi);
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
- ASSERT_REASON_CONTAINS(
- status, "Our config version of");
- ASSERT_REASON_CONTAINS(
- status, "is no larger than the version");
- ASSERT_REASON_CONTAINS(status, "h1:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
}
-
- TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
- // In this test, "we" are host "h3:1". The request to "h1" times out,
- // and the request to "h2" comes back indicating an incompatible set name.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1"))));
- const int myConfigIndex = 2;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h2", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 0 << "mismatch" << true),
- Milliseconds(8))));
- }
- else {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
- }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
+ ASSERT_REASON_CONTAINS(status, "Our config version of");
+ ASSERT_REASON_CONTAINS(status, "is no larger than the version");
+ ASSERT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
+}
+
+TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
+ // In this test, "we" are host "h3:1". The request to "h1" times out,
+ // and the request to "h2" comes back indicating an incompatible set name.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1"))));
+ const int myConfigIndex = 2;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h2", 1)) {
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(
+ BSON("ok" << 0 << "mismatch" << true), Milliseconds(8))));
+ } else {
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
- ASSERT_REASON_CONTAINS(status, "Our set name did not match");
- ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
- ASSERT_REASON_CONTAINS(status, "h2:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
-
}
-
- TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
- // In this test, "we" are host "h4". Only "h1", "h2" and "h3" are voters,
- // and of the voters, only "h1" responds. As a result, quorum check fails.
- // "h5" also responds, but because it cannot vote, is irrelevant for the reconfig
- // quorum check.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1" << "votes" << 0) <<
- BSON("_id" << 5 << "host" << "h5:1" << "votes" << 0))));
- const int myConfigIndex = 3;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h5", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(8))));
- }
- else {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
- }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
+ ASSERT_REASON_CONTAINS(status, "Our set name did not match");
+ ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_REASON_CONTAINS(status, "h2:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
+}
+
+TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
+ // In this test, "we" are host "h4". Only "h1", "h2" and "h3" are voters,
+ // and of the voters, only "h1" responds. As a result, quorum check fails.
+ // "h5" also responds, but because it cannot vote, is irrelevant for the reconfig
+ // quorum check.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "votes" << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "votes" << 0))));
+ const int myConfigIndex = 3;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h5", 1)) {
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(8))));
+ } else {
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
- ASSERT_REASON_CONTAINS(status, "not enough voting nodes responded; required 2 but only");
- ASSERT_REASON_CONTAINS(status, "h1:1");
- ASSERT_REASON_CONTAINS(status, "h2:1 failed with");
- ASSERT_REASON_CONTAINS(status, "h3:1 failed with");
- ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
- ASSERT_NOT_REASON_CONTAINS(status, "h5:1");
}
-
- TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
- // In this test, "we" are host "h4". Only "h1", "h2" and "h3" are electable,
- // and none of them respond.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1" << "priority" << 0) <<
- BSON("_id" << 5 << "host" << "h5:1" << "priority" << 0))));
- const int myConfigIndex = 3;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h5", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(8))));
- }
- else {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
- }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
+ ASSERT_REASON_CONTAINS(status, "not enough voting nodes responded; required 2 but only");
+ ASSERT_REASON_CONTAINS(status, "h1:1");
+ ASSERT_REASON_CONTAINS(status, "h2:1 failed with");
+ ASSERT_REASON_CONTAINS(status, "h3:1 failed with");
+ ASSERT_NOT_REASON_CONTAINS(status, "h4:1");
+ ASSERT_NOT_REASON_CONTAINS(status, "h5:1");
+}
+
+TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
+ // In this test, "we" are host "h4". Only "h1", "h2" and "h3" are electable,
+ // and none of them respond.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "priority" << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "priority" << 0))));
+ const int myConfigIndex = 3;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h5", 1)) {
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(8))));
+ } else {
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- Status status = waitForQuorumCheck();
- ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
- ASSERT_REASON_CONTAINS(status, "no electable nodes responded");
}
-
- TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
- // In this test, "we" are host "h4". Only "h1", "h2" and "h3" can vote.
- // This test should succeed as soon as h1 and h2 respond, so we block
- // h3 and h5 from responding or timing out until the test completes.
-
- const ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1" << "votes" << 0) <<
- BSON("_id" << 5 << "host" << "h5:1" << "votes" << 0))));
- const int myConfigIndex = 3;
- const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
-
- startQuorumCheck(rsConfig, myConfigIndex);
- const Date_t startDate = _net->now();
- const int numCommandsExpected = rsConfig.getNumMembers() - 1;
- unordered_set<HostAndPort> seenHosts;
- _net->enterNetwork();
- for (int i = 0; i < numCommandsExpected; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT(seenHosts.insert(request.target).second) <<
- "Already saw " << request.target.toString();
- if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h2", 1)) {
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(8))));
- }
- else {
- _net->blackHole(noi);
- }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ Status status = waitForQuorumCheck();
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
+ ASSERT_REASON_CONTAINS(status, "no electable nodes responded");
+}
+
+TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
+ // In this test, "we" are host "h4". Only "h1", "h2" and "h3" can vote.
+ // This test should succeed as soon as h1 and h2 respond, so we block
+ // h3 and h5 from responding or timing out until the test completes.
+
+ const ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "votes" << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "votes" << 0))));
+ const int myConfigIndex = 3;
+ const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
+
+ startQuorumCheck(rsConfig, myConfigIndex);
+ const Date_t startDate = _net->now();
+ const int numCommandsExpected = rsConfig.getNumMembers() - 1;
+ unordered_set<HostAndPort> seenHosts;
+ _net->enterNetwork();
+ for (int i = 0; i < numCommandsExpected; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS("admin", request.dbname);
+ ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT(seenHosts.insert(request.target).second) << "Already saw "
+ << request.target.toString();
+ if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h2", 1)) {
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(8))));
+ } else {
+ _net->blackHole(noi);
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_OK(waitForQuorumCheck());
}
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_OK(waitForQuorumCheck());
+}
} // namespace
} // namespace repl
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 5abe3c2ed84..1f71fe762e7 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -39,273 +39,269 @@
namespace mongo {
namespace repl {
- CollectionCloner::CollectionCloner(ReplicationExecutor* executor,
- const HostAndPort& source,
- const NamespaceString& sourceNss,
- const CollectionOptions& options,
- const CallbackFn& onCompletion,
- StorageInterface* storageInterface)
- : _executor(executor),
- _source(source),
- _sourceNss(sourceNss),
- _destNss(_sourceNss),
- _options(options),
- _onCompletion(onCompletion),
- _storageInterface(storageInterface),
- _active(false),
- _listIndexesFetcher(_executor,
- _source,
- _sourceNss.db().toString(),
- BSON("listIndexes" << _sourceNss.coll()),
- stdx::bind(&CollectionCloner::_listIndexesCallback,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2,
- stdx::placeholders::_3)),
- _findFetcher(_executor,
- _source,
- _sourceNss.db().toString(),
- BSON("find" << _sourceNss.coll() <<
- "noCursorTimeout" << true), // SERVER-1387
- stdx::bind(&CollectionCloner::_findCallback,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2,
- stdx::placeholders::_3)),
- _indexSpecs(),
- _documents(),
- _dbWorkCallbackHandle(),
- _scheduleDbWorkFn([this](const ReplicationExecutor::CallbackFn& work) {
- return _executor->scheduleDBWork(work);
- }) {
-
- uassert(ErrorCodes::BadValue, "null replication executor", executor);
- uassert(ErrorCodes::BadValue, "invalid collection namespace: " + sourceNss.ns(),
- sourceNss.isValid());
- uassertStatusOK(options.validate());
- uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
- uassert(ErrorCodes::BadValue, "null storage interface", storageInterface);
+CollectionCloner::CollectionCloner(ReplicationExecutor* executor,
+ const HostAndPort& source,
+ const NamespaceString& sourceNss,
+ const CollectionOptions& options,
+ const CallbackFn& onCompletion,
+ StorageInterface* storageInterface)
+ : _executor(executor),
+ _source(source),
+ _sourceNss(sourceNss),
+ _destNss(_sourceNss),
+ _options(options),
+ _onCompletion(onCompletion),
+ _storageInterface(storageInterface),
+ _active(false),
+ _listIndexesFetcher(_executor,
+ _source,
+ _sourceNss.db().toString(),
+ BSON("listIndexes" << _sourceNss.coll()),
+ stdx::bind(&CollectionCloner::_listIndexesCallback,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ stdx::placeholders::_3)),
+ _findFetcher(_executor,
+ _source,
+ _sourceNss.db().toString(),
+ BSON("find" << _sourceNss.coll() << "noCursorTimeout" << true), // SERVER-1387
+ stdx::bind(&CollectionCloner::_findCallback,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ stdx::placeholders::_3)),
+ _indexSpecs(),
+ _documents(),
+ _dbWorkCallbackHandle(),
+ _scheduleDbWorkFn([this](const ReplicationExecutor::CallbackFn& work) {
+ return _executor->scheduleDBWork(work);
+ }) {
+ uassert(ErrorCodes::BadValue, "null replication executor", executor);
+ uassert(ErrorCodes::BadValue,
+ "invalid collection namespace: " + sourceNss.ns(),
+ sourceNss.isValid());
+ uassertStatusOK(options.validate());
+ uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
+ uassert(ErrorCodes::BadValue, "null storage interface", storageInterface);
+}
+
+CollectionCloner::~CollectionCloner() {
+ DESTRUCTOR_GUARD(cancel(); wait(););
+}
+
+const NamespaceString& CollectionCloner::getSourceNamespace() const {
+ return _sourceNss;
+}
+
+std::string CollectionCloner::getDiagnosticString() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ str::stream output;
+ output << "CollectionCloner";
+ output << " executor: " << _executor->getDiagnosticString();
+ output << " source: " << _source.toString();
+ output << " source namespace: " << _sourceNss.toString();
+ output << " destination namespace: " << _destNss.toString();
+ output << " collection options: " << _options.toBSON();
+ output << " active: " << _active;
+ output << " listIndexes fetcher: " << _listIndexesFetcher.getDiagnosticString();
+ output << " find fetcher: " << _findFetcher.getDiagnosticString();
+ output << " database worked callback handle: " << (_dbWorkCallbackHandle.isValid() ? "valid"
+ : "invalid");
+ return output;
+}
+
+bool CollectionCloner::isActive() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _active;
+}
+
+Status CollectionCloner::start() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+
+ if (_active) {
+ return Status(ErrorCodes::IllegalOperation, "collection cloner already started");
}
- CollectionCloner::~CollectionCloner() {
- DESTRUCTOR_GUARD(
- cancel();
- wait();
- );
+ Status scheduleResult = _listIndexesFetcher.schedule();
+ if (!scheduleResult.isOK()) {
+ return scheduleResult;
}
- const NamespaceString& CollectionCloner::getSourceNamespace() const {
- return _sourceNss;
- }
+ _active = true;
- std::string CollectionCloner::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- str::stream output;
- output << "CollectionCloner";
- output << " executor: " << _executor->getDiagnosticString();
- output << " source: " << _source.toString();
- output << " source namespace: " << _sourceNss.toString();
- output << " destination namespace: " << _destNss.toString();
- output << " collection options: " << _options.toBSON();
- output << " active: " << _active;
- output << " listIndexes fetcher: " << _listIndexesFetcher.getDiagnosticString();
- output << " find fetcher: " << _findFetcher.getDiagnosticString();
- output << " database worked callback handle: "
- << (_dbWorkCallbackHandle.isValid() ? "valid" : "invalid");
- return output;
- }
+ return Status::OK();
+}
- bool CollectionCloner::isActive() const {
+void CollectionCloner::cancel() {
+ ReplicationExecutor::CallbackHandle dbWorkCallbackHandle;
+ {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _active;
- }
- Status CollectionCloner::start() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
-
- if (_active) {
- return Status(ErrorCodes::IllegalOperation, "collection cloner already started");
+ if (!_active) {
+ return;
}
- Status scheduleResult = _listIndexesFetcher.schedule();
- if (!scheduleResult.isOK()) {
- return scheduleResult;
- }
+ dbWorkCallbackHandle = _dbWorkCallbackHandle;
+ }
- _active = true;
+ _listIndexesFetcher.cancel();
+ _findFetcher.cancel();
- return Status::OK();
+ if (dbWorkCallbackHandle.isValid()) {
+ _executor->cancel(dbWorkCallbackHandle);
}
+}
- void CollectionCloner::cancel() {
- ReplicationExecutor::CallbackHandle dbWorkCallbackHandle;
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+void CollectionCloner::wait() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _condition.wait(lk, [this]() { return !_active; });
+}
- if (!_active) {
- return;
- }
+void CollectionCloner::waitForDbWorker() {
+ ReplicationExecutor::CallbackHandle dbWorkCallbackHandle;
+ {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
- dbWorkCallbackHandle = _dbWorkCallbackHandle;
+ if (!_active) {
+ return;
}
- _listIndexesFetcher.cancel();
- _findFetcher.cancel();
-
- if (dbWorkCallbackHandle.isValid()) {
- _executor->cancel(dbWorkCallbackHandle);
- }
+ dbWorkCallbackHandle = _dbWorkCallbackHandle;
}
- void CollectionCloner::wait() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _condition.wait(lk, [this]() { return !_active; });
+ if (dbWorkCallbackHandle.isValid()) {
+ _executor->wait(dbWorkCallbackHandle);
}
+}
- void CollectionCloner::waitForDbWorker() {
- ReplicationExecutor::CallbackHandle dbWorkCallbackHandle;
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
-
- if (!_active) {
- return;
- }
+void CollectionCloner::setScheduleDbWorkFn(const ScheduleDbWorkFn& scheduleDbWorkFn) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
- dbWorkCallbackHandle = _dbWorkCallbackHandle;
- }
+ _scheduleDbWorkFn = scheduleDbWorkFn;
+}
- if (dbWorkCallbackHandle.isValid()) {
- _executor->wait(dbWorkCallbackHandle);
- }
+void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& fetchResult,
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
+ if (!fetchResult.isOK()) {
+ _finishCallback(nullptr, fetchResult.getStatus());
+ return;
}
- void CollectionCloner::setScheduleDbWorkFn(const ScheduleDbWorkFn& scheduleDbWorkFn) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ auto batchData(fetchResult.getValue());
+ auto&& documents = batchData.documents;
- _scheduleDbWorkFn = scheduleDbWorkFn;
+ if (documents.empty()) {
+ warning() << "No indexes found for collection " << _sourceNss.ns() << " while cloning from "
+ << _source;
}
- void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& fetchResult,
- Fetcher::NextAction* nextAction,
- BSONObjBuilder* getMoreBob) {
- if (!fetchResult.isOK()) {
- _finishCallback(nullptr, fetchResult.getStatus());
- return;
- }
-
- auto batchData(fetchResult.getValue());
- auto&& documents = batchData.documents;
-
- if (documents.empty()) {
- warning() << "No indexes found for collection " << _sourceNss.ns()
- << " while cloning from " << _source;
- }
-
- // We may be called with multiple batches leading to a need to grow _indexSpecs.
- _indexSpecs.reserve(_indexSpecs.size() + documents.size());
- _indexSpecs.insert(_indexSpecs.end(), documents.begin(), documents.end());
-
- // The fetcher will continue to call with kGetMore until an error or the last batch.
- if (*nextAction == Fetcher::NextAction::kGetMore) {
- invariant(getMoreBob);
- getMoreBob->append("getMore", batchData.cursorId);
- getMoreBob->append("collection", batchData.nss.coll());
- return;
- }
-
- // We have all of the indexes now, so we can start cloning the collection data.
- auto&& scheduleResult = _scheduleDbWorkFn(
- stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1));
- if (!scheduleResult.isOK()) {
- _finishCallback(nullptr, scheduleResult.getStatus());
- return;
- }
+ // We may be called with multiple batches leading to a need to grow _indexSpecs.
+ _indexSpecs.reserve(_indexSpecs.size() + documents.size());
+ _indexSpecs.insert(_indexSpecs.end(), documents.begin(), documents.end());
- _dbWorkCallbackHandle = scheduleResult.getValue();
+ // The fetcher will continue to call with kGetMore until an error or the last batch.
+ if (*nextAction == Fetcher::NextAction::kGetMore) {
+ invariant(getMoreBob);
+ getMoreBob->append("getMore", batchData.cursorId);
+ getMoreBob->append("collection", batchData.nss.coll());
+ return;
}
- void CollectionCloner::_findCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult,
- Fetcher::NextAction* nextAction,
- BSONObjBuilder* getMoreBob) {
- if (!fetchResult.isOK()) {
- _finishCallback(nullptr, fetchResult.getStatus());
- return;
- }
+ // We have all of the indexes now, so we can start cloning the collection data.
+ auto&& scheduleResult = _scheduleDbWorkFn(
+ stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1));
+ if (!scheduleResult.isOK()) {
+ _finishCallback(nullptr, scheduleResult.getStatus());
+ return;
+ }
- auto batchData(fetchResult.getValue());
- _documents = batchData.documents;
+ _dbWorkCallbackHandle = scheduleResult.getValue();
+}
- bool lastBatch = *nextAction == Fetcher::NextAction::kNoAction;
- auto&& scheduleResult = _scheduleDbWorkFn(stdx::bind(
- &CollectionCloner::_insertDocumentsCallback, this, stdx::placeholders::_1, lastBatch));
- if (!scheduleResult.isOK()) {
- _finishCallback(nullptr, scheduleResult.getStatus());
- return;
- }
+void CollectionCloner::_findCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult,
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
+ if (!fetchResult.isOK()) {
+ _finishCallback(nullptr, fetchResult.getStatus());
+ return;
+ }
- if (*nextAction == Fetcher::NextAction::kGetMore) {
- invariant(getMoreBob);
- getMoreBob->append("getMore", batchData.cursorId);
- getMoreBob->append("collection", batchData.nss.coll());
- }
+ auto batchData(fetchResult.getValue());
+ _documents = batchData.documents;
- _dbWorkCallbackHandle = scheduleResult.getValue();
+ bool lastBatch = *nextAction == Fetcher::NextAction::kNoAction;
+ auto&& scheduleResult = _scheduleDbWorkFn(stdx::bind(
+ &CollectionCloner::_insertDocumentsCallback, this, stdx::placeholders::_1, lastBatch));
+ if (!scheduleResult.isOK()) {
+ _finishCallback(nullptr, scheduleResult.getStatus());
+ return;
}
- void CollectionCloner::_beginCollectionCallback(const ReplicationExecutor::CallbackArgs& cbd) {
- OperationContext* txn = cbd.txn;
- if (!cbd.status.isOK()) {
- _finishCallback(txn, cbd.status);
- return;
- }
+ if (*nextAction == Fetcher::NextAction::kGetMore) {
+ invariant(getMoreBob);
+ getMoreBob->append("getMore", batchData.cursorId);
+ getMoreBob->append("collection", batchData.nss.coll());
+ }
- Status status = _storageInterface->beginCollection(txn, _destNss, _options, _indexSpecs);
- if (!status.isOK()) {
- _finishCallback(txn, status);
- return;
- }
+ _dbWorkCallbackHandle = scheduleResult.getValue();
+}
- Status scheduleStatus = _findFetcher.schedule();
- if (!scheduleStatus.isOK()) {
- _finishCallback(txn, scheduleStatus);
- return;
- }
+void CollectionCloner::_beginCollectionCallback(const ReplicationExecutor::CallbackArgs& cbd) {
+ OperationContext* txn = cbd.txn;
+ if (!cbd.status.isOK()) {
+ _finishCallback(txn, cbd.status);
+ return;
}
- void CollectionCloner::_insertDocumentsCallback(const ReplicationExecutor::CallbackArgs& cbd,
- bool lastBatch) {
- OperationContext* txn = cbd.txn;
- if (!cbd.status.isOK()) {
- _finishCallback(txn, cbd.status);
- return;
- }
+ Status status = _storageInterface->beginCollection(txn, _destNss, _options, _indexSpecs);
+ if (!status.isOK()) {
+ _finishCallback(txn, status);
+ return;
+ }
- Status status = _storageInterface->insertDocuments(txn, _destNss, _documents);
- if (!status.isOK()) {
- _finishCallback(txn, status);
- return;
- }
+ Status scheduleStatus = _findFetcher.schedule();
+ if (!scheduleStatus.isOK()) {
+ _finishCallback(txn, scheduleStatus);
+ return;
+ }
+}
+
+void CollectionCloner::_insertDocumentsCallback(const ReplicationExecutor::CallbackArgs& cbd,
+ bool lastBatch) {
+ OperationContext* txn = cbd.txn;
+ if (!cbd.status.isOK()) {
+ _finishCallback(txn, cbd.status);
+ return;
+ }
- if (!lastBatch) {
- return;
- }
+ Status status = _storageInterface->insertDocuments(txn, _destNss, _documents);
+ if (!status.isOK()) {
+ _finishCallback(txn, status);
+ return;
+ }
- _finishCallback(txn, Status::OK());
+ if (!lastBatch) {
+ return;
}
- void CollectionCloner::_finishCallback(OperationContext* txn, const Status& status) {
- if (status.isOK()) {
- auto commitStatus = _storageInterface->commitCollection(txn, _destNss);
- if (!commitStatus.isOK()) {
- warning() << "Failed to commit changes to collection " << _destNss.ns()
- << ": " << commitStatus;
- }
+ _finishCallback(txn, Status::OK());
+}
+
+void CollectionCloner::_finishCallback(OperationContext* txn, const Status& status) {
+ if (status.isOK()) {
+ auto commitStatus = _storageInterface->commitCollection(txn, _destNss);
+ if (!commitStatus.isOK()) {
+ warning() << "Failed to commit changes to collection " << _destNss.ns() << ": "
+ << commitStatus;
}
- _onCompletion(status);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _active = false;
- _condition.notify_all();
}
-
-} // namespace repl
-} // namespace mongo
+ _onCompletion(status);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _active = false;
+ _condition.notify_all();
+}
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h
index 69f3caa1f18..cf69d7f44ef 100644
--- a/src/mongo/db/repl/collection_cloner.h
+++ b/src/mongo/db/repl/collection_cloner.h
@@ -48,217 +48,212 @@
namespace mongo {
namespace repl {
- class CollectionCloner : public BaseCloner {
- MONGO_DISALLOW_COPYING(CollectionCloner);
- public:
-
- /**
- * Storage interface for collection cloner.
- *
- * Supports the operations on the storage layer required by the cloner.
- */
- class StorageInterface;
-
- /**
- * Type of function to schedule database work with the executor.
- *
- * Must be consistent with ReplicationExecutor::scheduleWorkWithGlobalExclusiveLock().
- *
- * Used for testing only.
- */
- using ScheduleDbWorkFn = stdx::function<StatusWith<ReplicationExecutor::CallbackHandle> (
- const ReplicationExecutor::CallbackFn&)>;
-
- /**
- * Creates CollectionCloner task in inactive state. Use start() to activate cloner.
- *
- * The cloner calls 'onCompletion' when the collection cloning has completed or failed.
- *
- * 'onCompletion' will be called exactly once.
- *
- * Takes ownership of the passed StorageInterface object.
- */
- CollectionCloner(ReplicationExecutor* executor,
- const HostAndPort& source,
- const NamespaceString& sourceNss,
- const CollectionOptions& options,
- const CallbackFn& onCompletion,
- StorageInterface* storageInterface);
-
- virtual ~CollectionCloner();
-
- const NamespaceString& getSourceNamespace() const;
-
- std::string getDiagnosticString() const override;
-
- bool isActive() const override;
-
- Status start() override;
-
- void cancel() override;
-
- void wait() override;
-
- //
- // Testing only functions below.
- //
-
- /**
- * Waits for database worker to complete.
- * Returns immediately if collection cloner is not active.
- *
- * For testing only.
- */
- void waitForDbWorker();
-
- /**
- * Overrides how executor schedules database work.
- *
- * For testing only.
- */
- void setScheduleDbWorkFn(const ScheduleDbWorkFn& scheduleDbWorkFn);
-
- private:
-
- /**
- * Read index specs from listIndexes result.
- */
- void _listIndexesCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult,
- Fetcher::NextAction* nextAction,
- BSONObjBuilder* getMoreBob);
-
- /**
- * Read collection documents from find result.
- */
- void _findCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult,
- Fetcher::NextAction* nextAction,
- BSONObjBuilder* getMoreBob);
-
- /**
- * Request storage interface to create collection.
- *
- * Called multiple times if there are more than one batch of responses from listIndexes
- * cursor.
- *
- * 'nextAction' is an in/out arg indicating the next action planned and to be taken
- * by the fetcher.
- */
- void _beginCollectionCallback(const ReplicationExecutor::CallbackArgs& callbackData);
-
- /**
- * Called multiple times if there are more than one batch of documents from the fetcher.
- * On the last batch, 'lastBatch' will be true.
- *
- * Each document returned will be inserted via the storage interfaceRequest storage
- * interface.
- */
- void _insertDocumentsCallback(const ReplicationExecutor::CallbackArgs& callbackData,
- bool lastBatch);
-
- /**
- * Reports completion status.
- * Commits/aborts collection building.
- * Sets cloner to inactive.
- */
- void _finishCallback(OperationContext* txn, const Status& status);
-
- // Not owned by us.
- ReplicationExecutor* _executor;
-
- HostAndPort _source;
- NamespaceString _sourceNss;
- NamespaceString _destNss;
- CollectionOptions _options;
-
- // Invoked once when cloning completes or fails.
- CallbackFn _onCompletion;
-
- // Not owned by us.
- StorageInterface* _storageInterface;
-
- // Protects member data of this collection cloner.
- mutable stdx::mutex _mutex;
+class CollectionCloner : public BaseCloner {
+ MONGO_DISALLOW_COPYING(CollectionCloner);
- mutable stdx::condition_variable _condition;
+public:
+ /**
+ * Storage interface for collection cloner.
+ *
+ * Supports the operations on the storage layer required by the cloner.
+ */
+ class StorageInterface;
+
+ /**
+ * Type of function to schedule database work with the executor.
+ *
+ * Must be consistent with ReplicationExecutor::scheduleWorkWithGlobalExclusiveLock().
+ *
+ * Used for testing only.
+ */
+ using ScheduleDbWorkFn = stdx::function<StatusWith<ReplicationExecutor::CallbackHandle>(
+ const ReplicationExecutor::CallbackFn&)>;
+
+ /**
+ * Creates CollectionCloner task in inactive state. Use start() to activate cloner.
+ *
+ * The cloner calls 'onCompletion' when the collection cloning has completed or failed.
+ *
+ * 'onCompletion' will be called exactly once.
+ *
+ * Takes ownership of the passed StorageInterface object.
+ */
+ CollectionCloner(ReplicationExecutor* executor,
+ const HostAndPort& source,
+ const NamespaceString& sourceNss,
+ const CollectionOptions& options,
+ const CallbackFn& onCompletion,
+ StorageInterface* storageInterface);
+
+ virtual ~CollectionCloner();
+
+ const NamespaceString& getSourceNamespace() const;
+
+ std::string getDiagnosticString() const override;
+
+ bool isActive() const override;
+
+ Status start() override;
- // _active is true when Collection Cloner is started.
- bool _active;
+ void cancel() override;
- // Fetcher instances for running listIndexes and find commands.
- Fetcher _listIndexesFetcher;
- Fetcher _findFetcher;
+ void wait() override;
- std::vector<BSONObj> _indexSpecs;
-
- // Current batch of documents read from fetcher to insert into collection.
- std::vector<BSONObj> _documents;
-
- // Callback handle for database worker.
- ReplicationExecutor::CallbackHandle _dbWorkCallbackHandle;
-
- // Function for scheduling database work using the executor.
- ScheduleDbWorkFn _scheduleDbWorkFn;
-
- };
+ //
+ // Testing only functions below.
+ //
/**
- * Storage interface used by the collection cloner to build a collection.
+ * Waits for database worker to complete.
+ * Returns immediately if collection cloner is not active.
*
- * Operation context is provided by the replication executor via the cloner.
+ * For testing only.
+ */
+ void waitForDbWorker();
+
+ /**
+ * Overrides how executor schedules database work.
*
- * The storage interface is expected to acquire locks on any resources it needs
- * to perform any of its functions.
+ * For testing only.
+ */
+ void setScheduleDbWorkFn(const ScheduleDbWorkFn& scheduleDbWorkFn);
+
+private:
+ /**
+ * Read index specs from listIndexes result.
+ */
+ void _listIndexesCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult,
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob);
+
+ /**
+ * Read collection documents from find result.
+ */
+ void _findCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult,
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob);
+
+ /**
+ * Request storage interface to create collection.
+ *
+     * Called multiple times if there is more than one batch of responses from the listIndexes
+     * cursor.
*
- * TODO: Consider having commit/abort/cancel functions.
+ * 'nextAction' is an in/out arg indicating the next action planned and to be taken
+ * by the fetcher.
*/
- class CollectionCloner::StorageInterface {
- public:
-
- virtual ~StorageInterface() = default;
-
- /**
- * Creates a collection with the provided indexes.
- *
- * Assume that no database locks have been acquired prior to calling this
- * function.
- */
- virtual Status beginCollection(OperationContext* txn,
- const NamespaceString& nss,
- const CollectionOptions& options,
- const std::vector<BSONObj>& indexSpecs) = 0;
-
- /**
- * Inserts documents into a collection.
- *
- * Assume that no database locks have been acquired prior to calling this
- * function.
- */
- virtual Status insertDocuments(OperationContext* txn,
- const NamespaceString& nss,
- const std::vector<BSONObj>& documents) = 0;
-
- /**
- * Commits changes to collection. No effect if collection building has not begun.
- * Operation context could be null.
- */
- virtual Status commitCollection(OperationContext* txn,
- const NamespaceString& nss) = 0;
-
- /**
- * Inserts missing document into a collection (not related to insertDocuments above),
- * during initial sync retry logic
- */
- virtual Status insertMissingDoc(OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& doc) = 0;
-
- /**
- * Inserts missing document into a collection (not related to insertDocuments above),
- * during initial sync retry logic
- */
- virtual Status dropUserDatabases(OperationContext* txn) = 0;
-
- };
-
-} // namespace repl
-} // namespace mongo
+ void _beginCollectionCallback(const ReplicationExecutor::CallbackArgs& callbackData);
+
+ /**
+     * Called multiple times if there is more than one batch of documents from the fetcher.
+ * On the last batch, 'lastBatch' will be true.
+ *
+     * Each document returned will be inserted via the storage interface.
+ */
+ void _insertDocumentsCallback(const ReplicationExecutor::CallbackArgs& callbackData,
+ bool lastBatch);
+
+ /**
+ * Reports completion status.
+ * Commits/aborts collection building.
+ * Sets cloner to inactive.
+ */
+ void _finishCallback(OperationContext* txn, const Status& status);
+
+ // Not owned by us.
+ ReplicationExecutor* _executor;
+
+ HostAndPort _source;
+ NamespaceString _sourceNss;
+ NamespaceString _destNss;
+ CollectionOptions _options;
+
+ // Invoked once when cloning completes or fails.
+ CallbackFn _onCompletion;
+
+ // Not owned by us.
+ StorageInterface* _storageInterface;
+
+ // Protects member data of this collection cloner.
+ mutable stdx::mutex _mutex;
+
+ mutable stdx::condition_variable _condition;
+
+ // _active is true when Collection Cloner is started.
+ bool _active;
+
+ // Fetcher instances for running listIndexes and find commands.
+ Fetcher _listIndexesFetcher;
+ Fetcher _findFetcher;
+
+ std::vector<BSONObj> _indexSpecs;
+
+ // Current batch of documents read from fetcher to insert into collection.
+ std::vector<BSONObj> _documents;
+
+ // Callback handle for database worker.
+ ReplicationExecutor::CallbackHandle _dbWorkCallbackHandle;
+
+ // Function for scheduling database work using the executor.
+ ScheduleDbWorkFn _scheduleDbWorkFn;
+};
+
+/**
+ * Storage interface used by the collection cloner to build a collection.
+ *
+ * Operation context is provided by the replication executor via the cloner.
+ *
+ * The storage interface is expected to acquire locks on any resources it needs
+ * to perform any of its functions.
+ *
+ * TODO: Consider having commit/abort/cancel functions.
+ */
+class CollectionCloner::StorageInterface {
+public:
+ virtual ~StorageInterface() = default;
+
+ /**
+ * Creates a collection with the provided indexes.
+ *
+ * Assume that no database locks have been acquired prior to calling this
+ * function.
+ */
+ virtual Status beginCollection(OperationContext* txn,
+ const NamespaceString& nss,
+ const CollectionOptions& options,
+ const std::vector<BSONObj>& indexSpecs) = 0;
+
+ /**
+ * Inserts documents into a collection.
+ *
+ * Assume that no database locks have been acquired prior to calling this
+ * function.
+ */
+ virtual Status insertDocuments(OperationContext* txn,
+ const NamespaceString& nss,
+ const std::vector<BSONObj>& documents) = 0;
+
+ /**
+ * Commits changes to collection. No effect if collection building has not begun.
+ * Operation context could be null.
+ */
+ virtual Status commitCollection(OperationContext* txn, const NamespaceString& nss) = 0;
+
+ /**
+     * Inserts a missing document into a collection (not related to insertDocuments above)
+     * during initial sync retry logic.
+ */
+ virtual Status insertMissingDoc(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& doc) = 0;
+
+ /**
+     * Drops all user databases. Used during initial sync.
+ */
+ virtual Status dropUserDatabases(OperationContext* txn) = 0;
+};
+
+} // namespace repl
+} // namespace mongo
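A minimal usage sketch of the CollectionCloner contract documented above (constructed inactive,
activated with start(), onCompletion invoked exactly once, wait() blocking until the cloner goes
inactive), assuming caller-owned 'myExecutor' (a ReplicationExecutor*) and 'myStorage' (a
CollectionCloner::StorageInterface*); both names, the host, and the namespace are hypothetical
placeholders.

    // Sketch only: 'myExecutor' and 'myStorage' are assumed to be valid, caller-owned pointers.
    Status cloneResult = Status::OK();
    CollectionCloner cloner(myExecutor,
                            HostAndPort("syncsource.example.net", 27017),
                            NamespaceString("test.coll"),
                            CollectionOptions(),
                            [&cloneResult](const Status& status) {
                                // Invoked exactly once when cloning completes or fails.
                                cloneResult = status;
                            },
                            myStorage);
    Status startStatus = cloner.start();  // schedules the listIndexes fetcher
    if (startStatus.isOK()) {
        cloner.wait();  // blocks until the cloner becomes inactive
    }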
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 4a6c4d2bc03..8e41238ff7c 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -39,451 +39,454 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
-
- class CollectionClonerTest : public BaseClonerTest {
- public:
-
- BaseCloner* getCloner() const override;
-
- protected:
-
- void setUp() override;
- void tearDown() override;
-
- CollectionOptions options;
- std::unique_ptr<CollectionCloner> collectionCloner;
- };
-
- void CollectionClonerTest::setUp() {
- BaseClonerTest::setUp();
- options.reset();
- options.storageEngine = BSON("storageEngine1" << BSONObj());
- collectionCloner.reset(new CollectionCloner(&getExecutor(), target, nss, options,
- stdx::bind(&CollectionClonerTest::setStatus,
- this,
- stdx::placeholders::_1),
- storageInterface.get()));
+using namespace mongo;
+using namespace mongo::repl;
+
+class CollectionClonerTest : public BaseClonerTest {
+public:
+ BaseCloner* getCloner() const override;
+
+protected:
+ void setUp() override;
+ void tearDown() override;
+
+ CollectionOptions options;
+ std::unique_ptr<CollectionCloner> collectionCloner;
+};
+
+void CollectionClonerTest::setUp() {
+ BaseClonerTest::setUp();
+ options.reset();
+ options.storageEngine = BSON("storageEngine1" << BSONObj());
+ collectionCloner.reset(new CollectionCloner(
+ &getExecutor(),
+ target,
+ nss,
+ options,
+ stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1),
+ storageInterface.get()));
+}
+
+void CollectionClonerTest::tearDown() {
+ BaseClonerTest::tearDown();
+ // Executor may still invoke collection cloner's callback before shutting down.
+ collectionCloner.reset();
+ options.reset();
+}
+
+BaseCloner* CollectionClonerTest::getCloner() const {
+ return collectionCloner.get();
+}
+
+TEST_F(CollectionClonerTest, InvalidConstruction) {
+ ReplicationExecutor& executor = getExecutor();
+
+ const auto& cb = [](const Status&) { FAIL("should not reach here"); };
+
+ // Null executor.
+ {
+ CollectionCloner::StorageInterface* si = storageInterface.get();
+ ASSERT_THROWS(CollectionCloner(nullptr, target, nss, options, cb, si), UserException);
}
- void CollectionClonerTest::tearDown() {
- BaseClonerTest::tearDown();
- // Executor may still invoke collection cloner's callback before shutting down.
- collectionCloner.reset();
- options.reset();
- }
+ // Null storage interface
+ ASSERT_THROWS(CollectionCloner(&executor, target, nss, options, cb, nullptr), UserException);
- BaseCloner* CollectionClonerTest::getCloner() const {
- return collectionCloner.get();
+ // Invalid namespace.
+ {
+ NamespaceString badNss("db.");
+ CollectionCloner::StorageInterface* si = storageInterface.get();
+ ASSERT_THROWS(CollectionCloner(&executor, target, badNss, options, cb, si), UserException);
}
- TEST_F(CollectionClonerTest, InvalidConstruction) {
- ReplicationExecutor& executor = getExecutor();
-
- const auto& cb = [](const Status&) { FAIL("should not reach here"); };
-
- // Null executor.
- {
- CollectionCloner::StorageInterface* si = storageInterface.get();
- ASSERT_THROWS(CollectionCloner(nullptr, target, nss, options, cb, si), UserException);
- }
-
- // Null storage interface
- ASSERT_THROWS(CollectionCloner(&executor, target, nss, options, cb, nullptr),
+ // Invalid collection options.
+ {
+ CollectionOptions invalidOptions;
+ invalidOptions.storageEngine = BSON("storageEngine1"
+ << "not a document");
+ CollectionCloner::StorageInterface* si = storageInterface.get();
+ ASSERT_THROWS(CollectionCloner(&executor, target, nss, invalidOptions, cb, si),
UserException);
-
- // Invalid namespace.
- {
- NamespaceString badNss("db.");
- CollectionCloner::StorageInterface* si = storageInterface.get();
- ASSERT_THROWS(CollectionCloner(&executor, target, badNss, options, cb, si),
- UserException);
- }
-
- // Invalid collection options.
- {
- CollectionOptions invalidOptions;
- invalidOptions.storageEngine = BSON("storageEngine1" << "not a document");
- CollectionCloner::StorageInterface* si = storageInterface.get();
- ASSERT_THROWS(CollectionCloner(&executor, target, nss, invalidOptions, cb, si),
- UserException);
- }
-
- // Callback function cannot be null.
- {
- CollectionCloner::CallbackFn nullCb;
- CollectionCloner::StorageInterface* si = storageInterface.get();
- ASSERT_THROWS(CollectionCloner(&executor, target, nss, options, nullCb, si),
- UserException);
- }
- }
-
- TEST_F(CollectionClonerTest, ClonerLifeCycle) {
- testLifeCycle();
- }
-
- TEST_F(CollectionClonerTest, FirstRemoteCommand) {
- ASSERT_OK(collectionCloner->start());
-
- auto net = getNet();
- ASSERT_TRUE(net->hasReadyRequests());
- NetworkOperationIterator noi = net->getNextReadyRequest();
- auto&& noiRequest = noi->getRequest();
- ASSERT_EQUALS(nss.db().toString(), noiRequest.dbname);
- ASSERT_EQUALS("listIndexes", std::string(noiRequest.cmdObj.firstElementFieldName()));
- ASSERT_EQUALS(nss.coll().toString(), noiRequest.cmdObj.firstElement().valuestrsafe());
- ASSERT_FALSE(net->hasReadyRequests());
- ASSERT_TRUE(collectionCloner->isActive());
}
- TEST_F(CollectionClonerTest, RemoteCollectionMissing) {
- ASSERT_OK(collectionCloner->start());
-
- processNetworkResponse(
- BSON("ok" << 0 << "errmsg" << "" << "code" << ErrorCodes::NamespaceNotFound));
-
- ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
+ // Callback function cannot be null.
+ {
+ CollectionCloner::CallbackFn nullCb;
+ CollectionCloner::StorageInterface* si = storageInterface.get();
+ ASSERT_THROWS(CollectionCloner(&executor, target, nss, options, nullCb, si), UserException);
}
+}
+
+TEST_F(CollectionClonerTest, ClonerLifeCycle) {
+ testLifeCycle();
+}
+
+TEST_F(CollectionClonerTest, FirstRemoteCommand) {
+ ASSERT_OK(collectionCloner->start());
+
+ auto net = getNet();
+ ASSERT_TRUE(net->hasReadyRequests());
+ NetworkOperationIterator noi = net->getNextReadyRequest();
+ auto&& noiRequest = noi->getRequest();
+ ASSERT_EQUALS(nss.db().toString(), noiRequest.dbname);
+ ASSERT_EQUALS("listIndexes", std::string(noiRequest.cmdObj.firstElementFieldName()));
+ ASSERT_EQUALS(nss.coll().toString(), noiRequest.cmdObj.firstElement().valuestrsafe());
+ ASSERT_FALSE(net->hasReadyRequests());
+ ASSERT_TRUE(collectionCloner->isActive());
+}
+
+TEST_F(CollectionClonerTest, RemoteCollectionMissing) {
+ ASSERT_OK(collectionCloner->start());
+
+ processNetworkResponse(BSON("ok" << 0 << "errmsg"
+ << ""
+ << "code" << ErrorCodes::NamespaceNotFound));
+
+ ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
+
+// A collection may have no indexes. The cloner will produce a warning but
+// will still proceed with cloning.
+TEST_F(CollectionClonerTest, ListIndexesReturnedNoIndexes) {
+ ASSERT_OK(collectionCloner->start());
+
+    // Using a non-zero cursor id to ensure that
+    // the cloner stops the fetcher from retrieving more results.
+ processNetworkResponse(createListIndexesResponse(1, BSONArray()));
+
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(collectionCloner->isActive());
+
+ ASSERT_TRUE(getNet()->hasReadyRequests());
+}
+
+TEST_F(CollectionClonerTest, BeginCollectionScheduleDbWorkFailed) {
+ ASSERT_OK(collectionCloner->start());
+
+ // Replace scheduleDbWork function so that cloner will fail to schedule DB work after
+ // getting index specs.
+ collectionCloner->setScheduleDbWorkFn([](const ReplicationExecutor::CallbackFn& workFn) {
+ return StatusWith<ReplicationExecutor::CallbackHandle>(ErrorCodes::UnknownError, "");
+ });
+
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+
+ ASSERT_EQUALS(ErrorCodes::UnknownError, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
+
+TEST_F(CollectionClonerTest, BeginCollectionCallbackCanceled) {
+ ASSERT_OK(collectionCloner->start());
+
+ // Replace scheduleDbWork function so that the callback for beginCollection is canceled
+ // immediately after scheduling.
+ auto&& executor = getExecutor();
+ collectionCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
+ // Schedule as non-exclusive task to allow us to cancel it before the executor is able
+ // to invoke the callback.
+ auto scheduleResult = executor.scheduleWork(workFn);
+ ASSERT_OK(scheduleResult.getStatus());
+ executor.cancel(scheduleResult.getValue());
+ return scheduleResult;
+ });
+
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+
+ collectionCloner->waitForDbWorker();
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
+
+TEST_F(CollectionClonerTest, BeginCollectionFailed) {
+ ASSERT_OK(collectionCloner->start());
+
+ storageInterface->beginCollectionFn = [&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const CollectionOptions& theOptions,
+ const std::vector<BSONObj>& theIndexSpecs) {
+ return Status(ErrorCodes::OperationFailed, "");
+ };
- // A collection may have no indexes. The cloner will produce a warning but
- // will still proceed with cloning.
- TEST_F(CollectionClonerTest, ListIndexesReturnedNoIndexes) {
- ASSERT_OK(collectionCloner->start());
-
- // Using a non-zero cursor to ensure that
- // the cloner stops the fetcher from retrieving more results.
- processNetworkResponse(createListIndexesResponse(1, BSONArray()));
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+
+ collectionCloner->waitForDbWorker();
+
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
+
+TEST_F(CollectionClonerTest, BeginCollection) {
+ ASSERT_OK(collectionCloner->start());
+
+ NamespaceString collNss;
+ CollectionOptions collOptions;
+ std::vector<BSONObj> collIndexSpecs;
+ storageInterface->beginCollectionFn = [&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const CollectionOptions& theOptions,
+ const std::vector<BSONObj>& theIndexSpecs) {
+ ASSERT(txn);
+ collNss = theNss;
+ collOptions = theOptions;
+ collIndexSpecs = theIndexSpecs;
+ return Status::OK();
+ };
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(collectionCloner->isActive());
+ // Split listIndexes response into 2 batches: first batch contains specs[0] and specs[1];
+ // second batch contains specs[2]
+ const std::vector<BSONObj> specs = {idIndexSpec,
+ BSON("v" << 1 << "key" << BSON("a" << 1) << "name"
+ << "a_1"
+ << "ns" << nss.ns()),
+ BSON("v" << 1 << "key" << BSON("b" << 1) << "name"
+ << "b_1"
+ << "ns" << nss.ns())};
- ASSERT_TRUE(getNet()->hasReadyRequests());
- }
+ processNetworkResponse(createListIndexesResponse(1, BSON_ARRAY(specs[0] << specs[1])));
- TEST_F(CollectionClonerTest, BeginCollectionScheduleDbWorkFailed) {
- ASSERT_OK(collectionCloner->start());
+ // 'status' should not be modified because cloning is not finished.
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(collectionCloner->isActive());
- // Replace scheduleDbWork function so that cloner will fail to schedule DB work after
- // getting index specs.
- collectionCloner->setScheduleDbWorkFn([](const ReplicationExecutor::CallbackFn& workFn) {
- return StatusWith<ReplicationExecutor::CallbackHandle>(ErrorCodes::UnknownError, "");
- });
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(specs[2]), "nextBatch"));
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ collectionCloner->waitForDbWorker();
- ASSERT_EQUALS(ErrorCodes::UnknownError, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+ // 'status' will be set if listIndexes fails.
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- TEST_F(CollectionClonerTest, BeginCollectionCallbackCanceled) {
- ASSERT_OK(collectionCloner->start());
-
- // Replace scheduleDbWork function so that the callback for beginCollection is canceled
- // immediately after scheduling.
- auto&& executor = getExecutor();
- collectionCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
- // Schedule as non-exclusive task to allow us to cancel it before the executor is able
- // to invoke the callback.
- auto scheduleResult = executor.scheduleWork(workFn);
- ASSERT_OK(scheduleResult.getStatus());
- executor.cancel(scheduleResult.getValue());
- return scheduleResult;
- });
-
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
-
- collectionCloner->waitForDbWorker();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
+ ASSERT_EQUALS(nss.ns(), collNss.ns());
+ ASSERT_EQUALS(options.toBSON(), collOptions.toBSON());
+ ASSERT_EQUALS(specs.size(), collIndexSpecs.size());
+ for (std::vector<BSONObj>::size_type i = 0; i < specs.size(); ++i) {
+ ASSERT_EQUALS(specs[i], collIndexSpecs[i]);
}
- TEST_F(CollectionClonerTest, BeginCollectionFailed) {
- ASSERT_OK(collectionCloner->start());
+ // Cloner is still active because it has to read the documents from the source collection.
+ ASSERT_TRUE(collectionCloner->isActive());
+}
+
+TEST_F(CollectionClonerTest, FindFetcherScheduleFailed) {
+ ASSERT_OK(collectionCloner->start());
+
+ // Shut down executor while in beginCollection callback.
+ // This will cause the fetcher to fail to schedule the find command.
+ bool collectionCreated = false;
+ storageInterface->beginCollectionFn = [&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const CollectionOptions& theOptions,
+ const std::vector<BSONObj>& theIndexSpecs) {
+ collectionCreated = true;
+ getExecutor().shutdown();
+ return Status::OK();
+ };
- storageInterface->beginCollectionFn = [&](OperationContext* txn,
- const NamespaceString& theNss,
- const CollectionOptions& theOptions,
- const std::vector<BSONObj>& theIndexSpecs) {
- return Status(ErrorCodes::OperationFailed, "");
- };
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ collectionCloner->waitForDbWorker();
+ ASSERT_TRUE(collectionCreated);
- collectionCloner->waitForDbWorker();
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
- ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+TEST_F(CollectionClonerTest, FindCommandAfterBeginCollection) {
+ ASSERT_OK(collectionCloner->start());
- TEST_F(CollectionClonerTest, BeginCollection) {
- ASSERT_OK(collectionCloner->start());
-
- NamespaceString collNss;
- CollectionOptions collOptions;
- std::vector<BSONObj> collIndexSpecs;
- storageInterface->beginCollectionFn = [&](OperationContext* txn,
- const NamespaceString& theNss,
- const CollectionOptions& theOptions,
- const std::vector<BSONObj>& theIndexSpecs) {
- ASSERT(txn);
- collNss = theNss;
- collOptions = theOptions;
- collIndexSpecs = theIndexSpecs;
- return Status::OK();
- };
-
- // Split listIndexes response into 2 batches: first batch contains specs[0] and specs[1];
- // second batch contains specs[2]
- const std::vector<BSONObj> specs = {
- idIndexSpec,
- BSON("v" << 1 << "key" << BSON("a" << 1) << "name" << "a_1" << "ns" << nss.ns()),
- BSON("v" << 1 << "key" << BSON("b" << 1) << "name" << "b_1" << "ns" << nss.ns())};
-
- processNetworkResponse(createListIndexesResponse(1, BSON_ARRAY(specs[0] << specs[1])));
-
- // 'status' should not be modified because cloning is not finished.
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(collectionCloner->isActive());
-
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(specs[2]), "nextBatch"));
-
- collectionCloner->waitForDbWorker();
-
- // 'status' will be set if listIndexes fails.
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
-
- ASSERT_EQUALS(nss.ns(), collNss.ns());
- ASSERT_EQUALS(options.toBSON(), collOptions.toBSON());
- ASSERT_EQUALS(specs.size(), collIndexSpecs.size());
- for (std::vector<BSONObj>::size_type i = 0; i < specs.size(); ++i) {
- ASSERT_EQUALS(specs[i], collIndexSpecs[i]);
- }
-
- // Cloner is still active because it has to read the documents from the source collection.
- ASSERT_TRUE(collectionCloner->isActive());
- }
+ bool collectionCreated = false;
+ storageInterface->beginCollectionFn = [&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const CollectionOptions& theOptions,
+ const std::vector<BSONObj>& theIndexSpecs) {
+ collectionCreated = true;
+ return Status::OK();
+ };
- TEST_F(CollectionClonerTest, FindFetcherScheduleFailed) {
- ASSERT_OK(collectionCloner->start());
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- // Shut down executor while in beginCollection callback.
- // This will cause the fetcher to fail to schedule the find command.
- bool collectionCreated = false;
- storageInterface->beginCollectionFn = [&](OperationContext* txn,
- const NamespaceString& theNss,
- const CollectionOptions& theOptions,
- const std::vector<BSONObj>& theIndexSpecs) {
- collectionCreated = true;
- getExecutor().shutdown();
- return Status::OK();
- };
+ collectionCloner->waitForDbWorker();
+ ASSERT_TRUE(collectionCreated);
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ // Fetcher should be scheduled after cloner creates collection.
+ auto net = getNet();
+ ASSERT_TRUE(net->hasReadyRequests());
+ NetworkOperationIterator noi = net->getNextReadyRequest();
+ auto&& noiRequest = noi->getRequest();
+ ASSERT_EQUALS(nss.db().toString(), noiRequest.dbname);
+ ASSERT_EQUALS("find", std::string(noiRequest.cmdObj.firstElementFieldName()));
+ ASSERT_EQUALS(nss.coll().toString(), noiRequest.cmdObj.firstElement().valuestrsafe());
+ ASSERT_TRUE(noiRequest.cmdObj.getField("noCursorTimeout").trueValue());
+ ASSERT_FALSE(net->hasReadyRequests());
+}
- collectionCloner->waitForDbWorker();
- ASSERT_TRUE(collectionCreated);
+TEST_F(CollectionClonerTest, FindCommandFailed) {
+ ASSERT_OK(collectionCloner->start());
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- TEST_F(CollectionClonerTest, FindCommandAfterBeginCollection) {
- ASSERT_OK(collectionCloner->start());
-
- bool collectionCreated = false;
- storageInterface->beginCollectionFn = [&](OperationContext* txn,
- const NamespaceString& theNss,
- const CollectionOptions& theOptions,
- const std::vector<BSONObj>& theIndexSpecs) {
- collectionCreated = true;
- return Status::OK();
- };
-
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
-
- collectionCloner->waitForDbWorker();
- ASSERT_TRUE(collectionCreated);
-
- // Fetcher should be scheduled after cloner creates collection.
- auto net = getNet();
- ASSERT_TRUE(net->hasReadyRequests());
- NetworkOperationIterator noi = net->getNextReadyRequest();
- auto&& noiRequest = noi->getRequest();
- ASSERT_EQUALS(nss.db().toString(), noiRequest.dbname);
- ASSERT_EQUALS("find", std::string(noiRequest.cmdObj.firstElementFieldName()));
- ASSERT_EQUALS(nss.coll().toString(), noiRequest.cmdObj.firstElement().valuestrsafe());
- ASSERT_TRUE(noiRequest.cmdObj.getField("noCursorTimeout").trueValue());
- ASSERT_FALSE(net->hasReadyRequests());
- }
+ collectionCloner->waitForDbWorker();
- TEST_F(CollectionClonerTest, FindCommandFailed) {
- ASSERT_OK(collectionCloner->start());
+ processNetworkResponse(BSON("ok" << 0 << "errmsg"
+ << ""
+ << "code" << ErrorCodes::CursorNotFound));
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ ASSERT_EQUALS(ErrorCodes::CursorNotFound, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
- collectionCloner->waitForDbWorker();
+TEST_F(CollectionClonerTest, FindCommandCanceled) {
+ ASSERT_OK(collectionCloner->start());
- processNetworkResponse(
- BSON("ok" << 0 << "errmsg" << "" << "code" << ErrorCodes::CursorNotFound));
+ scheduleNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- ASSERT_EQUALS(ErrorCodes::CursorNotFound, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+ auto net = getNet();
+ net->runReadyNetworkOperations();
- TEST_F(CollectionClonerTest, FindCommandCanceled) {
- ASSERT_OK(collectionCloner->start());
+ collectionCloner->waitForDbWorker();
- scheduleNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ scheduleNetworkResponse(BSON("ok" << 1));
- auto net = getNet();
- net->runReadyNetworkOperations();
+ collectionCloner->cancel();
- collectionCloner->waitForDbWorker();
+ net->runReadyNetworkOperations();
- scheduleNetworkResponse(BSON("ok" << 1));
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
- collectionCloner->cancel();
+TEST_F(CollectionClonerTest, InsertDocumentsScheduleDbWorkFailed) {
+ ASSERT_OK(collectionCloner->start());
- net->runReadyNetworkOperations();
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+ collectionCloner->waitForDbWorker();
- TEST_F(CollectionClonerTest, InsertDocumentsScheduleDbWorkFailed) {
- ASSERT_OK(collectionCloner->start());
+ // Replace scheduleDbWork function so that cloner will fail to schedule DB work after
+ // getting documents.
+ collectionCloner->setScheduleDbWorkFn([](const ReplicationExecutor::CallbackFn& workFn) {
+ return StatusWith<ReplicationExecutor::CallbackHandle>(ErrorCodes::UnknownError, "");
+ });
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ const BSONObj doc = BSON("_id" << 1);
+ processNetworkResponse(createCursorResponse(0, BSON_ARRAY(doc)));
- collectionCloner->waitForDbWorker();
+ ASSERT_EQUALS(ErrorCodes::UnknownError, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
- // Replace scheduleDbWork function so that cloner will fail to schedule DB work after
- // getting documents.
- collectionCloner->setScheduleDbWorkFn([](const ReplicationExecutor::CallbackFn& workFn) {
- return StatusWith<ReplicationExecutor::CallbackHandle>(ErrorCodes::UnknownError, "");
- });
+TEST_F(CollectionClonerTest, InsertDocumentsCallbackCanceled) {
+ ASSERT_OK(collectionCloner->start());
- const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(createCursorResponse(0, BSON_ARRAY(doc)));
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- ASSERT_EQUALS(ErrorCodes::UnknownError, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+ collectionCloner->waitForDbWorker();
- TEST_F(CollectionClonerTest, InsertDocumentsCallbackCanceled) {
- ASSERT_OK(collectionCloner->start());
+ // Replace scheduleDbWork function so that the callback for insertDocuments is canceled
+ // immediately after scheduling.
+ auto&& executor = getExecutor();
+ collectionCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
+ // Schedule as non-exclusive task to allow us to cancel it before the executor is able
+ // to invoke the callback.
+ auto scheduleResult = executor.scheduleWork(workFn);
+ ASSERT_OK(scheduleResult.getStatus());
+ executor.cancel(scheduleResult.getValue());
+ return scheduleResult;
+ });
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ const BSONObj doc = BSON("_id" << 1);
+ processNetworkResponse(createCursorResponse(0, BSON_ARRAY(doc)));
- collectionCloner->waitForDbWorker();
+ collectionCloner->waitForDbWorker();
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
- // Replace scheduleDbWork function so that the callback for insertDocuments is canceled
- // immediately after scheduling.
- auto&& executor = getExecutor();
- collectionCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
- // Schedule as non-exclusive task to allow us to cancel it before the executor is able
- // to invoke the callback.
- auto scheduleResult = executor.scheduleWork(workFn);
- ASSERT_OK(scheduleResult.getStatus());
- executor.cancel(scheduleResult.getValue());
- return scheduleResult;
- });
+TEST_F(CollectionClonerTest, InsertDocumentsFailed) {
+ ASSERT_OK(collectionCloner->start());
- const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(createCursorResponse(0, BSON_ARRAY(doc)));
-
- collectionCloner->waitForDbWorker();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
- }
-
- TEST_F(CollectionClonerTest, InsertDocumentsFailed) {
- ASSERT_OK(collectionCloner->start());
-
- bool insertDocumentsCalled = false;
- storageInterface->insertDocumentsFn = [&](OperationContext* txn,
- const NamespaceString& theNss,
- const std::vector<BSONObj>& theDocuments) {
- insertDocumentsCalled = true;
- return Status(ErrorCodes::OperationFailed, "");
- };
+ bool insertDocumentsCalled = false;
+ storageInterface->insertDocumentsFn = [&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const std::vector<BSONObj>& theDocuments) {
+ insertDocumentsCalled = true;
+ return Status(ErrorCodes::OperationFailed, "");
+ };
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- collectionCloner->waitForDbWorker();
+ collectionCloner->waitForDbWorker();
- processNetworkResponse(createCursorResponse(0, BSONArray()));
+ processNetworkResponse(createCursorResponse(0, BSONArray()));
- collectionCloner->wait();
+ collectionCloner->wait();
- ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
- TEST_F(CollectionClonerTest, InsertDocumentsSingleBatch) {
- ASSERT_OK(collectionCloner->start());
+TEST_F(CollectionClonerTest, InsertDocumentsSingleBatch) {
+ ASSERT_OK(collectionCloner->start());
- std::vector<BSONObj> collDocuments;
- storageInterface->insertDocumentsFn = [&](OperationContext* txn,
- const NamespaceString& theNss,
- const std::vector<BSONObj>& theDocuments) {
- ASSERT(txn);
- collDocuments = theDocuments;
- return Status::OK();
- };
+ std::vector<BSONObj> collDocuments;
+ storageInterface->insertDocumentsFn = [&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const std::vector<BSONObj>& theDocuments) {
+ ASSERT(txn);
+ collDocuments = theDocuments;
+ return Status::OK();
+ };
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- collectionCloner->waitForDbWorker();
+ collectionCloner->waitForDbWorker();
- const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(createCursorResponse(0, BSON_ARRAY(doc)));
+ const BSONObj doc = BSON("_id" << 1);
+ processNetworkResponse(createCursorResponse(0, BSON_ARRAY(doc)));
- collectionCloner->waitForDbWorker();
- ASSERT_EQUALS(1U, collDocuments.size());
- ASSERT_EQUALS(doc, collDocuments[0]);
+ collectionCloner->waitForDbWorker();
+ ASSERT_EQUALS(1U, collDocuments.size());
+ ASSERT_EQUALS(doc, collDocuments[0]);
- ASSERT_OK(getStatus());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+ ASSERT_OK(getStatus());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
- TEST_F(CollectionClonerTest, InsertDocumentsMultipleBatches) {
- ASSERT_OK(collectionCloner->start());
+TEST_F(CollectionClonerTest, InsertDocumentsMultipleBatches) {
+ ASSERT_OK(collectionCloner->start());
- std::vector<BSONObj> collDocuments;
- storageInterface->insertDocumentsFn = [&](OperationContext* txn,
- const NamespaceString& theNss,
- const std::vector<BSONObj>& theDocuments) {
- ASSERT(txn);
- collDocuments = theDocuments;
- return Status::OK();
- };
+ std::vector<BSONObj> collDocuments;
+ storageInterface->insertDocumentsFn = [&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const std::vector<BSONObj>& theDocuments) {
+ ASSERT(txn);
+ collDocuments = theDocuments;
+ return Status::OK();
+ };
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- collectionCloner->waitForDbWorker();
+ collectionCloner->waitForDbWorker();
- const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(createCursorResponse(1, BSON_ARRAY(doc)));
+ const BSONObj doc = BSON("_id" << 1);
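+    // Cursor id 1 in this response means more batches remain, so the cloner stays active below.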
+ processNetworkResponse(createCursorResponse(1, BSON_ARRAY(doc)));
- collectionCloner->waitForDbWorker();
- ASSERT_EQUALS(1U, collDocuments.size());
- ASSERT_EQUALS(doc, collDocuments[0]);
+ collectionCloner->waitForDbWorker();
+ ASSERT_EQUALS(1U, collDocuments.size());
+ ASSERT_EQUALS(doc, collDocuments[0]);
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(collectionCloner->isActive());
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(collectionCloner->isActive());
- const BSONObj doc2 = BSON("_id" << 1);
- processNetworkResponse(createCursorResponse(0, BSON_ARRAY(doc2), "nextBatch"));
+ const BSONObj doc2 = BSON("_id" << 1);
+ processNetworkResponse(createCursorResponse(0, BSON_ARRAY(doc2), "nextBatch"));
- collectionCloner->waitForDbWorker();
- ASSERT_EQUALS(1U, collDocuments.size());
- ASSERT_EQUALS(doc2, collDocuments[0]);
+ collectionCloner->waitForDbWorker();
+ ASSERT_EQUALS(1U, collDocuments.size());
+ ASSERT_EQUALS(doc2, collDocuments[0]);
- ASSERT_OK(getStatus());
- ASSERT_FALSE(collectionCloner->isActive());
- }
+ ASSERT_OK(getStatus());
+ ASSERT_FALSE(collectionCloner->isActive());
+}
-} // namespace
+} // namespace
diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp
index 0344608dc78..1eb7c9a0ae3 100644
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
@@ -58,1309 +58,1266 @@
namespace mongo {
namespace repl {
- // Failpoint for initial sync
- MONGO_FP_DECLARE(failInitialSyncWithBadHost);
+// Failpoint for initial sync
+MONGO_FP_DECLARE(failInitialSyncWithBadHost);
namespace {
- // Limit buffer to 256MB
- const size_t kOplogBufferSize = 256 * 1024 * 1024;
+// Limit buffer to 256MB
+const size_t kOplogBufferSize = 256 * 1024 * 1024;
- size_t getSize(const BSONObj& o) {
- // SERVER-9808 Avoid Fortify complaint about implicit signed->unsigned conversion
- return static_cast<size_t>(o.objsize());
- }
+size_t getSize(const BSONObj& o) {
+ // SERVER-9808 Avoid Fortify complaint about implicit signed->unsigned conversion
+ return static_cast<size_t>(o.objsize());
+}
- Timestamp findCommonPoint(HostAndPort host, Timestamp start) {
- // TODO: walk back in the oplog looking for a known/shared optime.
- return Timestamp();
- }
+Timestamp findCommonPoint(HostAndPort host, Timestamp start) {
+ // TODO: walk back in the oplog looking for a known/shared optime.
+ return Timestamp();
+}
-} // namespace
+} // namespace
std::string toString(DataReplicatorState s) {
switch (s) {
- case DataReplicatorState::InitialSync:
- return "InitialSync";
- case DataReplicatorState::Rollback:
- return "Rollback";
- case DataReplicatorState::Steady:
- return "Steady Replication";
- case DataReplicatorState::Uninitialized:
- return "Uninitialized";
+ case DataReplicatorState::InitialSync:
+ return "InitialSync";
+ case DataReplicatorState::Rollback:
+ return "Rollback";
+ case DataReplicatorState::Steady:
+ return "Steady Replication";
+ case DataReplicatorState::Uninitialized:
+ return "Uninitialized";
}
MONGO_UNREACHABLE;
}
- /**
- * Follows the fetcher pattern for a find+getmore on an oplog
- * Returns additional errors if the start oplog entry cannot be found.
- */
- class OplogFetcher : public QueryFetcher {
- MONGO_DISALLOW_COPYING(OplogFetcher);
- public:
- OplogFetcher(ReplicationExecutor* exec,
- const Timestamp& startTS,
- const HostAndPort& src,
- const NamespaceString& nss,
- const QueryFetcher::CallbackFn& work);
-
- virtual ~OplogFetcher() = default;
- std::string toString() const;
-
- const Timestamp getStartTimestamp() const {
- return _startTS;
- }
-
- protected:
-
- void _delegateCallback(const Fetcher::QueryResponseStatus& fetchResult,
- NextAction* nextAction);
-
- const Timestamp _startTS;
- };
-
- // OplogFetcher
- OplogFetcher::OplogFetcher(ReplicationExecutor* exec,
- const Timestamp& startTS,
- const HostAndPort& src,
- const NamespaceString& oplogNSS,
- const QueryFetcher::CallbackFn& work)
- // TODO: add query options await_data, oplog_replay
- : QueryFetcher(exec,
- src,
- oplogNSS,
- BSON("find" << oplogNSS.coll() <<
- "filter" << BSON("ts" << BSON("$gte" << startTS))),
- work),
- _startTS(startTS) {
- }
-
- std::string OplogFetcher::toString() const {
- return str::stream() << "OplogReader -"
- << " startTS: " << _startTS.toString()
- << " fetcher: " << QueryFetcher::getDiagnosticString();
- }
-
- void OplogFetcher::_delegateCallback(const Fetcher::QueryResponseStatus& fetchResult,
- Fetcher::NextAction* nextAction) {
- const bool checkStartTS = _getResponses() == 0;
-
- if (fetchResult.isOK()) {
- Fetcher::Documents::const_iterator firstDoc = fetchResult.getValue().documents.begin();
- auto hasDoc = firstDoc != fetchResult.getValue().documents.end();
-
- if (checkStartTS) {
- if (!hasDoc) {
- // Set next action to none.
- *nextAction = Fetcher::NextAction::kNoAction;
- _onQueryResponse(
- Status(ErrorCodes::OplogStartMissing, str::stream() <<
- "No operations on sync source with op time starting at: " <<
- _startTS.toString()),
- nextAction);
- return;
- } else if ((*firstDoc)["ts"].eoo()) {
- // Set next action to none.
- *nextAction = Fetcher::NextAction::kNoAction;
- _onQueryResponse(
- Status(ErrorCodes::OplogStartMissing, str::stream() <<
- "Missing 'ts' field in first returned " << (*firstDoc)["ts"] <<
- " starting at " << _startTS.toString()),
- nextAction);
- return;
- } else if ((*firstDoc)["ts"].timestamp() != _startTS) {
- // Set next action to none.
- *nextAction = Fetcher::NextAction::kNoAction;
- _onQueryResponse(
- Status(ErrorCodes::OplogStartMissing, str::stream() <<
- "First returned " << (*firstDoc)["ts"] <<
- " is not where we wanted to start: " << _startTS.toString()),
- nextAction);
- return;
- }
+/**
+ * Follows the fetcher pattern for a find+getmore on an oplog
+ * Returns additional errors if the start oplog entry cannot be found.
+ */
+class OplogFetcher : public QueryFetcher {
+ MONGO_DISALLOW_COPYING(OplogFetcher);
- }
+public:
+ OplogFetcher(ReplicationExecutor* exec,
+ const Timestamp& startTS,
+ const HostAndPort& src,
+ const NamespaceString& nss,
+ const QueryFetcher::CallbackFn& work);
- if (hasDoc) {
- _onQueryResponse(fetchResult, nextAction);
- }
- else {
- }
- }
- else {
- _onQueryResponse(fetchResult, nextAction);
- }
- };
+ virtual ~OplogFetcher() = default;
+ std::string toString() const;
- class DatabasesCloner {
- public:
- DatabasesCloner(ReplicationExecutor* exec,
- HostAndPort source,
- stdx::function<void (const Status&)> finishFn)
- : _status(ErrorCodes::NotYetInitialized, ""),
- _exec(exec),
- _source(source),
- _active(false),
- _clonersActive(0),
- _finishFn(finishFn) {
- if (!_finishFn) {
- _status = Status(ErrorCodes::InvalidOptions, "finishFn is not callable.");
- }
- };
-
- Status start();
-
- bool isActive() {
- return _active;
- }
+ const Timestamp getStartTimestamp() const {
+ return _startTS;
+ }
- Status getStatus() {
- return _status;
- }
+protected:
+ void _delegateCallback(const Fetcher::QueryResponseStatus& fetchResult, NextAction* nextAction);
+
+ const Timestamp _startTS;
+};
+
+// OplogFetcher
+OplogFetcher::OplogFetcher(ReplicationExecutor* exec,
+ const Timestamp& startTS,
+ const HostAndPort& src,
+ const NamespaceString& oplogNSS,
+ const QueryFetcher::CallbackFn& work)
+ // TODO: add query options await_data, oplog_replay
+ : QueryFetcher(exec,
+ src,
+ oplogNSS,
+ BSON("find" << oplogNSS.coll() << "filter"
+ << BSON("ts" << BSON("$gte" << startTS))),
+ work),
+ _startTS(startTS) {}
+
+std::string OplogFetcher::toString() const {
+ return str::stream() << "OplogReader -"
+ << " startTS: " << _startTS.toString()
+ << " fetcher: " << QueryFetcher::getDiagnosticString();
+}
- void cancel() {
- if (!_active)
+void OplogFetcher::_delegateCallback(const Fetcher::QueryResponseStatus& fetchResult,
+ Fetcher::NextAction* nextAction) {
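+    // Only the first response must begin exactly at _startTS; later batches are passed straight through.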
+ const bool checkStartTS = _getResponses() == 0;
+
+ if (fetchResult.isOK()) {
+ Fetcher::Documents::const_iterator firstDoc = fetchResult.getValue().documents.begin();
+ auto hasDoc = firstDoc != fetchResult.getValue().documents.end();
+
+ if (checkStartTS) {
+ if (!hasDoc) {
+ // Set next action to none.
+ *nextAction = Fetcher::NextAction::kNoAction;
+ _onQueryResponse(
+ Status(ErrorCodes::OplogStartMissing,
+ str::stream()
+ << "No operations on sync source with op time starting at: "
+ << _startTS.toString()),
+ nextAction);
+ return;
+ } else if ((*firstDoc)["ts"].eoo()) {
+ // Set next action to none.
+ *nextAction = Fetcher::NextAction::kNoAction;
+ _onQueryResponse(Status(ErrorCodes::OplogStartMissing,
+ str::stream() << "Missing 'ts' field in first returned "
+ << (*firstDoc)["ts"] << " starting at "
+ << _startTS.toString()),
+ nextAction);
+ return;
+ } else if ((*firstDoc)["ts"].timestamp() != _startTS) {
+ // Set next action to none.
+ *nextAction = Fetcher::NextAction::kNoAction;
+ _onQueryResponse(Status(ErrorCodes::OplogStartMissing,
+ str::stream() << "First returned " << (*firstDoc)["ts"]
+ << " is not where we wanted to start: "
+ << _startTS.toString()),
+ nextAction);
return;
- _active = false;
- // TODO: cancel all cloners
- _setStatus(Status(ErrorCodes::CallbackCanceled, "Initial Sync Cancelled."));
- }
-
- void wait() {
- // TODO: wait on all cloners
- }
-
- std::string toString() {
- return str::stream() << "initial sync --" <<
- " active:" << _active <<
- " status:" << _status.toString() <<
- " source:" << _source.toString() <<
- " db cloners active:" << _clonersActive <<
- " db count:" << _databaseCloners.size();
- }
-
-
- // For testing
- void setStorageInterface(CollectionCloner::StorageInterface* si) {
- _storage = si;
- }
-
- private:
-
- /**
- * Does the next action necessary for the initial sync process.
- *
- * NOTE: If (!_status.isOK() || !_isActive) then early return.
- */
- void _doNextActions();
-
- /**
- * Setting the status to not-OK will stop the process
- */
- void _setStatus(CBHStatus s) {
- _setStatus(s.getStatus());
- }
-
- /**
- * Setting the status to not-OK will stop the process
- */
- void _setStatus(Status s) {
- // Only set the first time called, all subsequent failures are not recorded --only first
- if (_status.code() != ErrorCodes::NotYetInitialized) {
- _status = s;
}
}
- /**
- * Setting the status to not-OK will stop the process
- */
- void _setStatus(TimestampStatus s) {
- _setStatus(s.getStatus());
+ if (hasDoc) {
+ _onQueryResponse(fetchResult, nextAction);
+ } else {
}
+ } else {
+ _onQueryResponse(fetchResult, nextAction);
+ }
+};
+
+class DatabasesCloner {
+public:
+ DatabasesCloner(ReplicationExecutor* exec,
+ HostAndPort source,
+ stdx::function<void(const Status&)> finishFn)
+ : _status(ErrorCodes::NotYetInitialized, ""),
+ _exec(exec),
+ _source(source),
+ _active(false),
+ _clonersActive(0),
+ _finishFn(finishFn) {
+ if (!_finishFn) {
+ _status = Status(ErrorCodes::InvalidOptions, "finishFn is not callable.");
+ }
+ };
- void _failed();
-
- /** Called each time a database clone is finished */
- void _onEachDBCloneFinish(const Status& status, const std::string name);
+ Status start();
- // Callbacks
+ bool isActive() {
+ return _active;
+ }
- void _onListDatabaseFinish(const CommandCallbackArgs& cbd);
+ Status getStatus() {
+ return _status;
+ }
+ void cancel() {
+ if (!_active)
+ return;
+ _active = false;
+ // TODO: cancel all cloners
+ _setStatus(Status(ErrorCodes::CallbackCanceled, "Initial Sync Cancelled."));
+ }
- // Member variables
- Status _status; // If it is not OK, we stop everything.
- ReplicationExecutor* _exec; // executor to schedule things with
- HostAndPort _source; // The source to use, until we get an error
- bool _active; // false until we start
- std::vector<std::shared_ptr<DatabaseCloner>> _databaseCloners; // database cloners by name
- int _clonersActive;
+ void wait() {
+ // TODO: wait on all cloners
+ }
- const stdx::function<void (const Status&)> _finishFn;
+ std::string toString() {
+ return str::stream() << "initial sync --"
+ << " active:" << _active << " status:" << _status.toString()
+ << " source:" << _source.toString()
+ << " db cloners active:" << _clonersActive
+ << " db count:" << _databaseCloners.size();
+ }
- CollectionCloner::StorageInterface* _storage;
- };
- /** State held during Initial Sync */
- struct InitialSyncState {
- InitialSyncState(DatabasesCloner cloner, Event event)
- : dbsCloner(cloner), finishEvent(event), status(ErrorCodes::IllegalOperation, "") {};
-
- DatabasesCloner dbsCloner; // Cloner for all databases included in initial sync.
- Timestamp beginTimestamp; // Timestamp from the latest entry in oplog when started.
- Timestamp stopTimestamp; // Referred to as minvalid, or the place we can transition states.
- Event finishEvent; // event fired on completion, either successful or not.
- Status status; // final status, only valid after the finishEvent fires.
- size_t fetchedMissingDocs;
- size_t appliedOps;
-
- // Temporary fetch for things like fetching remote optime, or tail
- std::unique_ptr<Fetcher> tmpFetcher;
- TimestampStatus getLatestOplogTimestamp(ReplicationExecutor* exec,
- HostAndPort source,
- const NamespaceString& oplogNS);
- void setStatus(const Status& s);
- void setStatus(const CBHStatus& s);
- void _setTimestampStatus(const QueryResponseStatus& fetchResult,
- Fetcher::NextAction* nextAction,
- TimestampStatus* status) ;
- };
+ // For testing
+ void setStorageInterface(CollectionCloner::StorageInterface* si) {
+ _storage = si;
+ }
- // Initial Sync state
- TimestampStatus InitialSyncState::getLatestOplogTimestamp(ReplicationExecutor* exec,
- HostAndPort source,
- const NamespaceString& oplogNS) {
-
- BSONObj query = BSON("find" << oplogNS.coll() <<
- "sort" << BSON ("$natural" << -1) <<
- "limit" << 1);
-
- TimestampStatus timestampStatus(ErrorCodes::BadValue, "");
- Fetcher f(exec,
- source,
- oplogNS.db().toString(),
- query,
- stdx::bind(&InitialSyncState::_setTimestampStatus, this, stdx::placeholders::_1,
- stdx::placeholders::_2, &timestampStatus));
- Status s = f.schedule();
- if (!s.isOK()) {
- return TimestampStatus(s);
- }
+private:
+ /**
+ * Does the next action necessary for the initial sync process.
+ *
+ * NOTE: If (!_status.isOK() || !_isActive) then early return.
+ */
+ void _doNextActions();
- // wait for fetcher to get the oplog position.
- f.wait();
- return timestampStatus;
+ /**
+ * Setting the status to not-OK will stop the process
+ */
+ void _setStatus(CBHStatus s) {
+ _setStatus(s.getStatus());
}
- void InitialSyncState::_setTimestampStatus(const QueryResponseStatus& fetchResult,
- Fetcher::NextAction* nextAction,
- TimestampStatus* status) {
- if (!fetchResult.isOK()) {
- *status = TimestampStatus(fetchResult.getStatus());
- } else {
- // TODO: Set _beginTimestamp from first doc "ts" field.
- const auto docs = fetchResult.getValue().documents;
- const auto hasDoc = docs.begin() != docs.end();
- if (!hasDoc || !docs.begin()->hasField("ts")) {
- *status = TimestampStatus(ErrorCodes::FailedToParse,
- "Could not find an oplog entry with 'ts' field.");
- } else {
- *status = TimestampStatus(docs.begin()->getField("ts").timestamp());
- }
+ /**
+ * Setting the status to not-OK will stop the process
+ */
+ void _setStatus(Status s) {
+ // Only set the first time called, all subsequent failures are not recorded --only first
+ if (_status.code() != ErrorCodes::NotYetInitialized) {
+ _status = s;
}
}
- void InitialSyncState::setStatus(const Status& s) {
- status = s;
+ /**
+ * Setting the status to not-OK will stop the process
+ */
+ void _setStatus(TimestampStatus s) {
+ _setStatus(s.getStatus());
}
- void InitialSyncState::setStatus(const CBHStatus& s) {
- setStatus(s.getStatus());
+
+ void _failed();
+
+ /** Called each time a database clone is finished */
+ void _onEachDBCloneFinish(const Status& status, const std::string name);
+
+ // Callbacks
+
+ void _onListDatabaseFinish(const CommandCallbackArgs& cbd);
+
+
+ // Member variables
+ Status _status; // If it is not OK, we stop everything.
+ ReplicationExecutor* _exec; // executor to schedule things with
+ HostAndPort _source; // The source to use, until we get an error
+ bool _active; // false until we start
+ std::vector<std::shared_ptr<DatabaseCloner>> _databaseCloners; // database cloners by name
+ int _clonersActive;
+
+ const stdx::function<void(const Status&)> _finishFn;
+
+ CollectionCloner::StorageInterface* _storage;
+};
+
+/** State held during Initial Sync */
+struct InitialSyncState {
+ InitialSyncState(DatabasesCloner cloner, Event event)
+ : dbsCloner(cloner), finishEvent(event), status(ErrorCodes::IllegalOperation, ""){};
+
+ DatabasesCloner dbsCloner; // Cloner for all databases included in initial sync.
+ Timestamp beginTimestamp; // Timestamp from the latest entry in oplog when started.
+ Timestamp stopTimestamp; // Referred to as minvalid, or the place we can transition states.
+ Event finishEvent; // event fired on completion, either successful or not.
+ Status status; // final status, only valid after the finishEvent fires.
+ size_t fetchedMissingDocs;
+ size_t appliedOps;
+
+ // Temporary fetch for things like fetching remote optime, or tail
+ std::unique_ptr<Fetcher> tmpFetcher;
+ TimestampStatus getLatestOplogTimestamp(ReplicationExecutor* exec,
+ HostAndPort source,
+ const NamespaceString& oplogNS);
+ void setStatus(const Status& s);
+ void setStatus(const CBHStatus& s);
+ void _setTimestampStatus(const QueryResponseStatus& fetchResult,
+ Fetcher::NextAction* nextAction,
+ TimestampStatus* status);
+};
+
+// Initial Sync state
+TimestampStatus InitialSyncState::getLatestOplogTimestamp(ReplicationExecutor* exec,
+ HostAndPort source,
+ const NamespaceString& oplogNS) {
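+    // Query the sync source for its newest oplog entry; _setTimestampStatus extracts its "ts" field as the result.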
+ BSONObj query =
+ BSON("find" << oplogNS.coll() << "sort" << BSON("$natural" << -1) << "limit" << 1);
+
+ TimestampStatus timestampStatus(ErrorCodes::BadValue, "");
+ Fetcher f(exec,
+ source,
+ oplogNS.db().toString(),
+ query,
+ stdx::bind(&InitialSyncState::_setTimestampStatus,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ &timestampStatus));
+ Status s = f.schedule();
+ if (!s.isOK()) {
+ return TimestampStatus(s);
}
- // Initial Sync
- Status DatabasesCloner::start() {
- _active = true;
+ // wait for fetcher to get the oplog position.
+ f.wait();
+ return timestampStatus;
+}
- if (!_status.isOK() && _status.code() != ErrorCodes::NotYetInitialized) {
- return _status;
+void InitialSyncState::_setTimestampStatus(const QueryResponseStatus& fetchResult,
+ Fetcher::NextAction* nextAction,
+ TimestampStatus* status) {
+ if (!fetchResult.isOK()) {
+ *status = TimestampStatus(fetchResult.getStatus());
+ } else {
+ // TODO: Set _beginTimestamp from first doc "ts" field.
+ const auto docs = fetchResult.getValue().documents;
+ const auto hasDoc = docs.begin() != docs.end();
+ if (!hasDoc || !docs.begin()->hasField("ts")) {
+ *status = TimestampStatus(ErrorCodes::FailedToParse,
+ "Could not find an oplog entry with 'ts' field.");
+ } else {
+ *status = TimestampStatus(docs.begin()->getField("ts").timestamp());
}
+ }
+}
- _status = Status::OK();
-
- log() << "starting cloning of all databases";
- // Schedule listDatabase command which will kick off the database cloner per result db.
- Request listDBsReq(_source, "admin", BSON("listDatabases" << true));
- CBHStatus s = _exec->scheduleRemoteCommand(
- listDBsReq,
- stdx::bind(&DatabasesCloner::_onListDatabaseFinish,
- this,
- stdx::placeholders::_1));
- if (!s.isOK()) {
- _setStatus(s);
- _failed();
- }
+void InitialSyncState::setStatus(const Status& s) {
+ status = s;
+}
+void InitialSyncState::setStatus(const CBHStatus& s) {
+ setStatus(s.getStatus());
+}
- _doNextActions();
+// Initial Sync
+Status DatabasesCloner::start() {
+ _active = true;
+ if (!_status.isOK() && _status.code() != ErrorCodes::NotYetInitialized) {
return _status;
}
- void DatabasesCloner::_onListDatabaseFinish(const CommandCallbackArgs& cbd) {
- const Status respStatus = cbd.response.getStatus();
- if (!respStatus.isOK()) {
- // TODO: retry internally?
- _setStatus(respStatus);
- _doNextActions();
- return;
- }
+ _status = Status::OK();
+
+ log() << "starting cloning of all databases";
+ // Schedule listDatabase command which will kick off the database cloner per result db.
+ Request listDBsReq(_source, "admin", BSON("listDatabases" << true));
+ CBHStatus s = _exec->scheduleRemoteCommand(
+ listDBsReq,
+ stdx::bind(&DatabasesCloner::_onListDatabaseFinish, this, stdx::placeholders::_1));
+ if (!s.isOK()) {
+ _setStatus(s);
+ _failed();
+ }
- const auto respBSON = cbd.response.getValue().data;
-
- // There should not be any cloners yet
- invariant(_databaseCloners.size() == 0);
-
- const auto okElem = respBSON["ok"];
- if (okElem.trueValue()) {
- const auto dbsElem = respBSON["databases"].Obj();
- BSONForEach(arrayElement, dbsElem) {
- const BSONObj dbBSON = arrayElement.Obj();
- const std::string name = dbBSON["name"].str();
- ++_clonersActive;
- std::shared_ptr<DatabaseCloner> dbCloner{nullptr};
- try {
- dbCloner.reset(new DatabaseCloner(
- _exec,
- _source,
- name,
- BSONObj(), // do not filter database out.
- [](const BSONObj&) { return true; }, // clone all dbs.
- _storage, // use storage provided.
- [](const Status& status, const NamespaceString& srcNss) {
- if (status.isOK()) {
- log() << "collection clone finished: " << srcNss;
- }
- else {
- log() << "collection clone for '"
- << srcNss << "' failed due to "
- << status.toString();
- }
- },
- [=](const Status& status) {
- _onEachDBCloneFinish(status, name);
- }));
- }
- catch (...) {
- // error creating, fails below.
- }
+ _doNextActions();
- Status s = dbCloner ? dbCloner->start() : Status(ErrorCodes::UnknownError, "Bad!");
+ return _status;
+}
- if (!s.isOK()) {
- std::string err = str::stream() << "could not create cloner for database: "
- << name << " due to: " << s.toString();
- _setStatus(Status(ErrorCodes::InitialSyncFailure, err));
- error() << err;
- break; // exit for_each loop
- }
+void DatabasesCloner::_onListDatabaseFinish(const CommandCallbackArgs& cbd) {
+ const Status respStatus = cbd.response.getStatus();
+ if (!respStatus.isOK()) {
+ // TODO: retry internally?
+ _setStatus(respStatus);
+ _doNextActions();
+ return;
+ }
- // add cloner to list.
- _databaseCloners.push_back(dbCloner);
+ const auto respBSON = cbd.response.getValue().data;
+
+ // There should not be any cloners yet
+ invariant(_databaseCloners.size() == 0);
+
+ const auto okElem = respBSON["ok"];
+ if (okElem.trueValue()) {
+ const auto dbsElem = respBSON["databases"].Obj();
+ BSONForEach(arrayElement, dbsElem) {
+ const BSONObj dbBSON = arrayElement.Obj();
+ const std::string name = dbBSON["name"].str();
+ ++_clonersActive;
+ std::shared_ptr<DatabaseCloner> dbCloner{nullptr};
+ try {
+ dbCloner.reset(new DatabaseCloner(
+ _exec,
+ _source,
+ name,
+ BSONObj(), // do not filter database out.
+ [](const BSONObj&) { return true; }, // clone all dbs.
+ _storage, // use storage provided.
+ [](const Status& status, const NamespaceString& srcNss) {
+ if (status.isOK()) {
+ log() << "collection clone finished: " << srcNss;
+ } else {
+ log() << "collection clone for '" << srcNss << "' failed due to "
+ << status.toString();
+ }
+ },
+ [=](const Status& status) { _onEachDBCloneFinish(status, name); }));
+ } catch (...) {
+ // error creating, fails below.
}
- }
- else {
- _setStatus(Status(ErrorCodes::InitialSyncFailure,
- "failed to clone databases due to failed server response."));
- }
- // Move on to the next steps in the process.
- _doNextActions();
- }
+ Status s = dbCloner ? dbCloner->start() : Status(ErrorCodes::UnknownError, "Bad!");
- void DatabasesCloner::_onEachDBCloneFinish(const Status& status, const std::string name) {
- auto clonersLeft = --_clonersActive;
+ if (!s.isOK()) {
+ std::string err = str::stream() << "could not create cloner for database: " << name
+ << " due to: " << s.toString();
+ _setStatus(Status(ErrorCodes::InitialSyncFailure, err));
+ error() << err;
+ break; // exit for_each loop
+ }
- if (status.isOK()) {
- log() << "database clone finished: " << name;
- }
- else {
- log() << "database clone failed due to "
- << status.toString();
- _setStatus(status);
+ // add cloner to list.
+ _databaseCloners.push_back(dbCloner);
}
+ } else {
+ _setStatus(Status(ErrorCodes::InitialSyncFailure,
+ "failed to clone databases due to failed server response."));
+ }
- if (clonersLeft == 0) {
- _active = false;
- // All cloners are done, trigger event.
- log() << "all database clones finished, calling _finishFn";
- _finishFn(_status);
- }
+ // Move on to the next steps in the process.
+ _doNextActions();
+}
- _doNextActions();
- }
+void DatabasesCloner::_onEachDBCloneFinish(const Status& status, const std::string name) {
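+    // The last database cloner to finish clears _active and reports the overall status through _finishFn.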
+ auto clonersLeft = --_clonersActive;
- void DatabasesCloner::_doNextActions() {
- // If we are no longer active or we had an error, stop doing more
- if (!(_active && _status.isOK())) {
- if (!_status.isOK()) {
- // trigger failed state
- _failed();
- }
- return;
- }
+ if (status.isOK()) {
+ log() << "database clone finished: " << name;
+ } else {
+ log() << "database clone failed due to " << status.toString();
+ _setStatus(status);
}
- void DatabasesCloner::_failed() {
- // TODO: cancel outstanding work, like any cloners active
- invariant(_finishFn);
+ if (clonersLeft == 0) {
+ _active = false;
+ // All cloners are done, trigger event.
+ log() << "all database clones finished, calling _finishFn";
_finishFn(_status);
}
- // Data Replicator
- DataReplicator::DataReplicator(DataReplicatorOptions opts,
- ReplicationExecutor* exec,
- ReplicationCoordinator* replCoord)
- : DataReplicator(opts,
+ _doNextActions();
+}
+
+void DatabasesCloner::_doNextActions() {
+ // If we are no longer active or we had an error, stop doing more
+ if (!(_active && _status.isOK())) {
+ if (!_status.isOK()) {
+ // trigger failed state
+ _failed();
+ }
+ return;
+ }
+}
+
+void DatabasesCloner::_failed() {
+ // TODO: cancel outstanding work, like any cloners active
+ invariant(_finishFn);
+ _finishFn(_status);
+}
+
+// Data Replicator
+DataReplicator::DataReplicator(DataReplicatorOptions opts,
+ ReplicationExecutor* exec,
+ ReplicationCoordinator* replCoord)
+ : DataReplicator(
+ opts,
exec,
replCoord,
// TODO: replace this with a method in the replication coordinator.
- [replCoord] (const Timestamp& ts) { replCoord->setMyLastOptime(OpTime(ts, 0)); }) {
- }
+ [replCoord](const Timestamp& ts) { replCoord->setMyLastOptime(OpTime(ts, 0)); }) {}
+
+DataReplicator::DataReplicator(DataReplicatorOptions opts, ReplicationExecutor* exec)
+ : DataReplicator(opts, exec, nullptr, [](const Timestamp& ts) {}) {}
+
+DataReplicator::DataReplicator(DataReplicatorOptions opts,
+ ReplicationExecutor* exec,
+ ReplicationCoordinator* replCoord,
+ OnBatchCompleteFn batchCompletedFn)
+ : _opts(opts),
+ _exec(exec),
+ _replCoord(replCoord),
+ _state(DataReplicatorState::Uninitialized),
+ _fetcherPaused(false),
+ _reporterPaused(false),
+ _applierActive(false),
+ _applierPaused(false),
+ _batchCompletedFn(batchCompletedFn),
+ _oplogBuffer(kOplogBufferSize, &getSize) {}
+
+DataReplicator::~DataReplicator() {
+ DESTRUCTOR_GUARD(_cancelAllHandles_inlock(); _waitOnAll_inlock(););
+}
- DataReplicator::DataReplicator(DataReplicatorOptions opts,
- ReplicationExecutor* exec)
- : DataReplicator(opts, exec, nullptr, [] (const Timestamp& ts) {}) {
+Status DataReplicator::start() {
+ UniqueLock lk(_mutex);
+ if (_state != DataReplicatorState::Uninitialized) {
+ return Status(ErrorCodes::IllegalOperation,
+ str::stream() << "Already started in another state: " << toString(_state));
}
- DataReplicator::DataReplicator(DataReplicatorOptions opts,
- ReplicationExecutor* exec,
- ReplicationCoordinator* replCoord,
- OnBatchCompleteFn batchCompletedFn)
- : _opts(opts),
- _exec(exec),
- _replCoord(replCoord),
- _state(DataReplicatorState::Uninitialized),
- _fetcherPaused(false),
- _reporterPaused(false),
- _applierActive(false),
- _applierPaused(false),
- _batchCompletedFn(batchCompletedFn),
- _oplogBuffer(kOplogBufferSize, &getSize) {
- }
+ _state = DataReplicatorState::Steady;
+ _applierPaused = false;
+ _fetcherPaused = false;
+ _reporterPaused = false;
+ _doNextActions_Steady_inlock();
+ return Status::OK();
+}
- DataReplicator::~DataReplicator() {
- DESTRUCTOR_GUARD(
- _cancelAllHandles_inlock();
- _waitOnAll_inlock();
- );
- }
+Status DataReplicator::shutdown() {
+ return _shutdown();
+}
- Status DataReplicator::start() {
- UniqueLock lk(_mutex);
- if (_state != DataReplicatorState::Uninitialized) {
- return Status(ErrorCodes::IllegalOperation,
- str::stream() << "Already started in another state: "
- << toString(_state));
- }
+Status DataReplicator::pause() {
+ _pauseApplier();
+ return Status::OK();
+}
- _state = DataReplicatorState::Steady;
- _applierPaused = false;
- _fetcherPaused = false;
- _reporterPaused = false;
- _doNextActions_Steady_inlock();
- return Status::OK();
- }
+DataReplicatorState DataReplicator::getState() const {
+ LockGuard lk(_mutex);
+ return _state;
+}
- Status DataReplicator::shutdown() {
- return _shutdown();
- }
+Timestamp DataReplicator::getLastTimestampFetched() const {
+ LockGuard lk(_mutex);
+ return _lastTimestampFetched;
+}
- Status DataReplicator::pause() {
- _pauseApplier();
- return Status::OK();
+std::string DataReplicator::getDiagnosticString() const {
+ LockGuard lk(_mutex);
+ str::stream out;
+ out << "DataReplicator -"
+ << " opts: " << _opts.toString() << " oplogFetcher: " << _fetcher->toString()
+ << " opsBuffered: " << _oplogBuffer.size() << " state: " << toString(_state);
+ switch (_state) {
+ case DataReplicatorState::InitialSync:
+            out << " opsApplied: " << _initialSyncState->appliedOps

+ << " status: " << _initialSyncState->status.toString();
+ break;
+ case DataReplicatorState::Steady:
+ // TODO: add more here
+ break;
+ case DataReplicatorState::Rollback:
+ // TODO: add more here
+ break;
+ default:
+ break;
}
- DataReplicatorState DataReplicator::getState() const {
- LockGuard lk(_mutex);
- return _state;
- }
+ return out;
+}
- Timestamp DataReplicator::getLastTimestampFetched() const {
- LockGuard lk(_mutex);
- return _lastTimestampFetched;
+Status DataReplicator::resume(bool wait) {
+ CBHStatus handle = _exec->scheduleWork(
+ stdx::bind(&DataReplicator::_resumeFinish, this, stdx::placeholders::_1));
+ const Status status = handle.getStatus();
+ if (wait && status.isOK()) {
+ _exec->wait(handle.getValue());
}
+ return status;
+}
- std::string DataReplicator::getDiagnosticString() const {
- LockGuard lk(_mutex);
- str::stream out;
- out << "DataReplicator -"
- << " opts: " << _opts.toString()
- << " oplogFetcher: " << _fetcher->toString()
- << " opsBuffered: " << _oplogBuffer.size()
- << " state: " << toString(_state);
- switch (_state) {
- case DataReplicatorState::InitialSync:
- out << " opsAppied: " << _initialSyncState->appliedOps
- << " status: " << _initialSyncState->status.toString();
- break;
- case DataReplicatorState::Steady:
- // TODO: add more here
- break;
- case DataReplicatorState::Rollback:
- // TODO: add more here
- break;
- default:
- break;
- }
+void DataReplicator::_resumeFinish(CallbackArgs cbData) {
+ UniqueLock lk(_mutex);
+ _fetcherPaused = _applierPaused = false;
+ lk.unlock();
- return out;
- }
+ _doNextActions();
+}
- Status DataReplicator::resume(bool wait) {
- CBHStatus handle = _exec->scheduleWork(stdx::bind(&DataReplicator::_resumeFinish,
- this,
- stdx::placeholders::_1));
- const Status status = handle.getStatus();
- if (wait && status.isOK()) {
- _exec->wait(handle.getValue());
- }
- return status;
- }
+void DataReplicator::_pauseApplier() {
+ LockGuard lk(_mutex);
+ if (_applier)
+ _applier->wait();
+ _applierPaused = true;
+ _applier.reset();
+}
- void DataReplicator::_resumeFinish(CallbackArgs cbData) {
- UniqueLock lk(_mutex);
- _fetcherPaused = _applierPaused = false;
- lk.unlock();
+Timestamp DataReplicator::_applyUntil(Timestamp untilTimestamp) {
+ // TODO: block until all oplog buffer application is done to the given optime
+ return Timestamp();
+}
- _doNextActions();
- }
+Timestamp DataReplicator::_applyUntilAndPause(Timestamp untilTimestamp) {
+ //_run(&_pauseApplier);
+ return _applyUntil(untilTimestamp);
+}
- void DataReplicator::_pauseApplier() {
- LockGuard lk(_mutex);
- if (_applier)
- _applier->wait();
+TimestampStatus DataReplicator::flushAndPause() {
+ //_run(&_pauseApplier);
+ UniqueLock lk(_mutex);
+ if (_applierActive) {
_applierPaused = true;
- _applier.reset();
+ lk.unlock();
+ _applier->wait();
+ lk.lock();
}
+ return TimestampStatus(_lastTimestampApplied);
+}
- Timestamp DataReplicator::_applyUntil(Timestamp untilTimestamp) {
- // TODO: block until all oplog buffer application is done to the given optime
- return Timestamp();
+void DataReplicator::_resetState_inlock(Timestamp lastAppliedOptime) {
+ invariant(!_anyActiveHandles_inlock());
+ _lastTimestampApplied = _lastTimestampFetched = lastAppliedOptime;
+ _oplogBuffer.clear();
+}
+
+void DataReplicator::slavesHaveProgressed() {
+ if (_reporter) {
+ _reporter->trigger();
}
+}
- Timestamp DataReplicator::_applyUntilAndPause(Timestamp untilTimestamp) {
- //_run(&_pauseApplier);
- return _applyUntil(untilTimestamp);
+void DataReplicator::_setInitialSyncStorageInterface(CollectionCloner::StorageInterface* si) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _storage = si;
+ if (_initialSyncState) {
+ _initialSyncState->dbsCloner.setStorageInterface(_storage);
}
+}
- TimestampStatus DataReplicator::flushAndPause() {
- //_run(&_pauseApplier);
- UniqueLock lk(_mutex);
- if (_applierActive) {
- _applierPaused = true;
- lk.unlock();
- _applier->wait();
- lk.lock();
- }
- return TimestampStatus(_lastTimestampApplied);
+TimestampStatus DataReplicator::resync() {
+ _shutdown();
+ // Drop databases and do initialSync();
+ CBHStatus cbh = _exec->scheduleDBWork(
+ [&](const CallbackArgs& cbData) { _storage->dropUserDatabases(cbData.txn); });
+
+ if (!cbh.isOK()) {
+ return TimestampStatus(cbh.getStatus());
}
- void DataReplicator::_resetState_inlock(Timestamp lastAppliedOptime) {
- invariant(!_anyActiveHandles_inlock());
- _lastTimestampApplied = _lastTimestampFetched = lastAppliedOptime;
- _oplogBuffer.clear();
- }
+ _exec->wait(cbh.getValue());
- void DataReplicator::slavesHaveProgressed() {
- if (_reporter) {
- _reporter->trigger();
- }
+ TimestampStatus status = initialSync();
+ if (status.isOK()) {
+ _resetState_inlock(status.getValue());
}
+ return status;
+}
- void DataReplicator::_setInitialSyncStorageInterface(CollectionCloner::StorageInterface* si) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _storage = si;
- if (_initialSyncState) {
- _initialSyncState->dbsCloner.setStorageInterface(_storage);
+TimestampStatus DataReplicator::initialSync() {
+ Timer t;
+ UniqueLock lk(_mutex);
+ if (_state != DataReplicatorState::Uninitialized) {
+ if (_state == DataReplicatorState::InitialSync)
+ return TimestampStatus(ErrorCodes::InvalidRoleModification,
+                                   (str::stream() << "Already doing initial sync; try resync"));
+ else {
+ return TimestampStatus(
+ ErrorCodes::AlreadyInitialized,
+ (str::stream() << "Cannot do initial sync in " << toString(_state) << " state."));
}
}
- TimestampStatus DataReplicator::resync() {
- _shutdown();
- // Drop databases and do initialSync();
- CBHStatus cbh = _exec->scheduleDBWork([&](const CallbackArgs& cbData) {
- _storage->dropUserDatabases(cbData.txn);
- });
+ _state = DataReplicatorState::InitialSync;
- if (!cbh.isOK()) {
- return TimestampStatus(cbh.getStatus());
- }
+ // The reporter is paused for the duration of the initial sync, so cancel just in case.
+ if (_reporter) {
+ _reporter->cancel();
+ }
+ _reporterPaused = true;
+ _applierPaused = true;
- _exec->wait(cbh.getValue());
+ // TODO: set minvalid doc initial sync state.
- TimestampStatus status = initialSync();
- if (status.isOK()) {
- _resetState_inlock(status.getValue());
+ const int maxFailedAttempts = 10;
+ int failedAttempts = 0;
+ Status attemptErrorStatus(Status::OK());
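+    // Each attempt re-picks a sync source, re-clones all databases, and re-fetches the oplog, up to maxFailedAttempts times.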
+ while (failedAttempts < maxFailedAttempts) {
+        // For testing: this failpoint forces the attempt to fail as if no sync source were available.
+        if (MONGO_FAIL_POINT(failInitialSyncWithBadHost)) {
+            attemptErrorStatus = Status(ErrorCodes::InvalidSyncSource, "no sync source available.");
}
- return status;
- }
-
- TimestampStatus DataReplicator::initialSync() {
- Timer t;
- UniqueLock lk(_mutex);
- if (_state != DataReplicatorState::Uninitialized) {
- if (_state == DataReplicatorState::InitialSync)
- return TimestampStatus(ErrorCodes::InvalidRoleModification,
- (str::stream() << "Already doing initial sync;try resync"));
- else {
- return TimestampStatus(ErrorCodes::AlreadyInitialized,
- (str::stream() << "Cannot do initial sync in "
- << toString(_state) << " state."));
+
+ Event initialSyncFinishEvent;
+ if (attemptErrorStatus.isOK() && _syncSource.empty()) {
+ attemptErrorStatus = _ensureGoodSyncSource_inlock();
+ } else if (attemptErrorStatus.isOK()) {
+ StatusWith<Event> status = _exec->makeEvent();
+ if (!status.isOK()) {
+ attemptErrorStatus = status.getStatus();
+ } else {
+ initialSyncFinishEvent = status.getValue();
}
}
- _state = DataReplicatorState::InitialSync;
+ if (attemptErrorStatus.isOK()) {
+ invariant(initialSyncFinishEvent.isValid());
+ _initialSyncState.reset(new InitialSyncState(
+ DatabasesCloner(
+ _exec,
+ _syncSource,
+ stdx::bind(&DataReplicator::_onDataClonerFinish, this, stdx::placeholders::_1)),
+ initialSyncFinishEvent));
- // The reporter is paused for the duration of the initial sync, so cancel just in case.
- if (_reporter) {
- _reporter->cancel();
+ _initialSyncState->dbsCloner.setStorageInterface(_storage);
+ const NamespaceString ns(_opts.remoteOplogNS);
+ TimestampStatus tsStatus =
+ _initialSyncState->getLatestOplogTimestamp(_exec, _syncSource, ns);
+ attemptErrorStatus = tsStatus.getStatus();
+ if (attemptErrorStatus.isOK()) {
+ _initialSyncState->beginTimestamp = tsStatus.getValue();
+ _fetcher.reset(new OplogFetcher(_exec,
+ _initialSyncState->beginTimestamp,
+ _syncSource,
+ _opts.remoteOplogNS,
+ stdx::bind(&DataReplicator::_onOplogFetchFinish,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2)));
+ _scheduleFetch_inlock();
+ lk.unlock();
+ _initialSyncState->dbsCloner.start(); // When the cloner is done applier starts.
+ invariant(_initialSyncState->finishEvent.isValid());
+ _exec->waitForEvent(_initialSyncState->finishEvent);
+ attemptErrorStatus = _initialSyncState->status;
+
+ // Re-lock DataReplicator Internals
+ lk.lock();
+ }
}
- _reporterPaused = true;
- _applierPaused = true;
- // TODO: set minvalid doc initial sync state.
+ if (attemptErrorStatus.isOK()) {
+ break; // success
+ }
- const int maxFailedAttempts = 10;
- int failedAttempts = 0;
- Status attemptErrorStatus(Status::OK());
- while (failedAttempts < maxFailedAttempts) {
- // For testing, we may want to fail if we receive a getmore.
- if (MONGO_FAIL_POINT(failInitialSyncWithBadHost)) {
- attemptErrorStatus = Status(ErrorCodes::InvalidSyncSource, "no sync source avail.");
- }
+ ++failedAttempts;
- Event initialSyncFinishEvent;
- if (attemptErrorStatus.isOK() && _syncSource.empty()) {
- attemptErrorStatus = _ensureGoodSyncSource_inlock();
- }
- else if(attemptErrorStatus.isOK()) {
- StatusWith<Event> status = _exec->makeEvent();
- if (!status.isOK()) {
- attemptErrorStatus = status.getStatus();
- } else {
- initialSyncFinishEvent = status.getValue();
- }
- }
+ error() << "Initial sync attempt failed -- attempts left: "
+ << (maxFailedAttempts - failedAttempts) << " cause: " << attemptErrorStatus;
- if (attemptErrorStatus.isOK()) {
- invariant(initialSyncFinishEvent.isValid());
- _initialSyncState.reset(new InitialSyncState(
- DatabasesCloner(_exec,
- _syncSource,
- stdx::bind(&DataReplicator::_onDataClonerFinish,
- this,
- stdx::placeholders::_1)),
- initialSyncFinishEvent));
-
- _initialSyncState->dbsCloner.setStorageInterface(_storage);
- const NamespaceString ns(_opts.remoteOplogNS);
- TimestampStatus tsStatus = _initialSyncState->getLatestOplogTimestamp(
- _exec,
- _syncSource,
- ns);
- attemptErrorStatus = tsStatus.getStatus();
- if (attemptErrorStatus.isOK()) {
- _initialSyncState->beginTimestamp = tsStatus.getValue();
- _fetcher.reset(new OplogFetcher(_exec,
- _initialSyncState->beginTimestamp,
- _syncSource,
- _opts.remoteOplogNS,
- stdx::bind(&DataReplicator::_onOplogFetchFinish,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2)));
- _scheduleFetch_inlock();
- lk.unlock();
- _initialSyncState->dbsCloner.start(); // When the cloner is done applier starts.
- invariant(_initialSyncState->finishEvent.isValid());
- _exec->waitForEvent(_initialSyncState->finishEvent);
- attemptErrorStatus = _initialSyncState->status;
-
- // Re-lock DataReplicator Internals
- lk.lock();
- }
- }
+ // Sleep for retry time
+ lk.unlock();
+ sleepmillis(_opts.initialSyncRetryWait.count());
+ lk.lock();
- if (attemptErrorStatus.isOK()) {
- break; // success
- }
+ // No need to print a stack
+ if (failedAttempts >= maxFailedAttempts) {
+ const std::string err =
+ "The maximum number of retries"
+                " has been exhausted for initial sync.";
+ severe() << err;
+ return Status(ErrorCodes::InitialSyncFailure, err);
+ }
+ }
- ++failedAttempts;
+ // Success, cleanup
+ // TODO: re-enable, find blocking call from tests
+ /*
+ _cancelAllHandles_inlock();
+ _waitOnAll_inlock();
- error() << "Initial sync attempt failed -- attempts left: "
- << (maxFailedAttempts - failedAttempts) << " cause: "
- << attemptErrorStatus;
+ _reporterPaused = false;
+ _fetcherPaused = false;
+ _fetcher.reset(nullptr);
+ _tmpFetcher.reset(nullptr);
+ _applierPaused = false;
+ _applier.reset(nullptr);
+ _applierActive = false;
+ _initialSyncState.reset(nullptr);
+ _oplogBuffer.clear();
+ _resetState_inlock(_lastTimestampApplied);
+ */
+ log() << "Initial sync took: " << t.millis() << " milliseconds.";
+ return TimestampStatus(_lastTimestampApplied);
+}
- // Sleep for retry time
- lk.unlock();
- sleepmillis(_opts.initialSyncRetryWait.count());
- lk.lock();
+void DataReplicator::_onDataClonerFinish(const Status& status) {
+ log() << "data clone finished, status: " << status.toString();
+ if (!status.isOK()) {
+        // Initial sync failed during cloning of databases
+ _initialSyncState->setStatus(status);
+ _exec->signalEvent(_initialSyncState->finishEvent);
+ return;
+ }
- // No need to print a stack
- if (failedAttempts >= maxFailedAttempts) {
- const std::string err = "The maximum number of retries"
- " have been exhausted for initial sync.";
- severe() << err;
- return Status(ErrorCodes::InitialSyncFailure, err);
- }
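+    // Cloning succeeded: fetch the newest oplog entry on the sync source to learn where the applier must stop.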
+ BSONObj query = BSON("find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1)
+ << "limit" << 1);
+
+ TimestampStatus timestampStatus(ErrorCodes::BadValue, "");
+ _tmpFetcher.reset(new QueryFetcher(_exec,
+ _syncSource,
+ _opts.remoteOplogNS,
+ query,
+ stdx::bind(&DataReplicator::_onApplierReadyStart,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2)));
+ Status s = _tmpFetcher->schedule();
+ if (!s.isOK()) {
+ _initialSyncState->setStatus(s);
+ }
+}
+
+void DataReplicator::_onApplierReadyStart(const QueryResponseStatus& fetchResult,
+ NextAction* nextAction) {
+ // Data clone done, move onto apply.
+ TimestampStatus ts(ErrorCodes::OplogStartMissing, "");
+ _initialSyncState->_setTimestampStatus(fetchResult, nextAction, &ts);
+ if (ts.isOK()) {
+ // TODO: set minvalid?
+ LockGuard lk(_mutex);
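+        // Initial sync is only considered complete once the applier reaches this stop timestamp.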
+ _initialSyncState->stopTimestamp = ts.getValue();
+ if (_lastTimestampApplied < ts.getValue()) {
+ log() << "waiting for applier to run until ts: " << ts.getValue();
}
+ invariant(_applierPaused);
+ _applierPaused = false;
+ _doNextActions_InitialSync_inlock();
+ } else {
+ _initialSyncState->setStatus(ts.getStatus());
+ _doNextActions();
+ }
+}
- // Success, cleanup
- // TODO: re-enable, find blocking call from tests
-/*
- _cancelAllHandles_inlock();
- _waitOnAll_inlock();
+bool DataReplicator::_anyActiveHandles_inlock() const {
+ return _applierActive || (_fetcher && _fetcher->isActive()) ||
+ (_initialSyncState && _initialSyncState->dbsCloner.isActive()) ||
+ (_reporter && _reporter->isActive());
+}
- _reporterPaused = false;
- _fetcherPaused = false;
- _fetcher.reset(nullptr);
- _tmpFetcher.reset(nullptr);
- _applierPaused = false;
- _applier.reset(nullptr);
- _applierActive = false;
- _initialSyncState.reset(nullptr);
- _oplogBuffer.clear();
- _resetState_inlock(_lastTimestampApplied);
-*/
- log() << "Initial sync took: " << t.millis() << " milliseconds.";
- return TimestampStatus(_lastTimestampApplied);
- }
-
- void DataReplicator::_onDataClonerFinish(const Status& status) {
- log() << "data clone finished, status: " << status.toString();
- if (!status.isOK()) {
- // Iniitial sync failed during cloning of databases
- _initialSyncState->setStatus(status);
- _exec->signalEvent(_initialSyncState->finishEvent);
- return;
- }
+void DataReplicator::_cancelAllHandles_inlock() {
+ if (_fetcher)
+ _fetcher->cancel();
+ if (_applier)
+ _applier->cancel();
+ if (_reporter)
+ _reporter->cancel();
+ if (_initialSyncState && _initialSyncState->dbsCloner.isActive())
+ _initialSyncState->dbsCloner.cancel();
+}
- BSONObj query = BSON("find" << _opts.remoteOplogNS.coll() <<
- "sort" << BSON ("$natural" << -1) <<
- "limit" << 1);
-
- TimestampStatus timestampStatus(ErrorCodes::BadValue, "");
- _tmpFetcher.reset(new QueryFetcher(_exec,
- _syncSource,
- _opts.remoteOplogNS,
- query,
- stdx::bind(&DataReplicator::_onApplierReadyStart,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2)));
- Status s = _tmpFetcher->schedule();
- if (!s.isOK()) {
- _initialSyncState->setStatus(s);
- }
- }
+void DataReplicator::_waitOnAll_inlock() {
+ if (_fetcher)
+ _fetcher->wait();
+ if (_applier)
+ _applier->wait();
+ if (_reporter)
+ _reporter->wait();
+ if (_initialSyncState)
+ _initialSyncState->dbsCloner.wait();
+}
- void DataReplicator::_onApplierReadyStart(const QueryResponseStatus& fetchResult,
- NextAction* nextAction) {
- // Data clone done, move onto apply.
- TimestampStatus ts(ErrorCodes::OplogStartMissing, "");
- _initialSyncState->_setTimestampStatus(fetchResult, nextAction, &ts);
- if (ts.isOK()) {
- // TODO: set minvalid?
- LockGuard lk(_mutex);
- _initialSyncState->stopTimestamp = ts.getValue();
- if (_lastTimestampApplied < ts.getValue()) {
- log() << "waiting for applier to run until ts: " << ts.getValue();
- }
- invariant(_applierPaused);
- _applierPaused = false;
- _doNextActions_InitialSync_inlock();
- }
- else {
- _initialSyncState->setStatus(ts.getStatus());
- _doNextActions();
+void DataReplicator::_doNextActions() {
+    // Can be in one of 3 main states/modes (DataReplicatorState):
+ // 1.) Initial Sync
+ // 2.) Rollback
+ // 3.) Steady (Replication)
+
+ // Check for shutdown flag, signal event
+ LockGuard lk(_mutex);
+ if (_onShutdown.isValid()) {
+ if (!_anyActiveHandles_inlock()) {
+ _exec->signalEvent(_onShutdown);
+ _state = DataReplicatorState::Uninitialized;
}
+ return;
}
- bool DataReplicator::_anyActiveHandles_inlock() const {
- return _applierActive ||
- (_fetcher && _fetcher->isActive()) ||
- (_initialSyncState && _initialSyncState->dbsCloner.isActive()) ||
- (_reporter && _reporter->isActive());
+ // Do work for the current state
+ switch (_state) {
+ case DataReplicatorState::Rollback:
+ _doNextActions_Rollback_inlock();
+ break;
+ case DataReplicatorState::InitialSync:
+ _doNextActions_InitialSync_inlock();
+ break;
+ case DataReplicatorState::Steady:
+ _doNextActions_Steady_inlock();
+ break;
+ default:
+ return;
}
- void DataReplicator::_cancelAllHandles_inlock() {
- if (_fetcher)
- _fetcher->cancel();
- if (_applier)
- _applier->cancel();
- if (_reporter)
- _reporter->cancel();
- if (_initialSyncState && _initialSyncState->dbsCloner.isActive())
- _initialSyncState->dbsCloner.cancel();
- }
+ // transition when needed
+ _changeStateIfNeeded();
+}
- void DataReplicator::_waitOnAll_inlock() {
- if (_fetcher)
- _fetcher->wait();
- if (_applier)
- _applier->wait();
- if (_reporter)
- _reporter->wait();
- if (_initialSyncState)
- _initialSyncState->dbsCloner.wait();
+void DataReplicator::_doNextActions_InitialSync_inlock() {
+ if (!_initialSyncState) {
+ // TODO: Error case?, reset to uninit'd
+ _state = DataReplicatorState::Uninitialized;
+        log() << "no _initialSyncState, so resetting state to Uninitialized";
+ return;
}
- void DataReplicator::_doNextActions() {
- // Can be in one of 3 main states/modes (DataReplicatiorState):
- // 1.) Initial Sync
- // 2.) Rollback
- // 3.) Steady (Replication)
-
- // Check for shutdown flag, signal event
- LockGuard lk(_mutex);
- if (_onShutdown.isValid()) {
- if(!_anyActiveHandles_inlock()) {
- _exec->signalEvent(_onShutdown);
+ if (!_initialSyncState->dbsCloner.isActive()) {
+ if (!_initialSyncState->dbsCloner.getStatus().isOK()) {
+ // TODO: Initial sync failed
+ } else {
+ if (!_lastTimestampApplied.isNull() &&
+ _lastTimestampApplied >= _initialSyncState->stopTimestamp) {
+ invariant(_initialSyncState->finishEvent.isValid());
+ log() << "Applier done, initial sync done, end timestamp: "
+ << _initialSyncState->stopTimestamp
+ << " , last applier: " << _lastTimestampApplied;
_state = DataReplicatorState::Uninitialized;
- }
- return;
- }
-
- // Do work for the current state
- switch (_state) {
- case DataReplicatorState::Rollback:
- _doNextActions_Rollback_inlock();
- break;
- case DataReplicatorState::InitialSync:
- _doNextActions_InitialSync_inlock();
- break;
- case DataReplicatorState::Steady:
+ _initialSyncState->setStatus(Status::OK());
+ _exec->signalEvent(_initialSyncState->finishEvent);
+ } else {
+ // Run steady state events to fetch/apply.
_doNextActions_Steady_inlock();
- break;
- default:
- return;
+ }
}
-
- // transition when needed
- _changeStateIfNeeded();
}
+}
- void DataReplicator::_doNextActions_InitialSync_inlock() {
- if (!_initialSyncState) {
- // TODO: Error case?, reset to uninit'd
- _state = DataReplicatorState::Uninitialized;
- log() << "_initialSyncState, so resetting state to Uninitialized";
- return;
- }
+void DataReplicator::_doNextActions_Rollback_inlock() {
+ // TODO: check rollback state and do next actions
+ // move from rollback phase to rollback phase via scheduled work in exec
+}
- if (!_initialSyncState->dbsCloner.isActive()) {
- if (!_initialSyncState->dbsCloner.getStatus().isOK()) {
- // TODO: Initial sync failed
- }
- else {
- if (!_lastTimestampApplied.isNull() &&
- _lastTimestampApplied >= _initialSyncState->stopTimestamp) {
- invariant(_initialSyncState->finishEvent.isValid());
- log() << "Applier done, initial sync done, end timestamp: "
- << _initialSyncState->stopTimestamp << " , last applier: "
- << _lastTimestampApplied;
- _state = DataReplicatorState::Uninitialized;
- _initialSyncState->setStatus(Status::OK());
- _exec->signalEvent(_initialSyncState->finishEvent);
- }
- else {
- // Run steady state events to fetch/apply.
- _doNextActions_Steady_inlock();
- }
+void DataReplicator::_doNextActions_Steady_inlock() {
+ // Check sync source is still good.
+ if (_syncSource.empty()) {
+ _syncSource = _replCoord->chooseNewSyncSource();
+ }
+ if (_syncSource.empty()) {
+ // No sync source, reschedule check
+ Date_t when = _exec->now() + _opts.syncSourceRetryWait;
+ // schedule self-callback w/executor
+ // to try to get a new sync source in a bit
+ auto checkSyncSource = [this](const executor::TaskExecutor::CallbackArgs& cba) {
+ if (cba.status.code() == ErrorCodes::CallbackCanceled) {
+ return;
}
+ _doNextActions();
+ };
+ _exec->scheduleWorkAt(when, checkSyncSource);
+ } else {
+ // Check if active fetch, if not start one
+ if (!_fetcher || !_fetcher->isActive()) {
+ _scheduleFetch_inlock();
}
}
- void DataReplicator::_doNextActions_Rollback_inlock() {
- // TODO: check rollback state and do next actions
- // move from rollback phase to rollback phase via scheduled work in exec
+ // Check if no active apply and ops to apply
+ if (!_applierActive && _oplogBuffer.size()) {
+ _scheduleApplyBatch_inlock();
}
- void DataReplicator::_doNextActions_Steady_inlock() {
- // Check sync source is still good.
- if (_syncSource.empty()) {
- _syncSource = _replCoord->chooseNewSyncSource();
- }
- if (_syncSource.empty()) {
- // No sync source, reschedule check
- Date_t when = _exec->now() + _opts.syncSourceRetryWait;
- // schedule self-callback w/executor
- // to try to get a new sync source in a bit
- auto checkSyncSource = [this] (const executor::TaskExecutor::CallbackArgs& cba) {
- if (cba.status.code() == ErrorCodes::CallbackCanceled) {
- return;
- }
- _doNextActions();
- };
- _exec->scheduleWorkAt(when, checkSyncSource);
- } else {
- // Check if active fetch, if not start one
- if (!_fetcher || !_fetcher->isActive()) {
- _scheduleFetch_inlock();
- }
- }
+ if (!_reporterPaused && (!_reporter || !_reporter->getStatus().isOK())) {
+ // TODO get reporter in good shape
+ _reporter.reset(new Reporter(_exec, _replCoord, _syncSource));
+ }
+}
- // Check if no active apply and ops to apply
- if (!_applierActive && _oplogBuffer.size()) {
- _scheduleApplyBatch_inlock();
- }
+Operations DataReplicator::_getNextApplierBatch_inlock() {
+ // Return a new batch of ops to apply.
+ // TODO: limit the batch like SyncTail::tryPopAndWaitForMore
+ Operations ops;
+ BSONObj op;
+ while (_oplogBuffer.tryPop(op)) {
+ ops.push_back(op);
+ }
+ return ops;
+}
- if (!_reporterPaused && (!_reporter || !_reporter->getStatus().isOK())) {
- // TODO get reporter in good shape
- _reporter.reset(new Reporter(_exec, _replCoord, _syncSource));
- }
+void DataReplicator::_onApplyBatchFinish(const CallbackArgs& cbData,
+ const TimestampStatus& ts,
+ const Operations& ops,
+ const size_t numApplied) {
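+    // Record progress, then either hand a failed batch to _handleFailedApplyBatch or advance _lastTimestampApplied and notify listeners.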
+ invariant(cbData.status.isOK());
+ UniqueLock lk(_mutex);
+ if (_initialSyncState) {
+ _initialSyncState->appliedOps += numApplied;
+ }
+ if (!ts.isOK()) {
+ _handleFailedApplyBatch(ts, ops);
+ return;
}
- Operations DataReplicator::_getNextApplierBatch_inlock() {
- // Return a new batch of ops to apply.
- // TODO: limit the batch like SyncTail::tryPopAndWaitForMore
- Operations ops;
- BSONObj op;
- while(_oplogBuffer.tryPop(op)) {
- ops.push_back(op);
- }
- return ops;
+ _lastTimestampApplied = ts.getValue();
+ lk.unlock();
+
+ if (_batchCompletedFn) {
+ _batchCompletedFn(ts.getValue());
+ }
+ // TODO: move the reporter to the replication coordinator and set _batchCompletedFn to a
+ // function in the replCoord.
+ if (_reporter) {
+ _reporter->trigger();
}
- void DataReplicator::_onApplyBatchFinish(const CallbackArgs& cbData,
- const TimestampStatus& ts,
- const Operations& ops,
- const size_t numApplied) {
- invariant(cbData.status.isOK());
- UniqueLock lk(_mutex);
- if (_initialSyncState) {
- _initialSyncState->appliedOps += numApplied;
- }
- if (!ts.isOK()) {
- _handleFailedApplyBatch(ts, ops);
- return;
- }
+ _doNextActions();
+}
- _lastTimestampApplied = ts.getValue();
- lk.unlock();
+void DataReplicator::_handleFailedApplyBatch(const TimestampStatus& ts, const Operations& ops) {
+ switch (_state) {
+ case DataReplicatorState::InitialSync:
+ // TODO: fetch missing doc, and retry.
+ _scheduleApplyAfterFetch(ops);
+ break;
+ case DataReplicatorState::Rollback:
+ // TODO: nothing?
+ default:
+ // fatal
+ fassert(28666, ts.getStatus());
+ }
+}
- if (_batchCompletedFn) {
- _batchCompletedFn(ts.getValue());
- }
- // TODO: move the reporter to the replication coordinator and set _batchCompletedFn to a
- // function in the replCoord.
- if (_reporter) {
- _reporter->trigger();
- }
+void DataReplicator::_scheduleApplyAfterFetch(const Operations& ops) {
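+    // A document referenced by a failed op is missing locally; fetch it by _id from the sync source before retrying the batch.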
+ ++_initialSyncState->fetchedMissingDocs;
+ // TODO: check collection.isCapped, like SyncTail::getMissingDoc
+ const BSONObj failedOplogEntry = *ops.begin();
+ const BSONElement missingIdElem = failedOplogEntry.getFieldDotted("o2._id");
+ const NamespaceString nss(ops.begin()->getField("ns").str());
+ const BSONObj query = BSON("find" << nss.coll() << "filter" << missingIdElem.wrap());
+ _tmpFetcher.reset(new QueryFetcher(_exec,
+ _syncSource,
+ nss,
+ query,
+ stdx::bind(&DataReplicator::_onMissingFetched,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ ops,
+ nss)));
+ Status s = _tmpFetcher->schedule();
+ if (!s.isOK()) {
+ // record error and take next step based on it.
+ _initialSyncState->setStatus(s);
+ _doNextActions();
+ }
+}
+void DataReplicator::_onMissingFetched(const QueryResponseStatus& fetchResult,
+ Fetcher::NextAction* nextAction,
+ const Operations& ops,
+ const NamespaceString nss) {
+ if (!fetchResult.isOK()) {
+ // TODO: do retries on network issues, like SyncTail::getMissingDoc
+ _initialSyncState->setStatus(fetchResult.getStatus());
_doNextActions();
+ return;
+ } else if (!fetchResult.getValue().documents.size()) {
+ // TODO: skip apply for this doc, like multiInitialSyncApply?
+ _initialSyncState->setStatus(
+ Status(ErrorCodes::InitialSyncFailure, "missing doc not found"));
+ _doNextActions();
+ return;
}
- void DataReplicator::_handleFailedApplyBatch(const TimestampStatus& ts, const Operations& ops) {
- switch (_state) {
- case DataReplicatorState::InitialSync:
- // TODO: fetch missing doc, and retry.
- _scheduleApplyAfterFetch(ops);
- break;
- case DataReplicatorState::Rollback:
- // TODO: nothing?
- default:
- // fatal
- fassert(28666, ts.getStatus());
- }
+ const BSONObj missingDoc = *fetchResult.getValue().documents.begin();
+ Status rs{Status::OK()};
+ auto s = _exec->scheduleDBWork(
+ ([&](const CallbackArgs& cd) { rs = _storage->insertMissingDoc(cd.txn, nss, missingDoc); }),
+ nss,
+ MODE_IX);
+ if (!s.isOK()) {
+ _initialSyncState->setStatus(s);
+ _doNextActions();
+ return;
}
- void DataReplicator::_scheduleApplyAfterFetch(const Operations& ops) {
- ++_initialSyncState->fetchedMissingDocs;
- // TODO: check collection.isCapped, like SyncTail::getMissingDoc
- const BSONObj failedOplogEntry = *ops.begin();
- const BSONElement missingIdElem = failedOplogEntry.getFieldDotted("o2._id");
- const NamespaceString nss(ops.begin()->getField("ns").str());
- const BSONObj query = BSON("find" << nss.coll() << "filter" << missingIdElem.wrap());
- _tmpFetcher.reset(new QueryFetcher(_exec, _syncSource, nss, query,
- stdx::bind(&DataReplicator::_onMissingFetched,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2,
- ops,
- nss)));
- Status s = _tmpFetcher->schedule();
- if (!s.isOK()) {
- // record error and take next step based on it.
- _initialSyncState->setStatus(s);
- _doNextActions();
- }
+ _exec->wait(s.getValue());
+ if (!rs.isOK()) {
+ _initialSyncState->setStatus(rs);
+ _doNextActions();
+ return;
}
- void DataReplicator::_onMissingFetched(const QueryResponseStatus& fetchResult,
- Fetcher::NextAction* nextAction,
- const Operations& ops,
- const NamespaceString nss) {
- if (!fetchResult.isOK()) {
- // TODO: do retries on network issues, like SyncTail::getMissingDoc
- _initialSyncState->setStatus(fetchResult.getStatus());
- _doNextActions();
- return;
- } else if (!fetchResult.getValue().documents.size()) {
- // TODO: skip apply for this doc, like multiInitialSyncApply?
- _initialSyncState->setStatus(Status(ErrorCodes::InitialSyncFailure,
- "missing doc not found"));
- _doNextActions();
- return;
- }
+ LockGuard lk(_mutex);
+ auto status = _scheduleApplyBatch_inlock(ops);
+ if (!status.isOK()) {
+ _initialSyncState->setStatus(status);
+ _exec->signalEvent(_initialSyncState->finishEvent);
+ }
+}
- const BSONObj missingDoc = *fetchResult.getValue().documents.begin();
- Status rs{Status::OK()};
- auto s = _exec->scheduleDBWork(([&](const CallbackArgs& cd) {
- rs = _storage->insertMissingDoc(cd.txn, nss, missingDoc);
- }),
- nss,
- MODE_IX);
- if (!s.isOK()) {
- _initialSyncState->setStatus(s);
- _doNextActions();
- return;
- }
+Status DataReplicator::_scheduleApplyBatch() {
+ LockGuard lk(_mutex);
+ return _scheduleApplyBatch_inlock();
+}
- _exec->wait(s.getValue());
- if (!rs.isOK()) {
- _initialSyncState->setStatus(rs);
- _doNextActions();
- return;
- }
+Status DataReplicator::_scheduleApplyBatch_inlock() {
+ if (!_applierPaused && !_applierActive) {
+ _applierActive = true;
+ const Operations ops = _getNextApplierBatch_inlock();
+ invariant(ops.size());
+ invariant(_opts.applierFn);
+ invariant(!(_applier && _applier->isActive()));
+ return _scheduleApplyBatch_inlock(ops);
+ }
+ return Status::OK();
+}
- LockGuard lk(_mutex);
- auto status = _scheduleApplyBatch_inlock(ops);
+Status DataReplicator::_scheduleApplyBatch_inlock(const Operations& ops) {
+ auto lambda = [this](const TimestampStatus& ts, const Operations& theOps) {
+ CBHStatus status = _exec->scheduleWork(stdx::bind(&DataReplicator::_onApplyBatchFinish,
+ this,
+ stdx::placeholders::_1,
+ ts,
+ theOps,
+ theOps.size()));
if (!status.isOK()) {
+ LockGuard lk(_mutex);
_initialSyncState->setStatus(status);
_exec->signalEvent(_initialSyncState->finishEvent);
+ return;
}
- }
+ // block until callback done.
+ _exec->wait(status.getValue());
+ };
- Status DataReplicator::_scheduleApplyBatch() {
- LockGuard lk(_mutex);
- return _scheduleApplyBatch_inlock();
- }
+ _applier.reset(new Applier(_exec, ops, _opts.applierFn, lambda));
+ return _applier->start();
+}
- Status DataReplicator::_scheduleApplyBatch_inlock() {
- if (!_applierPaused && !_applierActive) {
- _applierActive = true;
- const Operations ops = _getNextApplierBatch_inlock();
- invariant(ops.size());
- invariant(_opts.applierFn);
- invariant(!(_applier && _applier->isActive()));
- return _scheduleApplyBatch_inlock(ops);
+Status DataReplicator::_scheduleFetch() {
+ LockGuard lk(_mutex);
+ return _scheduleFetch_inlock();
+}
+
+Status DataReplicator::_ensureGoodSyncSource_inlock() {
+ if (_syncSource.empty()) {
+ if (_replCoord) {
+ _syncSource = _replCoord->chooseNewSyncSource();
+ if (!_syncSource.empty()) {
+ return Status::OK();
+ }
+ } else {
+ _syncSource = _opts.syncSource; // set this back to the options source
}
- return Status::OK();
- }
- Status DataReplicator::_scheduleApplyBatch_inlock(const Operations& ops) {
- auto lambda = [this] (const TimestampStatus& ts, const Operations& theOps) {
- CBHStatus status = _exec->scheduleWork(stdx::bind(&DataReplicator::_onApplyBatchFinish,
- this,
- stdx::placeholders::_1,
- ts,
- theOps,
- theOps.size()));
- if (!status.isOK()) {
- LockGuard lk(_mutex);
- _initialSyncState->setStatus(status);
- _exec->signalEvent(_initialSyncState->finishEvent);
- return;
- }
- // block until callback done.
- _exec->wait(status.getValue());
- };
-
- _applier.reset(new Applier(_exec, ops, _opts.applierFn, lambda));
- return _applier->start();
- }
-
- Status DataReplicator::_scheduleFetch() {
- LockGuard lk(_mutex);
- return _scheduleFetch_inlock();
+ return Status{ErrorCodes::InvalidSyncSource, "No valid sync source."};
}
+ return Status::OK();
+}
- Status DataReplicator::_ensureGoodSyncSource_inlock() {
- if (_syncSource.empty()) {
- if (_replCoord) {
- _syncSource = _replCoord->chooseNewSyncSource();
- if (!_syncSource.empty()) {
- return Status::OK();
- }
- } else {
- _syncSource = _opts.syncSource; // set this back to the options source
+Status DataReplicator::_scheduleFetch_inlock() {
+ if (!_fetcher) {
+ if (!_ensureGoodSyncSource_inlock().isOK()) {
+ auto status = _exec->scheduleWork([this](const CallbackArgs&) { _doNextActions(); });
+ if (!status.isOK()) {
+ return status.getStatus();
}
+ }
- return Status{ErrorCodes::InvalidSyncSource, "No valid sync source."};
+ const auto startOptime =
+ _replCoord ? _replCoord->getMyLastOptime().getTimestamp() : _opts.startOptime;
+ const auto remoteOplogNS = _opts.remoteOplogNS;
+
+ // TODO: add query options await_data, oplog_replay
+ _fetcher.reset(new OplogFetcher(_exec,
+ startOptime,
+ _syncSource,
+ remoteOplogNS,
+ stdx::bind(&DataReplicator::_onOplogFetchFinish,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2)));
+ }
+ if (!_fetcher->isActive()) {
+ Status status = _fetcher->schedule();
+ if (!status.isOK()) {
+ return status;
}
- return Status::OK();
}
+ return Status::OK();
+}
- Status DataReplicator::_scheduleFetch_inlock() {
- if (!_fetcher) {
- if (!_ensureGoodSyncSource_inlock().isOK()) {
- auto status = _exec->scheduleWork([this](const CallbackArgs&){ _doNextActions(); });
- if (!status.isOK()) {
- return status.getStatus();
- }
- }
-
- const auto startOptime = _replCoord ? _replCoord->getMyLastOptime().getTimestamp()
- : _opts.startOptime;
- const auto remoteOplogNS = _opts.remoteOplogNS;
+Status DataReplicator::_scheduleReport() {
+ // TODO
+ return Status::OK();
+}
- // TODO: add query options await_data, oplog_replay
- _fetcher.reset(new OplogFetcher(_exec,
- startOptime,
- _syncSource,
- remoteOplogNS,
- stdx::bind(&DataReplicator::_onOplogFetchFinish,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2)));
+void DataReplicator::_changeStateIfNeeded() {
+ // TODO
+}
- }
- if (!_fetcher->isActive()) {
- Status status = _fetcher->schedule();
- if (!status.isOK()) {
- return status;
- }
- }
- return Status::OK();
+Status DataReplicator::scheduleShutdown() {
+ auto eventStatus = _exec->makeEvent();
+ if (!eventStatus.isOK()) {
+ return eventStatus.getStatus();
}
- Status DataReplicator::_scheduleReport() {
- // TODO
- return Status::OK();
+ {
+ LockGuard lk(_mutex);
+ invariant(!_onShutdown.isValid());
+ _onShutdown = eventStatus.getValue();
+ _cancelAllHandles_inlock();
}
- void DataReplicator::_changeStateIfNeeded() {
- // TODO
+ // Schedule _doNextActions in case nothing is active to trigger the _onShutdown event.
+ auto scheduleResult = _exec->scheduleWork([this](const CallbackArgs&) { _doNextActions(); });
+ if (scheduleResult.isOK()) {
+ return Status::OK();
}
+ return scheduleResult.getStatus();
+}
- Status DataReplicator::scheduleShutdown() {
- auto eventStatus = _exec->makeEvent();
- if (!eventStatus.isOK()) {
- return eventStatus.getStatus();
- }
-
- {
- LockGuard lk(_mutex);
- invariant(!_onShutdown.isValid());
- _onShutdown = eventStatus.getValue();
- _cancelAllHandles_inlock();
- }
-
- // Schedule _doNextActions in case nothing is active to trigger the _onShutdown event.
- auto scheduleResult = _exec->scheduleWork([this] (const CallbackArgs&) {
- _doNextActions();
- });
- if (scheduleResult.isOK()) {
- return Status::OK();
- }
- return scheduleResult.getStatus();
+void DataReplicator::waitForShutdown() {
+ Event onShutdown;
+ {
+ LockGuard lk(_mutex);
+ invariant(_onShutdown.isValid());
+ onShutdown = _onShutdown;
}
-
- void DataReplicator::waitForShutdown() {
- Event onShutdown;
- {
- LockGuard lk(_mutex);
- invariant(_onShutdown.isValid());
- onShutdown = _onShutdown;
- }
- _exec->waitForEvent(onShutdown);
- {
- LockGuard lk(_mutex);
- invariant(!_fetcher->isActive());
- invariant(!_applierActive);
- invariant(!_reporter->isActive());
- }
+ _exec->waitForEvent(onShutdown);
+ {
+ LockGuard lk(_mutex);
+ invariant(!_fetcher->isActive());
+ invariant(!_applierActive);
+ invariant(!_reporter->isActive());
}
+}
- Status DataReplicator::_shutdown() {
- auto status = scheduleShutdown();
- if (status.isOK()) {
- waitForShutdown();
- }
- return status;
+Status DataReplicator::_shutdown() {
+ auto status = scheduleShutdown();
+ if (status.isOK()) {
+ waitForShutdown();
}
+ return status;
+}
- void DataReplicator::_onOplogFetchFinish(const StatusWith<Fetcher::QueryResponse>& fetchResult,
- Fetcher::NextAction* nextAction) {
- const Status status = fetchResult.getStatus();
- if (status.code() == ErrorCodes::CallbackCanceled)
- return;
- if (status.isOK()) {
- const auto& docs = fetchResult.getValue().documents;
- if (docs.begin() != docs.end()) {
- LockGuard lk(_mutex);
- std::for_each(docs.cbegin(),
- docs.cend(),
- [&](const BSONObj& doc) {
- _oplogBuffer.push(doc);
- });
- auto doc = docs.rbegin();
- BSONElement tsElem(doc->getField("ts"));
- while(tsElem.eoo() && doc != docs.rend()) {
- tsElem = (doc++)->getField("ts");
- }
-
- if (!tsElem.eoo()) {
- _lastTimestampFetched = tsElem.timestamp();
- } else {
- warning() <<
- "Did not find a 'ts' timestamp field in any of the fetched documents";
- }
+void DataReplicator::_onOplogFetchFinish(const StatusWith<Fetcher::QueryResponse>& fetchResult,
+ Fetcher::NextAction* nextAction) {
+ const Status status = fetchResult.getStatus();
+ if (status.code() == ErrorCodes::CallbackCanceled)
+ return;
+ if (status.isOK()) {
+ const auto& docs = fetchResult.getValue().documents;
+ if (docs.begin() != docs.end()) {
+ LockGuard lk(_mutex);
+ std::for_each(
+ docs.cbegin(), docs.cend(), [&](const BSONObj& doc) { _oplogBuffer.push(doc); });
+ auto doc = docs.rbegin();
+ BSONElement tsElem(doc->getField("ts"));
+ while (tsElem.eoo() && doc != docs.rend()) {
+ tsElem = (doc++)->getField("ts");
}
- if (*nextAction == Fetcher::NextAction::kNoAction) {
- // TODO: create new fetcher?, with new query from where we left off -- d'tor fetcher
+ if (!tsElem.eoo()) {
+ _lastTimestampFetched = tsElem.timestamp();
+ } else {
+ warning() << "Did not find a 'ts' timestamp field in any of the fetched documents";
}
}
+ if (*nextAction == Fetcher::NextAction::kNoAction) {
+ // TODO: create new fetcher?, with new query from where we left off -- d'tor fetcher
+ }
+ }
- if (!status.isOK()) {
- // Got an error, now decide what to do...
- switch (status.code()) {
- case ErrorCodes::OplogStartMissing: {
- // possible rollback
- _rollbackCommonOptime = findCommonPoint(_syncSource, _lastTimestampApplied);
- if (_rollbackCommonOptime.isNull()) {
- auto s = _replCoord->setFollowerMode(MemberState::RS_RECOVERING);
- if (!s) {
- error() << "Failed to transition to RECOVERING when "
- "we couldn't find oplog start position ("
- << _fetcher->getStartTimestamp().toString()
- << ") from sync source: "
- << _syncSource.toString();
- }
- Date_t until{_exec->now() +
- _opts.blacklistSyncSourcePenaltyForOplogStartMissing};
- _replCoord->blacklistSyncSource(_syncSource, until);
+ if (!status.isOK()) {
+ // Got an error, now decide what to do...
+ switch (status.code()) {
+ case ErrorCodes::OplogStartMissing: {
+ // possible rollback
+ _rollbackCommonOptime = findCommonPoint(_syncSource, _lastTimestampApplied);
+ if (_rollbackCommonOptime.isNull()) {
+ auto s = _replCoord->setFollowerMode(MemberState::RS_RECOVERING);
+ if (!s) {
+ error() << "Failed to transition to RECOVERING when "
+ "we couldn't find oplog start position ("
+ << _fetcher->getStartTimestamp().toString()
+ << ") from sync source: " << _syncSource.toString();
}
- else {
- // TODO: cleanup state/restart -- set _lastApplied, and other stuff
- }
- break;
- }
- case ErrorCodes::InvalidSyncSource:
- // Error, sync source
- // fallthrough
- default:
Date_t until{_exec->now() +
- _opts.blacklistSyncSourcePenaltyForNetworkConnectionError};
+ _opts.blacklistSyncSourcePenaltyForOplogStartMissing};
_replCoord->blacklistSyncSource(_syncSource, until);
+ } else {
+ // TODO: cleanup state/restart -- set _lastApplied, and other stuff
+ }
+ break;
}
- LockGuard lk(_mutex);
- _syncSource = HostAndPort();
+ case ErrorCodes::InvalidSyncSource:
+ // Error, sync source
+ // fallthrough
+ default:
+ Date_t until{_exec->now() +
+ _opts.blacklistSyncSourcePenaltyForNetworkConnectionError};
+ _replCoord->blacklistSyncSource(_syncSource, until);
}
-
- _doNextActions();
+ LockGuard lk(_mutex);
+ _syncSource = HostAndPort();
}
-} // namespace repl
-} // namespace mongo
+
+ _doNextActions();
+}
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/data_replicator.h b/src/mongo/db/repl/data_replicator.h
index 8f1717db09a..3bfce3e9a93 100644
--- a/src/mongo/db/repl/data_replicator.h
+++ b/src/mongo/db/repl/data_replicator.h
@@ -72,7 +72,7 @@ struct InitialSyncState;
/** State for decision tree */
enum class DataReplicatorState {
- Steady, // Default
+ Steady, // Default
InitialSync,
Rollback,
Uninitialized,
@@ -81,11 +81,7 @@ enum class DataReplicatorState {
std::string toString(DataReplicatorState s);
// TBD -- ignore for now
-enum class DataReplicatorScope {
- ReplicateAll,
- ReplicateDB,
- ReplicateCollection
-};
+enum class DataReplicatorScope { ReplicateAll, ReplicateDB, ReplicateCollection };
struct DataReplicatorOptions {
// Error and retry values
@@ -103,12 +99,11 @@ struct DataReplicatorOptions {
DataReplicatorScope scope = DataReplicatorScope::ReplicateAll;
std::string scopeNS;
BSONObj filterCriteria;
- HostAndPort syncSource; // for use without replCoord -- maybe some kind of rsMonitor/interface
+ HostAndPort syncSource; // for use without replCoord -- maybe some kind of rsMonitor/interface
// TODO: replace with real applier function
- Applier::ApplyOperationFn applierFn = [] (OperationContext*, const BSONObj&) -> Status {
- return Status::OK();
- };
+ Applier::ApplyOperationFn applierFn =
+ [](OperationContext*, const BSONObj&) -> Status { return Status::OK(); };
std::string toString() const {
return str::stream() << "DataReplicatorOptions -- "
@@ -129,7 +124,7 @@ struct DataReplicatorOptions {
class DataReplicator {
public:
/** Function to call when a batch is applied. */
- using OnBatchCompleteFn = stdx::function<void (const Timestamp&)>;
+ using OnBatchCompleteFn = stdx::function<void(const Timestamp&)>;
DataReplicator(DataReplicatorOptions opts,
ReplicationExecutor* exec,
@@ -137,8 +132,7 @@ public:
/**
* Used by non-replication coordinator processes, like sharding.
*/
- DataReplicator(DataReplicatorOptions opts,
- ReplicationExecutor* exec);
+ DataReplicator(DataReplicatorOptions opts, ReplicationExecutor* exec);
/**
* Used for testing.
@@ -165,7 +159,7 @@ public:
void waitForShutdown();
// Resumes apply replication events from the oplog
- Status resume(bool wait=false);
+ Status resume(bool wait = false);
// Pauses replication and application
Status pause();
@@ -189,11 +183,12 @@ public:
// For testing only
void _resetState_inlock(Timestamp lastAppliedOptime);
- void __setSourceForTesting(HostAndPort src) { _syncSource = src; }
+ void __setSourceForTesting(HostAndPort src) {
+ _syncSource = src;
+ }
void _setInitialSyncStorageInterface(CollectionCloner::StorageInterface* si);
private:
-
// Returns OK when there is a good syncSource at _syncSource.
Status _ensureGoodSyncSource_inlock();
@@ -263,39 +258,39 @@ private:
// (I) Independently synchronized, see member variable comment.
// Protects member data of this ReplicationCoordinator.
- mutable stdx::mutex _mutex; // (S)
- DataReplicatorState _state; // (MX)
+ mutable stdx::mutex _mutex; // (S)
+ DataReplicatorState _state; // (MX)
// initial sync state
- std::unique_ptr<InitialSyncState> _initialSyncState; // (M)
- CollectionCloner::StorageInterface* _storage; // (M)
+ std::unique_ptr<InitialSyncState> _initialSyncState; // (M)
+ CollectionCloner::StorageInterface* _storage; // (M)
// set during scheduling and onFinish
- bool _fetcherPaused; // (X)
- std::unique_ptr<OplogFetcher> _fetcher; // (S)
- std::unique_ptr<QueryFetcher> _tmpFetcher; // (S)
+ bool _fetcherPaused; // (X)
+ std::unique_ptr<OplogFetcher> _fetcher; // (S)
+ std::unique_ptr<QueryFetcher> _tmpFetcher; // (S)
- bool _reporterPaused; // (M)
- Handle _reporterHandle; // (M)
- std::unique_ptr<Reporter> _reporter; // (M)
+ bool _reporterPaused; // (M)
+ Handle _reporterHandle; // (M)
+ std::unique_ptr<Reporter> _reporter; // (M)
- bool _applierActive; // (M)
- bool _applierPaused; // (X)
- std::unique_ptr<Applier> _applier; // (M)
- OnBatchCompleteFn _batchCompletedFn; // (M)
+ bool _applierActive; // (M)
+ bool _applierPaused; // (X)
+ std::unique_ptr<Applier> _applier; // (M)
+ OnBatchCompleteFn _batchCompletedFn; // (M)
- HostAndPort _syncSource; // (M)
- Timestamp _lastTimestampFetched; // (MX)
- Timestamp _lastTimestampApplied; // (MX)
- BlockingQueue<BSONObj> _oplogBuffer; // (M)
+ HostAndPort _syncSource; // (M)
+ Timestamp _lastTimestampFetched; // (MX)
+ Timestamp _lastTimestampApplied; // (MX)
+ BlockingQueue<BSONObj> _oplogBuffer; // (M)
// Shutdown
- Event _onShutdown; // (M)
+ Event _onShutdown; // (M)
// Rollback stuff
- Timestamp _rollbackCommonOptime; // (MX)
+ Timestamp _rollbackCommonOptime; // (MX)
};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/data_replicator_test.cpp b/src/mongo/db/repl/data_replicator_test.cpp
index 0b8aa6887f5..de8f2da9f12 100644
--- a/src/mongo/db/repl/data_replicator_test.cpp
+++ b/src/mongo/db/repl/data_replicator_test.cpp
@@ -53,599 +53,590 @@
#include "mongo/unittest/unittest.h"
namespace {
- using namespace mongo;
- using namespace mongo::repl;
- using executor::NetworkInterfaceMock;
- using LockGuard = stdx::lock_guard<stdx::mutex>;
- using UniqueLock = stdx::unique_lock<stdx::mutex>;
- using mutex = stdx::mutex;
-
- ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(configBson));
- ASSERT_OK(config.validate());
- return config;
+using namespace mongo;
+using namespace mongo::repl;
+using executor::NetworkInterfaceMock;
+using LockGuard = stdx::lock_guard<stdx::mutex>;
+using UniqueLock = stdx::unique_lock<stdx::mutex>;
+using mutex = stdx::mutex;
+
+ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(configBson));
+ ASSERT_OK(config.validate());
+ return config;
+}
+const HostAndPort target("localhost", -1);
+
+class DataReplicatorTest : public ReplicationExecutorTest {
+public:
+ DataReplicatorTest() {}
+
+ void postExecutorThreadLaunch() override{};
+
+ void reset() {
+ // clear/reset state
}
- const HostAndPort target("localhost", -1);
- class DataReplicatorTest : public ReplicationExecutorTest {
- public:
-
- DataReplicatorTest() {}
-
- void postExecutorThreadLaunch() override {};
-
- void reset() {
- // clear/reset state
- }
-
- void createDataReplicator(DataReplicatorOptions opts) {
- _dr.reset(new DataReplicator(opts, &(getExecutor()), _repl.get()));
- _dr->__setSourceForTesting(target);
- }
-
- void createDataReplicator(DataReplicatorOptions opts,
- DataReplicator::OnBatchCompleteFn batchCompletedFn) {
- _dr.reset(new DataReplicator(opts, &(getExecutor()), _repl.get(), batchCompletedFn));
- _dr->__setSourceForTesting(target);
- }
-
- void scheduleNetworkResponse(const BSONObj& obj) {
- NetworkInterfaceMock* net = getNet();
- ASSERT_TRUE(net->hasReadyRequests());
- scheduleNetworkResponse(net->getNextReadyRequest(), obj);
- }
-
- void scheduleNetworkResponse(NetworkInterfaceMock::NetworkOperationIterator noi,
- const BSONObj& obj) {
- NetworkInterfaceMock* net = getNet();
- Milliseconds millis(0);
- RemoteCommandResponse response(obj, millis);
- ReplicationExecutor::ResponseStatus responseStatus(response);
- net->scheduleResponse(noi, net->now(), responseStatus);
- }
-
- void scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason) {
- NetworkInterfaceMock* net = getNet();
- ASSERT_TRUE(net->hasReadyRequests());
- ReplicationExecutor::ResponseStatus responseStatus(code, reason);
- net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
- }
-
- void processNetworkResponse(const BSONObj& obj) {
- scheduleNetworkResponse(obj);
- finishProcessingNetworkResponse();
- }
-
- void processNetworkResponse(ErrorCodes::Error code, const std::string& reason) {
- scheduleNetworkResponse(code, reason);
- finishProcessingNetworkResponse();
- }
-
- void finishProcessingNetworkResponse() {
- getNet()->runReadyNetworkOperations();
- ASSERT_FALSE(getNet()->hasReadyRequests());
- }
-
- DataReplicator& getDR() { return *_dr; }
- ReplicationCoordinator& getRepl() { return *_repl; }
-
- protected:
-
- void setUp() override {
- ReplicationExecutorTest::setUp();
- reset();
-
- _settings.replSet = "foo"; // We are a replica set :)
- _repl.reset(new ReplicationCoordinatorMock(_settings));
- launchExecutorThread();
- DataReplicatorOptions options;
- options.initialSyncRetryWait = Milliseconds(0);
- createDataReplicator(options);
- }
+ void createDataReplicator(DataReplicatorOptions opts) {
+ _dr.reset(new DataReplicator(opts, &(getExecutor()), _repl.get()));
+ _dr->__setSourceForTesting(target);
+ }
- void tearDown() override {
- ReplicationExecutorTest::tearDown();
- _dr.reset();
- _repl.reset();
- // Executor may still invoke callback before shutting down.
- }
+ void createDataReplicator(DataReplicatorOptions opts,
+ DataReplicator::OnBatchCompleteFn batchCompletedFn) {
+ _dr.reset(new DataReplicator(opts, &(getExecutor()), _repl.get(), batchCompletedFn));
+ _dr->__setSourceForTesting(target);
+ }
- private:
- std::unique_ptr<DataReplicator> _dr;
- std::unique_ptr<ReplicationCoordinator> _repl;
- ReplSettings _settings;
+ void scheduleNetworkResponse(const BSONObj& obj) {
+ NetworkInterfaceMock* net = getNet();
+ ASSERT_TRUE(net->hasReadyRequests());
+ scheduleNetworkResponse(net->getNextReadyRequest(), obj);
+ }
- };
+ void scheduleNetworkResponse(NetworkInterfaceMock::NetworkOperationIterator noi,
+ const BSONObj& obj) {
+ NetworkInterfaceMock* net = getNet();
+ Milliseconds millis(0);
+ RemoteCommandResponse response(obj, millis);
+ ReplicationExecutor::ResponseStatus responseStatus(response);
+ net->scheduleResponse(noi, net->now(), responseStatus);
+ }
- TEST_F(DataReplicatorTest, CreateDestroy) {
+ void scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason) {
+ NetworkInterfaceMock* net = getNet();
+ ASSERT_TRUE(net->hasReadyRequests());
+ ReplicationExecutor::ResponseStatus responseStatus(code, reason);
+ net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
}
- TEST_F(DataReplicatorTest, StartOk) {
- ASSERT_EQ(getDR().start().code(), ErrorCodes::OK);
+ void processNetworkResponse(const BSONObj& obj) {
+ scheduleNetworkResponse(obj);
+ finishProcessingNetworkResponse();
}
- TEST_F(DataReplicatorTest, CannotInitialSyncAfterStart) {
- ASSERT_EQ(getDR().start().code(), ErrorCodes::OK);
- ASSERT_EQ(getDR().initialSync(), ErrorCodes::AlreadyInitialized);
+ void processNetworkResponse(ErrorCodes::Error code, const std::string& reason) {
+ scheduleNetworkResponse(code, reason);
+ finishProcessingNetworkResponse();
}
- // Used to run a Initial Sync in a separate thread, to avoid blocking test execution.
- class InitialSyncBackgroundRunner {
- public:
+ void finishProcessingNetworkResponse() {
+ getNet()->runReadyNetworkOperations();
+ ASSERT_FALSE(getNet()->hasReadyRequests());
+ }
- InitialSyncBackgroundRunner(DataReplicator* dr) :
- _dr(dr),
- _result(Status(ErrorCodes::BadValue, "failed to set status")) {}
+ DataReplicator& getDR() {
+ return *_dr;
+ }
+ ReplicationCoordinator& getRepl() {
+ return *_repl;
+ }
- // Could block if _sgr has not finished
- TimestampStatus getResult() {
- _thread->join();
- return _result;
- }
+protected:
+ void setUp() override {
+ ReplicationExecutorTest::setUp();
+ reset();
+
+ _settings.replSet = "foo"; // We are a replica set :)
+ _repl.reset(new ReplicationCoordinatorMock(_settings));
+ launchExecutorThread();
+ DataReplicatorOptions options;
+ options.initialSyncRetryWait = Milliseconds(0);
+ createDataReplicator(options);
+ }
- void run() {
- _thread.reset(new stdx::thread(stdx::bind(&InitialSyncBackgroundRunner::_run, this)));
- sleepmillis(2); // sleep to let new thread run initialSync so it schedules work
- }
+ void tearDown() override {
+ ReplicationExecutorTest::tearDown();
+ _dr.reset();
+ _repl.reset();
+ // Executor may still invoke callback before shutting down.
+ }
- private:
+private:
+ std::unique_ptr<DataReplicator> _dr;
+ std::unique_ptr<ReplicationCoordinator> _repl;
+ ReplSettings _settings;
+};
+
+TEST_F(DataReplicatorTest, CreateDestroy) {}
+
+TEST_F(DataReplicatorTest, StartOk) {
+ ASSERT_EQ(getDR().start().code(), ErrorCodes::OK);
+}
+
+TEST_F(DataReplicatorTest, CannotInitialSyncAfterStart) {
+ ASSERT_EQ(getDR().start().code(), ErrorCodes::OK);
+ ASSERT_EQ(getDR().initialSync(), ErrorCodes::AlreadyInitialized);
+}
+
+// Used to run an Initial Sync in a separate thread, to avoid blocking test execution.
+class InitialSyncBackgroundRunner {
+public:
+ InitialSyncBackgroundRunner(DataReplicator* dr)
+ : _dr(dr), _result(Status(ErrorCodes::BadValue, "failed to set status")) {}
+
+ // Could block if _sgr has not finished
+ TimestampStatus getResult() {
+ _thread->join();
+ return _result;
+ }
- void _run() {
- setThreadName("InitialSyncRunner");
- log() << "starting initial sync";
- _result = _dr->initialSync(); // blocking
- }
+ void run() {
+ _thread.reset(new stdx::thread(stdx::bind(&InitialSyncBackgroundRunner::_run, this)));
+ sleepmillis(2); // sleep to let new thread run initialSync so it schedules work
+ }
- DataReplicator* _dr;
- TimestampStatus _result;
- std::unique_ptr<stdx::thread> _thread;
- };
+private:
+ void _run() {
+ setThreadName("InitialSyncRunner");
+ log() << "starting initial sync";
+ _result = _dr->initialSync(); // blocking
+ }
- class InitialSyncTest : public DataReplicatorTest {
- public:
- InitialSyncTest()
- : _insertCollectionFn([&](OperationContext* txn,
- const NamespaceString& theNss,
- const std::vector<BSONObj>& theDocuments) {
- log() << "insertDoc for " << theNss.toString();
- LockGuard lk(_collectionCountMutex);
- ++(_collectionCounts[theNss.toString()]);
- return Status::OK();
- }),
- _beginCollectionFn([&](OperationContext* txn,
- const NamespaceString& theNss,
- const CollectionOptions& theOptions,
- const std::vector<BSONObj>& theIndexSpecs) {
- log() << "beginCollection for " << theNss.toString();
- LockGuard lk(_collectionCountMutex);
- _collectionCounts[theNss.toString()] = 0;
- return Status::OK();
- }) {};
-
- protected:
-
- void setStorageFuncs(ClonerStorageInterfaceMock::InsertCollectionFn ins,
- ClonerStorageInterfaceMock::BeginCollectionFn beg) {
- _insertCollectionFn = ins;
- _beginCollectionFn = beg;
- }
+ DataReplicator* _dr;
+ TimestampStatus _result;
+ std::unique_ptr<stdx::thread> _thread;
+};
+
+class InitialSyncTest : public DataReplicatorTest {
+public:
+ InitialSyncTest()
+ : _insertCollectionFn([&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const std::vector<BSONObj>& theDocuments) {
+ log() << "insertDoc for " << theNss.toString();
+ LockGuard lk(_collectionCountMutex);
+ ++(_collectionCounts[theNss.toString()]);
+ return Status::OK();
+ }),
+ _beginCollectionFn([&](OperationContext* txn,
+ const NamespaceString& theNss,
+ const CollectionOptions& theOptions,
+ const std::vector<BSONObj>& theIndexSpecs) {
+ log() << "beginCollection for " << theNss.toString();
+ LockGuard lk(_collectionCountMutex);
+ _collectionCounts[theNss.toString()] = 0;
+ return Status::OK();
+ }){};
+
+protected:
+ void setStorageFuncs(ClonerStorageInterfaceMock::InsertCollectionFn ins,
+ ClonerStorageInterfaceMock::BeginCollectionFn beg) {
+ _insertCollectionFn = ins;
+ _beginCollectionFn = beg;
+ }
- void setResponses(std::vector<BSONObj> resps) {
- _responses = resps;
- }
+ void setResponses(std::vector<BSONObj> resps) {
+ _responses = resps;
+ }
- void startSync() {
- DataReplicator* dr = &(getDR());
+ void startSync() {
+ DataReplicator* dr = &(getDR());
- _storage.beginCollectionFn = _beginCollectionFn;
- _storage.insertDocumentsFn = _insertCollectionFn;
- _storage.insertMissingDocFn = [&] (OperationContext* txn,
- const NamespaceString& nss,
- const BSONObj& doc) {
+ _storage.beginCollectionFn = _beginCollectionFn;
+ _storage.insertDocumentsFn = _insertCollectionFn;
+ _storage.insertMissingDocFn =
+ [&](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) {
return Status::OK();
};
- dr->_setInitialSyncStorageInterface(&_storage);
- _isbr.reset(new InitialSyncBackgroundRunner(dr));
- _isbr->run();
- }
+ dr->_setInitialSyncStorageInterface(&_storage);
+ _isbr.reset(new InitialSyncBackgroundRunner(dr));
+ _isbr->run();
+ }
- void playResponses() {
- // TODO: Handle network responses
- NetworkInterfaceMock* net = getNet();
- int processedRequests(0);
- const int expectedResponses(_responses.size());
-
- //counter for oplog entries
- int c(0);
- while (true) {
- net->enterNetwork();
- if (!net->hasReadyRequests() && processedRequests < expectedResponses) {
- net->exitNetwork();
- continue;
- }
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
-
- const BSONObj reqBSON = noi->getRequest().cmdObj;
- const BSONElement cmdElem = reqBSON.firstElement();
- const bool isGetMore =
- cmdElem.fieldNameStringData().equalCaseInsensitive("getmore");
- const long long cursorId = cmdElem.numberLong();
- if (isGetMore && cursorId == 1LL) {
- // process getmore requests from the oplog fetcher
- auto respBSON = fromjson(
- str::stream() << "{ok:1, cursor:{id:1, ns:'local.oplog.rs', nextBatch:["
- "{ts:Timestamp(" << ++c << ",1), h:1, ns:'test.a', v:2, op:'u', o2:{_id:"
- << c << "}, o:{$set:{a:1}}}"
- "]}}");
- net->scheduleResponse(noi,
- net->now(),
- ResponseStatus(RemoteCommandResponse(respBSON,
- Milliseconds(10))));
- net->runReadyNetworkOperations();
- net->exitNetwork();
- continue;
- }
- else if (isGetMore) {
- // TODO: return more data
- }
-
- // process fixed set of responses
- log() << "processing network request: "
- << noi->getRequest().dbname << "." << noi->getRequest().cmdObj.toString();
- net->scheduleResponse(noi,
- net->now(),
- ResponseStatus(
- RemoteCommandResponse(_responses[processedRequests],
- Milliseconds(10))));
+ void playResponses() {
+ // TODO: Handle network responses
+ NetworkInterfaceMock* net = getNet();
+ int processedRequests(0);
+ const int expectedResponses(_responses.size());
+
+ // counter for oplog entries
+ int c(0);
+ while (true) {
+ net->enterNetwork();
+ if (!net->hasReadyRequests() && processedRequests < expectedResponses) {
+ net->exitNetwork();
+ continue;
+ }
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+
+ const BSONObj reqBSON = noi->getRequest().cmdObj;
+ const BSONElement cmdElem = reqBSON.firstElement();
+ const bool isGetMore = cmdElem.fieldNameStringData().equalCaseInsensitive("getmore");
+ const long long cursorId = cmdElem.numberLong();
+ if (isGetMore && cursorId == 1LL) {
+ // process getmore requests from the oplog fetcher
+ auto respBSON = fromjson(str::stream()
+ << "{ok:1, cursor:{id:1, ns:'local.oplog.rs', nextBatch:["
+ "{ts:Timestamp(" << ++c
+ << ",1), h:1, ns:'test.a', v:2, op:'u', o2:{_id:" << c
+ << "}, o:{$set:{a:1}}}"
+ "]}}");
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ ResponseStatus(RemoteCommandResponse(respBSON, Milliseconds(10))));
net->runReadyNetworkOperations();
net->exitNetwork();
- if (++processedRequests >= expectedResponses) {
- log() << "done processing expected requests ";
- break; // once we have processed all requests, continue;
- }
+ continue;
+ } else if (isGetMore) {
+ // TODO: return more data
}
- net->enterNetwork();
- if (net->hasReadyRequests()) {
- log() << "There are unexpected requests left";
- log() << "next cmd: " << net->getNextReadyRequest()->getRequest().cmdObj.toString();
- ASSERT_FALSE(net->hasReadyRequests());
- }
+ // process fixed set of responses
+ log() << "processing network request: " << noi->getRequest().dbname << "."
+ << noi->getRequest().cmdObj.toString();
+ net->scheduleResponse(noi,
+ net->now(),
+ ResponseStatus(RemoteCommandResponse(
+ _responses[processedRequests], Milliseconds(10))));
+ net->runReadyNetworkOperations();
net->exitNetwork();
+ if (++processedRequests >= expectedResponses) {
+ log() << "done processing expected requests ";
+ break; // once we have processed all requests, continue;
+ }
}
- void verifySync(Status s = Status::OK()) {
- verifySync(_isbr->getResult().getStatus().code());
- }
-
- void verifySync(ErrorCodes::Error code) {
- // Check result
- ASSERT_EQ(_isbr->getResult().getStatus().code(), code) << "status codes differ";
- }
-
- std::map<std::string, int> getLocalCollectionCounts() {
- return _collectionCounts;
+ net->enterNetwork();
+ if (net->hasReadyRequests()) {
+ log() << "There are unexpected requests left";
+ log() << "next cmd: " << net->getNextReadyRequest()->getRequest().cmdObj.toString();
+ ASSERT_FALSE(net->hasReadyRequests());
}
-
- private:
- ClonerStorageInterfaceMock::InsertCollectionFn _insertCollectionFn;
- ClonerStorageInterfaceMock::BeginCollectionFn _beginCollectionFn;
- std::vector<BSONObj> _responses;
- std::unique_ptr<InitialSyncBackgroundRunner> _isbr;
- std::map<std::string, int> _collectionCounts; // counts of inserts during cloning
- mutex _collectionCountMutex; // used to protect the collectionCount map
- ClonerStorageInterfaceMock _storage;
- };
-
- TEST_F(InitialSyncTest, Complete) {
- /**
- * Initial Sync will issue these query/commands
- * - startTS = oplog.rs->find().sort({$natural:-1}).limit(-1).next()["ts"]
- * - listDatabases (foreach db do below)
- * -- cloneDatabase (see DatabaseCloner tests).
- * - endTS = oplog.rs->find().sort({$natural:-1}).limit(-1).next()["ts"]
- * - ops = oplog.rs->find({ts:{$gte: startTS}}) (foreach op)
- * -- if local doc is missing, getCollection(op.ns).findOne(_id:op.o2._id)
- * - if any retries were done in the previous loop, endTS query again for minvalid
- *
- */
-
- const std::vector<BSONObj> responses = {
- // get latest oplog ts
- fromjson(
- "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
- "]}}"),
- // oplog fetcher find
- fromjson(
- "{ok:1, cursor:{id:1, ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
- "]}}"),
-// Clone Start
- // listDatabases
- fromjson("{ok:1, databases:[{name:'a'}]}"),
- // listCollections for "a"
- fromjson(
- "{ok:1, cursor:{id:0, ns:'a.$cmd.listCollections', firstBatch:["
- "{name:'a', options:{}} "
- "]}}"),
- // listIndexes:a
- fromjson(
- "{ok:1, cursor:{id:0, ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:1, key:{_id:1}, name:'_id_', ns:'a.a'}"
- "]}}"),
- // find:a
- fromjson(
- "{ok:1, cursor:{id:0, ns:'a.a', firstBatch:["
- "{_id:1, a:1} "
- "]}}"),
-// Clone Done
- // get latest oplog ts
- fromjson(
- "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(2,2), h:1, ns:'b.c', v:2, op:'i', o:{_id:1, c:1}}"
- "]}}"),
-// Applier starts ...
- };
- startSync();
- setResponses(responses);
- playResponses();
- verifySync();
+ net->exitNetwork();
}
- TEST_F(InitialSyncTest, MissingDocOnApplyCompletes) {
-
- DataReplicatorOptions opts;
- int applyCounter{0};
- opts.applierFn = [&] (OperationContext* txn, const BSONObj& op) {
- if (++applyCounter == 1) {
- return Status(ErrorCodes::NoMatchingDocument, "failed: missing doc.");
- }
- return Status::OK();
- };
- createDataReplicator(opts);
-
- const std::vector<BSONObj> responses = {
- // get latest oplog ts
- fromjson(
- "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
- "]}}"),
- // oplog fetcher find
- fromjson(
- "{ok:1, cursor:{id:1, ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'u', o2:{_id:1}, o:{$set:{a:1}}}"
- "]}}"),
-// Clone Start
- // listDatabases
- fromjson("{ok:1, databases:[{name:'a'}]}"),
- // listCollections for "a"
- fromjson(
- "{ok:1, cursor:{id:0, ns:'a.$cmd.listCollections', firstBatch:["
- "{name:'a', options:{}} "
- "]}}"),
- // listIndexes:a
- fromjson(
- "{ok:1, cursor:{id:0, ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:1, key:{_id:1}, name:'_id_', ns:'a.a'}"
- "]}}"),
- // find:a -- empty
- fromjson(
- "{ok:1, cursor:{id:0, ns:'a.a', firstBatch:[]}}"),
-// Clone Done
- // get latest oplog ts
- fromjson(
- "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(2,2), h:1, ns:'b.c', v:2, op:'i', o:{_id:1, c:1}}"
- "]}}"),
-// Applier starts ...
- // missing doc fetch -- find:a {_id:1}
- fromjson(
- "{ok:1, cursor:{id:0, ns:'a.a', firstBatch:["
- "{_id:1, a:1} "
- "]}}"),
- };
- startSync();
- setResponses(responses);
- playResponses();
- verifySync(ErrorCodes::OK);
+ void verifySync(Status s = Status::OK()) {
+ verifySync(_isbr->getResult().getStatus().code());
}
- TEST_F(InitialSyncTest, Failpoint) {
- mongo::getGlobalFailPointRegistry()->
- getFailPoint("failInitialSyncWithBadHost")->
- setMode(FailPoint::alwaysOn);
-
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ));
-
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
- Timestamp time1(100, 1);
- OpTime opTime1(time1, OpTime::kDefaultTerm);
- getRepl().setMyLastOptime(opTime1);
- ASSERT(getRepl().setFollowerMode(MemberState::RS_SECONDARY));
-
- DataReplicator* dr = &(getDR());
- InitialSyncBackgroundRunner isbr(dr);
- isbr.run();
- ASSERT_EQ(isbr.getResult().getStatus().code(), ErrorCodes::InitialSyncFailure);
-
- mongo::getGlobalFailPointRegistry()->
- getFailPoint("failInitialSyncWithBadHost")->
- setMode(FailPoint::off);
+ void verifySync(ErrorCodes::Error code) {
+ // Check result
+ ASSERT_EQ(_isbr->getResult().getStatus().code(), code) << "status codes differ";
}
- TEST_F(InitialSyncTest, FailsOnClone) {
- const std::vector<BSONObj> responses = {
- // get latest oplog ts
- fromjson(
- "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
- "]}}"),
- // oplog fetcher find
- fromjson(
- "{ok:1, cursor:{id:1, ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
- "]}}"),
-// Clone Start
- // listDatabases
- fromjson("{ok:0}")
- };
- startSync();
- setResponses(responses);
- playResponses();
- verifySync(ErrorCodes::InitialSyncFailure);
+ std::map<std::string, int> getLocalCollectionCounts() {
+ return _collectionCounts;
}
- class SteadyStateTest : public DataReplicatorTest {
- protected:
- void _testOplogStartMissing(const BSONObj& oplogFetcherResponse) {
- DataReplicator& dr = getDR();
- auto net = getNet();
- net->enterNetwork();
- ASSERT_OK(dr.start());
-
- ASSERT_TRUE(net->hasReadyRequests());
- auto noi = net->getNextReadyRequest();
- scheduleNetworkResponse(noi, oplogFetcherResponse);
- net->runReadyNetworkOperations();
- ASSERT_EQUALS(MemberState(MemberState::RS_RECOVERING).toString(),
- getRepl().getMemberState().toString());
+private:
+ ClonerStorageInterfaceMock::InsertCollectionFn _insertCollectionFn;
+ ClonerStorageInterfaceMock::BeginCollectionFn _beginCollectionFn;
+ std::vector<BSONObj> _responses;
+ std::unique_ptr<InitialSyncBackgroundRunner> _isbr;
+ std::map<std::string, int> _collectionCounts; // counts of inserts during cloning
+ mutex _collectionCountMutex; // used to protect the collectionCount map
+ ClonerStorageInterfaceMock _storage;
+};
+
+TEST_F(InitialSyncTest, Complete) {
+ /**
+ * Initial Sync will issue these query/commands
+ * - startTS = oplog.rs->find().sort({$natural:-1}).limit(-1).next()["ts"]
+ * - listDatabases (foreach db do below)
+ * -- cloneDatabase (see DatabaseCloner tests).
+ * - endTS = oplog.rs->find().sort({$natural:-1}).limit(-1).next()["ts"]
+ * - ops = oplog.rs->find({ts:{$gte: startTS}}) (foreach op)
+ * -- if local doc is missing, getCollection(op.ns).findOne(_id:op.o2._id)
+ * - if any retries were done in the previous loop, endTS query again for minvalid
+ *
+ */
+
+ const std::vector<BSONObj> responses = {
+ // get latest oplog ts
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
+ "]}}"),
+ // oplog fetcher find
+ fromjson(
+ "{ok:1, cursor:{id:1, ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
+ "]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:1, databases:[{name:'a'}]}"),
+ // listCollections for "a"
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'a.$cmd.listCollections', firstBatch:["
+ "{name:'a', options:{}} "
+ "]}}"),
+ // listIndexes:a
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'a.$cmd.listIndexes.a', firstBatch:["
+ "{v:1, key:{_id:1}, name:'_id_', ns:'a.a'}"
+ "]}}"),
+ // find:a
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'a.a', firstBatch:["
+ "{_id:1, a:1} "
+ "]}}"),
+ // Clone Done
+ // get latest oplog ts
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(2,2), h:1, ns:'b.c', v:2, op:'i', o:{_id:1, c:1}}"
+ "]}}"),
+ // Applier starts ...
+ };
+ startSync();
+ setResponses(responses);
+ playResponses();
+ verifySync();
+}
+
+TEST_F(InitialSyncTest, MissingDocOnApplyCompletes) {
+ DataReplicatorOptions opts;
+ int applyCounter{0};
+ opts.applierFn = [&](OperationContext* txn, const BSONObj& op) {
+ if (++applyCounter == 1) {
+ return Status(ErrorCodes::NoMatchingDocument, "failed: missing doc.");
}
-
+ return Status::OK();
};
-
- TEST_F(SteadyStateTest, StartWhenInSteadyState) {
- DataReplicator& dr = getDR();
- ASSERT_EQUALS(toString(DataReplicatorState::Uninitialized), toString(dr.getState()));
- ASSERT_OK(dr.start());
- ASSERT_EQUALS(toString(DataReplicatorState::Steady), toString(dr.getState()));
- ASSERT_EQUALS(ErrorCodes::IllegalOperation, dr.start().code());
- }
-
- TEST_F(SteadyStateTest, ShutdownAfterStart) {
+ createDataReplicator(opts);
+
+ const std::vector<BSONObj> responses = {
+ // get latest oplog ts
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
+ "]}}"),
+ // oplog fetcher find
+ fromjson(
+ "{ok:1, cursor:{id:1, ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'u', o2:{_id:1}, o:{$set:{a:1}}}"
+ "]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:1, databases:[{name:'a'}]}"),
+ // listCollections for "a"
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'a.$cmd.listCollections', firstBatch:["
+ "{name:'a', options:{}} "
+ "]}}"),
+ // listIndexes:a
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'a.$cmd.listIndexes.a', firstBatch:["
+ "{v:1, key:{_id:1}, name:'_id_', ns:'a.a'}"
+ "]}}"),
+ // find:a -- empty
+ fromjson("{ok:1, cursor:{id:0, ns:'a.a', firstBatch:[]}}"),
+ // Clone Done
+ // get latest oplog ts
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(2,2), h:1, ns:'b.c', v:2, op:'i', o:{_id:1, c:1}}"
+ "]}}"),
+ // Applier starts ...
+ // missing doc fetch -- find:a {_id:1}
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'a.a', firstBatch:["
+ "{_id:1, a:1} "
+ "]}}"),
+ };
+ startSync();
+ setResponses(responses);
+ playResponses();
+ verifySync(ErrorCodes::OK);
+}
+
+TEST_F(InitialSyncTest, Failpoint) {
+ mongo::getGlobalFailPointRegistry()
+ ->getFailPoint("failInitialSyncWithBadHost")
+ ->setMode(FailPoint::alwaysOn);
+
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")));
+
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+ Timestamp time1(100, 1);
+ OpTime opTime1(time1, OpTime::kDefaultTerm);
+ getRepl().setMyLastOptime(opTime1);
+ ASSERT(getRepl().setFollowerMode(MemberState::RS_SECONDARY));
+
+ DataReplicator* dr = &(getDR());
+ InitialSyncBackgroundRunner isbr(dr);
+ isbr.run();
+ ASSERT_EQ(isbr.getResult().getStatus().code(), ErrorCodes::InitialSyncFailure);
+
+ mongo::getGlobalFailPointRegistry()
+ ->getFailPoint("failInitialSyncWithBadHost")
+ ->setMode(FailPoint::off);
+}
+
+TEST_F(InitialSyncTest, FailsOnClone) {
+ const std::vector<BSONObj> responses = {
+ // get latest oplog ts
+ fromjson(
+ "{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
+ "]}}"),
+ // oplog fetcher find
+ fromjson(
+ "{ok:1, cursor:{id:1, ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:2, op:'i', o:{_id:1, a:1}}"
+ "]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:0}")};
+ startSync();
+ setResponses(responses);
+ playResponses();
+ verifySync(ErrorCodes::InitialSyncFailure);
+}
+
+class SteadyStateTest : public DataReplicatorTest {
+protected:
+ void _testOplogStartMissing(const BSONObj& oplogFetcherResponse) {
DataReplicator& dr = getDR();
- ASSERT_EQUALS(toString(DataReplicatorState::Uninitialized), toString(dr.getState()));
auto net = getNet();
net->enterNetwork();
ASSERT_OK(dr.start());
- ASSERT_TRUE(net->hasReadyRequests());
- getExecutor().shutdown();
- ASSERT_EQUALS(toString(DataReplicatorState::Steady), toString(dr.getState()));
- ASSERT_EQUALS(ErrorCodes::IllegalOperation, dr.start().code());
- }
- TEST_F(SteadyStateTest, RequestShutdownAfterStart) {
- DataReplicator& dr = getDR();
- ASSERT_EQUALS(toString(DataReplicatorState::Uninitialized), toString(dr.getState()));
- auto net = getNet();
- net->enterNetwork();
- ASSERT_OK(dr.start());
ASSERT_TRUE(net->hasReadyRequests());
- ASSERT_EQUALS(toString(DataReplicatorState::Steady), toString(dr.getState()));
- // Simulating an invalid remote oplog query response. This will invalidate the existing
- // sync source but that's fine because we're not testing oplog processing.
- scheduleNetworkResponse(BSON("ok" << 0));
+ auto noi = net->getNextReadyRequest();
+ scheduleNetworkResponse(noi, oplogFetcherResponse);
net->runReadyNetworkOperations();
- ASSERT_OK(dr.scheduleShutdown());
- net->exitNetwork(); // runs work item scheduled in 'scheduleShutdown()).
- dr.waitForShutdown();
- ASSERT_EQUALS(toString(DataReplicatorState::Uninitialized), toString(dr.getState()));
- }
-
- TEST_F(SteadyStateTest, RemoteOplogEmpty) {
- _testOplogStartMissing(
- fromjson("{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch: []}}"));
- }
-
- TEST_F(SteadyStateTest, RemoteOplogFirstOperationMissingTimestamp) {
- _testOplogStartMissing(
- fromjson("{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch: [{}]}}"));
- }
-
- TEST_F(SteadyStateTest, RemoteOplogFirstOperationTimestampDoesNotMatch) {
- _testOplogStartMissing(
- fromjson("{ok:1, cursor:{id:0, ns:'local.oplog.rs', "
- "firstBatch: [{ts:Timestamp(1,1)}]}}"));
+ ASSERT_EQUALS(MemberState(MemberState::RS_RECOVERING).toString(),
+ getRepl().getMemberState().toString());
}
-
- TEST_F(SteadyStateTest, ApplyOneOperation) {
- auto operationToApply = BSON("op" << "a" << "ts" << Timestamp(Seconds(123), 0));
- stdx::mutex mutex;
- unittest::Barrier barrier(2U);
- Timestamp lastTimestampApplied;
- BSONObj operationApplied;
- auto batchCompletedFn = [&] (const Timestamp& ts) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- lastTimestampApplied = ts;
- barrier.countDownAndWait();
- };
- DataReplicatorOptions opts;
- opts.applierFn = [&] (OperationContext* txn, const BSONObj& op) {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- operationApplied = op;
- barrier.countDownAndWait();
- return Status::OK();
- };
- createDataReplicator(opts, batchCompletedFn);
-
- auto& repl = getRepl();
- repl.setMyLastOptime(OpTime(operationToApply["ts"].timestamp(), 0));
- ASSERT_TRUE(repl.setFollowerMode(MemberState::RS_SECONDARY));
-
- auto net = getNet();
- net->enterNetwork();
-
- auto& dr = getDR();
- ASSERT_OK(dr.start());
-
- ASSERT_TRUE(net->hasReadyRequests());
- {
- auto networkRequest = net->getNextReadyRequest();
- auto commandResponse = BSON(
- "ok" << 1 <<
- "cursor" << BSON(
- "id" << 0LL <<
- "ns" << "local.oplog.rs" <<
- "firstBatch" << BSON_ARRAY(operationToApply)));
- scheduleNetworkResponse(networkRequest, commandResponse);
- }
- net->runReadyNetworkOperations();
-
- // Wait for applier function.
+};
+
+TEST_F(SteadyStateTest, StartWhenInSteadyState) {
+ DataReplicator& dr = getDR();
+ ASSERT_EQUALS(toString(DataReplicatorState::Uninitialized), toString(dr.getState()));
+ ASSERT_OK(dr.start());
+ ASSERT_EQUALS(toString(DataReplicatorState::Steady), toString(dr.getState()));
+ ASSERT_EQUALS(ErrorCodes::IllegalOperation, dr.start().code());
+}
+
+TEST_F(SteadyStateTest, ShutdownAfterStart) {
+ DataReplicator& dr = getDR();
+ ASSERT_EQUALS(toString(DataReplicatorState::Uninitialized), toString(dr.getState()));
+ auto net = getNet();
+ net->enterNetwork();
+ ASSERT_OK(dr.start());
+ ASSERT_TRUE(net->hasReadyRequests());
+ getExecutor().shutdown();
+ ASSERT_EQUALS(toString(DataReplicatorState::Steady), toString(dr.getState()));
+ ASSERT_EQUALS(ErrorCodes::IllegalOperation, dr.start().code());
+}
+
+TEST_F(SteadyStateTest, RequestShutdownAfterStart) {
+ DataReplicator& dr = getDR();
+ ASSERT_EQUALS(toString(DataReplicatorState::Uninitialized), toString(dr.getState()));
+ auto net = getNet();
+ net->enterNetwork();
+ ASSERT_OK(dr.start());
+ ASSERT_TRUE(net->hasReadyRequests());
+ ASSERT_EQUALS(toString(DataReplicatorState::Steady), toString(dr.getState()));
+ // Simulating an invalid remote oplog query response. This will invalidate the existing
+ // sync source but that's fine because we're not testing oplog processing.
+ scheduleNetworkResponse(BSON("ok" << 0));
+ net->runReadyNetworkOperations();
+ ASSERT_OK(dr.scheduleShutdown());
+    net->exitNetwork();  // runs work item scheduled in 'scheduleShutdown()'.
+ dr.waitForShutdown();
+ ASSERT_EQUALS(toString(DataReplicatorState::Uninitialized), toString(dr.getState()));
+}
+
+TEST_F(SteadyStateTest, RemoteOplogEmpty) {
+ _testOplogStartMissing(fromjson("{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch: []}}"));
+}
+
+TEST_F(SteadyStateTest, RemoteOplogFirstOperationMissingTimestamp) {
+ _testOplogStartMissing(
+ fromjson("{ok:1, cursor:{id:0, ns:'local.oplog.rs', firstBatch: [{}]}}"));
+}
+
+TEST_F(SteadyStateTest, RemoteOplogFirstOperationTimestampDoesNotMatch) {
+ _testOplogStartMissing(fromjson(
+ "{ok:1, cursor:{id:0, ns:'local.oplog.rs', "
+ "firstBatch: [{ts:Timestamp(1,1)}]}}"));
+}
+
+TEST_F(SteadyStateTest, ApplyOneOperation) {
+ auto operationToApply = BSON("op"
+ << "a"
+ << "ts" << Timestamp(Seconds(123), 0));
+ stdx::mutex mutex;
+ unittest::Barrier barrier(2U);
+ Timestamp lastTimestampApplied;
+ BSONObj operationApplied;
+ auto batchCompletedFn = [&](const Timestamp& ts) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ lastTimestampApplied = ts;
barrier.countDownAndWait();
- ASSERT_EQUALS(operationToApply["ts"].timestamp(), dr.getLastTimestampFetched());
- // Run scheduleWork() work item scheduled in DataReplicator::_onApplyBatchFinish().
- net->exitNetwork();
-
- // Wait for batch completion callback.
+ };
+ DataReplicatorOptions opts;
+ opts.applierFn = [&](OperationContext* txn, const BSONObj& op) {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ operationApplied = op;
barrier.countDownAndWait();
+ return Status::OK();
+ };
+ createDataReplicator(opts, batchCompletedFn);
+
+ auto& repl = getRepl();
+ repl.setMyLastOptime(OpTime(operationToApply["ts"].timestamp(), 0));
+ ASSERT_TRUE(repl.setFollowerMode(MemberState::RS_SECONDARY));
+
+ auto net = getNet();
+ net->enterNetwork();
+
+ auto& dr = getDR();
+ ASSERT_OK(dr.start());
+
+ ASSERT_TRUE(net->hasReadyRequests());
+ {
+ auto networkRequest = net->getNextReadyRequest();
+ auto commandResponse = BSON(
+ "ok" << 1 << "cursor" << BSON("id" << 0LL << "ns"
+ << "local.oplog.rs"
+ << "firstBatch" << BSON_ARRAY(operationToApply)));
+ scheduleNetworkResponse(networkRequest, commandResponse);
+ }
+ net->runReadyNetworkOperations();
+
+ // Wait for applier function.
+ barrier.countDownAndWait();
+ ASSERT_EQUALS(operationToApply["ts"].timestamp(), dr.getLastTimestampFetched());
+ // Run scheduleWork() work item scheduled in DataReplicator::_onApplyBatchFinish().
+ net->exitNetwork();
+
+ // Wait for batch completion callback.
+ barrier.countDownAndWait();
+
+ ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
+ repl.getMemberState().toString());
+ {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ ASSERT_EQUALS(operationToApply, operationApplied);
+ ASSERT_EQUALS(operationToApply["ts"].timestamp(), lastTimestampApplied);
+ }
- ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
- repl.getMemberState().toString());
- {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- ASSERT_EQUALS(operationToApply, operationApplied);
- ASSERT_EQUALS(operationToApply["ts"].timestamp(), lastTimestampApplied);
- }
-
- // Ensure that we send position information upstream after completing batch.
- net->enterNetwork();
- ASSERT_TRUE(net->hasReadyRequests());
- {
- auto networkRequest = net->getNextReadyRequest();
- auto commandRequest = networkRequest->getRequest();
- ASSERT_EQUALS("admin", commandRequest.dbname);
- const auto& cmdObj = commandRequest.cmdObj;
- ASSERT_EQUALS(std::string("replSetUpdatePosition"), cmdObj.firstElementFieldName());
- }
+ // Ensure that we send position information upstream after completing batch.
+ net->enterNetwork();
+ ASSERT_TRUE(net->hasReadyRequests());
+ {
+ auto networkRequest = net->getNextReadyRequest();
+ auto commandRequest = networkRequest->getRequest();
+ ASSERT_EQUALS("admin", commandRequest.dbname);
+ const auto& cmdObj = commandRequest.cmdObj;
+ ASSERT_EQUALS(std::string("replSetUpdatePosition"), cmdObj.firstElementFieldName());
}
+}
-} // namespace
+} // namespace
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index b96ab403169..b0c0bddcd01 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -47,294 +47,287 @@ namespace repl {
namespace {
- const char* kNameFieldName = "name";
- const char* kOptionsFieldName = "options";
-
- /**
- * Default listCollections predicate.
- */
- bool acceptAllPred(const BSONObj&) {
- return true;
- }
+const char* kNameFieldName = "name";
+const char* kOptionsFieldName = "options";
- /**
- * Creates a listCollections command obj with an optional filter.
- */
- BSONObj createListCollectionsCommandObject(const BSONObj& filter) {
- BSONObjBuilder output;
- output.append("listCollections", 1);
- if (!filter.isEmpty()) {
- output.append("filter", filter);
- }
- return output.obj();
- }
+/**
+ * Default listCollections predicate.
+ */
+bool acceptAllPred(const BSONObj&) {
+ return true;
+}
-} // namespace
-
- DatabaseCloner::DatabaseCloner(ReplicationExecutor* executor,
- const HostAndPort& source,
- const std::string& dbname,
- const BSONObj& listCollectionsFilter,
- const ListCollectionsPredicateFn& listCollectionsPred,
- CollectionCloner::StorageInterface* si,
- const CollectionCallbackFn& collWork,
- const CallbackFn& onCompletion)
- : _executor(executor),
- _source(source),
- _dbname(dbname),
- _listCollectionsFilter(listCollectionsFilter),
- _listCollectionsPredicate(listCollectionsPred ? listCollectionsPred : acceptAllPred),
- _storageInterface(si),
- _collectionWork(collWork),
- _onCompletion(onCompletion),
- _active(false),
- _listCollectionsFetcher(_executor,
- _source,
- _dbname,
- createListCollectionsCommandObject(_listCollectionsFilter),
- stdx::bind(&DatabaseCloner::_listCollectionsCallback,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2,
- stdx::placeholders::_3)),
- _scheduleDbWorkFn([this](const ReplicationExecutor::CallbackFn& work) {
- return _executor->scheduleDBWork(work);
- }),
- _startCollectionCloner([](CollectionCloner& cloner) { return cloner.start(); }) {
-
- uassert(ErrorCodes::BadValue, "null replication executor", executor);
- uassert(ErrorCodes::BadValue, "empty database name", !dbname.empty());
- uassert(ErrorCodes::BadValue, "storage interface cannot be null", si);
- uassert(ErrorCodes::BadValue, "collection callback function cannot be null", collWork);
- uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
+/**
+ * Creates a listCollections command obj with an optional filter.
+ */
+BSONObj createListCollectionsCommandObject(const BSONObj& filter) {
+ BSONObjBuilder output;
+ output.append("listCollections", 1);
+ if (!filter.isEmpty()) {
+ output.append("filter", filter);
}
-
- DatabaseCloner::~DatabaseCloner() {
- DESTRUCTOR_GUARD(
- cancel();
- wait();
- );
+ return output.obj();
+}
+
+} // namespace
+
+DatabaseCloner::DatabaseCloner(ReplicationExecutor* executor,
+ const HostAndPort& source,
+ const std::string& dbname,
+ const BSONObj& listCollectionsFilter,
+ const ListCollectionsPredicateFn& listCollectionsPred,
+ CollectionCloner::StorageInterface* si,
+ const CollectionCallbackFn& collWork,
+ const CallbackFn& onCompletion)
+ : _executor(executor),
+ _source(source),
+ _dbname(dbname),
+ _listCollectionsFilter(listCollectionsFilter),
+ _listCollectionsPredicate(listCollectionsPred ? listCollectionsPred : acceptAllPred),
+ _storageInterface(si),
+ _collectionWork(collWork),
+ _onCompletion(onCompletion),
+ _active(false),
+ _listCollectionsFetcher(_executor,
+ _source,
+ _dbname,
+ createListCollectionsCommandObject(_listCollectionsFilter),
+ stdx::bind(&DatabaseCloner::_listCollectionsCallback,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ stdx::placeholders::_3)),
+ _scheduleDbWorkFn([this](const ReplicationExecutor::CallbackFn& work) {
+ return _executor->scheduleDBWork(work);
+ }),
+ _startCollectionCloner([](CollectionCloner& cloner) { return cloner.start(); }) {
+ uassert(ErrorCodes::BadValue, "null replication executor", executor);
+ uassert(ErrorCodes::BadValue, "empty database name", !dbname.empty());
+ uassert(ErrorCodes::BadValue, "storage interface cannot be null", si);
+ uassert(ErrorCodes::BadValue, "collection callback function cannot be null", collWork);
+ uassert(ErrorCodes::BadValue, "callback function cannot be null", onCompletion);
+}
+
+DatabaseCloner::~DatabaseCloner() {
+ DESTRUCTOR_GUARD(cancel(); wait(););
+}
+
+const std::vector<BSONObj>& DatabaseCloner::getCollectionInfos() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _collectionInfos;
+}
+
+std::string DatabaseCloner::getDiagnosticString() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ str::stream output;
+ output << "DatabaseCloner";
+ output << " executor: " << _executor->getDiagnosticString();
+ output << " source: " << _source.toString();
+ output << " database: " << _dbname;
+ output << " listCollections filter" << _listCollectionsFilter;
+ output << " active: " << _active;
+ output << " collection info objects (empty if listCollections is in progress): "
+ << _collectionInfos.size();
+ return output;
+}
+
+bool DatabaseCloner::isActive() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _active;
+}
+
+Status DatabaseCloner::start() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+
+ if (_active) {
+ return Status(ErrorCodes::IllegalOperation, "database cloner already started");
}
- const std::vector<BSONObj>& DatabaseCloner::getCollectionInfos() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _collectionInfos;
+ Status scheduleResult = _listCollectionsFetcher.schedule();
+ if (!scheduleResult.isOK()) {
+ return scheduleResult;
}
- std::string DatabaseCloner::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- str::stream output;
- output << "DatabaseCloner";
- output << " executor: " << _executor->getDiagnosticString();
- output << " source: " << _source.toString();
- output << " database: " << _dbname;
- output << " listCollections filter" << _listCollectionsFilter;
- output << " active: " << _active;
- output << " collection info objects (empty if listCollections is in progress): "
- << _collectionInfos.size();
- return output;
- }
+ _active = true;
- bool DatabaseCloner::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _active;
- }
+ return Status::OK();
+}
- Status DatabaseCloner::start() {
+void DatabaseCloner::cancel() {
+ {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (_active) {
- return Status(ErrorCodes::IllegalOperation, "database cloner already started");
- }
-
- Status scheduleResult = _listCollectionsFetcher.schedule();
- if (!scheduleResult.isOK()) {
- return scheduleResult;
+ if (!_active) {
+ return;
}
-
- _active = true;
-
- return Status::OK();
}
- void DatabaseCloner::cancel() {
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _listCollectionsFetcher.cancel();
+}
- if (!_active) {
- return;
- }
- }
+void DatabaseCloner::wait() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _condition.wait(lk, [this]() { return !_active; });
+}
- _listCollectionsFetcher.cancel();
- }
+void DatabaseCloner::setScheduleDbWorkFn(const CollectionCloner::ScheduleDbWorkFn& work) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
- void DatabaseCloner::wait() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _condition.wait(lk, [this]() { return !_active; });
- }
+ _scheduleDbWorkFn = work;
+}
- void DatabaseCloner::setScheduleDbWorkFn(const CollectionCloner::ScheduleDbWorkFn& work) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+void DatabaseCloner::setStartCollectionClonerFn(
+ const StartCollectionClonerFn& startCollectionCloner) {
+ _startCollectionCloner = startCollectionCloner;
+}
- _scheduleDbWorkFn = work;
+void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryResponse>& result,
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob) {
+ if (!result.isOK()) {
+ _finishCallback(result.getStatus());
+ return;
}
- void DatabaseCloner::setStartCollectionClonerFn(
- const StartCollectionClonerFn& startCollectionCloner) {
-
- _startCollectionCloner = startCollectionCloner;
+ auto batchData(result.getValue());
+ auto&& documents = batchData.documents;
+
+ // We may be called with multiple batches leading to a need to grow _collectionInfos.
+ _collectionInfos.reserve(_collectionInfos.size() + documents.size());
+ std::copy_if(documents.begin(),
+ documents.end(),
+ std::back_inserter(_collectionInfos),
+ _listCollectionsPredicate);
+
+ // The fetcher will continue to call with kGetMore until an error or the last batch.
+ if (*nextAction == Fetcher::NextAction::kGetMore) {
+ invariant(getMoreBob);
+ getMoreBob->append("getMore", batchData.cursorId);
+ getMoreBob->append("collection", batchData.nss.coll());
+ return;
}
- void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryResponse>& result,
- Fetcher::NextAction* nextAction,
- BSONObjBuilder* getMoreBob) {
+ // Nothing to do for an empty database.
+ if (_collectionInfos.empty()) {
+ _finishCallback(Status::OK());
+ return;
+ }
- if (!result.isOK()) {
- _finishCallback(result.getStatus());
+ _collectionNamespaces.reserve(_collectionInfos.size());
+ std::set<std::string> seen;
+ for (auto&& info : _collectionInfos) {
+ BSONElement nameElement = info.getField(kNameFieldName);
+ if (nameElement.eoo()) {
+ _finishCallback(Status(ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '"
+ << kNameFieldName << "' "
+ << "field : " << info));
return;
}
-
- auto batchData(result.getValue());
- auto&& documents = batchData.documents;
-
- // We may be called with multiple batches leading to a need to grow _collectionInfos.
- _collectionInfos.reserve(_collectionInfos.size() + documents.size());
- std::copy_if(documents.begin(), documents.end(),
- std::back_inserter(_collectionInfos),
- _listCollectionsPredicate);
-
- // The fetcher will continue to call with kGetMore until an error or the last batch.
- if (*nextAction == Fetcher::NextAction::kGetMore) {
- invariant(getMoreBob);
- getMoreBob->append("getMore", batchData.cursorId);
- getMoreBob->append("collection", batchData.nss.coll());
+ if (nameElement.type() != mongo::String) {
+ _finishCallback(Status(ErrorCodes::TypeMismatch,
+ str::stream() << "'" << kNameFieldName
+ << "' field must be a string: " << info));
return;
}
-
- // Nothing to do for an empty database.
- if (_collectionInfos.empty()) {
- _finishCallback(Status::OK());
+ const std::string collectionName = nameElement.String();
+ if (seen.find(collectionName) != seen.end()) {
+ _finishCallback(Status(ErrorCodes::DuplicateKey,
+ str::stream()
+ << "collection info contains duplicate collection name "
+ << "'" << collectionName << "': " << info));
return;
}
- _collectionNamespaces.reserve(_collectionInfos.size());
- std::set<std::string> seen;
- for (auto&& info : _collectionInfos) {
- BSONElement nameElement = info.getField(kNameFieldName);
- if (nameElement.eoo()) {
- _finishCallback(Status(ErrorCodes::FailedToParse, str::stream() <<
- "collection info must contain '" << kNameFieldName << "' " <<
- "field : " << info));
- return;
- }
- if (nameElement.type() != mongo::String) {
- _finishCallback(Status(ErrorCodes::TypeMismatch, str::stream() <<
- "'" << kNameFieldName << "' field must be a string: " << info));
- return;
- }
- const std::string collectionName = nameElement.String();
- if (seen.find(collectionName) != seen.end()) {
- _finishCallback(Status(ErrorCodes::DuplicateKey, str::stream() <<
- "collection info contains duplicate collection name " <<
- "'" << collectionName << "': " << info));
- return;
- }
-
- BSONElement optionsElement = info.getField(kOptionsFieldName);
- if (optionsElement.eoo()) {
- _finishCallback(Status(ErrorCodes::FailedToParse, str::stream() <<
- "collection info must contain '" << kOptionsFieldName << "' " <<
- "field : " << info));
- return;
- }
- if (!optionsElement.isABSONObj()) {
- _finishCallback(Status(ErrorCodes::TypeMismatch, str::stream() <<
- "'" << kOptionsFieldName << "' field must be an object: " <<
- info));
- return;
- }
- const BSONObj optionsObj = optionsElement.Obj();
- CollectionOptions options;
- Status parseStatus = options.parse(optionsObj);
- if (!parseStatus.isOK()) {
- _finishCallback(parseStatus);
- return;
- }
- seen.insert(collectionName);
-
- _collectionNamespaces.emplace_back(_dbname, collectionName);
- auto&& nss = *_collectionNamespaces.crbegin();
-
- try {
- _collectionCloners.emplace_back(
- _executor,
- _source,
- nss,
- options,
- stdx::bind(&DatabaseCloner::_collectionClonerCallback,
- this,
- stdx::placeholders::_1,
- nss),
- _storageInterface);
- }
- catch (const UserException& ex) {
- _finishCallback(ex.toStatus());
- return;
- }
+ BSONElement optionsElement = info.getField(kOptionsFieldName);
+ if (optionsElement.eoo()) {
+ _finishCallback(Status(ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '"
+ << kOptionsFieldName << "' "
+ << "field : " << info));
+ return;
}
-
- for (auto&& collectionCloner : _collectionCloners) {
- collectionCloner.setScheduleDbWorkFn(_scheduleDbWorkFn);
+ if (!optionsElement.isABSONObj()) {
+ _finishCallback(Status(ErrorCodes::TypeMismatch,
+ str::stream() << "'" << kOptionsFieldName
+ << "' field must be an object: " << info));
+ return;
}
+ const BSONObj optionsObj = optionsElement.Obj();
+ CollectionOptions options;
+ Status parseStatus = options.parse(optionsObj);
+ if (!parseStatus.isOK()) {
+ _finishCallback(parseStatus);
+ return;
+ }
+ seen.insert(collectionName);
+
+ _collectionNamespaces.emplace_back(_dbname, collectionName);
+ auto&& nss = *_collectionNamespaces.crbegin();
+
+ try {
+ _collectionCloners.emplace_back(
+ _executor,
+ _source,
+ nss,
+ options,
+ stdx::bind(
+ &DatabaseCloner::_collectionClonerCallback, this, stdx::placeholders::_1, nss),
+ _storageInterface);
+ } catch (const UserException& ex) {
+ _finishCallback(ex.toStatus());
+ return;
+ }
+ }
+
+ for (auto&& collectionCloner : _collectionCloners) {
+ collectionCloner.setScheduleDbWorkFn(_scheduleDbWorkFn);
+ }
- // Start first collection cloner.
- _currentCollectionClonerIter = _collectionCloners.begin();
+ // Start first collection cloner.
+ _currentCollectionClonerIter = _collectionCloners.begin();
- LOG(1) << " cloning collection " << _currentCollectionClonerIter->getSourceNamespace();
+ LOG(1) << " cloning collection " << _currentCollectionClonerIter->getSourceNamespace();
+ Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter);
+ if (!startStatus.isOK()) {
+ LOG(1) << " failed to start collection cloning on "
+ << _currentCollectionClonerIter->getSourceNamespace() << ": " << startStatus;
+ _finishCallback(startStatus);
+ return;
+ }
+}
+
+void DatabaseCloner::_collectionClonerCallback(const Status& status, const NamespaceString& nss) {
+ // Forward collection cloner result to caller.
+ // Failure to clone a collection does not stop the database cloner
+ // from cloning the rest of the collections in the listCollections result.
+ _collectionWork(status, nss);
+
+    _currentCollectionClonerIter++;
+
+    if (_currentCollectionClonerIter != _collectionCloners.end()) {
+        LOG(1) << " cloning collection " << _currentCollectionClonerIter->getSourceNamespace();
Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter);
if (!startStatus.isOK()) {
LOG(1) << " failed to start collection cloning on "
- << _currentCollectionClonerIter->getSourceNamespace()
- << ": " << startStatus;
+ << _currentCollectionClonerIter->getSourceNamespace() << ": " << startStatus;
_finishCallback(startStatus);
return;
}
+ return;
}
- void DatabaseCloner::_collectionClonerCallback(const Status& status,
- const NamespaceString& nss) {
- // Forward collection cloner result to caller.
- // Failure to clone a collection does not stop the database cloner
- // from cloning the rest of the collections in the listCollections result.
- _collectionWork(status, nss);
-
- _currentCollectionClonerIter++;
-
- LOG(1) << " cloning collection " << _currentCollectionClonerIter->getSourceNamespace();
-
- if (_currentCollectionClonerIter != _collectionCloners.end()) {
- Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter);
- if (!startStatus.isOK()) {
- LOG(1) << " failed to start collection cloning on "
- << _currentCollectionClonerIter->getSourceNamespace()
- << ": " << startStatus;
- _finishCallback(startStatus);
- return;
- }
- return;
- }
-
- _finishCallback(Status::OK());
- }
+ _finishCallback(Status::OK());
+}
- void DatabaseCloner::_finishCallback(const Status& status) {
- _onCompletion(status);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _active = false;
- _condition.notify_all();
- }
+void DatabaseCloner::_finishCallback(const Status& status) {
+ _onCompletion(status);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _active = false;
+ _condition.notify_all();
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
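
Editor's note (not part of the change): the createListCollectionsCommandObject() helper above always emits a {listCollections: 1} command and appends a "filter" field only when the caller supplied a non-empty one. A minimal C++ sketch of the resulting command shape, using the same BSON builder API; the filter value here is an arbitrary illustration, not taken from this patch:

    // Sketch only: command object the cloner's fetcher sends to the source.
    BSONObjBuilder bob;
    bob.append("listCollections", 1);
    bob.append("filter",
               BSON("name"
                    << "coll"));  // omitted entirely when the filter is empty
    BSONObj cmd = bob.obj();      // {listCollections: 1, filter: {name: "coll"}}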
diff --git a/src/mongo/db/repl/database_cloner.h b/src/mongo/db/repl/database_cloner.h
index f1171bf5e4b..954f816cdaa 100644
--- a/src/mongo/db/repl/database_cloner.h
+++ b/src/mongo/db/repl/database_cloner.h
@@ -47,158 +47,157 @@
namespace mongo {
namespace repl {
- class DatabaseCloner : public BaseCloner {
- MONGO_DISALLOW_COPYING(DatabaseCloner);
- public:
-
- /**
- * Predicate used on the collection info objects returned by listCollections.
- * Each collection info is represented by a document in the following format:
- * {
- * name: <collection name>,
- * options: <collection options>
- * }
- *
- * Returns true if the collection described by the info object should be cloned.
- * Returns false if the collection should be ignored.
- */
- using ListCollectionsPredicateFn = stdx::function<bool (const BSONObj&)>;
-
- /**
- * Callback function to report progress of collection cloning. Arguments are:
- * - status from the collection cloner's 'onCompletion' callback.
- * - source namespace of the collection cloner that completed (or failed).
- *
- * Called exactly once for every collection cloner started by the the database cloner.
- */
- using CollectionCallbackFn = stdx::function<void (const Status&, const NamespaceString&)>;
-
- /**
- * Type of function to start a collection cloner.
- */
- using StartCollectionClonerFn = stdx::function<Status (CollectionCloner&)>;
-
- /**
- * Creates DatabaseCloner task in inactive state. Use start() to activate cloner.
- *
- * The cloner calls 'onCompletion' when the database cloning has completed or failed.
- *
- * 'onCompletion' will be called exactly once.
- *
- * Takes ownership of the passed StorageInterface object.
- */
- DatabaseCloner(ReplicationExecutor* executor,
- const HostAndPort& source,
- const std::string& dbname,
- const BSONObj& listCollectionsFilter,
- const ListCollectionsPredicateFn& listCollectionsPredicate,
- CollectionCloner::StorageInterface* storageInterface,
- const CollectionCallbackFn& collectionWork,
- const CallbackFn& onCompletion);
-
- virtual ~DatabaseCloner();
-
- /**
- * Returns collection info objects read from listCollections result.
- * This will return an empty vector until we have processed the last
- * batch of results from listCollections.
- */
- const std::vector<BSONObj>& getCollectionInfos() const;
-
- std::string getDiagnosticString() const override;
-
- bool isActive() const override;
-
- Status start() override;
-
- void cancel() override;
-
- void wait() override;
-
- //
- // Testing only functions below.
- //
-
- /**
- * Overrides how executor schedules database work.
- *
- * For testing only.
- */
- void setScheduleDbWorkFn(const CollectionCloner::ScheduleDbWorkFn& scheduleDbWorkFn);
-
- /**
- * Overrides how executor starts a collection cloner.
- *
- * For testing only
- */
- void setStartCollectionClonerFn(const StartCollectionClonerFn& startCollectionCloner);
-
- private:
-
- /**
- * Read collection names and options from listCollections result.
- */
- void _listCollectionsCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult,
- Fetcher::NextAction* nextAction,
- BSONObjBuilder* getMoreBob);
-
- /**
- * Forwards collection cloner result to client.
- * Starts a new cloner on a different collection.
- */
- void _collectionClonerCallback(const Status& status, const NamespaceString& nss);
-
- /**
- * Reports completion status.
- * Sets cloner to inactive.
- */
- void _finishCallback(const Status& status);
-
- // Not owned by us.
- ReplicationExecutor* _executor;
-
- HostAndPort _source;
- std::string _dbname;
- BSONObj _listCollectionsFilter;
- ListCollectionsPredicateFn _listCollectionsPredicate;
- CollectionCloner::StorageInterface* _storageInterface;
-
- // Invoked once for every successfully started collection cloner.
- CollectionCallbackFn _collectionWork;
-
- // Invoked once when cloning completes or fails.
- CallbackFn _onCompletion;
-
- // Protects member data of this database cloner.
- mutable stdx::mutex _mutex;
-
- mutable stdx::condition_variable _condition;
-
- // _active is true when database cloner is started.
- bool _active;
-
- // Fetcher instance for running listCollections command.
- Fetcher _listCollectionsFetcher;
-
- // Collection info objects returned from listCollections.
- // Format of each document:
- // {
- // name: <collection name>,
- // options: <collection options>
- // }
- // Holds all collection infos from listCollections.
- std::vector<BSONObj> _collectionInfos;
+class DatabaseCloner : public BaseCloner {
+ MONGO_DISALLOW_COPYING(DatabaseCloner);
+
+public:
+ /**
+ * Predicate used on the collection info objects returned by listCollections.
+ * Each collection info is represented by a document in the following format:
+ * {
+ * name: <collection name>,
+ * options: <collection options>
+ * }
+ *
+ * Returns true if the collection described by the info object should be cloned.
+ * Returns false if the collection should be ignored.
+ */
+ using ListCollectionsPredicateFn = stdx::function<bool(const BSONObj&)>;
+
+ /**
+ * Callback function to report progress of collection cloning. Arguments are:
+ * - status from the collection cloner's 'onCompletion' callback.
+ * - source namespace of the collection cloner that completed (or failed).
+ *
+     * Called exactly once for every collection cloner started by the database cloner.
+ */
+ using CollectionCallbackFn = stdx::function<void(const Status&, const NamespaceString&)>;
+
+ /**
+ * Type of function to start a collection cloner.
+ */
+ using StartCollectionClonerFn = stdx::function<Status(CollectionCloner&)>;
+
+ /**
+ * Creates DatabaseCloner task in inactive state. Use start() to activate cloner.
+ *
+ * The cloner calls 'onCompletion' when the database cloning has completed or failed.
+ *
+ * 'onCompletion' will be called exactly once.
+ *
+ * Takes ownership of the passed StorageInterface object.
+ */
+ DatabaseCloner(ReplicationExecutor* executor,
+ const HostAndPort& source,
+ const std::string& dbname,
+ const BSONObj& listCollectionsFilter,
+ const ListCollectionsPredicateFn& listCollectionsPredicate,
+ CollectionCloner::StorageInterface* storageInterface,
+ const CollectionCallbackFn& collectionWork,
+ const CallbackFn& onCompletion);
+
+ virtual ~DatabaseCloner();
+
+ /**
+ * Returns collection info objects read from listCollections result.
+ * This will return an empty vector until we have processed the last
+ * batch of results from listCollections.
+ */
+ const std::vector<BSONObj>& getCollectionInfos() const;
+
+ std::string getDiagnosticString() const override;
+
+ bool isActive() const override;
+
+ Status start() override;
+
+ void cancel() override;
+
+ void wait() override;
+
+ //
+ // Testing only functions below.
+ //
+
+ /**
+ * Overrides how executor schedules database work.
+ *
+ * For testing only.
+ */
+ void setScheduleDbWorkFn(const CollectionCloner::ScheduleDbWorkFn& scheduleDbWorkFn);
+
+ /**
+ * Overrides how executor starts a collection cloner.
+ *
+     * For testing only.
+ */
+ void setStartCollectionClonerFn(const StartCollectionClonerFn& startCollectionCloner);
+
+private:
+ /**
+ * Read collection names and options from listCollections result.
+ */
+ void _listCollectionsCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult,
+ Fetcher::NextAction* nextAction,
+ BSONObjBuilder* getMoreBob);
+
+ /**
+ * Forwards collection cloner result to client.
+ * Starts a new cloner on a different collection.
+ */
+ void _collectionClonerCallback(const Status& status, const NamespaceString& nss);
+
+ /**
+ * Reports completion status.
+ * Sets cloner to inactive.
+ */
+ void _finishCallback(const Status& status);
+
+ // Not owned by us.
+ ReplicationExecutor* _executor;
+
+ HostAndPort _source;
+ std::string _dbname;
+ BSONObj _listCollectionsFilter;
+ ListCollectionsPredicateFn _listCollectionsPredicate;
+ CollectionCloner::StorageInterface* _storageInterface;
+
+ // Invoked once for every successfully started collection cloner.
+ CollectionCallbackFn _collectionWork;
+
+ // Invoked once when cloning completes or fails.
+ CallbackFn _onCompletion;
+
+ // Protects member data of this database cloner.
+ mutable stdx::mutex _mutex;
+
+ mutable stdx::condition_variable _condition;
+
+ // _active is true when database cloner is started.
+ bool _active;
+
+ // Fetcher instance for running listCollections command.
+ Fetcher _listCollectionsFetcher;
+
+ // Collection info objects returned from listCollections.
+ // Format of each document:
+ // {
+ // name: <collection name>,
+ // options: <collection options>
+ // }
+ // Holds all collection infos from listCollections.
+ std::vector<BSONObj> _collectionInfos;
+
+ std::vector<NamespaceString> _collectionNamespaces;
+
+ std::list<CollectionCloner> _collectionCloners;
+ std::list<CollectionCloner>::iterator _currentCollectionClonerIter;
- std::vector<NamespaceString> _collectionNamespaces;
+ // Function for scheduling database work using the executor.
+ CollectionCloner::ScheduleDbWorkFn _scheduleDbWorkFn;
- std::list<CollectionCloner> _collectionCloners;
- std::list<CollectionCloner>::iterator _currentCollectionClonerIter;
+ StartCollectionClonerFn _startCollectionCloner;
+};
- // Function for scheduling database work using the executor.
- CollectionCloner::ScheduleDbWorkFn _scheduleDbWorkFn;
-
- StartCollectionClonerFn _startCollectionCloner;
- };
-
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
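
Editor's note (not part of the change): a minimal usage sketch of the interface declared above. The identifiers 'executor', 'source' and 'storage' are assumed to exist in the caller's scope and are placeholders, not names from this patch; the predicate and callbacks mirror the ones exercised by the tests in database_cloner_test.cpp below.

    // Sketch only: wiring up a DatabaseCloner as declared in database_cloner.h.
    DatabaseCloner cloner(&executor,
                          source,
                          "db",
                          BSONObj(),  // no listCollections filter
                          [](const BSONObj& info) {
                              // ListCollectionsPredicateFn: skip collections named "b".
                              return info["name"].String() != "b";
                          },
                          storage,
                          [](const Status& status, const NamespaceString& nss) {
                              // CollectionCallbackFn: reported once per started collection cloner.
                              LOG(1) << "cloned " << nss.ns() << ": " << status;
                          },
                          [](const Status& status) {
                              // Completion callback: reported exactly once when cloning ends.
                              LOG(1) << "database clone finished: " << status;
                          });
    uassertStatusOK(cloner.start());
    cloner.wait();  // Blocks until the cloner transitions back to inactive.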
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index 1cb772bc898..3683e24eae3 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -39,463 +39,494 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
-
- const std::string dbname("db");
-
- class DatabaseClonerTest : public BaseClonerTest {
- public:
-
- DatabaseClonerTest();
- void collectionWork(const Status& status, const NamespaceString& sourceNss);
- void clear() override;
- BaseCloner* getCloner() const override;
-
- protected:
-
- void setUp() override;
- void tearDown() override;
-
- std::list<std::pair<Status, NamespaceString> > collectionWorkResults;
- std::unique_ptr<DatabaseCloner> databaseCloner;
- };
-
- DatabaseClonerTest::DatabaseClonerTest()
- : collectionWorkResults(),
- databaseCloner() { }
-
- void DatabaseClonerTest::collectionWork(const Status& status, const NamespaceString& srcNss) {
- collectionWorkResults.emplace_back(status, srcNss);
- }
-
- void DatabaseClonerTest::setUp() {
- BaseClonerTest::setUp();
- collectionWorkResults.clear();
- databaseCloner.reset(new DatabaseCloner(&getExecutor(),
- target,
- dbname,
- BSONObj(),
- DatabaseCloner::ListCollectionsPredicateFn(),
- storageInterface.get(),
- stdx::bind(&DatabaseClonerTest::collectionWork,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2),
- stdx::bind(&DatabaseClonerTest::setStatus,
- this,
- stdx::placeholders::_1)));
- }
-
- void DatabaseClonerTest::tearDown() {
- BaseClonerTest::tearDown();
- databaseCloner.reset();
- collectionWorkResults.clear();
- }
-
- void DatabaseClonerTest::clear() {
- }
-
- BaseCloner* DatabaseClonerTest::getCloner() const {
- return databaseCloner.get();
- }
-
- TEST_F(DatabaseClonerTest, InvalidConstruction) {
- ReplicationExecutor& executor = getExecutor();
-
- const BSONObj filter;
- DatabaseCloner::ListCollectionsPredicateFn pred;
- CollectionCloner::StorageInterface* si = storageInterface.get();
- namespace stdxph = stdx::placeholders;
- const DatabaseCloner::CollectionCallbackFn ccb =
- stdx::bind(&DatabaseClonerTest::collectionWork, this, stdxph::_1, stdxph::_2);
-
- const auto& cb = [](const Status&) { FAIL("should not reach here"); };
-
- // Null executor.
- ASSERT_THROWS(DatabaseCloner(nullptr, target, dbname, filter, pred, si, ccb, cb),
+using namespace mongo;
+using namespace mongo::repl;
+
+const std::string dbname("db");
+
+class DatabaseClonerTest : public BaseClonerTest {
+public:
+ DatabaseClonerTest();
+ void collectionWork(const Status& status, const NamespaceString& sourceNss);
+ void clear() override;
+ BaseCloner* getCloner() const override;
+
+protected:
+ void setUp() override;
+ void tearDown() override;
+
+ std::list<std::pair<Status, NamespaceString>> collectionWorkResults;
+ std::unique_ptr<DatabaseCloner> databaseCloner;
+};
+
+DatabaseClonerTest::DatabaseClonerTest() : collectionWorkResults(), databaseCloner() {}
+
+void DatabaseClonerTest::collectionWork(const Status& status, const NamespaceString& srcNss) {
+ collectionWorkResults.emplace_back(status, srcNss);
+}
+
+void DatabaseClonerTest::setUp() {
+ BaseClonerTest::setUp();
+ collectionWorkResults.clear();
+ databaseCloner.reset(new DatabaseCloner(
+ &getExecutor(),
+ target,
+ dbname,
+ BSONObj(),
+ DatabaseCloner::ListCollectionsPredicateFn(),
+ storageInterface.get(),
+ stdx::bind(&DatabaseClonerTest::collectionWork,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2),
+ stdx::bind(&DatabaseClonerTest::setStatus, this, stdx::placeholders::_1)));
+}
+
+void DatabaseClonerTest::tearDown() {
+ BaseClonerTest::tearDown();
+ databaseCloner.reset();
+ collectionWorkResults.clear();
+}
+
+void DatabaseClonerTest::clear() {}
+
+BaseCloner* DatabaseClonerTest::getCloner() const {
+ return databaseCloner.get();
+}
+
+TEST_F(DatabaseClonerTest, InvalidConstruction) {
+ ReplicationExecutor& executor = getExecutor();
+
+ const BSONObj filter;
+ DatabaseCloner::ListCollectionsPredicateFn pred;
+ CollectionCloner::StorageInterface* si = storageInterface.get();
+ namespace stdxph = stdx::placeholders;
+ const DatabaseCloner::CollectionCallbackFn ccb =
+ stdx::bind(&DatabaseClonerTest::collectionWork, this, stdxph::_1, stdxph::_2);
+
+ const auto& cb = [](const Status&) { FAIL("should not reach here"); };
+
+ // Null executor.
+ ASSERT_THROWS(DatabaseCloner(nullptr, target, dbname, filter, pred, si, ccb, cb),
+ UserException);
+
+ // Empty database name
+ ASSERT_THROWS(DatabaseCloner(&executor, target, "", filter, pred, si, ccb, cb), UserException);
+
+ // Callback function cannot be null.
+ {
+ DatabaseCloner::CallbackFn ncb;
+ ASSERT_THROWS(DatabaseCloner(&executor, target, dbname, filter, pred, si, ccb, ncb),
UserException);
-
- // Empty database name
- ASSERT_THROWS(DatabaseCloner(&executor, target, "", filter, pred, si, ccb, cb),
- UserException);
-
- // Callback function cannot be null.
- {
- DatabaseCloner::CallbackFn ncb;
- ASSERT_THROWS(DatabaseCloner(&executor, target, dbname, filter, pred, si, ccb, ncb),
- UserException);
- }
-
- // Storage interface cannot be null.
- {
- CollectionCloner::StorageInterface* nsi = nullptr;
- ASSERT_THROWS(DatabaseCloner(&executor, target, dbname, filter, pred, nsi, ccb, cb),
- UserException);
- }
-
- // CollectionCallbackFn function cannot be null.
- {
- DatabaseCloner::CollectionCallbackFn nccb;
- ASSERT_THROWS(DatabaseCloner(&executor, target, dbname, filter, pred, si, nccb, cb),
- UserException);
- }
- }
-
- TEST_F(DatabaseClonerTest, ClonerLifeCycle) {
- testLifeCycle();
}
- TEST_F(DatabaseClonerTest, FirstRemoteCommandWithoutFilter) {
- ASSERT_OK(databaseCloner->start());
-
- auto net = getNet();
- ASSERT_TRUE(net->hasReadyRequests());
- NetworkOperationIterator noi = net->getNextReadyRequest();
- auto&& noiRequest = noi->getRequest();
- ASSERT_EQUALS(nss.db().toString(), noiRequest.dbname);
- ASSERT_EQUALS("listCollections", std::string(noiRequest.cmdObj.firstElementFieldName()));
- ASSERT_EQUALS(1, noiRequest.cmdObj.firstElement().numberInt());
- ASSERT_FALSE(noiRequest.cmdObj.hasField("filter"));
- ASSERT_FALSE(net->hasReadyRequests());
- ASSERT_TRUE(databaseCloner->isActive());
+ // Storage interface cannot be null.
+ {
+ CollectionCloner::StorageInterface* nsi = nullptr;
+ ASSERT_THROWS(DatabaseCloner(&executor, target, dbname, filter, pred, nsi, ccb, cb),
+ UserException);
}
- TEST_F(DatabaseClonerTest, FirstRemoteCommandWithFilter) {
- const BSONObj listCollectionsFilter = BSON("name" << "coll");
- databaseCloner.reset(new DatabaseCloner(&getExecutor(),
- target,
- dbname,
- listCollectionsFilter,
- DatabaseCloner::ListCollectionsPredicateFn(),
- storageInterface.get(),
- stdx::bind(&DatabaseClonerTest::collectionWork,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2),
- stdx::bind(&DatabaseClonerTest::setStatus,
- this,
- stdx::placeholders::_1)));
- ASSERT_OK(databaseCloner->start());
-
- auto net = getNet();
- ASSERT_TRUE(net->hasReadyRequests());
- NetworkOperationIterator noi = net->getNextReadyRequest();
- auto&& noiRequest = noi->getRequest();
- ASSERT_EQUALS(nss.db().toString(), noiRequest.dbname);
- ASSERT_EQUALS("listCollections", std::string(noiRequest.cmdObj.firstElementFieldName()));
- ASSERT_EQUALS(1, noiRequest.cmdObj.firstElement().numberInt());
- BSONElement filterElement = noiRequest.cmdObj.getField("filter");
- ASSERT_TRUE(filterElement.isABSONObj());
- ASSERT_EQUALS(listCollectionsFilter, filterElement.Obj());
- ASSERT_FALSE(net->hasReadyRequests());
- ASSERT_TRUE(databaseCloner->isActive());
+ // CollectionCallbackFn function cannot be null.
+ {
+ DatabaseCloner::CollectionCallbackFn nccb;
+ ASSERT_THROWS(DatabaseCloner(&executor, target, dbname, filter, pred, si, nccb, cb),
+ UserException);
}
-
- TEST_F(DatabaseClonerTest, InvalidListCollectionsFilter) {
- ASSERT_OK(databaseCloner->start());
-
- processNetworkResponse(
- BSON("ok" << 0 << "errmsg" << "unknown operator" << "code" << ErrorCodes::BadValue));
-
- ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
- ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, ClonerLifeCycle) {
+ testLifeCycle();
+}
+
+TEST_F(DatabaseClonerTest, FirstRemoteCommandWithoutFilter) {
+ ASSERT_OK(databaseCloner->start());
+
+ auto net = getNet();
+ ASSERT_TRUE(net->hasReadyRequests());
+ NetworkOperationIterator noi = net->getNextReadyRequest();
+ auto&& noiRequest = noi->getRequest();
+ ASSERT_EQUALS(nss.db().toString(), noiRequest.dbname);
+ ASSERT_EQUALS("listCollections", std::string(noiRequest.cmdObj.firstElementFieldName()));
+ ASSERT_EQUALS(1, noiRequest.cmdObj.firstElement().numberInt());
+ ASSERT_FALSE(noiRequest.cmdObj.hasField("filter"));
+ ASSERT_FALSE(net->hasReadyRequests());
+ ASSERT_TRUE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, FirstRemoteCommandWithFilter) {
+ const BSONObj listCollectionsFilter = BSON("name"
+ << "coll");
+ databaseCloner.reset(new DatabaseCloner(
+ &getExecutor(),
+ target,
+ dbname,
+ listCollectionsFilter,
+ DatabaseCloner::ListCollectionsPredicateFn(),
+ storageInterface.get(),
+ stdx::bind(&DatabaseClonerTest::collectionWork,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2),
+ stdx::bind(&DatabaseClonerTest::setStatus, this, stdx::placeholders::_1)));
+ ASSERT_OK(databaseCloner->start());
+
+ auto net = getNet();
+ ASSERT_TRUE(net->hasReadyRequests());
+ NetworkOperationIterator noi = net->getNextReadyRequest();
+ auto&& noiRequest = noi->getRequest();
+ ASSERT_EQUALS(nss.db().toString(), noiRequest.dbname);
+ ASSERT_EQUALS("listCollections", std::string(noiRequest.cmdObj.firstElementFieldName()));
+ ASSERT_EQUALS(1, noiRequest.cmdObj.firstElement().numberInt());
+ BSONElement filterElement = noiRequest.cmdObj.getField("filter");
+ ASSERT_TRUE(filterElement.isABSONObj());
+ ASSERT_EQUALS(listCollectionsFilter, filterElement.Obj());
+ ASSERT_FALSE(net->hasReadyRequests());
+ ASSERT_TRUE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, InvalidListCollectionsFilter) {
+ ASSERT_OK(databaseCloner->start());
+
+ processNetworkResponse(BSON("ok" << 0 << "errmsg"
+ << "unknown operator"
+ << "code" << ErrorCodes::BadValue));
+
+ ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+// A database may have no collections. Nothing to do for the database cloner.
+TEST_F(DatabaseClonerTest, ListCollectionsReturnedNoCollections) {
+ ASSERT_OK(databaseCloner->start());
+
+ // Keep going even if initial batch is empty.
+ processNetworkResponse(createListCollectionsResponse(1, BSONArray()));
+
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(databaseCloner->isActive());
+
+ // Final batch is also empty. Database cloner should stop and return a successful status.
+ processNetworkResponse(createListCollectionsResponse(0, BSONArray(), "nextBatch"));
+
+ ASSERT_OK(getStatus());
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
+ DatabaseCloner::ListCollectionsPredicateFn pred =
+ [](const BSONObj& info) { return info["name"].String() != "b"; };
+ databaseCloner.reset(new DatabaseCloner(
+ &getExecutor(),
+ target,
+ dbname,
+ BSONObj(),
+ pred,
+ storageInterface.get(),
+ stdx::bind(&DatabaseClonerTest::collectionWork,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2),
+ stdx::bind(&DatabaseClonerTest::setStatus, this, stdx::placeholders::_1)));
+ ASSERT_OK(databaseCloner->start());
+
+ const std::vector<BSONObj> sourceInfos = {BSON("name"
+ << "a"
+ << "options" << BSONObj()),
+ BSON("name"
+ << "b"
+ << "options" << BSONObj()),
+ BSON("name"
+ << "c"
+ << "options" << BSONObj())};
+ processNetworkResponse(createListCollectionsResponse(
+ 0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1] << sourceInfos[2])));
+
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(databaseCloner->isActive());
+
+ const std::vector<BSONObj>& collectionInfos = databaseCloner->getCollectionInfos();
+ ASSERT_EQUALS(2U, collectionInfos.size());
+ ASSERT_EQUALS(sourceInfos[0], collectionInfos[0]);
+ ASSERT_EQUALS(sourceInfos[2], collectionInfos[1]);
+}
+
+TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
+ ASSERT_OK(databaseCloner->start());
+
+ const std::vector<BSONObj> sourceInfos = {BSON("name"
+ << "a"
+ << "options" << BSONObj()),
+ BSON("name"
+ << "b"
+ << "options" << BSONObj())};
+ processNetworkResponse(createListCollectionsResponse(1, BSON_ARRAY(sourceInfos[0])));
+
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(databaseCloner->isActive());
+
+ {
+ const std::vector<BSONObj>& collectionInfos = databaseCloner->getCollectionInfos();
+ ASSERT_EQUALS(1U, collectionInfos.size());
+ ASSERT_EQUALS(sourceInfos[0], collectionInfos[0]);
}
- // A database may have no collections. Nothing to do for the database cloner.
- TEST_F(DatabaseClonerTest, ListCollectionsReturnedNoCollections) {
- ASSERT_OK(databaseCloner->start());
-
- // Keep going even if initial batch is empty.
- processNetworkResponse(createListCollectionsResponse(1, BSONArray()));
-
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(databaseCloner->isActive());
-
- // Final batch is also empty. Database cloner should stop and return a successful status.
- processNetworkResponse(createListCollectionsResponse(0, BSONArray(), "nextBatch"));
-
- ASSERT_OK(getStatus());
- ASSERT_FALSE(databaseCloner->isActive());
- }
+ processNetworkResponse(
+ createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[1]), "nextBatch"));
- TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
- DatabaseCloner::ListCollectionsPredicateFn pred = [](const BSONObj& info) {
- return info["name"].String() != "b";
- };
- databaseCloner.reset(new DatabaseCloner(&getExecutor(),
- target,
- dbname,
- BSONObj(),
- pred,
- storageInterface.get(),
- stdx::bind(&DatabaseClonerTest::collectionWork,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2),
- stdx::bind(&DatabaseClonerTest::setStatus,
- this,
- stdx::placeholders::_1)));
- ASSERT_OK(databaseCloner->start());
-
- const std::vector<BSONObj> sourceInfos = {
- BSON("name" << "a" << "options" << BSONObj()),
- BSON("name" << "b" << "options" << BSONObj()),
- BSON("name" << "c" << "options" << BSONObj())};
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] <<
- sourceInfos[1] <<
- sourceInfos[2])));
-
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(databaseCloner->isActive());
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(databaseCloner->isActive());
+ {
const std::vector<BSONObj>& collectionInfos = databaseCloner->getCollectionInfos();
ASSERT_EQUALS(2U, collectionInfos.size());
ASSERT_EQUALS(sourceInfos[0], collectionInfos[0]);
- ASSERT_EQUALS(sourceInfos[2], collectionInfos[1]);
+ ASSERT_EQUALS(sourceInfos[1], collectionInfos[1]);
}
-
- TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
- ASSERT_OK(databaseCloner->start());
-
- const std::vector<BSONObj> sourceInfos = {
- BSON("name" << "a" << "options" << BSONObj()),
- BSON("name" << "b" << "options" << BSONObj())};
- processNetworkResponse(createListCollectionsResponse(1, BSON_ARRAY(sourceInfos[0])));
-
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(databaseCloner->isActive());
-
- {
- const std::vector<BSONObj>& collectionInfos = databaseCloner->getCollectionInfos();
- ASSERT_EQUALS(1U, collectionInfos.size());
- ASSERT_EQUALS(sourceInfos[0], collectionInfos[0]);
- }
-
- processNetworkResponse(
- createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[1]), "nextBatch"));
-
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(databaseCloner->isActive());
-
- {
- const std::vector<BSONObj>& collectionInfos = databaseCloner->getCollectionInfos();
- ASSERT_EQUALS(2U, collectionInfos.size());
- ASSERT_EQUALS(sourceInfos[0], collectionInfos[0]);
- ASSERT_EQUALS(sourceInfos[1], collectionInfos[1]);
- }
- }
-
- TEST_F(DatabaseClonerTest, CollectionInfoNameFieldMissing) {
- ASSERT_OK(databaseCloner->start());
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("options" << BSONObj()))));
- ASSERT_EQUALS(ErrorCodes::FailedToParse, getStatus().code());
- ASSERT_STRING_CONTAINS(getStatus().reason(), "must contain 'name' field");
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, CollectionInfoNameNotAString) {
- ASSERT_OK(databaseCloner->start());
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("name" << 123 << "options" << BSONObj()))));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
- ASSERT_STRING_CONTAINS(getStatus().reason(), "'name' field must be a string");
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
- ASSERT_OK(databaseCloner->start());
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("name" << "" << "options" << BSONObj()))));
- ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
- ASSERT_STRING_CONTAINS(getStatus().reason(), "invalid collection namespace: db.");
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, CollectionInfoNameDuplicate) {
- ASSERT_OK(databaseCloner->start());
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("name" << "a" << "options" << BSONObj()) <<
- BSON("name" << "a" << "options" << BSONObj()))));
- ASSERT_EQUALS(ErrorCodes::DuplicateKey, getStatus().code());
- ASSERT_STRING_CONTAINS(getStatus().reason(), "duplicate collection name 'a'");
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, CollectionInfoOptionsFieldMissing) {
- ASSERT_OK(databaseCloner->start());
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(BSON("name" << "a"))));
- ASSERT_EQUALS(ErrorCodes::FailedToParse, getStatus().code());
- ASSERT_STRING_CONTAINS(getStatus().reason(), "must contain 'options' field");
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, CollectionInfoOptionsNotAnObject) {
- ASSERT_OK(databaseCloner->start());
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("name" << "a" << "options" << 123))));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
- ASSERT_STRING_CONTAINS(getStatus().reason(), "'options' field must be an object");
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, InvalidCollectionOptions) {
- ASSERT_OK(databaseCloner->start());
-
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("name" << "a" << "options" << BSON("storageEngine" << 1)))));
-
- ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, ListCollectionsReturnsEmptyCollectionName) {
- databaseCloner.reset(new DatabaseCloner(&getExecutor(),
- target,
- dbname,
- BSONObj(),
- DatabaseCloner::ListCollectionsPredicateFn(),
- storageInterface.get(),
- stdx::bind(&DatabaseClonerTest::collectionWork,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2),
- stdx::bind(&DatabaseClonerTest::setStatus,
- this,
- stdx::placeholders::_1)));
- ASSERT_OK(databaseCloner->start());
-
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("name" << "" << "options" << BSONObj()))));
-
- ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
- ASSERT_STRING_CONTAINS(getStatus().reason(), "invalid collection namespace: db.");
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, StartFirstCollectionClonerFailed) {
- ASSERT_OK(databaseCloner->start());
-
- databaseCloner->setStartCollectionClonerFn([](CollectionCloner& cloner) {
+}
+
+TEST_F(DatabaseClonerTest, CollectionInfoNameFieldMissing) {
+ ASSERT_OK(databaseCloner->start());
+ processNetworkResponse(
+ createListCollectionsResponse(0, BSON_ARRAY(BSON("options" << BSONObj()))));
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, getStatus().code());
+ ASSERT_STRING_CONTAINS(getStatus().reason(), "must contain 'name' field");
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, CollectionInfoNameNotAString) {
+ ASSERT_OK(databaseCloner->start());
+ processNetworkResponse(createListCollectionsResponse(
+ 0, BSON_ARRAY(BSON("name" << 123 << "options" << BSONObj()))));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
+ ASSERT_STRING_CONTAINS(getStatus().reason(), "'name' field must be a string");
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
+ ASSERT_OK(databaseCloner->start());
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options" << BSONObj()))));
+ ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
+ ASSERT_STRING_CONTAINS(getStatus().reason(), "invalid collection namespace: db.");
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, CollectionInfoNameDuplicate) {
+ ASSERT_OK(databaseCloner->start());
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSONObj())
+ << BSON("name"
+ << "a"
+ << "options" << BSONObj()))));
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey, getStatus().code());
+ ASSERT_STRING_CONTAINS(getStatus().reason(), "duplicate collection name 'a'");
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, CollectionInfoOptionsFieldMissing) {
+ ASSERT_OK(databaseCloner->start());
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"))));
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, getStatus().code());
+ ASSERT_STRING_CONTAINS(getStatus().reason(), "must contain 'options' field");
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, CollectionInfoOptionsNotAnObject) {
+ ASSERT_OK(databaseCloner->start());
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << 123))));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
+ ASSERT_STRING_CONTAINS(getStatus().reason(), "'options' field must be an object");
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, InvalidCollectionOptions) {
+ ASSERT_OK(databaseCloner->start());
+
+ processNetworkResponse(
+ createListCollectionsResponse(
+ 0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSON("storageEngine" << 1)))));
+
+ ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, ListCollectionsReturnsEmptyCollectionName) {
+ databaseCloner.reset(new DatabaseCloner(
+ &getExecutor(),
+ target,
+ dbname,
+ BSONObj(),
+ DatabaseCloner::ListCollectionsPredicateFn(),
+ storageInterface.get(),
+ stdx::bind(&DatabaseClonerTest::collectionWork,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2),
+ stdx::bind(&DatabaseClonerTest::setStatus, this, stdx::placeholders::_1)));
+ ASSERT_OK(databaseCloner->start());
+
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options" << BSONObj()))));
+
+ ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
+ ASSERT_STRING_CONTAINS(getStatus().reason(), "invalid collection namespace: db.");
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, StartFirstCollectionClonerFailed) {
+ ASSERT_OK(databaseCloner->start());
+
+ databaseCloner->setStartCollectionClonerFn(
+ [](CollectionCloner& cloner) { return Status(ErrorCodes::OperationFailed, ""); });
+
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSONObj()))));
+
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, StartSecondCollectionClonerFailed) {
+ ASSERT_OK(databaseCloner->start());
+
+ // Replace scheduleDbWork function so that all callbacks (including exclusive tasks)
+ // will run through network interface.
+ auto&& executor = getExecutor();
+ databaseCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
+ return executor.scheduleWork(workFn);
+ });
+
+ databaseCloner->setStartCollectionClonerFn([](CollectionCloner& cloner) {
+ if (cloner.getSourceNamespace().coll() == "b") {
return Status(ErrorCodes::OperationFailed, "");
- });
-
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("name" << "a" << "options" << BSONObj()))));
-
- ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, StartSecondCollectionClonerFailed) {
- ASSERT_OK(databaseCloner->start());
-
- // Replace scheduleDbWork function so that all callbacks (including exclusive tasks)
- // will run through network interface.
- auto&& executor = getExecutor();
- databaseCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
- return executor.scheduleWork(workFn);
- });
-
- databaseCloner->setStartCollectionClonerFn([](CollectionCloner& cloner) {
- if (cloner.getSourceNamespace().coll() == "b") {
- return Status(ErrorCodes::OperationFailed, "");
- }
- return cloner.start();
- });
-
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(
- BSON("name" << "a" << "options" << BSONObj()) <<
- BSON("name" << "b" << "options" << BSONObj()))));
-
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- processNetworkResponse(createCursorResponse(0, BSONArray()));
-
- ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
- ASSERT_FALSE(databaseCloner->isActive());
- }
-
- TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
- ASSERT_OK(databaseCloner->start());
-
- // Replace scheduleDbWork function so that all callbacks (including exclusive tasks)
- // will run through network interface.
- auto&& executor = getExecutor();
- databaseCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
- return executor.scheduleWork(workFn);
- });
-
- const std::vector<BSONObj> sourceInfos = {
- BSON("name" << "a" << "options" << BSONObj()),
- BSON("name" << "b" << "options" << BSONObj())};
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] <<
- sourceInfos[1])));
-
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(databaseCloner->isActive());
-
- // Collection cloners are run serially for now.
- // This affects the order of the network responses.
- processNetworkResponse(
- BSON("ok" << 0 << "errmsg" << "" << "code" << ErrorCodes::NamespaceNotFound));
-
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- processNetworkResponse(createCursorResponse(0, BSONArray()));
-
- ASSERT_OK(getStatus());
- ASSERT_FALSE(databaseCloner->isActive());
-
- ASSERT_EQUALS(2U, collectionWorkResults.size());
- {
- auto i = collectionWorkResults.cbegin();
- ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, i->first.code());
- ASSERT_EQUALS(i->second.ns(), NamespaceString(dbname, "a").ns());
- i++;
- ASSERT_OK(i->first);
- ASSERT_EQUALS(i->second.ns(), NamespaceString(dbname, "b").ns());
}
+ return cloner.start();
+ });
+
+ processNetworkResponse(
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options" << BSONObj())
+ << BSON("name"
+ << "b"
+ << "options" << BSONObj()))));
+
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ processNetworkResponse(createCursorResponse(0, BSONArray()));
+
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
+ ASSERT_FALSE(databaseCloner->isActive());
+}
+
+TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
+ ASSERT_OK(databaseCloner->start());
+
+ // Replace scheduleDbWork function so that all callbacks (including exclusive tasks)
+ // will run through network interface.
+ auto&& executor = getExecutor();
+ databaseCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
+ return executor.scheduleWork(workFn);
+ });
+
+ const std::vector<BSONObj> sourceInfos = {BSON("name"
+ << "a"
+ << "options" << BSONObj()),
+ BSON("name"
+ << "b"
+ << "options" << BSONObj())};
+ processNetworkResponse(
+ createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1])));
+
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(databaseCloner->isActive());
+
+ // Collection cloners are run serially for now.
+ // This affects the order of the network responses.
+ processNetworkResponse(BSON("ok" << 0 << "errmsg"
+ << ""
+ << "code" << ErrorCodes::NamespaceNotFound));
+
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ processNetworkResponse(createCursorResponse(0, BSONArray()));
+
+ ASSERT_OK(getStatus());
+ ASSERT_FALSE(databaseCloner->isActive());
+
+ ASSERT_EQUALS(2U, collectionWorkResults.size());
+ {
+ auto i = collectionWorkResults.cbegin();
+ ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, i->first.code());
+ ASSERT_EQUALS(i->second.ns(), NamespaceString(dbname, "a").ns());
+ i++;
+ ASSERT_OK(i->first);
+ ASSERT_EQUALS(i->second.ns(), NamespaceString(dbname, "b").ns());
}
-
- TEST_F(DatabaseClonerTest, CreateCollections) {
- ASSERT_OK(databaseCloner->start());
-
- // Replace scheduleDbWork function so that all callbacks (including exclusive tasks)
- // will run through network interface.
- auto&& executor = getExecutor();
- databaseCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
- return executor.scheduleWork(workFn);
- });
-
- const std::vector<BSONObj> sourceInfos = {
- BSON("name" << "a" << "options" << BSONObj()),
- BSON("name" << "b" << "options" << BSONObj())};
- processNetworkResponse(createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] <<
- sourceInfos[1])));
-
- ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
- ASSERT_TRUE(databaseCloner->isActive());
-
- // Collection cloners are run serially for now.
- // This affects the order of the network responses.
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- processNetworkResponse(createCursorResponse(0, BSONArray()));
-
- processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
- processNetworkResponse(createCursorResponse(0, BSONArray()));
-
- ASSERT_OK(getStatus());
- ASSERT_FALSE(databaseCloner->isActive());
-
- ASSERT_EQUALS(2U, collectionWorkResults.size());
- {
- auto i = collectionWorkResults.cbegin();
- ASSERT_OK(i->first);
- ASSERT_EQUALS(i->second.ns(), NamespaceString(dbname, "a").ns());
- i++;
- ASSERT_OK(i->first);
- ASSERT_EQUALS(i->second.ns(), NamespaceString(dbname, "b").ns());
- }
+}
+
+TEST_F(DatabaseClonerTest, CreateCollections) {
+ ASSERT_OK(databaseCloner->start());
+
+ // Replace scheduleDbWork function so that all callbacks (including exclusive tasks)
+ // will run through network interface.
+ auto&& executor = getExecutor();
+ databaseCloner->setScheduleDbWorkFn([&](const ReplicationExecutor::CallbackFn& workFn) {
+ return executor.scheduleWork(workFn);
+ });
+
+ const std::vector<BSONObj> sourceInfos = {BSON("name"
+ << "a"
+ << "options" << BSONObj()),
+ BSON("name"
+ << "b"
+ << "options" << BSONObj())};
+ processNetworkResponse(
+ createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1])));
+
+ ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
+ ASSERT_TRUE(databaseCloner->isActive());
+
+ // Collection cloners are run serially for now.
+ // This affects the order of the network responses.
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ processNetworkResponse(createCursorResponse(0, BSONArray()));
+
+ processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
+ processNetworkResponse(createCursorResponse(0, BSONArray()));
+
+ ASSERT_OK(getStatus());
+ ASSERT_FALSE(databaseCloner->isActive());
+
+ ASSERT_EQUALS(2U, collectionWorkResults.size());
+ {
+ auto i = collectionWorkResults.cbegin();
+ ASSERT_OK(i->first);
+ ASSERT_EQUALS(i->second.ns(), NamespaceString(dbname, "a").ns());
+ i++;
+ ASSERT_OK(i->first);
+ ASSERT_EQUALS(i->second.ns(), NamespaceString(dbname, "b").ns());
}
+}
-} // namespace
+} // namespace
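Editor's note: the tests above drive the DatabaseCloner entirely through canned network replies. The listCollections reply the cloner consumes is an ordinary command response carrying a cursor document; a hedged sketch of the shape the test fixture's createListCollectionsResponse() helper is assumed to build (the helper itself is defined outside this hunk):

    // Hedged sketch only: approximate reply built by createListCollectionsResponse(0, ...).
    // Field layout follows the standard cursor-response convention
    // (cursor.id, cursor.ns, cursor.firstBatch, ok); the "ns" value is illustrative.
    BSONObj makeListCollectionsReply(long long cursorId, const BSONArray& collections) {
        return BSON("cursor" << BSON("id" << cursorId
                                     << "ns" << "db.$cmd.listCollections"
                                     << "firstBatch" << collections)
                    << "ok" << 1);
    }

A cursor id of 0 tells the cloner the batch is final, which is why the tests above always pass 0 when they want the collection list or the document batch to complete in one response.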
diff --git a/src/mongo/db/repl/database_task.cpp b/src/mongo/db/repl/database_task.cpp
index 716155a3716..b19bf201b5d 100644
--- a/src/mongo/db/repl/database_task.cpp
+++ b/src/mongo/db/repl/database_task.cpp
@@ -38,63 +38,66 @@
namespace mongo {
namespace repl {
- // static
- DatabaseTask::Task DatabaseTask::makeGlobalExclusiveLockTask(const Task& task) {
- invariant(task);
- DatabaseTask::Task newTask = [task](OperationContext* txn, const Status& status) {
- if (!status.isOK()) {
- return task(txn, status);
- }
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lock(txn->lockState());
- return task(txn, status);
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "globalExclusiveLockTask", "global");
- MONGO_UNREACHABLE;
- };
- return newTask;
- }
+// static
+DatabaseTask::Task DatabaseTask::makeGlobalExclusiveLockTask(const Task& task) {
+ invariant(task);
+ DatabaseTask::Task newTask = [task](OperationContext* txn, const Status& status) {
+ if (!status.isOK()) {
+ return task(txn, status);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lock(txn->lockState());
+ return task(txn, status);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "globalExclusiveLockTask", "global");
+ MONGO_UNREACHABLE;
+ };
+ return newTask;
+}
- // static
- DatabaseTask::Task DatabaseTask::makeDatabaseLockTask(const Task& task,
- const std::string& databaseName,
- LockMode mode) {
- invariant(task);
- DatabaseTask::Task newTask = [=](OperationContext* txn, const Status& status) {
- if (!status.isOK()) {
- return task(txn, status);
- }
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX;
- ScopedTransaction transaction(txn, permissiveLockMode);
- Lock::DBLock lock(txn->lockState(), databaseName, mode);
- return task(txn, status);
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "databaseLockTask", databaseName);
- MONGO_UNREACHABLE;
- };
- return newTask;
- }
+// static
+DatabaseTask::Task DatabaseTask::makeDatabaseLockTask(const Task& task,
+ const std::string& databaseName,
+ LockMode mode) {
+ invariant(task);
+ DatabaseTask::Task newTask = [=](OperationContext* txn, const Status& status) {
+ if (!status.isOK()) {
+ return task(txn, status);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX;
+ ScopedTransaction transaction(txn, permissiveLockMode);
+ Lock::DBLock lock(txn->lockState(), databaseName, mode);
+ return task(txn, status);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "databaseLockTask", databaseName);
+ MONGO_UNREACHABLE;
+ };
+ return newTask;
+}
- // static
- DatabaseTask::Task DatabaseTask::makeCollectionLockTask(const Task& task,
- const NamespaceString& nss,
- LockMode mode) {
- invariant(task);
- DatabaseTask::Task newTask = [=](OperationContext* txn, const Status& status) {
- if (!status.isOK()) {
- return task(txn, status);
- }
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX;
- ScopedTransaction transaction(txn, permissiveLockMode);
- Lock::DBLock lock(txn->lockState(), nss.db(), permissiveLockMode);
- Lock::CollectionLock collectionLock(txn->lockState(), nss.toString(), mode);
- return task(txn, status);
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "collectionLockTask", nss.toString());
- MONGO_UNREACHABLE;
- };
- return newTask;
- }
+// static
+DatabaseTask::Task DatabaseTask::makeCollectionLockTask(const Task& task,
+ const NamespaceString& nss,
+ LockMode mode) {
+ invariant(task);
+ DatabaseTask::Task newTask = [=](OperationContext* txn, const Status& status) {
+ if (!status.isOK()) {
+ return task(txn, status);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ LockMode permissiveLockMode = isSharedLockMode(mode) ? MODE_IS : MODE_IX;
+ ScopedTransaction transaction(txn, permissiveLockMode);
+ Lock::DBLock lock(txn->lockState(), nss.db(), permissiveLockMode);
+ Lock::CollectionLock collectionLock(txn->lockState(), nss.toString(), mode);
+ return task(txn, status);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "collectionLockTask", nss.toString());
+ MONGO_UNREACHABLE;
+ };
+ return newTask;
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
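Editor's note: each factory above wraps the caller's task in a lambda that skips locking on an error status, otherwise acquires the appropriate lock and retries the body inside the write-conflict retry loop. A standalone illustration of that wrapping pattern (not MongoDB code; the Lock type and WriteConflict exception stand in for ScopedTransaction/Lock::DBLock and the MONGO_WRITE_CONFLICT_RETRY_LOOP_* macros):

    #include <functional>
    #include <stdexcept>

    // Hypothetical conflict type standing in for MongoDB's WriteConflictException.
    struct WriteConflict : std::runtime_error {
        WriteConflict() : std::runtime_error("write conflict") {}
    };

    // Wraps `task` so it runs under `Lock`, retrying on conflicts; on an error
    // status the task is invoked immediately without taking any lock.
    template <typename Lock, typename Task>
    auto makeLockedTask(Task task) {
        return [task](bool statusOk) {
            if (!statusOk) {
                return task(statusOk);   // propagate the error, no lock taken
            }
            for (;;) {                   // analogous to the write-conflict retry loop
                try {
                    Lock lock;           // analogous to ScopedTransaction + Lock acquisition
                    return task(statusOk);
                } catch (const WriteConflict&) {
                    // conflict: release the lock scope and retry, as the real macro does
                }
            }
        };
    }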
diff --git a/src/mongo/db/repl/database_task.h b/src/mongo/db/repl/database_task.h
index 5a4f09dfdae..29f10f2902c 100644
--- a/src/mongo/db/repl/database_task.h
+++ b/src/mongo/db/repl/database_task.h
@@ -36,40 +36,36 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
- class DatabaseTask{
- private:
- DatabaseTask();
+class DatabaseTask {
+private:
+ DatabaseTask();
- public:
+public:
+ using Task = TaskRunner::Task;
- using Task = TaskRunner::Task;
+ /**
+ * Creates a task wrapper that runs the target task inside a global exclusive lock.
+ */
+ static Task makeGlobalExclusiveLockTask(const Task& task);
- /**
- * Creates a task wrapper that runs the target task inside a global exclusive lock.
- */
- static Task makeGlobalExclusiveLockTask(const Task& task);
+ /**
+ * Creates a task wrapper that runs the target task inside a database lock.
+ */
+ static Task makeDatabaseLockTask(const Task& task,
+ const std::string& databaseName,
+ LockMode mode);
- /**
- * Creates a task wrapper that runs the target task inside a database lock.
- */
- static Task makeDatabaseLockTask(const Task& task,
- const std::string& databaseName,
- LockMode mode);
+ /**
+ * Creates a task wrapper that runs the target task inside a collection lock.
+ * Task acquires database lock before attempting to lock collection. Do not
+ * use in combination with makeDatabaseLockTask().
+ */
+ static Task makeCollectionLockTask(const Task& task, const NamespaceString& nss, LockMode mode);
+};
- /**
- * Creates a task wrapper that runs the target task inside a collection lock.
- * Task acquires database lock before attempting to lock collection. Do not
- * use in combination with makeDatabaseLockTask().
- */
- static Task makeCollectionLockTask(const Task& task,
- const NamespaceString& nss,
- LockMode mode);
-
- };
-
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
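Editor's note: a typical call site for the header above, sketched under the assumption of the TaskRunner interface exercised by the tests that follow (schedule() plus a task returning TaskRunner::NextAction); the database and collection names are placeholders:

    // Hedged usage sketch: run a task while holding a MODE_X collection lock.
    // Assumes a TaskRunner `runner` and the DatabaseTask/TaskRunner types from this patch.
    auto task = [](OperationContext* txn, const Status& status) {
        if (!status.isOK()) {
            return TaskRunner::NextAction::kInvalid;  // runner reported an error; do nothing
        }
        // ... work on mydb.mycoll goes here; the collection lock is held ...
        return TaskRunner::NextAction::kCancel;
    };
    runner.schedule(DatabaseTask::makeCollectionLockTask(
        task, NamespaceString("mydb", "mycoll"), MODE_X));

Note the header's caveat: makeCollectionLockTask() already acquires the database lock itself, so it should not be combined with makeDatabaseLockTask().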
diff --git a/src/mongo/db/repl/database_task_test.cpp b/src/mongo/db/repl/database_task_test.cpp
index ef27f8ba93d..25a1b01178c 100644
--- a/src/mongo/db/repl/database_task_test.cpp
+++ b/src/mongo/db/repl/database_task_test.cpp
@@ -37,149 +37,147 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
-
- const std::string databaseName = "mydb";
- const std::string collectionName = "mycoll";
- const NamespaceString nss(databaseName, collectionName);
-
- class DatabaseTaskTest : public TaskRunnerTest {
- public:
- OperationContext* createOperationContext() const override;
+using namespace mongo;
+using namespace mongo::repl;
+
+const std::string databaseName = "mydb";
+const std::string collectionName = "mycoll";
+const NamespaceString nss(databaseName, collectionName);
+
+class DatabaseTaskTest : public TaskRunnerTest {
+public:
+ OperationContext* createOperationContext() const override;
+};
+
+OperationContext* DatabaseTaskTest::createOperationContext() const {
+ return new OperationContextReplMock();
+}
+
+TEST_F(DatabaseTaskTest, TaskRunnerErrorStatus) {
+ // Should not attempt to acquire lock on error status from task runner.
+ auto task = [](OperationContext* txn, const Status& status) {
+ ASSERT_FALSE(txn);
+ ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
+ return TaskRunner::NextAction::kInvalid;
};
-
- OperationContext* DatabaseTaskTest::createOperationContext() const {
- return new OperationContextReplMock();
- }
-
- TEST_F(DatabaseTaskTest, TaskRunnerErrorStatus) {
- // Should not attempt to acquire lock on error status from task runner.
- auto task = [](OperationContext* txn, const Status& status) {
- ASSERT_FALSE(txn);
- ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
- return TaskRunner::NextAction::kInvalid;
- };
- auto testLockTask = [](DatabaseTask::Task task) {
- ASSERT_TRUE(TaskRunner::NextAction::kInvalid ==
- task(nullptr, Status(ErrorCodes::BadValue, "")));
- };
- testLockTask(DatabaseTask::makeGlobalExclusiveLockTask(task));
- testLockTask(DatabaseTask::makeDatabaseLockTask(task, databaseName, MODE_X));
- testLockTask(DatabaseTask::makeCollectionLockTask(task, nss, MODE_X));
- }
-
- TEST_F(DatabaseTaskTest, RunGlobalExclusiveLockTask) {
- stdx::mutex mutex;
- bool called = false;
- OperationContext* txn = nullptr;
- bool lockIsW = false;
- Status status = getDetectableErrorStatus();
- // Task returning 'void' implies NextAction::NoAction.
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- called = true;
- txn = theTxn;
- lockIsW = txn->lockState()->isW();
- status = theStatus;
- return TaskRunner::NextAction::kCancel;
- };
- getTaskRunner().schedule(DatabaseTask::makeGlobalExclusiveLockTask(task));
- getThreadPool().join();
- ASSERT_FALSE(getTaskRunner().isActive());
-
+ auto testLockTask = [](DatabaseTask::Task task) {
+ ASSERT_TRUE(TaskRunner::NextAction::kInvalid ==
+ task(nullptr, Status(ErrorCodes::BadValue, "")));
+ };
+ testLockTask(DatabaseTask::makeGlobalExclusiveLockTask(task));
+ testLockTask(DatabaseTask::makeDatabaseLockTask(task, databaseName, MODE_X));
+ testLockTask(DatabaseTask::makeCollectionLockTask(task, nss, MODE_X));
+}
+
+TEST_F(DatabaseTaskTest, RunGlobalExclusiveLockTask) {
+ stdx::mutex mutex;
+ bool called = false;
+ OperationContext* txn = nullptr;
+ bool lockIsW = false;
+ Status status = getDetectableErrorStatus();
+ // Task returning 'void' implies NextAction::NoAction.
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_TRUE(called);
- ASSERT(txn);
- ASSERT_TRUE(lockIsW);
- ASSERT_OK(status);
- }
-
- void _testRunDatabaseLockTask(DatabaseTaskTest& test, LockMode mode) {
- stdx::mutex mutex;
- bool called = false;
- OperationContext* txn = nullptr;
- bool isDatabaseLockedForMode = false;
- Status status = test.getDetectableErrorStatus();
- // Task returning 'void' implies NextAction::NoAction.
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- called = true;
- txn = theTxn;
- isDatabaseLockedForMode = txn->lockState()->isDbLockedForMode(databaseName, mode);
- status = theStatus;
- return TaskRunner::NextAction::kCancel;
- };
- test.getTaskRunner().schedule(
- DatabaseTask::makeDatabaseLockTask(task, databaseName, mode));
- test.getThreadPool().join();
- ASSERT_FALSE(test.getTaskRunner().isActive());
-
+ called = true;
+ txn = theTxn;
+ lockIsW = txn->lockState()->isW();
+ status = theStatus;
+ return TaskRunner::NextAction::kCancel;
+ };
+ getTaskRunner().schedule(DatabaseTask::makeGlobalExclusiveLockTask(task));
+ getThreadPool().join();
+ ASSERT_FALSE(getTaskRunner().isActive());
+
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_TRUE(called);
+ ASSERT(txn);
+ ASSERT_TRUE(lockIsW);
+ ASSERT_OK(status);
+}
+
+void _testRunDatabaseLockTask(DatabaseTaskTest& test, LockMode mode) {
+ stdx::mutex mutex;
+ bool called = false;
+ OperationContext* txn = nullptr;
+ bool isDatabaseLockedForMode = false;
+ Status status = test.getDetectableErrorStatus();
+ // Task returning 'void' implies NextAction::NoAction.
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_TRUE(called);
- ASSERT(txn);
- ASSERT_TRUE(isDatabaseLockedForMode);
- ASSERT_OK(status);
- }
-
- TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeX) {
- _testRunDatabaseLockTask(*this, MODE_X);
- }
-
- TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeS) {
- _testRunDatabaseLockTask(*this, MODE_S);
- }
-
- TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeIX) {
- _testRunDatabaseLockTask(*this, MODE_IX);
- }
-
- TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeIS) {
- _testRunDatabaseLockTask(*this, MODE_IS);
- }
-
- void _testRunCollectionLockTask(DatabaseTaskTest& test, LockMode mode) {
- stdx::mutex mutex;
- bool called = false;
- OperationContext* txn = nullptr;
- bool isCollectionLockedForMode = false;
- Status status = test.getDetectableErrorStatus();
- // Task returning 'void' implies NextAction::NoAction.
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- called = true;
- txn = theTxn;
- isCollectionLockedForMode =
- txn->lockState()->isCollectionLockedForMode(nss.toString(), mode);
- status = theStatus;
- return TaskRunner::NextAction::kCancel;
- };
- test.getTaskRunner().schedule(
- DatabaseTask::makeCollectionLockTask(task, nss, mode));
- test.getThreadPool().join();
- ASSERT_FALSE(test.getTaskRunner().isActive());
-
+ called = true;
+ txn = theTxn;
+ isDatabaseLockedForMode = txn->lockState()->isDbLockedForMode(databaseName, mode);
+ status = theStatus;
+ return TaskRunner::NextAction::kCancel;
+ };
+ test.getTaskRunner().schedule(DatabaseTask::makeDatabaseLockTask(task, databaseName, mode));
+ test.getThreadPool().join();
+ ASSERT_FALSE(test.getTaskRunner().isActive());
+
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_TRUE(called);
+ ASSERT(txn);
+ ASSERT_TRUE(isDatabaseLockedForMode);
+ ASSERT_OK(status);
+}
+
+TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeX) {
+ _testRunDatabaseLockTask(*this, MODE_X);
+}
+
+TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeS) {
+ _testRunDatabaseLockTask(*this, MODE_S);
+}
+
+TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeIX) {
+ _testRunDatabaseLockTask(*this, MODE_IX);
+}
+
+TEST_F(DatabaseTaskTest, RunDatabaseLockTaskModeIS) {
+ _testRunDatabaseLockTask(*this, MODE_IS);
+}
+
+void _testRunCollectionLockTask(DatabaseTaskTest& test, LockMode mode) {
+ stdx::mutex mutex;
+ bool called = false;
+ OperationContext* txn = nullptr;
+ bool isCollectionLockedForMode = false;
+ Status status = test.getDetectableErrorStatus();
+ // Task returning 'void' implies NextAction::NoAction.
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_TRUE(called);
- ASSERT(txn);
- ASSERT_TRUE(isCollectionLockedForMode);
- ASSERT_OK(status);
- }
-
- TEST_F(DatabaseTaskTest, RunCollectionLockTaskModeX) {
- _testRunCollectionLockTask(*this, MODE_X);
- }
-
- TEST_F(DatabaseTaskTest, RunCollectionLockTaskModeS) {
- _testRunCollectionLockTask(*this, MODE_S);
- }
-
- TEST_F(DatabaseTaskTest, RunCollectionLockTaskModeIX) {
- _testRunCollectionLockTask(*this, MODE_IX);
- }
-
- TEST_F(DatabaseTaskTest, RunCollectionLockTaskModeIS) {
- _testRunCollectionLockTask(*this, MODE_IS);
- }
-
-} // namespace
+ called = true;
+ txn = theTxn;
+ isCollectionLockedForMode =
+ txn->lockState()->isCollectionLockedForMode(nss.toString(), mode);
+ status = theStatus;
+ return TaskRunner::NextAction::kCancel;
+ };
+ test.getTaskRunner().schedule(DatabaseTask::makeCollectionLockTask(task, nss, mode));
+ test.getThreadPool().join();
+ ASSERT_FALSE(test.getTaskRunner().isActive());
+
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_TRUE(called);
+ ASSERT(txn);
+ ASSERT_TRUE(isCollectionLockedForMode);
+ ASSERT_OK(status);
+}
+
+TEST_F(DatabaseTaskTest, RunCollectionLockTaskModeX) {
+ _testRunCollectionLockTask(*this, MODE_X);
+}
+
+TEST_F(DatabaseTaskTest, RunCollectionLockTaskModeS) {
+ _testRunCollectionLockTask(*this, MODE_S);
+}
+
+TEST_F(DatabaseTaskTest, RunCollectionLockTaskModeIX) {
+ _testRunCollectionLockTask(*this, MODE_IX);
+}
+
+TEST_F(DatabaseTaskTest, RunCollectionLockTaskModeIS) {
+ _testRunCollectionLockTask(*this, MODE_IS);
+}
+
+} // namespace
diff --git a/src/mongo/db/repl/elect_cmd_runner.cpp b/src/mongo/db/repl/elect_cmd_runner.cpp
index c80badf684f..c0d958c428e 100644
--- a/src/mongo/db/repl/elect_cmd_runner.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner.cpp
@@ -42,119 +42,108 @@
namespace mongo {
namespace repl {
- ElectCmdRunner::Algorithm::Algorithm(
- const ReplicaSetConfig& rsConfig,
- int selfIndex,
- const std::vector<HostAndPort>& targets,
- OID round)
- : _actualResponses(0),
- _sufficientResponsesReceived(false),
- _rsConfig(rsConfig),
- _selfIndex(selfIndex),
- _targets(targets),
- _round(round) {
-
- // Vote for ourselves, first.
- _receivedVotes = _rsConfig.getMemberAt(_selfIndex).getNumVotes();
+ElectCmdRunner::Algorithm::Algorithm(const ReplicaSetConfig& rsConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& targets,
+ OID round)
+ : _actualResponses(0),
+ _sufficientResponsesReceived(false),
+ _rsConfig(rsConfig),
+ _selfIndex(selfIndex),
+ _targets(targets),
+ _round(round) {
+ // Vote for ourselves, first.
+ _receivedVotes = _rsConfig.getMemberAt(_selfIndex).getNumVotes();
+}
+
+ElectCmdRunner::Algorithm::~Algorithm() {}
+
+std::vector<RemoteCommandRequest> ElectCmdRunner::Algorithm::getRequests() const {
+ const MemberConfig& selfConfig = _rsConfig.getMemberAt(_selfIndex);
+ std::vector<RemoteCommandRequest> requests;
+ BSONObjBuilder electCmdBuilder;
+ electCmdBuilder.append("replSetElect", 1);
+ electCmdBuilder.append("set", _rsConfig.getReplSetName());
+ electCmdBuilder.append("who", selfConfig.getHostAndPort().toString());
+ electCmdBuilder.append("whoid", selfConfig.getId());
+ electCmdBuilder.appendIntOrLL("cfgver", _rsConfig.getConfigVersion());
+ electCmdBuilder.append("round", _round);
+ const BSONObj replSetElectCmd = electCmdBuilder.obj();
+
+ // Schedule a RemoteCommandRequest for each non-DOWN node
+ for (std::vector<HostAndPort>::const_iterator it = _targets.begin(); it != _targets.end();
+ ++it) {
+ invariant(*it != selfConfig.getHostAndPort());
+ requests.push_back(RemoteCommandRequest(
+ *it,
+ "admin",
+ replSetElectCmd,
+ Milliseconds(30 * 1000))); // trying to match current Socket timeout
}
- ElectCmdRunner::Algorithm::~Algorithm() {}
-
- std::vector<RemoteCommandRequest>
- ElectCmdRunner::Algorithm::getRequests() const {
-
- const MemberConfig& selfConfig = _rsConfig.getMemberAt(_selfIndex);
- std::vector<RemoteCommandRequest> requests;
- BSONObjBuilder electCmdBuilder;
- electCmdBuilder.append("replSetElect", 1);
- electCmdBuilder.append("set", _rsConfig.getReplSetName());
- electCmdBuilder.append("who", selfConfig.getHostAndPort().toString());
- electCmdBuilder.append("whoid", selfConfig.getId());
- electCmdBuilder.appendIntOrLL("cfgver", _rsConfig.getConfigVersion());
- electCmdBuilder.append("round", _round);
- const BSONObj replSetElectCmd = electCmdBuilder.obj();
-
- // Schedule a RemoteCommandRequest for each non-DOWN node
- for (std::vector<HostAndPort>::const_iterator it = _targets.begin();
- it != _targets.end();
- ++it) {
-
- invariant(*it != selfConfig.getHostAndPort());
- requests.push_back(RemoteCommandRequest(
- *it,
- "admin",
- replSetElectCmd,
- Milliseconds(30*1000))); // trying to match current Socket timeout
- }
-
- return requests;
- }
+ return requests;
+}
- bool ElectCmdRunner::Algorithm::hasReceivedSufficientResponses() const {
- if (_sufficientResponsesReceived) {
- return true;
- }
- if (_receivedVotes >= _rsConfig.getMajorityVoteCount()) {
- return true;
- }
- if (_receivedVotes < 0) {
- return true;
- }
- if (_actualResponses == _targets.size()) {
- return true;
- }
- return false;
+bool ElectCmdRunner::Algorithm::hasReceivedSufficientResponses() const {
+ if (_sufficientResponsesReceived) {
+ return true;
}
-
- void ElectCmdRunner::Algorithm::processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response) {
-
- ++_actualResponses;
-
- if (response.isOK()) {
- BSONObj res = response.getValue().data;
- log() << "received " << res["vote"] << " votes from " << request.target;
- LOG(1) << "full elect res: " << res.toString();
- BSONElement vote(res["vote"]);
- if (vote.type() != mongo::NumberInt) {
- error() << "wrong type for vote argument in replSetElect command: " <<
- typeName(vote.type());
- _sufficientResponsesReceived = true;
- return;
- }
-
- _receivedVotes += vote._numberInt();
- }
- else {
- warning() << "elect command to " << request.target << " failed: " <<
- response.getStatus();
- }
+ if (_receivedVotes >= _rsConfig.getMajorityVoteCount()) {
+ return true;
}
-
- ElectCmdRunner::ElectCmdRunner() : _isCanceled(false) {}
- ElectCmdRunner::~ElectCmdRunner() {}
-
- StatusWith<ReplicationExecutor::EventHandle> ElectCmdRunner::start(
- ReplicationExecutor* executor,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& targets,
- const stdx::function<void ()>& onCompletion) {
-
- _algorithm.reset(new Algorithm(currentConfig, selfIndex, targets, OID::gen()));
- _runner.reset(new ScatterGatherRunner(_algorithm.get()));
- return _runner->start(executor, onCompletion);
+ if (_receivedVotes < 0) {
+ return true;
}
-
- void ElectCmdRunner::cancel(ReplicationExecutor* executor) {
- _isCanceled = true;
- _runner->cancel(executor);
+ if (_actualResponses == _targets.size()) {
+ return true;
}
+ return false;
+}
+
+void ElectCmdRunner::Algorithm::processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response) {
+ ++_actualResponses;
+
+ if (response.isOK()) {
+ BSONObj res = response.getValue().data;
+ log() << "received " << res["vote"] << " votes from " << request.target;
+ LOG(1) << "full elect res: " << res.toString();
+ BSONElement vote(res["vote"]);
+ if (vote.type() != mongo::NumberInt) {
+ error() << "wrong type for vote argument in replSetElect command: "
+ << typeName(vote.type());
+ _sufficientResponsesReceived = true;
+ return;
+ }
- int ElectCmdRunner::getReceivedVotes() const {
- return _algorithm->getReceivedVotes();
+ _receivedVotes += vote._numberInt();
+ } else {
+ warning() << "elect command to " << request.target << " failed: " << response.getStatus();
}
-
-} // namespace repl
-} // namespace mongo
+}
+
+ElectCmdRunner::ElectCmdRunner() : _isCanceled(false) {}
+ElectCmdRunner::~ElectCmdRunner() {}
+
+StatusWith<ReplicationExecutor::EventHandle> ElectCmdRunner::start(
+ ReplicationExecutor* executor,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& targets,
+ const stdx::function<void()>& onCompletion) {
+ _algorithm.reset(new Algorithm(currentConfig, selfIndex, targets, OID::gen()));
+ _runner.reset(new ScatterGatherRunner(_algorithm.get()));
+ return _runner->start(executor, onCompletion);
+}
+
+void ElectCmdRunner::cancel(ReplicationExecutor* executor) {
+ _isCanceled = true;
+ _runner->cancel(executor);
+}
+
+int ElectCmdRunner::getReceivedVotes() const {
+ return _algorithm->getReceivedVotes();
+}
+
+} // namespace repl
+} // namespace mongo
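Editor's note: hasReceivedSufficientResponses() above ends the scatter-gather as soon as any of four conditions holds: an earlier response already forced termination, the tally has reached the configured majority, a veto has driven the tally negative, or every target has answered. A standalone restatement of that decision rule with hypothetical numbers:

    #include <cstddef>

    // Standalone illustration of the termination rule in
    // ElectCmdRunner::Algorithm::hasReceivedSufficientResponses() above.
    bool electRoundIsDecided(bool forcedStop,
                             int receivedVotes,
                             int majorityVoteCount,
                             std::size_t actualResponses,
                             std::size_t targetCount) {
        return forcedStop                              // e.g. a malformed "vote" field
            || receivedVotes >= majorityVoteCount      // majority reached, round is won
            || receivedVotes < 0                       // a -10000 "nay" sinks the round
            || actualResponses == targetCount;         // nothing left to wait for
    }

    // electRoundIsDecided(false, 2, 2, 1, 2) -> true:
    // the self vote plus one "yea" already form a majority in a three-node set.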
diff --git a/src/mongo/db/repl/elect_cmd_runner.h b/src/mongo/db/repl/elect_cmd_runner.h
index 2a9a7bab435..21295991f26 100644
--- a/src/mongo/db/repl/elect_cmd_runner.h
+++ b/src/mongo/db/repl/elect_cmd_runner.h
@@ -38,88 +38,91 @@
namespace mongo {
- class Status;
+class Status;
namespace repl {
- class ReplicaSetConfig;
- class ScatterGatherRunner;
+class ReplicaSetConfig;
+class ScatterGatherRunner;
- class ElectCmdRunner {
- MONGO_DISALLOW_COPYING(ElectCmdRunner);
+class ElectCmdRunner {
+ MONGO_DISALLOW_COPYING(ElectCmdRunner);
+
+public:
+ class Algorithm : public ScatterGatherAlgorithm {
public:
- class Algorithm : public ScatterGatherAlgorithm {
- public:
- Algorithm(const ReplicaSetConfig& rsConfig,
- int selfIndex,
- const std::vector<HostAndPort>& targets,
- OID round);
-
- virtual ~Algorithm();
- virtual std::vector<RemoteCommandRequest> getRequests() const;
- virtual void processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response);
- virtual bool hasReceivedSufficientResponses() const;
-
- int getReceivedVotes() const { return _receivedVotes; }
-
- private:
- // Tally of the number of received votes for this election.
- int _receivedVotes;
-
- // Number of responses received so far.
- size_t _actualResponses;
-
- bool _sufficientResponsesReceived;
-
- const ReplicaSetConfig _rsConfig;
- const int _selfIndex;
- const std::vector<HostAndPort> _targets;
- const OID _round;
- };
-
- ElectCmdRunner();
- ~ElectCmdRunner();
-
- /**
- * Begins the process of sending replSetElect commands to all non-DOWN nodes
- * in currentConfig.
- *
- * Returned handle can be used to schedule a callback when the process is complete.
- */
- StatusWith<ReplicationExecutor::EventHandle> start(
- ReplicationExecutor* executor,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& targets,
- const stdx::function<void ()>& onCompletion = stdx::function<void ()>());
-
- /**
- * Informs the ElectCmdRunner to cancel further processing. The "executor"
- * argument must point to the same executor passed to "start()".
- *
- * Like start(), this method must run in the executor context.
- */
- void cancel(ReplicationExecutor* executor);
-
- /**
- * Returns the number of received votes. Only valid to call after
- * the event handle returned from start() has been signaled, which guarantees that
- * the vote count will no longer be touched by callbacks.
- */
- int getReceivedVotes() const;
-
- /**
- * Returns true if cancel() was called on this instance.
- */
- bool isCanceled() const { return _isCanceled; }
+ Algorithm(const ReplicaSetConfig& rsConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& targets,
+ OID round);
+
+ virtual ~Algorithm();
+ virtual std::vector<RemoteCommandRequest> getRequests() const;
+ virtual void processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response);
+ virtual bool hasReceivedSufficientResponses() const;
+
+ int getReceivedVotes() const {
+ return _receivedVotes;
+ }
private:
- std::unique_ptr<Algorithm> _algorithm;
- std::unique_ptr<ScatterGatherRunner> _runner;
- bool _isCanceled;
+ // Tally of the number of received votes for this election.
+ int _receivedVotes;
+
+ // Number of responses received so far.
+ size_t _actualResponses;
+
+ bool _sufficientResponsesReceived;
+
+ const ReplicaSetConfig _rsConfig;
+ const int _selfIndex;
+ const std::vector<HostAndPort> _targets;
+ const OID _round;
};
+ ElectCmdRunner();
+ ~ElectCmdRunner();
+
+ /**
+ * Begins the process of sending replSetElect commands to all non-DOWN nodes
+ * in currentConfig.
+ *
+ * Returned handle can be used to schedule a callback when the process is complete.
+ */
+ StatusWith<ReplicationExecutor::EventHandle> start(
+ ReplicationExecutor* executor,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& targets,
+ const stdx::function<void()>& onCompletion = stdx::function<void()>());
+
+ /**
+ * Informs the ElectCmdRunner to cancel further processing. The "executor"
+ * argument must point to the same executor passed to "start()".
+ *
+ * Like start(), this method must run in the executor context.
+ */
+ void cancel(ReplicationExecutor* executor);
+
+ /**
+ * Returns the number of received votes. Only valid to call after
+ * the event handle returned from start() has been signaled, which guarantees that
+ * the vote count will no longer be touched by callbacks.
+ */
+ int getReceivedVotes() const;
+
+ /**
+ * Returns true if cancel() was called on this instance.
+ */
+ bool isCanceled() const {
+ return _isCanceled;
+ }
+
+private:
+ std::unique_ptr<Algorithm> _algorithm;
+ std::unique_ptr<ScatterGatherRunner> _runner;
+ bool _isCanceled;
+};
}
}
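Editor's note: the intended lifecycle of ElectCmdRunner, sketched under the assumption that the snippet runs inside a ReplicationExecutor callback, as start() requires and as the tests below arrange via scheduleWork(); the config, index and target list are placeholders:

    // Hedged usage sketch; error handling trimmed for brevity.
    ElectCmdRunner electCmdRunner;
    StatusWith<ReplicationExecutor::EventHandle> evh =
        electCmdRunner.start(executor, rsConfig, selfIndex, targets);
    if (evh.isOK()) {
        executor->waitForEvent(evh.getValue());          // as the tests below do
        int votes = electCmdRunner.getReceivedVotes();   // valid only after the event fires
    }

Reading the tally before the event is signaled would race with the response callbacks, which is exactly what the getReceivedVotes() comment above warns against.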
diff --git a/src/mongo/db/repl/elect_cmd_runner_test.cpp b/src/mongo/db/repl/elect_cmd_runner_test.cpp
index c029994f30d..91f2bbaea5a 100644
--- a/src/mongo/db/repl/elect_cmd_runner_test.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner_test.cpp
@@ -47,379 +47,369 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
-
- class ElectCmdRunnerTest : public mongo::unittest::Test {
- public:
- void startTest(ElectCmdRunner* electCmdRunner,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& hosts);
-
- void waitForTest();
-
- void electCmdRunnerRunner(const ReplicationExecutor::CallbackArgs& data,
- ElectCmdRunner* electCmdRunner,
- StatusWith<ReplicationExecutor::EventHandle>* evh,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& hosts);
-
- NetworkInterfaceMock* _net;
- StorageInterfaceMock* _storage;
- std::unique_ptr<ReplicationExecutor> _executor;
- std::unique_ptr<stdx::thread> _executorThread;
-
- private:
- void setUp();
- void tearDown();
-
- ReplicationExecutor::EventHandle _allDoneEvent;
- };
-
- void ElectCmdRunnerTest::setUp() {
- _net = new NetworkInterfaceMock;
- _storage = new StorageInterfaceMock;
- _executor.reset(new ReplicationExecutor(_net, _storage, 1 /* prng seed */));
- _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run,
- _executor.get())));
- }
-
- void ElectCmdRunnerTest::tearDown() {
- _executor->shutdown();
- _executorThread->join();
+using executor::NetworkInterfaceMock;
+
+class ElectCmdRunnerTest : public mongo::unittest::Test {
+public:
+ void startTest(ElectCmdRunner* electCmdRunner,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& hosts);
+
+ void waitForTest();
+
+ void electCmdRunnerRunner(const ReplicationExecutor::CallbackArgs& data,
+ ElectCmdRunner* electCmdRunner,
+ StatusWith<ReplicationExecutor::EventHandle>* evh,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& hosts);
+
+ NetworkInterfaceMock* _net;
+ StorageInterfaceMock* _storage;
+ std::unique_ptr<ReplicationExecutor> _executor;
+ std::unique_ptr<stdx::thread> _executorThread;
+
+private:
+ void setUp();
+ void tearDown();
+
+ ReplicationExecutor::EventHandle _allDoneEvent;
+};
+
+void ElectCmdRunnerTest::setUp() {
+ _net = new NetworkInterfaceMock;
+ _storage = new StorageInterfaceMock;
+ _executor.reset(new ReplicationExecutor(_net, _storage, 1 /* prng seed */));
+ _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run, _executor.get())));
+}
+
+void ElectCmdRunnerTest::tearDown() {
+ _executor->shutdown();
+ _executorThread->join();
+}
+
+ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(configBson));
+ ASSERT_OK(config.validate());
+ return config;
+}
+
+const BSONObj makeElectRequest(const ReplicaSetConfig& rsConfig, int selfIndex) {
+ const MemberConfig& myConfig = rsConfig.getMemberAt(selfIndex);
+ return BSON("replSetElect" << 1 << "set" << rsConfig.getReplSetName() << "who"
+ << myConfig.getHostAndPort().toString() << "whoid"
+ << myConfig.getId() << "cfgver" << rsConfig.getConfigVersion()
+ << "round" << 380865962699346850ll);
+}
+
+BSONObj stripRound(const BSONObj& orig) {
+ BSONObjBuilder builder;
+ for (BSONObjIterator iter(orig); iter.more(); iter.next()) {
+ BSONElement e = *iter;
+ if (e.fieldNameStringData() == "round") {
+ continue;
+ }
+ builder.append(e);
}
+ return builder.obj();
+}
+
+// This is necessary because the run method must be scheduled in the Replication Executor
+// for correct concurrency operation.
+void ElectCmdRunnerTest::electCmdRunnerRunner(const ReplicationExecutor::CallbackArgs& data,
+ ElectCmdRunner* electCmdRunner,
+ StatusWith<ReplicationExecutor::EventHandle>* evh,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& hosts) {
+ invariant(data.status.isOK());
+ ReplicationExecutor* executor = dynamic_cast<ReplicationExecutor*>(data.executor);
+ ASSERT(executor);
+ *evh = electCmdRunner->start(executor, currentConfig, selfIndex, hosts);
+}
+
+void ElectCmdRunnerTest::startTest(ElectCmdRunner* electCmdRunner,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& hosts) {
+ StatusWith<ReplicationExecutor::EventHandle> evh(ErrorCodes::InternalError, "Not set");
+ StatusWith<ReplicationExecutor::CallbackHandle> cbh =
+ _executor->scheduleWork(stdx::bind(&ElectCmdRunnerTest::electCmdRunnerRunner,
+ this,
+ stdx::placeholders::_1,
+ electCmdRunner,
+ &evh,
+ currentConfig,
+ selfIndex,
+ hosts));
+ ASSERT_OK(cbh.getStatus());
+ _executor->wait(cbh.getValue());
+ ASSERT_OK(evh.getStatus());
+ _allDoneEvent = evh.getValue();
+}
+
+void ElectCmdRunnerTest::waitForTest() {
+ _executor->waitForEvent(_allDoneEvent);
+}
+
+TEST_F(ElectCmdRunnerTest, OneNode) {
+ // Only one node in the config.
+ const ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ ElectCmdRunner electCmdRunner;
+ startTest(&electCmdRunner, config, 0, hosts);
+ waitForTest();
+ ASSERT_EQUALS(electCmdRunner.getReceivedVotes(), 1);
+}
+
+TEST_F(ElectCmdRunnerTest, TwoNodes) {
+ // Two nodes, we are node h1.
+ const ReplicaSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ hosts.push_back(config.getMemberAt(1).getHostAndPort());
+
+ const BSONObj electRequest = makeElectRequest(config, 0);
+
+ ElectCmdRunner electCmdRunner;
+ startTest(&electCmdRunner, config, 0, hosts);
+ const Date_t startDate = _net->now();
+ _net->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(stripRound(electRequest), stripRound(noi->getRequest().cmdObj));
+ ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(
+ BSON("ok" << 1 << "vote" << 1 << "round" << 380865962699346850ll), Milliseconds(8))));
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitForTest();
+ ASSERT_EQUALS(electCmdRunner.getReceivedVotes(), 2);
+}
+
+TEST_F(ElectCmdRunnerTest, ShuttingDown) {
+ // Two nodes, we are node h1. Shutdown happens while we're scheduling remote commands.
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ hosts.push_back(config.getMemberAt(1).getHostAndPort());
+
+ ElectCmdRunner electCmdRunner;
+ StatusWith<ReplicationExecutor::EventHandle> evh(ErrorCodes::InternalError, "Not set");
+ StatusWith<ReplicationExecutor::CallbackHandle> cbh =
+ _executor->scheduleWork(stdx::bind(&ElectCmdRunnerTest::electCmdRunnerRunner,
+ this,
+ stdx::placeholders::_1,
+ &electCmdRunner,
+ &evh,
+ config,
+ 0,
+ hosts));
+ ASSERT_OK(cbh.getStatus());
+ _executor->wait(cbh.getValue());
+ ASSERT_OK(evh.getStatus());
+ _executor->shutdown();
+ _executor->waitForEvent(evh.getValue());
+ ASSERT_EQUALS(electCmdRunner.getReceivedVotes(), 1);
+}
+
+class ElectScatterGatherTest : public mongo::unittest::Test {
+public:
+ virtual void start(const BSONObj& configObj) {
+ int selfConfigIndex = 0;
- ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
ReplicaSetConfig config;
- ASSERT_OK(config.initialize(configBson));
- ASSERT_OK(config.validate());
- return config;
- }
+ config.initialize(configObj);
- const BSONObj makeElectRequest(const ReplicaSetConfig& rsConfig,
- int selfIndex) {
- const MemberConfig& myConfig = rsConfig.getMemberAt(selfIndex);
- return BSON("replSetElect" << 1 <<
- "set" << rsConfig.getReplSetName() <<
- "who" << myConfig.getHostAndPort().toString() <<
- "whoid" << myConfig.getId() <<
- "cfgver" << rsConfig.getConfigVersion() <<
- "round" << 380865962699346850ll);
- }
-
- BSONObj stripRound(const BSONObj& orig) {
- BSONObjBuilder builder;
- for (BSONObjIterator iter(orig); iter.more(); iter.next()) {
- BSONElement e = *iter;
- if (e.fieldNameStringData() == "round") {
- continue;
- }
- builder.append(e);
+ std::vector<HostAndPort> hosts;
+ for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
+ mem != config.membersEnd();
+ ++mem) {
+ hosts.push_back(mem->getHostAndPort());
}
- return builder.obj();
- }
- // This is necessary because the run method must be scheduled in the Replication Executor
- // for correct concurrency operation.
- void ElectCmdRunnerTest::electCmdRunnerRunner(
- const ReplicationExecutor::CallbackArgs& data,
- ElectCmdRunner* electCmdRunner,
- StatusWith<ReplicationExecutor::EventHandle>* evh,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& hosts) {
-
- invariant(data.status.isOK());
- ReplicationExecutor* executor = dynamic_cast<ReplicationExecutor*>(data.executor);
- ASSERT(executor);
- *evh = electCmdRunner->start(
- executor,
- currentConfig,
- selfIndex,
- hosts);
+ _checker.reset(new ElectCmdRunner::Algorithm(config, selfConfigIndex, hosts, OID()));
}
- void ElectCmdRunnerTest::startTest(ElectCmdRunner* electCmdRunner,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& hosts) {
-
- StatusWith<ReplicationExecutor::EventHandle> evh(ErrorCodes::InternalError, "Not set");
- StatusWith<ReplicationExecutor::CallbackHandle> cbh =
- _executor->scheduleWork(
- stdx::bind(&ElectCmdRunnerTest::electCmdRunnerRunner,
- this,
- stdx::placeholders::_1,
- electCmdRunner,
- &evh,
- currentConfig,
- selfIndex,
- hosts));
- ASSERT_OK(cbh.getStatus());
- _executor->wait(cbh.getValue());
- ASSERT_OK(evh.getStatus());
- _allDoneEvent = evh.getValue();
+ virtual void tearDown() {
+ _checker.reset(NULL);
}
- void ElectCmdRunnerTest::waitForTest() {
- _executor->waitForEvent(_allDoneEvent);
+protected:
+ bool hasReceivedSufficientResponses() {
+ return _checker->hasReceivedSufficientResponses();
}
- TEST_F(ElectCmdRunnerTest, OneNode) {
- // Only one node in the config.
- const ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- ElectCmdRunner electCmdRunner;
- startTest(&electCmdRunner, config, 0, hosts);
- waitForTest();
- ASSERT_EQUALS(electCmdRunner.getReceivedVotes(), 1);
+ int getReceivedVotes() {
+ return _checker->getReceivedVotes();
}
- TEST_F(ElectCmdRunnerTest, TwoNodes) {
- // Two nodes, we are node h1.
- const ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- hosts.push_back(config.getMemberAt(1).getHostAndPort());
-
- const BSONObj electRequest = makeElectRequest(config, 0);
-
- ElectCmdRunner electCmdRunner;
- startTest(&electCmdRunner, config, 0, hosts);
- const Date_t startDate = _net->now();
- _net->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(stripRound(electRequest), stripRound(noi->getRequest().cmdObj));
- ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1 <<
- "vote" << 1 <<
- "round" << 380865962699346850ll),
- Milliseconds(8))));
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitForTest();
- ASSERT_EQUALS(electCmdRunner.getReceivedVotes(), 2);
+ void processResponse(const RemoteCommandRequest& request, const ResponseStatus& response) {
+ _checker->processResponse(request, response);
}
- TEST_F(ElectCmdRunnerTest, ShuttingDown) {
- // Two nodes, we are node h1. Shutdown happens while we're scheduling remote commands.
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- hosts.push_back(config.getMemberAt(1).getHostAndPort());
-
- ElectCmdRunner electCmdRunner;
- StatusWith<ReplicationExecutor::EventHandle> evh(ErrorCodes::InternalError, "Not set");
- StatusWith<ReplicationExecutor::CallbackHandle> cbh =
- _executor->scheduleWork(
- stdx::bind(&ElectCmdRunnerTest::electCmdRunnerRunner,
- this,
- stdx::placeholders::_1,
- &electCmdRunner,
- &evh,
- config,
- 0,
- hosts));
- ASSERT_OK(cbh.getStatus());
- _executor->wait(cbh.getValue());
- ASSERT_OK(evh.getStatus());
- _executor->shutdown();
- _executor->waitForEvent(evh.getValue());
- ASSERT_EQUALS(electCmdRunner.getReceivedVotes(), 1);
+ RemoteCommandRequest requestFrom(std::string hostname) {
+ return RemoteCommandRequest(HostAndPort(hostname),
+ "", // the non-hostname fields do not matter for Elect
+ BSONObj(),
+ Milliseconds(0));
}
- class ElectScatterGatherTest : public mongo::unittest::Test {
- public:
- virtual void start(const BSONObj& configObj) {
- int selfConfigIndex = 0;
-
- ReplicaSetConfig config;
- config.initialize(configObj);
-
- std::vector<HostAndPort> hosts;
- for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
- mem != config.membersEnd();
- ++mem) {
- hosts.push_back(mem->getHostAndPort());
- }
-
- _checker.reset(new ElectCmdRunner::Algorithm(config,
- selfConfigIndex,
- hosts,
- OID()));
- }
-
- virtual void tearDown() {
- _checker.reset(NULL);
- }
-
- protected:
- bool hasReceivedSufficientResponses() {
- return _checker->hasReceivedSufficientResponses();
- }
-
- int getReceivedVotes() {
- return _checker->getReceivedVotes();
- }
-
- void processResponse(const RemoteCommandRequest& request, const ResponseStatus& response) {
- _checker->processResponse(request, response);
- }
-
- RemoteCommandRequest requestFrom(std::string hostname) {
- return RemoteCommandRequest(HostAndPort(hostname),
- "", // the non-hostname fields do not matter for Elect
- BSONObj(),
- Milliseconds(0));
- }
-
- ResponseStatus badResponseStatus() {
- return ResponseStatus(ErrorCodes::NodeNotFound, "not on my watch");
- }
-
- ResponseStatus wrongTypeForVoteField() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("vote" << std::string("yea")),
- Milliseconds(10)));
- }
-
- ResponseStatus voteYea() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("vote" << 1),
- Milliseconds(10)));
- }
-
- ResponseStatus voteNay() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("vote" << -10000),
- Milliseconds(10)));
- }
-
- ResponseStatus abstainFromVoting() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("vote" << 0),
- Milliseconds(10)));
- }
-
- BSONObj threeNodesTwoArbitersConfig() {
- return BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host0") <<
- BSON("_id" << 1 << "host" << "host1" << "arbiterOnly" << true) <<
- BSON("_id" << 2 << "host" << "host2" << "arbiterOnly" << true)));
- }
-
- BSONObj basicThreeNodeConfig() {
- return BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host0") <<
- BSON("_id" << 1 << "host" << "host1") <<
- BSON("_id" << 2 << "host" << "host2")));
- }
-
- private:
- unique_ptr<ElectCmdRunner::Algorithm> _checker;
- };
-
- TEST_F(ElectScatterGatherTest, NodeRespondsWithBadVoteType) {
- start(basicThreeNodeConfig());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), wrongTypeForVoteField());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, getReceivedVotes()); // 1 because we have 1 vote and voted for ourself
+ ResponseStatus badResponseStatus() {
+ return ResponseStatus(ErrorCodes::NodeNotFound, "not on my watch");
}
- TEST_F(ElectScatterGatherTest, NodeRespondsWithBadStatus) {
- start(basicThreeNodeConfig());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), badResponseStatus());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host3"), abstainFromVoting());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, getReceivedVotes()); // 1 because we have 1 vote and voted for ourself
+ ResponseStatus wrongTypeForVoteField() {
+ return ResponseStatus(
+ NetworkInterfaceMock::Response(BSON("vote" << std::string("yea")), Milliseconds(10)));
}
- TEST_F(ElectScatterGatherTest, FirstNodeRespondsWithYea) {
- start(basicThreeNodeConfig());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), voteYea());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(2, getReceivedVotes());
+ ResponseStatus voteYea() {
+ return ResponseStatus(NetworkInterfaceMock::Response(BSON("vote" << 1), Milliseconds(10)));
}
- TEST_F(ElectScatterGatherTest, FirstNodeRespondsWithNaySecondWithYea) {
- start(basicThreeNodeConfig());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), voteNay());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(-9999, getReceivedVotes());
+ ResponseStatus voteNay() {
+ return ResponseStatus(
+ NetworkInterfaceMock::Response(BSON("vote" << -10000), Milliseconds(10)));
}
- TEST_F(ElectScatterGatherTest, BothNodesAbstainFromVoting) {
- start(basicThreeNodeConfig());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), abstainFromVoting());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host3"), abstainFromVoting());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, getReceivedVotes());
+ ResponseStatus abstainFromVoting() {
+ return ResponseStatus(NetworkInterfaceMock::Response(BSON("vote" << 0), Milliseconds(10)));
}
- TEST_F(ElectScatterGatherTest, NodeRespondsWithBadStatusArbiters) {
- start(threeNodesTwoArbitersConfig());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), badResponseStatus());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host3"), abstainFromVoting());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, getReceivedVotes()); // 1 because we have 1 vote and voted for ourself
+ BSONObj threeNodesTwoArbitersConfig() {
+ return BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 2 << "host"
+ << "host2"
+ << "arbiterOnly" << true)));
}
- TEST_F(ElectScatterGatherTest, FirstNodeRespondsWithYeaArbiters) {
- start(threeNodesTwoArbitersConfig());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), voteYea());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(2, getReceivedVotes());
+ BSONObj basicThreeNodeConfig() {
+ return BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1") << BSON("_id" << 2 << "host"
+ << "host2")));
}
- TEST_F(ElectScatterGatherTest, FirstNodeRespondsWithNaySecondWithYeaArbiters) {
- start(threeNodesTwoArbitersConfig());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), voteNay());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(-9999, getReceivedVotes());
- }
+private:
+ unique_ptr<ElectCmdRunner::Algorithm> _checker;
+};
+
+TEST_F(ElectScatterGatherTest, NodeRespondsWithBadVoteType) {
+ start(basicThreeNodeConfig());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host2"), wrongTypeForVoteField());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, getReceivedVotes()); // 1 because we have 1 vote and voted for ourself
+}
+
+TEST_F(ElectScatterGatherTest, NodeRespondsWithBadStatus) {
+ start(basicThreeNodeConfig());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host2"), badResponseStatus());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host3"), abstainFromVoting());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, getReceivedVotes()); // 1 because we have 1 vote and voted for ourself
+}
+
+TEST_F(ElectScatterGatherTest, FirstNodeRespondsWithYea) {
+ start(basicThreeNodeConfig());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host2"), voteYea());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(2, getReceivedVotes());
+}
+
+TEST_F(ElectScatterGatherTest, FirstNodeRespondsWithNaySecondWithYea) {
+ start(basicThreeNodeConfig());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host2"), voteNay());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(-9999, getReceivedVotes());
+}
+
+TEST_F(ElectScatterGatherTest, BothNodesAbstainFromVoting) {
+ start(basicThreeNodeConfig());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host2"), abstainFromVoting());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host3"), abstainFromVoting());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, getReceivedVotes());
+}
+
+TEST_F(ElectScatterGatherTest, NodeRespondsWithBadStatusArbiters) {
+ start(threeNodesTwoArbitersConfig());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host2"), badResponseStatus());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host3"), abstainFromVoting());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, getReceivedVotes()); // 1 because we have 1 vote and voted for ourself
+}
+
+TEST_F(ElectScatterGatherTest, FirstNodeRespondsWithYeaArbiters) {
+ start(threeNodesTwoArbitersConfig());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host2"), voteYea());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(2, getReceivedVotes());
+}
+
+TEST_F(ElectScatterGatherTest, FirstNodeRespondsWithNaySecondWithYeaArbiters) {
+ start(threeNodesTwoArbitersConfig());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+
+ processResponse(requestFrom("host2"), voteNay());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(-9999, getReceivedVotes());
+}
} // namespace
} // namespace repl
diff --git a/src/mongo/db/repl/election_winner_declarer.cpp b/src/mongo/db/repl/election_winner_declarer.cpp
index 04175335cbd..55016dc373a 100644
--- a/src/mongo/db/repl/election_winner_declarer.cpp
+++ b/src/mongo/db/repl/election_winner_declarer.cpp
@@ -41,86 +41,80 @@
namespace mongo {
namespace repl {
- ElectionWinnerDeclarer::Algorithm::Algorithm(const std::string& setName,
- long long winnerId,
- long long term,
- const std::vector<HostAndPort>& targets) :
- _setName(setName),
- _winnerId(winnerId),
- _term(term),
- _targets(targets) {}
-
- ElectionWinnerDeclarer::Algorithm::~Algorithm() {}
-
- std::vector<RemoteCommandRequest>
- ElectionWinnerDeclarer::Algorithm::getRequests() const {
- BSONObjBuilder declareElectionWinnerCmdBuilder;
- declareElectionWinnerCmdBuilder.append("replSetDeclareElectionWinner", 1);
- declareElectionWinnerCmdBuilder.append("setName", _setName);
- declareElectionWinnerCmdBuilder.append("winnerId", _winnerId);
- declareElectionWinnerCmdBuilder.append("term", _term);
- const BSONObj declareElectionWinnerCmd = declareElectionWinnerCmdBuilder.obj();
-
- std::vector<RemoteCommandRequest> requests;
- for (const auto& target : _targets) {
- requests.push_back(RemoteCommandRequest(
- target,
- "admin",
- declareElectionWinnerCmd,
- Milliseconds(30*1000))); // trying to match current Socket timeout
- }
-
- return requests;
+ElectionWinnerDeclarer::Algorithm::Algorithm(const std::string& setName,
+ long long winnerId,
+ long long term,
+ const std::vector<HostAndPort>& targets)
+ : _setName(setName), _winnerId(winnerId), _term(term), _targets(targets) {}
+
+ElectionWinnerDeclarer::Algorithm::~Algorithm() {}
+
+std::vector<RemoteCommandRequest> ElectionWinnerDeclarer::Algorithm::getRequests() const {
+ BSONObjBuilder declareElectionWinnerCmdBuilder;
+ declareElectionWinnerCmdBuilder.append("replSetDeclareElectionWinner", 1);
+ declareElectionWinnerCmdBuilder.append("setName", _setName);
+ declareElectionWinnerCmdBuilder.append("winnerId", _winnerId);
+ declareElectionWinnerCmdBuilder.append("term", _term);
+ const BSONObj declareElectionWinnerCmd = declareElectionWinnerCmdBuilder.obj();
+
+ std::vector<RemoteCommandRequest> requests;
+ for (const auto& target : _targets) {
+ requests.push_back(RemoteCommandRequest(
+ target,
+ "admin",
+ declareElectionWinnerCmd,
+ Milliseconds(30 * 1000))); // trying to match current Socket timeout
}
- void ElectionWinnerDeclarer::Algorithm::processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response) {
- _responsesProcessed++;
- if (!response.isOK()) { // failed response
- log() << "ElectionWinnerDeclarer: Got failed response from " << request.target
- << ": " << response.getStatus();
- return;
- }
-
- Status cmdResponseStatus = getStatusFromCommandResult(response.getValue().data);
- if (!cmdResponseStatus.isOK()) { // disagreement response
- _failed = true;
- _status = cmdResponseStatus;
- log() << "ElectionWinnerDeclarer: Got error response from " << request.target
- << " with term: " << response.getValue().data["term"].Number()
- << " and error: " << cmdResponseStatus;
- }
- }
-
- bool ElectionWinnerDeclarer::Algorithm::hasReceivedSufficientResponses() const {
- return _failed || _responsesProcessed == static_cast<int>(_targets.size());
- }
-
- ElectionWinnerDeclarer::ElectionWinnerDeclarer() : _isCanceled(false) {}
- ElectionWinnerDeclarer::~ElectionWinnerDeclarer() {}
+ return requests;
+}
- StatusWith<ReplicationExecutor::EventHandle> ElectionWinnerDeclarer::start(
- ReplicationExecutor* executor,
- const std::string& setName,
- long long winnerId,
- long long term,
- const std::vector<HostAndPort>& targets,
- const stdx::function<void ()>& onCompletion) {
-
- _algorithm.reset(new Algorithm(setName, winnerId, term, targets));
- _runner.reset(new ScatterGatherRunner(_algorithm.get()));
- return _runner->start(executor, onCompletion);
- }
-
- void ElectionWinnerDeclarer::cancel(ReplicationExecutor* executor) {
- _isCanceled = true;
- _runner->cancel(executor);
+void ElectionWinnerDeclarer::Algorithm::processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response) {
+ _responsesProcessed++;
+ if (!response.isOK()) { // failed response
+ log() << "ElectionWinnerDeclarer: Got failed response from " << request.target << ": "
+ << response.getStatus();
+ return;
}
- Status ElectionWinnerDeclarer::getStatus() const {
- return _algorithm->getStatus();
+ Status cmdResponseStatus = getStatusFromCommandResult(response.getValue().data);
+ if (!cmdResponseStatus.isOK()) { // disagreement response
+ _failed = true;
+ _status = cmdResponseStatus;
+ log() << "ElectionWinnerDeclarer: Got error response from " << request.target
+ << " with term: " << response.getValue().data["term"].Number()
+ << " and error: " << cmdResponseStatus;
}
-
-} // namespace repl
-} // namespace mongo
+}
+
+bool ElectionWinnerDeclarer::Algorithm::hasReceivedSufficientResponses() const {
+ return _failed || _responsesProcessed == static_cast<int>(_targets.size());
+}
+
+ElectionWinnerDeclarer::ElectionWinnerDeclarer() : _isCanceled(false) {}
+ElectionWinnerDeclarer::~ElectionWinnerDeclarer() {}
+
+StatusWith<ReplicationExecutor::EventHandle> ElectionWinnerDeclarer::start(
+ ReplicationExecutor* executor,
+ const std::string& setName,
+ long long winnerId,
+ long long term,
+ const std::vector<HostAndPort>& targets,
+ const stdx::function<void()>& onCompletion) {
+ _algorithm.reset(new Algorithm(setName, winnerId, term, targets));
+ _runner.reset(new ScatterGatherRunner(_algorithm.get()));
+ return _runner->start(executor, onCompletion);
+}
+
+void ElectionWinnerDeclarer::cancel(ReplicationExecutor* executor) {
+ _isCanceled = true;
+ _runner->cancel(executor);
+}
+
+Status ElectionWinnerDeclarer::getStatus() const {
+ return _algorithm->getStatus();
+}
+
+} // namespace repl
+} // namespace mongo
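For reference, the getRequests() implementation above builds one RemoteCommandRequest per target, addressed to the "admin" database with a 30-second timeout, and its command body is equivalent to the expression below (the field values shown are illustrative, not taken from a real election):

    BSON("replSetDeclareElectionWinner" << 1 << "setName" << "rs0"
                                        << "winnerId" << 0LL << "term" << 1LL)

processResponse() then marks the attempt as failed only when a node returns a command-level error; a transport or executor failure is merely logged and counted toward hasReceivedSufficientResponses().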
diff --git a/src/mongo/db/repl/election_winner_declarer.h b/src/mongo/db/repl/election_winner_declarer.h
index 9745ed4ce20..8c5fa995880 100644
--- a/src/mongo/db/repl/election_winner_declarer.h
+++ b/src/mongo/db/repl/election_winner_declarer.h
@@ -39,89 +39,90 @@
namespace mongo {
- class Status;
+class Status;
namespace repl {
- class ScatterGatherRunner;
- class ReplSetDeclareElectionWinnerArgs;
+class ScatterGatherRunner;
+class ReplSetDeclareElectionWinnerArgs;
- class ElectionWinnerDeclarer {
- MONGO_DISALLOW_COPYING(ElectionWinnerDeclarer);
- public:
-
- class Algorithm : public ScatterGatherAlgorithm {
- public:
- Algorithm(const std::string& setName,
- long long winnerId,
- long long term,
- const std::vector<HostAndPort>& targets);
- virtual ~Algorithm();
- virtual std::vector<RemoteCommandRequest> getRequests() const;
- virtual void processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response);
- virtual bool hasReceivedSufficientResponses() const;
-
- /**
- * Returns a Status indicating what if anything went wrong while declaring the
- * election winner.
- *
- * It is invalid to call this before hasReceivedSufficeintResponses returns true.
- */
- Status getStatus() const { return _status; }
-
- private:
- const std::string _setName;
- const long long _winnerId;
- const long long _term;
- const std::vector<HostAndPort> _targets;
- bool _failed = false;
- long long _responsesProcessed = 0;
- Status _status = Status::OK();
- };
-
- ElectionWinnerDeclarer();
- virtual ~ElectionWinnerDeclarer();
-
- /**
- * Begins the process of sending replSetDeclareElectionWinner commands to all non-DOWN nodes
- * in currentConfig, with the intention of alerting them of a new primary.
- *
- * evh can be used to schedule a callback when the process is complete.
- * This function must be run in the executor, as it must be synchronous with the command
- * callbacks that it schedules.
- * If this function returns Status::OK(), evh is then guaranteed to be signaled.
- **/
- StatusWith<ReplicationExecutor::EventHandle> start(
- ReplicationExecutor* executor,
- const std::string& setName,
- long long winnerId,
- long long term,
- const std::vector<HostAndPort>& targets,
- const stdx::function<void ()>& onCompletion = stdx::function<void ()>());
+class ElectionWinnerDeclarer {
+ MONGO_DISALLOW_COPYING(ElectionWinnerDeclarer);
- /**
- * Informs the ElectionWinnerDeclarer to cancel further processing. The "executor"
- * argument must point to the same executor passed to "start()".
- *
- * Like start(), this method must run in the executor context.
- */
- void cancel(ReplicationExecutor* executor);
+public:
+ class Algorithm : public ScatterGatherAlgorithm {
+ public:
+ Algorithm(const std::string& setName,
+ long long winnerId,
+ long long term,
+ const std::vector<HostAndPort>& targets);
+ virtual ~Algorithm();
+ virtual std::vector<RemoteCommandRequest> getRequests() const;
+ virtual void processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response);
+ virtual bool hasReceivedSufficientResponses() const;
/**
- * Returns a Status from the ElectionWinnerDeclarer::algorithm which indicates what
- * if anything went wrong while declaring the election winner.
+ * Returns a Status indicating what if anything went wrong while declaring the
+ * election winner.
*
- * It is invalid to call this before the ElectionWinnerDeclarer::algorithm finishes running.
+         * It is invalid to call this before hasReceivedSufficientResponses returns true.
*/
- Status getStatus() const;
+ Status getStatus() const {
+ return _status;
+ }
private:
- std::unique_ptr<Algorithm> _algorithm;
- std::unique_ptr<ScatterGatherRunner> _runner;
- bool _isCanceled = false;
+ const std::string _setName;
+ const long long _winnerId;
+ const long long _term;
+ const std::vector<HostAndPort> _targets;
+ bool _failed = false;
+ long long _responsesProcessed = 0;
+ Status _status = Status::OK();
};
+ ElectionWinnerDeclarer();
+ virtual ~ElectionWinnerDeclarer();
+
+ /**
+ * Begins the process of sending replSetDeclareElectionWinner commands to all non-DOWN nodes
+     * in currentConfig, with the intention of alerting them to the new primary.
+ *
+ * evh can be used to schedule a callback when the process is complete.
+ * This function must be run in the executor, as it must be synchronous with the command
+ * callbacks that it schedules.
+ * If this function returns Status::OK(), evh is then guaranteed to be signaled.
+ **/
+ StatusWith<ReplicationExecutor::EventHandle> start(
+ ReplicationExecutor* executor,
+ const std::string& setName,
+ long long winnerId,
+ long long term,
+ const std::vector<HostAndPort>& targets,
+ const stdx::function<void()>& onCompletion = stdx::function<void()>());
+
+ /**
+ * Informs the ElectionWinnerDeclarer to cancel further processing. The "executor"
+ * argument must point to the same executor passed to "start()".
+ *
+ * Like start(), this method must run in the executor context.
+ */
+ void cancel(ReplicationExecutor* executor);
+
+ /**
+     * Returns a Status from the ElectionWinnerDeclarer::Algorithm indicating what,
+     * if anything, went wrong while declaring the election winner.
+     *
+     * It is invalid to call this before the ElectionWinnerDeclarer::Algorithm has finished running.
+ */
+ Status getStatus() const;
+
+private:
+ std::unique_ptr<Algorithm> _algorithm;
+ std::unique_ptr<ScatterGatherRunner> _runner;
+ bool _isCanceled = false;
+};
+
} // namespace repl
} // namespace mongo
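Read together, the start()/cancel()/getStatus() comments above imply a calling pattern like the sketch below. This is an illustrative sketch only, modeled on the test fixtures later in this change; it assumes a ReplicationExecutor already running on its own thread, and every name not declared in this header (declareWinnerInExecutor, doneEvh, declarer) is hypothetical.

    // Must run as executor work, per the start() contract.
    void declareWinnerInExecutor(const ReplicationExecutor::CallbackArgs& cbData,
                                 ElectionWinnerDeclarer* declarer,
                                 ReplicationExecutor::EventHandle* doneEvh,
                                 const std::vector<HostAndPort>& targets) {
        invariant(cbData.status.isOK());
        ReplicationExecutor* executor = dynamic_cast<ReplicationExecutor*>(cbData.executor);
        StatusWith<ReplicationExecutor::EventHandle> evh =
            declarer->start(executor, "rs0", 0 /* winnerId */, 1 /* term */, targets);
        if (evh.isOK()) {
            *doneEvh = evh.getValue();
        }
    }

    // From the driving thread, mirroring the unit tests:
    //     executor.wait(assertGet(executor.scheduleWork(
    //         stdx::bind(declareWinnerInExecutor, stdx::placeholders::_1,
    //                    &declarer, &doneEvh, targets))));
    //     executor.waitForEvent(doneEvh);
    //     Status result = declarer.getStatus();  // non-OK only if some node disagreed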
diff --git a/src/mongo/db/repl/election_winner_declarer_test.cpp b/src/mongo/db/repl/election_winner_declarer_test.cpp
index dfb278f69a1..04177148453 100644
--- a/src/mongo/db/repl/election_winner_declarer_test.cpp
+++ b/src/mongo/db/repl/election_winner_declarer_test.cpp
@@ -44,190 +44,178 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
- using unittest::assertGet;
+using executor::NetworkInterfaceMock;
+using unittest::assertGet;
- bool stringContains(const std::string &haystack, const std::string& needle) {
- return haystack.find(needle) != std::string::npos;
+bool stringContains(const std::string& haystack, const std::string& needle) {
+ return haystack.find(needle) != std::string::npos;
+}
+
+
+class ElectionWinnerDeclarerTest : public mongo::unittest::Test {
+public:
+ virtual void setUp() {
+ std::string setName = "rs0";
+ long long winnerId = 0;
+ long long term = 1;
+ std::vector<HostAndPort> hosts = {
+ HostAndPort("host0"), HostAndPort("host1"), HostAndPort("host2")};
+
+ _declarer.reset(new ElectionWinnerDeclarer::Algorithm(setName, winnerId, term, hosts));
+ }
+
+ virtual void tearDown() {
+ _declarer.reset(NULL);
+ }
+
+protected:
+ int64_t countLogLinesContaining(const std::string& needle) {
+ return std::count_if(getCapturedLogMessages().begin(),
+ getCapturedLogMessages().end(),
+ stdx::bind(stringContains, stdx::placeholders::_1, needle));
}
+ bool hasReceivedSufficientResponses() {
+ return _declarer->hasReceivedSufficientResponses();
+ }
- class ElectionWinnerDeclarerTest : public mongo::unittest::Test {
- public:
- virtual void setUp() {
- std::string setName = "rs0";
- long long winnerId = 0;
- long long term = 1;
- std::vector<HostAndPort> hosts = {HostAndPort("host0"),
- HostAndPort("host1"),
- HostAndPort("host2")};
-
- _declarer.reset(new ElectionWinnerDeclarer::Algorithm(setName,
- winnerId,
- term,
- hosts));
- }
-
- virtual void tearDown() {
- _declarer.reset(NULL);
- }
-
- protected:
- int64_t countLogLinesContaining(const std::string& needle) {
- return std::count_if(getCapturedLogMessages().begin(),
- getCapturedLogMessages().end(),
- stdx::bind(stringContains,
- stdx::placeholders::_1,
- needle));
- }
-
- bool hasReceivedSufficientResponses() {
- return _declarer->hasReceivedSufficientResponses();
- }
-
- Status getStatus() {
- return _declarer->getStatus();
- }
-
- void processResponse(const RemoteCommandRequest& request, const ResponseStatus& response) {
- _declarer->processResponse(request, response);
- }
-
- RemoteCommandRequest requestFrom(std::string hostname) {
- return RemoteCommandRequest(HostAndPort(hostname),
- "", // fields do not matter in ElectionWinnerDeclarer
- BSONObj(),
- Milliseconds(0));
- }
-
- ResponseStatus badResponseStatus() {
- return ResponseStatus(ErrorCodes::NodeNotFound, "not on my watch");
- }
-
- ResponseStatus staleTermResponse() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("ok" << 0
- << "code" << ErrorCodes::BadValue
- << "errmsg"
- << "term has already passed"
- << "term" << 3),
- Milliseconds(10)));
- }
-
- ResponseStatus alreadyAnotherPrimaryResponse() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("ok" << 0
- << "code" << ErrorCodes::BadValue
- << "errmsg"
- << "term already has a primary"
- << "term" << 1),
- Milliseconds(10)));
- }
-
- ResponseStatus differentConfigVersionResponse() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("ok" << 0
- << "code" << ErrorCodes::BadValue
- << "errmsg"
- << "config version does not match"
- << "term" << 1),
- Milliseconds(10)));
- }
-
- ResponseStatus differentSetNameResponse() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("ok" << 0
- << "code" << ErrorCodes::BadValue
- << "errmsg"
- << "replSet name does not match"
- << "term" << 1),
- Milliseconds(10)));
- }
-
- ResponseStatus goodResponse() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("ok" << 1
- << "term" << 1),
- Milliseconds(10)));
- }
-
- private:
- unique_ptr<ElectionWinnerDeclarer::Algorithm> _declarer;
-
- };
-
- TEST_F(ElectionWinnerDeclarerTest, FinishWithOnlyGoodResponses) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host0"), goodResponse());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), goodResponse());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), goodResponse());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_OK(getStatus());
+ Status getStatus() {
+ return _declarer->getStatus();
}
- TEST_F(ElectionWinnerDeclarerTest, FailedDueToStaleTerm) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host0"), goodResponse());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), staleTermResponse());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got error response from host1"));
- stopCapturingLogMessages();
- ASSERT_EQUALS(getStatus().reason(), "term has already passed");
+ void processResponse(const RemoteCommandRequest& request, const ResponseStatus& response) {
+ _declarer->processResponse(request, response);
}
- TEST_F(ElectionWinnerDeclarerTest, FailedDueToAnotherPrimary) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host0"), goodResponse());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), alreadyAnotherPrimaryResponse());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got error response from host1"));
- stopCapturingLogMessages();
- ASSERT_EQUALS(getStatus().reason(), "term already has a primary");
+ RemoteCommandRequest requestFrom(std::string hostname) {
+ return RemoteCommandRequest(HostAndPort(hostname),
+ "", // fields do not matter in ElectionWinnerDeclarer
+ BSONObj(),
+ Milliseconds(0));
}
- TEST_F(ElectionWinnerDeclarerTest, FailedDueToDifferentSetName) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host0"), goodResponse());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), differentSetNameResponse());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got error response from host1"));
- stopCapturingLogMessages();
- ASSERT_EQUALS(getStatus().reason(), "replSet name does not match");
+ ResponseStatus badResponseStatus() {
+ return ResponseStatus(ErrorCodes::NodeNotFound, "not on my watch");
}
- TEST_F(ElectionWinnerDeclarerTest, FinishWithOnlyGoodResponsesAndMissingNode) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host0"), goodResponse());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), badResponseStatus());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), goodResponse());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host1"));
- stopCapturingLogMessages();
- ASSERT_OK(getStatus());
+ ResponseStatus staleTermResponse() {
+ return ResponseStatus(NetworkInterfaceMock::Response(
+ BSON("ok" << 0 << "code" << ErrorCodes::BadValue << "errmsg"
+ << "term has already passed"
+ << "term" << 3),
+ Milliseconds(10)));
}
- TEST_F(ElectionWinnerDeclarerTest, FinishWithOnlyMissingResponses) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host0"), badResponseStatus());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), badResponseStatus());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), badResponseStatus());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host0"));
- ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host1"));
- ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host2"));
- stopCapturingLogMessages();
- ASSERT_OK(getStatus());
+ ResponseStatus alreadyAnotherPrimaryResponse() {
+ return ResponseStatus(NetworkInterfaceMock::Response(
+ BSON("ok" << 0 << "code" << ErrorCodes::BadValue << "errmsg"
+ << "term already has a primary"
+ << "term" << 1),
+ Milliseconds(10)));
}
+ ResponseStatus differentConfigVersionResponse() {
+ return ResponseStatus(NetworkInterfaceMock::Response(
+ BSON("ok" << 0 << "code" << ErrorCodes::BadValue << "errmsg"
+ << "config version does not match"
+ << "term" << 1),
+ Milliseconds(10)));
+ }
+
+ ResponseStatus differentSetNameResponse() {
+ return ResponseStatus(NetworkInterfaceMock::Response(
+ BSON("ok" << 0 << "code" << ErrorCodes::BadValue << "errmsg"
+ << "replSet name does not match"
+ << "term" << 1),
+ Milliseconds(10)));
+ }
+
+ ResponseStatus goodResponse() {
+ return ResponseStatus(
+ NetworkInterfaceMock::Response(BSON("ok" << 1 << "term" << 1), Milliseconds(10)));
+ }
+
+private:
+ unique_ptr<ElectionWinnerDeclarer::Algorithm> _declarer;
+};
+
+TEST_F(ElectionWinnerDeclarerTest, FinishWithOnlyGoodResponses) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host0"), goodResponse());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), goodResponse());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host2"), goodResponse());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_OK(getStatus());
+}
+
+TEST_F(ElectionWinnerDeclarerTest, FailedDueToStaleTerm) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host0"), goodResponse());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), staleTermResponse());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got error response from host1"));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(getStatus().reason(), "term has already passed");
+}
+
+TEST_F(ElectionWinnerDeclarerTest, FailedDueToAnotherPrimary) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host0"), goodResponse());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), alreadyAnotherPrimaryResponse());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got error response from host1"));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(getStatus().reason(), "term already has a primary");
+}
+
+TEST_F(ElectionWinnerDeclarerTest, FailedDueToDifferentSetName) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host0"), goodResponse());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), differentSetNameResponse());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got error response from host1"));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(getStatus().reason(), "replSet name does not match");
+}
+
+TEST_F(ElectionWinnerDeclarerTest, FinishWithOnlyGoodResponsesAndMissingNode) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host0"), goodResponse());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), badResponseStatus());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host2"), goodResponse());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host1"));
+ stopCapturingLogMessages();
+ ASSERT_OK(getStatus());
+}
+
+TEST_F(ElectionWinnerDeclarerTest, FinishWithOnlyMissingResponses) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host0"), badResponseStatus());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), badResponseStatus());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host2"), badResponseStatus());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host0"));
+ ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host1"));
+ ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host2"));
+ stopCapturingLogMessages();
+ ASSERT_OK(getStatus());
+}
+
} // namespace
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/freshness_checker.cpp b/src/mongo/db/repl/freshness_checker.cpp
index 17a501f2ce4..ca1d665dd81 100644
--- a/src/mongo/db/repl/freshness_checker.cpp
+++ b/src/mongo/db/repl/freshness_checker.cpp
@@ -46,192 +46,178 @@
namespace mongo {
namespace repl {
- FreshnessChecker::Algorithm::Algorithm(
- Timestamp lastOpTimeApplied,
- const ReplicaSetConfig& rsConfig,
- int selfIndex,
- const std::vector<HostAndPort>& targets) :
- _responsesProcessed(0),
- _failedVoterResponses(0),
- _lastOpTimeApplied(lastOpTimeApplied),
- _rsConfig(rsConfig),
- _selfIndex(selfIndex),
- _targets(targets),
- _votingTargets(0),
- _losableVoters(0),
- _myVote(0),
- _abortReason(None) {
-
- // Count voting targets (since the targets could be a subset of members).
- for (std::vector<HostAndPort>::const_iterator it = _targets.begin();
- it != _targets.end();
- ++it) {
- const MemberConfig* member = _rsConfig.findMemberByHostAndPort(*it);
- if (member && member->isVoter())
- ++_votingTargets;
- }
-
- _myVote = _rsConfig.getMemberAt(_selfIndex).isVoter() ? 1 : 0;
- _losableVoters = std::max(0,
- ((_votingTargets + _myVote) - _rsConfig.getMajorityVoteCount()));
-
- }
-
- FreshnessChecker::Algorithm::~Algorithm() {}
-
- std::vector<RemoteCommandRequest>
- FreshnessChecker::Algorithm::getRequests() const {
- const MemberConfig& selfConfig = _rsConfig.getMemberAt(_selfIndex);
-
- // gather all not-down nodes, get their fullnames(or hostandport's)
- // schedule fresh command for each node
- BSONObjBuilder freshCmdBuilder;
- freshCmdBuilder.append("replSetFresh", 1);
- freshCmdBuilder.append("set", _rsConfig.getReplSetName());
- freshCmdBuilder.append("opTime", Date_t::fromMillisSinceEpoch(_lastOpTimeApplied.asLL()));
- freshCmdBuilder.append("who", selfConfig.getHostAndPort().toString());
- freshCmdBuilder.appendIntOrLL("cfgver", _rsConfig.getConfigVersion());
- freshCmdBuilder.append("id", selfConfig.getId());
- const BSONObj replSetFreshCmd = freshCmdBuilder.obj();
-
- std::vector<RemoteCommandRequest> requests;
- for (std::vector<HostAndPort>::const_iterator it = _targets.begin();
- it != _targets.end();
- ++it) {
- invariant(*it != selfConfig.getHostAndPort());
- requests.push_back(RemoteCommandRequest(
- *it,
- "admin",
- replSetFreshCmd,
- Milliseconds(30*1000))); // trying to match current Socket timeout
- }
-
- return requests;
+FreshnessChecker::Algorithm::Algorithm(Timestamp lastOpTimeApplied,
+ const ReplicaSetConfig& rsConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& targets)
+ : _responsesProcessed(0),
+ _failedVoterResponses(0),
+ _lastOpTimeApplied(lastOpTimeApplied),
+ _rsConfig(rsConfig),
+ _selfIndex(selfIndex),
+ _targets(targets),
+ _votingTargets(0),
+ _losableVoters(0),
+ _myVote(0),
+ _abortReason(None) {
+ // Count voting targets (since the targets could be a subset of members).
+ for (std::vector<HostAndPort>::const_iterator it = _targets.begin(); it != _targets.end();
+ ++it) {
+ const MemberConfig* member = _rsConfig.findMemberByHostAndPort(*it);
+ if (member && member->isVoter())
+ ++_votingTargets;
}
- bool FreshnessChecker::Algorithm::hadTooManyFailedVoterResponses() const {
- const bool tooManyLostVoters = (_failedVoterResponses > _losableVoters);
-
- LOG(3) << "hadTooManyFailedVoterResponses(" << tooManyLostVoters << ") = "
- << _failedVoterResponses << " failed responses <"
- << " (" << _votingTargets << " total voters - "
- << _rsConfig.getMajorityVoteCount() << " majority voters - me ("
- << _myVote << ")) -- losableVotes: " << _losableVoters;
- return tooManyLostVoters;
+ _myVote = _rsConfig.getMemberAt(_selfIndex).isVoter() ? 1 : 0;
+ _losableVoters = std::max(0, ((_votingTargets + _myVote) - _rsConfig.getMajorityVoteCount()));
+}
+
+FreshnessChecker::Algorithm::~Algorithm() {}
+
+std::vector<RemoteCommandRequest> FreshnessChecker::Algorithm::getRequests() const {
+ const MemberConfig& selfConfig = _rsConfig.getMemberAt(_selfIndex);
+
+    // Gather all not-down nodes by their full names (HostAndPorts) and
+    // schedule a replSetFresh command for each of them.
+ BSONObjBuilder freshCmdBuilder;
+ freshCmdBuilder.append("replSetFresh", 1);
+ freshCmdBuilder.append("set", _rsConfig.getReplSetName());
+ freshCmdBuilder.append("opTime", Date_t::fromMillisSinceEpoch(_lastOpTimeApplied.asLL()));
+ freshCmdBuilder.append("who", selfConfig.getHostAndPort().toString());
+ freshCmdBuilder.appendIntOrLL("cfgver", _rsConfig.getConfigVersion());
+ freshCmdBuilder.append("id", selfConfig.getId());
+ const BSONObj replSetFreshCmd = freshCmdBuilder.obj();
+
+ std::vector<RemoteCommandRequest> requests;
+ for (std::vector<HostAndPort>::const_iterator it = _targets.begin(); it != _targets.end();
+ ++it) {
+ invariant(*it != selfConfig.getHostAndPort());
+ requests.push_back(RemoteCommandRequest(
+ *it,
+ "admin",
+ replSetFreshCmd,
+ Milliseconds(30 * 1000))); // trying to match current Socket timeout
}
- bool FreshnessChecker::Algorithm::_isVotingMember(const HostAndPort hap) const {
- const MemberConfig* member = _rsConfig.findMemberByHostAndPort(hap);
- invariant(member);
- return member->isVoter();
- }
-
- void FreshnessChecker::Algorithm::processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response) {
- ++_responsesProcessed;
- bool votingMember = _isVotingMember(request.target);
-
- Status status = Status::OK();
-
- if (!response.isOK() ||
- !((status = getStatusFromCommandResult(response.getValue().data)).isOK())) {
- if (votingMember) {
- ++_failedVoterResponses;
- if (hadTooManyFailedVoterResponses()) {
- _abortReason = QuorumUnreachable;
- }
- }
- if (!response.isOK()) { // network/executor error
- LOG(2) << "FreshnessChecker: Got failed response from " << request.target;
- }
- else { // command error, like unauth
- LOG(2) << "FreshnessChecker: Got error response from " << request.target
- << " :" << status;
+ return requests;
+}
+
+bool FreshnessChecker::Algorithm::hadTooManyFailedVoterResponses() const {
+ const bool tooManyLostVoters = (_failedVoterResponses > _losableVoters);
+
+ LOG(3) << "hadTooManyFailedVoterResponses(" << tooManyLostVoters
+ << ") = " << _failedVoterResponses << " failed responses <"
+ << " (" << _votingTargets << " total voters - " << _rsConfig.getMajorityVoteCount()
+ << " majority voters - me (" << _myVote << ")) -- losableVotes: " << _losableVoters;
+ return tooManyLostVoters;
+}
+
+bool FreshnessChecker::Algorithm::_isVotingMember(const HostAndPort hap) const {
+ const MemberConfig* member = _rsConfig.findMemberByHostAndPort(hap);
+ invariant(member);
+ return member->isVoter();
+}
+
+void FreshnessChecker::Algorithm::processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response) {
+ ++_responsesProcessed;
+ bool votingMember = _isVotingMember(request.target);
+
+ Status status = Status::OK();
+
+ if (!response.isOK() ||
+ !((status = getStatusFromCommandResult(response.getValue().data)).isOK())) {
+ if (votingMember) {
+ ++_failedVoterResponses;
+ if (hadTooManyFailedVoterResponses()) {
+ _abortReason = QuorumUnreachable;
}
- return;
}
-
- const BSONObj res = response.getValue().data;
-
- LOG(2) << "FreshnessChecker: Got response from " << request.target
- << " of " << res;
-
- if (res["fresher"].trueValue()) {
- log() << "not electing self, we are not freshest";
- _abortReason = FresherNodeFound;
- return;
- }
-
- if (res["opTime"].type() != mongo::Date) {
- error() << "wrong type for opTime argument in replSetFresh response: " <<
- typeName(res["opTime"].type());
- _abortReason = FresherNodeFound;
- return;
- }
- Timestamp remoteTime(res["opTime"].date());
- if (remoteTime == _lastOpTimeApplied) {
- _abortReason = FreshnessTie;
- }
- if (remoteTime > _lastOpTimeApplied) {
- // something really wrong (rogue command?)
- _abortReason = FresherNodeFound;
- return;
- }
-
- if (res["veto"].trueValue()) {
- BSONElement msg = res["errmsg"];
- if (msg.type() == String) {
- log() << "not electing self, " << request.target.toString() <<
- " would veto with '" << msg.String() << "'";
- }
- else {
- log() << "not electing self, " << request.target.toString() <<
- " would veto";
- }
- _abortReason = FresherNodeFound;
- return;
+ if (!response.isOK()) { // network/executor error
+ LOG(2) << "FreshnessChecker: Got failed response from " << request.target;
+ } else { // command error, like unauth
+ LOG(2) << "FreshnessChecker: Got error response from " << request.target << " :"
+ << status;
}
+ return;
}
- bool FreshnessChecker::Algorithm::hasReceivedSufficientResponses() const {
- return (_abortReason != None && _abortReason != FreshnessTie) ||
- (_responsesProcessed == static_cast<int>(_targets.size()));
- }
+ const BSONObj res = response.getValue().data;
- FreshnessChecker::ElectionAbortReason FreshnessChecker::Algorithm::shouldAbortElection() const {
- return _abortReason;
- }
+ LOG(2) << "FreshnessChecker: Got response from " << request.target << " of " << res;
- FreshnessChecker::ElectionAbortReason FreshnessChecker::shouldAbortElection() const {
- return _algorithm->shouldAbortElection();
+ if (res["fresher"].trueValue()) {
+ log() << "not electing self, we are not freshest";
+ _abortReason = FresherNodeFound;
+ return;
}
- long long FreshnessChecker::getOriginalConfigVersion() const {
- return _originalConfigVersion;
+ if (res["opTime"].type() != mongo::Date) {
+ error() << "wrong type for opTime argument in replSetFresh response: "
+ << typeName(res["opTime"].type());
+ _abortReason = FresherNodeFound;
+ return;
}
-
- FreshnessChecker::FreshnessChecker() : _isCanceled(false) {}
- FreshnessChecker::~FreshnessChecker() {}
-
- StatusWith<ReplicationExecutor::EventHandle> FreshnessChecker::start(
- ReplicationExecutor* executor,
- const Timestamp& lastOpTimeApplied,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& targets,
- const stdx::function<void ()>& onCompletion) {
-
- _originalConfigVersion = currentConfig.getConfigVersion();
- _algorithm.reset(new Algorithm(lastOpTimeApplied, currentConfig, selfIndex, targets));
- _runner.reset(new ScatterGatherRunner(_algorithm.get()));
- return _runner->start(executor, onCompletion);
+ Timestamp remoteTime(res["opTime"].date());
+ if (remoteTime == _lastOpTimeApplied) {
+ _abortReason = FreshnessTie;
}
-
- void FreshnessChecker::cancel(ReplicationExecutor* executor) {
- _isCanceled = true;
- _runner->cancel(executor);
+ if (remoteTime > _lastOpTimeApplied) {
+ // something really wrong (rogue command?)
+ _abortReason = FresherNodeFound;
+ return;
}
-} // namespace repl
-} // namespace mongo
+ if (res["veto"].trueValue()) {
+ BSONElement msg = res["errmsg"];
+ if (msg.type() == String) {
+ log() << "not electing self, " << request.target.toString() << " would veto with '"
+ << msg.String() << "'";
+ } else {
+ log() << "not electing self, " << request.target.toString() << " would veto";
+ }
+ _abortReason = FresherNodeFound;
+ return;
+ }
+}
+
+bool FreshnessChecker::Algorithm::hasReceivedSufficientResponses() const {
+ return (_abortReason != None && _abortReason != FreshnessTie) ||
+ (_responsesProcessed == static_cast<int>(_targets.size()));
+}
+
+FreshnessChecker::ElectionAbortReason FreshnessChecker::Algorithm::shouldAbortElection() const {
+ return _abortReason;
+}
+
+FreshnessChecker::ElectionAbortReason FreshnessChecker::shouldAbortElection() const {
+ return _algorithm->shouldAbortElection();
+}
+
+long long FreshnessChecker::getOriginalConfigVersion() const {
+ return _originalConfigVersion;
+}
+
+FreshnessChecker::FreshnessChecker() : _isCanceled(false) {}
+FreshnessChecker::~FreshnessChecker() {}
+
+StatusWith<ReplicationExecutor::EventHandle> FreshnessChecker::start(
+ ReplicationExecutor* executor,
+ const Timestamp& lastOpTimeApplied,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& targets,
+ const stdx::function<void()>& onCompletion) {
+ _originalConfigVersion = currentConfig.getConfigVersion();
+ _algorithm.reset(new Algorithm(lastOpTimeApplied, currentConfig, selfIndex, targets));
+ _runner.reset(new ScatterGatherRunner(_algorithm.get()));
+ return _runner->start(executor, onCompletion);
+}
+
+void FreshnessChecker::cancel(ReplicationExecutor* executor) {
+ _isCanceled = true;
+ _runner->cancel(executor);
+}
+
+} // namespace repl
+} // namespace mongo
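To make the quorum arithmetic above concrete, take a hedged example not drawn from any real configuration: a five-member set in which every member votes, the candidate is one of them, and the four remaining members are the targets. Then _votingTargets = 4, _myVote = 1, and with getMajorityVoteCount() = 3 the constructor computes _losableVoters = max(0, (4 + 1) - 3) = 2. Since hadTooManyFailedVoterResponses() tests _failedVoterResponses > _losableVoters, the third failed voter response flips _abortReason to QuorumUnreachable, and hasReceivedSufficientResponses() then ends the scatter-gather early.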
diff --git a/src/mongo/db/repl/freshness_checker.h b/src/mongo/db/repl/freshness_checker.h
index 188e67bd953..11515bfc0ad 100644
--- a/src/mongo/db/repl/freshness_checker.h
+++ b/src/mongo/db/repl/freshness_checker.h
@@ -38,127 +38,128 @@
namespace mongo {
- class Status;
+class Status;
namespace repl {
- class ReplicaSetConfig;
- class ScatterGatherRunner;
+class ReplicaSetConfig;
+class ScatterGatherRunner;
- class FreshnessChecker {
- MONGO_DISALLOW_COPYING(FreshnessChecker);
+class FreshnessChecker {
+ MONGO_DISALLOW_COPYING(FreshnessChecker);
+
+public:
+ enum ElectionAbortReason {
+ None = 0,
+ FresherNodeFound, // Freshness check found fresher node
+ FreshnessTie, // Freshness check resulted in one or more nodes with our lastAppliedOpTime
+ QuorumUnavailable, // Not enough up voters
+ QuorumUnreachable // Too many failed voter responses
+ };
+
+ class Algorithm : public ScatterGatherAlgorithm {
public:
- enum ElectionAbortReason {
- None = 0,
- FresherNodeFound, // Freshness check found fresher node
- FreshnessTie, // Freshness check resulted in one or more nodes with our lastAppliedOpTime
- QuorumUnavailable, // Not enough up voters
- QuorumUnreachable // Too many failed voter responses
- };
-
- class Algorithm : public ScatterGatherAlgorithm {
- public:
- Algorithm(Timestamp lastOpTimeApplied,
- const ReplicaSetConfig& rsConfig,
- int selfIndex,
- const std::vector<HostAndPort>& targets);
- virtual ~Algorithm();
- virtual std::vector<RemoteCommandRequest> getRequests() const;
- virtual void processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response);
- virtual bool hasReceivedSufficientResponses() const;
- ElectionAbortReason shouldAbortElection() const;
-
- private:
- // Returns true if the number of failed votes is over _losableVotes()
- bool hadTooManyFailedVoterResponses() const;
-
- // Returns true if the member, by host and port, has a vote.
- bool _isVotingMember(const HostAndPort host) const;
-
- // Number of responses received so far.
- int _responsesProcessed;
-
- // Number of failed voter responses so far.
- int _failedVoterResponses;
-
- // Last Timestamp applied by the caller; used in the Fresh command
- const Timestamp _lastOpTimeApplied;
-
- // Config to use for this check
- const ReplicaSetConfig _rsConfig;
-
- // Our index position in _rsConfig
- const int _selfIndex;
-
- // The UP members we are checking
- const std::vector<HostAndPort> _targets;
-
- // Number of voting targets
- int _votingTargets;
-
- // Number of voting nodes which can error
- int _losableVoters;
-
- // 1 if I have a vote, otherwise 0
- int _myVote;
-
- // Reason to abort, start with None
- ElectionAbortReason _abortReason;
-
- };
-
- FreshnessChecker();
- virtual ~FreshnessChecker();
-
- /**
- * Begins the process of sending replSetFresh commands to all non-DOWN nodes
- * in currentConfig, with the intention of determining whether the current node
- * is freshest.
- * evh can be used to schedule a callback when the process is complete.
- * This function must be run in the executor, as it must be synchronous with the command
- * callbacks that it schedules.
- * If this function returns Status::OK(), evh is then guaranteed to be signaled.
- **/
- StatusWith<ReplicationExecutor::EventHandle> start(
- ReplicationExecutor* executor,
- const Timestamp& lastOpTimeApplied,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& targets,
- const stdx::function<void ()>& onCompletion = stdx::function<void ()>());
-
- /**
- * Informs the freshness checker to cancel further processing. The "executor"
- * argument must point to the same executor passed to "start()".
- *
- * Like start(), this method must run in the executor context.
- */
- void cancel(ReplicationExecutor* executor);
-
- /**
- * Returns true if cancel() was called on this instance.
- */
- bool isCanceled() const { return _isCanceled; }
-
- /**
- * 'None' if the election should continue, otherwise the reason to abort
- */
+ Algorithm(Timestamp lastOpTimeApplied,
+ const ReplicaSetConfig& rsConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& targets);
+ virtual ~Algorithm();
+ virtual std::vector<RemoteCommandRequest> getRequests() const;
+ virtual void processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response);
+ virtual bool hasReceivedSufficientResponses() const;
ElectionAbortReason shouldAbortElection() const;
- /**
- * Returns the config version supplied in the config when start() was called.
- * Useful for determining if the the config version has changed.
- */
- long long getOriginalConfigVersion() const;
-
private:
- std::unique_ptr<Algorithm> _algorithm;
- std::unique_ptr<ScatterGatherRunner> _runner;
- long long _originalConfigVersion;
- bool _isCanceled;
+    // Returns true if the number of failed voter responses exceeds _losableVoters.
+ bool hadTooManyFailedVoterResponses() const;
+
+ // Returns true if the member, by host and port, has a vote.
+ bool _isVotingMember(const HostAndPort host) const;
+
+ // Number of responses received so far.
+ int _responsesProcessed;
+
+ // Number of failed voter responses so far.
+ int _failedVoterResponses;
+
+ // Last Timestamp applied by the caller; used in the Fresh command
+ const Timestamp _lastOpTimeApplied;
+
+ // Config to use for this check
+ const ReplicaSetConfig _rsConfig;
+
+ // Our index position in _rsConfig
+ const int _selfIndex;
+
+ // The UP members we are checking
+ const std::vector<HostAndPort> _targets;
+
+ // Number of voting targets
+ int _votingTargets;
+
+ // Number of voting nodes which can error
+ int _losableVoters;
+
+ // 1 if I have a vote, otherwise 0
+ int _myVote;
+
+    // Reason to abort; starts as None
+ ElectionAbortReason _abortReason;
};
+ FreshnessChecker();
+ virtual ~FreshnessChecker();
+
+ /**
+ * Begins the process of sending replSetFresh commands to all non-DOWN nodes
+ * in currentConfig, with the intention of determining whether the current node
+ * is freshest.
+ * evh can be used to schedule a callback when the process is complete.
+ * This function must be run in the executor, as it must be synchronous with the command
+ * callbacks that it schedules.
+ * If this function returns Status::OK(), evh is then guaranteed to be signaled.
+ **/
+ StatusWith<ReplicationExecutor::EventHandle> start(
+ ReplicationExecutor* executor,
+ const Timestamp& lastOpTimeApplied,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& targets,
+ const stdx::function<void()>& onCompletion = stdx::function<void()>());
+
+ /**
+ * Informs the freshness checker to cancel further processing. The "executor"
+ * argument must point to the same executor passed to "start()".
+ *
+ * Like start(), this method must run in the executor context.
+ */
+ void cancel(ReplicationExecutor* executor);
+
+ /**
+ * Returns true if cancel() was called on this instance.
+ */
+ bool isCanceled() const {
+ return _isCanceled;
+ }
+
+ /**
+ * 'None' if the election should continue, otherwise the reason to abort
+ */
+ ElectionAbortReason shouldAbortElection() const;
+
+ /**
+ * Returns the config version supplied in the config when start() was called.
+     * Useful for determining if the config version has changed.
+ */
+ long long getOriginalConfigVersion() const;
+
+private:
+ std::unique_ptr<Algorithm> _algorithm;
+ std::unique_ptr<ScatterGatherRunner> _runner;
+ long long _originalConfigVersion;
+ bool _isCanceled;
+};
+
} // namespace repl
} // namespace mongo
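As a companion to the declarations above, the sketch below shows one way a caller might act on the checker's outcome once its completion event has been signaled. It is a hedged illustration rather than the actual election code: the enum values come from ElectionAbortReason above, the function name onFreshnessCheckDone is hypothetical, and the next-phase logic is left as comments.

    void onFreshnessCheckDone(const FreshnessChecker& checker) {
        switch (checker.shouldAbortElection()) {
            case FreshnessChecker::None:
                // Proceed to the next phase of the election.
                break;
            case FreshnessChecker::FreshnessTie:
                // A tie never terminates the scatter-gather early (see
                // hasReceivedSufficientResponses above); how to break the
                // tie is left to the caller.
                break;
            case FreshnessChecker::FresherNodeFound:
            case FreshnessChecker::QuorumUnavailable:
            case FreshnessChecker::QuorumUnreachable:
                // Abort this election attempt.
                break;
        }
    }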
diff --git a/src/mongo/db/repl/freshness_checker_test.cpp b/src/mongo/db/repl/freshness_checker_test.cpp
index 82a266b2910..fb08c072acd 100644
--- a/src/mongo/db/repl/freshness_checker_test.cpp
+++ b/src/mongo/db/repl/freshness_checker_test.cpp
@@ -48,1034 +48,991 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
- using unittest::assertGet;
-
- bool stringContains(const std::string &haystack, const std::string& needle) {
- return haystack.find(needle) != std::string::npos;
+using executor::NetworkInterfaceMock;
+using unittest::assertGet;
+
+bool stringContains(const std::string& haystack, const std::string& needle) {
+ return haystack.find(needle) != std::string::npos;
+}
+
+class FreshnessCheckerTest : public mongo::unittest::Test {
+protected:
+ void startTest(const Timestamp& lastOpTimeApplied,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& hosts);
+ void waitOnChecker();
+ FreshnessChecker::ElectionAbortReason shouldAbortElection() const;
+
+ int64_t countLogLinesContaining(const std::string& needle) {
+ return std::count_if(getCapturedLogMessages().begin(),
+ getCapturedLogMessages().end(),
+ stdx::bind(stringContains, stdx::placeholders::_1, needle));
}
- class FreshnessCheckerTest : public mongo::unittest::Test {
- protected:
- void startTest(const Timestamp& lastOpTimeApplied,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& hosts);
- void waitOnChecker();
- FreshnessChecker::ElectionAbortReason shouldAbortElection() const;
-
- int64_t countLogLinesContaining(const std::string& needle) {
- return std::count_if(getCapturedLogMessages().begin(),
- getCapturedLogMessages().end(),
- stdx::bind(stringContains,
- stdx::placeholders::_1,
- needle));
- }
-
- NetworkInterfaceMock* _net;
- StorageInterfaceMock* _storage;
- std::unique_ptr<ReplicationExecutor> _executor;
- std::unique_ptr<stdx::thread> _executorThread;
-
- private:
- void freshnessCheckerRunner(const ReplicationExecutor::CallbackArgs& data,
- const Timestamp& lastOpTimeApplied,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& hosts);
- void setUp();
- void tearDown();
-
- std::unique_ptr<FreshnessChecker> _checker;
- ReplicationExecutor::EventHandle _checkerDoneEvent;
- };
-
- void FreshnessCheckerTest::setUp() {
- _net = new NetworkInterfaceMock;
- _storage = new StorageInterfaceMock;
- _executor.reset(new ReplicationExecutor(_net, _storage, 1 /* prng seed */));
- _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run,
- _executor.get())));
- _checker.reset(new FreshnessChecker);
+ NetworkInterfaceMock* _net;
+ StorageInterfaceMock* _storage;
+ std::unique_ptr<ReplicationExecutor> _executor;
+ std::unique_ptr<stdx::thread> _executorThread;
+
+private:
+ void freshnessCheckerRunner(const ReplicationExecutor::CallbackArgs& data,
+ const Timestamp& lastOpTimeApplied,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& hosts);
+ void setUp();
+ void tearDown();
+
+ std::unique_ptr<FreshnessChecker> _checker;
+ ReplicationExecutor::EventHandle _checkerDoneEvent;
+};
+
+void FreshnessCheckerTest::setUp() {
+ _net = new NetworkInterfaceMock;
+ _storage = new StorageInterfaceMock;
+ _executor.reset(new ReplicationExecutor(_net, _storage, 1 /* prng seed */));
+ _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run, _executor.get())));
+ _checker.reset(new FreshnessChecker);
+}
+
+void FreshnessCheckerTest::tearDown() {
+ _executor->shutdown();
+ _executorThread->join();
+}
+
+void FreshnessCheckerTest::waitOnChecker() {
+ _executor->waitForEvent(_checkerDoneEvent);
+}
+
+FreshnessChecker::ElectionAbortReason FreshnessCheckerTest::shouldAbortElection() const {
+ return _checker->shouldAbortElection();
+}
+
+ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(configBson));
+ ASSERT_OK(config.validate());
+ return config;
+}
+
+const BSONObj makeFreshRequest(const ReplicaSetConfig& rsConfig,
+ Timestamp lastOpTimeApplied,
+ int selfIndex) {
+ const MemberConfig& myConfig = rsConfig.getMemberAt(selfIndex);
+ return BSON("replSetFresh" << 1 << "set" << rsConfig.getReplSetName() << "opTime"
+ << Date_t::fromMillisSinceEpoch(lastOpTimeApplied.asLL()) << "who"
+ << myConfig.getHostAndPort().toString() << "cfgver"
+ << rsConfig.getConfigVersion() << "id" << myConfig.getId());
+}
+
+// This is necessary because the run method must be scheduled in the ReplicationExecutor
+// so that it runs with the correct concurrency semantics.
+void FreshnessCheckerTest::freshnessCheckerRunner(const ReplicationExecutor::CallbackArgs& data,
+ const Timestamp& lastOpTimeApplied,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& hosts) {
+ invariant(data.status.isOK());
+ ReplicationExecutor* executor = dynamic_cast<ReplicationExecutor*>(data.executor);
+ ASSERT(executor);
+ StatusWith<ReplicationExecutor::EventHandle> evh =
+ _checker->start(executor, lastOpTimeApplied, currentConfig, selfIndex, hosts);
+ _checkerDoneEvent = assertGet(evh);
+}
+
+void FreshnessCheckerTest::startTest(const Timestamp& lastOpTimeApplied,
+ const ReplicaSetConfig& currentConfig,
+ int selfIndex,
+ const std::vector<HostAndPort>& hosts) {
+ _executor->wait(
+ assertGet(_executor->scheduleWork(stdx::bind(&FreshnessCheckerTest::freshnessCheckerRunner,
+ this,
+ stdx::placeholders::_1,
+ lastOpTimeApplied,
+ currentConfig,
+ selfIndex,
+ hosts))));
+}
+
+TEST_F(FreshnessCheckerTest, TwoNodes) {
+ // Two nodes, we are node h1. We are freshest, but we tie with h2.
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ hosts.push_back(config.getMemberAt(1).getHostAndPort());
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(0, 0), 0);
+
+ startTest(Timestamp(0, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(
+ BSON("ok" << 1 << "id" << 2 << "set"
+ << "rs0"
+ << "who"
+ << "h1"
+ << "cfgver" << 1 << "opTime" << Date_t()),
+ Milliseconds(8))));
}
-
- void FreshnessCheckerTest::tearDown() {
- _executor->shutdown();
- _executorThread->join();
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FreshnessTie);
+}
+
+TEST_F(FreshnessCheckerTest, ShuttingDown) {
+ // Two nodes, we are node h1. Shutdown happens while we're scheduling remote commands.
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ hosts.push_back(config.getMemberAt(1).getHostAndPort());
+
+ startTest(Timestamp(0, 0), config, 0, hosts);
+ _executor->shutdown();
+ waitOnChecker();
+
+ // This seems less than ideal, but if we are shutting down, the next phase of election
+ // cannot proceed anyway.
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::None);
+}
+
+TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshest) {
+ // other responds as fresher than us
+ startCapturingLogMessages();
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ hosts.push_back(config.getMemberAt(1).getHostAndPort());
+
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10, 0), 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1 << "id" << 2 << "set"
+ << "rs0"
+ << "who"
+ << "h1"
+ << "cfgver" << 1 << "fresher" << true
+ << "opTime" << Date_t()),
+ Milliseconds(8))));
}
-
- void FreshnessCheckerTest::waitOnChecker() {
- _executor->waitForEvent(_checkerDoneEvent);
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+ ASSERT_EQUALS(1, countLogLinesContaining("not electing self, we are not freshest"));
+}
+
+TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTime) {
+ // other responds with a later optime than ours
+ startCapturingLogMessages();
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ hosts.push_back(config.getMemberAt(1).getHostAndPort());
+
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(0, 0), 0);
+
+ startTest(Timestamp(0, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(
+ BSON("ok" << 1 << "id" << 2 << "set"
+ << "rs0"
+ << "who"
+ << "h1"
+ << "cfgver" << 1 << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(10, 0).asLL())),
+ Milliseconds(8))));
}
-
- FreshnessChecker::ElectionAbortReason FreshnessCheckerTest::shouldAbortElection() const {
- return _checker->shouldAbortElection();
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
+
+TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
+ // other responds with "opTime" field of non-Date value, causing not freshest
+ startCapturingLogMessages();
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ hosts.push_back(config.getMemberAt(1).getHostAndPort());
+
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10, 0), 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1 << "id" << 2 << "set"
+ << "rs0"
+ << "who"
+ << "h1"
+ << "cfgver" << 1 << "opTime" << 3),
+ Milliseconds(8))));
}
-
- ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(configBson));
- ASSERT_OK(config.validate());
- return config;
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+
+ stopCapturingLogMessages();
+
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "wrong type for opTime argument in replSetFresh "
+ "response: NumberInt32"));
+}
+
+TEST_F(FreshnessCheckerTest, ElectVetoed) {
+ // other responds with veto
+ startCapturingLogMessages();
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1"))));
+
+ std::vector<HostAndPort> hosts;
+ hosts.push_back(config.getMemberAt(1).getHostAndPort());
+
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10, 0), 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(
+ BSON("ok" << 1 << "id" << 2 << "set"
+ << "rs0"
+ << "who"
+ << "h1"
+ << "cfgver" << 1 << "veto" << true << "errmsg"
+ << "I'd rather you didn't"
+ << "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL())),
+ Milliseconds(8))));
}
-
- const BSONObj makeFreshRequest(const ReplicaSetConfig& rsConfig,
- Timestamp lastOpTimeApplied,
- int selfIndex) {
- const MemberConfig& myConfig = rsConfig.getMemberAt(selfIndex);
- return BSON("replSetFresh" << 1 <<
- "set" << rsConfig.getReplSetName() <<
- "opTime" << Date_t::fromMillisSinceEpoch(lastOpTimeApplied.asLL()) <<
- "who" << myConfig.getHostAndPort().toString() <<
- "cfgver" << rsConfig.getConfigVersion() <<
- "id" << myConfig.getId());
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+
+ stopCapturingLogMessages();
+
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "not electing self, h1:27017 would veto with "
+ "'I'd rather you didn't'"));
+}
+
+int findIdForMember(const ReplicaSetConfig& rsConfig, const HostAndPort& host) {
+ const MemberConfig* member = rsConfig.findMemberByHostAndPort(host);
+ ASSERT_TRUE(member != NULL) << "No host named " << host.toString() << " in config";
+ return member->getId();
+}
+
+TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestManyNodes) {
+ // one other responds as fresher than us
+ startCapturingLogMessages();
+ ReplicaSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1") << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3") << BSON("_id" << 5 << "host"
+ << "h4"))));
+
+ std::vector<HostAndPort> hosts;
+ for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
+ ++mem) {
+ hosts.push_back(mem->getHostAndPort());
}
- // This is necessary because the run method must be scheduled in the Replication Executor
- // for correct concurrency operation.
- void FreshnessCheckerTest::freshnessCheckerRunner(
- const ReplicationExecutor::CallbackArgs& data,
- const Timestamp& lastOpTimeApplied,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& hosts) {
-
- invariant(data.status.isOK());
- ReplicationExecutor* executor = dynamic_cast<ReplicationExecutor*>(data.executor);
- ASSERT(executor);
- StatusWith<ReplicationExecutor::EventHandle> evh = _checker->start(executor,
- lastOpTimeApplied,
- currentConfig,
- selfIndex,
- hosts);
- _checkerDoneEvent = assertGet(evh);
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10, 0), 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ unordered_set<HostAndPort> seen;
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const HostAndPort target = noi->getRequest().target;
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT(seen.insert(target).second) << "Already saw " << target;
+ BSONObjBuilder responseBuilder;
+ responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
+ << "rs0"
+ << "who" << target.toString() << "cfgver" << 1 << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL());
+ if (target.host() == "h1") {
+ responseBuilder << "fresher" << true;
+ }
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(responseBuilder.obj(), Milliseconds(8))));
}
-
- void FreshnessCheckerTest::startTest(const Timestamp& lastOpTimeApplied,
- const ReplicaSetConfig& currentConfig,
- int selfIndex,
- const std::vector<HostAndPort>& hosts) {
- _executor->wait(
- assertGet(
- _executor->scheduleWork(
- stdx::bind(&FreshnessCheckerTest::freshnessCheckerRunner,
- this,
- stdx::placeholders::_1,
- lastOpTimeApplied,
- currentConfig,
- selfIndex,
- hosts))));
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+ ASSERT_EQUALS(1, countLogLinesContaining("not electing self, we are not freshest"));
+}
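The tests above all drive the same mock-network choreography: enter the network, drain each ready request, schedule a canned reply at a virtual time, run the clock forward to that time, and only then wait on the checker. A minimal standalone sketch of that pattern follows; FakeNet, Request and Response are hypothetical stand-ins for illustration, not the real NetworkInterfaceMock API.

// Standalone sketch of the enter/drain/schedule/run pattern used in these tests.
// FakeNet, Request and Response are hypothetical; they mimic the shape of the
// mock-network calls (getNextReadyRequest, scheduleResponse, runUntil) only.
#include <cstdint>
#include <iostream>
#include <map>
#include <queue>
#include <string>
#include <utility>

struct Request { std::string target; };
struct Response { bool fresher = false; };

class FakeNet {
public:
    void addRequest(Request r) { _ready.push(std::move(r)); }
    bool hasReadyRequest() const { return !_ready.empty(); }
    Request getNextReadyRequest() { Request r = _ready.front(); _ready.pop(); return r; }
    // Queue a response to be "delivered" once the virtual clock reaches 'when'.
    void scheduleResponse(std::string target, int64_t when, Response resp) {
        _scheduled.emplace(when, std::make_pair(std::move(target), resp));
    }
    // Advance the virtual clock, delivering every response due by 'until'.
    void runUntil(int64_t until) {
        while (!_scheduled.empty() && _scheduled.begin()->first <= until) {
            const auto& [target, resp] = _scheduled.begin()->second;
            std::cout << target << (resp.fresher ? " claims to be fresher\n" : " is not fresher\n");
            _scheduled.erase(_scheduled.begin());
        }
        _now = until;
    }
    int64_t now() const { return _now; }

private:
    std::queue<Request> _ready;
    std::multimap<int64_t, std::pair<std::string, Response>> _scheduled;
    int64_t _now = 0;
};

int main() {
    FakeNet net;
    net.addRequest({"h1"});
    net.addRequest({"h2"});
    while (net.hasReadyRequest()) {
        Request r = net.getNextReadyRequest();
        net.scheduleResponse(r.target, /*when=*/10, Response{r.target == "h1"});
    }
    net.runUntil(10);  // every reply lands at the same virtual instant, as in the tests
    return 0;
}

Delivering all replies at one virtual instant is what lets the tests assert on _net->now() deterministically before waiting on the checker.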
+
+TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTimeManyNodes) {
+ // one other responds with a later optime than ours
+ startCapturingLogMessages();
+ ReplicaSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1") << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3") << BSON("_id" << 5 << "host"
+ << "h4"))));
+
+ std::vector<HostAndPort> hosts;
+ for (ReplicaSetConfig::MemberIterator mem = config.membersBegin(); mem != config.membersEnd();
+ ++mem) {
+ if (HostAndPort("h0") == mem->getHostAndPort()) {
+ continue;
+ }
+ hosts.push_back(mem->getHostAndPort());
}
- TEST_F(FreshnessCheckerTest, TwoNodes) {
- // Two nodes, we are node h1. We are freshest, but we tie with h2.
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- hosts.push_back(config.getMemberAt(1).getHostAndPort());
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(0,0), 0);
-
- startTest(Timestamp(0, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10, 0), 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ unordered_set<HostAndPort> seen;
+ _net->enterNetwork();
+
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const HostAndPort target = noi->getRequest().target;
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT(seen.insert(target).second) << "Already saw " << target;
+ BSONObjBuilder responseBuilder;
+ if (target.host() == "h4") {
+ responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
+ << "rs0"
+ << "who" << target.toString() << "cfgver" << 1 << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(20, 0).asLL());
_net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1 <<
- "id" << 2 <<
- "set" << "rs0" <<
- "who" << "h1" <<
- "cfgver" << 1 <<
- "opTime" << Date_t()),
- Milliseconds(8))));
+ noi,
+ startDate + Milliseconds(20),
+ ResponseStatus(RemoteCommandResponse(responseBuilder.obj(), Milliseconds(8))));
+ } else {
+ responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
+ << "rs0"
+ << "who" << target.toString() << "cfgver" << 1 << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(10, 0).asLL());
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(responseBuilder.obj(), Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FreshnessTie);
}
-
- TEST_F(FreshnessCheckerTest, ShuttingDown) {
- // Two nodes, we are node h1. Shutdown happens while we're scheduling remote commands.
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- hosts.push_back(config.getMemberAt(1).getHostAndPort());
-
- startTest(
- Timestamp(0, 0),
- config,
- 0,
- hosts);
- _executor->shutdown();
- waitOnChecker();
-
- // This seems less than ideal, but if we are shutting down, the next phase of election
- // cannot proceed anyway.
- ASSERT_EQUALS(shouldAbortElection(),FreshnessChecker::None);
-
+ _net->runUntil(startDate + Milliseconds(10));
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(0, countLogLinesContaining("not electing self, we are not freshest"));
+ _net->runUntil(startDate + Milliseconds(20));
+ ASSERT_EQUALS(startDate + Milliseconds(20), _net->now());
+ _net->exitNetwork();
+ waitOnChecker();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
+
+TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponseManyNodes) {
+ // one other responds with "opTime" field of non-Date value, causing not freshest
+ startCapturingLogMessages();
+ ReplicaSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1") << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3") << BSON("_id" << 5 << "host"
+ << "h4"))));
+
+ std::vector<HostAndPort> hosts;
+ for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
+ ++mem) {
+ hosts.push_back(mem->getHostAndPort());
}
- TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshest) {
- // other responds as fresher than us
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- hosts.push_back(config.getMemberAt(1).getHostAndPort());
-
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10,0), 0);
-
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1 <<
- "id" << 2 <<
- "set" << "rs0" <<
- "who" << "h1" <<
- "cfgver" << 1 <<
- "fresher" << true <<
- "opTime" << Date_t()),
- Milliseconds(8))));
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10, 0), 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ unordered_set<HostAndPort> seen;
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const HostAndPort target = noi->getRequest().target;
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT(seen.insert(target).second) << "Already saw " << target;
+ BSONObjBuilder responseBuilder;
+ responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
+ << "rs0"
+ << "who" << target.toString() << "cfgver" << 1;
+ if (target.host() == "h1") {
+ responseBuilder << "opTime" << 3;
+ } else {
+ responseBuilder << "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL());
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
-
- stopCapturingLogMessages();
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- ASSERT_EQUALS(1, countLogLinesContaining("not electing self, we are not freshest"));
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(responseBuilder.obj(), Milliseconds(8))));
+ }
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "wrong type for opTime argument in replSetFresh "
+ "response: NumberInt32"));
+}
+
+TEST_F(FreshnessCheckerTest, ElectVetoedManyNodes) {
+ // one other responds with veto
+ startCapturingLogMessages();
+ ReplicaSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1") << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3") << BSON("_id" << 5 << "host"
+ << "h4"))));
+
+ std::vector<HostAndPort> hosts;
+ for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
+ ++mem) {
+ hosts.push_back(mem->getHostAndPort());
}
- TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTime) {
- // other responds with a later optime than ours
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- hosts.push_back(config.getMemberAt(1).getHostAndPort());
-
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(0,0), 0);
-
- startTest(Timestamp(0, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1 <<
- "id" << 2 <<
- "set" << "rs0" <<
- "who" << "h1" <<
- "cfgver" << 1 <<
- "opTime" << Date_t::fromMillisSinceEpoch(
- Timestamp(10,0).asLL())),
- Milliseconds(8))));
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10, 0), 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ unordered_set<HostAndPort> seen;
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const HostAndPort target = noi->getRequest().target;
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT(seen.insert(target).second) << "Already saw " << target;
+ BSONObjBuilder responseBuilder;
+ responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
+ << "rs0"
+ << "who" << target.toString() << "cfgver" << 1 << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL());
+ if (target.host() == "h1") {
+ responseBuilder << "veto" << true << "errmsg"
+ << "I'd rather you didn't";
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
-
- stopCapturingLogMessages();
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(responseBuilder.obj(), Milliseconds(8))));
}
-
- TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
- // other responds with "opTime" field of non-Date value, causing not freshest
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- hosts.push_back(config.getMemberAt(1).getHostAndPort());
-
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10,0), 0);
-
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1 <<
- "id" << 2 <<
- "set" << "rs0" <<
- "who" << "h1" <<
- "cfgver" << 1 <<
- "opTime" << 3),
- Milliseconds(8))));
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "not electing self, h1:27017 would veto with "
+ "'I'd rather you didn't'"));
+}
+
+TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
+ // one other responds with veto and another responds with tie
+ startCapturingLogMessages();
+ ReplicaSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1") << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3") << BSON("_id" << 5 << "host"
+ << "h4"))));
+
+ std::vector<HostAndPort> hosts;
+ for (ReplicaSetConfig::MemberIterator mem = config.membersBegin(); mem != config.membersEnd();
+ ++mem) {
+ if (HostAndPort("h0") == mem->getHostAndPort()) {
+ continue;
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
-
- stopCapturingLogMessages();
-
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- ASSERT_EQUALS(1, countLogLinesContaining("wrong type for opTime argument in replSetFresh "
- "response: NumberInt32"));
+ hosts.push_back(mem->getHostAndPort());
}
- TEST_F(FreshnessCheckerTest, ElectVetoed) {
- // other responds with veto
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1"))));
-
- std::vector<HostAndPort> hosts;
- hosts.push_back(config.getMemberAt(1).getHostAndPort());
-
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10,0), 0);
-
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
+ const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10, 0), 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ unordered_set<HostAndPort> seen;
+ _net->enterNetwork();
+
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const HostAndPort target = noi->getRequest().target;
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT(seen.insert(target).second) << "Already saw " << target;
+ BSONObjBuilder responseBuilder;
+ if (target.host() == "h4") {
+ responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
+ << "rs0"
+ << "who" << target.toString() << "cfgver" << 1 << "veto" << true
+ << "errmsg"
+ << "I'd rather you didn't"
+ << "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(10, 0).asLL());
_net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1 <<
- "id" << 2 <<
- "set" << "rs0" <<
- "who" << "h1" <<
- "cfgver" << 1 <<
- "veto" << true <<
- "errmsg" << "I'd rather you didn't" <<
- "opTime" << Date_t::fromMillisSinceEpoch(
- Timestamp(0,0).asLL())),
- Milliseconds(8))));
+ noi,
+ startDate + Milliseconds(20),
+ ResponseStatus(RemoteCommandResponse(responseBuilder.obj(), Milliseconds(8))));
+ } else {
+ responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
+ << "rs0"
+ << "who" << target.toString() << "cfgver" << 1 << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(10, 0).asLL());
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(responseBuilder.obj(), Milliseconds(8))));
}
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
-
- stopCapturingLogMessages();
-
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- ASSERT_EQUALS(1, countLogLinesContaining("not electing self, h1:27017 would veto with "
- "'I'd rather you didn't'"));
}
-
- int findIdForMember(const ReplicaSetConfig& rsConfig, const HostAndPort& host) {
- const MemberConfig* member = rsConfig.findMemberByHostAndPort(host);
- ASSERT_TRUE(member != NULL) << "No host named " << host.toString() << " in config";
- return member->getId();
+ _net->runUntil(startDate + Milliseconds(10));
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ ASSERT_EQUALS(0,
+ countLogLinesContaining(
+ "not electing self, h4:27017 would veto with '"
+ "errmsg: \"I'd rather you didn't\"'"));
+ _net->runUntil(startDate + Milliseconds(20));
+ ASSERT_EQUALS(startDate + Milliseconds(20), _net->now());
+ _net->exitNetwork();
+ waitOnChecker();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "not electing self, h4:27017 would veto with "
+ "'I'd rather you didn't'"));
+}
+
+TEST_F(FreshnessCheckerTest, ElectManyNodesNotAllRespond) {
+ ReplicaSetConfig config =
+ assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1") << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3") << BSON("_id" << 5 << "host"
+ << "h4"))));
+
+ std::vector<HostAndPort> hosts;
+ for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
+ ++mem) {
+ hosts.push_back(mem->getHostAndPort());
}
- TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestManyNodes) {
- // one other responds as fresher than us
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1") <<
- BSON("_id" << 3 << "host" << "h2") <<
- BSON("_id" << 4 << "host" << "h3") <<
- BSON("_id" << 5 << "host" << "h4"))));
-
- std::vector<HostAndPort> hosts;
- for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
- mem != config.membersEnd();
- ++mem) {
- hosts.push_back(mem->getHostAndPort());
- }
-
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10,0), 0);
-
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- unordered_set<HostAndPort> seen;
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const HostAndPort target = noi->getRequest().target;
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT(seen.insert(target).second) << "Already saw " << target;
+ const Timestamp lastOpTimeApplied(10, 0);
+ const BSONObj freshRequest = makeFreshRequest(config, lastOpTimeApplied, 0);
+
+ startTest(Timestamp(10, 0), config, 0, hosts);
+ const Date_t startDate = _net->now();
+ unordered_set<HostAndPort> seen;
+ _net->enterNetwork();
+ for (size_t i = 0; i < hosts.size(); ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
+ const HostAndPort target = noi->getRequest().target;
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT(seen.insert(target).second) << "Already saw " << target;
+ if (target.host() == "h2" || target.host() == "h3") {
+ _net->scheduleResponse(noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
+ } else {
BSONObjBuilder responseBuilder;
- responseBuilder <<
- "ok" << 1 <<
- "id" << findIdForMember(config, target) <<
- "set" << "rs0" <<
- "who" << target.toString() <<
- "cfgver" << 1 <<
- "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(0,0).asLL());
- if (target.host() == "h1") {
- responseBuilder << "fresher" << true;
- }
+ responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
+ << "rs0"
+ << "who" << target.toString() << "cfgver" << 1 << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL());
_net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- responseBuilder.obj(),
- Milliseconds(8))));
- }
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
- stopCapturingLogMessages();
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- ASSERT_EQUALS(1, countLogLinesContaining("not electing self, we are not freshest"));
- }
-
- TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTimeManyNodes) {
- // one other responds with a later optime than ours
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1") <<
- BSON("_id" << 3 << "host" << "h2") <<
- BSON("_id" << 4 << "host" << "h3") <<
- BSON("_id" << 5 << "host" << "h4"))));
-
- std::vector<HostAndPort> hosts;
- for (ReplicaSetConfig::MemberIterator mem = config.membersBegin();
- mem != config.membersEnd();
- ++mem) {
- if (HostAndPort("h0") == mem->getHostAndPort()) {
- continue;
- }
- hosts.push_back(mem->getHostAndPort());
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(responseBuilder.obj(), Milliseconds(8))));
}
-
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10,0), 0);
-
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- unordered_set<HostAndPort> seen;
- _net->enterNetwork();
-
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const HostAndPort target = noi->getRequest().target;
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT(seen.insert(target).second) << "Already saw " << target;
- BSONObjBuilder responseBuilder;
- if (target.host() == "h4") {
- responseBuilder <<
- "ok" << 1 <<
- "id" << findIdForMember(config, target) <<
- "set" << "rs0" <<
- "who" << target.toString() <<
- "cfgver" << 1 <<
- "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(20,0).asLL());
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(20),
- ResponseStatus(RemoteCommandResponse(
- responseBuilder.obj(),
- Milliseconds(8))));
- }
- else {
- responseBuilder <<
- "ok" << 1 <<
- "id" << findIdForMember(config, target) <<
- "set" << "rs0" <<
- "who" << target.toString() <<
- "cfgver" << 1 <<
- "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(10,0).asLL());
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- responseBuilder.obj(),
- Milliseconds(8))));
- }
- }
- _net->runUntil(startDate + Milliseconds(10));
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- ASSERT_EQUALS(0, countLogLinesContaining("not electing self, we are not freshest"));
- _net->runUntil(startDate + Milliseconds(20));
- ASSERT_EQUALS(startDate + Milliseconds(20), _net->now());
- _net->exitNetwork();
- waitOnChecker();
- stopCapturingLogMessages();
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
}
+ _net->runUntil(startDate + Milliseconds(10));
+ _net->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
+ waitOnChecker();
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::None);
+}
+
+class FreshnessScatterGatherTest : public mongo::unittest::Test {
+public:
+ virtual void setUp() {
+ int selfConfigIndex = 0;
+ Timestamp lastOpTimeApplied(100, 0);
- TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponseManyNodes) {
- // one other responds with "opTime" field of non-Date value, causing not freshest
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1") <<
- BSON("_id" << 3 << "host" << "h2") <<
- BSON("_id" << 4 << "host" << "h3") <<
- BSON("_id" << 5 << "host" << "h4"))));
+ ReplicaSetConfig config;
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1") << BSON("_id" << 2 << "host"
+ << "host2"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
- mem != config.membersEnd();
- ++mem) {
+ mem != config.membersEnd();
+ ++mem) {
hosts.push_back(mem->getHostAndPort());
}
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10,0), 0);
-
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- unordered_set<HostAndPort> seen;
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const HostAndPort target = noi->getRequest().target;
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT(seen.insert(target).second) << "Already saw " << target;
- BSONObjBuilder responseBuilder;
- responseBuilder <<
- "ok" << 1 <<
- "id" << findIdForMember(config, target) <<
- "set" << "rs0" <<
- "who" << target.toString() <<
- "cfgver" << 1;
- if (target.host() == "h1") {
- responseBuilder << "opTime" << 3;
- }
- else {
- responseBuilder << "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(0,0).asLL());
- }
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- responseBuilder.obj(),
- Milliseconds(8))));
- }
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
- stopCapturingLogMessages();
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- ASSERT_EQUALS(1, countLogLinesContaining("wrong type for opTime argument in replSetFresh "
- "response: NumberInt32"));
+ _checker.reset(
+ new FreshnessChecker::Algorithm(lastOpTimeApplied, config, selfConfigIndex, hosts));
}
- TEST_F(FreshnessCheckerTest, ElectVetoedManyNodes) {
- // one other responds with veto
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1") <<
- BSON("_id" << 3 << "host" << "h2") <<
- BSON("_id" << 4 << "host" << "h3") <<
- BSON("_id" << 5 << "host" << "h4"))));
+ virtual void tearDown() {
+ _checker.reset(NULL);
+ }
- std::vector<HostAndPort> hosts;
- for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
- mem != config.membersEnd();
- ++mem) {
- hosts.push_back(mem->getHostAndPort());
- }
+protected:
+ bool hasReceivedSufficientResponses() {
+ return _checker->hasReceivedSufficientResponses();
+ }
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10,0), 0);
-
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- unordered_set<HostAndPort> seen;
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const HostAndPort target = noi->getRequest().target;
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT(seen.insert(target).second) << "Already saw " << target;
- BSONObjBuilder responseBuilder;
- responseBuilder <<
- "ok" << 1 <<
- "id" << findIdForMember(config, target) <<
- "set" << "rs0" <<
- "who" << target.toString() <<
- "cfgver" << 1 <<
- "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(0,0).asLL());
- if (target.host() == "h1") {
- responseBuilder << "veto" << true << "errmsg" << "I'd rather you didn't";
- }
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- responseBuilder.obj(),
- Milliseconds(8))));
- }
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
- stopCapturingLogMessages();
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- ASSERT_EQUALS(1, countLogLinesContaining("not electing self, h1:27017 would veto with "
- "'I'd rather you didn't'"));
+ void processResponse(const RemoteCommandRequest& request, const ResponseStatus& response) {
+ _checker->processResponse(request, response);
}
- TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
- // one other responds with veto and another responds with tie
- startCapturingLogMessages();
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1") <<
- BSON("_id" << 3 << "host" << "h2") <<
- BSON("_id" << 4 << "host" << "h3") <<
- BSON("_id" << 5 << "host" << "h4"))));
-
- std::vector<HostAndPort> hosts;
- for (ReplicaSetConfig::MemberIterator mem = config.membersBegin();
- mem != config.membersEnd();
- ++mem) {
- if (HostAndPort("h0") == mem->getHostAndPort()) {
- continue;
- }
- hosts.push_back(mem->getHostAndPort());
- }
+ FreshnessChecker::ElectionAbortReason shouldAbortElection() const {
+ return _checker->shouldAbortElection();
+ }
- const BSONObj freshRequest = makeFreshRequest(config, Timestamp(10,0), 0);
+ ResponseStatus lessFresh() {
+ BSONObjBuilder bb;
+ bb.append("ok", 1.0);
+ bb.appendDate("opTime", Date_t::fromMillisSinceEpoch(Timestamp(10, 0).asLL()));
+ return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
+ }
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- unordered_set<HostAndPort> seen;
- _net->enterNetwork();
+ ResponseStatus moreFreshViaOpTime() {
+ BSONObjBuilder bb;
+ bb.append("ok", 1.0);
+ bb.appendDate("opTime", Date_t::fromMillisSinceEpoch(Timestamp(110, 0).asLL()));
+ return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
+ }
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const HostAndPort target = noi->getRequest().target;
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT(seen.insert(target).second) << "Already saw " << target;
- BSONObjBuilder responseBuilder;
- if (target.host() == "h4") {
- responseBuilder <<
- "ok" << 1 <<
- "id" << findIdForMember(config, target) <<
- "set" << "rs0" <<
- "who" << target.toString() <<
- "cfgver" << 1 <<
- "veto" << true <<
- "errmsg" << "I'd rather you didn't" <<
- "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(10,0).asLL());
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(20),
- ResponseStatus(RemoteCommandResponse(
- responseBuilder.obj(),
- Milliseconds(8))));
- }
- else {
- responseBuilder <<
- "ok" << 1 <<
- "id" << findIdForMember(config, target) <<
- "set" << "rs0" <<
- "who" << target.toString() <<
- "cfgver" << 1 <<
- "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(10,0).asLL());
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- responseBuilder.obj(),
- Milliseconds(8))));
- }
- }
- _net->runUntil(startDate + Milliseconds(10));
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- ASSERT_EQUALS(0, countLogLinesContaining("not electing self, h4:27017 would veto with '"
- "errmsg: \"I'd rather you didn't\"'"));
- _net->runUntil(startDate + Milliseconds(20));
- ASSERT_EQUALS(startDate + Milliseconds(20), _net->now());
- _net->exitNetwork();
- waitOnChecker();
- stopCapturingLogMessages();
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- ASSERT_EQUALS(1, countLogLinesContaining("not electing self, h4:27017 would veto with "
- "'I'd rather you didn't'"));
+ ResponseStatus wrongTypeForOpTime() {
+ BSONObjBuilder bb;
+ bb.append("ok", 1.0);
+ bb.append("opTime", std::string("several minutes ago"));
+ return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
}
- TEST_F(FreshnessCheckerTest, ElectManyNodesNotAllRespond) {
- ReplicaSetConfig config = assertMakeRSConfig(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h0") <<
- BSON("_id" << 2 << "host" << "h1") <<
- BSON("_id" << 3 << "host" << "h2") <<
- BSON("_id" << 4 << "host" << "h3") <<
- BSON("_id" << 5 << "host" << "h4"))));
+ ResponseStatus unauthorized() {
+ BSONObjBuilder bb;
+ bb.append("ok", 0.0);
+ bb.append("code", ErrorCodes::Unauthorized);
+ bb.append("errmsg", "Unauthorized");
+ return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
+ }
- std::vector<HostAndPort> hosts;
- for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
- mem != config.membersEnd();
- ++mem) {
- hosts.push_back(mem->getHostAndPort());
- }
+ ResponseStatus tiedForFreshness() {
+ BSONObjBuilder bb;
+ bb.append("ok", 1.0);
+ bb.appendDate("opTime", Date_t::fromMillisSinceEpoch(Timestamp(100, 0).asLL()));
+ return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
+ }
- const Timestamp lastOpTimeApplied(10,0);
- const BSONObj freshRequest = makeFreshRequest(config, lastOpTimeApplied, 0);
-
- startTest(Timestamp(10, 0), config, 0, hosts);
- const Date_t startDate = _net->now();
- unordered_set<HostAndPort> seen;
- _net->enterNetwork();
- for (size_t i = 0; i < hosts.size(); ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
- const HostAndPort target = noi->getRequest().target;
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
- ASSERT(seen.insert(target).second) << "Already saw " << target;
- if (target.host() == "h2" || target.host() == "h3") {
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
- }
- else {
- BSONObjBuilder responseBuilder;
- responseBuilder <<
- "ok" << 1 <<
- "id" << findIdForMember(config, target) <<
- "set" << "rs0" <<
- "who" << target.toString() <<
- "cfgver" << 1 <<
- "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(0,0).asLL());
- _net->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- responseBuilder.obj(),
- Milliseconds(8))));
- }
- }
- _net->runUntil(startDate + Milliseconds(10));
- _net->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
- waitOnChecker();
- ASSERT_EQUALS(shouldAbortElection(),FreshnessChecker::None);
+ ResponseStatus moreFresh() {
+ return ResponseStatus(NetworkInterfaceMock::Response(BSON("ok" << 1.0 << "fresher" << true),
+ Milliseconds(10)));
+ }
+
+ ResponseStatus veto() {
+ return ResponseStatus(
+ NetworkInterfaceMock::Response(BSON("ok" << 1.0 << "veto" << true << "errmsg"
+ << "vetoed!"),
+ Milliseconds(10)));
}
- class FreshnessScatterGatherTest : public mongo::unittest::Test {
- public:
- virtual void setUp() {
- int selfConfigIndex = 0;
- Timestamp lastOpTimeApplied(100, 0);
-
- ReplicaSetConfig config;
- config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host0") <<
- BSON("_id" << 1 << "host" << "host1") <<
- BSON("_id" << 2 << "host" << "host2"))));
-
- std::vector<HostAndPort> hosts;
- for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
- mem != config.membersEnd();
- ++mem) {
- hosts.push_back(mem->getHostAndPort());
- }
-
- _checker.reset(new FreshnessChecker::Algorithm(lastOpTimeApplied,
- config,
- selfConfigIndex,
- hosts));
+ RemoteCommandRequest requestFrom(std::string hostname) {
+ return RemoteCommandRequest(HostAndPort(hostname),
+ "", // the non-hostname fields do not matter in Freshness
+ BSONObj(),
+ Milliseconds(0));
+ }
- }
+private:
+ unique_ptr<FreshnessChecker::Algorithm> _checker;
+};
- virtual void tearDown() {
- _checker.reset(NULL);
- }
+TEST_F(FreshnessScatterGatherTest, BothNodesLessFresh) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- protected:
- bool hasReceivedSufficientResponses() {
- return _checker->hasReceivedSufficientResponses();
- }
+ processResponse(requestFrom("host1"), lessFresh());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- void processResponse(const RemoteCommandRequest& request, const ResponseStatus& response) {
- _checker->processResponse(request, response);
- }
+ processResponse(requestFrom("host2"), lessFresh());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::None);
+}
- FreshnessChecker::ElectionAbortReason shouldAbortElection() const {
- return _checker->shouldAbortElection();
- }
+TEST_F(FreshnessScatterGatherTest, FirstNodeFresher) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- ResponseStatus lessFresh() {
- BSONObjBuilder bb;
- bb.append("ok", 1.0);
- bb.appendDate("opTime", Date_t::fromMillisSinceEpoch(Timestamp(10, 0).asLL()));
- return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
- }
+ processResponse(requestFrom("host1"), moreFresh());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- ResponseStatus moreFreshViaOpTime() {
- BSONObjBuilder bb;
- bb.append("ok", 1.0);
- bb.appendDate("opTime", Date_t::fromMillisSinceEpoch(Timestamp(110, 0).asLL()));
- return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
- }
+TEST_F(FreshnessScatterGatherTest, FirstNodeFresherViaOpTime) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- ResponseStatus wrongTypeForOpTime() {
- BSONObjBuilder bb;
- bb.append("ok", 1.0);
- bb.append("opTime", std::string("several minutes ago"));
- return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
- }
+ processResponse(requestFrom("host1"), moreFreshViaOpTime());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- ResponseStatus unauthorized() {
- BSONObjBuilder bb;
- bb.append("ok", 0.0);
- bb.append("code", ErrorCodes::Unauthorized);
- bb.append("errmsg", "Unauthorized");
- return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
- }
+TEST_F(FreshnessScatterGatherTest, FirstNodeVetoes) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- ResponseStatus tiedForFreshness() {
- BSONObjBuilder bb;
- bb.append("ok", 1.0);
- bb.appendDate("opTime", Date_t::fromMillisSinceEpoch(Timestamp(100, 0).asLL()));
- return ResponseStatus(NetworkInterfaceMock::Response(bb.obj(), Milliseconds(10)));
- }
+ processResponse(requestFrom("host1"), veto());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- ResponseStatus moreFresh() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("ok" << 1.0 <<
- "fresher" << true),
- Milliseconds(10)));
- }
+TEST_F(FreshnessScatterGatherTest, FirstNodeWrongTypeForOpTime) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- ResponseStatus veto() {
- return ResponseStatus(NetworkInterfaceMock::Response(BSON("ok" << 1.0 <<
- "veto" << true <<
- "errmsg" << "vetoed!"),
- Milliseconds(10)));
- }
+ processResponse(requestFrom("host1"), wrongTypeForOpTime());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- RemoteCommandRequest requestFrom(std::string hostname) {
- return RemoteCommandRequest(HostAndPort(hostname),
- "", // the non-hostname fields do not matter in Freshness
- BSONObj(),
- Milliseconds(0));
- }
- private:
- unique_ptr<FreshnessChecker::Algorithm> _checker;
- };
-
- TEST_F(FreshnessScatterGatherTest, BothNodesLessFresh) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), lessFresh());
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host2"), lessFresh());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(),FreshnessChecker::None);
- }
+TEST_F(FreshnessScatterGatherTest, FirstNodeTiedForFreshness) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- TEST_F(FreshnessScatterGatherTest, FirstNodeFresher) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), moreFresh());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+ processResponse(requestFrom("host1"), tiedForFreshness());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- TEST_F(FreshnessScatterGatherTest, FirstNodeFresherViaOpTime) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), moreFreshViaOpTime());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+ processResponse(requestFrom("host2"), lessFresh());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FreshnessTie);
+}
- TEST_F(FreshnessScatterGatherTest, FirstNodeVetoes) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), veto());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+TEST_F(FreshnessScatterGatherTest, FirstNodeTiedAndSecondFresher) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- TEST_F(FreshnessScatterGatherTest, FirstNodeWrongTypeForOpTime) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), wrongTypeForOpTime());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+ processResponse(requestFrom("host1"), tiedForFreshness());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- TEST_F(FreshnessScatterGatherTest, FirstNodeTiedForFreshness) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), tiedForFreshness());
- ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host2"), moreFresh());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- processResponse(requestFrom("host2"), lessFresh());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FreshnessTie);
- }
+TEST_F(FreshnessScatterGatherTest, FirstNodeTiedAndSecondFresherViaOpTime) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- TEST_F(FreshnessScatterGatherTest, FirstNodeTiedAndSecondFresher) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), tiedForFreshness());
- ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), tiedForFreshness());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), moreFresh());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+ processResponse(requestFrom("host2"), moreFreshViaOpTime());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- TEST_F(FreshnessScatterGatherTest, FirstNodeTiedAndSecondFresherViaOpTime) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), tiedForFreshness());
- ASSERT_FALSE(hasReceivedSufficientResponses());
+TEST_F(FreshnessScatterGatherTest, FirstNodeTiedAndSecondVetoes) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), moreFreshViaOpTime());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+ processResponse(requestFrom("host1"), tiedForFreshness());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- TEST_F(FreshnessScatterGatherTest, FirstNodeTiedAndSecondVetoes) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), tiedForFreshness());
- ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host2"), veto());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- processResponse(requestFrom("host2"), veto());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+TEST_F(FreshnessScatterGatherTest, FirstNodeTiedAndSecondWrongTypeForOpTime) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- TEST_F(FreshnessScatterGatherTest, FirstNodeTiedAndSecondWrongTypeForOpTime) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
-
- processResponse(requestFrom("host1"), tiedForFreshness());
- ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), tiedForFreshness());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), wrongTypeForOpTime());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+ processResponse(requestFrom("host2"), wrongTypeForOpTime());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- TEST_F(FreshnessScatterGatherTest, FirstNodeLessFreshAndSecondWrongTypeForOpTime) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
+TEST_F(FreshnessScatterGatherTest, FirstNodeLessFreshAndSecondWrongTypeForOpTime) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), lessFresh());
- ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), lessFresh());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), wrongTypeForOpTime());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+ processResponse(requestFrom("host2"), wrongTypeForOpTime());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- TEST_F(FreshnessScatterGatherTest, SecondNodeTiedAndFirstWrongTypeForOpTime) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
+TEST_F(FreshnessScatterGatherTest, SecondNodeTiedAndFirstWrongTypeForOpTime) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), wrongTypeForOpTime());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
- }
+ processResponse(requestFrom("host2"), wrongTypeForOpTime());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
+}
- TEST_F(FreshnessScatterGatherTest, NotEnoughVotersDueNetworkErrors) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
+TEST_F(FreshnessScatterGatherTest, NotEnoughVotersDueNetworkErrors) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"),
- ResponseStatus(Status(ErrorCodes::NetworkTimeout, "")));
- ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), ResponseStatus(Status(ErrorCodes::NetworkTimeout, "")));
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"),
- ResponseStatus(Status(ErrorCodes::NetworkTimeout, "")));
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::QuorumUnreachable);
- }
+ processResponse(requestFrom("host2"), ResponseStatus(Status(ErrorCodes::NetworkTimeout, "")));
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::QuorumUnreachable);
+}
- TEST_F(FreshnessScatterGatherTest, NotEnoughVotersDueToUnauthorized) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
+TEST_F(FreshnessScatterGatherTest, NotEnoughVotersDueToUnauthorized) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), unauthorized());
- ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), unauthorized());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host2"), unauthorized());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::QuorumUnreachable);
- }
+ processResponse(requestFrom("host2"), unauthorized());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::QuorumUnreachable);
+}
} // namespace
} // namespace repl
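Taken together, the scatter-gather tests above pin down how a single replSetFresh reply is classified: an explicit fresher or veto flag, a missing or wrong-typed opTime, and a strictly later opTime all abort the election, while an equal opTime only records a tie. A compact standalone sketch of those rules, assuming a simplified SimpleResponse struct in place of the real BSON parsing and leaving the quorum accounting for failed or unauthorized responses out of scope:

// Standalone sketch of the per-response freshness rules exercised above.
// SimpleResponse is a hypothetical stand-in for a parsed replSetFresh reply.
#include <cassert>
#include <cstdint>
#include <optional>

enum class AbortReason {
    None,
    FresherNodeFound,
    FreshnessTie,
    QuorumUnreachable  // reached only via quorum accounting, not modeled here
};

struct SimpleResponse {
    bool fresher = false;
    bool veto = false;
    std::optional<int64_t> opTime;  // empty models a missing or wrong-typed field
};

AbortReason classify(const SimpleResponse& resp, int64_t ourOpTime) {
    if (resp.fresher || resp.veto)
        return AbortReason::FresherNodeFound;  // explicit "do not elect yourself"
    if (!resp.opTime)
        return AbortReason::FresherNodeFound;  // malformed opTime counts as fresher
    if (*resp.opTime > ourOpTime)
        return AbortReason::FresherNodeFound;  // a strictly later opTime wins
    if (*resp.opTime == ourOpTime)
        return AbortReason::FreshnessTie;      // equal opTime is a tie to break elsewhere
    return AbortReason::None;
}

int main() {
    const int64_t ours = 100;
    assert(classify({false, false, 10}, ours) == AbortReason::None);
    assert(classify({true, false, 10}, ours) == AbortReason::FresherNodeFound);
    assert(classify({false, true, 10}, ours) == AbortReason::FresherNodeFound);
    assert(classify({false, false, std::nullopt}, ours) == AbortReason::FresherNodeFound);
    assert(classify({false, false, 110}, ours) == AbortReason::FresherNodeFound);
    assert(classify({false, false, 100}, ours) == AbortReason::FreshnessTie);
    return 0;
}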
diff --git a/src/mongo/db/repl/handshake_args.cpp b/src/mongo/db/repl/handshake_args.cpp
index db815ee3aa2..2ceae3df86e 100644
--- a/src/mongo/db/repl/handshake_args.cpp
+++ b/src/mongo/db/repl/handshake_args.cpp
@@ -40,75 +40,65 @@ namespace repl {
namespace {
- const std::string kRIDFieldName = "handshake";
- // TODO(danneberg) remove after 3.0 since this field is only allowed for backwards compatibility
- const std::string kOldMemberConfigFieldName = "config";
- const std::string kMemberIdFieldName = "member";
-
- const std::string kLegalHandshakeFieldNames[] = {
- kRIDFieldName,
- kOldMemberConfigFieldName,
- kMemberIdFieldName
- };
-
-} // namespace
-
- HandshakeArgs::HandshakeArgs() :
- _hasRid(false),
- _hasMemberId(false),
- _rid(OID()),
- _memberId(-1) {}
-
- Status HandshakeArgs::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("HandshakeArgs",
- argsObj,
- kLegalHandshakeFieldNames);
- if (!status.isOK())
+const std::string kRIDFieldName = "handshake";
+// TODO(danneberg) remove after 3.0 since this field is only allowed for backwards compatibility
+const std::string kOldMemberConfigFieldName = "config";
+const std::string kMemberIdFieldName = "member";
+
+const std::string kLegalHandshakeFieldNames[] = {
+ kRIDFieldName, kOldMemberConfigFieldName, kMemberIdFieldName};
+
+} // namespace
+
+HandshakeArgs::HandshakeArgs() : _hasRid(false), _hasMemberId(false), _rid(OID()), _memberId(-1) {}
+
+Status HandshakeArgs::initialize(const BSONObj& argsObj) {
+ Status status = bsonCheckOnlyHasFields("HandshakeArgs", argsObj, kLegalHandshakeFieldNames);
+ if (!status.isOK())
+ return status;
+
+ BSONElement oid;
+ status = bsonExtractTypedField(argsObj, kRIDFieldName, jstOID, &oid);
+ if (!status.isOK())
+ return status;
+ _rid = oid.OID();
+ _hasRid = true;
+
+ status = bsonExtractIntegerField(argsObj, kMemberIdFieldName, &_memberId);
+ if (!status.isOK()) {
+ // field not necessary for master slave, do not return NoSuchKey Error
+ if (status != ErrorCodes::NoSuchKey) {
return status;
-
- BSONElement oid;
- status = bsonExtractTypedField(argsObj, kRIDFieldName, jstOID, &oid);
- if (!status.isOK())
- return status;
- _rid = oid.OID();
- _hasRid = true;
-
- status = bsonExtractIntegerField(argsObj, kMemberIdFieldName, &_memberId);
- if (!status.isOK()) {
- // field not necessary for master slave, do not return NoSuchKey Error
- if (status != ErrorCodes::NoSuchKey) {
- return status;
- }
- _memberId = -1;
- }
- else {
- _hasMemberId = true;
}
-
- return Status::OK();
- }
-
- bool HandshakeArgs::isInitialized() const {
- return _hasRid;
- }
-
- void HandshakeArgs::setRid(const OID& newVal) {
- _rid = newVal;
- _hasRid = true;
- }
-
- void HandshakeArgs::setMemberId(long long newVal) {
- _memberId = newVal;
+ _memberId = -1;
+ } else {
_hasMemberId = true;
}
- BSONObj HandshakeArgs::toBSON() const {
- invariant(isInitialized());
- BSONObjBuilder builder;
- builder.append(kRIDFieldName, _rid);
- builder.append(kMemberIdFieldName, _memberId);
- return builder.obj();
- }
+ return Status::OK();
+}
+
+bool HandshakeArgs::isInitialized() const {
+ return _hasRid;
+}
+
+void HandshakeArgs::setRid(const OID& newVal) {
+ _rid = newVal;
+ _hasRid = true;
+}
+
+void HandshakeArgs::setMemberId(long long newVal) {
+ _memberId = newVal;
+ _hasMemberId = true;
+}
+
+BSONObj HandshakeArgs::toBSON() const {
+ invariant(isInitialized());
+ BSONObjBuilder builder;
+ builder.append(kRIDFieldName, _rid);
+ builder.append(kMemberIdFieldName, _memberId);
+ return builder.obj();
+}
} // namespace repl
} // namespace mongo
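The rewritten initialize() above keeps the original contract: the "handshake" OID is mandatory, while a missing "member" field is tolerated (the NoSuchKey branch) and defaults to -1. A small standalone sketch of that required-versus-optional extraction, using a std::map in place of the BSONObj and a bool/errmsg pair in place of Status; the names below are illustrative only:

// Standalone sketch of required-vs-optional field handling, mirroring the
// shape of HandshakeArgs::initialize() without the BSON and Status plumbing.
#include <iostream>
#include <map>
#include <string>

struct ParsedHandshake {
    std::string rid;          // required
    long long memberId = -1;  // optional, -1 when absent
    bool hasMemberId = false;
};

bool initializeHandshake(const std::map<std::string, std::string>& args,
                         ParsedHandshake* out,
                         std::string* errmsg) {
    const auto ridIt = args.find("handshake");
    if (ridIt == args.end()) {
        *errmsg = "missing required field 'handshake'";
        return false;  // a missing required field fails the whole parse
    }
    out->rid = ridIt->second;

    const auto memberIt = args.find("member");
    if (memberIt == args.end()) {
        out->memberId = -1;       // tolerated, mirrors the NoSuchKey branch
        out->hasMemberId = false;
    } else {
        out->memberId = std::stoll(memberIt->second);
        out->hasMemberId = true;
    }
    return true;
}

int main() {
    ParsedHandshake parsed;
    std::string err;
    if (initializeHandshake({{"handshake", "rid-abc123"}, {"member", "2"}}, &parsed, &err)) {
        std::cout << "rid=" << parsed.rid << " member=" << parsed.memberId << "\n";
    }
    return 0;
}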
diff --git a/src/mongo/db/repl/handshake_args.h b/src/mongo/db/repl/handshake_args.h
index b0d442aaaf6..b83bef87842 100644
--- a/src/mongo/db/repl/handshake_args.h
+++ b/src/mongo/db/repl/handshake_args.h
@@ -32,64 +32,72 @@
namespace mongo {
- class Status;
+class Status;
namespace repl {
+/**
+ * Arguments to the handshake command.
+ */
+class HandshakeArgs {
+public:
+ HandshakeArgs();
+
+ /**
+ * Initializes this HandshakeArgs from the contents of args.
+ */
+ Status initialize(const BSONObj& argsObj);
+
+ /**
+ * Returns true if all required fields have been initialized.
+ */
+ bool isInitialized() const;
+
+ /**
+ * Gets the _id of the sender in their ReplSetConfig.
+ */
+ long long getMemberId() const {
+ return _memberId;
+ }
+
/**
- * Arguments to the handshake command.
+ * Gets the unique identifier of the sender, which is used to track replication progress.
*/
- class HandshakeArgs {
- public:
- HandshakeArgs();
-
- /**
- * Initializes this HandshakeArgs from the contents of args.
- */
- Status initialize(const BSONObj& argsObj);
-
- /**
- * Returns true if all required fields have been initialized.
- */
- bool isInitialized() const;
-
- /**
- * Gets the _id of the sender in their ReplSetConfig.
- */
- long long getMemberId() const { return _memberId; }
-
- /**
- * Gets the unique identifier of the sender, which is used to track replication progress.
- */
- OID getRid() const { return _rid; }
-
- /**
- * The below methods check whether or not value in the method name has been set.
- */
- bool hasRid() { return _hasRid; };
- bool hasMemberId() { return _hasMemberId; };
-
- /**
- * The below methods set the value in the method name to 'newVal'.
- */
- void setRid(const OID& newVal);
- void setMemberId(long long newVal);
-
- /**
- * Returns a BSONified version of the object.
- * Should only be called if the mandatory fields have been set.
- * Optional fields are only included if they have been set.
- */
- BSONObj toBSON() const;
-
- private:
- bool _hasRid;
- bool _hasMemberId;
-
- // look at the body of the isInitialized() function to see which fields are mandatory
- OID _rid;
- long long _memberId;
+ OID getRid() const {
+ return _rid;
+ }
+
+ /**
+     * The below methods check whether or not the value in the method name has been set.
+ */
+ bool hasRid() {
+ return _hasRid;
};
+ bool hasMemberId() {
+ return _hasMemberId;
+ };
+
+ /**
+ * The below methods set the value in the method name to 'newVal'.
+ */
+ void setRid(const OID& newVal);
+ void setMemberId(long long newVal);
+
+ /**
+ * Returns a BSONified version of the object.
+ * Should only be called if the mandatory fields have been set.
+ * Optional fields are only included if they have been set.
+ */
+ BSONObj toBSON() const;
+
+private:
+ bool _hasRid;
+ bool _hasMemberId;
+
+ // look at the body of the isInitialized() function to see which fields are mandatory
+ OID _rid;
+ long long _memberId;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
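
As a quick orientation to the reformatted HandshakeArgs API above, here is a minimal usage sketch. Only the members declared in handshake_args.h (setRid, setMemberId, toBSON, initialize, isInitialized, hasMemberId) are taken from the header; the include paths, the helper name, and the chosen member id are assumptions for illustration.

#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/oid.h"
#include "mongo/db/repl/handshake_args.h"

namespace {
// Round-trip sketch: serialize a HandshakeArgs with toBSON() and parse the
// resulting document back with initialize(), as the handshake command would.
bool handshakeArgsRoundTripSketch() {
    mongo::repl::HandshakeArgs args;
    args.setRid(mongo::OID::gen());  // the only field isInitialized() requires
    args.setMemberId(2);             // optional; master/slave senders omit it
    const mongo::BSONObj doc = args.toBSON();

    mongo::repl::HandshakeArgs parsed;
    const mongo::Status status = parsed.initialize(doc);
    return status.isOK() && parsed.isInitialized() && parsed.hasMemberId();
}
}  // namespace
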
diff --git a/src/mongo/db/repl/heartbeat_response_action.cpp b/src/mongo/db/repl/heartbeat_response_action.cpp
index 49ed33c4780..385e250329d 100644
--- a/src/mongo/db/repl/heartbeat_response_action.cpp
+++ b/src/mongo/db/repl/heartbeat_response_action.cpp
@@ -33,41 +33,41 @@
namespace mongo {
namespace repl {
- HeartbeatResponseAction HeartbeatResponseAction::makeNoAction() {
- return HeartbeatResponseAction();
- }
+HeartbeatResponseAction HeartbeatResponseAction::makeNoAction() {
+ return HeartbeatResponseAction();
+}
- HeartbeatResponseAction HeartbeatResponseAction::makeReconfigAction() {
- HeartbeatResponseAction result;
- result._action = Reconfig;
- return result;
- }
+HeartbeatResponseAction HeartbeatResponseAction::makeReconfigAction() {
+ HeartbeatResponseAction result;
+ result._action = Reconfig;
+ return result;
+}
- HeartbeatResponseAction HeartbeatResponseAction::makeElectAction() {
- HeartbeatResponseAction result;
- result._action = StartElection;
- return result;
- }
+HeartbeatResponseAction HeartbeatResponseAction::makeElectAction() {
+ HeartbeatResponseAction result;
+ result._action = StartElection;
+ return result;
+}
- HeartbeatResponseAction HeartbeatResponseAction::makeStepDownSelfAction(int primaryIndex) {
- HeartbeatResponseAction result;
- result._action = StepDownSelf;
- result._primaryIndex = primaryIndex;
- return result;
- }
+HeartbeatResponseAction HeartbeatResponseAction::makeStepDownSelfAction(int primaryIndex) {
+ HeartbeatResponseAction result;
+ result._action = StepDownSelf;
+ result._primaryIndex = primaryIndex;
+ return result;
+}
- HeartbeatResponseAction HeartbeatResponseAction::makeStepDownRemoteAction(int primaryIndex) {
- HeartbeatResponseAction result;
- result._action = StepDownRemotePrimary;
- result._primaryIndex = primaryIndex;
- return result;
- }
+HeartbeatResponseAction HeartbeatResponseAction::makeStepDownRemoteAction(int primaryIndex) {
+ HeartbeatResponseAction result;
+ result._action = StepDownRemotePrimary;
+ result._primaryIndex = primaryIndex;
+ return result;
+}
- HeartbeatResponseAction::HeartbeatResponseAction() : _action(NoAction), _primaryIndex(-1) {}
+HeartbeatResponseAction::HeartbeatResponseAction() : _action(NoAction), _primaryIndex(-1) {}
- void HeartbeatResponseAction::setNextHeartbeatStartDate(Date_t when) {
- _nextHeartbeatStartDate = when;
- }
+void HeartbeatResponseAction::setNextHeartbeatStartDate(Date_t when) {
+ _nextHeartbeatStartDate = when;
+}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/heartbeat_response_action.h b/src/mongo/db/repl/heartbeat_response_action.h
index 55c2d459920..f45b3668a91 100644
--- a/src/mongo/db/repl/heartbeat_response_action.h
+++ b/src/mongo/db/repl/heartbeat_response_action.h
@@ -33,88 +33,88 @@
namespace mongo {
namespace repl {
+/**
+ * Description of actions taken in response to a heartbeat.
+ *
+ * This includes when to schedule the next heartbeat to a target, and any other actions to
+ * take, such as scheduling an election or stepping down as primary.
+ */
+class HeartbeatResponseAction {
+public:
+ /**
+ * Actions taken based on heartbeat responses
+ */
+ enum Action { NoAction, Reconfig, StartElection, StepDownSelf, StepDownRemotePrimary };
+
+ /**
+ * Makes a new action representing doing nothing.
+ */
+ static HeartbeatResponseAction makeNoAction();
+
+ /**
+ * Makes a new action representing the instruction to reconfigure the current node.
+ */
+ static HeartbeatResponseAction makeReconfigAction();
+
+ /**
+ * Makes a new action telling the current node to attempt to elect itself primary.
+ */
+ static HeartbeatResponseAction makeElectAction();
+
+ /**
+ * Makes a new action telling the current node to step down as primary.
+ *
+ * It is an error to call this with primaryIndex != the index of the current node.
+ */
+ static HeartbeatResponseAction makeStepDownSelfAction(int primaryIndex);
+
/**
- * Description of actions taken in response to a heartbeat.
+ * Makes a new action telling the current node to ask the specified remote node to step
+ * down as primary.
*
- * This includes when to schedule the next heartbeat to a target, and any other actions to
- * take, such as scheduling an election or stepping down as primary.
+ * It is an error to call this with primaryIndex == the index of the current node.
+ */
+ static HeartbeatResponseAction makeStepDownRemoteAction(int primaryIndex);
+
+ /**
+ * Construct an action with unspecified action and a next heartbeat start date in the
+ * past.
+ */
+ HeartbeatResponseAction();
+
+ /**
+ * Sets the date at which the next heartbeat should be scheduled.
+ */
+ void setNextHeartbeatStartDate(Date_t when);
+
+ /**
+ * Gets the action type of this action.
+ */
+ Action getAction() const {
+ return _action;
+ }
+
+ /**
+ * Gets the time at which the next heartbeat should be scheduled. If the
+ * time is not in the future, the next heartbeat should be scheduled immediately.
+ */
+ Date_t getNextHeartbeatStartDate() const {
+ return _nextHeartbeatStartDate;
+ }
+
+ /**
+     * If getAction() returns StepDownSelf or StepDownRemotePrimary, this is the index
+ * in the current replica set config of the node that ought to step down.
*/
- class HeartbeatResponseAction {
- public:
- /**
- * Actions taken based on heartbeat responses
- */
- enum Action {
- NoAction,
- Reconfig,
- StartElection,
- StepDownSelf,
- StepDownRemotePrimary
- };
-
- /**
- * Makes a new action representing doing nothing.
- */
- static HeartbeatResponseAction makeNoAction();
-
- /**
- * Makes a new action representing the instruction to reconfigure the current node.
- */
- static HeartbeatResponseAction makeReconfigAction();
-
- /**
- * Makes a new action telling the current node to attempt to elect itself primary.
- */
- static HeartbeatResponseAction makeElectAction();
-
- /**
- * Makes a new action telling the current node to step down as primary.
- *
- * It is an error to call this with primaryIndex != the index of the current node.
- */
- static HeartbeatResponseAction makeStepDownSelfAction(int primaryIndex);
-
- /**
- * Makes a new action telling the current node to ask the specified remote node to step
- * down as primary.
- *
- * It is an error to call this with primaryIndex == the index of the current node.
- */
- static HeartbeatResponseAction makeStepDownRemoteAction(int primaryIndex);
-
- /**
- * Construct an action with unspecified action and a next heartbeat start date in the
- * past.
- */
- HeartbeatResponseAction();
-
- /**
- * Sets the date at which the next heartbeat should be scheduled.
- */
- void setNextHeartbeatStartDate(Date_t when);
-
- /**
- * Gets the action type of this action.
- */
- Action getAction() const { return _action; }
-
- /**
- * Gets the time at which the next heartbeat should be scheduled. If the
- * time is not in the future, the next heartbeat should be scheduled immediately.
- */
- Date_t getNextHeartbeatStartDate() const { return _nextHeartbeatStartDate; }
-
- /**
- * If getAction() returns StepDownSelf or StepDownPrimary, this is the index
- * in the current replica set config of the node that ought to step down.
- */
- int getPrimaryConfigIndex() const { return _primaryIndex; }
-
- private:
- Action _action;
- int _primaryIndex;
- Date_t _nextHeartbeatStartDate;
- };
+ int getPrimaryConfigIndex() const {
+ return _primaryIndex;
+ }
+
+private:
+ Action _action;
+ int _primaryIndex;
+ Date_t _nextHeartbeatStartDate;
+};
} // namespace repl
} // namespace mongo
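
HeartbeatResponseAction, shown above, is a plain value type, so a caller typically switches on getAction(). The sketch below uses only the enum values, factory functions, and accessors declared in heartbeat_response_action.h; the handler bodies are placeholders, not the actual replication coordinator logic, and the helper name is an assumption.

#include "mongo/db/repl/heartbeat_response_action.h"

namespace {
// Illustrative dispatch on a heartbeat response action. The real consumer is
// the replication coordinator; the cases here only note what each action means.
void handleHeartbeatActionSketch(const mongo::repl::HeartbeatResponseAction& action) {
    typedef mongo::repl::HeartbeatResponseAction HBA;
    switch (action.getAction()) {
        case HBA::NoAction:
            break;  // nothing beyond scheduling the next heartbeat
        case HBA::Reconfig:
            // the heartbeat indicated a newer config; trigger a reconfig
            break;
        case HBA::StartElection:
            // this node should try to elect itself primary
            break;
        case HBA::StepDownSelf:
        case HBA::StepDownRemotePrimary:
            // getPrimaryConfigIndex() names the node that ought to step down
            break;
    }
    // getNextHeartbeatStartDate() tells the caller when to send the next
    // heartbeat; a date in the past means "immediately".
}
}  // namespace

For example, makeStepDownSelfAction(myIndex) yields an action whose getAction() is StepDownSelf and whose getPrimaryConfigIndex() is myIndex.
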
diff --git a/src/mongo/db/repl/initial_sync.cpp b/src/mongo/db/repl/initial_sync.cpp
index 494094862ba..0c6695c15a3 100644
--- a/src/mongo/db/repl/initial_sync.cpp
+++ b/src/mongo/db/repl/initial_sync.cpp
@@ -39,24 +39,23 @@
namespace mongo {
namespace repl {
- unsigned replSetForceInitialSyncFailure = 0;
-
- InitialSync::InitialSync(BackgroundSyncInterface *q) :
- SyncTail(q, multiInitialSyncApply) {}
-
- InitialSync::~InitialSync() {}
-
- /* initial oplog application, during initial sync, after cloning.
- */
- void InitialSync::oplogApplication(OperationContext* txn, const OpTime& endOpTime) {
- if (replSetForceInitialSyncFailure > 0) {
- log() << "test code invoked, forced InitialSync failure: "
- << replSetForceInitialSyncFailure;
- replSetForceInitialSyncFailure--;
- throw DBException("forced error",0);
- }
- _applyOplogUntil(txn, endOpTime);
+unsigned replSetForceInitialSyncFailure = 0;
+
+InitialSync::InitialSync(BackgroundSyncInterface* q) : SyncTail(q, multiInitialSyncApply) {}
+
+InitialSync::~InitialSync() {}
+
+/* initial oplog application, during initial sync, after cloning. */
+void InitialSync::oplogApplication(OperationContext* txn, const OpTime& endOpTime) {
+ if (replSetForceInitialSyncFailure > 0) {
+ log() << "test code invoked, forced InitialSync failure: "
+ << replSetForceInitialSyncFailure;
+ replSetForceInitialSyncFailure--;
+ throw DBException("forced error", 0);
}
+ _applyOplogUntil(txn, endOpTime);
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/initial_sync.h b/src/mongo/db/repl/initial_sync.h
index 41c1310656c..9fd435341c1 100644
--- a/src/mongo/db/repl/initial_sync.h
+++ b/src/mongo/db/repl/initial_sync.h
@@ -33,28 +33,30 @@
namespace mongo {
namespace repl {
- class BackgroundSyncInterface;
+class BackgroundSyncInterface;
+
+/**
+ * Initial clone and sync
+ */
+class InitialSync : public SyncTail {
+public:
+ virtual ~InitialSync();
+ InitialSync(BackgroundSyncInterface* q);
/**
- * Initial clone and sync
+ * applies up to endOpTime, fetching missing documents as needed.
*/
- class InitialSync : public SyncTail {
- public:
- virtual ~InitialSync();
- InitialSync(BackgroundSyncInterface *q);
-
- /**
- * applies up to endOpTime, fetching missing documents as needed.
- */
- void oplogApplication(OperationContext* txn, const OpTime& endOpTime);
-
- // Initial sync will ignore all journal requirement flags and doesn't wait until
- // operations are durable before updating the last OpTime.
- virtual bool supportsWaitingUntilDurable() { return false; }
- };
-
- // Used for ReplSetTest testing.
- extern unsigned replSetForceInitialSyncFailure;
-
-} // namespace repl
-} // namespace mongo
+ void oplogApplication(OperationContext* txn, const OpTime& endOpTime);
+
+ // Initial sync will ignore all journal requirement flags and doesn't wait until
+ // operations are durable before updating the last OpTime.
+ virtual bool supportsWaitingUntilDurable() {
+ return false;
+ }
+};
+
+// Used for ReplSetTest testing.
+extern unsigned replSetForceInitialSyncFailure;
+
+} // namespace repl
+} // namespace mongo
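
The replSetForceInitialSyncFailure counter declared above is a test hook: as the initial_sync.cpp hunk shows, oplogApplication() logs a message, decrements the counter, and throws a DBException while the counter is positive. A minimal sketch of how a test might arm it follows; the surrounding test scaffolding and helper name are assumptions.

#include "mongo/db/repl/initial_sync.h"

namespace {
// Arm the failure hook so the next InitialSync::oplogApplication() call logs
// the forced-failure message and throws a DBException, then clears itself.
void forceOneInitialSyncFailureSketch() {
    mongo::repl::replSetForceInitialSyncFailure = 1;
}
}  // namespace
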
diff --git a/src/mongo/db/repl/is_master_response.cpp b/src/mongo/db/repl/is_master_response.cpp
index ab38c295d14..4a4b1394670 100644
--- a/src/mongo/db/repl/is_master_response.cpp
+++ b/src/mongo/db/repl/is_master_response.cpp
@@ -42,415 +42,417 @@ namespace mongo {
namespace repl {
namespace {
- const std::string kIsMasterFieldName = "ismaster";
- const std::string kSecondaryFieldName = "secondary";
- const std::string kSetNameFieldName = "setName";
- const std::string kSetVersionFieldName = "setVersion";
- const std::string kHostsFieldName = "hosts";
- const std::string kPassivesFieldName = "passives";
- const std::string kArbitersFieldName = "arbiters";
- const std::string kPrimaryFieldName = "primary";
- const std::string kArbiterOnlyFieldName = "arbiterOnly";
- const std::string kPassiveFieldName = "passive";
- const std::string kHiddenFieldName = "hidden";
- const std::string kBuildIndexesFieldName = "buildIndexes";
- const std::string kSlaveDelayFieldName = "slaveDelay";
- const std::string kTagsFieldName = "tags";
- const std::string kMeFieldName = "me";
- const std::string kElectionIdFieldName = "electionId";
-
- // field name constants that don't directly correspond to member variables
- const std::string kInfoFieldName = "info";
- const std::string kIsReplicaSetFieldName = "isreplicaset";
- const std::string kErrmsgFieldName = "errmsg";
- const std::string kCodeFieldName = "code";
+const std::string kIsMasterFieldName = "ismaster";
+const std::string kSecondaryFieldName = "secondary";
+const std::string kSetNameFieldName = "setName";
+const std::string kSetVersionFieldName = "setVersion";
+const std::string kHostsFieldName = "hosts";
+const std::string kPassivesFieldName = "passives";
+const std::string kArbitersFieldName = "arbiters";
+const std::string kPrimaryFieldName = "primary";
+const std::string kArbiterOnlyFieldName = "arbiterOnly";
+const std::string kPassiveFieldName = "passive";
+const std::string kHiddenFieldName = "hidden";
+const std::string kBuildIndexesFieldName = "buildIndexes";
+const std::string kSlaveDelayFieldName = "slaveDelay";
+const std::string kTagsFieldName = "tags";
+const std::string kMeFieldName = "me";
+const std::string kElectionIdFieldName = "electionId";
+
+// field name constants that don't directly correspond to member variables
+const std::string kInfoFieldName = "info";
+const std::string kIsReplicaSetFieldName = "isreplicaset";
+const std::string kErrmsgFieldName = "errmsg";
+const std::string kCodeFieldName = "code";
} // namespace
- IsMasterResponse::IsMasterResponse() :
- _isMaster(false),
- _isMasterSet(false),
- _secondary(false),
- _isSecondarySet(false),
- _setNameSet(false),
- _setVersion(0),
- _setVersionSet(false),
- _hostsSet(false),
- _passivesSet(false),
- _arbitersSet(false),
- _primarySet(false),
- _arbiterOnly(false),
- _arbiterOnlySet(false),
- _passive(false),
- _passiveSet(false),
- _hidden(false),
- _hiddenSet(false),
- _buildIndexes(true),
- _buildIndexesSet(false),
- _slaveDelay(0),
- _slaveDelaySet(false),
- _tagsSet(false),
- _meSet(false),
- _electionId(OID()),
- _configSet(true),
- _shutdownInProgress(false)
- {}
-
- void IsMasterResponse::addToBSON(BSONObjBuilder* builder) const {
- if (_shutdownInProgress) {
- builder->append(kCodeFieldName, ErrorCodes::ShutdownInProgress);
- builder->append(kErrmsgFieldName, "replication shutdown in progress");
- return;
- }
+IsMasterResponse::IsMasterResponse()
+ : _isMaster(false),
+ _isMasterSet(false),
+ _secondary(false),
+ _isSecondarySet(false),
+ _setNameSet(false),
+ _setVersion(0),
+ _setVersionSet(false),
+ _hostsSet(false),
+ _passivesSet(false),
+ _arbitersSet(false),
+ _primarySet(false),
+ _arbiterOnly(false),
+ _arbiterOnlySet(false),
+ _passive(false),
+ _passiveSet(false),
+ _hidden(false),
+ _hiddenSet(false),
+ _buildIndexes(true),
+ _buildIndexesSet(false),
+ _slaveDelay(0),
+ _slaveDelaySet(false),
+ _tagsSet(false),
+ _meSet(false),
+ _electionId(OID()),
+ _configSet(true),
+ _shutdownInProgress(false) {}
+
+void IsMasterResponse::addToBSON(BSONObjBuilder* builder) const {
+ if (_shutdownInProgress) {
+ builder->append(kCodeFieldName, ErrorCodes::ShutdownInProgress);
+ builder->append(kErrmsgFieldName, "replication shutdown in progress");
+ return;
+ }
- if (!_configSet) {
- builder->append(kIsMasterFieldName, false);
- builder->append(kSecondaryFieldName, false);
- builder->append(kInfoFieldName, "Does not have a valid replica set config");
- builder->append(kIsReplicaSetFieldName , true);
- return;
- }
+ if (!_configSet) {
+ builder->append(kIsMasterFieldName, false);
+ builder->append(kSecondaryFieldName, false);
+ builder->append(kInfoFieldName, "Does not have a valid replica set config");
+ builder->append(kIsReplicaSetFieldName, true);
+ return;
+ }
- invariant(_setNameSet);
- builder->append(kSetNameFieldName, _setName);
- invariant(_setVersionSet);
- builder->append(kSetVersionFieldName, static_cast<int>(_setVersion));
- invariant(_isMasterSet);
- builder->append(kIsMasterFieldName, _isMaster);
- invariant(_isSecondarySet);
- builder->append(kSecondaryFieldName, _secondary);
-
- if (_hostsSet) {
- std::vector<std::string> hosts;
- for (size_t i = 0; i < _hosts.size(); ++i) {
- hosts.push_back(_hosts[i].toString());
- }
- builder->append(kHostsFieldName, hosts);
+ invariant(_setNameSet);
+ builder->append(kSetNameFieldName, _setName);
+ invariant(_setVersionSet);
+ builder->append(kSetVersionFieldName, static_cast<int>(_setVersion));
+ invariant(_isMasterSet);
+ builder->append(kIsMasterFieldName, _isMaster);
+ invariant(_isSecondarySet);
+ builder->append(kSecondaryFieldName, _secondary);
+
+ if (_hostsSet) {
+ std::vector<std::string> hosts;
+ for (size_t i = 0; i < _hosts.size(); ++i) {
+ hosts.push_back(_hosts[i].toString());
}
- if (_passivesSet) {
- std::vector<std::string> passives;
- for (size_t i = 0; i < _passives.size(); ++i) {
- passives.push_back(_passives[i].toString());
- }
- builder->append(kPassivesFieldName, passives);
+ builder->append(kHostsFieldName, hosts);
+ }
+ if (_passivesSet) {
+ std::vector<std::string> passives;
+ for (size_t i = 0; i < _passives.size(); ++i) {
+ passives.push_back(_passives[i].toString());
}
- if (_arbitersSet) {
- std::vector<std::string> arbiters;
- for (size_t i = 0; i < _arbiters.size(); ++i) {
- arbiters.push_back(_arbiters[i].toString());
- }
- builder->append(kArbitersFieldName, arbiters);
+ builder->append(kPassivesFieldName, passives);
+ }
+ if (_arbitersSet) {
+ std::vector<std::string> arbiters;
+ for (size_t i = 0; i < _arbiters.size(); ++i) {
+ arbiters.push_back(_arbiters[i].toString());
}
- if (_primarySet)
- builder->append(kPrimaryFieldName, _primary.toString());
- if (_arbiterOnlySet)
- builder->append(kArbiterOnlyFieldName, _arbiterOnly);
- if (_passiveSet)
- builder->append(kPassiveFieldName, _passive);
- if (_hiddenSet)
- builder->append(kHiddenFieldName, _hidden);
- if (_buildIndexesSet)
- builder->append(kBuildIndexesFieldName, _buildIndexes);
- if (_slaveDelaySet)
- builder->appendIntOrLL(kSlaveDelayFieldName, durationCount<Seconds>(_slaveDelay));
- if (_tagsSet) {
- BSONObjBuilder tags(builder->subobjStart(kTagsFieldName));
- for (unordered_map<std::string, std::string>::const_iterator it = _tags.begin();
- it != _tags.end(); ++it) {
- tags.append(it->first, it->second);
- }
+ builder->append(kArbitersFieldName, arbiters);
+ }
+ if (_primarySet)
+ builder->append(kPrimaryFieldName, _primary.toString());
+ if (_arbiterOnlySet)
+ builder->append(kArbiterOnlyFieldName, _arbiterOnly);
+ if (_passiveSet)
+ builder->append(kPassiveFieldName, _passive);
+ if (_hiddenSet)
+ builder->append(kHiddenFieldName, _hidden);
+ if (_buildIndexesSet)
+ builder->append(kBuildIndexesFieldName, _buildIndexes);
+ if (_slaveDelaySet)
+ builder->appendIntOrLL(kSlaveDelayFieldName, durationCount<Seconds>(_slaveDelay));
+ if (_tagsSet) {
+ BSONObjBuilder tags(builder->subobjStart(kTagsFieldName));
+ for (unordered_map<std::string, std::string>::const_iterator it = _tags.begin();
+ it != _tags.end();
+ ++it) {
+ tags.append(it->first, it->second);
+ }
+ }
+ invariant(_meSet);
+ builder->append(kMeFieldName, _me.toString());
+ if (_electionId.isSet())
+ builder->append(kElectionIdFieldName, _electionId);
+}
+
+BSONObj IsMasterResponse::toBSON() const {
+ BSONObjBuilder builder;
+ addToBSON(&builder);
+ return builder.obj();
+}
+
+Status IsMasterResponse::initialize(const BSONObj& doc) {
+ Status status = bsonExtractBooleanField(doc, kIsMasterFieldName, &_isMaster);
+ if (!status.isOK()) {
+ return status;
+ }
+ _isMasterSet = true;
+ status = bsonExtractBooleanField(doc, kSecondaryFieldName, &_secondary);
+ if (!status.isOK()) {
+ return status;
+ }
+ _isSecondarySet = true;
+ if (doc.hasField(kInfoFieldName)) {
+ if (_isMaster || _secondary || !doc.hasField(kIsReplicaSetFieldName) ||
+ !doc[kIsReplicaSetFieldName].booleanSafe()) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Expected presence of \"" << kInfoFieldName
+ << "\" field to indicate no valid config loaded, but other "
+ "fields weren't as we expected");
+ }
+ _configSet = false;
+ return Status::OK();
+ } else {
+ if (doc.hasField(kIsReplicaSetFieldName)) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Found \"" << kIsReplicaSetFieldName
+ << "\" field which should indicate that no valid config "
+ "is loaded, but we didn't also have an \""
+ << kInfoFieldName << "\" field as we expected");
}
- invariant(_meSet);
- builder->append(kMeFieldName, _me.toString());
- if (_electionId.isSet())
- builder->append(kElectionIdFieldName, _electionId);
}
- BSONObj IsMasterResponse::toBSON() const {
- BSONObjBuilder builder;
- addToBSON(&builder);
- return builder.obj();
+ status = bsonExtractStringField(doc, kSetNameFieldName, &_setName);
+ if (!status.isOK()) {
+ return status;
+ }
+ _setNameSet = true;
+ status = bsonExtractIntegerField(doc, kSetVersionFieldName, &_setVersion);
+ if (!status.isOK()) {
+ return status;
}
+ _setVersionSet = true;
- Status IsMasterResponse::initialize(const BSONObj& doc) {
- Status status = bsonExtractBooleanField(doc, kIsMasterFieldName, &_isMaster);
- if (!status.isOK()) {
- return status;
- }
- _isMasterSet = true;
- status = bsonExtractBooleanField(doc, kSecondaryFieldName, &_secondary);
+ if (doc.hasField(kHostsFieldName)) {
+ BSONElement hostsElement;
+ status = bsonExtractTypedField(doc, kHostsFieldName, Array, &hostsElement);
if (!status.isOK()) {
return status;
}
- _isSecondarySet = true;
- if (doc.hasField(kInfoFieldName)) {
- if (_isMaster ||
- _secondary ||
- !doc.hasField(kIsReplicaSetFieldName) ||
- !doc[kIsReplicaSetFieldName].booleanSafe()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Expected presence of \"" << kInfoFieldName <<
- "\" field to indicate no valid config loaded, but other "
- "fields weren't as we expected");
- }
- _configSet = false;
- return Status::OK();
- }
- else {
- if (doc.hasField(kIsReplicaSetFieldName)) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Found \"" << kIsReplicaSetFieldName <<
- "\" field which should indicate that no valid config "
- "is loaded, but we didn't also have an \"" <<
- kInfoFieldName << "\" field as we expected");
+ for (BSONObjIterator it(hostsElement.Obj()); it.more();) {
+ BSONElement hostElement = it.next();
+ if (hostElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Elements in \"" << kHostsFieldName
+ << "\" array of isMaster response must be of type "
+ << typeName(String) << " but found type "
+ << typeName(hostElement.type()));
}
+ _hosts.push_back(HostAndPort(hostElement.String()));
}
+ _hostsSet = true;
+ }
- status = bsonExtractStringField(doc, kSetNameFieldName, &_setName);
- if (!status.isOK()) {
- return status;
- }
- _setNameSet = true;
- status = bsonExtractIntegerField(doc, kSetVersionFieldName, &_setVersion);
+ if (doc.hasField(kPassivesFieldName)) {
+ BSONElement passivesElement;
+ status = bsonExtractTypedField(doc, kPassivesFieldName, Array, &passivesElement);
if (!status.isOK()) {
return status;
}
- _setVersionSet = true;
-
- if (doc.hasField(kHostsFieldName)) {
- BSONElement hostsElement;
- status = bsonExtractTypedField(doc, kHostsFieldName, Array, &hostsElement);
- if (!status.isOK()) {
- return status;
- }
- for (BSONObjIterator it(hostsElement.Obj()); it.more();) {
- BSONElement hostElement = it.next();
- if (hostElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Elements in \"" << kHostsFieldName <<
- "\" array of isMaster response must be of type " <<
- typeName(String) << " but found type " <<
- typeName(hostElement.type()));
- }
- _hosts.push_back(HostAndPort(hostElement.String()));
- }
- _hostsSet = true;
- }
-
- if (doc.hasField(kPassivesFieldName)) {
- BSONElement passivesElement;
- status = bsonExtractTypedField(doc, kPassivesFieldName, Array, &passivesElement);
- if (!status.isOK()) {
- return status;
- }
- for (BSONObjIterator it(passivesElement.Obj()); it.more();) {
- BSONElement passiveElement = it.next();
- if (passiveElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Elements in \"" << kPassivesFieldName <<
- "\" array of isMaster response must be of type " <<
- typeName(String) << " but found type " <<
- typeName(passiveElement.type()));
- }
- _passives.push_back(HostAndPort(passiveElement.String()));
- }
- _passivesSet = true;
- }
-
- if (doc.hasField(kArbitersFieldName)) {
- BSONElement arbitersElement;
- status = bsonExtractTypedField(doc, kArbitersFieldName, Array, &arbitersElement);
- if (!status.isOK()) {
- return status;
- }
- for (BSONObjIterator it(arbitersElement.Obj()); it.more();) {
- BSONElement arbiterElement = it.next();
- if (arbiterElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Elements in \"" << kArbitersFieldName <<
- "\" array of isMaster response must be of type " <<
- typeName(String) << " but found type " <<
- typeName(arbiterElement.type()));
- }
- _arbiters.push_back(HostAndPort(arbiterElement.String()));
- }
- _arbitersSet = true;
- }
-
- if (doc.hasField(kPrimaryFieldName)) {
- std::string primaryString;
- status = bsonExtractStringField(doc, kPrimaryFieldName, &primaryString);
- if (!status.isOK()) {
- return status;
- }
- _primary = HostAndPort(primaryString);
- _primarySet = true;
- }
-
- if (doc.hasField(kArbiterOnlyFieldName)) {
- status = bsonExtractBooleanField(doc, kArbiterOnlyFieldName, &_arbiterOnly);
- if (!status.isOK()) {
- return status;
- }
- _arbiterOnlySet = true;
- }
-
- if (doc.hasField(kPassiveFieldName)) {
- status = bsonExtractBooleanField(doc, kPassiveFieldName, &_passive);
- if (!status.isOK()) {
- return status;
- }
- _passiveSet = true;
- }
-
- if (doc.hasField(kHiddenFieldName)) {
- status = bsonExtractBooleanField(doc, kHiddenFieldName, &_hidden);
- if (!status.isOK()) {
- return status;
- }
- _hiddenSet = true;
- }
-
- if (doc.hasField(kBuildIndexesFieldName)) {
- status = bsonExtractBooleanField(doc, kBuildIndexesFieldName, &_buildIndexes);
- if (!status.isOK()) {
- return status;
- }
- _buildIndexesSet = true;
- }
-
- if (doc.hasField(kSlaveDelayFieldName)) {
- long long slaveDelaySecs;
- status = bsonExtractIntegerField(doc, kSlaveDelayFieldName, &slaveDelaySecs);
- if (!status.isOK()) {
- return status;
+ for (BSONObjIterator it(passivesElement.Obj()); it.more();) {
+ BSONElement passiveElement = it.next();
+ if (passiveElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Elements in \"" << kPassivesFieldName
+ << "\" array of isMaster response must be of type "
+ << typeName(String) << " but found type "
+ << typeName(passiveElement.type()));
}
- _slaveDelaySet = true;
- _slaveDelay = Seconds(slaveDelaySecs);
+ _passives.push_back(HostAndPort(passiveElement.String()));
}
+ _passivesSet = true;
+ }
- if (doc.hasField(kTagsFieldName)) {
- BSONElement tagsElement;
- status = bsonExtractTypedField(doc, kTagsFieldName, Object, &tagsElement);
- if (!status.isOK()) {
- return status;
- }
- for (BSONObjIterator it(tagsElement.Obj()); it.more();) {
- BSONElement tagElement = it.next();
- if (tagElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch,
- str::stream() << "Elements in \"" << kTagsFieldName << "\" obj "
- "of isMaster response must be of type " <<
- typeName(String) << " but found type " <<
- typeName(tagsElement.type()));
- }
- _tags[tagElement.fieldNameStringData().toString()] = tagElement.String();
- }
- _tagsSet = true;
+ if (doc.hasField(kArbitersFieldName)) {
+ BSONElement arbitersElement;
+ status = bsonExtractTypedField(doc, kArbitersFieldName, Array, &arbitersElement);
+ if (!status.isOK()) {
+ return status;
}
-
- if (doc.hasField(kElectionIdFieldName)) {
- BSONElement electionIdElem;
- status = bsonExtractTypedField(doc, kElectionIdFieldName, jstOID, &electionIdElem);
- if (!status.isOK()) {
- return status;
+ for (BSONObjIterator it(arbitersElement.Obj()); it.more();) {
+ BSONElement arbiterElement = it.next();
+ if (arbiterElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Elements in \"" << kArbitersFieldName
+ << "\" array of isMaster response must be of type "
+ << typeName(String) << " but found type "
+ << typeName(arbiterElement.type()));
}
- _electionId = electionIdElem.OID();
+ _arbiters.push_back(HostAndPort(arbiterElement.String()));
}
+ _arbitersSet = true;
+ }
- std::string meString;
- status = bsonExtractStringField(doc, kMeFieldName, &meString);
+ if (doc.hasField(kPrimaryFieldName)) {
+ std::string primaryString;
+ status = bsonExtractStringField(doc, kPrimaryFieldName, &primaryString);
if (!status.isOK()) {
return status;
}
- _me = HostAndPort(meString);
- _meSet = true;
-
- return Status::OK();
- }
-
- void IsMasterResponse::setIsMaster(bool isMaster) {
- _isMasterSet = true;
- _isMaster = isMaster;
- }
-
- void IsMasterResponse::setIsSecondary(bool secondary) {
- _isSecondarySet = true;
- _secondary = secondary;
- }
-
- void IsMasterResponse::setReplSetName(const std::string& setName) {
- _setNameSet = true;
- _setName = setName;
- }
-
- void IsMasterResponse::setReplSetVersion(long long version) {
- _setVersionSet = true;
- _setVersion = version;
- }
-
- void IsMasterResponse::addHost(const HostAndPort& host) {
- _hostsSet = true;
- _hosts.push_back(host);
- }
-
- void IsMasterResponse::addPassive(const HostAndPort& passive) {
- _passivesSet = true;
- _passives.push_back(passive);
- }
-
- void IsMasterResponse::addArbiter(const HostAndPort& arbiter) {
- _arbitersSet = true;
- _arbiters.push_back(arbiter);
- }
-
- void IsMasterResponse::setPrimary(const HostAndPort& primary) {
+ _primary = HostAndPort(primaryString);
_primarySet = true;
- _primary = primary;
}
- void IsMasterResponse::setIsArbiterOnly(bool arbiterOnly) {
+ if (doc.hasField(kArbiterOnlyFieldName)) {
+ status = bsonExtractBooleanField(doc, kArbiterOnlyFieldName, &_arbiterOnly);
+ if (!status.isOK()) {
+ return status;
+ }
_arbiterOnlySet = true;
- _arbiterOnly = arbiterOnly;
}
- void IsMasterResponse::setIsPassive(bool passive) {
+ if (doc.hasField(kPassiveFieldName)) {
+ status = bsonExtractBooleanField(doc, kPassiveFieldName, &_passive);
+ if (!status.isOK()) {
+ return status;
+ }
_passiveSet = true;
- _passive = passive;
}
- void IsMasterResponse::setIsHidden(bool hidden) {
+ if (doc.hasField(kHiddenFieldName)) {
+ status = bsonExtractBooleanField(doc, kHiddenFieldName, &_hidden);
+ if (!status.isOK()) {
+ return status;
+ }
_hiddenSet = true;
- _hidden = hidden;
}
- void IsMasterResponse::setShouldBuildIndexes(bool buildIndexes) {
+ if (doc.hasField(kBuildIndexesFieldName)) {
+ status = bsonExtractBooleanField(doc, kBuildIndexesFieldName, &_buildIndexes);
+ if (!status.isOK()) {
+ return status;
+ }
_buildIndexesSet = true;
- _buildIndexes = buildIndexes;
}
- void IsMasterResponse::setSlaveDelay(Seconds slaveDelay) {
+ if (doc.hasField(kSlaveDelayFieldName)) {
+ long long slaveDelaySecs;
+ status = bsonExtractIntegerField(doc, kSlaveDelayFieldName, &slaveDelaySecs);
+ if (!status.isOK()) {
+ return status;
+ }
_slaveDelaySet = true;
- _slaveDelay = slaveDelay;
+ _slaveDelay = Seconds(slaveDelaySecs);
}
- void IsMasterResponse::addTag(const std::string& tagKey, const std::string& tagValue) {
+ if (doc.hasField(kTagsFieldName)) {
+ BSONElement tagsElement;
+ status = bsonExtractTypedField(doc, kTagsFieldName, Object, &tagsElement);
+ if (!status.isOK()) {
+ return status;
+ }
+ for (BSONObjIterator it(tagsElement.Obj()); it.more();) {
+ BSONElement tagElement = it.next();
+ if (tagElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Elements in \"" << kTagsFieldName
+ << "\" obj "
+ "of isMaster response must be of type "
+ << typeName(String) << " but found type "
+ << typeName(tagsElement.type()));
+ }
+ _tags[tagElement.fieldNameStringData().toString()] = tagElement.String();
+ }
_tagsSet = true;
- _tags[tagKey] = tagValue;
}
- void IsMasterResponse::setMe(const HostAndPort& me) {
- _meSet = true;
- _me = me;
+ if (doc.hasField(kElectionIdFieldName)) {
+ BSONElement electionIdElem;
+ status = bsonExtractTypedField(doc, kElectionIdFieldName, jstOID, &electionIdElem);
+ if (!status.isOK()) {
+ return status;
+ }
+ _electionId = electionIdElem.OID();
}
- void IsMasterResponse::setElectionId(const OID& electionId) {
- _electionId = electionId;
+ std::string meString;
+ status = bsonExtractStringField(doc, kMeFieldName, &meString);
+ if (!status.isOK()) {
+ return status;
}
-
- void IsMasterResponse::markAsNoConfig() { _configSet = false; }
-
- void IsMasterResponse::markAsShutdownInProgress() { _shutdownInProgress = true; }
-
-} // namespace repl
-} // namespace mongo
+ _me = HostAndPort(meString);
+ _meSet = true;
+
+ return Status::OK();
+}
+
+void IsMasterResponse::setIsMaster(bool isMaster) {
+ _isMasterSet = true;
+ _isMaster = isMaster;
+}
+
+void IsMasterResponse::setIsSecondary(bool secondary) {
+ _isSecondarySet = true;
+ _secondary = secondary;
+}
+
+void IsMasterResponse::setReplSetName(const std::string& setName) {
+ _setNameSet = true;
+ _setName = setName;
+}
+
+void IsMasterResponse::setReplSetVersion(long long version) {
+ _setVersionSet = true;
+ _setVersion = version;
+}
+
+void IsMasterResponse::addHost(const HostAndPort& host) {
+ _hostsSet = true;
+ _hosts.push_back(host);
+}
+
+void IsMasterResponse::addPassive(const HostAndPort& passive) {
+ _passivesSet = true;
+ _passives.push_back(passive);
+}
+
+void IsMasterResponse::addArbiter(const HostAndPort& arbiter) {
+ _arbitersSet = true;
+ _arbiters.push_back(arbiter);
+}
+
+void IsMasterResponse::setPrimary(const HostAndPort& primary) {
+ _primarySet = true;
+ _primary = primary;
+}
+
+void IsMasterResponse::setIsArbiterOnly(bool arbiterOnly) {
+ _arbiterOnlySet = true;
+ _arbiterOnly = arbiterOnly;
+}
+
+void IsMasterResponse::setIsPassive(bool passive) {
+ _passiveSet = true;
+ _passive = passive;
+}
+
+void IsMasterResponse::setIsHidden(bool hidden) {
+ _hiddenSet = true;
+ _hidden = hidden;
+}
+
+void IsMasterResponse::setShouldBuildIndexes(bool buildIndexes) {
+ _buildIndexesSet = true;
+ _buildIndexes = buildIndexes;
+}
+
+void IsMasterResponse::setSlaveDelay(Seconds slaveDelay) {
+ _slaveDelaySet = true;
+ _slaveDelay = slaveDelay;
+}
+
+void IsMasterResponse::addTag(const std::string& tagKey, const std::string& tagValue) {
+ _tagsSet = true;
+ _tags[tagKey] = tagValue;
+}
+
+void IsMasterResponse::setMe(const HostAndPort& me) {
+ _meSet = true;
+ _me = me;
+}
+
+void IsMasterResponse::setElectionId(const OID& electionId) {
+ _electionId = electionId;
+}
+
+void IsMasterResponse::markAsNoConfig() {
+ _configSet = false;
+}
+
+void IsMasterResponse::markAsShutdownInProgress() {
+ _shutdownInProgress = true;
+}
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/is_master_response.h b/src/mongo/db/repl/is_master_response.h
index dd0eda70e2b..7bfaf1ac0b2 100644
--- a/src/mongo/db/repl/is_master_response.h
+++ b/src/mongo/db/repl/is_master_response.h
@@ -38,179 +38,216 @@
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class Status;
+class BSONObj;
+class BSONObjBuilder;
+class Status;
namespace repl {
+/**
+ * Response structure for the ismaster command. Only handles responses from nodes
+ * that are in replset mode.
+ */
+class IsMasterResponse {
+public:
+ IsMasterResponse();
+
/**
- * Response structure for the ismaster command. Only handles responses from nodes
- * that are in replset mode.
+ * Initializes this IsMasterResponse from the contents of "doc".
*/
- class IsMasterResponse {
- public:
- IsMasterResponse();
-
- /**
- * Initializes this IsMasterResponse from the contents of "doc".
- */
- Status initialize(const BSONObj& doc);
-
- /**
- * Appends all non-default values to "builder".
- * There are two values that are handled specially: if _inShutdown is true or _configSet
- * is false, we will add a standard response to "builder" indicating either that we are
- * in the middle of shutting down or we do not have a valid replica set config, ignoring
- * the values of all other member variables.
- */
- void addToBSON(BSONObjBuilder* builder) const;
+ Status initialize(const BSONObj& doc);
- /**
- * Returns a BSONObj consisting the results of calling addToBSON on an otherwise empty
- * BSONObjBuilder.
- */
- BSONObj toBSON() const;
-
-
- // ===================== Accessors for member variables ================================= //
-
- bool isMaster() const { return _isMaster; }
+ /**
+ * Appends all non-default values to "builder".
+     * There are two values that are handled specially: if _shutdownInProgress is true or _configSet
+ * is false, we will add a standard response to "builder" indicating either that we are
+ * in the middle of shutting down or we do not have a valid replica set config, ignoring
+ * the values of all other member variables.
+ */
+ void addToBSON(BSONObjBuilder* builder) const;
- bool isSecondary() const { return _secondary; }
+ /**
+     * Returns a BSONObj consisting of the results of calling addToBSON on an otherwise empty
+ * BSONObjBuilder.
+ */
+ BSONObj toBSON() const;
- const std::string& getReplSetName() const { return _setName; }
- long long getReplSetVersion() const { return _setVersion; }
+ // ===================== Accessors for member variables ================================= //
- const std::vector<HostAndPort>& getHosts() const { return _hosts; }
+ bool isMaster() const {
+ return _isMaster;
+ }
- const std::vector<HostAndPort>& getPassives() const { return _passives; }
+ bool isSecondary() const {
+ return _secondary;
+ }
- const std::vector<HostAndPort>& getArbiters() const { return _arbiters; }
+ const std::string& getReplSetName() const {
+ return _setName;
+ }
- const HostAndPort& getPrimary() const { return _primary; }
+ long long getReplSetVersion() const {
+ return _setVersion;
+ }
- bool hasPrimary() const { return _primarySet; }
+ const std::vector<HostAndPort>& getHosts() const {
+ return _hosts;
+ }
- bool isArbiterOnly() const { return _arbiterOnly; }
+ const std::vector<HostAndPort>& getPassives() const {
+ return _passives;
+ }
- bool isPassive() const { return _passive; }
+ const std::vector<HostAndPort>& getArbiters() const {
+ return _arbiters;
+ }
- bool isHidden() const { return _hidden; }
+ const HostAndPort& getPrimary() const {
+ return _primary;
+ }
- bool shouldBuildIndexes() const { return _buildIndexes; }
+ bool hasPrimary() const {
+ return _primarySet;
+ }
- Seconds getSlaveDelay() const { return _slaveDelay; }
+ bool isArbiterOnly() const {
+ return _arbiterOnly;
+ }
- const unordered_map<std::string, std::string> getTags() const { return _tags; }
+ bool isPassive() const {
+ return _passive;
+ }
- const HostAndPort& getMe() const { return _me; }
+ bool isHidden() const {
+ return _hidden;
+ }
- const OID& getElectionId() const { return _electionId; }
+ bool shouldBuildIndexes() const {
+ return _buildIndexes;
+ }
- /**
- * If false, calls to toBSON/addToBSON will ignore all other fields and add a specific
- * message to indicate that we have no replica set config.
- */
- bool isConfigSet() const { return _configSet; }
+ Seconds getSlaveDelay() const {
+ return _slaveDelay;
+ }
- /**
- * If false, calls to toBSON/addToBSON will ignore all other fields and add a specific
- * message to indicate that we are in the middle of shutting down.
- */
- bool isShutdownInProgress() const { return _shutdownInProgress; }
+ const unordered_map<std::string, std::string> getTags() const {
+ return _tags;
+ }
+ const HostAndPort& getMe() const {
+ return _me;
+ }
- // ===================== Mutators for member variables ================================= //
+ const OID& getElectionId() const {
+ return _electionId;
+ }
- void setIsMaster(bool isMaster);
+ /**
+ * If false, calls to toBSON/addToBSON will ignore all other fields and add a specific
+ * message to indicate that we have no replica set config.
+ */
+ bool isConfigSet() const {
+ return _configSet;
+ }
- void setIsSecondary(bool secondary);
+ /**
+ * If false, calls to toBSON/addToBSON will ignore all other fields and add a specific
+ * message to indicate that we are in the middle of shutting down.
+ */
+ bool isShutdownInProgress() const {
+ return _shutdownInProgress;
+ }
- void setReplSetName(const std::string& setName);
- void setReplSetVersion(long long version);
+ // ===================== Mutators for member variables ================================= //
- void addHost(const HostAndPort& host);
+ void setIsMaster(bool isMaster);
- void addPassive(const HostAndPort& passive);
+ void setIsSecondary(bool secondary);
- void addArbiter(const HostAndPort& arbiter);
+ void setReplSetName(const std::string& setName);
- void setPrimary(const HostAndPort& primary);
+ void setReplSetVersion(long long version);
- void setIsArbiterOnly(bool arbiterOnly);
+ void addHost(const HostAndPort& host);
- void setIsPassive(bool passive);
+ void addPassive(const HostAndPort& passive);
- void setIsHidden(bool hidden);
+ void addArbiter(const HostAndPort& arbiter);
- void setShouldBuildIndexes(bool buildIndexes);
+ void setPrimary(const HostAndPort& primary);
- void setSlaveDelay(Seconds slaveDelay);
+ void setIsArbiterOnly(bool arbiterOnly);
- void addTag(const std::string& tagKey, const std::string& tagValue);
+ void setIsPassive(bool passive);
- void setMe(const HostAndPort& me);
+ void setIsHidden(bool hidden);
- void setElectionId(const OID& electionId);
+ void setShouldBuildIndexes(bool buildIndexes);
- /**
- * Marks _configSet as false, which will cause future calls to toBSON/addToBSON to ignore
- * all other member variables and output a hardcoded response indicating that we have no
- * valid replica set config.
- */
- void markAsNoConfig();
+ void setSlaveDelay(Seconds slaveDelay);
- /**
- * Marks _shutdownInProgress as true, which will cause future calls to toBSON/addToBSON to
- * ignore all other member variables and output a hardcoded response indicating that we are
- * in the middle of shutting down.
- */
- void markAsShutdownInProgress();
+ void addTag(const std::string& tagKey, const std::string& tagValue);
- private:
+ void setMe(const HostAndPort& me);
- bool _isMaster;
- bool _isMasterSet;
- bool _secondary;
- bool _isSecondarySet;
- std::string _setName;
- bool _setNameSet;
- long long _setVersion;
- bool _setVersionSet;
- std::vector<HostAndPort> _hosts;
- bool _hostsSet;
- std::vector<HostAndPort> _passives;
- bool _passivesSet;
- std::vector<HostAndPort> _arbiters;
- bool _arbitersSet;
- HostAndPort _primary;
- bool _primarySet;
- bool _arbiterOnly;
- bool _arbiterOnlySet;
- bool _passive;
- bool _passiveSet;
- bool _hidden;
- bool _hiddenSet;
- bool _buildIndexes;
- bool _buildIndexesSet;
- Seconds _slaveDelay;
- bool _slaveDelaySet;
- unordered_map<std::string, std::string> _tags;
- bool _tagsSet;
- HostAndPort _me;
- bool _meSet;
- OID _electionId;
+ void setElectionId(const OID& electionId);
- // If _configSet is false this means we don't have a valid repl set config, so toBSON
- // will return a set of hardcoded values that indicate this.
- bool _configSet;
- // If _shutdownInProgress is true toBSON will return a set of hardcoded values to indicate
- // that we are mid shutdown
- bool _shutdownInProgress;
- };
+ /**
+ * Marks _configSet as false, which will cause future calls to toBSON/addToBSON to ignore
+ * all other member variables and output a hardcoded response indicating that we have no
+ * valid replica set config.
+ */
+ void markAsNoConfig();
-} // namespace repl
-} // namespace mongo
+ /**
+ * Marks _shutdownInProgress as true, which will cause future calls to toBSON/addToBSON to
+ * ignore all other member variables and output a hardcoded response indicating that we are
+ * in the middle of shutting down.
+ */
+ void markAsShutdownInProgress();
+
+private:
+ bool _isMaster;
+ bool _isMasterSet;
+ bool _secondary;
+ bool _isSecondarySet;
+ std::string _setName;
+ bool _setNameSet;
+ long long _setVersion;
+ bool _setVersionSet;
+ std::vector<HostAndPort> _hosts;
+ bool _hostsSet;
+ std::vector<HostAndPort> _passives;
+ bool _passivesSet;
+ std::vector<HostAndPort> _arbiters;
+ bool _arbitersSet;
+ HostAndPort _primary;
+ bool _primarySet;
+ bool _arbiterOnly;
+ bool _arbiterOnlySet;
+ bool _passive;
+ bool _passiveSet;
+ bool _hidden;
+ bool _hiddenSet;
+ bool _buildIndexes;
+ bool _buildIndexesSet;
+ Seconds _slaveDelay;
+ bool _slaveDelaySet;
+ unordered_map<std::string, std::string> _tags;
+ bool _tagsSet;
+ HostAndPort _me;
+ bool _meSet;
+ OID _electionId;
+
+ // If _configSet is false this means we don't have a valid repl set config, so toBSON
+ // will return a set of hardcoded values that indicate this.
+ bool _configSet;
+ // If _shutdownInProgress is true toBSON will return a set of hardcoded values to indicate
+ // that we are mid shutdown
+ bool _shutdownInProgress;
+};
+
+} // namespace repl
+} // namespace mongo
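
To tie together the mutators and the addToBSON() invariants shown above, here is a hedged sketch of building a minimal replset isMaster reply. Only the IsMasterResponse members come from the header; the include paths, host names, and set name are illustrative assumptions.

#include "mongo/bson/bsonobj.h"
#include "mongo/db/repl/is_master_response.h"
#include "mongo/util/net/hostandport.h"

namespace {
// Builds the smallest reply addToBSON() accepts: set name and version,
// master/secondary flags, and "me" are the fields its invariants require.
mongo::BSONObj buildIsMasterReplySketch() {
    mongo::repl::IsMasterResponse response;
    response.setReplSetName("rs0");
    response.setReplSetVersion(1);
    response.setIsMaster(true);
    response.setIsSecondary(false);
    response.setMe(mongo::HostAndPort("node1.example.net:27017"));
    response.addHost(mongo::HostAndPort("node1.example.net:27017"));
    response.setPrimary(mongo::HostAndPort("node1.example.net:27017"));
    return response.toBSON();  // same as addToBSON() on an empty builder
}
}  // namespace

Optional fields such as passives, arbiters, tags, and slaveDelay are appended only when their corresponding setters have been called, mirroring the _*Set flags in the class.
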
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index d0b44858913..e34b1cc9660 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -47,7 +47,8 @@
#include "mongo/util/scopeguard.h"
#include "mongo/util/log.h"
-#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun) || defined(__OpenBSD__)
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun) || \
+ defined(__OpenBSD__)
#define FASTPATH_UNIX 1
#endif
@@ -75,281 +76,269 @@
namespace mongo {
namespace repl {
- OID instanceId;
+OID instanceId;
- MONGO_INITIALIZER(GenerateInstanceId)(InitializerContext*) {
- instanceId = OID::gen();
- return Status::OK();
- }
+MONGO_INITIALIZER(GenerateInstanceId)(InitializerContext*) {
+ instanceId = OID::gen();
+ return Status::OK();
+}
namespace {
- /**
- * Helper to convert a message from a networking function to a string.
- * Needed because errnoWithDescription uses strerror on linux, when
- * we need gai_strerror.
- */
- std::string stringifyError(int code) {
+/**
+ * Helper to convert a message from a networking function to a string.
+ * Needed because errnoWithDescription uses strerror on Linux, whereas
+ * here we need gai_strerror.
+ */
+std::string stringifyError(int code) {
#if FASTPATH_UNIX
- return gai_strerror(code);
+ return gai_strerror(code);
#elif defined(_WIN32)
- // FormatMessage in errnoWithDescription works here on windows
- return errnoWithDescription(code);
+ // FormatMessage in errnoWithDescription works here on windows
+ return errnoWithDescription(code);
#endif
- }
-
- /**
- * Resolves a host and port to a list of IP addresses. This requires a syscall. If the
- * ipv6enabled parameter is true, both IPv6 and IPv4 addresses will be returned.
- */
- std::vector<std::string> getAddrsForHost(const std::string& iporhost,
- const int port,
- const bool ipv6enabled) {
- addrinfo* addrs = NULL;
- addrinfo hints = {0};
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_family = (ipv6enabled ? AF_UNSPEC : AF_INET);
-
- const std::string portNum = BSONObjBuilder::numStr(port);
-
- std::vector<std::string> out;
-
- int err = getaddrinfo(iporhost.c_str(), portNum.c_str(), &hints, &addrs);
+}
- if (err) {
- warning() << "getaddrinfo(\"" << iporhost << "\") failed: "
- << stringifyError(err) << std::endl;
- return out;
- }
+/**
+ * Resolves a host and port to a list of IP addresses. This requires a syscall. If the
+ * ipv6enabled parameter is true, both IPv6 and IPv4 addresses will be returned.
+ */
+std::vector<std::string> getAddrsForHost(const std::string& iporhost,
+ const int port,
+ const bool ipv6enabled) {
+ addrinfo* addrs = NULL;
+ addrinfo hints = {0};
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_family = (ipv6enabled ? AF_UNSPEC : AF_INET);
+
+ const std::string portNum = BSONObjBuilder::numStr(port);
+
+ std::vector<std::string> out;
+
+ int err = getaddrinfo(iporhost.c_str(), portNum.c_str(), &hints, &addrs);
+
+ if (err) {
+ warning() << "getaddrinfo(\"" << iporhost << "\") failed: " << stringifyError(err)
+ << std::endl;
+ return out;
+ }
- ON_BLOCK_EXIT(freeaddrinfo, addrs);
+ ON_BLOCK_EXIT(freeaddrinfo, addrs);
- for (addrinfo* addr = addrs; addr != NULL; addr = addr->ai_next) {
- int family = addr->ai_family;
- char host[NI_MAXHOST];
+ for (addrinfo* addr = addrs; addr != NULL; addr = addr->ai_next) {
+ int family = addr->ai_family;
+ char host[NI_MAXHOST];
- if (family == AF_INET || family == AF_INET6) {
- err = getnameinfo(addr->ai_addr, addr->ai_addrlen, host,
- NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
- if (err) {
- warning() << "getnameinfo() failed: " << stringifyError(err) << std::endl;
- continue;
- }
- out.push_back(host);
+ if (family == AF_INET || family == AF_INET6) {
+ err = getnameinfo(
+ addr->ai_addr, addr->ai_addrlen, host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
+ if (err) {
+ warning() << "getnameinfo() failed: " << stringifyError(err) << std::endl;
+ continue;
}
-
+ out.push_back(host);
}
+ }
- if (shouldLog(logger::LogSeverity::Debug(2))) {
- StringBuilder builder;
- builder << "getAddrsForHost(\"" << iporhost << ":" << port << "\"):";
- for (std::vector<std::string>::const_iterator o = out.begin(); o != out.end(); ++o) {
- builder << " [ " << *o << "]";
- }
- LOG(2) << builder.str();
+ if (shouldLog(logger::LogSeverity::Debug(2))) {
+ StringBuilder builder;
+ builder << "getAddrsForHost(\"" << iporhost << ":" << port << "\"):";
+ for (std::vector<std::string>::const_iterator o = out.begin(); o != out.end(); ++o) {
+ builder << " [ " << *o << "]";
}
-
- return out;
+ LOG(2) << builder.str();
}
-} // namespace
+ return out;
+}
- bool isSelf(const HostAndPort& hostAndPort) {
+} // namespace
- // Fastpath: check if the host&port in question is bound to one
- // of the interfaces on this machine.
- // No need for ip match if the ports do not match
- if (hostAndPort.port() == serverGlobalParams.port) {
- std::vector<std::string> myAddrs = serverGlobalParams.bind_ip.empty() ?
- getBoundAddrs(IPv6Enabled()) :
- std::vector<std::string>();
+bool isSelf(const HostAndPort& hostAndPort) {
+ // Fastpath: check if the host&port in question is bound to one
+ // of the interfaces on this machine.
+ // No need for ip match if the ports do not match
+ if (hostAndPort.port() == serverGlobalParams.port) {
+ std::vector<std::string> myAddrs = serverGlobalParams.bind_ip.empty()
+ ? getBoundAddrs(IPv6Enabled())
+ : std::vector<std::string>();
+
+ if (!serverGlobalParams.bind_ip.empty()) {
+ boost::split(myAddrs, serverGlobalParams.bind_ip, boost::is_any_of(", "));
+ }
- if (!serverGlobalParams.bind_ip.empty()) {
- boost::split(myAddrs, serverGlobalParams.bind_ip, boost::is_any_of(", "));
- }
+ const std::vector<std::string> hostAddrs =
+ getAddrsForHost(hostAndPort.host(), hostAndPort.port(), IPv6Enabled());
- const std::vector<std::string> hostAddrs = getAddrsForHost(hostAndPort.host(),
- hostAndPort.port(),
- IPv6Enabled());
-
- for (std::vector<std::string>::const_iterator i = myAddrs.begin();
- i != myAddrs.end(); ++i) {
- for (std::vector<std::string>::const_iterator j = hostAddrs.begin();
- j != hostAddrs.end(); ++j) {
- if (*i == *j) {
- return true;
- }
+ for (std::vector<std::string>::const_iterator i = myAddrs.begin(); i != myAddrs.end();
+ ++i) {
+ for (std::vector<std::string>::const_iterator j = hostAddrs.begin();
+ j != hostAddrs.end();
+ ++j) {
+ if (*i == *j) {
+ return true;
}
}
}
+ }
+
+ // Ensure that the server is up and ready to accept incoming network requests.
+ const Listener* listener = Listener::getTimeTracker();
+ if (!listener) {
+ return false;
+ }
+ listener->waitUntilListening();
- // Ensure that the server is up and ready to accept incoming network requests.
- const Listener* listener = Listener::getTimeTracker();
- if (!listener) {
+ try {
+ DBClientConnection conn;
+ std::string errmsg;
+ conn.setSoTimeout(30); // 30 second timeout
+ if (!conn.connect(hostAndPort, errmsg)) {
return false;
}
- listener->waitUntilListening();
- try {
- DBClientConnection conn;
- std::string errmsg;
- conn.setSoTimeout(30); // 30 second timeout
- if (!conn.connect(hostAndPort, errmsg)) {
+ if (getGlobalAuthorizationManager()->isAuthEnabled() && isInternalAuthSet()) {
+ if (!authenticateInternalUser(&conn)) {
return false;
}
-
- if (getGlobalAuthorizationManager()->isAuthEnabled() && isInternalAuthSet()) {
- if (!authenticateInternalUser(&conn)) {
- return false;
- }
- }
- BSONObj out;
- bool ok = conn.simpleCommand("admin" , &out, "_isSelf");
- bool me = ok && out["id"].type() == jstOID && instanceId == out["id"].OID();
-
- return me;
- }
- catch (const std::exception& e) {
- warning() << "could't check isSelf (" << hostAndPort << ") " << e.what() << std::endl;
}
+ BSONObj out;
+ bool ok = conn.simpleCommand("admin", &out, "_isSelf");
+ bool me = ok && out["id"].type() == jstOID && instanceId == out["id"].OID();
- return false;
+ return me;
+ } catch (const std::exception& e) {
+        warning() << "couldn't check isSelf (" << hostAndPort << ") " << e.what() << std::endl;
}
- /**
- * Returns all the IP addresses bound to the network interfaces of this machine.
- * This requires a syscall. If the ipv6enabled parameter is true, both IPv6 AND IPv4
- * addresses will be returned.
- */
- std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
- std::vector<std::string> out;
+ return false;
+}
+
+/**
+ * Returns all the IP addresses bound to the network interfaces of this machine.
+ * This requires a syscall. If the ipv6enabled parameter is true, both IPv6 AND IPv4
+ * addresses will be returned.
+ */
+std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
+ std::vector<std::string> out;
#ifdef FASTPATH_UNIX
- ifaddrs* addrs;
+ ifaddrs* addrs;
- int err = getifaddrs(&addrs);
- if (err) {
- warning() << "getifaddrs failure: " << errnoWithDescription(err) << std::endl;
- return out;
- }
- ON_BLOCK_EXIT(freeifaddrs, addrs);
-
- // based on example code from linux getifaddrs manpage
- for (ifaddrs* addr = addrs; addr != NULL; addr = addr->ifa_next) {
- if (addr->ifa_addr == NULL) continue;
- int family = addr->ifa_addr->sa_family;
- char host[NI_MAXHOST];
-
- if (family == AF_INET || (ipv6enabled && (family == AF_INET6))) {
- err = getnameinfo(addr->ifa_addr,
- (family == AF_INET ? sizeof(struct sockaddr_in)
- : sizeof(struct sockaddr_in6)),
- host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
- if (err) {
- warning() << "getnameinfo() failed: " << gai_strerror(err) << std::endl;
- continue;
- }
- out.push_back(host);
+ int err = getifaddrs(&addrs);
+ if (err) {
+ warning() << "getifaddrs failure: " << errnoWithDescription(err) << std::endl;
+ return out;
+ }
+ ON_BLOCK_EXIT(freeifaddrs, addrs);
+
+ // based on example code from linux getifaddrs manpage
+ for (ifaddrs* addr = addrs; addr != NULL; addr = addr->ifa_next) {
+ if (addr->ifa_addr == NULL)
+ continue;
+ int family = addr->ifa_addr->sa_family;
+ char host[NI_MAXHOST];
+
+ if (family == AF_INET || (ipv6enabled && (family == AF_INET6))) {
+ err = getnameinfo(
+ addr->ifa_addr,
+ (family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)),
+ host,
+ NI_MAXHOST,
+ NULL,
+ 0,
+ NI_NUMERICHOST);
+ if (err) {
+ warning() << "getnameinfo() failed: " << gai_strerror(err) << std::endl;
+ continue;
}
+ out.push_back(host);
}
+ }
#elif defined(_WIN32)
- // Start with the MS recommended 15KB buffer. Use multiple attempts
- // for the rare case that the adapter config changes between calls
+ // Start with the MS recommended 15KB buffer. Use multiple attempts
+ // for the rare case that the adapter config changes between calls
- ULONG adaptersLen = 15 * 1024;
- std::unique_ptr<char[]> buf(new char[adaptersLen]);
- IP_ADAPTER_ADDRESSES* adapters = reinterpret_cast<IP_ADAPTER_ADDRESSES*>(buf.get());
- DWORD err;
+ ULONG adaptersLen = 15 * 1024;
+ std::unique_ptr<char[]> buf(new char[adaptersLen]);
+ IP_ADAPTER_ADDRESSES* adapters = reinterpret_cast<IP_ADAPTER_ADDRESSES*>(buf.get());
+ DWORD err;
- ULONG family = ipv6enabled ? AF_UNSPEC : AF_INET;
+ ULONG family = ipv6enabled ? AF_UNSPEC : AF_INET;
- for (int tries = 0; tries < 3; ++tries) {
- err = GetAdaptersAddresses(family,
- GAA_FLAG_SKIP_ANYCAST | // only want unicast addrs
+ for (int tries = 0; tries < 3; ++tries) {
+ err = GetAdaptersAddresses(family,
+ GAA_FLAG_SKIP_ANYCAST | // only want unicast addrs
GAA_FLAG_SKIP_MULTICAST |
GAA_FLAG_SKIP_DNS_SERVER,
- NULL,
- adapters,
- &adaptersLen);
-
- if (err == ERROR_BUFFER_OVERFLOW) {
- // in this case, adaptersLen will be set to the size we need to allocate
- buf.reset(new char[adaptersLen]);
- adapters = reinterpret_cast<IP_ADAPTER_ADDRESSES*>(buf.get());
- }
- else {
- break; // only retry for incorrectly sized buffer
- }
+ NULL,
+ adapters,
+ &adaptersLen);
+
+ if (err == ERROR_BUFFER_OVERFLOW) {
+ // in this case, adaptersLen will be set to the size we need to allocate
+ buf.reset(new char[adaptersLen]);
+ adapters = reinterpret_cast<IP_ADAPTER_ADDRESSES*>(buf.get());
+ } else {
+ break; // only retry for incorrectly sized buffer
}
+ }
- if (err != NO_ERROR) {
- warning() << "GetAdaptersAddresses() failed: " << errnoWithDescription(err)
- << std::endl;
- return out;
- }
+ if (err != NO_ERROR) {
+ warning() << "GetAdaptersAddresses() failed: " << errnoWithDescription(err) << std::endl;
+ return out;
+ }
- for (IP_ADAPTER_ADDRESSES* adapter = adapters;
- adapter != NULL; adapter = adapter->Next) {
- for (IP_ADAPTER_UNICAST_ADDRESS* addr = adapter->FirstUnicastAddress;
- addr != NULL; addr = addr->Next) {
-
- short family =
- reinterpret_cast<SOCKADDR_STORAGE*>(addr->Address.lpSockaddr)->ss_family;
-
- if (family == AF_INET) {
- // IPv4
- SOCKADDR_IN* sock = reinterpret_cast<SOCKADDR_IN*>(addr->Address.lpSockaddr);
- char addrstr[INET_ADDRSTRLEN] = {0};
- boost::system::error_code ec;
- // Not all windows versions have inet_ntop
- boost::asio::detail::socket_ops::inet_ntop(AF_INET,
- &(sock->sin_addr),
- addrstr,
- INET_ADDRSTRLEN,
- 0,
- ec);
- if (ec) {
- warning() << "inet_ntop failed during IPv4 address conversion: "
- << ec.message() << std::endl;
- continue;
- }
- out.push_back(addrstr);
+ for (IP_ADAPTER_ADDRESSES* adapter = adapters; adapter != NULL; adapter = adapter->Next) {
+ for (IP_ADAPTER_UNICAST_ADDRESS* addr = adapter->FirstUnicastAddress; addr != NULL;
+ addr = addr->Next) {
+ short family = reinterpret_cast<SOCKADDR_STORAGE*>(addr->Address.lpSockaddr)->ss_family;
+
+ if (family == AF_INET) {
+ // IPv4
+ SOCKADDR_IN* sock = reinterpret_cast<SOCKADDR_IN*>(addr->Address.lpSockaddr);
+ char addrstr[INET_ADDRSTRLEN] = {0};
+ boost::system::error_code ec;
+ // Not all windows versions have inet_ntop
+ boost::asio::detail::socket_ops::inet_ntop(
+ AF_INET, &(sock->sin_addr), addrstr, INET_ADDRSTRLEN, 0, ec);
+ if (ec) {
+ warning() << "inet_ntop failed during IPv4 address conversion: " << ec.message()
+ << std::endl;
+ continue;
}
- else if (family == AF_INET6) {
- // IPv6
- SOCKADDR_IN6* sock = reinterpret_cast<SOCKADDR_IN6*>(addr->Address.lpSockaddr);
- char addrstr[INET6_ADDRSTRLEN] = {0};
- boost::system::error_code ec;
- boost::asio::detail::socket_ops::inet_ntop(AF_INET6,
- &(sock->sin6_addr),
- addrstr,
- INET6_ADDRSTRLEN,
- 0,
- ec);
- if (ec) {
- warning() << "inet_ntop failed during IPv6 address conversion: "
- << ec.message() << std::endl;
- continue;
- }
- out.push_back(addrstr);
+ out.push_back(addrstr);
+ } else if (family == AF_INET6) {
+ // IPv6
+ SOCKADDR_IN6* sock = reinterpret_cast<SOCKADDR_IN6*>(addr->Address.lpSockaddr);
+ char addrstr[INET6_ADDRSTRLEN] = {0};
+ boost::system::error_code ec;
+ boost::asio::detail::socket_ops::inet_ntop(
+ AF_INET6, &(sock->sin6_addr), addrstr, INET6_ADDRSTRLEN, 0, ec);
+ if (ec) {
+ warning() << "inet_ntop failed during IPv6 address conversion: " << ec.message()
+ << std::endl;
+ continue;
}
+ out.push_back(addrstr);
}
}
+ }
#endif // defined(_WIN32)
- if (shouldLog(logger::LogSeverity::Debug(2))) {
- StringBuilder builder;
- builder << "getBoundAddrs():";
- for (std::vector<std::string>::const_iterator o = out.begin(); o != out.end(); ++o) {
- builder << " [ " << *o << "]";
- }
- LOG(2) << builder.str();
+ if (shouldLog(logger::LogSeverity::Debug(2))) {
+ StringBuilder builder;
+ builder << "getBoundAddrs():";
+ for (std::vector<std::string>::const_iterator o = out.begin(); o != out.end(); ++o) {
+ builder << " [ " << *o << "]";
}
- return out;
+ LOG(2) << builder.str();
}
+ return out;
+}
} // namespace repl
} // namespace mongo
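
Editor's note on the reformatted isself.cpp above: getBoundAddrs() enumerates the machine's bound interface addresses (getifaddrs/getnameinfo on POSIX, GetAdaptersAddresses on Windows), and the slow path of isSelf() connects to the candidate host, optionally authenticates as the internal user, runs the _isSelf command, and compares the returned OID against this node's instanceId. The following is a minimal, self-contained sketch of the POSIX enumeration branch only; it is not part of this commit, and the function name listBoundAddrs is hypothetical.

// Sketch only (not part of this diff): enumerate bound IPv4/IPv6 addresses the
// same way the POSIX branch of getBoundAddrs() does. Assumes a POSIX system.
#include <ifaddrs.h>
#include <netdb.h>
#include <netinet/in.h>
#include <sys/socket.h>

#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> listBoundAddrs(bool ipv6enabled) {
    std::vector<std::string> out;
    ifaddrs* addrs = nullptr;
    if (getifaddrs(&addrs) != 0) {
        return out;  // on failure, return an empty list, as the real code does
    }
    for (ifaddrs* addr = addrs; addr != nullptr; addr = addr->ifa_next) {
        if (!addr->ifa_addr)
            continue;
        const int family = addr->ifa_addr->sa_family;
        if (family != AF_INET && !(ipv6enabled && family == AF_INET6))
            continue;
        char host[NI_MAXHOST];
        const socklen_t len =
            (family == AF_INET) ? sizeof(sockaddr_in) : sizeof(sockaddr_in6);
        // NI_NUMERICHOST yields the numeric form, e.g. "127.0.0.1" or "::1".
        if (getnameinfo(addr->ifa_addr, len, host, NI_MAXHOST, nullptr, 0, NI_NUMERICHOST) == 0) {
            out.push_back(host);
        }
    }
    freeifaddrs(addrs);
    return out;
}

int main() {
    for (const std::string& a : listBoundAddrs(true))
        std::cout << a << "\n";
}
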
diff --git a/src/mongo/db/repl/isself.h b/src/mongo/db/repl/isself.h
index cbcbbd9f031..19b61bf47bd 100644
--- a/src/mongo/db/repl/isself.h
+++ b/src/mongo/db/repl/isself.h
@@ -34,30 +34,30 @@
#include "mongo/bson/oid.h"
namespace mongo {
- struct HostAndPort;
+struct HostAndPort;
namespace repl {
- /**
- * An identifier unique to this instance. Used by isSelf to see if we are talking
- * to ourself or someone else.
- */
- extern OID instanceId;
-
- /**
- * Returns true if "hostAndPort" identifies this instance.
- */
- bool isSelf(const HostAndPort& hostAndPort);
-
- /**
- * Returns all the IP addresses bound to the network interfaces of this machine.
- * This requires a syscall. If the ipv6enabled parameter is true, both IPv6 AND IPv4
- * addresses will be returned.
- *
- * Note: this only works on Linux and Windows. All calls should be properly ifdef'd,
- * otherwise an invariant will be triggered.
- */
- std::vector<std::string> getBoundAddrs(const bool ipv6enabled);
-
-} // namespace repl
-} // namespace mongo
+/**
+ * An identifier unique to this instance. Used by isSelf to see if we are talking
+ * to ourself or someone else.
+ */
+extern OID instanceId;
+
+/**
+ * Returns true if "hostAndPort" identifies this instance.
+ */
+bool isSelf(const HostAndPort& hostAndPort);
+
+/**
+ * Returns all the IP addresses bound to the network interfaces of this machine.
+ * This requires a syscall. If the ipv6enabled parameter is true, both IPv6 AND IPv4
+ * addresses will be returned.
+ *
+ * Note: this only works on Linux and Windows. All calls should be properly ifdef'd,
+ * otherwise an invariant will be triggered.
+ */
+std::vector<std::string> getBoundAddrs(const bool ipv6enabled);
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/isself_test.cpp b/src/mongo/db/repl/isself_test.cpp
index b3af1721e62..b6a3e26f8e6 100644
--- a/src/mongo/db/repl/isself_test.cpp
+++ b/src/mongo/db/repl/isself_test.cpp
@@ -40,45 +40,41 @@ namespace repl {
namespace {
- using std::string;
+using std::string;
- TEST(IsSelf, DetectsSameHostIPv4) {
+TEST(IsSelf, DetectsSameHostIPv4) {
#if defined(_WIN32) || defined(__linux__) || defined(__APPLE__)
- bool wasEnabled = IPv6Enabled();
- enableIPv6(false);
- ON_BLOCK_EXIT(enableIPv6, wasEnabled);
- // first we get the addrs bound on this host
- const std::vector<std::string> addrs = getBoundAddrs(false);
- // Fastpath should agree with the result of getBoundAddrs
- // since it uses it...
- for (std::vector<string>::const_iterator it = addrs.begin();
- it != addrs.end(); ++it) {
-
- ASSERT(isSelf(HostAndPort(*it, serverGlobalParams.port)));
- }
+ bool wasEnabled = IPv6Enabled();
+ enableIPv6(false);
+ ON_BLOCK_EXIT(enableIPv6, wasEnabled);
+ // first we get the addrs bound on this host
+ const std::vector<std::string> addrs = getBoundAddrs(false);
+ // Fastpath should agree with the result of getBoundAddrs
+ // since it uses it...
+ for (std::vector<string>::const_iterator it = addrs.begin(); it != addrs.end(); ++it) {
+ ASSERT(isSelf(HostAndPort(*it, serverGlobalParams.port)));
+ }
#else
- ASSERT(true);
+ ASSERT(true);
#endif
- }
+}
- TEST(IsSelf, DetectsSameHostIPv6) {
+TEST(IsSelf, DetectsSameHostIPv6) {
#if defined(_WIN32) || defined(__linux__) || defined(__APPLE__)
- bool wasEnabled = IPv6Enabled();
- enableIPv6(true);
- ON_BLOCK_EXIT(enableIPv6, wasEnabled);
- // first we get the addrs bound on this host
- const std::vector<std::string> addrs = getBoundAddrs(true);
- // Fastpath should agree with the result of getBoundAddrs
- // since it uses it...
- for (std::vector<string>::const_iterator it = addrs.begin();
- it != addrs.end(); ++it) {
-
- ASSERT(isSelf(HostAndPort(*it, serverGlobalParams.port)));
- }
+ bool wasEnabled = IPv6Enabled();
+ enableIPv6(true);
+ ON_BLOCK_EXIT(enableIPv6, wasEnabled);
+ // first we get the addrs bound on this host
+ const std::vector<std::string> addrs = getBoundAddrs(true);
+ // Fastpath should agree with the result of getBoundAddrs
+ // since it uses it...
+ for (std::vector<string>::const_iterator it = addrs.begin(); it != addrs.end(); ++it) {
+ ASSERT(isSelf(HostAndPort(*it, serverGlobalParams.port)));
+ }
#else
- ASSERT(true);
+ ASSERT(true);
#endif
- }
+}
} // namespace
diff --git a/src/mongo/db/repl/last_vote.cpp b/src/mongo/db/repl/last_vote.cpp
index e7a1b178bf4..94e88231bac 100644
--- a/src/mongo/db/repl/last_vote.cpp
+++ b/src/mongo/db/repl/last_vote.cpp
@@ -36,57 +36,54 @@ namespace mongo {
namespace repl {
namespace {
- const std::string kCandidateIdFieldName = "candidateId";
- const std::string kTermFieldName = "term";
+const std::string kCandidateIdFieldName = "candidateId";
+const std::string kTermFieldName = "term";
- const std::string kLegalFieldNames[] = {
- kCandidateIdFieldName,
- kTermFieldName,
- };
+const std::string kLegalFieldNames[] = {
+ kCandidateIdFieldName, kTermFieldName,
+};
} // namespace
- Status LastVote::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("VotedFar",
- argsObj,
- kLegalFieldNames);
- if (!status.isOK())
- return status;
+Status LastVote::initialize(const BSONObj& argsObj) {
+ Status status = bsonCheckOnlyHasFields("VotedFar", argsObj, kLegalFieldNames);
+ if (!status.isOK())
+ return status;
- status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
- if (!status.isOK())
- return status;
+ status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
+ if (!status.isOK())
+ return status;
- status = bsonExtractIntegerField(argsObj, kCandidateIdFieldName, &_candidateId);
- if (!status.isOK())
- return status;
+ status = bsonExtractIntegerField(argsObj, kCandidateIdFieldName, &_candidateId);
+ if (!status.isOK())
+ return status;
- return Status::OK();
- }
+ return Status::OK();
+}
- void LastVote::setTerm(long long term) {
- _term = term;
- }
+void LastVote::setTerm(long long term) {
+ _term = term;
+}
- void LastVote::setCandidateId(long long candidateId) {
- _candidateId = candidateId;
- }
+void LastVote::setCandidateId(long long candidateId) {
+ _candidateId = candidateId;
+}
- long long LastVote::getTerm() const {
- return _term;
- }
+long long LastVote::getTerm() const {
+ return _term;
+}
- long long LastVote::getCandidateId() const {
- return _candidateId;
- }
+long long LastVote::getCandidateId() const {
+ return _candidateId;
+}
- BSONObj LastVote::toBSON() const {
- BSONObjBuilder builder;
- builder.append(kTermFieldName, _term);
- builder.append(kCandidateIdFieldName, _candidateId);
- return builder.obj();
- }
+BSONObj LastVote::toBSON() const {
+ BSONObjBuilder builder;
+ builder.append(kTermFieldName, _term);
+ builder.append(kCandidateIdFieldName, _candidateId);
+ return builder.obj();
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/last_vote.h b/src/mongo/db/repl/last_vote.h
index b2314823f00..fe1d67a3fe9 100644
--- a/src/mongo/db/repl/last_vote.h
+++ b/src/mongo/db/repl/last_vote.h
@@ -30,27 +30,27 @@
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class Status;
+class BSONObj;
+class BSONObjBuilder;
+class Status;
namespace repl {
- class LastVote {
- public:
- Status initialize(const BSONObj& argsObj);
+class LastVote {
+public:
+ Status initialize(const BSONObj& argsObj);
- long long getTerm() const;
- long long getCandidateId() const;
+ long long getTerm() const;
+ long long getCandidateId() const;
- void setTerm(long long term);
- void setCandidateId(long long candidateId);
- BSONObj toBSON() const;
+ void setTerm(long long term);
+ void setCandidateId(long long candidateId);
+ BSONObj toBSON() const;
- private:
- long long _candidateId = -1;
- long long _term = -1;
- };
+private:
+ long long _candidateId = -1;
+ long long _term = -1;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
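
Editor's note on the LastVote changes above: the diff is formatting-only; the class simply round-trips a {term, candidateId} pair through BSON using the field names shown in last_vote.cpp. A minimal sketch of that round trip follows; it is not part of this commit, assumes the usual mongo BSON headers, and the helper name lastVoteRoundTripExample is hypothetical.

// Sketch only, not part of this diff: round-trip a last-vote document through
// the LastVote class declared above. Error handling is abbreviated.
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/repl/last_vote.h"

namespace {

mongo::BSONObj lastVoteRoundTripExample() {
    using namespace mongo;
    using namespace mongo::repl;

    // Only the two recognized fields may appear; initialize() rejects anything else.
    BSONObj doc = BSON("term" << 5LL << "candidateId" << 2LL);

    LastVote vote;
    Status s = vote.initialize(doc);
    if (!s.isOK()) {
        return BSONObj();  // unknown field or wrong type
    }

    // getTerm()/getCandidateId() now return 5 and 2; toBSON() re-emits the same
    // two fields for persistence.
    return vote.toBSON();
}

}  // namespace
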
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index a1a58527b62..1e1bd428d39 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -82,1374 +82,1352 @@ using std::vector;
namespace mongo {
namespace repl {
- void pretouchOperation(OperationContext* txn, const BSONObj& op);
- void pretouchN(vector<BSONObj>&, unsigned a, unsigned b);
+void pretouchOperation(OperationContext* txn, const BSONObj& op);
+void pretouchN(vector<BSONObj>&, unsigned a, unsigned b);
- /* if 1 sync() is running */
- volatile int syncing = 0;
- volatile int relinquishSyncingSome = 0;
+/* if 1 sync() is running */
+volatile int syncing = 0;
+volatile int relinquishSyncingSome = 0;
- static time_t lastForcedResync = 0;
+static time_t lastForcedResync = 0;
- /* output by the web console */
- const char *replInfo = "";
- struct ReplInfo {
- ReplInfo(const char *msg) {
- replInfo = msg;
- }
- ~ReplInfo() {
- replInfo = "?";
- }
- };
-
-
- ReplSource::ReplSource(OperationContext* txn) {
- nClonedThisPass = 0;
- ensureMe(txn);
- }
-
- ReplSource::ReplSource(OperationContext* txn, BSONObj o) : nClonedThisPass(0) {
- only = o.getStringField("only");
- hostName = o.getStringField("host");
- _sourceName = o.getStringField("source");
- uassert( 10118 , "'host' field not set in sources collection object", !hostName.empty() );
- uassert( 10119 , "only source='main' allowed for now with replication", sourceName() == "main" );
- BSONElement e = o.getField("syncedTo");
- if ( !e.eoo() ) {
- uassert(10120, "bad sources 'syncedTo' field value",
- e.type() == Date || e.type() == bsonTimestamp);
- Timestamp tmp( e.date() );
- syncedTo = tmp;
- }
-
- BSONObj dbsObj = o.getObjectField("dbsNextPass");
- if ( !dbsObj.isEmpty() ) {
- BSONObjIterator i(dbsObj);
- while ( 1 ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- addDbNextPass.insert( e.fieldName() );
- }
- }
-
- dbsObj = o.getObjectField("incompleteCloneDbs");
- if ( !dbsObj.isEmpty() ) {
- BSONObjIterator i(dbsObj);
- while ( 1 ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- incompleteCloneDbs.insert( e.fieldName() );
- }
- }
- ensureMe(txn);
+/* output by the web console */
+const char* replInfo = "";
+struct ReplInfo {
+ ReplInfo(const char* msg) {
+ replInfo = msg;
+ }
+ ~ReplInfo() {
+ replInfo = "?";
+ }
+};
+
+
+ReplSource::ReplSource(OperationContext* txn) {
+ nClonedThisPass = 0;
+ ensureMe(txn);
+}
+
+ReplSource::ReplSource(OperationContext* txn, BSONObj o) : nClonedThisPass(0) {
+ only = o.getStringField("only");
+ hostName = o.getStringField("host");
+ _sourceName = o.getStringField("source");
+ uassert(10118, "'host' field not set in sources collection object", !hostName.empty());
+ uassert(10119, "only source='main' allowed for now with replication", sourceName() == "main");
+ BSONElement e = o.getField("syncedTo");
+ if (!e.eoo()) {
+ uassert(10120,
+ "bad sources 'syncedTo' field value",
+ e.type() == Date || e.type() == bsonTimestamp);
+ Timestamp tmp(e.date());
+ syncedTo = tmp;
}
- /* Turn our C++ Source object into a BSONObj */
- BSONObj ReplSource::jsobj() {
- BSONObjBuilder b;
- b.append("host", hostName);
- b.append("source", sourceName());
- if ( !only.empty() )
- b.append("only", only);
- if ( !syncedTo.isNull() )
- b.append("syncedTo", syncedTo);
-
- BSONObjBuilder dbsNextPassBuilder;
- int n = 0;
- for ( set<string>::iterator i = addDbNextPass.begin(); i != addDbNextPass.end(); i++ ) {
- n++;
- dbsNextPassBuilder.appendBool(*i, 1);
+ BSONObj dbsObj = o.getObjectField("dbsNextPass");
+ if (!dbsObj.isEmpty()) {
+ BSONObjIterator i(dbsObj);
+ while (1) {
+ BSONElement e = i.next();
+ if (e.eoo())
+ break;
+ addDbNextPass.insert(e.fieldName());
}
- if ( n )
- b.append("dbsNextPass", dbsNextPassBuilder.done());
+ }
- BSONObjBuilder incompleteCloneDbsBuilder;
- n = 0;
- for ( set<string>::iterator i = incompleteCloneDbs.begin(); i != incompleteCloneDbs.end(); i++ ) {
- n++;
- incompleteCloneDbsBuilder.appendBool(*i, 1);
+ dbsObj = o.getObjectField("incompleteCloneDbs");
+ if (!dbsObj.isEmpty()) {
+ BSONObjIterator i(dbsObj);
+ while (1) {
+ BSONElement e = i.next();
+ if (e.eoo())
+ break;
+ incompleteCloneDbs.insert(e.fieldName());
}
- if ( n )
- b.append("incompleteCloneDbs", incompleteCloneDbsBuilder.done());
-
- return b.obj();
}
+ ensureMe(txn);
+}
+
+/* Turn our C++ Source object into a BSONObj */
+BSONObj ReplSource::jsobj() {
+ BSONObjBuilder b;
+ b.append("host", hostName);
+ b.append("source", sourceName());
+ if (!only.empty())
+ b.append("only", only);
+ if (!syncedTo.isNull())
+ b.append("syncedTo", syncedTo);
+
+ BSONObjBuilder dbsNextPassBuilder;
+ int n = 0;
+ for (set<string>::iterator i = addDbNextPass.begin(); i != addDbNextPass.end(); i++) {
+ n++;
+ dbsNextPassBuilder.appendBool(*i, 1);
+ }
+ if (n)
+ b.append("dbsNextPass", dbsNextPassBuilder.done());
+
+ BSONObjBuilder incompleteCloneDbsBuilder;
+ n = 0;
+ for (set<string>::iterator i = incompleteCloneDbs.begin(); i != incompleteCloneDbs.end(); i++) {
+ n++;
+ incompleteCloneDbsBuilder.appendBool(*i, 1);
+ }
+ if (n)
+ b.append("incompleteCloneDbs", incompleteCloneDbsBuilder.done());
- void ReplSource::ensureMe(OperationContext* txn) {
- string myname = getHostName();
+ return b.obj();
+}
- // local.me is an identifier for a server for getLastError w:2+
- bool exists = Helpers::getSingleton(txn, "local.me", _me);
+void ReplSource::ensureMe(OperationContext* txn) {
+ string myname = getHostName();
- if (!exists || !_me.hasField("host") || _me["host"].String() != myname) {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
- WriteUnitOfWork wunit(txn);
- // clean out local.me
- Helpers::emptyCollection(txn, "local.me");
+ // local.me is an identifier for a server for getLastError w:2+
+ bool exists = Helpers::getSingleton(txn, "local.me", _me);
- // repopulate
- BSONObjBuilder b;
- b.appendOID("_id", 0, true);
- b.append("host", myname);
- _me = b.obj();
- Helpers::putSingleton(txn, "local.me", _me);
- wunit.commit();
- }
- _me = _me.getOwned();
- }
+ if (!exists || !_me.hasField("host") || _me["host"].String() != myname) {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
+ WriteUnitOfWork wunit(txn);
+ // clean out local.me
+ Helpers::emptyCollection(txn, "local.me");
- void ReplSource::save(OperationContext* txn) {
+ // repopulate
BSONObjBuilder b;
- verify( !hostName.empty() );
- b.append("host", hostName);
- // todo: finish allowing multiple source configs.
- // this line doesn't work right when source is null, if that is allowed as it is now:
- //b.append("source", _sourceName);
- BSONObj pattern = b.done();
+ b.appendOID("_id", 0, true);
+ b.append("host", myname);
+ _me = b.obj();
+ Helpers::putSingleton(txn, "local.me", _me);
+ wunit.commit();
+ }
+ _me = _me.getOwned();
+}
- BSONObj o = jsobj();
- LOG( 1 ) << "Saving repl source: " << o << endl;
+void ReplSource::save(OperationContext* txn) {
+ BSONObjBuilder b;
+ verify(!hostName.empty());
+ b.append("host", hostName);
+ // todo: finish allowing multiple source configs.
+ // this line doesn't work right when source is null, if that is allowed as it is now:
+ // b.append("source", _sourceName);
+ BSONObj pattern = b.done();
- {
- OpDebug debug;
+ BSONObj o = jsobj();
+ LOG(1) << "Saving repl source: " << o << endl;
- OldClientContext ctx(txn, "local.sources");
+ {
+ OpDebug debug;
- const NamespaceString requestNs("local.sources");
- UpdateRequest request(requestNs);
+ OldClientContext ctx(txn, "local.sources");
- request.setQuery(pattern);
- request.setUpdates(o);
- request.setUpsert();
+ const NamespaceString requestNs("local.sources");
+ UpdateRequest request(requestNs);
- UpdateResult res = update(txn, ctx.db(), request, &debug);
+ request.setQuery(pattern);
+ request.setUpdates(o);
+ request.setUpsert();
- verify( ! res.modifiers );
- verify( res.numMatched == 1 );
- }
- }
+ UpdateResult res = update(txn, ctx.db(), request, &debug);
- static void addSourceToList(OperationContext* txn,
- ReplSource::SourceVector &v,
- ReplSource& s,
- ReplSource::SourceVector &old) {
- if ( !s.syncedTo.isNull() ) { // Don't reuse old ReplSource if there was a forced resync.
- for ( ReplSource::SourceVector::iterator i = old.begin(); i != old.end(); ) {
- if ( s == **i ) {
- v.push_back(*i);
- old.erase(i);
- return;
- }
- i++;
+ verify(!res.modifiers);
+ verify(res.numMatched == 1);
+ }
+}
+
+static void addSourceToList(OperationContext* txn,
+ ReplSource::SourceVector& v,
+ ReplSource& s,
+ ReplSource::SourceVector& old) {
+ if (!s.syncedTo.isNull()) { // Don't reuse old ReplSource if there was a forced resync.
+ for (ReplSource::SourceVector::iterator i = old.begin(); i != old.end();) {
+ if (s == **i) {
+ v.push_back(*i);
+ old.erase(i);
+ return;
}
+ i++;
}
-
- v.push_back( std::shared_ptr< ReplSource >( new ReplSource( s ) ) );
}
- /* we reuse our existing objects so that we can keep our existing connection
- and cursor in effect.
- */
- void ReplSource::loadAll(OperationContext* txn, SourceVector &v) {
- const char* localSources = "local.sources";
- OldClientContext ctx(txn, localSources);
- SourceVector old = v;
- v.clear();
-
- const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
- if (!replSettings.source.empty()) {
- // --source <host> specified.
- // check that no items are in sources other than that
- // add if missing
- int n = 0;
- unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn,
- localSources,
- ctx.db()->getCollection(localSources)));
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- n++;
- ReplSource tmp(txn, obj);
- if (tmp.hostName != replSettings.source) {
- log() << "--source " << replSettings.source << " != " << tmp.hostName
- << " from local.sources collection" << endl;
- log() << "for instructions on changing this slave's source, see:" << endl;
- log() << "http://dochub.mongodb.org/core/masterslave" << endl;
- log() << "terminating mongod after 30 seconds" << endl;
- sleepsecs(30);
- dbexit( EXIT_REPLICATION_ERROR );
- }
- if (tmp.only != replSettings.only) {
- log() << "--only " << replSettings.only << " != " << tmp.only
- << " from local.sources collection" << endl;
- log() << "terminating after 30 seconds" << endl;
- sleepsecs(30);
- dbexit( EXIT_REPLICATION_ERROR );
- }
- }
- uassert(17065, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
- uassert( 10002 , "local.sources collection corrupt?", n<2 );
- if ( n == 0 ) {
- // source missing. add.
- ReplSource s(txn);
- s.hostName = replSettings.source;
- s.only = replSettings.only;
- s.save(txn);
- }
- }
- else {
- try {
- massert(10384 , "--only requires use of --source", replSettings.only.empty());
- }
- catch ( ... ) {
- dbexit( EXIT_BADOPTIONS );
- }
- }
+ v.push_back(std::shared_ptr<ReplSource>(new ReplSource(s)));
+}
- unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn,
- localSources,
- ctx.db()->getCollection(localSources)));
+/* we reuse our existing objects so that we can keep our existing connection
+ and cursor in effect.
+*/
+void ReplSource::loadAll(OperationContext* txn, SourceVector& v) {
+ const char* localSources = "local.sources";
+ OldClientContext ctx(txn, localSources);
+ SourceVector old = v;
+ v.clear();
+
+ const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
+ if (!replSettings.source.empty()) {
+ // --source <host> specified.
+ // check that no items are in sources other than that
+ // add if missing
+ int n = 0;
+ unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
+ txn, localSources, ctx.db()->getCollection(localSources)));
BSONObj obj;
PlanExecutor::ExecState state;
while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ n++;
ReplSource tmp(txn, obj);
- if ( tmp.syncedTo.isNull() ) {
- DBDirectClient c(txn);
- BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
- if ( !op.isEmpty() ) {
- tmp.syncedTo = op[ "ts" ].timestamp();
- }
+ if (tmp.hostName != replSettings.source) {
+ log() << "--source " << replSettings.source << " != " << tmp.hostName
+ << " from local.sources collection" << endl;
+ log() << "for instructions on changing this slave's source, see:" << endl;
+ log() << "http://dochub.mongodb.org/core/masterslave" << endl;
+ log() << "terminating mongod after 30 seconds" << endl;
+ sleepsecs(30);
+ dbexit(EXIT_REPLICATION_ERROR);
}
- addSourceToList(txn, v, tmp, old);
+ if (tmp.only != replSettings.only) {
+ log() << "--only " << replSettings.only << " != " << tmp.only
+ << " from local.sources collection" << endl;
+ log() << "terminating after 30 seconds" << endl;
+ sleepsecs(30);
+ dbexit(EXIT_REPLICATION_ERROR);
+ }
+ }
+ uassert(17065, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
+ uassert(10002, "local.sources collection corrupt?", n < 2);
+ if (n == 0) {
+ // source missing. add.
+ ReplSource s(txn);
+ s.hostName = replSettings.source;
+ s.only = replSettings.only;
+ s.save(txn);
+ }
+ } else {
+ try {
+ massert(10384, "--only requires use of --source", replSettings.only.empty());
+ } catch (...) {
+ dbexit(EXIT_BADOPTIONS);
}
- uassert(17066, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
}
- bool ReplSource::throttledForceResyncDead( OperationContext* txn, const char *requester ) {
- if ( time( 0 ) - lastForcedResync > 600 ) {
- forceResyncDead( txn, requester );
- lastForcedResync = time( 0 );
- return true;
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::collectionScan(txn, localSources, ctx.db()->getCollection(localSources)));
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ ReplSource tmp(txn, obj);
+ if (tmp.syncedTo.isNull()) {
+ DBDirectClient c(txn);
+ BSONObj op = c.findOne("local.oplog.$main",
+ QUERY("op" << NE << "n").sort(BSON("$natural" << -1)));
+ if (!op.isEmpty()) {
+ tmp.syncedTo = op["ts"].timestamp();
+ }
}
+ addSourceToList(txn, v, tmp, old);
+ }
+ uassert(17066, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
+}
+
+bool ReplSource::throttledForceResyncDead(OperationContext* txn, const char* requester) {
+ if (time(0) - lastForcedResync > 600) {
+ forceResyncDead(txn, requester);
+ lastForcedResync = time(0);
+ return true;
+ }
+ return false;
+}
+
+void ReplSource::forceResyncDead(OperationContext* txn, const char* requester) {
+ if (!replAllDead)
+ return;
+ SourceVector sources;
+ ReplSource::loadAll(txn, sources);
+ for (SourceVector::iterator i = sources.begin(); i != sources.end(); ++i) {
+ log() << requester << " forcing resync from " << (*i)->hostName << endl;
+ (*i)->forceResync(txn, requester);
+ }
+ replAllDead = 0;
+}
+
+class HandshakeCmd : public Command {
+public:
+ void help(stringstream& h) const {
+ h << "internal";
+ }
+ HandshakeCmd() : Command("handshake") {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
return false;
}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::internal);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
- void ReplSource::forceResyncDead( OperationContext* txn, const char *requester ) {
- if ( !replAllDead )
- return;
- SourceVector sources;
- ReplSource::loadAll(txn, sources);
- for( SourceVector::iterator i = sources.begin(); i != sources.end(); ++i ) {
- log() << requester << " forcing resync from " << (*i)->hostName << endl;
- (*i)->forceResync( txn, requester );
- }
- replAllDead = 0;
- }
-
- class HandshakeCmd : public Command {
- public:
- void help(stringstream& h) const { h << "internal"; }
- HandshakeCmd() : Command("handshake") {}
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::internal);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ virtual bool run(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ HandshakeArgs handshake;
+ Status status = handshake.initialize(cmdObj);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool run(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- HandshakeArgs handshake;
- Status status = handshake.initialize(cmdObj);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ ReplClientInfo::forClient(txn->getClient()).setRemoteID(handshake.getRid());
- ReplClientInfo::forClient(txn->getClient()).setRemoteID(handshake.getRid());
+ status = getGlobalReplicationCoordinator()->processHandshake(txn, handshake);
+ return appendCommandStatus(result, status);
+ }
- status = getGlobalReplicationCoordinator()->processHandshake(txn, handshake);
- return appendCommandStatus(result, status);
- }
+} handshakeCmd;
- } handshakeCmd;
+bool replHandshake(DBClientConnection* conn, const OID& myRID) {
+ string myname = getHostName();
- bool replHandshake(DBClientConnection *conn, const OID& myRID) {
- string myname = getHostName();
+ BSONObjBuilder cmd;
+ cmd.append("handshake", myRID);
- BSONObjBuilder cmd;
- cmd.append("handshake", myRID);
+ BSONObj res;
+ bool ok = conn->runCommand("admin", cmd.obj(), res);
+ // ignoring for now on purpose for older versions
+ LOG(ok ? 1 : 0) << "replHandshake result: " << res << endl;
+ return true;
+}
- BSONObj res;
- bool ok = conn->runCommand( "admin" , cmd.obj() , res );
- // ignoring for now on purpose for older versions
- LOG( ok ? 1 : 0 ) << "replHandshake result: " << res << endl;
+bool ReplSource::_connect(OplogReader* reader, const HostAndPort& host, const OID& myRID) {
+ if (reader->conn()) {
return true;
}
- bool ReplSource::_connect(OplogReader* reader, const HostAndPort& host, const OID& myRID) {
- if (reader->conn()) {
- return true;
- }
-
- if (!reader->connect(host)) {
- return false;
- }
-
- if (!replHandshake(reader->conn(), myRID)) {
- return false;
- }
+ if (!reader->connect(host)) {
+ return false;
+ }
- return true;
+ if (!replHandshake(reader->conn(), myRID)) {
+ return false;
}
+ return true;
+}
- void ReplSource::forceResync( OperationContext* txn, const char *requester ) {
- BSONObj info;
- {
- // This is always a GlobalWrite lock (so no ns/db used from the context)
- invariant(txn->lockState()->isW());
- Lock::TempRelease tempRelease(txn->lockState());
- if (!_connect(&oplogReader, HostAndPort(hostName),
- getGlobalReplicationCoordinator()->getMyRID())) {
- msgassertedNoTrace( 14051 , "unable to connect to resync");
- }
- /* todo use getDatabaseNames() method here */
- bool ok = oplogReader.conn()->runCommand("admin",
- BSON("listDatabases" << 1),
- info,
- QueryOption_SlaveOk);
- massert( 10385 , "Unable to get database list", ok );
+void ReplSource::forceResync(OperationContext* txn, const char* requester) {
+ BSONObj info;
+ {
+ // This is always a GlobalWrite lock (so no ns/db used from the context)
+ invariant(txn->lockState()->isW());
+ Lock::TempRelease tempRelease(txn->lockState());
+
+ if (!_connect(&oplogReader,
+ HostAndPort(hostName),
+ getGlobalReplicationCoordinator()->getMyRID())) {
+ msgassertedNoTrace(14051, "unable to connect to resync");
}
+ /* todo use getDatabaseNames() method here */
+ bool ok = oplogReader.conn()->runCommand(
+ "admin", BSON("listDatabases" << 1), info, QueryOption_SlaveOk);
+ massert(10385, "Unable to get database list", ok);
+ }
- BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
- while( i.moreWithEOO() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- string name = e.embeddedObject().getField( "name" ).valuestr();
- if ( !e.embeddedObject().getBoolField( "empty" ) ) {
- if ( name != "local" ) {
- if ( only.empty() || only == name ) {
- resyncDrop( txn, name );
- }
+ BSONObjIterator i(info.getField("databases").embeddedObject());
+ while (i.moreWithEOO()) {
+ BSONElement e = i.next();
+ if (e.eoo())
+ break;
+ string name = e.embeddedObject().getField("name").valuestr();
+ if (!e.embeddedObject().getBoolField("empty")) {
+ if (name != "local") {
+ if (only.empty() || only == name) {
+ resyncDrop(txn, name);
}
}
}
- syncedTo = Timestamp();
- addDbNextPass.clear();
- save(txn);
- }
-
- void ReplSource::resyncDrop( OperationContext* txn, const string& db ) {
- log() << "resync: dropping database " << db;
- OldClientContext ctx(txn, db);
- dropDatabase(txn, ctx.db());
}
-
- /* grab initial copy of a database from the master */
- void ReplSource::resync(OperationContext* txn, const std::string& dbName) {
- const std::string db(dbName); // need local copy of the name, we're dropping the original
- resyncDrop( txn, db );
-
- {
- log() << "resync: cloning database " << db << " to get an initial copy" << endl;
- ReplInfo r("resync: cloning a database");
-
- CloneOptions cloneOptions;
- cloneOptions.fromDB = db;
- cloneOptions.slaveOk = true;
- cloneOptions.useReplAuth = true;
- cloneOptions.snapshot = true;
- cloneOptions.mayYield = true;
- cloneOptions.mayBeInterrupted = false;
-
- Cloner cloner;
- Status status = cloner.copyDb(txn,
- db,
- hostName.c_str(),
- cloneOptions,
- NULL);
-
- if (!status.isOK()) {
- if (status.code() == ErrorCodes::DatabaseDifferCase) {
- resyncDrop( txn, db );
- log() << "resync: database " << db
- << " not valid on the master due to a name conflict, dropping.";
- return;
- }
- else {
- log() << "resync of " << db << " from " << hostName
- << " failed due to: " << status.toString();
- throw SyncException();
- }
+ syncedTo = Timestamp();
+ addDbNextPass.clear();
+ save(txn);
+}
+
+void ReplSource::resyncDrop(OperationContext* txn, const string& db) {
+ log() << "resync: dropping database " << db;
+ OldClientContext ctx(txn, db);
+ dropDatabase(txn, ctx.db());
+}
+
+/* grab initial copy of a database from the master */
+void ReplSource::resync(OperationContext* txn, const std::string& dbName) {
+ const std::string db(dbName); // need local copy of the name, we're dropping the original
+ resyncDrop(txn, db);
+
+ {
+ log() << "resync: cloning database " << db << " to get an initial copy" << endl;
+ ReplInfo r("resync: cloning a database");
+
+ CloneOptions cloneOptions;
+ cloneOptions.fromDB = db;
+ cloneOptions.slaveOk = true;
+ cloneOptions.useReplAuth = true;
+ cloneOptions.snapshot = true;
+ cloneOptions.mayYield = true;
+ cloneOptions.mayBeInterrupted = false;
+
+ Cloner cloner;
+ Status status = cloner.copyDb(txn, db, hostName.c_str(), cloneOptions, NULL);
+
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::DatabaseDifferCase) {
+ resyncDrop(txn, db);
+ log() << "resync: database " << db
+ << " not valid on the master due to a name conflict, dropping.";
+ return;
+ } else {
+ log() << "resync of " << db << " from " << hostName
+ << " failed due to: " << status.toString();
+ throw SyncException();
}
}
-
- log() << "resync: done with initial clone for db: " << db << endl;
}
- static DatabaseIgnorer ___databaseIgnorer;
+ log() << "resync: done with initial clone for db: " << db << endl;
+}
- void DatabaseIgnorer::doIgnoreUntilAfter( const string &db, const Timestamp &futureOplogTime ) {
- if ( futureOplogTime > _ignores[ db ] ) {
- _ignores[ db ] = futureOplogTime;
- }
+static DatabaseIgnorer ___databaseIgnorer;
+
+void DatabaseIgnorer::doIgnoreUntilAfter(const string& db, const Timestamp& futureOplogTime) {
+ if (futureOplogTime > _ignores[db]) {
+ _ignores[db] = futureOplogTime;
}
+}
- bool DatabaseIgnorer::ignoreAt( const string &db, const Timestamp &currentOplogTime ) {
- if ( _ignores[ db ].isNull() ) {
- return false;
- }
- if ( _ignores[ db ] >= currentOplogTime ) {
- return true;
- } else {
- // The ignore state has expired, so clear it.
- _ignores.erase( db );
- return false;
- }
+bool DatabaseIgnorer::ignoreAt(const string& db, const Timestamp& currentOplogTime) {
+ if (_ignores[db].isNull()) {
+ return false;
+ }
+ if (_ignores[db] >= currentOplogTime) {
+ return true;
+ } else {
+ // The ignore state has expired, so clear it.
+ _ignores.erase(db);
+ return false;
+ }
+}
+
+bool ReplSource::handleDuplicateDbName(OperationContext* txn,
+ const BSONObj& op,
+ const char* ns,
+ const char* db) {
+ // We are already locked at this point
+ if (dbHolder().get(txn, ns) != NULL) {
+ // Database is already present.
+ return true;
+ }
+ BSONElement ts = op.getField("ts");
+ if ((ts.type() == Date || ts.type() == bsonTimestamp) &&
+ ___databaseIgnorer.ignoreAt(db, ts.timestamp())) {
+ // Database is ignored due to a previous indication that it is
+ // missing from master after optime "ts".
+ return false;
+ }
+ if (Database::duplicateUncasedName(db).empty()) {
+ // No duplicate database names are present.
+ return true;
}
- bool ReplSource::handleDuplicateDbName( OperationContext* txn,
- const BSONObj &op,
- const char* ns,
- const char* db ) {
- // We are already locked at this point
- if (dbHolder().get(txn, ns) != NULL) {
- // Database is already present.
- return true;
- }
- BSONElement ts = op.getField( "ts" );
- if ( ( ts.type() == Date || ts.type() == bsonTimestamp ) && ___databaseIgnorer.ignoreAt( db, ts.timestamp() ) ) {
- // Database is ignored due to a previous indication that it is
- // missing from master after optime "ts".
- return false;
- }
- if (Database::duplicateUncasedName(db).empty()) {
- // No duplicate database names are present.
- return true;
+ Timestamp lastTime;
+ bool dbOk = false;
+ {
+ // This is always a GlobalWrite lock (so no ns/db used from the context)
+ invariant(txn->lockState()->isW());
+ Lock::TempRelease(txn->lockState());
+
+ // We always log an operation after executing it (never before), so
+ // a database list will always be valid as of an oplog entry generated
+ // before it was retrieved.
+
+ BSONObj last =
+ oplogReader.findOne(this->ns().c_str(), Query().sort(BSON("$natural" << -1)));
+ if (!last.isEmpty()) {
+ BSONElement ts = last.getField("ts");
+ massert(14032,
+ "Invalid 'ts' in remote log",
+ ts.type() == Date || ts.type() == bsonTimestamp);
+ lastTime = Timestamp(ts.date());
}
- Timestamp lastTime;
- bool dbOk = false;
- {
- // This is always a GlobalWrite lock (so no ns/db used from the context)
- invariant(txn->lockState()->isW());
- Lock::TempRelease(txn->lockState());
-
- // We always log an operation after executing it (never before), so
- // a database list will always be valid as of an oplog entry generated
- // before it was retrieved.
-
- BSONObj last = oplogReader.findOne( this->ns().c_str(), Query().sort( BSON( "$natural" << -1 ) ) );
- if ( !last.isEmpty() ) {
- BSONElement ts = last.getField( "ts" );
- massert(14032, "Invalid 'ts' in remote log",
- ts.type() == Date || ts.type() == bsonTimestamp);
- lastTime = Timestamp( ts.date() );
- }
-
- BSONObj info;
- bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
- massert( 14033, "Unable to get database list", ok );
- BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
- while( i.more() ) {
- BSONElement e = i.next();
-
- const char * name = e.embeddedObject().getField( "name" ).valuestr();
- if ( strcasecmp( name, db ) != 0 )
- continue;
+ BSONObj info;
+ bool ok = oplogReader.conn()->runCommand("admin", BSON("listDatabases" << 1), info);
+ massert(14033, "Unable to get database list", ok);
+ BSONObjIterator i(info.getField("databases").embeddedObject());
+ while (i.more()) {
+ BSONElement e = i.next();
- if ( strcmp( name, db ) == 0 ) {
- // The db exists on master, still need to check that no conflicts exist there.
- dbOk = true;
- continue;
- }
+ const char* name = e.embeddedObject().getField("name").valuestr();
+ if (strcasecmp(name, db) != 0)
+ continue;
- // The master has a db name that conflicts with the requested name.
- dbOk = false;
- break;
+ if (strcmp(name, db) == 0) {
+ // The db exists on master, still need to check that no conflicts exist there.
+ dbOk = true;
+ continue;
}
- }
- if ( !dbOk ) {
- ___databaseIgnorer.doIgnoreUntilAfter( db, lastTime );
- incompleteCloneDbs.erase(db);
- addDbNextPass.erase(db);
- return false;
+ // The master has a db name that conflicts with the requested name.
+ dbOk = false;
+ break;
}
+ }
- // Check for duplicates again, since we released the lock above.
- set< string > duplicates;
- Database::duplicateUncasedName(db, &duplicates);
+ if (!dbOk) {
+ ___databaseIgnorer.doIgnoreUntilAfter(db, lastTime);
+ incompleteCloneDbs.erase(db);
+ addDbNextPass.erase(db);
+ return false;
+ }
- // The database is present on the master and no conflicting databases
- // are present on the master. Drop any local conflicts.
- for( set< string >::const_iterator i = duplicates.begin(); i != duplicates.end(); ++i ) {
- ___databaseIgnorer.doIgnoreUntilAfter( *i, lastTime );
- incompleteCloneDbs.erase(*i);
- addDbNextPass.erase(*i);
+ // Check for duplicates again, since we released the lock above.
+ set<string> duplicates;
+ Database::duplicateUncasedName(db, &duplicates);
- OldClientContext ctx(txn, *i);
- dropDatabase(txn, ctx.db());
- }
+ // The database is present on the master and no conflicting databases
+ // are present on the master. Drop any local conflicts.
+ for (set<string>::const_iterator i = duplicates.begin(); i != duplicates.end(); ++i) {
+ ___databaseIgnorer.doIgnoreUntilAfter(*i, lastTime);
+ incompleteCloneDbs.erase(*i);
+ addDbNextPass.erase(*i);
- massert(14034, "Duplicate database names present after attempting to delete duplicates",
- Database::duplicateUncasedName(db).empty());
- return true;
+ OldClientContext ctx(txn, *i);
+ dropDatabase(txn, ctx.db());
}
- void ReplSource::applyCommand(OperationContext* txn, const BSONObj& op) {
- try {
- Status status = applyCommand_inlock(txn, op);
- if (!status.isOK()) {
- SyncTail sync(nullptr, SyncTail::MultiSyncApplyFunc());
- sync.setHostname(hostName);
- if (sync.shouldRetry(txn, op)) {
- uassert(28639,
- "Failure retrying initial sync update",
- applyCommand_inlock(txn, op).isOK());
- }
+ massert(14034,
+ "Duplicate database names present after attempting to delete duplicates",
+ Database::duplicateUncasedName(db).empty());
+ return true;
+}
+
+void ReplSource::applyCommand(OperationContext* txn, const BSONObj& op) {
+ try {
+ Status status = applyCommand_inlock(txn, op);
+ if (!status.isOK()) {
+ SyncTail sync(nullptr, SyncTail::MultiSyncApplyFunc());
+ sync.setHostname(hostName);
+ if (sync.shouldRetry(txn, op)) {
+ uassert(28639,
+ "Failure retrying initial sync update",
+ applyCommand_inlock(txn, op).isOK());
}
}
- catch ( UserException& e ) {
- log() << "sync: caught user assertion " << e << " while applying op: " << op << endl;;
- }
- catch ( DBException& e ) {
- log() << "sync: caught db exception " << e << " while applying op: " << op << endl;;
- }
-
+ } catch (UserException& e) {
+ log() << "sync: caught user assertion " << e << " while applying op: " << op << endl;
+ ;
+ } catch (DBException& e) {
+ log() << "sync: caught db exception " << e << " while applying op: " << op << endl;
+ ;
}
-
- void ReplSource::applyOperation(OperationContext* txn, Database* db, const BSONObj& op) {
- try {
- Status status = applyOperation_inlock( txn, db, op );
- if (!status.isOK()) {
- SyncTail sync(nullptr, SyncTail::MultiSyncApplyFunc());
- sync.setHostname(hostName);
- if (sync.shouldRetry(txn, op)) {
- uassert(15914,
- "Failure retrying initial sync update",
- applyOperation_inlock(txn, db, op).isOK());
- }
+}
+
+void ReplSource::applyOperation(OperationContext* txn, Database* db, const BSONObj& op) {
+ try {
+ Status status = applyOperation_inlock(txn, db, op);
+ if (!status.isOK()) {
+ SyncTail sync(nullptr, SyncTail::MultiSyncApplyFunc());
+ sync.setHostname(hostName);
+ if (sync.shouldRetry(txn, op)) {
+ uassert(15914,
+ "Failure retrying initial sync update",
+ applyOperation_inlock(txn, db, op).isOK());
}
}
- catch ( UserException& e ) {
- log() << "sync: caught user assertion " << e << " while applying op: " << op << endl;;
- }
- catch ( DBException& e ) {
- log() << "sync: caught db exception " << e << " while applying op: " << op << endl;;
- }
-
+ } catch (UserException& e) {
+ log() << "sync: caught user assertion " << e << " while applying op: " << op << endl;
+ ;
+ } catch (DBException& e) {
+ log() << "sync: caught db exception " << e << " while applying op: " << op << endl;
+ ;
}
+}
- /* local.$oplog.main is of the form:
- { ts: ..., op: <optype>, ns: ..., o: <obj> , o2: <extraobj>, b: <boolflag> }
- ...
- see logOp() comments.
+/* local.$oplog.main is of the form:
+ { ts: ..., op: <optype>, ns: ..., o: <obj> , o2: <extraobj>, b: <boolflag> }
+ ...
+ see logOp() comments.
- @param alreadyLocked caller already put us in write lock if true
- */
- void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn, BSONObj& op, bool alreadyLocked) {
- LOG(6) << "processing op: " << op << endl;
-
- if( op.getStringField("op")[0] == 'n' )
- return;
-
- char clientName[MaxDatabaseNameLen];
- const char *ns = op.getStringField("ns");
- nsToDatabase(ns, clientName);
-
- if ( *ns == '.' ) {
- log() << "skipping bad op in oplog: " << op.toString() << endl;
- return;
- }
- else if ( *ns == 0 ) {
- /*if( op.getStringField("op")[0] != 'n' )*/ {
- log() << "halting replication, bad op in oplog:\n " << op.toString() << endl;
- replAllDead = "bad object in oplog";
- throw SyncException();
- }
- //ns = "local.system.x";
- //nsToDatabase(ns, clientName);
+ @param alreadyLocked caller already put us in write lock if true
+*/
+void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
+ BSONObj& op,
+ bool alreadyLocked) {
+ LOG(6) << "processing op: " << op << endl;
+
+ if (op.getStringField("op")[0] == 'n')
+ return;
+
+ char clientName[MaxDatabaseNameLen];
+ const char* ns = op.getStringField("ns");
+ nsToDatabase(ns, clientName);
+
+ if (*ns == '.') {
+ log() << "skipping bad op in oplog: " << op.toString() << endl;
+ return;
+ } else if (*ns == 0) {
+ /*if( op.getStringField("op")[0] != 'n' )*/ {
+ log() << "halting replication, bad op in oplog:\n " << op.toString() << endl;
+ replAllDead = "bad object in oplog";
+ throw SyncException();
}
+ // ns = "local.system.x";
+ // nsToDatabase(ns, clientName);
+ }
- if ( !only.empty() && only != clientName )
- return;
-
- // Push the CurOp stack for "txn" so each individual oplog entry application is separately
- // reported.
- CurOp individualOp(txn);
- txn->setReplicatedWrites(false);
- const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
- if (replSettings.pretouch &&
- !alreadyLocked/*doesn't make sense if in write lock already*/) {
- if (replSettings.pretouch > 1) {
- /* note: this is bad - should be put in ReplSource. but this is first test... */
- static int countdown;
- verify( countdown >= 0 );
- if( countdown > 0 ) {
- countdown--; // was pretouched on a prev pass
+ if (!only.empty() && only != clientName)
+ return;
+
+ // Push the CurOp stack for "txn" so each individual oplog entry application is separately
+ // reported.
+ CurOp individualOp(txn);
+ txn->setReplicatedWrites(false);
+ const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
+ if (replSettings.pretouch && !alreadyLocked /*doesn't make sense if in write lock already*/) {
+ if (replSettings.pretouch > 1) {
+ /* note: this is bad - should be put in ReplSource. but this is first test... */
+ static int countdown;
+ verify(countdown >= 0);
+ if (countdown > 0) {
+ countdown--; // was pretouched on a prev pass
+ } else {
+ const int m = 4;
+ if (tp.get() == 0) {
+ int nthr = min(8, replSettings.pretouch);
+ nthr = max(nthr, 1);
+ tp.reset(new OldThreadPool(nthr));
}
- else {
- const int m = 4;
- if( tp.get() == 0 ) {
- int nthr = min(8, replSettings.pretouch);
- nthr = max(nthr, 1);
- tp.reset( new OldThreadPool(nthr) );
- }
- vector<BSONObj> v;
- oplogReader.peek(v, replSettings.pretouch);
- unsigned a = 0;
- while( 1 ) {
- if( a >= v.size() ) break;
- unsigned b = a + m - 1; // v[a..b]
- if( b >= v.size() ) b = v.size() - 1;
- tp->schedule(pretouchN, v, a, b);
- DEV cout << "pretouch task: " << a << ".." << b << endl;
- a += m;
- }
- // we do one too...
- pretouchOperation(txn, op);
- tp->join();
- countdown = v.size();
+ vector<BSONObj> v;
+ oplogReader.peek(v, replSettings.pretouch);
+ unsigned a = 0;
+ while (1) {
+ if (a >= v.size())
+ break;
+ unsigned b = a + m - 1; // v[a..b]
+ if (b >= v.size())
+ b = v.size() - 1;
+ tp->schedule(pretouchN, v, a, b);
+ DEV cout << "pretouch task: " << a << ".." << b << endl;
+ a += m;
}
- }
- else {
+ // we do one too...
pretouchOperation(txn, op);
+ tp->join();
+ countdown = v.size();
}
+ } else {
+ pretouchOperation(txn, op);
}
+ }
- unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(txn->lockState()));
+ unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(txn->lockState()));
- if ( replAllDead ) {
- // hmmm why is this check here and not at top of this function? does it get set between top and here?
- log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
- throw SyncException();
- }
+ if (replAllDead) {
+ // hmmm why is this check here and not at top of this function? does it get set between top and here?
+ log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
+ throw SyncException();
+ }
- if (!handleDuplicateDbName(txn, op, ns, clientName)) {
- return;
- }
+ if (!handleDuplicateDbName(txn, op, ns, clientName)) {
+ return;
+ }
- // special case apply for commands to avoid implicit database creation
- if (*op.getStringField("op") == 'c') {
- applyCommand(txn, op);
- return;
- }
+ // special case apply for commands to avoid implicit database creation
+ if (*op.getStringField("op") == 'c') {
+ applyCommand(txn, op);
+ return;
+ }
- // This code executes on the slaves only, so it doesn't need to be sharding-aware since
- // mongos will not send requests there. That's why the last argument is false (do not do
- // version checking).
- OldClientContext ctx(txn, ns, false);
-
- bool empty = !ctx.db()->getDatabaseCatalogEntry()->hasUserData();
- bool incompleteClone = incompleteCloneDbs.count( clientName ) != 0;
-
- LOG(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
-
- if ( ctx.justCreated() || empty || incompleteClone ) {
- // we must add to incomplete list now that setClient has been called
- incompleteCloneDbs.insert( clientName );
- if ( nClonedThisPass ) {
- /* we only clone one database per pass, even if a lot need done. This helps us
- avoid overflowing the master's transaction log by doing too much work before going
- back to read more transactions. (Imagine a scenario of slave startup where we try to
- clone 100 databases in one pass.)
- */
- addDbNextPass.insert( clientName );
- }
- else {
- if ( incompleteClone ) {
- log() << "An earlier initial clone of '" << clientName << "' did not complete, now resyncing." << endl;
- }
- save(txn);
- OldClientContext ctx(txn, ns);
- nClonedThisPass++;
- resync(txn, ctx.db()->name());
- addDbNextPass.erase(clientName);
- incompleteCloneDbs.erase( clientName );
+ // This code executes on the slaves only, so it doesn't need to be sharding-aware since
+ // mongos will not send requests there. That's why the last argument is false (do not do
+ // version checking).
+ OldClientContext ctx(txn, ns, false);
+
+ bool empty = !ctx.db()->getDatabaseCatalogEntry()->hasUserData();
+ bool incompleteClone = incompleteCloneDbs.count(clientName) != 0;
+
+ LOG(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty
+ << ", incompleteClone: " << incompleteClone << endl;
+
+ if (ctx.justCreated() || empty || incompleteClone) {
+ // we must add to incomplete list now that setClient has been called
+ incompleteCloneDbs.insert(clientName);
+ if (nClonedThisPass) {
+ /* we only clone one database per pass, even if a lot need done. This helps us
+ avoid overflowing the master's transaction log by doing too much work before going
+ back to read more transactions. (Imagine a scenario of slave startup where we try to
+ clone 100 databases in one pass.)
+ */
+ addDbNextPass.insert(clientName);
+ } else {
+ if (incompleteClone) {
+ log() << "An earlier initial clone of '" << clientName
+ << "' did not complete, now resyncing." << endl;
}
save(txn);
+ OldClientContext ctx(txn, ns);
+ nClonedThisPass++;
+ resync(txn, ctx.db()->name());
+ addDbNextPass.erase(clientName);
+ incompleteCloneDbs.erase(clientName);
}
- else {
- applyOperation(txn, ctx.db(), op);
- addDbNextPass.erase( clientName );
- }
+ save(txn);
+ } else {
+ applyOperation(txn, ctx.db(), op);
+ addDbNextPass.erase(clientName);
}
+}
- void ReplSource::syncToTailOfRemoteLog() {
- string _ns = ns();
- BSONObjBuilder b;
- if ( !only.empty() ) {
- b.appendRegex("ns", string("^") + pcrecpp::RE::QuoteMeta( only ));
- }
- BSONObj last = oplogReader.findOne( _ns.c_str(), Query( b.done() ).sort( BSON( "$natural" << -1 ) ) );
- if ( !last.isEmpty() ) {
- BSONElement ts = last.getField( "ts" );
- massert(10386, "non Date ts found: " + last.toString(),
- ts.type() == Date || ts.type() == bsonTimestamp);
- syncedTo = Timestamp( ts.date() );
- }
+void ReplSource::syncToTailOfRemoteLog() {
+ string _ns = ns();
+ BSONObjBuilder b;
+ if (!only.empty()) {
+ b.appendRegex("ns", string("^") + pcrecpp::RE::QuoteMeta(only));
+ }
+ BSONObj last = oplogReader.findOne(_ns.c_str(), Query(b.done()).sort(BSON("$natural" << -1)));
+ if (!last.isEmpty()) {
+ BSONElement ts = last.getField("ts");
+ massert(10386,
+ "non Date ts found: " + last.toString(),
+ ts.type() == Date || ts.type() == bsonTimestamp);
+ syncedTo = Timestamp(ts.date());
}
+}
- class ReplApplyBatchSize : public ServerParameter {
- public:
- ReplApplyBatchSize()
- : ServerParameter( ServerParameterSet::getGlobal(), "replApplyBatchSize" ),
- _value( 1 ) {
- }
+class ReplApplyBatchSize : public ServerParameter {
+public:
+ ReplApplyBatchSize()
+ : ServerParameter(ServerParameterSet::getGlobal(), "replApplyBatchSize"), _value(1) {}
- int get() const { return _value; }
+ int get() const {
+ return _value;
+ }
+
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const string& name) {
+ b.append(name, _value);
+ }
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const string& name) {
- b.append( name, _value );
+ virtual Status set(const BSONElement& newValuElement) {
+ return set(newValuElement.numberInt());
+ }
+
+ virtual Status set(int b) {
+ if (b < 1 || b > 1024) {
+ return Status(ErrorCodes::BadValue, "replApplyBatchSize has to be >= 1 and < 1024");
}
- virtual Status set( const BSONElement& newValuElement ) {
- return set( newValuElement.numberInt() );
+ const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
+ if (replSettings.slavedelay != 0 && b > 1) {
+ return Status(ErrorCodes::BadValue, "can't use a batch size > 1 with slavedelay");
+ }
+ if (!replSettings.slave) {
+ return Status(ErrorCodes::BadValue,
+ "can't set replApplyBatchSize on a non-slave machine");
}
- virtual Status set( int b ) {
- if( b < 1 || b > 1024 ) {
- return Status( ErrorCodes::BadValue,
- "replApplyBatchSize has to be >= 1 and < 1024" );
- }
+ _value = b;
+ return Status::OK();
+ }
- const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
- if ( replSettings.slavedelay != 0 && b > 1 ) {
- return Status( ErrorCodes::BadValue,
- "can't use a batch size > 1 with slavedelay" );
- }
- if ( ! replSettings.slave ) {
- return Status( ErrorCodes::BadValue,
- "can't set replApplyBatchSize on a non-slave machine" );
- }
+ virtual Status setFromString(const string& str) {
+ return set(atoi(str.c_str()));
+ }
- _value = b;
- return Status::OK();
- }
+ int _value;
- virtual Status setFromString( const string& str ) {
- return set( atoi( str.c_str() ) );
- }
+} replApplyBatchSize;
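+
+// Illustration only (not taken from the sources): assuming the standard ServerParameter
+// plumbing applies to replApplyBatchSize, it could be set at startup with
+// "--setParameter replApplyBatchSize=<n>" or at runtime on a --slave node via
+//   db.adminCommand({setParameter: 1, replApplyBatchSize: 16})
+// subject to the checks in set() above (value in [1, 1024], and > 1 only without slavedelay).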
- int _value;
-
- } replApplyBatchSize;
-
- /* slave: pull some data from the master's oplog
- note: not yet in db mutex at this point.
- @return -1 error
- 0 ok, don't sleep
- 1 ok, sleep
- */
- int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
- int okResultCode = 1;
- string ns = string("local.oplog.$") + sourceName();
- LOG(2) << "sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
-
- bool tailing = true;
- oplogReader.tailCheck();
-
- bool initial = syncedTo.isNull();
-
- if ( !oplogReader.haveCursor() || initial ) {
- if ( initial ) {
- // Important to grab last oplog timestamp before listing databases.
- syncToTailOfRemoteLog();
- BSONObj info;
- bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
- massert( 10389 , "Unable to get database list", ok );
- BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
- while( i.moreWithEOO() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- string name = e.embeddedObject().getField( "name" ).valuestr();
- if ( !e.embeddedObject().getBoolField( "empty" ) ) {
- if ( name != "local" ) {
- if ( only.empty() || only == name ) {
- LOG( 2 ) << "adding to 'addDbNextPass': " << name << endl;
- addDbNextPass.insert( name );
- }
+/* slave: pull some data from the master's oplog
+ note: not yet in db mutex at this point.
+ @return -1 error
+ 0 ok, don't sleep
+ 1 ok, sleep
+*/
+int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
+ int okResultCode = 1;
+ string ns = string("local.oplog.$") + sourceName();
+ LOG(2) << "sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
+
+ bool tailing = true;
+ oplogReader.tailCheck();
+
+ bool initial = syncedTo.isNull();
+
+ if (!oplogReader.haveCursor() || initial) {
+ if (initial) {
+ // Important to grab last oplog timestamp before listing databases.
+ syncToTailOfRemoteLog();
+ BSONObj info;
+ bool ok = oplogReader.conn()->runCommand("admin", BSON("listDatabases" << 1), info);
+ massert(10389, "Unable to get database list", ok);
+ BSONObjIterator i(info.getField("databases").embeddedObject());
+ while (i.moreWithEOO()) {
+ BSONElement e = i.next();
+ if (e.eoo())
+ break;
+ string name = e.embeddedObject().getField("name").valuestr();
+ if (!e.embeddedObject().getBoolField("empty")) {
+ if (name != "local") {
+ if (only.empty() || only == name) {
+ LOG(2) << "adding to 'addDbNextPass': " << name << endl;
+ addDbNextPass.insert(name);
}
}
}
- // obviously global isn't ideal, but non-repl set is old so
- // keeping it simple
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- save(txn);
}
+ // obviously global isn't ideal, but non-repl set is old so
+ // keeping it simple
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ save(txn);
+ }
- BSONObjBuilder gte;
- gte.append("$gte", syncedTo);
- BSONObjBuilder query;
- query.append("ts", gte.done());
- if ( !only.empty() ) {
- // note we may here skip a LOT of data table scanning, a lot of work for the master.
- // maybe append "\\." here?
- query.appendRegex("ns", string("^") + pcrecpp::RE::QuoteMeta( only ));
- }
- BSONObj queryObj = query.done();
- // e.g. queryObj = { ts: { $gte: syncedTo } }
+ BSONObjBuilder gte;
+ gte.append("$gte", syncedTo);
+ BSONObjBuilder query;
+ query.append("ts", gte.done());
+ if (!only.empty()) {
+            // note: here we may skip a LOT of data table scanning, a lot of work for the master.
+ // maybe append "\\." here?
+ query.appendRegex("ns", string("^") + pcrecpp::RE::QuoteMeta(only));
+ }
+ BSONObj queryObj = query.done();
+ // e.g. queryObj = { ts: { $gte: syncedTo } }
- oplogReader.tailingQuery(ns.c_str(), queryObj);
- tailing = false;
+ oplogReader.tailingQuery(ns.c_str(), queryObj);
+ tailing = false;
+ } else {
+ LOG(2) << "tailing=true\n";
+ }
+
+ if (!oplogReader.haveCursor()) {
+ log() << "dbclient::query returns null (conn closed?)" << endl;
+ oplogReader.resetConnection();
+ return -1;
+ }
+
+ // show any deferred database creates from a previous pass
+ {
+ set<string>::iterator i = addDbNextPass.begin();
+ if (i != addDbNextPass.end()) {
+ BSONObjBuilder b;
+ b.append("ns", *i + '.');
+ b.append("op", "db");
+ BSONObj op = b.done();
+ _sync_pullOpLog_applyOperation(txn, op, false);
+ }
+ }
+
+ if (!oplogReader.more()) {
+ if (tailing) {
+ LOG(2) << "tailing & no new activity\n";
+ okResultCode = 0; // don't sleep
+
+ } else {
+ log() << ns << " oplog is empty" << endl;
}
- else {
- LOG(2) << "tailing=true\n";
+ {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ save(txn);
}
+ return okResultCode;
+ }
- if( !oplogReader.haveCursor() ) {
- log() << "dbclient::query returns null (conn closed?)" << endl;
- oplogReader.resetConnection();
- return -1;
+ Timestamp nextOpTime;
+ {
+ BSONObj op = oplogReader.next();
+ BSONElement ts = op.getField("ts");
+ if (ts.type() != Date && ts.type() != bsonTimestamp) {
+ string err = op.getStringField("$err");
+ if (!err.empty()) {
+ // 13051 is "tailable cursor requested on non capped collection"
+ if (op.getIntField("code") == 13051) {
+ log() << "trying to slave off of a non-master" << '\n';
+ massert(13344, "trying to slave off of a non-master", false);
+ } else {
+ error() << "$err reading remote oplog: " + err << '\n';
+ massert(10390, "got $err reading remote oplog", false);
+ }
+ } else {
+ error() << "bad object read from remote oplog: " << op.toString() << '\n';
+ massert(10391, "bad object read from remote oplog", false);
+ }
}
- // show any deferred database creates from a previous pass
- {
- set<string>::iterator i = addDbNextPass.begin();
- if ( i != addDbNextPass.end() ) {
- BSONObjBuilder b;
- b.append("ns", *i + '.');
- b.append("op", "db");
- BSONObj op = b.done();
- _sync_pullOpLog_applyOperation(txn, op, false);
+ nextOpTime = Timestamp(ts.date());
+ LOG(2) << "first op time received: " << nextOpTime.toString() << '\n';
+ if (initial) {
+ LOG(1) << "initial run\n";
+ }
+ if (tailing) {
+ if (!(syncedTo < nextOpTime)) {
+ warning() << "ASSERTION failed : syncedTo < nextOpTime" << endl;
+ log() << "syncTo: " << syncedTo.toStringLong() << endl;
+ log() << "nextOpTime: " << nextOpTime.toStringLong() << endl;
+ verify(false);
}
+ oplogReader.putBack(op); // op will be processed in the loop below
+ nextOpTime = Timestamp(); // will reread the op below
+ } else if (nextOpTime != syncedTo) { // didn't get what we queried for - error
+ log() << "nextOpTime " << nextOpTime.toStringLong() << ' '
+ << ((nextOpTime < syncedTo) ? "<??" : ">") << " syncedTo "
+ << syncedTo.toStringLong() << '\n'
+ << "time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n"
+ << "tailing: " << tailing << '\n' << "data too stale, halting replication"
+ << endl;
+ replInfo = replAllDead = "data too stale halted replication";
+ verify(syncedTo < nextOpTime);
+ throw SyncException();
+ } else {
+            /* t == syncedTo, so the first op was applied previously, or it is the first op of the initial query and need not be applied. */
}
+ }
- if ( !oplogReader.more() ) {
- if ( tailing ) {
- LOG(2) << "tailing & no new activity\n";
- okResultCode = 0; // don't sleep
+ // apply operations
+ {
+ int n = 0;
+ time_t saveLast = time(0);
+ while (1) {
+ // we need "&& n" to assure we actually process at least one op to get a sync
+ // point recorded in the first place.
+ const bool moreInitialSyncsPending = !addDbNextPass.empty() && n;
- }
- else {
- log() << ns << " oplog is empty" << endl;
- }
- {
+ if (moreInitialSyncsPending || !oplogReader.more()) {
ScopedTransaction transaction(txn, MODE_X);
Lock::GlobalWrite lk(txn->lockState());
- save(txn);
- }
- return okResultCode;
- }
- Timestamp nextOpTime;
- {
- BSONObj op = oplogReader.next();
- BSONElement ts = op.getField("ts");
- if ( ts.type() != Date && ts.type() != bsonTimestamp ) {
- string err = op.getStringField("$err");
- if ( !err.empty() ) {
- // 13051 is "tailable cursor requested on non capped collection"
- if (op.getIntField("code") == 13051) {
- log() << "trying to slave off of a non-master" << '\n';
- massert( 13344 , "trying to slave off of a non-master", false );
- }
- else {
- error() << "$err reading remote oplog: " + err << '\n';
- massert( 10390 , "got $err reading remote oplog", false );
- }
+ if (tailing) {
+ okResultCode = 0; // don't sleep
}
- else {
- error() << "bad object read from remote oplog: " << op.toString() << '\n';
- massert( 10391 , "bad object read from remote oplog", false);
- }
- }
- nextOpTime = Timestamp( ts.date() );
- LOG(2) << "first op time received: " << nextOpTime.toString() << '\n';
- if ( initial ) {
- LOG(1) << "initial run\n";
- }
- if( tailing ) {
- if( !( syncedTo < nextOpTime ) ) {
- warning() << "ASSERTION failed : syncedTo < nextOpTime" << endl;
- log() << "syncTo: " << syncedTo.toStringLong() << endl;
- log() << "nextOpTime: " << nextOpTime.toStringLong() << endl;
- verify(false);
- }
- oplogReader.putBack( op ); // op will be processed in the loop below
- nextOpTime = Timestamp(); // will reread the op below
- }
- else if ( nextOpTime != syncedTo ) { // didn't get what we queried for - error
- log()
- << "nextOpTime " << nextOpTime.toStringLong() << ' '
- << ((nextOpTime < syncedTo) ? "<??" : ">")
- << " syncedTo " << syncedTo.toStringLong() << '\n'
- << "time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs())
- << "sec\n"
- << "tailing: " << tailing << '\n'
- << "data too stale, halting replication" << endl;
- replInfo = replAllDead = "data too stale halted replication";
- verify( syncedTo < nextOpTime );
- throw SyncException();
- }
- else {
- /* t == syncedTo, so the first op was applied previously or it is the first op of initial query and need not be applied. */
+ syncedTo = nextOpTime;
+ save(txn); // note how far we are synced up to now
+ nApplied = n;
+ break;
}
- }
- // apply operations
- {
- int n = 0;
- time_t saveLast = time(0);
- while ( 1 ) {
- // we need "&& n" to assure we actually process at least one op to get a sync
- // point recorded in the first place.
- const bool moreInitialSyncsPending = !addDbNextPass.empty() && n;
-
- if ( moreInitialSyncsPending || !oplogReader.more() ) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ OCCASIONALLY if (n > 0 && (n > 100000 || time(0) - saveLast > 60)) {
+ // periodically note our progress, in case we are doing a lot of work and crash
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ syncedTo = nextOpTime;
+ // can't update local log ts since there are pending operations from our peer
+ save(txn);
+ log() << "checkpoint applied " << n << " operations" << endl;
+ log() << "syncedTo: " << syncedTo.toStringLong() << endl;
+ saveLast = time(0);
+ n = 0;
+ }
- if (tailing) {
- okResultCode = 0; // don't sleep
- }
+ BSONObj op = oplogReader.next();
- syncedTo = nextOpTime;
- save(txn); // note how far we are synced up to now
- nApplied = n;
- break;
+ int b = replApplyBatchSize.get();
+ bool justOne = b == 1;
+ unique_ptr<Lock::GlobalWrite> lk(justOne ? 0 : new Lock::GlobalWrite(txn->lockState()));
+ while (1) {
+ BSONElement ts = op.getField("ts");
+ if (!(ts.type() == Date || ts.type() == bsonTimestamp)) {
+ log() << "sync error: problem querying remote oplog record" << endl;
+ log() << "op: " << op.toString() << endl;
+ log() << "halting replication" << endl;
+ replInfo = replAllDead = "sync error: no ts found querying remote oplog record";
+ throw SyncException();
}
-
- OCCASIONALLY if( n > 0 && ( n > 100000 || time(0) - saveLast > 60 ) ) {
- // periodically note our progress, in case we are doing a lot of work and crash
+ Timestamp last = nextOpTime;
+ nextOpTime = Timestamp(ts.date());
+ if (!(last < nextOpTime)) {
+ log() << "sync error: last applied optime at slave >= nextOpTime from master"
+ << endl;
+ log() << " last: " << last.toStringLong() << endl;
+ log() << " nextOpTime: " << nextOpTime.toStringLong() << endl;
+ log() << " halting replication" << endl;
+ replInfo = replAllDead = "sync error last >= nextOpTime";
+ uassert(
+ 10123,
+ "replication error last applied optime at slave >= nextOpTime from master",
+ false);
+ }
+ const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
+ if (replSettings.slavedelay &&
+ (unsigned(time(0)) < nextOpTime.getSecs() + replSettings.slavedelay)) {
+ verify(justOne);
+ oplogReader.putBack(op);
+ _sleepAdviceTime = nextOpTime.getSecs() + replSettings.slavedelay + 1;
ScopedTransaction transaction(txn, MODE_X);
Lock::GlobalWrite lk(txn->lockState());
- syncedTo = nextOpTime;
- // can't update local log ts since there are pending operations from our peer
- save(txn);
- log() << "checkpoint applied " << n << " operations" << endl;
+ if (n > 0) {
+ syncedTo = last;
+ save(txn);
+ }
+ log() << "applied " << n << " operations" << endl;
log() << "syncedTo: " << syncedTo.toStringLong() << endl;
- saveLast = time(0);
- n = 0;
+ log() << "waiting until: " << _sleepAdviceTime << " to continue" << endl;
+ return okResultCode;
}
- BSONObj op = oplogReader.next();
-
- int b = replApplyBatchSize.get();
- bool justOne = b == 1;
- unique_ptr<Lock::GlobalWrite> lk(justOne ? 0 : new Lock::GlobalWrite(txn->lockState()));
- while( 1 ) {
-
- BSONElement ts = op.getField("ts");
- if( !( ts.type() == Date || ts.type() == bsonTimestamp ) ) {
- log() << "sync error: problem querying remote oplog record" << endl;
- log() << "op: " << op.toString() << endl;
- log() << "halting replication" << endl;
- replInfo = replAllDead = "sync error: no ts found querying remote oplog record";
- throw SyncException();
- }
- Timestamp last = nextOpTime;
- nextOpTime = Timestamp( ts.date() );
- if ( !( last < nextOpTime ) ) {
- log() << "sync error: last applied optime at slave >= nextOpTime from master" << endl;
- log() << " last: " << last.toStringLong() << endl;
- log() << " nextOpTime: " << nextOpTime.toStringLong() << endl;
- log() << " halting replication" << endl;
- replInfo = replAllDead = "sync error last >= nextOpTime";
- uassert( 10123 , "replication error last applied optime at slave >= nextOpTime from master", false);
- }
- const ReplSettings& replSettings =
- getGlobalReplicationCoordinator()->getSettings();
- if ( replSettings.slavedelay && ( unsigned( time( 0 ) ) < nextOpTime.getSecs() + replSettings.slavedelay ) ) {
- verify( justOne );
- oplogReader.putBack( op );
- _sleepAdviceTime = nextOpTime.getSecs() + replSettings.slavedelay + 1;
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- if ( n > 0 ) {
- syncedTo = last;
- save(txn);
- }
- log() << "applied " << n << " operations" << endl;
- log() << "syncedTo: " << syncedTo.toStringLong() << endl;
- log() << "waiting until: " << _sleepAdviceTime << " to continue" << endl;
- return okResultCode;
- }
-
- _sync_pullOpLog_applyOperation(txn, op, !justOne);
- n++;
+ _sync_pullOpLog_applyOperation(txn, op, !justOne);
+ n++;
- if( --b == 0 )
- break;
- // if to here, we are doing mulpile applications in a singel write lock acquisition
- if( !oplogReader.moreInCurrentBatch() ) {
- // break if no more in batch so we release lock while reading from the master
- break;
- }
- op = oplogReader.next();
+ if (--b == 0)
+ break;
+                // if we get here, we are doing multiple applications in a single write lock acquisition
+ if (!oplogReader.moreInCurrentBatch()) {
+ // break if no more in batch so we release lock while reading from the master
+ break;
}
+ op = oplogReader.next();
}
}
-
- return okResultCode;
}
+ return okResultCode;
+}
- /* note: not yet in mutex at this point.
- returns >= 0 if ok. return -1 if you want to reconnect.
- return value of zero indicates no sleep necessary before next call
- */
- int ReplSource::sync(OperationContext* txn, int& nApplied) {
- _sleepAdviceTime = 0;
- ReplInfo r("sync");
- if (!serverGlobalParams.quiet) {
- LogstreamBuilder l = log();
- l << "syncing from ";
- if( sourceName() != "main" ) {
- l << "source:" << sourceName() << ' ';
- }
- l << "host:" << hostName << endl;
- }
- nClonedThisPass = 0;
-
- // FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
- if ((string("localhost") == hostName || string("127.0.0.1") == hostName) &&
- serverGlobalParams.port == ServerGlobalParams::DefaultDBPort) {
- log() << "can't sync from self (localhost). sources configuration may be wrong." << endl;
- sleepsecs(5);
- return -1;
- }
- if ( !_connect(&oplogReader,
- HostAndPort(hostName),
- getGlobalReplicationCoordinator()->getMyRID()) ) {
- LOG(4) << "can't connect to sync source" << endl;
- return -1;
- }
+/* note: not yet in mutex at this point.
+ returns >= 0 if ok. return -1 if you want to reconnect.
+ return value of zero indicates no sleep necessary before next call
+*/
+int ReplSource::sync(OperationContext* txn, int& nApplied) {
+ _sleepAdviceTime = 0;
+ ReplInfo r("sync");
+ if (!serverGlobalParams.quiet) {
+ LogstreamBuilder l = log();
+ l << "syncing from ";
+ if (sourceName() != "main") {
+ l << "source:" << sourceName() << ' ';
+ }
+ l << "host:" << hostName << endl;
+ }
+ nClonedThisPass = 0;
+
+ // FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
+ if ((string("localhost") == hostName || string("127.0.0.1") == hostName) &&
+ serverGlobalParams.port == ServerGlobalParams::DefaultDBPort) {
+ log() << "can't sync from self (localhost). sources configuration may be wrong." << endl;
+ sleepsecs(5);
+ return -1;
+ }
- return _sync_pullOpLog(txn, nApplied);
+ if (!_connect(
+ &oplogReader, HostAndPort(hostName), getGlobalReplicationCoordinator()->getMyRID())) {
+ LOG(4) << "can't connect to sync source" << endl;
+ return -1;
}
- /* --------------------------------------------------------------*/
+ return _sync_pullOpLog(txn, nApplied);
+}
- static bool _replMainStarted = false;
+/* --------------------------------------------------------------*/
- /*
- TODO:
- _ source has autoptr to the cursor
- _ reuse that cursor when we can
- */
+static bool _replMainStarted = false;
- /* returns: # of seconds to sleep before next pass
- 0 = no sleep recommended
- 1 = special sentinel indicating adaptive sleep recommended
- */
- int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nApplied) {
- {
- ReplInfo r("replMain load sources");
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- ReplSource::loadAll(txn, sources);
+/*
+TODO:
+_ source has autoptr to the cursor
+_ reuse that cursor when we can
+*/
- // only need this param for initial reset
- _replMainStarted = true;
- }
+/* returns: # of seconds to sleep before next pass
+ 0 = no sleep recommended
+ 1 = special sentinel indicating adaptive sleep recommended
+*/
+int _replMain(OperationContext* txn, ReplSource::SourceVector& sources, int& nApplied) {
+ {
+ ReplInfo r("replMain load sources");
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ ReplSource::loadAll(txn, sources);
- if ( sources.empty() ) {
- /* replication is not configured yet (for --slave) in local.sources. Poll for config it
- every 20 seconds.
- */
- log() << "no source given, add a master to local.sources to start replication" << endl;
- return 20;
- }
+ // only need this param for initial reset
+ _replMainStarted = true;
+ }
- int sleepAdvice = 1;
- for ( ReplSource::SourceVector::iterator i = sources.begin(); i != sources.end(); i++ ) {
- ReplSource *s = i->get();
- int res = -1;
- try {
- res = s->sync(txn, nApplied);
- bool moreToSync = s->haveMoreDbsToSync();
- if( res < 0 ) {
- sleepAdvice = 3;
- }
- else if( moreToSync ) {
- sleepAdvice = 0;
- }
- else if ( s->sleepAdvice() ) {
- sleepAdvice = s->sleepAdvice();
- }
- else
- sleepAdvice = res;
- }
- catch ( const SyncException& ) {
- log() << "caught SyncException" << endl;
- return 10;
- }
- catch ( AssertionException& e ) {
- if ( e.severe() ) {
- log() << "replMain AssertionException " << e.what() << endl;
- return 60;
- }
- else {
- log() << "AssertionException " << e.what() << endl;
- }
- replInfo = "replMain caught AssertionException";
- }
- catch ( const DBException& e ) {
- log() << "DBException " << e.what() << endl;
- replInfo = "replMain caught DBException";
- }
- catch ( const std::exception &e ) {
- log() << "std::exception " << e.what() << endl;
- replInfo = "replMain caught std::exception";
- }
- catch ( ... ) {
- log() << "unexpected exception during replication. replication will halt" << endl;
- replAllDead = "caught unexpected exception during replication";
- }
- if ( res < 0 )
- s->oplogReader.resetConnection();
- }
- return sleepAdvice;
+ if (sources.empty()) {
+        /* replication is not configured yet (for --slave) in local.sources. Poll for a config
+           every 20 seconds.
+ */
+ log() << "no source given, add a master to local.sources to start replication" << endl;
+ return 20;
}
- static void replMain(OperationContext* txn) {
- ReplSource::SourceVector sources;
- while ( 1 ) {
- int s = 0;
- {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- if ( replAllDead ) {
- // throttledForceResyncDead can throw
- if ( !getGlobalReplicationCoordinator()->getSettings().autoresync ||
- !ReplSource::throttledForceResyncDead( txn, "auto" ) ) {
- log() << "all sources dead: " << replAllDead << ", sleeping for 5 seconds" << endl;
- break;
- }
- }
- verify( syncing == 0 ); // i.e., there is only one sync thread running. we will want to change/fix this.
- syncing++;
+ int sleepAdvice = 1;
+ for (ReplSource::SourceVector::iterator i = sources.begin(); i != sources.end(); i++) {
+ ReplSource* s = i->get();
+ int res = -1;
+ try {
+ res = s->sync(txn, nApplied);
+ bool moreToSync = s->haveMoreDbsToSync();
+ if (res < 0) {
+ sleepAdvice = 3;
+ } else if (moreToSync) {
+ sleepAdvice = 0;
+ } else if (s->sleepAdvice()) {
+ sleepAdvice = s->sleepAdvice();
+ } else
+ sleepAdvice = res;
+ } catch (const SyncException&) {
+ log() << "caught SyncException" << endl;
+ return 10;
+ } catch (AssertionException& e) {
+ if (e.severe()) {
+ log() << "replMain AssertionException " << e.what() << endl;
+ return 60;
+ } else {
+ log() << "AssertionException " << e.what() << endl;
}
+ replInfo = "replMain caught AssertionException";
+ } catch (const DBException& e) {
+ log() << "DBException " << e.what() << endl;
+ replInfo = "replMain caught DBException";
+ } catch (const std::exception& e) {
+ log() << "std::exception " << e.what() << endl;
+ replInfo = "replMain caught std::exception";
+ } catch (...) {
+ log() << "unexpected exception during replication. replication will halt" << endl;
+ replAllDead = "caught unexpected exception during replication";
+ }
+ if (res < 0)
+ s->oplogReader.resetConnection();
+ }
+ return sleepAdvice;
+}
- try {
- int nApplied = 0;
- s = _replMain(txn, sources, nApplied);
- if( s == 1 ) {
- if( nApplied == 0 ) s = 2;
- else if( nApplied > 100 ) {
- // sleep very little - just enough that we aren't truly hammering master
- sleepmillis(75);
- s = 0;
- }
+static void replMain(OperationContext* txn) {
+ ReplSource::SourceVector sources;
+ while (1) {
+ int s = 0;
+ {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ if (replAllDead) {
+ // throttledForceResyncDead can throw
+ if (!getGlobalReplicationCoordinator()->getSettings().autoresync ||
+ !ReplSource::throttledForceResyncDead(txn, "auto")) {
+ log() << "all sources dead: " << replAllDead << ", sleeping for 5 seconds"
+ << endl;
+ break;
}
}
- catch (...) {
- log() << "caught exception in _replMain" << endl;
- s = 4;
- }
-
- {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- verify( syncing == 1 );
- syncing--;
- }
-
- if( relinquishSyncingSome ) {
- relinquishSyncingSome = 0;
- s = 1; // sleep before going back in to syncing=1
- }
+            // i.e., there is only one sync thread running. we will want to change/fix this.
+            verify(syncing == 0);
+ syncing++;
+ }
- if ( s ) {
- stringstream ss;
- ss << "sleep " << s << " sec before next pass";
- string msg = ss.str();
- if (!serverGlobalParams.quiet)
- log() << msg << endl;
- ReplInfo r(msg.c_str());
- sleepsecs(s);
+ try {
+ int nApplied = 0;
+ s = _replMain(txn, sources, nApplied);
+ if (s == 1) {
+ if (nApplied == 0)
+ s = 2;
+ else if (nApplied > 100) {
+ // sleep very little - just enough that we aren't truly hammering master
+ sleepmillis(75);
+ s = 0;
+ }
}
+ } catch (...) {
+ log() << "caught exception in _replMain" << endl;
+ s = 4;
}
- }
-
- static void replMasterThread() {
- sleepsecs(4);
- Client::initThread("replmaster");
- int toSleep = 10;
- while( 1 ) {
- sleepsecs(toSleep);
- // Write a keep-alive like entry to the log. This will make things like
- // printReplicationStatus() and printSlaveReplicationStatus() stay up-to-date even
- // when things are idle.
- OperationContextImpl txn;
- AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
+ {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ verify(syncing == 1);
+ syncing--;
+ }
- Lock::GlobalWrite globalWrite(txn.lockState(), 1);
- if (globalWrite.isLocked()) {
- toSleep = 10;
+ if (relinquishSyncingSome) {
+ relinquishSyncingSome = 0;
+ s = 1; // sleep before going back in to syncing=1
+ }
- try {
- WriteUnitOfWork wuow(&txn);
- getGlobalServiceContext()->getOpObserver()->onOpMessage(&txn, BSONObj());
- wuow.commit();
- }
- catch (...) {
- log() << "caught exception in replMasterThread()" << endl;
- }
- }
- else {
- LOG(5) << "couldn't logKeepalive" << endl;
- toSleep = 1;
- }
+ if (s) {
+ stringstream ss;
+ ss << "sleep " << s << " sec before next pass";
+ string msg = ss.str();
+ if (!serverGlobalParams.quiet)
+ log() << msg << endl;
+ ReplInfo r(msg.c_str());
+ sleepsecs(s);
}
}
-
- static void replSlaveThread() {
- sleepsecs(1);
- Client::initThread("replslave");
-
+}
+
+static void replMasterThread() {
+ sleepsecs(4);
+ Client::initThread("replmaster");
+ int toSleep = 10;
+ while (1) {
+ sleepsecs(toSleep);
+
+ // Write a keep-alive like entry to the log. This will make things like
+ // printReplicationStatus() and printSlaveReplicationStatus() stay up-to-date even
+ // when things are idle.
OperationContextImpl txn;
AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
- DisableDocumentValidation validationDisabler(&txn);
- while ( 1 ) {
+ Lock::GlobalWrite globalWrite(txn.lockState(), 1);
+ if (globalWrite.isLocked()) {
+ toSleep = 10;
+
try {
- replMain(&txn);
- sleepsecs(5);
- }
- catch ( AssertionException& ) {
- ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
- log() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
- sleepsecs(300);
- }
- catch ( DBException& e ) {
- log() << "exception in replSlaveThread(): " << e.what()
- << ", sleeping 5 minutes before retry" << endl;
- sleepsecs(300);
- }
- catch ( ... ) {
- log() << "error in replSlaveThread(): sleeping 5 minutes before retry" << endl;
- sleepsecs(300);
+ WriteUnitOfWork wuow(&txn);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(&txn, BSONObj());
+ wuow.commit();
+ } catch (...) {
+ log() << "caught exception in replMasterThread()" << endl;
}
+ } else {
+ LOG(5) << "couldn't logKeepalive" << endl;
+ toSleep = 1;
}
}
+}
- void startMasterSlave(OperationContext* txn) {
-
- const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
- if( !replSettings.slave && !replSettings.master )
- return;
+static void replSlaveThread() {
+ sleepsecs(1);
+ Client::initThread("replslave");
- AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();
+ OperationContextImpl txn;
+ AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
+ DisableDocumentValidation validationDisabler(&txn);
- {
- ReplSource temp(txn); // Ensures local.me is populated
+ while (1) {
+ try {
+ replMain(&txn);
+ sleepsecs(5);
+ } catch (AssertionException&) {
+ ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
+ log() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
+ } catch (DBException& e) {
+ log() << "exception in replSlaveThread(): " << e.what()
+ << ", sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
+ } catch (...) {
+ log() << "error in replSlaveThread(): sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
}
+ }
+}
- if ( replSettings.slave ) {
- verify( replSettings.slave == SimpleSlave );
- LOG(1) << "slave=true" << endl;
- stdx::thread repl_thread(replSlaveThread);
- }
+void startMasterSlave(OperationContext* txn) {
+ const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
+ if (!replSettings.slave && !replSettings.master)
+ return;
- if ( replSettings.master ) {
- LOG(1) << "master=true" << endl;
- createOplog(txn);
- stdx::thread t(replMasterThread);
- }
+ AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();
- if (replSettings.fastsync) {
- while(!_replMainStarted) // don't allow writes until we've set up from log
- sleepmillis( 50 );
- }
+ {
+ ReplSource temp(txn); // Ensures local.me is populated
}
- int _dummy_z;
-
- void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
- Client::initThreadIfNotAlready("pretouchN");
- OperationContextImpl txn; // XXX
- ScopedTransaction transaction(&txn, MODE_S);
- Lock::GlobalRead lk(txn.lockState());
+ if (replSettings.slave) {
+ verify(replSettings.slave == SimpleSlave);
+ LOG(1) << "slave=true" << endl;
+ stdx::thread repl_thread(replSlaveThread);
+ }
- for( unsigned i = a; i <= b; i++ ) {
- const BSONObj& op = v[i];
- const char *which = "o";
- const char *opType = op.getStringField("op");
- if ( *opType == 'i' )
- ;
- else if( *opType == 'u' )
- which = "o2";
- else
- continue;
- /* todo : other operations */
+ if (replSettings.master) {
+ LOG(1) << "master=true" << endl;
+ createOplog(txn);
+ stdx::thread t(replMasterThread);
+ }
- try {
- BSONObj o = op.getObjectField(which);
- BSONElement _id;
- if( o.getObjectID(_id) ) {
- const char *ns = op.getStringField("ns");
- BSONObjBuilder b;
- b.append(_id);
- BSONObj result;
- OldClientContext ctx(&txn, ns);
- if( Helpers::findById(&txn, ctx.db(), ns, b.done(), result) )
- _dummy_z += result.objsize(); // touch
- }
- }
- catch( DBException& e ) {
- log() << "ignoring assertion in pretouchN() " << a << ' ' << b << ' ' << i << ' ' << e.toString() << endl;
- }
- }
+ if (replSettings.fastsync) {
+ while (!_replMainStarted) // don't allow writes until we've set up from log
+ sleepmillis(50);
}
+}
+int _dummy_z;
- void pretouchOperation(OperationContext* txn, const BSONObj& op) {
+void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
+ Client::initThreadIfNotAlready("pretouchN");
- if (txn->lockState()->isWriteLocked()) {
- return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.
- }
+ OperationContextImpl txn; // XXX
+ ScopedTransaction transaction(&txn, MODE_S);
+ Lock::GlobalRead lk(txn.lockState());
- const char *which = "o";
- const char *opType = op.getStringField("op");
- if ( *opType == 'i' )
+ for (unsigned i = a; i <= b; i++) {
+ const BSONObj& op = v[i];
+ const char* which = "o";
+ const char* opType = op.getStringField("op");
+ if (*opType == 'i')
;
- else if( *opType == 'u' )
+ else if (*opType == 'u')
which = "o2";
else
- return;
+ continue;
/* todo : other operations */
try {
BSONObj o = op.getObjectField(which);
BSONElement _id;
- if( o.getObjectID(_id) ) {
- const char *ns = op.getStringField("ns");
+ if (o.getObjectID(_id)) {
+ const char* ns = op.getStringField("ns");
BSONObjBuilder b;
b.append(_id);
BSONObj result;
- AutoGetCollectionForRead ctx(txn, ns );
- if (Helpers::findById(txn, ctx.getDb(), ns, b.done(), result)) {
- _dummy_z += result.objsize(); // touch
- }
+ OldClientContext ctx(&txn, ns);
+ if (Helpers::findById(&txn, ctx.db(), ns, b.done(), result))
+ _dummy_z += result.objsize(); // touch
}
+ } catch (DBException& e) {
+ log() << "ignoring assertion in pretouchN() " << a << ' ' << b << ' ' << i << ' '
+ << e.toString() << endl;
}
- catch( DBException& ) {
- log() << "ignoring assertion in pretouchOperation()" << endl;
+ }
+}
+
+void pretouchOperation(OperationContext* txn, const BSONObj& op) {
+ if (txn->lockState()->isWriteLocked()) {
+ return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+ }
+
+ const char* which = "o";
+ const char* opType = op.getStringField("op");
+ if (*opType == 'i')
+ ;
+ else if (*opType == 'u')
+ which = "o2";
+ else
+ return;
+ /* todo : other operations */
+
+ try {
+ BSONObj o = op.getObjectField(which);
+ BSONElement _id;
+ if (o.getObjectID(_id)) {
+ const char* ns = op.getStringField("ns");
+ BSONObjBuilder b;
+ b.append(_id);
+ BSONObj result;
+ AutoGetCollectionForRead ctx(txn, ns);
+ if (Helpers::findById(txn, ctx.getDb(), ns, b.done(), result)) {
+ _dummy_z += result.objsize(); // touch
+ }
}
+ } catch (DBException&) {
+ log() << "ignoring assertion in pretouchOperation()" << endl;
}
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index 89a73ddfb90..d290be23de3 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -42,152 +42,163 @@
namespace mongo {
- class Database;
- class OldThreadPool;
- class OperationContext;
+class Database;
+class OldThreadPool;
+class OperationContext;
namespace repl {
- // Main entry point for master/slave at startup time.
- void startMasterSlave(OperationContext* txn);
+// Main entry point for master/slave at startup time.
+void startMasterSlave(OperationContext* txn);
- // externed for use with resync.cpp
- extern volatile int relinquishSyncingSome;
- extern volatile int syncing;
+// externed for use with resync.cpp
+extern volatile int relinquishSyncingSome;
+extern volatile int syncing;
- extern const char *replInfo;
+extern const char* replInfo;
- /* A replication exception */
- class SyncException : public DBException {
- public:
- SyncException() : DBException( "sync exception" , 10001 ) {}
- };
+/* A replication exception */
+class SyncException : public DBException {
+public:
+ SyncException() : DBException("sync exception", 10001) {}
+};
- /* A Source is a source from which we can pull (replicate) data.
- stored in collection local.sources.
+/* A Source is a source from which we can pull (replicate) data.
+ stored in collection local.sources.
- Can be a group of things to replicate for several databases.
+ Can be a group of things to replicate for several databases.
- { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+ { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
- 'source' defaults to 'main'; support for multiple source names is
- not done (always use main for now).
+ 'source' defaults to 'main'; support for multiple source names is
+ not done (always use main for now).
+*/
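+// For illustration, a local.sources document of the shape above might look like the following
+// (hypothetical host and values):
+//   { host: "master.example.net:27017", source: "main", syncedTo: Timestamp(1436300000, 1) }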
+class ReplSource {
+ std::shared_ptr<OldThreadPool> tp;
+
+ void resync(OperationContext* txn, const std::string& dbName);
+
+ /** @param alreadyLocked caller already put us in write lock if true */
+ void _sync_pullOpLog_applyOperation(OperationContext* txn, BSONObj& op, bool alreadyLocked);
+
+ /* pull some operations from the master's oplog, and apply them.
+ calls sync_pullOpLog_applyOperation
*/
- class ReplSource {
- std::shared_ptr<OldThreadPool> tp;
-
- void resync(OperationContext* txn, const std::string& dbName);
-
- /** @param alreadyLocked caller already put us in write lock if true */
- void _sync_pullOpLog_applyOperation(OperationContext* txn, BSONObj& op, bool alreadyLocked);
-
- /* pull some operations from the master's oplog, and apply them.
- calls sync_pullOpLog_applyOperation
- */
- int _sync_pullOpLog(OperationContext* txn, int& nApplied);
-
- /* we only clone one database per pass, even if a lot need done. This helps us
- avoid overflowing the master's transaction log by doing too much work before going
- back to read more transactions. (Imagine a scenario of slave startup where we try to
- clone 100 databases in one pass.)
- */
- std::set<std::string> addDbNextPass;
-
- std::set<std::string> incompleteCloneDbs;
-
- /// TODO(spencer): Remove this once the LegacyReplicationCoordinator is gone.
- BSONObj _me;
-
- void resyncDrop( OperationContext* txn, const std::string& db );
- // call without the db mutex
- void syncToTailOfRemoteLog();
- std::string ns() const { return std::string( "local.oplog.$" ) + sourceName(); }
- unsigned _sleepAdviceTime;
-
- /**
- * If 'db' is a new database and its name would conflict with that of
- * an existing database, synchronize these database names with the
- * master.
- * @return true iff an op with the specified ns may be applied.
- */
- bool handleDuplicateDbName( OperationContext* txn,
- const BSONObj &op,
- const char* ns,
- const char* db );
-
- // populates _me so that it can be passed to oplogreader for handshakes
- /// TODO(spencer): Remove this function once the LegacyReplicationCoordinator is gone.
- void ensureMe(OperationContext* txn);
-
- void forceResync(OperationContext* txn, const char *requester);
-
- bool _connect(OplogReader* reader, const HostAndPort& host, const OID& myRID);
- public:
- OplogReader oplogReader;
-
- void applyCommand(OperationContext* txn, const BSONObj& op);
- void applyOperation(OperationContext* txn, Database* db, const BSONObj& op);
- std::string hostName; // ip addr or hostname plus optionally, ":<port>"
- std::string _sourceName; // a logical source name.
- std::string sourceName() const { return _sourceName.empty() ? "main" : _sourceName; }
- std::string only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
-
- /* the last time point we have already synced up to (in the remote/master's oplog). */
- Timestamp syncedTo;
-
- int nClonedThisPass;
-
- typedef std::vector< std::shared_ptr< ReplSource > > SourceVector;
- static void loadAll(OperationContext* txn, SourceVector&);
-
- explicit ReplSource(OperationContext* txn, BSONObj);
- // This is not the constructor you are looking for. Always prefer the version that takes
- // a BSONObj. This is public only as a hack so that the ReplicationCoordinator can find
- // out the process's RID in master/slave setups.
- ReplSource(OperationContext* txn);
-
- /* -1 = error */
- int sync(OperationContext* txn, int& nApplied);
-
- void save(OperationContext* txn); // write ourself to local.sources
-
- // make a jsobj from our member fields of the form
- // { host: ..., source: ..., syncedTo: ... }
- BSONObj jsobj();
-
- bool operator==(const ReplSource&r) const {
- return hostName == r.hostName && sourceName() == r.sourceName();
- }
- std::string toString() const { return sourceName() + "@" + hostName; }
-
- bool haveMoreDbsToSync() const { return !addDbNextPass.empty(); }
- int sleepAdvice() const {
- if ( !_sleepAdviceTime )
- return 0;
- int wait = _sleepAdviceTime - unsigned( time( 0 ) );
- return wait > 0 ? wait : 0;
- }
-
- static bool throttledForceResyncDead( OperationContext* txn, const char *requester );
- static void forceResyncDead( OperationContext* txn, const char *requester );
- };
+ int _sync_pullOpLog(OperationContext* txn, int& nApplied);
+
+    /* we only clone one database per pass, even if many need to be done. This helps us
+       avoid overflowing the master's transaction log by doing too much work before going
+       back to read more transactions. (Imagine a scenario of slave startup where we try to
+       clone 100 databases in one pass.)
+    */
+ std::set<std::string> addDbNextPass;
+
+ std::set<std::string> incompleteCloneDbs;
+
+ /// TODO(spencer): Remove this once the LegacyReplicationCoordinator is gone.
+ BSONObj _me;
+
+ void resyncDrop(OperationContext* txn, const std::string& db);
+ // call without the db mutex
+ void syncToTailOfRemoteLog();
+ std::string ns() const {
+ return std::string("local.oplog.$") + sourceName();
+ }
+ unsigned _sleepAdviceTime;
/**
- * Helper class used to set and query an ignore state for a named database.
- * The ignore state will expire after a specified Timestamp.
+ * If 'db' is a new database and its name would conflict with that of
+ * an existing database, synchronize these database names with the
+ * master.
+ * @return true iff an op with the specified ns may be applied.
*/
- class DatabaseIgnorer {
- public:
- /** Indicate that operations for 'db' should be ignored until after 'futureOplogTime' */
- void doIgnoreUntilAfter( const std::string &db, const Timestamp &futureOplogTime );
- /**
- * Query ignore state of 'db'; if 'currentOplogTime' is after the ignore
- * limit, the ignore state will be cleared.
- */
- bool ignoreAt( const std::string &db, const Timestamp &currentOplogTime );
- private:
- std::map< std::string, Timestamp > _ignores;
- };
-
-} // namespace repl
-} // namespace mongo
+ bool handleDuplicateDbName(OperationContext* txn,
+ const BSONObj& op,
+ const char* ns,
+ const char* db);
+
+ // populates _me so that it can be passed to oplogreader for handshakes
+ /// TODO(spencer): Remove this function once the LegacyReplicationCoordinator is gone.
+ void ensureMe(OperationContext* txn);
+
+ void forceResync(OperationContext* txn, const char* requester);
+
+ bool _connect(OplogReader* reader, const HostAndPort& host, const OID& myRID);
+
+public:
+ OplogReader oplogReader;
+
+ void applyCommand(OperationContext* txn, const BSONObj& op);
+ void applyOperation(OperationContext* txn, Database* db, const BSONObj& op);
+ std::string hostName; // ip addr or hostname plus optionally, ":<port>"
+ std::string _sourceName; // a logical source name.
+ std::string sourceName() const {
+ return _sourceName.empty() ? "main" : _sourceName;
+ }
+ std::string
+ only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
+
+ /* the last time point we have already synced up to (in the remote/master's oplog). */
+ Timestamp syncedTo;
+
+ int nClonedThisPass;
+
+ typedef std::vector<std::shared_ptr<ReplSource>> SourceVector;
+ static void loadAll(OperationContext* txn, SourceVector&);
+
+ explicit ReplSource(OperationContext* txn, BSONObj);
+ // This is not the constructor you are looking for. Always prefer the version that takes
+ // a BSONObj. This is public only as a hack so that the ReplicationCoordinator can find
+ // out the process's RID in master/slave setups.
+ ReplSource(OperationContext* txn);
+
+ /* -1 = error */
+ int sync(OperationContext* txn, int& nApplied);
+
+ void save(OperationContext* txn); // write ourself to local.sources
+
+ // make a jsobj from our member fields of the form
+ // { host: ..., source: ..., syncedTo: ... }
+ BSONObj jsobj();
+
+ bool operator==(const ReplSource& r) const {
+ return hostName == r.hostName && sourceName() == r.sourceName();
+ }
+ std::string toString() const {
+ return sourceName() + "@" + hostName;
+ }
+
+ bool haveMoreDbsToSync() const {
+ return !addDbNextPass.empty();
+ }
+ int sleepAdvice() const {
+ if (!_sleepAdviceTime)
+ return 0;
+ int wait = _sleepAdviceTime - unsigned(time(0));
+ return wait > 0 ? wait : 0;
+ }
+
+ static bool throttledForceResyncDead(OperationContext* txn, const char* requester);
+ static void forceResyncDead(OperationContext* txn, const char* requester);
+};
+
+/**
+ * Helper class used to set and query an ignore state for a named database.
+ * The ignore state will expire after a specified Timestamp.
+ */
+class DatabaseIgnorer {
+public:
+ /** Indicate that operations for 'db' should be ignored until after 'futureOplogTime' */
+ void doIgnoreUntilAfter(const std::string& db, const Timestamp& futureOplogTime);
+ /**
+ * Query ignore state of 'db'; if 'currentOplogTime' is after the ignore
+ * limit, the ignore state will be cleared.
+ */
+ bool ignoreAt(const std::string& db, const Timestamp& currentOplogTime);
+
+private:
+ std::map<std::string, Timestamp> _ignores;
+};
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/member_config.cpp b/src/mongo/db/repl/member_config.cpp
index 02711adedd3..a6f8e311928 100644
--- a/src/mongo/db/repl/member_config.cpp
+++ b/src/mongo/db/repl/member_config.cpp
@@ -40,280 +40,268 @@
namespace mongo {
namespace repl {
- const std::string MemberConfig::kIdFieldName = "_id";
- const std::string MemberConfig::kVotesFieldName = "votes";
- const std::string MemberConfig::kPriorityFieldName = "priority";
- const std::string MemberConfig::kHostFieldName = "host";
- const std::string MemberConfig::kHiddenFieldName = "hidden";
- const std::string MemberConfig::kSlaveDelayFieldName = "slaveDelay";
- const std::string MemberConfig::kArbiterOnlyFieldName = "arbiterOnly";
- const std::string MemberConfig::kBuildIndexesFieldName = "buildIndexes";
- const std::string MemberConfig::kTagsFieldName = "tags";
- const std::string MemberConfig::kInternalVoterTagName = "$voter";
- const std::string MemberConfig::kInternalElectableTagName = "$electable";
- const std::string MemberConfig::kInternalAllTagName = "$all";
+const std::string MemberConfig::kIdFieldName = "_id";
+const std::string MemberConfig::kVotesFieldName = "votes";
+const std::string MemberConfig::kPriorityFieldName = "priority";
+const std::string MemberConfig::kHostFieldName = "host";
+const std::string MemberConfig::kHiddenFieldName = "hidden";
+const std::string MemberConfig::kSlaveDelayFieldName = "slaveDelay";
+const std::string MemberConfig::kArbiterOnlyFieldName = "arbiterOnly";
+const std::string MemberConfig::kBuildIndexesFieldName = "buildIndexes";
+const std::string MemberConfig::kTagsFieldName = "tags";
+const std::string MemberConfig::kInternalVoterTagName = "$voter";
+const std::string MemberConfig::kInternalElectableTagName = "$electable";
+const std::string MemberConfig::kInternalAllTagName = "$all";
namespace {
- const std::string kLegalMemberConfigFieldNames[] = {
- MemberConfig::kIdFieldName,
- MemberConfig::kVotesFieldName,
- MemberConfig::kPriorityFieldName,
- MemberConfig::kHostFieldName,
- MemberConfig::kHiddenFieldName,
- MemberConfig::kSlaveDelayFieldName,
- MemberConfig::kArbiterOnlyFieldName,
- MemberConfig::kBuildIndexesFieldName,
- MemberConfig::kTagsFieldName
- };
-
- const int kVotesFieldDefault = 1;
- const double kPriorityFieldDefault = 1.0;
- const Seconds kSlaveDelayFieldDefault(0);
- const bool kArbiterOnlyFieldDefault = false;
- const bool kHiddenFieldDefault = false;
- const bool kBuildIndexesFieldDefault = true;
-
- const Seconds kMaxSlaveDelay(3600 * 24 * 366);
+const std::string kLegalMemberConfigFieldNames[] = {MemberConfig::kIdFieldName,
+ MemberConfig::kVotesFieldName,
+ MemberConfig::kPriorityFieldName,
+ MemberConfig::kHostFieldName,
+ MemberConfig::kHiddenFieldName,
+ MemberConfig::kSlaveDelayFieldName,
+ MemberConfig::kArbiterOnlyFieldName,
+ MemberConfig::kBuildIndexesFieldName,
+ MemberConfig::kTagsFieldName};
+
+const int kVotesFieldDefault = 1;
+const double kPriorityFieldDefault = 1.0;
+const Seconds kSlaveDelayFieldDefault(0);
+const bool kArbiterOnlyFieldDefault = false;
+const bool kHiddenFieldDefault = false;
+const bool kBuildIndexesFieldDefault = true;
+
+const Seconds kMaxSlaveDelay(3600 * 24 * 366);
} // namespace
- Status MemberConfig::initialize(const BSONObj& mcfg, ReplicaSetTagConfig* tagConfig) {
- Status status = bsonCheckOnlyHasFields(
- "replica set member configuration", mcfg, kLegalMemberConfigFieldNames);
- if (!status.isOK())
- return status;
-
- //
- // Parse _id field.
- //
- BSONElement idElement = mcfg[kIdFieldName];
- if (idElement.eoo()) {
- return Status(ErrorCodes::NoSuchKey, str::stream() << kIdFieldName <<
- " field is missing");
- }
- if (!idElement.isNumber()) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << kIdFieldName <<
- " field has non-numeric type " << typeName(idElement.type()));
- }
- _id = idElement.numberInt();
-
- //
- // Parse h field.
- //
- std::string hostAndPortString;
- status = bsonExtractStringField(mcfg, kHostFieldName, &hostAndPortString);
- if (!status.isOK())
- return status;
- boost::trim(hostAndPortString);
- status = _host.initialize(hostAndPortString);
- if (!status.isOK())
- return status;
- if (!_host.hasPort()) {
- // make port explicit even if default.
- _host = HostAndPort(_host.host(), _host.port());
- }
+Status MemberConfig::initialize(const BSONObj& mcfg, ReplicaSetTagConfig* tagConfig) {
+ Status status = bsonCheckOnlyHasFields(
+ "replica set member configuration", mcfg, kLegalMemberConfigFieldNames);
+ if (!status.isOK())
+ return status;
- //
- // Parse votes field.
- //
- BSONElement votesElement = mcfg[kVotesFieldName];
- if (votesElement.eoo()) {
- _votes = kVotesFieldDefault;
- }
- else if (votesElement.isNumber()) {
- _votes = votesElement.numberInt();
- }
- else {
- return Status(ErrorCodes::TypeMismatch, str::stream() << kVotesFieldName <<
- " field value has non-numeric type " <<
- typeName(votesElement.type()));
- }
+ //
+ // Parse _id field.
+ //
+ BSONElement idElement = mcfg[kIdFieldName];
+ if (idElement.eoo()) {
+ return Status(ErrorCodes::NoSuchKey, str::stream() << kIdFieldName << " field is missing");
+ }
+ if (!idElement.isNumber()) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << kIdFieldName << " field has non-numeric type "
+ << typeName(idElement.type()));
+ }
+ _id = idElement.numberInt();
- //
- // Parse priority field.
- //
- BSONElement priorityElement = mcfg[kPriorityFieldName];
- if (priorityElement.eoo()) {
- _priority = kPriorityFieldDefault;
- }
- else if (priorityElement.isNumber()) {
- _priority = priorityElement.numberDouble();
- }
- else {
- return Status(ErrorCodes::TypeMismatch, str::stream() << kPriorityFieldName <<
- " field has non-numeric type " << typeName(priorityElement.type()));
- }
+ //
+ // Parse h field.
+ //
+ std::string hostAndPortString;
+ status = bsonExtractStringField(mcfg, kHostFieldName, &hostAndPortString);
+ if (!status.isOK())
+ return status;
+ boost::trim(hostAndPortString);
+ status = _host.initialize(hostAndPortString);
+ if (!status.isOK())
+ return status;
+ if (!_host.hasPort()) {
+ // make port explicit even if default.
+ _host = HostAndPort(_host.host(), _host.port());
+ }
- //
- // Parse arbiterOnly field.
- //
- status = bsonExtractBooleanFieldWithDefault(mcfg,
- kArbiterOnlyFieldName,
- kArbiterOnlyFieldDefault,
- &_arbiterOnly);
- if (!status.isOK())
- return status;
-
- //
- // Parse slaveDelay field.
- //
- BSONElement slaveDelayElement = mcfg[kSlaveDelayFieldName];
- if (slaveDelayElement.eoo()) {
- _slaveDelay = kSlaveDelayFieldDefault;
- }
- else if (slaveDelayElement.isNumber()) {
- _slaveDelay = Seconds(slaveDelayElement.numberInt());
- }
- else {
- return Status(ErrorCodes::TypeMismatch, str::stream() << kSlaveDelayFieldName <<
- " field value has non-numeric type " <<
- typeName(slaveDelayElement.type()));
- }
+ //
+ // Parse votes field.
+ //
+ BSONElement votesElement = mcfg[kVotesFieldName];
+ if (votesElement.eoo()) {
+ _votes = kVotesFieldDefault;
+ } else if (votesElement.isNumber()) {
+ _votes = votesElement.numberInt();
+ } else {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << kVotesFieldName << " field value has non-numeric type "
+ << typeName(votesElement.type()));
+ }
+
+ //
+ // Parse priority field.
+ //
+ BSONElement priorityElement = mcfg[kPriorityFieldName];
+ if (priorityElement.eoo()) {
+ _priority = kPriorityFieldDefault;
+ } else if (priorityElement.isNumber()) {
+ _priority = priorityElement.numberDouble();
+ } else {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << kPriorityFieldName << " field has non-numeric type "
+ << typeName(priorityElement.type()));
+ }
+
+ //
+ // Parse arbiterOnly field.
+ //
+ status = bsonExtractBooleanFieldWithDefault(
+ mcfg, kArbiterOnlyFieldName, kArbiterOnlyFieldDefault, &_arbiterOnly);
+ if (!status.isOK())
+ return status;
- //
- // Parse hidden field.
- //
- status = bsonExtractBooleanFieldWithDefault(mcfg,
- kHiddenFieldName,
- kHiddenFieldDefault,
- &_hidden);
- if (!status.isOK())
- return status;
-
- //
- // Parse buildIndexes field.
- //
- status = bsonExtractBooleanFieldWithDefault(mcfg,
- kBuildIndexesFieldName,
- kBuildIndexesFieldDefault,
- &_buildIndexes);
- if (!status.isOK())
- return status;
-
- //
- // Parse "tags" field.
- //
- _tags.clear();
- BSONElement tagsElement;
- status = bsonExtractTypedField(mcfg, kTagsFieldName, Object, &tagsElement);
- if (status.isOK()) {
- for (BSONObj::iterator tagIter(tagsElement.Obj()); tagIter.more();) {
- const BSONElement& tag = tagIter.next();
- if (tag.type() != String) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "tags." <<
- tag.fieldName() << " field has non-string value of type " <<
- typeName(tag.type()));
- }
- _tags.push_back(tagConfig->makeTag(tag.fieldNameStringData(),
- tag.valueStringData()));
+ //
+ // Parse slaveDelay field.
+ //
+ BSONElement slaveDelayElement = mcfg[kSlaveDelayFieldName];
+ if (slaveDelayElement.eoo()) {
+ _slaveDelay = kSlaveDelayFieldDefault;
+ } else if (slaveDelayElement.isNumber()) {
+ _slaveDelay = Seconds(slaveDelayElement.numberInt());
+ } else {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << kSlaveDelayFieldName << " field value has non-numeric type "
+ << typeName(slaveDelayElement.type()));
+ }
+
+ //
+ // Parse hidden field.
+ //
+ status =
+ bsonExtractBooleanFieldWithDefault(mcfg, kHiddenFieldName, kHiddenFieldDefault, &_hidden);
+ if (!status.isOK())
+ return status;
+
+ //
+ // Parse buildIndexes field.
+ //
+ status = bsonExtractBooleanFieldWithDefault(
+ mcfg, kBuildIndexesFieldName, kBuildIndexesFieldDefault, &_buildIndexes);
+ if (!status.isOK())
+ return status;
+
+ //
+ // Parse "tags" field.
+ //
+ _tags.clear();
+ BSONElement tagsElement;
+ status = bsonExtractTypedField(mcfg, kTagsFieldName, Object, &tagsElement);
+ if (status.isOK()) {
+ for (BSONObj::iterator tagIter(tagsElement.Obj()); tagIter.more();) {
+ const BSONElement& tag = tagIter.next();
+ if (tag.type() != String) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "tags." << tag.fieldName()
+ << " field has non-string value of type "
+ << typeName(tag.type()));
}
+ _tags.push_back(tagConfig->makeTag(tag.fieldNameStringData(), tag.valueStringData()));
}
- else if (ErrorCodes::NoSuchKey != status) {
- return status;
- }
+ } else if (ErrorCodes::NoSuchKey != status) {
+ return status;
+ }
- //
- // Add internal tags based on other member properties.
- //
-
- // Add a voter tag if this non-arbiter member votes; use _id for uniquity.
- const std::string id = str::stream() << _id;
- if (isVoter() && !_arbiterOnly) {
- _tags.push_back(tagConfig->makeTag(kInternalVoterTagName, id));
- }
+ //
+ // Add internal tags based on other member properties.
+ //
- // Add an electable tag if this member is electable.
- if (isElectable()) {
- _tags.push_back(tagConfig->makeTag(kInternalElectableTagName, id));
- }
+    // Add a voter tag if this non-arbiter member votes; use _id for uniqueness.
+ const std::string id = str::stream() << _id;
+ if (isVoter() && !_arbiterOnly) {
+ _tags.push_back(tagConfig->makeTag(kInternalVoterTagName, id));
+ }
- // Add a tag for generic counting of this node.
- if (!_arbiterOnly) {
- _tags.push_back(tagConfig->makeTag(kInternalAllTagName, id));
- }
+ // Add an electable tag if this member is electable.
+ if (isElectable()) {
+ _tags.push_back(tagConfig->makeTag(kInternalElectableTagName, id));
+ }
- return Status::OK();
+ // Add a tag for generic counting of this node.
+ if (!_arbiterOnly) {
+ _tags.push_back(tagConfig->makeTag(kInternalAllTagName, id));
}
- Status MemberConfig::validate() const {
- if (_id < 0 || _id > 255) {
- return Status(ErrorCodes::BadValue, str::stream() << kIdFieldName <<
- " field value of " << _id << " is out of range.");
- }
+ return Status::OK();
+}
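+
+// For illustration (hypothetical values), a member configuration document accepted by
+// initialize() above; omitted fields take the defaults defined near the top of this file:
+//   { _id: 0, host: "node0.example.net:27017", votes: 1, priority: 1, hidden: false,
+//     arbiterOnly: false, buildIndexes: true, slaveDelay: 0, tags: { dc: "east" } }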
- if (_priority < 0 || _priority > 1000) {
- return Status(ErrorCodes::BadValue, str::stream() << kPriorityFieldName <<
- " field value of " << _priority << " is out of range");
- }
- if (_votes != 0 && _votes != 1) {
- return Status(ErrorCodes::BadValue, str::stream() << kVotesFieldName <<
- " field value is " << _votes << " but must be 0 or 1");
- }
- if (_arbiterOnly) {
- if (!_tags.empty()) {
- return Status(ErrorCodes::BadValue, "Cannot set tags on arbiters.");
- }
- if (!isVoter()) {
- return Status(ErrorCodes::BadValue, "Arbiter must vote (cannot have 0 votes)");
- }
- }
- if (_slaveDelay < Seconds(0) || _slaveDelay > kMaxSlaveDelay) {
- return Status(ErrorCodes::BadValue, str::stream() << kSlaveDelayFieldName <<
- " field value of " << durationCount<Seconds>(_slaveDelay) <<
- " seconds is out of range");
- }
- if (_slaveDelay > Seconds(0) && _priority != 0) {
- return Status(ErrorCodes::BadValue, "slaveDelay requires priority be zero");
- }
- if (_hidden && _priority != 0) {
- return Status(ErrorCodes::BadValue, "priority must be 0 when hidden=true");
+Status MemberConfig::validate() const {
+ if (_id < 0 || _id > 255) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << kIdFieldName << " field value of " << _id
+ << " is out of range.");
+ }
+
+ if (_priority < 0 || _priority > 1000) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << kPriorityFieldName << " field value of " << _priority
+ << " is out of range");
+ }
+ if (_votes != 0 && _votes != 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << kVotesFieldName << " field value is " << _votes
+ << " but must be 0 or 1");
+ }
+ if (_arbiterOnly) {
+ if (!_tags.empty()) {
+ return Status(ErrorCodes::BadValue, "Cannot set tags on arbiters.");
}
- if (!_buildIndexes && _priority != 0) {
- return Status(ErrorCodes::BadValue, "priority must be 0 when buildIndexes=false");
+ if (!isVoter()) {
+ return Status(ErrorCodes::BadValue, "Arbiter must vote (cannot have 0 votes)");
}
- return Status::OK();
}
+ if (_slaveDelay < Seconds(0) || _slaveDelay > kMaxSlaveDelay) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << kSlaveDelayFieldName << " field value of "
+ << durationCount<Seconds>(_slaveDelay)
+ << " seconds is out of range");
+ }
+ if (_slaveDelay > Seconds(0) && _priority != 0) {
+ return Status(ErrorCodes::BadValue, "slaveDelay requires priority be zero");
+ }
+ if (_hidden && _priority != 0) {
+ return Status(ErrorCodes::BadValue, "priority must be 0 when hidden=true");
+ }
+ if (!_buildIndexes && _priority != 0) {
+ return Status(ErrorCodes::BadValue, "priority must be 0 when buildIndexes=false");
+ }
+ return Status::OK();
+}
- bool MemberConfig::hasTags(const ReplicaSetTagConfig& tagConfig) const {
- for (std::vector<ReplicaSetTag>::const_iterator tag = _tags.begin();
- tag != _tags.end();
- tag++) {
- std::string tagKey = tagConfig.getTagKey(*tag);
- if (tagKey[0] == '$') {
- // Filter out internal tags
- continue;
- }
- return true;
+bool MemberConfig::hasTags(const ReplicaSetTagConfig& tagConfig) const {
+ for (std::vector<ReplicaSetTag>::const_iterator tag = _tags.begin(); tag != _tags.end();
+ tag++) {
+ std::string tagKey = tagConfig.getTagKey(*tag);
+ if (tagKey[0] == '$') {
+ // Filter out internal tags
+ continue;
}
- return false;
+ return true;
}
+ return false;
+}
- BSONObj MemberConfig::toBSON(const ReplicaSetTagConfig& tagConfig) const {
- BSONObjBuilder configBuilder;
- configBuilder.append("_id", _id);
- configBuilder.append("host", _host.toString());
- configBuilder.append("arbiterOnly", _arbiterOnly);
- configBuilder.append("buildIndexes", _buildIndexes);
- configBuilder.append("hidden", _hidden);
- configBuilder.append("priority", _priority);
-
- BSONObjBuilder tags(configBuilder.subobjStart("tags"));
- for (std::vector<ReplicaSetTag>::const_iterator tag = _tags.begin();
- tag != _tags.end();
- tag++) {
- std::string tagKey = tagConfig.getTagKey(*tag);
- if (tagKey[0] == '$') {
- // Filter out internal tags
- continue;
- }
- tags.append(tagKey, tagConfig.getTagValue(*tag));
- }
- tags.done();
+BSONObj MemberConfig::toBSON(const ReplicaSetTagConfig& tagConfig) const {
+ BSONObjBuilder configBuilder;
+ configBuilder.append("_id", _id);
+ configBuilder.append("host", _host.toString());
+ configBuilder.append("arbiterOnly", _arbiterOnly);
+ configBuilder.append("buildIndexes", _buildIndexes);
+ configBuilder.append("hidden", _hidden);
+ configBuilder.append("priority", _priority);
- configBuilder.append("slaveDelay", durationCount<Seconds>(_slaveDelay));
- configBuilder.append("votes", getNumVotes());
- return configBuilder.obj();
+ BSONObjBuilder tags(configBuilder.subobjStart("tags"));
+ for (std::vector<ReplicaSetTag>::const_iterator tag = _tags.begin(); tag != _tags.end();
+ tag++) {
+ std::string tagKey = tagConfig.getTagKey(*tag);
+ if (tagKey[0] == '$') {
+ // Filter out internal tags
+ continue;
+ }
+ tags.append(tagKey, tagConfig.getTagValue(*tag));
}
+ tags.done();
+
+ configBuilder.append("slaveDelay", durationCount<Seconds>(_slaveDelay));
+ configBuilder.append("votes", getNumVotes());
+ return configBuilder.obj();
+}
} // namespace repl
} // namespace mongo
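Editorial note, not part of the diff: the following usage sketch illustrates a member document that the initialize()/validate() logic above accepts (priority 0 together with hidden and slaveDelay, per the relationship checks in validate()). The includes, function name, and host string are assumptions for illustration only.

    // Hypothetical caller -- illustrative sketch only, not code from this change.
    #include "mongo/db/jsobj.h"
    #include "mongo/db/repl/member_config.h"

    mongo::Status makeDelayedHiddenMember(mongo::repl::ReplicaSetTagConfig* tagConfig,
                                          mongo::repl::MemberConfig* out) {
        // validate() requires priority == 0 whenever slaveDelay > 0 or hidden == true.
        mongo::BSONObj mcfg = BSON("_id" << 3 << "host"
                                         << "delayed.example.net:27017"
                                         << "priority" << 0 << "hidden" << true
                                         << "slaveDelay" << 3600);
        mongo::Status status = out->initialize(mcfg, tagConfig);
        if (!status.isOK())
            return status;
        return out->validate();  // applies the range and relationship rules shown above
    }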
diff --git a/src/mongo/db/repl/member_config.h b/src/mongo/db/repl/member_config.h
index f980a8e2bc8..694a8941f8e 100644
--- a/src/mongo/db/repl/member_config.h
+++ b/src/mongo/db/repl/member_config.h
@@ -38,143 +38,168 @@
namespace mongo {
- class BSONObj;
+class BSONObj;
namespace repl {
+/**
+ * Representation of the configuration information about a particular member of a replica set.
+ */
+class MemberConfig {
+public:
+ typedef std::vector<ReplicaSetTag>::const_iterator TagIterator;
+
+ static const std::string kIdFieldName;
+ static const std::string kVotesFieldName;
+ static const std::string kPriorityFieldName;
+ static const std::string kHostFieldName;
+ static const std::string kHiddenFieldName;
+ static const std::string kSlaveDelayFieldName;
+ static const std::string kArbiterOnlyFieldName;
+ static const std::string kBuildIndexesFieldName;
+ static const std::string kTagsFieldName;
+ static const std::string kInternalVoterTagName;
+ static const std::string kInternalElectableTagName;
+ static const std::string kInternalAllTagName;
+
+ /**
+ * Default constructor, produces a MemberConfig in an undefined state.
+     * Must successfully call initialize() before calling validate() or the
+ * accessors.
+ */
+ MemberConfig() : _slaveDelay(0) {}
+
+ /**
+ * Initializes this MemberConfig from the contents of "mcfg".
+ *
+ * If "mcfg" describes any tags, builds ReplicaSetTags for this
+ * configuration using "tagConfig" as the tag's namespace. This may
+ * have the effect of altering "tagConfig" when "mcfg" describes a
+ * tag not previously added to "tagConfig".
+ */
+ Status initialize(const BSONObj& mcfg, ReplicaSetTagConfig* tagConfig);
+
+ /**
+ * Performs basic consistency checks on the member configuration.
+ */
+ Status validate() const;
+
+ /**
+ * Gets the identifier for this member, unique within a ReplicaSetConfig.
+ */
+ int getId() const {
+ return _id;
+ }
+
+ /**
+ * Gets the canonical name of this member, by which other members and clients
+ * will contact it.
+ */
+ const HostAndPort& getHostAndPort() const {
+ return _host;
+ }
+
+ /**
+ * Gets this member's priority. Higher means more likely to be elected
+ * primary.
+ */
+ double getPriority() const {
+ return _priority;
+ }
+
/**
- * Representation of the configuration information about a particular member of a replica set.
- */
- class MemberConfig {
- public:
- typedef std::vector<ReplicaSetTag>::const_iterator TagIterator;
-
- static const std::string kIdFieldName;
- static const std::string kVotesFieldName;
- static const std::string kPriorityFieldName;
- static const std::string kHostFieldName;
- static const std::string kHiddenFieldName;
- static const std::string kSlaveDelayFieldName;
- static const std::string kArbiterOnlyFieldName;
- static const std::string kBuildIndexesFieldName;
- static const std::string kTagsFieldName;
- static const std::string kInternalVoterTagName;
- static const std::string kInternalElectableTagName;
- static const std::string kInternalAllTagName;
-
- /**
- * Default constructor, produces a MemberConfig in an undefined state.
- * Must successfully call initialze() before calling validate() or the
- * accessors.
- */
- MemberConfig() : _slaveDelay(0) {}
-
- /**
- * Initializes this MemberConfig from the contents of "mcfg".
- *
- * If "mcfg" describes any tags, builds ReplicaSetTags for this
- * configuration using "tagConfig" as the tag's namespace. This may
- * have the effect of altering "tagConfig" when "mcfg" describes a
- * tag not previously added to "tagConfig".
- */
- Status initialize(const BSONObj& mcfg, ReplicaSetTagConfig* tagConfig);
-
- /**
- * Performs basic consistency checks on the member configuration.
- */
- Status validate() const;
-
- /**
- * Gets the identifier for this member, unique within a ReplicaSetConfig.
- */
- int getId() const { return _id; }
-
- /**
- * Gets the canonical name of this member, by which other members and clients
- * will contact it.
- */
- const HostAndPort& getHostAndPort() const { return _host; }
-
- /**
- * Gets this member's priority. Higher means more likely to be elected
- * primary.
- */
- double getPriority() const { return _priority; }
-
- /**
- * Gets the amount of time behind the primary that this member will atempt to
- * remain. Zero seconds means stay as caught up as possible.
- */
- Seconds getSlaveDelay() const { return _slaveDelay; }
-
- /**
- * Returns true if this member may vote in elections.
- */
- bool isVoter() const { return _votes != 0; }
-
- /**
- * Returns the number of votes that this member gets.
- */
- int getNumVotes() const { return isVoter() ? 1 : 0; }
-
- /**
- * Returns true if this member is an arbiter (is not data-bearing).
- */
- bool isArbiter() const { return _arbiterOnly; }
-
- /**
- * Returns true if this member is hidden (not reported by isMaster, not electable).
- */
- bool isHidden() const { return _hidden; }
-
- /**
- * Returns true if this member should build secondary indexes.
- */
- bool shouldBuildIndexes() const { return _buildIndexes; }
-
- /**
- * Gets the number of replica set tags, including internal '$' tags, for this member.
- */
- size_t getNumTags() const { return _tags.size(); }
-
- /**
- * Returns true if this MemberConfig has any non-internal tags, using "tagConfig" to
- * determine the internal property of the tags.
- */
- bool hasTags(const ReplicaSetTagConfig& tagConfig) const;
-
- /**
- * Gets a begin iterator over the tags for this member.
- */
- TagIterator tagsBegin() const { return _tags.begin(); }
-
- /**
- * Gets an end iterator over the tags for this member.
- */
- TagIterator tagsEnd() const { return _tags.end(); }
-
- /**
- * Returns true if this represents the configuration of an electable member.
- */
- bool isElectable() const { return !isArbiter() && getPriority() > 0; }
-
- /**
- * Returns the member config as a BSONObj, using "tagConfig" to generate the tag subdoc.
- */
- BSONObj toBSON(const ReplicaSetTagConfig& tagConfig) const;
-
- private:
-
- int _id;
- HostAndPort _host;
- double _priority; // 0 means can never be primary
- int _votes; // Can this member vote? Only 0 and 1 are valid. Default 1.
- bool _arbiterOnly;
- Seconds _slaveDelay;
- bool _hidden; // if set, don't advertise to drivers in isMaster.
- bool _buildIndexes; // if false, do not create any non-_id indexes
- std::vector<ReplicaSetTag> _tags; // tagging for data center, rack, etc.
- };
+     * Gets the amount of time behind the primary that this member will attempt to
+ * remain. Zero seconds means stay as caught up as possible.
+ */
+ Seconds getSlaveDelay() const {
+ return _slaveDelay;
+ }
+
+ /**
+ * Returns true if this member may vote in elections.
+ */
+ bool isVoter() const {
+ return _votes != 0;
+ }
+
+ /**
+ * Returns the number of votes that this member gets.
+ */
+ int getNumVotes() const {
+ return isVoter() ? 1 : 0;
+ }
+
+ /**
+ * Returns true if this member is an arbiter (is not data-bearing).
+ */
+ bool isArbiter() const {
+ return _arbiterOnly;
+ }
+
+ /**
+ * Returns true if this member is hidden (not reported by isMaster, not electable).
+ */
+ bool isHidden() const {
+ return _hidden;
+ }
+
+ /**
+ * Returns true if this member should build secondary indexes.
+ */
+ bool shouldBuildIndexes() const {
+ return _buildIndexes;
+ }
+
+ /**
+ * Gets the number of replica set tags, including internal '$' tags, for this member.
+ */
+ size_t getNumTags() const {
+ return _tags.size();
+ }
+
+ /**
+ * Returns true if this MemberConfig has any non-internal tags, using "tagConfig" to
+ * determine the internal property of the tags.
+ */
+ bool hasTags(const ReplicaSetTagConfig& tagConfig) const;
+
+ /**
+ * Gets a begin iterator over the tags for this member.
+ */
+ TagIterator tagsBegin() const {
+ return _tags.begin();
+ }
+
+ /**
+ * Gets an end iterator over the tags for this member.
+ */
+ TagIterator tagsEnd() const {
+ return _tags.end();
+ }
+
+ /**
+ * Returns true if this represents the configuration of an electable member.
+ */
+ bool isElectable() const {
+ return !isArbiter() && getPriority() > 0;
+ }
+
+ /**
+ * Returns the member config as a BSONObj, using "tagConfig" to generate the tag subdoc.
+ */
+ BSONObj toBSON(const ReplicaSetTagConfig& tagConfig) const;
+
+private:
+ int _id;
+ HostAndPort _host;
+ double _priority; // 0 means can never be primary
+ int _votes; // Can this member vote? Only 0 and 1 are valid. Default 1.
+ bool _arbiterOnly;
+ Seconds _slaveDelay;
+ bool _hidden; // if set, don't advertise to drivers in isMaster.
+ bool _buildIndexes; // if false, do not create any non-_id indexes
+ std::vector<ReplicaSetTag> _tags; // tagging for data center, rack, etc.
+};
} // namespace repl
} // namespace mongo
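Editorial note, not part of the diff: a minimal sketch of consuming the accessors declared above. Only the MemberConfig/ReplicaSetTagConfig API comes from this header; the output stream, function name, and formatting are assumptions.

    // Hypothetical consumer of the MemberConfig accessors.
    #include <ostream>

    void printMemberSummary(const mongo::repl::MemberConfig& mc,
                            const mongo::repl::ReplicaSetTagConfig& tagConfig,
                            std::ostream& os) {
        os << "member " << mc.getId() << " at " << mc.getHostAndPort().toString()
           << " priority=" << mc.getPriority() << " votes=" << mc.getNumVotes() << '\n';
        for (mongo::repl::MemberConfig::TagIterator it = mc.tagsBegin(); it != mc.tagsEnd();
             ++it) {
            // The iterators expose internal tags ($voter, $electable, $all) as well;
            // hasTags() is the helper that ignores them.
            os << "  tag " << tagConfig.getTagKey(*it) << "=" << tagConfig.getTagValue(*it)
               << '\n';
        }
    }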
diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp
index 6411156f4f8..c53556f411d 100644
--- a/src/mongo/db/repl/member_config_test.cpp
+++ b/src/mongo/db/repl/member_config_test.cpp
@@ -38,328 +38,417 @@ namespace mongo {
namespace repl {
namespace {
- TEST(MemberConfig, ParseMinimalMemberConfigAndCheckDefaults) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "localhost:12345"),
- &tagConfig));
- ASSERT_EQUALS(0, mc.getId());
- ASSERT_EQUALS(HostAndPort("localhost", 12345), mc.getHostAndPort());
- ASSERT_EQUALS(1.0, mc.getPriority());
- ASSERT_EQUALS(Seconds(0), mc.getSlaveDelay());
- ASSERT_TRUE(mc.isVoter());
- ASSERT_FALSE(mc.isHidden());
- ASSERT_FALSE(mc.isArbiter());
- ASSERT_TRUE(mc.shouldBuildIndexes());
- ASSERT_EQUALS(3U, mc.getNumTags());
- ASSERT_OK(mc.validate());
- }
-
- TEST(MemberConfig, ParseFailsWithIllegalFieldName) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_EQUALS(ErrorCodes::BadValue,
- mc.initialize(BSON("_id" << 0 << "host" << "localhost" << "frim" << 1),
- &tagConfig));
- }
-
- TEST(MemberConfig, ParseFailsWithMissingIdField) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, mc.initialize(BSON("host" << "localhost:12345"),
- &tagConfig));
- }
-
- TEST(MemberConfig, ParseFailsWithBadIdField) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, mc.initialize(BSON("host" << "localhost:12345"),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch,
- mc.initialize(BSON("_id" << "0" << "host" << "localhost:12345"),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch,
- mc.initialize(BSON("_id" << Date_t() << "host" << "localhost:12345"),
- &tagConfig));
- }
-
- TEST(MemberConfig, ParseFailsWithMissingHostField) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, mc.initialize(BSON("_id" << 0), &tagConfig));
- }
-
-
- TEST(MemberConfig, ParseFailsWithBadHostField) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, mc.initialize(BSON("_id" << 0 << "host" << 0),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::FailedToParse, mc.initialize(BSON("_id" << 0 << "host" << ""),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::FailedToParse,
- mc.initialize(BSON("_id" << 0 << "host" << "myhost:zabc"), &tagConfig));
- }
-
- TEST(MemberConfig, ParseArbiterOnly) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "arbiterOnly" << 1.0),
- &tagConfig));
- ASSERT_TRUE(mc.isArbiter());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "arbiterOnly" << false),
- &tagConfig));
- ASSERT_TRUE(!mc.isArbiter());
- }
-
- TEST(MemberConfig, ParseHidden) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "hidden" << 1.0),
- &tagConfig));
- ASSERT_TRUE(mc.isHidden());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "hidden" << false),
- &tagConfig));
- ASSERT_TRUE(!mc.isHidden());
- ASSERT_EQUALS(ErrorCodes::TypeMismatch,
- mc.initialize(BSON("_id" << 0 << "host" << "h" << "hidden" << "1.0"),
- &tagConfig));
- }
-
- TEST(MemberConfig, ParseBuildIndexes) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "buildIndexes" << 1.0),
- &tagConfig));
- ASSERT_TRUE(mc.shouldBuildIndexes());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "buildIndexes" << false),
- &tagConfig));
- ASSERT_TRUE(!mc.shouldBuildIndexes());
- }
-
- TEST(MemberConfig, ParseVotes) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 1.0),
- &tagConfig));
- ASSERT_TRUE(mc.isVoter());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 0),
- &tagConfig));
- ASSERT_FALSE(mc.isVoter());
-
- // For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 1.5),
- &tagConfig));
- ASSERT_TRUE(mc.isVoter());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 0.5),
- &tagConfig));
- ASSERT_FALSE(mc.isVoter());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << -0.5),
- &tagConfig));
- ASSERT_FALSE(mc.isVoter());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 2),
- &tagConfig));
-
- ASSERT_EQUALS(ErrorCodes::TypeMismatch,
- mc.initialize(BSON("_id" << 0 <<
- "host" << "h" <<
- "votes" << Date_t::fromMillisSinceEpoch(2)),
- &tagConfig));
- }
-
- TEST(MemberConfig, ParsePriority) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 1),
- &tagConfig));
- ASSERT_EQUALS(1.0, mc.getPriority());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 0),
- &tagConfig));
- ASSERT_EQUALS(0.0, mc.getPriority());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 100.8),
- &tagConfig));
- ASSERT_EQUALS(100.8, mc.getPriority());
-
- ASSERT_EQUALS(ErrorCodes::TypeMismatch,
- mc.initialize(BSON("_id" << 0 <<
- "host" << "h" <<
- "priority" << Date_t::fromMillisSinceEpoch(2)),
- &tagConfig));
- }
-
- TEST(MemberConfig, ParseSlaveDelay) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "slaveDelay" << 100),
- &tagConfig));
- ASSERT_EQUALS(Seconds(100), mc.getSlaveDelay());
- }
-
- TEST(MemberConfig, ParseTags) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" <<
- "tags" << BSON("k1" << "v1" << "k2" << "v2")),
- &tagConfig));
- ASSERT_EQUALS(5U, mc.getNumTags());
- ASSERT_EQUALS(5, std::distance(mc.tagsBegin(), mc.tagsEnd()));
- ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("k1", "v1")));
- ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("k2", "v2")));
- ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("$voter",
- "0")));
- ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("$electable",
- "0")));
- ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("$all",
- "0")));
- }
-
- TEST(MemberConfig, ValidateFailsWithIdOutOfRange) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << -1 << "host" << "localhost:12345"),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 256 << "host" << "localhost:12345"),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- }
-
- TEST(MemberConfig, ValidateVotes) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
-
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 1.0),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_TRUE(mc.isVoter());
-
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 0),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_FALSE(mc.isVoter());
-
- // For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 1.5),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_TRUE(mc.isVoter());
-
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 0.5),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_FALSE(mc.isVoter());
-
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << -0.5),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_FALSE(mc.isVoter());
-
- // Invalid values
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << 2),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
-
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "votes" << -1),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- }
-
- TEST(MemberConfig, ValidatePriorityRanges) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 0),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 1000),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << -1),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 1001),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- }
-
- TEST(MemberConfig, ValidateSlaveDelays) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 0 <<
- "slaveDelay" << 0),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 0 <<
- "slaveDelay" << 3600 * 10),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 0 <<
- "slaveDelay" << -1),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 0 <<
- "slaveDelay" << 3600 * 24 * 400),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- }
-
- TEST(MemberConfig, ValidatePriorityAndSlaveDelayRelationship) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 1 <<
- "slaveDelay" << 60),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- }
-
- TEST(MemberConfig, ValidatePriorityAndHiddenRelationship) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 1 <<
- "hidden" << true),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 1 <<
- "hidden" << false),
- &tagConfig));
- ASSERT_OK(mc.validate());
- }
-
- TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 1 <<
- "buildIndexes" << false),
- &tagConfig));
-
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" << "priority" << 1 <<
- "buildIndexes" << true),
- &tagConfig));
- ASSERT_OK(mc.validate());
- }
-
- TEST(MemberConfig, ValidateArbiterVotesRelationship) {
- ReplicaSetTagConfig tagConfig;
- MemberConfig mc;
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" <<
- "votes" << 1 << "arbiterOnly" << true),
- &tagConfig));
- ASSERT_OK(mc.validate());
-
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" <<
- "votes" << 0 << "arbiterOnly" << false),
- &tagConfig));
- ASSERT_OK(mc.validate());
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" <<
- "votes" << 1 << "arbiterOnly" << false),
- &tagConfig));
- ASSERT_OK(mc.validate());
-
- ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host" << "h" <<
- "votes" << 0 << "arbiterOnly" << true),
- &tagConfig));
- ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
- }
+TEST(MemberConfig, ParseMinimalMemberConfigAndCheckDefaults) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "localhost:12345"),
+ &tagConfig));
+ ASSERT_EQUALS(0, mc.getId());
+ ASSERT_EQUALS(HostAndPort("localhost", 12345), mc.getHostAndPort());
+ ASSERT_EQUALS(1.0, mc.getPriority());
+ ASSERT_EQUALS(Seconds(0), mc.getSlaveDelay());
+ ASSERT_TRUE(mc.isVoter());
+ ASSERT_FALSE(mc.isHidden());
+ ASSERT_FALSE(mc.isArbiter());
+ ASSERT_TRUE(mc.shouldBuildIndexes());
+ ASSERT_EQUALS(3U, mc.getNumTags());
+ ASSERT_OK(mc.validate());
+}
+
+TEST(MemberConfig, ParseFailsWithIllegalFieldName) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_EQUALS(ErrorCodes::BadValue,
+ mc.initialize(BSON("_id" << 0 << "host"
+ << "localhost"
+ << "frim" << 1),
+ &tagConfig));
+}
+
+TEST(MemberConfig, ParseFailsWithMissingIdField) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey,
+ mc.initialize(BSON("host"
+ << "localhost:12345"),
+ &tagConfig));
+}
+
+TEST(MemberConfig, ParseFailsWithBadIdField) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey,
+ mc.initialize(BSON("host"
+ << "localhost:12345"),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ mc.initialize(BSON("_id"
+ << "0"
+ << "host"
+ << "localhost:12345"),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ mc.initialize(BSON("_id" << Date_t() << "host"
+ << "localhost:12345"),
+ &tagConfig));
+}
+
+TEST(MemberConfig, ParseFailsWithMissingHostField) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, mc.initialize(BSON("_id" << 0), &tagConfig));
+}
+
+
+TEST(MemberConfig, ParseFailsWithBadHostField) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ mc.initialize(BSON("_id" << 0 << "host" << 0), &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::FailedToParse,
+ mc.initialize(BSON("_id" << 0 << "host"
+ << ""),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::FailedToParse,
+ mc.initialize(BSON("_id" << 0 << "host"
+ << "myhost:zabc"),
+ &tagConfig));
+}
+
+TEST(MemberConfig, ParseArbiterOnly) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "arbiterOnly" << 1.0),
+ &tagConfig));
+ ASSERT_TRUE(mc.isArbiter());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "arbiterOnly" << false),
+ &tagConfig));
+ ASSERT_TRUE(!mc.isArbiter());
+}
+
+TEST(MemberConfig, ParseHidden) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "hidden" << 1.0),
+ &tagConfig));
+ ASSERT_TRUE(mc.isHidden());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "hidden" << false),
+ &tagConfig));
+ ASSERT_TRUE(!mc.isHidden());
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "hidden"
+ << "1.0"),
+ &tagConfig));
+}
+
+TEST(MemberConfig, ParseBuildIndexes) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "buildIndexes" << 1.0),
+ &tagConfig));
+ ASSERT_TRUE(mc.shouldBuildIndexes());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "buildIndexes" << false),
+ &tagConfig));
+ ASSERT_TRUE(!mc.shouldBuildIndexes());
+}
+
+TEST(MemberConfig, ParseVotes) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 1.0),
+ &tagConfig));
+ ASSERT_TRUE(mc.isVoter());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 0),
+ &tagConfig));
+ ASSERT_FALSE(mc.isVoter());
+
+ // For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 1.5),
+ &tagConfig));
+ ASSERT_TRUE(mc.isVoter());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 0.5),
+ &tagConfig));
+ ASSERT_FALSE(mc.isVoter());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << -0.5),
+ &tagConfig));
+ ASSERT_FALSE(mc.isVoter());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 2),
+ &tagConfig));
+
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << Date_t::fromMillisSinceEpoch(2)),
+ &tagConfig));
+}
+
+TEST(MemberConfig, ParsePriority) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 1),
+ &tagConfig));
+ ASSERT_EQUALS(1.0, mc.getPriority());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 0),
+ &tagConfig));
+ ASSERT_EQUALS(0.0, mc.getPriority());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 100.8),
+ &tagConfig));
+ ASSERT_EQUALS(100.8, mc.getPriority());
+
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << Date_t::fromMillisSinceEpoch(2)),
+ &tagConfig));
+}
+
+TEST(MemberConfig, ParseSlaveDelay) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "slaveDelay" << 100),
+ &tagConfig));
+ ASSERT_EQUALS(Seconds(100), mc.getSlaveDelay());
+}
+
+TEST(MemberConfig, ParseTags) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "tags" << BSON("k1"
+ << "v1"
+ << "k2"
+ << "v2")),
+ &tagConfig));
+ ASSERT_EQUALS(5U, mc.getNumTags());
+ ASSERT_EQUALS(5, std::distance(mc.tagsBegin(), mc.tagsEnd()));
+ ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("k1", "v1")));
+ ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("k2", "v2")));
+ ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("$voter", "0")));
+ ASSERT_EQUALS(1,
+ std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("$electable", "0")));
+ ASSERT_EQUALS(1, std::count(mc.tagsBegin(), mc.tagsEnd(), tagConfig.findTag("$all", "0")));
+}
+
+TEST(MemberConfig, ValidateFailsWithIdOutOfRange) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << -1 << "host"
+ << "localhost:12345"),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 256 << "host"
+ << "localhost:12345"),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+}
+
+TEST(MemberConfig, ValidateVotes) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 1.0),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_TRUE(mc.isVoter());
+
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 0),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_FALSE(mc.isVoter());
+
+ // For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 1.5),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_TRUE(mc.isVoter());
+
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 0.5),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_FALSE(mc.isVoter());
+
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << -0.5),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_FALSE(mc.isVoter());
+
+ // Invalid values
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 2),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << -1),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+}
+
+TEST(MemberConfig, ValidatePriorityRanges) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 0),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 1000),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << -1),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 1001),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+}
+
+TEST(MemberConfig, ValidateSlaveDelays) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 0 << "slaveDelay" << 0),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 0 << "slaveDelay" << 3600 * 10),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 0 << "slaveDelay" << -1),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 0 << "slaveDelay" << 3600 * 24 * 400),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+}
+
+TEST(MemberConfig, ValidatePriorityAndSlaveDelayRelationship) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 1 << "slaveDelay" << 60),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+}
+
+TEST(MemberConfig, ValidatePriorityAndHiddenRelationship) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 1 << "hidden" << true),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 1 << "hidden" << false),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+}
+
+TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 1 << "buildIndexes" << false),
+ &tagConfig));
+
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "priority" << 1 << "buildIndexes" << true),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+}
+
+TEST(MemberConfig, ValidateArbiterVotesRelationship) {
+ ReplicaSetTagConfig tagConfig;
+ MemberConfig mc;
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 1 << "arbiterOnly" << true),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 0 << "arbiterOnly" << false),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 1 << "arbiterOnly" << false),
+ &tagConfig));
+ ASSERT_OK(mc.validate());
+
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes" << 0 << "arbiterOnly" << true),
+ &tagConfig));
+ ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
+}
} // namespace
} // namespace repl
diff --git a/src/mongo/db/repl/member_heartbeat_data.cpp b/src/mongo/db/repl/member_heartbeat_data.cpp
index edbc40de393..357c41f1cf2 100644
--- a/src/mongo/db/repl/member_heartbeat_data.cpp
+++ b/src/mongo/db/repl/member_heartbeat_data.cpp
@@ -39,71 +39,67 @@
namespace mongo {
namespace repl {
- MemberHeartbeatData::MemberHeartbeatData() :
- _health(-1),
- _authIssue(false) {
-
- _lastResponse.setState(MemberState::RS_UNKNOWN);
- _lastResponse.setElectionTime(Timestamp());
- _lastResponse.setOpTime(OpTime());
+MemberHeartbeatData::MemberHeartbeatData() : _health(-1), _authIssue(false) {
+ _lastResponse.setState(MemberState::RS_UNKNOWN);
+ _lastResponse.setElectionTime(Timestamp());
+ _lastResponse.setOpTime(OpTime());
+}
+
+void MemberHeartbeatData::setUpValues(Date_t now,
+ const HostAndPort& host,
+ ReplSetHeartbeatResponse hbResponse) {
+ _health = 1;
+ if (_upSince == Date_t()) {
+ _upSince = now;
}
-
- void MemberHeartbeatData::setUpValues(Date_t now,
- const HostAndPort& host,
- ReplSetHeartbeatResponse hbResponse) {
- _health = 1;
- if (_upSince == Date_t()) {
- _upSince = now;
- }
- _authIssue = false;
- _lastHeartbeat = now;
- if (!hbResponse.hasState()) {
- hbResponse.setState(MemberState::RS_UNKNOWN);
- }
- if (!hbResponse.hasElectionTime()) {
- hbResponse.setElectionTime(_lastResponse.getElectionTime());
- }
- if (!hbResponse.hasOpTime()) {
- hbResponse.setOpTime(_lastResponse.getOpTime());
- }
-
- // Log if the state changes
- if (_lastResponse.getState() != hbResponse.getState()){
- log() << "Member " << host.toString() << " is now in state "
- << hbResponse.getState().toString() << rsLog;
- }
-
- _lastResponse = hbResponse;
+ _authIssue = false;
+ _lastHeartbeat = now;
+ if (!hbResponse.hasState()) {
+ hbResponse.setState(MemberState::RS_UNKNOWN);
}
-
- void MemberHeartbeatData::setDownValues(Date_t now, const std::string& heartbeatMessage) {
-
- _health = 0;
- _upSince = Date_t();
- _lastHeartbeat = now;
- _authIssue = false;
-
- _lastResponse = ReplSetHeartbeatResponse();
- _lastResponse.setState(MemberState::RS_DOWN);
- _lastResponse.setElectionTime(Timestamp());
- _lastResponse.setOpTime(OpTime());
- _lastResponse.setHbMsg(heartbeatMessage);
- _lastResponse.setSyncingTo(HostAndPort());
+ if (!hbResponse.hasElectionTime()) {
+ hbResponse.setElectionTime(_lastResponse.getElectionTime());
+ }
+ if (!hbResponse.hasOpTime()) {
+ hbResponse.setOpTime(_lastResponse.getOpTime());
}
- void MemberHeartbeatData::setAuthIssue(Date_t now) {
- _health = 0; // set health to 0 so that this doesn't count towards majority.
- _upSince = Date_t();
- _lastHeartbeat = now;
- _authIssue = true;
-
- _lastResponse = ReplSetHeartbeatResponse();
- _lastResponse.setState(MemberState::RS_UNKNOWN);
- _lastResponse.setElectionTime(Timestamp());
- _lastResponse.setOpTime(OpTime());
- _lastResponse.setHbMsg("");
- _lastResponse.setSyncingTo(HostAndPort());
+ // Log if the state changes
+ if (_lastResponse.getState() != hbResponse.getState()) {
+ log() << "Member " << host.toString() << " is now in state "
+ << hbResponse.getState().toString() << rsLog;
}
-} // namespace repl
-} // namespace mongo
+ _lastResponse = hbResponse;
+}
+
+void MemberHeartbeatData::setDownValues(Date_t now, const std::string& heartbeatMessage) {
+ _health = 0;
+ _upSince = Date_t();
+ _lastHeartbeat = now;
+ _authIssue = false;
+
+ _lastResponse = ReplSetHeartbeatResponse();
+ _lastResponse.setState(MemberState::RS_DOWN);
+ _lastResponse.setElectionTime(Timestamp());
+ _lastResponse.setOpTime(OpTime());
+ _lastResponse.setHbMsg(heartbeatMessage);
+ _lastResponse.setSyncingTo(HostAndPort());
+}
+
+void MemberHeartbeatData::setAuthIssue(Date_t now) {
+ _health = 0; // set health to 0 so that this doesn't count towards majority.
+ _upSince = Date_t();
+ _lastHeartbeat = now;
+ _authIssue = true;
+
+ _lastResponse = ReplSetHeartbeatResponse();
+ _lastResponse.setState(MemberState::RS_UNKNOWN);
+ _lastResponse.setElectionTime(Timestamp());
+ _lastResponse.setOpTime(OpTime());
+ _lastResponse.setHbMsg("");
+ _lastResponse.setSyncingTo(HostAndPort());
+}
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/member_heartbeat_data.h b/src/mongo/db/repl/member_heartbeat_data.h
index 8996cfb7d96..6a3b4f62880 100644
--- a/src/mongo/db/repl/member_heartbeat_data.h
+++ b/src/mongo/db/repl/member_heartbeat_data.h
@@ -36,77 +36,103 @@
namespace mongo {
namespace repl {
+/**
+ * This class contains the data returned from a heartbeat command for one member
+ * of a replica set.
+ **/
+class MemberHeartbeatData {
+public:
+ MemberHeartbeatData();
+
+ MemberState getState() const {
+ return _lastResponse.getState();
+ }
+ int getHealth() const {
+ return _health;
+ }
+ Date_t getUpSince() const {
+ return _upSince;
+ }
+ Date_t getLastHeartbeat() const {
+ return _lastHeartbeat;
+ }
+ Date_t getLastHeartbeatRecv() const {
+ return _lastHeartbeatRecv;
+ }
+ void setLastHeartbeatRecv(Date_t newHeartbeatRecvTime) {
+ _lastHeartbeatRecv = newHeartbeatRecvTime;
+ }
+ const std::string& getLastHeartbeatMsg() const {
+ return _lastResponse.getHbMsg();
+ }
+ const HostAndPort& getSyncSource() const {
+ return _lastResponse.getSyncingTo();
+ }
+ OpTime getOpTime() const {
+ return _lastResponse.getOpTime();
+ }
+ int getConfigVersion() const {
+ return _lastResponse.getConfigVersion();
+ }
+ bool hasAuthIssue() const {
+ return _authIssue;
+ }
+
+ Timestamp getElectionTime() const {
+ return _lastResponse.getElectionTime();
+ }
+
+    // Returns true if the last heartbeat data explicitly stated that the node
+ // is not electable.
+ bool isUnelectable() const {
+ return _lastResponse.hasIsElectable() && !_lastResponse.isElectable();
+ }
+
+ // Was this member up for the last heartbeat?
+ bool up() const {
+ return _health > 0;
+ }
+    // Was this member up for the last heartbeat
+    // (or we haven't received the first heartbeat yet)?
+ bool maybeUp() const {
+ return _health != 0;
+ }
+
+ /**
+ * Sets values in this object from the results of a successful heartbeat command.
+ */
+ void setUpValues(Date_t now, const HostAndPort& host, ReplSetHeartbeatResponse hbResponse);
+
+ /**
+     * Sets values in this object from the results of an erroring/failed heartbeat command.
+     * _authIssue is set to false, _health is set to 0, the state is set to RS_DOWN, and
+ * other values are set as specified.
+ */
+ void setDownValues(Date_t now, const std::string& heartbeatMessage);
+
/**
- * This class contains the data returned from a heartbeat command for one member
- * of a replica set.
- **/
- class MemberHeartbeatData {
- public:
- MemberHeartbeatData();
-
- MemberState getState() const { return _lastResponse.getState(); }
- int getHealth() const { return _health; }
- Date_t getUpSince() const { return _upSince; }
- Date_t getLastHeartbeat() const { return _lastHeartbeat; }
- Date_t getLastHeartbeatRecv() const { return _lastHeartbeatRecv; }
- void setLastHeartbeatRecv(Date_t newHeartbeatRecvTime) {
- _lastHeartbeatRecv = newHeartbeatRecvTime;
- }
- const std::string& getLastHeartbeatMsg() const { return _lastResponse.getHbMsg(); }
- const HostAndPort& getSyncSource() const { return _lastResponse.getSyncingTo(); }
- OpTime getOpTime() const { return _lastResponse.getOpTime(); }
- int getConfigVersion() const { return _lastResponse.getConfigVersion(); }
- bool hasAuthIssue() const { return _authIssue; }
-
- Timestamp getElectionTime() const { return _lastResponse.getElectionTime(); }
-
- // Returns true if the last heartbeat data explicilty stated that the node
- // is not electable.
- bool isUnelectable() const {
- return _lastResponse.hasIsElectable() && !_lastResponse.isElectable();
- }
-
- // Was this member up for the last heartbeat?
- bool up() const { return _health > 0; }
- // Was this member up for the last hearbeeat
- // (or we haven't received the first heartbeat yet)
- bool maybeUp() const { return _health != 0; }
-
- /**
- * Sets values in this object from the results of a successful heartbeat command.
- */
- void setUpValues(Date_t now, const HostAndPort& host, ReplSetHeartbeatResponse hbResponse);
-
- /**
- * Sets values in this object from the results of a erroring/failed heartbeat command.
- * _authIssues is set to false, _health is set to 0, _state is set to RS_DOWN, and
- * other values are set as specified.
- */
- void setDownValues(Date_t now, const std::string& heartbeatMessage);
-
- /**
- * Sets values in this object that indicate there was an auth issue on the last heartbeat
- * command.
- */
- void setAuthIssue(Date_t now);
-
- private:
- // -1 = not checked yet, 0 = member is down/unreachable, 1 = member is up
- int _health;
-
- // Time of first successful heartbeat, if currently still up
- Date_t _upSince;
- // This is the last time we got a response from a heartbeat request to a given member.
- Date_t _lastHeartbeat;
- // This is the last time we got a heartbeat request from a given member.
- Date_t _lastHeartbeatRecv;
-
- // Did the last heartbeat show a failure to authenticate?
- bool _authIssue;
-
- // The last heartbeat response we received.
- ReplSetHeartbeatResponse _lastResponse;
- };
-
-} // namespace repl
-} // namespace mongo
+ * Sets values in this object that indicate there was an auth issue on the last heartbeat
+ * command.
+ */
+ void setAuthIssue(Date_t now);
+
+private:
+ // -1 = not checked yet, 0 = member is down/unreachable, 1 = member is up
+ int _health;
+
+ // Time of first successful heartbeat, if currently still up
+ Date_t _upSince;
+ // This is the last time we got a response from a heartbeat request to a given member.
+ Date_t _lastHeartbeat;
+ // This is the last time we got a heartbeat request from a given member.
+ Date_t _lastHeartbeatRecv;
+
+ // Did the last heartbeat show a failure to authenticate?
+ bool _authIssue;
+
+ // The last heartbeat response we received.
+ ReplSetHeartbeatResponse _lastResponse;
+};
+
+} // namespace repl
+} // namespace mongo
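Editorial sketch, not part of the diff: how the setters above are typically driven by heartbeat results. The handler, its parameters, and the choice of error code are assumptions; only the MemberHeartbeatData API comes from this header.

    // Hypothetical heartbeat-result handler.
    void onHeartbeatResult(mongo::repl::MemberHeartbeatData* hbData,
                           mongo::Date_t now,
                           const mongo::HostAndPort& target,
                           const mongo::StatusWith<mongo::repl::ReplSetHeartbeatResponse>& result) {
        if (result.isOK()) {
            // Sets _health = 1, stamps _upSince on the first success, and falls back to
            // the previous election time / optime when the response omits them.
            hbData->setUpValues(now, target, result.getValue());
        } else if (result.getStatus().code() == mongo::ErrorCodes::Unauthorized) {
            hbData->setAuthIssue(now);  // health 0, so the node does not count toward a majority
        } else {
            hbData->setDownValues(now, result.getStatus().reason());
        }
    }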
diff --git a/src/mongo/db/repl/member_state.h b/src/mongo/db/repl/member_state.h
index c3e3ffd292b..4adf7516845 100644
--- a/src/mongo/db/repl/member_state.h
+++ b/src/mongo/db/repl/member_state.h
@@ -36,65 +36,97 @@ namespace mongo {
namespace repl {
- /*
- RS_STARTUP serving still starting up, or still trying to initiate the set
- RS_PRIMARY this server thinks it is primary
- RS_SECONDARY this server thinks it is a secondary (slave mode)
- RS_RECOVERING recovering/resyncing; after recovery usually auto-transitions to secondary
- RS_STARTUP2 loaded config, still determining who is primary
+/*
+ RS_STARTUP serving still starting up, or still trying to initiate the set
+ RS_PRIMARY this server thinks it is primary
+ RS_SECONDARY this server thinks it is a secondary (slave mode)
+ RS_RECOVERING recovering/resyncing; after recovery usually auto-transitions to secondary
+ RS_STARTUP2 loaded config, still determining who is primary
- State -> integer mappings are reserved forever. Do not change them or delete them, except
- to update RS_MAX when introducing new states.
- */
- struct MemberState {
- enum MS {
- RS_STARTUP = 0,
- RS_PRIMARY = 1,
- RS_SECONDARY = 2,
- RS_RECOVERING = 3,
- RS_STARTUP2 = 5,
- RS_UNKNOWN = 6, /* remote node not yet reached */
- RS_ARBITER = 7,
- RS_DOWN = 8, /* node not reachable for a report */
- RS_ROLLBACK = 9,
- RS_REMOVED = 10, /* node removed from replica set */
- RS_MAX = 10
- } s;
+ State -> integer mappings are reserved forever. Do not change them or delete them, except
+ to update RS_MAX when introducing new states.
+*/
+struct MemberState {
+ enum MS {
+ RS_STARTUP = 0,
+ RS_PRIMARY = 1,
+ RS_SECONDARY = 2,
+ RS_RECOVERING = 3,
+ RS_STARTUP2 = 5,
+ RS_UNKNOWN = 6, /* remote node not yet reached */
+ RS_ARBITER = 7,
+ RS_DOWN = 8, /* node not reachable for a report */
+ RS_ROLLBACK = 9,
+ RS_REMOVED = 10, /* node removed from replica set */
+ RS_MAX = 10
+ } s;
- MemberState(MS ms = RS_UNKNOWN) : s(ms) { }
- explicit MemberState(int ms) : s((MS) ms) { }
+ MemberState(MS ms = RS_UNKNOWN) : s(ms) {}
+ explicit MemberState(int ms) : s((MS)ms) {}
- bool startup() const { return s == RS_STARTUP; }
- bool primary() const { return s == RS_PRIMARY; }
- bool secondary() const { return s == RS_SECONDARY; }
- bool recovering() const { return s == RS_RECOVERING; }
- bool startup2() const { return s == RS_STARTUP2; }
- bool rollback() const { return s == RS_ROLLBACK; }
- bool readable() const { return s == RS_PRIMARY || s == RS_SECONDARY; }
- bool removed() const { return s == RS_REMOVED; }
- bool arbiter() const { return s == RS_ARBITER; }
+ bool startup() const {
+ return s == RS_STARTUP;
+ }
+ bool primary() const {
+ return s == RS_PRIMARY;
+ }
+ bool secondary() const {
+ return s == RS_SECONDARY;
+ }
+ bool recovering() const {
+ return s == RS_RECOVERING;
+ }
+ bool startup2() const {
+ return s == RS_STARTUP2;
+ }
+ bool rollback() const {
+ return s == RS_ROLLBACK;
+ }
+ bool readable() const {
+ return s == RS_PRIMARY || s == RS_SECONDARY;
+ }
+ bool removed() const {
+ return s == RS_REMOVED;
+ }
+ bool arbiter() const {
+ return s == RS_ARBITER;
+ }
- std::string toString() const;
+ std::string toString() const;
- bool operator==(const MemberState& r) const { return s == r.s; }
- bool operator!=(const MemberState& r) const { return s != r.s; }
- };
+ bool operator==(const MemberState& r) const {
+ return s == r.s;
+ }
+ bool operator!=(const MemberState& r) const {
+ return s != r.s;
+ }
+};
- inline std::string MemberState::toString() const {
- switch ( s ) {
- case RS_STARTUP: return "STARTUP";
- case RS_PRIMARY: return "PRIMARY";
- case RS_SECONDARY: return "SECONDARY";
- case RS_RECOVERING: return "RECOVERING";
- case RS_STARTUP2: return "STARTUP2";
- case RS_ARBITER: return "ARBITER";
- case RS_DOWN: return "DOWN";
- case RS_ROLLBACK: return "ROLLBACK";
- case RS_UNKNOWN: return "UNKNOWN";
- case RS_REMOVED: return "REMOVED";
- }
- return "";
+inline std::string MemberState::toString() const {
+ switch (s) {
+ case RS_STARTUP:
+ return "STARTUP";
+ case RS_PRIMARY:
+ return "PRIMARY";
+ case RS_SECONDARY:
+ return "SECONDARY";
+ case RS_RECOVERING:
+ return "RECOVERING";
+ case RS_STARTUP2:
+ return "STARTUP2";
+ case RS_ARBITER:
+ return "ARBITER";
+ case RS_DOWN:
+ return "DOWN";
+ case RS_ROLLBACK:
+ return "ROLLBACK";
+ case RS_UNKNOWN:
+ return "UNKNOWN";
+ case RS_REMOVED:
+ return "REMOVED";
}
+ return "";
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
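Editorial sketch, not part of the diff: the intent of the primary()/readable() helpers above. The read-gating function itself is an assumption.

    // Hypothetical read-gating check built on MemberState.
    bool canServeReads(const mongo::repl::MemberState& state, bool slaveOk) {
        if (state.primary()) {
            return true;  // a primary always serves reads
        }
        // readable() means primary-or-secondary, so at this point it means "secondary".
        return slaveOk && state.readable();
    }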
diff --git a/src/mongo/db/repl/minvalid.cpp b/src/mongo/db/repl/minvalid.cpp
index 5cd3acd64e7..b14966486fe 100644
--- a/src/mongo/db/repl/minvalid.cpp
+++ b/src/mongo/db/repl/minvalid.cpp
@@ -46,74 +46,76 @@ namespace mongo {
namespace repl {
namespace {
- const char* initialSyncFlagString = "doingInitialSync";
- const BSONObj initialSyncFlag(BSON(initialSyncFlagString << true));
- const char* minvalidNS = "local.replset.minvalid";
-} // namespace
-
- // Writes
- void clearInitialSyncFlag(OperationContext* txn) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- // TODO: Investigate correctness of taking MODE_IX for DB/Collection locks
- Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
- Helpers::putSingleton(txn, minvalidNS, BSON("$unset" << initialSyncFlag));
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "clearInitialSyncFlags", minvalidNS);
-
+const char* initialSyncFlagString = "doingInitialSync";
+const BSONObj initialSyncFlag(BSON(initialSyncFlagString << true));
+const char* minvalidNS = "local.replset.minvalid";
+} // namespace
+
+// Writes
+void clearInitialSyncFlag(OperationContext* txn) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ // TODO: Investigate correctness of taking MODE_IX for DB/Collection locks
+ Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
+ Helpers::putSingleton(txn, minvalidNS, BSON("$unset" << initialSyncFlag));
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "clearInitialSyncFlags", minvalidNS);
+}
- void setInitialSyncFlag(OperationContext* txn) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
- Helpers::putSingleton(txn, minvalidNS, BSON("$set" << initialSyncFlag));
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setInitialSyncFlags", minvalidNS);
+void setInitialSyncFlag(OperationContext* txn) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dblk(txn->lockState(), "local", MODE_X);
+ Helpers::putSingleton(txn, minvalidNS, BSON("$set" << initialSyncFlag));
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setInitialSyncFlags", minvalidNS);
+}
- void setMinValid(OperationContext* ctx, const OpTime& opTime) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(ctx, MODE_IX);
- Lock::DBLock dblk(ctx->lockState(), "local", MODE_X);
- Helpers::putSingleton(ctx,
- minvalidNS,
- BSON("$set" << BSON("ts" << opTime.getTimestamp() <<
- "t" << opTime.getTerm())));
-
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(ctx, "setMinValid", minvalidNS);
+void setMinValid(OperationContext* ctx, const OpTime& opTime) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(ctx, MODE_IX);
+ Lock::DBLock dblk(ctx->lockState(), "local", MODE_X);
+ Helpers::putSingleton(
+ ctx,
+ minvalidNS,
+ BSON("$set" << BSON("ts" << opTime.getTimestamp() << "t" << opTime.getTerm())));
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(ctx, "setMinValid", minvalidNS);
+}
- // Reads
- bool getInitialSyncFlag() {
- OperationContextImpl txn;
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(&txn, MODE_IS);
- Lock::DBLock dblk(txn.lockState(), "local", MODE_IS);
- Lock::CollectionLock lk(txn.lockState(), minvalidNS, MODE_IS);
- BSONObj mv;
- bool found = Helpers::getSingleton( &txn, minvalidNS, mv);
-
- if (found) {
- return mv[initialSyncFlagString].trueValue();
- }
- return false;
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&txn, "getInitialSyncFlags", minvalidNS);
-
- MONGO_UNREACHABLE;
+// Reads
+bool getInitialSyncFlag() {
+ OperationContextImpl txn;
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(&txn, MODE_IS);
+ Lock::DBLock dblk(txn.lockState(), "local", MODE_IS);
+ Lock::CollectionLock lk(txn.lockState(), minvalidNS, MODE_IS);
+ BSONObj mv;
+ bool found = Helpers::getSingleton(&txn, minvalidNS, mv);
+
+ if (found) {
+ return mv[initialSyncFlagString].trueValue();
+ }
+ return false;
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&txn, "getInitialSyncFlags", minvalidNS);
- OpTime getMinValid(OperationContext* txn) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::DBLock dblk(txn->lockState(), "local", MODE_IS);
- Lock::CollectionLock lk(txn->lockState(), minvalidNS, MODE_IS);
- BSONObj mv;
- bool found = Helpers::getSingleton(txn, minvalidNS, mv);
- if (found) {
- return extractOpTime(mv);
- }
- return OpTime();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "getMinValid", minvalidNS);
- }
+ MONGO_UNREACHABLE;
+}
+OpTime getMinValid(OperationContext* txn) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IS);
+ Lock::DBLock dblk(txn->lockState(), "local", MODE_IS);
+ Lock::CollectionLock lk(txn->lockState(), minvalidNS, MODE_IS);
+ BSONObj mv;
+ bool found = Helpers::getSingleton(txn, minvalidNS, mv);
+ if (found) {
+ return extractOpTime(mv);
+ }
+ return OpTime();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "getMinValid", minvalidNS);
+}
}
}
diff --git a/src/mongo/db/repl/minvalid.h b/src/mongo/db/repl/minvalid.h
index 2118809c424..8d0ceb79613 100644
--- a/src/mongo/db/repl/minvalid.h
+++ b/src/mongo/db/repl/minvalid.h
@@ -29,39 +29,39 @@
#pragma once
namespace mongo {
- class BSONObj;
- class OperationContext;
+class BSONObj;
+class OperationContext;
namespace repl {
- class OpTime;
+class OpTime;
- /**
- * Helper functions for maintaining local.replset.minvalid collection contents.
- *
- * When a member reaches its minValid optime it is in a consistent state. Thus, minValid is
- * set as the last step in initial sync. At the beginning of initial sync, _initialSyncFlag
- * is appended onto minValid to indicate that initial sync was started but has not yet
- * completed.
- * minValid is also used during "normal" sync: the last op in each batch is used to set
- * minValid, to indicate that we are in a consistent state when the batch has been fully
- * applied.
- */
+/**
+ * Helper functions for maintaining local.replset.minvalid collection contents.
+ *
+ * When a member reaches its minValid optime it is in a consistent state. Thus, minValid is
+ * set as the last step in initial sync. At the beginning of initial sync, _initialSyncFlag
+ * is appended onto minValid to indicate that initial sync was started but has not yet
+ * completed.
+ * minValid is also used during "normal" sync: the last op in each batch is used to set
+ * minValid, to indicate that we are in a consistent state when the batch has been fully
+ * applied.
+ */
- /**
- * The initial sync flag is used to durably record the state of an initial sync; its boolean
- * value is true when an initial sync is in progress and hasn't yet completed. The flag
- * is stored as part of the local.replset.minvalid collection.
- */
- void clearInitialSyncFlag(OperationContext* txn);
- void setInitialSyncFlag(OperationContext* txn);
- bool getInitialSyncFlag();
+/**
+ * The initial sync flag is used to durably record the state of an initial sync; its boolean
+ * value is true when an initial sync is in progress and hasn't yet completed. The flag
+ * is stored as part of the local.replset.minvalid collection.
+ */
+void clearInitialSyncFlag(OperationContext* txn);
+void setInitialSyncFlag(OperationContext* txn);
+bool getInitialSyncFlag();
- /**
- * The minValid value is the earliest (minimum) Timestamp that must be applied in order to
- * consider the dataset consistent. Do not allow client reads if our last applied operation is
- * before the minValid time.
- */
- void setMinValid(OperationContext* ctx, const OpTime& opTime);
- OpTime getMinValid(OperationContext* txn);
+/**
+ * The minValid value is the earliest (minimum) Timestamp that must be applied in order to
+ * consider the dataset consistent. Do not allow client reads if our last applied operation is
+ * before the minValid time.
+ */
+void setMinValid(OperationContext* ctx, const OpTime& opTime);
+OpTime getMinValid(OperationContext* txn);
}
}
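Taken together, a batch applier on a secondary would use these helpers roughly as sketched below; applyAllOpsInBatch() is a hypothetical placeholder, extractOpTime() is declared in oplog.h further down in this diff, and the real applier's exact ordering may differ.

void applyBatchSketch(OperationContext* txn, const std::deque<BSONObj>& batch) {
    // Record the optime of the batch's last op: until we have applied up to it,
    // the data set is not consistent and client reads must not be served.
    setMinValid(txn, extractOpTime(batch.back()));

    applyAllOpsInBatch(txn, batch);  // hypothetical: apply every op in the batch

    // Reads become safe again once the last applied optime reaches getMinValid(txn).
}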
diff --git a/src/mongo/db/repl/operation_context_repl_mock.cpp b/src/mongo/db/repl/operation_context_repl_mock.cpp
index 78587bda2f3..8b66c6b800f 100644
--- a/src/mongo/db/repl/operation_context_repl_mock.cpp
+++ b/src/mongo/db/repl/operation_context_repl_mock.cpp
@@ -36,52 +36,50 @@
namespace mongo {
namespace repl {
- OperationContextReplMock::OperationContextReplMock() : OperationContextReplMock(0) {}
+OperationContextReplMock::OperationContextReplMock() : OperationContextReplMock(0) {}
- OperationContextReplMock::OperationContextReplMock(unsigned int opNum) :
- OperationContextReplMock(nullptr, opNum) {
- }
+OperationContextReplMock::OperationContextReplMock(unsigned int opNum)
+ : OperationContextReplMock(nullptr, opNum) {}
- OperationContextReplMock::OperationContextReplMock(Client* client, unsigned int opNum) :
- OperationContextNoop(client, opNum, new MMAPV1LockerImpl()),
- _checkForInterruptStatus(Status::OK()),
- _maxTimeMicrosRemaining(0),
- _writesAreReplicated(true) {
- }
+OperationContextReplMock::OperationContextReplMock(Client* client, unsigned int opNum)
+ : OperationContextNoop(client, opNum, new MMAPV1LockerImpl()),
+ _checkForInterruptStatus(Status::OK()),
+ _maxTimeMicrosRemaining(0),
+ _writesAreReplicated(true) {}
- OperationContextReplMock::~OperationContextReplMock() = default;
+OperationContextReplMock::~OperationContextReplMock() = default;
- void OperationContextReplMock::checkForInterrupt() {
- uassertStatusOK(checkForInterruptNoAssert());
- }
+void OperationContextReplMock::checkForInterrupt() {
+ uassertStatusOK(checkForInterruptNoAssert());
+}
- Status OperationContextReplMock::checkForInterruptNoAssert() {
- if (!_checkForInterruptStatus.isOK()) {
- return _checkForInterruptStatus;
- }
-
- return Status::OK();
+Status OperationContextReplMock::checkForInterruptNoAssert() {
+ if (!_checkForInterruptStatus.isOK()) {
+ return _checkForInterruptStatus;
}
- void OperationContextReplMock::setCheckForInterruptStatus(Status status) {
- _checkForInterruptStatus = std::move(status);
- }
+ return Status::OK();
+}
- uint64_t OperationContextReplMock::getRemainingMaxTimeMicros() const {
- return _maxTimeMicrosRemaining;
- }
+void OperationContextReplMock::setCheckForInterruptStatus(Status status) {
+ _checkForInterruptStatus = std::move(status);
+}
- void OperationContextReplMock::setRemainingMaxTimeMicros(uint64_t micros) {
- _maxTimeMicrosRemaining = micros;
- }
+uint64_t OperationContextReplMock::getRemainingMaxTimeMicros() const {
+ return _maxTimeMicrosRemaining;
+}
- void OperationContextReplMock::setReplicatedWrites(bool writesAreReplicated) {
- _writesAreReplicated = writesAreReplicated;
- }
+void OperationContextReplMock::setRemainingMaxTimeMicros(uint64_t micros) {
+ _maxTimeMicrosRemaining = micros;
+}
- bool OperationContextReplMock::writesAreReplicated() const {
- return _writesAreReplicated;
- }
+void OperationContextReplMock::setReplicatedWrites(bool writesAreReplicated) {
+ _writesAreReplicated = writesAreReplicated;
+}
+
+bool OperationContextReplMock::writesAreReplicated() const {
+ return _writesAreReplicated;
+}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/operation_context_repl_mock.h b/src/mongo/db/repl/operation_context_repl_mock.h
index 592a331fbb4..c16b55a19bf 100644
--- a/src/mongo/db/repl/operation_context_repl_mock.h
+++ b/src/mongo/db/repl/operation_context_repl_mock.h
@@ -33,41 +33,41 @@
namespace mongo {
- class Locker;
+class Locker;
namespace repl {
- /**
- * Mock implementation of OperationContext that can be used with real instances of LockManager.
- * Note this is not thread safe and the setter methods should only be called in the context
- * where access to this object is guaranteed to be serialized.
- */
- class OperationContextReplMock : public OperationContextNoop {
- public:
- OperationContextReplMock();
- explicit OperationContextReplMock(unsigned int opNum);
- OperationContextReplMock(Client* client, unsigned int opNum);
- virtual ~OperationContextReplMock();
+/**
+ * Mock implementation of OperationContext that can be used with real instances of LockManager.
+ * Note this is not thread safe and the setter methods should only be called in the context
+ * where access to this object is guaranteed to be serialized.
+ */
+class OperationContextReplMock : public OperationContextNoop {
+public:
+ OperationContextReplMock();
+ explicit OperationContextReplMock(unsigned int opNum);
+ OperationContextReplMock(Client* client, unsigned int opNum);
+ virtual ~OperationContextReplMock();
- virtual void checkForInterrupt() override;
+ virtual void checkForInterrupt() override;
- virtual Status checkForInterruptNoAssert() override;
+ virtual Status checkForInterruptNoAssert() override;
- void setCheckForInterruptStatus(Status status);
+ void setCheckForInterruptStatus(Status status);
- virtual uint64_t getRemainingMaxTimeMicros() const override;
+ virtual uint64_t getRemainingMaxTimeMicros() const override;
- void setRemainingMaxTimeMicros(uint64_t micros);
+ void setRemainingMaxTimeMicros(uint64_t micros);
- void setReplicatedWrites(bool writesAreReplicated = true) override;
+ void setReplicatedWrites(bool writesAreReplicated = true) override;
- bool writesAreReplicated() const override;
+ bool writesAreReplicated() const override;
- private:
- Status _checkForInterruptStatus;
- uint64_t _maxTimeMicrosRemaining;
- bool _writesAreReplicated;
- };
+private:
+ Status _checkForInterruptStatus;
+ uint64_t _maxTimeMicrosRemaining;
+ bool _writesAreReplicated;
+};
} // namespace repl
} // namespace mongo
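A short sketch of how the mock's interrupt plumbing might be driven from a unit test; the TEST body below is illustrative only and assumes the usual mongo unittest macros, not a test added by this change.

TEST(OperationContextReplMock, InterruptStatusIsReported) {
    OperationContextReplMock txn;
    ASSERT_OK(txn.checkForInterruptNoAssert());  // starts out as Status::OK()

    // Arrange for subsequent interrupt checks to fail.
    txn.setCheckForInterruptStatus(Status(ErrorCodes::Interrupted, "killed for test"));
    ASSERT_EQUALS(ErrorCodes::Interrupted, txn.checkForInterruptNoAssert().code());
    // checkForInterrupt() would uassert the same status instead of returning it.
}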
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 26708ee8de7..2afa1b53c52 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -87,742 +87,679 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
+using std::endl;
+using std::string;
+using std::stringstream;
namespace repl {
- std::string rsOplogName = "local.oplog.rs";
- std::string masterSlaveOplogName = "local.oplog.$main";
- int OPLOG_VERSION = 2;
+std::string rsOplogName = "local.oplog.rs";
+std::string masterSlaveOplogName = "local.oplog.$main";
+int OPLOG_VERSION = 2;
namespace {
- // cached copies of these...so don't rename them, drop them, etc.!!!
- Database* _localDB = nullptr;
- Collection* _localOplogCollection = nullptr;
-
- // Synchronizes the section where a new Timestamp is generated and when it actually
- // appears in the oplog.
- stdx::mutex newOpMutex;
- stdx::condition_variable newTimestampNotifier;
-
- static std::string _oplogCollectionName;
-
- // so we can fail the same way
- void checkOplogInsert( StatusWith<RecordId> result ) {
- massert( 17322,
- str::stream() << "write to oplog failed: " << result.getStatus().toString(),
- result.isOK() );
- }
+// cached copies of these...so don't rename them, drop them, etc.!!!
+Database* _localDB = nullptr;
+Collection* _localOplogCollection = nullptr;
- /**
- * Allocates an optime for a new entry in the oplog, and updates the replication coordinator to
- * reflect that new optime. Returns the new optime and the correct value of the "h" field for
- * the new oplog entry.
- *
- * NOTE: From the time this function returns to the time that the new oplog entry is written
- * to the storage system, all errors must be considered fatal. This is because the this
- * function registers the new optime with the storage system and the replication coordinator,
- * and provides no facility to revert those registrations on rollback.
- */
- std::pair<OpTime, long long> getNextOpTime(OperationContext* txn,
- Collection* oplog,
- const char* ns,
- ReplicationCoordinator* replCoord,
- const char* opstr) {
- stdx::lock_guard<stdx::mutex> lk(newOpMutex);
- Timestamp ts = getNextGlobalTimestamp();
- newTimestampNotifier.notify_all();
-
- fassert(28560, oplog->getRecordStore()->oplogDiskLocRegister(txn, ts));
-
- long long hashNew = 0;
- long long term = 0;
-
- // Set hash and term if we're in replset mode, otherwise they remain 0 in master/slave.
- if (replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet) {
- // Current term. If we're not a replset of pv=1, it could be the default value (0) or
- // the last valid term before downgrade.
- term = ReplClientInfo::forClient(txn->getClient()).getTerm();
-
- hashNew = BackgroundSync::get()->getLastAppliedHash();
-
- // Check to make sure logOp() is legal at this point.
- if (*opstr == 'n') {
- // 'n' operations are always logged
- invariant(*ns == '\0');
- // 'n' operations do not advance the hash, since they are not rolled back
- }
- else {
- // Advance the hash
- hashNew = (hashNew * 131 + ts.asLL()) * 17 + replCoord->getMyId();
+// Synchronizes the section where a new Timestamp is generated and when it actually
+// appears in the oplog.
+stdx::mutex newOpMutex;
+stdx::condition_variable newTimestampNotifier;
- BackgroundSync::get()->setLastAppliedHash(hashNew);
- }
- }
+static std::string _oplogCollectionName;
- OpTime opTime(ts, term);
- replCoord->setMyLastOptime(opTime);
- return std::pair<OpTime,long long>(opTime, hashNew);
- }
+// so we can fail the same way
+void checkOplogInsert(StatusWith<RecordId> result) {
+ massert(17322,
+ str::stream() << "write to oplog failed: " << result.getStatus().toString(),
+ result.isOK());
+}
- /**
- * This allows us to stream the oplog entry directly into data region
- * main goal is to avoid copying the o portion
- * which can be very large
- * TODO: can have this build the entire doc
- */
- class OplogDocWriter : public DocWriter {
- public:
- OplogDocWriter( const BSONObj& frame, const BSONObj& oField )
- : _frame( frame ), _oField( oField ) {
+/**
+ * Allocates an optime for a new entry in the oplog, and updates the replication coordinator to
+ * reflect that new optime. Returns the new optime and the correct value of the "h" field for
+ * the new oplog entry.
+ *
+ * NOTE: From the time this function returns to the time that the new oplog entry is written
+ * to the storage system, all errors must be considered fatal. This is because this
+ * function registers the new optime with the storage system and the replication coordinator,
+ * and provides no facility to revert those registrations on rollback.
+ */
+std::pair<OpTime, long long> getNextOpTime(OperationContext* txn,
+ Collection* oplog,
+ const char* ns,
+ ReplicationCoordinator* replCoord,
+ const char* opstr) {
+ stdx::lock_guard<stdx::mutex> lk(newOpMutex);
+ Timestamp ts = getNextGlobalTimestamp();
+ newTimestampNotifier.notify_all();
+
+ fassert(28560, oplog->getRecordStore()->oplogDiskLocRegister(txn, ts));
+
+ long long hashNew = 0;
+ long long term = 0;
+
+ // Set hash and term if we're in replset mode, otherwise they remain 0 in master/slave.
+ if (replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet) {
+        // Current term. If we're not a replset using protocol version 1, it could be the
+        // default value (0) or the last valid term before downgrade.
+ term = ReplClientInfo::forClient(txn->getClient()).getTerm();
+
+ hashNew = BackgroundSync::get()->getLastAppliedHash();
+
+ // Check to make sure logOp() is legal at this point.
+ if (*opstr == 'n') {
+ // 'n' operations are always logged
+ invariant(*ns == '\0');
+ // 'n' operations do not advance the hash, since they are not rolled back
+ } else {
+ // Advance the hash
+ hashNew = (hashNew * 131 + ts.asLL()) * 17 + replCoord->getMyId();
+
+ BackgroundSync::get()->setLastAppliedHash(hashNew);
}
+ }
- ~OplogDocWriter(){}
-
- void writeDocument( char* start ) const {
- char* buf = start;
-
- memcpy( buf, _frame.objdata(), _frame.objsize() - 1 ); // don't copy final EOO
+ OpTime opTime(ts, term);
+ replCoord->setMyLastOptime(opTime);
+ return std::pair<OpTime, long long>(opTime, hashNew);
+}
- reinterpret_cast<int*>( buf )[0] = documentSize();
+/**
+ * This allows us to stream the oplog entry directly into the data region.
+ * The main goal is to avoid copying the "o" portion, which can be very large.
+ * TODO: can have this build the entire doc
+ */
+class OplogDocWriter : public DocWriter {
+public:
+ OplogDocWriter(const BSONObj& frame, const BSONObj& oField) : _frame(frame), _oField(oField) {}
- buf += ( _frame.objsize() - 1 );
- buf[0] = (char)Object;
- buf[1] = 'o';
- buf[2] = 0;
- memcpy( buf+3, _oField.objdata(), _oField.objsize() );
- buf += 3 + _oField.objsize();
- buf[0] = EOO;
+ ~OplogDocWriter() {}
- verify( static_cast<size_t>( ( buf + 1 ) - start ) == documentSize() ); // DEV?
- }
+ void writeDocument(char* start) const {
+ char* buf = start;
- size_t documentSize() const {
- return _frame.objsize() + _oField.objsize() + 1 /* type */ + 2 /* "o" */;
- }
+ memcpy(buf, _frame.objdata(), _frame.objsize() - 1); // don't copy final EOO
- private:
- BSONObj _frame;
- BSONObj _oField;
- };
+ reinterpret_cast<int*>(buf)[0] = documentSize();
-} // namespace
+ buf += (_frame.objsize() - 1);
+ buf[0] = (char)Object;
+ buf[1] = 'o';
+ buf[2] = 0;
+ memcpy(buf + 3, _oField.objdata(), _oField.objsize());
+ buf += 3 + _oField.objsize();
+ buf[0] = EOO;
- void setOplogCollectionName() {
- if (getGlobalReplicationCoordinator()->getReplicationMode() ==
- ReplicationCoordinator::modeReplSet) {
- _oplogCollectionName = rsOplogName;
- }
- else {
- _oplogCollectionName = masterSlaveOplogName;
- }
+ verify(static_cast<size_t>((buf + 1) - start) == documentSize()); // DEV?
}
- /* we write to local.oplog.rs:
- { ts : ..., h: ..., v: ..., op: ..., etc }
- ts: an OpTime timestamp
- h: hash
- v: version
- op:
- "i" insert
- "u" update
- "d" delete
- "c" db cmd
- "db" declares presence of a database (ns is set to the db name + '.')
- "n" no op
-
- bb param:
- if not null, specifies a boolean to pass along to the other side as b: param.
- used for "justOne" or "upsert" flags on 'd', 'u'
-
- */
+ size_t documentSize() const {
+ return _frame.objsize() + _oField.objsize() + 1 /* type */ + 2 /* "o" */;
+ }
- void _logOp(OperationContext* txn,
- const char *opstr,
- const char *ns,
- const BSONObj& obj,
- BSONObj *o2,
- bool fromMigrate) {
- NamespaceString nss(ns);
- if (nss.db() == "local") {
- return;
- }
+private:
+ BSONObj _frame;
+ BSONObj _oField;
+};
- if (nss.isSystemDotProfile()) {
- return;
- }
+} // namespace
- if (!getGlobalReplicationCoordinator()->isReplEnabled()) {
- return;
- }
+void setOplogCollectionName() {
+ if (getGlobalReplicationCoordinator()->getReplicationMode() ==
+ ReplicationCoordinator::modeReplSet) {
+ _oplogCollectionName = rsOplogName;
+ } else {
+ _oplogCollectionName = masterSlaveOplogName;
+ }
+}
+
+/* we write to local.oplog.rs:
+ { ts : ..., h: ..., v: ..., op: ..., etc }
+ ts: an OpTime timestamp
+ h: hash
+ v: version
+ op:
+ "i" insert
+ "u" update
+ "d" delete
+ "c" db cmd
+ "db" declares presence of a database (ns is set to the db name + '.')
+ "n" no op
+
+ bb param:
+ if not null, specifies a boolean to pass along to the other side as b: param.
+ used for "justOne" or "upsert" flags on 'd', 'u'
- if (!txn->writesAreReplicated()) {
- return;
- }
+*/
- fassert(28626, txn->recoveryUnit());
+void _logOp(OperationContext* txn,
+ const char* opstr,
+ const char* ns,
+ const BSONObj& obj,
+ BSONObj* o2,
+ bool fromMigrate) {
+ NamespaceString nss(ns);
+ if (nss.db() == "local") {
+ return;
+ }
- Lock::DBLock lk(txn->lockState(), "local", MODE_IX);
+ if (nss.isSystemDotProfile()) {
+ return;
+ }
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ if (!getGlobalReplicationCoordinator()->isReplEnabled()) {
+ return;
+ }
- if (ns[0] && replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet &&
- !replCoord->canAcceptWritesFor(nss)) {
- severe() << "logOp() but can't accept write to collection " << ns;
- fassertFailed(17405);
- }
- Lock::CollectionLock lk2(txn->lockState(), _oplogCollectionName, MODE_IX);
+ if (!txn->writesAreReplicated()) {
+ return;
+ }
+ fassert(28626, txn->recoveryUnit());
- if (_localOplogCollection == nullptr) {
- OldClientContext ctx(txn, _oplogCollectionName);
- _localDB = ctx.db();
- invariant(_localDB);
- _localOplogCollection = _localDB->getCollection(_oplogCollectionName);
- massert(13347,
- "the oplog collection " + _oplogCollectionName +
- " missing. did you drop it? if so, restart the server",
- _localOplogCollection);
- }
+ Lock::DBLock lk(txn->lockState(), "local", MODE_IX);
- std::pair<OpTime, long long> slot = getNextOpTime(txn,
- _localOplogCollection,
- ns,
- replCoord,
- opstr);
-
- /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
- instead we do a single copy to the destination position in the memory mapped file.
- */
-
- BSONObjBuilder b(256);
- b.append("ts", slot.first.getTimestamp());
- b.append("t", slot.first.getTerm());
- b.append("h", slot.second);
- b.append("v", OPLOG_VERSION);
- b.append("op", opstr);
- b.append("ns", ns);
- if (fromMigrate) {
- b.appendBool("fromMigrate", true);
- }
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if ( o2 ) {
- b.append("o2", *o2);
- }
- BSONObj partial = b.done();
+ if (ns[0] && replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet &&
+ !replCoord->canAcceptWritesFor(nss)) {
+ severe() << "logOp() but can't accept write to collection " << ns;
+ fassertFailed(17405);
+ }
+ Lock::CollectionLock lk2(txn->lockState(), _oplogCollectionName, MODE_IX);
- OplogDocWriter writer( partial, obj );
- checkOplogInsert( _localOplogCollection->insertDocument( txn, &writer, false ) );
- ReplClientInfo::forClient(txn->getClient()).setLastOp( slot.first );
+ if (_localOplogCollection == nullptr) {
+ OldClientContext ctx(txn, _oplogCollectionName);
+ _localDB = ctx.db();
+ invariant(_localDB);
+ _localOplogCollection = _localDB->getCollection(_oplogCollectionName);
+ massert(13347,
+ "the oplog collection " + _oplogCollectionName +
+ " missing. did you drop it? if so, restart the server",
+ _localOplogCollection);
}
- OpTime writeOpsToOplog(OperationContext* txn, const std::deque<BSONObj>& ops) {
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
-
- OpTime lastOptime;
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- lastOptime = replCoord->getMyLastOptime();
- invariant(!ops.empty());
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), "local", MODE_X);
-
- if ( _localOplogCollection == 0 ) {
- OldClientContext ctx(txn, rsOplogName);
-
- _localDB = ctx.db();
- verify( _localDB );
- _localOplogCollection = _localDB->getCollection(rsOplogName);
- massert(13389,
- "local.oplog.rs missing. did you drop it? if so restart server",
- _localOplogCollection);
- }
+ std::pair<OpTime, long long> slot =
+ getNextOpTime(txn, _localOplogCollection, ns, replCoord, opstr);
- OldClientContext ctx(txn, rsOplogName, _localDB);
- WriteUnitOfWork wunit(txn);
+ /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
+ instead we do a single copy to the destination position in the memory mapped file.
+ */
- for (std::deque<BSONObj>::const_iterator it = ops.begin();
- it != ops.end();
- ++it) {
- const BSONObj& op = *it;
- const OpTime optime = extractOpTime(op);
+ BSONObjBuilder b(256);
+ b.append("ts", slot.first.getTimestamp());
+ b.append("t", slot.first.getTerm());
+ b.append("h", slot.second);
+ b.append("v", OPLOG_VERSION);
+ b.append("op", opstr);
+ b.append("ns", ns);
+ if (fromMigrate) {
+ b.appendBool("fromMigrate", true);
+ }
- checkOplogInsert(_localOplogCollection->insertDocument(txn, op, false));
+ if (o2) {
+ b.append("o2", *o2);
+ }
+ BSONObj partial = b.done();
- if (!(lastOptime < optime)) {
- severe() << "replication oplog stream went back in time. "
- "previous timestamp: " << lastOptime << " newest timestamp: " << optime
- << ". Op being applied: " << op;
- fassertFailedNoTrace(18905);
- }
- lastOptime = optime;
- }
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "writeOps", _localOplogCollection->ns().ns());
+ OplogDocWriter writer(partial, obj);
+ checkOplogInsert(_localOplogCollection->insertDocument(txn, &writer, false));
- BackgroundSync* bgsync = BackgroundSync::get();
- // Keep this up-to-date, in case we step up to primary.
- long long hash = ops.back()["h"].numberLong();
- bgsync->setLastAppliedHash(hash);
+ ReplClientInfo::forClient(txn->getClient()).setLastOp(slot.first);
+}
- return lastOptime;
- }
+OpTime writeOpsToOplog(OperationContext* txn, const std::deque<BSONObj>& ops) {
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- void createOplog(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
+ OpTime lastOptime;
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ lastOptime = replCoord->getMyLastOptime();
+ invariant(!ops.empty());
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), "local", MODE_X);
- const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
- bool rs = !replSettings.replSet.empty();
+ if (_localOplogCollection == 0) {
+ OldClientContext ctx(txn, rsOplogName);
- OldClientContext ctx(txn, _oplogCollectionName);
- Collection* collection = ctx.db()->getCollection( _oplogCollectionName );
+ _localDB = ctx.db();
+ verify(_localDB);
+ _localOplogCollection = _localDB->getCollection(rsOplogName);
+ massert(13389,
+ "local.oplog.rs missing. did you drop it? if so restart server",
+ _localOplogCollection);
+ }
- if ( collection ) {
+ OldClientContext ctx(txn, rsOplogName, _localDB);
+ WriteUnitOfWork wunit(txn);
- if (replSettings.oplogSize != 0) {
- const CollectionOptions oplogOpts =
- collection->getCatalogEntry()->getCollectionOptions(txn);
+ for (std::deque<BSONObj>::const_iterator it = ops.begin(); it != ops.end(); ++it) {
+ const BSONObj& op = *it;
+ const OpTime optime = extractOpTime(op);
- int o = (int)(oplogOpts.cappedSize / ( 1024 * 1024 ) );
- int n = (int)(replSettings.oplogSize / (1024 * 1024));
- if ( n != o ) {
- stringstream ss;
- ss << "cmdline oplogsize (" << n << ") different than existing (" << o << ") see: http://dochub.mongodb.org/core/increase-oplog";
- log() << ss.str() << endl;
- throw UserException( 13257 , ss.str() );
- }
- }
+ checkOplogInsert(_localOplogCollection->insertDocument(txn, op, false));
- if ( !rs )
- initTimestampFromOplog(txn, _oplogCollectionName);
- return;
+ if (!(lastOptime < optime)) {
+ severe() << "replication oplog stream went back in time. "
+ "previous timestamp: " << lastOptime << " newest timestamp: " << optime
+ << ". Op being applied: " << op;
+ fassertFailedNoTrace(18905);
+ }
+ lastOptime = optime;
}
-
- /* create an oplog collection, if it doesn't yet exist. */
- long long sz = 0;
- if ( replSettings.oplogSize != 0 ) {
- sz = replSettings.oplogSize;
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "writeOps", _localOplogCollection->ns().ns());
+
+ BackgroundSync* bgsync = BackgroundSync::get();
+ // Keep this up-to-date, in case we step up to primary.
+ long long hash = ops.back()["h"].numberLong();
+ bgsync->setLastAppliedHash(hash);
+
+ return lastOptime;
+}
+
+void createOplog(OperationContext* txn) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+
+ const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
+ bool rs = !replSettings.replSet.empty();
+
+ OldClientContext ctx(txn, _oplogCollectionName);
+ Collection* collection = ctx.db()->getCollection(_oplogCollectionName);
+
+ if (collection) {
+ if (replSettings.oplogSize != 0) {
+ const CollectionOptions oplogOpts =
+ collection->getCatalogEntry()->getCollectionOptions(txn);
+
+ int o = (int)(oplogOpts.cappedSize / (1024 * 1024));
+ int n = (int)(replSettings.oplogSize / (1024 * 1024));
+ if (n != o) {
+ stringstream ss;
+ ss << "cmdline oplogsize (" << n << ") different than existing (" << o
+ << ") see: http://dochub.mongodb.org/core/increase-oplog";
+ log() << ss.str() << endl;
+ throw UserException(13257, ss.str());
+ }
}
- else {
- /* not specified. pick a default size */
- sz = 50LL * 1024LL * 1024LL;
- if ( sizeof(int *) >= 8 ) {
+
+ if (!rs)
+ initTimestampFromOplog(txn, _oplogCollectionName);
+ return;
+ }
+
+ /* create an oplog collection, if it doesn't yet exist. */
+ long long sz = 0;
+ if (replSettings.oplogSize != 0) {
+ sz = replSettings.oplogSize;
+ } else {
+ /* not specified. pick a default size */
+ sz = 50LL * 1024LL * 1024LL;
+ if (sizeof(int*) >= 8) {
#if defined(__APPLE__)
- // typically these are desktops (dev machines), so keep it smallish
- sz = (256-64) * 1024 * 1024;
+ // typically these are desktops (dev machines), so keep it smallish
+ sz = (256 - 64) * 1024 * 1024;
#else
- sz = 990LL * 1024 * 1024;
- double free =
- File::freeSpace(storageGlobalParams.dbpath); //-1 if call not supported.
- long long fivePct = static_cast<long long>( free * 0.05 );
- if ( fivePct > sz )
- sz = fivePct;
- // we use 5% of free space up to 50GB (1TB free)
- static long long upperBound = 50LL * 1024 * 1024 * 1024;
- if (fivePct > upperBound)
- sz = upperBound;
+ sz = 990LL * 1024 * 1024;
+ double free = File::freeSpace(storageGlobalParams.dbpath); //-1 if call not supported.
+ long long fivePct = static_cast<long long>(free * 0.05);
+ if (fivePct > sz)
+ sz = fivePct;
+ // we use 5% of free space up to 50GB (1TB free)
+ static long long upperBound = 50LL * 1024 * 1024 * 1024;
+ if (fivePct > upperBound)
+ sz = upperBound;
#endif
- }
}
-
- log() << "******" << endl;
- log() << "creating replication oplog of size: " << (int)( sz / ( 1024 * 1024 ) ) << "MB..." << endl;
-
- CollectionOptions options;
- options.capped = true;
- options.cappedSize = sz;
- options.autoIndexId = CollectionOptions::NO;
-
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork uow( txn );
- invariant(ctx.db()->createCollection(txn, _oplogCollectionName, options));
- if( !rs )
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSONObj());
- uow.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", _oplogCollectionName);
-
- /* sync here so we don't get any surprising lag later when we try to sync */
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->flushAllFiles(true);
- log() << "******" << endl;
}
- // -------------------------------------
+ log() << "******" << endl;
+ log() << "creating replication oplog of size: " << (int)(sz / (1024 * 1024)) << "MB..." << endl;
-namespace {
- NamespaceString parseNs(const string& ns, const BSONObj& cmdObj) {
- BSONElement first = cmdObj.firstElement();
- uassert(28635,
- "no collection name specified",
- first.canonicalType() == canonicalizeBSONType(mongo::String)
- && first.valuestrsize() > 0);
- std::string coll = first.valuestr();
- return NamespaceString(NamespaceString(ns).db().toString(), coll);
+ CollectionOptions options;
+ options.capped = true;
+ options.cappedSize = sz;
+ options.autoIndexId = CollectionOptions::NO;
+
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork uow(txn);
+ invariant(ctx.db()->createCollection(txn, _oplogCollectionName, options));
+ if (!rs)
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSONObj());
+ uow.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createCollection", _oplogCollectionName);
- using OpApplyFn = stdx::function<Status (OperationContext*, const char*, BSONObj&)>;
+ /* sync here so we don't get any surprising lag later when we try to sync */
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->flushAllFiles(true);
+ log() << "******" << endl;
+}
- struct ApplyOpMetadata {
- OpApplyFn applyFunc;
- std::set<ErrorCodes::Error> acceptableErrors;
+// -------------------------------------
- ApplyOpMetadata(OpApplyFn fun) {
- applyFunc = fun;
- }
+namespace {
+NamespaceString parseNs(const string& ns, const BSONObj& cmdObj) {
+ BSONElement first = cmdObj.firstElement();
+ uassert(28635,
+ "no collection name specified",
+ first.canonicalType() == canonicalizeBSONType(mongo::String) &&
+ first.valuestrsize() > 0);
+ std::string coll = first.valuestr();
+ return NamespaceString(NamespaceString(ns).db().toString(), coll);
+}
+
+using OpApplyFn = stdx::function<Status(OperationContext*, const char*, BSONObj&)>;
+
+struct ApplyOpMetadata {
+ OpApplyFn applyFunc;
+ std::set<ErrorCodes::Error> acceptableErrors;
+
+ ApplyOpMetadata(OpApplyFn fun) {
+ applyFunc = fun;
+ }
- ApplyOpMetadata(OpApplyFn fun, std::set<ErrorCodes::Error> theAcceptableErrors) {
- applyFunc = fun;
- acceptableErrors = theAcceptableErrors;
+ ApplyOpMetadata(OpApplyFn fun, std::set<ErrorCodes::Error> theAcceptableErrors) {
+ applyFunc = fun;
+ acceptableErrors = theAcceptableErrors;
+ }
+};
+
+std::map<std::string, ApplyOpMetadata> opsMap = {
+ {"create",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd)
+ -> Status { return createCollection(txn, NamespaceString(ns).db().toString(), cmd); },
+ {ErrorCodes::NamespaceExists}}},
+ {"collMod",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ BSONObjBuilder resultWeDontCareAbout;
+ return collMod(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ }}},
+ {"dropDatabase",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd)
+ -> Status { return dropDatabase(txn, NamespaceString(ns).db().toString()); },
+ {ErrorCodes::DatabaseNotFound}}},
+ {"drop",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropCollection(txn, parseNs(ns, cmd), resultWeDontCareAbout);
+ },
+ // IllegalOperation is necessary because in 3.0 we replicate drops of system.profile
+ // TODO(dannenberg) remove IllegalOperation once we no longer need 3.0 compatibility
+ {ErrorCodes::NamespaceNotFound, ErrorCodes::IllegalOperation}}},
+ // deleteIndex(es) is deprecated but still works as of April 10, 2015
+ {"deleteIndex",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
+ {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
+ {"deleteIndexes",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
+ {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
+ {"dropIndex",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
+ {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
+ {"dropIndexes",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
+ {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
+ {"renameCollection",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return renameCollection(txn,
+ NamespaceString(cmd.firstElement().valuestrsafe()),
+ NamespaceString(cmd["to"].valuestrsafe()),
+ cmd["stayTemp"].trueValue(),
+ cmd["dropTarget"].trueValue());
+ },
+ {ErrorCodes::NamespaceNotFound, ErrorCodes::NamespaceExists}}},
+ {"applyOps",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ BSONObjBuilder resultWeDontCareAbout;
+ return applyOps(txn, nsToDatabase(ns), cmd, &resultWeDontCareAbout);
+ },
+ {ErrorCodes::UnknownError}}},
+ {"convertToCapped",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd)
+ -> Status { return convertToCapped(txn, parseNs(ns, cmd), cmd["size"].number()); }}},
+ {"emptycapped",
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd)
+ -> Status { return emptyCapped(txn, parseNs(ns, cmd)); }}},
+};
+
+} // namespace
+
+// @return failure status if an update should have happened and the document DNE.
+// See replset initial sync code.
+Status applyOperation_inlock(OperationContext* txn,
+ Database* db,
+ const BSONObj& op,
+ bool convertUpdateToUpsert) {
+ LOG(3) << "applying op: " << op << endl;
+
+ OpCounters* opCounters = txn->writesAreReplicated() ? &globalOpCounters : &replOpCounters;
+
+ const char* names[] = {"o", "ns", "op", "b", "o2"};
+ BSONElement fields[5];
+ op.getFields(5, names, fields);
+ BSONElement& fieldO = fields[0];
+ BSONElement& fieldNs = fields[1];
+ BSONElement& fieldOp = fields[2];
+ BSONElement& fieldB = fields[3];
+ BSONElement& fieldO2 = fields[4];
+
+ BSONObj o;
+ if (fieldO.isABSONObj())
+ o = fieldO.embeddedObject();
+
+ const char* ns = fieldNs.valuestrsafe();
+
+ BSONObj o2;
+ if (fieldO2.isABSONObj())
+ o2 = fieldO2.Obj();
+
+ bool valueB = fieldB.booleanSafe();
+
+ if (nsIsFull(ns)) {
+ if (supportsDocLocking()) {
+            // WiredTiger and other engines that support doc locking require MODE_IX, since
+            // the applier threads driving this allow writes to the same collection on any thread.
+ invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_IX));
+ } else {
+ // mmapV1 ensures that all operations to the same collection are executed from
+ // the same worker thread, so it takes an exclusive lock (MODE_X)
+ invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
}
- };
-
- std::map<std::string, ApplyOpMetadata> opsMap = {
- {"create",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return createCollection(txn, NamespaceString(ns).db().toString(), cmd);
- },
- {ErrorCodes::NamespaceExists}
- }
- },
- {"collMod",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return collMod(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- }
- }
- },
- {"dropDatabase",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return dropDatabase(txn, NamespaceString(ns).db().toString());
- },
- {ErrorCodes::DatabaseNotFound}
- }
- },
- {"drop",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropCollection(txn, parseNs(ns, cmd), resultWeDontCareAbout);
- },
- // IllegalOperation is necessary because in 3.0 we replicate drops of system.profile
- // TODO(dannenberg) remove IllegalOperation once we no longer need 3.0 compatibility
- {ErrorCodes::NamespaceNotFound, ErrorCodes::IllegalOperation}
- }
- },
- // deleteIndex(es) is deprecated but still works as of April 10, 2015
- {"deleteIndex",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
- {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}
- }
- },
- {"deleteIndexes",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
- {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}
- }
- },
- {"dropIndex",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
- {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}
- }
- },
- {"dropIndexes",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
- {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}
- }
- },
- {"renameCollection",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return renameCollection(txn,
- NamespaceString(cmd.firstElement().valuestrsafe()),
- NamespaceString(cmd["to"].valuestrsafe()),
- cmd["stayTemp"].trueValue(),
- cmd["dropTarget"].trueValue());
- },
- {ErrorCodes::NamespaceNotFound, ErrorCodes::NamespaceExists}
- }
- },
- {"applyOps",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return applyOps(txn, nsToDatabase(ns), cmd, &resultWeDontCareAbout);
- },
- {ErrorCodes::UnknownError}
- }
- },
- {"convertToCapped",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return convertToCapped(txn,
- parseNs(ns, cmd),
- cmd["size"].number());
- }
- }
- },
- {"emptycapped",
- {
- [](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return emptyCapped(txn, parseNs(ns, cmd));
- }
- }
- },
- };
-
-} // namespace
-
- // @return failure status if an update should have happened and the document DNE.
- // See replset initial sync code.
- Status applyOperation_inlock(OperationContext* txn,
- Database* db,
- const BSONObj& op,
- bool convertUpdateToUpsert) {
- LOG(3) << "applying op: " << op << endl;
-
- OpCounters * opCounters = txn->writesAreReplicated() ? &globalOpCounters : &replOpCounters;
-
- const char *names[] = { "o", "ns", "op", "b", "o2" };
- BSONElement fields[5];
- op.getFields(5, names, fields);
- BSONElement& fieldO = fields[0];
- BSONElement& fieldNs = fields[1];
- BSONElement& fieldOp = fields[2];
- BSONElement& fieldB = fields[3];
- BSONElement& fieldO2 = fields[4];
-
- BSONObj o;
- if( fieldO.isABSONObj() )
- o = fieldO.embeddedObject();
-
- const char *ns = fieldNs.valuestrsafe();
-
- BSONObj o2;
- if (fieldO2.isABSONObj())
- o2 = fieldO2.Obj();
-
- bool valueB = fieldB.booleanSafe();
-
- if (nsIsFull(ns)) {
- if (supportsDocLocking()) {
- // WiredTiger, and others requires MODE_IX since the applier threads driving
- // this allow writes to the same collection on any thread.
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_IX));
+ }
+ Collection* collection = db->getCollection(ns);
+ IndexCatalog* indexCatalog = collection == nullptr ? nullptr : collection->getIndexCatalog();
+
+ // operation type -- see logOp() comments for types
+ const char* opType = fieldOp.valuestrsafe();
+ invariant(*opType != 'c'); // commands are processed in applyCommand_inlock()
+
+ if (*opType == 'i') {
+ opCounters->gotInsert();
+
+ const char* p = strchr(ns, '.');
+ if (p && nsToCollectionSubstring(p) == "system.indexes") {
+ if (o["background"].trueValue()) {
+ IndexBuilder* builder = new IndexBuilder(o);
+ // This spawns a new thread and returns immediately.
+ builder->go();
+ // Wait for thread to start and register itself
+ Lock::TempRelease release(txn->lockState());
+ IndexBuilder::waitForBgIndexStarting();
} else {
- // mmapV1 ensures that all operations to the same collection are executed from
- // the same worker thread, so it takes an exclusive lock (MODE_X)
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ IndexBuilder builder(o);
+ Status status = builder.buildInForeground(txn, db);
+ uassertStatusOK(status);
}
- }
- Collection* collection = db->getCollection( ns );
- IndexCatalog* indexCatalog = collection == nullptr ? nullptr : collection->getIndexCatalog();
-
- // operation type -- see logOp() comments for types
- const char *opType = fieldOp.valuestrsafe();
- invariant(*opType != 'c'); // commands are processed in applyCommand_inlock()
-
- if ( *opType == 'i' ) {
- opCounters->gotInsert();
-
- const char *p = strchr(ns, '.');
- if ( p && nsToCollectionSubstring( p ) == "system.indexes" ) {
- if (o["background"].trueValue()) {
- IndexBuilder* builder = new IndexBuilder(o);
- // This spawns a new thread and returns immediately.
- builder->go();
- // Wait for thread to start and register itself
- Lock::TempRelease release(txn->lockState());
- IndexBuilder::waitForBgIndexStarting();
- }
- else {
- IndexBuilder builder(o);
- Status status = builder.buildInForeground(txn, db);
- uassertStatusOK(status);
- }
- }
- else {
- // do upserts for inserts as we might get replayed more than once
- OpDebug debug;
-
- uassert(ErrorCodes::NamespaceNotFound, str::stream() <<
- "Failed to apply insert due to missing collection: " << op.toString(),
- collection);
-
- // No _id.
- // This indicates an issue with the upstream server:
- // The oplog entry is corrupted; or
- // The version of the upstream server is obsolete.
- uassert(ErrorCodes::NoSuchKey, str::stream() <<
- "Failed to apply insert due to missing _id: " << op.toString(),
- o.hasField("_id"));
-
- // TODO: It may be better to do an insert here, and then catch the duplicate
- // key exception and do update then. Very few upserts will not be inserts...
- BSONObjBuilder b;
- b.append(o.getField("_id"));
-
- const NamespaceString requestNs(ns);
- UpdateRequest request(requestNs);
-
- request.setQuery(b.done());
- request.setUpdates(o);
- request.setUpsert();
- UpdateLifecycleImpl updateLifecycle(true, requestNs);
- request.setLifecycle(&updateLifecycle);
-
- update(txn, db, request, &debug);
- }
- }
- else if ( *opType == 'u' ) {
- opCounters->gotUpdate();
-
+ } else {
+ // do upserts for inserts as we might get replayed more than once
OpDebug debug;
- BSONObj updateCriteria = o2;
- const bool upsert = valueB || convertUpdateToUpsert;
- uassert(ErrorCodes::NoSuchKey, str::stream() <<
- "Failed to apply update due to missing _id: " << op.toString(),
- updateCriteria.hasField("_id"));
+ uassert(ErrorCodes::NamespaceNotFound,
+ str::stream() << "Failed to apply insert due to missing collection: "
+ << op.toString(),
+ collection);
+
+ // No _id.
+ // This indicates an issue with the upstream server:
+ // The oplog entry is corrupted; or
+ // The version of the upstream server is obsolete.
+ uassert(ErrorCodes::NoSuchKey,
+ str::stream() << "Failed to apply insert due to missing _id: " << op.toString(),
+ o.hasField("_id"));
+
+ // TODO: It may be better to do an insert here, and then catch the duplicate
+ // key exception and do update then. Very few upserts will not be inserts...
+ BSONObjBuilder b;
+ b.append(o.getField("_id"));
const NamespaceString requestNs(ns);
UpdateRequest request(requestNs);
- request.setQuery(updateCriteria);
+ request.setQuery(b.done());
request.setUpdates(o);
- request.setUpsert(upsert);
+ request.setUpsert();
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
- UpdateResult ur = update(txn, db, request, &debug);
-
- if( ur.numMatched == 0 ) {
- if( ur.modifiers ) {
- if( updateCriteria.nFields() == 1 ) {
- // was a simple { _id : ... } update criteria
- string msg = str::stream() << "failed to apply update: " << op.toString();
- error() << msg;
- return Status(ErrorCodes::OperationFailed, msg);
- }
- // Need to check to see if it isn't present so we can exit early with a
- // failure. Note that adds some overhead for this extra check in some cases,
- // such as an updateCriteria
- // of the form
- // { _id:..., { x : {$size:...} }
- // thus this is not ideal.
- if (collection == NULL ||
- (indexCatalog->haveIdIndex(txn) &&
- Helpers::findById(txn, collection, updateCriteria).isNull()) ||
- // capped collections won't have an _id index
- (!indexCatalog->haveIdIndex(txn) &&
- Helpers::findOne(txn, collection, updateCriteria, false).isNull())) {
- string msg = str::stream() << "couldn't find doc: " << op.toString();
- error() << msg;
- return Status(ErrorCodes::OperationFailed, msg);
- }
-
- // Otherwise, it's present; zero objects were updated because of additional specifiers
- // in the query for idempotence
+ update(txn, db, request, &debug);
+ }
+ } else if (*opType == 'u') {
+ opCounters->gotUpdate();
+
+ OpDebug debug;
+ BSONObj updateCriteria = o2;
+ const bool upsert = valueB || convertUpdateToUpsert;
+
+ uassert(ErrorCodes::NoSuchKey,
+ str::stream() << "Failed to apply update due to missing _id: " << op.toString(),
+ updateCriteria.hasField("_id"));
+
+ const NamespaceString requestNs(ns);
+ UpdateRequest request(requestNs);
+
+ request.setQuery(updateCriteria);
+ request.setUpdates(o);
+ request.setUpsert(upsert);
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
+
+ UpdateResult ur = update(txn, db, request, &debug);
+
+ if (ur.numMatched == 0) {
+ if (ur.modifiers) {
+ if (updateCriteria.nFields() == 1) {
+ // was a simple { _id : ... } update criteria
+ string msg = str::stream() << "failed to apply update: " << op.toString();
+ error() << msg;
+ return Status(ErrorCodes::OperationFailed, msg);
}
- else {
- // this could happen benignly on an oplog duplicate replay of an upsert
- // (because we are idempotent),
- // if an regular non-mod update fails the item is (presumably) missing.
- if( !upsert ) {
- string msg = str::stream() << "update of non-mod failed: " << op.toString();
- error() << msg;
- return Status(ErrorCodes::OperationFailed, msg);
- }
+ // Need to check to see if it isn't present so we can exit early with a
+                // failure. Note that this adds some overhead for the extra check in some
+                // cases, such as an updateCriteria of the form
+                //     { _id: ..., x: { $size: ... } }
+                // thus this is not ideal.
+ if (collection == NULL ||
+ (indexCatalog->haveIdIndex(txn) &&
+ Helpers::findById(txn, collection, updateCriteria).isNull()) ||
+ // capped collections won't have an _id index
+ (!indexCatalog->haveIdIndex(txn) &&
+ Helpers::findOne(txn, collection, updateCriteria, false).isNull())) {
+ string msg = str::stream() << "couldn't find doc: " << op.toString();
+ error() << msg;
+ return Status(ErrorCodes::OperationFailed, msg);
}
- }
- }
- else if ( *opType == 'd' ) {
- opCounters->gotDelete();
-
- uassert(ErrorCodes::NoSuchKey, str::stream() <<
- "Failed to apply delete due to missing _id: " << op.toString(),
- o.hasField("_id"));
- if (opType[1] == 0) {
- deleteObjects(txn, db, ns, o, PlanExecutor::YIELD_MANUAL, /*justOne*/ valueB);
+ // Otherwise, it's present; zero objects were updated because of additional specifiers
+ // in the query for idempotence
+ } else {
+ // this could happen benignly on an oplog duplicate replay of an upsert
+ // (because we are idempotent),
+                // if a regular non-mod update fails the item is (presumably) missing.
+ if (!upsert) {
+ string msg = str::stream() << "update of non-mod failed: " << op.toString();
+ error() << msg;
+ return Status(ErrorCodes::OperationFailed, msg);
+ }
}
- else
- verify( opType[1] == 'b' ); // "db" advertisement
- }
- else if ( *opType == 'n' ) {
- // no op
}
- else {
- throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
- }
-
- // AuthorizationManager's logOp method registers a RecoveryUnit::Change
- // and to do so we need to have begun a UnitOfWork
- WriteUnitOfWork wuow(txn);
- getGlobalAuthorizationManager()->logOp(
- txn,
- opType,
- ns,
- o,
- fieldO2.isABSONObj() ? &o2 : NULL);
- wuow.commit();
-
- return Status::OK();
+ } else if (*opType == 'd') {
+ opCounters->gotDelete();
+
+ uassert(ErrorCodes::NoSuchKey,
+ str::stream() << "Failed to apply delete due to missing _id: " << op.toString(),
+ o.hasField("_id"));
+
+ if (opType[1] == 0) {
+ deleteObjects(txn, db, ns, o, PlanExecutor::YIELD_MANUAL, /*justOne*/ valueB);
+ } else
+ verify(opType[1] == 'b'); // "db" advertisement
+ } else if (*opType == 'n') {
+ // no op
+ } else {
+ throw MsgAssertionException(14825,
+ ErrorMsg("error in applyOperation : unknown opType ", *opType));
}
- Status applyCommand_inlock(OperationContext* txn, const BSONObj& op) {
- const char *names[] = { "o", "ns", "op" };
- BSONElement fields[3];
- op.getFields(3, names, fields);
- BSONElement& fieldO = fields[0];
- BSONElement& fieldNs = fields[1];
- BSONElement& fieldOp = fields[2];
-
- const char* opType = fieldOp.valuestrsafe();
- invariant(*opType == 'c'); // only commands are processed here
+ // AuthorizationManager's logOp method registers a RecoveryUnit::Change
+ // and to do so we need to have begun a UnitOfWork
+ WriteUnitOfWork wuow(txn);
+ getGlobalAuthorizationManager()->logOp(txn, opType, ns, o, fieldO2.isABSONObj() ? &o2 : NULL);
+ wuow.commit();
+
+ return Status::OK();
+}
+
+Status applyCommand_inlock(OperationContext* txn, const BSONObj& op) {
+ const char* names[] = {"o", "ns", "op"};
+ BSONElement fields[3];
+ op.getFields(3, names, fields);
+ BSONElement& fieldO = fields[0];
+ BSONElement& fieldNs = fields[1];
+ BSONElement& fieldOp = fields[2];
+
+ const char* opType = fieldOp.valuestrsafe();
+ invariant(*opType == 'c'); // only commands are processed here
+
+ BSONObj o;
+ if (fieldO.isABSONObj()) {
+ o = fieldO.embeddedObject();
+ }
- BSONObj o;
- if (fieldO.isABSONObj()) {
- o = fieldO.embeddedObject();
- }
+ const char* ns = fieldNs.valuestrsafe();
- const char* ns = fieldNs.valuestrsafe();
+ // Applying commands in repl is done under Global W-lock, so it is safe to not
+ // perform the current DB checks after reacquiring the lock.
+ invariant(txn->lockState()->isW());
- // Applying commands in repl is done under Global W-lock, so it is safe to not
- // perform the current DB checks after reacquiring the lock.
- invariant(txn->lockState()->isW());
-
- bool done = false;
+ bool done = false;
- while (!done) {
- ApplyOpMetadata curOpToApply = opsMap.find(o.firstElementFieldName())->second;
- Status status = Status::OK();
- try {
- status = curOpToApply.applyFunc(txn, ns, o);
- }
- catch (...) {
- status = exceptionToStatus();
- }
- switch (status.code()) {
+ while (!done) {
+ ApplyOpMetadata curOpToApply = opsMap.find(o.firstElementFieldName())->second;
+ Status status = Status::OK();
+ try {
+ status = curOpToApply.applyFunc(txn, ns, o);
+ } catch (...) {
+ status = exceptionToStatus();
+ }
+ switch (status.code()) {
case ErrorCodes::WriteConflict: {
// Need to throw this up to a higher level where it will be caught and the
// operation retried.
@@ -848,69 +785,65 @@ namespace {
if (_oplogCollectionName == masterSlaveOplogName) {
error() << "Failed command " << o << " on " << nsToDatabaseSubstring(ns)
<< " with status " << status << " during oplog application";
- }
- else if (curOpToApply.acceptableErrors.find(status.code())
- == curOpToApply.acceptableErrors.end()) {
+ } else if (curOpToApply.acceptableErrors.find(status.code()) ==
+ curOpToApply.acceptableErrors.end()) {
error() << "Failed command " << o << " on " << nsToDatabaseSubstring(ns)
<< " with status " << status << " during oplog application";
return status;
}
- // fallthrough
+ // fallthrough
case ErrorCodes::OK:
done = true;
break;
- }
}
-
- // AuthorizationManager's logOp method registers a RecoveryUnit::Change
- // and to do so we need to have begun a UnitOfWork
- WriteUnitOfWork wuow(txn);
- getGlobalAuthorizationManager()->logOp(txn, opType, ns, o, nullptr);
- wuow.commit();
-
- return Status::OK();
}
- void waitUpToOneSecondForTimestampChange(const Timestamp& referenceTime) {
- stdx::unique_lock<stdx::mutex> lk(newOpMutex);
+ // AuthorizationManager's logOp method registers a RecoveryUnit::Change
+ // and to do so we need to have begun a UnitOfWork
+ WriteUnitOfWork wuow(txn);
+ getGlobalAuthorizationManager()->logOp(txn, opType, ns, o, nullptr);
+ wuow.commit();
- while (referenceTime == getLastSetTimestamp()) {
- if (!newTimestampNotifier.timed_wait(lk, boost::posix_time::seconds(1)))
- return;
- }
- }
+ return Status::OK();
+}
- void setNewTimestamp(const Timestamp& newTime) {
- stdx::lock_guard<stdx::mutex> lk(newOpMutex);
- setGlobalTimestamp(newTime);
- newTimestampNotifier.notify_all();
- }
+void waitUpToOneSecondForTimestampChange(const Timestamp& referenceTime) {
+ stdx::unique_lock<stdx::mutex> lk(newOpMutex);
- OpTime extractOpTime(const BSONObj& op) {
- const Timestamp ts = op["ts"].timestamp();
- const long long term = op["t"].numberLong(); // Default to 0 if it's absent
- return OpTime(ts, term);
+ while (referenceTime == getLastSetTimestamp()) {
+ if (!newTimestampNotifier.timed_wait(lk, boost::posix_time::seconds(1)))
+ return;
}
-
- void initTimestampFromOplog(OperationContext* txn, const std::string& oplogNS) {
- DBDirectClient c(txn);
- BSONObj lastOp = c.findOne(oplogNS,
- Query().sort(reverseNaturalObj),
- NULL,
- QueryOption_SlaveOk);
-
- if (!lastOp.isEmpty()) {
- LOG(1) << "replSet setting last Timestamp";
- setNewTimestamp(lastOp[ "ts" ].timestamp());
- }
+}
+
+void setNewTimestamp(const Timestamp& newTime) {
+ stdx::lock_guard<stdx::mutex> lk(newOpMutex);
+ setGlobalTimestamp(newTime);
+ newTimestampNotifier.notify_all();
+}
+
+OpTime extractOpTime(const BSONObj& op) {
+ const Timestamp ts = op["ts"].timestamp();
+ const long long term = op["t"].numberLong(); // Default to 0 if it's absent
+ return OpTime(ts, term);
+}
+
+void initTimestampFromOplog(OperationContext* txn, const std::string& oplogNS) {
+ DBDirectClient c(txn);
+ BSONObj lastOp = c.findOne(oplogNS, Query().sort(reverseNaturalObj), NULL, QueryOption_SlaveOk);
+
+ if (!lastOp.isEmpty()) {
+ LOG(1) << "replSet setting last Timestamp";
+ setNewTimestamp(lastOp["ts"].timestamp());
}
+}
- void oplogCheckCloseDatabase(OperationContext* txn, Database* db) {
- invariant(txn->lockState()->isW());
+void oplogCheckCloseDatabase(OperationContext* txn, Database* db) {
+ invariant(txn->lockState()->isW());
- _localDB = nullptr;
- _localOplogCollection = nullptr;
- }
+ _localDB = nullptr;
+ _localOplogCollection = nullptr;
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
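For orientation, the frame assembled by _logOp() above, with the "o" document streamed in afterwards by OplogDocWriter, yields entries of roughly the shape sketched here; all concrete values are made up and nothing below is executed by the server.

// Illustrative sketch of the fields written for a single insert op.
BSONObjBuilder frame(256);
frame.append("ts", Timestamp());   // real code uses the timestamp allocated by getNextOpTime()
frame.append("t", 1LL);            // term; stays 0 under master/slave
frame.append("h", 0LL);            // hash, advanced per op in replset mode
frame.append("v", OPLOG_VERSION);  // currently 2
frame.append("op", "i");           // insert; see the op-type comment block before _logOp()
frame.append("ns", "test.coll");
BSONObj partial = frame.done();
// OplogDocWriter(partial, obj) then appends { o: obj } directly into the record,
// avoiding a second copy of the potentially large "o" payload.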
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index b3a1f19f634..16e0944ccf6 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -39,100 +39,99 @@
#include "mongo/util/time_support.h"
namespace mongo {
- class BSONObj;
- class Collection;
- struct CollectionOptions;
- class Database;
- class NamespaceString;
- class OperationContext;
- class Timestamp;
- class RecordId;
+class BSONObj;
+class Collection;
+struct CollectionOptions;
+class Database;
+class NamespaceString;
+class OperationContext;
+class Timestamp;
+class RecordId;
namespace repl {
- class ReplicationCoordinator;
-
- // Create a new capped collection for the oplog if it doesn't yet exist.
- // This will be either local.oplog.rs (replica sets) or local.oplog.$main (master/slave)
- // If the collection already exists, set the 'last' OpTime if master/slave (side effect!)
- void createOplog(OperationContext* txn);
-
- // This function writes ops into the replica-set oplog;
- // used internally by replication secondaries after they have applied ops. Updates the global
- // optime.
- // Returns the optime for the last op inserted.
- OpTime writeOpsToOplog(OperationContext* txn,
- const std::deque<BSONObj>& ops);
-
- extern std::string rsOplogName;
- extern std::string masterSlaveOplogName;
-
- extern int OPLOG_VERSION;
-
- /** Log an operation to the local oplog
- *
- * @param opstr
- * "i" insert
- * "u" update
- * "d" delete
- * "c" db cmd
- * "n" no-op
- * "db" declares presence of a database (ns is set to the db name + '.')
- *
- * For 'u' records, 'obj' captures the mutation made to the object but not
- * the object itself. 'o2' captures the the criteria for the object that will be modified.
- */
- void _logOp(OperationContext* txn,
- const char *opstr,
- const char *ns,
- const BSONObj& obj,
- BSONObj *o2,
- bool fromMigrate);
-
- // Flush out the cached pointers to the local database and oplog.
- // Used by the closeDatabase command to ensure we don't cache closed things.
- void oplogCheckCloseDatabase(OperationContext* txn, Database * db);
-
- /**
- * Take a non-command op and apply it locally
- * Used for applying from an oplog
- * @param convertUpdateToUpsert convert some updates to upserts for idempotency reasons
- * Returns failure status if the op was an update that could not be applied.
- */
- Status applyOperation_inlock(OperationContext* txn,
- Database* db,
- const BSONObj& op,
- bool convertUpdateToUpsert = false);
-
- /**
- * Take a command op and apply it locally
- * Used for applying from an oplog
- * Returns failure status if the op that could not be applied.
- */
- Status applyCommand_inlock(OperationContext* txn, const BSONObj& op);
-
- /**
- * Waits up to one second for the Timestamp from the oplog to change.
- */
- void waitUpToOneSecondForTimestampChange(const Timestamp& referenceTime);
-
- /**
- * Initializes the global Timestamp with the value from the timestamp of the last oplog entry.
- */
- void initTimestampFromOplog(OperationContext* txn, const std::string& oplogNS);
-
- /**
- * Sets the global Timestamp to be 'newTime'.
- */
- void setNewTimestamp(const Timestamp& newTime);
-
- /*
- * Extract the OpTime from log entry.
- */
- OpTime extractOpTime(const BSONObj& op);
-
- /**
- * Detects the current replication mode and sets the "_oplogCollectionName" accordingly.
- */
- void setOplogCollectionName();
-} // namespace repl
-} // namespace mongo
+class ReplicationCoordinator;
+
+// Create a new capped collection for the oplog if it doesn't yet exist.
+// This will be either local.oplog.rs (replica sets) or local.oplog.$main (master/slave)
+// If the collection already exists, set the 'last' OpTime if master/slave (side effect!)
+void createOplog(OperationContext* txn);
+
+// This function writes ops into the replica-set oplog; it is used internally by
+// replication secondaries after they have applied ops. Updates the global optime.
+// Returns the optime for the last op inserted.
+OpTime writeOpsToOplog(OperationContext* txn, const std::deque<BSONObj>& ops);
+
+extern std::string rsOplogName;
+extern std::string masterSlaveOplogName;
+
+extern int OPLOG_VERSION;
+
+/** Log an operation to the local oplog
+ *
+ * @param opstr
+ * "i" insert
+ * "u" update
+ * "d" delete
+ * "c" db cmd
+ * "n" no-op
+ * "db" declares presence of a database (ns is set to the db name + '.')
+ *
+ * For 'u' records, 'obj' captures the mutation made to the object but not
+ * the object itself. 'o2' captures the criteria for the object that will be modified.
+ */
+void _logOp(OperationContext* txn,
+ const char* opstr,
+ const char* ns,
+ const BSONObj& obj,
+ BSONObj* o2,
+ bool fromMigrate);
+
+// Flush out the cached pointers to the local database and oplog.
+// Used by the closeDatabase command to ensure we don't cache closed things.
+void oplogCheckCloseDatabase(OperationContext* txn, Database* db);
+
+/**
+ * Take a non-command op and apply it locally
+ * Used for applying from an oplog
+ * @param convertUpdateToUpsert convert some updates to upserts for idempotency reasons
+ * Returns failure status if the op was an update that could not be applied.
+ */
+Status applyOperation_inlock(OperationContext* txn,
+ Database* db,
+ const BSONObj& op,
+ bool convertUpdateToUpsert = false);
+
+/**
+ * Take a command op and apply it locally
+ * Used for applying from an oplog
+ * Returns failure status if the op could not be applied.
+ */
+Status applyCommand_inlock(OperationContext* txn, const BSONObj& op);
+
+/**
+ * Waits up to one second for the Timestamp from the oplog to change.
+ */
+void waitUpToOneSecondForTimestampChange(const Timestamp& referenceTime);
+
+/**
+ * Initializes the global Timestamp with the timestamp of the last oplog entry.
+ */
+void initTimestampFromOplog(OperationContext* txn, const std::string& oplogNS);
+
+/**
+ * Sets the global Timestamp to be 'newTime'.
+ */
+void setNewTimestamp(const Timestamp& newTime);
+
+/**
+ * Extracts the OpTime from the given oplog entry.
+ */
+OpTime extractOpTime(const BSONObj& op);
+
+/**
+ * Detects the current replication mode and sets the "_oplogCollectionName" accordingly.
+ */
+void setOplogCollectionName();
+} // namespace repl
+} // namespace mongo
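
The header above documents the single-character opstr codes accepted by _logOp() ("i", "u", "d", "c", "n", "db"). As a reading aid, here is a hypothetical helper, not part of the tree, that maps those codes onto an enum; the names OplogOpType and parseOpstr are invented for the sketch.

#include <stdexcept>
#include <string>

namespace sketch {

enum class OplogOpType { kInsert, kUpdate, kDelete, kCommand, kNoop, kDatabase };

// Hypothetical mapping of _logOp's opstr codes; throws on anything unrecognized.
OplogOpType parseOpstr(const std::string& opstr) {
    if (opstr == "i")
        return OplogOpType::kInsert;  // insert
    if (opstr == "u")
        return OplogOpType::kUpdate;  // update
    if (opstr == "d")
        return OplogOpType::kDelete;  // delete
    if (opstr == "c")
        return OplogOpType::kCommand;  // db cmd
    if (opstr == "n")
        return OplogOpType::kNoop;  // no-op
    if (opstr == "db")
        return OplogOpType::kDatabase;  // declares presence of a database
    throw std::invalid_argument("unrecognized opstr: " + opstr);
}

}  // namespace sketch
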
diff --git a/src/mongo/db/repl/oplog_interface.h b/src/mongo/db/repl/oplog_interface.h
index 4ca10947ddd..b03a9ac1a43 100644
--- a/src/mongo/db/repl/oplog_interface.h
+++ b/src/mongo/db/repl/oplog_interface.h
@@ -40,40 +40,40 @@
namespace mongo {
namespace repl {
- class OplogInterface {
- MONGO_DISALLOW_COPYING(OplogInterface);
- public:
+class OplogInterface {
+ MONGO_DISALLOW_COPYING(OplogInterface);
- class Iterator;
+public:
+ class Iterator;
- OplogInterface() = default;
- virtual ~OplogInterface() = default;
+ OplogInterface() = default;
+ virtual ~OplogInterface() = default;
- /**
- * Diagnostic information.
- */
- virtual std::string toString() const = 0;
+ /**
+ * Diagnostic information.
+ */
+ virtual std::string toString() const = 0;
- /**
- * Produces an iterator over oplog collection in reverse natural order.
- */
- virtual std::unique_ptr<Iterator> makeIterator() const = 0;
- };
+ /**
+ * Produces an iterator over oplog collection in reverse natural order.
+ */
+ virtual std::unique_ptr<Iterator> makeIterator() const = 0;
+};
- class OplogInterface::Iterator {
- MONGO_DISALLOW_COPYING(Iterator);
- public:
+class OplogInterface::Iterator {
+ MONGO_DISALLOW_COPYING(Iterator);
- using Value = std::pair<BSONObj, RecordId>;
+public:
+ using Value = std::pair<BSONObj, RecordId>;
- Iterator() = default;
- virtual ~Iterator() = default;
+ Iterator() = default;
+ virtual ~Iterator() = default;
- /**
- * Returns next operation and record id (if applicable) in the oplog.
- */
- virtual StatusWith<Value> next() = 0;
- };
+ /**
+ * Returns next operation and record id (if applicable) in the oplog.
+ */
+ virtual StatusWith<Value> next() = 0;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
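
oplog_interface.h above defines a small abstract pair: OplogInterface::makeIterator() hands back an Iterator whose next() yields StatusWith<std::pair<BSONObj, RecordId>> until the oplog is exhausted. The sketch below reduces that shape to self-contained C++, with std::optional standing in for StatusWith and a string/long long pair standing in for BSONObj/RecordId; it illustrates the pattern, not the real classes.

#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>

namespace sketch {

using Entry = std::pair<std::string /*op*/, long long /*recordId*/>;

class OplogLikeInterface {
public:
    class Iterator {
    public:
        virtual ~Iterator() = default;
        // An empty optional plays the role of the NoSuchKey error status.
        virtual std::optional<Entry> next() = 0;
    };

    virtual ~OplogLikeInterface() = default;
    virtual std::string toString() const = 0;
    virtual std::unique_ptr<Iterator> makeIterator() const = 0;
};

// In-memory implementation, analogous in spirit to OplogInterfaceMock.
class InMemoryOplog : public OplogLikeInterface {
public:
    explicit InMemoryOplog(std::list<Entry> ops) : _ops(std::move(ops)) {}

    std::string toString() const override {
        return "InMemoryOplog";
    }

    std::unique_ptr<Iterator> makeIterator() const override {
        class Iter : public Iterator {
        public:
            explicit Iter(const std::list<Entry>& ops) : _it(ops.begin()), _end(ops.end()) {}
            std::optional<Entry> next() override {
                if (_it == _end)
                    return std::nullopt;  // "no more ops"
                return *(_it++);
            }

        private:
            std::list<Entry>::const_iterator _it;
            std::list<Entry>::const_iterator _end;
        };
        return std::make_unique<Iter>(_ops);
    }

private:
    std::list<Entry> _ops;
};

}  // namespace sketch
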
diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp
index c005865e9b0..30c9a7ab9e5 100644
--- a/src/mongo/db/repl/oplog_interface_local.cpp
+++ b/src/mongo/db/repl/oplog_interface_local.cpp
@@ -41,67 +41,57 @@ namespace repl {
namespace {
- class OplogIteratorLocal : public OplogInterface::Iterator {
- public:
-
- OplogIteratorLocal(OperationContext* txn,
- const std::string& collectionName);
-
- StatusWith<Value> next() override;
-
- private:
-
- ScopedTransaction _transaction;
- Lock::DBLock _dbLock;
- Lock::CollectionLock _collectionLock;
- OldClientContext _ctx;
- std::unique_ptr<PlanExecutor> _exec;
-
- };
-
- OplogIteratorLocal::OplogIteratorLocal(OperationContext* txn,
- const std::string& collectionName)
- : _transaction(txn, MODE_IS),
- _dbLock(txn->lockState(), nsToDatabase(collectionName), MODE_IS),
- _collectionLock(txn->lockState(), collectionName, MODE_S),
- _ctx(txn, collectionName),
- _exec(InternalPlanner::collectionScan(txn,
- collectionName,
- _ctx.db()->getCollection(collectionName),
- InternalPlanner::BACKWARD)) { }
-
- StatusWith<OplogInterface::Iterator::Value> OplogIteratorLocal::next() {
- BSONObj obj;
- RecordId recordId;
-
- if (PlanExecutor::ADVANCED != _exec->getNext(&obj, &recordId)) {
- return StatusWith<Value>(ErrorCodes::NoSuchKey, "no more operations in local oplog");
- }
- return StatusWith<Value>(std::make_pair(obj, recordId));
+class OplogIteratorLocal : public OplogInterface::Iterator {
+public:
+ OplogIteratorLocal(OperationContext* txn, const std::string& collectionName);
+
+ StatusWith<Value> next() override;
+
+private:
+ ScopedTransaction _transaction;
+ Lock::DBLock _dbLock;
+ Lock::CollectionLock _collectionLock;
+ OldClientContext _ctx;
+ std::unique_ptr<PlanExecutor> _exec;
+};
+
+OplogIteratorLocal::OplogIteratorLocal(OperationContext* txn, const std::string& collectionName)
+ : _transaction(txn, MODE_IS),
+ _dbLock(txn->lockState(), nsToDatabase(collectionName), MODE_IS),
+ _collectionLock(txn->lockState(), collectionName, MODE_S),
+ _ctx(txn, collectionName),
+ _exec(InternalPlanner::collectionScan(txn,
+ collectionName,
+ _ctx.db()->getCollection(collectionName),
+ InternalPlanner::BACKWARD)) {}
+
+StatusWith<OplogInterface::Iterator::Value> OplogIteratorLocal::next() {
+ BSONObj obj;
+ RecordId recordId;
+
+ if (PlanExecutor::ADVANCED != _exec->getNext(&obj, &recordId)) {
+ return StatusWith<Value>(ErrorCodes::NoSuchKey, "no more operations in local oplog");
}
+ return StatusWith<Value>(std::make_pair(obj, recordId));
+}
-} // namespace
+} // namespace
- OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* txn,
- const std::string& collectionName)
- : _txn(txn),
- _collectionName(collectionName) {
+OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* txn, const std::string& collectionName)
+ : _txn(txn), _collectionName(collectionName) {
+ invariant(txn);
+ invariant(!collectionName.empty());
+}
- invariant(txn);
- invariant(!collectionName.empty());
- }
-
- std::string OplogInterfaceLocal::toString() const {
- return str::stream() <<
- "LocalOplogInterface: "
- "operation context: " << _txn->getNS() << "/" << _txn->getOpID() <<
- "; collection: " << _collectionName;
- }
+std::string OplogInterfaceLocal::toString() const {
+ return str::stream() << "LocalOplogInterface: "
+ "operation context: " << _txn->getNS() << "/" << _txn->getOpID()
+ << "; collection: " << _collectionName;
+}
- std::unique_ptr<OplogInterface::Iterator> OplogInterfaceLocal::makeIterator() const {
- return std::unique_ptr<OplogInterface::Iterator>(
- new OplogIteratorLocal(_txn, _collectionName));
- }
+std::unique_ptr<OplogInterface::Iterator> OplogInterfaceLocal::makeIterator() const {
+ return std::unique_ptr<OplogInterface::Iterator>(new OplogIteratorLocal(_txn, _collectionName));
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/oplog_interface_local.h b/src/mongo/db/repl/oplog_interface_local.h
index cd61a81a239..32c9adc4377 100644
--- a/src/mongo/db/repl/oplog_interface_local.h
+++ b/src/mongo/db/repl/oplog_interface_local.h
@@ -32,27 +32,24 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
- /**
- * Scans local oplog collection in reverse natural order.
- */
-
- class OplogInterfaceLocal : public OplogInterface {
- public:
-
- OplogInterfaceLocal(OperationContext* txn, const std::string& collectionName);
- std::string toString() const override;
- std::unique_ptr<OplogInterface::Iterator> makeIterator() const override;
-
- private:
+/**
+ * Scans local oplog collection in reverse natural order.
+ */
- OperationContext* _txn;
- std::string _collectionName;
+class OplogInterfaceLocal : public OplogInterface {
+public:
+ OplogInterfaceLocal(OperationContext* txn, const std::string& collectionName);
+ std::string toString() const override;
+ std::unique_ptr<OplogInterface::Iterator> makeIterator() const override;
- };
+private:
+ OperationContext* _txn;
+ std::string _collectionName;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/oplog_interface_mock.cpp b/src/mongo/db/repl/oplog_interface_mock.cpp
index 97cea831fd7..4936d5aa533 100644
--- a/src/mongo/db/repl/oplog_interface_mock.cpp
+++ b/src/mongo/db/repl/oplog_interface_mock.cpp
@@ -35,49 +35,43 @@ namespace repl {
namespace {
- class OplogIteratorMock : public OplogInterface::Iterator {
- public:
-
- OplogIteratorMock(OplogInterfaceMock::Operations::const_iterator iterator,
- OplogInterfaceMock::Operations::const_iterator iteratorEnd);
- StatusWith<Value> next() override;
-
- private:
-
- OplogInterfaceMock::Operations::const_iterator _iterator;
- OplogInterfaceMock::Operations::const_iterator _iteratorEnd;
-
- };
-
- OplogIteratorMock::OplogIteratorMock(OplogInterfaceMock::Operations::const_iterator iter,
- OplogInterfaceMock::Operations::const_iterator iterEnd)
- : _iterator(iter),
- _iteratorEnd(iterEnd) {}
-
- StatusWith<OplogInterface::Iterator::Value> OplogIteratorMock::next() {
- if (_iterator == _iteratorEnd) {
- return StatusWith<OplogInterface::Iterator::Value>(ErrorCodes::NoSuchKey,
- "no more ops");
- }
- return *(_iterator++);
+class OplogIteratorMock : public OplogInterface::Iterator {
+public:
+ OplogIteratorMock(OplogInterfaceMock::Operations::const_iterator iterator,
+ OplogInterfaceMock::Operations::const_iterator iteratorEnd);
+ StatusWith<Value> next() override;
+
+private:
+ OplogInterfaceMock::Operations::const_iterator _iterator;
+ OplogInterfaceMock::Operations::const_iterator _iteratorEnd;
+};
+
+OplogIteratorMock::OplogIteratorMock(OplogInterfaceMock::Operations::const_iterator iter,
+ OplogInterfaceMock::Operations::const_iterator iterEnd)
+ : _iterator(iter), _iteratorEnd(iterEnd) {}
+
+StatusWith<OplogInterface::Iterator::Value> OplogIteratorMock::next() {
+ if (_iterator == _iteratorEnd) {
+ return StatusWith<OplogInterface::Iterator::Value>(ErrorCodes::NoSuchKey, "no more ops");
}
+ return *(_iterator++);
+}
-} // namespace
+} // namespace
- OplogInterfaceMock::OplogInterfaceMock(std::initializer_list<Operation> operations)
- : _operations(operations) {}
+OplogInterfaceMock::OplogInterfaceMock(std::initializer_list<Operation> operations)
+ : _operations(operations) {}
- OplogInterfaceMock::OplogInterfaceMock(const Operations& operations)
- : _operations(operations) {}
+OplogInterfaceMock::OplogInterfaceMock(const Operations& operations) : _operations(operations) {}
- std::string OplogInterfaceMock::toString() const {
- return "OplogInterfaceMock";
- }
+std::string OplogInterfaceMock::toString() const {
+ return "OplogInterfaceMock";
+}
- std::unique_ptr<OplogInterface::Iterator> OplogInterfaceMock::makeIterator() const {
- return std::unique_ptr<OplogInterface::Iterator>(
- new OplogIteratorMock(_operations.begin(), _operations.end()));
- }
+std::unique_ptr<OplogInterface::Iterator> OplogInterfaceMock::makeIterator() const {
+ return std::unique_ptr<OplogInterface::Iterator>(
+ new OplogIteratorMock(_operations.begin(), _operations.end()));
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/oplog_interface_mock.h b/src/mongo/db/repl/oplog_interface_mock.h
index 4c2049a5688..524ab3c8d2f 100644
--- a/src/mongo/db/repl/oplog_interface_mock.h
+++ b/src/mongo/db/repl/oplog_interface_mock.h
@@ -34,21 +34,23 @@
namespace mongo {
namespace repl {
- /**
- * Simulates oplog for testing rollback functionality.
- */
- class OplogInterfaceMock : public OplogInterface {
- MONGO_DISALLOW_COPYING(OplogInterfaceMock);
- public:
- using Operation = std::pair<BSONObj,RecordId>;
- using Operations = std::list<Operation>;
- explicit OplogInterfaceMock(std::initializer_list<Operation> operations);
- explicit OplogInterfaceMock(const Operations& operations);
- std::string toString() const override;
- std::unique_ptr<OplogInterface::Iterator> makeIterator() const override;
- private:
- Operations _operations;
- };
+/**
+ * Simulates oplog for testing rollback functionality.
+ */
+class OplogInterfaceMock : public OplogInterface {
+ MONGO_DISALLOW_COPYING(OplogInterfaceMock);
+
+public:
+ using Operation = std::pair<BSONObj, RecordId>;
+ using Operations = std::list<Operation>;
+ explicit OplogInterfaceMock(std::initializer_list<Operation> operations);
+ explicit OplogInterfaceMock(const Operations& operations);
+ std::string toString() const override;
+ std::unique_ptr<OplogInterface::Iterator> makeIterator() const override;
+
+private:
+ Operations _operations;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/oplog_interface_remote.cpp b/src/mongo/db/repl/oplog_interface_remote.cpp
index da78924fc55..445d9776d0b 100644
--- a/src/mongo/db/repl/oplog_interface_remote.cpp
+++ b/src/mongo/db/repl/oplog_interface_remote.cpp
@@ -39,51 +39,46 @@ namespace repl {
namespace {
- class OplogIteratorRemote : public OplogInterface::Iterator {
- public:
+class OplogIteratorRemote : public OplogInterface::Iterator {
+public:
+ OplogIteratorRemote(std::unique_ptr<DBClientCursor> cursor);
+ StatusWith<Value> next() override;
- OplogIteratorRemote(std::unique_ptr<DBClientCursor> cursor);
- StatusWith<Value> next() override;
+private:
+ std::unique_ptr<DBClientCursor> _cursor;
+};
- private:
+OplogIteratorRemote::OplogIteratorRemote(std::unique_ptr<DBClientCursor> cursor)
+ : _cursor(std::move(cursor)) {}
- std::unique_ptr<DBClientCursor> _cursor;
-
- };
-
- OplogIteratorRemote::OplogIteratorRemote(std::unique_ptr<DBClientCursor> cursor)
- : _cursor(std::move(cursor)) { }
-
- StatusWith<OplogInterface::Iterator::Value> OplogIteratorRemote::next() {
- if (!_cursor.get()) {
- return StatusWith<Value>(ErrorCodes::NamespaceNotFound, "no cursor for remote oplog");
- }
- if (!_cursor->more()) {
- return StatusWith<Value>(ErrorCodes::NoSuchKey, "no more operations in remote oplog");
- }
- return StatusWith<Value>(std::make_pair(_cursor->nextSafe(), RecordId()));
+StatusWith<OplogInterface::Iterator::Value> OplogIteratorRemote::next() {
+ if (!_cursor.get()) {
+ return StatusWith<Value>(ErrorCodes::NamespaceNotFound, "no cursor for remote oplog");
}
-
-} // namespace
-
- OplogInterfaceRemote::OplogInterfaceRemote(DBClientConnection* conn,
- const std::string& collectionName)
- : _conn(conn),
- _collectionName(collectionName) {
-
- invariant(conn);
+ if (!_cursor->more()) {
+ return StatusWith<Value>(ErrorCodes::NoSuchKey, "no more operations in remote oplog");
}
-
- std::string OplogInterfaceRemote::toString() const {
- return _conn->toString();
- }
-
- std::unique_ptr<OplogInterface::Iterator> OplogInterfaceRemote::makeIterator() const {
- const Query query = Query().sort(BSON("$natural" << -1));
- const BSONObj fields = BSON("ts" << 1 << "h" << 1);
- return std::unique_ptr<OplogInterface::Iterator>(
- new OplogIteratorRemote(_conn->query(_collectionName, query, 0, 0, &fields, 0, 0)));
- }
-
-} // namespace repl
-} // namespace mongo
+ return StatusWith<Value>(std::make_pair(_cursor->nextSafe(), RecordId()));
+}
+
+} // namespace
+
+OplogInterfaceRemote::OplogInterfaceRemote(DBClientConnection* conn,
+ const std::string& collectionName)
+ : _conn(conn), _collectionName(collectionName) {
+ invariant(conn);
+}
+
+std::string OplogInterfaceRemote::toString() const {
+ return _conn->toString();
+}
+
+std::unique_ptr<OplogInterface::Iterator> OplogInterfaceRemote::makeIterator() const {
+ const Query query = Query().sort(BSON("$natural" << -1));
+ const BSONObj fields = BSON("ts" << 1 << "h" << 1);
+ return std::unique_ptr<OplogInterface::Iterator>(
+ new OplogIteratorRemote(_conn->query(_collectionName, query, 0, 0, &fields, 0, 0)));
+}
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/oplog_interface_remote.h b/src/mongo/db/repl/oplog_interface_remote.h
index ee91d9197d2..300e755f105 100644
--- a/src/mongo/db/repl/oplog_interface_remote.h
+++ b/src/mongo/db/repl/oplog_interface_remote.h
@@ -32,27 +32,24 @@
namespace mongo {
- class DBClientConnection;
+class DBClientConnection;
namespace repl {
- /**
- * Reads oplog on remote server.
- */
-
- class OplogInterfaceRemote : public OplogInterface {
- public:
-
- explicit OplogInterfaceRemote(DBClientConnection* conn, const std::string& collectionName);
- std::string toString() const override;
- std::unique_ptr<OplogInterface::Iterator> makeIterator() const override;
-
- private:
+/**
+ * Reads oplog on remote server.
+ */
- DBClientConnection* _conn;
- std::string _collectionName;
+class OplogInterfaceRemote : public OplogInterface {
+public:
+ explicit OplogInterfaceRemote(DBClientConnection* conn, const std::string& collectionName);
+ std::string toString() const override;
+ std::unique_ptr<OplogInterface::Iterator> makeIterator() const override;
- };
+private:
+ DBClientConnection* _conn;
+ std::string _collectionName;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/oplogreader.cpp b/src/mongo/db/repl/oplogreader.cpp
index d982eae975e..012d7d2458c 100644
--- a/src/mongo/db/repl/oplogreader.cpp
+++ b/src/mongo/db/repl/oplogreader.cpp
@@ -52,162 +52,152 @@
namespace mongo {
- using std::shared_ptr;
- using std::endl;
- using std::string;
+using std::shared_ptr;
+using std::endl;
+using std::string;
namespace repl {
- const BSONObj reverseNaturalObj = BSON( "$natural" << -1 );
+const BSONObj reverseNaturalObj = BSON("$natural" << -1);
- //number of readers created;
- // this happens when the source source changes, a reconfig/network-error or the cursor dies
- static Counter64 readersCreatedStats;
- static ServerStatusMetricField<Counter64> displayReadersCreated(
- "repl.network.readersCreated",
- &readersCreatedStats );
+// Number of readers created;
+// this happens when the sync source changes, on a reconfig/network error, or when the cursor dies
+static Counter64 readersCreatedStats;
+static ServerStatusMetricField<Counter64> displayReadersCreated("repl.network.readersCreated",
+ &readersCreatedStats);
- bool replAuthenticate(DBClientBase *conn) {
- if (!getGlobalAuthorizationManager()->isAuthEnabled())
- return true;
+bool replAuthenticate(DBClientBase* conn) {
+ if (!getGlobalAuthorizationManager()->isAuthEnabled())
+ return true;
- if (!isInternalAuthSet())
- return false;
- return authenticateInternalUser(conn);
- }
+ if (!isInternalAuthSet())
+ return false;
+ return authenticateInternalUser(conn);
+}
- OplogReader::OplogReader() {
- _tailingQueryOptions = QueryOption_SlaveOk;
- _tailingQueryOptions |= QueryOption_CursorTailable | QueryOption_OplogReplay;
-
- /* TODO: slaveOk maybe shouldn't use? */
- _tailingQueryOptions |= QueryOption_AwaitData;
+OplogReader::OplogReader() {
+ _tailingQueryOptions = QueryOption_SlaveOk;
+ _tailingQueryOptions |= QueryOption_CursorTailable | QueryOption_OplogReplay;
- readersCreatedStats.increment();
- }
+    /* TODO: reconsider whether slaveOk should be used here. */
+ _tailingQueryOptions |= QueryOption_AwaitData;
- bool OplogReader::connect(const HostAndPort& host) {
- if (conn() == NULL || _host != host) {
- resetConnection();
- _conn = shared_ptr<DBClientConnection>(new DBClientConnection(false,
- tcp_timeout));
- string errmsg;
- if ( !_conn->connect(host, errmsg) ||
- (getGlobalAuthorizationManager()->isAuthEnabled() &&
- !replAuthenticate(_conn.get())) ) {
-
- resetConnection();
- error() << errmsg << endl;
- return false;
- }
- _conn->port().tag |= executor::NetworkInterface::kMessagingPortKeepOpen;
- _host = host;
- }
- return true;
- }
+ readersCreatedStats.increment();
+}
- void OplogReader::tailCheck() {
- if( cursor.get() && cursor->isDead() ) {
- log() << "old cursor isDead, will initiate a new one" << std::endl;
- resetCursor();
+bool OplogReader::connect(const HostAndPort& host) {
+ if (conn() == NULL || _host != host) {
+ resetConnection();
+ _conn = shared_ptr<DBClientConnection>(new DBClientConnection(false, tcp_timeout));
+ string errmsg;
+ if (!_conn->connect(host, errmsg) ||
+ (getGlobalAuthorizationManager()->isAuthEnabled() && !replAuthenticate(_conn.get()))) {
+ resetConnection();
+ error() << errmsg << endl;
+ return false;
}
+ _conn->port().tag |= executor::NetworkInterface::kMessagingPortKeepOpen;
+ _host = host;
}
+ return true;
+}
- void OplogReader::query(const char *ns,
- Query query,
- int nToReturn,
- int nToSkip,
- const BSONObj* fields) {
- cursor.reset(
- _conn->query(ns, query, nToReturn, nToSkip, fields, QueryOption_SlaveOk).release()
- );
- }
-
- void OplogReader::tailingQuery(const char *ns, const BSONObj& query) {
- verify( !haveCursor() );
- LOG(2) << ns << ".find(" << query.toString() << ')' << endl;
- cursor.reset( _conn->query( ns, query, 0, 0, nullptr, _tailingQueryOptions ).release() );
- }
-
- void OplogReader::tailingQueryGTE(const char *ns, Timestamp optime) {
- BSONObjBuilder gte;
- gte.append("$gte", optime);
- BSONObjBuilder query;
- query.append("ts", gte.done());
- tailingQuery(ns, query.done());
- }
-
- HostAndPort OplogReader::getHost() const {
- return _host;
+void OplogReader::tailCheck() {
+ if (cursor.get() && cursor->isDead()) {
+ log() << "old cursor isDead, will initiate a new one" << std::endl;
+ resetCursor();
}
-
- void OplogReader::connectToSyncSource(OperationContext* txn,
- const OpTime& lastOpTimeFetched,
- ReplicationCoordinator* replCoord) {
- const Timestamp sentinelTimestamp(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
- const OpTime sentinel(sentinelTimestamp, std::numeric_limits<long long>::max());
- OpTime oldestOpTimeSeen = sentinel;
-
- invariant(conn() == NULL);
-
- while (true) {
- HostAndPort candidate = replCoord->chooseNewSyncSource();
-
- if (candidate.empty()) {
- if (oldestOpTimeSeen == sentinel) {
- // If, in this invocation of connectToSyncSource(), we did not successfully
- // connect to any node ahead of us,
- // we apparently have no sync sources to connect to.
- // This situation is common; e.g. if there are no writes to the primary at
- // the moment.
- return;
- }
-
- // Connected to at least one member, but in all cases we were too stale to use them
- // as a sync source.
- error() << "too stale to catch up";
- log() << "our last optime : " << lastOpTimeFetched;
- log() << "oldest available is " << oldestOpTimeSeen;
- log() << "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
- setMinValid(txn, oldestOpTimeSeen);
- bool worked = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
- if (!worked) {
- warning() << "Failed to transition into "
- << MemberState(MemberState::RS_RECOVERING)
- << ". Current state: " << replCoord->getMemberState();
- }
+}
+
+void OplogReader::query(
+ const char* ns, Query query, int nToReturn, int nToSkip, const BSONObj* fields) {
+ cursor.reset(
+ _conn->query(ns, query, nToReturn, nToSkip, fields, QueryOption_SlaveOk).release());
+}
+
+void OplogReader::tailingQuery(const char* ns, const BSONObj& query) {
+ verify(!haveCursor());
+ LOG(2) << ns << ".find(" << query.toString() << ')' << endl;
+ cursor.reset(_conn->query(ns, query, 0, 0, nullptr, _tailingQueryOptions).release());
+}
+
+void OplogReader::tailingQueryGTE(const char* ns, Timestamp optime) {
+ BSONObjBuilder gte;
+ gte.append("$gte", optime);
+ BSONObjBuilder query;
+ query.append("ts", gte.done());
+ tailingQuery(ns, query.done());
+}
+
+HostAndPort OplogReader::getHost() const {
+ return _host;
+}
+
+void OplogReader::connectToSyncSource(OperationContext* txn,
+ const OpTime& lastOpTimeFetched,
+ ReplicationCoordinator* replCoord) {
+ const Timestamp sentinelTimestamp(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
+ const OpTime sentinel(sentinelTimestamp, std::numeric_limits<long long>::max());
+ OpTime oldestOpTimeSeen = sentinel;
+
+ invariant(conn() == NULL);
+
+ while (true) {
+ HostAndPort candidate = replCoord->chooseNewSyncSource();
+
+ if (candidate.empty()) {
+ if (oldestOpTimeSeen == sentinel) {
+ // If, in this invocation of connectToSyncSource(), we did not successfully
+ // connect to any node ahead of us,
+ // we apparently have no sync sources to connect to.
+ // This situation is common; e.g. if there are no writes to the primary at
+ // the moment.
return;
}
- if (!connect(candidate)) {
- LOG(2) << "can't connect to " << candidate.toString() <<
- " to read operations";
- resetConnection();
- replCoord->blacklistSyncSource(candidate, Date_t::now() + Seconds(10));
- continue;
+ // Connected to at least one member, but in all cases we were too stale to use them
+ // as a sync source.
+ error() << "too stale to catch up";
+ log() << "our last optime : " << lastOpTimeFetched;
+ log() << "oldest available is " << oldestOpTimeSeen;
+ log() << "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
+ setMinValid(txn, oldestOpTimeSeen);
+ bool worked = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
+ if (!worked) {
+ warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING)
+ << ". Current state: " << replCoord->getMemberState();
}
- // Read the first (oldest) op and confirm that it's not newer than our last
- // fetched op. Otherwise, we have fallen off the back of that source's oplog.
- BSONObj remoteOldestOp(findOne(rsOplogName.c_str(), Query()));
- OpTime remoteOldOpTime = extractOpTime(remoteOldestOp);
-
- if (lastOpTimeFetched < remoteOldOpTime) {
- // We're too stale to use this sync source.
- resetConnection();
- replCoord->blacklistSyncSource(candidate, Date_t::now() + Minutes(10));
- if (oldestOpTimeSeen > remoteOldOpTime) {
- warning() << "we are too stale to use " << candidate.toString() <<
- " as a sync source";
- oldestOpTimeSeen = remoteOldOpTime;
- }
- continue;
+ return;
+ }
+
+ if (!connect(candidate)) {
+ LOG(2) << "can't connect to " << candidate.toString() << " to read operations";
+ resetConnection();
+ replCoord->blacklistSyncSource(candidate, Date_t::now() + Seconds(10));
+ continue;
+ }
+ // Read the first (oldest) op and confirm that it's not newer than our last
+ // fetched op. Otherwise, we have fallen off the back of that source's oplog.
+ BSONObj remoteOldestOp(findOne(rsOplogName.c_str(), Query()));
+ OpTime remoteOldOpTime = extractOpTime(remoteOldestOp);
+
+ if (lastOpTimeFetched < remoteOldOpTime) {
+ // We're too stale to use this sync source.
+ resetConnection();
+ replCoord->blacklistSyncSource(candidate, Date_t::now() + Minutes(10));
+ if (oldestOpTimeSeen > remoteOldOpTime) {
+ warning() << "we are too stale to use " << candidate.toString()
+ << " as a sync source";
+ oldestOpTimeSeen = remoteOldOpTime;
}
+ continue;
+ }
- // Got a valid sync source.
- return;
- } // while (true)
- }
+ // Got a valid sync source.
+ return;
+ } // while (true)
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
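
connectToSyncSource() above loops over candidates from ReplicationCoordinator::chooseNewSyncSource(), blacklists any node whose oldest oplog entry is newer than our last fetched optime, and gives up when no candidate remains. The following sketch captures just that control flow with hypothetical stand-in types (Candidate, chooseSyncSource) and callbacks; connection handling, MinValid updates, and the RECOVERING transition from the real code are deliberately omitted.

#include <functional>
#include <optional>
#include <string>

namespace sketch {

struct Candidate {
    std::string host;
    long long oldestOpTime;  // oldest op still present in this node's oplog
};

// Returns the chosen host, or std::nullopt when no candidate can serve us.
std::optional<std::string> chooseSyncSource(
    long long lastOpTimeFetched,
    std::function<std::optional<Candidate>()> chooseNewSyncSource,
    std::function<void(const std::string&)> blacklist) {
    while (true) {
        std::optional<Candidate> candidate = chooseNewSyncSource();
        if (!candidate) {
            // No candidates left; the caller decides whether this means "no
            // writes happening right now" or "too stale, go into RECOVERING".
            return std::nullopt;
        }
        if (lastOpTimeFetched < candidate->oldestOpTime) {
            // The candidate's oplog no longer contains our last fetched op, so
            // we cannot resume from it; blacklist it and try another node.
            blacklist(candidate->host);
            continue;
        }
        return candidate->host;  // usable sync source
    }
}

}  // namespace sketch
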
diff --git a/src/mongo/db/repl/oplogreader.h b/src/mongo/db/repl/oplogreader.h
index 63dcaaeaa20..718fa162d88 100644
--- a/src/mongo/db/repl/oplogreader.h
+++ b/src/mongo/db/repl/oplogreader.h
@@ -38,117 +38,126 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
- class ReplicationCoordinator;
- class OpTime;
+class ReplicationCoordinator;
+class OpTime;
- // {"$natural": -1 }
- extern const BSONObj reverseNaturalObj;
+// {"$natural": -1 }
+extern const BSONObj reverseNaturalObj;
+
+/**
+ * Authenticates conn using the server's cluster-membership credentials.
+ *
+ * Returns true on successful authentication.
+ */
+bool replAuthenticate(DBClientBase* conn);
+
+/* Abstracts out the querying of the primary/master's oplog.
+ Still fairly awkward, but a start.
+*/
+
+class OplogReader {
+private:
+ std::shared_ptr<DBClientConnection> _conn;
+ std::shared_ptr<DBClientCursor> cursor;
+ int _tailingQueryOptions;
+
+ // If _conn was actively connected, _host represents the current HostAndPort of the
+ // connection.
+ HostAndPort _host;
+
+public:
+ OplogReader();
+ ~OplogReader() {}
+ void resetCursor() {
+ cursor.reset();
+ }
+ void resetConnection() {
+ cursor.reset();
+ _conn.reset();
+ _host = HostAndPort();
+ }
+ DBClientConnection* conn() {
+ return _conn.get();
+ }
+ BSONObj findOne(const char* ns, const Query& q) {
+ return conn()->findOne(ns, q, 0, QueryOption_SlaveOk);
+ }
+ BSONObj getLastOp(const std::string& ns) {
+ return findOne(ns.c_str(), Query().sort(reverseNaturalObj));
+ }
+
+ /* SO_TIMEOUT (send/recv time out) for our DBClientConnections */
+ static const int tcp_timeout = 30;
+
+ /* ok to call if already connected */
+ bool connect(const HostAndPort& host);
+
+ void tailCheck();
+
+ bool haveCursor() {
+ return cursor.get() != 0;
+ }
+
+ void query(const char* ns, Query query, int nToReturn, int nToSkip, const BSONObj* fields = 0);
+
+ void tailingQuery(const char* ns, const BSONObj& query);
+
+ void tailingQueryGTE(const char* ns, Timestamp t);
+
+ bool more() {
+ uassert(15910, "Doesn't have cursor for reading oplog", cursor.get());
+ return cursor->more();
+ }
+
+ bool moreInCurrentBatch() {
+ uassert(15911, "Doesn't have cursor for reading oplog", cursor.get());
+ return cursor->moreInCurrentBatch();
+ }
+
+ int currentBatchMessageSize() {
+ if (NULL == cursor->getMessage())
+ return 0;
+ return cursor->getMessage()->size();
+ }
+
+ BSONObj nextSafe() {
+ return cursor->nextSafe();
+ }
+ BSONObj next() {
+ return cursor->next();
+ }
+
+
+ // master/slave only
+ void peek(std::vector<BSONObj>& v, int n) {
+ if (cursor.get())
+ cursor->peek(v, n);
+ }
+
+ // master/slave only
+ void putBack(BSONObj op) {
+ cursor->putBack(op);
+ }
+
+ HostAndPort getHost() const;
/**
- * Authenticates conn using the server's cluster-membership credentials.
- *
- * Returns true on successful authentication.
+ * Connects this OplogReader to a valid sync source, using the provided lastOpTimeFetched
+ * and ReplicationCoordinator objects.
+ * If this function fails to connect to a sync source that is viable, this OplogReader
+ * is left unconnected, where this->conn() equals NULL.
+ * In the process of connecting, this function may add items to the repl coordinator's
+ * sync source blacklist.
+ * This function may throw DB exceptions.
*/
- bool replAuthenticate(DBClientBase* conn);
-
- /* started abstracting out the querying of the primary/master's oplog
- still fairly awkward but a start.
- */
-
- class OplogReader {
- private:
- std::shared_ptr<DBClientConnection> _conn;
- std::shared_ptr<DBClientCursor> cursor;
- int _tailingQueryOptions;
-
- // If _conn was actively connected, _host represents the current HostAndPort of the
- // connection.
- HostAndPort _host;
- public:
- OplogReader();
- ~OplogReader() { }
- void resetCursor() { cursor.reset(); }
- void resetConnection() {
- cursor.reset();
- _conn.reset();
- _host = HostAndPort();
- }
- DBClientConnection* conn() { return _conn.get(); }
- BSONObj findOne(const char *ns, const Query& q) {
- return conn()->findOne(ns, q, 0, QueryOption_SlaveOk);
- }
- BSONObj getLastOp(const std::string& ns) {
- return findOne(ns.c_str(), Query().sort(reverseNaturalObj));
- }
-
- /* SO_TIMEOUT (send/recv time out) for our DBClientConnections */
- static const int tcp_timeout = 30;
-
- /* ok to call if already connected */
- bool connect(const HostAndPort& host);
-
- void tailCheck();
-
- bool haveCursor() { return cursor.get() != 0; }
-
- void query(const char *ns,
- Query query,
- int nToReturn,
- int nToSkip,
- const BSONObj* fields=0);
-
- void tailingQuery(const char *ns, const BSONObj& query);
-
- void tailingQueryGTE(const char *ns, Timestamp t);
-
- bool more() {
- uassert( 15910, "Doesn't have cursor for reading oplog", cursor.get() );
- return cursor->more();
- }
-
- bool moreInCurrentBatch() {
- uassert( 15911, "Doesn't have cursor for reading oplog", cursor.get() );
- return cursor->moreInCurrentBatch();
- }
-
- int currentBatchMessageSize() {
- if( NULL == cursor->getMessage() )
- return 0;
- return cursor->getMessage()->size();
- }
-
- BSONObj nextSafe() { return cursor->nextSafe(); }
- BSONObj next() { return cursor->next(); }
-
-
- // master/slave only
- void peek(std::vector<BSONObj>& v, int n) {
- if( cursor.get() )
- cursor->peek(v,n);
- }
-
- // master/slave only
- void putBack(BSONObj op) { cursor->putBack(op); }
-
- HostAndPort getHost() const;
-
- /**
- * Connects this OplogReader to a valid sync source, using the provided lastOpTimeFetched
- * and ReplicationCoordinator objects.
- * If this function fails to connect to a sync source that is viable, this OplogReader
- * is left unconnected, where this->conn() equals NULL.
- * In the process of connecting, this function may add items to the repl coordinator's
- * sync source blacklist.
- * This function may throw DB exceptions.
- */
- void connectToSyncSource(OperationContext* txn,
- const OpTime& lastOpTimeFetched,
- ReplicationCoordinator* replCoord);
- };
-
-} // namespace repl
-} // namespace mongo
+ void connectToSyncSource(OperationContext* txn,
+ const OpTime& lastOpTimeFetched,
+ ReplicationCoordinator* replCoord);
+};
+
+} // namespace repl
+} // namespace mongo
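
OplogReader's constructor (see the oplogreader.cpp hunk earlier) builds its tailing-query options by OR-ing several QueryOption_* flags. The sketch below shows the same composition with made-up flag values; the real constants live in the client headers and their values may differ.

#include <cstdint>

namespace sketch {

enum QueryOption : std::uint32_t {
    kSlaveOk = 1u << 0,         // allow reads from secondaries
    kCursorTailable = 1u << 1,  // keep the cursor open at the end of the capped collection
    kOplogReplay = 1u << 2,     // hint that lets the server seek efficiently by 'ts'
    kAwaitData = 1u << 3,       // block briefly for new data instead of returning EOF
};

// Combined options used for tailing the oplog, mirroring the constructor above.
constexpr std::uint32_t kTailingQueryOptions =
    kSlaveOk | kCursorTailable | kOplogReplay | kAwaitData;

}  // namespace sketch
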
diff --git a/src/mongo/db/repl/optime.cpp b/src/mongo/db/repl/optime.cpp
index 73907ac9146..87cf966c1ef 100644
--- a/src/mongo/db/repl/optime.cpp
+++ b/src/mongo/db/repl/optime.cpp
@@ -36,33 +36,33 @@
namespace mongo {
namespace repl {
- OpTime::OpTime(Timestamp ts, long long term) : _timestamp(std::move(ts)), _term(term) {}
+OpTime::OpTime(Timestamp ts, long long term) : _timestamp(std::move(ts)), _term(term) {}
- Timestamp OpTime::getTimestamp() const {
- return _timestamp;
- }
+Timestamp OpTime::getTimestamp() const {
+ return _timestamp;
+}
- long long OpTime::getSecs() const {
- return _timestamp.getSecs();
- }
+long long OpTime::getSecs() const {
+ return _timestamp.getSecs();
+}
- long long OpTime::getTerm() const {
- return _term;
- }
+long long OpTime::getTerm() const {
+ return _term;
+}
- bool OpTime::isNull() const {
- return _timestamp.isNull();
- }
+bool OpTime::isNull() const {
+ return _timestamp.isNull();
+}
- std::string OpTime::toString() const {
- std::stringstream ss;
- ss << "(term: " << _term << ", timestamp: " << _timestamp.toStringPretty() << ")";
- return ss.str();
- }
+std::string OpTime::toString() const {
+ std::stringstream ss;
+ ss << "(term: " << _term << ", timestamp: " << _timestamp.toStringPretty() << ")";
+ return ss.str();
+}
- std::ostream& operator<<(std::ostream& out, const OpTime& opTime) {
- return out << opTime.toString();
- }
+std::ostream& operator<<(std::ostream& out, const OpTime& opTime) {
+ return out << opTime.toString();
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/optime.h b/src/mongo/db/repl/optime.h
index 76b21eaca2e..5fb4faea093 100644
--- a/src/mongo/db/repl/optime.h
+++ b/src/mongo/db/repl/optime.h
@@ -35,65 +35,65 @@
namespace mongo {
namespace repl {
- /**
- * OpTime encompasses a Timestamp (which itself is composed of two 32-bit integers, which can
- * represent a time_t and a counter), and a 64-bit Term number. OpTime can be used to
- * label every op in an oplog with a unique identifier.
- */
+/**
+ * OpTime encompasses a Timestamp (which itself is composed of two 32-bit integers, which can
+ * represent a time_t and a counter), and a 64-bit Term number. OpTime can be used to
+ * label every op in an oplog with a unique identifier.
+ */
- class OpTime {
- public:
- // The default term after the first time upgrading from protocol version 0.
- //
- // This is also the first term for nodes that were recently started up but have not
- // yet joined the cluster, all in protocol version 1.
- static const long long kDefaultTerm = 0;
+class OpTime {
+public:
+ // The default term after the first time upgrading from protocol version 0.
+ //
+ // This is also the first term for nodes that were recently started up but have not
+ // yet joined the cluster, all in protocol version 1.
+ static const long long kDefaultTerm = 0;
- OpTime() = default;
- OpTime(Timestamp ts, long long term);
+ OpTime() = default;
+ OpTime(Timestamp ts, long long term);
- Timestamp getTimestamp() const;
+ Timestamp getTimestamp() const;
- long long getSecs() const;
+ long long getSecs() const;
- long long getTerm() const;
+ long long getTerm() const;
- std::string toString() const;
+ std::string toString() const;
- // Returns true when this OpTime is not yet initialized.
- bool isNull() const;
+ // Returns true when this OpTime is not yet initialized.
+ bool isNull() const;
- inline bool operator==(const OpTime& rhs) const {
- return std::tie(_term, _timestamp) == std::tie(rhs._term, rhs._timestamp);
- }
+ inline bool operator==(const OpTime& rhs) const {
+ return std::tie(_term, _timestamp) == std::tie(rhs._term, rhs._timestamp);
+ }
- inline bool operator<(const OpTime& rhs) const {
- // Compare term first, then the opTimes.
- return std::tie(_term, _timestamp) < std::tie(rhs._term, rhs._timestamp);
- }
+ inline bool operator<(const OpTime& rhs) const {
+ // Compare term first, then the opTimes.
+ return std::tie(_term, _timestamp) < std::tie(rhs._term, rhs._timestamp);
+ }
- inline bool operator!=(const OpTime& rhs) const {
- return !(*this == rhs);
- }
+ inline bool operator!=(const OpTime& rhs) const {
+ return !(*this == rhs);
+ }
- inline bool operator<=(const OpTime& rhs) const {
- return *this < rhs || *this == rhs;
- }
+ inline bool operator<=(const OpTime& rhs) const {
+ return *this < rhs || *this == rhs;
+ }
- inline bool operator>(const OpTime& rhs) const {
- return !(*this <= rhs);
- }
+ inline bool operator>(const OpTime& rhs) const {
+ return !(*this <= rhs);
+ }
- inline bool operator>=(const OpTime& rhs) const {
- return !(*this < rhs);
- }
+ inline bool operator>=(const OpTime& rhs) const {
+ return !(*this < rhs);
+ }
- friend std::ostream& operator<<(std::ostream& out, const OpTime& opTime);
+ friend std::ostream& operator<<(std::ostream& out, const OpTime& opTime);
- private:
- Timestamp _timestamp;
- long long _term = kDefaultTerm;
- };
+private:
+ Timestamp _timestamp;
+ long long _term = kDefaultTerm;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
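
OpTime's comparison operators above use std::tie so that the term is compared first and the timestamp only breaks ties. A self-contained reduction of that ordering, with simplified field types, follows.

#include <cassert>
#include <tuple>

namespace sketch {

struct SimpleOpTime {
    unsigned long long timestamp;
    long long term;

    bool operator<(const SimpleOpTime& rhs) const {
        // Compare the term first; the timestamp only breaks ties.
        return std::tie(term, timestamp) < std::tie(rhs.term, rhs.timestamp);
    }
    bool operator==(const SimpleOpTime& rhs) const {
        return std::tie(term, timestamp) == std::tie(rhs.term, rhs.timestamp);
    }
};

inline void opTimeOrderingExample() {
    SimpleOpTime a{/*timestamp=*/100, /*term=*/1};
    SimpleOpTime b{/*timestamp=*/5, /*term=*/2};
    assert(a < b);  // the higher term wins even though its timestamp is older
}

}  // namespace sketch
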
diff --git a/src/mongo/db/repl/read_after_optime_args.cpp b/src/mongo/db/repl/read_after_optime_args.cpp
index 2a7c7817662..e3dcd87274b 100644
--- a/src/mongo/db/repl/read_after_optime_args.cpp
+++ b/src/mongo/db/repl/read_after_optime_args.cpp
@@ -41,70 +41,62 @@ using std::string;
namespace mongo {
namespace repl {
- const string ReadAfterOpTimeArgs::kRootFieldName("$readConcern");
- const string ReadAfterOpTimeArgs::kOpTimeFieldName("afterOpTime");
- const string ReadAfterOpTimeArgs::kOpTimestampFieldName("ts");
- const string ReadAfterOpTimeArgs::kOpTermFieldName("term");
+const string ReadAfterOpTimeArgs::kRootFieldName("$readConcern");
+const string ReadAfterOpTimeArgs::kOpTimeFieldName("afterOpTime");
+const string ReadAfterOpTimeArgs::kOpTimestampFieldName("ts");
+const string ReadAfterOpTimeArgs::kOpTermFieldName("term");
- ReadAfterOpTimeArgs::ReadAfterOpTimeArgs(): ReadAfterOpTimeArgs(OpTime()) {
- }
+ReadAfterOpTimeArgs::ReadAfterOpTimeArgs() : ReadAfterOpTimeArgs(OpTime()) {}
- ReadAfterOpTimeArgs::ReadAfterOpTimeArgs(OpTime opTime):
- _opTime(std::move(opTime)) {
- }
+ReadAfterOpTimeArgs::ReadAfterOpTimeArgs(OpTime opTime) : _opTime(std::move(opTime)) {}
- const OpTime& ReadAfterOpTimeArgs::getOpTime() const {
- return _opTime;
- }
+const OpTime& ReadAfterOpTimeArgs::getOpTime() const {
+ return _opTime;
+}
- Status ReadAfterOpTimeArgs::initialize(const BSONObj& cmdObj) {
- auto afterElem = cmdObj[ReadAfterOpTimeArgs::kRootFieldName];
+Status ReadAfterOpTimeArgs::initialize(const BSONObj& cmdObj) {
+ auto afterElem = cmdObj[ReadAfterOpTimeArgs::kRootFieldName];
- if (afterElem.eoo()) {
- return Status::OK();
- }
+ if (afterElem.eoo()) {
+ return Status::OK();
+ }
- if (!afterElem.isABSONObj()) {
- return Status(ErrorCodes::FailedToParse, "'after' field should be an object");
- }
+ if (!afterElem.isABSONObj()) {
+ return Status(ErrorCodes::FailedToParse, "'after' field should be an object");
+ }
- BSONObj readAfterObj = afterElem.Obj();
- BSONElement opTimeElem;
- auto opTimeStatus = bsonExtractTypedField(readAfterObj,
- ReadAfterOpTimeArgs::kOpTimeFieldName,
- Object,
- &opTimeElem);
+ BSONObj readAfterObj = afterElem.Obj();
+ BSONElement opTimeElem;
+ auto opTimeStatus = bsonExtractTypedField(
+ readAfterObj, ReadAfterOpTimeArgs::kOpTimeFieldName, Object, &opTimeElem);
- if (!opTimeStatus.isOK()) {
- return opTimeStatus;
- }
+ if (!opTimeStatus.isOK()) {
+ return opTimeStatus;
+ }
- BSONObj opTimeObj = opTimeElem.Obj();
- BSONElement timestampElem;
+ BSONObj opTimeObj = opTimeElem.Obj();
+ BSONElement timestampElem;
- Timestamp timestamp;
- auto timestampStatus =
- bsonExtractTimestampField(opTimeObj,
- ReadAfterOpTimeArgs::kOpTimestampFieldName,
- &timestamp);
+ Timestamp timestamp;
+ auto timestampStatus = bsonExtractTimestampField(
+ opTimeObj, ReadAfterOpTimeArgs::kOpTimestampFieldName, &timestamp);
- if (!timestampStatus.isOK()) {
- return timestampStatus;
- }
+ if (!timestampStatus.isOK()) {
+ return timestampStatus;
+ }
- long long termNumber;
- auto termStatus = bsonExtractIntegerField(opTimeObj,
- ReadAfterOpTimeArgs::kOpTermFieldName,
- &termNumber);
+ long long termNumber;
+ auto termStatus =
+ bsonExtractIntegerField(opTimeObj, ReadAfterOpTimeArgs::kOpTermFieldName, &termNumber);
- if (!termStatus.isOK()) {
- return termStatus;
- }
+ if (!termStatus.isOK()) {
+ return termStatus;
+ }
- _opTime = OpTime(timestamp, termNumber);
+ _opTime = OpTime(timestamp, termNumber);
- return Status::OK();
- }
+ return Status::OK();
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
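
ReadAfterOpTimeArgs::initialize() above walks a nested document ($readConcern.afterOpTime.{ts, term}) and returns early with a non-OK Status as soon as a required field is missing or mistyped. The sketch below mirrors that validation ladder over a flattened std::map instead of BSON; only the field names ("ts", "term") come from the real code, everything else is a stand-in.

#include <map>
#include <optional>
#include <string>

namespace sketch {

struct ParsedOpTime {
    long long ts = 0;  // stand-in for Timestamp
    long long term = 0;
};

// Returns the parsed value, or std::nullopt when a required field is missing
// (the real code returns a non-OK Status from the bsonExtract*Field helpers).
std::optional<ParsedOpTime> parseAfterOpTime(const std::map<std::string, long long>& opTimeObj) {
    auto ts = opTimeObj.find("ts");
    if (ts == opTimeObj.end())
        return std::nullopt;
    auto term = opTimeObj.find("term");
    if (term == opTimeObj.end())
        return std::nullopt;
    return ParsedOpTime{ts->second, term->second};
}

}  // namespace sketch
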
diff --git a/src/mongo/db/repl/read_after_optime_args.h b/src/mongo/db/repl/read_after_optime_args.h
index ee58dd3b9b2..04536ce8702 100644
--- a/src/mongo/db/repl/read_after_optime_args.h
+++ b/src/mongo/db/repl/read_after_optime_args.h
@@ -36,40 +36,38 @@
namespace mongo {
- class BSONObj;
+class BSONObj;
namespace repl {
- class ReadAfterOpTimeArgs {
- public:
+class ReadAfterOpTimeArgs {
+public:
+ static const std::string kRootFieldName;
+ static const std::string kOpTimeFieldName;
+ static const std::string kOpTimestampFieldName;
+ static const std::string kOpTermFieldName;
- static const std::string kRootFieldName;
- static const std::string kOpTimeFieldName;
- static const std::string kOpTimestampFieldName;
- static const std::string kOpTermFieldName;
+ ReadAfterOpTimeArgs();
+ explicit ReadAfterOpTimeArgs(OpTime opTime);
- ReadAfterOpTimeArgs();
- explicit ReadAfterOpTimeArgs(OpTime opTime);
+ /**
+ * Format:
+ * {
+ * find: “coll”,
+ * filter: <Query Object>,
+ * $readConcern: { // optional
+ * afterOpTime: { ts: <timestamp>, term: <NumberLong> },
+ * }
+ * }
+ */
+ Status initialize(const BSONObj& cmdObj);
- /**
- * Format:
- * {
- * find: “coll”,
- * filter: <Query Object>,
- * $readConcern: { // optional
- * afterOpTime: { ts: <timestamp>, term: <NumberLong> },
- * }
- * }
- */
- Status initialize(const BSONObj& cmdObj);
+ const OpTime& getOpTime() const;
+ const Milliseconds& getTimeout() const;
- const OpTime& getOpTime() const;
- const Milliseconds& getTimeout() const;
+private:
+ OpTime _opTime;
+};
- private:
-
- OpTime _opTime;
- };
-
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/read_after_optime_args_test.cpp b/src/mongo/db/repl/read_after_optime_args_test.cpp
index b37ca9f16ac..e79a9ff5dbb 100644
--- a/src/mongo/db/repl/read_after_optime_args_test.cpp
+++ b/src/mongo/db/repl/read_after_optime_args_test.cpp
@@ -34,86 +34,87 @@ namespace mongo {
namespace repl {
namespace {
- TEST(ReadAfterParse, BasicFullSpecification) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(BSON(
- "find" << "test"
- << ReadAfterOpTimeArgs::kRootFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimestampFieldName << Timestamp(20, 30)
- << ReadAfterOpTimeArgs::kOpTermFieldName << 2)))));
+TEST(ReadAfterParse, BasicFullSpecification) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_OK(readAfterOpTime.initialize(BSON(
+ "find"
+ << "test" << ReadAfterOpTimeArgs::kRootFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimestampFieldName
+ << Timestamp(20, 30) << ReadAfterOpTimeArgs::kOpTermFieldName << 2)))));
- ASSERT_EQ(Timestamp(20, 30), readAfterOpTime.getOpTime().getTimestamp());
- ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
- }
+ ASSERT_EQ(Timestamp(20, 30), readAfterOpTime.getOpTime().getTimestamp());
+ ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
+}
- TEST(ReadAfterParse, Empty) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(BSON("find" << "test")));
+TEST(ReadAfterParse, Empty) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_OK(readAfterOpTime.initialize(BSON("find"
+ << "test")));
- ASSERT(readAfterOpTime.getOpTime().getTimestamp().isNull());
- }
+ ASSERT(readAfterOpTime.getOpTime().getTimestamp().isNull());
+}
- TEST(ReadAfterParse, BadRootType) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
- "find" << "test"
- << ReadAfterOpTimeArgs::kRootFieldName << "x")));
- }
+TEST(ReadAfterParse, BadRootType) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_NOT_OK(
+ readAfterOpTime.initialize(BSON("find"
+ << "test" << ReadAfterOpTimeArgs::kRootFieldName << "x")));
+}
- TEST(ReadAfterParse, BadOpTimeType) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
- "find" << "test"
- << ReadAfterOpTimeArgs::kRootFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName << 2))));
- }
+TEST(ReadAfterParse, BadOpTimeType) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_NOT_OK(
+ readAfterOpTime.initialize(BSON("find"
+ << "test" << ReadAfterOpTimeArgs::kRootFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName << 2))));
+}
- TEST(ReadAfterParse, OpTimeRequiredIfRootPresent) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
- "find" << "test"
- << ReadAfterOpTimeArgs::kRootFieldName << BSONObj())));
- }
+TEST(ReadAfterParse, OpTimeRequiredIfRootPresent) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_NOT_OK(readAfterOpTime.initialize(BSON("find"
+ << "test" << ReadAfterOpTimeArgs::kRootFieldName
+ << BSONObj())));
+}
- TEST(ReadAfterParse, NoOpTimeTS) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
- "find" << "test"
- << ReadAfterOpTimeArgs::kRootFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTermFieldName << 2)))));
- }
+TEST(ReadAfterParse, NoOpTimeTS) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_NOT_OK(
+ readAfterOpTime.initialize(BSON("find"
+ << "test" << ReadAfterOpTimeArgs::kRootFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName << BSON(
+ ReadAfterOpTimeArgs::kOpTermFieldName << 2)))));
+}
- TEST(ReadAfterParse, NoOpTimeTerm) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
- "find" << "test"
- << ReadAfterOpTimeArgs::kRootFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTermFieldName << 2)))));
- }
+TEST(ReadAfterParse, NoOpTimeTerm) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_NOT_OK(
+ readAfterOpTime.initialize(BSON("find"
+ << "test" << ReadAfterOpTimeArgs::kRootFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName << BSON(
+ ReadAfterOpTimeArgs::kOpTermFieldName << 2)))));
+}
- TEST(ReadAfterParse, BadOpTimeTSType) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
- "find" << "test"
- << ReadAfterOpTimeArgs::kRootFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimestampFieldName << BSON("x" << 1)
- << ReadAfterOpTimeArgs::kOpTermFieldName << 2)))));
- }
+TEST(ReadAfterParse, BadOpTimeTSType) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_NOT_OK(readAfterOpTime.initialize(
+ BSON("find"
+ << "test" << ReadAfterOpTimeArgs::kRootFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimestampFieldName
+ << BSON("x" << 1) << ReadAfterOpTimeArgs::kOpTermFieldName << 2)))));
+}
- TEST(ReadAfterParse, BadOpTimeTermType) {
- ReadAfterOpTimeArgs readAfterOpTime;
- ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
- "find" << "test"
- << ReadAfterOpTimeArgs::kRootFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName
- << BSON(ReadAfterOpTimeArgs::kOpTimestampFieldName << Timestamp(1, 0)
- << ReadAfterOpTimeArgs::kOpTermFieldName << "y")))));
- }
+TEST(ReadAfterParse, BadOpTimeTermType) {
+ ReadAfterOpTimeArgs readAfterOpTime;
+ ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
+ "find"
+ << "test" << ReadAfterOpTimeArgs::kRootFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimeFieldName
+ << BSON(ReadAfterOpTimeArgs::kOpTimestampFieldName
+ << Timestamp(1, 0) << ReadAfterOpTimeArgs::kOpTermFieldName << "y")))));
+}
-} // unnamed namespace
-} // namespace repl
-} // namespace mongo
+} // unnamed namespace
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/read_after_optime_response.cpp b/src/mongo/db/repl/read_after_optime_response.cpp
index 3a6d5fc9962..332508c9e3c 100644
--- a/src/mongo/db/repl/read_after_optime_response.cpp
+++ b/src/mongo/db/repl/read_after_optime_response.cpp
@@ -39,48 +39,40 @@ using std::string;
namespace mongo {
namespace repl {
- const string ReadAfterOpTimeResponse::kWaitedMSFieldName("waitedMS");
+const string ReadAfterOpTimeResponse::kWaitedMSFieldName("waitedMS");
- ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status):
- ReadAfterOpTimeResponse(status, stdx::chrono::milliseconds(0), false) {
- }
+ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status)
+ : ReadAfterOpTimeResponse(status, stdx::chrono::milliseconds(0), false) {}
- ReadAfterOpTimeResponse::ReadAfterOpTimeResponse():
- ReadAfterOpTimeResponse(Status::OK()) {
- }
+ReadAfterOpTimeResponse::ReadAfterOpTimeResponse() : ReadAfterOpTimeResponse(Status::OK()) {}
- ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status,
- stdx::chrono::milliseconds duration):
- ReadAfterOpTimeResponse(status, duration, true) {
- }
-
- ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status,
- stdx::chrono::milliseconds duration,
- bool waited):
- _waited(waited),
- _duration(duration),
- _status(status) {
- }
+ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status, stdx::chrono::milliseconds duration)
+ : ReadAfterOpTimeResponse(status, duration, true) {}
- void ReadAfterOpTimeResponse::appendInfo(BSONObjBuilder* builder) {
- if (!_waited) {
- return;
- }
+ReadAfterOpTimeResponse::ReadAfterOpTimeResponse(Status status,
+ stdx::chrono::milliseconds duration,
+ bool waited)
+ : _waited(waited), _duration(duration), _status(status) {}
- builder->append(kWaitedMSFieldName, durationCount<Milliseconds>(_duration));
+void ReadAfterOpTimeResponse::appendInfo(BSONObjBuilder* builder) {
+ if (!_waited) {
+ return;
}
- bool ReadAfterOpTimeResponse::didWait() const {
- return _waited;
- }
+ builder->append(kWaitedMSFieldName, durationCount<Milliseconds>(_duration));
+}
- stdx::chrono::milliseconds ReadAfterOpTimeResponse::getDuration() const {
- return _duration;
- }
+bool ReadAfterOpTimeResponse::didWait() const {
+ return _waited;
+}
- Status ReadAfterOpTimeResponse::getStatus() const {
- return _status;
- }
+stdx::chrono::milliseconds ReadAfterOpTimeResponse::getDuration() const {
+ return _duration;
+}
+
+Status ReadAfterOpTimeResponse::getStatus() const {
+ return _status;
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/read_after_optime_response.h b/src/mongo/db/repl/read_after_optime_response.h
index 33163131363..7bd5788dd0f 100644
--- a/src/mongo/db/repl/read_after_optime_response.h
+++ b/src/mongo/db/repl/read_after_optime_response.h
@@ -35,57 +35,55 @@
namespace mongo {
- class BSONObjBuilder;
+class BSONObjBuilder;
namespace repl {
- class ReadAfterOpTimeResponse {
- public:
- static const std::string kWaitedMSFieldName;
-
- /**
- * Constructs a default response that has OK status, and wait is false.
- */
- ReadAfterOpTimeResponse();
-
- /**
- * Constructs a response with the given status with wait equals to false.
- */
- explicit ReadAfterOpTimeResponse(Status status);
-
- /**
- * Constructs a response with wait set to true along with the given parameters.
- */
- ReadAfterOpTimeResponse(Status status, stdx::chrono::milliseconds duration);
-
- /**
- * Appends to the builder the timeout and duration info if didWait() is true.
- * Note: does not include status.
- */
- void appendInfo(BSONObjBuilder* builder);
-
- bool didWait() const;
-
- /**
- * Returns the amount of duration waiting for opTime to pass.
- * Valid only if didWait is true.
- */
- stdx::chrono::milliseconds getDuration() const;
-
- /**
- * Returns more details about an error if it occurred.
- */
- Status getStatus() const;
-
- private:
- ReadAfterOpTimeResponse(Status status,
- stdx::chrono::milliseconds duration,
- bool waited);
-
- bool _waited;
- stdx::chrono::milliseconds _duration;
- Status _status;
- };
-
-} // namespace repl
-} // namespace mongo
+class ReadAfterOpTimeResponse {
+public:
+ static const std::string kWaitedMSFieldName;
+
+ /**
+ * Constructs a default response that has OK status, and wait is false.
+ */
+ ReadAfterOpTimeResponse();
+
+ /**
+     * Constructs a response with the given status and with wait set to false.
+ */
+ explicit ReadAfterOpTimeResponse(Status status);
+
+ /**
+ * Constructs a response with wait set to true along with the given parameters.
+ */
+ ReadAfterOpTimeResponse(Status status, stdx::chrono::milliseconds duration);
+
+ /**
+ * Appends to the builder the timeout and duration info if didWait() is true.
+ * Note: does not include status.
+ */
+ void appendInfo(BSONObjBuilder* builder);
+
+ bool didWait() const;
+
+ /**
+     * Returns the duration spent waiting for the opTime to pass.
+ * Valid only if didWait is true.
+ */
+ stdx::chrono::milliseconds getDuration() const;
+
+ /**
+ * Returns more details about an error if it occurred.
+ */
+ Status getStatus() const;
+
+private:
+ ReadAfterOpTimeResponse(Status status, stdx::chrono::milliseconds duration, bool waited);
+
+ bool _waited;
+ stdx::chrono::milliseconds _duration;
+ Status _status;
+};
+
+} // namespace repl
+} // namespace mongo
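As a reading aid, a minimal usage sketch (not part of the patch) for the class above; the helper name and includes are assumptions:

// Illustrative only -- appendReadAfterInfo() is a made-up helper.
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/repl/read_after_optime_response.h"

void appendReadAfterInfo(mongo::BSONObjBuilder* reply) {
    using namespace mongo::repl;

    // A response that waited 25ms for the requested opTime appends {waitedMS: 25}.
    ReadAfterOpTimeResponse waited(mongo::Status::OK(), mongo::stdx::chrono::milliseconds(25));
    waited.appendInfo(reply);

    // A default-constructed response never waited, so appendInfo() adds nothing.
    ReadAfterOpTimeResponse noWait;
    noWait.appendInfo(reply);
}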
diff --git a/src/mongo/db/repl/read_after_optime_response_test.cpp b/src/mongo/db/repl/read_after_optime_response_test.cpp
index 09d70204255..7104fca99b7 100644
--- a/src/mongo/db/repl/read_after_optime_response_test.cpp
+++ b/src/mongo/db/repl/read_after_optime_response_test.cpp
@@ -34,49 +34,49 @@ namespace mongo {
namespace repl {
namespace {
- TEST(ReadAfterResponse, Default) {
- ReadAfterOpTimeResponse response;
+TEST(ReadAfterResponse, Default) {
+ ReadAfterOpTimeResponse response;
- ASSERT_FALSE(response.didWait());
+ ASSERT_FALSE(response.didWait());
- BSONObjBuilder builder;
- response.appendInfo(&builder);
+ BSONObjBuilder builder;
+ response.appendInfo(&builder);
- BSONObj obj(builder.done());
- ASSERT_TRUE(obj.isEmpty());
- }
+ BSONObj obj(builder.done());
+ ASSERT_TRUE(obj.isEmpty());
+}
- TEST(ReadAfterResponse, WithStatus) {
- ReadAfterOpTimeResponse response(Status(ErrorCodes::InternalError, "test"));
+TEST(ReadAfterResponse, WithStatus) {
+ ReadAfterOpTimeResponse response(Status(ErrorCodes::InternalError, "test"));
- ASSERT_FALSE(response.didWait());
+ ASSERT_FALSE(response.didWait());
- ASSERT_EQ(ErrorCodes::InternalError, response.getStatus().code());
+ ASSERT_EQ(ErrorCodes::InternalError, response.getStatus().code());
- BSONObjBuilder builder;
- response.appendInfo(&builder);
+ BSONObjBuilder builder;
+ response.appendInfo(&builder);
- BSONObj obj(builder.done());
- ASSERT_TRUE(obj.isEmpty());
- }
+ BSONObj obj(builder.done());
+ ASSERT_TRUE(obj.isEmpty());
+}
- TEST(ReadAfterResponse, WaitedWithDuration) {
- ReadAfterOpTimeResponse response(Status(ErrorCodes::InternalError, "test"),
- stdx::chrono::milliseconds(7));
+TEST(ReadAfterResponse, WaitedWithDuration) {
+ ReadAfterOpTimeResponse response(Status(ErrorCodes::InternalError, "test"),
+ stdx::chrono::milliseconds(7));
- ASSERT_TRUE(response.didWait());
- ASSERT_EQUALS(Milliseconds(7), response.getDuration());
- ASSERT_EQ(ErrorCodes::InternalError, response.getStatus().code());
+ ASSERT_TRUE(response.didWait());
+ ASSERT_EQUALS(Milliseconds(7), response.getDuration());
+ ASSERT_EQ(ErrorCodes::InternalError, response.getStatus().code());
- BSONObjBuilder builder;
- response.appendInfo(&builder);
+ BSONObjBuilder builder;
+ response.appendInfo(&builder);
- BSONObj obj(builder.done());
- auto waitedMSElem = obj[ReadAfterOpTimeResponse::kWaitedMSFieldName];
- ASSERT_TRUE(waitedMSElem.isNumber());
- ASSERT_EQ(7, waitedMSElem.numberLong());
- }
+ BSONObj obj(builder.done());
+ auto waitedMSElem = obj[ReadAfterOpTimeResponse::kWaitedMSFieldName];
+ ASSERT_TRUE(waitedMSElem.isNumber());
+ ASSERT_EQ(7, waitedMSElem.numberLong());
+}
-} // unnamed namespace
-} // namespace repl
-} // namespace mongo
+} // unnamed namespace
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/repl_client_info.cpp b/src/mongo/db/repl/repl_client_info.cpp
index 631c121c223..18191bfaffc 100644
--- a/src/mongo/db/repl/repl_client_info.cpp
+++ b/src/mongo/db/repl/repl_client_info.cpp
@@ -39,15 +39,15 @@
namespace mongo {
namespace repl {
- const Client::Decoration<ReplClientInfo> ReplClientInfo::forClient =
- Client::declareDecoration<ReplClientInfo>();
+const Client::Decoration<ReplClientInfo> ReplClientInfo::forClient =
+ Client::declareDecoration<ReplClientInfo>();
- long long ReplClientInfo::getTerm() {
- if (_cachedTerm == kUninitializedTerm) {
- _cachedTerm = getGlobalReplicationCoordinator()->getTerm();
- }
- return _cachedTerm;
+long long ReplClientInfo::getTerm() {
+ if (_cachedTerm == kUninitializedTerm) {
+ _cachedTerm = getGlobalReplicationCoordinator()->getTerm();
}
+ return _cachedTerm;
+}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/repl_client_info.h b/src/mongo/db/repl/repl_client_info.h
index 69694f2795d..576142fd9cd 100644
--- a/src/mongo/db/repl/repl_client_info.h
+++ b/src/mongo/db/repl/repl_client_info.h
@@ -34,39 +34,47 @@
namespace mongo {
- class BSONObjBuilder;
- class Client;
+class BSONObjBuilder;
+class Client;
namespace repl {
- class ReplClientInfo {
- public:
- static const Client::Decoration<ReplClientInfo> forClient;
+class ReplClientInfo {
+public:
+ static const Client::Decoration<ReplClientInfo> forClient;
- void setLastOp(const OpTime& op) { _lastOp = op; }
- OpTime getLastOp() const { return _lastOp; }
+ void setLastOp(const OpTime& op) {
+ _lastOp = op;
+ }
+ OpTime getLastOp() const {
+ return _lastOp;
+ }
- // Only used for master/slave
- void setRemoteID(OID rid) { _remoteId = rid; }
- OID getRemoteID() const { return _remoteId; }
+ // Only used for master/slave
+ void setRemoteID(OID rid) {
+ _remoteId = rid;
+ }
+ OID getRemoteID() const {
+ return _remoteId;
+ }
- // If we haven't cached a term from replication coordinator, get the current term
- // and cache it during the life cycle of this client.
- //
- // Used by logOp() to attach the current term to each log entries. Assume we don't change
- // the term since caching it. This is true for write commands, since we acquire the
- // global lock (IX) for write commands and stepping down also needs that lock (S).
- // Stepping down will kill all user operations, so there is no write after stepping down
- // in the case of yielding.
- long long getTerm();
+    // If we haven't cached a term from the replication coordinator yet, get the current term
+    // and cache it for the life cycle of this client.
+    //
+    // Used by logOp() to attach the current term to each log entry. We assume the term does
+    // not change after it is cached. This holds for write commands, since we acquire the
+    // global lock (IX) for write commands and stepping down also needs that lock (S).
+    // Stepping down will kill all user operations, so there is no write after stepping down
+    // in the case of yielding.
+ long long getTerm();
- private:
- static const long long kUninitializedTerm = -1;
+private:
+ static const long long kUninitializedTerm = -1;
- OpTime _lastOp = OpTime();
- OID _remoteId = OID();
- long long _cachedTerm = kUninitializedTerm;
- };
+ OpTime _lastOp = OpTime();
+ OID _remoteId = OID();
+ long long _cachedTerm = kUninitializedTerm;
+};
} // namespace repl
} // namespace mongo
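A brief usage sketch (not part of the patch) of the decoration declared above; it assumes a Client* is already at hand, and the helper name is invented:

// Illustrative only.
#include "mongo/db/client.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/repl_client_info.h"

void recordLastOp(mongo::Client* client, const mongo::repl::OpTime& opTime) {
    // The decoration hands out a per-Client ReplClientInfo instance.
    mongo::repl::ReplClientInfo& info = mongo::repl::ReplClientInfo::forClient(client);
    info.setLastOp(opTime);

    // The first getTerm() call fetches the term from the replication coordinator and caches
    // it for the lifetime of this client; later calls reuse the cached value.
    long long term = info.getTerm();
    (void)term;  // e.g. attached to an oplog entry by logOp()
}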
diff --git a/src/mongo/db/repl/repl_set_command.cpp b/src/mongo/db/repl/repl_set_command.cpp
index 5ff784352cd..2dd2178cdba 100644
--- a/src/mongo/db/repl/repl_set_command.cpp
+++ b/src/mongo/db/repl/repl_set_command.cpp
@@ -35,15 +35,15 @@
namespace mongo {
namespace repl {
- Status ReplSetCommand::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::internal)) {
- return {ErrorCodes::Unauthorized, "Unauthorized"};
- }
- return Status::OK();
+Status ReplSetCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::internal)) {
+ return {ErrorCodes::Unauthorized, "Unauthorized"};
}
+ return Status::OK();
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_command.h b/src/mongo/db/repl/repl_set_command.h
index 0694eb28157..0b8c74c44b8 100644
--- a/src/mongo/db/repl/repl_set_command.h
+++ b/src/mongo/db/repl/repl_set_command.h
@@ -34,29 +34,35 @@
namespace mongo {
- class Status;
- class ClientBasic;
- class BSONObj;
+class Status;
+class ClientBasic;
+class BSONObj;
namespace repl {
- /**
- * Base class for repl set commands.
- */
- class ReplSetCommand : public Command {
- protected:
- ReplSetCommand(const char * s, bool show=false) : Command(s, show) { }
+/**
+ * Base class for repl set commands.
+ */
+class ReplSetCommand : public Command {
+protected:
+ ReplSetCommand(const char* s, bool show = false) : Command(s, show) {}
- bool slaveOk() const override { return true; }
+ bool slaveOk() const override {
+ return true;
+ }
- bool adminOnly() const override { return true; }
+ bool adminOnly() const override {
+ return true;
+ }
- bool isWriteCommandForConfigServer() const override { return false; }
+ bool isWriteCommandForConfigServer() const override {
+ return false;
+ }
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) override;
- };
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
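For orientation, a stripped-down hypothetical subclass (the class and command name are invented; the real CmdReplSetDeclareElectionWinner in the next file follows the same shape):

// Illustrative only -- not part of this change.
#include "mongo/db/repl/repl_set_command.h"

namespace mongo {
namespace repl {

class CmdReplSetExample : public ReplSetCommand {
public:
    CmdReplSetExample() : ReplSetCommand("replSetExample") {}

private:
    bool run(OperationContext* /*txn*/,
             const std::string&,
             BSONObj& /*cmdObj*/,
             int,
             std::string& /*errmsg*/,
             BSONObjBuilder& result) final {
        // By the time run() is reached, the framework has already called the base class's
        // checkAuthForCommand(), which requires ActionType::internal on the cluster resource.
        result.append("exampleField", 1);  // arbitrary payload for the sketch
        return true;
    }
} cmdReplSetExample;

}  // namespace repl
}  // namespace mongo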
diff --git a/src/mongo/db/repl/repl_set_declare_election_winner.cpp b/src/mongo/db/repl/repl_set_declare_election_winner.cpp
index 688428ca221..776d20a7a18 100644
--- a/src/mongo/db/repl/repl_set_declare_election_winner.cpp
+++ b/src/mongo/db/repl/repl_set_declare_election_winner.cpp
@@ -36,36 +36,35 @@
namespace mongo {
namespace repl {
- class CmdReplSetDeclareElectionWinner : public ReplSetCommand {
- public:
- CmdReplSetDeclareElectionWinner() : ReplSetCommand("replSetDeclareElectionWinner") { }
- private:
- bool run(OperationContext* txn,
- const std::string&,
- BSONObj& cmdObj,
- int,
- std::string& errmsg,
- BSONObjBuilder& result) final {
+class CmdReplSetDeclareElectionWinner : public ReplSetCommand {
+public:
+ CmdReplSetDeclareElectionWinner() : ReplSetCommand("replSetDeclareElectionWinner") {}
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- ReplSetDeclareElectionWinnerArgs parsedArgs;
- status = parsedArgs.initialize(cmdObj);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+private:
+ bool run(OperationContext* txn,
+ const std::string&,
+ BSONObj& cmdObj,
+ int,
+ std::string& errmsg,
+ BSONObjBuilder& result) final {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- long long responseTerm = -1;
- status = getGlobalReplicationCoordinator()->processReplSetDeclareElectionWinner(
- parsedArgs,
- &responseTerm);
- result.append("term", responseTerm);
+ ReplSetDeclareElectionWinnerArgs parsedArgs;
+ status = parsedArgs.initialize(cmdObj);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdReplSetDeclareElectionWinner;
-} // namespace repl
-} // namespace mongo
+ long long responseTerm = -1;
+ status = getGlobalReplicationCoordinator()->processReplSetDeclareElectionWinner(
+ parsedArgs, &responseTerm);
+ result.append("term", responseTerm);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetDeclareElectionWinner;
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_declare_election_winner_args.cpp b/src/mongo/db/repl/repl_set_declare_election_winner_args.cpp
index f206c8e528a..0961dc901de 100644
--- a/src/mongo/db/repl/repl_set_declare_election_winner_args.cpp
+++ b/src/mongo/db/repl/repl_set_declare_election_winner_args.cpp
@@ -36,133 +36,120 @@ namespace mongo {
namespace repl {
namespace {
- const std::string kCommandName = "replSetDeclareElectionWinner";
- const std::string kErrorMessageFieldName = "errmsg";
- const std::string kErrorCodeFieldName = "code";
- const std::string kOkFieldName = "ok";
- const std::string kSetNameFieldName = "setName";
- const std::string kTermFieldName = "term";
- const std::string kWinnerIdFieldName = "winnerId";
-
- const std::string kLegalArgsFieldNames[] = {
- kCommandName,
- kSetNameFieldName,
- kTermFieldName,
- kWinnerIdFieldName,
- };
-
- const std::string kLegalResponseFieldNames[] = {
- kErrorMessageFieldName,
- kErrorCodeFieldName,
- kOkFieldName,
- kTermFieldName,
- };
+const std::string kCommandName = "replSetDeclareElectionWinner";
+const std::string kErrorMessageFieldName = "errmsg";
+const std::string kErrorCodeFieldName = "code";
+const std::string kOkFieldName = "ok";
+const std::string kSetNameFieldName = "setName";
+const std::string kTermFieldName = "term";
+const std::string kWinnerIdFieldName = "winnerId";
+
+const std::string kLegalArgsFieldNames[] = {
+ kCommandName, kSetNameFieldName, kTermFieldName, kWinnerIdFieldName,
+};
+
+const std::string kLegalResponseFieldNames[] = {
+ kErrorMessageFieldName, kErrorCodeFieldName, kOkFieldName, kTermFieldName,
+};
} // namespace
- Status ReplSetDeclareElectionWinnerArgs::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("ReplSetDeclareElectionWinner",
- argsObj,
- kLegalArgsFieldNames);
- if (!status.isOK())
- return status;
-
- status = bsonExtractStringField(argsObj, kSetNameFieldName, &_setName);
- if (!status.isOK())
- return status;
-
- status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
- if (!status.isOK())
- return status;
-
- status = bsonExtractIntegerField(argsObj, kWinnerIdFieldName, &_winnerId);
- if (!status.isOK())
- return status;
-
- return Status::OK();
- }
-
- std::string ReplSetDeclareElectionWinnerArgs::getReplSetName() const {
- return _setName;
- }
-
- long long ReplSetDeclareElectionWinnerArgs::getTerm() const {
- return _term;
- }
-
- long long ReplSetDeclareElectionWinnerArgs::getWinnerId() const {
- return _winnerId;
- }
-
- void ReplSetDeclareElectionWinnerArgs::addToBSON(BSONObjBuilder* builder) const {
- builder->append("replSetDeclareElectionWinner", 1);
- builder->append(kSetNameFieldName, _setName);
- builder->append(kTermFieldName, _term);
- builder->appendIntOrLL(kWinnerIdFieldName, _winnerId);
- }
-
- BSONObj ReplSetDeclareElectionWinnerArgs::toBSON() const {
- BSONObjBuilder builder;
- addToBSON(&builder);
- return builder.obj();
- }
-
- Status ReplSetDeclareElectionWinnerResponse::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("ReplSetDeclareElectionWinner",
- argsObj,
- kLegalResponseFieldNames);
- if (!status.isOK())
- return status;
-
- status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
- if (!status.isOK())
- return status;
-
- status = bsonExtractIntegerFieldWithDefault(argsObj,
- kErrorCodeFieldName,
- ErrorCodes::OK,
- &_code);
- if (!status.isOK())
- return status;
-
- status = bsonExtractStringFieldWithDefault(argsObj,
- kErrorMessageFieldName,
- "",
- &_errmsg);
- if (!status.isOK())
- return status;
-
- status = bsonExtractBooleanField(argsObj, kOkFieldName, &_ok);
- if (!status.isOK())
- return status;
-
- return Status::OK();
- }
-
- bool ReplSetDeclareElectionWinnerResponse::getOk() const {
- return _ok;
- }
-
- long long ReplSetDeclareElectionWinnerResponse::getTerm() const {
- return _term;
- }
-
- long long ReplSetDeclareElectionWinnerResponse::getErrorCode() const {
- return _code;
- }
-
- const std::string& ReplSetDeclareElectionWinnerResponse::getErrorMsg() const {
- return _errmsg;
- }
-
- void ReplSetDeclareElectionWinnerResponse::addToBSON(BSONObjBuilder* builder) const {
- builder->append(kOkFieldName, _ok);
- builder->append(kTermFieldName, _term);
- if (_code != ErrorCodes::OK) {
- builder->append(kErrorCodeFieldName, _code);
- builder->append(kErrorMessageFieldName, _errmsg);
- }
+Status ReplSetDeclareElectionWinnerArgs::initialize(const BSONObj& argsObj) {
+ Status status =
+ bsonCheckOnlyHasFields("ReplSetDeclareElectionWinner", argsObj, kLegalArgsFieldNames);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractStringField(argsObj, kSetNameFieldName, &_setName);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerField(argsObj, kWinnerIdFieldName, &_winnerId);
+ if (!status.isOK())
+ return status;
+
+ return Status::OK();
+}
+
+std::string ReplSetDeclareElectionWinnerArgs::getReplSetName() const {
+ return _setName;
+}
+
+long long ReplSetDeclareElectionWinnerArgs::getTerm() const {
+ return _term;
+}
+
+long long ReplSetDeclareElectionWinnerArgs::getWinnerId() const {
+ return _winnerId;
+}
+
+void ReplSetDeclareElectionWinnerArgs::addToBSON(BSONObjBuilder* builder) const {
+ builder->append("replSetDeclareElectionWinner", 1);
+ builder->append(kSetNameFieldName, _setName);
+ builder->append(kTermFieldName, _term);
+ builder->appendIntOrLL(kWinnerIdFieldName, _winnerId);
+}
+
+BSONObj ReplSetDeclareElectionWinnerArgs::toBSON() const {
+ BSONObjBuilder builder;
+ addToBSON(&builder);
+ return builder.obj();
+}
+
+Status ReplSetDeclareElectionWinnerResponse::initialize(const BSONObj& argsObj) {
+ Status status =
+ bsonCheckOnlyHasFields("ReplSetDeclareElectionWinner", argsObj, kLegalResponseFieldNames);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
+ if (!status.isOK())
+ return status;
+
+ status =
+ bsonExtractIntegerFieldWithDefault(argsObj, kErrorCodeFieldName, ErrorCodes::OK, &_code);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractStringFieldWithDefault(argsObj, kErrorMessageFieldName, "", &_errmsg);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractBooleanField(argsObj, kOkFieldName, &_ok);
+ if (!status.isOK())
+ return status;
+
+ return Status::OK();
+}
+
+bool ReplSetDeclareElectionWinnerResponse::getOk() const {
+ return _ok;
+}
+
+long long ReplSetDeclareElectionWinnerResponse::getTerm() const {
+ return _term;
+}
+
+long long ReplSetDeclareElectionWinnerResponse::getErrorCode() const {
+ return _code;
+}
+
+const std::string& ReplSetDeclareElectionWinnerResponse::getErrorMsg() const {
+ return _errmsg;
+}
+
+void ReplSetDeclareElectionWinnerResponse::addToBSON(BSONObjBuilder* builder) const {
+ builder->append(kOkFieldName, _ok);
+ builder->append(kTermFieldName, _term);
+ if (_code != ErrorCodes::OK) {
+ builder->append(kErrorCodeFieldName, _code);
+ builder->append(kErrorMessageFieldName, _errmsg);
}
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_declare_election_winner_args.h b/src/mongo/db/repl/repl_set_declare_election_winner_args.h
index fb4d8f30a15..dd96fd94854 100644
--- a/src/mongo/db/repl/repl_set_declare_election_winner_args.h
+++ b/src/mongo/db/repl/repl_set_declare_election_winner_args.h
@@ -34,45 +34,45 @@
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
+class BSONObj;
+class BSONObjBuilder;
namespace repl {
- class ReplSetDeclareElectionWinnerArgs {
- public:
- Status initialize(const BSONObj& argsObj);
+class ReplSetDeclareElectionWinnerArgs {
+public:
+ Status initialize(const BSONObj& argsObj);
- std::string getReplSetName() const;
- long long getTerm() const;
- long long getWinnerId() const;
+ std::string getReplSetName() const;
+ long long getTerm() const;
+ long long getWinnerId() const;
- void addToBSON(BSONObjBuilder* builder) const;
- BSONObj toBSON() const;
+ void addToBSON(BSONObjBuilder* builder) const;
+ BSONObj toBSON() const;
- private:
- std::string _setName;
- long long _term = -1; // The term for which the winner is being declared.
- long long _winnerId = -1; // replSet id of the member who was the winner.
- };
+private:
+ std::string _setName;
+ long long _term = -1; // The term for which the winner is being declared.
+ long long _winnerId = -1; // replSet id of the member who was the winner.
+};
- class ReplSetDeclareElectionWinnerResponse {
- public:
- Status initialize(const BSONObj& argsObj);
-
- bool getOk() const;
- long long getTerm() const;
- long long getErrorCode() const;
- const std::string& getErrorMsg() const;
+class ReplSetDeclareElectionWinnerResponse {
+public:
+ Status initialize(const BSONObj& argsObj);
- void addToBSON(BSONObjBuilder* builder) const;
+ bool getOk() const;
+ long long getTerm() const;
+ long long getErrorCode() const;
+ const std::string& getErrorMsg() const;
- private:
- bool _ok = false;
- long long _term = -1;
- long long _code = ErrorCodes::OK;
- std::string _errmsg;
- };
+ void addToBSON(BSONObjBuilder* builder) const;
-} // namespace repl
-} // namespace mongo
+private:
+ bool _ok = false;
+ long long _term = -1;
+ long long _code = ErrorCodes::OK;
+ std::string _errmsg;
+};
+
+} // namespace repl
+} // namespace mongo
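A round-trip sketch (not part of the patch) of the wire format implied by addToBSON()/initialize() above; the function name is invented and includes are assumed:

// Illustrative only.
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/repl_set_declare_election_winner_args.h"

void declareElectionWinnerRoundTrip() {
    using namespace mongo;
    using namespace mongo::repl;

    // The command document has the shape produced by addToBSON():
    // { replSetDeclareElectionWinner: 1, setName: "rs0", term: 3, winnerId: 2 }
    ReplSetDeclareElectionWinnerArgs args;
    Status argsStatus = args.initialize(BSON("replSetDeclareElectionWinner" << 1 << "setName"
                                                                            << "rs0"
                                                                            << "term" << 3
                                                                            << "winnerId" << 2));
    // argsStatus.isOK() is expected here; any unknown field would be rejected.

    // A successful response carries ok and the responder's term; code/errmsg appear only on error.
    ReplSetDeclareElectionWinnerResponse response;
    Status responseStatus = response.initialize(BSON("ok" << true << "term" << 3));
    // responseStatus.isOK() is expected here as well.
}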
diff --git a/src/mongo/db/repl/repl_set_heartbeat_args.cpp b/src/mongo/db/repl/repl_set_heartbeat_args.cpp
index 75eee68348f..babca5a0dfa 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_args.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_args.cpp
@@ -39,142 +39,133 @@ namespace repl {
namespace {
- const std::string kCheckEmptyFieldName = "checkEmpty";
- const std::string kProtocolVersionFieldName = "pv";
- const std::string kConfigVersionFieldName = "v";
- const std::string kSenderIdFieldName = "fromId";
- const std::string kSetNameFieldName = "replSetHeartbeat";
- const std::string kSenderHostFieldName = "from";
-
- const std::string kLegalHeartbeatFieldNames[] = {
- kCheckEmptyFieldName,
- kProtocolVersionFieldName,
- kConfigVersionFieldName,
- kSenderIdFieldName,
- kSetNameFieldName,
- kSenderHostFieldName
- };
-
-} // namespace
-
- ReplSetHeartbeatArgs::ReplSetHeartbeatArgs() :
- _hasCheckEmpty(false),
- _hasProtocolVersion(false),
- _hasConfigVersion(false),
- _hasSenderId(false),
- _hasSetName(false),
- _hasSenderHost(false),
- _checkEmpty(false),
- _protocolVersion(-1),
- _configVersion(-1),
- _senderId(-1),
- _setName(""),
- _senderHost(HostAndPort()) {}
-
- Status ReplSetHeartbeatArgs::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("ReplSetHeartbeatArgs",
- argsObj,
- kLegalHeartbeatFieldNames);
+const std::string kCheckEmptyFieldName = "checkEmpty";
+const std::string kProtocolVersionFieldName = "pv";
+const std::string kConfigVersionFieldName = "v";
+const std::string kSenderIdFieldName = "fromId";
+const std::string kSetNameFieldName = "replSetHeartbeat";
+const std::string kSenderHostFieldName = "from";
+
+const std::string kLegalHeartbeatFieldNames[] = {kCheckEmptyFieldName,
+ kProtocolVersionFieldName,
+ kConfigVersionFieldName,
+ kSenderIdFieldName,
+ kSetNameFieldName,
+ kSenderHostFieldName};
+
+} // namespace
+
+ReplSetHeartbeatArgs::ReplSetHeartbeatArgs()
+ : _hasCheckEmpty(false),
+ _hasProtocolVersion(false),
+ _hasConfigVersion(false),
+ _hasSenderId(false),
+ _hasSetName(false),
+ _hasSenderHost(false),
+ _checkEmpty(false),
+ _protocolVersion(-1),
+ _configVersion(-1),
+ _senderId(-1),
+ _setName(""),
+ _senderHost(HostAndPort()) {}
+
+Status ReplSetHeartbeatArgs::initialize(const BSONObj& argsObj) {
+ Status status =
+ bsonCheckOnlyHasFields("ReplSetHeartbeatArgs", argsObj, kLegalHeartbeatFieldNames);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractBooleanFieldWithDefault(argsObj, kCheckEmptyFieldName, false, &_checkEmpty);
+ if (!status.isOK())
+ return status;
+ _hasCheckEmpty = true;
+
+ status = bsonExtractIntegerField(argsObj, kProtocolVersionFieldName, &_protocolVersion);
+ if (!status.isOK())
+ return status;
+ _hasProtocolVersion = true;
+
+ status = bsonExtractIntegerField(argsObj, kConfigVersionFieldName, &_configVersion);
+ if (!status.isOK())
+ return status;
+ _hasConfigVersion = true;
+
+ status = bsonExtractIntegerFieldWithDefault(argsObj, kSenderIdFieldName, -1, &_senderId);
+ if (!status.isOK())
+ return status;
+ _hasSenderId = true;
+
+ status = bsonExtractStringField(argsObj, kSetNameFieldName, &_setName);
+ if (!status.isOK())
+ return status;
+ _hasSetName = true;
+
+ std::string hostAndPortString;
+ status =
+ bsonExtractStringFieldWithDefault(argsObj, kSenderHostFieldName, "", &hostAndPortString);
+ if (!status.isOK())
+ return status;
+
+ if (!hostAndPortString.empty()) {
+ status = _senderHost.initialize(hostAndPortString);
if (!status.isOK())
return status;
-
- status = bsonExtractBooleanFieldWithDefault(argsObj,
- kCheckEmptyFieldName,
- false,
- &_checkEmpty);
- if (!status.isOK())
- return status;
- _hasCheckEmpty = true;
-
- status = bsonExtractIntegerField(argsObj, kProtocolVersionFieldName, &_protocolVersion);
- if (!status.isOK())
- return status;
- _hasProtocolVersion = true;
-
- status = bsonExtractIntegerField(argsObj, kConfigVersionFieldName, &_configVersion);
- if (!status.isOK())
- return status;
- _hasConfigVersion = true;
-
- status = bsonExtractIntegerFieldWithDefault(argsObj, kSenderIdFieldName, -1, &_senderId);
- if (!status.isOK())
- return status;
- _hasSenderId = true;
-
- status = bsonExtractStringField(argsObj, kSetNameFieldName, &_setName);
- if (!status.isOK())
- return status;
- _hasSetName = true;
-
- std::string hostAndPortString;
- status = bsonExtractStringFieldWithDefault(
- argsObj,
- kSenderHostFieldName,
- "",
- &hostAndPortString);
- if (!status.isOK())
- return status;
-
- if (!hostAndPortString.empty()) {
- status = _senderHost.initialize(hostAndPortString);
- if (!status.isOK())
- return status;
- _hasSenderHost = true;
- }
-
- return Status::OK();
- }
-
- bool ReplSetHeartbeatArgs::isInitialized() const {
- return _hasProtocolVersion && _hasConfigVersion && _hasSetName;
- }
-
- BSONObj ReplSetHeartbeatArgs::toBSON() const {
- invariant(isInitialized());
- BSONObjBuilder builder;
- builder.append("replSetHeartbeat", _setName);
- builder.appendIntOrLL("pv", _protocolVersion);
- builder.appendIntOrLL("v", _configVersion);
- builder.append("from", _hasSenderHost ? _senderHost.toString() : "");
-
- if (_hasSenderId) {
- builder.appendIntOrLL("fromId", _senderId);
- }
- if (_hasCheckEmpty) {
- builder.append("checkEmpty", _checkEmpty);
- }
- return builder.obj();
- }
-
- void ReplSetHeartbeatArgs::setCheckEmpty(bool newVal) {
- _checkEmpty = newVal;
- _hasCheckEmpty = true;
+ _hasSenderHost = true;
}
- void ReplSetHeartbeatArgs::setProtocolVersion(long long newVal) {
- _protocolVersion = newVal;
- _hasProtocolVersion = true;
- }
+ return Status::OK();
+}
- void ReplSetHeartbeatArgs::setConfigVersion(long long newVal) {
- _configVersion = newVal;
- _hasConfigVersion = true;
- }
+bool ReplSetHeartbeatArgs::isInitialized() const {
+ return _hasProtocolVersion && _hasConfigVersion && _hasSetName;
+}
- void ReplSetHeartbeatArgs::setSenderId(long long newVal) {
- _senderId = newVal;
- _hasSenderId = true;
- }
+BSONObj ReplSetHeartbeatArgs::toBSON() const {
+ invariant(isInitialized());
+ BSONObjBuilder builder;
+ builder.append("replSetHeartbeat", _setName);
+ builder.appendIntOrLL("pv", _protocolVersion);
+ builder.appendIntOrLL("v", _configVersion);
+ builder.append("from", _hasSenderHost ? _senderHost.toString() : "");
- void ReplSetHeartbeatArgs::setSetName(std::string newVal) {
- _setName = newVal;
- _hasSetName = true;
+ if (_hasSenderId) {
+ builder.appendIntOrLL("fromId", _senderId);
}
-
- void ReplSetHeartbeatArgs::setSenderHost(HostAndPort newVal) {
- _senderHost = newVal;
- _hasSenderHost = true;
+ if (_hasCheckEmpty) {
+ builder.append("checkEmpty", _checkEmpty);
}
+ return builder.obj();
+}
+
+void ReplSetHeartbeatArgs::setCheckEmpty(bool newVal) {
+ _checkEmpty = newVal;
+ _hasCheckEmpty = true;
+}
+
+void ReplSetHeartbeatArgs::setProtocolVersion(long long newVal) {
+ _protocolVersion = newVal;
+ _hasProtocolVersion = true;
+}
+
+void ReplSetHeartbeatArgs::setConfigVersion(long long newVal) {
+ _configVersion = newVal;
+ _hasConfigVersion = true;
+}
+
+void ReplSetHeartbeatArgs::setSenderId(long long newVal) {
+ _senderId = newVal;
+ _hasSenderId = true;
+}
+
+void ReplSetHeartbeatArgs::setSetName(std::string newVal) {
+ _setName = newVal;
+ _hasSetName = true;
+}
+
+void ReplSetHeartbeatArgs::setSenderHost(HostAndPort newVal) {
+ _senderHost = newVal;
+ _hasSenderHost = true;
+}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_heartbeat_args.h b/src/mongo/db/repl/repl_set_heartbeat_args.h
index 487be758524..f03e3260a04 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_args.h
+++ b/src/mongo/db/repl/repl_set_heartbeat_args.h
@@ -34,101 +34,125 @@
namespace mongo {
- class BSONObj;
- class Status;
+class BSONObj;
+class Status;
namespace repl {
+/**
+ * Arguments to the replSetHeartbeat command.
+ */
+class ReplSetHeartbeatArgs {
+public:
+ ReplSetHeartbeatArgs();
+
+ /**
+ * Initializes this ReplSetHeartbeatArgs from the contents of args.
+ */
+ Status initialize(const BSONObj& argsObj);
+
+ /**
+ * Returns true if all required fields have been initialized.
+ */
+ bool isInitialized() const;
+
+ /**
+     * Returns whether the sender wants to know whether this node is empty.
+ */
+ bool getCheckEmpty() const {
+ return _checkEmpty;
+ }
+
+ /**
+ * Gets the version of the Heartbeat protocol being used by the sender.
+ */
+ long long getProtocolVersion() const {
+ return _protocolVersion;
+ }
+
+ /**
+ * Gets the ReplSetConfig version number of the sender.
+ */
+ long long getConfigVersion() const {
+ return _configVersion;
+ }
+
+ /**
+ * Gets the _id of the sender in their ReplSetConfig.
+ */
+ long long getSenderId() const {
+ return _senderId;
+ }
+
+ /**
+ * Gets the replSet name of the sender's replica set.
+ */
+ std::string getSetName() const {
+ return _setName;
+ }
+
+ /**
+ * Gets the HostAndPort of the sender.
+ */
+ HostAndPort getSenderHost() const {
+ return _senderHost;
+ }
+
+ /**
+     * The below methods check whether or not the value named in the method has been set.
+ */
+ bool hasCheckEmpty() {
+ return _hasCheckEmpty;
+ }
+ bool hasProtocolVersion() {
+ return _hasProtocolVersion;
+ }
+ bool hasConfigVersion() {
+ return _hasConfigVersion;
+ }
+ bool hasSenderId() {
+ return _hasSenderId;
+ }
+ bool hasSetName() {
+ return _hasSetName;
+ }
+ bool hasSenderHost() {
+ return _hasSenderHost;
+ }
+
+ /**
+ * The below methods set the value in the method name to 'newVal'.
+ */
+ void setCheckEmpty(bool newVal);
+ void setProtocolVersion(long long newVal);
+ void setConfigVersion(long long newVal);
+ void setSenderId(long long newVal);
+ void setSetName(std::string newVal);
+ void setSenderHost(HostAndPort newVal);
+
/**
- * Arguments to the replSetHeartbeat command.
+ * Returns a BSONified version of the object.
+ * Should only be called if the mandatory fields have been set.
+ * Optional fields are only included if they have been set.
*/
- class ReplSetHeartbeatArgs {
- public:
- ReplSetHeartbeatArgs();
-
- /**
- * Initializes this ReplSetHeartbeatArgs from the contents of args.
- */
- Status initialize(const BSONObj& argsObj);
-
- /**
- * Returns true if all required fields have been initialized.
- */
- bool isInitialized() const;
-
- /**
- * Returns whether the sender would like to know whether the node is empty or not.
- */
- bool getCheckEmpty() const { return _checkEmpty; }
-
- /**
- * Gets the version of the Heartbeat protocol being used by the sender.
- */
- long long getProtocolVersion() const { return _protocolVersion; }
-
- /**
- * Gets the ReplSetConfig version number of the sender.
- */
- long long getConfigVersion() const { return _configVersion; }
-
- /**
- * Gets the _id of the sender in their ReplSetConfig.
- */
- long long getSenderId() const { return _senderId; }
-
- /**
- * Gets the replSet name of the sender's replica set.
- */
- std::string getSetName() const { return _setName; }
-
- /**
- * Gets the HostAndPort of the sender.
- */
- HostAndPort getSenderHost() const { return _senderHost; }
-
- /**
- * The below methods check whether or not value in the method name has been set.
- */
- bool hasCheckEmpty() { return _hasCheckEmpty; }
- bool hasProtocolVersion() { return _hasProtocolVersion; }
- bool hasConfigVersion() { return _hasConfigVersion; }
- bool hasSenderId() { return _hasSenderId; }
- bool hasSetName() { return _hasSetName; }
- bool hasSenderHost() { return _hasSenderHost; }
-
- /**
- * The below methods set the value in the method name to 'newVal'.
- */
- void setCheckEmpty(bool newVal);
- void setProtocolVersion(long long newVal);
- void setConfigVersion(long long newVal);
- void setSenderId(long long newVal);
- void setSetName(std::string newVal);
- void setSenderHost(HostAndPort newVal);
-
- /**
- * Returns a BSONified version of the object.
- * Should only be called if the mandatory fields have been set.
- * Optional fields are only included if they have been set.
- */
- BSONObj toBSON() const;
-
- private:
- bool _hasCheckEmpty;
- bool _hasProtocolVersion;
- bool _hasConfigVersion;
- bool _hasSenderId;
- bool _hasSetName;
- bool _hasSenderHost;
-
- // look at the body of the isInitialized() function to see which fields are mandatory
- bool _checkEmpty;
- long long _protocolVersion;
- long long _configVersion;
- long long _senderId;
- std::string _setName;
- HostAndPort _senderHost;
- };
-
-} // namespace repl
-} // namespace mongo
+ BSONObj toBSON() const;
+
+private:
+ bool _hasCheckEmpty;
+ bool _hasProtocolVersion;
+ bool _hasConfigVersion;
+ bool _hasSenderId;
+ bool _hasSetName;
+ bool _hasSenderHost;
+
+ // look at the body of the isInitialized() function to see which fields are mandatory
+ bool _checkEmpty;
+ long long _protocolVersion;
+ long long _configVersion;
+ long long _senderId;
+ std::string _setName;
+ HostAndPort _senderHost;
+};
+
+} // namespace repl
+} // namespace mongo
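A construction sketch (not part of the patch) for the 3.0-style heartbeat arguments above; the helper name and includes are assumptions, and the field names come from toBSON() in the .cpp:

// Illustrative only.
#include "mongo/bson/bsonobj.h"
#include "mongo/db/repl/repl_set_heartbeat_args.h"
#include "mongo/util/net/hostandport.h"

mongo::BSONObj buildV0Heartbeat() {
    mongo::repl::ReplSetHeartbeatArgs args;
    args.setSetName("rs0");
    args.setProtocolVersion(1);
    args.setConfigVersion(5);
    args.setSenderId(0);
    args.setSenderHost(mongo::HostAndPort("node0.example.net", 27017));

    // isInitialized() only requires pv, v and the set name; from/fromId/checkEmpty are optional.
    // The result looks roughly like:
    // { replSetHeartbeat: "rs0", pv: 1, v: 5, from: "node0.example.net:27017", fromId: 0 }
    return args.toBSON();
}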
diff --git a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
index b65b9e99521..c79249815f9 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp
@@ -40,113 +40,107 @@ namespace repl {
namespace {
- const std::string kCheckEmptyFieldName = "checkEmpty";
- const std::string kConfigVersionFieldName = "configVersion";
- const std::string kSenderHostFieldName = "from";
- const std::string kSenderIdFieldName = "fromId";
- const std::string kSetNameFieldName = "replSetHeartbeat";
- const std::string kTermFieldName = "term";
-
- const std::string kLegalHeartbeatFieldNames[] = {
- kCheckEmptyFieldName,
- kConfigVersionFieldName,
- kSenderHostFieldName,
- kSenderIdFieldName,
- kSetNameFieldName,
- kTermFieldName
- };
-
-} // namespace
-
- Status ReplSetHeartbeatArgsV1::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("ReplSetHeartbeatArgs",
- argsObj,
- kLegalHeartbeatFieldNames);
+const std::string kCheckEmptyFieldName = "checkEmpty";
+const std::string kConfigVersionFieldName = "configVersion";
+const std::string kSenderHostFieldName = "from";
+const std::string kSenderIdFieldName = "fromId";
+const std::string kSetNameFieldName = "replSetHeartbeat";
+const std::string kTermFieldName = "term";
+
+const std::string kLegalHeartbeatFieldNames[] = {kCheckEmptyFieldName,
+ kConfigVersionFieldName,
+ kSenderHostFieldName,
+ kSenderIdFieldName,
+ kSetNameFieldName,
+ kTermFieldName};
+
+} // namespace
+
+Status ReplSetHeartbeatArgsV1::initialize(const BSONObj& argsObj) {
+ Status status =
+ bsonCheckOnlyHasFields("ReplSetHeartbeatArgs", argsObj, kLegalHeartbeatFieldNames);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractBooleanFieldWithDefault(argsObj, kCheckEmptyFieldName, false, &_checkEmpty);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerField(argsObj, kConfigVersionFieldName, &_configVersion);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerFieldWithDefault(argsObj, kSenderIdFieldName, -1, &_senderId);
+ if (!status.isOK())
+ return status;
+
+ std::string hostAndPortString;
+ status = bsonExtractStringField(argsObj, kSenderHostFieldName, &hostAndPortString);
+ if (!status.isOK())
+ return status;
+ if (!hostAndPortString.empty()) {
+ status = _senderHost.initialize(hostAndPortString);
if (!status.isOK())
return status;
+ _hasSender = true;
+ }
- status = bsonExtractBooleanFieldWithDefault(argsObj,
- kCheckEmptyFieldName,
- false,
- &_checkEmpty);
- if (!status.isOK())
- return status;
+ status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
+ if (!status.isOK())
+ return status;
- status = bsonExtractIntegerField(argsObj, kConfigVersionFieldName, &_configVersion);
- if (!status.isOK())
- return status;
+ status = bsonExtractStringField(argsObj, kSetNameFieldName, &_setName);
+ if (!status.isOK())
+ return status;
- status = bsonExtractIntegerFieldWithDefault(argsObj, kSenderIdFieldName, -1, &_senderId);
- if (!status.isOK())
- return status;
+ return Status::OK();
+}
- std::string hostAndPortString;
- status = bsonExtractStringField(argsObj, kSenderHostFieldName, &hostAndPortString);
- if (!status.isOK())
- return status;
- if (!hostAndPortString.empty()) {
- status = _senderHost.initialize(hostAndPortString);
- if (!status.isOK())
- return status;
- _hasSender = true;
- }
-
- status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
- if (!status.isOK())
- return status;
+bool ReplSetHeartbeatArgsV1::isInitialized() const {
+ return _configVersion != -1 && _term != -1 && !_setName.empty();
+}
- status = bsonExtractStringField(argsObj, kSetNameFieldName, &_setName);
- if (!status.isOK())
- return status;
+void ReplSetHeartbeatArgsV1::setConfigVersion(long long newVal) {
+ _configVersion = newVal;
+}
- return Status::OK();
- }
-
- bool ReplSetHeartbeatArgsV1::isInitialized() const {
- return _configVersion != -1 && _term != -1 && !_setName.empty();
- }
-
- void ReplSetHeartbeatArgsV1::setConfigVersion(long long newVal) {
- _configVersion = newVal;
- }
-
- void ReplSetHeartbeatArgsV1::setSenderHost(const HostAndPort& newVal) {
- _senderHost = newVal;
- }
+void ReplSetHeartbeatArgsV1::setSenderHost(const HostAndPort& newVal) {
+ _senderHost = newVal;
+}
- void ReplSetHeartbeatArgsV1::setSenderId(long long newVal) {
- _senderId = newVal;
- }
+void ReplSetHeartbeatArgsV1::setSenderId(long long newVal) {
+ _senderId = newVal;
+}
- void ReplSetHeartbeatArgsV1::setSetName(const std::string& newVal) {
- _setName = newVal;
- }
+void ReplSetHeartbeatArgsV1::setSetName(const std::string& newVal) {
+ _setName = newVal;
+}
- void ReplSetHeartbeatArgsV1::setTerm(long long newVal) {
- _term = newVal;
- }
+void ReplSetHeartbeatArgsV1::setTerm(long long newVal) {
+ _term = newVal;
+}
- void ReplSetHeartbeatArgsV1::setCheckEmpty() {
- _checkEmpty = true;
- }
+void ReplSetHeartbeatArgsV1::setCheckEmpty() {
+ _checkEmpty = true;
+}
- BSONObj ReplSetHeartbeatArgsV1::toBSON() const {
- invariant(isInitialized());
- BSONObjBuilder builder;
- addToBSON(&builder);
- return builder.obj();
- }
+BSONObj ReplSetHeartbeatArgsV1::toBSON() const {
+ invariant(isInitialized());
+ BSONObjBuilder builder;
+ addToBSON(&builder);
+ return builder.obj();
+}
- void ReplSetHeartbeatArgsV1::addToBSON(BSONObjBuilder* builder) const {
- builder->append(kSetNameFieldName, _setName);
- if (_checkEmpty) {
- builder->append(kCheckEmptyFieldName, _checkEmpty);
- }
- builder->appendIntOrLL(kConfigVersionFieldName, _configVersion);
- builder->append(kSenderHostFieldName, _hasSender ? _senderHost.toString() : "");
- builder->appendIntOrLL(kSenderIdFieldName, _senderId);
- builder->appendIntOrLL(kTermFieldName, _term);
+void ReplSetHeartbeatArgsV1::addToBSON(BSONObjBuilder* builder) const {
+ builder->append(kSetNameFieldName, _setName);
+ if (_checkEmpty) {
+ builder->append(kCheckEmptyFieldName, _checkEmpty);
}
+ builder->appendIntOrLL(kConfigVersionFieldName, _configVersion);
+ builder->append(kSenderHostFieldName, _hasSender ? _senderHost.toString() : "");
+ builder->appendIntOrLL(kSenderIdFieldName, _senderId);
+ builder->appendIntOrLL(kTermFieldName, _term);
+}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_heartbeat_args_v1.h b/src/mongo/db/repl/repl_set_heartbeat_args_v1.h
index a7f6691a31a..2a6dbddd3dd 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_args_v1.h
+++ b/src/mongo/db/repl/repl_set_heartbeat_args_v1.h
@@ -34,86 +34,98 @@
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class Status;
+class BSONObj;
+class BSONObjBuilder;
+class Status;
namespace repl {
+/**
+ * Arguments to the replSetHeartbeat command.
+ */
+class ReplSetHeartbeatArgsV1 {
+public:
+ /**
+ * Initializes this ReplSetHeartbeatArgsV1 from the contents of args.
+ */
+ Status initialize(const BSONObj& argsObj);
+
+ /**
+ * Returns true if all required fields have been initialized.
+ */
+ bool isInitialized() const;
+
+ /**
+ * Gets the ReplSetConfig version number of the sender.
+ */
+ long long getConfigVersion() const {
+ return _configVersion;
+ }
+
+ /**
+ * Gets the _id of the sender in their ReplSetConfig.
+ */
+ long long getSenderId() const {
+ return _senderId;
+ }
+
+ /**
+ * Gets the HostAndPort of the sender.
+ */
+ HostAndPort getSenderHost() const {
+ return _senderHost;
+ }
+
+ /**
+ * Gets the replSet name of the sender's replica set.
+ */
+ std::string getSetName() const {
+ return _setName;
+ }
+
+ /**
+     * Gets the term the sender believes it is currently in.
+ */
+ long long getTerm() const {
+ return _term;
+ }
+
+ /**
+ * Returns whether or not the sender is checking for emptiness.
+ */
+ bool hasCheckEmpty() const {
+ return _checkEmpty;
+ }
+
+ /**
+ * The below methods set the value in the method name to 'newVal'.
+ */
+ void setConfigVersion(long long newVal);
+ void setSenderId(long long newVal);
+ void setSenderHost(const HostAndPort& newVal);
+ void setSetName(const std::string& newVal);
+ void setTerm(long long newVal);
+ void setCheckEmpty();
+
/**
- * Arguments to the replSetHeartbeat command.
+ * Returns a BSONified version of the object.
+ * Should only be called if the mandatory fields have been set.
+ * Optional fields are only included if they have been set.
*/
- class ReplSetHeartbeatArgsV1 {
- public:
- /**
- * Initializes this ReplSetHeartbeatArgsV1 from the contents of args.
- */
- Status initialize(const BSONObj& argsObj);
-
- /**
- * Returns true if all required fields have been initialized.
- */
- bool isInitialized() const;
-
- /**
- * Gets the ReplSetConfig version number of the sender.
- */
- long long getConfigVersion() const { return _configVersion; }
-
- /**
- * Gets the _id of the sender in their ReplSetConfig.
- */
- long long getSenderId() const { return _senderId; }
-
- /**
- * Gets the HostAndPort of the sender.
- */
- HostAndPort getSenderHost() const { return _senderHost; }
-
- /**
- * Gets the replSet name of the sender's replica set.
- */
- std::string getSetName() const { return _setName; }
-
- /**
- * Gets the term the sender believes it to be.
- */
- long long getTerm() const { return _term; }
-
- /**
- * Returns whether or not the sender is checking for emptiness.
- */
- bool hasCheckEmpty() const { return _checkEmpty; }
-
- /**
- * The below methods set the value in the method name to 'newVal'.
- */
- void setConfigVersion(long long newVal);
- void setSenderId(long long newVal);
- void setSenderHost(const HostAndPort& newVal);
- void setSetName(const std::string& newVal);
- void setTerm(long long newVal);
- void setCheckEmpty();
-
- /**
- * Returns a BSONified version of the object.
- * Should only be called if the mandatory fields have been set.
- * Optional fields are only included if they have been set.
- */
- BSONObj toBSON() const;
-
- void addToBSON(BSONObjBuilder* builder) const;
-
- private:
- // look at the body of the isInitialized() function to see which fields are mandatory
- long long _configVersion = -1;
- long long _senderId = -1;
- long long _term = -1;
- bool _checkEmpty = false;
- bool _hasSender = false;
- std::string _setName;
- HostAndPort _senderHost;
- };
-
-} // namespace repl
-} // namespace mongo
+ BSONObj toBSON() const;
+
+ void addToBSON(BSONObjBuilder* builder) const;
+
+private:
+ // look at the body of the isInitialized() function to see which fields are mandatory
+ long long _configVersion = -1;
+ long long _senderId = -1;
+ long long _term = -1;
+ bool _checkEmpty = false;
+ bool _hasSender = false;
+ std::string _setName;
+ HostAndPort _senderHost;
+};
+
+} // namespace repl
+} // namespace mongo
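The V1 counterpart of the previous sketch (again not part of the patch); note the wire fields are configVersion and term rather than pv/v, per addToBSON() above:

// Illustrative only.
#include "mongo/bson/bsonobj.h"
#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/util/net/hostandport.h"

mongo::BSONObj buildV1Heartbeat() {
    mongo::repl::ReplSetHeartbeatArgsV1 args;
    args.setSetName("rs0");
    args.setConfigVersion(5);
    args.setTerm(2);
    args.setSenderId(0);
    args.setSenderHost(mongo::HostAndPort("node0.example.net", 27017));

    // isInitialized() requires configVersion, term and the set name to have been set.
    // Note: as written above, only initialize() sets the internal _hasSender flag, so a
    // locally built message serializes "from" as an empty string:
    // { replSetHeartbeat: "rs0", configVersion: 5, from: "", fromId: 0, term: 2 }
    return args.toBSON();
}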
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index 43b62a43f48..d2a77c58a5f 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -45,345 +45,335 @@ namespace mongo {
namespace repl {
namespace {
- const std::string kConfigFieldName = "config";
- const std::string kConfigVersionFieldName = "v";
- const std::string kElectionTimeFieldName = "electionTime";
- const std::string kErrMsgFieldName = "errmsg";
- const std::string kErrorCodeFieldName = "code";
- const std::string kHasDataFieldName = "hasData";
- const std::string kHasStateDisagreementFieldName = "stateDisagreement";
- const std::string kHbMessageFieldName = "hbmsg";
- const std::string kIsElectableFieldName = "e";
- const std::string kIsReplSetFieldName = "rs";
- const std::string kMemberStateFieldName = "state";
- const std::string kMismatchFieldName = "mismatch";
- const std::string kOkFieldName = "ok";
- const std::string kOpTimeFieldName = "opTime";
- const std::string kPrimaryIdFieldName = "primaryId";
- const std::string kReplSetFieldName = "set";
- const std::string kSyncSourceFieldName = "syncingTo";
- const std::string kTermFieldName = "term";
- const std::string kTimeFieldName = "time";
- const std::string kTimestampFieldName = "ts";
+const std::string kConfigFieldName = "config";
+const std::string kConfigVersionFieldName = "v";
+const std::string kElectionTimeFieldName = "electionTime";
+const std::string kErrMsgFieldName = "errmsg";
+const std::string kErrorCodeFieldName = "code";
+const std::string kHasDataFieldName = "hasData";
+const std::string kHasStateDisagreementFieldName = "stateDisagreement";
+const std::string kHbMessageFieldName = "hbmsg";
+const std::string kIsElectableFieldName = "e";
+const std::string kIsReplSetFieldName = "rs";
+const std::string kMemberStateFieldName = "state";
+const std::string kMismatchFieldName = "mismatch";
+const std::string kOkFieldName = "ok";
+const std::string kOpTimeFieldName = "opTime";
+const std::string kPrimaryIdFieldName = "primaryId";
+const std::string kReplSetFieldName = "set";
+const std::string kSyncSourceFieldName = "syncingTo";
+const std::string kTermFieldName = "term";
+const std::string kTimeFieldName = "time";
+const std::string kTimestampFieldName = "ts";
} // namespace
- void ReplSetHeartbeatResponse::addToBSON(BSONObjBuilder* builder,
- bool isProtocolVersionV1) const {
- if (_mismatch) {
- *builder << kOkFieldName << 0.0;
- *builder << kMismatchFieldName << _mismatch;
- return;
- }
-
- builder->append(kOkFieldName, 1.0);
- if (_timeSet) {
- *builder << kTimeFieldName << durationCount<Seconds>(_time);
- }
- if (_electionTimeSet) {
- builder->appendDate(kElectionTimeFieldName,
- Date_t::fromMillisSinceEpoch(_electionTime.asLL()));
- }
- if (_configSet) {
- *builder << kConfigFieldName << _config.toBSON();
- }
- if (_electableSet) {
- *builder << kIsElectableFieldName << _electable;
- }
- if (_isReplSet) {
- *builder << "rs" << _isReplSet;
- }
- if (_stateDisagreement) {
- *builder << kHasStateDisagreementFieldName << _stateDisagreement;
- }
- if (_stateSet) {
- builder->appendIntOrLL(kMemberStateFieldName, _state.s);
- }
- if (_configVersion != -1) {
- *builder << kConfigVersionFieldName << _configVersion;
- }
- *builder << kHbMessageFieldName << _hbmsg;
- if (!_setName.empty()) {
- *builder << kReplSetFieldName << _setName;
- }
- if (!_syncingTo.empty()) {
- *builder << kSyncSourceFieldName << _syncingTo.toString();
- }
- if (_hasDataSet) {
- builder->append(kHasDataFieldName, _hasData);
- }
- if (_term != -1) {
- builder->append(kTermFieldName, _term);
- }
- if (_primaryIdSet) {
- builder->append(kPrimaryIdFieldName, _primaryId);
- }
- if (_opTimeSet) {
- if (isProtocolVersionV1) {
- BSONObjBuilder opTime(builder->subobjStart(kOpTimeFieldName));
- opTime.append(kTimestampFieldName, _opTime.getTimestamp());
- opTime.append(kTermFieldName, _opTime.getTerm());
- opTime.done();
- }
- else {
- builder->appendDate(kOpTimeFieldName,
- Date_t::fromMillisSinceEpoch(_opTime.getTimestamp().asLL()));
- }
- }
-
+void ReplSetHeartbeatResponse::addToBSON(BSONObjBuilder* builder, bool isProtocolVersionV1) const {
+ if (_mismatch) {
+ *builder << kOkFieldName << 0.0;
+ *builder << kMismatchFieldName << _mismatch;
+ return;
}
- BSONObj ReplSetHeartbeatResponse::toBSON(bool isProtocolVersionV1) const {
- BSONObjBuilder builder;
- addToBSON(&builder, isProtocolVersionV1);
- return builder.obj();
+ builder->append(kOkFieldName, 1.0);
+ if (_timeSet) {
+ *builder << kTimeFieldName << durationCount<Seconds>(_time);
}
-
- Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term) {
-
- // Old versions set this even though they returned not "ok"
- _mismatch = doc[kMismatchFieldName].trueValue();
- if (_mismatch)
- return Status(ErrorCodes::InconsistentReplicaSetNames,
- "replica set name doesn't match.");
-
- // Old versions sometimes set the replica set name ("set") but ok:0
- const BSONElement replSetNameElement = doc[kReplSetFieldName];
- if (replSetNameElement.eoo()) {
- _setName.clear();
- }
- else if (replSetNameElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kReplSetFieldName << "\" field in response to replSetHeartbeat to have "
- "type String, but found " << typeName(replSetNameElement.type()));
- }
- else {
- _setName = replSetNameElement.String();
- }
-
- if (_setName.empty() && !doc[kOkFieldName].trueValue()) {
- std::string errMsg = doc[kErrMsgFieldName].str();
-
- BSONElement errCodeElem = doc[kErrorCodeFieldName];
- if (errCodeElem.ok()) {
- if (!errCodeElem.isNumber())
- return Status(ErrorCodes::BadValue, "Error code is not a number!");
-
- int errorCode = errCodeElem.numberInt();
- return Status(ErrorCodes::Error(errorCode), errMsg);
- }
- return Status(ErrorCodes::UnknownError, errMsg);
- }
-
- const BSONElement hasDataElement = doc[kHasDataFieldName];
- _hasDataSet = !hasDataElement.eoo();
- _hasData = hasDataElement.trueValue();
-
- const BSONElement electionTimeElement = doc[kElectionTimeFieldName];
- if (electionTimeElement.eoo()) {
- _electionTimeSet = false;
- }
- else if (electionTimeElement.type() == bsonTimestamp) {
- _electionTimeSet = true;
- _electionTime = electionTimeElement.timestamp();
- }
- else if (electionTimeElement.type() == Date) {
- _electionTimeSet = true;
- _electionTime = Timestamp(electionTimeElement.date());
- }
- else {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kElectionTimeFieldName << "\" field in response to replSetHeartbeat "
- "command to have type Date or Timestamp, but found type " <<
- typeName(electionTimeElement.type()));
- }
-
- const BSONElement timeElement = doc[kTimeFieldName];
- if (timeElement.eoo()) {
- _timeSet = false;
- }
- else if (timeElement.isNumber()) {
- _timeSet = true;
- _time = Seconds(timeElement.numberLong());
- }
- else {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kTimeFieldName << "\" field in response to replSetHeartbeat "
- "command to have a numeric type, but found type " <<
- typeName(timeElement.type()));
- }
-
- _isReplSet = doc[kIsReplSetFieldName].trueValue();
-
- // In order to support both the 3.0(V0) and 3.2(V1) heartbeats we must parse the OpTime
- // field based on its type. If it is a Date, we parse it as the timestamp and use
- // initialize's term argument to complete the OpTime type. If it is an Object, then it's
- // V1 and we construct an OpTime out of its nested fields.
- const BSONElement opTimeElement = doc[kOpTimeFieldName];
- if (opTimeElement.eoo()) {
- _opTimeSet = false;
- }
- else if (opTimeElement.type() == bsonTimestamp) {
- _opTimeSet = true;
- _opTime = OpTime(opTimeElement.timestamp(), term);
- }
- else if (opTimeElement.type() == Date) {
- _opTimeSet = true;
- _opTime = OpTime(Timestamp(opTimeElement.date()), term);
- }
- else if (opTimeElement.type() == Object) {
- BSONObj opTime = opTimeElement.Obj();
- Timestamp ts;
- Status status = bsonExtractTimestampField(opTime, kTimestampFieldName, &ts);
- if (!status.isOK())
- return status;
- long long extractedTerm;
- status = bsonExtractIntegerField(opTime, kTermFieldName, &extractedTerm);
- if (!status.isOK())
- return status;
-
- _opTimeSet = true;
- _opTime = OpTime(ts, extractedTerm);
- // since a v1 OpTime was in the response, the member must be part of a replset
- _isReplSet = true;
- }
- else {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kOpTimeFieldName << "\" field in response to replSetHeartbeat "
- "command to have type Date or Timestamp, but found type " <<
- typeName(opTimeElement.type()));
- }
-
- const BSONElement electableElement = doc[kIsElectableFieldName];
- if (electableElement.eoo()) {
- _electableSet = false;
- }
- else {
- _electableSet = true;
- _electable = electableElement.trueValue();
- }
-
- const BSONElement memberStateElement = doc[kMemberStateFieldName];
- if (memberStateElement.eoo()) {
- _stateSet = false;
- }
- else if (memberStateElement.type() != NumberInt &&
- memberStateElement.type() != NumberLong) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kMemberStateFieldName << "\" field in response to replSetHeartbeat "
- "command to have type NumberInt or NumberLong, but found type " <<
- typeName(memberStateElement.type()));
- }
- else {
- long long stateInt = memberStateElement.numberLong();
- if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
- return Status(ErrorCodes::BadValue, str::stream() << "Value for \"" <<
- kMemberStateFieldName << "\" in response to replSetHeartbeat is "
- "out of range; legal values are non-negative and no more than " <<
- MemberState::RS_MAX);
- }
- _stateSet = true;
- _state = MemberState(static_cast<int>(stateInt));
- }
-
- _stateDisagreement = doc[kHasStateDisagreementFieldName].trueValue();
-
-
- // Not required for the case of uninitialized members -- they have no config
- const BSONElement configVersionElement = doc[kConfigVersionFieldName];
-
- // If we have an optime then we must have a configVersion
- if (_opTimeSet && configVersionElement.eoo()) {
- return Status(ErrorCodes::NoSuchKey, str::stream() <<
- "Response to replSetHeartbeat missing required \"" <<
- kConfigVersionFieldName << "\" field even though initialized");
- }
-
- // If there is a "v" (config version) then it must be an int.
- if (!configVersionElement.eoo() && configVersionElement.type() != NumberInt) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kConfigVersionFieldName <<
- "\" field in response to replSetHeartbeat to have "
- "type NumberInt, but found " << typeName(configVersionElement.type()));
+ if (_electionTimeSet) {
+ builder->appendDate(kElectionTimeFieldName,
+ Date_t::fromMillisSinceEpoch(_electionTime.asLL()));
+ }
+ if (_configSet) {
+ *builder << kConfigFieldName << _config.toBSON();
+ }
+ if (_electableSet) {
+ *builder << kIsElectableFieldName << _electable;
+ }
+ if (_isReplSet) {
+ *builder << "rs" << _isReplSet;
+ }
+ if (_stateDisagreement) {
+ *builder << kHasStateDisagreementFieldName << _stateDisagreement;
+ }
+ if (_stateSet) {
+ builder->appendIntOrLL(kMemberStateFieldName, _state.s);
+ }
+ if (_configVersion != -1) {
+ *builder << kConfigVersionFieldName << _configVersion;
+ }
+ *builder << kHbMessageFieldName << _hbmsg;
+ if (!_setName.empty()) {
+ *builder << kReplSetFieldName << _setName;
+ }
+ if (!_syncingTo.empty()) {
+ *builder << kSyncSourceFieldName << _syncingTo.toString();
+ }
+ if (_hasDataSet) {
+ builder->append(kHasDataFieldName, _hasData);
+ }
+ if (_term != -1) {
+ builder->append(kTermFieldName, _term);
+ }
+ if (_primaryIdSet) {
+ builder->append(kPrimaryIdFieldName, _primaryId);
+ }
+ if (_opTimeSet) {
+ if (isProtocolVersionV1) {
+ BSONObjBuilder opTime(builder->subobjStart(kOpTimeFieldName));
+ opTime.append(kTimestampFieldName, _opTime.getTimestamp());
+ opTime.append(kTermFieldName, _opTime.getTerm());
+ opTime.done();
+ } else {
+ builder->appendDate(kOpTimeFieldName,
+ Date_t::fromMillisSinceEpoch(_opTime.getTimestamp().asLL()));
}
- _configVersion = configVersionElement.numberInt();
+ }
+}
+
+BSONObj ReplSetHeartbeatResponse::toBSON(bool isProtocolVersionV1) const {
+ BSONObjBuilder builder;
+ addToBSON(&builder, isProtocolVersionV1);
+ return builder.obj();
+}
+
+Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term) {
+    // Old versions set this even when the response was not "ok"
+ _mismatch = doc[kMismatchFieldName].trueValue();
+ if (_mismatch)
+ return Status(ErrorCodes::InconsistentReplicaSetNames, "replica set name doesn't match.");
+
+    // Old versions sometimes set the replica set name ("set") even when returning ok:0
+ const BSONElement replSetNameElement = doc[kReplSetFieldName];
+ if (replSetNameElement.eoo()) {
+ _setName.clear();
+ } else if (replSetNameElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kReplSetFieldName
+ << "\" field in response to replSetHeartbeat to have "
+ "type String, but found "
+ << typeName(replSetNameElement.type()));
+ } else {
+ _setName = replSetNameElement.String();
+ }
- const BSONElement hbMsgElement = doc[kHbMessageFieldName];
- if (hbMsgElement.eoo()) {
- _hbmsg.clear();
- }
- else if (hbMsgElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kHbMessageFieldName << "\" field in response to replSetHeartbeat to have "
- "type String, but found " << typeName(hbMsgElement.type()));
- }
- else {
- _hbmsg = hbMsgElement.String();
- }
+ if (_setName.empty() && !doc[kOkFieldName].trueValue()) {
+ std::string errMsg = doc[kErrMsgFieldName].str();
- const BSONElement syncingToElement = doc[kSyncSourceFieldName];
- if (syncingToElement.eoo()) {
- _syncingTo = HostAndPort();
- }
- else if (syncingToElement.type() != String) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kSyncSourceFieldName << "\" field in response to replSetHeartbeat to "
- "have type String, but found " << typeName(syncingToElement.type()));
- }
- else {
- _syncingTo = HostAndPort(syncingToElement.String());
- }
+ BSONElement errCodeElem = doc[kErrorCodeFieldName];
+ if (errCodeElem.ok()) {
+ if (!errCodeElem.isNumber())
+ return Status(ErrorCodes::BadValue, "Error code is not a number!");
- const BSONElement rsConfigElement = doc[kConfigFieldName];
- if (rsConfigElement.eoo()) {
- _configSet = false;
- _config = ReplicaSetConfig();
- return Status::OK();
+ int errorCode = errCodeElem.numberInt();
+ return Status(ErrorCodes::Error(errorCode), errMsg);
}
- else if (rsConfigElement.type() != Object) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected \"" <<
- kConfigFieldName << "\" in response to replSetHeartbeat to have type "
- "Object, but found " << typeName(rsConfigElement.type()));
- }
- _configSet = true;
+ return Status(ErrorCodes::UnknownError, errMsg);
+ }
- return _config.initialize(rsConfigElement.Obj());
+ const BSONElement hasDataElement = doc[kHasDataFieldName];
+ _hasDataSet = !hasDataElement.eoo();
+ _hasData = hasDataElement.trueValue();
+
+ const BSONElement electionTimeElement = doc[kElectionTimeFieldName];
+ if (electionTimeElement.eoo()) {
+ _electionTimeSet = false;
+ } else if (electionTimeElement.type() == bsonTimestamp) {
+ _electionTimeSet = true;
+ _electionTime = electionTimeElement.timestamp();
+ } else if (electionTimeElement.type() == Date) {
+ _electionTimeSet = true;
+ _electionTime = Timestamp(electionTimeElement.date());
+ } else {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kElectionTimeFieldName
+ << "\" field in response to replSetHeartbeat "
+ "command to have type Date or Timestamp, but found type "
+ << typeName(electionTimeElement.type()));
}
- MemberState ReplSetHeartbeatResponse::getState() const {
- invariant(_stateSet);
- return _state;
+ const BSONElement timeElement = doc[kTimeFieldName];
+ if (timeElement.eoo()) {
+ _timeSet = false;
+ } else if (timeElement.isNumber()) {
+ _timeSet = true;
+ _time = Seconds(timeElement.numberLong());
+ } else {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kTimeFieldName
+ << "\" field in response to replSetHeartbeat "
+ "command to have a numeric type, but found type "
+ << typeName(timeElement.type()));
}
- Timestamp ReplSetHeartbeatResponse::getElectionTime() const {
- invariant(_electionTimeSet);
- return _electionTime;
+ _isReplSet = doc[kIsReplSetFieldName].trueValue();
+
+    // In order to support both the 3.0 (V0) and 3.2 (V1) heartbeats, we must parse the OpTime
+ // field based on its type. If it is a Date, we parse it as the timestamp and use
+ // initialize's term argument to complete the OpTime type. If it is an Object, then it's
+ // V1 and we construct an OpTime out of its nested fields.
+ const BSONElement opTimeElement = doc[kOpTimeFieldName];
+ if (opTimeElement.eoo()) {
+ _opTimeSet = false;
+ } else if (opTimeElement.type() == bsonTimestamp) {
+ _opTimeSet = true;
+ _opTime = OpTime(opTimeElement.timestamp(), term);
+ } else if (opTimeElement.type() == Date) {
+ _opTimeSet = true;
+ _opTime = OpTime(Timestamp(opTimeElement.date()), term);
+ } else if (opTimeElement.type() == Object) {
+ BSONObj opTime = opTimeElement.Obj();
+ Timestamp ts;
+ Status status = bsonExtractTimestampField(opTime, kTimestampFieldName, &ts);
+ if (!status.isOK())
+ return status;
+ long long extractedTerm;
+ status = bsonExtractIntegerField(opTime, kTermFieldName, &extractedTerm);
+ if (!status.isOK())
+ return status;
+
+ _opTimeSet = true;
+ _opTime = OpTime(ts, extractedTerm);
+        // Since a V1 OpTime was in the response, the member must be part of a replica set.
+ _isReplSet = true;
+ } else {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kOpTimeFieldName
+ << "\" field in response to replSetHeartbeat "
+ "command to have type Date or Timestamp, but found type "
+ << typeName(opTimeElement.type()));
}
- bool ReplSetHeartbeatResponse::isElectable() const {
- invariant(_electableSet);
- return _electable;
+ const BSONElement electableElement = doc[kIsElectableFieldName];
+ if (electableElement.eoo()) {
+ _electableSet = false;
+ } else {
+ _electableSet = true;
+ _electable = electableElement.trueValue();
}
- Seconds ReplSetHeartbeatResponse::getTime() const {
- invariant(_timeSet);
- return _time;
+ const BSONElement memberStateElement = doc[kMemberStateFieldName];
+ if (memberStateElement.eoo()) {
+ _stateSet = false;
+ } else if (memberStateElement.type() != NumberInt && memberStateElement.type() != NumberLong) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "Expected \"" << kMemberStateFieldName
+ << "\" field in response to replSetHeartbeat "
+ "command to have type NumberInt or NumberLong, but found type "
+ << typeName(memberStateElement.type()));
+ } else {
+ long long stateInt = memberStateElement.numberLong();
+ if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Value for \"" << kMemberStateFieldName
+ << "\" in response to replSetHeartbeat is "
+ "out of range; legal values are non-negative and no more than "
+ << MemberState::RS_MAX);
+ }
+ _stateSet = true;
+ _state = MemberState(static_cast<int>(stateInt));
}
- const ReplicaSetConfig& ReplSetHeartbeatResponse::getConfig() const {
- invariant(_configSet);
- return _config;
+ _stateDisagreement = doc[kHasStateDisagreementFieldName].trueValue();
+
+
+ // Not required for the case of uninitialized members -- they have no config
+ const BSONElement configVersionElement = doc[kConfigVersionFieldName];
+
+ // If we have an optime then we must have a configVersion
+ if (_opTimeSet && configVersionElement.eoo()) {
+ return Status(ErrorCodes::NoSuchKey,
+ str::stream() << "Response to replSetHeartbeat missing required \""
+ << kConfigVersionFieldName
+ << "\" field even though initialized");
}
- long long ReplSetHeartbeatResponse::getPrimaryId() const {
- invariant(_primaryIdSet);
- return _primaryId;
+ // If there is a "v" (config version) then it must be an int.
+ if (!configVersionElement.eoo() && configVersionElement.type() != NumberInt) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kConfigVersionFieldName
+ << "\" field in response to replSetHeartbeat to have "
+ "type NumberInt, but found "
+ << typeName(configVersionElement.type()));
+ }
+ _configVersion = configVersionElement.numberInt();
+
+ const BSONElement hbMsgElement = doc[kHbMessageFieldName];
+ if (hbMsgElement.eoo()) {
+ _hbmsg.clear();
+ } else if (hbMsgElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kHbMessageFieldName
+ << "\" field in response to replSetHeartbeat to have "
+ "type String, but found " << typeName(hbMsgElement.type()));
+ } else {
+ _hbmsg = hbMsgElement.String();
}
- OpTime ReplSetHeartbeatResponse::getOpTime() const {
- invariant(_opTimeSet);
- return _opTime;
+ const BSONElement syncingToElement = doc[kSyncSourceFieldName];
+ if (syncingToElement.eoo()) {
+ _syncingTo = HostAndPort();
+ } else if (syncingToElement.type() != String) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kSyncSourceFieldName
+ << "\" field in response to replSetHeartbeat to "
+ "have type String, but found "
+ << typeName(syncingToElement.type()));
+ } else {
+ _syncingTo = HostAndPort(syncingToElement.String());
}
-} // namespace repl
-} // namespace mongo
+ const BSONElement rsConfigElement = doc[kConfigFieldName];
+ if (rsConfigElement.eoo()) {
+ _configSet = false;
+ _config = ReplicaSetConfig();
+ return Status::OK();
+ } else if (rsConfigElement.type() != Object) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kConfigFieldName
+ << "\" in response to replSetHeartbeat to have type "
+ "Object, but found " << typeName(rsConfigElement.type()));
+ }
+ _configSet = true;
+
+ return _config.initialize(rsConfigElement.Obj());
+}
+
+MemberState ReplSetHeartbeatResponse::getState() const {
+ invariant(_stateSet);
+ return _state;
+}
+
+Timestamp ReplSetHeartbeatResponse::getElectionTime() const {
+ invariant(_electionTimeSet);
+ return _electionTime;
+}
+
+bool ReplSetHeartbeatResponse::isElectable() const {
+ invariant(_electableSet);
+ return _electable;
+}
+
+Seconds ReplSetHeartbeatResponse::getTime() const {
+ invariant(_timeSet);
+ return _time;
+}
+
+const ReplicaSetConfig& ReplSetHeartbeatResponse::getConfig() const {
+ invariant(_configSet);
+ return _config;
+}
+
+long long ReplSetHeartbeatResponse::getPrimaryId() const {
+ invariant(_primaryIdSet);
+ return _primaryId;
+}
+
+OpTime ReplSetHeartbeatResponse::getOpTime() const {
+ invariant(_opTimeSet);
+ return _opTime;
+}
+
+} // namespace repl
+} // namespace mongo
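
The serialization change above is the heart of the dual-protocol support: with isProtocolVersionV1 the opTime is written as a {ts, t} subdocument, while the V0 form flattens it to a Date and relies on initialize()'s term argument to rebuild the full OpTime. A minimal round-trip sketch under that assumption (the function name below is illustrative, not part of this patch):

    #include "mongo/db/repl/repl_set_heartbeat_response.h"
    #include "mongo/util/assert_util.h"

    using namespace mongo;
    using namespace mongo::repl;

    void opTimeWireFormatSketch() {
        ReplSetHeartbeatResponse resp;
        resp.setConfigVersion(2);  // initialize() requires "v" whenever an opTime is present
        resp.setSetName("rs0");
        resp.setOpTime(OpTime(Timestamp(100, 1), 1));

        // V1 (3.2): opTime serializes as a subdocument {ts: <Timestamp>, t: <term>}.
        BSONObj v1 = resp.toBSON(true);
        invariant(v1["opTime"].type() == Object);

        // V0 (3.0): opTime serializes as a Date carrying the raw Timestamp bits; the term
        // is not on the wire, so the caller hands it back to initialize().
        BSONObj v0 = resp.toBSON(false);
        invariant(v0["opTime"].type() == Date);

        ReplSetHeartbeatResponse roundTrip;
        invariant(roundTrip.initialize(v0, 1).isOK());
        invariant(roundTrip.getOpTime() == resp.getOpTime());
    }
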
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.h b/src/mongo/db/repl/repl_set_heartbeat_response.h
index 8d8fa04b4b5..b3fba2a4803 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.h
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.h
@@ -37,169 +37,244 @@
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class Status;
+class BSONObj;
+class BSONObjBuilder;
+class Status;
namespace repl {
+/**
+ * Response structure for the replSetHeartbeat command.
+ */
+class ReplSetHeartbeatResponse {
+public:
+ /**
+ * Initializes this ReplSetHeartbeatResponse from the contents of "doc".
+ * "term" is only used to complete a V0 OpTime (which is really a Timestamp).
+ */
+ Status initialize(const BSONObj& doc, long long term);
+
+ /**
+ * Appends all non-default values to "builder".
+ */
+ void addToBSON(BSONObjBuilder* builder, bool isProtocolVersionV1) const;
+
+ /**
+     * Returns a BSONObj consisting of all non-default values.
+ */
+ BSONObj toBSON(bool isProtocolVersionV1) const;
+
+ /**
+ * Returns toBSON().toString()
+ */
+ const std::string toString() const {
+ return toBSON(true).toString();
+ }
+
+ bool hasDataSet() const {
+ return _hasDataSet;
+ }
+ bool hasData() const {
+ return _hasData;
+ }
+ bool isMismatched() const {
+ return _mismatch;
+ }
+ bool isReplSet() const {
+ return _isReplSet;
+ }
+ bool isStateDisagreement() const {
+ return _stateDisagreement;
+ }
+ const std::string& getReplicaSetName() const {
+ return _setName;
+ }
+ bool hasState() const {
+ return _stateSet;
+ }
+ MemberState getState() const;
+ bool hasElectionTime() const {
+ return _electionTimeSet;
+ }
+ Timestamp getElectionTime() const;
+ bool hasIsElectable() const {
+ return _electableSet;
+ }
+ bool isElectable() const;
+ const std::string& getHbMsg() const {
+ return _hbmsg;
+ }
+ bool hasTime() const {
+ return _timeSet;
+ }
+ Seconds getTime() const;
+ const HostAndPort& getSyncingTo() const {
+ return _syncingTo;
+ }
+ int getConfigVersion() const {
+ return _configVersion;
+ }
+ bool hasConfig() const {
+ return _configSet;
+ }
+ const ReplicaSetConfig& getConfig() const;
+ bool hasPrimaryId() const {
+ return _primaryIdSet;
+ }
+ long long getPrimaryId() const;
+ long long getTerm() const {
+ return _term;
+ }
+ bool hasOpTime() const {
+ return _opTimeSet;
+ }
+ OpTime getOpTime() const;
+
+ /**
+ * Sets _mismatch to true.
+ */
+ void noteMismatched() {
+ _mismatch = true;
+ }
+
+ /**
+ * Sets _isReplSet to true.
+ */
+ void noteReplSet() {
+ _isReplSet = true;
+ }
+
+ /**
+ * Sets _stateDisagreement to true.
+ */
+ void noteStateDisagreement() {
+ _stateDisagreement = true;
+ }
+
+ /**
+     * Sets _hasData to true, and _hasDataSet to true to indicate that _hasData has been modified.
+ */
+ void noteHasData() {
+ _hasDataSet = _hasData = true;
+ }
+
/**
- * Response structure for the replSetHeartbeat command.
- */
- class ReplSetHeartbeatResponse {
- public:
-
- /**
- * Initializes this ReplSetHeartbeatResponse from the contents of "doc".
- * "term" is only used to complete a V0 OpTime (which is really a Timestamp).
- */
- Status initialize(const BSONObj& doc, long long term);
-
- /**
- * Appends all non-default values to "builder".
- */
- void addToBSON(BSONObjBuilder* builder, bool isProtocolVersionV1) const;
-
- /**
- * Returns a BSONObj consisting of all non-default values to "builder".
- */
- BSONObj toBSON(bool isProtocolVersionV1) const;
-
- /**
- * Returns toBSON().toString()
- */
- const std::string toString() const { return toBSON(true).toString(); }
-
- bool hasDataSet() const { return _hasDataSet; }
- bool hasData() const { return _hasData; }
- bool isMismatched() const { return _mismatch; }
- bool isReplSet() const { return _isReplSet; }
- bool isStateDisagreement() const { return _stateDisagreement; }
- const std::string& getReplicaSetName() const { return _setName; }
- bool hasState() const { return _stateSet; }
- MemberState getState() const;
- bool hasElectionTime() const { return _electionTimeSet; }
- Timestamp getElectionTime() const;
- bool hasIsElectable() const { return _electableSet; }
- bool isElectable() const;
- const std::string& getHbMsg() const { return _hbmsg; }
- bool hasTime() const { return _timeSet; }
- Seconds getTime() const;
- const HostAndPort& getSyncingTo() const { return _syncingTo; }
- int getConfigVersion() const { return _configVersion; }
- bool hasConfig() const { return _configSet; }
- const ReplicaSetConfig& getConfig() const;
- bool hasPrimaryId() const { return _primaryIdSet; }
- long long getPrimaryId() const;
- long long getTerm() const { return _term; }
- bool hasOpTime() const { return _opTimeSet; }
- OpTime getOpTime() const;
-
- /**
- * Sets _mismatch to true.
- */
- void noteMismatched() { _mismatch = true; }
-
- /**
- * Sets _isReplSet to true.
- */
- void noteReplSet() { _isReplSet = true; }
-
- /**
- * Sets _stateDisagreement to true.
- */
- void noteStateDisagreement() { _stateDisagreement = true; }
-
- /**
- * Sets _hasData to true, and _hasDataSet to true to indicate _hasData has been modified
- */
- void noteHasData() { _hasDataSet = _hasData = true;}
-
- /**
- * Sets _setName to "name".
- */
- void setSetName(std::string name) { _setName = name; }
-
- /**
- * Sets _state to "state".
- */
- void setState(MemberState state) { _stateSet = true; _state = state; }
-
- /**
- * Sets the optional "electionTime" field to the given Timestamp.
- */
- void setElectionTime(Timestamp time) { _electionTimeSet = true; _electionTime = time; }
-
- /**
- * Sets _electable to "electable" and sets _electableSet to true to indicate
- * that the value of _electable has been modified.
- */
- void setElectable(bool electable) { _electableSet = true; _electable = electable; }
-
- /**
- * Sets _hbmsg to "hbmsg".
- */
- void setHbMsg(std::string hbmsg) { _hbmsg = hbmsg; }
-
- /**
- * Sets the optional "time" field of the response to "theTime", which is
- * a count of seconds since the UNIX epoch.
- */
- void setTime(Seconds theTime) { _timeSet = true; _time = theTime; }
-
- /**
- * Sets _syncingTo to "syncingTo".
- */
- void setSyncingTo(const HostAndPort& syncingTo) { _syncingTo = syncingTo; }
-
- /**
- * Sets _configVersion to "configVersion".
- */
- void setConfigVersion(int configVersion) { _configVersion = configVersion; }
-
- /**
- * Initializes _config with "config".
- */
- void setConfig(const ReplicaSetConfig& config) { _configSet = true; _config = config; }
-
- void setPrimaryId(long long primaryId) { _primaryIdSet = true; _primaryId = primaryId; }
- void setOpTime(OpTime time) { _opTimeSet = true; _opTime = time; }
- void setTerm(long long term) { _term = term; }
- private:
- bool _electionTimeSet = false;
- Timestamp _electionTime;
-
- bool _timeSet = false;
- Seconds _time = Seconds(0); // Seconds since UNIX epoch.
-
- bool _opTimeSet = false;
- OpTime _opTime;
-
- bool _electableSet = false;
- bool _electable = false;
-
- bool _hasDataSet = false;
- bool _hasData = false;
-
- bool _mismatch = false;
- bool _isReplSet = false;
- bool _stateDisagreement = false;
-
- bool _stateSet = false;
- MemberState _state;
-
- int _configVersion = -1;
- std::string _setName;
- std::string _hbmsg;
- HostAndPort _syncingTo;
-
- bool _configSet = false;
- ReplicaSetConfig _config;
-
- bool _primaryIdSet = false;
- long long _primaryId = -1;
- long long _term = -1;
- };
-
-} // namespace repl
-} // namespace mongo
+ * Sets _setName to "name".
+ */
+ void setSetName(std::string name) {
+ _setName = name;
+ }
+
+ /**
+ * Sets _state to "state".
+ */
+ void setState(MemberState state) {
+ _stateSet = true;
+ _state = state;
+ }
+
+ /**
+ * Sets the optional "electionTime" field to the given Timestamp.
+ */
+ void setElectionTime(Timestamp time) {
+ _electionTimeSet = true;
+ _electionTime = time;
+ }
+
+ /**
+ * Sets _electable to "electable" and sets _electableSet to true to indicate
+ * that the value of _electable has been modified.
+ */
+ void setElectable(bool electable) {
+ _electableSet = true;
+ _electable = electable;
+ }
+
+ /**
+ * Sets _hbmsg to "hbmsg".
+ */
+ void setHbMsg(std::string hbmsg) {
+ _hbmsg = hbmsg;
+ }
+
+ /**
+ * Sets the optional "time" field of the response to "theTime", which is
+ * a count of seconds since the UNIX epoch.
+ */
+ void setTime(Seconds theTime) {
+ _timeSet = true;
+ _time = theTime;
+ }
+
+ /**
+ * Sets _syncingTo to "syncingTo".
+ */
+ void setSyncingTo(const HostAndPort& syncingTo) {
+ _syncingTo = syncingTo;
+ }
+
+ /**
+ * Sets _configVersion to "configVersion".
+ */
+ void setConfigVersion(int configVersion) {
+ _configVersion = configVersion;
+ }
+
+ /**
+ * Initializes _config with "config".
+ */
+ void setConfig(const ReplicaSetConfig& config) {
+ _configSet = true;
+ _config = config;
+ }
+
+ void setPrimaryId(long long primaryId) {
+ _primaryIdSet = true;
+ _primaryId = primaryId;
+ }
+ void setOpTime(OpTime time) {
+ _opTimeSet = true;
+ _opTime = time;
+ }
+ void setTerm(long long term) {
+ _term = term;
+ }
+
+private:
+ bool _electionTimeSet = false;
+ Timestamp _electionTime;
+
+ bool _timeSet = false;
+ Seconds _time = Seconds(0); // Seconds since UNIX epoch.
+
+ bool _opTimeSet = false;
+ OpTime _opTime;
+
+ bool _electableSet = false;
+ bool _electable = false;
+
+ bool _hasDataSet = false;
+ bool _hasData = false;
+
+ bool _mismatch = false;
+ bool _isReplSet = false;
+ bool _stateDisagreement = false;
+
+ bool _stateSet = false;
+ MemberState _state;
+
+ int _configVersion = -1;
+ std::string _setName;
+ std::string _hbmsg;
+ HostAndPort _syncingTo;
+
+ bool _configSet = false;
+ ReplicaSetConfig _config;
+
+ bool _primaryIdSet = false;
+ long long _primaryId = -1;
+ long long _term = -1;
+};
+
+} // namespace repl
+} // namespace mongo
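
As before, every optional getter in this class invariant()s on its corresponding _xxxSet flag, so callers are expected to consult the matching hasX() accessor first. A hypothetical consumer sketch (printHeartbeatSummary is not part of the patch):

    #include <iostream>
    #include <string>

    #include "mongo/db/repl/repl_set_heartbeat_response.h"

    // Gate every optional field behind its hasX() accessor; calling a getter whose
    // "set" flag is false would trip the invariant in the implementation above.
    void printHeartbeatSummary(const mongo::repl::ReplSetHeartbeatResponse& resp) {
        std::string summary = "set=" + resp.getReplicaSetName();
        if (resp.hasState()) {
            summary += " state=" + resp.getState().toString();
        }
        if (resp.hasElectionTime()) {
            summary += " electionTime=" + resp.getElectionTime().toString();
        }
        if (resp.hasOpTime()) {
            summary += " opTime=" + resp.getOpTime().toString();
        }
        std::cout << summary << std::endl;
    }
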
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
index aed87536dcc..45c8dba3e1f 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
@@ -37,730 +37,732 @@ namespace mongo {
namespace repl {
namespace {
- using std::unique_ptr;
-
- bool stringContains(const std::string &haystack, const std::string& needle) {
- return haystack.find(needle) != std::string::npos;
- }
-
- TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
- int fieldsSet = 2;
- ReplSetHeartbeatResponse hbResponse;
- ReplSetHeartbeatResponse hbResponseObjRoundTripChecker;
- ASSERT_EQUALS(false, hbResponse.hasState());
- ASSERT_EQUALS(false, hbResponse.hasElectionTime());
- ASSERT_EQUALS(false, hbResponse.hasIsElectable());
- ASSERT_EQUALS(false, hbResponse.hasTime());
- ASSERT_EQUALS(false, hbResponse.hasOpTime());
- ASSERT_EQUALS(false, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("", hbResponse.getReplicaSetName());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(-1, hbResponse.getConfigVersion());
-
- BSONObj hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
-
- Status initializeResult = Status::OK();
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toString());
-
- // set version
- hbResponse.setConfigVersion(1);
- ++fieldsSet;
- ASSERT_EQUALS(false, hbResponse.hasState());
- ASSERT_EQUALS(false, hbResponse.hasElectionTime());
- ASSERT_EQUALS(false, hbResponse.hasIsElectable());
- ASSERT_EQUALS(false, hbResponse.hasTime());
- ASSERT_EQUALS(false, hbResponse.hasOpTime());
- ASSERT_EQUALS(false, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("", hbResponse.getReplicaSetName());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toString());
-
- // set setname
- hbResponse.setSetName("rs0");
- ++fieldsSet;
- ASSERT_EQUALS(false, hbResponse.hasState());
- ASSERT_EQUALS(false, hbResponse.hasElectionTime());
- ASSERT_EQUALS(false, hbResponse.hasIsElectable());
- ASSERT_EQUALS(false, hbResponse.hasTime());
- ASSERT_EQUALS(false, hbResponse.hasOpTime());
- ASSERT_EQUALS(false, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toString());
-
- // set electionTime
- hbResponse.setElectionTime(Timestamp(10,0));
- ++fieldsSet;
- ASSERT_EQUALS(false, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(false, hbResponse.hasIsElectable());
- ASSERT_EQUALS(false, hbResponse.hasTime());
- ASSERT_EQUALS(false, hbResponse.hasOpTime());
- ASSERT_EQUALS(false, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toString());
-
- // set opTime
- hbResponse.setOpTime(OpTime(Timestamp(10), 0));
- ++fieldsSet;
- ASSERT_EQUALS(false, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(false, hbResponse.hasIsElectable());
- ASSERT_EQUALS(false, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(false, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set time
- hbResponse.setTime(Seconds(10));
- ++fieldsSet;
- ASSERT_EQUALS(false, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(false, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(false, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
- ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set electable
- hbResponse.setElectable(true);
- ++fieldsSet;
- ASSERT_EQUALS(false, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(true, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(false, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
- ASSERT_EQUALS(true, hbResponse.isElectable());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
- ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
- ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set config
- ReplicaSetConfig config;
- hbResponse.setConfig(config);
- ++fieldsSet;
- ASSERT_EQUALS(false, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(true, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(true, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
- ASSERT_EQUALS(true, hbResponse.isElectable());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
- ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
- ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set state
- hbResponse.setState(MemberState(MemberState::RS_SECONDARY));
- ++fieldsSet;
- ASSERT_EQUALS(true, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(true, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(true, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
- hbResponse.getState().toString());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
- ASSERT_EQUALS(true, hbResponse.isElectable());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
- ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
- ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
- ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set stateDisagreement
- hbResponse.noteStateDisagreement();
- ++fieldsSet;
- ASSERT_EQUALS(true, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(true, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(true, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(false, hbResponse.isReplSet());
- ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
- hbResponse.getState().toString());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
- ASSERT_EQUALS(true, hbResponse.isElectable());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
- ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
- ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
- ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
- ASSERT_EQUALS(false, hbResponseObj["mismatch"].trueValue());
- ASSERT_EQUALS(true, hbResponseObj["stateDisagreement"].trueValue());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set replSet
- hbResponse.noteReplSet();
- ++fieldsSet;
- ASSERT_EQUALS(true, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(true, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(true, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(true, hbResponse.isReplSet());
- ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
- hbResponse.getState().toString());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
- ASSERT_EQUALS(true, hbResponse.isElectable());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
- ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
- ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
- ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
- ASSERT_EQUALS(false, hbResponseObj["mismatch"].trueValue());
- ASSERT_EQUALS(true, hbResponseObj["stateDisagreement"].trueValue());
- ASSERT_EQUALS(true, hbResponseObj["rs"].trueValue());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set syncingTo
- hbResponse.setSyncingTo(HostAndPort("syncTarget"));
- ++fieldsSet;
- ASSERT_EQUALS(true, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(true, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(true, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(true, hbResponse.isReplSet());
- ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
- hbResponse.getState().toString());
- ASSERT_EQUALS("", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort("syncTarget"), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
- ASSERT_EQUALS(true, hbResponse.isElectable());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
- ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
- ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
- ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
- ASSERT_EQUALS(false, hbResponseObj["mismatch"].trueValue());
- ASSERT_EQUALS(true, hbResponseObj["stateDisagreement"].trueValue());
- ASSERT_EQUALS(true, hbResponseObj["rs"].trueValue());
- ASSERT_EQUALS("syncTarget:27017", hbResponseObj["syncingTo"].String());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set hbmsg
- hbResponse.setHbMsg("lub dub");
- ASSERT_EQUALS(true, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(true, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(true, hbResponse.hasConfig());
- ASSERT_EQUALS(false, hbResponse.isMismatched());
- ASSERT_EQUALS(true, hbResponse.isReplSet());
- ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
- hbResponse.getState().toString());
- ASSERT_EQUALS("lub dub", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort("syncTarget"), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
- ASSERT_EQUALS(true, hbResponse.isElectable());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
- ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
- ASSERT_EQUALS("lub dub", hbResponseObj["hbmsg"].String());
- ASSERT_EQUALS(1, hbResponseObj["v"].Number());
- ASSERT_EQUALS(Timestamp(10,0), hbResponseObj["electionTime"].timestamp());
- ASSERT_EQUALS(Timestamp(0,10), hbResponseObj["opTime"].timestamp());
- ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
- ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
- ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
- ASSERT_EQUALS(false, hbResponseObj["mismatch"].trueValue());
- ASSERT_EQUALS(true, hbResponseObj["stateDisagreement"].trueValue());
- ASSERT_EQUALS(true, hbResponseObj["rs"].trueValue());
- ASSERT_EQUALS("syncTarget:27017", hbResponseObj["syncingTo"].String());
-
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(hbResponseObj.toString(),
- hbResponseObjRoundTripChecker.toBSON(false).toString());
-
- // set mismatched
- hbResponse.noteMismatched();
- ASSERT_EQUALS(true, hbResponse.hasState());
- ASSERT_EQUALS(true, hbResponse.hasElectionTime());
- ASSERT_EQUALS(true, hbResponse.hasIsElectable());
- ASSERT_EQUALS(true, hbResponse.hasTime());
- ASSERT_EQUALS(true, hbResponse.hasOpTime());
- ASSERT_EQUALS(true, hbResponse.hasConfig());
- ASSERT_EQUALS(true, hbResponse.isMismatched());
- ASSERT_EQUALS(true, hbResponse.isReplSet());
- ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
- ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
- ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
- hbResponse.getState().toString());
- ASSERT_EQUALS("lub dub", hbResponse.getHbMsg());
- ASSERT_EQUALS(HostAndPort("syncTarget"), hbResponse.getSyncingTo());
- ASSERT_EQUALS(1, hbResponse.getConfigVersion());
- ASSERT_EQUALS(Timestamp(10,0), hbResponse.getElectionTime());
- ASSERT_EQUALS(OpTime(Timestamp(0,10), 0), hbResponse.getOpTime());
- ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
- ASSERT_EQUALS(true, hbResponse.isElectable());
- ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
-
- hbResponseObj = hbResponse.toBSON(false);
- ASSERT_EQUALS(2, hbResponseObj.nFields());
- ASSERT_EQUALS(true, hbResponseObj["mismatch"].trueValue());
-
- // NOTE: Does not check round-trip. Once noteMismached is set the bson will return an error
- // from initialize parsing.
- initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
- ASSERT_NOT_EQUALS(Status::OK(), initializeResult);
- ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, initializeResult.code());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeWrongElectionTimeType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 << "electionTime" << "hello");
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"electionTime\" field in response to replSetHeartbeat command to "
- "have type Date or Timestamp, but found type String",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeWrongTimeType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 << "time" << "hello");
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"time\" field in response to replSetHeartbeat command to "
- "have a numeric type, but found type String",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeWrongOpTimeType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 << "opTime" << "hello");
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"opTime\" field in response to replSetHeartbeat command to "
- "have type Date or Timestamp, but found type String",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeMemberStateWrongType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 << "state" << "hello");
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"state\" field in response to replSetHeartbeat command to "
- "have type NumberInt or NumberLong, but found type String",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooLow) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 << "state" << -1);
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::BadValue, result);
- ASSERT_EQUALS("Value for \"state\" in response to replSetHeartbeat is out of range; "
- "legal values are non-negative and no more than 10",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooHigh) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 << "state" << 11);
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::BadValue, result);
- ASSERT_EQUALS("Value for \"state\" in response to replSetHeartbeat is out of range; "
- "legal values are non-negative and no more than 10",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeVersionWrongType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 <<
- "v" << "hello");
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"v\" field in response to replSetHeartbeat to "
- "have type NumberInt, but found String",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeReplSetNameWrongType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 <<
- "v" << 2 << // needs a version to get this far in initialize()
- "set" << 4);
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"set\" field in response to replSetHeartbeat to "
- "have type String, but found NumberInt32",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeHeartbeatMeessageWrongType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 <<
- "v" << 2 << // needs a version to get this far in initialize()
- "hbmsg" << 4);
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"hbmsg\" field in response to replSetHeartbeat to "
- "have type String, but found NumberInt32",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 <<
- "v" << 2 << // needs a version to get this far in initialize()
- "syncingTo" << 4);
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"syncingTo\" field in response to replSetHeartbeat to "
- "have type String, but found NumberInt32",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 <<
- "v" << 2 << // needs a version to get this far in initialize()
- "config" << 4);
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
- ASSERT_EQUALS("Expected \"config\" in response to replSetHeartbeat to "
- "have type Object, but found NumberInt32",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeBadConfig) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 1.0 <<
- "v" << 2 << // needs a version to get this far in initialize()
- "config" << BSON("illegalFieldName" << 2));
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::BadValue, result);
- ASSERT_EQUALS("Unexpected field illegalFieldName in replica set configuration",
- result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeBothElectionTimeTypesSameResult) {
- ReplSetHeartbeatResponse hbResponseDate;
- ReplSetHeartbeatResponse hbResponseTimestamp;
- BSONObjBuilder initializerDate;
- BSONObjBuilder initializerTimestamp;
- Date_t electionTime = Date_t::fromMillisSinceEpoch(974132);
-
- initializerDate.append("ok", 1.0);
- initializerDate.append("v", 1);
- initializerDate.appendDate("electionTime", electionTime);
- Status result = hbResponseDate.initialize(initializerDate.obj(), 0);
- ASSERT_EQUALS(Status::OK(), result);
-
- initializerTimestamp.append("ok", 1.0);
- initializerTimestamp.append("v", 1);
- initializerTimestamp.appendTimestamp("electionTime", electionTime.toULL());
- result = hbResponseTimestamp.initialize(initializerTimestamp.obj(), 0);
- ASSERT_EQUALS(Status::OK(), result);
-
- ASSERT_EQUALS(hbResponseTimestamp.getElectionTime(), hbResponseTimestamp.getElectionTime());
- }
-
- TEST(ReplSetHeartbeatResponse, InitializeBothOpTimeTypesSameResult) {
- ReplSetHeartbeatResponse hbResponseDate;
- ReplSetHeartbeatResponse hbResponseTimestamp;
- BSONObjBuilder initializerDate;
- BSONObjBuilder initializerTimestamp;
- Date_t opTime = Date_t::fromMillisSinceEpoch(974132);
-
- initializerDate.append("ok", 1.0);
- initializerDate.append("v", 1);
- initializerDate.appendDate("opTime", opTime);
- Status result = hbResponseDate.initialize(initializerDate.obj(), 0);
- ASSERT_EQUALS(Status::OK(), result);
-
- initializerTimestamp.append("ok", 1.0);
- initializerTimestamp.append("v", 1);
- initializerTimestamp.appendTimestamp("opTime", opTime.toULL());
- result = hbResponseTimestamp.initialize(initializerTimestamp.obj(), 0);
- ASSERT_EQUALS(Status::OK(), result);
-
- ASSERT_EQUALS(hbResponseTimestamp.getOpTime(), hbResponseTimestamp.getOpTime());
- }
-
- TEST(ReplSetHeartbeatResponse, NoConfigStillInitializing) {
- ReplSetHeartbeatResponse hbResp;
- std::string msg = "still initializing";
- Status result = hbResp.initialize(BSON("ok" << 1.0 <<
- "rs" << true <<
- "hbmsg" << msg), 0);
- ASSERT_EQUALS(Status::OK(), result);
- ASSERT_EQUALS(true, hbResp.isReplSet());
- ASSERT_EQUALS(msg, hbResp.getHbMsg());
- }
-
- TEST(ReplSetHeartbeatResponse, InvalidResponseOpTimeMissesConfigVersion) {
- ReplSetHeartbeatResponse hbResp;
- std::string msg = "still initializing";
- Status result = hbResp.initialize(BSON("ok" << 1.0 <<
- "opTime" << Timestamp()), 0);
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.code());
- ASSERT_TRUE(stringContains(result.reason(), "\"v\""))
- << result.reason() << " doesn't contain 'v' field required error msg";
- }
-
- TEST(ReplSetHeartbeatResponse, MismatchedRepliSetNames) {
- ReplSetHeartbeatResponse hbResponse;
- BSONObj initializerObj = BSON("ok" << 0.0 << "mismatch" << true);
- Status result = hbResponse.initialize(initializerObj, 0);
- ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result.code());
- }
-
- TEST(ReplSetHeartbeatResponse, AuthFailure) {
- ReplSetHeartbeatResponse hbResp;
- std::string errMsg = "Unauthorized";
- Status result = hbResp.initialize(BSON("ok" << 0.0 <<
- "errmsg" << errMsg <<
- "code" << ErrorCodes::Unauthorized), 0);
- ASSERT_EQUALS(ErrorCodes::Unauthorized, result.code());
- ASSERT_EQUALS(errMsg, result.reason());
- }
-
- TEST(ReplSetHeartbeatResponse, ServerError) {
- ReplSetHeartbeatResponse hbResp;
- std::string errMsg = "Random Error";
- Status result = hbResp.initialize(BSON("ok" << 0.0 << "errmsg" << errMsg ), 0);
- ASSERT_EQUALS(ErrorCodes::UnknownError, result.code());
- ASSERT_EQUALS(errMsg, result.reason());
- }
+using std::unique_ptr;
+
+bool stringContains(const std::string& haystack, const std::string& needle) {
+ return haystack.find(needle) != std::string::npos;
+}
+
+TEST(ReplSetHeartbeatResponse, DefaultConstructThenSlowlyBuildToFullObj) {
+ int fieldsSet = 2;
+ ReplSetHeartbeatResponse hbResponse;
+ ReplSetHeartbeatResponse hbResponseObjRoundTripChecker;
+ ASSERT_EQUALS(false, hbResponse.hasState());
+ ASSERT_EQUALS(false, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(false, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(false, hbResponse.hasTime());
+ ASSERT_EQUALS(false, hbResponse.hasOpTime());
+ ASSERT_EQUALS(false, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(-1, hbResponse.getConfigVersion());
+
+ BSONObj hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+
+ Status initializeResult = Status::OK();
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toString());
+
+ // set version
+ hbResponse.setConfigVersion(1);
+ ++fieldsSet;
+ ASSERT_EQUALS(false, hbResponse.hasState());
+ ASSERT_EQUALS(false, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(false, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(false, hbResponse.hasTime());
+ ASSERT_EQUALS(false, hbResponse.hasOpTime());
+ ASSERT_EQUALS(false, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toString());
+
+ // set setname
+ hbResponse.setSetName("rs0");
+ ++fieldsSet;
+ ASSERT_EQUALS(false, hbResponse.hasState());
+ ASSERT_EQUALS(false, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(false, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(false, hbResponse.hasTime());
+ ASSERT_EQUALS(false, hbResponse.hasOpTime());
+ ASSERT_EQUALS(false, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toString());
+
+ // set electionTime
+ hbResponse.setElectionTime(Timestamp(10, 0));
+ ++fieldsSet;
+ ASSERT_EQUALS(false, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(false, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(false, hbResponse.hasTime());
+ ASSERT_EQUALS(false, hbResponse.hasOpTime());
+ ASSERT_EQUALS(false, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toString());
+
+ // set opTime
+ hbResponse.setOpTime(OpTime(Timestamp(10), 0));
+ ++fieldsSet;
+ ASSERT_EQUALS(false, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(false, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(false, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(false, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set time
+ hbResponse.setTime(Seconds(10));
+ ++fieldsSet;
+ ASSERT_EQUALS(false, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(false, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(false, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set electable
+ hbResponse.setElectable(true);
+ ++fieldsSet;
+ ASSERT_EQUALS(false, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(true, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(false, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(true, hbResponse.isElectable());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
+ ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set config
+ ReplicaSetConfig config;
+ hbResponse.setConfig(config);
+ ++fieldsSet;
+ ASSERT_EQUALS(false, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(true, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(true, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(true, hbResponse.isElectable());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
+ ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set state
+ hbResponse.setState(MemberState(MemberState::RS_SECONDARY));
+ ++fieldsSet;
+ ASSERT_EQUALS(true, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(true, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(true, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(false, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
+ hbResponse.getState().toString());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(true, hbResponse.isElectable());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
+ ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
+ ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set stateDisagreement
+ hbResponse.noteStateDisagreement();
+ ++fieldsSet;
+ ASSERT_EQUALS(true, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(true, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(true, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(false, hbResponse.isReplSet());
+ ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
+ hbResponse.getState().toString());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(true, hbResponse.isElectable());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
+ ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
+ ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
+ ASSERT_EQUALS(false, hbResponseObj["mismatch"].trueValue());
+ ASSERT_EQUALS(true, hbResponseObj["stateDisagreement"].trueValue());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set replSet
+ hbResponse.noteReplSet();
+ ++fieldsSet;
+ ASSERT_EQUALS(true, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(true, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(true, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(true, hbResponse.isReplSet());
+ ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
+ hbResponse.getState().toString());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort(), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(true, hbResponse.isElectable());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
+ ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
+ ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
+ ASSERT_EQUALS(false, hbResponseObj["mismatch"].trueValue());
+ ASSERT_EQUALS(true, hbResponseObj["stateDisagreement"].trueValue());
+ ASSERT_EQUALS(true, hbResponseObj["rs"].trueValue());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set syncingTo
+ hbResponse.setSyncingTo(HostAndPort("syncTarget"));
+ ++fieldsSet;
+ ASSERT_EQUALS(true, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(true, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(true, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(true, hbResponse.isReplSet());
+ ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
+ hbResponse.getState().toString());
+ ASSERT_EQUALS("", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort("syncTarget"), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(true, hbResponse.isElectable());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
+ ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
+ ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
+ ASSERT_EQUALS(false, hbResponseObj["mismatch"].trueValue());
+ ASSERT_EQUALS(true, hbResponseObj["stateDisagreement"].trueValue());
+ ASSERT_EQUALS(true, hbResponseObj["rs"].trueValue());
+ ASSERT_EQUALS("syncTarget:27017", hbResponseObj["syncingTo"].String());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set hbmsg
+ hbResponse.setHbMsg("lub dub");
+ ASSERT_EQUALS(true, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(true, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(true, hbResponse.hasConfig());
+ ASSERT_EQUALS(false, hbResponse.isMismatched());
+ ASSERT_EQUALS(true, hbResponse.isReplSet());
+ ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
+ hbResponse.getState().toString());
+ ASSERT_EQUALS("lub dub", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort("syncTarget"), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(true, hbResponse.isElectable());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(fieldsSet, hbResponseObj.nFields());
+ ASSERT_EQUALS("rs0", hbResponseObj["set"].String());
+ ASSERT_EQUALS("lub dub", hbResponseObj["hbmsg"].String());
+ ASSERT_EQUALS(1, hbResponseObj["v"].Number());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponseObj["electionTime"].timestamp());
+ ASSERT_EQUALS(Timestamp(0, 10), hbResponseObj["opTime"].timestamp());
+ ASSERT_EQUALS(10, hbResponseObj["time"].numberLong());
+ ASSERT_EQUALS(true, hbResponseObj["e"].trueValue());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponseObj["config"].Obj().toString());
+ ASSERT_EQUALS(2, hbResponseObj["state"].numberLong());
+ ASSERT_EQUALS(false, hbResponseObj["mismatch"].trueValue());
+ ASSERT_EQUALS(true, hbResponseObj["stateDisagreement"].trueValue());
+ ASSERT_EQUALS(true, hbResponseObj["rs"].trueValue());
+ ASSERT_EQUALS("syncTarget:27017", hbResponseObj["syncingTo"].String());
+
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(hbResponseObj.toString(), hbResponseObjRoundTripChecker.toBSON(false).toString());
+
+ // set mismatched
+ hbResponse.noteMismatched();
+ ASSERT_EQUALS(true, hbResponse.hasState());
+ ASSERT_EQUALS(true, hbResponse.hasElectionTime());
+ ASSERT_EQUALS(true, hbResponse.hasIsElectable());
+ ASSERT_EQUALS(true, hbResponse.hasTime());
+ ASSERT_EQUALS(true, hbResponse.hasOpTime());
+ ASSERT_EQUALS(true, hbResponse.hasConfig());
+ ASSERT_EQUALS(true, hbResponse.isMismatched());
+ ASSERT_EQUALS(true, hbResponse.isReplSet());
+ ASSERT_EQUALS(true, hbResponse.isStateDisagreement());
+ ASSERT_EQUALS("rs0", hbResponse.getReplicaSetName());
+ ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
+ hbResponse.getState().toString());
+ ASSERT_EQUALS("lub dub", hbResponse.getHbMsg());
+ ASSERT_EQUALS(HostAndPort("syncTarget"), hbResponse.getSyncingTo());
+ ASSERT_EQUALS(1, hbResponse.getConfigVersion());
+ ASSERT_EQUALS(Timestamp(10, 0), hbResponse.getElectionTime());
+ ASSERT_EQUALS(OpTime(Timestamp(0, 10), 0), hbResponse.getOpTime());
+ ASSERT_EQUALS(Seconds(10), hbResponse.getTime());
+ ASSERT_EQUALS(true, hbResponse.isElectable());
+ ASSERT_EQUALS(config.toBSON().toString(), hbResponse.getConfig().toBSON().toString());
+
+ hbResponseObj = hbResponse.toBSON(false);
+ ASSERT_EQUALS(2, hbResponseObj.nFields());
+ ASSERT_EQUALS(true, hbResponseObj["mismatch"].trueValue());
+
+ // NOTE: Does not check the round-trip. Once noteMismatched() has been called, the
+ // resulting BSON causes initialize() to return an error during parsing.
+ initializeResult = hbResponseObjRoundTripChecker.initialize(hbResponseObj, 0);
+ ASSERT_NOT_EQUALS(Status::OK(), initializeResult);
+ ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, initializeResult.code());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeWrongElectionTimeType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj = BSON("ok" << 1.0 << "electionTime"
+ << "hello");
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"electionTime\" field in response to replSetHeartbeat command to "
+ "have type Date or Timestamp, but found type String",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeWrongTimeType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj = BSON("ok" << 1.0 << "time"
+ << "hello");
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"time\" field in response to replSetHeartbeat command to "
+ "have a numeric type, but found type String",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeWrongOpTimeType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj = BSON("ok" << 1.0 << "opTime"
+ << "hello");
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"opTime\" field in response to replSetHeartbeat command to "
+ "have type Date or Timestamp, but found type String",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeMemberStateWrongType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj = BSON("ok" << 1.0 << "state"
+ << "hello");
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"state\" field in response to replSetHeartbeat command to "
+ "have type NumberInt or NumberLong, but found type String",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooLow) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj = BSON("ok" << 1.0 << "state" << -1);
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::BadValue, result);
+ ASSERT_EQUALS(
+ "Value for \"state\" in response to replSetHeartbeat is out of range; "
+ "legal values are non-negative and no more than 10",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeMemberStateTooHigh) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj = BSON("ok" << 1.0 << "state" << 11);
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::BadValue, result);
+ ASSERT_EQUALS(
+ "Value for \"state\" in response to replSetHeartbeat is out of range; "
+ "legal values are non-negative and no more than 10",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeVersionWrongType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj = BSON("ok" << 1.0 << "v"
+ << "hello");
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"v\" field in response to replSetHeartbeat to "
+ "have type NumberInt, but found String",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeReplSetNameWrongType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
+ "set" << 4);
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"set\" field in response to replSetHeartbeat to "
+ "have type String, but found NumberInt32",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeHeartbeatMessageWrongType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
+ "hbmsg" << 4);
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"hbmsg\" field in response to replSetHeartbeat to "
+ "have type String, but found NumberInt32",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
+ "syncingTo" << 4);
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"syncingTo\" field in response to replSetHeartbeat to "
+ "have type String, but found NumberInt32",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
+ "config" << 4);
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
+ ASSERT_EQUALS(
+ "Expected \"config\" in response to replSetHeartbeat to "
+ "have type Object, but found NumberInt32",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeBadConfig) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj =
+ BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
+ "config" << BSON("illegalFieldName" << 2));
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::BadValue, result);
+ ASSERT_EQUALS("Unexpected field illegalFieldName in replica set configuration",
+ result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeBothElectionTimeTypesSameResult) {
+ ReplSetHeartbeatResponse hbResponseDate;
+ ReplSetHeartbeatResponse hbResponseTimestamp;
+ BSONObjBuilder initializerDate;
+ BSONObjBuilder initializerTimestamp;
+ Date_t electionTime = Date_t::fromMillisSinceEpoch(974132);
+
+ initializerDate.append("ok", 1.0);
+ initializerDate.append("v", 1);
+ initializerDate.appendDate("electionTime", electionTime);
+ Status result = hbResponseDate.initialize(initializerDate.obj(), 0);
+ ASSERT_EQUALS(Status::OK(), result);
+
+ initializerTimestamp.append("ok", 1.0);
+ initializerTimestamp.append("v", 1);
+ initializerTimestamp.appendTimestamp("electionTime", electionTime.toULL());
+ result = hbResponseTimestamp.initialize(initializerTimestamp.obj(), 0);
+ ASSERT_EQUALS(Status::OK(), result);
+
+ ASSERT_EQUALS(hbResponseDate.getElectionTime(), hbResponseTimestamp.getElectionTime());
+}
+
+TEST(ReplSetHeartbeatResponse, InitializeBothOpTimeTypesSameResult) {
+ ReplSetHeartbeatResponse hbResponseDate;
+ ReplSetHeartbeatResponse hbResponseTimestamp;
+ BSONObjBuilder initializerDate;
+ BSONObjBuilder initializerTimestamp;
+ Date_t opTime = Date_t::fromMillisSinceEpoch(974132);
+
+ initializerDate.append("ok", 1.0);
+ initializerDate.append("v", 1);
+ initializerDate.appendDate("opTime", opTime);
+ Status result = hbResponseDate.initialize(initializerDate.obj(), 0);
+ ASSERT_EQUALS(Status::OK(), result);
+
+ initializerTimestamp.append("ok", 1.0);
+ initializerTimestamp.append("v", 1);
+ initializerTimestamp.appendTimestamp("opTime", opTime.toULL());
+ result = hbResponseTimestamp.initialize(initializerTimestamp.obj(), 0);
+ ASSERT_EQUALS(Status::OK(), result);
+
+ ASSERT_EQUALS(hbResponseDate.getOpTime(), hbResponseTimestamp.getOpTime());
+}
+
+TEST(ReplSetHeartbeatResponse, NoConfigStillInitializing) {
+ ReplSetHeartbeatResponse hbResp;
+ std::string msg = "still initializing";
+ Status result = hbResp.initialize(BSON("ok" << 1.0 << "rs" << true << "hbmsg" << msg), 0);
+ ASSERT_EQUALS(Status::OK(), result);
+ ASSERT_EQUALS(true, hbResp.isReplSet());
+ ASSERT_EQUALS(msg, hbResp.getHbMsg());
+}
+
+TEST(ReplSetHeartbeatResponse, InvalidResponseOpTimeMissesConfigVersion) {
+ ReplSetHeartbeatResponse hbResp;
+ std::string msg = "still initializing";
+ Status result = hbResp.initialize(BSON("ok" << 1.0 << "opTime" << Timestamp()), 0);
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.code());
+ ASSERT_TRUE(stringContains(result.reason(), "\"v\""))
+ << result.reason() << " does not mention the required \"v\" field";
+}
+
+TEST(ReplSetHeartbeatResponse, MismatchedReplicaSetNames) {
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj initializerObj = BSON("ok" << 0.0 << "mismatch" << true);
+ Status result = hbResponse.initialize(initializerObj, 0);
+ ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result.code());
+}
+
+TEST(ReplSetHeartbeatResponse, AuthFailure) {
+ ReplSetHeartbeatResponse hbResp;
+ std::string errMsg = "Unauthorized";
+ Status result = hbResp.initialize(
+ BSON("ok" << 0.0 << "errmsg" << errMsg << "code" << ErrorCodes::Unauthorized), 0);
+ ASSERT_EQUALS(ErrorCodes::Unauthorized, result.code());
+ ASSERT_EQUALS(errMsg, result.reason());
+}
+
+TEST(ReplSetHeartbeatResponse, ServerError) {
+ ReplSetHeartbeatResponse hbResp;
+ std::string errMsg = "Random Error";
+ Status result = hbResp.initialize(BSON("ok" << 0.0 << "errmsg" << errMsg), 0);
+ ASSERT_EQUALS(ErrorCodes::UnknownError, result.code());
+ ASSERT_EQUALS(errMsg, result.reason());
+}
} // namespace
} // namespace repl
diff --git a/src/mongo/db/repl/repl_set_html_summary.cpp b/src/mongo/db/repl/repl_set_html_summary.cpp
index e0d95a6e8db..821e8258d2e 100644
--- a/src/mongo/db/repl/repl_set_html_summary.cpp
+++ b/src/mongo/db/repl/repl_set_html_summary.cpp
@@ -42,181 +42,175 @@
namespace mongo {
namespace repl {
- ReplSetHtmlSummary::ReplSetHtmlSummary() : _selfIndex(-1), _primaryIndex(-1), _selfUptime(0) {}
+ReplSetHtmlSummary::ReplSetHtmlSummary() : _selfIndex(-1), _primaryIndex(-1), _selfUptime(0) {}
namespace {
- /**
- * Turns an unsigned int representing a duration of time in milliseconds and turns it into
- * a human readable time string representation.
- */
- std::string ago(unsigned int duration) {
- std::stringstream s;
- if( duration < 180 ) {
- s << duration << " sec";
- if( duration != 1 ) s << 's';
- }
- else if( duration < 3600 ) {
- s.precision(2);
- s << duration / 60.0 << " mins";
- }
- else {
- s.precision(2);
- s << duration / 3600.0 << " hrs";
- }
- return s.str();
+/**
+ * Converts an unsigned int duration, given in seconds, into a human-readable
+ * time string representation.
+ */
+std::string ago(unsigned int duration) {
+ std::stringstream s;
+ if (duration < 180) {
+ s << duration << " sec";
+ if (duration != 1)
+ s << 's';
+ } else if (duration < 3600) {
+ s.precision(2);
+ s << duration / 60.0 << " mins";
+ } else {
+ s.precision(2);
+ s << duration / 3600.0 << " hrs";
}
+ return s.str();
+}
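+
+// Illustrative outputs (a sketch for orientation only, not exercised by any test; assumes
+// the input is a duration in seconds, which is what timeDifference() below supplies):
+//   ago(1)    -> "1 sec"
+//   ago(90)   -> "90 secs"
+//   ago(600)  -> "10 mins"
+//   ago(7200) -> "2 hrs"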
- unsigned int timeDifference(Date_t now, Date_t past) {
- return static_cast<unsigned int>(past != Date_t() ? durationCount<Seconds>(now - past) : 0);
- }
+unsigned int timeDifference(Date_t now, Date_t past) {
+ return static_cast<unsigned int>(past != Date_t() ? durationCount<Seconds>(now - past) : 0);
+}
- std::string stateAsHtml(const MemberState& s) {
- using namespace html;
-
- if( s.s == MemberState::RS_STARTUP )
- return a("",
- "server still starting up, or still trying to initiate the set",
- "STARTUP");
- if( s.s == MemberState::RS_PRIMARY )
- return a("", "this server thinks it is primary", "PRIMARY");
- if( s.s == MemberState::RS_SECONDARY )
- return a("", "this server thinks it is a secondary (slave mode)", "SECONDARY");
- if( s.s == MemberState::RS_RECOVERING )
- return a("",
- "recovering/resyncing; after recovery usually auto-transitions to secondary",
- "RECOVERING");
- if( s.s == MemberState::RS_STARTUP2 )
- return a("", "loaded config, still determining who is primary", "STARTUP2");
- if( s.s == MemberState::RS_ARBITER )
- return a("", "this server is an arbiter only", "ARBITER");
- if( s.s == MemberState::RS_DOWN )
- return a("", "member is down, slow, or unreachable", "DOWN");
- if( s.s == MemberState::RS_ROLLBACK )
- return a("", "rolling back operations to get in sync", "ROLLBACK");
- if( s.s == MemberState::RS_UNKNOWN)
- return a("", "we do not know what state this node is in", "UNKNOWN");
- if( s.s == MemberState::RS_REMOVED)
- return a("", "this server has been removed from the replica set config", "ROLLBACK");
- return "";
- }
+std::string stateAsHtml(const MemberState& s) {
+ using namespace html;
+
+ if (s.s == MemberState::RS_STARTUP)
+ return a("", "server still starting up, or still trying to initiate the set", "STARTUP");
+ if (s.s == MemberState::RS_PRIMARY)
+ return a("", "this server thinks it is primary", "PRIMARY");
+ if (s.s == MemberState::RS_SECONDARY)
+ return a("", "this server thinks it is a secondary (slave mode)", "SECONDARY");
+ if (s.s == MemberState::RS_RECOVERING)
+ return a("",
+ "recovering/resyncing; after recovery usually auto-transitions to secondary",
+ "RECOVERING");
+ if (s.s == MemberState::RS_STARTUP2)
+ return a("", "loaded config, still determining who is primary", "STARTUP2");
+ if (s.s == MemberState::RS_ARBITER)
+ return a("", "this server is an arbiter only", "ARBITER");
+ if (s.s == MemberState::RS_DOWN)
+ return a("", "member is down, slow, or unreachable", "DOWN");
+ if (s.s == MemberState::RS_ROLLBACK)
+ return a("", "rolling back operations to get in sync", "ROLLBACK");
+ if (s.s == MemberState::RS_UNKNOWN)
+ return a("", "we do not know what state this node is in", "UNKNOWN");
+ if (s.s == MemberState::RS_REMOVED)
+ return a("", "this server has been removed from the replica set config", "ROLLBACK");
+ return "";
+}
}
- const std::string ReplSetHtmlSummary::toHtmlString() const {
- using namespace html;
+const std::string ReplSetHtmlSummary::toHtmlString() const {
+ using namespace html;
- std::stringstream s;
+ std::stringstream s;
- if (!_config.isInitialized()) {
- s << p("Still starting up, or else replset is not yet initiated.");
- return s.str();
- }
- if (_selfIndex < 0) {
- s << p("This node is not a member of its replica set configuration, it most likely was"
- " removed recently");
- return s.str();
- }
+ if (!_config.isInitialized()) {
+ s << p("Still starting up, or else replset is not yet initiated.");
+ return s.str();
+ }
+ if (_selfIndex < 0) {
+ s << p(
+ "This node is not a member of its replica set configuration, it most likely was"
+ " removed recently");
+ return s.str();
+ }
- int votesUp = 0;
- int totalVotes = 0;
- // Build table of node information.
- std::stringstream memberTable;
- const char *h[] =
- {"Member",
- "<a title=\"member id in the replset config\">id</a>",
- "Up",
- "<a title=\"length of time we have been continuously connected to the other member "
- "with no reconnects (for self, shows uptime)\">cctime</a>",
- "<a title=\"when this server last received a heartbeat response - includes error code "
- "responses\">Last heartbeat</a>",
- "Votes",
- "Priority",
- "State",
- "Messages",
- "<a title=\"how up to date this server is. this value polled every few seconds so "
- "actually lag is typically lower than value shown here.\">optime</a>",
- 0
- };
- memberTable << table(h);
-
- for (int i = 0; i < _config.getNumMembers(); ++i) {
- const MemberConfig& memberConfig = _config.getMemberAt(i);
- const MemberHeartbeatData& memberHB = _hbData[i];
- bool isSelf = _selfIndex == i;
- bool up = memberHB.getHealth() > 0;
-
- totalVotes += memberConfig.getNumVotes();
- if (up || isSelf) {
- votesUp += memberConfig.getNumVotes();
- }
+ int votesUp = 0;
+ int totalVotes = 0;
+ // Build table of node information.
+ std::stringstream memberTable;
+ const char* h[] = {
+ "Member",
+ "<a title=\"member id in the replset config\">id</a>",
+ "Up",
+ "<a title=\"length of time we have been continuously connected to the other member "
+ "with no reconnects (for self, shows uptime)\">cctime</a>",
+ "<a title=\"when this server last received a heartbeat response - includes error code "
+ "responses\">Last heartbeat</a>",
+ "Votes",
+ "Priority",
+ "State",
+ "Messages",
+ "<a title=\"how up to date this server is. this value polled every few seconds so "
+ "actually lag is typically lower than value shown here.\">optime</a>",
+ 0};
+ memberTable << table(h);
+
+ for (int i = 0; i < _config.getNumMembers(); ++i) {
+ const MemberConfig& memberConfig = _config.getMemberAt(i);
+ const MemberHeartbeatData& memberHB = _hbData[i];
+ bool isSelf = _selfIndex == i;
+ bool up = memberHB.getHealth() > 0;
+
+ totalVotes += memberConfig.getNumVotes();
+ if (up || isSelf) {
+ votesUp += memberConfig.getNumVotes();
+ }
- memberTable << tr();
- if (isSelf) {
- memberTable << td(memberConfig.getHostAndPort().toString() + " (me)");
- memberTable << td(memberConfig.getId());
- memberTable << td("1"); // up
- memberTable << td(ago(_selfUptime));
- memberTable << td(""); // last heartbeat
- memberTable << td(std::to_string(memberConfig.getNumVotes()));
- memberTable << td(std::to_string(memberConfig.getPriority()));
- memberTable << td(stateAsHtml(_selfState) +
- (memberConfig.isHidden() ? " (hidden)" : ""));
- memberTable << td(_selfHeartbeatMessage);
- memberTable << td(_selfOptime.toString());
+ memberTable << tr();
+ if (isSelf) {
+ memberTable << td(memberConfig.getHostAndPort().toString() + " (me)");
+ memberTable << td(memberConfig.getId());
+ memberTable << td("1"); // up
+ memberTable << td(ago(_selfUptime));
+ memberTable << td(""); // last heartbeat
+ memberTable << td(std::to_string(memberConfig.getNumVotes()));
+ memberTable << td(std::to_string(memberConfig.getPriority()));
+ memberTable << td(stateAsHtml(_selfState) +
+ (memberConfig.isHidden() ? " (hidden)" : ""));
+ memberTable << td(_selfHeartbeatMessage);
+ memberTable << td(_selfOptime.toString());
+ } else {
+ std::stringstream link;
+ link << "http://" << memberConfig.getHostAndPort().host() << ':'
+ << (memberConfig.getHostAndPort().port() + 1000) << "/_replSet";
+ memberTable << td(a(link.str(), "", memberConfig.getHostAndPort().toString()));
+ memberTable << td(memberConfig.getId());
+ memberTable << td(red(str::stream() << memberHB.getHealth(), !up));
+ const unsigned int uptime = timeDifference(_now, memberHB.getUpSince());
+ memberTable << td(ago(uptime));
+ if (memberHB.getLastHeartbeat() == Date_t()) {
+ memberTable << td("never");
+ } else {
+ memberTable << td(ago(timeDifference(_now, memberHB.getLastHeartbeat())));
}
- else {
- std::stringstream link;
- link << "http://" << memberConfig.getHostAndPort().host() << ':' <<
- (memberConfig.getHostAndPort().port() + 1000) << "/_replSet";
- memberTable << td( a(link.str(), "", memberConfig.getHostAndPort().toString()) );
- memberTable << td(memberConfig.getId());
- memberTable << td(red(str::stream() << memberHB.getHealth(), !up));
- const unsigned int uptime = timeDifference(_now, memberHB.getUpSince());
- memberTable << td(ago(uptime));
- if (memberHB.getLastHeartbeat() == Date_t()) {
- memberTable << td("never");
- }
- else {
- memberTable << td(ago(timeDifference(_now, memberHB.getLastHeartbeat())));
- }
- memberTable << td(std::to_string(memberConfig.getNumVotes()));
- memberTable << td(std::to_string(memberConfig.getPriority()));
- std::string state = memberHB.getState().toString() +
- (memberConfig.isHidden() ? " (hidden)" : "");
- if (up) {
- memberTable << td(state);
- }
- else {
- memberTable << td( grey(str::stream() << "(was " << state << ')', true) );
- }
- memberTable << td(grey(memberHB.getLastHeartbeatMsg(), !up));
- // TODO(dannenberg): change timestamp to optime in V1
- memberTable << td(memberHB.getLastHeartbeat() == Date_t() ?
- "?" : memberHB.getOpTime().toString());
+ memberTable << td(std::to_string(memberConfig.getNumVotes()));
+ memberTable << td(std::to_string(memberConfig.getPriority()));
+ std::string state =
+ memberHB.getState().toString() + (memberConfig.isHidden() ? " (hidden)" : "");
+ if (up) {
+ memberTable << td(state);
+ } else {
+ memberTable << td(grey(str::stream() << "(was " << state << ')', true));
}
- memberTable << _tr();
+ memberTable << td(grey(memberHB.getLastHeartbeatMsg(), !up));
+ // TODO(dannenberg): change timestamp to optime in V1
+ memberTable << td(
+ memberHB.getLastHeartbeat() == Date_t() ? "?" : memberHB.getOpTime().toString());
}
- memberTable << _table();
+ memberTable << _tr();
+ }
+ memberTable << _table();
- s << table(0, false);
- s << tr("Set name:", _config.getReplSetName());
- bool majorityUp = votesUp * 2 > totalVotes;
- s << tr("Majority up:", majorityUp ? "yes" : "no" );
+ s << table(0, false);
+ s << tr("Set name:", _config.getReplSetName());
+ bool majorityUp = votesUp * 2 > totalVotes;
+ s << tr("Majority up:", majorityUp ? "yes" : "no");
- const MemberConfig& selfConfig = _config.getMemberAt(_selfIndex);
+ const MemberConfig& selfConfig = _config.getMemberAt(_selfIndex);
- if (_primaryIndex >= 0 && _primaryIndex != _selfIndex && !selfConfig.isArbiter()) {
- int lag = _hbData[_primaryIndex].getOpTime().getTimestamp().getSecs() -
- _selfOptime.getTimestamp().getSecs();
- s << tr("Lag: ", str::stream() << lag << " secs");
- }
+ if (_primaryIndex >= 0 && _primaryIndex != _selfIndex && !selfConfig.isArbiter()) {
+ int lag = _hbData[_primaryIndex].getOpTime().getTimestamp().getSecs() -
+ _selfOptime.getTimestamp().getSecs();
+ s << tr("Lag: ", str::stream() << lag << " secs");
+ }
- s << _table();
+ s << _table();
- s << memberTable.str();
+ s << memberTable.str();
- return s.str();
- }
+ return s.str();
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_html_summary.h b/src/mongo/db/repl/repl_set_html_summary.h
index 997c5a3ceac..70e3c3eae3b 100644
--- a/src/mongo/db/repl/repl_set_html_summary.h
+++ b/src/mongo/db/repl/repl_set_html_summary.h
@@ -39,64 +39,63 @@ namespace mongo {
namespace repl {
- /**
- * Class containing all the information needed to build the replSet page on http interface,
- * and the logic to generate that page.
- */
- class ReplSetHtmlSummary {
- public:
- ReplSetHtmlSummary();
-
- const std::string toHtmlString() const;
-
- void setConfig(const ReplicaSetConfig& config) {
- _config = config;
- }
-
- void setHBData(const std::vector<MemberHeartbeatData>& hbData) {
- _hbData = hbData;
- }
-
- void setSelfIndex(int index) {
- _selfIndex = index;
- }
-
- void setPrimaryIndex(int index) {
- _primaryIndex = index;
- }
-
- void setSelfOptime(const OpTime& ts) {
- _selfOptime = ts;
- }
-
- void setSelfUptime(unsigned int time) {
- _selfUptime = time;
- }
-
- void setNow(Date_t now) {
- _now = now;
- }
-
- void setSelfState(const MemberState& state) {
- _selfState = state;
- }
-
- void setSelfHeartbeatMessage(StringData msg) {
- _selfHeartbeatMessage = msg.toString();
- }
-
- private:
-
- ReplicaSetConfig _config;
- std::vector<MemberHeartbeatData> _hbData;
- Date_t _now;
- int _selfIndex;
- int _primaryIndex;
- OpTime _selfOptime;
- unsigned int _selfUptime;
- MemberState _selfState;
- std::string _selfHeartbeatMessage;
- };
-
-} // namespace repl
-} // namespace mongo
+/**
+ * Class containing all the information needed to build the replSet page on the HTTP
+ * interface, and the logic to generate that page.
+ */
+class ReplSetHtmlSummary {
+public:
+ ReplSetHtmlSummary();
+
+ const std::string toHtmlString() const;
+
+ void setConfig(const ReplicaSetConfig& config) {
+ _config = config;
+ }
+
+ void setHBData(const std::vector<MemberHeartbeatData>& hbData) {
+ _hbData = hbData;
+ }
+
+ void setSelfIndex(int index) {
+ _selfIndex = index;
+ }
+
+ void setPrimaryIndex(int index) {
+ _primaryIndex = index;
+ }
+
+ void setSelfOptime(const OpTime& ts) {
+ _selfOptime = ts;
+ }
+
+ void setSelfUptime(unsigned int time) {
+ _selfUptime = time;
+ }
+
+ void setNow(Date_t now) {
+ _now = now;
+ }
+
+ void setSelfState(const MemberState& state) {
+ _selfState = state;
+ }
+
+ void setSelfHeartbeatMessage(StringData msg) {
+ _selfHeartbeatMessage = msg.toString();
+ }
+
+private:
+ ReplicaSetConfig _config;
+ std::vector<MemberHeartbeatData> _hbData;
+ Date_t _now;
+ int _selfIndex;
+ int _primaryIndex;
+ OpTime _selfOptime;
+ unsigned int _selfUptime;
+ MemberState _selfState;
+ std::string _selfHeartbeatMessage;
+};
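+
+// A minimal usage sketch (the config, heartbeat data, and indices below are illustrative,
+// not drawn from any particular caller): populate the summary via the setters above, then
+// render it.
+//
+//   ReplSetHtmlSummary summary;
+//   summary.setConfig(config);
+//   summary.setHBData(hbData);
+//   summary.setSelfIndex(0);
+//   summary.setPrimaryIndex(1);
+//   summary.setNow(Date_t::now());
+//   summary.setSelfState(MemberState(MemberState::RS_SECONDARY));
+//   std::string page = summary.toHtmlString();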
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_request_votes.cpp b/src/mongo/db/repl/repl_set_request_votes.cpp
index 3d823de4930..92a72a1a6c4 100644
--- a/src/mongo/db/repl/repl_set_request_votes.cpp
+++ b/src/mongo/db/repl/repl_set_request_votes.cpp
@@ -36,36 +36,35 @@
namespace mongo {
namespace repl {
- class CmdReplSetRequestVotes : public ReplSetCommand {
- public:
- CmdReplSetRequestVotes() : ReplSetCommand("replSetRequestVotes") { }
- private:
- bool run(OperationContext* txn,
- const std::string&,
- BSONObj& cmdObj,
- int,
- std::string& errmsg,
- BSONObjBuilder& result) final {
+class CmdReplSetRequestVotes : public ReplSetCommand {
+public:
+ CmdReplSetRequestVotes() : ReplSetCommand("replSetRequestVotes") {}
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- ReplSetRequestVotesArgs parsedArgs;
- status = parsedArgs.initialize(cmdObj);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+private:
+ bool run(OperationContext* txn,
+ const std::string&,
+ BSONObj& cmdObj,
+ int,
+ std::string& errmsg,
+ BSONObjBuilder& result) final {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- ReplSetRequestVotesResponse response;
- status = getGlobalReplicationCoordinator()->processReplSetRequestVotes(txn,
- parsedArgs,
- &response);
- response.addToBSON(&result);
+ ReplSetRequestVotesArgs parsedArgs;
+ status = parsedArgs.initialize(cmdObj);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdReplSetRequestVotes;
-} // namespace repl
-} // namespace mongo
+ ReplSetRequestVotesResponse response;
+ status = getGlobalReplicationCoordinator()->processReplSetRequestVotes(
+ txn, parsedArgs, &response);
+ response.addToBSON(&result);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetRequestVotes;
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/repl_set_request_votes_args.cpp b/src/mongo/db/repl/repl_set_request_votes_args.cpp
index 8c29e37b29d..8a119ff0013 100644
--- a/src/mongo/db/repl/repl_set_request_votes_args.cpp
+++ b/src/mongo/db/repl/repl_set_request_votes_args.cpp
@@ -36,173 +36,167 @@ namespace mongo {
namespace repl {
namespace {
- const std::string kCandidateIdFieldName = "candidateId";
- const std::string kCommandName = "replSetRequestVotes";
- const std::string kConfigVersionFieldName = "configVersion";
- const std::string kDryRunFieldName = "dryRun";
- const std::string kLastCommittedOpFieldName = "lastCommittedOp";
- const std::string kOkFieldName = "ok";
- const std::string kOpTimeFieldName = "ts";
- const std::string kReasonFieldName = "reason";
- const std::string kSetNameFieldName = "setName";
- const std::string kTermFieldName = "term";
- const std::string kVoteGrantedFieldName = "voteGranted";
-
- const std::string kLegalArgsFieldNames[] = {
- kCandidateIdFieldName,
- kCommandName,
- kConfigVersionFieldName,
- kDryRunFieldName,
- kLastCommittedOpFieldName,
- kOpTimeFieldName,
- kSetNameFieldName,
- kTermFieldName,
- };
-
- const std::string kLegalResponseFieldNames[] = {
- kOkFieldName,
- kReasonFieldName,
- kTermFieldName,
- kVoteGrantedFieldName,
- };
+const std::string kCandidateIdFieldName = "candidateId";
+const std::string kCommandName = "replSetRequestVotes";
+const std::string kConfigVersionFieldName = "configVersion";
+const std::string kDryRunFieldName = "dryRun";
+const std::string kLastCommittedOpFieldName = "lastCommittedOp";
+const std::string kOkFieldName = "ok";
+const std::string kOpTimeFieldName = "ts";
+const std::string kReasonFieldName = "reason";
+const std::string kSetNameFieldName = "setName";
+const std::string kTermFieldName = "term";
+const std::string kVoteGrantedFieldName = "voteGranted";
+
+const std::string kLegalArgsFieldNames[] = {
+ kCandidateIdFieldName,
+ kCommandName,
+ kConfigVersionFieldName,
+ kDryRunFieldName,
+ kLastCommittedOpFieldName,
+ kOpTimeFieldName,
+ kSetNameFieldName,
+ kTermFieldName,
+};
+
+const std::string kLegalResponseFieldNames[] = {
+ kOkFieldName, kReasonFieldName, kTermFieldName, kVoteGrantedFieldName,
+};
} // namespace
- Status ReplSetRequestVotesArgs::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("ReplSetRequestVotes",
- argsObj,
- kLegalArgsFieldNames);
- if (!status.isOK())
- return status;
-
- status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
- if (!status.isOK())
- return status;
-
- status = bsonExtractIntegerField(argsObj, kCandidateIdFieldName, &_candidateId);
- if (!status.isOK())
- return status;
-
- status = bsonExtractIntegerField(argsObj, kConfigVersionFieldName, &_cfgver);
- if (!status.isOK())
- return status;
-
- status = bsonExtractStringField(argsObj, kSetNameFieldName, &_setName);
- if (!status.isOK())
- return status;
-
- status = bsonExtractBooleanField(argsObj, kDryRunFieldName, &_dryRun);
- if (!status.isOK())
- return status;
-
- // extracting the lastCommittedOp is a bit of a process
- BSONObj lastCommittedOp = argsObj[kLastCommittedOpFieldName].Obj();
- Timestamp ts;
- status = bsonExtractTimestampField(lastCommittedOp, kOpTimeFieldName, &ts);
- if (!status.isOK())
- return status;
- long long term;
- status = bsonExtractIntegerField(lastCommittedOp, kTermFieldName, &term);
- if (!status.isOK())
- return status;
- _lastCommittedOp = OpTime(lastCommittedOp[kOpTimeFieldName].timestamp(),
- lastCommittedOp[kTermFieldName].Long());
-
- return Status::OK();
- }
-
- const std::string& ReplSetRequestVotesArgs::getSetName() const {
- return _setName;
- }
-
- long long ReplSetRequestVotesArgs::getTerm() const {
- return _term;
- }
-
- long long ReplSetRequestVotesArgs::getCandidateId() const {
- return _candidateId;
- }
-
- long long ReplSetRequestVotesArgs::getConfigVersion() const {
- return _cfgver;
- }
-
- OpTime ReplSetRequestVotesArgs::getLastCommittedOp() const {
- return _lastCommittedOp;
- }
-
- bool ReplSetRequestVotesArgs::isADryRun() const {
- return _dryRun;
- }
-
- void ReplSetRequestVotesArgs::addToBSON(BSONObjBuilder* builder) const {
- builder->append(kCommandName, 1);
- builder->append(kSetNameFieldName, _setName);
- builder->append(kDryRunFieldName, _dryRun);
- builder->append(kTermFieldName, _term);
- builder->appendIntOrLL(kCandidateIdFieldName, _candidateId);
- builder->appendIntOrLL(kConfigVersionFieldName, _cfgver);
- BSONObjBuilder lastCommittedOp(builder->subobjStart(kLastCommittedOpFieldName));
- lastCommittedOp.append(kOpTimeFieldName, _lastCommittedOp.getTimestamp());
- lastCommittedOp.append(kTermFieldName, _lastCommittedOp.getTerm());
- lastCommittedOp.done();
- }
-
- Status ReplSetRequestVotesResponse::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("ReplSetRequestVotes",
- argsObj,
- kLegalResponseFieldNames);
- if (!status.isOK())
- return status;
-
- status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
- if (!status.isOK())
- return status;
-
- status = bsonExtractBooleanField(argsObj, kVoteGrantedFieldName, &_voteGranted);
- if (!status.isOK())
- return status;
-
- status = bsonExtractStringField(argsObj, kReasonFieldName, &_reason);
- if (!status.isOK())
- return status;
-
- status = bsonExtractBooleanField(argsObj, kOkFieldName, &_ok);
- if (!status.isOK())
- return status;
-
- return Status::OK();
- }
-
- bool ReplSetRequestVotesResponse::getOk() const {
- return _ok;
- }
-
- long long ReplSetRequestVotesResponse::getTerm() const {
- return _term;
- }
-
- bool ReplSetRequestVotesResponse::getVoteGranted() const {
- return _voteGranted;
- }
-
- const std::string& ReplSetRequestVotesResponse::getReason() const {
- return _reason;
- }
-
- void ReplSetRequestVotesResponse::addToBSON(BSONObjBuilder* builder) const {
- builder->append(kOkFieldName, _ok);
- builder->append(kTermFieldName, _term);
- builder->append(kVoteGrantedFieldName, _voteGranted);
- builder->append(kReasonFieldName, _reason);
- }
-
- BSONObj ReplSetRequestVotesResponse::toBSON() const {
- BSONObjBuilder builder;
- addToBSON(&builder);
- return builder.obj();
- }
-
-} // namespace repl
-} // namespace mongo
+Status ReplSetRequestVotesArgs::initialize(const BSONObj& argsObj) {
+ Status status = bsonCheckOnlyHasFields("ReplSetRequestVotes", argsObj, kLegalArgsFieldNames);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerField(argsObj, kCandidateIdFieldName, &_candidateId);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerField(argsObj, kConfigVersionFieldName, &_cfgver);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractStringField(argsObj, kSetNameFieldName, &_setName);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractBooleanField(argsObj, kDryRunFieldName, &_dryRun);
+ if (!status.isOK())
+ return status;
+
+ // extracting the lastCommittedOp is a bit of a process
+ BSONObj lastCommittedOp = argsObj[kLastCommittedOpFieldName].Obj();
+ Timestamp ts;
+ status = bsonExtractTimestampField(lastCommittedOp, kOpTimeFieldName, &ts);
+ if (!status.isOK())
+ return status;
+ long long term;
+ status = bsonExtractIntegerField(lastCommittedOp, kTermFieldName, &term);
+ if (!status.isOK())
+ return status;
+ _lastCommittedOp = OpTime(ts, term);
+
+ return Status::OK();
+}
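+
+// For orientation, an argument document accepted by initialize() has roughly this shape
+// (all field values here are hypothetical):
+//   { replSetRequestVotes: 1, setName: "rs0", dryRun: false, term: 1,
+//     candidateId: 2, configVersion: 3,
+//     lastCommittedOp: { ts: Timestamp(100, 1), term: 1 } }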
+
+const std::string& ReplSetRequestVotesArgs::getSetName() const {
+ return _setName;
+}
+
+long long ReplSetRequestVotesArgs::getTerm() const {
+ return _term;
+}
+
+long long ReplSetRequestVotesArgs::getCandidateId() const {
+ return _candidateId;
+}
+
+long long ReplSetRequestVotesArgs::getConfigVersion() const {
+ return _cfgver;
+}
+
+OpTime ReplSetRequestVotesArgs::getLastCommittedOp() const {
+ return _lastCommittedOp;
+}
+
+bool ReplSetRequestVotesArgs::isADryRun() const {
+ return _dryRun;
+}
+
+void ReplSetRequestVotesArgs::addToBSON(BSONObjBuilder* builder) const {
+ builder->append(kCommandName, 1);
+ builder->append(kSetNameFieldName, _setName);
+ builder->append(kDryRunFieldName, _dryRun);
+ builder->append(kTermFieldName, _term);
+ builder->appendIntOrLL(kCandidateIdFieldName, _candidateId);
+ builder->appendIntOrLL(kConfigVersionFieldName, _cfgver);
+ BSONObjBuilder lastCommittedOp(builder->subobjStart(kLastCommittedOpFieldName));
+ lastCommittedOp.append(kOpTimeFieldName, _lastCommittedOp.getTimestamp());
+ lastCommittedOp.append(kTermFieldName, _lastCommittedOp.getTerm());
+ lastCommittedOp.done();
+}
+
+Status ReplSetRequestVotesResponse::initialize(const BSONObj& argsObj) {
+ Status status =
+ bsonCheckOnlyHasFields("ReplSetRequestVotes", argsObj, kLegalResponseFieldNames);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractIntegerField(argsObj, kTermFieldName, &_term);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractBooleanField(argsObj, kVoteGrantedFieldName, &_voteGranted);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractStringField(argsObj, kReasonFieldName, &_reason);
+ if (!status.isOK())
+ return status;
+
+ status = bsonExtractBooleanField(argsObj, kOkFieldName, &_ok);
+ if (!status.isOK())
+ return status;
+
+ return Status::OK();
+}
+
+bool ReplSetRequestVotesResponse::getOk() const {
+ return _ok;
+}
+
+long long ReplSetRequestVotesResponse::getTerm() const {
+ return _term;
+}
+
+bool ReplSetRequestVotesResponse::getVoteGranted() const {
+ return _voteGranted;
+}
+
+const std::string& ReplSetRequestVotesResponse::getReason() const {
+ return _reason;
+}
+
+void ReplSetRequestVotesResponse::addToBSON(BSONObjBuilder* builder) const {
+ builder->append(kOkFieldName, _ok);
+ builder->append(kTermFieldName, _term);
+ builder->append(kVoteGrantedFieldName, _voteGranted);
+ builder->append(kReasonFieldName, _reason);
+}
+
+BSONObj ReplSetRequestVotesResponse::toBSON() const {
+ BSONObjBuilder builder;
+ addToBSON(&builder);
+ return builder.obj();
+}
+
+} // namespace repl
+} // namespace mongo
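
A minimal sketch of how a command handler might consume the two types above, assuming only the header from this patch; the helper name handleRequestVotes, the always-granted vote, and the abbreviated error handling are illustrative assumptions rather than code from this change.

#include "mongo/base/status_with.h"
#include "mongo/db/repl/repl_set_request_votes_args.h"

namespace mongo {
namespace repl {

StatusWith<BSONObj> handleRequestVotes(const BSONObj& cmdObj) {
    ReplSetRequestVotesArgs args;
    Status status = args.initialize(cmdObj);  // validates term, candidateId, configVersion,
    if (!status.isOK())                       // setName, dryRun and lastCommittedOp
        return status;

    ReplSetRequestVotesResponse response;
    response.setTerm(args.getTerm());
    response.setVoteGranted(true);  // a real implementation compares terms, config versions
    response.setReason("");         // and op times before granting a vote
    response.setOk(true);
    return response.toBSON();  // {ok, term, voteGranted, reason}
}

}  // namespace repl
}  // namespace mongo
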
diff --git a/src/mongo/db/repl/repl_set_request_votes_args.h b/src/mongo/db/repl/repl_set_request_votes_args.h
index a0f2dc2157b..00ba4d06c1c 100644
--- a/src/mongo/db/repl/repl_set_request_votes_args.h
+++ b/src/mongo/db/repl/repl_set_request_votes_args.h
@@ -34,55 +34,63 @@
namespace mongo {
- class BSONObj;
+class BSONObj;
namespace repl {
- class ReplSetRequestVotesArgs {
- public:
- Status initialize(const BSONObj& argsObj);
-
- const std::string& getSetName() const;
- long long getTerm() const;
- long long getCandidateId() const;
- long long getConfigVersion() const;
- OpTime getLastCommittedOp() const;
- bool isADryRun() const;
-
- void addToBSON(BSONObjBuilder* builder) const;
-
- private:
- std::string _setName; // Name of the replset
- long long _term = -1; // Current known term of the command issuer
- long long _candidateId = -1; // replSet id of the member who sent the replSetRequestVotesCmd
- long long _cfgver = -1; // replSet config version known to the command issuer
- OpTime _lastCommittedOp; // The last known committed op of the command issuer
- bool _dryRun = false; // Indicates this is a pre-election check when true
- };
-
- class ReplSetRequestVotesResponse {
- public:
- Status initialize(const BSONObj& argsObj);
-
- void setOk(bool ok) { _ok = ok; }
- void setVoteGranted(bool voteGranted) { _voteGranted = voteGranted; }
- void setTerm(long long term) { _term = term; }
- void setReason(const std::string& reason) { _reason = reason; }
-
- bool getOk() const;
- long long getTerm() const;
- bool getVoteGranted() const;
- const std::string& getReason() const;
-
- void addToBSON(BSONObjBuilder* builder) const;
- BSONObj toBSON() const;
-
- private:
- bool _ok = false;
- long long _term = -1;
- bool _voteGranted = false;
- std::string _reason;
- };
-
-} // namespace repl
-} // namespace mongo
+class ReplSetRequestVotesArgs {
+public:
+ Status initialize(const BSONObj& argsObj);
+
+ const std::string& getSetName() const;
+ long long getTerm() const;
+ long long getCandidateId() const;
+ long long getConfigVersion() const;
+ OpTime getLastCommittedOp() const;
+ bool isADryRun() const;
+
+ void addToBSON(BSONObjBuilder* builder) const;
+
+private:
+ std::string _setName; // Name of the replset
+ long long _term = -1; // Current known term of the command issuer
+ long long _candidateId = -1; // replSet id of the member who sent the replSetRequestVotesCmd
+ long long _cfgver = -1; // replSet config version known to the command issuer
+ OpTime _lastCommittedOp; // The last known committed op of the command issuer
+ bool _dryRun = false; // Indicates this is a pre-election check when true
+};
+
+class ReplSetRequestVotesResponse {
+public:
+ Status initialize(const BSONObj& argsObj);
+
+ void setOk(bool ok) {
+ _ok = ok;
+ }
+ void setVoteGranted(bool voteGranted) {
+ _voteGranted = voteGranted;
+ }
+ void setTerm(long long term) {
+ _term = term;
+ }
+ void setReason(const std::string& reason) {
+ _reason = reason;
+ }
+
+ bool getOk() const;
+ long long getTerm() const;
+ bool getVoteGranted() const;
+ const std::string& getReason() const;
+
+ void addToBSON(BSONObjBuilder* builder) const;
+ BSONObj toBSON() const;
+
+private:
+ bool _ok = false;
+ long long _term = -1;
+ bool _voteGranted = false;
+ std::string _reason;
+};
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/repl_settings.cpp b/src/mongo/db/repl/repl_settings.cpp
index 3b22a3203eb..a385d89c55b 100644
--- a/src/mongo/db/repl/repl_settings.cpp
+++ b/src/mongo/db/repl/repl_settings.cpp
@@ -35,13 +35,12 @@
namespace mongo {
namespace repl {
- MONGO_EXPORT_STARTUP_SERVER_PARAMETER(maxSyncSourceLagSecs, int, 30);
- MONGO_INITIALIZER(maxSyncSourceLagSecsCheck) (InitializerContext*) {
- if (maxSyncSourceLagSecs < 1) {
- return Status(ErrorCodes::BadValue, "maxSyncSourceLagSecs must be > 0");
- }
- return Status::OK();
+MONGO_EXPORT_STARTUP_SERVER_PARAMETER(maxSyncSourceLagSecs, int, 30);
+MONGO_INITIALIZER(maxSyncSourceLagSecsCheck)(InitializerContext*) {
+ if (maxSyncSourceLagSecs < 1) {
+ return Status(ErrorCodes::BadValue, "maxSyncSourceLagSecs must be > 0");
}
-
+ return Status::OK();
+}
}
}
diff --git a/src/mongo/db/repl/repl_settings.h b/src/mongo/db/repl/repl_settings.h
index cec0b90040f..5c1e6032acc 100644
--- a/src/mongo/db/repl/repl_settings.h
+++ b/src/mongo/db/repl/repl_settings.h
@@ -38,90 +38,91 @@
namespace mongo {
namespace repl {
- extern int maxSyncSourceLagSecs;
-
- bool anyReplEnabled();
-
- /* replication slave? (possibly with slave)
- --slave cmd line setting -> SimpleSlave
- */
- typedef enum { NotSlave=0, SimpleSlave } SlaveTypes;
-
- class ReplSettings {
- public:
- SlaveTypes slave;
-
- /** true means we are master and doing replication. if we are not writing to oplog, this won't be true. */
- bool master;
-
- bool fastsync;
-
- bool autoresync;
-
- int slavedelay;
-
- long long oplogSize; // --oplogSize
-
- // for master/slave replication
- std::string source; // --source
- std::string only; // --only
- int pretouch; // --pretouch for replication application (experimental)
-
- std::string replSet; // --replSet[/<seedlist>]
- std::string ourSetName() const {
- std::string setname;
- size_t sl = replSet.find('/');
- if( sl == std::string::npos )
- return replSet;
- return replSet.substr(0, sl);
- }
- bool usingReplSets() const { return !replSet.empty(); }
-
- std::string rsIndexPrefetch;// --indexPrefetch
-
- ReplSettings()
- : slave(NotSlave),
- master(false),
- fastsync(),
- autoresync(false),
- slavedelay(),
- oplogSize(0),
- pretouch(0) {
- }
-
- // TODO(spencer): Remove explicit copy constructor after we no longer have mutable state
- // in ReplSettings.
- ReplSettings(const ReplSettings& other) :
- slave(other.slave),
- master(other.master),
- fastsync(other.fastsync),
- autoresync(other.autoresync),
- slavedelay(other.slavedelay),
- oplogSize(other.oplogSize),
- source(other.source),
- only(other.only),
- pretouch(other.pretouch),
- replSet(other.replSet),
- rsIndexPrefetch(other.rsIndexPrefetch) {}
-
- ReplSettings& operator=(const ReplSettings& other) {
- if (this == &other) return *this;
-
- slave = other.slave;
- master = other.master;
- fastsync = other.fastsync;
- autoresync = other.autoresync;
- slavedelay = other.slavedelay;
- oplogSize = other.oplogSize;
- source = other.source;
- only = other.only;
- pretouch = other.pretouch;
- replSet = other.replSet;
- rsIndexPrefetch = other.rsIndexPrefetch;
- return *this;
- }
+extern int maxSyncSourceLagSecs;
+
+bool anyReplEnabled();
- };
+/* replication slave? (possibly with slave)
+ --slave cmd line setting -> SimpleSlave
+*/
+typedef enum { NotSlave = 0, SimpleSlave } SlaveTypes;
+
+class ReplSettings {
+public:
+ SlaveTypes slave;
+
+    /** True means we are master and doing replication. If we are not writing to the oplog, this won't be true. */
+ bool master;
+
+ bool fastsync;
+
+ bool autoresync;
+
+ int slavedelay;
+
+ long long oplogSize; // --oplogSize
+
+ // for master/slave replication
+ std::string source; // --source
+ std::string only; // --only
+ int pretouch; // --pretouch for replication application (experimental)
+
+ std::string replSet; // --replSet[/<seedlist>]
+ std::string ourSetName() const {
+ std::string setname;
+ size_t sl = replSet.find('/');
+ if (sl == std::string::npos)
+ return replSet;
+ return replSet.substr(0, sl);
+ }
+ bool usingReplSets() const {
+ return !replSet.empty();
+ }
+
+ std::string rsIndexPrefetch; // --indexPrefetch
+
+ ReplSettings()
+ : slave(NotSlave),
+ master(false),
+ fastsync(),
+ autoresync(false),
+ slavedelay(),
+ oplogSize(0),
+ pretouch(0) {}
+
+ // TODO(spencer): Remove explicit copy constructor after we no longer have mutable state
+ // in ReplSettings.
+ ReplSettings(const ReplSettings& other)
+ : slave(other.slave),
+ master(other.master),
+ fastsync(other.fastsync),
+ autoresync(other.autoresync),
+ slavedelay(other.slavedelay),
+ oplogSize(other.oplogSize),
+ source(other.source),
+ only(other.only),
+ pretouch(other.pretouch),
+ replSet(other.replSet),
+ rsIndexPrefetch(other.rsIndexPrefetch) {}
+
+ ReplSettings& operator=(const ReplSettings& other) {
+ if (this == &other)
+ return *this;
-} // namespace repl
-} // namespace mongo
+ slave = other.slave;
+ master = other.master;
+ fastsync = other.fastsync;
+ autoresync = other.autoresync;
+ slavedelay = other.slavedelay;
+ oplogSize = other.oplogSize;
+ source = other.source;
+ only = other.only;
+ pretouch = other.pretouch;
+ replSet = other.replSet;
+ rsIndexPrefetch = other.rsIndexPrefetch;
+ return *this;
+ }
+};
+
+} // namespace repl
+} // namespace mongo
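
A short sketch, again outside the patch, of the --replSet parsing shown in ourSetName() and usingReplSets(); the function name replSettingsExample is assumed for illustration.

#include <cassert>

#include "mongo/db/repl/repl_settings.h"

void replSettingsExample() {
    mongo::repl::ReplSettings settings;
    settings.replSet = "rs0/host1:27017,host2:27017";  // --replSet value with a seed list
    assert(settings.ourSetName() == "rs0");            // text before the first '/'
    assert(settings.usingReplSets());                  // non-empty --replSet enables replica sets
}
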
diff --git a/src/mongo/db/repl/replica_set_config.cpp b/src/mongo/db/repl/replica_set_config.cpp
index ab901fe5886..eef500a2328 100644
--- a/src/mongo/db/repl/replica_set_config.cpp
+++ b/src/mongo/db/repl/replica_set_config.cpp
@@ -41,542 +41,521 @@ namespace mongo {
namespace repl {
#ifndef _MSC_VER
- const size_t ReplicaSetConfig::kMaxMembers;
- const size_t ReplicaSetConfig::kMaxVotingMembers;
+const size_t ReplicaSetConfig::kMaxMembers;
+const size_t ReplicaSetConfig::kMaxVotingMembers;
#endif
- const std::string ReplicaSetConfig::kVersionFieldName = "version";
- const std::string ReplicaSetConfig::kMajorityWriteConcernModeName = "$majority";
- const Seconds ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod(10);
+const std::string ReplicaSetConfig::kVersionFieldName = "version";
+const std::string ReplicaSetConfig::kMajorityWriteConcernModeName = "$majority";
+const Seconds ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod(10);
namespace {
- const std::string kIdFieldName = "_id";
- const std::string kMembersFieldName = "members";
- const std::string kSettingsFieldName = "settings";
- const std::string kStepDownCheckWriteConcernModeName = "$stepDownCheck";
- const std::string kProtocolVersionFieldName = "protocolVersion";
-
- const std::string kLegalConfigTopFieldNames[] = {
- kIdFieldName,
- ReplicaSetConfig::kVersionFieldName,
- kMembersFieldName,
- kSettingsFieldName,
- kProtocolVersionFieldName
- };
-
- const std::string kHeartbeatTimeoutFieldName = "heartbeatTimeoutSecs";
- const std::string kChainingAllowedFieldName = "chainingAllowed";
- const std::string kGetLastErrorDefaultsFieldName = "getLastErrorDefaults";
- const std::string kGetLastErrorModesFieldName = "getLastErrorModes";
+const std::string kIdFieldName = "_id";
+const std::string kMembersFieldName = "members";
+const std::string kSettingsFieldName = "settings";
+const std::string kStepDownCheckWriteConcernModeName = "$stepDownCheck";
+const std::string kProtocolVersionFieldName = "protocolVersion";
-} // namespace
-
- ReplicaSetConfig::ReplicaSetConfig() : _isInitialized(false),
- _heartbeatTimeoutPeriod(0),
- _protocolVersion(0) {}
-
- Status ReplicaSetConfig::initialize(const BSONObj& cfg) {
- _isInitialized = false;
- _members.clear();
- Status status = bsonCheckOnlyHasFields(
- "replica set configuration", cfg, kLegalConfigTopFieldNames);
- if (!status.isOK())
- return status;
-
- //
- // Parse replSetName
- //
- status = bsonExtractStringField(cfg, kIdFieldName, &_replSetName);
- if (!status.isOK())
- return status;
+const std::string kLegalConfigTopFieldNames[] = {kIdFieldName,
+ ReplicaSetConfig::kVersionFieldName,
+ kMembersFieldName,
+ kSettingsFieldName,
+ kProtocolVersionFieldName};
- //
- // Parse version
- //
- status = bsonExtractIntegerField(cfg, kVersionFieldName, &_version);
- if (!status.isOK())
- return status;
+const std::string kHeartbeatTimeoutFieldName = "heartbeatTimeoutSecs";
+const std::string kChainingAllowedFieldName = "chainingAllowed";
+const std::string kGetLastErrorDefaultsFieldName = "getLastErrorDefaults";
+const std::string kGetLastErrorModesFieldName = "getLastErrorModes";
- //
- // Parse members
- //
- BSONElement membersElement;
- status = bsonExtractTypedField(cfg, kMembersFieldName, Array, &membersElement);
- if (!status.isOK())
- return status;
-
- for (BSONObj::iterator membersIterator(membersElement.Obj()); membersIterator.more();) {
- BSONElement memberElement = membersIterator.next();
- if (memberElement.type() != Object) {
- return Status(ErrorCodes::TypeMismatch, str::stream() <<
- "Expected type of " << kMembersFieldName << "." <<
- memberElement.fieldName() << " to be Object, but found " <<
- typeName(memberElement.type()));
- }
- _members.resize(_members.size() + 1);
- status = _members.back().initialize(memberElement.Obj(), &_tagConfig);
- if (!status.isOK())
- return status;
- }
+} // namespace
- //
- // Parse settings
- //
- BSONElement settingsElement;
- status = bsonExtractTypedField(cfg, kSettingsFieldName, Object, &settingsElement);
- BSONObj settings;
- if (status.isOK()) {
- settings = settingsElement.Obj();
- }
- else if (status != ErrorCodes::NoSuchKey) {
- return status;
- }
- status = _parseSettingsSubdocument(settings);
+ReplicaSetConfig::ReplicaSetConfig()
+ : _isInitialized(false), _heartbeatTimeoutPeriod(0), _protocolVersion(0) {}
+
+Status ReplicaSetConfig::initialize(const BSONObj& cfg) {
+ _isInitialized = false;
+ _members.clear();
+ Status status =
+ bsonCheckOnlyHasFields("replica set configuration", cfg, kLegalConfigTopFieldNames);
+ if (!status.isOK())
+ return status;
+
+ //
+ // Parse replSetName
+ //
+ status = bsonExtractStringField(cfg, kIdFieldName, &_replSetName);
+ if (!status.isOK())
+ return status;
+
+ //
+ // Parse version
+ //
+ status = bsonExtractIntegerField(cfg, kVersionFieldName, &_version);
+ if (!status.isOK())
+ return status;
+
+ //
+ // Parse members
+ //
+ BSONElement membersElement;
+ status = bsonExtractTypedField(cfg, kMembersFieldName, Array, &membersElement);
+ if (!status.isOK())
+ return status;
+
+ for (BSONObj::iterator membersIterator(membersElement.Obj()); membersIterator.more();) {
+ BSONElement memberElement = membersIterator.next();
+ if (memberElement.type() != Object) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected type of " << kMembersFieldName << "."
+ << memberElement.fieldName() << " to be Object, but found "
+ << typeName(memberElement.type()));
+ }
+ _members.resize(_members.size() + 1);
+ status = _members.back().initialize(memberElement.Obj(), &_tagConfig);
if (!status.isOK())
return status;
+ }
- //
- // Parse protocol version
- //
- BSONElement protocolVersionElement;
- status = bsonExtractIntegerField(cfg, kProtocolVersionFieldName, &_protocolVersion);
- if (!status.isOK() && status != ErrorCodes::NoSuchKey) {
- return status;
- }
-
- _calculateMajorities();
- _addInternalWriteConcernModes();
- _isInitialized = true;
- return Status::OK();
+ //
+ // Parse settings
+ //
+ BSONElement settingsElement;
+ status = bsonExtractTypedField(cfg, kSettingsFieldName, Object, &settingsElement);
+ BSONObj settings;
+ if (status.isOK()) {
+ settings = settingsElement.Obj();
+ } else if (status != ErrorCodes::NoSuchKey) {
+ return status;
+ }
+ status = _parseSettingsSubdocument(settings);
+ if (!status.isOK())
+ return status;
+
+ //
+ // Parse protocol version
+ //
+ BSONElement protocolVersionElement;
+ status = bsonExtractIntegerField(cfg, kProtocolVersionFieldName, &_protocolVersion);
+ if (!status.isOK() && status != ErrorCodes::NoSuchKey) {
+ return status;
}
- Status ReplicaSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
- //
- // Parse heartbeatTimeoutSecs
- //
- BSONElement hbTimeoutSecsElement = settings[kHeartbeatTimeoutFieldName];
- if (hbTimeoutSecsElement.eoo()) {
- _heartbeatTimeoutPeriod = Seconds(kDefaultHeartbeatTimeoutPeriod);
- }
- else if (hbTimeoutSecsElement.isNumber()) {
- _heartbeatTimeoutPeriod = Seconds(hbTimeoutSecsElement.numberInt());
- }
- else {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected type of " <<
- kSettingsFieldName << "." << kHeartbeatTimeoutFieldName <<
- " to be a number, but found a value of type " <<
- typeName(hbTimeoutSecsElement.type()));
- }
+ _calculateMajorities();
+ _addInternalWriteConcernModes();
+ _isInitialized = true;
+ return Status::OK();
+}
+
+Status ReplicaSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
+ //
+ // Parse heartbeatTimeoutSecs
+ //
+ BSONElement hbTimeoutSecsElement = settings[kHeartbeatTimeoutFieldName];
+ if (hbTimeoutSecsElement.eoo()) {
+ _heartbeatTimeoutPeriod = Seconds(kDefaultHeartbeatTimeoutPeriod);
+ } else if (hbTimeoutSecsElement.isNumber()) {
+ _heartbeatTimeoutPeriod = Seconds(hbTimeoutSecsElement.numberInt());
+ } else {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected type of " << kSettingsFieldName << "."
+ << kHeartbeatTimeoutFieldName
+ << " to be a number, but found a value of type "
+ << typeName(hbTimeoutSecsElement.type()));
+ }
- //
- // Parse chainingAllowed
- //
- Status status = bsonExtractBooleanFieldWithDefault(settings,
- kChainingAllowedFieldName,
- true,
- &_chainingAllowed);
+ //
+ // Parse chainingAllowed
+ //
+ Status status = bsonExtractBooleanFieldWithDefault(
+ settings, kChainingAllowedFieldName, true, &_chainingAllowed);
+ if (!status.isOK())
+ return status;
+
+ //
+ // Parse getLastErrorDefaults
+ //
+ BSONElement gleDefaultsElement;
+ status = bsonExtractTypedField(
+ settings, kGetLastErrorDefaultsFieldName, Object, &gleDefaultsElement);
+ if (status.isOK()) {
+ status = _defaultWriteConcern.parse(gleDefaultsElement.Obj());
if (!status.isOK())
return status;
+ } else if (status == ErrorCodes::NoSuchKey) {
+ // Default write concern is w: 1.
+ _defaultWriteConcern.reset();
+ _defaultWriteConcern.wNumNodes = 1;
+ } else {
+ return status;
+ }
- //
- // Parse getLastErrorDefaults
- //
- BSONElement gleDefaultsElement;
- status = bsonExtractTypedField(settings,
- kGetLastErrorDefaultsFieldName,
- Object,
- &gleDefaultsElement);
- if (status.isOK()) {
- status = _defaultWriteConcern.parse(gleDefaultsElement.Obj());
- if (!status.isOK())
- return status;
- }
- else if (status == ErrorCodes::NoSuchKey) {
- // Default write concern is w: 1.
- _defaultWriteConcern.reset();
- _defaultWriteConcern.wNumNodes = 1;
- }
- else {
- return status;
- }
+ //
+ // Parse getLastErrorModes
+ //
+ BSONElement gleModesElement;
+ status = bsonExtractTypedField(settings, kGetLastErrorModesFieldName, Object, &gleModesElement);
+ BSONObj gleModes;
+ if (status.isOK()) {
+ gleModes = gleModesElement.Obj();
+ } else if (status != ErrorCodes::NoSuchKey) {
+ return status;
+ }
- //
- // Parse getLastErrorModes
- //
- BSONElement gleModesElement;
- status = bsonExtractTypedField(settings,
- kGetLastErrorModesFieldName,
- Object,
- &gleModesElement);
- BSONObj gleModes;
- if (status.isOK()) {
- gleModes = gleModesElement.Obj();
- }
- else if (status != ErrorCodes::NoSuchKey) {
- return status;
+ for (BSONObj::iterator gleModeIter(gleModes); gleModeIter.more();) {
+ const BSONElement modeElement = gleModeIter.next();
+ if (_customWriteConcernModes.find(modeElement.fieldNameStringData()) !=
+ _customWriteConcernModes.end()) {
+ return Status(ErrorCodes::DuplicateKey,
+ str::stream() << kSettingsFieldName << '.' << kGetLastErrorModesFieldName
+ << " contains multiple fields named "
+ << modeElement.fieldName());
+ }
+ if (modeElement.type() != Object) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.'
+ << modeElement.fieldName() << " to be an Object, not "
+ << typeName(modeElement.type()));
}
-
- for (BSONObj::iterator gleModeIter(gleModes); gleModeIter.more();) {
- const BSONElement modeElement = gleModeIter.next();
- if (_customWriteConcernModes.find(modeElement.fieldNameStringData()) !=
- _customWriteConcernModes.end()) {
-
- return Status(ErrorCodes::DuplicateKey, str::stream() << kSettingsFieldName <<
- '.' << kGetLastErrorModesFieldName <<
- " contains multiple fields named " << modeElement.fieldName());
+ ReplicaSetTagPattern pattern = _tagConfig.makePattern();
+ for (BSONObj::iterator constraintIter(modeElement.Obj()); constraintIter.more();) {
+ const BSONElement constraintElement = constraintIter.next();
+ if (!constraintElement.isNumber()) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
+ << '.' << constraintElement.fieldName() << " to be a number, not "
+ << typeName(constraintElement.type()));
}
- if (modeElement.type() != Object) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected " <<
- kSettingsFieldName << '.' << kGetLastErrorModesFieldName << '.' <<
- modeElement.fieldName() << " to be an Object, not " <<
- typeName(modeElement.type()));
+ const int minCount = constraintElement.numberInt();
+ if (minCount <= 0) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Value of " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName << '.'
+ << modeElement.fieldName() << '.'
+ << constraintElement.fieldName()
+ << " must be positive, but found " << minCount);
}
- ReplicaSetTagPattern pattern = _tagConfig.makePattern();
- for (BSONObj::iterator constraintIter(modeElement.Obj()); constraintIter.more();) {
- const BSONElement constraintElement = constraintIter.next();
- if (!constraintElement.isNumber()) {
- return Status(ErrorCodes::TypeMismatch, str::stream() << "Expected " <<
- kSettingsFieldName << '.' << kGetLastErrorModesFieldName << '.' <<
- modeElement.fieldName() << '.' << constraintElement.fieldName() <<
- " to be a number, not " << typeName(constraintElement.type()));
- }
- const int minCount = constraintElement.numberInt();
- if (minCount <= 0) {
- return Status(ErrorCodes::BadValue, str::stream() << "Value of " <<
- kSettingsFieldName << '.' << kGetLastErrorModesFieldName << '.' <<
- modeElement.fieldName() << '.' << constraintElement.fieldName() <<
- " must be positive, but found " << minCount);
- }
- status = _tagConfig.addTagCountConstraintToPattern(
- &pattern,
- constraintElement.fieldNameStringData(),
- minCount);
- if (!status.isOK()) {
- return status;
- }
+ status = _tagConfig.addTagCountConstraintToPattern(
+ &pattern, constraintElement.fieldNameStringData(), minCount);
+ if (!status.isOK()) {
+ return status;
}
- _customWriteConcernModes[modeElement.fieldNameStringData()] = pattern;
}
- return Status::OK();
+ _customWriteConcernModes[modeElement.fieldNameStringData()] = pattern;
+ }
+ return Status::OK();
+}
+
+Status ReplicaSetConfig::validate() const {
+ if (_version <= 0 || _version > std::numeric_limits<int>::max()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << kVersionFieldName << " field value of " << _version
+ << " is out of range");
+ }
+ if (_replSetName.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Replica set configuration must have non-empty "
+ << kIdFieldName << " field");
+ }
+ if (_heartbeatTimeoutPeriod < Seconds(0)) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << kSettingsFieldName << '.' << kHeartbeatTimeoutFieldName
+ << " field value must be non-negative, "
+ "but found " << _heartbeatTimeoutPeriod.count());
+ }
+ if (_members.size() > kMaxMembers || _members.empty()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Replica set configuration contains " << _members.size()
+ << " members, but must have at least 1 and no more than "
+ << kMaxMembers);
}
- Status ReplicaSetConfig::validate() const {
- if (_version <= 0 || _version > std::numeric_limits<int>::max()) {
- return Status(ErrorCodes::BadValue, str::stream() << kVersionFieldName <<
- " field value of " << _version << " is out of range");
- }
- if (_replSetName.empty()) {
- return Status(ErrorCodes::BadValue, str::stream() <<
- "Replica set configuration must have non-empty " << kIdFieldName <<
- " field");
+ size_t localhostCount = 0;
+ size_t voterCount = 0;
+ size_t arbiterCount = 0;
+ size_t electableCount = 0;
+ for (size_t i = 0; i < _members.size(); ++i) {
+ const MemberConfig& memberI = _members[i];
+ Status status = memberI.validate();
+ if (!status.isOK())
+ return status;
+ if (memberI.getHostAndPort().isLocalHost()) {
+ ++localhostCount;
}
- if (_heartbeatTimeoutPeriod < Seconds(0)) {
- return Status(ErrorCodes::BadValue, str::stream() << kSettingsFieldName << '.' <<
- kHeartbeatTimeoutFieldName << " field value must be non-negative, "
- "but found " << _heartbeatTimeoutPeriod.count());
+ if (memberI.isVoter()) {
+ ++voterCount;
}
- if (_members.size() > kMaxMembers || _members.empty()) {
- return Status(ErrorCodes::BadValue, str::stream() <<
- "Replica set configuration contains " << _members.size() <<
- " members, but must have at least 1 and no more than " << kMaxMembers);
+ // Nodes may be arbiters or electable, or neither, but never both.
+ if (memberI.isArbiter()) {
+ ++arbiterCount;
+ } else if (memberI.getPriority() > 0) {
+ ++electableCount;
}
-
- size_t localhostCount = 0;
- size_t voterCount = 0;
- size_t arbiterCount = 0;
- size_t electableCount = 0;
- for (size_t i = 0; i < _members.size(); ++i) {
- const MemberConfig& memberI = _members[i];
- Status status = memberI.validate();
- if (!status.isOK())
- return status;
- if (memberI.getHostAndPort().isLocalHost()) {
- ++localhostCount;
- }
- if (memberI.isVoter()) {
- ++voterCount;
- }
- // Nodes may be arbiters or electable, or neither, but never both.
- if (memberI.isArbiter()) {
- ++arbiterCount;
- }
- else if (memberI.getPriority() > 0) {
- ++electableCount;
+ for (size_t j = 0; j < _members.size(); ++j) {
+ if (i == j)
+ continue;
+ const MemberConfig& memberJ = _members[j];
+ if (memberI.getId() == memberJ.getId()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream()
+ << "Found two member configurations with same "
+ << MemberConfig::kIdFieldName << " field, " << kMembersFieldName
+ << "." << i << "." << MemberConfig::kIdFieldName
+ << " == " << kMembersFieldName << "." << j << "."
+ << MemberConfig::kIdFieldName << " == " << memberI.getId());
}
- for (size_t j = 0; j < _members.size(); ++j) {
- if (i == j)
- continue;
- const MemberConfig& memberJ = _members[j];
- if (memberI.getId() == memberJ.getId()) {
- return Status(
- ErrorCodes::BadValue, str::stream() <<
- "Found two member configurations with same " <<
- MemberConfig::kIdFieldName << " field, " <<
- kMembersFieldName << "." << i << "." << MemberConfig::kIdFieldName <<
- " == " <<
- kMembersFieldName << "." << j << "." << MemberConfig::kIdFieldName <<
- " == " << memberI.getId());
- }
- if (memberI.getHostAndPort() == memberJ.getHostAndPort()) {
- return Status(
- ErrorCodes::BadValue, str::stream() <<
- "Found two member configurations with same " <<
- MemberConfig::kHostFieldName << " field, " <<
- kMembersFieldName << "." << i << "." << MemberConfig::kHostFieldName <<
- " == " <<
- kMembersFieldName << "." << j << "." << MemberConfig::kHostFieldName <<
- " == " << memberI.getHostAndPort().toString());
- }
+ if (memberI.getHostAndPort() == memberJ.getHostAndPort()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Found two member configurations with same "
+ << MemberConfig::kHostFieldName << " field, "
+ << kMembersFieldName << "." << i << "."
+ << MemberConfig::kHostFieldName
+ << " == " << kMembersFieldName << "." << j << "."
+ << MemberConfig::kHostFieldName
+ << " == " << memberI.getHostAndPort().toString());
}
}
+ }
- if (localhostCount != 0 && localhostCount != _members.size()) {
- return Status(ErrorCodes::BadValue, str::stream() <<
- "Either all host names in a replica set configuration must be localhost "
- "references, or none must be; found " << localhostCount << " out of " <<
- _members.size());
- }
+ if (localhostCount != 0 && localhostCount != _members.size()) {
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream()
+ << "Either all host names in a replica set configuration must be localhost "
+ "references, or none must be; found " << localhostCount << " out of "
+ << _members.size());
+ }
- if (voterCount > kMaxVotingMembers || voterCount == 0) {
- return Status(ErrorCodes::BadValue, str::stream() <<
- "Replica set configuration contains " << voterCount <<
- " voting members, but must be at least 1 and no more than " <<
- kMaxVotingMembers);
- }
+ if (voterCount > kMaxVotingMembers || voterCount == 0) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Replica set configuration contains " << voterCount
+ << " voting members, but must be at least 1 and no more than "
+ << kMaxVotingMembers);
+ }
- if (electableCount == 0) {
- return Status(ErrorCodes::BadValue, "Replica set configuration must contain at least "
- "one non-arbiter member with priority > 0");
- }
+ if (electableCount == 0) {
+ return Status(ErrorCodes::BadValue,
+ "Replica set configuration must contain at least "
+ "one non-arbiter member with priority > 0");
+ }
- // TODO(schwerin): Validate satisfiability of write modes? Omitting for backwards
- // compatibility.
- if (_defaultWriteConcern.wMode.empty()) {
- if (_defaultWriteConcern.wNumNodes == 0) {
- return Status(ErrorCodes::BadValue,
- "Default write concern mode must wait for at least 1 member");
- }
+ // TODO(schwerin): Validate satisfiability of write modes? Omitting for backwards
+ // compatibility.
+ if (_defaultWriteConcern.wMode.empty()) {
+ if (_defaultWriteConcern.wNumNodes == 0) {
+ return Status(ErrorCodes::BadValue,
+ "Default write concern mode must wait for at least 1 member");
}
- else {
- if (WriteConcernOptions::kMajority != _defaultWriteConcern.wMode &&
- !findCustomWriteMode(_defaultWriteConcern.wMode).isOK()) {
- return Status(ErrorCodes::BadValue, str::stream() <<
- "Default write concern requires undefined write mode " <<
- _defaultWriteConcern.wMode);
- }
- }
-
- if (_protocolVersion < 0 || _protocolVersion > std::numeric_limits<int>::max()) {
- return Status(ErrorCodes::BadValue, str::stream() << kProtocolVersionFieldName <<
- " field value of " << _protocolVersion << " is out of range");
+ } else {
+ if (WriteConcernOptions::kMajority != _defaultWriteConcern.wMode &&
+ !findCustomWriteMode(_defaultWriteConcern.wMode).isOK()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Default write concern requires undefined write mode "
+ << _defaultWriteConcern.wMode);
}
-
- return Status::OK();
}
- Status ReplicaSetConfig::checkIfWriteConcernCanBeSatisfied(
- const WriteConcernOptions& writeConcern) const {
- if (!writeConcern.wMode.empty() && writeConcern.wMode != WriteConcernOptions::kMajority) {
- StatusWith<ReplicaSetTagPattern> tagPatternStatus =
- findCustomWriteMode(writeConcern.wMode);
- if (!tagPatternStatus.isOK()) {
- return tagPatternStatus.getStatus();
- }
+ if (_protocolVersion < 0 || _protocolVersion > std::numeric_limits<int>::max()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << kProtocolVersionFieldName << " field value of "
+ << _protocolVersion << " is out of range");
+ }
- ReplicaSetTagMatch matcher(tagPatternStatus.getValue());
- for (size_t j = 0; j < _members.size(); ++j) {
- const MemberConfig& memberConfig = _members[j];
- for (MemberConfig::TagIterator it = memberConfig.tagsBegin();
- it != memberConfig.tagsEnd(); ++it) {
- if (matcher.update(*it)) {
- return Status::OK();
- }
+ return Status::OK();
+}
+
+Status ReplicaSetConfig::checkIfWriteConcernCanBeSatisfied(
+ const WriteConcernOptions& writeConcern) const {
+ if (!writeConcern.wMode.empty() && writeConcern.wMode != WriteConcernOptions::kMajority) {
+ StatusWith<ReplicaSetTagPattern> tagPatternStatus = findCustomWriteMode(writeConcern.wMode);
+ if (!tagPatternStatus.isOK()) {
+ return tagPatternStatus.getStatus();
+ }
+
+ ReplicaSetTagMatch matcher(tagPatternStatus.getValue());
+ for (size_t j = 0; j < _members.size(); ++j) {
+ const MemberConfig& memberConfig = _members[j];
+ for (MemberConfig::TagIterator it = memberConfig.tagsBegin();
+ it != memberConfig.tagsEnd();
+ ++it) {
+ if (matcher.update(*it)) {
+ return Status::OK();
}
}
- // Even if all the nodes in the set had a given write it still would not satisfy this
- // write concern mode.
- return Status(ErrorCodes::CannotSatisfyWriteConcern,
- str::stream() << "Not enough nodes match write concern mode \""
- << writeConcern.wMode << "\"");
}
- else {
- int nodesRemaining = writeConcern.wNumNodes;
- for (size_t j = 0; j < _members.size(); ++j) {
- if (!_members[j].isArbiter()) { // Only count data-bearing nodes
- --nodesRemaining;
- if (nodesRemaining <= 0) {
- return Status::OK();
- }
+ // Even if all the nodes in the set had a given write it still would not satisfy this
+ // write concern mode.
+ return Status(ErrorCodes::CannotSatisfyWriteConcern,
+ str::stream() << "Not enough nodes match write concern mode \""
+ << writeConcern.wMode << "\"");
+ } else {
+ int nodesRemaining = writeConcern.wNumNodes;
+ for (size_t j = 0; j < _members.size(); ++j) {
+ if (!_members[j].isArbiter()) { // Only count data-bearing nodes
+ --nodesRemaining;
+ if (nodesRemaining <= 0) {
+ return Status::OK();
}
}
- return Status(ErrorCodes::CannotSatisfyWriteConcern, "Not enough data-bearing nodes");
}
+ return Status(ErrorCodes::CannotSatisfyWriteConcern, "Not enough data-bearing nodes");
}
+}
- const MemberConfig& ReplicaSetConfig::getMemberAt(size_t i) const {
- invariant(i < _members.size());
- return _members[i];
- }
+const MemberConfig& ReplicaSetConfig::getMemberAt(size_t i) const {
+ invariant(i < _members.size());
+ return _members[i];
+}
- const MemberConfig* ReplicaSetConfig::findMemberByID(int id) const {
- for (std::vector<MemberConfig>::const_iterator it = _members.begin();
- it != _members.end(); ++it) {
- if (it->getId() == id) {
- return &(*it);
- }
+const MemberConfig* ReplicaSetConfig::findMemberByID(int id) const {
+ for (std::vector<MemberConfig>::const_iterator it = _members.begin(); it != _members.end();
+ ++it) {
+ if (it->getId() == id) {
+ return &(*it);
}
- return NULL;
}
+ return NULL;
+}
- const int ReplicaSetConfig::findMemberIndexByHostAndPort(const HostAndPort& hap) const {
- int x = 0;
- for (std::vector<MemberConfig>::const_iterator it = _members.begin();
- it != _members.end(); ++it) {
-
- if (it->getHostAndPort() == hap) {
- return x;
- }
- ++x;
+const int ReplicaSetConfig::findMemberIndexByHostAndPort(const HostAndPort& hap) const {
+ int x = 0;
+ for (std::vector<MemberConfig>::const_iterator it = _members.begin(); it != _members.end();
+ ++it) {
+ if (it->getHostAndPort() == hap) {
+ return x;
}
- return -1;
+ ++x;
}
+ return -1;
+}
- const int ReplicaSetConfig::findMemberIndexByConfigId(long long configId) const {
- int x = 0;
- for (const auto& member : _members) {
-
- if (member.getId() == configId) {
- return x;
- }
- ++x;
+const int ReplicaSetConfig::findMemberIndexByConfigId(long long configId) const {
+ int x = 0;
+ for (const auto& member : _members) {
+ if (member.getId() == configId) {
+ return x;
}
- return -1;
+ ++x;
}
-
- const MemberConfig* ReplicaSetConfig::findMemberByHostAndPort(const HostAndPort& hap) const {
- int idx = findMemberIndexByHostAndPort(hap);
- return idx != -1 ? &getMemberAt(idx) : NULL;
+ return -1;
+}
+
+const MemberConfig* ReplicaSetConfig::findMemberByHostAndPort(const HostAndPort& hap) const {
+ int idx = findMemberIndexByHostAndPort(hap);
+ return idx != -1 ? &getMemberAt(idx) : NULL;
+}
+
+ReplicaSetTag ReplicaSetConfig::findTag(StringData key, StringData value) const {
+ return _tagConfig.findTag(key, value);
+}
+
+StatusWith<ReplicaSetTagPattern> ReplicaSetConfig::findCustomWriteMode(
+ StringData patternName) const {
+ const StringMap<ReplicaSetTagPattern>::const_iterator iter =
+ _customWriteConcernModes.find(patternName);
+ if (iter == _customWriteConcernModes.end()) {
+ return StatusWith<ReplicaSetTagPattern>(
+ ErrorCodes::UnknownReplWriteConcern,
+ str::stream() << "No write concern mode named '" << escape(patternName.toString())
+ << "' found in replica set configuration");
}
-
- ReplicaSetTag ReplicaSetConfig::findTag(StringData key, StringData value) const {
- return _tagConfig.findTag(key, value);
+ return StatusWith<ReplicaSetTagPattern>(iter->second);
+}
+
+void ReplicaSetConfig::_calculateMajorities() {
+ const int voters = std::count_if(_members.begin(),
+ _members.end(),
+ stdx::bind(&MemberConfig::isVoter, stdx::placeholders::_1));
+ const int arbiters =
+ std::count_if(_members.begin(),
+ _members.end(),
+ stdx::bind(&MemberConfig::isArbiter, stdx::placeholders::_1));
+ _totalVotingMembers = voters;
+ _majorityVoteCount = voters / 2 + 1;
+ _writeMajority = std::min(_majorityVoteCount, voters - arbiters);
+}
+
+void ReplicaSetConfig::_addInternalWriteConcernModes() {
+ // $majority: the majority of voting nodes or all non-arbiter voting nodes if
+ // the majority of voting nodes are arbiters.
+ ReplicaSetTagPattern pattern = _tagConfig.makePattern();
+
+ Status status = _tagConfig.addTagCountConstraintToPattern(
+ &pattern, MemberConfig::kInternalVoterTagName, _writeMajority);
+
+ if (status.isOK()) {
+ _customWriteConcernModes[kMajorityWriteConcernModeName] = pattern;
+ } else if (status != ErrorCodes::NoSuchKey) {
+ // NoSuchKey means we have no $voter-tagged nodes in this config;
+ // other errors are unexpected.
+ fassert(28693, status);
}
- StatusWith<ReplicaSetTagPattern> ReplicaSetConfig::findCustomWriteMode(
- StringData patternName) const {
-
- const StringMap<ReplicaSetTagPattern>::const_iterator iter = _customWriteConcernModes.find(
- patternName);
- if (iter == _customWriteConcernModes.end()) {
- return StatusWith<ReplicaSetTagPattern>(
- ErrorCodes::UnknownReplWriteConcern,
- str::stream() <<
- "No write concern mode named '" << escape(patternName.toString()) <<
- "' found in replica set configuration");
- }
- return StatusWith<ReplicaSetTagPattern>(iter->second);
+ // $stepDownCheck: one electable node plus ourselves
+ pattern = _tagConfig.makePattern();
+ status = _tagConfig.addTagCountConstraintToPattern(
+ &pattern, MemberConfig::kInternalElectableTagName, 2);
+ if (status.isOK()) {
+ _customWriteConcernModes[kStepDownCheckWriteConcernModeName] = pattern;
+ } else if (status != ErrorCodes::NoSuchKey) {
+ // NoSuchKey means we have no $electable-tagged nodes in this config;
+ // other errors are unexpected
+ fassert(28694, status);
}
+}
- void ReplicaSetConfig::_calculateMajorities() {
- const int voters = std::count_if(
- _members.begin(),
- _members.end(),
- stdx::bind(&MemberConfig::isVoter, stdx::placeholders::_1));
- const int arbiters = std::count_if(
- _members.begin(),
- _members.end(),
- stdx::bind(&MemberConfig::isArbiter, stdx::placeholders::_1));
- _totalVotingMembers = voters;
- _majorityVoteCount = voters / 2 + 1;
- _writeMajority = std::min(_majorityVoteCount, voters - arbiters);
- }
-
- void ReplicaSetConfig::_addInternalWriteConcernModes() {
- // $majority: the majority of voting nodes or all non-arbiter voting nodes if
- // the majority of voting nodes are arbiters.
- ReplicaSetTagPattern pattern = _tagConfig.makePattern();
-
- Status status = _tagConfig.addTagCountConstraintToPattern(
- &pattern,
- MemberConfig::kInternalVoterTagName,
- _writeMajority);
-
- if (status.isOK()) {
- _customWriteConcernModes[kMajorityWriteConcernModeName] = pattern;
- }
- else if (status != ErrorCodes::NoSuchKey) {
- // NoSuchKey means we have no $voter-tagged nodes in this config;
- // other errors are unexpected.
- fassert(28693, status);
- }
+BSONObj ReplicaSetConfig::toBSON() const {
+ BSONObjBuilder configBuilder;
+ configBuilder.append(kIdFieldName, _replSetName);
+ configBuilder.appendIntOrLL(kVersionFieldName, _version);
- // $stepDownCheck: one electable node plus ourselves
- pattern = _tagConfig.makePattern();
- status = _tagConfig.addTagCountConstraintToPattern(&pattern,
- MemberConfig::kInternalElectableTagName,
- 2);
- if (status.isOK()) {
- _customWriteConcernModes[kStepDownCheckWriteConcernModeName] = pattern;
- }
- else if (status != ErrorCodes::NoSuchKey) {
- // NoSuchKey means we have no $electable-tagged nodes in this config;
- // other errors are unexpected
- fassert(28694, status);
- }
+ BSONArrayBuilder members(configBuilder.subarrayStart(kMembersFieldName));
+ for (MemberIterator mem = membersBegin(); mem != membersEnd(); mem++) {
+ members.append(mem->toBSON(getTagConfig()));
}
-
- BSONObj ReplicaSetConfig::toBSON() const {
- BSONObjBuilder configBuilder;
- configBuilder.append(kIdFieldName, _replSetName);
- configBuilder.appendIntOrLL(kVersionFieldName, _version);
-
- BSONArrayBuilder members(configBuilder.subarrayStart(kMembersFieldName));
- for (MemberIterator mem = membersBegin(); mem != membersEnd(); mem++) {
- members.append(mem->toBSON(getTagConfig()));
- }
- members.done();
-
- BSONObjBuilder settingsBuilder(configBuilder.subobjStart(kSettingsFieldName));
- settingsBuilder.append(kChainingAllowedFieldName, _chainingAllowed);
- settingsBuilder.appendIntOrLL(kHeartbeatTimeoutFieldName, _heartbeatTimeoutPeriod.count());
-
- BSONObjBuilder gleModes(settingsBuilder.subobjStart(kGetLastErrorModesFieldName));
- for (StringMap<ReplicaSetTagPattern>::const_iterator mode =
- _customWriteConcernModes.begin();
- mode != _customWriteConcernModes.end();
- ++mode) {
- if (mode->first[0] == '$') {
- // Filter out internal modes
- continue;
- }
- BSONObjBuilder modeBuilder(gleModes.subobjStart(mode->first));
- for (ReplicaSetTagPattern::ConstraintIterator itr = mode->second.constraintsBegin();
- itr != mode->second.constraintsEnd();
- itr++) {
- modeBuilder.append(_tagConfig.getTagKey(ReplicaSetTag(itr->getKeyIndex(), 0)),
- itr->getMinCount());
- }
- modeBuilder.done();
- }
- gleModes.done();
-
- settingsBuilder.append(kGetLastErrorDefaultsFieldName, _defaultWriteConcern.toBSON());
- settingsBuilder.append(kProtocolVersionFieldName, _protocolVersion);
- settingsBuilder.done();
- return configBuilder.obj();
+ members.done();
+
+ BSONObjBuilder settingsBuilder(configBuilder.subobjStart(kSettingsFieldName));
+ settingsBuilder.append(kChainingAllowedFieldName, _chainingAllowed);
+ settingsBuilder.appendIntOrLL(kHeartbeatTimeoutFieldName, _heartbeatTimeoutPeriod.count());
+
+ BSONObjBuilder gleModes(settingsBuilder.subobjStart(kGetLastErrorModesFieldName));
+ for (StringMap<ReplicaSetTagPattern>::const_iterator mode = _customWriteConcernModes.begin();
+ mode != _customWriteConcernModes.end();
+ ++mode) {
+ if (mode->first[0] == '$') {
+ // Filter out internal modes
+ continue;
+ }
+ BSONObjBuilder modeBuilder(gleModes.subobjStart(mode->first));
+ for (ReplicaSetTagPattern::ConstraintIterator itr = mode->second.constraintsBegin();
+ itr != mode->second.constraintsEnd();
+ itr++) {
+ modeBuilder.append(_tagConfig.getTagKey(ReplicaSetTag(itr->getKeyIndex(), 0)),
+ itr->getMinCount());
+ }
+ modeBuilder.done();
}
-
- std::vector<std::string> ReplicaSetConfig::getWriteConcernNames() const {
- std::vector<std::string> names;
- for (StringMap<ReplicaSetTagPattern>::const_iterator mode =
- _customWriteConcernModes.begin();
- mode != _customWriteConcernModes.end();
- ++mode) {
- names.push_back(mode->first);
- }
- return names;
- }
+ gleModes.done();
+
+ settingsBuilder.append(kGetLastErrorDefaultsFieldName, _defaultWriteConcern.toBSON());
+ settingsBuilder.append(kProtocolVersionFieldName, _protocolVersion);
+ settingsBuilder.done();
+ return configBuilder.obj();
+}
+
+std::vector<std::string> ReplicaSetConfig::getWriteConcernNames() const {
+ std::vector<std::string> names;
+ for (StringMap<ReplicaSetTagPattern>::const_iterator mode = _customWriteConcernModes.begin();
+ mode != _customWriteConcernModes.end();
+ ++mode) {
+ names.push_back(mode->first);
+ }
+ return names;
+}
} // namespace repl
} // namespace mongo
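
The arithmetic inside _calculateMajorities() above is compact enough to restate as a standalone example; this sketch (not part of the patch) mirrors the two formulas and checks one concrete case.

#include <algorithm>
#include <cassert>

struct Majorities {
    int majorityVoteCount;  // votes required to win an election
    int writeMajority;      // data-bearing voters required for w:"majority"
};

Majorities calculateMajorities(int voters, int arbiters) {
    Majorities m;
    m.majorityVoteCount = voters / 2 + 1;
    m.writeMajority = std::min(m.majorityVoteCount, voters - arbiters);
    return m;
}

int main() {
    // Five voting members, two of them arbiters: an election needs 3 votes, and
    // since only 3 data-bearing voters exist, w:"majority" also requires 3 of them.
    Majorities m = calculateMajorities(5, 2);
    assert(m.majorityVoteCount == 3 && m.writeMajority == 3);
    return 0;
}
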
diff --git a/src/mongo/db/repl/replica_set_config.h b/src/mongo/db/repl/replica_set_config.h
index fcd880705ea..2db58856234 100644
--- a/src/mongo/db/repl/replica_set_config.h
+++ b/src/mongo/db/repl/replica_set_config.h
@@ -41,219 +41,249 @@
namespace mongo {
- class BSONObj;
+class BSONObj;
namespace repl {
+/**
+ * Representation of the configuration information about a particular replica set.
+ */
+class ReplicaSetConfig {
+public:
+ typedef std::vector<MemberConfig>::const_iterator MemberIterator;
+
+ static const std::string kVersionFieldName;
+ static const std::string kMajorityWriteConcernModeName;
+
+ static const size_t kMaxMembers = 50;
+ static const size_t kMaxVotingMembers = 7;
+ static const Seconds kDefaultHeartbeatTimeoutPeriod;
+
+ ReplicaSetConfig();
+ std::string asBson() {
+ return "";
+ }
+ /**
+ * Initializes this ReplicaSetConfig from the contents of "cfg".
+ */
+ Status initialize(const BSONObj& cfg);
+
+ /**
+ * Returns true if this object has been successfully initialized or copied from
+ * an initialized object.
+ */
+ bool isInitialized() const {
+ return _isInitialized;
+ }
+
+ /**
+ * Performs basic consistency checks on the replica set configuration.
+ */
+ Status validate() const;
+
+ /**
+ * Checks if this configuration can satisfy the given write concern.
+ *
+ * Things that are taken into consideration include:
+ * 1. If the set has enough data-bearing members.
+ * 2. If the write concern mode exists.
+ * 3. If there are enough members for the write concern mode specified.
+ */
+ Status checkIfWriteConcernCanBeSatisfied(const WriteConcernOptions& writeConcern) const;
+
+ /**
+ * Gets the version of this configuration.
+ *
+ * The version number sequences configurations of the replica set, so that
+ * nodes may distinguish between "older" and "newer" configurations.
+ */
+ long long getConfigVersion() const {
+ return _version;
+ }
+
+ /**
+ * Gets the name (_id field value) of the replica set described by this configuration.
+ */
+ const std::string& getReplSetName() const {
+ return _replSetName;
+ }
+
+ /**
+ * Gets the number of members in this configuration.
+ */
+ int getNumMembers() const {
+ return _members.size();
+ }
+
+ /**
+ * Gets a begin iterator over the MemberConfigs stored in this ReplicaSetConfig.
+ */
+ MemberIterator membersBegin() const {
+ return _members.begin();
+ }
+
+ /**
+ * Gets an end iterator over the MemberConfigs stored in this ReplicaSetConfig.
+ */
+ MemberIterator membersEnd() const {
+ return _members.end();
+ }
+
+ /**
+ * Access a MemberConfig element by index.
+ */
+ const MemberConfig& getMemberAt(size_t i) const;
+
+ /**
+ * Returns a pointer to the MemberConfig corresponding to the member with the given _id in
+ * the config, or NULL if there is no member with that ID.
+ */
+ const MemberConfig* findMemberByID(int id) const;
+
+ /**
+ * Returns a pointer to the MemberConfig corresponding to the member with the given
+ * HostAndPort in the config, or NULL if there is no member with that address.
+ */
+ const MemberConfig* findMemberByHostAndPort(const HostAndPort& hap) const;
+
+ /**
+ * Returns a MemberConfig index position corresponding to the member with the given
+ * HostAndPort in the config, or -1 if there is no member with that address.
+ */
+ const int findMemberIndexByHostAndPort(const HostAndPort& hap) const;
+
+ /**
+ * Returns a MemberConfig index position corresponding to the member with the given
+     * _id in the config, or -1 if there is no member with that _id.
+ */
+ const int findMemberIndexByConfigId(long long configId) const;
+
+ /**
+ * Gets the default write concern for the replica set described by this configuration.
+ */
+ const WriteConcernOptions& getDefaultWriteConcern() const {
+ return _defaultWriteConcern;
+ }
+
+ /**
+     * Gets the amount of time to wait for a response to heartbeats sent to other
+ * nodes in the replica set.
+ */
+ Seconds getHeartbeatTimeoutPeriod() const {
+ return _heartbeatTimeoutPeriod;
+ }
+
+ /**
+     * Gets the amount of time to wait for a response to heartbeats sent to other
+ * nodes in the replica set, as above, but returns a Milliseconds instead of
+ * Seconds object.
+ */
+ Milliseconds getHeartbeatTimeoutPeriodMillis() const {
+ return _heartbeatTimeoutPeriod;
+ }
+
+ /**
+ * Gets the number of votes required to win an election.
+ */
+ int getMajorityVoteCount() const {
+ return _majorityVoteCount;
+ }
+
+ /**
+ * Gets the number of voters.
+ */
+ int getTotalVotingMembers() const {
+ return _totalVotingMembers;
+ }
+
+ /**
+ * Returns true if automatic (not explicitly set) chaining is allowed.
+ */
+ bool isChainingAllowed() const {
+ return _chainingAllowed;
+ }
+
+ /**
+ * Returns a ReplicaSetTag with the given "key" and "value", or an invalid
+ * tag if the configuration describes no such tag.
+ */
+ ReplicaSetTag findTag(StringData key, StringData value) const;
+
+ /**
+ * Returns the pattern corresponding to "patternName" in this configuration.
+ * If "patternName" is not a valid pattern in this configuration, returns
+ * ErrorCodes::NoSuchKey.
+ */
+ StatusWith<ReplicaSetTagPattern> findCustomWriteMode(StringData patternName) const;
+
+ /**
+     * Returns the "tags configuration" for this replica set.
+ *
+ * NOTE(schwerin): Not clear if this should be used other than for reporting/debugging.
+ */
+ const ReplicaSetTagConfig& getTagConfig() const {
+ return _tagConfig;
+ }
+
+ /**
+ * Returns the config as a BSONObj.
+ */
+ BSONObj toBSON() const;
+
+ /**
+ * Returns a vector of strings which are the names of the WriteConcernModes.
+ * Currently used in unit tests to compare two configs.
+ */
+ std::vector<std::string> getWriteConcernNames() const;
+
+ /**
+ * Returns the number of voting data-bearing members that must acknowledge a write
+ * in order to satisfy a write concern of {w: "majority"}.
+ */
+ int getWriteMajority() const {
+ return _writeMajority;
+ }
+
+ /**
+ * Gets the protocol version for this configuration.
+ *
+ * The protocol version number currently determines what election protocol is used by the
+ * cluster; 0 is the default and indicates the old 3.0 election protocol.
+ */
+ long long getProtocolVersion() const {
+ return _protocolVersion;
+ }
+
+private:
+ /**
+ * Parses the "settings" subdocument of a replica set configuration.
+ */
+ Status _parseSettingsSubdocument(const BSONObj& settings);
+
+ /**
+ * Calculates and stores the majority for electing a primary (_majorityVoteCount).
+ */
+ void _calculateMajorities();
+
/**
- * Representation of the configuration information about a particular replica set.
+ * Adds internal write concern modes to the getLastErrorModes list.
*/
- class ReplicaSetConfig {
- public:
- typedef std::vector<MemberConfig>::const_iterator MemberIterator;
-
- static const std::string kVersionFieldName;
- static const std::string kMajorityWriteConcernModeName;
-
- static const size_t kMaxMembers = 50;
- static const size_t kMaxVotingMembers = 7;
- static const Seconds kDefaultHeartbeatTimeoutPeriod;
-
- ReplicaSetConfig();
- std::string asBson() { return ""; }
- /**
- * Initializes this ReplicaSetConfig from the contents of "cfg".
- */
- Status initialize(const BSONObj& cfg);
-
- /**
- * Returns true if this object has been successfully initialized or copied from
- * an initialized object.
- */
- bool isInitialized() const { return _isInitialized; }
-
- /**
- * Performs basic consistency checks on the replica set configuration.
- */
- Status validate() const;
-
- /**
- * Checks if this configuration can satisfy the given write concern.
- *
- * Things that are taken into consideration include:
- * 1. If the set has enough data-bearing members.
- * 2. If the write concern mode exists.
- * 3. If there are enough members for the write concern mode specified.
- */
- Status checkIfWriteConcernCanBeSatisfied(const WriteConcernOptions& writeConcern) const;
-
- /**
- * Gets the version of this configuration.
- *
- * The version number sequences configurations of the replica set, so that
- * nodes may distinguish between "older" and "newer" configurations.
- */
- long long getConfigVersion() const { return _version; }
-
- /**
- * Gets the name (_id field value) of the replica set described by this configuration.
- */
- const std::string& getReplSetName() const { return _replSetName; }
-
- /**
- * Gets the number of members in this configuration.
- */
- int getNumMembers() const { return _members.size(); }
-
- /**
- * Gets a begin iterator over the MemberConfigs stored in this ReplicaSetConfig.
- */
- MemberIterator membersBegin() const { return _members.begin(); }
-
- /**
- * Gets an end iterator over the MemberConfigs stored in this ReplicaSetConfig.
- */
- MemberIterator membersEnd() const { return _members.end(); }
-
- /**
- * Access a MemberConfig element by index.
- */
- const MemberConfig& getMemberAt(size_t i) const;
-
- /**
- * Returns a pointer to the MemberConfig corresponding to the member with the given _id in
- * the config, or NULL if there is no member with that ID.
- */
- const MemberConfig* findMemberByID(int id) const;
-
- /**
- * Returns a pointer to the MemberConfig corresponding to the member with the given
- * HostAndPort in the config, or NULL if there is no member with that address.
- */
- const MemberConfig* findMemberByHostAndPort(const HostAndPort& hap) const;
-
- /**
- * Returns a MemberConfig index position corresponding to the member with the given
- * HostAndPort in the config, or -1 if there is no member with that address.
- */
- const int findMemberIndexByHostAndPort(const HostAndPort& hap) const;
-
- /**
- * Returns a MemberConfig index position corresponding to the member with the given
- * _id in the config, or -1 if there is no member with that address.
- */
- const int findMemberIndexByConfigId(long long configId) const;
-
- /**
- * Gets the default write concern for the replica set described by this configuration.
- */
- const WriteConcernOptions& getDefaultWriteConcern() const { return _defaultWriteConcern; }
-
- /**
- * Gets the amount of time to wait for a response to hearbeats sent to other
- * nodes in the replica set.
- */
- Seconds getHeartbeatTimeoutPeriod() const { return _heartbeatTimeoutPeriod; }
-
- /**
- * Gets the amount of time to wait for a response to hearbeats sent to other
- * nodes in the replica set, as above, but returns a Milliseconds instead of
- * Seconds object.
- */
- Milliseconds getHeartbeatTimeoutPeriodMillis() const {
- return _heartbeatTimeoutPeriod;
- }
-
- /**
- * Gets the number of votes required to win an election.
- */
- int getMajorityVoteCount() const { return _majorityVoteCount; }
-
- /**
- * Gets the number of voters.
- */
- int getTotalVotingMembers() const { return _totalVotingMembers; }
-
- /**
- * Returns true if automatic (not explicitly set) chaining is allowed.
- */
- bool isChainingAllowed() const { return _chainingAllowed; }
-
- /**
- * Returns a ReplicaSetTag with the given "key" and "value", or an invalid
- * tag if the configuration describes no such tag.
- */
- ReplicaSetTag findTag(StringData key, StringData value) const;
-
- /**
- * Returns the pattern corresponding to "patternName" in this configuration.
- * If "patternName" is not a valid pattern in this configuration, returns
- * ErrorCodes::NoSuchKey.
- */
- StatusWith<ReplicaSetTagPattern> findCustomWriteMode(StringData patternName) const;
-
- /**
- * Returns the "tags configuration" for this replica set.
- *
- * NOTE(schwerin): Not clear if this should be used other than for reporting/debugging.
- */
- const ReplicaSetTagConfig& getTagConfig() const { return _tagConfig; }
-
- /**
- * Returns the config as a BSONObj.
- */
- BSONObj toBSON() const;
-
- /**
- * Returns a vector of strings which are the names of the WriteConcernModes.
- * Currently used in unit tests to compare two configs.
- */
- std::vector<std::string> getWriteConcernNames() const;
-
- /**
- * Returns the number of voting data-bearing members that must acknowledge a write
- * in order to satisfy a write concern of {w: "majority"}.
- */
- int getWriteMajority() const { return _writeMajority; }
-
- /**
- * Gets the protocol version for this configuration.
- *
- * The protocol version number currently determines what election protocol is used by the
- * cluster; 0 is the default and indicates the old 3.0 election protocol.
- */
- long long getProtocolVersion() const { return _protocolVersion; }
-
- private:
- /**
- * Parses the "settings" subdocument of a replica set configuration.
- */
- Status _parseSettingsSubdocument(const BSONObj& settings);
-
- /**
- * Calculates and stores the majority for electing a primary (_majorityVoteCount).
- */
- void _calculateMajorities();
-
- /**
- * Adds internal write concern modes to the getLastErrorModes list.
- */
- void _addInternalWriteConcernModes();
-
- bool _isInitialized;
- long long _version;
- std::string _replSetName;
- std::vector<MemberConfig> _members;
- WriteConcernOptions _defaultWriteConcern;
- Seconds _heartbeatTimeoutPeriod;
- bool _chainingAllowed;
- int _majorityVoteCount;
- int _writeMajority;
- int _totalVotingMembers;
- ReplicaSetTagConfig _tagConfig;
- StringMap<ReplicaSetTagPattern> _customWriteConcernModes;
- long long _protocolVersion;
- };
+ void _addInternalWriteConcernModes();
+
+ bool _isInitialized;
+ long long _version;
+ std::string _replSetName;
+ std::vector<MemberConfig> _members;
+ WriteConcernOptions _defaultWriteConcern;
+ Seconds _heartbeatTimeoutPeriod;
+ bool _chainingAllowed;
+ int _majorityVoteCount;
+ int _writeMajority;
+ int _totalVotingMembers;
+ ReplicaSetTagConfig _tagConfig;
+ StringMap<ReplicaSetTagPattern> _customWriteConcernModes;
+ long long _protocolVersion;
+};
} // namespace repl
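A note on the two majority getters above: getMajorityVoteCount() and getWriteMajority() are both derived by the private _calculateMajorities() helper from the voting membership. Below is a minimal sketch of the conventional strict-majority arithmetic, assuming the usual voters/2 + 1 rule and that the write majority is capped by the number of voting, data-bearing members; the struct and function names are illustrative stand-ins, not the actual implementation.

#include <algorithm>
#include <vector>

// Hypothetical stand-ins for the relevant MemberConfig fields.
struct MemberSketch {
    int numVotes;      // 0 or 1 in practice
    bool arbiterOnly;  // arbiters vote but bear no data
};

struct Majorities {
    int majorityVoteCount;  // votes needed to win an election
    int writeMajority;      // voting, data-bearing acks needed for w:"majority"
};

// Assumed arithmetic: strict majority of the voters, capped by the data-bearing voters.
Majorities calculateMajorities(const std::vector<MemberSketch>& members) {
    int voters = 0;
    int votingDataBearing = 0;
    for (const MemberSketch& m : members) {
        if (m.numVotes > 0) {
            ++voters;
            if (!m.arbiterOnly) {
                ++votingDataBearing;
            }
        }
    }
    Majorities result;
    result.majorityVoteCount = voters / 2 + 1;
    result.writeMajority = std::min(result.majorityVoteCount, votingDataBearing);
    return result;
}

Under this assumed arithmetic, a set of three voting members where one is an arbiter yields a majorityVoteCount of 2 and a writeMajority of 2.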
diff --git a/src/mongo/db/repl/replica_set_config_checks.cpp b/src/mongo/db/repl/replica_set_config_checks.cpp
index 7b97d3679a3..6b972063c6a 100644
--- a/src/mongo/db/repl/replica_set_config_checks.cpp
+++ b/src/mongo/db/repl/replica_set_config_checks.cpp
@@ -40,247 +40,234 @@ namespace mongo {
namespace repl {
namespace {
- /**
- * Finds the index of the one member configuration in "newConfig" that corresponds
- * to the current node (as identified by "externalState").
- *
- * Returns an error if the current node does not appear or appears multiple times in
- * "newConfig".
- */
- StatusWith<int> findSelfInConfig(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& newConfig) {
-
- std::vector<ReplicaSetConfig::MemberIterator> meConfigs;
- for (ReplicaSetConfig::MemberIterator iter = newConfig.membersBegin();
- iter != newConfig.membersEnd();
- ++iter) {
- if (externalState->isSelf(iter->getHostAndPort())) {
- meConfigs.push_back(iter);
- }
- }
- if (meConfigs.empty()) {
- return StatusWith<int>(ErrorCodes::NodeNotFound, str::stream() <<
- "No host described in new configuration " <<
- newConfig.getConfigVersion() << " for replica set " <<
- newConfig.getReplSetName() << " maps to this node");
- }
- if (meConfigs.size() > 1) {
- str::stream message;
- message << "The hosts " << meConfigs.front()->getHostAndPort().toString();
- for (size_t i = 1; i < meConfigs.size() - 1; ++i) {
- message << ", " << meConfigs[i]->getHostAndPort().toString();
- }
- message << " and " << meConfigs.back()->getHostAndPort().toString() <<
- " all map to this node in new configuration version " <<
- newConfig.getConfigVersion() << " for replica set " << newConfig.getReplSetName();
- return StatusWith<int>(ErrorCodes::DuplicateKey, message);
+/**
+ * Finds the index of the one member configuration in "newConfig" that corresponds
+ * to the current node (as identified by "externalState").
+ *
+ * Returns an error if the current node does not appear or appears multiple times in
+ * "newConfig".
+ */
+StatusWith<int> findSelfInConfig(ReplicationCoordinatorExternalState* externalState,
+ const ReplicaSetConfig& newConfig) {
+ std::vector<ReplicaSetConfig::MemberIterator> meConfigs;
+ for (ReplicaSetConfig::MemberIterator iter = newConfig.membersBegin();
+ iter != newConfig.membersEnd();
+ ++iter) {
+ if (externalState->isSelf(iter->getHostAndPort())) {
+ meConfigs.push_back(iter);
}
-
- int myIndex = std::distance(newConfig.membersBegin(), meConfigs.front());
- invariant(myIndex >= 0 && myIndex < newConfig.getNumMembers());
- return StatusWith<int>(myIndex);
}
-
- /**
- * Checks if the node with the given config index is electable, returning a useful
- * status message if not.
- */
- Status checkElectable(const ReplicaSetConfig& newConfig, int configIndex) {
- const MemberConfig& myConfig = newConfig.getMemberAt(configIndex);
- if (!myConfig.isElectable()) {
- return Status(
- ErrorCodes::NodeNotElectable, str::stream() <<
- "This node, " << myConfig.getHostAndPort().toString() << ", with _id " <<
- myConfig.getId() << " is not electable under the new configuration version " <<
- newConfig.getConfigVersion() << " for replica set " <<
- newConfig.getReplSetName());
- }
- return Status::OK();
+ if (meConfigs.empty()) {
+ return StatusWith<int>(ErrorCodes::NodeNotFound,
+ str::stream() << "No host described in new configuration "
+ << newConfig.getConfigVersion() << " for replica set "
+ << newConfig.getReplSetName() << " maps to this node");
}
-
- /**
- * Like findSelfInConfig, above, but also returns an error if the member configuration
- * for this node is not electable, as this is a requirement for nodes accepting
- * reconfig or initiate commands.
- */
- StatusWith<int> findSelfInConfigIfElectable(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& newConfig) {
- StatusWith<int> result = findSelfInConfig(externalState, newConfig);
- if (result.isOK()) {
- Status status = checkElectable(newConfig, result.getValue());
- if (!status.isOK()) {
- return StatusWith<int>(status);
- }
+ if (meConfigs.size() > 1) {
+ str::stream message;
+ message << "The hosts " << meConfigs.front()->getHostAndPort().toString();
+ for (size_t i = 1; i < meConfigs.size() - 1; ++i) {
+ message << ", " << meConfigs[i]->getHostAndPort().toString();
}
- return result;
+ message << " and " << meConfigs.back()->getHostAndPort().toString()
+ << " all map to this node in new configuration version "
+ << newConfig.getConfigVersion() << " for replica set "
+ << newConfig.getReplSetName();
+ return StatusWith<int>(ErrorCodes::DuplicateKey, message);
}
- /**
- * Compares two initialized and validated replica set configurations, and checks to
- * see if "newConfig" is a legal successor configuration to "oldConfig".
- *
- * Returns Status::OK() if "newConfig" may replace "oldConfig", or an indicative error
- * otherwise.
- *
- * The checks performed by this test are necessary, but may not be sufficient for
- * ensuring that "newConfig" is a legal successor to "oldConfig". For example,
- * a legal reconfiguration must typically be executed on a node that is currently
- * primary under "oldConfig" and is electable under "newConfig". Such checks that
- * require knowledge of which node is executing the configuration are out of scope
- * for this function.
- */
- Status validateOldAndNewConfigsCompatible(
- const ReplicaSetConfig& oldConfig,
- const ReplicaSetConfig& newConfig) {
- invariant(newConfig.isInitialized());
- invariant(oldConfig.isInitialized());
-
- if (oldConfig.getConfigVersion() >= newConfig.getConfigVersion()) {
- return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() <<
- "New replica set configuration version must be greater than old, but " <<
- newConfig.getConfigVersion() << " is not greater than " <<
- oldConfig.getConfigVersion() << " for replica set " <<
- newConfig.getReplSetName());
- }
+ int myIndex = std::distance(newConfig.membersBegin(), meConfigs.front());
+ invariant(myIndex >= 0 && myIndex < newConfig.getNumMembers());
+ return StatusWith<int>(myIndex);
+}
- if (oldConfig.getReplSetName() != newConfig.getReplSetName()) {
- return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() <<
- "New and old configurations differ in replica set name; "
- "old was " << oldConfig.getReplSetName() << ", and new is " <<
- newConfig.getReplSetName());
- }
-
- //
- // For every member config mNew in newConfig, if there exists member config mOld
- // in oldConfig such that mNew.getHostAndPort() == mOld.getHostAndPort(), it is required
- // that mNew.getId() == mOld.getId().
- //
- // Also, one may not use reconfig to change the value of the buildIndexes or
- // arbiterOnly flags.
- //
- for (ReplicaSetConfig::MemberIterator mNew = newConfig.membersBegin();
- mNew != newConfig.membersEnd();
- ++mNew) {
- for (ReplicaSetConfig::MemberIterator mOld = oldConfig.membersBegin();
- mOld != oldConfig.membersEnd();
- ++mOld) {
-
- const bool idsEqual = mOld->getId() == mNew->getId();
- const bool hostsEqual = mOld->getHostAndPort() == mNew->getHostAndPort();
- if (!idsEqual && !hostsEqual) {
- continue;
- }
- if (hostsEqual && !idsEqual) {
- return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() <<
- "New and old configurations both have members with " <<
- MemberConfig::kHostFieldName << " of " <<
- mOld->getHostAndPort().toString() <<
- " but in the new configuration the " <<
- MemberConfig::kIdFieldName << " field is " <<
- mNew->getId() << " and in the old configuration it is " <<
- mOld->getId() <<
- " for replica set " << newConfig.getReplSetName());
- }
- // At this point, the _id and host fields are equal, so we're looking at the old and
- // new configurations for the same member node.
- const bool buildIndexesFlagsEqual =
- mOld->shouldBuildIndexes() == mNew->shouldBuildIndexes();
- if (!buildIndexesFlagsEqual) {
- return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() <<
- "New and old configurations differ in the setting of the "
- "buildIndexes field for member " <<
- mOld->getHostAndPort().toString() <<
- "; to make this change, remove then re-add the member");
- }
- const bool arbiterFlagsEqual = mOld->isArbiter() == mNew->isArbiter();
- if (!arbiterFlagsEqual) {
- return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() <<
- "New and old configurations differ in the setting of the "
- "arbiterOnly field for member " <<
- mOld->getHostAndPort().toString() <<
- "; to make this change, remove then re-add the member");
-
- }
- }
- }
- return Status::OK();
+/**
+ * Checks if the node with the given config index is electable, returning a useful
+ * status message if not.
+ */
+Status checkElectable(const ReplicaSetConfig& newConfig, int configIndex) {
+ const MemberConfig& myConfig = newConfig.getMemberAt(configIndex);
+ if (!myConfig.isElectable()) {
+ return Status(ErrorCodes::NodeNotElectable,
+ str::stream() << "This node, " << myConfig.getHostAndPort().toString()
+ << ", with _id " << myConfig.getId()
+ << " is not electable under the new configuration version "
+ << newConfig.getConfigVersion() << " for replica set "
+ << newConfig.getReplSetName());
}
-} // namespace
+ return Status::OK();
+}
- StatusWith<int> validateConfigForStartUp(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& oldConfig,
- const ReplicaSetConfig& newConfig) {
- Status status = newConfig.validate();
+/**
+ * Like findSelfInConfig, above, but also returns an error if the member configuration
+ * for this node is not electable, as this is a requirement for nodes accepting
+ * reconfig or initiate commands.
+ */
+StatusWith<int> findSelfInConfigIfElectable(ReplicationCoordinatorExternalState* externalState,
+ const ReplicaSetConfig& newConfig) {
+ StatusWith<int> result = findSelfInConfig(externalState, newConfig);
+ if (result.isOK()) {
+ Status status = checkElectable(newConfig, result.getValue());
if (!status.isOK()) {
return StatusWith<int>(status);
}
- if (oldConfig.isInitialized()) {
- status = validateOldAndNewConfigsCompatible(oldConfig, newConfig);
- if (!status.isOK()) {
- return StatusWith<int>(status);
- }
- }
- return findSelfInConfig(externalState, newConfig);
}
+ return result;
+}
- StatusWith<int> validateConfigForInitiate(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& newConfig) {
- Status status = newConfig.validate();
- if (!status.isOK()) {
- return StatusWith<int>(status);
- }
- if (newConfig.getConfigVersion() != 1) {
- return StatusWith<int>(
- ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream() << "Configuration used to initiate a replica set must " <<
- " have version 1, but found " << newConfig.getConfigVersion());
- }
- return findSelfInConfigIfElectable(externalState, newConfig);
+/**
+ * Compares two initialized and validated replica set configurations, and checks to
+ * see if "newConfig" is a legal successor configuration to "oldConfig".
+ *
+ * Returns Status::OK() if "newConfig" may replace "oldConfig", or an indicative error
+ * otherwise.
+ *
+ * The checks performed by this test are necessary, but may not be sufficient for
+ * ensuring that "newConfig" is a legal successor to "oldConfig". For example,
+ * a legal reconfiguration must typically be executed on a node that is currently
+ * primary under "oldConfig" and is electable under "newConfig". Such checks that
+ * require knowledge of which node is executing the configuration are out of scope
+ * for this function.
+ */
+Status validateOldAndNewConfigsCompatible(const ReplicaSetConfig& oldConfig,
+ const ReplicaSetConfig& newConfig) {
+ invariant(newConfig.isInitialized());
+ invariant(oldConfig.isInitialized());
+
+ if (oldConfig.getConfigVersion() >= newConfig.getConfigVersion()) {
+ return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ str::stream()
+ << "New replica set configuration version must be greater than old, but "
+ << newConfig.getConfigVersion() << " is not greater than "
+ << oldConfig.getConfigVersion() << " for replica set "
+ << newConfig.getReplSetName());
}
- StatusWith<int> validateConfigForReconfig(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& oldConfig,
- const ReplicaSetConfig& newConfig,
- bool force) {
+ if (oldConfig.getReplSetName() != newConfig.getReplSetName()) {
+ return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ str::stream() << "New and old configurations differ in replica set name; "
+ "old was " << oldConfig.getReplSetName() << ", and new is "
+ << newConfig.getReplSetName());
+ }
- Status status = newConfig.validate();
- if (!status.isOK()) {
- return StatusWith<int>(status);
+ //
+ // For every member config mNew in newConfig, if there exists member config mOld
+ // in oldConfig such that mNew.getHostAndPort() == mOld.getHostAndPort(), it is required
+ // that mNew.getId() == mOld.getId().
+ //
+ // Also, one may not use reconfig to change the value of the buildIndexes or
+ // arbiterOnly flags.
+ //
+ for (ReplicaSetConfig::MemberIterator mNew = newConfig.membersBegin();
+ mNew != newConfig.membersEnd();
+ ++mNew) {
+ for (ReplicaSetConfig::MemberIterator mOld = oldConfig.membersBegin();
+ mOld != oldConfig.membersEnd();
+ ++mOld) {
+ const bool idsEqual = mOld->getId() == mNew->getId();
+ const bool hostsEqual = mOld->getHostAndPort() == mNew->getHostAndPort();
+ if (!idsEqual && !hostsEqual) {
+ continue;
+ }
+ if (hostsEqual && !idsEqual) {
+ return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ str::stream()
+ << "New and old configurations both have members with "
+ << MemberConfig::kHostFieldName << " of "
+ << mOld->getHostAndPort().toString()
+ << " but in the new configuration the "
+ << MemberConfig::kIdFieldName << " field is " << mNew->getId()
+ << " and in the old configuration it is " << mOld->getId()
+ << " for replica set " << newConfig.getReplSetName());
+ }
+ // At this point, the _id and host fields are equal, so we're looking at the old and
+ // new configurations for the same member node.
+ const bool buildIndexesFlagsEqual =
+ mOld->shouldBuildIndexes() == mNew->shouldBuildIndexes();
+ if (!buildIndexesFlagsEqual) {
+ return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ str::stream()
+ << "New and old configurations differ in the setting of the "
+ "buildIndexes field for member "
+ << mOld->getHostAndPort().toString()
+ << "; to make this change, remove then re-add the member");
+ }
+ const bool arbiterFlagsEqual = mOld->isArbiter() == mNew->isArbiter();
+ if (!arbiterFlagsEqual) {
+ return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ str::stream()
+ << "New and old configurations differ in the setting of the "
+ "arbiterOnly field for member "
+ << mOld->getHostAndPort().toString()
+ << "; to make this change, remove then re-add the member");
+ }
}
+ }
+ return Status::OK();
+}
+} // namespace
+StatusWith<int> validateConfigForStartUp(ReplicationCoordinatorExternalState* externalState,
+ const ReplicaSetConfig& oldConfig,
+ const ReplicaSetConfig& newConfig) {
+ Status status = newConfig.validate();
+ if (!status.isOK()) {
+ return StatusWith<int>(status);
+ }
+ if (oldConfig.isInitialized()) {
status = validateOldAndNewConfigsCompatible(oldConfig, newConfig);
if (!status.isOK()) {
return StatusWith<int>(status);
}
+ }
+ return findSelfInConfig(externalState, newConfig);
+}
- if (force) {
- return findSelfInConfig(externalState, newConfig);
- }
-
- return findSelfInConfigIfElectable(externalState, newConfig);
+StatusWith<int> validateConfigForInitiate(ReplicationCoordinatorExternalState* externalState,
+ const ReplicaSetConfig& newConfig) {
+ Status status = newConfig.validate();
+ if (!status.isOK()) {
+ return StatusWith<int>(status);
+ }
+ if (newConfig.getConfigVersion() != 1) {
+ return StatusWith<int>(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ str::stream() << "Configuration used to initiate a replica set must "
+ << " have version 1, but found "
+ << newConfig.getConfigVersion());
}
+ return findSelfInConfigIfElectable(externalState, newConfig);
+}
- StatusWith<int> validateConfigForHeartbeatReconfig(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& newConfig) {
+StatusWith<int> validateConfigForReconfig(ReplicationCoordinatorExternalState* externalState,
+ const ReplicaSetConfig& oldConfig,
+ const ReplicaSetConfig& newConfig,
+ bool force) {
+ Status status = newConfig.validate();
+ if (!status.isOK()) {
+ return StatusWith<int>(status);
+ }
- Status status = newConfig.validate();
- if (!status.isOK()) {
- return StatusWith<int>(status);
- }
+ status = validateOldAndNewConfigsCompatible(oldConfig, newConfig);
+ if (!status.isOK()) {
+ return StatusWith<int>(status);
+ }
+ if (force) {
return findSelfInConfig(externalState, newConfig);
}
+ return findSelfInConfigIfElectable(externalState, newConfig);
+}
+
+StatusWith<int> validateConfigForHeartbeatReconfig(
+ ReplicationCoordinatorExternalState* externalState, const ReplicaSetConfig& newConfig) {
+ Status status = newConfig.validate();
+ if (!status.isOK()) {
+ return StatusWith<int>(status);
+ }
+
+ return findSelfInConfig(externalState, newConfig);
+}
+
} // namespace repl
} // namespace mongo
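The exported validateConfigFor* functions above compose the same small helpers: validate the new configuration, check old/new compatibility where an old configuration is involved, then locate the current node and, except under force or heartbeat reconfig, require it to be electable. The sketch below shows how a caller might drive validateConfigForReconfig; it is illustrative only, sketchValidateReconfig is a hypothetical wrapper, and the BSON arguments stand in for whatever configuration documents the caller holds.

#include "mongo/db/repl/replica_set_config_checks.h"

namespace mongo {
namespace repl {

// Hypothetical caller; error handling is abbreviated.
StatusWith<int> sketchValidateReconfig(ReplicationCoordinatorExternalState* externalState,
                                       const BSONObj& oldConfigBson,
                                       const BSONObj& newConfigBson,
                                       bool force) {
    ReplicaSetConfig oldConfig;
    ReplicaSetConfig newConfig;
    Status status = oldConfig.initialize(oldConfigBson);
    if (!status.isOK()) {
        return StatusWith<int>(status);
    }
    status = newConfig.initialize(newConfigBson);
    if (!status.isOK()) {
        return StatusWith<int>(status);
    }
    // force == true skips only the electability check; the new config must still
    // validate and remain compatible with the old one.
    return validateConfigForReconfig(externalState, oldConfig, newConfig, force);
}

}  // namespace repl
}  // namespace mongo

On success the returned value is this node's index in newConfig, suitable for newConfig.getMemberAt().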
diff --git a/src/mongo/db/repl/replica_set_config_checks.h b/src/mongo/db/repl/replica_set_config_checks.h
index ba7ad90f3fc..adeb4758093 100644
--- a/src/mongo/db/repl/replica_set_config_checks.h
+++ b/src/mongo/db/repl/replica_set_config_checks.h
@@ -33,61 +33,57 @@
namespace mongo {
namespace repl {
- class ReplicationCoordinatorExternalState;
- class ReplicaSetConfig;
+class ReplicationCoordinatorExternalState;
+class ReplicaSetConfig;
- /**
- * Validates that "newConfig" is a legal configuration that the current
- * node can accept from its local storage during startup.
- *
- * Returns the index of the current node's member configuration in "newConfig",
- * on success, and an indicative error on failure.
- *
- * If "oldConfig" is valid, this method only succeds if "newConfig" is a legal
- * successor configuration.
- */
- StatusWith<int> validateConfigForStartUp(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& oldConfig,
- const ReplicaSetConfig& newConfig);
+/**
+ * Validates that "newConfig" is a legal configuration that the current
+ * node can accept from its local storage during startup.
+ *
+ * Returns the index of the current node's member configuration in "newConfig",
+ * on success, and an indicative error on failure.
+ *
+ * If "oldConfig" is valid, this method only succeeds if "newConfig" is a legal
+ * successor configuration.
+ */
+StatusWith<int> validateConfigForStartUp(ReplicationCoordinatorExternalState* externalState,
+ const ReplicaSetConfig& oldConfig,
+ const ReplicaSetConfig& newConfig);
- /**
- * Validates that "newConfig" is a legal initial configuration that can be
- * initiated by the current node (identified via "externalState").
- *
- * Returns the index of the current node's member configuration in "newConfig",
- * on success, and an indicative error on failure.
- */
- StatusWith<int> validateConfigForInitiate(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& newConfig);
+/**
+ * Validates that "newConfig" is a legal initial configuration that can be
+ * initiated by the current node (identified via "externalState").
+ *
+ * Returns the index of the current node's member configuration in "newConfig",
+ * on success, and an indicative error on failure.
+ */
+StatusWith<int> validateConfigForInitiate(ReplicationCoordinatorExternalState* externalState,
+ const ReplicaSetConfig& newConfig);
- /**
- * Validates that "newConfig" is a legal successor configuration to "oldConfig" that can be
- * initiated by the current node (identified via "externalState").
- *
- * If "force" is set to true, then compatibility with the old configuration and electability of
- * the current node in "newConfig" are not considered when determining if the reconfig is valid.
- *
- * Returns the index of the current node's member configuration in "newConfig",
- * on success, and an indicative error on failure.
- */
- StatusWith<int> validateConfigForReconfig(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& oldConfig,
- const ReplicaSetConfig& newConfig,
- bool force);
+/**
+ * Validates that "newConfig" is a legal successor configuration to "oldConfig" that can be
+ * initiated by the current node (identified via "externalState").
+ *
+ * If "force" is set to true, the electability of the current node in "newConfig" is not
+ * considered when determining if the reconfig is valid; the new configuration must still
+ * validate and be compatible with the old configuration.
+ *
+ * Returns the index of the current node's member configuration in "newConfig",
+ * on success, and an indicative error on failure.
+ */
+StatusWith<int> validateConfigForReconfig(ReplicationCoordinatorExternalState* externalState,
+ const ReplicaSetConfig& oldConfig,
+ const ReplicaSetConfig& newConfig,
+ bool force);
- /**
- * Validates that "newConfig" is an acceptable configuration when received in a heartbeat
- * reasponse.
- *
- * If the new configuration omits the current node, but is otherwise valid, returns
- * ErrorCodes::NodeNotFound. If the configuration is wholly valid, returns Status::OK().
- * Otherwise, returns some other error status.
- */
- StatusWith<int> validateConfigForHeartbeatReconfig(
- ReplicationCoordinatorExternalState* externalState,
- const ReplicaSetConfig& newConfig);
+/**
+ * Validates that "newConfig" is an acceptable configuration when received in a heartbeat
+ * response.
+ *
+ * If the new configuration omits the current node, but is otherwise valid, returns
+ * ErrorCodes::NodeNotFound. If the configuration is wholly valid, returns Status::OK().
+ * Otherwise, returns some other error status.
+ */
+StatusWith<int> validateConfigForHeartbeatReconfig(
+ ReplicationCoordinatorExternalState* externalState, const ReplicaSetConfig& newConfig);
} // namespace repl
} // namespace mongo
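As the comment above spells out, validateConfigForHeartbeatReconfig distinguishes an otherwise-valid configuration that omits the current node (ErrorCodes::NodeNotFound) from a structurally invalid one. Below is a minimal sketch of how a caller might branch on that result; the handler function is a hypothetical illustration, not part of this change.

#include "mongo/db/repl/replica_set_config_checks.h"

namespace mongo {
namespace repl {

// Hypothetical handler; what to do in each branch is left to the caller.
Status sketchHandleHeartbeatConfig(ReplicationCoordinatorExternalState* externalState,
                                   const ReplicaSetConfig& newConfig) {
    StatusWith<int> myIndex = validateConfigForHeartbeatReconfig(externalState, newConfig);
    if (myIndex.getStatus().code() == ErrorCodes::NodeNotFound) {
        // An otherwise-valid configuration that simply does not list this node.
        return myIndex.getStatus();
    }
    if (!myIndex.isOK()) {
        // A structurally invalid configuration (e.g. duplicate member _id values).
        return myIndex.getStatus();
    }
    // myIndex.getValue() is this node's position in newConfig.
    return Status::OK();
}

}  // namespace repl
}  // namespace mongo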
diff --git a/src/mongo/db/repl/replica_set_config_checks_test.cpp b/src/mongo/db/repl/replica_set_config_checks_test.cpp
index efb39f5e0fa..d495421689d 100644
--- a/src/mongo/db/repl/replica_set_config_checks_test.cpp
+++ b/src/mongo/db/repl/replica_set_config_checks_test.cpp
@@ -40,660 +40,679 @@ namespace mongo {
namespace repl {
namespace {
- TEST(ValidateConfigForInitiate, VersionMustBe1) {
- ReplicationCoordinatorExternalStateMock rses;
- rses.addSelf(HostAndPort("h1"));
-
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1")))));
- ASSERT_EQUALS(
- ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForInitiate(&rses, config).getStatus());
- }
-
- TEST(ValidateConfigForInitiate, MustFindSelf) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
- ReplicationCoordinatorExternalStateMock notPresentExternalState;
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ReplicationCoordinatorExternalStateMock presentTwiceExternalState;
- presentTwiceExternalState.addSelf(HostAndPort("h3"));
- presentTwiceExternalState.addSelf(HostAndPort("h1"));
-
- ASSERT_EQUALS(ErrorCodes::NodeNotFound,
- validateConfigForInitiate(&notPresentExternalState, config).getStatus());
- ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- validateConfigForInitiate(&presentTwiceExternalState, config).getStatus());
- ASSERT_EQUALS(1, unittest::assertGet(validateConfigForInitiate(&presentOnceExternalState,
- config)));
- }
-
- TEST(ValidateConfigForInitiate, SelfMustBeElectable) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2" <<
- "priority" << 0) <<
- BSON("_id" << 3 << "host" << "h3")))));
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
-
- ASSERT_EQUALS(ErrorCodes::NodeNotElectable,
- validateConfigForInitiate(&presentOnceExternalState, config).getStatus());
- }
-
- TEST(ValidateConfigForReconfig, NewConfigVersionNumberMustBeHigherThanOld) {
- ReplicationCoordinatorExternalStateMock externalState;
- externalState.addSelf(HostAndPort("h1"));
-
- ReplicaSetConfig oldConfig;
- ReplicaSetConfig newConfig;
-
- // Two configurations, identical except for version.
- ASSERT_OK(oldConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(newConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(oldConfig.validate());
- ASSERT_OK(newConfig.validate());
-
- // Can reconfig from old to new.
- ASSERT_OK(validateConfigForReconfig(&externalState,
- oldConfig,
- newConfig,
- false).getStatus());
-
-
- // Cannot reconfig from old to old (versions must be different).
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- oldConfig,
- false).getStatus());
- // Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- oldConfig,
- true).getStatus());
-
- // Cannot reconfig from new to old (versions must increase).
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- newConfig,
- oldConfig,
- false).getStatus());
- // Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- newConfig,
- oldConfig,
- true).getStatus());
- }
-
- TEST(ValidateConfigForReconfig, NewConfigMustNotChangeSetName) {
- ReplicationCoordinatorExternalStateMock externalState;
- externalState.addSelf(HostAndPort("h1"));
-
- ReplicaSetConfig oldConfig;
- ReplicaSetConfig newConfig;
-
- // Two configurations, compatible except for set name.
- ASSERT_OK(oldConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(newConfig.initialize(
- BSON("_id" << "rs1" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(oldConfig.validate());
- ASSERT_OK(newConfig.validate());
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- newConfig,
- false).getStatus());
- // Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- newConfig,
- oldConfig,
- true).getStatus());
- }
-
- TEST(ValidateConfigForReconfig, NewConfigMustNotFlipBuildIndexesFlag) {
- ReplicationCoordinatorExternalStateMock externalState;
- externalState.addSelf(HostAndPort("h1"));
-
- ReplicaSetConfig oldConfig;
- ReplicaSetConfig newConfig;
- ReplicaSetConfig oldConfigRefresh;
-
- // Three configurations, two compatible except that h2 flips the buildIndex flag.
- // The third, compatible with the first.
- ASSERT_OK(oldConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2" <<
- "buildIndexes" << false <<
- "priority" << 0) <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(newConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2" <<
- "buildIndexes" << true <<
- "priority" << 0) <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(oldConfigRefresh.initialize(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2" <<
- "buildIndexes" << false <<
- "priority" << 0) <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(oldConfig.validate());
- ASSERT_OK(newConfig.validate());
- ASSERT_OK(oldConfigRefresh.validate());
- ASSERT_OK(validateConfigForReconfig(&externalState,
- oldConfig,
- oldConfigRefresh,
- false).getStatus());
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- newConfig,
- false).getStatus());
-
- // Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- newConfig,
- true).getStatus());
- }
-
- TEST(ValidateConfigForReconfig, NewConfigMustNotFlipArbiterFlag) {
- ReplicationCoordinatorExternalStateMock externalState;
- externalState.addSelf(HostAndPort("h1"));
-
- ReplicaSetConfig oldConfig;
- ReplicaSetConfig newConfig;
- ReplicaSetConfig oldConfigRefresh;
-
- // Three configurations, two compatible except that h2 flips the arbiterOnly flag.
- // The third, compatible with the first.
- ASSERT_OK(oldConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2" <<
- "arbiterOnly" << false) <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(newConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2" <<
- "arbiterOnly" << true) <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(oldConfigRefresh.initialize(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2" <<
- "arbiterOnly" << false) <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ASSERT_OK(oldConfig.validate());
- ASSERT_OK(newConfig.validate());
- ASSERT_OK(oldConfigRefresh.validate());
- ASSERT_OK(validateConfigForReconfig(&externalState,
- oldConfig,
- oldConfigRefresh,
- false).getStatus());
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- newConfig,
- false).getStatus());
- // Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- newConfig,
- true).getStatus());
- }
-
- TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
- // When reconfiguring a replica set, it is allowed to introduce (host, id) pairs
- // absent from the old config only when the hosts and ids were both individually
- // absent in the old config.
-
- ReplicationCoordinatorExternalStateMock externalState;
- externalState.addSelf(HostAndPort("h1"));
-
- ReplicaSetConfig oldConfig;
- ReplicaSetConfig legalNewConfigWithNewHostAndId;
- ReplicaSetConfig illegalNewConfigReusingHost;
- ReplicaSetConfig illegalNewConfigReusingId;
-
- ASSERT_OK(oldConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
- ASSERT_OK(oldConfig.validate());
-
- //
- // Here, the new config is valid because we've replaced (2, "h2") with
- // (4, "h4"), so neither the member _id or host name were reused.
- //
- ASSERT_OK(legalNewConfigWithNewHostAndId.initialize(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 4 << "host" << "h4") <<
- BSON("_id" << 3 << "host" << "h3")))));
- ASSERT_OK(legalNewConfigWithNewHostAndId.validate());
- ASSERT_OK(validateConfigForReconfig(&externalState,
- oldConfig,
- legalNewConfigWithNewHostAndId,
- false).getStatus());
-
- //
- // Here, the new config is invalid because we've reused host name "h2" with
- // new _id 4.
- //
- ASSERT_OK(illegalNewConfigReusingHost.initialize(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 4 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
- ASSERT_OK(illegalNewConfigReusingHost.validate());
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- illegalNewConfigReusingHost,
- false).getStatus());
- // Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(&externalState,
- oldConfig,
- illegalNewConfigReusingHost,
- true).getStatus());
- //
- // Here, the new config is valid, because all we've changed is the name of
- // the host representing _id 2.
- //
- ASSERT_OK(illegalNewConfigReusingId.initialize(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h4") <<
- BSON("_id" << 3 << "host" << "h3")))));
- ASSERT_OK(illegalNewConfigReusingId.validate());
- ASSERT_OK(validateConfigForReconfig(&externalState,
- oldConfig,
- illegalNewConfigReusingId,
- false).getStatus());
- }
-
- TEST(ValidateConfigForReconfig, MustFindSelf) {
- // Old and new config are same except for version change; this is just testing that we can
- // find ourself in the new config.
- ReplicaSetConfig oldConfig;
- ASSERT_OK(oldConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
- ReplicationCoordinatorExternalStateMock notPresentExternalState;
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ReplicationCoordinatorExternalStateMock presentThriceExternalState;
- presentThriceExternalState.addSelf(HostAndPort("h3"));
- presentThriceExternalState.addSelf(HostAndPort("h2"));
- presentThriceExternalState.addSelf(HostAndPort("h1"));
-
- ASSERT_EQUALS(ErrorCodes::NodeNotFound,
- validateConfigForReconfig(&notPresentExternalState,
- oldConfig,
- newConfig,
- false).getStatus());
- ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- validateConfigForReconfig(&presentThriceExternalState,
- oldConfig,
- newConfig,
- false).getStatus());
- ASSERT_EQUALS(1, unittest::assertGet(validateConfigForReconfig(&presentOnceExternalState,
- oldConfig,
- newConfig,
- false)));
- // Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NodeNotFound,
- validateConfigForReconfig(&notPresentExternalState,
- oldConfig,
- newConfig,
- true).getStatus());
- ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- validateConfigForReconfig(&presentThriceExternalState,
- oldConfig,
- newConfig,
- true).getStatus());
- ASSERT_EQUALS(1, unittest::assertGet(validateConfigForReconfig(&presentOnceExternalState,
- oldConfig,
- newConfig,
- true)));
- }
-
- TEST(ValidateConfigForReconfig, SelfMustEndElectable) {
- // Old and new config are same except for version change and the electability of one node;
- // this is just testing that we must be electable in the new config.
- ReplicaSetConfig oldConfig;
- ASSERT_OK(oldConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 3 << "host" << "h3")))));
-
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 << "host" << "h2" <<
- "priority" << 0) <<
- BSON("_id" << 3 << "host" << "h3")))));
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
-
- ASSERT_EQUALS(ErrorCodes::NodeNotElectable,
- validateConfigForReconfig(&presentOnceExternalState,
- oldConfig,
- newConfig,
- false).getStatus());
- // Forced reconfig does not require electability.
- ASSERT_OK(validateConfigForReconfig(&presentOnceExternalState,
- oldConfig,
- newConfig,
- true).getStatus());
- }
-
- TEST(ValidateConfigForInitiate, NewConfigInvalid) {
- // The new config is not valid due to a duplicate _id value. This tests that if the new
- // config is invalid, validateConfigForInitiate will return a status indicating what is
- // wrong with the new config.
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 0 << "host" << "h3")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(ErrorCodes::BadValue, validateConfigForInitiate(&presentOnceExternalState,
- newConfig).getStatus());
- }
-
- TEST(ValidateConfigForReconfig, NewConfigInvalid) {
- // The new config is not valid due to a duplicate _id value. This tests that if the new
- // config is invalid, validateConfigForReconfig will return a status indicating what is
- // wrong with the new config.
- ReplicaSetConfig oldConfig;
- ASSERT_OK(oldConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2")))));
-
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 0 << "host" << "h3")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(ErrorCodes::BadValue, validateConfigForReconfig(&presentOnceExternalState,
- oldConfig,
- newConfig,
- false).getStatus());
- // Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::BadValue, validateConfigForReconfig(&presentOnceExternalState,
- oldConfig,
- newConfig,
- true).getStatus());
- }
-
- TEST(ValidateConfigForStartUp, NewConfigInvalid) {
- // The new config is not valid due to a duplicate _id value. This tests that if the new
- // config is invalid, validateConfigForStartUp will return a status indicating what is wrong
- // with the new config.
- ReplicaSetConfig oldConfig;
- ASSERT_OK(oldConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2")))));
-
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 0 << "host" << "h3")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(ErrorCodes::BadValue, validateConfigForStartUp(&presentOnceExternalState,
- oldConfig,
- newConfig).getStatus());
- }
-
- TEST(ValidateConfigForStartUp, OldAndNewConfigIncompatible) {
- // The new config is not compatible with the old config due to a member changing _ids. This
- // tests that validateConfigForStartUp will return a status indicating the incompatibility
- // between the old and new config.
- ReplicaSetConfig oldConfig;
- ASSERT_OK(oldConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 1 << "host" << "h3")))));
-
-
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 2 << "host" << "h2") <<
- BSON("_id" << 1 << "host" << "h3")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForStartUp(&presentOnceExternalState,
- oldConfig,
- newConfig).getStatus());
- }
-
- TEST(ValidateConfigForStartUp, OldAndNewConfigCompatible) {
- // The new config is compatible with the old config. This tests that
- // validateConfigForStartUp will return a Status::OK() indicating the validity of this
- // config change.
- ReplicaSetConfig oldConfig;
- ASSERT_OK(oldConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 1 << "host" << "h3")))));
-
-
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2" <<
- "priority" << 3) <<
- BSON("_id" << 1 << "host" << "h3")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_OK(validateConfigForStartUp(&presentOnceExternalState,
- oldConfig,
- newConfig).getStatus());
- }
-
- TEST(ValidateConfigForHeartbeatReconfig, NewConfigInvalid) {
- // The new config is not valid due to a duplicate _id value. This tests that if the new
- // config is invalid, validateConfigForHeartbeatReconfig will return a status indicating
- // what is wrong with the new config.
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 0 << "host" << "h3")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(ErrorCodes::BadValue,
- validateConfigForHeartbeatReconfig(&presentOnceExternalState,
- newConfig).getStatus());
- }
-
- TEST(ValidateConfigForHeartbeatReconfig, NewConfigValid) {
- // The new config is valid. This tests that validateConfigForHeartbeatReconfig will return
- // a Status::OK() indicating the validity of this config change.
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 1 << "host" << "h3")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_OK(validateConfigForHeartbeatReconfig(&presentOnceExternalState,
- newConfig).getStatus());
- }
-
- TEST(ValidateForReconfig, ForceStillNeedsValidConfig) {
- // The new config is invalid due to two nodes with the same _id value. This tests that
- // ValidateForReconfig fails with an invalid config, even if force is true.
- ReplicaSetConfig oldConfig;
- ASSERT_OK(oldConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 1 << "host" << "h3")))));
-
-
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 0 << "host" << "h3")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(ErrorCodes::BadValue,
- validateConfigForReconfig(&presentOnceExternalState,
- oldConfig,
- newConfig,
- true).getStatus());
- }
-
- TEST(ValidateForReconfig, ForceStillNeedsSelfPresent) {
- // The new config does not contain self. This tests that ValidateForReconfig fails
- // if the member receiving it is absent from the config, even if force is true.
- ReplicaSetConfig oldConfig;
- ASSERT_OK(oldConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "h2") <<
- BSON("_id" << 1 << "host" << "h3")))));
-
-
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h3") <<
- BSON("_id" << 2 << "host" << "h4")))));
-
- ReplicationCoordinatorExternalStateMock presentOnceExternalState;
- presentOnceExternalState.addSelf(HostAndPort("h2"));
- ASSERT_EQUALS(ErrorCodes::NodeNotFound,
- validateConfigForReconfig(&presentOnceExternalState,
- oldConfig,
- newConfig,
- true).getStatus());
- }
+TEST(ValidateConfigForInitiate, VersionMustBe1) {
+ ReplicationCoordinatorExternalStateMock rses;
+ rses.addSelf(HostAndPort("h1"));
+
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")))));
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForInitiate(&rses, config).getStatus());
+}
+
+TEST(ValidateConfigForInitiate, MustFindSelf) {
+ ReplicaSetConfig config;
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+ ReplicationCoordinatorExternalStateMock notPresentExternalState;
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ReplicationCoordinatorExternalStateMock presentTwiceExternalState;
+ presentTwiceExternalState.addSelf(HostAndPort("h3"));
+ presentTwiceExternalState.addSelf(HostAndPort("h1"));
+
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound,
+ validateConfigForInitiate(&notPresentExternalState, config).getStatus());
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey,
+ validateConfigForInitiate(&presentTwiceExternalState, config).getStatus());
+ ASSERT_EQUALS(
+ 1, unittest::assertGet(validateConfigForInitiate(&presentOnceExternalState, config)));
+}
+
+TEST(ValidateConfigForInitiate, SelfMustBeElectable) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+
+ ASSERT_EQUALS(ErrorCodes::NodeNotElectable,
+ validateConfigForInitiate(&presentOnceExternalState, config).getStatus());
+}
+
+TEST(ValidateConfigForReconfig, NewConfigVersionNumberMustBeHigherThanOld) {
+ ReplicationCoordinatorExternalStateMock externalState;
+ externalState.addSelf(HostAndPort("h1"));
+
+ ReplicaSetConfig oldConfig;
+ ReplicaSetConfig newConfig;
+
+ // Two configurations, identical except for version.
+ ASSERT_OK(
+ oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(
+ newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(oldConfig.validate());
+ ASSERT_OK(newConfig.validate());
+
+ // Can reconfig from old to new.
+ ASSERT_OK(validateConfigForReconfig(&externalState, oldConfig, newConfig, false).getStatus());
+
+
+ // Cannot reconfig from old to old (versions must be different).
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, oldConfig, false).getStatus());
+ // Forced reconfigs also do not allow this.
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, oldConfig, true).getStatus());
+
+ // Cannot reconfig from new to old (versions must increase).
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, newConfig, oldConfig, false).getStatus());
+ // Forced reconfigs also do not allow this.
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, newConfig, oldConfig, true).getStatus());
+}
+
+TEST(ValidateConfigForReconfig, NewConfigMustNotChangeSetName) {
+ ReplicationCoordinatorExternalStateMock externalState;
+ externalState.addSelf(HostAndPort("h1"));
+
+ ReplicaSetConfig oldConfig;
+ ReplicaSetConfig newConfig;
+
+ // Two configurations, compatible except for set name.
+ ASSERT_OK(
+ oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(
+ newConfig.initialize(BSON("_id"
+ << "rs1"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(oldConfig.validate());
+ ASSERT_OK(newConfig.validate());
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, newConfig, false).getStatus());
+ // Forced reconfigs also do not allow this.
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, newConfig, oldConfig, true).getStatus());
+}
+
+TEST(ValidateConfigForReconfig, NewConfigMustNotFlipBuildIndexesFlag) {
+ ReplicationCoordinatorExternalStateMock externalState;
+ externalState.addSelf(HostAndPort("h1"));
+
+ ReplicaSetConfig oldConfig;
+ ReplicaSetConfig newConfig;
+ ReplicaSetConfig oldConfigRefresh;
+
+    // Three configurations: the first two are compatible except that h2 flips the
+    // buildIndexes flag; the third is compatible with the first.
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "buildIndexes" << false
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "buildIndexes" << true
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(
+ oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "buildIndexes" << false
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(oldConfig.validate());
+ ASSERT_OK(newConfig.validate());
+ ASSERT_OK(oldConfigRefresh.validate());
+ ASSERT_OK(
+ validateConfigForReconfig(&externalState, oldConfig, oldConfigRefresh, false).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, newConfig, false).getStatus());
+
+ // Forced reconfigs also do not allow this.
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, newConfig, true).getStatus());
+}
+
+TEST(ValidateConfigForReconfig, NewConfigMustNotFlipArbiterFlag) {
+ ReplicationCoordinatorExternalStateMock externalState;
+ externalState.addSelf(HostAndPort("h1"));
+
+ ReplicaSetConfig oldConfig;
+ ReplicaSetConfig newConfig;
+ ReplicaSetConfig oldConfigRefresh;
+
+    // Three configurations: the first two are compatible except that h2 flips the
+    // arbiterOnly flag; the third is compatible with the first.
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "arbiterOnly" << false)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(
+ oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "arbiterOnly" << false)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ASSERT_OK(oldConfig.validate());
+ ASSERT_OK(newConfig.validate());
+ ASSERT_OK(oldConfigRefresh.validate());
+ ASSERT_OK(
+ validateConfigForReconfig(&externalState, oldConfig, oldConfigRefresh, false).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, newConfig, false).getStatus());
+ // Forced reconfigs also do not allow this.
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, newConfig, true).getStatus());
+}
+
+TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
+    // When reconfiguring a replica set, a new (host, _id) pair may be introduced only
+    // if both the host and the _id were individually absent from the old config.
+
+ ReplicationCoordinatorExternalStateMock externalState;
+ externalState.addSelf(HostAndPort("h1"));
+
+ ReplicaSetConfig oldConfig;
+ ReplicaSetConfig legalNewConfigWithNewHostAndId;
+ ReplicaSetConfig illegalNewConfigReusingHost;
+ ReplicaSetConfig illegalNewConfigReusingId;
+
+ ASSERT_OK(
+ oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+ ASSERT_OK(oldConfig.validate());
+
+ //
+    // Here, the new config is valid because we've replaced (2, "h2") with
+    // (4, "h4"), so neither the member _id nor the host name was reused.
+ //
+ ASSERT_OK(
+ legalNewConfigWithNewHostAndId.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 4 << "host"
+ << "h4")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+ ASSERT_OK(legalNewConfigWithNewHostAndId.validate());
+ ASSERT_OK(validateConfigForReconfig(
+ &externalState, oldConfig, legalNewConfigWithNewHostAndId, false).getStatus());
+
+ //
+ // Here, the new config is invalid because we've reused host name "h2" with
+ // new _id 4.
+ //
+ ASSERT_OK(illegalNewConfigReusingHost.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 4 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+ ASSERT_OK(illegalNewConfigReusingHost.validate());
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(
+ &externalState, oldConfig, illegalNewConfigReusingHost, false).getStatus());
+ // Forced reconfigs also do not allow this.
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(
+ &externalState, oldConfig, illegalNewConfigReusingHost, true).getStatus());
+ //
+    // Here, the new config is valid: member _id 2 is reused, but only the host name
+    // representing that _id has changed, which is allowed.
+ //
+ ASSERT_OK(illegalNewConfigReusingId.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h4")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+ ASSERT_OK(illegalNewConfigReusingId.validate());
+ ASSERT_OK(validateConfigForReconfig(&externalState, oldConfig, illegalNewConfigReusingId, false)
+ .getStatus());
+}
+
+TEST(ValidateConfigForReconfig, MustFindSelf) {
+    // The old and new configs are the same except for the version change; this just tests that
+    // we can find ourselves in the new config.
+ ReplicaSetConfig oldConfig;
+ ASSERT_OK(
+ oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(
+ newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+ ReplicationCoordinatorExternalStateMock notPresentExternalState;
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ReplicationCoordinatorExternalStateMock presentThriceExternalState;
+ presentThriceExternalState.addSelf(HostAndPort("h3"));
+ presentThriceExternalState.addSelf(HostAndPort("h2"));
+ presentThriceExternalState.addSelf(HostAndPort("h1"));
+
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound,
+ validateConfigForReconfig(&notPresentExternalState, oldConfig, newConfig, false)
+ .getStatus());
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey,
+ validateConfigForReconfig(
+ &presentThriceExternalState, oldConfig, newConfig, false).getStatus());
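+    // On success the validator returns the index of this node in the new config; h2 is the
+    // member at index 1.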
+ ASSERT_EQUALS(1,
+ unittest::assertGet(validateConfigForReconfig(
+ &presentOnceExternalState, oldConfig, newConfig, false)));
+ // Forced reconfigs also do not allow this.
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound,
+ validateConfigForReconfig(&notPresentExternalState, oldConfig, newConfig, true)
+ .getStatus());
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey,
+ validateConfigForReconfig(&presentThriceExternalState, oldConfig, newConfig, true)
+ .getStatus());
+ ASSERT_EQUALS(1,
+ unittest::assertGet(validateConfigForReconfig(
+ &presentOnceExternalState, oldConfig, newConfig, true)));
+}
+
+TEST(ValidateConfigForReconfig, SelfMustEndElectable) {
+    // The old and new configs are the same except for the version change and the electability
+    // of one node; this just tests that we must be electable in the new config.
+ ReplicaSetConfig oldConfig;
+ ASSERT_OK(
+ oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2") << BSON("_id" << 3 << "host"
+ << "h3")))));
+
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority" << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+
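+    // The new config drops h2 (this node) to priority 0, making it unelectable.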
+ ASSERT_EQUALS(ErrorCodes::NodeNotElectable,
+ validateConfigForReconfig(&presentOnceExternalState, oldConfig, newConfig, false)
+ .getStatus());
+ // Forced reconfig does not require electability.
+ ASSERT_OK(validateConfigForReconfig(&presentOnceExternalState, oldConfig, newConfig, true)
+ .getStatus());
+}
+
+TEST(ValidateConfigForInitiate, NewConfigInvalid) {
+ // The new config is not valid due to a duplicate _id value. This tests that if the new
+ // config is invalid, validateConfigForInitiate will return a status indicating what is
+ // wrong with the new config.
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 0 << "host"
+ << "h3")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_EQUALS(ErrorCodes::BadValue,
+ validateConfigForInitiate(&presentOnceExternalState, newConfig).getStatus());
+}
+
+TEST(ValidateConfigForReconfig, NewConfigInvalid) {
+ // The new config is not valid due to a duplicate _id value. This tests that if the new
+ // config is invalid, validateConfigForReconfig will return a status indicating what is
+ // wrong with the new config.
+ ReplicaSetConfig oldConfig;
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")))));
+
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 0 << "host"
+ << "h3")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_EQUALS(ErrorCodes::BadValue,
+ validateConfigForReconfig(&presentOnceExternalState, oldConfig, newConfig, false)
+ .getStatus());
+ // Forced reconfigs also do not allow this.
+ ASSERT_EQUALS(ErrorCodes::BadValue,
+ validateConfigForReconfig(&presentOnceExternalState, oldConfig, newConfig, true)
+ .getStatus());
+}
+
+TEST(ValidateConfigForStartUp, NewConfigInvalid) {
+ // The new config is not valid due to a duplicate _id value. This tests that if the new
+ // config is invalid, validateConfigForStartUp will return a status indicating what is wrong
+ // with the new config.
+ ReplicaSetConfig oldConfig;
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")))));
+
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 0 << "host"
+ << "h3")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_EQUALS(
+ ErrorCodes::BadValue,
+ validateConfigForStartUp(&presentOnceExternalState, oldConfig, newConfig).getStatus());
+}
+
+TEST(ValidateConfigForStartUp, OldAndNewConfigIncompatible) {
+    // The new config is not compatible with the old config due to a member changing _ids. This
+    // tests that validateConfigForStartUp will return a status indicating the incompatibility
+    // between the old and new configs.
+ ReplicaSetConfig oldConfig;
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 1 << "host"
+ << "h3")))));
+
+
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 1 << "host"
+ << "h3")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForStartUp(&presentOnceExternalState, oldConfig, newConfig).getStatus());
+}
+
+TEST(ValidateConfigForStartUp, OldAndNewConfigCompatible) {
+ // The new config is compatible with the old config. This tests that
+ // validateConfigForStartUp will return a Status::OK() indicating the validity of this
+ // config change.
+ ReplicaSetConfig oldConfig;
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 1 << "host"
+ << "h3")))));
+
+
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2"
+ << "priority" << 3)
+ << BSON("_id" << 1 << "host"
+ << "h3")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_OK(
+ validateConfigForStartUp(&presentOnceExternalState, oldConfig, newConfig).getStatus());
+}
+
+TEST(ValidateConfigForHeartbeatReconfig, NewConfigInvalid) {
+ // The new config is not valid due to a duplicate _id value. This tests that if the new
+ // config is invalid, validateConfigForHeartbeatReconfig will return a status indicating
+ // what is wrong with the new config.
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 0 << "host"
+ << "h3")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_EQUALS(
+ ErrorCodes::BadValue,
+ validateConfigForHeartbeatReconfig(&presentOnceExternalState, newConfig).getStatus());
+}
+
+TEST(ValidateConfigForHeartbeatReconfig, NewConfigValid) {
+ // The new config is valid. This tests that validateConfigForHeartbeatReconfig will return
+ // a Status::OK() indicating the validity of this config change.
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 1 << "host"
+ << "h3")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_OK(validateConfigForHeartbeatReconfig(&presentOnceExternalState, newConfig).getStatus());
+}
+
+TEST(ValidateForReconfig, ForceStillNeedsValidConfig) {
+ // The new config is invalid due to two nodes with the same _id value. This tests that
+ // ValidateForReconfig fails with an invalid config, even if force is true.
+ ReplicaSetConfig oldConfig;
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 1 << "host"
+ << "h3")))));
+
+
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 0 << "host"
+ << "h3")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_EQUALS(ErrorCodes::BadValue,
+ validateConfigForReconfig(&presentOnceExternalState, oldConfig, newConfig, true)
+ .getStatus());
+}
+
+TEST(ValidateForReconfig, ForceStillNeedsSelfPresent) {
+ // The new config does not contain self. This tests that ValidateForReconfig fails
+ // if the member receiving it is absent from the config, even if force is true.
+ ReplicaSetConfig oldConfig;
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h2")
+ << BSON("_id" << 1 << "host"
+ << "h3")))));
+
+
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h3")
+ << BSON("_id" << 2 << "host"
+ << "h4")))));
+
+ ReplicationCoordinatorExternalStateMock presentOnceExternalState;
+ presentOnceExternalState.addSelf(HostAndPort("h2"));
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound,
+ validateConfigForReconfig(&presentOnceExternalState, oldConfig, newConfig, true)
+ .getStatus());
+}
} // namespace
} // namespace repl
diff --git a/src/mongo/db/repl/replica_set_config_test.cpp b/src/mongo/db/repl/replica_set_config_test.cpp
index 8bc5a247498..734552d4ed1 100644
--- a/src/mongo/db/repl/replica_set_config_test.cpp
+++ b/src/mongo/db/repl/replica_set_config_test.cpp
@@ -36,682 +36,731 @@ namespace mongo {
namespace repl {
namespace {
- TEST(ReplicaSetConfig, ParseMinimalConfigAndCheckDefaults) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS("rs0", config.getReplSetName());
- ASSERT_EQUALS(1, config.getConfigVersion());
- ASSERT_EQUALS(1, config.getNumMembers());
- ASSERT_EQUALS(0, config.membersBegin()->getId());
- ASSERT_EQUALS(1, config.getDefaultWriteConcern().wNumNodes);
- ASSERT_EQUALS("", config.getDefaultWriteConcern().wMode);
- ASSERT_EQUALS(Seconds(10), config.getHeartbeatTimeoutPeriod());
- ASSERT_TRUE(config.isChainingAllowed());
- ASSERT_EQUALS(0, config.getProtocolVersion());
- }
-
- TEST(ReplicaSetConfig, ParseLargeConfigAndCheckAccessors) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1234 <<
- "members" << BSON_ARRAY(BSON("_id" << 234 <<
- "host" << "localhost:12345" <<
- "tags" << BSON("NYC" << "NY"))) <<
- "settings" << BSON("getLastErrorDefaults" <<
- BSON("w" << "majority") <<
- "getLastErrorModes" << BSON("eastCoast" <<
- BSON("NYC" << 1 )) <<
- "chainingAllowed" << false <<
- "heartbeatTimeoutSecs" << 120) <<
- "protocolVersion" << 2)));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS("rs0", config.getReplSetName());
- ASSERT_EQUALS(1234, config.getConfigVersion());
- ASSERT_EQUALS(1, config.getNumMembers());
- ASSERT_EQUALS(234, config.membersBegin()->getId());
- ASSERT_EQUALS(0, config.getDefaultWriteConcern().wNumNodes);
- ASSERT_EQUALS("majority", config.getDefaultWriteConcern().wMode);
- ASSERT_FALSE(config.isChainingAllowed());
- ASSERT_EQUALS(Seconds(120), config.getHeartbeatTimeoutPeriod());
- ASSERT_EQUALS(2, config.getProtocolVersion());
- }
-
- TEST(ReplicaSetConfig, MajorityCalculationThreeVotersNoArbiters) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1") <<
- BSON("_id" << 4 << "host" << "h4:1" << "votes" << 0) <<
- BSON("_id" << 5 << "host" << "h5:1" << "votes" << 0)))));
- ASSERT_OK(config.validate());
-
- ASSERT_EQUALS(2, config.getWriteMajority());
- }
-
- TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfArbiters) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2) <<
- BSON("host" << "node4:12345" <<
- "_id" << 3 <<
- "arbiterOnly" << true) <<
- BSON("host" << "node5:12345" <<
- "_id" << 4 <<
- "arbiterOnly" << true)))));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS(3, config.getWriteMajority());
- }
-
- TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfArbitersOthersNoVote) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" <<
- "_id" << 0 <<
- "votes" << 0) <<
- BSON("host" << "node2:12345" <<
- "_id" << 1 <<
- "votes" << 0) <<
- BSON("host" << "node3:12345" <<
- "_id" << 2 <<
- "votes" << 0) <<
- BSON("host" << "node4:12345" <<
- "_id" << 3 <<
- "arbiterOnly" << true) <<
- BSON("host" << "node5:12345" <<
- "_id" << 4 <<
- "arbiterOnly" << true)))));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS(0, config.getWriteMajority());
- }
-
- TEST(ReplicaSetConfig, MajorityCalculationEvenNumberOfMembers) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2) <<
- BSON("host" << "node4:12345" << "_id" << 3)))));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS(3, config.getWriteMajority());
- }
-
- TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfSecondariesNoVotes) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" <<
- "_id" << 1 <<
- "votes" << 0) <<
- BSON("host" << "node3:12345" <<
- "_id" << 2 <<
- "votes" << 0) <<
- BSON("host" << "node4:12345" << "_id" << 3) <<
- BSON("host" << "node5:12345" << "_id" << 4)))));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS(2, config.getWriteMajority());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingIdField) {
- ReplicaSetConfig config;
- // Replica set name must be a string.
- ASSERT_EQUALS(
- ErrorCodes::TypeMismatch,
- config.initialize(
- BSON("_id" << 1 <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
-
- // Replica set name must be present.
- ASSERT_EQUALS(
- ErrorCodes::NoSuchKey,
- config.initialize(
- BSON("version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
-
- // Empty repl set name parses, but does not validate.
- ASSERT_OK(config.initialize(
- BSON("_id" << "" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
-
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingVersionField) {
- ReplicaSetConfig config;
- // Config version field must be present.
- ASSERT_EQUALS(
- ErrorCodes::NoSuchKey,
- config.initialize(
- BSON("_id" << "rs0" <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
- ASSERT_EQUALS(
- ErrorCodes::TypeMismatch,
- config.initialize(
- BSON("_id" << "rs0" <<
- "version" << "1" <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
-
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1.0 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
- ASSERT_OK(config.validate());
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 0.0 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" <<
- static_cast<long long>(std::numeric_limits<int>::max()) + 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")))));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithBadMembers) {
- ReplicaSetConfig config;
- ASSERT_EQUALS(ErrorCodes::TypeMismatch,
- config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345") <<
- "localhost:23456"))));
- ASSERT_EQUALS(ErrorCodes::NoSuchKey,
- config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("host" << "localhost:12345")))));
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithLocalNonLocalHostMix) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost") <<
- BSON("_id" << 1 <<
- "host" << "otherhost")))));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNoElectableNodes) {
- ReplicaSetConfig config;
- const BSONObj configBsonNoElectableNodes = BSON(
- "_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "localhost:1" << "priority" << 0) <<
- BSON("_id" << 1 << "host" << "localhost:2" << "priority" << 0)));
-
- ASSERT_OK(config.initialize(configBsonNoElectableNodes));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
-
- const BSONObj configBsonNoElectableNodesOneArbiter = BSON(
- "_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "localhost:1" << "arbiterOnly" << 1) <<
- BSON("_id" << 1 << "host" << "localhost:2" << "priority" << 0)));
-
- ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
-
- const BSONObj configBsonNoElectableNodesTwoArbiters = BSON(
- "_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "localhost:1" << "arbiterOnly" << 1) <<
- BSON("_id" << 1 << "host" << "localhost:2" << "arbiterOnly" << 1)));
-
- ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
-
- const BSONObj configBsonOneElectableNode = BSON(
- "_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "localhost:1" << "priority" << 0) <<
- BSON("_id" << 1 << "host" << "localhost:2" << "priority" << 1)));
- ASSERT_OK(config.initialize(configBsonOneElectableNode));
- ASSERT_OK(config.validate());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithTooFewVoters) {
- ReplicaSetConfig config;
- const BSONObj configBsonNoVoters = BSON(
- "_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "localhost:1" << "votes" << 0) <<
- BSON("_id" << 1 << "host" << "localhost:2" << "votes" << 0)));
-
- ASSERT_OK(config.initialize(configBsonNoVoters));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
-
- const BSONObj configBsonOneVoter = BSON(
- "_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "localhost:1" << "votes" << 0) <<
- BSON("_id" << 1 << "host" << "localhost:2" << "votes" << 1)));
- ASSERT_OK(config.initialize(configBsonOneVoter));
- ASSERT_OK(config.validate());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithTooManyVoters) {
- ReplicaSetConfig config;
- namespace mmb = mutablebson;
- mmb::Document configDoc;
- mmb::Element configDocRoot = configDoc.root();
- ASSERT_OK(configDocRoot.appendString("_id", "rs0"));
- ASSERT_OK(configDocRoot.appendInt("version", 1));
- mmb::Element membersArray = configDoc.makeElementArray("members");
- ASSERT_OK(configDocRoot.pushBack(membersArray));
- for (size_t i = 0; i < ReplicaSetConfig::kMaxVotingMembers + 1; ++i) {
- mmb::Element memberElement = configDoc.makeElementObject("");
- ASSERT_OK(membersArray.pushBack(memberElement));
- ASSERT_OK(memberElement.appendInt("_id", i));
- ASSERT_OK(memberElement.appendString(
- "host", std::string(str::stream() << "localhost" << i + 1)));
- ASSERT_OK(memberElement.appendInt("votes", 1));
- }
-
- const BSONObj configBsonTooManyVoters = configDoc.getObject();
-
- membersArray.leftChild().findFirstChildNamed("votes").setValueInt(0);
- const BSONObj configBsonMaxVoters = configDoc.getObject();
-
-
- ASSERT_OK(config.initialize(configBsonMaxVoters));
- ASSERT_OK(config.validate());
- ASSERT_OK(config.initialize(configBsonTooManyVoters));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithDuplicateHost) {
- ReplicaSetConfig config;
- const BSONObj configBson = BSON(
- "_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "localhost:1") <<
- BSON("_id" << 1 << "host" << "localhost:1")));
- ASSERT_OK(config.initialize(configBson));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithTooManyNodes) {
- ReplicaSetConfig config;
- namespace mmb = mutablebson;
- mmb::Document configDoc;
- mmb::Element configDocRoot = configDoc.root();
- ASSERT_OK(configDocRoot.appendString("_id", "rs0"));
- ASSERT_OK(configDocRoot.appendInt("version", 1));
- mmb::Element membersArray = configDoc.makeElementArray("members");
- ASSERT_OK(configDocRoot.pushBack(membersArray));
- for (size_t i = 0; i < ReplicaSetConfig::kMaxMembers; ++i) {
- mmb::Element memberElement = configDoc.makeElementObject("");
- ASSERT_OK(membersArray.pushBack(memberElement));
- ASSERT_OK(memberElement.appendInt("_id", i));
- ASSERT_OK(memberElement.appendString(
- "host", std::string(str::stream() << "localhost" << i + 1)));
- if (i >= ReplicaSetConfig::kMaxVotingMembers) {
- ASSERT_OK(memberElement.appendInt("votes", 0));
- }
- }
- const BSONObj configBsonMaxNodes = configDoc.getObject();
-
+TEST(ReplicaSetConfig, ParseMinimalConfigAndCheckDefaults) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS("rs0", config.getReplSetName());
+ ASSERT_EQUALS(1, config.getConfigVersion());
+ ASSERT_EQUALS(1, config.getNumMembers());
+ ASSERT_EQUALS(0, config.membersBegin()->getId());
+ ASSERT_EQUALS(1, config.getDefaultWriteConcern().wNumNodes);
+ ASSERT_EQUALS("", config.getDefaultWriteConcern().wMode);
+ ASSERT_EQUALS(Seconds(10), config.getHeartbeatTimeoutPeriod());
+ ASSERT_TRUE(config.isChainingAllowed());
+ ASSERT_EQUALS(0, config.getProtocolVersion());
+}
+
+TEST(ReplicaSetConfig, ParseLargeConfigAndCheckAccessors) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1234 << "members" << BSON_ARRAY(BSON("_id" << 234 << "host"
+ << "localhost:12345"
+ << "tags" << BSON("NYC"
+ << "NY")))
+ << "settings" << BSON("getLastErrorDefaults"
+ << BSON("w"
+ << "majority") << "getLastErrorModes"
+ << BSON("eastCoast" << BSON("NYC" << 1)) << "chainingAllowed" << false
+ << "heartbeatTimeoutSecs" << 120) << "protocolVersion" << 2)));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS("rs0", config.getReplSetName());
+ ASSERT_EQUALS(1234, config.getConfigVersion());
+ ASSERT_EQUALS(1, config.getNumMembers());
+ ASSERT_EQUALS(234, config.membersBegin()->getId());
+ ASSERT_EQUALS(0, config.getDefaultWriteConcern().wNumNodes);
+ ASSERT_EQUALS("majority", config.getDefaultWriteConcern().wMode);
+ ASSERT_FALSE(config.isChainingAllowed());
+ ASSERT_EQUALS(Seconds(120), config.getHeartbeatTimeoutPeriod());
+ ASSERT_EQUALS(2, config.getProtocolVersion());
+}
+
+TEST(ReplicaSetConfig, MajorityCalculationThreeVotersNoArbiters) {
+ ReplicaSetConfig config;
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "votes" << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "votes" << 0)))));
+ ASSERT_OK(config.validate());
+
+ ASSERT_EQUALS(2, config.getWriteMajority());
+}
+
+TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfArbiters) {
+ ReplicaSetConfig config;
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3 << "arbiterOnly" << true)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4 << "arbiterOnly" << true)))));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS(3, config.getWriteMajority());
+}
+
+TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfArbitersOthersNoVote) {
+ ReplicaSetConfig config;
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0 << "votes" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1 << "votes" << 0)
+ << BSON("host"
+ << "node3:12345"
+ << "_id" << 2 << "votes" << 0)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3 << "arbiterOnly" << true)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4 << "arbiterOnly" << true)))));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS(0, config.getWriteMajority());
+}
+
+TEST(ReplicaSetConfig, MajorityCalculationEvenNumberOfMembers) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3)))));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS(3, config.getWriteMajority());
+}
+
+TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfSecondariesNoVotes) {
+ ReplicaSetConfig config;
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1 << "votes" << 0)
+ << BSON("host"
+ << "node3:12345"
+ << "_id" << 2 << "votes" << 0)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3) << BSON("host"
+ << "node5:12345"
+ << "_id" << 4)))));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS(2, config.getWriteMajority());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingIdField) {
+ ReplicaSetConfig config;
+ // Replica set name must be a string.
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ config.initialize(BSON("_id" << 1 << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+
+ // Replica set name must be present.
+ ASSERT_EQUALS(
+ ErrorCodes::NoSuchKey,
+ config.initialize(
+ BSON("version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+
+ // Empty repl set name parses, but does not validate.
+ ASSERT_OK(config.initialize(BSON("_id"
+ << ""
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingVersionField) {
+ ReplicaSetConfig config;
+ // Config version field must be present.
+ ASSERT_EQUALS(
+ ErrorCodes::NoSuchKey,
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+ ASSERT_EQUALS(
+ ErrorCodes::TypeMismatch,
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << "1"
+ << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1.0 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+ ASSERT_OK(config.validate());
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 0.0 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << static_cast<long long>(std::numeric_limits<int>::max()) + 1
+ << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithBadMembers) {
+ ReplicaSetConfig config;
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")
+ << "localhost:23456"))));
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey,
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "localhost:12345")))));
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithLocalNonLocalHostMix) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost")
+ << BSON("_id" << 1 << "host"
+ << "otherhost")))));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNoElectableNodes) {
+ ReplicaSetConfig config;
+ const BSONObj configBsonNoElectableNodes = BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "priority" << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "priority"
+ << 0)));
+
+ ASSERT_OK(config.initialize(configBsonNoElectableNodes));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+
+ const BSONObj configBsonNoElectableNodesOneArbiter =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly" << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "priority" << 0)));
+
+ ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+
+ const BSONObj configBsonNoElectableNodesTwoArbiters =
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly" << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "arbiterOnly" << 1)));
+
+ ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+
+ const BSONObj configBsonOneElectableNode = BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "priority" << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "priority"
+ << 1)));
+ ASSERT_OK(config.initialize(configBsonOneElectableNode));
+ ASSERT_OK(config.validate());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithTooFewVoters) {
+ ReplicaSetConfig config;
+ const BSONObj configBsonNoVoters = BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "votes" << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "votes" << 0)));
+
+ ASSERT_OK(config.initialize(configBsonNoVoters));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+
+ const BSONObj configBsonOneVoter = BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "votes" << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "votes" << 1)));
+ ASSERT_OK(config.initialize(configBsonOneVoter));
+ ASSERT_OK(config.validate());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithTooManyVoters) {
+ ReplicaSetConfig config;
+ namespace mmb = mutablebson;
+ mmb::Document configDoc;
+ mmb::Element configDocRoot = configDoc.root();
+ ASSERT_OK(configDocRoot.appendString("_id", "rs0"));
+ ASSERT_OK(configDocRoot.appendInt("version", 1));
+ mmb::Element membersArray = configDoc.makeElementArray("members");
+ ASSERT_OK(configDocRoot.pushBack(membersArray));
+ for (size_t i = 0; i < ReplicaSetConfig::kMaxVotingMembers + 1; ++i) {
mmb::Element memberElement = configDoc.makeElementObject("");
ASSERT_OK(membersArray.pushBack(memberElement));
- ASSERT_OK(memberElement.appendInt("_id", ReplicaSetConfig::kMaxMembers));
- ASSERT_OK(memberElement.appendString(
- "host", std::string(str::stream() <<
- "localhost" << ReplicaSetConfig::kMaxMembers + 1)));
- ASSERT_OK(memberElement.appendInt("votes", 0));
- const BSONObj configBsonTooManyNodes = configDoc.getObject();
-
-
- ASSERT_OK(config.initialize(configBsonMaxNodes));
- ASSERT_OK(config.validate());
- ASSERT_OK(config.initialize(configBsonTooManyNodes));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithUnexpectedField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "unexpectedfield" << "value"));
- ASSERT_EQUALS(ErrorCodes::BadValue, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNonArrayMembersField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << "value"));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNonNumericHeartbeatTimeoutSecsField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("heartbeatTimeoutSecs" << "no")));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNonBoolChainingAllowedField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("chainingAllowed" << "no")));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+ ASSERT_OK(memberElement.appendInt("_id", i));
+ ASSERT_OK(
+ memberElement.appendString("host", std::string(str::stream() << "localhost" << i + 1)));
+ ASSERT_OK(memberElement.appendInt("votes", 1));
}
- TEST(ReplicaSetConfig, ParseFailsWithNonObjectSettingsField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << "none"));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithGetLastErrorDefaultsFieldUnparseable) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("getLastErrorDefaults" << BSON(
- "fsync" << "seven"))));
- ASSERT_EQUALS(ErrorCodes::FailedToParse, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorDefaultsField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("getLastErrorDefaults" << "no")));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorModesField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("getLastErrorModes" << "no")));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345" <<
- "tags" << BSON("tag" << "yes"))) <<
- "settings" << BSON("getLastErrorModes" << BSON(
- "one" << BSON("tag" << 1) <<
- "one" << BSON("tag" << 1)))));
- ASSERT_EQUALS(ErrorCodes::DuplicateKey, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorModesEntryField) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345" <<
- "tags" << BSON("tag" << "yes"))) <<
- "settings" << BSON("getLastErrorModes" << BSON(
- "one" << 1))));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNonNumericGetLastErrorModesConstraintValue) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345" <<
- "tags" << BSON("tag" << "yes"))) <<
- "settings" << BSON("getLastErrorModes" << BSON(
- "one" << BSON("tag" << "no")))));
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNegativeGetLastErrorModesConstraintValue) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345" <<
- "tags" << BSON("tag" << "yes"))) <<
- "settings" << BSON("getLastErrorModes" << BSON(
- "one" << BSON("tag" << -1)))));
- ASSERT_EQUALS(ErrorCodes::BadValue, status);
- }
-
- TEST(ReplicaSetConfig, ParseFailsWithNonExistentGetLastErrorModesConstraintTag) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345" <<
- "tags" << BSON("tag" << "yes"))) <<
- "settings" << BSON("getLastErrorModes" << BSON(
- "one" << BSON("tag2" << 1)))));
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, status);
- }
-
- TEST(ReplicaSetConfig, ValidateFailsWithDuplicateMemberId) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345") <<
- BSON("_id" << 0 <<
- "host" << "someoneelse:12345"))));
- ASSERT_OK(status);
-
- status = config.validate();
- ASSERT_EQUALS(ErrorCodes::BadValue, status);
- }
-
- TEST(ReplicaSetConfig, ValidateFailsWithInvalidMember) {
- ReplicaSetConfig config;
- Status status = config.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345" <<
- "hidden" << true))));
- ASSERT_OK(status);
-
- status = config.validate();
- ASSERT_EQUALS(ErrorCodes::BadValue, status);
- }
-
- TEST(ReplicaSetConfig, ChainingAllowedField) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("chainingAllowed" << true))));
- ASSERT_OK(config.validate());
- ASSERT_TRUE(config.isChainingAllowed());
-
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("chainingAllowed" << false))));
- ASSERT_OK(config.validate());
- ASSERT_FALSE(config.isChainingAllowed());
- }
-
- TEST(ReplicaSetConfig, HeartbeatTimeoutField) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 20))));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS(Seconds(20), config.getHeartbeatTimeoutPeriod());
-
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("heartbeatTimeoutSecs" << -20))));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- }
-
- TEST(ReplicaSetConfig, GleDefaultField) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON(
- "getLastErrorDefaults" << BSON("w" << "majority")))));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS("majority", config.getDefaultWriteConcern().wMode);
-
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON(
- "getLastErrorDefaults" << BSON("w" << "frim")))));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
-
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON(
- "getLastErrorDefaults" << BSON("w" << 0)))));
- ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
-
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345" <<
- "tags" << BSON("a" << "v"))) <<
- "settings" << BSON(
- "getLastErrorDefaults" << BSON("w" << "frim") <<
- "getLastErrorModes" << BSON("frim" << BSON("a" << 1))))));
- ASSERT_OK(config.validate());
- ASSERT_EQUALS("frim", config.getDefaultWriteConcern().wMode);
- ASSERT_OK(config.findCustomWriteMode("frim").getStatus());
- }
-
- bool operator==(const MemberConfig& a, const MemberConfig& b) {
- // do tag comparisons
- for (MemberConfig::TagIterator itrA = a.tagsBegin(); itrA != a.tagsEnd(); ++itrA) {
- if (std::find(b.tagsBegin(), b.tagsEnd(), *itrA) == b.tagsEnd()) {
- return false;
- }
+ const BSONObj configBsonTooManyVoters = configDoc.getObject();
+
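+    // Demote the first member to a non-voter, leaving exactly kMaxVotingMembers voters.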
+ membersArray.leftChild().findFirstChildNamed("votes").setValueInt(0);
+ const BSONObj configBsonMaxVoters = configDoc.getObject();
+
+
+ ASSERT_OK(config.initialize(configBsonMaxVoters));
+ ASSERT_OK(config.validate());
+ ASSERT_OK(config.initialize(configBsonTooManyVoters));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithDuplicateHost) {
+ ReplicaSetConfig config;
+ const BSONObj configBson = BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1")
+ << BSON("_id" << 1 << "host"
+ << "localhost:1")));
+ ASSERT_OK(config.initialize(configBson));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithTooManyNodes) {
+ ReplicaSetConfig config;
+ namespace mmb = mutablebson;
+ mmb::Document configDoc;
+ mmb::Element configDocRoot = configDoc.root();
+ ASSERT_OK(configDocRoot.appendString("_id", "rs0"));
+ ASSERT_OK(configDocRoot.appendInt("version", 1));
+ mmb::Element membersArray = configDoc.makeElementArray("members");
+ ASSERT_OK(configDocRoot.pushBack(membersArray));
+ for (size_t i = 0; i < ReplicaSetConfig::kMaxMembers; ++i) {
+ mmb::Element memberElement = configDoc.makeElementObject("");
+ ASSERT_OK(membersArray.pushBack(memberElement));
+ ASSERT_OK(memberElement.appendInt("_id", i));
+ ASSERT_OK(
+ memberElement.appendString("host", std::string(str::stream() << "localhost" << i + 1)));
+ if (i >= ReplicaSetConfig::kMaxVotingMembers) {
+ ASSERT_OK(memberElement.appendInt("votes", 0));
}
- return a.getId() == b.getId() &&
- a.getHostAndPort() == b.getHostAndPort() &&
- a.getPriority() == b.getPriority() &&
- a.getSlaveDelay() == b.getSlaveDelay() &&
- a.isVoter() == b.isVoter() &&
- a.isArbiter() == b.isArbiter() &&
- a.isHidden() == b.isHidden() &&
- a.shouldBuildIndexes() == b.shouldBuildIndexes() &&
- a.getNumTags() == b.getNumTags();
}
-
- bool operator==(const ReplicaSetConfig& a, const ReplicaSetConfig& b) {
- // compare WriteConcernModes
- std::vector<std::string> modeNames = a.getWriteConcernNames();
- for (std::vector<std::string>::iterator it = modeNames.begin();
- it != modeNames.end();
- it++) {
- ReplicaSetTagPattern patternA = a.findCustomWriteMode(*it).getValue();
- ReplicaSetTagPattern patternB = b.findCustomWriteMode(*it).getValue();
- for (ReplicaSetTagPattern::ConstraintIterator itrA = patternA.constraintsBegin();
- itrA != patternA.constraintsEnd();
- itrA++) {
- bool same = false;
- for (ReplicaSetTagPattern::ConstraintIterator itrB = patternB.constraintsBegin();
- itrB != patternB.constraintsEnd();
- itrB++) {
- if (itrA->getKeyIndex() == itrB->getKeyIndex() &&
- itrA->getMinCount() == itrB->getMinCount()) {
- same = true;
- break;
- }
- }
- if (!same) {
- return false;
- }
- }
+ const BSONObj configBsonMaxNodes = configDoc.getObject();
+
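+    // Append one more member, pushing the config past kMaxMembers.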
+ mmb::Element memberElement = configDoc.makeElementObject("");
+ ASSERT_OK(membersArray.pushBack(memberElement));
+ ASSERT_OK(memberElement.appendInt("_id", ReplicaSetConfig::kMaxMembers));
+ ASSERT_OK(memberElement.appendString(
+ "host", std::string(str::stream() << "localhost" << ReplicaSetConfig::kMaxMembers + 1)));
+ ASSERT_OK(memberElement.appendInt("votes", 0));
+ const BSONObj configBsonTooManyNodes = configDoc.getObject();
+
+
+ ASSERT_OK(config.initialize(configBsonMaxNodes));
+ ASSERT_OK(config.validate());
+ ASSERT_OK(config.initialize(configBsonTooManyNodes));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithUnexpectedField) {
+ ReplicaSetConfig config;
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "unexpectedfield"
+ << "value"));
+ ASSERT_EQUALS(ErrorCodes::BadValue, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonArrayMembersField) {
+ ReplicaSetConfig config;
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << "value"));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonNumericHeartbeatTimeoutSecsField) {
+ ReplicaSetConfig config;
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("heartbeatTimeoutSecs"
+ << "no")));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonBoolChainingAllowedField) {
+ ReplicaSetConfig config;
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("chainingAllowed"
+ << "no")));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonObjectSettingsField) {
+ ReplicaSetConfig config;
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << "none"));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithGetLastErrorDefaultsFieldUnparseable) {
+ ReplicaSetConfig config;
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("getLastErrorDefaults" << BSON("fsync"
+ << "seven"))));
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorDefaultsField) {
+ ReplicaSetConfig config;
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("getLastErrorDefaults"
+ << "no")));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorModesField) {
+ ReplicaSetConfig config;
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings" << BSON("getLastErrorModes"
+ << "no")));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
+ ReplicaSetConfig config;
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags" << BSON("tag"
+ << "yes"))) << "settings"
+ << BSON("getLastErrorModes"
+ << BSON("one" << BSON("tag" << 1) << "one"
+ << BSON("tag" << 1)))));
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorModesEntryField) {
+ ReplicaSetConfig config;
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags" << BSON("tag"
+ << "yes"))) << "settings"
+ << BSON("getLastErrorModes" << BSON("one" << 1))));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonNumericGetLastErrorModesConstraintValue) {
+ ReplicaSetConfig config;
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags" << BSON("tag"
+ << "yes"))) << "settings"
+ << BSON("getLastErrorModes" << BSON("one" << BSON("tag"
+ << "no")))));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNegativeGetLastErrorModesConstraintValue) {
+ ReplicaSetConfig config;
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags" << BSON("tag"
+ << "yes"))) << "settings"
+ << BSON("getLastErrorModes" << BSON("one" << BSON("tag" << -1)))));
+ ASSERT_EQUALS(ErrorCodes::BadValue, status);
+}
+
+TEST(ReplicaSetConfig, ParseFailsWithNonExistentGetLastErrorModesConstraintTag) {
+ ReplicaSetConfig config;
+ Status status =
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags" << BSON("tag"
+ << "yes"))) << "settings"
+ << BSON("getLastErrorModes" << BSON("one" << BSON("tag2" << 1)))));
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, status);
+}
+
+TEST(ReplicaSetConfig, ValidateFailsWithDuplicateMemberId) {
+ ReplicaSetConfig config;
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")
+ << BSON("_id" << 0 << "host"
+ << "someoneelse:12345"))));
+ ASSERT_OK(status);
+
+ status = config.validate();
+ ASSERT_EQUALS(ErrorCodes::BadValue, status);
+}
+
+TEST(ReplicaSetConfig, ValidateFailsWithInvalidMember) {
+ ReplicaSetConfig config;
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "hidden" << true))));
+ ASSERT_OK(status);
+
+ status = config.validate();
+ ASSERT_EQUALS(ErrorCodes::BadValue, status);
+}
+
+TEST(ReplicaSetConfig, ChainingAllowedField) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("chainingAllowed" << true))));
+ ASSERT_OK(config.validate());
+ ASSERT_TRUE(config.isChainingAllowed());
+
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("chainingAllowed" << false))));
+ ASSERT_OK(config.validate());
+ ASSERT_FALSE(config.isChainingAllowed());
+}
+
+TEST(ReplicaSetConfig, HeartbeatTimeoutField) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("heartbeatTimeoutSecs" << 20))));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS(Seconds(20), config.getHeartbeatTimeoutPeriod());
+
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("heartbeatTimeoutSecs" << -20))));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+}
+
+TEST(ReplicaSetConfig, GleDefaultField) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("getLastErrorDefaults" << BSON("w"
+ << "majority")))));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS("majority", config.getDefaultWriteConcern().wMode);
+
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("getLastErrorDefaults" << BSON("w"
+ << "frim")))));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("getLastErrorDefaults" << BSON("w" << 0)))));
+ ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
+
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags" << BSON("a"
+ << "v")))
+ << "settings" << BSON("getLastErrorDefaults"
+ << BSON("w"
+ << "frim") << "getLastErrorModes"
+ << BSON("frim" << BSON("a" << 1))))));
+ ASSERT_OK(config.validate());
+ ASSERT_EQUALS("frim", config.getDefaultWriteConcern().wMode);
+ ASSERT_OK(config.findCustomWriteMode("frim").getStatus());
+}
+
+bool operator==(const MemberConfig& a, const MemberConfig& b) {
+ // do tag comparisons
+ for (MemberConfig::TagIterator itrA = a.tagsBegin(); itrA != a.tagsEnd(); ++itrA) {
+ if (std::find(b.tagsBegin(), b.tagsEnd(), *itrA) == b.tagsEnd()) {
+ return false;
}
-
- // compare the members
- for (ReplicaSetConfig::MemberIterator memA = a.membersBegin();
- memA != a.membersEnd();
- memA++) {
+ }
+ return a.getId() == b.getId() && a.getHostAndPort() == b.getHostAndPort() &&
+ a.getPriority() == b.getPriority() && a.getSlaveDelay() == b.getSlaveDelay() &&
+ a.isVoter() == b.isVoter() && a.isArbiter() == b.isArbiter() &&
+ a.isHidden() == b.isHidden() && a.shouldBuildIndexes() == b.shouldBuildIndexes() &&
+ a.getNumTags() == b.getNumTags();
+}
+
+bool operator==(const ReplicaSetConfig& a, const ReplicaSetConfig& b) {
+ // compare WriteConcernModes
+ std::vector<std::string> modeNames = a.getWriteConcernNames();
+ for (std::vector<std::string>::iterator it = modeNames.begin(); it != modeNames.end(); it++) {
+ ReplicaSetTagPattern patternA = a.findCustomWriteMode(*it).getValue();
+ ReplicaSetTagPattern patternB = b.findCustomWriteMode(*it).getValue();
+ for (ReplicaSetTagPattern::ConstraintIterator itrA = patternA.constraintsBegin();
+ itrA != patternA.constraintsEnd();
+ itrA++) {
bool same = false;
- for (ReplicaSetConfig::MemberIterator memB = b.membersBegin();
- memB != b.membersEnd();
- memB++) {
- if (*memA == *memB) {
+ for (ReplicaSetTagPattern::ConstraintIterator itrB = patternB.constraintsBegin();
+ itrB != patternB.constraintsEnd();
+ itrB++) {
+ if (itrA->getKeyIndex() == itrB->getKeyIndex() &&
+ itrA->getMinCount() == itrB->getMinCount()) {
same = true;
break;
}
@@ -720,292 +769,446 @@ namespace {
return false;
}
}
-
- // simple comparisons
- return a.getReplSetName() == b.getReplSetName() &&
- a.getConfigVersion() == b.getConfigVersion() &&
- a.getNumMembers() == b.getNumMembers() &&
- a.getHeartbeatTimeoutPeriod() == b.getHeartbeatTimeoutPeriod() &&
- a.isChainingAllowed() == b.isChainingAllowed() &&
- a.getDefaultWriteConcern().wNumNodes == b.getDefaultWriteConcern().wNumNodes &&
- a.getDefaultWriteConcern().wMode == b.getDefaultWriteConcern().wMode &&
- a.getProtocolVersion() == b.getProtocolVersion();
}
- TEST(ReplicaSetConfig, toBSONRoundTripAbility) {
- ReplicaSetConfig configA;
- ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "localhost:12345")) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 20))));
- ASSERT_OK(configB.initialize(configA.toBSON()));
- ASSERT_TRUE(configA == configB);
- }
-
- TEST(ReplicaSetConfig, toBSONRoundTripAbilityLarge) {
- ReplicaSetConfig configA;
- ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(
- BSON("_id" << "asdf"
- << "version" << 9
- << "members" << BSON_ARRAY(
- BSON("_id" << 0
- << "host" << "localhost:12345"
- << "arbiterOnly" << true
- << "votes" << 1
- ) <<
- BSON("_id" << 3
- << "host" << "localhost:3828"
- << "arbiterOnly" << false
- << "hidden" << true
- << "buildIndexes" << false
- << "priority" << 0
- << "slaveDelay" << 17
- << "votes" << 0
- << "tags" << BSON("coast" << "east" << "ssd" << "true")
- ) <<
- BSON("_id" << 2
- << "host" << "foo.com:3828"
- << "priority" << 9
- << "votes" << 0
- << "tags" << BSON("coast" << "west" << "hdd" << "true")
- ))
- << "settings" << BSON("heartbeatTimeoutSecs" << 20
- << "chainingAllowd" << true
- << "getLastErrorDefaults" << BSON("w" << "majority")
- << "getLastErrorModes" << BSON(
- "disks" << BSON("ssd" << 1 << "hdd" << 1)
- << "coasts" << BSON("coast" << 2)))
- )));
- ASSERT_OK(configB.initialize(configA.toBSON()));
- ASSERT_TRUE(configA == configB);
- }
-
- TEST(ReplicaSetConfig, toBSONRoundTripAbilityInvalid) {
- ReplicaSetConfig configA;
- ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(
- BSON("_id" << ""
- << "version" << -3
- << "members" << BSON_ARRAY(
- BSON("_id" << 0
- << "host" << "localhost:12345"
- << "arbiterOnly" << true
- << "votes" << 0
- ) <<
- BSON("_id" << 0
- << "host" << "localhost:3828"
- << "arbiterOnly" << false
- << "buildIndexes" << false
- << "priority" << 2
- ) <<
- BSON("_id" << 2
- << "host" << "localhost:3828"
- << "priority" << 9
- << "votes" << 0
- ))
- << "settings" << BSON("heartbeatTimeoutSecs" << -20))));
- ASSERT_OK(configB.initialize(configA.toBSON()));
- ASSERT_NOT_OK(configA.validate());
- ASSERT_NOT_OK(configB.validate());
- ASSERT_TRUE(configA == configB);
- }
-
- TEST(ReplicaSetConfig, CheckIfWriteConcernCanBeSatisfied) {
- ReplicaSetConfig configA;
- ASSERT_OK(configA.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "node0" <<
- "tags" << BSON("dc" << "NA" <<
- "rack" << "rackNA1")) <<
- BSON("_id" << 1 <<
- "host" << "node1" <<
- "tags" << BSON("dc" << "NA" <<
- "rack" << "rackNA2")) <<
- BSON("_id" << 2 <<
- "host" << "node2" <<
- "tags" << BSON("dc" << "NA" <<
- "rack" << "rackNA3")) <<
- BSON("_id" << 3 <<
- "host" << "node3" <<
- "tags" << BSON("dc" << "EU" <<
- "rack" << "rackEU1")) <<
- BSON("_id" << 4 <<
- "host" << "node4" <<
- "tags" << BSON("dc" << "EU" <<
- "rack" << "rackEU2")) <<
- BSON("_id" << 5 <<
- "host" << "node5" <<
- "arbiterOnly" << true)) <<
- "settings" << BSON("getLastErrorModes" <<
- BSON("valid" << BSON("dc" << 2 << "rack" << 3) <<
- "invalidNotEnoughValues" << BSON("dc" << 3) <<
- "invalidNotEnoughNodes" << BSON("rack" << 6))))));
-
- WriteConcernOptions validNumberWC;
- validNumberWC.wNumNodes = 5;
- ASSERT_OK(configA.checkIfWriteConcernCanBeSatisfied(validNumberWC));
-
- WriteConcernOptions invalidNumberWC;
- invalidNumberWC.wNumNodes = 6;
- ASSERT_EQUALS(ErrorCodes::CannotSatisfyWriteConcern,
- configA.checkIfWriteConcernCanBeSatisfied(invalidNumberWC));
-
- WriteConcernOptions majorityWC;
- majorityWC.wMode = "majority";
- ASSERT_OK(configA.checkIfWriteConcernCanBeSatisfied(majorityWC));
-
- WriteConcernOptions validModeWC;
- validModeWC.wMode = "valid";
- ASSERT_OK(configA.checkIfWriteConcernCanBeSatisfied(validModeWC));
-
- WriteConcernOptions fakeModeWC;
- fakeModeWC.wMode = "fake";
- ASSERT_EQUALS(ErrorCodes::UnknownReplWriteConcern,
- configA.checkIfWriteConcernCanBeSatisfied(fakeModeWC));
-
- WriteConcernOptions invalidModeNotEnoughValuesWC;
- invalidModeNotEnoughValuesWC.wMode = "invalidNotEnoughValues";
- ASSERT_EQUALS(ErrorCodes::CannotSatisfyWriteConcern,
- configA.checkIfWriteConcernCanBeSatisfied(invalidModeNotEnoughValuesWC));
-
- WriteConcernOptions invalidModeNotEnoughNodesWC;
- invalidModeNotEnoughNodesWC.wMode = "invalidNotEnoughNodes";
- ASSERT_EQUALS(ErrorCodes::CannotSatisfyWriteConcern,
- configA.checkIfWriteConcernCanBeSatisfied(invalidModeNotEnoughNodesWC));
- }
-
- TEST(ReplicaSetConfig, CheckMaximumNodesOkay) {
- ReplicaSetConfig configA;
- ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node0") <<
- BSON("_id" << 1 << "host" << "node1") <<
- BSON("_id" << 2 << "host" << "node2") <<
- BSON("_id" << 3 << "host" << "node3") <<
- BSON("_id" << 4 << "host" << "node4") <<
- BSON("_id" << 5 << "host" << "node5") <<
- BSON("_id" << 6 << "host" << "node6") <<
- BSON("_id" << 7 << "host" << "node7" << "votes" << 0) <<
- BSON("_id" << 8 << "host" << "node8" << "votes" << 0) <<
- BSON("_id" << 9 << "host" << "node9" << "votes" << 0) <<
- BSON("_id" << 10 << "host" << "node10" << "votes" << 0) <<
- BSON("_id" << 11 << "host" << "node11" << "votes" << 0) <<
- BSON("_id" << 12 << "host" << "node12" << "votes" << 0) <<
- BSON("_id" << 13 << "host" << "node13" << "votes" << 0) <<
- BSON("_id" << 14 << "host" << "node14" << "votes" << 0) <<
- BSON("_id" << 15 << "host" << "node15" << "votes" << 0) <<
- BSON("_id" << 16 << "host" << "node16" << "votes" << 0) <<
- BSON("_id" << 17 << "host" << "node17" << "votes" << 0) <<
- BSON("_id" << 18 << "host" << "node18" << "votes" << 0) <<
- BSON("_id" << 19 << "host" << "node19" << "votes" << 0) <<
- BSON("_id" << 20 << "host" << "node20" << "votes" << 0) <<
- BSON("_id" << 21 << "host" << "node21" << "votes" << 0) <<
- BSON("_id" << 22 << "host" << "node22" << "votes" << 0) <<
- BSON("_id" << 23 << "host" << "node23" << "votes" << 0) <<
- BSON("_id" << 24 << "host" << "node24" << "votes" << 0) <<
- BSON("_id" << 25 << "host" << "node25" << "votes" << 0) <<
- BSON("_id" << 26 << "host" << "node26" << "votes" << 0) <<
- BSON("_id" << 27 << "host" << "node27" << "votes" << 0) <<
- BSON("_id" << 28 << "host" << "node28" << "votes" << 0) <<
- BSON("_id" << 29 << "host" << "node29" << "votes" << 0) <<
- BSON("_id" << 30 << "host" << "node30" << "votes" << 0) <<
- BSON("_id" << 31 << "host" << "node31" << "votes" << 0) <<
- BSON("_id" << 32 << "host" << "node32" << "votes" << 0) <<
- BSON("_id" << 33 << "host" << "node33" << "votes" << 0) <<
- BSON("_id" << 34 << "host" << "node34" << "votes" << 0) <<
- BSON("_id" << 35 << "host" << "node35" << "votes" << 0) <<
- BSON("_id" << 36 << "host" << "node36" << "votes" << 0) <<
- BSON("_id" << 37 << "host" << "node37" << "votes" << 0) <<
- BSON("_id" << 38 << "host" << "node38" << "votes" << 0) <<
- BSON("_id" << 39 << "host" << "node39" << "votes" << 0) <<
- BSON("_id" << 40 << "host" << "node40" << "votes" << 0) <<
- BSON("_id" << 41 << "host" << "node41" << "votes" << 0) <<
- BSON("_id" << 42 << "host" << "node42" << "votes" << 0) <<
- BSON("_id" << 43 << "host" << "node43" << "votes" << 0) <<
- BSON("_id" << 44 << "host" << "node44" << "votes" << 0) <<
- BSON("_id" << 45 << "host" << "node45" << "votes" << 0) <<
- BSON("_id" << 46 << "host" << "node46" << "votes" << 0) <<
- BSON("_id" << 47 << "host" << "node47" << "votes" << 0) <<
- BSON("_id" << 48 << "host" << "node48" << "votes" << 0) <<
- BSON("_id" << 49 << "host" << "node49" << "votes" << 0)))));
- ASSERT_OK(configB.initialize(configA.toBSON()));
- ASSERT_OK(configA.validate());
- ASSERT_OK(configB.validate());
- ASSERT_TRUE(configA == configB);
+ // compare the members
+ for (ReplicaSetConfig::MemberIterator memA = a.membersBegin(); memA != a.membersEnd(); memA++) {
+ bool same = false;
+ for (ReplicaSetConfig::MemberIterator memB = b.membersBegin(); memB != b.membersEnd();
+ memB++) {
+ if (*memA == *memB) {
+ same = true;
+ break;
+ }
+ }
+ if (!same) {
+ return false;
+ }
}
- TEST(ReplicaSetConfig, CheckBeyondMaximumNodesFailsValidate) {
- ReplicaSetConfig configA;
- ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(
- BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node0") <<
- BSON("_id" << 1 << "host" << "node1") <<
- BSON("_id" << 2 << "host" << "node2") <<
- BSON("_id" << 3 << "host" << "node3") <<
- BSON("_id" << 4 << "host" << "node4") <<
- BSON("_id" << 5 << "host" << "node5") <<
- BSON("_id" << 6 << "host" << "node6") <<
- BSON("_id" << 7 << "host" << "node7" << "votes" << 0) <<
- BSON("_id" << 8 << "host" << "node8" << "votes" << 0) <<
- BSON("_id" << 9 << "host" << "node9" << "votes" << 0) <<
- BSON("_id" << 10 << "host" << "node10" << "votes" << 0) <<
- BSON("_id" << 11 << "host" << "node11" << "votes" << 0) <<
- BSON("_id" << 12 << "host" << "node12" << "votes" << 0) <<
- BSON("_id" << 13 << "host" << "node13" << "votes" << 0) <<
- BSON("_id" << 14 << "host" << "node14" << "votes" << 0) <<
- BSON("_id" << 15 << "host" << "node15" << "votes" << 0) <<
- BSON("_id" << 16 << "host" << "node16" << "votes" << 0) <<
- BSON("_id" << 17 << "host" << "node17" << "votes" << 0) <<
- BSON("_id" << 18 << "host" << "node18" << "votes" << 0) <<
- BSON("_id" << 19 << "host" << "node19" << "votes" << 0) <<
- BSON("_id" << 20 << "host" << "node20" << "votes" << 0) <<
- BSON("_id" << 21 << "host" << "node21" << "votes" << 0) <<
- BSON("_id" << 22 << "host" << "node22" << "votes" << 0) <<
- BSON("_id" << 23 << "host" << "node23" << "votes" << 0) <<
- BSON("_id" << 24 << "host" << "node24" << "votes" << 0) <<
- BSON("_id" << 25 << "host" << "node25" << "votes" << 0) <<
- BSON("_id" << 26 << "host" << "node26" << "votes" << 0) <<
- BSON("_id" << 27 << "host" << "node27" << "votes" << 0) <<
- BSON("_id" << 28 << "host" << "node28" << "votes" << 0) <<
- BSON("_id" << 29 << "host" << "node29" << "votes" << 0) <<
- BSON("_id" << 30 << "host" << "node30" << "votes" << 0) <<
- BSON("_id" << 31 << "host" << "node31" << "votes" << 0) <<
- BSON("_id" << 32 << "host" << "node32" << "votes" << 0) <<
- BSON("_id" << 33 << "host" << "node33" << "votes" << 0) <<
- BSON("_id" << 34 << "host" << "node34" << "votes" << 0) <<
- BSON("_id" << 35 << "host" << "node35" << "votes" << 0) <<
- BSON("_id" << 36 << "host" << "node36" << "votes" << 0) <<
- BSON("_id" << 37 << "host" << "node37" << "votes" << 0) <<
- BSON("_id" << 38 << "host" << "node38" << "votes" << 0) <<
- BSON("_id" << 39 << "host" << "node39" << "votes" << 0) <<
- BSON("_id" << 40 << "host" << "node40" << "votes" << 0) <<
- BSON("_id" << 41 << "host" << "node41" << "votes" << 0) <<
- BSON("_id" << 42 << "host" << "node42" << "votes" << 0) <<
- BSON("_id" << 43 << "host" << "node43" << "votes" << 0) <<
- BSON("_id" << 44 << "host" << "node44" << "votes" << 0) <<
- BSON("_id" << 45 << "host" << "node45" << "votes" << 0) <<
- BSON("_id" << 46 << "host" << "node46" << "votes" << 0) <<
- BSON("_id" << 47 << "host" << "node47" << "votes" << 0) <<
- BSON("_id" << 48 << "host" << "node48" << "votes" << 0) <<
- BSON("_id" << 49 << "host" << "node49" << "votes" << 0) <<
- BSON("_id" << 50 << "host" << "node50" << "votes" << 0)))));
- ASSERT_OK(configB.initialize(configA.toBSON()));
- ASSERT_NOT_OK(configA.validate());
- ASSERT_NOT_OK(configB.validate());
- ASSERT_TRUE(configA == configB);
- }
+ // simple comparisons
+ return a.getReplSetName() == b.getReplSetName() &&
+ a.getConfigVersion() == b.getConfigVersion() && a.getNumMembers() == b.getNumMembers() &&
+ a.getHeartbeatTimeoutPeriod() == b.getHeartbeatTimeoutPeriod() &&
+ a.isChainingAllowed() == b.isChainingAllowed() &&
+ a.getDefaultWriteConcern().wNumNodes == b.getDefaultWriteConcern().wNumNodes &&
+ a.getDefaultWriteConcern().wMode == b.getDefaultWriteConcern().wMode &&
+ a.getProtocolVersion() == b.getProtocolVersion();
+}
+
+TEST(ReplicaSetConfig, toBSONRoundTripAbility) {
+ ReplicaSetConfig configA;
+ ReplicaSetConfig configB;
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")) << "settings"
+ << BSON("heartbeatTimeoutSecs" << 20))));
+ ASSERT_OK(configB.initialize(configA.toBSON()));
+ ASSERT_TRUE(configA == configB);
+}
+
+TEST(ReplicaSetConfig, toBSONRoundTripAbilityLarge) {
+ ReplicaSetConfig configA;
+ ReplicaSetConfig configB;
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "asdf"
+ << "version" << 9 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "arbiterOnly" << true << "votes" << 1)
+ << BSON("_id" << 3 << "host"
+ << "localhost:3828"
+ << "arbiterOnly" << false << "hidden" << true << "buildIndexes"
+ << false << "priority" << 0 << "slaveDelay" << 17 << "votes"
+ << 0 << "tags" << BSON("coast"
+ << "east"
+ << "ssd"
+ << "true"))
+ << BSON("_id" << 2 << "host"
+ << "foo.com:3828"
+ << "priority" << 9 << "votes" << 0 << "tags"
+ << BSON("coast"
+ << "west"
+ << "hdd"
+ << "true"))) << "settings"
+ << BSON("heartbeatTimeoutSecs" << 20 << "chainingAllowd" << true << "getLastErrorDefaults"
+ << BSON("w"
+ << "majority") << "getLastErrorModes"
+ << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1) << "coasts"
+ << BSON("coast" << 2))))));
+ ASSERT_OK(configB.initialize(configA.toBSON()));
+ ASSERT_TRUE(configA == configB);
+}
+
+TEST(ReplicaSetConfig, toBSONRoundTripAbilityInvalid) {
+ ReplicaSetConfig configA;
+ ReplicaSetConfig configB;
+ ASSERT_OK(
+ configA.initialize(BSON("_id"
+ << ""
+ << "version" << -3 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "arbiterOnly" << true << "votes" << 0)
+ << BSON("_id" << 0 << "host"
+ << "localhost:3828"
+ << "arbiterOnly" << false
+ << "buildIndexes" << false << "priority"
+ << 2)
+ << BSON("_id" << 2 << "host"
+ << "localhost:3828"
+ << "priority" << 9 << "votes" << 0))
+ << "settings" << BSON("heartbeatTimeoutSecs" << -20))));
+ ASSERT_OK(configB.initialize(configA.toBSON()));
+ ASSERT_NOT_OK(configA.validate());
+ ASSERT_NOT_OK(configB.validate());
+ ASSERT_TRUE(configA == configB);
+}
+
+TEST(ReplicaSetConfig, CheckIfWriteConcernCanBeSatisfied) {
+ ReplicaSetConfig configA;
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags" << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags" << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags" << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags" << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags" << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly" << true))
+ << "settings" << BSON("getLastErrorModes"
+ << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
+ << "invalidNotEnoughValues" << BSON("dc" << 3)
+ << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
+
+ WriteConcernOptions validNumberWC;
+ validNumberWC.wNumNodes = 5;
+ ASSERT_OK(configA.checkIfWriteConcernCanBeSatisfied(validNumberWC));
+
+ WriteConcernOptions invalidNumberWC;
+ invalidNumberWC.wNumNodes = 6;
+ ASSERT_EQUALS(ErrorCodes::CannotSatisfyWriteConcern,
+ configA.checkIfWriteConcernCanBeSatisfied(invalidNumberWC));
+
+ WriteConcernOptions majorityWC;
+ majorityWC.wMode = "majority";
+ ASSERT_OK(configA.checkIfWriteConcernCanBeSatisfied(majorityWC));
+
+ WriteConcernOptions validModeWC;
+ validModeWC.wMode = "valid";
+ ASSERT_OK(configA.checkIfWriteConcernCanBeSatisfied(validModeWC));
+
+ WriteConcernOptions fakeModeWC;
+ fakeModeWC.wMode = "fake";
+ ASSERT_EQUALS(ErrorCodes::UnknownReplWriteConcern,
+ configA.checkIfWriteConcernCanBeSatisfied(fakeModeWC));
+
+ WriteConcernOptions invalidModeNotEnoughValuesWC;
+ invalidModeNotEnoughValuesWC.wMode = "invalidNotEnoughValues";
+ ASSERT_EQUALS(ErrorCodes::CannotSatisfyWriteConcern,
+ configA.checkIfWriteConcernCanBeSatisfied(invalidModeNotEnoughValuesWC));
+
+ WriteConcernOptions invalidModeNotEnoughNodesWC;
+ invalidModeNotEnoughNodesWC.wMode = "invalidNotEnoughNodes";
+ ASSERT_EQUALS(ErrorCodes::CannotSatisfyWriteConcern,
+ configA.checkIfWriteConcernCanBeSatisfied(invalidModeNotEnoughNodesWC));
+}
+
+TEST(ReplicaSetConfig, CheckMaximumNodesOkay) {
+ ReplicaSetConfig configA;
+ ReplicaSetConfig configB;
+ ASSERT_OK(configA.initialize(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0")
+ << BSON("_id" << 1 << "host"
+ << "node1") << BSON("_id" << 2 << "host"
+ << "node2")
+ << BSON("_id" << 3 << "host"
+ << "node3") << BSON("_id" << 4 << "host"
+ << "node4")
+ << BSON("_id" << 5 << "host"
+ << "node5") << BSON("_id" << 6 << "host"
+ << "node6")
+ << BSON("_id" << 7 << "host"
+ << "node7"
+ << "votes" << 0) << BSON("_id" << 8 << "host"
+ << "node8"
+ << "votes" << 0)
+ << BSON("_id" << 9 << "host"
+ << "node9"
+ << "votes" << 0) << BSON("_id" << 10 << "host"
+ << "node10"
+ << "votes" << 0)
+ << BSON("_id" << 11 << "host"
+ << "node11"
+ << "votes" << 0) << BSON("_id" << 12 << "host"
+ << "node12"
+ << "votes" << 0)
+ << BSON("_id" << 13 << "host"
+ << "node13"
+ << "votes" << 0) << BSON("_id" << 14 << "host"
+ << "node14"
+ << "votes" << 0)
+ << BSON("_id" << 15 << "host"
+ << "node15"
+ << "votes" << 0) << BSON("_id" << 16 << "host"
+ << "node16"
+ << "votes" << 0)
+ << BSON("_id" << 17 << "host"
+ << "node17"
+ << "votes" << 0) << BSON("_id" << 18 << "host"
+ << "node18"
+ << "votes" << 0)
+ << BSON("_id" << 19 << "host"
+ << "node19"
+ << "votes" << 0) << BSON("_id" << 20 << "host"
+ << "node20"
+ << "votes" << 0)
+ << BSON("_id" << 21 << "host"
+ << "node21"
+ << "votes" << 0) << BSON("_id" << 22 << "host"
+ << "node22"
+ << "votes" << 0)
+ << BSON("_id" << 23 << "host"
+ << "node23"
+ << "votes" << 0) << BSON("_id" << 24 << "host"
+ << "node24"
+ << "votes" << 0)
+ << BSON("_id" << 25 << "host"
+ << "node25"
+ << "votes" << 0) << BSON("_id" << 26 << "host"
+ << "node26"
+ << "votes" << 0)
+ << BSON("_id" << 27 << "host"
+ << "node27"
+ << "votes" << 0) << BSON("_id" << 28 << "host"
+ << "node28"
+ << "votes" << 0)
+ << BSON("_id" << 29 << "host"
+ << "node29"
+ << "votes" << 0) << BSON("_id" << 30 << "host"
+ << "node30"
+ << "votes" << 0)
+ << BSON("_id" << 31 << "host"
+ << "node31"
+ << "votes" << 0) << BSON("_id" << 32 << "host"
+ << "node32"
+ << "votes" << 0)
+ << BSON("_id" << 33 << "host"
+ << "node33"
+ << "votes" << 0) << BSON("_id" << 34 << "host"
+ << "node34"
+ << "votes" << 0)
+ << BSON("_id" << 35 << "host"
+ << "node35"
+ << "votes" << 0) << BSON("_id" << 36 << "host"
+ << "node36"
+ << "votes" << 0)
+ << BSON("_id" << 37 << "host"
+ << "node37"
+ << "votes" << 0) << BSON("_id" << 38 << "host"
+ << "node38"
+ << "votes" << 0)
+ << BSON("_id" << 39 << "host"
+ << "node39"
+ << "votes" << 0) << BSON("_id" << 40 << "host"
+ << "node40"
+ << "votes" << 0)
+ << BSON("_id" << 41 << "host"
+ << "node41"
+ << "votes" << 0) << BSON("_id" << 42 << "host"
+ << "node42"
+ << "votes" << 0)
+ << BSON("_id" << 43 << "host"
+ << "node43"
+ << "votes" << 0) << BSON("_id" << 44 << "host"
+ << "node44"
+ << "votes" << 0)
+ << BSON("_id" << 45 << "host"
+ << "node45"
+ << "votes" << 0) << BSON("_id" << 46 << "host"
+ << "node46"
+ << "votes" << 0)
+ << BSON("_id" << 47 << "host"
+ << "node47"
+ << "votes" << 0) << BSON("_id" << 48 << "host"
+ << "node48"
+ << "votes" << 0)
+ << BSON("_id" << 49 << "host"
+ << "node49"
+ << "votes" << 0)))));
+ ASSERT_OK(configB.initialize(configA.toBSON()));
+ ASSERT_OK(configA.validate());
+ ASSERT_OK(configB.validate());
+ ASSERT_TRUE(configA == configB);
+}
+
+TEST(ReplicaSetConfig, CheckBeyondMaximumNodesFailsValidate) {
+ ReplicaSetConfig configA;
+ ReplicaSetConfig configB;
+ ASSERT_OK(configA.initialize(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0")
+ << BSON("_id" << 1 << "host"
+ << "node1") << BSON("_id" << 2 << "host"
+ << "node2")
+ << BSON("_id" << 3 << "host"
+ << "node3") << BSON("_id" << 4 << "host"
+ << "node4")
+ << BSON("_id" << 5 << "host"
+ << "node5") << BSON("_id" << 6 << "host"
+ << "node6")
+ << BSON("_id" << 7 << "host"
+ << "node7"
+ << "votes" << 0) << BSON("_id" << 8 << "host"
+ << "node8"
+ << "votes" << 0)
+ << BSON("_id" << 9 << "host"
+ << "node9"
+ << "votes" << 0) << BSON("_id" << 10 << "host"
+ << "node10"
+ << "votes" << 0)
+ << BSON("_id" << 11 << "host"
+ << "node11"
+ << "votes" << 0) << BSON("_id" << 12 << "host"
+ << "node12"
+ << "votes" << 0)
+ << BSON("_id" << 13 << "host"
+ << "node13"
+ << "votes" << 0) << BSON("_id" << 14 << "host"
+ << "node14"
+ << "votes" << 0)
+ << BSON("_id" << 15 << "host"
+ << "node15"
+ << "votes" << 0) << BSON("_id" << 16 << "host"
+ << "node16"
+ << "votes" << 0)
+ << BSON("_id" << 17 << "host"
+ << "node17"
+ << "votes" << 0) << BSON("_id" << 18 << "host"
+ << "node18"
+ << "votes" << 0)
+ << BSON("_id" << 19 << "host"
+ << "node19"
+ << "votes" << 0) << BSON("_id" << 20 << "host"
+ << "node20"
+ << "votes" << 0)
+ << BSON("_id" << 21 << "host"
+ << "node21"
+ << "votes" << 0) << BSON("_id" << 22 << "host"
+ << "node22"
+ << "votes" << 0)
+ << BSON("_id" << 23 << "host"
+ << "node23"
+ << "votes" << 0) << BSON("_id" << 24 << "host"
+ << "node24"
+ << "votes" << 0)
+ << BSON("_id" << 25 << "host"
+ << "node25"
+ << "votes" << 0) << BSON("_id" << 26 << "host"
+ << "node26"
+ << "votes" << 0)
+ << BSON("_id" << 27 << "host"
+ << "node27"
+ << "votes" << 0) << BSON("_id" << 28 << "host"
+ << "node28"
+ << "votes" << 0)
+ << BSON("_id" << 29 << "host"
+ << "node29"
+ << "votes" << 0) << BSON("_id" << 30 << "host"
+ << "node30"
+ << "votes" << 0)
+ << BSON("_id" << 31 << "host"
+ << "node31"
+ << "votes" << 0) << BSON("_id" << 32 << "host"
+ << "node32"
+ << "votes" << 0)
+ << BSON("_id" << 33 << "host"
+ << "node33"
+ << "votes" << 0) << BSON("_id" << 34 << "host"
+ << "node34"
+ << "votes" << 0)
+ << BSON("_id" << 35 << "host"
+ << "node35"
+ << "votes" << 0) << BSON("_id" << 36 << "host"
+ << "node36"
+ << "votes" << 0)
+ << BSON("_id" << 37 << "host"
+ << "node37"
+ << "votes" << 0) << BSON("_id" << 38 << "host"
+ << "node38"
+ << "votes" << 0)
+ << BSON("_id" << 39 << "host"
+ << "node39"
+ << "votes" << 0) << BSON("_id" << 40 << "host"
+ << "node40"
+ << "votes" << 0)
+ << BSON("_id" << 41 << "host"
+ << "node41"
+ << "votes" << 0) << BSON("_id" << 42 << "host"
+ << "node42"
+ << "votes" << 0)
+ << BSON("_id" << 43 << "host"
+ << "node43"
+ << "votes" << 0) << BSON("_id" << 44 << "host"
+ << "node44"
+ << "votes" << 0)
+ << BSON("_id" << 45 << "host"
+ << "node45"
+ << "votes" << 0) << BSON("_id" << 46 << "host"
+ << "node46"
+ << "votes" << 0)
+ << BSON("_id" << 47 << "host"
+ << "node47"
+ << "votes" << 0) << BSON("_id" << 48 << "host"
+ << "node48"
+ << "votes" << 0)
+ << BSON("_id" << 49 << "host"
+ << "node49"
+ << "votes" << 0) << BSON("_id" << 50 << "host"
+ << "node50"
+ << "votes" << 0)))));
+ ASSERT_OK(configB.initialize(configA.toBSON()));
+ ASSERT_NOT_OK(configA.validate());
+ ASSERT_NOT_OK(configB.validate());
+ ASSERT_TRUE(configA == configB);
+}
} // namespace
} // namespace repl
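
The write-concern tests above lean on custom getLastErrorModes entries; the sketch below is illustrative only and not part of this patch. It shows how such a mode, looked up with findCustomWriteMode, would drive the ReplicaSetTagMatch machinery reformatted in the next file. The StatusWith<ReplicaSetTagPattern> return type and the member/tag iterators are taken from the code visible in this diff; a config with a mode named "coasts" is assumed.

    StatusWith<ReplicaSetTagPattern> swPattern = config.findCustomWriteMode("coasts");
    if (swPattern.getStatus().isOK()) {
        ReplicaSetTagMatch matcher(swPattern.getValue());
        bool satisfied = false;
        // Walk the members that (hypothetically) acknowledged a write and feed their tags
        // into the matcher; update() returns true once every constraint in the pattern
        // has seen enough distinct tag values.
        for (ReplicaSetConfig::MemberIterator mem = config.membersBegin();
             !satisfied && mem != config.membersEnd();
             ++mem) {
            for (MemberConfig::TagIterator tag = mem->tagsBegin(); tag != mem->tagsEnd(); ++tag) {
                if (matcher.update(*tag)) {
                    satisfied = true;
                    break;
                }
            }
        }
    }
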
diff --git a/src/mongo/db/repl/replica_set_tag.cpp b/src/mongo/db/repl/replica_set_tag.cpp
index 1d6fcc0766d..ed6781cfc95 100644
--- a/src/mongo/db/repl/replica_set_tag.cpp
+++ b/src/mongo/db/repl/replica_set_tag.cpp
@@ -41,206 +41,198 @@
namespace mongo {
namespace repl {
- bool ReplicaSetTag::operator==(const ReplicaSetTag& other) const {
- return _keyIndex == other._keyIndex && _valueIndex == other._valueIndex;
- }
-
- bool ReplicaSetTag::operator!=(const ReplicaSetTag& other) const {
- return !(*this == other);
- }
-
- void ReplicaSetTagPattern::addTagCountConstraint(int32_t keyIndex, int32_t minCount) {
- const std::vector<TagCountConstraint>::iterator iter = std::find_if(
- _constraints.begin(),
- _constraints.end(),
- stdx::bind(std::equal_to<int32_t>(),
- keyIndex,
- stdx::bind(&TagCountConstraint::getKeyIndex, stdx::placeholders::_1)));
- if (iter == _constraints.end()) {
- _constraints.push_back(TagCountConstraint(keyIndex, minCount));
- }
- else if (iter->getMinCount() < minCount) {
- *iter = TagCountConstraint(keyIndex, minCount);
- }
- }
-
- ReplicaSetTagPattern::TagCountConstraint::TagCountConstraint(int32_t keyIndex,
- int32_t minCount) :
- _keyIndex(keyIndex), _minCount(minCount) {}
-
- ReplicaSetTagMatch::ReplicaSetTagMatch(const ReplicaSetTagPattern& pattern) {
- for (ReplicaSetTagPattern::ConstraintIterator iter = pattern.constraintsBegin();
- iter != pattern.constraintsEnd();
- ++iter) {
-
- _boundTagValues.push_back(BoundTagValue(*iter));
- }
- }
-
- bool ReplicaSetTagMatch::update(const ReplicaSetTag& tag) {
- const std::vector<BoundTagValue>::iterator iter = std::find_if(
- _boundTagValues.begin(),
- _boundTagValues.end(),
- stdx::bind(std::equal_to<int32_t>(), tag.getKeyIndex(), stdx::bind(
- &BoundTagValue::getKeyIndex, stdx::placeholders::_1)));
- if (iter != _boundTagValues.end()) {
- if (!sequenceContains(iter->boundValues, tag.getValueIndex())) {
- iter->boundValues.push_back(tag.getValueIndex());
- }
+bool ReplicaSetTag::operator==(const ReplicaSetTag& other) const {
+ return _keyIndex == other._keyIndex && _valueIndex == other._valueIndex;
+}
+
+bool ReplicaSetTag::operator!=(const ReplicaSetTag& other) const {
+ return !(*this == other);
+}
+
+void ReplicaSetTagPattern::addTagCountConstraint(int32_t keyIndex, int32_t minCount) {
+ const std::vector<TagCountConstraint>::iterator iter = std::find_if(
+ _constraints.begin(),
+ _constraints.end(),
+ stdx::bind(std::equal_to<int32_t>(),
+ keyIndex,
+ stdx::bind(&TagCountConstraint::getKeyIndex, stdx::placeholders::_1)));
+ if (iter == _constraints.end()) {
+ _constraints.push_back(TagCountConstraint(keyIndex, minCount));
+ } else if (iter->getMinCount() < minCount) {
+ *iter = TagCountConstraint(keyIndex, minCount);
+ }
+}
+
+ReplicaSetTagPattern::TagCountConstraint::TagCountConstraint(int32_t keyIndex, int32_t minCount)
+ : _keyIndex(keyIndex), _minCount(minCount) {}
+
+ReplicaSetTagMatch::ReplicaSetTagMatch(const ReplicaSetTagPattern& pattern) {
+ for (ReplicaSetTagPattern::ConstraintIterator iter = pattern.constraintsBegin();
+ iter != pattern.constraintsEnd();
+ ++iter) {
+ _boundTagValues.push_back(BoundTagValue(*iter));
+ }
+}
+
+bool ReplicaSetTagMatch::update(const ReplicaSetTag& tag) {
+ const std::vector<BoundTagValue>::iterator iter =
+ std::find_if(_boundTagValues.begin(),
+ _boundTagValues.end(),
+ stdx::bind(std::equal_to<int32_t>(),
+ tag.getKeyIndex(),
+ stdx::bind(&BoundTagValue::getKeyIndex, stdx::placeholders::_1)));
+ if (iter != _boundTagValues.end()) {
+ if (!sequenceContains(iter->boundValues, tag.getValueIndex())) {
+ iter->boundValues.push_back(tag.getValueIndex());
}
- return isSatisfied();
}
-
- bool ReplicaSetTagMatch::isSatisfied() const {
- const std::vector<BoundTagValue>::const_iterator iter = std::find_if(
- _boundTagValues.begin(),
- _boundTagValues.end(),
- stdx::bind(std::logical_not<bool>(),
- stdx::bind(&BoundTagValue::isSatisfied, stdx::placeholders::_1)));
- return iter == _boundTagValues.end();
- }
-
- bool ReplicaSetTagMatch::BoundTagValue::isSatisfied() const {
- return constraint.getMinCount() <= int32_t(boundValues.size());
- }
-
- ReplicaSetTag ReplicaSetTagConfig::makeTag(StringData key, StringData value) {
- int32_t keyIndex = _findKeyIndex(key);
- if (size_t(keyIndex) == _tagData.size()) {
- _tagData.push_back(make_pair(key.toString(), ValueVector()));
- }
- ValueVector& values = _tagData[keyIndex].second;
- for (size_t valueIndex = 0; valueIndex < values.size(); ++valueIndex) {
- if (values[valueIndex] != value)
- continue;
- return ReplicaSetTag(keyIndex, int32_t(valueIndex));
- }
- values.push_back(value.toString());
- return ReplicaSetTag(keyIndex, int32_t(values.size()) - 1);
- }
-
- ReplicaSetTag ReplicaSetTagConfig::findTag(StringData key,
- StringData value) const {
- int32_t keyIndex = _findKeyIndex(key);
- if (size_t(keyIndex) == _tagData.size())
- return ReplicaSetTag(-1, -1);
- const ValueVector& values = _tagData[keyIndex].second;
- for (size_t valueIndex = 0; valueIndex < values.size(); ++valueIndex) {
- if (values[valueIndex] == value) {
- return ReplicaSetTag(keyIndex, int32_t(valueIndex));
- }
- }
+ return isSatisfied();
+}
+
+bool ReplicaSetTagMatch::isSatisfied() const {
+ const std::vector<BoundTagValue>::const_iterator iter =
+ std::find_if(_boundTagValues.begin(),
+ _boundTagValues.end(),
+ stdx::bind(std::logical_not<bool>(),
+ stdx::bind(&BoundTagValue::isSatisfied, stdx::placeholders::_1)));
+ return iter == _boundTagValues.end();
+}
+
+bool ReplicaSetTagMatch::BoundTagValue::isSatisfied() const {
+ return constraint.getMinCount() <= int32_t(boundValues.size());
+}
+
+ReplicaSetTag ReplicaSetTagConfig::makeTag(StringData key, StringData value) {
+ int32_t keyIndex = _findKeyIndex(key);
+ if (size_t(keyIndex) == _tagData.size()) {
+ _tagData.push_back(make_pair(key.toString(), ValueVector()));
+ }
+ ValueVector& values = _tagData[keyIndex].second;
+ for (size_t valueIndex = 0; valueIndex < values.size(); ++valueIndex) {
+ if (values[valueIndex] != value)
+ continue;
+ return ReplicaSetTag(keyIndex, int32_t(valueIndex));
+ }
+ values.push_back(value.toString());
+ return ReplicaSetTag(keyIndex, int32_t(values.size()) - 1);
+}
+
+ReplicaSetTag ReplicaSetTagConfig::findTag(StringData key, StringData value) const {
+ int32_t keyIndex = _findKeyIndex(key);
+ if (size_t(keyIndex) == _tagData.size())
return ReplicaSetTag(-1, -1);
- }
-
- ReplicaSetTagPattern ReplicaSetTagConfig::makePattern() const {
- return ReplicaSetTagPattern();
- }
-
- Status ReplicaSetTagConfig::addTagCountConstraintToPattern(ReplicaSetTagPattern* pattern,
- StringData tagKey,
- int32_t minCount) const {
- int32_t keyIndex = _findKeyIndex(tagKey);
- if (size_t(keyIndex) == _tagData.size()) {
- return Status(ErrorCodes::NoSuchKey,
- str::stream() << "No replica set tag key " << tagKey << " in config");
- }
- pattern->addTagCountConstraint(keyIndex, minCount);
- return Status::OK();
- }
-
- int32_t ReplicaSetTagConfig::_findKeyIndex(StringData key) const {
- size_t i;
- for (i = 0; i < _tagData.size(); ++i) {
- if (_tagData[i].first == key) {
- break;
- }
- }
- return int32_t(i);
- }
-
- std::string ReplicaSetTagConfig::getTagKey(const ReplicaSetTag& tag) const {
- invariant(tag.isValid() && size_t(tag.getKeyIndex()) < _tagData.size());
- return _tagData[tag.getKeyIndex()].first;
- }
-
- std::string ReplicaSetTagConfig::getTagValue(const ReplicaSetTag& tag) const {
- invariant(tag.isValid() && size_t(tag.getKeyIndex()) < _tagData.size());
- const ValueVector& values = _tagData[tag.getKeyIndex()].second;
- invariant(tag.getValueIndex() >= 0 && size_t(tag.getValueIndex()) < values.size());
- return values[tag.getValueIndex()];
- }
-
- void ReplicaSetTagConfig::put(const ReplicaSetTag& tag, std::ostream& os) const {
- BSONObjBuilder builder;
- _appendTagKey(tag.getKeyIndex(), &builder);
- _appendTagValue(tag.getKeyIndex(), tag.getValueIndex(), &builder);
- os << builder.done();
- }
-
- void ReplicaSetTagConfig::put(const ReplicaSetTagPattern& pattern, std::ostream& os) const {
- BSONObjBuilder builder;
- BSONArrayBuilder allConstraintsBuilder(builder.subarrayStart("constraints"));
- for (ReplicaSetTagPattern::ConstraintIterator iter = pattern.constraintsBegin();
- iter != pattern.constraintsEnd();
- ++iter) {
-
- BSONObjBuilder constraintBuilder(allConstraintsBuilder.subobjStart());
- _appendConstraint(*iter, &constraintBuilder);
+ const ValueVector& values = _tagData[keyIndex].second;
+ for (size_t valueIndex = 0; valueIndex < values.size(); ++valueIndex) {
+ if (values[valueIndex] == value) {
+ return ReplicaSetTag(keyIndex, int32_t(valueIndex));
}
- allConstraintsBuilder.doneFast();
- os << builder.done();
}
-
- void ReplicaSetTagConfig::put(const ReplicaSetTagMatch& matcher, std::ostream& os) const {
- BSONObjBuilder builder;
- BSONArrayBuilder allBindingsBuilder(builder.subarrayStart("bindings"));
- for (size_t i = 0; i < matcher._boundTagValues.size(); ++i) {
-
- BSONObjBuilder bindingBuilder(allBindingsBuilder.subobjStart());
- _appendConstraint(matcher._boundTagValues[i].constraint, &bindingBuilder);
- BSONArrayBuilder boundValues(bindingBuilder.subarrayStart("boundValues"));
- for (size_t j = 0; j < matcher._boundTagValues[i].boundValues.size(); ++j) {
- BSONObjBuilder bvb(boundValues.subobjStart());
- _appendTagValue(matcher._boundTagValues[i].constraint.getKeyIndex(),
- matcher._boundTagValues[i].boundValues[j],
- &bvb);
- }
+ return ReplicaSetTag(-1, -1);
+}
+
+ReplicaSetTagPattern ReplicaSetTagConfig::makePattern() const {
+ return ReplicaSetTagPattern();
+}
+
+Status ReplicaSetTagConfig::addTagCountConstraintToPattern(ReplicaSetTagPattern* pattern,
+ StringData tagKey,
+ int32_t minCount) const {
+ int32_t keyIndex = _findKeyIndex(tagKey);
+ if (size_t(keyIndex) == _tagData.size()) {
+ return Status(ErrorCodes::NoSuchKey,
+ str::stream() << "No replica set tag key " << tagKey << " in config");
+ }
+ pattern->addTagCountConstraint(keyIndex, minCount);
+ return Status::OK();
+}
+
+int32_t ReplicaSetTagConfig::_findKeyIndex(StringData key) const {
+ size_t i;
+ for (i = 0; i < _tagData.size(); ++i) {
+ if (_tagData[i].first == key) {
+ break;
}
- allBindingsBuilder.doneFast();
- os << builder.done();
}
-
- void ReplicaSetTagConfig::_appendTagKey(int32_t keyIndex, BSONObjBuilder* builder) const {
- if (keyIndex < 0 || size_t(keyIndex) >= _tagData.size()) {
- builder->append("tagKey", int(keyIndex));
- }
- else {
- builder->append("tagKey", _tagData[keyIndex].first);
+ return int32_t(i);
+}
+
+std::string ReplicaSetTagConfig::getTagKey(const ReplicaSetTag& tag) const {
+ invariant(tag.isValid() && size_t(tag.getKeyIndex()) < _tagData.size());
+ return _tagData[tag.getKeyIndex()].first;
+}
+
+std::string ReplicaSetTagConfig::getTagValue(const ReplicaSetTag& tag) const {
+ invariant(tag.isValid() && size_t(tag.getKeyIndex()) < _tagData.size());
+ const ValueVector& values = _tagData[tag.getKeyIndex()].second;
+ invariant(tag.getValueIndex() >= 0 && size_t(tag.getValueIndex()) < values.size());
+ return values[tag.getValueIndex()];
+}
+
+void ReplicaSetTagConfig::put(const ReplicaSetTag& tag, std::ostream& os) const {
+ BSONObjBuilder builder;
+ _appendTagKey(tag.getKeyIndex(), &builder);
+ _appendTagValue(tag.getKeyIndex(), tag.getValueIndex(), &builder);
+ os << builder.done();
+}
+
+void ReplicaSetTagConfig::put(const ReplicaSetTagPattern& pattern, std::ostream& os) const {
+ BSONObjBuilder builder;
+ BSONArrayBuilder allConstraintsBuilder(builder.subarrayStart("constraints"));
+ for (ReplicaSetTagPattern::ConstraintIterator iter = pattern.constraintsBegin();
+ iter != pattern.constraintsEnd();
+ ++iter) {
+ BSONObjBuilder constraintBuilder(allConstraintsBuilder.subobjStart());
+ _appendConstraint(*iter, &constraintBuilder);
+ }
+ allConstraintsBuilder.doneFast();
+ os << builder.done();
+}
+
+void ReplicaSetTagConfig::put(const ReplicaSetTagMatch& matcher, std::ostream& os) const {
+ BSONObjBuilder builder;
+ BSONArrayBuilder allBindingsBuilder(builder.subarrayStart("bindings"));
+ for (size_t i = 0; i < matcher._boundTagValues.size(); ++i) {
+ BSONObjBuilder bindingBuilder(allBindingsBuilder.subobjStart());
+ _appendConstraint(matcher._boundTagValues[i].constraint, &bindingBuilder);
+ BSONArrayBuilder boundValues(bindingBuilder.subarrayStart("boundValues"));
+ for (size_t j = 0; j < matcher._boundTagValues[i].boundValues.size(); ++j) {
+ BSONObjBuilder bvb(boundValues.subobjStart());
+ _appendTagValue(matcher._boundTagValues[i].constraint.getKeyIndex(),
+ matcher._boundTagValues[i].boundValues[j],
+ &bvb);
}
}
-
- void ReplicaSetTagConfig::_appendTagValue(int32_t keyIndex,
- int32_t valueIndex,
- BSONObjBuilder* builder) const {
- if (keyIndex < 0 || size_t(keyIndex) >= _tagData.size()) {
- builder->append("tagValue", valueIndex);
- return;
- }
- KeyValueVector::const_reference keyEntry = _tagData[keyIndex];
- if (valueIndex < 0 || size_t(valueIndex) < keyEntry.second.size()) {
- builder->append("tagValue", valueIndex);
- }
- builder->append("tagValue", keyEntry.second[valueIndex]);
- }
-
- void ReplicaSetTagConfig::_appendConstraint(
- const ReplicaSetTagPattern::TagCountConstraint& constraint,
- BSONObjBuilder* builder) const {
-
- _appendTagKey(constraint.getKeyIndex(), builder);
- builder->append("minCount", int(constraint.getMinCount()));
- }
+ allBindingsBuilder.doneFast();
+ os << builder.done();
+}
+
+void ReplicaSetTagConfig::_appendTagKey(int32_t keyIndex, BSONObjBuilder* builder) const {
+ if (keyIndex < 0 || size_t(keyIndex) >= _tagData.size()) {
+ builder->append("tagKey", int(keyIndex));
+ } else {
+ builder->append("tagKey", _tagData[keyIndex].first);
+ }
+}
+
+void ReplicaSetTagConfig::_appendTagValue(int32_t keyIndex,
+ int32_t valueIndex,
+ BSONObjBuilder* builder) const {
+ if (keyIndex < 0 || size_t(keyIndex) >= _tagData.size()) {
+ builder->append("tagValue", valueIndex);
+ return;
+ }
+ KeyValueVector::const_reference keyEntry = _tagData[keyIndex];
+ if (valueIndex < 0 || size_t(valueIndex) < keyEntry.second.size()) {
+ builder->append("tagValue", valueIndex);
+ }
+ builder->append("tagValue", keyEntry.second[valueIndex]);
+}
+
+void ReplicaSetTagConfig::_appendConstraint(
+ const ReplicaSetTagPattern::TagCountConstraint& constraint, BSONObjBuilder* builder) const {
+ _appendTagKey(constraint.getKeyIndex(), builder);
+ builder->append("minCount", int(constraint.getMinCount()));
+}
} // namespace repl
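
A minimal sketch, not part of this patch, of the NoSuchKey behaviour implemented by addTagCountConstraintToPattern above and exercised by the ParseFailsWithNonExistentGetLastErrorModesConstraintTag test: a pattern may only constrain tag keys that the owning ReplicaSetTagConfig has already registered via makeTag.

    ReplicaSetTagConfig tagConfig;
    tagConfig.makeTag("dc", "ny");  // registers the key "dc" (and the value "ny")
    ReplicaSetTagPattern pattern = tagConfig.makePattern();
    // Constraining a known key succeeds; an unknown key yields ErrorCodes::NoSuchKey.
    Status okStatus = tagConfig.addTagCountConstraintToPattern(&pattern, "dc", 2);
    Status badStatus = tagConfig.addTagCountConstraintToPattern(&pattern, "rack", 1);
    invariant(okStatus.isOK());
    invariant(badStatus.code() == ErrorCodes::NoSuchKey);
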
diff --git a/src/mongo/db/repl/replica_set_tag.h b/src/mongo/db/repl/replica_set_tag.h
index 8c93f62cbc0..4227ec56956 100644
--- a/src/mongo/db/repl/replica_set_tag.h
+++ b/src/mongo/db/repl/replica_set_tag.h
@@ -38,265 +38,282 @@
#include "mongo/platform/cstdint.h"
namespace mongo {
- class BSONObjBuilder;
+class BSONObjBuilder;
namespace repl {
+/**
+ * Representation of a tag on a replica set node.
+ *
+ * Tags are only meaningful when used with a copy of the ReplicaSetTagConfig that
+ * created them.
+ */
+class ReplicaSetTag {
+public:
/**
- * Representation of a tag on a replica set node.
- *
- * Tags are only meaningful when used with a copy of the ReplicaSetTagConfig that
- * created them.
+ * Default constructor, produces an uninitialized tag.
*/
- class ReplicaSetTag {
- public:
- /**
- * Default constructor, produces an uninitialized tag.
- */
- ReplicaSetTag() {}
-
- /**
- * Constructs a tag with the given key and value indexes.
- * Do not call directly; used by ReplicaSetTagConfig.
- */
- ReplicaSetTag(int32_t keyIndex, int32_t valueIndex) :
- _keyIndex(keyIndex),
- _valueIndex(valueIndex) {}
-
- /**
- * Returns true if the tag is not explicitly invalid.
- */
- bool isValid() const { return _keyIndex >= 0; }
-
- /**
- * Gets the key index of the tag.
- */
- int32_t getKeyIndex() const { return _keyIndex; }
-
- /**
- * Gets the value index of the tag.
- */
- int32_t getValueIndex() const { return _valueIndex; }
-
- /**
- * Compares two tags from the *same* ReplicaSetTagConfig for equality.
- */
- bool operator==(const ReplicaSetTag& other) const;
-
- /**
- * Compares two tags from the *same* ReplicaSetTagConfig for inequality.
- */
- bool operator!=(const ReplicaSetTag& other) const;
+ ReplicaSetTag() {}
- private:
- // The index of the key in the associated ReplicaSetTagConfig.
- int32_t _keyIndex;
+ /**
+ * Constructs a tag with the given key and value indexes.
+ * Do not call directly; used by ReplicaSetTagConfig.
+ */
+ ReplicaSetTag(int32_t keyIndex, int32_t valueIndex)
+ : _keyIndex(keyIndex), _valueIndex(valueIndex) {}
- // The index of the value in the entry for the key in the associated ReplicaSetTagConfig.
- int32_t _valueIndex;
- };
+ /**
+ * Returns true if the tag is not explicitly invalid.
+ */
+ bool isValid() const {
+ return _keyIndex >= 0;
+ }
+
+ /**
+ * Gets the key index of the tag.
+ */
+ int32_t getKeyIndex() const {
+ return _keyIndex;
+ }
+
+ /**
+ * Gets the value index of the tag.
+ */
+ int32_t getValueIndex() const {
+ return _valueIndex;
+ }
+
+ /**
+ * Compares two tags from the *same* ReplicaSetTagConfig for equality.
+ */
+ bool operator==(const ReplicaSetTag& other) const;
+
+ /**
+ * Compares two tags from the *same* ReplicaSetTagConfig for inequality.
+ */
+ bool operator!=(const ReplicaSetTag& other) const;
+
+private:
+ // The index of the key in the associated ReplicaSetTagConfig.
+ int32_t _keyIndex;
+
+ // The index of the value in the entry for the key in the associated ReplicaSetTagConfig.
+ int32_t _valueIndex;
+};
+/**
+ * Representation of a tag matching pattern, like { "dc": 2, "rack": 3 }, of the form
+ * used for tagged replica set writes.
+ */
+class ReplicaSetTagPattern {
+public:
/**
- * Representation of a tag matching pattern, like { "dc": 2, "rack": 3 }, of the form
- * used for tagged replica set writes.
+ * Representation of a single tag's minimum count constraint in a pattern.
*/
- class ReplicaSetTagPattern {
+ class TagCountConstraint {
public:
- /**
- * Representation of a single tag's minimum count constraint in a pattern.
- */
- class TagCountConstraint {
- public:
- TagCountConstraint() {}
- TagCountConstraint(int32_t keyIndex, int32_t minCount);
- int32_t getKeyIndex() const { return _keyIndex; }
- int32_t getMinCount() const { return _minCount; }
- private:
- int32_t _keyIndex;
- int32_t _minCount;
- };
-
- typedef std::vector<TagCountConstraint>::const_iterator ConstraintIterator;
-
- /**
- * Adds a count constraint for the given key index with the given count.
- *
- * Do not call directly, but use the addTagCountConstraintToPattern method
- * of ReplicaSetTagConfig.
- */
- void addTagCountConstraint(int32_t keyIndex, int32_t minCount);
-
- /**
- * Gets the begin iterator over the constraints in this pattern.
- */
- ConstraintIterator constraintsBegin() const { return _constraints.begin(); }
-
- /**
- * Gets the end iterator over the constraints in this pattern.
- */
- ConstraintIterator constraintsEnd() const { return _constraints.end(); }
+ TagCountConstraint() {}
+ TagCountConstraint(int32_t keyIndex, int32_t minCount);
+ int32_t getKeyIndex() const {
+ return _keyIndex;
+ }
+ int32_t getMinCount() const {
+ return _minCount;
+ }
private:
- std::vector<TagCountConstraint> _constraints;
+ int32_t _keyIndex;
+ int32_t _minCount;
};
+ typedef std::vector<TagCountConstraint>::const_iterator ConstraintIterator;
+
/**
- * State object for progressive detection of ReplicaSetTagPattern constraint satisfaction.
+ * Adds a count constraint for the given key index with the given count.
*
- * This is an abstraction of the replica set write tag satisfaction problem.
+ * Do not call directly, but use the addTagCountConstraintToPattern method
+ * of ReplicaSetTagConfig.
+ */
+ void addTagCountConstraint(int32_t keyIndex, int32_t minCount);
+
+ /**
+ * Gets the begin iterator over the constraints in this pattern.
+ */
+ ConstraintIterator constraintsBegin() const {
+ return _constraints.begin();
+ }
+
+ /**
+ * Gets the end iterator over the constraints in this pattern.
+ */
+ ConstraintIterator constraintsEnd() const {
+ return _constraints.end();
+ }
+
+private:
+ std::vector<TagCountConstraint> _constraints;
+};
+
+/**
+ * State object for progressive detection of ReplicaSetTagPattern constraint satisfaction.
+ *
+ * This is an abstraction of the replica set write tag satisfaction problem.
+ *
+ * Replica set tag matching is an event-driven constraint satisfaction process. This type
+ * represents the state of that process. It is initialized from a pattern object, then
+ * progressively updated with tags. After processing a sequence of tags sufficient to satisfy
+ * the pattern, isSatisfied() becomes true.
+ */
+class ReplicaSetTagMatch {
+ friend class ReplicaSetTagConfig;
+
+public:
+ /**
+ * Constructs an empty match object, equivalent to one that matches an
+ * empty pattern.
+ */
+ ReplicaSetTagMatch() {}
+
+ /**
+ * Constructs a clean match object for the given pattern.
+ */
+ explicit ReplicaSetTagMatch(const ReplicaSetTagPattern& pattern);
+
+ /**
+ * Updates the match state based on the data for the given tag.
*
- * Replica set tag matching is an event-driven constraint satisfaction process. This type
- * represents the state of that process. It is initialized from a pattern object, then
- * progressively updated with tags. After processing a sequence of tags sufficient to satisfy
- * the pattern, isSatisfied() becomes true.
+ * Returns true if, after this update, isSatisfied() is true.
*/
- class ReplicaSetTagMatch {
- friend class ReplicaSetTagConfig;
- public:
- /**
- * Constructs an empty match object, equivalent to one that matches an
- * empty pattern.
- */
- ReplicaSetTagMatch() {}
-
- /**
- * Constructs a clean match object for the given pattern.
- */
- explicit ReplicaSetTagMatch(const ReplicaSetTagPattern& pattern);
-
- /**
- * Updates the match state based on the data for the given tag.
- *
- * Returns true if, after this update, isSatisfied() is true.
- */
- bool update(const ReplicaSetTag& tag);
-
- /**
- * Returns true if the match has received a sequence of tags sufficient to satisfy the
- * pattern.
- */
+ bool update(const ReplicaSetTag& tag);
+
+ /**
+ * Returns true if the match has received a sequence of tags sufficient to satisfy the
+ * pattern.
+ */
+ bool isSatisfied() const;
+
+private:
+ /**
+ * Representation of the state related to a single tag key in the match pattern.
+ * Consists of a constraint (key index and min count for satisfaction) and a list
+ * of already observed values.
+ *
+ * A BoundTagValue is satisfied when the size of boundValues is at least
+ * constraint.getMinCount().
+ */
+ struct BoundTagValue {
+ BoundTagValue() {}
+ explicit BoundTagValue(const ReplicaSetTagPattern::TagCountConstraint& aConstraint)
+ : constraint(aConstraint) {}
+
+ int32_t getKeyIndex() const {
+ return constraint.getKeyIndex();
+ }
bool isSatisfied() const;
- private:
- /**
- * Representation of the state related to a single tag key in the match pattern.
- * Consists of a constraint (key index and min count for satisfaction) and a list
- * of already observed values.
- *
- * A BoundTagValue is satisfied when the size of boundValues is at least
- * constraint.getMinCount().
- */
- struct BoundTagValue {
- BoundTagValue() {}
- explicit BoundTagValue(const ReplicaSetTagPattern::TagCountConstraint& aConstraint) :
- constraint(aConstraint) {}
-
- int32_t getKeyIndex() const { return constraint.getKeyIndex(); }
- bool isSatisfied() const;
-
- ReplicaSetTagPattern::TagCountConstraint constraint;
- std::vector<int32_t> boundValues;
- };
- std::vector<BoundTagValue> _boundTagValues;
+ ReplicaSetTagPattern::TagCountConstraint constraint;
+ std::vector<int32_t> boundValues;
};
+ std::vector<BoundTagValue> _boundTagValues;
+};
+
+/**
+ * Representation of the tag configuration information for a replica set.
+ *
+ * This type, like all in this file, is copyable. Tags and patterns from one instance of this
+ * class are compatible with other instances of this class that are *copies* of the original
+ * instance.
+ */
+class ReplicaSetTagConfig {
+public:
+ /**
+ * Finds or allocates a tag with the given "key" and "value" strings.
+ */
+ ReplicaSetTag makeTag(StringData key, StringData value);
+
+ /**
+ * Finds a tag with the given key and value strings, or returns a tag whose isValid() method
+ * returns false if the configuration has never allocated such a tag via makeTag().
+ */
+ ReplicaSetTag findTag(StringData key, StringData value) const;
+
+ /**
+ * Makes a new, empty pattern object.
+ */
+ ReplicaSetTagPattern makePattern() const;
/**
- * Representation of the tag configuration information for a replica set.
+ * Adds a constraint clause to the given "pattern". This particular
+ * constraint requires that at least "minCount" distinct tags with the given "tagKey"
+ * be observed. Two tags "t1" and "t2" are distinct if "t1 != t2", so this constraint
+ * means that we must see at least "minCount" tags with the specified "tagKey".
+ */
+ Status addTagCountConstraintToPattern(ReplicaSetTagPattern* pattern,
+ StringData tagKey,
+ int32_t minCount) const;
+
+ /**
+ * Gets the string key for the given "tag".
*
- * This type, like all in this file, is copyable. Tags and patterns from one instance of this
- * class are compatible with other instances of this class that are *copies* of the original
- * instance.
+ * Behavior is undefined if "tag" is not valid or was not from this
+ * config or one of its copies.
*/
- class ReplicaSetTagConfig {
- public:
- /**
- * Finds or allocates a tag with the given "key" and "value" strings.
- */
- ReplicaSetTag makeTag(StringData key, StringData value);
-
- /**
- * Finds a tag with the given key and value strings, or returns a tag whose isValid() method
- * returns false if the configuration has never allocated such a tag via makeTag().
- */
- ReplicaSetTag findTag(StringData key, StringData value) const;
-
- /**
- * Makes a new, empty pattern object.
- */
- ReplicaSetTagPattern makePattern() const;
-
- /**
- * Adds a constraint clause to the given "pattern". This particular
- * constraint requires that at least "minCount" distinct tags with the given "tagKey"
- * be observed. Two tags "t1" and "t2" are distinct if "t1 != t2", so this constraint
- * means that we must see at least "minCount" tags with the specified "tagKey".
- */
- Status addTagCountConstraintToPattern(ReplicaSetTagPattern* pattern,
- StringData tagKey,
- int32_t minCount) const;
-
- /**
- * Gets the string key for the given "tag".
- *
- * Behavior is undefined if "tag" is not valid or was not from this
- * config or one of its copies.
- */
- std::string getTagKey(const ReplicaSetTag& tag) const;
-
- /**
- * Gets the string value for the given "tag".
- *
- * Like getTagKey, above, behavior is undefined if "tag" is not valid or was not from this
- * config or one of its copies.
- */
- std::string getTagValue(const ReplicaSetTag& tag) const;
-
- /**
- * Helper that writes a string debugging representation of "tag" to "os".
- */
- void put(const ReplicaSetTag& tag, std::ostream& os) const;
-
- /**
- * Helper that writes a string debugging representation of "pattern" to "os".
- */
- void put(const ReplicaSetTagPattern& pattern, std::ostream& os) const;
-
- /**
- * Helper that writes a string debugging representation of "matcher" to "os".
- */
- void put(const ReplicaSetTagMatch& matcher, std::ostream& os) const;
+ std::string getTagKey(const ReplicaSetTag& tag) const;
- private:
- typedef std::vector<std::string> ValueVector;
- typedef std::vector<std::pair<std::string, ValueVector> > KeyValueVector;
-
- /**
- * Returns the index corresponding to "key", or _tagData.size() if there is no
- * such index.
- */
- int32_t _findKeyIndex(StringData key) const;
-
- /**
- * Helper that writes a "tagKey" field for the given "keyIndex" to "builder".
- */
- void _appendTagKey(int32_t keyIndex, BSONObjBuilder* builder) const;
-
- /**
- * Helper that writes a "tagValue" field for the given "keyIndex" and "valueIndex"
- * to "builder".
- */
- void _appendTagValue(int32_t keyIndex, int32_t valueIndex, BSONObjBuilder* builder) const;
-
- /**
- * Helper that writes a constraint object to "builder".
- */
- void _appendConstraint(const ReplicaSetTagPattern::TagCountConstraint& constraint,
- BSONObjBuilder* builder) const;
-
- // Data about known tags. Conceptually, it maps between keys and their indexes,
- // keys and their associated values, and (key, value) pairs and the values' indexes.
- KeyValueVector _tagData;
- };
+ /**
+ * Gets the string value for the given "tag".
+ *
+ * Like getTagKey, above, behavior is undefined if "tag" is not valid or was not from this
+ * config or one of its copies.
+ */
+ std::string getTagValue(const ReplicaSetTag& tag) const;
+
+ /**
+ * Helper that writes a string debugging representation of "tag" to "os".
+ */
+ void put(const ReplicaSetTag& tag, std::ostream& os) const;
+
+ /**
+ * Helper that writes a string debugging representation of "pattern" to "os".
+ */
+ void put(const ReplicaSetTagPattern& pattern, std::ostream& os) const;
+
+ /**
+ * Helper that writes a string debugging representation of "matcher" to "os".
+ */
+ void put(const ReplicaSetTagMatch& matcher, std::ostream& os) const;
+
+private:
+ typedef std::vector<std::string> ValueVector;
+ typedef std::vector<std::pair<std::string, ValueVector>> KeyValueVector;
+
+ /**
+ * Returns the index corresponding to "key", or _tagData.size() if there is no
+ * such index.
+ */
+ int32_t _findKeyIndex(StringData key) const;
+
+ /**
+ * Helper that writes a "tagKey" field for the given "keyIndex" to "builder".
+ */
+ void _appendTagKey(int32_t keyIndex, BSONObjBuilder* builder) const;
+
+ /**
+ * Helper that writes a "tagValue" field for the given "keyIndex" and "valueIndex"
+ * to "builder".
+ */
+ void _appendTagValue(int32_t keyIndex, int32_t valueIndex, BSONObjBuilder* builder) const;
+
+ /**
+ * Helper that writes a constraint object to "builder".
+ */
+ void _appendConstraint(const ReplicaSetTagPattern::TagCountConstraint& constraint,
+ BSONObjBuilder* builder) const;
+
+ // Data about known tags. Conceptually, it maps between keys and their indexes,
+ // keys and their associated values, and (key, value) pairs and the values' indexes.
+ KeyValueVector _tagData;
+};
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replica_set_tag_test.cpp b/src/mongo/db/repl/replica_set_tag_test.cpp
index 1a2bdf9e120..1d70ee39bbe 100644
--- a/src/mongo/db/repl/replica_set_tag_test.cpp
+++ b/src/mongo/db/repl/replica_set_tag_test.cpp
@@ -33,129 +33,129 @@ namespace mongo {
namespace repl {
namespace {
- template <typename T>
- class StreamPutter {
- public:
- StreamPutter(const ReplicaSetTagConfig& tagConfig, const T& item) :
- _tagConfig(&tagConfig), _item(&item) {}
- void put(std::ostream& os) const {
- _tagConfig->put(*_item, os);
- }
-
- private:
- const ReplicaSetTagConfig* _tagConfig;
- const T* _item;
- };
-
- template <typename T>
- StreamPutter<T> streamput(const ReplicaSetTagConfig& tagConfig, const T& item) {
- return StreamPutter<T>(tagConfig, item);
+template <typename T>
+class StreamPutter {
+public:
+ StreamPutter(const ReplicaSetTagConfig& tagConfig, const T& item)
+ : _tagConfig(&tagConfig), _item(&item) {}
+ void put(std::ostream& os) const {
+ _tagConfig->put(*_item, os);
}
- template <typename T>
- std::ostream& operator<<(std::ostream& os, const StreamPutter<T>& putter) {
- putter.put(os);
- return os;
+private:
+ const ReplicaSetTagConfig* _tagConfig;
+ const T* _item;
+};
+
+template <typename T>
+StreamPutter<T> streamput(const ReplicaSetTagConfig& tagConfig, const T& item) {
+ return StreamPutter<T>(tagConfig, item);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const StreamPutter<T>& putter) {
+ putter.put(os);
+ return os;
+}
+
+TEST(ReplicaSetTagConfigTest, MakeAndFindTags) {
+ ReplicaSetTagConfig tagConfig;
+ ReplicaSetTag dcNY = tagConfig.makeTag("dc", "ny");
+ ReplicaSetTag dcRI = tagConfig.makeTag("dc", "ri");
+ ReplicaSetTag rack1 = tagConfig.makeTag("rack", "1");
+ ReplicaSetTag rack2 = tagConfig.makeTag("rack", "2");
+ ASSERT_TRUE(dcNY.isValid());
+ ASSERT_EQUALS("dc", tagConfig.getTagKey(dcNY));
+ ASSERT_EQUALS("ny", tagConfig.getTagValue(dcNY));
+ ASSERT_EQUALS("dc", tagConfig.getTagKey(dcRI));
+ ASSERT_EQUALS("ri", tagConfig.getTagValue(dcRI));
+ ASSERT_EQUALS("rack", tagConfig.getTagKey(rack1));
+ ASSERT_EQUALS("1", tagConfig.getTagValue(rack1));
+ ASSERT_EQUALS("rack", tagConfig.getTagKey(rack2));
+ ASSERT_EQUALS("2", tagConfig.getTagValue(rack2));
+
+ ASSERT_EQUALS(rack1.getKeyIndex(), rack2.getKeyIndex());
+ ASSERT_NOT_EQUALS(rack1.getKeyIndex(), dcRI.getKeyIndex());
+ ASSERT_NOT_EQUALS(rack1.getValueIndex(), rack2.getValueIndex());
+
+ ASSERT_TRUE(rack1 == tagConfig.makeTag("rack", "1"));
+ ASSERT_TRUE(rack1 == tagConfig.findTag("rack", "1"));
+ ASSERT_FALSE(tagConfig.findTag("rack", "7").isValid());
+ ASSERT_FALSE(tagConfig.findTag("country", "us").isValid());
+}
+
+class ReplicaSetTagMatchTest : public unittest::Test {
+public:
+ void setUp() {
+ dcNY = tagConfig.makeTag("dc", "ny");
+ dcVA = tagConfig.makeTag("dc", "va");
+ dcRI = tagConfig.makeTag("dc", "ri");
+ rack1 = tagConfig.makeTag("rack", "1");
+ rack2 = tagConfig.makeTag("rack", "2");
+ rack3 = tagConfig.makeTag("rack", "3");
+ rack4 = tagConfig.makeTag("rack", "4");
}
- TEST(ReplicaSetTagConfigTest, MakeAndFindTags) {
- ReplicaSetTagConfig tagConfig;
- ReplicaSetTag dcNY = tagConfig.makeTag("dc", "ny");
- ReplicaSetTag dcRI = tagConfig.makeTag("dc", "ri");
- ReplicaSetTag rack1 = tagConfig.makeTag("rack", "1");
- ReplicaSetTag rack2 = tagConfig.makeTag("rack", "2");
- ASSERT_TRUE(dcNY.isValid());
- ASSERT_EQUALS("dc", tagConfig.getTagKey(dcNY));
- ASSERT_EQUALS("ny", tagConfig.getTagValue(dcNY));
- ASSERT_EQUALS("dc", tagConfig.getTagKey(dcRI));
- ASSERT_EQUALS("ri", tagConfig.getTagValue(dcRI));
- ASSERT_EQUALS("rack", tagConfig.getTagKey(rack1));
- ASSERT_EQUALS("1", tagConfig.getTagValue(rack1));
- ASSERT_EQUALS("rack", tagConfig.getTagKey(rack2));
- ASSERT_EQUALS("2", tagConfig.getTagValue(rack2));
-
- ASSERT_EQUALS(rack1.getKeyIndex(), rack2.getKeyIndex());
- ASSERT_NOT_EQUALS(rack1.getKeyIndex(), dcRI.getKeyIndex());
- ASSERT_NOT_EQUALS(rack1.getValueIndex(), rack2.getValueIndex());
-
- ASSERT_TRUE(rack1 == tagConfig.makeTag("rack", "1"));
- ASSERT_TRUE(rack1 == tagConfig.findTag("rack", "1"));
- ASSERT_FALSE(tagConfig.findTag("rack", "7").isValid());
- ASSERT_FALSE(tagConfig.findTag("country", "us").isValid());
- }
-
- class ReplicaSetTagMatchTest : public unittest::Test {
- public:
- void setUp() {
- dcNY = tagConfig.makeTag("dc", "ny");
- dcVA = tagConfig.makeTag("dc", "va");
- dcRI = tagConfig.makeTag("dc", "ri");
- rack1 = tagConfig.makeTag("rack", "1");
- rack2 = tagConfig.makeTag("rack", "2");
- rack3 = tagConfig.makeTag("rack", "3");
- rack4 = tagConfig.makeTag("rack", "4");
- }
-
- protected:
- ReplicaSetTagConfig tagConfig;
- ReplicaSetTag dcNY;
- ReplicaSetTag dcVA;
- ReplicaSetTag dcRI;
- ReplicaSetTag rack1;
- ReplicaSetTag rack2;
- ReplicaSetTag rack3;
- ReplicaSetTag rack4;
- };
-
- TEST_F(ReplicaSetTagMatchTest, EmptyPatternAlwaysSatisfied) {
- ReplicaSetTagPattern pattern = tagConfig.makePattern();
- ASSERT_TRUE(ReplicaSetTagMatch(pattern).isSatisfied());
- ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "dc", 0));
- ASSERT_TRUE(ReplicaSetTagMatch(pattern).isSatisfied());
- }
-
- TEST_F(ReplicaSetTagMatchTest, SingleTagConstraint) {
- ReplicaSetTagPattern pattern = tagConfig.makePattern();
- ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "dc", 2));
- ReplicaSetTagMatch matcher(pattern);
- ASSERT_FALSE(matcher.isSatisfied());
- ASSERT_FALSE(matcher.update(dcVA)); // One DC alone won't satisfy "dc: 2".
- ASSERT_FALSE(matcher.update(rack2)); // Adding one rack won't satisfy.
- ASSERT_FALSE(matcher.update(rack3)); // Two racks won't satisfy "dc: 2".
- ASSERT_FALSE(matcher.update(dcVA)); // Same tag twice won't satisfy.
- ASSERT_TRUE(matcher.update(dcRI)); // Two DCs satisfies.
- ASSERT_TRUE(matcher.isSatisfied());
- ASSERT_TRUE(matcher.update(dcNY)); // Three DCs satisfies.
- ASSERT_TRUE(matcher.update(rack1)); // Once matcher is satisfied, it stays satisfied.
- }
-
- TEST_F(ReplicaSetTagMatchTest, MaskingConstraints) {
- // The highest count constraint for a tag key is the only one that matters.
- ReplicaSetTagPattern pattern = tagConfig.makePattern();
- ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "rack", 2));
- ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "rack", 3));
- ReplicaSetTagMatch matcher(pattern);
- ASSERT_FALSE(matcher.isSatisfied());
- ASSERT_FALSE(matcher.update(rack2));
- ASSERT_FALSE(matcher.update(rack3));
- ASSERT_FALSE(matcher.update(rack2));
- ASSERT_TRUE(matcher.update(rack1));
- }
-
- TEST_F(ReplicaSetTagMatchTest, MultipleConstraints) {
- ReplicaSetTagPattern pattern = tagConfig.makePattern();
- ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "dc", 3));
- ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "rack", 2));
- ReplicaSetTagMatch matcher(pattern);
- ASSERT_FALSE(matcher.isSatisfied());
- ASSERT_FALSE(matcher.update(dcVA));
- ASSERT_FALSE(matcher.update(rack2));
- ASSERT_FALSE(matcher.update(rack3));
- ASSERT_FALSE(matcher.update(dcVA));
- ASSERT_FALSE(matcher.update(dcRI));
- ASSERT_TRUE(matcher.update(dcNY));
- ASSERT_TRUE(matcher.isSatisfied());
- }
+protected:
+ ReplicaSetTagConfig tagConfig;
+ ReplicaSetTag dcNY;
+ ReplicaSetTag dcVA;
+ ReplicaSetTag dcRI;
+ ReplicaSetTag rack1;
+ ReplicaSetTag rack2;
+ ReplicaSetTag rack3;
+ ReplicaSetTag rack4;
+};
+
+TEST_F(ReplicaSetTagMatchTest, EmptyPatternAlwaysSatisfied) {
+ ReplicaSetTagPattern pattern = tagConfig.makePattern();
+ ASSERT_TRUE(ReplicaSetTagMatch(pattern).isSatisfied());
+ ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "dc", 0));
+ ASSERT_TRUE(ReplicaSetTagMatch(pattern).isSatisfied());
+}
+
+TEST_F(ReplicaSetTagMatchTest, SingleTagConstraint) {
+ ReplicaSetTagPattern pattern = tagConfig.makePattern();
+ ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "dc", 2));
+ ReplicaSetTagMatch matcher(pattern);
+ ASSERT_FALSE(matcher.isSatisfied());
+ ASSERT_FALSE(matcher.update(dcVA)); // One DC alone won't satisfy "dc: 2".
+ ASSERT_FALSE(matcher.update(rack2)); // Adding one rack won't satisfy.
+ ASSERT_FALSE(matcher.update(rack3)); // Two racks won't satisfy "dc: 2".
+ ASSERT_FALSE(matcher.update(dcVA)); // Same tag twice won't satisfy.
+ ASSERT_TRUE(matcher.update(dcRI)); // Two DCs satisfies.
+ ASSERT_TRUE(matcher.isSatisfied());
+ ASSERT_TRUE(matcher.update(dcNY)); // Three DCs satisfies.
+ ASSERT_TRUE(matcher.update(rack1)); // Once matcher is satisfied, it stays satisfied.
+}
+
+TEST_F(ReplicaSetTagMatchTest, MaskingConstraints) {
+ // The highest count constraint for a tag key is the only one that matters.
+ ReplicaSetTagPattern pattern = tagConfig.makePattern();
+ ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "rack", 2));
+ ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "rack", 3));
+ ReplicaSetTagMatch matcher(pattern);
+ ASSERT_FALSE(matcher.isSatisfied());
+ ASSERT_FALSE(matcher.update(rack2));
+ ASSERT_FALSE(matcher.update(rack3));
+ ASSERT_FALSE(matcher.update(rack2));
+ ASSERT_TRUE(matcher.update(rack1));
+}
+
+TEST_F(ReplicaSetTagMatchTest, MultipleConstraints) {
+ ReplicaSetTagPattern pattern = tagConfig.makePattern();
+ ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "dc", 3));
+ ASSERT_OK(tagConfig.addTagCountConstraintToPattern(&pattern, "rack", 2));
+ ReplicaSetTagMatch matcher(pattern);
+ ASSERT_FALSE(matcher.isSatisfied());
+ ASSERT_FALSE(matcher.update(dcVA));
+ ASSERT_FALSE(matcher.update(rack2));
+ ASSERT_FALSE(matcher.update(rack3));
+ ASSERT_FALSE(matcher.update(dcVA));
+ ASSERT_FALSE(matcher.update(dcRI));
+ ASSERT_TRUE(matcher.update(dcNY));
+ ASSERT_TRUE(matcher.isSatisfied());
+}
} // namespace
} // namespace repl
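The reformatted tests above are the clearest usage reference for the tag API. A minimal sketch of the same flow follows, assuming only the include path and the caller-side function name (neither is part of this commit):

#include "mongo/db/repl/replica_set_tag.h"  // assumed include path

namespace mongo {
namespace repl {

// Sketch: require two distinct "dc" values, then feed tags to a matcher
// until the constraint is satisfied, as SingleTagConstraint does above.
void tagMatchSketch() {
    ReplicaSetTagConfig tagConfig;
    ReplicaSetTag dcNY = tagConfig.makeTag("dc", "ny");
    ReplicaSetTag dcRI = tagConfig.makeTag("dc", "ri");

    ReplicaSetTagPattern pattern = tagConfig.makePattern();
    Status status = tagConfig.addTagCountConstraintToPattern(&pattern, "dc", 2);
    if (!status.isOK()) {
        return;  // the tests above ASSERT_OK this call
    }

    ReplicaSetTagMatch matcher(pattern);
    matcher.update(dcNY);                   // one distinct "dc" value: not yet satisfied
    bool satisfied = matcher.update(dcRI);  // second distinct "dc" value: satisfied
    (void)satisfied;
}

}  // namespace repl
}  // namespace mongo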
diff --git a/src/mongo/db/repl/replication_coordinator.cpp b/src/mongo/db/repl/replication_coordinator.cpp
index 8d1ab039fea..f7c3c200245 100644
--- a/src/mongo/db/repl/replication_coordinator.cpp
+++ b/src/mongo/db/repl/replication_coordinator.cpp
@@ -36,29 +36,29 @@ namespace repl {
namespace {
- const auto getReplicationCoordinator =
- ServiceContext::declareDecoration<std::unique_ptr<ReplicationCoordinator>>();
+const auto getReplicationCoordinator =
+ ServiceContext::declareDecoration<std::unique_ptr<ReplicationCoordinator>>();
}
- ReplicationCoordinator::ReplicationCoordinator() {}
- ReplicationCoordinator::~ReplicationCoordinator() {}
+ReplicationCoordinator::ReplicationCoordinator() {}
+ReplicationCoordinator::~ReplicationCoordinator() {}
- // TODO(dannenberg) remove when master slave is removed
- const char *replAllDead = 0;
+// TODO(dannenberg) remove when master slave is removed
+const char* replAllDead = 0;
- ReplicationCoordinator* ReplicationCoordinator::get(ServiceContext* service) {
- return getReplicationCoordinator(service).get();
- }
+ReplicationCoordinator* ReplicationCoordinator::get(ServiceContext* service) {
+ return getReplicationCoordinator(service).get();
+}
- ReplicationCoordinator* ReplicationCoordinator::get(ServiceContext& service) {
- return getReplicationCoordinator(service).get();
- }
+ReplicationCoordinator* ReplicationCoordinator::get(ServiceContext& service) {
+ return getReplicationCoordinator(service).get();
+}
- void ReplicationCoordinator::set(ServiceContext* service,
- std::unique_ptr<ReplicationCoordinator> replCoord) {
- auto& coordinator = getReplicationCoordinator(service);
- coordinator = std::move(replCoord);
- }
+void ReplicationCoordinator::set(ServiceContext* service,
+ std::unique_ptr<ReplicationCoordinator> replCoord) {
+ auto& coordinator = getReplicationCoordinator(service);
+ coordinator = std::move(replCoord);
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
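The file above stores the coordinator as a ServiceContext decoration. A hedged sketch of how start-up code might install and later retrieve it; the function name and include path are assumptions, not part of this commit:

#include <memory>
#include <utility>

#include "mongo/db/repl/replication_coordinator.h"  // assumed include path

namespace mongo {
namespace repl {

// Sketch: hand ownership of a coordinator to the ServiceContext decoration shown above,
// then read back the non-owning pointer the way other callers of get() would.
void installCoordinatorSketch(ServiceContext* service,
                              std::unique_ptr<ReplicationCoordinator> coordinator) {
    ReplicationCoordinator::set(service, std::move(coordinator));
    ReplicationCoordinator* replCoord = ReplicationCoordinator::get(service);
    (void)replCoord;  // owned by the ServiceContext; never deleted by callers
}

}  // namespace repl
}  // namespace mongo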
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index 44d78da3374..9bc7179bb90 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -40,617 +40,605 @@
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class IndexDescriptor;
- class NamespaceString;
- class OperationContext;
- class ServiceContext;
- class Timestamp;
- struct WriteConcernOptions;
+class BSONObj;
+class BSONObjBuilder;
+class IndexDescriptor;
+class NamespaceString;
+class OperationContext;
+class ServiceContext;
+class Timestamp;
+struct WriteConcernOptions;
namespace repl {
- class BackgroundSync;
- class HandshakeArgs;
- class IsMasterResponse;
- class OplogReader;
- class OpTime;
- class ReadAfterOpTimeArgs;
- class ReadAfterOpTimeResponse;
- class ReplSetDeclareElectionWinnerArgs;
- class ReplSetDeclareElectionWinnerResponse;
- class ReplSetHeartbeatArgs;
- class ReplSetHeartbeatArgsV1;
- class ReplSetHeartbeatResponse;
- class ReplSetHtmlSummary;
- class ReplSetRequestVotesArgs;
- class ReplSetRequestVotesResponse;
- class ReplicaSetConfig;
- class UpdatePositionArgs;
-
- /**
- * Global variable that contains a std::string telling why master/slave halted
+class BackgroundSync;
+class HandshakeArgs;
+class IsMasterResponse;
+class OplogReader;
+class OpTime;
+class ReadAfterOpTimeArgs;
+class ReadAfterOpTimeResponse;
+class ReplSetDeclareElectionWinnerArgs;
+class ReplSetDeclareElectionWinnerResponse;
+class ReplSetHeartbeatArgs;
+class ReplSetHeartbeatArgsV1;
+class ReplSetHeartbeatResponse;
+class ReplSetHtmlSummary;
+class ReplSetRequestVotesArgs;
+class ReplSetRequestVotesResponse;
+class ReplicaSetConfig;
+class UpdatePositionArgs;
+
+/**
+ * Global variable that contains a std::string telling why master/slave halted
+ *
+ * "dead" means something really bad happened like replication falling completely out of sync.
+ * when non-null, we are dead and the string is informational
+ *
+ * TODO(dannenberg) remove when master slave goes
+ */
+extern const char* replAllDead;
+
+/**
+ * The ReplicationCoordinator is responsible for coordinating the interaction of replication
+ * with the rest of the system. The public methods on ReplicationCoordinator are the public
+ * API that the replication subsystem presents to the rest of the codebase.
+ */
+class ReplicationCoordinator : public ReplicationProgressManager {
+ MONGO_DISALLOW_COPYING(ReplicationCoordinator);
+
+public:
+ static ReplicationCoordinator* get(ServiceContext* service);
+ static ReplicationCoordinator* get(ServiceContext& service);
+ static void set(ServiceContext* service,
+ std::unique_ptr<ReplicationCoordinator> replCoordinator);
+
+ struct StatusAndDuration {
+ public:
+ Status status;
+ Milliseconds duration;
+
+ StatusAndDuration(const Status& stat, Milliseconds ms) : status(stat), duration(ms) {}
+ };
+
+ virtual ~ReplicationCoordinator();
+
+ /**
+ * Does any initial bookkeeping needed to start replication, and instructs the other
+ * components of the replication system to start up whatever threads and do whatever
+ * initialization they need.
+ */
+ virtual void startReplication(OperationContext* txn) = 0;
+
+ /**
+ * Does whatever cleanup is required to stop replication, including instructing the other
+ * components of the replication system to shut down and stop any threads they are using,
+ * blocking until all replication-related shutdown tasks are complete.
+ */
+ virtual void shutdown() = 0;
+
+ /**
+ * Returns a reference to the parsed command line arguments that are related to replication.
+ */
+ virtual const ReplSettings& getSettings() const = 0;
+
+ enum Mode { modeNone = 0, modeReplSet, modeMasterSlave };
+
+ /**
+ * Returns a value indicating whether this node was configured at start-up to run
+ * standalone, as part of a master-slave pair, or as a member of a replica set.
+ */
+ virtual Mode getReplicationMode() const = 0;
+
+ /**
+ * Returns true if this node is configured to be a member of a replica set or master/slave
+ * setup.
+ */
+ virtual bool isReplEnabled() const = 0;
+
+ /**
+ * Returns the current replica set state of this node (PRIMARY, SECONDARY, STARTUP, etc).
+ * It is invalid to call this unless getReplicationMode() == modeReplSet.
+ */
+ virtual MemberState getMemberState() const = 0;
+
+ /**
+ * Returns true if this node is in state PRIMARY or SECONDARY.
*
- * "dead" means something really bad happened like replication falling completely out of sync.
- * when non-null, we are dead and the string is informational
+ * It is invalid to call this unless getReplicationMode() == modeReplSet.
*
- * TODO(dannenberg) remove when master slave goes
+ * This method may be optimized to reduce synchronization overhead compared to
+ * reading the current member state with getMemberState().
*/
- extern const char *replAllDead;
-
+ virtual bool isInPrimaryOrSecondaryState() const = 0;
+
+
/**
- * The ReplicationCoordinator is responsible for coordinating the interaction of replication
- * with the rest of the system. The public methods on ReplicationCoordinator are the public
- * API that the replication subsystem presents to the rest of the codebase.
+     * Returns how slave-delayed this node is configured to be.
+ *
+ * Raises a DBException if this node is not a member of the current replica set
+ * configuration.
*/
- class ReplicationCoordinator : public ReplicationProgressManager {
- MONGO_DISALLOW_COPYING(ReplicationCoordinator);
+ virtual Seconds getSlaveDelaySecs() const = 0;
- public:
- static ReplicationCoordinator* get(ServiceContext* service);
- static ReplicationCoordinator* get(ServiceContext& service);
- static void set(ServiceContext* service,
- std::unique_ptr<ReplicationCoordinator> replCoordinator);
-
- struct StatusAndDuration {
- public:
- Status status;
- Milliseconds duration;
-
- StatusAndDuration(const Status& stat, Milliseconds ms) : status(stat),
- duration(ms) {}
- };
-
- virtual ~ReplicationCoordinator();
-
- /**
- * Does any initial bookkeeping needed to start replication, and instructs the other
- * components of the replication system to start up whatever threads and do whatever
- * initialization they need.
- */
- virtual void startReplication(OperationContext* txn) = 0;
-
- /**
- * Does whatever cleanup is required to stop replication, including instructing the other
- * components of the replication system to shut down and stop any threads they are using,
- * blocking until all replication-related shutdown tasks are complete.
- */
- virtual void shutdown() = 0;
-
- /**
- * Returns a reference to the parsed command line arguments that are related to replication.
- */
- virtual const ReplSettings& getSettings() const = 0;
-
- enum Mode {
- modeNone = 0,
- modeReplSet,
- modeMasterSlave
- };
-
- /**
- * Returns a value indicating whether this node was configured at start-up to run
- * standalone, as part of a master-slave pair, or as a member of a replica set.
- */
- virtual Mode getReplicationMode() const = 0;
-
- /**
- * Returns true if this node is configured to be a member of a replica set or master/slave
- * setup.
- */
- virtual bool isReplEnabled() const = 0;
-
- /**
- * Returns the current replica set state of this node (PRIMARY, SECONDARY, STARTUP, etc).
- * It is invalid to call this unless getReplicationMode() == modeReplSet.
- */
- virtual MemberState getMemberState() const = 0;
-
- /**
- * Returns true if this node is in state PRIMARY or SECONDARY.
- *
- * It is invalid to call this unless getReplicationMode() == modeReplSet.
- *
- * This method may be optimized to reduce synchronization overhead compared to
- * reading the current member state with getMemberState().
- */
- virtual bool isInPrimaryOrSecondaryState() const = 0;
-
-
- /**
-         * Returns how slave-delayed this node is configured to be.
- *
- * Raises a DBException if this node is not a member of the current replica set
- * configuration.
- */
- virtual Seconds getSlaveDelaySecs() const = 0;
-
- /**
- * Clears the list of sync sources we have blacklisted.
- */
- virtual void clearSyncSourceBlacklist() = 0;
-
- /**
- * Blocks the calling thread for up to writeConcern.wTimeout millis, or until "opTime" has
- * been replicated to at least a set of nodes that satisfies the writeConcern, whichever
- * comes first. A writeConcern.wTimeout of 0 indicates no timeout (block forever) and a
- * writeConcern.wTimeout of -1 indicates return immediately after checking. Return codes:
- * ErrorCodes::WriteConcernFailed if the writeConcern.wTimeout is reached before
- * the data has been sufficiently replicated
- * ErrorCodes::ExceededTimeLimit if the txn->getMaxTimeMicrosRemaining is reached before
- * the data has been sufficiently replicated
- * ErrorCodes::NotMaster if the node is not Primary/Master
- * ErrorCodes::UnknownReplWriteConcern if the writeConcern.wMode contains a write concern
- * mode that is not known
- * ErrorCodes::ShutdownInProgress if we are mid-shutdown
- * ErrorCodes::Interrupted if the operation was killed with killop()
- */
- virtual StatusAndDuration awaitReplication(OperationContext* txn,
- const OpTime& opTime,
- const WriteConcernOptions& writeConcern) = 0;
-
- /**
- * Like awaitReplication(), above, but waits for the replication of the last operation
- * performed on the client associated with "txn".
- */
- virtual StatusAndDuration awaitReplicationOfLastOpForClient(
- OperationContext* txn,
- const WriteConcernOptions& writeConcern) = 0;
-
- /**
- * Causes this node to relinquish being primary for at least 'stepdownTime'. If 'force' is
- * false, before doing so it will wait for 'waitTime' for one other node to be within 10
- * seconds of this node's optime before stepping down. Returns a Status with the code
- * ErrorCodes::ExceededTimeLimit if no secondary catches up within waitTime,
- * ErrorCodes::NotMaster if you are no longer primary when trying to step down,
- * ErrorCodes::SecondaryAheadOfPrimary if we are primary but there is another node that
- * seems to be ahead of us in replication, and Status::OK otherwise.
- */
- virtual Status stepDown(OperationContext* txn,
- bool force,
- const Milliseconds& waitTime,
- const Milliseconds& stepdownTime) = 0;
-
- /**
- * Returns true if the node can be considered master for the purpose of introspective
- * commands such as isMaster() and rs.status().
- */
- virtual bool isMasterForReportingPurposes() = 0;
-
- /**
- * Returns true if it is valid for this node to accept writes on the given database.
- * Currently this is true only if this node is Primary, master in master/slave,
- * a standalone, or is writing to the local database.
- *
- * If a node was started with the replSet argument, but has not yet received a config, it
- * will not be able to receive writes to a database other than local (it will not be
-         * treated as a standalone node).
- *
- * NOTE: This function can only be meaningfully called while the caller holds the global
- * lock in some mode other than MODE_NONE.
- */
- virtual bool canAcceptWritesForDatabase(StringData dbName) = 0;
-
- /**
- * Returns true if it is valid for this node to accept writes on the given namespace.
- *
- * The result of this function should be consistent with canAcceptWritesForDatabase()
- * for the database the namespace refers to, with additional checks on the collection.
- */
- virtual bool canAcceptWritesFor(const NamespaceString& ns) = 0;
-
- /**
- * Checks if the current replica set configuration can satisfy the given write concern.
- *
- * Things that are taken into consideration include:
- * 1. If the set has enough data-bearing members.
- * 2. If the write concern mode exists.
- * 3. If there are enough members for the write concern mode specified.
- */
- virtual Status checkIfWriteConcernCanBeSatisfied(
- const WriteConcernOptions& writeConcern) const = 0;
-
- /**
- * Returns Status::OK() if it is valid for this node to serve reads on the given collection
- * and an errorcode indicating why the node cannot if it cannot.
- */
- virtual Status checkCanServeReadsFor(OperationContext* txn,
- const NamespaceString& ns,
- bool slaveOk) = 0;
-
- /**
- * Returns true if this node should ignore unique index constraints on new documents.
- * Currently this is needed for nodes in STARTUP2, RECOVERING, and ROLLBACK states.
- */
- virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx) = 0;
-
- /**
- * Updates our internal tracking of the last OpTime applied for the given slave
- * identified by "rid". Only valid to call in master/slave mode
- */
- virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts) = 0;
-
- /**
- * Updates our internal tracking of the last OpTime applied to this node.
- *
- * The new value of "opTime" must be no less than any prior value passed to this method, and
- * it is the caller's job to properly synchronize this behavior. The exception to this rule
- * is that after calls to resetLastOpTimeFromOplog(), the minimum acceptable value for
- * "opTime" is reset based on the contents of the oplog, and may go backwards due to
- * rollback.
- */
- virtual void setMyLastOptime(const OpTime& opTime) = 0;
-
- /**
-         * Same as above, but used in places where we need to zero our last optime.
- */
- virtual void resetMyLastOptime() = 0;
-
- /**
-         * Updates the message we include in heartbeat responses.
- */
- virtual void setMyHeartbeatMessage(const std::string& msg) = 0;
-
- /**
- * Returns the last optime recorded by setMyLastOptime.
- */
- virtual OpTime getMyLastOptime() const = 0;
-
- /**
- * Waits until the optime of the current node is at least the opTime specified in
- * 'settings'.
- *
- * The returned ReadAfterOpTimeResponse object's didWait() method returns true if
- * an attempt was made to wait for the specified opTime. Cases when this can be
- * false could include:
- *
- * 1. No read after opTime was specified.
- * 2. Attempting to do read after opTime when node is not a replica set member.
- *
- * Note: getDuration() on the returned ReadAfterOpTimeResponse will only be valid if
- * its didWait() method returns true.
- */
- virtual ReadAfterOpTimeResponse waitUntilOpTime(OperationContext* txn,
- const ReadAfterOpTimeArgs& settings) = 0;
-
- /**
- * Retrieves and returns the current election id, which is a unique id that is local to
- * this node and changes every time we become primary.
- * TODO(spencer): Use term instead.
- */
- virtual OID getElectionId() = 0;
-
- /**
- * Returns the RID for this node. The RID is used to identify this node to our sync source
- * when sending updates about our replication progress.
- */
- virtual OID getMyRID() const = 0;
-
- /**
- * Returns the id for this node as specified in the current replica set configuration.
- */
- virtual int getMyId() const = 0;
-
- /**
- * Sets this node into a specific follower mode.
- *
- * Returns true if the follower mode was successfully set. Returns false if the
- * node is or becomes a leader before setFollowerMode completes.
- *
- * Follower modes are RS_STARTUP2 (initial sync), RS_SECONDARY, RS_ROLLBACK and
- * RS_RECOVERING. They are the valid states of a node whose topology coordinator has the
- * follower role.
- *
- * This is essentially an interface that allows the applier to prevent the node from
- * becoming a candidate or accepting reads, depending on circumstances in the oplog
- * application process.
- */
- virtual bool setFollowerMode(const MemberState& newState) = 0;
-
- /**
- * Returns true if the coordinator wants the applier to pause application.
- *
- * If this returns true, the applier should call signalDrainComplete() when it has
- * completed draining its operation buffer and no further ops are being applied.
- */
- virtual bool isWaitingForApplierToDrain() = 0;
-
- /**
- * Signals that a previously requested pause and drain of the applier buffer
- * has completed.
- *
- * This is an interface that allows the applier to reenable writes after
- * a successful election triggers the draining of the applier buffer.
- */
- virtual void signalDrainComplete(OperationContext* txn) = 0;
-
- /**
- * Signals the sync source feedback thread to wake up and send a handshake and
- * replSetUpdatePosition command to our sync source.
- */
- virtual void signalUpstreamUpdater() = 0;
-
- /**
- * Prepares a BSONObj describing an invocation of the replSetUpdatePosition command that can
- * be sent to this node's sync source to update it about our progress in replication.
- *
- * The returned bool indicates whether or not the command was created.
- */
- virtual bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) = 0;
-
- /**
- * Handles an incoming replSetGetStatus command. Adds BSON to 'result'.
- */
- virtual Status processReplSetGetStatus(BSONObjBuilder* result) = 0;
-
- /**
- * Handles an incoming isMaster command for a replica set node. Should not be
- * called on a master-slave or standalone node.
- */
- virtual void fillIsMasterForReplSet(IsMasterResponse* result) = 0;
-
- /**
- * Adds to "result" a description of the slaveInfo data structure used to map RIDs to their
- * last known optimes.
- */
- virtual void appendSlaveInfoData(BSONObjBuilder* result) = 0;
-
- /**
- * Returns a copy of the current ReplicaSetConfig.
- */
- virtual ReplicaSetConfig getConfig() const = 0;
-
- /**
- * Handles an incoming replSetGetConfig command. Adds BSON to 'result'.
- */
- virtual void processReplSetGetConfig(BSONObjBuilder* result) = 0;
-
- /**
- * Toggles maintenanceMode to the value expressed by 'activate'
-         * returns Status::OK if the change worked, NotSecondary if it failed because we are
- * PRIMARY, and OperationFailed if we are not currently in maintenance mode
- */
- virtual Status setMaintenanceMode(bool activate) = 0;
-
- /**
- * Retrieves the current count of maintenanceMode and returns 'true' if greater than 0.
- */
- virtual bool getMaintenanceMode() = 0;
-
- /**
- * Handles an incoming replSetSyncFrom command. Adds BSON to 'result'
- * returns Status::OK if the sync target could be set and an ErrorCode indicating why it
- * couldn't otherwise.
- */
- virtual Status processReplSetSyncFrom(const HostAndPort& target,
- BSONObjBuilder* resultObj) = 0;
-
- /**
- * Handles an incoming replSetFreeze command. Adds BSON to 'resultObj'
- * returns Status::OK() if the node is a member of a replica set with a config and an
- * error Status otherwise
- */
- virtual Status processReplSetFreeze(int secs, BSONObjBuilder* resultObj) = 0;
-
- /**
- * Handles an incoming heartbeat command with arguments 'args'. Populates 'response';
- * returns a Status with either OK or an error message.
- */
- virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args,
- ReplSetHeartbeatResponse* response) = 0;
- virtual Status processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
- ReplSetHeartbeatResponse* response) = 0;
-
-
- /**
- * Arguments for the replSetReconfig command.
- */
- struct ReplSetReconfigArgs {
- BSONObj newConfigObj;
- bool force;
- };
-
- /**
- * Handles an incoming replSetReconfig command. Adds BSON to 'resultObj';
- * returns a Status with either OK or an error message.
- */
- virtual Status processReplSetReconfig(OperationContext* txn,
- const ReplSetReconfigArgs& args,
- BSONObjBuilder* resultObj) = 0;
-
- /*
- * Handles an incoming replSetInitiate command. If "configObj" is empty, generates a default
- * configuration to use.
- * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
- */
- virtual Status processReplSetInitiate(OperationContext* txn,
- const BSONObj& configObj,
- BSONObjBuilder* resultObj) = 0;
-
- /*
- * Handles an incoming replSetGetRBID command.
- * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
- */
- virtual Status processReplSetGetRBID(BSONObjBuilder* resultObj) = 0;
-
- /**
- * Increments this process's rollback id. Called every time a rollback occurs.
- */
- virtual void incrementRollbackID() = 0;
-
- /**
- * Arguments to the replSetFresh command.
- */
- struct ReplSetFreshArgs {
- std::string setName; // Name of the replset
- HostAndPort who; // host and port of the member that sent the replSetFresh command
- unsigned id; // replSet id of the member that sent the replSetFresh command
- int cfgver; // replSet config version that the member who sent the command thinks it has
- Timestamp opTime; // last optime seen by the member who sent the replSetFresh command
- };
-
- /*
- * Handles an incoming replSetFresh command.
- * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
- */
- virtual Status processReplSetFresh(const ReplSetFreshArgs& args,
- BSONObjBuilder* resultObj) = 0;
-
- /**
- * Arguments to the replSetElect command.
- */
- struct ReplSetElectArgs {
- std::string set; // Name of the replset
- int whoid; // replSet id of the member that sent the replSetFresh command
- int cfgver; // replSet config version that the member who sent the command thinks it has
- OID round; // unique ID for this election
- };
-
- /*
- * Handles an incoming replSetElect command.
- * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
- */
- virtual Status processReplSetElect(const ReplSetElectArgs& args,
- BSONObjBuilder* resultObj) = 0;
-
- /**
- * Handles an incoming replSetUpdatePosition command, updating each node's oplog progress.
- * Returns Status::OK() if all updates are processed correctly, NodeNotFound
- * if any updating node cannot be found in the config, InvalidReplicaSetConfig if the
- * "configVersion" sent in any of the updates doesn't match our config version, or
- * NotMasterOrSecondaryCode if we are in state REMOVED or otherwise don't have a valid
- * replica set config.
- * If a non-OK status is returned, it is unspecified whether none or some of the updates
- * were applied.
- * "configVersion" will be populated with our config version if and only if we return
- * InvalidReplicaSetConfig.
- */
- virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates,
- long long* configVersion) = 0;
-
- /**
- * Handles an incoming Handshake command. Associates the node's 'remoteID' with its
- * 'handshake' object. This association is used to update internal representation of
- * replication progress and to forward the node's replication progress upstream when this
- * node is being chained through in master/slave replication.
- *
- * Returns ErrorCodes::IllegalOperation if we're not running with master/slave replication.
- */
- virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake) = 0;
-
- /**
- * Returns a bool indicating whether or not this node builds indexes.
- */
- virtual bool buildsIndexes() = 0;
-
- /**
- * Returns a vector of members that have applied the operation with OpTime 'op'.
- */
- virtual std::vector<HostAndPort> getHostsWrittenTo(const OpTime& op) = 0;
-
- /**
- * Returns a vector of the members other than ourself in the replica set, as specified in
- * the replica set config. Invalid to call if we are not in replica set mode. Returns
- * an empty vector if we do not have a valid config.
- */
- virtual std::vector<HostAndPort> getOtherNodesInReplSet() const = 0;
-
- /**
- * Returns a BSONObj containing a representation of the current default write concern.
- */
- virtual WriteConcernOptions getGetLastErrorDefault() = 0;
-
- /**
- * Checks that the --replSet flag was passed when starting up the node and that the node
- * has a valid replica set config.
- *
- * Returns a Status indicating whether those conditions are met with errorcode
- * NoReplicationEnabled if --replSet was not present during start up or with errorcode
- * NotYetInitialized in the absence of a valid config. Also adds error info to "result".
- */
- virtual Status checkReplEnabledForCommand(BSONObjBuilder* result) = 0;
-
- /**
- * Chooses a viable sync source, or, if none available, returns empty HostAndPort.
- */
- virtual HostAndPort chooseNewSyncSource() = 0;
-
- /**
- * Blacklists choosing 'host' as a sync source until time 'until'.
- */
- virtual void blacklistSyncSource(const HostAndPort& host, Date_t until) = 0;
-
- /**
- * Loads the optime from the last op in the oplog into the coordinator's lastOpApplied
- * value.
- */
- virtual void resetLastOpTimeFromOplog(OperationContext* txn) = 0;
-
- /**
- * Determines if a new sync source should be considered.
- * currentSource: the current sync source
- */
- virtual bool shouldChangeSyncSource(const HostAndPort& currentSource) = 0;
-
- /**
- * Returns the OpTime of the latest replica set-committed op known to this server.
- * Committed means a majority of the voting nodes of the config are known to have the
- * operation in their oplogs. This implies such ops will never be rolled back.
- */
- virtual OpTime getLastCommittedOpTime() const = 0;
-
- /*
- * Handles an incoming replSetRequestVotes command.
- * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
- */
- virtual Status processReplSetRequestVotes(OperationContext* txn,
- const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response) = 0;
-
- /*
- * Handles an incoming replSetDeclareElectionWinner command.
- * Returns a Status with either OK or an error message.
- * Populates responseTerm with the current term from our perspective.
- */
- virtual Status processReplSetDeclareElectionWinner(
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm) = 0;
-
- /**
- * Prepares a BSONObj describing the current term, primary, and lastOp information.
- */
- virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder) = 0;
-
- /**
- * Returns true if the V1 election protocol is being used and false otherwise.
- */
- virtual bool isV1ElectionProtocol() = 0;
-
- /**
- * Writes into 'output' all the information needed to generate a summary of the current
- * replication state for use by the web interface.
- */
- virtual void summarizeAsHtml(ReplSetHtmlSummary* output) = 0;
-
- /**
- * Returns the current term.
- */
- virtual long long getTerm() = 0;
-
- /**
- * Attempts to update the current term for the V1 election protocol. If the term changes and
- * this node is primary, relinquishes primary.
- * Returns true if the term was updated (that is, when "term" was higher than the previously
- * recorded term) and false otherwise.
- */
- virtual bool updateTerm(long long term) = 0;
-
- protected:
-
- ReplicationCoordinator();
+ /**
+ * Clears the list of sync sources we have blacklisted.
+ */
+ virtual void clearSyncSourceBlacklist() = 0;
+
+ /**
+ * Blocks the calling thread for up to writeConcern.wTimeout millis, or until "opTime" has
+ * been replicated to at least a set of nodes that satisfies the writeConcern, whichever
+ * comes first. A writeConcern.wTimeout of 0 indicates no timeout (block forever) and a
+ * writeConcern.wTimeout of -1 indicates return immediately after checking. Return codes:
+ * ErrorCodes::WriteConcernFailed if the writeConcern.wTimeout is reached before
+ * the data has been sufficiently replicated
+ * ErrorCodes::ExceededTimeLimit if the txn->getMaxTimeMicrosRemaining is reached before
+ * the data has been sufficiently replicated
+ * ErrorCodes::NotMaster if the node is not Primary/Master
+ * ErrorCodes::UnknownReplWriteConcern if the writeConcern.wMode contains a write concern
+ * mode that is not known
+ * ErrorCodes::ShutdownInProgress if we are mid-shutdown
+ * ErrorCodes::Interrupted if the operation was killed with killop()
+ */
+ virtual StatusAndDuration awaitReplication(OperationContext* txn,
+ const OpTime& opTime,
+ const WriteConcernOptions& writeConcern) = 0;
+
+ /**
+ * Like awaitReplication(), above, but waits for the replication of the last operation
+ * performed on the client associated with "txn".
+ */
+ virtual StatusAndDuration awaitReplicationOfLastOpForClient(
+ OperationContext* txn, const WriteConcernOptions& writeConcern) = 0;
+
+ /**
+ * Causes this node to relinquish being primary for at least 'stepdownTime'. If 'force' is
+ * false, before doing so it will wait for 'waitTime' for one other node to be within 10
+ * seconds of this node's optime before stepping down. Returns a Status with the code
+ * ErrorCodes::ExceededTimeLimit if no secondary catches up within waitTime,
+ * ErrorCodes::NotMaster if you are no longer primary when trying to step down,
+ * ErrorCodes::SecondaryAheadOfPrimary if we are primary but there is another node that
+ * seems to be ahead of us in replication, and Status::OK otherwise.
+ */
+ virtual Status stepDown(OperationContext* txn,
+ bool force,
+ const Milliseconds& waitTime,
+ const Milliseconds& stepdownTime) = 0;
+
+ /**
+ * Returns true if the node can be considered master for the purpose of introspective
+ * commands such as isMaster() and rs.status().
+ */
+ virtual bool isMasterForReportingPurposes() = 0;
+
+ /**
+ * Returns true if it is valid for this node to accept writes on the given database.
+ * Currently this is true only if this node is Primary, master in master/slave,
+ * a standalone, or is writing to the local database.
+ *
+ * If a node was started with the replSet argument, but has not yet received a config, it
+ * will not be able to receive writes to a database other than local (it will not be
+     * treated as a standalone node).
+ *
+ * NOTE: This function can only be meaningfully called while the caller holds the global
+ * lock in some mode other than MODE_NONE.
+ */
+ virtual bool canAcceptWritesForDatabase(StringData dbName) = 0;
+
+ /**
+ * Returns true if it is valid for this node to accept writes on the given namespace.
+ *
+ * The result of this function should be consistent with canAcceptWritesForDatabase()
+ * for the database the namespace refers to, with additional checks on the collection.
+ */
+ virtual bool canAcceptWritesFor(const NamespaceString& ns) = 0;
+
+ /**
+ * Checks if the current replica set configuration can satisfy the given write concern.
+ *
+ * Things that are taken into consideration include:
+ * 1. If the set has enough data-bearing members.
+ * 2. If the write concern mode exists.
+ * 3. If there are enough members for the write concern mode specified.
+ */
+ virtual Status checkIfWriteConcernCanBeSatisfied(
+ const WriteConcernOptions& writeConcern) const = 0;
+
+ /**
+ * Returns Status::OK() if it is valid for this node to serve reads on the given collection
+ * and an errorcode indicating why the node cannot if it cannot.
+ */
+ virtual Status checkCanServeReadsFor(OperationContext* txn,
+ const NamespaceString& ns,
+ bool slaveOk) = 0;
+
+ /**
+ * Returns true if this node should ignore unique index constraints on new documents.
+ * Currently this is needed for nodes in STARTUP2, RECOVERING, and ROLLBACK states.
+ */
+ virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx) = 0;
+
+ /**
+ * Updates our internal tracking of the last OpTime applied for the given slave
+ * identified by "rid". Only valid to call in master/slave mode
+ */
+ virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts) = 0;
+
+ /**
+ * Updates our internal tracking of the last OpTime applied to this node.
+ *
+ * The new value of "opTime" must be no less than any prior value passed to this method, and
+ * it is the caller's job to properly synchronize this behavior. The exception to this rule
+ * is that after calls to resetLastOpTimeFromOplog(), the minimum acceptable value for
+ * "opTime" is reset based on the contents of the oplog, and may go backwards due to
+ * rollback.
+ */
+ virtual void setMyLastOptime(const OpTime& opTime) = 0;
+
+ /**
+     * Same as above, but used in places where we need to zero our last optime.
+ */
+ virtual void resetMyLastOptime() = 0;
+
+ /**
+     * Updates the message we include in heartbeat responses.
+ */
+ virtual void setMyHeartbeatMessage(const std::string& msg) = 0;
+
+ /**
+ * Returns the last optime recorded by setMyLastOptime.
+ */
+ virtual OpTime getMyLastOptime() const = 0;
+
+ /**
+ * Waits until the optime of the current node is at least the opTime specified in
+ * 'settings'.
+ *
+ * The returned ReadAfterOpTimeResponse object's didWait() method returns true if
+ * an attempt was made to wait for the specified opTime. Cases when this can be
+ * false could include:
+ *
+ * 1. No read after opTime was specified.
+ * 2. Attempting to do read after opTime when node is not a replica set member.
+ *
+ * Note: getDuration() on the returned ReadAfterOpTimeResponse will only be valid if
+ * its didWait() method returns true.
+ */
+ virtual ReadAfterOpTimeResponse waitUntilOpTime(OperationContext* txn,
+ const ReadAfterOpTimeArgs& settings) = 0;
+
+ /**
+ * Retrieves and returns the current election id, which is a unique id that is local to
+ * this node and changes every time we become primary.
+ * TODO(spencer): Use term instead.
+ */
+ virtual OID getElectionId() = 0;
+
+ /**
+ * Returns the RID for this node. The RID is used to identify this node to our sync source
+ * when sending updates about our replication progress.
+ */
+ virtual OID getMyRID() const = 0;
+
+ /**
+ * Returns the id for this node as specified in the current replica set configuration.
+ */
+ virtual int getMyId() const = 0;
+
+ /**
+ * Sets this node into a specific follower mode.
+ *
+ * Returns true if the follower mode was successfully set. Returns false if the
+ * node is or becomes a leader before setFollowerMode completes.
+ *
+ * Follower modes are RS_STARTUP2 (initial sync), RS_SECONDARY, RS_ROLLBACK and
+ * RS_RECOVERING. They are the valid states of a node whose topology coordinator has the
+ * follower role.
+ *
+ * This is essentially an interface that allows the applier to prevent the node from
+ * becoming a candidate or accepting reads, depending on circumstances in the oplog
+ * application process.
+ */
+ virtual bool setFollowerMode(const MemberState& newState) = 0;
+
+ /**
+ * Returns true if the coordinator wants the applier to pause application.
+ *
+ * If this returns true, the applier should call signalDrainComplete() when it has
+ * completed draining its operation buffer and no further ops are being applied.
+ */
+ virtual bool isWaitingForApplierToDrain() = 0;
+
+ /**
+ * Signals that a previously requested pause and drain of the applier buffer
+ * has completed.
+ *
+ * This is an interface that allows the applier to reenable writes after
+ * a successful election triggers the draining of the applier buffer.
+ */
+ virtual void signalDrainComplete(OperationContext* txn) = 0;
+
+ /**
+ * Signals the sync source feedback thread to wake up and send a handshake and
+ * replSetUpdatePosition command to our sync source.
+ */
+ virtual void signalUpstreamUpdater() = 0;
+
+ /**
+ * Prepares a BSONObj describing an invocation of the replSetUpdatePosition command that can
+ * be sent to this node's sync source to update it about our progress in replication.
+ *
+ * The returned bool indicates whether or not the command was created.
+ */
+ virtual bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) = 0;
+
+ /**
+ * Handles an incoming replSetGetStatus command. Adds BSON to 'result'.
+ */
+ virtual Status processReplSetGetStatus(BSONObjBuilder* result) = 0;
+
+ /**
+ * Handles an incoming isMaster command for a replica set node. Should not be
+ * called on a master-slave or standalone node.
+ */
+ virtual void fillIsMasterForReplSet(IsMasterResponse* result) = 0;
+
+ /**
+ * Adds to "result" a description of the slaveInfo data structure used to map RIDs to their
+ * last known optimes.
+ */
+ virtual void appendSlaveInfoData(BSONObjBuilder* result) = 0;
+
+ /**
+ * Returns a copy of the current ReplicaSetConfig.
+ */
+ virtual ReplicaSetConfig getConfig() const = 0;
+
+ /**
+ * Handles an incoming replSetGetConfig command. Adds BSON to 'result'.
+ */
+ virtual void processReplSetGetConfig(BSONObjBuilder* result) = 0;
+
+ /**
+ * Toggles maintenanceMode to the value expressed by 'activate'
+     * returns Status::OK if the change worked, NotSecondary if it failed because we are
+ * PRIMARY, and OperationFailed if we are not currently in maintenance mode
+ */
+ virtual Status setMaintenanceMode(bool activate) = 0;
+
+ /**
+ * Retrieves the current count of maintenanceMode and returns 'true' if greater than 0.
+ */
+ virtual bool getMaintenanceMode() = 0;
+
+ /**
+ * Handles an incoming replSetSyncFrom command. Adds BSON to 'result'
+ * returns Status::OK if the sync target could be set and an ErrorCode indicating why it
+ * couldn't otherwise.
+ */
+ virtual Status processReplSetSyncFrom(const HostAndPort& target, BSONObjBuilder* resultObj) = 0;
+
+ /**
+ * Handles an incoming replSetFreeze command. Adds BSON to 'resultObj'
+ * returns Status::OK() if the node is a member of a replica set with a config and an
+ * error Status otherwise
+ */
+ virtual Status processReplSetFreeze(int secs, BSONObjBuilder* resultObj) = 0;
+
+ /**
+ * Handles an incoming heartbeat command with arguments 'args'. Populates 'response';
+ * returns a Status with either OK or an error message.
+ */
+ virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args,
+ ReplSetHeartbeatResponse* response) = 0;
+ virtual Status processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
+ ReplSetHeartbeatResponse* response) = 0;
+
+
+ /**
+ * Arguments for the replSetReconfig command.
+ */
+ struct ReplSetReconfigArgs {
+ BSONObj newConfigObj;
+ bool force;
+ };
+
+ /**
+ * Handles an incoming replSetReconfig command. Adds BSON to 'resultObj';
+ * returns a Status with either OK or an error message.
+ */
+ virtual Status processReplSetReconfig(OperationContext* txn,
+ const ReplSetReconfigArgs& args,
+ BSONObjBuilder* resultObj) = 0;
+
+ /*
+ * Handles an incoming replSetInitiate command. If "configObj" is empty, generates a default
+ * configuration to use.
+ * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
+ */
+ virtual Status processReplSetInitiate(OperationContext* txn,
+ const BSONObj& configObj,
+ BSONObjBuilder* resultObj) = 0;
+
+ /*
+ * Handles an incoming replSetGetRBID command.
+ * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
+ */
+ virtual Status processReplSetGetRBID(BSONObjBuilder* resultObj) = 0;
+
+ /**
+ * Increments this process's rollback id. Called every time a rollback occurs.
+ */
+ virtual void incrementRollbackID() = 0;
+
+ /**
+ * Arguments to the replSetFresh command.
+ */
+ struct ReplSetFreshArgs {
+ std::string setName; // Name of the replset
+ HostAndPort who; // host and port of the member that sent the replSetFresh command
+ unsigned id; // replSet id of the member that sent the replSetFresh command
+ int cfgver; // replSet config version that the member who sent the command thinks it has
+ Timestamp opTime; // last optime seen by the member who sent the replSetFresh command
+ };
+
+ /*
+ * Handles an incoming replSetFresh command.
+ * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
+ */
+ virtual Status processReplSetFresh(const ReplSetFreshArgs& args, BSONObjBuilder* resultObj) = 0;
+ /**
+ * Arguments to the replSetElect command.
+ */
+ struct ReplSetElectArgs {
+ std::string set; // Name of the replset
+ int whoid; // replSet id of the member that sent the replSetFresh command
+ int cfgver; // replSet config version that the member who sent the command thinks it has
+ OID round; // unique ID for this election
};
-} // namespace repl
-} // namespace mongo
+ /*
+ * Handles an incoming replSetElect command.
+ * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
+ */
+ virtual Status processReplSetElect(const ReplSetElectArgs& args, BSONObjBuilder* resultObj) = 0;
+
+ /**
+ * Handles an incoming replSetUpdatePosition command, updating each node's oplog progress.
+ * Returns Status::OK() if all updates are processed correctly, NodeNotFound
+ * if any updating node cannot be found in the config, InvalidReplicaSetConfig if the
+ * "configVersion" sent in any of the updates doesn't match our config version, or
+ * NotMasterOrSecondaryCode if we are in state REMOVED or otherwise don't have a valid
+ * replica set config.
+ * If a non-OK status is returned, it is unspecified whether none or some of the updates
+ * were applied.
+ * "configVersion" will be populated with our config version if and only if we return
+ * InvalidReplicaSetConfig.
+ */
+ virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates,
+ long long* configVersion) = 0;
+
+ /**
+ * Handles an incoming Handshake command. Associates the node's 'remoteID' with its
+ * 'handshake' object. This association is used to update internal representation of
+ * replication progress and to forward the node's replication progress upstream when this
+ * node is being chained through in master/slave replication.
+ *
+ * Returns ErrorCodes::IllegalOperation if we're not running with master/slave replication.
+ */
+ virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake) = 0;
+
+ /**
+ * Returns a bool indicating whether or not this node builds indexes.
+ */
+ virtual bool buildsIndexes() = 0;
+
+ /**
+ * Returns a vector of members that have applied the operation with OpTime 'op'.
+ */
+ virtual std::vector<HostAndPort> getHostsWrittenTo(const OpTime& op) = 0;
+
+ /**
+ * Returns a vector of the members other than ourself in the replica set, as specified in
+ * the replica set config. Invalid to call if we are not in replica set mode. Returns
+ * an empty vector if we do not have a valid config.
+ */
+ virtual std::vector<HostAndPort> getOtherNodesInReplSet() const = 0;
+
+ /**
+ * Returns a BSONObj containing a representation of the current default write concern.
+ */
+ virtual WriteConcernOptions getGetLastErrorDefault() = 0;
+
+ /**
+ * Checks that the --replSet flag was passed when starting up the node and that the node
+ * has a valid replica set config.
+ *
+ * Returns a Status indicating whether those conditions are met with errorcode
+ * NoReplicationEnabled if --replSet was not present during start up or with errorcode
+ * NotYetInitialized in the absence of a valid config. Also adds error info to "result".
+ */
+ virtual Status checkReplEnabledForCommand(BSONObjBuilder* result) = 0;
+
+ /**
+ * Chooses a viable sync source, or, if none available, returns empty HostAndPort.
+ */
+ virtual HostAndPort chooseNewSyncSource() = 0;
+
+ /**
+ * Blacklists choosing 'host' as a sync source until time 'until'.
+ */
+ virtual void blacklistSyncSource(const HostAndPort& host, Date_t until) = 0;
+
+ /**
+ * Loads the optime from the last op in the oplog into the coordinator's lastOpApplied
+ * value.
+ */
+ virtual void resetLastOpTimeFromOplog(OperationContext* txn) = 0;
+
+ /**
+ * Determines if a new sync source should be considered.
+ * currentSource: the current sync source
+ */
+ virtual bool shouldChangeSyncSource(const HostAndPort& currentSource) = 0;
+
+ /**
+ * Returns the OpTime of the latest replica set-committed op known to this server.
+ * Committed means a majority of the voting nodes of the config are known to have the
+ * operation in their oplogs. This implies such ops will never be rolled back.
+ */
+ virtual OpTime getLastCommittedOpTime() const = 0;
+
+ /*
+ * Handles an incoming replSetRequestVotes command.
+ * Adds BSON to 'resultObj'; returns a Status with either OK or an error message.
+ */
+ virtual Status processReplSetRequestVotes(OperationContext* txn,
+ const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response) = 0;
+
+ /*
+ * Handles an incoming replSetDeclareElectionWinner command.
+ * Returns a Status with either OK or an error message.
+ * Populates responseTerm with the current term from our perspective.
+ */
+ virtual Status processReplSetDeclareElectionWinner(const ReplSetDeclareElectionWinnerArgs& args,
+ long long* responseTerm) = 0;
+
+ /**
+ * Prepares a BSONObj describing the current term, primary, and lastOp information.
+ */
+ virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder) = 0;
+
+ /**
+ * Returns true if the V1 election protocol is being used and false otherwise.
+ */
+ virtual bool isV1ElectionProtocol() = 0;
+
+ /**
+ * Writes into 'output' all the information needed to generate a summary of the current
+ * replication state for use by the web interface.
+ */
+ virtual void summarizeAsHtml(ReplSetHtmlSummary* output) = 0;
+
+ /**
+ * Returns the current term.
+ */
+ virtual long long getTerm() = 0;
+
+ /**
+ * Attempts to update the current term for the V1 election protocol. If the term changes and
+ * this node is primary, relinquishes primary.
+ * Returns true if the term was updated (that is, when "term" was higher than the previously
+ * recorded term) and false otherwise.
+ */
+ virtual bool updateTerm(long long term) = 0;
+
+protected:
+ ReplicationCoordinator();
+};
+
+} // namespace repl
+} // namespace mongo
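The updateTerm()/getTerm() contract documented above is simple to state in isolation: a term only ever moves forward, and the call reports whether it actually advanced. The sketch below is a minimal, self-contained illustration of that contract under those assumptions; TermTracker is a hypothetical stand-in, not a MongoDB class, and it omits the step-down side effect the real coordinator performs when a primary learns of a higher term.

// term_update_sketch.cpp -- illustrative only; TermTracker is a hypothetical stand-in.
#include <cassert>

class TermTracker {
public:
    // Mirrors the documented updateTerm() contract: returns true only when
    // "term" is higher than the currently recorded term.
    bool updateTerm(long long term) {
        if (term <= _term) {
            return false;
        }
        _term = term;
        // A real coordinator would also relinquish primary here if needed.
        return true;
    }

    long long getTerm() const {
        return _term;
    }

private:
    long long _term = 0;
};

int main() {
    TermTracker tracker;
    assert(tracker.updateTerm(3));   // term advances: 0 -> 3
    assert(!tracker.updateTerm(3));  // same term: no update
    assert(!tracker.updateTerm(1));  // stale term: no update
    assert(tracker.getTerm() == 3);
    return 0;
}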
diff --git a/src/mongo/db/repl/replication_coordinator_external_state.cpp b/src/mongo/db/repl/replication_coordinator_external_state.cpp
index 68403755b07..fbeddfba68a 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state.cpp
@@ -33,8 +33,8 @@
namespace mongo {
namespace repl {
- ReplicationCoordinatorExternalState::ReplicationCoordinatorExternalState() {}
- ReplicationCoordinatorExternalState::~ReplicationCoordinatorExternalState() {}
+ReplicationCoordinatorExternalState::ReplicationCoordinatorExternalState() {}
+ReplicationCoordinatorExternalState::~ReplicationCoordinatorExternalState() {}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_external_state.h b/src/mongo/db/repl/replication_coordinator_external_state.h
index 10e842bf36a..44e60c9b113 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state.h
@@ -36,150 +36,150 @@
namespace mongo {
- class BSONObj;
- class OID;
- class OperationContext;
- class Status;
- struct HostAndPort;
- template <typename T> class StatusWith;
+class BSONObj;
+class OID;
+class OperationContext;
+class Status;
+struct HostAndPort;
+template <typename T>
+class StatusWith;
namespace repl {
- class LastVote;
-
- /**
- * This class represents the interface the ReplicationCoordinator uses to interact with the
- * rest of the system. All functionality of the ReplicationCoordinatorImpl that would introduce
- * dependencies on large sections of the server code and thus break the unit testability of
- * ReplicationCoordinatorImpl should be moved here.
- */
- class ReplicationCoordinatorExternalState {
- MONGO_DISALLOW_COPYING(ReplicationCoordinatorExternalState);
- public:
-
- ReplicationCoordinatorExternalState();
- virtual ~ReplicationCoordinatorExternalState();
-
- /**
- * Starts the background sync, producer, and sync source feedback threads
- *
- * NOTE: Only starts threads if they are not already started,
- */
- virtual void startThreads() = 0;
-
- /**
- * Starts the Master/Slave threads and sets up logOp
- */
- virtual void startMasterSlave(OperationContext* txn) = 0;
-
- /**
- * Performs any necessary external state specific shutdown tasks, such as cleaning up
- * the threads it started.
- */
- virtual void shutdown() = 0;
-
- /**
- * Creates the oplog and writes the first entry.
- */
- virtual void initiateOplog(OperationContext* txn) = 0;
-
- /**
- * Simple wrapper around SyncSourceFeedback::forwardSlaveProgress. Signals to the
- * SyncSourceFeedback thread that it needs to wake up and send a replSetUpdatePosition
- * command upstream.
- */
- virtual void forwardSlaveProgress() = 0;
-
- /**
- * Queries the singleton document in local.me. If it exists and our hostname has not
- * changed since we wrote, returns the RID stored in the object. If the document does not
- * exist or our hostname doesn't match what was recorded in local.me, generates a new OID
- * to use as our RID, stores it in local.me, and returns it.
- */
- virtual OID ensureMe(OperationContext*) = 0;
-
- /**
- * Returns true if "host" is one of the network identities of this node.
- */
- virtual bool isSelf(const HostAndPort& host) = 0;
-
- /**
- * Gets the replica set config document from local storage, or returns an error.
- */
- virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn) = 0;
-
- /**
- * Stores the replica set config document in local storage, or returns an error.
- */
- virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config) = 0;
-
- /**
- * Gets the replica set lastVote document from local storage, or returns an error.
- */
- virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn) = 0;
-
- /**
- * Stores the replica set lastVote document in local storage, or returns an error.
- */
- virtual Status storeLocalLastVoteDocument(OperationContext* txn,
- const LastVote& lastVote) = 0;
-
- /**
- * Sets the global opTime to be 'newTime'.
- */
- virtual void setGlobalTimestamp(const Timestamp& newTime) = 0;
-
- /**
- * Gets the last optime of an operation performed on this host, from stable
- * storage.
- */
- virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn) = 0;
-
- /**
- * Returns the HostAndPort of the remote client connected to us that initiated the operation
- * represented by "txn".
- */
- virtual HostAndPort getClientHostAndPort(const OperationContext* txn) = 0;
-
- /**
- * Closes all connections except those marked with the keepOpen property, which should
- * just be connections used for heartbeating.
- * This is used during stepdown, and transition out of primary.
- */
- virtual void closeConnections() = 0;
-
- /**
- * Kills all operations that have a Client that is associated with an incoming user
- * connection. Used during stepdown.
- */
- virtual void killAllUserOperations(OperationContext* txn) = 0;
-
- /**
- * Clears all cached sharding metadata on this server. This is called after stepDown to
- * ensure that if the node becomes primary again in the future it will reload an up-to-date
- * version of the sharding data.
- */
- virtual void clearShardingState() = 0;
-
- /**
- * Notifies the bgsync and syncSourceFeedback threads to choose a new sync source.
- */
- virtual void signalApplierToChooseNewSyncSource() = 0;
-
- /**
- * Returns an OperationContext, owned by the caller, that may be used in methods of
- * the same instance that require an OperationContext.
- */
- virtual OperationContext* createOperationContext(const std::string& threadName) = 0;
-
- /**
- * Drops all temporary collections on all databases except "local".
- *
- * The implementation may assume that the caller has acquired the global exclusive lock
- * for "txn".
- */
- virtual void dropAllTempCollections(OperationContext* txn) = 0;
- };
-
-} // namespace repl
-} // namespace mongo
+class LastVote;
+
+/**
+ * This class represents the interface the ReplicationCoordinator uses to interact with the
+ * rest of the system. All functionality of the ReplicationCoordinatorImpl that would introduce
+ * dependencies on large sections of the server code and thus break the unit testability of
+ * ReplicationCoordinatorImpl should be moved here.
+ */
+class ReplicationCoordinatorExternalState {
+ MONGO_DISALLOW_COPYING(ReplicationCoordinatorExternalState);
+
+public:
+ ReplicationCoordinatorExternalState();
+ virtual ~ReplicationCoordinatorExternalState();
+
+ /**
+ * Starts the background sync, producer, and sync source feedback threads
+ *
+ * NOTE: Only starts threads if they are not already started.
+ */
+ virtual void startThreads() = 0;
+
+ /**
+ * Starts the Master/Slave threads and sets up logOp
+ */
+ virtual void startMasterSlave(OperationContext* txn) = 0;
+
+ /**
+ * Performs any necessary external state specific shutdown tasks, such as cleaning up
+ * the threads it started.
+ */
+ virtual void shutdown() = 0;
+
+ /**
+ * Creates the oplog and writes the first entry.
+ */
+ virtual void initiateOplog(OperationContext* txn) = 0;
+
+ /**
+ * Simple wrapper around SyncSourceFeedback::forwardSlaveProgress. Signals to the
+ * SyncSourceFeedback thread that it needs to wake up and send a replSetUpdatePosition
+ * command upstream.
+ */
+ virtual void forwardSlaveProgress() = 0;
+
+ /**
+ * Queries the singleton document in local.me. If it exists and our hostname has not
+ * changed since we wrote, returns the RID stored in the object. If the document does not
+ * exist or our hostname doesn't match what was recorded in local.me, generates a new OID
+ * to use as our RID, stores it in local.me, and returns it.
+ */
+ virtual OID ensureMe(OperationContext*) = 0;
+
+ /**
+ * Returns true if "host" is one of the network identities of this node.
+ */
+ virtual bool isSelf(const HostAndPort& host) = 0;
+
+ /**
+ * Gets the replica set config document from local storage, or returns an error.
+ */
+ virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn) = 0;
+
+ /**
+ * Stores the replica set config document in local storage, or returns an error.
+ */
+ virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config) = 0;
+
+ /**
+ * Gets the replica set lastVote document from local storage, or returns an error.
+ */
+ virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn) = 0;
+
+ /**
+ * Stores the replica set lastVote document in local storage, or returns an error.
+ */
+ virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote) = 0;
+
+ /**
+ * Sets the global opTime to be 'newTime'.
+ */
+ virtual void setGlobalTimestamp(const Timestamp& newTime) = 0;
+
+ /**
+ * Gets the last optime of an operation performed on this host, from stable
+ * storage.
+ */
+ virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn) = 0;
+
+ /**
+ * Returns the HostAndPort of the remote client connected to us that initiated the operation
+ * represented by "txn".
+ */
+ virtual HostAndPort getClientHostAndPort(const OperationContext* txn) = 0;
+
+ /**
+ * Closes all connections except those marked with the keepOpen property, which should
+ * just be connections used for heartbeating.
+ * This is used during stepdown, and transition out of primary.
+ */
+ virtual void closeConnections() = 0;
+
+ /**
+ * Kills all operations that have a Client that is associated with an incoming user
+ * connection. Used during stepdown.
+ */
+ virtual void killAllUserOperations(OperationContext* txn) = 0;
+
+ /**
+ * Clears all cached sharding metadata on this server. This is called after stepDown to
+ * ensure that if the node becomes primary again in the future it will reload an up-to-date
+ * version of the sharding data.
+ */
+ virtual void clearShardingState() = 0;
+
+ /**
+ * Notifies the bgsync and syncSourceFeedback threads to choose a new sync source.
+ */
+ virtual void signalApplierToChooseNewSyncSource() = 0;
+
+ /**
+ * Returns an OperationContext, owned by the caller, that may be used in methods of
+ * the same instance that require an OperationContext.
+ */
+ virtual OperationContext* createOperationContext(const std::string& threadName) = 0;
+
+ /**
+ * Drops all temporary collections on all databases except "local".
+ *
+ * The implementation may assume that the caller has acquired the global exclusive lock
+ * for "txn".
+ */
+ virtual void dropAllTempCollections(OperationContext* txn) = 0;
+};
+
+} // namespace repl
+} // namespace mongo
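The ensureMe() description above boils down to one decision: reuse the stored RID only while the recorded hostname still matches, otherwise mint a new id and overwrite the singleton. The following standalone sketch mirrors that decision; MeDocument and generateId are illustrative stand-ins, not MongoDB types, and the optional plays the role of the local.me document.

// ensure_me_sketch.cpp -- a minimal sketch of the local.me logic described above.
#include <optional>
#include <string>

struct MeDocument {
    long long id;
    std::string host;
};

// Hypothetical id generator standing in for OID::gen().
long long generateId() {
    static long long next = 1;
    return next++;
}

// Returns the RID to use: reuse the stored one only if the stored hostname
// still matches; otherwise mint a new id and overwrite the singleton.
long long ensureMe(std::optional<MeDocument>& stored, const std::string& myHostname) {
    if (!stored || stored->host != myHostname) {
        stored = MeDocument{generateId(), myHostname};  // empty and repopulate local.me
    }
    return stored->id;
}

int main() {
    std::optional<MeDocument> doc;
    long long first = ensureMe(doc, "host-a:27017");   // no document: new id
    long long second = ensureMe(doc, "host-a:27017");  // same host: id reused
    long long third = ensureMe(doc, "host-b:27017");   // host changed: new id
    return (first == second && second != third) ? 0 : 1;
}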
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index a1b5c609bf8..34976b02ba5 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -70,272 +70,256 @@ namespace mongo {
namespace repl {
namespace {
- const char configCollectionName[] = "local.system.replset";
- const char configDatabaseName[] = "local";
- const char lastVoteCollectionName[] = "local.replset.election";
- const char lastVoteDatabaseName[] = "local";
- const char meCollectionName[] = "local.me";
- const char meDatabaseName[] = "local";
- const char tsFieldName[] = "ts";
+const char configCollectionName[] = "local.system.replset";
+const char configDatabaseName[] = "local";
+const char lastVoteCollectionName[] = "local.replset.election";
+const char lastVoteDatabaseName[] = "local";
+const char meCollectionName[] = "local.me";
+const char meDatabaseName[] = "local";
+const char tsFieldName[] = "ts";
} // namespace
- ReplicationCoordinatorExternalStateImpl::ReplicationCoordinatorExternalStateImpl() :
- _startedThreads(false)
- , _nextThreadId(0) {}
- ReplicationCoordinatorExternalStateImpl::~ReplicationCoordinatorExternalStateImpl() {}
+ReplicationCoordinatorExternalStateImpl::ReplicationCoordinatorExternalStateImpl()
+ : _startedThreads(false), _nextThreadId(0) {}
+ReplicationCoordinatorExternalStateImpl::~ReplicationCoordinatorExternalStateImpl() {}
- void ReplicationCoordinatorExternalStateImpl::startThreads() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
- if (_startedThreads) {
- return;
- }
- log() << "Starting replication applier threads";
- _applierThread.reset(new stdx::thread(runSyncThread));
+void ReplicationCoordinatorExternalStateImpl::startThreads() {
+ stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ if (_startedThreads) {
+ return;
+ }
+ log() << "Starting replication applier threads";
+ _applierThread.reset(new stdx::thread(runSyncThread));
+ BackgroundSync* bgsync = BackgroundSync::get();
+ _producerThread.reset(new stdx::thread(stdx::bind(&BackgroundSync::producerThread, bgsync)));
+ _syncSourceFeedbackThread.reset(
+ new stdx::thread(stdx::bind(&SyncSourceFeedback::run, &_syncSourceFeedback)));
+ _startedThreads = true;
+}
+
+void ReplicationCoordinatorExternalStateImpl::startMasterSlave(OperationContext* txn) {
+ repl::startMasterSlave(txn);
+}
+
+void ReplicationCoordinatorExternalStateImpl::shutdown() {
+ stdx::lock_guard<stdx::mutex> lk(_threadMutex);
+ if (_startedThreads) {
+ log() << "Stopping replication applier threads";
+ _syncSourceFeedback.shutdown();
+ _syncSourceFeedbackThread->join();
+ _applierThread->join();
BackgroundSync* bgsync = BackgroundSync::get();
- _producerThread.reset(new stdx::thread(stdx::bind(&BackgroundSync::producerThread,
- bgsync)));
- _syncSourceFeedbackThread.reset(new stdx::thread(stdx::bind(&SyncSourceFeedback::run,
- &_syncSourceFeedback)));
- _startedThreads = true;
+ bgsync->shutdown();
+ _producerThread->join();
}
+}
- void ReplicationCoordinatorExternalStateImpl::startMasterSlave(OperationContext* txn) {
- repl::startMasterSlave(txn);
- }
+void ReplicationCoordinatorExternalStateImpl::initiateOplog(OperationContext* txn) {
+ createOplog(txn);
- void ReplicationCoordinatorExternalStateImpl::shutdown() {
- stdx::lock_guard<stdx::mutex> lk(_threadMutex);
- if (_startedThreads) {
- log() << "Stopping replication applier threads";
- _syncSourceFeedback.shutdown();
- _syncSourceFeedbackThread->join();
- _applierThread->join();
- BackgroundSync* bgsync = BackgroundSync::get();
- bgsync->shutdown();
- _producerThread->join();
- }
- }
-
- void ReplicationCoordinatorExternalStateImpl::initiateOplog(OperationContext* txn) {
- createOplog(txn);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction scopedXact(txn, MODE_X);
+ Lock::GlobalWrite globalWrite(txn->lockState());
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
-
- WriteUnitOfWork wuow(txn);
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSON("msg" << "initiating set"));
- wuow.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs");
+ WriteUnitOfWork wuow(txn);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(txn,
+ BSON("msg"
+ << "initiating set"));
+ wuow.commit();
}
-
- void ReplicationCoordinatorExternalStateImpl::forwardSlaveProgress() {
- _syncSourceFeedback.forwardSlaveProgress();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs");
+}
+
+void ReplicationCoordinatorExternalStateImpl::forwardSlaveProgress() {
+ _syncSourceFeedback.forwardSlaveProgress();
+}
+
+OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* txn) {
+ std::string myname = getHostName();
+ OID myRID;
+ {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lock(txn->lockState(), meDatabaseName, MODE_X);
+
+ BSONObj me;
+ // local.me is an identifier for a server for getLastError w:2+
+ // TODO: handle WriteConflictExceptions below
+ if (!Helpers::getSingleton(txn, meCollectionName, me) || !me.hasField("host") ||
+ me["host"].String() != myname) {
+ myRID = OID::gen();
+
+ // clean out local.me
+ Helpers::emptyCollection(txn, meCollectionName);
+
+ // repopulate
+ BSONObjBuilder b;
+ b.append("_id", myRID);
+ b.append("host", myname);
+ Helpers::putSingleton(txn, meCollectionName, b.done());
+ } else {
+ myRID = me["_id"].OID();
+ }
}
+ return myRID;
+}
- OID ReplicationCoordinatorExternalStateImpl::ensureMe(OperationContext* txn) {
- std::string myname = getHostName();
- OID myRID;
- {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lock(txn->lockState(), meDatabaseName, MODE_X);
-
- BSONObj me;
- // local.me is an identifier for a server for getLastError w:2+
- // TODO: handle WriteConflictExceptions below
- if (!Helpers::getSingleton(txn, meCollectionName, me) ||
- !me.hasField("host") ||
- me["host"].String() != myname) {
-
- myRID = OID::gen();
-
- // clean out local.me
- Helpers::emptyCollection(txn, meCollectionName);
-
- // repopulate
- BSONObjBuilder b;
- b.append("_id", myRID);
- b.append("host", myname);
- Helpers::putSingleton(txn, meCollectionName, b.done());
- } else {
- myRID = me["_id"].OID();
+StatusWith<BSONObj> ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocument(
+ OperationContext* txn) {
+ try {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ BSONObj config;
+ if (!Helpers::getSingleton(txn, configCollectionName, config)) {
+ return StatusWith<BSONObj>(
+ ErrorCodes::NoMatchingDocument,
+ str::stream() << "Did not find replica set configuration document in "
+ << configCollectionName);
}
+ return StatusWith<BSONObj>(config);
}
- return myRID;
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "load replica set config", configCollectionName);
+ } catch (const DBException& ex) {
+ return StatusWith<BSONObj>(ex.toStatus());
}
+}
- StatusWith<BSONObj> ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocument(
- OperationContext* txn) {
- try {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- BSONObj config;
- if (!Helpers::getSingleton(txn, configCollectionName, config)) {
- return StatusWith<BSONObj>(
- ErrorCodes::NoMatchingDocument,
- str::stream() << "Did not find replica set configuration document in "
- << configCollectionName);
- }
- return StatusWith<BSONObj>(config);
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
- "load replica set config",
- configCollectionName);
- }
- catch (const DBException& ex) {
- return StatusWith<BSONObj>(ex.toStatus());
+Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(OperationContext* txn,
+ const BSONObj& config) {
+ try {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, MODE_X);
+ Helpers::putSingleton(txn, configCollectionName, config);
+ return Status::OK();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "save replica set config", configCollectionName);
+ } catch (const DBException& ex) {
+ return ex.toStatus();
}
+}
- Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(
- OperationContext* txn,
- const BSONObj& config) {
- try {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWriteLock(txn->lockState(), configDatabaseName, MODE_X);
- Helpers::putSingleton(txn, configCollectionName, config);
- return Status::OK();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
- "save replica set config",
- configCollectionName);
- }
- catch (const DBException& ex) {
- return ex.toStatus();
+StatusWith<LastVote> ReplicationCoordinatorExternalStateImpl::loadLocalLastVoteDocument(
+ OperationContext* txn) {
+ try {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ BSONObj lastVoteObj;
+ if (!Helpers::getSingleton(txn, lastVoteCollectionName, lastVoteObj)) {
+ return StatusWith<LastVote>(ErrorCodes::NoMatchingDocument,
+ str::stream()
+ << "Did not find replica set lastVote document in "
+ << lastVoteCollectionName);
+ }
+ LastVote lastVote;
+ lastVote.initialize(lastVoteObj);
+ return StatusWith<LastVote>(lastVote);
}
-
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ txn, "load replica set lastVote", lastVoteCollectionName);
+ } catch (const DBException& ex) {
+ return StatusWith<LastVote>(ex.toStatus());
}
+}
- StatusWith<LastVote> ReplicationCoordinatorExternalStateImpl::loadLocalLastVoteDocument(
- OperationContext* txn) {
- try {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- BSONObj lastVoteObj;
- if (!Helpers::getSingleton(txn, lastVoteCollectionName, lastVoteObj)) {
- return StatusWith<LastVote>(
- ErrorCodes::NoMatchingDocument,
- str::stream() << "Did not find replica set lastVote document in "
- << lastVoteCollectionName);
- }
- LastVote lastVote;
- lastVote.initialize(lastVoteObj);
- return StatusWith<LastVote>(lastVote);
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
- "load replica set lastVote",
- lastVoteCollectionName);
- }
- catch (const DBException& ex) {
- return StatusWith<LastVote>(ex.toStatus());
+Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument(
+ OperationContext* txn, const LastVote& lastVote) {
+ BSONObj lastVoteObj = lastVote.toBSON();
+ try {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbWriteLock(txn->lockState(), lastVoteDatabaseName, MODE_X);
+ Helpers::putSingleton(txn, lastVoteCollectionName, lastVoteObj);
+ return Status::OK();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ txn, "save replica set lastVote", lastVoteCollectionName);
+ MONGO_UNREACHABLE;
+ } catch (const DBException& ex) {
+ return ex.toStatus();
}
-
- Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument(
- OperationContext* txn,
- const LastVote& lastVote) {
- BSONObj lastVoteObj = lastVote.toBSON();
- try {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWriteLock(txn->lockState(), lastVoteDatabaseName, MODE_X);
- Helpers::putSingleton(txn, lastVoteCollectionName, lastVoteObj);
- return Status::OK();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
- "save replica set lastVote",
- lastVoteCollectionName);
- MONGO_UNREACHABLE;
- }
- catch (const DBException& ex) {
- return ex.toStatus();
+}
+
+void ReplicationCoordinatorExternalStateImpl::setGlobalTimestamp(const Timestamp& newTime) {
+ setNewTimestamp(newTime);
+}
+
+StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTime(OperationContext* txn) {
+ // TODO: handle WriteConflictExceptions below
+ try {
+ BSONObj oplogEntry;
+ if (!Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry)) {
+ return StatusWith<OpTime>(ErrorCodes::NoMatchingDocument,
+ str::stream() << "Did not find any entries in "
+ << rsOplogName);
}
-
- }
-
- void ReplicationCoordinatorExternalStateImpl::setGlobalTimestamp(const Timestamp& newTime) {
- setNewTimestamp(newTime);
- }
-
- StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTime(
- OperationContext* txn) {
-
- // TODO: handle WriteConflictExceptions below
- try {
- BSONObj oplogEntry;
- if (!Helpers::getLast(txn, rsOplogName.c_str(), oplogEntry)) {
- return StatusWith<OpTime>(
- ErrorCodes::NoMatchingDocument,
- str::stream() << "Did not find any entries in " << rsOplogName);
- }
- BSONElement tsElement = oplogEntry[tsFieldName];
- if (tsElement.eoo()) {
- return StatusWith<OpTime>(
- ErrorCodes::NoSuchKey,
- str::stream() << "Most recent entry in " << rsOplogName << " missing \"" <<
- tsFieldName << "\" field");
- }
- if (tsElement.type() != bsonTimestamp) {
- return StatusWith<OpTime>(
- ErrorCodes::TypeMismatch,
- str::stream() << "Expected type of \"" << tsFieldName <<
- "\" in most recent " << rsOplogName <<
- " entry to have type Timestamp, but found " << typeName(tsElement.type()));
- }
- return StatusWith<OpTime>(extractOpTime(oplogEntry));
+ BSONElement tsElement = oplogEntry[tsFieldName];
+ if (tsElement.eoo()) {
+ return StatusWith<OpTime>(ErrorCodes::NoSuchKey,
+ str::stream() << "Most recent entry in " << rsOplogName
+ << " missing \"" << tsFieldName << "\" field");
}
- catch (const DBException& ex) {
- return StatusWith<OpTime>(ex.toStatus());
+ if (tsElement.type() != bsonTimestamp) {
+ return StatusWith<OpTime>(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected type of \"" << tsFieldName
+ << "\" in most recent " << rsOplogName
+ << " entry to have type Timestamp, but found "
+ << typeName(tsElement.type()));
}
+ return StatusWith<OpTime>(extractOpTime(oplogEntry));
+ } catch (const DBException& ex) {
+ return StatusWith<OpTime>(ex.toStatus());
}
-
- bool ReplicationCoordinatorExternalStateImpl::isSelf(const HostAndPort& host) {
- return repl::isSelf(host);
-
- }
-
- HostAndPort ReplicationCoordinatorExternalStateImpl::getClientHostAndPort(
- const OperationContext* txn) {
- return HostAndPort(txn->getClient()->clientAddress(true));
- }
-
- void ReplicationCoordinatorExternalStateImpl::closeConnections() {
- MessagingPort::closeAllSockets(executor::NetworkInterface::kMessagingPortKeepOpen);
- }
-
- void ReplicationCoordinatorExternalStateImpl::killAllUserOperations(OperationContext* txn) {
- ServiceContext* environment = getGlobalServiceContext();
- environment->killAllUserOperations(txn);
- }
-
- void ReplicationCoordinatorExternalStateImpl::clearShardingState() {
- shardingState.clearCollectionMetadata();
- }
-
- void ReplicationCoordinatorExternalStateImpl::signalApplierToChooseNewSyncSource() {
- BackgroundSync::get()->clearSyncTarget();
- }
-
- OperationContext* ReplicationCoordinatorExternalStateImpl::createOperationContext(
- const std::string& threadName) {
- Client::initThreadIfNotAlready(threadName.c_str());
- return new OperationContextImpl();
- }
-
- void ReplicationCoordinatorExternalStateImpl::dropAllTempCollections(OperationContext* txn) {
- std::vector<std::string> dbNames;
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases(&dbNames);
-
- for (std::vector<std::string>::iterator it = dbNames.begin(); it != dbNames.end(); ++it) {
- // The local db is special because it isn't replicated. It is cleared at startup even on
- // replica set members.
- if (*it == "local")
- continue;
- LOG(2) << "Removing temporary collections from " << *it;
- Database* db = dbHolder().get(txn, *it);
- // Since we must be holding the global lock during this function, if listDatabases
- // returned this dbname, we should be able to get a reference to it - it can't have
- // been dropped.
- invariant(db);
- db->clearTmpCollections(txn);
- }
+}
+
+bool ReplicationCoordinatorExternalStateImpl::isSelf(const HostAndPort& host) {
+ return repl::isSelf(host);
+}
+
+HostAndPort ReplicationCoordinatorExternalStateImpl::getClientHostAndPort(
+ const OperationContext* txn) {
+ return HostAndPort(txn->getClient()->clientAddress(true));
+}
+
+void ReplicationCoordinatorExternalStateImpl::closeConnections() {
+ MessagingPort::closeAllSockets(executor::NetworkInterface::kMessagingPortKeepOpen);
+}
+
+void ReplicationCoordinatorExternalStateImpl::killAllUserOperations(OperationContext* txn) {
+ ServiceContext* environment = getGlobalServiceContext();
+ environment->killAllUserOperations(txn);
+}
+
+void ReplicationCoordinatorExternalStateImpl::clearShardingState() {
+ shardingState.clearCollectionMetadata();
+}
+
+void ReplicationCoordinatorExternalStateImpl::signalApplierToChooseNewSyncSource() {
+ BackgroundSync::get()->clearSyncTarget();
+}
+
+OperationContext* ReplicationCoordinatorExternalStateImpl::createOperationContext(
+ const std::string& threadName) {
+ Client::initThreadIfNotAlready(threadName.c_str());
+ return new OperationContextImpl();
+}
+
+void ReplicationCoordinatorExternalStateImpl::dropAllTempCollections(OperationContext* txn) {
+ std::vector<std::string> dbNames;
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&dbNames);
+
+ for (std::vector<std::string>::iterator it = dbNames.begin(); it != dbNames.end(); ++it) {
+ // The local db is special because it isn't replicated. It is cleared at startup even on
+ // replica set members.
+ if (*it == "local")
+ continue;
+ LOG(2) << "Removing temporary collections from " << *it;
+ Database* db = dbHolder().get(txn, *it);
+ // Since we must be holding the global lock during this function, if listDatabases
+ // returned this dbname, we should be able to get a reference to it - it can't have
+ // been dropped.
+ invariant(db);
+ db->clearTmpCollections(txn);
}
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
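Several of the storage helpers above run their bodies inside MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END, which keeps re-running the guarded work until it completes without a write conflict. The sketch below shows only that retry shape in plain C++, as an assumption about the pattern rather than the macros' actual expansion; WriteConflictException here is a local stand-in, not the MongoDB exception type.

// write_conflict_retry_sketch.cpp -- illustrates the retry-until-success shape.
#include <functional>
#include <stdexcept>

struct WriteConflictException : std::runtime_error {
    WriteConflictException() : std::runtime_error("write conflict") {}
};

// Keeps re-running "work" until it finishes without throwing a write conflict.
template <typename T>
T writeConflictRetry(const std::function<T()>& work) {
    while (true) {
        try {
            return work();
        } catch (const WriteConflictException&) {
            // A real implementation would log and back off before retrying.
        }
    }
}

int main() {
    int attempts = 0;
    int value = writeConflictRetry<int>([&]() {
        if (++attempts < 3) {
            throw WriteConflictException();  // simulate two conflicting attempts
        }
        return 42;
    });
    return (value == 42 && attempts == 3) ? 0 : 1;
}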
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
index 4b08a2c126a..d0f64c82c1d 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h
@@ -37,61 +37,61 @@
namespace mongo {
namespace repl {
- class ReplicationCoordinatorExternalStateImpl : public ReplicationCoordinatorExternalState {
- MONGO_DISALLOW_COPYING(ReplicationCoordinatorExternalStateImpl);
- public:
+class ReplicationCoordinatorExternalStateImpl : public ReplicationCoordinatorExternalState {
+ MONGO_DISALLOW_COPYING(ReplicationCoordinatorExternalStateImpl);
- ReplicationCoordinatorExternalStateImpl();
- virtual ~ReplicationCoordinatorExternalStateImpl();
- virtual void startThreads();
- virtual void startMasterSlave(OperationContext* txn);
- virtual void shutdown();
- virtual void initiateOplog(OperationContext* txn);
- virtual void forwardSlaveProgress();
- virtual OID ensureMe(OperationContext* txn);
- virtual bool isSelf(const HostAndPort& host);
- virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn);
- virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config);
- virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn);
- virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote);
- virtual void setGlobalTimestamp(const Timestamp& newTime);
- virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn);
- virtual HostAndPort getClientHostAndPort(const OperationContext* txn);
- virtual void closeConnections();
- virtual void killAllUserOperations(OperationContext* txn);
- virtual void clearShardingState();
- virtual void signalApplierToChooseNewSyncSource();
- virtual OperationContext* createOperationContext(const std::string& threadName);
- virtual void dropAllTempCollections(OperationContext* txn);
+public:
+ ReplicationCoordinatorExternalStateImpl();
+ virtual ~ReplicationCoordinatorExternalStateImpl();
+ virtual void startThreads();
+ virtual void startMasterSlave(OperationContext* txn);
+ virtual void shutdown();
+ virtual void initiateOplog(OperationContext* txn);
+ virtual void forwardSlaveProgress();
+ virtual OID ensureMe(OperationContext* txn);
+ virtual bool isSelf(const HostAndPort& host);
+ virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn);
+ virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config);
+ virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn);
+ virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote);
+ virtual void setGlobalTimestamp(const Timestamp& newTime);
+ virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn);
+ virtual HostAndPort getClientHostAndPort(const OperationContext* txn);
+ virtual void closeConnections();
+ virtual void killAllUserOperations(OperationContext* txn);
+ virtual void clearShardingState();
+ virtual void signalApplierToChooseNewSyncSource();
+ virtual OperationContext* createOperationContext(const std::string& threadName);
+ virtual void dropAllTempCollections(OperationContext* txn);
- std::string getNextOpContextThreadName();
+ std::string getNextOpContextThreadName();
- private:
- // Guards starting threads and setting _startedThreads
- stdx::mutex _threadMutex;
+private:
+ // Guards starting threads and setting _startedThreads
+ stdx::mutex _threadMutex;
- // True when the threads have been started
- bool _startedThreads;
+ // True when the threads have been started
+ bool _startedThreads;
- // The SyncSourceFeedback class is responsible for sending replSetUpdatePosition commands
- // for forwarding replication progress information upstream when there is chained
- // replication.
- SyncSourceFeedback _syncSourceFeedback;
+ // The SyncSourceFeedback class is responsible for sending replSetUpdatePosition commands
+ // for forwarding replication progress information upstream when there is chained
+ // replication.
+ SyncSourceFeedback _syncSourceFeedback;
- // Thread running SyncSourceFeedback::run().
- std::unique_ptr<stdx::thread> _syncSourceFeedbackThread;
+ // Thread running SyncSourceFeedback::run().
+ std::unique_ptr<stdx::thread> _syncSourceFeedbackThread;
- // Thread running runSyncThread().
- std::unique_ptr<stdx::thread> _applierThread;
+ // Thread running runSyncThread().
+ std::unique_ptr<stdx::thread> _applierThread;
- // Thread running BackgroundSync::producerThread().
- std::unique_ptr<stdx::thread> _producerThread;
+ // Thread running BackgroundSync::producerThread().
+ std::unique_ptr<stdx::thread> _producerThread;
- // Mutex guarding the _nextThreadId value to prevent concurrent incrementing.
- stdx::mutex _nextThreadIdMutex;
- // Number used to uniquely name threads.
- long long _nextThreadId;
- };
+ // Mutex guarding the _nextThreadId value to prevent concurrent incrementing.
+ stdx::mutex _nextThreadIdMutex;
+ // Number used to uniquely name threads.
+ long long _nextThreadId;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
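The implementation header above pairs _threadMutex with a _startedThreads flag so that startThreads() is idempotent and shutdown() joins only what was actually started. A standalone version of that start-once/join-on-shutdown pattern, written here as a sketch with std::thread instead of stdx::thread and a trivial placeholder worker, might look like this:

// thread_lifecycle_sketch.cpp -- illustrative stand-in, not the MongoDB implementation.
#include <atomic>
#include <memory>
#include <mutex>
#include <thread>

class BackgroundThreads {
public:
    void start() {
        std::lock_guard<std::mutex> lk(_mutex);
        if (_started) {
            return;  // idempotent, like startThreads()
        }
        _stop = false;
        _worker = std::make_unique<std::thread>([this] {
            while (!_stop) {
                std::this_thread::yield();  // placeholder for real applier work
            }
        });
        _started = true;
    }

    void shutdown() {
        std::lock_guard<std::mutex> lk(_mutex);
        if (!_started) {
            return;
        }
        _stop = true;
        _worker->join();
        _started = false;
    }

private:
    std::mutex _mutex;  // guards _started, like _threadMutex above
    bool _started = false;
    std::atomic<bool> _stop{false};
    std::unique_ptr<std::thread> _worker;
};

int main() {
    BackgroundThreads threads;
    threads.start();
    threads.start();  // second call is a no-op
    threads.shutdown();
    return 0;
}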
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
index 2ab0103f6b1..ee6594084c0 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp
@@ -42,156 +42,147 @@
namespace mongo {
namespace repl {
- ReplicationCoordinatorExternalStateMock::ReplicationCoordinatorExternalStateMock()
- : _localRsConfigDocument(ErrorCodes::NoMatchingDocument, "No local config document"),
- _localRsLastVoteDocument(ErrorCodes::NoMatchingDocument, "No local lastVote document"),
- _lastOpTime(ErrorCodes::NoMatchingDocument, "No last oplog entry"),
- _canAcquireGlobalSharedLock(true),
- _storeLocalConfigDocumentStatus(Status::OK()),
- _storeLocalLastVoteDocumentStatus(Status::OK()),
- _storeLocalConfigDocumentShouldHang(false),
- _storeLocalLastVoteDocumentShouldHang(false),
- _connectionsClosed(false) {
- }
-
- ReplicationCoordinatorExternalStateMock::~ReplicationCoordinatorExternalStateMock() {}
-
- void ReplicationCoordinatorExternalStateMock::startThreads() {}
- void ReplicationCoordinatorExternalStateMock::startMasterSlave(OperationContext*) {}
- void ReplicationCoordinatorExternalStateMock::initiateOplog(OperationContext* txn) {}
- void ReplicationCoordinatorExternalStateMock::shutdown() {}
- void ReplicationCoordinatorExternalStateMock::forwardSlaveProgress() {}
-
- OID ReplicationCoordinatorExternalStateMock::ensureMe(OperationContext*) {
- return OID::gen();
- }
-
- bool ReplicationCoordinatorExternalStateMock::isSelf(const HostAndPort& host) {
- return sequenceContains(_selfHosts, host);
- }
-
- void ReplicationCoordinatorExternalStateMock::addSelf(const HostAndPort& host) {
- _selfHosts.push_back(host);
+ReplicationCoordinatorExternalStateMock::ReplicationCoordinatorExternalStateMock()
+ : _localRsConfigDocument(ErrorCodes::NoMatchingDocument, "No local config document"),
+ _localRsLastVoteDocument(ErrorCodes::NoMatchingDocument, "No local lastVote document"),
+ _lastOpTime(ErrorCodes::NoMatchingDocument, "No last oplog entry"),
+ _canAcquireGlobalSharedLock(true),
+ _storeLocalConfigDocumentStatus(Status::OK()),
+ _storeLocalLastVoteDocumentStatus(Status::OK()),
+ _storeLocalConfigDocumentShouldHang(false),
+ _storeLocalLastVoteDocumentShouldHang(false),
+ _connectionsClosed(false) {}
+
+ReplicationCoordinatorExternalStateMock::~ReplicationCoordinatorExternalStateMock() {}
+
+void ReplicationCoordinatorExternalStateMock::startThreads() {}
+void ReplicationCoordinatorExternalStateMock::startMasterSlave(OperationContext*) {}
+void ReplicationCoordinatorExternalStateMock::initiateOplog(OperationContext* txn) {}
+void ReplicationCoordinatorExternalStateMock::shutdown() {}
+void ReplicationCoordinatorExternalStateMock::forwardSlaveProgress() {}
+
+OID ReplicationCoordinatorExternalStateMock::ensureMe(OperationContext*) {
+ return OID::gen();
+}
+
+bool ReplicationCoordinatorExternalStateMock::isSelf(const HostAndPort& host) {
+ return sequenceContains(_selfHosts, host);
+}
+
+void ReplicationCoordinatorExternalStateMock::addSelf(const HostAndPort& host) {
+ _selfHosts.push_back(host);
+}
+
+HostAndPort ReplicationCoordinatorExternalStateMock::getClientHostAndPort(
+ const OperationContext* txn) {
+ return _clientHostAndPort;
+}
+
+void ReplicationCoordinatorExternalStateMock::setClientHostAndPort(
+ const HostAndPort& clientHostAndPort) {
+ _clientHostAndPort = clientHostAndPort;
+}
+
+StatusWith<BSONObj> ReplicationCoordinatorExternalStateMock::loadLocalConfigDocument(
+ OperationContext* txn) {
+ return _localRsConfigDocument;
+}
+
+Status ReplicationCoordinatorExternalStateMock::storeLocalConfigDocument(OperationContext* txn,
+ const BSONObj& config) {
+ {
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
+ while (_storeLocalConfigDocumentShouldHang) {
+ _shouldHangConfigCondVar.wait(lock);
+ }
}
-
- HostAndPort ReplicationCoordinatorExternalStateMock::getClientHostAndPort(
- const OperationContext* txn) {
- return _clientHostAndPort;
+ if (_storeLocalConfigDocumentStatus.isOK()) {
+ setLocalConfigDocument(StatusWith<BSONObj>(config));
+ return Status::OK();
}
+ return _storeLocalConfigDocumentStatus;
+}
- void ReplicationCoordinatorExternalStateMock::setClientHostAndPort(
- const HostAndPort& clientHostAndPort) {
- _clientHostAndPort = clientHostAndPort;
- }
+void ReplicationCoordinatorExternalStateMock::setLocalConfigDocument(
+ const StatusWith<BSONObj>& localConfigDocument) {
+ _localRsConfigDocument = localConfigDocument;
+}
- StatusWith<BSONObj> ReplicationCoordinatorExternalStateMock::loadLocalConfigDocument(
- OperationContext* txn) {
- return _localRsConfigDocument;
- }
+StatusWith<LastVote> ReplicationCoordinatorExternalStateMock::loadLocalLastVoteDocument(
+ OperationContext* txn) {
+ return _localRsLastVoteDocument;
+}
- Status ReplicationCoordinatorExternalStateMock::storeLocalConfigDocument(
- OperationContext* txn,
- const BSONObj& config) {
- {
- stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
- while (_storeLocalConfigDocumentShouldHang) {
- _shouldHangConfigCondVar.wait(lock);
- }
- }
- if (_storeLocalConfigDocumentStatus.isOK()) {
- setLocalConfigDocument(StatusWith<BSONObj>(config));
- return Status::OK();
+Status ReplicationCoordinatorExternalStateMock::storeLocalLastVoteDocument(
+ OperationContext* txn, const LastVote& lastVote) {
+ {
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
+ while (_storeLocalLastVoteDocumentShouldHang) {
+ _shouldHangLastVoteCondVar.wait(lock);
}
- return _storeLocalConfigDocumentStatus;
- }
-
- void ReplicationCoordinatorExternalStateMock::setLocalConfigDocument(
- const StatusWith<BSONObj>& localConfigDocument) {
-
- _localRsConfigDocument = localConfigDocument;
}
-
- StatusWith<LastVote> ReplicationCoordinatorExternalStateMock::loadLocalLastVoteDocument(
- OperationContext* txn) {
- return _localRsLastVoteDocument;
+ if (_storeLocalLastVoteDocumentStatus.isOK()) {
+ setLocalLastVoteDocument(StatusWith<LastVote>(lastVote));
+ return Status::OK();
}
+ return _storeLocalLastVoteDocumentStatus;
+}
- Status ReplicationCoordinatorExternalStateMock::storeLocalLastVoteDocument(
- OperationContext* txn,
- const LastVote& lastVote) {
- {
- stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
- while (_storeLocalLastVoteDocumentShouldHang) {
- _shouldHangLastVoteCondVar.wait(lock);
- }
- }
- if (_storeLocalLastVoteDocumentStatus.isOK()) {
- setLocalLastVoteDocument(StatusWith<LastVote>(lastVote));
- return Status::OK();
- }
- return _storeLocalLastVoteDocumentStatus;
- }
+void ReplicationCoordinatorExternalStateMock::setLocalLastVoteDocument(
+ const StatusWith<LastVote>& localLastVoteDocument) {
+ _localRsLastVoteDocument = localLastVoteDocument;
+}
- void ReplicationCoordinatorExternalStateMock::setLocalLastVoteDocument(
- const StatusWith<LastVote>& localLastVoteDocument) {
+void ReplicationCoordinatorExternalStateMock::setGlobalTimestamp(const Timestamp& newTime) {}
- _localRsLastVoteDocument = localLastVoteDocument;
- }
-
- void ReplicationCoordinatorExternalStateMock::setGlobalTimestamp(const Timestamp& newTime) {
- }
-
- StatusWith<OpTime> ReplicationCoordinatorExternalStateMock::loadLastOpTime(
- OperationContext* txn) {
- return _lastOpTime;
- }
+StatusWith<OpTime> ReplicationCoordinatorExternalStateMock::loadLastOpTime(OperationContext* txn) {
+ return _lastOpTime;
+}
- void ReplicationCoordinatorExternalStateMock::setLastOpTime(
- const StatusWith<OpTime>& lastApplied) {
- _lastOpTime = lastApplied;
- }
+void ReplicationCoordinatorExternalStateMock::setLastOpTime(const StatusWith<OpTime>& lastApplied) {
+ _lastOpTime = lastApplied;
+}
- void ReplicationCoordinatorExternalStateMock::setStoreLocalConfigDocumentStatus(Status status) {
- _storeLocalConfigDocumentStatus = status;
- }
+void ReplicationCoordinatorExternalStateMock::setStoreLocalConfigDocumentStatus(Status status) {
+ _storeLocalConfigDocumentStatus = status;
+}
- void ReplicationCoordinatorExternalStateMock::setStoreLocalConfigDocumentToHang(bool hang) {
- stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
- _storeLocalConfigDocumentShouldHang = hang;
- if (!hang) {
- _shouldHangConfigCondVar.notify_all();
- }
+void ReplicationCoordinatorExternalStateMock::setStoreLocalConfigDocumentToHang(bool hang) {
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangConfigMutex);
+ _storeLocalConfigDocumentShouldHang = hang;
+ if (!hang) {
+ _shouldHangConfigCondVar.notify_all();
}
+}
- void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentStatus(
- Status status) {
- _storeLocalLastVoteDocumentStatus = status;
- }
+void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentStatus(Status status) {
+ _storeLocalLastVoteDocumentStatus = status;
+}
- void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentToHang(bool hang) {
- stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
- _storeLocalLastVoteDocumentShouldHang = hang;
- if (!hang) {
- _shouldHangLastVoteCondVar.notify_all();
- }
+void ReplicationCoordinatorExternalStateMock::setStoreLocalLastVoteDocumentToHang(bool hang) {
+ stdx::unique_lock<stdx::mutex> lock(_shouldHangLastVoteMutex);
+ _storeLocalLastVoteDocumentShouldHang = hang;
+ if (!hang) {
+ _shouldHangLastVoteCondVar.notify_all();
}
+}
- void ReplicationCoordinatorExternalStateMock::closeConnections() {
- _connectionsClosed = true;
- }
+void ReplicationCoordinatorExternalStateMock::closeConnections() {
+ _connectionsClosed = true;
+}
- void ReplicationCoordinatorExternalStateMock::killAllUserOperations(OperationContext* txn) {}
+void ReplicationCoordinatorExternalStateMock::killAllUserOperations(OperationContext* txn) {}
- void ReplicationCoordinatorExternalStateMock::clearShardingState() {}
+void ReplicationCoordinatorExternalStateMock::clearShardingState() {}
- void ReplicationCoordinatorExternalStateMock::signalApplierToChooseNewSyncSource() {}
+void ReplicationCoordinatorExternalStateMock::signalApplierToChooseNewSyncSource() {}
- OperationContext* ReplicationCoordinatorExternalStateMock::createOperationContext(
- const std::string& threadName) {
- return new OperationContextReplMock;
- }
+OperationContext* ReplicationCoordinatorExternalStateMock::createOperationContext(
+ const std::string& threadName) {
+ return new OperationContextReplMock;
+}
- void ReplicationCoordinatorExternalStateMock::dropAllTempCollections(OperationContext* txn) {}
+void ReplicationCoordinatorExternalStateMock::dropAllTempCollections(OperationContext* txn) {}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
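The mock's setStoreLocalConfigDocumentToHang()/setStoreLocalLastVoteDocumentToHang() switches are built from a flag, a mutex, and a condition variable: store calls park in a wait loop while the flag is set and are released by notify_all() once a test clears it. The self-contained sketch below reproduces that mechanism with illustrative names; it is not the mock's real API.

// hang_toggle_sketch.cpp -- self-contained version of the "shouldHang" control.
#include <condition_variable>
#include <mutex>
#include <thread>

class HangToggle {
public:
    void setShouldHang(bool hang) {
        std::unique_lock<std::mutex> lock(_mutex);
        _shouldHang = hang;
        if (!hang) {
            _condVar.notify_all();  // release any caller parked in waitIfHanging()
        }
    }

    void waitIfHanging() {
        std::unique_lock<std::mutex> lock(_mutex);
        while (_shouldHang) {
            _condVar.wait(lock);
        }
    }

private:
    std::mutex _mutex;
    std::condition_variable _condVar;
    bool _shouldHang = false;
};

int main() {
    HangToggle toggle;
    toggle.setShouldHang(true);
    std::thread storer([&] { toggle.waitIfHanging(); });  // blocks like storeLocalConfigDocument()
    toggle.setShouldHang(false);                          // the test un-hangs the store
    storer.join();
    return 0;
}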
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
index 1602601a33a..a93b4a5038a 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h
@@ -44,103 +44,104 @@
namespace mongo {
namespace repl {
- class ReplicationCoordinatorExternalStateMock : public ReplicationCoordinatorExternalState {
- MONGO_DISALLOW_COPYING(ReplicationCoordinatorExternalStateMock);
- public:
- class GlobalSharedLockAcquirer;
-
- ReplicationCoordinatorExternalStateMock();
- virtual ~ReplicationCoordinatorExternalStateMock();
- virtual void startThreads();
- virtual void startMasterSlave(OperationContext*);
- virtual void shutdown();
- virtual void initiateOplog(OperationContext* txn);
- virtual void forwardSlaveProgress();
- virtual OID ensureMe(OperationContext*);
- virtual bool isSelf(const HostAndPort& host);
- virtual HostAndPort getClientHostAndPort(const OperationContext* txn);
- virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn);
- virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config);
- virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn);
- virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote);
- virtual void setGlobalTimestamp(const Timestamp& newTime);
- virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn);
- virtual void closeConnections();
- virtual void killAllUserOperations(OperationContext* txn);
- virtual void clearShardingState();
- virtual void signalApplierToChooseNewSyncSource();
- virtual OperationContext* createOperationContext(const std::string& threadName);
- virtual void dropAllTempCollections(OperationContext* txn);
-
- /**
- * Adds "host" to the list of hosts that this mock will match when responding to "isSelf"
- * messages.
- */
- void addSelf(const HostAndPort& host);
-
- /**
- * Sets the return value for subsequent calls to loadLocalConfigDocument().
- */
- void setLocalConfigDocument(const StatusWith<BSONObj>& localConfigDocument);
-
- /**
- * Sets the return value for subsequent calls to loadLocalLastVoteDocument().
- */
- void setLocalLastVoteDocument(const StatusWith<LastVote>& localLastVoteDocument);
-
- /**
- * Sets the return value for subsequent calls to getClientHostAndPort().
- */
- void setClientHostAndPort(const HostAndPort& clientHostAndPort);
-
- /**
- * Sets the return value for subsequent calls to loadLastOpTimeApplied.
- */
- void setLastOpTime(const StatusWith<OpTime>& lastApplied);
-
- /**
- * Sets the return value for subsequent calls to storeLocalConfigDocument().
- * If "status" is Status::OK(), the subsequent calls will call the underlying funtion.
- */
- void setStoreLocalConfigDocumentStatus(Status status);
-
- /**
- * Sets whether or not subsequent calls to storeLocalConfigDocument() should hang
- * indefinitely or not based on the value of "hang".
- */
- void setStoreLocalConfigDocumentToHang(bool hang);
-
- /**
- * Sets the return value for subsequent calls to storeLocalLastVoteDocument().
- * If "status" is Status::OK(), the subsequent calls will call the underlying funtion.
- */
- void setStoreLocalLastVoteDocumentStatus(Status status);
-
- /**
- * Sets whether or not subsequent calls to storeLocalLastVoteDocument() should hang
- * indefinitely or not based on the value of "hang".
- */
- void setStoreLocalLastVoteDocumentToHang(bool hang);
-
- private:
- StatusWith<BSONObj> _localRsConfigDocument;
- StatusWith<LastVote> _localRsLastVoteDocument;
- StatusWith<OpTime> _lastOpTime;
- std::vector<HostAndPort> _selfHosts;
- bool _canAcquireGlobalSharedLock;
- Status _storeLocalConfigDocumentStatus;
- Status _storeLocalLastVoteDocumentStatus;
- // mutex and cond var for controlling stroeLocalConfigDocument()'s hanging
- stdx::mutex _shouldHangConfigMutex;
- stdx::condition_variable _shouldHangConfigCondVar;
- // mutex and cond var for controlling stroeLocalLastVoteDocument()'s hanging
- stdx::mutex _shouldHangLastVoteMutex;
- stdx::condition_variable _shouldHangLastVoteCondVar;
- bool _storeLocalConfigDocumentShouldHang;
- bool _storeLocalLastVoteDocumentShouldHang;
- bool _connectionsClosed;
- HostAndPort _clientHostAndPort;
- };
-
-} // namespace repl
-} // namespace mongo
+class ReplicationCoordinatorExternalStateMock : public ReplicationCoordinatorExternalState {
+ MONGO_DISALLOW_COPYING(ReplicationCoordinatorExternalStateMock);
+
+public:
+ class GlobalSharedLockAcquirer;
+
+ ReplicationCoordinatorExternalStateMock();
+ virtual ~ReplicationCoordinatorExternalStateMock();
+ virtual void startThreads();
+ virtual void startMasterSlave(OperationContext*);
+ virtual void shutdown();
+ virtual void initiateOplog(OperationContext* txn);
+ virtual void forwardSlaveProgress();
+ virtual OID ensureMe(OperationContext*);
+ virtual bool isSelf(const HostAndPort& host);
+ virtual HostAndPort getClientHostAndPort(const OperationContext* txn);
+ virtual StatusWith<BSONObj> loadLocalConfigDocument(OperationContext* txn);
+ virtual Status storeLocalConfigDocument(OperationContext* txn, const BSONObj& config);
+ virtual StatusWith<LastVote> loadLocalLastVoteDocument(OperationContext* txn);
+ virtual Status storeLocalLastVoteDocument(OperationContext* txn, const LastVote& lastVote);
+ virtual void setGlobalTimestamp(const Timestamp& newTime);
+ virtual StatusWith<OpTime> loadLastOpTime(OperationContext* txn);
+ virtual void closeConnections();
+ virtual void killAllUserOperations(OperationContext* txn);
+ virtual void clearShardingState();
+ virtual void signalApplierToChooseNewSyncSource();
+ virtual OperationContext* createOperationContext(const std::string& threadName);
+ virtual void dropAllTempCollections(OperationContext* txn);
+
+ /**
+ * Adds "host" to the list of hosts that this mock will match when responding to "isSelf"
+ * messages.
+ */
+ void addSelf(const HostAndPort& host);
+
+ /**
+ * Sets the return value for subsequent calls to loadLocalConfigDocument().
+ */
+ void setLocalConfigDocument(const StatusWith<BSONObj>& localConfigDocument);
+
+ /**
+ * Sets the return value for subsequent calls to loadLocalLastVoteDocument().
+ */
+ void setLocalLastVoteDocument(const StatusWith<LastVote>& localLastVoteDocument);
+
+ /**
+ * Sets the return value for subsequent calls to getClientHostAndPort().
+ */
+ void setClientHostAndPort(const HostAndPort& clientHostAndPort);
+
+ /**
+ * Sets the return value for subsequent calls to loadLastOpTimeApplied.
+ */
+ void setLastOpTime(const StatusWith<OpTime>& lastApplied);
+
+ /**
+ * Sets the return value for subsequent calls to storeLocalConfigDocument().
+ * If "status" is Status::OK(), the subsequent calls will call the underlying function.
+ */
+ void setStoreLocalConfigDocumentStatus(Status status);
+
+ /**
+ * Sets whether or not subsequent calls to storeLocalConfigDocument() should hang
+ * indefinitely or not based on the value of "hang".
+ */
+ void setStoreLocalConfigDocumentToHang(bool hang);
+
+ /**
+ * Sets the return value for subsequent calls to storeLocalLastVoteDocument().
+ * If "status" is Status::OK(), the subsequent calls will call the underlying function.
+ */
+ void setStoreLocalLastVoteDocumentStatus(Status status);
+
+ /**
+ * Sets whether or not subsequent calls to storeLocalLastVoteDocument() should hang
+ * indefinitely or not based on the value of "hang".
+ */
+ void setStoreLocalLastVoteDocumentToHang(bool hang);
+
+private:
+ StatusWith<BSONObj> _localRsConfigDocument;
+ StatusWith<LastVote> _localRsLastVoteDocument;
+ StatusWith<OpTime> _lastOpTime;
+ std::vector<HostAndPort> _selfHosts;
+ bool _canAcquireGlobalSharedLock;
+ Status _storeLocalConfigDocumentStatus;
+ Status _storeLocalLastVoteDocumentStatus;
+ // mutex and cond var for controlling storeLocalConfigDocument()'s hanging
+ stdx::mutex _shouldHangConfigMutex;
+ stdx::condition_variable _shouldHangConfigCondVar;
+ // mutex and cond var for controlling storeLocalLastVoteDocument()'s hanging
+ stdx::mutex _shouldHangLastVoteMutex;
+ stdx::condition_variable _shouldHangLastVoteCondVar;
+ bool _storeLocalConfigDocumentShouldHang;
+ bool _storeLocalLastVoteDocumentShouldHang;
+ bool _connectionsClosed;
+ HostAndPort _clientHostAndPort;
+};
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_global.cpp b/src/mongo/db/repl/replication_coordinator_global.cpp
index e35ead35e96..c7a163218f7 100644
--- a/src/mongo/db/repl/replication_coordinator_global.cpp
+++ b/src/mongo/db/repl/replication_coordinator_global.cpp
@@ -34,16 +34,15 @@
namespace mongo {
namespace repl {
- ReplicationCoordinator* getGlobalReplicationCoordinator() {
- ReplicationCoordinator* globalReplCoordinator = ReplicationCoordinator::get(
- getGlobalServiceContext());
- return globalReplCoordinator;
- }
+ReplicationCoordinator* getGlobalReplicationCoordinator() {
+ ReplicationCoordinator* globalReplCoordinator =
+ ReplicationCoordinator::get(getGlobalServiceContext());
+ return globalReplCoordinator;
+}
- void setGlobalReplicationCoordinator(ReplicationCoordinator* coord) {
- repl::ReplicationCoordinator::set(getGlobalServiceContext(),
- std::move(
- std::unique_ptr<ReplicationCoordinator>(coord)));
- }
-} // namespace repl
-} // namespace mongo
+void setGlobalReplicationCoordinator(ReplicationCoordinator* coord) {
+ repl::ReplicationCoordinator::set(getGlobalServiceContext(),
+ std::move(std::unique_ptr<ReplicationCoordinator>(coord)));
+}
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_global.h b/src/mongo/db/repl/replication_coordinator_global.h
index c107959dbf6..a18033fd162 100644
--- a/src/mongo/db/repl/replication_coordinator_global.h
+++ b/src/mongo/db/repl/replication_coordinator_global.h
@@ -33,8 +33,8 @@
namespace mongo {
namespace repl {
- ReplicationCoordinator* getGlobalReplicationCoordinator();
- void setGlobalReplicationCoordinator(ReplicationCoordinator* coordinator);
+ReplicationCoordinator* getGlobalReplicationCoordinator();
+void setGlobalReplicationCoordinator(ReplicationCoordinator* coordinator);
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
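The get/set pair declared above hands ownership of a process-wide coordinator to the global ServiceContext. As a rough, simplified stand-in for that shape (the real code uses a ServiceContext decoration rather than a static), a file-scope unique_ptr version looks like this:

// global_coordinator_sketch.cpp -- hypothetical stand-in, not the MongoDB wiring.
#include <cassert>
#include <memory>

class Coordinator {};  // stand-in for ReplicationCoordinator

namespace {
std::unique_ptr<Coordinator> globalCoordinator;
}  // namespace

void setGlobalCoordinator(std::unique_ptr<Coordinator> coord) {
    globalCoordinator = std::move(coord);  // takes ownership, like setGlobalReplicationCoordinator()
}

Coordinator* getGlobalCoordinator() {
    return globalCoordinator.get();  // non-owning access, like getGlobalReplicationCoordinator()
}

int main() {
    assert(getGlobalCoordinator() == nullptr);
    setGlobalCoordinator(std::make_unique<Coordinator>());
    assert(getGlobalCoordinator() != nullptr);
    return 0;
}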
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index bd1378699ad..e3ba34932de 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -77,716 +77,683 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterface;
+using executor::NetworkInterface;
- void lockAndCall(stdx::unique_lock<stdx::mutex>* lk, const stdx::function<void ()>& fn) {
- if (!lk->owns_lock()) {
- lk->lock();
- }
- fn();
+void lockAndCall(stdx::unique_lock<stdx::mutex>* lk, const stdx::function<void()>& fn) {
+ if (!lk->owns_lock()) {
+ lk->lock();
}
+ fn();
+}
- /**
- * Implements the force-reconfig behavior of incrementing config version by a large random
- * number.
- */
- BSONObj incrementConfigVersionByRandom(BSONObj config) {
- BSONObjBuilder builder;
- for (BSONObjIterator iter(config); iter.more(); iter.next()) {
- BSONElement elem = *iter;
- if (elem.fieldNameStringData() == ReplicaSetConfig::kVersionFieldName &&
- elem.isNumber()) {
-
- std::unique_ptr<SecureRandom> generator(SecureRandom::create());
- const int random = std::abs(static_cast<int>(generator->nextInt64()) % 100000);
- builder.appendIntOrLL(ReplicaSetConfig::kVersionFieldName,
- elem.numberLong() + 10000 + random);
- }
- else {
- builder.append(elem);
- }
- }
- return builder.obj();
- }
-
-} //namespace
-
- struct ReplicationCoordinatorImpl::WaiterInfo {
-
- /**
- * Constructor takes the list of waiters and enqueues itself on the list, removing itself
- * in the destructor.
- */
- WaiterInfo(std::vector<WaiterInfo*>* _list,
- unsigned int _opID,
- const OpTime* _opTime,
- const WriteConcernOptions* _writeConcern,
- stdx::condition_variable* _condVar) : list(_list),
- master(true),
- opID(_opID),
- opTime(_opTime),
- writeConcern(_writeConcern),
- condVar(_condVar) {
- list->push_back(this);
- }
-
- ~WaiterInfo() {
- list->erase(std::remove(list->begin(), list->end(), this), list->end());
- }
-
- std::vector<WaiterInfo*>* list;
- bool master; // Set to false to indicate that stepDown was called while waiting
- const unsigned int opID;
- const OpTime* opTime;
- const WriteConcernOptions* writeConcern;
- stdx::condition_variable* condVar;
- };
-
-namespace {
- ReplicationCoordinator::Mode getReplicationModeFromSettings(const ReplSettings& settings) {
- if (settings.usingReplSets()) {
- return ReplicationCoordinator::modeReplSet;
- }
- if (settings.master || settings.slave) {
- return ReplicationCoordinator::modeMasterSlave;
+/**
+ * Implements the force-reconfig behavior of incrementing config version by a large random
+ * number.
+ */
+BSONObj incrementConfigVersionByRandom(BSONObj config) {
+ BSONObjBuilder builder;
+ for (BSONObjIterator iter(config); iter.more(); iter.next()) {
+ BSONElement elem = *iter;
+ if (elem.fieldNameStringData() == ReplicaSetConfig::kVersionFieldName && elem.isNumber()) {
+ std::unique_ptr<SecureRandom> generator(SecureRandom::create());
+ const int random = std::abs(static_cast<int>(generator->nextInt64()) % 100000);
+ builder.appendIntOrLL(ReplicaSetConfig::kVersionFieldName,
+ elem.numberLong() + 10000 + random);
+ } else {
+ builder.append(elem);
}
- return ReplicationCoordinator::modeNone;
}
-} // namespace
-
- ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
- const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- TopologyCoordinator* topCoord,
- int64_t prngSeed,
- NetworkInterface* network,
- StorageInterface* storage,
- ReplicationExecutor* replExec) :
- _settings(settings),
- _replMode(getReplicationModeFromSettings(settings)),
- _topCoord(topCoord),
- _replExecutorIfOwned(replExec ? nullptr :
- new ReplicationExecutor(network,
- storage,
- prngSeed)),
- _replExecutor(replExec ? *replExec : *_replExecutorIfOwned),
- _externalState(externalState),
- _inShutdown(false),
- _memberState(MemberState::RS_STARTUP),
- _isWaitingForDrainToComplete(false),
- _rsConfigState(kConfigPreStart),
- _selfIndex(-1),
- _sleptLastElection(false),
- _canAcceptNonLocalWrites(!(settings.usingReplSets() || settings.slave)),
- _canServeNonLocalReads(0U),
- _dr(DataReplicatorOptions(), &_replExecutor, this) {
-
- if (!isReplEnabled()) {
- return;
- }
+ return builder.obj();
+}
- std::unique_ptr<SecureRandom> rbidGenerator(SecureRandom::create());
- _rbid = static_cast<int>(rbidGenerator->nextInt64());
- if (_rbid < 0) {
- // Ensure _rbid is always positive
- _rbid = -_rbid;
- }
+} // namespace
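// Illustrative usage sketch, assuming a hypothetical two-field config document:
// incrementConfigVersionByRandom above copies every field of the config and replaces only the
// numeric version field with version + 10000 + a random value in [0, 100000), so a forced
// reconfig outranks any config installed concurrently.
BSONObjBuilder exampleConfig;
exampleConfig.append("_id", "rs0");
exampleConfig.append("version", 3);
BSONObj forced = incrementConfigVersionByRandom(exampleConfig.obj());
// forced["version"] is now 3 + 10000 + r, with r drawn from [0, 100000); other fields copy over.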
- // Make sure there is always an entry in _slaveInfo for ourself.
- SlaveInfo selfInfo;
- selfInfo.self = true;
- _slaveInfo.push_back(selfInfo);
- }
-
- ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
- const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- NetworkInterface* network,
- StorageInterface* storage,
- TopologyCoordinator* topCoord,
- int64_t prngSeed) : ReplicationCoordinatorImpl(settings,
- externalState,
- topCoord,
- prngSeed,
- network,
- storage,
- nullptr) { }
-
- ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
- const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- TopologyCoordinator* topCoord,
- ReplicationExecutor* replExec,
- int64_t prngSeed) : ReplicationCoordinatorImpl(settings,
- externalState,
- topCoord,
- prngSeed,
- nullptr,
- nullptr,
- replExec) { }
-
- ReplicationCoordinatorImpl::~ReplicationCoordinatorImpl() {}
-
- void ReplicationCoordinatorImpl::waitForStartUpComplete() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
- _rsConfigStateChange.wait(lk);
- }
- }
+struct ReplicationCoordinatorImpl::WaiterInfo {
+ /**
+ * Constructor takes the list of waiters and enqueues itself on the list, removing itself
+ * in the destructor.
+ */
+ WaiterInfo(std::vector<WaiterInfo*>* _list,
+ unsigned int _opID,
+ const OpTime* _opTime,
+ const WriteConcernOptions* _writeConcern,
+ stdx::condition_variable* _condVar)
+ : list(_list),
+ master(true),
+ opID(_opID),
+ opTime(_opTime),
+ writeConcern(_writeConcern),
+ condVar(_condVar) {
+ list->push_back(this);
+ }
+
+ ~WaiterInfo() {
+ list->erase(std::remove(list->begin(), list->end(), this), list->end());
+ }
+
+ std::vector<WaiterInfo*>* list;
+ bool master; // Set to false to indicate that stepDown was called while waiting
+ const unsigned int opID;
+ const OpTime* opTime;
+ const WriteConcernOptions* writeConcern;
+ stdx::condition_variable* condVar;
+};
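// WaiterInfo above is a scope guard: the constructor enqueues the waiter on the shared list
// and the destructor unconditionally removes it, so an early return can never leave a stale
// pointer behind. A standalone sketch of the same idiom, using only std types and a
// hypothetical name (ScopedListEntry):
#include <algorithm>
#include <vector>

struct ScopedListEntry {
    explicit ScopedListEntry(std::vector<ScopedListEntry*>* list) : _list(list) {
        _list->push_back(this);  // enqueue on construction
    }
    ~ScopedListEntry() {
        // dequeue on destruction, however the enclosing scope exits
        _list->erase(std::remove(_list->begin(), _list->end(), this), _list->end());
    }
    std::vector<ScopedListEntry*>* _list;
};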
- ReplicaSetConfig ReplicationCoordinatorImpl::getReplicaSetConfig_forTest() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _rsConfig;
+namespace {
+ReplicationCoordinator::Mode getReplicationModeFromSettings(const ReplSettings& settings) {
+ if (settings.usingReplSets()) {
+ return ReplicationCoordinator::modeReplSet;
}
-
- void ReplicationCoordinatorImpl::_updateLastVote(const LastVote& lastVote) {
- _topCoord->loadLastVote(lastVote);
+ if (settings.master || settings.slave) {
+ return ReplicationCoordinator::modeMasterSlave;
}
+ return ReplicationCoordinator::modeNone;
+}
+} // namespace
- bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) {
-
- StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(txn);
- if (!lastVote.isOK()) {
- log() << "Did not find local voted for document at startup; " << lastVote.getStatus();
- }
- else {
- LastVote vote = lastVote.getValue();
- _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_updateLastVote,
- this,
- vote));
- }
-
- StatusWith<BSONObj> cfg = _externalState->loadLocalConfigDocument(txn);
- if (!cfg.isOK()) {
- log() << "Did not find local replica set configuration document at startup; " <<
- cfg.getStatus();
- return true;
- }
- ReplicaSetConfig localConfig;
- Status status = localConfig.initialize(cfg.getValue());
- if (!status.isOK()) {
- error() << "Locally stored replica set configuration does not parse; See "
- "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
- "for information on how to recover from this. Got \"" <<
- status << "\" while parsing " << cfg.getValue();
- fassertFailedNoTrace(28545);
- }
-
- StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
-
- // Use a callback here, because _finishLoadLocalConfig calls isself() which requires
- // that the server's networking layer be up and running and accepting connections, which
- // doesn't happen until startReplication finishes.
+ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
+ const ReplSettings& settings,
+ ReplicationCoordinatorExternalState* externalState,
+ TopologyCoordinator* topCoord,
+ int64_t prngSeed,
+ NetworkInterface* network,
+ StorageInterface* storage,
+ ReplicationExecutor* replExec)
+ : _settings(settings),
+ _replMode(getReplicationModeFromSettings(settings)),
+ _topCoord(topCoord),
+ _replExecutorIfOwned(replExec ? nullptr
+ : new ReplicationExecutor(network, storage, prngSeed)),
+ _replExecutor(replExec ? *replExec : *_replExecutorIfOwned),
+ _externalState(externalState),
+ _inShutdown(false),
+ _memberState(MemberState::RS_STARTUP),
+ _isWaitingForDrainToComplete(false),
+ _rsConfigState(kConfigPreStart),
+ _selfIndex(-1),
+ _sleptLastElection(false),
+ _canAcceptNonLocalWrites(!(settings.usingReplSets() || settings.slave)),
+ _canServeNonLocalReads(0U),
+ _dr(DataReplicatorOptions(), &_replExecutor, this) {
+ if (!isReplEnabled()) {
+ return;
+ }
+
+ std::unique_ptr<SecureRandom> rbidGenerator(SecureRandom::create());
+ _rbid = static_cast<int>(rbidGenerator->nextInt64());
+ if (_rbid < 0) {
+ // Ensure _rbid is always positive
+ _rbid = -_rbid;
+ }
+
+ // Make sure there is always an entry in _slaveInfo for ourself.
+ SlaveInfo selfInfo;
+ selfInfo.self = true;
+ _slaveInfo.push_back(selfInfo);
+}
+
+ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
+ const ReplSettings& settings,
+ ReplicationCoordinatorExternalState* externalState,
+ NetworkInterface* network,
+ StorageInterface* storage,
+ TopologyCoordinator* topCoord,
+ int64_t prngSeed)
+ : ReplicationCoordinatorImpl(
+ settings, externalState, topCoord, prngSeed, network, storage, nullptr) {}
+
+ReplicationCoordinatorImpl::ReplicationCoordinatorImpl(
+ const ReplSettings& settings,
+ ReplicationCoordinatorExternalState* externalState,
+ TopologyCoordinator* topCoord,
+ ReplicationExecutor* replExec,
+ int64_t prngSeed)
+ : ReplicationCoordinatorImpl(
+ settings, externalState, topCoord, prngSeed, nullptr, nullptr, replExec) {}
+
+ReplicationCoordinatorImpl::~ReplicationCoordinatorImpl() {}
+
+void ReplicationCoordinatorImpl::waitForStartUpComplete() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
+ _rsConfigStateChange.wait(lk);
+ }
+}
+
+ReplicaSetConfig ReplicationCoordinatorImpl::getReplicaSetConfig_forTest() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _rsConfig;
+}
+
+void ReplicationCoordinatorImpl::_updateLastVote(const LastVote& lastVote) {
+ _topCoord->loadLastVote(lastVote);
+}
+
+bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) {
+ StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(txn);
+ if (!lastVote.isOK()) {
+ log() << "Did not find local voted for document at startup; " << lastVote.getStatus();
+ } else {
+ LastVote vote = lastVote.getValue();
_replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_finishLoadLocalConfig,
- this,
- stdx::placeholders::_1,
- localConfig,
- lastOpTimeStatus));
- return false;
+ stdx::bind(&ReplicationCoordinatorImpl::_updateLastVote, this, vote));
}
- void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& localConfig,
- const StatusWith<OpTime>& lastOpTimeStatus) {
- if (!cbData.status.isOK()) {
- LOG(1) << "Loading local replica set configuration failed due to " << cbData.status;
- return;
- }
-
- StatusWith<int> myIndex = validateConfigForStartUp(_externalState.get(),
- _rsConfig,
- localConfig);
- if (!myIndex.isOK()) {
- if (myIndex.getStatus() == ErrorCodes::NodeNotFound ||
- myIndex.getStatus() == ErrorCodes::DuplicateKey) {
- warning() << "Locally stored replica set configuration does not have a valid entry "
- "for the current node; waiting for reconfig or remote heartbeat; Got \"" <<
- myIndex.getStatus() << "\" while validating " << localConfig.toBSON();
- myIndex = StatusWith<int>(-1);
- }
- else {
- error() << "Locally stored replica set configuration is invalid; See "
- "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config"
- " for information on how to recover from this. Got \"" <<
- myIndex.getStatus() << "\" while validating " << localConfig.toBSON();
- fassertFailedNoTrace(28544);
- }
- }
-
- if (localConfig.getReplSetName() != _settings.ourSetName()) {
- warning() << "Local replica set configuration document reports set name of " <<
- localConfig.getReplSetName() << ", but command line reports " <<
- _settings.ourSetName() << "; waitng for reconfig or remote heartbeat";
+ StatusWith<BSONObj> cfg = _externalState->loadLocalConfigDocument(txn);
+ if (!cfg.isOK()) {
+ log() << "Did not find local replica set configuration document at startup; "
+ << cfg.getStatus();
+ return true;
+ }
+ ReplicaSetConfig localConfig;
+ Status status = localConfig.initialize(cfg.getValue());
+ if (!status.isOK()) {
+ error() << "Locally stored replica set configuration does not parse; See "
+ "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
+ "for information on how to recover from this. Got \"" << status
+ << "\" while parsing " << cfg.getValue();
+ fassertFailedNoTrace(28545);
+ }
+
+ StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
+
+ // Use a callback here, because _finishLoadLocalConfig calls isself() which requires
+    // Use a callback here, because _finishLoadLocalConfig calls isSelf(), which requires
+ // that the server's networking layer be up and running and accepting connections, which
+ // doesn't happen until startReplication finishes.
    _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_finishLoadLocalConfig,
+ this,
+ stdx::placeholders::_1,
+ localConfig,
+ lastOpTimeStatus));
+ return false;
+}
+
+void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& localConfig,
+ const StatusWith<OpTime>& lastOpTimeStatus) {
+ if (!cbData.status.isOK()) {
+ LOG(1) << "Loading local replica set configuration failed due to " << cbData.status;
+ return;
+ }
+
+ StatusWith<int> myIndex =
+ validateConfigForStartUp(_externalState.get(), _rsConfig, localConfig);
+ if (!myIndex.isOK()) {
+ if (myIndex.getStatus() == ErrorCodes::NodeNotFound ||
+ myIndex.getStatus() == ErrorCodes::DuplicateKey) {
+ warning() << "Locally stored replica set configuration does not have a valid entry "
+ "for the current node; waiting for reconfig or remote heartbeat; Got \""
+ << myIndex.getStatus() << "\" while validating " << localConfig.toBSON();
myIndex = StatusWith<int>(-1);
+ } else {
+ error() << "Locally stored replica set configuration is invalid; See "
+ "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config"
+ " for information on how to recover from this. Got \"" << myIndex.getStatus()
+ << "\" while validating " << localConfig.toBSON();
+ fassertFailedNoTrace(28544);
}
-
- // Do not check optime, if this node is an arbiter.
- bool isArbiter = myIndex.getValue() != -1 &&
- localConfig.getMemberAt(myIndex.getValue()).isArbiter();
- OpTime lastOpTime;
- if (!isArbiter) {
- if (!lastOpTimeStatus.isOK()) {
- warning() << "Failed to load timestamp of most recently applied operation; " <<
- lastOpTimeStatus.getStatus();
- }
- else {
- lastOpTime = lastOpTimeStatus.getValue();
- }
- }
-
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- invariant(_rsConfigState == kConfigStartingUp);
- const PostMemberStateUpdateAction action =
- _setCurrentRSConfig_inlock(localConfig, myIndex.getValue());
- _setMyLastOptime_inlock(&lk, lastOpTime, false);
- _externalState->setGlobalTimestamp(lastOpTime.getTimestamp());
- if (lk.owns_lock()) {
- lk.unlock();
- }
- _performPostMemberStateUpdateAction(action);
- _externalState->startThreads();
}
- void ReplicationCoordinatorImpl::startReplication(OperationContext* txn) {
- if (!isReplEnabled()) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _setConfigState_inlock(kConfigReplicationDisabled);
- return;
- }
-
- {
- OID rid = _externalState->ensureMe(txn);
-
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- fassert(18822, !_inShutdown);
- _setConfigState_inlock(kConfigStartingUp);
- _myRID = rid;
- _slaveInfo[_getMyIndexInSlaveInfo_inlock()].rid = rid;
- }
-
- if (!_settings.usingReplSets()) {
- // Must be Master/Slave
- invariant(_settings.master || _settings.slave);
- _externalState->startMasterSlave(txn);
- return;
- }
-
- _topCoordDriverThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run,
- &_replExecutor)));
+ if (localConfig.getReplSetName() != _settings.ourSetName()) {
+        warning() << "Local replica set configuration document reports set name of "
+                  << localConfig.getReplSetName() << ", but command line reports "
+                  << _settings.ourSetName() << "; waiting for reconfig or remote heartbeat";
+ myIndex = StatusWith<int>(-1);
+ }
- bool doneLoadingConfig = _startLoadLocalConfig(txn);
- if (doneLoadingConfig) {
- // If we're not done loading the config, then the config state will be set by
- // _finishLoadLocalConfig.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- invariant(!_rsConfig.isInitialized());
- _setConfigState_inlock(kConfigUninitialized);
+ // Do not check optime, if this node is an arbiter.
+ bool isArbiter =
+ myIndex.getValue() != -1 && localConfig.getMemberAt(myIndex.getValue()).isArbiter();
+ OpTime lastOpTime;
+ if (!isArbiter) {
+ if (!lastOpTimeStatus.isOK()) {
+ warning() << "Failed to load timestamp of most recently applied operation; "
+ << lastOpTimeStatus.getStatus();
+ } else {
+ lastOpTime = lastOpTimeStatus.getValue();
}
}
- void ReplicationCoordinatorImpl::shutdown() {
- // Shutdown must:
- // * prevent new threads from blocking in awaitReplication
- // * wake up all existing threads blocking in awaitReplication
- // * tell the ReplicationExecutor to shut down
- // * wait for the thread running the ReplicationExecutor to finish
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ invariant(_rsConfigState == kConfigStartingUp);
+ const PostMemberStateUpdateAction action =
+ _setCurrentRSConfig_inlock(localConfig, myIndex.getValue());
+ _setMyLastOptime_inlock(&lk, lastOpTime, false);
+ _externalState->setGlobalTimestamp(lastOpTime.getTimestamp());
+ if (lk.owns_lock()) {
+ lk.unlock();
+ }
+ _performPostMemberStateUpdateAction(action);
+ _externalState->startThreads();
+}
- if (!_settings.usingReplSets()) {
- return;
- }
+void ReplicationCoordinatorImpl::startReplication(OperationContext* txn) {
+ if (!isReplEnabled()) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _setConfigState_inlock(kConfigReplicationDisabled);
+ return;
+ }
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- fassert(28533, !_inShutdown);
- _inShutdown = true;
- if (_rsConfigState == kConfigPreStart) {
- warning() << "ReplicationCoordinatorImpl::shutdown() called before "
- "startReplication() finished. Shutting down without cleaning up the "
- "replication system";
- return;
- }
- fassert(18823, _rsConfigState != kConfigStartingUp);
- for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
- it != _replicationWaiterList.end(); ++it) {
- WaiterInfo* waiter = *it;
- waiter->condVar->notify_all();
- }
- }
+ {
+ OID rid = _externalState->ensureMe(txn);
- _replExecutor.shutdown();
- _topCoordDriverThread->join(); // must happen outside _mutex
- _externalState->shutdown();
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ fassert(18822, !_inShutdown);
+ _setConfigState_inlock(kConfigStartingUp);
+ _myRID = rid;
+ _slaveInfo[_getMyIndexInSlaveInfo_inlock()].rid = rid;
}
- const ReplSettings& ReplicationCoordinatorImpl::getSettings() const {
- return _settings;
+ if (!_settings.usingReplSets()) {
+ // Must be Master/Slave
+ invariant(_settings.master || _settings.slave);
+ _externalState->startMasterSlave(txn);
+ return;
}
- ReplicationCoordinator::Mode ReplicationCoordinatorImpl::getReplicationMode() const {
- return _replMode;
- }
+ _topCoordDriverThread.reset(
+ new stdx::thread(stdx::bind(&ReplicationExecutor::run, &_replExecutor)));
- MemberState ReplicationCoordinatorImpl::getMemberState() const {
+ bool doneLoadingConfig = _startLoadLocalConfig(txn);
+ if (doneLoadingConfig) {
+        // We are done loading the config (there was no local config document to load), so
+        // set the config state here; if we were not done, _finishLoadLocalConfig would set it.
stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _getMemberState_inlock();
+ invariant(!_rsConfig.isInitialized());
+ _setConfigState_inlock(kConfigUninitialized);
}
+}
- MemberState ReplicationCoordinatorImpl::_getMemberState_inlock() const {
- return _memberState;
+void ReplicationCoordinatorImpl::shutdown() {
+ // Shutdown must:
+ // * prevent new threads from blocking in awaitReplication
+ // * wake up all existing threads blocking in awaitReplication
+ // * tell the ReplicationExecutor to shut down
+ // * wait for the thread running the ReplicationExecutor to finish
+
+ if (!_settings.usingReplSets()) {
+ return;
}
- Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
+ {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- invariant(_rsConfig.isInitialized());
- uassert(28524,
- "Node not a member of the current set configuration",
- _selfIndex != -1);
- return _rsConfig.getMemberAt(_selfIndex).getSlaveDelay();
- }
-
- void ReplicationCoordinatorImpl::clearSyncSourceBlacklist() {
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_clearSyncSourceBlacklist_finish,
- this,
- stdx::placeholders::_1));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ fassert(28533, !_inShutdown);
+ _inShutdown = true;
+ if (_rsConfigState == kConfigPreStart) {
+ warning() << "ReplicationCoordinatorImpl::shutdown() called before "
+ "startReplication() finished. Shutting down without cleaning up the "
+ "replication system";
return;
}
- fassert(18907, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
+ fassert(18823, _rsConfigState != kConfigStartingUp);
+ for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
+ it != _replicationWaiterList.end();
+ ++it) {
+ WaiterInfo* waiter = *it;
+ waiter->condVar->notify_all();
+ }
+ }
+
+ _replExecutor.shutdown();
+ _topCoordDriverThread->join(); // must happen outside _mutex
+ _externalState->shutdown();
+}
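// A standalone sketch of the shutdown ordering described in the step list above, assuming
// plain std types and hypothetical names (shutdownService, worker): flip the shutdown flag
// and wake every waiter while holding the mutex, then join the worker thread outside it so
// the worker can drain without deadlocking.
#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex shutdownMutex;
std::condition_variable shutdownCv;
bool inShutdown = false;

void shutdownService(std::thread& worker) {
    {
        std::lock_guard<std::mutex> lk(shutdownMutex);
        inShutdown = true;
        shutdownCv.notify_all();  // wake any thread blocked waiting for replication
    }
    worker.join();                // must happen outside the mutex
}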
+
+const ReplSettings& ReplicationCoordinatorImpl::getSettings() const {
+ return _settings;
+}
+
+ReplicationCoordinator::Mode ReplicationCoordinatorImpl::getReplicationMode() const {
+ return _replMode;
+}
+
+MemberState ReplicationCoordinatorImpl::getMemberState() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _getMemberState_inlock();
+}
+
+MemberState ReplicationCoordinatorImpl::_getMemberState_inlock() const {
+ return _memberState;
+}
+
+Seconds ReplicationCoordinatorImpl::getSlaveDelaySecs() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ invariant(_rsConfig.isInitialized());
+ uassert(28524, "Node not a member of the current set configuration", _selfIndex != -1);
+ return _rsConfig.getMemberAt(_selfIndex).getSlaveDelay();
+}
+
+void ReplicationCoordinatorImpl::clearSyncSourceBlacklist() {
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_clearSyncSourceBlacklist_finish,
+ this,
+ stdx::placeholders::_1));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(18907, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+}
+
+void ReplicationCoordinatorImpl::_clearSyncSourceBlacklist_finish(
+ const ReplicationExecutor::CallbackArgs& cbData) {
+ if (cbData.status == ErrorCodes::CallbackCanceled)
+ return;
+ _topCoord->clearSyncSourceBlacklist();
+}
+
+bool ReplicationCoordinatorImpl::setFollowerMode(const MemberState& newState) {
+ StatusWith<ReplicationExecutor::EventHandle> finishedSettingFollowerMode =
+ _replExecutor.makeEvent();
+ if (finishedSettingFollowerMode.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return false;
}
-
- void ReplicationCoordinatorImpl::_clearSyncSourceBlacklist_finish(
- const ReplicationExecutor::CallbackArgs& cbData) {
- if (cbData.status == ErrorCodes::CallbackCanceled)
- return;
- _topCoord->clearSyncSourceBlacklist();
+ fassert(18812, finishedSettingFollowerMode.getStatus());
+ bool success = false;
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_setFollowerModeFinish,
+ this,
+ stdx::placeholders::_1,
+ newState,
+ finishedSettingFollowerMode.getValue(),
+ &success));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return false;
}
+ fassert(18699, cbh.getStatus());
+ _replExecutor.waitForEvent(finishedSettingFollowerMode.getValue());
+ return success;
+}
- bool ReplicationCoordinatorImpl::setFollowerMode(const MemberState& newState) {
- StatusWith<ReplicationExecutor::EventHandle> finishedSettingFollowerMode =
- _replExecutor.makeEvent();
- if (finishedSettingFollowerMode.getStatus() == ErrorCodes::ShutdownInProgress) {
- return false;
- }
- fassert(18812, finishedSettingFollowerMode.getStatus());
- bool success = false;
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_setFollowerModeFinish,
- this,
- stdx::placeholders::_1,
- newState,
- finishedSettingFollowerMode.getValue(),
- &success));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return false;
- }
- fassert(18699, cbh.getStatus());
- _replExecutor.waitForEvent(finishedSettingFollowerMode.getValue());
- return success;
+void ReplicationCoordinatorImpl::_setFollowerModeFinish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const MemberState& newState,
+ const ReplicationExecutor::EventHandle& finishedSettingFollowerMode,
+ bool* success) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
}
-
- void ReplicationCoordinatorImpl::_setFollowerModeFinish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const MemberState& newState,
- const ReplicationExecutor::EventHandle& finishedSettingFollowerMode,
- bool* success) {
-
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- if (newState == _topCoord->getMemberState()) {
- *success = true;
- _replExecutor.signalEvent(finishedSettingFollowerMode);
- return;
- }
- if (_topCoord->getRole() == TopologyCoordinator::Role::leader) {
- *success = false;
- _replExecutor.signalEvent(finishedSettingFollowerMode);
- return;
- }
-
- if (_topCoord->getRole() == TopologyCoordinator::Role::candidate) {
- // We are a candidate, which means _topCoord believs us to be in state RS_SECONDARY, and
- // we know that newState != RS_SECONDARY because we would have returned early, above if
- // the old and new state were equal. So, cancel the running election and try again to
- // finish setting the follower mode.
- invariant(_freshnessChecker);
- _freshnessChecker->cancel(&_replExecutor);
- if (_electCmdRunner) {
- _electCmdRunner->cancel(&_replExecutor);
- }
- _replExecutor.onEvent(
- _electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_setFollowerModeFinish,
- this,
- stdx::placeholders::_1,
- newState,
- finishedSettingFollowerMode,
- success));
- return;
- }
-
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _topCoord->setFollowerMode(newState.s);
-
- const PostMemberStateUpdateAction action =
- _updateMemberStateFromTopologyCoordinator_inlock();
- lk.unlock();
- _performPostMemberStateUpdateAction(action);
+ if (newState == _topCoord->getMemberState()) {
*success = true;
_replExecutor.signalEvent(finishedSettingFollowerMode);
+ return;
}
-
- bool ReplicationCoordinatorImpl::isWaitingForApplierToDrain() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _isWaitingForDrainToComplete;
- }
-
- void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn) {
- // This logic is a little complicated in order to avoid acquiring the global exclusive lock
- // unnecessarily. This is important because the applier may call signalDrainComplete()
- // whenever it wants, not only when the ReplicationCoordinator is expecting it.
- //
- // The steps are:
- // 1.) Check to see if we're waiting for this signal. If not, return early.
- // 2.) Otherwise, release the mutex while acquiring the global exclusive lock,
- // since that might take a while (NB there's a deadlock cycle otherwise, too).
- // 3.) Re-check to see if we've somehow left drain mode. If we have not, clear
- // _isWaitingForDrainToComplete, set the flag allowing non-local database writes and
- // drop the mutex. At this point, no writes can occur from other threads, due to the
- // global exclusive lock.
- // 4.) Drop all temp collections.
- // 5.) Drop the global exclusive lock.
- //
- // Because replicatable writes are forbidden while in drain mode, and we don't exit drain
- // mode until we have the global exclusive lock, which forbids all other threads from making
- // writes, we know that from the time that _isWaitingForDrainToComplete is set in
- // _performPostMemberStateUpdateAction(kActionWinElection) until this method returns, no
- // external writes will be processed. This is important so that a new temp collection isn't
- // introduced on the new primary before we drop all the temp collections.
-
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (!_isWaitingForDrainToComplete) {
- return;
- }
- lk.unlock();
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
- lk.lock();
- if (!_isWaitingForDrainToComplete) {
- return;
+ if (_topCoord->getRole() == TopologyCoordinator::Role::leader) {
+ *success = false;
+ _replExecutor.signalEvent(finishedSettingFollowerMode);
+ return;
+ }
+
+ if (_topCoord->getRole() == TopologyCoordinator::Role::candidate) {
+        // We are a candidate, which means _topCoord believes us to be in state RS_SECONDARY, and
+        // we know that newState != RS_SECONDARY because we would have returned early, above, if
+ // the old and new state were equal. So, cancel the running election and try again to
+ // finish setting the follower mode.
+ invariant(_freshnessChecker);
+ _freshnessChecker->cancel(&_replExecutor);
+ if (_electCmdRunner) {
+ _electCmdRunner->cancel(&_replExecutor);
+ }
+ _replExecutor.onEvent(_electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_setFollowerModeFinish,
+ this,
+ stdx::placeholders::_1,
+ newState,
+ finishedSettingFollowerMode,
+ success));
+ return;
+ }
+
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _topCoord->setFollowerMode(newState.s);
+
+ const PostMemberStateUpdateAction action = _updateMemberStateFromTopologyCoordinator_inlock();
+ lk.unlock();
+ _performPostMemberStateUpdateAction(action);
+ *success = true;
+ _replExecutor.signalEvent(finishedSettingFollowerMode);
+}
+
+bool ReplicationCoordinatorImpl::isWaitingForApplierToDrain() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _isWaitingForDrainToComplete;
+}
+
+void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* txn) {
+ // This logic is a little complicated in order to avoid acquiring the global exclusive lock
+ // unnecessarily. This is important because the applier may call signalDrainComplete()
+ // whenever it wants, not only when the ReplicationCoordinator is expecting it.
+ //
+ // The steps are:
+ // 1.) Check to see if we're waiting for this signal. If not, return early.
+ // 2.) Otherwise, release the mutex while acquiring the global exclusive lock,
+ // since that might take a while (NB there's a deadlock cycle otherwise, too).
+ // 3.) Re-check to see if we've somehow left drain mode. If we have not, clear
+ // _isWaitingForDrainToComplete, set the flag allowing non-local database writes and
+ // drop the mutex. At this point, no writes can occur from other threads, due to the
+ // global exclusive lock.
+ // 4.) Drop all temp collections.
+ // 5.) Drop the global exclusive lock.
+ //
+ // Because replicatable writes are forbidden while in drain mode, and we don't exit drain
+ // mode until we have the global exclusive lock, which forbids all other threads from making
+ // writes, we know that from the time that _isWaitingForDrainToComplete is set in
+ // _performPostMemberStateUpdateAction(kActionWinElection) until this method returns, no
+ // external writes will be processed. This is important so that a new temp collection isn't
+ // introduced on the new primary before we drop all the temp collections.
+
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (!_isWaitingForDrainToComplete) {
+ return;
+ }
+ lk.unlock();
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
+ lk.lock();
+ if (!_isWaitingForDrainToComplete) {
+ return;
+ }
+ _isWaitingForDrainToComplete = false;
+ _canAcceptNonLocalWrites = true;
+ lk.unlock();
+ _externalState->dropAllTempCollections(txn);
+ log() << "transition to primary complete; database writes are now permitted" << rsLog;
+}
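// The drain-exit sequence above is the "unlock, take the heavier lock, relock, re-check"
// pattern from the numbered comment: the flag is re-checked after reacquiring the mutex
// because another thread may have completed the transition while the mutex was released.
// A standalone sketch with std types and hypothetical names (a second mutex stands in for
// the global exclusive lock):
#include <mutex>

std::mutex stateMutex;
std::mutex globalExclusiveLock;
bool waitingForDrain = true;

void exitDrainMode() {
    std::unique_lock<std::mutex> lk(stateMutex);
    if (!waitingForDrain) {
        return;   // step 1: not waiting for the signal, nothing to do
    }
    lk.unlock();  // step 2: drop the mutex before taking the slow, heavier lock
    std::lock_guard<std::mutex> global(globalExclusiveLock);
    lk.lock();    // step 3: reacquire and re-check; another thread may have raced us
    if (!waitingForDrain) {
        return;
    }
    waitingForDrain = false;  // safe: both locks are held at this point
}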
+
+void ReplicationCoordinatorImpl::signalUpstreamUpdater() {
+ _externalState->forwardSlaveProgress();
+}
+
+ReplicationCoordinatorImpl::SlaveInfo* ReplicationCoordinatorImpl::_findSlaveInfoByMemberID_inlock(
+ int memberId) {
+ for (SlaveInfoVector::iterator it = _slaveInfo.begin(); it != _slaveInfo.end(); ++it) {
+ if (it->memberId == memberId) {
+ return &(*it);
+ }
+ }
+ return NULL;
+}
+
+ReplicationCoordinatorImpl::SlaveInfo* ReplicationCoordinatorImpl::_findSlaveInfoByRID_inlock(
+ const OID& rid) {
+ for (SlaveInfoVector::iterator it = _slaveInfo.begin(); it != _slaveInfo.end(); ++it) {
+ if (it->rid == rid) {
+ return &(*it);
+ }
+ }
+ return NULL;
+}
+
+void ReplicationCoordinatorImpl::_addSlaveInfo_inlock(const SlaveInfo& slaveInfo) {
+ invariant(getReplicationMode() == modeMasterSlave);
+ _slaveInfo.push_back(slaveInfo);
+
+ // Wake up any threads waiting for replication that now have their replication
+ // check satisfied
+ _wakeReadyWaiters_inlock();
+}
+
+void ReplicationCoordinatorImpl::_updateSlaveInfoOptime_inlock(SlaveInfo* slaveInfo,
+ const OpTime& opTime) {
+ slaveInfo->opTime = opTime;
+
+ // Wake up any threads waiting for replication that now have their replication
+ // check satisfied
+ _wakeReadyWaiters_inlock();
+}
+
+void ReplicationCoordinatorImpl::_updateSlaveInfoFromConfig_inlock() {
+ invariant(_settings.usingReplSets());
+
+ SlaveInfoVector oldSlaveInfos;
+ _slaveInfo.swap(oldSlaveInfos);
+
+ if (_selfIndex == -1) {
+ // If we aren't in the config then the only data we care about is for ourself
+ for (SlaveInfoVector::const_iterator it = oldSlaveInfos.begin(); it != oldSlaveInfos.end();
+ ++it) {
+ if (it->self) {
+ SlaveInfo slaveInfo = *it;
+ slaveInfo.memberId = -1;
+ _slaveInfo.push_back(slaveInfo);
+ return;
+ }
}
- _isWaitingForDrainToComplete = false;
- _canAcceptNonLocalWrites = true;
- lk.unlock();
- _externalState->dropAllTempCollections(txn);
- log() << "transition to primary complete; database writes are now permitted" << rsLog;
+ invariant(false); // There should always have been an entry for ourself
}
- void ReplicationCoordinatorImpl::signalUpstreamUpdater() {
- _externalState->forwardSlaveProgress();
- }
+ for (int i = 0; i < _rsConfig.getNumMembers(); ++i) {
+ const MemberConfig& memberConfig = _rsConfig.getMemberAt(i);
+ int memberId = memberConfig.getId();
+ const HostAndPort& memberHostAndPort = memberConfig.getHostAndPort();
- ReplicationCoordinatorImpl::SlaveInfo*
- ReplicationCoordinatorImpl::_findSlaveInfoByMemberID_inlock(int memberId) {
- for (SlaveInfoVector::iterator it = _slaveInfo.begin(); it != _slaveInfo.end(); ++it) {
- if (it->memberId == memberId) {
- return &(*it);
- }
- }
- return NULL;
- }
+ SlaveInfo slaveInfo;
- ReplicationCoordinatorImpl::SlaveInfo*
- ReplicationCoordinatorImpl::_findSlaveInfoByRID_inlock(const OID& rid) {
- for (SlaveInfoVector::iterator it = _slaveInfo.begin(); it != _slaveInfo.end(); ++it) {
- if (it->rid == rid) {
- return &(*it);
+ // Check if the node existed with the same member ID and hostname in the old data
+ for (SlaveInfoVector::const_iterator it = oldSlaveInfos.begin(); it != oldSlaveInfos.end();
+ ++it) {
+ if ((it->memberId == memberId && it->hostAndPort == memberHostAndPort) ||
+ (i == _selfIndex && it->self)) {
+ slaveInfo = *it;
}
}
- return NULL;
- }
- void ReplicationCoordinatorImpl::_addSlaveInfo_inlock(const SlaveInfo& slaveInfo) {
- invariant(getReplicationMode() == modeMasterSlave);
+ // Make sure you have the most up-to-date info for member ID and hostAndPort.
+ slaveInfo.memberId = memberId;
+ slaveInfo.hostAndPort = memberHostAndPort;
_slaveInfo.push_back(slaveInfo);
-
- // Wake up any threads waiting for replication that now have their replication
- // check satisfied
- _wakeReadyWaiters_inlock();
}
+ invariant(static_cast<int>(_slaveInfo.size()) == _rsConfig.getNumMembers());
+}
- void ReplicationCoordinatorImpl::_updateSlaveInfoOptime_inlock(SlaveInfo* slaveInfo,
- const OpTime& opTime) {
-
- slaveInfo->opTime = opTime;
-
- // Wake up any threads waiting for replication that now have their replication
- // check satisfied
- _wakeReadyWaiters_inlock();
- }
-
- void ReplicationCoordinatorImpl::_updateSlaveInfoFromConfig_inlock() {
+size_t ReplicationCoordinatorImpl::_getMyIndexInSlaveInfo_inlock() const {
+ if (getReplicationMode() == modeMasterSlave) {
+ // Self data always lives in the first entry in _slaveInfo for master/slave
+ return 0;
+ } else {
invariant(_settings.usingReplSets());
-
- SlaveInfoVector oldSlaveInfos;
- _slaveInfo.swap(oldSlaveInfos);
-
if (_selfIndex == -1) {
- // If we aren't in the config then the only data we care about is for ourself
- for (SlaveInfoVector::const_iterator it = oldSlaveInfos.begin();
- it != oldSlaveInfos.end(); ++it) {
- if (it->self) {
- SlaveInfo slaveInfo = *it;
- slaveInfo.memberId = -1;
- _slaveInfo.push_back(slaveInfo);
- return;
- }
- }
- invariant(false); // There should always have been an entry for ourself
- }
-
- for (int i = 0; i < _rsConfig.getNumMembers(); ++i) {
- const MemberConfig& memberConfig = _rsConfig.getMemberAt(i);
- int memberId = memberConfig.getId();
- const HostAndPort& memberHostAndPort = memberConfig.getHostAndPort();
-
- SlaveInfo slaveInfo;
-
- // Check if the node existed with the same member ID and hostname in the old data
- for (SlaveInfoVector::const_iterator it = oldSlaveInfos.begin();
- it != oldSlaveInfos.end(); ++it) {
- if ((it->memberId == memberId && it->hostAndPort == memberHostAndPort)
- || (i == _selfIndex && it->self)) {
- slaveInfo = *it;
- }
- }
-
- // Make sure you have the most up-to-date info for member ID and hostAndPort.
- slaveInfo.memberId = memberId;
- slaveInfo.hostAndPort = memberHostAndPort;
- _slaveInfo.push_back(slaveInfo);
- }
- invariant(static_cast<int>(_slaveInfo.size()) == _rsConfig.getNumMembers());
- }
-
- size_t ReplicationCoordinatorImpl::_getMyIndexInSlaveInfo_inlock() const {
- if (getReplicationMode() == modeMasterSlave) {
- // Self data always lives in the first entry in _slaveInfo for master/slave
+ invariant(_slaveInfo.size() == 1);
return 0;
- }
- else {
- invariant(_settings.usingReplSets());
- if (_selfIndex == -1) {
- invariant(_slaveInfo.size() == 1);
- return 0;
- }
- else {
- return _selfIndex;
- }
+ } else {
+ return _selfIndex;
}
}
+}
- Status ReplicationCoordinatorImpl::setLastOptimeForSlave(const OID& rid,
- const Timestamp& ts) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- massert(28576,
- "Received an old style replication progress update, which is only used for Master/"
- "Slave replication now, but this node is not using Master/Slave replication. "
- "This is likely caused by an old (pre-2.6) member syncing from this node.",
- getReplicationMode() == modeMasterSlave);
-
- // Term == 0 for master-slave
- OpTime opTime(ts, OpTime::kDefaultTerm);
- SlaveInfo* slaveInfo = _findSlaveInfoByRID_inlock(rid);
- if (slaveInfo) {
- if (slaveInfo->opTime < opTime) {
- _updateSlaveInfoOptime_inlock(slaveInfo, opTime);
- }
- }
- else {
- SlaveInfo newSlaveInfo;
- newSlaveInfo.rid = rid;
- newSlaveInfo.opTime = opTime;
- _addSlaveInfo_inlock(newSlaveInfo);
- }
- return Status::OK();
- }
+Status ReplicationCoordinatorImpl::setLastOptimeForSlave(const OID& rid, const Timestamp& ts) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ massert(28576,
+ "Received an old style replication progress update, which is only used for Master/"
+ "Slave replication now, but this node is not using Master/Slave replication. "
+ "This is likely caused by an old (pre-2.6) member syncing from this node.",
+ getReplicationMode() == modeMasterSlave);
- void ReplicationCoordinatorImpl::setMyHeartbeatMessage(const std::string& msg) {
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&TopologyCoordinator::setMyHeartbeatMessage,
- _topCoord.get(),
- _replExecutor.now(),
- msg));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
+ // Term == 0 for master-slave
+ OpTime opTime(ts, OpTime::kDefaultTerm);
+ SlaveInfo* slaveInfo = _findSlaveInfoByRID_inlock(rid);
+ if (slaveInfo) {
+ if (slaveInfo->opTime < opTime) {
+ _updateSlaveInfoOptime_inlock(slaveInfo, opTime);
+ }
+ } else {
+ SlaveInfo newSlaveInfo;
+ newSlaveInfo.rid = rid;
+ newSlaveInfo.opTime = opTime;
+ _addSlaveInfo_inlock(newSlaveInfo);
+ }
+ return Status::OK();
+}
+
+void ReplicationCoordinatorImpl::setMyHeartbeatMessage(const std::string& msg) {
+ CBHStatus cbh = _replExecutor.scheduleWork(stdx::bind(
+ &TopologyCoordinator::setMyHeartbeatMessage, _topCoord.get(), _replExecutor.now(), msg));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(28540, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+}
+
+void ReplicationCoordinatorImpl::setMyLastOptime(const OpTime& opTime) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ _setMyLastOptime_inlock(&lock, opTime, false);
+}
+
+void ReplicationCoordinatorImpl::resetMyLastOptime() {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ // Reset to uninitialized OpTime
+ _setMyLastOptime_inlock(&lock, OpTime(), true);
+}
+
+void ReplicationCoordinatorImpl::_setMyLastOptime_inlock(stdx::unique_lock<stdx::mutex>* lock,
+ const OpTime& opTime,
+ bool isRollbackAllowed) {
+ invariant(lock->owns_lock());
+ SlaveInfo* mySlaveInfo = &_slaveInfo[_getMyIndexInSlaveInfo_inlock()];
+ invariant(isRollbackAllowed || mySlaveInfo->opTime <= opTime);
+ _updateSlaveInfoOptime_inlock(mySlaveInfo, opTime);
+
+ if (getReplicationMode() != modeReplSet) {
+ return;
+ }
+
+ for (auto& opTimeWaiter : _opTimeWaiterList) {
+ if (*(opTimeWaiter->opTime) <= opTime) {
+ opTimeWaiter->condVar->notify_all();
}
- fassert(28540, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- }
-
- void ReplicationCoordinatorImpl::setMyLastOptime(const OpTime& opTime) {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- _setMyLastOptime_inlock(&lock, opTime, false);
}
- void ReplicationCoordinatorImpl::resetMyLastOptime() {
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- // Reset to uninitialized OpTime
- _setMyLastOptime_inlock(&lock, OpTime(), true);
+ if (_getMemberState_inlock().primary()) {
+ return;
}
- void ReplicationCoordinatorImpl::_setMyLastOptime_inlock(
- stdx::unique_lock<stdx::mutex>* lock, const OpTime& opTime, bool isRollbackAllowed) {
- invariant(lock->owns_lock());
- SlaveInfo* mySlaveInfo = &_slaveInfo[_getMyIndexInSlaveInfo_inlock()];
- invariant(isRollbackAllowed || mySlaveInfo->opTime <= opTime);
- _updateSlaveInfoOptime_inlock(mySlaveInfo, opTime);
-
- if (getReplicationMode() != modeReplSet) {
- return;
- }
+ lock->unlock();
- for (auto& opTimeWaiter : _opTimeWaiterList) {
- if (*(opTimeWaiter->opTime) <= opTime) {
- opTimeWaiter->condVar->notify_all();
- }
- }
+ _externalState->forwardSlaveProgress(); // Must do this outside _mutex
+}
- if (_getMemberState_inlock().primary()) {
- return;
- }
+OpTime ReplicationCoordinatorImpl::getMyLastOptime() const {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ return _getMyLastOptime_inlock();
+}
- lock->unlock();
+ReadAfterOpTimeResponse ReplicationCoordinatorImpl::waitUntilOpTime(
+ OperationContext* txn, const ReadAfterOpTimeArgs& settings) {
+ const auto& ts = settings.getOpTime();
- _externalState->forwardSlaveProgress(); // Must do this outside _mutex
+ if (ts.isNull()) {
+ return ReadAfterOpTimeResponse();
}
- OpTime ReplicationCoordinatorImpl::getMyLastOptime() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- return _getMyLastOptime_inlock();
+ if (getReplicationMode() != repl::ReplicationCoordinator::modeReplSet) {
+ return ReadAfterOpTimeResponse(
+ Status(ErrorCodes::NotAReplicaSet,
+ "node needs to be a replica set member to use read after opTime"));
}
- ReadAfterOpTimeResponse ReplicationCoordinatorImpl::waitUntilOpTime(
- OperationContext* txn,
- const ReadAfterOpTimeArgs& settings) {
- const auto& ts = settings.getOpTime();
-
- if (ts.isNull()) {
- return ReadAfterOpTimeResponse();
- }
-
- if (getReplicationMode() != repl::ReplicationCoordinator::modeReplSet) {
- return ReadAfterOpTimeResponse(Status(ErrorCodes::NotAReplicaSet,
- "node needs to be a replica set member to use read after opTime"));
- }
-
- // TODO: SERVER-18298 enable code once V1 protocol is fully implemented.
+// TODO: SERVER-18298 enable code once V1 protocol is fully implemented.
#if 0
if (!isV1ElectionProtocol()) {
return ReadAfterOpTimeResponse(Status(ErrorCodes::IncompatibleElectionProtocol,
@@ -795,1013 +762,976 @@ namespace {
}
#endif
- Timer timer;
- stdx::unique_lock<stdx::mutex> lock(_mutex);
-
- while (ts > _getMyLastOptime_inlock()) {
- Status interruptedStatus = txn->checkForInterruptNoAssert();
- if (!interruptedStatus.isOK()) {
- return ReadAfterOpTimeResponse(interruptedStatus, Milliseconds(timer.millis()));
- }
-
- if (_inShutdown) {
- return ReadAfterOpTimeResponse(
- Status(ErrorCodes::ShutdownInProgress, "shutting down"),
- Milliseconds(timer.millis()));
- }
-
- stdx::condition_variable condVar;
- WaiterInfo waitInfo(&_opTimeWaiterList,
- txn->getOpID(),
- &ts,
- nullptr, // Don't care about write concern.
- &condVar);
+ Timer timer;
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
- if (CurOp::get(txn)->isMaxTimeSet()) {
- condVar.wait_for(lock, Microseconds(txn->getRemainingMaxTimeMicros()));
- }
- else {
- condVar.wait(lock);
- }
+ while (ts > _getMyLastOptime_inlock()) {
+ Status interruptedStatus = txn->checkForInterruptNoAssert();
+ if (!interruptedStatus.isOK()) {
+ return ReadAfterOpTimeResponse(interruptedStatus, Milliseconds(timer.millis()));
}
- return ReadAfterOpTimeResponse(Status::OK(), Milliseconds(timer.millis()));
- }
+ if (_inShutdown) {
+ return ReadAfterOpTimeResponse(Status(ErrorCodes::ShutdownInProgress, "shutting down"),
+ Milliseconds(timer.millis()));
+ }
- OpTime ReplicationCoordinatorImpl::_getMyLastOptime_inlock() const {
- return _slaveInfo[_getMyIndexInSlaveInfo_inlock()].opTime;
+ stdx::condition_variable condVar;
+ WaiterInfo waitInfo(&_opTimeWaiterList,
+ txn->getOpID(),
+ &ts,
+ nullptr, // Don't care about write concern.
+ &condVar);
+
+ if (CurOp::get(txn)->isMaxTimeSet()) {
+ condVar.wait_for(lock, Microseconds(txn->getRemainingMaxTimeMicros()));
+ } else {
+ condVar.wait(lock);
+ }
}
- Status ReplicationCoordinatorImpl::setLastOptime_forTest(long long cfgVer,
- long long memberId,
- const OpTime& opTime) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- invariant(getReplicationMode() == modeReplSet);
+ return ReadAfterOpTimeResponse(Status::OK(), Milliseconds(timer.millis()));
+}
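// waitUntilOpTime above is a standard "wait until a monotonically advancing value reaches a
// target" loop; every wakeup re-checks for shutdown (and interruption) before sleeping again.
// A standalone sketch with std types and hypothetical names (lastApplied, waitUntilApplied):
#include <condition_variable>
#include <mutex>

std::mutex applyMutex;
std::condition_variable applyCv;
long long lastApplied = 0;
bool shuttingDown = false;

bool waitUntilApplied(long long target) {
    std::unique_lock<std::mutex> lk(applyMutex);
    while (lastApplied < target) {
        if (shuttingDown) {
            return false;  // give up rather than block across shutdown
        }
        applyCv.wait(lk);  // signaled whenever lastApplied advances
    }
    return true;
}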
- const UpdatePositionArgs::UpdateInfo update(OID(), opTime, cfgVer, memberId);
- long long configVersion;
- return _setLastOptime_inlock(update, &configVersion);
- }
+OpTime ReplicationCoordinatorImpl::_getMyLastOptime_inlock() const {
+ return _slaveInfo[_getMyIndexInSlaveInfo_inlock()].opTime;
+}
- Status ReplicationCoordinatorImpl::_setLastOptime_inlock(
- const UpdatePositionArgs::UpdateInfo& args, long long* configVersion) {
+Status ReplicationCoordinatorImpl::setLastOptime_forTest(long long cfgVer,
+ long long memberId,
+ const OpTime& opTime) {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ invariant(getReplicationMode() == modeReplSet);
- if (_selfIndex == -1) {
- // Ignore updates when we're in state REMOVED
- return Status(ErrorCodes::NotMasterOrSecondaryCode,
- "Received replSetUpdatePosition command but we are in state REMOVED");
- }
- invariant(getReplicationMode() == modeReplSet);
-
- if (args.memberId < 0) {
- std::string errmsg = str::stream()
- << "Received replSetUpdatePosition for node with memberId "
- << args.memberId << " which is negative and therefore invalid";
- LOG(1) << errmsg;
- return Status(ErrorCodes::NodeNotFound, errmsg);
- }
+ const UpdatePositionArgs::UpdateInfo update(OID(), opTime, cfgVer, memberId);
+ long long configVersion;
+ return _setLastOptime_inlock(update, &configVersion);
+}
- if (args.rid == _getMyRID_inlock() ||
- args.memberId == _rsConfig.getMemberAt(_selfIndex).getId()) {
- // Do not let remote nodes tell us what our optime is.
- return Status::OK();
- }
+Status ReplicationCoordinatorImpl::_setLastOptime_inlock(const UpdatePositionArgs::UpdateInfo& args,
+ long long* configVersion) {
+ if (_selfIndex == -1) {
+ // Ignore updates when we're in state REMOVED
+ return Status(ErrorCodes::NotMasterOrSecondaryCode,
+ "Received replSetUpdatePosition command but we are in state REMOVED");
+ }
+ invariant(getReplicationMode() == modeReplSet);
- LOG(2) << "received notification that node with memberID " << args.memberId <<
- " in config with version " << args.cfgver << " has reached optime: " << args.ts;
-
- SlaveInfo* slaveInfo = NULL;
- if (args.cfgver != _rsConfig.getConfigVersion()) {
- std::string errmsg = str::stream()
- << "Received replSetUpdatePosition for node with memberId "
- << args.memberId << " whose config version of " << args.cfgver
- << " doesn't match our config version of "
- << _rsConfig.getConfigVersion();
- LOG(1) << errmsg;
- *configVersion = _rsConfig.getConfigVersion();
- return Status(ErrorCodes::InvalidReplicaSetConfig, errmsg);
- }
+ if (args.memberId < 0) {
+ std::string errmsg = str::stream()
+ << "Received replSetUpdatePosition for node with memberId " << args.memberId
+ << " which is negative and therefore invalid";
+ LOG(1) << errmsg;
+ return Status(ErrorCodes::NodeNotFound, errmsg);
+ }
- slaveInfo = _findSlaveInfoByMemberID_inlock(args.memberId);
- if (!slaveInfo) {
- invariant(!_rsConfig.findMemberByID(args.memberId));
+ if (args.rid == _getMyRID_inlock() ||
+ args.memberId == _rsConfig.getMemberAt(_selfIndex).getId()) {
+ // Do not let remote nodes tell us what our optime is.
+ return Status::OK();
+ }
- std::string errmsg = str::stream()
- << "Received replSetUpdatePosition for node with memberId "
- << args.memberId << " which doesn't exist in our config";
- LOG(1) << errmsg;
- return Status(ErrorCodes::NodeNotFound, errmsg);
- }
+ LOG(2) << "received notification that node with memberID " << args.memberId
+ << " in config with version " << args.cfgver << " has reached optime: " << args.ts;
- invariant(args.memberId == slaveInfo->memberId);
+ SlaveInfo* slaveInfo = NULL;
+ if (args.cfgver != _rsConfig.getConfigVersion()) {
+ std::string errmsg = str::stream()
+ << "Received replSetUpdatePosition for node with memberId " << args.memberId
+ << " whose config version of " << args.cfgver << " doesn't match our config version of "
+ << _rsConfig.getConfigVersion();
+ LOG(1) << errmsg;
+ *configVersion = _rsConfig.getConfigVersion();
+ return Status(ErrorCodes::InvalidReplicaSetConfig, errmsg);
+ }
- LOG(3) << "Node with memberID " << args.memberId << " currently has optime " <<
- slaveInfo->opTime << "; updating to " << args.ts;
+ slaveInfo = _findSlaveInfoByMemberID_inlock(args.memberId);
+ if (!slaveInfo) {
+ invariant(!_rsConfig.findMemberByID(args.memberId));
- // Only update remote optimes if they increase.
- if (slaveInfo->opTime < args.ts) {
- _updateSlaveInfoOptime_inlock(slaveInfo, args.ts);
- }
- _updateLastCommittedOpTime_inlock();
- return Status::OK();
+ std::string errmsg = str::stream()
+ << "Received replSetUpdatePosition for node with memberId " << args.memberId
+ << " which doesn't exist in our config";
+ LOG(1) << errmsg;
+ return Status(ErrorCodes::NodeNotFound, errmsg);
}
- void ReplicationCoordinatorImpl::interrupt(unsigned opId) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
- it != _replicationWaiterList.end(); ++it) {
- WaiterInfo* info = *it;
- if (info->opID == opId) {
- info->condVar->notify_all();
- return;
- }
- }
+ invariant(args.memberId == slaveInfo->memberId);
- for (auto& opTimeWaiter : _opTimeWaiterList) {
- if (opTimeWaiter->opID == opId) {
- opTimeWaiter->condVar->notify_all();
- return;
- }
- }
+ LOG(3) << "Node with memberID " << args.memberId << " currently has optime "
+ << slaveInfo->opTime << "; updating to " << args.ts;
- _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_signalStepDownWaitersFromCallback,
- this,
- stdx::placeholders::_1));
+ // Only update remote optimes if they increase.
+ if (slaveInfo->opTime < args.ts) {
+ _updateSlaveInfoOptime_inlock(slaveInfo, args.ts);
}
+ _updateLastCommittedOpTime_inlock();
+ return Status::OK();
+}
- void ReplicationCoordinatorImpl::interruptAll() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
- it != _replicationWaiterList.end(); ++it) {
- WaiterInfo* info = *it;
+void ReplicationCoordinatorImpl::interrupt(unsigned opId) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
+ it != _replicationWaiterList.end();
+ ++it) {
+ WaiterInfo* info = *it;
+ if (info->opID == opId) {
info->condVar->notify_all();
+ return;
}
+ }
- for (auto& opTimeWaiter : _opTimeWaiterList) {
+ for (auto& opTimeWaiter : _opTimeWaiterList) {
+ if (opTimeWaiter->opID == opId) {
opTimeWaiter->condVar->notify_all();
+ return;
}
-
- _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_signalStepDownWaitersFromCallback,
- this,
- stdx::placeholders::_1));
}
- bool ReplicationCoordinatorImpl::_doneWaitingForReplication_inlock(
- const OpTime& opTime, const WriteConcernOptions& writeConcern) {
- Status status = _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
- if (!status.isOK()) {
- return true;
- }
+ _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_signalStepDownWaitersFromCallback,
+ this,
+ stdx::placeholders::_1));
+}
- if (!writeConcern.wMode.empty()) {
- StringData patternName;
- if (writeConcern.wMode == WriteConcernOptions::kMajority) {
- patternName = ReplicaSetConfig::kMajorityWriteConcernModeName;
- }
- else {
- patternName = writeConcern.wMode;
- }
- StatusWith<ReplicaSetTagPattern> tagPattern =
- _rsConfig.findCustomWriteMode(patternName);
- if (!tagPattern.isOK()) {
- return true;
- }
- return _haveTaggedNodesReachedOpTime_inlock(opTime, tagPattern.getValue());
- }
- else {
- return _haveNumNodesReachedOpTime_inlock(opTime, writeConcern.wNumNodes);
- }
+void ReplicationCoordinatorImpl::interruptAll() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
+ it != _replicationWaiterList.end();
+ ++it) {
+ WaiterInfo* info = *it;
+ info->condVar->notify_all();
}
- bool ReplicationCoordinatorImpl::_haveNumNodesReachedOpTime_inlock(const OpTime& opTime,
- int numNodes) {
- if (_getMyLastOptime_inlock() < opTime) {
- // Secondaries that are for some reason ahead of us should not allow us to
- // satisfy a write concern if we aren't caught up ourselves.
- return false;
- }
+ for (auto& opTimeWaiter : _opTimeWaiterList) {
+ opTimeWaiter->condVar->notify_all();
+ }
- for (SlaveInfoVector::iterator it = _slaveInfo.begin();
- it != _slaveInfo.end(); ++it) {
+ _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_signalStepDownWaitersFromCallback,
+ this,
+ stdx::placeholders::_1));
+}
- const OpTime& slaveTime = it->opTime;
- if (slaveTime >= opTime) {
- --numNodes;
- }
+bool ReplicationCoordinatorImpl::_doneWaitingForReplication_inlock(
+ const OpTime& opTime, const WriteConcernOptions& writeConcern) {
+ Status status = _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
+ if (!status.isOK()) {
+ return true;
+ }
- if (numNodes <= 0) {
- return true;
- }
+ if (!writeConcern.wMode.empty()) {
+ StringData patternName;
+ if (writeConcern.wMode == WriteConcernOptions::kMajority) {
+ patternName = ReplicaSetConfig::kMajorityWriteConcernModeName;
+ } else {
+ patternName = writeConcern.wMode;
}
- return false;
+ StatusWith<ReplicaSetTagPattern> tagPattern = _rsConfig.findCustomWriteMode(patternName);
+ if (!tagPattern.isOK()) {
+ return true;
+ }
+ return _haveTaggedNodesReachedOpTime_inlock(opTime, tagPattern.getValue());
+ } else {
+ return _haveNumNodesReachedOpTime_inlock(opTime, writeConcern.wNumNodes);
}
+}
- bool ReplicationCoordinatorImpl::_haveTaggedNodesReachedOpTime_inlock(
- const OpTime& opTime, const ReplicaSetTagPattern& tagPattern) {
-
- ReplicaSetTagMatch matcher(tagPattern);
- for (SlaveInfoVector::iterator it = _slaveInfo.begin();
- it != _slaveInfo.end(); ++it) {
-
- const OpTime& slaveTime = it->opTime;
- if (slaveTime >= opTime) {
- // This node has reached the desired optime, now we need to check if it is a part
- // of the tagPattern.
- const MemberConfig* memberConfig = _rsConfig.findMemberByID(it->memberId);
- invariant(memberConfig);
- for (MemberConfig::TagIterator it = memberConfig->tagsBegin();
- it != memberConfig->tagsEnd(); ++it) {
- if (matcher.update(*it)) {
- return true;
- }
- }
- }
- }
+bool ReplicationCoordinatorImpl::_haveNumNodesReachedOpTime_inlock(const OpTime& opTime,
+ int numNodes) {
+ if (_getMyLastOptime_inlock() < opTime) {
+ // Secondaries that are for some reason ahead of us should not allow us to
+ // satisfy a write concern if we aren't caught up ourselves.
return false;
}
- ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitReplication(
- OperationContext* txn,
- const OpTime& opTime,
- const WriteConcernOptions& writeConcern) {
- Timer timer;
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- return _awaitReplication_inlock(&timer, &lock, txn, opTime, writeConcern);
- }
-
- ReplicationCoordinator::StatusAndDuration
- ReplicationCoordinatorImpl::awaitReplicationOfLastOpForClient(
- OperationContext* txn,
- const WriteConcernOptions& writeConcern) {
- Timer timer;
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- return _awaitReplication_inlock(
- &timer,
- &lock,
- txn,
- ReplClientInfo::forClient(txn->getClient()).getLastOp(),
- writeConcern);
- }
-
- ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::_awaitReplication_inlock(
- const Timer* timer,
- stdx::unique_lock<stdx::mutex>* lock,
- OperationContext* txn,
- const OpTime& opTime,
- const WriteConcernOptions& writeConcern) {
-
- const Mode replMode = getReplicationMode();
- if (replMode == modeNone || serverGlobalParams.configsvr) {
- // no replication check needed (validated above)
- return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
- }
-
- if (replMode == modeMasterSlave && writeConcern.wMode == WriteConcernOptions::kMajority) {
- // with master/slave, majority is equivalent to w=1
- return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
- }
-
- if (opTime.isNull()) {
- // If waiting for the empty optime, always say it's been replicated.
- return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
- }
-
- if (replMode == modeReplSet && !_memberState.primary()) {
- return StatusAndDuration(Status(ErrorCodes::NotMaster,
- "Not master while waiting for replication"),
- Milliseconds(timer->millis()));
+ for (SlaveInfoVector::iterator it = _slaveInfo.begin(); it != _slaveInfo.end(); ++it) {
+ const OpTime& slaveTime = it->opTime;
+ if (slaveTime >= opTime) {
+ --numNodes;
}
- if (writeConcern.wMode.empty()) {
- if (writeConcern.wNumNodes < 1) {
- return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
- }
- else if (writeConcern.wNumNodes == 1 && _getMyLastOptime_inlock() >= opTime) {
- return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
- }
+ if (numNodes <= 0) {
+ return true;
}
+ }
+ return false;
+}
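
For reference, the w:&lt;number&gt; counting rule above can be illustrated with a standalone sketch. This is not MongoDB code; the types are simplified placeholders, and the caller is assumed to have already verified that the local node itself is caught up, as the function above does first.

#include <cstdint>
#include <vector>

// Simplified stand-in for an oplog position; the real OpTime carries more state.
using OpTime = std::uint64_t;

// Returns true once at least numNodes members have applied an operation at or
// past target - the same counting rule as _haveNumNodesReachedOpTime_inlock.
bool haveNumNodesReachedOpTime(const std::vector<OpTime>& memberOpTimes,
                               OpTime target,
                               int numNodes) {
    for (OpTime optime : memberOpTimes) {
        if (optime >= target && --numNodes <= 0) {
            return true;
        }
    }
    return false;
}
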
- // Must hold _mutex before constructing waitInfo as it will modify _replicationWaiterList
- stdx::condition_variable condVar;
- WaiterInfo waitInfo(
- &_replicationWaiterList, txn->getOpID(), &opTime, &writeConcern, &condVar);
- while (!_doneWaitingForReplication_inlock(opTime, writeConcern)) {
- const Milliseconds elapsed{timer->millis()};
-
- Status interruptedStatus = txn->checkForInterruptNoAssert();
- if (!interruptedStatus.isOK()) {
- return StatusAndDuration(interruptedStatus, elapsed);
- }
-
- if (!waitInfo.master) {
- return StatusAndDuration(Status(ErrorCodes::NotMaster,
- "Not master anymore while waiting for replication"
- " - this most likely means that a step down"
- " occurred while waiting for replication"),
- elapsed);
- }
-
- if (writeConcern.wTimeout != WriteConcernOptions::kNoTimeout &&
- elapsed > Milliseconds{writeConcern.wTimeout}) {
- return StatusAndDuration(Status(ErrorCodes::WriteConcernFailed,
- "waiting for replication timed out"),
- elapsed);
- }
-
- if (_inShutdown) {
- return StatusAndDuration(Status(ErrorCodes::ShutdownInProgress,
- "Replication is being shut down"),
- elapsed);
- }
-
- const Microseconds maxTimeMicrosRemaining{txn->getRemainingMaxTimeMicros()};
- Microseconds waitTime = Microseconds::max();
- if (maxTimeMicrosRemaining != Microseconds::zero()) {
- waitTime = maxTimeMicrosRemaining;
- }
- if (writeConcern.wTimeout != WriteConcernOptions::kNoTimeout) {
- waitTime = std::min<Microseconds>(Milliseconds{writeConcern.wTimeout} - elapsed,
- waitTime);
- }
-
- if (waitTime == Microseconds::max()) {
- condVar.wait(*lock);
- }
- else {
- condVar.wait_for(*lock, waitTime);
+bool ReplicationCoordinatorImpl::_haveTaggedNodesReachedOpTime_inlock(
+ const OpTime& opTime, const ReplicaSetTagPattern& tagPattern) {
+ ReplicaSetTagMatch matcher(tagPattern);
+ for (SlaveInfoVector::iterator it = _slaveInfo.begin(); it != _slaveInfo.end(); ++it) {
+ const OpTime& slaveTime = it->opTime;
+ if (slaveTime >= opTime) {
+ // This node has reached the desired optime; now check whether it is part
+ // of the tagPattern.
+ const MemberConfig* memberConfig = _rsConfig.findMemberByID(it->memberId);
+ invariant(memberConfig);
+ for (MemberConfig::TagIterator it = memberConfig->tagsBegin();
+ it != memberConfig->tagsEnd();
+ ++it) {
+ if (matcher.update(*it)) {
+ return true;
+ }
}
}
-
- Status status = _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
- if (!status.isOK()) {
- return StatusAndDuration(status, Milliseconds(timer->millis()));
- }
-
+ }
+ return false;
+}
+
+ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitReplication(
+ OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
+ Timer timer;
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ return _awaitReplication_inlock(&timer, &lock, txn, opTime, writeConcern);
+}
+
+ReplicationCoordinator::StatusAndDuration
+ReplicationCoordinatorImpl::awaitReplicationOfLastOpForClient(
+ OperationContext* txn, const WriteConcernOptions& writeConcern) {
+ Timer timer;
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ return _awaitReplication_inlock(
+ &timer, &lock, txn, ReplClientInfo::forClient(txn->getClient()).getLastOp(), writeConcern);
+}
+
+ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::_awaitReplication_inlock(
+ const Timer* timer,
+ stdx::unique_lock<stdx::mutex>* lock,
+ OperationContext* txn,
+ const OpTime& opTime,
+ const WriteConcernOptions& writeConcern) {
+ const Mode replMode = getReplicationMode();
+ if (replMode == modeNone || serverGlobalParams.configsvr) {
+ // no replication check needed (validated above)
return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
}
- Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn,
- bool force,
- const Milliseconds& waitTime,
- const Milliseconds& stepdownTime) {
- const Date_t startTime = _replExecutor.now();
- const Date_t stepDownUntil = startTime + stepdownTime;
- const Date_t waitUntil = startTime + waitTime;
-
- if (!getMemberState().primary()) {
- // Note this check is inherently racy - it's always possible for the node to
- // stepdown from some other path before we acquire the global shared lock, but
- // that's okay because we are resiliant to that happening in _stepDownContinue.
- return Status(ErrorCodes::NotMaster, "not primary so can't step down");
- }
-
- LockResult lockState = txn->lockState()->lockGlobalBegin(MODE_S);
- // We've requested the global shared lock which will stop new writes from coming in,
- // but existing writes could take a long time to finish, so kill all user operations
- // to help us get the global lock faster.
- _externalState->killAllUserOperations(txn);
-
- if (lockState == LOCK_WAITING) {
- lockState = txn->lockState()->lockGlobalComplete(
- durationCount<Milliseconds>(stepdownTime));
- if (lockState == LOCK_TIMEOUT) {
- return Status(ErrorCodes::ExceededTimeLimit,
- "Could not acquire the global shared lock within the amount of time "
- "specified that we should step down for");
- }
- }
- invariant(lockState == LOCK_OK);
- ON_BLOCK_EXIT(&Locker::unlockAll, txn->lockState());
- // From this point onward we are guaranteed to be holding the global shared lock.
+ if (replMode == modeMasterSlave && writeConcern.wMode == WriteConcernOptions::kMajority) {
+ // with master/slave, majority is equivalent to w=1
+ return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
+ }
- StatusWith<ReplicationExecutor::EventHandle> finishedEvent = _replExecutor.makeEvent();
- if (finishedEvent.getStatus() == ErrorCodes::ShutdownInProgress) {
- return finishedEvent.getStatus();
- }
- fassert(26000, finishedEvent.getStatus());
- Status result(ErrorCodes::InternalError, "didn't set status in _stepDownContinue");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_stepDownContinue,
- this,
- stdx::placeholders::_1,
- finishedEvent.getValue(),
- txn,
- waitUntil,
- stepDownUntil,
- force,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return cbh.getStatus();
- }
- fassert(18809, cbh.getStatus());
- cbh = _replExecutor.scheduleWorkAt(
- waitUntil,
- stdx::bind(&ReplicationCoordinatorImpl::_signalStepDownWaitersFromCallback,
- this,
- stdx::placeholders::_1));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return cbh.getStatus();
- }
- fassert(26001, cbh.getStatus());
- _replExecutor.waitForEvent(finishedEvent.getValue());
- return result;
+ if (opTime.isNull()) {
+ // If waiting for the empty optime, always say it's been replicated.
+ return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
}
- void ReplicationCoordinatorImpl::_signalStepDownWaitersFromCallback(
- const ReplicationExecutor::CallbackArgs& cbData) {
- if (!cbData.status.isOK()) {
- return;
- }
+ if (replMode == modeReplSet && !_memberState.primary()) {
+ return StatusAndDuration(
+ Status(ErrorCodes::NotMaster, "Not master while waiting for replication"),
+ Milliseconds(timer->millis()));
+ }
- _signalStepDownWaiters();
- }
-
- void ReplicationCoordinatorImpl::_signalStepDownWaiters() {
- std::for_each(_stepDownWaiters.begin(),
- _stepDownWaiters.end(),
- stdx::bind(&ReplicationExecutor::signalEvent,
- &_replExecutor,
- stdx::placeholders::_1));
- _stepDownWaiters.clear();
- }
-
- void ReplicationCoordinatorImpl::_stepDownContinue(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicationExecutor::EventHandle finishedEvent,
- OperationContext* txn,
- const Date_t waitUntil,
- const Date_t stepDownUntil,
- bool force,
- Status* result) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- // Cancelation only occurs on shutdown, which will also handle signaling the event.
- *result = Status(ErrorCodes::ShutdownInProgress, "Shutting down replication");
- return;
+ if (writeConcern.wMode.empty()) {
+ if (writeConcern.wNumNodes < 1) {
+ return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
+ } else if (writeConcern.wNumNodes == 1 && _getMyLastOptime_inlock() >= opTime) {
+ return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
}
+ }
- ScopeGuard allFinishedGuard = MakeGuard(
- stdx::bind(&ReplicationExecutor::signalEvent, &_replExecutor, finishedEvent));
- if (!cbData.status.isOK()) {
- *result = cbData.status;
- return;
- }
+ // Must hold _mutex before constructing waitInfo as it will modify _replicationWaiterList
+ stdx::condition_variable condVar;
+ WaiterInfo waitInfo(&_replicationWaiterList, txn->getOpID(), &opTime, &writeConcern, &condVar);
+ while (!_doneWaitingForReplication_inlock(opTime, writeConcern)) {
+ const Milliseconds elapsed{timer->millis()};
Status interruptedStatus = txn->checkForInterruptNoAssert();
if (!interruptedStatus.isOK()) {
- *result = interruptedStatus;
- return;
+ return StatusAndDuration(interruptedStatus, elapsed);
}
- if (_topCoord->getRole() != TopologyCoordinator::Role::leader) {
- *result = Status(ErrorCodes::NotMaster,
- "Already stepped down from primary while processing step down "
- "request");
- return;
- }
- const Date_t now = _replExecutor.now();
- if (now >= stepDownUntil) {
- *result = Status(ErrorCodes::ExceededTimeLimit,
- "By the time we were ready to step down, we were already past the "
- "time we were supposed to step down until");
- return;
+ if (!waitInfo.master) {
+ return StatusAndDuration(Status(ErrorCodes::NotMaster,
+ "Not master anymore while waiting for replication"
+ " - this most likely means that a step down"
+ " occurred while waiting for replication"),
+ elapsed);
}
- bool forceNow = now >= waitUntil ? force : false;
- if (_topCoord->stepDown(stepDownUntil, forceNow, getMyLastOptime())) {
- // Schedule work to (potentially) step back up once the stepdown period has ended.
- _replExecutor.scheduleWorkAt(stepDownUntil,
- stdx::bind(&ReplicationCoordinatorImpl::_handleTimePassing,
- this,
- stdx::placeholders::_1));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- const PostMemberStateUpdateAction action =
- _updateMemberStateFromTopologyCoordinator_inlock();
- lk.unlock();
- _performPostMemberStateUpdateAction(action);
- *result = Status::OK();
- return;
+ if (writeConcern.wTimeout != WriteConcernOptions::kNoTimeout &&
+ elapsed > Milliseconds{writeConcern.wTimeout}) {
+ return StatusAndDuration(
+ Status(ErrorCodes::WriteConcernFailed, "waiting for replication timed out"),
+ elapsed);
}
- // Step down failed. Keep waiting if we can, otherwise finish.
- if (now >= waitUntil) {
- *result = Status(ErrorCodes::ExceededTimeLimit, str::stream() <<
- "No electable secondaries caught up as of " <<
- dateToISOStringLocal(now) <<
- ". Please use {force: true} to force node to step down.");
- return;
+ if (_inShutdown) {
+ return StatusAndDuration(
+ Status(ErrorCodes::ShutdownInProgress, "Replication is being shut down"), elapsed);
}
- if (_stepDownWaiters.empty()) {
- StatusWith<ReplicationExecutor::EventHandle> reschedEvent =
- _replExecutor.makeEvent();
- if (!reschedEvent.isOK()) {
- *result = reschedEvent.getStatus();
- return;
- }
- _stepDownWaiters.push_back(reschedEvent.getValue());
+ const Microseconds maxTimeMicrosRemaining{txn->getRemainingMaxTimeMicros()};
+ Microseconds waitTime = Microseconds::max();
+ if (maxTimeMicrosRemaining != Microseconds::zero()) {
+ waitTime = maxTimeMicrosRemaining;
}
- CBHStatus cbh = _replExecutor.onEvent(
- _stepDownWaiters.back(),
- stdx::bind(&ReplicationCoordinatorImpl::_stepDownContinue,
- this,
- stdx::placeholders::_1,
- finishedEvent,
- txn,
- waitUntil,
- stepDownUntil,
- force,
- result));
- if (!cbh.isOK()) {
- *result = cbh.getStatus();
- return;
+ if (writeConcern.wTimeout != WriteConcernOptions::kNoTimeout) {
+ waitTime =
+ std::min<Microseconds>(Milliseconds{writeConcern.wTimeout} - elapsed, waitTime);
}
- allFinishedGuard.Dismiss();
- }
- void ReplicationCoordinatorImpl::_handleTimePassing(
- const ReplicationExecutor::CallbackArgs& cbData) {
- if (!cbData.status.isOK()) {
- return;
- }
+ if (waitTime == Microseconds::max()) {
+ condVar.wait(*lock);
+ } else {
+ condVar.wait_for(*lock, waitTime);
+ }
+ }
+
+ Status status = _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
+ if (!status.isOK()) {
+ return StatusAndDuration(status, Milliseconds(timer->millis()));
+ }
+
+ return StatusAndDuration(Status::OK(), Milliseconds(timer->millis()));
+}
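
The waiting logic above follows a standard timed condition-variable loop. A minimal, self-contained sketch of that pattern (std types only; the predicate and timeout are placeholders, not the coordinator's actual state) looks like:

#include <chrono>
#include <condition_variable>
#include <mutex>

// Wait until pred() holds or the deadline passes; re-check the predicate on
// every wakeup so spurious wakeups and timeouts are both handled, as in
// _awaitReplication_inlock above.
template <typename Pred>
bool waitForCondition(std::mutex& m,
                      std::condition_variable& cv,
                      Pred pred,
                      std::chrono::milliseconds timeout) {
    std::unique_lock<std::mutex> lk(m);
    const auto deadline = std::chrono::steady_clock::now() + timeout;
    while (!pred()) {
        if (cv.wait_until(lk, deadline) == std::cv_status::timeout) {
            return pred();
        }
    }
    return true;
}
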
+
+Status ReplicationCoordinatorImpl::stepDown(OperationContext* txn,
+ bool force,
+ const Milliseconds& waitTime,
+ const Milliseconds& stepdownTime) {
+ const Date_t startTime = _replExecutor.now();
+ const Date_t stepDownUntil = startTime + stepdownTime;
+ const Date_t waitUntil = startTime + waitTime;
+
+ if (!getMemberState().primary()) {
+ // Note this check is inherently racy - it's always possible for the node to
+ // step down from some other path before we acquire the global shared lock, but
+ // that's okay because we are resilient to that happening in _stepDownContinue.
+ return Status(ErrorCodes::NotMaster, "not primary so can't step down");
+ }
+
+ LockResult lockState = txn->lockState()->lockGlobalBegin(MODE_S);
+ // We've requested the global shared lock which will stop new writes from coming in,
+ // but existing writes could take a long time to finish, so kill all user operations
+ // to help us get the global lock faster.
+ _externalState->killAllUserOperations(txn);
+
+ if (lockState == LOCK_WAITING) {
+ lockState = txn->lockState()->lockGlobalComplete(durationCount<Milliseconds>(stepdownTime));
+ if (lockState == LOCK_TIMEOUT) {
+ return Status(ErrorCodes::ExceededTimeLimit,
+ "Could not acquire the global shared lock within the amount of time "
+ "specified that we should step down for");
+ }
+ }
+ invariant(lockState == LOCK_OK);
+ ON_BLOCK_EXIT(&Locker::unlockAll, txn->lockState());
+ // From this point onward we are guaranteed to be holding the global shared lock.
+
+ StatusWith<ReplicationExecutor::EventHandle> finishedEvent = _replExecutor.makeEvent();
+ if (finishedEvent.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return finishedEvent.getStatus();
+ }
+ fassert(26000, finishedEvent.getStatus());
+ Status result(ErrorCodes::InternalError, "didn't set status in _stepDownContinue");
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_stepDownContinue,
+ this,
+ stdx::placeholders::_1,
+ finishedEvent.getValue(),
+ txn,
+ waitUntil,
+ stepDownUntil,
+ force,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return cbh.getStatus();
+ }
+ fassert(18809, cbh.getStatus());
+ cbh = _replExecutor.scheduleWorkAt(
+ waitUntil,
+ stdx::bind(&ReplicationCoordinatorImpl::_signalStepDownWaitersFromCallback,
+ this,
+ stdx::placeholders::_1));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return cbh.getStatus();
+ }
+ fassert(26001, cbh.getStatus());
+ _replExecutor.waitForEvent(finishedEvent.getValue());
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_signalStepDownWaitersFromCallback(
+ const ReplicationExecutor::CallbackArgs& cbData) {
+ if (!cbData.status.isOK()) {
+ return;
+ }
+
+ _signalStepDownWaiters();
+}
+
+void ReplicationCoordinatorImpl::_signalStepDownWaiters() {
+ std::for_each(
+ _stepDownWaiters.begin(),
+ _stepDownWaiters.end(),
+ stdx::bind(&ReplicationExecutor::signalEvent, &_replExecutor, stdx::placeholders::_1));
+ _stepDownWaiters.clear();
+}
+
+void ReplicationCoordinatorImpl::_stepDownContinue(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicationExecutor::EventHandle finishedEvent,
+ OperationContext* txn,
+ const Date_t waitUntil,
+ const Date_t stepDownUntil,
+ bool force,
+ Status* result) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ // Cancelation only occurs on shutdown, which will also handle signaling the event.
+ *result = Status(ErrorCodes::ShutdownInProgress, "Shutting down replication");
+ return;
+ }
+
+ ScopeGuard allFinishedGuard =
+ MakeGuard(stdx::bind(&ReplicationExecutor::signalEvent, &_replExecutor, finishedEvent));
+ if (!cbData.status.isOK()) {
+ *result = cbData.status;
+ return;
+ }
+
+ Status interruptedStatus = txn->checkForInterruptNoAssert();
+ if (!interruptedStatus.isOK()) {
+ *result = interruptedStatus;
+ return;
+ }
+
+ if (_topCoord->getRole() != TopologyCoordinator::Role::leader) {
+ *result = Status(ErrorCodes::NotMaster,
+ "Already stepped down from primary while processing step down "
+ "request");
+ return;
+ }
+ const Date_t now = _replExecutor.now();
+ if (now >= stepDownUntil) {
+ *result = Status(ErrorCodes::ExceededTimeLimit,
+ "By the time we were ready to step down, we were already past the "
+ "time we were supposed to step down until");
+ return;
+ }
+ bool forceNow = now >= waitUntil ? force : false;
+ if (_topCoord->stepDown(stepDownUntil, forceNow, getMyLastOptime())) {
+ // Schedule work to (potentially) step back up once the stepdown period has ended.
+ _replExecutor.scheduleWorkAt(stepDownUntil,
+ stdx::bind(&ReplicationCoordinatorImpl::_handleTimePassing,
+ this,
+ stdx::placeholders::_1));
- if (_topCoord->becomeCandidateIfStepdownPeriodOverAndSingleNodeSet(_replExecutor.now())) {
- _performPostMemberStateUpdateAction(kActionWinElection);
- }
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ const PostMemberStateUpdateAction action =
+ _updateMemberStateFromTopologyCoordinator_inlock();
+ lk.unlock();
+ _performPostMemberStateUpdateAction(action);
+ *result = Status::OK();
+ return;
}
- bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() {
- if (_settings.usingReplSets()) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (getReplicationMode() == modeReplSet && _getMemberState_inlock().primary()) {
- return true;
- }
- return false;
- }
-
- if (!_settings.slave)
- return true;
-
-
- // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed
- if (replAllDead) {
- return false;
- }
-
- if (_settings.master) {
- // if running with --master --slave, allow.
- return true;
- }
-
- return false;
+ // Step down failed. Keep waiting if we can, otherwise finish.
+ if (now >= waitUntil) {
+ *result = Status(ErrorCodes::ExceededTimeLimit,
+ str::stream() << "No electable secondaries caught up as of "
+ << dateToISOStringLocal(now)
+ << ". Please use {force: true} to force node to step down.");
+ return;
}
- bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(StringData dbName) {
- // _canAcceptNonLocalWrites is always true for standalone nodes, always false for nodes
- // started with --slave, and adjusted based on primary+drain state in replica sets.
- //
- // That is, stand-alone nodes, non-slave nodes and drained replica set primaries can always
- // accept writes. Similarly, writes are always permitted to the "local" database. Finally,
- // in the event that a node is started with --slave and --master, we allow writes unless the
- // master/slave system has set the replAllDead flag.
- if (_canAcceptNonLocalWrites) {
- return true;
- }
- if (dbName == "local") {
- return true;
+ if (_stepDownWaiters.empty()) {
+ StatusWith<ReplicationExecutor::EventHandle> reschedEvent = _replExecutor.makeEvent();
+ if (!reschedEvent.isOK()) {
+ *result = reschedEvent.getStatus();
+ return;
}
- return !replAllDead && _settings.master;
+ _stepDownWaiters.push_back(reschedEvent.getValue());
}
-
- bool ReplicationCoordinatorImpl::canAcceptWritesFor(const NamespaceString& ns) {
- if (_memberState.rollback() && ns.isOplog()) {
- return false;
- }
- StringData dbName = ns.db();
- return canAcceptWritesForDatabase(dbName);
+ CBHStatus cbh = _replExecutor.onEvent(_stepDownWaiters.back(),
+ stdx::bind(&ReplicationCoordinatorImpl::_stepDownContinue,
+ this,
+ stdx::placeholders::_1,
+ finishedEvent,
+ txn,
+ waitUntil,
+ stepDownUntil,
+ force,
+ result));
+ if (!cbh.isOK()) {
+ *result = cbh.getStatus();
+ return;
}
+ allFinishedGuard.Dismiss();
+}
- Status ReplicationCoordinatorImpl::checkCanServeReadsFor(OperationContext* txn,
- const NamespaceString& ns,
- bool slaveOk) {
- if (_memberState.rollback() && ns.isOplog()) {
- return Status(ErrorCodes::NotMasterOrSecondaryCode,
- "cannot read from oplog collection while in rollback");
- }
- if (txn->getClient()->isInDirectClient()) {
- return Status::OK();
- }
- if (canAcceptWritesFor(ns)) {
- return Status::OK();
- }
- if (_settings.slave || _settings.master) {
- return Status::OK();
- }
- if (slaveOk) {
- if (_canServeNonLocalReads.loadRelaxed()) {
- return Status::OK();
- }
- return Status(
- ErrorCodes::NotMasterOrSecondaryCode,
- "not master or secondary; cannot currently read from this replSet member");
- }
- return Status(ErrorCodes::NotMasterNoSlaveOkCode, "not master and slaveOk=false");
+void ReplicationCoordinatorImpl::_handleTimePassing(
+ const ReplicationExecutor::CallbackArgs& cbData) {
+ if (!cbData.status.isOK()) {
+ return;
}
- bool ReplicationCoordinatorImpl::isInPrimaryOrSecondaryState() const {
- return _canServeNonLocalReads.loadRelaxed();
+ if (_topCoord->becomeCandidateIfStepdownPeriodOverAndSingleNodeSet(_replExecutor.now())) {
+ _performPostMemberStateUpdateAction(kActionWinElection);
}
+}
- bool ReplicationCoordinatorImpl::shouldIgnoreUniqueIndex(const IndexDescriptor* idx) {
- if (!idx->unique()) {
- return false;
- }
- // Never ignore _id index
- if (idx->isIdIndex()) {
- return false;
- }
- if (nsToDatabaseSubstring(idx->parentNS()) == "local" ) {
- // always enforce on local
- return false;
- }
+bool ReplicationCoordinatorImpl::isMasterForReportingPurposes() {
+ if (_settings.usingReplSets()) {
stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (getReplicationMode() != modeReplSet) {
- return false;
- }
- // see SERVER-6671
- MemberState ms = _getMemberState_inlock();
- switch ( ms.s ) {
- case MemberState::RS_SECONDARY:
- case MemberState::RS_RECOVERING:
- case MemberState::RS_ROLLBACK:
- case MemberState::RS_STARTUP2:
+ if (getReplicationMode() == modeReplSet && _getMemberState_inlock().primary()) {
return true;
- default:
- return false;
}
+ return false;
}
- OID ReplicationCoordinatorImpl::getElectionId() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- return _electionId;
- }
-
- OID ReplicationCoordinatorImpl::getMyRID() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- return _getMyRID_inlock();
- }
-
- OID ReplicationCoordinatorImpl::_getMyRID_inlock() const {
- return _myRID;
- }
+ if (!_settings.slave)
+ return true;
- int ReplicationCoordinatorImpl::getMyId() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- return _getMyId_inlock();
- }
- int ReplicationCoordinatorImpl::_getMyId_inlock() const {
- const MemberConfig& self = _rsConfig.getMemberAt(_selfIndex);
- return self.getId();
+ // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed
+ if (replAllDead) {
+ return false;
}
- bool ReplicationCoordinatorImpl::prepareReplSetUpdatePositionCommand(
- BSONObjBuilder* cmdBuilder) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- invariant(_rsConfig.isInitialized());
- // do not send updates if we have been removed from the config
- if (_selfIndex == -1) {
- return false;
- }
- cmdBuilder->append("replSetUpdatePosition", 1);
- // create an array containing objects each member connected to us and for ourself
- BSONArrayBuilder arrayBuilder(cmdBuilder->subarrayStart("optimes"));
- {
- for (SlaveInfoVector::const_iterator itr = _slaveInfo.begin();
- itr != _slaveInfo.end(); ++itr) {
- if (itr->opTime.isNull()) {
- // Don't include info on members we haven't heard from yet.
- continue;
- }
- BSONObjBuilder entry(arrayBuilder.subobjStart());
- entry.append("_id", itr->rid);
- entry.append("optime", itr->opTime.getTimestamp());
- entry.append("memberId", itr->memberId);
- entry.append("cfgver", _rsConfig.getConfigVersion());
- // SERVER-14550 Even though the "config" field isn't used on the other end in 3.0,
- // we need to keep sending it for 2.6 compatibility.
- // TODO(spencer): Remove this after 3.0 is released.
- const MemberConfig* member = _rsConfig.findMemberByID(itr->memberId);
- fassert(18651, member);
- entry.append("config", member->toBSON(_rsConfig.getTagConfig()));
- }
- }
+ if (_settings.master) {
+ // if running with --master --slave, allow.
return true;
}
- Status ReplicationCoordinatorImpl::processReplSetGetStatus(BSONObjBuilder* response) {
- Status result(ErrorCodes::InternalError, "didn't set status in prepareStatusResponse");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&TopologyCoordinator::prepareStatusResponse,
- _topCoord.get(),
- stdx::placeholders::_1,
- _replExecutor.now(),
- time(0) - serverGlobalParams.started,
- getMyLastOptime(),
- response,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
- }
- fassert(18640, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
+ return false;
+}
- return result;
+bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(StringData dbName) {
+ // _canAcceptNonLocalWrites is always true for standalone nodes, always false for nodes
+ // started with --slave, and adjusted based on primary+drain state in replica sets.
+ //
+ // That is, stand-alone nodes, non-slave nodes and drained replica set primaries can always
+ // accept writes. Similarly, writes are always permitted to the "local" database. Finally,
+ // in the event that a node is started with --slave and --master, we allow writes unless the
+ // master/slave system has set the replAllDead flag.
+ if (_canAcceptNonLocalWrites) {
+ return true;
}
-
- void ReplicationCoordinatorImpl::fillIsMasterForReplSet(IsMasterResponse* response) {
- invariant(getSettings().usingReplSets());
-
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_fillIsMasterForReplSet_finish,
- this,
- stdx::placeholders::_1,
- response));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- response->markAsShutdownInProgress();
- return;
- }
- fassert(28602, cbh.getStatus());
-
- _replExecutor.wait(cbh.getValue());
- if (isWaitingForApplierToDrain()) {
- // Report that we are secondary to ismaster callers until drain completes.
- response->setIsMaster(false);
- response->setIsSecondary(true);
- }
+ if (dbName == "local") {
+ return true;
}
+ return !replAllDead && _settings.master;
+}
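
The write-acceptance rule spelled out in the comment above reduces to a small decision table. A standalone restatement under simplified assumptions (plain bools in place of coordinator state) is:

#include <string>

// Standalone restatement of the rule described above: standalone nodes and
// drained primaries always accept, "local" is always writable, and the
// master/slave case is gated on replAllDead.
bool canAcceptWritesForDatabaseSketch(bool canAcceptNonLocalWrites,
                                      bool startedAsMaster,
                                      bool replAllDead,
                                      const std::string& dbName) {
    if (canAcceptNonLocalWrites) {
        return true;
    }
    if (dbName == "local") {
        return true;
    }
    return !replAllDead && startedAsMaster;
}
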
- void ReplicationCoordinatorImpl::_fillIsMasterForReplSet_finish(
- const ReplicationExecutor::CallbackArgs& cbData, IsMasterResponse* response) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- response->markAsShutdownInProgress();
- return;
- }
- _topCoord->fillIsMasterForReplSet(response);
+bool ReplicationCoordinatorImpl::canAcceptWritesFor(const NamespaceString& ns) {
+ if (_memberState.rollback() && ns.isOplog()) {
+ return false;
}
+ StringData dbName = ns.db();
+ return canAcceptWritesForDatabase(dbName);
+}
- void ReplicationCoordinatorImpl::appendSlaveInfoData(BSONObjBuilder* result) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- BSONArrayBuilder replicationProgress(result->subarrayStart("replicationProgress"));
- {
- for (SlaveInfoVector::const_iterator itr = _slaveInfo.begin();
- itr != _slaveInfo.end(); ++itr) {
- BSONObjBuilder entry(replicationProgress.subobjStart());
- entry.append("rid", itr->rid);
- // TODO(siyuan) Output term of OpTime
- entry.append("optime", itr->opTime.getTimestamp());
- entry.append("host", itr->hostAndPort.toString());
- if (getReplicationMode() == modeReplSet) {
- if (_selfIndex == -1) {
- continue;
- }
- invariant(itr->memberId >= 0);
- entry.append("memberId", itr->memberId);
- }
- }
- }
+Status ReplicationCoordinatorImpl::checkCanServeReadsFor(OperationContext* txn,
+ const NamespaceString& ns,
+ bool slaveOk) {
+ if (_memberState.rollback() && ns.isOplog()) {
+ return Status(ErrorCodes::NotMasterOrSecondaryCode,
+ "cannot read from oplog collection while in rollback");
}
-
- ReplicaSetConfig ReplicationCoordinatorImpl::getConfig() const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- return _rsConfig;
+ if (txn->getClient()->isInDirectClient()) {
+ return Status::OK();
}
-
- void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result) {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- result->append("config", _rsConfig.toBSON());
+ if (canAcceptWritesFor(ns)) {
+ return Status::OK();
}
-
- bool ReplicationCoordinatorImpl::getMaintenanceMode() {
- bool maintenanceMode(false);
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_getMaintenanceMode_helper,
- this,
- stdx::placeholders::_1,
- &maintenanceMode));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return false;
- }
- fassert(18811, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return maintenanceMode;
+ if (_settings.slave || _settings.master) {
+ return Status::OK();
}
-
- void ReplicationCoordinatorImpl::_getMaintenanceMode_helper(
- const ReplicationExecutor::CallbackArgs& cbData,
- bool* maintenanceMode) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
+ if (slaveOk) {
+ if (_canServeNonLocalReads.loadRelaxed()) {
+ return Status::OK();
}
- *maintenanceMode = _topCoord->getMaintenanceCount() > 0;
+ return Status(ErrorCodes::NotMasterOrSecondaryCode,
+ "not master or secondary; cannot currently read from this replSet member");
}
+ return Status(ErrorCodes::NotMasterNoSlaveOkCode, "not master and slaveOk=false");
+}
- Status ReplicationCoordinatorImpl::setMaintenanceMode(bool activate) {
- if (getReplicationMode() != modeReplSet) {
- return Status(ErrorCodes::NoReplicationEnabled,
- "can only set maintenance mode on replica set members");
- }
+bool ReplicationCoordinatorImpl::isInPrimaryOrSecondaryState() const {
+ return _canServeNonLocalReads.loadRelaxed();
+}
- Status result(ErrorCodes::InternalError, "didn't set status in _setMaintenanceMode_helper");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_setMaintenanceMode_helper,
- this,
- stdx::placeholders::_1,
- activate,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return cbh.getStatus();
- }
- fassert(18698, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return result;
+bool ReplicationCoordinatorImpl::shouldIgnoreUniqueIndex(const IndexDescriptor* idx) {
+ if (!idx->unique()) {
+ return false;
}
-
- void ReplicationCoordinatorImpl::_setMaintenanceMode_helper(
- const ReplicationExecutor::CallbackArgs& cbData,
- bool activate,
- Status* result) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
- return;
- }
-
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (_getMemberState_inlock().primary()) {
- *result = Status(ErrorCodes::NotSecondary, "primaries can't modify maintenance mode");
- return;
- }
-
- int curMaintenanceCalls = _topCoord->getMaintenanceCount();
- if (activate) {
- log() << "going into maintenance mode with " << curMaintenanceCalls
- << " other maintenance mode tasks in progress" << rsLog;
- _topCoord->adjustMaintenanceCountBy(1);
- }
- else if (curMaintenanceCalls > 0) {
- invariant(_topCoord->getRole() == TopologyCoordinator::Role::follower);
-
- _topCoord->adjustMaintenanceCountBy(-1);
-
- log() << "leaving maintenance mode (" << curMaintenanceCalls-1
- << " other maintenance mode tasks ongoing)" << rsLog;
- } else {
- warning() << "Attempted to leave maintenance mode but it is not currently active";
- *result = Status(ErrorCodes::OperationFailed, "already out of maintenance mode");
- return;
- }
-
- const PostMemberStateUpdateAction action =
- _updateMemberStateFromTopologyCoordinator_inlock();
- *result = Status::OK();
- lk.unlock();
- _performPostMemberStateUpdateAction(action);
+ // Never ignore _id index
+ if (idx->isIdIndex()) {
+ return false;
}
-
- Status ReplicationCoordinatorImpl::processReplSetSyncFrom(const HostAndPort& target,
- BSONObjBuilder* resultObj) {
- Status result(ErrorCodes::InternalError, "didn't set status in prepareSyncFromResponse");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&TopologyCoordinator::prepareSyncFromResponse,
- _topCoord.get(),
- stdx::placeholders::_1,
- target,
- _getMyLastOptime_inlock(),
- resultObj,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
- }
- fassert(18649, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return result;
- }
-
- Status ReplicationCoordinatorImpl::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
- Status result(ErrorCodes::InternalError, "didn't set status in prepareFreezeResponse");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_processReplSetFreeze_finish,
- this,
- stdx::placeholders::_1,
- secs,
- resultObj,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return cbh.getStatus();
- }
- fassert(18641, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return result;
- }
-
- void ReplicationCoordinatorImpl::_processReplSetFreeze_finish(
- const ReplicationExecutor::CallbackArgs& cbData,
- int secs,
- BSONObjBuilder* response,
- Status* result) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
- return;
- }
-
- _topCoord->prepareFreezeResponse(_replExecutor.now(), secs, response);
-
- if (_topCoord->getRole() == TopologyCoordinator::Role::candidate) {
- // If we just unfroze and ended our stepdown period and we are a one node replica set,
- // the topology coordinator will have gone into the candidate role to signal that we
- // need to elect ourself.
- _performPostMemberStateUpdateAction(kActionWinElection);
- }
- *result = Status::OK();
+ if (nsToDatabaseSubstring(idx->parentNS()) == "local") {
+ // always enforce on local
+ return false;
}
-
- Status ReplicationCoordinatorImpl::processHeartbeat(const ReplSetHeartbeatArgs& args,
- ReplSetHeartbeatResponse* response) {
- {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
- return Status(ErrorCodes::NotYetInitialized,
- "Received heartbeat while still initializing replication system");
- }
- }
-
- Status result(ErrorCodes::InternalError, "didn't set status in prepareHeartbeatResponse");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_processHeartbeatFinish,
- this,
- stdx::placeholders::_1,
- args,
- response,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
- }
- fassert(18508, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return result;
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ if (getReplicationMode() != modeReplSet) {
+ return false;
}
-
- void ReplicationCoordinatorImpl::_processHeartbeatFinish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetHeartbeatArgs& args,
- ReplSetHeartbeatResponse* response,
- Status* outStatus) {
-
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- *outStatus = Status(ErrorCodes::ShutdownInProgress, "Replication shutdown in progress");
- return;
- }
- fassert(18910, cbData.status);
- const Date_t now = _replExecutor.now();
- *outStatus = _topCoord->prepareHeartbeatResponse(
- now,
- args,
- _settings.ourSetName(),
- getMyLastOptime(),
- response);
- if ((outStatus->isOK() || *outStatus == ErrorCodes::InvalidReplicaSetConfig) &&
- _selfIndex < 0) {
- // If this node does not belong to the configuration it knows about, send heartbeats
- // back to any node that sends us a heartbeat, in case one of those remote nodes has
- // a configuration that contains us. Chances are excellent that it will, since that
- // is the only reason for a remote node to send this node a heartbeat request.
- if (!args.getSenderHost().empty() && _seedList.insert(args.getSenderHost()).second) {
- _scheduleHeartbeatToTarget(args.getSenderHost(), -1, now);
+ // see SERVER-6671
+ MemberState ms = _getMemberState_inlock();
+ switch (ms.s) {
+ case MemberState::RS_SECONDARY:
+ case MemberState::RS_RECOVERING:
+ case MemberState::RS_ROLLBACK:
+ case MemberState::RS_STARTUP2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+OID ReplicationCoordinatorImpl::getElectionId() {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ return _electionId;
+}
+
+OID ReplicationCoordinatorImpl::getMyRID() const {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ return _getMyRID_inlock();
+}
+
+OID ReplicationCoordinatorImpl::_getMyRID_inlock() const {
+ return _myRID;
+}
+
+int ReplicationCoordinatorImpl::getMyId() const {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ return _getMyId_inlock();
+}
+
+int ReplicationCoordinatorImpl::_getMyId_inlock() const {
+ const MemberConfig& self = _rsConfig.getMemberAt(_selfIndex);
+ return self.getId();
+}
+
+bool ReplicationCoordinatorImpl::prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ invariant(_rsConfig.isInitialized());
+ // do not send updates if we have been removed from the config
+ if (_selfIndex == -1) {
+ return false;
+ }
+ cmdBuilder->append("replSetUpdatePosition", 1);
+ // create an array with an entry for each member connected to us, and one for ourself
+ BSONArrayBuilder arrayBuilder(cmdBuilder->subarrayStart("optimes"));
+ {
+ for (SlaveInfoVector::const_iterator itr = _slaveInfo.begin(); itr != _slaveInfo.end();
+ ++itr) {
+ if (itr->opTime.isNull()) {
+ // Don't include info on members we haven't heard from yet.
+ continue;
+ }
+ BSONObjBuilder entry(arrayBuilder.subobjStart());
+ entry.append("_id", itr->rid);
+ entry.append("optime", itr->opTime.getTimestamp());
+ entry.append("memberId", itr->memberId);
+ entry.append("cfgver", _rsConfig.getConfigVersion());
+ // SERVER-14550 Even though the "config" field isn't used on the other end in 3.0,
+ // we need to keep sending it for 2.6 compatibility.
+ // TODO(spencer): Remove this after 3.0 is released.
+ const MemberConfig* member = _rsConfig.findMemberByID(itr->memberId);
+ fassert(18651, member);
+ entry.append("config", member->toBSON(_rsConfig.getTagConfig()));
+ }
+ }
+ return true;
+}
+
+Status ReplicationCoordinatorImpl::processReplSetGetStatus(BSONObjBuilder* response) {
+ Status result(ErrorCodes::InternalError, "didn't set status in prepareStatusResponse");
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&TopologyCoordinator::prepareStatusResponse,
+ _topCoord.get(),
+ stdx::placeholders::_1,
+ _replExecutor.now(),
+ time(0) - serverGlobalParams.started,
+ getMyLastOptime(),
+ response,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
+ }
+ fassert(18640, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+
+ return result;
+}
+
+void ReplicationCoordinatorImpl::fillIsMasterForReplSet(IsMasterResponse* response) {
+ invariant(getSettings().usingReplSets());
+
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_fillIsMasterForReplSet_finish,
+ this,
+ stdx::placeholders::_1,
+ response));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ response->markAsShutdownInProgress();
+ return;
+ }
+ fassert(28602, cbh.getStatus());
+
+ _replExecutor.wait(cbh.getValue());
+ if (isWaitingForApplierToDrain()) {
+ // Report that we are secondary to ismaster callers until drain completes.
+ response->setIsMaster(false);
+ response->setIsSecondary(true);
+ }
+}
+
+void ReplicationCoordinatorImpl::_fillIsMasterForReplSet_finish(
+ const ReplicationExecutor::CallbackArgs& cbData, IsMasterResponse* response) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ response->markAsShutdownInProgress();
+ return;
+ }
+ _topCoord->fillIsMasterForReplSet(response);
+}
+
+void ReplicationCoordinatorImpl::appendSlaveInfoData(BSONObjBuilder* result) {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ BSONArrayBuilder replicationProgress(result->subarrayStart("replicationProgress"));
+ {
+ for (SlaveInfoVector::const_iterator itr = _slaveInfo.begin(); itr != _slaveInfo.end();
+ ++itr) {
+ BSONObjBuilder entry(replicationProgress.subobjStart());
+ entry.append("rid", itr->rid);
+ // TODO(siyuan) Output term of OpTime
+ entry.append("optime", itr->opTime.getTimestamp());
+ entry.append("host", itr->hostAndPort.toString());
+ if (getReplicationMode() == modeReplSet) {
+ if (_selfIndex == -1) {
+ continue;
+ }
+ invariant(itr->memberId >= 0);
+ entry.append("memberId", itr->memberId);
}
}
}
+}
- Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn,
- const ReplSetReconfigArgs& args,
- BSONObjBuilder* resultObj) {
-
- log() << "replSetReconfig admin command received from client";
+ReplicaSetConfig ReplicationCoordinatorImpl::getConfig() const {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ return _rsConfig;
+}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+void ReplicationCoordinatorImpl::processReplSetGetConfig(BSONObjBuilder* result) {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ result->append("config", _rsConfig.toBSON());
+}
- while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
- _rsConfigStateChange.wait(lk);
- }
-
- switch (_rsConfigState) {
+bool ReplicationCoordinatorImpl::getMaintenanceMode() {
+ bool maintenanceMode(false);
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_getMaintenanceMode_helper,
+ this,
+ stdx::placeholders::_1,
+ &maintenanceMode));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return false;
+ }
+ fassert(18811, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return maintenanceMode;
+}
+
+void ReplicationCoordinatorImpl::_getMaintenanceMode_helper(
+ const ReplicationExecutor::CallbackArgs& cbData, bool* maintenanceMode) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ *maintenanceMode = _topCoord->getMaintenanceCount() > 0;
+}
+
+Status ReplicationCoordinatorImpl::setMaintenanceMode(bool activate) {
+ if (getReplicationMode() != modeReplSet) {
+ return Status(ErrorCodes::NoReplicationEnabled,
+ "can only set maintenance mode on replica set members");
+ }
+
+ Status result(ErrorCodes::InternalError, "didn't set status in _setMaintenanceMode_helper");
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_setMaintenanceMode_helper,
+ this,
+ stdx::placeholders::_1,
+ activate,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return cbh.getStatus();
+ }
+ fassert(18698, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_setMaintenanceMode_helper(
+ const ReplicationExecutor::CallbackArgs& cbData, bool activate, Status* result) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
+ return;
+ }
+
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (_getMemberState_inlock().primary()) {
+ *result = Status(ErrorCodes::NotSecondary, "primaries can't modify maintenance mode");
+ return;
+ }
+
+ int curMaintenanceCalls = _topCoord->getMaintenanceCount();
+ if (activate) {
+ log() << "going into maintenance mode with " << curMaintenanceCalls
+ << " other maintenance mode tasks in progress" << rsLog;
+ _topCoord->adjustMaintenanceCountBy(1);
+ } else if (curMaintenanceCalls > 0) {
+ invariant(_topCoord->getRole() == TopologyCoordinator::Role::follower);
+
+ _topCoord->adjustMaintenanceCountBy(-1);
+
+ log() << "leaving maintenance mode (" << curMaintenanceCalls - 1
+ << " other maintenance mode tasks ongoing)" << rsLog;
+ } else {
+ warning() << "Attempted to leave maintenance mode but it is not currently active";
+ *result = Status(ErrorCodes::OperationFailed, "already out of maintenance mode");
+ return;
+ }
+
+ const PostMemberStateUpdateAction action = _updateMemberStateFromTopologyCoordinator_inlock();
+ *result = Status::OK();
+ lk.unlock();
+ _performPostMemberStateUpdateAction(action);
+}
+
+Status ReplicationCoordinatorImpl::processReplSetSyncFrom(const HostAndPort& target,
+ BSONObjBuilder* resultObj) {
+ Status result(ErrorCodes::InternalError, "didn't set status in prepareSyncFromResponse");
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&TopologyCoordinator::prepareSyncFromResponse,
+ _topCoord.get(),
+ stdx::placeholders::_1,
+ target,
+ _getMyLastOptime_inlock(),
+ resultObj,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
+ }
+ fassert(18649, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return result;
+}
+
+Status ReplicationCoordinatorImpl::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
+ Status result(ErrorCodes::InternalError, "didn't set status in prepareFreezeResponse");
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_processReplSetFreeze_finish,
+ this,
+ stdx::placeholders::_1,
+ secs,
+ resultObj,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return cbh.getStatus();
+ }
+ fassert(18641, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_processReplSetFreeze_finish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ int secs,
+ BSONObjBuilder* response,
+ Status* result) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
+ return;
+ }
+
+ _topCoord->prepareFreezeResponse(_replExecutor.now(), secs, response);
+
+ if (_topCoord->getRole() == TopologyCoordinator::Role::candidate) {
+ // If we just unfroze and ended our stepdown period and we are a one node replica set,
+ // the topology coordinator will have gone into the candidate role to signal that we
+ // need to elect ourself.
+ _performPostMemberStateUpdateAction(kActionWinElection);
+ }
+ *result = Status::OK();
+}
+
+Status ReplicationCoordinatorImpl::processHeartbeat(const ReplSetHeartbeatArgs& args,
+ ReplSetHeartbeatResponse* response) {
+ {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
+ return Status(ErrorCodes::NotYetInitialized,
+ "Received heartbeat while still initializing replication system");
+ }
+ }
+
+ Status result(ErrorCodes::InternalError, "didn't set status in prepareHeartbeatResponse");
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_processHeartbeatFinish,
+ this,
+ stdx::placeholders::_1,
+ args,
+ response,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
+ }
+ fassert(18508, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_processHeartbeatFinish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetHeartbeatArgs& args,
+ ReplSetHeartbeatResponse* response,
+ Status* outStatus) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ *outStatus = Status(ErrorCodes::ShutdownInProgress, "Replication shutdown in progress");
+ return;
+ }
+ fassert(18910, cbData.status);
+ const Date_t now = _replExecutor.now();
+ *outStatus = _topCoord->prepareHeartbeatResponse(
+ now, args, _settings.ourSetName(), getMyLastOptime(), response);
+ if ((outStatus->isOK() || *outStatus == ErrorCodes::InvalidReplicaSetConfig) &&
+ _selfIndex < 0) {
+ // If this node does not belong to the configuration it knows about, send heartbeats
+ // back to any node that sends us a heartbeat, in case one of those remote nodes has
+ // a configuration that contains us. Chances are excellent that it will, since that
+ // is the only reason for a remote node to send this node a heartbeat request.
+ if (!args.getSenderHost().empty() && _seedList.insert(args.getSenderHost()).second) {
+ _scheduleHeartbeatToTarget(args.getSenderHost(), -1, now);
+ }
+ }
+}
+
+Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* txn,
+ const ReplSetReconfigArgs& args,
+ BSONObjBuilder* resultObj) {
+ log() << "replSetReconfig admin command received from client";
+
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+
+ while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
+ _rsConfigStateChange.wait(lk);
+ }
+
+ switch (_rsConfigState) {
case kConfigSteady:
break;
case kConfigUninitialized:
return Status(ErrorCodes::NotYetInitialized,
"Node not yet initialized; use the replSetInitiate command");
case kConfigReplicationDisabled:
- invariant(false); // should be unreachable due to !_settings.usingReplSets() check above
+ invariant(
+ false); // should be unreachable due to !_settings.usingReplSets() check above
case kConfigInitiating:
case kConfigReconfiguring:
case kConfigHBReconfiguring:
@@ -1811,283 +1741,264 @@ namespace {
default:
severe() << "Unexpected _rsConfigState " << int(_rsConfigState);
fassertFailed(18914);
- }
-
- invariant(_rsConfig.isInitialized());
-
- if (!args.force && !_getMemberState_inlock().primary()) {
- return Status(ErrorCodes::NotMaster, str::stream() <<
- "replSetReconfig should only be run on PRIMARY, but my state is " <<
- _getMemberState_inlock().toString() <<
- "; use the \"force\" argument to override");
- }
-
- _setConfigState_inlock(kConfigReconfiguring);
- ScopeGuard configStateGuard = MakeGuard(
- lockAndCall,
- &lk,
- stdx::bind(&ReplicationCoordinatorImpl::_setConfigState_inlock,
- this,
- kConfigSteady));
-
- ReplicaSetConfig oldConfig = _rsConfig;
- lk.unlock();
-
- ReplicaSetConfig newConfig;
- BSONObj newConfigObj = args.newConfigObj;
- if (args.force) {
- newConfigObj = incrementConfigVersionByRandom(newConfigObj);
- }
- Status status = newConfig.initialize(newConfigObj);
- if (!status.isOK()) {
- error() << "replSetReconfig got " << status << " while parsing " << newConfigObj;
- return Status(ErrorCodes::InvalidReplicaSetConfig, status.reason());;
- }
- if (newConfig.getReplSetName() != _settings.ourSetName()) {
- str::stream errmsg;
- errmsg << "Attempting to reconfigure a replica set with name " <<
- newConfig.getReplSetName() << ", but command line reports " <<
- _settings.ourSetName() << "; rejecting";
- error() << std::string(errmsg);
- return Status(ErrorCodes::InvalidReplicaSetConfig, errmsg);
- }
-
- StatusWith<int> myIndex = validateConfigForReconfig(
- _externalState.get(),
- oldConfig,
- newConfig,
- args.force);
- if (!myIndex.isOK()) {
- error() << "replSetReconfig got " << myIndex.getStatus() << " while validating " <<
- newConfigObj;
- return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- myIndex.getStatus().reason());
- }
-
- log() << "replSetReconfig config object with " << newConfig.getNumMembers() <<
- " members parses ok";
-
- if (!args.force) {
- status = checkQuorumForReconfig(&_replExecutor,
- newConfig,
- myIndex.getValue());
- if (!status.isOK()) {
- error() << "replSetReconfig failed; " << status;
- return status;
- }
- }
-
- status = _externalState->storeLocalConfigDocument(txn, newConfig.toBSON());
- if (!status.isOK()) {
- error() << "replSetReconfig failed to store config document; " << status;
- return status;
- }
-
- const stdx::function<void (const ReplicationExecutor::CallbackArgs&)> reconfigFinishFn(
- stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetReconfig,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex.getValue()));
-
- // If it's a force reconfig, the primary node may not be electable after the configuration
- // change. In case we are that primary node, finish the reconfig under the global lock,
- // so that the step down occurs safely.
- CBHStatus cbh =
- args.force ?
- _replExecutor.scheduleWorkWithGlobalExclusiveLock(reconfigFinishFn) :
- _replExecutor.scheduleWork(reconfigFinishFn);
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return status;
- }
- fassert(18824, cbh.getStatus());
- configStateGuard.Dismiss();
- _replExecutor.wait(cbh.getValue());
- return Status::OK();
}
- void ReplicationCoordinatorImpl::_finishReplSetReconfig(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& newConfig,
- int myIndex) {
+ invariant(_rsConfig.isInitialized());
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- invariant(_rsConfigState == kConfigReconfiguring);
- invariant(_rsConfig.isInitialized());
- const PostMemberStateUpdateAction action = _setCurrentRSConfig_inlock(newConfig, myIndex);
- lk.unlock();
- _performPostMemberStateUpdateAction(action);
+ if (!args.force && !_getMemberState_inlock().primary()) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream()
+ << "replSetReconfig should only be run on PRIMARY, but my state is "
+ << _getMemberState_inlock().toString()
+ << "; use the \"force\" argument to override");
}
- Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn,
- const BSONObj& configObj,
- BSONObjBuilder* resultObj) {
- log() << "replSetInitiate admin command received from client";
+ _setConfigState_inlock(kConfigReconfiguring);
+ ScopeGuard configStateGuard = MakeGuard(
+ lockAndCall,
+ &lk,
+ stdx::bind(&ReplicationCoordinatorImpl::_setConfigState_inlock, this, kConfigSteady));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (!_settings.usingReplSets()) {
- return Status(ErrorCodes::NoReplicationEnabled, "server is not running with --replSet");
- }
-
- while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
- _rsConfigStateChange.wait(lk);
- }
+ ReplicaSetConfig oldConfig = _rsConfig;
+ lk.unlock();
- if (_rsConfigState != kConfigUninitialized) {
- resultObj->append("info",
- "try querying local.system.replset to see current configuration");
- return Status(ErrorCodes::AlreadyInitialized, "already initialized");
- }
- invariant(!_rsConfig.isInitialized());
- _setConfigState_inlock(kConfigInitiating);
- ScopeGuard configStateGuard = MakeGuard(
- lockAndCall,
- &lk,
- stdx::bind(&ReplicationCoordinatorImpl::_setConfigState_inlock,
- this,
- kConfigUninitialized));
- lk.unlock();
-
- ReplicaSetConfig newConfig;
- Status status = newConfig.initialize(configObj);
- if (!status.isOK()) {
- error() << "replSet initiate got " << status << " while parsing " << configObj;
- return Status(ErrorCodes::InvalidReplicaSetConfig, status.reason());;
- }
- if (newConfig.getReplSetName() != _settings.ourSetName()) {
- str::stream errmsg;
- errmsg << "Attempting to initiate a replica set with name " <<
- newConfig.getReplSetName() << ", but command line reports " <<
- _settings.ourSetName() << "; rejecting";
- error() << std::string(errmsg);
- return Status(ErrorCodes::InvalidReplicaSetConfig, errmsg);
- }
-
- StatusWith<int> myIndex = validateConfigForInitiate(_externalState.get(), newConfig);
- if (!myIndex.isOK()) {
- error() << "replSet initiate got " << myIndex.getStatus() << " while validating " <<
- configObj;
- return Status(ErrorCodes::InvalidReplicaSetConfig, myIndex.getStatus().reason());
- }
-
- log() << "replSetInitiate config object with " << newConfig.getNumMembers() <<
- " members parses ok";
+ ReplicaSetConfig newConfig;
+ BSONObj newConfigObj = args.newConfigObj;
+ if (args.force) {
+ newConfigObj = incrementConfigVersionByRandom(newConfigObj);
+ }
+ Status status = newConfig.initialize(newConfigObj);
+ if (!status.isOK()) {
+ error() << "replSetReconfig got " << status << " while parsing " << newConfigObj;
+        return Status(ErrorCodes::InvalidReplicaSetConfig, status.reason());
+    }
+ if (newConfig.getReplSetName() != _settings.ourSetName()) {
+ str::stream errmsg;
+ errmsg << "Attempting to reconfigure a replica set with name " << newConfig.getReplSetName()
+ << ", but command line reports " << _settings.ourSetName() << "; rejecting";
+ error() << std::string(errmsg);
+ return Status(ErrorCodes::InvalidReplicaSetConfig, errmsg);
+ }
- status = checkQuorumForInitiate(
- &_replExecutor,
- newConfig,
- myIndex.getValue());
+ StatusWith<int> myIndex =
+ validateConfigForReconfig(_externalState.get(), oldConfig, newConfig, args.force);
+ if (!myIndex.isOK()) {
+ error() << "replSetReconfig got " << myIndex.getStatus() << " while validating "
+ << newConfigObj;
+ return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ myIndex.getStatus().reason());
+ }
- if (!status.isOK()) {
- error() << "replSetInitiate failed; " << status;
- return status;
- }
+ log() << "replSetReconfig config object with " << newConfig.getNumMembers()
+ << " members parses ok";
- status = _externalState->storeLocalConfigDocument(txn, newConfig.toBSON());
+ if (!args.force) {
+ status = checkQuorumForReconfig(&_replExecutor, newConfig, myIndex.getValue());
if (!status.isOK()) {
- error() << "replSetInitiate failed to store config document; " << status;
+ error() << "replSetReconfig failed; " << status;
return status;
}
+ }
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetInitiate,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex.getValue()));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return status;
- }
- configStateGuard.Dismiss();
- fassert(18654, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
-
- if (status.isOK()) {
- // Create the oplog with the first entry, and start repl threads.
- _externalState->initiateOplog(txn);
- _externalState->startThreads();
- }
+ status = _externalState->storeLocalConfigDocument(txn, newConfig.toBSON());
+ if (!status.isOK()) {
+ error() << "replSetReconfig failed to store config document; " << status;
return status;
}
- void ReplicationCoordinatorImpl::_finishReplSetInitiate(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& newConfig,
- int myIndex) {
+ const stdx::function<void(const ReplicationExecutor::CallbackArgs&)> reconfigFinishFn(
+ stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetReconfig,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex.getValue()));
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- invariant(_rsConfigState == kConfigInitiating);
- invariant(!_rsConfig.isInitialized());
- const PostMemberStateUpdateAction action = _setCurrentRSConfig_inlock(newConfig, myIndex);
- lk.unlock();
- _performPostMemberStateUpdateAction(action);
+    // If it's a force reconfig, the primary node may not be electable after the configuration
+    // change. If we are that primary node, finish the reconfig under the global lock
+    // so that the step down occurs safely.
+ CBHStatus cbh = args.force ? _replExecutor.scheduleWorkWithGlobalExclusiveLock(reconfigFinishFn)
+ : _replExecutor.scheduleWork(reconfigFinishFn);
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return status;
+ }
+ fassert(18824, cbh.getStatus());
+ configStateGuard.Dismiss();
+ _replExecutor.wait(cbh.getValue());
+ return Status::OK();
+}
+
+void ReplicationCoordinatorImpl::_finishReplSetReconfig(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& newConfig,
+ int myIndex) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ invariant(_rsConfigState == kConfigReconfiguring);
+ invariant(_rsConfig.isInitialized());
+ const PostMemberStateUpdateAction action = _setCurrentRSConfig_inlock(newConfig, myIndex);
+ lk.unlock();
+ _performPostMemberStateUpdateAction(action);
+}
+
+Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* txn,
+ const BSONObj& configObj,
+ BSONObjBuilder* resultObj) {
+ log() << "replSetInitiate admin command received from client";
+
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (!_settings.usingReplSets()) {
+ return Status(ErrorCodes::NoReplicationEnabled, "server is not running with --replSet");
+ }
+
+ while (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
+ _rsConfigStateChange.wait(lk);
+ }
+
+ if (_rsConfigState != kConfigUninitialized) {
+ resultObj->append("info", "try querying local.system.replset to see current configuration");
+ return Status(ErrorCodes::AlreadyInitialized, "already initialized");
+ }
+ invariant(!_rsConfig.isInitialized());
+ _setConfigState_inlock(kConfigInitiating);
+ ScopeGuard configStateGuard = MakeGuard(
+ lockAndCall,
+ &lk,
+ stdx::bind(
+ &ReplicationCoordinatorImpl::_setConfigState_inlock, this, kConfigUninitialized));
+ lk.unlock();
+
+ ReplicaSetConfig newConfig;
+ Status status = newConfig.initialize(configObj);
+ if (!status.isOK()) {
+ error() << "replSet initiate got " << status << " while parsing " << configObj;
+        return Status(ErrorCodes::InvalidReplicaSetConfig, status.reason());
+    }
+ if (newConfig.getReplSetName() != _settings.ourSetName()) {
+ str::stream errmsg;
+ errmsg << "Attempting to initiate a replica set with name " << newConfig.getReplSetName()
+ << ", but command line reports " << _settings.ourSetName() << "; rejecting";
+ error() << std::string(errmsg);
+ return Status(ErrorCodes::InvalidReplicaSetConfig, errmsg);
+ }
+
+ StatusWith<int> myIndex = validateConfigForInitiate(_externalState.get(), newConfig);
+ if (!myIndex.isOK()) {
+ error() << "replSet initiate got " << myIndex.getStatus() << " while validating "
+ << configObj;
+ return Status(ErrorCodes::InvalidReplicaSetConfig, myIndex.getStatus().reason());
+ }
+
+ log() << "replSetInitiate config object with " << newConfig.getNumMembers()
+ << " members parses ok";
+
+ status = checkQuorumForInitiate(&_replExecutor, newConfig, myIndex.getValue());
+
+ if (!status.isOK()) {
+ error() << "replSetInitiate failed; " << status;
+ return status;
}
- void ReplicationCoordinatorImpl::_setConfigState_inlock(ConfigState newState) {
- if (newState != _rsConfigState) {
- _rsConfigState = newState;
- _rsConfigStateChange.notify_all();
- }
+ status = _externalState->storeLocalConfigDocument(txn, newConfig.toBSON());
+ if (!status.isOK()) {
+ error() << "replSetInitiate failed to store config document; " << status;
+ return status;
}
- ReplicationCoordinatorImpl::PostMemberStateUpdateAction
- ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator_inlock() {
- const MemberState newState = _topCoord->getMemberState();
- if (newState == _memberState) {
- if (_topCoord->getRole() == TopologyCoordinator::Role::candidate) {
- invariant(_rsConfig.getNumMembers() == 1 &&
- _selfIndex == 0 &&
- _rsConfig.getMemberAt(0).isElectable());
- return kActionWinElection;
- }
- return kActionNone;
- }
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetInitiate,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex.getValue()));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return status;
+ }
+ configStateGuard.Dismiss();
+ fassert(18654, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
- PostMemberStateUpdateAction result;
- if (_memberState.primary() || newState.removed() || newState.rollback()) {
- // Wake up any threads blocked in awaitReplication, close connections, etc.
- for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
- it != _replicationWaiterList.end(); ++it) {
- WaiterInfo* info = *it;
- info->master = false;
- info->condVar->notify_all();
- }
- _isWaitingForDrainToComplete = false;
- _canAcceptNonLocalWrites = false;
- result = kActionCloseAllConnections;
- }
- else {
- result = kActionFollowerModeStateChange;
+ if (status.isOK()) {
+ // Create the oplog with the first entry, and start repl threads.
+ _externalState->initiateOplog(txn);
+ _externalState->startThreads();
+ }
+ return status;
+}
+
+void ReplicationCoordinatorImpl::_finishReplSetInitiate(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& newConfig,
+ int myIndex) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ invariant(_rsConfigState == kConfigInitiating);
+ invariant(!_rsConfig.isInitialized());
+ const PostMemberStateUpdateAction action = _setCurrentRSConfig_inlock(newConfig, myIndex);
+ lk.unlock();
+ _performPostMemberStateUpdateAction(action);
+}
+
+void ReplicationCoordinatorImpl::_setConfigState_inlock(ConfigState newState) {
+ if (newState != _rsConfigState) {
+ _rsConfigState = newState;
+ _rsConfigStateChange.notify_all();
+ }
+}
+
+ReplicationCoordinatorImpl::PostMemberStateUpdateAction
+ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator_inlock() {
+ const MemberState newState = _topCoord->getMemberState();
+ if (newState == _memberState) {
+ if (_topCoord->getRole() == TopologyCoordinator::Role::candidate) {
+ invariant(_rsConfig.getNumMembers() == 1 && _selfIndex == 0 &&
+ _rsConfig.getMemberAt(0).isElectable());
+ return kActionWinElection;
}
+ return kActionNone;
+ }
- if (_memberState.secondary() && !newState.primary()) {
- // Switching out of SECONDARY, but not to PRIMARY.
- _canServeNonLocalReads.store(0U);
- }
- else if (!_memberState.primary() && newState.secondary()) {
- // Switching into SECONDARY, but not from PRIMARY.
- _canServeNonLocalReads.store(1U);
+ PostMemberStateUpdateAction result;
+ if (_memberState.primary() || newState.removed() || newState.rollback()) {
+ // Wake up any threads blocked in awaitReplication, close connections, etc.
+ for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
+ it != _replicationWaiterList.end();
+ ++it) {
+ WaiterInfo* info = *it;
+ info->master = false;
+ info->condVar->notify_all();
}
+ _isWaitingForDrainToComplete = false;
+ _canAcceptNonLocalWrites = false;
+ result = kActionCloseAllConnections;
+ } else {
+ result = kActionFollowerModeStateChange;
+ }
- if (newState.secondary() && _topCoord->getRole() == TopologyCoordinator::Role::candidate) {
- // When transitioning to SECONDARY, the only way for _topCoord to report the candidate
- // role is if the configuration represents a single-node replica set. In that case, the
- // overriding requirement is to elect this singleton node primary.
- invariant(_rsConfig.getNumMembers() == 1 &&
- _selfIndex == 0 &&
- _rsConfig.getMemberAt(0).isElectable());
- result = kActionWinElection;
- }
+ if (_memberState.secondary() && !newState.primary()) {
+ // Switching out of SECONDARY, but not to PRIMARY.
+ _canServeNonLocalReads.store(0U);
+ } else if (!_memberState.primary() && newState.secondary()) {
+ // Switching into SECONDARY, but not from PRIMARY.
+ _canServeNonLocalReads.store(1U);
+ }
- _memberState = newState;
- log() << "transition to " << newState.toString() << rsLog;
- return result;
+ if (newState.secondary() && _topCoord->getRole() == TopologyCoordinator::Role::candidate) {
+ // When transitioning to SECONDARY, the only way for _topCoord to report the candidate
+ // role is if the configuration represents a single-node replica set. In that case, the
+ // overriding requirement is to elect this singleton node primary.
+ invariant(_rsConfig.getNumMembers() == 1 && _selfIndex == 0 &&
+ _rsConfig.getMemberAt(0).isElectable());
+ result = kActionWinElection;
}
- void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
- PostMemberStateUpdateAction action) {
+ _memberState = newState;
+ log() << "transition to " << newState.toString() << rsLog;
+ return result;
+}
- switch (action) {
+void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
+ PostMemberStateUpdateAction action) {
+ switch (action) {
case kActionNone:
break;
case kActionFollowerModeStateChange:
@@ -2114,702 +2025,671 @@ namespace {
default:
severe() << "Unknown post member state update action " << static_cast<int>(action);
fassertFailed(26010);
- }
}
-
- Status ReplicationCoordinatorImpl::processReplSetGetRBID(BSONObjBuilder* resultObj) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- resultObj->append("rbid", _rbid);
- return Status::OK();
- }
-
- void ReplicationCoordinatorImpl::incrementRollbackID() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- ++_rbid;
- }
-
- Status ReplicationCoordinatorImpl::processReplSetFresh(const ReplSetFreshArgs& args,
- BSONObjBuilder* resultObj) {
-
- Status result(ErrorCodes::InternalError, "didn't set status in prepareFreshResponse");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_processReplSetFresh_finish,
- this,
- stdx::placeholders::_1,
- args,
- resultObj,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
- }
- fassert(18652, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return result;
- }
-
- void ReplicationCoordinatorImpl::_processReplSetFresh_finish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetFreshArgs& args,
- BSONObjBuilder* response,
- Status* result) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- *result = Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
- return;
+}
+
+Status ReplicationCoordinatorImpl::processReplSetGetRBID(BSONObjBuilder* resultObj) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ resultObj->append("rbid", _rbid);
+ return Status::OK();
+}
+
+void ReplicationCoordinatorImpl::incrementRollbackID() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ ++_rbid;
+}
+
+Status ReplicationCoordinatorImpl::processReplSetFresh(const ReplSetFreshArgs& args,
+ BSONObjBuilder* resultObj) {
+ Status result(ErrorCodes::InternalError, "didn't set status in prepareFreshResponse");
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_processReplSetFresh_finish,
+ this,
+ stdx::placeholders::_1,
+ args,
+ resultObj,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
+ }
+ fassert(18652, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_processReplSetFresh_finish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetFreshArgs& args,
+ BSONObjBuilder* response,
+ Status* result) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ *result = Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
+ return;
+ }
+
+ _topCoord->prepareFreshResponse(args, _replExecutor.now(), getMyLastOptime(), response, result);
+}
+
+Status ReplicationCoordinatorImpl::processReplSetElect(const ReplSetElectArgs& args,
+ BSONObjBuilder* responseObj) {
+ Status result = Status(ErrorCodes::InternalError, "status not set by callback");
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_processReplSetElect_finish,
+ this,
+ stdx::placeholders::_1,
+ args,
+ responseObj,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
+ }
+ fassert(18657, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_processReplSetElect_finish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetElectArgs& args,
+ BSONObjBuilder* response,
+ Status* result) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ *result = Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
+ return;
+ }
+
+ _topCoord->prepareElectResponse(args, _replExecutor.now(), getMyLastOptime(), response, result);
+}
+
+ReplicationCoordinatorImpl::PostMemberStateUpdateAction
+ReplicationCoordinatorImpl::_setCurrentRSConfig_inlock(const ReplicaSetConfig& newConfig,
+ int myIndex) {
+ invariant(_settings.usingReplSets());
+ _cancelHeartbeats();
+ _setConfigState_inlock(kConfigSteady);
+ // Must get this before changing our config.
+ OpTime myOptime = _getMyLastOptime_inlock();
+ _topCoord->updateConfig(newConfig, myIndex, _replExecutor.now(), myOptime);
+ _rsConfig = newConfig;
+ log() << "New replica set config in use: " << _rsConfig.toBSON() << rsLog;
+ _selfIndex = myIndex;
+ if (_selfIndex >= 0) {
+ log() << "This node is " << _rsConfig.getMemberAt(_selfIndex).getHostAndPort()
+ << " in the config";
+ } else {
+ log() << "This node is not a member of the config";
+ }
+
+ const PostMemberStateUpdateAction action = _updateMemberStateFromTopologyCoordinator_inlock();
+ _updateSlaveInfoFromConfig_inlock();
+ if (_selfIndex >= 0) {
+        // Don't send heartbeats if we're not in the config; if we get re-added, one of the
+        // nodes in the set will contact us.
+ _startHeartbeats();
+ }
+ _wakeReadyWaiters_inlock();
+ return action;
+}
+
+void ReplicationCoordinatorImpl::_wakeReadyWaiters_inlock() {
+ for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
+ it != _replicationWaiterList.end();
+ ++it) {
+ WaiterInfo* info = *it;
+ if (_doneWaitingForReplication_inlock(*info->opTime, *info->writeConcern)) {
+ info->condVar->notify_all();
}
-
- _topCoord->prepareFreshResponse(
- args, _replExecutor.now(), getMyLastOptime(), response, result);
}
+}
- Status ReplicationCoordinatorImpl::processReplSetElect(const ReplSetElectArgs& args,
- BSONObjBuilder* responseObj) {
- Status result = Status(ErrorCodes::InternalError, "status not set by callback");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_processReplSetElect_finish,
- this,
- stdx::placeholders::_1,
- args,
- responseObj,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
- }
- fassert(18657, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return result;
- }
-
- void ReplicationCoordinatorImpl::_processReplSetElect_finish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetElectArgs& args,
- BSONObjBuilder* response,
- Status* result) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- *result = Status(ErrorCodes::ShutdownInProgress, "replication shutdown in progress");
- return;
- }
-
- _topCoord->prepareElectResponse(
- args, _replExecutor.now(), getMyLastOptime(), response, result);
- }
-
- ReplicationCoordinatorImpl::PostMemberStateUpdateAction
- ReplicationCoordinatorImpl::_setCurrentRSConfig_inlock(
- const ReplicaSetConfig& newConfig,
- int myIndex) {
- invariant(_settings.usingReplSets());
- _cancelHeartbeats();
- _setConfigState_inlock(kConfigSteady);
- // Must get this before changing our config.
- OpTime myOptime = _getMyLastOptime_inlock();
- _topCoord->updateConfig(
- newConfig,
- myIndex,
- _replExecutor.now(),
- myOptime);
- _rsConfig = newConfig;
- log() << "New replica set config in use: " << _rsConfig.toBSON() << rsLog;
- _selfIndex = myIndex;
- if (_selfIndex >= 0) {
- log() << "This node is " <<
- _rsConfig.getMemberAt(_selfIndex).getHostAndPort() << " in the config";
- }
- else {
- log() << "This node is not a member of the config";
- }
-
- const PostMemberStateUpdateAction action =
- _updateMemberStateFromTopologyCoordinator_inlock();
- _updateSlaveInfoFromConfig_inlock();
- if (_selfIndex >= 0) {
- // Don't send heartbeats if we're not in the config, if we get re-added one of the
- // nodes in the set will contact us.
- _startHeartbeats();
- }
- _wakeReadyWaiters_inlock();
- return action;
- }
-
- void ReplicationCoordinatorImpl::_wakeReadyWaiters_inlock(){
- for (std::vector<WaiterInfo*>::iterator it = _replicationWaiterList.begin();
- it != _replicationWaiterList.end(); ++it) {
- WaiterInfo* info = *it;
- if (_doneWaitingForReplication_inlock(*info->opTime, *info->writeConcern)) {
- info->condVar->notify_all();
- }
+Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(const UpdatePositionArgs& updates,
+ long long* configVersion) {
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
+ Status status = Status::OK();
+ bool somethingChanged = false;
+ for (UpdatePositionArgs::UpdateIterator update = updates.updatesBegin();
+ update != updates.updatesEnd();
+ ++update) {
+ status = _setLastOptime_inlock(*update, configVersion);
+ if (!status.isOK()) {
+ break;
}
+ somethingChanged = true;
}
- Status ReplicationCoordinatorImpl::processReplSetUpdatePosition(
- const UpdatePositionArgs& updates, long long* configVersion) {
-
- stdx::unique_lock<stdx::mutex> lock(_mutex);
- Status status = Status::OK();
- bool somethingChanged = false;
- for (UpdatePositionArgs::UpdateIterator update = updates.updatesBegin();
- update != updates.updatesEnd();
- ++update) {
- status = _setLastOptime_inlock(*update, configVersion);
- if (!status.isOK()) {
- break;
- }
- somethingChanged = true;
- }
-
- if (somethingChanged && !_getMemberState_inlock().primary()) {
- lock.unlock();
- // Must do this outside _mutex
- // TODO: enable _dr, remove _externalState when DataReplicator is used excl.
- //_dr.slavesHaveProgressed();
- _externalState->forwardSlaveProgress();
- }
- return status;
+ if (somethingChanged && !_getMemberState_inlock().primary()) {
+ lock.unlock();
+ // Must do this outside _mutex
+ // TODO: enable _dr, remove _externalState when DataReplicator is used excl.
+ //_dr.slavesHaveProgressed();
+ _externalState->forwardSlaveProgress();
}
+ return status;
+}
- Status ReplicationCoordinatorImpl::processHandshake(OperationContext* txn,
- const HandshakeArgs& handshake) {
- LOG(2) << "Received handshake " << handshake.toBSON();
-
- stdx::unique_lock<stdx::mutex> lock(_mutex);
-
- if (getReplicationMode() != modeMasterSlave) {
- return Status(ErrorCodes::IllegalOperation,
- "The handshake command is only used for master/slave replication");
- }
-
- SlaveInfo* slaveInfo = _findSlaveInfoByRID_inlock(handshake.getRid());
- if (slaveInfo) {
- return Status::OK(); // nothing to do
- }
-
- SlaveInfo newSlaveInfo;
- newSlaveInfo.rid = handshake.getRid();
- newSlaveInfo.memberId = -1;
- newSlaveInfo.hostAndPort = _externalState->getClientHostAndPort(txn);
- // Don't call _addSlaveInfo_inlock as that would wake sleepers unnecessarily.
- _slaveInfo.push_back(newSlaveInfo);
+Status ReplicationCoordinatorImpl::processHandshake(OperationContext* txn,
+ const HandshakeArgs& handshake) {
+ LOG(2) << "Received handshake " << handshake.toBSON();
- return Status::OK();
- }
+ stdx::unique_lock<stdx::mutex> lock(_mutex);
- bool ReplicationCoordinatorImpl::buildsIndexes() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (_selfIndex == -1) {
- return true;
- }
- const MemberConfig& self = _rsConfig.getMemberAt(_selfIndex);
- return self.shouldBuildIndexes();
+ if (getReplicationMode() != modeMasterSlave) {
+ return Status(ErrorCodes::IllegalOperation,
+ "The handshake command is only used for master/slave replication");
}
- std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const OpTime& op) {
- std::vector<HostAndPort> hosts;
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- for (size_t i = 0; i < _slaveInfo.size(); ++i) {
- const SlaveInfo& slaveInfo = _slaveInfo[i];
- if (slaveInfo.opTime < op) {
- continue;
- }
-
- if (getReplicationMode() == modeMasterSlave && slaveInfo.rid == _getMyRID_inlock()) {
- // Master-slave doesn't know the HostAndPort for itself at this point.
- continue;
- }
- hosts.push_back(slaveInfo.hostAndPort);
- }
- return hosts;
+ SlaveInfo* slaveInfo = _findSlaveInfoByRID_inlock(handshake.getRid());
+ if (slaveInfo) {
+ return Status::OK(); // nothing to do
}
- std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- invariant(_settings.usingReplSets());
+ SlaveInfo newSlaveInfo;
+ newSlaveInfo.rid = handshake.getRid();
+ newSlaveInfo.memberId = -1;
+ newSlaveInfo.hostAndPort = _externalState->getClientHostAndPort(txn);
+ // Don't call _addSlaveInfo_inlock as that would wake sleepers unnecessarily.
+ _slaveInfo.push_back(newSlaveInfo);
- std::vector<HostAndPort> nodes;
- if (_selfIndex == -1) {
- return nodes;
- }
-
- for (int i = 0; i < _rsConfig.getNumMembers(); ++i) {
- if (i == _selfIndex)
- continue;
-
- nodes.push_back(_rsConfig.getMemberAt(i).getHostAndPort());
- }
- return nodes;
- }
+ return Status::OK();
+}
- Status ReplicationCoordinatorImpl::checkIfWriteConcernCanBeSatisfied(
- const WriteConcernOptions& writeConcern) const {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
+bool ReplicationCoordinatorImpl::buildsIndexes() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (_selfIndex == -1) {
+ return true;
}
+ const MemberConfig& self = _rsConfig.getMemberAt(_selfIndex);
+ return self.shouldBuildIndexes();
+}
- Status ReplicationCoordinatorImpl::_checkIfWriteConcernCanBeSatisfied_inlock(
- const WriteConcernOptions& writeConcern) const {
- if (getReplicationMode() == modeNone) {
- return Status(ErrorCodes::NoReplicationEnabled,
- "No replication enabled when checking if write concern can be satisfied");
+std::vector<HostAndPort> ReplicationCoordinatorImpl::getHostsWrittenTo(const OpTime& op) {
+ std::vector<HostAndPort> hosts;
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ for (size_t i = 0; i < _slaveInfo.size(); ++i) {
+ const SlaveInfo& slaveInfo = _slaveInfo[i];
+ if (slaveInfo.opTime < op) {
+ continue;
}
- if (getReplicationMode() == modeMasterSlave) {
- if (!writeConcern.wMode.empty()) {
- return Status(ErrorCodes::UnknownReplWriteConcern,
- "Cannot use named write concern modes in master-slave");
- }
- // No way to know how many slaves there are, so assume any numeric mode is possible.
- return Status::OK();
+ if (getReplicationMode() == modeMasterSlave && slaveInfo.rid == _getMyRID_inlock()) {
+ // Master-slave doesn't know the HostAndPort for itself at this point.
+ continue;
}
-
- invariant(getReplicationMode() == modeReplSet);
- return _rsConfig.checkIfWriteConcernCanBeSatisfied(writeConcern);
+ hosts.push_back(slaveInfo.hostAndPort);
}
+ return hosts;
+}
- WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (_rsConfig.isInitialized()) {
- return _rsConfig.getDefaultWriteConcern();
- }
- return WriteConcernOptions();
- }
-
- Status ReplicationCoordinatorImpl::checkReplEnabledForCommand(BSONObjBuilder* result) {
- if (!_settings.usingReplSets()) {
- if (serverGlobalParams.configsvr) {
- result->append("info", "configsvr"); // for shell prompt
- }
- return Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet");
- }
+std::vector<HostAndPort> ReplicationCoordinatorImpl::getOtherNodesInReplSet() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ invariant(_settings.usingReplSets());
- if (getMemberState().startup()) {
- result->append("info", "run rs.initiate(...) if not yet done for the set");
- return Status(ErrorCodes::NotYetInitialized, "no replset config has been received");
- }
-
- return Status::OK();
- }
-
- bool ReplicationCoordinatorImpl::isReplEnabled() const {
- return getReplicationMode() != modeNone;
+ std::vector<HostAndPort> nodes;
+ if (_selfIndex == -1) {
+ return nodes;
}
- void ReplicationCoordinatorImpl::_chooseNewSyncSource(
- const ReplicationExecutor::CallbackArgs& cbData,
- HostAndPort* newSyncSource) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- *newSyncSource = _topCoord->chooseNewSyncSource(_replExecutor.now(),
- getMyLastOptime());
- }
+ for (int i = 0; i < _rsConfig.getNumMembers(); ++i) {
+ if (i == _selfIndex)
+ continue;
- HostAndPort ReplicationCoordinatorImpl::chooseNewSyncSource() {
- HostAndPort newSyncSource;
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_chooseNewSyncSource,
- this,
- stdx::placeholders::_1,
- &newSyncSource));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return newSyncSource; // empty
- }
- fassert(18740, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return newSyncSource;
+ nodes.push_back(_rsConfig.getMemberAt(i).getHostAndPort());
}
+ return nodes;
+}
- void ReplicationCoordinatorImpl::_blacklistSyncSource(
- const ReplicationExecutor::CallbackArgs& cbData,
- const HostAndPort& host,
- Date_t until) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- _topCoord->blacklistSyncSource(host, until);
-
- CBHStatus cbh = _replExecutor.scheduleWorkAt(
- until,
- stdx::bind(&ReplicationCoordinatorImpl::_unblacklistSyncSource,
- this,
- stdx::placeholders::_1,
- host));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(28610, cbh.getStatus());
- }
+Status ReplicationCoordinatorImpl::checkIfWriteConcernCanBeSatisfied(
+ const WriteConcernOptions& writeConcern) const {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ return _checkIfWriteConcernCanBeSatisfied_inlock(writeConcern);
+}
- void ReplicationCoordinatorImpl::_unblacklistSyncSource(
- const ReplicationExecutor::CallbackArgs& cbData,
- const HostAndPort& host) {
- if (cbData.status == ErrorCodes::CallbackCanceled)
- return;
- _topCoord->unblacklistSyncSource(host, _replExecutor.now());
+Status ReplicationCoordinatorImpl::_checkIfWriteConcernCanBeSatisfied_inlock(
+ const WriteConcernOptions& writeConcern) const {
+ if (getReplicationMode() == modeNone) {
+ return Status(ErrorCodes::NoReplicationEnabled,
+ "No replication enabled when checking if write concern can be satisfied");
}
- void ReplicationCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Date_t until) {
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_blacklistSyncSource,
- this,
- stdx::placeholders::_1,
- host,
- until));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(18741, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- }
-
- void ReplicationCoordinatorImpl::resetLastOpTimeFromOplog(OperationContext* txn) {
- StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
- OpTime lastOpTime;
- if (!lastOpTimeStatus.isOK()) {
- warning() << "Failed to load timestamp of most recently applied operation; " <<
- lastOpTimeStatus.getStatus();
- }
- else {
- lastOpTime = lastOpTimeStatus.getValue();
+ if (getReplicationMode() == modeMasterSlave) {
+ if (!writeConcern.wMode.empty()) {
+ return Status(ErrorCodes::UnknownReplWriteConcern,
+ "Cannot use named write concern modes in master-slave");
}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _setMyLastOptime_inlock(&lk, lastOpTime, true);
- _externalState->setGlobalTimestamp(lastOpTime.getTimestamp());
+ // No way to know how many slaves there are, so assume any numeric mode is possible.
+ return Status::OK();
}
- void ReplicationCoordinatorImpl::_shouldChangeSyncSource(
- const ReplicationExecutor::CallbackArgs& cbData,
- const HostAndPort& currentSource,
- bool* shouldChange) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
-
- *shouldChange = _topCoord->shouldChangeSyncSource(currentSource, _replExecutor.now());
+ invariant(getReplicationMode() == modeReplSet);
+ return _rsConfig.checkIfWriteConcernCanBeSatisfied(writeConcern);
+}
+
+WriteConcernOptions ReplicationCoordinatorImpl::getGetLastErrorDefault() {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ if (_rsConfig.isInitialized()) {
+ return _rsConfig.getDefaultWriteConcern();
+ }
+ return WriteConcernOptions();
+}
+
+Status ReplicationCoordinatorImpl::checkReplEnabledForCommand(BSONObjBuilder* result) {
+ if (!_settings.usingReplSets()) {
+ if (serverGlobalParams.configsvr) {
+ result->append("info", "configsvr"); // for shell prompt
+ }
+ return Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet");
+ }
+
+ if (getMemberState().startup()) {
+ result->append("info", "run rs.initiate(...) if not yet done for the set");
+ return Status(ErrorCodes::NotYetInitialized, "no replset config has been received");
+ }
+
+ return Status::OK();
+}
+
+bool ReplicationCoordinatorImpl::isReplEnabled() const {
+ return getReplicationMode() != modeNone;
+}
+
+void ReplicationCoordinatorImpl::_chooseNewSyncSource(
+ const ReplicationExecutor::CallbackArgs& cbData, HostAndPort* newSyncSource) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ *newSyncSource = _topCoord->chooseNewSyncSource(_replExecutor.now(), getMyLastOptime());
+}
+
+HostAndPort ReplicationCoordinatorImpl::chooseNewSyncSource() {
+ HostAndPort newSyncSource;
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_chooseNewSyncSource,
+ this,
+ stdx::placeholders::_1,
+ &newSyncSource));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return newSyncSource; // empty
+ }
+ fassert(18740, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return newSyncSource;
+}
+
+void ReplicationCoordinatorImpl::_blacklistSyncSource(
+ const ReplicationExecutor::CallbackArgs& cbData, const HostAndPort& host, Date_t until) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ _topCoord->blacklistSyncSource(host, until);
+
+ CBHStatus cbh =
+ _replExecutor.scheduleWorkAt(until,
+ stdx::bind(&ReplicationCoordinatorImpl::_unblacklistSyncSource,
+ this,
+ stdx::placeholders::_1,
+ host));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(28610, cbh.getStatus());
+}
+
+void ReplicationCoordinatorImpl::_unblacklistSyncSource(
+ const ReplicationExecutor::CallbackArgs& cbData, const HostAndPort& host) {
+ if (cbData.status == ErrorCodes::CallbackCanceled)
+ return;
+ _topCoord->unblacklistSyncSource(host, _replExecutor.now());
+}
+
+void ReplicationCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Date_t until) {
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_blacklistSyncSource,
+ this,
+ stdx::placeholders::_1,
+ host,
+ until));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(18741, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+}
+
+void ReplicationCoordinatorImpl::resetLastOpTimeFromOplog(OperationContext* txn) {
+ StatusWith<OpTime> lastOpTimeStatus = _externalState->loadLastOpTime(txn);
+ OpTime lastOpTime;
+ if (!lastOpTimeStatus.isOK()) {
+ warning() << "Failed to load timestamp of most recently applied operation; "
+ << lastOpTimeStatus.getStatus();
+ } else {
+ lastOpTime = lastOpTimeStatus.getValue();
+ }
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _setMyLastOptime_inlock(&lk, lastOpTime, true);
+ _externalState->setGlobalTimestamp(lastOpTime.getTimestamp());
+}
+
+void ReplicationCoordinatorImpl::_shouldChangeSyncSource(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const HostAndPort& currentSource,
+ bool* shouldChange) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+
+ *shouldChange = _topCoord->shouldChangeSyncSource(currentSource, _replExecutor.now());
+}
+
+bool ReplicationCoordinatorImpl::shouldChangeSyncSource(const HostAndPort& currentSource) {
+ bool shouldChange(false);
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_shouldChangeSyncSource,
+ this,
+ stdx::placeholders::_1,
+ currentSource,
+ &shouldChange));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return false;
}
+ fassert(18906, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return shouldChange;
+}
- bool ReplicationCoordinatorImpl::shouldChangeSyncSource(const HostAndPort& currentSource) {
- bool shouldChange(false);
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_shouldChangeSyncSource,
- this,
- stdx::placeholders::_1,
- currentSource,
- &shouldChange));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return false;
- }
- fassert(18906, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return shouldChange;
+void ReplicationCoordinatorImpl::_updateLastCommittedOpTime_inlock() {
+ if (!_getMemberState_inlock().primary()) {
+ return;
}
+ StatusWith<ReplicaSetTagPattern> tagPattern =
+ _rsConfig.findCustomWriteMode(ReplicaSetConfig::kMajorityWriteConcernModeName);
+ invariant(tagPattern.isOK());
+ ReplicaSetTagMatch matcher{tagPattern.getValue()};
- void ReplicationCoordinatorImpl::_updateLastCommittedOpTime_inlock() {
- if (!_getMemberState_inlock().primary()) {
- return;
- }
- StatusWith<ReplicaSetTagPattern> tagPattern =
- _rsConfig.findCustomWriteMode(ReplicaSetConfig::kMajorityWriteConcernModeName);
- invariant(tagPattern.isOK());
- ReplicaSetTagMatch matcher{tagPattern.getValue()};
-
- std::vector<OpTime> votingNodesOpTimes;
+ std::vector<OpTime> votingNodesOpTimes;
- for (const auto& sI : _slaveInfo) {
- auto memberConfig = _rsConfig.findMemberByID(sI.memberId);
- invariant(memberConfig);
- for (auto tagIt = memberConfig->tagsBegin();
- tagIt != memberConfig->tagsEnd(); ++tagIt) {
- if (matcher.update(*tagIt)) {
- votingNodesOpTimes.push_back(sI.opTime);
- break;
- }
+ for (const auto& sI : _slaveInfo) {
+ auto memberConfig = _rsConfig.findMemberByID(sI.memberId);
+ invariant(memberConfig);
+ for (auto tagIt = memberConfig->tagsBegin(); tagIt != memberConfig->tagsEnd(); ++tagIt) {
+ if (matcher.update(*tagIt)) {
+ votingNodesOpTimes.push_back(sI.opTime);
+ break;
}
}
- invariant(votingNodesOpTimes.size() > 0);
- std::sort(votingNodesOpTimes.begin(), votingNodesOpTimes.end());
-
- // Use the index of the minimum quorum in the vector of nodes.
- _lastCommittedOpTime = votingNodesOpTimes[(votingNodesOpTimes.size() - 1) / 2];
}
+ invariant(votingNodesOpTimes.size() > 0);
+ std::sort(votingNodesOpTimes.begin(), votingNodesOpTimes.end());
- OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- return _lastCommittedOpTime;
- }
-
- Status ReplicationCoordinatorImpl::processReplSetRequestVotes(
- OperationContext* txn,
- const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response) {
- if (!isV1ElectionProtocol()) {
- return {ErrorCodes::BadValue, "not using election protocol v1"};
- }
-
- updateTerm(args.getTerm());
-
- Status result{ErrorCodes::InternalError, "didn't set status in processReplSetRequestVotes"};
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_processReplSetRequestVotes_finish,
- this,
- stdx::placeholders::_1,
- args,
- response,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return cbh.getStatus();
- }
- _replExecutor.wait(cbh.getValue());
- if (response->getVoteGranted()) {
- LastVote lastVote;
- lastVote.setTerm(args.getTerm());
- lastVote.setCandidateId(args.getCandidateId());
-
- Status status = _externalState->storeLocalLastVoteDocument(txn, lastVote);
- if (!status.isOK()) {
- error() << "replSetRequestVotes failed to store LastVote document; " << status;
- return status;
- }
-
- }
- return result;
- }
+ // Use the index of the minimum quorum in the vector of nodes.
+ _lastCommittedOpTime = votingNodesOpTimes[(votingNodesOpTimes.size() - 1) / 2];
+}
- void ReplicationCoordinatorImpl::_processReplSetRequestVotes_finish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response,
- Status* result) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
- return;
- }
+OpTime ReplicationCoordinatorImpl::getLastCommittedOpTime() const {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ return _lastCommittedOpTime;
+}
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _topCoord->processReplSetRequestVotes(args, response, getMyLastOptime());
- *result = Status::OK();
+Status ReplicationCoordinatorImpl::processReplSetRequestVotes(
+ OperationContext* txn,
+ const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response) {
+ if (!isV1ElectionProtocol()) {
+ return {ErrorCodes::BadValue, "not using election protocol v1"};
}
- Status ReplicationCoordinatorImpl::processReplSetDeclareElectionWinner(
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm) {
- if (!isV1ElectionProtocol()) {
- return {ErrorCodes::BadValue, "not using election protocol v1"};
- }
+ updateTerm(args.getTerm());
- updateTerm(args.getTerm());
-
- Status result{ErrorCodes::InternalError,
- "didn't set status in processReplSetDeclareElectionWinner"};
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_processReplSetDeclareElectionWinner_finish,
- this,
- stdx::placeholders::_1,
- args,
- responseTerm,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return cbh.getStatus();
- }
- _replExecutor.wait(cbh.getValue());
- return result;
+ Status result{ErrorCodes::InternalError, "didn't set status in processReplSetRequestVotes"};
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_processReplSetRequestVotes_finish,
+ this,
+ stdx::placeholders::_1,
+ args,
+ response,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return cbh.getStatus();
}
+ _replExecutor.wait(cbh.getValue());
+ if (response->getVoteGranted()) {
+ LastVote lastVote;
+ lastVote.setTerm(args.getTerm());
+ lastVote.setCandidateId(args.getCandidateId());
- void ReplicationCoordinatorImpl::_processReplSetDeclareElectionWinner_finish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm,
- Status* result) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
- return;
- }
- *result = _topCoord->processReplSetDeclareElectionWinner(args, responseTerm);
- }
-
- void ReplicationCoordinatorImpl::prepareCursorResponseInfo(BSONObjBuilder* objBuilder) {
- if (getReplicationMode() == modeReplSet && isV1ElectionProtocol()) {
- BSONObjBuilder replObj(objBuilder->subobjStart("repl"));
- _topCoord->prepareCursorResponseInfo(objBuilder, getLastCommittedOpTime());
- replObj.done();
+ Status status = _externalState->storeLocalLastVoteDocument(txn, lastVote);
+ if (!status.isOK()) {
+ error() << "replSetRequestVotes failed to store LastVote document; " << status;
+ return status;
}
}
-
- bool ReplicationCoordinatorImpl::isV1ElectionProtocol() {
- return getConfig().getProtocolVersion() == 1;
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_processReplSetRequestVotes_finish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response,
+ Status* result) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
+ return;
+ }
+
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _topCoord->processReplSetRequestVotes(args, response, getMyLastOptime());
+ *result = Status::OK();
+}
+
+Status ReplicationCoordinatorImpl::processReplSetDeclareElectionWinner(
+ const ReplSetDeclareElectionWinnerArgs& args, long long* responseTerm) {
+ if (!isV1ElectionProtocol()) {
+ return {ErrorCodes::BadValue, "not using election protocol v1"};
+ }
+
+ updateTerm(args.getTerm());
+
+ Status result{ErrorCodes::InternalError,
+ "didn't set status in processReplSetDeclareElectionWinner"};
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_processReplSetDeclareElectionWinner_finish,
+ this,
+ stdx::placeholders::_1,
+ args,
+ responseTerm,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return cbh.getStatus();
+ }
+ _replExecutor.wait(cbh.getValue());
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_processReplSetDeclareElectionWinner_finish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetDeclareElectionWinnerArgs& args,
+ long long* responseTerm,
+ Status* result) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
+ return;
+ }
+ *result = _topCoord->processReplSetDeclareElectionWinner(args, responseTerm);
+}
+
+void ReplicationCoordinatorImpl::prepareCursorResponseInfo(BSONObjBuilder* objBuilder) {
+ if (getReplicationMode() == modeReplSet && isV1ElectionProtocol()) {
+ BSONObjBuilder replObj(objBuilder->subobjStart("repl"));
+ _topCoord->prepareCursorResponseInfo(objBuilder, getLastCommittedOpTime());
+ replObj.done();
+ }
+}
+
+bool ReplicationCoordinatorImpl::isV1ElectionProtocol() {
+ return getConfig().getProtocolVersion() == 1;
+}
+
+Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
+ ReplSetHeartbeatResponse* response) {
+ {
+ stdx::lock_guard<stdx::mutex> lock(_mutex);
+ if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
+ return Status(ErrorCodes::NotYetInitialized,
+ "Received heartbeat while still initializing replication system");
+ }
+ }
+
+ Status result(ErrorCodes::InternalError, "didn't set status in prepareHeartbeatResponse");
+ CBHStatus cbh = _replExecutor.scheduleWork(
+ stdx::bind(&ReplicationCoordinatorImpl::_processHeartbeatFinishV1,
+ this,
+ stdx::placeholders::_1,
+ args,
+ response,
+ &result));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return {ErrorCodes::ShutdownInProgress, "replication shutdown in progress"};
+ }
+ fassert(28645, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return result;
+}
+
+void ReplicationCoordinatorImpl::_processHeartbeatFinishV1(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetHeartbeatArgsV1& args,
+ ReplSetHeartbeatResponse* response,
+ Status* outStatus) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ *outStatus = {ErrorCodes::ShutdownInProgress, "Replication shutdown in progress"};
+ return;
+ }
+ fassert(28655, cbData.status);
+ const Date_t now = _replExecutor.now();
+ *outStatus = _topCoord->prepareHeartbeatResponseV1(
+ now, args, _settings.ourSetName(), getMyLastOptime(), response);
+ if ((outStatus->isOK() || *outStatus == ErrorCodes::InvalidReplicaSetConfig) &&
+ _selfIndex < 0) {
+ // If this node does not belong to the configuration it knows about, send heartbeats
+ // back to any node that sends us a heartbeat, in case one of those remote nodes has
+ // a configuration that contains us. Chances are excellent that it will, since that
+ // is the only reason for a remote node to send this node a heartbeat request.
+ if (!args.getSenderHost().empty() && _seedList.insert(args.getSenderHost()).second) {
+ _scheduleHeartbeatToTarget(args.getSenderHost(), -1, now);
+ }
+ }
+}
+
+void ReplicationCoordinatorImpl::summarizeAsHtml(ReplSetHtmlSummary* output) {
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_summarizeAsHtml_finish,
+ this,
+ stdx::placeholders::_1,
+ output));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(28638, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+}
+
+void ReplicationCoordinatorImpl::_summarizeAsHtml_finish(const CallbackArgs& cbData,
+ ReplSetHtmlSummary* output) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+
+ output->setSelfOptime(getMyLastOptime());
+ output->setSelfUptime(time(0) - serverGlobalParams.started);
+ output->setNow(_replExecutor.now());
+
+ _topCoord->summarizeAsHtml(output);
+}
+
+long long ReplicationCoordinatorImpl::getTerm() {
+ long long term = OpTime::kDefaultTerm;
+ CBHStatus cbh = _replExecutor.scheduleWork(stdx::bind(
+ &ReplicationCoordinatorImpl::_getTerm_helper, this, stdx::placeholders::_1, &term));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return term;
}
-
- Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
- ReplSetHeartbeatResponse* response) {
- {
- stdx::lock_guard<stdx::mutex> lock(_mutex);
- if (_rsConfigState == kConfigPreStart || _rsConfigState == kConfigStartingUp) {
- return Status(ErrorCodes::NotYetInitialized,
- "Received heartbeat while still initializing replication system");
- }
- }
-
- Status result(ErrorCodes::InternalError, "didn't set status in prepareHeartbeatResponse");
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_processHeartbeatFinishV1,
- this,
- stdx::placeholders::_1,
- args,
- response,
- &result));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return {ErrorCodes::ShutdownInProgress, "replication shutdown in progress"};
- }
- fassert(28645, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return result;
+ fassert(28660, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return term;
+}
+
+void ReplicationCoordinatorImpl::_getTerm_helper(const ReplicationExecutor::CallbackArgs& cbData,
+ long long* term) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ *term = _topCoord->getTerm();
+}
+
+bool ReplicationCoordinatorImpl::updateTerm(long long term) {
+ bool updated = false;
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_updateTerm_helper,
+ this,
+ stdx::placeholders::_1,
+ term,
+ &updated,
+ nullptr));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return false;
}
-
- void ReplicationCoordinatorImpl::_processHeartbeatFinishV1(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetHeartbeatArgsV1& args,
- ReplSetHeartbeatResponse* response,
- Status* outStatus) {
-
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- *outStatus = {ErrorCodes::ShutdownInProgress, "Replication shutdown in progress"};
- return;
- }
- fassert(28655, cbData.status);
- const Date_t now = _replExecutor.now();
- *outStatus = _topCoord->prepareHeartbeatResponseV1(
- now,
- args,
- _settings.ourSetName(),
- getMyLastOptime(),
- response);
- if ((outStatus->isOK() || *outStatus == ErrorCodes::InvalidReplicaSetConfig) &&
- _selfIndex < 0) {
- // If this node does not belong to the configuration it knows about, send heartbeats
- // back to any node that sends us a heartbeat, in case one of those remote nodes has
- // a configuration that contains us. Chances are excellent that it will, since that
- // is the only reason for a remote node to send this node a heartbeat request.
- if (!args.getSenderHost().empty() && _seedList.insert(args.getSenderHost()).second) {
- _scheduleHeartbeatToTarget(args.getSenderHost(), -1, now);
- }
- }
+ fassert(28670, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ return updated;
+}
+
+bool ReplicationCoordinatorImpl::updateTerm_forTest(long long term) {
+ bool updated = false;
+ Handle cbHandle;
+ CBHStatus cbh =
+ _replExecutor.scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_updateTerm_helper,
+ this,
+ stdx::placeholders::_1,
+ term,
+ &updated,
+ &cbHandle));
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return false;
}
+ fassert(28673, cbh.getStatus());
+ _replExecutor.wait(cbh.getValue());
+ _replExecutor.wait(cbHandle);
+ return updated;
+}
- void ReplicationCoordinatorImpl::summarizeAsHtml(ReplSetHtmlSummary* output) {
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_summarizeAsHtml_finish,
- this,
- stdx::placeholders::_1,
- output));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(28638, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
+void ReplicationCoordinatorImpl::_updateTerm_helper(const ReplicationExecutor::CallbackArgs& cbData,
+ long long term,
+ bool* updated,
+ Handle* cbHandle) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
}
- void ReplicationCoordinatorImpl::_summarizeAsHtml_finish(const CallbackArgs& cbData,
- ReplSetHtmlSummary* output) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
-
- output->setSelfOptime(getMyLastOptime());
- output->setSelfUptime(time(0) - serverGlobalParams.started);
- output->setNow(_replExecutor.now());
+ *updated = _updateTerm_incallback(term, cbHandle);
+}
- _topCoord->summarizeAsHtml(output);
- }
-
- long long ReplicationCoordinatorImpl::getTerm() {
- long long term = OpTime::kDefaultTerm;
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_getTerm_helper,
- this,
- stdx::placeholders::_1,
- &term));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return term;
- }
- fassert(28660, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return term;
- }
+bool ReplicationCoordinatorImpl::_updateTerm_incallback(long long term, Handle* cbHandle) {
+ bool updated = _topCoord->updateTerm(term);
- void ReplicationCoordinatorImpl::_getTerm_helper(
- const ReplicationExecutor::CallbackArgs& cbData,
- long long* term) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- *term = _topCoord->getTerm();
- }
-
- bool ReplicationCoordinatorImpl::updateTerm(long long term) {
- bool updated = false;
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_updateTerm_helper,
- this,
- stdx::placeholders::_1,
- term,
- &updated,
- nullptr));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return false;
- }
- fassert(28670, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- return updated;
- }
-
- bool ReplicationCoordinatorImpl::updateTerm_forTest(long long term) {
- bool updated = false;
- Handle cbHandle;
- CBHStatus cbh = _replExecutor.scheduleWork(
- stdx::bind(&ReplicationCoordinatorImpl::_updateTerm_helper,
- this,
- stdx::placeholders::_1,
- term,
- &updated,
- &cbHandle));
+ if (updated && getMemberState().primary()) {
+ log() << "stepping down from primary, because a new term has begun";
+ _topCoord->prepareForStepDown();
+ CBHStatus cbh = _replExecutor.scheduleWorkWithGlobalExclusiveLock(
+ stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish, this, stdx::placeholders::_1));
if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return false;
- }
- fassert(28673, cbh.getStatus());
- _replExecutor.wait(cbh.getValue());
- _replExecutor.wait(cbHandle);
- return updated;
- }
-
- void ReplicationCoordinatorImpl::_updateTerm_helper(
- const ReplicationExecutor::CallbackArgs& cbData,
- long long term,
- bool* updated,
- Handle* cbHandle) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
+ return true;
}
-
- *updated = _updateTerm_incallback(term, cbHandle);
- }
-
- bool ReplicationCoordinatorImpl::_updateTerm_incallback(long long term, Handle* cbHandle) {
- bool updated = _topCoord->updateTerm(term);
-
- if (updated && getMemberState().primary()) {
- log() << "stepping down from primary, because a new term has begun";
- _topCoord->prepareForStepDown();
- CBHStatus cbh = _replExecutor.scheduleWorkWithGlobalExclusiveLock(
- stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish,
- this,
- stdx::placeholders::_1));
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return true;
- }
- fassert(28672, cbh.getStatus());
- if (cbHandle) {
- *cbHandle = cbh.getValue();
- }
+ fassert(28672, cbh.getStatus());
+ if (cbHandle) {
+ *cbHandle = cbh.getValue();
}
- return updated;
}
+ return updated;
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 8fcd9671dae..7183145abcd 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -53,991 +53,973 @@
namespace mongo {
- class Timer;
- template <typename T> class StatusWith;
+class Timer;
+template <typename T>
+class StatusWith;
namespace repl {
- class ElectCmdRunner;
- class ElectionWinnerDeclarer;
- class FreshnessChecker;
- class HandshakeArgs;
- class HeartbeatResponseAction;
- class LastVote;
- class OplogReader;
- class ReplSetDeclareElectionWinnerArgs;
- class ReplSetRequestVotesArgs;
- class ReplicaSetConfig;
- class SyncSourceFeedback;
- class TopologyCoordinator;
- class VoteRequester;
+class ElectCmdRunner;
+class ElectionWinnerDeclarer;
+class FreshnessChecker;
+class HandshakeArgs;
+class HeartbeatResponseAction;
+class LastVote;
+class OplogReader;
+class ReplSetDeclareElectionWinnerArgs;
+class ReplSetRequestVotesArgs;
+class ReplicaSetConfig;
+class SyncSourceFeedback;
+class TopologyCoordinator;
+class VoteRequester;
- class ReplicationCoordinatorImpl : public ReplicationCoordinator,
- public KillOpListenerInterface {
- MONGO_DISALLOW_COPYING(ReplicationCoordinatorImpl);
+class ReplicationCoordinatorImpl : public ReplicationCoordinator, public KillOpListenerInterface {
+ MONGO_DISALLOW_COPYING(ReplicationCoordinatorImpl);
- public:
+public:
+ // Takes ownership of the "externalState", "topCoord" and "network" objects.
+ ReplicationCoordinatorImpl(const ReplSettings& settings,
+ ReplicationCoordinatorExternalState* externalState,
+ executor::NetworkInterface* network,
+ StorageInterface* storage,
+ TopologyCoordinator* topoCoord,
+ int64_t prngSeed);
+ // Takes ownership of the "externalState" and "topCoord" objects.
+ ReplicationCoordinatorImpl(const ReplSettings& settings,
+ ReplicationCoordinatorExternalState* externalState,
+ TopologyCoordinator* topoCoord,
+ ReplicationExecutor* replExec,
+ int64_t prngSeed);
+ virtual ~ReplicationCoordinatorImpl();
- // Takes ownership of the "externalState", "topCoord" and "network" objects.
- ReplicationCoordinatorImpl(const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- executor::NetworkInterface* network,
- StorageInterface* storage,
- TopologyCoordinator* topoCoord,
- int64_t prngSeed);
- // Takes ownership of the "externalState" and "topCoord" objects.
- ReplicationCoordinatorImpl(const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- TopologyCoordinator* topoCoord,
- ReplicationExecutor* replExec,
- int64_t prngSeed);
- virtual ~ReplicationCoordinatorImpl();
+ // ================== Members of public ReplicationCoordinator API ===================
- // ================== Members of public ReplicationCoordinator API ===================
+ virtual void startReplication(OperationContext* txn) override;
- virtual void startReplication(OperationContext* txn) override;
+ virtual void shutdown() override;
- virtual void shutdown() override;
+ virtual const ReplSettings& getSettings() const override;
- virtual const ReplSettings& getSettings() const override;
+ virtual Mode getReplicationMode() const override;
- virtual Mode getReplicationMode() const override;
+ virtual MemberState getMemberState() const override;
- virtual MemberState getMemberState() const override;
+ virtual bool isInPrimaryOrSecondaryState() const override;
- virtual bool isInPrimaryOrSecondaryState() const override;
+ virtual Seconds getSlaveDelaySecs() const override;
- virtual Seconds getSlaveDelaySecs() const override;
+ virtual void clearSyncSourceBlacklist() override;
- virtual void clearSyncSourceBlacklist() override;
+ /*
+ * Implementation of the KillOpListenerInterface interrupt method so that we can wake up
+ * threads blocked in awaitReplication() when a killOp command comes in.
+ */
+ virtual void interrupt(unsigned opId);
- /*
- * Implementation of the KillOpListenerInterface interrupt method so that we can wake up
- * threads blocked in awaitReplication() when a killOp command comes in.
- */
- virtual void interrupt(unsigned opId);
+ /*
+ * Implementation of the KillOpListenerInterface interruptAll method so that we can wake up
+ * threads blocked in awaitReplication() when we kill all operations.
+ */
+ virtual void interruptAll();
- /*
- * Implementation of the KillOpListenerInterface interruptAll method so that we can wake up
- * threads blocked in awaitReplication() when we kill all operations.
- */
- virtual void interruptAll();
+ virtual ReplicationCoordinator::StatusAndDuration awaitReplication(
+ OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern);
- virtual ReplicationCoordinator::StatusAndDuration awaitReplication(
- OperationContext* txn,
- const OpTime& opTime,
- const WriteConcernOptions& writeConcern);
+ virtual ReplicationCoordinator::StatusAndDuration awaitReplicationOfLastOpForClient(
+ OperationContext* txn, const WriteConcernOptions& writeConcern);
- virtual ReplicationCoordinator::StatusAndDuration awaitReplicationOfLastOpForClient(
- OperationContext* txn,
- const WriteConcernOptions& writeConcern);
+ virtual Status stepDown(OperationContext* txn,
+ bool force,
+ const Milliseconds& waitTime,
+ const Milliseconds& stepdownTime);
- virtual Status stepDown(OperationContext* txn,
- bool force,
- const Milliseconds& waitTime,
- const Milliseconds& stepdownTime);
+ virtual bool isMasterForReportingPurposes();
- virtual bool isMasterForReportingPurposes();
+ virtual bool canAcceptWritesForDatabase(StringData dbName);
- virtual bool canAcceptWritesForDatabase(StringData dbName);
+ bool canAcceptWritesFor(const NamespaceString& ns) override;
- bool canAcceptWritesFor(const NamespaceString& ns) override;
+ virtual Status checkIfWriteConcernCanBeSatisfied(const WriteConcernOptions& writeConcern) const;
- virtual Status checkIfWriteConcernCanBeSatisfied(
- const WriteConcernOptions& writeConcern) const;
+ virtual Status checkCanServeReadsFor(OperationContext* txn,
+ const NamespaceString& ns,
+ bool slaveOk);
- virtual Status checkCanServeReadsFor(OperationContext* txn,
- const NamespaceString& ns,
- bool slaveOk);
+ virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx);
- virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx);
+ virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts);
- virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts);
+ virtual void setMyLastOptime(const OpTime& opTime);
- virtual void setMyLastOptime(const OpTime& opTime);
+ virtual void resetMyLastOptime();
- virtual void resetMyLastOptime();
+ virtual void setMyHeartbeatMessage(const std::string& msg);
- virtual void setMyHeartbeatMessage(const std::string& msg);
+ virtual OpTime getMyLastOptime() const override;
- virtual OpTime getMyLastOptime() const override;
+ virtual ReadAfterOpTimeResponse waitUntilOpTime(OperationContext* txn,
+ const ReadAfterOpTimeArgs& settings) override;
- virtual ReadAfterOpTimeResponse waitUntilOpTime(
- OperationContext* txn,
- const ReadAfterOpTimeArgs& settings) override;
+ virtual OID getElectionId() override;
- virtual OID getElectionId() override;
+ virtual OID getMyRID() const override;
- virtual OID getMyRID() const override;
+ virtual int getMyId() const override;
- virtual int getMyId() const override;
+ virtual bool setFollowerMode(const MemberState& newState) override;
- virtual bool setFollowerMode(const MemberState& newState) override;
+ virtual bool isWaitingForApplierToDrain() override;
- virtual bool isWaitingForApplierToDrain() override;
+ virtual void signalDrainComplete(OperationContext* txn) override;
- virtual void signalDrainComplete(OperationContext* txn) override;
+ virtual void signalUpstreamUpdater() override;
- virtual void signalUpstreamUpdater() override;
+ virtual bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) override;
- virtual bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) override;
+ virtual Status processReplSetGetStatus(BSONObjBuilder* result) override;
- virtual Status processReplSetGetStatus(BSONObjBuilder* result) override;
+ virtual void fillIsMasterForReplSet(IsMasterResponse* result) override;
- virtual void fillIsMasterForReplSet(IsMasterResponse* result) override;
+ virtual void appendSlaveInfoData(BSONObjBuilder* result) override;
- virtual void appendSlaveInfoData(BSONObjBuilder* result) override;
+ virtual ReplicaSetConfig getConfig() const override;
- virtual ReplicaSetConfig getConfig() const override;
+ virtual void processReplSetGetConfig(BSONObjBuilder* result) override;
- virtual void processReplSetGetConfig(BSONObjBuilder* result) override;
+ virtual Status setMaintenanceMode(bool activate) override;
- virtual Status setMaintenanceMode(bool activate) override;
+ virtual bool getMaintenanceMode() override;
- virtual bool getMaintenanceMode() override;
+ virtual Status processReplSetSyncFrom(const HostAndPort& target,
+ BSONObjBuilder* resultObj) override;
- virtual Status processReplSetSyncFrom(const HostAndPort& target,
- BSONObjBuilder* resultObj) override;
+ virtual Status processReplSetFreeze(int secs, BSONObjBuilder* resultObj) override;
- virtual Status processReplSetFreeze(int secs, BSONObjBuilder* resultObj) override;
+ virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args,
+ ReplSetHeartbeatResponse* response) override;
- virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args,
- ReplSetHeartbeatResponse* response) override;
+ virtual Status processReplSetReconfig(OperationContext* txn,
+ const ReplSetReconfigArgs& args,
+ BSONObjBuilder* resultObj) override;
- virtual Status processReplSetReconfig(OperationContext* txn,
- const ReplSetReconfigArgs& args,
- BSONObjBuilder* resultObj) override;
+ virtual Status processReplSetInitiate(OperationContext* txn,
+ const BSONObj& configObj,
+ BSONObjBuilder* resultObj) override;
- virtual Status processReplSetInitiate(OperationContext* txn,
- const BSONObj& configObj,
- BSONObjBuilder* resultObj) override;
+ virtual Status processReplSetGetRBID(BSONObjBuilder* resultObj) override;
- virtual Status processReplSetGetRBID(BSONObjBuilder* resultObj) override;
+ virtual void incrementRollbackID() override;
- virtual void incrementRollbackID() override;
+ virtual Status processReplSetFresh(const ReplSetFreshArgs& args,
+ BSONObjBuilder* resultObj) override;
- virtual Status processReplSetFresh(const ReplSetFreshArgs& args,
- BSONObjBuilder* resultObj) override;
+ virtual Status processReplSetElect(const ReplSetElectArgs& args,
+ BSONObjBuilder* response) override;
- virtual Status processReplSetElect(const ReplSetElectArgs& args,
- BSONObjBuilder* response) override;
+ virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates,
+ long long* configVersion) override;
- virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates,
- long long* configVersion) override;
+ virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake) override;
- virtual Status processHandshake(OperationContext* txn,
- const HandshakeArgs& handshake) override;
+ virtual bool buildsIndexes() override;
- virtual bool buildsIndexes() override;
+ virtual std::vector<HostAndPort> getHostsWrittenTo(const OpTime& op) override;
- virtual std::vector<HostAndPort> getHostsWrittenTo(const OpTime& op) override;
+ virtual std::vector<HostAndPort> getOtherNodesInReplSet() const override;
- virtual std::vector<HostAndPort> getOtherNodesInReplSet() const override;
+ virtual WriteConcernOptions getGetLastErrorDefault() override;
- virtual WriteConcernOptions getGetLastErrorDefault() override;
+ virtual Status checkReplEnabledForCommand(BSONObjBuilder* result) override;
- virtual Status checkReplEnabledForCommand(BSONObjBuilder* result) override;
+ virtual bool isReplEnabled() const override;
- virtual bool isReplEnabled() const override;
+ virtual HostAndPort chooseNewSyncSource() override;
- virtual HostAndPort chooseNewSyncSource() override;
+ virtual void blacklistSyncSource(const HostAndPort& host, Date_t until) override;
- virtual void blacklistSyncSource(const HostAndPort& host, Date_t until) override;
+ virtual void resetLastOpTimeFromOplog(OperationContext* txn) override;
- virtual void resetLastOpTimeFromOplog(OperationContext* txn) override;
+ virtual bool shouldChangeSyncSource(const HostAndPort& currentSource) override;
- virtual bool shouldChangeSyncSource(const HostAndPort& currentSource) override;
+ virtual OpTime getLastCommittedOpTime() const override;
- virtual OpTime getLastCommittedOpTime() const override;
+ virtual Status processReplSetRequestVotes(OperationContext* txn,
+ const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response) override;
- virtual Status processReplSetRequestVotes(OperationContext* txn,
- const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response) override;
+ virtual Status processReplSetDeclareElectionWinner(const ReplSetDeclareElectionWinnerArgs& args,
+ long long* responseTerm) override;
- virtual Status processReplSetDeclareElectionWinner(
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm) override;
+ virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder);
- virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder);
+ virtual Status processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
+ ReplSetHeartbeatResponse* response) override;
- virtual Status processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
- ReplSetHeartbeatResponse* response) override;
+ virtual bool isV1ElectionProtocol() override;
- virtual bool isV1ElectionProtocol() override;
+ virtual void summarizeAsHtml(ReplSetHtmlSummary* s) override;
- virtual void summarizeAsHtml(ReplSetHtmlSummary* s) override;
+ /**
+ * Get current term from topology coordinator
+ */
+ virtual long long getTerm() override;
- /**
- * Get current term from topology coordinator
- */
- virtual long long getTerm() override;
+ virtual bool updateTerm(long long term) override;
- virtual bool updateTerm(long long term) override;
+ // ================== Test support API ===================
- // ================== Test support API ===================
+ /**
+ * If called after startReplication(), blocks until all asynchronous
+ * activities associated with replication start-up complete.
+ */
+ void waitForStartUpComplete();
- /**
- * If called after startReplication(), blocks until all asynchronous
- * activities associated with replication start-up complete.
- */
- void waitForStartUpComplete();
+ /**
+ * Gets the replica set configuration in use by the node.
+ */
+ ReplicaSetConfig getReplicaSetConfig_forTest();
- /**
- * Gets the replica set configuration in use by the node.
- */
- ReplicaSetConfig getReplicaSetConfig_forTest();
+ /**
+ * Simple wrapper around _setLastOptime_inlock to make it easier to test.
+ */
+ Status setLastOptime_forTest(long long cfgVer, long long memberId, const OpTime& opTime);
- /**
- * Simple wrapper around _setLastOptime_inlock to make it easier to test.
- */
- Status setLastOptime_forTest(long long cfgVer, long long memberId, const OpTime& opTime);
-
- bool updateTerm_forTest(long long term);
-
- private:
- ReplicationCoordinatorImpl(const ReplSettings& settings,
- ReplicationCoordinatorExternalState* externalState,
- TopologyCoordinator* topCoord,
- int64_t prngSeed,
- executor::NetworkInterface* network,
- StorageInterface* storage,
- ReplicationExecutor* replExec);
- /**
- * Configuration states for a replica set node.
- *
- * Transition diagram:
- *
- * PreStart ------------------> ReplicationDisabled
- * |
- * |
- * v
- * StartingUp -------> Uninitialized <------> Initiating
- * \ ^ |
- * ------- | |
- * | | |
- * v v |
- * Reconfig <---> Steady <----> HBReconfig |
- * ^ /
- * | /
- * \ /
- * -----------------------
- */
- enum ConfigState {
- kConfigPreStart,
- kConfigStartingUp,
- kConfigReplicationDisabled,
- kConfigUninitialized,
- kConfigSteady,
- kConfigInitiating,
- kConfigReconfiguring,
- kConfigHBReconfiguring
- };
-
- /**
- * Type describing actions to take after a change to the MemberState _memberState.
- */
- enum PostMemberStateUpdateAction {
- kActionNone,
- kActionCloseAllConnections, // Also indicates that we should clear sharding state.
- kActionFollowerModeStateChange,
- kActionWinElection
- };
-
- // Struct that holds information about clients waiting for replication.
- struct WaiterInfo;
-
- // Struct that holds information about nodes in this replication group, mainly used for
- // tracking replication progress for write concern satisfaction.
- struct SlaveInfo {
- OpTime opTime; // Our last known OpTime that this slave has replicated to.
- HostAndPort hostAndPort; // Client address of the slave.
- int memberId; // Id of the node in the replica set config, or -1 if we're not a replSet.
- OID rid; // RID of the node.
- bool self; // Whether this SlaveInfo stores the information about ourself
- SlaveInfo() : memberId(-1), self(false) {}
- };
-
- typedef std::vector<SlaveInfo> SlaveInfoVector;
-
- typedef std::vector<ReplicationExecutor::CallbackHandle> HeartbeatHandles;
-
- /**
- * Looks up the SlaveInfo in _slaveInfo associated with the given RID and returns a pointer
- * to it, or returns NULL if there is no SlaveInfo with the given RID.
- */
- SlaveInfo* _findSlaveInfoByRID_inlock(const OID& rid);
-
- /**
- * Looks up the SlaveInfo in _slaveInfo associated with the given member ID and returns a
- * pointer to it, or returns NULL if there is no SlaveInfo with the given member ID.
- */
- SlaveInfo* _findSlaveInfoByMemberID_inlock(int memberID);
-
- /**
- * Adds the given SlaveInfo to _slaveInfo and wakes up any threads waiting for replication
- * that now have their write concern satisfied. Only valid to call in master/slave setups.
- */
- void _addSlaveInfo_inlock(const SlaveInfo& slaveInfo);
-
- /**
- * Updates the item in _slaveInfo pointed to by 'slaveInfo' with the given OpTime 'opTime'
- * and wakes up any threads waiting for replication that now have their write concern
- * satisfied.
- */
- void _updateSlaveInfoOptime_inlock(SlaveInfo* slaveInfo, const OpTime& opTime);
-
- /**
- * Returns the index into _slaveInfo where data corresponding to ourself is stored.
- * For more info on the rules about how we know where our entry is, see the comment for
- * _slaveInfo.
- */
- size_t _getMyIndexInSlaveInfo_inlock() const;
-
- /**
- * Helper method that removes entries from _slaveInfo if they correspond to a node
- * with a member ID that is not in the current replica set config. Will always leave an
- * entry for ourself at the beginning of _slaveInfo, even if we aren't present in the
- * config.
- */
- void _updateSlaveInfoFromConfig_inlock();
-
- /**
- * Helper to update our saved config, cancel any pending heartbeats, and kick off sending
- * new heartbeats based on the new config. Must *only* be called from within the
- * ReplicationExecutor context.
- *
- * Returns an action to be performed after unlocking _mutex, via
- * _performPostMemberStateUpdateAction.
- */
- PostMemberStateUpdateAction _setCurrentRSConfig_inlock(
- const ReplicaSetConfig& newConfig,
- int myIndex);
-
- /**
- * Helper to wake waiters in _replicationWaiterList that are doneWaitingForReplication.
- */
- void _wakeReadyWaiters_inlock();
-
- /**
- * Helper method for setting/unsetting maintenance mode. Scheduled by setMaintenanceMode()
- * to run in a global write lock in the replication executor thread.
- */
- void _setMaintenanceMode_helper(const ReplicationExecutor::CallbackArgs& cbData,
- bool activate,
- Status* result);
-
- /**
- * Helper method for retrieving maintenance mode. Scheduled by getMaintenanceMode() to run
- * in the replication executor thread.
- */
- void _getMaintenanceMode_helper(const ReplicationExecutor::CallbackArgs& cbData,
- bool* maintenanceMode);
-
- /**
- * Bottom half of fillIsMasterForReplSet.
- */
- void _fillIsMasterForReplSet_finish(const ReplicationExecutor::CallbackArgs& cbData,
- IsMasterResponse* result);
-
- /**
- * Bottom half of processReplSetFresh.
- */
- void _processReplSetFresh_finish(const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetFreshArgs& args,
- BSONObjBuilder* response,
- Status* result);
-
- /**
- * Bottom half of processReplSetElect.
- */
- void _processReplSetElect_finish(const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetElectArgs& args,
- BSONObjBuilder* response,
- Status* result);
-
- /**
- * Bottom half of processReplSetFreeze.
- */
- void _processReplSetFreeze_finish(const ReplicationExecutor::CallbackArgs& cbData,
- int secs,
- BSONObjBuilder* response,
- Status* result);
- /*
- * Bottom half of clearSyncSourceBlacklist
- */
- void _clearSyncSourceBlacklist_finish(const ReplicationExecutor::CallbackArgs& cbData);
-
- /**
- * Bottom half of processReplSetDeclareElectionWinner.
- */
- void _processReplSetDeclareElectionWinner_finish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm,
- Status* result);
-
- /**
- * Bottom half of processReplSetRequestVotes.
- */
- void _processReplSetRequestVotes_finish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response,
- Status* result);
-
- /**
- * Scheduled to cause the ReplicationCoordinator to reconsider any state that might
- * need to change as a result of time passing - for instance becoming PRIMARY when a single
- * node replica set member's stepDown period ends.
- */
- void _handleTimePassing(const ReplicationExecutor::CallbackArgs& cbData);
-
- /**
- * Helper method for _awaitReplication that takes an already locked unique_lock and a
- * Timer for timing the operation which has been counting since before the lock was
- * acquired.
- */
- ReplicationCoordinator::StatusAndDuration _awaitReplication_inlock(
- const Timer* timer,
- stdx::unique_lock<stdx::mutex>* lock,
- OperationContext* txn,
- const OpTime& opTime,
- const WriteConcernOptions& writeConcern);
-
- /*
- * Returns true if the given writeConcern is satisfied up to "optime" or is unsatisfiable.
- */
- bool _doneWaitingForReplication_inlock(const OpTime& opTime,
- const WriteConcernOptions& writeConcern);
-
- /**
- * Helper for _doneWaitingForReplication_inlock that takes an integer write concern.
- */
- bool _haveNumNodesReachedOpTime_inlock(const OpTime& opTime, int numNodes);
-
- /**
- * Helper for _doneWaitingForReplication_inlock that takes a tag pattern representing a
- * named write concern mode.
- */
- bool _haveTaggedNodesReachedOpTime_inlock(const OpTime& opTime,
- const ReplicaSetTagPattern& tagPattern);
-
- Status _checkIfWriteConcernCanBeSatisfied_inlock(
- const WriteConcernOptions& writeConcern) const;
-
- /**
- * Triggers all callbacks that are blocked waiting for new heartbeat data
- * to decide whether or not to finish a step down.
- * Should only be called from executor callbacks.
- */
- void _signalStepDownWaitersFromCallback(const ReplicationExecutor::CallbackArgs& cbData);
- void _signalStepDownWaiters();
-
- /**
- * Helper for stepDown run within a ReplicationExecutor callback. This method assumes
- * it is running within a global shared lock, and thus that no writes are going on at the
- * same time.
- */
- void _stepDownContinue(const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicationExecutor::EventHandle finishedEvent,
- OperationContext* txn,
- Date_t waitUntil,
- Date_t stepdownUntil,
- bool force,
- Status* result);
-
- OID _getMyRID_inlock() const;
-
- int _getMyId_inlock() const;
-
- OpTime _getMyLastOptime_inlock() const;
-
- /**
- * Bottom half of setFollowerMode.
- *
- * May reschedule itself after the current election, so it is not sufficient to
- * wait for a callback scheduled to execute this method to complete. Instead,
- * supply an event, "finishedSettingFollowerMode", and wait for that event to
- * be signaled. Do not observe "*success" until after the event is signaled.
- */
- void _setFollowerModeFinish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const MemberState& newState,
- const ReplicationExecutor::EventHandle& finishedSettingFollowerMode,
- bool* success);
-
- /**
- * Helper method for updating our tracking of the last optime applied by a given node.
- * This is only valid to call on replica sets.
- * "configVersion" will be populated with our config version if it and the configVersion
- * of "args" differ.
- */
- Status _setLastOptime_inlock(const UpdatePositionArgs::UpdateInfo& args,
- long long* configVersion);
-
- /**
- * Helper method for setMyLastOptime that takes in a unique lock on
- * _mutex. The passed in lock must already be locked. It is unspecified what state the
- * lock will be in after this method finishes.
- *
- * This function has the same rules for "opTime" as setMyLastOptime(), unless
- * "isRollbackAllowed" is true.
- */
- void _setMyLastOptime_inlock(stdx::unique_lock<stdx::mutex>* lock,
- const OpTime& opTime,
- bool isRollbackAllowed);
-
- /**
- * Schedules a heartbeat to be sent to "target" at "when". "targetIndex" is the index
- * into the replica set config members array that corresponds to the "target", or -1 if
- * "target" is not in _rsConfig.
- */
- void _scheduleHeartbeatToTarget(const HostAndPort& target, int targetIndex, Date_t when);
-
- /**
- * Processes each heartbeat response.
- *
- * Schedules additional heartbeats, triggers elections and step downs, etc.
- */
- void _handleHeartbeatResponse(const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
- int targetIndex);
-
- void _handleHeartbeatResponseV1(
- const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
- int targetIndex);
-
- void _trackHeartbeatHandle(const StatusWith<ReplicationExecutor::CallbackHandle>& handle);
-
- void _untrackHeartbeatHandle(const ReplicationExecutor::CallbackHandle& handle);
-
- /**
- * Helper for _handleHeartbeatResponse.
- *
- * Updates the optime associated with the member at "memberIndex" in our config.
- */
- void _updateOpTimeFromHeartbeat_inlock(int memberIndex, const OpTime& optime);
-
- /**
- * Starts a heartbeat for each member in the current config. Called within the executor
- * context.
- */
- void _startHeartbeats();
-
- /**
- * Cancels all heartbeats. Called within executor context.
- */
- void _cancelHeartbeats();
-
- /**
- * Asynchronously sends a heartbeat to "target". "targetIndex" is the index
- * into the replica set config members array that corresponds to the "target", or -1 if
- * we don't have a valid replica set config.
- *
- * Scheduled by _scheduleHeartbeatToTarget.
- */
- void _doMemberHeartbeat(ReplicationExecutor::CallbackArgs cbData,
- const HostAndPort& target,
- int targetIndex);
-
-
- MemberState _getMemberState_inlock() const;
-
- /**
- * Callback that gives the TopologyCoordinator an initial LastVote document from
- * local storage.
- *
- * Called only during replication startup. All other updates come from the
- * TopologyCoordinator itself.
- */
- void _updateLastVote(const LastVote& lastVote);
-
- /**
- * Starts loading the replication configuration from local storage, and if it is valid,
- * schedules a callback (of _finishLoadLocalConfig) to set it as the current replica set
- * config (sets _rsConfig and _thisMembersConfigIndex).
- * Returns true if it finishes loading the local config, which most likely means there
- * was no local config at all or it was invalid in some way, and false if there was a valid
- * config detected but more work is needed to set it as the local config (which will be
- * handled by the callback to _finishLoadLocalConfig).
- */
- bool _startLoadLocalConfig(OperationContext* txn);
-
- /**
- * Callback that finishes the work started in _startLoadLocalConfig and sets _rsConfigState
- * to kConfigSteady, so that we can begin processing heartbeats and reconfigs.
- */
- void _finishLoadLocalConfig(const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& localConfig,
- const StatusWith<OpTime>& lastOpTimeStatus);
-
- /**
- * Callback that finishes the work of processReplSetInitiate() inside the replication
- * executor context, in the event of a successful quorum check.
- */
- void _finishReplSetInitiate(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& newConfig,
- int myIndex);
-
- /**
- * Callback that finishes the work of processReplSetReconfig inside the replication
- * executor context, in the event of a successful quorum check.
- */
- void _finishReplSetReconfig(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& newConfig,
- int myIndex);
-
- /**
- * Changes _rsConfigState to newState, and notify any waiters.
- */
- void _setConfigState_inlock(ConfigState newState);
-
- /**
- * Updates the cached value, _memberState, to match _topCoord's reported
- * member state, from getMemberState().
- *
- * Returns an enum indicating what action to take after releasing _mutex, if any.
- * Call performPostMemberStateUpdateAction on the return value after releasing
- * _mutex.
- */
- PostMemberStateUpdateAction _updateMemberStateFromTopologyCoordinator_inlock();
-
- /**
- * Performs a post member-state update action. Do not call while holding _mutex.
- */
- void _performPostMemberStateUpdateAction(PostMemberStateUpdateAction action);
-
- /**
- * Begins an attempt to elect this node.
- * Called after an incoming heartbeat changes this node's view of the set such that it
- * believes it can be elected PRIMARY.
- * For proper concurrency, must be called via a ReplicationExecutor callback.
- *
- * For old style elections the election path is:
- * _startElectSelf()
- * _onFreshnessCheckComplete()
- * _onElectCmdRunnerComplete()
- * For V1 (raft) style elections the election path is:
- * _startElectSelfV1()
- * _onDryRunComplete()
- * _onVoteRequestComplete()
- * _onElectionWinnerDeclarerComplete()
- */
- void _startElectSelf();
- void _startElectSelfV1();
-
- /**
- * Callback called when the FreshnessChecker has completed; checks the results and
- * decides whether to continue election proceedings.
- **/
- void _onFreshnessCheckComplete();
-
- /**
- * Callback called when the ElectCmdRunner has completed; checks the results and
- * decides whether to complete the election and change state to primary.
- **/
- void _onElectCmdRunnerComplete();
-
- /**
- * Callback called when the dryRun VoteRequester has completed; checks the results and
- * decides whether to conduct a proper election.
- * "originalTerm" was the term during which the dry run began, if the term has since
- * changed, do not run for election.
- */
- void _onDryRunComplete(long long originalTerm);
-
- /**
- * Callback called when the VoteRequester has completed; checks the results and
- * decides whether to change state to primary and alert other nodes of our primary-ness.
- * "originalTerm" was the term during which the election began, if the term has since
- * changed, do not step up as primary.
- */
- void _onVoteRequestComplete(long long originalTerm);
-
- /**
- * Callback called when the ElectWinnerDeclarer has completed; checks the results and
- * if we received any negative responses, relinquish primary.
- */
- void _onElectionWinnerDeclarerComplete();
-
- /**
- * Callback called after a random delay, to prevent repeated election ties.
- */
- void _recoverFromElectionTie(const ReplicationExecutor::CallbackArgs& cbData);
-
- /**
- * Chooses a new sync source. Must be scheduled as a callback.
- *
- * Calls into the Topology Coordinator, which uses its current view of the set to choose
- * the most appropriate sync source.
- */
- void _chooseNewSyncSource(const ReplicationExecutor::CallbackArgs& cbData,
- HostAndPort* newSyncSource);
-
- /**
- * Adds 'host' to the sync source blacklist until 'until'. A blacklisted source cannot
- * be chosen as a sync source. Schedules a callback to unblacklist the sync source to be
- * run at 'until'.
- *
- * Must be scheduled as a callback.
- */
- void _blacklistSyncSource(const ReplicationExecutor::CallbackArgs& cbData,
- const HostAndPort& host,
- Date_t until);
-
- /**
- * Removes 'host' from the sync source blacklist. If 'host' isn't found, it's simply
- * ignored and no error is thrown.
- *
- * Must be scheduled as a callback.
- */
- void _unblacklistSyncSource(const ReplicationExecutor::CallbackArgs& cbData,
- const HostAndPort& host);
-
- /**
- * Determines if a new sync source should be considered.
- *
- * Must be scheduled as a callback.
- */
- void _shouldChangeSyncSource(const ReplicationExecutor::CallbackArgs& cbData,
- const HostAndPort& currentSource,
- bool* shouldChange);
-
- /**
- * Schedules a request that the given host step down; logs any errors.
- */
- void _requestRemotePrimaryStepdown(const HostAndPort& target);
-
- void _heartbeatStepDownStart();
-
- /**
- * Completes a step-down of the current node. Must be run with a global
- * shared or global exclusive lock.
- */
- void _stepDownFinish(const ReplicationExecutor::CallbackArgs& cbData);
-
- /**
- * Schedules a replica set config change.
- */
- void _scheduleHeartbeatReconfig(const ReplicaSetConfig& newConfig);
-
- /**
- * Callback that continues a heartbeat-initiated reconfig after a running election
- * completes.
- */
- void _heartbeatReconfigAfterElectionCanceled(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& newConfig);
-
- /**
- * Method to write a configuration transmitted via heartbeat message to stable storage.
- */
- void _heartbeatReconfigStore(const ReplicationExecutor::CallbackArgs& cbd,
- const ReplicaSetConfig& newConfig);
-
- /**
- * Conclusion actions of a heartbeat-triggered reconfiguration.
- */
- void _heartbeatReconfigFinish(const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& newConfig,
- StatusWith<int> myIndex);
-
- /**
- * Utility method that schedules or performs actions specified by a HeartbeatResponseAction
- * returned by a TopologyCoordinator::processHeartbeatResponse(V1) call with the given
- * value of "responseStatus".
- */
- void _handleHeartbeatResponseAction(
- const HeartbeatResponseAction& action,
- const StatusWith<ReplSetHeartbeatResponse>& responseStatus);
-
- /**
- * Bottom half of processHeartbeat(), which runs in the replication executor.
- */
- void _processHeartbeatFinish(const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetHeartbeatArgs& args,
- ReplSetHeartbeatResponse* response,
- Status* outStatus);
-
- /**
- * Bottom half of processHeartbeatV1(), which runs in the replication executor.
- */
- void _processHeartbeatFinishV1(const ReplicationExecutor::CallbackArgs& cbData,
- const ReplSetHeartbeatArgsV1& args,
- ReplSetHeartbeatResponse* response,
- Status* outStatus);
- /**
- * Scan the SlaveInfoVector and determine the highest OplogEntry present on a majority of
- * servers; set _lastCommittedOpTime to this new entry, if greater than the current entry.
- */
- void _updateLastCommittedOpTime_inlock();
-
- void _summarizeAsHtml_finish(const ReplicationExecutor::CallbackArgs& cbData,
- ReplSetHtmlSummary* output);
-
- /**
- * Callback that gets the current term from topology coordinator.
- */
- void _getTerm_helper(const ReplicationExecutor::CallbackArgs& cbData, long long* term);
-
-
- /**
- * Callback that attempts to set the current term in topology coordinator and
- * relinquishes primary if the term actually changes and we are primary.
- */
- void _updateTerm_helper(const ReplicationExecutor::CallbackArgs& cbData,
- long long term,
- bool* updated,
- Handle* cbHandle);
- bool _updateTerm_incallback(long long term, Handle* cbHandle);
-
- //
- // All member variables are labeled with one of the following codes indicating the
- // synchronization rules for accessing them.
- //
- // (R) Read-only in concurrent operation; no synchronization required.
- // (S) Self-synchronizing; access in any way from any context.
- // (PS) Pointer is read-only in concurrent operation, item pointed to is self-synchronizing;
- // Access in any context.
- // (M) Reads and writes guarded by _mutex
- // (X) Reads and writes must be performed in a callback in _replExecutor
- // (MX) Must hold _mutex and be in a callback in _replExecutor to write; must either hold
- // _mutex or be in a callback in _replExecutor to read.
- // (GX) Readable under a global intent lock. Must either hold global lock in exclusive
- // mode (MODE_X) or both hold global lock in shared mode (MODE_S) and be in executor
- // context to write.
- // (I) Independently synchronized, see member variable comment.
-
- // Protects member data of this ReplicationCoordinator.
- mutable stdx::mutex _mutex; // (S)
-
- // Handles to actively queued heartbeats.
- HeartbeatHandles _heartbeatHandles; // (X)
-
- // When this node does not know itself to be a member of a config, it adds
- // every host that sends it a heartbeat request to this set, and also starts
- // sending heartbeat requests to that host. This set is cleared whenever
- // a node discovers that it is a member of a config.
- unordered_set<HostAndPort> _seedList; // (X)
-
- // Parsed command line arguments related to replication.
- const ReplSettings _settings; // (R)
-
- // Mode of replication specified by _settings.
- const Mode _replMode; // (R)
-
- // Pointer to the TopologyCoordinator owned by this ReplicationCoordinator.
- std::unique_ptr<TopologyCoordinator> _topCoord; // (X)
-
- // If the executer is owned then this will be set, but should not be used.
- // This is only used to clean up and destroy the replExec if owned
- std::unique_ptr<ReplicationExecutor> _replExecutorIfOwned; // (S)
- // Executor that drives the topology coordinator.
- ReplicationExecutor& _replExecutor; // (S)
-
- // Pointer to the ReplicationCoordinatorExternalState owned by this ReplicationCoordinator.
- std::unique_ptr<ReplicationCoordinatorExternalState> _externalState; // (PS)
-
- // Thread that drives actions in the topology coordinator
- // Set in startReplication() and thereafter accessed in shutdown.
- std::unique_ptr<stdx::thread> _topCoordDriverThread; // (I)
-
- // Our RID, used to identify us to our sync source when sending replication progress
- // updates upstream. Set once in startReplication() and then never modified again.
- OID _myRID; // (M)
-
- // Rollback ID. Used to check if a rollback happened during some interval of time
- // TODO: ideally this should only change on rollbacks NOT on mongod restarts also.
- int _rbid; // (M)
-
- // list of information about clients waiting on replication. Does *not* own the
- // WaiterInfos.
- std::vector<WaiterInfo*> _replicationWaiterList; // (M)
-
- // list of information about clients waiting for a particular opTime.
- // Does *not* own the WaiterInfos.
- std::vector<WaiterInfo*> _opTimeWaiterList; // (M)
-
- // Set to true when we are in the process of shutting down replication.
- bool _inShutdown; // (M)
-
- // Election ID of the last election that resulted in this node becoming primary.
- OID _electionId; // (M)
-
- // Vector containing known information about each member (such as replication
- // progress and member ID) in our replica set or each member replicating from
- // us in a master-slave deployment. In master/slave, the first entry is
- // guaranteed to correspond to ourself. In replica sets where we don't have a
- // valid config or are in state REMOVED then the vector will be a single element
- // just with info about ourself. In replica sets with a valid config the elements
- // will be in the same order as the members in the replica set config, thus
- // the entry for ourself will be at _thisMemberConfigIndex.
- SlaveInfoVector _slaveInfo; // (M)
-
- // Current ReplicaSet state.
- MemberState _memberState; // (MX)
-
- // True if we are waiting for the applier to finish draining.
- bool _isWaitingForDrainToComplete; // (M)
-
- // Used to signal threads waiting for changes to _rsConfigState.
- stdx::condition_variable _rsConfigStateChange; // (M)
-
- // Represents the configuration state of the coordinator, which controls how and when
- // _rsConfig may change. See the state transition diagram in the type definition of
- // ConfigState for details.
- ConfigState _rsConfigState; // (M)
-
- // The current ReplicaSet configuration object, including the information about tag groups
- // that is used to satisfy write concern requests with named gle modes.
- ReplicaSetConfig _rsConfig; // (MX)
-
- // This member's index position in the current config.
- int _selfIndex; // (MX)
-
- // Vector of events that should be signaled whenever new heartbeat data comes in.
- std::vector<ReplicationExecutor::EventHandle> _stepDownWaiters; // (X)
+ bool updateTerm_forTest(long long term);
- // State for conducting an election of this node.
- // the presence of a non-null _freshnessChecker pointer indicates that an election is
- // currently in progress. When using the V1 protocol, a non-null _voteRequester pointer
- // indicates this instead.
- // Only one election is allowed at a time.
- std::unique_ptr<FreshnessChecker> _freshnessChecker; // (X)
+private:
+ ReplicationCoordinatorImpl(const ReplSettings& settings,
+ ReplicationCoordinatorExternalState* externalState,
+ TopologyCoordinator* topCoord,
+ int64_t prngSeed,
+ executor::NetworkInterface* network,
+ StorageInterface* storage,
+ ReplicationExecutor* replExec);
+ /**
+ * Configuration states for a replica set node.
+ *
+ * Transition diagram:
+ *
+ * PreStart ------------------> ReplicationDisabled
+ * |
+ * |
+ * v
+ * StartingUp -------> Uninitialized <------> Initiating
+ * \ ^ |
+ * ------- | |
+ * | | |
+ * v v |
+ * Reconfig <---> Steady <----> HBReconfig |
+ * ^ /
+ * | /
+ * \ /
+ * -----------------------
+ */
+ enum ConfigState {
+ kConfigPreStart,
+ kConfigStartingUp,
+ kConfigReplicationDisabled,
+ kConfigUninitialized,
+ kConfigSteady,
+ kConfigInitiating,
+ kConfigReconfiguring,
+ kConfigHBReconfiguring
+ };
+
+ /**
+ * Type describing actions to take after a change to the MemberState _memberState.
+ */
+ enum PostMemberStateUpdateAction {
+ kActionNone,
+ kActionCloseAllConnections, // Also indicates that we should clear sharding state.
+ kActionFollowerModeStateChange,
+ kActionWinElection
+ };
- std::unique_ptr<ElectCmdRunner> _electCmdRunner; // (X)
+ // Struct that holds information about clients waiting for replication.
+ struct WaiterInfo;
+
+ // Struct that holds information about nodes in this replication group, mainly used for
+ // tracking replication progress for write concern satisfaction.
+ struct SlaveInfo {
+ OpTime opTime; // Our last known OpTime that this slave has replicated to.
+ HostAndPort hostAndPort; // Client address of the slave.
+ int memberId; // Id of the node in the replica set config, or -1 if we're not a replSet.
+ OID rid; // RID of the node.
+ bool self; // Whether this SlaveInfo stores the information about ourself
+ SlaveInfo() : memberId(-1), self(false) {}
+ };
- std::unique_ptr<VoteRequester> _voteRequester; // (X)
+ typedef std::vector<SlaveInfo> SlaveInfoVector;
+
+ typedef std::vector<ReplicationExecutor::CallbackHandle> HeartbeatHandles;
+
+ /**
+ * Looks up the SlaveInfo in _slaveInfo associated with the given RID and returns a pointer
+ * to it, or returns NULL if there is no SlaveInfo with the given RID.
+ */
+ SlaveInfo* _findSlaveInfoByRID_inlock(const OID& rid);
+
+ /**
+ * Looks up the SlaveInfo in _slaveInfo associated with the given member ID and returns a
+ * pointer to it, or returns NULL if there is no SlaveInfo with the given member ID.
+ */
+ SlaveInfo* _findSlaveInfoByMemberID_inlock(int memberID);
+
+ /**
+ * Adds the given SlaveInfo to _slaveInfo and wakes up any threads waiting for replication
+ * that now have their write concern satisfied. Only valid to call in master/slave setups.
+ */
+ void _addSlaveInfo_inlock(const SlaveInfo& slaveInfo);
+
+ /**
+ * Updates the item in _slaveInfo pointed to by 'slaveInfo' with the given OpTime 'opTime'
+ * and wakes up any threads waiting for replication that now have their write concern
+ * satisfied.
+ */
+ void _updateSlaveInfoOptime_inlock(SlaveInfo* slaveInfo, const OpTime& opTime);
+
+ /**
+ * Returns the index into _slaveInfo where data corresponding to ourself is stored.
+ * For more info on the rules about how we know where our entry is, see the comment for
+ * _slaveInfo.
+ */
+ size_t _getMyIndexInSlaveInfo_inlock() const;
+
+ /**
+ * Helper method that removes entries from _slaveInfo if they correspond to a node
+ * with a member ID that is not in the current replica set config. Will always leave an
+ * entry for ourself at the beginning of _slaveInfo, even if we aren't present in the
+ * config.
+ */
+ void _updateSlaveInfoFromConfig_inlock();
+
+ /**
+ * Helper to update our saved config, cancel any pending heartbeats, and kick off sending
+ * new heartbeats based on the new config. Must *only* be called from within the
+ * ReplicationExecutor context.
+ *
+ * Returns an action to be performed after unlocking _mutex, via
+ * _performPostMemberStateUpdateAction.
+ */
+ PostMemberStateUpdateAction _setCurrentRSConfig_inlock(const ReplicaSetConfig& newConfig,
+ int myIndex);
+
+ /**
+ * Helper to wake waiters in _replicationWaiterList that are doneWaitingForReplication.
+ */
+ void _wakeReadyWaiters_inlock();
+
+ /**
+ * Helper method for setting/unsetting maintenance mode. Scheduled by setMaintenanceMode()
+ * to run in a global write lock in the replication executor thread.
+ */
+ void _setMaintenanceMode_helper(const ReplicationExecutor::CallbackArgs& cbData,
+ bool activate,
+ Status* result);
+
+ /**
+ * Helper method for retrieving maintenance mode. Scheduled by getMaintenanceMode() to run
+ * in the replication executor thread.
+ */
+ void _getMaintenanceMode_helper(const ReplicationExecutor::CallbackArgs& cbData,
+ bool* maintenanceMode);
+
+ /**
+ * Bottom half of fillIsMasterForReplSet.
+ */
+ void _fillIsMasterForReplSet_finish(const ReplicationExecutor::CallbackArgs& cbData,
+ IsMasterResponse* result);
+
+ /**
+ * Bottom half of processReplSetFresh.
+ */
+ void _processReplSetFresh_finish(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetFreshArgs& args,
+ BSONObjBuilder* response,
+ Status* result);
+
+ /**
+ * Bottom half of processReplSetElect.
+ */
+ void _processReplSetElect_finish(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetElectArgs& args,
+ BSONObjBuilder* response,
+ Status* result);
+
+ /**
+ * Bottom half of processReplSetFreeze.
+ */
+ void _processReplSetFreeze_finish(const ReplicationExecutor::CallbackArgs& cbData,
+ int secs,
+ BSONObjBuilder* response,
+ Status* result);
+ /*
+ * Bottom half of clearSyncSourceBlacklist
+ */
+ void _clearSyncSourceBlacklist_finish(const ReplicationExecutor::CallbackArgs& cbData);
+
+ /**
+ * Bottom half of processReplSetDeclareElectionWinner.
+ */
+ void _processReplSetDeclareElectionWinner_finish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetDeclareElectionWinnerArgs& args,
+ long long* responseTerm,
+ Status* result);
+
+ /**
+ * Bottom half of processReplSetRequestVotes.
+ */
+ void _processReplSetRequestVotes_finish(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response,
+ Status* result);
+
+ /**
+ * Scheduled to cause the ReplicationCoordinator to reconsider any state that might
+ * need to change as a result of time passing - for instance becoming PRIMARY when a single
+ * node replica set member's stepDown period ends.
+ */
+ void _handleTimePassing(const ReplicationExecutor::CallbackArgs& cbData);
+
+ /**
+ * Helper method for _awaitReplication that takes an already locked unique_lock and a
+ * Timer for timing the operation which has been counting since before the lock was
+ * acquired.
+ */
+ ReplicationCoordinator::StatusAndDuration _awaitReplication_inlock(
+ const Timer* timer,
+ stdx::unique_lock<stdx::mutex>* lock,
+ OperationContext* txn,
+ const OpTime& opTime,
+ const WriteConcernOptions& writeConcern);
+
+ /*
+ * Returns true if the given writeConcern is satisfied up to "optime" or is unsatisfiable.
+ */
+ bool _doneWaitingForReplication_inlock(const OpTime& opTime,
+ const WriteConcernOptions& writeConcern);
+
+ /**
+ * Helper for _doneWaitingForReplication_inlock that takes an integer write concern.
+ */
+ bool _haveNumNodesReachedOpTime_inlock(const OpTime& opTime, int numNodes);
+
+ /**
+ * Helper for _doneWaitingForReplication_inlock that takes a tag pattern representing a
+ * named write concern mode.
+ */
+ bool _haveTaggedNodesReachedOpTime_inlock(const OpTime& opTime,
+ const ReplicaSetTagPattern& tagPattern);
+
+ Status _checkIfWriteConcernCanBeSatisfied_inlock(const WriteConcernOptions& writeConcern) const;
+
+ /**
+ * Triggers all callbacks that are blocked waiting for new heartbeat data
+ * to decide whether or not to finish a step down.
+ * Should only be called from executor callbacks.
+ */
+ void _signalStepDownWaitersFromCallback(const ReplicationExecutor::CallbackArgs& cbData);
+ void _signalStepDownWaiters();
+
+ /**
+ * Helper for stepDown run within a ReplicationExecutor callback. This method assumes
+ * it is running within a global shared lock, and thus that no writes are going on at the
+ * same time.
+ */
+ void _stepDownContinue(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicationExecutor::EventHandle finishedEvent,
+ OperationContext* txn,
+ Date_t waitUntil,
+ Date_t stepdownUntil,
+ bool force,
+ Status* result);
+
+ OID _getMyRID_inlock() const;
+
+ int _getMyId_inlock() const;
+
+ OpTime _getMyLastOptime_inlock() const;
+
+ /**
+ * Bottom half of setFollowerMode.
+ *
+ * May reschedule itself after the current election, so it is not sufficient to
+ * wait for a callback scheduled to execute this method to complete. Instead,
+ * supply an event, "finishedSettingFollowerMode", and wait for that event to
+ * be signaled. Do not observe "*success" until after the event is signaled.
+ */
+ void _setFollowerModeFinish(const ReplicationExecutor::CallbackArgs& cbData,
+ const MemberState& newState,
+ const ReplicationExecutor::EventHandle& finishedSettingFollowerMode,
+ bool* success);
+
+ /**
+ * Helper method for updating our tracking of the last optime applied by a given node.
+ * This is only valid to call on replica sets.
+ * "configVersion" will be populated with our config version if it and the configVersion
+ * of "args" differ.
+ */
+ Status _setLastOptime_inlock(const UpdatePositionArgs::UpdateInfo& args,
+ long long* configVersion);
+
+ /**
+ * Helper method for setMyLastOptime that takes in a unique lock on
+ * _mutex. The passed in lock must already be locked. It is unspecified what state the
+ * lock will be in after this method finishes.
+ *
+ * This function has the same rules for "opTime" as setMyLastOptime(), unless
+ * "isRollbackAllowed" is true.
+ */
+ void _setMyLastOptime_inlock(stdx::unique_lock<stdx::mutex>* lock,
+ const OpTime& opTime,
+ bool isRollbackAllowed);
+
+ /**
+ * Schedules a heartbeat to be sent to "target" at "when". "targetIndex" is the index
+ * into the replica set config members array that corresponds to the "target", or -1 if
+ * "target" is not in _rsConfig.
+ */
+ void _scheduleHeartbeatToTarget(const HostAndPort& target, int targetIndex, Date_t when);
+
+ /**
+ * Processes each heartbeat response.
+ *
+ * Schedules additional heartbeats, triggers elections and step downs, etc.
+ */
+ void _handleHeartbeatResponse(const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
+ int targetIndex);
+
+ void _handleHeartbeatResponseV1(const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
+ int targetIndex);
+
+ void _trackHeartbeatHandle(const StatusWith<ReplicationExecutor::CallbackHandle>& handle);
+
+ void _untrackHeartbeatHandle(const ReplicationExecutor::CallbackHandle& handle);
+
+ /**
+ * Helper for _handleHeartbeatResponse.
+ *
+ * Updates the optime associated with the member at "memberIndex" in our config.
+ */
+ void _updateOpTimeFromHeartbeat_inlock(int memberIndex, const OpTime& optime);
+
+ /**
+ * Starts a heartbeat for each member in the current config. Called within the executor
+ * context.
+ */
+ void _startHeartbeats();
+
+ /**
+ * Cancels all heartbeats. Called within executor context.
+ */
+ void _cancelHeartbeats();
+
+ /**
+ * Asynchronously sends a heartbeat to "target". "targetIndex" is the index
+ * into the replica set config members array that corresponds to the "target", or -1 if
+ * we don't have a valid replica set config.
+ *
+ * Scheduled by _scheduleHeartbeatToTarget.
+ */
+ void _doMemberHeartbeat(ReplicationExecutor::CallbackArgs cbData,
+ const HostAndPort& target,
+ int targetIndex);
+
+
+ MemberState _getMemberState_inlock() const;
+
+ /**
+ * Callback that gives the TopologyCoordinator an initial LastVote document from
+ * local storage.
+ *
+ * Called only during replication startup. All other updates come from the
+ * TopologyCoordinator itself.
+ */
+ void _updateLastVote(const LastVote& lastVote);
+
+ /**
+ * Starts loading the replication configuration from local storage, and if it is valid,
+ * schedules a callback (of _finishLoadLocalConfig) to set it as the current replica set
+ * config (sets _rsConfig and _thisMembersConfigIndex).
+ * Returns true if it finishes loading the local config, which most likely means there
+ * was no local config at all or it was invalid in some way, and false if there was a valid
+ * config detected but more work is needed to set it as the local config (which will be
+ * handled by the callback to _finishLoadLocalConfig).
+ */
+ bool _startLoadLocalConfig(OperationContext* txn);
+
+ /**
+ * Callback that finishes the work started in _startLoadLocalConfig and sets _rsConfigState
+ * to kConfigSteady, so that we can begin processing heartbeats and reconfigs.
+ */
+ void _finishLoadLocalConfig(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& localConfig,
+ const StatusWith<OpTime>& lastOpTimeStatus);
+
+ /**
+ * Callback that finishes the work of processReplSetInitiate() inside the replication
+ * executor context, in the event of a successful quorum check.
+ */
+ void _finishReplSetInitiate(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& newConfig,
+ int myIndex);
+
+ /**
+ * Callback that finishes the work of processReplSetReconfig inside the replication
+ * executor context, in the event of a successful quorum check.
+ */
+ void _finishReplSetReconfig(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& newConfig,
+ int myIndex);
+
+ /**
+ * Changes _rsConfigState to newState, and notify any waiters.
+ */
+ void _setConfigState_inlock(ConfigState newState);
+
+ /**
+ * Updates the cached value, _memberState, to match _topCoord's reported
+ * member state, from getMemberState().
+ *
+ * Returns an enum indicating what action to take after releasing _mutex, if any.
+ * Call performPostMemberStateUpdateAction on the return value after releasing
+ * _mutex.
+ */
+ PostMemberStateUpdateAction _updateMemberStateFromTopologyCoordinator_inlock();
+
+ /**
+ * Performs a post member-state update action. Do not call while holding _mutex.
+ */
+ void _performPostMemberStateUpdateAction(PostMemberStateUpdateAction action);
+
+ /**
+ * Begins an attempt to elect this node.
+ * Called after an incoming heartbeat changes this node's view of the set such that it
+ * believes it can be elected PRIMARY.
+ * For proper concurrency, must be called via a ReplicationExecutor callback.
+ *
+ * For old style elections the election path is:
+ * _startElectSelf()
+ * _onFreshnessCheckComplete()
+ * _onElectCmdRunnerComplete()
+ * For V1 (raft) style elections the election path is:
+ * _startElectSelfV1()
+ * _onDryRunComplete()
+ * _onVoteRequestComplete()
+ * _onElectionWinnerDeclarerComplete()
+ */
+ void _startElectSelf();
+ void _startElectSelfV1();
+
+ /**
+ * Callback called when the FreshnessChecker has completed; checks the results and
+ * decides whether to continue election proceedings.
+ **/
+ void _onFreshnessCheckComplete();
+
+ /**
+ * Callback called when the ElectCmdRunner has completed; checks the results and
+ * decides whether to complete the election and change state to primary.
+ **/
+ void _onElectCmdRunnerComplete();
+
+ /**
+ * Callback called when the dryRun VoteRequester has completed; checks the results and
+ * decides whether to conduct a proper election.
+ * "originalTerm" was the term during which the dry run began, if the term has since
+ * changed, do not run for election.
+ */
+ void _onDryRunComplete(long long originalTerm);
+
+ /**
+ * Callback called when the VoteRequester has completed; checks the results and
+ * decides whether to change state to primary and alert other nodes of our primary-ness.
+ * "originalTerm" was the term during which the election began, if the term has since
+ * changed, do not step up as primary.
+ */
+ void _onVoteRequestComplete(long long originalTerm);
+
+ /**
+ * Callback called when the ElectWinnerDeclarer has completed; checks the results and
+ * if we received any negative responses, relinquish primary.
+ */
+ void _onElectionWinnerDeclarerComplete();
+
+ /**
+ * Callback called after a random delay, to prevent repeated election ties.
+ */
+ void _recoverFromElectionTie(const ReplicationExecutor::CallbackArgs& cbData);
+
+ /**
+ * Chooses a new sync source. Must be scheduled as a callback.
+ *
+ * Calls into the Topology Coordinator, which uses its current view of the set to choose
+ * the most appropriate sync source.
+ */
+ void _chooseNewSyncSource(const ReplicationExecutor::CallbackArgs& cbData,
+ HostAndPort* newSyncSource);
+
+ /**
+ * Adds 'host' to the sync source blacklist until 'until'. A blacklisted source cannot
+ * be chosen as a sync source. Schedules a callback to unblacklist the sync source to be
+ * run at 'until'.
+ *
+ * Must be scheduled as a callback.
+ */
+ void _blacklistSyncSource(const ReplicationExecutor::CallbackArgs& cbData,
+ const HostAndPort& host,
+ Date_t until);
+
+ /**
+ * Removes 'host' from the sync source blacklist. If 'host' isn't found, it's simply
+ * ignored and no error is thrown.
+ *
+ * Must be scheduled as a callback.
+ */
+ void _unblacklistSyncSource(const ReplicationExecutor::CallbackArgs& cbData,
+ const HostAndPort& host);
+
+ /**
+ * Determines if a new sync source should be considered.
+ *
+ * Must be scheduled as a callback.
+ */
+ void _shouldChangeSyncSource(const ReplicationExecutor::CallbackArgs& cbData,
+ const HostAndPort& currentSource,
+ bool* shouldChange);
+
+ /**
+ * Schedules a request that the given host step down; logs any errors.
+ */
+ void _requestRemotePrimaryStepdown(const HostAndPort& target);
+
+ void _heartbeatStepDownStart();
+
+ /**
+ * Completes a step-down of the current node. Must be run with a global
+ * shared or global exclusive lock.
+ */
+ void _stepDownFinish(const ReplicationExecutor::CallbackArgs& cbData);
+
+ /**
+ * Schedules a replica set config change.
+ */
+ void _scheduleHeartbeatReconfig(const ReplicaSetConfig& newConfig);
+
+ /**
+ * Callback that continues a heartbeat-initiated reconfig after a running election
+ * completes.
+ */
+ void _heartbeatReconfigAfterElectionCanceled(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& newConfig);
+
+ /**
+ * Method to write a configuration transmitted via heartbeat message to stable storage.
+ */
+ void _heartbeatReconfigStore(const ReplicationExecutor::CallbackArgs& cbd,
+ const ReplicaSetConfig& newConfig);
+
+ /**
+ * Performs the concluding actions of a heartbeat-triggered reconfiguration.
+ */
+ void _heartbeatReconfigFinish(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& newConfig,
+ StatusWith<int> myIndex);
+
+ /**
+ * Utility method that schedules or performs actions specified by a HeartbeatResponseAction
+ * returned by a TopologyCoordinator::processHeartbeatResponse(V1) call with the given
+ * value of "responseStatus".
+ */
+ void _handleHeartbeatResponseAction(const HeartbeatResponseAction& action,
+ const StatusWith<ReplSetHeartbeatResponse>& responseStatus);
+
+ /**
+ * Bottom half of processHeartbeat(), which runs in the replication executor.
+ */
+ void _processHeartbeatFinish(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetHeartbeatArgs& args,
+ ReplSetHeartbeatResponse* response,
+ Status* outStatus);
+
+ /**
+ * Bottom half of processHeartbeatV1(), which runs in the replication executor.
+ */
+ void _processHeartbeatFinishV1(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplSetHeartbeatArgsV1& args,
+ ReplSetHeartbeatResponse* response,
+ Status* outStatus);
+ /**
+ * Scans the SlaveInfoVector and determines the highest OpTime present on a majority of
+ * servers; sets _lastCommittedOpTime to this new value if it is greater than the current one.
+ */
+ void _updateLastCommittedOpTime_inlock();
+
+ void _summarizeAsHtml_finish(const ReplicationExecutor::CallbackArgs& cbData,
+ ReplSetHtmlSummary* output);
+
+ /**
+ * Callback that gets the current term from the topology coordinator.
+ */
+ void _getTerm_helper(const ReplicationExecutor::CallbackArgs& cbData, long long* term);
+
+
+ /**
+ * Callback that attempts to set the current term in the topology coordinator and
+ * relinquishes primary if the term actually changes and we are primary.
+ */
+ void _updateTerm_helper(const ReplicationExecutor::CallbackArgs& cbData,
+ long long term,
+ bool* updated,
+ Handle* cbHandle);
+ bool _updateTerm_incallback(long long term, Handle* cbHandle);
+
+ //
+ // All member variables are labeled with one of the following codes indicating the
+ // synchronization rules for accessing them.
+ //
+ // (R) Read-only in concurrent operation; no synchronization required.
+ // (S) Self-synchronizing; access in any way from any context.
+ // (PS) Pointer is read-only in concurrent operation, item pointed to is self-synchronizing;
+ // Access in any context.
+ // (M) Reads and writes guarded by _mutex
+ // (X) Reads and writes must be performed in a callback in _replExecutor
+ // (MX) Must hold _mutex and be in a callback in _replExecutor to write; must either hold
+ // _mutex or be in a callback in _replExecutor to read.
+ // (GX) Readable under a global intent lock. Must either hold global lock in exclusive
+ // mode (MODE_X) or both hold global lock in shared mode (MODE_S) and be in executor
+ // context to write.
+ // (I) Independently synchronized, see member variable comment.
+
+ // Protects member data of this ReplicationCoordinator.
+ mutable stdx::mutex _mutex; // (S)
+
+ // Handles to actively queued heartbeats.
+ HeartbeatHandles _heartbeatHandles; // (X)
+
+ // When this node does not know itself to be a member of a config, it adds
+ // every host that sends it a heartbeat request to this set, and also starts
+ // sending heartbeat requests to that host. This set is cleared whenever
+ // a node discovers that it is a member of a config.
+ unordered_set<HostAndPort> _seedList; // (X)
+
+ // Parsed command line arguments related to replication.
+ const ReplSettings _settings; // (R)
+
+ // Mode of replication specified by _settings.
+ const Mode _replMode; // (R)
+
+ // Pointer to the TopologyCoordinator owned by this ReplicationCoordinator.
+ std::unique_ptr<TopologyCoordinator> _topCoord; // (X)
+
+ // If the executor is owned then this will be set, but should not be used.
+ // This is only used to clean up and destroy the executor if owned.
+ std::unique_ptr<ReplicationExecutor> _replExecutorIfOwned; // (S)
+ // Executor that drives the topology coordinator.
+ ReplicationExecutor& _replExecutor; // (S)
+
+ // Pointer to the ReplicationCoordinatorExternalState owned by this ReplicationCoordinator.
+ std::unique_ptr<ReplicationCoordinatorExternalState> _externalState; // (PS)
+
+ // Thread that drives actions in the topology coordinator
+ // Set in startReplication() and thereafter accessed in shutdown.
+ std::unique_ptr<stdx::thread> _topCoordDriverThread; // (I)
+
+ // Our RID, used to identify us to our sync source when sending replication progress
+ // updates upstream. Set once in startReplication() and then never modified again.
+ OID _myRID; // (M)
+
+ // Rollback ID. Used to check if a rollback happened during some interval of time
+ // TODO: ideally this should only change on rollbacks, not also on mongod restarts.
+ int _rbid; // (M)
+
+ // list of information about clients waiting on replication. Does *not* own the
+ // WaiterInfos.
+ std::vector<WaiterInfo*> _replicationWaiterList; // (M)
+
+ // list of information about clients waiting for a particular opTime.
+ // Does *not* own the WaiterInfos.
+ std::vector<WaiterInfo*> _opTimeWaiterList; // (M)
+
+ // Set to true when we are in the process of shutting down replication.
+ bool _inShutdown; // (M)
+
+ // Election ID of the last election that resulted in this node becoming primary.
+ OID _electionId; // (M)
+
+ // Vector containing known information about each member (such as replication
+ // progress and member ID) in our replica set or each member replicating from
+ // us in a master-slave deployment. In master/slave, the first entry is
+ // guaranteed to correspond to ourself. In replica sets where we don't have a
+ // valid config or are in state REMOVED, the vector will be a single element
+ // containing info only about ourself. In replica sets with a valid config, the elements
+ // will be in the same order as the members in the replica set config, so
+ // the entry for ourself will be at _selfIndex.
+ SlaveInfoVector _slaveInfo; // (M)
+
+ // Current state of this member in the replica set.
+ MemberState _memberState; // (MX)
+
+ // True if we are waiting for the applier to finish draining.
+ bool _isWaitingForDrainToComplete; // (M)
+
+ // Used to signal threads waiting for changes to _rsConfigState.
+ stdx::condition_variable _rsConfigStateChange; // (M)
+
+ // Represents the configuration state of the coordinator, which controls how and when
+ // _rsConfig may change. See the state transition diagram in the type definition of
+ // ConfigState for details.
+ ConfigState _rsConfigState; // (M)
+
+ // The current ReplicaSet configuration object, including the information about tag groups
+ // that is used to satisfy write concern requests with named gle modes.
+ ReplicaSetConfig _rsConfig; // (MX)
+
+ // This member's index position in the current config.
+ int _selfIndex; // (MX)
+
+ // Vector of events that should be signaled whenever new heartbeat data comes in.
+ std::vector<ReplicationExecutor::EventHandle> _stepDownWaiters; // (X)
+
+ // State for conducting an election of this node.
+ // the presence of a non-null _freshnessChecker pointer indicates that an election is
+ // currently in progress. When using the V1 protocol, a non-null _voteRequester pointer
+ // indicates this instead.
+ // Only one election is allowed at a time.
+ std::unique_ptr<FreshnessChecker> _freshnessChecker; // (X)
- std::unique_ptr<ElectionWinnerDeclarer> _electionWinnerDeclarer; // (X)
+ std::unique_ptr<ElectCmdRunner> _electCmdRunner; // (X)
- // Event that the election code will signal when the in-progress election completes.
- // Unspecified value when _freshnessChecker is NULL.
- ReplicationExecutor::EventHandle _electionFinishedEvent; // (X)
+ std::unique_ptr<VoteRequester> _voteRequester; // (X)
- // Whether we slept last time we attempted an election but possibly tied with other nodes.
- bool _sleptLastElection; // (X)
+ std::unique_ptr<ElectionWinnerDeclarer> _electionWinnerDeclarer; // (X)
- // Flag that indicates whether writes to databases other than "local" are allowed. Used to
- // answer canAcceptWritesForDatabase() and canAcceptWritesFor() questions.
- // Always true for standalone nodes and masters in master-slave relationships.
- bool _canAcceptNonLocalWrites; // (GX)
+ // Event that the election code will signal when the in-progress election completes.
+ // Unspecified value when _freshnessChecker is NULL.
+ ReplicationExecutor::EventHandle _electionFinishedEvent; // (X)
- // Flag that indicates whether reads from databases other than "local" are allowed. Unlike
- // _canAcceptNonLocalWrites, above, this question is about admission control on secondaries,
- // and we do not require that its observers be strongly synchronized. Accidentally
- // providing the prior value for a limited period of time is acceptable. Also unlike
- // _canAcceptNonLocalWrites, its value is only meaningful on replica set secondaries.
- AtomicUInt32 _canServeNonLocalReads; // (S)
+ // Whether we slept last time we attempted an election but possibly tied with other nodes.
+ bool _sleptLastElection; // (X)
- // OpTime of the latest committed operation. Matches the concurrency level of _slaveInfo.
- OpTime _lastCommittedOpTime; // (M)
+ // Flag that indicates whether writes to databases other than "local" are allowed. Used to
+ // answer canAcceptWritesForDatabase() and canAcceptWritesFor() questions.
+ // Always true for standalone nodes and masters in master-slave relationships.
+ bool _canAcceptNonLocalWrites; // (GX)
- // Data Replicator used to replicate data
- DataReplicator _dr; // (S)
+ // Flag that indicates whether reads from databases other than "local" are allowed. Unlike
+ // _canAcceptNonLocalWrites, above, this question is about admission control on secondaries,
+ // and we do not require that its observers be strongly synchronized. Accidentally
+ // providing the prior value for a limited period of time is acceptable. Also unlike
+ // _canAcceptNonLocalWrites, its value is only meaningful on replica set secondaries.
+ AtomicUInt32 _canServeNonLocalReads; // (S)
- };
+ // OpTime of the latest committed operation. Matches the concurrency level of _slaveInfo.
+ OpTime _lastCommittedOpTime; // (M)
+
+ // Data Replicator used to replicate data
+ DataReplicator _dr; // (S)
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index 35f5fdf9f9d..d298decf65f 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -42,54 +42,55 @@ namespace mongo {
namespace repl {
namespace {
- class LoseElectionGuard {
- MONGO_DISALLOW_COPYING(LoseElectionGuard);
- public:
- LoseElectionGuard(
- TopologyCoordinator* topCoord,
- ReplicationExecutor* executor,
- std::unique_ptr<FreshnessChecker>* freshnessChecker,
- std::unique_ptr<ElectCmdRunner>* electCmdRunner,
- ReplicationExecutor::EventHandle* electionFinishedEvent)
- : _topCoord(topCoord),
- _executor(executor),
- _freshnessChecker(freshnessChecker),
- _electCmdRunner(electCmdRunner),
- _electionFinishedEvent(electionFinishedEvent),
- _dismissed(false) {
+class LoseElectionGuard {
+ MONGO_DISALLOW_COPYING(LoseElectionGuard);
+
+public:
+ LoseElectionGuard(TopologyCoordinator* topCoord,
+ ReplicationExecutor* executor,
+ std::unique_ptr<FreshnessChecker>* freshnessChecker,
+ std::unique_ptr<ElectCmdRunner>* electCmdRunner,
+ ReplicationExecutor::EventHandle* electionFinishedEvent)
+ : _topCoord(topCoord),
+ _executor(executor),
+ _freshnessChecker(freshnessChecker),
+ _electCmdRunner(electCmdRunner),
+ _electionFinishedEvent(electionFinishedEvent),
+ _dismissed(false) {}
+
+ ~LoseElectionGuard() {
+ if (_dismissed) {
+ return;
}
-
- ~LoseElectionGuard() {
- if (_dismissed) {
- return;
- }
- _topCoord->processLoseElection();
- _freshnessChecker->reset(NULL);
- _electCmdRunner->reset(NULL);
- if (_electionFinishedEvent->isValid()) {
- _executor->signalEvent(*_electionFinishedEvent);
- }
+ _topCoord->processLoseElection();
+ _freshnessChecker->reset(NULL);
+ _electCmdRunner->reset(NULL);
+ if (_electionFinishedEvent->isValid()) {
+ _executor->signalEvent(*_electionFinishedEvent);
}
+ }
- void dismiss() { _dismissed = true; }
+ void dismiss() {
+ _dismissed = true;
+ }
- private:
- TopologyCoordinator* const _topCoord;
- ReplicationExecutor* const _executor;
- std::unique_ptr<FreshnessChecker>* const _freshnessChecker;
- std::unique_ptr<ElectCmdRunner>* const _electCmdRunner;
- const ReplicationExecutor::EventHandle* _electionFinishedEvent;
- bool _dismissed;
- };
+private:
+ TopologyCoordinator* const _topCoord;
+ ReplicationExecutor* const _executor;
+ std::unique_ptr<FreshnessChecker>* const _freshnessChecker;
+ std::unique_ptr<ElectCmdRunner>* const _electCmdRunner;
+ const ReplicationExecutor::EventHandle* _electionFinishedEvent;
+ bool _dismissed;
+};
} // namespace
- void ReplicationCoordinatorImpl::_startElectSelf() {
- invariant(!_freshnessChecker);
- invariant(!_electCmdRunner);
+void ReplicationCoordinatorImpl::_startElectSelf() {
+ invariant(!_freshnessChecker);
+ invariant(!_electCmdRunner);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- switch (_rsConfigState) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ switch (_rsConfigState) {
case kConfigSteady:
break;
case kConfigInitiating:
@@ -100,183 +101,183 @@ namespace {
_topCoord->processLoseElection();
return;
default:
- severe() << "Entered replica set election code while in illegal config state " <<
- int(_rsConfigState);
+ severe() << "Entered replica set election code while in illegal config state "
+ << int(_rsConfigState);
fassertFailed(18913);
- }
+ }
- log() << "Standing for election";
- const StatusWith<ReplicationExecutor::EventHandle> finishEvh = _replExecutor.makeEvent();
- if (finishEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(18680, finishEvh.getStatus());
- _electionFinishedEvent = finishEvh.getValue();
- LoseElectionGuard lossGuard(_topCoord.get(),
- &_replExecutor,
- &_freshnessChecker,
- &_electCmdRunner,
- &_electionFinishedEvent);
+ log() << "Standing for election";
+ const StatusWith<ReplicationExecutor::EventHandle> finishEvh = _replExecutor.makeEvent();
+ if (finishEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(18680, finishEvh.getStatus());
+ _electionFinishedEvent = finishEvh.getValue();
+ LoseElectionGuard lossGuard(_topCoord.get(),
+ &_replExecutor,
+ &_freshnessChecker,
+ &_electCmdRunner,
+ &_electionFinishedEvent);
+
+
+ invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
+ OpTime lastOpTimeApplied(_getMyLastOptime_inlock());
+
+ if (lastOpTimeApplied.isNull()) {
+ log() << "not trying to elect self, "
+ "do not yet have a complete set of data from any point in time";
+ return;
+ }
+ _freshnessChecker.reset(new FreshnessChecker);
+
+ // This is necessary because the freshnessChecker may call directly into winning an
+ // election, if there are no other MaybeUp nodes. Winning an election attempts to lock
+ // _mutex again.
+ lk.unlock();
+
+ StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _freshnessChecker->start(
+ &_replExecutor,
+ lastOpTimeApplied.getTimestamp(),
+ _rsConfig,
+ _selfIndex,
+ _topCoord->getMaybeUpHostAndPorts(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this));
+ if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(18681, nextPhaseEvh.getStatus());
+ lossGuard.dismiss();
+}
+
+void ReplicationCoordinatorImpl::_onFreshnessCheckComplete() {
+ invariant(_freshnessChecker);
+ invariant(!_electCmdRunner);
+ LoseElectionGuard lossGuard(_topCoord.get(),
+ &_replExecutor,
+ &_freshnessChecker,
+ &_electCmdRunner,
+ &_electionFinishedEvent);
+
+ if (_freshnessChecker->isCanceled()) {
+ LOG(2) << "Election canceled during freshness check phase";
+ return;
+ }
- invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
- OpTime lastOpTimeApplied(_getMyLastOptime_inlock());
+ const Date_t now(_replExecutor.now());
+ const FreshnessChecker::ElectionAbortReason abortReason =
+ _freshnessChecker->shouldAbortElection();
- if (lastOpTimeApplied.isNull()) {
- log() << "not trying to elect self, "
- "do not yet have a complete set of data from any point in time";
+ // need to not sleep after last time sleeping,
+ switch (abortReason) {
+ case FreshnessChecker::None:
+ break;
+ case FreshnessChecker::FreshnessTie:
+ if ((_selfIndex != 0) && !_sleptLastElection) {
+ const auto ms = Milliseconds(_replExecutor.nextRandomInt64(1000) + 50);
+ const Date_t nextCandidateTime = now + ms;
+ log() << "possible election tie; sleeping " << ms.count() << "ms until "
+ << dateToISOStringLocal(nextCandidateTime);
+ _topCoord->setElectionSleepUntil(nextCandidateTime);
+ _replExecutor.scheduleWorkAt(
+ nextCandidateTime,
+ stdx::bind(&ReplicationCoordinatorImpl::_recoverFromElectionTie,
+ this,
+ stdx::placeholders::_1));
+ _sleptLastElection = true;
+ return;
+ }
+ _sleptLastElection = false;
+ break;
+ case FreshnessChecker::FresherNodeFound:
+ log() << "not electing self, we are not freshest";
return;
- }
-
- _freshnessChecker.reset(new FreshnessChecker);
-
- // This is necessary because the freshnessChecker may call directly into winning an
- // election, if there are no other MaybeUp nodes. Winning an election attempts to lock
- // _mutex again.
- lk.unlock();
-
- StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _freshnessChecker->start(
- &_replExecutor,
- lastOpTimeApplied.getTimestamp(),
- _rsConfig,
- _selfIndex,
- _topCoord->getMaybeUpHostAndPorts(),
- stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this));
- if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ case FreshnessChecker::QuorumUnreachable:
+ log() << "not electing self, we could not contact enough voting members";
+ return;
+ default:
+ log() << "not electing self due to election abort message :"
+ << static_cast<int>(abortReason);
return;
- }
- fassert(18681, nextPhaseEvh.getStatus());
- lossGuard.dismiss();
}
- void ReplicationCoordinatorImpl::_onFreshnessCheckComplete() {
- invariant(_freshnessChecker);
- invariant(!_electCmdRunner);
- LoseElectionGuard lossGuard(_topCoord.get(),
- &_replExecutor,
- &_freshnessChecker,
- &_electCmdRunner,
- &_electionFinishedEvent);
-
- if (_freshnessChecker->isCanceled()) {
- LOG(2) << "Election canceled during freshness check phase";
- return;
- }
+ log() << "running for election";
+ // Secure our vote for ourself first
+ if (!_topCoord->voteForMyself(now)) {
+ return;
+ }
- const Date_t now(_replExecutor.now());
- const FreshnessChecker::ElectionAbortReason abortReason =
- _freshnessChecker->shouldAbortElection();
-
- // need to not sleep after last time sleeping,
- switch (abortReason) {
- case FreshnessChecker::None:
- break;
- case FreshnessChecker::FreshnessTie:
- if ((_selfIndex != 0) && !_sleptLastElection) {
- const auto ms = Milliseconds(_replExecutor.nextRandomInt64(1000) + 50);
- const Date_t nextCandidateTime = now + ms;
- log() << "possible election tie; sleeping " << ms.count() << "ms until " <<
- dateToISOStringLocal(nextCandidateTime);
- _topCoord->setElectionSleepUntil(nextCandidateTime);
- _replExecutor.scheduleWorkAt(
- nextCandidateTime,
- stdx::bind(&ReplicationCoordinatorImpl::_recoverFromElectionTie,
- this,
- stdx::placeholders::_1));
- _sleptLastElection = true;
- return;
- }
- _sleptLastElection = false;
- break;
- case FreshnessChecker::FresherNodeFound:
- log() << "not electing self, we are not freshest";
- return;
- case FreshnessChecker::QuorumUnreachable:
- log() << "not electing self, we could not contact enough voting members";
- return;
- default:
- log() << "not electing self due to election abort message :"
- << static_cast<int>(abortReason);
- return;
- }
+ _electCmdRunner.reset(new ElectCmdRunner);
+ StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _electCmdRunner->start(
+ &_replExecutor,
+ _rsConfig,
+ _selfIndex,
+ _topCoord->getMaybeUpHostAndPorts(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this));
+ if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(18685, nextPhaseEvh.getStatus());
+ lossGuard.dismiss();
+}
+
+void ReplicationCoordinatorImpl::_onElectCmdRunnerComplete() {
+ LoseElectionGuard lossGuard(_topCoord.get(),
+ &_replExecutor,
+ &_freshnessChecker,
+ &_electCmdRunner,
+ &_electionFinishedEvent);
+
+ invariant(_freshnessChecker);
+ invariant(_electCmdRunner);
+ if (_electCmdRunner->isCanceled()) {
+ LOG(2) << "Election canceled during elect self phase";
+ return;
+ }
- log() << "running for election";
- // Secure our vote for ourself first
- if (!_topCoord->voteForMyself(now)) {
- return;
- }
+ const int receivedVotes = _electCmdRunner->getReceivedVotes();
- _electCmdRunner.reset(new ElectCmdRunner);
- StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _electCmdRunner->start(
- &_replExecutor,
- _rsConfig,
- _selfIndex,
- _topCoord->getMaybeUpHostAndPorts(),
- stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this));
- if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(18685, nextPhaseEvh.getStatus());
- lossGuard.dismiss();
+ if (receivedVotes < _rsConfig.getMajorityVoteCount()) {
+ log() << "couldn't elect self, only received " << receivedVotes
+ << " votes, but needed at least " << _rsConfig.getMajorityVoteCount();
+ // Suppress ourselves from standing for election again, giving other nodes a chance
+ // to win their elections.
+ const auto ms = Milliseconds(_replExecutor.nextRandomInt64(1000) + 50);
+ const Date_t now(_replExecutor.now());
+ const Date_t nextCandidateTime = now + ms;
+ log() << "waiting until " << nextCandidateTime << " before standing for election again";
+ _topCoord->setElectionSleepUntil(nextCandidateTime);
+ _replExecutor.scheduleWorkAt(
+ nextCandidateTime,
+ stdx::bind(&ReplicationCoordinatorImpl::_recoverFromElectionTie,
+ this,
+ stdx::placeholders::_1));
+ return;
}
- void ReplicationCoordinatorImpl::_onElectCmdRunnerComplete() {
- LoseElectionGuard lossGuard(_topCoord.get(),
- &_replExecutor,
- &_freshnessChecker,
- &_electCmdRunner,
- &_electionFinishedEvent);
-
- invariant(_freshnessChecker);
- invariant(_electCmdRunner);
- if (_electCmdRunner->isCanceled()) {
- LOG(2) << "Election canceled during elect self phase";
- return;
- }
+ if (_rsConfig.getConfigVersion() != _freshnessChecker->getOriginalConfigVersion()) {
+ log() << "config version changed during our election, ignoring result";
+ return;
+ }
- const int receivedVotes = _electCmdRunner->getReceivedVotes();
-
- if (receivedVotes < _rsConfig.getMajorityVoteCount()) {
- log() << "couldn't elect self, only received " << receivedVotes <<
- " votes, but needed at least " << _rsConfig.getMajorityVoteCount();
- // Suppress ourselves from standing for election again, giving other nodes a chance
- // to win their elections.
- const auto ms = Milliseconds(_replExecutor.nextRandomInt64(1000) + 50);
- const Date_t now(_replExecutor.now());
- const Date_t nextCandidateTime = now + ms;
- log() << "waiting until " << nextCandidateTime << " before standing for election again";
- _topCoord->setElectionSleepUntil(nextCandidateTime);
- _replExecutor.scheduleWorkAt(
- nextCandidateTime,
- stdx::bind(&ReplicationCoordinatorImpl::_recoverFromElectionTie,
- this,
- stdx::placeholders::_1));
- return;
- }
+ log() << "election succeeded, assuming primary role";
- if (_rsConfig.getConfigVersion() != _freshnessChecker->getOriginalConfigVersion()) {
- log() << "config version changed during our election, ignoring result";
- return;
- }
-
- log() << "election succeeded, assuming primary role";
+ lossGuard.dismiss();
+ _freshnessChecker.reset(NULL);
+ _electCmdRunner.reset(NULL);
+ _performPostMemberStateUpdateAction(kActionWinElection);
+ _replExecutor.signalEvent(_electionFinishedEvent);
+}
- lossGuard.dismiss();
- _freshnessChecker.reset(NULL);
- _electCmdRunner.reset(NULL);
- _performPostMemberStateUpdateAction(kActionWinElection);
- _replExecutor.signalEvent(_electionFinishedEvent);
+void ReplicationCoordinatorImpl::_recoverFromElectionTie(
+ const ReplicationExecutor::CallbackArgs& cbData) {
+ if (!cbData.status.isOK()) {
+ return;
}
-
- void ReplicationCoordinatorImpl::_recoverFromElectionTie(
- const ReplicationExecutor::CallbackArgs& cbData) {
- if (!cbData.status.isOK()) {
- return;
- }
- if (_topCoord->checkShouldStandForElection(_replExecutor.now(), getMyLastOptime())) {
- _startElectSelf();
- }
+ if (_topCoord->checkShouldStandForElection(_replExecutor.now(), getMyLastOptime())) {
+ _startElectSelf();
}
+}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index a0e4149de24..b187ccd74c2 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -48,367 +48,371 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
-
- class ReplCoordElectTest : public ReplCoordTest {
- protected:
- void simulateEnoughHeartbeatsForElectability();
- void simulateFreshEnoughForElectability();
- };
-
- void ReplCoordElectTest::simulateEnoughHeartbeatsForElectability() {
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- for (int i = 0; i < rsConfig.getNumMembers() - 1; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgs hbArgs;
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName(rsConfig.getReplSetName());
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
+using executor::NetworkInterfaceMock;
+
+class ReplCoordElectTest : public ReplCoordTest {
+protected:
+ void simulateEnoughHeartbeatsForElectability();
+ void simulateFreshEnoughForElectability();
+};
+
+void ReplCoordElectTest::simulateEnoughHeartbeatsForElectability() {
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ for (int i = 0; i < rsConfig.getNumMembers() - 1; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgs hbArgs;
+ if (hbArgs.initialize(request.cmdObj).isOK()) {
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName(rsConfig.getReplSetName());
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
- net->exitNetwork();
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+}
- void ReplCoordElectTest::simulateFreshEnoughForElectability() {
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- for (int i = 0; i < rsConfig.getNumMembers() - 1; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.cmdObj.firstElement().fieldNameStringData() == "replSetFresh") {
- net->scheduleResponse(noi, net->now(), makeResponseStatus(
- BSON("ok" << 1 <<
- "fresher" << false <<
- "opTime" << Date_t::fromMillisSinceEpoch(
- Timestamp(0, 0).asLL()) <<
- "veto" << false)));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
+void ReplCoordElectTest::simulateFreshEnoughForElectability() {
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ for (int i = 0; i < rsConfig.getNumMembers() - 1; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.cmdObj.firstElement().fieldNameStringData() == "replSetFresh") {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "fresher" << false << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL())
+ << "veto" << false)));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
- net->exitNetwork();
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+}
- TEST_F(ReplCoordElectTest, ElectTooSoon) {
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- // Election never starts because we haven't set a lastOpTimeApplied value yet, via a
- // heartbeat.
- startCapturingLogMessages();
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345"))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- simulateEnoughHeartbeatsForElectability();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("node has no applied oplog entries"));
- }
+TEST_F(ReplCoordElectTest, ElectTooSoon) {
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ // Election never starts because we haven't set a lastOpTimeApplied value yet, via a
+ // heartbeat.
+ startCapturingLogMessages();
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ simulateEnoughHeartbeatsForElectability();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("node has no applied oplog entries"));
+}
- /**
- * This test checks that an election can happen when only one node is up, and it has the
- * vote(s) to win.
- */
- TEST_F(ReplCoordElectTest, ElectTwoNodesWithOneZeroVoter) {
- OperationContextReplMock txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345" <<
- "votes" << 0 << "hidden" << true <<
- "priority" << 0))),
- HostAndPort("node1", 12345));
-
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
-
- ASSERT(getReplCoord()->getMemberState().secondary()) <<
- getReplCoord()->getMemberState().toString();
-
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(10,0), 0));
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- // blackhole heartbeat
- net->scheduleResponse(noi,
- net->now(),
- ResponseStatus(ErrorCodes::OperationFailed, "timeout"));
- net->runReadyNetworkOperations();
- // blackhole freshness
- const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
- net->scheduleResponse(noi2,
- net->now(),
- ResponseStatus(ErrorCodes::OperationFailed, "timeout"));
- net->runReadyNetworkOperations();
- net->exitNetwork();
-
- ASSERT(getReplCoord()->getMemberState().primary()) <<
- getReplCoord()->getMemberState().toString();
- ASSERT(getReplCoord()->isWaitingForApplierToDrain());
-
- // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
- IsMasterResponse imResponse;
- getReplCoord()->fillIsMasterForReplSet(&imResponse);
- ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- getReplCoord()->signalDrainComplete(&txn);
- getReplCoord()->fillIsMasterForReplSet(&imResponse);
- ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- }
+/**
+ * This test checks that an election can happen when only one node is up, and it has the
+ * vote(s) to win.
+ */
+TEST_F(ReplCoordElectTest, ElectTwoNodesWithOneZeroVoter) {
+ OperationContextReplMock txn;
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes" << 0 << "hidden" << true << "priority" << 0))),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+
+ ASSERT(getReplCoord()->getMemberState().secondary())
+ << getReplCoord()->getMemberState().toString();
+
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(10, 0), 0));
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ // blackhole heartbeat
+ net->scheduleResponse(noi, net->now(), ResponseStatus(ErrorCodes::OperationFailed, "timeout"));
+ net->runReadyNetworkOperations();
+ // blackhole freshness
+ const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
+ net->scheduleResponse(noi2, net->now(), ResponseStatus(ErrorCodes::OperationFailed, "timeout"));
+ net->runReadyNetworkOperations();
+ net->exitNetwork();
+
+ ASSERT(getReplCoord()->getMemberState().primary())
+ << getReplCoord()->getMemberState().toString();
+ ASSERT(getReplCoord()->isWaitingForApplierToDrain());
+
+ // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
+ IsMasterResponse imResponse;
+ getReplCoord()->fillIsMasterForReplSet(&imResponse);
+ ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+ getReplCoord()->signalDrainComplete(&txn);
+ getReplCoord()->fillIsMasterForReplSet(&imResponse);
+ ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+}
- TEST_F(ReplCoordElectTest, Elect1NodeSuccess) {
- OperationContextReplMock txn;
- startCapturingLogMessages();
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345"))),
- HostAndPort("node1", 12345));
-
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
-
- ASSERT(getReplCoord()->getMemberState().primary()) <<
- getReplCoord()->getMemberState().toString();
- ASSERT(getReplCoord()->isWaitingForApplierToDrain());
-
- // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
- IsMasterResponse imResponse;
- getReplCoord()->fillIsMasterForReplSet(&imResponse);
- ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- getReplCoord()->signalDrainComplete(&txn);
- getReplCoord()->fillIsMasterForReplSet(&imResponse);
- ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- }
+TEST_F(ReplCoordElectTest, Elect1NodeSuccess) {
+ OperationContextReplMock txn;
+ startCapturingLogMessages();
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345"))),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+
+ ASSERT(getReplCoord()->getMemberState().primary())
+ << getReplCoord()->getMemberState().toString();
+ ASSERT(getReplCoord()->isWaitingForApplierToDrain());
+
+ // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
+ IsMasterResponse imResponse;
+ getReplCoord()->fillIsMasterForReplSet(&imResponse);
+ ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+ getReplCoord()->signalDrainComplete(&txn);
+ getReplCoord()->fillIsMasterForReplSet(&imResponse);
+ ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+}
- TEST_F(ReplCoordElectTest, ElectManyNodesSuccess) {
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ));
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- OperationContextNoop txn;
- getReplCoord()->setMyLastOptime(OpTime(Timestamp (100, 1), 0));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- startCapturingLogMessages();
- simulateSuccessfulElection();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("election succeeded"));
- }
+TEST_F(ReplCoordElectTest, ElectManyNodesSuccess) {
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")));
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ OperationContextNoop txn;
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 1), 0));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ startCapturingLogMessages();
+ simulateSuccessfulElection();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("election succeeded"));
+}
- TEST_F(ReplCoordElectTest, ElectNotEnoughVotes) {
- // one responds with -10000 votes, and one doesn't respond, and we are not elected
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ));
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
- simulateFreshEnoughForElectability();
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- while (net->hasReadyRequests()) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.target != HostAndPort("node2", 12345)) {
- net->blackHole(noi);
- }
- else if (request.cmdObj.firstElement().fieldNameStringData() != "replSetElect") {
- net->blackHole(noi);
- }
- else {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 <<
- "vote" << -10000 <<
- "round" << OID())));
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectTest, ElectNotEnoughVotes) {
+ // one responds with -10000 votes, and one doesn't respond, and we are not elected
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")));
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+ simulateFreshEnoughForElectability();
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ while (net->hasReadyRequests()) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.target != HostAndPort("node2", 12345)) {
+ net->blackHole(noi);
+ } else if (request.cmdObj.firstElement().fieldNameStringData() != "replSetElect") {
+ net->blackHole(noi);
+ } else {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "vote" << -10000 << "round" << OID())));
}
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining("couldn't elect self, only received -9999 votes"));
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("couldn't elect self, only received -9999 votes"));
+}
- TEST_F(ReplCoordElectTest, ElectWrongTypeForVote) {
- // one responds with a bad 'vote' field, and one doesn't respond, and we are not elected
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ));
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
- simulateFreshEnoughForElectability();
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- while (net->hasReadyRequests()) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.target != HostAndPort("node2", 12345)) {
- net->blackHole(noi);
- }
- else if (request.cmdObj.firstElement().fieldNameStringData() != "replSetElect") {
- net->blackHole(noi);
- }
- else {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 <<
- "vote" << "yea" <<
- "round" << OID())));
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectTest, ElectWrongTypeForVote) {
+ // one responds with a bad 'vote' field, and one doesn't respond, and we are not elected
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")));
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+ simulateFreshEnoughForElectability();
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ while (net->hasReadyRequests()) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.target != HostAndPort("node2", 12345)) {
+ net->blackHole(noi);
+ } else if (request.cmdObj.firstElement().fieldNameStringData() != "replSetElect") {
+ net->blackHole(noi);
+ } else {
+ net->scheduleResponse(noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "vote"
+ << "yea"
+ << "round" << OID())));
}
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining("wrong type for vote argument in replSetElect command"));
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1,
+ countLogLinesContaining("wrong type for vote argument in replSetElect command"));
+}
- TEST_F(ReplCoordElectTest, ElectionDuringHBReconfigFails) {
- // start up, receive reconfig via heartbeat while at the same time, become candidate.
- // candidate state should be cleared.
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") <<
- BSON("_id" << 3 << "host" << "node3:12345") <<
- BSON("_id" << 4 << "host" << "node4:12345") <<
- BSON("_id" << 5 << "host" << "node5:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100,0), 0));
-
- // set hbreconfig to hang while in progress
- getExternalState()->setStoreLocalConfigDocumentToHang(true);
-
- // hb reconfig
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- ReplSetHeartbeatResponse hbResp2;
- ReplicaSetConfig config;
- config.initialize(BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345"))));
- hbResp2.setConfig(config);
- hbResp2.setConfigVersion(3);
- hbResp2.setSetName("mySet");
- hbResp2.setState(MemberState::RS_SECONDARY);
- BSONObjBuilder respObj2;
- respObj2 << "ok" << 1;
- hbResp2.addToBSON(&respObj2, false);
- net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
- const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
- net->scheduleResponse(noi2, net->now(), makeResponseStatus(respObj2.obj()));
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
-
- // prepare candidacy
- BSONObjBuilder result;
- ReplicationCoordinator::ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = config.toBSON();
- ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
-
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
- startCapturingLogMessages();
-
- // receive sufficient heartbeats to trigger an election
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- net->enterNetwork();
- for (int i = 0; i < 2; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgs hbArgs;
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName(rsConfig.getReplSetName());
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectTest, ElectionDuringHBReconfigFails) {
+ // start up, receive reconfig via heartbeat while at the same time, become candidate.
+ // candidate state should be cleared.
+ OperationContextNoop txn;
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345") << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 4 << "host"
+ << "node4:12345") << BSON("_id" << 5 << "host"
+ << "node5:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+
+ // set hbreconfig to hang while in progress
+ getExternalState()->setStoreLocalConfigDocumentToHang(true);
+
+ // hb reconfig
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ ReplSetHeartbeatResponse hbResp2;
+ ReplicaSetConfig config;
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))));
+ hbResp2.setConfig(config);
+ hbResp2.setConfigVersion(3);
+ hbResp2.setSetName("mySet");
+ hbResp2.setState(MemberState::RS_SECONDARY);
+ BSONObjBuilder respObj2;
+ respObj2 << "ok" << 1;
+ hbResp2.addToBSON(&respObj2, false);
+ net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
+ const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
+ net->scheduleResponse(noi2, net->now(), makeResponseStatus(respObj2.obj()));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+
+ // prepare candidacy
+ BSONObjBuilder result;
+ ReplicationCoordinator::ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = config.toBSON();
+ ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
+ startCapturingLogMessages();
+
+ // receive sufficient heartbeats to trigger an election
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ net->enterNetwork();
+ for (int i = 0; i < 2; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgs hbArgs;
+ if (hbArgs.initialize(request.cmdObj).isOK()) {
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName(rsConfig.getReplSetName());
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
-
- stopCapturingLogMessages();
- // ensure node does not stand for election
- ASSERT_EQUALS(1,
- countLogLinesContaining("Not standing for election; processing "
- "a configuration change"));
- getExternalState()->setStoreLocalConfigDocumentToHang(false);
+ net->runReadyNetworkOperations();
}
+ stopCapturingLogMessages();
+ // ensure node does not stand for election
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "Not standing for election; processing "
+ "a configuration change"));
+ getExternalState()->setStoreLocalConfigDocumentToHang(false);
+}
}
}
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index 686f7bbe5d3..36a28e24a37 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -44,55 +44,56 @@ namespace mongo {
namespace repl {
namespace {
- class LoseElectionGuardV1 {
- MONGO_DISALLOW_COPYING(LoseElectionGuardV1);
- public:
- LoseElectionGuardV1(
- TopologyCoordinator* topCoord,
- ReplicationExecutor* executor,
- std::unique_ptr<VoteRequester>* voteRequester,
- std::unique_ptr<ElectionWinnerDeclarer>* electionWinnerDeclarer,
- ReplicationExecutor::EventHandle* electionFinishedEvent)
- : _topCoord(topCoord),
- _executor(executor),
- _voteRequester(voteRequester),
- _electionWinnerDeclarer(electionWinnerDeclarer),
- _electionFinishedEvent(electionFinishedEvent),
- _dismissed(false) {
+class LoseElectionGuardV1 {
+ MONGO_DISALLOW_COPYING(LoseElectionGuardV1);
+
+public:
+ LoseElectionGuardV1(TopologyCoordinator* topCoord,
+ ReplicationExecutor* executor,
+ std::unique_ptr<VoteRequester>* voteRequester,
+ std::unique_ptr<ElectionWinnerDeclarer>* electionWinnerDeclarer,
+ ReplicationExecutor::EventHandle* electionFinishedEvent)
+ : _topCoord(topCoord),
+ _executor(executor),
+ _voteRequester(voteRequester),
+ _electionWinnerDeclarer(electionWinnerDeclarer),
+ _electionFinishedEvent(electionFinishedEvent),
+ _dismissed(false) {}
+
+ ~LoseElectionGuardV1() {
+ if (_dismissed) {
+ return;
}
-
- ~LoseElectionGuardV1() {
- if (_dismissed) {
- return;
- }
- _topCoord->processLoseElection();
- _electionWinnerDeclarer->reset(nullptr);
- _voteRequester->reset(nullptr);
- if (_electionFinishedEvent->isValid()) {
- _executor->signalEvent(*_electionFinishedEvent);
- }
+ _topCoord->processLoseElection();
+ _electionWinnerDeclarer->reset(nullptr);
+ _voteRequester->reset(nullptr);
+ if (_electionFinishedEvent->isValid()) {
+ _executor->signalEvent(*_electionFinishedEvent);
}
+ }
- void dismiss() { _dismissed = true; }
+ void dismiss() {
+ _dismissed = true;
+ }
- private:
- TopologyCoordinator* const _topCoord;
- ReplicationExecutor* const _executor;
- std::unique_ptr<VoteRequester>* const _voteRequester;
- std::unique_ptr<ElectionWinnerDeclarer>* const _electionWinnerDeclarer;
- const ReplicationExecutor::EventHandle* _electionFinishedEvent;
- bool _dismissed;
- };
+private:
+ TopologyCoordinator* const _topCoord;
+ ReplicationExecutor* const _executor;
+ std::unique_ptr<VoteRequester>* const _voteRequester;
+ std::unique_ptr<ElectionWinnerDeclarer>* const _electionWinnerDeclarer;
+ const ReplicationExecutor::EventHandle* _electionFinishedEvent;
+ bool _dismissed;
+};
} // namespace
- void ReplicationCoordinatorImpl::_startElectSelfV1() {
- invariant(!_electionWinnerDeclarer);
- invariant(!_voteRequester);
- invariant(!_freshnessChecker);
+void ReplicationCoordinatorImpl::_startElectSelfV1() {
+ invariant(!_electionWinnerDeclarer);
+ invariant(!_voteRequester);
+ invariant(!_freshnessChecker);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- switch (_rsConfigState) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ switch (_rsConfigState) {
case kConfigSteady:
break;
case kConfigInitiating:
@@ -103,183 +104,175 @@ namespace {
_topCoord->processLoseElection();
return;
default:
- severe() << "Entered replica set election code while in illegal config state " <<
- int(_rsConfigState);
+ severe() << "Entered replica set election code while in illegal config state "
+ << int(_rsConfigState);
fassertFailed(28641);
- }
-
- const StatusWith<ReplicationExecutor::EventHandle> finishEvh = _replExecutor.makeEvent();
- if (finishEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(28642, finishEvh.getStatus());
- _electionFinishedEvent = finishEvh.getValue();
- LoseElectionGuardV1 lossGuard(_topCoord.get(),
- &_replExecutor,
- &_voteRequester,
- &_electionWinnerDeclarer,
- &_electionFinishedEvent);
-
-
- invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
- OpTime lastOpTimeApplied(_getMyLastOptime_inlock());
-
- if (lastOpTimeApplied == OpTime()) {
- log() << "not trying to elect self, "
- "do not yet have a complete set of data from any point in time";
- return;
- }
-
- log() << "conducting a dry run election to see if we could be elected";
- _voteRequester.reset(new VoteRequester);
-
- // This is necessary because the voteRequester may call directly into winning an
- // election, if there are no other MaybeUp nodes. Winning an election attempts to lock
- // _mutex again.
- lk.unlock();
-
- long long term = _topCoord->getTerm();
- StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(
- &_replExecutor,
- _rsConfig,
- _rsConfig.getMemberAt(_selfIndex).getId(),
- _topCoord->getTerm(),
- true, // dry run
- getMyLastOptime(),
- stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term));
- if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(28685, nextPhaseEvh.getStatus());
- lossGuard.dismiss();
}
- void ReplicationCoordinatorImpl::_onDryRunComplete(long long originalTerm) {
- invariant(_voteRequester);
- invariant(!_electionWinnerDeclarer);
- LoseElectionGuardV1 lossGuard(_topCoord.get(),
- &_replExecutor,
- &_voteRequester,
- &_electionWinnerDeclarer,
- &_electionFinishedEvent);
-
- if (_topCoord->getTerm() != originalTerm) {
- log() << "not running for primary, we have been superceded already";
- return;
- }
-
- const VoteRequester::VoteRequestResult endResult = _voteRequester->getResult();
-
- if (endResult == VoteRequester::InsufficientVotes) {
- log() << "not running for primary, we received insufficient votes";
- return;
- }
- else if (endResult == VoteRequester::StaleTerm) {
- log() << "not running for primary, we have been superceded already";
- return;
- }
- else if (endResult != VoteRequester::SuccessfullyElected) {
- log() << "not running for primary, we received an unexpected problem";
- return;
- }
-
- log() << "dry election run succeeded, running for election";
- _topCoord->incrementTerm();
- // Secure our vote for ourself first
- _topCoord->voteForMyselfV1();
-
- _voteRequester.reset(new VoteRequester);
-
- StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(
- &_replExecutor,
- _rsConfig,
- _rsConfig.getMemberAt(_selfIndex).getId(),
- _topCoord->getTerm(),
- false,
- getMyLastOptime(),
- stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete,
- this,
- originalTerm + 1));
- if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(28643, nextPhaseEvh.getStatus());
- lossGuard.dismiss();
+ const StatusWith<ReplicationExecutor::EventHandle> finishEvh = _replExecutor.makeEvent();
+ if (finishEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(28642, finishEvh.getStatus());
+ _electionFinishedEvent = finishEvh.getValue();
+ LoseElectionGuardV1 lossGuard(_topCoord.get(),
+ &_replExecutor,
+ &_voteRequester,
+ &_electionWinnerDeclarer,
+ &_electionFinishedEvent);
+
+
+ invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
+ OpTime lastOpTimeApplied(_getMyLastOptime_inlock());
+
+ if (lastOpTimeApplied == OpTime()) {
+ log() << "not trying to elect self, "
+ "do not yet have a complete set of data from any point in time";
+ return;
}
- void ReplicationCoordinatorImpl::_onVoteRequestComplete(long long originalTerm) {
- invariant(_voteRequester);
- invariant(!_electionWinnerDeclarer);
- LoseElectionGuardV1 lossGuard(_topCoord.get(),
- &_replExecutor,
- &_voteRequester,
- &_electionWinnerDeclarer,
- &_electionFinishedEvent);
-
- if (_topCoord->getTerm() != originalTerm) {
- log() << "not becoming primary, we have been superceded already";
- return;
- }
-
- const VoteRequester::VoteRequestResult endResult = _voteRequester->getResult();
+ log() << "conducting a dry run election to see if we could be elected";
+ _voteRequester.reset(new VoteRequester);
+
+ // This is necessary because the voteRequester may call directly into winning an
+ // election, if there are no other MaybeUp nodes. Winning an election attempts to lock
+ // _mutex again.
+ lk.unlock();
+
+ long long term = _topCoord->getTerm();
+ StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(
+ &_replExecutor,
+ _rsConfig,
+ _rsConfig.getMemberAt(_selfIndex).getId(),
+ _topCoord->getTerm(),
+ true, // dry run
+ getMyLastOptime(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term));
+ if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(28685, nextPhaseEvh.getStatus());
+ lossGuard.dismiss();
+}
+
+void ReplicationCoordinatorImpl::_onDryRunComplete(long long originalTerm) {
+ invariant(_voteRequester);
+ invariant(!_electionWinnerDeclarer);
+ LoseElectionGuardV1 lossGuard(_topCoord.get(),
+ &_replExecutor,
+ &_voteRequester,
+ &_electionWinnerDeclarer,
+ &_electionFinishedEvent);
+
+ if (_topCoord->getTerm() != originalTerm) {
+ log() << "not running for primary, we have been superceded already";
+ return;
+ }
- if (endResult == VoteRequester::InsufficientVotes) {
- log() << "not becoming primary, we received insufficient votes";
- return;
- }
- else if (endResult == VoteRequester::StaleTerm) {
- log() << "not becoming primary, we have been superceded already";
- return;
- }
- else if (endResult != VoteRequester::SuccessfullyElected) {
- log() << "not becoming primary, we received an unexpected problem";
- return;
- }
+ const VoteRequester::VoteRequestResult endResult = _voteRequester->getResult();
+
+ if (endResult == VoteRequester::InsufficientVotes) {
+ log() << "not running for primary, we received insufficient votes";
+ return;
+ } else if (endResult == VoteRequester::StaleTerm) {
+ log() << "not running for primary, we have been superceded already";
+ return;
+ } else if (endResult != VoteRequester::SuccessfullyElected) {
+ log() << "not running for primary, we received an unexpected problem";
+ return;
+ }
- log() << "election succeeded, assuming primary role";
- _performPostMemberStateUpdateAction(kActionWinElection);
-
- _electionWinnerDeclarer.reset(new ElectionWinnerDeclarer);
- StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _electionWinnerDeclarer->start(
- &_replExecutor,
- _rsConfig.getReplSetName(),
- _rsConfig.getMemberAt(_selfIndex).getId(),
- _topCoord->getTerm(),
- _topCoord->getMaybeUpHostAndPorts(),
- stdx::bind(&ReplicationCoordinatorImpl::_onElectionWinnerDeclarerComplete, this));
- if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(28644, nextPhaseEvh.getStatus());
- lossGuard.dismiss();
+ log() << "dry election run succeeded, running for election";
+ _topCoord->incrementTerm();
+ // Secure our vote for ourself first
+ _topCoord->voteForMyselfV1();
+
+ _voteRequester.reset(new VoteRequester);
+
+ StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(
+ &_replExecutor,
+ _rsConfig,
+ _rsConfig.getMemberAt(_selfIndex).getId(),
+ _topCoord->getTerm(),
+ false,
+ getMyLastOptime(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete, this, originalTerm + 1));
+ if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(28643, nextPhaseEvh.getStatus());
+ lossGuard.dismiss();
+}
+
+void ReplicationCoordinatorImpl::_onVoteRequestComplete(long long originalTerm) {
+ invariant(_voteRequester);
+ invariant(!_electionWinnerDeclarer);
+ LoseElectionGuardV1 lossGuard(_topCoord.get(),
+ &_replExecutor,
+ &_voteRequester,
+ &_electionWinnerDeclarer,
+ &_electionFinishedEvent);
+
+ if (_topCoord->getTerm() != originalTerm) {
+ log() << "not becoming primary, we have been superceded already";
+ return;
}
- void ReplicationCoordinatorImpl::_onElectionWinnerDeclarerComplete() {
- LoseElectionGuardV1 lossGuard(_topCoord.get(),
- &_replExecutor,
- &_voteRequester,
- &_electionWinnerDeclarer,
- &_electionFinishedEvent);
-
- invariant(_voteRequester);
- invariant(_electionWinnerDeclarer);
-
- const Status endResult = _electionWinnerDeclarer->getStatus();
-
- if (!endResult.isOK()) {
- log() << "stepping down from primary, because: " << endResult;
- _topCoord->prepareForStepDown();
- _replExecutor.scheduleWorkWithGlobalExclusiveLock(
- stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish,
- this,
- stdx::placeholders::_1));
- }
+ const VoteRequester::VoteRequestResult endResult = _voteRequester->getResult();
+
+ if (endResult == VoteRequester::InsufficientVotes) {
+ log() << "not becoming primary, we received insufficient votes";
+ return;
+ } else if (endResult == VoteRequester::StaleTerm) {
+ log() << "not becoming primary, we have been superceded already";
+ return;
+ } else if (endResult != VoteRequester::SuccessfullyElected) {
+ log() << "not becoming primary, we received an unexpected problem";
+ return;
+ }
- lossGuard.dismiss();
- _voteRequester.reset(nullptr);
- _electionWinnerDeclarer.reset(nullptr);
- _replExecutor.signalEvent(_electionFinishedEvent);
+ log() << "election succeeded, assuming primary role";
+ _performPostMemberStateUpdateAction(kActionWinElection);
+
+ _electionWinnerDeclarer.reset(new ElectionWinnerDeclarer);
+ StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _electionWinnerDeclarer->start(
+ &_replExecutor,
+ _rsConfig.getReplSetName(),
+ _rsConfig.getMemberAt(_selfIndex).getId(),
+ _topCoord->getTerm(),
+ _topCoord->getMaybeUpHostAndPorts(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onElectionWinnerDeclarerComplete, this));
+ if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
+ }
+ fassert(28644, nextPhaseEvh.getStatus());
+ lossGuard.dismiss();
+}
+
+void ReplicationCoordinatorImpl::_onElectionWinnerDeclarerComplete() {
+ LoseElectionGuardV1 lossGuard(_topCoord.get(),
+ &_replExecutor,
+ &_voteRequester,
+ &_electionWinnerDeclarer,
+ &_electionFinishedEvent);
+
+ invariant(_voteRequester);
+ invariant(_electionWinnerDeclarer);
+
+ const Status endResult = _electionWinnerDeclarer->getStatus();
+
+ if (!endResult.isOK()) {
+ log() << "stepping down from primary, because: " << endResult;
+ _topCoord->prepareForStepDown();
+ _replExecutor.scheduleWorkWithGlobalExclusiveLock(
+ stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish, this, stdx::placeholders::_1));
}
+ lossGuard.dismiss();
+ _voteRequester.reset(nullptr);
+ _electionWinnerDeclarer.reset(nullptr);
+ _replExecutor.signalEvent(_electionFinishedEvent);
+}
+
} // namespace repl
} // namespace mongo
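The LoseElectionGuardV1 code reformatted above is a dismissable RAII guard: every early return out of an election phase triggers the loss cleanup in the destructor, and only the success path calls dismiss() to hand responsibility to the next phase. Below is a minimal standalone sketch of that idiom only, assuming nothing from the MongoDB code base; ScopeGuard and tryStartElection are made-up names for illustration.

#include <functional>
#include <iostream>

class ScopeGuard {
public:
    explicit ScopeGuard(std::function<void()> onFailure)
        : _onFailure(std::move(onFailure)), _dismissed(false) {}

    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

    ~ScopeGuard() {
        if (!_dismissed) {
            _onFailure();  // runs on any path that did not dismiss the guard
        }
    }

    void dismiss() {
        _dismissed = true;
    }

private:
    std::function<void()> _onFailure;
    bool _dismissed;
};

bool tryStartElection(bool haveData) {
    ScopeGuard lossGuard([] { std::cout << "lost election, cleaning up\n"; });
    if (!haveData) {
        return false;  // guard fires on this early return
    }
    // ... schedule the next election phase ...
    lossGuard.dismiss();  // success: cleanup is now the next phase's job
    return true;
}

int main() {
    tryStartElection(false);  // prints the cleanup message
    tryStartElection(true);   // silent
    return 0;
}
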
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index c3c1deddf88..0e3300bf079 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -48,575 +48,590 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
-
- class ReplCoordElectV1Test : public ReplCoordTest {
- protected:
- void simulateEnoughHeartbeatsForElectability();
- void simulateSuccessfulDryRun();
- };
-
- void ReplCoordElectV1Test::simulateEnoughHeartbeatsForElectability() {
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- for (int i = 0; i < rsConfig.getNumMembers() - 1; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgsV1 hbArgs;
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName(rsConfig.getReplSetName());
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- BSONObjBuilder respObj;
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
+using executor::NetworkInterfaceMock;
+
+class ReplCoordElectV1Test : public ReplCoordTest {
+protected:
+ void simulateEnoughHeartbeatsForElectability();
+ void simulateSuccessfulDryRun();
+};
+
+void ReplCoordElectV1Test::simulateEnoughHeartbeatsForElectability() {
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ for (int i = 0; i < rsConfig.getNumMembers() - 1; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgsV1 hbArgs;
+ if (hbArgs.initialize(request.cmdObj).isOK()) {
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName(rsConfig.getReplSetName());
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ BSONObjBuilder respObj;
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
- net->exitNetwork();
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+}
- void ReplCoordElectV1Test::simulateSuccessfulDryRun() {
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- for (int i = 0; i < rsConfig.getNumMembers() / 2; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
- net->scheduleResponse(noi, net->now(), makeResponseStatus(
- BSON("ok" << 1 <<
- "reason" << "" <<
- "term" << request.cmdObj["term"].Long() <<
- "voteGranted" << true)));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
+void ReplCoordElectV1Test::simulateSuccessfulDryRun() {
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ for (int i = 0; i < rsConfig.getNumMembers() / 2; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
- net->exitNetwork();
- }
-
- TEST_F(ReplCoordElectV1Test, ElectTooSoon) {
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- // Election never starts because we haven't set a lastOpTimeApplied value yet, via a
- // heartbeat.
- startCapturingLogMessages();
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345")) <<
- "protocolVersion" << 1),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- simulateEnoughHeartbeatsForElectability();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("node has no applied oplog entries"));
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+}
- TEST_F(ReplCoordElectV1Test, ElectTwoNodesWithOneZeroVoter) {
- OperationContextReplMock txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345" <<
- "votes" << 0 << "hidden" << true <<
- "priority" << 0)) <<
- "protocolVersion" << 1),
- HostAndPort("node1", 12345));
+TEST_F(ReplCoordElectV1Test, ElectTooSoon) {
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ // Election never starts because we haven't set a lastOpTimeApplied value yet, via a
+ // heartbeat.
+ startCapturingLogMessages();
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")) << "protocolVersion"
+ << 1),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ simulateEnoughHeartbeatsForElectability();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("node has no applied oplog entries"));
+}
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+TEST_F(ReplCoordElectV1Test, ElectTwoNodesWithOneZeroVoter) {
+ OperationContextReplMock txn;
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes" << 0 << "hidden" << true << "priority" << 0))
+ << "protocolVersion" << 1),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+
+ ASSERT(getReplCoord()->getMemberState().secondary())
+ << getReplCoord()->getMemberState().toString();
+
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(10, 0), 0));
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi, net->now(), ResponseStatus(ErrorCodes::OperationFailed, "timeout"));
+ net->runReadyNetworkOperations();
+ net->exitNetwork();
+
+ ASSERT(getReplCoord()->getMemberState().primary())
+ << getReplCoord()->getMemberState().toString();
+ ASSERT(getReplCoord()->isWaitingForApplierToDrain());
+
+ // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
+ IsMasterResponse imResponse;
+ getReplCoord()->fillIsMasterForReplSet(&imResponse);
+ ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+ getReplCoord()->signalDrainComplete(&txn);
+ getReplCoord()->fillIsMasterForReplSet(&imResponse);
+ ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+}
- ASSERT(getReplCoord()->getMemberState().secondary()) <<
- getReplCoord()->getMemberState().toString();
+TEST_F(ReplCoordElectV1Test, Elect1NodeSuccess) {
+ OperationContextReplMock txn;
+ startCapturingLogMessages();
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")) << "protocolVersion" << 1),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+
+ ASSERT(getReplCoord()->getMemberState().primary())
+ << getReplCoord()->getMemberState().toString();
+ ASSERT(getReplCoord()->isWaitingForApplierToDrain());
+
+ // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
+ IsMasterResponse imResponse;
+ getReplCoord()->fillIsMasterForReplSet(&imResponse);
+ ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+ getReplCoord()->signalDrainComplete(&txn);
+ getReplCoord()->fillIsMasterForReplSet(&imResponse);
+ ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+}
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(10,0), 0));
+TEST_F(ReplCoordElectV1Test, ElectManyNodesSuccess) {
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")) << "protocolVersion"
+ << 1);
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ OperationContextNoop txn;
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 1), 0));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ startCapturingLogMessages();
+ simulateSuccessfulV1Election();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("election succeeded"));
+}
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
+TEST_F(ReplCoordElectV1Test, ElectNotEnoughVotesInDryRun) {
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")) << "protocolVersion"
+ << 1);
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ while (net->hasReadyRequests()) {
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now(),
- ResponseStatus(ErrorCodes::OperationFailed, "timeout"));
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
+ net->blackHole(noi);
+ } else {
+ net->scheduleResponse(noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "term" << 0 << "voteGranted"
+ << false << "reason"
+ << "don't like him much")));
+ }
net->runReadyNetworkOperations();
- net->exitNetwork();
-
- ASSERT(getReplCoord()->getMemberState().primary()) <<
- getReplCoord()->getMemberState().toString();
- ASSERT(getReplCoord()->isWaitingForApplierToDrain());
-
- // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
- IsMasterResponse imResponse;
- getReplCoord()->fillIsMasterForReplSet(&imResponse);
- ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- getReplCoord()->signalDrainComplete(&txn);
- getReplCoord()->fillIsMasterForReplSet(&imResponse);
- ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- }
-
- TEST_F(ReplCoordElectV1Test, Elect1NodeSuccess) {
- OperationContextReplMock txn;
- startCapturingLogMessages();
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")) <<
- "protocolVersion" << 1),
- HostAndPort("node1", 12345));
-
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
-
- ASSERT(getReplCoord()->getMemberState().primary()) <<
- getReplCoord()->getMemberState().toString();
- ASSERT(getReplCoord()->isWaitingForApplierToDrain());
-
- // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true.
- IsMasterResponse imResponse;
- getReplCoord()->fillIsMasterForReplSet(&imResponse);
- ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- getReplCoord()->signalDrainComplete(&txn);
- getReplCoord()->fillIsMasterForReplSet(&imResponse);
- ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- }
-
- TEST_F(ReplCoordElectV1Test, ElectManyNodesSuccess) {
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ) <<
- "protocolVersion" << 1);
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- OperationContextNoop txn;
- getReplCoord()->setMyLastOptime(OpTime(Timestamp (100, 1), 0));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- startCapturingLogMessages();
- simulateSuccessfulV1Election();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("election succeeded"));
}
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(
+ 1, countLogLinesContaining("not running for primary, we received insufficient votes"));
+}
- TEST_F(ReplCoordElectV1Test, ElectNotEnoughVotesInDryRun) {
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ) <<
- "protocolVersion" << 1);
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- while (net->hasReadyRequests()) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
- net->blackHole(noi);
- }
- else {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 <<
- "term" << 0 <<
- "voteGranted" << false <<
- "reason" << "don't like him much")));
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectV1Test, ElectStaleTermInDryRun) {
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")) << "protocolVersion"
+ << 1);
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ while (net->hasReadyRequests()) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
+ net->blackHole(noi);
+ } else {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
+ << "voteGranted" << false << "reason"
+ << "quit living in the past")));
}
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining("not running for primary, we received insufficient votes"));
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(
+ 1, countLogLinesContaining("not running for primary, we have been superceded already"));
+}
- TEST_F(ReplCoordElectV1Test, ElectStaleTermInDryRun) {
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ) <<
- "protocolVersion" << 1);
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- while (net->hasReadyRequests()) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
- net->blackHole(noi);
- }
- else {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 <<
- "term" << request.cmdObj["term"].Long() + 1 <<
- "voteGranted" << false <<
- "reason" << "quit living in the past")));
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectV1Test, ElectionDuringHBReconfigFails) {
+ // start up, receive reconfig via heartbeat while at the same time, become candidate.
+ // candidate state should be cleared.
+ OperationContextNoop txn;
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345") << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 4 << "host"
+ << "node4:12345") << BSON("_id" << 5 << "host"
+ << "node5:12345"))
+ << "protocolVersion" << 1),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+
+ // set hbreconfig to hang while in progress
+ getExternalState()->setStoreLocalConfigDocumentToHang(true);
+
+ // hb reconfig
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ ReplSetHeartbeatResponse hbResp2;
+ ReplicaSetConfig config;
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")) << "protocolVersion"
+ << 1));
+ hbResp2.setConfig(config);
+ hbResp2.setConfigVersion(3);
+ hbResp2.setSetName("mySet");
+ hbResp2.setState(MemberState::RS_SECONDARY);
+ net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
+ const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
+ net->scheduleResponse(noi2, net->now(), makeResponseStatus(hbResp2.toBSON(true)));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+
+ // prepare candidacy
+ BSONObjBuilder result;
+ ReplicationCoordinator::ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = config.toBSON();
+ ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
+ startCapturingLogMessages();
+
+ // receive sufficient heartbeats to trigger an election
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ net->enterNetwork();
+ for (int i = 0; i < 2; ++i) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgsV1 hbArgs;
+ if (hbArgs.initialize(request.cmdObj).isOK()) {
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName(rsConfig.getReplSetName());
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ BSONObjBuilder respObj;
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining(
- "not running for primary, we have been superceded already"));
- }
-
- TEST_F(ReplCoordElectV1Test, ElectionDuringHBReconfigFails) {
- // start up, receive reconfig via heartbeat while at the same time, become candidate.
- // candidate state should be cleared.
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") <<
- BSON("_id" << 3 << "host" << "node3:12345") <<
- BSON("_id" << 4 << "host" << "node4:12345") <<
- BSON("_id" << 5 << "host" << "node5:12345")) <<
- "protocolVersion" << 1),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100,0), 0));
-
- // set hbreconfig to hang while in progress
- getExternalState()->setStoreLocalConfigDocumentToHang(true);
-
- // hb reconfig
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- ReplSetHeartbeatResponse hbResp2;
- ReplicaSetConfig config;
- config.initialize(BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345")) <<
- "protocolVersion" << 1));
- hbResp2.setConfig(config);
- hbResp2.setConfigVersion(3);
- hbResp2.setSetName("mySet");
- hbResp2.setState(MemberState::RS_SECONDARY);
- net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
- const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
- net->scheduleResponse(noi2, net->now(), makeResponseStatus(hbResp2.toBSON(true)));
net->runReadyNetworkOperations();
- getNet()->exitNetwork();
-
- // prepare candidacy
- BSONObjBuilder result;
- ReplicationCoordinator::ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = config.toBSON();
- ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
-
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(2));
- startCapturingLogMessages();
-
- // receive sufficient heartbeats to trigger an election
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- net->enterNetwork();
- for (int i = 0; i < 2; ++i) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgsV1 hbArgs;
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName(rsConfig.getReplSetName());
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- BSONObjBuilder respObj;
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
- }
- net->exitNetwork();
-
- stopCapturingLogMessages();
- // ensure node does not stand for election
- ASSERT_EQUALS(1,
- countLogLinesContaining("Not standing for election; processing "
- "a configuration change"));
- getExternalState()->setStoreLocalConfigDocumentToHang(false);
}
+ net->exitNetwork();
+
+ stopCapturingLogMessages();
+ // ensure node does not stand for election
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "Not standing for election; processing "
+ "a configuration change"));
+ getExternalState()->setStoreLocalConfigDocumentToHang(false);
+}
- TEST_F(ReplCoordElectV1Test, ElectionSucceedsButDeclaringWinnerFails) {
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ) <<
- "protocolVersion" << 1);
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- while (net->hasReadyRequests()) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 <<
- "term" << (request.cmdObj["dryRun"].Bool() ?
- request.cmdObj["term"].Long() - 1 :
- request.cmdObj["term"].Long()) <<
- "voteGranted" << true)));
- }
- else if (request.cmdObj.firstElement().fieldNameStringData() ==
- "replSetDeclareElectionWinner") {
- net->scheduleResponse(noi, net->now(), makeResponseStatus(
- BSON("ok" << 0 <<
- "code" << ErrorCodes::BadValue <<
- "errmsg" << "term has already passed" <<
- "term" << request.cmdObj["term"].Long() + 1)));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectV1Test, ElectionSucceedsButDeclaringWinnerFails) {
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")) << "protocolVersion"
+ << 1);
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ while (net->hasReadyRequests()) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "term"
+ << (request.cmdObj["dryRun"].Bool()
+ ? request.cmdObj["term"].Long() - 1
+ : request.cmdObj["term"].Long())
+ << "voteGranted" << true)));
+ } else if (request.cmdObj.firstElement().fieldNameStringData() ==
+ "replSetDeclareElectionWinner") {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 0 << "code" << ErrorCodes::BadValue << "errmsg"
+ << "term has already passed"
+ << "term" << request.cmdObj["term"].Long() + 1)));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("stepping down from primary, because:"));
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("stepping down from primary, because:"));
+}
- TEST_F(ReplCoordElectV1Test, ElectNotEnoughVotes) {
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ) <<
- "protocolVersion" << 1);
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
- simulateSuccessfulDryRun();
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- while (net->hasReadyRequests()) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
- net->blackHole(noi);
- }
- else {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 <<
- "term" << 1 <<
- "voteGranted" << false <<
- "reason" << "don't like him much")));
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectV1Test, ElectNotEnoughVotes) {
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")) << "protocolVersion"
+ << 1);
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+ simulateSuccessfulDryRun();
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ while (net->hasReadyRequests()) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
+ net->blackHole(noi);
+ } else {
+ net->scheduleResponse(noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
+ << false << "reason"
+ << "don't like him much")));
}
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining("not becoming primary, we received insufficient votes"));
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1,
+ countLogLinesContaining("not becoming primary, we received insufficient votes"));
+}
- TEST_F(ReplCoordElectV1Test, ElectStaleTerm) {
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ) <<
- "protocolVersion" << 1);
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
- simulateSuccessfulDryRun();
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- while (net->hasReadyRequests()) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
- net->blackHole(noi);
- }
- else {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 <<
- "term" << request.cmdObj["term"].Long() + 1 <<
- "voteGranted" << false <<
- "reason" << "quit living in the past")));
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectV1Test, ElectStaleTerm) {
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")) << "protocolVersion"
+ << 1);
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+ simulateSuccessfulDryRun();
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ while (net->hasReadyRequests()) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
+ net->blackHole(noi);
+ } else {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
+ << "voteGranted" << false << "reason"
+ << "quit living in the past")));
}
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining("not becoming primary, we have been superceded already"));
+ net->runReadyNetworkOperations();
}
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1,
+ countLogLinesContaining("not becoming primary, we have been superceded already"));
+}
- TEST_F(ReplCoordElectV1Test, ElectTermChangeDuringDryRun) {
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ) <<
- "protocolVersion" << 1);
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
- // update to a future term before dry run completes
- getReplCoord()->updateTerm(1000);
- simulateSuccessfulDryRun();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining(
- "not running for primary, we have been superceded already"));
- }
+TEST_F(ReplCoordElectV1Test, ElectTermChangeDuringDryRun) {
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")) << "protocolVersion"
+ << 1);
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+ // update to a future term before dry run completes
+ getReplCoord()->updateTerm(1000);
+ simulateSuccessfulDryRun();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(
+ 1, countLogLinesContaining("not running for primary, we have been superceded already"));
+}
- TEST_F(ReplCoordElectV1Test, ElectTermChangeDuringActualElection) {
- startCapturingLogMessages();
- BSONObj configObj = BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345")
- << BSON("_id" << 2 << "host" << "node2:12345")
- << BSON("_id" << 3 << "host" << "node3:12345")
- ) <<
- "protocolVersion" << 1);
- assertStartSuccess(configObj, HostAndPort("node1", 12345));
- ReplicaSetConfig config = assertMakeRSConfig(configObj);
-
- OperationContextNoop txn;
- OpTime time1(Timestamp(100, 1), 0);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- simulateEnoughHeartbeatsForElectability();
- simulateSuccessfulDryRun();
- // update to a future term before the election completes
- getReplCoord()->updateTerm(1000);
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- while (net->hasReadyRequests()) {
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
- net->blackHole(noi);
- }
- else {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 <<
- "term" << request.cmdObj["term"].Long() <<
- "voteGranted" << true <<
- "reason" << "")));
- }
- net->runReadyNetworkOperations();
+TEST_F(ReplCoordElectV1Test, ElectTermChangeDuringActualElection) {
+ startCapturingLogMessages();
+ BSONObj configObj = BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")) << "protocolVersion"
+ << 1);
+ assertStartSuccess(configObj, HostAndPort("node1", 12345));
+ ReplicaSetConfig config = assertMakeRSConfig(configObj);
+
+ OperationContextNoop txn;
+ OpTime time1(Timestamp(100, 1), 0);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ simulateEnoughHeartbeatsForElectability();
+ simulateSuccessfulDryRun();
+ // update to a future term before the election completes
+ getReplCoord()->updateTerm(1000);
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ while (net->hasReadyRequests()) {
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ if (request.cmdObj.firstElement().fieldNameStringData() != "replSetRequestVotes") {
+ net->blackHole(noi);
+ } else {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true << "reason"
+ << "")));
}
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining("not becoming primary, we have been superceded already"));
+ net->runReadyNetworkOperations();
}
-
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1,
+ countLogLinesContaining("not becoming primary, we have been superceded already"));
+}
}
}
}
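The election tests above all share one network-simulation shape: enter the mock network, drain the ready requests, schedule a canned response for the commands under test (heartbeats, replSetRequestVotes, replSetDeclareElectionWinner), black-hole anything unexpected, and run the ready operations so the coordinator sees the replies. The sketch below mirrors only that control flow with a plain queue; it is not the NetworkInterfaceMock API, and all names in it are hypothetical.

#include <deque>
#include <iostream>
#include <string>

struct FakeRequest {
    std::string target;
    std::string cmdName;  // e.g. "replSetHeartbeat" or "replSetRequestVotes"
};

int main() {
    std::deque<FakeRequest> ready = {
        {"node2:12345", "replSetHeartbeat"},
        {"node3:12345", "replSetRequestVotes"},
        {"node3:12345", "someOtherCommand"},
    };

    while (!ready.empty()) {
        FakeRequest request = ready.front();
        ready.pop_front();
        std::cout << request.target << " processing " << request.cmdName << "\n";
        if (request.cmdName == "replSetHeartbeat") {
            std::cout << "  scheduling heartbeat response {ok: 1}\n";
        } else if (request.cmdName == "replSetRequestVotes") {
            std::cout << "  scheduling vote response {voteGranted: true}\n";
        } else {
            std::cout << "  black holing unexpected request\n";
        }
        // In the real tests this is the point where runReadyNetworkOperations()
        // delivers the scheduled response back to the coordinator.
    }
    return 0;
}
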
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 4afdb8594e8..61ae5ddecdc 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -57,155 +57,131 @@ namespace repl {
namespace {
- typedef ReplicationExecutor::CallbackHandle CBHandle;
+typedef ReplicationExecutor::CallbackHandle CBHandle;
-} //namespace
-
- void ReplicationCoordinatorImpl::_doMemberHeartbeat(ReplicationExecutor::CallbackArgs cbData,
- const HostAndPort& target,
- int targetIndex) {
-
- _untrackHeartbeatHandle(cbData.myHandle);
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
-
- const Date_t now = _replExecutor.now();
- BSONObj heartbeatObj;
- Milliseconds timeout(0);
- if (isV1ElectionProtocol()) {
- const std::pair<ReplSetHeartbeatArgsV1, Milliseconds> hbRequest =
- _topCoord->prepareHeartbeatRequestV1(
- now,
- _settings.ourSetName(),
- target);
- heartbeatObj = hbRequest.first.toBSON();
- timeout = hbRequest.second;
- }
- else {
- const std::pair<ReplSetHeartbeatArgs, Milliseconds> hbRequest =
- _topCoord->prepareHeartbeatRequest(
- now,
- _settings.ourSetName(),
- target);
- heartbeatObj = hbRequest.first.toBSON();
- timeout = hbRequest.second;
- }
-
- const RemoteCommandRequest request(target, "admin", heartbeatObj, timeout);
- const ReplicationExecutor::RemoteCommandCallbackFn callback = stdx::bind(
- &ReplicationCoordinatorImpl::_handleHeartbeatResponse,
- this,
- stdx::placeholders::_1,
- targetIndex);
+} // namespace
- _trackHeartbeatHandle(_replExecutor.scheduleRemoteCommand(request, callback));
+void ReplicationCoordinatorImpl::_doMemberHeartbeat(ReplicationExecutor::CallbackArgs cbData,
+ const HostAndPort& target,
+ int targetIndex) {
+ _untrackHeartbeatHandle(cbData.myHandle);
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
}
- void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget(
- const HostAndPort& target,
- int targetIndex,
- Date_t when) {
-
- LOG(2) << "Scheduling heartbeat to " << target << " at " << dateToISOStringUTC(when);
- _trackHeartbeatHandle(
- _replExecutor.scheduleWorkAt(
- when,
- stdx::bind(&ReplicationCoordinatorImpl::_doMemberHeartbeat,
- this,
- stdx::placeholders::_1,
- target,
- targetIndex)));
+ const Date_t now = _replExecutor.now();
+ BSONObj heartbeatObj;
+ Milliseconds timeout(0);
+ if (isV1ElectionProtocol()) {
+ const std::pair<ReplSetHeartbeatArgsV1, Milliseconds> hbRequest =
+ _topCoord->prepareHeartbeatRequestV1(now, _settings.ourSetName(), target);
+ heartbeatObj = hbRequest.first.toBSON();
+ timeout = hbRequest.second;
+ } else {
+ const std::pair<ReplSetHeartbeatArgs, Milliseconds> hbRequest =
+ _topCoord->prepareHeartbeatRequest(now, _settings.ourSetName(), target);
+ heartbeatObj = hbRequest.first.toBSON();
+ timeout = hbRequest.second;
}
- void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
- const ReplicationExecutor::RemoteCommandCallbackArgs& cbData, int targetIndex) {
+ const RemoteCommandRequest request(target, "admin", heartbeatObj, timeout);
+ const ReplicationExecutor::RemoteCommandCallbackFn callback =
+ stdx::bind(&ReplicationCoordinatorImpl::_handleHeartbeatResponse,
+ this,
+ stdx::placeholders::_1,
+ targetIndex);
+
+ _trackHeartbeatHandle(_replExecutor.scheduleRemoteCommand(request, callback));
+}
+
+void ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget(const HostAndPort& target,
+ int targetIndex,
+ Date_t when) {
+ LOG(2) << "Scheduling heartbeat to " << target << " at " << dateToISOStringUTC(when);
+ _trackHeartbeatHandle(
+ _replExecutor.scheduleWorkAt(when,
+ stdx::bind(&ReplicationCoordinatorImpl::_doMemberHeartbeat,
+ this,
+ stdx::placeholders::_1,
+ target,
+ targetIndex)));
+}
+
+void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
+ const ReplicationExecutor::RemoteCommandCallbackArgs& cbData, int targetIndex) {
+ // remove handle from queued heartbeats
+ _untrackHeartbeatHandle(cbData.myHandle);
+
+ // Parse and validate the response. At the end of this step, if responseStatus is OK then
+ // hbResponse is valid.
+ Status responseStatus = cbData.response.getStatus();
+ if (responseStatus == ErrorCodes::CallbackCanceled) {
+ return;
+ }
- // remove handle from queued heartbeats
- _untrackHeartbeatHandle(cbData.myHandle);
+ const HostAndPort& target = cbData.request.target;
+ ReplSetHeartbeatResponse hbResponse;
+ BSONObj resp;
+ if (responseStatus.isOK()) {
+ resp = cbData.response.getValue().data;
+ responseStatus = hbResponse.initialize(resp, _topCoord->getTerm());
+ }
+ const Date_t now = _replExecutor.now();
+ const OpTime lastApplied = getMyLastOptime(); // Locks and unlocks _mutex.
+ Milliseconds networkTime(0);
+ StatusWith<ReplSetHeartbeatResponse> hbStatusResponse(hbResponse);
- // Parse and validate the response. At the end of this step, if responseStatus is OK then
- // hbResponse is valid.
- Status responseStatus = cbData.response.getStatus();
- if (responseStatus == ErrorCodes::CallbackCanceled) {
- return;
+ if (responseStatus.isOK()) {
+ networkTime = cbData.response.getValue().elapsedMillis;
+ _updateTerm_incallback(hbStatusResponse.getValue().getTerm(), nullptr);
+ } else {
+ log() << "Error in heartbeat request to " << target << "; " << responseStatus;
+ if (!resp.isEmpty()) {
+ LOG(3) << "heartbeat response: " << resp;
}
- const HostAndPort& target = cbData.request.target;
- ReplSetHeartbeatResponse hbResponse;
- BSONObj resp;
- if (responseStatus.isOK()) {
- resp = cbData.response.getValue().data;
- responseStatus = hbResponse.initialize(resp, _topCoord->getTerm());
- }
- const Date_t now = _replExecutor.now();
- const OpTime lastApplied = getMyLastOptime(); // Locks and unlocks _mutex.
- Milliseconds networkTime(0);
- StatusWith<ReplSetHeartbeatResponse> hbStatusResponse(hbResponse);
-
- if (responseStatus.isOK()) {
- networkTime = cbData.response.getValue().elapsedMillis;
- _updateTerm_incallback(hbStatusResponse.getValue().getTerm(), nullptr);
- }
- else {
- log() << "Error in heartbeat request to " << target << "; " << responseStatus;
- if (!resp.isEmpty()) {
- LOG(3) << "heartbeat response: " << resp;
- }
+ hbStatusResponse = StatusWith<ReplSetHeartbeatResponse>(responseStatus);
+ }
- hbStatusResponse = StatusWith<ReplSetHeartbeatResponse>(responseStatus);
- }
+ HeartbeatResponseAction action = _topCoord->processHeartbeatResponse(
+ now, networkTime, target, hbStatusResponse, lastApplied);
- HeartbeatResponseAction action =
- _topCoord->processHeartbeatResponse(
- now,
- networkTime,
- target,
- hbStatusResponse,
- lastApplied);
-
- if (action.getAction() == HeartbeatResponseAction::NoAction &&
- hbStatusResponse.isOK() &&
- hbStatusResponse.getValue().hasOpTime() &&
- targetIndex >= 0 &&
- hbStatusResponse.getValue().hasState() &&
- hbStatusResponse.getValue().getState() != MemberState::RS_PRIMARY) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (hbStatusResponse.getValue().getConfigVersion() == _rsConfig.getConfigVersion()) {
- _updateOpTimeFromHeartbeat_inlock(targetIndex,
- hbStatusResponse.getValue().getOpTime());
- // TODO: Enable with Data Replicator
- //lk.unlock();
- //_dr.slavesHaveProgressed();
- }
+ if (action.getAction() == HeartbeatResponseAction::NoAction && hbStatusResponse.isOK() &&
+ hbStatusResponse.getValue().hasOpTime() && targetIndex >= 0 &&
+ hbStatusResponse.getValue().hasState() &&
+ hbStatusResponse.getValue().getState() != MemberState::RS_PRIMARY) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (hbStatusResponse.getValue().getConfigVersion() == _rsConfig.getConfigVersion()) {
+ _updateOpTimeFromHeartbeat_inlock(targetIndex, hbStatusResponse.getValue().getOpTime());
+ // TODO: Enable with Data Replicator
+ // lk.unlock();
+ //_dr.slavesHaveProgressed();
}
+ }
- _signalStepDownWaiters();
+ _signalStepDownWaiters();
- _scheduleHeartbeatToTarget(
- target,
- targetIndex,
- std::max(now, action.getNextHeartbeatStartDate()));
+ _scheduleHeartbeatToTarget(
+ target, targetIndex, std::max(now, action.getNextHeartbeatStartDate()));
- _handleHeartbeatResponseAction(action, hbStatusResponse);
- }
+ _handleHeartbeatResponseAction(action, hbStatusResponse);
+}
- void ReplicationCoordinatorImpl::_updateOpTimeFromHeartbeat_inlock(int targetIndex,
- const OpTime& optime) {
- invariant(_selfIndex >= 0);
- invariant(targetIndex >= 0);
+void ReplicationCoordinatorImpl::_updateOpTimeFromHeartbeat_inlock(int targetIndex,
+ const OpTime& optime) {
+ invariant(_selfIndex >= 0);
+ invariant(targetIndex >= 0);
- SlaveInfo& slaveInfo = _slaveInfo[targetIndex];
- if (optime > slaveInfo.opTime) {
- _updateSlaveInfoOptime_inlock(&slaveInfo, optime);
- }
+ SlaveInfo& slaveInfo = _slaveInfo[targetIndex];
+ if (optime > slaveInfo.opTime) {
+ _updateSlaveInfoOptime_inlock(&slaveInfo, optime);
}
+}
- void ReplicationCoordinatorImpl::_handleHeartbeatResponseAction(
- const HeartbeatResponseAction& action,
- const StatusWith<ReplSetHeartbeatResponse>& responseStatus) {
-
- switch (action.getAction()) {
+void ReplicationCoordinatorImpl::_handleHeartbeatResponseAction(
+ const HeartbeatResponseAction& action,
+ const StatusWith<ReplSetHeartbeatResponse>& responseStatus) {
+ switch (action.getAction()) {
case HeartbeatResponseAction::NoAction:
// Update the cached member state if different than the current topology member state
if (_memberState != _topCoord->getMemberState()) {
@@ -223,8 +199,7 @@ namespace {
case HeartbeatResponseAction::StartElection:
if (isV1ElectionProtocol()) {
_startElectSelfV1();
- }
- else {
+ } else {
_startElectSelf();
}
break;
@@ -235,312 +210,290 @@ namespace {
case HeartbeatResponseAction::StepDownRemotePrimary: {
invariant(action.getPrimaryConfigIndex() != _selfIndex);
_requestRemotePrimaryStepdown(
- _rsConfig.getMemberAt(action.getPrimaryConfigIndex()).getHostAndPort());
+ _rsConfig.getMemberAt(action.getPrimaryConfigIndex()).getHostAndPort());
break;
}
default:
severe() << "Illegal heartbeat response action code " << int(action.getAction());
invariant(false);
- }
}
+}
namespace {
- /**
- * This callback is purely for logging and has no effect on any other operations
- */
- void remoteStepdownCallback(const ReplicationExecutor::RemoteCommandCallbackArgs& cbData) {
-
- const Status status = cbData.response.getStatus();
- if (status == ErrorCodes::CallbackCanceled) {
- return;
- }
+/**
+ * This callback is purely for logging and has no effect on any other operations
+ */
+void remoteStepdownCallback(const ReplicationExecutor::RemoteCommandCallbackArgs& cbData) {
+ const Status status = cbData.response.getStatus();
+ if (status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
- if (status.isOK()) {
- LOG(1) << "stepdown of primary(" << cbData.request.target
- << ") succeeded with response -- "
- << cbData.response.getValue().data;
- }
- else {
- warning() << "stepdown of primary(" << cbData.request.target
- << ") failed due to " << cbData.response.getStatus();
- }
+ if (status.isOK()) {
+ LOG(1) << "stepdown of primary(" << cbData.request.target << ") succeeded with response -- "
+ << cbData.response.getValue().data;
+ } else {
+ warning() << "stepdown of primary(" << cbData.request.target << ") failed due to "
+ << cbData.response.getStatus();
}
+}
} // namespace
- void ReplicationCoordinatorImpl::_requestRemotePrimaryStepdown(const HostAndPort& target) {
- RemoteCommandRequest request(target, "admin", BSON("replSetStepDown" << 1));
-
- log() << "Requesting " << target << " step down from primary";
- CBHStatus cbh = _replExecutor.scheduleRemoteCommand(
- request, remoteStepdownCallback);
- if (cbh.getStatus() != ErrorCodes::ShutdownInProgress) {
- fassert(18808, cbh.getStatus());
- }
- }
+void ReplicationCoordinatorImpl::_requestRemotePrimaryStepdown(const HostAndPort& target) {
+ RemoteCommandRequest request(target, "admin", BSON("replSetStepDown" << 1));
- void ReplicationCoordinatorImpl::_heartbeatStepDownStart() {
- log() << "Stepping down from primary in response to heartbeat";
- _replExecutor.scheduleWorkWithGlobalExclusiveLock(
- stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish,
- this,
- stdx::placeholders::_1));
+ log() << "Requesting " << target << " step down from primary";
+ CBHStatus cbh = _replExecutor.scheduleRemoteCommand(request, remoteStepdownCallback);
+ if (cbh.getStatus() != ErrorCodes::ShutdownInProgress) {
+ fassert(18808, cbh.getStatus());
}
+}
- void ReplicationCoordinatorImpl::_stepDownFinish(
- const ReplicationExecutor::CallbackArgs& cbData) {
+void ReplicationCoordinatorImpl::_heartbeatStepDownStart() {
+ log() << "Stepping down from primary in response to heartbeat";
+ _replExecutor.scheduleWorkWithGlobalExclusiveLock(
+ stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish, this, stdx::placeholders::_1));
+}
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- invariant(cbData.txn);
- // TODO Add invariant that we've got global shared or global exclusive lock, when supported
- // by lock manager.
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _topCoord->stepDownIfPending();
- const PostMemberStateUpdateAction action =
- _updateMemberStateFromTopologyCoordinator_inlock();
- lk.unlock();
- _performPostMemberStateUpdateAction(action);
+void ReplicationCoordinatorImpl::_stepDownFinish(const ReplicationExecutor::CallbackArgs& cbData) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ invariant(cbData.txn);
+ // TODO Add invariant that we've got global shared or global exclusive lock, when supported
+ // by lock manager.
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _topCoord->stepDownIfPending();
+ const PostMemberStateUpdateAction action = _updateMemberStateFromTopologyCoordinator_inlock();
+ lk.unlock();
+ _performPostMemberStateUpdateAction(action);
+}
+
+void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(const ReplicaSetConfig& newConfig) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (_inShutdown) {
+ return;
}
- void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(const ReplicaSetConfig& newConfig) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (_inShutdown) {
- return;
- }
-
- switch (_rsConfigState) {
+ switch (_rsConfigState) {
case kConfigStartingUp:
- LOG(1) << "Ignoring new configuration with version " << newConfig.getConfigVersion() <<
- " because still attempting to load local configuration information";
+ LOG(1) << "Ignoring new configuration with version " << newConfig.getConfigVersion()
+ << " because still attempting to load local configuration information";
return;
case kConfigUninitialized:
case kConfigSteady:
- LOG(1) << "Received new config via heartbeat with version " <<
- newConfig.getConfigVersion();
+ LOG(1) << "Received new config via heartbeat with version "
+ << newConfig.getConfigVersion();
break;
case kConfigInitiating:
case kConfigReconfiguring:
case kConfigHBReconfiguring:
- LOG(1) << "Ignoring new configuration with version " << newConfig.getConfigVersion() <<
- " because already in the midst of a configuration process";
+ LOG(1) << "Ignoring new configuration with version " << newConfig.getConfigVersion()
+ << " because already in the midst of a configuration process";
return;
default:
- severe() << "Reconfiguration request occurred while _rsConfigState == " <<
- int(_rsConfigState) << "; aborting.";
+ severe() << "Reconfiguration request occurred while _rsConfigState == "
+ << int(_rsConfigState) << "; aborting.";
fassertFailed(18807);
- }
- _setConfigState_inlock(kConfigHBReconfiguring);
- invariant(!_rsConfig.isInitialized() ||
- _rsConfig.getConfigVersion() < newConfig.getConfigVersion());
- if (_freshnessChecker) {
- _freshnessChecker->cancel(&_replExecutor);
- if (_electCmdRunner) {
- _electCmdRunner->cancel(&_replExecutor);
- }
- _replExecutor.onEvent(
- _electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigAfterElectionCanceled,
- this,
- stdx::placeholders::_1,
- newConfig));
- return;
- }
- _replExecutor.scheduleDBWork(stdx::bind(
- &ReplicationCoordinatorImpl::_heartbeatReconfigStore,
- this,
- stdx::placeholders::_1,
- newConfig));
+ }
+ _setConfigState_inlock(kConfigHBReconfiguring);
+ invariant(!_rsConfig.isInitialized() ||
+ _rsConfig.getConfigVersion() < newConfig.getConfigVersion());
+ if (_freshnessChecker) {
+ _freshnessChecker->cancel(&_replExecutor);
+ if (_electCmdRunner) {
+ _electCmdRunner->cancel(&_replExecutor);
+ }
+ _replExecutor.onEvent(
+ _electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigAfterElectionCanceled,
+ this,
+ stdx::placeholders::_1,
+ newConfig));
+ return;
+ }
+ _replExecutor.scheduleDBWork(stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
+ this,
+ stdx::placeholders::_1,
+ newConfig));
+}
+
+void ReplicationCoordinatorImpl::_heartbeatReconfigAfterElectionCanceled(
+ const ReplicationExecutor::CallbackArgs& cbData, const ReplicaSetConfig& newConfig) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ fassert(18911, cbData.status);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (_inShutdown) {
+ return;
}
- void ReplicationCoordinatorImpl::_heartbeatReconfigAfterElectionCanceled(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& newConfig) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
- fassert(18911, cbData.status);
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (_inShutdown) {
- return;
- }
-
- _replExecutor.scheduleDBWork(stdx::bind(
- &ReplicationCoordinatorImpl::_heartbeatReconfigStore,
- this,
- stdx::placeholders::_1,
- newConfig));
+ _replExecutor.scheduleDBWork(stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
+ this,
+ stdx::placeholders::_1,
+ newConfig));
+}
+
+void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
+ const ReplicationExecutor::CallbackArgs& cbd, const ReplicaSetConfig& newConfig) {
+ if (cbd.status.code() == ErrorCodes::CallbackCanceled) {
+ log() << "The callback to persist the replica set configuration was canceled - "
+ << "the configuration was not persisted but was used: " << newConfig.toBSON();
+ return;
}
- void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
- const ReplicationExecutor::CallbackArgs& cbd,
- const ReplicaSetConfig& newConfig) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex, stdx::defer_lock);
- if (cbd.status.code() == ErrorCodes::CallbackCanceled) {
- log() << "The callback to persist the replica set configuration was canceled - "
- << "the configuration was not persisted but was used: " << newConfig.toBSON();
+ const StatusWith<int> myIndex =
+ validateConfigForHeartbeatReconfig(_externalState.get(), newConfig);
+
+ if (myIndex.getStatus() == ErrorCodes::NodeNotFound) {
+ lk.lock();
+ // If this node absent in newConfig, and this node was not previously initialized,
+ // return to kConfigUninitialized immediately, rather than storing the config and
+ // transitioning into the RS_REMOVED state. See SERVER-15740.
+ if (!_rsConfig.isInitialized()) {
+ invariant(_rsConfigState == kConfigHBReconfiguring);
+ LOG(1) << "Ignoring new configuration in heartbeat response because we are "
+ "uninitialized and not a member of the new configuration";
+ _setConfigState_inlock(kConfigUninitialized);
return;
}
+ lk.unlock();
+ }
- stdx::unique_lock<stdx::mutex> lk(_mutex, stdx::defer_lock);
-
- const StatusWith<int> myIndex = validateConfigForHeartbeatReconfig(
- _externalState.get(),
- newConfig);
-
- if (myIndex.getStatus() == ErrorCodes::NodeNotFound) {
- lk.lock();
- // If this node absent in newConfig, and this node was not previously initialized,
- // return to kConfigUninitialized immediately, rather than storing the config and
- // transitioning into the RS_REMOVED state. See SERVER-15740.
- if (!_rsConfig.isInitialized()) {
- invariant(_rsConfigState == kConfigHBReconfiguring);
- LOG(1) << "Ignoring new configuration in heartbeat response because we are "
- "uninitialized and not a member of the new configuration";
- _setConfigState_inlock(kConfigUninitialized);
- return;
- }
- lk.unlock();
- }
+ if (!myIndex.getStatus().isOK() && myIndex.getStatus() != ErrorCodes::NodeNotFound) {
+ warning() << "Not persisting new configuration in heartbeat response to disk because "
+ "it is invalid: " << myIndex.getStatus();
+ } else {
+ Status status = _externalState->storeLocalConfigDocument(cbd.txn, newConfig.toBSON());
- if (!myIndex.getStatus().isOK() && myIndex.getStatus() != ErrorCodes::NodeNotFound) {
- warning() << "Not persisting new configuration in heartbeat response to disk because "
- "it is invalid: "<< myIndex.getStatus();
- }
- else {
- Status status = _externalState->storeLocalConfigDocument(cbd.txn, newConfig.toBSON());
-
- lk.lock();
- if (!status.isOK()) {
- error() << "Ignoring new configuration in heartbeat response because we failed to"
- " write it to stable storage; " << status;
- invariant(_rsConfigState == kConfigHBReconfiguring);
- if (_rsConfig.isInitialized()) {
- _setConfigState_inlock(kConfigSteady);
- }
- else {
- _setConfigState_inlock(kConfigUninitialized);
- }
- return;
+ lk.lock();
+ if (!status.isOK()) {
+ error() << "Ignoring new configuration in heartbeat response because we failed to"
+ " write it to stable storage; " << status;
+ invariant(_rsConfigState == kConfigHBReconfiguring);
+ if (_rsConfig.isInitialized()) {
+ _setConfigState_inlock(kConfigSteady);
+ } else {
+ _setConfigState_inlock(kConfigUninitialized);
}
-
- lk.unlock();
-
- _externalState->startThreads();
+ return;
}
- const stdx::function<void (const ReplicationExecutor::CallbackArgs&)> reconfigFinishFn(
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex));
+ lk.unlock();
- // Make sure that the reconfigFinishFn doesn't finish until we've reset
- // _heartbeatReconfigThread.
- lk.lock();
- if (_memberState.primary()) {
- // If the primary is receiving a heartbeat reconfig, that strongly suggests
- // that there has been a force reconfiguration. In any event, it might lead
- // to this node stepping down as primary, so we'd better do it with the global
- // lock.
- _replExecutor.scheduleWorkWithGlobalExclusiveLock(reconfigFinishFn);
- }
- else {
- _replExecutor.scheduleWork(reconfigFinishFn);
- }
+ _externalState->startThreads();
}
- void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicaSetConfig& newConfig,
- StatusWith<int> myIndex) {
- if (cbData.status == ErrorCodes::CallbackCanceled) {
- return;
- }
+ const stdx::function<void(const ReplicationExecutor::CallbackArgs&)> reconfigFinishFn(
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex));
+
+ // Make sure that the reconfigFinishFn doesn't finish until we've reset
+ // _heartbeatReconfigThread.
+ lk.lock();
+ if (_memberState.primary()) {
+ // If the primary is receiving a heartbeat reconfig, that strongly suggests
+ // that there has been a force reconfiguration. In any event, it might lead
+ // to this node stepping down as primary, so we'd better do it with the global
+ // lock.
+ _replExecutor.scheduleWorkWithGlobalExclusiveLock(reconfigFinishFn);
+ } else {
+ _replExecutor.scheduleWork(reconfigFinishFn);
+ }
+}
+
+void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
+ const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicaSetConfig& newConfig,
+ StatusWith<int> myIndex) {
+ if (cbData.status == ErrorCodes::CallbackCanceled) {
+ return;
+ }
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- invariant(_rsConfigState == kConfigHBReconfiguring);
- invariant(!_rsConfig.isInitialized() ||
- _rsConfig.getConfigVersion() < newConfig.getConfigVersion());
-
- if (_getMemberState_inlock().primary() && !cbData.txn) {
- // Not having an OperationContext in the CallbackData means we definitely aren't holding
- // the global lock. Since we're primary and this reconfig could cause us to stepdown,
- // reschedule this work with the global exclusive lock so the stepdown is safe.
- // TODO(spencer): When we *do* have an OperationContext, consult it to confirm that
- // we are indeed holding the global lock.
- _replExecutor.scheduleWorkWithGlobalExclusiveLock(
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex));
- return;
- }
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ invariant(_rsConfigState == kConfigHBReconfiguring);
+ invariant(!_rsConfig.isInitialized() ||
+ _rsConfig.getConfigVersion() < newConfig.getConfigVersion());
+
+ if (_getMemberState_inlock().primary() && !cbData.txn) {
+ // Not having an OperationContext in the CallbackData means we definitely aren't holding
+ // the global lock. Since we're primary and this reconfig could cause us to stepdown,
+ // reschedule this work with the global exclusive lock so the stepdown is safe.
+ // TODO(spencer): When we *do* have an OperationContext, consult it to confirm that
+ // we are indeed holding the global lock.
+ _replExecutor.scheduleWorkWithGlobalExclusiveLock(
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex));
+ return;
+ }
- if (!myIndex.isOK()) {
- switch (myIndex.getStatus().code()) {
+ if (!myIndex.isOK()) {
+ switch (myIndex.getStatus().code()) {
case ErrorCodes::NodeNotFound:
- log() << "Cannot find self in new replica set configuration; I must be removed; " <<
- myIndex.getStatus();
+ log() << "Cannot find self in new replica set configuration; I must be removed; "
+ << myIndex.getStatus();
break;
case ErrorCodes::DuplicateKey:
error() << "Several entries in new config represent this node; "
- "Removing self until an acceptable configuration arrives; " <<
- myIndex.getStatus();
+ "Removing self until an acceptable configuration arrives; "
+ << myIndex.getStatus();
break;
default:
error() << "Could not validate configuration received from remote node; "
- "Removing self until an acceptable configuration arrives; " <<
- myIndex.getStatus();
+ "Removing self until an acceptable configuration arrives; "
+ << myIndex.getStatus();
break;
- }
- myIndex = StatusWith<int>(-1);
}
- const PostMemberStateUpdateAction action =
- _setCurrentRSConfig_inlock(newConfig, myIndex.getValue());
- lk.unlock();
- _performPostMemberStateUpdateAction(action);
- }
-
- void ReplicationCoordinatorImpl::_trackHeartbeatHandle(const StatusWith<CBHandle>& handle) {
- if (handle.getStatus() == ErrorCodes::ShutdownInProgress) {
- return;
- }
- fassert(18912, handle.getStatus());
- _heartbeatHandles.push_back(handle.getValue());
+ myIndex = StatusWith<int>(-1);
}
-
- void ReplicationCoordinatorImpl::_untrackHeartbeatHandle(const CBHandle& handle) {
- const HeartbeatHandles::iterator newEnd = std::remove(
- _heartbeatHandles.begin(),
- _heartbeatHandles.end(),
- handle);
- invariant(newEnd != _heartbeatHandles.end());
- _heartbeatHandles.erase(newEnd, _heartbeatHandles.end());
+ const PostMemberStateUpdateAction action =
+ _setCurrentRSConfig_inlock(newConfig, myIndex.getValue());
+ lk.unlock();
+ _performPostMemberStateUpdateAction(action);
+}
+
+void ReplicationCoordinatorImpl::_trackHeartbeatHandle(const StatusWith<CBHandle>& handle) {
+ if (handle.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return;
}
-
- void ReplicationCoordinatorImpl::_cancelHeartbeats() {
- std::for_each(_heartbeatHandles.begin(),
- _heartbeatHandles.end(),
- stdx::bind(&ReplicationExecutor::cancel,
- &_replExecutor,
- stdx::placeholders::_1));
- // Heartbeat callbacks will remove themselves from _heartbeatHandles when they execute with
- // CallbackCanceled status, so it's better to leave the handles in the list, for now.
- }
-
- void ReplicationCoordinatorImpl::_startHeartbeats() {
- const Date_t now = _replExecutor.now();
- _seedList.clear();
- for (int i = 0; i < _rsConfig.getNumMembers(); ++i) {
- if (i == _selfIndex) {
- continue;
- }
- _scheduleHeartbeatToTarget(_rsConfig.getMemberAt(i).getHostAndPort(), i, now);
- }
+ fassert(18912, handle.getStatus());
+ _heartbeatHandles.push_back(handle.getValue());
+}
+
+void ReplicationCoordinatorImpl::_untrackHeartbeatHandle(const CBHandle& handle) {
+ const HeartbeatHandles::iterator newEnd =
+ std::remove(_heartbeatHandles.begin(), _heartbeatHandles.end(), handle);
+ invariant(newEnd != _heartbeatHandles.end());
+ _heartbeatHandles.erase(newEnd, _heartbeatHandles.end());
+}
+
+void ReplicationCoordinatorImpl::_cancelHeartbeats() {
+ std::for_each(_heartbeatHandles.begin(),
+ _heartbeatHandles.end(),
+ stdx::bind(&ReplicationExecutor::cancel, &_replExecutor, stdx::placeholders::_1));
+ // Heartbeat callbacks will remove themselves from _heartbeatHandles when they execute with
+ // CallbackCanceled status, so it's better to leave the handles in the list, for now.
+}
+
+void ReplicationCoordinatorImpl::_startHeartbeats() {
+ const Date_t now = _replExecutor.now();
+ _seedList.clear();
+ for (int i = 0; i < _rsConfig.getNumMembers(); ++i) {
+ if (i == _selfIndex) {
+ continue;
+ }
+ _scheduleHeartbeatToTarget(_rsConfig.getMemberAt(i).getHostAndPort(), i, now);
}
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
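
_untrackHeartbeatHandle above removes a single handle from _heartbeatHandles with the standard erase-remove idiom. The following self-contained illustration of that idiom uses plain ints standing in for callback handles; the vector contents are purely hypothetical.

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> handles{10, 20, 30};
    // std::remove shifts the surviving elements forward and returns the new
    // logical end; erase then discards the tail, exactly as the method above does.
    const std::vector<int>::iterator newEnd =
        std::remove(handles.begin(), handles.end(), 20);
    assert(newEnd != handles.end());  // the handle must have been tracked
    handles.erase(newEnd, handles.end());
    assert(handles.size() == 2 && handles[0] == 10 && handles[1] == 30);
    return 0;
}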
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
index 2afcad55859..f992bf8e94b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
@@ -47,206 +47,212 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
-
- class ReplCoordHBTest : public ReplCoordTest {
- protected:
- void assertMemberState(MemberState expected, std::string msg = "");
- ReplSetHeartbeatResponse receiveHeartbeatFrom(
- const ReplicaSetConfig& rsConfig,
- int sourceId,
- const HostAndPort& source);
- };
-
- void ReplCoordHBTest::assertMemberState(const MemberState expected, std::string msg) {
- const MemberState actual = getReplCoord()->getMemberState();
- ASSERT(expected == actual) << "Expected coordinator to report state " <<
- expected.toString() << " but found " << actual.toString() << " - " << msg;
- }
+using executor::NetworkInterfaceMock;
- ReplSetHeartbeatResponse ReplCoordHBTest::receiveHeartbeatFrom(
- const ReplicaSetConfig& rsConfig,
- int sourceId,
- const HostAndPort& source) {
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setProtocolVersion(1);
- hbArgs.setConfigVersion(rsConfig.getConfigVersion());
- hbArgs.setSetName(rsConfig.getReplSetName());
- hbArgs.setSenderHost(source);
- hbArgs.setSenderId(sourceId);
- ASSERT(hbArgs.isInitialized());
-
- ReplSetHeartbeatResponse response;
- ASSERT_OK(getReplCoord()->processHeartbeat(hbArgs, &response));
- return response;
- }
+class ReplCoordHBTest : public ReplCoordTest {
+protected:
+ void assertMemberState(MemberState expected, std::string msg = "");
+ ReplSetHeartbeatResponse receiveHeartbeatFrom(const ReplicaSetConfig& rsConfig,
+ int sourceId,
+ const HostAndPort& source);
+};
- TEST_F(ReplCoordHBTest, JoinExistingReplSet) {
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1"))));
- init("mySet");
- addSelf(HostAndPort("h2", 1));
- const Date_t startDate = getNet()->now();
- start();
- enterNetwork();
- assertMemberState(MemberState::RS_STARTUP);
- NetworkInterfaceMock* net = getNet();
- ASSERT_FALSE(net->hasReadyRequests());
- exitNetwork();
- receiveHeartbeatFrom(rsConfig, 1, HostAndPort("h1", 1));
-
- enterNetwork();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- ASSERT_EQUALS("mySet", hbArgs.getSetName());
- ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_PRIMARY);
- hbResp.noteReplSet();
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- hbResp.setConfig(rsConfig);
- BSONObjBuilder responseBuilder;
- responseBuilder << "ok" << 1;
- hbResp.addToBSON(&responseBuilder, false);
- net->scheduleResponse(noi,
- startDate + Milliseconds(200),
- makeResponseStatus(responseBuilder.obj()));
- assertRunUntil(startDate + Milliseconds(200));
-
- // Because the new config is stored using an out-of-band thread, we need to perform some
- // extra synchronization to let the executor finish the heartbeat reconfig. We know that
- // after the out-of-band thread completes, it schedules new heartbeats. We assume that no
- // other network operations get scheduled during or before the reconfig, though this may
- // cease to be true in the future.
- noi = net->getNextReadyRequest();
-
- assertMemberState(MemberState::RS_STARTUP2);
- OperationContextNoop txn;
- ReplicaSetConfig storedConfig;
- ASSERT_OK(storedConfig.initialize(
- unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn))));
- ASSERT_OK(storedConfig.validate());
- ASSERT_EQUALS(3, storedConfig.getConfigVersion());
- ASSERT_EQUALS(3, storedConfig.getNumMembers());
- exitNetwork();
- }
+void ReplCoordHBTest::assertMemberState(const MemberState expected, std::string msg) {
+ const MemberState actual = getReplCoord()->getMemberState();
+ ASSERT(expected == actual) << "Expected coordinator to report state " << expected.toString()
+ << " but found " << actual.toString() << " - " << msg;
+}
- TEST_F(ReplCoordHBTest, DoNotJoinReplSetIfNotAMember) {
- // Tests that a node in RS_STARTUP will not transition to RS_REMOVED if it receives a
- // configuration that does not contain it.
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1"))));
- init("mySet");
- addSelf(HostAndPort("h4", 1));
- const Date_t startDate = getNet()->now();
- start();
- enterNetwork();
- assertMemberState(MemberState::RS_STARTUP, "1");
- NetworkInterfaceMock* net = getNet();
- ASSERT_FALSE(net->hasReadyRequests());
- exitNetwork();
- receiveHeartbeatFrom(rsConfig, 1, HostAndPort("h1", 1));
-
- enterNetwork();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- ASSERT_EQUALS("mySet", hbArgs.getSetName());
- ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_PRIMARY);
- hbResp.noteReplSet();
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- hbResp.setConfig(rsConfig);
- BSONObjBuilder responseBuilder;
- responseBuilder << "ok" << 1;
- hbResp.addToBSON(&responseBuilder, false);
- net->scheduleResponse(noi,
- startDate + Milliseconds(200),
- makeResponseStatus(responseBuilder.obj()));
- assertRunUntil(startDate + Milliseconds(2200));
-
- // Because the new config is stored using an out-of-band thread, we need to perform some
- // extra synchronization to let the executor finish the heartbeat reconfig. We know that
- // after the out-of-band thread completes, it schedules new heartbeats. We assume that no
- // other network operations get scheduled during or before the reconfig, though this may
- // cease to be true in the future.
- noi = net->getNextReadyRequest();
-
- assertMemberState(MemberState::RS_STARTUP, "2");
- OperationContextNoop txn;
-
- StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&txn));
- ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue();
- exitNetwork();
- }
+ReplSetHeartbeatResponse ReplCoordHBTest::receiveHeartbeatFrom(const ReplicaSetConfig& rsConfig,
+ int sourceId,
+ const HostAndPort& source) {
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setConfigVersion(rsConfig.getConfigVersion());
+ hbArgs.setSetName(rsConfig.getReplSetName());
+ hbArgs.setSenderHost(source);
+ hbArgs.setSenderId(sourceId);
+ ASSERT(hbArgs.isInitialized());
- TEST_F(ReplCoordHBTest, NotYetInitializedConfigStateEarlyReturn) {
- // ensure that if we've yet to receive an initial config, we return NotYetInitialized
- init("mySet");
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setProtocolVersion(1);
- hbArgs.setConfigVersion(3);
- hbArgs.setSetName("mySet");
- hbArgs.setSenderHost(HostAndPort("h1:1"));
- hbArgs.setSenderId(1);
- ASSERT(hbArgs.isInitialized());
-
- ReplSetHeartbeatResponse response;
- Status status = getReplCoord()->processHeartbeat(hbArgs, &response);
- ASSERT_EQUALS(ErrorCodes::NotYetInitialized, status.code());
- }
+ ReplSetHeartbeatResponse response;
+ ASSERT_OK(getReplCoord()->processHeartbeat(hbArgs, &response));
+ return response;
+}
+
+TEST_F(ReplCoordHBTest, JoinExistingReplSet) {
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1"))));
+ init("mySet");
+ addSelf(HostAndPort("h2", 1));
+ const Date_t startDate = getNet()->now();
+ start();
+ enterNetwork();
+ assertMemberState(MemberState::RS_STARTUP);
+ NetworkInterfaceMock* net = getNet();
+ ASSERT_FALSE(net->hasReadyRequests());
+ exitNetwork();
+ receiveHeartbeatFrom(rsConfig, 1, HostAndPort("h1", 1));
+
+ enterNetwork();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
+ ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ ASSERT_EQUALS("mySet", hbArgs.getSetName());
+ ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_PRIMARY);
+ hbResp.noteReplSet();
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ hbResp.setConfig(rsConfig);
+ BSONObjBuilder responseBuilder;
+ responseBuilder << "ok" << 1;
+ hbResp.addToBSON(&responseBuilder, false);
+ net->scheduleResponse(
+ noi, startDate + Milliseconds(200), makeResponseStatus(responseBuilder.obj()));
+ assertRunUntil(startDate + Milliseconds(200));
+
+ // Because the new config is stored using an out-of-band thread, we need to perform some
+ // extra synchronization to let the executor finish the heartbeat reconfig. We know that
+ // after the out-of-band thread completes, it schedules new heartbeats. We assume that no
+ // other network operations get scheduled during or before the reconfig, though this may
+ // cease to be true in the future.
+ noi = net->getNextReadyRequest();
+
+ assertMemberState(MemberState::RS_STARTUP2);
+ OperationContextNoop txn;
+ ReplicaSetConfig storedConfig;
+ ASSERT_OK(storedConfig.initialize(
+ unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn))));
+ ASSERT_OK(storedConfig.validate());
+ ASSERT_EQUALS(3, storedConfig.getConfigVersion());
+ ASSERT_EQUALS(3, storedConfig.getNumMembers());
+ exitNetwork();
+}
+
+TEST_F(ReplCoordHBTest, DoNotJoinReplSetIfNotAMember) {
+ // Tests that a node in RS_STARTUP will not transition to RS_REMOVED if it receives a
+ // configuration that does not contain it.
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1"))));
+ init("mySet");
+ addSelf(HostAndPort("h4", 1));
+ const Date_t startDate = getNet()->now();
+ start();
+ enterNetwork();
+ assertMemberState(MemberState::RS_STARTUP, "1");
+ NetworkInterfaceMock* net = getNet();
+ ASSERT_FALSE(net->hasReadyRequests());
+ exitNetwork();
+ receiveHeartbeatFrom(rsConfig, 1, HostAndPort("h1", 1));
- TEST_F(ReplCoordHBTest, OnlyUnauthorizedUpCausesRecovering) {
- // Tests that a node that only has auth error heartbeats is recovering
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345"))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- // process heartbeat
- enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(
- BSON("ok" << 0.0 <<
- "errmsg" << "unauth'd" <<
- "code" << ErrorCodes::Unauthorized)));
-
- if (request.target != HostAndPort("node2", 12345)
- && request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
- error() << "Black holing unexpected request to "
- << request.target << ": " << request.cmdObj;
- getNet()->blackHole(noi);
- }
- getNet()->runReadyNetworkOperations();
- exitNetwork();
-
- ASSERT_TRUE(getTopoCoord().getMemberState().recovering());
- assertMemberState(MemberState::RS_RECOVERING, "0");
+ enterNetwork();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
+ ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ ASSERT_EQUALS("mySet", hbArgs.getSetName());
+ ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_PRIMARY);
+ hbResp.noteReplSet();
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ hbResp.setConfig(rsConfig);
+ BSONObjBuilder responseBuilder;
+ responseBuilder << "ok" << 1;
+ hbResp.addToBSON(&responseBuilder, false);
+ net->scheduleResponse(
+ noi, startDate + Milliseconds(200), makeResponseStatus(responseBuilder.obj()));
+ assertRunUntil(startDate + Milliseconds(2200));
+
+ // Because the new config is stored using an out-of-band thread, we need to perform some
+ // extra synchronization to let the executor finish the heartbeat reconfig. We know that
+ // after the out-of-band thread completes, it schedules new heartbeats. We assume that no
+ // other network operations get scheduled during or before the reconfig, though this may
+ // cease to be true in the future.
+ noi = net->getNextReadyRequest();
+
+ assertMemberState(MemberState::RS_STARTUP, "2");
+ OperationContextNoop txn;
+
+ StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&txn));
+ ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue();
+ exitNetwork();
+}
+
+TEST_F(ReplCoordHBTest, NotYetInitializedConfigStateEarlyReturn) {
+ // ensure that if we've yet to receive an initial config, we return NotYetInitialized
+ init("mySet");
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setConfigVersion(3);
+ hbArgs.setSetName("mySet");
+ hbArgs.setSenderHost(HostAndPort("h1:1"));
+ hbArgs.setSenderId(1);
+ ASSERT(hbArgs.isInitialized());
+
+ ReplSetHeartbeatResponse response;
+ Status status = getReplCoord()->processHeartbeat(hbArgs, &response);
+ ASSERT_EQUALS(ErrorCodes::NotYetInitialized, status.code());
+}
+
+TEST_F(ReplCoordHBTest, OnlyUnauthorizedUpCausesRecovering) {
+ // Tests that a node that only has auth error heartbeats is recovering
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ // process heartbeat
+ enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ getNet()->scheduleResponse(
+ noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code" << ErrorCodes::Unauthorized)));
+
+ if (request.target != HostAndPort("node2", 12345) &&
+ request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ getNet()->blackHole(noi);
}
+ getNet()->runReadyNetworkOperations();
+ exitNetwork();
+
+ ASSERT_TRUE(getTopoCoord().getMemberState().recovering());
+ assertMemberState(MemberState::RS_RECOVERING, "0");
+}
} // namespace
} // namespace repl
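
The heartbeat tests above fabricate a primary's reply by filling a ReplSetHeartbeatResponse and serializing it through a BSONObjBuilder before handing it to the mock network. A sketch of that construction as a helper follows; the helper name is hypothetical, the hard-coded set name matches the "mySet" configs used in these tests, and the boolean passed to addToBSON matches what the non-V1 test above passes (the V1 test passes true).

// Hypothetical helper: build the BSON body of a primary's heartbeat response
// for the given config, mirroring the construction in the PV0 test above.
BSONObj makePrimaryHeartbeatResponseObj(const ReplicaSetConfig& rsConfig) {
    ReplSetHeartbeatResponse hbResp;
    hbResp.setSetName("mySet");
    hbResp.setState(MemberState::RS_PRIMARY);
    hbResp.noteReplSet();
    hbResp.setConfigVersion(rsConfig.getConfigVersion());
    hbResp.setConfig(rsConfig);
    BSONObjBuilder responseBuilder;
    responseBuilder << "ok" << 1;
    hbResp.addToBSON(&responseBuilder, false);
    return responseBuilder.obj();
}

A test could then schedule the result with net->scheduleResponse(noi, when, makeResponseStatus(makePrimaryHeartbeatResponseObj(rsConfig))), as the tests above do inline.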
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 4ae6a358e53..c51e8e48929 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -47,206 +47,212 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
-
- class ReplCoordHBV1Test : public ReplCoordTest {
- protected:
- void assertMemberState(MemberState expected, std::string msg = "");
- ReplSetHeartbeatResponse receiveHeartbeatFrom(
- const ReplicaSetConfig& rsConfig,
- int sourceId,
- const HostAndPort& source);
- };
-
- void ReplCoordHBV1Test::assertMemberState(const MemberState expected, std::string msg) {
- const MemberState actual = getReplCoord()->getMemberState();
- ASSERT(expected == actual) << "Expected coordinator to report state " <<
- expected.toString() << " but found " << actual.toString() << " - " << msg;
- }
+using executor::NetworkInterfaceMock;
- ReplSetHeartbeatResponse ReplCoordHBV1Test::receiveHeartbeatFrom(
- const ReplicaSetConfig& rsConfig,
- int sourceId,
- const HostAndPort& source) {
- ReplSetHeartbeatArgsV1 hbArgs;
- hbArgs.setConfigVersion(rsConfig.getConfigVersion());
- hbArgs.setSetName(rsConfig.getReplSetName());
- hbArgs.setSenderHost(source);
- hbArgs.setSenderId(sourceId);
- hbArgs.setTerm(1);
- ASSERT(hbArgs.isInitialized());
-
- ReplSetHeartbeatResponse response;
- ASSERT_OK(getReplCoord()->processHeartbeatV1(hbArgs, &response));
- return response;
- }
+class ReplCoordHBV1Test : public ReplCoordTest {
+protected:
+ void assertMemberState(MemberState expected, std::string msg = "");
+ ReplSetHeartbeatResponse receiveHeartbeatFrom(const ReplicaSetConfig& rsConfig,
+ int sourceId,
+ const HostAndPort& source);
+};
- TEST_F(ReplCoordHBV1Test, JoinExistingReplSet) {
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1")) <<
- "protocolVersion" << 1));
- init("mySet");
- addSelf(HostAndPort("h2", 1));
- const Date_t startDate = getNet()->now();
- start();
- enterNetwork();
- assertMemberState(MemberState::RS_STARTUP);
- NetworkInterfaceMock* net = getNet();
- ASSERT_FALSE(net->hasReadyRequests());
- exitNetwork();
- receiveHeartbeatFrom(rsConfig, 1, HostAndPort("h1", 1));
-
- enterNetwork();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- ASSERT_EQUALS("mySet", hbArgs.getSetName());
- ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_PRIMARY);
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- hbResp.setConfig(rsConfig);
- BSONObjBuilder responseBuilder;
- responseBuilder << "ok" << 1;
- hbResp.addToBSON(&responseBuilder, true);
- net->scheduleResponse(noi,
- startDate + Milliseconds(200),
- makeResponseStatus(responseBuilder.obj()));
- assertRunUntil(startDate + Milliseconds(200));
-
- // Because the new config is stored using an out-of-band thread, we need to perform some
- // extra synchronization to let the executor finish the heartbeat reconfig. We know that
- // after the out-of-band thread completes, it schedules new heartbeats. We assume that no
- // other network operations get scheduled during or before the reconfig, though this may
- // cease to be true in the future.
- noi = net->getNextReadyRequest();
-
- assertMemberState(MemberState::RS_STARTUP2);
- OperationContextNoop txn;
- ReplicaSetConfig storedConfig;
- ASSERT_OK(storedConfig.initialize(
- unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn))));
- ASSERT_OK(storedConfig.validate());
- ASSERT_EQUALS(3, storedConfig.getConfigVersion());
- ASSERT_EQUALS(3, storedConfig.getNumMembers());
- exitNetwork();
- }
+void ReplCoordHBV1Test::assertMemberState(const MemberState expected, std::string msg) {
+ const MemberState actual = getReplCoord()->getMemberState();
+ ASSERT(expected == actual) << "Expected coordinator to report state " << expected.toString()
+ << " but found " << actual.toString() << " - " << msg;
+}
- TEST_F(ReplCoordHBV1Test, DoNotJoinReplSetIfNotAMember) {
- // Tests that a node in RS_STARTUP will not transition to RS_REMOVED if it receives a
- // configuration that does not contain it.
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig = assertMakeRSConfig(
- BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "h1:1") <<
- BSON("_id" << 2 << "host" << "h2:1") <<
- BSON("_id" << 3 << "host" << "h3:1")) <<
- "protocolVersion" << 1));
- init("mySet");
- addSelf(HostAndPort("h4", 1));
- const Date_t startDate = getNet()->now();
- start();
- enterNetwork();
- assertMemberState(MemberState::RS_STARTUP, "1");
- NetworkInterfaceMock* net = getNet();
- ASSERT_FALSE(net->hasReadyRequests());
- exitNetwork();
- receiveHeartbeatFrom(rsConfig, 1, HostAndPort("h1", 1));
-
- enterNetwork();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
- ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- ASSERT_EQUALS("mySet", hbArgs.getSetName());
- ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_PRIMARY);
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- hbResp.setConfig(rsConfig);
- BSONObjBuilder responseBuilder;
- responseBuilder << "ok" << 1;
- hbResp.addToBSON(&responseBuilder, true);
- net->scheduleResponse(noi,
- startDate + Milliseconds(200),
- makeResponseStatus(responseBuilder.obj()));
- assertRunUntil(startDate + Milliseconds(2200));
-
- // Because the new config is stored using an out-of-band thread, we need to perform some
- // extra synchronization to let the executor finish the heartbeat reconfig. We know that
- // after the out-of-band thread completes, it schedules new heartbeats. We assume that no
- // other network operations get scheduled during or before the reconfig, though this may
- // cease to be true in the future.
- noi = net->getNextReadyRequest();
-
- assertMemberState(MemberState::RS_STARTUP, "2");
- OperationContextNoop txn;
-
- StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&txn));
- ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue();
- exitNetwork();
- }
+ReplSetHeartbeatResponse ReplCoordHBV1Test::receiveHeartbeatFrom(const ReplicaSetConfig& rsConfig,
+ int sourceId,
+ const HostAndPort& source) {
+ ReplSetHeartbeatArgsV1 hbArgs;
+ hbArgs.setConfigVersion(rsConfig.getConfigVersion());
+ hbArgs.setSetName(rsConfig.getReplSetName());
+ hbArgs.setSenderHost(source);
+ hbArgs.setSenderId(sourceId);
+ hbArgs.setTerm(1);
+ ASSERT(hbArgs.isInitialized());
- TEST_F(ReplCoordHBV1Test, NotYetInitializedConfigStateEarlyReturn) {
- // ensure that if we've yet to receive an initial config, we return NotYetInitialized
- init("mySet");
- ReplSetHeartbeatArgsV1 hbArgs;
- hbArgs.setConfigVersion(3);
- hbArgs.setSetName("mySet");
- hbArgs.setSenderHost(HostAndPort("h1:1"));
- hbArgs.setSenderId(1);
- hbArgs.setTerm(1);
- ASSERT(hbArgs.isInitialized());
-
- ReplSetHeartbeatResponse response;
- Status status = getReplCoord()->processHeartbeatV1(hbArgs, &response);
- ASSERT_EQUALS(ErrorCodes::NotYetInitialized, status.code());
- }
+ ReplSetHeartbeatResponse response;
+ ASSERT_OK(getReplCoord()->processHeartbeatV1(hbArgs, &response));
+ return response;
+}
+
+TEST_F(ReplCoordHBV1Test, JoinExistingReplSet) {
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion" << 1));
+ init("mySet");
+ addSelf(HostAndPort("h2", 1));
+ const Date_t startDate = getNet()->now();
+ start();
+ enterNetwork();
+ assertMemberState(MemberState::RS_STARTUP);
+ NetworkInterfaceMock* net = getNet();
+ ASSERT_FALSE(net->hasReadyRequests());
+ exitNetwork();
+ receiveHeartbeatFrom(rsConfig, 1, HostAndPort("h1", 1));
+
+ enterNetwork();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
+ ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ ASSERT_EQUALS("mySet", hbArgs.getSetName());
+ ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_PRIMARY);
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ hbResp.setConfig(rsConfig);
+ BSONObjBuilder responseBuilder;
+ responseBuilder << "ok" << 1;
+ hbResp.addToBSON(&responseBuilder, true);
+ net->scheduleResponse(
+ noi, startDate + Milliseconds(200), makeResponseStatus(responseBuilder.obj()));
+ assertRunUntil(startDate + Milliseconds(200));
+
+ // Because the new config is stored using an out-of-band thread, we need to perform some
+ // extra synchronization to let the executor finish the heartbeat reconfig. We know that
+ // after the out-of-band thread completes, it schedules new heartbeats. We assume that no
+ // other network operations get scheduled during or before the reconfig, though this may
+ // cease to be true in the future.
+ noi = net->getNextReadyRequest();
+
+ assertMemberState(MemberState::RS_STARTUP2);
+ OperationContextNoop txn;
+ ReplicaSetConfig storedConfig;
+ ASSERT_OK(storedConfig.initialize(
+ unittest::assertGet(getExternalState()->loadLocalConfigDocument(&txn))));
+ ASSERT_OK(storedConfig.validate());
+ ASSERT_EQUALS(3, storedConfig.getConfigVersion());
+ ASSERT_EQUALS(3, storedConfig.getNumMembers());
+ exitNetwork();
+}
+
+TEST_F(ReplCoordHBV1Test, DoNotJoinReplSetIfNotAMember) {
+ // Tests that a node in RS_STARTUP will not transition to RS_REMOVED if it receives a
+ // configuration that does not contain it.
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ ReplicaSetConfig rsConfig =
+ assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1") << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion" << 1));
+ init("mySet");
+ addSelf(HostAndPort("h4", 1));
+ const Date_t startDate = getNet()->now();
+ start();
+ enterNetwork();
+ assertMemberState(MemberState::RS_STARTUP, "1");
+ NetworkInterfaceMock* net = getNet();
+ ASSERT_FALSE(net->hasReadyRequests());
+ exitNetwork();
+ receiveHeartbeatFrom(rsConfig, 1, HostAndPort("h1", 1));
- TEST_F(ReplCoordHBV1Test, OnlyUnauthorizedUpCausesRecovering) {
- // Tests that a node that only has auth error heartbeats is recovering
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345"))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
-
- // process heartbeat
- enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(
- BSON("ok" << 0.0 <<
- "errmsg" << "unauth'd" <<
- "code" << ErrorCodes::Unauthorized)));
-
- if (request.target != HostAndPort("node2", 12345)
- && request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
- error() << "Black holing unexpected request to "
- << request.target << ": " << request.cmdObj;
- getNet()->blackHole(noi);
- }
- getNet()->runReadyNetworkOperations();
- exitNetwork();
-
- ASSERT_TRUE(getTopoCoord().getMemberState().recovering());
- assertMemberState(MemberState::RS_RECOVERING, "0");
+ enterNetwork();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ ASSERT_EQUALS(HostAndPort("h1", 1), request.target);
+ ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ ASSERT_EQUALS("mySet", hbArgs.getSetName());
+ ASSERT_EQUALS(-2, hbArgs.getConfigVersion());
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_PRIMARY);
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ hbResp.setConfig(rsConfig);
+ BSONObjBuilder responseBuilder;
+ responseBuilder << "ok" << 1;
+ hbResp.addToBSON(&responseBuilder, true);
+ net->scheduleResponse(
+ noi, startDate + Milliseconds(200), makeResponseStatus(responseBuilder.obj()));
+ assertRunUntil(startDate + Milliseconds(2200));
+
+ // Because the new config is stored using an out-of-band thread, we need to perform some
+ // extra synchronization to let the executor finish the heartbeat reconfig. We know that
+ // after the out-of-band thread completes, it schedules new heartbeats. We assume that no
+ // other network operations get scheduled during or before the reconfig, though this may
+ // cease to be true in the future.
+ noi = net->getNextReadyRequest();
+
+ assertMemberState(MemberState::RS_STARTUP, "2");
+ OperationContextNoop txn;
+
+ StatusWith<BSONObj> loadedConfig(getExternalState()->loadLocalConfigDocument(&txn));
+ ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue();
+ exitNetwork();
+}
+
+TEST_F(ReplCoordHBV1Test, NotYetInitializedConfigStateEarlyReturn) {
+ // ensure that if we've yet to receive an initial config, we return NotYetInitialized
+ init("mySet");
+ ReplSetHeartbeatArgsV1 hbArgs;
+ hbArgs.setConfigVersion(3);
+ hbArgs.setSetName("mySet");
+ hbArgs.setSenderHost(HostAndPort("h1:1"));
+ hbArgs.setSenderId(1);
+ hbArgs.setTerm(1);
+ ASSERT(hbArgs.isInitialized());
+
+ ReplSetHeartbeatResponse response;
+ Status status = getReplCoord()->processHeartbeatV1(hbArgs, &response);
+ ASSERT_EQUALS(ErrorCodes::NotYetInitialized, status.code());
+}
+
+TEST_F(ReplCoordHBV1Test, OnlyUnauthorizedUpCausesRecovering) {
+ // Tests that a node that only has auth error heartbeats is recovering
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+
+ // process heartbeat
+ enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ getNet()->scheduleResponse(
+ noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code" << ErrorCodes::Unauthorized)));
+
+ if (request.target != HostAndPort("node2", 12345) &&
+ request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ getNet()->blackHole(noi);
}
+ getNet()->runReadyNetworkOperations();
+ exitNetwork();
+
+ ASSERT_TRUE(getTopoCoord().getMemberState().recovering());
+ assertMemberState(MemberState::RS_RECOVERING, "0");
+}
} // namespace
} // namespace repl
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 5a9eb6ef965..6255e799d67 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -38,7 +38,7 @@
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_coordinator_test_fixture.h"
-#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
+#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
#include "mongo/executor/network_interface_mock.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
@@ -47,485 +47,518 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
- typedef ReplicationCoordinator::ReplSetReconfigArgs ReplSetReconfigArgs;
-
- TEST_F(ReplCoordTest, ReconfigBeforeInitialized) {
- // start up but do not initiate
- OperationContextNoop txn;
- init();
- start();
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
-
- ASSERT_EQUALS(ErrorCodes::NotYetInitialized,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
- ASSERT_TRUE(result.obj().isEmpty());
- }
-
- TEST_F(ReplCoordTest, ReconfigWhileNotPrimary) {
- // start up, become secondary, receive reconfig
- OperationContextNoop txn;
- init();
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
-
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
- args.force = false;
- ASSERT_EQUALS(ErrorCodes::NotMaster,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
- ASSERT_TRUE(result.obj().isEmpty());
- }
-
- TEST_F(ReplCoordTest, ReconfigWithUninitializableConfig) {
- // start up, become primary, receive uninitializable config
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
-
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "mySet" <<
- "version" << 2 <<
- "invalidlyNamedField" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345" <<
- "arbiterOnly" << true) <<
- BSON("_id" << 2 <<
- "host" << "node2:12345" <<
- "arbiterOnly" << true)));
- // ErrorCodes::BadValue should be propagated from ReplicaSetConfig::initialize()
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
- ASSERT_TRUE(result.obj().isEmpty());
- }
-
- TEST_F(ReplCoordTest, ReconfigWithWrongReplSetName) {
- // start up, become primary, receive config with incorrect replset name
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
-
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "notMySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345")));
-
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
- ASSERT_TRUE(result.obj().isEmpty());
- }
-
- TEST_F(ReplCoordTest, ReconfigValidateFails) {
- // start up, become primary, validate fails
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
-
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "mySet" <<
- "version" << -3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345")));
-
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
- ASSERT_TRUE(result.obj().isEmpty());
- }
-
- void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
- OperationContextNoop txn;
- BSONObjBuilder garbage;
- *status = replCoord->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345"))),
- &garbage);
- }
-
- void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
- OperationContextNoop txn;
- BSONObjBuilder garbage;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345" <<
- "priority" << 3)));
- *status = replCoord->processReplSetReconfig(&txn, args, &garbage);
- }
-
- TEST_F(ReplCoordTest, ReconfigQuorumCheckFails) {
- // start up, become primary, fail during quorum check due to a heartbeat
- // containing a higher config version
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
-
- Status status(ErrorCodes::InternalError, "Not Set");
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
-
- NetworkInterfaceMock* net = getNet();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- repl::ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- repl::ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(5);
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
- reconfigThread.join();
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
- }
-
- TEST_F(ReplCoordTest, ReconfigStoreLocalConfigDocumentFails) {
- // start up, become primary, saving the config fails
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
-
- Status status(ErrorCodes::InternalError, "Not Set");
- getExternalState()->setStoreLocalConfigDocumentStatus(Status(ErrorCodes::OutOfDiskSpace,
- "The test set this"));
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
-
- NetworkInterfaceMock* net = getNet();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- repl::ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- repl::ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(2);
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
- reconfigThread.join();
- ASSERT_EQUALS(ErrorCodes::OutOfDiskSpace, status);
- }
-
- TEST_F(ReplCoordTest, ReconfigWhileReconfiggingFails) {
- // start up, become primary, reconfig, then before that reconfig concludes, reconfig again
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
-
- Status status(ErrorCodes::InternalError, "Not Set");
- // first reconfig
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
- getNet()->enterNetwork();
- getNet()->blackHole(getNet()->getNextReadyRequest());
- getNet()->exitNetwork();
-
- // second reconfig
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345")));
-
- ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
- ASSERT_TRUE(result.obj().isEmpty());
-
- shutdown();
- reconfigThread.join();
- }
-
- TEST_F(ReplCoordTest, ReconfigWhileInitializingFails) {
- // start up, initiate, then before that initiate concludes, reconfig
- OperationContextNoop txn;
- init();
- start(HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
-
- // initiate
- Status status(ErrorCodes::InternalError, "Not Set");
- stdx::thread initateThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status));
- getNet()->enterNetwork();
- getNet()->blackHole(getNet()->getNextReadyRequest());
- getNet()->exitNetwork();
-
- // reconfig
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345")));
-
- ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
- ASSERT_TRUE(result.obj().isEmpty());
-
- shutdown();
- initateThread.join();
- }
-
- TEST_F(ReplCoordTest, ReconfigSuccessful) {
- // start up, become primary, reconfig successfully
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345"))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
- simulateSuccessfulElection();
-
- Status status(ErrorCodes::InternalError, "Not Set");
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
-
- NetworkInterfaceMock* net = getNet();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- repl::ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- repl::ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(2);
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
- reconfigThread.join();
- ASSERT_OK(status);
- }
-
- TEST_F(ReplCoordTest, ReconfigDuringHBReconfigFails) {
- // start up, become primary, receive reconfig via heartbeat, then a second one
- // from reconfig
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100,0), 0));
- simulateSuccessfulElection();
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
-
- // set hbreconfig to hang while in progress
- getExternalState()->setStoreLocalConfigDocumentToHang(true);
-
- // hb reconfig
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- ReplSetHeartbeatResponse hbResp2;
- ReplicaSetConfig config;
- config.initialize(BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345"))));
- hbResp2.setConfig(config);
- hbResp2.setConfigVersion(3);
- hbResp2.setSetName("mySet");
- hbResp2.setState(MemberState::RS_SECONDARY);
- BSONObjBuilder respObj2;
- respObj2 << "ok" << 1;
- hbResp2.addToBSON(&respObj2, false);
- net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
- const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
- net->scheduleResponse(noi2, net->now(), makeResponseStatus(respObj2.obj()));
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
-
- // reconfig
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = config.toBSON();
- ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
-
- getExternalState()->setStoreLocalConfigDocumentToHang(false);
- }
-
- TEST_F(ReplCoordTest, HBReconfigDuringReconfigFails) {
- // start up, become primary, reconfig, while reconfigging receive reconfig via heartbeat
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100,0), 0));
- simulateSuccessfulElection();
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
-
- // start reconfigThread
- Status status(ErrorCodes::InternalError, "Not Set");
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
-
- // wait for reconfigThread to create network requests to ensure the replication coordinator
- // is in state kConfigReconfiguring
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- net->blackHole(net->getNextReadyRequest());
-
- // schedule hb reconfig
- net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- ReplSetHeartbeatResponse hbResp;
- ReplicaSetConfig config;
- config.initialize(BSON("_id" << "mySet" <<
- "version" << 4 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345"))));
- hbResp.setConfig(config);
- hbResp.setConfigVersion(4);
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_SECONDARY);
- BSONObjBuilder respObj2;
- respObj2 << "ok" << 1;
- hbResp.addToBSON(&respObj2, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj2.obj()));
-
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(1));
- startCapturingLogMessages();
- // execute hb reconfig, which should fail with a log message; confirmed at end of test
- net->runReadyNetworkOperations();
- // respond to reconfig's quorum check so that we can join that thread and exit cleanly
- net->exitNetwork();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining("because already in the midst of a configuration process"));
- shutdown();
- reconfigThread.join();
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
- }
-
- TEST_F(ReplCoordTest, ForceReconfigWhileNotPrimarySuccessful) {
- // start up, become a secondary, receive a forced reconfig
- OperationContextNoop txn;
- init();
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:12345") )),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
-
- // fail before forced
- BSONObjBuilder result;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345") <<
- BSON("_id" << 2 <<
- "host" << "node2:12345")));
- ASSERT_EQUALS(ErrorCodes::NotMaster,
- getReplCoord()->processReplSetReconfig(&txn, args, &result));
-
- // forced should succeed
- args.force = true;
- ASSERT_OK(getReplCoord()->processReplSetReconfig(&txn, args, &result));
- getReplCoord()->processReplSetGetConfig(&result);
-
- // ensure forced reconfig results in a random larger version
- ASSERT_GREATER_THAN(result.obj()["config"].Obj()["version"].numberInt(), 3);
- }
-
-} // anonymous namespace
-} // namespace repl
-} // namespace mongo
+using executor::NetworkInterfaceMock;
+typedef ReplicationCoordinator::ReplSetReconfigArgs ReplSetReconfigArgs;
+
+TEST_F(ReplCoordTest, ReconfigBeforeInitialized) {
+ // start up but do not initiate
+ OperationContextNoop txn;
+ init();
+ start();
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+
+ ASSERT_EQUALS(ErrorCodes::NotYetInitialized,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ ASSERT_TRUE(result.obj().isEmpty());
+}
+
+TEST_F(ReplCoordTest, ReconfigWhileNotPrimary) {
+ // start up, become secondary, receive reconfig
+ OperationContextNoop txn;
+ init();
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ ASSERT_EQUALS(ErrorCodes::NotMaster,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ ASSERT_TRUE(result.obj().isEmpty());
+}
+
+TEST_F(ReplCoordTest, ReconfigWithUninitializableConfig) {
+ // start up, become primary, receive uninitializable config
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "mySet"
+ << "version" << 2 << "invalidlyNamedField" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "arbiterOnly" << true)));
+ // ErrorCodes::BadValue should be propagated from ReplicaSetConfig::initialize()
+ ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ ASSERT_TRUE(result.obj().isEmpty());
+}
+
+TEST_F(ReplCoordTest, ReconfigWithWrongReplSetName) {
+ // start up, become primary, receive config with incorrect replset name
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "notMySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")));
+
+ ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ ASSERT_TRUE(result.obj().isEmpty());
+}
+
+TEST_F(ReplCoordTest, ReconfigValidateFails) {
+ // start up, become primary, validate fails
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "mySet"
+ << "version" << -3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")));
+
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ ASSERT_TRUE(result.obj().isEmpty());
+}
+
+void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
+ OperationContextNoop txn;
+ BSONObjBuilder garbage;
+ *status =
+ replCoord->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ &garbage);
+}
+
+void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
+ OperationContextNoop txn;
+ BSONObjBuilder garbage;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "priority" << 3)));
+ *status = replCoord->processReplSetReconfig(&txn, args, &garbage);
+}
+
+TEST_F(ReplCoordTest, ReconfigQuorumCheckFails) {
+ // start up, become primary, fail during quorum check due to a heartbeat
+ // containing a higher config version
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+
+ Status status(ErrorCodes::InternalError, "Not Set");
+ stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
+
+ NetworkInterfaceMock* net = getNet();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ repl::ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ repl::ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(5);
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+ reconfigThread.join();
+ ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
+}
+
+TEST_F(ReplCoordTest, ReconfigStoreLocalConfigDocumentFails) {
+ // start up, become primary, saving the config fails
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+
+ Status status(ErrorCodes::InternalError, "Not Set");
+ getExternalState()->setStoreLocalConfigDocumentStatus(
+ Status(ErrorCodes::OutOfDiskSpace, "The test set this"));
+ stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
+
+ NetworkInterfaceMock* net = getNet();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ repl::ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ repl::ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(2);
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+ reconfigThread.join();
+ ASSERT_EQUALS(ErrorCodes::OutOfDiskSpace, status);
+}
+
+TEST_F(ReplCoordTest, ReconfigWhileReconfiggingFails) {
+ // start up, become primary, reconfig, then before that reconfig concludes, reconfig again
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+
+ Status status(ErrorCodes::InternalError, "Not Set");
+ // first reconfig
+ stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
+ getNet()->enterNetwork();
+ getNet()->blackHole(getNet()->getNextReadyRequest());
+ getNet()->exitNetwork();
+
+ // second reconfig
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")));
+
+ ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ ASSERT_TRUE(result.obj().isEmpty());
+
+ shutdown();
+ reconfigThread.join();
+}
+
+TEST_F(ReplCoordTest, ReconfigWhileInitializingFails) {
+ // start up, initiate, then before that initiate concludes, reconfig
+ OperationContextNoop txn;
+ init();
+ start(HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+
+ // initiate
+ Status status(ErrorCodes::InternalError, "Not Set");
+ stdx::thread initateThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status));
+ getNet()->enterNetwork();
+ getNet()->blackHole(getNet()->getNextReadyRequest());
+ getNet()->exitNetwork();
+
+ // reconfig
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")));
+
+ ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ ASSERT_TRUE(result.obj().isEmpty());
+
+ shutdown();
+ initateThread.join();
+}
+
+TEST_F(ReplCoordTest, ReconfigSuccessful) {
+ // start up, become primary, reconfig successfully
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+
+ Status status(ErrorCodes::InternalError, "Not Set");
+ stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
+
+ NetworkInterfaceMock* net = getNet();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ repl::ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ repl::ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(2);
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+ reconfigThread.join();
+ ASSERT_OK(status);
+}
+
+TEST_F(ReplCoordTest, ReconfigDuringHBReconfigFails) {
+ // start up, become primary, receive reconfig via heartbeat, then a second one
+ // from reconfig
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+
+ // set hbreconfig to hang while in progress
+ getExternalState()->setStoreLocalConfigDocumentToHang(true);
+
+ // hb reconfig
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ ReplSetHeartbeatResponse hbResp2;
+ ReplicaSetConfig config;
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))));
+ hbResp2.setConfig(config);
+ hbResp2.setConfigVersion(3);
+ hbResp2.setSetName("mySet");
+ hbResp2.setState(MemberState::RS_SECONDARY);
+ BSONObjBuilder respObj2;
+ respObj2 << "ok" << 1;
+ hbResp2.addToBSON(&respObj2, false);
+ net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
+ const NetworkInterfaceMock::NetworkOperationIterator noi2 = net->getNextReadyRequest();
+ net->scheduleResponse(noi2, net->now(), makeResponseStatus(respObj2.obj()));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+
+ // reconfig
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = config.toBSON();
+ ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+
+ getExternalState()->setStoreLocalConfigDocumentToHang(false);
+}
+
+TEST_F(ReplCoordTest, HBReconfigDuringReconfigFails) {
+ // start up, become primary, reconfig, while reconfigging receive reconfig via heartbeat
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+ simulateSuccessfulElection();
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+
+ // start reconfigThread
+ Status status(ErrorCodes::InternalError, "Not Set");
+ stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
+
+ // wait for reconfigThread to create network requests to ensure the replication coordinator
+ // is in state kConfigReconfiguring
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ net->blackHole(net->getNextReadyRequest());
+
+ // schedule hb reconfig
+ net->runUntil(net->now() + Seconds(10)); // run until we've sent a heartbeat request
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ ReplSetHeartbeatResponse hbResp;
+ ReplicaSetConfig config;
+ config.initialize(BSON("_id"
+ << "mySet"
+ << "version" << 4 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))));
+ hbResp.setConfig(config);
+ hbResp.setConfigVersion(4);
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_SECONDARY);
+ BSONObjBuilder respObj2;
+ respObj2 << "ok" << 1;
+ hbResp.addToBSON(&respObj2, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj2.obj()));
+
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(1));
+ startCapturingLogMessages();
+ // execute hb reconfig, which should fail with a log message; confirmed at end of test
+ net->runReadyNetworkOperations();
+ // respond to reconfig's quorum check so that we can join that thread and exit cleanly
+ net->exitNetwork();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(
+ 1, countLogLinesContaining("because already in the midst of a configuration process"));
+ shutdown();
+ reconfigThread.join();
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
+}
+
+TEST_F(ReplCoordTest, ForceReconfigWhileNotPrimarySuccessful) {
+ // start up, become a secondary, receive a forced reconfig
+ OperationContextNoop txn;
+ init();
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 0), 0));
+
+ // fail before forced
+ BSONObjBuilder result;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")));
+ ASSERT_EQUALS(ErrorCodes::NotMaster,
+ getReplCoord()->processReplSetReconfig(&txn, args, &result));
+
+ // forced should succeed
+ args.force = true;
+ ASSERT_OK(getReplCoord()->processReplSetReconfig(&txn, args, &result));
+ getReplCoord()->processReplSetGetConfig(&result);
+
+ // ensure forced reconfig results in a random larger version
+ ASSERT_GREATER_THAN(result.obj()["config"].Obj()["version"].numberInt(), 3);
+}
+
+} // anonymous namespace
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index b625f45fe07..452c07519e1 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -46,7 +46,7 @@
#include "mongo/db/repl/repl_set_heartbeat_args.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replica_set_config.h"
-#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
+#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_coordinator_test_fixture.h"
@@ -67,2039 +67,2141 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
- typedef ReplicationCoordinator::ReplSetReconfigArgs ReplSetReconfigArgs;
- Status kInterruptedStatus(ErrorCodes::Interrupted, "operation was interrupted");
+using executor::NetworkInterfaceMock;
+typedef ReplicationCoordinator::ReplSetReconfigArgs ReplSetReconfigArgs;
+Status kInterruptedStatus(ErrorCodes::Interrupted, "operation was interrupted");
+
+// Helper class to wrap Timestamp as an OpTime with term 0.
+struct OpTimeWithTermZero {
+ OpTimeWithTermZero(unsigned int sec, unsigned int i) : timestamp(sec, i) {}
+ operator OpTime() const {
+ return OpTime(timestamp, 0);
+ }
+
+ Timestamp timestamp;
+};
+
+TEST_F(ReplCoordTest, StartupWithValidLocalConfig) {
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345"))),
+ HostAndPort("node1", 12345));
+}
+
+TEST_F(ReplCoordTest, StartupWithConfigMissingSelf) {
+ startCapturingLogMessages();
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:54321"))),
+ HostAndPort("node3", 12345));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("NodeNotFound"));
+}
+
+TEST_F(ReplCoordTest, StartupWithLocalConfigSetNameMismatch) {
+ init("mySet");
+ startCapturingLogMessages();
+ assertStartSuccess(BSON("_id"
+ << "notMySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345"))),
+ HostAndPort("node1", 12345));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("reports set name of notMySet,"));
+}
+
+TEST_F(ReplCoordTest, StartupWithNoLocalConfig) {
+ startCapturingLogMessages();
+ start();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(2, countLogLinesContaining("Did not find local "));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+}
+
+TEST_F(ReplCoordTest, InitiateFailsWithEmptyConfig) {
+ OperationContextNoop txn;
+ init("mySet");
+ start(HostAndPort("node1", 12345));
+ BSONObjBuilder result;
+ ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
+ getReplCoord()->processReplSetInitiate(&txn, BSONObj(), &result));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+}
+
+TEST_F(ReplCoordTest, InitiateSucceedsWithOneNodeConfig) {
+ OperationContextNoop txn;
+ init("mySet");
+ start(HostAndPort("node1", 12345));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+
+ // Starting uninitialized, show that we can perform the initiate behavior.
+ BSONObjBuilder result1;
+ ASSERT_OK(
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
+ &result1));
+ ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
+
+ // Show that initiate fails after it has already succeeded.
+ BSONObjBuilder result2;
+ ASSERT_EQUALS(
+ ErrorCodes::AlreadyInitialized,
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
+ &result2));
+
+ // Still in repl set mode, even after failed reinitiate.
+ ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
+}
+
+TEST_F(ReplCoordTest, InitiateSucceedsAfterFailing) {
+ OperationContextNoop txn;
+ init("mySet");
+ start(HostAndPort("node1", 12345));
+ BSONObjBuilder result;
+ ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
+ getReplCoord()->processReplSetInitiate(&txn, BSONObj(), &result));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+
+ // Having failed to initiate once, show that we can now initiate.
+ BSONObjBuilder result1;
+ ASSERT_OK(
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
+ &result1));
+ ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
+}
+
+TEST_F(ReplCoordTest, InitiateFailsIfAlreadyInitialized) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345"))),
+ HostAndPort("node1", 12345));
+ BSONObjBuilder result;
+ ASSERT_EQUALS(
+ ErrorCodes::AlreadyInitialized,
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345"))),
+ &result));
+}
+
+TEST_F(ReplCoordTest, InitiateFailsIfSelfMissing) {
+ OperationContextNoop txn;
+ BSONObjBuilder result;
+ init("mySet");
+ start(HostAndPort("node1", 12345));
+ ASSERT_EQUALS(
+ ErrorCodes::InvalidReplicaSetConfig,
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node4"))),
+ &result));
+}
+
+void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
+ OperationContextNoop txn;
+ BSONObjBuilder garbage;
+ *status =
+ replCoord->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345")
+ << BSON("_id" << 1 << "host"
+ << "node2:54321"))),
+ &garbage);
+}
+
+TEST_F(ReplCoordTest, InitiateFailsIfQuorumNotMet) {
+ init("mySet");
+ start(HostAndPort("node1", 12345));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setSetName("mySet");
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setConfigVersion(1);
+ hbArgs.setCheckEmpty(true);
+ hbArgs.setSenderHost(HostAndPort("node1", 12345));
+ hbArgs.setSenderId(0);
+
+ Status status(ErrorCodes::InternalError, "Not set");
+ stdx::thread prsiThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status));
+ const Date_t startDate = getNet()->now();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
+ ASSERT_EQUALS(HostAndPort("node2", 54321), noi->getRequest().target);
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(hbArgs.toBSON(), noi->getRequest().cmdObj);
+ getNet()->scheduleResponse(
+ noi, startDate + Milliseconds(10), ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
+ getNet()->runUntil(startDate + Milliseconds(10));
+ getNet()->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), getNet()->now());
+ prsiThread.join();
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+}
+
+TEST_F(ReplCoordTest, InitiatePassesIfQuorumMet) {
+ init("mySet");
+ start(HostAndPort("node1", 12345));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setSetName("mySet");
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setConfigVersion(1);
+ hbArgs.setCheckEmpty(true);
+ hbArgs.setSenderHost(HostAndPort("node1", 12345));
+ hbArgs.setSenderId(0);
+
+ Status status(ErrorCodes::InternalError, "Not set");
+ stdx::thread prsiThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status));
+ const Date_t startDate = getNet()->now();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
+ ASSERT_EQUALS(HostAndPort("node2", 54321), noi->getRequest().target);
+ ASSERT_EQUALS("admin", noi->getRequest().dbname);
+ ASSERT_EQUALS(hbArgs.toBSON(), noi->getRequest().cmdObj);
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setConfigVersion(0);
+ getNet()->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(hbResp.toBSON(false), Milliseconds(8))));
+ getNet()->runUntil(startDate + Milliseconds(10));
+ getNet()->exitNetwork();
+ ASSERT_EQUALS(startDate + Milliseconds(10), getNet()->now());
+ prsiThread.join();
+ ASSERT_OK(status);
+ ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
+}
+
+TEST_F(ReplCoordTest, InitiateFailsWithSetNameMismatch) {
+ OperationContextNoop txn;
+ init("mySet");
+ start(HostAndPort("node1", 12345));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+
+ BSONObjBuilder result1;
+ ASSERT_EQUALS(
+ ErrorCodes::InvalidReplicaSetConfig,
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "wrongSet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
+ &result1));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+}
+
+TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
+ OperationContextNoop txn;
+ init("");
+ start(HostAndPort("node1", 12345));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+
+ BSONObjBuilder result1;
+ ASSERT_EQUALS(
+ ErrorCodes::NoReplicationEnabled,
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
+ &result1));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+}
+
+TEST_F(ReplCoordTest, InitiateFailsWhileStoringLocalConfigDocument) {
+ OperationContextNoop txn;
+ init("mySet");
+ start(HostAndPort("node1", 12345));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+
+ BSONObjBuilder result1;
+ getExternalState()->setStoreLocalConfigDocumentStatus(
+ Status(ErrorCodes::OutOfDiskSpace, "The test set this"));
+ ASSERT_EQUALS(
+ ErrorCodes::OutOfDiskSpace,
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"))),
+ &result1));
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+}
+
+TEST_F(ReplCoordTest, CheckReplEnabledForCommandNotRepl) {
+ // pass in settings to avoid having a replSet
+ ReplSettings settings;
+ init(settings);
+ start();
+
+ // check status NoReplicationEnabled and empty result
+ BSONObjBuilder result;
+ Status status = getReplCoord()->checkReplEnabledForCommand(&result);
+ ASSERT_EQUALS(status, ErrorCodes::NoReplicationEnabled);
+ ASSERT_TRUE(result.obj().isEmpty());
+}
+
+TEST_F(ReplCoordTest, checkReplEnabledForCommandConfigSvr) {
+ ReplSettings settings;
+ serverGlobalParams.configsvr = true;
+ init(settings);
+ start();
+
+    // check status NoReplicationEnabled and result mentions configsvr
+ BSONObjBuilder result;
+ Status status = getReplCoord()->checkReplEnabledForCommand(&result);
+ ASSERT_EQUALS(status, ErrorCodes::NoReplicationEnabled);
+ ASSERT_EQUALS(result.obj()["info"].String(), "configsvr");
+ serverGlobalParams.configsvr = false;
+}
+
+TEST_F(ReplCoordTest, checkReplEnabledForCommandNoConfig) {
+ start();
+
+ // check status NotYetInitialized and result mentions rs.initiate
+ BSONObjBuilder result;
+ Status status = getReplCoord()->checkReplEnabledForCommand(&result);
+ ASSERT_EQUALS(status, ErrorCodes::NotYetInitialized);
+ ASSERT_TRUE(result.obj()["info"].String().find("rs.initiate") != std::string::npos);
+}
+
+TEST_F(ReplCoordTest, checkReplEnabledForCommandWorking) {
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+
+ // check status OK and result is empty
+ BSONObjBuilder result;
+ Status status = getReplCoord()->checkReplEnabledForCommand(&result);
+ ASSERT_EQUALS(status, Status::OK());
+ ASSERT_TRUE(result.obj().isEmpty());
+}
+
+TEST_F(ReplCoordTest, BasicRBIDUsage) {
+ start();
+ BSONObjBuilder result;
+ getReplCoord()->processReplSetGetRBID(&result);
+ long long initialValue = result.obj()["rbid"].Int();
+ getReplCoord()->incrementRollbackID();
+
+ BSONObjBuilder result2;
+ getReplCoord()->processReplSetGetRBID(&result2);
+ long long incrementedValue = result2.obj()["rbid"].Int();
+ ASSERT_EQUALS(incrementedValue, initialValue + 1);
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationNoReplEnabled) {
+ init("");
+ OperationContextNoop txn;
+ OpTimeWithTermZero time(100, 1);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ writeConcern.wNumNodes = 2;
+
+ // Because we didn't set ReplSettings.replSet, it will think we're a standalone so
+ // awaitReplication will always work.
+ ReplicationCoordinator::StatusAndDuration statusAndDur =
+ getReplCoord()->awaitReplication(&txn, time, writeConcern);
+ ASSERT_OK(statusAndDur.status);
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationMasterSlaveMajorityBaseCase) {
+ ReplSettings settings;
+ settings.master = true;
+ init(settings);
+ OperationContextNoop txn;
+ OpTimeWithTermZero time(100, 1);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ writeConcern.wNumNodes = 2;
+
+
+ writeConcern.wNumNodes = 0;
+ writeConcern.wMode = WriteConcernOptions::kMajority;
+ // w:majority always works on master/slave
+ ReplicationCoordinator::StatusAndDuration statusAndDur =
+ getReplCoord()->awaitReplication(&txn, time, writeConcern);
+ ASSERT_OK(statusAndDur.status);
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationReplSetBaseCases) {
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2))),
+ HostAndPort("node1", 12345));
+
+ OperationContextNoop txn;
+ OpTimeWithTermZero time(100, 1);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ writeConcern.wNumNodes = 0; // Waiting for 0 nodes always works
+ writeConcern.wMode = "";
+
+ // Should fail when not primary
+ ReplicationCoordinator::StatusAndDuration statusAndDur =
+ getReplCoord()->awaitReplication(&txn, time, writeConcern);
+ ASSERT_EQUALS(ErrorCodes::NotMaster, statusAndDur.status);
+
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time, writeConcern);
+ ASSERT_OK(statusAndDur.status);
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationNumberOfNodesNonBlocking) {
+ OperationContextNoop txn;
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2) << BSON("host"
+ << "node4:12345"
+ << "_id" << 3))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ writeConcern.wNumNodes = 1;
+
+ // 1 node waiting for time 1
+ ReplicationCoordinator::StatusAndDuration statusAndDur =
+ getReplCoord()->awaitReplication(&txn, time1, writeConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ getReplCoord()->setMyLastOptime(time1);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, writeConcern);
+ ASSERT_OK(statusAndDur.status);
+
+ // 2 nodes waiting for time1
+ writeConcern.wNumNodes = 2;
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, writeConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, writeConcern);
+ ASSERT_OK(statusAndDur.status);
+
+ // 2 nodes waiting for time2
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ getReplCoord()->setMyLastOptime(time2);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 3, time2));
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
+ ASSERT_OK(statusAndDur.status);
+
+ // 3 nodes waiting for time2
+ writeConcern.wNumNodes = 3;
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time2));
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
+ ASSERT_OK(statusAndDur.status);
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationNamedModesNonBlocking) {
+ OperationContextNoop txn;
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags" << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags" << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags" << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags" << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags" << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))) << "settings"
+ << BSON("getLastErrorModes" << BSON("multiDC" << BSON("dc" << 2) << "multiDCAndRack"
+ << BSON("dc" << 2 << "rack" << 3)))),
+ HostAndPort("node0"));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ // Test invalid write concern
+ WriteConcernOptions invalidWriteConcern;
+ invalidWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ invalidWriteConcern.wMode = "fakemode";
+
+ ReplicationCoordinator::StatusAndDuration statusAndDur =
+ getReplCoord()->awaitReplication(&txn, time1, invalidWriteConcern);
+ ASSERT_EQUALS(ErrorCodes::UnknownReplWriteConcern, statusAndDur.status);
+
+
+ // Set up valid write concerns for the rest of the test
+ WriteConcernOptions majorityWriteConcern;
+ majorityWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ majorityWriteConcern.wMode = WriteConcernOptions::kMajority;
+
+ WriteConcernOptions multiDCWriteConcern;
+ multiDCWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ multiDCWriteConcern.wMode = "multiDC";
+
+ WriteConcernOptions multiRackWriteConcern;
+ multiRackWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ multiRackWriteConcern.wMode = "multiDCAndRack";
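+    // Editor's note (not part of the original change): each count in getLastErrorModes above
+    // refers to distinct tag values, so "multiDC" is satisfied once nodes spanning two
+    // different "dc" values have the write, and "multiDCAndRack" additionally needs nodes
+    // spanning three different "rack" values.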
+
+
+ // Nothing satisfied
+ getReplCoord()->setMyLastOptime(time1);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, majorityWriteConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiDCWriteConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiRackWriteConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+
+    // Majority satisfied but neither custom mode
+ getReplCoord()->setLastOptime_forTest(2, 1, time1);
+ getReplCoord()->setLastOptime_forTest(2, 2, time1);
+
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, majorityWriteConcern);
+ ASSERT_OK(statusAndDur.status);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiDCWriteConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiRackWriteConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+
+ // All modes satisfied
+ getReplCoord()->setLastOptime_forTest(2, 3, time1);
+
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, majorityWriteConcern);
+ ASSERT_OK(statusAndDur.status);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiDCWriteConcern);
+ ASSERT_OK(statusAndDur.status);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiRackWriteConcern);
+ ASSERT_OK(statusAndDur.status);
+
+ // multiDC satisfied but not majority or multiRack
+ getReplCoord()->setMyLastOptime(time2);
+ getReplCoord()->setLastOptime_forTest(2, 3, time2);
+
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time2, majorityWriteConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time2, multiDCWriteConcern);
+ ASSERT_OK(statusAndDur.status);
+ statusAndDur = getReplCoord()->awaitReplication(&txn, time2, multiRackWriteConcern);
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+}
- // Helper class to wrap Timestamp as an OpTime with term 0.
- struct OpTimeWithTermZero {
- OpTimeWithTermZero(unsigned int sec, unsigned int i) : timestamp(sec, i) { }
- operator OpTime() const { return OpTime(timestamp, 0); }
-
- Timestamp timestamp;
- };
-
- TEST_F(ReplCoordTest, StartupWithValidLocalConfig) {
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345"))),
- HostAndPort("node1", 12345));
- }
-
- TEST_F(ReplCoordTest, StartupWithConfigMissingSelf) {
- startCapturingLogMessages();
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node2:54321"))),
- HostAndPort("node3", 12345));
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("NodeNotFound"));
- }
-
- TEST_F(ReplCoordTest, StartupWithLocalConfigSetNameMismatch) {
- init("mySet");
- startCapturingLogMessages();
- assertStartSuccess(
- BSON("_id" << "notMySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345"))),
- HostAndPort("node1", 12345));
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("reports set name of notMySet,"));
- }
-
- TEST_F(ReplCoordTest, StartupWithNoLocalConfig) {
- startCapturingLogMessages();
- start();
- stopCapturingLogMessages();
- ASSERT_EQUALS(2, countLogLinesContaining("Did not find local "));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- }
-
- TEST_F(ReplCoordTest, InitiateFailsWithEmptyConfig) {
- OperationContextNoop txn;
- init("mySet");
- start(HostAndPort("node1", 12345));
- BSONObjBuilder result;
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetInitiate(&txn, BSONObj(), &result));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- }
-
- TEST_F(ReplCoordTest, InitiateSucceedsWithOneNodeConfig) {
- OperationContextNoop txn;
- init("mySet");
- start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
-
- // Starting uninitialized, show that we can perform the initiate behavior.
- BSONObjBuilder result1;
- ASSERT_OK(getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node1:12345"))),
- &result1));
- ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
-
- // Show that initiate fails after it has already succeeded.
- BSONObjBuilder result2;
- ASSERT_EQUALS(ErrorCodes::AlreadyInitialized,
- getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node1:12345"))),
- &result2));
-
- // Still in repl set mode, even after failed reinitiate.
- ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
- }
-
- TEST_F(ReplCoordTest, InitiateSucceedsAfterFailing) {
- OperationContextNoop txn;
- init("mySet");
- start(HostAndPort("node1", 12345));
- BSONObjBuilder result;
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetInitiate(&txn, BSONObj(), &result));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
-
- // Having failed to initiate once, show that we can now initiate.
- BSONObjBuilder result1;
- ASSERT_OK(getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node1:12345"))),
- &result1));
- ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
- }
-
- TEST_F(ReplCoordTest, InitiateFailsIfAlreadyInitialized) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 << "host" << "node1:12345"))),
- HostAndPort("node1", 12345));
- BSONObjBuilder result;
- ASSERT_EQUALS(ErrorCodes::AlreadyInitialized,
- getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 1 <<
- "host" << "node1:12345"))),
- &result));
- }
-
- TEST_F(ReplCoordTest, InitiateFailsIfSelfMissing) {
- OperationContextNoop txn;
- BSONObjBuilder result;
- init("mySet");
- start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node4"))),
- &result));
- }
-
- void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
- OperationContextNoop txn;
- BSONObjBuilder garbage;
- *status = replCoord->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node1:12345") <<
- BSON("_id" << 1 << "host" << "node2:54321"))),
- &garbage);
- }
-
- TEST_F(ReplCoordTest, InitiateFailsIfQuorumNotMet) {
- init("mySet");
- start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
-
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setSetName("mySet");
- hbArgs.setProtocolVersion(1);
- hbArgs.setConfigVersion(1);
- hbArgs.setCheckEmpty(true);
- hbArgs.setSenderHost(HostAndPort("node1", 12345));
- hbArgs.setSenderId(0);
-
- Status status(ErrorCodes::InternalError, "Not set");
- stdx::thread prsiThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status));
- const Date_t startDate = getNet()->now();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
- ASSERT_EQUALS(HostAndPort("node2", 54321), noi->getRequest().target);
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(hbArgs.toBSON(), noi->getRequest().cmdObj);
- getNet()->scheduleResponse(noi, startDate + Milliseconds(10),
- ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
- getNet()->runUntil(startDate + Milliseconds(10));
- getNet()->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), getNet()->now());
- prsiThread.join();
- ASSERT_EQUALS(ErrorCodes::NodeNotFound, status);
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- }
-
- TEST_F(ReplCoordTest, InitiatePassesIfQuorumMet) {
- init("mySet");
- start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
-
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setSetName("mySet");
- hbArgs.setProtocolVersion(1);
- hbArgs.setConfigVersion(1);
- hbArgs.setCheckEmpty(true);
- hbArgs.setSenderHost(HostAndPort("node1", 12345));
- hbArgs.setSenderId(0);
-
- Status status(ErrorCodes::InternalError, "Not set");
- stdx::thread prsiThread(stdx::bind(doReplSetInitiate, getReplCoord(), &status));
- const Date_t startDate = getNet()->now();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
- ASSERT_EQUALS(HostAndPort("node2", 54321), noi->getRequest().target);
- ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(hbArgs.toBSON(), noi->getRequest().cmdObj);
- ReplSetHeartbeatResponse hbResp;
- hbResp.setConfigVersion(0);
- getNet()->scheduleResponse(
- noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(hbResp.toBSON(false), Milliseconds(8))));
- getNet()->runUntil(startDate + Milliseconds(10));
- getNet()->exitNetwork();
- ASSERT_EQUALS(startDate + Milliseconds(10), getNet()->now());
- prsiThread.join();
- ASSERT_OK(status);
- ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
- }
-
- TEST_F(ReplCoordTest, InitiateFailsWithSetNameMismatch) {
- OperationContextNoop txn;
- init("mySet");
- start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
-
- BSONObjBuilder result1;
- ASSERT_EQUALS(
- ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id" << "wrongSet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node1:12345"))),
- &result1));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- }
-
- TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
- OperationContextNoop txn;
- init("");
- start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
-
- BSONObjBuilder result1;
- ASSERT_EQUALS(
- ErrorCodes::NoReplicationEnabled,
- getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node1:12345"))),
- &result1));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- }
-
- TEST_F(ReplCoordTest, InitiateFailsWhileStoringLocalConfigDocument) {
- OperationContextNoop txn;
- init("mySet");
- start(HostAndPort("node1", 12345));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
-
- BSONObjBuilder result1;
- getExternalState()->setStoreLocalConfigDocumentStatus(Status(ErrorCodes::OutOfDiskSpace,
- "The test set this"));
- ASSERT_EQUALS(
- ErrorCodes::OutOfDiskSpace,
- getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node1:12345"))),
- &result1));
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- }
-
- TEST_F(ReplCoordTest, CheckReplEnabledForCommandNotRepl) {
- // pass in settings to avoid having a replSet
- ReplSettings settings;
- init(settings);
- start();
-
- // check status NoReplicationEnabled and empty result
- BSONObjBuilder result;
- Status status = getReplCoord()->checkReplEnabledForCommand(&result);
- ASSERT_EQUALS(status, ErrorCodes::NoReplicationEnabled);
- ASSERT_TRUE(result.obj().isEmpty());
- }
-
- TEST_F(ReplCoordTest, checkReplEnabledForCommandConfigSvr) {
- ReplSettings settings;
- serverGlobalParams.configsvr = true;
- init(settings);
- start();
-
- // check status NoReplicationEnabled and result mentions configsrv
- BSONObjBuilder result;
- Status status = getReplCoord()->checkReplEnabledForCommand(&result);
- ASSERT_EQUALS(status, ErrorCodes::NoReplicationEnabled);
- ASSERT_EQUALS(result.obj()["info"].String(), "configsvr");
- serverGlobalParams.configsvr = false;
- }
-
- TEST_F(ReplCoordTest, checkReplEnabledForCommandNoConfig) {
- start();
-
- // check status NotYetInitialized and result mentions rs.initiate
- BSONObjBuilder result;
- Status status = getReplCoord()->checkReplEnabledForCommand(&result);
- ASSERT_EQUALS(status, ErrorCodes::NotYetInitialized);
- ASSERT_TRUE(result.obj()["info"].String().find("rs.initiate") != std::string::npos);
- }
-
- TEST_F(ReplCoordTest, checkReplEnabledForCommandWorking) {
- assertStartSuccess(BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" <<
- "_id" << 0 ))),
- HostAndPort("node1", 12345));
-
- // check status OK and result is empty
- BSONObjBuilder result;
- Status status = getReplCoord()->checkReplEnabledForCommand(&result);
- ASSERT_EQUALS(status, Status::OK());
- ASSERT_TRUE(result.obj().isEmpty());
- }
-
- TEST_F(ReplCoordTest, BasicRBIDUsage) {
- start();
- BSONObjBuilder result;
- getReplCoord()->processReplSetGetRBID(&result);
- long long initialValue = result.obj()["rbid"].Int();
- getReplCoord()->incrementRollbackID();
-
- BSONObjBuilder result2;
- getReplCoord()->processReplSetGetRBID(&result2);
- long long incrementedValue = result2.obj()["rbid"].Int();
- ASSERT_EQUALS(incrementedValue, initialValue + 1);
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationNoReplEnabled) {
- init("");
- OperationContextNoop txn;
- OpTimeWithTermZero time(100, 1);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- writeConcern.wNumNodes = 2;
-
- // Because we didn't set ReplSettings.replSet, it will think we're a standalone so
- // awaitReplication will always work.
- ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(&txn, time, writeConcern);
- ASSERT_OK(statusAndDur.status);
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationMasterSlaveMajorityBaseCase) {
- ReplSettings settings;
- settings.master = true;
- init(settings);
- OperationContextNoop txn;
- OpTimeWithTermZero time(100, 1);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- writeConcern.wNumNodes = 2;
-
-
- writeConcern.wNumNodes = 0;
- writeConcern.wMode = WriteConcernOptions::kMajority;
- // w:majority always works on master/slave
- ReplicationCoordinator::StatusAndDuration statusAndDur = getReplCoord()->awaitReplication(
- &txn, time, writeConcern);
- ASSERT_OK(statusAndDur.status);
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationReplSetBaseCases) {
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2))),
- HostAndPort("node1", 12345));
-
- OperationContextNoop txn;
- OpTimeWithTermZero time(100, 1);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- writeConcern.wNumNodes = 0; // Waiting for 0 nodes always works
- writeConcern.wMode = "";
-
- // Should fail when not primary
- ReplicationCoordinator::StatusAndDuration statusAndDur = getReplCoord()->awaitReplication(
- &txn, time, writeConcern);
- ASSERT_EQUALS(ErrorCodes::NotMaster, statusAndDur.status);
-
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- statusAndDur = getReplCoord()->awaitReplication(&txn, time, writeConcern);
- ASSERT_OK(statusAndDur.status);
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationNumberOfNodesNonBlocking) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2) <<
- BSON("host" << "node4:12345" << "_id" << 3))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- writeConcern.wNumNodes = 1;
-
- // 1 node waiting for time 1
- ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(&txn, time1, writeConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- getReplCoord()->setMyLastOptime(time1);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, writeConcern);
- ASSERT_OK(statusAndDur.status);
-
- // 2 nodes waiting for time1
- writeConcern.wNumNodes = 2;
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, writeConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, writeConcern);
- ASSERT_OK(statusAndDur.status);
-
- // 2 nodes waiting for time2
- statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- getReplCoord()->setMyLastOptime(time2);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 3, time2));
- statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
- ASSERT_OK(statusAndDur.status);
-
- // 3 nodes waiting for time2
- writeConcern.wNumNodes = 3;
- statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time2));
- statusAndDur = getReplCoord()->awaitReplication(&txn, time2, writeConcern);
- ASSERT_OK(statusAndDur.status);
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationNamedModesNonBlocking) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "node0" <<
- "tags" << BSON("dc" << "NA" <<
- "rack" << "rackNA1")) <<
- BSON("_id" << 1 <<
- "host" << "node1" <<
- "tags" << BSON("dc" << "NA" <<
- "rack" << "rackNA2")) <<
- BSON("_id" << 2 <<
- "host" << "node2" <<
- "tags" << BSON("dc" << "NA" <<
- "rack" << "rackNA3")) <<
- BSON("_id" << 3 <<
- "host" << "node3" <<
- "tags" << BSON("dc" << "EU" <<
- "rack" << "rackEU1")) <<
- BSON("_id" << 4 <<
- "host" << "node4" <<
- "tags" << BSON("dc" << "EU" <<
- "rack" << "rackEU2"))) <<
- "settings" << BSON("getLastErrorModes" <<
- BSON("multiDC" << BSON("dc" << 2) <<
- "multiDCAndRack" << BSON("dc" << 2 << "rack" << 3)))),
- HostAndPort("node0"));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- // Test invalid write concern
- WriteConcernOptions invalidWriteConcern;
- invalidWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- invalidWriteConcern.wMode = "fakemode";
-
- ReplicationCoordinator::StatusAndDuration statusAndDur =
- getReplCoord()->awaitReplication(&txn, time1, invalidWriteConcern);
- ASSERT_EQUALS(ErrorCodes::UnknownReplWriteConcern, statusAndDur.status);
-
-
- // Set up valid write concerns for the rest of the test
- WriteConcernOptions majorityWriteConcern;
- majorityWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- majorityWriteConcern.wMode = WriteConcernOptions::kMajority;
-
- WriteConcernOptions multiDCWriteConcern;
- multiDCWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- multiDCWriteConcern.wMode = "multiDC";
-
- WriteConcernOptions multiRackWriteConcern;
- multiRackWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- multiRackWriteConcern.wMode = "multiDCAndRack";
-
-
- // Nothing satisfied
- getReplCoord()->setMyLastOptime(time1);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, majorityWriteConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiDCWriteConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiRackWriteConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
-
- // Majority satisfied but not either custom mode
- getReplCoord()->setLastOptime_forTest(2, 1, time1);
- getReplCoord()->setLastOptime_forTest(2, 2, time1);
-
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, majorityWriteConcern);
- ASSERT_OK(statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiDCWriteConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiRackWriteConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
-
- // All modes satisfied
- getReplCoord()->setLastOptime_forTest(2, 3, time1);
-
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, majorityWriteConcern);
- ASSERT_OK(statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiDCWriteConcern);
- ASSERT_OK(statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time1, multiRackWriteConcern);
- ASSERT_OK(statusAndDur.status);
-
- // multiDC satisfied but not majority or multiRack
- getReplCoord()->setMyLastOptime(time2);
- getReplCoord()->setLastOptime_forTest(2, 3, time2);
-
- statusAndDur = getReplCoord()->awaitReplication(&txn, time2, majorityWriteConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time2, multiDCWriteConcern);
- ASSERT_OK(statusAndDur.status);
- statusAndDur = getReplCoord()->awaitReplication(&txn, time2, multiRackWriteConcern);
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- }
-
- /**
- * Used to wait for replication in a separate thread without blocking execution of the test.
- * To use, set the optime and write concern to be passed to awaitReplication and then call
- * start(), which will spawn a thread that calls awaitReplication. No calls may be made
- * on the ReplicationAwaiter instance between calling start and getResult(). After returning
- * from getResult(), you can call reset() to allow the awaiter to be reused for another
- * awaitReplication call.
- */
- class ReplicationAwaiter {
- public:
-
- ReplicationAwaiter(ReplicationCoordinatorImpl* replCoord, OperationContext* txn) :
- _replCoord(replCoord), _finished(false),
- _result(ReplicationCoordinator::StatusAndDuration(
- Status::OK(), Milliseconds(0))) {}
-
- void setOpTime(const OpTime& ot) {
- _optime = ot;
- }
-
- void setWriteConcern(const WriteConcernOptions& wc) {
- _writeConcern = wc;
- }
-
- // may block
- ReplicationCoordinator::StatusAndDuration getResult() {
- _thread->join();
- ASSERT(_finished);
- return _result;
- }
-
- void start(OperationContext* txn) {
- ASSERT(!_finished);
- _thread.reset(new stdx::thread(stdx::bind(&ReplicationAwaiter::_awaitReplication,
- this,
- txn)));
- }
-
- void reset() {
- ASSERT(_finished);
- _finished = false;
- _result = ReplicationCoordinator::StatusAndDuration(
- Status::OK(), Milliseconds(0));
- }
-
- private:
-
- void _awaitReplication(OperationContext* txn) {
- _result = _replCoord->awaitReplication(txn, _optime, _writeConcern);
- _finished = true;
- }
-
- ReplicationCoordinatorImpl* _replCoord;
- bool _finished;
- OpTime _optime;
- WriteConcernOptions _writeConcern;
- ReplicationCoordinator::StatusAndDuration _result;
- std::unique_ptr<stdx::thread> _thread;
- };
-
- TEST_F(ReplCoordTest, AwaitReplicationNumberOfNodesBlocking) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- ReplicationAwaiter awaiter(getReplCoord(), &txn);
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
- writeConcern.wNumNodes = 2;
-
- // 2 nodes waiting for time1
- awaiter.setOpTime(time1);
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
- getReplCoord()->setMyLastOptime(time1);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
- ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
- ASSERT_OK(statusAndDur.status);
- awaiter.reset();
-
- // 2 nodes waiting for time2
- awaiter.setOpTime(time2);
- awaiter.start(&txn);
- getReplCoord()->setMyLastOptime(time2);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time2));
- statusAndDur = awaiter.getResult();
- ASSERT_OK(statusAndDur.status);
- awaiter.reset();
-
- // 3 nodes waiting for time2
- writeConcern.wNumNodes = 3;
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time2));
- statusAndDur = awaiter.getResult();
- ASSERT_OK(statusAndDur.status);
- awaiter.reset();
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationTimeout) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- ReplicationAwaiter awaiter(getReplCoord(), &txn);
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = 50;
- writeConcern.wNumNodes = 2;
-
- // 2 nodes waiting for time2
- awaiter.setOpTime(time2);
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
- getReplCoord()->setMyLastOptime(time2);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
- ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
- awaiter.reset();
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationShutdown) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- ReplicationAwaiter awaiter(getReplCoord(), &txn);
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
- writeConcern.wNumNodes = 2;
-
- // 2 nodes waiting for time2
- awaiter.setOpTime(time2);
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time1));
- shutdown();
- ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, statusAndDur.status);
- awaiter.reset();
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationStepDown) {
- // Test that a thread blocked in awaitReplication will be woken up and return NotMaster
- // if the node steps down while it is waiting.
- OperationContextReplMock txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- ReplicationAwaiter awaiter(getReplCoord(), &txn);
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
- writeConcern.wNumNodes = 2;
-
- // 2 nodes waiting for time2
- awaiter.setOpTime(time2);
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time1));
- getReplCoord()->stepDown(&txn, true, Milliseconds(0), Milliseconds(1000));
- ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
- ASSERT_EQUALS(ErrorCodes::NotMaster, statusAndDur.status);
- awaiter.reset();
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationInterrupt) {
- // Tests that a thread blocked in awaitReplication can be killed by a killOp operation
- const unsigned int opID = 100;
- OperationContextReplMock txn{opID};
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "node1") <<
- BSON("_id" << 1 << "host" << "node2") <<
- BSON("_id" << 2 << "host" << "node3"))),
- HostAndPort("node1"));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- ReplicationAwaiter awaiter(getReplCoord(), &txn);
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
- writeConcern.wNumNodes = 2;
-
-
- // 2 nodes waiting for time2
- awaiter.setOpTime(time2);
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time1));
-
- txn.setCheckForInterruptStatus(kInterruptedStatus);
- getReplCoord()->interrupt(opID);
- ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
- ASSERT_EQUALS(ErrorCodes::Interrupted, statusAndDur.status);
- awaiter.reset();
- }
-
- class StepDownTest : public ReplCoordTest {
- protected:
- OID myRid;
- OID rid2;
- OID rid3;
-
- private:
- virtual void setUp() {
- ReplCoordTest::setUp();
- init("mySet/test1:1234,test2:1234,test3:1234");
-
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "test1:1234") <<
- BSON("_id" << 1 << "host" << "test2:1234") <<
- BSON("_id" << 2 << "host" << "test3:1234"))),
- HostAndPort("test1", 1234));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- myRid = getReplCoord()->getMyRID();
- }
- };
-
- TEST_F(ReplCoordTest, UpdateTerm) {
+/**
+ * Used to wait for replication in a separate thread without blocking execution of the test.
+ * To use, set the optime and write concern to be passed to awaitReplication and then call
+ * start(), which will spawn a thread that calls awaitReplication. No calls may be made
+ * on the ReplicationAwaiter instance between calling start and getResult(). After returning
+ * from getResult(), you can call reset() to allow the awaiter to be reused for another
+ * awaitReplication call.
+ */
+class ReplicationAwaiter {
+public:
+ ReplicationAwaiter(ReplicationCoordinatorImpl* replCoord, OperationContext* txn)
+ : _replCoord(replCoord),
+ _finished(false),
+ _result(ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0))) {}
+
+ void setOpTime(const OpTime& ot) {
+ _optime = ot;
+ }
+
+ void setWriteConcern(const WriteConcernOptions& wc) {
+ _writeConcern = wc;
+ }
+
+ // may block
+ ReplicationCoordinator::StatusAndDuration getResult() {
+ _thread->join();
+ ASSERT(_finished);
+ return _result;
+ }
+
+ void start(OperationContext* txn) {
+ ASSERT(!_finished);
+ _thread.reset(
+ new stdx::thread(stdx::bind(&ReplicationAwaiter::_awaitReplication, this, txn)));
+ }
+
+ void reset() {
+ ASSERT(_finished);
+ _finished = false;
+ _result = ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0));
+ }
+
+private:
+ void _awaitReplication(OperationContext* txn) {
+ _result = _replCoord->awaitReplication(txn, _optime, _writeConcern);
+ _finished = true;
+ }
+
+ ReplicationCoordinatorImpl* _replCoord;
+ bool _finished;
+ OpTime _optime;
+ WriteConcernOptions _writeConcern;
+ ReplicationCoordinator::StatusAndDuration _result;
+ std::unique_ptr<stdx::thread> _thread;
+};
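+
+// Editor's note: a minimal usage sketch of the ReplicationAwaiter protocol described above,
+// not part of the original change; the optime and write concern values are illustrative only.
+//
+//     ReplicationAwaiter awaiter(getReplCoord(), &txn);
+//     awaiter.setOpTime(time1);               // optime the background thread will wait on
+//     awaiter.setWriteConcern(writeConcern);  // e.g. wNumNodes = 2, wTimeout = kNoTimeout
+//     awaiter.start(&txn);                    // spawns the thread that calls awaitReplication
+//     // ... advance optimes on other members so the write concern can be satisfied ...
+//     ASSERT_OK(awaiter.getResult().status);  // joins the thread and returns its result
+//     awaiter.reset();                        // allows the awaiter to be reused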
+
+TEST_F(ReplCoordTest, AwaitReplicationNumberOfNodesBlocking) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ ReplicationAwaiter awaiter(getReplCoord(), &txn);
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
+ writeConcern.wNumNodes = 2;
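+    // Editor's note (not in the original change): unlike the kNoWaiting concerns used in the
+    // non-blocking tests above, kNoTimeout lets awaitReplication block until the write concern
+    // is satisfied, which is why a ReplicationAwaiter thread is used here.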
+
+ // 2 nodes waiting for time1
+ awaiter.setOpTime(time1);
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+ getReplCoord()->setMyLastOptime(time1);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
+ ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
+ ASSERT_OK(statusAndDur.status);
+ awaiter.reset();
+
+ // 2 nodes waiting for time2
+ awaiter.setOpTime(time2);
+ awaiter.start(&txn);
+ getReplCoord()->setMyLastOptime(time2);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time2));
+ statusAndDur = awaiter.getResult();
+ ASSERT_OK(statusAndDur.status);
+ awaiter.reset();
+
+ // 3 nodes waiting for time2
+ writeConcern.wNumNodes = 3;
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time2));
+ statusAndDur = awaiter.getResult();
+ ASSERT_OK(statusAndDur.status);
+ awaiter.reset();
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationTimeout) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ ReplicationAwaiter awaiter(getReplCoord(), &txn);
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = 50;
+ writeConcern.wNumNodes = 2;
+
+ // 2 nodes waiting for time2
+ awaiter.setOpTime(time2);
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+ getReplCoord()->setMyLastOptime(time2);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
+ ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
+ awaiter.reset();
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationShutdown) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ ReplicationAwaiter awaiter(getReplCoord(), &txn);
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
+ writeConcern.wNumNodes = 2;
+
+ // 2 nodes waiting for time2
+ awaiter.setOpTime(time2);
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time1));
+ shutdown();
+ ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, statusAndDur.status);
+ awaiter.reset();
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationStepDown) {
+ // Test that a thread blocked in awaitReplication will be woken up and return NotMaster
+ // if the node steps down while it is waiting.
+ OperationContextReplMock txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ ReplicationAwaiter awaiter(getReplCoord(), &txn);
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
+ writeConcern.wNumNodes = 2;
+
+ // 2 nodes waiting for time2
+ awaiter.setOpTime(time2);
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time1));
+ getReplCoord()->stepDown(&txn, true, Milliseconds(0), Milliseconds(1000));
+ ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
+ ASSERT_EQUALS(ErrorCodes::NotMaster, statusAndDur.status);
+ awaiter.reset();
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationInterrupt) {
+ // Tests that a thread blocked in awaitReplication can be killed by a killOp operation
+ const unsigned int opID = 100;
+ OperationContextReplMock txn{opID};
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1")
+ << BSON("_id" << 1 << "host"
+ << "node2") << BSON("_id" << 2 << "host"
+ << "node3"))),
+ HostAndPort("node1"));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ ReplicationAwaiter awaiter(getReplCoord(), &txn);
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
+ writeConcern.wNumNodes = 2;
+
+
+ // 2 nodes waiting for time2
+ awaiter.setOpTime(time2);
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time1));
+
+ txn.setCheckForInterruptStatus(kInterruptedStatus);
+ getReplCoord()->interrupt(opID);
+ ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
+ ASSERT_EQUALS(ErrorCodes::Interrupted, statusAndDur.status);
+ awaiter.reset();
+}
+
+class StepDownTest : public ReplCoordTest {
+protected:
+ OID myRid;
+ OID rid2;
+ OID rid3;
+
+private:
+ virtual void setUp() {
ReplCoordTest::setUp();
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "test1:1234") <<
- BSON("_id" << 1 << "host" << "test2:1234") <<
- BSON("_id" << 2 << "host" << "test3:1234")) <<
- "protocolVersion" << 1),
- HostAndPort("test1", 1234));
- getReplCoord()->setMyLastOptime(OpTime(Timestamp (100, 1), 0));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test1", 1234));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
-
- simulateSuccessfulV1Election();
-
- ASSERT_EQUALS(1, getReplCoord()->getTerm());
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
-
- // lower term, no change
- getReplCoord()->updateTerm(0);
- ASSERT_EQUALS(1, getReplCoord()->getTerm());
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
-
- // same term, no change
- getReplCoord()->updateTerm(1);
- ASSERT_EQUALS(1, getReplCoord()->getTerm());
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
-
- // higher term, step down and change term
- Handle cbHandle;
- getReplCoord()->updateTerm_forTest(2);
- ASSERT_EQUALS(2, getReplCoord()->getTerm());
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
-
- }
-
- TEST_F(StepDownTest, StepDownNotPrimary) {
- OperationContextReplMock txn;
- OpTimeWithTermZero optime1(100, 1);
- // All nodes are caught up
- getReplCoord()->setMyLastOptime(optime1);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
-
- Status status = getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(0));
- ASSERT_EQUALS(ErrorCodes::NotMaster, status);
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
- }
-
- TEST_F(StepDownTest, StepDownTimeoutAcquiringGlobalLock) {
- OperationContextReplMock txn;
- OpTimeWithTermZero optime1(100, 1);
- // All nodes are caught up
- getReplCoord()->setMyLastOptime(optime1);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
-
- simulateSuccessfulElection();
-
- // Make sure stepDown cannot grab the global shared lock
- Lock::GlobalWrite lk(txn.lockState());
-
- Status status = getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(1000));
- ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- }
-
- TEST_F(StepDownTest, StepDownNoWaiting) {
- OperationContextReplMock txn;
- OpTimeWithTermZero optime1(100, 1);
- // All nodes are caught up
- getReplCoord()->setMyLastOptime(optime1);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
-
- simulateSuccessfulElection();
-
- enterNetwork();
- getNet()->runUntil(getNet()->now() + Seconds(2));
- ASSERT(getNet()->hasReadyRequests());
- NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
- RemoteCommandRequest request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgs hbArgs;
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName(hbArgs.getSetName());
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(hbArgs.getConfigVersion());
- hbResp.setOpTime(optime1);
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(respObj.obj()));
- }
- while (getNet()->hasReadyRequests()) {
- getNet()->blackHole(getNet()->getNextReadyRequest());
- }
- getNet()->runReadyNetworkOperations();
- exitNetwork();
-
-
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- ASSERT_OK(getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(1000)));
- enterNetwork(); // So we can safely inspect the topology coordinator
- ASSERT_EQUALS(getNet()->now() + Seconds(1), getTopoCoord().getStepDownTime());
- ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
- exitNetwork();
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
- }
-
- TEST_F(ReplCoordTest, StepDownAndBackUpSingleNode) {
- init("mySet");
-
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "test1:1234"))),
- HostAndPort("test1", 1234));
- OperationContextReplMock txn;
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
-
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- ASSERT_OK(getReplCoord()->stepDown(&txn, true, Milliseconds(0), Milliseconds(1000)));
- getNet()->enterNetwork(); // Must do this before inspecting the topocoord
- Date_t stepdownUntil = getNet()->now() + Seconds(1);
- ASSERT_EQUALS(stepdownUntil, getTopoCoord().getStepDownTime());
- ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
-
- // Now run time forward and make sure that the node becomes primary again when the stepdown
- // period ends.
- getNet()->runUntil(stepdownUntil);
- ASSERT_EQUALS(stepdownUntil, getNet()->now());
- ASSERT_TRUE(getTopoCoord().getMemberState().primary());
- getNet()->exitNetwork();
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- }
-
- /**
- * Used to run wait for stepDown() to finish in a separate thread without blocking execution of
- * the test. To use, set the values of "force", "waitTime", and "stepDownTime", which will be
- * used as the arguments passed to stepDown, and then call
- * start(), which will spawn a thread that calls stepDown. No calls may be made
- * on the StepDownRunner instance between calling start and getResult(). After returning
- * from getResult(), you can call reset() to allow the StepDownRunner to be reused for another
- * stepDown call.
- */
- class StepDownRunner {
- public:
-
- StepDownRunner(ReplicationCoordinatorImpl* replCoord) :
- _replCoord(replCoord), _finished(false), _result(Status::OK()), _force(false),
- _waitTime(0), _stepDownTime(0) {}
-
- // may block
- Status getResult() {
- _thread->join();
- ASSERT(_finished);
- return _result;
- }
-
- void start(OperationContext* txn) {
- ASSERT(!_finished);
- _thread.reset(new stdx::thread(stdx::bind(&StepDownRunner::_stepDown,
- this,
- txn)));
- }
-
- void reset() {
- ASSERT(_finished);
- _finished = false;
- _result = Status(ErrorCodes::InternalError, "Result Status never set");
- }
-
- void setForce(bool force) {
- _force = force;
- }
-
- void setWaitTime(const Milliseconds& waitTime) {
- _waitTime = waitTime;
- }
-
- void setStepDownTime(const Milliseconds& stepDownTime) {
- _stepDownTime = stepDownTime;
- }
-
- private:
-
- void _stepDown(OperationContext* txn) {
- _result = _replCoord->stepDown(txn, _force, _waitTime, _stepDownTime);
- _finished = true;
- }
-
- ReplicationCoordinatorImpl* _replCoord;
- bool _finished;
- Status _result;
- std::unique_ptr<stdx::thread> _thread;
- bool _force;
- Milliseconds _waitTime;
- Milliseconds _stepDownTime;
- };
-
- TEST_F(StepDownTest, StepDownNotCaughtUp) {
- OperationContextReplMock txn;
- OpTimeWithTermZero optime1(100, 1);
- OpTimeWithTermZero optime2(100, 2);
- // No secondary is caught up
- getReplCoord()->setMyLastOptime(optime2);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
-
- // Try to stepDown but time out because no secondaries are caught up
- StepDownRunner runner(getReplCoord());
- runner.setForce(false);
- runner.setWaitTime(Milliseconds(0));
- runner.setStepDownTime(Milliseconds(1000));
-
- simulateSuccessfulElection();
-
- runner.start(&txn);
- Status status = runner.getResult();
- ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
-
- // Now use "force" to force it to step down even though no one is caught up
- runner.reset();
- getNet()->enterNetwork();
- const Date_t startDate = getNet()->now();
- while (startDate + Milliseconds(1000) < getNet()->now()) {
- while (getNet()->hasReadyRequests()) {
- getNet()->blackHole(getNet()->getNextReadyRequest());
- }
- getNet()->runUntil(startDate + Milliseconds(1000));
- }
- getNet()->exitNetwork();
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- runner.setForce(true);
- runner.start(&txn);
- status = runner.getResult();
- ASSERT_OK(status);
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
-
- }
-
- TEST_F(StepDownTest, StepDownCatchUp) {
- OperationContextReplMock txn;
- OpTimeWithTermZero optime1(100, 1);
- OpTimeWithTermZero optime2(100, 2);
- // No secondary is caught up
- getReplCoord()->setMyLastOptime(optime2);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
-
- // stepDown where the secondary actually has to catch up before the stepDown can succeed
- StepDownRunner runner(getReplCoord());
- runner.setForce(false);
- runner.setWaitTime(Milliseconds(10000));
- runner.setStepDownTime(Milliseconds(60000));
-
- simulateSuccessfulElection();
-
- runner.start(&txn);
-
- // Make a secondary actually catch up
- enterNetwork();
- getNet()->runUntil(getNet()->now() + Milliseconds(2000));
- ASSERT(getNet()->hasReadyRequests());
- NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
- RemoteCommandRequest request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgs hbArgs;
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName(hbArgs.getSetName());
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(hbArgs.getConfigVersion());
- hbResp.setOpTime(optime2);
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(respObj.obj()));
- }
- while (getNet()->hasReadyRequests()) {
- getNet()->blackHole(getNet()->getNextReadyRequest());
- }
- getNet()->runReadyNetworkOperations();
- exitNetwork();
-
- ASSERT_OK(runner.getResult());
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
- }
-
- TEST_F(StepDownTest, InterruptStepDown) {
- const unsigned int opID = 100;
- OperationContextReplMock txn{opID};
- OpTimeWithTermZero optime1(100, 1);
- OpTimeWithTermZero optime2(100, 2);
- // No secondary is caught up
- getReplCoord()->setMyLastOptime(optime2);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
-
- // stepDown where the secondary actually has to catch up before the stepDown can succeed
- StepDownRunner runner(getReplCoord());
- runner.setForce(false);
- runner.setWaitTime(Milliseconds(10000));
- runner.setStepDownTime(Milliseconds(60000));
-
- simulateSuccessfulElection();
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
-
- runner.start(&txn);
-
- txn.setCheckForInterruptStatus(kInterruptedStatus);
- getReplCoord()->interrupt(opID);
-
- ASSERT_EQUALS(ErrorCodes::Interrupted, runner.getResult());
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- }
+ myRid = getReplCoord()->getMyRID();
+ }
+};
+
+TEST_F(ReplCoordTest, UpdateTerm) {
+ ReplCoordTest::setUp();
+ init("mySet/test1:1234,test2:1234,test3:1234");
+
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234") << BSON("_id" << 2 << "host"
+ << "test3:1234"))
+ << "protocolVersion" << 1),
+ HostAndPort("test1", 1234));
+ getReplCoord()->setMyLastOptime(OpTime(Timestamp(100, 1), 0));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+
+ simulateSuccessfulV1Election();
+
+ ASSERT_EQUALS(1, getReplCoord()->getTerm());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+
+ // lower term, no change
+ getReplCoord()->updateTerm(0);
+ ASSERT_EQUALS(1, getReplCoord()->getTerm());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+
+ // same term, no change
+ getReplCoord()->updateTerm(1);
+ ASSERT_EQUALS(1, getReplCoord()->getTerm());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+
+ // higher term, step down and change term
+ Handle cbHandle;
+ getReplCoord()->updateTerm_forTest(2);
+ ASSERT_EQUALS(2, getReplCoord()->getTerm());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+}
+
+TEST_F(StepDownTest, StepDownNotPrimary) {
+ OperationContextReplMock txn;
+ OpTimeWithTermZero optime1(100, 1);
+ // All nodes are caught up
+ getReplCoord()->setMyLastOptime(optime1);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
+
+ Status status = getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(0));
+ ASSERT_EQUALS(ErrorCodes::NotMaster, status);
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+}
+
+TEST_F(StepDownTest, StepDownTimeoutAcquiringGlobalLock) {
+ OperationContextReplMock txn;
+ OpTimeWithTermZero optime1(100, 1);
+ // All nodes are caught up
+ getReplCoord()->setMyLastOptime(optime1);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
+
+ simulateSuccessfulElection();
+
+ // Make sure stepDown cannot grab the global shared lock
+ Lock::GlobalWrite lk(txn.lockState());
+
+ Status status = getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(1000));
+ ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+}
+
+TEST_F(StepDownTest, StepDownNoWaiting) {
+ OperationContextReplMock txn;
+ OpTimeWithTermZero optime1(100, 1);
+ // All nodes are caught up
+ getReplCoord()->setMyLastOptime(optime1);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
+
+ simulateSuccessfulElection();
+
+ enterNetwork();
+ getNet()->runUntil(getNet()->now() + Seconds(2));
+ ASSERT(getNet()->hasReadyRequests());
+ NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
+ RemoteCommandRequest request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgs hbArgs;
+ if (hbArgs.initialize(request.cmdObj).isOK()) {
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName(hbArgs.getSetName());
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(hbArgs.getConfigVersion());
+ hbResp.setOpTime(optime1);
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(respObj.obj()));
+ }
+ while (getNet()->hasReadyRequests()) {
+ getNet()->blackHole(getNet()->getNextReadyRequest());
+ }
+ getNet()->runReadyNetworkOperations();
+ exitNetwork();
+
+
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+ ASSERT_OK(getReplCoord()->stepDown(&txn, false, Milliseconds(0), Milliseconds(1000)));
+ enterNetwork(); // So we can safely inspect the topology coordinator
+ ASSERT_EQUALS(getNet()->now() + Seconds(1), getTopoCoord().getStepDownTime());
+ ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
+ exitNetwork();
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+}
+
+TEST_F(ReplCoordTest, StepDownAndBackUpSingleNode) {
+ init("mySet");
+
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234"))),
+ HostAndPort("test1", 1234));
+ OperationContextReplMock txn;
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+ ASSERT_OK(getReplCoord()->stepDown(&txn, true, Milliseconds(0), Milliseconds(1000)));
+ getNet()->enterNetwork(); // Must do this before inspecting the topocoord
+ Date_t stepdownUntil = getNet()->now() + Seconds(1);
+ ASSERT_EQUALS(stepdownUntil, getTopoCoord().getStepDownTime());
+ ASSERT_TRUE(getTopoCoord().getMemberState().secondary());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+
+ // Now run time forward and make sure that the node becomes primary again when the stepdown
+ // period ends.
+ getNet()->runUntil(stepdownUntil);
+ ASSERT_EQUALS(stepdownUntil, getNet()->now());
+ ASSERT_TRUE(getTopoCoord().getMemberState().primary());
+ getNet()->exitNetwork();
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+}
- TEST_F(ReplCoordTest, GetReplicationModeNone) {
- init();
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- }
+/**
+ * Used to run stepDown() in a separate thread and wait for it to finish without blocking execution of
+ * the test. To use, set the values of "force", "waitTime", and "stepDownTime", which will be
+ * used as the arguments passed to stepDown, and then call
+ * start(), which will spawn a thread that calls stepDown. No calls may be made
+ * on the StepDownRunner instance between calling start and getResult(). After returning
+ * from getResult(), you can call reset() to allow the StepDownRunner to be reused for another
+ * stepDown call.
+ */
+class StepDownRunner {
+public:
+ StepDownRunner(ReplicationCoordinatorImpl* replCoord)
+ : _replCoord(replCoord),
+ _finished(false),
+ _result(Status::OK()),
+ _force(false),
+ _waitTime(0),
+ _stepDownTime(0) {}
- TEST_F(ReplCoordTest, GetReplicationModeMaster) {
- // modeMasterSlave if master set
- ReplSettings settings;
- settings.master = true;
- init(settings);
- ASSERT_EQUALS(ReplicationCoordinator::modeMasterSlave,
- getReplCoord()->getReplicationMode());
+ // may block
+ Status getResult() {
+ _thread->join();
+ ASSERT(_finished);
+ return _result;
}
- TEST_F(ReplCoordTest, GetReplicationModeSlave) {
- // modeMasterSlave if the slave flag was set
- ReplSettings settings;
- settings.slave = SimpleSlave;
- init(settings);
- ASSERT_EQUALS(ReplicationCoordinator::modeMasterSlave,
- getReplCoord()->getReplicationMode());
+ void start(OperationContext* txn) {
+ ASSERT(!_finished);
+ _thread.reset(new stdx::thread(stdx::bind(&StepDownRunner::_stepDown, this, txn)));
}
- TEST_F(ReplCoordTest, GetReplicationModeRepl) {
- // modeReplSet if the set name was supplied.
- ReplSettings settings;
- settings.replSet = "mySet/node1:12345";
- init(settings);
- ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0 ))),
- HostAndPort("node1", 12345));
+ void reset() {
+ ASSERT(_finished);
+ _finished = false;
+ _result = Status(ErrorCodes::InternalError, "Result Status never set");
}
- TEST_F(ReplCoordTest, TestPrepareReplSetUpdatePositionCommand) {
- OperationContextNoop txn;
- init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "test1:1234") <<
- BSON("_id" << 1 << "host" << "test2:1234") <<
- BSON("_id" << 2 << "host" << "test3:1234"))),
- HostAndPort("test1", 1234));
- OpTimeWithTermZero optime1(100, 1);
- OpTimeWithTermZero optime2(100, 2);
- OpTimeWithTermZero optime3(2, 1);
- getReplCoord()->setMyLastOptime(optime1);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime2));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime3));
-
- // Check that the proper BSON is generated for the replSetUpdatePositionCommand
- BSONObjBuilder cmdBuilder;
- getReplCoord()->prepareReplSetUpdatePositionCommand(&cmdBuilder);
- BSONObj cmd = cmdBuilder.done();
-
- ASSERT_EQUALS(2, cmd.nFields());
- ASSERT_EQUALS("replSetUpdatePosition", cmd.firstElement().fieldNameStringData());
-
- std::set<long long> memberIds;
- BSONForEach(entryElement, cmd["optimes"].Obj()) {
- BSONObj entry = entryElement.Obj();
- long long memberId = entry["memberId"].Number();
- memberIds.insert(memberId);
- if (memberId == 0) {
- // TODO(siyuan) Update when we change replSetUpdatePosition format
- ASSERT_EQUALS(optime1.timestamp, entry["optime"].timestamp());
- } else if (memberId == 1) {
- ASSERT_EQUALS(optime2.timestamp, entry["optime"].timestamp());
- } else {
- ASSERT_EQUALS(2, memberId);
- ASSERT_EQUALS(optime3.timestamp, entry["optime"].timestamp());
- }
- }
- ASSERT_EQUALS(3U, memberIds.size()); // Make sure we saw all 3 nodes
+ void setForce(bool force) {
+ _force = force;
}
- TEST_F(ReplCoordTest, SetMaintenanceMode) {
- init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "test1:1234") <<
- BSON("_id" << 1 << "host" << "test2:1234") <<
- BSON("_id" << 2 << "host" << "test3:1234"))),
- HostAndPort("test2", 1234));
- OperationContextNoop txn;
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
-
- // Can't unset maintenance mode if it was never set to begin with.
- Status status = getReplCoord()->setMaintenanceMode(false);
- ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
-
- // valid set
- ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
- ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
-
- // If we go into rollback while in maintenance mode, our state changes to RS_ROLLBACK.
- getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
- ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
-
- // When we go back to SECONDARY, we still observe RECOVERING because of maintenance mode.
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
-
- // Can set multiple times
- ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
- ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
-
- // Need to unset the number of times you set
- ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
- ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
- ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
- status = getReplCoord()->setMaintenanceMode(false);
- // fourth one fails b/c we only set three times
- ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
- // Unsetting maintenance mode changes our state to secondary if maintenance mode was
- // the only thinking keeping us out of it.
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
-
- // From rollback, entering and exiting maintenance mode doesn't change perceived
- // state.
- getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
- ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
- ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
- ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
- ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
- ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
-
- // Rollback is sticky even if entered while in maintenance mode.
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
- ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
- ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
- getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
- ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
- ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
- ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
-
- // Can't modify maintenance mode when PRIMARY
- simulateSuccessfulElection();
-
- status = getReplCoord()->setMaintenanceMode(true);
- ASSERT_EQUALS(ErrorCodes::NotSecondary, status);
- ASSERT_TRUE(getReplCoord()->getMemberState().primary());
-
- simulateStepDownOnIsolation();
-
- status = getReplCoord()->setMaintenanceMode(false);
- ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
- ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
- ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
+ void setWaitTime(const Milliseconds& waitTime) {
+ _waitTime = waitTime;
}
- TEST_F(ReplCoordTest, GetHostsWrittenToReplSet) {
- HostAndPort myHost("node1:12345");
- HostAndPort client1Host("node2:12345");
- HostAndPort client2Host("node3:12345") ;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString()) <<
- BSON("_id" << 1 << "host" << client1Host.toString()) <<
- BSON("_id" << 2 << "host" << client2Host.toString()))),
- HostAndPort("node1", 12345));
- OperationContextNoop txn;
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- getReplCoord()->setMyLastOptime(time2);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
-
- std::vector<HostAndPort> caughtUpHosts = getReplCoord()->getHostsWrittenTo(time2);
- ASSERT_EQUALS(1U, caughtUpHosts.size());
- ASSERT_EQUALS(myHost, caughtUpHosts[0]);
-
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time2));
- caughtUpHosts = getReplCoord()->getHostsWrittenTo(time2);
- ASSERT_EQUALS(2U, caughtUpHosts.size());
- if (myHost == caughtUpHosts[0]) {
- ASSERT_EQUALS(client2Host, caughtUpHosts[1]);
- }
- else {
- ASSERT_EQUALS(client2Host, caughtUpHosts[0]);
- ASSERT_EQUALS(myHost, caughtUpHosts[1]);
- }
+ void setStepDownTime(const Milliseconds& stepDownTime) {
+ _stepDownTime = stepDownTime;
}
- TEST_F(ReplCoordTest, GetHostsWrittenToMasterSlave) {
- ReplSettings settings;
- settings.master = true;
- init(settings);
- HostAndPort clientHost("node2:12345");
- OperationContextNoop txn;
-
- OID client = OID::gen();
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
-
- getExternalState()->setClientHostAndPort(clientHost);
- HandshakeArgs handshake;
- ASSERT_OK(handshake.initialize(BSON("handshake" << client)));
- ASSERT_OK(getReplCoord()->processHandshake(&txn, handshake));
-
- getReplCoord()->setMyLastOptime(time2);
- ASSERT_OK(getReplCoord()->setLastOptimeForSlave(client, time1.timestamp));
-
- std::vector<HostAndPort> caughtUpHosts = getReplCoord()->getHostsWrittenTo(time2);
- ASSERT_EQUALS(0U, caughtUpHosts.size()); // self doesn't get included in master-slave
-
- ASSERT_OK(getReplCoord()->setLastOptimeForSlave(client, time2.timestamp));
- caughtUpHosts = getReplCoord()->getHostsWrittenTo(time2);
- ASSERT_EQUALS(1U, caughtUpHosts.size());
- ASSERT_EQUALS(clientHost, caughtUpHosts[0]);
+private:
+ void _stepDown(OperationContext* txn) {
+ _result = _replCoord->stepDown(txn, _force, _waitTime, _stepDownTime);
+ _finished = true;
}
- TEST_F(ReplCoordTest, GetOtherNodesInReplSetNoConfig) {
- start();
- ASSERT_EQUALS(0U, getReplCoord()->getOtherNodesInReplSet().size());
- }
+ ReplicationCoordinatorImpl* _replCoord;
+ bool _finished;
+ Status _result;
+ std::unique_ptr<stdx::thread> _thread;
+ bool _force;
+ Milliseconds _waitTime;
+ Milliseconds _stepDownTime;
+};
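+
+// A minimal usage sketch of StepDownRunner, following the protocol described in the class
+// comment above. It assumes the same test fixtures used throughout this file (getReplCoord(),
+// OperationContextReplMock) and is illustrative only:
+//
+//     OperationContextReplMock txn;
+//     StepDownRunner runner(getReplCoord());
+//     runner.setForce(false);
+//     runner.setWaitTime(Milliseconds(0));
+//     runner.setStepDownTime(Milliseconds(1000));
+//     runner.start(&txn);                  // spawns a thread that calls stepDown()
+//     Status status = runner.getResult();  // joins the thread and returns stepDown()'s Status
+//     runner.reset();                      // allows the runner to be reused for another call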
- TEST_F(ReplCoordTest, GetOtherNodesInReplSet) {
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "h1") <<
- BSON("_id" << 1 << "host" << "h2") <<
- BSON("_id" << 2 <<
- "host" << "h3" <<
- "priority" << 0 <<
- "hidden" << true))),
- HostAndPort("h1"));
-
- std::vector<HostAndPort> otherNodes = getReplCoord()->getOtherNodesInReplSet();
- ASSERT_EQUALS(2U, otherNodes.size());
- if (otherNodes[0] == HostAndPort("h2")) {
- ASSERT_EQUALS(HostAndPort("h3"), otherNodes[1]);
- }
- else {
- ASSERT_EQUALS(HostAndPort("h3"), otherNodes[0]);
- ASSERT_EQUALS(HostAndPort("h2"), otherNodes[0]);
- }
- }
+TEST_F(StepDownTest, StepDownNotCaughtUp) {
+ OperationContextReplMock txn;
+ OpTimeWithTermZero optime1(100, 1);
+ OpTimeWithTermZero optime2(100, 2);
+ // No secondary is caught up
+ getReplCoord()->setMyLastOptime(optime2);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
- TEST_F(ReplCoordTest, IsMasterNoConfig) {
- start();
- IsMasterResponse response;
+ // Try to stepDown but time out because no secondaries are caught up
+ StepDownRunner runner(getReplCoord());
+ runner.setForce(false);
+ runner.setWaitTime(Milliseconds(0));
+ runner.setStepDownTime(Milliseconds(1000));
- getReplCoord()->fillIsMasterForReplSet(&response);
- ASSERT_FALSE(response.isConfigSet());
- BSONObj responseObj = response.toBSON();
- ASSERT_FALSE(responseObj["ismaster"].Bool());
- ASSERT_FALSE(responseObj["secondary"].Bool());
- ASSERT_TRUE(responseObj["isreplicaset"].Bool());
- ASSERT_EQUALS("Does not have a valid replica set config", responseObj["info"].String());
+ simulateSuccessfulElection();
- IsMasterResponse roundTripped;
- ASSERT_OK(roundTripped.initialize(response.toBSON()));
- }
+ runner.start(&txn);
+ Status status = runner.getResult();
+ ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
- TEST_F(ReplCoordTest, IsMaster) {
- HostAndPort h1("h1");
- HostAndPort h2("h2");
- HostAndPort h3("h3");
- HostAndPort h4("h4");
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString()) <<
- BSON("_id" << 1 << "host" << h2.toString()) <<
- BSON("_id" << 2 <<
- "host" << h3.toString() <<
- "arbiterOnly" << true) <<
- BSON("_id" << 3 <<
- "host" << h4.toString() <<
- "priority" << 0 <<
- "tags" << BSON("key1" << "value1" <<
- "key2" << "value2")))),
- h4);
- getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
-
- IsMasterResponse response;
- getReplCoord()->fillIsMasterForReplSet(&response);
-
- ASSERT_EQUALS("mySet", response.getReplSetName());
- ASSERT_EQUALS(2, response.getReplSetVersion());
- ASSERT_FALSE(response.isMaster());
- ASSERT_TRUE(response.isSecondary());
- // TODO(spencer): test that response includes current primary when there is one.
- ASSERT_FALSE(response.isArbiterOnly());
- ASSERT_TRUE(response.isPassive());
- ASSERT_FALSE(response.isHidden());
- ASSERT_TRUE(response.shouldBuildIndexes());
- ASSERT_EQUALS(Seconds(0), response.getSlaveDelay());
- ASSERT_EQUALS(h4, response.getMe());
-
- std::vector<HostAndPort> hosts = response.getHosts();
- ASSERT_EQUALS(2U, hosts.size());
- if (hosts[0] == h1) {
- ASSERT_EQUALS(h2, hosts[1]);
- }
- else {
- ASSERT_EQUALS(h2, hosts[0]);
- ASSERT_EQUALS(h1, hosts[1]);
+ // Now use "force" to force it to step down even though no one is caught up
+ runner.reset();
+ getNet()->enterNetwork();
+ const Date_t startDate = getNet()->now();
+ while (getNet()->now() < startDate + Milliseconds(1000)) {
+ while (getNet()->hasReadyRequests()) {
+ getNet()->blackHole(getNet()->getNextReadyRequest());
}
- std::vector<HostAndPort> passives = response.getPassives();
- ASSERT_EQUALS(1U, passives.size());
- ASSERT_EQUALS(h4, passives[0]);
- std::vector<HostAndPort> arbiters = response.getArbiters();
- ASSERT_EQUALS(1U, arbiters.size());
- ASSERT_EQUALS(h3, arbiters[0]);
-
- unordered_map<std::string, std::string> tags = response.getTags();
- ASSERT_EQUALS(2U, tags.size());
- ASSERT_EQUALS("value1", tags["key1"]);
- ASSERT_EQUALS("value2", tags["key2"]);
-
- IsMasterResponse roundTripped;
- ASSERT_OK(roundTripped.initialize(response.toBSON()));
- }
-
- TEST_F(ReplCoordTest, ShutDownBeforeStartUpFinished) {
- init();
- startCapturingLogMessages();
- getReplCoord()->shutdown();
- stopCapturingLogMessages();
- ASSERT_EQUALS(1,
- countLogLinesContaining("shutdown() called before startReplication() finished"));
- }
-
- TEST_F(ReplCoordTest, UpdatePositionWithConfigVersionAndMemberIdTest) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- simulateSuccessfulElection();
-
- OpTimeWithTermZero time1(100, 1);
- OpTimeWithTermZero time2(100, 2);
- OpTimeWithTermZero staleTime(10, 0);
- getReplCoord()->setMyLastOptime(time1);
-
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- writeConcern.wNumNodes = 1;
-
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
-
- // receive updatePosition containing ourself, should not process the update for self
- UpdatePositionArgs args;
- ASSERT_OK(args.initialize(BSON("replSetUpdatePosition" << 1 <<
- "optimes" << BSON_ARRAY(
- BSON("cfgver" << 2 <<
- "memberId" << 0 <<
- "optime" << time2.timestamp)))));
-
- ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
-
- // receive updatePosition with incorrect config version
- UpdatePositionArgs args2;
- ASSERT_OK(args2.initialize(BSON("replSetUpdatePosition" << 1 <<
- "optimes" << BSON_ARRAY(
- BSON("cfgver" << 3 <<
- "memberId" << 1 <<
- "optime" << time2.timestamp)))));
-
- long long cfgver;
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
- getReplCoord()->processReplSetUpdatePosition(args2, &cfgver));
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
-
- // receive updatePosition with nonexistent member id
- UpdatePositionArgs args3;
- ASSERT_OK(args3.initialize(BSON("replSetUpdatePosition" << 1 <<
- "optimes" << BSON_ARRAY(
- BSON("cfgver" << 2 <<
- "memberId" << 9 <<
- "optime" << time2.timestamp)))));
-
- ASSERT_EQUALS(ErrorCodes::NodeNotFound,
- getReplCoord()->processReplSetUpdatePosition(args3, 0));
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
-
- // receive a good update position
- getReplCoord()->setMyLastOptime(time2);
- UpdatePositionArgs args4;
- ASSERT_OK(args4.initialize(BSON("replSetUpdatePosition" << 1 <<
- "optimes" << BSON_ARRAY(
- BSON("cfgver" << 2 <<
- "memberId" << 1 <<
- "optime" << time2.timestamp) <<
- BSON("cfgver" << 2 <<
- "memberId" << 2 <<
- "optime" << time2.timestamp)))));
-
- ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args4, 0));
- ASSERT_OK(getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
-
- writeConcern.wNumNodes = 3;
- ASSERT_OK(getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
- }
-
- void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
- OperationContextNoop txn;
- BSONObjBuilder garbage;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 <<
- "host" << "node1:12345" <<
- "priority" << 3) <<
- BSON("_id" << 1 << "host" << "node2:12345") <<
- BSON("_id" << 2 << "host" << "node3:12345")));
- *status = replCoord->processReplSetReconfig(&txn, args, &garbage);
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationReconfigSimple) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 2));
- simulateSuccessfulElection();
-
- OpTimeWithTermZero time(100, 2);
-
- // 3 nodes waiting for time
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
- writeConcern.wNumNodes = 3;
-
- ReplicationAwaiter awaiter(getReplCoord(), &txn);
- awaiter.setOpTime(time);
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
-
- // reconfig
- Status status(ErrorCodes::InternalError, "Not Set");
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
-
- NetworkInterfaceMock* net = getNet();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- repl::ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- repl::ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(2);
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
- reconfigThread.join();
- ASSERT_OK(status);
-
- // satisfy write concern
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(3, 0, time));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(3, 1, time));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(3, 2, time));
- ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
- ASSERT_OK(statusAndDur.status);
- awaiter.reset();
- }
-
- void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* status) {
- OperationContextNoop txn;
- BSONObjBuilder garbage;
- ReplSetReconfigArgs args;
- args.force = false;
- args.newConfigObj = BSON("_id" << "mySet" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "node1:12345") <<
- BSON("_id" << 2 << "host" << "node3:12345")));
- *status = replCoord->processReplSetReconfig(&txn, args, &garbage);
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationReconfigNodeCountExceedsNumberOfNodes) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 2));
- simulateSuccessfulElection();
-
- OpTimeWithTermZero time(100, 2);
-
- // 3 nodes waiting for time
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
- writeConcern.wNumNodes = 3;
-
- ReplicationAwaiter awaiter(getReplCoord(), &txn);
- awaiter.setOpTime(time);
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
-
- // reconfig to fewer nodes
- Status status(ErrorCodes::InternalError, "Not Set");
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfigToFewer, getReplCoord(), &status));
-
- NetworkInterfaceMock* net = getNet();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- repl::ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- repl::ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(2);
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
- reconfigThread.join();
- ASSERT_OK(status);
- std::cout << "asdf" << std::endl;
-
- // writeconcern feasability should be reevaluated and an error should be returned
- ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
- ASSERT_EQUALS(ErrorCodes::CannotSatisfyWriteConcern, statusAndDur.status);
- awaiter.reset();
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationReconfigToSmallerMajority) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2) <<
- BSON("host" << "node4:12345" << "_id" << 3) <<
- BSON("host" << "node5:12345" << "_id" << 4))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 1));
- simulateSuccessfulElection();
-
- OpTimeWithTermZero time(100, 2);
-
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time));
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time));
-
-
- // majority nodes waiting for time
- WriteConcernOptions writeConcern;
- writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
- writeConcern.wMode = WriteConcernOptions::kMajority;
-
- ReplicationAwaiter awaiter(getReplCoord(), &txn);
- awaiter.setOpTime(time);
- awaiter.setWriteConcern(writeConcern);
- awaiter.start(&txn);
-
- // demonstrate that majority cannot currently be satisfied
- WriteConcernOptions writeConcern2;
- writeConcern2.wTimeout = WriteConcernOptions::kNoWaiting;
- writeConcern2.wMode = WriteConcernOptions::kMajority;
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(&txn, time, writeConcern2).status);
-
- // reconfig to three nodes
- Status status(ErrorCodes::InternalError, "Not Set");
- stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
-
- NetworkInterfaceMock* net = getNet();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- repl::ReplSetHeartbeatArgs hbArgs;
- ASSERT_OK(hbArgs.initialize(request.cmdObj));
- repl::ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName("mySet");
+ getNet()->runUntil(startDate + Milliseconds(1000));
+ }
+ getNet()->exitNetwork();
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+ runner.setForce(true);
+ runner.start(&txn);
+ status = runner.getResult();
+ ASSERT_OK(status);
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+}
+
+TEST_F(StepDownTest, StepDownCatchUp) {
+ OperationContextReplMock txn;
+ OpTimeWithTermZero optime1(100, 1);
+ OpTimeWithTermZero optime2(100, 2);
+ // No secondary is caught up
+ getReplCoord()->setMyLastOptime(optime2);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
+
+ // stepDown where the secondary actually has to catch up before the stepDown can succeed
+ StepDownRunner runner(getReplCoord());
+ runner.setForce(false);
+ runner.setWaitTime(Milliseconds(10000));
+ runner.setStepDownTime(Milliseconds(60000));
+
+ simulateSuccessfulElection();
+
+ runner.start(&txn);
+
+ // Make a secondary actually catch up
+ enterNetwork();
+ getNet()->runUntil(getNet()->now() + Milliseconds(2000));
+ ASSERT(getNet()->hasReadyRequests());
+ NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
+ RemoteCommandRequest request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgs hbArgs;
+ if (hbArgs.initialize(request.cmdObj).isOK()) {
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName(hbArgs.getSetName());
hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(2);
+ hbResp.setConfigVersion(hbArgs.getConfigVersion());
+ hbResp.setOpTime(optime2);
BSONObjBuilder respObj;
respObj << "ok" << 1;
hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
- reconfigThread.join();
- ASSERT_OK(status);
-
- // writeconcern feasability should be reevaluated and be satisfied
- ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
- ASSERT_OK(statusAndDur.status);
- awaiter.reset();
- }
-
- TEST_F(ReplCoordTest, AwaitReplicationMajority) {
- // Test that we can satisfy majority write concern can only be
- // statisfied by voting data-bearing members.
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2) <<
- BSON("host" << "node4:12345" <<
- "_id" << 3 <<
- "votes" << 0) <<
- BSON("host" << "node5:12345" <<
- "_id" << 4 <<
- "arbiterOnly" << true))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- OpTimeWithTermZero time(100, 0);
- getReplCoord()->setMyLastOptime(time);
- simulateSuccessfulElection();
-
- WriteConcernOptions majorityWriteConcern;
- majorityWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
- majorityWriteConcern.wMode = WriteConcernOptions::kMajority;
-
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(&txn, time, majorityWriteConcern).status);
-
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time));
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(&txn, time, majorityWriteConcern).status);
-
- // this member does not vote and as a result should not count towards write concern
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 3, time));
- ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
- getReplCoord()->awaitReplication(&txn, time, majorityWriteConcern).status);
-
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time));
- ASSERT_OK(getReplCoord()->awaitReplication(&txn, time, majorityWriteConcern).status);
- }
-
- TEST_F(ReplCoordTest, LastCommittedOpTime) {
- // Test that the commit level advances properly.
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0) <<
- BSON("host" << "node2:12345" << "_id" << 1) <<
- BSON("host" << "node3:12345" << "_id" << 2) <<
- BSON("host" << "node4:12345" <<
- "_id" << 3 <<
- "votes" << 0) <<
- BSON("host" << "node5:12345" <<
- "_id" << 4 <<
- "arbiterOnly" << true))),
- HostAndPort("node1", 12345));
- ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
- OpTimeWithTermZero zero(0, 0);
- OpTimeWithTermZero time(100, 0);
- getReplCoord()->setMyLastOptime(time);
- simulateSuccessfulElection();
-
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time));
- ASSERT_EQUALS((OpTime)zero, getReplCoord()->getLastCommittedOpTime());
-
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 3, time));
- ASSERT_EQUALS((OpTime)zero, getReplCoord()->getLastCommittedOpTime());
-
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time));
- ASSERT_EQUALS((OpTime)time, getReplCoord()->getLastCommittedOpTime());
-
-
- // Set a new, later OpTime.
- OpTimeWithTermZero newTime = OpTimeWithTermZero(100, 1);
- getReplCoord()->setMyLastOptime(newTime);
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 3, newTime));
- ASSERT_EQUALS((OpTime)time, getReplCoord()->getLastCommittedOpTime());
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, newTime));
- // Reached majority of voting nodes with newTime.
- ASSERT_EQUALS((OpTime)newTime, getReplCoord()->getLastCommittedOpTime());
- ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, newTime));
- ASSERT_EQUALS((OpTime)newTime, getReplCoord()->getLastCommittedOpTime());
- }
-
- TEST_F(ReplCoordTest, CantUseReadAfterIfNotReplSet) {
- init(ReplSettings());
- OperationContextNoop txn;
- auto result = getReplCoord()->waitUntilOpTime(&txn,
- ReadAfterOpTimeArgs(OpTimeWithTermZero(50, 0)));
-
- ASSERT_FALSE(result.didWait());
- ASSERT_EQUALS(ErrorCodes::NotAReplicaSet, result.getStatus());
- }
-
- TEST_F(ReplCoordTest, ReadAfterWhileShutdown) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0))),
- HostAndPort("node1", 12345));
-
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(10, 0));
-
- shutdown();
-
- auto result = getReplCoord()->waitUntilOpTime(&txn,
- ReadAfterOpTimeArgs(OpTimeWithTermZero(50, 0)));
-
- ASSERT_TRUE(result.didWait());
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, result.getStatus());
- }
-
- TEST_F(ReplCoordTest, ReadAfterInterrupted) {
- OperationContextReplMock txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0))),
- HostAndPort("node1", 12345));
-
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(10, 0));
-
- txn.setCheckForInterruptStatus(Status(ErrorCodes::Interrupted, "test"));
-
- auto result = getReplCoord()->waitUntilOpTime(&txn,
- ReadAfterOpTimeArgs(OpTimeWithTermZero(50, 0)));
-
- ASSERT_TRUE(result.didWait());
- ASSERT_EQUALS(ErrorCodes::Interrupted, result.getStatus());
- }
-
- TEST_F(ReplCoordTest, ReadAfterNoOpTime) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0))),
- HostAndPort("node1", 12345));
-
- auto result = getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs());
-
- ASSERT_FALSE(result.didWait());
- ASSERT_OK(result.getStatus());
- }
-
- TEST_F(ReplCoordTest, ReadAfterGreaterOpTime) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0))),
- HostAndPort("node1", 12345));
-
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
- auto result = getReplCoord()->waitUntilOpTime(&txn,
- ReadAfterOpTimeArgs(OpTimeWithTermZero(50, 0)));
-
- ASSERT_TRUE(result.didWait());
- ASSERT_OK(result.getStatus());
- }
-
- TEST_F(ReplCoordTest, ReadAfterEqualOpTime) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0))),
- HostAndPort("node1", 12345));
-
-
- OpTimeWithTermZero time(100, 0);
- getReplCoord()->setMyLastOptime(time);
- auto result = getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(time));
-
- ASSERT_TRUE(result.didWait());
- ASSERT_OK(result.getStatus());
- }
-
- TEST_F(ReplCoordTest, ReadAfterDeferredGreaterOpTime) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0))),
- HostAndPort("node1", 12345));
-
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(0, 0));
-
- auto pseudoLogOp = std::async(std::launch::async, [this]() {
- // Not guaranteed to be scheduled after waitUnitl blocks...
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(200, 0));
- });
-
- auto result = getReplCoord()->waitUntilOpTime(&txn,
- ReadAfterOpTimeArgs(OpTimeWithTermZero(100, 0)));
- pseudoLogOp.get();
-
- ASSERT_TRUE(result.didWait());
- ASSERT_OK(result.getStatus());
- }
-
- TEST_F(ReplCoordTest, ReadAfterDeferredEqualOpTime) {
- OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id" << "mySet" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(BSON("host" << "node1:12345" << "_id" << 0))),
- HostAndPort("node1", 12345));
-
- getReplCoord()->setMyLastOptime(OpTimeWithTermZero(0, 0));
-
- OpTimeWithTermZero opTimeToWait(100, 0);
-
- auto pseudoLogOp = std::async(std::launch::async, [this, &opTimeToWait]() {
- // Not guaranteed to be scheduled after waitUnitl blocks...
- getReplCoord()->setMyLastOptime(opTimeToWait);
- });
-
- auto result = getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(opTimeToWait));
- pseudoLogOp.get();
-
- ASSERT_TRUE(result.didWait());
- ASSERT_OK(result.getStatus());
+ getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(respObj.obj()));
+ }
+ while (getNet()->hasReadyRequests()) {
+ getNet()->blackHole(getNet()->getNextReadyRequest());
+ }
+ getNet()->runReadyNetworkOperations();
+ exitNetwork();
+
+ ASSERT_OK(runner.getResult());
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+}
+
+TEST_F(StepDownTest, InterruptStepDown) {
+ const unsigned int opID = 100;
+ OperationContextReplMock txn{opID};
+ OpTimeWithTermZero optime1(100, 1);
+ OpTimeWithTermZero optime2(100, 2);
+ // No secondary is caught up
+ getReplCoord()->setMyLastOptime(optime2);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime1));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime1));
+
+ // Start a stepDown that must wait for a secondary to catch up, so there is time to interrupt it
+ StepDownRunner runner(getReplCoord());
+ runner.setForce(false);
+ runner.setWaitTime(Milliseconds(10000));
+ runner.setStepDownTime(Milliseconds(60000));
+
+ simulateSuccessfulElection();
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+
+ runner.start(&txn);
+
+ txn.setCheckForInterruptStatus(kInterruptedStatus);
+ getReplCoord()->interrupt(opID);
+
+ ASSERT_EQUALS(ErrorCodes::Interrupted, runner.getResult());
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+}
+
+TEST_F(ReplCoordTest, GetReplicationModeNone) {
+ init();
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+}
+
+TEST_F(ReplCoordTest, GetReplicationModeMaster) {
+ // modeMasterSlave if master set
+ ReplSettings settings;
+ settings.master = true;
+ init(settings);
+ ASSERT_EQUALS(ReplicationCoordinator::modeMasterSlave, getReplCoord()->getReplicationMode());
+}
+
+TEST_F(ReplCoordTest, GetReplicationModeSlave) {
+ // modeMasterSlave if the slave flag was set
+ ReplSettings settings;
+ settings.slave = SimpleSlave;
+ init(settings);
+ ASSERT_EQUALS(ReplicationCoordinator::modeMasterSlave, getReplCoord()->getReplicationMode());
+}
+
+TEST_F(ReplCoordTest, GetReplicationModeRepl) {
+ // modeReplSet if the set name was supplied.
+ ReplSettings settings;
+ settings.replSet = "mySet/node1:12345";
+ init(settings);
+ ASSERT_EQUALS(ReplicationCoordinator::modeReplSet, getReplCoord()->getReplicationMode());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+}
+
+TEST_F(ReplCoordTest, TestPrepareReplSetUpdatePositionCommand) {
+ OperationContextNoop txn;
+ init("mySet/test1:1234,test2:1234,test3:1234");
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234") << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test1", 1234));
+ OpTimeWithTermZero optime1(100, 1);
+ OpTimeWithTermZero optime2(100, 2);
+ OpTimeWithTermZero optime3(2, 1);
+ getReplCoord()->setMyLastOptime(optime1);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 1, optime2));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(1, 2, optime3));
+
+ // Check that the proper BSON is generated for the replSetUpdatePositionCommand
+ BSONObjBuilder cmdBuilder;
+ getReplCoord()->prepareReplSetUpdatePositionCommand(&cmdBuilder);
+ BSONObj cmd = cmdBuilder.done();
+
+ ASSERT_EQUALS(2, cmd.nFields());
+ ASSERT_EQUALS("replSetUpdatePosition", cmd.firstElement().fieldNameStringData());
+
+ std::set<long long> memberIds;
+ BSONForEach(entryElement, cmd["optimes"].Obj()) {
+ BSONObj entry = entryElement.Obj();
+ long long memberId = entry["memberId"].Number();
+ memberIds.insert(memberId);
+ if (memberId == 0) {
+ // TODO(siyuan) Update when we change replSetUpdatePosition format
+ ASSERT_EQUALS(optime1.timestamp, entry["optime"].timestamp());
+ } else if (memberId == 1) {
+ ASSERT_EQUALS(optime2.timestamp, entry["optime"].timestamp());
+ } else {
+ ASSERT_EQUALS(2, memberId);
+ ASSERT_EQUALS(optime3.timestamp, entry["optime"].timestamp());
+ }
}
-
- // TODO(schwerin): Unit test election id updating
+ ASSERT_EQUALS(3U, memberIds.size()); // Make sure we saw all 3 nodes
+}
+
+TEST_F(ReplCoordTest, SetMaintenanceMode) {
+ init("mySet/test1:1234,test2:1234,test3:1234");
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234") << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
+ OperationContextNoop txn;
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+
+ // Can't unset maintenance mode if it was never set to begin with.
+ Status status = getReplCoord()->setMaintenanceMode(false);
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+
+ // valid set
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
+ ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
+
+ // If we go into rollback while in maintenance mode, our state changes to RS_ROLLBACK.
+ getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
+
+ // When we go back to SECONDARY, we still observe RECOVERING because of maintenance mode.
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
+
+ // Can set multiple times
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
+
+ // Need to unset as many times as it was set
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
+ status = getReplCoord()->setMaintenanceMode(false);
+ // fourth one fails b/c we only set three times
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
+ // Unsetting maintenance mode changes our state to secondary if maintenance mode was
+ // the only thing keeping us out of it.
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+
+ // From rollback, entering and exiting maintenance mode doesn't change perceived
+ // state.
+ getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
+
+ // Rollback is sticky even if entered while in maintenance mode.
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
+ ASSERT_TRUE(getReplCoord()->getMemberState().recovering());
+ getReplCoord()->setFollowerMode(MemberState::RS_ROLLBACK);
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
+ ASSERT_TRUE(getReplCoord()->getMemberState().rollback());
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+
+ // Can't modify maintenance mode when PRIMARY
+ simulateSuccessfulElection();
+
+ status = getReplCoord()->setMaintenanceMode(true);
+ ASSERT_EQUALS(ErrorCodes::NotSecondary, status);
+ ASSERT_TRUE(getReplCoord()->getMemberState().primary());
+
+ simulateStepDownOnIsolation();
+
+ status = getReplCoord()->setMaintenanceMode(false);
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, status);
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(true));
+ ASSERT_OK(getReplCoord()->setMaintenanceMode(false));
+}
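+
+// The assertions above suggest a simple counter model for maintenance mode; this is a sketch
+// inferred from the test, not a statement about the implementation:
+//
+//     setMaintenanceMode(true)   // increments the count; state reads RECOVERING while count > 0
+//     setMaintenanceMode(false)  // decrements the count; OperationFailed if the count is already 0
+//     // once the count returns to 0 the node reports SECONDARY again, unless it is in ROLLBACK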
+
+TEST_F(ReplCoordTest, GetHostsWrittenToReplSet) {
+ HostAndPort myHost("node1:12345");
+ HostAndPort client1Host("node2:12345");
+ HostAndPort client2Host("node3:12345");
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
+ << BSON("_id" << 1 << "host" << client1Host.toString())
+ << BSON("_id" << 2 << "host" << client2Host.toString()))),
+ HostAndPort("node1", 12345));
+ OperationContextNoop txn;
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ getReplCoord()->setMyLastOptime(time2);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time1));
+
+ std::vector<HostAndPort> caughtUpHosts = getReplCoord()->getHostsWrittenTo(time2);
+ ASSERT_EQUALS(1U, caughtUpHosts.size());
+ ASSERT_EQUALS(myHost, caughtUpHosts[0]);
+
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time2));
+ caughtUpHosts = getReplCoord()->getHostsWrittenTo(time2);
+ ASSERT_EQUALS(2U, caughtUpHosts.size());
+ if (myHost == caughtUpHosts[0]) {
+ ASSERT_EQUALS(client2Host, caughtUpHosts[1]);
+ } else {
+ ASSERT_EQUALS(client2Host, caughtUpHosts[0]);
+ ASSERT_EQUALS(myHost, caughtUpHosts[1]);
+ }
+}
+
+TEST_F(ReplCoordTest, GetHostsWrittenToMasterSlave) {
+ ReplSettings settings;
+ settings.master = true;
+ init(settings);
+ HostAndPort clientHost("node2:12345");
+ OperationContextNoop txn;
+
+ OID client = OID::gen();
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+
+ getExternalState()->setClientHostAndPort(clientHost);
+ HandshakeArgs handshake;
+ ASSERT_OK(handshake.initialize(BSON("handshake" << client)));
+ ASSERT_OK(getReplCoord()->processHandshake(&txn, handshake));
+
+ getReplCoord()->setMyLastOptime(time2);
+ ASSERT_OK(getReplCoord()->setLastOptimeForSlave(client, time1.timestamp));
+
+ std::vector<HostAndPort> caughtUpHosts = getReplCoord()->getHostsWrittenTo(time2);
+ ASSERT_EQUALS(0U, caughtUpHosts.size()); // self doesn't get included in master-slave
+
+ ASSERT_OK(getReplCoord()->setLastOptimeForSlave(client, time2.timestamp));
+ caughtUpHosts = getReplCoord()->getHostsWrittenTo(time2);
+ ASSERT_EQUALS(1U, caughtUpHosts.size());
+ ASSERT_EQUALS(clientHost, caughtUpHosts[0]);
+}
+
+TEST_F(ReplCoordTest, GetOtherNodesInReplSetNoConfig) {
+ start();
+ ASSERT_EQUALS(0U, getReplCoord()->getOtherNodesInReplSet().size());
+}
+
+TEST_F(ReplCoordTest, GetOtherNodesInReplSet) {
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "h1")
+ << BSON("_id" << 1 << "host"
+ << "h2")
+ << BSON("_id" << 2 << "host"
+ << "h3"
+ << "priority" << 0 << "hidden" << true))),
+ HostAndPort("h1"));
+
+ std::vector<HostAndPort> otherNodes = getReplCoord()->getOtherNodesInReplSet();
+ ASSERT_EQUALS(2U, otherNodes.size());
+ if (otherNodes[0] == HostAndPort("h2")) {
+ ASSERT_EQUALS(HostAndPort("h3"), otherNodes[1]);
+ } else {
+ ASSERT_EQUALS(HostAndPort("h3"), otherNodes[0]);
+ ASSERT_EQUALS(HostAndPort("h2"), otherNodes[0]);
+ }
+}
+
+TEST_F(ReplCoordTest, IsMasterNoConfig) {
+ start();
+ IsMasterResponse response;
+
+ getReplCoord()->fillIsMasterForReplSet(&response);
+ ASSERT_FALSE(response.isConfigSet());
+ BSONObj responseObj = response.toBSON();
+ ASSERT_FALSE(responseObj["ismaster"].Bool());
+ ASSERT_FALSE(responseObj["secondary"].Bool());
+ ASSERT_TRUE(responseObj["isreplicaset"].Bool());
+ ASSERT_EQUALS("Does not have a valid replica set config", responseObj["info"].String());
+
+ IsMasterResponse roundTripped;
+ ASSERT_OK(roundTripped.initialize(response.toBSON()));
+}
+
+TEST_F(ReplCoordTest, IsMaster) {
+ HostAndPort h1("h1");
+ HostAndPort h2("h2");
+ HostAndPort h3("h3");
+ HostAndPort h4("h4");
+ assertStartSuccess(
+ BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString())
+ << BSON("_id" << 1 << "host" << h2.toString())
+ << BSON("_id" << 2 << "host" << h3.toString() << "arbiterOnly" << true)
+ << BSON("_id" << 3 << "host" << h4.toString() << "priority" << 0
+ << "tags" << BSON("key1"
+ << "value1"
+ << "key2"
+ << "value2")))),
+ h4);
+ getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
+
+ IsMasterResponse response;
+ getReplCoord()->fillIsMasterForReplSet(&response);
+
+ ASSERT_EQUALS("mySet", response.getReplSetName());
+ ASSERT_EQUALS(2, response.getReplSetVersion());
+ ASSERT_FALSE(response.isMaster());
+ ASSERT_TRUE(response.isSecondary());
+ // TODO(spencer): test that response includes current primary when there is one.
+ ASSERT_FALSE(response.isArbiterOnly());
+ ASSERT_TRUE(response.isPassive());
+ ASSERT_FALSE(response.isHidden());
+ ASSERT_TRUE(response.shouldBuildIndexes());
+ ASSERT_EQUALS(Seconds(0), response.getSlaveDelay());
+ ASSERT_EQUALS(h4, response.getMe());
+
+ std::vector<HostAndPort> hosts = response.getHosts();
+ ASSERT_EQUALS(2U, hosts.size());
+ if (hosts[0] == h1) {
+ ASSERT_EQUALS(h2, hosts[1]);
+ } else {
+ ASSERT_EQUALS(h2, hosts[0]);
+ ASSERT_EQUALS(h1, hosts[1]);
+ }
+ std::vector<HostAndPort> passives = response.getPassives();
+ ASSERT_EQUALS(1U, passives.size());
+ ASSERT_EQUALS(h4, passives[0]);
+ std::vector<HostAndPort> arbiters = response.getArbiters();
+ ASSERT_EQUALS(1U, arbiters.size());
+ ASSERT_EQUALS(h3, arbiters[0]);
+
+ unordered_map<std::string, std::string> tags = response.getTags();
+ ASSERT_EQUALS(2U, tags.size());
+ ASSERT_EQUALS("value1", tags["key1"]);
+ ASSERT_EQUALS("value2", tags["key2"]);
+
+ IsMasterResponse roundTripped;
+ ASSERT_OK(roundTripped.initialize(response.toBSON()));
+}
+
+TEST_F(ReplCoordTest, ShutDownBeforeStartUpFinished) {
+ init();
+ startCapturingLogMessages();
+ getReplCoord()->shutdown();
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1,
+ countLogLinesContaining("shutdown() called before startReplication() finished"));
+}
+
+TEST_F(ReplCoordTest, UpdatePositionWithConfigVersionAndMemberIdTest) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ simulateSuccessfulElection();
+
+ OpTimeWithTermZero time1(100, 1);
+ OpTimeWithTermZero time2(100, 2);
+ OpTimeWithTermZero staleTime(10, 0);
+ getReplCoord()->setMyLastOptime(time1);
+
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ writeConcern.wNumNodes = 1;
+
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
+ getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
+
+ // receive an updatePosition containing ourselves; the update for self should not be processed
+ UpdatePositionArgs args;
+ ASSERT_OK(args.initialize(BSON("replSetUpdatePosition"
+ << 1 << "optimes"
+ << BSON_ARRAY(BSON("cfgver" << 2 << "memberId" << 0 << "optime"
+ << time2.timestamp)))));
+
+ ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
+ getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
+
+ // receive updatePosition with incorrect config version
+ UpdatePositionArgs args2;
+ ASSERT_OK(args2.initialize(BSON("replSetUpdatePosition"
+ << 1 << "optimes"
+ << BSON_ARRAY(BSON("cfgver" << 3 << "memberId" << 1 << "optime"
+ << time2.timestamp)))));
+
+ long long cfgver;
+ ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
+ getReplCoord()->processReplSetUpdatePosition(args2, &cfgver));
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
+ getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
+
+ // receive updatePosition with nonexistent member id
+ UpdatePositionArgs args3;
+ ASSERT_OK(args3.initialize(BSON("replSetUpdatePosition"
+ << 1 << "optimes"
+ << BSON_ARRAY(BSON("cfgver" << 2 << "memberId" << 9 << "optime"
+ << time2.timestamp)))));
+
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound, getReplCoord()->processReplSetUpdatePosition(args3, 0));
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
+ getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
+
+ // receive a good update position
+ getReplCoord()->setMyLastOptime(time2);
+ UpdatePositionArgs args4;
+ ASSERT_OK(args4.initialize(
+ BSON("replSetUpdatePosition"
+ << 1 << "optimes"
+ << BSON_ARRAY(
+ BSON("cfgver" << 2 << "memberId" << 1 << "optime" << time2.timestamp)
+ << BSON("cfgver" << 2 << "memberId" << 2 << "optime" << time2.timestamp)))));
+
+ ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args4, 0));
+ ASSERT_OK(getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
+
+ writeConcern.wNumNodes = 3;
+ ASSERT_OK(getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
+}
+
+void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
+ OperationContextNoop txn;
+ BSONObjBuilder garbage;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"
+ << "priority" << 3)
+ << BSON("_id" << 1 << "host"
+ << "node2:12345")
+ << BSON("_id" << 2 << "host"
+ << "node3:12345")));
+ *status = replCoord->processReplSetReconfig(&txn, args, &garbage);
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationReconfigSimple) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 2));
+ simulateSuccessfulElection();
+
+ OpTimeWithTermZero time(100, 2);
+
+ // 3 nodes waiting for time
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
+ writeConcern.wNumNodes = 3;
+
+ ReplicationAwaiter awaiter(getReplCoord(), &txn);
+ awaiter.setOpTime(time);
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+
+ // reconfig
+ Status status(ErrorCodes::InternalError, "Not Set");
+ stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
+
+ NetworkInterfaceMock* net = getNet();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ repl::ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ repl::ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(2);
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+ reconfigThread.join();
+ ASSERT_OK(status);
+
+ // satisfy write concern
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(3, 0, time));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(3, 1, time));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(3, 2, time));
+ ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
+ ASSERT_OK(statusAndDur.status);
+ awaiter.reset();
+}
+
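+// Like doReplSetReconfig, but the new config drops node2, leaving only two
+// members, so a previously issued w:3 write concern can no longer be satisfied.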
+void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* status) {
+ OperationContextNoop txn;
+ BSONObjBuilder garbage;
+ ReplSetReconfigArgs args;
+ args.force = false;
+ args.newConfigObj = BSON("_id"
+ << "mySet"
+ << "version" << 3 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node3:12345")));
+ *status = replCoord->processReplSetReconfig(&txn, args, &garbage);
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationReconfigNodeCountExceedsNumberOfNodes) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 2));
+ simulateSuccessfulElection();
+
+ OpTimeWithTermZero time(100, 2);
+
+ // 3 nodes waiting for time
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
+ writeConcern.wNumNodes = 3;
+
+ ReplicationAwaiter awaiter(getReplCoord(), &txn);
+ awaiter.setOpTime(time);
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+
+ // reconfig to fewer nodes
+ Status status(ErrorCodes::InternalError, "Not Set");
+ stdx::thread reconfigThread(stdx::bind(doReplSetReconfigToFewer, getReplCoord(), &status));
+
+ NetworkInterfaceMock* net = getNet();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ repl::ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ repl::ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(2);
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+ reconfigThread.join();
+ ASSERT_OK(status);
+
+    // write concern feasibility should be reevaluated and an error should be returned
+ ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
+ ASSERT_EQUALS(ErrorCodes::CannotSatisfyWriteConcern, statusAndDur.status);
+ awaiter.reset();
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationReconfigToSmallerMajority) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3) << BSON("host"
+ << "node5:12345"
+ << "_id" << 4))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 1));
+ simulateSuccessfulElection();
+
+ OpTimeWithTermZero time(100, 2);
+
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time));
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time));
+
+
+    // a majority of nodes waiting for time
+ WriteConcernOptions writeConcern;
+ writeConcern.wTimeout = WriteConcernOptions::kNoTimeout;
+ writeConcern.wMode = WriteConcernOptions::kMajority;
+
+ ReplicationAwaiter awaiter(getReplCoord(), &txn);
+ awaiter.setOpTime(time);
+ awaiter.setWriteConcern(writeConcern);
+ awaiter.start(&txn);
+
+ // demonstrate that majority cannot currently be satisfied
+ WriteConcernOptions writeConcern2;
+ writeConcern2.wTimeout = WriteConcernOptions::kNoWaiting;
+ writeConcern2.wMode = WriteConcernOptions::kMajority;
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
+ getReplCoord()->awaitReplication(&txn, time, writeConcern2).status);
+
+ // reconfig to three nodes
+ Status status(ErrorCodes::InternalError, "Not Set");
+ stdx::thread reconfigThread(stdx::bind(doReplSetReconfig, getReplCoord(), &status));
+
+ NetworkInterfaceMock* net = getNet();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ repl::ReplSetHeartbeatArgs hbArgs;
+ ASSERT_OK(hbArgs.initialize(request.cmdObj));
+ repl::ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName("mySet");
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(2);
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
+ reconfigThread.join();
+ ASSERT_OK(status);
+
+    // write concern feasibility should be reevaluated and now be satisfied
+ ReplicationCoordinator::StatusAndDuration statusAndDur = awaiter.getResult();
+ ASSERT_OK(statusAndDur.status);
+ awaiter.reset();
+}
+
+TEST_F(ReplCoordTest, AwaitReplicationMajority) {
+    // Test that a majority write concern can only be
+    // satisfied by voting data-bearing members.
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3 << "votes" << 0)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4 << "arbiterOnly" << true))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ OpTimeWithTermZero time(100, 0);
+ getReplCoord()->setMyLastOptime(time);
+ simulateSuccessfulElection();
+
+ WriteConcernOptions majorityWriteConcern;
+ majorityWriteConcern.wTimeout = WriteConcernOptions::kNoWaiting;
+ majorityWriteConcern.wMode = WriteConcernOptions::kMajority;
+
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
+ getReplCoord()->awaitReplication(&txn, time, majorityWriteConcern).status);
+
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time));
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
+ getReplCoord()->awaitReplication(&txn, time, majorityWriteConcern).status);
+
+ // this member does not vote and as a result should not count towards write concern
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 3, time));
+ ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
+ getReplCoord()->awaitReplication(&txn, time, majorityWriteConcern).status);
+
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time));
+ ASSERT_OK(getReplCoord()->awaitReplication(&txn, time, majorityWriteConcern).status);
+}
+
+TEST_F(ReplCoordTest, LastCommittedOpTime) {
+ // Test that the commit level advances properly.
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id" << 1) << BSON("host"
+ << "node3:12345"
+ << "_id" << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id" << 3 << "votes" << 0)
+ << BSON("host"
+ << "node5:12345"
+ << "_id" << 4 << "arbiterOnly" << true))),
+ HostAndPort("node1", 12345));
+ ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
+ OpTimeWithTermZero zero(0, 0);
+ OpTimeWithTermZero time(100, 0);
+ getReplCoord()->setMyLastOptime(time);
+ simulateSuccessfulElection();
+
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, time));
+ ASSERT_EQUALS((OpTime)zero, getReplCoord()->getLastCommittedOpTime());
+
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 3, time));
+ ASSERT_EQUALS((OpTime)zero, getReplCoord()->getLastCommittedOpTime());
+
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, time));
+ ASSERT_EQUALS((OpTime)time, getReplCoord()->getLastCommittedOpTime());
+
+
+ // Set a new, later OpTime.
+ OpTimeWithTermZero newTime = OpTimeWithTermZero(100, 1);
+ getReplCoord()->setMyLastOptime(newTime);
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 3, newTime));
+ ASSERT_EQUALS((OpTime)time, getReplCoord()->getLastCommittedOpTime());
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 2, newTime));
+ // Reached majority of voting nodes with newTime.
+ ASSERT_EQUALS((OpTime)newTime, getReplCoord()->getLastCommittedOpTime());
+ ASSERT_OK(getReplCoord()->setLastOptime_forTest(2, 1, newTime));
+ ASSERT_EQUALS((OpTime)newTime, getReplCoord()->getLastCommittedOpTime());
+}
+
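+// The ReadAfter* tests below exercise waitUntilOpTime, which, in a replica set,
+// blocks the caller until this node's last applied optime reaches the requested
+// optime or the wait is aborted (shutdown, interruption).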
+TEST_F(ReplCoordTest, CantUseReadAfterIfNotReplSet) {
+ init(ReplSettings());
+ OperationContextNoop txn;
+ auto result =
+ getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(OpTimeWithTermZero(50, 0)));
+
+ ASSERT_FALSE(result.didWait());
+ ASSERT_EQUALS(ErrorCodes::NotAReplicaSet, result.getStatus());
+}
+
+TEST_F(ReplCoordTest, ReadAfterWhileShutdown) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(10, 0));
+
+ shutdown();
+
+ auto result =
+ getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(OpTimeWithTermZero(50, 0)));
+
+ ASSERT_TRUE(result.didWait());
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, result.getStatus());
+}
+
+TEST_F(ReplCoordTest, ReadAfterInterrupted) {
+ OperationContextReplMock txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(10, 0));
+
+ txn.setCheckForInterruptStatus(Status(ErrorCodes::Interrupted, "test"));
+
+ auto result =
+ getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(OpTimeWithTermZero(50, 0)));
+
+ ASSERT_TRUE(result.didWait());
+ ASSERT_EQUALS(ErrorCodes::Interrupted, result.getStatus());
+}
+
+TEST_F(ReplCoordTest, ReadAfterNoOpTime) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+
+ auto result = getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs());
+
+ ASSERT_FALSE(result.didWait());
+ ASSERT_OK(result.getStatus());
+}
+
+TEST_F(ReplCoordTest, ReadAfterGreaterOpTime) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(100, 0));
+ auto result =
+ getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(OpTimeWithTermZero(50, 0)));
+
+ ASSERT_TRUE(result.didWait());
+ ASSERT_OK(result.getStatus());
+}
+
+TEST_F(ReplCoordTest, ReadAfterEqualOpTime) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+
+
+ OpTimeWithTermZero time(100, 0);
+ getReplCoord()->setMyLastOptime(time);
+ auto result = getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(time));
+
+ ASSERT_TRUE(result.didWait());
+ ASSERT_OK(result.getStatus());
+}
+
+TEST_F(ReplCoordTest, ReadAfterDeferredGreaterOpTime) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(0, 0));
+
+ auto pseudoLogOp = std::async(std::launch::async,
+ [this]() {
+        // Not guaranteed to be scheduled after waitUntilOpTime blocks...
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(200, 0));
+ });
+
+ auto result =
+ getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(OpTimeWithTermZero(100, 0)));
+ pseudoLogOp.get();
+
+ ASSERT_TRUE(result.didWait());
+ ASSERT_OK(result.getStatus());
+}
+
+TEST_F(ReplCoordTest, ReadAfterDeferredEqualOpTime) {
+ OperationContextNoop txn;
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id" << 0))),
+ HostAndPort("node1", 12345));
+
+ getReplCoord()->setMyLastOptime(OpTimeWithTermZero(0, 0));
+
+ OpTimeWithTermZero opTimeToWait(100, 0);
+
+ auto pseudoLogOp = std::async(std::launch::async,
+ [this, &opTimeToWait]() {
+        // Not guaranteed to be scheduled after waitUntilOpTime blocks...
+ getReplCoord()->setMyLastOptime(opTimeToWait);
+ });
+
+ auto result = getReplCoord()->waitUntilOpTime(&txn, ReadAfterOpTimeArgs(opTimeToWait));
+ pseudoLogOp.get();
+
+ ASSERT_TRUE(result.didWait());
+ ASSERT_OK(result.getStatus());
+}
+
+// TODO(schwerin): Unit test election id updating
} // namespace
} // namespace repl
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index 9d739707925..19ee9730cae 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -41,315 +41,312 @@
namespace mongo {
namespace repl {
- using std::vector;
+using std::vector;
+
+ReplicationCoordinatorMock::ReplicationCoordinatorMock(const ReplSettings& settings)
+ : _settings(settings) {}
+ReplicationCoordinatorMock::~ReplicationCoordinatorMock() {}
+
+void ReplicationCoordinatorMock::startReplication(OperationContext* txn) {
+ // TODO
+}
+
+void ReplicationCoordinatorMock::shutdown() {
+ // TODO
+}
+
+const ReplSettings& ReplicationCoordinatorMock::getSettings() const {
+ return _settings;
+}
+
+bool ReplicationCoordinatorMock::isReplEnabled() const {
+ return _settings.usingReplSets() || _settings.master || _settings.slave;
+}
+
+ReplicationCoordinator::Mode ReplicationCoordinatorMock::getReplicationMode() const {
+ if (_settings.usingReplSets()) {
+ return modeReplSet;
+ }
+ if (_settings.master || _settings.slave) {
+ return modeMasterSlave;
+ }
+ return modeNone;
+}
+
+MemberState ReplicationCoordinatorMock::getMemberState() const {
+ return _memberState;
+}
+
+bool ReplicationCoordinatorMock::isInPrimaryOrSecondaryState() const {
+ invariant(false);
+}
+
+Seconds ReplicationCoordinatorMock::getSlaveDelaySecs() const {
+ return Seconds(0);
+}
+
+void ReplicationCoordinatorMock::clearSyncSourceBlacklist() {}
+
+ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorMock::awaitReplication(
+ OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern) {
+ // TODO
+ return StatusAndDuration(Status::OK(), Milliseconds(0));
+}
+
+ReplicationCoordinator::StatusAndDuration
+ReplicationCoordinatorMock::awaitReplicationOfLastOpForClient(
+ OperationContext* txn, const WriteConcernOptions& writeConcern) {
+ return StatusAndDuration(Status::OK(), Milliseconds(0));
+}
+
+Status ReplicationCoordinatorMock::stepDown(OperationContext* txn,
+ bool force,
+ const Milliseconds& waitTime,
+ const Milliseconds& stepdownTime) {
+ return Status::OK();
+}
- ReplicationCoordinatorMock::ReplicationCoordinatorMock(const ReplSettings& settings) :
- _settings(settings) {}
- ReplicationCoordinatorMock::~ReplicationCoordinatorMock() {}
+bool ReplicationCoordinatorMock::isMasterForReportingPurposes() {
+ // TODO
+ return true;
+}
- void ReplicationCoordinatorMock::startReplication(OperationContext* txn) {
- // TODO
- }
-
- void ReplicationCoordinatorMock::shutdown() {
- // TODO
- }
-
- const ReplSettings& ReplicationCoordinatorMock::getSettings() const {
- return _settings;
- }
-
- bool ReplicationCoordinatorMock::isReplEnabled() const {
- return _settings.usingReplSets() || _settings.master || _settings.slave;
- }
-
- ReplicationCoordinator::Mode ReplicationCoordinatorMock::getReplicationMode() const {
- if (_settings.usingReplSets()) {
- return modeReplSet;
- }
- if (_settings.master || _settings.slave) {
- return modeMasterSlave;
- }
- return modeNone;
- }
-
- MemberState ReplicationCoordinatorMock::getMemberState() const {
- return _memberState;
- }
-
- bool ReplicationCoordinatorMock::isInPrimaryOrSecondaryState() const {
- invariant(false);
- }
-
- Seconds ReplicationCoordinatorMock::getSlaveDelaySecs() const {
- return Seconds(0);
- }
-
- void ReplicationCoordinatorMock::clearSyncSourceBlacklist() {}
-
- ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorMock::awaitReplication(
- OperationContext* txn,
- const OpTime& opTime,
- const WriteConcernOptions& writeConcern) {
- // TODO
- return StatusAndDuration(Status::OK(), Milliseconds(0));
- }
-
- ReplicationCoordinator::StatusAndDuration
- ReplicationCoordinatorMock::awaitReplicationOfLastOpForClient(
- OperationContext* txn,
- const WriteConcernOptions& writeConcern) {
- return StatusAndDuration(Status::OK(), Milliseconds(0));
- }
-
- Status ReplicationCoordinatorMock::stepDown(OperationContext* txn,
- bool force,
- const Milliseconds& waitTime,
- const Milliseconds& stepdownTime) {
- return Status::OK();
- }
-
- bool ReplicationCoordinatorMock::isMasterForReportingPurposes() {
- // TODO
- return true;
- }
-
- bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(StringData dbName) {
- // TODO
- return true;
- }
-
- bool ReplicationCoordinatorMock::canAcceptWritesFor(const NamespaceString& ns) {
- // TODO
- return canAcceptWritesForDatabase(ns.db());
- }
-
- Status ReplicationCoordinatorMock::checkCanServeReadsFor(OperationContext* txn,
- const NamespaceString& ns,
- bool slaveOk) {
- // TODO
- return Status::OK();
- }
-
- bool ReplicationCoordinatorMock::shouldIgnoreUniqueIndex(const IndexDescriptor* idx) {
- // TODO
- return false;
- }
-
- Status ReplicationCoordinatorMock::setLastOptimeForSlave(const OID& rid, const Timestamp& ts) {
- return Status::OK();
- }
-
- void ReplicationCoordinatorMock::setMyHeartbeatMessage(const std::string& msg) {
- // TODO
- }
-
- void ReplicationCoordinatorMock::setMyLastOptime(const OpTime& opTime) {
- _myLastOpTime = opTime;
- }
-
- void ReplicationCoordinatorMock::resetMyLastOptime() {
- _myLastOpTime = OpTime();
- }
-
- OpTime ReplicationCoordinatorMock::getMyLastOptime() const {
- return _myLastOpTime;
- }
-
- ReadAfterOpTimeResponse ReplicationCoordinatorMock::waitUntilOpTime(
- OperationContext* txn,
- const ReadAfterOpTimeArgs& settings) {
- return ReadAfterOpTimeResponse();
- }
-
-
- OID ReplicationCoordinatorMock::getElectionId() {
- // TODO
- return OID();
- }
-
- OID ReplicationCoordinatorMock::getMyRID() const {
- return OID();
- }
-
- int ReplicationCoordinatorMock::getMyId() const {
- return 0;
- }
-
- bool ReplicationCoordinatorMock::setFollowerMode(const MemberState& newState) {
- _memberState = newState;
- return true;
- }
-
- bool ReplicationCoordinatorMock::isWaitingForApplierToDrain() {
- return false;
- }
-
- void ReplicationCoordinatorMock::signalDrainComplete(OperationContext*) {}
-
- void ReplicationCoordinatorMock::signalUpstreamUpdater() {}
-
- bool ReplicationCoordinatorMock::prepareReplSetUpdatePositionCommand(
- BSONObjBuilder* cmdBuilder) {
- cmdBuilder->append("replSetUpdatePosition", 1);
- return true;
- }
-
- ReplicaSetConfig ReplicationCoordinatorMock::getConfig() const {
- return ReplicaSetConfig();
- }
-
- void ReplicationCoordinatorMock::processReplSetGetConfig(BSONObjBuilder* result) {
- // TODO
- }
-
- Status ReplicationCoordinatorMock::processReplSetGetStatus(BSONObjBuilder* result) {
- return Status::OK();
- }
-
- void ReplicationCoordinatorMock::fillIsMasterForReplSet(IsMasterResponse* result) {}
-
- void ReplicationCoordinatorMock::appendSlaveInfoData(BSONObjBuilder* result) {}
-
- Status ReplicationCoordinatorMock::setMaintenanceMode(bool activate) {
- return Status::OK();
- }
-
- bool ReplicationCoordinatorMock::getMaintenanceMode() {
- return false;
- }
-
- Status ReplicationCoordinatorMock::processReplSetSyncFrom(const HostAndPort& target,
- BSONObjBuilder* resultObj) {
- // TODO
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
- // TODO
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processHeartbeat(const ReplSetHeartbeatArgs& args,
- ReplSetHeartbeatResponse* response) {
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processReplSetReconfig(OperationContext* txn,
- const ReplSetReconfigArgs& args,
- BSONObjBuilder* resultObj) {
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processReplSetInitiate(OperationContext* txn,
- const BSONObj& configObj,
- BSONObjBuilder* resultObj) {
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processReplSetGetRBID(BSONObjBuilder* resultObj) {
- return Status::OK();
- }
-
- void ReplicationCoordinatorMock::incrementRollbackID() {}
-
- Status ReplicationCoordinatorMock::processReplSetFresh(const ReplSetFreshArgs& args,
- BSONObjBuilder* resultObj) {
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processReplSetElect(const ReplSetElectArgs& args,
- BSONObjBuilder* resultObj) {
- // TODO
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processReplSetUpdatePosition(
- const UpdatePositionArgs& updates, long long* configVersion) {
- // TODO
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processHandshake(OperationContext* txn,
- const HandshakeArgs& handshake) {
- return Status::OK();
- }
-
- bool ReplicationCoordinatorMock::buildsIndexes() {
- // TODO
- return true;
- }
-
- std::vector<HostAndPort> ReplicationCoordinatorMock::getHostsWrittenTo(const OpTime& op) {
- return std::vector<HostAndPort>();
- }
-
- vector<HostAndPort> ReplicationCoordinatorMock::getOtherNodesInReplSet() const {
- return std::vector<HostAndPort>();
- }
-
- Status ReplicationCoordinatorMock::checkIfWriteConcernCanBeSatisfied(
- const WriteConcernOptions& writeConcern) const {
- return Status::OK();
- }
-
- WriteConcernOptions ReplicationCoordinatorMock::getGetLastErrorDefault() {
- return WriteConcernOptions();
- }
-
- Status ReplicationCoordinatorMock::checkReplEnabledForCommand(BSONObjBuilder* result) {
- // TODO
- return Status::OK();
- }
-
- HostAndPort ReplicationCoordinatorMock::chooseNewSyncSource() {
- return HostAndPort();
- }
-
- void ReplicationCoordinatorMock::blacklistSyncSource(const HostAndPort& host, Date_t until) {
- }
-
- void ReplicationCoordinatorMock::resetLastOpTimeFromOplog(OperationContext* txn) {
- invariant(false);
- }
-
- bool ReplicationCoordinatorMock::shouldChangeSyncSource(const HostAndPort& currentSource) {
- invariant(false);
- }
-
- OpTime ReplicationCoordinatorMock::getLastCommittedOpTime() const {
- return OpTime();
- }
-
- Status ReplicationCoordinatorMock::processReplSetRequestVotes(
- OperationContext* txn,
- const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response) {
- return Status::OK();
- }
-
- Status ReplicationCoordinatorMock::processReplSetDeclareElectionWinner(
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm) {
- return Status::OK();
- }
-
- void ReplicationCoordinatorMock::prepareCursorResponseInfo(BSONObjBuilder* objBuilder) {}
-
- Status ReplicationCoordinatorMock::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
- ReplSetHeartbeatResponse* response) {
- return Status::OK();
- }
-
- bool ReplicationCoordinatorMock::isV1ElectionProtocol() {
- return true;
- }
-
- void ReplicationCoordinatorMock::summarizeAsHtml(ReplSetHtmlSummary* output) {}
-
- long long ReplicationCoordinatorMock::getTerm() { return OpTime::kDefaultTerm; }
-
- bool ReplicationCoordinatorMock::updateTerm(long long term) { return false; }
+bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(StringData dbName) {
+ // TODO
+ return true;
+}
-} // namespace repl
-} // namespace mongo
+bool ReplicationCoordinatorMock::canAcceptWritesFor(const NamespaceString& ns) {
+ // TODO
+ return canAcceptWritesForDatabase(ns.db());
+}
+
+Status ReplicationCoordinatorMock::checkCanServeReadsFor(OperationContext* txn,
+ const NamespaceString& ns,
+ bool slaveOk) {
+ // TODO
+ return Status::OK();
+}
+
+bool ReplicationCoordinatorMock::shouldIgnoreUniqueIndex(const IndexDescriptor* idx) {
+ // TODO
+ return false;
+}
+
+Status ReplicationCoordinatorMock::setLastOptimeForSlave(const OID& rid, const Timestamp& ts) {
+ return Status::OK();
+}
+
+void ReplicationCoordinatorMock::setMyHeartbeatMessage(const std::string& msg) {
+ // TODO
+}
+
+void ReplicationCoordinatorMock::setMyLastOptime(const OpTime& opTime) {
+ _myLastOpTime = opTime;
+}
+
+void ReplicationCoordinatorMock::resetMyLastOptime() {
+ _myLastOpTime = OpTime();
+}
+
+OpTime ReplicationCoordinatorMock::getMyLastOptime() const {
+ return _myLastOpTime;
+}
+
+ReadAfterOpTimeResponse ReplicationCoordinatorMock::waitUntilOpTime(
+ OperationContext* txn, const ReadAfterOpTimeArgs& settings) {
+ return ReadAfterOpTimeResponse();
+}
+
+
+OID ReplicationCoordinatorMock::getElectionId() {
+ // TODO
+ return OID();
+}
+
+OID ReplicationCoordinatorMock::getMyRID() const {
+ return OID();
+}
+
+int ReplicationCoordinatorMock::getMyId() const {
+ return 0;
+}
+
+bool ReplicationCoordinatorMock::setFollowerMode(const MemberState& newState) {
+ _memberState = newState;
+ return true;
+}
+
+bool ReplicationCoordinatorMock::isWaitingForApplierToDrain() {
+ return false;
+}
+
+void ReplicationCoordinatorMock::signalDrainComplete(OperationContext*) {}
+
+void ReplicationCoordinatorMock::signalUpstreamUpdater() {}
+
+bool ReplicationCoordinatorMock::prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) {
+ cmdBuilder->append("replSetUpdatePosition", 1);
+ return true;
+}
+
+ReplicaSetConfig ReplicationCoordinatorMock::getConfig() const {
+ return ReplicaSetConfig();
+}
+
+void ReplicationCoordinatorMock::processReplSetGetConfig(BSONObjBuilder* result) {
+ // TODO
+}
+
+Status ReplicationCoordinatorMock::processReplSetGetStatus(BSONObjBuilder* result) {
+ return Status::OK();
+}
+
+void ReplicationCoordinatorMock::fillIsMasterForReplSet(IsMasterResponse* result) {}
+
+void ReplicationCoordinatorMock::appendSlaveInfoData(BSONObjBuilder* result) {}
+
+Status ReplicationCoordinatorMock::setMaintenanceMode(bool activate) {
+ return Status::OK();
+}
+
+bool ReplicationCoordinatorMock::getMaintenanceMode() {
+ return false;
+}
+
+Status ReplicationCoordinatorMock::processReplSetSyncFrom(const HostAndPort& target,
+ BSONObjBuilder* resultObj) {
+ // TODO
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) {
+ // TODO
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processHeartbeat(const ReplSetHeartbeatArgs& args,
+ ReplSetHeartbeatResponse* response) {
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processReplSetReconfig(OperationContext* txn,
+ const ReplSetReconfigArgs& args,
+ BSONObjBuilder* resultObj) {
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processReplSetInitiate(OperationContext* txn,
+ const BSONObj& configObj,
+ BSONObjBuilder* resultObj) {
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processReplSetGetRBID(BSONObjBuilder* resultObj) {
+ return Status::OK();
+}
+
+void ReplicationCoordinatorMock::incrementRollbackID() {}
+
+Status ReplicationCoordinatorMock::processReplSetFresh(const ReplSetFreshArgs& args,
+ BSONObjBuilder* resultObj) {
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processReplSetElect(const ReplSetElectArgs& args,
+ BSONObjBuilder* resultObj) {
+ // TODO
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processReplSetUpdatePosition(const UpdatePositionArgs& updates,
+ long long* configVersion) {
+ // TODO
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processHandshake(OperationContext* txn,
+ const HandshakeArgs& handshake) {
+ return Status::OK();
+}
+
+bool ReplicationCoordinatorMock::buildsIndexes() {
+ // TODO
+ return true;
+}
+
+std::vector<HostAndPort> ReplicationCoordinatorMock::getHostsWrittenTo(const OpTime& op) {
+ return std::vector<HostAndPort>();
+}
+
+vector<HostAndPort> ReplicationCoordinatorMock::getOtherNodesInReplSet() const {
+ return std::vector<HostAndPort>();
+}
+
+Status ReplicationCoordinatorMock::checkIfWriteConcernCanBeSatisfied(
+ const WriteConcernOptions& writeConcern) const {
+ return Status::OK();
+}
+
+WriteConcernOptions ReplicationCoordinatorMock::getGetLastErrorDefault() {
+ return WriteConcernOptions();
+}
+
+Status ReplicationCoordinatorMock::checkReplEnabledForCommand(BSONObjBuilder* result) {
+ // TODO
+ return Status::OK();
+}
+
+HostAndPort ReplicationCoordinatorMock::chooseNewSyncSource() {
+ return HostAndPort();
+}
+
+void ReplicationCoordinatorMock::blacklistSyncSource(const HostAndPort& host, Date_t until) {}
+
+void ReplicationCoordinatorMock::resetLastOpTimeFromOplog(OperationContext* txn) {
+ invariant(false);
+}
+
+bool ReplicationCoordinatorMock::shouldChangeSyncSource(const HostAndPort& currentSource) {
+ invariant(false);
+}
+
+OpTime ReplicationCoordinatorMock::getLastCommittedOpTime() const {
+ return OpTime();
+}
+
+Status ReplicationCoordinatorMock::processReplSetRequestVotes(
+ OperationContext* txn,
+ const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response) {
+ return Status::OK();
+}
+
+Status ReplicationCoordinatorMock::processReplSetDeclareElectionWinner(
+ const ReplSetDeclareElectionWinnerArgs& args, long long* responseTerm) {
+ return Status::OK();
+}
+
+void ReplicationCoordinatorMock::prepareCursorResponseInfo(BSONObjBuilder* objBuilder) {}
+
+Status ReplicationCoordinatorMock::processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
+ ReplSetHeartbeatResponse* response) {
+ return Status::OK();
+}
+
+bool ReplicationCoordinatorMock::isV1ElectionProtocol() {
+ return true;
+}
+
+void ReplicationCoordinatorMock::summarizeAsHtml(ReplSetHtmlSummary* output) {}
+
+long long ReplicationCoordinatorMock::getTerm() {
+ return OpTime::kDefaultTerm;
+}
+
+bool ReplicationCoordinatorMock::updateTerm(long long term) {
+ return false;
+}
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index 2f404b3d157..2856878bd6f 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -35,187 +35,176 @@
namespace mongo {
namespace repl {
- /**
- * A mock ReplicationCoordinator. Currently it is extremely simple and exists solely to link
- * into dbtests.
- */
- class ReplicationCoordinatorMock : public ReplicationCoordinator {
- MONGO_DISALLOW_COPYING(ReplicationCoordinatorMock);
-
- public:
-
- ReplicationCoordinatorMock(const ReplSettings& settings);
- virtual ~ReplicationCoordinatorMock();
+/**
+ * A mock ReplicationCoordinator. Currently it is extremely simple and exists solely to link
+ * into dbtests.
+ */
+class ReplicationCoordinatorMock : public ReplicationCoordinator {
+ MONGO_DISALLOW_COPYING(ReplicationCoordinatorMock);
- virtual void startReplication(OperationContext* txn);
+public:
+ ReplicationCoordinatorMock(const ReplSettings& settings);
+ virtual ~ReplicationCoordinatorMock();
- virtual void shutdown();
+ virtual void startReplication(OperationContext* txn);
- virtual const ReplSettings& getSettings() const;
+ virtual void shutdown();
- virtual bool isReplEnabled() const;
+ virtual const ReplSettings& getSettings() const;
- virtual Mode getReplicationMode() const;
+ virtual bool isReplEnabled() const;
- virtual MemberState getMemberState() const;
+ virtual Mode getReplicationMode() const;
- virtual bool isInPrimaryOrSecondaryState() const;
+ virtual MemberState getMemberState() const;
- virtual Seconds getSlaveDelaySecs() const;
+ virtual bool isInPrimaryOrSecondaryState() const;
- virtual void clearSyncSourceBlacklist();
+ virtual Seconds getSlaveDelaySecs() const;
- virtual ReplicationCoordinator::StatusAndDuration awaitReplication(
- OperationContext* txn,
- const OpTime& opTime,
- const WriteConcernOptions& writeConcern);
+ virtual void clearSyncSourceBlacklist();
- virtual ReplicationCoordinator::StatusAndDuration awaitReplicationOfLastOpForClient(
- OperationContext* txn,
- const WriteConcernOptions& writeConcern);
+ virtual ReplicationCoordinator::StatusAndDuration awaitReplication(
+ OperationContext* txn, const OpTime& opTime, const WriteConcernOptions& writeConcern);
- virtual Status stepDown(OperationContext* txn,
- bool force,
- const Milliseconds& waitTime,
- const Milliseconds& stepdownTime);
+ virtual ReplicationCoordinator::StatusAndDuration awaitReplicationOfLastOpForClient(
+ OperationContext* txn, const WriteConcernOptions& writeConcern);
- virtual bool isMasterForReportingPurposes();
+ virtual Status stepDown(OperationContext* txn,
+ bool force,
+ const Milliseconds& waitTime,
+ const Milliseconds& stepdownTime);
- virtual bool canAcceptWritesForDatabase(StringData dbName);
+ virtual bool isMasterForReportingPurposes();
- bool canAcceptWritesFor(const NamespaceString& ns) override;
+ virtual bool canAcceptWritesForDatabase(StringData dbName);
- virtual Status checkIfWriteConcernCanBeSatisfied(
- const WriteConcernOptions& writeConcern) const;
+ bool canAcceptWritesFor(const NamespaceString& ns) override;
- virtual Status checkCanServeReadsFor(OperationContext* txn,
- const NamespaceString& ns,
- bool slaveOk);
+ virtual Status checkIfWriteConcernCanBeSatisfied(const WriteConcernOptions& writeConcern) const;
- virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx);
+ virtual Status checkCanServeReadsFor(OperationContext* txn,
+ const NamespaceString& ns,
+ bool slaveOk);
- virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts);
+ virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx);
- virtual void setMyLastOptime(const OpTime& opTime);
+ virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts);
- virtual void resetMyLastOptime();
+ virtual void setMyLastOptime(const OpTime& opTime);
- virtual void setMyHeartbeatMessage(const std::string& msg);
+ virtual void resetMyLastOptime();
- virtual OpTime getMyLastOptime() const;
+ virtual void setMyHeartbeatMessage(const std::string& msg);
- virtual ReadAfterOpTimeResponse waitUntilOpTime(
- OperationContext* txn,
- const ReadAfterOpTimeArgs& settings) override;
+ virtual OpTime getMyLastOptime() const;
- virtual OID getElectionId();
+ virtual ReadAfterOpTimeResponse waitUntilOpTime(OperationContext* txn,
+ const ReadAfterOpTimeArgs& settings) override;
- virtual OID getMyRID() const;
+ virtual OID getElectionId();
- virtual int getMyId() const;
+ virtual OID getMyRID() const;
- virtual bool setFollowerMode(const MemberState& newState);
+ virtual int getMyId() const;
- virtual bool isWaitingForApplierToDrain();
+ virtual bool setFollowerMode(const MemberState& newState);
- virtual void signalDrainComplete(OperationContext*);
+ virtual bool isWaitingForApplierToDrain();
- virtual void signalUpstreamUpdater();
+ virtual void signalDrainComplete(OperationContext*);
- virtual bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder);
+ virtual void signalUpstreamUpdater();
- virtual Status processReplSetGetStatus(BSONObjBuilder* result);
+ virtual bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder);
- virtual void fillIsMasterForReplSet(IsMasterResponse* result);
+ virtual Status processReplSetGetStatus(BSONObjBuilder* result);
- virtual void appendSlaveInfoData(BSONObjBuilder* result);
+ virtual void fillIsMasterForReplSet(IsMasterResponse* result);
- virtual ReplicaSetConfig getConfig() const;
+ virtual void appendSlaveInfoData(BSONObjBuilder* result);
- virtual void processReplSetGetConfig(BSONObjBuilder* result);
+ virtual ReplicaSetConfig getConfig() const;
- virtual Status setMaintenanceMode(bool activate);
+ virtual void processReplSetGetConfig(BSONObjBuilder* result);
- virtual bool getMaintenanceMode();
+ virtual Status setMaintenanceMode(bool activate);
- virtual Status processReplSetSyncFrom(const HostAndPort& target,
- BSONObjBuilder* resultObj);
+ virtual bool getMaintenanceMode();
- virtual Status processReplSetFreeze(int secs, BSONObjBuilder* resultObj);
+ virtual Status processReplSetSyncFrom(const HostAndPort& target, BSONObjBuilder* resultObj);
- virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args,
- ReplSetHeartbeatResponse* response);
+ virtual Status processReplSetFreeze(int secs, BSONObjBuilder* resultObj);
- virtual Status processReplSetReconfig(OperationContext* txn,
- const ReplSetReconfigArgs& args,
- BSONObjBuilder* resultObj);
+ virtual Status processHeartbeat(const ReplSetHeartbeatArgs& args,
+ ReplSetHeartbeatResponse* response);
- virtual Status processReplSetInitiate(OperationContext* txn,
- const BSONObj& configObj,
- BSONObjBuilder* resultObj);
+ virtual Status processReplSetReconfig(OperationContext* txn,
+ const ReplSetReconfigArgs& args,
+ BSONObjBuilder* resultObj);
- virtual Status processReplSetGetRBID(BSONObjBuilder* resultObj);
+ virtual Status processReplSetInitiate(OperationContext* txn,
+ const BSONObj& configObj,
+ BSONObjBuilder* resultObj);
- virtual void incrementRollbackID();
+ virtual Status processReplSetGetRBID(BSONObjBuilder* resultObj);
- virtual Status processReplSetFresh(const ReplSetFreshArgs& args,
- BSONObjBuilder* resultObj);
+ virtual void incrementRollbackID();
- virtual Status processReplSetElect(const ReplSetElectArgs& args,
- BSONObjBuilder* resultObj);
+ virtual Status processReplSetFresh(const ReplSetFreshArgs& args, BSONObjBuilder* resultObj);
- virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates,
- long long* configVersion);
+ virtual Status processReplSetElect(const ReplSetElectArgs& args, BSONObjBuilder* resultObj);
- virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake);
+ virtual Status processReplSetUpdatePosition(const UpdatePositionArgs& updates,
+ long long* configVersion);
- virtual bool buildsIndexes();
+ virtual Status processHandshake(OperationContext* txn, const HandshakeArgs& handshake);
- virtual std::vector<HostAndPort> getHostsWrittenTo(const OpTime& op);
+ virtual bool buildsIndexes();
- virtual std::vector<HostAndPort> getOtherNodesInReplSet() const;
+ virtual std::vector<HostAndPort> getHostsWrittenTo(const OpTime& op);
- virtual WriteConcernOptions getGetLastErrorDefault();
+ virtual std::vector<HostAndPort> getOtherNodesInReplSet() const;
- virtual Status checkReplEnabledForCommand(BSONObjBuilder* result);
+ virtual WriteConcernOptions getGetLastErrorDefault();
- virtual HostAndPort chooseNewSyncSource();
+ virtual Status checkReplEnabledForCommand(BSONObjBuilder* result);
- virtual void blacklistSyncSource(const HostAndPort& host, Date_t until);
+ virtual HostAndPort chooseNewSyncSource();
- virtual void resetLastOpTimeFromOplog(OperationContext* txn);
+ virtual void blacklistSyncSource(const HostAndPort& host, Date_t until);
- virtual bool shouldChangeSyncSource(const HostAndPort& currentSource);
+ virtual void resetLastOpTimeFromOplog(OperationContext* txn);
- virtual OpTime getLastCommittedOpTime() const;
+ virtual bool shouldChangeSyncSource(const HostAndPort& currentSource);
- virtual Status processReplSetRequestVotes(OperationContext* txn,
- const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response);
+ virtual OpTime getLastCommittedOpTime() const;
- virtual Status processReplSetDeclareElectionWinner(
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm);
+ virtual Status processReplSetRequestVotes(OperationContext* txn,
+ const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response);
- virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder);
+ virtual Status processReplSetDeclareElectionWinner(const ReplSetDeclareElectionWinnerArgs& args,
+ long long* responseTerm);
- virtual Status processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
- ReplSetHeartbeatResponse* response);
+ virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder);
- virtual bool isV1ElectionProtocol();
+ virtual Status processHeartbeatV1(const ReplSetHeartbeatArgsV1& args,
+ ReplSetHeartbeatResponse* response);
- virtual void summarizeAsHtml(ReplSetHtmlSummary* output);
+ virtual bool isV1ElectionProtocol();
- virtual long long getTerm();
+ virtual void summarizeAsHtml(ReplSetHtmlSummary* output);
- virtual bool updateTerm(long long term);
+ virtual long long getTerm();
- private:
+ virtual bool updateTerm(long long term);
- const ReplSettings _settings;
- MemberState _memberState;
- OpTime _myLastOpTime;
- };
+private:
+ const ReplSettings _settings;
+ MemberState _memberState;
+ OpTime _myLastOpTime;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 2359de1f751..47f25a6bbdd 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -51,290 +51,269 @@ namespace mongo {
namespace repl {
namespace {
- bool stringContains(const std::string &haystack, const std::string& needle) {
- return haystack.find(needle) != std::string::npos;
- }
+bool stringContains(const std::string& haystack, const std::string& needle) {
+ return haystack.find(needle) != std::string::npos;
+}
} // namespace
- using executor::NetworkInterfaceMock;
+using executor::NetworkInterfaceMock;
- ReplicaSetConfig ReplCoordTest::assertMakeRSConfig(const BSONObj& configBson) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(configBson));
- ASSERT_OK(config.validate());
- return config;
- }
+ReplicaSetConfig ReplCoordTest::assertMakeRSConfig(const BSONObj& configBson) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(configBson));
+ ASSERT_OK(config.validate());
+ return config;
+}
- ReplCoordTest::ReplCoordTest() : _callShutdown(false) {}
- ReplCoordTest::~ReplCoordTest() {}
+ReplCoordTest::ReplCoordTest() : _callShutdown(false) {}
+ReplCoordTest::~ReplCoordTest() {}
- void ReplCoordTest::setUp() {
- _settings.replSet = "mySet/node1:12345,node2:54321";
- }
+void ReplCoordTest::setUp() {
+ _settings.replSet = "mySet/node1:12345,node2:54321";
+}
- void ReplCoordTest::tearDown() {
- if (_externalState) {
- _externalState->setStoreLocalConfigDocumentToHang(false);
- }
- if (_callShutdown) {
- shutdown();
- }
+void ReplCoordTest::tearDown() {
+ if (_externalState) {
+ _externalState->setStoreLocalConfigDocumentToHang(false);
}
-
- void ReplCoordTest::assertRunUntil(Date_t newTime) {
- this->_net->runUntil(newTime);
- ASSERT_EQUALS(newTime, getNet()->now());
+ if (_callShutdown) {
+ shutdown();
}
-
- void ReplCoordTest::enterNetwork() {
- getNet()->enterNetwork();
- }
-
- void ReplCoordTest::exitNetwork() {
- getNet()->exitNetwork();
+}
+
+void ReplCoordTest::assertRunUntil(Date_t newTime) {
+ this->_net->runUntil(newTime);
+ ASSERT_EQUALS(newTime, getNet()->now());
+}
+
+void ReplCoordTest::enterNetwork() {
+ getNet()->enterNetwork();
+}
+
+void ReplCoordTest::exitNetwork() {
+ getNet()->exitNetwork();
+}
+
+void ReplCoordTest::addSelf(const HostAndPort& selfHost) {
+ getExternalState()->addSelf(selfHost);
+}
+
+void ReplCoordTest::init() {
+ invariant(!_repl);
+ invariant(!_callShutdown);
+
+ // PRNG seed for tests.
+ const int64_t seed = 0;
+
+ _topo = new TopologyCoordinatorImpl(Seconds(0));
+ _net = new NetworkInterfaceMock;
+ _storage = new StorageInterfaceMock;
+ _externalState = new ReplicationCoordinatorExternalStateMock;
+ _repl.reset(
+ new ReplicationCoordinatorImpl(_settings, _externalState, _net, _storage, _topo, seed));
+}
+
+void ReplCoordTest::init(const ReplSettings& settings) {
+ _settings = settings;
+ init();
+}
+
+void ReplCoordTest::init(const std::string& replSet) {
+ _settings.replSet = replSet;
+ init();
+}
+
+void ReplCoordTest::start() {
+ invariant(!_callShutdown);
+ // if we haven't initialized yet, do that first.
+ if (!_repl) {
+ init();
}
- void ReplCoordTest::addSelf(const HostAndPort& selfHost) {
- getExternalState()->addSelf(selfHost);
- }
+ OperationContextNoop txn;
+ _repl->startReplication(&txn);
+ _repl->waitForStartUpComplete();
+ _callShutdown = true;
+}
- void ReplCoordTest::init() {
- invariant(!_repl);
- invariant(!_callShutdown);
-
- // PRNG seed for tests.
- const int64_t seed = 0;
-
- _topo = new TopologyCoordinatorImpl(Seconds(0));
- _net = new NetworkInterfaceMock;
- _storage = new StorageInterfaceMock;
- _externalState = new ReplicationCoordinatorExternalStateMock;
- _repl.reset(new ReplicationCoordinatorImpl(_settings,
- _externalState,
- _net,
- _storage,
- _topo,
- seed));
- }
-
- void ReplCoordTest::init(const ReplSettings& settings) {
- _settings = settings;
+void ReplCoordTest::start(const BSONObj& configDoc, const HostAndPort& selfHost) {
+ if (!_repl) {
init();
}
+ _externalState->setLocalConfigDocument(StatusWith<BSONObj>(configDoc));
+ _externalState->addSelf(selfHost);
+ start();
+}
- void ReplCoordTest::init(const std::string& replSet) {
- _settings.replSet = replSet;
+void ReplCoordTest::start(const HostAndPort& selfHost) {
+ if (!_repl) {
init();
}
-
- void ReplCoordTest::start() {
- invariant(!_callShutdown);
- // if we haven't initialized yet, do that first.
- if (!_repl) {
- init();
- }
-
- OperationContextNoop txn;
- _repl->startReplication(&txn);
- _repl->waitForStartUpComplete();
- _callShutdown = true;
- }
-
- void ReplCoordTest::start(const BSONObj& configDoc, const HostAndPort& selfHost) {
- if (!_repl) {
- init();
- }
- _externalState->setLocalConfigDocument(StatusWith<BSONObj>(configDoc));
- _externalState->addSelf(selfHost);
- start();
- }
-
- void ReplCoordTest::start(const HostAndPort& selfHost) {
- if (!_repl) {
- init();
- }
- _externalState->addSelf(selfHost);
- start();
- }
-
- void ReplCoordTest::assertStartSuccess(
- const BSONObj& configDoc,
- const HostAndPort& selfHost) {
- start(configDoc, selfHost);
- ASSERT_NE(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
- }
-
- ResponseStatus ReplCoordTest::makeResponseStatus(const BSONObj& doc, Milliseconds millis) {
- log() << "Responding with " << doc;
- return ResponseStatus(RemoteCommandResponse(doc, millis));
- }
-
- void ReplCoordTest::simulateSuccessfulV1Election() {
- OperationContextReplMock txn;
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- NetworkInterfaceMock* net = getNet();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- ASSERT(replCoord->getMemberState().secondary()) <<
- replCoord->getMemberState().toString();
- while (!replCoord->getMemberState().primary()) {
- log() << "Waiting on network in state " << replCoord->getMemberState();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgsV1 hbArgs;
- Status status = hbArgs.initialize(request.cmdObj);
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName(rsConfig.getReplSetName());
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
- }
- else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
-
- net->scheduleResponse(noi, net->now(), makeResponseStatus(
- BSON("ok" << 1 <<
- "reason" << "" <<
- "term" << request.cmdObj["term"].Long() <<
- "voteGranted" << true)));
- }
- else if (request.cmdObj.firstElement().fieldNameStringData() ==
- "replSetDeclareElectionWinner") {
- net->scheduleResponse(noi, net->now(), makeResponseStatus(
- BSON("ok" << 1 <<
- "term" << request.cmdObj["term"].Long())));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
+ _externalState->addSelf(selfHost);
+ start();
+}
+
+void ReplCoordTest::assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost) {
+ start(configDoc, selfHost);
+ ASSERT_NE(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
+}
+
+ResponseStatus ReplCoordTest::makeResponseStatus(const BSONObj& doc, Milliseconds millis) {
+ log() << "Responding with " << doc;
+ return ResponseStatus(RemoteCommandResponse(doc, millis));
+}
+
+void ReplCoordTest::simulateSuccessfulV1Election() {
+ OperationContextReplMock txn;
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ NetworkInterfaceMock* net = getNet();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ ASSERT(replCoord->getMemberState().secondary()) << replCoord->getMemberState().toString();
+ while (!replCoord->getMemberState().primary()) {
+ log() << "Waiting on network in state " << replCoord->getMemberState();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgsV1 hbArgs;
+ Status status = hbArgs.initialize(request.cmdObj);
+        if (status.isOK()) {
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName(rsConfig.getReplSetName());
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
+ } else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term" << request.cmdObj["term"].Long()
+ << "voteGranted" << true)));
+ } else if (request.cmdObj.firstElement().fieldNameStringData() ==
+ "replSetDeclareElectionWinner") {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long())));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
- ASSERT(replCoord->isWaitingForApplierToDrain());
- ASSERT(replCoord->getMemberState().primary()) <<
- replCoord->getMemberState().toString();
-
- IsMasterResponse imResponse;
- replCoord->fillIsMasterForReplSet(&imResponse);
- ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- replCoord->signalDrainComplete(&txn);
- replCoord->fillIsMasterForReplSet(&imResponse);
- ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
-
- ASSERT(replCoord->getMemberState().primary()) <<
- replCoord->getMemberState().toString();
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
}
-
- void ReplCoordTest::simulateSuccessfulElection() {
- OperationContextReplMock txn;
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- NetworkInterfaceMock* net = getNet();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- ASSERT(replCoord->getMemberState().secondary()) <<
- replCoord->getMemberState().toString();
- while (!replCoord->getMemberState().primary()) {
- log() << "Waiting on network in state " << replCoord->getMemberState();
- getNet()->enterNetwork();
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgs hbArgs;
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- ReplSetHeartbeatResponse hbResp;
- hbResp.setSetName(rsConfig.getReplSetName());
- hbResp.setState(MemberState::RS_SECONDARY);
- hbResp.setConfigVersion(rsConfig.getConfigVersion());
- BSONObjBuilder respObj;
- respObj << "ok" << 1;
- hbResp.addToBSON(&respObj, false);
- net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
- }
- else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetFresh") {
- net->scheduleResponse(noi, net->now(), makeResponseStatus(
- BSON("ok" << 1 <<
- "fresher" << false <<
- "opTime" << Date_t() <<
- "veto" << false)));
- }
- else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetElect") {
- net->scheduleResponse(noi, net->now(), makeResponseStatus(
- BSON("ok" << 1 <<
- "vote" << 1 <<
- "round" << request.cmdObj["round"].OID())));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
+ ASSERT(replCoord->isWaitingForApplierToDrain());
+ ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString();
+
+ IsMasterResponse imResponse;
+ replCoord->fillIsMasterForReplSet(&imResponse);
+ ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+ replCoord->signalDrainComplete(&txn);
+ replCoord->fillIsMasterForReplSet(&imResponse);
+ ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+
+ ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString();
+}
+
+void ReplCoordTest::simulateSuccessfulElection() {
+ OperationContextReplMock txn;
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ NetworkInterfaceMock* net = getNet();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ ASSERT(replCoord->getMemberState().secondary()) << replCoord->getMemberState().toString();
+ while (!replCoord->getMemberState().primary()) {
+ log() << "Waiting on network in state " << replCoord->getMemberState();
+ getNet()->enterNetwork();
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgs hbArgs;
+ if (hbArgs.initialize(request.cmdObj).isOK()) {
+ ReplSetHeartbeatResponse hbResp;
+ hbResp.setSetName(rsConfig.getReplSetName());
+ hbResp.setState(MemberState::RS_SECONDARY);
+ hbResp.setConfigVersion(rsConfig.getConfigVersion());
+ BSONObjBuilder respObj;
+ respObj << "ok" << 1;
+ hbResp.addToBSON(&respObj, false);
+ net->scheduleResponse(noi, net->now(), makeResponseStatus(respObj.obj()));
+ } else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetFresh") {
+ net->scheduleResponse(
+ noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "fresher" << false << "opTime" << Date_t()
+ << "veto" << false)));
+ } else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetElect") {
+ net->scheduleResponse(noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "vote" << 1 << "round"
+ << request.cmdObj["round"].OID())));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
- ASSERT(replCoord->isWaitingForApplierToDrain());
- ASSERT(replCoord->getMemberState().primary()) <<
- replCoord->getMemberState().toString();
-
- IsMasterResponse imResponse;
- replCoord->fillIsMasterForReplSet(&imResponse);
- ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
- replCoord->signalDrainComplete(&txn);
- replCoord->fillIsMasterForReplSet(&imResponse);
- ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
- ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
-
- ASSERT(replCoord->getMemberState().primary()) <<
- replCoord->getMemberState().toString();
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
}
-
- void ReplCoordTest::simulateStepDownOnIsolation() {
- ReplicationCoordinatorImpl* replCoord = getReplCoord();
- NetworkInterfaceMock* net = getNet();
- ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
- ASSERT(replCoord->getMemberState().primary()) <<
- replCoord->getMemberState().toString();
- while (replCoord->getMemberState().primary()) {
- log() << "Waiting on network in state " << replCoord->getMemberState();
- getNet()->enterNetwork();
- net->runUntil(net->now() + Seconds(10));
- const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- const RemoteCommandRequest& request = noi->getRequest();
- log() << request.target.toString() << " processing " << request.cmdObj;
- ReplSetHeartbeatArgs hbArgs;
- if (hbArgs.initialize(request.cmdObj).isOK()) {
- net->scheduleResponse(noi,
- net->now(),
- ResponseStatus(ErrorCodes::NetworkTimeout, "Nobody's home"));
- }
- else {
- error() << "Black holing unexpected request to " << request.target << ": " <<
- request.cmdObj;
- net->blackHole(noi);
- }
- net->runReadyNetworkOperations();
- getNet()->exitNetwork();
+ ASSERT(replCoord->isWaitingForApplierToDrain());
+ ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString();
+
+ IsMasterResponse imResponse;
+ replCoord->fillIsMasterForReplSet(&imResponse);
+ ASSERT_FALSE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_TRUE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+ replCoord->signalDrainComplete(&txn);
+ replCoord->fillIsMasterForReplSet(&imResponse);
+ ASSERT_TRUE(imResponse.isMaster()) << imResponse.toBSON().toString();
+ ASSERT_FALSE(imResponse.isSecondary()) << imResponse.toBSON().toString();
+
+ ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString();
+}
+
+void ReplCoordTest::simulateStepDownOnIsolation() {
+ ReplicationCoordinatorImpl* replCoord = getReplCoord();
+ NetworkInterfaceMock* net = getNet();
+ ReplicaSetConfig rsConfig = replCoord->getReplicaSetConfig_forTest();
+ ASSERT(replCoord->getMemberState().primary()) << replCoord->getMemberState().toString();
+ while (replCoord->getMemberState().primary()) {
+ log() << "Waiting on network in state " << replCoord->getMemberState();
+ getNet()->enterNetwork();
+ net->runUntil(net->now() + Seconds(10));
+ const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ const RemoteCommandRequest& request = noi->getRequest();
+ log() << request.target.toString() << " processing " << request.cmdObj;
+ ReplSetHeartbeatArgs hbArgs;
+ if (hbArgs.initialize(request.cmdObj).isOK()) {
+ net->scheduleResponse(
+ noi, net->now(), ResponseStatus(ErrorCodes::NetworkTimeout, "Nobody's home"));
+ } else {
+ error() << "Black holing unexpected request to " << request.target << ": "
+ << request.cmdObj;
+ net->blackHole(noi);
}
+ net->runReadyNetworkOperations();
+ getNet()->exitNetwork();
}
-
- void ReplCoordTest::shutdown() {
- invariant(_callShutdown);
- _net->exitNetwork();
- _repl->shutdown();
- _callShutdown = false;
- }
-
- int64_t ReplCoordTest::countLogLinesContaining(const std::string& needle) {
- return std::count_if(getCapturedLogMessages().begin(),
- getCapturedLogMessages().end(),
- stdx::bind(stringContains,
- stdx::placeholders::_1,
- needle));
- }
+}
+
+void ReplCoordTest::shutdown() {
+ invariant(_callShutdown);
+ _net->exitNetwork();
+ _repl->shutdown();
+ _callShutdown = false;
+}
+
+int64_t ReplCoordTest::countLogLinesContaining(const std::string& needle) {
+ return std::count_if(getCapturedLogMessages().begin(),
+ getCapturedLogMessages().end(),
+ stdx::bind(stringContains, stdx::placeholders::_1, needle));
+}
} // namespace repl
} // namespace mongo
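
Every simulate* helper above drives the same request/response loop against the mock network. Distilled to a single round trip, and using only calls that already appear above, the pattern looks roughly like this sketch (the BSON("ok" << 1) reply body is illustrative; a real heartbeat reply needs the fields that simulateSuccessfulElection builds):

    getNet()->enterNetwork();
    const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
    const RemoteCommandRequest& request = noi->getRequest();
    log() << "answering " << request.cmdObj;
    getNet()->scheduleResponse(noi, getNet()->now(), makeResponseStatus(BSON("ok" << 1)));
    getNet()->runReadyNetworkOperations();  // deliver the response to the waiting callback
    getNet()->exitNetwork();
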
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.h b/src/mongo/db/repl/replication_coordinator_test_fixture.h
index 22ab10a1e25..d38ef060eb7 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.h
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.h
@@ -37,162 +37,170 @@
namespace mongo {
- class BSONObj;
- struct HostAndPort;
+class BSONObj;
+struct HostAndPort;
namespace executor {
- class NetworkInterfaceMock;
-} // namespace executor
+class NetworkInterfaceMock;
+} // namespace executor
namespace repl {
- class ReplicaSetConfig;
- class ReplicationCoordinatorExternalStateMock;
- class ReplicationCoordinatorImpl;
- class StorageInterfaceMock;
- class TopologyCoordinatorImpl;
-
- /**
- * Fixture for testing ReplicationCoordinatorImpl behaviors.
- */
- class ReplCoordTest : public mongo::unittest::Test {
- public:
- /**
- * Makes a ResponseStatus with the given "doc" response and optional elapsed time "millis".
- */
- static ResponseStatus makeResponseStatus(const BSONObj& doc,
- Milliseconds millis = Milliseconds(0));
-
- /**
- * Constructs a ReplicaSetConfig from the given BSON, or raises a test failure exception.
- */
- static ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBSON);
-
- ReplCoordTest();
- virtual ~ReplCoordTest();
-
- protected:
- virtual void setUp();
- virtual void tearDown();
-
- /**
- * Gets the network mock.
- */
- executor::NetworkInterfaceMock* getNet() { return _net; }
-
- /**
- * Gets the replication coordinator under test.
- */
- ReplicationCoordinatorImpl* getReplCoord() { return _repl.get();}
-
- /**
- * Gets the topology coordinator used by the replication coordinator under test.
- */
- TopologyCoordinatorImpl& getTopoCoord() { return *_topo;}
-
- /**
- * Gets the external state used by the replication coordinator under test.
- */
- ReplicationCoordinatorExternalStateMock* getExternalState() { return _externalState; }
-
- /**
- * Adds "selfHost" to the list of hosts that identify as "this" host.
- */
- void addSelf(const HostAndPort& selfHost);
-
- /**
- * Moves time forward in the network until the new time, and asserts if now!=newTime after
- */
- void assertRunUntil(Date_t newTime);
-
- /**
- * Shorthand for getNet()->enterNetwork()
- */
- void enterNetwork();
-
- /**
- * Shorthand for getNet()->exitNetwork()
- */
- void exitNetwork();
-
- /**
- * Initializes the objects under test; this behavior is optional, in case you need to call
- * any methods on the network or coordinator objects before calling start.
- */
- void init();
-
- /**
- * Initializes the objects under test, using the given "settings".
- */
- void init(const ReplSettings& settings);
-
- /**
- * Initializes the objects under test, using "replSet" as the name of the replica set under
- * test.
- */
- void init(const std::string& replSet);
-
- /**
- * Starts the replication coordinator under test, with no local config document and
- * no notion of what host or hosts are represented by the network interface.
- */
- void start();
-
- /**
- * Starts the replication coordinator under test, with the given configuration in
- * local storage and the given host name.
- */
- void start(const BSONObj& configDoc, const HostAndPort& selfHost);
-
- /**
- * Starts the replication coordinator under test with the given host name.
- */
- void start(const HostAndPort& selfHost);
-
- /**
- * Brings the repl coord from SECONDARY to PRIMARY by simulating the messages required to
- * elect it.
- *
- * Behavior is unspecified if node does not have a clean config, is not in SECONDARY, etc.
- */
- void simulateSuccessfulElection();
- void simulateSuccessfulV1Election();
-
- /**
- * Brings the repl coord from PRIMARY to SECONDARY by simulating a period of time in which
- * all heartbeats respond with an error condition, such as time out.
- */
- void simulateStepDownOnIsolation();
-
- /**
- * Asserts that calling start(configDoc, selfHost) successfully initiates the
- * ReplicationCoordinator under test.
- */
- void assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost);
-
- /**
- * Shuts down the objects under test.
- */
- void shutdown();
-
- /**
- * Returns the number of collected log lines containing "needle".
- */
- int64_t countLogLinesContaining(const std::string& needle);
-
- private:
- std::unique_ptr<ReplicationCoordinatorImpl> _repl;
- // Owned by ReplicationCoordinatorImpl
- TopologyCoordinatorImpl* _topo;
- // Owned by ReplicationCoordinatorImpl
- executor::NetworkInterfaceMock* _net;
- // Owned by ReplicationCoordinatorImpl
- StorageInterfaceMock* _storage;
- // Owned by ReplicationCoordinatorImpl
- ReplicationCoordinatorExternalStateMock* _externalState;
- ReplSettings _settings;
- bool _callShutdown;
- };
+class ReplicaSetConfig;
+class ReplicationCoordinatorExternalStateMock;
+class ReplicationCoordinatorImpl;
+class StorageInterfaceMock;
+class TopologyCoordinatorImpl;
+
+/**
+ * Fixture for testing ReplicationCoordinatorImpl behaviors.
+ */
+class ReplCoordTest : public mongo::unittest::Test {
+public:
+ /**
+ * Makes a ResponseStatus with the given "doc" response and optional elapsed time "millis".
+ */
+ static ResponseStatus makeResponseStatus(const BSONObj& doc,
+ Milliseconds millis = Milliseconds(0));
+
+ /**
+ * Constructs a ReplicaSetConfig from the given BSON, or raises a test failure exception.
+ */
+ static ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBSON);
+
+ ReplCoordTest();
+ virtual ~ReplCoordTest();
+
+protected:
+ virtual void setUp();
+ virtual void tearDown();
+
+ /**
+ * Gets the network mock.
+ */
+ executor::NetworkInterfaceMock* getNet() {
+ return _net;
+ }
+
+ /**
+ * Gets the replication coordinator under test.
+ */
+ ReplicationCoordinatorImpl* getReplCoord() {
+ return _repl.get();
+ }
+
+ /**
+ * Gets the topology coordinator used by the replication coordinator under test.
+ */
+ TopologyCoordinatorImpl& getTopoCoord() {
+ return *_topo;
+ }
+
+ /**
+ * Gets the external state used by the replication coordinator under test.
+ */
+ ReplicationCoordinatorExternalStateMock* getExternalState() {
+ return _externalState;
+ }
+
+ /**
+ * Adds "selfHost" to the list of hosts that identify as "this" host.
+ */
+ void addSelf(const HostAndPort& selfHost);
+
+ /**
+     * Moves time forward in the network until newTime, and asserts that now == newTime afterward.
+ */
+ void assertRunUntil(Date_t newTime);
+
+ /**
+ * Shorthand for getNet()->enterNetwork()
+ */
+ void enterNetwork();
+
+ /**
+ * Shorthand for getNet()->exitNetwork()
+ */
+ void exitNetwork();
+
+ /**
+     * Initializes the objects under test; calling this explicitly is optional, in case you
+     * need to call any methods on the network or coordinator objects before calling start().
+ */
+ void init();
+
+ /**
+ * Initializes the objects under test, using the given "settings".
+ */
+ void init(const ReplSettings& settings);
+
+ /**
+ * Initializes the objects under test, using "replSet" as the name of the replica set under
+ * test.
+ */
+ void init(const std::string& replSet);
+
+ /**
+ * Starts the replication coordinator under test, with no local config document and
+ * no notion of what host or hosts are represented by the network interface.
+ */
+ void start();
+
+ /**
+ * Starts the replication coordinator under test, with the given configuration in
+ * local storage and the given host name.
+ */
+ void start(const BSONObj& configDoc, const HostAndPort& selfHost);
+
+ /**
+ * Starts the replication coordinator under test with the given host name.
+ */
+ void start(const HostAndPort& selfHost);
+
+ /**
+ * Brings the repl coord from SECONDARY to PRIMARY by simulating the messages required to
+ * elect it.
+ *
+     * Behavior is unspecified if the node does not have a clean config, is not in SECONDARY, etc.
+ */
+ void simulateSuccessfulElection();
+ void simulateSuccessfulV1Election();
+
+ /**
+ * Brings the repl coord from PRIMARY to SECONDARY by simulating a period of time in which
+ * all heartbeats respond with an error condition, such as time out.
+ */
+ void simulateStepDownOnIsolation();
+
+ /**
+ * Asserts that calling start(configDoc, selfHost) successfully initiates the
+ * ReplicationCoordinator under test.
+ */
+ void assertStartSuccess(const BSONObj& configDoc, const HostAndPort& selfHost);
+
+ /**
+ * Shuts down the objects under test.
+ */
+ void shutdown();
+
+ /**
+ * Returns the number of collected log lines containing "needle".
+ */
+ int64_t countLogLinesContaining(const std::string& needle);
+
+private:
+ std::unique_ptr<ReplicationCoordinatorImpl> _repl;
+ // Owned by ReplicationCoordinatorImpl
+ TopologyCoordinatorImpl* _topo;
+ // Owned by ReplicationCoordinatorImpl
+ executor::NetworkInterfaceMock* _net;
+ // Owned by ReplicationCoordinatorImpl
+ StorageInterfaceMock* _storage;
+ // Owned by ReplicationCoordinatorImpl
+ ReplicationCoordinatorExternalStateMock* _externalState;
+ ReplSettings _settings;
+ bool _callShutdown;
+};
} // namespace repl
} // namespace mongo
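
A minimal sketch of how a test might drive this fixture; the test name and replica set config are illustrative, and setFollowerMode() is an assumption about the coordinator API rather than something declared in this header:

    TEST_F(ReplCoordTest, BecomePrimarySketch) {
        assertStartSuccess(BSON("_id" << "mySet" << "version" << 1 << "members"
                                      << BSON_ARRAY(BSON("_id" << 0 << "host" << "node1:12345")
                                                    << BSON("_id" << 1 << "host" << "node2:12345")
                                                    << BSON("_id" << 2 << "host" << "node3:12345"))),
                           HostAndPort("node1", 12345));
        getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);  // assumed API, not in this patch
        simulateSuccessfulElection();  // answers heartbeats, replSetFresh, and replSetElect
        ASSERT(getReplCoord()->getMemberState().primary());
    }
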
diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp
index a944e76751f..f6f04e51a8d 100644
--- a/src/mongo/db/repl/replication_executor.cpp
+++ b/src/mongo/db/repl/replication_executor.cpp
@@ -46,563 +46,515 @@ namespace mongo {
namespace repl {
namespace {
- stdx::function<void ()> makeNoExcept(const stdx::function<void ()> &fn);
+stdx::function<void()> makeNoExcept(const stdx::function<void()>& fn);
} // namespace
- using executor::NetworkInterface;
-
- ReplicationExecutor::ReplicationExecutor(NetworkInterface* netInterface,
- StorageInterface* storageInterface,
- int64_t prngSeed) :
- _random(prngSeed),
- _networkInterface(netInterface),
- _storageInterface(storageInterface),
- _totalEventWaiters(0),
- _inShutdown(false),
- _dblockWorkers(OldThreadPool::DoNotStartThreadsTag(),
- 3,
- "replExecDBWorker-"),
- _dblockTaskRunner(
- &_dblockWorkers,
- stdx::bind(&StorageInterface::createOperationContext, storageInterface)),
- _dblockExclusiveLockTaskRunner(
- &_dblockWorkers,
- stdx::bind(&StorageInterface::createOperationContext, storageInterface)),
- _nextId(0) {
- }
-
- ReplicationExecutor::~ReplicationExecutor() {}
-
- std::string ReplicationExecutor::getDiagnosticString() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _getDiagnosticString_inlock();
- }
-
- std::string ReplicationExecutor::_getDiagnosticString_inlock() const {
- str::stream output;
- output << "ReplicationExecutor";
- output << " networkInProgress:" << _networkInProgressQueue.size();
- output << " dbWorkInProgress:" << _dbWorkInProgressQueue.size();
- output << " exclusiveInProgress:" << _exclusiveLockInProgressQueue.size();
- output << " sleeperQueue:" << _sleepersQueue.size();
- output << " ready:" << _readyQueue.size();
- output << " free:" << _freeQueue.size();
- output << " unsignaledEvents:" << _unsignaledEvents.size();
- output << " eventWaiters:" << _totalEventWaiters;
- output << " shuttingDown:" << _inShutdown;
- output << " networkInterface:" << _networkInterface->getDiagnosticString();
- return output;
- }
-
- Date_t ReplicationExecutor::now() {
- return _networkInterface->now();
- }
-
- void ReplicationExecutor::run() {
- setThreadName("ReplicationExecutor");
- _networkInterface->startup();
- _dblockWorkers.startThreads();
- std::pair<WorkItem, CallbackHandle> work;
- while ((work = getWork()).first.callback.isValid()) {
- {
- stdx::lock_guard<stdx::mutex> lk(_terribleExLockSyncMutex);
- const Callback* callback = _getCallbackFromHandle(work.first.callback);
- const Status inStatus = callback->_isCanceled ?
- Status(ErrorCodes::CallbackCanceled, "Callback canceled") :
- Status::OK();
- makeNoExcept(stdx::bind(callback->_callbackFn,
- CallbackArgs(this, work.second, inStatus)))();
- }
- signalEvent(work.first.finishedEvent);
- }
- finishShutdown();
- _networkInterface->shutdown();
- }
-
- void ReplicationExecutor::shutdown() {
- // Correct shutdown needs to:
- // * Disable future work queueing.
- // * drain all of the unsignaled events, sleepers, and ready queue, by running those
- // callbacks with a "shutdown" or "canceled" status.
- // * Signal all threads blocked in waitForEvent, and wait for them to return from that method.
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _inShutdown = true;
-
- _readyQueue.splice(_readyQueue.end(), _dbWorkInProgressQueue);
- _readyQueue.splice(_readyQueue.end(), _exclusiveLockInProgressQueue);
- _readyQueue.splice(_readyQueue.end(), _networkInProgressQueue);
- _readyQueue.splice(_readyQueue.end(), _sleepersQueue);
- for (auto event : _unsignaledEvents) {
- _readyQueue.splice(_readyQueue.end(), _getEventFromHandle(event)->_waiters);
- }
- for (auto readyWork : _readyQueue) {
- _getCallbackFromHandle(readyWork.callback)->_isCanceled = true;
- }
- _networkInterface->signalWorkAvailable();
- }
-
- void ReplicationExecutor::finishShutdown() {
- _dblockExclusiveLockTaskRunner.cancel();
- _dblockTaskRunner.cancel();
- _dblockWorkers.join();
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- invariant(_inShutdown);
- invariant(_dbWorkInProgressQueue.empty());
- invariant(_exclusiveLockInProgressQueue.empty());
- invariant(_readyQueue.empty());
- invariant(_sleepersQueue.empty());
-
- while (!_unsignaledEvents.empty()) {
- EventList::iterator eventIter = _unsignaledEvents.begin();
- invariant(_getEventFromHandle(*eventIter)->_waiters.empty());
- signalEvent_inlock(*eventIter);
- }
-
- while (_totalEventWaiters > 0)
- _noMoreWaitingThreads.wait(lk);
-
- invariant(_dbWorkInProgressQueue.empty());
- invariant(_exclusiveLockInProgressQueue.empty());
- invariant(_readyQueue.empty());
- invariant(_sleepersQueue.empty());
- invariant(_unsignaledEvents.empty());
- }
-
- void ReplicationExecutor::maybeNotifyShutdownComplete_inlock() {
- if (_totalEventWaiters == 0)
- _noMoreWaitingThreads.notify_all();
- }
-
- StatusWith<ReplicationExecutor::EventHandle> ReplicationExecutor::makeEvent() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return makeEvent_inlock();
- }
-
- StatusWith<ReplicationExecutor::EventHandle> ReplicationExecutor::makeEvent_inlock() {
- if (_inShutdown)
- return StatusWith<EventHandle>(ErrorCodes::ShutdownInProgress, "Shutdown in progress");
-
- _unsignaledEvents.emplace_back();
- auto event = std::make_shared<Event>(this, --_unsignaledEvents.end());
- setEventForHandle(&_unsignaledEvents.back(), std::move(event));
- return _unsignaledEvents.back();
- }
-
- void ReplicationExecutor::signalEvent(const EventHandle& eventHandle) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- signalEvent_inlock(eventHandle);
- }
-
- void ReplicationExecutor::signalEvent_inlock(const EventHandle& eventHandle) {
- Event* event = _getEventFromHandle(eventHandle);
- event->_signal_inlock();
- _unsignaledEvents.erase(event->_iter);
- }
-
- void ReplicationExecutor::waitForEvent(const EventHandle& event) {
- _getEventFromHandle(event)->waitUntilSignaled();
- }
-
- void ReplicationExecutor::cancel(const CallbackHandle& cbHandle) {
- _getCallbackFromHandle(cbHandle)->cancel();
- };
-
- void ReplicationExecutor::wait(const CallbackHandle& cbHandle) {
- _getCallbackFromHandle(cbHandle)->waitForCompletion();
- };
-
- StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::onEvent(
- const EventHandle& eventHandle,
- const CallbackFn& work) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- WorkQueue* queue = &_readyQueue;
- Event* event = _getEventFromHandle(eventHandle);
- if (!event->_isSignaled) {
- queue = &event->_waiters;
- }
- else {
- queue = &_readyQueue;
- }
- return enqueueWork_inlock(queue, work);
- }
-
- static void remoteCommandFinished(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicationExecutor::RemoteCommandCallbackFn& cb,
- const RemoteCommandRequest& request,
- const ResponseStatus& response) {
-
- if (cbData.status.isOK()) {
- cb(ReplicationExecutor::RemoteCommandCallbackArgs(
- cbData.executor, cbData.myHandle, request, response));
- }
- else {
- cb(ReplicationExecutor::RemoteCommandCallbackArgs(
- cbData.executor,
- cbData.myHandle,
- request,
- ResponseStatus(cbData.status)));
+using executor::NetworkInterface;
+
+ReplicationExecutor::ReplicationExecutor(NetworkInterface* netInterface,
+ StorageInterface* storageInterface,
+ int64_t prngSeed)
+ : _random(prngSeed),
+ _networkInterface(netInterface),
+ _storageInterface(storageInterface),
+ _totalEventWaiters(0),
+ _inShutdown(false),
+ _dblockWorkers(OldThreadPool::DoNotStartThreadsTag(), 3, "replExecDBWorker-"),
+ _dblockTaskRunner(&_dblockWorkers,
+ stdx::bind(&StorageInterface::createOperationContext, storageInterface)),
+ _dblockExclusiveLockTaskRunner(
+ &_dblockWorkers, stdx::bind(&StorageInterface::createOperationContext, storageInterface)),
+ _nextId(0) {}
+
+ReplicationExecutor::~ReplicationExecutor() {}
+
+std::string ReplicationExecutor::getDiagnosticString() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _getDiagnosticString_inlock();
+}
+
+std::string ReplicationExecutor::_getDiagnosticString_inlock() const {
+ str::stream output;
+ output << "ReplicationExecutor";
+ output << " networkInProgress:" << _networkInProgressQueue.size();
+ output << " dbWorkInProgress:" << _dbWorkInProgressQueue.size();
+ output << " exclusiveInProgress:" << _exclusiveLockInProgressQueue.size();
+ output << " sleeperQueue:" << _sleepersQueue.size();
+ output << " ready:" << _readyQueue.size();
+ output << " free:" << _freeQueue.size();
+ output << " unsignaledEvents:" << _unsignaledEvents.size();
+ output << " eventWaiters:" << _totalEventWaiters;
+ output << " shuttingDown:" << _inShutdown;
+ output << " networkInterface:" << _networkInterface->getDiagnosticString();
+ return output;
+}
+
+Date_t ReplicationExecutor::now() {
+ return _networkInterface->now();
+}
+
+void ReplicationExecutor::run() {
+ setThreadName("ReplicationExecutor");
+ _networkInterface->startup();
+ _dblockWorkers.startThreads();
+ std::pair<WorkItem, CallbackHandle> work;
+ while ((work = getWork()).first.callback.isValid()) {
+ {
+ stdx::lock_guard<stdx::mutex> lk(_terribleExLockSyncMutex);
+ const Callback* callback = _getCallbackFromHandle(work.first.callback);
+ const Status inStatus = callback->_isCanceled
+ ? Status(ErrorCodes::CallbackCanceled, "Callback canceled")
+ : Status::OK();
+ makeNoExcept(
+ stdx::bind(callback->_callbackFn, CallbackArgs(this, work.second, inStatus)))();
}
- }
-
- static void remoteCommandFailedEarly(
- const ReplicationExecutor::CallbackArgs& cbData,
- const ReplicationExecutor::RemoteCommandCallbackFn& cb,
- const RemoteCommandRequest& request) {
-
- invariant(!cbData.status.isOK());
+ signalEvent(work.first.finishedEvent);
+ }
+ finishShutdown();
+ _networkInterface->shutdown();
+}
+
+void ReplicationExecutor::shutdown() {
+ // Correct shutdown needs to:
+ // * Disable future work queueing.
+ // * drain all of the unsignaled events, sleepers, and ready queue, by running those
+ // callbacks with a "shutdown" or "canceled" status.
+ // * Signal all threads blocked in waitForEvent, and wait for them to return from that method.
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _inShutdown = true;
+
+ _readyQueue.splice(_readyQueue.end(), _dbWorkInProgressQueue);
+ _readyQueue.splice(_readyQueue.end(), _exclusiveLockInProgressQueue);
+ _readyQueue.splice(_readyQueue.end(), _networkInProgressQueue);
+ _readyQueue.splice(_readyQueue.end(), _sleepersQueue);
+ for (auto event : _unsignaledEvents) {
+ _readyQueue.splice(_readyQueue.end(), _getEventFromHandle(event)->_waiters);
+ }
+ for (auto readyWork : _readyQueue) {
+ _getCallbackFromHandle(readyWork.callback)->_isCanceled = true;
+ }
+ _networkInterface->signalWorkAvailable();
+}
+
+void ReplicationExecutor::finishShutdown() {
+ _dblockExclusiveLockTaskRunner.cancel();
+ _dblockTaskRunner.cancel();
+ _dblockWorkers.join();
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ invariant(_inShutdown);
+ invariant(_dbWorkInProgressQueue.empty());
+ invariant(_exclusiveLockInProgressQueue.empty());
+ invariant(_readyQueue.empty());
+ invariant(_sleepersQueue.empty());
+
+ while (!_unsignaledEvents.empty()) {
+ EventList::iterator eventIter = _unsignaledEvents.begin();
+ invariant(_getEventFromHandle(*eventIter)->_waiters.empty());
+ signalEvent_inlock(*eventIter);
+ }
+
+ while (_totalEventWaiters > 0)
+ _noMoreWaitingThreads.wait(lk);
+
+ invariant(_dbWorkInProgressQueue.empty());
+ invariant(_exclusiveLockInProgressQueue.empty());
+ invariant(_readyQueue.empty());
+ invariant(_sleepersQueue.empty());
+ invariant(_unsignaledEvents.empty());
+}
+
+void ReplicationExecutor::maybeNotifyShutdownComplete_inlock() {
+ if (_totalEventWaiters == 0)
+ _noMoreWaitingThreads.notify_all();
+}
+
+StatusWith<ReplicationExecutor::EventHandle> ReplicationExecutor::makeEvent() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return makeEvent_inlock();
+}
+
+StatusWith<ReplicationExecutor::EventHandle> ReplicationExecutor::makeEvent_inlock() {
+ if (_inShutdown)
+ return StatusWith<EventHandle>(ErrorCodes::ShutdownInProgress, "Shutdown in progress");
+
+ _unsignaledEvents.emplace_back();
+ auto event = std::make_shared<Event>(this, --_unsignaledEvents.end());
+ setEventForHandle(&_unsignaledEvents.back(), std::move(event));
+ return _unsignaledEvents.back();
+}
+
+void ReplicationExecutor::signalEvent(const EventHandle& eventHandle) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ signalEvent_inlock(eventHandle);
+}
+
+void ReplicationExecutor::signalEvent_inlock(const EventHandle& eventHandle) {
+ Event* event = _getEventFromHandle(eventHandle);
+ event->_signal_inlock();
+ _unsignaledEvents.erase(event->_iter);
+}
+
+void ReplicationExecutor::waitForEvent(const EventHandle& event) {
+ _getEventFromHandle(event)->waitUntilSignaled();
+}
+
+void ReplicationExecutor::cancel(const CallbackHandle& cbHandle) {
+ _getCallbackFromHandle(cbHandle)->cancel();
+};
+
+void ReplicationExecutor::wait(const CallbackHandle& cbHandle) {
+ _getCallbackFromHandle(cbHandle)->waitForCompletion();
+};
+
+StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::onEvent(
+ const EventHandle& eventHandle, const CallbackFn& work) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ WorkQueue* queue = &_readyQueue;
+ Event* event = _getEventFromHandle(eventHandle);
+ if (!event->_isSignaled) {
+ queue = &event->_waiters;
+ } else {
+ queue = &_readyQueue;
+ }
+ return enqueueWork_inlock(queue, work);
+}
+
+static void remoteCommandFinished(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicationExecutor::RemoteCommandCallbackFn& cb,
+ const RemoteCommandRequest& request,
+ const ResponseStatus& response) {
+ if (cbData.status.isOK()) {
cb(ReplicationExecutor::RemoteCommandCallbackArgs(
- cbData.executor,
- cbData.myHandle,
- request,
- ResponseStatus(cbData.status)));
- }
-
- void ReplicationExecutor::_finishRemoteCommand(
- const RemoteCommandRequest& request,
- const ResponseStatus& response,
- const CallbackHandle& cbHandle,
- const uint64_t expectedHandleGeneration,
- const RemoteCommandCallbackFn& cb) {
-
- Callback* callback = _getCallbackFromHandle(cbHandle);
- const WorkQueue::iterator iter = callback->_iter;
-
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (_inShutdown) {
- return;
- }
-
- if (expectedHandleGeneration != iter->generation) {
- return;
- }
-
- LOG(4) << "Received remote response: "
- << (response.isOK() ? response.getValue().toString() :
- response.getStatus().toString());
-
- callback->_callbackFn = stdx::bind(remoteCommandFinished,
- stdx::placeholders::_1,
- cb,
- request,
- response);
- _readyQueue.splice(_readyQueue.end(), _networkInProgressQueue, iter);
- }
-
- StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleRemoteCommand(
- const RemoteCommandRequest& request,
- const RemoteCommandCallbackFn& cb) {
- RemoteCommandRequest scheduledRequest = request;
- if (request.timeout == RemoteCommandRequest::kNoTimeout) {
- scheduledRequest.expirationDate = RemoteCommandRequest::kNoExpirationDate;
- }
- else {
- scheduledRequest.expirationDate = _networkInterface->now() + scheduledRequest.timeout;
- }
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- StatusWith<CallbackHandle> handle = enqueueWork_inlock(
- &_networkInProgressQueue,
- stdx::bind(remoteCommandFailedEarly,
- stdx::placeholders::_1,
- cb,
- scheduledRequest));
- if (handle.isOK()) {
- _getCallbackFromHandle(handle.getValue())->_iter->isNetworkOperation = true;
-
- LOG(4) << "Scheduling remote request: " << request.toString();
-
- _networkInterface->startCommand(
- handle.getValue(),
- scheduledRequest,
- stdx::bind(&ReplicationExecutor::_finishRemoteCommand,
+ cbData.executor, cbData.myHandle, request, response));
+ } else {
+ cb(ReplicationExecutor::RemoteCommandCallbackArgs(
+ cbData.executor, cbData.myHandle, request, ResponseStatus(cbData.status)));
+ }
+}
+
+static void remoteCommandFailedEarly(const ReplicationExecutor::CallbackArgs& cbData,
+ const ReplicationExecutor::RemoteCommandCallbackFn& cb,
+ const RemoteCommandRequest& request) {
+ invariant(!cbData.status.isOK());
+ cb(ReplicationExecutor::RemoteCommandCallbackArgs(
+ cbData.executor, cbData.myHandle, request, ResponseStatus(cbData.status)));
+}
+
+void ReplicationExecutor::_finishRemoteCommand(const RemoteCommandRequest& request,
+ const ResponseStatus& response,
+ const CallbackHandle& cbHandle,
+ const uint64_t expectedHandleGeneration,
+ const RemoteCommandCallbackFn& cb) {
+ Callback* callback = _getCallbackFromHandle(cbHandle);
+ const WorkQueue::iterator iter = callback->_iter;
+
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (_inShutdown) {
+ return;
+ }
+
+ if (expectedHandleGeneration != iter->generation) {
+ return;
+ }
+
+ LOG(4) << "Received remote response: " << (response.isOK() ? response.getValue().toString()
+ : response.getStatus().toString());
+
+ callback->_callbackFn =
+ stdx::bind(remoteCommandFinished, stdx::placeholders::_1, cb, request, response);
+ _readyQueue.splice(_readyQueue.end(), _networkInProgressQueue, iter);
+}
+
+StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleRemoteCommand(
+ const RemoteCommandRequest& request, const RemoteCommandCallbackFn& cb) {
+ RemoteCommandRequest scheduledRequest = request;
+ if (request.timeout == RemoteCommandRequest::kNoTimeout) {
+ scheduledRequest.expirationDate = RemoteCommandRequest::kNoExpirationDate;
+ } else {
+ scheduledRequest.expirationDate = _networkInterface->now() + scheduledRequest.timeout;
+ }
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ StatusWith<CallbackHandle> handle = enqueueWork_inlock(
+ &_networkInProgressQueue,
+ stdx::bind(remoteCommandFailedEarly, stdx::placeholders::_1, cb, scheduledRequest));
+ if (handle.isOK()) {
+ _getCallbackFromHandle(handle.getValue())->_iter->isNetworkOperation = true;
+
+ LOG(4) << "Scheduling remote request: " << request.toString();
+
+ _networkInterface->startCommand(
+ handle.getValue(),
+ scheduledRequest,
+ stdx::bind(&ReplicationExecutor::_finishRemoteCommand,
+ this,
+ scheduledRequest,
+ stdx::placeholders::_1,
+ handle.getValue(),
+ _getCallbackFromHandle(handle.getValue())->_iter->generation,
+ cb));
+ }
+ return handle;
+}
+
+StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleWork(
+ const CallbackFn& work) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _networkInterface->signalWorkAvailable();
+ return enqueueWork_inlock(&_readyQueue, work);
+}
+
+StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleWorkAt(
+ Date_t when, const CallbackFn& work) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ WorkQueue temp;
+ StatusWith<CallbackHandle> cbHandle = enqueueWork_inlock(&temp, work);
+ if (!cbHandle.isOK())
+ return cbHandle;
+ _getCallbackFromHandle(cbHandle.getValue())->_iter->readyDate = when;
+ WorkQueue::iterator insertBefore = _sleepersQueue.begin();
+ while (insertBefore != _sleepersQueue.end() && insertBefore->readyDate <= when)
+ ++insertBefore;
+ _sleepersQueue.splice(insertBefore, temp, temp.begin());
+ return cbHandle;
+}
+
+StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleDBWork(
+ const CallbackFn& work) {
+ return scheduleDBWork(work, NamespaceString(), MODE_NONE);
+}
+
+StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleDBWork(
+ const CallbackFn& work, const NamespaceString& nss, LockMode mode) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ StatusWith<CallbackHandle> handle = enqueueWork_inlock(&_dbWorkInProgressQueue, work);
+ if (handle.isOK()) {
+ auto doOp = stdx::bind(&ReplicationExecutor::_doOperation,
this,
- scheduledRequest,
stdx::placeholders::_1,
+ stdx::placeholders::_2,
handle.getValue(),
- _getCallbackFromHandle(handle.getValue())->_iter->generation,
- cb));
- }
- return handle;
- }
-
- StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleWork(
- const CallbackFn& work) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _networkInterface->signalWorkAvailable();
- return enqueueWork_inlock(&_readyQueue, work);
- }
-
- StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::scheduleWorkAt(
- Date_t when,
- const CallbackFn& work) {
-
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- WorkQueue temp;
- StatusWith<CallbackHandle> cbHandle = enqueueWork_inlock(&temp, work);
- if (!cbHandle.isOK())
- return cbHandle;
- _getCallbackFromHandle(cbHandle.getValue())->_iter->readyDate = when;
- WorkQueue::iterator insertBefore = _sleepersQueue.begin();
- while (insertBefore != _sleepersQueue.end() && insertBefore->readyDate <= when)
- ++insertBefore;
- _sleepersQueue.splice(insertBefore, temp, temp.begin());
- return cbHandle;
- }
-
- StatusWith<ReplicationExecutor::CallbackHandle>
- ReplicationExecutor::scheduleDBWork(const CallbackFn& work) {
- return scheduleDBWork(work, NamespaceString(), MODE_NONE);
- }
-
- StatusWith<ReplicationExecutor::CallbackHandle>
- ReplicationExecutor::scheduleDBWork(const CallbackFn& work,
- const NamespaceString& nss,
- LockMode mode) {
-
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- StatusWith<CallbackHandle> handle = enqueueWork_inlock(&_dbWorkInProgressQueue,
- work);
- if (handle.isOK()) {
- auto doOp = stdx::bind(
- &ReplicationExecutor::_doOperation,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2,
- handle.getValue(),
- &_dbWorkInProgressQueue,
- nullptr);
- auto task = [doOp](OperationContext* txn, const Status& status) {
- makeNoExcept(stdx::bind(doOp, txn, status))();
- return TaskRunner::NextAction::kDisposeOperationContext;
- };
- if (mode == MODE_NONE && nss.ns().empty()) {
- _dblockTaskRunner.schedule(task);
- }
- else {
- _dblockTaskRunner.schedule(DatabaseTask::makeCollectionLockTask(task, nss, mode));
- }
- }
- return handle;
- }
-
- void ReplicationExecutor::_doOperation(OperationContext* txn,
- const Status& taskRunnerStatus,
- const CallbackHandle& cbHandle,
- WorkQueue* workQueue,
- stdx::mutex* terribleExLockSyncMutex) {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (_inShutdown)
- return;
- Callback* callback = _getCallbackFromHandle(cbHandle);
- const WorkQueue::iterator iter = callback->_iter;
- iter->callback = CallbackHandle();
- _freeQueue.splice(_freeQueue.begin(), *workQueue, iter);
- lk.unlock();
- {
- std::unique_ptr<stdx::lock_guard<stdx::mutex> > terribleLock(
- terribleExLockSyncMutex ?
- new stdx::lock_guard<stdx::mutex>(*terribleExLockSyncMutex) :
- nullptr);
- // Only possible task runner error status is CallbackCanceled.
- callback->_callbackFn(CallbackArgs(this,
- cbHandle,
- (callback->_isCanceled || !taskRunnerStatus.isOK() ?
- Status(ErrorCodes::CallbackCanceled,
- "Callback canceled") :
- Status::OK()),
- txn));
+ &_dbWorkInProgressQueue,
+ nullptr);
+ auto task = [doOp](OperationContext* txn, const Status& status) {
+ makeNoExcept(stdx::bind(doOp, txn, status))();
+ return TaskRunner::NextAction::kDisposeOperationContext;
+ };
+ if (mode == MODE_NONE && nss.ns().empty()) {
+ _dblockTaskRunner.schedule(task);
+ } else {
+ _dblockTaskRunner.schedule(DatabaseTask::makeCollectionLockTask(task, nss, mode));
}
- lk.lock();
- signalEvent_inlock(callback->_finishedEvent);
}
-
- StatusWith<ReplicationExecutor::CallbackHandle>
- ReplicationExecutor::scheduleWorkWithGlobalExclusiveLock(
- const CallbackFn& work) {
-
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- StatusWith<CallbackHandle> handle = enqueueWork_inlock(&_exclusiveLockInProgressQueue,
- work);
- if (handle.isOK()) {
- auto doOp = stdx::bind(
- &ReplicationExecutor::_doOperation,
- this,
- stdx::placeholders::_1,
- stdx::placeholders::_2,
- handle.getValue(),
- &_exclusiveLockInProgressQueue,
- &_terribleExLockSyncMutex);
- _dblockExclusiveLockTaskRunner.schedule(
- DatabaseTask::makeGlobalExclusiveLockTask(
- [doOp](OperationContext* txn, const Status& status) {
+ return handle;
+}
+
+void ReplicationExecutor::_doOperation(OperationContext* txn,
+ const Status& taskRunnerStatus,
+ const CallbackHandle& cbHandle,
+ WorkQueue* workQueue,
+ stdx::mutex* terribleExLockSyncMutex) {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (_inShutdown)
+ return;
+ Callback* callback = _getCallbackFromHandle(cbHandle);
+ const WorkQueue::iterator iter = callback->_iter;
+ iter->callback = CallbackHandle();
+ _freeQueue.splice(_freeQueue.begin(), *workQueue, iter);
+ lk.unlock();
+ {
+ std::unique_ptr<stdx::lock_guard<stdx::mutex>> terribleLock(
+ terribleExLockSyncMutex ? new stdx::lock_guard<stdx::mutex>(*terribleExLockSyncMutex)
+ : nullptr);
+ // Only possible task runner error status is CallbackCanceled.
+ callback->_callbackFn(
+ CallbackArgs(this,
+ cbHandle,
+ (callback->_isCanceled || !taskRunnerStatus.isOK()
+ ? Status(ErrorCodes::CallbackCanceled, "Callback canceled")
+ : Status::OK()),
+ txn));
+ }
+ lk.lock();
+ signalEvent_inlock(callback->_finishedEvent);
+}
+
+StatusWith<ReplicationExecutor::CallbackHandle>
+ReplicationExecutor::scheduleWorkWithGlobalExclusiveLock(const CallbackFn& work) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ StatusWith<CallbackHandle> handle = enqueueWork_inlock(&_exclusiveLockInProgressQueue, work);
+ if (handle.isOK()) {
+ auto doOp = stdx::bind(&ReplicationExecutor::_doOperation,
+ this,
+ stdx::placeholders::_1,
+ stdx::placeholders::_2,
+ handle.getValue(),
+ &_exclusiveLockInProgressQueue,
+ &_terribleExLockSyncMutex);
+ _dblockExclusiveLockTaskRunner.schedule(DatabaseTask::makeGlobalExclusiveLockTask(
+ [doOp](OperationContext* txn, const Status& status) {
makeNoExcept(stdx::bind(doOp, txn, status))();
return TaskRunner::NextAction::kDisposeOperationContext;
}));
- }
- return handle;
- }
-
- std::pair<ReplicationExecutor::WorkItem, ReplicationExecutor::CallbackHandle>
- ReplicationExecutor::getWork() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- while (true) {
- const Date_t now = _networkInterface->now();
- Date_t nextWakeupDate = scheduleReadySleepers_inlock(now);
- if (!_readyQueue.empty()) {
- break;
- }
- else if (_inShutdown) {
- return std::make_pair(WorkItem(), CallbackHandle());
- }
- lk.unlock();
- if (nextWakeupDate == Date_t::max()) {
- _networkInterface->waitForWork();
- }
- else {
- _networkInterface->waitForWorkUntil(nextWakeupDate);
- }
- lk.lock();
- }
- const WorkItem work = *_readyQueue.begin();
- const CallbackHandle cbHandle = work.callback;
- _readyQueue.begin()->callback = CallbackHandle();
- _freeQueue.splice(_freeQueue.begin(), _readyQueue, _readyQueue.begin());
- return std::make_pair(work, cbHandle);
- }
-
- int64_t ReplicationExecutor::nextRandomInt64(int64_t limit) {
- return _random.nextInt64(limit);
- }
-
- Date_t ReplicationExecutor::scheduleReadySleepers_inlock(const Date_t now) {
- WorkQueue::iterator iter = _sleepersQueue.begin();
- while ((iter != _sleepersQueue.end()) && (iter->readyDate <= now)) {
- ++iter;
- }
- _readyQueue.splice(_readyQueue.end(), _sleepersQueue, _sleepersQueue.begin(), iter);
- if (iter == _sleepersQueue.end()) {
- // indicate no sleeper to wait for
- return Date_t::max();
- }
- return iter->readyDate;
- }
-
- StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::enqueueWork_inlock(
- WorkQueue* queue, const CallbackFn& callbackFn) {
-
- invariant(callbackFn);
- StatusWith<EventHandle> event = makeEvent_inlock();
- if (!event.isOK())
- return StatusWith<CallbackHandle>(event.getStatus());
-
- if (_freeQueue.empty())
- _freeQueue.push_front(WorkItem());
- const WorkQueue::iterator iter = _freeQueue.begin();
- WorkItem& work = *iter;
-
- invariant(!work.callback.isValid());
- setCallbackForHandle(&work.callback, std::shared_ptr<executor::TaskExecutor::CallbackState>(
- new Callback(this, callbackFn, iter, event.getValue())));
-
- work.generation++;
- work.finishedEvent = event.getValue();
- work.readyDate = Date_t();
- queue->splice(queue->end(), _freeQueue, iter);
- return StatusWith<CallbackHandle>(work.callback);
}
-
- ReplicationExecutor::WorkItem::WorkItem() : generation(0U),
- isNetworkOperation(false) {}
-
- ReplicationExecutor::Event::Event(ReplicationExecutor* executor,
- const EventList::iterator& iter) :
- executor::TaskExecutor::EventState(), _executor(executor), _isSignaled(false), _iter(iter) {}
-
- ReplicationExecutor::Event::~Event() {}
-
- void ReplicationExecutor::Event::signal() {
- // Must go through executor to signal so that this can be removed from the _unsignaledEvents
- // EventList.
- _executor->signalEvent(*_iter);
- }
-
- void ReplicationExecutor::Event::_signal_inlock() {
- invariant(!_isSignaled);
- _isSignaled = true;
-
- if (!_waiters.empty()) {
- _executor->_readyQueue.splice(_executor->_readyQueue.end(), _waiters);
- _executor->_networkInterface->signalWorkAvailable();
+ return handle;
+}
+
+std::pair<ReplicationExecutor::WorkItem, ReplicationExecutor::CallbackHandle>
+ReplicationExecutor::getWork() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ while (true) {
+ const Date_t now = _networkInterface->now();
+ Date_t nextWakeupDate = scheduleReadySleepers_inlock(now);
+ if (!_readyQueue.empty()) {
+ break;
+ } else if (_inShutdown) {
+ return std::make_pair(WorkItem(), CallbackHandle());
}
-
- _isSignaledCondition.notify_all();
- }
-
- void ReplicationExecutor::Event::waitUntilSignaled() {
- stdx::unique_lock<stdx::mutex> lk(_executor->_mutex);
- ++_executor->_totalEventWaiters;
- while (!_isSignaled) {
- _isSignaledCondition.wait(lk);
+ lk.unlock();
+ if (nextWakeupDate == Date_t::max()) {
+ _networkInterface->waitForWork();
+ } else {
+ _networkInterface->waitForWorkUntil(nextWakeupDate);
}
- --_executor->_totalEventWaiters;
- _executor->maybeNotifyShutdownComplete_inlock();
- }
-
- bool ReplicationExecutor::Event::isSignaled() {
- stdx::lock_guard<stdx::mutex> lk(_executor->_mutex);
- return _isSignaled;
+ lk.lock();
}
-
- ReplicationExecutor::Callback::Callback(ReplicationExecutor* executor,
- const CallbackFn callbackFn,
- const WorkQueue::iterator& iter,
- const EventHandle& finishedEvent) :
- executor::TaskExecutor::CallbackState(),
- _executor(executor),
- _callbackFn(callbackFn),
- _isCanceled(false),
- _iter(iter),
- _finishedEvent(finishedEvent) {}
-
- ReplicationExecutor::Callback::~Callback() {}
-
- void ReplicationExecutor::Callback::cancel() {
- stdx::unique_lock<stdx::mutex> lk(_executor->_mutex);
- _isCanceled = true;
- if (_iter->isNetworkOperation) {
- lk.unlock();
- _executor->_networkInterface->cancelCommand(_iter->callback);
- }
+ const WorkItem work = *_readyQueue.begin();
+ const CallbackHandle cbHandle = work.callback;
+ _readyQueue.begin()->callback = CallbackHandle();
+ _freeQueue.splice(_freeQueue.begin(), _readyQueue, _readyQueue.begin());
+ return std::make_pair(work, cbHandle);
+}
+
+int64_t ReplicationExecutor::nextRandomInt64(int64_t limit) {
+ return _random.nextInt64(limit);
+}
+
+Date_t ReplicationExecutor::scheduleReadySleepers_inlock(const Date_t now) {
+ WorkQueue::iterator iter = _sleepersQueue.begin();
+ while ((iter != _sleepersQueue.end()) && (iter->readyDate <= now)) {
+ ++iter;
+ }
+ _readyQueue.splice(_readyQueue.end(), _sleepersQueue, _sleepersQueue.begin(), iter);
+ if (iter == _sleepersQueue.end()) {
+ // indicate no sleeper to wait for
+ return Date_t::max();
+ }
+ return iter->readyDate;
+}
+
+StatusWith<ReplicationExecutor::CallbackHandle> ReplicationExecutor::enqueueWork_inlock(
+ WorkQueue* queue, const CallbackFn& callbackFn) {
+ invariant(callbackFn);
+ StatusWith<EventHandle> event = makeEvent_inlock();
+ if (!event.isOK())
+ return StatusWith<CallbackHandle>(event.getStatus());
+
+ if (_freeQueue.empty())
+ _freeQueue.push_front(WorkItem());
+ const WorkQueue::iterator iter = _freeQueue.begin();
+ WorkItem& work = *iter;
+
+ invariant(!work.callback.isValid());
+ setCallbackForHandle(&work.callback,
+ std::shared_ptr<executor::TaskExecutor::CallbackState>(
+ new Callback(this, callbackFn, iter, event.getValue())));
+
+ work.generation++;
+ work.finishedEvent = event.getValue();
+ work.readyDate = Date_t();
+ queue->splice(queue->end(), _freeQueue, iter);
+ return StatusWith<CallbackHandle>(work.callback);
+}
+
+ReplicationExecutor::WorkItem::WorkItem() : generation(0U), isNetworkOperation(false) {}
+
+ReplicationExecutor::Event::Event(ReplicationExecutor* executor, const EventList::iterator& iter)
+ : executor::TaskExecutor::EventState(), _executor(executor), _isSignaled(false), _iter(iter) {}
+
+ReplicationExecutor::Event::~Event() {}
+
+void ReplicationExecutor::Event::signal() {
+ // Must go through executor to signal so that this can be removed from the _unsignaledEvents
+ // EventList.
+ _executor->signalEvent(*_iter);
+}
+
+void ReplicationExecutor::Event::_signal_inlock() {
+ invariant(!_isSignaled);
+ _isSignaled = true;
+
+ if (!_waiters.empty()) {
+ _executor->_readyQueue.splice(_executor->_readyQueue.end(), _waiters);
+ _executor->_networkInterface->signalWorkAvailable();
+ }
+
+ _isSignaledCondition.notify_all();
+}
+
+void ReplicationExecutor::Event::waitUntilSignaled() {
+ stdx::unique_lock<stdx::mutex> lk(_executor->_mutex);
+ ++_executor->_totalEventWaiters;
+ while (!_isSignaled) {
+ _isSignaledCondition.wait(lk);
+ }
+ --_executor->_totalEventWaiters;
+ _executor->maybeNotifyShutdownComplete_inlock();
+}
+
+bool ReplicationExecutor::Event::isSignaled() {
+ stdx::lock_guard<stdx::mutex> lk(_executor->_mutex);
+ return _isSignaled;
+}
+
+ReplicationExecutor::Callback::Callback(ReplicationExecutor* executor,
+ const CallbackFn callbackFn,
+ const WorkQueue::iterator& iter,
+ const EventHandle& finishedEvent)
+ : executor::TaskExecutor::CallbackState(),
+ _executor(executor),
+ _callbackFn(callbackFn),
+ _isCanceled(false),
+ _iter(iter),
+ _finishedEvent(finishedEvent) {}
+
+ReplicationExecutor::Callback::~Callback() {}
+
+void ReplicationExecutor::Callback::cancel() {
+ stdx::unique_lock<stdx::mutex> lk(_executor->_mutex);
+ _isCanceled = true;
+ if (_iter->isNetworkOperation) {
+ lk.unlock();
+ _executor->_networkInterface->cancelCommand(_iter->callback);
}
+}
- void ReplicationExecutor::Callback::waitForCompletion() {
- _executor->waitForEvent(_finishedEvent);
- }
+void ReplicationExecutor::Callback::waitForCompletion() {
+ _executor->waitForEvent(_finishedEvent);
+}
- ReplicationExecutor::Event* ReplicationExecutor::_getEventFromHandle(
- const EventHandle& eventHandle) {
- return static_cast<Event*>(getEventFromHandle(eventHandle));
- }
+ReplicationExecutor::Event* ReplicationExecutor::_getEventFromHandle(
+ const EventHandle& eventHandle) {
+ return static_cast<Event*>(getEventFromHandle(eventHandle));
+}
- ReplicationExecutor::Callback* ReplicationExecutor::_getCallbackFromHandle(
- const CallbackHandle& callbackHandle) {
- return static_cast<Callback*>(getCallbackFromHandle(callbackHandle));
- }
+ReplicationExecutor::Callback* ReplicationExecutor::_getCallbackFromHandle(
+ const CallbackHandle& callbackHandle) {
+ return static_cast<Callback*>(getCallbackFromHandle(callbackHandle));
+}
namespace {
- void callNoExcept(const stdx::function<void ()>& fn) {
- try {
- fn();
- }
- catch (...) {
- std::terminate();
- }
+void callNoExcept(const stdx::function<void()>& fn) {
+ try {
+ fn();
+ } catch (...) {
+ std::terminate();
}
+}
- stdx::function<void ()> makeNoExcept(const stdx::function<void ()> &fn) {
- return stdx::bind(callNoExcept, fn);
- }
+stdx::function<void()> makeNoExcept(const stdx::function<void()>& fn) {
+ return stdx::bind(callNoExcept, fn);
+}
} // namespace
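
The executor's intended flow is: instantiate, schedule a work item, call run(). A minimal sketch, assuming net and storage are valid NetworkInterface and StorageInterface pointers supplied by the caller (the executor takes ownership of the network interface, per the constructor comment in the header below):

    ReplicationExecutor executor(net, storage, 1 /* prng seed */);

    // Schedule one work item; it runs on the thread that calls run() below.
    auto handle = executor.scheduleWork(
        [&executor](const ReplicationExecutor::CallbackArgs& cbData) {
            if (cbData.status.isOK()) {
                // ... one step of a replication state machine would go here ...
            }
            executor.shutdown();  // request shutdown so that run() returns
        });
    invariant(handle.isOK());
    executor.run();  // executes the callback, then completes shutdown and returns
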
diff --git a/src/mongo/db/repl/replication_executor.h b/src/mongo/db/repl/replication_executor.h
index a6749bb26a4..0dfa97f9cf0 100644
--- a/src/mongo/db/repl/replication_executor.h
+++ b/src/mongo/db/repl/replication_executor.h
@@ -51,357 +51,349 @@
namespace mongo {
- class NamespaceString;
- class OperationContext;
+class NamespaceString;
+class OperationContext;
-namespace executor{
- class NetworkInterface;
-} // namespace executor
+namespace executor {
+class NetworkInterface;
+} // namespace executor
namespace repl {
- class StorageInterface;
+class StorageInterface;
+
+/**
+ * Implementation of the TaskExecutor interface for providing an event loop for driving state
+ * machines in replication.
+ *
+ * Usage: Instantiate an executor, schedule a work item, call run().
+ *
+ * Implementation details:
+ *
+ * The executor is composed of several WorkQueues, which are queues of WorkItems. WorkItems
+ * describe units of work -- a callback and state needed to track its lifecycle. The iterators
+ * pointing to WorkItems are spliced between the WorkQueues, rather than copying WorkItems
+ * themselves. Further, those WorkQueue::iterators are never invalidated during the life of an
+ * executor. They may be recycled to represent new work items, but when that happens, a counter
+ * on the WorkItem is incremented, to disambiguate.
+ *
+ * All work executed by the run() method of the executor is popped off the front of the
+ * _readyQueue. Remote commands blocked on the network can be found in the
+ * _networkInProgressQueue. Callbacks waiting for a timer to expire are in the _sleepersQueue.
+ * When the network returns or the timer expires, items from these two queues are transferred to
+ * the back of the _readyQueue.
+ *
+ * The _exclusiveLockInProgressQueue, which represents work items to execute while holding the
+ * GlobalWrite lock, is exceptional. WorkItems in that queue execute in unspecified order with
+ * respect to work in the _readyQueue or other WorkItems in the _exclusiveLockInProgressQueue,
+ * but they are executed in a single serial order with respect to those other WorkItems. The
+ * _terribleExLockSyncMutex is used to provide this serialization, until such time as the global
+ * lock may be passed from one thread to another.
+ */
+class ReplicationExecutor final : public executor::TaskExecutor {
+ MONGO_DISALLOW_COPYING(ReplicationExecutor);
+
+public:
+ /**
+ * Constructs a new executor.
+ *
+ * Takes ownership of the passed NetworkInterface object.
+ */
+ ReplicationExecutor(executor::NetworkInterface* netInterface,
+ StorageInterface* storageInterface,
+ int64_t pnrgSeed);
+
+ /**
+ * Destroys an executor.
+ */
+ virtual ~ReplicationExecutor();
+
+ std::string getDiagnosticString() override;
+ Date_t now() override;
+ void shutdown() override;
+ void signalEvent(const EventHandle& event) override;
+ StatusWith<EventHandle> makeEvent() override;
+ StatusWith<CallbackHandle> onEvent(const EventHandle& event, const CallbackFn& work) override;
+ void waitForEvent(const EventHandle& event) override;
+ StatusWith<CallbackHandle> scheduleWork(const CallbackFn& work) override;
+ StatusWith<CallbackHandle> scheduleWorkAt(Date_t when, const CallbackFn& work) override;
+ StatusWith<CallbackHandle> scheduleRemoteCommand(const RemoteCommandRequest& request,
+ const RemoteCommandCallbackFn& cb) override;
+ void cancel(const CallbackHandle& cbHandle) override;
+ void wait(const CallbackHandle& cbHandle) override;
+
+
+ /**
+ * Executes the run loop. May be called up to one time.
+ *
+ * Returns after the executor has been shutdown and is safe to delete.
+ */
+ void run();
/**
- * Implementation of the TaskExecutor interface for providing an event loop for driving state
- * machines in replication.
+     * Schedules DB "work" to be run by the executor.
*
- * Usage: Instantiate an executor, schedule a work item, call run().
+ * Takes no locks for caller - global, database or collection.
*
- * Implementation details:
+ * The "work" will run exclusively with other DB work items. All DB work items
+     * are run in the order they are scheduled.
*
- * The executor is composed of several WorkQueues, which are queues of WorkItems. WorkItems
- * describe units of work -- a callback and state needed to track its lifecycle. The iterators
- * pointing to WorkItems are spliced between the WorkQueues, rather than copying WorkItems
- * themselves. Further, those WorkQueue::iterators are never invalidated during the life of an
- * executor. They may be recycled to represent new work items, but when that happens, a counter
- * on the WorkItem is incremented, to disambiguate.
+ * The "work" may run concurrently with other non-DB work items,
+ * but there are no ordering guarantees provided with respect to
+ * any other work item.
*
- * All work executed by the run() method of the executor is popped off the front of the
- * _readyQueue. Remote commands blocked on the network can be found in the
- * _networkInProgressQueue. Callbacks waiting for a timer to expire are in the _sleepersQueue.
- * When the network returns or the timer expires, items from these two queues are transferred to
- * the back of the _readyQueue.
+ * Returns a handle for waiting on or canceling the callback, or
+ * ErrorCodes::ShutdownInProgress.
*
- * The _exclusiveLockInProgressQueue, which represents work items to execute while holding the
- * GlobalWrite lock, is exceptional. WorkItems in that queue execute in unspecified order with
- * respect to work in the _readyQueue or other WorkItems in the _exclusiveLockInProgressQueue,
- * but they are executed in a single serial order with respect to those other WorkItems. The
- * _terribleExLockSyncMutex is used to provide this serialization, until such time as the global
- * lock may be passed from one thread to another.
+ * May be called by client threads or callbacks running in the executor.
*/
- class ReplicationExecutor final : public executor::TaskExecutor {
- MONGO_DISALLOW_COPYING(ReplicationExecutor);
- public:
-
- /**
- * Constructs a new executor.
- *
- * Takes ownership of the passed NetworkInterface object.
- */
- ReplicationExecutor(executor::NetworkInterface* netInterface,
- StorageInterface* storageInterface,
- int64_t pnrgSeed);
-
- /**
- * Destroys an executor.
- */
- virtual ~ReplicationExecutor();
-
- std::string getDiagnosticString() override;
- Date_t now() override;
- void shutdown() override;
- void signalEvent(const EventHandle& event) override;
- StatusWith<EventHandle> makeEvent() override;
- StatusWith<CallbackHandle> onEvent(const EventHandle& event,
- const CallbackFn& work) override;
- void waitForEvent(const EventHandle& event) override;
- StatusWith<CallbackHandle> scheduleWork(const CallbackFn& work) override;
- StatusWith<CallbackHandle> scheduleWorkAt(Date_t when, const CallbackFn& work) override;
- StatusWith<CallbackHandle> scheduleRemoteCommand(
- const RemoteCommandRequest& request,
- const RemoteCommandCallbackFn& cb) override;
- void cancel(const CallbackHandle& cbHandle) override;
- void wait(const CallbackHandle& cbHandle) override;
-
-
- /**
- * Executes the run loop. May be called up to one time.
- *
- * Returns after the executor has been shutdown and is safe to delete.
- */
- void run();
-
- /**
- * Schedules DB "work" to be run by the executor..
- *
- * Takes no locks for caller - global, database or collection.
- *
- * The "work" will run exclusively with other DB work items. All DB work items
- * are run the in order they are scheduled.
- *
- * The "work" may run concurrently with other non-DB work items,
- * but there are no ordering guarantees provided with respect to
- * any other work item.
- *
- * Returns a handle for waiting on or canceling the callback, or
- * ErrorCodes::ShutdownInProgress.
- *
- * May be called by client threads or callbacks running in the executor.
- */
- StatusWith<CallbackHandle> scheduleDBWork(const CallbackFn& work);
-
- /**
- * Schedules DB "work" to be run by the executor while holding the collection lock.
- *
- * Takes collection lock in specified mode (and slightly more permissive lock for the
- * database lock) but not the global exclusive lock.
- *
- * The "work" will run exclusively with other DB work items. All DB work items
- * are run the in order they are scheduled.
- *
- * The "work" may run concurrently with other non-DB work items,
- * but there are no ordering guarantees provided with respect to
- * any other work item.
- *
- * Returns a handle for waiting on or canceling the callback, or
- * ErrorCodes::ShutdownInProgress.
- *
- * May be called by client threads or callbacks running in the executor.
- */
- StatusWith<CallbackHandle> scheduleDBWork(const CallbackFn& work,
- const NamespaceString& nss,
- LockMode mode);
-
- /**
- * Schedules "work" to be run by the executor while holding the global exclusive lock.
- *
- * Takes collection lock in specified mode (and slightly more permissive lock for the
- * database lock) but not the global exclusive lock.
- *
- * The "work" will run exclusively, as though it were executed by the main
- * run loop, but there are no ordering guarantees provided with respect to
- * any other work item.
- *
- * Returns a handle for waiting on or canceling the callback, or
- * ErrorCodes::ShutdownInProgress.
- *
- * May be called by client threads or callbacks running in the executor.
- */
- StatusWith<CallbackHandle> scheduleWorkWithGlobalExclusiveLock(
- const CallbackFn& work);
-
- /**
- * Returns an int64_t generated by the prng with a max value of "limit".
- */
- int64_t nextRandomInt64(int64_t limit);
-
- private:
- class Callback;
- class Event;
- struct WorkItem;
- friend class Callback;
- friend class Event;
-
-
- /**
- * A linked list of WorkItem objects.
- *
- * WorkItems get moved among lists by splicing iterators of work lists together,
- * not by copying underlying WorkItem objects.
- */
- typedef stdx::list<WorkItem> WorkQueue;
-
- /**
- * A linked list of EventHandles.
- */
- typedef stdx::list<EventHandle> EventList;
-
- /**
- * Returns diagnostic info
- */
- std::string _getDiagnosticString_inlock() const;
-
- /**
- * Implementation of makeEvent() for use when _mutex is already held.
- */
- StatusWith<EventHandle> makeEvent_inlock();
-
- /**
- * Implementation of signalEvent() for use when _mutex is already held.
- */
- void signalEvent_inlock(const EventHandle&);
-
- /**
- * Gets a single piece of work to execute.
- *
- * If the "callback" member of the returned WorkItem is falsey, that is a signal
- * to the run loop to wait for shutdown.
- */
- std::pair<WorkItem, CallbackHandle> getWork();
-
- /**
- * Marks as runnable any sleepers whose ready date has passed as of "now".
- * Returns the date when the next sleeper will be ready, or Date_t(~0ULL) if there are no
- * remaining sleepers.
- */
- Date_t scheduleReadySleepers_inlock(Date_t now);
-
- /**
- * Enqueues "callback" into "queue".
- */
- StatusWith<CallbackHandle> enqueueWork_inlock(WorkQueue* queue, const CallbackFn& callback);
-
- /**
- * Notifies interested parties that shutdown has completed, if it has.
- */
- void maybeNotifyShutdownComplete_inlock();
-
- /**
- * Completes the shutdown process. Called by run().
- */
- void finishShutdown();
-
- void _finishRemoteCommand(
- const RemoteCommandRequest& request,
- const StatusWith<RemoteCommandResponse>& response,
- const CallbackHandle& cbHandle,
- const uint64_t expectedHandleGeneration,
- const RemoteCommandCallbackFn& cb);
-
- /**
- * Executes the callback referenced by "cbHandle", and moves the underlying
- * WorkQueue::iterator from "workQueue" into the _freeQueue.
- *
- * "txn" is a pointer to the OperationContext.
- *
- * "status" is the callback status from the task runner. Only possible values are
- * Status::OK and ErrorCodes::CallbackCanceled (when task runner is canceled).
- *
- * If "terribleExLockSyncMutex" is not null, serializes execution of "cbHandle" with the
- * execution of other callbacks.
- */
- void _doOperation(OperationContext* txn,
- const Status& taskRunnerStatus,
- const CallbackHandle& cbHandle,
- WorkQueue* workQueue,
- stdx::mutex* terribleExLockSyncMutex);
-
- /**
- * Wrapper around TaskExecutor::getCallbackFromHandle that return an Event* instead of
- * a generic EventState*.
- */
- Event* _getEventFromHandle(const EventHandle& eventHandle);
-
- /**
- * Wrapper around TaskExecutor::getCallbackFromHandle that return an Event* instead of
- * a generic EventState*.
- */
- Callback* _getCallbackFromHandle(const CallbackHandle& callbackHandle);
-
- // PRNG; seeded at class construction time.
- PseudoRandom _random;
-
- std::unique_ptr<executor::NetworkInterface> _networkInterface;
- std::unique_ptr<StorageInterface> _storageInterface;
- stdx::mutex _mutex;
- stdx::mutex _terribleExLockSyncMutex;
- stdx::condition_variable _noMoreWaitingThreads;
- WorkQueue _freeQueue;
- WorkQueue _readyQueue;
- WorkQueue _dbWorkInProgressQueue;
- WorkQueue _exclusiveLockInProgressQueue;
- WorkQueue _networkInProgressQueue;
- WorkQueue _sleepersQueue;
- EventList _unsignaledEvents;
- int64_t _totalEventWaiters;
- bool _inShutdown;
- OldThreadPool _dblockWorkers;
- TaskRunner _dblockTaskRunner;
- TaskRunner _dblockExclusiveLockTaskRunner;
- uint64_t _nextId;
- };
-
- class ReplicationExecutor::Callback : public executor::TaskExecutor::CallbackState {
- friend class ReplicationExecutor;
-
- public:
-
- Callback(ReplicationExecutor* executor,
- const CallbackFn callbackFn,
- const WorkQueue::iterator& iter,
- const EventHandle& finishedEvent);
- virtual ~Callback();
-
- void cancel() override;
- void waitForCompletion() override;
-
- private:
-
- ReplicationExecutor* _executor;
-
- // All members other than _executor are protected by the executor's _mutex.
- CallbackFn _callbackFn;
- bool _isCanceled;
- WorkQueue::iterator _iter;
- EventHandle _finishedEvent;
- };
-
- typedef ReplicationExecutor::ResponseStatus ResponseStatus;
+ StatusWith<CallbackHandle> scheduleDBWork(const CallbackFn& work);
/**
- * Description of a scheduled but not-yet-run work item.
+ * Schedules DB "work" to be run by the executor while holding the collection lock.
*
- * Once created, WorkItem objects remain in scope until the executor is destroyed.
- * However, over their lifetime, they may represent many different work items. This
- * divorces the lifetime of CallbackHandles from the lifetime of WorkItem objects, but
- * requires a unique generation identifier in CallbackHandles and WorkItem objects.
+ * Takes the collection lock in the specified mode (and a slightly more permissive mode for
+ * the database lock), but not the global exclusive lock.
+ *
+ * The "work" will run exclusively with other DB work items. All DB work items
+ * are run in the order they are scheduled.
+ *
+ * The "work" may run concurrently with other non-DB work items,
+ * but there are no ordering guarantees provided with respect to
+ * any other work item.
+ *
+ * Returns a handle for waiting on or canceling the callback, or
+ * ErrorCodes::ShutdownInProgress.
+ *
+ * May be called by client threads or callbacks running in the executor.
+ */
+ StatusWith<CallbackHandle> scheduleDBWork(const CallbackFn& work,
+ const NamespaceString& nss,
+ LockMode mode);
+
+ /**
+ * Schedules "work" to be run by the executor while holding the global exclusive lock.
+ *
+ * Takes the global exclusive lock for the duration of "work"; unlike the scheduleDBWork()
+ * overloads, no collection namespace or lock mode is specified.
+ *
+ * The "work" will run exclusively, as though it were executed by the main
+ * run loop, but there are no ordering guarantees provided with respect to
+ * any other work item.
+ *
+ * Returns a handle for waiting on or canceling the callback, or
+ * ErrorCodes::ShutdownInProgress.
+ *
+ * May be called by client threads or callbacks running in the executor.
+ */
+ StatusWith<CallbackHandle> scheduleWorkWithGlobalExclusiveLock(const CallbackFn& work);
+
+ /**
+ * Returns an int64_t generated by the prng with a max value of "limit".
+ */
+ int64_t nextRandomInt64(int64_t limit);
+
+private:
+ class Callback;
+ class Event;
+ struct WorkItem;
+ friend class Callback;
+ friend class Event;
+
+
+ /**
+ * A linked list of WorkItem objects.
*
- * WorkItem is copyable so that it may be stored in a list. However, in practice they
- * should only be copied by getWork() and when allocating new entries into a WorkQueue (not
- * when moving entries between work lists).
+ * WorkItems get moved among lists by splicing iterators of work lists together,
+ * not by copying underlying WorkItem objects.
*/
- struct ReplicationExecutor::WorkItem {
- WorkItem();
- uint64_t generation;
- CallbackHandle callback;
- EventHandle finishedEvent;
- Date_t readyDate;
- bool isNetworkOperation;
- };
+ typedef stdx::list<WorkItem> WorkQueue;
/**
- * Description of an event.
+ * A linked list of EventHandles.
+ */
+ typedef stdx::list<EventHandle> EventList;
+
+ /**
+ * Returns diagnostic info.
+ */
+ std::string _getDiagnosticString_inlock() const;
+
+ /**
+ * Implementation of makeEvent() for use when _mutex is already held.
+ */
+ StatusWith<EventHandle> makeEvent_inlock();
+
+ /**
+ * Implementation of signalEvent() for use when _mutex is already held.
+ */
+ void signalEvent_inlock(const EventHandle&);
+
+ /**
+ * Gets a single piece of work to execute.
*
- * Like WorkItem, above, but for events. On signaling, the executor removes the event from the
- * "unsignaled" EventList and schedules all work items in the _waiters list.
+ * If the "callback" member of the returned WorkItem is falsey, that is a signal
+ * to the run loop to wait for shutdown.
*/
- class ReplicationExecutor::Event : public executor::TaskExecutor::EventState {
- friend class ReplicationExecutor;
+ std::pair<WorkItem, CallbackHandle> getWork();
- public:
+ /**
+ * Marks as runnable any sleepers whose ready date has passed as of "now".
+ * Returns the date when the next sleeper will be ready, or Date_t(~0ULL) if there are no
+ * remaining sleepers.
+ */
+ Date_t scheduleReadySleepers_inlock(Date_t now);
- Event(ReplicationExecutor* executor, const EventList::iterator& iter);
- virtual ~Event();
+ /**
+ * Enqueues "callback" into "queue".
+ */
+ StatusWith<CallbackHandle> enqueueWork_inlock(WorkQueue* queue, const CallbackFn& callback);
- void signal() override;
- void waitUntilSignaled() override;
- bool isSignaled() override;
+ /**
+ * Notifies interested parties that shutdown has completed, if it has.
+ */
+ void maybeNotifyShutdownComplete_inlock();
- private:
+ /**
+ * Completes the shutdown process. Called by run().
+ */
+ void finishShutdown();
- // Note that the caller is responsible for removing any references to any EventHandles
- // pointing to this event.
- void _signal_inlock();
+ void _finishRemoteCommand(const RemoteCommandRequest& request,
+ const StatusWith<RemoteCommandResponse>& response,
+ const CallbackHandle& cbHandle,
+ const uint64_t expectedHandleGeneration,
+ const RemoteCommandCallbackFn& cb);
- ReplicationExecutor* _executor;
+ /**
+ * Executes the callback referenced by "cbHandle", and moves the underlying
+ * WorkQueue::iterator from "workQueue" into the _freeQueue.
+ *
+ * "txn" is a pointer to the OperationContext.
+ *
+ * "status" is the callback status from the task runner. Only possible values are
+ * Status::OK and ErrorCodes::CallbackCanceled (when task runner is canceled).
+ *
+ * If "terribleExLockSyncMutex" is not null, serializes execution of "cbHandle" with the
+ * execution of other callbacks.
+ */
+ void _doOperation(OperationContext* txn,
+ const Status& taskRunnerStatus,
+ const CallbackHandle& cbHandle,
+ WorkQueue* workQueue,
+ stdx::mutex* terribleExLockSyncMutex);
- // All members other than _executor are protected by the executor's _mutex.
- bool _isSignaled;
- stdx::condition_variable _isSignaledCondition;
- EventList::iterator _iter;
- WorkQueue _waiters;
- };
+ /**
+ * Wrapper around TaskExecutor::getEventFromHandle that returns an Event* instead of
+ * a generic EventState*.
+ */
+ Event* _getEventFromHandle(const EventHandle& eventHandle);
+
+ /**
+ * Wrapper around TaskExecutor::getCallbackFromHandle that returns a Callback* instead of
+ * a generic CallbackState*.
+ */
+ Callback* _getCallbackFromHandle(const CallbackHandle& callbackHandle);
+
+ // PRNG; seeded at class construction time.
+ PseudoRandom _random;
+
+ std::unique_ptr<executor::NetworkInterface> _networkInterface;
+ std::unique_ptr<StorageInterface> _storageInterface;
+ stdx::mutex _mutex;
+ stdx::mutex _terribleExLockSyncMutex;
+ stdx::condition_variable _noMoreWaitingThreads;
+ WorkQueue _freeQueue;
+ WorkQueue _readyQueue;
+ WorkQueue _dbWorkInProgressQueue;
+ WorkQueue _exclusiveLockInProgressQueue;
+ WorkQueue _networkInProgressQueue;
+ WorkQueue _sleepersQueue;
+ EventList _unsignaledEvents;
+ int64_t _totalEventWaiters;
+ bool _inShutdown;
+ OldThreadPool _dblockWorkers;
+ TaskRunner _dblockTaskRunner;
+ TaskRunner _dblockExclusiveLockTaskRunner;
+ uint64_t _nextId;
+};
+
+class ReplicationExecutor::Callback : public executor::TaskExecutor::CallbackState {
+ friend class ReplicationExecutor;
+
+public:
+ Callback(ReplicationExecutor* executor,
+ const CallbackFn callbackFn,
+ const WorkQueue::iterator& iter,
+ const EventHandle& finishedEvent);
+ virtual ~Callback();
+
+ void cancel() override;
+ void waitForCompletion() override;
+
+private:
+ ReplicationExecutor* _executor;
+
+ // All members other than _executor are protected by the executor's _mutex.
+ CallbackFn _callbackFn;
+ bool _isCanceled;
+ WorkQueue::iterator _iter;
+ EventHandle _finishedEvent;
+};
+
+typedef ReplicationExecutor::ResponseStatus ResponseStatus;
+
+/**
+ * Description of a scheduled but not-yet-run work item.
+ *
+ * Once created, WorkItem objects remain in scope until the executor is destroyed.
+ * However, over their lifetime, they may represent many different work items. This
+ * divorces the lifetime of CallbackHandles from the lifetime of WorkItem objects, but
+ * requires a unique generation identifier in CallbackHandles and WorkItem objects.
+ *
+ * WorkItem is copyable so that it may be stored in a list. However, in practice WorkItems
+ * should only be copied by getWork() and when allocating new entries into a WorkQueue (not
+ * when moving entries between work lists).
+ */
+struct ReplicationExecutor::WorkItem {
+ WorkItem();
+ uint64_t generation;
+ CallbackHandle callback;
+ EventHandle finishedEvent;
+ Date_t readyDate;
+ bool isNetworkOperation;
+};
+
+/**
+ * Description of an event.
+ *
+ * Like WorkItem, above, but for events. On signaling, the executor removes the event from the
+ * "unsignaled" EventList and schedules all work items in the _waiters list.
+ */
+class ReplicationExecutor::Event : public executor::TaskExecutor::EventState {
+ friend class ReplicationExecutor;
+
+public:
+ Event(ReplicationExecutor* executor, const EventList::iterator& iter);
+ virtual ~Event();
+
+ void signal() override;
+ void waitUntilSignaled() override;
+ bool isSignaled() override;
+
+private:
+ // Note that the caller is responsible for removing any references to any EventHandles
+ // pointing to this event.
+ void _signal_inlock();
+
+ ReplicationExecutor* _executor;
+
+ // All members other than _executor are protected by the executor's _mutex.
+ bool _isSignaled;
+ stdx::condition_variable _isSignaledCondition;
+ EventList::iterator _iter;
+ WorkQueue _waiters;
+};
} // namespace repl
} // namespace mongo
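A minimal usage sketch of the scheduleDBWork() overloads and scheduleWorkWithGlobalExclusiveLock() documented above. It assumes an already-constructed ReplicationExecutor named "executor" and a NamespaceString "nss" (both hypothetical names), and mirrors the test cases later in this patch rather than introducing any new API.

    // Illustrative sketch only; "executor" and "nss" are assumed to exist in scope.
    using CallbackData = ReplicationExecutor::CallbackArgs;

    // Run a callback while holding the collection lock for "nss" in MODE_X.
    StatusWith<ReplicationExecutor::CallbackHandle> dbHandle =
        executor.scheduleDBWork([](const CallbackData& cbData) {
            if (!cbData.status.isOK())
                return;                          // e.g. CallbackCanceled on shutdown
            OperationContext* txn = cbData.txn;  // supplied for DB work items
            // ... operate on the locked collection through "txn" ...
        }, nss, MODE_X);

    // Run a callback while holding the global exclusive (GlobalWrite) lock.
    StatusWith<ReplicationExecutor::CallbackHandle> exHandle =
        executor.scheduleWorkWithGlobalExclusiveLock(
            [](const CallbackData& cbData) { /* exclusive work */ });

    // Both calls return StatusWith<CallbackHandle>; check getStatus() (which may be
    // ShutdownInProgress) before calling wait() or cancel() with the handle.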
diff --git a/src/mongo/db/repl/replication_executor_test.cpp b/src/mongo/db/repl/replication_executor_test.cpp
index d299706f75b..c421397701c 100644
--- a/src/mongo/db/repl/replication_executor_test.cpp
+++ b/src/mongo/db/repl/replication_executor_test.cpp
@@ -48,513 +48,462 @@ namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
-
- bool operator==(const RemoteCommandRequest lhs,
- const RemoteCommandRequest rhs) {
- return lhs.target == rhs.target &&
- lhs.dbname == rhs.dbname &&
- lhs.cmdObj == rhs.cmdObj;
- }
-
- bool operator!=(const RemoteCommandRequest lhs,
- const RemoteCommandRequest rhs) {
- return !(lhs == rhs);
- }
-
- void setStatus(const ReplicationExecutor::CallbackArgs& cbData, Status* target) {
- *target = cbData.status;
- }
-
- void setStatusAndShutdown(const ReplicationExecutor::CallbackArgs& cbData,
- Status* target) {
- setStatus(cbData, target);
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }
-
- void setStatusAndTriggerEvent(const ReplicationExecutor::CallbackArgs& cbData,
- Status* outStatus,
- ReplicationExecutor::EventHandle event) {
- *outStatus = cbData.status;
- if (!cbData.status.isOK())
- return;
- cbData.executor->signalEvent(event);
- }
-
- void scheduleSetStatusAndShutdown(const ReplicationExecutor::CallbackArgs& cbData,
- Status* outStatus1,
- Status* outStatus2) {
- if (!cbData.status.isOK()) {
- *outStatus1 = cbData.status;
- return;
- }
- *outStatus1= cbData.executor->scheduleWork(stdx::bind(setStatusAndShutdown,
- stdx::placeholders::_1,
- outStatus2)).getStatus();
- }
-
- const int64_t prngSeed = 1;
-
- TEST_F(ReplicationExecutorTest, RunOne) {
- ReplicationExecutor& executor = getExecutor();
- Status status = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWork(stdx::bind(setStatusAndShutdown,
- stdx::placeholders::_1,
- &status)).getStatus());
- executor.run();
- ASSERT_OK(status);
- }
-
- TEST_F(ReplicationExecutorTest, Schedule1ButShutdown) {
- ReplicationExecutor& executor = getExecutor();
- Status status = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWork(stdx::bind(setStatusAndShutdown,
- stdx::placeholders::_1,
- &status)).getStatus());
- executor.shutdown();
- executor.run();
- ASSERT_EQUALS(status, ErrorCodes::CallbackCanceled);
- }
-
- TEST_F(ReplicationExecutorTest, Schedule2Cancel1) {
- ReplicationExecutor& executor = getExecutor();
- Status status1 = getDetectableErrorStatus();
- Status status2 = getDetectableErrorStatus();
- ReplicationExecutor::CallbackHandle cb = unittest::assertGet(
- executor.scheduleWork(stdx::bind(setStatusAndShutdown,
- stdx::placeholders::_1,
- &status1)));
- executor.cancel(cb);
- ASSERT_OK(executor.scheduleWork(stdx::bind(setStatusAndShutdown,
- stdx::placeholders::_1,
- &status2)).getStatus());
- executor.run();
- ASSERT_EQUALS(status1, ErrorCodes::CallbackCanceled);
- ASSERT_OK(status2);
- }
-
- TEST_F(ReplicationExecutorTest, OneSchedulesAnother) {
- ReplicationExecutor& executor = getExecutor();
- Status status1 = getDetectableErrorStatus();
- Status status2 = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWork(stdx::bind(scheduleSetStatusAndShutdown,
- stdx::placeholders::_1,
- &status1,
- &status2)).getStatus());
- executor.run();
- ASSERT_OK(status1);
- ASSERT_OK(status2);
+using executor::NetworkInterfaceMock;
+
+bool operator==(const RemoteCommandRequest lhs, const RemoteCommandRequest rhs) {
+ return lhs.target == rhs.target && lhs.dbname == rhs.dbname && lhs.cmdObj == rhs.cmdObj;
+}
+
+bool operator!=(const RemoteCommandRequest lhs, const RemoteCommandRequest rhs) {
+ return !(lhs == rhs);
+}
+
+void setStatus(const ReplicationExecutor::CallbackArgs& cbData, Status* target) {
+ *target = cbData.status;
+}
+
+void setStatusAndShutdown(const ReplicationExecutor::CallbackArgs& cbData, Status* target) {
+ setStatus(cbData, target);
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+}
+
+void setStatusAndTriggerEvent(const ReplicationExecutor::CallbackArgs& cbData,
+ Status* outStatus,
+ ReplicationExecutor::EventHandle event) {
+ *outStatus = cbData.status;
+ if (!cbData.status.isOK())
+ return;
+ cbData.executor->signalEvent(event);
+}
+
+void scheduleSetStatusAndShutdown(const ReplicationExecutor::CallbackArgs& cbData,
+ Status* outStatus1,
+ Status* outStatus2) {
+ if (!cbData.status.isOK()) {
+ *outStatus1 = cbData.status;
+ return;
}
-
- class EventChainAndWaitingTest {
- MONGO_DISALLOW_COPYING(EventChainAndWaitingTest);
- public:
- EventChainAndWaitingTest();
- void run();
- private:
- void onGo(const ReplicationExecutor::CallbackArgs& cbData);
- void onGoAfterTriggered(const ReplicationExecutor::CallbackArgs& cbData);
-
- NetworkInterfaceMock* net;
- StorageInterfaceMock* storage;
- ReplicationExecutor executor;
- stdx::thread executorThread;
- const ReplicationExecutor::EventHandle goEvent;
- const ReplicationExecutor::EventHandle event2;
- const ReplicationExecutor::EventHandle event3;
- ReplicationExecutor::EventHandle triggerEvent;
- ReplicationExecutor::CallbackFn triggered2;
- ReplicationExecutor::CallbackFn triggered3;
- Status status1;
- Status status2;
- Status status3;
- Status status4;
- Status status5;
- };
-
- TEST(ReplicationExecutorTest, EventChainAndWaiting) {
- EventChainAndWaitingTest().run();
+ *outStatus1 = cbData.executor->scheduleWork(stdx::bind(setStatusAndShutdown,
+ stdx::placeholders::_1,
+ outStatus2)).getStatus();
+}
+
+const int64_t prngSeed = 1;
+
+TEST_F(ReplicationExecutorTest, RunOne) {
+ ReplicationExecutor& executor = getExecutor();
+ Status status = getDetectableErrorStatus();
+ ASSERT_OK(
+ executor.scheduleWork(stdx::bind(setStatusAndShutdown, stdx::placeholders::_1, &status))
+ .getStatus());
+ executor.run();
+ ASSERT_OK(status);
+}
+
+TEST_F(ReplicationExecutorTest, Schedule1ButShutdown) {
+ ReplicationExecutor& executor = getExecutor();
+ Status status = getDetectableErrorStatus();
+ ASSERT_OK(
+ executor.scheduleWork(stdx::bind(setStatusAndShutdown, stdx::placeholders::_1, &status))
+ .getStatus());
+ executor.shutdown();
+ executor.run();
+ ASSERT_EQUALS(status, ErrorCodes::CallbackCanceled);
+}
+
+TEST_F(ReplicationExecutorTest, Schedule2Cancel1) {
+ ReplicationExecutor& executor = getExecutor();
+ Status status1 = getDetectableErrorStatus();
+ Status status2 = getDetectableErrorStatus();
+ ReplicationExecutor::CallbackHandle cb = unittest::assertGet(
+ executor.scheduleWork(stdx::bind(setStatusAndShutdown, stdx::placeholders::_1, &status1)));
+ executor.cancel(cb);
+ ASSERT_OK(
+ executor.scheduleWork(stdx::bind(setStatusAndShutdown, stdx::placeholders::_1, &status2))
+ .getStatus());
+ executor.run();
+ ASSERT_EQUALS(status1, ErrorCodes::CallbackCanceled);
+ ASSERT_OK(status2);
+}
+
+TEST_F(ReplicationExecutorTest, OneSchedulesAnother) {
+ ReplicationExecutor& executor = getExecutor();
+ Status status1 = getDetectableErrorStatus();
+ Status status2 = getDetectableErrorStatus();
+ ASSERT_OK(executor.scheduleWork(stdx::bind(scheduleSetStatusAndShutdown,
+ stdx::placeholders::_1,
+ &status1,
+ &status2)).getStatus());
+ executor.run();
+ ASSERT_OK(status1);
+ ASSERT_OK(status2);
+}
+
+class EventChainAndWaitingTest {
+ MONGO_DISALLOW_COPYING(EventChainAndWaitingTest);
+
+public:
+ EventChainAndWaitingTest();
+ void run();
+
+private:
+ void onGo(const ReplicationExecutor::CallbackArgs& cbData);
+ void onGoAfterTriggered(const ReplicationExecutor::CallbackArgs& cbData);
+
+ NetworkInterfaceMock* net;
+ StorageInterfaceMock* storage;
+ ReplicationExecutor executor;
+ stdx::thread executorThread;
+ const ReplicationExecutor::EventHandle goEvent;
+ const ReplicationExecutor::EventHandle event2;
+ const ReplicationExecutor::EventHandle event3;
+ ReplicationExecutor::EventHandle triggerEvent;
+ ReplicationExecutor::CallbackFn triggered2;
+ ReplicationExecutor::CallbackFn triggered3;
+ Status status1;
+ Status status2;
+ Status status3;
+ Status status4;
+ Status status5;
+};
+
+TEST(ReplicationExecutorTest, EventChainAndWaiting) {
+ EventChainAndWaitingTest().run();
+}
+
+EventChainAndWaitingTest::EventChainAndWaitingTest()
+ : net(new NetworkInterfaceMock),
+ storage(new StorageInterfaceMock),
+ executor(net, storage, prngSeed),
+ executorThread(stdx::bind(&ReplicationExecutor::run, &executor)),
+ goEvent(unittest::assertGet(executor.makeEvent())),
+ event2(unittest::assertGet(executor.makeEvent())),
+ event3(unittest::assertGet(executor.makeEvent())),
+ status1(ErrorCodes::InternalError, "Not mutated"),
+ status2(ErrorCodes::InternalError, "Not mutated"),
+ status3(ErrorCodes::InternalError, "Not mutated"),
+ status4(ErrorCodes::InternalError, "Not mutated"),
+ status5(ErrorCodes::InternalError, "Not mutated") {
+ triggered2 = stdx::bind(setStatusAndTriggerEvent, stdx::placeholders::_1, &status2, event2);
+ triggered3 = stdx::bind(setStatusAndTriggerEvent, stdx::placeholders::_1, &status3, event3);
+}
+
+void EventChainAndWaitingTest::run() {
+ executor.onEvent(goEvent,
+ stdx::bind(&EventChainAndWaitingTest::onGo, this, stdx::placeholders::_1));
+ executor.signalEvent(goEvent);
+ executor.waitForEvent(goEvent);
+ executor.waitForEvent(event2);
+ executor.waitForEvent(event3);
+
+ ReplicationExecutor::EventHandle neverSignaledEvent = unittest::assertGet(executor.makeEvent());
+ stdx::thread neverSignaledWaiter(
+ stdx::bind(&ReplicationExecutor::waitForEvent, &executor, neverSignaledEvent));
+ ReplicationExecutor::CallbackHandle shutdownCallback = unittest::assertGet(
+ executor.scheduleWork(stdx::bind(setStatusAndShutdown, stdx::placeholders::_1, &status5)));
+ executor.wait(shutdownCallback);
+ neverSignaledWaiter.join();
+ executorThread.join();
+ ASSERT_OK(status1);
+ ASSERT_OK(status2);
+ ASSERT_OK(status3);
+ ASSERT_OK(status4);
+ ASSERT_OK(status5);
+}
+
+void EventChainAndWaitingTest::onGo(const ReplicationExecutor::CallbackArgs& cbData) {
+ if (!cbData.status.isOK()) {
+ status1 = cbData.status;
+ return;
}
-
- EventChainAndWaitingTest::EventChainAndWaitingTest() :
- net(new NetworkInterfaceMock),
- storage(new StorageInterfaceMock),
- executor(net, storage, prngSeed),
- executorThread(stdx::bind(&ReplicationExecutor::run, &executor)),
- goEvent(unittest::assertGet(executor.makeEvent())),
- event2(unittest::assertGet(executor.makeEvent())),
- event3(unittest::assertGet(executor.makeEvent())),
- status1(ErrorCodes::InternalError, "Not mutated"),
- status2(ErrorCodes::InternalError, "Not mutated"),
- status3(ErrorCodes::InternalError, "Not mutated"),
- status4(ErrorCodes::InternalError, "Not mutated"),
- status5(ErrorCodes::InternalError, "Not mutated") {
-
- triggered2 = stdx::bind(setStatusAndTriggerEvent,
- stdx::placeholders::_1,
- &status2,
- event2);
- triggered3 = stdx::bind(setStatusAndTriggerEvent,
- stdx::placeholders::_1,
- &status3,
- event3);
+ executor::TaskExecutor* executor = cbData.executor;
+ StatusWith<ReplicationExecutor::EventHandle> errorOrTriggerEvent = executor->makeEvent();
+ if (!errorOrTriggerEvent.isOK()) {
+ status1 = errorOrTriggerEvent.getStatus();
+ executor->shutdown();
+ return;
}
-
- void EventChainAndWaitingTest::run() {
- executor.onEvent(goEvent,
- stdx::bind(&EventChainAndWaitingTest::onGo,
- this,
- stdx::placeholders::_1));
- executor.signalEvent(goEvent);
- executor.waitForEvent(goEvent);
- executor.waitForEvent(event2);
- executor.waitForEvent(event3);
-
- ReplicationExecutor::EventHandle neverSignaledEvent =
- unittest::assertGet(executor.makeEvent());
- stdx::thread neverSignaledWaiter(stdx::bind(&ReplicationExecutor::waitForEvent,
- &executor,
- neverSignaledEvent));
- ReplicationExecutor::CallbackHandle shutdownCallback = unittest::assertGet(
- executor.scheduleWork(stdx::bind(setStatusAndShutdown,
- stdx::placeholders::_1,
- &status5)));
- executor.wait(shutdownCallback);
- neverSignaledWaiter.join();
- executorThread.join();
- ASSERT_OK(status1);
- ASSERT_OK(status2);
- ASSERT_OK(status3);
- ASSERT_OK(status4);
- ASSERT_OK(status5);
+ triggerEvent = errorOrTriggerEvent.getValue();
+ StatusWith<ReplicationExecutor::CallbackHandle> cbHandle =
+ executor->onEvent(triggerEvent, triggered2);
+ if (!cbHandle.isOK()) {
+ status1 = cbHandle.getStatus();
+ executor->shutdown();
+ return;
}
-
- void EventChainAndWaitingTest::onGo(const ReplicationExecutor::CallbackArgs& cbData) {
- if (!cbData.status.isOK()) {
- status1 = cbData.status;
- return;
- }
- executor::TaskExecutor* executor = cbData.executor;
- StatusWith<ReplicationExecutor::EventHandle> errorOrTriggerEvent = executor->makeEvent();
- if (!errorOrTriggerEvent.isOK()) {
- status1 = errorOrTriggerEvent.getStatus();
- executor->shutdown();
- return;
- }
- triggerEvent = errorOrTriggerEvent.getValue();
- StatusWith<ReplicationExecutor::CallbackHandle> cbHandle = executor->onEvent(
- triggerEvent, triggered2);
- if (!cbHandle.isOK()) {
- status1 = cbHandle.getStatus();
- executor->shutdown();
- return;
- }
- cbHandle = executor->onEvent(triggerEvent, triggered3);
- if (!cbHandle.isOK()) {
- status1 = cbHandle.getStatus();
- executor->shutdown();
- return;
- }
-
- cbHandle = executor->onEvent(
- goEvent,
- stdx::bind(&EventChainAndWaitingTest::onGoAfterTriggered,
- this,
- stdx::placeholders::_1));
- if (!cbHandle.isOK()) {
- status1 = cbHandle.getStatus();
- executor->shutdown();
- return;
- }
- status1 = Status::OK();
+ cbHandle = executor->onEvent(triggerEvent, triggered3);
+ if (!cbHandle.isOK()) {
+ status1 = cbHandle.getStatus();
+ executor->shutdown();
+ return;
}
- void EventChainAndWaitingTest::onGoAfterTriggered(
- const ReplicationExecutor::CallbackArgs& cbData) {
- status4 = cbData.status;
- if (!cbData.status.isOK()) {
- return;
- }
- cbData.executor->signalEvent(triggerEvent);
+ cbHandle = executor->onEvent(
+ goEvent,
+ stdx::bind(&EventChainAndWaitingTest::onGoAfterTriggered, this, stdx::placeholders::_1));
+ if (!cbHandle.isOK()) {
+ status1 = cbHandle.getStatus();
+ executor->shutdown();
+ return;
}
+ status1 = Status::OK();
+}
- TEST_F(ReplicationExecutorTest, ScheduleWorkAt) {
- NetworkInterfaceMock* net = getNet();
- ReplicationExecutor& executor = getExecutor();
- launchExecutorThread();
- Status status1 = getDetectableErrorStatus();
- Status status2 = getDetectableErrorStatus();
- Status status3 = getDetectableErrorStatus();
- const Date_t now = net->now();
- const ReplicationExecutor::CallbackHandle cb1 =
- unittest::assertGet(executor.scheduleWorkAt(now + Milliseconds(100),
- stdx::bind(setStatus,
- stdx::placeholders::_1,
- &status1)));
- unittest::assertGet(executor.scheduleWorkAt(now + Milliseconds(5000),
- stdx::bind(setStatus,
- stdx::placeholders::_1,
- &status3)));
- const ReplicationExecutor::CallbackHandle cb2 =
- unittest::assertGet(executor.scheduleWorkAt(now + Milliseconds(200),
- stdx::bind(setStatusAndShutdown,
- stdx::placeholders::_1,
- &status2)));
- const Date_t startTime = net->now();
- net->runUntil(startTime + Milliseconds(200));
- ASSERT_EQUALS(startTime + Milliseconds(200), net->now());
- executor.wait(cb1);
- executor.wait(cb2);
- ASSERT_OK(status1);
- ASSERT_OK(status2);
- executor.shutdown();
- joinExecutorThread();
- ASSERT_EQUALS(status3, ErrorCodes::CallbackCanceled);
+void EventChainAndWaitingTest::onGoAfterTriggered(const ReplicationExecutor::CallbackArgs& cbData) {
+ status4 = cbData.status;
+ if (!cbData.status.isOK()) {
+ return;
}
-
- std::string getRequestDescription(const RemoteCommandRequest& request) {
- return mongoutils::str::stream() << "Request(" << request.target.toString() << ", " <<
- request.dbname << ", " << request.cmdObj << ')';
- }
-
- static void setStatusOnRemoteCommandCompletion(
- const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
- const RemoteCommandRequest& expectedRequest,
- Status* outStatus) {
-
- if (cbData.request != expectedRequest) {
- *outStatus = Status(
- ErrorCodes::BadValue,
- mongoutils::str::stream() << "Actual request: " <<
- getRequestDescription(cbData.request) << "; expected: " <<
- getRequestDescription(expectedRequest));
- return;
- }
- *outStatus = cbData.response.getStatus();
- }
-
- TEST_F(ReplicationExecutorTest, ScheduleRemoteCommand) {
- NetworkInterfaceMock* net = getNet();
- ReplicationExecutor& executor = getExecutor();
- launchExecutorThread();
- Status status1 = getDetectableErrorStatus();
- const RemoteCommandRequest request(
- HostAndPort("localhost", 27017),
- "mydb",
- BSON("whatsUp" << "doc"));
- ReplicationExecutor::CallbackHandle cbHandle = unittest::assertGet(
- executor.scheduleRemoteCommand(
- request,
- stdx::bind(setStatusOnRemoteCommandCompletion,
- stdx::placeholders::_1,
- request,
- &status1)));
- ASSERT(net->hasReadyRequests());
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now(),
- ResponseStatus(ErrorCodes::NoSuchKey, "I'm missing"));
- net->runReadyNetworkOperations();
- ASSERT(!net->hasReadyRequests());
- executor.wait(cbHandle);
- executor.shutdown();
- joinExecutorThread();
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, status1);
- }
-
- TEST_F(ReplicationExecutorTest, ScheduleAndCancelRemoteCommand) {
- ReplicationExecutor& executor = getExecutor();
- Status status1 = getDetectableErrorStatus();
- const RemoteCommandRequest request(
- HostAndPort("localhost", 27017),
- "mydb",
- BSON("whatsUp" << "doc"));
- ReplicationExecutor::CallbackHandle cbHandle = unittest::assertGet(
- executor.scheduleRemoteCommand(
- request,
- stdx::bind(setStatusOnRemoteCommandCompletion,
- stdx::placeholders::_1,
- request,
- &status1)));
- executor.cancel(cbHandle);
- launchExecutorThread();
- getNet()->runReadyNetworkOperations();
- executor.wait(cbHandle);
- executor.shutdown();
- joinExecutorThread();
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status1);
- }
-
- TEST_F(ReplicationExecutorTest, ScheduleDBWorkAndExclusiveWorkConcurrently) {
- unittest::Barrier barrier(2U);
- NamespaceString nss("mydb", "mycoll");
- ReplicationExecutor& executor = getExecutor();
- Status status1 = getDetectableErrorStatus();
- OperationContext* txn = nullptr;
- using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleDBWork([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- barrier.countDownAndWait();
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- barrier.countDownAndWait();
- }).getStatus());
- executor.run();
- ASSERT_OK(status1);
- ASSERT(txn);
- }
-
- TEST_F(ReplicationExecutorTest, ScheduleDBWorkWithCollectionLock) {
- NamespaceString nss("mydb", "mycoll");
- ReplicationExecutor& executor = getExecutor();
- Status status1 = getDetectableErrorStatus();
- OperationContext* txn = nullptr;
- bool collectionIsLocked = false;
- using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleDBWork([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- collectionIsLocked = txn ?
- txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X) :
- false;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }, nss, MODE_X).getStatus());
- executor.run();
- ASSERT_OK(status1);
- ASSERT(txn);
- ASSERT_TRUE(collectionIsLocked);
- }
-
- TEST_F(ReplicationExecutorTest, ScheduleExclusiveLockOperation) {
- ReplicationExecutor& executor = getExecutor();
- Status status1 = getDetectableErrorStatus();
- OperationContext* txn = nullptr;
- bool lockIsW = false;
- using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- lockIsW = txn ? txn->lockState()->isW() : false;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
- executor.run();
- ASSERT_OK(status1);
- ASSERT(txn);
- ASSERT_TRUE(lockIsW);
- }
-
- TEST_F(ReplicationExecutorTest, ShutdownBeforeRunningSecondExclusiveLockOperation) {
- ReplicationExecutor& executor = getExecutor();
- using CallbackData = ReplicationExecutor::CallbackArgs;
- Status status1 = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status1 = cbData.status;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
- // Second db work item is invoked by the main executor thread because the work item is
- // moved from the exclusive lock queue to the ready work item queue when the first callback
- // cancels the executor.
- Status status2 = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status2 = cbData.status;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
- executor.run();
- ASSERT_OK(status1);
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status2.code());
- }
-
- TEST_F(ReplicationExecutorTest, RemoteCommandWithTimeout) {
- NetworkInterfaceMock* net = getNet();
- ReplicationExecutor& executor = getExecutor();
- Status status(ErrorCodes::InternalError, "");
- launchExecutorThread();
- const RemoteCommandRequest request(
- HostAndPort("lazy", 27017),
- "admin",
- BSON("sleep" << 1),
- Milliseconds(1));
- ReplicationExecutor::CallbackHandle cbHandle = unittest::assertGet(
- executor.scheduleRemoteCommand(
- request,
- stdx::bind(setStatusOnRemoteCommandCompletion,
- stdx::placeholders::_1,
- request,
- &status)));
- ASSERT(net->hasReadyRequests());
- const Date_t startTime = net->now();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- startTime + Milliseconds(2),
- ResponseStatus(ErrorCodes::ExceededTimeLimit, "I took too long"));
- net->runUntil(startTime + Milliseconds(2));
- ASSERT_EQUALS(startTime + Milliseconds(2), net->now());
- executor.wait(cbHandle);
- ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
- }
-
- TEST_F(ReplicationExecutorTest, CallbackHandleComparison) {
- ReplicationExecutor& executor = getExecutor();
- Status status(ErrorCodes::InternalError, "");
- const RemoteCommandRequest request(
- HostAndPort("lazy", 27017),
- "admin",
- BSON("cmd" << 1));
- ReplicationExecutor::CallbackHandle cbHandle1 = unittest::assertGet(
- executor.scheduleRemoteCommand(
- request,
- stdx::bind(setStatusOnRemoteCommandCompletion,
- stdx::placeholders::_1,
- request,
- &status)));
- ReplicationExecutor::CallbackHandle cbHandle2 = unittest::assertGet(
- executor.scheduleRemoteCommand(
- request,
- stdx::bind(setStatusOnRemoteCommandCompletion,
- stdx::placeholders::_1,
- request,
- &status)));
-
- // test equality
- ASSERT_TRUE(cbHandle1 == cbHandle1);
- ASSERT_TRUE(cbHandle2 == cbHandle2);
- ASSERT_FALSE(cbHandle1 != cbHandle1);
- ASSERT_FALSE(cbHandle2 != cbHandle2);
-
- // test inequality
- ASSERT_TRUE(cbHandle1 != cbHandle2);
- ASSERT_TRUE(cbHandle2 != cbHandle1);
- ASSERT_FALSE(cbHandle1 == cbHandle2);
- ASSERT_FALSE(cbHandle2 == cbHandle1);
-
- ReplicationExecutor::CallbackHandle cbHandle1Copy = cbHandle1;
- ASSERT_TRUE(cbHandle1 == cbHandle1Copy);
- ASSERT_TRUE(cbHandle1Copy == cbHandle1);
- ASSERT_FALSE(cbHandle1Copy != cbHandle1);
- ASSERT_FALSE(cbHandle1 != cbHandle1Copy);
-
- std::vector<ReplicationExecutor::CallbackHandle> cbs;
- cbs.push_back(cbHandle1);
- cbs.push_back(cbHandle2);
- ASSERT(cbHandle1 != cbHandle2);
- std::vector<ReplicationExecutor::CallbackHandle>::iterator foundHandle =
- std::find(cbs.begin(),
- cbs.end(),
- cbHandle1);
- ASSERT_TRUE(cbs.end() != foundHandle);
- ASSERT_TRUE(cbHandle1 == *foundHandle);
- launchExecutorThread();
- executor.shutdown();
- joinExecutorThread();
+ cbData.executor->signalEvent(triggerEvent);
+}
+
+TEST_F(ReplicationExecutorTest, ScheduleWorkAt) {
+ NetworkInterfaceMock* net = getNet();
+ ReplicationExecutor& executor = getExecutor();
+ launchExecutorThread();
+ Status status1 = getDetectableErrorStatus();
+ Status status2 = getDetectableErrorStatus();
+ Status status3 = getDetectableErrorStatus();
+ const Date_t now = net->now();
+ const ReplicationExecutor::CallbackHandle cb1 = unittest::assertGet(executor.scheduleWorkAt(
+ now + Milliseconds(100), stdx::bind(setStatus, stdx::placeholders::_1, &status1)));
+ unittest::assertGet(executor.scheduleWorkAt(
+ now + Milliseconds(5000), stdx::bind(setStatus, stdx::placeholders::_1, &status3)));
+ const ReplicationExecutor::CallbackHandle cb2 = unittest::assertGet(executor.scheduleWorkAt(
+ now + Milliseconds(200),
+ stdx::bind(setStatusAndShutdown, stdx::placeholders::_1, &status2)));
+ const Date_t startTime = net->now();
+ net->runUntil(startTime + Milliseconds(200));
+ ASSERT_EQUALS(startTime + Milliseconds(200), net->now());
+ executor.wait(cb1);
+ executor.wait(cb2);
+ ASSERT_OK(status1);
+ ASSERT_OK(status2);
+ executor.shutdown();
+ joinExecutorThread();
+ ASSERT_EQUALS(status3, ErrorCodes::CallbackCanceled);
+}
+
+std::string getRequestDescription(const RemoteCommandRequest& request) {
+ return mongoutils::str::stream() << "Request(" << request.target.toString() << ", "
+ << request.dbname << ", " << request.cmdObj << ')';
+}
+
+static void setStatusOnRemoteCommandCompletion(
+ const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
+ const RemoteCommandRequest& expectedRequest,
+ Status* outStatus) {
+ if (cbData.request != expectedRequest) {
+ *outStatus = Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Actual request: " << getRequestDescription(cbData.request)
+ << "; expected: " << getRequestDescription(expectedRequest));
+ return;
}
+ *outStatus = cbData.response.getStatus();
+}
+
+TEST_F(ReplicationExecutorTest, ScheduleRemoteCommand) {
+ NetworkInterfaceMock* net = getNet();
+ ReplicationExecutor& executor = getExecutor();
+ launchExecutorThread();
+ Status status1 = getDetectableErrorStatus();
+ const RemoteCommandRequest request(HostAndPort("localhost", 27017),
+ "mydb",
+ BSON("whatsUp"
+ << "doc"));
+ ReplicationExecutor::CallbackHandle cbHandle =
+ unittest::assertGet(executor.scheduleRemoteCommand(
+ request,
+ stdx::bind(
+ setStatusOnRemoteCommandCompletion, stdx::placeholders::_1, request, &status1)));
+ ASSERT(net->hasReadyRequests());
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi, net->now(), ResponseStatus(ErrorCodes::NoSuchKey, "I'm missing"));
+ net->runReadyNetworkOperations();
+ ASSERT(!net->hasReadyRequests());
+ executor.wait(cbHandle);
+ executor.shutdown();
+ joinExecutorThread();
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, status1);
+}
+
+TEST_F(ReplicationExecutorTest, ScheduleAndCancelRemoteCommand) {
+ ReplicationExecutor& executor = getExecutor();
+ Status status1 = getDetectableErrorStatus();
+ const RemoteCommandRequest request(HostAndPort("localhost", 27017),
+ "mydb",
+ BSON("whatsUp"
+ << "doc"));
+ ReplicationExecutor::CallbackHandle cbHandle =
+ unittest::assertGet(executor.scheduleRemoteCommand(
+ request,
+ stdx::bind(
+ setStatusOnRemoteCommandCompletion, stdx::placeholders::_1, request, &status1)));
+ executor.cancel(cbHandle);
+ launchExecutorThread();
+ getNet()->runReadyNetworkOperations();
+ executor.wait(cbHandle);
+ executor.shutdown();
+ joinExecutorThread();
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status1);
+}
+
+TEST_F(ReplicationExecutorTest, ScheduleDBWorkAndExclusiveWorkConcurrently) {
+ unittest::Barrier barrier(2U);
+ NamespaceString nss("mydb", "mycoll");
+ ReplicationExecutor& executor = getExecutor();
+ Status status1 = getDetectableErrorStatus();
+ OperationContext* txn = nullptr;
+ using CallbackData = ReplicationExecutor::CallbackArgs;
+ ASSERT_OK(executor.scheduleDBWork([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ barrier.countDownAndWait();
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ }).getStatus());
+ ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ barrier.countDownAndWait();
+ }).getStatus());
+ executor.run();
+ ASSERT_OK(status1);
+ ASSERT(txn);
+}
+
+TEST_F(ReplicationExecutorTest, ScheduleDBWorkWithCollectionLock) {
+ NamespaceString nss("mydb", "mycoll");
+ ReplicationExecutor& executor = getExecutor();
+ Status status1 = getDetectableErrorStatus();
+ OperationContext* txn = nullptr;
+ bool collectionIsLocked = false;
+ using CallbackData = ReplicationExecutor::CallbackArgs;
+ ASSERT_OK(executor.scheduleDBWork([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ collectionIsLocked =
+ txn ? txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X) : false;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ }, nss, MODE_X).getStatus());
+ executor.run();
+ ASSERT_OK(status1);
+ ASSERT(txn);
+ ASSERT_TRUE(collectionIsLocked);
+}
+
+TEST_F(ReplicationExecutorTest, ScheduleExclusiveLockOperation) {
+ ReplicationExecutor& executor = getExecutor();
+ Status status1 = getDetectableErrorStatus();
+ OperationContext* txn = nullptr;
+ bool lockIsW = false;
+ using CallbackData = ReplicationExecutor::CallbackArgs;
+ ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ lockIsW = txn ? txn->lockState()->isW() : false;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ }).getStatus());
+ executor.run();
+ ASSERT_OK(status1);
+ ASSERT(txn);
+ ASSERT_TRUE(lockIsW);
+}
+
+TEST_F(ReplicationExecutorTest, ShutdownBeforeRunningSecondExclusiveLockOperation) {
+ ReplicationExecutor& executor = getExecutor();
+ using CallbackData = ReplicationExecutor::CallbackArgs;
+ Status status1 = getDetectableErrorStatus();
+ ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ }).getStatus());
+ // The second DB work item is invoked by the main executor thread because the work item is
+ // moved from the exclusive lock queue to the ready work item queue when the first callback
+ // shuts down the executor.
+ Status status2 = getDetectableErrorStatus();
+ ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status2 = cbData.status;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ }).getStatus());
+ executor.run();
+ ASSERT_OK(status1);
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status2.code());
+}
+
+TEST_F(ReplicationExecutorTest, RemoteCommandWithTimeout) {
+ NetworkInterfaceMock* net = getNet();
+ ReplicationExecutor& executor = getExecutor();
+ Status status(ErrorCodes::InternalError, "");
+ launchExecutorThread();
+ const RemoteCommandRequest request(
+ HostAndPort("lazy", 27017), "admin", BSON("sleep" << 1), Milliseconds(1));
+ ReplicationExecutor::CallbackHandle cbHandle =
+ unittest::assertGet(executor.scheduleRemoteCommand(
+ request,
+ stdx::bind(
+ setStatusOnRemoteCommandCompletion, stdx::placeholders::_1, request, &status)));
+ ASSERT(net->hasReadyRequests());
+ const Date_t startTime = net->now();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi,
+ startTime + Milliseconds(2),
+ ResponseStatus(ErrorCodes::ExceededTimeLimit, "I took too long"));
+ net->runUntil(startTime + Milliseconds(2));
+ ASSERT_EQUALS(startTime + Milliseconds(2), net->now());
+ executor.wait(cbHandle);
+ ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, status);
+}
+
+TEST_F(ReplicationExecutorTest, CallbackHandleComparison) {
+ ReplicationExecutor& executor = getExecutor();
+ Status status(ErrorCodes::InternalError, "");
+ const RemoteCommandRequest request(HostAndPort("lazy", 27017), "admin", BSON("cmd" << 1));
+ ReplicationExecutor::CallbackHandle cbHandle1 =
+ unittest::assertGet(executor.scheduleRemoteCommand(
+ request,
+ stdx::bind(
+ setStatusOnRemoteCommandCompletion, stdx::placeholders::_1, request, &status)));
+ ReplicationExecutor::CallbackHandle cbHandle2 =
+ unittest::assertGet(executor.scheduleRemoteCommand(
+ request,
+ stdx::bind(
+ setStatusOnRemoteCommandCompletion, stdx::placeholders::_1, request, &status)));
+
+ // test equality
+ ASSERT_TRUE(cbHandle1 == cbHandle1);
+ ASSERT_TRUE(cbHandle2 == cbHandle2);
+ ASSERT_FALSE(cbHandle1 != cbHandle1);
+ ASSERT_FALSE(cbHandle2 != cbHandle2);
+
+ // test inequality
+ ASSERT_TRUE(cbHandle1 != cbHandle2);
+ ASSERT_TRUE(cbHandle2 != cbHandle1);
+ ASSERT_FALSE(cbHandle1 == cbHandle2);
+ ASSERT_FALSE(cbHandle2 == cbHandle1);
+
+ ReplicationExecutor::CallbackHandle cbHandle1Copy = cbHandle1;
+ ASSERT_TRUE(cbHandle1 == cbHandle1Copy);
+ ASSERT_TRUE(cbHandle1Copy == cbHandle1);
+ ASSERT_FALSE(cbHandle1Copy != cbHandle1);
+ ASSERT_FALSE(cbHandle1 != cbHandle1Copy);
+
+ std::vector<ReplicationExecutor::CallbackHandle> cbs;
+ cbs.push_back(cbHandle1);
+ cbs.push_back(cbHandle2);
+ ASSERT(cbHandle1 != cbHandle2);
+ std::vector<ReplicationExecutor::CallbackHandle>::iterator foundHandle =
+ std::find(cbs.begin(), cbs.end(), cbHandle1);
+ ASSERT_TRUE(cbs.end() != foundHandle);
+ ASSERT_TRUE(cbHandle1 == *foundHandle);
+ launchExecutorThread();
+ executor.shutdown();
+ joinExecutorThread();
+}
} // namespace
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_executor_test_fixture.cpp b/src/mongo/db/repl/replication_executor_test_fixture.cpp
index bcd07d0db68..6172ca01a33 100644
--- a/src/mongo/db/repl/replication_executor_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_executor_test_fixture.cpp
@@ -39,47 +39,46 @@ namespace repl {
namespace {
- const int64_t prngSeed = 1;
+const int64_t prngSeed = 1;
-} // namespace
+} // namespace
- // static
- Status ReplicationExecutorTest::getDetectableErrorStatus() {
- return Status(ErrorCodes::InternalError, "Not mutated");
- }
+// static
+Status ReplicationExecutorTest::getDetectableErrorStatus() {
+ return Status(ErrorCodes::InternalError, "Not mutated");
+}
- void ReplicationExecutorTest::launchExecutorThread() {
- ASSERT(!_executorThread);
- _executorThread.reset(
- new stdx::thread(stdx::bind(&ReplicationExecutor::run, _executor.get())));
- postExecutorThreadLaunch();
- }
+void ReplicationExecutorTest::launchExecutorThread() {
+ ASSERT(!_executorThread);
+ _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run, _executor.get())));
+ postExecutorThreadLaunch();
+}
- void ReplicationExecutorTest::postExecutorThreadLaunch() {
- _net->enterNetwork();
- }
+void ReplicationExecutorTest::postExecutorThreadLaunch() {
+ _net->enterNetwork();
+}
- void ReplicationExecutorTest::joinExecutorThread() {
- ASSERT(_executorThread);
- getNet()->exitNetwork();
- _executorThread->join();
- _executorThread.reset();
- }
+void ReplicationExecutorTest::joinExecutorThread() {
+ ASSERT(_executorThread);
+ getNet()->exitNetwork();
+ _executorThread->join();
+ _executorThread.reset();
+}
- void ReplicationExecutorTest::setUp() {
- _net = new executor::NetworkInterfaceMock;
- _storage = new StorageInterfaceMock;
- _executor.reset(new ReplicationExecutor(_net, _storage, prngSeed));
- }
+void ReplicationExecutorTest::setUp() {
+ _net = new executor::NetworkInterfaceMock;
+ _storage = new StorageInterfaceMock;
+ _executor.reset(new ReplicationExecutor(_net, _storage, prngSeed));
+}
- void ReplicationExecutorTest::tearDown() {
- if (_executorThread) {
- _executor->shutdown();
- joinExecutorThread();
- }
- _executor.reset();
- _net = nullptr;
+void ReplicationExecutorTest::tearDown() {
+ if (_executorThread) {
+ _executor->shutdown();
+ joinExecutorThread();
}
+ _executor.reset();
+ _net = nullptr;
+}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/replication_executor_test_fixture.h b/src/mongo/db/repl/replication_executor_test_fixture.h
index a6fec40ebd2..e89dd99ecb6 100644
--- a/src/mongo/db/repl/replication_executor_test_fixture.h
+++ b/src/mongo/db/repl/replication_executor_test_fixture.h
@@ -34,73 +34,76 @@
namespace mongo {
namespace executor {
- class NetworkInterfaceMock;
-} // namespace executor
+class NetworkInterfaceMock;
+} // namespace executor
namespace repl {
- using std::unique_ptr;
+using std::unique_ptr;
- class ReplicationExecutor;
- class StorageInterfaceMock;
+class ReplicationExecutor;
+class StorageInterfaceMock;
+
+/**
+ * Test fixture for tests that require a ReplicationExecutor backed by
+ * a NetworkInterfaceMock.
+ */
+class ReplicationExecutorTest : public unittest::Test {
+public:
+ /**
+ * Creates an initial error status suitable for checking whether a
+ * component has modified the 'status' field in the test fixture.
+ */
+ static Status getDetectableErrorStatus();
+
+protected:
+ executor::NetworkInterfaceMock* getNet() {
+ return _net;
+ }
+ ReplicationExecutor& getExecutor() {
+ return *_executor;
+ }
+ /**
+ * Runs the ReplicationExecutor in the background.
+ */
+ void launchExecutorThread();
+
+ /**
+ * Anything that needs to be done after launchExecutorThread should go in here.
+ */
+ virtual void postExecutorThreadLaunch();
/**
- * Test fixture for tests that require a ReplicationExecutor backed by
- * a NetworkInterfaceMock.
+ * Waits for the background ReplicationExecutor to stop running.
+ *
+ * The executor should be shut down prior to calling this function,
+ * or the test may block indefinitely.
*/
- class ReplicationExecutorTest : public unittest::Test {
- public:
-
- /**
- * Creates an initial error status suitable for checking if
- * component has modified the 'status' field in test fixture.
- */
- static Status getDetectableErrorStatus();
-
- protected:
- executor::NetworkInterfaceMock* getNet() { return _net; }
- ReplicationExecutor& getExecutor() { return *_executor; }
- /**
- * Runs ReplicationExecutor in background.
- */
- void launchExecutorThread();
-
- /**
- * Anything that needs to be done after launchExecutorThread should go in here.
- */
- virtual void postExecutorThreadLaunch();
-
- /**
- * Waits for background ReplicationExecutor to stop running.
- *
- * The executor should be shutdown prior to calling this function
- * or the test may block indefinitely.
- */
- void joinExecutorThread();
-
- /**
- * Initializes both the NetworkInterfaceMock and ReplicationExecutor but
- * does not run the executor in the background.
- *
- * To run the executor in the background, tests should invoke launchExecutorThread() or
- * override this function() to achieve the same effect.
- */
- void setUp() override;
-
- /**
- * Destroys the replication executor.
- *
- * Shuts down running background executor.
- */
- void tearDown() override;
-
-
- private:
- executor::NetworkInterfaceMock* _net;
- StorageInterfaceMock* _storage;
- unique_ptr<ReplicationExecutor> _executor;
- unique_ptr<stdx::thread> _executorThread;
- };
+ void joinExecutorThread();
+
+ /**
+ * Initializes both the NetworkInterfaceMock and ReplicationExecutor but
+ * does not run the executor in the background.
+ *
+ * To run the executor in the background, tests should invoke launchExecutorThread() or
+ * override this function to achieve the same effect.
+ */
+ void setUp() override;
+
+ /**
+ * Destroys the replication executor.
+ *
+ * Shuts down the running background executor.
+ */
+ void tearDown() override;
+
+
+private:
+ executor::NetworkInterfaceMock* _net;
+ StorageInterfaceMock* _storage;
+ unique_ptr<ReplicationExecutor> _executor;
+ unique_ptr<stdx::thread> _executorThread;
+};
} // namespace repl
} // namespace mongo
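As a concrete illustration of the setUp()/launchExecutorThread() note above, a hypothetical derived fixture (not part of this patch; the name is invented) could launch the executor thread automatically:

    // Sketch only, assuming the fixture declared above.
    class AutoLaunchingExecutorTest : public ReplicationExecutorTest {
    protected:
        void setUp() override {
            ReplicationExecutorTest::setUp();  // builds the mock network and executor
            launchExecutorThread();            // then runs the executor in the background
        }
    };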
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 343a939fc95..57b403aa434 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -50,189 +50,193 @@
namespace mongo {
- using std::unique_ptr;
- using std::list;
- using std::string;
- using std::stringstream;
+using std::unique_ptr;
+using std::list;
+using std::string;
+using std::stringstream;
namespace repl {
- void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) {
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (replCoord->getSettings().usingReplSets()) {
- IsMasterResponse isMasterResponse;
- replCoord->fillIsMasterForReplSet(&isMasterResponse);
- result.appendElements(isMasterResponse.toBSON());
- if (level) {
- replCoord->appendSlaveInfoData(&result);
- }
- return;
- }
-
- // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed
- if (replAllDead) {
- result.append("ismaster", 0);
- string s = string("dead: ") + replAllDead;
- result.append("info", s);
+void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) {
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ if (replCoord->getSettings().usingReplSets()) {
+ IsMasterResponse isMasterResponse;
+ replCoord->fillIsMasterForReplSet(&isMasterResponse);
+ result.appendElements(isMasterResponse.toBSON());
+ if (level) {
+ replCoord->appendSlaveInfoData(&result);
}
- else {
- result.appendBool("ismaster",
- getGlobalReplicationCoordinator()->isMasterForReportingPurposes());
+ return;
+ }
+
+ // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed
+ if (replAllDead) {
+ result.append("ismaster", 0);
+ string s = string("dead: ") + replAllDead;
+ result.append("info", s);
+ } else {
+ result.appendBool("ismaster",
+ getGlobalReplicationCoordinator()->isMasterForReportingPurposes());
+ }
+
+ if (level) {
+ BSONObjBuilder sources(result.subarrayStart("sources"));
+
+ int n = 0;
+ list<BSONObj> src;
+ {
+ const char* localSources = "local.sources";
+ AutoGetCollectionForRead ctx(txn, localSources);
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::collectionScan(txn, localSources, ctx.getCollection()));
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ src.push_back(obj);
+ }
}
-
- if (level) {
- BSONObjBuilder sources( result.subarrayStart( "sources" ) );
-
- int n = 0;
- list<BSONObj> src;
+
+ for (list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++) {
+ BSONObj s = *i;
+ BSONObjBuilder bb;
+ bb.append(s["host"]);
+ string sourcename = s["source"].valuestr();
+ if (sourcename != "main")
+ bb.append(s["source"]);
{
- const char* localSources = "local.sources";
- AutoGetCollectionForRead ctx(txn, localSources);
- unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn, localSources, ctx.getCollection()));
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- src.push_back(obj);
- }
+ BSONElement e = s["syncedTo"];
+ BSONObjBuilder t(bb.subobjStart("syncedTo"));
+ t.appendDate("time", e.timestampTime());
+ t.append("inc", e.timestampInc());
+ t.done();
}
-
- for( list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++ ) {
- BSONObj s = *i;
- BSONObjBuilder bb;
- bb.append( s["host"] );
- string sourcename = s["source"].valuestr();
- if ( sourcename != "main" )
- bb.append( s["source"] );
- {
- BSONElement e = s["syncedTo"];
- BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
- t.appendDate( "time" , e.timestampTime() );
- t.append( "inc" , e.timestampInc() );
- t.done();
- }
-
- if ( level > 1 ) {
- wassert(!txn->lockState()->isLocked());
- // note: there is no so-style timeout on this connection; perhaps we should have one.
- ScopedDbConnection conn(s["host"].valuestr());
-
- DBClientConnection *cliConn = dynamic_cast< DBClientConnection* >( &conn.conn() );
- if ( cliConn && replAuthenticate(cliConn) ) {
- BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename,
- Query().sort( BSON( "$natural" << 1 ) ) );
- BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename,
- Query().sort( BSON( "$natural" << -1 ) ) );
- bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
- bb.appendDate( "masterLast" , last["ts"].timestampTime() );
- const auto lag =
- (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
- bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
- }
- conn.done();
+
+ if (level > 1) {
+ wassert(!txn->lockState()->isLocked());
+ // note: there is no so-style timeout on this connection; perhaps we should have one.
+ ScopedDbConnection conn(s["host"].valuestr());
+
+ DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
+ if (cliConn && replAuthenticate(cliConn)) {
+ BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename,
+ Query().sort(BSON("$natural" << 1)));
+ BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename,
+ Query().sort(BSON("$natural" << -1)));
+ bb.appendDate("masterFirst", first["ts"].timestampTime());
+ bb.appendDate("masterLast", last["ts"].timestampTime());
+ const auto lag = (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
+ bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
}
-
- sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
+ conn.done();
}
-
- sources.done();
- replCoord->appendSlaveInfoData(&result);
+ sources.append(BSONObjBuilder::numStr(n++), bb.obj());
}
+
+ sources.done();
+
+ replCoord->appendSlaveInfoData(&result);
}
-
- class ReplicationInfoServerStatus : public ServerStatusSection {
- public:
- ReplicationInfoServerStatus() : ServerStatusSection( "repl" ){}
- bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
-
- if (!getGlobalReplicationCoordinator()->isReplEnabled()) {
- return BSONObj();
- }
-
- int level = configElement.numberInt();
-
- BSONObjBuilder result;
- appendReplicationInfo(txn, result, level);
- getGlobalReplicationCoordinator()->processReplSetGetRBID(&result);
-
- return result.obj();
+}
+
+class ReplicationInfoServerStatus : public ServerStatusSection {
+public:
+ ReplicationInfoServerStatus() : ServerStatusSection("repl") {}
+ bool includeByDefault() const {
+ return true;
+ }
+
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ if (!getGlobalReplicationCoordinator()->isReplEnabled()) {
+ return BSONObj();
}
- } replicationInfoServerStatus;
+ int level = configElement.numberInt();
- class OplogInfoServerStatus : public ServerStatusSection {
- public:
- OplogInfoServerStatus() : ServerStatusSection( "oplog" ){}
- bool includeByDefault() const { return false; }
+ BSONObjBuilder result;
+ appendReplicationInfo(txn, result, level);
+ getGlobalReplicationCoordinator()->processReplSetGetRBID(&result);
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
+ return result.obj();
+ }
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (!replCoord->isReplEnabled()) {
- return BSONObj();
- }
+} replicationInfoServerStatus;
- BSONObjBuilder result;
- // TODO(siyuan) Output term of OpTime
- result.append("latestOptime", replCoord->getMyLastOptime().getTimestamp());
-
- const std::string& oplogNS =
- replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet ?
- rsOplogName : masterSlaveOplogName;
- BSONObj o;
- uassert(17347,
- "Problem reading earliest entry from oplog",
- Helpers::getSingleton(txn, oplogNS.c_str(), o));
- result.append("earliestOptime", o["ts"].timestamp());
- return result.obj();
- }
- } oplogInfoServerStatus;
+class OplogInfoServerStatus : public ServerStatusSection {
+public:
+ OplogInfoServerStatus() : ServerStatusSection("oplog") {}
+ bool includeByDefault() const {
+ return false;
+ }
- class CmdIsMaster : public Command {
- public:
- virtual bool requiresAuth() { return false; }
- virtual bool slaveOk() const {
- return true;
- }
- virtual void help( stringstream &help ) const {
- help << "Check if this server is primary for a replica pair/set; also if it is --master or --slave in simple master/slave setups.\n";
- help << "{ isMaster : 1 }";
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- CmdIsMaster() : Command("isMaster", true, "ismaster") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- /* currently request to arbiter is (somewhat arbitrarily) an ismaster request that is not
- authenticated.
- */
- if ( cmdObj["forShell"].trueValue() )
- LastError::get(txn->getClient()).disable();
-
- appendReplicationInfo(txn, result, 0);
-
- result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
- result.appendNumber("maxMessageSizeBytes", MaxMessageSizeBytes);
- result.appendNumber("maxWriteBatchSize", BatchedCommandRequest::kMaxWriteBatchSize);
- result.appendDate("localTime", jsTime());
- result.append("maxWireVersion", maxWireVersion);
- result.append("minWireVersion", minWireVersion);
- return true;
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ if (!replCoord->isReplEnabled()) {
+ return BSONObj();
}
- } cmdismaster;
- OpCounterServerStatusSection replOpCounterServerStatusSection( "opcountersRepl", &replOpCounters );
+ BSONObjBuilder result;
+ // TODO(siyuan) Output term of OpTime
+ result.append("latestOptime", replCoord->getMyLastOptime().getTimestamp());
+
+ const std::string& oplogNS =
+ replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet
+ ? rsOplogName
+ : masterSlaveOplogName;
+ BSONObj o;
+ uassert(17347,
+ "Problem reading earliest entry from oplog",
+ Helpers::getSingleton(txn, oplogNS.c_str(), o));
+ result.append("earliestOptime", o["ts"].timestamp());
+ return result.obj();
+ }
+} oplogInfoServerStatus;
+
+class CmdIsMaster : public Command {
+public:
+ virtual bool requiresAuth() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "Check if this server is primary for a replica pair/set; also if it is --master or "
+ "--slave in simple master/slave setups.\n";
+ help << "{ isMaster : 1 }";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ CmdIsMaster() : Command("isMaster", true, "ismaster") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ /* currently a request to an arbiter is (somewhat arbitrarily) an ismaster request that is not
+ authenticated.
+ */
+ if (cmdObj["forShell"].trueValue())
+ LastError::get(txn->getClient()).disable();
+
+ appendReplicationInfo(txn, result, 0);
+
+ result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
+ result.appendNumber("maxMessageSizeBytes", MaxMessageSizeBytes);
+ result.appendNumber("maxWriteBatchSize", BatchedCommandRequest::kMaxWriteBatchSize);
+ result.appendDate("localTime", jsTime());
+ result.append("maxWireVersion", maxWireVersion);
+ result.append("minWireVersion", minWireVersion);
+ return true;
+ }
+} cmdismaster;
+
+OpCounterServerStatusSection replOpCounterServerStatusSection("opcountersRepl", &replOpCounters);
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
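For orientation, the level argument threaded through appendReplicationInfo above controls how much detail the "repl" serverStatus section emits. A hedged fragment showing the call; it assumes an in-scope OperationContext* txn, and the shell invocations in the comment are illustrative only:

    // Illustrative fragment; level comes from the serverStatus config element
    // (e.g. db.serverStatus({repl: 1}) in the shell).
    //   level == 0 : ismaster / replica-set fields only
    //   level >= 1 : also scans local.sources and appends a "sources" array
    //   level >  1 : additionally dials each source to compute masterFirst/masterLast/lagSeconds
    BSONObjBuilder result;
    appendReplicationInfo(txn, result, /*level=*/1);
    BSONObj replSection = result.obj();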
diff --git a/src/mongo/db/repl/replset_commands.cpp b/src/mongo/db/repl/replset_commands.cpp
index 600fbfe52a8..c26bed4775b 100644
--- a/src/mongo/db/repl/replset_commands.cpp
+++ b/src/mongo/db/repl/replset_commands.cpp
@@ -61,757 +61,740 @@
namespace mongo {
namespace repl {
- using std::string;
- using std::stringstream;
-
- // Testing only, enabled via command-line.
- class CmdReplSetTest : public ReplSetCommand {
- public:
- virtual void help( stringstream &help ) const {
- help << "Just for regression tests.\n";
- }
- // No auth needed because it only works when enabled via command line.
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return Status::OK();
- }
- CmdReplSetTest() : ReplSetCommand("replSetTest") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- log() << "replSetTest command received: " << cmdObj.toString();
-
- if( cmdObj.hasElement("forceInitialSyncFailure") ) {
- replSetForceInitialSyncFailure = (unsigned) cmdObj["forceInitialSyncFailure"].Number();
- return true;
- }
-
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
-
- return false;
- }
- };
- MONGO_INITIALIZER(RegisterReplSetTestCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CmdReplSetTest();
- }
+using std::string;
+using std::stringstream;
+
+// Testing only, enabled via command-line.
+class CmdReplSetTest : public ReplSetCommand {
+public:
+ virtual void help(stringstream& help) const {
+ help << "Just for regression tests.\n";
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
return Status::OK();
}
+ CmdReplSetTest() : ReplSetCommand("replSetTest") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ log() << "replSetTest command received: " << cmdObj.toString();
+
+ if (cmdObj.hasElement("forceInitialSyncFailure")) {
+ replSetForceInitialSyncFailure = (unsigned)cmdObj["forceInitialSyncFailure"].Number();
+ return true;
+ }
- /** get rollback id. used to check if a rollback happened during some interval of time.
- as consumed, the rollback id is not in any particular order, it simply changes on each rollback.
- @see incRBID()
- */
- class CmdReplSetGetRBID : public ReplSetCommand {
- public:
- CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {}
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
-
- status = getGlobalReplicationCoordinator()->processReplSetGetRBID(&result);
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
return appendCommandStatus(result, status);
- }
- } cmdReplSetRBID;
-
- class CmdReplSetGetStatus : public ReplSetCommand {
- public:
- virtual void help( stringstream &help ) const {
- help << "Report status of a replica set from the POV of this server\n";
- help << "{ replSetGetStatus : 1 }";
- help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::replSetGetStatus);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- CmdReplSetGetStatus() : ReplSetCommand("replSetGetStatus", true) { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- if ( cmdObj["forShell"].trueValue() )
- LastError::get(txn->getClient()).disable();
-
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
- status = getGlobalReplicationCoordinator()->processReplSetGetStatus(&result);
+ return false;
+ }
+};
+MONGO_INITIALIZER(RegisterReplSetTestCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CmdReplSetTest();
+ }
+ return Status::OK();
+}
+
+/** Get rollback id. Used to check if a rollback happened during some interval of time.
+ As consumed, the rollback id is not in any particular order; it simply changes on each rollback.
+ @see incRBID()
+*/
+class CmdReplSetGetRBID : public ReplSetCommand {
+public:
+ CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
return appendCommandStatus(result, status);
+
+ status = getGlobalReplicationCoordinator()->processReplSetGetRBID(&result);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetRBID;
+
+class CmdReplSetGetStatus : public ReplSetCommand {
+public:
+ virtual void help(stringstream& help) const {
+ help << "Report status of a replica set from the POV of this server\n";
+ help << "{ replSetGetStatus : 1 }";
+ help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::replSetGetStatus);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- } cmdReplSetGetStatus;
-
- class CmdReplSetGetConfig : public ReplSetCommand {
- public:
- virtual void help( stringstream &help ) const {
- help << "Returns the current replica set configuration";
- help << "{ replSetGetConfig : 1 }";
- help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::replSetGetConfig);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- CmdReplSetGetConfig() : ReplSetCommand("replSetGetConfig", true) { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
+ return Status::OK();
+ }
+ CmdReplSetGetStatus() : ReplSetCommand("replSetGetStatus", true) {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (cmdObj["forShell"].trueValue())
+ LastError::get(txn->getClient()).disable();
+
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- getGlobalReplicationCoordinator()->processReplSetGetConfig(&result);
- return true;
+ status = getGlobalReplicationCoordinator()->processReplSetGetStatus(&result);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetGetStatus;
+
+class CmdReplSetGetConfig : public ReplSetCommand {
+public:
+ virtual void help(stringstream& help) const {
+ help << "Returns the current replica set configuration";
+ help << "{ replSetGetConfig : 1 }";
+ help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::replSetGetConfig);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- } cmdReplSetGetConfig;
+ return Status::OK();
+ }
+ CmdReplSetGetConfig() : ReplSetCommand("replSetGetConfig", true) {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
+
+ getGlobalReplicationCoordinator()->processReplSetGetConfig(&result);
+ return true;
+ }
+} cmdReplSetGetConfig;
namespace {
- HostAndPort someHostAndPortForMe() {
- const char* ips = serverGlobalParams.bind_ip.c_str();
- while (*ips) {
- std::string ip;
- const char* comma = strchr(ips, ',');
- if (comma) {
- ip = std::string(ips, comma - ips);
- ips = comma + 1;
- }
- else {
- ip = std::string(ips);
- ips = "";
- }
- HostAndPort h = HostAndPort(ip, serverGlobalParams.port);
- if (!h.isLocalHost()) {
- return h;
- }
+HostAndPort someHostAndPortForMe() {
+ const char* ips = serverGlobalParams.bind_ip.c_str();
+ while (*ips) {
+ std::string ip;
+ const char* comma = strchr(ips, ',');
+ if (comma) {
+ ip = std::string(ips, comma - ips);
+ ips = comma + 1;
+ } else {
+ ip = std::string(ips);
+ ips = "";
+ }
+ HostAndPort h = HostAndPort(ip, serverGlobalParams.port);
+ if (!h.isLocalHost()) {
+ return h;
}
+ }
+
+ std::string h = getHostName();
+ verify(!h.empty());
+ verify(h != "localhost");
+ return HostAndPort(h, serverGlobalParams.port);
+}
+
+void parseReplSetSeedList(ReplicationCoordinatorExternalState* externalState,
+ const std::string& replSetString,
+ std::string* setname,
+ std::vector<HostAndPort>* seeds) {
+ const char* p = replSetString.c_str();
+ const char* slash = strchr(p, '/');
+ std::set<HostAndPort> seedSet;
+ if (slash) {
+ *setname = string(p, slash - p);
+ } else {
+ *setname = p;
+ }
- std::string h = getHostName();
- verify(!h.empty());
- verify(h != "localhost");
- return HostAndPort(h, serverGlobalParams.port);
+ if (slash == 0) {
+ return;
}
- void parseReplSetSeedList(ReplicationCoordinatorExternalState* externalState,
- const std::string& replSetString,
- std::string* setname,
- std::vector<HostAndPort>* seeds) {
- const char *p = replSetString.c_str();
- const char *slash = strchr(p, '/');
- std::set<HostAndPort> seedSet;
- if (slash) {
- *setname = string(p, slash-p);
+ p = slash + 1;
+ while (1) {
+ const char* comma = strchr(p, ',');
+ if (comma == 0) {
+ comma = strchr(p, 0);
+ }
+ if (p == comma) {
+ break;
+ }
+ HostAndPort m;
+ try {
+ m = HostAndPort(string(p, comma - p));
+ } catch (...) {
+ uassert(13114, "bad --replSet seed hostname", false);
+ }
+ uassert(13096, "bad --replSet command line config string - dups?", seedSet.count(m) == 0);
+ seedSet.insert(m);
+ // uassert(13101, "can't use localhost in replset host list", !m.isLocalHost());
+ if (externalState->isSelf(m)) {
+ LOG(1) << "ignoring seed " << m.toString() << " (=self)";
+ } else {
+ seeds->push_back(m);
+ }
+ if (*comma == 0) {
+ break;
+ }
+ p = comma + 1;
+ }
+}
+} // namespace
+
+class CmdReplSetInitiate : public ReplSetCommand {
+public:
+ CmdReplSetInitiate() : ReplSetCommand("replSetInitiate") {}
+ virtual void help(stringstream& h) const {
+ h << "Initiate/christen a replica set.";
+ h << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::replSetConfigure);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- else {
- *setname = p;
+ return Status::OK();
+ }
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONObj configObj;
+ if (cmdObj["replSetInitiate"].type() == Object) {
+ configObj = cmdObj["replSetInitiate"].Obj();
+ }
+
+ std::string replSetString = getGlobalReplicationCoordinator()->getSettings().replSet;
+ if (replSetString.empty()) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NoReplicationEnabled,
+ "This node was not started with the replSet "
+ "option"));
+ }
+
+ if (configObj.isEmpty()) {
+ string noConfigMessage =
+ "no configuration specified. "
+ "Using a default configuration for the set";
+ result.append("info2", noConfigMessage);
+ log() << "initiate : " << noConfigMessage;
+
+ ReplicationCoordinatorExternalStateImpl externalState;
+ std::string name;
+ std::vector<HostAndPort> seeds;
+ parseReplSetSeedList(&externalState, replSetString, &name, &seeds); // may throw...
+
+ BSONObjBuilder b;
+ b.append("_id", name);
+ b.append("version", 1);
+ BSONObjBuilder members;
+ HostAndPort me = someHostAndPortForMe();
+ members.append("0", BSON("_id" << 0 << "host" << me.toString()));
+ result.append("me", me.toString());
+ for (unsigned i = 0; i < seeds.size(); i++) {
+ members.append(BSONObjBuilder::numStr(i + 1),
+ BSON("_id" << i + 1 << "host" << seeds[i].toString()));
+ }
+ b.appendArray("members", members.obj());
+ configObj = b.obj();
+ log() << "created this configuration for initiation : " << configObj.toString();
+ }
+
+ if (configObj.getField("version").eoo()) {
+ // Missing version field defaults to version 1.
+ BSONObjBuilder builder;
+ builder.appendElements(configObj);
+ builder.append("version", 1);
+ configObj = builder.obj();
+ }
+
+ Status status =
+ getGlobalReplicationCoordinator()->processReplSetInitiate(txn, configObj, &result);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetInitiate;
+
+class CmdReplSetReconfig : public ReplSetCommand {
+public:
+ virtual void help(stringstream& help) const {
+ help << "Adjust configuration of a replica set\n";
+ help << "{ replSetReconfig : config_object }";
+ help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::replSetConfigure);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
-
- if (slash == 0) {
- return;
+ return Status::OK();
+ }
+ CmdReplSetReconfig() : ReplSetCommand("replSetReconfig") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- p = slash + 1;
- while (1) {
- const char *comma = strchr(p, ',');
- if (comma == 0) {
- comma = strchr(p,0);
- }
- if (p == comma) {
- break;
- }
- HostAndPort m;
- try {
- m = HostAndPort( string(p, comma-p) );
- }
- catch (...) {
- uassert(13114, "bad --replSet seed hostname", false);
- }
- uassert(13096, "bad --replSet command line config string - dups?",
- seedSet.count(m) == 0);
- seedSet.insert(m);
- //uassert(13101, "can't use localhost in replset host list", !m.isLocalHost());
- if (externalState->isSelf(m)) {
- LOG(1) << "ignoring seed " << m.toString() << " (=self)";
- }
- else {
- seeds->push_back(m);
- }
- if (*comma == 0) {
- break;
- }
- p = comma + 1;
- }
- }
-} // namespace
-
- class CmdReplSetInitiate : public ReplSetCommand {
- public:
- CmdReplSetInitiate() : ReplSetCommand("replSetInitiate") { }
- virtual void help(stringstream& h) const {
- h << "Initiate/christen a replica set.";
- h << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::replSetConfigure);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- virtual bool run(OperationContext* txn,
- const string& ,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- BSONObj configObj;
- if( cmdObj["replSetInitiate"].type() == Object ) {
- configObj = cmdObj["replSetInitiate"].Obj();
- }
+ if (cmdObj["replSetReconfig"].type() != Object) {
+ errmsg = "no configuration specified";
+ return false;
+ }
- std::string replSetString = getGlobalReplicationCoordinator()->getSettings().replSet;
- if (replSetString.empty()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NoReplicationEnabled,
- "This node was not started with the replSet "
- "option"));
- }
+ ReplicationCoordinator::ReplSetReconfigArgs parsedArgs;
+ parsedArgs.newConfigObj = cmdObj["replSetReconfig"].Obj();
+ parsedArgs.force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
+ status =
+ getGlobalReplicationCoordinator()->processReplSetReconfig(txn, parsedArgs, &result);
- if (configObj.isEmpty()) {
- string noConfigMessage = "no configuration specified. "
- "Using a default configuration for the set";
- result.append("info2", noConfigMessage);
- log() << "initiate : " << noConfigMessage;
-
- ReplicationCoordinatorExternalStateImpl externalState;
- std::string name;
- std::vector<HostAndPort> seeds;
- parseReplSetSeedList(
- &externalState,
- replSetString,
- &name,
- &seeds); // may throw...
-
- BSONObjBuilder b;
- b.append("_id", name);
- b.append("version", 1);
- BSONObjBuilder members;
- HostAndPort me = someHostAndPortForMe();
- members.append("0", BSON( "_id" << 0 << "host" << me.toString() ));
- result.append("me", me.toString());
- for( unsigned i = 0; i < seeds.size(); i++ ) {
- members.append(BSONObjBuilder::numStr(i+1),
- BSON( "_id" << i+1 << "host" << seeds[i].toString()));
- }
- b.appendArray("members", members.obj());
- configObj = b.obj();
- log() << "created this configuration for initiation : " <<
- configObj.toString();
- }
+ ScopedTransaction scopedXact(txn, MODE_X);
+ Lock::GlobalWrite globalWrite(txn->lockState());
- if (configObj.getField("version").eoo()) {
- // Missing version field defaults to version 1.
- BSONObjBuilder builder;
- builder.appendElements(configObj);
- builder.append("version", 1);
- configObj = builder.obj();
- }
+ WriteUnitOfWork wuow(txn);
+ if (status.isOK() && !parsedArgs.force) {
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(
+ txn,
+ BSON("msg"
+ << "Reconfig set"
+ << "version" << parsedArgs.newConfigObj["version"]));
+ }
+ wuow.commit();
- Status status = getGlobalReplicationCoordinator()->processReplSetInitiate(txn,
- configObj,
- &result);
- return appendCommandStatus(result, status);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetReconfig;
+
+class CmdReplSetFreeze : public ReplSetCommand {
+public:
+ virtual void help(stringstream& help) const {
+ help << "{ replSetFreeze : <seconds> }";
+ help << "'freeze' state of member to the extent we can do that. What this really means is "
+ "that\n";
+ help << "this node will not attempt to become primary until the time period specified "
+ "expires.\n";
+ help << "You can call again with {replSetFreeze:0} to unfreeze sooner.\n";
+ help << "A process restart unfreezes the member also.\n";
+ help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::replSetStateChange);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- } cmdReplSetInitiate;
-
- class CmdReplSetReconfig : public ReplSetCommand {
- public:
- virtual void help( stringstream &help ) const {
- help << "Adjust configuration of a replica set\n";
- help << "{ replSetReconfig : config_object }";
- help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::replSetConfigure);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- CmdReplSetReconfig() : ReplSetCommand("replSetReconfig") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ return Status::OK();
+ }
+ CmdReplSetFreeze() : ReplSetCommand("replSetFreeze") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- if( cmdObj["replSetReconfig"].type() != Object ) {
- errmsg = "no configuration specified";
- return false;
- }
+ int secs = (int)cmdObj.firstElement().numberInt();
+ return appendCommandStatus(
+ result, getGlobalReplicationCoordinator()->processReplSetFreeze(secs, &result));
+ }
+} cmdReplSetFreeze;
+
+class CmdReplSetStepDown : public ReplSetCommand {
+public:
+ virtual void help(stringstream& help) const {
+ help << "{ replSetStepDown : <seconds> }\n";
+ help << "Step down as primary. Will not try to reelect self for the specified time period "
+ "(1 minute if no numeric secs value specified).\n";
+ help << "(If another member with same priority takes over in the meantime, it will stay "
+ "primary.)\n";
+ help << "http://dochub.mongodb.org/core/replicasetcommands";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::replSetStateChange);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+ CmdReplSetStepDown() : ReplSetCommand("replSetStepDown") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- ReplicationCoordinator::ReplSetReconfigArgs parsedArgs;
- parsedArgs.newConfigObj = cmdObj["replSetReconfig"].Obj();
- parsedArgs.force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
- status = getGlobalReplicationCoordinator()->processReplSetReconfig(txn,
- parsedArgs,
- &result);
-
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
-
- WriteUnitOfWork wuow(txn);
- if (status.isOK() && !parsedArgs.force) {
- getGlobalServiceContext()->getOpObserver()->onOpMessage(
- txn,
- BSON("msg" << "Reconfig set" <<
- "version" << parsedArgs.newConfigObj["version"]));
- }
- wuow.commit();
+ const bool force = cmdObj["force"].trueValue();
+ long long stepDownForSecs = cmdObj.firstElement().numberLong();
+ if (stepDownForSecs == 0) {
+ stepDownForSecs = 60;
+ } else if (stepDownForSecs < 0) {
+ status = Status(ErrorCodes::BadValue, "stepdown period must be a positive integer");
return appendCommandStatus(result, status);
}
- } cmdReplSetReconfig;
-
- class CmdReplSetFreeze : public ReplSetCommand {
- public:
- virtual void help( stringstream &help ) const {
- help << "{ replSetFreeze : <seconds> }";
- help << "'freeze' state of member to the extent we can do that. What this really means is that\n";
- help << "this node will not attempt to become primary until the time period specified expires.\n";
- help << "You can call again with {replSetFreeze:0} to unfreeze sooner.\n";
- help << "A process restart unfreezes the member also.\n";
- help << "\nhttp://dochub.mongodb.org/core/replicasetcommands";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::replSetStateChange);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- CmdReplSetFreeze() : ReplSetCommand("replSetFreeze") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
-
- int secs = (int) cmdObj.firstElement().numberInt();
- return appendCommandStatus(
- result,
- getGlobalReplicationCoordinator()->processReplSetFreeze(secs, &result));
- }
- } cmdReplSetFreeze;
-
- class CmdReplSetStepDown: public ReplSetCommand {
- public:
- virtual void help( stringstream &help ) const {
- help << "{ replSetStepDown : <seconds> }\n";
- help << "Step down as primary. Will not try to reelect self for the specified time period (1 minute if no numeric secs value specified).\n";
- help << "(If another member with same priority takes over in the meantime, it will stay primary.)\n";
- help << "http://dochub.mongodb.org/core/replicasetcommands";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::replSetStateChange);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- CmdReplSetStepDown() : ReplSetCommand("replSetStepDown") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
-
- const bool force = cmdObj["force"].trueValue();
- long long stepDownForSecs = cmdObj.firstElement().numberLong();
- if (stepDownForSecs == 0) {
- stepDownForSecs = 60;
- }
- else if (stepDownForSecs < 0) {
- status = Status(ErrorCodes::BadValue,
- "stepdown period must be a positive integer");
- return appendCommandStatus(result, status);
- }
-
- long long secondaryCatchUpPeriodSecs;
- status = bsonExtractIntegerField(cmdObj,
- "secondaryCatchUpPeriodSecs",
- &secondaryCatchUpPeriodSecs);
- if (status.code() == ErrorCodes::NoSuchKey) {
- // if field is absent, default values
- if (force) {
- secondaryCatchUpPeriodSecs = 0;
- }
- else {
- secondaryCatchUpPeriodSecs = 10;
- }
- }
- else if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- if (secondaryCatchUpPeriodSecs < 0) {
- status = Status(ErrorCodes::BadValue,
- "secondaryCatchUpPeriodSecs period must be a positive or absent");
- return appendCommandStatus(result, status);
- }
-
- if (stepDownForSecs < secondaryCatchUpPeriodSecs) {
- status = Status(ErrorCodes::BadValue,
- "stepdown period must be longer than secondaryCatchUpPeriodSecs");
- return appendCommandStatus(result, status);
+ long long secondaryCatchUpPeriodSecs;
+ status = bsonExtractIntegerField(
+ cmdObj, "secondaryCatchUpPeriodSecs", &secondaryCatchUpPeriodSecs);
+ if (status.code() == ErrorCodes::NoSuchKey) {
+ // if field is absent, default values
+ if (force) {
+ secondaryCatchUpPeriodSecs = 0;
+ } else {
+ secondaryCatchUpPeriodSecs = 10;
}
+ } else if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- log() << "Attempting to step down in response to replSetStepDown command";
+ if (secondaryCatchUpPeriodSecs < 0) {
+ status = Status(ErrorCodes::BadValue,
+ "secondaryCatchUpPeriodSecs period must be a positive or absent");
+ return appendCommandStatus(result, status);
+ }
- status = getGlobalReplicationCoordinator()->stepDown(
- txn,
- force,
- Seconds(secondaryCatchUpPeriodSecs),
- Seconds(stepDownForSecs));
+ if (stepDownForSecs < secondaryCatchUpPeriodSecs) {
+ status = Status(ErrorCodes::BadValue,
+ "stepdown period must be longer than secondaryCatchUpPeriodSecs");
return appendCommandStatus(result, status);
}
- } cmdReplSetStepDown;
-
- class CmdReplSetMaintenance: public ReplSetCommand {
- public:
- virtual void help( stringstream &help ) const {
- help << "{ replSetMaintenance : bool }\n";
- help << "Enable or disable maintenance mode.";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::replSetStateChange);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- CmdReplSetMaintenance() : ReplSetCommand("replSetMaintenance") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
- return appendCommandStatus(
- result,
- getGlobalReplicationCoordinator()->setMaintenanceMode(
- cmdObj["replSetMaintenance"].trueValue()));
- }
- } cmdReplSetMaintenance;
-
- class CmdReplSetSyncFrom: public ReplSetCommand {
- public:
- virtual void help( stringstream &help ) const {
- help << "{ replSetSyncFrom : \"host:port\" }\n";
- help << "Change who this member is syncing from.";
- }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::replSetStateChange);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
- CmdReplSetSyncFrom() : ReplSetCommand("replSetSyncFrom") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
+ log() << "Attempting to step down in response to replSetStepDown command";
- HostAndPort targetHostAndPort;
- status = targetHostAndPort.initialize(cmdObj["replSetSyncFrom"].valuestrsafe());
- if (!status.isOK())
- return appendCommandStatus(result, status);
+ status = getGlobalReplicationCoordinator()->stepDown(
+ txn, force, Seconds(secondaryCatchUpPeriodSecs), Seconds(stepDownForSecs));
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetStepDown;
- return appendCommandStatus(
- result,
- getGlobalReplicationCoordinator()->processReplSetSyncFrom(targetHostAndPort,
- &result));
- }
- } cmdReplSetSyncFrom;
-
- class CmdReplSetUpdatePosition: public ReplSetCommand {
- public:
- CmdReplSetUpdatePosition() : ReplSetCommand("replSetUpdatePosition") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
+class CmdReplSetMaintenance : public ReplSetCommand {
+public:
+ virtual void help(stringstream& help) const {
+ help << "{ replSetMaintenance : bool }\n";
+ help << "Enable or disable maintenance mode.";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::replSetStateChange);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+ CmdReplSetMaintenance() : ReplSetCommand("replSetMaintenance") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- // accept and ignore handshakes sent from old (3.0-series) nodes without erroring to
- // enable mixed-version operation, since we no longer use the handshakes
- if (cmdObj.hasField("handshake")) {
- return true;
- }
-
- UpdatePositionArgs args;
- status = args.initialize(cmdObj);
- if (!status.isOK())
- return appendCommandStatus(result, status);
+ return appendCommandStatus(result,
+ getGlobalReplicationCoordinator()->setMaintenanceMode(
+ cmdObj["replSetMaintenance"].trueValue()));
+ }
+} cmdReplSetMaintenance;
- // in the case of an update from a member with an invalid replica set config,
- // we return our current config version
- long long configVersion = -1;
- status = getGlobalReplicationCoordinator()->
- processReplSetUpdatePosition(args, &configVersion);
+class CmdReplSetSyncFrom : public ReplSetCommand {
+public:
+ virtual void help(stringstream& help) const {
+ help << "{ replSetSyncFrom : \"host:port\" }\n";
+ help << "Change who this member is syncing from.";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::replSetStateChange);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+ CmdReplSetSyncFrom() : ReplSetCommand("replSetSyncFrom") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- if (status == ErrorCodes::InvalidReplicaSetConfig) {
- result.append("configVersion", configVersion);
- }
+ HostAndPort targetHostAndPort;
+ status = targetHostAndPort.initialize(cmdObj["replSetSyncFrom"].valuestrsafe());
+ if (!status.isOK())
return appendCommandStatus(result, status);
- }
- } cmdReplSetUpdatePosition;
-namespace {
- /**
- * Returns true if there is no data on this server. Useful when starting replication.
- * The "local" database does NOT count except for "rs.oplog" collection.
- * Used to set the hasData field on replset heartbeat command response.
- */
- bool replHasDatabases(OperationContext* txn) {
- std::vector<string> names;
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases(&names);
-
- if( names.size() >= 2 ) return true;
- if( names.size() == 1 ) {
- if( names[0] != "local" )
- return true;
-
- // we have a local database. return true if oplog isn't empty
- BSONObj o;
- if (Helpers::getSingleton(txn, repl::rsOplogName.c_str(), o)) {
- return true;
- }
- }
- return false;
+ return appendCommandStatus(
+ result,
+ getGlobalReplicationCoordinator()->processReplSetSyncFrom(targetHostAndPort, &result));
}
+} cmdReplSetSyncFrom;
+
+class CmdReplSetUpdatePosition : public ReplSetCommand {
+public:
+ CmdReplSetUpdatePosition() : ReplSetCommand("replSetUpdatePosition") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
-} // namespace
+ // accept and ignore handshakes sent from old (3.0-series) nodes without erroring to
+ // enable mixed-version operation, since we no longer use the handshakes
+ if (cmdObj.hasField("handshake")) {
+ return true;
+ }
- MONGO_FP_DECLARE(rsDelayHeartbeatResponse);
+ UpdatePositionArgs args;
+ status = args.initialize(cmdObj);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- /* { replSetHeartbeat : <setname> } */
- class CmdReplSetHeartbeat : public ReplSetCommand {
- public:
- CmdReplSetHeartbeat() : ReplSetCommand("replSetHeartbeat") { }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
+ // in the case of an update from a member with an invalid replica set config,
+ // we return our current config version
+ long long configVersion = -1;
+ status =
+ getGlobalReplicationCoordinator()->processReplSetUpdatePosition(args, &configVersion);
- MONGO_FAIL_POINT_BLOCK(rsDelayHeartbeatResponse, delay) {
- const BSONObj& data = delay.getData();
- sleepsecs(data["delay"].numberInt());
- }
+ if (status == ErrorCodes::InvalidReplicaSetConfig) {
+ result.append("configVersion", configVersion);
+ }
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetUpdatePosition;
- Status status = Status(ErrorCodes::InternalError, "status not set in heartbeat code");
- /* we don't call ReplSetCommand::check() here because heartbeat
- checks many things that are pre-initialization. */
- if (!getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
- status = Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet");
- return appendCommandStatus(result, status);
- }
+namespace {
+/**
+ * Returns true if there is no data on this server. Useful when starting replication.
+ * The "local" database does NOT count except for "rs.oplog" collection.
+ * Used to set the hasData field on replset heartbeat command response.
+ */
+bool replHasDatabases(OperationContext* txn) {
+ std::vector<string> names;
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&names);
+
+ if (names.size() >= 2)
+ return true;
+ if (names.size() == 1) {
+ if (names[0] != "local")
+ return true;
- /* we want to keep heartbeat connections open when relinquishing primary.
- tag them here. */
- {
- AbstractMessagingPort *mp = txn->getClient()->port();
- if( mp )
- mp->tag |= executor::NetworkInterface::kMessagingPortKeepOpen;
- }
+ // we have a local database. return true if oplog isn't empty
+ BSONObj o;
+ if (Helpers::getSingleton(txn, repl::rsOplogName.c_str(), o)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+MONGO_FP_DECLARE(rsDelayHeartbeatResponse);
+
+/* { replSetHeartbeat : <setname> } */
+class CmdReplSetHeartbeat : public ReplSetCommand {
+public:
+ CmdReplSetHeartbeat() : ReplSetCommand("replSetHeartbeat") {}
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ MONGO_FAIL_POINT_BLOCK(rsDelayHeartbeatResponse, delay) {
+ const BSONObj& data = delay.getData();
+ sleepsecs(data["delay"].numberInt());
+ }
+
+ Status status = Status(ErrorCodes::InternalError, "status not set in heartbeat code");
+ /* we don't call ReplSetCommand::check() here because heartbeat
+ checks many things that are pre-initialization. */
+ if (!getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
+ status = Status(ErrorCodes::NoReplicationEnabled, "not running with --replSet");
+ return appendCommandStatus(result, status);
+ }
- if (getGlobalReplicationCoordinator()->isV1ElectionProtocol()) {
- ReplSetHeartbeatArgsV1 args;
- status = args.initialize(cmdObj);
- if (status.isOK()) {
- ReplSetHeartbeatResponse response;
- status = getGlobalReplicationCoordinator()->processHeartbeatV1(args, &response);
- if (status.isOK())
- response.addToBSON(&result, true);
- return appendCommandStatus(result, status);
- }
- // else: fall through to old heartbeat protocol as it is likely that
- // a new node just joined the set
- }
+ /* we want to keep heartbeat connections open when relinquishing primary.
+ tag them here. */
+ {
+ AbstractMessagingPort* mp = txn->getClient()->port();
+ if (mp)
+ mp->tag |= executor::NetworkInterface::kMessagingPortKeepOpen;
+ }
- ReplSetHeartbeatArgs args;
+ if (getGlobalReplicationCoordinator()->isV1ElectionProtocol()) {
+ ReplSetHeartbeatArgsV1 args;
status = args.initialize(cmdObj);
- if (!status.isOK()) {
+ if (status.isOK()) {
+ ReplSetHeartbeatResponse response;
+ status = getGlobalReplicationCoordinator()->processHeartbeatV1(args, &response);
+ if (status.isOK())
+ response.addToBSON(&result, true);
return appendCommandStatus(result, status);
}
+ // else: fall through to old heartbeat protocol as it is likely that
+ // a new node just joined the set
+ }
- // ugh.
- if (args.getCheckEmpty()) {
- result.append("hasData", replHasDatabases(txn));
- }
-
- ReplSetHeartbeatResponse response;
- status = getGlobalReplicationCoordinator()->processHeartbeat(args, &response);
- if (status.isOK())
- response.addToBSON(&result, false);
+ ReplSetHeartbeatArgs args;
+ status = args.initialize(cmdObj);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdReplSetHeartbeat;
-
- /** the first cmd called by a node seeking election and it's a basic sanity
- test: do any of the nodes it can reach know that it can't be the primary?
- */
- class CmdReplSetFresh : public ReplSetCommand {
- public:
- CmdReplSetFresh() : ReplSetCommand("replSetFresh") { }
-
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
- ReplicationCoordinator::ReplSetFreshArgs parsedArgs;
- parsedArgs.id = cmdObj["id"].Int();
- parsedArgs.setName = cmdObj["set"].String();
- parsedArgs.who = HostAndPort(cmdObj["who"].String());
- BSONElement cfgverElement = cmdObj["cfgver"];
- uassert(28525,
- str::stream() << "Expected cfgver argument to replSetFresh command to have "
- "numeric type, but found " << typeName(cfgverElement.type()),
- cfgverElement.isNumber());
- parsedArgs.cfgver = cfgverElement.safeNumberLong();
- parsedArgs.opTime = Timestamp(cmdObj["opTime"].Date());
-
- status = getGlobalReplicationCoordinator()->processReplSetFresh(parsedArgs, &result);
- return appendCommandStatus(result, status);
+ // ugh.
+ if (args.getCheckEmpty()) {
+ result.append("hasData", replHasDatabases(txn));
}
- } cmdReplSetFresh;
-
- class CmdReplSetElect : public ReplSetCommand {
- public:
- CmdReplSetElect() : ReplSetCommand("replSetElect") { }
- private:
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- DEV log() << "received elect msg " << cmdObj.toString();
- else LOG(2) << "received elect msg " << cmdObj.toString();
-
- Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
- if (!status.isOK())
- return appendCommandStatus(result, status);
- ReplicationCoordinator::ReplSetElectArgs parsedArgs;
- parsedArgs.set = cmdObj["set"].String();
- parsedArgs.whoid = cmdObj["whoid"].Int();
- BSONElement cfgverElement = cmdObj["cfgver"];
- uassert(28526,
- str::stream() << "Expected cfgver argument to replSetElect command to have "
- "numeric type, but found " << typeName(cfgverElement.type()),
- cfgverElement.isNumber());
- parsedArgs.cfgver = cfgverElement.safeNumberLong();
- parsedArgs.round = cmdObj["round"].OID();
-
- status = getGlobalReplicationCoordinator()->processReplSetElect(parsedArgs, &result);
+ ReplSetHeartbeatResponse response;
+ status = getGlobalReplicationCoordinator()->processHeartbeat(args, &response);
+ if (status.isOK())
+ response.addToBSON(&result, false);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetHeartbeat;
+
+/** The first cmd called by a node seeking election; it's a basic sanity
+ test: do any of the nodes it can reach know that it can't be the primary?
+ */
+class CmdReplSetFresh : public ReplSetCommand {
+public:
+ CmdReplSetFresh() : ReplSetCommand("replSetFresh") {}
+
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
return appendCommandStatus(result, status);
- }
- } cmdReplSetElect;
-} // namespace repl
-} // namespace mongo
+ ReplicationCoordinator::ReplSetFreshArgs parsedArgs;
+ parsedArgs.id = cmdObj["id"].Int();
+ parsedArgs.setName = cmdObj["set"].String();
+ parsedArgs.who = HostAndPort(cmdObj["who"].String());
+ BSONElement cfgverElement = cmdObj["cfgver"];
+ uassert(28525,
+ str::stream() << "Expected cfgver argument to replSetFresh command to have "
+ "numeric type, but found " << typeName(cfgverElement.type()),
+ cfgverElement.isNumber());
+ parsedArgs.cfgver = cfgverElement.safeNumberLong();
+ parsedArgs.opTime = Timestamp(cmdObj["opTime"].Date());
+
+ status = getGlobalReplicationCoordinator()->processReplSetFresh(parsedArgs, &result);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetFresh;
+
+class CmdReplSetElect : public ReplSetCommand {
+public:
+ CmdReplSetElect() : ReplSetCommand("replSetElect") {}
+
+private:
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ DEV log() << "received elect msg " << cmdObj.toString();
+ else LOG(2) << "received elect msg " << cmdObj.toString();
+
+ Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
+
+ ReplicationCoordinator::ReplSetElectArgs parsedArgs;
+ parsedArgs.set = cmdObj["set"].String();
+ parsedArgs.whoid = cmdObj["whoid"].Int();
+ BSONElement cfgverElement = cmdObj["cfgver"];
+ uassert(28526,
+ str::stream() << "Expected cfgver argument to replSetElect command to have "
+ "numeric type, but found " << typeName(cfgverElement.type()),
+ cfgverElement.isNumber());
+ parsedArgs.cfgver = cfgverElement.safeNumberLong();
+ parsedArgs.round = cmdObj["round"].OID();
+
+ status = getGlobalReplicationCoordinator()->processReplSetElect(parsedArgs, &result);
+ return appendCommandStatus(result, status);
+ }
+} cmdReplSetElect;
+
+} // namespace repl
+} // namespace mongo
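For context, a rough sketch of the command document that CmdReplSetFresh::run() above parses. The field names come straight from the parsing code; the concrete values and the builder-based construction are placeholders only.

BSONObjBuilder bob;
bob.append("replSetFresh", 1);
bob.append("set", "rs0");            // replica set name, read via cmdObj["set"].String()
bob.append("who", "node1:27017");    // host of the member asking for the freshness check
bob.append("id", 2);                 // member id of the asker
bob.append("cfgver", 5);             // config version; must be numeric (uassert 28525)
bob.appendDate("opTime", Date_t());  // asker's latest optime, read back via .Date()
BSONObj freshCmd = bob.obj();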
diff --git a/src/mongo/db/repl/replset_web_handler.cpp b/src/mongo/db/repl/replset_web_handler.cpp
index 12fe6ab8bd0..6ec53363396 100644
--- a/src/mongo/db/repl/replset_web_handler.cpp
+++ b/src/mongo/db/repl/replset_web_handler.cpp
@@ -41,56 +41,55 @@
namespace mongo {
namespace repl {
- using namespace html;
+using namespace html;
- class ReplSetHandler : public DbWebHandler {
- public:
- ReplSetHandler() : DbWebHandler( "_replSet" , 1 , false ) {}
+class ReplSetHandler : public DbWebHandler {
+public:
+ ReplSetHandler() : DbWebHandler("_replSet", 1, false) {}
- virtual bool handles( const std::string& url ) const {
- return str::startsWith( url , "/_replSet" );
- }
-
- virtual void handle( OperationContext* txn,
- const char *rq,
- const std::string& url,
- BSONObj params,
- std::string& responseMsg,
- int& responseCode,
- std::vector<std::string>& headers,
- const SockAddr &from ) {
- responseMsg = _replSet(txn);
- responseCode = 200;
- }
+ virtual bool handles(const std::string& url) const {
+ return str::startsWith(url, "/_replSet");
+ }
- /* /_replSet show replica set status in html format */
- std::string _replSet(OperationContext* txn) {
- std::stringstream s;
- s << start("Replica Set Status " + prettyHostName());
- s << p( a("/", "back", "Home") + " | " +
- a("/local/system.replset/?html=1", "", "View Replset Config") + " | " +
- a("/replSetGetStatus?text=1", "", "replSetGetStatus") + " | " +
- a("http://dochub.mongodb.org/core/replicasets", "", "Docs")
- );
+ virtual void handle(OperationContext* txn,
+ const char* rq,
+ const std::string& url,
+ BSONObj params,
+ std::string& responseMsg,
+ int& responseCode,
+ std::vector<std::string>& headers,
+ const SockAddr& from) {
+ responseMsg = _replSet(txn);
+ responseCode = 200;
+ }
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (replCoord->getReplicationMode() != ReplicationCoordinator::modeReplSet) {
- s << p("Not using --replSet");
- s << _end();
- return s.str();
- }
+ /* /_replSet show replica set status in html format */
+ std::string _replSet(OperationContext* txn) {
+ std::stringstream s;
+ s << start("Replica Set Status " + prettyHostName());
+ s << p(a("/", "back", "Home") + " | " +
+ a("/local/system.replset/?html=1", "", "View Replset Config") + " | " +
+ a("/replSetGetStatus?text=1", "", "replSetGetStatus") + " | " +
+ a("http://dochub.mongodb.org/core/replicasets", "", "Docs"));
- ReplSetHtmlSummary summary;
- replCoord->summarizeAsHtml(&summary);
- s << summary.toHtmlString();
-
- s << p("Recent replset log activity:");
- fillRsLog(&s);
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ if (replCoord->getReplicationMode() != ReplicationCoordinator::modeReplSet) {
+ s << p("Not using --replSet");
s << _end();
return s.str();
}
- } replSetHandler;
+ ReplSetHtmlSummary summary;
+ replCoord->summarizeAsHtml(&summary);
+ s << summary.toHtmlString();
+
+ s << p("Recent replset log activity:");
+ fillRsLog(&s);
+ s << _end();
+ return s.str();
+ }
+
+} replSetHandler;
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 90218fd017f..b05bc2dbdd9 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -38,130 +38,124 @@
namespace mongo {
namespace repl {
- ReplicationProgressManager::~ReplicationProgressManager() {}
-
- Reporter::Reporter(ReplicationExecutor* executor,
- ReplicationProgressManager* replicationProgressManager,
- const HostAndPort& target)
- : _executor(executor),
- _updatePositionSource(replicationProgressManager),
- _target(target),
- _status(Status::OK()),
- _willRunAgain(false),
- _active(false) {
-
- uassert(ErrorCodes::BadValue, "null replication executor", executor);
- uassert(ErrorCodes::BadValue,
- "null replication progress manager",
- replicationProgressManager);
- uassert(ErrorCodes::BadValue, "target name cannot be empty", !target.empty());
+ReplicationProgressManager::~ReplicationProgressManager() {}
+
+Reporter::Reporter(ReplicationExecutor* executor,
+ ReplicationProgressManager* replicationProgressManager,
+ const HostAndPort& target)
+ : _executor(executor),
+ _updatePositionSource(replicationProgressManager),
+ _target(target),
+ _status(Status::OK()),
+ _willRunAgain(false),
+ _active(false) {
+ uassert(ErrorCodes::BadValue, "null replication executor", executor);
+ uassert(ErrorCodes::BadValue, "null replication progress manager", replicationProgressManager);
+ uassert(ErrorCodes::BadValue, "target name cannot be empty", !target.empty());
+}
+
+Reporter::~Reporter() {
+ DESTRUCTOR_GUARD(cancel(););
+}
+
+void Reporter::cancel() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+
+ if (!_active) {
+ return;
}
- Reporter::~Reporter() {
- DESTRUCTOR_GUARD(
- cancel();
- );
- }
+ _status = Status(ErrorCodes::CallbackCanceled, "Reporter no longer valid");
+ _willRunAgain = false;
+ invariant(_remoteCommandCallbackHandle.isValid());
+ _executor->cancel(_remoteCommandCallbackHandle);
+}
- void Reporter::cancel() {
+void Reporter::wait() {
+ ReplicationExecutor::CallbackHandle handle;
+ {
stdx::lock_guard<stdx::mutex> lk(_mutex);
-
if (!_active) {
return;
}
-
- _status = Status(ErrorCodes::CallbackCanceled, "Reporter no longer valid");
- _willRunAgain = false;
- invariant(_remoteCommandCallbackHandle.isValid());
- _executor->cancel(_remoteCommandCallbackHandle);
- }
-
- void Reporter::wait() {
- ReplicationExecutor::CallbackHandle handle;
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (!_active) {
- return;
- }
- if (!_remoteCommandCallbackHandle.isValid()) {
- return;
- }
- handle = _remoteCommandCallbackHandle;
+ if (!_remoteCommandCallbackHandle.isValid()) {
+ return;
}
- _executor->wait(handle);
+ handle = _remoteCommandCallbackHandle;
}
+ _executor->wait(handle);
+}
- Status Reporter::trigger() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _schedule_inlock();
- }
-
- Status Reporter::_schedule_inlock() {
- if (!_status.isOK()) {
- return _status;
- }
-
- if (_active) {
- _willRunAgain = true;
- return _status;
- }
-
- LOG(2) << "Reporter scheduling report to : " << _target;
-
- BSONObjBuilder cmd;
- if (!_updatePositionSource->prepareReplSetUpdatePositionCommand(&cmd)) {
- // Returning NodeNotFound because currently this is the only way
- // prepareReplSetUpdatePositionCommand() can fail in production.
- return Status(ErrorCodes::NodeNotFound,
- "Reporter failed to create replSetUpdatePositionCommand command.");
- }
- auto cmdObj = cmd.obj();
- StatusWith<ReplicationExecutor::CallbackHandle> scheduleResult =
- _executor->scheduleRemoteCommand(
- RemoteCommandRequest(_target, "admin", cmdObj),
- stdx::bind(&Reporter::_callback, this, stdx::placeholders::_1));
-
- if (!scheduleResult.isOK()) {
- _status = scheduleResult.getStatus();
- LOG(2) << "Reporter failed to schedule with status: " << _status;
+Status Reporter::trigger() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _schedule_inlock();
+}
- return _status;
- }
-
- _active = true;
- _willRunAgain = false;
- _remoteCommandCallbackHandle = scheduleResult.getValue();
- return Status::OK();
+Status Reporter::_schedule_inlock() {
+ if (!_status.isOK()) {
+ return _status;
}
- void Reporter::_callback(const ReplicationExecutor::RemoteCommandCallbackArgs& rcbd) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (_active) {
+ _willRunAgain = true;
+ return _status;
+ }
- _status = rcbd.response.getStatus();
- _active = false;
+ LOG(2) << "Reporter scheduling report to : " << _target;
- LOG(2) << "Reporter ended with status: " << _status << " after reporting to " << _target;
- if (_status.isOK() && _willRunAgain) {
- _schedule_inlock();
- }
- else {
- _willRunAgain = false;
- }
+ BSONObjBuilder cmd;
+ if (!_updatePositionSource->prepareReplSetUpdatePositionCommand(&cmd)) {
+ // Returning NodeNotFound because currently this is the only way
+ // prepareReplSetUpdatePositionCommand() can fail in production.
+ return Status(ErrorCodes::NodeNotFound,
+ "Reporter failed to create replSetUpdatePositionCommand command.");
}
+ auto cmdObj = cmd.obj();
+ StatusWith<ReplicationExecutor::CallbackHandle> scheduleResult =
+ _executor->scheduleRemoteCommand(
+ RemoteCommandRequest(_target, "admin", cmdObj),
+ stdx::bind(&Reporter::_callback, this, stdx::placeholders::_1));
+
+ if (!scheduleResult.isOK()) {
+ _status = scheduleResult.getStatus();
+ LOG(2) << "Reporter failed to schedule with status: " << _status;
- Status Reporter::getStatus() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
return _status;
}
- bool Reporter::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _active;
- }
+ _active = true;
+ _willRunAgain = false;
+ _remoteCommandCallbackHandle = scheduleResult.getValue();
+ return Status::OK();
+}
- bool Reporter::willRunAgain() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _willRunAgain;
+void Reporter::_callback(const ReplicationExecutor::RemoteCommandCallbackArgs& rcbd) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+
+ _status = rcbd.response.getStatus();
+ _active = false;
+
+ LOG(2) << "Reporter ended with status: " << _status << " after reporting to " << _target;
+ if (_status.isOK() && _willRunAgain) {
+ _schedule_inlock();
+ } else {
+ _willRunAgain = false;
}
-} // namespace repl
-} // namespace mongo
+}
+
+Status Reporter::getStatus() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _status;
+}
+
+bool Reporter::isActive() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _active;
+}
+
+bool Reporter::willRunAgain() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _willRunAgain;
+}
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/reporter.h b/src/mongo/db/repl/reporter.h
index 44145373326..c7c502e8f5f 100644
--- a/src/mongo/db/repl/reporter.h
+++ b/src/mongo/db/repl/reporter.h
@@ -35,90 +35,90 @@
namespace mongo {
namespace repl {
- class ReplicationProgressManager {
- public:
- virtual bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) = 0;
- virtual ~ReplicationProgressManager();
- };
-
- class Reporter {
- MONGO_DISALLOW_COPYING(Reporter);
-
- public:
- Reporter(ReplicationExecutor* executor,
- ReplicationProgressManager* replicationProgressManager,
- const HostAndPort& target);
- virtual ~Reporter();
-
- /**
- * Returns true if a remote command has been scheduled (but not completed)
- * with the executor.
- */
- bool isActive() const;
-
- /**
- * Returns true if a remote command should be scheduled once the current one returns
- * from the executor.
- */
- bool willRunAgain() const;
-
- /**
- * Cancels remote command request.
- * Returns immediately if the Reporter is not active.
- */
- void cancel();
-
- /**
- * Waits for last/current executor handle to finish.
- * Returns immediately if the handle is invalid.
- */
- void wait();
-
- /**
- * Signals to the Reporter that there is new information to be sent to the "_target" server.
- * Returns the _status, indicating any error the Reporter has encountered.
- */
- Status trigger();
-
- /**
- * Returns the previous return status so that the owner can decide whether the Reporter
- * needs a new target to whom it can report.
- */
- Status getStatus() const;
-
- private:
- /**
- * Schedules remote command to be run by the executor
- */
- Status _schedule_inlock();
-
- /**
- * Callback for remote command.
- */
- void _callback(const ReplicationExecutor::RemoteCommandCallbackArgs& rcbd);
-
- // Not owned by us.
- ReplicationExecutor* _executor;
- ReplicationProgressManager* _updatePositionSource;
-
- // Host to whom the Reporter sends updates.
- HostAndPort _target;
-
- // Protects member data of this Reporter.
- mutable stdx::mutex _mutex;
-
- // Stores the most recent Status returned from the ReplicationExecutor.
- Status _status;
-
- // _willRunAgain is true when Reporter is scheduled to be run by the executor and subsequent
- // updates have come in.
- bool _willRunAgain;
- // _active is true when Reporter is scheduled to be run by the executor.
- bool _active;
-
- // Callback handle to the scheduled remote command.
- ReplicationExecutor::CallbackHandle _remoteCommandCallbackHandle;
- };
-
-} // namespace repl
-} // namespace mongo
+class ReplicationProgressManager {
+public:
+ virtual bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) = 0;
+ virtual ~ReplicationProgressManager();
+};
+
+class Reporter {
+ MONGO_DISALLOW_COPYING(Reporter);
+
+public:
+ Reporter(ReplicationExecutor* executor,
+ ReplicationProgressManager* replicationProgressManager,
+ const HostAndPort& target);
+ virtual ~Reporter();
+
+ /**
+ * Returns true if a remote command has been scheduled (but not completed)
+ * with the executor.
+ */
+ bool isActive() const;
+
+ /**
+ * Returns true if a remote command should be scheduled once the current one returns
+ * from the executor.
+ */
+ bool willRunAgain() const;
+
+ /**
+ * Cancels remote command request.
+ * Returns immediately if the Reporter is not active.
+ */
+ void cancel();
+
+ /**
+ * Waits for last/current executor handle to finish.
+ * Returns immediately if the handle is invalid.
+ */
+ void wait();
+
+ /**
+ * Signals to the Reporter that there is new information to be sent to the "_target" server.
+ * Returns the _status, indicating any error the Reporter has encountered.
+ */
+ Status trigger();
+
+ /**
+ * Returns the previous return status so that the owner can decide whether the Reporter
+ * needs a new target to whom it can report.
+ */
+ Status getStatus() const;
+
+private:
+ /**
+ * Schedules remote command to be run by the executor
+ */
+ Status _schedule_inlock();
+
+ /**
+ * Callback for remote command.
+ */
+ void _callback(const ReplicationExecutor::RemoteCommandCallbackArgs& rcbd);
+
+ // Not owned by us.
+ ReplicationExecutor* _executor;
+ ReplicationProgressManager* _updatePositionSource;
+
+ // Host to whom the Reporter sends updates.
+ HostAndPort _target;
+
+ // Protects member data of this Reporter.
+ mutable stdx::mutex _mutex;
+
+ // Stores the most recent Status returned from the ReplicationExecutor.
+ Status _status;
+
+ // _willRunAgain is true when Reporter is scheduled to be run by the executor and subsequent
+ // updates have come in.
+ bool _willRunAgain;
+ // _active is true when Reporter is scheduled to be run by the executor.
+ bool _active;
+
+ // Callback handle to the scheduled remote command.
+ ReplicationExecutor::CallbackHandle _remoteCommandCallbackHandle;
+};
+
+} // namespace repl
+} // namespace mongo
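For context, a rough usage sketch of the Reporter interface declared above, mirroring the call sequence exercised in reporter_test.cpp below; the executor, progressManager, and host name are placeholders assumed to be supplied by the caller.

Reporter reporter(&executor, &progressManager, HostAndPort("h1"));
Status s = reporter.trigger();  // schedules one replSetUpdatePosition command via the executor
if (s.isOK() && reporter.isActive()) {
    reporter.trigger();  // while a report is in flight, a second trigger only sets willRunAgain()
}
reporter.cancel();  // abandons the in-flight request; getStatus() becomes CallbackCanceled
reporter.wait();    // blocks until the executor has finished running the reporter's callback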
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index 180ccf36088..507bd49017f 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -36,248 +36,246 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
- using executor::NetworkInterfaceMock;
-
- class MockProgressManager : public ReplicationProgressManager {
- public:
- void updateMap(int memberId, const Timestamp& ts) {
- progressMap[memberId] = ts;
- }
-
- void setResult(bool newResult) {
- _result = newResult;
- }
-
- bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) {
- if (!_result) {
- return _result;
- }
- cmdBuilder->append("replSetUpdatePosition", 1);
- BSONArrayBuilder arrayBuilder(cmdBuilder->subarrayStart("optimes"));
- for (auto itr = progressMap.begin(); itr != progressMap.end(); ++itr) {
- BSONObjBuilder entry(arrayBuilder.subobjStart());
- entry.append("optime", itr->second);
- entry.append("memberId", itr->first);
- entry.append("cfgver", 1);
- }
- return true;
- }
- private:
- std::map<int, Timestamp> progressMap;
- bool _result = true;
- };
-
- class ReporterTest : public ReplicationExecutorTest {
- public:
-
- ReporterTest();
- void scheduleNetworkResponse(const BSONObj& obj);
- void scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason);
- void finishProcessingNetworkResponse();
-
- protected:
-
- void setUp() override;
- void tearDown() override;
-
- std::unique_ptr<Reporter> reporter;
- std::unique_ptr<MockProgressManager> posUpdater;
-
- };
-
- ReporterTest::ReporterTest() {}
-
- void ReporterTest::setUp() {
- ReplicationExecutorTest::setUp();
- posUpdater.reset(new MockProgressManager());
- reporter.reset(new Reporter(&getExecutor(), posUpdater.get(), HostAndPort("h1")));
- launchExecutorThread();
+using namespace mongo;
+using namespace mongo::repl;
+using executor::NetworkInterfaceMock;
+
+class MockProgressManager : public ReplicationProgressManager {
+public:
+ void updateMap(int memberId, const Timestamp& ts) {
+ progressMap[memberId] = ts;
}
- void ReporterTest::tearDown() {
- ReplicationExecutorTest::tearDown();
- // Executor may still invoke reporter's callback before shutting down.
- posUpdater.reset();
- reporter.reset();
+ void setResult(bool newResult) {
+ _result = newResult;
}
- void ReporterTest::scheduleNetworkResponse(const BSONObj& obj) {
- NetworkInterfaceMock* net = getNet();
- ASSERT_TRUE(net->hasReadyRequests());
- Milliseconds millis(0);
- RemoteCommandResponse response(obj, millis);
- ReplicationExecutor::ResponseStatus responseStatus(response);
- net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
+ bool prepareReplSetUpdatePositionCommand(BSONObjBuilder* cmdBuilder) {
+ if (!_result) {
+ return _result;
+ }
+ cmdBuilder->append("replSetUpdatePosition", 1);
+ BSONArrayBuilder arrayBuilder(cmdBuilder->subarrayStart("optimes"));
+ for (auto itr = progressMap.begin(); itr != progressMap.end(); ++itr) {
+ BSONObjBuilder entry(arrayBuilder.subobjStart());
+ entry.append("optime", itr->second);
+ entry.append("memberId", itr->first);
+ entry.append("cfgver", 1);
+ }
+ return true;
}
- void ReporterTest::scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason) {
- NetworkInterfaceMock* net = getNet();
- ASSERT_TRUE(net->hasReadyRequests());
- ReplicationExecutor::ResponseStatus responseStatus(code, reason);
- net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
- }
-
- TEST_F(ReporterTest, InvalidConstruction) {
- // null ReplicationProgressManager
- ASSERT_THROWS(Reporter(&getExecutor(), nullptr, HostAndPort("h1")), UserException);
-
- // null ReplicationExecutor
- ASSERT_THROWS(Reporter(nullptr, posUpdater.get(), HostAndPort("h1")), UserException);
-
- // empty HostAndPort
- ASSERT_THROWS(Reporter(&getExecutor(), posUpdater.get(), HostAndPort()), UserException);
- }
-
- TEST_F(ReporterTest, IsActiveOnceScheduled) {
- ASSERT_FALSE(reporter->isActive());
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- }
-
- TEST_F(ReporterTest, CancelWithoutScheduled) {
- ASSERT_FALSE(reporter->isActive());
- reporter->cancel();
- ASSERT_FALSE(reporter->isActive());
- }
-
- TEST_F(ReporterTest, ShutdownBeforeSchedule) {
- getExecutor().shutdown();
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, reporter->trigger());
- ASSERT_FALSE(reporter->isActive());
- }
-
- // If an error is returned, it should be recorded in the Reporter and be returned when triggered
- TEST_F(ReporterTest, ErrorsAreStoredInTheReporter) {
- posUpdater->updateMap(0, Timestamp(3,0));
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- scheduleNetworkResponse(ErrorCodes::NoSuchKey, "waaaah");
- getNet()->runReadyNetworkOperations();
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, reporter->getStatus());
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, reporter->trigger());
- ASSERT_FALSE(reporter->isActive());
- ASSERT_FALSE(getNet()->hasReadyRequests());
- }
-
- // If an error is returned, it should be recorded in the Reporter and not run again.
- TEST_F(ReporterTest, ErrorsStopTheReporter) {
- posUpdater->updateMap(0, Timestamp(3,0));
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_TRUE(reporter->willRunAgain());
-
- scheduleNetworkResponse(ErrorCodes::NoSuchKey, "waaaah");
- getNet()->runReadyNetworkOperations();
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, reporter->getStatus());
- ASSERT_FALSE(reporter->willRunAgain());
- ASSERT_FALSE(reporter->isActive());
- ASSERT_FALSE(getNet()->hasReadyRequests());
- }
-
- // Schedule while we are already scheduled, it should set willRunAgain, then automatically
- // schedule itself after finishing.
- TEST_F(ReporterTest, DoubleScheduleShouldCauseRescheduleImmediatelyAfterRespondedTo) {
- posUpdater->updateMap(0, Timestamp(3,0));
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_TRUE(reporter->willRunAgain());
-
- scheduleNetworkResponse(BSON("ok" << 1));
- getNet()->runReadyNetworkOperations();
- ASSERT_TRUE(getNet()->hasReadyRequests());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
-
- scheduleNetworkResponse(BSON("ok" << 1));
- getNet()->runReadyNetworkOperations();
- ASSERT_FALSE(getNet()->hasReadyRequests());
- ASSERT_FALSE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- }
-
- // Schedule multiple times while we are already scheduled, it should set willRunAgain,
- // then automatically schedule itself after finishing, but not a third time since the latter
- // two will contain the same batch of updates.
- TEST_F(ReporterTest, TripleScheduleShouldCauseRescheduleImmediatelyAfterRespondedToOnlyOnce) {
- posUpdater->updateMap(0, Timestamp(3,0));
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_TRUE(reporter->willRunAgain());
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_TRUE(reporter->willRunAgain());
-
- scheduleNetworkResponse(BSON("ok" << 1));
- getNet()->runReadyNetworkOperations();
- ASSERT_TRUE(getNet()->hasReadyRequests());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
-
- scheduleNetworkResponse(BSON("ok" << 1));
- getNet()->runReadyNetworkOperations();
- ASSERT_FALSE(getNet()->hasReadyRequests());
- ASSERT_FALSE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- }
-
- TEST_F(ReporterTest, CancelWhileScheduled) {
- posUpdater->updateMap(0, Timestamp(3,0));
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_TRUE(reporter->willRunAgain());
-
- reporter->cancel();
- getNet()->runReadyNetworkOperations();
- ASSERT_FALSE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- ASSERT_FALSE(getNet()->hasReadyRequests());
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, reporter->getStatus());
-
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, reporter->trigger());
- }
-
- TEST_F(ReporterTest, CancelAfterFirstReturns) {
- posUpdater->updateMap(0, Timestamp(3,0));
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- ASSERT_OK(reporter->trigger());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_TRUE(reporter->willRunAgain());
-
- scheduleNetworkResponse(BSON("ok" << 1));
- getNet()->runReadyNetworkOperations();
- ASSERT_TRUE(getNet()->hasReadyRequests());
- ASSERT_TRUE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
-
- reporter->cancel();
- getNet()->runReadyNetworkOperations();
- ASSERT_FALSE(reporter->isActive());
- ASSERT_FALSE(reporter->willRunAgain());
- ASSERT_FALSE(getNet()->hasReadyRequests());
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, reporter->getStatus());
-
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, reporter->trigger());
- }
-
- TEST_F(ReporterTest, ProgressManagerFails) {
- posUpdater->setResult(false);
- ASSERT_EQUALS(ErrorCodes::NodeNotFound, reporter->trigger().code());
- }
-
-} // namespace
+private:
+ std::map<int, Timestamp> progressMap;
+ bool _result = true;
+};
+
+class ReporterTest : public ReplicationExecutorTest {
+public:
+ ReporterTest();
+ void scheduleNetworkResponse(const BSONObj& obj);
+ void scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason);
+ void finishProcessingNetworkResponse();
+
+protected:
+ void setUp() override;
+ void tearDown() override;
+
+ std::unique_ptr<Reporter> reporter;
+ std::unique_ptr<MockProgressManager> posUpdater;
+};
+
+ReporterTest::ReporterTest() {}
+
+void ReporterTest::setUp() {
+ ReplicationExecutorTest::setUp();
+ posUpdater.reset(new MockProgressManager());
+ reporter.reset(new Reporter(&getExecutor(), posUpdater.get(), HostAndPort("h1")));
+ launchExecutorThread();
+}
+
+void ReporterTest::tearDown() {
+ ReplicationExecutorTest::tearDown();
+ // Executor may still invoke reporter's callback before shutting down.
+ posUpdater.reset();
+ reporter.reset();
+}
+
+void ReporterTest::scheduleNetworkResponse(const BSONObj& obj) {
+ NetworkInterfaceMock* net = getNet();
+ ASSERT_TRUE(net->hasReadyRequests());
+ Milliseconds millis(0);
+ RemoteCommandResponse response(obj, millis);
+ ReplicationExecutor::ResponseStatus responseStatus(response);
+ net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
+}
+
+void ReporterTest::scheduleNetworkResponse(ErrorCodes::Error code, const std::string& reason) {
+ NetworkInterfaceMock* net = getNet();
+ ASSERT_TRUE(net->hasReadyRequests());
+ ReplicationExecutor::ResponseStatus responseStatus(code, reason);
+ net->scheduleResponse(net->getNextReadyRequest(), net->now(), responseStatus);
+}
+
+TEST_F(ReporterTest, InvalidConstruction) {
+ // null ReplicationProgressManager
+ ASSERT_THROWS(Reporter(&getExecutor(), nullptr, HostAndPort("h1")), UserException);
+
+ // null ReplicationExecutor
+ ASSERT_THROWS(Reporter(nullptr, posUpdater.get(), HostAndPort("h1")), UserException);
+
+ // empty HostAndPort
+ ASSERT_THROWS(Reporter(&getExecutor(), posUpdater.get(), HostAndPort()), UserException);
+}
+
+TEST_F(ReporterTest, IsActiveOnceScheduled) {
+ ASSERT_FALSE(reporter->isActive());
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+}
+
+TEST_F(ReporterTest, CancelWithoutScheduled) {
+ ASSERT_FALSE(reporter->isActive());
+ reporter->cancel();
+ ASSERT_FALSE(reporter->isActive());
+}
+
+TEST_F(ReporterTest, ShutdownBeforeSchedule) {
+ getExecutor().shutdown();
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, reporter->trigger());
+ ASSERT_FALSE(reporter->isActive());
+}
+
+// If an error is returned, it should be recorded in the Reporter and be returned when triggered
+TEST_F(ReporterTest, ErrorsAreStoredInTheReporter) {
+ posUpdater->updateMap(0, Timestamp(3, 0));
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ scheduleNetworkResponse(ErrorCodes::NoSuchKey, "waaaah");
+ getNet()->runReadyNetworkOperations();
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, reporter->getStatus());
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, reporter->trigger());
+ ASSERT_FALSE(reporter->isActive());
+ ASSERT_FALSE(getNet()->hasReadyRequests());
+}
+
+// If an error is returned, it should be recorded in the Reporter and not run again.
+TEST_F(ReporterTest, ErrorsStopTheReporter) {
+ posUpdater->updateMap(0, Timestamp(3, 0));
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_TRUE(reporter->willRunAgain());
+
+ scheduleNetworkResponse(ErrorCodes::NoSuchKey, "waaaah");
+ getNet()->runReadyNetworkOperations();
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, reporter->getStatus());
+ ASSERT_FALSE(reporter->willRunAgain());
+ ASSERT_FALSE(reporter->isActive());
+ ASSERT_FALSE(getNet()->hasReadyRequests());
+}
+
+// Scheduling while already scheduled should set willRunAgain, then automatically
+// reschedule after finishing.
+TEST_F(ReporterTest, DoubleScheduleShouldCauseRescheduleImmediatelyAfterRespondedTo) {
+ posUpdater->updateMap(0, Timestamp(3, 0));
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_TRUE(reporter->willRunAgain());
+
+ scheduleNetworkResponse(BSON("ok" << 1));
+ getNet()->runReadyNetworkOperations();
+ ASSERT_TRUE(getNet()->hasReadyRequests());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+
+ scheduleNetworkResponse(BSON("ok" << 1));
+ getNet()->runReadyNetworkOperations();
+ ASSERT_FALSE(getNet()->hasReadyRequests());
+ ASSERT_FALSE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+}
+
+// Scheduling multiple times while already scheduled should set willRunAgain and then
+// automatically reschedule after finishing, but not a third time, since the latter
+// two triggers carry the same batch of updates.
+TEST_F(ReporterTest, TripleScheduleShouldCauseRescheduleImmediatelyAfterRespondedToOnlyOnce) {
+ posUpdater->updateMap(0, Timestamp(3, 0));
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_TRUE(reporter->willRunAgain());
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_TRUE(reporter->willRunAgain());
+
+ scheduleNetworkResponse(BSON("ok" << 1));
+ getNet()->runReadyNetworkOperations();
+ ASSERT_TRUE(getNet()->hasReadyRequests());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+
+ scheduleNetworkResponse(BSON("ok" << 1));
+ getNet()->runReadyNetworkOperations();
+ ASSERT_FALSE(getNet()->hasReadyRequests());
+ ASSERT_FALSE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+}
+
+TEST_F(ReporterTest, CancelWhileScheduled) {
+ posUpdater->updateMap(0, Timestamp(3, 0));
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_TRUE(reporter->willRunAgain());
+
+ reporter->cancel();
+ getNet()->runReadyNetworkOperations();
+ ASSERT_FALSE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+ ASSERT_FALSE(getNet()->hasReadyRequests());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, reporter->getStatus());
+
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, reporter->trigger());
+}
+
+TEST_F(ReporterTest, CancelAfterFirstReturns) {
+ posUpdater->updateMap(0, Timestamp(3, 0));
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+ ASSERT_OK(reporter->trigger());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_TRUE(reporter->willRunAgain());
+
+ scheduleNetworkResponse(BSON("ok" << 1));
+ getNet()->runReadyNetworkOperations();
+ ASSERT_TRUE(getNet()->hasReadyRequests());
+ ASSERT_TRUE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+
+ reporter->cancel();
+ getNet()->runReadyNetworkOperations();
+ ASSERT_FALSE(reporter->isActive());
+ ASSERT_FALSE(reporter->willRunAgain());
+ ASSERT_FALSE(getNet()->hasReadyRequests());
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, reporter->getStatus());
+
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, reporter->trigger());
+}
+
+TEST_F(ReporterTest, ProgressManagerFails) {
+ posUpdater->setResult(false);
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound, reporter->trigger().code());
+}
+
+} // namespace
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index 0daa1ef3197..bdab9c63cc4 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -35,99 +35,99 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
namespace repl {
- // operator requested resynchronization of replication (on a slave or secondary). {resync: 1}
- class CmdResync : public Command {
- public:
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool adminOnly() const {
- return true;
- }
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::resync);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
+// operator requested resynchronization of replication (on a slave or secondary). {resync: 1}
+class CmdResync : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::resync);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
- void help(stringstream& h) const {
- h << "resync (from scratch) a stale slave or replica set secondary node.\n";
- }
+ void help(stringstream& h) const {
+ h << "resync (from scratch) a stale slave or replica set secondary node.\n";
+ }
- CmdResync() : Command("resync") { }
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
+ CmdResync() : Command("resync") {}
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
-
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
- const MemberState memberState = replCoord->getMemberState();
- if (memberState.startup()) {
- return appendCommandStatus(result, Status(ErrorCodes::NotYetInitialized,
- "no replication yet active"));
- }
- if (memberState.primary() ||
- !replCoord->setFollowerMode(MemberState::RS_STARTUP2)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotSecondary,
- "primaries cannot resync"));
- }
- BackgroundSync::get()->setInitialSyncRequestedFlag(true);
- return true;
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ if (getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
+ const MemberState memberState = replCoord->getMemberState();
+ if (memberState.startup()) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::NotYetInitialized, "no replication yet active"));
}
-
- // below this comment pertains only to master/slave replication
- if ( cmdObj.getBoolField( "force" ) ) {
- if ( !waitForSyncToFinish(txn, errmsg ) )
- return false;
- replAllDead = "resync forced";
+ if (memberState.primary() || !replCoord->setFollowerMode(MemberState::RS_STARTUP2)) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::NotSecondary, "primaries cannot resync"));
}
- // TODO(dannenberg) replAllDead is bad and should be removed when masterslave is removed
- if (!replAllDead) {
- errmsg = "not dead, no need to resync";
- return false;
- }
- if ( !waitForSyncToFinish(txn, errmsg ) )
- return false;
-
- ReplSource::forceResyncDead( txn, "client" );
- result.append( "info", "triggered resync for all sources" );
-
+ BackgroundSync::get()->setInitialSyncRequestedFlag(true);
return true;
}
- bool waitForSyncToFinish(OperationContext* txn, string &errmsg) const {
- // Wait for slave thread to finish syncing, so sources will be be
- // reloaded with new saved state on next pass.
- Timer t;
- while ( 1 ) {
- if ( syncing == 0 || t.millis() > 30000 )
- break;
- {
- Lock::TempRelease t(txn->lockState());
- relinquishSyncingSome = 1;
- sleepmillis(1);
- }
- }
- if ( syncing ) {
- errmsg = "timeout waiting for sync() to finish";
+        // Everything below this point applies only to master/slave replication.
+ if (cmdObj.getBoolField("force")) {
+ if (!waitForSyncToFinish(txn, errmsg))
return false;
+ replAllDead = "resync forced";
+ }
+ // TODO(dannenberg) replAllDead is bad and should be removed when masterslave is removed
+ if (!replAllDead) {
+ errmsg = "not dead, no need to resync";
+ return false;
+ }
+ if (!waitForSyncToFinish(txn, errmsg))
+ return false;
+
+ ReplSource::forceResyncDead(txn, "client");
+ result.append("info", "triggered resync for all sources");
+
+ return true;
+ }
+
+ bool waitForSyncToFinish(OperationContext* txn, string& errmsg) const {
+        // Wait for the slave thread to finish syncing, so sources will be
+ // reloaded with new saved state on next pass.
+ Timer t;
+ while (1) {
+ if (syncing == 0 || t.millis() > 30000)
+ break;
+ {
+ Lock::TempRelease t(txn->lockState());
+ relinquishSyncingSome = 1;
+ sleepmillis(1);
}
- return true;
}
- } cmdResync;
-} // namespace repl
-} // namespace mongo
+ if (syncing) {
+ errmsg = "timeout waiting for sync() to finish";
+ return false;
+ }
+ return true;
+ }
+} cmdResync;
+} // namespace repl
+} // namespace mongo
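For context, the command document handled by CmdResync::run() above is simply {resync: 1} against the admin database, optionally with the "force" flag that is only honoured on the master/slave path; a sketch of building it (values illustrative):

BSONObj resyncCmd = BSON("resync" << 1 << "force" << true);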
diff --git a/src/mongo/db/repl/roll_back_local_operations.cpp b/src/mongo/db/repl/roll_back_local_operations.cpp
index 2cf07fa36b7..2117458e9f4 100644
--- a/src/mongo/db/repl/roll_back_local_operations.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations.cpp
@@ -41,157 +41,145 @@ namespace repl {
namespace {
- Timestamp getTimestamp(const BSONObj& operation) {
- return operation["ts"].timestamp();
- }
-
- Timestamp getTimestamp(const OplogInterface::Iterator::Value& oplogValue) {
- return getTimestamp(oplogValue.first);
- }
-
- long long getHash(const BSONObj& operation) {
- return operation["h"].Long();
- }
-
- long long getHash(const OplogInterface::Iterator::Value& oplogValue) {
- return getHash(oplogValue.first);
- }
-
-} // namespace
-
- RollBackLocalOperations::RollBackLocalOperations(
- const OplogInterface& localOplog,
- const RollbackOperationFn& rollbackOperation)
-
- : _localOplogIterator(localOplog.makeIterator()),
- _rollbackOperation(rollbackOperation),
- _scanned(0) {
-
- uassert(ErrorCodes::BadValue, "invalid local oplog iterator", _localOplogIterator);
- uassert(ErrorCodes::BadValue, "null roll back operation function", rollbackOperation);
+Timestamp getTimestamp(const BSONObj& operation) {
+ return operation["ts"].timestamp();
+}
+
+Timestamp getTimestamp(const OplogInterface::Iterator::Value& oplogValue) {
+ return getTimestamp(oplogValue.first);
+}
+
+long long getHash(const BSONObj& operation) {
+ return operation["h"].Long();
+}
+
+long long getHash(const OplogInterface::Iterator::Value& oplogValue) {
+ return getHash(oplogValue.first);
+}
+
+} // namespace
+
+RollBackLocalOperations::RollBackLocalOperations(const OplogInterface& localOplog,
+ const RollbackOperationFn& rollbackOperation)
+
+ : _localOplogIterator(localOplog.makeIterator()),
+ _rollbackOperation(rollbackOperation),
+ _scanned(0) {
+ uassert(ErrorCodes::BadValue, "invalid local oplog iterator", _localOplogIterator);
+ uassert(ErrorCodes::BadValue, "null roll back operation function", rollbackOperation);
+}
+
+StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollBackLocalOperations::onRemoteOperation(
+ const BSONObj& operation) {
+ if (_scanned == 0) {
+ auto result = _localOplogIterator->next();
+ if (!result.isOK()) {
+ return StatusWith<RollbackCommonPoint>(ErrorCodes::OplogStartMissing,
+ "no oplog during initsync");
+ }
+ _localOplogValue = result.getValue();
+
+ long long diff = static_cast<long long>(getTimestamp(_localOplogValue).getSecs()) -
+ getTimestamp(operation).getSecs();
+ // diff could be positive, negative, or zero
+ log() << "rollback our last optime: " << getTimestamp(_localOplogValue).toStringPretty();
+ log() << "rollback their last optime: " << getTimestamp(operation).toStringPretty();
+ log() << "rollback diff in end of log times: " << diff << " seconds";
+ if (diff > 1800) {
+ severe() << "rollback too long a time period for a rollback.";
+ return StatusWith<RollbackCommonPoint>(
+ ErrorCodes::ExceededTimeLimit,
+ "rollback error: not willing to roll back more than 30 minutes of data");
+ }
}
- StatusWith<RollBackLocalOperations::RollbackCommonPoint>
- RollBackLocalOperations::onRemoteOperation(const BSONObj& operation) {
-
- if (_scanned == 0) {
- auto result = _localOplogIterator->next();
- if (!result.isOK()) {
- return StatusWith<RollbackCommonPoint>(ErrorCodes::OplogStartMissing,
- "no oplog during initsync");
- }
- _localOplogValue = result.getValue();
-
- long long diff =
- static_cast<long long>(getTimestamp(_localOplogValue).getSecs()) -
- getTimestamp(operation).getSecs();
- // diff could be positive, negative, or zero
- log() << "rollback our last optime: "
- << getTimestamp(_localOplogValue).toStringPretty();
- log() << "rollback their last optime: " << getTimestamp(operation).toStringPretty();
- log() << "rollback diff in end of log times: " << diff << " seconds";
- if (diff > 1800) {
- severe() << "rollback too long a time period for a rollback.";
- return StatusWith<RollbackCommonPoint>(
- ErrorCodes::ExceededTimeLimit,
- "rollback error: not willing to roll back more than 30 minutes of data");
- }
+ while (getTimestamp(_localOplogValue) > getTimestamp(operation)) {
+ _scanned++;
+ auto status = _rollbackOperation(_localOplogValue.first);
+ if (!status.isOK()) {
+ invariant(ErrorCodes::NoSuchKey != status.code());
+ return status;
}
-
- while (getTimestamp(_localOplogValue) > getTimestamp(operation)) {
- _scanned++;
- auto status = _rollbackOperation(_localOplogValue.first);
- if (!status.isOK()) {
- invariant(ErrorCodes::NoSuchKey != status.code());
- return status;
- }
- auto result = _localOplogIterator->next();
- if (!result.isOK()) {
- severe() << "rollback error RS101 reached beginning of local oplog";
- log() << " scanned: " << _scanned;
- log() << " theirTime: " << getTimestamp(operation).toStringLong();
- log() << " ourTime: " << getTimestamp(_localOplogValue).toStringLong();
- return StatusWith<RollbackCommonPoint>(
- ErrorCodes::NoMatchingDocument,
- "RS101 reached beginning of local oplog [2]");
- }
- _localOplogValue = result.getValue();
+ auto result = _localOplogIterator->next();
+ if (!result.isOK()) {
+ severe() << "rollback error RS101 reached beginning of local oplog";
+ log() << " scanned: " << _scanned;
+ log() << " theirTime: " << getTimestamp(operation).toStringLong();
+ log() << " ourTime: " << getTimestamp(_localOplogValue).toStringLong();
+ return StatusWith<RollbackCommonPoint>(ErrorCodes::NoMatchingDocument,
+ "RS101 reached beginning of local oplog [2]");
}
+ _localOplogValue = result.getValue();
+ }
- if (getTimestamp(_localOplogValue) == getTimestamp(operation)) {
- _scanned++;
- if (getHash(_localOplogValue) == getHash(operation)) {
- return StatusWith<RollbackCommonPoint>(
- std::make_pair(getTimestamp(_localOplogValue), _localOplogValue.second));
- }
- auto status = _rollbackOperation(_localOplogValue.first);
- if (!status.isOK()) {
- invariant(ErrorCodes::NoSuchKey != status.code());
- return status;
- }
- auto result = _localOplogIterator->next();
- if (!result.isOK()) {
- severe() << "rollback error RS101 reached beginning of local oplog";
- log() << " scanned: " << _scanned;
- log() << " theirTime: " << getTimestamp(operation).toStringLong();
- log() << " ourTime: " << getTimestamp(_localOplogValue).toStringLong();
- return StatusWith<RollbackCommonPoint>(
- ErrorCodes::NoMatchingDocument,
- "RS101 reached beginning of local oplog [1]");
- }
- _localOplogValue = result.getValue();
+ if (getTimestamp(_localOplogValue) == getTimestamp(operation)) {
+ _scanned++;
+ if (getHash(_localOplogValue) == getHash(operation)) {
return StatusWith<RollbackCommonPoint>(
- ErrorCodes::NoSuchKey,
- "Unable to determine common point - same timestamp but different hash. "
- "Need to process additional remote operations.");
+ std::make_pair(getTimestamp(_localOplogValue), _localOplogValue.second));
}
-
- if (getTimestamp(_localOplogValue) < getTimestamp(operation)) {
- _scanned++;
- return StatusWith<RollbackCommonPoint>(
- ErrorCodes::NoSuchKey,
- "Unable to determine common point. "
- "Need to process additional remote operations.");
+ auto status = _rollbackOperation(_localOplogValue.first);
+ if (!status.isOK()) {
+ invariant(ErrorCodes::NoSuchKey != status.code());
+ return status;
}
-
- return RollbackCommonPoint(Timestamp(Seconds(1), 0), RecordId());
+ auto result = _localOplogIterator->next();
+ if (!result.isOK()) {
+ severe() << "rollback error RS101 reached beginning of local oplog";
+ log() << " scanned: " << _scanned;
+ log() << " theirTime: " << getTimestamp(operation).toStringLong();
+ log() << " ourTime: " << getTimestamp(_localOplogValue).toStringLong();
+ return StatusWith<RollbackCommonPoint>(ErrorCodes::NoMatchingDocument,
+ "RS101 reached beginning of local oplog [1]");
+ }
+ _localOplogValue = result.getValue();
+ return StatusWith<RollbackCommonPoint>(
+ ErrorCodes::NoSuchKey,
+ "Unable to determine common point - same timestamp but different hash. "
+ "Need to process additional remote operations.");
}
- StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperations(
- const OplogInterface& localOplog,
- const OplogInterface& remoteOplog,
- const RollBackLocalOperations::RollbackOperationFn& rollbackOperation) {
-
- auto remoteIterator = remoteOplog.makeIterator();
- auto remoteResult = remoteIterator->next();
- if (!remoteResult.isOK()) {
- return StatusWith<RollBackLocalOperations::RollbackCommonPoint>(
- ErrorCodes::InvalidSyncSource,
- "remote oplog empty or unreadable");
- }
+ if (getTimestamp(_localOplogValue) < getTimestamp(operation)) {
+ _scanned++;
+ return StatusWith<RollbackCommonPoint>(ErrorCodes::NoSuchKey,
+ "Unable to determine common point. "
+ "Need to process additional remote operations.");
+ }
- RollBackLocalOperations finder(localOplog, rollbackOperation);
- Timestamp theirTime;
- while (remoteResult.isOK()) {
- theirTime = remoteResult.getValue().first["ts"].timestamp();
- BSONObj theirObj = remoteResult.getValue().first;
- auto result = finder.onRemoteOperation(theirObj);
- if (result.isOK()) {
- return result.getValue();
- }
- else if (result.getStatus().code() != ErrorCodes::NoSuchKey) {
- return result;
- }
- remoteResult = remoteIterator->next();
- }
+ return RollbackCommonPoint(Timestamp(Seconds(1), 0), RecordId());
+}
- severe() << "rollback error RS100 reached beginning of remote oplog";
- log() << " them: " << remoteOplog.toString();
- log() << " theirTime: " << theirTime.toStringLong();
+StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperations(
+ const OplogInterface& localOplog,
+ const OplogInterface& remoteOplog,
+ const RollBackLocalOperations::RollbackOperationFn& rollbackOperation) {
+ auto remoteIterator = remoteOplog.makeIterator();
+ auto remoteResult = remoteIterator->next();
+ if (!remoteResult.isOK()) {
return StatusWith<RollBackLocalOperations::RollbackCommonPoint>(
- ErrorCodes::NoMatchingDocument,
- "RS100 reached beginning of remote oplog [1]");
+ ErrorCodes::InvalidSyncSource, "remote oplog empty or unreadable");
+ }
+
+ RollBackLocalOperations finder(localOplog, rollbackOperation);
+ Timestamp theirTime;
+ while (remoteResult.isOK()) {
+ theirTime = remoteResult.getValue().first["ts"].timestamp();
+ BSONObj theirObj = remoteResult.getValue().first;
+ auto result = finder.onRemoteOperation(theirObj);
+ if (result.isOK()) {
+ return result.getValue();
+ } else if (result.getStatus().code() != ErrorCodes::NoSuchKey) {
+ return result;
+ }
+ remoteResult = remoteIterator->next();
}
-} // namespace repl
-} // namespace mongo
+ severe() << "rollback error RS100 reached beginning of remote oplog";
+ log() << " them: " << remoteOplog.toString();
+ log() << " theirTime: " << theirTime.toStringLong();
+ return StatusWith<RollBackLocalOperations::RollbackCommonPoint>(
+ ErrorCodes::NoMatchingDocument, "RS100 reached beginning of remote oplog [1]");
+}
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/roll_back_local_operations.h b/src/mongo/db/repl/roll_back_local_operations.h
index 4a9d5b71cd8..20eb923083d 100644
--- a/src/mongo/db/repl/roll_back_local_operations.h
+++ b/src/mongo/db/repl/roll_back_local_operations.h
@@ -39,57 +39,55 @@
namespace mongo {
namespace repl {
- class RollBackLocalOperations {
- MONGO_DISALLOW_COPYING(RollBackLocalOperations);
- public:
+class RollBackLocalOperations {
+ MONGO_DISALLOW_COPYING(RollBackLocalOperations);
- /**
- * Type of function to roll back an operation or process it for future use.
- * It can return any status except ErrorCodes::NoSuchKey. See onRemoteOperation().
- */
- using RollbackOperationFn = stdx::function<Status (const BSONObj&)>;
-
- using RollbackCommonPoint = std::pair<Timestamp, RecordId>;
-
- /**
- * Initializes rollback processor with a valid local oplog.
- * Whenever we encounter an operation in the local oplog that has to be rolled back,
- * we will pass it to 'rollbackOperation'.
- */
- RollBackLocalOperations(const OplogInterface& localOplog,
- const RollbackOperationFn& rollbackOperation);
-
- virtual ~RollBackLocalOperations() = default;
-
- /**
- * Process single remote operation.
- * Returns ErrorCodes::NoSuchKey if common point has not been found and
- * additional operations have to be read from the remote oplog.
- */
- StatusWith<RollbackCommonPoint> onRemoteOperation(const BSONObj& operation);
+public:
+ /**
+ * Type of function to roll back an operation or process it for future use.
+ * It can return any status except ErrorCodes::NoSuchKey. See onRemoteOperation().
+ */
+ using RollbackOperationFn = stdx::function<Status(const BSONObj&)>;
- private:
+ using RollbackCommonPoint = std::pair<Timestamp, RecordId>;
- std::unique_ptr<OplogInterface::Iterator> _localOplogIterator;
- RollbackOperationFn _rollbackOperation;
- OplogInterface::Iterator::Value _localOplogValue;
- unsigned long long _scanned;
+ /**
+ * Initializes rollback processor with a valid local oplog.
+ * Whenever we encounter an operation in the local oplog that has to be rolled back,
+ * we will pass it to 'rollbackOperation'.
+ */
+ RollBackLocalOperations(const OplogInterface& localOplog,
+ const RollbackOperationFn& rollbackOperation);
- };
+ virtual ~RollBackLocalOperations() = default;
/**
- * Rolls back every operation in the local oplog that is not in the remote oplog, in reverse
- * order.
- *
- * Whenever we encounter an operation in the local oplog that has to be rolled back,
- * we will pass it to 'rollbackOperation' starting with the most recent operation.
- * It is up to 'rollbackOperation' to roll back this operation immediately or
- * process it for future use.
+ * Process single remote operation.
+ * Returns ErrorCodes::NoSuchKey if common point has not been found and
+ * additional operations have to be read from the remote oplog.
*/
- StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperations(
- const OplogInterface& localOplog,
- const OplogInterface& remoteOplog,
- const RollBackLocalOperations::RollbackOperationFn& rollbackOperation);
+ StatusWith<RollbackCommonPoint> onRemoteOperation(const BSONObj& operation);
+
+private:
+ std::unique_ptr<OplogInterface::Iterator> _localOplogIterator;
+ RollbackOperationFn _rollbackOperation;
+ OplogInterface::Iterator::Value _localOplogValue;
+ unsigned long long _scanned;
+};
+
+/**
+ * Rolls back every operation in the local oplog that is not in the remote oplog, in reverse
+ * order.
+ *
+ * Whenever we encounter an operation in the local oplog that has to be rolled back,
+ * we will pass it to 'rollbackOperation' starting with the most recent operation.
+ * It is up to 'rollbackOperation' to roll back this operation immediately or
+ * process it for future use.
+ */
+StatusWith<RollBackLocalOperations::RollbackCommonPoint> syncRollBackLocalOperations(
+ const OplogInterface& localOplog,
+ const OplogInterface& remoteOplog,
+ const RollBackLocalOperations::RollbackOperationFn& rollbackOperation);
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
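For context, a rough sketch of driving syncRollBackLocalOperations() as declared above. OplogInterfaceMock and the empty operation lists are stand-ins borrowed from the unit tests below; the lambda is where a caller would actually undo (or record) each local-only operation.

const OplogInterfaceMock::Operations noOps;  // newest-first {BSONObj, RecordId} pairs in real use
OplogInterfaceMock localOplog(noOps);
OplogInterfaceMock remoteOplog(noOps);
auto common = syncRollBackLocalOperations(localOplog, remoteOplog, [](const BSONObj&) {
    // roll back this local-only operation, or stash it for later processing
    return Status::OK();
});
if (common.isOK()) {
    Timestamp ts = common.getValue().first;   // timestamp of the common point
    RecordId loc = common.getValue().second;  // its location in the local oplog
}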
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index 9fae8ce648c..06af9890571 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -37,396 +37,369 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
+using namespace mongo;
+using namespace mongo::repl;
- const OplogInterfaceMock::Operations kEmptyMockOperations;
+const OplogInterfaceMock::Operations kEmptyMockOperations;
- BSONObj makeOp(int seconds, long long hash) {
- return BSON("ts" << Timestamp(Seconds(seconds), 0) << "h" << hash);
- }
-
- int recordId = 0;
- OplogInterfaceMock::Operation makeOpAndRecordId(int seconds, long long hash) {
- return std::make_pair(makeOp(seconds, hash), RecordId(++recordId));
- }
-
- TEST(RollBackLocalOperationsTest, InvalidLocalOplogIterator) {
- class InvalidOplogInterface : public OplogInterface {
- public:
- std::string toString() const override { return ""; }
- std::unique_ptr<Iterator> makeIterator() const override {
- return std::unique_ptr<Iterator>();
- }
- } invalidOplog;
- ASSERT_THROWS_CODE(
- RollBackLocalOperations(invalidOplog, [](const BSONObj&) { return Status::OK(); }),
- UserException,
- ErrorCodes::BadValue);
- }
-
- TEST(RollBackLocalOperationsTest, InvalidRollbackOperationFunction) {
- ASSERT_THROWS_CODE(
- RollBackLocalOperations(OplogInterfaceMock({makeOpAndRecordId(1, 0)}),
- RollBackLocalOperations::RollbackOperationFn()),
- UserException,
- ErrorCodes::BadValue);
- }
-
- TEST(RollBackLocalOperationsTest, EmptyLocalOplog) {
- OplogInterfaceMock localOplog(kEmptyMockOperations);
- RollBackLocalOperations finder(localOplog, [](const BSONObj&) { return Status::OK(); });
- auto result = finder.onRemoteOperation(makeOp(1, 0));
- ASSERT_EQUALS(ErrorCodes::OplogStartMissing, result.getStatus().code());
- }
-
- TEST(RollBackLocalOperationsTest, RollbackPeriodTooLong) {
- OplogInterfaceMock localOplog({makeOpAndRecordId(1802, 0)});
- RollBackLocalOperations finder(localOplog, [](const BSONObj&) { return Status::OK(); });
- auto result = finder.onRemoteOperation(makeOp(1, 0));
- ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, result.getStatus().code());
- }
-
- TEST(RollBackLocalOperationsTest, RollbackMultipleLocalOperations) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(5, 1),
- makeOpAndRecordId(4, 1),
- makeOpAndRecordId(3, 1),
- makeOpAndRecordId(2, 1),
- commonOperation,
- });
- OplogInterfaceMock localOplog(localOperations);
- auto i = localOperations.cbegin();
- auto rollbackOperation = [&](const BSONObj& operation) {
- ASSERT_EQUALS(i->first, operation);
- i++;
- return Status::OK();
- };
- RollBackLocalOperations finder(localOplog, rollbackOperation);
- auto result = finder.onRemoteOperation(commonOperation.first);
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
- ASSERT_EQUALS(commonOperation.second, result.getValue().second);
- ASSERT_FALSE(i == localOperations.cend());
- ASSERT_EQUALS(commonOperation.first, i->first);
- i++;
- ASSERT_TRUE(i == localOperations.cend());
- }
-
- TEST(RollBackLocalOperationsTest, RollbackOperationFailed) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(2, 1),
- commonOperation,
- });
- OplogInterfaceMock localOplog(localOperations);
- auto rollbackOperation = [&](const BSONObj& operation) {
- return Status(ErrorCodes::OperationFailed, "");
- };
- RollBackLocalOperations finder(localOplog, rollbackOperation);
- auto result = finder.onRemoteOperation(commonOperation.first);
- ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
- }
+BSONObj makeOp(int seconds, long long hash) {
+ return BSON("ts" << Timestamp(Seconds(seconds), 0) << "h" << hash);
+}
- TEST(RollBackLocalOperationsTest, EndOfLocalOplog) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(2, 1),
- });
- OplogInterfaceMock localOplog(localOperations);
- RollBackLocalOperations finder(localOplog, [](const BSONObj&) { return Status::OK(); });
- auto result = finder.onRemoteOperation(commonOperation.first);
- ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
- }
+int recordId = 0;
+OplogInterfaceMock::Operation makeOpAndRecordId(int seconds, long long hash) {
+ return std::make_pair(makeOp(seconds, hash), RecordId(++recordId));
+}
- TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(5, 1),
- makeOpAndRecordId(4, 1),
- makeOpAndRecordId(2, 1),
- commonOperation,
- });
- OplogInterfaceMock localOplog(localOperations);
- auto i = localOperations.cbegin();
- auto rollbackOperation = [&](const BSONObj& operation) {
- ASSERT_EQUALS(i->first, operation);
- i++;
- return Status::OK();
- };
- RollBackLocalOperations finder(localOplog, rollbackOperation);
- {
- auto result = finder.onRemoteOperation(makeOp(6,1));
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
- ASSERT_TRUE(i == localOperations.cbegin());
+TEST(RollBackLocalOperationsTest, InvalidLocalOplogIterator) {
+ class InvalidOplogInterface : public OplogInterface {
+ public:
+ std::string toString() const override {
+ return "";
}
- {
- auto result = finder.onRemoteOperation(makeOp(3,1));
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
- ASSERT_TRUE(std::distance(localOperations.cbegin(), i) == 2);
+ std::unique_ptr<Iterator> makeIterator() const override {
+ return std::unique_ptr<Iterator>();
}
- auto result = finder.onRemoteOperation(commonOperation.first);
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
- ASSERT_EQUALS(commonOperation.second, result.getValue().second);
- ASSERT_FALSE(i == localOperations.cend());
- ASSERT_EQUALS(commonOperation.first, i->first);
+ } invalidOplog;
+ ASSERT_THROWS_CODE(
+ RollBackLocalOperations(invalidOplog, [](const BSONObj&) { return Status::OK(); }),
+ UserException,
+ ErrorCodes::BadValue);
+}
+
+TEST(RollBackLocalOperationsTest, InvalidRollbackOperationFunction) {
+ ASSERT_THROWS_CODE(RollBackLocalOperations(OplogInterfaceMock({makeOpAndRecordId(1, 0)}),
+ RollBackLocalOperations::RollbackOperationFn()),
+ UserException,
+ ErrorCodes::BadValue);
+}
+
+TEST(RollBackLocalOperationsTest, EmptyLocalOplog) {
+ OplogInterfaceMock localOplog(kEmptyMockOperations);
+ RollBackLocalOperations finder(localOplog, [](const BSONObj&) { return Status::OK(); });
+ auto result = finder.onRemoteOperation(makeOp(1, 0));
+ ASSERT_EQUALS(ErrorCodes::OplogStartMissing, result.getStatus().code());
+}
+
+TEST(RollBackLocalOperationsTest, RollbackPeriodTooLong) {
+ OplogInterfaceMock localOplog({makeOpAndRecordId(1802, 0)});
+ RollBackLocalOperations finder(localOplog, [](const BSONObj&) { return Status::OK(); });
+ auto result = finder.onRemoteOperation(makeOp(1, 0));
+ ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit, result.getStatus().code());
+}
+
+TEST(RollBackLocalOperationsTest, RollbackMultipleLocalOperations) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ OplogInterfaceMock::Operations localOperations({
+ makeOpAndRecordId(5, 1),
+ makeOpAndRecordId(4, 1),
+ makeOpAndRecordId(3, 1),
+ makeOpAndRecordId(2, 1),
+ commonOperation,
+ });
+ OplogInterfaceMock localOplog(localOperations);
+ auto i = localOperations.cbegin();
+ auto rollbackOperation = [&](const BSONObj& operation) {
+ ASSERT_EQUALS(i->first, operation);
i++;
- ASSERT_TRUE(i == localOperations.cend());
- }
-
- TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashess) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(1, 5),
- makeOpAndRecordId(1, 3),
- commonOperation,
- });
- OplogInterfaceMock localOplog(localOperations);
- auto i = localOperations.cbegin();
- auto rollbackOperation = [&](const BSONObj& operation) {
- ASSERT_EQUALS(i->first, operation);
- i++;
- return Status::OK();
- };
- RollBackLocalOperations finder(localOplog, rollbackOperation);
- {
- auto result = finder.onRemoteOperation(makeOp(1,4));
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
- ASSERT_TRUE(std::distance(localOperations.cbegin(), i) == 1);
- }
- {
- auto result = finder.onRemoteOperation(makeOp(1,2));
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
- ASSERT_TRUE(std::distance(localOperations.cbegin(), i) == 2);
- }
- auto result = finder.onRemoteOperation(commonOperation.first);
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
- ASSERT_EQUALS(commonOperation.second, result.getValue().second);
- ASSERT_FALSE(i == localOperations.cend());
- ASSERT_EQUALS(commonOperation.first, i->first);
+ return Status::OK();
+ };
+ RollBackLocalOperations finder(localOplog, rollbackOperation);
+ auto result = finder.onRemoteOperation(commonOperation.first);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
+ ASSERT_EQUALS(commonOperation.second, result.getValue().second);
+ ASSERT_FALSE(i == localOperations.cend());
+ ASSERT_EQUALS(commonOperation.first, i->first);
+ i++;
+ ASSERT_TRUE(i == localOperations.cend());
+}
+
+TEST(RollBackLocalOperationsTest, RollbackOperationFailed) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ OplogInterfaceMock::Operations localOperations({
+ makeOpAndRecordId(2, 1), commonOperation,
+ });
+ OplogInterfaceMock localOplog(localOperations);
+ auto rollbackOperation =
+ [&](const BSONObj& operation) { return Status(ErrorCodes::OperationFailed, ""); };
+ RollBackLocalOperations finder(localOplog, rollbackOperation);
+ auto result = finder.onRemoteOperation(commonOperation.first);
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
+}
+
+TEST(RollBackLocalOperationsTest, EndOfLocalOplog) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ OplogInterfaceMock::Operations localOperations({
+ makeOpAndRecordId(2, 1),
+ });
+ OplogInterfaceMock localOplog(localOperations);
+ RollBackLocalOperations finder(localOplog, [](const BSONObj&) { return Status::OK(); });
+ auto result = finder.onRemoteOperation(commonOperation.first);
+ ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
+}
+
+TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ OplogInterfaceMock::Operations localOperations({
+ makeOpAndRecordId(5, 1), makeOpAndRecordId(4, 1), makeOpAndRecordId(2, 1), commonOperation,
+ });
+ OplogInterfaceMock localOplog(localOperations);
+ auto i = localOperations.cbegin();
+ auto rollbackOperation = [&](const BSONObj& operation) {
+ ASSERT_EQUALS(i->first, operation);
i++;
- ASSERT_TRUE(i == localOperations.cend());
+ return Status::OK();
+ };
+ RollBackLocalOperations finder(localOplog, rollbackOperation);
+ {
+ auto result = finder.onRemoteOperation(makeOp(6, 1));
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+ ASSERT_TRUE(i == localOperations.cbegin());
}
-
- TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashesRollbackOperationFailed) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(1, 3),
- commonOperation,
- });
- OplogInterfaceMock localOplog(localOperations);
- auto rollbackOperation = [&](const BSONObj& operation) {
- return Status(ErrorCodes::OperationFailed, "");
- };
- RollBackLocalOperations finder(localOplog, rollbackOperation);
- auto result = finder.onRemoteOperation(makeOp(1,2));
- ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
- }
-
- TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashesEndOfLocalOplog) {
- OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(1, 3),
- });
- OplogInterfaceMock localOplog(localOperations);
- RollBackLocalOperations finder(localOplog, [](const BSONObj&) { return Status::OK(); });
- auto result = finder.onRemoteOperation(makeOp(1,2));
- ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
+ {
+ auto result = finder.onRemoteOperation(makeOp(3, 1));
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+ ASSERT_TRUE(std::distance(localOperations.cbegin(), i) == 2);
}
-
- TEST(SyncRollBackLocalOperationsTest, OplogStartMissing) {
- ASSERT_EQUALS(
- ErrorCodes::OplogStartMissing,
- syncRollBackLocalOperations(
- OplogInterfaceMock(kEmptyMockOperations),
- OplogInterfaceMock({makeOpAndRecordId(1, 0)}),
- [](const BSONObj&) { return Status::OK(); }).getStatus().code());
- }
-
- TEST(SyncRollBackLocalOperationsTest, RemoteOplogMissing) {
- ASSERT_EQUALS(
- ErrorCodes::InvalidSyncSource,
- syncRollBackLocalOperations(
- OplogInterfaceMock({makeOpAndRecordId(1, 0)}),
- OplogInterfaceMock(kEmptyMockOperations),
- [](const BSONObj&) { return Status::OK(); }).getStatus().code());
- }
-
- TEST(SyncRollBackLocalOperationsTest, RollbackPeriodTooLong) {
- ASSERT_EQUALS(
- ErrorCodes::ExceededTimeLimit,
- syncRollBackLocalOperations(
- OplogInterfaceMock({makeOpAndRecordId(1802, 0)}),
- OplogInterfaceMock({makeOpAndRecordId(1, 0)}),
- [](const BSONObj&) { return Status::OK(); }).getStatus().code());
- }
-
- TEST(SyncRollBackLocalOperationsTest, RollbackTwoOperations) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- OplogInterfaceMock::Operations localOperations({
- makeOpAndRecordId(3, 1),
- makeOpAndRecordId(2, 1),
- commonOperation,
- });
- auto i = localOperations.cbegin();
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock(localOperations),
- OplogInterfaceMock({commonOperation}),
- [&](const BSONObj& operation) {
- ASSERT_EQUALS(i->first, operation);
- i++;
- return Status::OK();
- });
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
- ASSERT_EQUALS(commonOperation.second, result.getValue().second);
- ASSERT_FALSE(i == localOperations.cend());
- ASSERT_EQUALS(commonOperation.first, i->first);
+ auto result = finder.onRemoteOperation(commonOperation.first);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
+ ASSERT_EQUALS(commonOperation.second, result.getValue().second);
+ ASSERT_FALSE(i == localOperations.cend());
+ ASSERT_EQUALS(commonOperation.first, i->first);
+ i++;
+ ASSERT_TRUE(i == localOperations.cend());
+}
+
+TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashess) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ OplogInterfaceMock::Operations localOperations({
+ makeOpAndRecordId(1, 5), makeOpAndRecordId(1, 3), commonOperation,
+ });
+ OplogInterfaceMock localOplog(localOperations);
+ auto i = localOperations.cbegin();
+ auto rollbackOperation = [&](const BSONObj& operation) {
+ ASSERT_EQUALS(i->first, operation);
i++;
- ASSERT_TRUE(i == localOperations.cend());
- }
-
- TEST(SyncRollBackLocalOperationsTest, SkipOneRemoteOperation) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- auto remoteOperation = makeOpAndRecordId(2, 1);
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock({commonOperation}),
- OplogInterfaceMock({remoteOperation, commonOperation}),
- [&](const BSONObj& operation) {
- FAIL("should not reach here");
- return Status::OK();
- });
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
- ASSERT_EQUALS(commonOperation.second, result.getValue().second);
+ return Status::OK();
+ };
+ RollBackLocalOperations finder(localOplog, rollbackOperation);
+ {
+ auto result = finder.onRemoteOperation(makeOp(1, 4));
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+ ASSERT_TRUE(std::distance(localOperations.cbegin(), i) == 1);
}
-
- TEST(SyncRollBackLocalOperationsTest, SameTimestampDifferentHashes) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- auto localOperation = makeOpAndRecordId(1, 2);
- auto remoteOperation = makeOpAndRecordId(1, 3);
- bool called = false;
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock({localOperation, commonOperation}),
- OplogInterfaceMock({remoteOperation, commonOperation}),
- [&](const BSONObj& operation) {
- ASSERT_EQUALS(localOperation.first, operation);
- called = true;
- return Status::OK();
- });
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
- ASSERT_EQUALS(commonOperation.second, result.getValue().second);
- ASSERT_TRUE(called);
- }
-
- TEST(SyncRollBackLocalOperationsTest, SameTimestampEndOfLocalOplog) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- auto localOperation = makeOpAndRecordId(1, 2);
- auto remoteOperation = makeOpAndRecordId(1, 3);
- bool called = false;
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock({localOperation}),
- OplogInterfaceMock({remoteOperation, commonOperation}),
- [&](const BSONObj& operation) {
- ASSERT_EQUALS(localOperation.first, operation);
- called = true;
- return Status::OK();
- });
- ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
- ASSERT_STRING_CONTAINS(result.getStatus().reason(),
- "RS101 reached beginning of local oplog [1]");
- ASSERT_TRUE(called);
+ {
+ auto result = finder.onRemoteOperation(makeOp(1, 2));
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+ ASSERT_TRUE(std::distance(localOperations.cbegin(), i) == 2);
}
-
- TEST(SyncRollBackLocalOperationsTest, SameTimestampRollbackOperationFailed) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- auto localOperation = makeOpAndRecordId(1, 2);
- auto remoteOperation = makeOpAndRecordId(1, 3);
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock({localOperation, commonOperation}),
- OplogInterfaceMock({remoteOperation, commonOperation}),
- [&](const BSONObj& operation) {
- return Status(ErrorCodes::OperationFailed, "");
- });
- ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
- }
-
- TEST(SyncRollBackLocalOperationsTest, SameTimestampEndOfRemoteOplog) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- auto localOperation = makeOpAndRecordId(1, 2);
- auto remoteOperation = makeOpAndRecordId(1, 3);
- bool called = false;
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock({localOperation, commonOperation}),
- OplogInterfaceMock({remoteOperation}),
- [&](const BSONObj& operation) {
- ASSERT_EQUALS(localOperation.first, operation);
- called = true;
- return Status::OK();
- });
- ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
- ASSERT_STRING_CONTAINS(result.getStatus().reason(),
- "RS100 reached beginning of remote oplog");
- ASSERT_TRUE(called);
- }
-
- TEST(SyncRollBackLocalOperationsTest, DifferentTimestampEndOfLocalOplog) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- auto localOperation = makeOpAndRecordId(3, 1);
- auto remoteOperation = makeOpAndRecordId(2, 1);
- bool called = false;
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock({localOperation}),
- OplogInterfaceMock({remoteOperation, commonOperation}),
- [&](const BSONObj& operation) {
- ASSERT_EQUALS(localOperation.first, operation);
- called = true;
- return Status::OK();
- });
- ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
- ASSERT_STRING_CONTAINS(result.getStatus().reason(),
- "RS101 reached beginning of local oplog [2]");
- ASSERT_TRUE(called);
- }
-
- TEST(SyncRollBackLocalOperationsTest, DifferentTimestampRollbackOperationFailed) {
- auto localOperation = makeOpAndRecordId(3, 1);
- auto remoteOperation = makeOpAndRecordId(2, 1);
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock({localOperation}),
- OplogInterfaceMock({remoteOperation}),
- [&](const BSONObj& operation) {
- return Status(ErrorCodes::OperationFailed, "");
- });
- ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
- }
-
- TEST(SyncRollBackLocalOperationsTest, DifferentTimestampEndOfRemoteOplog) {
- auto commonOperation = makeOpAndRecordId(1, 1);
- auto localOperation = makeOpAndRecordId(2, 1);
- auto remoteOperation = makeOpAndRecordId(3, 1);
- auto result =
- syncRollBackLocalOperations(
- OplogInterfaceMock({localOperation, commonOperation}),
- OplogInterfaceMock({remoteOperation}),
- [&](const BSONObj& operation) {
- FAIL("Should not reach here");
- return Status::OK();
- });
- ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
- ASSERT_STRING_CONTAINS(result.getStatus().reason(),
- "RS100 reached beginning of remote oplog [1]");
- }
-
-} // namespace
+ auto result = finder.onRemoteOperation(commonOperation.first);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
+ ASSERT_EQUALS(commonOperation.second, result.getValue().second);
+ ASSERT_FALSE(i == localOperations.cend());
+ ASSERT_EQUALS(commonOperation.first, i->first);
+ i++;
+ ASSERT_TRUE(i == localOperations.cend());
+}
+
+TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashesRollbackOperationFailed) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ OplogInterfaceMock::Operations localOperations({
+ makeOpAndRecordId(1, 3), commonOperation,
+ });
+ OplogInterfaceMock localOplog(localOperations);
+ auto rollbackOperation =
+ [&](const BSONObj& operation) { return Status(ErrorCodes::OperationFailed, ""); };
+ RollBackLocalOperations finder(localOplog, rollbackOperation);
+ auto result = finder.onRemoteOperation(makeOp(1, 2));
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
+}
+
+TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashesEndOfLocalOplog) {
+ OplogInterfaceMock::Operations localOperations({
+ makeOpAndRecordId(1, 3),
+ });
+ OplogInterfaceMock localOplog(localOperations);
+ RollBackLocalOperations finder(localOplog, [](const BSONObj&) { return Status::OK(); });
+ auto result = finder.onRemoteOperation(makeOp(1, 2));
+ ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
+}
+
+TEST(SyncRollBackLocalOperationsTest, OplogStartMissing) {
+ ASSERT_EQUALS(ErrorCodes::OplogStartMissing,
+ syncRollBackLocalOperations(OplogInterfaceMock(kEmptyMockOperations),
+ OplogInterfaceMock({makeOpAndRecordId(1, 0)}),
+ [](const BSONObj&) { return Status::OK(); })
+ .getStatus()
+ .code());
+}
+
+TEST(SyncRollBackLocalOperationsTest, RemoteOplogMissing) {
+ ASSERT_EQUALS(ErrorCodes::InvalidSyncSource,
+ syncRollBackLocalOperations(OplogInterfaceMock({makeOpAndRecordId(1, 0)}),
+ OplogInterfaceMock(kEmptyMockOperations),
+ [](const BSONObj&) { return Status::OK(); })
+ .getStatus()
+ .code());
+}
+
+TEST(SyncRollBackLocalOperationsTest, RollbackPeriodTooLong) {
+ ASSERT_EQUALS(ErrorCodes::ExceededTimeLimit,
+ syncRollBackLocalOperations(OplogInterfaceMock({makeOpAndRecordId(1802, 0)}),
+ OplogInterfaceMock({makeOpAndRecordId(1, 0)}),
+ [](const BSONObj&) { return Status::OK(); })
+ .getStatus()
+ .code());
+}
+
+TEST(SyncRollBackLocalOperationsTest, RollbackTwoOperations) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ OplogInterfaceMock::Operations localOperations({
+ makeOpAndRecordId(3, 1), makeOpAndRecordId(2, 1), commonOperation,
+ });
+ auto i = localOperations.cbegin();
+ auto result = syncRollBackLocalOperations(OplogInterfaceMock(localOperations),
+ OplogInterfaceMock({commonOperation}),
+ [&](const BSONObj& operation) {
+ ASSERT_EQUALS(i->first, operation);
+ i++;
+ return Status::OK();
+ });
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
+ ASSERT_EQUALS(commonOperation.second, result.getValue().second);
+ ASSERT_FALSE(i == localOperations.cend());
+ ASSERT_EQUALS(commonOperation.first, i->first);
+ i++;
+ ASSERT_TRUE(i == localOperations.cend());
+}
+
+TEST(SyncRollBackLocalOperationsTest, SkipOneRemoteOperation) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ auto remoteOperation = makeOpAndRecordId(2, 1);
+ auto result =
+ syncRollBackLocalOperations(OplogInterfaceMock({commonOperation}),
+ OplogInterfaceMock({remoteOperation, commonOperation}),
+ [&](const BSONObj& operation) {
+ FAIL("should not reach here");
+ return Status::OK();
+ });
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
+ ASSERT_EQUALS(commonOperation.second, result.getValue().second);
+}
+
+TEST(SyncRollBackLocalOperationsTest, SameTimestampDifferentHashes) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ auto localOperation = makeOpAndRecordId(1, 2);
+ auto remoteOperation = makeOpAndRecordId(1, 3);
+ bool called = false;
+ auto result =
+ syncRollBackLocalOperations(OplogInterfaceMock({localOperation, commonOperation}),
+ OplogInterfaceMock({remoteOperation, commonOperation}),
+ [&](const BSONObj& operation) {
+ ASSERT_EQUALS(localOperation.first, operation);
+ called = true;
+ return Status::OK();
+ });
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
+ ASSERT_EQUALS(commonOperation.second, result.getValue().second);
+ ASSERT_TRUE(called);
+}
+
+TEST(SyncRollBackLocalOperationsTest, SameTimestampEndOfLocalOplog) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ auto localOperation = makeOpAndRecordId(1, 2);
+ auto remoteOperation = makeOpAndRecordId(1, 3);
+ bool called = false;
+ auto result =
+ syncRollBackLocalOperations(OplogInterfaceMock({localOperation}),
+ OplogInterfaceMock({remoteOperation, commonOperation}),
+ [&](const BSONObj& operation) {
+ ASSERT_EQUALS(localOperation.first, operation);
+ called = true;
+ return Status::OK();
+ });
+ ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
+ ASSERT_STRING_CONTAINS(result.getStatus().reason(),
+ "RS101 reached beginning of local oplog [1]");
+ ASSERT_TRUE(called);
+}
+
+TEST(SyncRollBackLocalOperationsTest, SameTimestampRollbackOperationFailed) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ auto localOperation = makeOpAndRecordId(1, 2);
+ auto remoteOperation = makeOpAndRecordId(1, 3);
+ auto result = syncRollBackLocalOperations(
+ OplogInterfaceMock({localOperation, commonOperation}),
+ OplogInterfaceMock({remoteOperation, commonOperation}),
+ [&](const BSONObj& operation) { return Status(ErrorCodes::OperationFailed, ""); });
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
+}
+
+TEST(SyncRollBackLocalOperationsTest, SameTimestampEndOfRemoteOplog) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ auto localOperation = makeOpAndRecordId(1, 2);
+ auto remoteOperation = makeOpAndRecordId(1, 3);
+ bool called = false;
+ auto result = syncRollBackLocalOperations(OplogInterfaceMock({localOperation, commonOperation}),
+ OplogInterfaceMock({remoteOperation}),
+ [&](const BSONObj& operation) {
+ ASSERT_EQUALS(localOperation.first, operation);
+ called = true;
+ return Status::OK();
+ });
+ ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
+ ASSERT_STRING_CONTAINS(result.getStatus().reason(), "RS100 reached beginning of remote oplog");
+ ASSERT_TRUE(called);
+}
+
+TEST(SyncRollBackLocalOperationsTest, DifferentTimestampEndOfLocalOplog) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ auto localOperation = makeOpAndRecordId(3, 1);
+ auto remoteOperation = makeOpAndRecordId(2, 1);
+ bool called = false;
+ auto result =
+ syncRollBackLocalOperations(OplogInterfaceMock({localOperation}),
+ OplogInterfaceMock({remoteOperation, commonOperation}),
+ [&](const BSONObj& operation) {
+ ASSERT_EQUALS(localOperation.first, operation);
+ called = true;
+ return Status::OK();
+ });
+ ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
+ ASSERT_STRING_CONTAINS(result.getStatus().reason(),
+ "RS101 reached beginning of local oplog [2]");
+ ASSERT_TRUE(called);
+}
+
+TEST(SyncRollBackLocalOperationsTest, DifferentTimestampRollbackOperationFailed) {
+ auto localOperation = makeOpAndRecordId(3, 1);
+ auto remoteOperation = makeOpAndRecordId(2, 1);
+ auto result = syncRollBackLocalOperations(
+ OplogInterfaceMock({localOperation}),
+ OplogInterfaceMock({remoteOperation}),
+ [&](const BSONObj& operation) { return Status(ErrorCodes::OperationFailed, ""); });
+ ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
+}
+
+TEST(SyncRollBackLocalOperationsTest, DifferentTimestampEndOfRemoteOplog) {
+ auto commonOperation = makeOpAndRecordId(1, 1);
+ auto localOperation = makeOpAndRecordId(2, 1);
+ auto remoteOperation = makeOpAndRecordId(3, 1);
+ auto result = syncRollBackLocalOperations(OplogInterfaceMock({localOperation, commonOperation}),
+ OplogInterfaceMock({remoteOperation}),
+ [&](const BSONObj& operation) {
+ FAIL("Should not reach here");
+ return Status::OK();
+ });
+ ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
+ ASSERT_STRING_CONTAINS(result.getStatus().reason(),
+ "RS100 reached beginning of remote oplog [1]");
+}
+
+} // namespace
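Taken together, the tests above exercise one calling pattern: remote oplog entries are presented to the finder newest-first, each local-only entry is handed to the rollback callback exactly once, an ErrorCodes::NoSuchKey result means "no common point yet, present the next older remote entry", and success yields the timestamp and RecordId of the common point. A minimal sketch of that loop, reusing only the mock types and helpers defined in this test file (the surrounding driver code is assumed, not part of this change):

    // Local oplog: one divergent entry on top of the common point.
    auto commonOperation = makeOpAndRecordId(1, 1);
    OplogInterfaceMock localOplog({makeOpAndRecordId(2, 1), commonOperation});

    // The callback sees each local-only entry, newest first; a real caller would undo it here.
    RollBackLocalOperations finder(localOplog, [](const BSONObj& op) { return Status::OK(); });

    // ErrorCodes::NoSuchKey from onRemoteOperation() means "keep feeding older remote entries".
    auto result = finder.onRemoteOperation(commonOperation.first);
    if (result.getStatus().isOK()) {
        Timestamp commonTs = result.getValue().first;       // timestamp of the common point
        RecordId commonRecordId = result.getValue().second;  // its location in the local oplog
    }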
diff --git a/src/mongo/db/repl/rollback_source.h b/src/mongo/db/repl/rollback_source.h
index 304a81717dc..3e8d6f55578 100644
--- a/src/mongo/db/repl/rollback_source.h
+++ b/src/mongo/db/repl/rollback_source.h
@@ -34,57 +34,56 @@
namespace mongo {
- class NamespaceString;
- class OperationContext;
+class NamespaceString;
+class OperationContext;
namespace repl {
- class OplogInterface;
+class OplogInterface;
+
+/**
+ * Interface for rollback-related operations on the sync source.
+ */
+class RollbackSource {
+ MONGO_DISALLOW_COPYING(RollbackSource);
+
+public:
+ RollbackSource() = default;
+
+ virtual ~RollbackSource() = default;
+
+ /**
+ * Returns remote oplog interface.
+ * Read oplog entries with OplogInterface::makeIterator().
+ */
+ virtual const OplogInterface& getOplog() const = 0;
+
+ /**
+ * Returns rollback ID.
+ */
+ virtual int getRollbackId() const = 0;
/**
- * Interface for rollback-related operations on the sync source.
+ * Returns last operation in oplog.
*/
- class RollbackSource {
- MONGO_DISALLOW_COPYING(RollbackSource);
- public:
-
- RollbackSource() = default;
-
- virtual ~RollbackSource() = default;
-
- /**
- * Returns remote oplog interface.
- * Read oplog entries with OplogInterface::makeIterator().
- */
- virtual const OplogInterface& getOplog() const = 0;
-
- /**
- * Returns rollback ID.
- */
- virtual int getRollbackId() const = 0;
-
- /**
- * Returns last operation in oplog.
- */
- virtual BSONObj getLastOperation() const = 0;
-
- /**
- * Fetch a single document from the sync source.
- */
- virtual BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const = 0;
-
- /**
- * Clones a single collection from the sync source.
- */
- virtual void copyCollectionFromRemote(OperationContext* txn,
- const NamespaceString& nss) const = 0;
-
- /**
- * Returns collection info.
- */
- virtual StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const = 0;
-
- };
-
-} // namespace repl
-} // namespace mongo
+ virtual BSONObj getLastOperation() const = 0;
+
+ /**
+ * Fetch a single document from the sync source.
+ */
+ virtual BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const = 0;
+
+ /**
+ * Clones a single collection from the sync source.
+ */
+ virtual void copyCollectionFromRemote(OperationContext* txn,
+ const NamespaceString& nss) const = 0;
+
+ /**
+ * Returns collection info.
+ */
+ virtual StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const = 0;
+};
+
+} // namespace repl
+} // namespace mongo
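The interface above is everything the rollback algorithm needs from its sync source, so any object that can answer these six calls can stand in for a live connection. A hedged sketch of a no-op test double (the RollbackSourceStub name is hypothetical; OplogInterfaceMock is the mock used in the test file earlier in this change):

    class RollbackSourceStub : public RollbackSource {
    public:
        const OplogInterface& getOplog() const override {
            return _oplog;  // empty remote oplog
        }
        int getRollbackId() const override {
            return 0;
        }
        BSONObj getLastOperation() const override {
            return BSONObj();
        }
        BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override {
            return BSONObj();  // pretend the document no longer exists on the sync source
        }
        void copyCollectionFromRemote(OperationContext* txn,
                                      const NamespaceString& nss) const override {}
        StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override {
            return StatusWith<BSONObj>(ErrorCodes::NoSuchKey, "stub has no collection info");
        }

    private:
        OplogInterfaceMock _oplog{OplogInterfaceMock::Operations()};
    };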
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index bdda5e331d9..7dbfd2e9cd7 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -41,58 +41,56 @@
namespace mongo {
namespace repl {
- RollbackSourceImpl::RollbackSourceImpl(DBClientConnection* conn,
- const std::string& collectionName)
- : _conn(conn),
- _collectionName(collectionName),
- _oplog(conn, collectionName) { }
+RollbackSourceImpl::RollbackSourceImpl(DBClientConnection* conn, const std::string& collectionName)
+ : _conn(conn), _collectionName(collectionName), _oplog(conn, collectionName) {}
- const OplogInterface& RollbackSourceImpl::getOplog() const {
- return _oplog;
- }
+const OplogInterface& RollbackSourceImpl::getOplog() const {
+ return _oplog;
+}
- int RollbackSourceImpl::getRollbackId() const {
- bo info;
- _conn->simpleCommand("admin", &info, "replSetGetRBID");
- return info["rbid"].numberInt();
- }
+int RollbackSourceImpl::getRollbackId() const {
+ bo info;
+ _conn->simpleCommand("admin", &info, "replSetGetRBID");
+ return info["rbid"].numberInt();
+}
- BSONObj RollbackSourceImpl::getLastOperation() const {
- const Query query = Query().sort(BSON("$natural" << -1));
- return _conn->findOne(_collectionName, query, 0, QueryOption_SlaveOk);
- }
+BSONObj RollbackSourceImpl::getLastOperation() const {
+ const Query query = Query().sort(BSON("$natural" << -1));
+ return _conn->findOne(_collectionName, query, 0, QueryOption_SlaveOk);
+}
- BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& filter) const {
- return _conn->findOne(nss.toString(), filter, NULL, QueryOption_SlaveOk).getOwned();
- }
+BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& filter) const {
+ return _conn->findOne(nss.toString(), filter, NULL, QueryOption_SlaveOk).getOwned();
+}
- void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* txn,
- const NamespaceString& nss) const {
- std::string errmsg;
- std::unique_ptr<DBClientConnection> tmpConn(new DBClientConnection());
- uassert(15908,
- errmsg,
- tmpConn->connect(_conn->getServerHostAndPort(), errmsg) &&
+void RollbackSourceImpl::copyCollectionFromRemote(OperationContext* txn,
+ const NamespaceString& nss) const {
+ std::string errmsg;
+ std::unique_ptr<DBClientConnection> tmpConn(new DBClientConnection());
+ uassert(15908,
+ errmsg,
+ tmpConn->connect(_conn->getServerHostAndPort(), errmsg) &&
replAuthenticate(tmpConn.get()));
- // cloner owns _conn in unique_ptr
- Cloner cloner;
- cloner.setConnection(tmpConn.release());
- uassert(15909, str::stream() <<
- "replSet rollback error resyncing collection " << nss.ns() << ' ' << errmsg,
- cloner.copyCollection(txn, nss.ns(), BSONObj(), errmsg, true, false, true));
- }
+ // cloner owns _conn in unique_ptr
+ Cloner cloner;
+ cloner.setConnection(tmpConn.release());
+ uassert(15909,
+ str::stream() << "replSet rollback error resyncing collection " << nss.ns() << ' '
+ << errmsg,
+ cloner.copyCollection(txn, nss.ns(), BSONObj(), errmsg, true, false, true));
+}
- StatusWith<BSONObj> RollbackSourceImpl::getCollectionInfo(const NamespaceString& nss) const {
- std::list<BSONObj> info =
- _conn->getCollectionInfos(nss.db().toString(), BSON("name" << nss.coll()));
- if (info.empty()) {
- return StatusWith<BSONObj>(ErrorCodes::NoSuchKey, str::stream() <<
- "no collection info found: " << nss.ns());
- }
- invariant(info.size() == 1U);
- return info.front();
+StatusWith<BSONObj> RollbackSourceImpl::getCollectionInfo(const NamespaceString& nss) const {
+ std::list<BSONObj> info =
+ _conn->getCollectionInfos(nss.db().toString(), BSON("name" << nss.coll()));
+ if (info.empty()) {
+ return StatusWith<BSONObj>(ErrorCodes::NoSuchKey,
+ str::stream() << "no collection info found: " << nss.ns());
}
+ invariant(info.size() == 1U);
+ return info.front();
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/rollback_source_impl.h b/src/mongo/db/repl/rollback_source_impl.h
index 3be6ef4339b..8adad2fd0ac 100644
--- a/src/mongo/db/repl/rollback_source_impl.h
+++ b/src/mongo/db/repl/rollback_source_impl.h
@@ -35,40 +35,36 @@
namespace mongo {
- class DBClientConnection;
+class DBClientConnection;
namespace repl {
- /**
- * Rollback source implementation using a connection.
- */
-
- class RollbackSourceImpl : public RollbackSource {
- public:
-
- explicit RollbackSourceImpl(DBClientConnection* conn, const std::string& collectionName);
-
- const OplogInterface& getOplog() const override;
+/**
+ * Rollback source implementation using a connection.
+ */
- int getRollbackId() const override;
+class RollbackSourceImpl : public RollbackSource {
+public:
+ explicit RollbackSourceImpl(DBClientConnection* conn, const std::string& collectionName);
- BSONObj getLastOperation() const override;
+ const OplogInterface& getOplog() const override;
- BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override;
+ int getRollbackId() const override;
- void copyCollectionFromRemote(OperationContext* txn,
- const NamespaceString& nss) const override;
+ BSONObj getLastOperation() const override;
- StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override;
+ BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override;
- private:
+ void copyCollectionFromRemote(OperationContext* txn, const NamespaceString& nss) const override;
- DBClientConnection* _conn;
- std::string _collectionName;
- OplogInterfaceRemote _oplog;
+ StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override;
- };
+private:
+ DBClientConnection* _conn;
+ std::string _collectionName;
+ OplogInterfaceRemote _oplog;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
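For the concrete implementation declared above, setup amounts to a connection plus the oplog namespace; getOplog() then reads the remote oplog through OplogInterfaceRemote. A brief sketch, assuming an already-reachable sync source and the standard replica-set oplog namespace (the host name here is illustrative only):

    DBClientConnection conn;
    std::string errmsg;
    if (conn.connect(HostAndPort("syncsource.example.net:27017"), errmsg)) {
        RollbackSourceImpl rollbackSource(&conn, "local.oplog.rs");
        const OplogInterface& remoteOplog = rollbackSource.getOplog();  // read via makeIterator()
        int rbid = rollbackSource.getRollbackId();  // runs replSetGetRBID on the sync source
    }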
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index fc1a10de72d..af233900267 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -63,478 +63,467 @@ namespace mongo {
namespace repl {
namespace {
- using std::list;
- using std::string;
-
- // Failpoint which fails initial sync and leaves on oplog entry in the buffer.
- MONGO_FP_DECLARE(failInitSyncWithBufferedEntriesLeft);
-
- /**
- * Truncates the oplog (removes any documents) and resets internal variables that were
- * originally initialized or affected by using values from the oplog at startup time. These
- * include the last applied optime, the last fetched optime, and the sync source blacklist.
- * Also resets the bgsync thread so that it reconnects its sync source after the oplog has been
- * truncated.
- */
- void truncateAndResetOplog(OperationContext* txn,
- ReplicationCoordinator* replCoord,
- BackgroundSync* bgsync) {
- // Clear minvalid
- setMinValid(txn, OpTime());
-
- AutoGetDb autoDb(txn, "local", MODE_X);
- massert(28585, "no local database found", autoDb.getDb());
- invariant(txn->lockState()->isCollectionLockedForMode(rsOplogName, MODE_X));
- // Note: the following order is important.
- // The bgsync thread uses an empty optime as a sentinel to know to wait
- // for initial sync; thus, we must
- // ensure the lastAppliedOptime is empty before restarting the bgsync thread
- // via stop().
- // We must clear the sync source blacklist after calling stop()
- // because the bgsync thread, while running, may update the blacklist.
- replCoord->resetMyLastOptime();
- bgsync->stop();
- bgsync->setLastAppliedHash(0);
- bgsync->clearBuffer();
-
- replCoord->clearSyncSourceBlacklist();
-
- // Truncate the oplog in case there was a prior initial sync that failed.
- Collection* collection = autoDb.getDb()->getCollection(rsOplogName);
- fassert(28565, collection);
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- Status status = collection->truncate(txn);
- fassert(28564, status);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "truncate", collection->ns().ns());
+using std::list;
+using std::string;
+
+// Failpoint which fails initial sync and leaves one oplog entry in the buffer.
+MONGO_FP_DECLARE(failInitSyncWithBufferedEntriesLeft);
+
+/**
+ * Truncates the oplog (removes any documents) and resets internal variables that were
+ * originally initialized or affected by using values from the oplog at startup time. These
+ * include the last applied optime, the last fetched optime, and the sync source blacklist.
+ * Also resets the bgsync thread so that it reconnects its sync source after the oplog has been
+ * truncated.
+ */
+void truncateAndResetOplog(OperationContext* txn,
+ ReplicationCoordinator* replCoord,
+ BackgroundSync* bgsync) {
+ // Clear minvalid
+ setMinValid(txn, OpTime());
+
+ AutoGetDb autoDb(txn, "local", MODE_X);
+ massert(28585, "no local database found", autoDb.getDb());
+ invariant(txn->lockState()->isCollectionLockedForMode(rsOplogName, MODE_X));
+ // Note: the following order is important.
+ // The bgsync thread uses an empty optime as a sentinel to know to wait
+ // for initial sync; thus, we must
+ // ensure the lastAppliedOptime is empty before restarting the bgsync thread
+ // via stop().
+ // We must clear the sync source blacklist after calling stop()
+ // because the bgsync thread, while running, may update the blacklist.
+ replCoord->resetMyLastOptime();
+ bgsync->stop();
+ bgsync->setLastAppliedHash(0);
+ bgsync->clearBuffer();
+
+ replCoord->clearSyncSourceBlacklist();
+
+ // Truncate the oplog in case there was a prior initial sync that failed.
+ Collection* collection = autoDb.getDb()->getCollection(rsOplogName);
+ fassert(28565, collection);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ Status status = collection->truncate(txn);
+ fassert(28564, status);
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "truncate", collection->ns().ns());
+}
- /**
- * Confirms that the "admin" database contains a supported version of the auth
- * data schema. Terminates the process if the "admin" contains clearly incompatible
- * auth data.
- */
- void checkAdminDatabasePostClone(OperationContext* txn, Database* adminDb) {
- // Assumes txn holds MODE_X or MODE_S lock on "admin" database.
- if (!adminDb) {
+/**
+ * Confirms that the "admin" database contains a supported version of the auth
+ * data schema. Terminates the process if the "admin" contains clearly incompatible
+ * auth data.
+ */
+void checkAdminDatabasePostClone(OperationContext* txn, Database* adminDb) {
+ // Assumes txn holds MODE_X or MODE_S lock on "admin" database.
+ if (!adminDb) {
+ return;
+ }
+ Collection* const usersCollection =
+ adminDb->getCollection(AuthorizationManager::usersCollectionNamespace);
+ const bool hasUsers =
+ usersCollection && !Helpers::findOne(txn, usersCollection, BSONObj(), false).isNull();
+ Collection* const adminVersionCollection =
+ adminDb->getCollection(AuthorizationManager::versionCollectionNamespace);
+ BSONObj authSchemaVersionDocument;
+ if (!adminVersionCollection ||
+ !Helpers::findOne(txn,
+ adminVersionCollection,
+ AuthorizationManager::versionDocumentQuery,
+ authSchemaVersionDocument)) {
+ if (!hasUsers) {
+ // It's OK to have no auth version document if there are no user documents.
return;
}
- Collection* const usersCollection =
- adminDb->getCollection(AuthorizationManager::usersCollectionNamespace);
- const bool hasUsers = usersCollection &&
- !Helpers::findOne(txn, usersCollection, BSONObj(), false).isNull();
- Collection* const adminVersionCollection =
- adminDb->getCollection(AuthorizationManager::versionCollectionNamespace);
- BSONObj authSchemaVersionDocument;
- if (!adminVersionCollection || !Helpers::findOne(txn,
- adminVersionCollection,
- AuthorizationManager::versionDocumentQuery,
- authSchemaVersionDocument)) {
- if (!hasUsers) {
- // It's OK to have no auth version document if there are no user documents.
- return;
- }
- severe() << "During initial sync, found documents in " <<
- AuthorizationManager::usersCollectionNamespace <<
- " but could not find an auth schema version document in " <<
- AuthorizationManager::versionCollectionNamespace;
- severe() << "This indicates that the primary of this replica set was not successfully "
- "upgraded to schema version " << AuthorizationManager::schemaVersion26Final <<
- ", which is the minimum supported schema version in this version of MongoDB";
- fassertFailedNoTrace(28620);
- }
- long long foundSchemaVersion;
- Status status = bsonExtractIntegerField(authSchemaVersionDocument,
- AuthorizationManager::schemaVersionFieldName,
- &foundSchemaVersion);
+ severe() << "During initial sync, found documents in "
+ << AuthorizationManager::usersCollectionNamespace
+ << " but could not find an auth schema version document in "
+ << AuthorizationManager::versionCollectionNamespace;
+ severe() << "This indicates that the primary of this replica set was not successfully "
+ "upgraded to schema version " << AuthorizationManager::schemaVersion26Final
+ << ", which is the minimum supported schema version in this version of MongoDB";
+ fassertFailedNoTrace(28620);
+ }
+ long long foundSchemaVersion;
+ Status status = bsonExtractIntegerField(authSchemaVersionDocument,
+ AuthorizationManager::schemaVersionFieldName,
+ &foundSchemaVersion);
+ if (!status.isOK()) {
+ severe() << "During initial sync, found malformed auth schema version document: " << status
+ << "; document: " << authSchemaVersionDocument;
+ fassertFailedNoTrace(28618);
+ }
+ if ((foundSchemaVersion != AuthorizationManager::schemaVersion26Final) &&
+ (foundSchemaVersion != AuthorizationManager::schemaVersion28SCRAM)) {
+ severe() << "During initial sync, found auth schema version " << foundSchemaVersion
+ << ", but this version of MongoDB only supports schema versions "
+ << AuthorizationManager::schemaVersion26Final << " and "
+ << AuthorizationManager::schemaVersion28SCRAM;
+ fassertFailedNoTrace(28619);
+ }
+}
+
+bool _initialSyncClone(OperationContext* txn,
+ Cloner& cloner,
+ const std::string& host,
+ const list<string>& dbs,
+ bool dataPass) {
+ for (list<string>::const_iterator i = dbs.begin(); i != dbs.end(); i++) {
+ const string db = *i;
+ if (db == "local")
+ continue;
+
+ if (dataPass)
+ log() << "initial sync cloning db: " << db;
+ else
+ log() << "initial sync cloning indexes for : " << db;
+
+ CloneOptions options;
+ options.fromDB = db;
+ options.slaveOk = true;
+ options.useReplAuth = true;
+ options.snapshot = false;
+ options.mayYield = true;
+ options.mayBeInterrupted = false;
+ options.syncData = dataPass;
+ options.syncIndexes = !dataPass;
+
+ // Make database stable
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbWrite(txn->lockState(), db, MODE_X);
+
+ Status status = cloner.copyDb(txn, db, host, options, NULL);
if (!status.isOK()) {
- severe() << "During initial sync, found malformed auth schema version document: " <<
- status << "; document: " << authSchemaVersionDocument;
- fassertFailedNoTrace(28618);
+ log() << "initial sync: error while " << (dataPass ? "cloning " : "indexing ") << db
+ << ". " << status.toString();
+ return false;
}
- if ((foundSchemaVersion != AuthorizationManager::schemaVersion26Final) &&
- (foundSchemaVersion != AuthorizationManager::schemaVersion28SCRAM)) {
- severe() << "During initial sync, found auth schema version " << foundSchemaVersion <<
- ", but this version of MongoDB only supports schema versions " <<
- AuthorizationManager::schemaVersion26Final << " and " <<
- AuthorizationManager::schemaVersion28SCRAM;
- fassertFailedNoTrace(28619);
+
+ if (db == "admin") {
+ checkAdminDatabasePostClone(txn, dbHolder().get(txn, db));
}
}
- bool _initialSyncClone(OperationContext* txn,
- Cloner& cloner,
- const std::string& host,
- const list<string>& dbs,
- bool dataPass) {
-
- for( list<string>::const_iterator i = dbs.begin(); i != dbs.end(); i++ ) {
- const string db = *i;
- if ( db == "local" )
- continue;
-
- if ( dataPass )
- log() << "initial sync cloning db: " << db;
- else
- log() << "initial sync cloning indexes for : " << db;
-
- CloneOptions options;
- options.fromDB = db;
- options.slaveOk = true;
- options.useReplAuth = true;
- options.snapshot = false;
- options.mayYield = true;
- options.mayBeInterrupted = false;
- options.syncData = dataPass;
- options.syncIndexes = ! dataPass;
-
- // Make database stable
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbWrite(txn->lockState(), db, MODE_X);
-
- Status status = cloner.copyDb(txn, db, host, options, NULL);
- if (!status.isOK()) {
- log() << "initial sync: error while "
- << (dataPass ? "cloning " : "indexing ") << db
- << ". " << status.toString();
- return false;
- }
-
- if (db == "admin") {
- checkAdminDatabasePostClone(txn, dbHolder().get(txn, db));
- }
- }
+ return true;
+}
- return true;
+/**
+ * Replays the sync target's oplog from lastOp to the latest op on the sync target.
+ *
+ * @param syncer either initial sync (can reclone missing docs) or "normal" sync (no recloning)
+ * @param r the oplog reader
+ * @return if applying the oplog succeeded
+ */
+bool _initialSyncApplyOplog(OperationContext* ctx, repl::SyncTail& syncer, OplogReader* r) {
+ const OpTime startOpTime = getGlobalReplicationCoordinator()->getMyLastOptime();
+ BSONObj lastOp;
+
+ // If the fail point is set, exit failing.
+ if (MONGO_FAIL_POINT(failInitSyncWithBufferedEntriesLeft)) {
+ log() << "adding fake oplog entry to buffer.";
+ BackgroundSync::get()->pushTestOpToBuffer(BSON(
+ "ts" << startOpTime.getTimestamp() << "t" << startOpTime.getTerm() << "v" << 1 << "op"
+ << "n"));
+ return false;
}
- /**
- * Replays the sync target's oplog from lastOp to the latest op on the sync target.
- *
- * @param syncer either initial sync (can reclone missing docs) or "normal" sync (no recloning)
- * @param r the oplog reader
- * @return if applying the oplog succeeded
- */
- bool _initialSyncApplyOplog( OperationContext* ctx,
- repl::SyncTail& syncer,
- OplogReader* r) {
- const OpTime startOpTime = getGlobalReplicationCoordinator()->getMyLastOptime();
- BSONObj lastOp;
-
- // If the fail point is set, exit failing.
- if (MONGO_FAIL_POINT(failInitSyncWithBufferedEntriesLeft)) {
- log() << "adding fake oplog entry to buffer.";
- BackgroundSync::get()->pushTestOpToBuffer(
- BSON("ts" << startOpTime.getTimestamp() <<
- "t" << startOpTime.getTerm() <<
- "v" << 1 <<
- "op" << "n"));
- return false;
- }
-
- try {
- // It may have been a long time since we last used this connection to
- // query the oplog, depending on the size of the databases we needed to clone.
- // A common problem is that TCP keepalives are set too infrequent, and thus
- // our connection here is terminated by a firewall due to inactivity.
- // Solution is to increase the TCP keepalive frequency.
- lastOp = r->getLastOp(rsOplogName);
- } catch ( SocketException & ) {
- HostAndPort host = r->getHost();
- log() << "connection lost to " << host.toString() <<
- "; is your tcp keepalive interval set appropriately?";
- if ( !r->connect(host) ) {
- error() << "initial sync couldn't connect to " << host.toString();
- throw;
- }
- // retry
- lastOp = r->getLastOp(rsOplogName);
+ try {
+ // It may have been a long time since we last used this connection to
+ // query the oplog, depending on the size of the databases we needed to clone.
+        // A common problem is that TCP keepalives are set too infrequently, and thus
+ // our connection here is terminated by a firewall due to inactivity.
+ // Solution is to increase the TCP keepalive frequency.
+ lastOp = r->getLastOp(rsOplogName);
+ } catch (SocketException&) {
+ HostAndPort host = r->getHost();
+ log() << "connection lost to " << host.toString()
+ << "; is your tcp keepalive interval set appropriately?";
+ if (!r->connect(host)) {
+ error() << "initial sync couldn't connect to " << host.toString();
+ throw;
}
+ // retry
+ lastOp = r->getLastOp(rsOplogName);
+ }
- if (lastOp.isEmpty()) {
- error() << "initial sync lastOp is empty";
- sleepsecs(1);
- return false;
- }
+ if (lastOp.isEmpty()) {
+ error() << "initial sync lastOp is empty";
+ sleepsecs(1);
+ return false;
+ }
- OpTime stopOpTime = extractOpTime(lastOp);
+ OpTime stopOpTime = extractOpTime(lastOp);
- // If we already have what we need then return.
- if (stopOpTime == startOpTime)
- return true;
+ // If we already have what we need then return.
+ if (stopOpTime == startOpTime)
+ return true;
- verify( !stopOpTime.isNull() );
- verify( stopOpTime > startOpTime );
+ verify(!stopOpTime.isNull());
+ verify(stopOpTime > startOpTime);
- // apply till stopOpTime
- try {
- LOG(2) << "Applying oplog entries from " << startOpTime << " until " << stopOpTime;
- syncer.oplogApplication(ctx, stopOpTime);
+ // apply till stopOpTime
+ try {
+ LOG(2) << "Applying oplog entries from " << startOpTime << " until " << stopOpTime;
+ syncer.oplogApplication(ctx, stopOpTime);
- if (inShutdown()) {
- return false;
- }
- }
- catch (const DBException&) {
- warning() << "initial sync failed during oplog application phase, and will retry";
- sleepsecs(5);
+ if (inShutdown()) {
return false;
}
-
- return true;
+ } catch (const DBException&) {
+ warning() << "initial sync failed during oplog application phase, and will retry";
+ sleepsecs(5);
+ return false;
}
- void _tryToApplyOpWithRetry(OperationContext* txn, SyncTail* init, const BSONObj& op) {
- try {
- if (!SyncTail::syncApply(txn, op, false).isOK()) {
- bool retry;
- {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- retry = init->shouldRetry(txn, op);
- }
+ return true;
+}
+
+void _tryToApplyOpWithRetry(OperationContext* txn, SyncTail* init, const BSONObj& op) {
+ try {
+ if (!SyncTail::syncApply(txn, op, false).isOK()) {
+ bool retry;
+ {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ retry = init->shouldRetry(txn, op);
+ }
- if (retry) {
- // retry
- if (!SyncTail::syncApply(txn, op, false).isOK()) {
- uasserted(28542,
- str::stream() << "During initial sync, failed to apply op: "
- << op);
- }
+ if (retry) {
+ // retry
+ if (!SyncTail::syncApply(txn, op, false).isOK()) {
+ uasserted(28542,
+ str::stream() << "During initial sync, failed to apply op: " << op);
}
- // If shouldRetry() returns false, fall through.
- // This can happen if the document that was moved and missed by Cloner
- // subsequently got deleted and no longer exists on the Sync Target at all
}
+ // If shouldRetry() returns false, fall through.
+ // This can happen if the document that was moved and missed by Cloner
+ // subsequently got deleted and no longer exists on the Sync Target at all
}
- catch (const DBException& e) {
- error() << "exception: " << causedBy(e) << " on: " << op.toString();
- uasserted(28541,
- str::stream() << "During initial sync, failed to apply op: "
- << op);
- }
+ } catch (const DBException& e) {
+ error() << "exception: " << causedBy(e) << " on: " << op.toString();
+ uasserted(28541, str::stream() << "During initial sync, failed to apply op: " << op);
}
+}
- /**
- * Do the initial sync for this member. There are several steps to this process:
- *
- * 0. Add _initialSyncFlag to minValid collection to tell us to restart initial sync if we
- * crash in the middle of this procedure
- * 1. Record start time.
- * 2. Clone.
- * 3. Set minValid1 to sync target's latest op time.
- * 4. Apply ops from start to minValid1, fetching missing docs as needed.
- * 5. Set minValid2 to sync target's latest op time.
- * 6. Apply ops from minValid1 to minValid2.
- * 7. Build indexes.
- * 8. Set minValid3 to sync target's latest op time.
- * 9. Apply ops from minValid2 to minValid3.
- 10. Cleanup minValid collection: remove _initialSyncFlag field, set ts to minValid3 OpTime
- *
- * At that point, initial sync is finished. Note that the oplog from the sync target is applied
- * three times: step 4, 6, and 8. 4 may involve refetching, 6 should not. By the end of 6,
- * this member should have consistent data. 8 is "cosmetic," it is only to get this member
- * closer to the latest op time before it can transition out of startup state
- *
- * Returns a Status with ErrorCode::ShutdownInProgress if the node enters shutdown,
- * ErrorCode::InitialSyncOplogSourceMissing if the node fails to find an sync source, Status::OK
- * if everything worked, and ErrorCode::InitialSyncFailure for all other error cases.
- */
- Status _initialSync() {
-
- log() << "initial sync pending";
-
- BackgroundSync* bgsync(BackgroundSync::get());
- OperationContextImpl txn;
- txn.setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(&txn);
- ReplicationCoordinator* replCoord(getGlobalReplicationCoordinator());
-
- // reset state for initial sync
- truncateAndResetOplog(&txn, replCoord, bgsync);
-
- OplogReader r;
- Timestamp now(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
- OpTime nowOpTime(now, std::numeric_limits<long long>::max());
-
- while (r.getHost().empty()) {
- // We must prime the sync source selector so that it considers all candidates regardless
- // of oplog position, by passing in "now" with max term as the last op fetched time.
- r.connectToSyncSource(&txn, nowOpTime, replCoord);
- if (r.getHost().empty()) {
- std::string msg =
- "no valid sync sources found in current replset to do an initial sync";
- log() << msg;
- return Status(ErrorCodes::InitialSyncOplogSourceMissing, msg);
- }
-
- if (inShutdown()) {
- return Status(ErrorCodes::ShutdownInProgress, "shutting down");
- }
+/**
+ * Do the initial sync for this member. There are several steps to this process:
+ *
+ * 0. Add _initialSyncFlag to minValid collection to tell us to restart initial sync if we
+ * crash in the middle of this procedure
+ * 1. Record start time.
+ * 2. Clone.
+ * 3. Set minValid1 to sync target's latest op time.
+ * 4. Apply ops from start to minValid1, fetching missing docs as needed.
+ * 5. Set minValid2 to sync target's latest op time.
+ * 6. Apply ops from minValid1 to minValid2.
+ * 7. Build indexes.
+ * 8. Set minValid3 to sync target's latest op time.
+ * 9. Apply ops from minValid2 to minValid3.
+ * 10. Cleanup minValid collection: remove _initialSyncFlag field, set ts to minValid3 OpTime
+ *
+ * At that point, initial sync is finished. Note that the oplog from the sync target is applied
+ * three times: in steps 4, 6, and 8. Step 4 may involve refetching; step 6 should not. By the
+ * end of step 6, this member should have consistent data. Step 8 is "cosmetic": it only brings
+ * this member closer to the latest op time before it can transition out of startup state.
+ *
+ * Returns a Status with ErrorCodes::ShutdownInProgress if the node enters shutdown,
+ * ErrorCodes::InitialSyncOplogSourceMissing if the node fails to find a sync source, Status::OK
+ * if everything worked, and ErrorCodes::InitialSyncFailure for all other error cases.
+ */
+Status _initialSync() {
+ log() << "initial sync pending";
+
+ BackgroundSync* bgsync(BackgroundSync::get());
+ OperationContextImpl txn;
+ txn.setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(&txn);
+ ReplicationCoordinator* replCoord(getGlobalReplicationCoordinator());
+
+ // reset state for initial sync
+ truncateAndResetOplog(&txn, replCoord, bgsync);
+
+ OplogReader r;
+ Timestamp now(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
+ OpTime nowOpTime(now, std::numeric_limits<long long>::max());
+
+ while (r.getHost().empty()) {
+ // We must prime the sync source selector so that it considers all candidates regardless
+ // of oplog position, by passing in "now" with max term as the last op fetched time.
+ r.connectToSyncSource(&txn, nowOpTime, replCoord);
+ if (r.getHost().empty()) {
+ std::string msg =
+ "no valid sync sources found in current replset to do an initial sync";
+ log() << msg;
+ return Status(ErrorCodes::InitialSyncOplogSourceMissing, msg);
}
- InitialSync init(bgsync);
- init.setHostname(r.getHost().toString());
-
- BSONObj lastOp = r.getLastOp(rsOplogName);
- if ( lastOp.isEmpty() ) {
- std::string msg = "initial sync couldn't read remote oplog";
- log() << msg;
- sleepsecs(15);
- return Status(ErrorCodes::InitialSyncFailure, msg);
+ if (inShutdown()) {
+ return Status(ErrorCodes::ShutdownInProgress, "shutting down");
}
+ }
- // Add field to minvalid document to tell us to restart initial sync if we crash
- setInitialSyncFlag(&txn);
+ InitialSync init(bgsync);
+ init.setHostname(r.getHost().toString());
- log() << "initial sync drop all databases";
- dropAllDatabasesExceptLocal(&txn);
+ BSONObj lastOp = r.getLastOp(rsOplogName);
+ if (lastOp.isEmpty()) {
+ std::string msg = "initial sync couldn't read remote oplog";
+ log() << msg;
+ sleepsecs(15);
+ return Status(ErrorCodes::InitialSyncFailure, msg);
+ }
- log() << "initial sync clone all databases";
+ // Add field to minvalid document to tell us to restart initial sync if we crash
+ setInitialSyncFlag(&txn);
- list<string> dbs = r.conn()->getDatabaseNames();
- {
- // Clone admin database first, to catch schema errors.
- list<string>::iterator admin = std::find(dbs.begin(), dbs.end(), "admin");
- if (admin != dbs.end()) {
- dbs.splice(dbs.begin(), dbs, admin);
- }
- }
+ log() << "initial sync drop all databases";
+ dropAllDatabasesExceptLocal(&txn);
- Cloner cloner;
- if (!_initialSyncClone(&txn, cloner, r.conn()->getServerAddress(), dbs, true)) {
- return Status(ErrorCodes::InitialSyncFailure, "initial sync failed data cloning");
+ log() << "initial sync clone all databases";
+
+ list<string> dbs = r.conn()->getDatabaseNames();
+ {
+ // Clone admin database first, to catch schema errors.
+ list<string>::iterator admin = std::find(dbs.begin(), dbs.end(), "admin");
+ if (admin != dbs.end()) {
+ dbs.splice(dbs.begin(), dbs, admin);
}
+ }
- log() << "initial sync data copy, starting syncup";
+ Cloner cloner;
+ if (!_initialSyncClone(&txn, cloner, r.conn()->getServerAddress(), dbs, true)) {
+ return Status(ErrorCodes::InitialSyncFailure, "initial sync failed data cloning");
+ }
- // prime oplog
- _tryToApplyOpWithRetry(&txn, &init, lastOp);
- std::deque<BSONObj> ops;
- ops.push_back(lastOp);
+ log() << "initial sync data copy, starting syncup";
- OpTime lastOptime = writeOpsToOplog(&txn, ops);
- ReplClientInfo::forClient(txn.getClient()).setLastOp(lastOptime);
- replCoord->setMyLastOptime(lastOptime);
- setNewTimestamp(lastOptime.getTimestamp());
+ // prime oplog
+ _tryToApplyOpWithRetry(&txn, &init, lastOp);
+ std::deque<BSONObj> ops;
+ ops.push_back(lastOp);
- std::string msg = "oplog sync 1 of 3";
- log() << msg;
- if (!_initialSyncApplyOplog(&txn, init, &r)) {
- return Status(ErrorCodes::InitialSyncFailure,
- str::stream() << "initial sync failed: " << msg);
- }
+ OpTime lastOptime = writeOpsToOplog(&txn, ops);
+ ReplClientInfo::forClient(txn.getClient()).setLastOp(lastOptime);
+ replCoord->setMyLastOptime(lastOptime);
+ setNewTimestamp(lastOptime.getTimestamp());
- // Now we sync to the latest op on the sync target _again_, as we may have recloned ops
- // that were "from the future" compared with minValid. During this second application,
- // nothing should need to be recloned.
- msg = "oplog sync 2 of 3";
- log() << msg;
- if (!_initialSyncApplyOplog(&txn, init, &r)) {
- return Status(ErrorCodes::InitialSyncFailure,
- str::stream() << "initial sync failed: " << msg);
- }
- // data should now be consistent
+ std::string msg = "oplog sync 1 of 3";
+ log() << msg;
+ if (!_initialSyncApplyOplog(&txn, init, &r)) {
+ return Status(ErrorCodes::InitialSyncFailure,
+ str::stream() << "initial sync failed: " << msg);
+ }
- msg = "initial sync building indexes";
- log() << msg;
- if (!_initialSyncClone(&txn, cloner, r.conn()->getServerAddress(), dbs, false)) {
- return Status(ErrorCodes::InitialSyncFailure,
- str::stream() << "initial sync failed: " << msg);
- }
+ // Now we sync to the latest op on the sync target _again_, as we may have recloned ops
+ // that were "from the future" compared with minValid. During this second application,
+ // nothing should need to be recloned.
+ msg = "oplog sync 2 of 3";
+ log() << msg;
+ if (!_initialSyncApplyOplog(&txn, init, &r)) {
+ return Status(ErrorCodes::InitialSyncFailure,
+ str::stream() << "initial sync failed: " << msg);
+ }
+ // data should now be consistent
- // WARNING: If the 3rd oplog sync step is removed we must reset minValid
- // to the last entry on the source server so that we don't come
- // out of recovering until we get there (since the previous steps
- // could have fetched newer document than the oplog entry we were applying from).
- msg = "oplog sync 3 of 3";
- log() << msg;
+ msg = "initial sync building indexes";
+ log() << msg;
+ if (!_initialSyncClone(&txn, cloner, r.conn()->getServerAddress(), dbs, false)) {
+ return Status(ErrorCodes::InitialSyncFailure,
+ str::stream() << "initial sync failed: " << msg);
+ }
- SyncTail tail(bgsync, multiSyncApply);
- if (!_initialSyncApplyOplog(&txn, tail, &r)) {
- return Status(ErrorCodes::InitialSyncFailure,
- str::stream() << "initial sync failed: " << msg);
- }
+ // WARNING: If the 3rd oplog sync step is removed we must reset minValid
+ // to the last entry on the source server so that we don't come
+ // out of recovering until we get there (since the previous steps
+ // could have fetched newer document than the oplog entry we were applying from).
+ msg = "oplog sync 3 of 3";
+ log() << msg;
+
+ SyncTail tail(bgsync, multiSyncApply);
+ if (!_initialSyncApplyOplog(&txn, tail, &r)) {
+ return Status(ErrorCodes::InitialSyncFailure,
+ str::stream() << "initial sync failed: " << msg);
+ }
- // ---------
+ // ---------
- Status status = getGlobalAuthorizationManager()->initialize(&txn);
- if (!status.isOK()) {
- warning() << "Failed to reinitialize auth data after initial sync. " << status;
- return status;
- }
+ Status status = getGlobalAuthorizationManager()->initialize(&txn);
+ if (!status.isOK()) {
+ warning() << "Failed to reinitialize auth data after initial sync. " << status;
+ return status;
+ }
- log() << "initial sync finishing up";
+ log() << "initial sync finishing up";
- {
- ScopedTransaction scopedXact(&txn, MODE_IX);
- AutoGetDb autodb(&txn, "local", MODE_X);
- OpTime lastOpTimeWritten(getGlobalReplicationCoordinator()->getMyLastOptime());
- log() << "set minValid=" << lastOpTimeWritten;
+ {
+ ScopedTransaction scopedXact(&txn, MODE_IX);
+ AutoGetDb autodb(&txn, "local", MODE_X);
+ OpTime lastOpTimeWritten(getGlobalReplicationCoordinator()->getMyLastOptime());
+ log() << "set minValid=" << lastOpTimeWritten;
- // Initial sync is now complete. Flag this by setting minValid to the last thing
- // we synced.
- setMinValid(&txn, lastOpTimeWritten);
+ // Initial sync is now complete. Flag this by setting minValid to the last thing
+ // we synced.
+ setMinValid(&txn, lastOpTimeWritten);
- // Clear the initial sync flag.
- clearInitialSyncFlag(&txn);
- BackgroundSync::get()->setInitialSyncRequestedFlag(false);
- }
+ // Clear the initial sync flag.
+ clearInitialSyncFlag(&txn);
+ BackgroundSync::get()->setInitialSyncRequestedFlag(false);
+ }
- // If we just cloned & there were no ops applied, we still want the primary to know where
- // we're up to
- bgsync->notify(&txn);
+ // If we just cloned & there were no ops applied, we still want the primary to know where
+ // we're up to
+ bgsync->notify(&txn);
- log() << "initial sync done";
- return Status::OK();
- }
-} // namespace
+ log() << "initial sync done";
+ return Status::OK();
+}
+} // namespace
- void syncDoInitialSync() {
- static const int maxFailedAttempts = 10;
+void syncDoInitialSync() {
+ static const int maxFailedAttempts = 10;
- {
- OperationContextImpl txn;
- createOplog(&txn);
- }
+ {
+ OperationContextImpl txn;
+ createOplog(&txn);
+ }
- int failedAttempts = 0;
- while ( failedAttempts < maxFailedAttempts ) {
- try {
- // leave loop when successful
- Status status = _initialSync();
- if (status.isOK()) {
- break;
- }
- if (status == ErrorCodes::InitialSyncOplogSourceMissing) {
- sleepsecs(1);
- return;
- }
+ int failedAttempts = 0;
+ while (failedAttempts < maxFailedAttempts) {
+ try {
+ // leave loop when successful
+ Status status = _initialSync();
+ if (status.isOK()) {
+ break;
}
- catch(const DBException& e) {
- error() << e ;
- // Return if in shutdown
- if (inShutdown()) {
- return;
- }
+ if (status == ErrorCodes::InitialSyncOplogSourceMissing) {
+ sleepsecs(1);
+ return;
}
-
+ } catch (const DBException& e) {
+ error() << e;
+ // Return if in shutdown
if (inShutdown()) {
return;
}
-
- error() << "initial sync attempt failed, "
- << (maxFailedAttempts - ++failedAttempts) << " attempts remaining";
- sleepsecs(5);
}
- // No need to print a stack
- if (failedAttempts >= maxFailedAttempts) {
- severe() << "The maximum number of retries have been exhausted for initial sync.";
- fassertFailedNoTrace(16233);
+ if (inShutdown()) {
+ return;
}
+
+ error() << "initial sync attempt failed, " << (maxFailedAttempts - ++failedAttempts)
+ << " attempts remaining";
+ sleepsecs(5);
+ }
+
+ // No need to print a stack
+ if (failedAttempts >= maxFailedAttempts) {
+ severe() << "The maximum number of retries have been exhausted for initial sync.";
+ fassertFailedNoTrace(16233);
}
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
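To make the three-pass catch-up described in the _initialSync() comment above concrete, here is a minimal standalone sketch, not MongoDB code: the vector-of-ints "oplog" and every name in it are invented for illustration only. Each pass snapshots the source's latest optime as a target (the "minValidN" of steps 3, 5, and 8) and applies entries up to it; because the source keeps writing during the clone and the earlier passes, the later passes close the remaining gap.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> sourceOplog = {1, 2, 3};  // entries already present on the sync source
        std::size_t applied = 0;                   // how many entries we have applied locally

        for (int pass = 1; pass <= 3; ++pass) {
            // "Set minValidN": snapshot the source's latest optime as this pass's target.
            int target = sourceOplog.back();

            // "Apply ops up to minValidN".
            while (applied < sourceOplog.size() && sourceOplog[applied] <= target) {
                ++applied;
            }
            std::cout << "pass " << pass << ": applied through optime " << target << "\n";

            // The source keeps accepting writes while we catch up, so the next pass has a
            // newer target; by the third pass we are close enough to leave startup state.
            sourceOplog.push_back(sourceOplog.back() + 1);
            sourceOplog.push_back(sourceOplog.back() + 1);
        }
        return 0;
    }
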
diff --git a/src/mongo/db/repl/rs_initialsync.h b/src/mongo/db/repl/rs_initialsync.h
index 659bb5ad577..7add22b9a37 100644
--- a/src/mongo/db/repl/rs_initialsync.h
+++ b/src/mongo/db/repl/rs_initialsync.h
@@ -30,10 +30,10 @@
namespace mongo {
namespace repl {
- /**
- * Begins an initial sync of a node. This drops all data, chooses a sync source,
- * and runs the cloner from that sync source. The node's state is not changed.
- */
- void syncDoInitialSync();
+/**
+ * Begins an initial sync of a node. This drops all data, chooses a sync source,
+ * and runs the cloner from that sync source. The node's state is not changed.
+ */
+void syncDoInitialSync();
}
}
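syncDoInitialSync(), declared above and defined in the rs_initialsync.cpp diff, wraps the whole procedure in a bounded retry loop: up to ten attempts, an early return when no sync source is available, and a fatal assertion once the retry budget is exhausted. Below is a minimal standalone sketch of that pattern, not MongoDB code; AttemptResult and runOnce are invented for illustration, and the backoff is shortened so the sketch runs quickly.

    #include <chrono>
    #include <cstdlib>
    #include <iostream>
    #include <thread>

    enum class AttemptResult { kOk, kSourceMissing, kFailure };

    // Placeholder for one full initial-sync attempt; here the third try succeeds.
    AttemptResult runOnce(int failedAttemptsSoFar) {
        return failedAttemptsSoFar < 2 ? AttemptResult::kFailure : AttemptResult::kOk;
    }

    int main() {
        const int maxFailedAttempts = 10;
        int failedAttempts = 0;
        while (failedAttempts < maxFailedAttempts) {
            AttemptResult result = runOnce(failedAttempts);
            if (result == AttemptResult::kOk) {
                std::cout << "initial sync done\n";
                return 0;
            }
            if (result == AttemptResult::kSourceMissing) {
                // No candidate to sync from right now; return quietly so the caller retries later.
                return 0;
            }
            ++failedAttempts;
            std::cout << "attempt failed, " << (maxFailedAttempts - failedAttempts)
                      << " attempts remaining\n";
            std::this_thread::sleep_for(std::chrono::milliseconds(50));  // shortened backoff
        }
        // Retry budget exhausted: the real code treats this as a fatal error.
        std::cerr << "maximum number of retries exhausted\n";
        return EXIT_FAILURE;
    }
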
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index e5f01a6c8c7..d7a4c151910 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -102,762 +102,711 @@
namespace mongo {
- using std::shared_ptr;
- using std::unique_ptr;
- using std::endl;
- using std::list;
- using std::map;
- using std::set;
- using std::string;
- using std::pair;
+using std::shared_ptr;
+using std::unique_ptr;
+using std::endl;
+using std::list;
+using std::map;
+using std::set;
+using std::string;
+using std::pair;
namespace repl {
namespace {
- class RSFatalException : public std::exception {
- public:
- RSFatalException(std::string m = "replica set fatal exception")
- : msg(m) {}
- virtual ~RSFatalException() throw() {};
- virtual const char* what() const throw() {
- return msg.c_str();
- }
- private:
- std::string msg;
- };
-
- struct DocID {
- // ns and _id both point into ownedObj's buffer
- BSONObj ownedObj;
- const char* ns;
- BSONElement _id;
- bool operator<(const DocID& other) const {
- int comp = strcmp(ns, other.ns);
- if (comp < 0)
- return true;
- if (comp > 0)
- return false;
- return _id < other._id;
- }
- };
+class RSFatalException : public std::exception {
+public:
+ RSFatalException(std::string m = "replica set fatal exception") : msg(m) {}
+ virtual ~RSFatalException() throw(){};
+ virtual const char* what() const throw() {
+ return msg.c_str();
+ }
- struct FixUpInfo {
- // note this is a set -- if there are many $inc's on a single document we need to rollback,
- // we only need to refetch it once.
- set<DocID> toRefetch;
+private:
+ std::string msg;
+};
+
+struct DocID {
+ // ns and _id both point into ownedObj's buffer
+ BSONObj ownedObj;
+ const char* ns;
+ BSONElement _id;
+ bool operator<(const DocID& other) const {
+ int comp = strcmp(ns, other.ns);
+ if (comp < 0)
+ return true;
+ if (comp > 0)
+ return false;
+ return _id < other._id;
+ }
+};
- // collections to drop
- set<string> toDrop;
+struct FixUpInfo {
+ // note this is a set -- if there are many $inc's on a single document we need to rollback,
+ // we only need to refetch it once.
+ set<DocID> toRefetch;
- set<string> collectionsToResyncData;
- set<string> collectionsToResyncMetadata;
+ // collections to drop
+ set<string> toDrop;
- Timestamp commonPoint;
- RecordId commonPointOurDiskloc;
+ set<string> collectionsToResyncData;
+ set<string> collectionsToResyncMetadata;
- int rbid; // remote server's current rollback sequence #
- };
+ Timestamp commonPoint;
+ RecordId commonPointOurDiskloc;
+ int rbid; // remote server's current rollback sequence #
+};
- Status refetch(FixUpInfo& fixUpInfo, const BSONObj& ourObj) {
- const char* op = ourObj.getStringField("op");
- if (*op == 'n')
- return Status::OK();
- if (ourObj.objsize() > 512 * 1024 * 1024)
- throw RSFatalException("rollback too large");
+Status refetch(FixUpInfo& fixUpInfo, const BSONObj& ourObj) {
+ const char* op = ourObj.getStringField("op");
+ if (*op == 'n')
+ return Status::OK();
- DocID doc;
- doc.ownedObj = ourObj.getOwned();
- doc.ns = doc.ownedObj.getStringField("ns");
- if (*doc.ns == '\0') {
- warning() << "ignoring op on rollback no ns TODO : "
- << doc.ownedObj.toString();
- return Status::OK();
- }
+ if (ourObj.objsize() > 512 * 1024 * 1024)
+ throw RSFatalException("rollback too large");
- BSONObj obj = doc.ownedObj.getObjectField(*op=='u' ? "o2" : "o");
- if (obj.isEmpty()) {
- warning() << "ignoring op on rollback : " << doc.ownedObj.toString();
- return Status::OK();
- }
+ DocID doc;
+ doc.ownedObj = ourObj.getOwned();
+ doc.ns = doc.ownedObj.getStringField("ns");
+ if (*doc.ns == '\0') {
+ warning() << "ignoring op on rollback no ns TODO : " << doc.ownedObj.toString();
+ return Status::OK();
+ }
- if (*op == 'c') {
- BSONElement first = obj.firstElement();
- NamespaceString nss(doc.ns); // foo.$cmd
- string cmdname = first.fieldName();
- Command* cmd = Command::findCommand(cmdname.c_str());
- if (cmd == NULL) {
- severe() << "rollback no such command " << first.fieldName();
- return Status(ErrorCodes::UnrecoverableRollbackError, str::stream() <<
- "rollback no such command " << first.fieldName(),
- 18751);
- }
- if (cmdname == "create") {
- // Create collection operation
- // { ts: ..., h: ..., op: "c", ns: "foo.$cmd", o: { create: "abc", ... } }
- string ns = nss.db().toString() + '.' + obj["create"].String(); // -> foo.abc
- fixUpInfo.toDrop.insert(ns);
- return Status::OK();
- }
- else if (cmdname == "drop") {
- string ns = nss.db().toString() + '.' + first.valuestr();
- fixUpInfo.collectionsToResyncData.insert(ns);
- return Status::OK();
- }
- else if (cmdname == "dropIndexes" || cmdname == "deleteIndexes") {
- // TODO: this is bad. we simply full resync the collection here,
- // which could be very slow.
- warning() << "rollback of dropIndexes is slow in this version of "
- << "mongod";
- string ns = nss.db().toString() + '.' + first.valuestr();
- fixUpInfo.collectionsToResyncData.insert(ns);
- return Status::OK();
- }
- else if (cmdname == "renameCollection") {
- // TODO: slow.
- warning() << "rollback of renameCollection is slow in this version of "
- << "mongod";
- string from = first.valuestr();
- string to = obj["to"].String();
- fixUpInfo.collectionsToResyncData.insert(from);
- fixUpInfo.collectionsToResyncData.insert(to);
- return Status::OK();
- }
- else if (cmdname == "dropDatabase") {
- severe() << "rollback : can't rollback drop database full resync "
- << "will be required";
- log() << obj.toString();
- throw RSFatalException();
- }
- else if (cmdname == "collMod") {
- const auto ns = NamespaceString(cmd->parseNs(nss.db().toString(), obj));
- for (auto field : obj) {
- const auto modification = field.fieldNameStringData();
- if (modification == cmdname) {
- continue; // Skipping command name.
- }
+ BSONObj obj = doc.ownedObj.getObjectField(*op == 'u' ? "o2" : "o");
+ if (obj.isEmpty()) {
+ warning() << "ignoring op on rollback : " << doc.ownedObj.toString();
+ return Status::OK();
+ }
- if (modification == "validator"
- || modification == "usePowerOf2Sizes"
- || modification == "noPadding") {
- fixUpInfo.collectionsToResyncMetadata.insert(ns);
- continue;
- }
+ if (*op == 'c') {
+ BSONElement first = obj.firstElement();
+ NamespaceString nss(doc.ns); // foo.$cmd
+ string cmdname = first.fieldName();
+ Command* cmd = Command::findCommand(cmdname.c_str());
+ if (cmd == NULL) {
+ severe() << "rollback no such command " << first.fieldName();
+ return Status(ErrorCodes::UnrecoverableRollbackError,
+ str::stream() << "rollback no such command " << first.fieldName(),
+ 18751);
+ }
+ if (cmdname == "create") {
+ // Create collection operation
+ // { ts: ..., h: ..., op: "c", ns: "foo.$cmd", o: { create: "abc", ... } }
+ string ns = nss.db().toString() + '.' + obj["create"].String(); // -> foo.abc
+ fixUpInfo.toDrop.insert(ns);
+ return Status::OK();
+ } else if (cmdname == "drop") {
+ string ns = nss.db().toString() + '.' + first.valuestr();
+ fixUpInfo.collectionsToResyncData.insert(ns);
+ return Status::OK();
+ } else if (cmdname == "dropIndexes" || cmdname == "deleteIndexes") {
+ // TODO: this is bad. we simply full resync the collection here,
+ // which could be very slow.
+ warning() << "rollback of dropIndexes is slow in this version of "
+ << "mongod";
+ string ns = nss.db().toString() + '.' + first.valuestr();
+ fixUpInfo.collectionsToResyncData.insert(ns);
+ return Status::OK();
+ } else if (cmdname == "renameCollection") {
+ // TODO: slow.
+ warning() << "rollback of renameCollection is slow in this version of "
+ << "mongod";
+ string from = first.valuestr();
+ string to = obj["to"].String();
+ fixUpInfo.collectionsToResyncData.insert(from);
+ fixUpInfo.collectionsToResyncData.insert(to);
+ return Status::OK();
+ } else if (cmdname == "dropDatabase") {
+ severe() << "rollback : can't rollback drop database full resync "
+ << "will be required";
+ log() << obj.toString();
+ throw RSFatalException();
+ } else if (cmdname == "collMod") {
+ const auto ns = NamespaceString(cmd->parseNs(nss.db().toString(), obj));
+ for (auto field : obj) {
+ const auto modification = field.fieldNameStringData();
+ if (modification == cmdname) {
+ continue; // Skipping command name.
+ }
- severe() << "cannot rollback a collMod command: " << obj;
- throw RSFatalException();
+ if (modification == "validator" || modification == "usePowerOf2Sizes" ||
+ modification == "noPadding") {
+ fixUpInfo.collectionsToResyncMetadata.insert(ns);
+ continue;
}
- }
- else {
- severe() << "can't rollback this command yet: "
- << obj.toString();
- log() << "cmdname=" << cmdname;
+
+ severe() << "cannot rollback a collMod command: " << obj;
throw RSFatalException();
}
+ } else {
+ severe() << "can't rollback this command yet: " << obj.toString();
+ log() << "cmdname=" << cmdname;
+ throw RSFatalException();
}
+ }
- doc._id = obj["_id"];
- if (doc._id.eoo()) {
- warning() << "ignoring op on rollback no _id TODO : " << doc.ns << ' '
- << doc.ownedObj.toString();
- return Status::OK();
- }
-
- fixUpInfo.toRefetch.insert(doc);
+ doc._id = obj["_id"];
+ if (doc._id.eoo()) {
+ warning() << "ignoring op on rollback no _id TODO : " << doc.ns << ' '
+ << doc.ownedObj.toString();
return Status::OK();
}
+ fixUpInfo.toRefetch.insert(doc);
+ return Status::OK();
+}
- void syncFixUp(OperationContext* txn,
- FixUpInfo& fixUpInfo,
- const RollbackSource& rollbackSource,
- ReplicationCoordinator* replCoord) {
- // fetch all first so we needn't handle interruption in a fancy way
- unsigned long long totalSize = 0;
+void syncFixUp(OperationContext* txn,
+ FixUpInfo& fixUpInfo,
+ const RollbackSource& rollbackSource,
+ ReplicationCoordinator* replCoord) {
+ // fetch all first so we needn't handle interruption in a fancy way
- list< pair<DocID, BSONObj> > goodVersions;
+ unsigned long long totalSize = 0;
- BSONObj newMinValid;
+ list<pair<DocID, BSONObj>> goodVersions;
- // fetch all the goodVersions of each document from current primary
- DocID doc;
- unsigned long long numFetched = 0;
- try {
- for (set<DocID>::iterator it = fixUpInfo.toRefetch.begin();
- it != fixUpInfo.toRefetch.end();
- it++) {
- doc = *it;
-
- verify(!doc._id.eoo());
-
- {
- // TODO : slow. lots of round trips.
- numFetched++;
- BSONObj good = rollbackSource.findOne(NamespaceString(doc.ns), doc._id.wrap());
- totalSize += good.objsize();
- uassert(13410, "replSet too much data to roll back",
- totalSize < 300 * 1024 * 1024);
-
- // note good might be eoo, indicating we should delete it
- goodVersions.push_back(pair<DocID, BSONObj>(doc,good));
- }
- }
- newMinValid = rollbackSource.getLastOperation();
- if (newMinValid.isEmpty()) {
- error() << "rollback error newMinValid empty?";
- return;
- }
- }
- catch (const DBException& e) {
- LOG(1) << "rollback re-get objects: " << e.toString();
- error() << "rollback couldn't re-get ns:" << doc.ns << " _id:" << doc._id << ' '
- << numFetched << '/' << fixUpInfo.toRefetch.size();
- throw e;
- }
+ BSONObj newMinValid;
- log() << "rollback 3.5";
- if (fixUpInfo.rbid != rollbackSource.getRollbackId()) {
- // Our source rolled back itself so the data we received isn't necessarily consistent.
- warning() << "rollback rbid on source changed during rollback, "
- << "cancelling this attempt";
- return;
- }
+ // fetch all the goodVersions of each document from current primary
+ DocID doc;
+ unsigned long long numFetched = 0;
+ try {
+ for (set<DocID>::iterator it = fixUpInfo.toRefetch.begin(); it != fixUpInfo.toRefetch.end();
+ it++) {
+ doc = *it;
- // update them
- log() << "rollback 4 n:" << goodVersions.size();
+ verify(!doc._id.eoo());
- bool warn = false;
+ {
+ // TODO : slow. lots of round trips.
+ numFetched++;
+ BSONObj good = rollbackSource.findOne(NamespaceString(doc.ns), doc._id.wrap());
+ totalSize += good.objsize();
+ uassert(13410, "replSet too much data to roll back", totalSize < 300 * 1024 * 1024);
- invariant(!fixUpInfo.commonPointOurDiskloc.isNull());
+ // note good might be eoo, indicating we should delete it
+ goodVersions.push_back(pair<DocID, BSONObj>(doc, good));
+ }
+ }
+ newMinValid = rollbackSource.getLastOperation();
+ if (newMinValid.isEmpty()) {
+ error() << "rollback error newMinValid empty?";
+ return;
+ }
+ } catch (const DBException& e) {
+ LOG(1) << "rollback re-get objects: " << e.toString();
+ error() << "rollback couldn't re-get ns:" << doc.ns << " _id:" << doc._id << ' '
+ << numFetched << '/' << fixUpInfo.toRefetch.size();
+ throw e;
+ }
- // we have items we are writing that aren't from a point-in-time. thus best not to come
- // online until we get to that point in freshness.
- OpTime minValid = extractOpTime(newMinValid);
- log() << "minvalid=" << minValid;
- setMinValid(txn, minValid);
+ log() << "rollback 3.5";
+ if (fixUpInfo.rbid != rollbackSource.getRollbackId()) {
+ // Our source rolled back itself so the data we received isn't necessarily consistent.
+ warning() << "rollback rbid on source changed during rollback, "
+ << "cancelling this attempt";
+ return;
+ }
- // any full collection resyncs required?
- if (!fixUpInfo.collectionsToResyncData.empty()
- || !fixUpInfo.collectionsToResyncMetadata.empty()) {
+ // update them
+ log() << "rollback 4 n:" << goodVersions.size();
- for (const string& ns : fixUpInfo.collectionsToResyncData) {
- log() << "rollback 4.1.1 coll resync " << ns;
+ bool warn = false;
- fixUpInfo.collectionsToResyncMetadata.erase(ns);
+ invariant(!fixUpInfo.commonPointOurDiskloc.isNull());
- const NamespaceString nss(ns);
+ // we have items we are writing that aren't from a point-in-time. thus best not to come
+ // online until we get to that point in freshness.
+ OpTime minValid = extractOpTime(newMinValid);
+ log() << "minvalid=" << minValid;
+ setMinValid(txn, minValid);
+ // any full collection resyncs required?
+ if (!fixUpInfo.collectionsToResyncData.empty() ||
+ !fixUpInfo.collectionsToResyncMetadata.empty()) {
+ for (const string& ns : fixUpInfo.collectionsToResyncData) {
+ log() << "rollback 4.1.1 coll resync " << ns;
- {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- Database* db = dbHolder().openDb(txn, nss.db().toString());
- invariant(db);
- WriteUnitOfWork wunit(txn);
- db->dropCollection(txn, ns);
- wunit.commit();
- }
+ fixUpInfo.collectionsToResyncMetadata.erase(ns);
- rollbackSource.copyCollectionFromRemote(txn, nss);
- }
+ const NamespaceString nss(ns);
- for (const string& ns : fixUpInfo.collectionsToResyncMetadata) {
- log() << "rollback 4.1.2 coll metadata resync " << ns;
- const NamespaceString nss(ns);
+ {
ScopedTransaction transaction(txn, MODE_IX);
Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- auto db = dbHolder().openDb(txn, nss.db().toString());
+ Database* db = dbHolder().openDb(txn, nss.db().toString());
invariant(db);
- auto collection = db->getCollection(ns);
- invariant(collection);
- auto cce = collection->getCatalogEntry();
+ WriteUnitOfWork wunit(txn);
+ db->dropCollection(txn, ns);
+ wunit.commit();
+ }
- auto infoResult = rollbackSource.getCollectionInfo(nss);
+ rollbackSource.copyCollectionFromRemote(txn, nss);
+ }
- if (!infoResult.isOK()) {
- // Collection dropped by "them" so we should drop it too.
- log() << ns << " not found on remote host, dropping";
- fixUpInfo.toDrop.insert(ns);
- continue;
- }
+ for (const string& ns : fixUpInfo.collectionsToResyncMetadata) {
+ log() << "rollback 4.1.2 coll metadata resync " << ns;
- auto info = infoResult.getValue();
- CollectionOptions options;
- if (auto optionsField = info["options"]) {
- if (optionsField.type() != Object) {
- throw RSFatalException(str::stream() << "Failed to parse options "
- << info << ": expected 'options' to be an "
- << "Object, got " << typeName(optionsField.type()));
- }
+ const NamespaceString nss(ns);
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
+ auto db = dbHolder().openDb(txn, nss.db().toString());
+ invariant(db);
+ auto collection = db->getCollection(ns);
+ invariant(collection);
+ auto cce = collection->getCatalogEntry();
- auto status = options.parse(optionsField.Obj());
- if (!status.isOK()) {
- throw RSFatalException(str::stream() << "Failed to parse options "
- << info << ": "
- << status.toString());
- }
- }
- else {
- // Use default options.
- }
+ auto infoResult = rollbackSource.getCollectionInfo(nss);
- WriteUnitOfWork wuow(txn);
- if (options.flagsSet || cce->getCollectionOptions(txn).flagsSet) {
- cce->updateFlags(txn, options.flags);
+ if (!infoResult.isOK()) {
+ // Collection dropped by "them" so we should drop it too.
+ log() << ns << " not found on remote host, dropping";
+ fixUpInfo.toDrop.insert(ns);
+ continue;
+ }
+
+ auto info = infoResult.getValue();
+ CollectionOptions options;
+ if (auto optionsField = info["options"]) {
+ if (optionsField.type() != Object) {
+ throw RSFatalException(str::stream() << "Failed to parse options " << info
+ << ": expected 'options' to be an "
+ << "Object, got "
+ << typeName(optionsField.type()));
}
- auto status = collection->setValidator(txn, options.validator);
+ auto status = options.parse(optionsField.Obj());
if (!status.isOK()) {
- throw RSFatalException(str::stream() << "Failed to set validator: "
- << status.toString());
+ throw RSFatalException(str::stream() << "Failed to parse options " << info
+ << ": " << status.toString());
}
- wuow.commit();
+ } else {
+ // Use default options.
}
- // we did more reading from primary, so check it again for a rollback (which would mess
- // us up), and make minValid newer.
- log() << "rollback 4.2";
-
- string err;
- try {
- newMinValid = rollbackSource.getLastOperation();
- if (newMinValid.isEmpty()) {
- err = "can't get minvalid from sync source";
- }
- else {
- OpTime minValid = extractOpTime(newMinValid);
- log() << "minvalid=" << minValid;
- setMinValid(txn, minValid);
- }
- }
- catch (const DBException& e) {
- err = "can't get/set minvalid: ";
- err += e.what();
- }
- if (fixUpInfo.rbid != rollbackSource.getRollbackId()) {
- // our source rolled back itself. so the data we received isn't necessarily
- // consistent. however, we've now done writes. thus we have a problem.
- err += "rbid at primary changed during resync/rollback";
+ WriteUnitOfWork wuow(txn);
+ if (options.flagsSet || cce->getCollectionOptions(txn).flagsSet) {
+ cce->updateFlags(txn, options.flags);
}
- if (!err.empty()) {
- severe() << "rolling back : " << err
- << ". A full resync will be necessary.";
- // TODO: reset minvalid so that we are permanently in fatal state
- // TODO: don't be fatal, but rather, get all the data first.
- throw RSFatalException();
+
+ auto status = collection->setValidator(txn, options.validator);
+ if (!status.isOK()) {
+ throw RSFatalException(str::stream()
+ << "Failed to set validator: " << status.toString());
}
- log() << "rollback 4.3";
+ wuow.commit();
}
- map<string,shared_ptr<Helpers::RemoveSaver> > removeSavers;
+ // we did more reading from primary, so check it again for a rollback (which would mess
+ // us up), and make minValid newer.
+ log() << "rollback 4.2";
- log() << "rollback 4.6";
- // drop collections to drop before doing individual fixups - that might make things faster
- // below actually if there were subsequent inserts to rollback
- for (set<string>::iterator it = fixUpInfo.toDrop.begin();
- it != fixUpInfo.toDrop.end();
- it++) {
- log() << "rollback drop: " << *it;
-
- ScopedTransaction transaction(txn, MODE_IX);
- const NamespaceString nss(*it);
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- Database* db = dbHolder().get(txn, nsToDatabaseSubstring(*it));
- if (db) {
- WriteUnitOfWork wunit(txn);
+ string err;
+ try {
+ newMinValid = rollbackSource.getLastOperation();
+ if (newMinValid.isEmpty()) {
+ err = "can't get minvalid from sync source";
+ } else {
+ OpTime minValid = extractOpTime(newMinValid);
+ log() << "minvalid=" << minValid;
+ setMinValid(txn, minValid);
+ }
+ } catch (const DBException& e) {
+ err = "can't get/set minvalid: ";
+ err += e.what();
+ }
+ if (fixUpInfo.rbid != rollbackSource.getRollbackId()) {
+ // our source rolled back itself. so the data we received isn't necessarily
+ // consistent. however, we've now done writes. thus we have a problem.
+ err += "rbid at primary changed during resync/rollback";
+ }
+ if (!err.empty()) {
+ severe() << "rolling back : " << err << ". A full resync will be necessary.";
+ // TODO: reset minvalid so that we are permanently in fatal state
+ // TODO: don't be fatal, but rather, get all the data first.
+ throw RSFatalException();
+ }
+ log() << "rollback 4.3";
+ }
- shared_ptr<Helpers::RemoveSaver>& removeSaver = removeSavers[*it];
- if (!removeSaver)
- removeSaver.reset(new Helpers::RemoveSaver("rollback", "", *it));
-
- // perform a collection scan and write all documents in the collection to disk
- std::unique_ptr<PlanExecutor> exec(
- InternalPlanner::collectionScan(txn,
- *it,
- db->getCollection(*it)));
- BSONObj curObj;
- PlanExecutor::ExecState execState;
- while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, NULL))) {
- removeSaver->goingToDelete(curObj);
- }
- if (execState != PlanExecutor::IS_EOF) {
- if (execState == PlanExecutor::FAILURE &&
- WorkingSetCommon::isValidStatusMemberObject(curObj)) {
- Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj);
- severe() << "rolling back createCollection on " << *it
- << " failed with " << errorStatus
- << ". A full resync is necessary.";
- }
- else {
- severe() << "rolling back createCollection on " << *it
- << " failed. A full resync is necessary.";
- }
-
- throw RSFatalException();
+ map<string, shared_ptr<Helpers::RemoveSaver>> removeSavers;
+
+ log() << "rollback 4.6";
+ // drop collections to drop before doing individual fixups - that might make things faster
+ // below actually if there were subsequent inserts to rollback
+ for (set<string>::iterator it = fixUpInfo.toDrop.begin(); it != fixUpInfo.toDrop.end(); it++) {
+ log() << "rollback drop: " << *it;
+
+ ScopedTransaction transaction(txn, MODE_IX);
+ const NamespaceString nss(*it);
+ Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
+ Database* db = dbHolder().get(txn, nsToDatabaseSubstring(*it));
+ if (db) {
+ WriteUnitOfWork wunit(txn);
+
+ shared_ptr<Helpers::RemoveSaver>& removeSaver = removeSavers[*it];
+ if (!removeSaver)
+ removeSaver.reset(new Helpers::RemoveSaver("rollback", "", *it));
+
+ // perform a collection scan and write all documents in the collection to disk
+ std::unique_ptr<PlanExecutor> exec(
+ InternalPlanner::collectionScan(txn, *it, db->getCollection(*it)));
+ BSONObj curObj;
+ PlanExecutor::ExecState execState;
+ while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, NULL))) {
+ removeSaver->goingToDelete(curObj);
+ }
+ if (execState != PlanExecutor::IS_EOF) {
+ if (execState == PlanExecutor::FAILURE &&
+ WorkingSetCommon::isValidStatusMemberObject(curObj)) {
+ Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj);
+ severe() << "rolling back createCollection on " << *it << " failed with "
+ << errorStatus << ". A full resync is necessary.";
+ } else {
+ severe() << "rolling back createCollection on " << *it
+ << " failed. A full resync is necessary.";
}
- db->dropCollection(txn, *it);
- wunit.commit();
+ throw RSFatalException();
}
+
+ db->dropCollection(txn, *it);
+ wunit.commit();
}
+ }
- log() << "rollback 4.7";
- unsigned deletes = 0, updates = 0;
- time_t lastProgressUpdate = time(0);
- time_t progressUpdateGap = 10;
- for (list<pair<DocID, BSONObj> >::iterator it = goodVersions.begin();
- it != goodVersions.end();
- it++) {
- time_t now = time(0);
- if (now - lastProgressUpdate > progressUpdateGap) {
- log() << deletes << " delete and "
- << updates << " update operations processed out of "
- << goodVersions.size() << " total operations";
- lastProgressUpdate = now;
+ log() << "rollback 4.7";
+ unsigned deletes = 0, updates = 0;
+ time_t lastProgressUpdate = time(0);
+ time_t progressUpdateGap = 10;
+ for (list<pair<DocID, BSONObj>>::iterator it = goodVersions.begin(); it != goodVersions.end();
+ it++) {
+ time_t now = time(0);
+ if (now - lastProgressUpdate > progressUpdateGap) {
+ log() << deletes << " delete and " << updates << " update operations processed out of "
+ << goodVersions.size() << " total operations";
+ lastProgressUpdate = now;
+ }
+ const DocID& doc = it->first;
+ BSONObj pattern = doc._id.wrap(); // { _id : ... }
+ try {
+ verify(doc.ns && *doc.ns);
+ if (fixUpInfo.collectionsToResyncData.count(doc.ns)) {
+ // we just synced this entire collection
+ continue;
}
- const DocID& doc = it->first;
- BSONObj pattern = doc._id.wrap(); // { _id : ... }
- try {
- verify(doc.ns && *doc.ns);
- if (fixUpInfo.collectionsToResyncData.count(doc.ns)) {
- // we just synced this entire collection
- continue;
- }
- // keep an archive of items rolled back
- shared_ptr<Helpers::RemoveSaver>& removeSaver = removeSavers[doc.ns];
- if (!removeSaver)
- removeSaver.reset(new Helpers::RemoveSaver("rollback", "", doc.ns));
+ // keep an archive of items rolled back
+ shared_ptr<Helpers::RemoveSaver>& removeSaver = removeSavers[doc.ns];
+ if (!removeSaver)
+ removeSaver.reset(new Helpers::RemoveSaver("rollback", "", doc.ns));
- // todo: lots of overhead in context, this can be faster
- const NamespaceString docNss(doc.ns);
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock docDbLock(txn->lockState(), docNss.db(), MODE_X);
- OldClientContext ctx(txn, doc.ns);
+ // todo: lots of overhead in context, this can be faster
+ const NamespaceString docNss(doc.ns);
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock docDbLock(txn->lockState(), docNss.db(), MODE_X);
+ OldClientContext ctx(txn, doc.ns);
+
+ // Add the doc to our rollback file
+ BSONObj obj;
+ Collection* collection = ctx.db()->getCollection(doc.ns);
+
+ // Do not log an error when undoing an insert on a no longer existent collection.
+ // It is likely that the collection was dropped as part of rolling back a
+ // createCollection command and regardless, the document no longer exists.
+ if (collection) {
+ bool found = Helpers::findOne(txn, collection, pattern, obj, false);
+ if (found) {
+ removeSaver->goingToDelete(obj);
+ } else {
+ error() << "rollback cannot find object: " << pattern << " in namespace "
+ << doc.ns;
+ }
+ }
- // Add the doc to our rollback file
- BSONObj obj;
- Collection* collection = ctx.db()->getCollection(doc.ns);
+ if (it->second.isEmpty()) {
+ // wasn't on the primary; delete.
+ // TODO 1.6 : can't delete from a capped collection. need to handle that here.
+ deletes++;
- // Do not log an error when undoing an insert on a no longer existent collection.
- // It is likely that the collection was dropped as part of rolling back a
- // createCollection command and regardless, the document no longer exists.
if (collection) {
- bool found = Helpers::findOne(txn, collection, pattern, obj, false);
- if (found) {
- removeSaver->goingToDelete(obj);
- }
- else {
- error() << "rollback cannot find object: " << pattern
- << " in namespace " << doc.ns;
- }
- }
-
- if (it->second.isEmpty()) {
- // wasn't on the primary; delete.
- // TODO 1.6 : can't delete from a capped collection. need to handle that here.
- deletes++;
-
- if (collection) {
- if (collection->isCapped()) {
- // can't delete from a capped collection - so we truncate instead. if
- // this item must go, so must all successors!!!
- try {
- // TODO: IIRC cappedTruncateAfter does not handle completely empty.
- // this will crazy slow if no _id index.
- long long start = Listener::getElapsedTimeMillis();
- RecordId loc = Helpers::findOne(txn, collection, pattern, false);
- if (Listener::getElapsedTimeMillis() - start > 200)
- warning() << "roll back slow no _id index for "
- << doc.ns << " perhaps?";
- // would be faster but requires index:
- // RecordId loc = Helpers::findById(nsd, pattern);
- if (!loc.isNull()) {
- try {
- collection->temp_cappedTruncateAfter(txn, loc, true);
- }
- catch (const DBException& e) {
- if (e.getCode() == 13415) {
- // hack: need to just make cappedTruncate do this...
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- uassertStatusOK(collection->truncate(txn));
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- txn,
- "truncate",
- collection->ns().ns());
- }
- else {
- throw e;
+ if (collection->isCapped()) {
+ // can't delete from a capped collection - so we truncate instead. if
+ // this item must go, so must all successors!!!
+ try {
+ // TODO: IIRC cappedTruncateAfter does not handle completely empty.
+ // This will be crazy slow if there is no _id index.
+ long long start = Listener::getElapsedTimeMillis();
+ RecordId loc = Helpers::findOne(txn, collection, pattern, false);
+ if (Listener::getElapsedTimeMillis() - start > 200)
+ warning() << "roll back slow no _id index for " << doc.ns
+ << " perhaps?";
+ // would be faster but requires index:
+ // RecordId loc = Helpers::findById(nsd, pattern);
+ if (!loc.isNull()) {
+ try {
+ collection->temp_cappedTruncateAfter(txn, loc, true);
+ } catch (const DBException& e) {
+ if (e.getCode() == 13415) {
+ // hack: need to just make cappedTruncate do this...
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ uassertStatusOK(collection->truncate(txn));
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ txn, "truncate", collection->ns().ns());
+ } else {
+ throw e;
}
}
}
- catch (const DBException& e) {
- error() << "rolling back capped collection rec "
- << doc.ns << ' ' << e.toString();
- }
- }
- else {
- deleteObjects(txn,
- ctx.db(),
- doc.ns,
- pattern,
- PlanExecutor::YIELD_MANUAL,
- true, // justone
- true); // god
+ } catch (const DBException& e) {
+ error() << "rolling back capped collection rec " << doc.ns << ' '
+ << e.toString();
}
- // did we just empty the collection? if so let's check if it even
- // exists on the source.
- if (collection->numRecords(txn) == 0) {
- try {
- NamespaceString nss(doc.ns);
- auto infoResult = rollbackSource.getCollectionInfo(nss);
- if (!infoResult.isOK()) {
- // we should drop
- WriteUnitOfWork wunit(txn);
- ctx.db()->dropCollection(txn, doc.ns);
- wunit.commit();
- }
- }
- catch (const DBException&) {
- // this isn't *that* big a deal, but is bad.
- warning() << "rollback error querying for existence of "
- << doc.ns << " at the primary, ignoring";
+ } else {
+ deleteObjects(txn,
+ ctx.db(),
+ doc.ns,
+ pattern,
+ PlanExecutor::YIELD_MANUAL,
+ true, // justone
+ true); // god
+ }
+ // did we just empty the collection? if so let's check if it even
+ // exists on the source.
+ if (collection->numRecords(txn) == 0) {
+ try {
+ NamespaceString nss(doc.ns);
+ auto infoResult = rollbackSource.getCollectionInfo(nss);
+ if (!infoResult.isOK()) {
+ // we should drop
+ WriteUnitOfWork wunit(txn);
+ ctx.db()->dropCollection(txn, doc.ns);
+ wunit.commit();
}
+ } catch (const DBException&) {
+ // this isn't *that* big a deal, but is bad.
+ warning() << "rollback error querying for existence of " << doc.ns
+ << " at the primary, ignoring";
}
}
}
- else {
- // TODO faster...
- OpDebug debug;
- updates++;
-
- const NamespaceString requestNs(doc.ns);
- UpdateRequest request(requestNs);
-
- request.setQuery(pattern);
- request.setUpdates(it->second);
- request.setGod();
- request.setUpsert();
- UpdateLifecycleImpl updateLifecycle(true, requestNs);
- request.setLifecycle(&updateLifecycle);
-
- update(txn, ctx.db(), request, &debug);
-
- }
- }
- catch (const DBException& e) {
- log() << "exception in rollback ns:" << doc.ns << ' ' << pattern.toString()
- << ' ' << e.toString() << " ndeletes:" << deletes;
- warn = true;
- }
- }
-
- removeSavers.clear(); // this effectively closes all of them
- log() << "rollback 5 d:" << deletes << " u:" << updates;
- log() << "rollback 6";
-
- // clean up oplog
- LOG(2) << "rollback truncate oplog after " <<
- fixUpInfo.commonPoint.toStringPretty();
- {
- const NamespaceString oplogNss(rsOplogName);
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock oplogDbLock(txn->lockState(), oplogNss.db(), MODE_IX);
- Lock::CollectionLock oplogCollectionLoc(txn->lockState(), oplogNss.ns(), MODE_X);
- OldClientContext ctx(txn, rsOplogName);
- Collection* oplogCollection = ctx.db()->getCollection(rsOplogName);
- if (!oplogCollection) {
- fassertFailedWithStatusNoTrace(
- 13423,
- Status(ErrorCodes::UnrecoverableRollbackError, str::stream() <<
- "Can't find " << rsOplogName));
+ } else {
+ // TODO faster...
+ OpDebug debug;
+ updates++;
+
+ const NamespaceString requestNs(doc.ns);
+ UpdateRequest request(requestNs);
+
+ request.setQuery(pattern);
+ request.setUpdates(it->second);
+ request.setGod();
+ request.setUpsert();
+ UpdateLifecycleImpl updateLifecycle(true, requestNs);
+ request.setLifecycle(&updateLifecycle);
+
+ update(txn, ctx.db(), request, &debug);
}
- // TODO: fatal error if this throws?
- oplogCollection->temp_cappedTruncateAfter(txn, fixUpInfo.commonPointOurDiskloc, false);
- }
-
- Status status = getGlobalAuthorizationManager()->initialize(txn);
- if (!status.isOK()) {
- warning() << "Failed to reinitialize auth data after rollback: " << status;
+ } catch (const DBException& e) {
+ log() << "exception in rollback ns:" << doc.ns << ' ' << pattern.toString() << ' '
+ << e.toString() << " ndeletes:" << deletes;
warn = true;
}
-
- // Reload the lastOpTimeApplied value in the replcoord and the lastAppliedHash value in
- // bgsync to reflect our new last op.
- replCoord->resetLastOpTimeFromOplog(txn);
- BackgroundSync::get()->loadLastAppliedHash(txn);
-
- // done
- if (warn)
- warning() << "issues during syncRollback, see log";
- else
- log() << "rollback done";
}
- Status _syncRollback(OperationContext* txn,
- const OplogInterface& localOplog,
- const RollbackSource& rollbackSource,
- ReplicationCoordinator* replCoord,
- const SleepSecondsFn& sleepSecondsFn) {
- invariant(!txn->lockState()->isLocked());
-
- log() << "rollback 0";
-
- /** by doing this, we will not service reads (return an error as we aren't in secondary
- * state. that perhaps is moot because of the write lock above, but that write lock
- * probably gets deferred or removed or yielded later anyway.
- *
- * also, this is better for status reporting - we know what is happening.
- */
- {
- Lock::GlobalWrite globalWrite(txn->lockState());
- if (!replCoord->setFollowerMode(MemberState::RS_ROLLBACK)) {
- return Status(
- ErrorCodes::OperationFailed, str::stream() <<
- "Cannot transition from " << replCoord->getMemberState().toString() <<
- " to " << MemberState(MemberState::RS_ROLLBACK).toString());
- }
+ removeSavers.clear(); // this effectively closes all of them
+ log() << "rollback 5 d:" << deletes << " u:" << updates;
+ log() << "rollback 6";
+
+ // clean up oplog
+ LOG(2) << "rollback truncate oplog after " << fixUpInfo.commonPoint.toStringPretty();
+ {
+ const NamespaceString oplogNss(rsOplogName);
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock oplogDbLock(txn->lockState(), oplogNss.db(), MODE_IX);
+ Lock::CollectionLock oplogCollectionLoc(txn->lockState(), oplogNss.ns(), MODE_X);
+ OldClientContext ctx(txn, rsOplogName);
+ Collection* oplogCollection = ctx.db()->getCollection(rsOplogName);
+ if (!oplogCollection) {
+ fassertFailedWithStatusNoTrace(13423,
+ Status(ErrorCodes::UnrecoverableRollbackError,
+ str::stream() << "Can't find " << rsOplogName));
}
+ // TODO: fatal error if this throws?
+ oplogCollection->temp_cappedTruncateAfter(txn, fixUpInfo.commonPointOurDiskloc, false);
+ }
- FixUpInfo how;
- log() << "rollback 1";
- how.rbid = rollbackSource.getRollbackId();
- {
- log() << "rollback 2 FindCommonPoint";
- try {
- auto processOperationForFixUp = [&how](const BSONObj& operation) {
- return refetch(how, operation);
- };
- auto res = syncRollBackLocalOperations(
- localOplog,
- rollbackSource.getOplog(),
- processOperationForFixUp);
- if (!res.isOK()) {
- switch (res.getStatus().code()) {
- case ErrorCodes::OplogStartMissing:
- case ErrorCodes::UnrecoverableRollbackError:
- sleepSecondsFn(Seconds(1));
- return res.getStatus();
- default:
- throw RSFatalException(res.getStatus().toString());
- }
- }
- else {
- how.commonPoint = res.getValue().first;
- how.commonPointOurDiskloc = res.getValue().second;
- }
- }
- catch (const RSFatalException& e) {
- error() << string(e.what());
- return Status(ErrorCodes::UnrecoverableRollbackError, str::stream() <<
- "need to rollback, but unable to determine common point between"
- "local and remote oplog: " << e.what(),
- 18752);
- }
- catch (const DBException& e) {
- warning() << "rollback 2 exception " << e.toString() << "; sleeping 1 min";
+ Status status = getGlobalAuthorizationManager()->initialize(txn);
+ if (!status.isOK()) {
+ warning() << "Failed to reinitialize auth data after rollback: " << status;
+ warn = true;
+ }
- sleepSecondsFn(Seconds(60));
- throw;
- }
+ // Reload the lastOpTimeApplied value in the replcoord and the lastAppliedHash value in
+ // bgsync to reflect our new last op.
+ replCoord->resetLastOpTimeFromOplog(txn);
+ BackgroundSync::get()->loadLastAppliedHash(txn);
+
+ // done
+ if (warn)
+ warning() << "issues during syncRollback, see log";
+ else
+ log() << "rollback done";
+}
+
+Status _syncRollback(OperationContext* txn,
+ const OplogInterface& localOplog,
+ const RollbackSource& rollbackSource,
+ ReplicationCoordinator* replCoord,
+ const SleepSecondsFn& sleepSecondsFn) {
+ invariant(!txn->lockState()->isLocked());
+
+ log() << "rollback 0";
+
+ /** by doing this, we will not service reads (return an error as we aren't in secondary
+ * state. that perhaps is moot because of the write lock above, but that write lock
+ * probably gets deferred or removed or yielded later anyway.
+ *
+ * also, this is better for status reporting - we know what is happening.
+ */
+ {
+ Lock::GlobalWrite globalWrite(txn->lockState());
+ if (!replCoord->setFollowerMode(MemberState::RS_ROLLBACK)) {
+ return Status(ErrorCodes::OperationFailed,
+ str::stream() << "Cannot transition from "
+ << replCoord->getMemberState().toString() << " to "
+ << MemberState(MemberState::RS_ROLLBACK).toString());
}
+ }
- log() << "rollback 3 fixup";
-
- replCoord->incrementRollbackID();
+ FixUpInfo how;
+ log() << "rollback 1";
+ how.rbid = rollbackSource.getRollbackId();
+ {
+ log() << "rollback 2 FindCommonPoint";
try {
- syncFixUp(txn, how, rollbackSource, replCoord);
- }
- catch (const RSFatalException& e) {
- error() << "exception during rollback: " << e.what();
- return Status(ErrorCodes::UnrecoverableRollbackError, str::stream() <<
- "exception during rollback: " << e.what(),
- 18753);
- }
- catch (...) {
- replCoord->incrementRollbackID();
-
- if (!replCoord->setFollowerMode(MemberState::RS_RECOVERING)) {
- warning() << "Failed to transition into " <<
- MemberState(MemberState::RS_RECOVERING) << "; expected to be in state " <<
- MemberState(MemberState::RS_ROLLBACK) << "but found self in " <<
- replCoord->getMemberState();
+ auto processOperationForFixUp =
+ [&how](const BSONObj& operation) { return refetch(how, operation); };
+ auto res = syncRollBackLocalOperations(
+ localOplog, rollbackSource.getOplog(), processOperationForFixUp);
+ if (!res.isOK()) {
+ switch (res.getStatus().code()) {
+ case ErrorCodes::OplogStartMissing:
+ case ErrorCodes::UnrecoverableRollbackError:
+ sleepSecondsFn(Seconds(1));
+ return res.getStatus();
+ default:
+ throw RSFatalException(res.getStatus().toString());
+ }
+ } else {
+ how.commonPoint = res.getValue().first;
+ how.commonPointOurDiskloc = res.getValue().second;
}
-
+ } catch (const RSFatalException& e) {
+ error() << string(e.what());
+ return Status(ErrorCodes::UnrecoverableRollbackError,
+ str::stream()
+ << "need to rollback, but unable to determine common point between"
+ "local and remote oplog: " << e.what(),
+ 18752);
+ } catch (const DBException& e) {
+ warning() << "rollback 2 exception " << e.toString() << "; sleeping 1 min";
+
+ sleepSecondsFn(Seconds(60));
throw;
}
+ }
+
+ log() << "rollback 3 fixup";
+
+ replCoord->incrementRollbackID();
+ try {
+ syncFixUp(txn, how, rollbackSource, replCoord);
+ } catch (const RSFatalException& e) {
+ error() << "exception during rollback: " << e.what();
+ return Status(ErrorCodes::UnrecoverableRollbackError,
+ str::stream() << "exception during rollback: " << e.what(),
+ 18753);
+ } catch (...) {
replCoord->incrementRollbackID();
- // success - leave "ROLLBACK" state
- // can go to SECONDARY once minvalid is achieved
if (!replCoord->setFollowerMode(MemberState::RS_RECOVERING)) {
- warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING) <<
- "; expected to be in state " << MemberState(MemberState::RS_ROLLBACK) <<
- "but found self in " << replCoord->getMemberState();
+ warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING)
+ << "; expected to be in state " << MemberState(MemberState::RS_ROLLBACK)
+ << "but found self in " << replCoord->getMemberState();
}
- return Status::OK();
+ throw;
}
-
-} // namespace
-
- Status syncRollback(OperationContext* txn,
- const OpTime& lastOpTimeApplied,
- const OplogInterface& localOplog,
- const RollbackSource& rollbackSource,
- ReplicationCoordinator* replCoord,
- const SleepSecondsFn& sleepSecondsFn) {
-
- invariant(txn);
- invariant(replCoord);
-
- // check that we are at minvalid, otherwise we cannot rollback as we may be in an
- // inconsistent state
- {
- OpTime minvalid = getMinValid(txn);
- if( minvalid > lastOpTimeApplied ) {
- severe() << "need to rollback, but in inconsistent state" << endl;
- return Status(ErrorCodes::UnrecoverableRollbackError, str::stream() <<
- "need to rollback, but in inconsistent state. " <<
- "minvalid: " << minvalid.toString() << " our last optime: " <<
- lastOpTimeApplied.toString(),
- 18750);
- }
- }
-
- log() << "beginning rollback" << rsLog;
-
- DisableDocumentValidation validationDisabler(txn);
- txn->setReplicatedWrites(false);
- Status status = _syncRollback(txn,
- localOplog,
- rollbackSource,
- replCoord,
- sleepSecondsFn);
-
- log() << "rollback finished" << rsLog;
- return status;
+ replCoord->incrementRollbackID();
+
+ // success - leave "ROLLBACK" state
+ // can go to SECONDARY once minvalid is achieved
+ if (!replCoord->setFollowerMode(MemberState::RS_RECOVERING)) {
+ warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING)
+ << "; expected to be in state " << MemberState(MemberState::RS_ROLLBACK)
+ << "but found self in " << replCoord->getMemberState();
}
- Status syncRollback(OperationContext* txn,
- const OpTime& lastOpTimeWritten,
- const OplogInterface& localOplog,
- const RollbackSource& rollbackSource,
- ReplicationCoordinator* replCoord) {
-
- return syncRollback(txn,
- lastOpTimeWritten,
- localOplog,
- rollbackSource,
- replCoord,
- [](Seconds seconds) { sleepsecs(seconds.count()); });
+ return Status::OK();
+}
+
+} // namespace
+
+Status syncRollback(OperationContext* txn,
+ const OpTime& lastOpTimeApplied,
+ const OplogInterface& localOplog,
+ const RollbackSource& rollbackSource,
+ ReplicationCoordinator* replCoord,
+ const SleepSecondsFn& sleepSecondsFn) {
+ invariant(txn);
+ invariant(replCoord);
+
+ // check that we are at minvalid, otherwise we cannot rollback as we may be in an
+ // inconsistent state
+ {
+ OpTime minvalid = getMinValid(txn);
+ if (minvalid > lastOpTimeApplied) {
+ severe() << "need to rollback, but in inconsistent state" << endl;
+ return Status(ErrorCodes::UnrecoverableRollbackError,
+ str::stream() << "need to rollback, but in inconsistent state. "
+ << "minvalid: " << minvalid.toString()
+ << " our last optime: " << lastOpTimeApplied.toString(),
+ 18750);
+ }
}
-} // namespace repl
-} // namespace mongo
+ log() << "beginning rollback" << rsLog;
+
+ DisableDocumentValidation validationDisabler(txn);
+ txn->setReplicatedWrites(false);
+ Status status = _syncRollback(txn, localOplog, rollbackSource, replCoord, sleepSecondsFn);
+
+ log() << "rollback finished" << rsLog;
+ return status;
+}
+
+Status syncRollback(OperationContext* txn,
+ const OpTime& lastOpTimeWritten,
+ const OplogInterface& localOplog,
+ const RollbackSource& rollbackSource,
+ ReplicationCoordinator* replCoord) {
+ return syncRollback(txn,
+ lastOpTimeWritten,
+ localOplog,
+ rollbackSource,
+ replCoord,
+ [](Seconds seconds) { sleepsecs(seconds.count()); });
+}
+
+} // namespace repl
+} // namespace mongo
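FixUpInfo::toRefetch in the diff above is deliberately a set ordered by (ns, _id): however many rolled-back operations touched the same document, it is refetched from the sync source only once. The following minimal standalone sketch shows that deduplication; it is not MongoDB code, and DocKey plus the std::string stand-ins for the BSON namespace and _id element are invented for illustration only.

    #include <iostream>
    #include <set>
    #include <string>

    struct DocKey {
        std::string ns;  // namespace, e.g. "test.foo"
        std::string id;  // stand-in for the _id element
        bool operator<(const DocKey& other) const {
            if (ns != other.ns)
                return ns < other.ns;
            return id < other.id;
        }
    };

    int main() {
        std::set<DocKey> toRefetch;

        // Three rolled-back ops touch the same document and one touches another document...
        toRefetch.insert({"test.foo", "1"});
        toRefetch.insert({"test.foo", "1"});
        toRefetch.insert({"test.foo", "1"});
        toRefetch.insert({"test.bar", "2"});

        // ...but only two documents actually need to be refetched from the sync source.
        std::cout << "documents to refetch: " << toRefetch.size() << "\n";  // prints 2
        return 0;
    }
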
diff --git a/src/mongo/db/repl/rs_rollback.h b/src/mongo/db/repl/rs_rollback.h
index 4409d9e0be9..793521393a9 100644
--- a/src/mongo/db/repl/rs_rollback.h
+++ b/src/mongo/db/repl/rs_rollback.h
@@ -37,56 +37,56 @@
namespace mongo {
- class DBClientConnection;
- class NamespaceString;
- class OperationContext;
+class DBClientConnection;
+class NamespaceString;
+class OperationContext;
namespace repl {
- class OplogInterface;
- class OpTime;
- class ReplicationCoordinator;
- class RollbackSource;
+class OplogInterface;
+class OpTime;
+class ReplicationCoordinator;
+class RollbackSource;
- /**
- * Initiates the rollback process.
- * This function assumes the preconditions for undertaking rollback have already been met;
- * we have ops in our oplog that our sync source does not have, and we are not currently
- * PRIMARY.
- * The rollback procedure is:
- * - find the common point between this node and its sync source
- * - undo operations by fetching all documents affected, then replaying
- * the sync source's oplog until we reach the time in the oplog when we fetched the last
- * document.
- * This function can throw std::exception on failures.
- * This function runs a command on the sync source to detect if the sync source rolls back
- * while our rollback is in progress.
- *
- * @param txn Used to read and write from this node's databases
- * @param lastOpTimeWritten The last OpTime applied by the applier
- * @param localOplog reads the oplog on this server.
- * @param rollbackSource interface for sync source:
- * provides oplog; and
- * supports fetching documents and copying collections.
- * @param replCoord Used to track the rollback ID and to change the follower state
- *
- * Failures: Most failures are returned as a status but some failures throw an std::exception.
- */
+/**
+ * Initiates the rollback process.
+ * This function assumes the preconditions for undertaking rollback have already been met;
+ * we have ops in our oplog that our sync source does not have, and we are not currently
+ * PRIMARY.
+ * The rollback procedure is:
+ * - find the common point between this node and its sync source
+ * - undo operations by fetching all documents affected, then replaying
+ * the sync source's oplog until we reach the time in the oplog when we fetched the last
+ * document.
+ * This function can throw std::exception on failures.
+ * This function runs a command on the sync source to detect if the sync source rolls back
+ * while our rollback is in progress.
+ *
+ * @param txn Used to read and write from this node's databases
+ * @param lastOpTimeWritten The last OpTime applied by the applier
+ * @param localOplog reads the oplog on this server.
+ * @param rollbackSource interface for sync source:
+ * provides oplog; and
+ * supports fetching documents and copying collections.
+ * @param replCoord Used to track the rollback ID and to change the follower state
+ *
+ * Failures: Most failures are returned as a status but some failures throw an std::exception.
+ */
- using SleepSecondsFn = stdx::function<void (Seconds)>;
+using SleepSecondsFn = stdx::function<void(Seconds)>;
- Status syncRollback(OperationContext* txn,
- const OpTime& lastOpTimeWritten,
- const OplogInterface& localOplog,
- const RollbackSource& rollbackSource,
- ReplicationCoordinator* replCoord,
- const SleepSecondsFn& sleepSecondsFn);
+Status syncRollback(OperationContext* txn,
+ const OpTime& lastOpTimeWritten,
+ const OplogInterface& localOplog,
+ const RollbackSource& rollbackSource,
+ ReplicationCoordinator* replCoord,
+ const SleepSecondsFn& sleepSecondsFn);
- Status syncRollback(OperationContext* txn,
- const OpTime& lastOpTimeWritten,
- const OplogInterface& localOplog,
- const RollbackSource& rollbackSource,
- ReplicationCoordinator* replCoord);
+Status syncRollback(OperationContext* txn,
+ const OpTime& lastOpTimeWritten,
+ const OplogInterface& localOplog,
+ const RollbackSource& rollbackSource,
+ ReplicationCoordinator* replCoord);
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
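For reference, a minimal sketch of how the two syncRollback() overloads declared above relate from a caller's point of view, assuming the surrounding repl headers are in scope; the runRollbackForTest helper and its wiring are hypothetical and not part of this patch:

    Status runRollbackForTest(OperationContext* txn,
                              const OpTime& lastOpTimeWritten,
                              const OplogInterface& localOplog,
                              const RollbackSource& rollbackSource,
                              ReplicationCoordinator* replCoord) {
        // A test can pass a no-op SleepSecondsFn so rollback retries do not block;
        // the five-argument convenience overload supplies a real sleepsecs()-based
        // function instead.
        SleepSecondsFn noSleep = [](Seconds) {};
        return syncRollback(
            txn, lastOpTimeWritten, localOplog, rollbackSource, replCoord, noSleep);
    }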
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index c794f280244..a51d6aff511 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -54,522 +54,509 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
-
- const OplogInterfaceMock::Operations kEmptyMockOperations;
-
- ReplSettings createReplSettings() {
- ReplSettings settings;
- settings.oplogSize = 5 * 1024 * 1024;
- settings.replSet = "mySet/node1:12345";
- return settings;
+using namespace mongo;
+using namespace mongo::repl;
+
+const OplogInterfaceMock::Operations kEmptyMockOperations;
+
+ReplSettings createReplSettings() {
+ ReplSettings settings;
+ settings.oplogSize = 5 * 1024 * 1024;
+ settings.replSet = "mySet/node1:12345";
+ return settings;
+}
+
+class ReplicationCoordinatorRollbackMock : public ReplicationCoordinatorMock {
+public:
+ ReplicationCoordinatorRollbackMock();
+ void resetLastOpTimeFromOplog(OperationContext* txn) override;
+};
+
+ReplicationCoordinatorRollbackMock::ReplicationCoordinatorRollbackMock()
+ : ReplicationCoordinatorMock(createReplSettings()) {}
+
+void ReplicationCoordinatorRollbackMock::resetLastOpTimeFromOplog(OperationContext* txn) {}
+
+class RollbackSourceMock : public RollbackSource {
+public:
+ RollbackSourceMock(std::unique_ptr<OplogInterface> oplog);
+ int getRollbackId() const override;
+ const OplogInterface& getOplog() const override;
+ BSONObj getLastOperation() const override;
+ BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override;
+ void copyCollectionFromRemote(OperationContext* txn, const NamespaceString& nss) const override;
+ StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override;
+
+private:
+ std::unique_ptr<OplogInterface> _oplog;
+};
+
+RollbackSourceMock::RollbackSourceMock(std::unique_ptr<OplogInterface> oplog)
+ : _oplog(std::move(oplog)) {}
+
+const OplogInterface& RollbackSourceMock::getOplog() const {
+ return *_oplog;
+}
+
+int RollbackSourceMock::getRollbackId() const {
+ return 0;
+}
+
+BSONObj RollbackSourceMock::getLastOperation() const {
+ auto iter = _oplog->makeIterator();
+ auto result = iter->next();
+ ASSERT_OK(result.getStatus());
+ return result.getValue().first;
+}
+
+BSONObj RollbackSourceMock::findOne(const NamespaceString& nss, const BSONObj& filter) const {
+ return BSONObj();
+}
+
+void RollbackSourceMock::copyCollectionFromRemote(OperationContext* txn,
+ const NamespaceString& nss) const {}
+
+StatusWith<BSONObj> RollbackSourceMock::getCollectionInfo(const NamespaceString& nss) const {
+ return BSON("name" << nss.ns() << "options" << BSONObj());
+}
+
+class RSRollbackTest : public unittest::Test {
+protected:
+ std::unique_ptr<OperationContext> _txn;
+
+ // Owned by service context
+ ReplicationCoordinator* _coordinator;
+
+private:
+ void setUp() override;
+ void tearDown() override;
+};
+
+void RSRollbackTest::setUp() {
+ ServiceContext* serviceContext = getGlobalServiceContext();
+ if (!serviceContext->getGlobalStorageEngine()) {
+ // When using the 'devnull' storage engine, it is fine for the temporary directory to
+ // go away after the global storage engine is initialized.
+ unittest::TempDir tempDir("rs_rollback_test");
+        mongo::storageGlobalParams.dbpath = tempDir.path();
+ mongo::storageGlobalParams.engine = "inMemoryExperiment";
+ mongo::storageGlobalParams.engineSetByUser = true;
+ serviceContext->initializeGlobalStorageEngine();
}
- class ReplicationCoordinatorRollbackMock : public ReplicationCoordinatorMock {
- public:
- ReplicationCoordinatorRollbackMock();
- void resetLastOpTimeFromOplog(OperationContext* txn) override;
- };
+ Client::initThreadIfNotAlready();
+ _txn.reset(new OperationContextReplMock(&cc(), 1));
+ _coordinator = new ReplicationCoordinatorRollbackMock();
- ReplicationCoordinatorRollbackMock::ReplicationCoordinatorRollbackMock()
- : ReplicationCoordinatorMock(createReplSettings()) { }
+ setGlobalReplicationCoordinator(_coordinator);
- void ReplicationCoordinatorRollbackMock::resetLastOpTimeFromOplog(OperationContext* txn) { }
+ setOplogCollectionName();
+}
- class RollbackSourceMock : public RollbackSource {
- public:
- RollbackSourceMock(std::unique_ptr<OplogInterface> oplog);
- int getRollbackId() const override;
- const OplogInterface& getOplog() const override;
- BSONObj getLastOperation() const override;
- BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const override;
- void copyCollectionFromRemote(OperationContext* txn,
- const NamespaceString& nss) const override;
- StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const override;
- private:
- std::unique_ptr<OplogInterface> _oplog;
- };
-
- RollbackSourceMock::RollbackSourceMock(std::unique_ptr<OplogInterface> oplog)
- : _oplog(std::move(oplog)) { }
-
- const OplogInterface& RollbackSourceMock::getOplog() const {
- return *_oplog;
+void RSRollbackTest::tearDown() {
+ {
+ Lock::GlobalWrite globalLock(_txn->lockState());
+ BSONObjBuilder unused;
+ invariant(mongo::dbHolder().closeAll(_txn.get(), unused, false));
}
-
- int RollbackSourceMock::getRollbackId() const {
- return 0;
- }
-
- BSONObj RollbackSourceMock::getLastOperation() const {
- auto iter = _oplog->makeIterator();
- auto result = iter->next();
- ASSERT_OK(result.getStatus());
- return result.getValue().first;
- }
-
- BSONObj RollbackSourceMock::findOne(const NamespaceString& nss, const BSONObj& filter) const {
- return BSONObj();
- }
-
- void RollbackSourceMock::copyCollectionFromRemote(OperationContext* txn,
- const NamespaceString& nss) const { }
-
- StatusWith<BSONObj> RollbackSourceMock::getCollectionInfo(const NamespaceString& nss) const {
- return BSON("name" << nss.ns() << "options" << BSONObj());
- }
-
- class RSRollbackTest : public unittest::Test {
- protected:
- std::unique_ptr<OperationContext> _txn;
-
- // Owned by service context
- ReplicationCoordinator* _coordinator;
-
- private:
- void setUp() override;
- void tearDown() override;
+ _txn.reset();
+ setGlobalReplicationCoordinator(nullptr);
+}
+
+void noSleep(Seconds seconds) {}
+
+TEST_F(RSRollbackTest, InconsistentMinValid) {
+ repl::setMinValid(_txn.get(), OpTime(Timestamp(Seconds(1), 0), 0));
+ auto status = syncRollback(_txn.get(),
+ OpTime(),
+ OplogInterfaceMock(kEmptyMockOperations),
+ RollbackSourceMock(std::unique_ptr<OplogInterface>(
+ new OplogInterfaceMock(kEmptyMockOperations))),
+ _coordinator,
+ noSleep);
+ ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, status.code());
+ ASSERT_EQUALS(18750, status.location());
+}
+
+TEST_F(RSRollbackTest, SetFollowerModeFailed) {
+ class ReplicationCoordinatorSetFollowerModeMock : public ReplicationCoordinatorMock {
+ public:
+ ReplicationCoordinatorSetFollowerModeMock()
+ : ReplicationCoordinatorMock(createReplSettings()) {}
+ MemberState getMemberState() const override {
+ return MemberState::RS_DOWN;
+ }
+ bool setFollowerMode(const MemberState& newState) override {
+ return false;
+ }
};
-
- void RSRollbackTest::setUp() {
- ServiceContext* serviceContext = getGlobalServiceContext();
- if (!serviceContext->getGlobalStorageEngine()) {
- // When using the 'devnull' storage engine, it is fine for the temporary directory to
- // go away after the global storage engine is initialized.
- unittest::TempDir tempDir("rs_rollback_test");
- mongo::storageGlobalParams.dbpath = tempDir.path();
- mongo::storageGlobalParams.dbpath = tempDir.path();
- mongo::storageGlobalParams.engine = "inMemoryExperiment";
- mongo::storageGlobalParams.engineSetByUser = true;
- serviceContext->initializeGlobalStorageEngine();
+ _coordinator = new ReplicationCoordinatorSetFollowerModeMock();
+ setGlobalReplicationCoordinator(_coordinator);
+
+ ASSERT_EQUALS(ErrorCodes::OperationFailed,
+ syncRollback(_txn.get(),
+ OpTime(),
+ OplogInterfaceMock(kEmptyMockOperations),
+ RollbackSourceMock(std::unique_ptr<OplogInterface>(
+ new OplogInterfaceMock(kEmptyMockOperations))),
+ _coordinator,
+ noSleep).code());
+}
+
+TEST_F(RSRollbackTest, OplogStartMissing) {
+ OpTime ts(Timestamp(Seconds(1), 0), 0);
+ auto operation =
+ std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId());
+ ASSERT_EQUALS(
+ ErrorCodes::OplogStartMissing,
+ syncRollback(_txn.get(),
+ OpTime(),
+ OplogInterfaceMock(kEmptyMockOperations),
+ RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
+ operation,
+ }))),
+ _coordinator,
+ noSleep).code());
+}
+
+TEST_F(RSRollbackTest, NoRemoteOpLog) {
+ OpTime ts(Timestamp(Seconds(1), 0), 0);
+ auto operation =
+ std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId());
+ auto status = syncRollback(_txn.get(),
+ ts,
+ OplogInterfaceMock({operation}),
+ RollbackSourceMock(std::unique_ptr<OplogInterface>(
+ new OplogInterfaceMock(kEmptyMockOperations))),
+ _coordinator,
+ noSleep);
+ ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, status.code());
+ ASSERT_EQUALS(18752, status.location());
+}
+
+TEST_F(RSRollbackTest, RemoteGetRollbackIdThrows) {
+ OpTime ts(Timestamp(Seconds(1), 0), 0);
+ auto operation =
+ std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId());
+ class RollbackSourceLocal : public RollbackSourceMock {
+ public:
+ RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
+ : RollbackSourceMock(std::move(oplog)) {}
+ int getRollbackId() const override {
+ uassert(ErrorCodes::UnknownError, "getRollbackId() failed", false);
}
+ };
+ ASSERT_THROWS_CODE(syncRollback(_txn.get(),
+ ts,
+ OplogInterfaceMock({operation}),
+ RollbackSourceLocal(std::unique_ptr<OplogInterface>(
+ new OplogInterfaceMock(kEmptyMockOperations))),
+ _coordinator,
+ noSleep),
+ UserException,
+ ErrorCodes::UnknownError);
+}
+
+TEST_F(RSRollbackTest, BothOplogsAtCommonPoint) {
+ createOplog(_txn.get());
+ OpTime ts(Timestamp(Seconds(1), 0), 1);
+ auto operation =
+ std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId(1));
+ ASSERT_OK(
+ syncRollback(_txn.get(),
+ ts,
+ OplogInterfaceMock({operation}),
+ RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
+ operation,
+ }))),
+ _coordinator,
+ noSleep));
+}
- Client::initThreadIfNotAlready();
- _txn.reset(new OperationContextReplMock(&cc(), 1));
- _coordinator = new ReplicationCoordinatorRollbackMock();
-
- setGlobalReplicationCoordinator(_coordinator);
-
- setOplogCollectionName();
- }
+/**
+ * Create test collection
+ */
+void _createCollection(OperationContext* txn,
+ const NamespaceString& nss,
+ const CollectionOptions& options) {
+ Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
+ mongo::WriteUnitOfWork wuow(txn);
+ auto db = dbHolder().openDb(txn, nss.db());
+ ASSERT_TRUE(db);
+ db->dropCollection(txn, nss.ns());
+ ASSERT_TRUE(db->createCollection(txn, nss.ns(), options));
+ wuow.commit();
+}
+
+void _createCollection(OperationContext* txn,
+ const std::string& nss,
+ const CollectionOptions& options) {
+ _createCollection(txn, NamespaceString(nss), options);
+}
- void RSRollbackTest::tearDown() {
- {
- Lock::GlobalWrite globalLock(_txn->lockState());
- BSONObjBuilder unused;
- invariant(mongo::dbHolder().closeAll(_txn.get(), unused, false));
+/**
+ * Test function to roll back a delete operation.
+ * Returns number of records in collection after rolling back delete operation.
+ * If collection does not exist after rolling back, returns -1.
+ */
+int _testRollBackDelete(OperationContext* txn,
+ ReplicationCoordinator* coordinator,
+ const BSONObj& documentAtSource) {
+ auto commonOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
+ auto deleteOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "h" << 1LL << "op"
+ << "d"
+ << "ns"
+ << "test.t"
+ << "o" << BSON("_id" << 0)),
+ RecordId(2));
+ class RollbackSourceLocal : public RollbackSourceMock {
+ public:
+ RollbackSourceLocal(const BSONObj& documentAtSource, std::unique_ptr<OplogInterface> oplog)
+ : RollbackSourceMock(std::move(oplog)),
+ called(false),
+ _documentAtSource(documentAtSource) {}
+ BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const {
+ called = true;
+ return _documentAtSource;
}
- _txn.reset();
- setGlobalReplicationCoordinator(nullptr);
- }
-
- void noSleep(Seconds seconds) {}
-
- TEST_F(RSRollbackTest, InconsistentMinValid) {
- repl::setMinValid(_txn.get(), OpTime(Timestamp(Seconds(1), 0), 0));
- auto status =
- syncRollback(
- _txn.get(),
- OpTime(),
- OplogInterfaceMock(kEmptyMockOperations),
- RollbackSourceMock(std::unique_ptr<OplogInterface>(
- new OplogInterfaceMock(kEmptyMockOperations))),
- _coordinator,
- noSleep);
- ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, status.code());
- ASSERT_EQUALS(18750, status.location());
- }
-
- TEST_F(RSRollbackTest, SetFollowerModeFailed) {
- class ReplicationCoordinatorSetFollowerModeMock : public ReplicationCoordinatorMock {
- public:
- ReplicationCoordinatorSetFollowerModeMock()
- : ReplicationCoordinatorMock(createReplSettings()) { }
- MemberState getMemberState() const override { return MemberState::RS_DOWN; }
- bool setFollowerMode(const MemberState& newState) override { return false; }
- };
- _coordinator = new ReplicationCoordinatorSetFollowerModeMock();
- setGlobalReplicationCoordinator(_coordinator);
-
- ASSERT_EQUALS(
- ErrorCodes::OperationFailed,
- syncRollback(
- _txn.get(),
- OpTime(),
- OplogInterfaceMock(kEmptyMockOperations),
- RollbackSourceMock(std::unique_ptr<OplogInterface>(
- new OplogInterfaceMock(kEmptyMockOperations))),
- _coordinator,
- noSleep).code());
- }
-
- TEST_F(RSRollbackTest, OplogStartMissing) {
- OpTime ts(Timestamp(Seconds(1), 0), 0);
- auto operation =
- std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId());
- ASSERT_EQUALS(
- ErrorCodes::OplogStartMissing,
- syncRollback(
- _txn.get(),
- OpTime(),
- OplogInterfaceMock(kEmptyMockOperations),
- RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
- operation,
- }))),
- _coordinator,
- noSleep).code());
- }
+ mutable bool called;
- TEST_F(RSRollbackTest, NoRemoteOpLog) {
- OpTime ts(Timestamp(Seconds(1), 0), 0);
- auto operation =
- std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId());
- auto status =
- syncRollback(
- _txn.get(),
- ts,
- OplogInterfaceMock({operation}),
- RollbackSourceMock(std::unique_ptr<OplogInterface>(
- new OplogInterfaceMock(kEmptyMockOperations))),
- _coordinator,
- noSleep);
- ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, status.code());
- ASSERT_EQUALS(18752, status.location());
- }
-
- TEST_F(RSRollbackTest, RemoteGetRollbackIdThrows) {
- OpTime ts(Timestamp(Seconds(1), 0), 0);
- auto operation =
- std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId());
- class RollbackSourceLocal : public RollbackSourceMock {
- public:
- RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
- : RollbackSourceMock(std::move(oplog)) { }
- int getRollbackId() const override {
- uassert(ErrorCodes::UnknownError, "getRollbackId() failed", false);
- }
- };
- ASSERT_THROWS_CODE(
- syncRollback(
- _txn.get(),
- ts,
- OplogInterfaceMock({operation}),
- RollbackSourceLocal(std::unique_ptr<OplogInterface>(
- new OplogInterfaceMock(kEmptyMockOperations))),
- _coordinator,
- noSleep),
- UserException,
- ErrorCodes::UnknownError);
- }
-
- TEST_F(RSRollbackTest, BothOplogsAtCommonPoint) {
- createOplog(_txn.get());
- OpTime ts(Timestamp(Seconds(1), 0), 1);
- auto operation =
- std::make_pair(BSON("ts" << ts.getTimestamp() << "h" << ts.getTerm()), RecordId(1));
- ASSERT_OK(
- syncRollback(
- _txn.get(),
- ts,
- OplogInterfaceMock({operation}),
- RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
- operation,
- }))),
- _coordinator,
- noSleep));
+ private:
+ BSONObj _documentAtSource;
+ };
+ RollbackSourceLocal rollbackSource(documentAtSource,
+ std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
+ commonOperation,
+ })));
+ OpTime opTime(deleteOperation.first["ts"].timestamp(), deleteOperation.first["h"].Long());
+ ASSERT_OK(syncRollback(txn,
+ opTime,
+ OplogInterfaceMock({deleteOperation, commonOperation}),
+ rollbackSource,
+ coordinator,
+ noSleep));
+ ASSERT_TRUE(rollbackSource.called);
+
+ Lock::DBLock dbLock(txn->lockState(), "test", MODE_S);
+ Lock::CollectionLock collLock(txn->lockState(), "test.t", MODE_S);
+ auto db = dbHolder().get(txn, "test");
+ ASSERT_TRUE(db);
+ auto collection = db->getCollection("test.t");
+ if (!collection) {
+ return -1;
}
-
- /**
- * Create test collection
- */
- void _createCollection(OperationContext* txn,
- const NamespaceString& nss,
- const CollectionOptions& options) {
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_X);
- mongo::WriteUnitOfWork wuow(txn);
- auto db = dbHolder().openDb(txn, nss.db());
+ return collection->getRecordStore()->numRecords(txn);
+}
+
+TEST_F(RSRollbackTest, RollBackDeleteNoDocumentAtSourceCollectionDoesNotExist) {
+ createOplog(_txn.get());
+ ASSERT_EQUALS(-1, _testRollBackDelete(_txn.get(), _coordinator, BSONObj()));
+}
+
+TEST_F(RSRollbackTest, RollBackDeleteNoDocumentAtSourceCollectionExistsNonCapped) {
+ createOplog(_txn.get());
+ _createCollection(_txn.get(), "test.t", CollectionOptions());
+ _testRollBackDelete(_txn.get(), _coordinator, BSONObj());
+ ASSERT_EQUALS(0, _testRollBackDelete(_txn.get(), _coordinator, BSONObj()));
+}
+
+TEST_F(RSRollbackTest, RollBackDeleteNoDocumentAtSourceCollectionExistsCapped) {
+ createOplog(_txn.get());
+ CollectionOptions options;
+ options.capped = true;
+ _createCollection(_txn.get(), "test.t", options);
+ ASSERT_EQUALS(0, _testRollBackDelete(_txn.get(), _coordinator, BSONObj()));
+}
+
+TEST_F(RSRollbackTest, RollBackDeleteRestoreDocument) {
+ createOplog(_txn.get());
+ _createCollection(_txn.get(), "test.t", CollectionOptions());
+ BSONObj doc = BSON("_id" << 0 << "a" << 1);
+ _testRollBackDelete(_txn.get(), _coordinator, doc);
+ ASSERT_EQUALS(1, _testRollBackDelete(_txn.get(), _coordinator, doc));
+}
+
+TEST_F(RSRollbackTest, RollbackUnknownCommand) {
+ createOplog(_txn.get());
+ auto commonOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
+ auto unknownCommandOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "h" << 1LL << "op"
+ << "c"
+ << "ns"
+ << "test.t"
+ << "o" << BSON("unknown_command"
+ << "t")),
+ RecordId(2));
+ {
+ Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X);
+ mongo::WriteUnitOfWork wuow(_txn.get());
+ auto db = dbHolder().openDb(_txn.get(), "test");
ASSERT_TRUE(db);
- db->dropCollection(txn, nss.ns());
- ASSERT_TRUE(db->createCollection(txn, nss.ns(), options));
+ ASSERT_TRUE(db->getOrCreateCollection(_txn.get(), "test.t"));
wuow.commit();
}
-
- void _createCollection(OperationContext* txn,
- const std::string& nss,
- const CollectionOptions& options) {
- _createCollection(txn, NamespaceString(nss), options);
- }
-
- /**
- * Test function to roll back a delete operation.
- * Returns number of records in collection after rolling back delete operation.
- * If collection does not exist after rolling back, returns -1.
- */
- int _testRollBackDelete(OperationContext* txn,
- ReplicationCoordinator* coordinator,
- const BSONObj& documentAtSource) {
- auto commonOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
- auto deleteOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) <<
- "h" << 1LL <<
- "op" << "d" <<
- "ns" << "test.t" <<
- "o" << BSON("_id" << 0)),
- RecordId(2));
- class RollbackSourceLocal : public RollbackSourceMock {
- public:
- RollbackSourceLocal(const BSONObj& documentAtSource,
- std::unique_ptr<OplogInterface> oplog)
- : RollbackSourceMock(std::move(oplog)),
- called(false),
- _documentAtSource(documentAtSource) { }
- BSONObj findOne(const NamespaceString& nss, const BSONObj& filter) const {
- called = true;
- return _documentAtSource;
- }
- mutable bool called;
- private:
- BSONObj _documentAtSource;
- };
- RollbackSourceLocal rollbackSource(
- documentAtSource,
- std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
- commonOperation,
- })));
- OpTime opTime(deleteOperation.first["ts"].timestamp(),
- deleteOperation.first["h"].Long());
- ASSERT_OK(
- syncRollback(
- txn,
- opTime,
- OplogInterfaceMock({deleteOperation, commonOperation}),
- rollbackSource,
- coordinator,
- noSleep));
- ASSERT_TRUE(rollbackSource.called);
-
- Lock::DBLock dbLock(txn->lockState(), "test", MODE_S);
- Lock::CollectionLock collLock(txn->lockState(), "test.t", MODE_S);
- auto db = dbHolder().get(txn, "test");
- ASSERT_TRUE(db);
- auto collection = db->getCollection("test.t");
- if (!collection) {
- return -1;
+ OpTime opTime(unknownCommandOperation.first["ts"].timestamp(),
+ unknownCommandOperation.first["h"].Long());
+ auto status =
+ syncRollback(_txn.get(),
+ opTime,
+ OplogInterfaceMock({unknownCommandOperation, commonOperation}),
+ RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
+ commonOperation,
+ }))),
+ _coordinator,
+ noSleep);
+ ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, status.code());
+ ASSERT_EQUALS(18751, status.location());
+}
+
+TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
+ createOplog(_txn.get());
+ auto commonOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
+ auto dropCollectionOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "h" << 1LL << "op"
+ << "c"
+ << "ns"
+ << "test.t"
+ << "o" << BSON("drop"
+ << "t")),
+ RecordId(2));
+ class RollbackSourceLocal : public RollbackSourceMock {
+ public:
+ RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
+ : RollbackSourceMock(std::move(oplog)), called(false) {}
+ void copyCollectionFromRemote(OperationContext* txn,
+ const NamespaceString& nss) const override {
+ called = true;
}
- return collection->getRecordStore()->numRecords(txn);
- }
-
- TEST_F(RSRollbackTest, RollBackDeleteNoDocumentAtSourceCollectionDoesNotExist) {
- createOplog(_txn.get());
- ASSERT_EQUALS(-1, _testRollBackDelete(_txn.get(), _coordinator, BSONObj()));
- }
-
- TEST_F(RSRollbackTest, RollBackDeleteNoDocumentAtSourceCollectionExistsNonCapped) {
- createOplog(_txn.get());
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- _testRollBackDelete(_txn.get(), _coordinator, BSONObj());
- ASSERT_EQUALS(0, _testRollBackDelete(_txn.get(), _coordinator, BSONObj()));
- }
-
- TEST_F(RSRollbackTest, RollBackDeleteNoDocumentAtSourceCollectionExistsCapped) {
- createOplog(_txn.get());
- CollectionOptions options;
- options.capped = true;
- _createCollection(_txn.get(), "test.t", options);
- ASSERT_EQUALS(0, _testRollBackDelete(_txn.get(), _coordinator, BSONObj()));
- }
-
- TEST_F(RSRollbackTest, RollBackDeleteRestoreDocument) {
- createOplog(_txn.get());
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- BSONObj doc = BSON("_id" << 0 << "a" << 1);
- _testRollBackDelete(_txn.get(), _coordinator, doc);
- ASSERT_EQUALS(1, _testRollBackDelete(_txn.get(), _coordinator, doc));
+ mutable bool called;
+ };
+ RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
+ commonOperation,
+ })));
+ _createCollection(_txn.get(), "test.t", CollectionOptions());
+ OpTime opTime(dropCollectionOperation.first["ts"].timestamp(),
+ dropCollectionOperation.first["h"].Long());
+ ASSERT_OK(syncRollback(_txn.get(),
+ opTime,
+ OplogInterfaceMock({dropCollectionOperation, commonOperation}),
+ rollbackSource,
+ _coordinator,
+ noSleep));
+ ASSERT_TRUE(rollbackSource.called);
+}
+
+TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
+ createOplog(_txn.get());
+ auto commonOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
+ auto createCollectionOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "h" << 1LL << "op"
+ << "c"
+ << "ns"
+ << "test.t"
+ << "o" << BSON("create"
+ << "t")),
+ RecordId(2));
+ RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
+ commonOperation,
+ })));
+ _createCollection(_txn.get(), "test.t", CollectionOptions());
+ OpTime opTime(createCollectionOperation.first["ts"].timestamp(),
+ createCollectionOperation.first["h"].Long());
+ ASSERT_OK(syncRollback(_txn.get(),
+ opTime,
+ OplogInterfaceMock({createCollectionOperation, commonOperation}),
+ rollbackSource,
+ _coordinator,
+ noSleep));
+ {
+ Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S);
+ auto db = dbHolder().get(_txn.get(), "test");
+ ASSERT_TRUE(db);
+ ASSERT_FALSE(db->getCollection("test.t"));
}
-
- TEST_F(RSRollbackTest, RollbackUnknownCommand) {
- createOplog(_txn.get());
- auto commonOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
- auto unknownCommandOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) <<
- "h" << 1LL <<
- "op" << "c" <<
- "ns" << "test.t" <<
- "o" << BSON("unknown_command" << "t")),
- RecordId(2));
- {
- Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X);
- mongo::WriteUnitOfWork wuow(_txn.get());
- auto db = dbHolder().openDb(_txn.get(), "test");
- ASSERT_TRUE(db);
- ASSERT_TRUE(db->getOrCreateCollection(_txn.get(), "test.t"));
- wuow.commit();
+}
+
+TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
+ createOplog(_txn.get());
+ auto commonOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
+ auto collectionModificationOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "h" << 1LL << "op"
+ << "c"
+ << "ns"
+ << "test.t"
+ << "o" << BSON("collMod"
+ << "t"
+ << "noPadding" << false)),
+ RecordId(2));
+ class RollbackSourceLocal : public RollbackSourceMock {
+ public:
+ RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
+ : RollbackSourceMock(std::move(oplog)), called(false) {}
+ StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const {
+ called = true;
+ return RollbackSourceMock::getCollectionInfo(nss);
}
- OpTime opTime(unknownCommandOperation.first["ts"].timestamp(),
- unknownCommandOperation.first["h"].Long());
- auto status =
- syncRollback(
- _txn.get(),
- opTime,
- OplogInterfaceMock({unknownCommandOperation, commonOperation}),
- RollbackSourceMock(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
- commonOperation,
- }))),
- _coordinator,
- noSleep);
- ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, status.code());
- ASSERT_EQUALS(18751, status.location());
- }
-
- TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
- createOplog(_txn.get());
- auto commonOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
- auto dropCollectionOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) <<
- "h" << 1LL <<
- "op" << "c" <<
- "ns" << "test.t" <<
- "o" << BSON("drop" << "t")),
- RecordId(2));
- class RollbackSourceLocal : public RollbackSourceMock {
- public:
- RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
- : RollbackSourceMock(std::move(oplog)),
- called(false) { }
- void copyCollectionFromRemote(OperationContext* txn,
- const NamespaceString& nss) const override {
- called = true;
- }
- mutable bool called;
- };
- RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
- commonOperation,
- })));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- OpTime opTime(dropCollectionOperation.first["ts"].timestamp(),
- dropCollectionOperation.first["h"].Long());
- ASSERT_OK(
- syncRollback(
- _txn.get(),
- opTime,
- OplogInterfaceMock({dropCollectionOperation, commonOperation}),
- rollbackSource,
- _coordinator,
- noSleep));
- ASSERT_TRUE(rollbackSource.called);
- }
-
- TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
- createOplog(_txn.get());
- auto commonOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
- auto createCollectionOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) <<
- "h" << 1LL <<
- "op" << "c" <<
- "ns" << "test.t" <<
- "o" << BSON("create" << "t")),
- RecordId(2));
- RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
- commonOperation,
- })));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- OpTime opTime(createCollectionOperation.first["ts"].timestamp(),
- createCollectionOperation.first["h"].Long());
- ASSERT_OK(
- syncRollback(
- _txn.get(),
- opTime,
- OplogInterfaceMock({createCollectionOperation, commonOperation}),
- rollbackSource,
- _coordinator,
- noSleep));
- {
- Lock::DBLock dbLock(_txn->lockState(), "test", MODE_S);
- auto db = dbHolder().get(_txn.get(), "test");
- ASSERT_TRUE(db);
- ASSERT_FALSE(db->getCollection("test.t"));
+ mutable bool called;
+ };
+ RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
+ commonOperation,
+ })));
+ _createCollection(_txn.get(), "test.t", CollectionOptions());
+ OpTime opTime(collectionModificationOperation.first["ts"].timestamp(),
+ collectionModificationOperation.first["h"].Long());
+ ASSERT_OK(syncRollback(_txn.get(),
+ opTime,
+ OplogInterfaceMock({collectionModificationOperation, commonOperation}),
+ rollbackSource,
+ _coordinator,
+ noSleep));
+ ASSERT_TRUE(rollbackSource.called);
+}
+
+TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOptions) {
+ createOplog(_txn.get());
+ auto commonOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
+ auto collectionModificationOperation =
+ std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) << "h" << 1LL << "op"
+ << "c"
+ << "ns"
+ << "test.t"
+ << "o" << BSON("collMod"
+ << "t"
+ << "noPadding" << false)),
+ RecordId(2));
+ class RollbackSourceLocal : public RollbackSourceMock {
+ public:
+ RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
+ : RollbackSourceMock(std::move(oplog)) {}
+ StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const {
+ return BSON("name" << nss.ns() << "options" << 12345);
}
- }
-
- TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
- createOplog(_txn.get());
- auto commonOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
- auto collectionModificationOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) <<
- "h" << 1LL <<
- "op" << "c" <<
- "ns" << "test.t" <<
- "o" << BSON("collMod" << "t" << "noPadding" << false)),
- RecordId(2));
- class RollbackSourceLocal : public RollbackSourceMock {
- public:
- RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
- : RollbackSourceMock(std::move(oplog)),
- called(false) { }
- StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const {
- called = true;
- return RollbackSourceMock::getCollectionInfo(nss);
- }
- mutable bool called;
- };
- RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
- commonOperation,
- })));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- OpTime opTime(collectionModificationOperation.first["ts"].timestamp(),
- collectionModificationOperation.first["h"].Long());
- ASSERT_OK(
- syncRollback(
- _txn.get(),
- opTime,
- OplogInterfaceMock({collectionModificationOperation, commonOperation}),
- rollbackSource,
- _coordinator,
- noSleep));
- ASSERT_TRUE(rollbackSource.called);
- }
-
- TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOptions) {
- createOplog(_txn.get());
- auto commonOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(1), 0) << "h" << 1LL), RecordId(1));
- auto collectionModificationOperation =
- std::make_pair(BSON("ts" << Timestamp(Seconds(2), 0) <<
- "h" << 1LL <<
- "op" << "c" <<
- "ns" << "test.t" <<
- "o" << BSON("collMod" << "t" << "noPadding" << false)),
- RecordId(2));
- class RollbackSourceLocal : public RollbackSourceMock {
- public:
- RollbackSourceLocal(std::unique_ptr<OplogInterface> oplog)
- : RollbackSourceMock(std::move(oplog)) { }
- StatusWith<BSONObj> getCollectionInfo(const NamespaceString& nss) const {
- return BSON("name" << nss.ns() << "options" << 12345);
- }
- };
- RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
- commonOperation,
- })));
- _createCollection(_txn.get(), "test.t", CollectionOptions());
- OpTime opTime(collectionModificationOperation.first["ts"].timestamp(),
- collectionModificationOperation.first["h"].Long());
- auto status =
- syncRollback(
- _txn.get(),
- opTime,
- OplogInterfaceMock({collectionModificationOperation, commonOperation}),
- rollbackSource,
- _coordinator,
- noSleep);
- ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, status.code());
- ASSERT_EQUALS(18753, status.location());
- }
-
-} // namespace
+ };
+ RollbackSourceLocal rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
+ commonOperation,
+ })));
+ _createCollection(_txn.get(), "test.t", CollectionOptions());
+ OpTime opTime(collectionModificationOperation.first["ts"].timestamp(),
+ collectionModificationOperation.first["h"].Long());
+ auto status =
+ syncRollback(_txn.get(),
+ opTime,
+ OplogInterfaceMock({collectionModificationOperation, commonOperation}),
+ rollbackSource,
+ _coordinator,
+ noSleep);
+ ASSERT_EQUALS(ErrorCodes::UnrecoverableRollbackError, status.code());
+ ASSERT_EQUALS(18753, status.location());
+}
+
+} // namespace
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index 3f2ee40b963..b1332337370 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -62,88 +62,84 @@
namespace mongo {
namespace repl {
- void runSyncThread() {
- Client::initThread("rsSync");
- AuthorizationSession::get(cc())->grantInternalAuthorization();
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
-
- // Set initial indexPrefetch setting
- const std::string& prefetch = replCoord->getSettings().rsIndexPrefetch;
- if (!prefetch.empty()) {
- BackgroundSync::IndexPrefetchConfig prefetchConfig = BackgroundSync::PREFETCH_ALL;
- if (prefetch == "none")
- prefetchConfig = BackgroundSync::PREFETCH_NONE;
- else if (prefetch == "_id_only")
- prefetchConfig = BackgroundSync::PREFETCH_ID_ONLY;
- else if (prefetch == "all")
- prefetchConfig = BackgroundSync::PREFETCH_ALL;
- else {
- warning() << "unrecognized indexPrefetch setting " << prefetch << ", defaulting "
- << "to \"all\"";
- }
- BackgroundSync::get()->setIndexPrefetchConfig(prefetchConfig);
+void runSyncThread() {
+ Client::initThread("rsSync");
+ AuthorizationSession::get(cc())->grantInternalAuthorization();
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+
+ // Set initial indexPrefetch setting
+ const std::string& prefetch = replCoord->getSettings().rsIndexPrefetch;
+ if (!prefetch.empty()) {
+ BackgroundSync::IndexPrefetchConfig prefetchConfig = BackgroundSync::PREFETCH_ALL;
+ if (prefetch == "none")
+ prefetchConfig = BackgroundSync::PREFETCH_NONE;
+ else if (prefetch == "_id_only")
+ prefetchConfig = BackgroundSync::PREFETCH_ID_ONLY;
+ else if (prefetch == "all")
+ prefetchConfig = BackgroundSync::PREFETCH_ALL;
+ else {
+ warning() << "unrecognized indexPrefetch setting " << prefetch << ", defaulting "
+ << "to \"all\"";
}
+ BackgroundSync::get()->setIndexPrefetchConfig(prefetchConfig);
+ }
- while (!inShutdown()) {
- // After a reconfig, we may not be in the replica set anymore, so
- // check that we are in the set (and not an arbiter) before
- // trying to sync with other replicas.
- // TODO(spencer): Use a condition variable to await loading a config
- if (replCoord->getMemberState().startup()) {
- warning() << "did not receive a valid config yet, sleeping 5 seconds ";
- sleepsecs(5);
- continue;
- }
+ while (!inShutdown()) {
+ // After a reconfig, we may not be in the replica set anymore, so
+ // check that we are in the set (and not an arbiter) before
+ // trying to sync with other replicas.
+ // TODO(spencer): Use a condition variable to await loading a config
+ if (replCoord->getMemberState().startup()) {
+ warning() << "did not receive a valid config yet, sleeping 5 seconds ";
+ sleepsecs(5);
+ continue;
+ }
- const MemberState memberState = replCoord->getMemberState();
+ const MemberState memberState = replCoord->getMemberState();
- // An arbiter can never transition to any other state, and doesn't replicate, ever
- if (memberState.arbiter()) {
- break;
- }
+ // An arbiter can never transition to any other state, and doesn't replicate, ever
+ if (memberState.arbiter()) {
+ break;
+ }
+
+ // If we are removed then we don't belong to the set anymore
+ if (memberState.removed()) {
+ sleepsecs(5);
+ continue;
+ }
- // If we are removed then we don't belong to the set anymore
- if (memberState.removed()) {
- sleepsecs(5);
+ try {
+ if (memberState.primary() && !replCoord->isWaitingForApplierToDrain()) {
+ sleepsecs(1);
continue;
}
- try {
-
- if (memberState.primary() && !replCoord->isWaitingForApplierToDrain()) {
- sleepsecs(1);
- continue;
- }
-
- bool initialSyncRequested = BackgroundSync::get()->getInitialSyncRequestedFlag();
- // Check criteria for doing an initial sync:
- // 1. If the oplog is empty, do an initial sync
- // 2. If minValid has _initialSyncFlag set, do an initial sync
- // 3. If initialSyncRequested is true
- if (getGlobalReplicationCoordinator()->getMyLastOptime().isNull() ||
- getInitialSyncFlag() ||
- initialSyncRequested) {
- syncDoInitialSync();
- continue; // start from top again in case sync failed.
- }
- if (!replCoord->setFollowerMode(MemberState::RS_RECOVERING)) {
- continue;
- }
-
- /* we have some data. continue tailing. */
- SyncTail tail(BackgroundSync::get(), multiSyncApply);
- tail.oplogApplication();
+ bool initialSyncRequested = BackgroundSync::get()->getInitialSyncRequestedFlag();
+ // Check criteria for doing an initial sync:
+ // 1. If the oplog is empty, do an initial sync
+ // 2. If minValid has _initialSyncFlag set, do an initial sync
+ // 3. If initialSyncRequested is true
+ if (getGlobalReplicationCoordinator()->getMyLastOptime().isNull() ||
+ getInitialSyncFlag() || initialSyncRequested) {
+ syncDoInitialSync();
+ continue; // start from top again in case sync failed.
}
- catch(const DBException& e) {
- log() << "Received exception while syncing: " << e.toString();
- sleepsecs(10);
- }
- catch(const std::exception& e) {
- log() << "Received exception while syncing: " << e.what();
- sleepsecs(10);
+ if (!replCoord->setFollowerMode(MemberState::RS_RECOVERING)) {
+ continue;
}
+
+ /* we have some data. continue tailing. */
+ SyncTail tail(BackgroundSync::get(), multiSyncApply);
+ tail.oplogApplication();
+ } catch (const DBException& e) {
+ log() << "Received exception while syncing: " << e.toString();
+ sleepsecs(10);
+ } catch (const std::exception& e) {
+ log() << "Received exception while syncing: " << e.what();
+ sleepsecs(10);
}
}
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/rs_sync.h b/src/mongo/db/repl/rs_sync.h
index af099db43a5..ec174268b5c 100644
--- a/src/mongo/db/repl/rs_sync.h
+++ b/src/mongo/db/repl/rs_sync.h
@@ -40,8 +40,8 @@
namespace mongo {
namespace repl {
- // Body of the thread that will do the background sync.
- void runSyncThread();
+// Body of the thread that will do the background sync.
+void runSyncThread();
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/rslog.cpp b/src/mongo/db/repl/rslog.cpp
index 9a02f64ce27..f48d6399847 100644
--- a/src/mongo/db/repl/rslog.cpp
+++ b/src/mongo/db/repl/rslog.cpp
@@ -36,12 +36,12 @@
namespace mongo {
namespace repl {
- static RamLog* _rsLog = RamLog::get("rs");
- logger::Tee* rsLog = _rsLog;
+static RamLog* _rsLog = RamLog::get("rs");
+logger::Tee* rsLog = _rsLog;
- void fillRsLog(std::stringstream* s) {
- _rsLog->toHTML(*s);
- }
+void fillRsLog(std::stringstream* s) {
+ _rsLog->toHTML(*s);
+}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/rslog.h b/src/mongo/db/repl/rslog.h
index 7a6624c876d..5b0b694d7bf 100644
--- a/src/mongo/db/repl/rslog.h
+++ b/src/mongo/db/repl/rslog.h
@@ -33,15 +33,15 @@
namespace mongo {
namespace logger {
- class Tee;
-} // namespace logger
+class Tee;
+} // namespace logger
namespace repl {
- void fillRsLog(std::stringstream* s);
+void fillRsLog(std::stringstream* s);
- // ramlog used for replSet actions
- extern logger::Tee* rsLog;
+// ramlog used for replSet actions
+extern logger::Tee* rsLog;
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/scatter_gather_algorithm.cpp b/src/mongo/db/repl/scatter_gather_algorithm.cpp
index 5e65a8f2df1..78fc22fa38f 100644
--- a/src/mongo/db/repl/scatter_gather_algorithm.cpp
+++ b/src/mongo/db/repl/scatter_gather_algorithm.cpp
@@ -33,7 +33,7 @@
namespace mongo {
namespace repl {
- ScatterGatherAlgorithm::~ScatterGatherAlgorithm() {}
+ScatterGatherAlgorithm::~ScatterGatherAlgorithm() {}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/scatter_gather_algorithm.h b/src/mongo/db/repl/scatter_gather_algorithm.h
index 9b6bd05d896..c5e06e4d3c9 100644
--- a/src/mongo/db/repl/scatter_gather_algorithm.h
+++ b/src/mongo/db/repl/scatter_gather_algorithm.h
@@ -34,46 +34,47 @@
namespace mongo {
- template <typename T> class StatusWith;
+template <typename T>
+class StatusWith;
namespace repl {
+/**
+ * Interface for a specialization of a scatter-gather algorithm that sends
+ * requests to a set of targets, and then processes responses until it has
+ * seen enough.
+ *
+ * To use, call getRequests() to get a vector of request objects describing network operations.
+ * Start performing the network operations in any order, and then, until
+ * hasReceivedSufficientResponses() returns true, call processResponse for each response as it
+ * arrives. Once hasReceivedSufficientResponses() returns true, you may cancel outstanding
+ * network operations, and must stop calling processResponse(). Implementations of this
+ * interface may assume that processResponse() is never called after
+ * hasReceivedSufficientResponses() returns true.
+ */
+class ScatterGatherAlgorithm {
+public:
/**
- * Interface for a specialization of a scatter-gather algorithm that sends
- * requests to a set of targets, and then processes responses until it has
- * seen enough.
- *
- * To use, call getRequests() to get a vector of request objects describing network operations.
- * Start performing the network operations in any order, and then, until
- * hasReceivedSufficientResponses() returns true, call processResponse for each response as it
- * arrives. Once hasReceivedSufficientResponses() you may cancel outstanding network
- * operations, and must stop calling processResponse. Implementations of this interface may
- * assume that processResponse() is never called after hasReceivedSufficientResponses() returns
- * true.
+ * Returns the list of requests that should be sent.
*/
- class ScatterGatherAlgorithm {
- public:
- /**
- * Returns the list of requests that should be sent.
- */
- virtual std::vector<RemoteCommandRequest> getRequests() const = 0;
+ virtual std::vector<RemoteCommandRequest> getRequests() const = 0;
- /**
- * Method to call once for each received response.
- */
- virtual void processResponse(const RemoteCommandRequest& request,
- const ResponseStatus& response) = 0;
+ /**
+ * Method to call once for each received response.
+ */
+ virtual void processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response) = 0;
- /**
- * Returns true if no more calls to processResponse are needed to consider the
- * algorithm complete. Once this method returns true, one should no longer
- * call processResponse.
- */
- virtual bool hasReceivedSufficientResponses() const = 0;
+ /**
+ * Returns true if no more calls to processResponse are needed to consider the
+ * algorithm complete. Once this method returns true, one should no longer
+ * call processResponse.
+ */
+ virtual bool hasReceivedSufficientResponses() const = 0;
- protected:
- virtual ~ScatterGatherAlgorithm(); // Shouldn't actually be virtual.
- };
+protected:
+ virtual ~ScatterGatherAlgorithm(); // Shouldn't actually be virtual.
+};
} // namespace repl
} // namespace mongo
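For reference, a minimal sketch of an implementation following the contract documented above, assuming the scatter_gather_algorithm.h declarations are in scope; the QuorumCountAlgorithm name and its simple counting policy are hypothetical and not part of this patch:

    class QuorumCountAlgorithm : public ScatterGatherAlgorithm {
    public:
        QuorumCountAlgorithm(std::vector<RemoteCommandRequest> requests, size_t quorum)
            : _requests(std::move(requests)), _quorum(quorum) {}

        std::vector<RemoteCommandRequest> getRequests() const override {
            // One request per target; the runner schedules all of them.
            return _requests;
        }

        void processResponse(const RemoteCommandRequest& request,
                             const ResponseStatus& response) override {
            // Count every response; a real algorithm would inspect the status
            // and payload before deciding whether it counts toward the quorum.
            ++_responses;
        }

        bool hasReceivedSufficientResponses() const override {
            // Once this returns true, the caller cancels outstanding commands
            // and stops calling processResponse().
            return _responses >= _quorum;
        }

    private:
        std::vector<RemoteCommandRequest> _requests;
        size_t _quorum;
        size_t _responses = 0;
    };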
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index 52b8b3ce43f..6161f71db63 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -41,127 +41,111 @@
namespace mongo {
namespace repl {
- ScatterGatherRunner::ScatterGatherRunner(ScatterGatherAlgorithm* algorithm) :
- _algorithm(algorithm),
- _started(false) {
+ScatterGatherRunner::ScatterGatherRunner(ScatterGatherAlgorithm* algorithm)
+ : _algorithm(algorithm), _started(false) {}
+
+ScatterGatherRunner::~ScatterGatherRunner() {}
+
+static void startTrampoline(const ReplicationExecutor::CallbackArgs& cbData,
+ ScatterGatherRunner* runner,
+ StatusWith<ReplicationExecutor::EventHandle>* result) {
+ // TODO: remove static cast once ScatterGatherRunner is designed to work with a generic
+ // TaskExecutor.
+ ReplicationExecutor* executor = static_cast<ReplicationExecutor*>(cbData.executor);
+ *result = runner->start(executor);
+}
+
+Status ScatterGatherRunner::run(ReplicationExecutor* executor) {
+ StatusWith<ReplicationExecutor::EventHandle> finishEvh(ErrorCodes::InternalError, "Not set");
+ StatusWith<ReplicationExecutor::CallbackHandle> startCBH = executor->scheduleWork(
+ stdx::bind(startTrampoline, stdx::placeholders::_1, this, &finishEvh));
+ if (!startCBH.isOK()) {
+ return startCBH.getStatus();
}
-
- ScatterGatherRunner::~ScatterGatherRunner() {
- }
-
- static void startTrampoline(const ReplicationExecutor::CallbackArgs& cbData,
- ScatterGatherRunner* runner,
- StatusWith<ReplicationExecutor::EventHandle>* result) {
-
- // TODO: remove static cast once ScatterGatherRunner is designed to work with a generic
- // TaskExecutor.
- ReplicationExecutor* executor = static_cast<ReplicationExecutor*>(cbData.executor);
- *result = runner->start(executor);
+ executor->wait(startCBH.getValue());
+ if (!finishEvh.isOK()) {
+ return finishEvh.getStatus();
}
-
- Status ScatterGatherRunner::run(ReplicationExecutor* executor) {
- StatusWith<ReplicationExecutor::EventHandle> finishEvh(ErrorCodes::InternalError,
- "Not set");
- StatusWith<ReplicationExecutor::CallbackHandle> startCBH = executor->scheduleWork(
- stdx::bind(startTrampoline, stdx::placeholders::_1, this, &finishEvh));
- if (!startCBH.isOK()) {
- return startCBH.getStatus();
- }
- executor->wait(startCBH.getValue());
- if (!finishEvh.isOK()) {
- return finishEvh.getStatus();
- }
- executor->waitForEvent(finishEvh.getValue());
- return Status::OK();
+ executor->waitForEvent(finishEvh.getValue());
+ return Status::OK();
+}
+
+StatusWith<ReplicationExecutor::EventHandle> ScatterGatherRunner::start(
+ ReplicationExecutor* executor, const stdx::function<void()>& onCompletion) {
+ invariant(!_started);
+ _started = true;
+ _actualResponses = 0;
+ _onCompletion = onCompletion;
+ StatusWith<ReplicationExecutor::EventHandle> evh = executor->makeEvent();
+ if (!evh.isOK()) {
+ return evh;
}
-
- StatusWith<ReplicationExecutor::EventHandle> ScatterGatherRunner::start(
- ReplicationExecutor* executor,
- const stdx::function<void ()>& onCompletion) {
-
- invariant(!_started);
- _started = true;
- _actualResponses = 0;
- _onCompletion = onCompletion;
- StatusWith<ReplicationExecutor::EventHandle> evh = executor->makeEvent();
- if (!evh.isOK()) {
- return evh;
- }
- _sufficientResponsesReceived = evh.getValue();
- ScopeGuard earlyReturnGuard = MakeGuard(
- &ScatterGatherRunner::_signalSufficientResponsesReceived,
- this,
- executor);
-
- const ReplicationExecutor::RemoteCommandCallbackFn cb = stdx::bind(
- &ScatterGatherRunner::_processResponse,
- stdx::placeholders::_1,
- this);
-
- std::vector<RemoteCommandRequest> requests = _algorithm->getRequests();
- for (size_t i = 0; i < requests.size(); ++i) {
- const StatusWith<ReplicationExecutor::CallbackHandle> cbh =
- executor->scheduleRemoteCommand(requests[i], cb);
- if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
- return StatusWith<ReplicationExecutor::EventHandle>(cbh.getStatus());
- }
- fassert(18743, cbh.getStatus());
- _callbacks.push_back(cbh.getValue());
+ _sufficientResponsesReceived = evh.getValue();
+ ScopeGuard earlyReturnGuard =
+ MakeGuard(&ScatterGatherRunner::_signalSufficientResponsesReceived, this, executor);
+
+ const ReplicationExecutor::RemoteCommandCallbackFn cb =
+ stdx::bind(&ScatterGatherRunner::_processResponse, stdx::placeholders::_1, this);
+
+ std::vector<RemoteCommandRequest> requests = _algorithm->getRequests();
+ for (size_t i = 0; i < requests.size(); ++i) {
+ const StatusWith<ReplicationExecutor::CallbackHandle> cbh =
+ executor->scheduleRemoteCommand(requests[i], cb);
+ if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
+ return StatusWith<ReplicationExecutor::EventHandle>(cbh.getStatus());
}
-
- if (_callbacks.empty() || _algorithm->hasReceivedSufficientResponses()) {
- invariant(_algorithm->hasReceivedSufficientResponses());
- _signalSufficientResponsesReceived(executor);
- }
-
- earlyReturnGuard.Dismiss();
- return evh;
+ fassert(18743, cbh.getStatus());
+ _callbacks.push_back(cbh.getValue());
}
- void ScatterGatherRunner::cancel(ReplicationExecutor* executor) {
- invariant(_started);
+ if (_callbacks.empty() || _algorithm->hasReceivedSufficientResponses()) {
+ invariant(_algorithm->hasReceivedSufficientResponses());
_signalSufficientResponsesReceived(executor);
}
- void ScatterGatherRunner::_processResponse(
- const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
- ScatterGatherRunner* runner) {
-
- // It is possible that the ScatterGatherRunner has already gone out of scope, if the
- // response indicates the callback was canceled. In that case, do not access any members
- // of "runner" and return immediately.
- if (cbData.response.getStatus() == ErrorCodes::CallbackCanceled) {
- return;
- }
-
- ++runner->_actualResponses;
- runner->_algorithm->processResponse(cbData.request, cbData.response);
- if (runner->_algorithm->hasReceivedSufficientResponses()) {
- // TODO: remove static cast once ScatterGatherRunner is designed to work with a generic
- // TaskExecutor.
- ReplicationExecutor* executor = static_cast<ReplicationExecutor*>(cbData.executor);
- runner->_signalSufficientResponsesReceived(executor);
- }
- else {
- invariant(runner->_actualResponses < runner->_callbacks.size());
- }
+ earlyReturnGuard.Dismiss();
+ return evh;
+}
+
+void ScatterGatherRunner::cancel(ReplicationExecutor* executor) {
+ invariant(_started);
+ _signalSufficientResponsesReceived(executor);
+}
+
+void ScatterGatherRunner::_processResponse(
+ const ReplicationExecutor::RemoteCommandCallbackArgs& cbData, ScatterGatherRunner* runner) {
+ // It is possible that the ScatterGatherRunner has already gone out of scope, if the
+ // response indicates the callback was canceled. In that case, do not access any members
+ // of "runner" and return immediately.
+ if (cbData.response.getStatus() == ErrorCodes::CallbackCanceled) {
+ return;
}
- void ScatterGatherRunner::_signalSufficientResponsesReceived(ReplicationExecutor* executor) {
- if (_sufficientResponsesReceived.isValid()) {
- std::for_each(_callbacks.begin(),
- _callbacks.end(),
- stdx::bind(&ReplicationExecutor::cancel,
- executor,
- stdx::placeholders::_1));
- const ReplicationExecutor::EventHandle h = _sufficientResponsesReceived;
- _sufficientResponsesReceived = ReplicationExecutor::EventHandle();
- if (_onCompletion) {
- _onCompletion();
- }
- executor->signalEvent(h);
+ ++runner->_actualResponses;
+ runner->_algorithm->processResponse(cbData.request, cbData.response);
+ if (runner->_algorithm->hasReceivedSufficientResponses()) {
+ // TODO: remove static cast once ScatterGatherRunner is designed to work with a generic
+ // TaskExecutor.
+ ReplicationExecutor* executor = static_cast<ReplicationExecutor*>(cbData.executor);
+ runner->_signalSufficientResponsesReceived(executor);
+ } else {
+ invariant(runner->_actualResponses < runner->_callbacks.size());
+ }
+}
+
+void ScatterGatherRunner::_signalSufficientResponsesReceived(ReplicationExecutor* executor) {
+ if (_sufficientResponsesReceived.isValid()) {
+ std::for_each(_callbacks.begin(),
+ _callbacks.end(),
+ stdx::bind(&ReplicationExecutor::cancel, executor, stdx::placeholders::_1));
+ const ReplicationExecutor::EventHandle h = _sufficientResponsesReceived;
+ _sufficientResponsesReceived = ReplicationExecutor::EventHandle();
+ if (_onCompletion) {
+ _onCompletion();
}
+ executor->signalEvent(h);
}
+}
} // namespace repl
} // namespace mongo
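For reference, a minimal sketch of driving the runner through its blocking entry point, matching the behavior implemented above; the gatherResponses wrapper is hypothetical and not part of this patch:

    Status gatherResponses(ReplicationExecutor* executor, ScatterGatherAlgorithm* algorithm) {
        // The algorithm must outlive the runner, and run() must not be called
        // from inside the executor context.
        ScatterGatherRunner runner(algorithm);
        // run() schedules start() on the executor, waits for the completion event,
        // and returns a non-OK status only when the executor is already shutting
        // down before the work can be scheduled.
        return runner.run(executor);
    }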
diff --git a/src/mongo/db/repl/scatter_gather_runner.h b/src/mongo/db/repl/scatter_gather_runner.h
index 089fa178dde..ad7b8d93aa5 100644
--- a/src/mongo/db/repl/scatter_gather_runner.h
+++ b/src/mongo/db/repl/scatter_gather_runner.h
@@ -36,88 +36,90 @@
namespace mongo {
- template <typename T> class StatusWith;
+template <typename T>
+class StatusWith;
namespace repl {
- class ScatterGatherAlgorithm;
+class ScatterGatherAlgorithm;
+
+/**
+ * Implementation of a scatter-gather behavior using a ReplicationExecutor.
+ */
+class ScatterGatherRunner {
+ MONGO_DISALLOW_COPYING(ScatterGatherRunner);
+
+public:
+ /**
+ * Constructs a new runner whose underlying algorithm is "algorithm".
+ *
+ * "algorithm" must remain in scope until the runner's destructor completes.
+ */
+ explicit ScatterGatherRunner(ScatterGatherAlgorithm* algorithm);
+
+ ~ScatterGatherRunner();
+
+ /**
+ * Runs the scatter-gather process using "executor", and blocks until it completes.
+ *
+ * Must _not_ be run from inside the executor context.
+ *
+ * Returns ErrorCodes::ShutdownInProgress if the executor enters or is already in
+ * the shutdown state before run() can schedule execution of the scatter-gather
+ * in the executor. Note that if the executor is shut down after the algorithm
+ * is scheduled but before it completes, this method will return Status::OK(),
+ * just as it does when it runs successfully to completion.
+ */
+ Status run(ReplicationExecutor* executor);
+
+ /**
+ * Starts executing the scatter-gather process using "executor".
+ *
+ * On success, returns an event handle that will be signaled when the runner has
+ * finished executing the scatter-gather process. After that event has been
+ * signaled, it is safe for the caller to examine any state on "algorithm".
+ *
+ * This method must be called inside the executor context.
+ *
+ * onCompletion is an optional callback that will be executed in executor context
+ * immediately prior to signaling the event handle returned here. It must never
+ * throw exceptions. It may examine the state of the algorithm object.
+ *
+ * NOTE: If the executor starts to shut down before onCompletion executes, onCompletion may
+ * never execute, even though the returned event will eventually be signaled.
+ */
+ StatusWith<ReplicationExecutor::EventHandle> start(
+ ReplicationExecutor* executor,
+ const stdx::function<void()>& onCompletion = stdx::function<void()>());
+
+ /**
+ * Informs the runner to cancel further processing. The "executor" argument
+ * must point to the same executor passed to "start()".
+ *
+ * Like start, this method must be called from within the executor context.
+ */
+ void cancel(ReplicationExecutor* executor);
+
+private:
+ /**
+ * Callback invoked once for every response from the network.
+ */
+ static void _processResponse(const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
+ ScatterGatherRunner* runner);
/**
- * Implementation of a scatter-gather behavior using a ReplicationExecutor.
+ * Method that performs all actions required when _algorithm indicates a sufficient
+     * number of responses have been received.
*/
- class ScatterGatherRunner {
- MONGO_DISALLOW_COPYING(ScatterGatherRunner);
- public:
- /**
- * Constructs a new runner whose underlying algorithm is "algorithm".
- *
- * "algorithm" must remain in scope until the runner's destructor completes.
- */
- explicit ScatterGatherRunner(ScatterGatherAlgorithm* algorithm);
-
- ~ScatterGatherRunner();
-
- /**
- * Runs the scatter-gather process using "executor", and blocks until it completes.
- *
- * Must _not_ be run from inside the executor context.
- *
- * Returns ErrorCodes::ShutdownInProgress if the executor enters or is already in
- * the shutdown state before run() can schedule execution of the scatter-gather
- * in the executor. Note that if the executor is shut down after the algorithm
- * is scheduled but before it completes, this method will return Status::OK(),
- * just as it does when it runs successfully to completion.
- */
- Status run(ReplicationExecutor* executor);
-
- /**
- * Starts executing the scatter-gather process using "executor".
- *
- * On success, returns an event handle that will be signaled when the runner has
- * finished executing the scatter-gather process. After that event has been
- * signaled, it is safe for the caller to examine any state on "algorithm".
- *
- * This method must be called inside the executor context.
- *
- * onCompletion is an optional callback that will be executed in executor context
- * immediately prior to signaling the event handle returned here. It must never
- * throw exceptions. It may examine the state of the algorithm object.
- *
- * NOTE: If the executor starts to shut down before onCompletion executes, onCompletion may
- * never execute, even though the returned event will eventually be signaled.
- */
- StatusWith<ReplicationExecutor::EventHandle> start(
- ReplicationExecutor* executor,
- const stdx::function<void ()>& onCompletion = stdx::function<void ()>());
-
- /**
- * Informs the runner to cancel further processing. The "executor" argument
- * must point to the same executor passed to "start()".
- *
- * Like start, this method must be called from within the executor context.
- */
- void cancel(ReplicationExecutor* executor);
-
- private:
- /**
- * Callback invoked once for every response from the network.
- */
- static void _processResponse(const ReplicationExecutor::RemoteCommandCallbackArgs& cbData,
- ScatterGatherRunner* runner);
-
- /**
- * Method that performs all actions required when _algorithm indicates a sufficient
- * number of respones have been received.
- */
- void _signalSufficientResponsesReceived(ReplicationExecutor* executor);
-
- ScatterGatherAlgorithm* _algorithm;
- stdx::function<void ()> _onCompletion;
- ReplicationExecutor::EventHandle _sufficientResponsesReceived;
- std::vector<ReplicationExecutor::CallbackHandle> _callbacks;
- size_t _actualResponses;
- bool _started;
- };
+ void _signalSufficientResponsesReceived(ReplicationExecutor* executor);
+
+ ScatterGatherAlgorithm* _algorithm;
+ stdx::function<void()> _onCompletion;
+ ReplicationExecutor::EventHandle _sufficientResponsesReceived;
+ std::vector<ReplicationExecutor::CallbackHandle> _callbacks;
+ size_t _actualResponses;
+ bool _started;
+};
} // namespace repl
} // namespace mongo
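For orientation, here is a minimal sketch of how the ScatterGatherRunner API documented above is typically driven. The PingQuorumAlgorithm type and runPingQuorum() helper are illustrative assumptions modeled on the test algorithm in the next file; they are not part of this change.

#include "mongo/db/repl/replication_executor.h"
#include "mongo/db/repl/scatter_gather_algorithm.h"  // assumed location of the algorithm interface
#include "mongo/db/repl/scatter_gather_runner.h"

namespace mongo {
namespace repl {

// Hypothetical algorithm: pings three hosts and is satisfied after two responses.
class PingQuorumAlgorithm : public ScatterGatherAlgorithm {
public:
    virtual std::vector<RemoteCommandRequest> getRequests() const {
        std::vector<RemoteCommandRequest> requests;
        for (int i = 0; i < 3; i++) {
            requests.push_back(RemoteCommandRequest(
                HostAndPort("hostname", i), "admin", BSON("ping" << 1), Milliseconds(30 * 1000)));
        }
        return requests;
    }
    virtual void processResponse(const RemoteCommandRequest& request,
                                 const ResponseStatus& response) {
        _numResponses++;
    }
    virtual bool hasReceivedSufficientResponses() const {
        return _numResponses >= 2;
    }

private:
    int64_t _numResponses = 0;
};

// run() blocks until the algorithm reports sufficient responses, so it must be called from
// outside the executor context, per the contract documented above.
Status runPingQuorum(ReplicationExecutor* executor) {
    PingQuorumAlgorithm algorithm;
    ScatterGatherRunner runner(&algorithm);  // "algorithm" must outlive the runner
    return runner.run(executor);
}

}  // namespace repl
}  // namespace mongo

The non-blocking start() follows the same pattern, but must be invoked from within the executor context and returns an event handle to wait on rather than blocking.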
diff --git a/src/mongo/db/repl/scatter_gather_test.cpp b/src/mongo/db/repl/scatter_gather_test.cpp
index 6d3673fa927..fb605a597e0 100644
--- a/src/mongo/db/repl/scatter_gather_test.cpp
+++ b/src/mongo/db/repl/scatter_gather_test.cpp
@@ -41,308 +41,288 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
-
- /**
- * Algorithm for testing the ScatterGatherRunner, which will finish running when finish() is
- * called, or upon receiving responses from two nodes. Creates a three requests algorithm
- * simulating running an algorithm against three other nodes.
- */
- class ScatterGatherTestAlgorithm : public ScatterGatherAlgorithm {
- public:
- ScatterGatherTestAlgorithm(int64_t maxResponses = 2) :
- _done(false),
- _numResponses(0),
- _maxResponses(maxResponses) {}
-
- virtual std::vector<RemoteCommandRequest> getRequests() const {
- std::vector<RemoteCommandRequest> requests;
- for (int i = 0; i < 3; i++) {
- requests.push_back(RemoteCommandRequest(
- HostAndPort("hostname", i),
- "admin",
- BSONObj(),
- Milliseconds(30*1000)));
- }
- return requests;
- }
-
- virtual void processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response) {
- _numResponses++;
- }
-
- void finish() {
- _done = true;
- }
+using executor::NetworkInterfaceMock;
- virtual bool hasReceivedSufficientResponses() const {
- if (_done) {
- return _done;
- }
-
- return _numResponses >= _maxResponses;
- }
-
- int getResponseCount() {
- return _numResponses;
+/**
+ * Algorithm for testing the ScatterGatherRunner, which will finish running when finish() is
+ * called, or upon receiving responses from two nodes. Creates a three-request algorithm
+ * that simulates running against three other nodes.
+ */
+class ScatterGatherTestAlgorithm : public ScatterGatherAlgorithm {
+public:
+ ScatterGatherTestAlgorithm(int64_t maxResponses = 2)
+ : _done(false), _numResponses(0), _maxResponses(maxResponses) {}
+
+ virtual std::vector<RemoteCommandRequest> getRequests() const {
+ std::vector<RemoteCommandRequest> requests;
+ for (int i = 0; i < 3; i++) {
+ requests.push_back(RemoteCommandRequest(
+ HostAndPort("hostname", i), "admin", BSONObj(), Milliseconds(30 * 1000)));
}
-
- private:
-
- bool _done;
- int64_t _numResponses;
- int64_t _maxResponses;
- };
-
- /**
- * ScatterGatherTest base class which sets up the ReplicationExecutor and NetworkInterfaceMock.
- */
- class ScatterGatherTest : public mongo::unittest::Test {
- protected:
-
- NetworkInterfaceMock* getNet() { return _net; }
- ReplicationExecutor* getExecutor() { return _executor.get(); }
-
- int64_t countLogLinesContaining(const std::string& needle);
- private:
-
- void setUp();
- void tearDown();
-
- // owned by _executor
- NetworkInterfaceMock* _net;
- StorageInterfaceMock* _storage;
- std::unique_ptr<ReplicationExecutor> _executor;
- std::unique_ptr<stdx::thread> _executorThread;
- };
-
- void ScatterGatherTest::setUp() {
- _net = new NetworkInterfaceMock;
- _storage = new StorageInterfaceMock;
- _executor.reset(new ReplicationExecutor(_net, _storage, 1 /* prng seed */));
- _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run,
- _executor.get())));
+ return requests;
}
- void ScatterGatherTest::tearDown() {
- _executor->shutdown();
- _executorThread->join();
+ virtual void processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response) {
+ _numResponses++;
}
+ void finish() {
+ _done = true;
+ }
- // Used to run a ScatterGatherRunner in a separate thread, to avoid blocking test execution.
- class ScatterGatherRunnerRunner {
- public:
-
- ScatterGatherRunnerRunner(ScatterGatherRunner* sgr, ReplicationExecutor* executor) :
- _sgr(sgr),
- _executor(executor),
- _result(Status(ErrorCodes::BadValue, "failed to set status")) {}
-
- // Could block if _sgr has not finished
- Status getResult() {
- _thread->join();
- return _result;
- }
-
- void run() {
- _thread.reset(new stdx::thread(stdx::bind(&ScatterGatherRunnerRunner::_run,
- this,
- _executor)));
- }
-
- private:
-
- void _run(ReplicationExecutor* executor) {
- _result = _sgr->run(_executor);
+ virtual bool hasReceivedSufficientResponses() const {
+ if (_done) {
+ return _done;
}
- ScatterGatherRunner* _sgr;
- ReplicationExecutor* _executor;
- Status _result;
- std::unique_ptr<stdx::thread> _thread;
- };
-
- // Simple onCompletion function which will toggle a bool, so that we can check the logs to
- // ensure the onCompletion function ran when expected.
- void onCompletionTestFunction(bool* ran) {
- *ran = true;
+ return _numResponses >= _maxResponses;
}
- // Confirm that running via start() will finish and run the onComplete function once sufficient
- // responses have been received.
- // Confirm that deleting both the ScatterGatherTestAlgorithm and ScatterGatherRunner while
- // scheduled callbacks still exist will not be unsafe (ASAN builder) after the algorithm has
- // completed.
- TEST_F(ScatterGatherTest, DeleteAlgorithmAfterItHasCompleted) {
- ScatterGatherTestAlgorithm* sga = new ScatterGatherTestAlgorithm();
- ScatterGatherRunner* sgr = new ScatterGatherRunner(sga);
- bool ranCompletion = false;
- StatusWith<ReplicationExecutor::EventHandle> status = sgr->start(getExecutor(),
- stdx::bind(&onCompletionTestFunction, &ranCompletion));
- ASSERT_OK(status.getStatus());
- ASSERT_FALSE(ranCompletion);
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now() + Seconds(2),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(10))));
- ASSERT_FALSE(ranCompletion);
-
- noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now() + Seconds(2),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(10))));
- ASSERT_FALSE(ranCompletion);
-
- noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now() + Seconds(5),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(10))));
- ASSERT_FALSE(ranCompletion);
-
- net->runUntil(net->now() + Seconds(2));
- ASSERT_TRUE(ranCompletion);
-
- delete sga;
- delete sgr;
-
- net->runReadyNetworkOperations();
-
- net->exitNetwork();
+ int getResponseCount() {
+ return _numResponses;
}
- // Confirm that shutting the ReplicationExecutor down before calling run() will cause run()
- // to return ErrorCodes::ShutdownInProgress.
- TEST_F(ScatterGatherTest, ShutdownExecutorBeforeRun) {
- ScatterGatherTestAlgorithm sga;
- ScatterGatherRunner sgr(&sga);
- getExecutor()->shutdown();
- sga.finish();
- Status status = sgr.run(getExecutor());
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status);
- }
+private:
+ bool _done;
+ int64_t _numResponses;
+ int64_t _maxResponses;
+};
- // Confirm that shutting the ReplicationExecutor down after calling run(), but before run()
- // finishes will cause run() to return Status::OK().
- TEST_F(ScatterGatherTest, ShutdownExecutorAfterRun) {
- ScatterGatherTestAlgorithm sga;
- ScatterGatherRunner sgr(&sga);
- ScatterGatherRunnerRunner sgrr(&sgr, getExecutor());
- sgrr.run();
- // need to wait for the scatter-gather to be scheduled in the executor
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- net->blackHole(noi);
- net->exitNetwork();
- getExecutor()->shutdown();
- Status status = sgrr.getResult();
- ASSERT_OK(status);
+/**
+ * ScatterGatherTest base class which sets up the ReplicationExecutor and NetworkInterfaceMock.
+ */
+class ScatterGatherTest : public mongo::unittest::Test {
+protected:
+ NetworkInterfaceMock* getNet() {
+ return _net;
}
-
- // Confirm that shutting the ReplicationExecutor down before calling start() will cause start()
- // to return ErrorCodes::ShutdownInProgress and should not run onCompletion().
- TEST_F(ScatterGatherTest, ShutdownExecutorBeforeStart) {
- ScatterGatherTestAlgorithm sga;
- ScatterGatherRunner sgr(&sga);
- getExecutor()->shutdown();
- bool ranCompletion = false;
- StatusWith<ReplicationExecutor::EventHandle> status = sgr.start(getExecutor(),
- stdx::bind(&onCompletionTestFunction, &ranCompletion));
- sga.finish();
- ASSERT_FALSE(ranCompletion);
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.getStatus());
+ ReplicationExecutor* getExecutor() {
+ return _executor.get();
}
- // Confirm that shutting the ReplicationExecutor down after calling start() will cause start()
- // to return Status::OK and should not run onCompletion().
- TEST_F(ScatterGatherTest, ShutdownExecutorAfterStart) {
- ScatterGatherTestAlgorithm sga;
- ScatterGatherRunner sgr(&sga);
- bool ranCompletion = false;
- StatusWith<ReplicationExecutor::EventHandle> status = sgr.start(getExecutor(),
- stdx::bind(&onCompletionTestFunction, &ranCompletion));
- getExecutor()->shutdown();
- sga.finish();
- ASSERT_FALSE(ranCompletion);
- ASSERT_OK(status.getStatus());
+ int64_t countLogLinesContaining(const std::string& needle);
+
+private:
+ void setUp();
+ void tearDown();
+
+ // owned by _executor
+ NetworkInterfaceMock* _net;
+ StorageInterfaceMock* _storage;
+ std::unique_ptr<ReplicationExecutor> _executor;
+ std::unique_ptr<stdx::thread> _executorThread;
+};
+
+void ScatterGatherTest::setUp() {
+ _net = new NetworkInterfaceMock;
+ _storage = new StorageInterfaceMock;
+ _executor.reset(new ReplicationExecutor(_net, _storage, 1 /* prng seed */));
+ _executorThread.reset(new stdx::thread(stdx::bind(&ReplicationExecutor::run, _executor.get())));
+}
+
+void ScatterGatherTest::tearDown() {
+ _executor->shutdown();
+ _executorThread->join();
+}
+
+
+// Used to run a ScatterGatherRunner in a separate thread, to avoid blocking test execution.
+class ScatterGatherRunnerRunner {
+public:
+ ScatterGatherRunnerRunner(ScatterGatherRunner* sgr, ReplicationExecutor* executor)
+ : _sgr(sgr),
+ _executor(executor),
+ _result(Status(ErrorCodes::BadValue, "failed to set status")) {}
+
+ // Could block if _sgr has not finished
+ Status getResult() {
+ _thread->join();
+ return _result;
}
- // Confirm that responses are not processed once sufficient responses have been received.
- TEST_F(ScatterGatherTest, DoNotProcessMoreThanSufficientResponses) {
- ScatterGatherTestAlgorithm sga;
- ScatterGatherRunner sgr(&sga);
- bool ranCompletion = false;
- StatusWith<ReplicationExecutor::EventHandle> status = sgr.start(getExecutor(),
- stdx::bind(&onCompletionTestFunction, &ranCompletion));
- ASSERT_OK(status.getStatus());
- ASSERT_FALSE(ranCompletion);
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now() + Seconds(2),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(10))));
- ASSERT_FALSE(ranCompletion);
-
- noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now() + Seconds(2),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(10))));
- ASSERT_FALSE(ranCompletion);
-
- noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now() + Seconds(5),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(10))));
- ASSERT_FALSE(ranCompletion);
-
- net->runUntil(net->now() + Seconds(2));
- ASSERT_TRUE(ranCompletion);
-
- net->runReadyNetworkOperations();
- // the third resposne should not be processed, so the count should not increment
- ASSERT_EQUALS(2, sga.getResponseCount());
-
- net->exitNetwork();
+ void run() {
+ _thread.reset(
+ new stdx::thread(stdx::bind(&ScatterGatherRunnerRunner::_run, this, _executor)));
}
- // Confirm that starting with sufficient responses received will immediate complete.
- TEST_F(ScatterGatherTest, DoNotCreateCallbacksIfHasSufficientResponsesReturnsTrueImmediately) {
- ScatterGatherTestAlgorithm sga;
- // set hasReceivedSufficientResponses to return true before the run starts
- sga.finish();
- ScatterGatherRunner sgr(&sga);
- bool ranCompletion = false;
- StatusWith<ReplicationExecutor::EventHandle> status = sgr.start(getExecutor(),
- stdx::bind(&onCompletionTestFunction, &ranCompletion));
- ASSERT_OK(status.getStatus());
- ASSERT_TRUE(ranCompletion);
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- ASSERT_FALSE(net->hasReadyRequests());
- net->exitNetwork();
+private:
+ void _run(ReplicationExecutor* executor) {
+ _result = _sgr->run(_executor);
}
+ ScatterGatherRunner* _sgr;
+ ReplicationExecutor* _executor;
+ Status _result;
+ std::unique_ptr<stdx::thread> _thread;
+};
+
+// Simple onCompletion function which will toggle a bool, so that we can check whether the
+// onCompletion function ran when expected.
+void onCompletionTestFunction(bool* ran) {
+ *ran = true;
+}
+
+// Confirm that running via start() will finish and run the onCompletion function once sufficient
+// responses have been received.
+// Also confirm that deleting both the ScatterGatherTestAlgorithm and ScatterGatherRunner while
+// scheduled callbacks still exist is safe (exercised by the ASAN builder) once the algorithm has
+// completed.
+TEST_F(ScatterGatherTest, DeleteAlgorithmAfterItHasCompleted) {
+ ScatterGatherTestAlgorithm* sga = new ScatterGatherTestAlgorithm();
+ ScatterGatherRunner* sgr = new ScatterGatherRunner(sga);
+ bool ranCompletion = false;
+ StatusWith<ReplicationExecutor::EventHandle> status =
+ sgr->start(getExecutor(), stdx::bind(&onCompletionTestFunction, &ranCompletion));
+ ASSERT_OK(status.getStatus());
+ ASSERT_FALSE(ranCompletion);
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi,
+ net->now() + Seconds(2),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
+ ASSERT_FALSE(ranCompletion);
+
+ noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi,
+ net->now() + Seconds(2),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
+ ASSERT_FALSE(ranCompletion);
+
+ noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi,
+ net->now() + Seconds(5),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
+ ASSERT_FALSE(ranCompletion);
+
+ net->runUntil(net->now() + Seconds(2));
+ ASSERT_TRUE(ranCompletion);
+
+ delete sga;
+ delete sgr;
+
+ net->runReadyNetworkOperations();
+
+ net->exitNetwork();
+}
+
+// Confirm that shutting the ReplicationExecutor down before calling run() will cause run()
+// to return ErrorCodes::ShutdownInProgress.
+TEST_F(ScatterGatherTest, ShutdownExecutorBeforeRun) {
+ ScatterGatherTestAlgorithm sga;
+ ScatterGatherRunner sgr(&sga);
+ getExecutor()->shutdown();
+ sga.finish();
+ Status status = sgr.run(getExecutor());
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status);
+}
+
+// Confirm that shutting the ReplicationExecutor down after calling run(), but before run()
+// finishes will cause run() to return Status::OK().
+TEST_F(ScatterGatherTest, ShutdownExecutorAfterRun) {
+ ScatterGatherTestAlgorithm sga;
+ ScatterGatherRunner sgr(&sga);
+ ScatterGatherRunnerRunner sgrr(&sgr, getExecutor());
+ sgrr.run();
+ // need to wait for the scatter-gather to be scheduled in the executor
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ net->blackHole(noi);
+ net->exitNetwork();
+ getExecutor()->shutdown();
+ Status status = sgrr.getResult();
+ ASSERT_OK(status);
+}
+
+// Confirm that shutting the ReplicationExecutor down before calling start() will cause start()
+// to return ErrorCodes::ShutdownInProgress and should not run onCompletion().
+TEST_F(ScatterGatherTest, ShutdownExecutorBeforeStart) {
+ ScatterGatherTestAlgorithm sga;
+ ScatterGatherRunner sgr(&sga);
+ getExecutor()->shutdown();
+ bool ranCompletion = false;
+ StatusWith<ReplicationExecutor::EventHandle> status =
+ sgr.start(getExecutor(), stdx::bind(&onCompletionTestFunction, &ranCompletion));
+ sga.finish();
+ ASSERT_FALSE(ranCompletion);
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, status.getStatus());
+}
+
+// Confirm that shutting the ReplicationExecutor down after calling start() will cause start()
+// to return Status::OK and should not run onCompletion().
+TEST_F(ScatterGatherTest, ShutdownExecutorAfterStart) {
+ ScatterGatherTestAlgorithm sga;
+ ScatterGatherRunner sgr(&sga);
+ bool ranCompletion = false;
+ StatusWith<ReplicationExecutor::EventHandle> status =
+ sgr.start(getExecutor(), stdx::bind(&onCompletionTestFunction, &ranCompletion));
+ getExecutor()->shutdown();
+ sga.finish();
+ ASSERT_FALSE(ranCompletion);
+ ASSERT_OK(status.getStatus());
+}
+
+// Confirm that responses are not processed once sufficient responses have been received.
+TEST_F(ScatterGatherTest, DoNotProcessMoreThanSufficientResponses) {
+ ScatterGatherTestAlgorithm sga;
+ ScatterGatherRunner sgr(&sga);
+ bool ranCompletion = false;
+ StatusWith<ReplicationExecutor::EventHandle> status =
+ sgr.start(getExecutor(), stdx::bind(&onCompletionTestFunction, &ranCompletion));
+ ASSERT_OK(status.getStatus());
+ ASSERT_FALSE(ranCompletion);
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi,
+ net->now() + Seconds(2),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
+ ASSERT_FALSE(ranCompletion);
+
+ noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi,
+ net->now() + Seconds(2),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
+ ASSERT_FALSE(ranCompletion);
+
+ noi = net->getNextReadyRequest();
+ net->scheduleResponse(noi,
+ net->now() + Seconds(5),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
+ ASSERT_FALSE(ranCompletion);
+
+ net->runUntil(net->now() + Seconds(2));
+ ASSERT_TRUE(ranCompletion);
+
+ net->runReadyNetworkOperations();
+    // the third response should not be processed, so the count should not increment
+ ASSERT_EQUALS(2, sga.getResponseCount());
+
+ net->exitNetwork();
+}
+
+// Confirm that starting with sufficient responses received will immediately complete.
+TEST_F(ScatterGatherTest, DoNotCreateCallbacksIfHasSufficientResponsesReturnsTrueImmediately) {
+ ScatterGatherTestAlgorithm sga;
+ // set hasReceivedSufficientResponses to return true before the run starts
+ sga.finish();
+ ScatterGatherRunner sgr(&sga);
+ bool ranCompletion = false;
+ StatusWith<ReplicationExecutor::EventHandle> status =
+ sgr.start(getExecutor(), stdx::bind(&onCompletionTestFunction, &ranCompletion));
+ ASSERT_OK(status.getStatus());
+ ASSERT_TRUE(ranCompletion);
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ ASSERT_FALSE(net->hasReadyRequests());
+ net->exitNetwork();
+}
+
#if 0
// TODO Enable this test once we have a way to test for invariants.
@@ -386,41 +366,35 @@ namespace {
net->exitNetwork();
ASSERT_FALSE(ranCompletion);
}
-#endif // 0
-
- // Confirm that running via run() will finish once sufficient responses have been received.
- TEST_F(ScatterGatherTest, SuccessfulScatterGatherViaRun) {
- ScatterGatherTestAlgorithm sga;
- ScatterGatherRunner sgr(&sga);
- ScatterGatherRunnerRunner sgrr(&sgr, getExecutor());
- sgrr.run();
-
- NetworkInterfaceMock* net = getNet();
- net->enterNetwork();
- NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now(),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(10))));
- net->runReadyNetworkOperations();
-
- noi = net->getNextReadyRequest();
- net->blackHole(noi);
- net->runReadyNetworkOperations();
-
- noi = net->getNextReadyRequest();
- net->scheduleResponse(noi,
- net->now(),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1),
- Milliseconds(10))));
- net->runReadyNetworkOperations();
- net->exitNetwork();
-
- Status status = sgrr.getResult();
- ASSERT_OK(status);
- }
+#endif // 0
+
+// Confirm that running via run() will finish once sufficient responses have been received.
+TEST_F(ScatterGatherTest, SuccessfulScatterGatherViaRun) {
+ ScatterGatherTestAlgorithm sga;
+ ScatterGatherRunner sgr(&sga);
+ ScatterGatherRunnerRunner sgrr(&sgr, getExecutor());
+ sgrr.run();
+
+ NetworkInterfaceMock* net = getNet();
+ net->enterNetwork();
+ NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
+ net->scheduleResponse(
+ noi, net->now(), ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
+ net->runReadyNetworkOperations();
+
+ noi = net->getNextReadyRequest();
+ net->blackHole(noi);
+ net->runReadyNetworkOperations();
+
+ noi = net->getNextReadyRequest();
+ net->scheduleResponse(
+ noi, net->now(), ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
+ net->runReadyNetworkOperations();
+ net->exitNetwork();
+
+ Status status = sgrr.getResult();
+ ASSERT_OK(status);
+}
} // namespace
} // namespace repl
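The tests above all exercise the same NetworkInterfaceMock flow; here is a condensed skeleton of that pattern (response payloads and delays are illustrative only):

NetworkInterfaceMock* net = getNet();
net->enterNetwork();  // take control of the simulated network
NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
net->scheduleResponse(noi,
                      net->now() + Seconds(2),  // deliver the reply at a simulated time
                      ResponseStatus(RemoteCommandResponse(BSON("ok" << 1), Milliseconds(10))));
net->runUntil(net->now() + Seconds(2));  // advance the mock clock, firing scheduled responses
net->runReadyNetworkOperations();        // drain anything still ready
net->exitNetwork();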
diff --git a/src/mongo/db/repl/storage_interface.cpp b/src/mongo/db/repl/storage_interface.cpp
index c09d76ad9ff..0b883d827b9 100644
--- a/src/mongo/db/repl/storage_interface.cpp
+++ b/src/mongo/db/repl/storage_interface.cpp
@@ -35,8 +35,8 @@
namespace mongo {
namespace repl {
- StorageInterface::StorageInterface() {}
- StorageInterface::~StorageInterface() {}
+StorageInterface::StorageInterface() {}
+StorageInterface::~StorageInterface() {}
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/storage_interface.h b/src/mongo/db/repl/storage_interface.h
index df51692b2f1..1f9d0576741 100644
--- a/src/mongo/db/repl/storage_interface.h
+++ b/src/mongo/db/repl/storage_interface.h
@@ -32,28 +32,26 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
+/**
+ * Storage interface used by the ReplicationExecutor inside mongod to support the
+ * ReplicationExecutor's ability to take database locks.
+ */
+class StorageInterface {
+public:
+ virtual ~StorageInterface();
+
/**
- * Storage interface used by used by the ReplicationExecutor inside mongod for supporting
- * ReplicationExectutor's ability to take database locks.
+ * Creates an operation context for running database operations.
*/
- class StorageInterface {
- public:
- virtual ~StorageInterface();
-
- /**
- * Creates an operation context for running database operations.
- */
- virtual OperationContext* createOperationContext() = 0;
-
- protected:
-
- StorageInterface();
+ virtual OperationContext* createOperationContext() = 0;
- };
+protected:
+ StorageInterface();
+};
} // namespace repl
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 73a14ce6330..a58f85964b4 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -39,16 +39,16 @@
namespace mongo {
namespace repl {
- StorageInterfaceImpl::StorageInterfaceImpl() : StorageInterface() {}
- StorageInterfaceImpl::~StorageInterfaceImpl() { }
-
- OperationContext* StorageInterfaceImpl::createOperationContext() {
- if (!ClientBasic::getCurrent()) {
- Client::initThreadIfNotAlready();
- AuthorizationSession::get(*ClientBasic::getCurrent())->grantInternalAuthorization();
- }
- return new OperationContextImpl();
+StorageInterfaceImpl::StorageInterfaceImpl() : StorageInterface() {}
+StorageInterfaceImpl::~StorageInterfaceImpl() {}
+
+OperationContext* StorageInterfaceImpl::createOperationContext() {
+ if (!ClientBasic::getCurrent()) {
+ Client::initThreadIfNotAlready();
+ AuthorizationSession::get(*ClientBasic::getCurrent())->grantInternalAuthorization();
}
+ return new OperationContextImpl();
+}
} // namespace repl
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/repl/storage_interface_impl.h b/src/mongo/db/repl/storage_interface_impl.h
index 24cc8268f17..fa378e537fd 100644
--- a/src/mongo/db/repl/storage_interface_impl.h
+++ b/src/mongo/db/repl/storage_interface_impl.h
@@ -33,18 +33,17 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
- class StorageInterfaceImpl : public StorageInterface {
- public:
- explicit StorageInterfaceImpl();
- virtual ~StorageInterfaceImpl();
+class StorageInterfaceImpl : public StorageInterface {
+public:
+ explicit StorageInterfaceImpl();
+ virtual ~StorageInterfaceImpl();
- OperationContext* createOperationContext() override;
-
- };
+ OperationContext* createOperationContext() override;
+};
} // namespace repl
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/repl/storage_interface_mock.cpp b/src/mongo/db/repl/storage_interface_mock.cpp
index 4a6f4a7a293..b620c65276d 100644
--- a/src/mongo/db/repl/storage_interface_mock.cpp
+++ b/src/mongo/db/repl/storage_interface_mock.cpp
@@ -37,13 +37,13 @@
namespace mongo {
namespace repl {
- StorageInterfaceMock::StorageInterfaceMock() {}
+StorageInterfaceMock::StorageInterfaceMock() {}
- StorageInterfaceMock::~StorageInterfaceMock() { }
+StorageInterfaceMock::~StorageInterfaceMock() {}
- OperationContext* StorageInterfaceMock::createOperationContext() {
- return new OperationContextReplMock();
- }
+OperationContext* StorageInterfaceMock::createOperationContext() {
+ return new OperationContextReplMock();
+}
} // namespace repl
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h
index 4bd3e63ec9d..8ce76adb642 100644
--- a/src/mongo/db/repl/storage_interface_mock.h
+++ b/src/mongo/db/repl/storage_interface_mock.h
@@ -33,17 +33,17 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
- class StorageInterfaceMock : public StorageInterface {
- public:
- explicit StorageInterfaceMock();
- virtual ~StorageInterfaceMock();
+class StorageInterfaceMock : public StorageInterface {
+public:
+ explicit StorageInterfaceMock();
+ virtual ~StorageInterfaceMock();
- OperationContext* createOperationContext() override;
- };
+ OperationContext* createOperationContext() override;
+};
} // namespace repl
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 49c70c3c2b7..602523a5471 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -51,161 +51,158 @@
namespace mongo {
- using std::endl;
- using std::string;
+using std::endl;
+using std::string;
namespace repl {
- SyncSourceFeedback::SyncSourceFeedback() : _positionChanged(false),
- _shutdownSignaled(false) {}
- SyncSourceFeedback::~SyncSourceFeedback() {}
+SyncSourceFeedback::SyncSourceFeedback() : _positionChanged(false), _shutdownSignaled(false) {}
+SyncSourceFeedback::~SyncSourceFeedback() {}
- void SyncSourceFeedback::_resetConnection() {
- LOG(1) << "resetting connection in sync source feedback";
- _connection.reset();
- }
+void SyncSourceFeedback::_resetConnection() {
+ LOG(1) << "resetting connection in sync source feedback";
+ _connection.reset();
+}
- bool SyncSourceFeedback::replAuthenticate() {
- if (!getGlobalAuthorizationManager()->isAuthEnabled())
- return true;
+bool SyncSourceFeedback::replAuthenticate() {
+ if (!getGlobalAuthorizationManager()->isAuthEnabled())
+ return true;
- if (!isInternalAuthSet())
- return false;
- return authenticateInternalUser(_connection.get());
- }
+ if (!isInternalAuthSet())
+ return false;
+ return authenticateInternalUser(_connection.get());
+}
- bool SyncSourceFeedback::_connect(OperationContext* txn, const HostAndPort& host) {
- if (hasConnection()) {
- return true;
- }
- log() << "setting syncSourceFeedback to " << host.toString();
- _connection.reset(new DBClientConnection(false, OplogReader::tcp_timeout));
- string errmsg;
- try {
- if (!_connection->connect(host, errmsg) ||
- (getGlobalAuthorizationManager()->isAuthEnabled() && !replAuthenticate())) {
- _resetConnection();
- log() << errmsg << endl;
- return false;
- }
- }
- catch (const DBException& e) {
- error() << "Error connecting to " << host.toString() << ": " << e.what();
+bool SyncSourceFeedback::_connect(OperationContext* txn, const HostAndPort& host) {
+ if (hasConnection()) {
+ return true;
+ }
+ log() << "setting syncSourceFeedback to " << host.toString();
+ _connection.reset(new DBClientConnection(false, OplogReader::tcp_timeout));
+ string errmsg;
+ try {
+ if (!_connection->connect(host, errmsg) ||
+ (getGlobalAuthorizationManager()->isAuthEnabled() && !replAuthenticate())) {
_resetConnection();
+ log() << errmsg << endl;
return false;
}
-
- return hasConnection();
+ } catch (const DBException& e) {
+ error() << "Error connecting to " << host.toString() << ": " << e.what();
+ _resetConnection();
+ return false;
}
- void SyncSourceFeedback::forwardSlaveProgress() {
- stdx::unique_lock<stdx::mutex> lock(_mtx);
- _positionChanged = true;
- _cond.notify_all();
- }
+ return hasConnection();
+}
+
+void SyncSourceFeedback::forwardSlaveProgress() {
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
+ _positionChanged = true;
+ _cond.notify_all();
+}
- Status SyncSourceFeedback::updateUpstream(OperationContext* txn) {
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (replCoord->getMemberState().primary()) {
- // primary has no one to update to
+Status SyncSourceFeedback::updateUpstream(OperationContext* txn) {
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ if (replCoord->getMemberState().primary()) {
+ // primary has no one to update to
+ return Status::OK();
+ }
+ BSONObjBuilder cmd;
+ {
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
+ // the command could not be created, likely because the node was removed from the set
+ if (!replCoord->prepareReplSetUpdatePositionCommand(&cmd)) {
return Status::OK();
}
- BSONObjBuilder cmd;
- {
- stdx::unique_lock<stdx::mutex> lock(_mtx);
- // the command could not be created, likely because the node was removed from the set
- if (!replCoord->prepareReplSetUpdatePositionCommand(&cmd)) {
- return Status::OK();
- }
- }
- BSONObj res;
+ }
+ BSONObj res;
+
+ LOG(2) << "Sending slave oplog progress to upstream updater: " << cmd.done();
+ try {
+ _connection->runCommand("admin", cmd.obj(), res);
+ } catch (const DBException& e) {
+ log() << "SyncSourceFeedback error sending update: " << e.what() << endl;
+ // blacklist sync target for .5 seconds and find a new one
+ replCoord->blacklistSyncSource(_syncTarget, Date_t::now() + Milliseconds(500));
+ BackgroundSync::get()->clearSyncTarget();
+ _resetConnection();
+ return e.toStatus();
+ }
- LOG(2) << "Sending slave oplog progress to upstream updater: " << cmd.done();
- try {
- _connection->runCommand("admin", cmd.obj(), res);
- }
- catch (const DBException& e) {
- log() << "SyncSourceFeedback error sending update: " << e.what() << endl;
- // blacklist sync target for .5 seconds and find a new one
+ Status status = Command::getStatusFromCommandResult(res);
+ if (!status.isOK()) {
+ log() << "SyncSourceFeedback error sending update, response: " << res.toString() << endl;
+ // blacklist sync target for .5 seconds and find a new one, unless we were rejected due
+ // to the syncsource having a newer config
+ if (status != ErrorCodes::InvalidReplicaSetConfig || res["cfgver"].eoo() ||
+ res["cfgver"].numberLong() < replCoord->getConfig().getConfigVersion()) {
replCoord->blacklistSyncSource(_syncTarget, Date_t::now() + Milliseconds(500));
BackgroundSync::get()->clearSyncTarget();
_resetConnection();
- return e.toStatus();
- }
-
- Status status = Command::getStatusFromCommandResult(res);
- if (!status.isOK()) {
- log() << "SyncSourceFeedback error sending update, response: " << res.toString() <<endl;
- // blacklist sync target for .5 seconds and find a new one, unless we were rejected due
- // to the syncsource having a newer config
- if (status != ErrorCodes::InvalidReplicaSetConfig || res["cfgver"].eoo() ||
- res["cfgver"].numberLong() < replCoord->getConfig().getConfigVersion()) {
- replCoord->blacklistSyncSource(_syncTarget, Date_t::now() + Milliseconds(500));
- BackgroundSync::get()->clearSyncTarget();
- _resetConnection();
- }
}
-
- return status;
}
- void SyncSourceFeedback::shutdown() {
- stdx::unique_lock<stdx::mutex> lock(_mtx);
- _shutdownSignaled = true;
- _cond.notify_all();
- }
+ return status;
+}
- void SyncSourceFeedback::run() {
- Client::initThread("SyncSourceFeedback");
+void SyncSourceFeedback::shutdown() {
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
+ _shutdownSignaled = true;
+ _cond.notify_all();
+}
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- while (true) { // breaks once _shutdownSignaled is true
- {
- stdx::unique_lock<stdx::mutex> lock(_mtx);
- while (!_positionChanged && !_shutdownSignaled) {
- _cond.wait(lock);
- }
+void SyncSourceFeedback::run() {
+ Client::initThread("SyncSourceFeedback");
- if (_shutdownSignaled) {
- break;
- }
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ while (true) { // breaks once _shutdownSignaled is true
+ {
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
+ while (!_positionChanged && !_shutdownSignaled) {
+ _cond.wait(lock);
+ }
- _positionChanged = false;
+ if (_shutdownSignaled) {
+ break;
}
- auto txn = cc().makeOperationContext();
- MemberState state = replCoord->getMemberState();
- if (state.primary() || state.startup()) {
- _resetConnection();
+ _positionChanged = false;
+ }
+
+ auto txn = cc().makeOperationContext();
+ MemberState state = replCoord->getMemberState();
+ if (state.primary() || state.startup()) {
+ _resetConnection();
+ continue;
+ }
+ const HostAndPort target = BackgroundSync::get()->getSyncTarget();
+ if (_syncTarget != target) {
+ _resetConnection();
+ _syncTarget = target;
+ }
+ if (!hasConnection()) {
+ // fix connection if need be
+ if (target.empty()) {
+ sleepmillis(500);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
+ _positionChanged = true;
continue;
}
- const HostAndPort target = BackgroundSync::get()->getSyncTarget();
- if (_syncTarget != target) {
- _resetConnection();
- _syncTarget = target;
- }
- if (!hasConnection()) {
- // fix connection if need be
- if (target.empty()) {
- sleepmillis(500);
- stdx::unique_lock<stdx::mutex> lock(_mtx);
- _positionChanged = true;
- continue;
- }
- if (!_connect(txn.get(), target)) {
- sleepmillis(500);
- stdx::unique_lock<stdx::mutex> lock(_mtx);
- _positionChanged = true;
- continue;
- }
- }
- Status status = updateUpstream(txn.get());
- if (!status.isOK()) {
+ if (!_connect(txn.get(), target)) {
sleepmillis(500);
stdx::unique_lock<stdx::mutex> lock(_mtx);
_positionChanged = true;
+ continue;
}
}
+ Status status = updateUpstream(txn.get());
+ if (!status.isOK()) {
+ sleepmillis(500);
+ stdx::unique_lock<stdx::mutex> lock(_mtx);
+ _positionChanged = true;
+ }
}
-} // namespace repl
-} // namespace mongo
+}
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/sync_source_feedback.h b/src/mongo/db/repl/sync_source_feedback.h
index ebbd55b1dbf..8e08fcf25b0 100644
--- a/src/mongo/db/repl/sync_source_feedback.h
+++ b/src/mongo/db/repl/sync_source_feedback.h
@@ -36,59 +36,59 @@
#include "mongo/util/net/hostandport.h"
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
- class SyncSourceFeedback {
- public:
- SyncSourceFeedback();
- ~SyncSourceFeedback();
+class SyncSourceFeedback {
+public:
+ SyncSourceFeedback();
+ ~SyncSourceFeedback();
- /// Notifies the SyncSourceFeedbackThread to wake up and send an update upstream of slave
- /// replication progress.
- void forwardSlaveProgress();
+ /// Notifies the SyncSourceFeedbackThread to wake up and send an update upstream of slave
+ /// replication progress.
+ void forwardSlaveProgress();
- /// Loops continuously until shutdown() is called, passing updates when they are present.
- void run();
+ /// Loops continuously until shutdown() is called, passing updates when they are present.
+ void run();
- /// Signals the run() method to terminate.
- void shutdown();
+ /// Signals the run() method to terminate.
+ void shutdown();
- private:
- void _resetConnection();
+private:
+ void _resetConnection();
- /**
- * Authenticates _connection using the server's cluster-membership credentials.
- *
- * Returns true on successful authentication.
- */
- bool replAuthenticate();
+ /**
+ * Authenticates _connection using the server's cluster-membership credentials.
+ *
+ * Returns true on successful authentication.
+ */
+ bool replAuthenticate();
- /* Inform the sync target of our current position in the oplog, as well as the positions
- * of all secondaries chained through us.
- */
- Status updateUpstream(OperationContext* txn);
+ /* Inform the sync target of our current position in the oplog, as well as the positions
+ * of all secondaries chained through us.
+ */
+ Status updateUpstream(OperationContext* txn);
- bool hasConnection() {
- return _connection.get();
- }
+ bool hasConnection() {
+ return _connection.get();
+ }
- /// Connect to sync target.
- bool _connect(OperationContext* txn, const HostAndPort& host);
+ /// Connect to sync target.
+ bool _connect(OperationContext* txn, const HostAndPort& host);
- // the member we are currently syncing from
- HostAndPort _syncTarget;
- // our connection to our sync target
- std::unique_ptr<DBClientConnection> _connection;
- // protects cond, _shutdownSignaled, and _positionChanged.
- stdx::mutex _mtx;
- // used to alert our thread of changes which need to be passed up the chain
- stdx::condition_variable _cond;
- // used to indicate a position change which has not yet been pushed along
- bool _positionChanged;
- // Once this is set to true the _run method will terminate
- bool _shutdownSignaled;
- };
-} // namespace repl
-} // namespace mongo
+ // the member we are currently syncing from
+ HostAndPort _syncTarget;
+ // our connection to our sync target
+ std::unique_ptr<DBClientConnection> _connection;
+ // protects cond, _shutdownSignaled, and _positionChanged.
+ stdx::mutex _mtx;
+ // used to alert our thread of changes which need to be passed up the chain
+ stdx::condition_variable _cond;
+ // used to indicate a position change which has not yet been pushed along
+ bool _positionChanged;
+ // Once this is set to true the _run method will terminate
+ bool _shutdownSignaled;
+};
+} // namespace repl
+} // namespace mongo
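A rough sketch of the intended lifecycle of this class, inferred from the method comments above; the explicit thread management shown here is an assumption for illustration, not how the server actually wires SyncSourceFeedback up.

SyncSourceFeedback feedback;
stdx::thread feedbackThread(stdx::bind(&SyncSourceFeedback::run, &feedback));

// Whenever local replication progress advances, wake the loop so it reports upstream.
feedback.forwardSlaveProgress();

// On shutdown, signal run() to exit its loop, then join the thread.
feedback.shutdown();
feedbackThread.join();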
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 2bfe05728fe..2a1e4463139 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -66,260 +66,247 @@
namespace mongo {
- using std::endl;
+using std::endl;
namespace repl {
#if defined(MONGO_PLATFORM_64)
- const int replWriterThreadCount = 16;
- const int replPrefetcherThreadCount = 16;
+const int replWriterThreadCount = 16;
+const int replPrefetcherThreadCount = 16;
#elif defined(MONGO_PLATFORM_32)
- const int replWriterThreadCount = 2;
- const int replPrefetcherThreadCount = 2;
+const int replWriterThreadCount = 2;
+const int replPrefetcherThreadCount = 2;
#else
#error need to include something that defines MONGO_PLATFORM_XX
#endif
- static Counter64 opsAppliedStats;
+static Counter64 opsAppliedStats;
- //The oplog entries applied
- static ServerStatusMetricField<Counter64> displayOpsApplied( "repl.apply.ops",
- &opsAppliedStats );
+// The oplog entries applied
+static ServerStatusMetricField<Counter64> displayOpsApplied("repl.apply.ops", &opsAppliedStats);
- MONGO_FP_DECLARE(rsSyncApplyStop);
+MONGO_FP_DECLARE(rsSyncApplyStop);
- // Number and time of each ApplyOps worker pool round
- static TimerStats applyBatchStats;
- static ServerStatusMetricField<TimerStats> displayOpBatchesApplied(
- "repl.apply.batches",
- &applyBatchStats );
- void initializePrefetchThread() {
- if (!ClientBasic::getCurrent()) {
- Client::initThreadIfNotAlready();
- AuthorizationSession::get(cc())->grantInternalAuthorization();
- }
+// Number and time of each ApplyOps worker pool round
+static TimerStats applyBatchStats;
+static ServerStatusMetricField<TimerStats> displayOpBatchesApplied("repl.apply.batches",
+ &applyBatchStats);
+void initializePrefetchThread() {
+ if (!ClientBasic::getCurrent()) {
+ Client::initThreadIfNotAlready();
+ AuthorizationSession::get(cc())->grantInternalAuthorization();
}
- namespace {
- bool isCrudOpType( const char* field ) {
- switch ( field[0] ) {
- case 'd':
- case 'i':
- case 'u':
- return field[1] == 0;
- }
- return false;
- }
+}
+namespace {
+bool isCrudOpType(const char* field) {
+ switch (field[0]) {
+ case 'd':
+ case 'i':
+ case 'u':
+ return field[1] == 0;
}
+ return false;
+}
+}
- SyncTail::SyncTail(BackgroundSyncInterface *q, MultiSyncApplyFunc func) :
- _networkQueue(q),
- _applyFunc(func),
- _writerPool(replWriterThreadCount, "repl writer worker "),
- _prefetcherPool(replPrefetcherThreadCount, "repl prefetch worker ")
- {}
-
- SyncTail::~SyncTail() {}
-
- bool SyncTail::peek(BSONObj* op) {
- return _networkQueue->peek(op);
- }
+SyncTail::SyncTail(BackgroundSyncInterface* q, MultiSyncApplyFunc func)
+ : _networkQueue(q),
+ _applyFunc(func),
+ _writerPool(replWriterThreadCount, "repl writer worker "),
+ _prefetcherPool(replPrefetcherThreadCount, "repl prefetch worker ") {}
- // static
- Status SyncTail::syncApply(OperationContext* txn,
- const BSONObj &op,
- bool convertUpdateToUpsert,
- ApplyOperationInLockFn applyOperationInLock,
- ApplyCommandInLockFn applyCommandInLock,
- IncrementOpsAppliedStatsFn incrementOpsAppliedStats) {
+SyncTail::~SyncTail() {}
- if (inShutdown()) {
- return Status::OK();
- }
+bool SyncTail::peek(BSONObj* op) {
+ return _networkQueue->peek(op);
+}
- // Count each log op application as a separate operation, for reporting purposes
- CurOp individualOp(txn);
+// static
+Status SyncTail::syncApply(OperationContext* txn,
+ const BSONObj& op,
+ bool convertUpdateToUpsert,
+ ApplyOperationInLockFn applyOperationInLock,
+ ApplyCommandInLockFn applyCommandInLock,
+ IncrementOpsAppliedStatsFn incrementOpsAppliedStats) {
+ if (inShutdown()) {
+ return Status::OK();
+ }
- const char *ns = op.getStringField("ns");
- verify(ns);
+ // Count each log op application as a separate operation, for reporting purposes
+ CurOp individualOp(txn);
- const char* opType = op["op"].valuestrsafe();
+ const char* ns = op.getStringField("ns");
+ verify(ns);
- bool isCommand(opType[0] == 'c');
- bool isNoOp(opType[0] == 'n');
+ const char* opType = op["op"].valuestrsafe();
- if ( (*ns == '\0') || (*ns == '.') ) {
- // this is ugly
- // this is often a no-op
- // but can't be 100% sure
- if (!isNoOp) {
- error() << "skipping bad op in oplog: " << op.toString();
- }
- return Status::OK();
- }
+ bool isCommand(opType[0] == 'c');
+ bool isNoOp(opType[0] == 'n');
- if (isCommand) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- // a command may need a global write lock. so we will conservatively go
- // ahead and grab one here. suboptimal. :-(
- Lock::GlobalWrite globalWriteLock(txn->lockState());
-
- // special case apply for commands to avoid implicit database creation
- Status status = applyCommandInLock(txn, op);
- incrementOpsAppliedStats();
- return status;
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_command", ns);
+ if ((*ns == '\0') || (*ns == '.')) {
+ // this is ugly
+ // this is often a no-op
+ // but can't be 100% sure
+ if (!isNoOp) {
+ error() << "skipping bad op in oplog: " << op.toString();
}
+ return Status::OK();
+ }
- auto applyOp = [&](Database* db) {
- // For non-initial-sync, we convert updates to upserts
- // to suppress errors when replaying oplog entries.
- txn->setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(txn);
+ if (isCommand) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ // a command may need a global write lock. so we will conservatively go
+ // ahead and grab one here. suboptimal. :-(
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
- Status status = applyOperationInLock(txn, db, op, convertUpdateToUpsert);
+ // special case apply for commands to avoid implicit database creation
+ Status status = applyCommandInLock(txn, op);
incrementOpsAppliedStats();
return status;
- };
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_command", ns);
+ }
+
+ auto applyOp = [&](Database* db) {
+ // For non-initial-sync, we convert updates to upserts
+ // to suppress errors when replaying oplog entries.
+ txn->setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(txn);
+
+ Status status = applyOperationInLock(txn, db, op, convertUpdateToUpsert);
+ incrementOpsAppliedStats();
+ return status;
+ };
- if (isNoOp ||
- (opType[0] == 'i' && nsToCollectionSubstring( ns ) == "system.indexes")) {
- auto opStr = isNoOp ? "syncApply_noop" : "syncApply_indexBuild";
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
- OldClientContext ctx(txn, ns);
- return applyOp(ctx.db());
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, opStr, ns);
+ if (isNoOp || (opType[0] == 'i' && nsToCollectionSubstring(ns) == "system.indexes")) {
+ auto opStr = isNoOp ? "syncApply_noop" : "syncApply_indexBuild";
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
+ OldClientContext ctx(txn, ns);
+ return applyOp(ctx.db());
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, opStr, ns);
+ }
- if (isCrudOpType(opType)) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- // DB lock always acquires the global lock
- std::unique_ptr<Lock::DBLock> dbLock;
- std::unique_ptr<Lock::CollectionLock> collectionLock;
- std::unique_ptr<OldClientContext> ctx;
+ if (isCrudOpType(opType)) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ // DB lock always acquires the global lock
+ std::unique_ptr<Lock::DBLock> dbLock;
+ std::unique_ptr<Lock::CollectionLock> collectionLock;
+ std::unique_ptr<OldClientContext> ctx;
- auto dbName = nsToDatabaseSubstring(ns);
+ auto dbName = nsToDatabaseSubstring(ns);
- auto resetLocks = [&](LockMode mode) {
- collectionLock.reset();
- dbLock.reset(new Lock::DBLock(txn->lockState(), dbName, mode));
- collectionLock.reset(new Lock::CollectionLock(txn->lockState(), ns, mode));
- };
+ auto resetLocks = [&](LockMode mode) {
+ collectionLock.reset();
+ dbLock.reset(new Lock::DBLock(txn->lockState(), dbName, mode));
+ collectionLock.reset(new Lock::CollectionLock(txn->lockState(), ns, mode));
+ };
- resetLocks(MODE_IX);
- if (!dbHolder().get(txn, dbName)) {
- // need to create database, try again
+ resetLocks(MODE_IX);
+ if (!dbHolder().get(txn, dbName)) {
+ // need to create database, try again
+ resetLocks(MODE_X);
+ ctx.reset(new OldClientContext(txn, ns));
+ } else {
+ ctx.reset(new OldClientContext(txn, ns));
+ if (!ctx->db()->getCollection(ns)) {
+ // uh, oh, we need to create collection
+ // try again
+ ctx.reset();
resetLocks(MODE_X);
ctx.reset(new OldClientContext(txn, ns));
}
- else {
- ctx.reset(new OldClientContext(txn, ns));
- if (!ctx->db()->getCollection(ns)) {
- // uh, oh, we need to create collection
- // try again
- ctx.reset();
- resetLocks(MODE_X);
- ctx.reset(new OldClientContext(txn, ns));
- }
- }
+ }
- return applyOp(ctx->db());
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_CRUD", ns);
+ return applyOp(ctx->db());
}
-
- // unknown opType
- str::stream ss;
- ss << "bad opType '" << opType << "' in oplog entry: " << op.toString();
- error() << std::string(ss);
- return Status(ErrorCodes::BadValue, ss);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_CRUD", ns);
}
- Status SyncTail::syncApply(OperationContext* txn,
- const BSONObj &op,
- bool convertUpdateToUpsert) {
- return syncApply(txn,
- op,
- convertUpdateToUpsert,
- applyOperation_inlock,
- applyCommand_inlock,
- stdx::bind(&Counter64::increment, &opsAppliedStats, 1ULL));
- }
+ // unknown opType
+ str::stream ss;
+ ss << "bad opType '" << opType << "' in oplog entry: " << op.toString();
+ error() << std::string(ss);
+ return Status(ErrorCodes::BadValue, ss);
+}
+
+Status SyncTail::syncApply(OperationContext* txn, const BSONObj& op, bool convertUpdateToUpsert) {
+ return syncApply(txn,
+ op,
+ convertUpdateToUpsert,
+ applyOperation_inlock,
+ applyCommand_inlock,
+ stdx::bind(&Counter64::increment, &opsAppliedStats, 1ULL));
+}
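For reference, these are the oplog-entry shapes that syncApply() distinguishes above, with the field layout inferred from the accessors in this function and purely illustrative values:

// No-op ('n') entries, and entries whose ns is empty or starts with '.', are skipped outright.

// CRUD ops ('i', 'u', 'd') are applied under a database/collection IX lock, escalated to X
// when the database or collection must first be created.
BSONObj insertOp = BSON("op" << "i"
                             << "ns" << "test.coll"
                             << "o" << BSON("_id" << 1 << "x" << 1));

// Commands ('c') conservatively take the global write lock and go through applyCommandInLock.
BSONObj commandOp = BSON("op" << "c"
                              << "ns" << "test.$cmd"
                              << "o" << BSON("create" << "coll"));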
namespace {
- // The pool threads call this to prefetch each op
- void prefetchOp(const BSONObj& op) {
- initializePrefetchThread();
-
- const char *ns = op.getStringField("ns");
- if (ns && (ns[0] != '\0')) {
- try {
- // one possible tweak here would be to stay in the read lock for this database
- // for multiple prefetches if they are for the same database.
- OperationContextImpl txn;
- AutoGetCollectionForRead ctx(&txn, ns);
- Database* db = ctx.getDb();
- if (db) {
- prefetchPagesForReplicatedOp(&txn, db, op);
- }
- }
- catch (const DBException& e) {
- LOG(2) << "ignoring exception in prefetchOp(): " << e.what() << endl;
- }
- catch (const std::exception& e) {
- log() << "Unhandled std::exception in prefetchOp(): " << e.what() << endl;
- fassertFailed(16397);
+// The pool threads call this to prefetch each op
+void prefetchOp(const BSONObj& op) {
+ initializePrefetchThread();
+
+ const char* ns = op.getStringField("ns");
+ if (ns && (ns[0] != '\0')) {
+ try {
+ // one possible tweak here would be to stay in the read lock for this database
+ // for multiple prefetches if they are for the same database.
+ OperationContextImpl txn;
+ AutoGetCollectionForRead ctx(&txn, ns);
+ Database* db = ctx.getDb();
+ if (db) {
+ prefetchPagesForReplicatedOp(&txn, db, op);
}
+ } catch (const DBException& e) {
+ LOG(2) << "ignoring exception in prefetchOp(): " << e.what() << endl;
+ } catch (const std::exception& e) {
+ log() << "Unhandled std::exception in prefetchOp(): " << e.what() << endl;
+ fassertFailed(16397);
}
}
+}
- // Doles out all the work to the reader pool threads and waits for them to complete
- void prefetchOps(const std::deque<BSONObj>& ops,
- OldThreadPool* prefetcherPool) {
- invariant(prefetcherPool);
- for (std::deque<BSONObj>::const_iterator it = ops.begin();
- it != ops.end();
- ++it) {
- prefetcherPool->schedule(&prefetchOp, *it);
- }
- prefetcherPool->join();
+// Doles out all the work to the reader pool threads and waits for them to complete
+void prefetchOps(const std::deque<BSONObj>& ops, OldThreadPool* prefetcherPool) {
+ invariant(prefetcherPool);
+ for (std::deque<BSONObj>::const_iterator it = ops.begin(); it != ops.end(); ++it) {
+ prefetcherPool->schedule(&prefetchOp, *it);
}
+ prefetcherPool->join();
+}
- // Doles out all the work to the writer pool threads and waits for them to complete
- void applyOps(const std::vector< std::vector<BSONObj> >& writerVectors,
- OldThreadPool* writerPool,
- SyncTail::MultiSyncApplyFunc func,
- SyncTail* sync) {
- TimerHolder timer(&applyBatchStats);
- for (std::vector< std::vector<BSONObj> >::const_iterator it = writerVectors.begin();
- it != writerVectors.end();
- ++it) {
- if (!it->empty()) {
- writerPool->schedule(func, boost::cref(*it), sync);
- }
+// Doles out all the work to the writer pool threads and waits for them to complete
+void applyOps(const std::vector<std::vector<BSONObj>>& writerVectors,
+ OldThreadPool* writerPool,
+ SyncTail::MultiSyncApplyFunc func,
+ SyncTail* sync) {
+ TimerHolder timer(&applyBatchStats);
+ for (std::vector<std::vector<BSONObj>>::const_iterator it = writerVectors.begin();
+ it != writerVectors.end();
+ ++it) {
+ if (!it->empty()) {
+ writerPool->schedule(func, boost::cref(*it), sync);
}
- writerPool->join();
}
+ writerPool->join();
+}
- void fillWriterVectors(const std::deque<BSONObj>& ops,
- std::vector< std::vector<BSONObj> >* writerVectors) {
-
- for (std::deque<BSONObj>::const_iterator it = ops.begin();
- it != ops.end();
- ++it) {
- const BSONElement e = it->getField("ns");
- verify(e.type() == String);
- const char* ns = e.valuestr();
- int len = e.valuestrsize();
- uint32_t hash = 0;
- MurmurHash3_x86_32( ns, len, 0, &hash);
-
- const char* opType = it->getField( "op" ).valuestrsafe();
-
- if (getGlobalServiceContext()->getGlobalStorageEngine()->supportsDocLocking() &&
- isCrudOpType(opType)) {
- BSONElement id;
- switch (opType[0]) {
+void fillWriterVectors(const std::deque<BSONObj>& ops,
+ std::vector<std::vector<BSONObj>>* writerVectors) {
+ for (std::deque<BSONObj>::const_iterator it = ops.begin(); it != ops.end(); ++it) {
+ const BSONElement e = it->getField("ns");
+ verify(e.type() == String);
+ const char* ns = e.valuestr();
+ int len = e.valuestrsize();
+ uint32_t hash = 0;
+ MurmurHash3_x86_32(ns, len, 0, &hash);
+
+ const char* opType = it->getField("op").valuestrsafe();
+
+ if (getGlobalServiceContext()->getGlobalStorageEngine()->supportsDocLocking() &&
+ isCrudOpType(opType)) {
+ BSONElement id;
+ switch (opType[0]) {
case 'u':
id = it->getField("o2").Obj()["_id"];
break;
@@ -327,571 +314,554 @@ namespace {
case 'i':
id = it->getField("o").Obj()["_id"];
break;
- }
-
- const size_t idHash = BSONElement::Hasher()( id );
- MurmurHash3_x86_32(&idHash, sizeof(idHash), hash, &hash);
}
- (*writerVectors)[hash % writerVectors->size()].push_back(*it);
- }
- }
-
-} // namespace
-
- // Doles out all the work to the writer pool threads and waits for them to complete
- // static
- OpTime SyncTail::multiApply(OperationContext* txn,
- const OpQueue& ops,
- OldThreadPool* prefetcherPool,
- OldThreadPool* writerPool,
- MultiSyncApplyFunc func,
- SyncTail* sync,
- bool supportsWaitingUntilDurable) {
- invariant(prefetcherPool);
- invariant(writerPool);
- invariant(func);
- invariant(sync);
-
- if (getGlobalServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
- // Use a ThreadPool to prefetch all the operations in a batch.
- prefetchOps(ops.getDeque(), prefetcherPool);
+ const size_t idHash = BSONElement::Hasher()(id);
+ MurmurHash3_x86_32(&idHash, sizeof(idHash), hash, &hash);
}
-
- std::vector< std::vector<BSONObj> > writerVectors(replWriterThreadCount);
-
- fillWriterVectors(ops.getDeque(), &writerVectors);
- LOG(2) << "replication batch size is " << ops.getDeque().size() << endl;
- // We must grab this because we're going to grab write locks later.
- // We hold this mutex the entire time we're writing; it doesn't matter
- // because all readers are blocked anyway.
- stdx::lock_guard<SimpleMutex> fsynclk(filesLockedFsync);
- // stop all readers until we're done
- Lock::ParallelBatchWriterMode pbwm(txn->lockState());
-
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- if (replCoord->getMemberState().primary() &&
- !replCoord->isWaitingForApplierToDrain()) {
+ (*writerVectors)[hash % writerVectors->size()].push_back(*it);
+ }
+}
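
fillWriterVectors() keys each op to a writer thread by hashing its namespace and, when the storage engine supports document-level locking, folding in a hash of the op's _id as well: without document-level locking all ops for a namespace land on one writer thread, and with it all ops for a given document do, so per-document ordering is preserved. A rough sketch of that bucketing, with std::hash standing in for MurmurHash3_x86_32 and a hypothetical string form of the _id:

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <string>

    // Pick a writer bucket for one oplog entry. 'idStr' and 'docLocking' are
    // illustrative stand-ins for the BSON _id element and the storage-engine check.
    std::size_t pickWriterBucket(const std::string& ns,
                                 const std::string& idStr,
                                 bool docLocking,
                                 std::size_t numWriters) {
        std::size_t h = std::hash<std::string>{}(ns);           // same ns -> same base hash
        if (docLocking) {
            std::size_t idHash = std::hash<std::string>{}(idStr);
            h ^= idHash + 0x9e3779b9u + (h << 6) + (h >> 2);     // fold the _id hash in
        }
        return h % numWriters;                                   // index into writerVectors
    }

    int main() {
        std::cout << pickWriterBucket("test.coll", "42", true, 16) << '\n';
        return 0;
    }
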
- severe() << "attempting to replicate ops while primary";
- fassertFailed(28527);
- }
+} // namespace
- applyOps(writerVectors, writerPool, func, sync);
+// Doles out all the work to the writer pool threads and waits for them to complete
+// static
+OpTime SyncTail::multiApply(OperationContext* txn,
+ const OpQueue& ops,
+ OldThreadPool* prefetcherPool,
+ OldThreadPool* writerPool,
+ MultiSyncApplyFunc func,
+ SyncTail* sync,
+ bool supportsWaitingUntilDurable) {
+ invariant(prefetcherPool);
+ invariant(writerPool);
+ invariant(func);
+ invariant(sync);
+
+ if (getGlobalServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
+ // Use a ThreadPool to prefetch all the operations in a batch.
+ prefetchOps(ops.getDeque(), prefetcherPool);
+ }
- if (inShutdown()) {
- return OpTime();
- }
+ std::vector<std::vector<BSONObj>> writerVectors(replWriterThreadCount);
- const bool mustWaitUntilDurable = replCoord->isV1ElectionProtocol() &&
- supportsWaitingUntilDurable;
- if (mustWaitUntilDurable) {
- txn->recoveryUnit()->goingToWaitUntilDurable();
- }
+ fillWriterVectors(ops.getDeque(), &writerVectors);
+ LOG(2) << "replication batch size is " << ops.getDeque().size() << endl;
+ // We must grab this because we're going to grab write locks later.
+ // We hold this mutex the entire time we're writing; it doesn't matter
+ // because all readers are blocked anyway.
+ stdx::lock_guard<SimpleMutex> fsynclk(filesLockedFsync);
- OpTime lastOpTime = writeOpsToOplog(txn, ops.getDeque());
+ // stop all readers until we're done
+ Lock::ParallelBatchWriterMode pbwm(txn->lockState());
- if (mustWaitUntilDurable) {
- txn->recoveryUnit()->waitUntilDurable();
- }
- ReplClientInfo::forClient(txn->getClient()).setLastOp(lastOpTime);
- replCoord->setMyLastOptime(lastOpTime);
- setNewTimestamp(lastOpTime.getTimestamp());
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ if (replCoord->getMemberState().primary() && !replCoord->isWaitingForApplierToDrain()) {
+ severe() << "attempting to replicate ops while primary";
+ fassertFailed(28527);
+ }
- BackgroundSync::get()->notify(txn);
+ applyOps(writerVectors, writerPool, func, sync);
- return lastOpTime;
+ if (inShutdown()) {
+ return OpTime();
}
- void SyncTail::oplogApplication(OperationContext* txn, const OpTime& endOpTime) {
- _applyOplogUntil(txn, endOpTime);
+ const bool mustWaitUntilDurable =
+ replCoord->isV1ElectionProtocol() && supportsWaitingUntilDurable;
+ if (mustWaitUntilDurable) {
+ txn->recoveryUnit()->goingToWaitUntilDurable();
}
- /* applies oplog from "now" until endOpTime using the applier threads for initial sync*/
- void SyncTail::_applyOplogUntil(OperationContext* txn, const OpTime& endOpTime) {
- unsigned long long bytesApplied = 0;
- unsigned long long entriesApplied = 0;
- while (true) {
- OpQueue ops;
-
- while (!tryPopAndWaitForMore(txn, &ops, getGlobalReplicationCoordinator())) {
- // nothing came back last time, so go again
- if (ops.empty()) continue;
+ OpTime lastOpTime = writeOpsToOplog(txn, ops.getDeque());
- // Check if we reached the end
- const BSONObj currentOp = ops.back();
- const OpTime currentOpTime = extractOpTime(currentOp);
+ if (mustWaitUntilDurable) {
+ txn->recoveryUnit()->waitUntilDurable();
+ }
+ ReplClientInfo::forClient(txn->getClient()).setLastOp(lastOpTime);
+ replCoord->setMyLastOptime(lastOpTime);
+ setNewTimestamp(lastOpTime.getTimestamp());
- // When we reach the end return this batch
- if (currentOpTime == endOpTime) {
- break;
- }
- else if (currentOpTime > endOpTime) {
- severe() << "Applied past expected end " << endOpTime << " to " << currentOpTime
- << " without seeing it. Rollback?";
- fassertFailedNoTrace(18693);
- }
+ BackgroundSync::get()->notify(txn);
- // apply replication batch limits
- if (ops.getSize() > replBatchLimitBytes)
- break;
- if (ops.getDeque().size() > replBatchLimitOperations)
- break;
- };
+ return lastOpTime;
+}
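
multiApply() runs a fixed sequence per batch: prefetch the pages the ops will touch (MMAPv1 only), partition the batch into writer vectors, take the fsync mutex and the parallel-batch-writer lock so readers are blocked, apply in parallel, write the whole batch to the local oplog, optionally wait for the journal, and only then advance the node's last applied optime. A compressed sketch of that ordering, with hypothetical stand-in functions rather than the real MongoDB calls:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    using Batch = std::vector<std::string>;

    // Hypothetical stand-ins for the real steps.
    void applyInParallel(const Batch&) {}
    std::uint64_t writeBatchToOplog(const Batch& b) { return b.size(); }  // returns "last optime"
    void waitUntilDurable() {}

    std::uint64_t multiApplySketch(const Batch& batch, bool v1Protocol, bool canWaitDurable) {
        applyInParallel(batch);                               // writer pool, readers blocked
        std::uint64_t lastOpTime = writeBatchToOplog(batch);  // batch goes to the local oplog
        if (v1Protocol && canWaitDurable)                     // the mustWaitUntilDurable gate
            waitUntilDurable();                               // journal before advancing optime
        return lastOpTime;                                    // becomes "my last optime"
    }

    int main() {
        std::cout << multiApplySketch({"op1", "op2"}, true, true) << '\n';
        return 0;
    }
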
- if (ops.empty()) {
- severe() << "got no ops for batch...";
- fassertFailedNoTrace(18692);
- }
+void SyncTail::oplogApplication(OperationContext* txn, const OpTime& endOpTime) {
+ _applyOplogUntil(txn, endOpTime);
+}
- const BSONObj lastOp = ops.back().getOwned();
+/* applies oplog from "now" until endOpTime using the applier threads for initial sync*/
+void SyncTail::_applyOplogUntil(OperationContext* txn, const OpTime& endOpTime) {
+ unsigned long long bytesApplied = 0;
+ unsigned long long entriesApplied = 0;
+ while (true) {
+ OpQueue ops;
- // Tally operation information
- bytesApplied += ops.getSize();
- entriesApplied += ops.getDeque().size();
+ while (!tryPopAndWaitForMore(txn, &ops, getGlobalReplicationCoordinator())) {
+ // nothing came back last time, so go again
+ if (ops.empty())
+ continue;
- const OpTime lastOpTime = multiApply(txn,
- ops,
- &_prefetcherPool,
- &_writerPool,
- _applyFunc,
- this,
- supportsWaitingUntilDurable());
- if (inShutdown()) {
- return;
+ // Check if we reached the end
+ const BSONObj currentOp = ops.back();
+ const OpTime currentOpTime = extractOpTime(currentOp);
+
+ // When we reach the end return this batch
+ if (currentOpTime == endOpTime) {
+ break;
+ } else if (currentOpTime > endOpTime) {
+ severe() << "Applied past expected end " << endOpTime << " to " << currentOpTime
+ << " without seeing it. Rollback?";
+ fassertFailedNoTrace(18693);
}
- // if the last op applied was our end, return
- if (lastOpTime == endOpTime) {
- LOG(1) << "SyncTail applied " << entriesApplied
- << " entries (" << bytesApplied << " bytes)"
- << " and finished at opTime " << endOpTime;
- return;
- }
- } // end of while (true)
- }
+ // apply replication batch limits
+ if (ops.getSize() > replBatchLimitBytes)
+ break;
+ if (ops.getDeque().size() > replBatchLimitOperations)
+ break;
+ };
-namespace {
- void tryToGoLiveAsASecondary(OperationContext* txn, ReplicationCoordinator* replCoord) {
- if (replCoord->isInPrimaryOrSecondaryState()) {
- return;
+ if (ops.empty()) {
+ severe() << "got no ops for batch...";
+ fassertFailedNoTrace(18692);
}
- ScopedTransaction transaction(txn, MODE_S);
- Lock::GlobalRead readLock(txn->lockState());
+ const BSONObj lastOp = ops.back().getOwned();
- if (replCoord->getMaintenanceMode()) {
- // we're not actually going live
- return;
- }
+ // Tally operation information
+ bytesApplied += ops.getSize();
+ entriesApplied += ops.getDeque().size();
- // Only state RECOVERING can transition to SECONDARY.
- MemberState state(replCoord->getMemberState());
- if (!state.recovering()) {
+ const OpTime lastOpTime = multiApply(txn,
+ ops,
+ &_prefetcherPool,
+ &_writerPool,
+ _applyFunc,
+ this,
+ supportsWaitingUntilDurable());
+ if (inShutdown()) {
return;
}
- OpTime minvalid = getMinValid(txn);
- if (minvalid > replCoord->getMyLastOptime()) {
+ // if the last op applied was our end, return
+ if (lastOpTime == endOpTime) {
+ LOG(1) << "SyncTail applied " << entriesApplied << " entries (" << bytesApplied
+ << " bytes)"
+ << " and finished at opTime " << endOpTime;
return;
}
+ } // end of while (true)
+}
- bool worked = replCoord->setFollowerMode(MemberState::RS_SECONDARY);
- if (!worked) {
- warning() << "Failed to transition into " << MemberState(MemberState::RS_SECONDARY)
- << ". Current state: " << replCoord->getMemberState();
- }
+namespace {
+void tryToGoLiveAsASecondary(OperationContext* txn, ReplicationCoordinator* replCoord) {
+ if (replCoord->isInPrimaryOrSecondaryState()) {
+ return;
}
-}
- /* tail an oplog. ok to return, will be re-called. */
- void SyncTail::oplogApplication() {
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ ScopedTransaction transaction(txn, MODE_S);
+ Lock::GlobalRead readLock(txn->lockState());
- while(!inShutdown()) {
- OpQueue ops;
- OperationContextImpl txn;
+ if (replCoord->getMaintenanceMode()) {
+ // we're not actually going live
+ return;
+ }
- Timer batchTimer;
- int lastTimeChecked = 0;
+ // Only state RECOVERING can transition to SECONDARY.
+ MemberState state(replCoord->getMemberState());
+ if (!state.recovering()) {
+ return;
+ }
- do {
- int now = batchTimer.seconds();
+ OpTime minvalid = getMinValid(txn);
+ if (minvalid > replCoord->getMyLastOptime()) {
+ return;
+ }
- // apply replication batch limits
- if (!ops.empty()) {
- if (now > replBatchLimitSeconds)
- break;
- if (ops.getDeque().size() > replBatchLimitOperations)
- break;
- }
- // occasionally check some things
- // (always checked in the first iteration of this do-while loop, because
- // ops is empty)
- if (ops.empty() || now > lastTimeChecked) {
- BackgroundSync* bgsync = BackgroundSync::get();
- if (bgsync->getInitialSyncRequestedFlag()) {
- // got a resync command
- return;
- }
- lastTimeChecked = now;
- // can we become secondary?
- // we have to check this before calling mgr, as we must be a secondary to
- // become primary
- tryToGoLiveAsASecondary(&txn, replCoord);
- }
+ bool worked = replCoord->setFollowerMode(MemberState::RS_SECONDARY);
+ if (!worked) {
+ warning() << "Failed to transition into " << MemberState(MemberState::RS_SECONDARY)
+ << ". Current state: " << replCoord->getMemberState();
+ }
+}
+}
- const int slaveDelaySecs = replCoord->getSlaveDelaySecs().count();
- if (!ops.empty() && slaveDelaySecs > 0) {
- const BSONObj lastOp = ops.back();
- const unsigned int opTimestampSecs = lastOp["ts"].timestamp().getSecs();
+/* tail an oplog. ok to return, will be re-called. */
+void SyncTail::oplogApplication() {
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- // Stop the batch as the lastOp is too new to be applied. If we continue
- // on, we can get ops that are way ahead of the delay and this will
- // make this thread sleep longer when handleSlaveDelay is called
- // and apply ops much sooner than we like.
- if (opTimestampSecs > static_cast<unsigned int>(time(0) - slaveDelaySecs)) {
- break;
- }
- }
- // keep fetching more ops as long as we haven't filled up a full batch yet
- } while (!tryPopAndWaitForMore(&txn, &ops, replCoord) && // tryPopAndWaitForMore returns
- // true when we need to end a
- // batch early
- (ops.getSize() < replBatchLimitBytes) &&
- !inShutdown());
-
- // For pausing replication in tests
- while (MONGO_FAIL_POINT(rsSyncApplyStop)) {
- sleepmillis(0);
- }
+ while (!inShutdown()) {
+ OpQueue ops;
+ OperationContextImpl txn;
- if (ops.empty()) {
- continue;
- }
+ Timer batchTimer;
+ int lastTimeChecked = 0;
- const BSONObj lastOp = ops.back();
- handleSlaveDelay(lastOp);
-
- // Set minValid to the last op to be applied in this next batch.
- // This will cause this node to go into RECOVERING state
- // if we should crash and restart before updating the oplog
- setMinValid(&txn, extractOpTime(lastOp));
- multiApply(&txn,
- ops,
- &_prefetcherPool,
- &_writerPool,
- _applyFunc,
- this,
- supportsWaitingUntilDurable());
- }
- }
+ do {
+ int now = batchTimer.seconds();
- // Copies ops out of the bgsync queue into the deque passed in as a parameter.
- // Returns true if the batch should be ended early.
- // Batch should end early if we encounter a command, or if
- // there are no further ops in the bgsync queue to read.
- // This function also blocks 1 second waiting for new ops to appear in the bgsync
- // queue. We can't block forever because there are maintenance things we need
- // to periodically check in the loop.
- bool SyncTail::tryPopAndWaitForMore(OperationContext* txn,
- SyncTail::OpQueue* ops,
- ReplicationCoordinator* replCoord) {
- BSONObj op;
- // Check to see if there are ops waiting in the bgsync queue
- bool peek_success = peek(&op);
-
- if (!peek_success) {
- // if we don't have anything in the queue, wait a bit for something to appear
- if (ops->empty()) {
- if (replCoord->isWaitingForApplierToDrain()) {
- BackgroundSync::get()->waitUntilPaused();
- if (peek(&op)) {
- // The producer generated a last batch of ops before pausing so return
- // false so that we'll come back and apply them before signaling the drain
- // is complete.
- return false;
- }
- replCoord->signalDrainComplete(txn);
+ // apply replication batch limits
+ if (!ops.empty()) {
+ if (now > replBatchLimitSeconds)
+ break;
+ if (ops.getDeque().size() > replBatchLimitOperations)
+ break;
+ }
+ // occasionally check some things
+ // (always checked in the first iteration of this do-while loop, because
+ // ops is empty)
+ if (ops.empty() || now > lastTimeChecked) {
+ BackgroundSync* bgsync = BackgroundSync::get();
+ if (bgsync->getInitialSyncRequestedFlag()) {
+ // got a resync command
+ return;
}
- // block up to 1 second
- _networkQueue->waitForMore();
- return false;
+ lastTimeChecked = now;
+ // can we become secondary?
+ // we have to check this before calling mgr, as we must be a secondary to
+ // become primary
+ tryToGoLiveAsASecondary(&txn, replCoord);
}
- // otherwise, apply what we have
- return true;
- }
-
- const char* ns = op["ns"].valuestrsafe();
-
- // check for commands
- if ((op["op"].valuestrsafe()[0] == 'c') ||
- // Index builds are acheived through the use of an insert op, not a command op.
- // The following line is the same as what the insert code uses to detect an index build.
- ( *ns != '\0' && nsToCollectionSubstring(ns) == "system.indexes" )) {
+ const int slaveDelaySecs = replCoord->getSlaveDelaySecs().count();
+ if (!ops.empty() && slaveDelaySecs > 0) {
+ const BSONObj lastOp = ops.back();
+ const unsigned int opTimestampSecs = lastOp["ts"].timestamp().getSecs();
- if (ops->empty()) {
- // apply commands one-at-a-time
- ops->push_back(op);
- _networkQueue->consume();
+ // Stop the batch as the lastOp is too new to be applied. If we continue
+ // on, we can get ops that are way ahead of the delay and this will
+ // make this thread sleep longer when handleSlaveDelay is called
+ // and apply ops much sooner than we like.
+ if (opTimestampSecs > static_cast<unsigned int>(time(0) - slaveDelaySecs)) {
+ break;
+ }
}
-
- // otherwise, apply what we have so far and come back for the command
- return true;
+ // keep fetching more ops as long as we haven't filled up a full batch yet
+ } while (!tryPopAndWaitForMore(&txn, &ops, replCoord) && // tryPopAndWaitForMore returns
+ // true when we need to end a
+ // batch early
+ (ops.getSize() < replBatchLimitBytes) &&
+ !inShutdown());
+
+ // For pausing replication in tests
+ while (MONGO_FAIL_POINT(rsSyncApplyStop)) {
+ sleepmillis(0);
}
- // check for oplog version change
- BSONElement elemVersion = op["v"];
- int curVersion = 0;
- if (elemVersion.eoo())
- // missing version means version 1
- curVersion = 1;
- else
- curVersion = elemVersion.Int();
-
- if (curVersion != OPLOG_VERSION) {
- severe() << "expected oplog version " << OPLOG_VERSION << " but found version "
- << curVersion << " in oplog entry: " << op;
- fassertFailedNoTrace(18820);
+ if (ops.empty()) {
+ continue;
}
-
- // Copy the op to the deque and remove it from the bgsync queue.
- ops->push_back(op);
- _networkQueue->consume();
- // Go back for more ops
- return false;
+ const BSONObj lastOp = ops.back();
+ handleSlaveDelay(lastOp);
+
+ // Set minValid to the last op to be applied in this next batch.
+ // This will cause this node to go into RECOVERING state
+ // if we should crash and restart before updating the oplog
+ setMinValid(&txn, extractOpTime(lastOp));
+ multiApply(&txn,
+ ops,
+ &_prefetcherPool,
+ &_writerPool,
+ _applyFunc,
+ this,
+ supportsWaitingUntilDurable());
}
+}
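
The batching loop above keeps pulling ops until one of its cut conditions fires: the batch has been open longer than replBatchLimitSeconds, it already holds more than replBatchLimitOperations ops or replBatchLimitBytes bytes, the newest op is still inside the slaveDelay window, tryPopAndWaitForMore() asks for an early cut, or the node is shutting down. A condensed sketch of just the cut decision (the struct is illustrative; the numeric limits mirror the constants declared in sync_tail.h, with the byte limit taken as roughly 100 MB on 64-bit builds per the header comment):

    #include <cstddef>
    #include <ctime>

    struct BatchState {
        std::size_t ops = 0;         // ops accumulated so far
        std::size_t bytes = 0;       // total BSON size so far
        int secondsOpen = 0;         // batchTimer.seconds()
        long long lastOpTsSecs = 0;  // "ts" of the newest op in the batch
    };

    bool shouldCutBatch(const BatchState& b, int slaveDelaySecs) {
        if (b.ops == 0)
            return false;                                        // never cut an empty batch
        if (b.secondsOpen > 1 /* replBatchLimitSeconds */)
            return true;
        if (b.ops > 5000 /* replBatchLimitOperations */)
            return true;
        if (b.bytes >= 100ull * 1024 * 1024 /* ~replBatchLimitBytes, 64-bit */)
            return true;
        // Don't buffer ops that are still inside the slave-delay window.
        if (slaveDelaySecs > 0 &&
            b.lastOpTsSecs > static_cast<long long>(std::time(nullptr)) - slaveDelaySecs)
            return true;
        return false;
    }

    int main() {
        BatchState b;
        b.ops = 5001;
        return shouldCutBatch(b, 0) ? 0 : 1;
    }
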
- void SyncTail::handleSlaveDelay(const BSONObj& lastOp) {
- ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
- int slaveDelaySecs = replCoord->getSlaveDelaySecs().count();
-
- // ignore slaveDelay if the box is still initializing. once
- // it becomes secondary we can worry about it.
- if( slaveDelaySecs > 0 && replCoord->getMemberState().secondary() ) {
- const Timestamp ts = lastOp["ts"].timestamp();
- long long a = ts.getSecs();
- long long b = time(0);
- long long lag = b - a;
- long long sleeptime = slaveDelaySecs - lag;
- if( sleeptime > 0 ) {
- uassert(12000, "rs slaveDelay differential too big check clocks and systems",
- sleeptime < 0x40000000);
- if( sleeptime < 60 ) {
- sleepsecs((int) sleeptime);
- }
- else {
- warning() << "slavedelay causing a long sleep of " << sleeptime
- << " seconds";
- // sleep(hours) would prevent reconfigs from taking effect & such!
- long long waitUntil = b + sleeptime;
- while(time(0) < waitUntil) {
- sleepsecs(6);
-
- // Handle reconfigs that changed the slave delay
- if (replCoord->getSlaveDelaySecs().count() != slaveDelaySecs)
- break;
- }
+// Copies ops out of the bgsync queue into the deque passed in as a parameter.
+// Returns true if the batch should be ended early.
+// Batch should end early if we encounter a command, or if
+// there are no further ops in the bgsync queue to read.
+// This function also blocks 1 second waiting for new ops to appear in the bgsync
+// queue. We can't block forever because there are maintenance things we need
+// to periodically check in the loop.
+bool SyncTail::tryPopAndWaitForMore(OperationContext* txn,
+ SyncTail::OpQueue* ops,
+ ReplicationCoordinator* replCoord) {
+ BSONObj op;
+ // Check to see if there are ops waiting in the bgsync queue
+ bool peek_success = peek(&op);
+
+ if (!peek_success) {
+ // if we don't have anything in the queue, wait a bit for something to appear
+ if (ops->empty()) {
+ if (replCoord->isWaitingForApplierToDrain()) {
+ BackgroundSync::get()->waitUntilPaused();
+ if (peek(&op)) {
+ // The producer generated a last batch of ops before pausing so return
+ // false so that we'll come back and apply them before signaling the drain
+ // is complete.
+ return false;
}
+ replCoord->signalDrainComplete(txn);
}
- } // endif slaveDelay
+ // block up to 1 second
+ _networkQueue->waitForMore();
+ return false;
+ }
+
+ // otherwise, apply what we have
+ return true;
}
- void SyncTail::setHostname(const std::string& hostname) {
- _hostname = hostname;
+ const char* ns = op["ns"].valuestrsafe();
+
+ // check for commands
+ if ((op["op"].valuestrsafe()[0] == 'c') ||
+ // Index builds are acheived through the use of an insert op, not a command op.
+ // Index builds are achieved through the use of an insert op, not a command op.
+ // The following line is the same as what the insert code uses to detect an index build.
+ (*ns != '\0' && nsToCollectionSubstring(ns) == "system.indexes")) {
+ if (ops->empty()) {
+ // apply commands one-at-a-time
+ ops->push_back(op);
+ _networkQueue->consume();
+ }
+
+ // otherwise, apply what we have so far and come back for the command
+ return true;
}
- BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
- OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
- const char *ns = o.getStringField("ns");
+ // check for oplog version change
+ BSONElement elemVersion = op["v"];
+ int curVersion = 0;
+ if (elemVersion.eoo())
+ // missing version means version 1
+ curVersion = 1;
+ else
+ curVersion = elemVersion.Int();
+
+ if (curVersion != OPLOG_VERSION) {
+ severe() << "expected oplog version " << OPLOG_VERSION << " but found version "
+ << curVersion << " in oplog entry: " << op;
+ fassertFailedNoTrace(18820);
+ }
- // capped collections
- Collection* collection = db->getCollection(ns);
- if ( collection && collection->isCapped() ) {
- log() << "missing doc, but this is okay for a capped collection (" << ns << ")";
- return BSONObj();
- }
+ // Copy the op to the deque and remove it from the bgsync queue.
+ ops->push_back(op);
+ _networkQueue->consume();
- const int retryMax = 3;
- for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
- if (retryCount != 1) {
- // if we are retrying, sleep a bit to let the network possibly recover
- sleepsecs(retryCount * retryCount);
- }
- try {
- bool ok = missingObjReader.connect(HostAndPort(_hostname));
- if (!ok) {
- warning() << "network problem detected while connecting to the "
- << "sync source, attempt " << retryCount << " of "
- << retryMax << endl;
- continue; // try again
+ // Go back for more ops
+ return false;
+}
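
tryPopAndWaitForMore() returning false means "keep filling the current batch"; returning true means "apply what you already have". Commands, and the system.indexes inserts that stand in for index builds, are forced into a batch of exactly one op. A simplified classification of that batch-of-one rule (the struct and the suffix test are stand-ins for the BSON access and nsToCollectionSubstring()):

    #include <iostream>
    #include <string>

    // Illustrative, simplified view of one oplog entry (the real code reads BSON).
    struct OplogEntry {
        std::string op;  // "i", "u", "d", "c", "n", ...
        std::string ns;  // e.g. "test.coll" or "test.system.indexes"
    };

    // True when the entry must be applied alone, i.e. the current batch should be
    // closed first -- mirrors the command / index-build check in tryPopAndWaitForMore().
    bool mustApplyAlone(const OplogEntry& e) {
        const bool isCommand = !e.op.empty() && e.op[0] == 'c';
        const bool isIndexBuild = e.ns.size() >= 15 &&
            e.ns.compare(e.ns.size() - 15, 15, ".system.indexes") == 0;
        return isCommand || isIndexBuild;
    }

    int main() {
        std::cout << mustApplyAlone({"i", "test.system.indexes"}) << ' '
                  << mustApplyAlone({"c", "admin.$cmd"}) << ' '
                  << mustApplyAlone({"i", "test.coll"}) << '\n';
        return 0;
    }
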
+
+void SyncTail::handleSlaveDelay(const BSONObj& lastOp) {
+ ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
+ int slaveDelaySecs = replCoord->getSlaveDelaySecs().count();
+
+ // ignore slaveDelay if the box is still initializing. once
+ // it becomes secondary we can worry about it.
+ if (slaveDelaySecs > 0 && replCoord->getMemberState().secondary()) {
+ const Timestamp ts = lastOp["ts"].timestamp();
+ long long a = ts.getSecs();
+ long long b = time(0);
+ long long lag = b - a;
+ long long sleeptime = slaveDelaySecs - lag;
+ if (sleeptime > 0) {
+ uassert(12000,
+ "rs slaveDelay differential too big check clocks and systems",
+ sleeptime < 0x40000000);
+ if (sleeptime < 60) {
+ sleepsecs((int)sleeptime);
+ } else {
+ warning() << "slavedelay causing a long sleep of " << sleeptime << " seconds";
+ // sleep(hours) would prevent reconfigs from taking effect & such!
+ long long waitUntil = b + sleeptime;
+ while (time(0) < waitUntil) {
+ sleepsecs(6);
+
+ // Handle reconfigs that changed the slave delay
+ if (replCoord->getSlaveDelaySecs().count() != slaveDelaySecs)
+ break;
}
}
- catch (const SocketException&) {
- warning() << "network problem detected while connecting to the "
- << "sync source, attempt " << retryCount << " of "
- << retryMax << endl;
- continue; // try again
- }
+ }
+ } // endif slaveDelay
+}
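
handleSlaveDelay() computes how long to hold the batch: the remaining sleep is slaveDelaySecs minus how far behind the op's timestamp the clock already is. A short sleep happens in one call; anything of a minute or more is broken into 6-second naps so a reconfig that changes the delay can take effect. A small sketch of that arithmetic (the re-read of the configured delay inside the loop is elided):

    #include <chrono>
    #include <ctime>
    #include <iostream>
    #include <thread>

    // Sleep until the op at 'opTsSecs' (seconds since the epoch) is at least
    // 'slaveDelaySecs' old, waking every 6 seconds so a changed delay can be noticed.
    void sleepForSlaveDelay(long long opTsSecs, long long slaveDelaySecs) {
        const long long now = static_cast<long long>(std::time(nullptr));
        const long long lag = now - opTsSecs;        // how old the op already is
        const long long sleeptime = slaveDelaySecs - lag;  // remaining delay to honour
        if (sleeptime <= 0)
            return;                                  // already old enough, apply now
        if (sleeptime < 60) {
            std::this_thread::sleep_for(std::chrono::seconds(sleeptime));
            return;
        }
        const long long waitUntil = now + sleeptime;  // long delay: nap in 6-second chunks
        while (static_cast<long long>(std::time(nullptr)) < waitUntil) {
            std::this_thread::sleep_for(std::chrono::seconds(6));
            // the real code re-reads slaveDelaySecs here and breaks if it changed
        }
    }

    int main() {
        // op written 10s ago with a 3s delay: nothing left to wait for.
        sleepForSlaveDelay(std::time(nullptr) - 10, 3);
        std::cout << "done\n";
        return 0;
    }
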
- // might be more than just _id in the update criteria
- BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
- BSONObj missingObj;
- try {
- missingObj = missingObjReader.findOne(ns, query);
- }
- catch (const SocketException&) {
- warning() << "network problem detected while fetching a missing document from the "
- << "sync source, attempt " << retryCount << " of "
- << retryMax << endl;
- continue; // try again
- }
- catch (DBException& e) {
- error() << "assertion fetching missing object: " << e.what() << endl;
- throw;
+void SyncTail::setHostname(const std::string& hostname) {
+ _hostname = hostname;
+}
+
+BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
+ OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
+ const char* ns = o.getStringField("ns");
+
+ // capped collections
+ Collection* collection = db->getCollection(ns);
+ if (collection && collection->isCapped()) {
+ log() << "missing doc, but this is okay for a capped collection (" << ns << ")";
+ return BSONObj();
+ }
+
+ const int retryMax = 3;
+ for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
+ if (retryCount != 1) {
+ // if we are retrying, sleep a bit to let the network possibly recover
+ sleepsecs(retryCount * retryCount);
+ }
+ try {
+ bool ok = missingObjReader.connect(HostAndPort(_hostname));
+ if (!ok) {
+ warning() << "network problem detected while connecting to the "
+ << "sync source, attempt " << retryCount << " of " << retryMax << endl;
+ continue; // try again
}
+ } catch (const SocketException&) {
+ warning() << "network problem detected while connecting to the "
+ << "sync source, attempt " << retryCount << " of " << retryMax << endl;
+ continue; // try again
+ }
- // success!
- return missingObj;
+ // might be more than just _id in the update criteria
+ BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
+ BSONObj missingObj;
+ try {
+ missingObj = missingObjReader.findOne(ns, query);
+ } catch (const SocketException&) {
+ warning() << "network problem detected while fetching a missing document from the "
+ << "sync source, attempt " << retryCount << " of " << retryMax << endl;
+ continue; // try again
+ } catch (DBException& e) {
+ error() << "assertion fetching missing object: " << e.what() << endl;
+ throw;
}
- // retry count exceeded
- msgasserted(15916, str::stream() <<
- "Can no longer connect to initial sync source: " << _hostname);
- }
- bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
- const NamespaceString nss(o.getStringField("ns"));
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- // Take an X lock on the database in order to preclude other modifications.
- // Also, the database might not exist yet, so create it.
- AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X);
- Database* const db = autoDb.getDb();
+ // success!
+ return missingObj;
+ }
+ // retry count exceeded
+ msgasserted(15916,
+ str::stream() << "Can no longer connect to initial sync source: " << _hostname);
+}
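
getMissingDoc() allows three attempts against the sync source, sleeping retryCount squared seconds before each retry (4s before the second attempt, 9s before the third) and giving up with msgassert 15916 after that. The back-off shape in isolation, with a hypothetical tryFetch() standing in for the connect-and-findOne step:

    #include <chrono>
    #include <iostream>
    #include <stdexcept>
    #include <thread>

    // Hypothetical fetch that fails on network trouble; stands in for
    // OplogReader::connect() followed by findOne().
    bool tryFetch(int attempt) {
        return attempt == 3;  // pretend only the third attempt succeeds
    }

    bool fetchWithBackoff() {
        const int retryMax = 3;
        for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
            if (retryCount != 1) {
                // quadratic back-off: 4s before attempt 2, 9s before attempt 3
                std::this_thread::sleep_for(std::chrono::seconds(retryCount * retryCount));
            }
            if (tryFetch(retryCount))
                return true;
            std::cout << "network problem, attempt " << retryCount
                      << " of " << retryMax << '\n';
        }
        throw std::runtime_error("can no longer connect to initial sync source");
    }

    int main() {
        return fetchWithBackoff() ? 0 : 1;
    }
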
- // we don't have the object yet, which is possible on initial sync. get it.
- log() << "adding missing object" << endl; // rare enough we can log
+bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
+ const NamespaceString nss(o.getStringField("ns"));
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ // Take an X lock on the database in order to preclude other modifications.
+ // Also, the database might not exist yet, so create it.
+ AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X);
+ Database* const db = autoDb.getDb();
- BSONObj missingObj = getMissingDoc(txn, db, o);
+ // we don't have the object yet, which is possible on initial sync. get it.
+ log() << "adding missing object" << endl; // rare enough we can log
- if( missingObj.isEmpty() ) {
- log() << "missing object not found on source."
- " presumably deleted later in oplog";
- log() << "o2: " << o.getObjectField("o2").toString();
- log() << "o firstfield: " << o.getObjectField("o").firstElementFieldName();
+ BSONObj missingObj = getMissingDoc(txn, db, o);
- return false;
- }
- else {
- WriteUnitOfWork wunit(txn);
+ if (missingObj.isEmpty()) {
+ log() << "missing object not found on source."
+ " presumably deleted later in oplog";
+ log() << "o2: " << o.getObjectField("o2").toString();
+ log() << "o firstfield: " << o.getObjectField("o").firstElementFieldName();
- Collection* const coll = db->getOrCreateCollection(txn, nss.toString());
- invariant(coll);
+ return false;
+ } else {
+ WriteUnitOfWork wunit(txn);
- StatusWith<RecordId> result = coll->insertDocument(txn, missingObj, true);
- uassert(15917,
- str::stream() << "failed to insert missing doc: "
- << result.getStatus().toString(),
- result.isOK() );
+ Collection* const coll = db->getOrCreateCollection(txn, nss.toString());
+ invariant(coll);
- LOG(1) << "inserted missing doc: " << missingObj.toString() << endl;
+ StatusWith<RecordId> result = coll->insertDocument(txn, missingObj, true);
+ uassert(
+ 15917,
+ str::stream() << "failed to insert missing doc: " << result.getStatus().toString(),
+ result.isOK());
- wunit.commit();
- return true;
- }
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "InsertRetry", nss.ns());
+ LOG(1) << "inserted missing doc: " << missingObj.toString() << endl;
- // fixes compile errors on GCC - see SERVER-18219 for details
- MONGO_UNREACHABLE;
+ wunit.commit();
+ return true;
+ }
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "InsertRetry", nss.ns());
- static AtomicUInt32 replWriterWorkerId;
+ // fixes compile errors on GCC - see SERVER-18219 for details
+ MONGO_UNREACHABLE;
+}
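
shouldRetry() wraps its fetch-and-insert in MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END, whose contract is simply to re-run the body whenever a write conflict escapes it. A generic sketch of that contract, with a hypothetical WriteConflict exception type standing in for WriteConflictException:

    #include <iostream>
    #include <stdexcept>

    // Stand-in for mongo::WriteConflictException.
    struct WriteConflict : std::runtime_error {
        WriteConflict() : std::runtime_error("write conflict") {}
    };

    // Re-run 'body' until it finishes without throwing WriteConflict -- the same
    // contract as the MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END macro pair.
    template <typename Body>
    auto retryOnWriteConflict(Body body) -> decltype(body()) {
        while (true) {
            try {
                return body();
            } catch (const WriteConflict&) {
                // a conflicting writer won; back off (elided) and run the body again
            }
        }
    }

    int main() {
        int attempts = 0;
        bool inserted = retryOnWriteConflict([&] {
            if (++attempts < 3)
                throw WriteConflict();  // pretend the first two tries conflicted
            return true;                // the third try commits
        });
        std::cout << "inserted=" << inserted << " after " << attempts << " attempts\n";
        return 0;
    }
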
- static void initializeWriterThread() {
- // Only do this once per thread
- if (!ClientBasic::getCurrent()) {
- Client::initThreadIfNotAlready();
- AuthorizationSession::get(cc())->grantInternalAuthorization();
- }
+static AtomicUInt32 replWriterWorkerId;
+
+static void initializeWriterThread() {
+ // Only do this once per thread
+ if (!ClientBasic::getCurrent()) {
+ Client::initThreadIfNotAlready();
+ AuthorizationSession::get(cc())->grantInternalAuthorization();
}
+}
- // This free function is used by the writer threads to apply each op
- void multiSyncApply(const std::vector<BSONObj>& ops, SyncTail* st) {
- initializeWriterThread();
+// This free function is used by the writer threads to apply each op
+void multiSyncApply(const std::vector<BSONObj>& ops, SyncTail* st) {
+ initializeWriterThread();
- OperationContextImpl txn;
- txn.setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(&txn);
+ OperationContextImpl txn;
+ txn.setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(&txn);
- // allow us to get through the magic barrier
- txn.lockState()->setIsBatchWriter(true);
+ // allow us to get through the magic barrier
+ txn.lockState()->setIsBatchWriter(true);
- bool convertUpdatesToUpserts = true;
+ bool convertUpdatesToUpserts = true;
- for (std::vector<BSONObj>::const_iterator it = ops.begin();
- it != ops.end();
- ++it) {
- try {
- if (!SyncTail::syncApply(&txn, *it, convertUpdatesToUpserts).isOK()) {
- fassertFailedNoTrace(16359);
- }
+ for (std::vector<BSONObj>::const_iterator it = ops.begin(); it != ops.end(); ++it) {
+ try {
+ if (!SyncTail::syncApply(&txn, *it, convertUpdatesToUpserts).isOK()) {
+ fassertFailedNoTrace(16359);
}
- catch (const DBException& e) {
- error() << "writer worker caught exception: " << causedBy(e)
- << " on: " << it->toString();
-
- if (inShutdown()) {
- return;
- }
+ } catch (const DBException& e) {
+ error() << "writer worker caught exception: " << causedBy(e)
+ << " on: " << it->toString();
- fassertFailedNoTrace(16360);
+ if (inShutdown()) {
+ return;
}
+
+ fassertFailedNoTrace(16360);
}
}
+}
- // This free function is used by the initial sync writer threads to apply each op
- void multiInitialSyncApply(const std::vector<BSONObj>& ops, SyncTail* st) {
- initializeWriterThread();
-
- OperationContextImpl txn;
- txn.setReplicatedWrites(false);
- DisableDocumentValidation validationDisabler(&txn);
+// This free function is used by the initial sync writer threads to apply each op
+void multiInitialSyncApply(const std::vector<BSONObj>& ops, SyncTail* st) {
+ initializeWriterThread();
- // allow us to get through the magic barrier
- txn.lockState()->setIsBatchWriter(true);
+ OperationContextImpl txn;
+ txn.setReplicatedWrites(false);
+ DisableDocumentValidation validationDisabler(&txn);
- bool convertUpdatesToUpserts = false;
+ // allow us to get through the magic barrier
+ txn.lockState()->setIsBatchWriter(true);
- for (std::vector<BSONObj>::const_iterator it = ops.begin();
- it != ops.end();
- ++it) {
- try {
- if (!SyncTail::syncApply(&txn, *it, convertUpdatesToUpserts).isOK()) {
+ bool convertUpdatesToUpserts = false;
- if (st->shouldRetry(&txn, *it)) {
- if (!SyncTail::syncApply(&txn, *it, convertUpdatesToUpserts).isOK()) {
- fassertFailedNoTrace(15915);
- }
+ for (std::vector<BSONObj>::const_iterator it = ops.begin(); it != ops.end(); ++it) {
+ try {
+ if (!SyncTail::syncApply(&txn, *it, convertUpdatesToUpserts).isOK()) {
+ if (st->shouldRetry(&txn, *it)) {
+ if (!SyncTail::syncApply(&txn, *it, convertUpdatesToUpserts).isOK()) {
+ fassertFailedNoTrace(15915);
}
-
- // If shouldRetry() returns false, fall through.
- // This can happen if the document that was moved and missed by Cloner
- // subsequently got deleted and no longer exists on the Sync Target at all
}
- }
- catch (const DBException& e) {
- error() << "writer worker caught exception: " << causedBy(e)
- << " on: " << it->toString();
- if (inShutdown()) {
- return;
- }
+ // If shouldRetry() returns false, fall through.
+ // This can happen if the document that was moved and missed by Cloner
+ // subsequently got deleted and no longer exists on the Sync Target at all
+ }
+ } catch (const DBException& e) {
+ error() << "writer worker caught exception: " << causedBy(e)
+ << " on: " << it->toString();
- fassertFailedNoTrace(16361);
+ if (inShutdown()) {
+ return;
}
+
+ fassertFailedNoTrace(16361);
}
}
+}
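
The two writer entry points differ only in how a failed op is handled: multiSyncApply() fasserts immediately (16359/16360), while multiInitialSyncApply() first asks shouldRetry() to fetch the missing document from the sync source, re-applies once, and fasserts (15915) only if that second attempt also fails; if shouldRetry() reports the document is gone for good, the op is simply skipped. That decision tree, reduced to a sketch with hypothetical apply/fetch callbacks:

    #include <functional>
    #include <iostream>

    // applyOnce: tries to apply one op; returns false on failure.
    // fetchMissing: shouldRetry() stand-in; returns false if the doc is gone for good.
    bool applyForInitialSync(std::function<bool()> applyOnce,
                             std::function<bool()> fetchMissing) {
        if (applyOnce())
            return true;              // normal path
        if (!fetchMissing())
            return true;              // doc deleted later in the oplog: skip this op
        if (applyOnce())
            return true;              // succeeded after fetching the missing doc
        return false;                 // the real code fasserts (15915) here
    }

    int main() {
        int tries = 0;
        bool ok = applyForInitialSync(
            [&] { return ++tries > 1; },  // fails once, then succeeds
            [] { return true; });         // missing doc fetched from the sync source
        std::cout << (ok ? "applied" : "fatal") << " after " << tries << " tries\n";
        return 0;
    }
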
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/sync_tail.h b/src/mongo/db/repl/sync_tail.h
index 74f17cdea12..2e4424b71cf 100644
--- a/src/mongo/db/repl/sync_tail.h
+++ b/src/mongo/db/repl/sync_tail.h
@@ -38,160 +38,162 @@
namespace mongo {
- class Database;
- class OperationContext;
+class Database;
+class OperationContext;
namespace repl {
- class BackgroundSyncInterface;
- class ReplicationCoordinator;
- class OpTime;
+class BackgroundSyncInterface;
+class ReplicationCoordinator;
+class OpTime;
+
+/**
+ * "Normal" replica set syncing
+ */
+class SyncTail {
+public:
+ using MultiSyncApplyFunc = stdx::function<void(const std::vector<BSONObj>& ops, SyncTail* st)>;
+
+ /**
+ * Type of function that takes a non-command op and applies it locally.
+ * Used for applying from an oplog.
+ * Last boolean argument 'convertUpdateToUpsert' converts some updates to upserts for
+ * idempotency reasons.
+ * Returns failure status if the op was an update that could not be applied.
+ */
+ using ApplyOperationInLockFn =
+ stdx::function<Status(OperationContext*, Database*, const BSONObj&, bool)>;
+
+ /**
+ * Type of function that takes a command op and applies it locally.
+ * Used for applying from an oplog.
+ * Returns failure status if the op could not be applied.
+ */
+ using ApplyCommandInLockFn = stdx::function<Status(OperationContext*, const BSONObj&)>;
+
+ /**
+ * Type of function to increment "repl.apply.ops" server status metric.
+ */
+ using IncrementOpsAppliedStatsFn = stdx::function<void()>;
+
+ SyncTail(BackgroundSyncInterface* q, MultiSyncApplyFunc func);
+ virtual ~SyncTail();
/**
- * "Normal" replica set syncing
+ * Applies the operation that is in param o.
+ * Functions for applying operations/commands and increment server status counters may
+ * be overridden for testing.
*/
- class SyncTail {
+ static Status syncApply(OperationContext* txn,
+ const BSONObj& o,
+ bool convertUpdateToUpsert,
+ ApplyOperationInLockFn applyOperationInLock,
+ ApplyCommandInLockFn applyCommandInLock,
+ IncrementOpsAppliedStatsFn incrementOpsAppliedStats);
+
+ static Status syncApply(OperationContext* txn, const BSONObj& o, bool convertUpdateToUpsert);
+
+ /**
+ * Runs _applyOplogUntil(stopOpTime)
+ */
+ virtual void oplogApplication(OperationContext* txn, const OpTime& stopOpTime);
+
+ void oplogApplication();
+ bool peek(BSONObj* obj);
+
+ class OpQueue {
public:
- using MultiSyncApplyFunc =
- stdx::function<void (const std::vector<BSONObj>& ops, SyncTail* st)>;
-
- /**
- * Type of function that takes a non-command op and applies it locally.
- * Used for applying from an oplog.
- * Last boolean argument 'convertUpdateToUpsert' converts some updates to upserts for
- * idempotency reasons.
- * Returns failure status if the op was an update that could not be applied.
- */
- using ApplyOperationInLockFn =
- stdx::function<Status (OperationContext*, Database*, const BSONObj&, bool)>;
-
- /**
- * Type of function that takes a command op and applies it locally.
- * Used for applying from an oplog.
- * Returns failure status if the op that could not be applied.
- */
- using ApplyCommandInLockFn = stdx::function<Status (OperationContext*, const BSONObj&)>;
-
- /**
- * Type of function to increment "repl.apply.ops" server status metric.
- */
- using IncrementOpsAppliedStatsFn = stdx::function<void ()>;
-
- SyncTail(BackgroundSyncInterface *q, MultiSyncApplyFunc func);
- virtual ~SyncTail();
-
- /**
- * Applies the operation that is in param o.
- * Functions for applying operations/commands and increment server status counters may
- * be overridden for testing.
- */
- static Status syncApply(OperationContext* txn,
- const BSONObj &o,
- bool convertUpdateToUpsert,
- ApplyOperationInLockFn applyOperationInLock,
- ApplyCommandInLockFn applyCommandInLock,
- IncrementOpsAppliedStatsFn incrementOpsAppliedStats);
-
- static Status syncApply(OperationContext* txn,
- const BSONObj &o,
- bool convertUpdateToUpsert);
-
- /**
- * Runs _applyOplogUntil(stopOpTime)
- */
- virtual void oplogApplication(OperationContext* txn, const OpTime& stopOpTime);
-
- void oplogApplication();
- bool peek(BSONObj* obj);
-
- class OpQueue {
- public:
- OpQueue() : _size(0) {}
- size_t getSize() const { return _size; }
- const std::deque<BSONObj>& getDeque() const { return _deque; }
- void push_back(BSONObj& op) {
- _deque.push_back(op);
- _size += op.objsize();
- }
- bool empty() const {
- return _deque.empty();
- }
-
- BSONObj back() const {
- invariant(!_deque.empty());
- return _deque.back();
- }
-
- private:
- std::deque<BSONObj> _deque;
- size_t _size;
- };
-
- // returns true if we should continue waiting for BSONObjs, false if we should
- // stop waiting and apply the queue we have. Only returns false if !ops.empty().
- bool tryPopAndWaitForMore(OperationContext* txn,
- OpQueue* ops,
- ReplicationCoordinator* replCoord);
-
- /**
- * Fetch a single document referenced in the operation from the sync source.
- */
- virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o);
-
- /**
- * If applyOperation_inlock should be called again after an update fails.
- */
- virtual bool shouldRetry(OperationContext* txn, const BSONObj& o);
- void setHostname(const std::string& hostname);
-
- protected:
- // Cap the batches using the limit on journal commits.
- // This works out to be 100 MB (64 bit) or 50 MB (32 bit)
- static const unsigned int replBatchLimitBytes = dur::UncommittedBytesLimit;
- static const int replBatchLimitSeconds = 1;
- static const unsigned int replBatchLimitOperations = 5000;
-
- // SyncTail base class always supports awaiting commit if any op has j:true flag
- // that indicates awaiting commit before updating last OpTime.
- virtual bool supportsWaitingUntilDurable() { return true; }
-
- // Prefetch and write a deque of operations, using the supplied function.
- // Initial Sync and Sync Tail each use a different function.
- // Returns the last OpTime applied.
- static OpTime multiApply(OperationContext* txn,
- const OpQueue& ops,
- OldThreadPool* prefetcherPool,
- OldThreadPool* writerPool,
- MultiSyncApplyFunc func,
- SyncTail* sync,
- bool supportsAwaitingCommit);
-
- /**
- * Applies oplog entries until reaching "endOpTime".
- *
- * NOTE:Will not transition or check states
- */
- void _applyOplogUntil(OperationContext* txn, const OpTime& endOpTime);
+ OpQueue() : _size(0) {}
+ size_t getSize() const {
+ return _size;
+ }
+ const std::deque<BSONObj>& getDeque() const {
+ return _deque;
+ }
+ void push_back(BSONObj& op) {
+ _deque.push_back(op);
+ _size += op.objsize();
+ }
+ bool empty() const {
+ return _deque.empty();
+ }
+
+ BSONObj back() const {
+ invariant(!_deque.empty());
+ return _deque.back();
+ }
private:
- std::string _hostname;
+ std::deque<BSONObj> _deque;
+ size_t _size;
+ };
- BackgroundSyncInterface* _networkQueue;
+ // returns true if we should continue waiting for BSONObjs, false if we should
+ // stop waiting and apply the queue we have. Only returns false if !ops.empty().
+ bool tryPopAndWaitForMore(OperationContext* txn,
+ OpQueue* ops,
+ ReplicationCoordinator* replCoord);
- // Function to use during applyOps
- MultiSyncApplyFunc _applyFunc;
+ /**
+ * Fetch a single document referenced in the operation from the sync source.
+ */
+ virtual BSONObj getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o);
- void handleSlaveDelay(const BSONObj& op);
+ /**
+ * If applyOperation_inlock should be called again after an update fails.
+ */
+ virtual bool shouldRetry(OperationContext* txn, const BSONObj& o);
+ void setHostname(const std::string& hostname);
+
+protected:
+ // Cap the batches using the limit on journal commits.
+ // This works out to be 100 MB (64 bit) or 50 MB (32 bit)
+ static const unsigned int replBatchLimitBytes = dur::UncommittedBytesLimit;
+ static const int replBatchLimitSeconds = 1;
+ static const unsigned int replBatchLimitOperations = 5000;
+
+ // SyncTail base class always supports awaiting commit if any op has j:true flag
+ // that indicates awaiting commit before updating last OpTime.
+ virtual bool supportsWaitingUntilDurable() {
+ return true;
+ }
+
+ // Prefetch and write a deque of operations, using the supplied function.
+ // Initial Sync and Sync Tail each use a different function.
+ // Returns the last OpTime applied.
+ static OpTime multiApply(OperationContext* txn,
+ const OpQueue& ops,
+ OldThreadPool* prefetcherPool,
+ OldThreadPool* writerPool,
+ MultiSyncApplyFunc func,
+ SyncTail* sync,
+ bool supportsAwaitingCommit);
- // persistent pool of worker threads for writing ops to the databases
- OldThreadPool _writerPool;
- // persistent pool of worker threads for prefetching
- OldThreadPool _prefetcherPool;
+ /**
+ * Applies oplog entries until reaching "endOpTime".
+ *
+ * NOTE: Will not transition or check states
+ */
+ void _applyOplogUntil(OperationContext* txn, const OpTime& endOpTime);
- };
+private:
+ std::string _hostname;
+
+ BackgroundSyncInterface* _networkQueue;
+
+ // Function to use during applyOps
+ MultiSyncApplyFunc _applyFunc;
+
+ void handleSlaveDelay(const BSONObj& op);
+
+ // persistent pool of worker threads for writing ops to the databases
+ OldThreadPool _writerPool;
+ // persistent pool of worker threads for prefetching
+ OldThreadPool _prefetcherPool;
+};
- // These free functions are used by the thread pool workers to write ops to the db.
- void multiSyncApply(const std::vector<BSONObj>& ops, SyncTail* st);
- void multiInitialSyncApply(const std::vector<BSONObj>& ops, SyncTail* st);
+// These free functions are used by the thread pool workers to write ops to the db.
+void multiSyncApply(const std::vector<BSONObj>& ops, SyncTail* st);
+void multiInitialSyncApply(const std::vector<BSONObj>& ops, SyncTail* st);
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
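
OpQueue, declared above, is a deque that also tracks the total BSON size of its contents, which is what lets the batching loops compare against replBatchLimitBytes without re-walking the queue. A minimal size-tracking queue of the same shape, using std::string length as a stand-in for BSONObj::objsize():

    #include <cstddef>
    #include <deque>
    #include <iostream>
    #include <string>

    // Size-tracking queue in the spirit of SyncTail::OpQueue ('objsize' here is just
    // the string length; the real class sums BSONObj::objsize()).
    class OpQueueSketch {
    public:
        void push_back(const std::string& op) {
            _deque.push_back(op);
            _size += op.size();
        }
        std::size_t getSize() const { return _size; }
        std::size_t count() const { return _deque.size(); }
        bool empty() const { return _deque.empty(); }
        const std::string& back() const { return _deque.back(); }

    private:
        std::deque<std::string> _deque;
        std::size_t _size = 0;
    };

    int main() {
        OpQueueSketch q;
        q.push_back("insert {_id: 1}");
        q.push_back("update {_id: 1}");
        // the batching loop compares getSize() against replBatchLimitBytes
        std::cout << q.count() << " ops, " << q.getSize() << " bytes\n";
        return 0;
    }
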
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index f1e2a3051da..d718f188b5e 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -50,253 +50,272 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
+using namespace mongo;
+using namespace mongo::repl;
- class BackgroundSyncMock : public BackgroundSyncInterface {
- public:
- bool peek(BSONObj* op) override;
- void consume() override;
- void waitForMore() override;
- };
+class BackgroundSyncMock : public BackgroundSyncInterface {
+public:
+ bool peek(BSONObj* op) override;
+ void consume() override;
+ void waitForMore() override;
+};
- bool BackgroundSyncMock::peek(BSONObj* op) { return false; }
- void BackgroundSyncMock::consume() { }
- void BackgroundSyncMock::waitForMore() { }
+bool BackgroundSyncMock::peek(BSONObj* op) {
+ return false;
+}
+void BackgroundSyncMock::consume() {}
+void BackgroundSyncMock::waitForMore() {}
- class SyncTailTest : public unittest::Test {
- protected:
- void _testSyncApplyInsertDocument(LockMode expectedMode);
+class SyncTailTest : public unittest::Test {
+protected:
+ void _testSyncApplyInsertDocument(LockMode expectedMode);
- std::unique_ptr<OperationContext> _txn;
- unsigned int _opsApplied;
- SyncTail::ApplyOperationInLockFn _applyOp;
- SyncTail::ApplyCommandInLockFn _applyCmd;
- SyncTail::IncrementOpsAppliedStatsFn _incOps;
+ std::unique_ptr<OperationContext> _txn;
+ unsigned int _opsApplied;
+ SyncTail::ApplyOperationInLockFn _applyOp;
+ SyncTail::ApplyCommandInLockFn _applyCmd;
+ SyncTail::IncrementOpsAppliedStatsFn _incOps;
- private:
- void setUp() override;
- void tearDown() override;
- };
+private:
+ void setUp() override;
+ void tearDown() override;
+};
- void SyncTailTest::setUp() {
- ServiceContext* serviceContext = getGlobalServiceContext();
- if (!serviceContext->getGlobalStorageEngine()) {
- // When using the 'devnull' storage engine, it is fine for the temporary directory to
- // go away after the global storage engine is initialized.
- unittest::TempDir tempDir("sync_tail_test");
- mongo::storageGlobalParams.dbpath = tempDir.path();
- mongo::storageGlobalParams.engine = "devnull";
- mongo::storageGlobalParams.engineSetByUser = true;
- serviceContext->initializeGlobalStorageEngine();
- }
- ReplSettings replSettings;
- replSettings.oplogSize = 5 * 1024 * 1024;
+void SyncTailTest::setUp() {
+ ServiceContext* serviceContext = getGlobalServiceContext();
+ if (!serviceContext->getGlobalStorageEngine()) {
+ // When using the 'devnull' storage engine, it is fine for the temporary directory to
+ // go away after the global storage engine is initialized.
+ unittest::TempDir tempDir("sync_tail_test");
+ mongo::storageGlobalParams.dbpath = tempDir.path();
+ mongo::storageGlobalParams.engine = "devnull";
+ mongo::storageGlobalParams.engineSetByUser = true;
+ serviceContext->initializeGlobalStorageEngine();
+ }
+ ReplSettings replSettings;
+ replSettings.oplogSize = 5 * 1024 * 1024;
- setGlobalReplicationCoordinator(new ReplicationCoordinatorMock(replSettings));
+ setGlobalReplicationCoordinator(new ReplicationCoordinatorMock(replSettings));
- Client::initThreadIfNotAlready();
- _txn.reset(new OperationContextReplMock(&cc(), 0));
- _opsApplied = 0;
- _applyOp = [](OperationContext* txn,
- Database* db,
- const BSONObj& op,
- bool convertUpdateToUpsert) {
- return Status::OK();
- };
- _applyCmd = [](OperationContext* txn, const BSONObj& op) {
+ Client::initThreadIfNotAlready();
+ _txn.reset(new OperationContextReplMock(&cc(), 0));
+ _opsApplied = 0;
+ _applyOp =
+ [](OperationContext* txn, Database* db, const BSONObj& op, bool convertUpdateToUpsert) {
return Status::OK();
};
- _incOps = [this]() { _opsApplied++; };
- }
+ _applyCmd = [](OperationContext* txn, const BSONObj& op) { return Status::OK(); };
+ _incOps = [this]() { _opsApplied++; };
+}
- void SyncTailTest::tearDown() {
- {
- Lock::GlobalWrite globalLock(_txn->lockState());
- BSONObjBuilder unused;
- invariant(mongo::dbHolder().closeAll(_txn.get(), unused, false));
- }
- _txn.reset();
- setGlobalReplicationCoordinator(nullptr);
+void SyncTailTest::tearDown() {
+ {
+ Lock::GlobalWrite globalLock(_txn->lockState());
+ BSONObjBuilder unused;
+ invariant(mongo::dbHolder().closeAll(_txn.get(), unused, false));
}
+ _txn.reset();
+ setGlobalReplicationCoordinator(nullptr);
+}
- TEST_F(SyncTailTest, Peek) {
- BackgroundSyncMock bgsync;
- SyncTail syncTail(&bgsync, [](const std::vector<BSONObj>& ops, SyncTail* st) { });
- BSONObj obj;
- ASSERT_FALSE(syncTail.peek(&obj));
- }
+TEST_F(SyncTailTest, Peek) {
+ BackgroundSyncMock bgsync;
+ SyncTail syncTail(&bgsync, [](const std::vector<BSONObj>& ops, SyncTail* st) {});
+ BSONObj obj;
+ ASSERT_FALSE(syncTail.peek(&obj));
+}
- TEST_F(SyncTailTest, SyncApplyNoNamespaceBadOp) {
- const BSONObj op = BSON("op" << "x");
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, _applyOp, _applyCmd, _incOps));
- ASSERT_EQUALS(0U, _opsApplied);
- }
+TEST_F(SyncTailTest, SyncApplyNoNamespaceBadOp) {
+ const BSONObj op = BSON("op"
+ << "x");
+ ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, _applyOp, _applyCmd, _incOps));
+ ASSERT_EQUALS(0U, _opsApplied);
+}
- TEST_F(SyncTailTest, SyncApplyNoNamespaceNoOp) {
- ASSERT_OK(SyncTail::syncApply(_txn.get(), BSON("op" << "n"), false));
- ASSERT_EQUALS(0U, _opsApplied);
- }
+TEST_F(SyncTailTest, SyncApplyNoNamespaceNoOp) {
+ ASSERT_OK(SyncTail::syncApply(_txn.get(),
+ BSON("op"
+ << "n"),
+ false));
+ ASSERT_EQUALS(0U, _opsApplied);
+}
- TEST_F(SyncTailTest, SyncApplyBadOp) {
- const BSONObj op = BSON("op" << "x" << "ns" << "test.t");
- ASSERT_EQUALS(
- ErrorCodes::BadValue,
- SyncTail::syncApply(_txn.get(), op, false, _applyOp, _applyCmd, _incOps).code());
- ASSERT_EQUALS(0U, _opsApplied);
- }
+TEST_F(SyncTailTest, SyncApplyBadOp) {
+ const BSONObj op = BSON("op"
+ << "x"
+ << "ns"
+ << "test.t");
+ ASSERT_EQUALS(ErrorCodes::BadValue,
+ SyncTail::syncApply(_txn.get(), op, false, _applyOp, _applyCmd, _incOps).code());
+ ASSERT_EQUALS(0U, _opsApplied);
+}
- TEST_F(SyncTailTest, SyncApplyNoOp) {
- const BSONObj op = BSON("op" << "n" << "ns" << "test.t");
- bool applyOpCalled = false;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
- Database* db,
- const BSONObj& theOperation,
- bool convertUpdateToUpsert) {
- applyOpCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", MODE_X));
- ASSERT_FALSE(txn->writesAreReplicated());
- ASSERT_TRUE(documentValidationDisabled(txn));
- ASSERT_TRUE(db);
- ASSERT_EQUALS(op, theOperation);
- ASSERT_FALSE(convertUpdateToUpsert);
- return Status::OK();
- };
- SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
- const BSONObj& theOperation) {
+TEST_F(SyncTailTest, SyncApplyNoOp) {
+ const BSONObj op = BSON("op"
+ << "n"
+ << "ns"
+ << "test.t");
+ bool applyOpCalled = false;
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ Database* db,
+ const BSONObj& theOperation,
+ bool convertUpdateToUpsert) {
+ applyOpCalled = true;
+ ASSERT_TRUE(txn);
+ ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", MODE_X));
+ ASSERT_FALSE(txn->writesAreReplicated());
+ ASSERT_TRUE(documentValidationDisabled(txn));
+ ASSERT_TRUE(db);
+ ASSERT_EQUALS(op, theOperation);
+ ASSERT_FALSE(convertUpdateToUpsert);
+ return Status::OK();
+ };
+ SyncTail::ApplyCommandInLockFn applyCmd =
+ [&](OperationContext* txn, const BSONObj& theOperation) {
FAIL("applyCommand unexpectedly invoked.");
return Status::OK();
};
- ASSERT_TRUE(_txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(_txn.get()));
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
- ASSERT_TRUE(applyOpCalled);
- ASSERT_EQUALS(1U, _opsApplied);
- }
+ ASSERT_TRUE(_txn->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(_txn.get()));
+ ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
+ ASSERT_TRUE(applyOpCalled);
+ ASSERT_EQUALS(1U, _opsApplied);
+}
- TEST_F(SyncTailTest, SyncApplyNoOpApplyOpThrowsException) {
- const BSONObj op = BSON("op" << "n" << "ns" << "test.t");
- int applyOpCalled = 0;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
- Database* db,
- const BSONObj& theOperation,
- bool convertUpdateToUpsert) {
- applyOpCalled++;
- if (applyOpCalled < 5) {
- throw WriteConflictException();
- }
- return Status::OK();
- };
- SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
- const BSONObj& theOperation) {
+TEST_F(SyncTailTest, SyncApplyNoOpApplyOpThrowsException) {
+ const BSONObj op = BSON("op"
+ << "n"
+ << "ns"
+ << "test.t");
+ int applyOpCalled = 0;
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ Database* db,
+ const BSONObj& theOperation,
+ bool convertUpdateToUpsert) {
+ applyOpCalled++;
+ if (applyOpCalled < 5) {
+ throw WriteConflictException();
+ }
+ return Status::OK();
+ };
+ SyncTail::ApplyCommandInLockFn applyCmd =
+ [&](OperationContext* txn, const BSONObj& theOperation) {
FAIL("applyCommand unexpectedly invoked.");
return Status::OK();
};
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
- ASSERT_EQUALS(5, applyOpCalled);
- ASSERT_EQUALS(1U, _opsApplied);
- }
+ ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
+ ASSERT_EQUALS(5, applyOpCalled);
+ ASSERT_EQUALS(1U, _opsApplied);
+}
- void SyncTailTest::_testSyncApplyInsertDocument(LockMode expectedMode) {
- const BSONObj op = BSON("op" << "i" << "ns" << "test.t");
- bool applyOpCalled = false;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
- Database* db,
- const BSONObj& theOperation,
- bool convertUpdateToUpsert) {
- applyOpCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", expectedMode));
- ASSERT_TRUE(txn->lockState()->isCollectionLockedForMode("test.t", expectedMode));
- ASSERT_FALSE(txn->writesAreReplicated());
- ASSERT_TRUE(documentValidationDisabled(txn));
- ASSERT_TRUE(db);
- ASSERT_EQUALS(op, theOperation);
- ASSERT_TRUE(convertUpdateToUpsert);
- return Status::OK();
- };
- SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
- const BSONObj& theOperation) {
+void SyncTailTest::_testSyncApplyInsertDocument(LockMode expectedMode) {
+ const BSONObj op = BSON("op"
+ << "i"
+ << "ns"
+ << "test.t");
+ bool applyOpCalled = false;
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ Database* db,
+ const BSONObj& theOperation,
+ bool convertUpdateToUpsert) {
+ applyOpCalled = true;
+ ASSERT_TRUE(txn);
+ ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", expectedMode));
+ ASSERT_TRUE(txn->lockState()->isCollectionLockedForMode("test.t", expectedMode));
+ ASSERT_FALSE(txn->writesAreReplicated());
+ ASSERT_TRUE(documentValidationDisabled(txn));
+ ASSERT_TRUE(db);
+ ASSERT_EQUALS(op, theOperation);
+ ASSERT_TRUE(convertUpdateToUpsert);
+ return Status::OK();
+ };
+ SyncTail::ApplyCommandInLockFn applyCmd =
+ [&](OperationContext* txn, const BSONObj& theOperation) {
FAIL("applyCommand unexpectedly invoked.");
return Status::OK();
};
- ASSERT_TRUE(_txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(_txn.get()));
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, true, applyOp, applyCmd, _incOps));
- ASSERT_TRUE(applyOpCalled);
- ASSERT_EQUALS(1U, _opsApplied);
- }
+ ASSERT_TRUE(_txn->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(_txn.get()));
+ ASSERT_OK(SyncTail::syncApply(_txn.get(), op, true, applyOp, applyCmd, _incOps));
+ ASSERT_TRUE(applyOpCalled);
+ ASSERT_EQUALS(1U, _opsApplied);
+}
- TEST_F(SyncTailTest, SyncApplyInsertDocumentDatabaseMissing) {
- _testSyncApplyInsertDocument(MODE_X);
- }
+TEST_F(SyncTailTest, SyncApplyInsertDocumentDatabaseMissing) {
+ _testSyncApplyInsertDocument(MODE_X);
+}
- TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionMissing) {
- {
- Lock::GlobalWrite globalLock(_txn->lockState());
- bool justCreated = false;
- Database* db = dbHolder().openDb(_txn.get(), "test", &justCreated);
- ASSERT_TRUE(db);
- ASSERT_TRUE(justCreated);
- }
- _testSyncApplyInsertDocument(MODE_X);
+TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionMissing) {
+ {
+ Lock::GlobalWrite globalLock(_txn->lockState());
+ bool justCreated = false;
+ Database* db = dbHolder().openDb(_txn.get(), "test", &justCreated);
+ ASSERT_TRUE(db);
+ ASSERT_TRUE(justCreated);
}
+ _testSyncApplyInsertDocument(MODE_X);
+}
- TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionExists) {
- {
- Lock::GlobalWrite globalLock(_txn->lockState());
- bool justCreated = false;
- Database* db = dbHolder().openDb(_txn.get(), "test", &justCreated);
- ASSERT_TRUE(db);
- ASSERT_TRUE(justCreated);
- Collection* collection = db->createCollection(_txn.get(), "test.t");
- ASSERT_TRUE(collection);
- }
- _testSyncApplyInsertDocument(MODE_IX);
+TEST_F(SyncTailTest, SyncApplyInsertDocumentCollectionExists) {
+ {
+ Lock::GlobalWrite globalLock(_txn->lockState());
+ bool justCreated = false;
+ Database* db = dbHolder().openDb(_txn.get(), "test", &justCreated);
+ ASSERT_TRUE(db);
+ ASSERT_TRUE(justCreated);
+ Collection* collection = db->createCollection(_txn.get(), "test.t");
+ ASSERT_TRUE(collection);
}
+ _testSyncApplyInsertDocument(MODE_IX);
+}
- TEST_F(SyncTailTest, SyncApplyIndexBuild) {
- const BSONObj op = BSON("op" << "i" << "ns" << "test.system.indexes");
- bool applyOpCalled = false;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
- Database* db,
- const BSONObj& theOperation,
- bool convertUpdateToUpsert) {
- applyOpCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", MODE_X));
- ASSERT_FALSE(txn->writesAreReplicated());
- ASSERT_TRUE(documentValidationDisabled(txn));
- ASSERT_TRUE(db);
- ASSERT_EQUALS(op, theOperation);
- ASSERT_FALSE(convertUpdateToUpsert);
- return Status::OK();
- };
- SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
- const BSONObj& theOperation) {
+TEST_F(SyncTailTest, SyncApplyIndexBuild) {
+ const BSONObj op = BSON("op"
+ << "i"
+ << "ns"
+ << "test.system.indexes");
+ bool applyOpCalled = false;
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ Database* db,
+ const BSONObj& theOperation,
+ bool convertUpdateToUpsert) {
+ applyOpCalled = true;
+ ASSERT_TRUE(txn);
+ ASSERT_TRUE(txn->lockState()->isDbLockedForMode("test", MODE_X));
+ ASSERT_FALSE(txn->writesAreReplicated());
+ ASSERT_TRUE(documentValidationDisabled(txn));
+ ASSERT_TRUE(db);
+ ASSERT_EQUALS(op, theOperation);
+ ASSERT_FALSE(convertUpdateToUpsert);
+ return Status::OK();
+ };
+ SyncTail::ApplyCommandInLockFn applyCmd =
+ [&](OperationContext* txn, const BSONObj& theOperation) {
FAIL("applyCommand unexpectedly invoked.");
return Status::OK();
};
- ASSERT_TRUE(_txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(_txn.get()));
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
- ASSERT_TRUE(applyOpCalled);
- ASSERT_EQUALS(1U, _opsApplied);
- }
+ ASSERT_TRUE(_txn->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(_txn.get()));
+ ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
+ ASSERT_TRUE(applyOpCalled);
+ ASSERT_EQUALS(1U, _opsApplied);
+}
- TEST_F(SyncTailTest, SyncApplyCommand) {
- const BSONObj op = BSON("op" << "c" << "ns" << "test.t");
- bool applyCmdCalled = false;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
- Database* db,
- const BSONObj& theOperation,
- bool convertUpdateToUpsert) {
- FAIL("applyOperation unexpectedly invoked.");
- return Status::OK();
- };
- SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
- const BSONObj& theOperation) {
+TEST_F(SyncTailTest, SyncApplyCommand) {
+ const BSONObj op = BSON("op"
+ << "c"
+ << "ns"
+ << "test.t");
+ bool applyCmdCalled = false;
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ Database* db,
+ const BSONObj& theOperation,
+ bool convertUpdateToUpsert) {
+ FAIL("applyOperation unexpectedly invoked.");
+ return Status::OK();
+ };
+ SyncTail::ApplyCommandInLockFn applyCmd =
+ [&](OperationContext* txn, const BSONObj& theOperation) {
applyCmdCalled = true;
ASSERT_TRUE(txn);
ASSERT_TRUE(txn->lockState()->isW());
@@ -305,34 +324,37 @@ namespace {
ASSERT_EQUALS(op, theOperation);
return Status::OK();
};
- ASSERT_TRUE(_txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(_txn.get()));
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
- ASSERT_TRUE(applyCmdCalled);
- ASSERT_EQUALS(1U, _opsApplied);
- }
+ ASSERT_TRUE(_txn->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(_txn.get()));
+ ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
+ ASSERT_TRUE(applyCmdCalled);
+ ASSERT_EQUALS(1U, _opsApplied);
+}
- TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
- const BSONObj op = BSON("op" << "c" << "ns" << "test.t");
- int applyCmdCalled = 0;
- SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
- Database* db,
- const BSONObj& theOperation,
- bool convertUpdateToUpsert) {
- FAIL("applyOperation unexpectedly invoked.");
- return Status::OK();
- };
- SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
- const BSONObj& theOperation) {
+TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
+ const BSONObj op = BSON("op"
+ << "c"
+ << "ns"
+ << "test.t");
+ int applyCmdCalled = 0;
+ SyncTail::ApplyOperationInLockFn applyOp = [&](OperationContext* txn,
+ Database* db,
+ const BSONObj& theOperation,
+ bool convertUpdateToUpsert) {
+ FAIL("applyOperation unexpectedly invoked.");
+ return Status::OK();
+ };
+ SyncTail::ApplyCommandInLockFn applyCmd =
+ [&](OperationContext* txn, const BSONObj& theOperation) {
applyCmdCalled++;
if (applyCmdCalled < 5) {
throw WriteConflictException();
}
return Status::OK();
};
- ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
- ASSERT_EQUALS(5, applyCmdCalled);
- ASSERT_EQUALS(1U, _opsApplied);
- }
+ ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
+ ASSERT_EQUALS(5, applyCmdCalled);
+ ASSERT_EQUALS(1U, _opsApplied);
+}
-} // namespace
+} // namespace
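
The tests above work because SyncTail::syncApply takes its apply handlers as function arguments, so each test can pass lambdas and assert on the lock mode, namespace, and flags they receive. Below is a minimal, self-contained sketch of that dispatch-by-injected-handler pattern; the Op struct and syncApplyLike function are illustrative stand-ins, not the MongoDB API.

// Minimal sketch (not MongoDB code): dispatch an oplog-like entry to
// injectable handlers, so a test can substitute lambdas and assert on the
// arguments they receive, in the spirit of the SyncTail tests above.
#include <cassert>
#include <functional>
#include <string>

struct Op {
    std::string type;  // "i" (insert), "c" (command), ...
    std::string ns;    // namespace, e.g. "test.t"
};

using ApplyOpFn = std::function<bool(const Op&)>;
using ApplyCmdFn = std::function<bool(const Op&)>;

// Routes commands to applyCmd and everything else to applyOp.
bool syncApplyLike(const Op& op, ApplyOpFn applyOp, ApplyCmdFn applyCmd) {
    return op.type == "c" ? applyCmd(op) : applyOp(op);
}

int main() {
    bool opCalled = false;
    auto applyOp = [&](const Op&) { opCalled = true; return true; };
    auto applyCmd = [](const Op&) { assert(false && "applyCommand unexpectedly invoked"); return true; };
    assert(syncApplyLike({"i", "test.t"}, applyOp, applyCmd));
    assert(opCalled);
    return 0;
}
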
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index fc0a594ac83..385a76207cd 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -46,165 +46,162 @@ namespace repl {
namespace {
- /**
- * Runs a single task runner task.
- * Any exceptions thrown by the task will be logged and converted into a
- * next action of kCancel.
- */
- TaskRunner::NextAction runSingleTask(const TaskRunner::Task& task,
- OperationContext* txn,
- const Status& status) {
- try {
- return task(txn, status);
+/**
+ * Runs a single task runner task.
+ * Any exceptions thrown by the task will be logged and converted into a
+ * next action of kCancel.
+ */
+TaskRunner::NextAction runSingleTask(const TaskRunner::Task& task,
+ OperationContext* txn,
+ const Status& status) {
+ try {
+ return task(txn, status);
+ } catch (...) {
+ log() << "Unhandled exception in task runner: " << exceptionToStatus();
+ }
+ return TaskRunner::NextAction::kCancel;
+}
+
+} // namespace
+
+// static
+TaskRunner::Task TaskRunner::makeCancelTask() {
+ return [](OperationContext* txn, const Status& status) { return NextAction::kCancel; };
+}
+
+TaskRunner::TaskRunner(OldThreadPool* threadPool,
+ const CreateOperationContextFn& createOperationContext)
+ : _threadPool(threadPool),
+ _createOperationContext(createOperationContext),
+ _active(false),
+ _cancelRequested(false) {
+ uassert(ErrorCodes::BadValue, "null thread pool", threadPool);
+ uassert(ErrorCodes::BadValue, "null operation context factory", createOperationContext);
+}
+
+TaskRunner::~TaskRunner() {
+ try {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (!_active) {
+ return;
}
- catch (...) {
- log() << "Unhandled exception in task runner: " << exceptionToStatus();
+ _cancelRequested = true;
+ _condition.notify_all();
+ while (_active) {
+ _condition.wait(lk);
}
- return TaskRunner::NextAction::kCancel;
+ } catch (...) {
+ error() << "unexpected exception destroying task runner: " << exceptionToStatus();
}
+}
-} // namespace
+std::string TaskRunner::getDiagnosticString() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ str::stream output;
+ output << "TaskRunner";
+ output << " scheduled tasks: " << _tasks.size();
+ output << " active: " << _active;
+ output << " cancel requested: " << _cancelRequested;
+ return output;
+}
- // static
- TaskRunner::Task TaskRunner::makeCancelTask() {
- return [](OperationContext* txn, const Status& status) {
- return NextAction::kCancel;
- };
- }
+bool TaskRunner::isActive() const {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return _active;
+}
- TaskRunner::TaskRunner(OldThreadPool* threadPool,
- const CreateOperationContextFn& createOperationContext)
- : _threadPool(threadPool),
- _createOperationContext(createOperationContext),
- _active(false),
- _cancelRequested(false) {
+void TaskRunner::schedule(const Task& task) {
+ invariant(task);
- uassert(ErrorCodes::BadValue, "null thread pool", threadPool);
- uassert(ErrorCodes::BadValue, "null operation context factory", createOperationContext);
- }
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
- TaskRunner::~TaskRunner() {
- try {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (!_active) {
- return;
- }
- _cancelRequested = true;
- _condition.notify_all();
- while (_active) {
- _condition.wait(lk);
- }
- }
- catch (...) {
- error() << "unexpected exception destroying task runner: " << exceptionToStatus();
- }
- }
+ _tasks.push_back(task);
+ _condition.notify_all();
- std::string TaskRunner::getDiagnosticString() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- str::stream output;
- output << "TaskRunner";
- output << " scheduled tasks: " << _tasks.size();
- output << " active: " << _active;
- output << " cancel requested: " << _cancelRequested;
- return output;
+ if (_active) {
+ return;
}
- bool TaskRunner::isActive() const {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _active;
- }
+ _threadPool->schedule(stdx::bind(&TaskRunner::_runTasks, this));
- void TaskRunner::schedule(const Task& task) {
- invariant(task);
+ _active = true;
+ _cancelRequested = false;
+}
- stdx::lock_guard<stdx::mutex> lk(_mutex);
+void TaskRunner::cancel() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _cancelRequested = true;
+ _condition.notify_all();
+}
- _tasks.push_back(task);
- _condition.notify_all();
+void TaskRunner::_runTasks() {
+ std::unique_ptr<OperationContext> txn;
- if (_active) {
- return;
+ while (Task task = _waitForNextTask()) {
+ if (!txn) {
+ txn.reset(_createOperationContext());
}
- _threadPool->schedule(stdx::bind(&TaskRunner::_runTasks, this));
-
- _active = true;
- _cancelRequested = false;
- }
+ NextAction nextAction = runSingleTask(task, txn.get(), Status::OK());
- void TaskRunner::cancel() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _cancelRequested = true;
- _condition.notify_all();
- }
-
- void TaskRunner::_runTasks() {
- std::unique_ptr<OperationContext> txn;
-
- while (Task task = _waitForNextTask()) {
- if (!txn) {
- txn.reset(_createOperationContext());
- }
-
- NextAction nextAction = runSingleTask(task, txn.get(), Status::OK());
-
- if (nextAction != NextAction::kKeepOperationContext) {
- txn.reset();
- }
-
- if (nextAction == NextAction::kCancel) {
- break;
- }
- // Release thread back to pool after disposing if no scheduled tasks in queue.
- if (nextAction == NextAction::kDisposeOperationContext ||
- nextAction == NextAction::kInvalid) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (_tasks.empty()) {
- _finishRunTasks_inlock();
- return;
- }
- }
+ if (nextAction != NextAction::kKeepOperationContext) {
+ txn.reset();
}
- txn.reset();
- std::list<Task> tasks;
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- tasks.swap(_tasks);
+ if (nextAction == NextAction::kCancel) {
+ break;
}
-
- // Cancel remaining tasks with a CallbackCanceled status.
- for (auto task : tasks) {
- runSingleTask(task, nullptr, Status(ErrorCodes::CallbackCanceled,
- "this task has been canceled by a previously invoked task"));
+ // Release thread back to pool after disposing if no scheduled tasks in queue.
+ if (nextAction == NextAction::kDisposeOperationContext ||
+ nextAction == NextAction::kInvalid) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (_tasks.empty()) {
+ _finishRunTasks_inlock();
+ return;
+ }
}
+ }
+ txn.reset();
+ std::list<Task> tasks;
+ {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- _finishRunTasks_inlock();
+ tasks.swap(_tasks);
}
- void TaskRunner::_finishRunTasks_inlock() {
- _active = false;
- _cancelRequested = false;
- _condition.notify_all();
+ // Cancel remaining tasks with a CallbackCanceled status.
+ for (auto task : tasks) {
+ runSingleTask(task,
+ nullptr,
+ Status(ErrorCodes::CallbackCanceled,
+ "this task has been canceled by a previously invoked task"));
}
- TaskRunner::Task TaskRunner::_waitForNextTask() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _finishRunTasks_inlock();
+}
- while (_tasks.empty() && !_cancelRequested) {
- _condition.wait(lk);
- }
+void TaskRunner::_finishRunTasks_inlock() {
+ _active = false;
+ _cancelRequested = false;
+ _condition.notify_all();
+}
- if (_cancelRequested) {
- return Task();
- }
+TaskRunner::Task TaskRunner::_waitForNextTask() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
- Task task = _tasks.front();
- _tasks.pop_front();
- return task;
+ while (_tasks.empty() && !_cancelRequested) {
+ _condition.wait(lk);
}
-} // namespace repl
-} // namespace mongo
+ if (_cancelRequested) {
+ return Task();
+ }
+
+ Task task = _tasks.front();
+ _tasks.pop_front();
+ return task;
+}
+
+} // namespace repl
+} // namespace mongo
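
For readers following the control flow rather than the whitespace change: the reformatted TaskRunner body above is a condition-variable-guarded FIFO. schedule() queues a task and wakes the worker, _waitForNextTask() blocks until a task or a cancel request arrives, and _runTasks() drains the queue until a task asks to stop. The following compilable sketch shows that loop using only standard C++; MiniRunner and its bool-returning Task are simplifications, not the classes in this diff.

// Standard-library sketch of the queue/condition-variable pattern used by
// TaskRunner::_runTasks/_waitForNextTask: a worker drains a FIFO of tasks
// until one asks to stop or cancel() is called.
#include <condition_variable>
#include <functional>
#include <iostream>
#include <list>
#include <mutex>
#include <thread>

class MiniRunner {
public:
    using Task = std::function<bool()>;  // return false to stop the loop

    void schedule(Task t) {
        std::lock_guard<std::mutex> lk(_mutex);
        _tasks.push_back(std::move(t));
        _condition.notify_all();
        if (!_active) {
            _active = true;
            _worker = std::thread([this] { _runTasks(); });
        }
    }
    void cancel() {
        std::lock_guard<std::mutex> lk(_mutex);
        _cancelRequested = true;
        _condition.notify_all();
    }
    void join() { if (_worker.joinable()) _worker.join(); }

private:
    Task _waitForNextTask() {
        std::unique_lock<std::mutex> lk(_mutex);
        _condition.wait(lk, [this] { return !_tasks.empty() || _cancelRequested; });
        if (_cancelRequested) return Task();  // null task signals shutdown
        Task t = std::move(_tasks.front());
        _tasks.pop_front();
        return t;
    }
    void _runTasks() {
        while (Task t = _waitForNextTask()) {
            if (!t()) break;  // analogous to NextAction::kCancel
        }
    }

    std::mutex _mutex;
    std::condition_variable _condition;
    std::list<Task> _tasks;
    std::thread _worker;
    bool _active = false;
    bool _cancelRequested = false;
};

int main() {
    MiniRunner runner;
    runner.schedule([] { std::cout << "task 1\n"; return true; });
    runner.schedule([] { std::cout << "task 2 (last)\n"; return false; });
    runner.join();
    return 0;
}
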
diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h
index 33e879e08b8..f041174ffd9 100644
--- a/src/mongo/db/repl/task_runner.h
+++ b/src/mongo/db/repl/task_runner.h
@@ -37,129 +37,127 @@
namespace mongo {
- class OperationContext;
- class Status;
- class OldThreadPool;
+class OperationContext;
+class Status;
+class OldThreadPool;
namespace repl {
- class TaskRunner {
- MONGO_DISALLOW_COPYING(TaskRunner);
- public:
-
- /**
- * Represents next steps of task runner.
- */
- enum class NextAction {
- kInvalid=0,
- kDisposeOperationContext=1,
- kKeepOperationContext=2,
- kCancel=3,
- };
-
- using CreateOperationContextFn = stdx::function<OperationContext*()>;
- using Task = stdx::function<NextAction (OperationContext*, const Status&)>;
-
- /**
- * Creates a Task returning kCancel. This is useful in shutting down the task runner after
- * running a series of tasks.
- *
- * Without a cancellation task, the client would need to coordinate the completion of the
- * last task with calling cancel() on the task runner.
- */
- static Task makeCancelTask();
-
- TaskRunner(OldThreadPool* threadPool,
- const CreateOperationContextFn& createOperationContext);
-
- virtual ~TaskRunner();
-
- /**
- * Returns diagnostic information.
- */
- std::string getDiagnosticString() const;
-
- /**
- * Returns true if there are any scheduled or actively running tasks.
- */
- bool isActive() const;
-
- /**
- * Schedules a task to be run by the task runner. Tasks are run in the same order that they
- * are scheduled.
- *
- * This transitions the task runner to an active state.
- *
- * The task runner creates an operation context using '_createOperationContext'
- * prior to running a scheduled task. Depending on the NextAction returned from the
- * task, operation contexts may be shared between consecutive tasks invoked by the task
- * runner.
- *
- * On completion, each task is expected to return a NextAction to the task runner.
- *
- * If the task returns kDisposeOperationContext, the task runner destroys the operation
- * context. The next task to be invoked will receive a new operation context.
- *
- * If the task returns kKeepOperationContext, the task runner will retain the operation
- * context to pass to the next task in the queue.
- *
- * If the task returns kCancel, the task runner will destroy the operation context and
- * cancel the remaining tasks (each task will be invoked with a status containing the
- * code ErrorCodes::CallbackCanceled). After all the tasks have been canceled, the task
- * runner will become inactive.
- *
- * If the task returns kInvalid, this NextAction will be handled in the same way as
- * kDisposeOperationContext.
- *
- * If the status passed to the task is not OK, the task should not proceed and return
- * immediately. This is usually the case when the task runner is canceled. Accessing the
- * operation context in the task will result in undefined behavior.
- */
- void schedule(const Task& task);
-
- /**
- * If there is a task that is already running, allows the task to run to completion.
- * Cancels all scheduled tasks that have not been run. Canceled tasks will still be
- * invoked with a status containing the code ErrorCodes::CallbackCanceled.
- * After all active tasks have completed and unscheduled tasks have been canceled, the
- * task runner will go into an inactive state.
- *
- * It is a no-op to call cancel() before scheduling any tasks.
- */
- void cancel();
-
- private:
-
- /**
- * Runs tasks in a loop.
- * Loop exits when any of the tasks returns a non-kContinue next action.
- */
- void _runTasks();
- void _finishRunTasks_inlock();
-
- /**
- * Waits for next scheduled task to be added to queue.
- * Returns null task when task runner is stopped.
- */
- Task _waitForNextTask();
-
- OldThreadPool* _threadPool;
- CreateOperationContextFn _createOperationContext;
-
- // Protects member data of this TaskRunner.
- mutable stdx::mutex _mutex;
-
- stdx::condition_variable _condition;
-
- // _active is true when there are scheduled tasks in the task queue or
- // when a task is being run by the task runner.
- bool _active;
-
- bool _cancelRequested;
-
- // FIFO queue of scheduled tasks
- std::list<Task> _tasks;
+class TaskRunner {
+ MONGO_DISALLOW_COPYING(TaskRunner);
+
+public:
+ /**
+ * Represents next steps of task runner.
+ */
+ enum class NextAction {
+ kInvalid = 0,
+ kDisposeOperationContext = 1,
+ kKeepOperationContext = 2,
+ kCancel = 3,
};
-} // namespace repl
-} // namespace mongo
+ using CreateOperationContextFn = stdx::function<OperationContext*()>;
+ using Task = stdx::function<NextAction(OperationContext*, const Status&)>;
+
+ /**
+ * Creates a Task returning kCancel. This is useful in shutting down the task runner after
+ * running a series of tasks.
+ *
+ * Without a cancellation task, the client would need to coordinate the completion of the
+ * last task with calling cancel() on the task runner.
+ */
+ static Task makeCancelTask();
+
+ TaskRunner(OldThreadPool* threadPool, const CreateOperationContextFn& createOperationContext);
+
+ virtual ~TaskRunner();
+
+ /**
+ * Returns diagnostic information.
+ */
+ std::string getDiagnosticString() const;
+
+ /**
+ * Returns true if there are any scheduled or actively running tasks.
+ */
+ bool isActive() const;
+
+ /**
+ * Schedules a task to be run by the task runner. Tasks are run in the same order that they
+ * are scheduled.
+ *
+ * This transitions the task runner to an active state.
+ *
+ * The task runner creates an operation context using '_createOperationContext'
+ * prior to running a scheduled task. Depending on the NextAction returned from the
+ * task, operation contexts may be shared between consecutive tasks invoked by the task
+ * runner.
+ *
+ * On completion, each task is expected to return a NextAction to the task runner.
+ *
+ * If the task returns kDisposeOperationContext, the task runner destroys the operation
+ * context. The next task to be invoked will receive a new operation context.
+ *
+ * If the task returns kKeepOperationContext, the task runner will retain the operation
+ * context to pass to the next task in the queue.
+ *
+ * If the task returns kCancel, the task runner will destroy the operation context and
+ * cancel the remaining tasks (each task will be invoked with a status containing the
+ * code ErrorCodes::CallbackCanceled). After all the tasks have been canceled, the task
+ * runner will become inactive.
+ *
+ * If the task returns kInvalid, this NextAction will be handled in the same way as
+ * kDisposeOperationContext.
+ *
+ * If the status passed to the task is not OK, the task should not proceed and return
+ * immediately. This is usually the case when the task runner is canceled. Accessing the
+ * operation context in the task will result in undefined behavior.
+ */
+ void schedule(const Task& task);
+
+ /**
+ * If there is a task that is already running, allows the task to run to completion.
+ * Cancels all scheduled tasks that have not been run. Canceled tasks will still be
+ * invoked with a status containing the code ErrorCodes::CallbackCanceled.
+ * After all active tasks have completed and unscheduled tasks have been canceled, the
+ * task runner will go into an inactive state.
+ *
+ * It is a no-op to call cancel() before scheduling any tasks.
+ */
+ void cancel();
+
+private:
+ /**
+ * Runs tasks in a loop.
+ * Loop exits when any of the tasks returns a non-kContinue next action.
+ */
+ void _runTasks();
+ void _finishRunTasks_inlock();
+
+ /**
+ * Waits for next scheduled task to be added to queue.
+ * Returns null task when task runner is stopped.
+ */
+ Task _waitForNextTask();
+
+ OldThreadPool* _threadPool;
+ CreateOperationContextFn _createOperationContext;
+
+ // Protects member data of this TaskRunner.
+ mutable stdx::mutex _mutex;
+
+ stdx::condition_variable _condition;
+
+ // _active is true when there are scheduled tasks in the task queue or
+ // when a task is being run by the task runner.
+ bool _active;
+
+ bool _cancelRequested;
+
+ // FIFO queue of scheduled tasks
+ std::list<Task> _tasks;
+};
+
+} // namespace repl
+} // namespace mongo
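
The NextAction contract documented in this header boils down to: keep the operation context only for kKeepOperationContext, treat kInvalid like kDisposeOperationContext, and stop (cancelling remaining tasks) on kCancel. A tiny standalone sketch of that decision, using a stand-in enum and a unique_ptr in place of the real OperationContext machinery, might look like:

// Self-contained sketch of the NextAction semantics described above; Ctx and
// handleNextAction are illustrative names, not MongoDB declarations.
#include <cassert>
#include <memory>

enum class NextAction { kInvalid = 0, kDisposeOperationContext, kKeepOperationContext, kCancel };

struct Ctx { int id; };

// Mirrors the loop body: dispose the context unless the task asked to keep it;
// returns false when the runner should stop and cancel remaining tasks.
bool handleNextAction(NextAction next, std::unique_ptr<Ctx>& ctx) {
    if (next != NextAction::kKeepOperationContext) {
        ctx.reset();  // kInvalid is treated like kDisposeOperationContext
    }
    return next != NextAction::kCancel;
}

int main() {
    std::unique_ptr<Ctx> ctx(new Ctx{1});
    assert(handleNextAction(NextAction::kKeepOperationContext, ctx) && ctx);
    assert(handleNextAction(NextAction::kDisposeOperationContext, ctx) && !ctx);
    ctx.reset(new Ctx{2});
    assert(!handleNextAction(NextAction::kCancel, ctx) && !ctx);
    return 0;
}
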
diff --git a/src/mongo/db/repl/task_runner_test.cpp b/src/mongo/db/repl/task_runner_test.cpp
index c1e4d10b731..3c6f9d29f45 100644
--- a/src/mongo/db/repl/task_runner_test.cpp
+++ b/src/mongo/db/repl/task_runner_test.cpp
@@ -40,312 +40,311 @@
namespace {
- using namespace mongo;
- using namespace mongo::repl;
-
- using Task = TaskRunner::Task;
-
- TEST_F(TaskRunnerTest, InvalidConstruction) {
- // Null thread pool.
- ASSERT_THROWS_CODE(TaskRunner(nullptr, []() -> OperationContext* { return nullptr; }),
- UserException,
- ErrorCodes::BadValue);
-
- // Null function for creating operation contexts.
- ASSERT_THROWS_CODE(TaskRunner(&getThreadPool(), TaskRunner::CreateOperationContextFn()),
- UserException,
- ErrorCodes::BadValue);
- }
-
- TEST_F(TaskRunnerTest, GetDiagnosticString) {
- ASSERT_FALSE(getTaskRunner().getDiagnosticString().empty());
- }
-
- TEST_F(TaskRunnerTest, CallbackValues) {
- stdx::mutex mutex;
- bool called = false;
- OperationContext* txn = nullptr;
- Status status = getDetectableErrorStatus();
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- called = true;
- txn = theTxn;
- status = theStatus;
- return TaskRunner::NextAction::kCancel;
- };
- getTaskRunner().schedule(task);
- getThreadPool().join();
- ASSERT_FALSE(getTaskRunner().isActive());
-
+using namespace mongo;
+using namespace mongo::repl;
+
+using Task = TaskRunner::Task;
+
+TEST_F(TaskRunnerTest, InvalidConstruction) {
+ // Null thread pool.
+ ASSERT_THROWS_CODE(TaskRunner(nullptr, []() -> OperationContext* { return nullptr; }),
+ UserException,
+ ErrorCodes::BadValue);
+
+ // Null function for creating operation contexts.
+ ASSERT_THROWS_CODE(TaskRunner(&getThreadPool(), TaskRunner::CreateOperationContextFn()),
+ UserException,
+ ErrorCodes::BadValue);
+}
+
+TEST_F(TaskRunnerTest, GetDiagnosticString) {
+ ASSERT_FALSE(getTaskRunner().getDiagnosticString().empty());
+}
+
+TEST_F(TaskRunnerTest, CallbackValues) {
+ stdx::mutex mutex;
+ bool called = false;
+ OperationContext* txn = nullptr;
+ Status status = getDetectableErrorStatus();
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_TRUE(called);
- ASSERT(txn);
- ASSERT_OK(status);
- }
-
- TEST_F(TaskRunnerTest, OperationContextFactoryReturnsNull) {
- resetTaskRunner(new TaskRunner(&getThreadPool(), []() -> OperationContext* {
- return nullptr;
- }));
- stdx::mutex mutex;
- bool called = false;
- OperationContextNoop opCtxNoop;
- OperationContext* txn = &opCtxNoop;
- Status status = getDetectableErrorStatus();
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- called = true;
- txn = theTxn;
- status = theStatus;
- return TaskRunner::NextAction::kCancel;
- };
- getTaskRunner().schedule(task);
- getThreadPool().join();
- ASSERT_FALSE(getTaskRunner().isActive());
-
+ called = true;
+ txn = theTxn;
+ status = theStatus;
+ return TaskRunner::NextAction::kCancel;
+ };
+ getTaskRunner().schedule(task);
+ getThreadPool().join();
+ ASSERT_FALSE(getTaskRunner().isActive());
+
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_TRUE(called);
+ ASSERT(txn);
+ ASSERT_OK(status);
+}
+
+TEST_F(TaskRunnerTest, OperationContextFactoryReturnsNull) {
+ resetTaskRunner(
+ new TaskRunner(&getThreadPool(), []() -> OperationContext* { return nullptr; }));
+ stdx::mutex mutex;
+ bool called = false;
+ OperationContextNoop opCtxNoop;
+ OperationContext* txn = &opCtxNoop;
+ Status status = getDetectableErrorStatus();
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_TRUE(called);
- ASSERT_FALSE(txn);
- ASSERT_OK(status);
- }
-
- std::vector<int> _testRunTaskTwice(TaskRunnerTest& test,
- TaskRunner::NextAction nextAction,
- stdx::function<void(const Task& task)> schedule) {
- unittest::Barrier barrier(2U);
- stdx::mutex mutex;
- int i = 0;
- OperationContext* txn[2] = {nullptr, nullptr};
- int txnId[2] = {-100, -100};
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- int j = i++;
- if (j >= 2) {
- return TaskRunner::NextAction::kInvalid;
- }
- txn[j] = theTxn;
- txnId[j] = TaskRunnerTest::getOperationContextId(txn[j]);
- TaskRunner::NextAction result = j == 0 ? nextAction : TaskRunner::NextAction::kCancel;
- barrier.countDownAndWait();
- return result;
- };
-
- schedule(task);
- ASSERT_TRUE(test.getTaskRunner().isActive());
- barrier.countDownAndWait();
-
- schedule(task);
- ASSERT_TRUE(test.getTaskRunner().isActive());
+ called = true;
+ txn = theTxn;
+ status = theStatus;
+ return TaskRunner::NextAction::kCancel;
+ };
+ getTaskRunner().schedule(task);
+ getThreadPool().join();
+ ASSERT_FALSE(getTaskRunner().isActive());
+
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_TRUE(called);
+ ASSERT_FALSE(txn);
+ ASSERT_OK(status);
+}
+
+std::vector<int> _testRunTaskTwice(TaskRunnerTest& test,
+ TaskRunner::NextAction nextAction,
+ stdx::function<void(const Task& task)> schedule) {
+ unittest::Barrier barrier(2U);
+ stdx::mutex mutex;
+ int i = 0;
+ OperationContext* txn[2] = {nullptr, nullptr};
+ int txnId[2] = {-100, -100};
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ int j = i++;
+ if (j >= 2) {
+ return TaskRunner::NextAction::kInvalid;
+ }
+ txn[j] = theTxn;
+ txnId[j] = TaskRunnerTest::getOperationContextId(txn[j]);
+ TaskRunner::NextAction result = j == 0 ? nextAction : TaskRunner::NextAction::kCancel;
barrier.countDownAndWait();
+ return result;
+ };
+
+ schedule(task);
+ ASSERT_TRUE(test.getTaskRunner().isActive());
+ barrier.countDownAndWait();
+
+ schedule(task);
+ ASSERT_TRUE(test.getTaskRunner().isActive());
+ barrier.countDownAndWait();
+
+ test.getThreadPool().join();
+ ASSERT_FALSE(test.getTaskRunner().isActive());
+
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_EQUALS(2, i);
+ ASSERT(txn[0]);
+ ASSERT(txn[1]);
+ ASSERT_NOT_LESS_THAN(txnId[0], 0);
+ ASSERT_NOT_LESS_THAN(txnId[1], 0);
+ return {txnId[0], txnId[1]};
+}
+
+std::vector<int> _testRunTaskTwice(TaskRunnerTest& test, TaskRunner::NextAction nextAction) {
+ auto schedule = [&](const Task& task) { test.getTaskRunner().schedule(task); };
+ return _testRunTaskTwice(test, nextAction, schedule);
+}
+
+TEST_F(TaskRunnerTest, RunTaskTwiceDisposeOperationContext) {
+ std::vector<int> txnId =
+ _testRunTaskTwice(*this, TaskRunner::NextAction::kDisposeOperationContext);
+ ASSERT_NOT_EQUALS(txnId[0], txnId[1]);
+}
+
+// Joining thread pool before scheduling first task has no effect.
+// Joining thread pool before scheduling second task ensures that task runner releases
+// thread back to pool after disposing of operation context.
+TEST_F(TaskRunnerTest, RunTaskTwiceDisposeOperationContextJoinThreadPoolBeforeScheduling) {
+ auto schedule = [this](const Task& task) {
+ getThreadPool().join();
+ getTaskRunner().schedule(task);
+ };
+ std::vector<int> txnId =
+ _testRunTaskTwice(*this, TaskRunner::NextAction::kDisposeOperationContext, schedule);
+ ASSERT_NOT_EQUALS(txnId[0], txnId[1]);
+}
+
+TEST_F(TaskRunnerTest, RunTaskTwiceKeepOperationContext) {
+ std::vector<int> txnId =
+ _testRunTaskTwice(*this, TaskRunner::NextAction::kKeepOperationContext);
+ ASSERT_EQUALS(txnId[0], txnId[1]);
+}
+
+TEST_F(TaskRunnerTest, SkipSecondTask) {
+ stdx::mutex mutex;
+ int i = 0;
+ OperationContext* txn[2] = {nullptr, nullptr};
+ Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
+ stdx::condition_variable condition;
+ bool schedulingDone = false;
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
+ stdx::unique_lock<stdx::mutex> lk(mutex);
+ int j = i++;
+ if (j >= 2) {
+ return TaskRunner::NextAction::kCancel;
+ }
+ txn[j] = theTxn;
+ status[j] = theStatus;
- test.getThreadPool().join();
- ASSERT_FALSE(test.getTaskRunner().isActive());
+ // Wait for the test code to schedule the second task.
+ while (!schedulingDone) {
+ condition.wait(lk);
+ }
+ return TaskRunner::NextAction::kCancel;
+ };
+ getTaskRunner().schedule(task);
+ ASSERT_TRUE(getTaskRunner().isActive());
+ getTaskRunner().schedule(task);
+ {
stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_EQUALS(2, i);
- ASSERT(txn[0]);
- ASSERT(txn[1]);
- ASSERT_NOT_LESS_THAN(txnId[0], 0);
- ASSERT_NOT_LESS_THAN(txnId[1], 0);
- return {txnId[0], txnId[1]};
+ schedulingDone = true;
+ condition.notify_all();
}
-
- std::vector<int> _testRunTaskTwice(TaskRunnerTest& test, TaskRunner::NextAction nextAction) {
- auto schedule = [&](const Task& task) { test.getTaskRunner().schedule(task); };
- return _testRunTaskTwice(test, nextAction, schedule);
- }
-
- TEST_F(TaskRunnerTest, RunTaskTwiceDisposeOperationContext) {
- std::vector<int> txnId =
- _testRunTaskTwice(*this, TaskRunner::NextAction::kDisposeOperationContext);
- ASSERT_NOT_EQUALS(txnId[0], txnId[1]);
- }
-
- // Joining thread pool before scheduling first task has no effect.
- // Joining thread pool before scheduling second task ensures that task runner releases
- // thread back to pool after disposing of operation context.
- TEST_F(TaskRunnerTest, RunTaskTwiceDisposeOperationContextJoinThreadPoolBeforeScheduling) {
- auto schedule = [this](const Task& task) {
- getThreadPool().join();
- getTaskRunner().schedule(task);
- };
- std::vector<int> txnId =
- _testRunTaskTwice(*this, TaskRunner::NextAction::kDisposeOperationContext, schedule);
- ASSERT_NOT_EQUALS(txnId[0], txnId[1]);
- }
-
- TEST_F(TaskRunnerTest, RunTaskTwiceKeepOperationContext) {
- std::vector<int> txnId =
- _testRunTaskTwice(*this, TaskRunner::NextAction::kKeepOperationContext);
- ASSERT_EQUALS(txnId[0], txnId[1]);
- }
-
- TEST_F(TaskRunnerTest, SkipSecondTask) {
- stdx::mutex mutex;
- int i = 0;
- OperationContext* txn[2] = {nullptr, nullptr};
- Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
- stdx::condition_variable condition;
- bool schedulingDone = false;
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
- int j = i++;
- if (j >= 2) {
- return TaskRunner::NextAction::kCancel;
- }
- txn[j] = theTxn;
- status[j] = theStatus;
-
- // Wait for the test code to schedule the second task.
- while (!schedulingDone) {
- condition.wait(lk);
- }
-
+ getThreadPool().join();
+ ASSERT_FALSE(getTaskRunner().isActive());
+
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_EQUALS(2, i);
+ ASSERT(txn[0]);
+ ASSERT_OK(status[0]);
+ ASSERT_FALSE(txn[1]);
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status[1].code());
+}
+
+TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
+ stdx::mutex mutex;
+ int i = 0;
+ OperationContext* txn[2] = {nullptr, nullptr};
+ Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
+ stdx::condition_variable condition;
+ bool schedulingDone = false;
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
+ stdx::unique_lock<stdx::mutex> lk(mutex);
+ int j = i++;
+ if (j >= 2) {
return TaskRunner::NextAction::kCancel;
- };
- getTaskRunner().schedule(task);
- ASSERT_TRUE(getTaskRunner().isActive());
- getTaskRunner().schedule(task);
- {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- schedulingDone = true;
- condition.notify_all();
}
- getThreadPool().join();
- ASSERT_FALSE(getTaskRunner().isActive());
+ txn[j] = theTxn;
+ status[j] = theStatus;
- stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_EQUALS(2, i);
- ASSERT(txn[0]);
- ASSERT_OK(status[0]);
- ASSERT_FALSE(txn[1]);
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status[1].code());
- }
-
- TEST_F(TaskRunnerTest, FirstTaskThrowsException) {
- stdx::mutex mutex;
- int i = 0;
- OperationContext* txn[2] = {nullptr, nullptr};
- Status status[2] = {getDetectableErrorStatus(), getDetectableErrorStatus()};
- stdx::condition_variable condition;
- bool schedulingDone = false;
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::unique_lock<stdx::mutex> lk(mutex);
- int j = i++;
- if (j >= 2) {
- return TaskRunner::NextAction::kCancel;
- }
- txn[j] = theTxn;
- status[j] = theStatus;
-
- // Wait for the test code to schedule the second task.
- while (!schedulingDone) {
- condition.wait(lk);
- }
-
- // Throwing an exception from the first task should cancel
- // unscheduled tasks and make the task runner inactive.
- // When the second (canceled) task throws an exception, it should be ignored.
- uassert(ErrorCodes::OperationFailed, "task failure", false);
-
- // not reached.
- invariant(false);
- return TaskRunner::NextAction::kKeepOperationContext;
- };
- getTaskRunner().schedule(task);
- ASSERT_TRUE(getTaskRunner().isActive());
- getTaskRunner().schedule(task);
- {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- schedulingDone = true;
- condition.notify_all();
+ // Wait for the test code to schedule the second task.
+ while (!schedulingDone) {
+ condition.wait(lk);
}
- getThreadPool().join();
- ASSERT_FALSE(getTaskRunner().isActive());
+ // Throwing an exception from the first task should cancel
+ // unscheduled tasks and make the task runner inactive.
+ // When the second (canceled) task throws an exception, it should be ignored.
+ uassert(ErrorCodes::OperationFailed, "task failure", false);
+
+ // not reached.
+ invariant(false);
+ return TaskRunner::NextAction::kKeepOperationContext;
+ };
+ getTaskRunner().schedule(task);
+ ASSERT_TRUE(getTaskRunner().isActive());
+ getTaskRunner().schedule(task);
+ {
stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_EQUALS(2, i);
- ASSERT(txn[0]);
- ASSERT_OK(status[0]);
- ASSERT_FALSE(txn[1]);
- ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status[1].code());
+ schedulingDone = true;
+ condition.notify_all();
}
-
- TEST_F(TaskRunnerTest, Cancel) {
- stdx::mutex mutex;
- stdx::condition_variable condition;
- Status status = getDetectableErrorStatus();
- bool taskRunning = false;
-
- // Running this task causes the task runner to wait for another task that
- // is never scheduled.
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- status = theStatus;
- taskRunning = true;
- condition.notify_all();
- return TaskRunner::NextAction::kKeepOperationContext;
- };
-
- // Calling cancel() before schedule() has no effect.
- // The task should still be invoked with a successful status.
- getTaskRunner().cancel();
-
- getTaskRunner().schedule(task);
- ASSERT_TRUE(getTaskRunner().isActive());
- {
- stdx::unique_lock<stdx::mutex> lk(mutex);
- while (!taskRunning) {
- condition.wait(lk);
- }
+ getThreadPool().join();
+ ASSERT_FALSE(getTaskRunner().isActive());
+
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_EQUALS(2, i);
+ ASSERT(txn[0]);
+ ASSERT_OK(status[0]);
+ ASSERT_FALSE(txn[1]);
+ ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status[1].code());
+}
+
+TEST_F(TaskRunnerTest, Cancel) {
+ stdx::mutex mutex;
+ stdx::condition_variable condition;
+ Status status = getDetectableErrorStatus();
+ bool taskRunning = false;
+
+ // Running this task causes the task runner to wait for another task that
+ // is never scheduled.
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ status = theStatus;
+ taskRunning = true;
+ condition.notify_all();
+ return TaskRunner::NextAction::kKeepOperationContext;
+ };
+
+ // Calling cancel() before schedule() has no effect.
+ // The task should still be invoked with a successful status.
+ getTaskRunner().cancel();
+
+ getTaskRunner().schedule(task);
+ ASSERT_TRUE(getTaskRunner().isActive());
+ {
+ stdx::unique_lock<stdx::mutex> lk(mutex);
+ while (!taskRunning) {
+ condition.wait(lk);
}
+ }
- // It is fine to call cancel() multiple times.
- getTaskRunner().cancel();
- getTaskRunner().cancel();
+ // It is fine to call cancel() multiple times.
+ getTaskRunner().cancel();
+ getTaskRunner().cancel();
- getThreadPool().join();
- ASSERT_FALSE(getTaskRunner().isActive());
+ getThreadPool().join();
+ ASSERT_FALSE(getTaskRunner().isActive());
- // This status will not be OK if canceling the task runner
- // before scheduling the task results in the task being canceled.
- stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_OK(status);
- }
+ // This status will not be OK if canceling the task runner
+ // before scheduling the task results in the task being canceled.
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_OK(status);
+}
- TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
- stdx::mutex mutex;
- stdx::condition_variable condition;
- Status status = getDetectableErrorStatus();
- bool taskRunning = false;
-
- // Running this task causes the task runner to wait for another task that
- // is never scheduled.
- auto task = [&](OperationContext* theTxn, const Status& theStatus) {
- stdx::lock_guard<stdx::mutex> lk(mutex);
- status = theStatus;
- taskRunning = true;
- condition.notify_all();
- return TaskRunner::NextAction::kKeepOperationContext;
- };
+TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) {
+ stdx::mutex mutex;
+ stdx::condition_variable condition;
+ Status status = getDetectableErrorStatus();
+ bool taskRunning = false;
- getTaskRunner().schedule(task);
- ASSERT_TRUE(getTaskRunner().isActive());
- {
- stdx::unique_lock<stdx::mutex> lk(mutex);
- while (!taskRunning) {
- condition.wait(lk);
- }
+ // Running this task causes the task runner to wait for another task that
+ // is never scheduled.
+ auto task = [&](OperationContext* theTxn, const Status& theStatus) {
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ status = theStatus;
+ taskRunning = true;
+ condition.notify_all();
+ return TaskRunner::NextAction::kKeepOperationContext;
+ };
+
+ getTaskRunner().schedule(task);
+ ASSERT_TRUE(getTaskRunner().isActive());
+ {
+ stdx::unique_lock<stdx::mutex> lk(mutex);
+ while (!taskRunning) {
+ condition.wait(lk);
}
+ }
- destroyTaskRunner();
+ destroyTaskRunner();
- getThreadPool().join();
+ getThreadPool().join();
- // This status will not be OK if canceling the task runner
- // before scheduling the task results in the task being canceled.
- stdx::lock_guard<stdx::mutex> lk(mutex);
- ASSERT_OK(status);
- }
+ // This status will not be OK if canceling the task runner
+ // before scheduling the task results in the task being canceled.
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ ASSERT_OK(status);
+}
-} // namespace
+} // namespace
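
Several of the tests above (SkipSecondTask, FirstTaskThrowsException) hold the first task open on a condition variable until the test body has finished scheduling the second task; that is what guarantees the second task is still queued when the first one cancels or throws. The following standalone sketch shows that handshake with plain std::thread; it is illustrative only and does not use the fixture's types.

// Sketch of the "schedulingDone" handshake: the worker blocks on a flag so
// the test thread can schedule more work before the first task completes.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

int main() {
    std::mutex mutex;
    std::condition_variable condition;
    bool schedulingDone = false;

    std::thread worker([&] {
        std::unique_lock<std::mutex> lk(mutex);
        // First "task": wait for the test body to finish scheduling.
        condition.wait(lk, [&] { return schedulingDone; });
        std::cout << "first task resumed after scheduling finished\n";
    });

    {
        std::lock_guard<std::mutex> lk(mutex);
        // ... the test would schedule a second task here ...
        schedulingDone = true;
        condition.notify_all();
    }
    worker.join();
    return 0;
}
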
diff --git a/src/mongo/db/repl/task_runner_test_fixture.cpp b/src/mongo/db/repl/task_runner_test_fixture.cpp
index d595fc4b9f3..ede8abb989f 100644
--- a/src/mongo/db/repl/task_runner_test_fixture.cpp
+++ b/src/mongo/db/repl/task_runner_test_fixture.cpp
@@ -39,69 +39,76 @@
namespace mongo {
namespace repl {
- using namespace mongo;
- using namespace mongo::repl;
+using namespace mongo;
+using namespace mongo::repl;
namespace {
- const int kNumThreads = 3;
+const int kNumThreads = 3;
- AtomicInt32 _nextId;
+AtomicInt32 _nextId;
- class TaskRunnerOperationContext : public OperationContextNoop {
- public:
- TaskRunnerOperationContext() : _id(_nextId.fetchAndAdd(1)) { }
- int getId() const { return _id; }
- private:
- int _id;
- };
-
-
-} // namespace
-
- Status TaskRunnerTest::getDetectableErrorStatus() {
- return Status(ErrorCodes::InternalError, "Not mutated");
+class TaskRunnerOperationContext : public OperationContextNoop {
+public:
+ TaskRunnerOperationContext() : _id(_nextId.fetchAndAdd(1)) {}
+ int getId() const {
+ return _id;
}
- int TaskRunnerTest::getOperationContextId(OperationContext* txn) {
- if (!txn) { return -1; }
- TaskRunnerOperationContext* taskRunnerTxn = dynamic_cast<TaskRunnerOperationContext*>(txn);
- if (!taskRunnerTxn) { return -2; }
- return taskRunnerTxn->getId();
- }
+private:
+ int _id;
+};
- OperationContext* TaskRunnerTest::createOperationContext() const {
- return new TaskRunnerOperationContext();
- }
- TaskRunner& TaskRunnerTest::getTaskRunner() const {
- ASSERT(_taskRunner.get());
- return *_taskRunner;
- }
+} // namespace
- OldThreadPool& TaskRunnerTest::getThreadPool() const {
- ASSERT(_threadPool.get());
- return *_threadPool;
- }
+Status TaskRunnerTest::getDetectableErrorStatus() {
+ return Status(ErrorCodes::InternalError, "Not mutated");
+}
- void TaskRunnerTest::resetTaskRunner(TaskRunner* taskRunner) {
- _taskRunner.reset(taskRunner);
+int TaskRunnerTest::getOperationContextId(OperationContext* txn) {
+ if (!txn) {
+ return -1;
}
-
- void TaskRunnerTest::destroyTaskRunner() {
- _taskRunner.reset();
- }
-
- void TaskRunnerTest::setUp() {
- _threadPool.reset(new OldThreadPool(kNumThreads, "TaskRunnerTest-"));
- resetTaskRunner(new TaskRunner(_threadPool.get(),
- stdx::bind(&TaskRunnerTest::createOperationContext, this)));
+ TaskRunnerOperationContext* taskRunnerTxn = dynamic_cast<TaskRunnerOperationContext*>(txn);
+ if (!taskRunnerTxn) {
+ return -2;
}
-
- void TaskRunnerTest::tearDown() {
- destroyTaskRunner();
- _threadPool.reset();
- }
-
-} // namespace repl
-} // namespace mongo
+ return taskRunnerTxn->getId();
+}
+
+OperationContext* TaskRunnerTest::createOperationContext() const {
+ return new TaskRunnerOperationContext();
+}
+
+TaskRunner& TaskRunnerTest::getTaskRunner() const {
+ ASSERT(_taskRunner.get());
+ return *_taskRunner;
+}
+
+OldThreadPool& TaskRunnerTest::getThreadPool() const {
+ ASSERT(_threadPool.get());
+ return *_threadPool;
+}
+
+void TaskRunnerTest::resetTaskRunner(TaskRunner* taskRunner) {
+ _taskRunner.reset(taskRunner);
+}
+
+void TaskRunnerTest::destroyTaskRunner() {
+ _taskRunner.reset();
+}
+
+void TaskRunnerTest::setUp() {
+ _threadPool.reset(new OldThreadPool(kNumThreads, "TaskRunnerTest-"));
+ resetTaskRunner(new TaskRunner(_threadPool.get(),
+ stdx::bind(&TaskRunnerTest::createOperationContext, this)));
+}
+
+void TaskRunnerTest::tearDown() {
+ destroyTaskRunner();
+ _threadPool.reset();
+}
+
+} // namespace repl
+} // namespace mongo
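
The fixture above tags every mock operation context with a value drawn from a shared atomic counter, which is what lets tests such as RunTaskTwiceKeepOperationContext decide whether two tasks saw the same context. A small sketch of that scheme with std::atomic (rather than AtomicInt32/OperationContextNoop) follows:

// Each mock context takes the next value from a shared counter, so equal IDs
// imply a shared context and distinct IDs imply distinct contexts.
#include <atomic>
#include <cassert>

std::atomic<int> nextId{0};

struct MockContext {
    MockContext() : id(nextId.fetch_add(1)) {}
    int id;
};

int main() {
    MockContext a, b, c;
    assert(a.id == 0 && b.id == 1 && c.id == 2);  // each context gets the next ID
    return 0;
}
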
diff --git a/src/mongo/db/repl/task_runner_test_fixture.h b/src/mongo/db/repl/task_runner_test_fixture.h
index 70b25923498..dcbc13bbc07 100644
--- a/src/mongo/db/repl/task_runner_test_fixture.h
+++ b/src/mongo/db/repl/task_runner_test_fixture.h
@@ -35,48 +35,47 @@
namespace mongo {
- class OldThreadPool;
- class OperationContext;
+class OldThreadPool;
+class OperationContext;
namespace repl {
- class TaskRunner;
+class TaskRunner;
+
+/**
+ * Test fixture for tests that require a TaskRunner and/or
+ * ThreadPool.
+ */
+class TaskRunnerTest : public unittest::Test {
+public:
+ static Status getDetectableErrorStatus();
/**
- * Test fixture for tests that require a TaskRunner and/or
- * ThreadPool.
+ * Returns ID of mock operation context returned from createOperationContext().
+ * Returns -1 if txn is null.
+ * Returns -2 if txn cannot be converted to a mock operation context containing an ID.
*/
- class TaskRunnerTest : public unittest::Test {
- public:
- static Status getDetectableErrorStatus();
-
- /**
- * Returns ID of mock operation context returned from createOperationContext().
- * Returns -1 if txn is null.
- * Returns -2 if txn cannot be converted to a mock operation context containing an ID.
- */
- static int getOperationContextId(OperationContext* txn);
+ static int getOperationContextId(OperationContext* txn);
- /**
- * Returns an noop operation context with an embedded numerical ID.
- */
- virtual OperationContext* createOperationContext() const;
-
- OldThreadPool& getThreadPool() const;
- TaskRunner& getTaskRunner() const;
+ /**
+     * Returns a noop operation context with an embedded numerical ID.
+ */
+ virtual OperationContext* createOperationContext() const;
- void resetTaskRunner(TaskRunner* taskRunner);
- void destroyTaskRunner();
+ OldThreadPool& getThreadPool() const;
+ TaskRunner& getTaskRunner() const;
- protected:
+ void resetTaskRunner(TaskRunner* taskRunner);
+ void destroyTaskRunner();
- void setUp() override;
- void tearDown() override;
+protected:
+ void setUp() override;
+ void tearDown() override;
- private:
- std::unique_ptr<OldThreadPool> _threadPool;
- std::unique_ptr<TaskRunner> _taskRunner;
- };
+private:
+ std::unique_ptr<OldThreadPool> _threadPool;
+ std::unique_ptr<TaskRunner> _taskRunner;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
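
getOperationContextId(), documented above, encodes failure as sentinels: -1 for a null context and -2 when the pointer is not the fixture's mock type. A compilable sketch of that convention with stand-in Base/Mock classes (not OperationContext) follows:

// Sentinel convention: -1 for a null pointer, -2 when the dynamic type is not
// the mock, otherwise the embedded ID.
#include <cassert>

struct Base { virtual ~Base() {} };
struct Mock : Base {
    explicit Mock(int id) : id(id) {}
    int id;
};

int getId(Base* obj) {
    if (!obj) return -1;                 // null context
    Mock* mock = dynamic_cast<Mock*>(obj);
    if (!mock) return -2;                // not the mock type
    return mock->id;
}

int main() {
    Mock m(7);
    Base other;
    assert(getId(nullptr) == -1);
    assert(getId(&other) == -2);
    assert(getId(&m) == 7);
    return 0;
}
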
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index 99738a38421..7ca7ba6aa84 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -39,30 +39,30 @@
namespace mongo {
namespace repl {
namespace {
- static const int kLeaderValue = 0;
- static const int kFollowerValue = 1;
- static const int kCandidateValue = 2;
+static const int kLeaderValue = 0;
+static const int kFollowerValue = 1;
+static const int kCandidateValue = 2;
} // namespace
- const TopologyCoordinator::Role TopologyCoordinator::Role::leader(kLeaderValue);
- const TopologyCoordinator::Role TopologyCoordinator::Role::follower(kFollowerValue);
- const TopologyCoordinator::Role TopologyCoordinator::Role::candidate(kCandidateValue);
+const TopologyCoordinator::Role TopologyCoordinator::Role::leader(kLeaderValue);
+const TopologyCoordinator::Role TopologyCoordinator::Role::follower(kFollowerValue);
+const TopologyCoordinator::Role TopologyCoordinator::Role::candidate(kCandidateValue);
- TopologyCoordinator::Role::Role(int value) : _value(value) {}
+TopologyCoordinator::Role::Role(int value) : _value(value) {}
- std::string TopologyCoordinator::Role::toString() const {
- switch(_value) {
+std::string TopologyCoordinator::Role::toString() const {
+ switch (_value) {
case kLeaderValue:
return "leader";
case kFollowerValue:
return "follower";
case kCandidateValue:
return "candidate";
- }
- invariant(false);
}
+ invariant(false);
+}
- TopologyCoordinator::~TopologyCoordinator() {}
+TopologyCoordinator::~TopologyCoordinator() {}
} // namespace repl
} // namespace mongo
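
Role here is a small value class wrapping an int with named constants and a toString() switch, rather than a plain enum. A standalone sketch of that idiom follows; the names and the "unknown" fallback are illustrative (the real code above invariants instead of falling through):

// Type-safe role wrapper: private constructor, named constants, toString().
#include <cassert>
#include <string>

class Role {
public:
    static const Role leader;
    static const Role follower;
    static const Role candidate;

    std::string toString() const {
        switch (_value) {
            case 0: return "leader";
            case 1: return "follower";
            case 2: return "candidate";
        }
        return "unknown";
    }
    bool operator==(const Role& other) const { return _value == other._value; }

private:
    explicit Role(int value) : _value(value) {}
    int _value;
};

const Role Role::leader(0);
const Role Role::follower(1);
const Role Role::candidate(2);

int main() {
    assert(Role::follower.toString() == "follower");
    assert(!(Role::leader == Role::candidate));
    return 0;
}
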
diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h
index 3af054b3010..0dbc81baac5 100644
--- a/src/mongo/db/repl/topology_coordinator.h
+++ b/src/mongo/db/repl/topology_coordinator.h
@@ -40,444 +40,444 @@
namespace mongo {
- class Timestamp;
+class Timestamp;
namespace repl {
- class HeartbeatResponseAction;
- class OpTime;
- class ReplSetHeartbeatArgs;
- class ReplicaSetConfig;
- class TagSubgroup;
- class LastVote;
- struct MemberState;
+class HeartbeatResponseAction;
+class OpTime;
+class ReplSetHeartbeatArgs;
+class ReplicaSetConfig;
+class TagSubgroup;
+class LastVote;
+struct MemberState;
+
+/**
+ * Replication Topology Coordinator interface.
+ *
+ * This object is responsible for managing the topology of the cluster.
+ * Tasks include consensus and leader election, chaining, and configuration management.
+ * Methods of this class should be non-blocking.
+ */
+class TopologyCoordinator {
+ MONGO_DISALLOW_COPYING(TopologyCoordinator);
+
+public:
+ class Role;
+
+ virtual ~TopologyCoordinator();
+
+ ////////////////////////////////////////////////////////////
+ //
+ // State inspection methods.
+ //
+ ////////////////////////////////////////////////////////////
+
+ /**
+ * Gets the role of this member in the replication protocol.
+ */
+ virtual Role getRole() const = 0;
+
+ /**
+ * Gets the MemberState of this member in the replica set.
+ */
+ virtual MemberState getMemberState() const = 0;
+
+ /**
+ * Returns the address of the current sync source, or an empty HostAndPort if there is no
+ * current sync source.
+ */
+ virtual HostAndPort getSyncSourceAddress() const = 0;
+
+ /**
+ * Retrieves a vector of HostAndPorts containing all nodes that are neither DOWN nor
+ * ourself.
+ */
+ virtual std::vector<HostAndPort> getMaybeUpHostAndPorts() const = 0;
/**
- * Replication Topology Coordinator interface.
+ * Gets the earliest time the current node will stand for election.
+ */
+ virtual Date_t getStepDownTime() const = 0;
+
+ /**
+ * Gets the current value of the maintenance mode counter.
+ */
+ virtual int getMaintenanceCount() const = 0;
+
+ /**
+ * Gets the latest term this member is aware of. If this member is the primary,
+ * it's the current term of the replica set.
+ */
+ virtual long long getTerm() const = 0;
+
+ /**
+ * Sets the latest term this member is aware of to the higher of its current value and
+ * the value passed in as "term".
+ * Returns true if the local term value is changed.
+ */
+ virtual bool updateTerm(long long term) = 0;
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Basic state manipulation methods.
+ //
+ ////////////////////////////////////////////////////////////
+
+ /**
+ * Sets the index into the config used when we next choose a sync source
+ */
+ virtual void setForceSyncSourceIndex(int index) = 0;
+
+ /**
+ * Chooses and sets a new sync source, based on our current knowledge of the world.
+ */
+ virtual HostAndPort chooseNewSyncSource(Date_t now, const OpTime& lastOpApplied) = 0;
+
+ /**
+ * Suppresses selecting "host" as sync source until "until".
+ */
+ virtual void blacklistSyncSource(const HostAndPort& host, Date_t until) = 0;
+
+ /**
+ * Removes a single entry "host" from the list of potential sync sources which we
+ * have blacklisted, if it is supposed to be unblacklisted by "now".
+ */
+ virtual void unblacklistSyncSource(const HostAndPort& host, Date_t now) = 0;
+
+ /**
+ * Clears the list of potential sync sources we have blacklisted.
+ */
+ virtual void clearSyncSourceBlacklist() = 0;
+
+ /**
+ * Determines if a new sync source should be chosen, if a better candidate sync source is
+ * available. If the current sync source's last optime is more than _maxSyncSourceLagSecs
+ * behind any syncable source, this function returns true.
+ *
+ * "now" is used to skip over currently blacklisted sync sources.
+ */
+ virtual bool shouldChangeSyncSource(const HostAndPort& currentSource, Date_t now) const = 0;
+
+ /**
+ * Checks whether we are a single node set and we are not in a stepdown period. If so,
+ * puts us into candidate mode, otherwise does nothing. This is used to ensure that
+ * nodes in a single node replset become primary again when their stepdown period ends.
+ */
+ virtual bool becomeCandidateIfStepdownPeriodOverAndSingleNodeSet(Date_t now) = 0;
+
+ /**
+ * Sets the earliest time the current node will stand for election to "newTime".
+ *
+ * Until this time, while the node may report itself as electable, it will not stand
+ * for election.
+ */
+ virtual void setElectionSleepUntil(Date_t newTime) = 0;
+
+ /**
+ * Sets the reported mode of this node to one of RS_SECONDARY, RS_STARTUP2, RS_ROLLBACK or
+ * RS_RECOVERING, when getRole() == Role::follower. This is the interface by which the
+ * applier changes the reported member state of the current node, and enables or suppresses
+ * electability of the current node. All modes but RS_SECONDARY indicate an unelectable
+ * follower state (one that cannot transition to candidate).
+ */
+ virtual void setFollowerMode(MemberState::MS newMode) = 0;
+
+ /**
+ * Adjusts the maintenance mode count by "inc".
+ *
+ * It is an error to call this method if getRole() does not return Role::follower.
+ * It is an error to allow the maintenance count to go negative.
+ */
+ virtual void adjustMaintenanceCountBy(int inc) = 0;
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Methods that prepare responses to command requests.
+ //
+ ////////////////////////////////////////////////////////////
+
+ // produces a reply to a replSetSyncFrom command
+ virtual void prepareSyncFromResponse(const ReplicationExecutor::CallbackArgs& data,
+ const HostAndPort& target,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result) = 0;
+
+ // produce a reply to a replSetFresh command
+ virtual void prepareFreshResponse(const ReplicationCoordinator::ReplSetFreshArgs& args,
+ Date_t now,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result) = 0;
+
+ // produce a reply to a received electCmd
+ virtual void prepareElectResponse(const ReplicationCoordinator::ReplSetElectArgs& args,
+ Date_t now,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result) = 0;
+
+ // produce a reply to a heartbeat
+ virtual Status prepareHeartbeatResponse(Date_t now,
+ const ReplSetHeartbeatArgs& args,
+ const std::string& ourSetName,
+ const OpTime& lastOpApplied,
+ ReplSetHeartbeatResponse* response) = 0;
+
+ // produce a reply to a V1 heartbeat
+ virtual Status prepareHeartbeatResponseV1(Date_t now,
+ const ReplSetHeartbeatArgsV1& args,
+ const std::string& ourSetName,
+ const OpTime& lastOpApplied,
+ ReplSetHeartbeatResponse* response) = 0;
+
+ // produce a reply to a status request
+ virtual void prepareStatusResponse(const ReplicationExecutor::CallbackArgs& data,
+ Date_t now,
+ unsigned uptime,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result) = 0;
+
+ // produce a reply to an ismaster request. It is only valid to call this if we are a
+ // replset.
+ virtual void fillIsMasterForReplSet(IsMasterResponse* response) = 0;
+
+ // produce a reply to a freeze request
+ virtual void prepareFreezeResponse(Date_t now, int secs, BSONObjBuilder* response) = 0;
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Methods for sending and receiving heartbeats,
+ // reconfiguring and handling the results of standing for
+ // election.
+ //
+ ////////////////////////////////////////////////////////////
+
+ /**
+ * Updates the topology coordinator's notion of the replica set configuration.
*
- * This object is responsible for managing the topology of the cluster.
- * Tasks include consensus and leader election, chaining, and configuration management.
- * Methods of this class should be non-blocking.
- */
- class TopologyCoordinator {
- MONGO_DISALLOW_COPYING(TopologyCoordinator);
- public:
- class Role;
-
- virtual ~TopologyCoordinator();
-
- ////////////////////////////////////////////////////////////
- //
- // State inspection methods.
- //
- ////////////////////////////////////////////////////////////
-
- /**
- * Gets the role of this member in the replication protocol.
- */
- virtual Role getRole() const = 0;
-
- /**
- * Gets the MemberState of this member in the replica set.
- */
- virtual MemberState getMemberState() const = 0;
-
- /**
- * Returns the address of the current sync source, or an empty HostAndPort if there is no
- * current sync source.
- */
- virtual HostAndPort getSyncSourceAddress() const = 0;
-
- /**
- * Retrieves a vector of HostAndPorts containing all nodes that are neither DOWN nor
- * ourself.
- */
- virtual std::vector<HostAndPort> getMaybeUpHostAndPorts() const = 0;
-
- /**
- * Gets the earliest time the current node will stand for election.
- */
- virtual Date_t getStepDownTime() const = 0;
-
- /**
- * Gets the current value of the maintenance mode counter.
- */
- virtual int getMaintenanceCount() const = 0;
-
- /**
- * Gets the latest term this member is aware of. If this member is the primary,
- * it's the current term of the replica set.
- */
- virtual long long getTerm() const = 0;
-
- /**
- * Sets the latest term this member is aware of to the higher of its current value and
- * the value passed in as "term".
- * Returns true if the local term value is changed.
- */
- virtual bool updateTerm(long long term) = 0;
-
- ////////////////////////////////////////////////////////////
- //
- // Basic state manipulation methods.
- //
- ////////////////////////////////////////////////////////////
-
- /**
- * Sets the index into the config used when we next choose a sync source
- */
- virtual void setForceSyncSourceIndex(int index) = 0;
-
- /**
- * Chooses and sets a new sync source, based on our current knowledge of the world.
- */
- virtual HostAndPort chooseNewSyncSource(Date_t now, const OpTime& lastOpApplied) = 0;
-
- /**
- * Suppresses selecting "host" as sync source until "until".
- */
- virtual void blacklistSyncSource(const HostAndPort& host, Date_t until) = 0;
-
- /**
- * Removes a single entry "host" from the list of potential sync sources which we
- * have blacklisted, if it is supposed to be unblacklisted by "now".
- */
- virtual void unblacklistSyncSource(const HostAndPort& host, Date_t now) = 0;
-
- /**
- * Clears the list of potential sync sources we have blacklisted.
- */
- virtual void clearSyncSourceBlacklist() = 0;
-
- /**
- * Determines if a new sync source should be chosen, if a better candidate sync source is
- * available. If the current sync source's last optime is more than _maxSyncSourceLagSecs
- * behind any syncable source, this function returns true.
- *
- * "now" is used to skip over currently blacklisted sync sources.
- */
- virtual bool shouldChangeSyncSource(const HostAndPort& currentSource, Date_t now) const = 0;
-
- /**
- * Checks whether we are a single node set and we are not in a stepdown period. If so,
- * puts us into candidate mode, otherwise does nothing. This is used to ensure that
- * nodes in a single node replset become primary again when their stepdown period ends.
- */
- virtual bool becomeCandidateIfStepdownPeriodOverAndSingleNodeSet(Date_t now) = 0;
-
- /**
- * Sets the earliest time the current node will stand for election to "newTime".
- *
- * Until this time, while the node may report itself as electable, it will not stand
- * for election.
- */
- virtual void setElectionSleepUntil(Date_t newTime) = 0;
-
- /**
- * Sets the reported mode of this node to one of RS_SECONDARY, RS_STARTUP2, RS_ROLLBACK or
- * RS_RECOVERING, when getRole() == Role::follower. This is the interface by which the
- * applier changes the reported member state of the current node, and enables or suppresses
- * electability of the current node. All modes but RS_SECONDARY indicate an unelectable
- * follower state (one that cannot transition to candidate).
- */
- virtual void setFollowerMode(MemberState::MS newMode) = 0;
-
- /**
- * Adjusts the maintenance mode count by "inc".
- *
- * It is an error to call this method if getRole() does not return Role::follower.
- * It is an error to allow the maintenance count to go negative.
- */
- virtual void adjustMaintenanceCountBy(int inc) = 0;
-
- ////////////////////////////////////////////////////////////
- //
- // Methods that prepare responses to command requests.
- //
- ////////////////////////////////////////////////////////////
-
- // produces a reply to a replSetSyncFrom command
- virtual void prepareSyncFromResponse(const ReplicationExecutor::CallbackArgs& data,
- const HostAndPort& target,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result) = 0;
-
- // produce a reply to a replSetFresh command
- virtual void prepareFreshResponse(const ReplicationCoordinator::ReplSetFreshArgs& args,
- Date_t now,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result) = 0;
-
- // produce a reply to a received electCmd
- virtual void prepareElectResponse(const ReplicationCoordinator::ReplSetElectArgs& args,
- Date_t now,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result) = 0;
-
- // produce a reply to a heartbeat
- virtual Status prepareHeartbeatResponse(Date_t now,
- const ReplSetHeartbeatArgs& args,
- const std::string& ourSetName,
- const OpTime& lastOpApplied,
- ReplSetHeartbeatResponse* response) = 0;
-
- // produce a reply to a V1 heartbeat
- virtual Status prepareHeartbeatResponseV1(Date_t now,
- const ReplSetHeartbeatArgsV1& args,
- const std::string& ourSetName,
- const OpTime& lastOpApplied,
- ReplSetHeartbeatResponse* response) = 0;
-
- // produce a reply to a status request
- virtual void prepareStatusResponse(const ReplicationExecutor::CallbackArgs& data,
- Date_t now,
- unsigned uptime,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result) = 0;
-
- // produce a reply to an ismaster request. It is only valid to call this if we are a
- // replset.
- virtual void fillIsMasterForReplSet(IsMasterResponse* response) = 0;
-
- // produce a reply to a freeze request
- virtual void prepareFreezeResponse(Date_t now, int secs, BSONObjBuilder* response) = 0;
-
- ////////////////////////////////////////////////////////////
- //
- // Methods for sending and receiving heartbeats,
- // reconfiguring and handling the results of standing for
- // election.
- //
- ////////////////////////////////////////////////////////////
-
- /**
- * Updates the topology coordinator's notion of the replica set configuration.
- *
- * "newConfig" is the new configuration, and "selfIndex" is the index of this
- * node's configuration information in "newConfig", or "selfIndex" is -1 to
- * indicate that this node is not a member of "newConfig".
- *
- * newConfig.isInitialized() should be true, though implementations may accept
- * configurations where this is not true, for testing purposes.
- */
- virtual void updateConfig(const ReplicaSetConfig& newConfig,
- int selfIndex,
- Date_t now,
- const OpTime& lastOpApplied) = 0;
-
- /**
- * Prepares a heartbeat request appropriate for sending to "target", assuming the
- * current time is "now". "ourSetName" is used as the name for our replica set if
- * the topology coordinator does not have a valid configuration installed.
- *
- * The returned pair contains proper arguments for a replSetHeartbeat command, and
- * an amount of time to wait for the response.
- *
- * This call should be paired (with intervening network communication) with a call to
- * processHeartbeatResponse for the same "target".
- */
- virtual std::pair<ReplSetHeartbeatArgs, Milliseconds> prepareHeartbeatRequest(
- Date_t now,
- const std::string& ourSetName,
- const HostAndPort& target) = 0;
- virtual std::pair<ReplSetHeartbeatArgsV1, Milliseconds> prepareHeartbeatRequestV1(
- Date_t now,
- const std::string& ourSetName,
- const HostAndPort& target) = 0;
-
- /**
- * Processes a heartbeat response from "target" that arrived around "now", having
- * spent "networkRoundTripTime" millis on the network.
- *
- * Updates internal topology coordinator state, and returns instructions about what action
- * to take next.
- *
- * If the next action indicates StartElection, the topology coordinator has transitioned to
- * the "candidate" role, and will remain there until processWinElection or
- * processLoseElection are called.
- *
- * If the next action indicates "StepDownSelf", the topology coordinator has transitioned
- * to the "follower" role from "leader", and the caller should take any necessary actions
- * to become a follower.
- *
- * If the next action indicates "StepDownRemotePrimary", the caller should take steps to
- * cause the specified remote host to step down from primary to secondary.
- *
- * If the next action indicates "Reconfig", the caller should verify the configuration in
- * hbResponse is acceptable, perform any other reconfiguration actions it must, and call
- * updateConfig with the new configuration and the appropriate value for "selfIndex". It
- * must also wrap up any outstanding elections (by calling processLoseElection or
- * processWinElection) before calling updateConfig.
- *
- * This call should be paired (with intervening network communication) with a call to
- * prepareHeartbeatRequest for the same "target".
- */
- virtual HeartbeatResponseAction processHeartbeatResponse(
- Date_t now,
- Milliseconds networkRoundTripTime,
- const HostAndPort& target,
- const StatusWith<ReplSetHeartbeatResponse>& hbResponse,
- const OpTime& myLastOpApplied) = 0;
-
- /**
- * If getRole() == Role::candidate and this node has not voted too recently, updates the
- * lastVote tracker and returns true. Otherwise, returns false.
- */
- virtual bool voteForMyself(Date_t now) = 0;
-
- /**
- * Increase the term.
- */
- virtual void incrementTerm() = 0;
-
- /**
- * Set lastVote to be for ourself in this term.
- */
- virtual void voteForMyselfV1() = 0;
-
- /**
- * Performs state updates associated with winning an election.
- *
- * It is an error to call this if the topology coordinator is not in candidate mode.
- *
- * Exactly one of either processWinElection or processLoseElection must be called if
- * processHeartbeatResponse returns StartElection, to exit candidate mode.
- */
- virtual void processWinElection(OID electionId, Timestamp electionOpTime) = 0;
-
- /**
- * Performs state updates associated with losing an election.
- *
- * It is an error to call this if the topology coordinator is not in candidate mode.
- *
- * Exactly one of either processWinElection or processLoseElection must be called if
- * processHeartbeatResponse returns StartElection, to exit candidate mode.
- */
- virtual void processLoseElection() = 0;
-
- /**
- * Tries to transition the coordinator from the leader role to the follower role.
- *
- * Fails if "force" is not set and no follower is known to be up. It is illegal
- * to call this method if the node is not leader.
- *
- * Returns whether or not the step down succeeded.
- */
- virtual bool stepDown(Date_t until, bool force, const OpTime& lastOpApplied) = 0;
-
- /**
- * Sometimes a request to step down comes in (like via a heartbeat), but we don't have the
- * global exclusive lock so we can't actually stepdown at that moment. When that happens
- * we record that a stepdown request is pending and schedule work to stepdown in the global
- * lock. This method is called after holding the global lock to perform the actual
- * stepdown, but only if the node hasn't already stepped down another way since the work was
- * scheduled. Returns true if it actually steps down, and false otherwise.
- */
- virtual bool stepDownIfPending() = 0;
-
- /**
- * Considers whether or not this node should stand for election, and returns true
- * if the node has transitioned to candidate role as a result of the call.
- */
- virtual bool checkShouldStandForElection(Date_t now, const OpTime& lastOpApplied) = 0;
-
- /**
- * Set the outgoing heartbeat message from self
- */
- virtual void setMyHeartbeatMessage(const Date_t now, const std::string& s) = 0;
-
- /**
- * Prepares a BSONObj describing the current term, primary, and lastOp information.
- */
- virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder,
- const OpTime& lastCommittedOpTime) const = 0;
-
- /**
- * Writes into 'output' all the information needed to generate a summary of the current
- * replication state for use by the web interface.
- */
- virtual void summarizeAsHtml(ReplSetHtmlSummary* output) = 0;
-
- /**
- * Prepares a ReplSetRequestVotesResponse.
- */
- virtual void processReplSetRequestVotes(const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response,
- const OpTime& lastAppliedOpTime) = 0;
-
- /**
- * Determines whether or not the newly elected primary is valid from our perspective.
- * If it is, sets the _currentPrimaryIndex and term to the received values.
- * If it is not, return ErrorCode::BadValue and the current term from our perspective.
- * Populate responseTerm with the current term from our perspective.
- */
- virtual Status processReplSetDeclareElectionWinner(
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm) = 0;
-
- /**
- * Loads an initial LastVote document, which was read from local storage.
- *
- * Called only during replication startup. All other updates are done internally.
- */
- virtual void loadLastVote(const LastVote& lastVote) = 0;
-
- /**
- * Returns the most recent term this node is aware of.
- */
- virtual long long getTerm() = 0;
-
- /**
- * Readies the TopologyCoordinator for stepdown.
- */
- virtual void prepareForStepDown() = 0;
-
- protected:
- TopologyCoordinator() {}
- };
-
- /**
- * Type that denotes the role of a node in the replication protocol.
+ * "newConfig" is the new configuration, and "selfIndex" is the index of this
+ * node's configuration information in "newConfig", or "selfIndex" is -1 to
+ * indicate that this node is not a member of "newConfig".
*
- * The role is distinct from MemberState, in that it only deals with the
- * roles a node plays in the basic protocol -- leader, follower and candidate.
- * The mapping between MemberState and Role is complex -- several MemberStates
- * map to the follower role, and MemberState::RS_SECONDARY maps to either
- * follower or candidate roles, e.g.
+ * newConfig.isInitialized() should be true, though implementations may accept
+ * configurations where this is not true, for testing purposes.
*/
- class TopologyCoordinator::Role {
- public:
- /**
- * Constant indicating leader role.
- */
- static const Role leader;
+ virtual void updateConfig(const ReplicaSetConfig& newConfig,
+ int selfIndex,
+ Date_t now,
+ const OpTime& lastOpApplied) = 0;
- /**
- * Constant indicating follower role.
- */
- static const Role follower;
+ /**
+ * Prepares a heartbeat request appropriate for sending to "target", assuming the
+ * current time is "now". "ourSetName" is used as the name for our replica set if
+ * the topology coordinator does not have a valid configuration installed.
+ *
+ * The returned pair contains proper arguments for a replSetHeartbeat command, and
+ * an amount of time to wait for the response.
+ *
+ * This call should be paired (with intervening network communication) with a call to
+ * processHeartbeatResponse for the same "target".
+ */
+ virtual std::pair<ReplSetHeartbeatArgs, Milliseconds> prepareHeartbeatRequest(
+ Date_t now, const std::string& ourSetName, const HostAndPort& target) = 0;
+ virtual std::pair<ReplSetHeartbeatArgsV1, Milliseconds> prepareHeartbeatRequestV1(
+ Date_t now, const std::string& ourSetName, const HostAndPort& target) = 0;
- /**
- * Constant indicating candidate role
- */
- static const Role candidate;
+ /**
+ * Processes a heartbeat response from "target" that arrived around "now", having
+ * spent "networkRoundTripTime" millis on the network.
+ *
+ * Updates internal topology coordinator state, and returns instructions about what action
+ * to take next.
+ *
+ * If the next action indicates StartElection, the topology coordinator has transitioned to
+ * the "candidate" role, and will remain there until processWinElection or
+ * processLoseElection are called.
+ *
+ * If the next action indicates "StepDownSelf", the topology coordinator has transitioned
+ * to the "follower" role from "leader", and the caller should take any necessary actions
+ * to become a follower.
+ *
+ * If the next action indicates "StepDownRemotePrimary", the caller should take steps to
+ * cause the specified remote host to step down from primary to secondary.
+ *
+ * If the next action indicates "Reconfig", the caller should verify the configuration in
+ * hbResponse is acceptable, perform any other reconfiguration actions it must, and call
+ * updateConfig with the new configuration and the appropriate value for "selfIndex". It
+ * must also wrap up any outstanding elections (by calling processLoseElection or
+ * processWinElection) before calling updateConfig.
+ *
+ * This call should be paired (with intervening network communication) with a call to
+ * prepareHeartbeatRequest for the same "target".
+ */
+ virtual HeartbeatResponseAction processHeartbeatResponse(
+ Date_t now,
+ Milliseconds networkRoundTripTime,
+ const HostAndPort& target,
+ const StatusWith<ReplSetHeartbeatResponse>& hbResponse,
+ const OpTime& myLastOpApplied) = 0;
+
+ /**
+ * If getRole() == Role::candidate and this node has not voted too recently, updates the
+ * lastVote tracker and returns true. Otherwise, returns false.
+ */
+ virtual bool voteForMyself(Date_t now) = 0;
+
+ /**
+ * Increase the term.
+ */
+ virtual void incrementTerm() = 0;
+
+ /**
+ * Set lastVote to be for ourself in this term.
+ */
+ virtual void voteForMyselfV1() = 0;
+
+ /**
+ * Performs state updates associated with winning an election.
+ *
+ * It is an error to call this if the topology coordinator is not in candidate mode.
+ *
+ * Exactly one of either processWinElection or processLoseElection must be called if
+ * processHeartbeatResponse returns StartElection, to exit candidate mode.
+ */
+ virtual void processWinElection(OID electionId, Timestamp electionOpTime) = 0;
+
+ /**
+ * Performs state updates associated with losing an election.
+ *
+ * It is an error to call this if the topology coordinator is not in candidate mode.
+ *
+ * Exactly one of either processWinElection or processLoseElection must be called if
+ * processHeartbeatResponse returns StartElection, to exit candidate mode.
+ */
+ virtual void processLoseElection() = 0;
+
+ /**
+ * Tries to transition the coordinator from the leader role to the follower role.
+ *
+ * Fails if "force" is not set and no follower is known to be up. It is illegal
+ * to call this method if the node is not leader.
+ *
+ * Returns whether or not the step down succeeded.
+ */
+ virtual bool stepDown(Date_t until, bool force, const OpTime& lastOpApplied) = 0;
+
+ /**
+ * Sometimes a request to step down comes in (e.g. via a heartbeat), but we don't hold the
+ * global exclusive lock, so we can't actually step down at that moment. When that happens
+ * we record that a stepdown request is pending and schedule work to perform the stepdown
+ * under the global lock. This method is called once the global lock is held to perform the
+ * actual stepdown, but only if the node hasn't already stepped down some other way since
+ * the work was scheduled. Returns true if it actually steps down, and false otherwise.
+ */
+ virtual bool stepDownIfPending() = 0;
+
+ /**
+ * Considers whether or not this node should stand for election, and returns true
+ * if the node has transitioned to candidate role as a result of the call.
+ */
+ virtual bool checkShouldStandForElection(Date_t now, const OpTime& lastOpApplied) = 0;
+
+ /**
+ * Set the outgoing heartbeat message from self
+ */
+ virtual void setMyHeartbeatMessage(const Date_t now, const std::string& s) = 0;
+
+ /**
+ * Prepares a BSONObj describing the current term, primary, and lastOp information.
+ */
+ virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder,
+ const OpTime& lastCommittedOpTime) const = 0;
+
+ /**
+ * Writes into 'output' all the information needed to generate a summary of the current
+ * replication state for use by the web interface.
+ */
+ virtual void summarizeAsHtml(ReplSetHtmlSummary* output) = 0;
+
+ /**
+ * Prepares a ReplSetRequestVotesResponse.
+ */
+ virtual void processReplSetRequestVotes(const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response,
+ const OpTime& lastAppliedOpTime) = 0;
+
+ /**
+ * Determines whether or not the newly elected primary is valid from our perspective.
+ * If it is, sets the _currentPrimaryIndex and term to the received values.
+ * If it is not, returns ErrorCodes::BadValue and the current term from our perspective.
+ * Populates responseTerm with the current term from our perspective.
+ */
+ virtual Status processReplSetDeclareElectionWinner(const ReplSetDeclareElectionWinnerArgs& args,
+ long long* responseTerm) = 0;
+
+ /**
+ * Loads an initial LastVote document, which was read from local storage.
+ *
+ * Called only during replication startup. All other updates are done internally.
+ */
+ virtual void loadLastVote(const LastVote& lastVote) = 0;
+
+ /**
+ * Returns the most recent term this node is aware of.
+ */
+ virtual long long getTerm() = 0;
+
+ /**
+ * Readies the TopologyCoordinator for stepdown.
+ */
+ virtual void prepareForStepDown() = 0;
+
+protected:
+ TopologyCoordinator() {}
+};
+
+/**
+ * Type that denotes the role of a node in the replication protocol.
+ *
+ * The role is distinct from MemberState, in that it only deals with the
+ * roles a node plays in the basic protocol -- leader, follower and candidate.
+ * The mapping between MemberState and Role is complex -- several MemberStates
+ * map to the follower role, and MemberState::RS_SECONDARY, for example, maps to
+ * either the follower or the candidate role.
+ */
+class TopologyCoordinator::Role {
+public:
+ /**
+ * Constant indicating leader role.
+ */
+ static const Role leader;
+
+ /**
+ * Constant indicating follower role.
+ */
+ static const Role follower;
+
+ /**
+ * Constant indicating candidate role
+ */
+ static const Role candidate;
- Role() {}
+ Role() {}
- bool operator==(Role other) const { return _value == other._value; }
- bool operator!=(Role other) const { return _value != other._value; }
+ bool operator==(Role other) const {
+ return _value == other._value;
+ }
+ bool operator!=(Role other) const {
+ return _value != other._value;
+ }
- std::string toString() const;
+ std::string toString() const;
- private:
- explicit Role(int value);
+private:
+ explicit Role(int value);
- int _value;
- };
+ int _value;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
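
The header above specifies that prepareHeartbeatRequest and processHeartbeatResponse are to be used as a pair, with the network round trip in between, and that the returned HeartbeatResponseAction drives what the caller does next. The sketch below illustrates that contract only; it is not part of this change, the driver function is hypothetical, and the HeartbeatResponseAction accessor (getAction()) and its action constants are assumed from the interface comments rather than shown in this diff.

    // Illustrative driver (hypothetical; not part of this diff). Shows the intended
    // pairing of prepareHeartbeatRequest/processHeartbeatResponse and a dispatch on
    // the resulting HeartbeatResponseAction, per the interface comments above.
    void runOneHeartbeatSketch(TopologyCoordinator* topoCoord,
                               const std::string& ourSetName,
                               const HostAndPort& target,
                               const OpTime& myLastOpApplied) {
        // Build the heartbeat arguments and the timeout to use for the request.
        std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
            topoCoord->prepareHeartbeatRequest(Date_t::now(), ourSetName, target);

        // ... send request.first to "target", waiting at most request.second ...
        StatusWith<ReplSetHeartbeatResponse> hbResponse =
            Status(ErrorCodes::HostUnreachable, "stand-in for the real network reply");
        Milliseconds networkRoundTripTime(50);  // measured by the caller

        HeartbeatResponseAction action = topoCoord->processHeartbeatResponse(
            Date_t::now(), networkRoundTripTime, target, hbResponse, myLastOpApplied);

        switch (action.getAction()) {  // accessor name assumed
            case HeartbeatResponseAction::StartElection:
                // Now in candidate role; exactly one of processWinElection or
                // processLoseElection must follow to leave candidate mode.
                break;
            case HeartbeatResponseAction::StepDownSelf:
                // Transitioned leader -> follower; relinquish primary duties.
                break;
            case HeartbeatResponseAction::StepDownRemotePrimary:
                // Ask the remote primary to step down to secondary.
                break;
            case HeartbeatResponseAction::Reconfig:
                // Validate the config in hbResponse, finish any outstanding election,
                // then call updateConfig() with the new config and selfIndex.
                break;
            default:
                break;
        }
    }
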
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 08d7ac7198c..a2905c1eacb 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -55,1308 +55,1232 @@
namespace mongo {
namespace repl {
- using std::vector;
+using std::vector;
- const Seconds TopologyCoordinatorImpl::VoteLease::leaseTime = Seconds(30);
+const Seconds TopologyCoordinatorImpl::VoteLease::leaseTime = Seconds(30);
namespace {
- template <typename T>
- int indexOfIterator(const std::vector<T>& vec,
- typename std::vector<T>::const_iterator& it) {
- return static_cast<int>(it - vec.begin());
- }
-
- // Interval between the time the last heartbeat from a node was received successfully, or
- // the time when we gave up retrying, and when the next heartbeat should be sent to a target.
- const auto kHeartbeatInterval = Seconds{2};
+template <typename T>
+int indexOfIterator(const std::vector<T>& vec, typename std::vector<T>::const_iterator& it) {
+ return static_cast<int>(it - vec.begin());
+}
- // Maximum number of retries for a failed heartbeat.
- const int kMaxHeartbeatRetries = 2;
+// Interval between the time the last heartbeat from a node was received successfully, or
+// the time when we gave up retrying, and when the next heartbeat should be sent to a target.
+const auto kHeartbeatInterval = Seconds{2};
- /**
- * Returns true if the only up heartbeats are auth errors.
- */
- bool _hasOnlyAuthErrorUpHeartbeats(const std::vector<MemberHeartbeatData>& hbdata,
- const int selfIndex) {
- bool foundAuthError = false;
- for (std::vector<MemberHeartbeatData>::const_iterator it = hbdata.begin();
- it != hbdata.end();
- ++it) {
- if (indexOfIterator(hbdata, it) == selfIndex) {
- continue;
- }
+// Maximum number of retries for a failed heartbeat.
+const int kMaxHeartbeatRetries = 2;
- if (it->up()) {
- return false;
- }
+/**
+ * Returns true if the only up heartbeats are auth errors.
+ */
+bool _hasOnlyAuthErrorUpHeartbeats(const std::vector<MemberHeartbeatData>& hbdata,
+ const int selfIndex) {
+ bool foundAuthError = false;
+ for (std::vector<MemberHeartbeatData>::const_iterator it = hbdata.begin(); it != hbdata.end();
+ ++it) {
+ if (indexOfIterator(hbdata, it) == selfIndex) {
+ continue;
+ }
- if (it->hasAuthIssue()) {
- foundAuthError = true;
- }
+ if (it->up()) {
+ return false;
}
- return foundAuthError;
+ if (it->hasAuthIssue()) {
+ foundAuthError = true;
+ }
}
-} // namespace
+ return foundAuthError;
+}
- PingStats::PingStats() :
- count(0),
- value(std::numeric_limits<unsigned int>::max()),
- _numFailuresSinceLastStart(std::numeric_limits<int>::max()) {
- }
+} // namespace
- void PingStats::start(Date_t now) {
- _lastHeartbeatStartDate = now;
- _numFailuresSinceLastStart = 0;
+PingStats::PingStats()
+ : count(0),
+ value(std::numeric_limits<unsigned int>::max()),
+ _numFailuresSinceLastStart(std::numeric_limits<int>::max()) {}
+
+void PingStats::start(Date_t now) {
+ _lastHeartbeatStartDate = now;
+ _numFailuresSinceLastStart = 0;
+}
+
+void PingStats::hit(int millis) {
+ _numFailuresSinceLastStart = std::numeric_limits<int>::max();
+ ++count;
+ value = value == std::numeric_limits<unsigned int>::max()
+ ? millis
+ : static_cast<unsigned long>((value * .8) + (millis * .2));
+}
+
+void PingStats::miss() {
+ ++_numFailuresSinceLastStart;
+}
+
+TopologyCoordinatorImpl::TopologyCoordinatorImpl(Seconds maxSyncSourceLagSecs)
+ : _role(Role::follower),
+ _term(0),
+ _currentPrimaryIndex(-1),
+ _forceSyncSourceIndex(-1),
+ _maxSyncSourceLagSecs(maxSyncSourceLagSecs),
+ _selfIndex(-1),
+ _stepDownPending(false),
+ _maintenanceModeCalls(0),
+ _followerMode(MemberState::RS_STARTUP2) {
+ invariant(getMemberState() == MemberState::RS_STARTUP);
+}
+
+TopologyCoordinator::Role TopologyCoordinatorImpl::getRole() const {
+ return _role;
+}
+
+void TopologyCoordinatorImpl::setForceSyncSourceIndex(int index) {
+ invariant(_forceSyncSourceIndex < _rsConfig.getNumMembers());
+ _forceSyncSourceIndex = index;
+}
+
+HostAndPort TopologyCoordinatorImpl::getSyncSourceAddress() const {
+ return _syncSource;
+}
+
+HostAndPort TopologyCoordinatorImpl::chooseNewSyncSource(Date_t now, const OpTime& lastOpApplied) {
+ // If we are primary, then we aren't syncing from anyone (else).
+ if (_iAmPrimary()) {
+ return HostAndPort();
+ }
+
+ // If we are not a member of the current replica set configuration, no sync source is valid.
+ if (_selfIndex == -1) {
+ LOG(2) << "Cannot sync from any members because we are not in the replica set config";
+ return HostAndPort();
+ }
+
+ // if we have a target we've requested to sync from, use it
+ if (_forceSyncSourceIndex != -1) {
+ invariant(_forceSyncSourceIndex < _rsConfig.getNumMembers());
+ _syncSource = _rsConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort();
+ _forceSyncSourceIndex = -1;
+ std::string msg(str::stream() << "syncing from: " << _syncSource.toString()
+ << " by request");
+ log() << msg << rsLog;
+ setMyHeartbeatMessage(now, msg);
+ return _syncSource;
}
- void PingStats::hit(int millis) {
- _numFailuresSinceLastStart = std::numeric_limits<int>::max();
- ++count;
- value = value == std::numeric_limits<unsigned int>::max() ? millis :
- static_cast<unsigned long>((value * .8) + (millis * .2));
- }
+ // wait for 2N pings (not counting ourselves) before choosing a sync target
+ int needMorePings = (_hbdata.size() - 1) * 2 - _getTotalPings();
- void PingStats::miss() {
- ++_numFailuresSinceLastStart;
+ if (needMorePings > 0) {
+ OCCASIONALLY log() << "waiting for " << needMorePings
+ << " pings from other members before syncing";
+ _syncSource = HostAndPort();
+ return _syncSource;
}
- TopologyCoordinatorImpl::TopologyCoordinatorImpl(Seconds maxSyncSourceLagSecs) :
- _role(Role::follower),
- _term(0),
- _currentPrimaryIndex(-1),
- _forceSyncSourceIndex(-1),
- _maxSyncSourceLagSecs(maxSyncSourceLagSecs),
- _selfIndex(-1),
- _stepDownPending(false),
- _maintenanceModeCalls(0),
- _followerMode(MemberState::RS_STARTUP2)
- {
- invariant(getMemberState() == MemberState::RS_STARTUP);
+ // If we are only allowed to sync from the primary, set that
+ if (!_rsConfig.isChainingAllowed()) {
+ if (_currentPrimaryIndex == -1) {
+ LOG(1) << "Cannot select sync source because chaining is"
+ " not allowed and primary is unknown/down";
+ _syncSource = HostAndPort();
+ return _syncSource;
+ } else if (_memberIsBlacklisted(*_currentPrimaryMember(), now)) {
+ LOG(1) << "Cannot select sync source because chaining is"
+ " not allowed and primary is not currently accepting our updates";
+ _syncSource = HostAndPort();
+ return _syncSource;
+ } else {
+ _syncSource = _rsConfig.getMemberAt(_currentPrimaryIndex).getHostAndPort();
+ std::string msg(str::stream() << "syncing from primary: " << _syncSource.toString());
+ log() << msg << rsLog;
+ setMyHeartbeatMessage(now, msg);
+ return _syncSource;
+ }
}
- TopologyCoordinator::Role TopologyCoordinatorImpl::getRole() const {
- return _role;
- }
+ // find the member with the lowest ping time that is ahead of me
- void TopologyCoordinatorImpl::setForceSyncSourceIndex(int index) {
- invariant(_forceSyncSourceIndex < _rsConfig.getNumMembers());
- _forceSyncSourceIndex = index;
+ // Find primary's oplog time. Reject sync candidates that are more than
+ // maxSyncSourceLagSecs seconds behind.
+ OpTime primaryOpTime;
+ if (_currentPrimaryIndex != -1) {
+ primaryOpTime = _hbdata[_currentPrimaryIndex].getOpTime();
+ } else {
+ // choose a time that will exclude no candidates, since we don't see a primary
+ primaryOpTime = OpTime(Timestamp(_maxSyncSourceLagSecs, 0), 0);
}
- HostAndPort TopologyCoordinatorImpl::getSyncSourceAddress() const {
- return _syncSource;
+ if (primaryOpTime.getSecs() < static_cast<unsigned int>(_maxSyncSourceLagSecs.count())) {
+ // erh - I think this means there was just a new election
+ // and we don't yet know the new primary's optime
+ primaryOpTime = OpTime(Timestamp(_maxSyncSourceLagSecs, 0), 0);
}
- HostAndPort TopologyCoordinatorImpl::chooseNewSyncSource(Date_t now,
- const OpTime& lastOpApplied) {
- // If we are primary, then we aren't syncing from anyone (else).
- if (_iAmPrimary()) {
- return HostAndPort();
- }
+ OpTime oldestSyncOpTime(Timestamp(primaryOpTime.getSecs() - _maxSyncSourceLagSecs.count(), 0),
+ primaryOpTime.getTerm());
- // If we are not a member of the current replica set configuration, no sync source is valid.
- if (_selfIndex == -1) {
- LOG(2) << "Cannot sync from any members because we are not in the replica set config";
- return HostAndPort();
- }
+ int closestIndex = -1;
- // if we have a target we've requested to sync from, use it
- if (_forceSyncSourceIndex != -1) {
- invariant(_forceSyncSourceIndex < _rsConfig.getNumMembers());
- _syncSource = _rsConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort();
- _forceSyncSourceIndex = -1;
- std::string msg(str::stream() << "syncing from: "
- << _syncSource.toString() << " by request");
- log() << msg << rsLog;
- setMyHeartbeatMessage(now, msg);
- return _syncSource;
- }
-
- // wait for 2N pings (not counting ourselves) before choosing a sync target
- int needMorePings = (_hbdata.size() - 1) * 2 - _getTotalPings();
-
- if (needMorePings > 0) {
- OCCASIONALLY log() << "waiting for " << needMorePings
- << " pings from other members before syncing";
- _syncSource = HostAndPort();
- return _syncSource;
- }
-
- // If we are only allowed to sync from the primary, set that
- if (!_rsConfig.isChainingAllowed()) {
- if (_currentPrimaryIndex == -1) {
- LOG(1) << "Cannot select sync source because chaining is"
- " not allowed and primary is unknown/down";
- _syncSource = HostAndPort();
- return _syncSource;
+ // Make two attempts. The first attempt, we ignore those nodes with
+ // slave delay higher than our own, hidden nodes, and nodes that are excessively lagged.
+ // The second attempt includes such nodes, in case those are the only ones we can reach.
+ // This loop attempts to set 'closestIndex'.
+ for (int attempts = 0; attempts < 2; ++attempts) {
+ for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
+ it != _hbdata.end();
+ ++it) {
+ const int itIndex = indexOfIterator(_hbdata, it);
+ // Don't consider ourselves.
+ if (itIndex == _selfIndex) {
+ continue;
}
- else if (_memberIsBlacklisted(*_currentPrimaryMember(), now)) {
- LOG(1) << "Cannot select sync source because chaining is"
- "not allowed and primary is not currently accepting our updates";
- _syncSource = HostAndPort();
- return _syncSource;
+ // Candidate must be up to be considered.
+ if (!it->up()) {
+ continue;
}
- else {
- _syncSource = _rsConfig.getMemberAt(_currentPrimaryIndex).getHostAndPort();
- std::string msg(str::stream() << "syncing from primary: "
- << _syncSource.toString());
- log() << msg << rsLog;
- setMyHeartbeatMessage(now, msg);
- return _syncSource;
+ // Candidate must be PRIMARY or SECONDARY state to be considered.
+ if (!it->getState().readable()) {
+ continue;
}
- }
- // find the member with the lowest ping time that is ahead of me
+ const MemberConfig& itMemberConfig(_rsConfig.getMemberAt(itIndex));
- // Find primary's oplog time. Reject sync candidates that are more than
- // maxSyncSourceLagSecs seconds behind.
- OpTime primaryOpTime;
- if (_currentPrimaryIndex != -1) {
- primaryOpTime = _hbdata[_currentPrimaryIndex].getOpTime();
- }
- else {
- // choose a time that will exclude no candidates, since we don't see a primary
- primaryOpTime = OpTime(Timestamp(_maxSyncSourceLagSecs, 0), 0);
- }
-
- if (primaryOpTime.getSecs() <
- static_cast<unsigned int>(_maxSyncSourceLagSecs.count())) {
- // erh - I think this means there was just a new election
- // and we don't yet know the new primary's optime
- primaryOpTime = OpTime(Timestamp(_maxSyncSourceLagSecs, 0), 0);
- }
-
- OpTime oldestSyncOpTime(
- Timestamp(primaryOpTime.getSecs() - _maxSyncSourceLagSecs.count(), 0),
- primaryOpTime.getTerm());
-
- int closestIndex = -1;
-
- // Make two attempts. The first attempt, we ignore those nodes with
- // slave delay higher than our own, hidden nodes, and nodes that are excessively lagged.
- // The second attempt includes such nodes, in case those are the only ones we can reach.
- // This loop attempts to set 'closestIndex'.
- for (int attempts = 0; attempts < 2; ++attempts) {
- for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
- it != _hbdata.end();
- ++it) {
- const int itIndex = indexOfIterator(_hbdata, it);
- // Don't consider ourselves.
- if (itIndex == _selfIndex) {
- continue;
- }
- // Candidate must be up to be considered.
- if (!it->up()) {
- continue;
- }
- // Candidate must be PRIMARY or SECONDARY state to be considered.
- if (!it->getState().readable()) {
- continue;
- }
-
- const MemberConfig& itMemberConfig(_rsConfig.getMemberAt(itIndex));
-
- // Candidate must build indexes if we build indexes, to be considered.
- if (_selfConfig().shouldBuildIndexes()) {
- if (!itMemberConfig.shouldBuildIndexes()) {
- continue;
- }
- }
-
- // only consider candidates that are ahead of where we are
- if (it->getOpTime() <= lastOpApplied) {
+ // Candidate must build indexes if we build indexes, to be considered.
+ if (_selfConfig().shouldBuildIndexes()) {
+ if (!itMemberConfig.shouldBuildIndexes()) {
continue;
}
+ }
- // omit candidates that are excessively behind, on the first attempt at least.
- if (attempts == 0 &&
- it->getOpTime() < oldestSyncOpTime) {
- continue;
- }
+ // only consider candidates that are ahead of where we are
+ if (it->getOpTime() <= lastOpApplied) {
+ continue;
+ }
- // omit nodes that are more latent than anything we've already considered
- if ((closestIndex != -1) &&
- (_getPing(itMemberConfig.getHostAndPort())
- > _getPing(_rsConfig.getMemberAt(closestIndex).getHostAndPort()))) {
- continue;
- }
+ // omit candidates that are excessively behind, on the first attempt at least.
+ if (attempts == 0 && it->getOpTime() < oldestSyncOpTime) {
+ continue;
+ }
- if (attempts == 0) {
- if (_selfConfig().getSlaveDelay() < itMemberConfig.getSlaveDelay()
- || itMemberConfig.isHidden()) {
- continue; // skip this one in the first attempt
- }
- }
+ // omit nodes that are more latent than anything we've already considered
+ if ((closestIndex != -1) &&
+ (_getPing(itMemberConfig.getHostAndPort()) >
+ _getPing(_rsConfig.getMemberAt(closestIndex).getHostAndPort()))) {
+ continue;
+ }
- if (_memberIsBlacklisted(itMemberConfig, now)) {
- continue;
+ if (attempts == 0) {
+ if (_selfConfig().getSlaveDelay() < itMemberConfig.getSlaveDelay() ||
+ itMemberConfig.isHidden()) {
+ continue; // skip this one in the first attempt
}
-
- // This candidate has passed all tests; set 'closestIndex'
- closestIndex = itIndex;
}
- if (closestIndex != -1) break; // no need for second attempt
- }
- if (closestIndex == -1) {
- // Did not find any members to sync from
- std::string msg("could not find member to sync from");
- // Only log when we had a valid sync source before
- if (!_syncSource.empty()) {
- log() << msg << rsLog;
+ if (_memberIsBlacklisted(itMemberConfig, now)) {
+ continue;
}
- setMyHeartbeatMessage(now, msg);
- _syncSource = HostAndPort();
- return _syncSource;
+ // This candidate has passed all tests; set 'closestIndex'
+ closestIndex = itIndex;
}
- _syncSource = _rsConfig.getMemberAt(closestIndex).getHostAndPort();
- std::string msg(str::stream() << "syncing from: " << _syncSource.toString(), 0);
- log() << msg << rsLog;
- setMyHeartbeatMessage(now, msg);
- return _syncSource;
+ if (closestIndex != -1)
+ break; // no need for second attempt
}
- bool TopologyCoordinatorImpl::_memberIsBlacklisted(const MemberConfig& memberConfig,
- Date_t now) const {
- std::map<HostAndPort,Date_t>::const_iterator blacklisted =
- _syncSourceBlacklist.find(memberConfig.getHostAndPort());
- if (blacklisted != _syncSourceBlacklist.end()) {
- if (blacklisted->second > now) {
- return true;
- }
+ if (closestIndex == -1) {
+ // Did not find any members to sync from
+ std::string msg("could not find member to sync from");
+ // Only log when we had a valid sync source before
+ if (!_syncSource.empty()) {
+ log() << msg << rsLog;
}
- return false;
- }
+ setMyHeartbeatMessage(now, msg);
- void TopologyCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Date_t until) {
- LOG(2) << "blacklisting " << host << " until " << until.toString();
- _syncSourceBlacklist[host] = until;
+ _syncSource = HostAndPort();
+ return _syncSource;
}
-
- void TopologyCoordinatorImpl::unblacklistSyncSource(const HostAndPort& host, Date_t now) {
- std::map<HostAndPort, Date_t>::iterator hostItr = _syncSourceBlacklist.find(host);
- if (hostItr != _syncSourceBlacklist.end() && now >= hostItr->second) {
- LOG(2) << "unblacklisting " << host;
- _syncSourceBlacklist.erase(hostItr);
+ _syncSource = _rsConfig.getMemberAt(closestIndex).getHostAndPort();
+ std::string msg(str::stream() << "syncing from: " << _syncSource.toString(), 0);
+ log() << msg << rsLog;
+ setMyHeartbeatMessage(now, msg);
+ return _syncSource;
+}
+
+bool TopologyCoordinatorImpl::_memberIsBlacklisted(const MemberConfig& memberConfig,
+ Date_t now) const {
+ std::map<HostAndPort, Date_t>::const_iterator blacklisted =
+ _syncSourceBlacklist.find(memberConfig.getHostAndPort());
+ if (blacklisted != _syncSourceBlacklist.end()) {
+ if (blacklisted->second > now) {
+ return true;
}
}
+ return false;
+}
- void TopologyCoordinatorImpl::clearSyncSourceBlacklist() {
- _syncSourceBlacklist.clear();
- }
-
- void TopologyCoordinatorImpl::prepareSyncFromResponse(
- const ReplicationExecutor::CallbackArgs& data,
- const HostAndPort& target,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result) {
- if (data.status == ErrorCodes::CallbackCanceled) {
- *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
- return;
- }
-
- response->append("syncFromRequested", target.toString());
-
- if (_selfIndex == -1) {
- *result = Status(ErrorCodes::NotSecondary,
- "Removed and uninitialized nodes do not sync");
- return;
- }
-
- const MemberConfig& selfConfig = _selfConfig();
- if (selfConfig.isArbiter()) {
- *result = Status(ErrorCodes::NotSecondary, "arbiters don't sync");
- return;
- }
- if (_selfIndex == _currentPrimaryIndex) {
- *result = Status(ErrorCodes::NotSecondary, "primaries don't sync");
- return;
- }
+void TopologyCoordinatorImpl::blacklistSyncSource(const HostAndPort& host, Date_t until) {
+ LOG(2) << "blacklisting " << host << " until " << until.toString();
+ _syncSourceBlacklist[host] = until;
+}
- ReplicaSetConfig::MemberIterator targetConfig = _rsConfig.membersEnd();
- int targetIndex = 0;
- for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
- it != _rsConfig.membersEnd(); ++it) {
- if (it->getHostAndPort() == target) {
- targetConfig = it;
- break;
- }
- ++targetIndex;
- }
- if (targetConfig == _rsConfig.membersEnd()) {
- *result = Status(ErrorCodes::NodeNotFound,
- str::stream() << "Could not find member \"" << target.toString() <<
- "\" in replica set");
- return;
- }
- if (targetIndex == _selfIndex) {
- *result = Status(ErrorCodes::InvalidOptions, "I cannot sync from myself");
- return;
- }
- if (targetConfig->isArbiter()) {
- *result = Status(ErrorCodes::InvalidOptions,
- str::stream() << "Cannot sync from \"" << target.toString() <<
- "\" because it is an arbiter");
- return;
- }
- if (!targetConfig->shouldBuildIndexes() && selfConfig.shouldBuildIndexes()) {
- *result = Status(ErrorCodes::InvalidOptions,
- str::stream() << "Cannot sync from \"" << target.toString() <<
- "\" because it does not build indexes");
- return;
- }
-
- const MemberHeartbeatData& hbdata = _hbdata[targetIndex];
- if (hbdata.hasAuthIssue()) {
- *result = Status(ErrorCodes::Unauthorized,
- str::stream() << "not authorized to communicate with " <<
- target.toString());
- return;
- }
- if (hbdata.getHealth() == 0) {
- *result = Status(ErrorCodes::HostUnreachable,
- str::stream() << "I cannot reach the requested member: " <<
- target.toString());
- return;
- }
- if (hbdata.getOpTime().getSecs()+10 < lastOpApplied.getSecs()) {
- warning() << "attempting to sync from " << target
- << ", but its latest opTime is " << hbdata.getOpTime().getSecs()
- << " and ours is " << lastOpApplied.getSecs() << " so this may not work";
- response->append("warning",
- str::stream() << "requested member \"" << target.toString() <<
- "\" is more than 10 seconds behind us");
- // not returning bad Status, just warning
- }
-
- HostAndPort prevSyncSource = getSyncSourceAddress();
- if (!prevSyncSource.empty()) {
- response->append("prevSyncTarget", prevSyncSource.toString());
- }
-
- setForceSyncSourceIndex(targetIndex);
- *result = Status::OK();
+void TopologyCoordinatorImpl::unblacklistSyncSource(const HostAndPort& host, Date_t now) {
+ std::map<HostAndPort, Date_t>::iterator hostItr = _syncSourceBlacklist.find(host);
+ if (hostItr != _syncSourceBlacklist.end() && now >= hostItr->second) {
+ LOG(2) << "unblacklisting " << host;
+ _syncSourceBlacklist.erase(hostItr);
}
+}
- void TopologyCoordinatorImpl::prepareFreshResponse(
- const ReplicationCoordinator::ReplSetFreshArgs& args,
- const Date_t now,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result) {
-
- if (_selfIndex == -1) {
- *result = Status(ErrorCodes::ReplicaSetNotFound,
- "Cannot participate in elections because not initialized");
- return;
- }
+void TopologyCoordinatorImpl::clearSyncSourceBlacklist() {
+ _syncSourceBlacklist.clear();
+}
- if (args.setName != _rsConfig.getReplSetName()) {
- *result = Status(ErrorCodes::ReplicaSetNotFound,
- str::stream() << "Wrong repl set name. Expected: " <<
- _rsConfig.getReplSetName() <<
- ", received: " << args.setName);
- return;
- }
+void TopologyCoordinatorImpl::prepareSyncFromResponse(const ReplicationExecutor::CallbackArgs& data,
+ const HostAndPort& target,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result) {
+ if (data.status == ErrorCodes::CallbackCanceled) {
+ *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
+ return;
+ }
- if (args.id == static_cast<unsigned>(_selfConfig().getId())) {
- *result = Status(ErrorCodes::BadValue,
- str::stream() << "Received replSetFresh command from member with the "
- "same member ID as ourself: " << args.id);
- return;
- }
+ response->append("syncFromRequested", target.toString());
- bool weAreFresher = false;
- if( _rsConfig.getConfigVersion() > args.cfgver ) {
- log() << "replSet member " << args.who << " is not yet aware its cfg version "
- << args.cfgver << " is stale";
- response->append("info", "config version stale");
- weAreFresher = true;
- }
- // check not only our own optime, but any other member we can reach
- else if (OpTime(args.opTime, _term) < _latestKnownOpTime(lastOpApplied)) {
- weAreFresher = true;
- }
- response->appendDate("opTime",
- Date_t::fromMillisSinceEpoch(lastOpApplied.getTimestamp().asLL()));
- response->append("fresher", weAreFresher);
-
- std::string errmsg;
- bool doVeto = _shouldVetoMember(args, now, lastOpApplied, &errmsg);
- response->append("veto", doVeto);
- if (doVeto) {
- response->append("errmsg", errmsg);
- }
- *result = Status::OK();
+ if (_selfIndex == -1) {
+ *result = Status(ErrorCodes::NotSecondary, "Removed and uninitialized nodes do not sync");
+ return;
}
- bool TopologyCoordinatorImpl::_shouldVetoMember(
- const ReplicationCoordinator::ReplSetFreshArgs& args,
- const Date_t& now,
- const OpTime& lastOpApplied,
- std::string* errmsg) const {
+ const MemberConfig& selfConfig = _selfConfig();
+ if (selfConfig.isArbiter()) {
+ *result = Status(ErrorCodes::NotSecondary, "arbiters don't sync");
+ return;
+ }
+ if (_selfIndex == _currentPrimaryIndex) {
+ *result = Status(ErrorCodes::NotSecondary, "primaries don't sync");
+ return;
+ }
- if (_rsConfig.getConfigVersion() < args.cfgver) {
- // We are stale; do not veto.
- return false;
+ ReplicaSetConfig::MemberIterator targetConfig = _rsConfig.membersEnd();
+ int targetIndex = 0;
+ for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
+ it != _rsConfig.membersEnd();
+ ++it) {
+ if (it->getHostAndPort() == target) {
+ targetConfig = it;
+ break;
}
+ ++targetIndex;
+ }
+ if (targetConfig == _rsConfig.membersEnd()) {
+ *result = Status(ErrorCodes::NodeNotFound,
+ str::stream() << "Could not find member \"" << target.toString()
+ << "\" in replica set");
+ return;
+ }
+ if (targetIndex == _selfIndex) {
+ *result = Status(ErrorCodes::InvalidOptions, "I cannot sync from myself");
+ return;
+ }
+ if (targetConfig->isArbiter()) {
+ *result = Status(ErrorCodes::InvalidOptions,
+ str::stream() << "Cannot sync from \"" << target.toString()
+ << "\" because it is an arbiter");
+ return;
+ }
+ if (!targetConfig->shouldBuildIndexes() && selfConfig.shouldBuildIndexes()) {
+ *result = Status(ErrorCodes::InvalidOptions,
+ str::stream() << "Cannot sync from \"" << target.toString()
+ << "\" because it does not build indexes");
+ return;
+ }
+
+ const MemberHeartbeatData& hbdata = _hbdata[targetIndex];
+ if (hbdata.hasAuthIssue()) {
+ *result =
+ Status(ErrorCodes::Unauthorized,
+ str::stream() << "not authorized to communicate with " << target.toString());
+ return;
+ }
+ if (hbdata.getHealth() == 0) {
+ *result =
+ Status(ErrorCodes::HostUnreachable,
+ str::stream() << "I cannot reach the requested member: " << target.toString());
+ return;
+ }
+ if (hbdata.getOpTime().getSecs() + 10 < lastOpApplied.getSecs()) {
+ warning() << "attempting to sync from " << target << ", but its latest opTime is "
+ << hbdata.getOpTime().getSecs() << " and ours is " << lastOpApplied.getSecs()
+ << " so this may not work";
+ response->append("warning",
+ str::stream() << "requested member \"" << target.toString()
+ << "\" is more than 10 seconds behind us");
+ // not returning bad Status, just warning
+ }
+
+ HostAndPort prevSyncSource = getSyncSourceAddress();
+ if (!prevSyncSource.empty()) {
+ response->append("prevSyncTarget", prevSyncSource.toString());
+ }
+
+ setForceSyncSourceIndex(targetIndex);
+ *result = Status::OK();
+}
+
+void TopologyCoordinatorImpl::prepareFreshResponse(
+ const ReplicationCoordinator::ReplSetFreshArgs& args,
+ const Date_t now,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result) {
+ if (_selfIndex == -1) {
+ *result = Status(ErrorCodes::ReplicaSetNotFound,
+ "Cannot participate in elections because not initialized");
+ return;
+ }
+
+ if (args.setName != _rsConfig.getReplSetName()) {
+ *result =
+ Status(ErrorCodes::ReplicaSetNotFound,
+ str::stream() << "Wrong repl set name. Expected: " << _rsConfig.getReplSetName()
+ << ", received: " << args.setName);
+ return;
+ }
+
+ if (args.id == static_cast<unsigned>(_selfConfig().getId())) {
+ *result = Status(ErrorCodes::BadValue,
+ str::stream() << "Received replSetFresh command from member with the "
+ "same member ID as ourself: " << args.id);
+ return;
+ }
+
+ bool weAreFresher = false;
+ if (_rsConfig.getConfigVersion() > args.cfgver) {
+ log() << "replSet member " << args.who << " is not yet aware its cfg version "
+ << args.cfgver << " is stale";
+ response->append("info", "config version stale");
+ weAreFresher = true;
+ }
+ // check not only our own optime, but any other member we can reach
+ else if (OpTime(args.opTime, _term) < _latestKnownOpTime(lastOpApplied)) {
+ weAreFresher = true;
+ }
+ response->appendDate("opTime",
+ Date_t::fromMillisSinceEpoch(lastOpApplied.getTimestamp().asLL()));
+ response->append("fresher", weAreFresher);
+
+ std::string errmsg;
+ bool doVeto = _shouldVetoMember(args, now, lastOpApplied, &errmsg);
+ response->append("veto", doVeto);
+ if (doVeto) {
+ response->append("errmsg", errmsg);
+ }
+ *result = Status::OK();
+}
+
+bool TopologyCoordinatorImpl::_shouldVetoMember(
+ const ReplicationCoordinator::ReplSetFreshArgs& args,
+ const Date_t& now,
+ const OpTime& lastOpApplied,
+ std::string* errmsg) const {
+ if (_rsConfig.getConfigVersion() < args.cfgver) {
+ // We are stale; do not veto.
+ return false;
+ }
- const unsigned int memberID = args.id;
- const int hopefulIndex = _getMemberIndex(memberID);
- invariant(hopefulIndex != _selfIndex);
- const int highestPriorityIndex = _getHighestPriorityElectableIndex(now, lastOpApplied);
+ const unsigned int memberID = args.id;
+ const int hopefulIndex = _getMemberIndex(memberID);
+ invariant(hopefulIndex != _selfIndex);
+ const int highestPriorityIndex = _getHighestPriorityElectableIndex(now, lastOpApplied);
- if (hopefulIndex == -1) {
- *errmsg = str::stream() << "replSet couldn't find member with id " << memberID;
- return true;
- }
+ if (hopefulIndex == -1) {
+ *errmsg = str::stream() << "replSet couldn't find member with id " << memberID;
+ return true;
+ }
- if (_iAmPrimary() && lastOpApplied >= _hbdata[hopefulIndex].getOpTime()) {
- // hbinfo is not updated for ourself, so if we are primary we have to check the
- // primary's last optime separately
- *errmsg = str::stream() << "I am already primary, " <<
- _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString() <<
- " can try again once I've stepped down";
- return true;
- }
+ if (_iAmPrimary() && lastOpApplied >= _hbdata[hopefulIndex].getOpTime()) {
+ // hbinfo is not updated for ourself, so if we are primary we have to check the
+ // primary's last optime separately
+ *errmsg = str::stream() << "I am already primary, "
+ << _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString()
+ << " can try again once I've stepped down";
+ return true;
+ }
- if (_currentPrimaryIndex != -1 &&
- (hopefulIndex != _currentPrimaryIndex) &&
- (_hbdata[_currentPrimaryIndex].getOpTime() >=
- _hbdata[hopefulIndex].getOpTime())) {
- // other members might be aware of more up-to-date nodes
- *errmsg = str::stream() <<
- _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString() <<
- " is trying to elect itself but " <<
- _rsConfig.getMemberAt(_currentPrimaryIndex).getHostAndPort().toString() <<
- " is already primary and more up-to-date";
- return true;
- }
+ if (_currentPrimaryIndex != -1 && (hopefulIndex != _currentPrimaryIndex) &&
+ (_hbdata[_currentPrimaryIndex].getOpTime() >= _hbdata[hopefulIndex].getOpTime())) {
+ // other members might be aware of more up-to-date nodes
+ *errmsg =
+ str::stream() << _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString()
+ << " is trying to elect itself but "
+ << _rsConfig.getMemberAt(_currentPrimaryIndex).getHostAndPort().toString()
+ << " is already primary and more up-to-date";
+ return true;
+ }
- if ((highestPriorityIndex != -1)) {
- const MemberConfig& hopefulMember = _rsConfig.getMemberAt(hopefulIndex);
- const MemberConfig& priorityMember = _rsConfig.getMemberAt(highestPriorityIndex);
-
- if (priorityMember.getPriority() > hopefulMember.getPriority()) {
- *errmsg = str::stream()
- << hopefulMember.getHostAndPort().toString()
- << " has lower priority of " << hopefulMember.getPriority() << " than "
- << priorityMember.getHostAndPort().toString()
- << " which has a priority of " << priorityMember.getPriority();
- return true;
- }
- }
+ if ((highestPriorityIndex != -1)) {
+ const MemberConfig& hopefulMember = _rsConfig.getMemberAt(hopefulIndex);
+ const MemberConfig& priorityMember = _rsConfig.getMemberAt(highestPriorityIndex);
- UnelectableReasonMask reason = _getUnelectableReason(hopefulIndex, lastOpApplied);
- reason &= ~RefusesToStand;
- if (reason) {
- *errmsg = str::stream()
- << "I don't think "
- << _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString()
- << " is electable because the " << _getUnelectableReasonString(reason);
+ if (priorityMember.getPriority() > hopefulMember.getPriority()) {
+ *errmsg = str::stream() << hopefulMember.getHostAndPort().toString()
+ << " has lower priority of " << hopefulMember.getPriority()
+ << " than " << priorityMember.getHostAndPort().toString()
+ << " which has a priority of " << priorityMember.getPriority();
return true;
}
-
- return false;
}
- // produce a reply to a received electCmd
- void TopologyCoordinatorImpl::prepareElectResponse(
- const ReplicationCoordinator::ReplSetElectArgs& args,
- const Date_t now,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result) {
-
- if (_selfIndex == -1) {
- *result = Status(ErrorCodes::ReplicaSetNotFound,
- "Cannot participate in election because not initialized");
- return;
- }
-
- const long long myver = _rsConfig.getConfigVersion();
- const int highestPriorityIndex = _getHighestPriorityElectableIndex(now, lastOpApplied);
-
- const MemberConfig* primary = _currentPrimaryMember();
- const MemberConfig* hopeful = _rsConfig.findMemberByID(args.whoid);
- const MemberConfig* highestPriority = highestPriorityIndex == -1 ? NULL :
- &_rsConfig.getMemberAt(highestPriorityIndex);
-
- int vote = 0;
- if (args.set != _rsConfig.getReplSetName()) {
- log() << "replSet error received an elect request for '" << args.set
- << "' but our set name is '" <<
- _rsConfig.getReplSetName() << "'";
- }
- else if ( myver < args.cfgver ) {
- // we are stale. don't vote
- log() << "replSetElect not voting because our config version is stale. Our version: " <<
- myver << ", their version: " << args.cfgver;
- }
- else if ( myver > args.cfgver ) {
- // they are stale!
- log() << "replSetElect command received stale config version # during election. "
- "Our version: " << myver << ", their version: " << args.cfgver;
- vote = -10000;
- }
- else if (!hopeful) {
- log() << "replSetElect couldn't find member with id " << args.whoid;
- vote = -10000;
- }
- else if (_iAmPrimary()) {
- log() << "I am already primary, " << hopeful->getHostAndPort().toString()
- << " can try again once I've stepped down";
- vote = -10000;
- }
- else if (primary) {
- log() << hopeful->getHostAndPort().toString() << " is trying to elect itself but "
- << primary->getHostAndPort().toString() << " is already primary";
- vote = -10000;
- }
- else if (highestPriority && highestPriority->getPriority() > hopeful->getPriority()) {
- // TODO(spencer): What if the lower-priority member is more up-to-date?
- log() << hopeful->getHostAndPort().toString() << " has lower priority than "
- << highestPriority->getHostAndPort().toString();
- vote = -10000;
- }
- else if (_voteLease.when + VoteLease::leaseTime >= now &&
- _voteLease.whoId != args.whoid) {
- log() << "replSet voting no for "
- << hopeful->getHostAndPort().toString()
- << "; voted for " << _voteLease.whoHostAndPort.toString() << ' '
- << durationCount<Seconds>(now - _voteLease.when) << " secs ago";
- }
- else {
- _voteLease.when = now;
- _voteLease.whoId = args.whoid;
- _voteLease.whoHostAndPort = hopeful->getHostAndPort();
- vote = _selfConfig().getNumVotes();
- invariant(hopeful->getId() == args.whoid);
- if (vote > 0) {
- log() << "replSetElect voting yea for " << hopeful->getHostAndPort().toString()
- << " (" << args.whoid << ')';
- }
- }
-
- response->append("vote", vote);
- response->append("round", args.round);
- *result = Status::OK();
+ UnelectableReasonMask reason = _getUnelectableReason(hopefulIndex, lastOpApplied);
+ reason &= ~RefusesToStand;
+ if (reason) {
+ *errmsg = str::stream() << "I don't think "
+ << _rsConfig.getMemberAt(hopefulIndex).getHostAndPort().toString()
+ << " is electable because the "
+ << _getUnelectableReasonString(reason);
+ return true;
}
- // produce a reply to a heartbeat
- Status TopologyCoordinatorImpl::prepareHeartbeatResponse(
- Date_t now,
- const ReplSetHeartbeatArgs& args,
- const std::string& ourSetName,
- const OpTime& lastOpApplied,
- ReplSetHeartbeatResponse* response) {
-
- if (args.getProtocolVersion() != 1) {
+ return false;
+}
+
+// produce a reply to a received electCmd
+void TopologyCoordinatorImpl::prepareElectResponse(
+ const ReplicationCoordinator::ReplSetElectArgs& args,
+ const Date_t now,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result) {
+ if (_selfIndex == -1) {
+ *result = Status(ErrorCodes::ReplicaSetNotFound,
+ "Cannot participate in election because not initialized");
+ return;
+ }
+
+ const long long myver = _rsConfig.getConfigVersion();
+ const int highestPriorityIndex = _getHighestPriorityElectableIndex(now, lastOpApplied);
+
+ const MemberConfig* primary = _currentPrimaryMember();
+ const MemberConfig* hopeful = _rsConfig.findMemberByID(args.whoid);
+ const MemberConfig* highestPriority =
+ highestPriorityIndex == -1 ? NULL : &_rsConfig.getMemberAt(highestPriorityIndex);
+
+ int vote = 0;
+ if (args.set != _rsConfig.getReplSetName()) {
+ log() << "replSet error received an elect request for '" << args.set
+ << "' but our set name is '" << _rsConfig.getReplSetName() << "'";
+ } else if (myver < args.cfgver) {
+ // we are stale. don't vote
+ log() << "replSetElect not voting because our config version is stale. Our version: "
+ << myver << ", their version: " << args.cfgver;
+ } else if (myver > args.cfgver) {
+ // they are stale!
+ log() << "replSetElect command received stale config version # during election. "
+ "Our version: " << myver << ", their version: " << args.cfgver;
+ vote = -10000;
+ } else if (!hopeful) {
+ log() << "replSetElect couldn't find member with id " << args.whoid;
+ vote = -10000;
+ } else if (_iAmPrimary()) {
+ log() << "I am already primary, " << hopeful->getHostAndPort().toString()
+ << " can try again once I've stepped down";
+ vote = -10000;
+ } else if (primary) {
+ log() << hopeful->getHostAndPort().toString() << " is trying to elect itself but "
+ << primary->getHostAndPort().toString() << " is already primary";
+ vote = -10000;
+ } else if (highestPriority && highestPriority->getPriority() > hopeful->getPriority()) {
+ // TODO(spencer): What if the lower-priority member is more up-to-date?
+ log() << hopeful->getHostAndPort().toString() << " has lower priority than "
+ << highestPriority->getHostAndPort().toString();
+ vote = -10000;
+ } else if (_voteLease.when + VoteLease::leaseTime >= now && _voteLease.whoId != args.whoid) {
+ log() << "replSet voting no for " << hopeful->getHostAndPort().toString() << "; voted for "
+ << _voteLease.whoHostAndPort.toString() << ' '
+ << durationCount<Seconds>(now - _voteLease.when) << " secs ago";
+ } else {
+ _voteLease.when = now;
+ _voteLease.whoId = args.whoid;
+ _voteLease.whoHostAndPort = hopeful->getHostAndPort();
+ vote = _selfConfig().getNumVotes();
+ invariant(hopeful->getId() == args.whoid);
+ if (vote > 0) {
+ log() << "replSetElect voting yea for " << hopeful->getHostAndPort().toString() << " ("
+ << args.whoid << ')';
+ }
+ }
+
+ response->append("vote", vote);
+ response->append("round", args.round);
+ *result = Status::OK();
+}
+
+// produce a reply to a heartbeat
+Status TopologyCoordinatorImpl::prepareHeartbeatResponse(Date_t now,
+ const ReplSetHeartbeatArgs& args,
+ const std::string& ourSetName,
+ const OpTime& lastOpApplied,
+ ReplSetHeartbeatResponse* response) {
+ if (args.getProtocolVersion() != 1) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "replset: incompatible replset protocol version: "
+ << args.getProtocolVersion());
+ }
+
+ // Verify that replica set names match
+ const std::string rshb = args.getSetName();
+ if (ourSetName != rshb) {
+ log() << "replSet set names do not match, ours: " << ourSetName
+ << "; remote node's: " << rshb;
+ response->noteMismatched();
+ return Status(ErrorCodes::InconsistentReplicaSetNames,
+ str::stream() << "Our set name of " << ourSetName << " does not match name "
+ << rshb << " reported by remote node");
+ }
+
+ const MemberState myState = getMemberState();
+ if (_selfIndex == -1) {
+ if (myState.removed()) {
+ return Status(ErrorCodes::InvalidReplicaSetConfig,
+ "Our replica set configuration is invalid or does not include us");
+ }
+ } else {
+ invariant(_rsConfig.getReplSetName() == args.getSetName());
+ if (args.getSenderId() == _selfConfig().getId()) {
return Status(ErrorCodes::BadValue,
- str::stream() << "replset: incompatible replset protocol version: "
- << args.getProtocolVersion());
- }
-
- // Verify that replica set names match
- const std::string rshb = args.getSetName();
- if (ourSetName != rshb) {
- log() << "replSet set names do not match, ours: " << ourSetName <<
- "; remote node's: " << rshb;
- response->noteMismatched();
- return Status(ErrorCodes::InconsistentReplicaSetNames, str::stream() <<
- "Our set name of " << ourSetName << " does not match name " << rshb <<
- " reported by remote node");
- }
-
- const MemberState myState = getMemberState();
- if (_selfIndex == -1) {
- if (myState.removed()) {
- return Status(ErrorCodes::InvalidReplicaSetConfig,
- "Our replica set configuration is invalid or does not include us");
- }
+ str::stream() << "Received heartbeat from member with the same "
+ "member ID as ourself: " << args.getSenderId());
}
- else {
- invariant(_rsConfig.getReplSetName() == args.getSetName());
- if (args.getSenderId() == _selfConfig().getId()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Received heartbeat from member with the same "
- "member ID as ourself: " << args.getSenderId());
- }
- }
-
- // This is a replica set
- response->noteReplSet();
-
- response->setSetName(ourSetName);
- response->setState(myState.s);
- if (myState.primary()) {
- response->setElectionTime(_electionTime);
- }
-
- // Are we electable
- response->setElectable(!_getMyUnelectableReason(now, lastOpApplied));
+ }
- // Heartbeat status message
- response->setHbMsg(_getHbmsg(now));
- response->setTime(duration_cast<Seconds>(now - Date_t{}));
- response->setOpTime(lastOpApplied);
+ // This is a replica set
+ response->noteReplSet();
- if (!_syncSource.empty()) {
- response->setSyncingTo(_syncSource);
- }
-
- if (!_rsConfig.isInitialized()) {
- response->setConfigVersion(-2);
- return Status::OK();
- }
+ response->setSetName(ourSetName);
+ response->setState(myState.s);
+ if (myState.primary()) {
+ response->setElectionTime(_electionTime);
+ }
- const long long v = _rsConfig.getConfigVersion();
- response->setConfigVersion(v);
- // Deliver new config if caller's version is older than ours
- if (v > args.getConfigVersion()) {
- response->setConfig(_rsConfig);
- }
+ // Are we electable
+ response->setElectable(!_getMyUnelectableReason(now, lastOpApplied));
- // Resolve the caller's id in our Member list
- int from = -1;
- if (v == args.getConfigVersion() && args.getSenderId() != -1) {
- from = _getMemberIndex(args.getSenderId());
- }
- if (from == -1) {
- // Can't find the member, so we leave out the stateDisagreement field
- return Status::OK();
- }
- invariant(from != _selfIndex);
+ // Heartbeat status message
+ response->setHbMsg(_getHbmsg(now));
+ response->setTime(duration_cast<Seconds>(now - Date_t{}));
+ response->setOpTime(lastOpApplied);
- // if we thought that this node is down, let it know
- if (!_hbdata[from].up()) {
- response->noteStateDisagreement();
- }
+ if (!_syncSource.empty()) {
+ response->setSyncingTo(_syncSource);
+ }
- // note that we got a heartbeat from this node
- _hbdata[from].setLastHeartbeatRecv(now);
+ if (!_rsConfig.isInitialized()) {
+ response->setConfigVersion(-2);
return Status::OK();
}
- Status TopologyCoordinatorImpl::prepareHeartbeatResponseV1(
- Date_t now,
- const ReplSetHeartbeatArgsV1& args,
- const std::string& ourSetName,
- const OpTime& lastOpApplied,
- ReplSetHeartbeatResponse* response) {
-
- // Verify that replica set names match
- const std::string rshb = args.getSetName();
- if (ourSetName != rshb) {
- log() << "replSet set names do not match, ours: " << ourSetName <<
- "; remote node's: " << rshb;
- return Status(ErrorCodes::InconsistentReplicaSetNames, str::stream() <<
- "Our set name of " << ourSetName << " does not match name " << rshb <<
- " reported by remote node");
- }
+ const long long v = _rsConfig.getConfigVersion();
+ response->setConfigVersion(v);
+ // Deliver new config if caller's version is older than ours
+ if (v > args.getConfigVersion()) {
+ response->setConfig(_rsConfig);
+ }
- const MemberState myState = getMemberState();
- if (_selfIndex == -1) {
- if (myState.removed()) {
- return Status(ErrorCodes::InvalidReplicaSetConfig,
- "Our replica set configuration is invalid or does not include us");
- }
- }
- else {
- if (args.getSenderId() == _selfConfig().getId()) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "Received heartbeat from member with the same "
- "member ID as ourself: " << args.getSenderId());
- }
+ // Resolve the caller's id in our Member list
+ int from = -1;
+ if (v == args.getConfigVersion() && args.getSenderId() != -1) {
+ from = _getMemberIndex(args.getSenderId());
+ }
+ if (from == -1) {
+ // Can't find the member, so we leave out the stateDisagreement field
+ return Status::OK();
+ }
+ invariant(from != _selfIndex);
+
+ // if we thought that this node is down, let it know
+ if (!_hbdata[from].up()) {
+ response->noteStateDisagreement();
+ }
+
+ // note that we got a heartbeat from this node
+ _hbdata[from].setLastHeartbeatRecv(now);
+ return Status::OK();
+}
+
+Status TopologyCoordinatorImpl::prepareHeartbeatResponseV1(Date_t now,
+ const ReplSetHeartbeatArgsV1& args,
+ const std::string& ourSetName,
+ const OpTime& lastOpApplied,
+ ReplSetHeartbeatResponse* response) {
+ // Verify that replica set names match
+ const std::string rshb = args.getSetName();
+ if (ourSetName != rshb) {
+ log() << "replSet set names do not match, ours: " << ourSetName
+ << "; remote node's: " << rshb;
+ return Status(ErrorCodes::InconsistentReplicaSetNames,
+ str::stream() << "Our set name of " << ourSetName << " does not match name "
+ << rshb << " reported by remote node");
+ }
+
+ const MemberState myState = getMemberState();
+ if (_selfIndex == -1) {
+ if (myState.removed()) {
+ return Status(ErrorCodes::InvalidReplicaSetConfig,
+ "Our replica set configuration is invalid or does not include us");
+ }
+ } else {
+ if (args.getSenderId() == _selfConfig().getId()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "Received heartbeat from member with the same "
+ "member ID as ourself: " << args.getSenderId());
}
+ }
- response->setSetName(ourSetName);
-
- response->setState(myState.s);
-
- response->setOpTime(lastOpApplied);
+ response->setSetName(ourSetName);
- if (_currentPrimaryIndex != -1) {
- response->setPrimaryId(_rsConfig.getMemberAt(_currentPrimaryIndex).getId());
- }
+ response->setState(myState.s);
- response->setTerm(_term);
+ response->setOpTime(lastOpApplied);
- if (!_syncSource.empty()) {
- response->setSyncingTo(_syncSource);
- }
-
- if (!_rsConfig.isInitialized()) {
- response->setConfigVersion(-2);
- return Status::OK();
- }
+ if (_currentPrimaryIndex != -1) {
+ response->setPrimaryId(_rsConfig.getMemberAt(_currentPrimaryIndex).getId());
+ }
- const long long v = _rsConfig.getConfigVersion();
- response->setConfigVersion(v);
- // Deliver new config if caller's version is older than ours
- if (v > args.getConfigVersion()) {
- response->setConfig(_rsConfig);
- }
+ response->setTerm(_term);
- // Resolve the caller's id in our Member list
- int from = -1;
- if (v == args.getConfigVersion() && args.getSenderId() != -1) {
- from = _getMemberIndex(args.getSenderId());
- }
- if (from == -1) {
- return Status::OK();
- }
- invariant(from != _selfIndex);
+ if (!_syncSource.empty()) {
+ response->setSyncingTo(_syncSource);
+ }
- // note that we got a heartbeat from this node
- _hbdata[from].setLastHeartbeatRecv(now);
+ if (!_rsConfig.isInitialized()) {
+ response->setConfigVersion(-2);
return Status::OK();
}
- int TopologyCoordinatorImpl::_getMemberIndex(int id) const {
- int index = 0;
- for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
- it != _rsConfig.membersEnd();
- ++it, ++index) {
- if (it->getId() == id) {
- return index;
- }
- }
- return -1;
+ const long long v = _rsConfig.getConfigVersion();
+ response->setConfigVersion(v);
+ // Deliver new config if caller's version is older than ours
+ if (v > args.getConfigVersion()) {
+ response->setConfig(_rsConfig);
}
- std::pair<ReplSetHeartbeatArgs, Milliseconds> TopologyCoordinatorImpl::prepareHeartbeatRequest(
- Date_t now,
- const std::string& ourSetName,
- const HostAndPort& target) {
-
- PingStats& hbStats = _pings[target];
- Milliseconds alreadyElapsed = now - hbStats.getLastHeartbeatStartDate();
- if (!_rsConfig.isInitialized() ||
- (hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries) ||
- (alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
-
- // This is either the first request ever for "target", or the heartbeat timeout has
- // passed, so we're starting a "new" heartbeat.
- hbStats.start(now);
- alreadyElapsed = Milliseconds(0);
- }
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setProtocolVersion(1);
- hbArgs.setCheckEmpty(false);
- if (_rsConfig.isInitialized()) {
- hbArgs.setSetName(_rsConfig.getReplSetName());
- hbArgs.setConfigVersion(_rsConfig.getConfigVersion());
- if (_selfIndex >= 0) {
- const MemberConfig& me = _selfConfig();
- hbArgs.setSenderHost(me.getHostAndPort());
- hbArgs.setSenderId(me.getId());
- }
- }
- else {
- hbArgs.setSetName(ourSetName);
- hbArgs.setConfigVersion(-2);
- }
-
- const Milliseconds timeoutPeriod(
- _rsConfig.isInitialized() ?
- _rsConfig.getHeartbeatTimeoutPeriodMillis() :
- ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod);
- const Milliseconds timeout = timeoutPeriod - alreadyElapsed;
- return std::make_pair(hbArgs, timeout);
- }
-
- std::pair<ReplSetHeartbeatArgsV1, Milliseconds>
- TopologyCoordinatorImpl::prepareHeartbeatRequestV1(
- Date_t now,
- const std::string& ourSetName,
- const HostAndPort& target) {
-
- PingStats& hbStats = _pings[target];
- Milliseconds alreadyElapsed(now.asInt64() - hbStats.getLastHeartbeatStartDate().asInt64());
- if (!_rsConfig.isInitialized() ||
- (hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries) ||
- (alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
-
- // This is either the first request ever for "target", or the heartbeat timeout has
- // passed, so we're starting a "new" heartbeat.
- hbStats.start(now);
- alreadyElapsed = Milliseconds(0);
- }
- ReplSetHeartbeatArgsV1 hbArgs;
+ // Resolve the caller's id in our Member list
+ int from = -1;
+ if (v == args.getConfigVersion() && args.getSenderId() != -1) {
+ from = _getMemberIndex(args.getSenderId());
+ }
+ if (from == -1) {
+ return Status::OK();
+ }
+ invariant(from != _selfIndex);
+
+ // note that we got a heartbeat from this node
+ _hbdata[from].setLastHeartbeatRecv(now);
+ return Status::OK();
+}
+
+int TopologyCoordinatorImpl::_getMemberIndex(int id) const {
+ int index = 0;
+ for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
+ it != _rsConfig.membersEnd();
+ ++it, ++index) {
+ if (it->getId() == id) {
+ return index;
+ }
+ }
+ return -1;
+}
+
+std::pair<ReplSetHeartbeatArgs, Milliseconds> TopologyCoordinatorImpl::prepareHeartbeatRequest(
+ Date_t now, const std::string& ourSetName, const HostAndPort& target) {
+ PingStats& hbStats = _pings[target];
+ Milliseconds alreadyElapsed = now - hbStats.getLastHeartbeatStartDate();
+ if (!_rsConfig.isInitialized() ||
+ (hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries) ||
+ (alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
+ // This is either the first request ever for "target", or the heartbeat timeout has
+ // passed, so we're starting a "new" heartbeat.
+ hbStats.start(now);
+ alreadyElapsed = Milliseconds(0);
+ }
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setCheckEmpty(false);
+ if (_rsConfig.isInitialized()) {
hbArgs.setSetName(_rsConfig.getReplSetName());
hbArgs.setConfigVersion(_rsConfig.getConfigVersion());
if (_selfIndex >= 0) {
const MemberConfig& me = _selfConfig();
- hbArgs.setSenderId(me.getId());
hbArgs.setSenderHost(me.getHostAndPort());
+ hbArgs.setSenderId(me.getId());
}
- hbArgs.setTerm(_term);
-
- const Milliseconds timeoutPeriod(
- _rsConfig.isInitialized() ?
- _rsConfig.getHeartbeatTimeoutPeriodMillis() :
- Milliseconds(
- ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod.count()));
- const Milliseconds timeout(timeoutPeriod.count() - alreadyElapsed.count());
- return std::make_pair(hbArgs, timeout);
- }
-
- HeartbeatResponseAction TopologyCoordinatorImpl::processHeartbeatResponse(
- Date_t now,
- Milliseconds networkRoundTripTime,
- const HostAndPort& target,
- const StatusWith<ReplSetHeartbeatResponse>& hbResponse,
- const OpTime& myLastOpApplied) {
-
- const MemberState originalState = getMemberState();
- PingStats& hbStats = _pings[target];
- invariant(hbStats.getLastHeartbeatStartDate() != Date_t());
- if (!hbResponse.isOK()) {
- hbStats.miss();
- }
- else {
- hbStats.hit(networkRoundTripTime.count());
- // Log diagnostics.
- if (hbResponse.getValue().isStateDisagreement()) {
- LOG(1) << target <<
- " thinks that we are down because they cannot send us heartbeats.";
- }
- }
-
- const bool isUnauthorized =
- (hbResponse.getStatus().code() == ErrorCodes::Unauthorized) ||
- (hbResponse.getStatus().code() == ErrorCodes::AuthenticationFailed);
-
- const Milliseconds alreadyElapsed = now - hbStats.getLastHeartbeatStartDate();
- Date_t nextHeartbeatStartDate;
- // determine next start time
- if (_rsConfig.isInitialized() &&
- (hbStats.getNumFailuresSinceLastStart() <= kMaxHeartbeatRetries) &&
- (alreadyElapsed < _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
-
- if (isUnauthorized) {
- nextHeartbeatStartDate = now + kHeartbeatInterval;
- } else {
- nextHeartbeatStartDate = now;
- }
- }
- else {
+ } else {
+ hbArgs.setSetName(ourSetName);
+ hbArgs.setConfigVersion(-2);
+ }
+
+ const Milliseconds timeoutPeriod(_rsConfig.isInitialized()
+ ? _rsConfig.getHeartbeatTimeoutPeriodMillis()
+ : ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod);
+ const Milliseconds timeout = timeoutPeriod - alreadyElapsed;
+ return std::make_pair(hbArgs, timeout);
+}
+
+std::pair<ReplSetHeartbeatArgsV1, Milliseconds> TopologyCoordinatorImpl::prepareHeartbeatRequestV1(
+ Date_t now, const std::string& ourSetName, const HostAndPort& target) {
+ PingStats& hbStats = _pings[target];
+ Milliseconds alreadyElapsed(now.asInt64() - hbStats.getLastHeartbeatStartDate().asInt64());
+ if (!_rsConfig.isInitialized() ||
+ (hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries) ||
+ (alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
+ // This is either the first request ever for "target", or the heartbeat timeout has
+ // passed, so we're starting a "new" heartbeat.
+ hbStats.start(now);
+ alreadyElapsed = Milliseconds(0);
+ }
+ ReplSetHeartbeatArgsV1 hbArgs;
+ hbArgs.setSetName(_rsConfig.getReplSetName());
+ hbArgs.setConfigVersion(_rsConfig.getConfigVersion());
+ if (_selfIndex >= 0) {
+ const MemberConfig& me = _selfConfig();
+ hbArgs.setSenderId(me.getId());
+ hbArgs.setSenderHost(me.getHostAndPort());
+ }
+ hbArgs.setTerm(_term);
+
+ const Milliseconds timeoutPeriod(
+ _rsConfig.isInitialized()
+ ? _rsConfig.getHeartbeatTimeoutPeriodMillis()
+ : Milliseconds(ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod.count()));
+ const Milliseconds timeout(timeoutPeriod.count() - alreadyElapsed.count());
+ return std::make_pair(hbArgs, timeout);
+}
+
+HeartbeatResponseAction TopologyCoordinatorImpl::processHeartbeatResponse(
+ Date_t now,
+ Milliseconds networkRoundTripTime,
+ const HostAndPort& target,
+ const StatusWith<ReplSetHeartbeatResponse>& hbResponse,
+ const OpTime& myLastOpApplied) {
+ const MemberState originalState = getMemberState();
+ PingStats& hbStats = _pings[target];
+ invariant(hbStats.getLastHeartbeatStartDate() != Date_t());
+ if (!hbResponse.isOK()) {
+ hbStats.miss();
+ } else {
+ hbStats.hit(networkRoundTripTime.count());
+ // Log diagnostics.
+ if (hbResponse.getValue().isStateDisagreement()) {
+ LOG(1) << target << " thinks that we are down because they cannot send us heartbeats.";
+ }
+ }
+
+ const bool isUnauthorized = (hbResponse.getStatus().code() == ErrorCodes::Unauthorized) ||
+ (hbResponse.getStatus().code() == ErrorCodes::AuthenticationFailed);
+
+ const Milliseconds alreadyElapsed = now - hbStats.getLastHeartbeatStartDate();
+ Date_t nextHeartbeatStartDate;
+ // determine next start time
+ if (_rsConfig.isInitialized() &&
+ (hbStats.getNumFailuresSinceLastStart() <= kMaxHeartbeatRetries) &&
+ (alreadyElapsed < _rsConfig.getHeartbeatTimeoutPeriodMillis())) {
+ if (isUnauthorized) {
nextHeartbeatStartDate = now + kHeartbeatInterval;
+ } else {
+ nextHeartbeatStartDate = now;
}
+ } else {
+ nextHeartbeatStartDate = now + kHeartbeatInterval;
+ }
- if (hbResponse.isOK() && hbResponse.getValue().hasConfig()) {
- const long long currentConfigVersion =
- _rsConfig.isInitialized() ? _rsConfig.getConfigVersion() : -2;
- const ReplicaSetConfig& newConfig = hbResponse.getValue().getConfig();
- if (newConfig.getConfigVersion() > currentConfigVersion) {
- HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeReconfigAction();
- nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
- return nextAction;
- }
- else {
- // Could be we got the newer version before we got the response, or the
-                // target erroneously sent us one, even though it isn't newer.
- if (newConfig.getConfigVersion() < currentConfigVersion) {
- LOG(1) << "Config version from heartbeat was older than ours.";
- }
- else {
- LOG(2) << "Config from heartbeat response was same as ours.";
- }
- if (logger::globalLogDomain()->shouldLog(
- MongoLogDefaultComponent_component,
- ::mongo::LogstreamBuilder::severityCast(2))) {
- LogstreamBuilder lsb = log();
- if (_rsConfig.isInitialized()) {
- lsb << "Current config: " << _rsConfig.toBSON() << "; ";
- }
- lsb << "Config in heartbeat: " << newConfig.toBSON();
- }
- }
- }
-
- // Check if the heartbeat target is in our config. If it isn't, there's nothing left to do,
- // so return early.
- if (!_rsConfig.isInitialized()) {
- HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
+ if (hbResponse.isOK() && hbResponse.getValue().hasConfig()) {
+ const long long currentConfigVersion =
+ _rsConfig.isInitialized() ? _rsConfig.getConfigVersion() : -2;
+ const ReplicaSetConfig& newConfig = hbResponse.getValue().getConfig();
+ if (newConfig.getConfigVersion() > currentConfigVersion) {
+ HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeReconfigAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
return nextAction;
- }
- const int memberIndex = _rsConfig.findMemberIndexByHostAndPort(target);
- if (memberIndex == -1) {
- LOG(1) << "Could not find " << target << " in current config so ignoring --"
- " current config: " << _rsConfig.toBSON();
- HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
- nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
- return nextAction;
- }
- invariant(memberIndex != _selfIndex);
-
- MemberHeartbeatData& hbData = _hbdata[memberIndex];
- const MemberConfig member = _rsConfig.getMemberAt(memberIndex);
- if (!hbResponse.isOK()) {
- if (isUnauthorized) {
- LOG(1) << "setAuthIssue: heartbeat response failed due to authentication"
- " issue for member _id:" << member.getId();
- hbData.setAuthIssue(now);
- }
- else if (hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries ||
- alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriodMillis()) {
-
- LOG(1) << "setDownValues: heartbeat response failed for member _id:"
- << member.getId() << ", msg: "
- << hbResponse.getStatus().reason();
-
- hbData.setDownValues(now, hbResponse.getStatus().reason());
+ } else {
+ // Could be we got the newer version before we got the response, or the
+            // target erroneously sent us one, even though it isn't newer.
+ if (newConfig.getConfigVersion() < currentConfigVersion) {
+ LOG(1) << "Config version from heartbeat was older than ours.";
+ } else {
+ LOG(2) << "Config from heartbeat response was same as ours.";
}
- else {
- LOG(3) << "Bad heartbeat response from " << target <<
- "; trying again; Retries left: " <<
- (kMaxHeartbeatRetries - hbStats.getNumFailuresSinceLastStart()) <<
- "; " << alreadyElapsed.count() << "ms have already elapsed";
+ if (logger::globalLogDomain()->shouldLog(MongoLogDefaultComponent_component,
+ ::mongo::LogstreamBuilder::severityCast(2))) {
+ LogstreamBuilder lsb = log();
+ if (_rsConfig.isInitialized()) {
+ lsb << "Current config: " << _rsConfig.toBSON() << "; ";
+ }
+ lsb << "Config in heartbeat: " << newConfig.toBSON();
}
}
- else {
- ReplSetHeartbeatResponse hbr = hbResponse.getValue();
- LOG(3) << "setUpValues: heartbeat response good for member _id:"
- << member.getId() << ", msg: "
- << hbr.getHbMsg();
- hbData.setUpValues(now, member.getHostAndPort(), hbr);
- }
- HeartbeatResponseAction nextAction = _updateHeartbeatDataImpl(
- memberIndex,
- originalState,
- now,
- myLastOpApplied);
+ }
+ // Check if the heartbeat target is in our config. If it isn't, there's nothing left to do,
+ // so return early.
+ if (!_rsConfig.isInitialized()) {
+ HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
return nextAction;
}
+ const int memberIndex = _rsConfig.findMemberIndexByHostAndPort(target);
+ if (memberIndex == -1) {
+ LOG(1) << "Could not find " << target << " in current config so ignoring --"
+ " current config: " << _rsConfig.toBSON();
+ HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
+ nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
+ return nextAction;
+ }
+ invariant(memberIndex != _selfIndex);
+
+ MemberHeartbeatData& hbData = _hbdata[memberIndex];
+ const MemberConfig member = _rsConfig.getMemberAt(memberIndex);
+ if (!hbResponse.isOK()) {
+ if (isUnauthorized) {
+ LOG(1) << "setAuthIssue: heartbeat response failed due to authentication"
+ " issue for member _id:" << member.getId();
+ hbData.setAuthIssue(now);
+ } else if (hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries ||
+ alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriodMillis()) {
+ LOG(1) << "setDownValues: heartbeat response failed for member _id:" << member.getId()
+ << ", msg: " << hbResponse.getStatus().reason();
+
+ hbData.setDownValues(now, hbResponse.getStatus().reason());
+ } else {
+ LOG(3) << "Bad heartbeat response from " << target << "; trying again; Retries left: "
+ << (kMaxHeartbeatRetries - hbStats.getNumFailuresSinceLastStart()) << "; "
+ << alreadyElapsed.count() << "ms have already elapsed";
+ }
+ } else {
+ ReplSetHeartbeatResponse hbr = hbResponse.getValue();
+ LOG(3) << "setUpValues: heartbeat response good for member _id:" << member.getId()
+ << ", msg: " << hbr.getHbMsg();
+ hbData.setUpValues(now, member.getHostAndPort(), hbr);
+ }
+ HeartbeatResponseAction nextAction =
+ _updateHeartbeatDataImpl(memberIndex, originalState, now, myLastOpApplied);
+
+ nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
+ return nextAction;
+}
+
+HeartbeatResponseAction TopologyCoordinatorImpl::_updateHeartbeatDataImpl(
+ int updatedConfigIndex,
+ const MemberState& originalState,
+ Date_t now,
+ const OpTime& lastOpApplied) {
+ // This method has two interrelated responsibilities, performed in two phases.
+ //
+    // First, it updates the local notion of which remote node, if any, is primary. In the
+ // process, it may request a remote primary to step down because there is a higher priority
+ // node waiting, or because the local node thinks it is primary and that it has a more
+ // recent electionTime. It may instead decide that the local node should step down itself,
+ // because a remote has a more recent election time.
+ //
+ // Second, if there is no remote primary, and the local node is not primary, it considers
+ // whether or not to stand for election.
+ invariant(updatedConfigIndex != _selfIndex);
+
+ // We are missing from the config, so do not participate in primary maintenance or election.
+ if (_selfIndex == -1) {
+ return HeartbeatResponseAction::makeNoAction();
+ }
+
+ ////////////////////
+ // Phase 1
+ ////////////////////
+
+ // If we believe the node whose data was just updated is primary, confirm that
+ // the updated data supports that notion. If not, erase our notion of who is primary.
+ if (updatedConfigIndex == _currentPrimaryIndex) {
+ const MemberHeartbeatData& updatedHBData = _hbdata[updatedConfigIndex];
+ if (!updatedHBData.up() || !updatedHBData.getState().primary()) {
+ _currentPrimaryIndex = -1;
+ }
+ }
+
+ // If the current primary is not highest priority and up to date (within 10s),
+ // have them/me stepdown.
+ if (_currentPrimaryIndex != -1) {
+ // check if we should ask the primary (possibly ourselves) to step down
+ const int highestPriorityIndex = _getHighestPriorityElectableIndex(now, lastOpApplied);
+ if (highestPriorityIndex != -1) {
+ const MemberConfig& currentPrimaryMember = _rsConfig.getMemberAt(_currentPrimaryIndex);
+ const MemberConfig& highestPriorityMember = _rsConfig.getMemberAt(highestPriorityIndex);
+ const OpTime highestPriorityMemberOptime = highestPriorityIndex == _selfIndex
+ ? lastOpApplied
+ : _hbdata[highestPriorityIndex].getOpTime();
- HeartbeatResponseAction TopologyCoordinatorImpl::_updateHeartbeatDataImpl(
- int updatedConfigIndex,
- const MemberState& originalState,
- Date_t now,
- const OpTime& lastOpApplied) {
-
- // This method has two interrelated responsibilities, performed in two phases.
- //
- // First, it updates the local notion of which remote node, if any is primary. In the
- // process, it may request a remote primary to step down because there is a higher priority
- // node waiting, or because the local node thinks it is primary and that it has a more
- // recent electionTime. It may instead decide that the local node should step down itself,
- // because a remote has a more recent election time.
- //
- // Second, if there is no remote primary, and the local node is not primary, it considers
- // whether or not to stand for election.
- invariant(updatedConfigIndex != _selfIndex);
-
- // We are missing from the config, so do not participate in primary maintenance or election.
- if (_selfIndex == -1) {
- return HeartbeatResponseAction::makeNoAction();
- }
-
- ////////////////////
- // Phase 1
- ////////////////////
-
- // If we believe the node whose data was just updated is primary, confirm that
- // the updated data supports that notion. If not, erase our notion of who is primary.
- if (updatedConfigIndex == _currentPrimaryIndex) {
- const MemberHeartbeatData& updatedHBData = _hbdata[updatedConfigIndex];
- if (!updatedHBData.up() || !updatedHBData.getState().primary()) {
- _currentPrimaryIndex = -1;
- }
- }
+ if ((highestPriorityMember.getPriority() > currentPrimaryMember.getPriority()) &&
+ _isOpTimeCloseEnoughToLatestToElect(highestPriorityMemberOptime, lastOpApplied)) {
+ const OpTime latestOpTime = _latestKnownOpTime(lastOpApplied);
- // If the current primary is not highest priority and up to date (within 10s),
- // have them/me stepdown.
- if (_currentPrimaryIndex != -1) {
- // check if we should ask the primary (possibly ourselves) to step down
- const int highestPriorityIndex = _getHighestPriorityElectableIndex(now, lastOpApplied);
- if (highestPriorityIndex != -1) {
- const MemberConfig& currentPrimaryMember =
- _rsConfig.getMemberAt(_currentPrimaryIndex);
- const MemberConfig& highestPriorityMember =
- _rsConfig.getMemberAt(highestPriorityIndex);
- const OpTime highestPriorityMemberOptime = highestPriorityIndex == _selfIndex ?
- lastOpApplied : _hbdata[highestPriorityIndex].getOpTime();
-
- if ((highestPriorityMember.getPriority() > currentPrimaryMember.getPriority()) &&
- _isOpTimeCloseEnoughToLatestToElect(highestPriorityMemberOptime,
- lastOpApplied)) {
- const OpTime latestOpTime = _latestKnownOpTime(lastOpApplied);
-
- if (_iAmPrimary()) {
- if (_stepDownPending) {
- return HeartbeatResponseAction::makeNoAction();
- }
- _stepDownPending = true;
- log() << "Stepping down self (priority "
- << currentPrimaryMember.getPriority() << ") because "
- << highestPriorityMember.getHostAndPort() << " has higher priority "
- << highestPriorityMember.getPriority() << " and is only "
- << (latestOpTime.getSecs() - highestPriorityMemberOptime.getSecs())
- << " seconds behind me";
- const Date_t until = now + VoteLease::leaseTime + kHeartbeatInterval;
- if (_electionSleepUntil < until) {
- _electionSleepUntil = until;
- }
- return HeartbeatResponseAction::makeStepDownSelfAction(_selfIndex);
+ if (_iAmPrimary()) {
+ if (_stepDownPending) {
+ return HeartbeatResponseAction::makeNoAction();
}
- else if ((highestPriorityIndex == _selfIndex) &&
- (_electionSleepUntil <= now)) {
- // If this node is the highest priority node, and it is not in
- // an inter-election sleep period, ask the current primary to step down.
- // This is an optimization, because the remote primary will almost certainly
- // notice this node's electability promptly, via its own heartbeat process.
- log() << "Requesting that " << currentPrimaryMember.getHostAndPort()
- << " (priority " << currentPrimaryMember.getPriority()
- << ") step down because I have higher priority "
- << highestPriorityMember.getPriority() << " and am only "
- << (latestOpTime.getSecs() - highestPriorityMemberOptime.getSecs())
- << " seconds behind it";
- int primaryIndex = _currentPrimaryIndex;
- _currentPrimaryIndex = -1;
- return HeartbeatResponseAction::makeStepDownRemoteAction(primaryIndex);
+ _stepDownPending = true;
+ log() << "Stepping down self (priority " << currentPrimaryMember.getPriority()
+ << ") because " << highestPriorityMember.getHostAndPort()
+ << " has higher priority " << highestPriorityMember.getPriority()
+ << " and is only "
+ << (latestOpTime.getSecs() - highestPriorityMemberOptime.getSecs())
+ << " seconds behind me";
+ const Date_t until = now + VoteLease::leaseTime + kHeartbeatInterval;
+ if (_electionSleepUntil < until) {
+ _electionSleepUntil = until;
}
+ return HeartbeatResponseAction::makeStepDownSelfAction(_selfIndex);
+ } else if ((highestPriorityIndex == _selfIndex) && (_electionSleepUntil <= now)) {
+ // If this node is the highest priority node, and it is not in
+ // an inter-election sleep period, ask the current primary to step down.
+ // This is an optimization, because the remote primary will almost certainly
+ // notice this node's electability promptly, via its own heartbeat process.
+ log() << "Requesting that " << currentPrimaryMember.getHostAndPort()
+ << " (priority " << currentPrimaryMember.getPriority()
+ << ") step down because I have higher priority "
+ << highestPriorityMember.getPriority() << " and am only "
+ << (latestOpTime.getSecs() - highestPriorityMemberOptime.getSecs())
+ << " seconds behind it";
+ int primaryIndex = _currentPrimaryIndex;
+ _currentPrimaryIndex = -1;
+ return HeartbeatResponseAction::makeStepDownRemoteAction(primaryIndex);
}
}
}
+ }
- // Scan the member list's heartbeat data for who is primary, and update
- // _currentPrimaryIndex and _role, or request a remote to step down, as necessary.
- {
- int remotePrimaryIndex = -1;
- for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
- it != _hbdata.end();
- ++it) {
- const int itIndex = indexOfIterator(_hbdata, it);
- if (itIndex == _selfIndex) {
- continue;
- }
-
- if( it->getState().primary() && it->up() ) {
- if (remotePrimaryIndex != -1) {
- // two other nodes think they are primary (asynchronously polled)
- // -- wait for things to settle down.
- warning() << "two remote primaries (transiently)";
- return HeartbeatResponseAction::makeNoAction();
- }
- remotePrimaryIndex = itIndex;
- }
+ // Scan the member list's heartbeat data for who is primary, and update
+ // _currentPrimaryIndex and _role, or request a remote to step down, as necessary.
+ {
+ int remotePrimaryIndex = -1;
+ for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
+ it != _hbdata.end();
+ ++it) {
+ const int itIndex = indexOfIterator(_hbdata, it);
+ if (itIndex == _selfIndex) {
+ continue;
}
- if (remotePrimaryIndex != -1) {
- // If it's the same as last time, don't do anything further.
- if (_currentPrimaryIndex == remotePrimaryIndex) {
+ if (it->getState().primary() && it->up()) {
+ if (remotePrimaryIndex != -1) {
+ // two other nodes think they are primary (asynchronously polled)
+ // -- wait for things to settle down.
+ warning() << "two remote primaries (transiently)";
return HeartbeatResponseAction::makeNoAction();
}
- // Clear last heartbeat message on ourselves (why?)
- setMyHeartbeatMessage(now, "");
-
- // If we are also primary, this is a problem. Determine who should step down.
- if (_iAmPrimary()) {
- Timestamp remoteElectionTime = _hbdata[remotePrimaryIndex].getElectionTime();
- log() << "another primary seen with election time "
- << remoteElectionTime << " my election time is " << _electionTime;
-
- // Step down whomever has the older election time.
- if (remoteElectionTime > _electionTime) {
- if (_stepDownPending) {
- return HeartbeatResponseAction::makeNoAction();
- }
- _stepDownPending = true;
- log() << "stepping down; another primary was elected more recently";
- return HeartbeatResponseAction::makeStepDownSelfAction(_selfIndex);
- }
- else {
- log() << "another PRIMARY detected and it should step down"
- " since it was elected earlier than me";
- return HeartbeatResponseAction::makeStepDownRemoteAction(
- remotePrimaryIndex);
- }
- }
-
- _currentPrimaryIndex = remotePrimaryIndex;
- return HeartbeatResponseAction::makeNoAction();
+ remotePrimaryIndex = itIndex;
}
}
- ////////////////////
- // Phase 2
- ////////////////////
+ if (remotePrimaryIndex != -1) {
+ // If it's the same as last time, don't do anything further.
+ if (_currentPrimaryIndex == remotePrimaryIndex) {
+ return HeartbeatResponseAction::makeNoAction();
+ }
+ // Clear last heartbeat message on ourselves (why?)
+ setMyHeartbeatMessage(now, "");
- // We do not believe any remote to be primary.
+ // If we are also primary, this is a problem. Determine who should step down.
+ if (_iAmPrimary()) {
+ Timestamp remoteElectionTime = _hbdata[remotePrimaryIndex].getElectionTime();
+ log() << "another primary seen with election time " << remoteElectionTime
+ << " my election time is " << _electionTime;
- // If we are primary, check if we can still see majority of the set;
- // stepdown if we can't.
- if (_iAmPrimary()) {
- if (CannotSeeMajority & _getMyUnelectableReason(now, lastOpApplied)) {
- if (_stepDownPending) {
- return HeartbeatResponseAction::makeNoAction();
+ // Step down whomever has the older election time.
+ if (remoteElectionTime > _electionTime) {
+ if (_stepDownPending) {
+ return HeartbeatResponseAction::makeNoAction();
+ }
+ _stepDownPending = true;
+ log() << "stepping down; another primary was elected more recently";
+ return HeartbeatResponseAction::makeStepDownSelfAction(_selfIndex);
+ } else {
+ log() << "another PRIMARY detected and it should step down"
+ " since it was elected earlier than me";
+ return HeartbeatResponseAction::makeStepDownRemoteAction(remotePrimaryIndex);
}
- _stepDownPending = true;
- log() << "can't see a majority of the set, relinquishing primary";
- return HeartbeatResponseAction::makeStepDownSelfAction(_selfIndex);
}
- LOG(2) << "Choosing to remain primary";
+ _currentPrimaryIndex = remotePrimaryIndex;
return HeartbeatResponseAction::makeNoAction();
}
+ }
- fassert(18505, _currentPrimaryIndex == -1);
-
- const MemberState currentState = getMemberState();
- if (originalState.recovering() && currentState.secondary()) {
- // We just transitioned from RECOVERING to SECONDARY, this can only happen if we
- // received a heartbeat with an auth error when previously all the heartbeats we'd
- // received had auth errors. In this case, don't return makeElectAction() because
- // that could cause the election to start before the ReplicationCoordinator has updated
- // its notion of the member state to SECONDARY. Instead return noAction so that the
-            // ReplicationCoordinator knows to update its tracking of the member state off of the
- // TopologyCoordinator, and leave starting the election until the next heartbeat comes
- // back.
- return HeartbeatResponseAction::makeNoAction();
- }
+ ////////////////////
+ // Phase 2
+ ////////////////////
- // At this point, there is no primary anywhere. Check to see if we should become a
- // candidate.
- if (!checkShouldStandForElection(now, lastOpApplied)) {
- return HeartbeatResponseAction::makeNoAction();
+ // We do not believe any remote to be primary.
+
+    // If we are primary, check if we can still see a majority of the set;
+ // stepdown if we can't.
+ if (_iAmPrimary()) {
+ if (CannotSeeMajority & _getMyUnelectableReason(now, lastOpApplied)) {
+ if (_stepDownPending) {
+ return HeartbeatResponseAction::makeNoAction();
+ }
+ _stepDownPending = true;
+ log() << "can't see a majority of the set, relinquishing primary";
+ return HeartbeatResponseAction::makeStepDownSelfAction(_selfIndex);
}
- return HeartbeatResponseAction::makeElectAction();
+
+ LOG(2) << "Choosing to remain primary";
+ return HeartbeatResponseAction::makeNoAction();
}
- bool TopologyCoordinatorImpl::checkShouldStandForElection(
- Date_t now, const OpTime& lastOpApplied) {
- if (_currentPrimaryIndex != -1) {
- return false;
- }
- invariant (_role != Role::leader);
+ fassert(18505, _currentPrimaryIndex == -1);
- if (_role == Role::candidate) {
- LOG(2) << "Not standing for election again; already candidate";
- return false;
- }
+ const MemberState currentState = getMemberState();
+ if (originalState.recovering() && currentState.secondary()) {
+        // We just transitioned from RECOVERING to SECONDARY; this can only happen if we
+ // received a heartbeat with an auth error when previously all the heartbeats we'd
+ // received had auth errors. In this case, don't return makeElectAction() because
+ // that could cause the election to start before the ReplicationCoordinator has updated
+ // its notion of the member state to SECONDARY. Instead return noAction so that the
+        // ReplicationCoordinator knows to update its tracking of the member state off of the
+ // TopologyCoordinator, and leave starting the election until the next heartbeat comes
+ // back.
+ return HeartbeatResponseAction::makeNoAction();
+ }
- const UnelectableReasonMask unelectableReason = _getMyUnelectableReason(now, lastOpApplied);
- if (NotCloseEnoughToLatestOptime & unelectableReason) {
- LOG(2) << "Not standing for election because " <<
- _getUnelectableReasonString(unelectableReason) << "; my last optime is " <<
- lastOpApplied << " and the newest is " << _latestKnownOpTime(lastOpApplied);
- return false;
- }
- if (unelectableReason) {
- LOG(2) << "Not standing for election because " <<
- _getUnelectableReasonString(unelectableReason);
- return false;
- }
- if (_electionSleepUntil > now) {
- LOG(2) << "Not standing for election before " <<
- dateToISOStringLocal(_electionSleepUntil) << " because I stood too recently";
- return false;
- }
- // All checks passed, become a candidate and start election proceedings.
- _role = Role::candidate;
- return true;
+ // At this point, there is no primary anywhere. Check to see if we should become a
+ // candidate.
+ if (!checkShouldStandForElection(now, lastOpApplied)) {
+ return HeartbeatResponseAction::makeNoAction();
}
+ return HeartbeatResponseAction::makeElectAction();
+}
- bool TopologyCoordinatorImpl::_aMajoritySeemsToBeUp() const {
- int vUp = 0;
- for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
- it != _hbdata.end();
- ++it) {
- const int itIndex = indexOfIterator(_hbdata, it);
- if (itIndex == _selfIndex || it->up()) {
- vUp += _rsConfig.getMemberAt(itIndex).getNumVotes();
- }
- }
+bool TopologyCoordinatorImpl::checkShouldStandForElection(Date_t now, const OpTime& lastOpApplied) {
+ if (_currentPrimaryIndex != -1) {
+ return false;
+ }
+ invariant(_role != Role::leader);
- return vUp * 2 > _rsConfig.getTotalVotingMembers();
+ if (_role == Role::candidate) {
+ LOG(2) << "Not standing for election again; already candidate";
+ return false;
}
- bool TopologyCoordinatorImpl::_isOpTimeCloseEnoughToLatestToElect(
- const OpTime& otherOpTime, const OpTime& ourLastOpApplied) const {
- const OpTime latestKnownOpTime = _latestKnownOpTime(ourLastOpApplied);
- // Use addition instead of subtraction to avoid overflow.
- return otherOpTime.getSecs() + 10 >= (latestKnownOpTime.getSecs());
+ const UnelectableReasonMask unelectableReason = _getMyUnelectableReason(now, lastOpApplied);
+ if (NotCloseEnoughToLatestOptime & unelectableReason) {
+ LOG(2) << "Not standing for election because "
+ << _getUnelectableReasonString(unelectableReason) << "; my last optime is "
+ << lastOpApplied << " and the newest is " << _latestKnownOpTime(lastOpApplied);
+ return false;
+ }
+ if (unelectableReason) {
+ LOG(2) << "Not standing for election because "
+ << _getUnelectableReasonString(unelectableReason);
+ return false;
}
+ if (_electionSleepUntil > now) {
+ LOG(2) << "Not standing for election before " << dateToISOStringLocal(_electionSleepUntil)
+ << " because I stood too recently";
+ return false;
+ }
+ // All checks passed, become a candidate and start election proceedings.
+ _role = Role::candidate;
+ return true;
+}
- bool TopologyCoordinatorImpl::_iAmPrimary() const {
- if (_role == Role::leader) {
- invariant(_currentPrimaryIndex == _selfIndex);
- return true;
+bool TopologyCoordinatorImpl::_aMajoritySeemsToBeUp() const {
+ int vUp = 0;
+ for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin(); it != _hbdata.end();
+ ++it) {
+ const int itIndex = indexOfIterator(_hbdata, it);
+ if (itIndex == _selfIndex || it->up()) {
+ vUp += _rsConfig.getMemberAt(itIndex).getNumVotes();
}
- return false;
}
- OpTime TopologyCoordinatorImpl::_latestKnownOpTime(const OpTime& ourLastOpApplied) const {
- OpTime latest = ourLastOpApplied;
+ return vUp * 2 > _rsConfig.getTotalVotingMembers();
+}
- for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
- it != _hbdata.end();
- ++it) {
+bool TopologyCoordinatorImpl::_isOpTimeCloseEnoughToLatestToElect(
+ const OpTime& otherOpTime, const OpTime& ourLastOpApplied) const {
+ const OpTime latestKnownOpTime = _latestKnownOpTime(ourLastOpApplied);
+ // Use addition instead of subtraction to avoid overflow.
+ return otherOpTime.getSecs() + 10 >= (latestKnownOpTime.getSecs());
+}
- if (indexOfIterator(_hbdata, it) == _selfIndex) {
- continue;
- }
- if (!it->up()) {
- continue;
- }
+bool TopologyCoordinatorImpl::_iAmPrimary() const {
+ if (_role == Role::leader) {
+ invariant(_currentPrimaryIndex == _selfIndex);
+ return true;
+ }
+ return false;
+}
- OpTime optime = it->getOpTime();
+OpTime TopologyCoordinatorImpl::_latestKnownOpTime(const OpTime& ourLastOpApplied) const {
+ OpTime latest = ourLastOpApplied;
- if (optime > latest) {
- latest = optime;
- }
+ for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin(); it != _hbdata.end();
+ ++it) {
+ if (indexOfIterator(_hbdata, it) == _selfIndex) {
+ continue;
}
+ if (!it->up()) {
+ continue;
+ }
+
+ OpTime optime = it->getOpTime();
- return latest;
+ if (optime > latest) {
+ latest = optime;
+ }
}
- bool TopologyCoordinatorImpl::_isMemberHigherPriority(int memberOneIndex,
- int memberTwoIndex) const {
- if (memberOneIndex == -1)
- return false;
+ return latest;
+}
- if (memberTwoIndex == -1)
- return true;
+bool TopologyCoordinatorImpl::_isMemberHigherPriority(int memberOneIndex,
+ int memberTwoIndex) const {
+ if (memberOneIndex == -1)
+ return false;
- return _rsConfig.getMemberAt(memberOneIndex).getPriority() >
- _rsConfig.getMemberAt(memberTwoIndex).getPriority();
- }
+ if (memberTwoIndex == -1)
+ return true;
- int TopologyCoordinatorImpl::_getHighestPriorityElectableIndex(
- Date_t now, const OpTime& lastOpApplied) const {
- int maxIndex = -1;
- for (int currentIndex = 0; currentIndex < _rsConfig.getNumMembers(); currentIndex++) {
- UnelectableReasonMask reason = currentIndex == _selfIndex ?
- _getMyUnelectableReason(now, lastOpApplied) :
- _getUnelectableReason(currentIndex, lastOpApplied);
- if (None == reason && _isMemberHigherPriority(currentIndex, maxIndex)) {
- maxIndex = currentIndex;
- }
- }
+ return _rsConfig.getMemberAt(memberOneIndex).getPriority() >
+ _rsConfig.getMemberAt(memberTwoIndex).getPriority();
+}
- return maxIndex;
+int TopologyCoordinatorImpl::_getHighestPriorityElectableIndex(Date_t now,
+ const OpTime& lastOpApplied) const {
+ int maxIndex = -1;
+ for (int currentIndex = 0; currentIndex < _rsConfig.getNumMembers(); currentIndex++) {
+ UnelectableReasonMask reason = currentIndex == _selfIndex
+ ? _getMyUnelectableReason(now, lastOpApplied)
+ : _getUnelectableReason(currentIndex, lastOpApplied);
+ if (None == reason && _isMemberHigherPriority(currentIndex, maxIndex)) {
+ maxIndex = currentIndex;
+ }
}
- void TopologyCoordinatorImpl::prepareForStepDown() {
- _stepDownPending = true;
- }
+ return maxIndex;
+}
- void TopologyCoordinatorImpl::changeMemberState_forTest(const MemberState& newMemberState,
- const Timestamp& electionTime) {
- invariant(_selfIndex != -1);
- if (newMemberState == getMemberState())
- return;
- switch(newMemberState.s) {
+void TopologyCoordinatorImpl::prepareForStepDown() {
+ _stepDownPending = true;
+}
+
+void TopologyCoordinatorImpl::changeMemberState_forTest(const MemberState& newMemberState,
+ const Timestamp& electionTime) {
+ invariant(_selfIndex != -1);
+ if (newMemberState == getMemberState())
+ return;
+ switch (newMemberState.s) {
case MemberState::RS_PRIMARY:
_role = Role::candidate;
processWinElection(OID(), electionTime);
@@ -1374,728 +1298,692 @@ namespace {
}
break;
case MemberState::RS_STARTUP:
- updateConfig(
- ReplicaSetConfig(),
- -1,
- Date_t(),
- OpTime());
+ updateConfig(ReplicaSetConfig(), -1, Date_t(), OpTime());
break;
default:
severe() << "Cannot switch to state " << newMemberState;
invariant(false);
- }
- if (getMemberState() != newMemberState.s) {
- severe() << "Expected to enter state " << newMemberState << " but am now in " <<
- getMemberState();
- invariant(false);
- }
- log() << newMemberState;
}
-
- void TopologyCoordinatorImpl::_setCurrentPrimaryForTest(int primaryIndex) {
- if (primaryIndex == _selfIndex) {
- changeMemberState_forTest(MemberState::RS_PRIMARY);
- }
- else {
- if (_iAmPrimary()) {
- changeMemberState_forTest(MemberState::RS_SECONDARY);
- }
- if (primaryIndex != -1) {
- ReplSetHeartbeatResponse hbResponse;
- hbResponse.setState(MemberState::RS_PRIMARY);
- hbResponse.setElectionTime(Timestamp());
- hbResponse.setOpTime(_hbdata[primaryIndex].getOpTime());
- hbResponse.setSyncingTo(HostAndPort());
- hbResponse.setHbMsg("");
- _hbdata[primaryIndex].setUpValues(
- _hbdata[primaryIndex].getLastHeartbeat(),
- _rsConfig.getMemberAt(primaryIndex).getHostAndPort(),
- hbResponse);
- }
- _currentPrimaryIndex = primaryIndex;
- }
+ if (getMemberState() != newMemberState.s) {
+ severe() << "Expected to enter state " << newMemberState << " but am now in "
+ << getMemberState();
+ invariant(false);
}
+ log() << newMemberState;
+}
- const MemberConfig* TopologyCoordinatorImpl::_currentPrimaryMember() const {
- if (_currentPrimaryIndex == -1)
- return NULL;
-
- return &(_rsConfig.getMemberAt(_currentPrimaryIndex));
- }
-
- void TopologyCoordinatorImpl::prepareStatusResponse(
- const ReplicationExecutor::CallbackArgs& data,
- Date_t now,
- unsigned selfUptime,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result) {
- if (data.status == ErrorCodes::CallbackCanceled) {
- *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
- return;
- }
-
- // output for each member
- vector<BSONObj> membersOut;
- const MemberState myState = getMemberState();
+void TopologyCoordinatorImpl::_setCurrentPrimaryForTest(int primaryIndex) {
+ if (primaryIndex == _selfIndex) {
+ changeMemberState_forTest(MemberState::RS_PRIMARY);
+ } else {
+ if (_iAmPrimary()) {
+ changeMemberState_forTest(MemberState::RS_SECONDARY);
+ }
+ if (primaryIndex != -1) {
+ ReplSetHeartbeatResponse hbResponse;
+ hbResponse.setState(MemberState::RS_PRIMARY);
+ hbResponse.setElectionTime(Timestamp());
+ hbResponse.setOpTime(_hbdata[primaryIndex].getOpTime());
+ hbResponse.setSyncingTo(HostAndPort());
+ hbResponse.setHbMsg("");
+ _hbdata[primaryIndex].setUpValues(_hbdata[primaryIndex].getLastHeartbeat(),
+ _rsConfig.getMemberAt(primaryIndex).getHostAndPort(),
+ hbResponse);
+ }
+ _currentPrimaryIndex = primaryIndex;
+ }
+}
+
+const MemberConfig* TopologyCoordinatorImpl::_currentPrimaryMember() const {
+ if (_currentPrimaryIndex == -1)
+ return NULL;
+
+ return &(_rsConfig.getMemberAt(_currentPrimaryIndex));
+}
+
+void TopologyCoordinatorImpl::prepareStatusResponse(const ReplicationExecutor::CallbackArgs& data,
+ Date_t now,
+ unsigned selfUptime,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result) {
+ if (data.status == ErrorCodes::CallbackCanceled) {
+ *result = Status(ErrorCodes::ShutdownInProgress, "replication system is shutting down");
+ return;
+ }
+
+ // output for each member
+ vector<BSONObj> membersOut;
+ const MemberState myState = getMemberState();
+
+ if (_selfIndex == -1) {
+ // We're REMOVED or have an invalid config
+ response->append("state", static_cast<int>(myState.s));
+ response->append("stateStr", myState.toString());
+ response->append("uptime", selfUptime);
+
+ BSONObjBuilder opTime(response->subobjStart("optime"));
+ opTime.append("ts", lastOpApplied.getTimestamp());
+ opTime.append("term", lastOpApplied.getTerm());
+ opTime.done();
+
+ response->appendDate("optimeDate",
+ Date_t::fromDurationSinceEpoch(Seconds(lastOpApplied.getSecs())));
+ if (_maintenanceModeCalls) {
+ response->append("maintenanceMode", _maintenanceModeCalls);
+ }
+ std::string s = _getHbmsg(now);
+ if (!s.empty())
+ response->append("infoMessage", s);
+ *result = Status(ErrorCodes::InvalidReplicaSetConfig,
+ "Our replica set config is invalid or we are not a member of it");
+ return;
+ }
+
+ for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin(); it != _hbdata.end();
+ ++it) {
+ const int itIndex = indexOfIterator(_hbdata, it);
+ if (itIndex == _selfIndex) {
+ // add self
+ BSONObjBuilder bb;
+ bb.append("_id", _selfConfig().getId());
+ bb.append("name", _selfConfig().getHostAndPort().toString());
+ bb.append("health", 1.0);
+ bb.append("state", static_cast<int>(myState.s));
+ bb.append("stateStr", myState.toString());
+ bb.append("uptime", selfUptime);
+ if (!_selfConfig().isArbiter()) {
+ BSONObjBuilder opTime(bb.subobjStart("optime"));
+ opTime.append("ts", lastOpApplied.getTimestamp());
+ opTime.append("term", lastOpApplied.getTerm());
+ opTime.done();
+
+ bb.appendDate("optimeDate",
+ Date_t::fromDurationSinceEpoch(Seconds(lastOpApplied.getSecs())));
+ }
+
+ if (!_syncSource.empty() && !_iAmPrimary()) {
+ bb.append("syncingTo", _syncSource.toString());
+ }
- if (_selfIndex == -1) {
- // We're REMOVED or have an invalid config
- response->append("state", static_cast<int>(myState.s));
- response->append("stateStr", myState.toString());
- response->append("uptime", selfUptime);
-
- BSONObjBuilder opTime(response->subobjStart("optime"));
- opTime.append("ts", lastOpApplied.getTimestamp());
- opTime.append("term", lastOpApplied.getTerm());
- opTime.done();
-
- response->appendDate("optimeDate",
- Date_t::fromDurationSinceEpoch(Seconds(lastOpApplied.getSecs())));
if (_maintenanceModeCalls) {
- response->append("maintenanceMode", _maintenanceModeCalls);
+ bb.append("maintenanceMode", _maintenanceModeCalls);
}
- std::string s = _getHbmsg(now);
- if( !s.empty() )
- response->append("infoMessage", s);
- *result = Status(ErrorCodes::InvalidReplicaSetConfig,
- "Our replica set config is invalid or we are not a member of it");
- return;
- }
-
- for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
- it != _hbdata.end();
- ++it) {
- const int itIndex = indexOfIterator(_hbdata, it);
- if (itIndex == _selfIndex) {
- // add self
- BSONObjBuilder bb;
- bb.append("_id", _selfConfig().getId());
- bb.append("name", _selfConfig().getHostAndPort().toString());
- bb.append("health", 1.0);
- bb.append("state", static_cast<int>(myState.s));
- bb.append("stateStr", myState.toString());
- bb.append("uptime", selfUptime);
- if (!_selfConfig().isArbiter()) {
- BSONObjBuilder opTime(bb.subobjStart("optime"));
- opTime.append("ts", lastOpApplied.getTimestamp());
- opTime.append("term", lastOpApplied.getTerm());
- opTime.done();
-
- bb.appendDate("optimeDate",
- Date_t::fromDurationSinceEpoch(Seconds(lastOpApplied.getSecs())));
- }
-
- if (!_syncSource.empty() && !_iAmPrimary()) {
- bb.append("syncingTo", _syncSource.toString());
- }
-
- if (_maintenanceModeCalls) {
- bb.append("maintenanceMode", _maintenanceModeCalls);
- }
-
- std::string s = _getHbmsg(now);
- if( !s.empty() )
- bb.append("infoMessage", s);
- if (myState.primary()) {
- bb.append("electionTime", _electionTime);
- bb.appendDate("electionDate",
- Date_t::fromDurationSinceEpoch(Seconds(_electionTime.getSecs())));
- }
- bb.appendIntOrLL("configVersion", _rsConfig.getConfigVersion());
- bb.append("self", true);
- membersOut.push_back(bb.obj());
+ std::string s = _getHbmsg(now);
+ if (!s.empty())
+ bb.append("infoMessage", s);
+
+ if (myState.primary()) {
+ bb.append("electionTime", _electionTime);
+ bb.appendDate("electionDate",
+ Date_t::fromDurationSinceEpoch(Seconds(_electionTime.getSecs())));
+ }
+ bb.appendIntOrLL("configVersion", _rsConfig.getConfigVersion());
+ bb.append("self", true);
+ membersOut.push_back(bb.obj());
+ } else {
+ // add non-self member
+ const MemberConfig& itConfig = _rsConfig.getMemberAt(itIndex);
+ BSONObjBuilder bb;
+ bb.append("_id", itConfig.getId());
+ bb.append("name", itConfig.getHostAndPort().toString());
+ double h = it->getHealth();
+ bb.append("health", h);
+ const MemberState state = it->getState();
+ bb.append("state", static_cast<int>(state.s));
+ if (h == 0) {
+                // if we can't connect, the state info is from the past
+ // and could be confusing to show
+ bb.append("stateStr", "(not reachable/healthy)");
+ } else {
+ bb.append("stateStr", it->getState().toString());
+ }
+
+ const unsigned int uptime = static_cast<unsigned int>((
+ it->getUpSince() != Date_t() ? durationCount<Seconds>(now - it->getUpSince()) : 0));
+ bb.append("uptime", uptime);
+ if (!itConfig.isArbiter()) {
+ BSONObjBuilder opTime(bb.subobjStart("optime"));
+ opTime.append("ts", it->getOpTime().getTimestamp());
+ opTime.append("term", it->getOpTime().getTerm());
+ opTime.done();
+
+ bb.appendDate("optimeDate",
+ Date_t::fromDurationSinceEpoch(Seconds(it->getOpTime().getSecs())));
+ }
+ bb.appendDate("lastHeartbeat", it->getLastHeartbeat());
+ bb.appendDate("lastHeartbeatRecv", it->getLastHeartbeatRecv());
+ const int ping = _getPing(itConfig.getHostAndPort());
+ if (ping != -1) {
+ bb.append("pingMs", ping);
+ std::string s = it->getLastHeartbeatMsg();
+ if (!s.empty())
+ bb.append("lastHeartbeatMessage", s);
+ }
+ if (it->hasAuthIssue()) {
+ bb.append("authenticated", false);
+ }
+ const HostAndPort& syncSource = it->getSyncSource();
+ if (!syncSource.empty() && !state.primary()) {
+ bb.append("syncingTo", syncSource.toString());
}
- else {
- // add non-self member
- const MemberConfig& itConfig = _rsConfig.getMemberAt(itIndex);
- BSONObjBuilder bb;
- bb.append("_id", itConfig.getId());
- bb.append("name", itConfig.getHostAndPort().toString());
- double h = it->getHealth();
- bb.append("health", h);
- const MemberState state = it->getState();
- bb.append("state", static_cast<int>(state.s));
- if( h == 0 ) {
- // if we can't connect the state info is from the past
- // and could be confusing to show
- bb.append("stateStr", "(not reachable/healthy)");
- }
- else {
- bb.append("stateStr", it->getState().toString());
- }
-
- const unsigned int uptime = static_cast<unsigned int>(
- (it->getUpSince() != Date_t()?
- durationCount<Seconds>(now - it->getUpSince()) :
- 0));
- bb.append("uptime", uptime);
- if (!itConfig.isArbiter()) {
- BSONObjBuilder opTime(bb.subobjStart("optime"));
- opTime.append("ts", it->getOpTime().getTimestamp());
- opTime.append("term", it->getOpTime().getTerm());
- opTime.done();
-
- bb.appendDate("optimeDate",
- Date_t::fromDurationSinceEpoch(Seconds(it->getOpTime().getSecs())));
- }
- bb.appendDate("lastHeartbeat", it->getLastHeartbeat());
- bb.appendDate("lastHeartbeatRecv", it->getLastHeartbeatRecv());
- const int ping = _getPing(itConfig.getHostAndPort());
- if (ping != -1) {
- bb.append("pingMs", ping);
- std::string s = it->getLastHeartbeatMsg();
- if( !s.empty() )
- bb.append("lastHeartbeatMessage", s);
- }
- if (it->hasAuthIssue()) {
- bb.append("authenticated", false);
- }
- const HostAndPort& syncSource = it->getSyncSource();
- if (!syncSource.empty() && !state.primary()) {
- bb.append("syncingTo", syncSource.toString());
- }
- if (state == MemberState::RS_PRIMARY) {
- bb.append("electionTime", it->getElectionTime());
- bb.appendDate("electionDate",
- Date_t::fromDurationSinceEpoch(
- Seconds(it->getElectionTime().getSecs())));
- }
- bb.appendIntOrLL("configVersion", it->getConfigVersion());
- membersOut.push_back(bb.obj());
+ if (state == MemberState::RS_PRIMARY) {
+ bb.append("electionTime", it->getElectionTime());
+ bb.appendDate(
+ "electionDate",
+ Date_t::fromDurationSinceEpoch(Seconds(it->getElectionTime().getSecs())));
}
+ bb.appendIntOrLL("configVersion", it->getConfigVersion());
+ membersOut.push_back(bb.obj());
}
+ }
- // sort members bson
- sort(membersOut.begin(), membersOut.end());
-
- response->append("set",
- _rsConfig.isInitialized() ? _rsConfig.getReplSetName() : "");
- response->append("date", now);
- response->append("myState", myState.s);
+ // sort members bson
+ sort(membersOut.begin(), membersOut.end());
- // Add sync source info
- if (!_syncSource.empty() && !myState.primary() && !myState.removed()) {
- response->append("syncingTo", _syncSource.toString());
- }
+ response->append("set", _rsConfig.isInitialized() ? _rsConfig.getReplSetName() : "");
+ response->append("date", now);
+ response->append("myState", myState.s);
- response->append("members", membersOut);
- *result = Status::OK();
+ // Add sync source info
+ if (!_syncSource.empty() && !myState.primary() && !myState.removed()) {
+ response->append("syncingTo", _syncSource.toString());
}
- void TopologyCoordinatorImpl::fillIsMasterForReplSet(IsMasterResponse* response) {
+ response->append("members", membersOut);
+ *result = Status::OK();
+}
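
The loop above emits one status document per member; for members other than self, the stateStr and uptime fields come from heartbeat data and are masked when the member is unreachable. A minimal standalone sketch of that per-member decision, using plain standard-library stand-ins (MemberSnapshot and its field names are illustrative, not the real MemberHeartbeatData API):

#include <chrono>
#include <iostream>
#include <string>

// Illustrative stand-in for the heartbeat data consulted above (hypothetical type).
struct MemberSnapshot {
    double health;                                  // 0.0 = unreachable, 1.0 = healthy
    std::string lastKnownState;                     // e.g. "SECONDARY"
    std::chrono::system_clock::time_point upSince;  // default (epoch) means "never seen up"
};

std::string describeState(const MemberSnapshot& m) {
    // Mirrors the check above: a dead member's last known state is stale, so hide it.
    return m.health == 0 ? "(not reachable/healthy)" : m.lastKnownState;
}

long long uptimeSeconds(const MemberSnapshot& m, std::chrono::system_clock::time_point now) {
    using namespace std::chrono;
    return m.upSince == system_clock::time_point{}
        ? 0
        : duration_cast<seconds>(now - m.upSince).count();
}

int main() {
    MemberSnapshot down{0.0, "SECONDARY", {}};
    std::cout << describeState(down) << " uptime="
              << uptimeSeconds(down, std::chrono::system_clock::now()) << "\n";
    // prints: (not reachable/healthy) uptime=0
}
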
- const MemberState myState = getMemberState();
- if (!_rsConfig.isInitialized() || myState.removed()) {
- response->markAsNoConfig();
- return;
- }
-
- response->setReplSetName(_rsConfig.getReplSetName());
- response->setReplSetVersion(_rsConfig.getConfigVersion());
- response->setIsMaster(myState.primary());
- response->setIsSecondary(myState.secondary());
+void TopologyCoordinatorImpl::fillIsMasterForReplSet(IsMasterResponse* response) {
+ const MemberState myState = getMemberState();
+ if (!_rsConfig.isInitialized() || myState.removed()) {
+ response->markAsNoConfig();
+ return;
+ }
- {
- for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
- it != _rsConfig.membersEnd(); ++it) {
- if (it->isHidden() || it->getSlaveDelay() > Seconds{0}) {
- continue;
- }
+ response->setReplSetName(_rsConfig.getReplSetName());
+ response->setReplSetVersion(_rsConfig.getConfigVersion());
+ response->setIsMaster(myState.primary());
+ response->setIsSecondary(myState.secondary());
- if (it->isElectable()) {
- response->addHost(it->getHostAndPort());
- }
- else if (it->isArbiter()) {
- response->addArbiter(it->getHostAndPort());
- }
- else {
- response->addPassive(it->getHostAndPort());
- }
+ {
+ for (ReplicaSetConfig::MemberIterator it = _rsConfig.membersBegin();
+ it != _rsConfig.membersEnd();
+ ++it) {
+ if (it->isHidden() || it->getSlaveDelay() > Seconds{0}) {
+ continue;
}
- }
-
- const MemberConfig* curPrimary = _currentPrimaryMember();
- if (curPrimary) {
- response->setPrimary(curPrimary->getHostAndPort());
- }
- const MemberConfig& selfConfig = _rsConfig.getMemberAt(_selfIndex);
- if (selfConfig.isArbiter()) {
- response->setIsArbiterOnly(true);
- }
- else if (selfConfig.getPriority() == 0) {
- response->setIsPassive(true);
- }
- if (selfConfig.getSlaveDelay().count()) {
- response->setSlaveDelay(selfConfig.getSlaveDelay());
- }
- if (selfConfig.isHidden()) {
- response->setIsHidden(true);
- }
- if (!selfConfig.shouldBuildIndexes()) {
- response->setShouldBuildIndexes(false);
- }
- const ReplicaSetTagConfig tagConfig = _rsConfig.getTagConfig();
- if (selfConfig.hasTags(tagConfig)) {
- for (MemberConfig::TagIterator tag = selfConfig.tagsBegin();
- tag != selfConfig.tagsEnd(); ++tag) {
- std::string tagKey = tagConfig.getTagKey(*tag);
- if (tagKey[0] == '$') {
- // Filter out internal tags
- continue;
- }
- response->addTag(tagKey, tagConfig.getTagValue(*tag));
+ if (it->isElectable()) {
+ response->addHost(it->getHostAndPort());
+ } else if (it->isArbiter()) {
+ response->addArbiter(it->getHostAndPort());
+ } else {
+ response->addPassive(it->getHostAndPort());
}
}
- response->setMe(selfConfig.getHostAndPort());
- if (_iAmPrimary()) {
- response->setElectionId(_electionId);
- }
}
- void TopologyCoordinatorImpl::prepareFreezeResponse(
- Date_t now, int secs, BSONObjBuilder* response) {
-
- if (secs == 0) {
- _stepDownUntil = now;
- log() << "'unfreezing'";
- response->append("info", "unfreezing");
-
- if (_followerMode == MemberState::RS_SECONDARY &&
- _rsConfig.getNumMembers() == 1 &&
- _selfIndex == 0 &&
- _rsConfig.getMemberAt(_selfIndex).isElectable()) {
- // If we are a one-node replica set, we're the one member,
- // we're electable, and we are currently in followerMode SECONDARY,
- // we must transition to candidate now that our stepdown period
- // is no longer active, in leiu of heartbeats.
- _role = Role::candidate;
- }
- }
- else {
- if ( secs == 1 )
- response->append("warning", "you really want to freeze for only 1 second?");
+ const MemberConfig* curPrimary = _currentPrimaryMember();
+ if (curPrimary) {
+ response->setPrimary(curPrimary->getHostAndPort());
+ }
- if (!_iAmPrimary()) {
- _stepDownUntil = std::max(_stepDownUntil, now + Seconds(secs));
- log() << "'freezing' for " << secs << " seconds";
- }
- else {
- log() << "received freeze command but we are primary";
+ const MemberConfig& selfConfig = _rsConfig.getMemberAt(_selfIndex);
+ if (selfConfig.isArbiter()) {
+ response->setIsArbiterOnly(true);
+ } else if (selfConfig.getPriority() == 0) {
+ response->setIsPassive(true);
+ }
+ if (selfConfig.getSlaveDelay().count()) {
+ response->setSlaveDelay(selfConfig.getSlaveDelay());
+ }
+ if (selfConfig.isHidden()) {
+ response->setIsHidden(true);
+ }
+ if (!selfConfig.shouldBuildIndexes()) {
+ response->setShouldBuildIndexes(false);
+ }
+ const ReplicaSetTagConfig tagConfig = _rsConfig.getTagConfig();
+ if (selfConfig.hasTags(tagConfig)) {
+ for (MemberConfig::TagIterator tag = selfConfig.tagsBegin(); tag != selfConfig.tagsEnd();
+ ++tag) {
+ std::string tagKey = tagConfig.getTagKey(*tag);
+ if (tagKey[0] == '$') {
+ // Filter out internal tags
+ continue;
}
+ response->addTag(tagKey, tagConfig.getTagValue(*tag));
}
}
+ response->setMe(selfConfig.getHostAndPort());
+ if (_iAmPrimary()) {
+ response->setElectionId(_electionId);
+ }
+}
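
fillIsMasterForReplSet partitions visible members into hosts, arbiters and passives while dropping hidden and slave-delayed members entirely. A hedged sketch of that categorization with simplified, hypothetical types:

#include <string>
#include <vector>

// Hypothetical, simplified stand-in for MemberConfig.
struct Member {
    std::string hostAndPort;
    bool hidden = false;
    bool slaveDelayed = false;
    bool electable = false;
    bool arbiter = false;
};

struct IsMasterHosts {
    std::vector<std::string> hosts, arbiters, passives;
};

// Mirrors the loop above: hidden/delayed members are omitted entirely,
// everything else lands in exactly one of the three lists.
IsMasterHosts categorize(const std::vector<Member>& members) {
    IsMasterHosts out;
    for (const Member& m : members) {
        if (m.hidden || m.slaveDelayed)
            continue;
        if (m.electable)
            out.hosts.push_back(m.hostAndPort);
        else if (m.arbiter)
            out.arbiters.push_back(m.hostAndPort);
        else
            out.passives.push_back(m.hostAndPort);
    }
    return out;
}
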
- bool TopologyCoordinatorImpl::becomeCandidateIfStepdownPeriodOverAndSingleNodeSet(Date_t now) {
- if (_stepDownUntil > now) {
- return false;
- }
+void TopologyCoordinatorImpl::prepareFreezeResponse(Date_t now,
+ int secs,
+ BSONObjBuilder* response) {
+ if (secs == 0) {
+ _stepDownUntil = now;
+ log() << "'unfreezing'";
+ response->append("info", "unfreezing");
- if (_followerMode == MemberState::RS_SECONDARY &&
- _rsConfig.getNumMembers() == 1 &&
- _selfIndex == 0 &&
- _rsConfig.getMemberAt(_selfIndex).isElectable()) {
- // If the new config describes a one-node replica set, we're the one member,
+ if (_followerMode == MemberState::RS_SECONDARY && _rsConfig.getNumMembers() == 1 &&
+ _selfIndex == 0 && _rsConfig.getMemberAt(_selfIndex).isElectable()) {
+ // If we are a one-node replica set, we're the one member,
// we're electable, and we are currently in followerMode SECONDARY,
- // we must transition to candidate, in leiu of heartbeats.
+ // we must transition to candidate now that our stepdown period
+            // is no longer active, in lieu of heartbeats.
_role = Role::candidate;
- return true;
}
- return false;
- }
+ } else {
+ if (secs == 1)
+ response->append("warning", "you really want to freeze for only 1 second?");
- void TopologyCoordinatorImpl::setElectionSleepUntil(Date_t newTime) {
- if (_electionSleepUntil < newTime) {
- _electionSleepUntil = newTime;
+ if (!_iAmPrimary()) {
+ _stepDownUntil = std::max(_stepDownUntil, now + Seconds(secs));
+ log() << "'freezing' for " << secs << " seconds";
+ } else {
+ log() << "received freeze command but we are primary";
}
}
+}
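
The freeze semantics above reduce to simple deadline arithmetic: secs == 0 clears the stepdown deadline immediately, while any other value only ever extends it. A small self-contained sketch of that bookkeeping with std::chrono (FreezeState and its members are illustrative names):

#include <algorithm>
#include <chrono>

using Clock = std::chrono::steady_clock;

// Hypothetical standalone model of the freeze bookkeeping above.
struct FreezeState {
    Clock::time_point stepDownUntil{};

    void freeze(Clock::time_point now, int secs) {
        if (secs == 0) {
            stepDownUntil = now;  // "unfreeze": stop refusing to stand immediately
        } else {
            // Never shorten an existing freeze; only extend it.
            stepDownUntil = std::max(stepDownUntil, now + std::chrono::seconds(secs));
        }
    }

    bool frozen(Clock::time_point now) const {
        return stepDownUntil > now;
    }
};
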
- Timestamp TopologyCoordinatorImpl::getElectionTime() const {
- return _electionTime;
- }
-
- OID TopologyCoordinatorImpl::getElectionId() const {
- return _electionId;
- }
-
- int TopologyCoordinatorImpl::getCurrentPrimaryIndex() const {
- return _currentPrimaryIndex;
+bool TopologyCoordinatorImpl::becomeCandidateIfStepdownPeriodOverAndSingleNodeSet(Date_t now) {
+ if (_stepDownUntil > now) {
+ return false;
}
- Date_t TopologyCoordinatorImpl::getStepDownTime() const {
- return _stepDownUntil;
+ if (_followerMode == MemberState::RS_SECONDARY && _rsConfig.getNumMembers() == 1 &&
+ _selfIndex == 0 && _rsConfig.getMemberAt(_selfIndex).isElectable()) {
+ // If the new config describes a one-node replica set, we're the one member,
+ // we're electable, and we are currently in followerMode SECONDARY,
+        // we must transition to candidate, in lieu of heartbeats.
+ _role = Role::candidate;
+ return true;
}
-
- void TopologyCoordinatorImpl::_updateHeartbeatDataForReconfig(const ReplicaSetConfig& newConfig,
- int selfIndex,
- Date_t now) {
- std::vector<MemberHeartbeatData> oldHeartbeats;
- _hbdata.swap(oldHeartbeats);
-
- int index = 0;
- for (ReplicaSetConfig::MemberIterator it = newConfig.membersBegin();
- it != newConfig.membersEnd();
- ++it, ++index) {
- const MemberConfig& newMemberConfig = *it;
- // TODO: C++11: use emplace_back()
- if (index == selfIndex) {
- // Insert placeholder for ourself, though we will never consult it.
- _hbdata.push_back(MemberHeartbeatData());
- }
- else {
- MemberHeartbeatData newHeartbeatData;
- for (int oldIndex = 0; oldIndex < _rsConfig.getNumMembers(); ++oldIndex) {
- const MemberConfig& oldMemberConfig = _rsConfig.getMemberAt(oldIndex);
- if (oldMemberConfig.getId() == newMemberConfig.getId() &&
- oldMemberConfig.getHostAndPort() == newMemberConfig.getHostAndPort()) {
- // This member existed in the old config with the same member ID and
- // HostAndPort, so copy its heartbeat data over.
- newHeartbeatData = oldHeartbeats[oldIndex];
- break;
- }
+ return false;
+}
+
+void TopologyCoordinatorImpl::setElectionSleepUntil(Date_t newTime) {
+ if (_electionSleepUntil < newTime) {
+ _electionSleepUntil = newTime;
+ }
+}
+
+Timestamp TopologyCoordinatorImpl::getElectionTime() const {
+ return _electionTime;
+}
+
+OID TopologyCoordinatorImpl::getElectionId() const {
+ return _electionId;
+}
+
+int TopologyCoordinatorImpl::getCurrentPrimaryIndex() const {
+ return _currentPrimaryIndex;
+}
+
+Date_t TopologyCoordinatorImpl::getStepDownTime() const {
+ return _stepDownUntil;
+}
+
+void TopologyCoordinatorImpl::_updateHeartbeatDataForReconfig(const ReplicaSetConfig& newConfig,
+ int selfIndex,
+ Date_t now) {
+ std::vector<MemberHeartbeatData> oldHeartbeats;
+ _hbdata.swap(oldHeartbeats);
+
+ int index = 0;
+ for (ReplicaSetConfig::MemberIterator it = newConfig.membersBegin();
+ it != newConfig.membersEnd();
+ ++it, ++index) {
+ const MemberConfig& newMemberConfig = *it;
+ // TODO: C++11: use emplace_back()
+ if (index == selfIndex) {
+ // Insert placeholder for ourself, though we will never consult it.
+ _hbdata.push_back(MemberHeartbeatData());
+ } else {
+ MemberHeartbeatData newHeartbeatData;
+ for (int oldIndex = 0; oldIndex < _rsConfig.getNumMembers(); ++oldIndex) {
+ const MemberConfig& oldMemberConfig = _rsConfig.getMemberAt(oldIndex);
+ if (oldMemberConfig.getId() == newMemberConfig.getId() &&
+ oldMemberConfig.getHostAndPort() == newMemberConfig.getHostAndPort()) {
+ // This member existed in the old config with the same member ID and
+ // HostAndPort, so copy its heartbeat data over.
+ newHeartbeatData = oldHeartbeats[oldIndex];
+ break;
}
- _hbdata.push_back(newHeartbeatData);
}
+ _hbdata.push_back(newHeartbeatData);
}
}
+}
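
A reconfig keeps heartbeat history only for members whose id and host:port both match between the old and new configs; every other slot starts from an empty record. A standalone sketch of that carry-over rule, using simplified stand-in types rather than the real MemberConfig/MemberHeartbeatData classes:

#include <string>
#include <vector>

// Hypothetical, simplified stand-ins for the config and heartbeat records above.
struct MemberId {
    int id;
    std::string hostAndPort;
    bool operator==(const MemberId& other) const {
        return id == other.id && hostAndPort == other.hostAndPort;
    }
};
struct HeartbeatRecord {
    MemberId who;
    int pingMs = -1;  // -1 means "no data yet"
};

// Rebuild the heartbeat vector for newMembers, copying records whose id AND
// host:port both match an entry in the old vector; otherwise start fresh.
std::vector<HeartbeatRecord> carryOver(const std::vector<HeartbeatRecord>& old,
                                       const std::vector<MemberId>& newMembers) {
    std::vector<HeartbeatRecord> out;
    for (const MemberId& m : newMembers) {
        HeartbeatRecord rec{m};
        for (const HeartbeatRecord& o : old) {
            if (o.who == m) {
                rec = o;
                break;
            }
        }
        out.push_back(rec);
    }
    return out;
}
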
- // This function installs a new config object and recreates MemberHeartbeatData objects
- // that reflect the new config.
- void TopologyCoordinatorImpl::updateConfig(const ReplicaSetConfig& newConfig,
- int selfIndex,
- Date_t now,
- const OpTime& lastOpApplied) {
- invariant(_role != Role::candidate);
- invariant(selfIndex < newConfig.getNumMembers());
+// This function installs a new config object and recreates MemberHeartbeatData objects
+// that reflect the new config.
+void TopologyCoordinatorImpl::updateConfig(const ReplicaSetConfig& newConfig,
+ int selfIndex,
+ Date_t now,
+ const OpTime& lastOpApplied) {
+ invariant(_role != Role::candidate);
+ invariant(selfIndex < newConfig.getNumMembers());
- _updateHeartbeatDataForReconfig(newConfig, selfIndex, now);
- _rsConfig = newConfig;
- _selfIndex = selfIndex;
- _forceSyncSourceIndex = -1;
+ _updateHeartbeatDataForReconfig(newConfig, selfIndex, now);
+ _rsConfig = newConfig;
+ _selfIndex = selfIndex;
+ _forceSyncSourceIndex = -1;
- if (_role == Role::leader) {
- if (_selfIndex == -1) {
- log() << "Could not remain primary because no longer a member of the replica set";
- }
- else if (!_selfConfig().isElectable()) {
- log() <<" Could not remain primary because no longer electable";
- }
- else {
- // Don't stepdown if you don't have to.
- _currentPrimaryIndex = _selfIndex;
- return;
- }
- _role = Role::follower;
+ if (_role == Role::leader) {
+ if (_selfIndex == -1) {
+ log() << "Could not remain primary because no longer a member of the replica set";
+ } else if (!_selfConfig().isElectable()) {
+            log() << "Could not remain primary because no longer electable";
+ } else {
+ // Don't stepdown if you don't have to.
+ _currentPrimaryIndex = _selfIndex;
+ return;
}
+ _role = Role::follower;
+ }
- // By this point we know we are in Role::follower
- _currentPrimaryIndex = -1; // force secondaries to re-detect who the primary is
- _stepDownPending = false;
-
- if (_followerMode == MemberState::RS_SECONDARY &&
- _rsConfig.getNumMembers() == 1 &&
- _selfIndex == 0 &&
- _rsConfig.getMemberAt(_selfIndex).isElectable()) {
- // If the new config describes a one-node replica set, we're the one member,
- // we're electable, and we are currently in followerMode SECONDARY,
- // we must transition to candidate, in leiu of heartbeats.
- _role = Role::candidate;
- }
+ // By this point we know we are in Role::follower
+ _currentPrimaryIndex = -1; // force secondaries to re-detect who the primary is
+ _stepDownPending = false;
+
+ if (_followerMode == MemberState::RS_SECONDARY && _rsConfig.getNumMembers() == 1 &&
+ _selfIndex == 0 && _rsConfig.getMemberAt(_selfIndex).isElectable()) {
+ // If the new config describes a one-node replica set, we're the one member,
+ // we're electable, and we are currently in followerMode SECONDARY,
+        // we must transition to candidate, in lieu of heartbeats.
+ _role = Role::candidate;
}
- std::string TopologyCoordinatorImpl::_getHbmsg(Date_t now) const {
- // ignore messages over 2 minutes old
- if ((now - _hbmsgTime) > Seconds{120}) {
- return "";
- }
- return _hbmsg;
+}
+std::string TopologyCoordinatorImpl::_getHbmsg(Date_t now) const {
+ // ignore messages over 2 minutes old
+ if ((now - _hbmsgTime) > Seconds{120}) {
+ return "";
}
+ return _hbmsg;
+}
- void TopologyCoordinatorImpl::setMyHeartbeatMessage(const Date_t now,
- const std::string& message) {
- _hbmsgTime = now;
- _hbmsg = message;
- }
+void TopologyCoordinatorImpl::setMyHeartbeatMessage(const Date_t now, const std::string& message) {
+ _hbmsgTime = now;
+ _hbmsg = message;
+}
+
+const MemberConfig& TopologyCoordinatorImpl::_selfConfig() const {
+ return _rsConfig.getMemberAt(_selfIndex);
+}
- const MemberConfig& TopologyCoordinatorImpl::_selfConfig() const {
- return _rsConfig.getMemberAt(_selfIndex);
+TopologyCoordinatorImpl::UnelectableReasonMask TopologyCoordinatorImpl::_getUnelectableReason(
+ int index, const OpTime& lastOpApplied) const {
+ invariant(index != _selfIndex);
+ const MemberConfig& memberConfig = _rsConfig.getMemberAt(index);
+ const MemberHeartbeatData& hbData = _hbdata[index];
+ UnelectableReasonMask result = None;
+ if (memberConfig.isArbiter()) {
+ result |= ArbiterIAm;
+ }
+ if (memberConfig.getPriority() <= 0) {
+ result |= NoPriority;
+ }
+ if (hbData.getState() != MemberState::RS_SECONDARY) {
+ result |= NotSecondary;
+ }
+ if (!_isOpTimeCloseEnoughToLatestToElect(hbData.getOpTime(), lastOpApplied)) {
+ result |= NotCloseEnoughToLatestOptime;
+ }
+ if (hbData.up() && hbData.isUnelectable()) {
+ result |= RefusesToStand;
}
+ invariant(result || memberConfig.isElectable());
+ return result;
+}
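
Unelectability is reported as a bitmask so that several independent reasons can accumulate and later be rendered into a single message. A self-contained sketch of the same pattern with a reduced, illustrative set of reason bits:

#include <cstdint>

// Hypothetical subset of the reason bits used above.
enum UnelectableReason : std::uint32_t {
    None = 0,
    ArbiterIAm = 1u << 0,
    NoPriority = 1u << 1,
    NotSecondary = 1u << 2,
};
using UnelectableReasonMask = std::uint32_t;

struct Candidate {
    bool arbiter;
    int priority;
    bool secondary;
};

// Accumulate every applicable reason instead of returning on the first one,
// exactly as the member function above does.
UnelectableReasonMask unelectableReason(const Candidate& c) {
    UnelectableReasonMask result = None;
    if (c.arbiter)
        result |= ArbiterIAm;
    if (c.priority <= 0)
        result |= NoPriority;
    if (!c.secondary)
        result |= NotSecondary;
    return result;
}
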
- TopologyCoordinatorImpl::UnelectableReasonMask TopologyCoordinatorImpl::_getUnelectableReason(
- int index,
- const OpTime& lastOpApplied) const {
- invariant(index != _selfIndex);
- const MemberConfig& memberConfig = _rsConfig.getMemberAt(index);
- const MemberHeartbeatData& hbData = _hbdata[index];
- UnelectableReasonMask result = None;
- if (memberConfig.isArbiter()) {
- result |= ArbiterIAm;
- }
- if (memberConfig.getPriority() <= 0) {
- result |= NoPriority;
- }
- if (hbData.getState() != MemberState::RS_SECONDARY) {
- result |= NotSecondary;
- }
- if (!_isOpTimeCloseEnoughToLatestToElect(hbData.getOpTime(), lastOpApplied)) {
- result |= NotCloseEnoughToLatestOptime;
- }
- if (hbData.up() && hbData.isUnelectable()) {
- result |= RefusesToStand;
- }
- invariant(result || memberConfig.isElectable());
+TopologyCoordinatorImpl::UnelectableReasonMask TopologyCoordinatorImpl::_getMyUnelectableReason(
+ const Date_t now, const OpTime& lastApplied) const {
+ UnelectableReasonMask result = None;
+ if (lastApplied.isNull()) {
+ result |= NoData;
+ }
+ if (!_aMajoritySeemsToBeUp()) {
+ result |= CannotSeeMajority;
+ }
+ if (_selfIndex == -1) {
+ result |= NotInitialized;
return result;
}
+ if (_selfConfig().isArbiter()) {
+ result |= ArbiterIAm;
+ }
+ if (_selfConfig().getPriority() <= 0) {
+ result |= NoPriority;
+ }
+ if (_stepDownUntil > now) {
+ result |= StepDownPeriodActive;
+ }
+ if (_voteLease.whoId != -1 && _voteLease.whoId != _rsConfig.getMemberAt(_selfIndex).getId() &&
+ _voteLease.when + VoteLease::leaseTime >= now) {
+ result |= VotedTooRecently;
+ }
- TopologyCoordinatorImpl::UnelectableReasonMask TopologyCoordinatorImpl::_getMyUnelectableReason(
- const Date_t now,
- const OpTime& lastApplied) const {
-
- UnelectableReasonMask result = None;
- if (lastApplied.isNull()) {
- result |= NoData;
- }
- if (!_aMajoritySeemsToBeUp()) {
- result |= CannotSeeMajority;
- }
- if (_selfIndex == -1) {
- result |= NotInitialized;
- return result;
- }
- if (_selfConfig().isArbiter()) {
- result |= ArbiterIAm;
- }
- if (_selfConfig().getPriority() <= 0) {
- result |= NoPriority;
- }
- if (_stepDownUntil > now) {
- result |= StepDownPeriodActive;
- }
- if (_voteLease.whoId != -1 &&
- _voteLease.whoId !=_rsConfig.getMemberAt(_selfIndex).getId() &&
- _voteLease.when + VoteLease::leaseTime >= now) {
- result |= VotedTooRecently;
- }
-
- // Cannot be electable unless secondary or already primary
- if (!getMemberState().secondary() && !_iAmPrimary()) {
- result |= NotSecondary;
- }
- if (!_isOpTimeCloseEnoughToLatestToElect(lastApplied, lastApplied)) {
- result |= NotCloseEnoughToLatestOptime;
- }
- return result;
+ // Cannot be electable unless secondary or already primary
+ if (!getMemberState().secondary() && !_iAmPrimary()) {
+ result |= NotSecondary;
+ }
+ if (!_isOpTimeCloseEnoughToLatestToElect(lastApplied, lastApplied)) {
+ result |= NotCloseEnoughToLatestOptime;
}
+ return result;
+}
- std::string TopologyCoordinatorImpl::_getUnelectableReasonString(
- const UnelectableReasonMask ur) const {
- invariant(ur);
- str::stream ss;
- bool hasWrittenToStream = false;
- if (ur & NoData) {
- ss << "node has no applied oplog entries";
- hasWrittenToStream = true;
- }
- if (ur & VotedTooRecently) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "I recently voted for " << _voteLease.whoHostAndPort.toString();
- }
- if (ur & CannotSeeMajority) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "I cannot see a majority";
- }
- if (ur & ArbiterIAm) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "member is an arbiter";
+std::string TopologyCoordinatorImpl::_getUnelectableReasonString(
+ const UnelectableReasonMask ur) const {
+ invariant(ur);
+ str::stream ss;
+ bool hasWrittenToStream = false;
+ if (ur & NoData) {
+ ss << "node has no applied oplog entries";
+ hasWrittenToStream = true;
+ }
+ if (ur & VotedTooRecently) {
+ if (hasWrittenToStream) {
+ ss << "; ";
}
- if (ur & NoPriority) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "member has zero priority";
+ hasWrittenToStream = true;
+ ss << "I recently voted for " << _voteLease.whoHostAndPort.toString();
+ }
+ if (ur & CannotSeeMajority) {
+ if (hasWrittenToStream) {
+ ss << "; ";
}
- if (ur & StepDownPeriodActive) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "I am still waiting for stepdown period to end at " <<
- dateToISOStringLocal(_stepDownUntil);
+ hasWrittenToStream = true;
+ ss << "I cannot see a majority";
+ }
+ if (ur & ArbiterIAm) {
+ if (hasWrittenToStream) {
+ ss << "; ";
}
- if (ur & NotSecondary) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "member is not currently a secondary";
+ hasWrittenToStream = true;
+ ss << "member is an arbiter";
+ }
+ if (ur & NoPriority) {
+ if (hasWrittenToStream) {
+ ss << "; ";
}
- if (ur & NotCloseEnoughToLatestOptime) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "member is more than 10 seconds behind the most up-to-date member";
+ hasWrittenToStream = true;
+ ss << "member has zero priority";
+ }
+ if (ur & StepDownPeriodActive) {
+ if (hasWrittenToStream) {
+ ss << "; ";
}
- if (ur & NotInitialized) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "node is not a member of a valid replica set configuration";
+ hasWrittenToStream = true;
+ ss << "I am still waiting for stepdown period to end at "
+ << dateToISOStringLocal(_stepDownUntil);
+ }
+ if (ur & NotSecondary) {
+ if (hasWrittenToStream) {
+ ss << "; ";
}
- if (ur & RefusesToStand) {
- if (hasWrittenToStream) {
- ss << "; ";
- }
- hasWrittenToStream = true;
- ss << "most recent heartbeat indicates node will not stand for election";
+ hasWrittenToStream = true;
+ ss << "member is not currently a secondary";
+ }
+ if (ur & NotCloseEnoughToLatestOptime) {
+ if (hasWrittenToStream) {
+ ss << "; ";
}
- if (!hasWrittenToStream) {
- severe() << "Invalid UnelectableReasonMask value 0x" << integerToHex(ur);
- fassertFailed(26011);
+ hasWrittenToStream = true;
+ ss << "member is more than 10 seconds behind the most up-to-date member";
+ }
+ if (ur & NotInitialized) {
+ if (hasWrittenToStream) {
+ ss << "; ";
}
- ss << " (mask 0x" << integerToHex(ur) << ")";
- return ss;
+ hasWrittenToStream = true;
+ ss << "node is not a member of a valid replica set configuration";
}
-
- int TopologyCoordinatorImpl::_getPing(const HostAndPort& host) {
- return _pings[host].getMillis();
+ if (ur & RefusesToStand) {
+ if (hasWrittenToStream) {
+ ss << "; ";
+ }
+ hasWrittenToStream = true;
+ ss << "most recent heartbeat indicates node will not stand for election";
}
-
- void TopologyCoordinatorImpl::_setElectionTime(const Timestamp& newElectionTime) {
- _electionTime = newElectionTime;
+ if (!hasWrittenToStream) {
+ severe() << "Invalid UnelectableReasonMask value 0x" << integerToHex(ur);
+ fassertFailed(26011);
}
+ ss << " (mask 0x" << integerToHex(ur) << ")";
+ return ss;
+}
- int TopologyCoordinatorImpl::_getTotalPings() {
- PingMap::iterator it = _pings.begin();
- PingMap::iterator end = _pings.end();
- int totalPings = 0;
- while (it != end) {
- totalPings += it->second.getCount();
- it++;
- }
- return totalPings;
- }
+int TopologyCoordinatorImpl::_getPing(const HostAndPort& host) {
+ return _pings[host].getMillis();
+}
- std::vector<HostAndPort> TopologyCoordinatorImpl::getMaybeUpHostAndPorts() const {
- std::vector<HostAndPort> upHosts;
- for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
- it != _hbdata.end();
- ++it) {
- const int itIndex = indexOfIterator(_hbdata, it);
- if (itIndex == _selfIndex) {
- continue; // skip ourselves
- }
- if (!it->maybeUp()) {
- continue; // skip DOWN nodes
- }
+void TopologyCoordinatorImpl::_setElectionTime(const Timestamp& newElectionTime) {
+ _electionTime = newElectionTime;
+}
- upHosts.push_back(_rsConfig.getMemberAt(itIndex).getHostAndPort());
- }
- return upHosts;
+int TopologyCoordinatorImpl::_getTotalPings() {
+ PingMap::iterator it = _pings.begin();
+ PingMap::iterator end = _pings.end();
+ int totalPings = 0;
+ while (it != end) {
+ totalPings += it->second.getCount();
+ it++;
}
+ return totalPings;
+}
- bool TopologyCoordinatorImpl::voteForMyself(Date_t now) {
- if (_role != Role::candidate) {
- return false;
+std::vector<HostAndPort> TopologyCoordinatorImpl::getMaybeUpHostAndPorts() const {
+ std::vector<HostAndPort> upHosts;
+ for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin(); it != _hbdata.end();
+ ++it) {
+ const int itIndex = indexOfIterator(_hbdata, it);
+ if (itIndex == _selfIndex) {
+ continue; // skip ourselves
}
- int selfId = _selfConfig().getId();
- if ((_voteLease.when + VoteLease::leaseTime >= now)
- && (_voteLease.whoId != selfId)) {
- log() << "not voting yea for " << selfId <<
- " voted for " << _voteLease.whoHostAndPort.toString() << ' ' <<
- durationCount<Seconds>(now - _voteLease.when) << " secs ago";
- return false;
+ if (!it->maybeUp()) {
+ continue; // skip DOWN nodes
}
- _voteLease.when = now;
- _voteLease.whoId = selfId;
- _voteLease.whoHostAndPort = _selfConfig().getHostAndPort();
- return true;
+
+ upHosts.push_back(_rsConfig.getMemberAt(itIndex).getHostAndPort());
}
+ return upHosts;
+}
- MemberState TopologyCoordinatorImpl::getMemberState() const {
- if (_selfIndex == -1) {
- if (_rsConfig.isInitialized()) {
- return MemberState::RS_REMOVED;
- }
- return MemberState::RS_STARTUP;
- }
- if (_role == Role::leader) {
- invariant(_currentPrimaryIndex == _selfIndex);
- return MemberState::RS_PRIMARY;
- }
- const MemberConfig& myConfig = _selfConfig();
- if (myConfig.isArbiter()) {
- return MemberState::RS_ARBITER;
- }
- if (((_maintenanceModeCalls > 0) || (_hasOnlyAuthErrorUpHeartbeats(_hbdata, _selfIndex)))
- && (_followerMode == MemberState::RS_SECONDARY)) {
- return MemberState::RS_RECOVERING;
- }
- return _followerMode;
+bool TopologyCoordinatorImpl::voteForMyself(Date_t now) {
+ if (_role != Role::candidate) {
+ return false;
}
+ int selfId = _selfConfig().getId();
+ if ((_voteLease.when + VoteLease::leaseTime >= now) && (_voteLease.whoId != selfId)) {
+ log() << "not voting yea for " << selfId << " voted for "
+ << _voteLease.whoHostAndPort.toString() << ' '
+ << durationCount<Seconds>(now - _voteLease.when) << " secs ago";
+ return false;
+ }
+ _voteLease.when = now;
+ _voteLease.whoId = selfId;
+ _voteLease.whoHostAndPort = _selfConfig().getHostAndPort();
+ return true;
+}
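
The vote lease above stops a node from voting for itself while a recent vote for a different member is still live. A minimal chrono sketch of that check; kLeaseTime and the field names are illustrative stand-ins, not the real VoteLease constants:

#include <chrono>

using Clock = std::chrono::steady_clock;

// Illustrative lease window; the real VoteLease::leaseTime may differ.
constexpr std::chrono::seconds kLeaseTime{30};

// Hypothetical model of the vote-lease bookkeeping above.
struct VoteLease {
    Clock::time_point when{};
    int whoId = -1;  // -1 means "never voted"
};

// We may vote for ourselves only if we have not voted for a *different*
// candidate within the lease window.
bool mayVoteForSelf(const VoteLease& lease, int selfId, Clock::time_point now) {
    const bool leaseStillLive = lease.when + kLeaseTime >= now;
    return !(leaseStillLive && lease.whoId != -1 && lease.whoId != selfId);
}
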
- void TopologyCoordinatorImpl::processWinElection(
- OID electionId,
- Timestamp electionOpTime) {
- invariant(_role == Role::candidate);
- _electionTime = electionOpTime;
- _electionId = electionId;
- _role = Role::leader;
- _currentPrimaryIndex = _selfIndex;
- _syncSource = HostAndPort();
- _forceSyncSourceIndex = -1;
+MemberState TopologyCoordinatorImpl::getMemberState() const {
+ if (_selfIndex == -1) {
+ if (_rsConfig.isInitialized()) {
+ return MemberState::RS_REMOVED;
+ }
+ return MemberState::RS_STARTUP;
+ }
+ if (_role == Role::leader) {
+ invariant(_currentPrimaryIndex == _selfIndex);
+ return MemberState::RS_PRIMARY;
+ }
+ const MemberConfig& myConfig = _selfConfig();
+ if (myConfig.isArbiter()) {
+ return MemberState::RS_ARBITER;
}
+ if (((_maintenanceModeCalls > 0) || (_hasOnlyAuthErrorUpHeartbeats(_hbdata, _selfIndex))) &&
+ (_followerMode == MemberState::RS_SECONDARY)) {
+ return MemberState::RS_RECOVERING;
+ }
+ return _followerMode;
+}
- void TopologyCoordinatorImpl::processLoseElection() {
- invariant(_role == Role::candidate);
- const HostAndPort syncSourceAddress = getSyncSourceAddress();
- _electionTime = Timestamp(0, 0);
- _electionId = OID();
- _role = Role::follower;
+void TopologyCoordinatorImpl::processWinElection(OID electionId, Timestamp electionOpTime) {
+ invariant(_role == Role::candidate);
+ _electionTime = electionOpTime;
+ _electionId = electionId;
+ _role = Role::leader;
+ _currentPrimaryIndex = _selfIndex;
+ _syncSource = HostAndPort();
+ _forceSyncSourceIndex = -1;
+}
- // Clear voteLease time, if we voted for ourselves in this election.
- // This will allow us to vote for others.
- if (_voteLease.whoId == _selfConfig().getId()) {
- _voteLease.when = Date_t();
- }
+void TopologyCoordinatorImpl::processLoseElection() {
+ invariant(_role == Role::candidate);
+ const HostAndPort syncSourceAddress = getSyncSourceAddress();
+ _electionTime = Timestamp(0, 0);
+ _electionId = OID();
+ _role = Role::follower;
+
+ // Clear voteLease time, if we voted for ourselves in this election.
+ // This will allow us to vote for others.
+ if (_voteLease.whoId == _selfConfig().getId()) {
+ _voteLease.when = Date_t();
}
+}
- bool TopologyCoordinatorImpl::stepDown(Date_t until, bool force, const OpTime& lastOpApplied) {
- bool canStepDown = force;
- for (int i = 0; !canStepDown && i < _rsConfig.getNumMembers(); ++i) {
- if (i == _selfIndex) {
- continue;
- }
- UnelectableReasonMask reason = _getUnelectableReason(i, lastOpApplied);
- if (!reason && _hbdata[i].getOpTime() >= lastOpApplied) {
- canStepDown = true;
- }
+bool TopologyCoordinatorImpl::stepDown(Date_t until, bool force, const OpTime& lastOpApplied) {
+ bool canStepDown = force;
+ for (int i = 0; !canStepDown && i < _rsConfig.getNumMembers(); ++i) {
+ if (i == _selfIndex) {
+ continue;
}
-
- if (!canStepDown) {
- return false;
+ UnelectableReasonMask reason = _getUnelectableReason(i, lastOpApplied);
+ if (!reason && _hbdata[i].getOpTime() >= lastOpApplied) {
+ canStepDown = true;
}
- _stepDownUntil = until;
- _stepDownSelfAndReplaceWith(-1);
- return true;
}
- void TopologyCoordinatorImpl::setFollowerMode(MemberState::MS newMode) {
- invariant(_role == Role::follower);
- switch (newMode) {
+ if (!canStepDown) {
+ return false;
+ }
+ _stepDownUntil = until;
+ _stepDownSelfAndReplaceWith(-1);
+ return true;
+}
+
+void TopologyCoordinatorImpl::setFollowerMode(MemberState::MS newMode) {
+ invariant(_role == Role::follower);
+ switch (newMode) {
case MemberState::RS_RECOVERING:
case MemberState::RS_ROLLBACK:
case MemberState::RS_SECONDARY:
@@ -2104,223 +1992,209 @@ namespace {
break;
default:
invariant(false);
- }
-
- if (_followerMode != MemberState::RS_SECONDARY) {
- return;
- }
-
- // When a single node replica set transitions to SECONDARY, we must check if we should
- // be a candidate here. This is necessary because a single node replica set has no
- // heartbeats that would normally change the role to candidate.
-
- if (_rsConfig.getNumMembers() == 1 &&
- _selfIndex == 0 &&
- _rsConfig.getMemberAt(_selfIndex).isElectable()) {
- _role = Role::candidate;
- }
}
- bool TopologyCoordinatorImpl::stepDownIfPending() {
- if (!_stepDownPending) {
- return false;
- }
-
- int remotePrimaryIndex = -1;
- for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
- it != _hbdata.end(); ++it) {
- const int itIndex = indexOfIterator(_hbdata, it);
- if (itIndex == _selfIndex) {
- continue;
- }
-
- if (it->getState().primary() && it->up()) {
- if (remotePrimaryIndex != -1) {
- // two other nodes think they are primary (asynchronously polled)
- // -- wait for things to settle down.
- remotePrimaryIndex = -1;
- warning() << "two remote primaries (transiently)";
- break;
- }
- remotePrimaryIndex = itIndex;
- }
- }
- _stepDownSelfAndReplaceWith(remotePrimaryIndex);
- return true;
- }
-
- void TopologyCoordinatorImpl::_stepDownSelfAndReplaceWith(int newPrimary) {
- invariant(_role == Role::leader);
- invariant(_selfIndex != -1);
- invariant(_selfIndex != newPrimary);
- invariant(_selfIndex == _currentPrimaryIndex);
- _currentPrimaryIndex = newPrimary;
- _role = Role::follower;
- _stepDownPending = false;
+ if (_followerMode != MemberState::RS_SECONDARY) {
+ return;
}
- void TopologyCoordinatorImpl::adjustMaintenanceCountBy(int inc) {
- invariant(_role == Role::follower);
- _maintenanceModeCalls += inc;
- invariant(_maintenanceModeCalls >= 0);
- }
-
- int TopologyCoordinatorImpl::getMaintenanceCount() const {
- return _maintenanceModeCalls;
- }
+ // When a single node replica set transitions to SECONDARY, we must check if we should
+ // be a candidate here. This is necessary because a single node replica set has no
+ // heartbeats that would normally change the role to candidate.
- bool TopologyCoordinatorImpl::updateTerm(long long term) {
- if (term <= _term) {
- return false;
- }
- _term = term;
- return true;
+ if (_rsConfig.getNumMembers() == 1 && _selfIndex == 0 &&
+ _rsConfig.getMemberAt(_selfIndex).isElectable()) {
+ _role = Role::candidate;
}
+}
- long long TopologyCoordinatorImpl::getTerm() const {
- return _term;
+bool TopologyCoordinatorImpl::stepDownIfPending() {
+ if (!_stepDownPending) {
+ return false;
}
- bool TopologyCoordinatorImpl::shouldChangeSyncSource(const HostAndPort& currentSource,
- Date_t now) const {
- // Methodology:
- // If there exists a viable sync source member other than currentSource, whose oplog has
- // reached an optime greater than _maxSyncSourceLagSecs later than currentSource's, return
- // true.
-
- // If the user requested a sync source change, return true.
- if (_forceSyncSourceIndex != -1) {
- return true;
- }
-
- const int currentMemberIndex = _rsConfig.findMemberIndexByHostAndPort(currentSource);
- if (currentMemberIndex == -1) {
- return true;
- }
- invariant(currentMemberIndex != _selfIndex);
-
- OpTime currentOpTime = _hbdata[currentMemberIndex].getOpTime();
- if (currentOpTime.isNull()) {
- // Haven't received a heartbeat from the sync source yet, so can't tell if we should
- // change.
- return false;
+ int remotePrimaryIndex = -1;
+ for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin(); it != _hbdata.end();
+ ++it) {
+ const int itIndex = indexOfIterator(_hbdata, it);
+ if (itIndex == _selfIndex) {
+ continue;
}
- unsigned int currentSecs = currentOpTime.getSecs();
- unsigned int goalSecs = currentSecs + _maxSyncSourceLagSecs.count();
- for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin();
- it != _hbdata.end();
- ++it) {
- const int itIndex = indexOfIterator(_hbdata, it);
- const MemberConfig& candidateConfig = _rsConfig.getMemberAt(itIndex);
- if (it->up() &&
- (candidateConfig.shouldBuildIndexes() || !_selfConfig().shouldBuildIndexes()) &&
- it->getState().readable() &&
- !_memberIsBlacklisted(candidateConfig, now) &&
- goalSecs < it->getOpTime().getSecs()) {
- log() << "changing sync target because current sync target's most recent OpTime is "
- << currentOpTime.toString() << " which is more than "
- << _maxSyncSourceLagSecs.count() << " seconds behind member "
- << candidateConfig.getHostAndPort().toString()
- << " whose most recent OpTime is " << it->getOpTime().toString();
- invariant(itIndex != _selfIndex);
- return true;
+ if (it->getState().primary() && it->up()) {
+ if (remotePrimaryIndex != -1) {
+ // two other nodes think they are primary (asynchronously polled)
+ // -- wait for things to settle down.
+ remotePrimaryIndex = -1;
+ warning() << "two remote primaries (transiently)";
+ break;
}
+ remotePrimaryIndex = itIndex;
}
- return false;
}
+ _stepDownSelfAndReplaceWith(remotePrimaryIndex);
+ return true;
+}
- void TopologyCoordinatorImpl::prepareCursorResponseInfo(
- BSONObjBuilder* objBuilder,
- const OpTime& lastCommittedOpTime) const {
- objBuilder->append("term", _term);
- objBuilder->append("lastOpCommittedTimestamp", lastCommittedOpTime.getTimestamp());
- objBuilder->append("lastOpCommittedTerm", lastCommittedOpTime.getTerm());
- objBuilder->append("configVersion", _rsConfig.getConfigVersion());
- objBuilder->append("primaryId", _rsConfig.getMemberAt(_currentPrimaryIndex).getId());
- }
+void TopologyCoordinatorImpl::_stepDownSelfAndReplaceWith(int newPrimary) {
+ invariant(_role == Role::leader);
+ invariant(_selfIndex != -1);
+ invariant(_selfIndex != newPrimary);
+ invariant(_selfIndex == _currentPrimaryIndex);
+ _currentPrimaryIndex = newPrimary;
+ _role = Role::follower;
+ _stepDownPending = false;
+}
- void TopologyCoordinatorImpl::summarizeAsHtml(ReplSetHtmlSummary* output) {
- output->setConfig(_rsConfig);
- output->setHBData(_hbdata);
- output->setSelfIndex(_selfIndex);
- output->setPrimaryIndex(_currentPrimaryIndex);
- output->setSelfState(getMemberState());
- output->setSelfHeartbeatMessage(_hbmsg);
- }
+void TopologyCoordinatorImpl::adjustMaintenanceCountBy(int inc) {
+ invariant(_role == Role::follower);
+ _maintenanceModeCalls += inc;
+ invariant(_maintenanceModeCalls >= 0);
+}
- void TopologyCoordinatorImpl::processReplSetRequestVotes(
- const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response,
- const OpTime& lastAppliedOpTime) {
- response->setOk(true);
- response->setTerm(_term);
-
- if (args.getTerm() < _term) {
- response->setVoteGranted(false);
- response->setReason("candidate's term is lower than mine");
- }
- else if (args.getConfigVersion() != _rsConfig.getConfigVersion()) {
- response->setVoteGranted(false);
- response->setReason("candidate's config version differs from mine");
- }
- else if (args.getSetName() != _rsConfig.getReplSetName()) {
- response->setVoteGranted(false);
- response->setReason("candidate's set name differs from mine");
- }
- else if (args.getLastCommittedOp() < lastAppliedOpTime) {
- response->setVoteGranted(false);
- response->setReason("candidate's data is staler than mine");
- }
- else if (!args.isADryRun() && _lastVote.getTerm() == args.getTerm()) {
- response->setVoteGranted(false);
- response->setReason("already voted for another candidate this term");
- }
- else {
- if (!args.isADryRun()) {
- _lastVote.setTerm(args.getTerm());
- _lastVote.setCandidateId(args.getCandidateId());
- }
- response->setVoteGranted(true);
- }
+int TopologyCoordinatorImpl::getMaintenanceCount() const {
+ return _maintenanceModeCalls;
+}
+bool TopologyCoordinatorImpl::updateTerm(long long term) {
+ if (term <= _term) {
+ return false;
}
+ _term = term;
+ return true;
+}
- Status TopologyCoordinatorImpl::processReplSetDeclareElectionWinner(
- const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm) {
- *responseTerm = _term;
- if (args.getReplSetName() != _rsConfig.getReplSetName()) {
- return {ErrorCodes::BadValue, "replSet name does not match"};
- }
- else if (args.getTerm() < _term) {
- return {ErrorCodes::BadValue, "term has already passed"};
- }
- else if (args.getTerm() == _term && _currentPrimaryIndex > -1 &&
- args.getWinnerId() != _rsConfig.getMemberAt(_currentPrimaryIndex).getId()) {
- return {ErrorCodes::BadValue, "term already has a primary"};
- }
+long long TopologyCoordinatorImpl::getTerm() const {
+ return _term;
+}
- _currentPrimaryIndex = _rsConfig.findMemberIndexByConfigId(args.getWinnerId());
- return Status::OK();
- }
+bool TopologyCoordinatorImpl::shouldChangeSyncSource(const HostAndPort& currentSource,
+ Date_t now) const {
+ // Methodology:
+ // If there exists a viable sync source member other than currentSource, whose oplog has
+ // reached an optime greater than _maxSyncSourceLagSecs later than currentSource's, return
+ // true.
- void TopologyCoordinatorImpl::loadLastVote(const LastVote& lastVote) {
- _lastVote = lastVote;
+ // If the user requested a sync source change, return true.
+ if (_forceSyncSourceIndex != -1) {
+ return true;
}
- long long TopologyCoordinatorImpl::getTerm() {
- return _term;
+ const int currentMemberIndex = _rsConfig.findMemberIndexByHostAndPort(currentSource);
+ if (currentMemberIndex == -1) {
+ return true;
}
+ invariant(currentMemberIndex != _selfIndex);
- void TopologyCoordinatorImpl::incrementTerm() {
- _term++;
+ OpTime currentOpTime = _hbdata[currentMemberIndex].getOpTime();
+ if (currentOpTime.isNull()) {
+ // Haven't received a heartbeat from the sync source yet, so can't tell if we should
+ // change.
+ return false;
}
-
- void TopologyCoordinatorImpl::voteForMyselfV1() {
- _lastVote.setTerm(_term);
- _lastVote.setCandidateId(_selfConfig().getId());
+ unsigned int currentSecs = currentOpTime.getSecs();
+ unsigned int goalSecs = currentSecs + _maxSyncSourceLagSecs.count();
+
+ for (std::vector<MemberHeartbeatData>::const_iterator it = _hbdata.begin(); it != _hbdata.end();
+ ++it) {
+ const int itIndex = indexOfIterator(_hbdata, it);
+ const MemberConfig& candidateConfig = _rsConfig.getMemberAt(itIndex);
+ if (it->up() &&
+ (candidateConfig.shouldBuildIndexes() || !_selfConfig().shouldBuildIndexes()) &&
+ it->getState().readable() && !_memberIsBlacklisted(candidateConfig, now) &&
+ goalSecs < it->getOpTime().getSecs()) {
+ log() << "changing sync target because current sync target's most recent OpTime is "
+ << currentOpTime.toString() << " which is more than "
+ << _maxSyncSourceLagSecs.count() << " seconds behind member "
+ << candidateConfig.getHostAndPort().toString() << " whose most recent OpTime is "
+ << it->getOpTime().toString();
+ invariant(itIndex != _selfIndex);
+ return true;
+ }
}
-
-} // namespace repl
-} // namespace mongo
+ return false;
+}
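
The sync-source re-evaluation boils down to a threshold on oplog seconds: switch if any viable, non-blacklisted candidate has advanced more than the configured lag past the current source. A standalone sketch of that comparison with simplified, hypothetical types:

#include <cstdint>
#include <vector>

// Hypothetical snapshot of a candidate sync source's heartbeat data.
struct SyncCandidate {
    bool up;
    bool readable;             // state allows reads from this member
    bool blacklisted;
    std::uint32_t opTimeSecs;  // seconds component of the member's last optime
};

// Return true if some viable candidate has advanced more than maxLagSecs
// past the current sync source, mirroring the loop above.
bool shouldSwitchSyncSource(std::uint32_t currentSourceSecs,
                            std::uint32_t maxLagSecs,
                            const std::vector<SyncCandidate>& candidates) {
    const std::uint32_t goalSecs = currentSourceSecs + maxLagSecs;
    for (const SyncCandidate& c : candidates) {
        if (c.up && c.readable && !c.blacklisted && goalSecs < c.opTimeSecs)
            return true;
    }
    return false;
}
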
+
+void TopologyCoordinatorImpl::prepareCursorResponseInfo(BSONObjBuilder* objBuilder,
+ const OpTime& lastCommittedOpTime) const {
+ objBuilder->append("term", _term);
+ objBuilder->append("lastOpCommittedTimestamp", lastCommittedOpTime.getTimestamp());
+ objBuilder->append("lastOpCommittedTerm", lastCommittedOpTime.getTerm());
+ objBuilder->append("configVersion", _rsConfig.getConfigVersion());
+ objBuilder->append("primaryId", _rsConfig.getMemberAt(_currentPrimaryIndex).getId());
+}
+
+void TopologyCoordinatorImpl::summarizeAsHtml(ReplSetHtmlSummary* output) {
+ output->setConfig(_rsConfig);
+ output->setHBData(_hbdata);
+ output->setSelfIndex(_selfIndex);
+ output->setPrimaryIndex(_currentPrimaryIndex);
+ output->setSelfState(getMemberState());
+ output->setSelfHeartbeatMessage(_hbmsg);
+}
+
+void TopologyCoordinatorImpl::processReplSetRequestVotes(const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response,
+ const OpTime& lastAppliedOpTime) {
+ response->setOk(true);
+ response->setTerm(_term);
+
+ if (args.getTerm() < _term) {
+ response->setVoteGranted(false);
+ response->setReason("candidate's term is lower than mine");
+ } else if (args.getConfigVersion() != _rsConfig.getConfigVersion()) {
+ response->setVoteGranted(false);
+ response->setReason("candidate's config version differs from mine");
+ } else if (args.getSetName() != _rsConfig.getReplSetName()) {
+ response->setVoteGranted(false);
+ response->setReason("candidate's set name differs from mine");
+ } else if (args.getLastCommittedOp() < lastAppliedOpTime) {
+ response->setVoteGranted(false);
+ response->setReason("candidate's data is staler than mine");
+ } else if (!args.isADryRun() && _lastVote.getTerm() == args.getTerm()) {
+ response->setVoteGranted(false);
+ response->setReason("already voted for another candidate this term");
+ } else {
+ if (!args.isADryRun()) {
+ _lastVote.setTerm(args.getTerm());
+ _lastVote.setCandidateId(args.getCandidateId());
+ }
+ response->setVoteGranted(true);
+ }
+}
+
+Status TopologyCoordinatorImpl::processReplSetDeclareElectionWinner(
+ const ReplSetDeclareElectionWinnerArgs& args, long long* responseTerm) {
+ *responseTerm = _term;
+ if (args.getReplSetName() != _rsConfig.getReplSetName()) {
+ return {ErrorCodes::BadValue, "replSet name does not match"};
+ } else if (args.getTerm() < _term) {
+ return {ErrorCodes::BadValue, "term has already passed"};
+ } else if (args.getTerm() == _term && _currentPrimaryIndex > -1 &&
+ args.getWinnerId() != _rsConfig.getMemberAt(_currentPrimaryIndex).getId()) {
+ return {ErrorCodes::BadValue, "term already has a primary"};
+ }
+
+ _currentPrimaryIndex = _rsConfig.findMemberIndexByConfigId(args.getWinnerId());
+ return Status::OK();
+}
+
+void TopologyCoordinatorImpl::loadLastVote(const LastVote& lastVote) {
+ _lastVote = lastVote;
+}
+
+long long TopologyCoordinatorImpl::getTerm() {
+ return _term;
+}
+
+void TopologyCoordinatorImpl::incrementTerm() {
+ _term++;
+}
+
+void TopologyCoordinatorImpl::voteForMyselfV1() {
+ _lastVote.setTerm(_term);
+ _lastVote.setCandidateId(_selfConfig().getId());
+}
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/topology_coordinator_impl.h b/src/mongo/db/repl/topology_coordinator_impl.h
index cfc8e88e5f1..c6dd3e26533 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.h
+++ b/src/mongo/db/repl/topology_coordinator_impl.h
@@ -43,389 +43,385 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace repl {
+/**
+ * Represents a latency measurement for each replica set member based on heartbeat requests.
+ * The measurement is an average weighted 80% to the old value, and 20% to the new value.
+ *
+ * Also stores information about heartbeat progress and retries.
+ */
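
The comment above describes an exponentially weighted moving average: each new round-trip sample contributes 20% and the running value keeps 80%. A tiny standalone sketch of that update rule (the struct and field names are illustrative, not PingStats's private members):

#include <cstdint>

// Hypothetical miniature of the ping smoothing described above:
// value <- 0.8 * value + 0.2 * sample, with the first sample taken verbatim.
struct PingAverage {
    std::uint32_t count = 0;
    std::uint32_t valueMs = 0;

    void hit(std::uint32_t sampleMs) {
        ++count;
        valueMs = (count == 1)
            ? sampleMs
            : static_cast<std::uint32_t>(0.8 * valueMs + 0.2 * sampleMs);
    }
};
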
+class PingStats {
+public:
+ PingStats();
+
/**
- * Represents a latency measurement for each replica set member based on heartbeat requests.
- * The measurement is an average weighted 80% to the old value, and 20% to the new value.
+ * Records that a new heartbeat request started at "now".
*
- * Also stores information about heartbeat progress and retries.
+ * This resets the failure count used in determining whether the next request to a target
+ * should be a retry or a regularly scheduled heartbeat message.
*/
- class PingStats {
- public:
- PingStats();
-
- /**
- * Records that a new heartbeat request started at "now".
- *
- * This resets the failure count used in determining whether the next request to a target
- * should be a retry or a regularly scheduled heartbeat message.
- */
- void start(Date_t now);
-
- /**
- * Records that a heartbeat request completed successfully, and that "millis" milliseconds
- * were spent for a single network roundtrip plus remote processing time.
- */
- void hit(int millis);
-
- /**
- * Records that a heartbeat request failed.
- */
- void miss();
-
- /**
- * Gets the number of hit() calls.
- */
- unsigned int getCount() const { return count; }
-
- /**
- * Gets the weighted average round trip time for heartbeat messages to the target.
- */
- unsigned int getMillis() const { return value; }
-
- /**
- * Gets the date at which start() was last called, which is used to determine if
- * a heartbeat should be retried or if the time limit has expired.
- */
- Date_t getLastHeartbeatStartDate() const { return _lastHeartbeatStartDate; }
-
- /**
- * Gets the number of failures since start() was last called.
- *
- * This value is incremented by calls to miss(), cleared by calls to start() and
- * set to the maximum possible value by calls to hit().
- */
- int getNumFailuresSinceLastStart() const { return _numFailuresSinceLastStart; }
-
- private:
- unsigned int count;
- unsigned int value;
- Date_t _lastHeartbeatStartDate;
- int _numFailuresSinceLastStart;
- };
+ void start(Date_t now);
- class TopologyCoordinatorImpl : public TopologyCoordinator {
- public:
- /**
- * Constructs a Topology Coordinator object.
- * @param maxSyncSourceLagSecs a sync source is re-evaluated after it lags behind further
- * than this amount.
- **/
- TopologyCoordinatorImpl(Seconds maxSyncSourceLagSecs);
-
- ////////////////////////////////////////////////////////////
- //
- // Implementation of TopologyCoordinator interface
- //
- ////////////////////////////////////////////////////////////
-
- virtual Role getRole() const;
- virtual MemberState getMemberState() const;
- virtual HostAndPort getSyncSourceAddress() const;
- virtual std::vector<HostAndPort> getMaybeUpHostAndPorts() const;
- virtual int getMaintenanceCount() const;
- virtual long long getTerm() const;
- virtual bool updateTerm(long long term);
- virtual void setForceSyncSourceIndex(int index);
- virtual HostAndPort chooseNewSyncSource(Date_t now,
- const OpTime& lastOpApplied);
- virtual void blacklistSyncSource(const HostAndPort& host, Date_t until);
- virtual void unblacklistSyncSource(const HostAndPort& host, Date_t now);
- virtual void clearSyncSourceBlacklist();
- virtual bool shouldChangeSyncSource(const HostAndPort& currentSource, Date_t now) const;
- virtual bool becomeCandidateIfStepdownPeriodOverAndSingleNodeSet(Date_t now);
- virtual void setElectionSleepUntil(Date_t newTime);
- virtual void setFollowerMode(MemberState::MS newMode);
- virtual void adjustMaintenanceCountBy(int inc);
- virtual void prepareSyncFromResponse(const ReplicationExecutor::CallbackArgs& data,
- const HostAndPort& target,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result);
- virtual void prepareFreshResponse(const ReplicationCoordinator::ReplSetFreshArgs& args,
- Date_t now,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result);
- virtual void prepareElectResponse(const ReplicationCoordinator::ReplSetElectArgs& args,
- Date_t now,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result);
- virtual Status prepareHeartbeatResponse(Date_t now,
- const ReplSetHeartbeatArgs& args,
- const std::string& ourSetName,
- const OpTime& lastOpApplied,
- ReplSetHeartbeatResponse* response);
- virtual Status prepareHeartbeatResponseV1(Date_t now,
- const ReplSetHeartbeatArgsV1& args,
- const std::string& ourSetName,
- const OpTime& lastOpApplied,
- ReplSetHeartbeatResponse* response);
- virtual void prepareStatusResponse(const ReplicationExecutor::CallbackArgs& data,
- Date_t now,
- unsigned uptime,
- const OpTime& lastOpApplied,
- BSONObjBuilder* response,
- Status* result);
- virtual void fillIsMasterForReplSet(IsMasterResponse* response);
- virtual void prepareFreezeResponse(Date_t now, int secs, BSONObjBuilder* response);
- virtual void updateConfig(const ReplicaSetConfig& newConfig,
- int selfIndex,
- Date_t now,
- const OpTime& lastOpApplied);
- virtual std::pair<ReplSetHeartbeatArgs, Milliseconds> prepareHeartbeatRequest(
- Date_t now,
- const std::string& ourSetName,
- const HostAndPort& target);
- virtual std::pair<ReplSetHeartbeatArgsV1, Milliseconds> prepareHeartbeatRequestV1(
- Date_t now,
- const std::string& ourSetName,
- const HostAndPort& target);
- virtual HeartbeatResponseAction processHeartbeatResponse(
- Date_t now,
- Milliseconds networkRoundTripTime,
- const HostAndPort& target,
- const StatusWith<ReplSetHeartbeatResponse>& hbResponse,
- const OpTime& myLastOpApplied);
- virtual bool voteForMyself(Date_t now);
- virtual void processWinElection(OID electionId, Timestamp electionOpTime);
- virtual void processLoseElection();
- virtual bool checkShouldStandForElection(Date_t now, const OpTime& lastOpApplied);
- virtual void setMyHeartbeatMessage(const Date_t now, const std::string& message);
- virtual bool stepDown(Date_t until, bool force, const OpTime& lastOpApplied);
- virtual bool stepDownIfPending();
- virtual Date_t getStepDownTime() const;
- virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder,
- const OpTime& lastCommitttedOpTime) const;
- Status processReplSetDeclareElectionWinner(const ReplSetDeclareElectionWinnerArgs& args,
- long long* responseTerm);
- virtual void processReplSetRequestVotes(const ReplSetRequestVotesArgs& args,
- ReplSetRequestVotesResponse* response,
- const OpTime& lastAppliedOpTime);
- virtual void summarizeAsHtml(ReplSetHtmlSummary* output);
- virtual void loadLastVote(const LastVote& lastVote);
- virtual void incrementTerm();
- virtual void voteForMyselfV1();
- virtual long long getTerm();
- virtual void prepareForStepDown();
-
- ////////////////////////////////////////////////////////////
- //
- // Test support methods
- //
- ////////////////////////////////////////////////////////////
-
- // Changes _memberState to newMemberState. Only for testing.
- void changeMemberState_forTest(const MemberState& newMemberState,
- const Timestamp& electionTime = Timestamp(0,0));
-
- // Sets "_electionTime" to "newElectionTime". Only for testing.
- void _setElectionTime(const Timestamp& newElectionTime);
-
- // Sets _currentPrimaryIndex to the given index. Should only be used in unit tests!
- // TODO(spencer): Remove this once we can easily call for an election in unit tests to
- // set the current primary.
- void _setCurrentPrimaryForTest(int primaryIndex);
-
- // Returns _electionTime. Only used in unittests.
- Timestamp getElectionTime() const;
-
- // Returns _electionId. Only used in unittests.
- OID getElectionId() const;
-
- // Returns _currentPrimaryIndex. Only used in unittests.
- int getCurrentPrimaryIndex() const;
-
- private:
-
- enum UnelectableReason {
- None = 0,
- CannotSeeMajority = 1 << 0,
- NotCloseEnoughToLatestOptime = 1 << 1,
- ArbiterIAm = 1 << 2,
- NotSecondary = 1 << 3,
- NoPriority = 1 << 4,
- StepDownPeriodActive = 1 << 5,
- NoData = 1 << 6,
- NotInitialized = 1 << 7,
- VotedTooRecently = 1 << 8,
- RefusesToStand = 1 << 9
- };
- typedef int UnelectableReasonMask;
-
- // Returns the number of heartbeat pings which have occurred.
- int _getTotalPings();
-
- // Returns the current "ping" value for the given member by their address
- int _getPing(const HostAndPort& host);
-
- // Determines if we will veto the member specified by "args.id", given that the last op
- // we have applied locally is "lastOpApplied".
- // If we veto, the errmsg will be filled in with a reason
- bool _shouldVetoMember(const ReplicationCoordinator::ReplSetFreshArgs& args,
- const Date_t& now,
- const OpTime& lastOpApplied,
- std::string* errmsg) const;
-
- // Returns the index of the member with the matching id, or -1 if none match.
- int _getMemberIndex(int id) const;
-
- // Sees if a majority number of votes are held by members who are currently "up"
- bool _aMajoritySeemsToBeUp() const;
-
- // Is otherOpTime close enough (within 10 seconds) to the latest known optime to qualify
- // for an election
- bool _isOpTimeCloseEnoughToLatestToElect(const OpTime& otherOpTime,
- const OpTime& ourLastOpApplied) const;
-
- // Returns reason why "self" member is unelectable
- UnelectableReasonMask _getMyUnelectableReason(
- const Date_t now,
- const OpTime& lastOpApplied) const;
-
- // Returns reason why memberIndex is unelectable
- UnelectableReasonMask _getUnelectableReason(
- int memberIndex,
- const OpTime& lastOpApplied) const;
-
- // Returns the nice text of why the node is unelectable
- std::string _getUnelectableReasonString(UnelectableReasonMask ur) const;
-
- // Return true if we are currently primary
- bool _iAmPrimary() const;
-
- // Scans through all members that are 'up' and return the latest known optime.
- OpTime _latestKnownOpTime(const OpTime& ourLastOpApplied) const;
-
- // Scans the electable set and returns the highest priority member index
- int _getHighestPriorityElectableIndex(Date_t now, const OpTime& lastOpApplied) const;
-
- // Returns true if "one" member is higher priority than "two" member
- bool _isMemberHigherPriority(int memberOneIndex, int memberTwoIndex) const;
-
- // Helper shortcut to self config
- const MemberConfig& _selfConfig() const;
-
- // Returns NULL if there is no primary, or the MemberConfig* for the current primary
- const MemberConfig* _currentPrimaryMember() const;
-
- /**
- * Performs updating "_hbdata" and "_currentPrimaryIndex" for processHeartbeatResponse().
- */
- HeartbeatResponseAction _updateHeartbeatDataImpl(
- int updatedConfigIndex,
- const MemberState& originalState,
- Date_t now,
- const OpTime& lastOpApplied);
-
- /**
- * Updates _hbdata based on the newConfig, ensuring that every member in the newConfig
- * has an entry in _hbdata. If any nodes in the newConfig are also present in
- * _currentConfig, copies their heartbeat info into the corresponding entry in the updated
- * _hbdata vector.
- */
- void _updateHeartbeatDataForReconfig(const ReplicaSetConfig& newConfig,
- int selfIndex,
- Date_t now);
-
- void _stepDownSelfAndReplaceWith(int newPrimary);
-
- MemberState _getMyState() const;
-
- /**
- * Looks up the provided member in the blacklist and returns true if the member's blacklist
- * expire time is after 'now'. If the member is found but the expire time is before 'now',
- * the function returns false. If the member is not found in the blacklist, the function
- * returns false.
- **/
- bool _memberIsBlacklisted(const MemberConfig& memberConfig, Date_t now) const;
-
- // This node's role in the replication protocol.
- Role _role;
-
- // This is a unique id that is generated and set each time we transition to PRIMARY, as the
- // result of an election.
- OID _electionId;
- // The time at which the current PRIMARY was elected.
- Timestamp _electionTime;
-
- // This node's election term. The term is used as part of the consensus algorithm to elect
- // and maintain one primary (leader) node in the cluster.
- long long _term = 0;
-
- // the index of the member we currently believe is primary, if one exists, otherwise -1
- int _currentPrimaryIndex;
-
- // the hostandport we are currently syncing from
- // empty if no sync source (we are primary, or we cannot connect to anyone yet)
- HostAndPort _syncSource;
- // These members are not chosen as sync sources for a period of time, due to connection
- // issues with them
- std::map<HostAndPort, Date_t> _syncSourceBlacklist;
- // The next sync source to be chosen, requested via a replSetSyncFrom command
- int _forceSyncSourceIndex;
- // How far this node must fall behind before considering switching sync sources
- Seconds _maxSyncSourceLagSecs;
+ /**
+ * Records that a heartbeat request completed successfully, and that "millis" milliseconds
+ * were spent for a single network roundtrip plus remote processing time.
+ */
+ void hit(int millis);
- // "heartbeat message"
- // sent in requestHeartbeat respond in field "hbm"
- std::string _hbmsg;
- Date_t _hbmsgTime; // when it was logged
-
- // heartbeat msg to send to others; descriptive diagnostic info
- std::string _getHbmsg(Date_t now) const;
-
- int _selfIndex; // this node's index in _members and _currentConfig
-
- ReplicaSetConfig _rsConfig; // The current config, including a vector of MemberConfigs
-
- // heartbeat data for each member. It is guaranteed that this vector will be maintained
- // in the same order as the MemberConfigs in _currentConfig, therefore the member config
- // index can be used to index into this vector as well.
- std::vector<MemberHeartbeatData> _hbdata;
-
- // Indicates that we've received a request to stepdown from PRIMARY (likely via a heartbeat)
- bool _stepDownPending;
+ /**
+ * Records that a heartbeat request failed.
+ */
+ void miss();
- // Time when stepDown command expires
- Date_t _stepDownUntil;
-
- // A time before which this node will not stand for election.
- Date_t _electionSleepUntil;
-
- // The number of calls we have had to enter maintenance mode
- int _maintenanceModeCalls;
-
- // The sub-mode of follower that we are in. Legal values are RS_SECONDARY, RS_RECOVERING,
- // RS_STARTUP2 (initial sync) and RS_ROLLBACK. Only meaningful if _role == Role::follower.
- // Configured via setFollowerMode(). If the sub-mode is RS_SECONDARY, then the effective
- // sub-mode is either RS_SECONDARY or RS_RECOVERING, depending on _maintenanceModeCalls.
- // Rather than accesing this variable direclty, one should use the getMemberState() method,
- // which computes the replica set node state on the fly.
- MemberState::MS _followerMode;
-
- typedef std::map<HostAndPort, PingStats> PingMap;
- // Ping stats for each member by HostAndPort;
- PingMap _pings;
-
- // Last vote info from the election
- struct VoteLease {
-
- static const Seconds leaseTime;
-
- Date_t when;
- int whoId = -1;
- HostAndPort whoHostAndPort;
- } _voteLease;
-
- // V1 last vote info for elections
- LastVote _lastVote;
+ /**
+ * Gets the number of hit() calls.
+ */
+ unsigned int getCount() const {
+ return count;
+ }
+ /**
+ * Gets the weighted average round trip time for heartbeat messages to the target.
+ */
+ unsigned int getMillis() const {
+ return value;
+ }
+
+ /**
+ * Gets the date at which start() was last called, which is used to determine if
+ * a heartbeat should be retried or if the time limit has expired.
+ */
+ Date_t getLastHeartbeatStartDate() const {
+ return _lastHeartbeatStartDate;
+ }
+
+ /**
+ * Gets the number of failures since start() was last called.
+ *
+ * This value is incremented by calls to miss(), cleared by calls to start() and
+ * set to the maximum possible value by calls to hit().
+ */
+ int getNumFailuresSinceLastStart() const {
+ return _numFailuresSinceLastStart;
+ }
+
+private:
+ unsigned int count;
+ unsigned int value;
+ Date_t _lastHeartbeatStartDate;
+ int _numFailuresSinceLastStart;
+};
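As a reading aid for the PingStats interface added above: the declaration shows only the accessors, and the weighting behind getMillis() is not part of this hunk. Below is a minimal standalone sketch of one way such an accumulator could maintain a weighted round-trip average. The SimplePingStats name and the 4:1 blend toward the previous value are assumptions for illustration only, and the start()/failure bookkeeping of the real class is omitted.

#include <iostream>

// Illustrative only; not the MongoDB implementation.
class SimplePingStats {
public:
    // Record one successful heartbeat round trip of 'millis' milliseconds.
    void hit(int millis) {
        ++_count;
        // First sample seeds the average; later samples are blended 4:1
        // toward the previous value (the ratio is an assumption of this sketch).
        _value = (_count == 1) ? static_cast<unsigned int>(millis)
                               : (_value * 4 + static_cast<unsigned int>(millis)) / 5;
    }
    unsigned int getCount() const { return _count; }   // number of hit() calls
    unsigned int getMillis() const { return _value; }  // weighted average RTT

private:
    unsigned int _count = 0;
    unsigned int _value = 0;
};

int main() {
    SimplePingStats stats;
    stats.hit(100);
    stats.hit(200);  // average is pulled only part of the way toward 200
    std::cout << stats.getMillis() << " ms over " << stats.getCount() << " hits\n";
}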
+
+class TopologyCoordinatorImpl : public TopologyCoordinator {
+public:
+ /**
+ * Constructs a Topology Coordinator object.
+ * @param maxSyncSourceLagSecs a sync source is re-evaluated after it lags behind further
+ * than this amount.
+ **/
+ TopologyCoordinatorImpl(Seconds maxSyncSourceLagSecs);
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Implementation of TopologyCoordinator interface
+ //
+ ////////////////////////////////////////////////////////////
+
+ virtual Role getRole() const;
+ virtual MemberState getMemberState() const;
+ virtual HostAndPort getSyncSourceAddress() const;
+ virtual std::vector<HostAndPort> getMaybeUpHostAndPorts() const;
+ virtual int getMaintenanceCount() const;
+ virtual long long getTerm() const;
+ virtual bool updateTerm(long long term);
+ virtual void setForceSyncSourceIndex(int index);
+ virtual HostAndPort chooseNewSyncSource(Date_t now, const OpTime& lastOpApplied);
+ virtual void blacklistSyncSource(const HostAndPort& host, Date_t until);
+ virtual void unblacklistSyncSource(const HostAndPort& host, Date_t now);
+ virtual void clearSyncSourceBlacklist();
+ virtual bool shouldChangeSyncSource(const HostAndPort& currentSource, Date_t now) const;
+ virtual bool becomeCandidateIfStepdownPeriodOverAndSingleNodeSet(Date_t now);
+ virtual void setElectionSleepUntil(Date_t newTime);
+ virtual void setFollowerMode(MemberState::MS newMode);
+ virtual void adjustMaintenanceCountBy(int inc);
+ virtual void prepareSyncFromResponse(const ReplicationExecutor::CallbackArgs& data,
+ const HostAndPort& target,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result);
+ virtual void prepareFreshResponse(const ReplicationCoordinator::ReplSetFreshArgs& args,
+ Date_t now,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result);
+ virtual void prepareElectResponse(const ReplicationCoordinator::ReplSetElectArgs& args,
+ Date_t now,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result);
+ virtual Status prepareHeartbeatResponse(Date_t now,
+ const ReplSetHeartbeatArgs& args,
+ const std::string& ourSetName,
+ const OpTime& lastOpApplied,
+ ReplSetHeartbeatResponse* response);
+ virtual Status prepareHeartbeatResponseV1(Date_t now,
+ const ReplSetHeartbeatArgsV1& args,
+ const std::string& ourSetName,
+ const OpTime& lastOpApplied,
+ ReplSetHeartbeatResponse* response);
+ virtual void prepareStatusResponse(const ReplicationExecutor::CallbackArgs& data,
+ Date_t now,
+ unsigned uptime,
+ const OpTime& lastOpApplied,
+ BSONObjBuilder* response,
+ Status* result);
+ virtual void fillIsMasterForReplSet(IsMasterResponse* response);
+ virtual void prepareFreezeResponse(Date_t now, int secs, BSONObjBuilder* response);
+ virtual void updateConfig(const ReplicaSetConfig& newConfig,
+ int selfIndex,
+ Date_t now,
+ const OpTime& lastOpApplied);
+ virtual std::pair<ReplSetHeartbeatArgs, Milliseconds> prepareHeartbeatRequest(
+ Date_t now, const std::string& ourSetName, const HostAndPort& target);
+ virtual std::pair<ReplSetHeartbeatArgsV1, Milliseconds> prepareHeartbeatRequestV1(
+ Date_t now, const std::string& ourSetName, const HostAndPort& target);
+ virtual HeartbeatResponseAction processHeartbeatResponse(
+ Date_t now,
+ Milliseconds networkRoundTripTime,
+ const HostAndPort& target,
+ const StatusWith<ReplSetHeartbeatResponse>& hbResponse,
+ const OpTime& myLastOpApplied);
+ virtual bool voteForMyself(Date_t now);
+ virtual void processWinElection(OID electionId, Timestamp electionOpTime);
+ virtual void processLoseElection();
+ virtual bool checkShouldStandForElection(Date_t now, const OpTime& lastOpApplied);
+ virtual void setMyHeartbeatMessage(const Date_t now, const std::string& message);
+ virtual bool stepDown(Date_t until, bool force, const OpTime& lastOpApplied);
+ virtual bool stepDownIfPending();
+ virtual Date_t getStepDownTime() const;
+ virtual void prepareCursorResponseInfo(BSONObjBuilder* objBuilder,
+ const OpTime& lastCommitttedOpTime) const;
+ Status processReplSetDeclareElectionWinner(const ReplSetDeclareElectionWinnerArgs& args,
+ long long* responseTerm);
+ virtual void processReplSetRequestVotes(const ReplSetRequestVotesArgs& args,
+ ReplSetRequestVotesResponse* response,
+ const OpTime& lastAppliedOpTime);
+ virtual void summarizeAsHtml(ReplSetHtmlSummary* output);
+ virtual void loadLastVote(const LastVote& lastVote);
+ virtual void incrementTerm();
+ virtual void voteForMyselfV1();
+ virtual long long getTerm();
+ virtual void prepareForStepDown();
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Test support methods
+ //
+ ////////////////////////////////////////////////////////////
+
+ // Changes _memberState to newMemberState. Only for testing.
+ void changeMemberState_forTest(const MemberState& newMemberState,
+ const Timestamp& electionTime = Timestamp(0, 0));
+
+ // Sets "_electionTime" to "newElectionTime". Only for testing.
+ void _setElectionTime(const Timestamp& newElectionTime);
+
+ // Sets _currentPrimaryIndex to the given index. Should only be used in unit tests!
+ // TODO(spencer): Remove this once we can easily call for an election in unit tests to
+ // set the current primary.
+ void _setCurrentPrimaryForTest(int primaryIndex);
+
+ // Returns _electionTime. Only used in unittests.
+ Timestamp getElectionTime() const;
+
+ // Returns _electionId. Only used in unittests.
+ OID getElectionId() const;
+
+ // Returns _currentPrimaryIndex. Only used in unittests.
+ int getCurrentPrimaryIndex() const;
+
+private:
+ enum UnelectableReason {
+ None = 0,
+ CannotSeeMajority = 1 << 0,
+ NotCloseEnoughToLatestOptime = 1 << 1,
+ ArbiterIAm = 1 << 2,
+ NotSecondary = 1 << 3,
+ NoPriority = 1 << 4,
+ StepDownPeriodActive = 1 << 5,
+ NoData = 1 << 6,
+ NotInitialized = 1 << 7,
+ VotedTooRecently = 1 << 8,
+ RefusesToStand = 1 << 9
};
+ typedef int UnelectableReasonMask;
+
+ // Returns the number of heartbeat pings which have occurred.
+ int _getTotalPings();
+
+ // Returns the current "ping" value for the given member by their address
+ int _getPing(const HostAndPort& host);
-} // namespace repl
-} // namespace mongo
+ // Determines if we will veto the member specified by "args.id", given that the last op
+ // we have applied locally is "lastOpApplied".
+ // If we veto, the errmsg will be filled in with a reason
+ bool _shouldVetoMember(const ReplicationCoordinator::ReplSetFreshArgs& args,
+ const Date_t& now,
+ const OpTime& lastOpApplied,
+ std::string* errmsg) const;
+
+ // Returns the index of the member with the matching id, or -1 if none match.
+ int _getMemberIndex(int id) const;
+
+ // Checks whether a majority of votes are held by members that are currently "up"
+ bool _aMajoritySeemsToBeUp() const;
+
+ // Is otherOpTime close enough (within 10 seconds) to the latest known optime to qualify
+ // for an election
+ bool _isOpTimeCloseEnoughToLatestToElect(const OpTime& otherOpTime,
+ const OpTime& ourLastOpApplied) const;
+
+ // Returns reason why "self" member is unelectable
+ UnelectableReasonMask _getMyUnelectableReason(const Date_t now,
+ const OpTime& lastOpApplied) const;
+
+ // Returns reason why memberIndex is unelectable
+ UnelectableReasonMask _getUnelectableReason(int memberIndex, const OpTime& lastOpApplied) const;
+
+ // Returns the nice text of why the node is unelectable
+ std::string _getUnelectableReasonString(UnelectableReasonMask ur) const;
+
+ // Returns true if we are currently primary
+ bool _iAmPrimary() const;
+
+ // Scans through all members that are 'up' and returns the latest known optime.
+ OpTime _latestKnownOpTime(const OpTime& ourLastOpApplied) const;
+
+ // Scans the electable set and returns the highest priority member index
+ int _getHighestPriorityElectableIndex(Date_t now, const OpTime& lastOpApplied) const;
+
+ // Returns true if "one" member is higher priority than "two" member
+ bool _isMemberHigherPriority(int memberOneIndex, int memberTwoIndex) const;
+
+ // Helper shortcut to self config
+ const MemberConfig& _selfConfig() const;
+
+ // Returns NULL if there is no primary, or the MemberConfig* for the current primary
+ const MemberConfig* _currentPrimaryMember() const;
+
+ /**
+ * Updates "_hbdata" and "_currentPrimaryIndex" on behalf of processHeartbeatResponse().
+ */
+ HeartbeatResponseAction _updateHeartbeatDataImpl(int updatedConfigIndex,
+ const MemberState& originalState,
+ Date_t now,
+ const OpTime& lastOpApplied);
+
+ /**
+ * Updates _hbdata based on the newConfig, ensuring that every member in the newConfig
+ * has an entry in _hbdata. If any nodes in the newConfig are also present in
+ * _currentConfig, copies their heartbeat info into the corresponding entry in the updated
+ * _hbdata vector.
+ */
+ void _updateHeartbeatDataForReconfig(const ReplicaSetConfig& newConfig,
+ int selfIndex,
+ Date_t now);
+
+ void _stepDownSelfAndReplaceWith(int newPrimary);
+
+ MemberState _getMyState() const;
+
+ /**
+ * Looks up the provided member in the blacklist and returns true if the member's blacklist
+ * expire time is after 'now'. If the member is found but the expire time is before 'now',
+ * the function returns false. If the member is not found in the blacklist, the function
+ * returns false.
+ **/
+ bool _memberIsBlacklisted(const MemberConfig& memberConfig, Date_t now) const;
+
+ // This node's role in the replication protocol.
+ Role _role;
+
+ // This is a unique id that is generated and set each time we transition to PRIMARY, as the
+ // result of an election.
+ OID _electionId;
+ // The time at which the current PRIMARY was elected.
+ Timestamp _electionTime;
+
+ // This node's election term. The term is used as part of the consensus algorithm to elect
+ // and maintain one primary (leader) node in the cluster.
+ long long _term = 0;
+
+ // the index of the member we currently believe is primary, if one exists, otherwise -1
+ int _currentPrimaryIndex;
+
+ // the hostandport we are currently syncing from
+ // empty if no sync source (we are primary, or we cannot connect to anyone yet)
+ HostAndPort _syncSource;
+ // These members are not chosen as sync sources for a period of time, due to connection
+ // issues with them
+ std::map<HostAndPort, Date_t> _syncSourceBlacklist;
+ // The next sync source to be chosen, requested via a replSetSyncFrom command
+ int _forceSyncSourceIndex;
+ // How far this node must fall behind before considering switching sync sources
+ Seconds _maxSyncSourceLagSecs;
+
+ // "heartbeat message"
+ // sent in the response to requestHeartbeat, in the "hbm" field
+ std::string _hbmsg;
+ Date_t _hbmsgTime; // when it was logged
+
+ // heartbeat msg to send to others; descriptive diagnostic info
+ std::string _getHbmsg(Date_t now) const;
+
+ int _selfIndex; // this node's index in _members and _currentConfig
+
+ ReplicaSetConfig _rsConfig; // The current config, including a vector of MemberConfigs
+
+ // heartbeat data for each member. It is guaranteed that this vector will be maintained
+ // in the same order as the MemberConfigs in _currentConfig, therefore the member config
+ // index can be used to index into this vector as well.
+ std::vector<MemberHeartbeatData> _hbdata;
+
+ // Indicates that we've received a request to stepdown from PRIMARY (likely via a heartbeat)
+ bool _stepDownPending;
+
+ // Time when stepDown command expires
+ Date_t _stepDownUntil;
+
+ // A time before which this node will not stand for election.
+ Date_t _electionSleepUntil;
+
+ // The number of calls we have had to enter maintenance mode
+ int _maintenanceModeCalls;
+
+ // The sub-mode of follower that we are in. Legal values are RS_SECONDARY, RS_RECOVERING,
+ // RS_STARTUP2 (initial sync) and RS_ROLLBACK. Only meaningful if _role == Role::follower.
+ // Configured via setFollowerMode(). If the sub-mode is RS_SECONDARY, then the effective
+ // sub-mode is either RS_SECONDARY or RS_RECOVERING, depending on _maintenanceModeCalls.
+ // Rather than accessing this variable directly, one should use the getMemberState() method,
+ // which computes the replica set node state on the fly.
+ MemberState::MS _followerMode;
+
+ typedef std::map<HostAndPort, PingStats> PingMap;
+ // Ping stats for each member, keyed by HostAndPort.
+ PingMap _pings;
+
+ // Last vote info from the election
+ struct VoteLease {
+ static const Seconds leaseTime;
+
+ Date_t when;
+ int whoId = -1;
+ HostAndPort whoHostAndPort;
+ } _voteLease;
+
+ // V1 last vote info for elections
+ LastVote _lastVote;
+};
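The private members above describe sync-source blacklisting as a map from host to expiry date, and _memberIsBlacklisted() is documented to return true only while that expiry is still in the future. The following standalone sketch illustrates just that check; std::string and std::chrono stand in for the mongo HostAndPort and Date_t types, and the SyncSourceBlacklist name is an assumption for the example.

#include <chrono>
#include <iostream>
#include <map>
#include <string>

using Clock = std::chrono::steady_clock;

// Illustrative only; mirrors the documented expiry semantics, not the real class.
class SyncSourceBlacklist {
public:
    // Do not consider 'host' as a sync source until 'until'.
    void blacklist(const std::string& host, Clock::time_point until) {
        _entries[host] = until;
    }
    // True only if the host is present and its expiry is still in the future.
    bool isBlacklisted(const std::string& host, Clock::time_point now) const {
        auto it = _entries.find(host);
        return it != _entries.end() && it->second > now;
    }

private:
    std::map<std::string, Clock::time_point> _entries;
};

int main() {
    SyncSourceBlacklist bl;
    auto now = Clock::now();
    bl.blacklist("h3:27017", now + std::chrono::seconds(10));
    std::cout << bl.isBlacklisted("h3:27017", now) << "\n";                             // 1
    std::cout << bl.isBlacklisted("h3:27017", now + std::chrono::seconds(11)) << "\n";  // 0
}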
+
+} // namespace repl
+} // namespace mongo
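The UnelectableReason enum in the private section of the header above packs independent reasons into single power-of-two bits so that several can be reported at once through one int-typed UnelectableReasonMask. A short sketch of composing and decoding such a mask follows; only a subset of the flags is reproduced, and the describe() helper is hypothetical, standing in for _getUnelectableReasonString().

#include <iostream>
#include <string>

// Subset of the flags declared in the header above, for illustration.
enum UnelectableReason {
    None = 0,
    CannotSeeMajority = 1 << 0,
    NotCloseEnoughToLatestOptime = 1 << 1,
    ArbiterIAm = 1 << 2,
    NotSecondary = 1 << 3,
};
typedef int UnelectableReasonMask;

// Hypothetical decoder: renders every flag set in the mask.
std::string describe(UnelectableReasonMask ur) {
    if (ur == None)
        return "electable";
    std::string out;
    if (ur & CannotSeeMajority)
        out += "cannot see a majority; ";
    if (ur & NotSecondary)
        out += "not a secondary; ";
    if (ur & NotCloseEnoughToLatestOptime)
        out += "too far behind the latest optime; ";
    if (ur & ArbiterIAm)
        out += "is an arbiter; ";
    return out;
}

int main() {
    UnelectableReasonMask mask = None;
    mask |= CannotSeeMajority;  // several reasons can apply at once
    mask |= NotSecondary;
    std::cout << describe(mask) << "\n";
}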
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index 7378feb4067..b791ade3b96 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -53,1628 +53,1372 @@ namespace mongo {
namespace repl {
namespace {
- Date_t operator++(Date_t& d, int) {
- Date_t result = d;
- d += Milliseconds(1);
- return result;
+Date_t operator++(Date_t& d, int) {
+ Date_t result = d;
+ d += Milliseconds(1);
+ return result;
+}
+
+bool stringContains(const std::string& haystack, const std::string& needle) {
+ return haystack.find(needle) != std::string::npos;
+}
+
+class TopoCoordTest : public mongo::unittest::Test {
+public:
+ virtual void setUp() {
+ _topo.reset(new TopologyCoordinatorImpl(Seconds(100)));
+ _now = Date_t();
+ _selfIndex = -1;
+ _cbData.reset(new ReplicationExecutor::CallbackArgs(
+ NULL, ReplicationExecutor::CallbackHandle(), Status::OK()));
}
- bool stringContains(const std::string &haystack, const std::string& needle) {
- return haystack.find(needle) != std::string::npos;
+ virtual void tearDown() {
+ _topo.reset(NULL);
+ _cbData.reset(NULL);
}
- class TopoCoordTest : public mongo::unittest::Test {
- public:
- virtual void setUp() {
- _topo.reset(new TopologyCoordinatorImpl(Seconds(100)));
- _now = Date_t();
- _selfIndex = -1;
- _cbData.reset(new ReplicationExecutor::CallbackArgs(
- NULL, ReplicationExecutor::CallbackHandle(), Status::OK()));
- }
-
- virtual void tearDown() {
- _topo.reset(NULL);
- _cbData.reset(NULL);
- }
-
- protected:
- TopologyCoordinatorImpl& getTopoCoord() {return *_topo;}
- ReplicationExecutor::CallbackArgs cbData() {return *_cbData;}
- Date_t& now() {return _now;}
-
- int64_t countLogLinesContaining(const std::string& needle) {
- return std::count_if(getCapturedLogMessages().begin(),
- getCapturedLogMessages().end(),
- stdx::bind(stringContains,
- stdx::placeholders::_1,
- needle));
- }
-
- void makeSelfPrimary(const Timestamp& electionOpTime = Timestamp(0,0)) {
- getTopoCoord().changeMemberState_forTest(MemberState::RS_PRIMARY, electionOpTime);
- getTopoCoord()._setCurrentPrimaryForTest(_selfIndex);
- }
-
- void setSelfMemberState(const MemberState& newState) {
- getTopoCoord().changeMemberState_forTest(newState);
- }
-
- int getCurrentPrimaryIndex() {
- return getTopoCoord().getCurrentPrimaryIndex();
- }
- // Update config and set selfIndex
- // If "now" is passed in, set _now to now+1
- void updateConfig(BSONObj cfg,
- int selfIndex,
- Date_t now = Date_t::fromMillisSinceEpoch(-1),
- const OpTime& lastOp = OpTime()) {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(cfg));
- ASSERT_OK(config.validate());
-
- _selfIndex = selfIndex;
-
- if (now == Date_t::fromMillisSinceEpoch(-1)) {
- getTopoCoord().updateConfig(config, selfIndex, _now, lastOp);
- _now += Milliseconds(1);
- }
- else {
- invariant(now > _now);
- getTopoCoord().updateConfig(config, selfIndex, now, lastOp);
- _now = now + Milliseconds(1);
- }
- }
-
- HeartbeatResponseAction receiveUpHeartbeat(
- const HostAndPort& member,
- const std::string& setName,
- MemberState memberState,
- const OpTime& electionTime,
- const OpTime& lastOpTimeSender,
- const OpTime& lastOpTimeReceiver) {
- return _receiveHeartbeatHelper(Status::OK(),
- member,
- setName,
- memberState,
- electionTime.getTimestamp(),
- lastOpTimeSender,
- lastOpTimeReceiver,
- Milliseconds(1));
- }
-
- HeartbeatResponseAction receiveDownHeartbeat(
- const HostAndPort& member,
- const std::string& setName,
- const OpTime& lastOpTimeReceiver,
- ErrorCodes::Error errcode = ErrorCodes::HostUnreachable) {
- // timed out heartbeat to mark a node as down
-
- Milliseconds roundTripTime{ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod};
- return _receiveHeartbeatHelper(Status(errcode, ""),
- member,
- setName,
- MemberState::RS_UNKNOWN,
- Timestamp(),
- OpTime(),
- lastOpTimeReceiver,
- roundTripTime);
- }
-
- HeartbeatResponseAction heartbeatFromMember(const HostAndPort& member,
- const std::string& setName,
- MemberState memberState,
- const OpTime& lastOpTimeSender,
- Milliseconds roundTripTime = Milliseconds(1)) {
- return _receiveHeartbeatHelper(Status::OK(),
- member,
- setName,
- memberState,
- Timestamp(),
- lastOpTimeSender,
- OpTime(),
- roundTripTime);
- }
-
- private:
-
- HeartbeatResponseAction _receiveHeartbeatHelper(Status responseStatus,
- const HostAndPort& member,
- const std::string& setName,
- MemberState memberState,
- Timestamp electionTime,
- const OpTime& lastOpTimeSender,
- const OpTime& lastOpTimeReceiver,
- Milliseconds roundTripTime) {
-
- ReplSetHeartbeatResponse hb;
- hb.setConfigVersion(1);
- hb.setState(memberState);
- hb.setOpTime(lastOpTimeSender);
- hb.setElectionTime(electionTime);
-
- StatusWith<ReplSetHeartbeatResponse> hbResponse =
- responseStatus.isOK() ?
- StatusWith<ReplSetHeartbeatResponse>(hb) :
- StatusWith<ReplSetHeartbeatResponse>(responseStatus);
-
- getTopoCoord().prepareHeartbeatRequest(now(),
- setName,
- member);
- now() += roundTripTime;
- return getTopoCoord().processHeartbeatResponse(now(),
- roundTripTime,
- member,
- hbResponse,
- lastOpTimeReceiver);
- }
-
- private:
- unique_ptr<TopologyCoordinatorImpl> _topo;
- unique_ptr<ReplicationExecutor::CallbackArgs> _cbData;
- Date_t _now;
- int _selfIndex;
- };
-
- TEST_F(TopoCoordTest, ChooseSyncSourceBasic) {
- // if we do not have an index in the config, we should get an empty syncsource
- HostAndPort newSyncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_TRUE(newSyncSource.empty());
-
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- // member h2 is the furthest ahead
- heartbeatFromMember(HostAndPort("h2"),
- "rs0",
- MemberState::RS_SECONDARY,
- OpTime(Timestamp(1,0), 0));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
-
- // We start with no sync source
- ASSERT(getTopoCoord().getSyncSourceAddress().empty());
-
- // Fail due to insufficient number of pings
- newSyncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(getTopoCoord().getSyncSourceAddress(), newSyncSource);
- ASSERT(getTopoCoord().getSyncSourceAddress().empty());
-
- // Record 2nd round of pings to allow choosing a new sync source; all members equidistant
- heartbeatFromMember(HostAndPort("h2"),
- "rs0",
- MemberState::RS_SECONDARY,
- OpTime(Timestamp(1,0), 0));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
-
- // Should choose h2, since it is furthest ahead
- newSyncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(getTopoCoord().getSyncSourceAddress(), newSyncSource);
- ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
-
- // h3 becomes further ahead, so it should be chosen
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2,0), 0));
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
-
- // h3 becomes an invalid candidate for sync source; should choose h2 again
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_RECOVERING,
- OpTime(Timestamp(2,0), 0));
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
-
- // h3 back in SECONDARY and ahead
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2,0), 0));
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
-
- // h3 goes down
- receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime());
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
-
- // h3 back up and ahead
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2,0), 0));
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
-
+protected:
+ TopologyCoordinatorImpl& getTopoCoord() {
+ return *_topo;
}
-
- TEST_F(TopoCoordTest, ChooseSyncSourceCandidates) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "hself") <<
- BSON("_id" << 10 << "host" << "h1") <<
- BSON("_id" << 20 << "host" << "h2" <<
- "buildIndexes" << false << "priority" << 0) <<
- BSON("_id" << 30 << "host" << "h3" <<
- "hidden" << true << "priority" << 0 << "votes" << 0) <<
- BSON("_id" << 40 << "host" << "h4" <<"arbiterOnly" << true) <<
- BSON("_id" << 50 << "host" << "h5" <<
- "slaveDelay" << 1 << "priority" << 0) <<
- BSON("_id" << 60 << "host" << "h6") <<
- BSON("_id" << 70 << "host" << "hprimary"))),
- 0);
-
- setSelfMemberState(MemberState::RS_SECONDARY);
- OpTime lastOpTimeWeApplied = OpTime(Timestamp(100,0), 0);
-
- heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(700));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(600));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(500));
- heartbeatFromMember(HostAndPort("h4"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(400));
- heartbeatFromMember(HostAndPort("h5"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(300));
-
- // This node is lagged further than maxSyncSourceLagSeconds.
- heartbeatFromMember(HostAndPort("h6"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(499, 0), 0), Milliseconds(200));
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- heartbeatFromMember(HostAndPort("hprimary"), "rs0", MemberState::RS_PRIMARY,
- OpTime(Timestamp(600, 0), 0), Milliseconds(100));
- ASSERT_EQUALS(7, getCurrentPrimaryIndex());
-
- // Record 2nd round of pings to allow choosing a new sync source
- heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(700));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(600));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(500));
- heartbeatFromMember(HostAndPort("h4"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(400));
- heartbeatFromMember(HostAndPort("h5"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(501, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h6"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(499, 0), 0), Milliseconds(200));
- heartbeatFromMember(HostAndPort("hprimary"), "rs0", MemberState::RS_PRIMARY,
- OpTime(Timestamp(600, 0), 0), Milliseconds(100));
-
- // Should choose primary first; it's closest
- getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
- ASSERT_EQUALS(HostAndPort("hprimary"), getTopoCoord().getSyncSourceAddress());
-
- // Primary goes far far away
- heartbeatFromMember(HostAndPort("hprimary"), "rs0", MemberState::RS_PRIMARY,
- OpTime(Timestamp(600, 0), 0), Milliseconds(100000000));
-
- // Should choose h4. (if an arbiter has an oplog, it's a valid sync source)
- // h6 is not considered because it is outside the maxSyncLagSeconds window,
- getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
- ASSERT_EQUALS(HostAndPort("h4"), getTopoCoord().getSyncSourceAddress());
-
- // h4 goes down; should choose h1
- receiveDownHeartbeat(HostAndPort("h4"), "rs0", OpTime());
- getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
- ASSERT_EQUALS(HostAndPort("h1"), getTopoCoord().getSyncSourceAddress());
-
- // Primary and h1 go down; should choose h6
- receiveDownHeartbeat(HostAndPort("h1"), "rs0", OpTime());
- receiveDownHeartbeat(HostAndPort("hprimary"), "rs0", OpTime());
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
- ASSERT_EQUALS(HostAndPort("h6"), getTopoCoord().getSyncSourceAddress());
-
- // h6 goes down; should choose h5
- receiveDownHeartbeat(HostAndPort("h6"), "rs0", OpTime());
- getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
- ASSERT_EQUALS(HostAndPort("h5"), getTopoCoord().getSyncSourceAddress());
-
- // h5 goes down; should choose h3
- receiveDownHeartbeat(HostAndPort("h5"), "rs0", OpTime());
- getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
-
- // h3 goes down; no sync source candidates remain
- receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime());
- getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
- ASSERT(getTopoCoord().getSyncSourceAddress().empty());
+ ReplicationExecutor::CallbackArgs cbData() {
+ return *_cbData;
}
-
-
- TEST_F(TopoCoordTest, ChooseSyncSourceChainingNotAllowed) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "settings" << BSON("chainingAllowed" << false) <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
-
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(0, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(0, 0), 0), Milliseconds(300));
-
- // No primary situation: should choose no sync source.
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT(getTopoCoord().getSyncSourceAddress().empty());
-
- // Add primary
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_PRIMARY,
- OpTime(Timestamp(0, 0), 0), Milliseconds(300));
- ASSERT_EQUALS(2, getCurrentPrimaryIndex());
-
- // h3 is primary and should be chosen as sync source, despite being further away than h2
- // and the primary (h3) being behind our most recently applied optime
- getTopoCoord().chooseNewSyncSource(now()++, OpTime(Timestamp(10,0), 0));
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
-
+ Date_t& now() {
+ return _now;
}
- TEST_F(TopoCoordTest, EmptySyncSourceOnPrimary) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
-
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(0, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(0, 0), 0), Milliseconds(300));
-
- // No primary situation: should choose h2 sync source.
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
-
- // Become primary
- makeSelfPrimary(Timestamp(3.0));
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- // Check sync source
- ASSERT_EQUALS(HostAndPort(), getTopoCoord().getSyncSourceAddress());
+ int64_t countLogLinesContaining(const std::string& needle) {
+ return std::count_if(getCapturedLogMessages().begin(),
+ getCapturedLogMessages().end(),
+ stdx::bind(stringContains, stdx::placeholders::_1, needle));
}
- TEST_F(TopoCoordTest, ForceSyncSource) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
-
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- // two rounds of heartbeat pings from each member
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
-
- // force should overrule other defaults
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
- getTopoCoord().setForceSyncSourceIndex(1);
- // force should cause shouldChangeSyncSource() to return true
- // even if the currentSource is the force target
- ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("h2"), now()));
- ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("h3"), now()));
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
-
- // force should only work for one call to chooseNewSyncSource
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+ void makeSelfPrimary(const Timestamp& electionOpTime = Timestamp(0, 0)) {
+ getTopoCoord().changeMemberState_forTest(MemberState::RS_PRIMARY, electionOpTime);
+ getTopoCoord()._setCurrentPrimaryForTest(_selfIndex);
}
- TEST_F(TopoCoordTest, BlacklistSyncSource) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
-
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
-
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
-
- Date_t expireTime = Date_t::fromMillisSinceEpoch(1000);
- getTopoCoord().blacklistSyncSource(HostAndPort("h3"), expireTime);
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- // Should choose second best choice now that h3 is blacklisted.
- ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
-
- // After time has passed, should go back to original sync source
- getTopoCoord().chooseNewSyncSource(expireTime, OpTime());
- ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+ void setSelfMemberState(const MemberState& newState) {
+ getTopoCoord().changeMemberState_forTest(newState);
}
- TEST_F(TopoCoordTest, BlacklistSyncSourceNoChaining) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "settings" << BSON("chainingAllowed" << false) <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
-
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_PRIMARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_PRIMARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
-
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
-
- Date_t expireTime = Date_t::fromMillisSinceEpoch(1000);
- getTopoCoord().blacklistSyncSource(HostAndPort("h2"), expireTime);
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- // Can't choose any sync source now.
- ASSERT(getTopoCoord().getSyncSourceAddress().empty());
-
- // After time has passed, should go back to the primary
- getTopoCoord().chooseNewSyncSource(expireTime, OpTime());
- ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+ int getCurrentPrimaryIndex() {
+ return getTopoCoord().getCurrentPrimaryIndex();
}
-
- TEST_F(TopoCoordTest, OnlyUnauthorizedUpCausesRecovering) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
-
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- // Generate enough heartbeats to select a sync source below
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(300));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(2, 0), 0), Milliseconds(100));
-
- ASSERT_EQUALS(HostAndPort("h3"),
- getTopoCoord().chooseNewSyncSource(now()++, OpTime()));
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
- // Good state setup done
-
- // Mark nodes down, ensure that we have no source and are secondary
- receiveDownHeartbeat(HostAndPort("h2"), "rs0", OpTime(), ErrorCodes::NetworkTimeout);
- receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime(), ErrorCodes::NetworkTimeout);
- ASSERT_TRUE(getTopoCoord().chooseNewSyncSource(now()++, OpTime()).empty());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
-
- // Mark nodes down + unauth, ensure that we have no source and are secondary
- receiveDownHeartbeat(HostAndPort("h2"), "rs0", OpTime(), ErrorCodes::NetworkTimeout);
- receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime(), ErrorCodes::Unauthorized);
- ASSERT_TRUE(getTopoCoord().chooseNewSyncSource(now()++, OpTime()).empty());
- ASSERT_EQUALS(MemberState::RS_RECOVERING, getTopoCoord().getMemberState().s);
-
- // Having an auth error but with another node up should bring us out of RECOVERING
- HeartbeatResponseAction action = receiveUpHeartbeat(HostAndPort("h2"),
- "rs0",
- MemberState::RS_SECONDARY,
- OpTime(),
- OpTime(Timestamp(2, 0), 0),
- OpTime(Timestamp(2, 0), 0));
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
- // Test that the heartbeat that brings us from RECOVERING to SECONDARY doesn't initiate
- // an election (SERVER-17164)
- ASSERT_NO_ACTION(action.getAction());
+ // Update config and set selfIndex
+ // If "now" is passed in, set _now to now+1
+ void updateConfig(BSONObj cfg,
+ int selfIndex,
+ Date_t now = Date_t::fromMillisSinceEpoch(-1),
+ const OpTime& lastOp = OpTime()) {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(cfg));
+ ASSERT_OK(config.validate());
+
+ _selfIndex = selfIndex;
+
+ if (now == Date_t::fromMillisSinceEpoch(-1)) {
+ getTopoCoord().updateConfig(config, selfIndex, _now, lastOp);
+ _now += Milliseconds(1);
+ } else {
+ invariant(now > _now);
+ getTopoCoord().updateConfig(config, selfIndex, now, lastOp);
+ _now = now + Milliseconds(1);
+ }
}
- TEST_F(TopoCoordTest, ReceiveHeartbeatWhileAbsentFromConfig) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "h1") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- -1);
- ASSERT_NO_ACTION(heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY,
- OpTime(Timestamp(1, 0), 0), Milliseconds(300)).getAction());
+ HeartbeatResponseAction receiveUpHeartbeat(const HostAndPort& member,
+ const std::string& setName,
+ MemberState memberState,
+ const OpTime& electionTime,
+ const OpTime& lastOpTimeSender,
+ const OpTime& lastOpTimeReceiver) {
+ return _receiveHeartbeatHelper(Status::OK(),
+ member,
+ setName,
+ memberState,
+ electionTime.getTimestamp(),
+ lastOpTimeSender,
+ lastOpTimeReceiver,
+ Milliseconds(1));
}
- TEST_F(TopoCoordTest, PrepareSyncFromResponse) {
- OpTime staleOpTime(Timestamp(1, 1), 0);
- OpTime ourOpTime(Timestamp(staleOpTime.getSecs() + 11, 1), 0);
-
- Status result = Status::OK();
- BSONObjBuilder response;
-
- // if we do not have an index in the config, we should get ErrorCodes::NotSecondary
- getTopoCoord().prepareSyncFromResponse(cbData(), HostAndPort("h1"),
- ourOpTime, &response, &result);
- ASSERT_EQUALS(ErrorCodes::NotSecondary, result);
- ASSERT_EQUALS("Removed and uninitialized nodes do not sync", result.reason());
-
- // Test trying to sync from another node when we are an arbiter
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 <<
- "host" << "hself" <<
- "arbiterOnly" << true) <<
- BSON("_id" << 1 <<
- "host" << "h1"))),
- 0);
-
- getTopoCoord().prepareSyncFromResponse(cbData(), HostAndPort("h1"),
- ourOpTime, &response, &result);
- ASSERT_EQUALS(ErrorCodes::NotSecondary, result);
- ASSERT_EQUALS("arbiters don't sync", result.reason());
-
- // Set up config for the rest of the tests
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "hself") <<
- BSON("_id" << 1 << "host" << "h1" << "arbiterOnly" << true) <<
- BSON("_id" << 2 << "host" << "h2" <<
- "priority" << 0 << "buildIndexes" << false) <<
- BSON("_id" << 3 << "host" << "h3") <<
- BSON("_id" << 4 << "host" << "h4") <<
- BSON("_id" << 5 << "host" << "h5") <<
- BSON("_id" << 6 << "host" << "h6"))),
- 0);
-
- // Try to sync while PRIMARY
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- makeSelfPrimary();
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
- getTopoCoord()._setCurrentPrimaryForTest(0);
- BSONObjBuilder response1;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h3"), ourOpTime, &response1, &result);
- ASSERT_EQUALS(ErrorCodes::NotSecondary, result);
- ASSERT_EQUALS("primaries don't sync", result.reason());
- ASSERT_EQUALS("h3:27017", response1.obj()["syncFromRequested"].String());
-
- // Try to sync from non-existent member
- setSelfMemberState(MemberState::RS_SECONDARY);
- getTopoCoord()._setCurrentPrimaryForTest(-1);
- BSONObjBuilder response2;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("fakemember"), ourOpTime, &response2, &result);
- ASSERT_EQUALS(ErrorCodes::NodeNotFound, result);
- ASSERT_EQUALS("Could not find member \"fakemember:27017\" in replica set", result.reason());
-
- // Try to sync from self
- BSONObjBuilder response3;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("hself"), ourOpTime, &response3, &result);
- ASSERT_EQUALS(ErrorCodes::InvalidOptions, result);
- ASSERT_EQUALS("I cannot sync from myself", result.reason());
-
- // Try to sync from an arbiter
- BSONObjBuilder response4;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h1"), ourOpTime, &response4, &result);
- ASSERT_EQUALS(ErrorCodes::InvalidOptions, result);
- ASSERT_EQUALS("Cannot sync from \"h1:27017\" because it is an arbiter", result.reason());
-
- // Try to sync from a node that doesn't build indexes
- BSONObjBuilder response5;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h2"), ourOpTime, &response5, &result);
- ASSERT_EQUALS(ErrorCodes::InvalidOptions, result);
- ASSERT_EQUALS("Cannot sync from \"h2:27017\" because it does not build indexes",
- result.reason());
-
- // Try to sync from a member that is down
- receiveDownHeartbeat(HostAndPort("h4"), "rs0", OpTime());
-
- BSONObjBuilder response7;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h4"), ourOpTime, &response7, &result);
- ASSERT_EQUALS(ErrorCodes::HostUnreachable, result);
- ASSERT_EQUALS("I cannot reach the requested member: h4:27017", result.reason());
-
- // Sync successfully from a member that is stale
- heartbeatFromMember(HostAndPort("h5"), "rs0", MemberState::RS_SECONDARY,
- staleOpTime, Milliseconds(100));
-
- BSONObjBuilder response8;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h5"), ourOpTime, &response8, &result);
- ASSERT_OK(result);
- ASSERT_EQUALS("requested member \"h5:27017\" is more than 10 seconds behind us",
- response8.obj()["warning"].String());
- getTopoCoord().chooseNewSyncSource(now()++, ourOpTime);
- ASSERT_EQUALS(HostAndPort("h5"), getTopoCoord().getSyncSourceAddress());
-
- // Sync successfully from an up-to-date member
- heartbeatFromMember(HostAndPort("h6"), "rs0", MemberState::RS_SECONDARY,
- ourOpTime, Milliseconds(100));
-
- BSONObjBuilder response9;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h6"), ourOpTime, &response9, &result);
- ASSERT_OK(result);
- BSONObj response9Obj = response9.obj();
- ASSERT_FALSE(response9Obj.hasField("warning"));
- ASSERT_EQUALS(HostAndPort("h5").toString(), response9Obj["prevSyncTarget"].String());
- getTopoCoord().chooseNewSyncSource(now()++, ourOpTime);
- ASSERT_EQUALS(HostAndPort("h6"), getTopoCoord().getSyncSourceAddress());
-
- // node goes down between forceSync and chooseNewSyncSource
- BSONObjBuilder response10;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h6"), ourOpTime, &response10, &result);
- BSONObj response10Obj = response10.obj();
- ASSERT_FALSE(response10Obj.hasField("warning"));
- ASSERT_EQUALS(HostAndPort("h6").toString(), response10Obj["prevSyncTarget"].String());
- receiveDownHeartbeat(HostAndPort("h6"), "rs0", OpTime());
- HostAndPort syncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h6"), syncSource);
-
- // Try to sync from a member that is unauth'd
- receiveDownHeartbeat(HostAndPort("h5"), "rs0", OpTime(), ErrorCodes::Unauthorized);
-
- BSONObjBuilder response11;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h5"), ourOpTime, &response11, &result);
- ASSERT_NOT_OK(result);
- ASSERT_EQUALS(ErrorCodes::Unauthorized, result.code());
- ASSERT_EQUALS("not authorized to communicate with h5:27017",
- result.reason());
-
- // Sync successfully from an up-to-date member.
- heartbeatFromMember(HostAndPort("h6"), "rs0", MemberState::RS_SECONDARY,
- ourOpTime, Milliseconds(100));
- BSONObjBuilder response12;
- getTopoCoord().prepareSyncFromResponse(
- cbData(), HostAndPort("h6"), ourOpTime, &response12, &result);
- ASSERT_OK(result);
- syncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
- ASSERT_EQUALS(HostAndPort("h6"), syncSource);
+ HeartbeatResponseAction receiveDownHeartbeat(
+ const HostAndPort& member,
+ const std::string& setName,
+ const OpTime& lastOpTimeReceiver,
+ ErrorCodes::Error errcode = ErrorCodes::HostUnreachable) {
+ // timed out heartbeat to mark a node as down
+
+ Milliseconds roundTripTime{ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod};
+ return _receiveHeartbeatHelper(Status(errcode, ""),
+ member,
+ setName,
+ MemberState::RS_UNKNOWN,
+ Timestamp(),
+ OpTime(),
+ lastOpTimeReceiver,
+ roundTripTime);
}
- TEST_F(TopoCoordTest, ReplSetGetStatus) {
- // This test starts by configuring a TopologyCoordinator as a member of a 4 node replica
- // set, with each node in a different state.
- // The first node is DOWN, as if we tried heartbeating them and it failed in some way.
- // The second node is in state SECONDARY, as if we've received a valid heartbeat from them.
- // The third node is in state UNKNOWN, as if we've not yet had any heartbeating activity
- // with them yet. The fourth node is PRIMARY and corresponds to ourself, which gets its
- // information for replSetGetStatus from a different source than the nodes that aren't
- // ourself. After this setup, we call prepareStatusResponse and make sure that the fields
- // returned for each member match our expectations.
- Date_t startupTime = Date_t::fromMillisSinceEpoch(100);
- Date_t heartbeatTime = Date_t::fromMillisSinceEpoch(5000);
- Seconds uptimeSecs(10);
- Date_t curTime = heartbeatTime + uptimeSecs;
- Timestamp electionTime(1, 2);
- OpTime oplogProgress(Timestamp(3, 4), 0);
- std::string setName = "mySet";
+ HeartbeatResponseAction heartbeatFromMember(const HostAndPort& member,
+ const std::string& setName,
+ MemberState memberState,
+ const OpTime& lastOpTimeSender,
+ Milliseconds roundTripTime = Milliseconds(1)) {
+ return _receiveHeartbeatHelper(Status::OK(),
+ member,
+ setName,
+ memberState,
+ Timestamp(),
+ lastOpTimeSender,
+ OpTime(),
+ roundTripTime);
+ }
+private:
+ HeartbeatResponseAction _receiveHeartbeatHelper(Status responseStatus,
+ const HostAndPort& member,
+ const std::string& setName,
+ MemberState memberState,
+ Timestamp electionTime,
+ const OpTime& lastOpTimeSender,
+ const OpTime& lastOpTimeReceiver,
+ Milliseconds roundTripTime) {
ReplSetHeartbeatResponse hb;
hb.setConfigVersion(1);
- hb.setState(MemberState::RS_SECONDARY);
+ hb.setState(memberState);
+ hb.setOpTime(lastOpTimeSender);
hb.setElectionTime(electionTime);
- hb.setHbMsg("READY");
- hb.setOpTime(oplogProgress);
- StatusWith<ReplSetHeartbeatResponse> hbResponseGood =
- StatusWith<ReplSetHeartbeatResponse>(hb);
-
- updateConfig(BSON("_id" << setName <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "test0:1234") <<
- BSON("_id" << 1 << "host" << "test1:1234") <<
- BSON("_id" << 2 << "host" << "test2:1234") <<
- BSON("_id" << 3 << "host" << "test3:1234"))),
- 3,
- startupTime + Milliseconds(1));
-
- // Now that the replica set is setup, put the members into the states we want them in.
- HostAndPort member = HostAndPort("test0:1234");
- getTopoCoord().prepareHeartbeatRequest(startupTime + Milliseconds(1), setName, member);
- getTopoCoord().processHeartbeatResponse(startupTime + Milliseconds(2),
- Milliseconds(1),
- member,
- hbResponseGood,
- OpTime());
- getTopoCoord().prepareHeartbeatRequest(startupTime + Milliseconds(3), setName, member);
- Date_t timeoutTime = startupTime + Milliseconds(3) +
- ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod;
-
- StatusWith<ReplSetHeartbeatResponse> hbResponseDown =
- StatusWith<ReplSetHeartbeatResponse>(Status(ErrorCodes::HostUnreachable, ""));
-
- getTopoCoord().processHeartbeatResponse(timeoutTime,
- Milliseconds(5000),
- member,
- hbResponseDown,
- OpTime());
-
- member = HostAndPort("test1:1234");
- getTopoCoord().prepareHeartbeatRequest(startupTime + Milliseconds(2),
- setName,
- member);
- getTopoCoord().processHeartbeatResponse(heartbeatTime,
- Milliseconds(4000),
- member,
- hbResponseGood,
- OpTime());
- makeSelfPrimary();
-
- // Now node 0 is down, node 1 is up, and for node 2 we have no heartbeat data yet.
- BSONObjBuilder statusBuilder;
- Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
- getTopoCoord().prepareStatusResponse(cbData(),
- curTime,
- uptimeSecs.count(),
- oplogProgress,
- &statusBuilder,
- &resultStatus);
- ASSERT_OK(resultStatus);
- BSONObj rsStatus = statusBuilder.obj();
-
- // Test results for all non-self members
- ASSERT_EQUALS(setName, rsStatus["set"].String());
- ASSERT_EQUALS(curTime.asInt64(), rsStatus["date"].Date().asInt64());
- std::vector<BSONElement> memberArray = rsStatus["members"].Array();
- ASSERT_EQUALS(4U, memberArray.size());
- BSONObj member0Status = memberArray[0].Obj();
- BSONObj member1Status = memberArray[1].Obj();
- BSONObj member2Status = memberArray[2].Obj();
-
- // Test member 0, the node that's DOWN
- ASSERT_EQUALS(0, member0Status["_id"].numberInt());
- ASSERT_EQUALS("test0:1234", member0Status["name"].str());
- ASSERT_EQUALS(0, member0Status["health"].numberDouble());
- ASSERT_EQUALS(MemberState::RS_DOWN, member0Status["state"].numberInt());
- ASSERT_EQUALS("(not reachable/healthy)", member0Status["stateStr"].str());
- ASSERT_EQUALS(0, member0Status["uptime"].numberInt());
- ASSERT_EQUALS(Timestamp(), Timestamp(member0Status["optime"]["ts"].timestampValue()));
- ASSERT_TRUE(member0Status.hasField("optimeDate"));
- ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(Timestamp().getSecs() * 1000ULL),
- member0Status["optimeDate"].Date());
- ASSERT_EQUALS(timeoutTime, member0Status["lastHeartbeat"].date());
- ASSERT_EQUALS(Date_t(), member0Status["lastHeartbeatRecv"].date());
-
- // Test member 1, the node that's SECONDARY
- ASSERT_EQUALS(1, member1Status["_id"].Int());
- ASSERT_EQUALS("test1:1234", member1Status["name"].String());
- ASSERT_EQUALS(1, member1Status["health"].Double());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, member1Status["state"].numberInt());
- ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
- member1Status["stateStr"].String());
- ASSERT_EQUALS(uptimeSecs.count(), member1Status["uptime"].numberInt());
- ASSERT_EQUALS(oplogProgress.getTimestamp(),
- Timestamp(member1Status["optime"]["ts"].timestampValue()));
- ASSERT_TRUE(member1Status.hasField("optimeDate"));
- ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(oplogProgress.getSecs() * 1000ULL),
- member1Status["optimeDate"].Date());
- ASSERT_EQUALS(heartbeatTime, member1Status["lastHeartbeat"].date());
- ASSERT_EQUALS(Date_t(), member1Status["lastHeartbeatRecv"].date());
- ASSERT_EQUALS("READY", member1Status["lastHeartbeatMessage"].str());
-
- // Test member 2, the node that's UNKNOWN
- ASSERT_EQUALS(2, member2Status["_id"].numberInt());
- ASSERT_EQUALS("test2:1234", member2Status["name"].str());
- ASSERT_EQUALS(-1, member2Status["health"].numberDouble());
- ASSERT_EQUALS(MemberState::RS_UNKNOWN, member2Status["state"].numberInt());
- ASSERT_EQUALS(MemberState(MemberState::RS_UNKNOWN).toString(),
- member2Status["stateStr"].str());
- ASSERT_TRUE(member2Status.hasField("uptime"));
- ASSERT_TRUE(member2Status.hasField("optime"));
- ASSERT_TRUE(member2Status.hasField("optimeDate"));
- ASSERT_FALSE(member2Status.hasField("lastHearbeat"));
- ASSERT_FALSE(member2Status.hasField("lastHearbeatRecv"));
-
- // Now test results for ourself, the PRIMARY
- ASSERT_EQUALS(MemberState::RS_PRIMARY, rsStatus["myState"].numberInt());
- BSONObj selfStatus = memberArray[3].Obj();
- ASSERT_TRUE(selfStatus["self"].boolean());
- ASSERT_EQUALS(3, selfStatus["_id"].numberInt());
- ASSERT_EQUALS("test3:1234", selfStatus["name"].str());
- ASSERT_EQUALS(1, selfStatus["health"].numberDouble());
- ASSERT_EQUALS(MemberState::RS_PRIMARY, selfStatus["state"].numberInt());
- ASSERT_EQUALS(MemberState(MemberState::RS_PRIMARY).toString(),
- selfStatus["stateStr"].str());
- ASSERT_EQUALS(uptimeSecs.count(), selfStatus["uptime"].numberInt());
- ASSERT_EQUALS(oplogProgress.getTimestamp(),
- Timestamp(selfStatus["optime"]["ts"].timestampValue()));
- ASSERT_TRUE(selfStatus.hasField("optimeDate"));
- ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(oplogProgress.getSecs() * 1000ULL),
- selfStatus["optimeDate"].Date());
-
- // TODO(spencer): Test electionTime and pingMs are set properly
- }
-
- TEST_F(TopoCoordTest, ReplSetGetStatusFails) {
- // This test starts by configuring a TopologyCoordinator to NOT be a member of a 3 node
- // replica set. Then running prepareStatusResponse should fail.
- Date_t startupTime = Date_t::fromMillisSinceEpoch(100);
- Date_t heartbeatTime = Date_t::fromMillisSinceEpoch(5000);
- Seconds uptimeSecs(10);
- Date_t curTime = heartbeatTime + uptimeSecs;
- OpTime oplogProgress(Timestamp(3, 4), 0);
- std::string setName = "mySet";
-
- updateConfig(BSON("_id" << setName <<
- "version" << 1 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "test0:1234") <<
- BSON("_id" << 1 << "host" << "test1:1234") <<
- BSON("_id" << 2 << "host" << "test2:1234"))),
- -1, // This one is not part of the replica set.
- startupTime + Milliseconds(1));
-
- BSONObjBuilder statusBuilder;
- Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
- getTopoCoord().prepareStatusResponse(cbData(),
- curTime,
- uptimeSecs.count(),
- oplogProgress,
- &statusBuilder,
- &resultStatus);
- ASSERT_NOT_OK(resultStatus);
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, resultStatus);
- }
-
- TEST_F(TopoCoordTest, PrepareFreshResponse) {
- ReplicationCoordinator::ReplSetFreshArgs args;
- OpTime freshestOpTime(Timestamp(15, 10), 0);
- OpTime ourOpTime(Timestamp(10, 10), 0);
- OpTime staleOpTime(Timestamp(1, 1), 0);
- Status internalErrorStatus(ErrorCodes::InternalError, "didn't set status");
-
- // if we do not have an index in the config, we should get ErrorCodes::ReplicaSetNotFound
- BSONObjBuilder responseBuilder;
- Status status = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder, &status);
- ASSERT_EQUALS(ErrorCodes::ReplicaSetNotFound, status);
- ASSERT_EQUALS("Cannot participate in elections because not initialized", status.reason());
- ASSERT_TRUE(responseBuilder.obj().isEmpty());
-
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 10 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 <<
- "host" << "hself" <<
- "priority" << 10) <<
- BSON("_id" << 20 << "host" << "h1") <<
- BSON("_id" << 30 << "host" << "h2") <<
- BSON("_id" << 40 <<
- "host" << "h3" <<
- "priority" << 10))),
- 0);
- // Test with incorrect replset name
- args.setName = "fakeset";
-
- BSONObjBuilder responseBuilder0;
- Status status0 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder0, &status0);
- ASSERT_EQUALS(ErrorCodes::ReplicaSetNotFound, status0);
- ASSERT_TRUE(responseBuilder0.obj().isEmpty());
-
- heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
-
- // Test with old config version
- args.setName = "rs0";
- args.cfgver = 5;
- args.id = 20;
- args.who = HostAndPort("h1");
- args.opTime = ourOpTime.getTimestamp();
-
- BSONObjBuilder responseBuilder1;
- Status status1 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder1, &status1);
- ASSERT_OK(status1);
- BSONObj response1 = responseBuilder1.obj();
- ASSERT_EQUALS("config version stale", response1["info"].String());
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response1["opTime"].timestampValue()));
- ASSERT_TRUE(response1["fresher"].Bool());
- ASSERT_FALSE(response1["veto"].Bool());
- ASSERT_FALSE(response1.hasField("errmsg"));
-
- // Test with non-existent node.
- args.cfgver = 10;
- args.id = 0;
- args.who = HostAndPort("fakenode");
-
- BSONObjBuilder responseBuilder2;
- Status status2 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder2, &status2);
- ASSERT_OK(status2);
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response2["opTime"].timestampValue()));
- ASSERT_FALSE(response2["fresher"].Bool());
- ASSERT_TRUE(response2["veto"].Bool());
- ASSERT_EQUALS("replSet couldn't find member with id 0", response2["errmsg"].String());
-
-
- // Test when we are primary.
- args.id = 20;
- args.who = HostAndPort("h1");
-
- makeSelfPrimary();
-
- BSONObjBuilder responseBuilder3;
- Status status3 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder3, &status3);
- ASSERT_OK(status3);
- BSONObj response3 = responseBuilder3.obj();
- ASSERT_FALSE(response3.hasField("info"));
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response3["opTime"].timestampValue()));
- ASSERT_FALSE(response3["fresher"].Bool());
- ASSERT_TRUE(response3["veto"].Bool());
- ASSERT_EQUALS("I am already primary, h1:27017 can try again once I've stepped down",
- response3["errmsg"].String());
-
-
- // Test when someone else is primary.
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
- setSelfMemberState(MemberState::RS_SECONDARY);
- getTopoCoord()._setCurrentPrimaryForTest(2);
-
- BSONObjBuilder responseBuilder4;
- Status status4 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder4, &status4);
- ASSERT_OK(status4);
- BSONObj response4 = responseBuilder4.obj();
- ASSERT_FALSE(response4.hasField("info"));
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response4["opTime"].timestampValue()));
- ASSERT_FALSE(response4["fresher"].Bool());
- ASSERT_TRUE(response4["veto"].Bool());
- ASSERT_EQUALS(
- "h1:27017 is trying to elect itself but h2:27017 is already primary and more "
- "up-to-date",
- response4["errmsg"].String());
-
-
- // Test trying to elect a node that is caught up but isn't the highest priority node.
- heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, staleOpTime);
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
-
- BSONObjBuilder responseBuilder5;
- Status status5 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder5, &status5);
- ASSERT_OK(status5);
- BSONObj response5 = responseBuilder5.obj();
- ASSERT_FALSE(response5.hasField("info"));
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response5["opTime"].timestampValue()));
- ASSERT_FALSE(response5["fresher"].Bool());
- ASSERT_TRUE(response5["veto"].Bool());
- ASSERT(response5["errmsg"].String().find("h1:27017 has lower priority of 1 than") !=
- std::string::npos) << response5["errmsg"].String();
-
- // Test trying to elect a node that isn't electable because its down
- args.id = 40;
- args.who = HostAndPort("h3");
-
- receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime());
-
- BSONObjBuilder responseBuilder6;
- Status status6 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder6, &status6);
- ASSERT_OK(status6);
- BSONObj response6 = responseBuilder6.obj();
- ASSERT_FALSE(response6.hasField("info"));
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response6["opTime"].timestampValue()));
- ASSERT_FALSE(response6["fresher"].Bool());
- ASSERT_TRUE(response6["veto"].Bool());
- ASSERT_NE(std::string::npos, response6["errmsg"].String().find(
- "I don't think h3:27017 is electable because the member is not "
- "currently a secondary")) << response6["errmsg"].String();
-
- // Test trying to elect a node that isn't electable because it's PRIMARY
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_PRIMARY, ourOpTime);
- ASSERT_EQUALS(3, getCurrentPrimaryIndex());
-
- BSONObjBuilder responseBuilder7;
- Status status7 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder7, &status7);
- ASSERT_OK(status7);
- BSONObj response7 = responseBuilder7.obj();
- ASSERT_FALSE(response7.hasField("info"));
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response7["opTime"].timestampValue()));
- ASSERT_FALSE(response7["fresher"].Bool());
- ASSERT_TRUE(response7["veto"].Bool());
- ASSERT_NE(std::string::npos, response7["errmsg"].String().find(
- "I don't think h3:27017 is electable because the member is not "
- "currently a secondary")) << response7["errmsg"].String();
-
- // Test trying to elect a node that isn't electable because it's STARTUP
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_STARTUP, ourOpTime);
-
- BSONObjBuilder responseBuilder8;
- Status status8 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder8, &status8);
- ASSERT_OK(status8);
- BSONObj response8 = responseBuilder8.obj();
- ASSERT_FALSE(response8.hasField("info"));
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response8["opTime"].timestampValue()));
- ASSERT_FALSE(response8["fresher"].Bool());
- ASSERT_TRUE(response8["veto"].Bool());
- ASSERT_NE(std::string::npos, response8["errmsg"].String().find(
- "I don't think h3:27017 is electable because the member is not "
- "currently a secondary")) << response8["errmsg"].String();
-
- // Test trying to elect a node that isn't electable because it's RECOVERING
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_RECOVERING, ourOpTime);
-
- BSONObjBuilder responseBuilder9;
- Status status9 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder9, &status9);
- ASSERT_OK(status9);
- BSONObj response9 = responseBuilder9.obj();
- ASSERT_FALSE(response9.hasField("info"));
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response9["opTime"].timestampValue()));
- ASSERT_FALSE(response9["fresher"].Bool());
- ASSERT_TRUE(response9["veto"].Bool());
- ASSERT_NE(std::string::npos, response9["errmsg"].String().find(
- "I don't think h3:27017 is electable because the member is not "
- "currently a secondary")) << response9["errmsg"].String();
-
- // Test trying to elect a node that is fresher but lower priority than the existing primary
- args.id = 30;
- args.who = HostAndPort("h2");
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_PRIMARY, ourOpTime);
- ASSERT_EQUALS(3, getCurrentPrimaryIndex());
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, freshestOpTime);
-
- BSONObjBuilder responseBuilder10;
- Status status10 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder10, &status10);
- ASSERT_OK(status10);
- BSONObj response10 = responseBuilder10.obj();
- ASSERT_FALSE(response10.hasField("info"));
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response10["opTime"].timestampValue()));
- ASSERT_TRUE(response10["fresher"].Bool());
- ASSERT_TRUE(response10["veto"].Bool());
- ASSERT_TRUE(response10.hasField("errmsg"));
-
-
- // Test trying to elect a valid node
- args.id = 40;
- args.who = HostAndPort("h3");
-
- receiveDownHeartbeat(HostAndPort("h2"), "rs0", OpTime());
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
-
- BSONObjBuilder responseBuilder11;
- Status status11 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(
- args, Date_t(), ourOpTime, &responseBuilder11, &status11);
- ASSERT_OK(status11);
- BSONObj response11 = responseBuilder11.obj();
- ASSERT_FALSE(response11.hasField("info")) << response11.toString();
- ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response11["opTime"].timestampValue()));
- ASSERT_FALSE(response11["fresher"].Bool()) << response11.toString();
- ASSERT_FALSE(response11["veto"].Bool()) << response11.toString();
- ASSERT_FALSE(response11.hasField("errmsg")) << response11.toString();
-
- // Test with our id
- args.id = 10;
- BSONObjBuilder responseBuilder12;
- Status status12 = internalErrorStatus;
- getTopoCoord().prepareFreshResponse(
- args, Date_t(), ourOpTime, &responseBuilder12, &status12);
- ASSERT_EQUALS(ErrorCodes::BadValue, status12);
- ASSERT_EQUALS(
- "Received replSetFresh command from member with the same member ID as ourself: 10",
- status12.reason());
- ASSERT_TRUE(responseBuilder12.obj().isEmpty());
+ StatusWith<ReplSetHeartbeatResponse> hbResponse = responseStatus.isOK()
+ ? StatusWith<ReplSetHeartbeatResponse>(hb)
+ : StatusWith<ReplSetHeartbeatResponse>(responseStatus);
+ getTopoCoord().prepareHeartbeatRequest(now(), setName, member);
+ now() += roundTripTime;
+ return getTopoCoord().processHeartbeatResponse(
+ now(), roundTripTime, member, hbResponse, lastOpTimeReceiver);
}
- class HeartbeatResponseTest : public TopoCoordTest {
- public:
-
- virtual void setUp() {
- TopoCoordTest::setUp();
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 5 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017")) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
- }
-
- };
-
- class HeartbeatResponseTestOneRetry : public HeartbeatResponseTest {
- public:
- virtual void setUp() {
- HeartbeatResponseTest::setUp();
-
- // Bring up the node we are heartbeating.
- _target = HostAndPort("host2", 27017);
- Date_t _upRequestDate = unittest::assertGet(dateFromISOString("2014-08-29T12:55Z"));
- std::pair<ReplSetHeartbeatArgs, Milliseconds> uppingRequest =
- getTopoCoord().prepareHeartbeatRequest(_upRequestDate,
- "rs0",
- _target);
- HeartbeatResponseAction upAction =
- getTopoCoord().processHeartbeatResponse(
- _upRequestDate,
- Milliseconds(0),
- _target,
- makeStatusWith<ReplSetHeartbeatResponse>(),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
- ASSERT_EQUALS(HeartbeatResponseAction::NoAction, upAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
-
-
- // Time of first request for this heartbeat period
- _firstRequestDate = unittest::assertGet(dateFromISOString("2014-08-29T13:00Z"));
-
- // Initial heartbeat attempt prepared, at t + 0.
- std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
- getTopoCoord().prepareHeartbeatRequest(_firstRequestDate,
- "rs0",
- _target);
- // 5 seconds to successfully complete the heartbeat before the timeout expires.
- ASSERT_EQUALS(5000, request.second.count());
-
- // Initial heartbeat request fails at t + 4000ms
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- _firstRequestDate + Seconds(4), // 4 seconds elapsed, retry allowed.
- Milliseconds(3990), // Spent 3.99 of the 4 seconds in the network.
- _target,
- StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit,
- "Took too long"),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
-
- ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- // Because the heartbeat failed without timing out, we expect to retry immediately.
- ASSERT_EQUALS(_firstRequestDate + Seconds(4), action.getNextHeartbeatStartDate());
-
- // First heartbeat retry prepared, at t + 4000ms.
- request =
- getTopoCoord().prepareHeartbeatRequest(
- _firstRequestDate + Milliseconds(4000),
+private:
+ unique_ptr<TopologyCoordinatorImpl> _topo;
+ unique_ptr<ReplicationExecutor::CallbackArgs> _cbData;
+ Date_t _now;
+ int _selfIndex;
+};
+
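+// A rough sketch of the pattern the tests below share (not a prescription): load a config via
+// updateConfig(), feed simulated heartbeats through heartbeatFromMember() or
+// receiveDownHeartbeat(), advance the test clock with now()++, and then assert on the
+// coordinator's decisions via getTopoCoord().
+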
+TEST_F(TopoCoordTest, ChooseSyncSourceBasic) {
+    // if we do not have an index in the config, we should get an empty sync source
+ HostAndPort newSyncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_TRUE(newSyncSource.empty());
+
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ // member h2 is the furthest ahead
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(1, 0), 0));
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+
+ // We start with no sync source
+ ASSERT(getTopoCoord().getSyncSourceAddress().empty());
+
+ // Fail due to insufficient number of pings
+ newSyncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(getTopoCoord().getSyncSourceAddress(), newSyncSource);
+ ASSERT(getTopoCoord().getSyncSourceAddress().empty());
+
+ // Record 2nd round of pings to allow choosing a new sync source; all members equidistant
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(1, 0), 0));
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+
+ // Should choose h2, since it is furthest ahead
+ newSyncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(getTopoCoord().getSyncSourceAddress(), newSyncSource);
+ ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+
+ // h3 becomes further ahead, so it should be chosen
+ heartbeatFromMember(
+ HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(2, 0), 0));
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+
+ // h3 becomes an invalid candidate for sync source; should choose h2 again
+ heartbeatFromMember(
+ HostAndPort("h3"), "rs0", MemberState::RS_RECOVERING, OpTime(Timestamp(2, 0), 0));
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+
+ // h3 back in SECONDARY and ahead
+ heartbeatFromMember(
+ HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(2, 0), 0));
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+
+ // h3 goes down
+ receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime());
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+
+ // h3 back up and ahead
+ heartbeatFromMember(
+ HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(2, 0), 0));
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+}
+
+TEST_F(TopoCoordTest, ChooseSyncSourceCandidates) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself")
+ << BSON("_id" << 10 << "host"
+ << "h1")
+ << BSON("_id" << 20 << "host"
+ << "h2"
+ << "buildIndexes" << false << "priority" << 0)
+ << BSON("_id" << 30 << "host"
+ << "h3"
+ << "hidden" << true << "priority" << 0 << "votes"
+ << 0) << BSON("_id" << 40 << "host"
+ << "h4"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 50 << "host"
+ << "h5"
+ << "slaveDelay" << 1 << "priority" << 0)
+ << BSON("_id" << 60 << "host"
+ << "h6") << BSON("_id" << 70 << "host"
+ << "hprimary"))),
+ 0);
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ OpTime lastOpTimeWeApplied = OpTime(Timestamp(100, 0), 0);
+
+ heartbeatFromMember(HostAndPort("h1"),
"rs0",
- _target);
- // One second left to complete the heartbeat.
- ASSERT_EQUALS(1000, request.second.count());
-
- // Ensure a single failed heartbeat did not cause the node to be marked down
- BSONObjBuilder statusBuilder;
- Status resultStatus(ErrorCodes::InternalError,
- "prepareStatusResponse didn't set result");
- getTopoCoord().prepareStatusResponse(cbData(),
- _firstRequestDate + Milliseconds(4000),
- 10,
- OpTime(Timestamp(100,0), 0),
- &statusBuilder,
- &resultStatus);
- ASSERT_OK(resultStatus);
- BSONObj rsStatus = statusBuilder.obj();
- std::vector<BSONElement> memberArray = rsStatus["members"].Array();
- BSONObj member1Status = memberArray[1].Obj();
-
- ASSERT_EQUALS(1, member1Status["_id"].Int());
- ASSERT_EQUALS(1, member1Status["health"].Double());
-
- }
-
- Date_t firstRequestDate() {
- return _firstRequestDate;
- }
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(700));
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(600));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(500));
+ heartbeatFromMember(HostAndPort("h4"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(400));
+ heartbeatFromMember(HostAndPort("h5"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(300));
- HostAndPort target() {
- return _target;
- }
+ // This node is lagged further than maxSyncSourceLagSeconds.
+ heartbeatFromMember(HostAndPort("h6"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(499, 0), 0),
+ Milliseconds(200));
- private:
- Date_t _firstRequestDate;
- HostAndPort _target;
-
- };
-
- class HeartbeatResponseTestTwoRetries : public HeartbeatResponseTestOneRetry {
- public:
- virtual void setUp() {
- HeartbeatResponseTestOneRetry::setUp();
- // First retry fails at t + 4500ms
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // 4.5 of the 5 seconds elapsed;
- // could retry.
- Milliseconds(400), // Spent 0.4 of the 0.5 seconds in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::NodeNotFound, "Bad DNS?"),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
- ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- // Because the first retry failed without timing out, we expect to retry immediately.
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(4500),
- action.getNextHeartbeatStartDate());
-
- // Second retry prepared at t + 4500ms.
- std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
- getTopoCoord().prepareHeartbeatRequest(
- firstRequestDate() + Milliseconds(4500),
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ heartbeatFromMember(HostAndPort("hprimary"),
"rs0",
- target());
- // 500ms left to complete the heartbeat.
- ASSERT_EQUALS(500, request.second.count());
-
- // Ensure a second failed heartbeat did not cause the node to be marked down
- BSONObjBuilder statusBuilder;
- Status resultStatus(ErrorCodes::InternalError,
- "prepareStatusResponse didn't set result");
- getTopoCoord().prepareStatusResponse(cbData(),
- firstRequestDate() + Seconds(4),
- 10,
- OpTime(Timestamp(100,0), 0),
- &statusBuilder,
- &resultStatus);
- ASSERT_OK(resultStatus);
- BSONObj rsStatus = statusBuilder.obj();
- std::vector<BSONElement> memberArray = rsStatus["members"].Array();
- BSONObj member1Status = memberArray[1].Obj();
-
- ASSERT_EQUALS(1, member1Status["_id"].Int());
- ASSERT_EQUALS(1, member1Status["health"].Double());
- }
- };
+ MemberState::RS_PRIMARY,
+ OpTime(Timestamp(600, 0), 0),
+ Milliseconds(100));
+ ASSERT_EQUALS(7, getCurrentPrimaryIndex());
- class HeartbeatResponseHighVerbosityTest : public HeartbeatResponseTest {
- public:
+ // Record 2nd round of pings to allow choosing a new sync source
+ heartbeatFromMember(HostAndPort("h1"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(700));
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(600));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(500));
+ heartbeatFromMember(HostAndPort("h4"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(400));
+ heartbeatFromMember(HostAndPort("h5"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(501, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h6"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(499, 0), 0),
+ Milliseconds(200));
+ heartbeatFromMember(HostAndPort("hprimary"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ OpTime(Timestamp(600, 0), 0),
+ Milliseconds(100));
- virtual void setUp() {
- HeartbeatResponseTest::setUp();
- // set verbosity as high as the highest verbosity log message we'd like to check for
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- }
+ // Should choose primary first; it's closest
+ getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
+ ASSERT_EQUALS(HostAndPort("hprimary"), getTopoCoord().getSyncSourceAddress());
- virtual void tearDown() {
- HeartbeatResponseTest::tearDown();
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
- }
+ // Primary goes far far away
+ heartbeatFromMember(HostAndPort("hprimary"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ OpTime(Timestamp(600, 0), 0),
+ Milliseconds(100000000));
+
+    // Should choose h4 (if an arbiter has an oplog, it's a valid sync source).
+    // h6 is not considered because it is outside the maxSyncSourceLagSeconds window.
+ getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
+ ASSERT_EQUALS(HostAndPort("h4"), getTopoCoord().getSyncSourceAddress());
+
+ // h4 goes down; should choose h1
+ receiveDownHeartbeat(HostAndPort("h4"), "rs0", OpTime());
+ getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
+ ASSERT_EQUALS(HostAndPort("h1"), getTopoCoord().getSyncSourceAddress());
+
+ // Primary and h1 go down; should choose h6
+ receiveDownHeartbeat(HostAndPort("h1"), "rs0", OpTime());
+ receiveDownHeartbeat(HostAndPort("hprimary"), "rs0", OpTime());
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
+ ASSERT_EQUALS(HostAndPort("h6"), getTopoCoord().getSyncSourceAddress());
+
+ // h6 goes down; should choose h5
+ receiveDownHeartbeat(HostAndPort("h6"), "rs0", OpTime());
+ getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
+ ASSERT_EQUALS(HostAndPort("h5"), getTopoCoord().getSyncSourceAddress());
+
+ // h5 goes down; should choose h3
+ receiveDownHeartbeat(HostAndPort("h5"), "rs0", OpTime());
+ getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+
+ // h3 goes down; no sync source candidates remain
+ receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime());
+ getTopoCoord().chooseNewSyncSource(now()++, lastOpTimeWeApplied);
+ ASSERT(getTopoCoord().getSyncSourceAddress().empty());
+}
+
+
+TEST_F(TopoCoordTest, ChooseSyncSourceChainingNotAllowed) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(0, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(0, 0), 0),
+ Milliseconds(300));
- };
+ // No primary situation: should choose no sync source.
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT(getTopoCoord().getSyncSourceAddress().empty());
- TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataNodeBelivesWeAreDown) {
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
+ // Add primary
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ OpTime(Timestamp(0, 0), 0),
+ Milliseconds(300));
+ ASSERT_EQUALS(2, getCurrentPrimaryIndex());
+
+ // h3 is primary and should be chosen as sync source, despite being further away than h2
+ // and the primary (h3) being behind our most recently applied optime
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime(Timestamp(10, 0), 0));
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+}
+
+TEST_F(TopoCoordTest, EmptySyncSourceOnPrimary) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(0, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(0, 0), 0),
+ Milliseconds(300));
+
+    // No primary situation: should choose h2 as the sync source.
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+
+ // Become primary
+ makeSelfPrimary(Timestamp(3.0));
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ // Check sync source
+ ASSERT_EQUALS(HostAndPort(), getTopoCoord().getSyncSourceAddress());
+}
+
+TEST_F(TopoCoordTest, ForceSyncSource) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ // two rounds of heartbeat pings from each member
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+
+ // force should overrule other defaults
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+ getTopoCoord().setForceSyncSourceIndex(1);
+ // force should cause shouldChangeSyncSource() to return true
+ // even if the currentSource is the force target
+ ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("h2"), now()));
+ ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("h3"), now()));
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+
+ // force should only work for one call to chooseNewSyncSource
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+}
+
+TEST_F(TopoCoordTest, BlacklistSyncSource) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+
+ Date_t expireTime = Date_t::fromMillisSinceEpoch(1000);
+ getTopoCoord().blacklistSyncSource(HostAndPort("h3"), expireTime);
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+    // Should choose the second-best candidate now that h3 is blacklisted.
+ ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+
+ // After time has passed, should go back to original sync source
+ getTopoCoord().chooseNewSyncSource(expireTime, OpTime());
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().getSyncSourceAddress());
+}
+
+TEST_F(TopoCoordTest, BlacklistSyncSourceNoChaining) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- // request heartbeat
- std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
- getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host2"));
-
- ReplSetHeartbeatResponse believesWeAreDownResponse;
- believesWeAreDownResponse.noteReplSet();
- believesWeAreDownResponse.setSetName("rs0");
- believesWeAreDownResponse.setState(MemberState::RS_SECONDARY);
- believesWeAreDownResponse.setElectable(true);
- believesWeAreDownResponse.noteStateDisagreement();
- startCapturingLogMessages();
- HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
- now()++, // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- HostAndPort("host2"),
- StatusWith<ReplSetHeartbeatResponse>(believesWeAreDownResponse),
- lastOpTimeApplied);
- stopCapturingLogMessages();
- ASSERT_NO_ACTION(action.getAction());
- ASSERT_EQUALS(1, countLogLinesContaining("host2:27017 thinks that we are down"));
-
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+
+ Date_t expireTime = Date_t::fromMillisSinceEpoch(1000);
+ getTopoCoord().blacklistSyncSource(HostAndPort("h2"), expireTime);
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ // Can't choose any sync source now.
+ ASSERT(getTopoCoord().getSyncSourceAddress().empty());
+
+ // After time has passed, should go back to the primary
+ getTopoCoord().chooseNewSyncSource(expireTime, OpTime());
+ ASSERT_EQUALS(HostAndPort("h2"), getTopoCoord().getSyncSourceAddress());
+}
+
+TEST_F(TopoCoordTest, OnlyUnauthorizedUpCausesRecovering) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ // Generate enough heartbeats to select a sync source below
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(300));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+ heartbeatFromMember(HostAndPort("h3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(2, 0), 0),
+ Milliseconds(100));
+
+ ASSERT_EQUALS(HostAndPort("h3"), getTopoCoord().chooseNewSyncSource(now()++, OpTime()));
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+ // Good state setup done
+
+ // Mark nodes down, ensure that we have no source and are secondary
+ receiveDownHeartbeat(HostAndPort("h2"), "rs0", OpTime(), ErrorCodes::NetworkTimeout);
+ receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime(), ErrorCodes::NetworkTimeout);
+ ASSERT_TRUE(getTopoCoord().chooseNewSyncSource(now()++, OpTime()).empty());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+
+ // Mark nodes down + unauth, ensure that we have no source and are secondary
+ receiveDownHeartbeat(HostAndPort("h2"), "rs0", OpTime(), ErrorCodes::NetworkTimeout);
+ receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime(), ErrorCodes::Unauthorized);
+ ASSERT_TRUE(getTopoCoord().chooseNewSyncSource(now()++, OpTime()).empty());
+ ASSERT_EQUALS(MemberState::RS_RECOVERING, getTopoCoord().getMemberState().s);
+
+ // Having an auth error but with another node up should bring us out of RECOVERING
+ HeartbeatResponseAction action = receiveUpHeartbeat(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(),
+ OpTime(Timestamp(2, 0), 0),
+ OpTime(Timestamp(2, 0), 0));
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+ // Test that the heartbeat that brings us from RECOVERING to SECONDARY doesn't initiate
+ // an election (SERVER-17164)
+ ASSERT_NO_ACTION(action.getAction());
+}
+
+TEST_F(TopoCoordTest, ReceiveHeartbeatWhileAbsentFromConfig) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "h1")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ -1);
+ ASSERT_NO_ACTION(heartbeatFromMember(HostAndPort("h2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ OpTime(Timestamp(1, 0), 0),
+ Milliseconds(300)).getAction());
+}
+
+TEST_F(TopoCoordTest, PrepareSyncFromResponse) {
+ OpTime staleOpTime(Timestamp(1, 1), 0);
+ OpTime ourOpTime(Timestamp(staleOpTime.getSecs() + 11, 1), 0);
+
+ Status result = Status::OK();
+ BSONObjBuilder response;
+
+ // if we do not have an index in the config, we should get ErrorCodes::NotSecondary
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h1"), ourOpTime, &response, &result);
+ ASSERT_EQUALS(ErrorCodes::NotSecondary, result);
+ ASSERT_EQUALS("Removed and uninitialized nodes do not sync", result.reason());
+
+ // Test trying to sync from another node when we are an arbiter
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "hself"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 1 << "host"
+ << "h1"))),
+ 0);
+
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h1"), ourOpTime, &response, &result);
+ ASSERT_EQUALS(ErrorCodes::NotSecondary, result);
+ ASSERT_EQUALS("arbiters don't sync", result.reason());
+
+ // Set up config for the rest of the tests
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "hself")
+ << BSON("_id" << 1 << "host"
+ << "h1"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority" << 0 << "buildIndexes" << false)
+ << BSON("_id" << 3 << "host"
+ << "h3") << BSON("_id" << 4 << "host"
+ << "h4")
+ << BSON("_id" << 5 << "host"
+ << "h5") << BSON("_id" << 6 << "host"
+ << "h6"))),
+ 0);
+
+ // Try to sync while PRIMARY
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ makeSelfPrimary();
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+ getTopoCoord()._setCurrentPrimaryForTest(0);
+ BSONObjBuilder response1;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h3"), ourOpTime, &response1, &result);
+ ASSERT_EQUALS(ErrorCodes::NotSecondary, result);
+ ASSERT_EQUALS("primaries don't sync", result.reason());
+ ASSERT_EQUALS("h3:27017", response1.obj()["syncFromRequested"].String());
+
+ // Try to sync from non-existent member
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ getTopoCoord()._setCurrentPrimaryForTest(-1);
+ BSONObjBuilder response2;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("fakemember"), ourOpTime, &response2, &result);
+ ASSERT_EQUALS(ErrorCodes::NodeNotFound, result);
+ ASSERT_EQUALS("Could not find member \"fakemember:27017\" in replica set", result.reason());
+
+ // Try to sync from self
+ BSONObjBuilder response3;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("hself"), ourOpTime, &response3, &result);
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, result);
+ ASSERT_EQUALS("I cannot sync from myself", result.reason());
+
+ // Try to sync from an arbiter
+ BSONObjBuilder response4;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h1"), ourOpTime, &response4, &result);
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, result);
+ ASSERT_EQUALS("Cannot sync from \"h1:27017\" because it is an arbiter", result.reason());
+
+ // Try to sync from a node that doesn't build indexes
+ BSONObjBuilder response5;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h2"), ourOpTime, &response5, &result);
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, result);
+ ASSERT_EQUALS("Cannot sync from \"h2:27017\" because it does not build indexes",
+ result.reason());
+
+ // Try to sync from a member that is down
+ receiveDownHeartbeat(HostAndPort("h4"), "rs0", OpTime());
+
+ BSONObjBuilder response7;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h4"), ourOpTime, &response7, &result);
+ ASSERT_EQUALS(ErrorCodes::HostUnreachable, result);
+ ASSERT_EQUALS("I cannot reach the requested member: h4:27017", result.reason());
+
+ // Sync successfully from a member that is stale
+ heartbeatFromMember(
+ HostAndPort("h5"), "rs0", MemberState::RS_SECONDARY, staleOpTime, Milliseconds(100));
+
+ BSONObjBuilder response8;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h5"), ourOpTime, &response8, &result);
+ ASSERT_OK(result);
+ ASSERT_EQUALS("requested member \"h5:27017\" is more than 10 seconds behind us",
+ response8.obj()["warning"].String());
+ getTopoCoord().chooseNewSyncSource(now()++, ourOpTime);
+ ASSERT_EQUALS(HostAndPort("h5"), getTopoCoord().getSyncSourceAddress());
+
+ // Sync successfully from an up-to-date member
+ heartbeatFromMember(
+ HostAndPort("h6"), "rs0", MemberState::RS_SECONDARY, ourOpTime, Milliseconds(100));
+
+ BSONObjBuilder response9;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h6"), ourOpTime, &response9, &result);
+ ASSERT_OK(result);
+ BSONObj response9Obj = response9.obj();
+ ASSERT_FALSE(response9Obj.hasField("warning"));
+ ASSERT_EQUALS(HostAndPort("h5").toString(), response9Obj["prevSyncTarget"].String());
+ getTopoCoord().chooseNewSyncSource(now()++, ourOpTime);
+ ASSERT_EQUALS(HostAndPort("h6"), getTopoCoord().getSyncSourceAddress());
+
+ // node goes down between forceSync and chooseNewSyncSource
+ BSONObjBuilder response10;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h6"), ourOpTime, &response10, &result);
+ BSONObj response10Obj = response10.obj();
+ ASSERT_FALSE(response10Obj.hasField("warning"));
+ ASSERT_EQUALS(HostAndPort("h6").toString(), response10Obj["prevSyncTarget"].String());
+ receiveDownHeartbeat(HostAndPort("h6"), "rs0", OpTime());
+ HostAndPort syncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h6"), syncSource);
+
+ // Try to sync from a member that is unauth'd
+ receiveDownHeartbeat(HostAndPort("h5"), "rs0", OpTime(), ErrorCodes::Unauthorized);
+
+ BSONObjBuilder response11;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h5"), ourOpTime, &response11, &result);
+ ASSERT_NOT_OK(result);
+ ASSERT_EQUALS(ErrorCodes::Unauthorized, result.code());
+ ASSERT_EQUALS("not authorized to communicate with h5:27017", result.reason());
+
+ // Sync successfully from an up-to-date member.
+ heartbeatFromMember(
+ HostAndPort("h6"), "rs0", MemberState::RS_SECONDARY, ourOpTime, Milliseconds(100));
+ BSONObjBuilder response12;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("h6"), ourOpTime, &response12, &result);
+ ASSERT_OK(result);
+ syncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+ ASSERT_EQUALS(HostAndPort("h6"), syncSource);
+}
+
+TEST_F(TopoCoordTest, ReplSetGetStatus) {
+ // This test starts by configuring a TopologyCoordinator as a member of a 4 node replica
+ // set, with each node in a different state.
+    // The first node is DOWN, as if we tried heartbeating it and the attempt failed.
+    // The second node is in state SECONDARY, as if we've received a valid heartbeat from it.
+    // The third node is in state UNKNOWN, as if we've had no heartbeating activity
+    // with it yet. The fourth node is PRIMARY and corresponds to ourself, which gets its
+ // information for replSetGetStatus from a different source than the nodes that aren't
+ // ourself. After this setup, we call prepareStatusResponse and make sure that the fields
+ // returned for each member match our expectations.
+ Date_t startupTime = Date_t::fromMillisSinceEpoch(100);
+ Date_t heartbeatTime = Date_t::fromMillisSinceEpoch(5000);
+ Seconds uptimeSecs(10);
+ Date_t curTime = heartbeatTime + uptimeSecs;
+ Timestamp electionTime(1, 2);
+ OpTime oplogProgress(Timestamp(3, 4), 0);
+ std::string setName = "mySet";
+
+ ReplSetHeartbeatResponse hb;
+ hb.setConfigVersion(1);
+ hb.setState(MemberState::RS_SECONDARY);
+ hb.setElectionTime(electionTime);
+ hb.setHbMsg("READY");
+ hb.setOpTime(oplogProgress);
+ StatusWith<ReplSetHeartbeatResponse> hbResponseGood = StatusWith<ReplSetHeartbeatResponse>(hb);
+
+ updateConfig(
+ BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234") << BSON("_id" << 2 << "host"
+ << "test2:1234")
+ << BSON("_id" << 3 << "host"
+ << "test3:1234"))),
+ 3,
+ startupTime + Milliseconds(1));
+
+    // Now that the replica set is set up, put the members into the states we want them in.
+ HostAndPort member = HostAndPort("test0:1234");
+ getTopoCoord().prepareHeartbeatRequest(startupTime + Milliseconds(1), setName, member);
+ getTopoCoord().processHeartbeatResponse(
+ startupTime + Milliseconds(2), Milliseconds(1), member, hbResponseGood, OpTime());
+ getTopoCoord().prepareHeartbeatRequest(startupTime + Milliseconds(3), setName, member);
+ Date_t timeoutTime =
+ startupTime + Milliseconds(3) + ReplicaSetConfig::kDefaultHeartbeatTimeoutPeriod;
+
+ StatusWith<ReplSetHeartbeatResponse> hbResponseDown =
+ StatusWith<ReplSetHeartbeatResponse>(Status(ErrorCodes::HostUnreachable, ""));
+
+ getTopoCoord().processHeartbeatResponse(
+ timeoutTime, Milliseconds(5000), member, hbResponseDown, OpTime());
+
+ member = HostAndPort("test1:1234");
+ getTopoCoord().prepareHeartbeatRequest(startupTime + Milliseconds(2), setName, member);
+ getTopoCoord().processHeartbeatResponse(
+ heartbeatTime, Milliseconds(4000), member, hbResponseGood, OpTime());
+ makeSelfPrimary();
+
+ // Now node 0 is down, node 1 is up, and for node 2 we have no heartbeat data yet.
+ BSONObjBuilder statusBuilder;
+ Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
+ getTopoCoord().prepareStatusResponse(
+ cbData(), curTime, uptimeSecs.count(), oplogProgress, &statusBuilder, &resultStatus);
+ ASSERT_OK(resultStatus);
+ BSONObj rsStatus = statusBuilder.obj();
+
+ // Test results for all non-self members
+ ASSERT_EQUALS(setName, rsStatus["set"].String());
+ ASSERT_EQUALS(curTime.asInt64(), rsStatus["date"].Date().asInt64());
+ std::vector<BSONElement> memberArray = rsStatus["members"].Array();
+ ASSERT_EQUALS(4U, memberArray.size());
+ BSONObj member0Status = memberArray[0].Obj();
+ BSONObj member1Status = memberArray[1].Obj();
+ BSONObj member2Status = memberArray[2].Obj();
+
+ // Test member 0, the node that's DOWN
+ ASSERT_EQUALS(0, member0Status["_id"].numberInt());
+ ASSERT_EQUALS("test0:1234", member0Status["name"].str());
+ ASSERT_EQUALS(0, member0Status["health"].numberDouble());
+ ASSERT_EQUALS(MemberState::RS_DOWN, member0Status["state"].numberInt());
+ ASSERT_EQUALS("(not reachable/healthy)", member0Status["stateStr"].str());
+ ASSERT_EQUALS(0, member0Status["uptime"].numberInt());
+ ASSERT_EQUALS(Timestamp(), Timestamp(member0Status["optime"]["ts"].timestampValue()));
+ ASSERT_TRUE(member0Status.hasField("optimeDate"));
+ ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(Timestamp().getSecs() * 1000ULL),
+ member0Status["optimeDate"].Date());
+ ASSERT_EQUALS(timeoutTime, member0Status["lastHeartbeat"].date());
+ ASSERT_EQUALS(Date_t(), member0Status["lastHeartbeatRecv"].date());
+
+ // Test member 1, the node that's SECONDARY
+ ASSERT_EQUALS(1, member1Status["_id"].Int());
+ ASSERT_EQUALS("test1:1234", member1Status["name"].String());
+ ASSERT_EQUALS(1, member1Status["health"].Double());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, member1Status["state"].numberInt());
+ ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
+ member1Status["stateStr"].String());
+ ASSERT_EQUALS(uptimeSecs.count(), member1Status["uptime"].numberInt());
+ ASSERT_EQUALS(oplogProgress.getTimestamp(),
+ Timestamp(member1Status["optime"]["ts"].timestampValue()));
+ ASSERT_TRUE(member1Status.hasField("optimeDate"));
+ ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(oplogProgress.getSecs() * 1000ULL),
+ member1Status["optimeDate"].Date());
+ ASSERT_EQUALS(heartbeatTime, member1Status["lastHeartbeat"].date());
+ ASSERT_EQUALS(Date_t(), member1Status["lastHeartbeatRecv"].date());
+ ASSERT_EQUALS("READY", member1Status["lastHeartbeatMessage"].str());
+
+ // Test member 2, the node that's UNKNOWN
+ ASSERT_EQUALS(2, member2Status["_id"].numberInt());
+ ASSERT_EQUALS("test2:1234", member2Status["name"].str());
+ ASSERT_EQUALS(-1, member2Status["health"].numberDouble());
+ ASSERT_EQUALS(MemberState::RS_UNKNOWN, member2Status["state"].numberInt());
+ ASSERT_EQUALS(MemberState(MemberState::RS_UNKNOWN).toString(), member2Status["stateStr"].str());
+ ASSERT_TRUE(member2Status.hasField("uptime"));
+ ASSERT_TRUE(member2Status.hasField("optime"));
+ ASSERT_TRUE(member2Status.hasField("optimeDate"));
+ ASSERT_FALSE(member2Status.hasField("lastHearbeat"));
+ ASSERT_FALSE(member2Status.hasField("lastHearbeatRecv"));
+
+ // Now test results for ourself, the PRIMARY
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, rsStatus["myState"].numberInt());
+ BSONObj selfStatus = memberArray[3].Obj();
+ ASSERT_TRUE(selfStatus["self"].boolean());
+ ASSERT_EQUALS(3, selfStatus["_id"].numberInt());
+ ASSERT_EQUALS("test3:1234", selfStatus["name"].str());
+ ASSERT_EQUALS(1, selfStatus["health"].numberDouble());
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, selfStatus["state"].numberInt());
+ ASSERT_EQUALS(MemberState(MemberState::RS_PRIMARY).toString(), selfStatus["stateStr"].str());
+ ASSERT_EQUALS(uptimeSecs.count(), selfStatus["uptime"].numberInt());
+ ASSERT_EQUALS(oplogProgress.getTimestamp(),
+ Timestamp(selfStatus["optime"]["ts"].timestampValue()));
+ ASSERT_TRUE(selfStatus.hasField("optimeDate"));
+ ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(oplogProgress.getSecs() * 1000ULL),
+ selfStatus["optimeDate"].Date());
+
+ // TODO(spencer): Test electionTime and pingMs are set properly
+}
+
+TEST_F(TopoCoordTest, ReplSetGetStatusFails) {
+ // This test starts by configuring a TopologyCoordinator to NOT be a member of a 3 node
+ // replica set. Then running prepareStatusResponse should fail.
+ Date_t startupTime = Date_t::fromMillisSinceEpoch(100);
+ Date_t heartbeatTime = Date_t::fromMillisSinceEpoch(5000);
+ Seconds uptimeSecs(10);
+ Date_t curTime = heartbeatTime + uptimeSecs;
+ OpTime oplogProgress(Timestamp(3, 4), 0);
+ std::string setName = "mySet";
+
+ updateConfig(
+ BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234") << BSON("_id" << 2 << "host"
+ << "test2:1234"))),
+ -1, // This one is not part of the replica set.
+ startupTime + Milliseconds(1));
+
+ BSONObjBuilder statusBuilder;
+ Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
+ getTopoCoord().prepareStatusResponse(
+ cbData(), curTime, uptimeSecs.count(), oplogProgress, &statusBuilder, &resultStatus);
+ ASSERT_NOT_OK(resultStatus);
+ ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, resultStatus);
+}
+
+TEST_F(TopoCoordTest, PrepareFreshResponse) {
+ ReplicationCoordinator::ReplSetFreshArgs args;
+ OpTime freshestOpTime(Timestamp(15, 10), 0);
+ OpTime ourOpTime(Timestamp(10, 10), 0);
+ OpTime staleOpTime(Timestamp(1, 1), 0);
+ Status internalErrorStatus(ErrorCodes::InternalError, "didn't set status");
+
+ // if we do not have an index in the config, we should get ErrorCodes::ReplicaSetNotFound
+ BSONObjBuilder responseBuilder;
+ Status status = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder, &status);
+ ASSERT_EQUALS(ErrorCodes::ReplicaSetNotFound, status);
+ ASSERT_EQUALS("Cannot participate in elections because not initialized", status.reason());
+ ASSERT_TRUE(responseBuilder.obj().isEmpty());
+
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 10 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself"
+ << "priority" << 10)
+ << BSON("_id" << 20 << "host"
+ << "h1") << BSON("_id" << 30 << "host"
+ << "h2")
+ << BSON("_id" << 40 << "host"
+ << "h3"
+ << "priority" << 10))),
+ 0);
+
+ // Test with incorrect replset name
+ args.setName = "fakeset";
+
+ BSONObjBuilder responseBuilder0;
+ Status status0 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder0, &status0);
+ ASSERT_EQUALS(ErrorCodes::ReplicaSetNotFound, status0);
+ ASSERT_TRUE(responseBuilder0.obj().isEmpty());
+
+ heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
+
+ // Test with old config version
+ args.setName = "rs0";
+ args.cfgver = 5;
+ args.id = 20;
+ args.who = HostAndPort("h1");
+ args.opTime = ourOpTime.getTimestamp();
+
+ BSONObjBuilder responseBuilder1;
+ Status status1 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder1, &status1);
+ ASSERT_OK(status1);
+ BSONObj response1 = responseBuilder1.obj();
+ ASSERT_EQUALS("config version stale", response1["info"].String());
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response1["opTime"].timestampValue()));
+ ASSERT_TRUE(response1["fresher"].Bool());
+ ASSERT_FALSE(response1["veto"].Bool());
+ ASSERT_FALSE(response1.hasField("errmsg"));
+
+ // Test with non-existent node.
+ args.cfgver = 10;
+ args.id = 0;
+ args.who = HostAndPort("fakenode");
+
+ BSONObjBuilder responseBuilder2;
+ Status status2 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder2, &status2);
+ ASSERT_OK(status2);
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response2["opTime"].timestampValue()));
+ ASSERT_FALSE(response2["fresher"].Bool());
+ ASSERT_TRUE(response2["veto"].Bool());
+ ASSERT_EQUALS("replSet couldn't find member with id 0", response2["errmsg"].String());
+
+
+ // Test when we are primary.
+ args.id = 20;
+ args.who = HostAndPort("h1");
+
+ makeSelfPrimary();
+
+ BSONObjBuilder responseBuilder3;
+ Status status3 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder3, &status3);
+ ASSERT_OK(status3);
+ BSONObj response3 = responseBuilder3.obj();
+ ASSERT_FALSE(response3.hasField("info"));
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response3["opTime"].timestampValue()));
+ ASSERT_FALSE(response3["fresher"].Bool());
+ ASSERT_TRUE(response3["veto"].Bool());
+ ASSERT_EQUALS("I am already primary, h1:27017 can try again once I've stepped down",
+ response3["errmsg"].String());
+
+
+ // Test when someone else is primary.
+ heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ getTopoCoord()._setCurrentPrimaryForTest(2);
+
+ BSONObjBuilder responseBuilder4;
+ Status status4 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder4, &status4);
+ ASSERT_OK(status4);
+ BSONObj response4 = responseBuilder4.obj();
+ ASSERT_FALSE(response4.hasField("info"));
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response4["opTime"].timestampValue()));
+ ASSERT_FALSE(response4["fresher"].Bool());
+ ASSERT_TRUE(response4["veto"].Bool());
+ ASSERT_EQUALS(
+ "h1:27017 is trying to elect itself but h2:27017 is already primary and more "
+ "up-to-date",
+ response4["errmsg"].String());
+
+
+ // Test trying to elect a node that is caught up but isn't the highest priority node.
+ heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
+ heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, staleOpTime);
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
+
+ BSONObjBuilder responseBuilder5;
+ Status status5 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder5, &status5);
+ ASSERT_OK(status5);
+ BSONObj response5 = responseBuilder5.obj();
+ ASSERT_FALSE(response5.hasField("info"));
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response5["opTime"].timestampValue()));
+ ASSERT_FALSE(response5["fresher"].Bool());
+ ASSERT_TRUE(response5["veto"].Bool());
+ ASSERT(response5["errmsg"].String().find("h1:27017 has lower priority of 1 than") !=
+ std::string::npos)
+ << response5["errmsg"].String();
+
+    // Test trying to elect a node that isn't electable because it's down
+ args.id = 40;
+ args.who = HostAndPort("h3");
+
+ receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime());
+
+ BSONObjBuilder responseBuilder6;
+ Status status6 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder6, &status6);
+ ASSERT_OK(status6);
+ BSONObj response6 = responseBuilder6.obj();
+ ASSERT_FALSE(response6.hasField("info"));
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response6["opTime"].timestampValue()));
+ ASSERT_FALSE(response6["fresher"].Bool());
+ ASSERT_TRUE(response6["veto"].Bool());
+ ASSERT_NE(std::string::npos,
+ response6["errmsg"].String().find(
+ "I don't think h3:27017 is electable because the member is not "
+ "currently a secondary"))
+ << response6["errmsg"].String();
+
+ // Test trying to elect a node that isn't electable because it's PRIMARY
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_PRIMARY, ourOpTime);
+ ASSERT_EQUALS(3, getCurrentPrimaryIndex());
+
+ BSONObjBuilder responseBuilder7;
+ Status status7 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder7, &status7);
+ ASSERT_OK(status7);
+ BSONObj response7 = responseBuilder7.obj();
+ ASSERT_FALSE(response7.hasField("info"));
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response7["opTime"].timestampValue()));
+ ASSERT_FALSE(response7["fresher"].Bool());
+ ASSERT_TRUE(response7["veto"].Bool());
+ ASSERT_NE(std::string::npos,
+ response7["errmsg"].String().find(
+ "I don't think h3:27017 is electable because the member is not "
+ "currently a secondary"))
+ << response7["errmsg"].String();
+
+ // Test trying to elect a node that isn't electable because it's STARTUP
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_STARTUP, ourOpTime);
+
+ BSONObjBuilder responseBuilder8;
+ Status status8 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder8, &status8);
+ ASSERT_OK(status8);
+ BSONObj response8 = responseBuilder8.obj();
+ ASSERT_FALSE(response8.hasField("info"));
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response8["opTime"].timestampValue()));
+ ASSERT_FALSE(response8["fresher"].Bool());
+ ASSERT_TRUE(response8["veto"].Bool());
+ ASSERT_NE(std::string::npos,
+ response8["errmsg"].String().find(
+ "I don't think h3:27017 is electable because the member is not "
+ "currently a secondary"))
+ << response8["errmsg"].String();
+
+ // Test trying to elect a node that isn't electable because it's RECOVERING
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_RECOVERING, ourOpTime);
+
+ BSONObjBuilder responseBuilder9;
+ Status status9 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder9, &status9);
+ ASSERT_OK(status9);
+ BSONObj response9 = responseBuilder9.obj();
+ ASSERT_FALSE(response9.hasField("info"));
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response9["opTime"].timestampValue()));
+ ASSERT_FALSE(response9["fresher"].Bool());
+ ASSERT_TRUE(response9["veto"].Bool());
+ ASSERT_NE(std::string::npos,
+ response9["errmsg"].String().find(
+ "I don't think h3:27017 is electable because the member is not "
+ "currently a secondary"))
+ << response9["errmsg"].String();
+
+ // Test trying to elect a node that is fresher but lower priority than the existing primary
+ args.id = 30;
+ args.who = HostAndPort("h2");
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_PRIMARY, ourOpTime);
+ ASSERT_EQUALS(3, getCurrentPrimaryIndex());
+ heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, freshestOpTime);
+
+ BSONObjBuilder responseBuilder10;
+ Status status10 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder10, &status10);
+ ASSERT_OK(status10);
+ BSONObj response10 = responseBuilder10.obj();
+ ASSERT_FALSE(response10.hasField("info"));
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response10["opTime"].timestampValue()));
+ ASSERT_TRUE(response10["fresher"].Bool());
+ ASSERT_TRUE(response10["veto"].Bool());
+ ASSERT_TRUE(response10.hasField("errmsg"));
+
+
+ // Test trying to elect a valid node
+ args.id = 40;
+ args.who = HostAndPort("h3");
+
+ receiveDownHeartbeat(HostAndPort("h2"), "rs0", OpTime());
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
+
+ BSONObjBuilder responseBuilder11;
+ Status status11 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder11, &status11);
+ ASSERT_OK(status11);
+ BSONObj response11 = responseBuilder11.obj();
+ ASSERT_FALSE(response11.hasField("info")) << response11.toString();
+ ASSERT_EQUALS(ourOpTime.getTimestamp(), Timestamp(response11["opTime"].timestampValue()));
+ ASSERT_FALSE(response11["fresher"].Bool()) << response11.toString();
+ ASSERT_FALSE(response11["veto"].Bool()) << response11.toString();
+ ASSERT_FALSE(response11.hasField("errmsg")) << response11.toString();
+
+ // Test with our id
+ args.id = 10;
+ BSONObjBuilder responseBuilder12;
+ Status status12 = internalErrorStatus;
+ getTopoCoord().prepareFreshResponse(args, Date_t(), ourOpTime, &responseBuilder12, &status12);
+ ASSERT_EQUALS(ErrorCodes::BadValue, status12);
+ ASSERT_EQUALS(
+ "Received replSetFresh command from member with the same member ID as ourself: 10",
+ status12.reason());
+ ASSERT_TRUE(responseBuilder12.obj().isEmpty());
+}
+
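+// Test fixture that installs a three-node config (host1:27017 through host3:27017, version 5)
+// with a 5 second heartbeat timeout; this node is host1 (member index 0).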
+class HeartbeatResponseTest : public TopoCoordTest {
+public:
+ virtual void setUp() {
+ TopoCoordTest::setUp();
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 5 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
}
+};
+
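+// Fixture whose setUp() first brings host2 up with a successful heartbeat, then simulates one
+// failed heartbeat attempt that does not exhaust the 5 second timeout, so an immediate retry
+// is scheduled with one second of the heartbeat period remaining.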
+class HeartbeatResponseTestOneRetry : public HeartbeatResponseTest {
+public:
+ virtual void setUp() {
+ HeartbeatResponseTest::setUp();
+
+ // Bring up the node we are heartbeating.
+ _target = HostAndPort("host2", 27017);
+ Date_t _upRequestDate = unittest::assertGet(dateFromISOString("2014-08-29T12:55Z"));
+ std::pair<ReplSetHeartbeatArgs, Milliseconds> uppingRequest =
+ getTopoCoord().prepareHeartbeatRequest(_upRequestDate, "rs0", _target);
+ HeartbeatResponseAction upAction = getTopoCoord().processHeartbeatResponse(
+ _upRequestDate,
+ Milliseconds(0),
+ _target,
+ makeStatusWith<ReplSetHeartbeatResponse>(),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+ ASSERT_EQUALS(HeartbeatResponseAction::NoAction, upAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataMemberNotInConfig) {
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
-
- // request heartbeat
- std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
- getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host5"));
-
- ReplSetHeartbeatResponse memberMissingResponse;
- memberMissingResponse.noteReplSet();
- memberMissingResponse.setSetName("rs0");
- memberMissingResponse.setState(MemberState::RS_SECONDARY);
- memberMissingResponse.setElectable(true);
- memberMissingResponse.noteStateDisagreement();
- startCapturingLogMessages();
- HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
- now()++, // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- HostAndPort("host5"),
- StatusWith<ReplSetHeartbeatResponse>(memberMissingResponse),
- lastOpTimeApplied);
- stopCapturingLogMessages();
- ASSERT_NO_ACTION(action.getAction());
- ASSERT_EQUALS(1, countLogLinesContaining("Could not find host5:27017 in current config"));
- }
- TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataSameConfig) {
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
+ // Time of first request for this heartbeat period
+ _firstRequestDate = unittest::assertGet(dateFromISOString("2014-08-29T13:00Z"));
- // request heartbeat
+ // Initial heartbeat attempt prepared, at t + 0.
std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
- getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host2"));
-
- // construct a copy of the original config for log message checking later
- // see HeartbeatResponseTest for the origin of the original config
- ReplicaSetConfig originalConfig;
- originalConfig.initialize(BSON("_id" << "rs0" <<
- "version" << 5 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017")) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 5)));
-
- ReplSetHeartbeatResponse sameConfigResponse;
- sameConfigResponse.noteReplSet();
- sameConfigResponse.setSetName("rs0");
- sameConfigResponse.setState(MemberState::RS_SECONDARY);
- sameConfigResponse.setElectable(true);
- sameConfigResponse.noteStateDisagreement();
- sameConfigResponse.setConfigVersion(2);
- sameConfigResponse.setConfig(originalConfig);
- startCapturingLogMessages();
- HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
- now()++, // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- HostAndPort("host2"),
- StatusWith<ReplSetHeartbeatResponse>(sameConfigResponse),
- lastOpTimeApplied);
- stopCapturingLogMessages();
- ASSERT_NO_ACTION(action.getAction());
- ASSERT_EQUALS(1, countLogLinesContaining("Config from heartbeat response was "
- "same as ours."));
- }
-
- TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataOldConfig) {
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
+ getTopoCoord().prepareHeartbeatRequest(_firstRequestDate, "rs0", _target);
+ // 5 seconds to successfully complete the heartbeat before the timeout expires.
+ ASSERT_EQUALS(5000, request.second.count());
- // request heartbeat
- std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
- getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host2"));
-
- ReplSetHeartbeatResponse believesWeAreDownResponse;
- believesWeAreDownResponse.noteReplSet();
- believesWeAreDownResponse.setSetName("rs0");
- believesWeAreDownResponse.setState(MemberState::RS_SECONDARY);
- believesWeAreDownResponse.setElectable(true);
- believesWeAreDownResponse.noteStateDisagreement();
- startCapturingLogMessages();
+ // Initial heartbeat request fails at t + 4000ms
HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
- now()++, // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- HostAndPort("host2"),
- StatusWith<ReplSetHeartbeatResponse>(believesWeAreDownResponse),
- lastOpTimeApplied);
- stopCapturingLogMessages();
- ASSERT_NO_ACTION(action.getAction());
- ASSERT_EQUALS(1, countLogLinesContaining("host2:27017 thinks that we are down"));
-
- }
+ _firstRequestDate + Seconds(4), // 4 seconds elapsed, retry allowed.
+ Milliseconds(3990), // Spent 3.99 of the 4 seconds in the network.
+ _target,
+ StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit, "Took too long"),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
- TEST_F(HeartbeatResponseTestOneRetry, DecideToReconfig) {
- // Confirm that action responses can come back from retries; in this, expect a Reconfig
- // action.
- ReplicaSetConfig newConfig;
- ASSERT_OK(newConfig.initialize(
- BSON("_id" << "rs0" <<
- "version" << 7 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017") <<
- BSON("_id" << 3 << "host" << "host4:27017")) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 5))));
- ASSERT_OK(newConfig.validate());
-
- ReplSetHeartbeatResponse reconfigResponse;
- reconfigResponse.noteReplSet();
- reconfigResponse.setSetName("rs0");
- reconfigResponse.setState(MemberState::RS_SECONDARY);
- reconfigResponse.setElectable(true);
- reconfigResponse.setConfigVersion(7);
- reconfigResponse.setConfig(newConfig);
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(reconfigResponse),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
- ASSERT_EQUALS(HeartbeatResponseAction::Reconfig, action.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
- }
-
- TEST_F(HeartbeatResponseTestOneRetry, DecideToStepDownRemotePrimary) {
- // Confirm that action responses can come back from retries; in this, expect a
- // StepDownRemotePrimary action.
-
- // make self primary
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- makeSelfPrimary(Timestamp(5,0));
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- ReplSetHeartbeatResponse electedMoreRecentlyResponse;
- electedMoreRecentlyResponse.noteReplSet();
- electedMoreRecentlyResponse.setSetName("rs0");
- electedMoreRecentlyResponse.setState(MemberState::RS_PRIMARY);
- electedMoreRecentlyResponse.setElectable(true);
- electedMoreRecentlyResponse.setElectionTime(Timestamp(3,0));
- electedMoreRecentlyResponse.setConfigVersion(5);
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
- OpTime()); // We've never applied anything.
- ASSERT_EQUALS(HeartbeatResponseAction::StepDownRemotePrimary, action.getAction());
- ASSERT_EQUALS(1, action.getPrimaryConfigIndex());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
- }
-
- TEST_F(HeartbeatResponseTestOneRetry, DecideToStepDownSelf) {
- // Confirm that action responses can come back from retries; in this, expect a StepDownSelf
- // action.
-
- // acknowledge the other member so that we see a majority
- HeartbeatResponseAction action = receiveDownHeartbeat(HostAndPort("host3"),
- "rs0",
- OpTime(Timestamp(100, 0), 0));
- ASSERT_NO_ACTION(action.getAction());
-
- // make us PRIMARY
- makeSelfPrimary();
-
- ReplSetHeartbeatResponse electedMoreRecentlyResponse;
- electedMoreRecentlyResponse.noteReplSet();
- electedMoreRecentlyResponse.setSetName("rs0");
- electedMoreRecentlyResponse.setState(MemberState::RS_PRIMARY);
- electedMoreRecentlyResponse.setElectable(false);
- electedMoreRecentlyResponse.setElectionTime(Timestamp(10,0));
- electedMoreRecentlyResponse.setConfigVersion(5);
- action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
- ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, action.getAction());
- ASSERT_EQUALS(0, action.getPrimaryConfigIndex());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
- // Doesn't actually do the stepdown until stepDownIfPending is called
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- ASSERT_TRUE(getTopoCoord().stepDownIfPending());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- }
-
- TEST_F(HeartbeatResponseTestOneRetry, DecideToStartElection) {
- // Confirm that action responses can come back from retries; in this, expect a StartElection
- // action.
-
- // acknowledge the other member so that we see a majority
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
- HeartbeatResponseAction action = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(action.getAction());
-
- // make sure we are electable
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- ReplSetHeartbeatResponse startElectionResponse;
- startElectionResponse.noteReplSet();
- startElectionResponse.setSetName("rs0");
- startElectionResponse.setState(MemberState::RS_SECONDARY);
- startElectionResponse.setElectable(true);
- startElectionResponse.setConfigVersion(5);
- action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500), // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(startElectionResponse),
- election);
- ASSERT_EQUALS(HeartbeatResponseAction::StartElection, action.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
- }
-
- TEST_F(HeartbeatResponseTestTwoRetries, HeartbeatRetriesAtMostTwice) {
- // Confirm that the topology coordinator attempts to retry a failed heartbeat two times
- // after initial failure, assuming that the heartbeat timeout (set to 5 seconds in the
- // fixture) has not expired.
- //
- // Failed heartbeats propose taking no action, other than scheduling the next heartbeat. We
- // can detect a retry vs the next regularly scheduled heartbeat because retries are
- // scheduled immediately, while subsequent heartbeats are scheduled after the hard-coded
- // heartbeat interval of 2 seconds.
-
- // Second retry fails at t + 4800ms
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4800), // 4.8 of the 5 seconds elapsed;
- // could still retry.
- Milliseconds(100), // Spent 0.1 of the 0.3 seconds in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::NodeNotFound, "Bad DNS?"),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- // Because this is the second retry, rather than retry again, we expect to wait for the
- // heartbeat interval of 2 seconds to elapse.
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6800), action.getNextHeartbeatStartDate());
+ // Because the heartbeat failed without timing out, we expect to retry immediately.
+ ASSERT_EQUALS(_firstRequestDate + Seconds(4), action.getNextHeartbeatStartDate());
- // Ensure a third failed heartbeat caused the node to be marked down
+ // First heartbeat retry prepared, at t + 4000ms.
+ request = getTopoCoord().prepareHeartbeatRequest(
+ _firstRequestDate + Milliseconds(4000), "rs0", _target);
+ // One second left to complete the heartbeat.
+ ASSERT_EQUALS(1000, request.second.count());
+
+ // Ensure a single failed heartbeat did not cause the node to be marked down
BSONObjBuilder statusBuilder;
- Status resultStatus(ErrorCodes::InternalError,
- "prepareStatusResponse didn't set result");
+ Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
getTopoCoord().prepareStatusResponse(cbData(),
- firstRequestDate() + Milliseconds(4900),
+ _firstRequestDate + Milliseconds(4000),
10,
- OpTime(Timestamp(100,0), 0),
+ OpTime(Timestamp(100, 0), 0),
&statusBuilder,
&resultStatus);
ASSERT_OK(resultStatus);
@@ -1683,209 +1427,53 @@ namespace {
BSONObj member1Status = memberArray[1].Obj();
ASSERT_EQUALS(1, member1Status["_id"].Int());
- ASSERT_EQUALS(0, member1Status["health"].Double());
- }
-
- TEST_F(HeartbeatResponseTestTwoRetries, DecideToStepDownRemotePrimary) {
- // Confirm that action responses can come back from retries; in this, expect a
- // StepDownRemotePrimary action.
-
- // make self primary
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- makeSelfPrimary(Timestamp(5,0));
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- ReplSetHeartbeatResponse electedMoreRecentlyResponse;
- electedMoreRecentlyResponse.noteReplSet();
- electedMoreRecentlyResponse.setSetName("rs0");
- electedMoreRecentlyResponse.setState(MemberState::RS_PRIMARY);
- electedMoreRecentlyResponse.setElectable(true);
- electedMoreRecentlyResponse.setElectionTime(Timestamp(3,0));
- electedMoreRecentlyResponse.setConfigVersion(5);
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(5000), // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
- OpTime()); // We've never applied anything.
- ASSERT_EQUALS(HeartbeatResponseAction::StepDownRemotePrimary, action.getAction());
- ASSERT_EQUALS(1, action.getPrimaryConfigIndex());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
- }
-
- TEST_F(HeartbeatResponseTestTwoRetries, DecideToStepDownSelf) {
- // Confirm that action responses can come back from retries; in this, expect a StepDownSelf
- // action.
-
- // acknowledge the other member so that we see a majority
- HeartbeatResponseAction action = receiveDownHeartbeat(HostAndPort("host3"),
- "rs0",
- OpTime(Timestamp(100, 0), 0));
- ASSERT_NO_ACTION(action.getAction());
-
- // make us PRIMARY
- makeSelfPrimary();
-
- ReplSetHeartbeatResponse electedMoreRecentlyResponse;
- electedMoreRecentlyResponse.noteReplSet();
- electedMoreRecentlyResponse.setSetName("rs0");
- electedMoreRecentlyResponse.setState(MemberState::RS_PRIMARY);
- electedMoreRecentlyResponse.setElectable(false);
- electedMoreRecentlyResponse.setElectionTime(Timestamp(10,0));
- electedMoreRecentlyResponse.setConfigVersion(5);
- action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(5000), // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
- ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, action.getAction());
- ASSERT_EQUALS(0, action.getPrimaryConfigIndex());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
- // Doesn't actually do the stepdown until stepDownIfPending is called
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- ASSERT_TRUE(getTopoCoord().stepDownIfPending());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- }
-
- TEST_F(HeartbeatResponseTestTwoRetries, DecideToStartElection) {
- // Confirm that action responses can come back from retries; in this, expect a StartElection
- // action.
-
- // acknowledge the other member so that we see a majority
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
- HeartbeatResponseAction action = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(action.getAction());
-
- // make sure we are electable
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- ReplSetHeartbeatResponse startElectionResponse;
- startElectionResponse.noteReplSet();
- startElectionResponse.setSetName("rs0");
- startElectionResponse.setState(MemberState::RS_SECONDARY);
- startElectionResponse.setElectable(true);
- startElectionResponse.setConfigVersion(5);
- action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(5000), // Time is left.
- Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(startElectionResponse),
- election);
- ASSERT_EQUALS(HeartbeatResponseAction::StartElection, action.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
+ ASSERT_EQUALS(1, member1Status["health"].Double());
}
- TEST_F(HeartbeatResponseTest, HeartbeatTimeoutSuppressesFirstRetry) {
- // Confirm that the topology coordinator does not schedule an immediate heartbeat retry if
- // the heartbeat timeout period expired before the initial request completed.
-
- HostAndPort target("host2", 27017);
- Date_t firstRequestDate = unittest::assertGet(dateFromISOString("2014-08-29T13:00Z"));
-
- // Initial heartbeat request prepared, at t + 0.
- std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
- getTopoCoord().prepareHeartbeatRequest(firstRequestDate,
- "rs0",
- target);
- // 5 seconds to successfully complete the heartbeat before the timeout expires.
- ASSERT_EQUALS(5000, request.second.count());
-
- // Initial heartbeat request fails at t + 5000ms
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate + Milliseconds(5000), // Entire heartbeat period elapsed;
- // no retry allowed.
- Milliseconds(4990), // Spent 4.99 of the 4 seconds in the network.
- target,
- StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit,
- "Took too long"),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
-
- ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- // Because the heartbeat timed out, we'll retry in 2 seconds.
- ASSERT_EQUALS(firstRequestDate + Milliseconds(7000), action.getNextHeartbeatStartDate());
+ Date_t firstRequestDate() {
+ return _firstRequestDate;
}
- TEST_F(HeartbeatResponseTestOneRetry, HeartbeatTimeoutSuppressesSecondRetry) {
- // Confirm that the topology coordinator does not schedule an second heartbeat retry if
- // the heartbeat timeout period expired before the first retry completed.
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(5010), // Entire heartbeat period elapsed;
- // no retry allowed.
- Milliseconds(1000), // Spent 1 of the 1.01 seconds in the network.
- target(),
- StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit,
- "Took too long"),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
-
- ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- // Because the heartbeat timed out, we'll retry in 2 seconds.
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(7010), action.getNextHeartbeatStartDate());
+ HostAndPort target() {
+ return _target;
}
- TEST_F(HeartbeatResponseTestTwoRetries, HeartbeatThreeNonconsecutiveFailures) {
- // Confirm that the topology coordinator does not mark a node down on three
- // nonconsecutive heartbeat failures.
- ReplSetHeartbeatResponse response;
- response.noteReplSet();
- response.setSetName("rs0");
- response.setState(MemberState::RS_SECONDARY);
- response.setElectable(true);
- response.setConfigVersion(5);
-
- // successful response (third response due to the two failures in setUp())
- HeartbeatResponseAction action =
- getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(4500),
- Milliseconds(400),
- target(),
- StatusWith<ReplSetHeartbeatResponse>(response),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+private:
+ Date_t _firstRequestDate;
+ HostAndPort _target;
+};
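+
+// Fixture extending the one-retry case: the first retry also fails at t + 4500ms, still inside
+// the 5 second window, so a second (and final) retry is prepared immediately with 500ms left.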
+class HeartbeatResponseTestTwoRetries : public HeartbeatResponseTestOneRetry {
+public:
+ virtual void setUp() {
+ HeartbeatResponseTestOneRetry::setUp();
+ // First retry fails at t + 4500ms
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(4500), // 4.5 of the 5 seconds elapsed;
+ // could retry.
+ Milliseconds(400), // Spent 0.4 of the 0.5 seconds in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::NodeNotFound, "Bad DNS?"),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- // Because the heartbeat succeeded, we'll retry in 2 seconds.
- ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
-
- // request next heartbeat
- getTopoCoord().prepareHeartbeatRequest(
- firstRequestDate() + Milliseconds(6500), "rs0", target());
- // third failed response
- action = getTopoCoord().processHeartbeatResponse(
- firstRequestDate() + Milliseconds(7100),
- Milliseconds(400),
- target(),
- StatusWith<ReplSetHeartbeatResponse>(Status{ErrorCodes::HostUnreachable, ""}),
- OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+ // Because the first retry failed without timing out, we expect to retry immediately.
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(4500), action.getNextHeartbeatStartDate());
- ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ // Second retry prepared at t + 4500ms.
+ std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
+ getTopoCoord().prepareHeartbeatRequest(
+ firstRequestDate() + Milliseconds(4500), "rs0", target());
+ // 500ms left to complete the heartbeat.
+ ASSERT_EQUALS(500, request.second.count());
- // Ensure a third nonconsecutive heartbeat failure did not cause the node to be marked down
+ // Ensure a second failed heartbeat did not cause the node to be marked down
BSONObjBuilder statusBuilder;
- Status resultStatus(ErrorCodes::InternalError,
- "prepareStatusResponse didn't set result");
+ Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
getTopoCoord().prepareStatusResponse(cbData(),
- firstRequestDate() + Milliseconds(7000),
- 600,
- OpTime(Timestamp(100,0), 0),
+ firstRequestDate() + Seconds(4),
+ 10,
+ OpTime(Timestamp(100, 0), 0),
&statusBuilder,
&resultStatus);
ASSERT_OK(resultStatus);
@@ -1895,3057 +1483,3534 @@ namespace {
ASSERT_EQUALS(1, member1Status["_id"].Int());
ASSERT_EQUALS(1, member1Status["health"].Double());
-
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataNewPrimary) {
- OpTime election = OpTime(Timestamp(5,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataTwoPrimariesNewOneOlder) {
- OpTime election = OpTime(Timestamp(5,0), 0);
- OpTime election2 = OpTime(Timestamp(4,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_PRIMARY,
- election2,
- election,
- lastOpTimeApplied);
- // second primary does not change primary index
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
}
+};
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataTwoPrimariesNewOneNewer) {
- OpTime election = OpTime(Timestamp(4,0), 0);
- OpTime election2 = OpTime(Timestamp(5,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_PRIMARY,
- election2,
- election,
- lastOpTimeApplied);
- // second primary does not change primary index
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataTwoPrimariesIncludingMeNewOneOlder) {
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- makeSelfPrimary(Timestamp(5,0));
-
- OpTime election = OpTime(Timestamp(4,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
-
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
- ASSERT_EQUALS(HeartbeatResponseAction::StepDownRemotePrimary, nextAction.getAction());
- ASSERT_EQUALS(1, nextAction.getPrimaryConfigIndex());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataStepDownPrimaryForHighPriorityFreshNode) {
- // In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
- // priority and similarly fresh node ("host3"). However, since the coordinator's node
- // (host1) is not the higher priority node, it takes no action.
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 6 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017" << "priority" << 3)) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(13,0), 0);
- OpTime slightlyLessFreshLastOpTimeApplied = OpTime(Timestamp(3,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- slightlyLessFreshLastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_EQUALS(HeartbeatResponseAction::NoAction, nextAction.getAction());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataStepDownSelfForHighPriorityFreshNode) {
- // In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
- // and equally fresh node ("host3"). As a result it responds with a StepDownSelf action.
- //
- // Despite having stepped down, we should remain electable, in order to dissuade lower
- // priority nodes from standing for election.
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 6 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017" << "priority" << 3)) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
- OpTime election = OpTime(Timestamp(1000,0), 0);
-
- getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- makeSelfPrimary(election.getTimestamp());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- election,
- election);
- ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, nextAction.getAction());
- ASSERT_EQUALS(0, nextAction.getPrimaryConfigIndex());
-
- // Process a heartbeat response to confirm that this node, which is no longer primary,
- // still tells other nodes that it is electable. This will stop lower priority nodes
- // from standing for election.
- ReplSetHeartbeatArgs hbArgs;
- hbArgs.setSetName("rs0");
- hbArgs.setProtocolVersion(1);
- hbArgs.setConfigVersion(6);
- hbArgs.setSenderId(1);
- hbArgs.setSenderHost(HostAndPort("host3", 27017));
- ReplSetHeartbeatResponse hbResp;
- ASSERT_OK(getTopoCoord().prepareHeartbeatResponse(now(),
- hbArgs,
- "rs0",
- election,
- &hbResp));
- ASSERT(!hbResp.hasIsElectable() || hbResp.isElectable()) << hbResp.toString();
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataDoNotStepDownSelfForHighPriorityStaleNode) {
- // In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
- // and stale node ("host3"). As a result it responds with NoAction.
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 6 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017" << "priority" << 3)) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
- OpTime election = OpTime(Timestamp(1000,0), 0);
- OpTime staleTime = OpTime();
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- makeSelfPrimary(election.getTimestamp());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- staleTime,
- election);
- ASSERT_NO_ACTION(nextAction.getAction());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataDoNotStepDownPrimaryForHighPriorityStaleNode) {
- // In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
- // priority and stale node ("host3"). As a result it responds with NoAction.
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 6 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017" << "priority" << 3)) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- OpTime election = OpTime(Timestamp(1000,0), 0);
- OpTime stale = OpTime();
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- election);
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- stale,
- election);
- ASSERT_NO_ACTION(nextAction.getAction());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataTwoPrimariesIncludingMeNewOneNewer) {
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- makeSelfPrimary(Timestamp(2,0));
-
- OpTime election = OpTime(Timestamp(4,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
-
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, nextAction.getAction());
- ASSERT_EQUALS(0, nextAction.getPrimaryConfigIndex());
- // Doesn't actually do the stepdown until stepDownIfPending is called
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- ASSERT_TRUE(getTopoCoord().stepDownIfPending());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownNoMajority) {
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButNoPriority) {
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 5 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017" << "priority" << 0) <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- 0);
-
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButIAmStarting) {
- setSelfMemberState(MemberState::RS_STARTUP);
-
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButIAmRecovering) {
- setSelfMemberState(MemberState::RS_RECOVERING);
-
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButIHaveStepdownWait) {
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // freeze node to set stepdown wait
- BSONObjBuilder response;
- getTopoCoord().prepareFreezeResponse(now()++, 20, &response);
-
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButIAmArbiter) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 5 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017" <<
- "arbiterOnly" << true) <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- 0);
-
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajority) {
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(399,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
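+// Fixture that raises the minimum logged severity to Debug(3) so the tests below can assert,
+// via countLogLinesContaining(), on debug-level messages emitted while processing heartbeat
+// responses; tearDown() restores the default severity.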
+class HeartbeatResponseHighVerbosityTest : public HeartbeatResponseTest {
+public:
+ virtual void setUp() {
+ HeartbeatResponseTest::setUp();
+ // set verbosity as high as the highest verbosity log message we'd like to check for
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
}
- TEST_F(HeartbeatResponseTest, ElectionStartElectionWhileCandidate) {
- // In this test, the TopologyCoordinator goes through the steps of a successful election,
- // during which it receives a heartbeat that would normally trigger it to become a candidate
- // and respond with a StartElection HeartbeatResponseAction. However, since it is already in
- // candidate state, it responds with a NoAction HeartbeatResponseAction. Then finishes by
- // being winning the election.
-
- // 1. All nodes heartbeat to indicate that they are up and that "host2" is PRIMARY.
- // 2. "host2" goes down, triggering an election.
- // 3. "host2" comes back, which would normally trigger election, but since the
- // TopologyCoordinator is already in candidate mode, does not.
- // 4. TopologyCoordinator concludes its freshness round successfully and wins the election.
-
- setSelfMemberState(MemberState::RS_SECONDARY);
- now() += Seconds(30); // we need to be more than LastVote::leaseTime from the start of time or
- // else some Date_t math goes horribly awry
-
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(130,0), 0);
- OID round = OID::gen();
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // candidate time!
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- // see the downed node as SECONDARY and decide to take no action, but are still a candidate
- nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
-
- // normally this would trigger StartElection, but we are already a candidate
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- // now voteForSelf as though we received all our fresh responses
- ASSERT_TRUE(getTopoCoord().voteForMyself(now()++));
-
- // now win election and ensure _electionId and _electionTime are set properly
- getTopoCoord().processWinElection(round, election.getTimestamp());
- ASSERT_EQUALS(round, getTopoCoord().getElectionId());
- ASSERT_EQUALS(election.getTimestamp(), getTopoCoord().getElectionTime());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+ virtual void tearDown() {
+ HeartbeatResponseTest::tearDown();
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
}
+};
+
+TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataNodeBelievesWeAreDown) {
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ // request heartbeat
+ std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
+ getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host2"));
+
+ ReplSetHeartbeatResponse believesWeAreDownResponse;
+ believesWeAreDownResponse.noteReplSet();
+ believesWeAreDownResponse.setSetName("rs0");
+ believesWeAreDownResponse.setState(MemberState::RS_SECONDARY);
+ believesWeAreDownResponse.setElectable(true);
+ believesWeAreDownResponse.noteStateDisagreement();
+ startCapturingLogMessages();
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ now()++, // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ HostAndPort("host2"),
+ StatusWith<ReplSetHeartbeatResponse>(believesWeAreDownResponse),
+ lastOpTimeApplied);
+ stopCapturingLogMessages();
+ ASSERT_NO_ACTION(action.getAction());
+ ASSERT_EQUALS(1, countLogLinesContaining("host2:27017 thinks that we are down"));
+}
+
+TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataMemberNotInConfig) {
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ // request heartbeat
+ std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
+ getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host5"));
+
+ ReplSetHeartbeatResponse memberMissingResponse;
+ memberMissingResponse.noteReplSet();
+ memberMissingResponse.setSetName("rs0");
+ memberMissingResponse.setState(MemberState::RS_SECONDARY);
+ memberMissingResponse.setElectable(true);
+ memberMissingResponse.noteStateDisagreement();
+ startCapturingLogMessages();
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ now()++, // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ HostAndPort("host5"),
+ StatusWith<ReplSetHeartbeatResponse>(memberMissingResponse),
+ lastOpTimeApplied);
+ stopCapturingLogMessages();
+ ASSERT_NO_ACTION(action.getAction());
+ ASSERT_EQUALS(1, countLogLinesContaining("Could not find host5:27017 in current config"));
+}
+
+TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataSameConfig) {
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ // request heartbeat
+ std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
+ getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host2"));
+
+ // construct a copy of the original config for log message checking later
+ // see HeartbeatResponseTest for the origin of the original config
+ ReplicaSetConfig originalConfig;
+ originalConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 5 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017")) << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)));
+
+ ReplSetHeartbeatResponse sameConfigResponse;
+ sameConfigResponse.noteReplSet();
+ sameConfigResponse.setSetName("rs0");
+ sameConfigResponse.setState(MemberState::RS_SECONDARY);
+ sameConfigResponse.setElectable(true);
+ sameConfigResponse.noteStateDisagreement();
+ sameConfigResponse.setConfigVersion(2);
+ sameConfigResponse.setConfig(originalConfig);
+ startCapturingLogMessages();
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ now()++, // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ HostAndPort("host2"),
+ StatusWith<ReplSetHeartbeatResponse>(sameConfigResponse),
+ lastOpTimeApplied);
+ stopCapturingLogMessages();
+ ASSERT_NO_ACTION(action.getAction());
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "Config from heartbeat response was "
+ "same as ours."));
+}
+
+TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataOldConfig) {
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ // request heartbeat
+ std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
+ getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host2"));
+
+ ReplSetHeartbeatResponse believesWeAreDownResponse;
+ believesWeAreDownResponse.noteReplSet();
+ believesWeAreDownResponse.setSetName("rs0");
+ believesWeAreDownResponse.setState(MemberState::RS_SECONDARY);
+ believesWeAreDownResponse.setElectable(true);
+ believesWeAreDownResponse.noteStateDisagreement();
+ startCapturingLogMessages();
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ now()++, // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ HostAndPort("host2"),
+ StatusWith<ReplSetHeartbeatResponse>(believesWeAreDownResponse),
+ lastOpTimeApplied);
+ stopCapturingLogMessages();
+ ASSERT_NO_ACTION(action.getAction());
+ ASSERT_EQUALS(1, countLogLinesContaining("host2:27017 thinks that we are down"));
+}
+
+TEST_F(HeartbeatResponseTestOneRetry, DecideToReconfig) {
+    // Confirm that action responses can come back from retries; in this case, expect a
+    // Reconfig action.
+ ReplicaSetConfig newConfig;
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 7 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017")
+ << BSON("_id" << 3 << "host"
+ << "host4:27017")) << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5))));
+ ASSERT_OK(newConfig.validate());
+
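+    // The response below carries config version 7, which is newer than this node's current
+    // config, so processing it is expected to propose a Reconfig action.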
+ ReplSetHeartbeatResponse reconfigResponse;
+ reconfigResponse.noteReplSet();
+ reconfigResponse.setSetName("rs0");
+ reconfigResponse.setState(MemberState::RS_SECONDARY);
+ reconfigResponse.setElectable(true);
+ reconfigResponse.setConfigVersion(7);
+ reconfigResponse.setConfig(newConfig);
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(4500), // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(reconfigResponse),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+ ASSERT_EQUALS(HeartbeatResponseAction::Reconfig, action.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+}
+
+TEST_F(HeartbeatResponseTestOneRetry, DecideToStepDownRemotePrimary) {
+    // Confirm that action responses can come back from retries; in this case, expect a
+ // StepDownRemotePrimary action.
+
+ // make self primary
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ makeSelfPrimary(Timestamp(5, 0));
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ ReplSetHeartbeatResponse electedMoreRecentlyResponse;
+ electedMoreRecentlyResponse.noteReplSet();
+ electedMoreRecentlyResponse.setSetName("rs0");
+ electedMoreRecentlyResponse.setState(MemberState::RS_PRIMARY);
+ electedMoreRecentlyResponse.setElectable(true);
+ electedMoreRecentlyResponse.setElectionTime(Timestamp(3, 0));
+ electedMoreRecentlyResponse.setConfigVersion(5);
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(4500), // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
+ OpTime()); // We've never applied anything.
+ ASSERT_EQUALS(HeartbeatResponseAction::StepDownRemotePrimary, action.getAction());
+ ASSERT_EQUALS(1, action.getPrimaryConfigIndex());
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+}
+
+TEST_F(HeartbeatResponseTestOneRetry, DecideToStepDownSelf) {
+    // Confirm that action responses can come back from retries; in this case, expect a
+    // StepDownSelf action.
+
+ // acknowledge the other member so that we see a majority
+ HeartbeatResponseAction action =
+ receiveDownHeartbeat(HostAndPort("host3"), "rs0", OpTime(Timestamp(100, 0), 0));
+ ASSERT_NO_ACTION(action.getAction());
+
+ // make us PRIMARY
+ makeSelfPrimary();
+
+ ReplSetHeartbeatResponse electedMoreRecentlyResponse;
+ electedMoreRecentlyResponse.noteReplSet();
+ electedMoreRecentlyResponse.setSetName("rs0");
+ electedMoreRecentlyResponse.setState(MemberState::RS_PRIMARY);
+ electedMoreRecentlyResponse.setElectable(false);
+ electedMoreRecentlyResponse.setElectionTime(Timestamp(10, 0));
+ electedMoreRecentlyResponse.setConfigVersion(5);
+ action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(4500), // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+ ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, action.getAction());
+ ASSERT_EQUALS(0, action.getPrimaryConfigIndex());
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+ // Doesn't actually do the stepdown until stepDownIfPending is called
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ ASSERT_TRUE(getTopoCoord().stepDownIfPending());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+}
+
+TEST_F(HeartbeatResponseTestOneRetry, DecideToStartElection) {
+    // Confirm that action responses can come back from retries; in this case, expect a
+    // StartElection action.
+
+ // acknowledge the other member so that we see a majority
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
+ HeartbeatResponseAction action = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(action.getAction());
+
+ // make sure we are electable
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ ReplSetHeartbeatResponse startElectionResponse;
+ startElectionResponse.noteReplSet();
+ startElectionResponse.setSetName("rs0");
+ startElectionResponse.setState(MemberState::RS_SECONDARY);
+ startElectionResponse.setElectable(true);
+ startElectionResponse.setConfigVersion(5);
+ action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(4500), // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(startElectionResponse),
+ election);
+ ASSERT_EQUALS(HeartbeatResponseAction::StartElection, action.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+}
+
+TEST_F(HeartbeatResponseTestTwoRetries, HeartbeatRetriesAtMostTwice) {
+ // Confirm that the topology coordinator attempts to retry a failed heartbeat two times
+ // after initial failure, assuming that the heartbeat timeout (set to 5 seconds in the
+ // fixture) has not expired.
+ //
+ // Failed heartbeats propose taking no action, other than scheduling the next heartbeat. We
+ // can detect a retry vs the next regularly scheduled heartbeat because retries are
+ // scheduled immediately, while subsequent heartbeats are scheduled after the hard-coded
+ // heartbeat interval of 2 seconds.
+
+ // Second retry fails at t + 4800ms
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(4800), // 4.8 of the 5 seconds elapsed;
+ // could still retry.
+ Milliseconds(100), // Spent 0.1 of the 0.3 seconds in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::NodeNotFound, "Bad DNS?"),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+ ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ // Because this is the second retry, rather than retry again, we expect to wait for the
+ // heartbeat interval of 2 seconds to elapse.
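+    // (t + 4800ms, when the second failed retry was processed, plus the 2000ms heartbeat
+    // interval, gives t + 6800ms.)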
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(6800), action.getNextHeartbeatStartDate());
+
+ // Ensure a third failed heartbeat caused the node to be marked down
+ BSONObjBuilder statusBuilder;
+ Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
+ getTopoCoord().prepareStatusResponse(cbData(),
+ firstRequestDate() + Milliseconds(4900),
+ 10,
+ OpTime(Timestamp(100, 0), 0),
+ &statusBuilder,
+ &resultStatus);
+ ASSERT_OK(resultStatus);
+ BSONObj rsStatus = statusBuilder.obj();
+ std::vector<BSONElement> memberArray = rsStatus["members"].Array();
+ BSONObj member1Status = memberArray[1].Obj();
+
+ ASSERT_EQUALS(1, member1Status["_id"].Int());
+ ASSERT_EQUALS(0, member1Status["health"].Double());
+}
+
+TEST_F(HeartbeatResponseTestTwoRetries, DecideToStepDownRemotePrimary) {
+    // Confirm that action responses can come back from retries; in this case, expect a
+ // StepDownRemotePrimary action.
+
+ // make self primary
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ makeSelfPrimary(Timestamp(5, 0));
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ ReplSetHeartbeatResponse electedMoreRecentlyResponse;
+ electedMoreRecentlyResponse.noteReplSet();
+ electedMoreRecentlyResponse.setSetName("rs0");
+ electedMoreRecentlyResponse.setState(MemberState::RS_PRIMARY);
+ electedMoreRecentlyResponse.setElectable(true);
+ electedMoreRecentlyResponse.setElectionTime(Timestamp(3, 0));
+ electedMoreRecentlyResponse.setConfigVersion(5);
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(5000), // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
+ OpTime()); // We've never applied anything.
+ ASSERT_EQUALS(HeartbeatResponseAction::StepDownRemotePrimary, action.getAction());
+ ASSERT_EQUALS(1, action.getPrimaryConfigIndex());
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
+}
+
+TEST_F(HeartbeatResponseTestTwoRetries, DecideToStepDownSelf) {
+    // Confirm that action responses can come back from retries; in this case, expect a
+    // StepDownSelf action.
+
+ // acknowledge the other member so that we see a majority
+ HeartbeatResponseAction action =
+ receiveDownHeartbeat(HostAndPort("host3"), "rs0", OpTime(Timestamp(100, 0), 0));
+ ASSERT_NO_ACTION(action.getAction());
+
+ // make us PRIMARY
+ makeSelfPrimary();
+
+ ReplSetHeartbeatResponse electedMoreRecentlyResponse;
+ electedMoreRecentlyResponse.noteReplSet();
+ electedMoreRecentlyResponse.setSetName("rs0");
+ electedMoreRecentlyResponse.setState(MemberState::RS_PRIMARY);
+ electedMoreRecentlyResponse.setElectable(false);
+ electedMoreRecentlyResponse.setElectionTime(Timestamp(10, 0));
+ electedMoreRecentlyResponse.setConfigVersion(5);
+ action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(5000), // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(electedMoreRecentlyResponse),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+ ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, action.getAction());
+ ASSERT_EQUALS(0, action.getPrimaryConfigIndex());
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
+ // Doesn't actually do the stepdown until stepDownIfPending is called
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ ASSERT_TRUE(getTopoCoord().stepDownIfPending());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+}
+
+TEST_F(HeartbeatResponseTestTwoRetries, DecideToStartElection) {
+    // Confirm that action responses can come back from retries; in this case, expect a
+    // StartElection action.
+
+ // acknowledge the other member so that we see a majority
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
+ HeartbeatResponseAction action = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(action.getAction());
+
+ // make sure we are electable
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ ReplSetHeartbeatResponse startElectionResponse;
+ startElectionResponse.noteReplSet();
+ startElectionResponse.setSetName("rs0");
+ startElectionResponse.setState(MemberState::RS_SECONDARY);
+ startElectionResponse.setElectable(true);
+ startElectionResponse.setConfigVersion(5);
+ action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(5000), // Time is left.
+ Milliseconds(400), // Spent 0.4 of the 0.5 second in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(startElectionResponse),
+ election);
+ ASSERT_EQUALS(HeartbeatResponseAction::StartElection, action.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(7000), action.getNextHeartbeatStartDate());
+}
+
+TEST_F(HeartbeatResponseTest, HeartbeatTimeoutSuppressesFirstRetry) {
+ // Confirm that the topology coordinator does not schedule an immediate heartbeat retry if
+ // the heartbeat timeout period expired before the initial request completed.
+
+ HostAndPort target("host2", 27017);
+ Date_t firstRequestDate = unittest::assertGet(dateFromISOString("2014-08-29T13:00Z"));
+
+ // Initial heartbeat request prepared, at t + 0.
+ std::pair<ReplSetHeartbeatArgs, Milliseconds> request =
+ getTopoCoord().prepareHeartbeatRequest(firstRequestDate, "rs0", target);
+ // 5 seconds to successfully complete the heartbeat before the timeout expires.
+ ASSERT_EQUALS(5000, request.second.count());
+
+ // Initial heartbeat request fails at t + 5000ms
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate + Milliseconds(5000), // Entire heartbeat period elapsed;
+ // no retry allowed.
+        Milliseconds(4990),  // Spent 4.99 of the 5 seconds in the network.
+ target,
+ StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit, "Took too long"),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+
+ ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ // Because the heartbeat timed out, we'll retry in 2 seconds.
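+    // (5000ms elapsed plus the 2000ms heartbeat interval = 7000ms after the first request.)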
+ ASSERT_EQUALS(firstRequestDate + Milliseconds(7000), action.getNextHeartbeatStartDate());
+}
+
+TEST_F(HeartbeatResponseTestOneRetry, HeartbeatTimeoutSuppressesSecondRetry) {
+    // Confirm that the topology coordinator does not schedule a second heartbeat retry if
+ // the heartbeat timeout period expired before the first retry completed.
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(5010), // Entire heartbeat period elapsed;
+ // no retry allowed.
+ Milliseconds(1000), // Spent 1 of the 1.01 seconds in the network.
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(ErrorCodes::ExceededTimeLimit, "Took too long"),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+
+ ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ // Because the heartbeat timed out, we'll retry in 2 seconds.
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(7010), action.getNextHeartbeatStartDate());
+}
+
+TEST_F(HeartbeatResponseTestTwoRetries, HeartbeatThreeNonconsecutiveFailures) {
+ // Confirm that the topology coordinator does not mark a node down on three
+ // nonconsecutive heartbeat failures.
+ ReplSetHeartbeatResponse response;
+ response.noteReplSet();
+ response.setSetName("rs0");
+ response.setState(MemberState::RS_SECONDARY);
+ response.setElectable(true);
+ response.setConfigVersion(5);
+
+ // successful response (third response due to the two failures in setUp())
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(4500),
+ Milliseconds(400),
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(response),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+
+ ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+    // Because the heartbeat succeeded, the next heartbeat is scheduled for 2 seconds later.
+ ASSERT_EQUALS(firstRequestDate() + Milliseconds(6500), action.getNextHeartbeatStartDate());
+
+ // request next heartbeat
+ getTopoCoord().prepareHeartbeatRequest(
+ firstRequestDate() + Milliseconds(6500), "rs0", target());
+ // third failed response
+ action = getTopoCoord().processHeartbeatResponse(
+ firstRequestDate() + Milliseconds(7100),
+ Milliseconds(400),
+ target(),
+ StatusWith<ReplSetHeartbeatResponse>(Status{ErrorCodes::HostUnreachable, ""}),
+ OpTime(Timestamp(0, 0), 0)); // We've never applied anything.
+
+ ASSERT_EQUALS(HeartbeatResponseAction::NoAction, action.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+
+ // Ensure a third nonconsecutive heartbeat failure did not cause the node to be marked down
+ BSONObjBuilder statusBuilder;
+ Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
+ getTopoCoord().prepareStatusResponse(cbData(),
+ firstRequestDate() + Milliseconds(7000),
+ 600,
+ OpTime(Timestamp(100, 0), 0),
+ &statusBuilder,
+ &resultStatus);
+ ASSERT_OK(resultStatus);
+ BSONObj rsStatus = statusBuilder.obj();
+ std::vector<BSONElement> memberArray = rsStatus["members"].Array();
+ BSONObj member1Status = memberArray[1].Obj();
+
+ ASSERT_EQUALS(1, member1Status["_id"].Int());
+ ASSERT_EQUALS(1, member1Status["health"].Double());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataNewPrimary) {
+ OpTime election = OpTime(Timestamp(5, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataTwoPrimariesNewOneOlder) {
+ OpTime election = OpTime(Timestamp(5, 0), 0);
+ OpTime election2 = OpTime(Timestamp(4, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election2,
+ election,
+ lastOpTimeApplied);
+ // second primary does not change primary index
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataTwoPrimariesNewOneNewer) {
+ OpTime election = OpTime(Timestamp(4, 0), 0);
+ OpTime election2 = OpTime(Timestamp(5, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election2,
+ election,
+ lastOpTimeApplied);
+ // second primary does not change primary index
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataTwoPrimariesIncludingMeNewOneOlder) {
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ makeSelfPrimary(Timestamp(5, 0));
+
+ OpTime election = OpTime(Timestamp(4, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+ ASSERT_EQUALS(HeartbeatResponseAction::StepDownRemotePrimary, nextAction.getAction());
+ ASSERT_EQUALS(1, nextAction.getPrimaryConfigIndex());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataStepDownPrimaryForHighPriorityFreshNode) {
+ // In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
+ // priority and similarly fresh node ("host3"). However, since the coordinator's node
+ // (host1) is not the higher priority node, it takes no action.
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 6 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority" << 3))
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(13, 0), 0);
+ OpTime slightlyLessFreshLastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ slightlyLessFreshLastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(HeartbeatResponseAction::NoAction, nextAction.getAction());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataStepDownSelfForHighPriorityFreshNode) {
+ // In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
+ // and equally fresh node ("host3"). As a result it responds with a StepDownSelf action.
+ //
+ // Despite having stepped down, we should remain electable, in order to dissuade lower
+ // priority nodes from standing for election.
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 6 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority" << 3))
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
+ OpTime election = OpTime(Timestamp(1000, 0), 0);
+
+ getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ makeSelfPrimary(election.getTimestamp());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(
+ HostAndPort("host3"), "rs0", MemberState::RS_SECONDARY, election, election, election);
+ ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, nextAction.getAction());
+ ASSERT_EQUALS(0, nextAction.getPrimaryConfigIndex());
+
+    // Process an incoming heartbeat request and confirm that this node, which is no longer
+    // primary, still tells other nodes that it is electable. This will stop lower priority
+    // nodes from standing for election.
+ ReplSetHeartbeatArgs hbArgs;
+ hbArgs.setSetName("rs0");
+ hbArgs.setProtocolVersion(1);
+ hbArgs.setConfigVersion(6);
+ hbArgs.setSenderId(1);
+ hbArgs.setSenderHost(HostAndPort("host3", 27017));
+ ReplSetHeartbeatResponse hbResp;
+ ASSERT_OK(getTopoCoord().prepareHeartbeatResponse(now(), hbArgs, "rs0", election, &hbResp));
+ ASSERT(!hbResp.hasIsElectable() || hbResp.isElectable()) << hbResp.toString();
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataDoNotStepDownSelfForHighPriorityStaleNode) {
+ // In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
+ // and stale node ("host3"). As a result it responds with NoAction.
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 6 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority" << 3))
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
+ OpTime election = OpTime(Timestamp(1000, 0), 0);
+ OpTime staleTime = OpTime();
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ makeSelfPrimary(election.getTimestamp());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(
+ HostAndPort("host3"), "rs0", MemberState::RS_SECONDARY, election, staleTime, election);
+ ASSERT_NO_ACTION(nextAction.getAction());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataDoNotStepDownPrimaryForHighPriorityStaleNode) {
+ // In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
+ // priority and stale node ("host3"). As a result it responds with NoAction.
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 6 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority" << 3))
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ OpTime election = OpTime(Timestamp(1000, 0), 0);
+ OpTime stale = OpTime();
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(
+ HostAndPort("host2"), "rs0", MemberState::RS_PRIMARY, election, election, election);
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(
+ HostAndPort("host3"), "rs0", MemberState::RS_SECONDARY, election, stale, election);
+ ASSERT_NO_ACTION(nextAction.getAction());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataTwoPrimariesIncludingMeNewOneNewer) {
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ makeSelfPrimary(Timestamp(2, 0));
+
+ OpTime election = OpTime(Timestamp(4, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, nextAction.getAction());
+ ASSERT_EQUALS(0, nextAction.getPrimaryConfigIndex());
+ // Doesn't actually do the stepdown until stepDownIfPending is called
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
- TEST_F(HeartbeatResponseTest, ElectionVoteForAnotherNodeBeforeFreshnessReturns) {
- // In this test, the TopologyCoordinator goes through the steps of an election. However,
- // before its freshness round ends, it receives a fresh command followed by an elect command
- // from another node, both of which it responds positively to. The TopologyCoordinator's
- // freshness round then concludes successfully, but it fails to vote for itself, since it
- // recently voted for another node.
-
- // 1. All nodes heartbeat to indicate that they are up and that "host2" is PRIMARY.
- // 2. "host2" goes down, triggering an election.
- // 3. "host3" sends a fresh command, which the TopologyCoordinator responds to positively.
- // 4. "host3" sends an elect command, which the TopologyCoordinator responds to positively.
- // 5. The TopologyCoordinator's concludes its freshness round successfully.
- // 6. The TopologyCoordinator loses the election.
+ ASSERT_TRUE(getTopoCoord().stepDownIfPending());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+}
- setSelfMemberState(MemberState::RS_SECONDARY);
- now() += Seconds(30); // we need to be more than LastVote::leaseTime from the start of time or
- // else some Date_t math goes horribly awry
-
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(100,0), 0);
- OpTime fresherOpApplied = OpTime(Timestamp(200,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // candidate time!
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- Timestamp originalElectionTime = getTopoCoord().getElectionTime();
- OID originalElectionId = getTopoCoord().getElectionId();
- // prepare an incoming fresh command
- ReplicationCoordinator::ReplSetFreshArgs freshArgs;
- freshArgs.setName = "rs0";
- freshArgs.cfgver = 5;
- freshArgs.id = 2;
- freshArgs.who = HostAndPort("host3");
- freshArgs.opTime = fresherOpApplied.getTimestamp();
-
- BSONObjBuilder freshResponseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- getTopoCoord().prepareFreshResponse(
- freshArgs, now()++, lastOpTimeApplied, &freshResponseBuilder, &result);
- BSONObj response = freshResponseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(lastOpTimeApplied.getTimestamp(), Timestamp(response["opTime"].timestampValue()));
- ASSERT_FALSE(response["fresher"].trueValue());
- ASSERT_FALSE(response["veto"].trueValue());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- // make sure incoming fresh commands do not change electionTime and electionId
- ASSERT_EQUALS(originalElectionTime, getTopoCoord().getElectionTime());
- ASSERT_EQUALS(originalElectionId, getTopoCoord().getElectionId());
-
- // an elect command comes in
- ReplicationCoordinator::ReplSetElectArgs electArgs;
- OID round = OID::gen();
- electArgs.set = "rs0";
- electArgs.round = round;
- electArgs.cfgver = 5;
- electArgs.whoid = 2;
-
- BSONObjBuilder electResponseBuilder;
- result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(
- electArgs, now()++, OpTime(), &electResponseBuilder, &result);
- stopCapturingLogMessages();
- response = electResponseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(1, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- ASSERT_EQUALS(1, countLogLinesContaining("voting yea for host3:27017 (2)"));
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- // make sure incoming elect commands do not change electionTime and electionId
- ASSERT_EQUALS(originalElectionTime, getTopoCoord().getElectionTime());
- ASSERT_EQUALS(originalElectionId, getTopoCoord().getElectionId());
-
- // now voteForSelf as though we received all our fresh responses
- ASSERT_FALSE(getTopoCoord().voteForMyself(now()++));
-
- // receive a heartbeat indicating the other node was elected
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(2, getCurrentPrimaryIndex());
- // make sure seeing a new primary does not change electionTime and electionId
- ASSERT_EQUALS(originalElectionTime, getTopoCoord().getElectionTime());
- ASSERT_EQUALS(originalElectionId, getTopoCoord().getElectionId());
-
- // now lose election and ensure _electionTime and _electionId are 0'd out
- getTopoCoord().processLoseElection();
- ASSERT_EQUALS(OID(), getTopoCoord().getElectionId());
- ASSERT_EQUALS(Timestamp(), getTopoCoord().getElectionTime());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(2, getCurrentPrimaryIndex());
- }
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownNoMajority) {
+ setSelfMemberState(MemberState::RS_SECONDARY);
- TEST_F(HeartbeatResponseTest, ElectionRespondToFreshBeforeOurFreshnessReturns) {
- // In this test, the TopologyCoordinator goes through the steps of an election. However,
- // before its freshness round ends, the TopologyCoordinator receives a fresh command from
- // another node, which it responds positively to. Its freshness then ends successfully and
- // it wins the election. The other node's elect command then comes in and is responded to
- // negatively, maintaining the TopologyCoordinator's PRIMARY state.
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
- // 1. All nodes heartbeat to indicate that they are up and that "host2" is PRIMARY.
- // 2. "host2" goes down, triggering an election.
- // 3. "host3" sends a fresh command, which the TopologyCoordinator responds to positively.
- // 4. The TopologyCoordinator concludes its freshness round successfully and wins
- // the election.
- // 5. "host3" sends an elect command, which the TopologyCoordinator responds to negatively.
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButNoPriority) {
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 5 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority" << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
+
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButIAmStarting) {
+ setSelfMemberState(MemberState::RS_STARTUP);
+
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButIAmRecovering) {
+ setSelfMemberState(MemberState::RS_RECOVERING);
+
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- setSelfMemberState(MemberState::RS_SECONDARY);
- now() += Seconds(30); // we need to be more than LastVote::leaseTime from the start of time or
- // else some Date_t math goes horribly awry
-
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(100,0), 0);
- OpTime fresherLastOpTimeApplied = OpTime(Timestamp(200,0), 0);
- OID round = OID::gen();
- OID remoteRound = OID::gen();
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // candidate time!
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- // prepare an incoming fresh command
- ReplicationCoordinator::ReplSetFreshArgs freshArgs;
- freshArgs.setName = "rs0";
- freshArgs.cfgver = 5;
- freshArgs.id = 2;
- freshArgs.who = HostAndPort("host3");
- freshArgs.opTime = fresherLastOpTimeApplied.getTimestamp();
-
- BSONObjBuilder freshResponseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- getTopoCoord().prepareFreshResponse(
- freshArgs, now()++, lastOpTimeApplied, &freshResponseBuilder, &result);
- BSONObj response = freshResponseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(lastOpTimeApplied.getTimestamp(), Timestamp(response["opTime"].timestampValue()));
- ASSERT_FALSE(response["fresher"].trueValue());
- ASSERT_FALSE(response["veto"].trueValue());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- // now voteForSelf as though we received all our fresh responses
- ASSERT_TRUE(getTopoCoord().voteForMyself(now()++));
- // now win election and ensure _electionId and _electionTime are set properly
- getTopoCoord().processWinElection(round, election.getTimestamp());
- ASSERT_EQUALS(round, getTopoCoord().getElectionId());
- ASSERT_EQUALS(election.getTimestamp(), getTopoCoord().getElectionTime());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- // an elect command comes in
- ReplicationCoordinator::ReplSetElectArgs electArgs;
- electArgs.set = "rs0";
- electArgs.round = remoteRound;
- electArgs.cfgver = 5;
- electArgs.whoid = 2;
-
- BSONObjBuilder electResponseBuilder;
- result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(
- electArgs, now()++, OpTime(), &electResponseBuilder, &result);
- stopCapturingLogMessages();
- response = electResponseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(-10000, response["vote"].Int());
- ASSERT_EQUALS(remoteRound, response["round"].OID());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
- }
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
- TEST_F(HeartbeatResponseTest, ElectionCompleteElectionThenReceiveFresh) {
- // In this test, the TopologyCoordinator goes through the steps of an election. After
- // being successfully elected, a fresher node sends a fresh command, which the
- // TopologyCoordinator responds positively to. The fresher node then sends an elect command,
- // which the Topology coordinator negatively to since the TopologyCoordinator just elected
- // itself.
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButIHaveStepdownWait) {
+ setSelfMemberState(MemberState::RS_SECONDARY);
- // 1. All nodes heartbeat to indicate that they are up and that "host2" is PRIMARY.
- // 2. "host2" goes down, triggering an election.
- // 3. The TopologyCoordinator concludes its freshness round successfully and wins
- // the election.
- // 4. "host3" sends a fresh command, which the TopologyCoordinator responds to positively.
- // 5. "host3" sends an elect command, which the TopologyCoordinator responds to negatively.
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
- now() += Seconds(30); // we need to be more than LastVote::leaseTime from the start of time or
- // else some Date_t math goes horribly awry
-
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(100,0), 0);
- OpTime fresherLastOpTimeApplied = OpTime(Timestamp(200,0), 0);
- OID round = OID::gen();
- OID remoteRound = OID::gen();
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // candidate time!
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- // now voteForSelf as though we received all our fresh responses
- ASSERT_TRUE(getTopoCoord().voteForMyself(now()++));
- // now win election
- getTopoCoord().processWinElection(round, election.getTimestamp());
- ASSERT_EQUALS(0, getTopoCoord().getCurrentPrimaryIndex());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
-
- // prepare an incoming fresh command
- ReplicationCoordinator::ReplSetFreshArgs freshArgs;
- freshArgs.setName = "rs0";
- freshArgs.cfgver = 5;
- freshArgs.id = 2;
- freshArgs.who = HostAndPort("host3");
- freshArgs.opTime = fresherLastOpTimeApplied.getTimestamp();
-
- BSONObjBuilder freshResponseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- getTopoCoord().prepareFreshResponse(
- freshArgs, now()++, lastOpTimeApplied, &freshResponseBuilder, &result);
- BSONObj response = freshResponseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(lastOpTimeApplied.getTimestamp(),
- Timestamp(response["opTime"].timestampValue()));
- ASSERT_FALSE(response["fresher"].trueValue());
- ASSERT_TRUE(response["veto"].trueValue()) << response["errmsg"];
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- // an elect command comes in
- ReplicationCoordinator::ReplSetElectArgs electArgs;
- electArgs.set = "rs0";
- electArgs.round = remoteRound;
- electArgs.cfgver = 5;
- electArgs.whoid = 2;
-
- BSONObjBuilder electResponseBuilder;
- result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(
- electArgs, now()++, OpTime(), &electResponseBuilder, &result);
- stopCapturingLogMessages();
- response = electResponseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(-10000, response["vote"].Int());
- ASSERT_EQUALS(remoteRound, response["round"].OID());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityOfVotersUp) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 5 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017" << "votes" << 0) <<
- BSON("_id" << 3 << "host" << "host4:27017" << "votes" << 0) <<
- BSON("_id" << 4 << "host" << "host5:27017" << "votes" << 0) <<
- BSON("_id" << 5 << "host" << "host6:27017" << "votes" << 0) <<
- BSON("_id" << 6 << "host" << "host7:27017")) <<
- "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // freeze node to set stepdown wait
+ BSONObjBuilder response;
+ getTopoCoord().prepareFreezeResponse(now()++, 20, &response);
+
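+    // While frozen, this node will not stand for election, so the primary going down below
+    // should yield NoAction rather than StartElection.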
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityButIAmArbiter) {
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 5 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
+
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajority) {
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(399, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, ElectionStartElectionWhileCandidate) {
+ // In this test, the TopologyCoordinator goes through the steps of a successful election,
+ // during which it receives a heartbeat that would normally trigger it to become a candidate
+ // and respond with a StartElection HeartbeatResponseAction. However, since it is already in
+    // candidate state, it responds with a NoAction HeartbeatResponseAction. It then finishes
+    // by winning the election.
+
+ // 1. All nodes heartbeat to indicate that they are up and that "host2" is PRIMARY.
+ // 2. "host2" goes down, triggering an election.
+ // 3. "host2" comes back, which would normally trigger election, but since the
+ // TopologyCoordinator is already in candidate mode, does not.
+ // 4. TopologyCoordinator concludes its freshness round successfully and wins the election.
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ now() += Seconds(30); // we need to be more than LastVote::leaseTime from the start of time or
+ // else some Date_t math goes horribly awry
+
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(130, 0), 0);
+ OID round = OID::gen();
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // candidate time!
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ // see the downed node as SECONDARY and decide to take no action, but are still a candidate
+ nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+
+ // normally this would trigger StartElection, but we are already a candidate
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ // now voteForSelf as though we received all our fresh responses
+ ASSERT_TRUE(getTopoCoord().voteForMyself(now()++));
+
+ // now win election and ensure _electionId and _electionTime are set properly
+ getTopoCoord().processWinElection(round, election.getTimestamp());
+ ASSERT_EQUALS(round, getTopoCoord().getElectionId());
+ ASSERT_EQUALS(election.getTimestamp(), getTopoCoord().getElectionTime());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+}
+
+TEST_F(HeartbeatResponseTest, ElectionVoteForAnotherNodeBeforeFreshnessReturns) {
+ // In this test, the TopologyCoordinator goes through the steps of an election. However,
+ // before its freshness round ends, it receives a fresh command followed by an elect command
+ // from another node, both of which it responds positively to. The TopologyCoordinator's
+ // freshness round then concludes successfully, but it fails to vote for itself, since it
+ // recently voted for another node.
+
+ // 1. All nodes heartbeat to indicate that they are up and that "host2" is PRIMARY.
+ // 2. "host2" goes down, triggering an election.
+ // 3. "host3" sends a fresh command, which the TopologyCoordinator responds to positively.
+ // 4. "host3" sends an elect command, which the TopologyCoordinator responds to positively.
+    // 5. The TopologyCoordinator concludes its freshness round successfully.
+ // 6. The TopologyCoordinator loses the election.
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ now() += Seconds(30); // we need to be more than LastVote::leaseTime from the start of time or
+ // else some Date_t math goes horribly awry
+
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(100, 0), 0);
+ OpTime fresherOpApplied = OpTime(Timestamp(200, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // candidate time!
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ Timestamp originalElectionTime = getTopoCoord().getElectionTime();
+ OID originalElectionId = getTopoCoord().getElectionId();
+ // prepare an incoming fresh command
+ ReplicationCoordinator::ReplSetFreshArgs freshArgs;
+ freshArgs.setName = "rs0";
+ freshArgs.cfgver = 5;
+ freshArgs.id = 2;
+ freshArgs.who = HostAndPort("host3");
+ freshArgs.opTime = fresherOpApplied.getTimestamp();
+
+ BSONObjBuilder freshResponseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ getTopoCoord().prepareFreshResponse(
+ freshArgs, now()++, lastOpTimeApplied, &freshResponseBuilder, &result);
+ BSONObj response = freshResponseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(lastOpTimeApplied.getTimestamp(), Timestamp(response["opTime"].timestampValue()));
+ ASSERT_FALSE(response["fresher"].trueValue());
+ ASSERT_FALSE(response["veto"].trueValue());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ // make sure incoming fresh commands do not change electionTime and electionId
+ ASSERT_EQUALS(originalElectionTime, getTopoCoord().getElectionTime());
+ ASSERT_EQUALS(originalElectionId, getTopoCoord().getElectionId());
+
+ // an elect command comes in
+ ReplicationCoordinator::ReplSetElectArgs electArgs;
+ OID round = OID::gen();
+ electArgs.set = "rs0";
+ electArgs.round = round;
+ electArgs.cfgver = 5;
+ electArgs.whoid = 2;
+
+ BSONObjBuilder electResponseBuilder;
+ result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ electArgs, now()++, OpTime(), &electResponseBuilder, &result);
+ stopCapturingLogMessages();
+ response = electResponseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(1, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("voting yea for host3:27017 (2)"));
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ // make sure incoming elect commands do not change electionTime and electionId
+ ASSERT_EQUALS(originalElectionTime, getTopoCoord().getElectionTime());
+ ASSERT_EQUALS(originalElectionId, getTopoCoord().getElectionId());
+
+ // now voteForSelf as though we received all our fresh responses
+ ASSERT_FALSE(getTopoCoord().voteForMyself(now()++));
+
+ // receive a heartbeat indicating the other node was elected
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(2, getCurrentPrimaryIndex());
+ // make sure seeing a new primary does not change electionTime and electionId
+ ASSERT_EQUALS(originalElectionTime, getTopoCoord().getElectionTime());
+ ASSERT_EQUALS(originalElectionId, getTopoCoord().getElectionId());
+
+ // now lose election and ensure _electionTime and _electionId are 0'd out
+ getTopoCoord().processLoseElection();
+ ASSERT_EQUALS(OID(), getTopoCoord().getElectionId());
+ ASSERT_EQUALS(Timestamp(), getTopoCoord().getElectionTime());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(2, getCurrentPrimaryIndex());
+}
+
+TEST_F(HeartbeatResponseTest, ElectionRespondToFreshBeforeOurFreshnessReturns) {
+ // In this test, the TopologyCoordinator goes through the steps of an election. However,
+ // before its freshness round ends, the TopologyCoordinator receives a fresh command from
+ // another node, which it responds to positively. Its freshness round then concludes
+ // successfully and it wins the election. The other node's elect command then comes in and is
+ // responded to negatively, maintaining the TopologyCoordinator's PRIMARY state.
+
+ // 1. All nodes heartbeat to indicate that they are up and that "host2" is PRIMARY.
+ // 2. "host2" goes down, triggering an election.
+ // 3. "host3" sends a fresh command, which the TopologyCoordinator responds to positively.
+ // 4. The TopologyCoordinator concludes its freshness round successfully and wins
+ // the election.
+ // 5. "host3" sends an elect command, which the TopologyCoordinator responds to negatively.
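+ //
+ // (Note: answering the fresh command below does not consume our vote; only a yea response
+ // to an elect command would, which is why voteForMyself still succeeds in this test.)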
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ now() += Seconds(30); // we need to be more than LastVote::leaseTime from the start of time or
+ // else some Date_t math goes horribly awry
+
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(100, 0), 0);
+ OpTime fresherLastOpTimeApplied = OpTime(Timestamp(200, 0), 0);
+ OID round = OID::gen();
+ OID remoteRound = OID::gen();
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // candidate time!
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ // prepare an incoming fresh command
+ ReplicationCoordinator::ReplSetFreshArgs freshArgs;
+ freshArgs.setName = "rs0";
+ freshArgs.cfgver = 5;
+ freshArgs.id = 2;
+ freshArgs.who = HostAndPort("host3");
+ freshArgs.opTime = fresherLastOpTimeApplied.getTimestamp();
+
+ BSONObjBuilder freshResponseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ getTopoCoord().prepareFreshResponse(
+ freshArgs, now()++, lastOpTimeApplied, &freshResponseBuilder, &result);
+ BSONObj response = freshResponseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(lastOpTimeApplied.getTimestamp(), Timestamp(response["opTime"].timestampValue()));
+ ASSERT_FALSE(response["fresher"].trueValue());
+ ASSERT_FALSE(response["veto"].trueValue());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ // now voteForSelf as though we received all our fresh responses
+ ASSERT_TRUE(getTopoCoord().voteForMyself(now()++));
+ // now win election and ensure _electionId and _electionTime are set properly
+ getTopoCoord().processWinElection(round, election.getTimestamp());
+ ASSERT_EQUALS(round, getTopoCoord().getElectionId());
+ ASSERT_EQUALS(election.getTimestamp(), getTopoCoord().getElectionTime());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ // an elect command comes in
+ ReplicationCoordinator::ReplSetElectArgs electArgs;
+ electArgs.set = "rs0";
+ electArgs.round = remoteRound;
+ electArgs.cfgver = 5;
+ electArgs.whoid = 2;
+
+ BSONObjBuilder electResponseBuilder;
+ result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ electArgs, now()++, OpTime(), &electResponseBuilder, &result);
+ stopCapturingLogMessages();
+ response = electResponseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(-10000, response["vote"].Int());
+ ASSERT_EQUALS(remoteRound, response["round"].OID());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+}
+
+TEST_F(HeartbeatResponseTest, ElectionCompleteElectionThenReceiveFresh) {
+ // In this test, the TopologyCoordinator goes through the steps of an election. After
+ // being successfully elected, a fresher node sends a fresh command, which the
+ // TopologyCoordinator responds to positively. The fresher node then sends an elect command,
+ // which the TopologyCoordinator responds to negatively, since it has just elected
+ // itself.
+
+ // 1. All nodes heartbeat to indicate that they are up and that "host2" is PRIMARY.
+ // 2. "host2" goes down, triggering an election.
+ // 3. The TopologyCoordinator concludes its freshness round successfully and wins
+ // the election.
+ // 4. "host3" sends a fresh command, which the TopologyCoordinator responds to positively.
+ // 5. "host3" sends an elect command, which the TopologyCoordinator responds to negatively.
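+ //
+ // (Note: because we are already PRIMARY when host3's fresh command arrives, the response is
+ // expected to carry "veto": true even though host3's opTime is fresher than ours.)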
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ now() += Seconds(30); // we need to be more than LastVote::leaseTime from the start of time or
+ // else some Date_t math goes horribly awry
+
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(100, 0), 0);
+ OpTime fresherLastOpTimeApplied = OpTime(Timestamp(200, 0), 0);
+ OID round = OID::gen();
+ OID remoteRound = OID::gen();
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // candidate time!
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ // now voteForSelf as though we received all our fresh responses
+ ASSERT_TRUE(getTopoCoord().voteForMyself(now()++));
+ // now win election
+ getTopoCoord().processWinElection(round, election.getTimestamp());
+ ASSERT_EQUALS(0, getTopoCoord().getCurrentPrimaryIndex());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+
+ // prepare an incoming fresh command
+ ReplicationCoordinator::ReplSetFreshArgs freshArgs;
+ freshArgs.setName = "rs0";
+ freshArgs.cfgver = 5;
+ freshArgs.id = 2;
+ freshArgs.who = HostAndPort("host3");
+ freshArgs.opTime = fresherLastOpTimeApplied.getTimestamp();
+
+ BSONObjBuilder freshResponseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ getTopoCoord().prepareFreshResponse(
+ freshArgs, now()++, lastOpTimeApplied, &freshResponseBuilder, &result);
+ BSONObj response = freshResponseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(lastOpTimeApplied.getTimestamp(), Timestamp(response["opTime"].timestampValue()));
+ ASSERT_FALSE(response["fresher"].trueValue());
+ ASSERT_TRUE(response["veto"].trueValue()) << response["errmsg"];
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ // an elect command comes in
+ ReplicationCoordinator::ReplSetElectArgs electArgs;
+ electArgs.set = "rs0";
+ electArgs.round = remoteRound;
+ electArgs.cfgver = 5;
+ electArgs.whoid = 2;
+
+ BSONObjBuilder electResponseBuilder;
+ result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ electArgs, now()++, OpTime(), &electResponseBuilder, &result);
+ stopCapturingLogMessages();
+ response = electResponseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(-10000, response["vote"].Int());
+ ASSERT_EQUALS(remoteRound, response["round"].OID());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataPrimaryDownMajorityOfVotersUp) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 5 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "votes" << 0)
+ << BSON("_id" << 3 << "host"
+ << "host4:27017"
+ << "votes" << 0) << BSON("_id" << 4 << "host"
+ << "host5:27017"
+ << "votes" << 0)
+ << BSON("_id" << 5 << "host"
+ << "host6:27017"
+ << "votes" << 0) << BSON("_id" << 6 << "host"
+ << "host7:27017"))
+ << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
+
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ OpTime election = OpTime(Timestamp(400, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+
+ // make sure all non-voting nodes are down; that way we do not have a majority of nodes,
+ // but we do have a majority of votes, since one of the other two voting members is up and so are we
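+ // (Only ourselves, "host2", and "host7" carry votes in this config; with "host2" down, we and
+ // "host7" still hold 2 of the 3 votes, even though just 2 of the 7 members remain up.)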
+ nextAction = receiveDownHeartbeat(HostAndPort("host3"), "rs0", lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ nextAction = receiveDownHeartbeat(HostAndPort("host4"), "rs0", lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ nextAction = receiveDownHeartbeat(HostAndPort("host5"), "rs0", lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ nextAction = receiveDownHeartbeat(HostAndPort("host6"), "rs0", lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ nextAction = receiveUpHeartbeat(HostAndPort("host7"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataRelinquishPrimaryDueToNodeDisappearing) {
+ // become PRIMARY
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ makeSelfPrimary(Timestamp(2, 0));
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ // become aware of other nodes
+ heartbeatFromMember(
+ HostAndPort("host2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(1, 0), 0));
+ heartbeatFromMember(
+ HostAndPort("host2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(1, 0), 0));
+ heartbeatFromMember(HostAndPort("host3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+ heartbeatFromMember(HostAndPort("host3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+
+ // lose that awareness and be sure we are going to stepdown
+ HeartbeatResponseAction nextAction =
+ receiveDownHeartbeat(HostAndPort("host2"), "rs0", OpTime(Timestamp(100, 0), 0));
+ ASSERT_NO_ACTION(nextAction.getAction());
+ nextAction = receiveDownHeartbeat(HostAndPort("host3"), "rs0", OpTime(Timestamp(100, 0), 0));
+ ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, nextAction.getAction());
+ ASSERT_EQUALS(0, nextAction.getPrimaryConfigIndex());
+ // Doesn't actually do the stepdown until stepDownIfPending is called
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(0, getCurrentPrimaryIndex());
+
+ ASSERT_TRUE(getTopoCoord().stepDownIfPending());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+}
+
+TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataRemoteDoesNotExist) {
+ OpTime election = OpTime(Timestamp(5, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(3, 0), 0);
+
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host9"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ election,
+ lastOpTimeApplied);
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+}
+
+class PrepareElectResponseTest : public TopoCoordTest {
+public:
+ PrepareElectResponseTest()
+ : round(OID::gen()), cbData(NULL, ReplicationExecutor::CallbackHandle(), Status::OK()) {}
+
+ virtual void setUp() {
+ TopoCoordTest::setUp();
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 10 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "hself")
+ << BSON("_id" << 1 << "host"
+ << "h1") << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority" << 10)
+ << BSON("_id" << 3 << "host"
+ << "h3"
+ << "priority" << 10))),
0);
-
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- OpTime election = OpTime(Timestamp(400,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(300,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
-
- // make sure all non-voting nodes are down, that way we do not have a majority of nodes
- // but do have a majority of votes since one of two voting members is up and so are we
- nextAction = receiveDownHeartbeat(HostAndPort("host3"), "rs0", lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- nextAction = receiveDownHeartbeat(HostAndPort("host4"), "rs0", lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- nextAction = receiveDownHeartbeat(HostAndPort("host5"), "rs0", lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- nextAction = receiveDownHeartbeat(HostAndPort("host6"), "rs0", lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- nextAction = receiveUpHeartbeat(HostAndPort("host7"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0", lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_EQUALS(HeartbeatResponseAction::StartElection, nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataRelinquishPrimaryDueToNodeDisappearing) {
- // become PRIMARY
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- makeSelfPrimary(Timestamp(2,0));
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- // become aware of other nodes
- heartbeatFromMember(HostAndPort("host2"), "rs0",
- MemberState::RS_SECONDARY, OpTime(Timestamp(1,0), 0));
- heartbeatFromMember(HostAndPort("host2"), "rs0",
- MemberState::RS_SECONDARY, OpTime(Timestamp(1,0), 0));
- heartbeatFromMember(HostAndPort("host3"), "rs0", MemberState::RS_SECONDARY, OpTime());
- heartbeatFromMember(HostAndPort("host3"), "rs0", MemberState::RS_SECONDARY, OpTime());
-
- // lose that awareness and be sure we are going to stepdown
- HeartbeatResponseAction nextAction = receiveDownHeartbeat(HostAndPort("host2"),
- "rs0",
- OpTime(Timestamp(100, 0), 0));
- ASSERT_NO_ACTION(nextAction.getAction());
- nextAction =
- receiveDownHeartbeat(HostAndPort("host3"), "rs0", OpTime(Timestamp(100, 0), 0));
- ASSERT_EQUALS(HeartbeatResponseAction::StepDownSelf, nextAction.getAction());
- ASSERT_EQUALS(0, nextAction.getPrimaryConfigIndex());
- // Doesn't actually do the stepdown until stepDownIfPending is called
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(0, getCurrentPrimaryIndex());
-
- ASSERT_TRUE(getTopoCoord().stepDownIfPending());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- }
-
- TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataRemoteDoesNotExist) {
- OpTime election = OpTime(Timestamp(5,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(3,0), 0);
-
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host9"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- election,
- lastOpTimeApplied);
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- }
-
- class PrepareElectResponseTest : public TopoCoordTest {
- public:
-
- PrepareElectResponseTest() :
- round(OID::gen()),
- cbData(NULL, ReplicationExecutor::CallbackHandle(), Status::OK()) {}
-
- virtual void setUp() {
- TopoCoordTest::setUp();
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 10 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "hself") <<
- BSON("_id" << 1 << "host" << "h1") <<
- BSON("_id" << 2 <<
- "host" << "h2" <<
- "priority" << 10) <<
- BSON("_id" << 3 <<
- "host" << "h3" <<
- "priority" << 10))),
- 0);
- }
-
- protected:
- Date_t now;
- OID round;
- ReplicationExecutor::CallbackArgs cbData;
- };
-
- TEST_F(PrepareElectResponseTest, ElectResponseIncorrectReplSetName) {
- // Test with incorrect replset name
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "fakeset";
- args.round = round;
- args.cfgver = 10;
- args.whoid = 1;
-
- BSONObjBuilder responseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder,
- &result);
- stopCapturingLogMessages();
- BSONObj response = responseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(0, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- ASSERT_EQUALS(1,
- countLogLinesContaining("received an elect request for 'fakeset' but our "
- "set name is 'rs0'"));
-
- // Make sure nay votes, do not prevent subsequent yeas (the way a yea vote would)
- args.set = "rs0";
- BSONObjBuilder responseBuilder2;
- getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_EQUALS(1, response2["vote"].Int());
- ASSERT_EQUALS(round, response2["round"].OID());
- }
-
- TEST_F(PrepareElectResponseTest, ElectResponseOurConfigStale) {
- // Test with us having a stale config version
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "rs0";
- args.round = round;
- args.cfgver = 20;
- args.whoid = 1;
-
- BSONObjBuilder responseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder,
- &result);
- stopCapturingLogMessages();
- BSONObj response = responseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(0, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- ASSERT_EQUALS(1,
- countLogLinesContaining("not voting because our config version is stale"));
-
- // Make sure nay votes, do not prevent subsequent yeas (the way a yea vote would)
- args.cfgver = 10;
- BSONObjBuilder responseBuilder2;
- getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_EQUALS(1, response2["vote"].Int());
- ASSERT_EQUALS(round, response2["round"].OID());
- }
-
- TEST_F(PrepareElectResponseTest, ElectResponseTheirConfigStale) {
- // Test with them having a stale config version
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "rs0";
- args.round = round;
- args.cfgver = 5;
- args.whoid = 1;
-
- BSONObjBuilder responseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder,
- &result);
- stopCapturingLogMessages();
- BSONObj response = responseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(-10000, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- ASSERT_EQUALS(1,
- countLogLinesContaining("received stale config version # during election"));
-
- // Make sure nay votes, do not prevent subsequent yeas (the way a yea vote would)
- args.cfgver = 10;
- BSONObjBuilder responseBuilder2;
- getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_EQUALS(1, response2["vote"].Int());
- ASSERT_EQUALS(round, response2["round"].OID());
- }
-
- TEST_F(PrepareElectResponseTest, ElectResponseNonExistentNode) {
- // Test with a non-existent node
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "rs0";
- args.round = round;
- args.cfgver = 10;
- args.whoid = 99;
-
- BSONObjBuilder responseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder,
- &result);
- stopCapturingLogMessages();
- BSONObj response = responseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(-10000, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- ASSERT_EQUALS(1, countLogLinesContaining("couldn't find member with id 99"));
-
- // Make sure nay votes, do not prevent subsequent yeas (the way a yea vote would)
- args.whoid = 1;
- BSONObjBuilder responseBuilder2;
- getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_EQUALS(1, response2["vote"].Int());
- ASSERT_EQUALS(round, response2["round"].OID());
}
- TEST_F(PrepareElectResponseTest, ElectResponseWeArePrimary) {
- // Test when we are already primary
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "rs0";
- args.round = round;
- args.cfgver = 10;
- args.whoid = 1;
-
- getTopoCoord()._setCurrentPrimaryForTest(0);
-
- BSONObjBuilder responseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder,
- &result);
- stopCapturingLogMessages();
- BSONObj response = responseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(-10000, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- ASSERT_EQUALS(1, countLogLinesContaining("I am already primary"));
-
- // Make sure nay votes, do not prevent subsequent yeas (the way a yea vote would)
- getTopoCoord()._setCurrentPrimaryForTest(-1);
- BSONObjBuilder responseBuilder2;
- getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_EQUALS(1, response2["vote"].Int());
- ASSERT_EQUALS(round, response2["round"].OID());
- }
-
- TEST_F(PrepareElectResponseTest, ElectResponseSomeoneElseIsPrimary) {
- // Test when someone else is already primary
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "rs0";
- args.round = round;
- args.cfgver = 10;
- args.whoid = 1;
- getTopoCoord()._setCurrentPrimaryForTest(2);
-
- BSONObjBuilder responseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder,
- &result);
- stopCapturingLogMessages();
- BSONObj response = responseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(-10000, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- ASSERT_EQUALS(1, countLogLinesContaining("h2:27017 is already primary"));
-
- // Make sure nay votes, do not prevent subsequent yeas (the way a yea vote would)
- getTopoCoord()._setCurrentPrimaryForTest(-1);
- BSONObjBuilder responseBuilder2;
- getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_EQUALS(1, response2["vote"].Int());
- ASSERT_EQUALS(round, response2["round"].OID());
- }
-
- TEST_F(PrepareElectResponseTest, ElectResponseNotHighestPriority) {
- // Test trying to elect someone who isn't the highest priority node
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "rs0";
- args.round = round;
- args.cfgver = 10;
- args.whoid = 1;
-
- heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
-
- BSONObjBuilder responseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder,
- &result);
- stopCapturingLogMessages();
- BSONObj response = responseBuilder.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(-10000, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- ASSERT_EQUALS(1, countLogLinesContaining("h1:27017 has lower priority than h3:27017"));
-
- // Make sure nay votes, do not prevent subsequent yeas (the way a yea vote would)
- args.whoid = 3;
- BSONObjBuilder responseBuilder2;
- getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_EQUALS(1, response2["vote"].Int());
- ASSERT_EQUALS(round, response2["round"].OID());
- }
-
- TEST_F(PrepareElectResponseTest, ElectResponseHighestPriorityOfLiveNodes) {
- // Test trying to elect someone who isn't the highest priority node, but all higher nodes
- // are down
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "rs0";
- args.round = round;
- args.cfgver = 10;
- args.whoid = 1;
-
- receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime());
- receiveDownHeartbeat(HostAndPort("h2"), "rs0", OpTime());
-
- BSONObjBuilder responseBuilder;
- Status result = Status::OK();
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder,
- &result);
- stopCapturingLogMessages();
- BSONObj response = responseBuilder.obj();
- ASSERT_EQUALS(1, response["vote"].Int());
- ASSERT_EQUALS(round, response["round"].OID());
- }
-
- TEST_F(PrepareElectResponseTest, ElectResponseValidVotes) {
- // Test a valid vote
- ReplicationCoordinator::ReplSetElectArgs args;
- args.set = "rs0";
- args.round = round;
- args.cfgver = 10;
- args.whoid = 2;
- now = Date_t::fromMillisSinceEpoch(100);
-
- BSONObjBuilder responseBuilder1;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args,
- now += Seconds(60),
- OpTime(),
- &responseBuilder1,
- &result);
- stopCapturingLogMessages();
- BSONObj response1 = responseBuilder1.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(1, response1["vote"].Int());
- ASSERT_EQUALS(round, response1["round"].OID());
- ASSERT_EQUALS(1, countLogLinesContaining("voting yea for h2:27017 (2)"));
-
- // Test what would be a valid vote except that we already voted too recently
- args.whoid = 3;
-
- BSONObjBuilder responseBuilder2;
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args, now, OpTime(), &responseBuilder2, &result);
- stopCapturingLogMessages();
- BSONObj response2 = responseBuilder2.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(0, response2["vote"].Int());
- ASSERT_EQUALS(round, response2["round"].OID());
- ASSERT_EQUALS(1, countLogLinesContaining("voting no for h3:27017; "
- "voted for h2:27017 0 secs ago"));
-
- // Test that after enough time passes the same vote can proceed
- now += Seconds(30) + Milliseconds(1); // just over 30 seconds later
-
- BSONObjBuilder responseBuilder3;
- startCapturingLogMessages();
- getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder3, &result);
- stopCapturingLogMessages();
- BSONObj response3 = responseBuilder3.obj();
- ASSERT_OK(result);
- ASSERT_EQUALS(1, response3["vote"].Int());
- ASSERT_EQUALS(round, response3["round"].OID());
- ASSERT_EQUALS(1, countLogLinesContaining("voting yea for h3:27017 (3)"));
- }
-
- TEST_F(TopoCoordTest, ElectResponseNotInConfig) {
- ReplicationCoordinator::ReplSetElectArgs args;
- BSONObjBuilder response;
- Status status = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- getTopoCoord().prepareElectResponse(args, now(), OpTime(), &response, &status);
- ASSERT_EQUALS(ErrorCodes::ReplicaSetNotFound, status);
- ASSERT_EQUALS("Cannot participate in election because not initialized", status.reason());
- }
-
- class PrepareFreezeResponseTest : public TopoCoordTest {
- public:
-
- virtual void setUp() {
- TopoCoordTest::setUp();
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 5 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017"))),
- 0);
- }
-
- BSONObj prepareFreezeResponse(int duration) {
- BSONObjBuilder response;
- startCapturingLogMessages();
- getTopoCoord().prepareFreezeResponse(now()++, duration, &response);
- stopCapturingLogMessages();
- return response.obj();
- }
- };
-
- TEST_F(PrepareFreezeResponseTest, UnfreezeEvenWhenNotFrozen) {
- BSONObj response = prepareFreezeResponse(0);
- ASSERT_EQUALS("unfreezing", response["info"].String());
- ASSERT_EQUALS(1, countLogLinesContaining("'unfreezing'"));
- // 1 instead of 0 because it assigns to "now" in this case
- ASSERT_EQUALS(1LL, getTopoCoord().getStepDownTime().asInt64());
- }
-
- TEST_F(PrepareFreezeResponseTest, FreezeForOneSecond) {
- BSONObj response = prepareFreezeResponse(1);
- ASSERT_EQUALS("you really want to freeze for only 1 second?",
- response["warning"].String());
- ASSERT_EQUALS(1, countLogLinesContaining("'freezing' for 1 seconds"));
- // 1001 because "now" was incremented once during initialization + 1000 ms wait
- ASSERT_EQUALS(1001LL, getTopoCoord().getStepDownTime().asInt64());
- }
-
- TEST_F(PrepareFreezeResponseTest, FreezeForManySeconds) {
- BSONObj response = prepareFreezeResponse(20);
- ASSERT_TRUE(response.isEmpty());
- ASSERT_EQUALS(1, countLogLinesContaining("'freezing' for 20 seconds"));
- // 20001 because "now" was incremented once during initialization + 20000 ms wait
- ASSERT_EQUALS(20001LL, getTopoCoord().getStepDownTime().asInt64());
- }
-
- TEST_F(PrepareFreezeResponseTest, UnfreezeEvenWhenNotFrozenWhilePrimary) {
- makeSelfPrimary();
- BSONObj response = prepareFreezeResponse(0);
- ASSERT_EQUALS("unfreezing", response["info"].String());
- // doesn't mention being primary in this case for some reason
- ASSERT_EQUALS(0, countLogLinesContaining(
- "received freeze command but we are primary"));
- // 1 instead of 0 because it assigns to "now" in this case
- ASSERT_EQUALS(1LL, getTopoCoord().getStepDownTime().asInt64());
- }
-
- TEST_F(PrepareFreezeResponseTest, FreezeForOneSecondWhilePrimary) {
- makeSelfPrimary();
- BSONObj response = prepareFreezeResponse(1);
- ASSERT_EQUALS("you really want to freeze for only 1 second?",
- response["warning"].String());
- ASSERT_EQUALS(1, countLogLinesContaining(
- "received freeze command but we are primary"));
- ASSERT_EQUALS(0LL, getTopoCoord().getStepDownTime().asInt64());
- }
-
- TEST_F(PrepareFreezeResponseTest, FreezeForManySecondsWhilePrimary) {
- makeSelfPrimary();
- BSONObj response = prepareFreezeResponse(20);
- ASSERT_TRUE(response.isEmpty());
- ASSERT_EQUALS(1, countLogLinesContaining(
- "received freeze command but we are primary"));
- ASSERT_EQUALS(0LL, getTopoCoord().getStepDownTime().asInt64());
- }
-
- TEST_F(TopoCoordTest, UnfreezeWhileLoneNode) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 5 <<
- "members" << BSON_ARRAY(BSON("_id" << 0 << "host" << "host1:27017"))),
+protected:
+ Date_t now;
+ OID round;
+ ReplicationExecutor::CallbackArgs cbData;
+};
+
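+// A yea vote recorded by prepareElectResponse starts a roughly 30-second lease during which
+// further yea votes are refused (see ElectResponseValidVotes below); the nay and veto cases in
+// these tests are each followed by a successful yea to show that they do not start such a lease.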
+TEST_F(PrepareElectResponseTest, ElectResponseIncorrectReplSetName) {
+ // Test with incorrect replset name
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "fakeset";
+ args.round = round;
+ args.cfgver = 10;
+ args.whoid = 1;
+
+ BSONObjBuilder responseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder, &result);
+ stopCapturingLogMessages();
+ BSONObj response = responseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(0, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "received an elect request for 'fakeset' but our "
+ "set name is 'rs0'"));
+
+ // Make sure nay votes do not prevent subsequent yeas (the way a yea vote would)
+ args.set = "rs0";
+ BSONObjBuilder responseBuilder2;
+ getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_EQUALS(1, response2["vote"].Int());
+ ASSERT_EQUALS(round, response2["round"].OID());
+}
+
+TEST_F(PrepareElectResponseTest, ElectResponseOurConfigStale) {
+ // Test with us having a stale config version
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "rs0";
+ args.round = round;
+ args.cfgver = 20;
+ args.whoid = 1;
+
+ BSONObjBuilder responseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder, &result);
+ stopCapturingLogMessages();
+ BSONObj response = responseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(0, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("not voting because our config version is stale"));
+
+ // Make sure nay votes do not prevent subsequent yeas (the way a yea vote would)
+ args.cfgver = 10;
+ BSONObjBuilder responseBuilder2;
+ getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_EQUALS(1, response2["vote"].Int());
+ ASSERT_EQUALS(round, response2["round"].OID());
+}
+
+TEST_F(PrepareElectResponseTest, ElectResponseTheirConfigStale) {
+ // Test with them having a stale config version
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "rs0";
+ args.round = round;
+ args.cfgver = 5;
+ args.whoid = 1;
+
+ BSONObjBuilder responseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder, &result);
+ stopCapturingLogMessages();
+ BSONObj response = responseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(-10000, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("received stale config version # during election"));
+
+ // Make sure nay votes do not prevent subsequent yeas (the way a yea vote would)
+ args.cfgver = 10;
+ BSONObjBuilder responseBuilder2;
+ getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_EQUALS(1, response2["vote"].Int());
+ ASSERT_EQUALS(round, response2["round"].OID());
+}
+
+TEST_F(PrepareElectResponseTest, ElectResponseNonExistentNode) {
+ // Test with a non-existent node
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "rs0";
+ args.round = round;
+ args.cfgver = 10;
+ args.whoid = 99;
+
+ BSONObjBuilder responseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder, &result);
+ stopCapturingLogMessages();
+ BSONObj response = responseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(-10000, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("couldn't find member with id 99"));
+
+ // Make sure nay votes do not prevent subsequent yeas (the way a yea vote would)
+ args.whoid = 1;
+ BSONObjBuilder responseBuilder2;
+ getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_EQUALS(1, response2["vote"].Int());
+ ASSERT_EQUALS(round, response2["round"].OID());
+}
+
+TEST_F(PrepareElectResponseTest, ElectResponseWeArePrimary) {
+ // Test when we are already primary
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "rs0";
+ args.round = round;
+ args.cfgver = 10;
+ args.whoid = 1;
+
+ getTopoCoord()._setCurrentPrimaryForTest(0);
+
+ BSONObjBuilder responseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder, &result);
+ stopCapturingLogMessages();
+ BSONObj response = responseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(-10000, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("I am already primary"));
+
+ // Make sure nay votes do not prevent subsequent yeas (the way a yea vote would)
+ getTopoCoord()._setCurrentPrimaryForTest(-1);
+ BSONObjBuilder responseBuilder2;
+ getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_EQUALS(1, response2["vote"].Int());
+ ASSERT_EQUALS(round, response2["round"].OID());
+}
+
+TEST_F(PrepareElectResponseTest, ElectResponseSomeoneElseIsPrimary) {
+ // Test when someone else is already primary
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "rs0";
+ args.round = round;
+ args.cfgver = 10;
+ args.whoid = 1;
+ getTopoCoord()._setCurrentPrimaryForTest(2);
+
+ BSONObjBuilder responseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder, &result);
+ stopCapturingLogMessages();
+ BSONObj response = responseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(-10000, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("h2:27017 is already primary"));
+
+ // Make sure nay votes do not prevent subsequent yeas (the way a yea vote would)
+ getTopoCoord()._setCurrentPrimaryForTest(-1);
+ BSONObjBuilder responseBuilder2;
+ getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_EQUALS(1, response2["vote"].Int());
+ ASSERT_EQUALS(round, response2["round"].OID());
+}
+
+TEST_F(PrepareElectResponseTest, ElectResponseNotHighestPriority) {
+ // Test trying to elect someone who isn't the highest priority node
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "rs0";
+ args.round = round;
+ args.cfgver = 10;
+ args.whoid = 1;
+
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+
+ BSONObjBuilder responseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder, &result);
+ stopCapturingLogMessages();
+ BSONObj response = responseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(-10000, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("h1:27017 has lower priority than h3:27017"));
+
+ // Make sure nay votes do not prevent subsequent yeas (the way a yea vote would)
+ args.whoid = 3;
+ BSONObjBuilder responseBuilder2;
+ getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder2, &result);
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_EQUALS(1, response2["vote"].Int());
+ ASSERT_EQUALS(round, response2["round"].OID());
+}
+
+TEST_F(PrepareElectResponseTest, ElectResponseHighestPriorityOfLiveNodes) {
+ // Test trying to elect someone who isn't the highest priority node, but all higher-priority
+ // nodes are down
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "rs0";
+ args.round = round;
+ args.cfgver = 10;
+ args.whoid = 1;
+
+ receiveDownHeartbeat(HostAndPort("h3"), "rs0", OpTime());
+ receiveDownHeartbeat(HostAndPort("h2"), "rs0", OpTime());
+
+ BSONObjBuilder responseBuilder;
+ Status result = Status::OK();
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder, &result);
+ stopCapturingLogMessages();
+ BSONObj response = responseBuilder.obj();
+ ASSERT_EQUALS(1, response["vote"].Int());
+ ASSERT_EQUALS(round, response["round"].OID());
+}
+
+TEST_F(PrepareElectResponseTest, ElectResponseValidVotes) {
+ // Test a valid vote
+ ReplicationCoordinator::ReplSetElectArgs args;
+ args.set = "rs0";
+ args.round = round;
+ args.cfgver = 10;
+ args.whoid = 2;
+ now = Date_t::fromMillisSinceEpoch(100);
+
+ BSONObjBuilder responseBuilder1;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(
+ args, now += Seconds(60), OpTime(), &responseBuilder1, &result);
+ stopCapturingLogMessages();
+ BSONObj response1 = responseBuilder1.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(1, response1["vote"].Int());
+ ASSERT_EQUALS(round, response1["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("voting yea for h2:27017 (2)"));
+
+ // Test what would be a valid vote except that we already voted too recently
+ args.whoid = 3;
+
+ BSONObjBuilder responseBuilder2;
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(args, now, OpTime(), &responseBuilder2, &result);
+ stopCapturingLogMessages();
+ BSONObj response2 = responseBuilder2.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(0, response2["vote"].Int());
+ ASSERT_EQUALS(round, response2["round"].OID());
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "voting no for h3:27017; "
+ "voted for h2:27017 0 secs ago"));
+
+ // Test that after enough time passes the same vote can proceed
+ now += Seconds(30) + Milliseconds(1); // just over 30 seconds later
+
+ BSONObjBuilder responseBuilder3;
+ startCapturingLogMessages();
+ getTopoCoord().prepareElectResponse(args, now++, OpTime(), &responseBuilder3, &result);
+ stopCapturingLogMessages();
+ BSONObj response3 = responseBuilder3.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(1, response3["vote"].Int());
+ ASSERT_EQUALS(round, response3["round"].OID());
+ ASSERT_EQUALS(1, countLogLinesContaining("voting yea for h3:27017 (3)"));
+}
+
+TEST_F(TopoCoordTest, ElectResponseNotInConfig) {
+ ReplicationCoordinator::ReplSetElectArgs args;
+ BSONObjBuilder response;
+ Status status = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ getTopoCoord().prepareElectResponse(args, now(), OpTime(), &response, &status);
+ ASSERT_EQUALS(ErrorCodes::ReplicaSetNotFound, status);
+ ASSERT_EQUALS("Cannot participate in election because not initialized", status.reason());
+}
+
+class PrepareFreezeResponseTest : public TopoCoordTest {
+public:
+ virtual void setUp() {
+ TopoCoordTest::setUp();
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 5 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"))),
0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- BSONObjBuilder response;
- getTopoCoord().prepareFreezeResponse(now()++, 20, &response);
- ASSERT(response.obj().isEmpty());
- BSONObjBuilder response2;
- getTopoCoord().prepareFreezeResponse(now()++, 0, &response2);
- ASSERT_EQUALS("unfreezing", response2.obj()["info"].String());
- ASSERT(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
}
- class ShutdownInProgressTest : public TopoCoordTest {
- public:
-
- ShutdownInProgressTest() :
- ourCbData(NULL,
- ReplicationExecutor::CallbackHandle(),
- Status(ErrorCodes::CallbackCanceled, "")) {}
-
- virtual ReplicationExecutor::CallbackArgs cbData() { return ourCbData; }
-
- private:
- ReplicationExecutor::CallbackArgs ourCbData;
- };
-
- TEST_F(ShutdownInProgressTest, ShutdownInProgressWhenCallbackCanceledSyncFrom) {
- Status result = Status::OK();
- BSONObjBuilder response;
- getTopoCoord().prepareSyncFromResponse(cbData(),
- HostAndPort("host2:27017"),
- OpTime(),
- &response,
- &result);
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, result);
- ASSERT_TRUE(response.obj().isEmpty());
-
- }
-
- TEST_F(ShutdownInProgressTest, ShutDownInProgressWhenCallbackCanceledStatus) {
- Status result = Status::OK();
+ BSONObj prepareFreezeResponse(int duration) {
BSONObjBuilder response;
- getTopoCoord().prepareStatusResponse(cbData(),
- Date_t(),
- 0,
- OpTime(),
- &response,
- &result);
- ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, result);
- ASSERT_TRUE(response.obj().isEmpty());
- }
-
- class PrepareHeartbeatResponseTest : public TopoCoordTest {
- public:
-
- virtual void setUp() {
- TopoCoordTest::setUp();
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
- }
-
- void prepareHeartbeatResponse(const ReplSetHeartbeatArgs& args,
- OpTime lastOpApplied,
- ReplSetHeartbeatResponse* response,
- Status* result) {
- *result = getTopoCoord().prepareHeartbeatResponse(now()++,
- args,
- "rs0",
- lastOpApplied,
- response);
- }
-
- };
-
- class PrepareHeartbeatResponseV1Test : public TopoCoordTest {
- public:
-
- virtual void setUp() {
- TopoCoordTest::setUp();
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3")) <<
- "protocolVersion" << 1),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
- }
-
- void prepareHeartbeatResponseV1(const ReplSetHeartbeatArgsV1& args,
- OpTime lastOpApplied,
- ReplSetHeartbeatResponse* response,
- Status* result) {
- *result = getTopoCoord().prepareHeartbeatResponseV1(now()++,
- args,
- "rs0",
- lastOpApplied,
- response);
- }
-
- };
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseBadSetName) {
- // set up args with incorrect replset name
- ReplSetHeartbeatArgsV1 args;
- args.setSetName("rs1");
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- startCapturingLogMessages();
- prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
- stopCapturingLogMessages();
- ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result);
- ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \"" <<
- result.reason() << '"';
- ASSERT_EQUALS(1,
- countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
- "node's: rs1"));
- // only protocolVersion should be set in this failure case
- ASSERT_EQUALS("", response.getReplicaSetName());
- }
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseWhenOutOfSet) {
- // reconfig self out of set
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 3 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3")) <<
- "protocolVersion" << 1),
- -1);
- ReplSetHeartbeatArgsV1 args;
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
- prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
- ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, result);
- ASSERT(result.reason().find("replica set configuration is invalid or does not include us"))
- << "Actual string was \"" << result.reason() << '"';
- // only protocolVersion should be set in this failure case
- ASSERT_EQUALS("", response.getReplicaSetName());
- }
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseFromSelf) {
- // set up args with our id as the senderId
- ReplSetHeartbeatArgsV1 args;
- args.setSetName("rs0");
- args.setSenderId(10);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
- prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
- ASSERT_EQUALS(ErrorCodes::BadValue, result);
- ASSERT(result.reason().find("from member with the same member ID as our self")) <<
- "Actual string was \"" << result.reason() << '"';
- // only protocolVersion should be set in this failure case
- ASSERT_EQUALS("", response.getReplicaSetName());
- }
-
- TEST_F(TopoCoordTest, PrepareHeartbeatResponseV1NoConfigYet) {
- // set up args and acknowledge sender
- ReplSetHeartbeatArgsV1 args;
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- // prepare response and check the results
- Status result = getTopoCoord().prepareHeartbeatResponseV1(now()++,
- args,
- "rs0",
- OpTime(),
- &response);
- ASSERT_OK(result);
- // this change to true because we can now see a majority, unlike in the previous cases
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(MemberState::RS_STARTUP, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTerm());
- ASSERT_EQUALS(-2, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseSenderIDMissing) {
- // set up args without a senderID
- ReplSetHeartbeatArgsV1 args;
- args.setSetName("rs0");
- args.setConfigVersion(1);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTerm());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseSenderIDNotInConfig) {
- // set up args with a senderID which is not present in our config
- ReplSetHeartbeatArgsV1 args;
- args.setSetName("rs0");
- args.setConfigVersion(1);
- args.setSenderId(2);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTerm());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseConfigVersionLow) {
- // set up args with a config version lower than ours
- ReplSetHeartbeatArgsV1 args;
- args.setConfigVersion(0);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_TRUE(response.hasConfig());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTerm());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseConfigVersionHigh) {
- // set up args with a config version higher than ours
- ReplSetHeartbeatArgsV1 args;
- args.setConfigVersion(10);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_FALSE(response.hasConfig());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTerm());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseAsPrimary) {
- makeSelfPrimary(Timestamp(10,0));
-
- ReplSetHeartbeatArgsV1 args;
- args.setConfigVersion(1);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponseV1(args, OpTime(Timestamp(11,0), 0), &response, &result);
- ASSERT_OK(result);
- ASSERT_FALSE(response.hasConfig());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(MemberState::RS_PRIMARY, response.getState().s);
- ASSERT_EQUALS(OpTime(Timestamp(11,0), 0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTerm());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseWithSyncSource) {
- // get a sync source
- heartbeatFromMember(HostAndPort("h3"), "rs0",
- MemberState::RS_SECONDARY, OpTime());
- heartbeatFromMember(HostAndPort("h3"), "rs0",
- MemberState::RS_SECONDARY, OpTime());
- heartbeatFromMember(HostAndPort("h2"), "rs0",
- MemberState::RS_SECONDARY, OpTime(Timestamp(1,0), 0));
- heartbeatFromMember(HostAndPort("h2"), "rs0",
- MemberState::RS_SECONDARY, OpTime(Timestamp(1,0), 0));
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
-
- // set up args
- ReplSetHeartbeatArgsV1 args;
- args.setConfigVersion(1);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponseV1(args, OpTime(Timestamp(100,0), 0), &response, &result);
- ASSERT_OK(result);
- ASSERT_FALSE(response.hasConfig());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(Timestamp(100,0), 0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTerm());
- ASSERT_EQUALS(1, response.getConfigVersion());
- ASSERT_EQUALS(HostAndPort("h2"), response.getSyncingTo());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseBadProtocolVersion) {
- // set up args with bad protocol version
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(3);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(), &response, &result);
- ASSERT_EQUALS(ErrorCodes::BadValue, result);
- ASSERT_EQUALS("replset: incompatible replset protocol version: 3", result.reason());
- ASSERT_EQUALS("", response.getHbMsg());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseFromSelf) {
- // set up args with a senderID that matches our own member ID
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setSetName("rs0");
- args.setSenderId(10);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
- prepareHeartbeatResponse(args, OpTime(), &response, &result);
- ASSERT_EQUALS(ErrorCodes::BadValue, result);
- ASSERT(result.reason().find("from member with the same member ID as our self")) <<
- "Actual string was \"" << result.reason() << '"';
- ASSERT_EQUALS("", response.getHbMsg());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseBadSetName) {
- // set up args with incorrect replset name
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setSetName("rs1");
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- startCapturingLogMessages();
- prepareHeartbeatResponse(args, OpTime(), &response, &result);
- stopCapturingLogMessages();
- ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result);
- ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \"" <<
- result.reason() << '"';
- ASSERT_EQUALS(1,
- countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
- "node's: rs1"));
- ASSERT_TRUE(response.isMismatched());
- ASSERT_EQUALS("", response.getHbMsg());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseSenderIDMissing) {
- // set up args without a senderID
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setSetName("rs0");
- args.setConfigVersion(1);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_FALSE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
- ASSERT_EQUALS("", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseSenderIDNotInConfig) {
- // set up args with a senderID which is not present in our config
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setSetName("rs0");
- args.setConfigVersion(1);
- args.setSenderId(2);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_FALSE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
- ASSERT_EQUALS("", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseConfigVersionLow) {
- // set up args with a config version lower than ours
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setConfigVersion(0);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_TRUE(response.hasConfig());
- ASSERT_FALSE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
- ASSERT_EQUALS("", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseConfigVersionHigh) {
- // set up args with a config version higher than ours
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setConfigVersion(10);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_FALSE(response.hasConfig());
- ASSERT_FALSE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
- ASSERT_EQUALS("", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseSenderDown) {
- // set up args with sender down from our perspective
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setConfigVersion(1);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(), &response, &result);
- ASSERT_OK(result);
- ASSERT_FALSE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
- ASSERT_EQUALS("", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(1, response.getConfigVersion());
- ASSERT_TRUE(response.isStateDisagreement());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseSenderUp) {
- // set up args and acknowledge sender
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime());
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setConfigVersion(1);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(Timestamp(100,0), 0), &response, &result);
- ASSERT_OK(result);
- // electability changes to true because we can now see a majority, unlike in the previous cases
- ASSERT_TRUE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(Timestamp(100,0), 0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
- ASSERT_EQUALS("", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(TopoCoordTest, PrepareHeartbeatResponseNoConfigYet) {
- // set up args
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setConfigVersion(1);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- // prepare response and check the results
- Status result = getTopoCoord().prepareHeartbeatResponse(now()++,
- args,
- "rs0",
- OpTime(),
- &response);
- ASSERT_OK(result);
- // not electable because we have no config yet
- ASSERT_FALSE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_STARTUP, response.getState().s);
- ASSERT_EQUALS(OpTime(), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
- ASSERT_EQUALS("", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(-2, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseAsPrimary) {
- makeSelfPrimary(Timestamp(10,0));
- heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime());
-
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setConfigVersion(1);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(Timestamp(11,0), 0), &response, &result);
- ASSERT_OK(result);
- // electable because we are already primary
- ASSERT_TRUE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_PRIMARY, response.getState().s);
- ASSERT_EQUALS(OpTime(Timestamp(11,0), 0), response.getOpTime());
- ASSERT_EQUALS(Timestamp(10,0), response.getElectionTime());
- ASSERT_EQUALS(0, response.getTime().count());
- ASSERT_EQUALS("", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(1, response.getConfigVersion());
- }
-
- TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseWithSyncSource) {
- // get a sync source
- heartbeatFromMember(HostAndPort("h3"), "rs0",
- MemberState::RS_SECONDARY, OpTime());
- heartbeatFromMember(HostAndPort("h3"), "rs0",
- MemberState::RS_SECONDARY, OpTime());
- heartbeatFromMember(HostAndPort("h2"), "rs0",
- MemberState::RS_SECONDARY, OpTime(Timestamp(1,0), 0));
- heartbeatFromMember(HostAndPort("h2"), "rs0",
- MemberState::RS_SECONDARY, OpTime(Timestamp(1,0), 0));
- getTopoCoord().chooseNewSyncSource(now()++, OpTime());
-
- // set up args
- ReplSetHeartbeatArgs args;
- args.setProtocolVersion(1);
- args.setConfigVersion(1);
- args.setSetName("rs0");
- args.setSenderId(20);
- ReplSetHeartbeatResponse response;
- Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
-
- // prepare response and check the results
- prepareHeartbeatResponse(args, OpTime(Timestamp(100,0), 0), &response, &result);
- ASSERT_OK(result);
- ASSERT_TRUE(response.isElectable());
- ASSERT_TRUE(response.isReplSet());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
- ASSERT_EQUALS(OpTime(Timestamp(100,0), 0), response.getOpTime());
- ASSERT_EQUALS(0, response.getTime().count());
- // changed to a syncing message because our sync source changed recently
- ASSERT_EQUALS("syncing from: h2:27017", response.getHbMsg());
- ASSERT_EQUALS("rs0", response.getReplicaSetName());
- ASSERT_EQUALS(1, response.getConfigVersion());
- ASSERT_EQUALS(HostAndPort("h2"), response.getSyncingTo());
- }
-
- TEST_F(TopoCoordTest, SetFollowerSecondaryWhenLoneNode) {
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "hself"))),
- 0);
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
-
- // if we are the only node, we should become a candidate when we transition to SECONDARY
- ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(TopoCoordTest, CandidateWhenLoneSecondaryNodeReconfig) {
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- ReplicaSetConfig cfg;
- cfg.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "hself" << "priority" << 0))));
- getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
-
- ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
-
- // we should become a candidate when we reconfig to become electable
-
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "hself"))),
- 0);
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- }
-
- TEST_F(TopoCoordTest, SetFollowerSecondaryWhenLoneUnelectableNode) {
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- ReplicaSetConfig cfg;
- cfg.initialize(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "hself" << "priority" << 0))));
-
- getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
-
- // despite being the only node, we are unelectable, so we should not become a candidate
- ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(TopoCoordTest, ReconfigToBeAddedToTheSet) {
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- // configure the set so that we are absent from it
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- -1);
- // should become removed since we are not in the set
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
-
- // reconfig to add to set
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- 0);
- // having been added to the config, we should no longer be REMOVED and should enter STARTUP2
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(TopoCoordTest, ReconfigToBeRemovedFromTheSet) {
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- 0);
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
-
- // reconfig to remove self
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- -1);
- // should become removed since we are no longer in the set
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(TopoCoordTest, ReconfigToBeRemovedFromTheSetAsPrimary) {
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017"))),
- 0);
- ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
- getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- // win election and primary
- getTopoCoord().processWinElection(OID::gen(), Timestamp());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
-
- // reconfig to remove self
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- -1);
- // should become removed since we are no longer in the set even though we were primary
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(TopoCoordTest, ReconfigCanNoLongerBePrimary) {
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017"))),
- 0);
- ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
- getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- // win election and primary
- getTopoCoord().processWinElection(OID::gen(), Timestamp());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
-
- // now lose primary due to loss of electability
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017" << "priority" << 0) <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- 0);
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(TopoCoordTest, ReconfigContinueToBePrimary) {
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017"))),
- 0);
-
- ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
- getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
- ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
-
- // win election and primary
- getTopoCoord().processWinElection(OID::gen(), Timestamp());
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
-
- // Now reconfig in ways that leave us electable and ensure we are still the primary.
- // Add hosts
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10,0), 0));
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
-
- // Change priorities and tags
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017" << "priority" << 10) <<
- BSON("_id" << 1 <<
- "host" << "host2:27017" <<
- "priority" << 5 <<
- "tags" << BSON("dc" << "NA" << "rack" << "rack1")))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10,0), 0));
- ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(TopoCoordTest, ReconfigKeepSecondary) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 1 << "host" << "host1:27017") <<
- BSON("_id" << 2 << "host" << "host2:27017"))),
- 0);
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
- setSelfMemberState(MemberState::RS_SECONDARY);
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
-
- // reconfig and stay secondary
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- 0);
- ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
- ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(HeartbeatResponseTest, ReconfigBetweenHeartbeatRequestAndRepsonse) {
- OpTime election = OpTime(Timestamp(14,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(13,0), 0);
-
- // all three members up and secondaries
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // now request from host3 and receive after host2 has been removed via reconfig
- getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host3"));
-
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 2 << "host" << "host3:27017"))),
- 0);
-
- ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 <<
- "v" << 1 <<
- "state" << MemberState::RS_PRIMARY), 0);
- hb.setOpTime(lastOpTimeApplied);
- hb.setElectionTime(election.getTimestamp());
- StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
- HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(now()++,
- Milliseconds(0),
- HostAndPort("host3"),
- hbResponse,
- lastOpTimeApplied);
-
- // now primary should be host3, index 1, and we should perform NoAction in response
- ASSERT_EQUALS(1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(action.getAction());
- }
-
- TEST_F(HeartbeatResponseTest, ReconfigNodeRemovedBetweenHeartbeatRequestAndRepsonse) {
- OpTime election = OpTime(Timestamp(14,0), 0);
- OpTime lastOpTimeApplied = OpTime(Timestamp(13,0), 0);
-
- // all three members up and secondaries
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_PRIMARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // now request from host3 and receive the response after host3 has been removed via reconfig
- getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host3"));
-
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host1:27017") <<
- BSON("_id" << 1 << "host" << "host2:27017"))),
- 0);
-
- ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 <<
- "v" << 1 <<
- "state" << MemberState::RS_PRIMARY), 0);
- hb.setOpTime(lastOpTimeApplied);
- hb.setElectionTime(election.getTimestamp());
- StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
- HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(now()++,
- Milliseconds(0),
- HostAndPort("host3"),
- hbResponse,
- lastOpTimeApplied);
-
- // primary should not be set and we should perform NoAction in response
- ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
- ASSERT_NO_ACTION(action.getAction());
- }
-
- TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceMemberNotInConfig) {
- // In this test, the TopologyCoordinator should tell us to change sync sources away from
- // "host4" since "host4" is absent from the config
- ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host4"), now()));
- }
-
- TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceMemberHasYetToHeartbeat) {
- // In this test, the TopologyCoordinator should not tell us to change sync sources away from
- // "host2" since we do not yet have a heartbeat (and as a result do not yet have an optime)
- // for "host2"
- ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
- }
-
- TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherHappierMemberExists) {
- // In this test, the TopologyCoordinator should tell us to change sync sources away from
- // "host2" and to "host3" since "host2" is more than maxSyncSourceLagSecs(30) behind "host3"
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(4,0), 0);
- // ahead by more than maxSyncSourceLagSecs (30)
- OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005,0), 0);
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- fresherLastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // set up complete, time for actual check
- startCapturingLogMessages();
- ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("changing sync target"));
- }
-
- TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberIsBlackListed) {
- // In this test, the TopologyCoordinator should not tell us to change sync sources away from
- // "host2" and to "host3" despite "host2" being more than maxSyncSourceLagSecs(30) behind
- // "host3", since "host3" is blacklisted
- // Then, confirm that unblacklisting only works if time has passed the blacklist time.
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(400,0), 0);
- // ahead by more than maxSyncSourceLagSecs (30)
- OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005,0), 0);
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- fresherLastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- getTopoCoord().blacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(100));
-
- // set up complete, time for actual check
- ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
-
- // unblacklist with too early a time (node should remain blacklisted)
- getTopoCoord().unblacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(90));
- ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
-
- // unblacklist and it should succeed
- getTopoCoord().unblacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(100));
startCapturingLogMessages();
- ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+ getTopoCoord().prepareFreezeResponse(now()++, duration, &response);
stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("changing sync target"));
+ return response.obj();
}
-
- TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberIsDown) {
- // In this test, the TopologyCoordinator should not tell us to change sync sources away from
- // "host2" and to "host3" despite "host2" being more than maxSyncSourceLagSecs(30) behind
- // "host3", since "host3" is down
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(400,0), 0);
- // ahead by more than maxSyncSourceLagSecs (30)
- OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005,0), 0);
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- fresherLastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // set up complete, time for actual check
- nextAction = receiveDownHeartbeat(HostAndPort("host3"), "rs0", lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+};
+
+TEST_F(PrepareFreezeResponseTest, UnfreezeEvenWhenNotFrozen) {
+ BSONObj response = prepareFreezeResponse(0);
+ ASSERT_EQUALS("unfreezing", response["info"].String());
+ ASSERT_EQUALS(1, countLogLinesContaining("'unfreezing'"));
+ // 1 instead of 0 because the step-down time is set to "now" in this case
+ ASSERT_EQUALS(1LL, getTopoCoord().getStepDownTime().asInt64());
+}
+
+TEST_F(PrepareFreezeResponseTest, FreezeForOneSecond) {
+ BSONObj response = prepareFreezeResponse(1);
+ ASSERT_EQUALS("you really want to freeze for only 1 second?", response["warning"].String());
+ ASSERT_EQUALS(1, countLogLinesContaining("'freezing' for 1 seconds"));
+ // 1001 because "now" was incremented once during initialization + 1000 ms wait
+ ASSERT_EQUALS(1001LL, getTopoCoord().getStepDownTime().asInt64());
+}
+
+TEST_F(PrepareFreezeResponseTest, FreezeForManySeconds) {
+ BSONObj response = prepareFreezeResponse(20);
+ ASSERT_TRUE(response.isEmpty());
+ ASSERT_EQUALS(1, countLogLinesContaining("'freezing' for 20 seconds"));
+ // 20001 because "now" was incremented once during initialization + 20000 ms wait
+ ASSERT_EQUALS(20001LL, getTopoCoord().getStepDownTime().asInt64());
+}
+
+TEST_F(PrepareFreezeResponseTest, UnfreezeEvenWhenNotFrozenWhilePrimary) {
+ makeSelfPrimary();
+ BSONObj response = prepareFreezeResponse(0);
+ ASSERT_EQUALS("unfreezing", response["info"].String());
+ // the log does not mention being primary when unfreezing
+ ASSERT_EQUALS(0, countLogLinesContaining("received freeze command but we are primary"));
+ // 1 instead of 0 because the step-down time is set to "now" in this case
+ ASSERT_EQUALS(1LL, getTopoCoord().getStepDownTime().asInt64());
+}
+
+TEST_F(PrepareFreezeResponseTest, FreezeForOneSecondWhilePrimary) {
+ makeSelfPrimary();
+ BSONObj response = prepareFreezeResponse(1);
+ ASSERT_EQUALS("you really want to freeze for only 1 second?", response["warning"].String());
+ ASSERT_EQUALS(1, countLogLinesContaining("received freeze command but we are primary"));
+ ASSERT_EQUALS(0LL, getTopoCoord().getStepDownTime().asInt64());
+}
+
+TEST_F(PrepareFreezeResponseTest, FreezeForManySecondsWhilePrimary) {
+ makeSelfPrimary();
+ BSONObj response = prepareFreezeResponse(20);
+ ASSERT_TRUE(response.isEmpty());
+ ASSERT_EQUALS(1, countLogLinesContaining("received freeze command but we are primary"));
+ ASSERT_EQUALS(0LL, getTopoCoord().getStepDownTime().asInt64());
+}
+
+TEST_F(TopoCoordTest, UnfreezeWhileLoneNode) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 5 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ BSONObjBuilder response;
+ getTopoCoord().prepareFreezeResponse(now()++, 20, &response);
+ ASSERT(response.obj().isEmpty());
+ BSONObjBuilder response2;
+ getTopoCoord().prepareFreezeResponse(now()++, 0, &response2);
+ ASSERT_EQUALS("unfreezing", response2.obj()["info"].String());
+ ASSERT(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+}
+
+class ShutdownInProgressTest : public TopoCoordTest {
+public:
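+ // the canceled callback below simulates a request arriving while shutdown is in progress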
+ ShutdownInProgressTest()
+ : ourCbData(NULL,
+ ReplicationExecutor::CallbackHandle(),
+ Status(ErrorCodes::CallbackCanceled, "")) {}
+
+ virtual ReplicationExecutor::CallbackArgs cbData() {
+ return ourCbData;
}
- TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberIsNotReadable) {
- // In this test, the TopologyCoordinator should not tell us to change sync sources away from
- // "host2" and to "host3" despite "host2" being more than maxSyncSourceLagSecs(30) behind
- // "host3", since "host3" is in a non-readable mode (RS_ROLLBACK)
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(4,0), 0);
- // ahead by more than maxSyncSourceLagSecs (30)
- OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005,0), 0);
-
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_ROLLBACK,
- election,
- fresherLastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // set up complete, time for actual check
- ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
- }
-
- TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberDoesNotBuildIndexes) {
- // In this test, the TopologyCoordinator should not tell us to change sync sources away from
- // "host2" and to "host3" despite "host2" being more than maxSyncSourceLagSecs(30) behind
- // "host3", since "host3" does not build indexes
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(4,0), 0);
- // ahead by more than maxSyncSourceLagSecs (30)
- OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005,0), 0);
-
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 6 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "hself") <<
- BSON("_id" << 1 << "host" << "host2") <<
- BSON("_id" << 2 << "host" << "host3" <<
- "buildIndexes" << false << "priority" << 0))),
- 0);
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- fresherLastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // set up complete, time for actual check
- ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
- }
-
- TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberDoesNotBuildIndexesNorDoWe) {
- // In this test, the TopologyCoordinator should tell us to change sync sources away from
- // "host2" and to "host3" despite "host3" not building indexes because we do not build
- // indexes either and "host2" is more than maxSyncSourceLagSecs(30) behind "host3"
- OpTime election = OpTime();
- OpTime lastOpTimeApplied = OpTime(Timestamp(4,0), 0);
- // ahead by more than maxSyncSourceLagSecs (30)
- OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005,0), 0);
-
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 7 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "hself" <<
- "buildIndexes" << false << "priority" << 0) <<
- BSON("_id" << 1 << "host" << "host2") <<
- BSON("_id" << 2 << "host" << "host3" <<
- "buildIndexes" << false << "priority" << 0))),
- 0);
- HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- lastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
- nextAction = receiveUpHeartbeat(HostAndPort("host3"),
- "rs0",
- MemberState::RS_SECONDARY,
- election,
- fresherLastOpTimeApplied,
- lastOpTimeApplied);
- ASSERT_NO_ACTION(nextAction.getAction());
-
- // set up complete, time for actual check
- startCapturingLogMessages();
- ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("changing sync target"));
- }
-
- TEST_F(TopoCoordTest, CheckShouldStandForElectionWithPrimary) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
+private:
+ ReplicationExecutor::CallbackArgs ourCbData;
+};
+
+TEST_F(ShutdownInProgressTest, ShutdownInProgressWhenCallbackCanceledSyncFrom) {
+ Status result = Status::OK();
+ BSONObjBuilder response;
+ getTopoCoord().prepareSyncFromResponse(
+ cbData(), HostAndPort("host2:27017"), OpTime(), &response, &result);
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, result);
+ ASSERT_TRUE(response.obj().isEmpty());
+}
+
+TEST_F(ShutdownInProgressTest, ShutDownInProgressWhenCallbackCanceledStatus) {
+ Status result = Status::OK();
+ BSONObjBuilder response;
+ getTopoCoord().prepareStatusResponse(cbData(), Date_t(), 0, OpTime(), &response, &result);
+ ASSERT_EQUALS(ErrorCodes::ShutdownInProgress, result);
+ ASSERT_TRUE(response.obj().isEmpty());
+}
+
+class PrepareHeartbeatResponseTest : public TopoCoordTest {
+public:
+ virtual void setUp() {
+ TopoCoordTest::setUp();
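+ // install a three-node config in which we are member 10 ("hself")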
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
-
- heartbeatFromMember(HostAndPort("h2"), "rs0",
- MemberState::RS_PRIMARY, OpTime(Timestamp(1,0), 0));
- ASSERT_FALSE(getTopoCoord().checkShouldStandForElection(now()++, OpTime()));
}
- TEST_F(TopoCoordTest, CheckShouldStandForElectionNotCloseEnoughToLastOptime) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- heartbeatFromMember(HostAndPort("h2"), "rs0",
- MemberState::RS_SECONDARY, OpTime(Timestamp(10000,0), 0));
- ASSERT_FALSE(getTopoCoord().checkShouldStandForElection(now()++,
- OpTime(Timestamp(100,0), 0)));
+ void prepareHeartbeatResponse(const ReplSetHeartbeatArgs& args,
+ OpTime lastOpApplied,
+ ReplSetHeartbeatResponse* response,
+ Status* result) {
+ *result =
+ getTopoCoord().prepareHeartbeatResponse(now()++, args, "rs0", lastOpApplied, response);
}
-
- TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
+};
+
+class PrepareHeartbeatResponseV1Test : public TopoCoordTest {
+public:
+ virtual void setUp() {
+ TopoCoordTest::setUp();
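+ // same three-node config, but with protocolVersion 1 so the V1 heartbeat path is exercised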
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))
+ << "protocolVersion" << 1),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
- ASSERT_FALSE(getTopoCoord().voteForMyself(now()++));
}
- TEST_F(TopoCoordTest, GetMemberStateArbiter) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself" << "arbiterOnly" << true) <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- ASSERT_EQUALS(MemberState::RS_ARBITER, getTopoCoord().getMemberState().s);
- }
-
- TEST_F(TopoCoordTest, UnelectableIfAbsentFromConfig) {
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- startCapturingLogMessages();
- ASSERT_FALSE(getTopoCoord().checkShouldStandForElection(now()++,
- OpTime(Timestamp(10,0), 0)));
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("not a member of a valid replica set config"));
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
- }
-
- TEST_F(TopoCoordTest, UnelectableIfVotedRecently) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
- heartbeatFromMember(HostAndPort("h2"), "rs0",
- MemberState::RS_SECONDARY, OpTime(Timestamp(100,0), 0));
-
- // vote for another node
- OID remoteRound = OID::gen();
- ReplicationCoordinator::ReplSetElectArgs electArgs;
- electArgs.set = "rs0";
- electArgs.round = remoteRound;
- electArgs.cfgver = 1;
- electArgs.whoid = 20;
-
- // need to be 30 secs beyond the start of time to pass last vote lease
- now() += Seconds(30);
- BSONObjBuilder electResponseBuilder;
- Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
- getTopoCoord().prepareElectResponse(
- electArgs, now()++, OpTime(Timestamp(100,0), 0), &electResponseBuilder, &result);
- BSONObj response = electResponseBuilder.obj();
- ASSERT_OK(result);
- std::cout << response;
- ASSERT_EQUALS(1, response["vote"].Int());
- ASSERT_EQUALS(remoteRound, response["round"].OID());
-
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- startCapturingLogMessages();
- ASSERT_FALSE(getTopoCoord().checkShouldStandForElection(now()++,
- OpTime(Timestamp(10,0), 0)));
- stopCapturingLogMessages();
- ASSERT_EQUALS(1, countLogLinesContaining("I recently voted for "));
- logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
- }
-
- TEST_F(TopoCoordTest, ProcessRequestVotesTwoRequestsForSameTerm) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- ReplSetRequestVotesArgs args;
- args.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "term" << 1LL
- << "candidateId" << 10LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response;
- OpTime lastAppliedOpTime;
-
- getTopoCoord().processReplSetRequestVotes(args, &response, lastAppliedOpTime);
- ASSERT_EQUALS("", response.getReason());
- ASSERT_TRUE(response.getVoteGranted());
-
- ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "term" << 1LL
- << "candidateId" << 20LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response2;
-
- // a different candidate in the same term should be rejected
- getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
- ASSERT_EQUALS("already voted for another candidate this term", response2.getReason());
- ASSERT_FALSE(response2.getVoteGranted());
-
- }
-
- TEST_F(TopoCoordTest, ProcessRequestVotesDryRunsDoNotDisallowFutureRequestVotes) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- // dry run
- ReplSetRequestVotesArgs args;
- args.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "dryRun" << true
- << "term" << 1LL
- << "candidateId" << 10LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response;
- OpTime lastAppliedOpTime;
-
- getTopoCoord().processReplSetRequestVotes(args, &response, lastAppliedOpTime);
- ASSERT_EQUALS("", response.getReason());
- ASSERT_TRUE(response.getVoteGranted());
-
- // second dry run fine
- ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "dryRun" << true
- << "term" << 1LL
- << "candidateId" << 10LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response2;
-
- getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
- ASSERT_EQUALS("", response2.getReason());
- ASSERT_TRUE(response2.getVoteGranted());
-
- // real request fine
- ReplSetRequestVotesArgs args3;
- args3.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "dryRun" << false
- << "term" << 1LL
- << "candidateId" << 10LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response3;
-
- getTopoCoord().processReplSetRequestVotes(args3, &response3, lastAppliedOpTime);
- ASSERT_EQUALS("", response3.getReason());
- ASSERT_TRUE(response3.getVoteGranted());
-
- // after the real vote, another request in the same term fails
- ReplSetRequestVotesArgs args4;
- args4.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "dryRun" << false
- << "term" << 1LL
- << "candidateId" << 10LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response4;
-
- getTopoCoord().processReplSetRequestVotes(args4, &response4, lastAppliedOpTime);
- ASSERT_EQUALS("already voted for another candidate this term", response4.getReason());
- ASSERT_FALSE(response4.getVoteGranted());
-
- }
-
- TEST_F(TopoCoordTest, ProcessRequestVotesBadCommands) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- // mismatched setName
- ReplSetRequestVotesArgs args;
- args.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "wrongName"
- << "term" << 1LL
- << "candidateId" << 10LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response;
- OpTime lastAppliedOpTime;
-
- getTopoCoord().processReplSetRequestVotes(args, &response, lastAppliedOpTime);
- ASSERT_EQUALS("candidate's set name differs from mine", response.getReason());
- ASSERT_FALSE(response.getVoteGranted());
-
- // mismatched configVersion
- ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "term" << 1LL
- << "candidateId" << 20LL
- << "configVersion" << 0LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response2;
-
- getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
- ASSERT_EQUALS("candidate's config version differs from mine", response2.getReason());
- ASSERT_FALSE(response2.getVoteGranted());
-
- // set term higher by receiving a replSetDeclareElectionWinnerCommand
- ReplSetDeclareElectionWinnerArgs winnerArgs;
- winnerArgs.initialize(BSON("replSetDeclareElectionWinner" << 1
- << "setName" << "rs0"
- << "term" << 2
- << "winnerId" << 30));
- long long responseTerm;
- ASSERT(getTopoCoord().updateTerm(winnerArgs.getTerm()));
- ASSERT_OK(getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs, &responseTerm));
- ASSERT_EQUALS(2, responseTerm);
-
- // stale term
- ReplSetRequestVotesArgs args3;
- args3.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "term" << 1LL
- << "candidateId" << 20LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response3;
-
- getTopoCoord().processReplSetRequestVotes(args3, &response3, lastAppliedOpTime);
- ASSERT_EQUALS("candidate's term is lower than mine", response3.getReason());
- ASSERT_EQUALS(2, response3.getTerm());
- ASSERT_FALSE(response3.getVoteGranted());
-
- // stale OpTime
- ReplSetRequestVotesArgs args4;
- args4.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "term" << 3LL
- << "candidateId" << 20LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response4;
- OpTime lastAppliedOpTime2 = {Timestamp(20, 0), 0};
-
- getTopoCoord().processReplSetRequestVotes(args4, &response4, lastAppliedOpTime2);
- ASSERT_EQUALS("candidate's data is staler than mine", response4.getReason());
- ASSERT_FALSE(response4.getVoteGranted());
- }
-
- TEST_F(TopoCoordTest, ProcessRequestVotesBadCommandsDryRun) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
- // set term to 1
- ASSERT(getTopoCoord().updateTerm(1));
- // and make sure we voted in term 1
- ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "term" << 1LL
- << "candidateId" << 10LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse responseForRealVote;
- OpTime lastAppliedOpTime;
-
- getTopoCoord().processReplSetRequestVotes(argsForRealVote,
- &responseForRealVote,
- lastAppliedOpTime);
- ASSERT_EQUALS("", responseForRealVote.getReason());
- ASSERT_TRUE(responseForRealVote.getVoteGranted());
-
-
- // mismatched setName
- ReplSetRequestVotesArgs args;
- args.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "wrongName"
- << "dryRun" << true
- << "term" << 2LL
- << "candidateId" << 10LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response;
-
- getTopoCoord().processReplSetRequestVotes(args, &response, lastAppliedOpTime);
- ASSERT_EQUALS("candidate's set name differs from mine", response.getReason());
- ASSERT_EQUALS(1, response.getTerm());
- ASSERT_FALSE(response.getVoteGranted());
-
- // mismatched configVersion
- ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "dryRun" << true
- << "term" << 2LL
- << "candidateId" << 20LL
- << "configVersion" << 0LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response2;
-
- getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
- ASSERT_EQUALS("candidate's config version differs from mine", response2.getReason());
- ASSERT_EQUALS(1, response2.getTerm());
- ASSERT_FALSE(response2.getVoteGranted());
-
- // stale term
- ReplSetRequestVotesArgs args3;
- args3.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "dryRun" << true
- << "term" << 0LL
- << "candidateId" << 20LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response3;
-
- getTopoCoord().processReplSetRequestVotes(args3, &response3, lastAppliedOpTime);
- ASSERT_EQUALS("candidate's term is lower than mine", response3.getReason());
- ASSERT_EQUALS(1, response3.getTerm());
- ASSERT_FALSE(response3.getVoteGranted());
-
- // repeat term
- ReplSetRequestVotesArgs args4;
- args4.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "dryRun" << true
- << "term" << 1LL
- << "candidateId" << 20LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response4;
-
- getTopoCoord().processReplSetRequestVotes(args4, &response4, lastAppliedOpTime);
- ASSERT_EQUALS("", response4.getReason());
- ASSERT_EQUALS(1, response4.getTerm());
- ASSERT_TRUE(response4.getVoteGranted());
-
- // stale OpTime
- ReplSetRequestVotesArgs args5;
- args5.initialize(BSON("replSetRequestVotes" << 1
- << "setName" << "rs0"
- << "dryRun" << true
- << "term" << 3LL
- << "candidateId" << 20LL
- << "configVersion" << 1LL
- << "lastCommittedOp" << BSON ("ts" << Timestamp(10, 0)
- << "term" << 0LL)));
- ReplSetRequestVotesResponse response5;
- OpTime lastAppliedOpTime2 = {Timestamp(20, 0), 0};
-
- getTopoCoord().processReplSetRequestVotes(args5, &response5, lastAppliedOpTime2);
- ASSERT_EQUALS("candidate's data is staler than mine", response5.getReason());
- ASSERT_EQUALS(1, response5.getTerm());
- ASSERT_FALSE(response5.getVoteGranted());
- }
-
- TEST_F(TopoCoordTest, ProcessDeclareElectionWinner) {
- updateConfig(BSON("_id" << "rs0" <<
- "version" << 1 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 10 << "host" << "hself") <<
- BSON("_id" << 20 << "host" << "h2") <<
- BSON("_id" << 30 << "host" << "h3"))),
- 0);
- setSelfMemberState(MemberState::RS_SECONDARY);
-
- // successful
- ReplSetDeclareElectionWinnerArgs winnerArgs;
- winnerArgs.initialize(BSON("replSetDeclareElectionWinner" << 1
- << "setName" << "rs0"
- << "term" << 2
- << "winnerId" << 30));
- long long responseTerm = -1;
- ASSERT(getTopoCoord().updateTerm(winnerArgs.getTerm()));
- ASSERT_OK(getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs, &responseTerm));
- ASSERT_EQUALS(2, responseTerm);
-
- // repeat, should be problem free
- ReplSetDeclareElectionWinnerArgs winnerArgs2;
- winnerArgs2.initialize(BSON("replSetDeclareElectionWinner" << 1
- << "setName" << "rs0"
- << "term" << 2
- << "winnerId" << 30));
- long long responseTerm2 = -1;
- ASSERT_OK(getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs2, &responseTerm2));
- ASSERT_EQUALS(2, responseTerm2);
-
- // same term, different primary, should fail
- ReplSetDeclareElectionWinnerArgs winnerArgs3;
- winnerArgs3.initialize(BSON("replSetDeclareElectionWinner" << 1
- << "setName" << "rs0"
- << "term" << 2
- << "winnerId" << 20));
- long long responseTerm3 = -1;
- ASSERT_EQUALS("term already has a primary",
- getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs3,
- &responseTerm3).reason());
- ASSERT_EQUALS(2, responseTerm3);
-
- // stale term, should fail
- ReplSetDeclareElectionWinnerArgs winnerArgs4;
- winnerArgs4.initialize(BSON("replSetDeclareElectionWinner" << 1
- << "setName" << "rs0"
- << "term" << 0
- << "winnerId" << 20));
- long long responseTerm4 = -1;
- ASSERT_EQUALS("term has already passed",
- getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs4,
- &responseTerm4).reason());
- ASSERT_EQUALS(2, responseTerm4);
-
- // wrong setName
- ReplSetDeclareElectionWinnerArgs winnerArgs5;
- winnerArgs5.initialize(BSON("replSetDeclareElectionWinner" << 1
- << "setName" << "wrongName"
- << "term" << 3
- << "winnerId" << 20));
- long long responseTerm5 = -1;
- ASSERT_EQUALS("replSet name does not match",
- getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs5,
- &responseTerm5).reason());
- ASSERT_EQUALS(2, responseTerm5);
+ void prepareHeartbeatResponseV1(const ReplSetHeartbeatArgsV1& args,
+ OpTime lastOpApplied,
+ ReplSetHeartbeatResponse* response,
+ Status* result) {
+ *result = getTopoCoord().prepareHeartbeatResponseV1(
+ now()++, args, "rs0", lastOpApplied, response);
}
+};
+
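+// The tests below drive prepareHeartbeatResponseV1() through the helper above: each one
+// builds a ReplSetHeartbeatArgsV1, calls the helper with a last-applied OpTime, and checks
+// both the returned Status and the fields of the ReplSetHeartbeatResponse. A minimal sketch
+// of that pattern (the "rs0" set name and member ids used here come from the fixture's
+// config, which is set up earlier in this file):
+//
+//     ReplSetHeartbeatArgsV1 args;
+//     args.setConfigVersion(1);
+//     args.setSetName("rs0");
+//     args.setSenderId(20);
+//     ReplSetHeartbeatResponse response;
+//     Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+//     prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
+//     ASSERT_OK(result);
+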
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseBadSetName) {
+ // set up args with incorrect replset name
+ ReplSetHeartbeatArgsV1 args;
+ args.setSetName("rs1");
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ startCapturingLogMessages();
+ prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result);
+ ASSERT(result.reason().find("repl set names do not match") != std::string::npos) << "Actual string was \""
+ << result.reason() << '"';
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "replSet set names do not match, ours: rs0; remote "
+ "node's: rs1"));
+ // only protocolVersion should be set in this failure case
+ ASSERT_EQUALS("", response.getReplicaSetName());
+}
+
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseWhenOutOfSet) {
+ // reconfig self out of set
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 3 << "members" << BSON_ARRAY(BSON("_id" << 20 << "host"
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))
+ << "protocolVersion" << 1),
+ -1);
+ ReplSetHeartbeatArgsV1 args;
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+ prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
+ ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, result);
+ ASSERT(result.reason().find("replica set configuration is invalid or does not include us") != std::string::npos)
+ << "Actual string was \"" << result.reason() << '"';
+ // only protocolVersion should be set in this failure case
+ ASSERT_EQUALS("", response.getReplicaSetName());
+}
+
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseFromSelf) {
+ // set up args with our id as the senderId
+ ReplSetHeartbeatArgsV1 args;
+ args.setSetName("rs0");
+ args.setSenderId(10);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+ prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
+ ASSERT_EQUALS(ErrorCodes::BadValue, result);
+ ASSERT(result.reason().find("from member with the same member ID as our self") != std::string::npos)
+ << "Actual string was \"" << result.reason() << '"';
+ // only protocolVersion should be set in this failure case
+ ASSERT_EQUALS("", response.getReplicaSetName());
+}
+
+TEST_F(TopoCoordTest, PrepareHeartbeatResponseV1NoConfigYet) {
+ // set up args
+ ReplSetHeartbeatArgsV1 args;
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ // prepare response and check the results
+ Status result =
+ getTopoCoord().prepareHeartbeatResponseV1(now()++, args, "rs0", OpTime(), &response);
+ ASSERT_OK(result);
+ // with no config installed we remain in STARTUP and report a config version of -2
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTerm());
+ ASSERT_EQUALS(-2, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseSenderIDMissing) {
+ // set up args without a senderID
+ ReplSetHeartbeatArgsV1 args;
+ args.setSetName("rs0");
+ args.setConfigVersion(1);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTerm());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseSenderIDNotInConfig) {
+ // set up args with a senderID which is not present in our config
+ ReplSetHeartbeatArgsV1 args;
+ args.setSetName("rs0");
+ args.setConfigVersion(1);
+ args.setSenderId(2);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTerm());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
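+// The next two cases cover the config-version exchange: when the sender reports an older
+// config version than ours, the response carries our config (hasConfig() is true), presumably
+// so the sender can catch up; when the sender's version is newer than ours, no config is
+// attached.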
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseConfigVersionLow) {
+ // set up args with a config version lower than ours
+ ReplSetHeartbeatArgsV1 args;
+ args.setConfigVersion(0);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_TRUE(response.hasConfig());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTerm());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseConfigVersionHigh) {
+ // set up args with a config version higher than ours
+ ReplSetHeartbeatArgsV1 args;
+ args.setConfigVersion(10);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponseV1(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_FALSE(response.hasConfig());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTerm());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseAsPrimary) {
+ makeSelfPrimary(Timestamp(10, 0));
+
+ ReplSetHeartbeatArgsV1 args;
+ args.setConfigVersion(1);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponseV1(args, OpTime(Timestamp(11, 0), 0), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_FALSE(response.hasConfig());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(Timestamp(11, 0), 0), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTerm());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseV1Test, PrepareHeartbeatResponseWithSyncSource) {
+ // get a sync source
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(1, 0), 0));
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(1, 0), 0));
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+
+ // set up args
+ ReplSetHeartbeatArgsV1 args;
+ args.setConfigVersion(1);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponseV1(args, OpTime(Timestamp(100, 0), 0), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_FALSE(response.hasConfig());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(Timestamp(100, 0), 0), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTerm());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+ ASSERT_EQUALS(HostAndPort("h2"), response.getSyncingTo());
+}
+
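+// The following group repeats the same scenarios against the original (non-V1)
+// prepareHeartbeatResponse() path, using ReplSetHeartbeatArgs, which also carries an explicit
+// heartbeat protocol version.
+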
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseBadProtocolVersion) {
+ // set up args with bad protocol version
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(3);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(), &response, &result);
+ ASSERT_EQUALS(ErrorCodes::BadValue, result);
+ ASSERT_EQUALS("replset: incompatible replset protocol version: 3", result.reason());
+ ASSERT_EQUALS("", response.getHbMsg());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseFromSelf) {
+ // set up args with our id as the senderId
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setSetName("rs0");
+ args.setSenderId(10);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+ prepareHeartbeatResponse(args, OpTime(), &response, &result);
+ ASSERT_EQUALS(ErrorCodes::BadValue, result);
+ ASSERT(result.reason().find("from member with the same member ID as our self") != std::string::npos)
+ << "Actual string was \"" << result.reason() << '"';
+ ASSERT_EQUALS("", response.getHbMsg());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseBadSetName) {
+ // set up args with incorrect replset name
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setSetName("rs1");
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ startCapturingLogMessages();
+ prepareHeartbeatResponse(args, OpTime(), &response, &result);
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(ErrorCodes::InconsistentReplicaSetNames, result);
+ ASSERT(result.reason().find("repl set names do not match") != std::string::npos) << "Actual string was \""
+ << result.reason() << '"';
+ ASSERT_EQUALS(1,
+ countLogLinesContaining(
+ "replSet set names do not match, ours: rs0; remote "
+ "node's: rs1"));
+ ASSERT_TRUE(response.isMismatched());
+ ASSERT_EQUALS("", response.getHbMsg());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseSenderIDMissing) {
+ // set up args without a senderID
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setSetName("rs0");
+ args.setConfigVersion(1);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_FALSE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS("", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseSenderIDNotInConfig) {
+ // set up args with a senderID which is not present in our config
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setSetName("rs0");
+ args.setConfigVersion(1);
+ args.setSenderId(2);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_FALSE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS("", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseConfigVersionLow) {
+ // set up args with a config version lower than ours
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setConfigVersion(0);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_TRUE(response.hasConfig());
+ ASSERT_FALSE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS("", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseConfigVersionHigh) {
+ // set up args with a config version higher than ours
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setConfigVersion(10);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_FALSE(response.hasConfig());
+ ASSERT_FALSE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS("", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
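+// In the "sender down" case we have never received a heartbeat from member 20, so from our
+// perspective it is down; the response therefore reports a state disagreement, as the
+// isStateDisagreement() assertion below verifies.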
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseSenderDown) {
+ // set up args with sender down from our perspective
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setConfigVersion(1);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_FALSE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS("", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+ ASSERT_TRUE(response.isStateDisagreement());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseSenderUp) {
+ // set up args and acknowledge sender
+ heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime());
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setConfigVersion(1);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(Timestamp(100, 0), 0), &response, &result);
+ ASSERT_OK(result);
+ // isElectable changes to true because we can now see a majority, unlike in the previous cases
+ ASSERT_TRUE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(Timestamp(100, 0), 0), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS("", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(TopoCoordTest, PrepareHeartbeatResponseNoConfigYet) {
+ // set up args
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setConfigVersion(1);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ // prepare response and check the results
+ Status result =
+ getTopoCoord().prepareHeartbeatResponse(now()++, args, "rs0", OpTime(), &response);
+ ASSERT_OK(result);
+ // not electable and still in STARTUP because no config has been installed yet
+ ASSERT_FALSE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, response.getState().s);
+ ASSERT_EQUALS(OpTime(), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS("", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(-2, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseAsPrimary) {
+ makeSelfPrimary(Timestamp(10, 0));
+ heartbeatFromMember(HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime());
+
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setConfigVersion(1);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(Timestamp(11, 0), 0), &response, &result);
+ ASSERT_OK(result);
+ // electable because we are already primary
+ ASSERT_TRUE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(Timestamp(11, 0), 0), response.getOpTime());
+ ASSERT_EQUALS(Timestamp(10, 0), response.getElectionTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ ASSERT_EQUALS("", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+}
+
+TEST_F(PrepareHeartbeatResponseTest, PrepareHeartbeatResponseWithSyncSource) {
+ // get a sync source
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+ heartbeatFromMember(HostAndPort("h3"), "rs0", MemberState::RS_SECONDARY, OpTime());
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(1, 0), 0));
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(1, 0), 0));
+ getTopoCoord().chooseNewSyncSource(now()++, OpTime());
+
+ // set up args
+ ReplSetHeartbeatArgs args;
+ args.setProtocolVersion(1);
+ args.setConfigVersion(1);
+ args.setSetName("rs0");
+ args.setSenderId(20);
+ ReplSetHeartbeatResponse response;
+ Status result(ErrorCodes::InternalError, "prepareHeartbeatResponse didn't set result");
+
+ // prepare response and check the results
+ prepareHeartbeatResponse(args, OpTime(Timestamp(100, 0), 0), &response, &result);
+ ASSERT_OK(result);
+ ASSERT_TRUE(response.isElectable());
+ ASSERT_TRUE(response.isReplSet());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, response.getState().s);
+ ASSERT_EQUALS(OpTime(Timestamp(100, 0), 0), response.getOpTime());
+ ASSERT_EQUALS(0, response.getTime().count());
+ // changed to a syncing message because our sync source changed recently
+ ASSERT_EQUALS("syncing from: h2:27017", response.getHbMsg());
+ ASSERT_EQUALS("rs0", response.getReplicaSetName());
+ ASSERT_EQUALS(1, response.getConfigVersion());
+ ASSERT_EQUALS(HostAndPort("h2"), response.getSyncingTo());
+}
+
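+// The next group exercises role and member-state transitions: a lone electable node becomes a
+// candidate once it enters SECONDARY, a lone priority-0 node does not, and reconfigs can move
+// a node between REMOVED and STARTUP2 or take away (or preserve) its PRIMARY role.
+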
+TEST_F(TopoCoordTest, SetFollowerSecondaryWhenLoneNode) {
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
+ 0);
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+
+ // if we are the only node, we should become a candidate when we transition to SECONDARY
+ ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(TopoCoordTest, CandidateWhenLoneSecondaryNodeReconfig) {
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
+ ReplicaSetConfig cfg;
+ cfg.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"
+ << "priority" << 0))));
+ getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+
+ ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+
+ // we should become a candidate when we reconfig to become electable
+
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
+ 0);
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+}
+
+TEST_F(TopoCoordTest, SetFollowerSecondaryWhenLoneUnelectableNode) {
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
+ ReplicaSetConfig cfg;
+ cfg.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"
+ << "priority" << 0))));
+
+ getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+
+ // despite being the only node, we are unelectable, so we should not become a candidate
+ ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(TopoCoordTest, ReconfigToBeAddedToTheSet) {
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
+ // config to be absent from the set
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ -1);
+ // should become removed since we are not in the set
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
+
+ // reconfig to add to set
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
+ // having been added to the config, we should no longer be REMOVED and should enter STARTUP2
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(TopoCoordTest, ReconfigToBeRemovedFromTheSet) {
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+
+ // reconfig to remove self
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ -1);
+ // should become removed since we are no longer in the set
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(TopoCoordTest, ReconfigToBeRemovedFromTheSetAsPrimary) {
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
+ 0);
+ ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+ getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ // win election and primary
+ getTopoCoord().processWinElection(OID::gen(), Timestamp());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
+
+ // reconfig to remove self
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ -1);
+ // should become removed since we are no longer in the set even though we were primary
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(TopoCoordTest, ReconfigCanNoLongerBePrimary) {
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
+ 0);
+ ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+ getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ // win election and primary
+ getTopoCoord().processWinElection(OID::gen(), Timestamp());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
+
+ // now lose primary due to loss of electability
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority" << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(TopoCoordTest, ReconfigContinueToBePrimary) {
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
+ 0);
+
+ ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+ getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
+ ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
+
+ // win election and primary
+ getTopoCoord().processWinElection(OID::gen(), Timestamp());
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
+
+ // Now reconfig in ways that leave us electable and ensure we are still the primary.
+ // Add hosts
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
+
+ // Change priorities and tags
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority" << 10)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"
+ << "priority" << 5 << "tags" << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rack1")))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
+ ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(TopoCoordTest, ReconfigKeepSecondary) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "host1:27017")
+ << BSON("_id" << 2 << "host"
+ << "host2:27017"))),
+ 0);
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+
+ // reconfig and stay secondary
+ updateConfig(
+ BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017") << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
+ ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
+ ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(HeartbeatResponseTest, ReconfigBetweenHeartbeatRequestAndResponse) {
+ OpTime election = OpTime(Timestamp(14, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(13, 0), 0);
+
+ // all three members up and secondaries
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // now request from host3 and receive after host2 has been removed via reconfig
+ getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host3"));
+
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
+
+ ReplSetHeartbeatResponse hb;
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.setOpTime(lastOpTimeApplied);
+ hb.setElectionTime(election.getTimestamp());
+ StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ now()++, Milliseconds(0), HostAndPort("host3"), hbResponse, lastOpTimeApplied);
+
+ // now primary should be host3, index 1, and we should perform NoAction in response
+ ASSERT_EQUALS(1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(action.getAction());
+}
+
+TEST_F(HeartbeatResponseTest, ReconfigNodeRemovedBetweenHeartbeatRequestAndResponse) {
+ OpTime election = OpTime(Timestamp(14, 0), 0);
+ OpTime lastOpTimeApplied = OpTime(Timestamp(13, 0), 0);
+
+ // all three members up and secondaries
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_PRIMARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // now request from host3 and receive after host2 has been removed via reconfig
+ getTopoCoord().prepareHeartbeatRequest(now()++, "rs0", HostAndPort("host3"));
+
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"))),
+ 0);
+
+ ReplSetHeartbeatResponse hb;
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.setOpTime(lastOpTimeApplied);
+ hb.setElectionTime(election.getTimestamp());
+ StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
+ HeartbeatResponseAction action = getTopoCoord().processHeartbeatResponse(
+ now()++, Milliseconds(0), HostAndPort("host3"), hbResponse, lastOpTimeApplied);
+
+ // primary should not be set and we should perform NoAction in response
+ ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
+ ASSERT_NO_ACTION(action.getAction());
+}
+
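+// shouldChangeSyncSource() is covered next. The tests below show it returns true when the
+// current sync source is absent from the config or lags a fresher member by more than
+// maxSyncSourceLagSecs (30), and false when we have no heartbeat data for the source yet, or
+// when the fresher member is blacklisted, down, in a non-readable state, or does not build
+// indexes while we do.
+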
+TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceMemberNotInConfig) {
+ // In this test, the TopologyCoordinator should tell us to change sync sources away from
+ // "host4" since "host4" is absent from the config
+ ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host4"), now()));
+}
+
+TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceMemberHasYetToHeartbeat) {
+ // In this test, the TopologyCoordinator should not tell us to change sync sources away from
+ // "host2" since we do not yet have a heartbeat (and as a result do not yet have an optime)
+ // for "host2"
+ ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+}
+
+TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherHappierMemberExists) {
+ // In this test, the TopologyCoordinator should tell us to change sync sources away from
+ // "host2" and to "host3" since "host2" is more than maxSyncSourceLagSecs(30) behind "host3"
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(4, 0), 0);
+ // ahead by more than maxSyncSourceLagSecs (30)
+ OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005, 0), 0);
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ fresherLastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // set up complete, time for actual check
+ startCapturingLogMessages();
+ ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("changing sync target"));
+}
+
+TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberIsBlackListed) {
+ // In this test, the TopologyCoordinator should not tell us to change sync sources away from
+ // "host2" and to "host3" despite "host2" being more than maxSyncSourceLagSecs(30) behind
+ // "host3", since "host3" is blacklisted
+ // Then, confirm that unblacklisting only works if time has passed the blacklist time.
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(400, 0), 0);
+ // ahead by more than maxSyncSourceLagSecs (30)
+ OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005, 0), 0);
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ fresherLastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ getTopoCoord().blacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(100));
+
+ // set up complete, time for actual check
+ ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+
+ // unblacklist with too early a time (node should remain blacklisted)
+ getTopoCoord().unblacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(90));
+ ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+
+ // unblacklist and it should succeed
+ getTopoCoord().unblacklistSyncSource(HostAndPort("host3"), now() + Milliseconds(100));
+ startCapturingLogMessages();
+ ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("changing sync target"));
+}
+
+TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberIsDown) {
+ // In this test, the TopologyCoordinator should not tell us to change sync sources away from
+ // "host2" and to "host3" despite "host2" being more than maxSyncSourceLagSecs(30) behind
+ // "host3", since "host3" is down
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(400, 0), 0);
+ // ahead by more than maxSyncSourceLagSecs (30)
+ OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005, 0), 0);
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ fresherLastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // set up complete, time for actual check
+ nextAction = receiveDownHeartbeat(HostAndPort("host3"), "rs0", lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+}
+
+TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberIsNotReadable) {
+ // In this test, the TopologyCoordinator should not tell us to change sync sources away from
+ // "host2" and to "host3" despite "host2" being more than maxSyncSourceLagSecs(30) behind
+ // "host3", since "host3" is in a non-readable mode (RS_ROLLBACK)
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(4, 0), 0);
+ // ahead by more than maxSyncSourceLagSecs (30)
+ OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005, 0), 0);
+
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_ROLLBACK,
+ election,
+ fresherLastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // set up complete, time for actual check
+ ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+}
+
+TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberDoesNotBuildIndexes) {
+ // In this test, the TopologyCoordinator should not tell us to change sync sources away from
+ // "host2" and to "host3" despite "host2" being more than maxSyncSourceLagSecs(30) behind
+ // "host3", since "host3" does not build indexes
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(4, 0), 0);
+ // ahead by more than maxSyncSourceLagSecs (30)
+ OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005, 0), 0);
+
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 6 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "hself")
+ << BSON("_id" << 1 << "host"
+ << "host2")
+ << BSON("_id" << 2 << "host"
+ << "host3"
+ << "buildIndexes" << false << "priority" << 0))),
+ 0);
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ fresherLastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // set up complete, time for actual check
+ ASSERT_FALSE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+}
+
+TEST_F(HeartbeatResponseTest, ShouldChangeSyncSourceFresherMemberDoesNotBuildIndexesNorDoWe) {
+ // In this test, the TopologyCoordinator should tell us to change sync sources away from
+ // "host2" and to "host3" despite "host3" not building indexes because we do not build
+ // indexes either and "host2" is more than maxSyncSourceLagSecs(30) behind "host3"
+ OpTime election = OpTime();
+ OpTime lastOpTimeApplied = OpTime(Timestamp(4, 0), 0);
+ // ahead by more than maxSyncSourceLagSecs (30)
+ OpTime fresherLastOpTimeApplied = OpTime(Timestamp(3005, 0), 0);
+
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 7 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "hself"
+ << "buildIndexes" << false << "priority" << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2")
+ << BSON("_id" << 2 << "host"
+ << "host3"
+ << "buildIndexes" << false << "priority" << 0))),
+ 0);
+ HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ lastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+ nextAction = receiveUpHeartbeat(HostAndPort("host3"),
+ "rs0",
+ MemberState::RS_SECONDARY,
+ election,
+ fresherLastOpTimeApplied,
+ lastOpTimeApplied);
+ ASSERT_NO_ACTION(nextAction.getAction());
+
+ // set up complete, time for actual check
+ startCapturingLogMessages();
+ ASSERT_TRUE(getTopoCoord().shouldChangeSyncSource(HostAndPort("host2"), now()));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("changing sync target"));
+}
+
+TEST_F(TopoCoordTest, CheckShouldStandForElectionWithPrimary) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_PRIMARY, OpTime(Timestamp(1, 0), 0));
+ ASSERT_FALSE(getTopoCoord().checkShouldStandForElection(now()++, OpTime()));
+}
+
+TEST_F(TopoCoordTest, CheckShouldStandForElectionNotCloseEnoughToLastOptime) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(10000, 0), 0));
+ ASSERT_FALSE(getTopoCoord().checkShouldStandForElection(now()++, OpTime(Timestamp(100, 0), 0)));
+}
+
+TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ ASSERT_FALSE(getTopoCoord().voteForMyself(now()++));
+}
+
+TEST_F(TopoCoordTest, GetMemberStateArbiter) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself"
+ << "arbiterOnly" << true)
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ ASSERT_EQUALS(MemberState::RS_ARBITER, getTopoCoord().getMemberState().s);
+}
+
+TEST_F(TopoCoordTest, UnelectableIfAbsentFromConfig) {
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ startCapturingLogMessages();
+ ASSERT_FALSE(getTopoCoord().checkShouldStandForElection(now()++, OpTime(Timestamp(10, 0), 0)));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("not a member of a valid replica set config"));
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
+}
+
+TEST_F(TopoCoordTest, UnelectableIfVotedRecently) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ heartbeatFromMember(
+ HostAndPort("h2"), "rs0", MemberState::RS_SECONDARY, OpTime(Timestamp(100, 0), 0));
+
+ // vote for another node
+ OID remoteRound = OID::gen();
+ ReplicationCoordinator::ReplSetElectArgs electArgs;
+ electArgs.set = "rs0";
+ electArgs.round = remoteRound;
+ electArgs.cfgver = 1;
+ electArgs.whoid = 20;
+
+ // need to be 30 secs beyond the start of time to pass last vote lease
+ now() += Seconds(30);
+ BSONObjBuilder electResponseBuilder;
+ Status result = Status(ErrorCodes::InternalError, "status not set by prepareElectResponse");
+ getTopoCoord().prepareElectResponse(
+ electArgs, now()++, OpTime(Timestamp(100, 0), 0), &electResponseBuilder, &result);
+ BSONObj response = electResponseBuilder.obj();
+ ASSERT_OK(result);
+ ASSERT_EQUALS(1, response["vote"].Int());
+ ASSERT_EQUALS(remoteRound, response["round"].OID());
+
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
+ startCapturingLogMessages();
+ ASSERT_FALSE(getTopoCoord().checkShouldStandForElection(now()++, OpTime(Timestamp(10, 0), 0)));
+ stopCapturingLogMessages();
+ ASSERT_EQUALS(1, countLogLinesContaining("I recently voted for "));
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
+}
+
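+// The remaining tests cover replSetRequestVotes handling: a node grants at most one real vote
+// per term, dry-run requests do not consume that vote, and requests are rejected when the set
+// name or config version differs from ours, the candidate's term is stale, or its last
+// committed op is older than our last applied op.
+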
+TEST_F(TopoCoordTest, ProcessRequestVotesTwoRequestsForSameTerm) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ ReplSetRequestVotesArgs args;
+ args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term" << 1LL << "candidateId" << 10LL
+ << "configVersion" << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response;
+ OpTime lastAppliedOpTime;
+
+ getTopoCoord().processReplSetRequestVotes(args, &response, lastAppliedOpTime);
+ ASSERT_EQUALS("", response.getReason());
+ ASSERT_TRUE(response.getVoteGranted());
+
+ ReplSetRequestVotesArgs args2;
+ args2.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "term" << 1LL << "candidateId" << 20LL << "configVersion" << 1LL
+ << "lastCommittedOp" << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response2;
+
+ // different candidate same term, should be a problem
+ getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
+ ASSERT_EQUALS("already voted for another candidate this term", response2.getReason());
+ ASSERT_FALSE(response2.getVoteGranted());
+}
+
+TEST_F(TopoCoordTest, ProcessRequestVotesDryRunsDoNotDisallowFutureRequestVotes) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ // dry run
+ ReplSetRequestVotesArgs args;
+ args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun" << true << "term" << 1LL << "candidateId"
+ << 10LL << "configVersion" << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response;
+ OpTime lastAppliedOpTime;
+
+ getTopoCoord().processReplSetRequestVotes(args, &response, lastAppliedOpTime);
+ ASSERT_EQUALS("", response.getReason());
+ ASSERT_TRUE(response.getVoteGranted());
+
+ // second dry run fine
+ ReplSetRequestVotesArgs args2;
+ args2.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "dryRun" << true << "term" << 1LL << "candidateId" << 10LL
+ << "configVersion" << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response2;
+
+ getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
+ ASSERT_EQUALS("", response2.getReason());
+ ASSERT_TRUE(response2.getVoteGranted());
+
+ // real request fine
+ ReplSetRequestVotesArgs args3;
+ args3.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "dryRun" << false << "term" << 1LL << "candidateId" << 10LL
+ << "configVersion" << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response3;
+
+ getTopoCoord().processReplSetRequestVotes(args3, &response3, lastAppliedOpTime);
+ ASSERT_EQUALS("", response3.getReason());
+ ASSERT_TRUE(response3.getVoteGranted());
+
+ // a second real request in the same term fails
+ ReplSetRequestVotesArgs args4;
+ args4.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "dryRun" << false << "term" << 1LL << "candidateId" << 10LL
+ << "configVersion" << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response4;
+
+ getTopoCoord().processReplSetRequestVotes(args4, &response4, lastAppliedOpTime);
+ ASSERT_EQUALS("already voted for another candidate this term", response4.getReason());
+ ASSERT_FALSE(response4.getVoteGranted());
+}
+
+TEST_F(TopoCoordTest, ProcessRequestVotesBadCommands) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ // mismatched setName
+ ReplSetRequestVotesArgs args;
+ args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "wrongName"
+ << "term" << 1LL << "candidateId" << 10LL
+ << "configVersion" << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response;
+ OpTime lastAppliedOpTime;
+
+ getTopoCoord().processReplSetRequestVotes(args, &response, lastAppliedOpTime);
+ ASSERT_EQUALS("candidate's set name differs from mine", response.getReason());
+ ASSERT_FALSE(response.getVoteGranted());
+
+ // mismatched configVersion
+ ReplSetRequestVotesArgs args2;
+ args2.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "term" << 1LL << "candidateId" << 20LL << "configVersion" << 0LL
+ << "lastCommittedOp" << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response2;
+
+ getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
+ ASSERT_EQUALS("candidate's config version differs from mine", response2.getReason());
+ ASSERT_FALSE(response2.getVoteGranted());
+
+ // set term higher by receiving a replSetDeclareElectionWinnerCommand
+ ReplSetDeclareElectionWinnerArgs winnerArgs;
+ winnerArgs.initialize(BSON("replSetDeclareElectionWinner" << 1 << "setName"
+ << "rs0"
+ << "term" << 2 << "winnerId" << 30));
+ long long responseTerm;
+ ASSERT(getTopoCoord().updateTerm(winnerArgs.getTerm()));
+ ASSERT_OK(getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs, &responseTerm));
+ ASSERT_EQUALS(2, responseTerm);
+
+ // stale term
+ ReplSetRequestVotesArgs args3;
+ args3.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "term" << 1LL << "candidateId" << 20LL << "configVersion" << 1LL
+ << "lastCommittedOp" << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response3;
+
+ getTopoCoord().processReplSetRequestVotes(args3, &response3, lastAppliedOpTime);
+ ASSERT_EQUALS("candidate's term is lower than mine", response3.getReason());
+ ASSERT_EQUALS(2, response3.getTerm());
+ ASSERT_FALSE(response3.getVoteGranted());
+
+ // stale OpTime
+ ReplSetRequestVotesArgs args4;
+ args4.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "term" << 3LL << "candidateId" << 20LL << "configVersion" << 1LL
+ << "lastCommittedOp" << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response4;
+ OpTime lastAppliedOpTime2 = {Timestamp(20, 0), 0};
+
+ getTopoCoord().processReplSetRequestVotes(args4, &response4, lastAppliedOpTime2);
+ ASSERT_EQUALS("candidate's data is staler than mine", response4.getReason());
+ ASSERT_FALSE(response4.getVoteGranted());
+}
+
+TEST_F(TopoCoordTest, ProcessRequestVotesBadCommandsDryRun) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+ // set term to 1
+ ASSERT(getTopoCoord().updateTerm(1));
+ // and make sure we voted in term 1
+ ReplSetRequestVotesArgs argsForRealVote;
+ argsForRealVote.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "term" << 1LL << "candidateId" << 10LL << "configVersion"
+ << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse responseForRealVote;
+ OpTime lastAppliedOpTime;
+
+ getTopoCoord().processReplSetRequestVotes(
+ argsForRealVote, &responseForRealVote, lastAppliedOpTime);
+ ASSERT_EQUALS("", responseForRealVote.getReason());
+ ASSERT_TRUE(responseForRealVote.getVoteGranted());
+
+
+ // mismatched setName
+ ReplSetRequestVotesArgs args;
+ args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "wrongName"
+ << "dryRun" << true << "term" << 2LL << "candidateId"
+ << 10LL << "configVersion" << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response;
+
+ getTopoCoord().processReplSetRequestVotes(args, &response, lastAppliedOpTime);
+ ASSERT_EQUALS("candidate's set name differs from mine", response.getReason());
+ ASSERT_EQUALS(1, response.getTerm());
+ ASSERT_FALSE(response.getVoteGranted());
+
+ // mismatched configVersion
+ ReplSetRequestVotesArgs args2;
+ args2.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "dryRun" << true << "term" << 2LL << "candidateId" << 20LL
+ << "configVersion" << 0LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response2;
+
+ getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
+ ASSERT_EQUALS("candidate's config version differs from mine", response2.getReason());
+ ASSERT_EQUALS(1, response2.getTerm());
+ ASSERT_FALSE(response2.getVoteGranted());
+
+ // stale term
+ ReplSetRequestVotesArgs args3;
+ args3.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "dryRun" << true << "term" << 0LL << "candidateId" << 20LL
+ << "configVersion" << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response3;
+
+ getTopoCoord().processReplSetRequestVotes(args3, &response3, lastAppliedOpTime);
+ ASSERT_EQUALS("candidate's term is lower than mine", response3.getReason());
+ ASSERT_EQUALS(1, response3.getTerm());
+ ASSERT_FALSE(response3.getVoteGranted());
+
+ // repeat term
+ ReplSetRequestVotesArgs args4;
+ args4.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "dryRun" << true << "term" << 1LL << "candidateId" << 20LL
+ << "configVersion" << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response4;
+
+ getTopoCoord().processReplSetRequestVotes(args4, &response4, lastAppliedOpTime);
+ ASSERT_EQUALS("", response4.getReason());
+ ASSERT_EQUALS(1, response4.getTerm());
+ ASSERT_TRUE(response4.getVoteGranted());
+
+ // stale OpTime
+ ReplSetRequestVotesArgs args5;
+ args5.initialize(BSON("replSetRequestVotes"
+ << 1 << "setName"
+ << "rs0"
+ << "dryRun" << true << "term" << 3LL << "candidateId" << 20LL
+ << "configVersion" << 1LL << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ ReplSetRequestVotesResponse response5;
+ OpTime lastAppliedOpTime2 = {Timestamp(20, 0), 0};
+
+ getTopoCoord().processReplSetRequestVotes(args5, &response5, lastAppliedOpTime2);
+ ASSERT_EQUALS("candidate's data is staler than mine", response5.getReason());
+ ASSERT_EQUALS(1, response5.getTerm());
+ ASSERT_FALSE(response5.getVoteGranted());
+}
+
+TEST_F(TopoCoordTest, ProcessDeclareElectionWinner) {
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 10 << "host"
+ << "hself")
+ << BSON("_id" << 20 << "host"
+ << "h2") << BSON("_id" << 30 << "host"
+ << "h3"))),
+ 0);
+ setSelfMemberState(MemberState::RS_SECONDARY);
+
+ // successful
+ ReplSetDeclareElectionWinnerArgs winnerArgs;
+ winnerArgs.initialize(BSON("replSetDeclareElectionWinner" << 1 << "setName"
+ << "rs0"
+ << "term" << 2 << "winnerId" << 30));
+ long long responseTerm = -1;
+ ASSERT(getTopoCoord().updateTerm(winnerArgs.getTerm()));
+ ASSERT_OK(getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs, &responseTerm));
+ ASSERT_EQUALS(2, responseTerm);
+
+ // repeat, should be problem free
+ ReplSetDeclareElectionWinnerArgs winnerArgs2;
+ winnerArgs2.initialize(BSON("replSetDeclareElectionWinner" << 1 << "setName"
+ << "rs0"
+ << "term" << 2 << "winnerId" << 30));
+ long long responseTerm2 = -1;
+ ASSERT_OK(getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs2, &responseTerm2));
+ ASSERT_EQUALS(2, responseTerm2);
+
+ // same term, different primary, should fail
+ ReplSetDeclareElectionWinnerArgs winnerArgs3;
+ winnerArgs3.initialize(BSON("replSetDeclareElectionWinner" << 1 << "setName"
+ << "rs0"
+ << "term" << 2 << "winnerId" << 20));
+ long long responseTerm3 = -1;
+ ASSERT_EQUALS(
+ "term already has a primary",
+ getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs3, &responseTerm3).reason());
+ ASSERT_EQUALS(2, responseTerm3);
+
+ // stale term, should fail
+ ReplSetDeclareElectionWinnerArgs winnerArgs4;
+ winnerArgs4.initialize(BSON("replSetDeclareElectionWinner" << 1 << "setName"
+ << "rs0"
+ << "term" << 0 << "winnerId" << 20));
+ long long responseTerm4 = -1;
+ ASSERT_EQUALS(
+ "term has already passed",
+ getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs4, &responseTerm4).reason());
+ ASSERT_EQUALS(2, responseTerm4);
+
+ // wrong setName
+ ReplSetDeclareElectionWinnerArgs winnerArgs5;
+ winnerArgs5.initialize(BSON("replSetDeclareElectionWinner" << 1 << "setName"
+ << "wrongName"
+ << "term" << 3 << "winnerId" << 20));
+ long long responseTerm5 = -1;
+ ASSERT_EQUALS(
+ "replSet name does not match",
+ getTopoCoord().processReplSetDeclareElectionWinner(winnerArgs5, &responseTerm5).reason());
+ ASSERT_EQUALS(2, responseTerm5);
+}
} // namespace
} // namespace repl
diff --git a/src/mongo/db/repl/update_position_args.cpp b/src/mongo/db/repl/update_position_args.cpp
index bce62dff3dc..4265efc0586 100644
--- a/src/mongo/db/repl/update_position_args.cpp
+++ b/src/mongo/db/repl/update_position_args.cpp
@@ -39,109 +39,106 @@ namespace mongo {
namespace repl {
- UpdatePositionArgs::UpdateInfo::UpdateInfo(
- const OID& anRid, const OpTime& aTs, long long aCfgver, long long aMemberId)
- : rid(anRid), ts(aTs), cfgver(aCfgver), memberId(aMemberId) {}
+UpdatePositionArgs::UpdateInfo::UpdateInfo(const OID& anRid,
+ const OpTime& aTs,
+ long long aCfgver,
+ long long aMemberId)
+ : rid(anRid), ts(aTs), cfgver(aCfgver), memberId(aMemberId) {}
namespace {
- const std::string kCommandFieldName = "replSetUpdatePosition";
- const std::string kUpdateArrayFieldName = "optimes";
-
- const std::string kLegalUpdatePositionFieldNames[] = {
- kCommandFieldName,
- kUpdateArrayFieldName,
- };
-
- const std::string kMemberRIDFieldName = "_id";
- const std::string kMemberConfigFieldName = "config";
- const std::string kOpTimeFieldName = "optime";
- const std::string kMemberIdFieldName = "memberId";
- const std::string kConfigVersionFieldName = "cfgver";
-
- const std::string kLegalUpdateInfoFieldNames[] = {
- kMemberConfigFieldName,
- kMemberRIDFieldName,
- kOpTimeFieldName,
- kMemberIdFieldName,
- kConfigVersionFieldName,
- };
-
-} // namespace
+const std::string kCommandFieldName = "replSetUpdatePosition";
+const std::string kUpdateArrayFieldName = "optimes";
+
+const std::string kLegalUpdatePositionFieldNames[] = {
+ kCommandFieldName, kUpdateArrayFieldName,
+};
+
+const std::string kMemberRIDFieldName = "_id";
+const std::string kMemberConfigFieldName = "config";
+const std::string kOpTimeFieldName = "optime";
+const std::string kMemberIdFieldName = "memberId";
+const std::string kConfigVersionFieldName = "cfgver";
+
+const std::string kLegalUpdateInfoFieldNames[] = {
+ kMemberConfigFieldName,
+ kMemberRIDFieldName,
+ kOpTimeFieldName,
+ kMemberIdFieldName,
+ kConfigVersionFieldName,
+};
+
+} // namespace
+
+Status UpdatePositionArgs::initialize(const BSONObj& argsObj) {
+ Status status =
+ bsonCheckOnlyHasFields("UpdatePositionArgs", argsObj, kLegalUpdatePositionFieldNames);
+
+ if (!status.isOK())
+ return status;
+
+ // grab the array of changes
+ BSONElement updateArray;
+ status = bsonExtractTypedField(argsObj, kUpdateArrayFieldName, Array, &updateArray);
+ if (!status.isOK())
+ return status;
+
+ // now parse each array entry into an update
+ BSONObjIterator i(updateArray.Obj());
+ while (i.more()) {
+ BSONObj entry = i.next().Obj();
+ status = bsonCheckOnlyHasFields("UpdateInfoArgs", entry, kLegalUpdateInfoFieldNames);
+ if (!status.isOK())
+ return status;
- Status UpdatePositionArgs::initialize(const BSONObj& argsObj) {
- Status status = bsonCheckOnlyHasFields("UpdatePositionArgs",
- argsObj,
- kLegalUpdatePositionFieldNames);
+ Timestamp ts;
+ status = bsonExtractTimestampField(entry, kOpTimeFieldName, &ts);
+ if (!status.isOK())
+ return status;
+        // TODO(spencer): The following three fields are optional in 3.0, but should be made
+        // required or ignored in a later release
+ long long cfgver;
+ status = bsonExtractIntegerFieldWithDefault(entry, kConfigVersionFieldName, -1, &cfgver);
if (!status.isOK())
return status;
- // grab the array of changes
- BSONElement updateArray;
- status = bsonExtractTypedField(argsObj, kUpdateArrayFieldName, Array, &updateArray);
+ OID rid;
+ status = bsonExtractOIDFieldWithDefault(entry, kMemberRIDFieldName, OID(), &rid);
if (!status.isOK())
return status;
- // now parse each array entry into an update
- BSONObjIterator i(updateArray.Obj());
- while(i.more()) {
- BSONObj entry = i.next().Obj();
- status = bsonCheckOnlyHasFields("UpdateInfoArgs",
- entry,
- kLegalUpdateInfoFieldNames);
- if (!status.isOK())
- return status;
-
- Timestamp ts;
- status = bsonExtractTimestampField(entry, kOpTimeFieldName, &ts);
- if (!status.isOK())
- return status;
-
- // TODO(spencer): The following three fields are optional in 3.0, but should be made
- // required or ignored in 3.0
- long long cfgver;
- status = bsonExtractIntegerFieldWithDefault(entry, kConfigVersionFieldName, -1, &cfgver);
- if (!status.isOK())
- return status;
-
- OID rid;
- status = bsonExtractOIDFieldWithDefault(entry, kMemberRIDFieldName, OID(), &rid);
- if (!status.isOK())
- return status;
-
- long long memberID;
- status = bsonExtractIntegerFieldWithDefault(entry, kMemberIdFieldName, -1, &memberID);
- if (!status.isOK())
- return status;
-
- // TODO(siyuan) parse and fill term whem adding it to update position command.
- _updates.push_back(UpdateInfo(rid, OpTime(ts, 0), cfgver, memberID));
- }
+ long long memberID;
+ status = bsonExtractIntegerFieldWithDefault(entry, kMemberIdFieldName, -1, &memberID);
+ if (!status.isOK())
+ return status;
- return Status::OK();
+        // TODO(siyuan) parse and fill term when adding it to update position command.
+ _updates.push_back(UpdateInfo(rid, OpTime(ts, 0), cfgver, memberID));
}
- BSONObj UpdatePositionArgs::toBSON() const {
- BSONObjBuilder builder;
- // add command name
- builder.append(kCommandFieldName, 1);
-
- // build array of updates
- if (!_updates.empty()) {
- BSONArrayBuilder updateArray(builder.subarrayStart(kUpdateArrayFieldName));
- for (UpdatePositionArgs::UpdateIterator update = updatesBegin();
- update != updatesEnd();
- ++update) {
- updateArray.append(BSON(kMemberRIDFieldName << update->rid <<
- kOpTimeFieldName << update->ts.getTimestamp() <<
- kConfigVersionFieldName << update->cfgver <<
- kMemberIdFieldName << update->memberId));
- }
- updateArray.doneFast();
+ return Status::OK();
+}
+
+BSONObj UpdatePositionArgs::toBSON() const {
+ BSONObjBuilder builder;
+ // add command name
+ builder.append(kCommandFieldName, 1);
+
+ // build array of updates
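+    // e.g. {replSetUpdatePosition: 1, optimes: [{_id: <rid>, optime: <ts>, cfgver: <version>, memberId: <id>}, ...]}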
+ if (!_updates.empty()) {
+ BSONArrayBuilder updateArray(builder.subarrayStart(kUpdateArrayFieldName));
+ for (UpdatePositionArgs::UpdateIterator update = updatesBegin(); update != updatesEnd();
+ ++update) {
+ updateArray.append(BSON(kMemberRIDFieldName << update->rid << kOpTimeFieldName
+ << update->ts.getTimestamp()
+ << kConfigVersionFieldName << update->cfgver
+ << kMemberIdFieldName << update->memberId));
}
- return builder.obj();
+ updateArray.doneFast();
}
+ return builder.obj();
+}
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/update_position_args.h b/src/mongo/db/repl/update_position_args.h
index a3450163d56..ecaf9ec5d4e 100644
--- a/src/mongo/db/repl/update_position_args.h
+++ b/src/mongo/db/repl/update_position_args.h
@@ -35,49 +35,54 @@
namespace mongo {
- class Status;
+class Status;
namespace repl {
- /**
- * Arguments to the handshake command.
- */
- class UpdatePositionArgs {
- public:
- struct UpdateInfo {
- UpdateInfo(const OID& anRid, const OpTime& aTs, long long aCfgver, long long aMemberId);
+/**
+ * Arguments to the handshake command.
+ */
+class UpdatePositionArgs {
+public:
+ struct UpdateInfo {
+ UpdateInfo(const OID& anRid, const OpTime& aTs, long long aCfgver, long long aMemberId);
- OID rid;
- OpTime ts;
- long long cfgver;
- long long memberId;
- };
+ OID rid;
+ OpTime ts;
+ long long cfgver;
+ long long memberId;
+ };
- typedef std::vector<UpdateInfo>::const_iterator UpdateIterator;
+ typedef std::vector<UpdateInfo>::const_iterator UpdateIterator;
- /**
- * Initializes this UpdatePositionArgs from the contents of "argsObj".
- */
- Status initialize(const BSONObj& argsObj);
+ /**
+ * Initializes this UpdatePositionArgs from the contents of "argsObj".
+ */
+ Status initialize(const BSONObj& argsObj);
- /**
- * Gets a begin iterator over the UpdateInfos stored in this UpdatePositionArgs.
- */
- UpdateIterator updatesBegin() const { return _updates.begin(); }
+ /**
+ * Gets a begin iterator over the UpdateInfos stored in this UpdatePositionArgs.
+ */
+ UpdateIterator updatesBegin() const {
+ return _updates.begin();
+ }
- /**
- * Gets an end iterator over the UpdateInfos stored in this UpdatePositionArgs.
- */
- UpdateIterator updatesEnd() const { return _updates.end(); }
+ /**
+ * Gets an end iterator over the UpdateInfos stored in this UpdatePositionArgs.
+ */
+ UpdateIterator updatesEnd() const {
+ return _updates.end();
+ }
- /**
- * Returns a BSONified version of the object.
- * _updates is only included if it is not empty.
- */
- BSONObj toBSON() const;
- private:
- std::vector<UpdateInfo> _updates;
- };
+ /**
+ * Returns a BSONified version of the object.
+ * _updates is only included if it is not empty.
+ */
+ BSONObj toBSON() const;
+
+private:
+ std::vector<UpdateInfo> _updates;
+};
-} // namespace repl
-} // namespace mongo
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/vote_requester.cpp b/src/mongo/db/repl/vote_requester.cpp
index 81691edf5d7..c23bd7d6916 100644
--- a/src/mongo/db/repl/vote_requester.cpp
+++ b/src/mongo/db/repl/vote_requester.cpp
@@ -42,128 +42,115 @@
namespace mongo {
namespace repl {
- VoteRequester::Algorithm::Algorithm(const ReplicaSetConfig& rsConfig,
- long long candidateId,
- long long term,
- bool dryRun,
- OpTime lastOplogEntry) :
- _rsConfig(rsConfig),
- _candidateId(candidateId),
- _term(term),
- _dryRun(dryRun),
- _lastOplogEntry(lastOplogEntry) {
-
- // populate targets with all voting members that aren't this node
- for (auto member = _rsConfig.membersBegin(); member != _rsConfig.membersEnd(); member++) {
- if (member->isVoter() && member->getId() != candidateId) {
- _targets.push_back(member->getHostAndPort());
- }
+VoteRequester::Algorithm::Algorithm(const ReplicaSetConfig& rsConfig,
+ long long candidateId,
+ long long term,
+ bool dryRun,
+ OpTime lastOplogEntry)
+ : _rsConfig(rsConfig),
+ _candidateId(candidateId),
+ _term(term),
+ _dryRun(dryRun),
+ _lastOplogEntry(lastOplogEntry) {
+ // populate targets with all voting members that aren't this node
+ for (auto member = _rsConfig.membersBegin(); member != _rsConfig.membersEnd(); member++) {
+ if (member->isVoter() && member->getId() != candidateId) {
+ _targets.push_back(member->getHostAndPort());
}
}
-
- VoteRequester::Algorithm::~Algorithm() {}
-
- std::vector<RemoteCommandRequest>
- VoteRequester::Algorithm::getRequests() const {
- BSONObjBuilder requestVotesCmdBuilder;
- requestVotesCmdBuilder.append("replSetRequestVotes", 1);
- requestVotesCmdBuilder.append("setName", _rsConfig.getReplSetName());
- requestVotesCmdBuilder.append("dryRun", _dryRun);
- requestVotesCmdBuilder.append("term", _term);
- requestVotesCmdBuilder.append("candidateId", _candidateId);
- requestVotesCmdBuilder.append("configVersion", _rsConfig.getConfigVersion());
-
- BSONObjBuilder lastCommittedOp(requestVotesCmdBuilder.subobjStart("lastCommittedOp"));
- lastCommittedOp.append("ts", _lastOplogEntry.getTimestamp());
- lastCommittedOp.append("term", _lastOplogEntry.getTerm());
- lastCommittedOp.done();
-
- const BSONObj requestVotesCmd = requestVotesCmdBuilder.obj();
-
- std::vector<RemoteCommandRequest> requests;
- for (const auto& target : _targets) {
- requests.push_back(RemoteCommandRequest(
- target,
- "admin",
- requestVotesCmd,
- Milliseconds(30*1000))); // trying to match current Socket timeout
- }
-
- return requests;
+}
+
+VoteRequester::Algorithm::~Algorithm() {}
+
+std::vector<RemoteCommandRequest> VoteRequester::Algorithm::getRequests() const {
+ BSONObjBuilder requestVotesCmdBuilder;
+ requestVotesCmdBuilder.append("replSetRequestVotes", 1);
+ requestVotesCmdBuilder.append("setName", _rsConfig.getReplSetName());
+ requestVotesCmdBuilder.append("dryRun", _dryRun);
+ requestVotesCmdBuilder.append("term", _term);
+ requestVotesCmdBuilder.append("candidateId", _candidateId);
+ requestVotesCmdBuilder.append("configVersion", _rsConfig.getConfigVersion());
+
+ BSONObjBuilder lastCommittedOp(requestVotesCmdBuilder.subobjStart("lastCommittedOp"));
+ lastCommittedOp.append("ts", _lastOplogEntry.getTimestamp());
+ lastCommittedOp.append("term", _lastOplogEntry.getTerm());
+ lastCommittedOp.done();
+
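+    // The assembled command looks like (values illustrative):
+    // {replSetRequestVotes: 1, setName: <name>, dryRun: <bool>, term: <term>, candidateId: <id>,
+    //  configVersion: <version>, lastCommittedOp: {ts: <ts>, term: <term>}}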
+ const BSONObj requestVotesCmd = requestVotesCmdBuilder.obj();
+
+ std::vector<RemoteCommandRequest> requests;
+ for (const auto& target : _targets) {
+ requests.push_back(RemoteCommandRequest(
+ target,
+ "admin",
+ requestVotesCmd,
+ Milliseconds(30 * 1000))); // trying to match current Socket timeout
}
- void VoteRequester::Algorithm::processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response) {
- _responsesProcessed++;
- if (!response.isOK()) { // failed response
- log() << "VoteRequester: Got failed response from " << request.target
- << ": " << response.getStatus();
- }
- else {
- ReplSetRequestVotesResponse voteResponse;
- voteResponse.initialize(response.getValue().data);
- if (voteResponse.getVoteGranted()) {
- _votes++;
- }
- else {
- log() << "VoteRequester: Got no vote from " << request.target
- << " because: " << voteResponse.getReason();
- }
-
- if (voteResponse.getTerm() > _term) {
- _staleTerm = true;
- }
+ return requests;
+}
+
+void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response) {
+ _responsesProcessed++;
+ if (!response.isOK()) { // failed response
+ log() << "VoteRequester: Got failed response from " << request.target << ": "
+ << response.getStatus();
+ } else {
+ ReplSetRequestVotesResponse voteResponse;
+ voteResponse.initialize(response.getValue().data);
+ if (voteResponse.getVoteGranted()) {
+ _votes++;
+ } else {
+ log() << "VoteRequester: Got no vote from " << request.target
+ << " because: " << voteResponse.getReason();
}
- }
- bool VoteRequester::Algorithm::hasReceivedSufficientResponses() const {
- return _staleTerm ||
- _votes == _rsConfig.getMajorityVoteCount() ||
- _responsesProcessed == static_cast<int>(_targets.size());
- }
-
- VoteRequester::VoteRequestResult VoteRequester::Algorithm::getResult() const {
- if (_staleTerm) {
- return StaleTerm;
- }
- else if (_votes >= _rsConfig.getMajorityVoteCount()) {
- return SuccessfullyElected;
+ if (voteResponse.getTerm() > _term) {
+ _staleTerm = true;
}
- else {
- return InsufficientVotes;
- }
- }
-
- VoteRequester::VoteRequester() : _isCanceled(false) {}
- VoteRequester::~VoteRequester() {}
-
- StatusWith<ReplicationExecutor::EventHandle> VoteRequester::start(
- ReplicationExecutor* executor,
- const ReplicaSetConfig& rsConfig,
- long long candidateId,
- long long term,
- bool dryRun,
- OpTime lastOplogEntry,
- const stdx::function<void ()>& onCompletion) {
-
- _algorithm.reset(new Algorithm(rsConfig,
- candidateId,
- term,
- dryRun,
- lastOplogEntry));
- _runner.reset(new ScatterGatherRunner(_algorithm.get()));
- return _runner->start(executor, onCompletion);
}
-
- void VoteRequester::cancel(ReplicationExecutor* executor) {
- _isCanceled = true;
- _runner->cancel(executor);
+}
+
+bool VoteRequester::Algorithm::hasReceivedSufficientResponses() const {
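+    // Done once the term is known to be stale, a majority has voted yes, or every target has
+    // responded.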
+ return _staleTerm || _votes == _rsConfig.getMajorityVoteCount() ||
+ _responsesProcessed == static_cast<int>(_targets.size());
+}
+
+VoteRequester::VoteRequestResult VoteRequester::Algorithm::getResult() const {
+ if (_staleTerm) {
+ return StaleTerm;
+ } else if (_votes >= _rsConfig.getMajorityVoteCount()) {
+ return SuccessfullyElected;
+ } else {
+ return InsufficientVotes;
}
-
- VoteRequester::VoteRequestResult VoteRequester::getResult() const {
- return _algorithm->getResult();
- }
-
-} // namespace repl
-} // namespace mongo
+}
+
+VoteRequester::VoteRequester() : _isCanceled(false) {}
+VoteRequester::~VoteRequester() {}
+
+StatusWith<ReplicationExecutor::EventHandle> VoteRequester::start(
+ ReplicationExecutor* executor,
+ const ReplicaSetConfig& rsConfig,
+ long long candidateId,
+ long long term,
+ bool dryRun,
+ OpTime lastOplogEntry,
+ const stdx::function<void()>& onCompletion) {
+ _algorithm.reset(new Algorithm(rsConfig, candidateId, term, dryRun, lastOplogEntry));
+ _runner.reset(new ScatterGatherRunner(_algorithm.get()));
+ return _runner->start(executor, onCompletion);
+}
+
+void VoteRequester::cancel(ReplicationExecutor* executor) {
+ _isCanceled = true;
+ _runner->cancel(executor);
+}
+
+VoteRequester::VoteRequestResult VoteRequester::getResult() const {
+ return _algorithm->getResult();
+}
+
+} // namespace repl
+} // namespace mongo
diff --git a/src/mongo/db/repl/vote_requester.h b/src/mongo/db/repl/vote_requester.h
index a5102192ebd..8000d12ddb8 100644
--- a/src/mongo/db/repl/vote_requester.h
+++ b/src/mongo/db/repl/vote_requester.h
@@ -41,92 +41,91 @@
namespace mongo {
- class Status;
+class Status;
namespace repl {
- class ScatterGatherRunner;
- class ReplSetDeclareRequestVotesArgs;
+class ScatterGatherRunner;
+class ReplSetDeclareRequestVotesArgs;
- class VoteRequester {
- MONGO_DISALLOW_COPYING(VoteRequester);
- public:
+class VoteRequester {
+ MONGO_DISALLOW_COPYING(VoteRequester);
- enum VoteRequestResult {
- SuccessfullyElected,
- StaleTerm,
- InsufficientVotes,
- };
-
- class Algorithm : public ScatterGatherAlgorithm {
- public:
- Algorithm(const ReplicaSetConfig& rsConfig,
- long long candidateId,
- long long term,
- bool dryRun,
- OpTime lastOplogEntry);
- virtual ~Algorithm();
- virtual std::vector<RemoteCommandRequest> getRequests() const;
- virtual void processResponse(
- const RemoteCommandRequest& request,
- const ResponseStatus& response);
- virtual bool hasReceivedSufficientResponses() const;
-
- /**
- * Returns a VoteRequestResult indicating the result of the election.
- *
- * It is invalid to call this before hasReceivedSufficientResponses returns true.
- */
- VoteRequestResult getResult() const;
-
- private:
- const ReplicaSetConfig _rsConfig;
- const long long _candidateId;
- const long long _term;
- bool _dryRun = false; // this bool indicates this is a mock election when true
- const OpTime _lastOplogEntry;
- std::vector<HostAndPort> _targets;
- bool _staleTerm = false;
- long long _responsesProcessed = 0;
- long long _votes = 1;
- };
-
- VoteRequester();
- virtual ~VoteRequester();
+public:
+ enum VoteRequestResult {
+ SuccessfullyElected,
+ StaleTerm,
+ InsufficientVotes,
+ };
- /**
- * Begins the process of sending replSetRequestVotes commands to all non-DOWN nodes
- * in currentConfig, in attempt to receive sufficient votes to win the election.
- *
- * evh can be used to schedule a callback when the process is complete.
- * This function must be run in the executor, as it must be synchronous with the command
- * callbacks that it schedules.
- * If this function returns Status::OK(), evh is then guaranteed to be signaled.
- **/
- StatusWith<ReplicationExecutor::EventHandle> start(
- ReplicationExecutor* executor,
- const ReplicaSetConfig& rsConfig,
- long long candidateId,
- long long term,
- bool dryRun,
- OpTime lastOplogEntry,
- const stdx::function<void ()>& onCompletion = stdx::function<void ()>());
+ class Algorithm : public ScatterGatherAlgorithm {
+ public:
+ Algorithm(const ReplicaSetConfig& rsConfig,
+ long long candidateId,
+ long long term,
+ bool dryRun,
+ OpTime lastOplogEntry);
+ virtual ~Algorithm();
+ virtual std::vector<RemoteCommandRequest> getRequests() const;
+ virtual void processResponse(const RemoteCommandRequest& request,
+ const ResponseStatus& response);
+ virtual bool hasReceivedSufficientResponses() const;
/**
- * Informs the VoteRequester to cancel further processing. The "executor"
- * argument must point to the same executor passed to "start()".
+ * Returns a VoteRequestResult indicating the result of the election.
*
- * Like start(), this method must run in the executor context.
+ * It is invalid to call this before hasReceivedSufficientResponses returns true.
*/
- void cancel(ReplicationExecutor* executor);
-
VoteRequestResult getResult() const;
private:
- std::unique_ptr<Algorithm> _algorithm;
- std::unique_ptr<ScatterGatherRunner> _runner;
- bool _isCanceled = false;
+ const ReplicaSetConfig _rsConfig;
+ const long long _candidateId;
+ const long long _term;
+    bool _dryRun = false;  // when true, this is a mock (dry-run) election
+ const OpTime _lastOplogEntry;
+ std::vector<HostAndPort> _targets;
+ bool _staleTerm = false;
+ long long _responsesProcessed = 0;
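+    // starts at 1, effectively counting the candidate's own vote; _targets excludes the candidate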
+ long long _votes = 1;
};
+ VoteRequester();
+ virtual ~VoteRequester();
+
+ /**
+ * Begins the process of sending replSetRequestVotes commands to all non-DOWN nodes
+     * in currentConfig, in an attempt to receive sufficient votes to win the election.
+ *
+ * evh can be used to schedule a callback when the process is complete.
+ * This function must be run in the executor, as it must be synchronous with the command
+ * callbacks that it schedules.
+ * If this function returns Status::OK(), evh is then guaranteed to be signaled.
+ **/
+ StatusWith<ReplicationExecutor::EventHandle> start(
+ ReplicationExecutor* executor,
+ const ReplicaSetConfig& rsConfig,
+ long long candidateId,
+ long long term,
+ bool dryRun,
+ OpTime lastOplogEntry,
+ const stdx::function<void()>& onCompletion = stdx::function<void()>());
+
+ /**
+ * Informs the VoteRequester to cancel further processing. The "executor"
+ * argument must point to the same executor passed to "start()".
+ *
+ * Like start(), this method must run in the executor context.
+ */
+ void cancel(ReplicationExecutor* executor);
+
+ VoteRequestResult getResult() const;
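+
+    // Typical use (sketch): run start() in the executor context, wait for the returned event
+    // handle to be signaled, then call getResult(); call cancel() to abandon the request early.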
+
+private:
+ std::unique_ptr<Algorithm> _algorithm;
+ std::unique_ptr<ScatterGatherRunner> _runner;
+ bool _isCanceled = false;
+};
+
} // namespace repl
} // namespace mongo
diff --git a/src/mongo/db/repl/vote_requester_test.cpp b/src/mongo/db/repl/vote_requester_test.cpp
index d5b9d9ce3d5..fb5fb8d757f 100644
--- a/src/mongo/db/repl/vote_requester_test.cpp
+++ b/src/mongo/db/repl/vote_requester_test.cpp
@@ -44,345 +44,348 @@ namespace mongo {
namespace repl {
namespace {
- using executor::NetworkInterfaceMock;
- using unittest::assertGet;
-
- using RemoteCommandRequest = RemoteCommandRequest;
-
- bool stringContains(const std::string &haystack, const std::string& needle) {
- return haystack.find(needle) != std::string::npos;
- }
-
-
- class VoteRequesterTest : public mongo::unittest::Test {
- public:
- virtual void setUp() {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host0") <<
- BSON("_id" << 1 << "host" << "host1") <<
- BSON("_id" << 2 << "host" << "host2") <<
- BSON("_id" << 3 << "host" << "host3" << "votes" << 0) <<
- BSON("_id" << 4 << "host" << "host4" << "votes" << 0)))));
- ASSERT_OK(config.validate());
- long long candidateId = 0;
- long long term = 2;
- OpTime lastOplogEntry = OpTime(Timestamp(999,0), 1);
-
- _requester.reset(new VoteRequester::Algorithm(config,
- candidateId,
- term,
- false, // not a dryRun
- lastOplogEntry));
- }
-
- virtual void tearDown() {
- _requester.reset(NULL);
- }
-
- protected:
- int64_t countLogLinesContaining(const std::string& needle) {
- return std::count_if(getCapturedLogMessages().begin(),
- getCapturedLogMessages().end(),
- stdx::bind(stringContains,
- stdx::placeholders::_1,
- needle));
- }
-
- bool hasReceivedSufficientResponses() {
- return _requester->hasReceivedSufficientResponses();
- }
-
- void processResponse(const RemoteCommandRequest& request, const ResponseStatus& response) {
- _requester->processResponse(request, response);
- }
-
- VoteRequester::VoteRequestResult getResult() {
- return _requester->getResult();
- }
-
- RemoteCommandRequest requestFrom(std::string hostname) {
- return RemoteCommandRequest(HostAndPort(hostname),
- "", // fields do not matter in VoteRequester
- BSONObj(),
- Milliseconds(0));
- }
-
- ResponseStatus badResponseStatus() {
- return ResponseStatus(ErrorCodes::NodeNotFound, "not on my watch");
- }
-
- ResponseStatus votedYes() {
- ReplSetRequestVotesResponse response;
- response.setOk(true);
- response.setVoteGranted(true);
- response.setTerm(1);
- return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(),
- Milliseconds(10)));
- }
-
- ResponseStatus votedNoBecauseConfigVersionDoesNotMatch() {
- ReplSetRequestVotesResponse response;
- response.setOk(true);
- response.setVoteGranted(false);
- response.setTerm(1);
- response.setReason("candidate's config version differs from mine");
- return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(),
- Milliseconds(10)));
- }
-
- ResponseStatus votedNoBecauseSetNameDiffers() {
- ReplSetRequestVotesResponse response;
- response.setOk(true);
- response.setVoteGranted(false);
- response.setTerm(1);
- response.setReason("candidate's set name differs from mine");
- return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(),
- Milliseconds(10)));
- }
-
- ResponseStatus votedNoBecauseLastOpTimeIsGreater() {
- ReplSetRequestVotesResponse response;
- response.setOk(true);
- response.setVoteGranted(false);
- response.setTerm(1);
- response.setReason("candidate's data is staler than mine");
- return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(),
- Milliseconds(10)));
- }
-
- ResponseStatus votedNoBecauseTermIsGreater() {
- ReplSetRequestVotesResponse response;
- response.setOk(true);
- response.setVoteGranted(false);
- response.setTerm(3);
- response.setReason("candidate's term is lower than mine");
- return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(),
- Milliseconds(10)));
- }
-
- ResponseStatus votedNoBecauseAlreadyVoted() {
- ReplSetRequestVotesResponse response;
- response.setOk(true);
- response.setVoteGranted(false);
- response.setTerm(2);
- response.setReason("already voted for another candidate this term");
- return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(),
- Milliseconds(10)));
- }
-
- std::unique_ptr<VoteRequester::Algorithm> _requester;
- };
-
- class VoteRequesterDryRunTest : public VoteRequesterTest {
- public:
- virtual void setUp() {
- ReplicaSetConfig config;
- ASSERT_OK(config.initialize(
- BSON("_id" << "rs0" <<
- "version" << 2 <<
- "members" << BSON_ARRAY(
- BSON("_id" << 0 << "host" << "host0") <<
- BSON("_id" << 1 << "host" << "host1") <<
- BSON("_id" << 2 << "host" << "host2") <<
- BSON("_id" << 3 << "host" << "host3" << "votes" << 0) <<
- BSON("_id" << 4 << "host" << "host4" << "votes" << 0)))));
- ASSERT_OK(config.validate());
- long long candidateId = 0;
- long long term = 2;
- OpTime lastOplogEntry = OpTime(Timestamp(999,0), 1);
-
- _requester.reset(new VoteRequester::Algorithm(config,
- candidateId,
- term,
- true, // dryRun
- lastOplogEntry));
- }
-
- };
-
- TEST_F(VoteRequesterTest, ImmediateGoodResponseWinElection) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- }
-
- TEST_F(VoteRequesterTest, BadConfigVersionWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseConfigVersionDoesNotMatch());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+using executor::NetworkInterfaceMock;
+using unittest::assertGet;
+
+using RemoteCommandRequest = RemoteCommandRequest;
+
+bool stringContains(const std::string& haystack, const std::string& needle) {
+ return haystack.find(needle) != std::string::npos;
+}
+
+
+class VoteRequesterTest : public mongo::unittest::Test {
+public:
+ virtual void setUp() {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes" << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes" << 0)))));
+ ASSERT_OK(config.validate());
+ long long candidateId = 0;
+ long long term = 2;
+ OpTime lastOplogEntry = OpTime(Timestamp(999, 0), 1);
+
+ _requester.reset(new VoteRequester::Algorithm(config,
+ candidateId,
+ term,
+ false, // not a dryRun
+ lastOplogEntry));
}
- TEST_F(VoteRequesterTest, SetNameDiffersWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseSetNameDiffers());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+ virtual void tearDown() {
+ _requester.reset(NULL);
}
- TEST_F(VoteRequesterTest, LastOpTimeIsGreaterWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseLastOpTimeIsGreater());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+protected:
+ int64_t countLogLinesContaining(const std::string& needle) {
+ return std::count_if(getCapturedLogMessages().begin(),
+ getCapturedLogMessages().end(),
+ stdx::bind(stringContains, stdx::placeholders::_1, needle));
}
- TEST_F(VoteRequesterTest, FailedToContactWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), badResponseStatus());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+ bool hasReceivedSufficientResponses() {
+ return _requester->hasReceivedSufficientResponses();
}
- TEST_F(VoteRequesterTest, AlreadyVotedWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseAlreadyVoted());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+ void processResponse(const RemoteCommandRequest& request, const ResponseStatus& response) {
+ _requester->processResponse(request, response);
}
- TEST_F(VoteRequesterTest, StaleTermLoseElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseTermIsGreater());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::StaleTerm, getResult());
- stopCapturingLogMessages();
+ VoteRequester::VoteRequestResult getResult() {
+ return _requester->getResult();
}
- TEST_F(VoteRequesterTest, NotEnoughVotesLoseElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseSetNameDiffers());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), badResponseStatus());
- ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host2"));
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::InsufficientVotes, getResult());
- stopCapturingLogMessages();
+ RemoteCommandRequest requestFrom(std::string hostname) {
+ return RemoteCommandRequest(HostAndPort(hostname),
+ "", // fields do not matter in VoteRequester
+ BSONObj(),
+ Milliseconds(0));
}
- TEST_F(VoteRequesterDryRunTest, ImmediateGoodResponseWinElection) {
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ ResponseStatus badResponseStatus() {
+ return ResponseStatus(ErrorCodes::NodeNotFound, "not on my watch");
}
- TEST_F(VoteRequesterDryRunTest, BadConfigVersionWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseConfigVersionDoesNotMatch());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+ ResponseStatus votedYes() {
+ ReplSetRequestVotesResponse response;
+ response.setOk(true);
+ response.setVoteGranted(true);
+ response.setTerm(1);
+ return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(), Milliseconds(10)));
}
- TEST_F(VoteRequesterDryRunTest, SetNameDiffersWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseSetNameDiffers());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+ ResponseStatus votedNoBecauseConfigVersionDoesNotMatch() {
+ ReplSetRequestVotesResponse response;
+ response.setOk(true);
+ response.setVoteGranted(false);
+ response.setTerm(1);
+ response.setReason("candidate's config version differs from mine");
+ return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(), Milliseconds(10)));
}
- TEST_F(VoteRequesterDryRunTest, LastOpTimeIsGreaterWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseLastOpTimeIsGreater());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+ ResponseStatus votedNoBecauseSetNameDiffers() {
+ ReplSetRequestVotesResponse response;
+ response.setOk(true);
+ response.setVoteGranted(false);
+ response.setTerm(1);
+ response.setReason("candidate's set name differs from mine");
+ return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(), Milliseconds(10)));
}
- TEST_F(VoteRequesterDryRunTest, FailedToContactWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), badResponseStatus());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+ ResponseStatus votedNoBecauseLastOpTimeIsGreater() {
+ ReplSetRequestVotesResponse response;
+ response.setOk(true);
+ response.setVoteGranted(false);
+ response.setTerm(1);
+ response.setReason("candidate's data is staler than mine");
+ return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(), Milliseconds(10)));
}
- TEST_F(VoteRequesterDryRunTest, AlreadyVotedWinElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseAlreadyVoted());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), votedYes());
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
- stopCapturingLogMessages();
+ ResponseStatus votedNoBecauseTermIsGreater() {
+ ReplSetRequestVotesResponse response;
+ response.setOk(true);
+ response.setVoteGranted(false);
+ response.setTerm(3);
+ response.setReason("candidate's term is lower than mine");
+ return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(), Milliseconds(10)));
}
- TEST_F(VoteRequesterDryRunTest, StaleTermLoseElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseTermIsGreater());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::StaleTerm, getResult());
- stopCapturingLogMessages();
+ ResponseStatus votedNoBecauseAlreadyVoted() {
+ ReplSetRequestVotesResponse response;
+ response.setOk(true);
+ response.setVoteGranted(false);
+ response.setTerm(2);
+ response.setReason("already voted for another candidate this term");
+ return ResponseStatus(NetworkInterfaceMock::Response(response.toBSON(), Milliseconds(10)));
}
- TEST_F(VoteRequesterDryRunTest, NotEnoughVotesLoseElection) {
- startCapturingLogMessages();
- ASSERT_FALSE(hasReceivedSufficientResponses());
- processResponse(requestFrom("host1"), votedNoBecauseSetNameDiffers());
- ASSERT_FALSE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
- processResponse(requestFrom("host2"), badResponseStatus());
- ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host2"));
- ASSERT_TRUE(hasReceivedSufficientResponses());
- ASSERT_EQUALS(VoteRequester::InsufficientVotes, getResult());
- stopCapturingLogMessages();
+ std::unique_ptr<VoteRequester::Algorithm> _requester;
+};
+
+class VoteRequesterDryRunTest : public VoteRequesterTest {
+public:
+ virtual void setUp() {
+ ReplicaSetConfig config;
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version" << 2 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes" << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes" << 0)))));
+ ASSERT_OK(config.validate());
+ long long candidateId = 0;
+ long long term = 2;
+ OpTime lastOplogEntry = OpTime(Timestamp(999, 0), 1);
+
+ _requester.reset(new VoteRequester::Algorithm(config,
+ candidateId,
+ term,
+ true, // dryRun
+ lastOplogEntry));
}
+};
+
+TEST_F(VoteRequesterTest, ImmediateGoodResponseWinElection) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+}
+
+TEST_F(VoteRequesterTest, BadConfigVersionWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseConfigVersionDoesNotMatch());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterTest, SetNameDiffersWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseSetNameDiffers());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterTest, LastOpTimeIsGreaterWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseLastOpTimeIsGreater());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterTest, FailedToContactWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), badResponseStatus());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterTest, AlreadyVotedWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseAlreadyVoted());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterTest, StaleTermLoseElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseTermIsGreater());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::StaleTerm, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterTest, NotEnoughVotesLoseElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseSetNameDiffers());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), badResponseStatus());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host2"));
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::InsufficientVotes, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterDryRunTest, ImmediateGoodResponseWinElection) {
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+}
+
+TEST_F(VoteRequesterDryRunTest, BadConfigVersionWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseConfigVersionDoesNotMatch());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterDryRunTest, SetNameDiffersWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseSetNameDiffers());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterDryRunTest, LastOpTimeIsGreaterWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseLastOpTimeIsGreater());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterDryRunTest, FailedToContactWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), badResponseStatus());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterDryRunTest, AlreadyVotedWinElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseAlreadyVoted());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), votedYes());
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::SuccessfullyElected, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterDryRunTest, StaleTermLoseElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseTermIsGreater());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::StaleTerm, getResult());
+ stopCapturingLogMessages();
+}
+
+TEST_F(VoteRequesterDryRunTest, NotEnoughVotesLoseElection) {
+ startCapturingLogMessages();
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ processResponse(requestFrom("host1"), votedNoBecauseSetNameDiffers());
+ ASSERT_FALSE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got no vote from host1"));
+ processResponse(requestFrom("host2"), badResponseStatus());
+ ASSERT_EQUALS(1, countLogLinesContaining("Got failed response from host2"));
+ ASSERT_TRUE(hasReceivedSufficientResponses());
+ ASSERT_EQUALS(VoteRequester::InsufficientVotes, getResult());
+ stopCapturingLogMessages();
+}
} // namespace
} // namespace repl
diff --git a/src/mongo/db/resource.h b/src/mongo/db/resource.h
index b99544df86c..4b36987797a 100644
--- a/src/mongo/db/resource.h
+++ b/src/mongo/db/resource.h
@@ -30,15 +30,15 @@
// Microsoft Visual C++ generated include file.
// Used by db.rc
//
-#define IDI_ICON2 102
+#define IDI_ICON2 102
// Next default values for new objects
//
#ifdef APSTUDIO_INVOKED
#ifndef APSTUDIO_READONLY_SYMBOLS
-#define _APS_NEXT_RESOURCE_VALUE 104
-#define _APS_NEXT_COMMAND_VALUE 40001
-#define _APS_NEXT_CONTROL_VALUE 1001
-#define _APS_NEXT_SYMED_VALUE 101
+#define _APS_NEXT_RESOURCE_VALUE 104
+#define _APS_NEXT_COMMAND_VALUE 40001
+#define _APS_NEXT_CONTROL_VALUE 1001
+#define _APS_NEXT_SYMED_VALUE 101
#endif
#endif
diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp
index f977e3afeef..984b4e10fa6 100644
--- a/src/mongo/db/restapi.cpp
+++ b/src/mongo/db/restapi.cpp
@@ -52,274 +52,267 @@
namespace mongo {
- bool getInitialSyncCompleted();
+bool getInitialSyncCompleted();
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
- using std::endl;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+using std::endl;
+using std::vector;
- using namespace html;
+using namespace html;
- class RESTHandler : public DbWebHandler {
- public:
- RESTHandler() : DbWebHandler( "DUMMY REST" , 1000 , true ) {}
+class RESTHandler : public DbWebHandler {
+public:
+ RESTHandler() : DbWebHandler("DUMMY REST", 1000, true) {}
- virtual bool handles( const string& url ) const {
- return
- url[0] == '/' &&
- url.find_last_of( '/' ) > 0;
- }
-
- virtual void handle( OperationContext* txn,
- const char *rq, const std::string& url, BSONObj params,
- string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from ) {
+ virtual bool handles(const string& url) const {
+ return url[0] == '/' && url.find_last_of('/') > 0;
+ }
- DBDirectClient db( txn );
+ virtual void handle(OperationContext* txn,
+ const char* rq,
+ const std::string& url,
+ BSONObj params,
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers,
+ const SockAddr& from) {
+ DBDirectClient db(txn);
+
+ string::size_type first = url.find("/", 1);
+ if (first == string::npos) {
+ responseCode = 400;
+ return;
+ }
- string::size_type first = url.find( "/" , 1 );
- if ( first == string::npos ) {
- responseCode = 400;
- return;
- }
+ string method = MiniWebServer::parseMethod(rq);
+ string dbname = url.substr(1, first - 1);
+ string coll = url.substr(first + 1);
+ string action = "";
+
+ string::size_type last = coll.find_last_of("/");
+ if (last == string::npos) {
+ action = coll;
+ coll = "_defaultCollection";
+ } else {
+ action = coll.substr(last + 1);
+ coll = coll.substr(0, last);
+ }
- string method = MiniWebServer::parseMethod( rq );
- string dbname = url.substr( 1 , first - 1 );
- string coll = url.substr( first + 1 );
- string action = "";
+ for (string::size_type i = 0; i < coll.size(); i++)
+ if (coll[i] == '/')
+ coll[i] = '.';
- string::size_type last = coll.find_last_of( "/" );
- if ( last == string::npos ) {
- action = coll;
- coll = "_defaultCollection";
- }
- else {
- action = coll.substr( last + 1 );
- coll = coll.substr( 0 , last );
- }
+ string fullns = MiniWebServer::urlDecode(dbname + "." + coll);
- for ( string::size_type i=0; i<coll.size(); i++ )
- if ( coll[i] == '/' )
- coll[i] = '.';
+ headers.push_back((string) "x-action: " + action);
+ headers.push_back((string) "x-ns: " + fullns);
- string fullns = MiniWebServer::urlDecode(dbname + "." + coll);
+ bool html = false;
- headers.push_back( (string)"x-action: " + action );
- headers.push_back( (string)"x-ns: " + fullns );
+ stringstream ss;
- bool html = false;
+ if (method == "GET") {
+ responseCode = 200;
+ html = handleRESTQuery(txn, fullns, action, params, responseCode, ss);
+ } else if (method == "POST") {
+ responseCode = 201;
+ handlePost(txn, fullns, MiniWebServer::body(rq), params, responseCode, ss);
+ } else {
+ responseCode = 400;
+ headers.push_back("X_err: bad request");
+ ss << "don't know how to handle a [" << method << "]";
+ log() << "don't know how to handle a [" << method << "]" << endl;
+ }
- stringstream ss;
+ if (html)
+ headers.push_back("Content-Type: text/html;charset=utf-8");
+ else
+ headers.push_back("Content-Type: text/plain;charset=utf-8");
- if ( method == "GET" ) {
- responseCode = 200;
- html = handleRESTQuery(txn, fullns, action, params, responseCode, ss);
- }
- else if ( method == "POST" ) {
- responseCode = 201;
- handlePost(txn, fullns, MiniWebServer::body(rq), params, responseCode, ss);
- }
- else {
- responseCode = 400;
- headers.push_back( "X_err: bad request" );
- ss << "don't know how to handle a [" << method << "]";
- log() << "don't know how to handle a [" << method << "]" << endl;
- }
-
- if( html )
- headers.push_back("Content-Type: text/html;charset=utf-8");
- else
- headers.push_back("Content-Type: text/plain;charset=utf-8");
+ responseMsg = ss.str();
+ }
- responseMsg = ss.str();
+ bool handleRESTQuery(OperationContext* txn,
+ const std::string& ns,
+ const std::string& action,
+ BSONObj& params,
+ int& responseCode,
+ stringstream& out) {
+ Timer t;
+
+ int html = _getOption(params["html"], 0);
+ int skip = _getOption(params["skip"], 0);
+ int num = _getOption(params["limit"],
+ _getOption(params["count"], 1000)); // count is old, limit is new
+
+ int one = 0;
+ if (params["one"].type() == String && tolower(params["one"].valuestr()[0]) == 't') {
+ num = 1;
+ one = 1;
}
- bool handleRESTQuery( OperationContext* txn,
- const std::string& ns,
- const std::string& action,
- BSONObj & params,
- int & responseCode,
- stringstream & out ) {
- Timer t;
-
- int html = _getOption( params["html"] , 0 );
- int skip = _getOption( params["skip"] , 0 );
- int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new
-
- int one = 0;
- if ( params["one"].type() == String && tolower( params["one"].valuestr()[0] ) == 't' ) {
- num = 1;
- one = 1;
- }
+ BSONObjBuilder queryBuilder;
- BSONObjBuilder queryBuilder;
+ BSONObjIterator i(params);
+ while (i.more()) {
+ BSONElement e = i.next();
+ string name = e.fieldName();
+ if (name.find("filter_") != 0)
+ continue;
- BSONObjIterator i(params);
- while ( i.more() ) {
- BSONElement e = i.next();
- string name = e.fieldName();
- if ( name.find( "filter_" ) != 0 )
- continue;
+ string field = name.substr(7);
+ const char* val = e.valuestr();
- string field = name.substr(7);
- const char * val = e.valuestr();
+ char* temp;
- char * temp;
-
- // TODO: this is how i guess if something is a number. pretty lame right now
- double number = strtod( val , &temp );
- if ( temp != val )
- queryBuilder.append( field , number );
- else
- queryBuilder.append( field , val );
- }
+ // TODO: this is how i guess if something is a number. pretty lame right now
+ double number = strtod(val, &temp);
+ if (temp != val)
+ queryBuilder.append(field, number);
+ else
+ queryBuilder.append(field, val);
+ }
- BSONObj query = queryBuilder.obj();
+ BSONObj query = queryBuilder.obj();
- DBDirectClient db(txn);
- unique_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );
- uassert( 13085 , "query failed for dbwebserver" , cursor.get() );
+ DBDirectClient db(txn);
+ unique_ptr<DBClientCursor> cursor = db.query(ns.c_str(), query, num, skip);
+ uassert(13085, "query failed for dbwebserver", cursor.get());
- if ( one ) {
- if ( cursor->more() ) {
- BSONObj obj = cursor->next();
- out << obj.jsonString(Strict,html?1:0) << '\n';
- }
- else {
- responseCode = 404;
- }
- return html != 0;
+ if (one) {
+ if (cursor->more()) {
+ BSONObj obj = cursor->next();
+ out << obj.jsonString(Strict, html ? 1 : 0) << '\n';
+ } else {
+ responseCode = 404;
}
+ return html != 0;
+ }
- if( html ) {
- string title = string("query ") + ns;
- out << start(title)
- << p(title)
- << "<pre>";
- }
- else {
- out << "{\n";
- out << " \"offset\" : " << skip << ",\n";
- out << " \"rows\": [\n";
- }
+ if (html) {
+ string title = string("query ") + ns;
+ out << start(title) << p(title) << "<pre>";
+ } else {
+ out << "{\n";
+ out << " \"offset\" : " << skip << ",\n";
+ out << " \"rows\": [\n";
+ }
- int howMany = 0;
- while ( cursor->more() ) {
- if ( howMany++ && html == 0 )
- out << " ,\n";
- BSONObj obj = cursor->next();
- if( html ) {
- if( out.tellp() > 4 * 1024 * 1024 ) {
- out << "Stopping output: more than 4MB returned and in html mode\n";
- break;
- }
- out << obj.jsonString(Strict, html?1:0) << "\n\n";
+ int howMany = 0;
+ while (cursor->more()) {
+ if (howMany++ && html == 0)
+ out << " ,\n";
+ BSONObj obj = cursor->next();
+ if (html) {
+ if (out.tellp() > 4 * 1024 * 1024) {
+ out << "Stopping output: more than 4MB returned and in html mode\n";
+ break;
}
- else {
- if( out.tellp() > 50 * 1024 * 1024 ) // 50MB limit - we are using ram
- break;
- out << " " << obj.jsonString();
- }
- }
-
- if( html ) {
- out << "</pre>\n";
- if( howMany == 0 ) out << p("Collection is empty");
- out << _end();
- }
- else {
- out << "\n ],\n\n";
- out << " \"total_rows\" : " << howMany << " ,\n";
- out << " \"query\" : " << query.jsonString() << " ,\n";
- out << " \"millis\" : " << t.millis() << '\n';
- out << "}\n";
+ out << obj.jsonString(Strict, html ? 1 : 0) << "\n\n";
+ } else {
+ if (out.tellp() > 50 * 1024 * 1024) // 50MB limit - we are using ram
+ break;
+ out << " " << obj.jsonString();
}
+ }
- return html != 0;
+ if (html) {
+ out << "</pre>\n";
+ if (howMany == 0)
+ out << p("Collection is empty");
+ out << _end();
+ } else {
+ out << "\n ],\n\n";
+ out << " \"total_rows\" : " << howMany << " ,\n";
+ out << " \"query\" : " << query.jsonString() << " ,\n";
+ out << " \"millis\" : " << t.millis() << '\n';
+ out << "}\n";
}
- // TODO Generate id and revision per couch POST spec
- void handlePost( OperationContext* txn,
- const std::string& ns,
- const char *body,
- BSONObj& params,
- int & responseCode,
- stringstream & out ) {
- try {
- BSONObj obj = fromjson( body );
+ return html != 0;
+ }
- DBDirectClient db(txn);
- db.insert( ns.c_str(), obj );
- }
- catch ( ... ) {
- responseCode = 400; // Bad Request. Seems reasonable for now.
- out << "{ \"ok\" : false }";
- return;
- }
+ // TODO Generate id and revision per couch POST spec
+ void handlePost(OperationContext* txn,
+ const std::string& ns,
+ const char* body,
+ BSONObj& params,
+ int& responseCode,
+ stringstream& out) {
+ try {
+ BSONObj obj = fromjson(body);
- responseCode = 201;
- out << "{ \"ok\" : true }";
+ DBDirectClient db(txn);
+ db.insert(ns.c_str(), obj);
+ } catch (...) {
+ responseCode = 400; // Bad Request. Seems reasonable for now.
+ out << "{ \"ok\" : false }";
+ return;
}
- int _getOption( BSONElement e , int def ) {
- if ( e.isNumber() )
- return e.numberInt();
- if ( e.type() == String )
- return atoi( e.valuestr() );
- return def;
- }
- } restHandler;
+ responseCode = 201;
+ out << "{ \"ok\" : true }";
+ }
- bool RestAdminAccess::haveAdminUsers(OperationContext* txn) const {
- AuthorizationSession* authzSession = AuthorizationSession::get(txn->getClient());
- return authzSession->getAuthorizationManager().hasAnyPrivilegeDocuments(txn);
+ int _getOption(BSONElement e, int def) {
+ if (e.isNumber())
+ return e.numberInt();
+ if (e.type() == String)
+ return atoi(e.valuestr());
+ return def;
}
+} restHandler;
- class LowLevelMongodStatus : public WebStatusPlugin {
- public:
- LowLevelMongodStatus() : WebStatusPlugin( "overview" , 5 , "(only reported if can acquire read lock quickly)" ) {}
-
- virtual void init() {}
-
- void _gotLock( int millis , stringstream& ss ) {
- const repl::ReplSettings& replSettings =
- repl::getGlobalReplicationCoordinator()->getSettings();
- ss << "<pre>\n";
- ss << "time to get readlock: " << millis << "ms\n";
- ss << "# Cursors: " << ClientCursor::totalOpen() << '\n';
- ss << "replication: ";
- if (*repl::replInfo)
- ss << "\nreplInfo: " << repl::replInfo << "\n\n";
- if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
- repl::ReplicationCoordinator::modeReplSet) {
- ss << a("", "see replSetGetStatus link top of page") << "--replSet </a>"
- << replSettings.replSet;
- }
- // TODO(dannenberg) replAllDead is bad and should be removed when masterslave is removed
- if (repl::replAllDead)
- ss << "\n<b>replication replAllDead=" << repl::replAllDead << "</b>\n";
- else {
- ss << "\nmaster: " << replSettings.master << '\n';
- ss << "slave: " << replSettings.slave << '\n';
- ss << '\n';
- }
+bool RestAdminAccess::haveAdminUsers(OperationContext* txn) const {
+ AuthorizationSession* authzSession = AuthorizationSession::get(txn->getClient());
+ return authzSession->getAuthorizationManager().hasAnyPrivilegeDocuments(txn);
+}
- BackgroundOperation::dump(ss);
- ss << "</pre>\n";
+class LowLevelMongodStatus : public WebStatusPlugin {
+public:
+ LowLevelMongodStatus()
+ : WebStatusPlugin("overview", 5, "(only reported if can acquire read lock quickly)") {}
+
+ virtual void init() {}
+
+ void _gotLock(int millis, stringstream& ss) {
+ const repl::ReplSettings& replSettings =
+ repl::getGlobalReplicationCoordinator()->getSettings();
+ ss << "<pre>\n";
+ ss << "time to get readlock: " << millis << "ms\n";
+ ss << "# Cursors: " << ClientCursor::totalOpen() << '\n';
+ ss << "replication: ";
+ if (*repl::replInfo)
+ ss << "\nreplInfo: " << repl::replInfo << "\n\n";
+ if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
+ repl::ReplicationCoordinator::modeReplSet) {
+ ss << a("", "see replSetGetStatus link top of page") << "--replSet </a>"
+ << replSettings.replSet;
+ }
+ // TODO(dannenberg) replAllDead is bad and should be removed when masterslave is removed
+ if (repl::replAllDead)
+ ss << "\n<b>replication replAllDead=" << repl::replAllDead << "</b>\n";
+ else {
+ ss << "\nmaster: " << replSettings.master << '\n';
+ ss << "slave: " << replSettings.slave << '\n';
+ ss << '\n';
}
- virtual void run(OperationContext* txn, stringstream& ss ) {
- Timer t;
- Lock::GlobalLock globalSLock(txn->lockState(), MODE_S, 300);
- if (globalSLock.isLocked()) {
- _gotLock(t.millis(), ss);
- }
- else {
- ss << "\n<b>timed out getting lock</b>\n";
- }
+ BackgroundOperation::dump(ss);
+ ss << "</pre>\n";
+ }
+
+ virtual void run(OperationContext* txn, stringstream& ss) {
+ Timer t;
+ Lock::GlobalLock globalSLock(txn->lockState(), MODE_S, 300);
+ if (globalSLock.isLocked()) {
+ _gotLock(t.millis(), ss);
+ } else {
+ ss << "\n<b>timed out getting lock</b>\n";
}
+ }
- } lowLevelMongodStatus;
+} lowLevelMongodStatus;
}
diff --git a/src/mongo/db/restapi.h b/src/mongo/db/restapi.h
index d73103ab785..50a603a88dd 100644
--- a/src/mongo/db/restapi.h
+++ b/src/mongo/db/restapi.h
@@ -39,11 +39,11 @@
namespace mongo {
- class RestAdminAccess : public AdminAccess {
- public:
- virtual ~RestAdminAccess() { }
+class RestAdminAccess : public AdminAccess {
+public:
+ virtual ~RestAdminAccess() {}
- virtual bool haveAdminUsers(OperationContext* txn) const;
- };
+ virtual bool haveAdminUsers(OperationContext* txn) const;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/server_extra_log_context.cpp b/src/mongo/db/server_extra_log_context.cpp
index 82ab6e7b4fd..c056651b0cb 100644
--- a/src/mongo/db/server_extra_log_context.cpp
+++ b/src/mongo/db/server_extra_log_context.cpp
@@ -41,41 +41,40 @@
namespace mongo {
namespace {
- // Server parameter controlling whether or not user ids are included in log entries.
- MONGO_EXPORT_STARTUP_SERVER_PARAMETER(logUserIds, bool, false);
+// Server parameter controlling whether or not user ids are included in log entries.
+MONGO_EXPORT_STARTUP_SERVER_PARAMETER(logUserIds, bool, false);
- /**
- * Note: When appending new strings to the builder, make sure to pass false to the
- * includeEndingNull parameter.
- */
- void appendServerExtraLogContext(BufBuilder& builder) {
- ClientBasic* clientBasic = ClientBasic::getCurrent();
- if (!clientBasic)
- return;
- if (!AuthorizationSession::exists(clientBasic))
- return;
+/**
+ * Note: When appending new strings to the builder, make sure to pass false to the
+ * includeEndingNull parameter.
+ */
+void appendServerExtraLogContext(BufBuilder& builder) {
+ ClientBasic* clientBasic = ClientBasic::getCurrent();
+ if (!clientBasic)
+ return;
+ if (!AuthorizationSession::exists(clientBasic))
+ return;
- UserNameIterator users =
- AuthorizationSession::get(clientBasic)->getAuthenticatedUserNames();
+ UserNameIterator users = AuthorizationSession::get(clientBasic)->getAuthenticatedUserNames();
- if (!users.more())
- return;
+ if (!users.more())
+ return;
- builder.appendStr("user:", false);
+ builder.appendStr("user:", false);
+ builder.appendStr(users.next().toString(), false);
+ while (users.more()) {
+ builder.appendChar(',');
builder.appendStr(users.next().toString(), false);
- while (users.more()) {
- builder.appendChar(',');
- builder.appendStr(users.next().toString(), false);
- }
- builder.appendChar(' ');
}
+ builder.appendChar(' ');
+}
- MONGO_INITIALIZER(SetServerLogContextFunction)(InitializerContext*) {
- if (!logUserIds)
- return Status::OK();
+MONGO_INITIALIZER(SetServerLogContextFunction)(InitializerContext*) {
+ if (!logUserIds)
+ return Status::OK();
- return logger::registerExtraLogContextFn(appendServerExtraLogContext);
- }
+ return logger::registerExtraLogContextFn(appendServerExtraLogContext);
+}
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/server_options.cpp b/src/mongo/db/server_options.cpp
index 6b4c749c6cf..d37442e4519 100644
--- a/src/mongo/db/server_options.cpp
+++ b/src/mongo/db/server_options.cpp
@@ -29,12 +29,12 @@
namespace mongo {
- /**
- * This struct represents global configuration data for the server. These options get set from
- * the command line and are used inline in the code. Note that much shared code uses this
- * struct, which is why it is here in its own file rather than in the same file as the code that
- * sets it via the command line, which would pull in more dependencies.
- */
- ServerGlobalParams serverGlobalParams;
+/**
+ * This struct represents global configuration data for the server. These options get set from
+ * the command line and are used inline in the code. Note that much shared code uses this
+ * struct, which is why it is here in its own file rather than in the same file as the code that
+ * sets it via the command line, which would pull in more dependencies.
+ */
+ServerGlobalParams serverGlobalParams;
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h
index 46579e2484a..eb51098823c 100644
--- a/src/mongo/db/server_options.h
+++ b/src/mongo/db/server_options.h
@@ -29,121 +29,129 @@
#include "mongo/db/jsobj.h"
#include "mongo/platform/process_id.h"
-#include "mongo/util/net/listen.h" // For DEFAULT_MAX_CONN
+#include "mongo/util/net/listen.h" // For DEFAULT_MAX_CONN
namespace mongo {
- const int DEFAULT_UNIX_PERMS = 0700;
+const int DEFAULT_UNIX_PERMS = 0700;
+
+struct ServerGlobalParams {
+ ServerGlobalParams()
+ : port(DefaultDBPort),
+ rest(false),
+ jsonp(false),
+ indexBuildRetry(true),
+ quiet(false),
+ configsvr(false),
+ cpu(false),
+ objcheck(true),
+ defaultProfile(0),
+ slowMS(100),
+ defaultLocalThresholdMillis(15),
+ moveParanoia(true),
+ noUnixSocket(false),
+ doFork(0),
+ socket("/tmp"),
+ maxConns(DEFAULT_MAX_CONN),
+ unixSocketPermissions(DEFAULT_UNIX_PERMS),
+ logAppend(false),
+ logRenameOnRotate(true),
+ logWithSyslog(false),
+ isHttpInterfaceEnabled(false) {
+ started = time(0);
+ }
+
+ std::string binaryName; // mongod or mongos
+ std::string cwd; // cwd of when process started
+
+ int port; // --port
+ enum { DefaultDBPort = 27017, ConfigServerPort = 27019, ShardServerPort = 27018 };
+ bool isDefaultPort() const {
+ return port == DefaultDBPort;
+ }
+
+ std::string bind_ip; // --bind_ip
+ bool rest; // --rest
+ bool jsonp; // --jsonp
+
+ bool indexBuildRetry; // --noIndexBuildRetry
+
+ bool quiet; // --quiet
+
+ bool configsvr; // --configsvr
+
+ bool cpu; // --cpu show cpu time periodically
+
+ bool objcheck; // --objcheck
+
+ int defaultProfile; // --profile
+ int slowMS; // --time in ms that is "slow"
+ int defaultLocalThresholdMillis; // --localThreshold in ms to consider a node local
+ bool moveParanoia; // for move chunk paranoia
+
+ bool noUnixSocket; // --nounixsocket
+ bool doFork; // --fork
+ std::string socket; // UNIX domain socket directory
+
+ int maxConns; // Maximum number of simultaneous open connections.
+
+ int unixSocketPermissions; // permissions for the UNIX domain socket
+
+ std::string keyFile; // Path to keyfile, or empty if none.
+ std::string pidFile; // Path to pid file, or empty if none.
+
+ std::string logpath; // Path to log file, if logging to a file; otherwise, empty.
+ bool logAppend; // True if logging to a file in append mode.
+ bool logRenameOnRotate; // True if logging should rename log files on rotate
+ bool logWithSyslog; // True if logging to syslog; must not be set if logpath is set.
+ int syslogFacility; // Facility used when appending messages to the syslog.
+
+ bool isHttpInterfaceEnabled; // True if the dbwebserver should be enabled.
- struct ServerGlobalParams {
-
- ServerGlobalParams() :
- port(DefaultDBPort), rest(false), jsonp(false), indexBuildRetry(true), quiet(false),
- configsvr(false), cpu(false), objcheck(true), defaultProfile(0),
- slowMS(100), defaultLocalThresholdMillis(15), moveParanoia(true),
- noUnixSocket(false), doFork(0), socket("/tmp"), maxConns(DEFAULT_MAX_CONN),
- unixSocketPermissions(DEFAULT_UNIX_PERMS), logAppend(false), logRenameOnRotate(true),
- logWithSyslog(false), isHttpInterfaceEnabled(false)
- {
- started = time(0);
- }
-
- std::string binaryName; // mongod or mongos
- std::string cwd; // cwd of when process started
-
- int port; // --port
- enum {
- DefaultDBPort = 27017,
- ConfigServerPort = 27019,
- ShardServerPort = 27018
- };
- bool isDefaultPort() const { return port == DefaultDBPort; }
-
- std::string bind_ip; // --bind_ip
- bool rest; // --rest
- bool jsonp; // --jsonp
-
- bool indexBuildRetry; // --noIndexBuildRetry
-
- bool quiet; // --quiet
-
- bool configsvr; // --configsvr
-
- bool cpu; // --cpu show cpu time periodically
-
- bool objcheck; // --objcheck
-
- int defaultProfile; // --profile
- int slowMS; // --time in ms that is "slow"
- int defaultLocalThresholdMillis; // --localThreshold in ms to consider a node local
- bool moveParanoia; // for move chunk paranoia
-
- bool noUnixSocket; // --nounixsocket
- bool doFork; // --fork
- std::string socket; // UNIX domain socket directory
+#ifndef _WIN32
+ ProcessId parentProc; // --fork pid of initial process
+ ProcessId leaderProc; // --fork pid of leader process
+#endif
- int maxConns; // Maximum number of simultaneous open connections.
+ /**
+ * Switches to enable experimental (unsupported) features.
+ */
+ struct ExperimentalFeatures {
+ ExperimentalFeatures() : indexStatsCmdEnabled(false), storageDetailsCmdEnabled(false) {}
+ bool indexStatsCmdEnabled; // -- enableExperimentalIndexStatsCmd
+ bool storageDetailsCmdEnabled; // -- enableExperimentalStorageDetailsCmd
+ } experimental;
- int unixSocketPermissions; // permissions for the UNIX domain socket
+ time_t started;
- std::string keyFile; // Path to keyfile, or empty if none.
- std::string pidFile; // Path to pid file, or empty if none.
+ BSONArray argvArray;
+ BSONObj parsedOpts;
+ bool isAuthEnabled = false;
+ AtomicInt32 clusterAuthMode; // --clusterAuthMode, the internal cluster auth mode
- std::string logpath; // Path to log file, if logging to a file; otherwise, empty.
- bool logAppend; // True if logging to a file in append mode.
- bool logRenameOnRotate;// True if logging should rename log files on rotate
- bool logWithSyslog; // True if logging to syslog; must not be set if logpath is set.
- int syslogFacility; // Facility used when appending messages to the syslog.
+ enum ClusterAuthModes {
+ ClusterAuthMode_undefined,
+ /**
+ * Authenticate using keyfile, accept only keyfiles
+ */
+ ClusterAuthMode_keyFile,
- bool isHttpInterfaceEnabled; // True if the dbwebserver should be enabled.
+ /**
+ * Authenticate using keyfile, accept both keyfiles and X.509
+ */
+ ClusterAuthMode_sendKeyFile,
-#ifndef _WIN32
- ProcessId parentProc; // --fork pid of initial process
- ProcessId leaderProc; // --fork pid of leader process
-#endif
+ /**
+ * Authenticate using X.509, accept both keyfiles and X.509
+ */
+ ClusterAuthMode_sendX509,
/**
- * Switches to enable experimental (unsupported) features.
- */
- struct ExperimentalFeatures {
- ExperimentalFeatures()
- : indexStatsCmdEnabled(false)
- , storageDetailsCmdEnabled(false)
- {}
- bool indexStatsCmdEnabled; // -- enableExperimentalIndexStatsCmd
- bool storageDetailsCmdEnabled; // -- enableExperimentalStorageDetailsCmd
- } experimental;
-
- time_t started;
-
- BSONArray argvArray;
- BSONObj parsedOpts;
- bool isAuthEnabled = false;
- AtomicInt32 clusterAuthMode; // --clusterAuthMode, the internal cluster auth mode
-
- enum ClusterAuthModes {
- ClusterAuthMode_undefined,
- /**
- * Authenticate using keyfile, accept only keyfiles
- */
- ClusterAuthMode_keyFile,
-
- /**
- * Authenticate using keyfile, accept both keyfiles and X.509
- */
- ClusterAuthMode_sendKeyFile,
-
- /**
- * Authenticate using X.509, accept both keyfiles and X.509
- */
- ClusterAuthMode_sendX509,
-
- /**
- * Authenticate using X.509, accept only X.509
- */
- ClusterAuthMode_x509
- };
+ * Authenticate using X.509, accept only X.509
+ */
+ ClusterAuthMode_x509
};
+};
- extern ServerGlobalParams serverGlobalParams;
+extern ServerGlobalParams serverGlobalParams;
}
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index 84e6092813d..9833cf84541 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -52,7 +52,7 @@
#include "mongo/util/log.h"
#include "mongo/util/map_util.h"
#include "mongo/util/mongoutils/str.h"
-#include "mongo/util/net/listen.h" // For DEFAULT_MAX_CONN
+#include "mongo/util/net/listen.h" // For DEFAULT_MAX_CONN
#include "mongo/util/net/ssl_options.h"
#include "mongo/util/options_parser/startup_options.h"
@@ -77,906 +77,928 @@ namespace {
#if defined(SYSLOG_NAMES)
#if !defined(INTERNAL_NOPRI)
- typedef struct _code {
- const char* c_name;
- int c_val;
- } CODE;
-
- CODE facilitynames[] =
- {
- { "auth", LOG_AUTH },
- { "cron", LOG_CRON },
- { "daemon", LOG_DAEMON },
- { "kern", LOG_KERN },
- { "lpr", LOG_LPR },
- { "mail", LOG_MAIL },
- { "news", LOG_NEWS },
- { "security", LOG_AUTH }, /* DEPRECATED */
- { "syslog", LOG_SYSLOG },
- { "user", LOG_USER },
- { "uucp", LOG_UUCP },
- { "local0", LOG_LOCAL0 },
- { "local1", LOG_LOCAL1 },
- { "local2", LOG_LOCAL2 },
- { "local3", LOG_LOCAL3 },
- { "local4", LOG_LOCAL4 },
- { "local5", LOG_LOCAL5 },
- { "local6", LOG_LOCAL6 },
- { "local7", LOG_LOCAL7 },
- { NULL, -1 }
- };
-
-#endif // !defined(INTERNAL_NOPRI)
-#endif // defined(SYSLOG_NAMES)
-
-
-} // namespace
-
- Status addGeneralServerOptions(moe::OptionSection* options) {
- StringBuilder portInfoBuilder;
- StringBuilder maxConnInfoBuilder;
- std::stringstream unixSockPermsBuilder;
-
- portInfoBuilder << "specify port number - " << ServerGlobalParams::DefaultDBPort << " by default";
- maxConnInfoBuilder << "max number of simultaneous connections - "
- << DEFAULT_MAX_CONN << " by default";
- unixSockPermsBuilder << "permissions to set on UNIX domain socket file - "
- << "0" << std::oct << DEFAULT_UNIX_PERMS << " by default";
-
- options->addOptionChaining("help", "help,h", moe::Switch, "show this usage information")
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("version", "version", moe::Switch, "show version information")
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("config", "config,f", moe::String,
- "configuration file specifying additional options")
- .setSources(moe::SourceAllLegacy);
-
- // The verbosity level can be set at startup in the following ways. Note that if multiple
- // methods for setting the verbosity are specified simultaneously, the verbosity will be set
- // based on the whichever option specifies the highest level
- //
- // Command Line Option | Resulting Verbosity
- // _________________________________________
- // (none) | 0
- // --verbose "" | 0
- // --verbose | 1
- // --verbose v | 1
- // --verbose vv | 2 (etc.)
- // -v | 1
- // -vv | 2 (etc.)
- //
- // INI Config Option | Resulting Verbosity
- // _________________________________________
- // verbose= | 0
- // verbose=v | 1
- // verbose=vv | 2 (etc.)
- // v=true | 1
- // vv=true | 2 (etc.)
- //
- // YAML Config Option | Resulting Verbosity
- // _________________________________________
- // systemLog: |
- // verbosity: 5 | 5
- // systemLog: |
- // component: |
- // verbosity: 5 | 5
- // systemLog: |
- // component: |
- // Sharding: |
- // verbosity: 5 | 5 (for Sharding only, 0 for default)
- options->addOptionChaining("verbose", "verbose,v", moe::String,
- "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
- .setImplicit(moe::Value(std::string("v")))
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("systemLog.verbosity", "", moe::Int, "set verbose level")
- .setSources(moe::SourceYAMLConfig);
-
- // log component hierarchy verbosity levels
- for (int i = 0; i < int(logger::LogComponent::kNumLogComponents); ++i) {
- logger::LogComponent component = static_cast<logger::LogComponent::Value>(i);
- if (component == logger::LogComponent::kDefault) {
- continue;
- }
- options->addOptionChaining("systemLog.component." + component.getDottedName() +
+typedef struct _code {
+ const char* c_name;
+ int c_val;
+} CODE;
+
+CODE facilitynames[] = {{"auth", LOG_AUTH},
+ {"cron", LOG_CRON},
+ {"daemon", LOG_DAEMON},
+ {"kern", LOG_KERN},
+ {"lpr", LOG_LPR},
+ {"mail", LOG_MAIL},
+ {"news", LOG_NEWS},
+ {"security", LOG_AUTH}, /* DEPRECATED */
+ {"syslog", LOG_SYSLOG},
+ {"user", LOG_USER},
+ {"uucp", LOG_UUCP},
+ {"local0", LOG_LOCAL0},
+ {"local1", LOG_LOCAL1},
+ {"local2", LOG_LOCAL2},
+ {"local3", LOG_LOCAL3},
+ {"local4", LOG_LOCAL4},
+ {"local5", LOG_LOCAL5},
+ {"local6", LOG_LOCAL6},
+ {"local7", LOG_LOCAL7},
+ {NULL, -1}};
+
+#endif // !defined(INTERNAL_NOPRI)
+#endif // defined(SYSLOG_NAMES)
+
+
+} // namespace
+
+Status addGeneralServerOptions(moe::OptionSection* options) {
+ StringBuilder portInfoBuilder;
+ StringBuilder maxConnInfoBuilder;
+ std::stringstream unixSockPermsBuilder;
+
+ portInfoBuilder << "specify port number - " << ServerGlobalParams::DefaultDBPort
+ << " by default";
+ maxConnInfoBuilder << "max number of simultaneous connections - " << DEFAULT_MAX_CONN
+ << " by default";
+ unixSockPermsBuilder << "permissions to set on UNIX domain socket file - "
+ << "0" << std::oct << DEFAULT_UNIX_PERMS << " by default";
+
+ options->addOptionChaining("help", "help,h", moe::Switch, "show this usage information")
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("version", "version", moe::Switch, "show version information")
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("config",
+ "config,f",
+ moe::String,
+ "configuration file specifying additional options")
+ .setSources(moe::SourceAllLegacy);
+
+ // The verbosity level can be set at startup in the following ways. Note that if multiple
+ // methods for setting the verbosity are specified simultaneously, the verbosity will be set
+ // based on the whichever option specifies the highest level
+ //
+ // Command Line Option | Resulting Verbosity
+ // _________________________________________
+ // (none) | 0
+ // --verbose "" | 0
+ // --verbose | 1
+ // --verbose v | 1
+ // --verbose vv | 2 (etc.)
+ // -v | 1
+ // -vv | 2 (etc.)
+ //
+ // INI Config Option | Resulting Verbosity
+ // _________________________________________
+ // verbose= | 0
+ // verbose=v | 1
+ // verbose=vv | 2 (etc.)
+ // v=true | 1
+ // vv=true | 2 (etc.)
+ //
+ // YAML Config Option | Resulting Verbosity
+ // _________________________________________
+ // systemLog: |
+ // verbosity: 5 | 5
+ // systemLog: |
+ // component: |
+ // verbosity: 5 | 5
+ // systemLog: |
+ // component: |
+ // Sharding: |
+ // verbosity: 5 | 5 (for Sharding only, 0 for default)
+ options->addOptionChaining(
+ "verbose",
+ "verbose,v",
+ moe::String,
+ "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ .setImplicit(moe::Value(std::string("v")))
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("systemLog.verbosity", "", moe::Int, "set verbose level")
+ .setSources(moe::SourceYAMLConfig);
+
+ // log component hierarchy verbosity levels
+ for (int i = 0; i < int(logger::LogComponent::kNumLogComponents); ++i) {
+ logger::LogComponent component = static_cast<logger::LogComponent::Value>(i);
+ if (component == logger::LogComponent::kDefault) {
+ continue;
+ }
+ options->addOptionChaining("systemLog.component." + component.getDottedName() +
".verbosity",
- "", moe::Int,
- "set component verbose level for " +
- component.getDottedName())
- .setSources(moe::SourceYAMLConfig);
- }
-
- options->addOptionChaining("systemLog.quiet", "quiet", moe::Switch, "quieter output");
-
- options->addOptionChaining("net.port", "port", moe::Int, portInfoBuilder.str().c_str());
-
- options->addOptionChaining("net.bindIp", "bind_ip", moe::String,
- "comma separated list of ip addresses to listen on - all local ips by default");
-
- options->addOptionChaining("net.ipv6", "ipv6", moe::Switch,
- "enable IPv6 support (disabled by default)");
-
- options->addOptionChaining("net.maxIncomingConnections", "maxConns", moe::Int,
- maxConnInfoBuilder.str().c_str());
-
- options->addOptionChaining("logpath", "logpath", moe::String,
- "log file to send write to instead of stdout - has to be a file, not directory")
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("syslog");
-
- options->addOptionChaining("systemLog.path", "", moe::String,
- "log file to send writes to if logging to a file - has to be a file, not directory")
- .setSources(moe::SourceYAMLConfig)
- .hidden();
+ "",
+ moe::Int,
+ "set component verbose level for " + component.getDottedName())
+ .setSources(moe::SourceYAMLConfig);
+ }
- options->addOptionChaining("systemLog.destination", "", moe::String,
- "Destination of system log output. (syslog/file)")
- .setSources(moe::SourceYAMLConfig)
- .hidden()
- .format("(:?syslog)|(:?file)", "(syslog/file)");
+ options->addOptionChaining("systemLog.quiet", "quiet", moe::Switch, "quieter output");
+
+ options->addOptionChaining("net.port", "port", moe::Int, portInfoBuilder.str().c_str());
+
+ options->addOptionChaining(
+ "net.bindIp",
+ "bind_ip",
+ moe::String,
+ "comma separated list of ip addresses to listen on - all local ips by default");
+
+ options->addOptionChaining(
+ "net.ipv6", "ipv6", moe::Switch, "enable IPv6 support (disabled by default)");
+
+ options->addOptionChaining(
+ "net.maxIncomingConnections", "maxConns", moe::Int, maxConnInfoBuilder.str().c_str());
+
+ options->addOptionChaining(
+ "logpath",
+ "logpath",
+ moe::String,
+ "log file to send write to instead of stdout - has to be a file, not directory")
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("syslog");
+
+ options
+ ->addOptionChaining(
+ "systemLog.path",
+ "",
+ moe::String,
+ "log file to send writes to if logging to a file - has to be a file, not directory")
+ .setSources(moe::SourceYAMLConfig)
+ .hidden();
+
+ options->addOptionChaining("systemLog.destination",
+ "",
+ moe::String,
+ "Destination of system log output. (syslog/file)")
+ .setSources(moe::SourceYAMLConfig)
+ .hidden()
+ .format("(:?syslog)|(:?file)", "(syslog/file)");
#ifndef _WIN32
- options->addOptionChaining("syslog", "syslog", moe::Switch,
- "log to system's syslog facility instead of file or stdout")
- .incompatibleWith("logpath")
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("systemLog.syslogFacility", "syslogFacility", moe::String,
- "syslog facility used for mongodb syslog message");
-
-#endif // _WIN32
- options->addOptionChaining("systemLog.logAppend", "logappend", moe::Switch,
- "append to logpath instead of over-writing");
-
- options->addOptionChaining("systemLog.logRotate", "logRotate", moe::String,
- "set the log rotation behavior (rename|reopen)");
-
- options->addOptionChaining("systemLog.timeStampFormat", "timeStampFormat", moe::String,
- "Desired format for timestamps in log messages. One of ctime, "
- "iso8601-utc or iso8601-local");
-
- options->addOptionChaining("processManagement.pidFilePath", "pidfilepath", moe::String,
- "full path to pidfile (if not set, no pidfile is created)");
-
- options->addOptionChaining("security.keyFile", "keyFile", moe::String,
- "private key for cluster authentication")
- .incompatibleWith("noauth");
-
- options->addOptionChaining("setParameter", "setParameter", moe::StringMap,
- "Set a configurable parameter")
- .composing();
-
- options->addOptionChaining("httpinterface", "httpinterface", moe::Switch,
- "enable http interface")
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("nohttpinterface");
+ options->addOptionChaining("syslog",
+ "syslog",
+ moe::Switch,
+ "log to system's syslog facility instead of file or stdout")
+ .incompatibleWith("logpath")
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("systemLog.syslogFacility",
+ "syslogFacility",
+ moe::String,
+ "syslog facility used for mongodb syslog message");
- options->addOptionChaining("net.http.enabled", "", moe::Bool, "enable http interface")
- .setSources(moe::SourceYAMLConfig);
-
- options->addOptionChaining("net.http.port", "", moe::Switch,
- "port to listen on for http interface")
- .setSources(moe::SourceYAMLConfig);
-
- options->addOptionChaining("security.clusterAuthMode", "clusterAuthMode", moe::String,
- "Authentication mode used for cluster authentication. Alternatives are "
- "(keyFile|sendKeyFile|sendX509|x509)")
- .format("(:?keyFile)|(:?sendKeyFile)|(:?sendX509)|(:?x509)",
- "(keyFile/sendKeyFile/sendX509/x509)");
+#endif // _WIN32
+ options->addOptionChaining("systemLog.logAppend",
+ "logappend",
+ moe::Switch,
+ "append to logpath instead of over-writing");
+
+ options->addOptionChaining("systemLog.logRotate",
+ "logRotate",
+ moe::String,
+ "set the log rotation behavior (rename|reopen)");
+
+ options->addOptionChaining("systemLog.timeStampFormat",
+ "timeStampFormat",
+ moe::String,
+ "Desired format for timestamps in log messages. One of ctime, "
+ "iso8601-utc or iso8601-local");
+
+ options->addOptionChaining("processManagement.pidFilePath",
+ "pidfilepath",
+ moe::String,
+ "full path to pidfile (if not set, no pidfile is created)");
+
+ options->addOptionChaining("security.keyFile",
+ "keyFile",
+ moe::String,
+ "private key for cluster authentication").incompatibleWith("noauth");
+
+ options->addOptionChaining(
+ "setParameter", "setParameter", moe::StringMap, "Set a configurable parameter")
+ .composing();
+
+ options->addOptionChaining(
+ "httpinterface", "httpinterface", moe::Switch, "enable http interface")
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("nohttpinterface");
+
+ options->addOptionChaining("net.http.enabled", "", moe::Bool, "enable http interface")
+ .setSources(moe::SourceYAMLConfig);
+
+ options->addOptionChaining(
+ "net.http.port", "", moe::Switch, "port to listen on for http interface")
+ .setSources(moe::SourceYAMLConfig);
+
+ options->addOptionChaining(
+ "security.clusterAuthMode",
+ "clusterAuthMode",
+ moe::String,
+ "Authentication mode used for cluster authentication. Alternatives are "
+ "(keyFile|sendKeyFile|sendX509|x509)")
+ .format("(:?keyFile)|(:?sendKeyFile)|(:?sendX509)|(:?x509)",
+ "(keyFile/sendKeyFile/sendX509/x509)");
#ifndef _WIN32
- options->addOptionChaining("nounixsocket", "nounixsocket", moe::Switch,
- "disable listening on unix sockets")
- .setSources(moe::SourceAllLegacy);
+ options->addOptionChaining(
+ "nounixsocket", "nounixsocket", moe::Switch, "disable listening on unix sockets")
+ .setSources(moe::SourceAllLegacy);
- options->addOptionChaining("net.unixDomainSocket.enabled", "", moe::Bool,
- "disable listening on unix sockets")
- .setSources(moe::SourceYAMLConfig);
+ options->addOptionChaining(
+ "net.unixDomainSocket.enabled", "", moe::Bool, "disable listening on unix sockets")
+ .setSources(moe::SourceYAMLConfig);
- options->addOptionChaining("net.unixDomainSocket.pathPrefix", "unixSocketPrefix",
- moe::String, "alternative directory for UNIX domain sockets (defaults to /tmp)");
+ options->addOptionChaining("net.unixDomainSocket.pathPrefix",
+ "unixSocketPrefix",
+ moe::String,
+ "alternative directory for UNIX domain sockets (defaults to /tmp)");
- options->addOptionChaining("net.unixDomainSocket.filePermissions", "filePermissions",
- moe::Int, unixSockPermsBuilder.str() );
+ options->addOptionChaining("net.unixDomainSocket.filePermissions",
+ "filePermissions",
+ moe::Int,
+ unixSockPermsBuilder.str());
- options->addOptionChaining("processManagement.fork", "fork", moe::Switch,
- "fork server process");
+ options->addOptionChaining(
+ "processManagement.fork", "fork", moe::Switch, "fork server process");
#endif
- /* support for -vv -vvvv etc. */
- for (string s = "vv"; s.length() <= 12; s.append("v")) {
- options->addOptionChaining(s.c_str(), s.c_str(), moe::Switch, "verbose")
- .hidden()
- .setSources(moe::SourceAllLegacy);
- }
-
- // Extra hidden options
- options->addOptionChaining("nohttpinterface", "nohttpinterface", moe::Switch,
- "disable http interface")
- .hidden()
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("httpinterface");
-
- options->addOptionChaining("objcheck", "objcheck", moe::Switch,
- "inspect client data for validity on receipt (DEFAULT)")
- .hidden()
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("noobjcheck");
-
- options->addOptionChaining("noobjcheck", "noobjcheck", moe::Switch,
- "do NOT inspect client data for validity on receipt")
- .hidden()
- .setSources(moe::SourceAllLegacy)
- .incompatibleWith("objcheck");
-
- options->addOptionChaining("net.wireObjectCheck", "", moe::Bool,
- "inspect client data for validity on receipt (DEFAULT)")
- .hidden()
- .setSources(moe::SourceYAMLConfig);
-
- options->addOptionChaining("systemLog.traceAllExceptions", "traceExceptions", moe::Switch,
- "log stack traces for every exception")
- .hidden();
-
- options->addOptionChaining("enableExperimentalIndexStatsCmd",
- "enableExperimentalIndexStatsCmd", moe::Switch, "EXPERIMENTAL (UNSUPPORTED). "
- "Enable command computing aggregate statistics on indexes.")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("enableExperimentalStorageDetailsCmd",
- "enableExperimentalStorageDetailsCmd", moe::Switch, "EXPERIMENTAL (UNSUPPORTED). "
- "Enable command computing aggregate statistics on storage.")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- return Status::OK();
+ /* support for -vv -vvvv etc. */
+ for (string s = "vv"; s.length() <= 12; s.append("v")) {
+ options->addOptionChaining(s.c_str(), s.c_str(), moe::Switch, "verbose")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
}
- Status addWindowsServerOptions(moe::OptionSection* options) {
- options->addOptionChaining("install", "install", moe::Switch, "install Windows service")
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("remove", "remove", moe::Switch, "remove Windows service")
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("reinstall", "reinstall", moe::Switch,
- "reinstall Windows service (equivalent to --remove followed by --install)")
- .setSources(moe::SourceAllLegacy);
-
- options->addOptionChaining("processManagement.windowsService.serviceName",
- "serviceName", moe::String,
- "Windows service name");
-
- options->addOptionChaining("processManagement.windowsService.displayName",
- "serviceDisplayName", moe::String,
- "Windows service display name");
-
- options->addOptionChaining("processManagement.windowsService.description",
- "serviceDescription", moe::String,
- "Windows service description");
-
- options->addOptionChaining("processManagement.windowsService.serviceUser",
- "serviceUser", moe::String,
- "account for service execution");
+ // Extra hidden options
+ options->addOptionChaining(
+ "nohttpinterface", "nohttpinterface", moe::Switch, "disable http interface")
+ .hidden()
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("httpinterface");
+
+ options->addOptionChaining("objcheck",
+ "objcheck",
+ moe::Switch,
+ "inspect client data for validity on receipt (DEFAULT)")
+ .hidden()
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("noobjcheck");
+
+ options->addOptionChaining("noobjcheck",
+ "noobjcheck",
+ moe::Switch,
+ "do NOT inspect client data for validity on receipt")
+ .hidden()
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("objcheck");
+
+ options->addOptionChaining("net.wireObjectCheck",
+ "",
+ moe::Bool,
+ "inspect client data for validity on receipt (DEFAULT)")
+ .hidden()
+ .setSources(moe::SourceYAMLConfig);
+
+ options->addOptionChaining("systemLog.traceAllExceptions",
+ "traceExceptions",
+ moe::Switch,
+ "log stack traces for every exception").hidden();
+
+ options->addOptionChaining("enableExperimentalIndexStatsCmd",
+ "enableExperimentalIndexStatsCmd",
+ moe::Switch,
+ "EXPERIMENTAL (UNSUPPORTED). "
+ "Enable command computing aggregate statistics on indexes.")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("enableExperimentalStorageDetailsCmd",
+ "enableExperimentalStorageDetailsCmd",
+ moe::Switch,
+ "EXPERIMENTAL (UNSUPPORTED). "
+ "Enable command computing aggregate statistics on storage.")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ return Status::OK();
+}
+
+Status addWindowsServerOptions(moe::OptionSection* options) {
+ options->addOptionChaining("install", "install", moe::Switch, "install Windows service")
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("remove", "remove", moe::Switch, "remove Windows service")
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining(
+ "reinstall",
+ "reinstall",
+ moe::Switch,
+ "reinstall Windows service (equivalent to --remove followed by --install)")
+ .setSources(moe::SourceAllLegacy);
+
+ options->addOptionChaining("processManagement.windowsService.serviceName",
+ "serviceName",
+ moe::String,
+ "Windows service name");
+
+ options->addOptionChaining("processManagement.windowsService.displayName",
+ "serviceDisplayName",
+ moe::String,
+ "Windows service display name");
+
+ options->addOptionChaining("processManagement.windowsService.description",
+ "serviceDescription",
+ moe::String,
+ "Windows service description");
+
+ options->addOptionChaining("processManagement.windowsService.serviceUser",
+ "serviceUser",
+ moe::String,
+ "account for service execution");
+
+ options->addOptionChaining("processManagement.windowsService.servicePassword",
+ "servicePassword",
+ moe::String,
+ "password used to authenticate serviceUser");
+
+ options->addOptionChaining("service", "service", moe::Switch, "start mongodb service")
+ .hidden()
+ .setSources(moe::SourceAllLegacy);
+
+ return Status::OK();
+}
- options->addOptionChaining("processManagement.windowsService.servicePassword",
- "servicePassword", moe::String,
- "password used to authenticate serviceUser");
-
- options->addOptionChaining("service", "service", moe::Switch, "start mongodb service")
- .hidden()
- .setSources(moe::SourceAllLegacy);
-
- return Status::OK();
+namespace {
+// Helpers for option storage
+Status setupBinaryName(const std::vector<std::string>& argv) {
+ if (argv.empty()) {
+ return Status(ErrorCodes::InternalError, "Cannot get binary name: argv array is empty");
}
- namespace {
- // Helpers for option storage
- Status setupBinaryName(const std::vector<std::string>& argv) {
-
- if (argv.empty()) {
- return Status(ErrorCodes::InternalError, "Cannot get binary name: argv array is empty");
- }
+ // setup binary name
+ serverGlobalParams.binaryName = argv[0];
+ size_t i = serverGlobalParams.binaryName.rfind('/');
+ if (i != string::npos) {
+ serverGlobalParams.binaryName = serverGlobalParams.binaryName.substr(i + 1);
+ }
+ return Status::OK();
+}
- // setup binary name
- serverGlobalParams.binaryName = argv[0];
- size_t i = serverGlobalParams.binaryName.rfind('/');
- if (i != string::npos) {
- serverGlobalParams.binaryName = serverGlobalParams.binaryName.substr(i + 1);
+Status setupCwd() {
+ // setup cwd
+ char buffer[1024];
+#ifdef _WIN32
+ verify(_getcwd(buffer, 1000));
+#else
+ verify(getcwd(buffer, 1000));
+#endif
+ serverGlobalParams.cwd = buffer;
+ return Status::OK();
+}
+
+Status setArgvArray(const std::vector<std::string>& argv) {
+ BSONArrayBuilder b;
+ std::vector<std::string> censoredArgv = argv;
+ cmdline_utils::censorArgsVector(&censoredArgv);
+ for (size_t i = 0; i < censoredArgv.size(); i++) {
+ b << censoredArgv[i];
+ }
+ serverGlobalParams.argvArray = b.arr();
+ return Status::OK();
+}
+
+Status setParsedOpts(const moe::Environment& params) {
+ serverGlobalParams.parsedOpts = params.toBSON();
+ cmdline_utils::censorBSONObj(&serverGlobalParams.parsedOpts);
+ return Status::OK();
+}
+} // namespace
+
+void printCommandLineOpts() {
+ log() << "options: " << serverGlobalParams.parsedOpts << endl;
+}
+
+Status validateServerOptions(const moe::Environment& params) {
+ if (params.count("verbose")) {
+ std::string verbosity = params["verbose"].as<std::string>();
+
+ // Skip this for backwards compatibility. See SERVER-11471.
+ if (verbosity != "true") {
+ for (std::string::iterator iterator = verbosity.begin(); iterator != verbosity.end();
+ iterator++) {
+ if (*iterator != 'v') {
+ return Status(ErrorCodes::BadValue,
+ "The \"verbose\" option string cannot contain any characters "
+ "other than \"v\"");
+ }
}
- return Status::OK();
}
+ }
- Status setupCwd() {
- // setup cwd
- char buffer[1024];
#ifdef _WIN32
- verify(_getcwd(buffer, 1000));
-#else
- verify(getcwd(buffer, 1000));
-#endif
- serverGlobalParams.cwd = buffer;
- return Status::OK();
+ if (params.count("install") || params.count("reinstall")) {
+ if (params.count("logpath") &&
+ !boost::filesystem::path(params["logpath"].as<string>()).is_absolute()) {
+ return Status(ErrorCodes::BadValue,
+ "logpath requires an absolute file path with Windows services");
}
- Status setArgvArray(const std::vector<std::string>& argv) {
- BSONArrayBuilder b;
- std::vector<std::string> censoredArgv = argv;
- cmdline_utils::censorArgsVector(&censoredArgv);
- for (size_t i=0; i < censoredArgv.size(); i++) {
- b << censoredArgv[i];
- }
- serverGlobalParams.argvArray = b.arr();
- return Status::OK();
+ if (params.count("config") &&
+ !boost::filesystem::path(params["config"].as<string>()).is_absolute()) {
+ return Status(ErrorCodes::BadValue,
+ "config requires an absolute file path with Windows services");
}
- Status setParsedOpts(const moe::Environment& params) {
- serverGlobalParams.parsedOpts = params.toBSON();
- cmdline_utils::censorBSONObj(&serverGlobalParams.parsedOpts);
- return Status::OK();
+ if (params.count("processManagement.pidFilePath") &&
+ !boost::filesystem::path(params["processManagement.pidFilePath"].as<string>())
+ .is_absolute()) {
+ return Status(ErrorCodes::BadValue,
+ "pidFilePath requires an absolute file path with Windows services");
}
- } //namespace
- void printCommandLineOpts() {
- log() << "options: " << serverGlobalParams.parsedOpts << endl;
+ if (params.count("security.keyFile") &&
+ !boost::filesystem::path(params["security.keyFile"].as<string>()).is_absolute()) {
+ return Status(ErrorCodes::BadValue,
+ "keyFile requires an absolute file path with Windows services");
+ }
}
+#endif
- Status validateServerOptions(const moe::Environment& params) {
- if (params.count("verbose")) {
- std::string verbosity = params["verbose"].as<std::string>();
+#ifdef MONGO_CONFIG_SSL
+ Status ret = validateSSLServerOptions(params);
+ if (!ret.isOK()) {
+ return ret;
+ }
+#endif
- // Skip this for backwards compatibility. See SERVER-11471.
- if (verbosity != "true") {
- for (std::string::iterator iterator = verbosity.begin();
- iterator != verbosity.end(); iterator++) {
- if (*iterator != 'v') {
- return Status(ErrorCodes::BadValue,
- "The \"verbose\" option string cannot contain any characters "
- "other than \"v\"");
- }
- }
- }
+ bool haveAuthenticationMechanisms = true;
+ bool hasAuthorizationEnabled = false;
+ if (params.count("security.authenticationMechanisms") &&
+ params["security.authenticationMechanisms"].as<std::vector<std::string>>().empty()) {
+ haveAuthenticationMechanisms = false;
+ }
+ if (params.count("setParameter")) {
+ std::map<std::string, std::string> parameters =
+ params["setParameter"].as<std::map<std::string, std::string>>();
+ auto authMechParameter = parameters.find("authenticationMechanisms");
+ if (authMechParameter != parameters.end() && authMechParameter->second.empty()) {
+ haveAuthenticationMechanisms = false;
}
+ }
+ if ((params.count("security.authorization") &&
+ params["security.authorization"].as<std::string>() == "enabled") ||
+ params.count("security.clusterAuthMode") || params.count("security.keyFile") ||
+ params.count("auth")) {
+ hasAuthorizationEnabled = true;
+ }
+ if (hasAuthorizationEnabled && !haveAuthenticationMechanisms) {
+ return Status(ErrorCodes::BadValue,
+ "Authorization is enabled but no authentication mechanisms are present.");
+ }
-#ifdef _WIN32
- if (params.count("install") || params.count("reinstall")) {
- if (params.count("logpath") &&
- !boost::filesystem::path(params["logpath"].as<string>()).is_absolute()) {
- return Status(ErrorCodes::BadValue,
- "logpath requires an absolute file path with Windows services");
- }
-
- if (params.count("config") &&
- !boost::filesystem::path(params["config"].as<string>()).is_absolute()) {
- return Status(ErrorCodes::BadValue,
- "config requires an absolute file path with Windows services");
- }
-
- if (params.count("processManagement.pidFilePath") &&
- !boost::filesystem::path(
- params["processManagement.pidFilePath"].as<string>()).is_absolute()) {
- return Status(ErrorCodes::BadValue,
- "pidFilePath requires an absolute file path with Windows services");
- }
-
- if (params.count("security.keyFile") &&
- !boost::filesystem::path(params["security.keyFile"].as<string>()).is_absolute()) {
- return Status(ErrorCodes::BadValue,
- "keyFile requires an absolute file path with Windows services");
- }
+ return Status::OK();
+}
+Status canonicalizeServerOptions(moe::Environment* params) {
+ // "net.wireObjectCheck" comes from the config file, so override it if either "objcheck" or
+ // "noobjcheck" are set, since those come from the command line.
+ if (params->count("objcheck")) {
+ Status ret =
+ params->set("net.wireObjectCheck", moe::Value((*params)["objcheck"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
-#endif
-
-#ifdef MONGO_CONFIG_SSL
- Status ret = validateSSLServerOptions(params);
+ ret = params->remove("objcheck");
if (!ret.isOK()) {
return ret;
}
-#endif
+ }
- bool haveAuthenticationMechanisms = true;
- bool hasAuthorizationEnabled = false;
- if (params.count("security.authenticationMechanisms") &&
- params["security.authenticationMechanisms"].as<std::vector<std::string> >().empty()) {
- haveAuthenticationMechanisms = false;
+ if (params->count("noobjcheck")) {
+ Status ret =
+ params->set("net.wireObjectCheck", moe::Value(!(*params)["noobjcheck"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params.count("setParameter")) {
- std::map<std::string, std::string> parameters =
- params["setParameter"].as<std::map<std::string, std::string> >();
- auto authMechParameter = parameters.find("authenticationMechanisms");
- if (authMechParameter != parameters.end() && authMechParameter->second.empty()) {
- haveAuthenticationMechanisms = false;
- }
+ ret = params->remove("noobjcheck");
+ if (!ret.isOK()) {
+ return ret;
}
- if ((params.count("security.authorization") &&
- params["security.authorization"].as<std::string>() == "enabled") ||
- params.count("security.clusterAuthMode") ||
- params.count("security.keyFile") ||
- params.count("auth")) {
- hasAuthorizationEnabled = true;
+ }
+
+ // "net.http.enabled" comes from the config file, so override it if "nohttpinterface" or
+ // "httpinterface" are set since those come from the command line.
+ if (params->count("nohttpinterface")) {
+ Status ret =
+ params->set("net.http.enabled", moe::Value(!(*params)["nohttpinterface"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (hasAuthorizationEnabled && !haveAuthenticationMechanisms) {
- return Status(ErrorCodes::BadValue,
- "Authorization is enabled but no authentication mechanisms are present.");
+ ret = params->remove("nohttpinterface");
+ if (!ret.isOK()) {
+ return ret;
}
-
- return Status::OK();
}
-
- Status canonicalizeServerOptions(moe::Environment* params) {
-
- // "net.wireObjectCheck" comes from the config file, so override it if either "objcheck" or
- // "noobjcheck" are set, since those come from the command line.
- if (params->count("objcheck")) {
- Status ret = params->set("net.wireObjectCheck",
- moe::Value((*params)["objcheck"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("objcheck");
- if (!ret.isOK()) {
- return ret;
- }
+ if (params->count("httpinterface")) {
+ Status ret =
+ params->set("net.http.enabled", moe::Value((*params)["httpinterface"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
-
- if (params->count("noobjcheck")) {
- Status ret = params->set("net.wireObjectCheck",
- moe::Value(!(*params)["noobjcheck"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("noobjcheck");
- if (!ret.isOK()) {
- return ret;
- }
+ ret = params->remove("httpinterface");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- // "net.http.enabled" comes from the config file, so override it if "nohttpinterface" or
- // "httpinterface" are set since those come from the command line.
- if (params->count("nohttpinterface")) {
- Status ret = params->set("net.http.enabled",
- moe::Value(!(*params)["nohttpinterface"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("nohttpinterface");
- if (!ret.isOK()) {
- return ret;
- }
+ // "net.unixDomainSocket.enabled" comes from the config file, so override it if
+ // "nounixsocket" is set since that comes from the command line.
+ if (params->count("nounixsocket")) {
+ Status ret = params->set("net.unixDomainSocket.enabled",
+ moe::Value(!(*params)["nounixsocket"].as<bool>()));
+ if (!ret.isOK()) {
+ return ret;
}
- if (params->count("httpinterface")) {
- Status ret = params->set("net.http.enabled",
- moe::Value((*params)["httpinterface"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("httpinterface");
- if (!ret.isOK()) {
- return ret;
- }
+ ret = params->remove("nounixsocket");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- // "net.unixDomainSocket.enabled" comes from the config file, so override it if
- // "nounixsocket" is set since that comes from the command line.
- if (params->count("nounixsocket")) {
- Status ret = params->set("net.unixDomainSocket.enabled",
- moe::Value(!(*params)["nounixsocket"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("nounixsocket");
- if (!ret.isOK()) {
- return ret;
- }
+ // Handle both the "--verbose" string argument and the "-vvvv" arguments at the same time so
+ // that we ensure that we set the log level to the maximum of the options provided
+ int logLevel = -1;
+ for (std::string s = ""; s.length() <= 14; s.append("v")) {
+ if (!s.empty() && params->count(s) && (*params)[s].as<bool>() == true) {
+ logLevel = s.length();
}
- // Handle both the "--verbose" string argument and the "-vvvv" arguments at the same time so
- // that we ensure that we set the log level to the maximum of the options provided
- int logLevel = -1;
- for (std::string s = ""; s.length() <= 14; s.append("v")) {
- if (!s.empty() && params->count(s) && (*params)[s].as<bool>() == true) {
+ if (params->count("verbose")) {
+ std::string verbosity;
+ params->get("verbose", &verbosity);
+ if (s == verbosity ||
+ // Treat a verbosity of "true" the same as a single "v". See SERVER-11471.
+ (s == "v" && verbosity == "true")) {
logLevel = s.length();
}
-
- if (params->count("verbose")) {
- std::string verbosity;
- params->get("verbose", &verbosity);
- if (s == verbosity ||
- // Treat a verbosity of "true" the same as a single "v". See SERVER-11471.
- (s == "v" && verbosity == "true")) {
- logLevel = s.length();
- }
- }
-
- // Remove all "v" options we have already handled
- Status ret = params->remove(s);
- if (!ret.isOK()) {
- return ret;
- }
}
- if (logLevel != -1) {
- Status ret = params->set("systemLog.verbosity", moe::Value(logLevel));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("verbose");
- if (!ret.isOK()) {
- return ret;
- }
+ // Remove all "v" options we have already handled
+ Status ret = params->remove(s);
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- if (params->count("logpath")) {
- std::string logpath;
- Status ret = params->get("logpath", &logpath);
- if (!ret.isOK()) {
- return ret;
- }
- if (logpath.empty()) {
- return Status(ErrorCodes::BadValue, "logpath cannot be empty if supplied");
- }
- ret = params->set("systemLog.destination", moe::Value(std::string("file")));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->set("systemLog.path", moe::Value(logpath));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("logpath");
- if (!ret.isOK()) {
- return ret;
- }
+ if (logLevel != -1) {
+ Status ret = params->set("systemLog.verbosity", moe::Value(logLevel));
+ if (!ret.isOK()) {
+ return ret;
}
-
- // "systemLog.destination" comes from the config file, so override it if "syslog" is set
- // since that comes from the command line.
- if (params->count("syslog") && (*params)["syslog"].as<bool>() == true) {
- Status ret = params->set("systemLog.destination", moe::Value(std::string("syslog")));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("syslog");
- if (!ret.isOK()) {
- return ret;
- }
+ ret = params->remove("verbose");
+ if (!ret.isOK()) {
+ return ret;
}
-
- return Status::OK();
}
- Status storeServerOptions(const moe::Environment& params,
- const std::vector<std::string>& args) {
-
- Status ret = setupBinaryName(args);
+ if (params->count("logpath")) {
+ std::string logpath;
+ Status ret = params->get("logpath", &logpath);
if (!ret.isOK()) {
return ret;
}
-
- ret = setupCwd();
+ if (logpath.empty()) {
+ return Status(ErrorCodes::BadValue, "logpath cannot be empty if supplied");
+ }
+ ret = params->set("systemLog.destination", moe::Value(std::string("file")));
if (!ret.isOK()) {
return ret;
}
-
- ret = setArgvArray(args);
+ ret = params->set("systemLog.path", moe::Value(logpath));
if (!ret.isOK()) {
return ret;
}
-
- ret = setParsedOpts(params);
+ ret = params->remove("logpath");
if (!ret.isOK()) {
return ret;
}
+ }
- // Check options that are not yet supported
- if (params.count("net.http.port")) {
- return Status(ErrorCodes::BadValue,
- "The net.http.port option is not currently supported");
+ // "systemLog.destination" comes from the config file, so override it if "syslog" is set
+ // since that comes from the command line.
+ if (params->count("syslog") && (*params)["syslog"].as<bool>() == true) {
+ Status ret = params->set("systemLog.destination", moe::Value(std::string("syslog")));
+ if (!ret.isOK()) {
+ return ret;
}
-
- if (params.count("systemLog.verbosity")) {
- int verbosity = params["systemLog.verbosity"].as<int>();
- if (verbosity < 0) {
- // This can only happen in YAML config
- return Status(ErrorCodes::BadValue,
- "systemLog.verbosity YAML Config cannot be negative");
- }
- logger::globalLogDomain()->setMinimumLoggedSeverity(
- logger::LogSeverity::Debug(verbosity));
+ ret = params->remove("syslog");
+ if (!ret.isOK()) {
+ return ret;
}
+ }
- // log component hierarchy verbosity levels
- for (int i = 0; i < int(logger::LogComponent::kNumLogComponents); ++i) {
- logger::LogComponent component = static_cast<logger::LogComponent::Value>(i);
- if (component == logger::LogComponent::kDefault) {
- continue;
- }
- const string dottedName = "systemLog.component." + component.getDottedName() +
- ".verbosity";
- if (params.count(dottedName)) {
- int verbosity = params[dottedName].as<int>();
- // Clear existing log level if log level is negative.
- if (verbosity < 0) {
- logger::globalLogDomain()->clearMinimumLoggedSeverity(component);
- }
- else {
- logger::globalLogDomain()->setMinimumLoggedSeverity(
- component,
- logger::LogSeverity::Debug(verbosity));
- }
- }
+ return Status::OK();
+}
+
+Status storeServerOptions(const moe::Environment& params, const std::vector<std::string>& args) {
+ Status ret = setupBinaryName(args);
+ if (!ret.isOK()) {
+ return ret;
+ }
+
+ ret = setupCwd();
+ if (!ret.isOK()) {
+ return ret;
+ }
+
+ ret = setArgvArray(args);
+ if (!ret.isOK()) {
+ return ret;
+ }
+
+ ret = setParsedOpts(params);
+ if (!ret.isOK()) {
+ return ret;
+ }
+
+ // Check options that are not yet supported
+ if (params.count("net.http.port")) {
+ return Status(ErrorCodes::BadValue, "The net.http.port option is not currently supported");
+ }
+
+ if (params.count("systemLog.verbosity")) {
+ int verbosity = params["systemLog.verbosity"].as<int>();
+ if (verbosity < 0) {
+ // This can only happen in YAML config
+ return Status(ErrorCodes::BadValue,
+ "systemLog.verbosity YAML Config cannot be negative");
}
+ logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(verbosity));
+ }
- if (params.count("enableExperimentalIndexStatsCmd")) {
- serverGlobalParams.experimental.indexStatsCmdEnabled =
- params["enableExperimentalIndexStatsCmd"].as<bool>();
+ // log component hierarchy verbosity levels
+ for (int i = 0; i < int(logger::LogComponent::kNumLogComponents); ++i) {
+ logger::LogComponent component = static_cast<logger::LogComponent::Value>(i);
+ if (component == logger::LogComponent::kDefault) {
+ continue;
}
- if (params.count("enableExperimentalStorageDetailsCmd")) {
- serverGlobalParams.experimental.storageDetailsCmdEnabled =
- params["enableExperimentalStorageDetailsCmd"].as<bool>();
+ const string dottedName = "systemLog.component." + component.getDottedName() + ".verbosity";
+ if (params.count(dottedName)) {
+ int verbosity = params[dottedName].as<int>();
+ // Clear existing log level if log level is negative.
+ if (verbosity < 0) {
+ logger::globalLogDomain()->clearMinimumLoggedSeverity(component);
+ } else {
+ logger::globalLogDomain()->setMinimumLoggedSeverity(
+ component, logger::LogSeverity::Debug(verbosity));
+ }
}
+ }
- if (params.count("net.port")) {
- serverGlobalParams.port = params["net.port"].as<int>();
- }
+ if (params.count("enableExperimentalIndexStatsCmd")) {
+ serverGlobalParams.experimental.indexStatsCmdEnabled =
+ params["enableExperimentalIndexStatsCmd"].as<bool>();
+ }
+ if (params.count("enableExperimentalStorageDetailsCmd")) {
+ serverGlobalParams.experimental.storageDetailsCmdEnabled =
+ params["enableExperimentalStorageDetailsCmd"].as<bool>();
+ }
- if (params.count("net.bindIp")) {
- serverGlobalParams.bind_ip = params["net.bindIp"].as<std::string>();
- }
+ if (params.count("net.port")) {
+ serverGlobalParams.port = params["net.port"].as<int>();
+ }
- if (params.count("net.ipv6") && params["net.ipv6"].as<bool>() == true) {
- enableIPv6();
- }
+ if (params.count("net.bindIp")) {
+ serverGlobalParams.bind_ip = params["net.bindIp"].as<std::string>();
+ }
- if (params.count("net.http.enabled")) {
- serverGlobalParams.isHttpInterfaceEnabled = params["net.http.enabled"].as<bool>();
- }
+ if (params.count("net.ipv6") && params["net.ipv6"].as<bool>() == true) {
+ enableIPv6();
+ }
- if (params.count("security.clusterAuthMode")) {
- std::string clusterAuthMode = params["security.clusterAuthMode"].as<std::string>();
+ if (params.count("net.http.enabled")) {
+ serverGlobalParams.isHttpInterfaceEnabled = params["net.http.enabled"].as<bool>();
+ }
- if (clusterAuthMode == "keyFile") {
- serverGlobalParams.clusterAuthMode.store
- (ServerGlobalParams::ClusterAuthMode_keyFile);
- }
- else if (clusterAuthMode == "sendKeyFile") {
- serverGlobalParams.clusterAuthMode.store
- (ServerGlobalParams::ClusterAuthMode_sendKeyFile);
- }
- else if (clusterAuthMode == "sendX509") {
- serverGlobalParams.clusterAuthMode.store
- (ServerGlobalParams::ClusterAuthMode_sendX509);
- }
- else if (clusterAuthMode == "x509") {
- serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_x509);
- }
- else {
- return Status(ErrorCodes::BadValue,
- "unsupported value for clusterAuthMode " + clusterAuthMode );
- }
- }
- else {
- serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_undefined);
+ if (params.count("security.clusterAuthMode")) {
+ std::string clusterAuthMode = params["security.clusterAuthMode"].as<std::string>();
+
+ if (clusterAuthMode == "keyFile") {
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_keyFile);
+ } else if (clusterAuthMode == "sendKeyFile") {
+ serverGlobalParams.clusterAuthMode.store(
+ ServerGlobalParams::ClusterAuthMode_sendKeyFile);
+ } else if (clusterAuthMode == "sendX509") {
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_sendX509);
+ } else if (clusterAuthMode == "x509") {
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_x509);
+ } else {
+ return Status(ErrorCodes::BadValue,
+ "unsupported value for clusterAuthMode " + clusterAuthMode);
}
+ } else {
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_undefined);
+ }
- if (params.count("systemLog.quiet")) {
- serverGlobalParams.quiet = params["systemLog.quiet"].as<bool>();
- }
+ if (params.count("systemLog.quiet")) {
+ serverGlobalParams.quiet = params["systemLog.quiet"].as<bool>();
+ }
- if (params.count("systemLog.traceAllExceptions")) {
- DBException::traceExceptions = params["systemLog.traceAllExceptions"].as<bool>();
- }
+ if (params.count("systemLog.traceAllExceptions")) {
+ DBException::traceExceptions = params["systemLog.traceAllExceptions"].as<bool>();
+ }
- if (params.count("net.maxIncomingConnections")) {
- serverGlobalParams.maxConns = params["net.maxIncomingConnections"].as<int>();
+ if (params.count("net.maxIncomingConnections")) {
+ serverGlobalParams.maxConns = params["net.maxIncomingConnections"].as<int>();
- if (serverGlobalParams.maxConns < 5) {
- return Status(ErrorCodes::BadValue, "maxConns has to be at least 5");
- }
+ if (serverGlobalParams.maxConns < 5) {
+ return Status(ErrorCodes::BadValue, "maxConns has to be at least 5");
}
+ }
- if (params.count("net.wireObjectCheck")) {
- serverGlobalParams.objcheck = params["net.wireObjectCheck"].as<bool>();
- }
+ if (params.count("net.wireObjectCheck")) {
+ serverGlobalParams.objcheck = params["net.wireObjectCheck"].as<bool>();
+ }
- if (params.count("net.bindIp")) {
- // passing in wildcard is the same as default behavior; remove and warn
- if (serverGlobalParams.bind_ip == "0.0.0.0") {
- std::cout << "warning: bind_ip of 0.0.0.0 is unnecessary; "
- << "listens on all ips by default" << endl;
- serverGlobalParams.bind_ip = "";
- }
+ if (params.count("net.bindIp")) {
+ // passing in wildcard is the same as default behavior; remove and warn
+ if (serverGlobalParams.bind_ip == "0.0.0.0") {
+ std::cout << "warning: bind_ip of 0.0.0.0 is unnecessary; "
+ << "listens on all ips by default" << endl;
+ serverGlobalParams.bind_ip = "";
}
+ }
#ifndef _WIN32
- if (params.count("net.unixDomainSocket.pathPrefix")) {
- serverGlobalParams.socket = params["net.unixDomainSocket.pathPrefix"].as<string>();
- }
+ if (params.count("net.unixDomainSocket.pathPrefix")) {
+ serverGlobalParams.socket = params["net.unixDomainSocket.pathPrefix"].as<string>();
+ }
- if (params.count("net.unixDomainSocket.enabled")) {
- serverGlobalParams.noUnixSocket = !params["net.unixDomainSocket.enabled"].as<bool>();
- }
- if (params.count("net.unixDomainSocket.filePermissions")) {
- serverGlobalParams.unixSocketPermissions =
- params["net.unixDomainSocket.filePermissions"].as<int>();
- }
+ if (params.count("net.unixDomainSocket.enabled")) {
+ serverGlobalParams.noUnixSocket = !params["net.unixDomainSocket.enabled"].as<bool>();
+ }
+ if (params.count("net.unixDomainSocket.filePermissions")) {
+ serverGlobalParams.unixSocketPermissions =
+ params["net.unixDomainSocket.filePermissions"].as<int>();
+ }
- if ((params.count("processManagement.fork") &&
- params["processManagement.fork"].as<bool>() == true) &&
- (!params.count("shutdown") || params["shutdown"].as<bool>() == false)) {
- serverGlobalParams.doFork = true;
- }
+ if ((params.count("processManagement.fork") &&
+ params["processManagement.fork"].as<bool>() == true) &&
+ (!params.count("shutdown") || params["shutdown"].as<bool>() == false)) {
+ serverGlobalParams.doFork = true;
+ }
#endif // _WIN32
- if (params.count("systemLog.timeStampFormat")) {
- using logger::MessageEventDetailsEncoder;
- std::string formatterName = params["systemLog.timeStampFormat"].as<string>();
- if (formatterName == "ctime") {
- MessageEventDetailsEncoder::setDateFormatter(outputDateAsCtime);
- }
- else if (formatterName == "iso8601-utc") {
- MessageEventDetailsEncoder::setDateFormatter(outputDateAsISOStringUTC);
- }
- else if (formatterName == "iso8601-local") {
- MessageEventDetailsEncoder::setDateFormatter(outputDateAsISOStringLocal);
- }
- else {
- StringBuilder sb;
- sb << "Value of logTimestampFormat must be one of ctime, iso8601-utc " <<
- "or iso8601-local; not \"" << formatterName << "\".";
- return Status(ErrorCodes::BadValue, sb.str());
- }
- }
- if (params.count("systemLog.destination")) {
- std::string systemLogDestination = params["systemLog.destination"].as<std::string>();
- if (systemLogDestination == "file") {
- if (params.count("systemLog.path")) {
- serverGlobalParams.logpath = params["systemLog.path"].as<std::string>();
- }
- else {
- return Status(ErrorCodes::BadValue,
- "systemLog.path is required if systemLog.destination is to a "
- "file");
- }
- }
- else if (systemLogDestination == "syslog") {
- if (params.count("systemLog.path")) {
- return Status(ErrorCodes::BadValue,
- "Can only use systemLog.path if systemLog.destination is to a "
- "file");
- }
- serverGlobalParams.logWithSyslog = true;
- }
- else {
- StringBuilder sb;
- sb << "Bad value for systemLog.destination: " << systemLogDestination
- << ". Supported targets are: (syslog|file)";
- return Status(ErrorCodes::BadValue, sb.str());
- }
+ if (params.count("systemLog.timeStampFormat")) {
+ using logger::MessageEventDetailsEncoder;
+ std::string formatterName = params["systemLog.timeStampFormat"].as<string>();
+ if (formatterName == "ctime") {
+ MessageEventDetailsEncoder::setDateFormatter(outputDateAsCtime);
+ } else if (formatterName == "iso8601-utc") {
+ MessageEventDetailsEncoder::setDateFormatter(outputDateAsISOStringUTC);
+ } else if (formatterName == "iso8601-local") {
+ MessageEventDetailsEncoder::setDateFormatter(outputDateAsISOStringLocal);
+ } else {
+ StringBuilder sb;
+ sb << "Value of logTimestampFormat must be one of ctime, iso8601-utc "
+ << "or iso8601-local; not \"" << formatterName << "\".";
+ return Status(ErrorCodes::BadValue, sb.str());
}
- else {
+ }
+ if (params.count("systemLog.destination")) {
+ std::string systemLogDestination = params["systemLog.destination"].as<std::string>();
+ if (systemLogDestination == "file") {
if (params.count("systemLog.path")) {
+ serverGlobalParams.logpath = params["systemLog.path"].as<std::string>();
+ } else {
return Status(ErrorCodes::BadValue,
- "Can only use systemLog.path if systemLog.destination is to a file");
+ "systemLog.path is required if systemLog.destination is to a "
+ "file");
}
-
+ } else if (systemLogDestination == "syslog") {
+ if (params.count("systemLog.path")) {
+ return Status(ErrorCodes::BadValue,
+ "Can only use systemLog.path if systemLog.destination is to a "
+ "file");
+ }
+ serverGlobalParams.logWithSyslog = true;
+ } else {
+ StringBuilder sb;
+ sb << "Bad value for systemLog.destination: " << systemLogDestination
+ << ". Supported targets are: (syslog|file)";
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
+ } else {
+ if (params.count("systemLog.path")) {
+ return Status(ErrorCodes::BadValue,
+ "Can only use systemLog.path if systemLog.destination is to a file");
}
+ }
#ifndef _WIN32
- if (params.count("systemLog.syslogFacility")) {
- std::string facility = params["systemLog.syslogFacility"].as<string>();
- bool set = false;
- // match facility string to facility value
- size_t facilitynamesLength = sizeof(facilitynames)/sizeof(facilitynames[0]);
- for (unsigned long i = 0; i < facilitynamesLength &&
- facilitynames[i].c_name != NULL; i++) {
- if (!facility.compare(facilitynames[i].c_name)) {
- serverGlobalParams.syslogFacility = facilitynames[i].c_val;
- set = true;
- }
- }
- if (!set) {
- StringBuilder sb;
- sb << "ERROR: syslogFacility must be set to a string representing one of the "
- << "possible syslog facilities";
- return Status(ErrorCodes::BadValue, sb.str());
- }
- }
- else {
- serverGlobalParams.syslogFacility = LOG_USER;
- }
-#endif // _WIN32
+ if (params.count("systemLog.syslogFacility")) {
+ std::string facility = params["systemLog.syslogFacility"].as<string>();
+ bool set = false;
+ // match facility string to facility value
+ size_t facilitynamesLength = sizeof(facilitynames) / sizeof(facilitynames[0]);
+ for (unsigned long i = 0; i < facilitynamesLength && facilitynames[i].c_name != NULL; i++) {
+ if (!facility.compare(facilitynames[i].c_name)) {
+ serverGlobalParams.syslogFacility = facilitynames[i].c_val;
+ set = true;
+ }
+ }
+ if (!set) {
+ StringBuilder sb;
+ sb << "ERROR: syslogFacility must be set to a string representing one of the "
+ << "possible syslog facilities";
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
+ } else {
+ serverGlobalParams.syslogFacility = LOG_USER;
+ }
+#endif // _WIN32
- if (params.count("systemLog.logAppend") &&
- params["systemLog.logAppend"].as<bool>() == true) {
- serverGlobalParams.logAppend = true;
- }
+ if (params.count("systemLog.logAppend") && params["systemLog.logAppend"].as<bool>() == true) {
+ serverGlobalParams.logAppend = true;
+ }
- if (params.count("systemLog.logRotate")) {
- std::string logRotateParam = params["systemLog.logRotate"].as<string>();
- if (logRotateParam == "reopen") {
- serverGlobalParams.logRenameOnRotate = false;
+ if (params.count("systemLog.logRotate")) {
+ std::string logRotateParam = params["systemLog.logRotate"].as<string>();
+ if (logRotateParam == "reopen") {
+ serverGlobalParams.logRenameOnRotate = false;
- if(serverGlobalParams.logAppend == false) {
- return Status(ErrorCodes::BadValue,
- "logAppend must equal true if logRotate is set to reopen" );
- }
- }
- else if (logRotateParam == "rename") {
- serverGlobalParams.logRenameOnRotate = true;
- }
- else {
+ if (serverGlobalParams.logAppend == false) {
return Status(ErrorCodes::BadValue,
- "unsupported value for logRotate " + logRotateParam );
+ "logAppend must equal true if logRotate is set to reopen");
}
+ } else if (logRotateParam == "rename") {
+ serverGlobalParams.logRenameOnRotate = true;
+ } else {
+ return Status(ErrorCodes::BadValue,
+ "unsupported value for logRotate " + logRotateParam);
}
+ }
- if (!serverGlobalParams.logpath.empty() && serverGlobalParams.logWithSyslog) {
- return Status(ErrorCodes::BadValue, "Cant use both a logpath and syslog ");
- }
+ if (!serverGlobalParams.logpath.empty() && serverGlobalParams.logWithSyslog) {
+ return Status(ErrorCodes::BadValue, "Cant use both a logpath and syslog ");
+ }
- if (serverGlobalParams.doFork && serverGlobalParams.logpath.empty() &&
- !serverGlobalParams.logWithSyslog) {
- return Status(ErrorCodes::BadValue, "--fork has to be used with --logpath or --syslog");
- }
+ if (serverGlobalParams.doFork && serverGlobalParams.logpath.empty() &&
+ !serverGlobalParams.logWithSyslog) {
+ return Status(ErrorCodes::BadValue, "--fork has to be used with --logpath or --syslog");
+ }
- if (params.count("security.keyFile")) {
- serverGlobalParams.keyFile =
- boost::filesystem::absolute(
- params["security.keyFile"].as<string>()).generic_string();
- }
+ if (params.count("security.keyFile")) {
+ serverGlobalParams.keyFile =
+ boost::filesystem::absolute(params["security.keyFile"].as<string>()).generic_string();
+ }
- if ( params.count("processManagement.pidFilePath")) {
- serverGlobalParams.pidFile = params["processManagement.pidFilePath"].as<string>();
- }
+ if (params.count("processManagement.pidFilePath")) {
+ serverGlobalParams.pidFile = params["processManagement.pidFilePath"].as<string>();
+ }
- if (params.count("setParameter")) {
- std::map<std::string, std::string> parameters =
- params["setParameter"].as<std::map<std::string, std::string> >();
- for (std::map<std::string, std::string>::iterator parametersIt = parameters.begin();
- parametersIt != parameters.end(); parametersIt++) {
- ServerParameter* parameter = mapFindWithDefault(
- ServerParameterSet::getGlobal()->getMap(),
- parametersIt->first,
- static_cast<ServerParameter*>(NULL));
- if (NULL == parameter) {
- StringBuilder sb;
- sb << "Illegal --setParameter parameter: \"" << parametersIt->first << "\"";
- return Status(ErrorCodes::BadValue, sb.str());
- }
- if (!parameter->allowedToChangeAtStartup()) {
- StringBuilder sb;
- sb << "Cannot use --setParameter to set \"" << parametersIt->first
- << "\" at startup";
- return Status(ErrorCodes::BadValue, sb.str());
- }
- Status status = parameter->setFromString(parametersIt->second);
- if (!status.isOK()) {
- StringBuilder sb;
- sb << "Bad value for parameter \"" << parametersIt->first << "\": "
- << status.reason();
- return Status(ErrorCodes::BadValue, sb.str());
- }
+ if (params.count("setParameter")) {
+ std::map<std::string, std::string> parameters =
+ params["setParameter"].as<std::map<std::string, std::string>>();
+ for (std::map<std::string, std::string>::iterator parametersIt = parameters.begin();
+ parametersIt != parameters.end();
+ parametersIt++) {
+ ServerParameter* parameter =
+ mapFindWithDefault(ServerParameterSet::getGlobal()->getMap(),
+ parametersIt->first,
+ static_cast<ServerParameter*>(NULL));
+ if (NULL == parameter) {
+ StringBuilder sb;
+ sb << "Illegal --setParameter parameter: \"" << parametersIt->first << "\"";
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
+ if (!parameter->allowedToChangeAtStartup()) {
+ StringBuilder sb;
+ sb << "Cannot use --setParameter to set \"" << parametersIt->first
+ << "\" at startup";
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
+ Status status = parameter->setFromString(parametersIt->second);
+ if (!status.isOK()) {
+ StringBuilder sb;
+ sb << "Bad value for parameter \"" << parametersIt->first
+ << "\": " << status.reason();
+ return Status(ErrorCodes::BadValue, sb.str());
}
}
- if (!params.count("security.clusterAuthMode") && params.count("security.keyFile")){
- serverGlobalParams.clusterAuthMode.store
- (ServerGlobalParams::ClusterAuthMode_keyFile);
- }
+ }
+ if (!params.count("security.clusterAuthMode") && params.count("security.keyFile")) {
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_keyFile);
+ }
#ifdef MONGO_CONFIG_SSL
- ret = storeSSLServerOptions(params);
- if (!ret.isOK()) {
- return ret;
- }
+ ret = storeSSLServerOptions(params);
+ if (!ret.isOK()) {
+ return ret;
+ }
#endif
- return Status::OK();
- }
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
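
Every canonicalization block in the function above follows the same override-then-remove
pattern: a command-line switch, when present, overwrites the dotted config-file key and is
then erased so only the canonical name survives. The sketch below restates that pattern with
the same moe::Environment calls used above; the helper name and its parameters are invented
for illustration and are not part of this change.

    namespace moe = mongo::optionenvironment;

    // Hypothetical helper (not in the tree) showing the override-then-remove pattern.
    mongo::Status canonicalizeBooleanSwitch(moe::Environment* params,
                                            const std::string& switchName,  // e.g. "nounixsocket"
                                            const std::string& dottedName,  // e.g. "net.unixDomainSocket.enabled"
                                            bool negated) {                 // "no..." switches invert the value
        if (!params->count(switchName)) {
            return mongo::Status::OK();  // nothing on the command line; keep the config-file value
        }
        const bool value = (*params)[switchName].as<bool>();
        mongo::Status ret = params->set(dottedName, moe::Value(negated ? !value : value));
        if (!ret.isOK()) {
            return ret;
        }
        return params->remove(switchName);  // drop the switch so only the canonical key remains
    }
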
diff --git a/src/mongo/db/server_options_helpers.h b/src/mongo/db/server_options_helpers.h
index ff12f4de3e2..f97ab00e795 100644
--- a/src/mongo/db/server_options_helpers.h
+++ b/src/mongo/db/server_options_helpers.h
@@ -34,37 +34,36 @@
namespace mongo {
- namespace optionenvironment {
- class OptionSection;
- class Environment;
- } // namespace optionenvironment
+namespace optionenvironment {
+class OptionSection;
+class Environment;
+} // namespace optionenvironment
- namespace moe = mongo::optionenvironment;
+namespace moe = mongo::optionenvironment;
- Status addGeneralServerOptions(moe::OptionSection* options);
+Status addGeneralServerOptions(moe::OptionSection* options);
- Status addWindowsServerOptions(moe::OptionSection* options);
+Status addWindowsServerOptions(moe::OptionSection* options);
- Status addSSLServerOptions(moe::OptionSection* options);
+Status addSSLServerOptions(moe::OptionSection* options);
- /**
- * Handle custom validation of server options that can not currently be done by using
- * Constraints in the Environment. See the "validate" function in the Environment class for
- * more details.
- */
- Status validateServerOptions(const moe::Environment& params);
+/**
+ * Handle custom validation of server options that can not currently be done by using
+ * Constraints in the Environment. See the "validate" function in the Environment class for
+ * more details.
+ */
+Status validateServerOptions(const moe::Environment& params);
- /**
- * Canonicalize server options for the given environment.
- *
- * For example, the options "objcheck", "noobjcheck", and "net.wireObjectCheck" should all be
- * merged into "net.wireObjectCheck".
- */
- Status canonicalizeServerOptions(moe::Environment* params);
+/**
+ * Canonicalize server options for the given environment.
+ *
+ * For example, the options "objcheck", "noobjcheck", and "net.wireObjectCheck" should all be
+ * merged into "net.wireObjectCheck".
+ */
+Status canonicalizeServerOptions(moe::Environment* params);
- Status storeServerOptions(const moe::Environment& params,
- const std::vector<std::string>& args);
+Status storeServerOptions(const moe::Environment& params, const std::vector<std::string>& args);
- void printCommandLineOpts();
+void printCommandLineOpts();
-} // namespace mongo
+} // namespace mongo
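
The helpers declared above are meant to run in a fixed order at startup: add the option
definitions, parse, validate, canonicalize, then store. The fragment below is a minimal
sketch of that order only, with made-up arguments and includes omitted; it mirrors the
sequence the tests in server_options_test.cpp exercise below.

    mongo::Status runServerOptionsPipeline() {
        namespace moe = mongo::optionenvironment;

        moe::OptionSection options;
        moe::Environment environment;
        moe::OptionsParser parser;

        std::vector<std::string> argv = {"mongod", "--verbose", "vv"};  // made-up arguments
        std::map<std::string, std::string> env_map;

        mongo::Status status = mongo::addGeneralServerOptions(&options);
        if (status.isOK())
            status = parser.run(options, argv, env_map, &environment);  // parse argv and any config file
        if (status.isOK())
            status = mongo::validateServerOptions(environment);         // custom validation
        if (status.isOK())
            status = mongo::canonicalizeServerOptions(&environment);    // e.g. "vv" becomes systemLog.verbosity = 2
        if (status.isOK())
            status = mongo::storeServerOptions(environment, argv);      // populate serverGlobalParams
        return status;
    }
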
diff --git a/src/mongo/db/server_options_test.cpp b/src/mongo/db/server_options_test.cpp
index 496f11ddfb2..fb2d3e3fa52 100644
--- a/src/mongo/db/server_options_test.cpp
+++ b/src/mongo/db/server_options_test.cpp
@@ -36,384 +36,384 @@
namespace {
- using mongo::ErrorCodes;
- using mongo::Status;
-
- namespace moe = mongo::optionenvironment;
-
- class OptionsParserTester : public moe::OptionsParser {
- public:
- Status readConfigFile(const std::string& filename, std::string* config) {
- if (filename != _filename) {
- ::mongo::StringBuilder sb;
- sb << "Parser using filename: " << filename <<
- " which does not match expected filename: " << _filename;
- return Status(ErrorCodes::InternalError, sb.str());
- }
- *config = _config;
- return Status::OK();
+using mongo::ErrorCodes;
+using mongo::Status;
+
+namespace moe = mongo::optionenvironment;
+
+class OptionsParserTester : public moe::OptionsParser {
+public:
+ Status readConfigFile(const std::string& filename, std::string* config) {
+ if (filename != _filename) {
+ ::mongo::StringBuilder sb;
+ sb << "Parser using filename: " << filename
+ << " which does not match expected filename: " << _filename;
+ return Status(ErrorCodes::InternalError, sb.str());
}
- void setConfig(const std::string& filename, const std::string& config) {
- _filename = filename;
- _config = config;
- }
- private:
- std::string _filename;
- std::string _config;
- };
-
- TEST(Verbosity, Default) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
-
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
-
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
-
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- std::map<std::string, std::string> env_map;
-
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
-
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
-
- // Make sure the log level didn't change since we didn't specify any verbose options
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Info());
+ *config = _config;
+ return Status::OK();
}
-
- TEST(Verbosity, CommandLineImplicit) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
-
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
-
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
-
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--verbose");
- std::map<std::string, std::string> env_map;
-
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
-
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
-
- int verbosity = 1;
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Debug(verbosity));
+ void setConfig(const std::string& filename, const std::string& config) {
+ _filename = filename;
+ _config = config;
}
- TEST(Verbosity, CommandLineString) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
+private:
+ std::string _filename;
+ std::string _config;
+};
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
+TEST(Verbosity, Default) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--verbose");
- argv.push_back("vvvv");
- std::map<std::string, std::string> env_map;
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ std::map<std::string, std::string> env_map;
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
- int verbosity = 4;
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Debug(verbosity));
- }
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
- TEST(Verbosity, CommandLineEmptyString) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
+ // Make sure the log level didn't change since we didn't specify any verbose options
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Info());
+}
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
+TEST(Verbosity, CommandLineImplicit) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--verbose");
- argv.push_back("");
- std::map<std::string, std::string> env_map;
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--verbose");
+ std::map<std::string, std::string> env_map;
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
- int verbosity = 0;
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Debug(verbosity));
- }
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
- TEST(Verbosity, CommandLineBadString) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
+ int verbosity = 1;
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+}
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
+TEST(Verbosity, CommandLineString) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--verbose");
- argv.push_back("beloud");
- std::map<std::string, std::string> env_map;
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--verbose");
+ argv.push_back("vvvv");
+ std::map<std::string, std::string> env_map;
- ASSERT_NOT_OK(::mongo::validateServerOptions(environment));
- }
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
- TEST(Verbosity, INIConfigString) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
+ int verbosity = 4;
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+}
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+TEST(Verbosity, CommandLineEmptyString) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--config");
- argv.push_back("config.ini");
- std::map<std::string, std::string> env_map;
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
- parser.setConfig("config.ini", "verbose=vvvv");
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--verbose");
+ argv.push_back("");
+ std::map<std::string, std::string> env_map;
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
- int verbosity = 4;
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Debug(verbosity));
- }
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
- TEST(Verbosity, INIConfigBadString) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
+ int verbosity = 0;
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+}
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
+TEST(Verbosity, CommandLineBadString) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--config");
- argv.push_back("config.ini");
- std::map<std::string, std::string> env_map;
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
- parser.setConfig("config.ini", "verbose=beloud");
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--verbose");
+ argv.push_back("beloud");
+ std::map<std::string, std::string> env_map;
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
- ASSERT_NOT_OK(::mongo::validateServerOptions(environment));
- }
+ ASSERT_NOT_OK(::mongo::validateServerOptions(environment));
+}
- TEST(Verbosity, INIConfigEmptyString) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
+TEST(Verbosity, INIConfigString) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--config");
- argv.push_back("config.ini");
- std::map<std::string, std::string> env_map;
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--config");
+ argv.push_back("config.ini");
+ std::map<std::string, std::string> env_map;
- parser.setConfig("config.ini", "verbose=");
+ parser.setConfig("config.ini", "verbose=vvvv");
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
- int verbosity = 0;
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Debug(verbosity));
- }
+ int verbosity = 4;
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+}
- TEST(Verbosity, JSONConfigString) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
+TEST(Verbosity, INIConfigBadString) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--config");
- argv.push_back("config.json");
- std::map<std::string, std::string> env_map;
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--config");
+ argv.push_back("config.ini");
+ std::map<std::string, std::string> env_map;
- parser.setConfig("config.json", "{ \"systemLog.verbosity\" : 4 }");
+ parser.setConfig("config.ini", "verbose=beloud");
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+ ASSERT_NOT_OK(::mongo::validateServerOptions(environment));
+}
- int verbosity = 4;
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Debug(verbosity));
- }
+TEST(Verbosity, INIConfigEmptyString) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- TEST(Verbosity, MultipleSourcesMultipleOptions) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--config");
+ argv.push_back("config.ini");
+ std::map<std::string, std::string> env_map;
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--config");
- argv.push_back("config.json");
- argv.push_back("--verbose");
- argv.push_back("vvv");
- std::map<std::string, std::string> env_map;
+ parser.setConfig("config.ini", "verbose=");
- parser.setConfig("config.json", "{ \"systemLog.verbosity\" : 4 }");
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+ int verbosity = 0;
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+}
- int verbosity = 3;
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Debug(verbosity));
- }
+TEST(Verbosity, JSONConfigString) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
- TEST(Verbosity, YAMLConfigStringLogComponent) {
- OptionsParserTester parser;
- moe::Environment environment;
- moe::OptionSection options;
-
- // Reset the log level before we test
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogSeverity::Info());
- // Log level for Storage will be cleared by config file value.
- ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
- ::mongo::logger::LogComponent::kStorage,
- ::mongo::logger::LogSeverity::Debug(1));
-
- ASSERT_OK(::mongo::addGeneralServerOptions(&options));
-
- std::vector<std::string> argv;
- argv.push_back("binaryname");
- argv.push_back("--config");
- argv.push_back("config.yaml");
- std::map<std::string, std::string> env_map;
-
- parser.setConfig("config.yaml",
- "systemLog:\n"
- " verbosity: 4\n"
- " component:\n"
- " accessControl:\n"
- " verbosity: 0\n"
- " storage:\n"
- " verbosity: -1\n"
- " journal:\n"
- " verbosity: 2\n");
-
- ASSERT_OK(parser.run(options, argv, env_map, &environment));
-
- ASSERT_OK(::mongo::validateServerOptions(environment));
- ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
- ASSERT_OK(::mongo::storeServerOptions(environment, argv));
-
- // Verify component log levels using global log domain.
- int verbosity = 4;
-
- // Default
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
- ::mongo::logger::LogSeverity::Debug(verbosity));
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
- ::mongo::logger::LogComponent::kDefault),
- ::mongo::logger::LogSeverity::Debug(verbosity));
-
- // AccessControl
- ASSERT_TRUE(::mongo::logger::globalLogDomain()->hasMinimumLogSeverity(
- ::mongo::logger::LogComponent::kAccessControl));
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
- ::mongo::logger::LogComponent::kAccessControl),
- ::mongo::logger::LogSeverity::Log());
-
- // Query - not mentioned in configuration. should match default.
- ASSERT_FALSE(::mongo::logger::globalLogDomain()->hasMinimumLogSeverity(
- ::mongo::logger::LogComponent::kStorage));
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
- ::mongo::logger::LogComponent::kStorage),
- ::mongo::logger::LogSeverity::Debug(verbosity));
-
- // Storage - cleared by -1 value in configuration. should match default.
- ASSERT_FALSE(::mongo::logger::globalLogDomain()->hasMinimumLogSeverity(
- ::mongo::logger::LogComponent::kStorage));
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
- ::mongo::logger::LogComponent::kStorage),
- ::mongo::logger::LogSeverity::Debug(verbosity));
-
- // Journaling - explicitly set to 2 in configuration.
- ASSERT_TRUE(::mongo::logger::globalLogDomain()->hasMinimumLogSeverity(
- ::mongo::logger::LogComponent::kJournal));
- ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
- ::mongo::logger::LogComponent::kJournal),
- ::mongo::logger::LogSeverity::Debug(2));
- }
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
+
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
-} // unnamed namespace
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--config");
+ argv.push_back("config.json");
+ std::map<std::string, std::string> env_map;
+
+ parser.setConfig("config.json", "{ \"systemLog.verbosity\" : 4 }");
+
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
+
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+
+ int verbosity = 4;
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+}
+
+TEST(Verbosity, MultipleSourcesMultipleOptions) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
+
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
+
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--config");
+ argv.push_back("config.json");
+ argv.push_back("--verbose");
+ argv.push_back("vvv");
+ std::map<std::string, std::string> env_map;
+
+ parser.setConfig("config.json", "{ \"systemLog.verbosity\" : 4 }");
+
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
+
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+
+ int verbosity = 3;
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+}
+
+TEST(Verbosity, YAMLConfigStringLogComponent) {
+ OptionsParserTester parser;
+ moe::Environment environment;
+ moe::OptionSection options;
+
+ // Reset the log level before we test
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogSeverity::Info());
+ // Log level for Storage will be cleared by config file value.
+ ::mongo::logger::globalLogDomain()->setMinimumLoggedSeverity(
+ ::mongo::logger::LogComponent::kStorage, ::mongo::logger::LogSeverity::Debug(1));
+
+ ASSERT_OK(::mongo::addGeneralServerOptions(&options));
+
+ std::vector<std::string> argv;
+ argv.push_back("binaryname");
+ argv.push_back("--config");
+ argv.push_back("config.yaml");
+ std::map<std::string, std::string> env_map;
+
+ parser.setConfig("config.yaml",
+ "systemLog:\n"
+ " verbosity: 4\n"
+ " component:\n"
+ " accessControl:\n"
+ " verbosity: 0\n"
+ " storage:\n"
+ " verbosity: -1\n"
+ " journal:\n"
+ " verbosity: 2\n");
+
+ ASSERT_OK(parser.run(options, argv, env_map, &environment));
+
+ ASSERT_OK(::mongo::validateServerOptions(environment));
+ ASSERT_OK(::mongo::canonicalizeServerOptions(&environment));
+ ASSERT_OK(::mongo::storeServerOptions(environment, argv));
+
+ // Verify component log levels using global log domain.
+ int verbosity = 4;
+
+ // Default
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
+ ::mongo::logger::LogComponent::kDefault),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+
+ // AccessControl
+ ASSERT_TRUE(::mongo::logger::globalLogDomain()->hasMinimumLogSeverity(
+ ::mongo::logger::LogComponent::kAccessControl));
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
+ ::mongo::logger::LogComponent::kAccessControl),
+ ::mongo::logger::LogSeverity::Log());
+
+    // Query - not mentioned in configuration.  Should match default.
+    ASSERT_FALSE(::mongo::logger::globalLogDomain()->hasMinimumLogSeverity(
+        ::mongo::logger::LogComponent::kQuery));
+    ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
+                      ::mongo::logger::LogComponent::kQuery),
+                  ::mongo::logger::LogSeverity::Debug(verbosity));
+
+ // Storage - cleared by -1 value in configuration. should match default.
+ ASSERT_FALSE(::mongo::logger::globalLogDomain()->hasMinimumLogSeverity(
+ ::mongo::logger::LogComponent::kStorage));
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
+ ::mongo::logger::LogComponent::kStorage),
+ ::mongo::logger::LogSeverity::Debug(verbosity));
+
+ // Journaling - explicitly set to 2 in configuration.
+ ASSERT_TRUE(::mongo::logger::globalLogDomain()->hasMinimumLogSeverity(
+ ::mongo::logger::LogComponent::kJournal));
+ ASSERT_EQUALS(::mongo::logger::globalLogDomain()->getMinimumLogSeverity(
+ ::mongo::logger::LogComponent::kJournal),
+ ::mongo::logger::LogSeverity::Debug(2));
+}
+
+} // unnamed namespace
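
In the YAML component test above, a per-component verbosity of -1 does not set a negative
level; it clears the component override so that component falls back to the global
systemLog.verbosity. The sketch below (function name invented) restates the config from that
test as the logger calls storeServerOptions() makes.

    void applyYamlComponentLevelsSketch() {
        using namespace mongo::logger;

        // systemLog.verbosity: 4
        globalLogDomain()->setMinimumLoggedSeverity(LogSeverity::Debug(4));
        // systemLog.component.accessControl.verbosity: 0
        globalLogDomain()->setMinimumLoggedSeverity(LogComponent::kAccessControl,
                                                    LogSeverity::Debug(0));
        // systemLog.component.storage.verbosity: -1 clears the per-component override
        globalLogDomain()->clearMinimumLoggedSeverity(LogComponent::kStorage);
        // systemLog.component.storage.journal.verbosity: 2
        globalLogDomain()->setMinimumLoggedSeverity(LogComponent::kJournal,
                                                    LogSeverity::Debug(2));
        // kStorage now falls back to Debug(4); kJournal reports Debug(2).
    }
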
diff --git a/src/mongo/db/server_parameters.cpp b/src/mongo/db/server_parameters.cpp
index 0aeef5b4a42..7c8ade05063 100644
--- a/src/mongo/db/server_parameters.cpp
+++ b/src/mongo/db/server_parameters.cpp
@@ -36,84 +36,80 @@
namespace mongo {
- using std::string;
- using std::vector;
-
- namespace {
- ServerParameterSet* GLOBAL = NULL;
- }
-
- ServerParameter::ServerParameter( ServerParameterSet* sps, const std::string& name,
- bool allowedToChangeAtStartup, bool allowedToChangeAtRuntime )
- : _name( name ),
- _allowedToChangeAtStartup( allowedToChangeAtStartup ),
- _allowedToChangeAtRuntime( allowedToChangeAtRuntime ) {
-
- if ( sps ) {
- sps->add( this );
- }
- }
-
- ServerParameter::ServerParameter( ServerParameterSet* sps, const std::string& name )
- : _name( name ),
- _allowedToChangeAtStartup( true ),
- _allowedToChangeAtRuntime( true ) {
-
- if ( sps ) {
- sps->add( this );
- }
- }
-
- ServerParameter::~ServerParameter() {
+using std::string;
+using std::vector;
+
+namespace {
+ServerParameterSet* GLOBAL = NULL;
+}
+
+ServerParameter::ServerParameter(ServerParameterSet* sps,
+ const std::string& name,
+ bool allowedToChangeAtStartup,
+ bool allowedToChangeAtRuntime)
+ : _name(name),
+ _allowedToChangeAtStartup(allowedToChangeAtStartup),
+ _allowedToChangeAtRuntime(allowedToChangeAtRuntime) {
+ if (sps) {
+ sps->add(this);
}
+}
- ServerParameterSet* ServerParameterSet::getGlobal() {
- if ( !GLOBAL ) {
- GLOBAL = new ServerParameterSet();
- }
- return GLOBAL;
+ServerParameter::ServerParameter(ServerParameterSet* sps, const std::string& name)
+ : _name(name), _allowedToChangeAtStartup(true), _allowedToChangeAtRuntime(true) {
+ if (sps) {
+ sps->add(this);
}
+}
- void ServerParameterSet::add( ServerParameter* sp ) {
- ServerParameter*& x = _map[sp->name()];
- if ( x ) abort();
- x = sp;
- }
-
- template <typename T>
- Status ExportedServerParameter<T>::setFromString( const string& str ) {
- T value;
- Status status = parseNumberFromString( str, &value );
- if ( !status.isOK() )
- return status;
- return set( value );
- }
-
- template Status ExportedServerParameter<int>::setFromString( const string& str );
- template Status ExportedServerParameter<long long>::setFromString( const string& str );
- template Status ExportedServerParameter<double>::setFromString( const string& str );
-
- template<>
- Status ExportedServerParameter<string>::setFromString( const string& str ) {
- return set( str );
- }
-
- template<>
- Status ExportedServerParameter<bool>::setFromString( const string& str ) {
- if ( str == "true" ||
- str == "1" )
- return set(true);
- if ( str == "false" ||
- str == "0" )
- return set(false);
- return Status( ErrorCodes::BadValue, "can't convert string to bool" );
- }
+ServerParameter::~ServerParameter() {}
- template<>
- Status ExportedServerParameter< vector<string> >::setFromString( const string& str ) {
- vector<string> v;
- splitStringDelim( str, &v, ',' );
- return set( v );
+ServerParameterSet* ServerParameterSet::getGlobal() {
+ if (!GLOBAL) {
+ GLOBAL = new ServerParameterSet();
}
+ return GLOBAL;
+}
+
+void ServerParameterSet::add(ServerParameter* sp) {
+ ServerParameter*& x = _map[sp->name()];
+ if (x)
+ abort();
+ x = sp;
+}
+
+template <typename T>
+Status ExportedServerParameter<T>::setFromString(const string& str) {
+ T value;
+ Status status = parseNumberFromString(str, &value);
+ if (!status.isOK())
+ return status;
+ return set(value);
+}
+
+template Status ExportedServerParameter<int>::setFromString(const string& str);
+template Status ExportedServerParameter<long long>::setFromString(const string& str);
+template Status ExportedServerParameter<double>::setFromString(const string& str);
+
+template <>
+Status ExportedServerParameter<string>::setFromString(const string& str) {
+ return set(str);
+}
+
+template <>
+Status ExportedServerParameter<bool>::setFromString(const string& str) {
+ if (str == "true" || str == "1")
+ return set(true);
+ if (str == "false" || str == "0")
+ return set(false);
+ return Status(ErrorCodes::BadValue, "can't convert string to bool");
+}
+
+template <>
+Status ExportedServerParameter<vector<string>>::setFromString(const string& str) {
+ vector<string> v;
+ splitStringDelim(str, &v, ',');
+ return set(v);
+}
} // namespace mongo
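
The setFromString() specializations above are what --setParameter values are funneled
through: storeServerOptions() (earlier in this change) looks the parameter up by name, checks
allowedToChangeAtStartup(), and calls setFromString() with the supplied string. Below is a
sketch of how a module typically exports such a tunable; the parameter name and backing
variable are invented for illustration.

    #include "mongo/db/server_parameters.h"

    // Invented example parameter; not part of this change.
    int exampleThreshold = 5;  // backing storage owned by the defining module

    mongo::ExportedServerParameter<int> exampleThresholdParam(
        mongo::ServerParameterSet::getGlobal(),  // register in the process-wide set
        "exampleThreshold",                      // name accepted by --setParameter / setParameter
        &exampleThreshold,                       // where parsed values are written
        true,                                    // allowedToChangeAtStartup
        true);                                   // allowedToChangeAtRuntime

    // "--setParameter exampleThreshold=10" then reaches
    // ExportedServerParameter<int>::setFromString("10"), which parses the string with
    // parseNumberFromString() and writes 10 into exampleThreshold.
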
diff --git a/src/mongo/db/server_parameters.h b/src/mongo/db/server_parameters.h
index 9c281e4499c..fed9a6b21e0 100644
--- a/src/mongo/db/server_parameters.h
+++ b/src/mongo/db/server_parameters.h
@@ -38,125 +38,140 @@
namespace mongo {
- class ServerParameterSet;
- class OperationContext;
+class ServerParameterSet;
+class OperationContext;
- /**
- * Lets you make server level settings easily configurable.
- * Hooks into (set|get)Paramter, as well as command line processing
- */
- class ServerParameter {
- public:
- typedef std::map< std::string, ServerParameter* > Map;
+/**
+ * Lets you make server level settings easily configurable.
+ * Hooks into (set|get)Parameter, as well as command line processing.
+ */
+class ServerParameter {
+public:
+ typedef std::map<std::string, ServerParameter*> Map;
- ServerParameter( ServerParameterSet* sps, const std::string& name,
- bool allowedToChangeAtStartup, bool allowedToChangeAtRuntime );
- ServerParameter( ServerParameterSet* sps, const std::string& name );
- virtual ~ServerParameter();
+ ServerParameter(ServerParameterSet* sps,
+ const std::string& name,
+ bool allowedToChangeAtStartup,
+ bool allowedToChangeAtRuntime);
+ ServerParameter(ServerParameterSet* sps, const std::string& name);
+ virtual ~ServerParameter();
- std::string name() const { return _name; }
+ std::string name() const {
+ return _name;
+ }
- /**
- * @return if you can set on command line or config file
- */
- bool allowedToChangeAtStartup() const { return _allowedToChangeAtStartup; }
+ /**
+     * @return true if the parameter may be set on the command line or in a config file
+ */
+ bool allowedToChangeAtStartup() const {
+ return _allowedToChangeAtStartup;
+ }
- /**
- * @param if you can use (get|set)Parameter
- */
- bool allowedToChangeAtRuntime() const { return _allowedToChangeAtRuntime; }
+ /**
+     * @return true if the parameter may be changed at runtime via (get|set)Parameter
+ */
+ bool allowedToChangeAtRuntime() const {
+ return _allowedToChangeAtRuntime;
+ }
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name ) = 0;
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) = 0;
- virtual Status set( const BSONElement& newValueElement ) = 0;
+ virtual Status set(const BSONElement& newValueElement) = 0;
- virtual Status setFromString( const std::string& str ) = 0;
+ virtual Status setFromString(const std::string& str) = 0;
- private:
- std::string _name;
- bool _allowedToChangeAtStartup;
- bool _allowedToChangeAtRuntime;
- };
+private:
+ std::string _name;
+ bool _allowedToChangeAtStartup;
+ bool _allowedToChangeAtRuntime;
+};
- class ServerParameterSet {
- public:
- typedef std::map< std::string, ServerParameter* > Map;
+class ServerParameterSet {
+public:
+ typedef std::map<std::string, ServerParameter*> Map;
- void add( ServerParameter* sp );
+ void add(ServerParameter* sp);
- const Map& getMap() const { return _map; }
+ const Map& getMap() const {
+ return _map;
+ }
- static ServerParameterSet* getGlobal();
+ static ServerParameterSet* getGlobal();
- private:
- Map _map;
- };
+private:
+ Map _map;
+};
+/**
+ * Implementation of ServerParameter for reading and writing a server parameter with a given
+ * name and type into a specific C++ variable.
+ */
+template <typename T>
+class ExportedServerParameter : public ServerParameter {
+public:
/**
- * Implementation of ServerParameter for reading and writing a server parameter with a given
- * name and type into a specific C++ variable.
+ * Construct an ExportedServerParameter in parameter set "sps", named "name", whose storage
+ * is at "value".
+ *
+ * If allowedToChangeAtStartup is true, the parameter may be set at the command line,
+ * e.g. via the --setParameter switch. If allowedToChangeAtRuntime is true, the parameter
+ * may be set at runtime, e.g. via the setParameter command.
*/
- template<typename T>
- class ExportedServerParameter : public ServerParameter {
- public:
-
- /**
- * Construct an ExportedServerParameter in parameter set "sps", named "name", whose storage
- * is at "value".
- *
- * If allowedToChangeAtStartup is true, the parameter may be set at the command line,
- * e.g. via the --setParameter switch. If allowedToChangeAtRuntime is true, the parameter
- * may be set at runtime, e.g. via the setParameter command.
- */
- ExportedServerParameter( ServerParameterSet* sps, const std::string& name, T* value,
- bool allowedToChangeAtStartup, bool allowedToChangeAtRuntime)
- : ServerParameter( sps, name, allowedToChangeAtStartup, allowedToChangeAtRuntime ),
- _value( value ) {}
- virtual ~ExportedServerParameter() {}
-
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b.append( name, *_value );
- }
-
- virtual Status set( const BSONElement& newValueElement );
- virtual Status set( const T& newValue );
-
- virtual const T& get() const { return *_value; }
-
- virtual Status setFromString( const std::string& str );
-
- protected:
-
- virtual Status validate( const T& potentialNewValue ){ return Status::OK(); }
-
- T* _value; // owned elsewhere
- };
+ ExportedServerParameter(ServerParameterSet* sps,
+ const std::string& name,
+ T* value,
+ bool allowedToChangeAtStartup,
+ bool allowedToChangeAtRuntime)
+ : ServerParameter(sps, name, allowedToChangeAtStartup, allowedToChangeAtRuntime),
+ _value(value) {}
+ virtual ~ExportedServerParameter() {}
+
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b.append(name, *_value);
+ }
+
+ virtual Status set(const BSONElement& newValueElement);
+ virtual Status set(const T& newValue);
+
+ virtual const T& get() const {
+ return *_value;
+ }
+
+ virtual Status setFromString(const std::string& str);
+
+protected:
+ virtual Status validate(const T& potentialNewValue) {
+ return Status::OK();
+ }
+
+ T* _value; // owned elsewhere
+};
}
-#define MONGO_EXPORT_SERVER_PARAMETER_IMPL( NAME, TYPE, INITIAL_VALUE, \
- CHANGE_AT_STARTUP, CHANGE_AT_RUNTIME ) \
- TYPE NAME = INITIAL_VALUE; \
- ExportedServerParameter<TYPE> _##NAME(\
- ServerParameterSet::getGlobal(), #NAME, &NAME, CHANGE_AT_STARTUP, CHANGE_AT_RUNTIME )
+#define MONGO_EXPORT_SERVER_PARAMETER_IMPL( \
+ NAME, TYPE, INITIAL_VALUE, CHANGE_AT_STARTUP, CHANGE_AT_RUNTIME) \
+ TYPE NAME = INITIAL_VALUE; \
+ ExportedServerParameter<TYPE> _##NAME( \
+ ServerParameterSet::getGlobal(), #NAME, &NAME, CHANGE_AT_STARTUP, CHANGE_AT_RUNTIME)
/**
* Create a global variable of type "TYPE" named "NAME" with the given INITIAL_VALUE. The
* value may be set at startup or at runtime.
*/
-#define MONGO_EXPORT_SERVER_PARAMETER( NAME, TYPE, INITIAL_VALUE ) \
- MONGO_EXPORT_SERVER_PARAMETER_IMPL( NAME, TYPE, INITIAL_VALUE, true, true )
+#define MONGO_EXPORT_SERVER_PARAMETER(NAME, TYPE, INITIAL_VALUE) \
+ MONGO_EXPORT_SERVER_PARAMETER_IMPL(NAME, TYPE, INITIAL_VALUE, true, true)
/**
* Like MONGO_EXPORT_SERVER_PARAMETER, but the value may only be set at startup.
*/
-#define MONGO_EXPORT_STARTUP_SERVER_PARAMETER( NAME, TYPE, INITIAL_VALUE ) \
- MONGO_EXPORT_SERVER_PARAMETER_IMPL( NAME, TYPE, INITIAL_VALUE, true, false )
+#define MONGO_EXPORT_STARTUP_SERVER_PARAMETER(NAME, TYPE, INITIAL_VALUE) \
+ MONGO_EXPORT_SERVER_PARAMETER_IMPL(NAME, TYPE, INITIAL_VALUE, true, false)
/**
* Like MONGO_EXPORT_SERVER_PARAMETER, but the value may only be set at runtime.
*/
-#define MONGO_EXPORT_RUNTIME_SERVER_PARAMETER( NAME, TYPE, INITIAL_VALUE ) \
- MONGO_EXPORT_SERVER_PARAMETER_IMPL( NAME, TYPE, INITIAL_VALUE, false, true )
+#define MONGO_EXPORT_RUNTIME_SERVER_PARAMETER(NAME, TYPE, INITIAL_VALUE) \
+ MONGO_EXPORT_SERVER_PARAMETER_IMPL(NAME, TYPE, INITIAL_VALUE, false, true)
#include "server_parameters_inline.h"
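In practice most parameters are declared through these macros rather than by instantiating ExportedServerParameter by hand. A hedged sketch of what a declaration looks like in some .cpp file; the parameter names are made up for illustration:

    // Expands to `int exampleThreshold = 10;` plus a registered
    // ExportedServerParameter<int> named "exampleThreshold", settable at startup and runtime.
    MONGO_EXPORT_SERVER_PARAMETER(exampleThreshold, int, 10);

    // Startup-only: may be set via --setParameter on the command line,
    // but not through the runtime setParameter command.
    MONGO_EXPORT_STARTUP_SERVER_PARAMETER(exampleStartupFlag, bool, false);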
diff --git a/src/mongo/db/server_parameters_inline.h b/src/mongo/db/server_parameters_inline.h
index 66f8ad56453..a05a820334c 100644
--- a/src/mongo/db/server_parameters_inline.h
+++ b/src/mongo/db/server_parameters_inline.h
@@ -32,25 +32,24 @@
namespace mongo {
- template<typename T>
- inline Status ExportedServerParameter<T>::set( const BSONElement& newValueElement ) {
- T newValue;
+template <typename T>
+inline Status ExportedServerParameter<T>::set(const BSONElement& newValueElement) {
+ T newValue;
- if ( !newValueElement.coerce( &newValue) )
- return Status( ErrorCodes::BadValue, "can't set value" );
+ if (!newValueElement.coerce(&newValue))
+ return Status(ErrorCodes::BadValue, "can't set value");
- return set( newValue );
- }
+ return set(newValue);
+}
- template<typename T>
- inline Status ExportedServerParameter<T>::set( const T& newValue ) {
+template <typename T>
+inline Status ExportedServerParameter<T>::set(const T& newValue) {
+ Status v = validate(newValue);
+ if (!v.isOK())
+ return v;
- Status v = validate( newValue );
- if ( !v.isOK() )
- return v;
-
- *_value = newValue;
- return Status::OK();
- }
+ *_value = newValue;
+ return Status::OK();
+}
} // namespace mongo
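Because both set(const BSONElement&) and setFromString() funnel through set(const T&), which calls validate() before storing, range checking only needs that one override. A minimal sketch, assuming a hypothetical bounded integer parameter:

    // Hypothetical parameter that rejects values outside [1, 100].
    class BoundedIntParameter : public ExportedServerParameter<int> {
    public:
        BoundedIntParameter(ServerParameterSet* sps, const std::string& name, int* value)
            : ExportedServerParameter<int>(sps, name, value, true, true) {}

    protected:
        virtual Status validate(const int& potentialNewValue) {
            if (potentialNewValue < 1 || potentialNewValue > 100)
                return Status(ErrorCodes::BadValue, "value must be between 1 and 100");
            return Status::OK();
        }
    };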
diff --git a/src/mongo/db/server_parameters_test.cpp b/src/mongo/db/server_parameters_test.cpp
index 6e74cde215d..bc7dea80e53 100644
--- a/src/mongo/db/server_parameters_test.cpp
+++ b/src/mongo/db/server_parameters_test.cpp
@@ -35,55 +35,55 @@
namespace mongo {
- using std::string;
- using std::vector;
-
- TEST( ServerParameters, Simple1 ) {
- int f = 5;
- ExportedServerParameter<int> ff( NULL, "ff", &f, true, true );
- ASSERT_EQUALS( "ff" , ff.name() );
- ASSERT_EQUALS( 5, ff.get() );
-
- ff.set( 6 );
- ASSERT_EQUALS( 6, ff.get() );
- ASSERT_EQUALS( 6, f );
-
- ff.set( BSON( "x" << 7 ).firstElement() );
- ASSERT_EQUALS( 7, ff.get() );
- ASSERT_EQUALS( 7, f );
-
- ff.setFromString( "8" );
- ASSERT_EQUALS( 8, ff.get() );
- ASSERT_EQUALS( 8, f );
-
- }
-
- TEST( ServerParameters, Vector1 ) {
- vector<string> v;
+using std::string;
+using std::vector;
+
+TEST(ServerParameters, Simple1) {
+ int f = 5;
+ ExportedServerParameter<int> ff(NULL, "ff", &f, true, true);
+ ASSERT_EQUALS("ff", ff.name());
+ ASSERT_EQUALS(5, ff.get());
+
+ ff.set(6);
+ ASSERT_EQUALS(6, ff.get());
+ ASSERT_EQUALS(6, f);
+
+ ff.set(BSON("x" << 7).firstElement());
+ ASSERT_EQUALS(7, ff.get());
+ ASSERT_EQUALS(7, f);
+
+ ff.setFromString("8");
+ ASSERT_EQUALS(8, ff.get());
+ ASSERT_EQUALS(8, f);
+}
- ExportedServerParameter< vector<string> > vv( NULL, "vv", &v, true, true );
+TEST(ServerParameters, Vector1) {
+ vector<string> v;
- BSONObj x = BSON( "x" << BSON_ARRAY( "a" << "b" << "c" ) );
- vv.set( x.firstElement() );
+ ExportedServerParameter<vector<string>> vv(NULL, "vv", &v, true, true);
- ASSERT_EQUALS( 3U, v.size() );
- ASSERT_EQUALS( "a", v[0] );
- ASSERT_EQUALS( "b", v[1] );
- ASSERT_EQUALS( "c", v[2] );
+ BSONObj x = BSON("x" << BSON_ARRAY("a"
+ << "b"
+ << "c"));
+ vv.set(x.firstElement());
- BSONObjBuilder b;
+ ASSERT_EQUALS(3U, v.size());
+ ASSERT_EQUALS("a", v[0]);
+ ASSERT_EQUALS("b", v[1]);
+ ASSERT_EQUALS("c", v[2]);
- OperationContextNoop txn;
- vv.append(&txn, b, vv.name());
+ BSONObjBuilder b;
- BSONObj y = b.obj();
- ASSERT( x.firstElement().woCompare( y.firstElement(), false ) == 0 );
+ OperationContextNoop txn;
+ vv.append(&txn, b, vv.name());
+ BSONObj y = b.obj();
+ ASSERT(x.firstElement().woCompare(y.firstElement(), false) == 0);
- vv.setFromString( "d,e" );
- ASSERT_EQUALS( 2U, v.size() );
- ASSERT_EQUALS( "d", v[0] );
- ASSERT_EQUALS( "e", v[1] );
- }
+ vv.setFromString("d,e");
+ ASSERT_EQUALS(2U, v.size());
+ ASSERT_EQUALS("d", v[0]);
+ ASSERT_EQUALS("e", v[1]);
+}
}
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index 745cb2f9d76..8dfa7bfdc98 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -39,222 +39,217 @@
namespace mongo {
- namespace {
+namespace {
- ServiceContext* globalServiceContext = NULL;
+ServiceContext* globalServiceContext = NULL;
- } // namespace
+} // namespace
- bool hasGlobalServiceContext() { return globalServiceContext; }
+bool hasGlobalServiceContext() {
+ return globalServiceContext;
+}
- ServiceContext* getGlobalServiceContext() {
- fassert(17508, globalServiceContext);
- return globalServiceContext;
- }
-
- void setGlobalServiceContext(std::unique_ptr<ServiceContext>&& serviceContext) {
- fassert(17509, serviceContext.get());
+ServiceContext* getGlobalServiceContext() {
+ fassert(17508, globalServiceContext);
+ return globalServiceContext;
+}
- delete globalServiceContext;
+void setGlobalServiceContext(std::unique_ptr<ServiceContext>&& serviceContext) {
+ fassert(17509, serviceContext.get());
- globalServiceContext = serviceContext.release();
- }
+ delete globalServiceContext;
- bool _supportsDocLocking = false;
+ globalServiceContext = serviceContext.release();
+}
- bool supportsDocLocking() {
- return _supportsDocLocking;
- }
+bool _supportsDocLocking = false;
- bool isMMAPV1() {
- StorageEngine* globalStorageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+bool supportsDocLocking() {
+ return _supportsDocLocking;
+}
- invariant(globalStorageEngine);
- return globalStorageEngine->isMmapV1();
- }
+bool isMMAPV1() {
+ StorageEngine* globalStorageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- Status validateStorageOptions(const BSONObj& storageEngineOptions,
- stdx::function<Status (const StorageEngine::Factory* const, const BSONObj&)> validateFunc) {
-
- BSONObjIterator storageIt(storageEngineOptions);
- while (storageIt.more()) {
- BSONElement storageElement = storageIt.next();
- StringData storageEngineName = storageElement.fieldNameStringData();
- if (storageElement.type() != mongo::Object) {
- return Status(ErrorCodes::BadValue, str::stream()
- << "'storageEngine." << storageElement.fieldNameStringData()
- << "' has to be an embedded document.");
- }
+ invariant(globalStorageEngine);
+ return globalStorageEngine->isMmapV1();
+}
- std::unique_ptr<StorageFactoriesIterator> sfi(getGlobalServiceContext()->
- makeStorageFactoriesIterator());
- invariant(sfi);
- bool found = false;
- while (sfi->more()) {
- const StorageEngine::Factory* const& factory = sfi->next();
- if (storageEngineName != factory->getCanonicalName()) {
- continue;
- }
- Status status = validateFunc(factory, storageElement.Obj());
- if ( !status.isOK() ) {
- return status;
- }
- found = true;
- }
- if (!found) {
- return Status(ErrorCodes::InvalidOptions, str::stream() << storageEngineName <<
- " is not a registered storage engine for this server");
- }
+Status validateStorageOptions(
+ const BSONObj& storageEngineOptions,
+ stdx::function<Status(const StorageEngine::Factory* const, const BSONObj&)> validateFunc) {
+ BSONObjIterator storageIt(storageEngineOptions);
+ while (storageIt.more()) {
+ BSONElement storageElement = storageIt.next();
+ StringData storageEngineName = storageElement.fieldNameStringData();
+ if (storageElement.type() != mongo::Object) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "'storageEngine." << storageElement.fieldNameStringData()
+ << "' has to be an embedded document.");
}
- return Status::OK();
- }
-
- ServiceContext::~ServiceContext() {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- invariant(_clients.empty());
- }
- ServiceContext::UniqueClient ServiceContext::makeClient(std::string desc,
- AbstractMessagingPort* p) {
- std::unique_ptr<Client> client(new Client(std::move(desc), this, p));
- auto observer = _clientObservers.cbegin();
- try {
- for (; observer != _clientObservers.cend(); ++observer) {
- observer->get()->onCreateClient(client.get());
- }
- }
- catch (...) {
- try {
- while (observer != _clientObservers.cbegin()) {
- --observer;
- observer->get()->onDestroyClient(client.get());
- }
+ std::unique_ptr<StorageFactoriesIterator> sfi(
+ getGlobalServiceContext()->makeStorageFactoriesIterator());
+ invariant(sfi);
+ bool found = false;
+ while (sfi->more()) {
+ const StorageEngine::Factory* const& factory = sfi->next();
+ if (storageEngineName != factory->getCanonicalName()) {
+ continue;
}
- catch (...) {
- std::terminate();
+ Status status = validateFunc(factory, storageElement.Obj());
+ if (!status.isOK()) {
+ return status;
}
- throw;
+ found = true;
}
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- invariant(_clients.insert(client.get()).second);
+ if (!found) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << storageEngineName
+ << " is not a registered storage engine for this server");
}
- return UniqueClient(client.release());
- }
-
- TickSource* ServiceContext::getTickSource() const {
- return _tickSource.get();
- }
-
- void ServiceContext::setTickSource(std::unique_ptr<TickSource> newSource) {
- _tickSource = std::move(newSource);
}
-
- void ServiceContext::ClientDeleter::operator()(Client* client) const {
- ServiceContext* const service = client->getServiceContext();
- {
- stdx::lock_guard<stdx::mutex> lk(service->_mutex);
- invariant(service->_clients.erase(client));
+ return Status::OK();
+}
+
+ServiceContext::~ServiceContext() {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ invariant(_clients.empty());
+}
+
+ServiceContext::UniqueClient ServiceContext::makeClient(std::string desc,
+ AbstractMessagingPort* p) {
+ std::unique_ptr<Client> client(new Client(std::move(desc), this, p));
+ auto observer = _clientObservers.cbegin();
+ try {
+ for (; observer != _clientObservers.cend(); ++observer) {
+ observer->get()->onCreateClient(client.get());
}
+ } catch (...) {
try {
- for (const auto& observer : service->_clientObservers) {
- observer->onDestroyClient(client);
+ while (observer != _clientObservers.cbegin()) {
+ --observer;
+ observer->get()->onDestroyClient(client.get());
}
- }
- catch (...) {
+ } catch (...) {
std::terminate();
}
- delete client;
+ throw;
}
-
- ServiceContext::UniqueOperationContext ServiceContext::makeOperationContext(Client* client) {
- auto opCtx = _newOpCtx(client);
- auto observer = _clientObservers.begin();
- try {
- for (; observer != _clientObservers.cend(); ++observer) {
- observer->get()->onCreateOperationContext(opCtx.get());
- }
+ {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ invariant(_clients.insert(client.get()).second);
+ }
+ return UniqueClient(client.release());
+}
+
+TickSource* ServiceContext::getTickSource() const {
+ return _tickSource.get();
+}
+
+void ServiceContext::setTickSource(std::unique_ptr<TickSource> newSource) {
+ _tickSource = std::move(newSource);
+}
+
+void ServiceContext::ClientDeleter::operator()(Client* client) const {
+ ServiceContext* const service = client->getServiceContext();
+ {
+ stdx::lock_guard<stdx::mutex> lk(service->_mutex);
+ invariant(service->_clients.erase(client));
+ }
+ try {
+ for (const auto& observer : service->_clientObservers) {
+ observer->onDestroyClient(client);
}
- catch (...) {
- try {
- while (observer != _clientObservers.cbegin()) {
- --observer;
- observer->get()->onDestroyOperationContext(opCtx.get());
- }
- }
- catch (...) {
- std::terminate();
- }
- throw;
+ } catch (...) {
+ std::terminate();
+ }
+ delete client;
+}
+
+ServiceContext::UniqueOperationContext ServiceContext::makeOperationContext(Client* client) {
+ auto opCtx = _newOpCtx(client);
+ auto observer = _clientObservers.begin();
+ try {
+ for (; observer != _clientObservers.cend(); ++observer) {
+ observer->get()->onCreateOperationContext(opCtx.get());
}
- // // TODO(schwerin): When callers no longer construct their own OperationContexts directly,
- // // but only through the ServiceContext, uncomment the following. Until then, it must
- // // be done in the operation context destructors, which introduces a potential race.
- // {
- // stdx::lock_guard<Client> lk(*client);
- // client->setOperationContext(opCtx.get());
- // }
- return UniqueOperationContext(opCtx.release());
- };
-
- void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx) const {
- auto client = opCtx->getClient();
- auto service = client->getServiceContext();
- // // TODO(schwerin): When callers no longer construct their own OperationContexts directly,
- // // but only through the ServiceContext, uncomment the following. Until then, it must
- // // be done in the operation context destructors, which introduces a potential race.
- // {
- // stdx::lock_guard<Client> lk(*client);
- // client->resetOperationContext();
- // }
+ } catch (...) {
try {
- for (const auto& observer : service->_clientObservers) {
- observer->onDestroyOperationContext(opCtx);
+ while (observer != _clientObservers.cbegin()) {
+ --observer;
+ observer->get()->onDestroyOperationContext(opCtx.get());
}
- }
- catch (...) {
+ } catch (...) {
std::terminate();
}
- delete opCtx;
+ throw;
}
-
- void ServiceContext::registerClientObserver(std::unique_ptr<ClientObserver> observer) {
- _clientObservers.push_back(std::move(observer));
+ // // TODO(schwerin): When callers no longer construct their own OperationContexts directly,
+ // // but only through the ServiceContext, uncomment the following. Until then, it must
+ // // be done in the operation context destructors, which introduces a potential race.
+ // {
+ // stdx::lock_guard<Client> lk(*client);
+ // client->setOperationContext(opCtx.get());
+ // }
+ return UniqueOperationContext(opCtx.release());
+};
+
+void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx) const {
+ auto client = opCtx->getClient();
+ auto service = client->getServiceContext();
+ // // TODO(schwerin): When callers no longer construct their own OperationContexts directly,
+ // // but only through the ServiceContext, uncomment the following. Until then, it must
+ // // be done in the operation context destructors, which introduces a potential race.
+ // {
+ // stdx::lock_guard<Client> lk(*client);
+ // client->resetOperationContext();
+ // }
+ try {
+ for (const auto& observer : service->_clientObservers) {
+ observer->onDestroyOperationContext(opCtx);
+ }
+ } catch (...) {
+ std::terminate();
}
+ delete opCtx;
+}
- ServiceContext::LockedClientsCursor::LockedClientsCursor(ServiceContext* service)
- : _lock(service->_mutex),
- _curr(service->_clients.cbegin()),
- _end(service->_clients.cend()) {}
-
- Client* ServiceContext::LockedClientsCursor::next() {
- if (_curr == _end)
- return nullptr;
- Client* result = *_curr;
- ++_curr;
- return result;
- }
+void ServiceContext::registerClientObserver(std::unique_ptr<ClientObserver> observer) {
+ _clientObservers.push_back(std::move(observer));
+}
- BSONArray storageEngineList() {
- if (!hasGlobalServiceContext())
- return BSONArray();
+ServiceContext::LockedClientsCursor::LockedClientsCursor(ServiceContext* service)
+ : _lock(service->_mutex), _curr(service->_clients.cbegin()), _end(service->_clients.cend()) {}
- std::unique_ptr<StorageFactoriesIterator> sfi(
- getGlobalServiceContext()->makeStorageFactoriesIterator());
+Client* ServiceContext::LockedClientsCursor::next() {
+ if (_curr == _end)
+ return nullptr;
+ Client* result = *_curr;
+ ++_curr;
+ return result;
+}
- if (!sfi)
- return BSONArray();
+BSONArray storageEngineList() {
+ if (!hasGlobalServiceContext())
+ return BSONArray();
- BSONArrayBuilder engineArrayBuilder;
+ std::unique_ptr<StorageFactoriesIterator> sfi(
+ getGlobalServiceContext()->makeStorageFactoriesIterator());
- while (sfi->more()) {
- engineArrayBuilder.append(sfi->next()->getCanonicalName());
- }
+ if (!sfi)
+ return BSONArray();
- return engineArrayBuilder.arr();
- }
+ BSONArrayBuilder engineArrayBuilder;
- void appendStorageEngineList(BSONObjBuilder* result) {
- result->append("storageEngines", storageEngineList());
+ while (sfi->more()) {
+ engineArrayBuilder.append(sfi->next()->getCanonicalName());
}
+
+ return engineArrayBuilder.arr();
+}
+
+void appendStorageEngineList(BSONObjBuilder* result) {
+ result->append("storageEngines", storageEngineList());
+}
} // namespace mongo
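The deleters above are what give UniqueClient and UniqueOperationContext their semantics: destroying the handle unregisters the object and notifies every registered ClientObserver. A small usage sketch; the function names and client description are illustrative:

    void runExampleWork(ServiceContext* service) {
        // Observers are told about the new client; the deleter notifies them again on destruction.
        ServiceContext::UniqueClient client = service->makeClient("example-worker");
        ServiceContext::UniqueOperationContext txn = service->makeOperationContext(client.get());
        // ... use txn.get() ...
    }

    // Enumerate live clients while blocking client creation and destruction.
    int countClients(ServiceContext* service) {
        int n = 0;
        ServiceContext::LockedClientsCursor cursor(service);
        while (cursor.next())
            ++n;
        return n;
    }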
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index 066f6c6bc70..8f07c34d5a1 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -40,362 +40,366 @@
namespace mongo {
- class AbstractMessagingPort;
- class Client;
- class OperationContext;
- class OpObserver;
+class AbstractMessagingPort;
+class Client;
+class OperationContext;
+class OpObserver;
+/**
+ * Classes that implement this interface can receive notification on killOp.
+ *
+ * See ServiceContext::registerKillOpListener() for more information, including
+ * limitations on the lifetime of registered listeners.
+ */
+class KillOpListenerInterface {
+public:
/**
- * Classes that implement this interface can receive notification on killOp.
- *
- * See GlobalEnvironmentExperiment::registerKillOpListener() for more information, including
- * limitations on the lifetime of registered listeners.
+ * Will be called *after* ops have been told they should die.
+ * Callback must not fail.
*/
- class KillOpListenerInterface {
- public:
- /**
- * Will be called *after* ops have been told they should die.
- * Callback must not fail.
- */
- virtual void interrupt(unsigned opId) = 0;
- virtual void interruptAll() = 0;
+ virtual void interrupt(unsigned opId) = 0;
+ virtual void interruptAll() = 0;
- protected:
- // Should not delete through a pointer of this type
- virtual ~KillOpListenerInterface() {}
- };
+protected:
+ // Should not delete through a pointer of this type
+ virtual ~KillOpListenerInterface() {}
+};
+
+class StorageFactoriesIterator {
+ MONGO_DISALLOW_COPYING(StorageFactoriesIterator);
+
+public:
+ virtual ~StorageFactoriesIterator() {}
+ virtual bool more() const = 0;
+ virtual const StorageEngine::Factory* next() = 0;
- class StorageFactoriesIterator {
- MONGO_DISALLOW_COPYING(StorageFactoriesIterator);
+protected:
+ StorageFactoriesIterator() {}
+};
+
+/**
+ * Class representing the context of a service, such as a MongoD database service or
+ * a MongoS routing service.
+ *
+ * A ServiceContext is the root of a hierarchy of contexts. A ServiceContext owns
+ * zero or more Clients, which in turn each own OperationContexts.
+ */
+class ServiceContext : public Decorable<ServiceContext> {
+ MONGO_DISALLOW_COPYING(ServiceContext);
+
+public:
+ /**
+ * Special deleter used for cleaning up Client objects owned by a ServiceContext.
+ * See UniqueClient, below.
+ */
+ class ClientDeleter {
public:
- virtual ~StorageFactoriesIterator() { }
- virtual bool more() const = 0;
- virtual const StorageEngine::Factory* next() = 0;
- protected:
- StorageFactoriesIterator() { }
+ void operator()(Client* client) const;
};
/**
- * Class representing the context of a service, such as a MongoD database service or
- * a MongoS routing service.
- *
- * A ServiceContext is the root of a hierarchy of contexts. A ServiceContext owns
- * zero or more Clients, which in turn each own OperationContexts.
+ * Observer interface implemented to hook client and operation context creation and
+ * destruction.
*/
- class ServiceContext : public Decorable<ServiceContext> {
- MONGO_DISALLOW_COPYING(ServiceContext);
+ class ClientObserver {
public:
- /**
- * Special deleter used for cleaning up Client objects owned by a ServiceContext.
- * See UniqueClient, below.
- */
- class ClientDeleter {
- public:
- void operator()(Client* client) const;
- };
+ virtual ~ClientObserver() = default;
/**
- * Observer interface implemented to hook client and operation context creation and
- * destruction.
- */
- class ClientObserver {
- public:
- virtual ~ClientObserver() = default;
-
- /**
- * Hook called after a new client "client" is created on a service by
- * service->makeClient().
- *
- * For a given client and registered instance of ClientObserver, if onCreateClient
- * returns without throwing an exception, onDestroyClient will be called when "client"
- * is deleted.
- */
- virtual void onCreateClient(Client* client) = 0;
-
- /**
- * Hook called on a "client" created by a service before deleting "client".
- *
- * Like a destructor, must not throw exceptions.
- */
- virtual void onDestroyClient(Client* client) = 0;
-
- /**
- * Hook called after a new operation context is created on a client by
- * service->makeOperationContext(client) or client->makeOperationContext().
- *
- * For a given operation context and registered instance of ClientObserver, if
- * onCreateOperationContext returns without throwing an exception,
- * onDestroyOperationContext will be called when "opCtx" is deleted.
- */
- virtual void onCreateOperationContext(OperationContext* opCtx) = 0;
-
- /**
- * Hook called on a "opCtx" created by a service before deleting "opCtx".
- *
- * Like a destructor, must not throw exceptions.
- */
- virtual void onDestroyOperationContext(OperationContext* opCtx) = 0;
- };
-
- using ClientSet = unordered_set<Client*>;
-
- /**
- * Cursor for enumerating the live Client objects belonging to a ServiceContext.
+ * Hook called after a new client "client" is created on a service by
+ * service->makeClient().
*
- * Lifetimes of this type are synchronized with client creation and destruction.
- */
- class LockedClientsCursor {
- public:
- /**
- * Constructs a cursor for enumerating the clients of "service", blocking "service" from
- * creating or destroying Client objects until this instance is destroyed.
- */
- explicit LockedClientsCursor(ServiceContext* service);
-
- /**
- * Returns the next client in the enumeration, or nullptr if there are no more clients.
- */
- Client* next();
-
- private:
- stdx::unique_lock<stdx::mutex> _lock;
- ClientSet::const_iterator _curr;
- ClientSet::const_iterator _end;
- };
-
- /**
- * Special deleter used for cleaning up OperationContext objects owned by a ServiceContext.
- * See UniqueOperationContext, below.
- */
- class OperationContextDeleter {
- public:
- void operator()(OperationContext* opCtx) const;
- };
-
- /**
- * This is the unique handle type for Clients created by a ServiceContext.
+ * For a given client and registered instance of ClientObserver, if onCreateClient
+ * returns without throwing an exception, onDestroyClient will be called when "client"
+ * is deleted.
*/
- using UniqueClient = std::unique_ptr<Client, ClientDeleter>;
+ virtual void onCreateClient(Client* client) = 0;
/**
- * This is the unique handle type for OperationContexts created by a ServiceContext.
- */
- using UniqueOperationContext = std::unique_ptr<OperationContext, OperationContextDeleter>;
-
- virtual ~ServiceContext();
-
- /**
- * Registers an observer of lifecycle events on Clients created by this ServiceContext.
- *
- * See the ClientObserver type, above, for details.
+ * Hook called on a "client" created by a service before deleting "client".
*
- * All calls to registerClientObserver must complete before ServiceContext
- * is used in multi-threaded operation, or is used to create clients via calls
- * to makeClient.
+ * Like a destructor, must not throw exceptions.
*/
- void registerClientObserver(std::unique_ptr<ClientObserver> observer);
+ virtual void onDestroyClient(Client* client) = 0;
/**
- * Creates a new Client object representing a client session associated with this
- * ServiceContext.
+ * Hook called after a new operation context is created on a client by
+ * service->makeOperationContext(client) or client->makeOperationContext().
*
- * The "desc" string is used to set a descriptive name for the client, used in logging.
- *
- * If supplied, "p" is the communication channel used for communicating with the client.
+ * For a given operation context and registered instance of ClientObserver, if
+ * onCreateOperationContext returns without throwing an exception,
+ * onDestroyOperationContext will be called when "opCtx" is deleted.
*/
- UniqueClient makeClient(std::string desc, AbstractMessagingPort* p = nullptr);
+ virtual void onCreateOperationContext(OperationContext* opCtx) = 0;
/**
- * Creates a new OperationContext on "client".
+ * Hook called on a "opCtx" created by a service before deleting "opCtx".
*
- * "client" must not have an active operation context.
+ * Like a destructor, must not throw exceptions.
*/
- UniqueOperationContext makeOperationContext(Client* client);
+ virtual void onDestroyOperationContext(OperationContext* opCtx) = 0;
+ };
- //
- // Storage
- //
-
- /**
- * Register a storage engine. Called from a MONGO_INIT that depends on initializiation of
- * the global environment.
- * Ownership of 'factory' is transferred to global environment upon registration.
- */
- virtual void registerStorageEngine(const std::string& name,
- const StorageEngine::Factory* factory) = 0;
+ using ClientSet = unordered_set<Client*>;
+ /**
+ * Cursor for enumerating the live Client objects belonging to a ServiceContext.
+ *
+ * Lifetimes of this type are synchronized with client creation and destruction.
+ */
+ class LockedClientsCursor {
+ public:
/**
- * Returns true if "name" refers to a registered storage engine.
+ * Constructs a cursor for enumerating the clients of "service", blocking "service" from
+ * creating or destroying Client objects until this instance is destroyed.
*/
- virtual bool isRegisteredStorageEngine(const std::string& name) = 0;
+ explicit LockedClientsCursor(ServiceContext* service);
/**
- * Produce an iterator over all registered storage engine factories.
- * Caller owns the returned object and is responsible for deleting when finished.
- *
- * Never returns nullptr.
+ * Returns the next client in the enumeration, or nullptr if there are no more clients.
*/
- virtual StorageFactoriesIterator* makeStorageFactoriesIterator() = 0;
+ Client* next();
- virtual void initializeGlobalStorageEngine() = 0;
+ private:
+ stdx::unique_lock<stdx::mutex> _lock;
+ ClientSet::const_iterator _curr;
+ ClientSet::const_iterator _end;
+ };
- /**
- * Shuts down storage engine cleanly and releases any locks on mongod.lock.
- */
- virtual void shutdownGlobalStorageEngineCleanly() = 0;
+ /**
+ * Special deleter used for cleaning up OperationContext objects owned by a ServiceContext.
+ * See UniqueOperationContext, below.
+ */
+ class OperationContextDeleter {
+ public:
+ void operator()(OperationContext* opCtx) const;
+ };
- /**
- * Return the storage engine instance we're using.
- */
- virtual StorageEngine* getGlobalStorageEngine() = 0;
+ /**
+ * This is the unique handle type for Clients created by a ServiceContext.
+ */
+ using UniqueClient = std::unique_ptr<Client, ClientDeleter>;
- //
- // Global operation management. This may not belong here and there may be too many methods
- // here.
- //
+ /**
+ * This is the unique handle type for OperationContexts created by a ServiceContext.
+ */
+ using UniqueOperationContext = std::unique_ptr<OperationContext, OperationContextDeleter>;
- /**
- * Signal all OperationContext(s) that they have been killed.
- */
- virtual void setKillAllOperations() = 0;
+ virtual ~ServiceContext();
- /**
- * Reset the operation kill state after a killAllOperations.
- * Used for testing.
- */
- virtual void unsetKillAllOperations() = 0;
+ /**
+ * Registers an observer of lifecycle events on Clients created by this ServiceContext.
+ *
+ * See the ClientObserver type, above, for details.
+ *
+ * All calls to registerClientObserver must complete before ServiceContext
+ * is used in multi-threaded operation, or is used to create clients via calls
+ * to makeClient.
+ */
+ void registerClientObserver(std::unique_ptr<ClientObserver> observer);
- /**
- * Get the state for killing all operations.
- */
- virtual bool getKillAllOperations() = 0;
+ /**
+ * Creates a new Client object representing a client session associated with this
+ * ServiceContext.
+ *
+ * The "desc" string is used to set a descriptive name for the client, used in logging.
+ *
+ * If supplied, "p" is the communication channel used for communicating with the client.
+ */
+ UniqueClient makeClient(std::string desc, AbstractMessagingPort* p = nullptr);
- /**
- * @param i opid of operation to kill
- * @return if operation was found
- **/
- virtual bool killOperation(unsigned int opId) = 0;
+ /**
+ * Creates a new OperationContext on "client".
+ *
+ * "client" must not have an active operation context.
+ */
+ UniqueOperationContext makeOperationContext(Client* client);
- /**
- * Kills all operations that have a Client that is associated with an incoming user
- * connection, except for the one associated with txn.
- */
- virtual void killAllUserOperations(const OperationContext* txn) = 0;
+ //
+ // Storage
+ //
- /**
- * Registers a listener to be notified each time an op is killed.
- *
- * listener does not become owned by the environment. As there is currently no way to
- * unregister, the listener object must outlive this ServiceContext object.
- */
- virtual void registerKillOpListener(KillOpListenerInterface* listener) = 0;
+ /**
+     * Register a storage engine. Called from a MONGO_INIT that depends on initialization of
+ * the global environment.
+ * Ownership of 'factory' is transferred to global environment upon registration.
+ */
+ virtual void registerStorageEngine(const std::string& name,
+ const StorageEngine::Factory* factory) = 0;
- //
- // Global OpObserver.
- //
+ /**
+ * Returns true if "name" refers to a registered storage engine.
+ */
+ virtual bool isRegisteredStorageEngine(const std::string& name) = 0;
- /**
- * Set the OpObserver.
- */
- virtual void setOpObserver(std::unique_ptr<OpObserver> opObserver) = 0;
+ /**
+ * Produce an iterator over all registered storage engine factories.
+ * Caller owns the returned object and is responsible for deleting when finished.
+ *
+ * Never returns nullptr.
+ */
+ virtual StorageFactoriesIterator* makeStorageFactoriesIterator() = 0;
- /**
- * Return the OpObserver instance we're using.
- */
- virtual OpObserver* getOpObserver() = 0;
+ virtual void initializeGlobalStorageEngine() = 0;
- /**
- * Returns the tick source set in this context.
- */
- TickSource* getTickSource() const;
+ /**
+ * Shuts down storage engine cleanly and releases any locks on mongod.lock.
+ */
+ virtual void shutdownGlobalStorageEngineCleanly() = 0;
- /**
- * Replaces the current tick source with a new one. In other words, the old tick source
- * will be destroyed. So make sure that no one is using the old tick source when
- * calling this.
- */
- void setTickSource(std::unique_ptr<TickSource> newSource);
+ /**
+ * Return the storage engine instance we're using.
+ */
+ virtual StorageEngine* getGlobalStorageEngine() = 0;
- protected:
- ServiceContext() = default;
+ //
+ // Global operation management. This may not belong here and there may be too many methods
+ // here.
+ //
- /**
- * Mutex used to synchronize access to mutable state of this ServiceContext instance,
- * including possibly by its subclasses.
- */
- stdx::mutex _mutex;
+ /**
+ * Signal all OperationContext(s) that they have been killed.
+ */
+ virtual void setKillAllOperations() = 0;
- private:
- /**
- * Returns a new OperationContext. Private, for use by makeOperationContext.
- */
- virtual std::unique_ptr<OperationContext> _newOpCtx(Client* client) = 0;
+ /**
+ * Reset the operation kill state after a killAllOperations.
+ * Used for testing.
+ */
+ virtual void unsetKillAllOperations() = 0;
- /**
- * Vector of registered observers.
- */
- std::vector<std::unique_ptr<ClientObserver>> _clientObservers;
- ClientSet _clients;
+ /**
+ * Get the state for killing all operations.
+ */
+ virtual bool getKillAllOperations() = 0;
- std::unique_ptr<TickSource> _tickSource;
- };
+ /**
+ * @param i opid of operation to kill
+ * @return if operation was found
+ **/
+ virtual bool killOperation(unsigned int opId) = 0;
/**
- * Returns true if there is a global ServiceContext.
+ * Kills all operations that have a Client that is associated with an incoming user
+ * connection, except for the one associated with txn.
*/
- bool hasGlobalServiceContext();
+ virtual void killAllUserOperations(const OperationContext* txn) = 0;
/**
- * Returns the singleton ServiceContext for this server process.
- *
- * Fatal if there is currently no global ServiceContext.
+ * Registers a listener to be notified each time an op is killed.
*
- * Caller does not own pointer.
+ * listener does not become owned by the environment. As there is currently no way to
+ * unregister, the listener object must outlive this ServiceContext object.
*/
- ServiceContext* getGlobalServiceContext();
+ virtual void registerKillOpListener(KillOpListenerInterface* listener) = 0;
+
+ //
+ // Global OpObserver.
+ //
/**
- * Sets the global ServiceContext. If 'serviceContext' is NULL, un-sets and deletes
- * the current global ServiceContext.
- *
- * Takes ownership of 'serviceContext'.
+ * Set the OpObserver.
*/
- void setGlobalServiceContext(std::unique_ptr<ServiceContext>&& serviceContext);
+ virtual void setOpObserver(std::unique_ptr<OpObserver> opObserver) = 0;
/**
- * Shortcut for querying the storage engine about whether it supports document-level locking.
- * If this call becomes too expensive, we could cache the value somewhere so we don't have to
- * fetch the storage engine every time.
+ * Return the OpObserver instance we're using.
*/
- bool supportsDocLocking();
+ virtual OpObserver* getOpObserver() = 0;
/**
- * Returns true if the storage engine in use is MMAPV1.
+ * Returns the tick source set in this context.
*/
- bool isMMAPV1();
-
- /*
- * Extracts the storageEngine bson from the CollectionOptions provided. Loops through each
- * provided storageEngine and asks the matching registered storage engine if the
- * collection/index options are valid. Returns an error if the collection/index options are
- * invalid.
- * If no matching registered storage engine is found, return an error.
- * Validation function 'func' must be either:
- * - &StorageEngine::Factory::validateCollectionStorageOptions; or
- * - &StorageEngine::Factory::validateIndexStorageOptions
+ TickSource* getTickSource() const;
+
+ /**
+ * Replaces the current tick source with a new one. In other words, the old tick source
+ * will be destroyed. So make sure that no one is using the old tick source when
+ * calling this.
*/
- Status validateStorageOptions(const BSONObj& storageEngineOptions,
- stdx::function<Status (const StorageEngine::Factory* const, const BSONObj&)> validateFunc);
+ void setTickSource(std::unique_ptr<TickSource> newSource);
+
+protected:
+ ServiceContext() = default;
- /*
- * Returns a BSONArray containing the names of available storage engines, or an empty
- * array if there is no global ServiceContext
+ /**
+ * Mutex used to synchronize access to mutable state of this ServiceContext instance,
+ * including possibly by its subclasses.
*/
- BSONArray storageEngineList();
+ stdx::mutex _mutex;
- /*
- * Appends a the list of available storage engines to a BSONObjBuilder for reporting purposes.
+private:
+ /**
+ * Returns a new OperationContext. Private, for use by makeOperationContext.
*/
- void appendStorageEngineList(BSONObjBuilder* result);
+ virtual std::unique_ptr<OperationContext> _newOpCtx(Client* client) = 0;
+
+ /**
+ * Vector of registered observers.
+ */
+ std::vector<std::unique_ptr<ClientObserver>> _clientObservers;
+ ClientSet _clients;
+
+ std::unique_ptr<TickSource> _tickSource;
+};
+
+/**
+ * Returns true if there is a global ServiceContext.
+ */
+bool hasGlobalServiceContext();
+
+/**
+ * Returns the singleton ServiceContext for this server process.
+ *
+ * Fatal if there is currently no global ServiceContext.
+ *
+ * Caller does not own pointer.
+ */
+ServiceContext* getGlobalServiceContext();
+
+/**
+ * Sets the global ServiceContext. If 'serviceContext' is NULL, un-sets and deletes
+ * the current global ServiceContext.
+ *
+ * Takes ownership of 'serviceContext'.
+ */
+void setGlobalServiceContext(std::unique_ptr<ServiceContext>&& serviceContext);
+
+/**
+ * Shortcut for querying the storage engine about whether it supports document-level locking.
+ * If this call becomes too expensive, we could cache the value somewhere so we don't have to
+ * fetch the storage engine every time.
+ */
+bool supportsDocLocking();
+
+/**
+ * Returns true if the storage engine in use is MMAPV1.
+ */
+bool isMMAPV1();
+
+/*
+ * Extracts the storageEngine BSON from the provided CollectionOptions. Loops through each
+ * provided storageEngine and asks the matching registered storage engine if the
+ * collection/index options are valid. Returns an error if the collection/index options are
+ * invalid.
+ * If no matching registered storage engine is found, return an error.
+ * Validation function 'func' must be either:
+ * - &StorageEngine::Factory::validateCollectionStorageOptions; or
+ * - &StorageEngine::Factory::validateIndexStorageOptions
+ */
+Status validateStorageOptions(
+ const BSONObj& storageEngineOptions,
+ stdx::function<Status(const StorageEngine::Factory* const, const BSONObj&)> validateFunc);
+
+/*
+ * Returns a BSONArray containing the names of available storage engines, or an empty
+ * array if there is no global ServiceContext
+ */
+BSONArray storageEngineList();
+
+/*
+ * Appends the list of available storage engines to a BSONObjBuilder for reporting purposes.
+ */
+void appendStorageEngineList(BSONObjBuilder* result);
} // namespace mongo
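A hedged sketch of the ClientObserver hooks in use; ExampleClientObserver is hypothetical, and per the contract above it must be registered before the service is used from multiple threads:

    class ExampleClientObserver : public ServiceContext::ClientObserver {
    public:
        virtual void onCreateClient(Client* client) {
            // attach per-client bookkeeping here; if this throws, onDestroyClient is not called
        }
        virtual void onDestroyClient(Client* client) {
            // like a destructor: must not throw
        }
        virtual void onCreateOperationContext(OperationContext* opCtx) {}
        virtual void onDestroyOperationContext(OperationContext* opCtx) {}
    };

    void installExampleObserver(ServiceContext* service) {
        service->registerClientObserver(stdx::make_unique<ExampleClientObserver>());
    }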
diff --git a/src/mongo/db/service_context_d.cpp b/src/mongo/db/service_context_d.cpp
index f02423a9e1a..7fd76d14580 100644
--- a/src/mongo/db/service_context_d.cpp
+++ b/src/mongo/db/service_context_d.cpp
@@ -53,254 +53,248 @@
namespace mongo {
- MONGO_INITIALIZER(SetGlobalEnvironment)(InitializerContext* context) {
- setGlobalServiceContext(stdx::make_unique<ServiceContextMongoD>());
- return Status::OK();
- }
-
- ServiceContextMongoD::ServiceContextMongoD()
- : _globalKill(false),
- _storageEngine(NULL) { }
-
- ServiceContextMongoD::~ServiceContextMongoD() {
-
- }
-
- StorageEngine* ServiceContextMongoD::getGlobalStorageEngine() {
- // We don't check that globalStorageEngine is not-NULL here intentionally. We can encounter
- // an error before it's initialized and proceed to exitCleanly which is equipped to deal
- // with a NULL storage engine.
- return _storageEngine;
- }
-
- extern bool _supportsDocLocking;
-
- void ServiceContextMongoD::initializeGlobalStorageEngine() {
- // This should be set once.
- invariant(!_storageEngine);
-
- const std::string dbpath = storageGlobalParams.dbpath;
- if (auto existingStorageEngine = StorageEngineMetadata::getStorageEngineForPath(dbpath)) {
- if (storageGlobalParams.engineSetByUser) {
- // Verify that the name of the user-supplied storage engine matches the contents of
- // the metadata file.
- const StorageEngine::Factory* factory = mapFindWithDefault(
- _storageFactories,
- storageGlobalParams.engine,
- static_cast<const StorageEngine::Factory*>(nullptr));
-
- if (factory) {
- uassert(28662, str::stream()
- << "Cannot start server. Detected data files in " << dbpath << " created by"
- << " the '" << *existingStorageEngine << "' storage engine, but the"
- << " specified storage engine was '" << factory->getCanonicalName() << "'.",
+MONGO_INITIALIZER(SetGlobalEnvironment)(InitializerContext* context) {
+ setGlobalServiceContext(stdx::make_unique<ServiceContextMongoD>());
+ return Status::OK();
+}
+
+ServiceContextMongoD::ServiceContextMongoD() : _globalKill(false), _storageEngine(NULL) {}
+
+ServiceContextMongoD::~ServiceContextMongoD() {}
+
+StorageEngine* ServiceContextMongoD::getGlobalStorageEngine() {
+    // We intentionally don't check that globalStorageEngine is non-NULL here. We can encounter
+ // an error before it's initialized and proceed to exitCleanly which is equipped to deal
+ // with a NULL storage engine.
+ return _storageEngine;
+}
+
+extern bool _supportsDocLocking;
+
+void ServiceContextMongoD::initializeGlobalStorageEngine() {
+ // This should be set once.
+ invariant(!_storageEngine);
+
+ const std::string dbpath = storageGlobalParams.dbpath;
+ if (auto existingStorageEngine = StorageEngineMetadata::getStorageEngineForPath(dbpath)) {
+ if (storageGlobalParams.engineSetByUser) {
+ // Verify that the name of the user-supplied storage engine matches the contents of
+ // the metadata file.
+ const StorageEngine::Factory* factory =
+ mapFindWithDefault(_storageFactories,
+ storageGlobalParams.engine,
+ static_cast<const StorageEngine::Factory*>(nullptr));
+
+ if (factory) {
+ uassert(28662,
+ str::stream()
+ << "Cannot start server. Detected data files in " << dbpath
+ << " created by"
+ << " the '" << *existingStorageEngine << "' storage engine, but the"
+ << " specified storage engine was '" << factory->getCanonicalName()
+ << "'.",
factory->getCanonicalName() == *existingStorageEngine);
- }
- }
- else {
- // Otherwise set the active storage engine as the contents of the metadata file.
- log() << "Detected data files in " << dbpath << " created by the '"
- << *existingStorageEngine << "' storage engine, so setting the active"
- << " storage engine to '" << *existingStorageEngine << "'.";
- storageGlobalParams.engine = *existingStorageEngine;
}
+ } else {
+ // Otherwise set the active storage engine as the contents of the metadata file.
+ log() << "Detected data files in " << dbpath << " created by the '"
+ << *existingStorageEngine << "' storage engine, so setting the active"
+ << " storage engine to '" << *existingStorageEngine << "'.";
+ storageGlobalParams.engine = *existingStorageEngine;
}
- else if (!storageGlobalParams.engineSetByUser) {
- // Ensure the default storage engine is available with this build of mongod.
- uassert(28663, str::stream()
- << "Cannot start server. The default storage engine '" << storageGlobalParams.engine
- << "' is not available with this build of mongod. Please specify a different"
- << " storage engine explicitly, e.g. --storageEngine=mmapv1.",
+ } else if (!storageGlobalParams.engineSetByUser) {
+ // Ensure the default storage engine is available with this build of mongod.
+ uassert(28663,
+ str::stream()
+ << "Cannot start server. The default storage engine '"
+ << storageGlobalParams.engine
+ << "' is not available with this build of mongod. Please specify a different"
+ << " storage engine explicitly, e.g. --storageEngine=mmapv1.",
isRegisteredStorageEngine(storageGlobalParams.engine));
- }
+ }
- const StorageEngine::Factory* factory = _storageFactories[storageGlobalParams.engine];
+ const StorageEngine::Factory* factory = _storageFactories[storageGlobalParams.engine];
- uassert(18656, str::stream()
- << "Cannot start server with an unknown storage engine: " << storageGlobalParams.engine,
+ uassert(18656,
+ str::stream() << "Cannot start server with an unknown storage engine: "
+ << storageGlobalParams.engine,
factory);
- std::unique_ptr<StorageEngineMetadata> metadata = StorageEngineMetadata::forPath(dbpath);
+ std::unique_ptr<StorageEngineMetadata> metadata = StorageEngineMetadata::forPath(dbpath);
- // Validate options in metadata against current startup options.
- if (metadata.get()) {
- uassertStatusOK(factory->validateMetadata(*metadata, storageGlobalParams));
- }
+ // Validate options in metadata against current startup options.
+ if (metadata.get()) {
+ uassertStatusOK(factory->validateMetadata(*metadata, storageGlobalParams));
+ }
- try {
- _lockFile.reset(new StorageEngineLockFile(storageGlobalParams.dbpath));
- }
- catch (const std::exception& ex) {
- uassert(28596, str::stream()
- << "Unable to determine status of lock file in the data directory "
- << storageGlobalParams.dbpath << ": " << ex.what(),
+ try {
+ _lockFile.reset(new StorageEngineLockFile(storageGlobalParams.dbpath));
+ } catch (const std::exception& ex) {
+ uassert(28596,
+ str::stream() << "Unable to determine status of lock file in the data directory "
+ << storageGlobalParams.dbpath << ": " << ex.what(),
false);
- }
- if (_lockFile->createdByUncleanShutdown()) {
- warning() << "Detected unclean shutdown - "
- << _lockFile->getFilespec() << " is not empty.";
- }
- uassertStatusOK(_lockFile->open());
-
- ScopeGuard guard = MakeGuard(&StorageEngineLockFile::close, _lockFile.get());
- _storageEngine = factory->create(storageGlobalParams, *_lockFile);
- _storageEngine->finishInit();
- uassertStatusOK(_lockFile->writePid());
-
- // Write a new metadata file if it is not present.
- if (!metadata.get()) {
- metadata.reset(new StorageEngineMetadata(storageGlobalParams.dbpath));
- metadata->setStorageEngine(factory->getCanonicalName().toString());
- metadata->setStorageEngineOptions(factory->createMetadataOptions(storageGlobalParams));
- uassertStatusOK(metadata->write());
- }
+ }
+ if (_lockFile->createdByUncleanShutdown()) {
+ warning() << "Detected unclean shutdown - " << _lockFile->getFilespec() << " is not empty.";
+ }
+ uassertStatusOK(_lockFile->open());
+
+ ScopeGuard guard = MakeGuard(&StorageEngineLockFile::close, _lockFile.get());
+ _storageEngine = factory->create(storageGlobalParams, *_lockFile);
+ _storageEngine->finishInit();
+ uassertStatusOK(_lockFile->writePid());
+
+ // Write a new metadata file if it is not present.
+ if (!metadata.get()) {
+ metadata.reset(new StorageEngineMetadata(storageGlobalParams.dbpath));
+ metadata->setStorageEngine(factory->getCanonicalName().toString());
+ metadata->setStorageEngineOptions(factory->createMetadataOptions(storageGlobalParams));
+ uassertStatusOK(metadata->write());
+ }
- guard.Dismiss();
+ guard.Dismiss();
- _supportsDocLocking = _storageEngine->supportsDocLocking();
- }
+ _supportsDocLocking = _storageEngine->supportsDocLocking();
+}
- void ServiceContextMongoD::shutdownGlobalStorageEngineCleanly() {
- invariant(_storageEngine);
- invariant(_lockFile.get());
- _storageEngine->cleanShutdown();
- _lockFile->clearPidAndUnlock();
- }
+void ServiceContextMongoD::shutdownGlobalStorageEngineCleanly() {
+ invariant(_storageEngine);
+ invariant(_lockFile.get());
+ _storageEngine->cleanShutdown();
+ _lockFile->clearPidAndUnlock();
+}
- void ServiceContextMongoD::registerStorageEngine(const std::string& name,
- const StorageEngine::Factory* factory) {
- // No double-registering.
- invariant(0 == _storageFactories.count(name));
+void ServiceContextMongoD::registerStorageEngine(const std::string& name,
+ const StorageEngine::Factory* factory) {
+ // No double-registering.
+ invariant(0 == _storageFactories.count(name));
- // Some sanity checks: the factory must exist,
- invariant(factory);
+ // Some sanity checks: the factory must exist,
+ invariant(factory);
- // and all factories should be added before we pick a storage engine.
- invariant(NULL == _storageEngine);
+ // and all factories should be added before we pick a storage engine.
+ invariant(NULL == _storageEngine);
- _storageFactories[name] = factory;
- }
+ _storageFactories[name] = factory;
+}
- bool ServiceContextMongoD::isRegisteredStorageEngine(const std::string& name) {
- return _storageFactories.count(name);
- }
+bool ServiceContextMongoD::isRegisteredStorageEngine(const std::string& name) {
+ return _storageFactories.count(name);
+}
- StorageFactoriesIterator* ServiceContextMongoD::makeStorageFactoriesIterator() {
- return new StorageFactoriesIteratorMongoD(_storageFactories.begin(),
- _storageFactories.end());
- }
+StorageFactoriesIterator* ServiceContextMongoD::makeStorageFactoriesIterator() {
+ return new StorageFactoriesIteratorMongoD(_storageFactories.begin(), _storageFactories.end());
+}
- StorageFactoriesIteratorMongoD::StorageFactoriesIteratorMongoD(
- const ServiceContextMongoD::FactoryMap::const_iterator& begin,
- const ServiceContextMongoD::FactoryMap::const_iterator& end) :
- _curr(begin), _end(end) {
- }
+StorageFactoriesIteratorMongoD::StorageFactoriesIteratorMongoD(
+ const ServiceContextMongoD::FactoryMap::const_iterator& begin,
+ const ServiceContextMongoD::FactoryMap::const_iterator& end)
+ : _curr(begin), _end(end) {}
- bool StorageFactoriesIteratorMongoD::more() const {
- return _curr != _end;
- }
+bool StorageFactoriesIteratorMongoD::more() const {
+ return _curr != _end;
+}
- const StorageEngine::Factory* StorageFactoriesIteratorMongoD::next() {
- return _curr++->second;
- }
+const StorageEngine::Factory* StorageFactoriesIteratorMongoD::next() {
+ return _curr++->second;
+}
- void ServiceContextMongoD::setKillAllOperations() {
- stdx::lock_guard<stdx::mutex> clientLock(_mutex);
- _globalKill = true;
- for (const auto listener : _killOpListeners) {
- try {
- listener->interruptAll();
- }
- catch (...) {
- std::terminate();
- }
+void ServiceContextMongoD::setKillAllOperations() {
+ stdx::lock_guard<stdx::mutex> clientLock(_mutex);
+ _globalKill = true;
+ for (const auto listener : _killOpListeners) {
+ try {
+ listener->interruptAll();
+ } catch (...) {
+ std::terminate();
}
}
+}
- bool ServiceContextMongoD::getKillAllOperations() {
- return _globalKill;
- }
+bool ServiceContextMongoD::getKillAllOperations() {
+ return _globalKill;
+}
- bool ServiceContextMongoD::_killOperationsAssociatedWithClientAndOpId_inlock(
- Client* client, unsigned int opId) {
- OperationContext* opCtx = client->getOperationContext();
- if (!opCtx) {
- return false;
- }
- if (opCtx->getOpID() != opId) {
- return false;
- }
- _killOperation_inlock(opCtx);
- return true;
+bool ServiceContextMongoD::_killOperationsAssociatedWithClientAndOpId_inlock(Client* client,
+ unsigned int opId) {
+ OperationContext* opCtx = client->getOperationContext();
+ if (!opCtx) {
+ return false;
+ }
+ if (opCtx->getOpID() != opId) {
+ return false;
}
+ _killOperation_inlock(opCtx);
+ return true;
+}
- void ServiceContextMongoD::_killOperation_inlock(OperationContext* opCtx) {
- opCtx->markKilled();
+void ServiceContextMongoD::_killOperation_inlock(OperationContext* opCtx) {
+ opCtx->markKilled();
- for (const auto listener : _killOpListeners) {
- try {
- listener->interrupt(opCtx->getOpID());
- }
- catch (...) {
- std::terminate();
- }
+ for (const auto listener : _killOpListeners) {
+ try {
+ listener->interrupt(opCtx->getOpID());
+ } catch (...) {
+ std::terminate();
}
}
-
- bool ServiceContextMongoD::killOperation(unsigned int opId) {
- for (LockedClientsCursor cursor(this); Client* client = cursor.next();) {
- stdx::lock_guard<Client> lk(*client);
- bool found = _killOperationsAssociatedWithClientAndOpId_inlock(client, opId);
- if (found) {
- return true;
- }
+}
+
+bool ServiceContextMongoD::killOperation(unsigned int opId) {
+ for (LockedClientsCursor cursor(this); Client* client = cursor.next();) {
+ stdx::lock_guard<Client> lk(*client);
+ bool found = _killOperationsAssociatedWithClientAndOpId_inlock(client, opId);
+ if (found) {
+ return true;
}
-
- return false;
}
- void ServiceContextMongoD::killAllUserOperations(const OperationContext* txn) {
- for (LockedClientsCursor cursor(this); Client* client = cursor.next();) {
- if (!client->isFromUserConnection()) {
- // Don't kill system operations.
- continue;
- }
+ return false;
+}
- stdx::lock_guard<Client> lk(*client);
- OperationContext* toKill = client->getOperationContext();
- if (!toKill) {
- continue;
- }
+void ServiceContextMongoD::killAllUserOperations(const OperationContext* txn) {
+ for (LockedClientsCursor cursor(this); Client* client = cursor.next();) {
+ if (!client->isFromUserConnection()) {
+ // Don't kill system operations.
+ continue;
+ }
- if (toKill->getOpID() == txn->getOpID()) {
- // Don't kill ourself.
- continue;
- }
+ stdx::lock_guard<Client> lk(*client);
+ OperationContext* toKill = client->getOperationContext();
+ if (!toKill) {
+ continue;
+ }
- _killOperation_inlock(toKill);
+ if (toKill->getOpID() == txn->getOpID()) {
+            // Don't kill ourselves.
+ continue;
}
- }
- void ServiceContextMongoD::unsetKillAllOperations() {
- _globalKill = false;
+ _killOperation_inlock(toKill);
}
+}
- void ServiceContextMongoD::registerKillOpListener(KillOpListenerInterface* listener) {
- stdx::lock_guard<stdx::mutex> clientLock(_mutex);
- _killOpListeners.push_back(listener);
- }
+void ServiceContextMongoD::unsetKillAllOperations() {
+ _globalKill = false;
+}
- std::unique_ptr<OperationContext> ServiceContextMongoD::_newOpCtx(Client* client) {
- invariant(&cc() == client);
- return stdx::make_unique<OperationContextImpl>();
- }
+void ServiceContextMongoD::registerKillOpListener(KillOpListenerInterface* listener) {
+ stdx::lock_guard<stdx::mutex> clientLock(_mutex);
+ _killOpListeners.push_back(listener);
+}
- void ServiceContextMongoD::setOpObserver(std::unique_ptr<OpObserver> opObserver) {
- _opObserver.reset(opObserver.get());
- }
+std::unique_ptr<OperationContext> ServiceContextMongoD::_newOpCtx(Client* client) {
+ invariant(&cc() == client);
+ return stdx::make_unique<OperationContextImpl>();
+}
- OpObserver* ServiceContextMongoD::getOpObserver() {
- return _opObserver.get();
- }
+void ServiceContextMongoD::setOpObserver(std::unique_ptr<OpObserver> opObserver) {
+ _opObserver.reset(opObserver.get());
+}
+
+OpObserver* ServiceContextMongoD::getOpObserver() {
+ return _opObserver.get();
+}
} // namespace mongo
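Note on the kill path reformatted above: it is a simple register-and-notify shape, where listeners are added under the service context's mutex and setKillAllOperations()/_killOperation_inlock() walk that list. The following is a minimal, self-contained sketch of that pattern; OpKillListener and KillRegistry are stand-in names for this illustration, not classes from this tree.

    #include <mutex>
    #include <vector>

    // Self-contained miniature of the register-and-notify pattern above; OpKillListener
    // and KillRegistry are stand-in names for this illustration, not classes from the tree.
    struct OpKillListener {
        virtual ~OpKillListener() {}
        virtual void interrupt(unsigned opId) = 0;  // a single operation was killed
        virtual void interruptAll() = 0;            // the global kill flag was set
    };

    class KillRegistry {
    public:
        void registerListener(OpKillListener* listener) {
            std::lock_guard<std::mutex> lk(_mutex);
            _listeners.push_back(listener);  // raw pointer: the listener must outlive us
        }

        void killAll() {
            std::lock_guard<std::mutex> lk(_mutex);
            _globalKill = true;
            for (OpKillListener* listener : _listeners)
                listener->interruptAll();  // the real code std::terminate()s on a throw
        }

    private:
        std::mutex _mutex;
        bool _globalKill = false;
        std::vector<OpKillListener*> _listeners;
    };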
diff --git a/src/mongo/db/service_context_d.h b/src/mongo/db/service_context_d.h
index 5ec8b7f6ecc..0c560ff17f4 100644
--- a/src/mongo/db/service_context_d.h
+++ b/src/mongo/db/service_context_d.h
@@ -35,98 +35,95 @@
namespace mongo {
- class Client;
- class StorageEngineLockFile;
+class Client;
+class StorageEngineLockFile;
- class ServiceContextMongoD final : public ServiceContext {
- public:
- typedef std::map<std::string, const StorageEngine::Factory*> FactoryMap;
+class ServiceContextMongoD final : public ServiceContext {
+public:
+ typedef std::map<std::string, const StorageEngine::Factory*> FactoryMap;
- ServiceContextMongoD();
+ ServiceContextMongoD();
- ~ServiceContextMongoD();
+ ~ServiceContextMongoD();
- StorageEngine* getGlobalStorageEngine() override;
+ StorageEngine* getGlobalStorageEngine() override;
- void initializeGlobalStorageEngine() override;
+ void initializeGlobalStorageEngine() override;
- void shutdownGlobalStorageEngineCleanly() override;
+ void shutdownGlobalStorageEngineCleanly() override;
- void registerStorageEngine(const std::string& name,
- const StorageEngine::Factory* factory) override;
+ void registerStorageEngine(const std::string& name,
+ const StorageEngine::Factory* factory) override;
- bool isRegisteredStorageEngine(const std::string& name) override;
+ bool isRegisteredStorageEngine(const std::string& name) override;
- StorageFactoriesIterator* makeStorageFactoriesIterator() override;
+ StorageFactoriesIterator* makeStorageFactoriesIterator() override;
- void setKillAllOperations() override;
+ void setKillAllOperations() override;
- void unsetKillAllOperations() override;
+ void unsetKillAllOperations() override;
- bool getKillAllOperations() override;
+ bool getKillAllOperations() override;
- bool killOperation(unsigned int opId) override;
+ bool killOperation(unsigned int opId) override;
- void killAllUserOperations(const OperationContext* txn) override;
+ void killAllUserOperations(const OperationContext* txn) override;
- void registerKillOpListener(KillOpListenerInterface* listener) override;
+ void registerKillOpListener(KillOpListenerInterface* listener) override;
- void setOpObserver(std::unique_ptr<OpObserver> opObserver) override;
+ void setOpObserver(std::unique_ptr<OpObserver> opObserver) override;
- OpObserver* getOpObserver() override;
+ OpObserver* getOpObserver() override;
- private:
+private:
+ std::unique_ptr<OperationContext> _newOpCtx(Client* client) override;
- std::unique_ptr<OperationContext> _newOpCtx(Client* client) override;
+ /**
+ * Kills the active operation on "client" if that operation is associated with operation id
+ * "opId".
+ *
+ * Returns true if an operation was killed.
+ *
+ * Must only be called by a thread owning both this service context's mutex and the
+ * client's.
+ */
+ bool _killOperationsAssociatedWithClientAndOpId_inlock(Client* client, unsigned int opId);
- /**
- * Kills the active operation on "client" if that operation is associated with operation id
- * "opId".
- *
- * Returns true if an operation was killed.
- *
- * Must only be called by a thread owning both this service context's mutex and the
- * client's.
- */
- bool _killOperationsAssociatedWithClientAndOpId_inlock(Client* client, unsigned int opId);
+ /**
+ * Kills the given operation.
+ *
+ * Caller must own the service context's _mutex.
+ */
+ void _killOperation_inlock(OperationContext* opCtx);
- /**
- * Kills the given operation.
- *
- * Caller must own the service context's _mutex.
- */
- void _killOperation_inlock(OperationContext* opCtx);
+ bool _globalKill;
- bool _globalKill;
+ // protected by parent class's _mutex
+ std::vector<KillOpListenerInterface*> _killOpListeners;
- // protected by parent class's _mutex
- std::vector<KillOpListenerInterface*> _killOpListeners;
+ std::unique_ptr<StorageEngineLockFile> _lockFile;
- std::unique_ptr<StorageEngineLockFile> _lockFile;
+ // logically owned here, but never deleted by anyone.
+ StorageEngine* _storageEngine;
- // logically owned here, but never deleted by anyone.
- StorageEngine* _storageEngine;
+ // logically owned here.
+ std::unique_ptr<OpObserver> _opObserver;
- // logically owned here.
- std::unique_ptr<OpObserver> _opObserver;
+ // All possible storage engines are registered here through MONGO_INIT.
+ FactoryMap _storageFactories;
+};
- // All possible storage engines are registered here through MONGO_INIT.
- FactoryMap _storageFactories;
- };
+class StorageFactoriesIteratorMongoD : public StorageFactoriesIterator {
+public:
+ typedef ServiceContextMongoD::FactoryMap::const_iterator FactoryMapIterator;
+ StorageFactoriesIteratorMongoD(const FactoryMapIterator& begin, const FactoryMapIterator& end);
- class StorageFactoriesIteratorMongoD : public StorageFactoriesIterator {
- public:
+ virtual bool more() const;
+ virtual const StorageEngine::Factory* next();
- typedef ServiceContextMongoD::FactoryMap::const_iterator FactoryMapIterator;
- StorageFactoriesIteratorMongoD(const FactoryMapIterator& begin,
- const FactoryMapIterator& end);
-
- virtual bool more() const;
- virtual const StorageEngine::Factory* next();
-
- private:
- FactoryMapIterator _curr;
- FactoryMapIterator _end;
- };
+private:
+ FactoryMapIterator _curr;
+ FactoryMapIterator _end;
+};
} // namespace mongo
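For reference, the iterator contract declared above (more() tests for exhaustion, next() yields the current factory and advances) can be exercised in isolation. The snippet below is a self-contained miniature with stand-in Factory and registry types and example engine names; it is not code from this tree.

    #include <iostream>
    #include <map>
    #include <string>

    // Self-contained miniature of the more()/next() contract declared above; Factory,
    // the registry map, and the engine names are stand-ins, not types from this tree.
    struct Factory {
        std::string name;
    };

    typedef std::map<std::string, const Factory*> FactoryMap;

    class FactoriesIterator {
    public:
        FactoriesIterator(FactoryMap::const_iterator begin, FactoryMap::const_iterator end)
            : _curr(begin), _end(end) {}
        bool more() const {
            return _curr != _end;
        }
        const Factory* next() {
            return _curr++->second;  // yield the current factory, then advance
        }

    private:
        FactoryMap::const_iterator _curr;
        FactoryMap::const_iterator _end;
    };

    int main() {
        Factory a = {"engineA"};
        Factory b = {"engineB"};
        FactoryMap registry;
        registry[a.name] = &a;
        registry[b.name] = &b;
        for (FactoriesIterator it(registry.begin(), registry.end()); it.more();)
            std::cout << it.next()->name << '\n';
        return 0;
    }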
diff --git a/src/mongo/db/service_context_noop.cpp b/src/mongo/db/service_context_noop.cpp
index d4ad381172c..65184906442 100644
--- a/src/mongo/db/service_context_noop.cpp
+++ b/src/mongo/db/service_context_noop.cpp
@@ -36,60 +36,60 @@
namespace mongo {
- StorageEngine* ServiceContextNoop::getGlobalStorageEngine() {
- return NULL;
- }
+StorageEngine* ServiceContextNoop::getGlobalStorageEngine() {
+ return NULL;
+}
- void ServiceContextNoop::initializeGlobalStorageEngine() {
- }
+void ServiceContextNoop::initializeGlobalStorageEngine() {}
- void ServiceContextNoop::shutdownGlobalStorageEngineCleanly() {
- }
+void ServiceContextNoop::shutdownGlobalStorageEngineCleanly() {}
- void ServiceContextNoop::registerStorageEngine(const std::string& name,
- const StorageEngine::Factory* factory) {
- // Takes ownership of 'factory' and deletes it because we don't need it.
- delete factory;
- }
+void ServiceContextNoop::registerStorageEngine(const std::string& name,
+ const StorageEngine::Factory* factory) {
+ // Takes ownership of 'factory' and deletes it because we don't need it.
+ delete factory;
+}
- bool ServiceContextNoop::isRegisteredStorageEngine(const std::string& name) {
- return false;
- }
+bool ServiceContextNoop::isRegisteredStorageEngine(const std::string& name) {
+ return false;
+}
- StorageFactoriesIterator* ServiceContextNoop::makeStorageFactoriesIterator() {
- class EmptySFI : public StorageFactoriesIterator {
- public:
- virtual bool more() const { return false; }
- virtual const StorageEngine::Factory* next() { invariant(false); }
- };
- return new EmptySFI();
- }
+StorageFactoriesIterator* ServiceContextNoop::makeStorageFactoriesIterator() {
+ class EmptySFI : public StorageFactoriesIterator {
+ public:
+ virtual bool more() const {
+ return false;
+ }
+ virtual const StorageEngine::Factory* next() {
+ invariant(false);
+ }
+ };
+ return new EmptySFI();
+}
- void ServiceContextNoop::setKillAllOperations() { }
+void ServiceContextNoop::setKillAllOperations() {}
- void ServiceContextNoop::unsetKillAllOperations() { }
+void ServiceContextNoop::unsetKillAllOperations() {}
- bool ServiceContextNoop::getKillAllOperations() {
- return false;
- }
+bool ServiceContextNoop::getKillAllOperations() {
+ return false;
+}
- bool ServiceContextNoop::killOperation(unsigned int opId) {
- return false;
- }
+bool ServiceContextNoop::killOperation(unsigned int opId) {
+ return false;
+}
- void ServiceContextNoop::killAllUserOperations(const OperationContext* txn) {}
+void ServiceContextNoop::killAllUserOperations(const OperationContext* txn) {}
- void ServiceContextNoop::registerKillOpListener(KillOpListenerInterface* listener) {
- }
+void ServiceContextNoop::registerKillOpListener(KillOpListenerInterface* listener) {}
- std::unique_ptr<OperationContext> ServiceContextNoop::_newOpCtx(Client* client) {
- return stdx::make_unique<OperationContextNoop>(client, _nextOpId.fetchAndAdd(1));
- }
+std::unique_ptr<OperationContext> ServiceContextNoop::_newOpCtx(Client* client) {
+ return stdx::make_unique<OperationContextNoop>(client, _nextOpId.fetchAndAdd(1));
+}
- void ServiceContextNoop::setOpObserver(std::unique_ptr<OpObserver> opObserver) {
- }
+void ServiceContextNoop::setOpObserver(std::unique_ptr<OpObserver> opObserver) {}
- OpObserver* ServiceContextNoop::getOpObserver() {
- return nullptr;
- }
+OpObserver* ServiceContextNoop::getOpObserver() {
+ return nullptr;
+}
} // namespace mongo
diff --git a/src/mongo/db/service_context_noop.h b/src/mongo/db/service_context_noop.h
index 8cf85865f21..0e74b61b7a2 100644
--- a/src/mongo/db/service_context_noop.h
+++ b/src/mongo/db/service_context_noop.h
@@ -32,41 +32,41 @@
namespace mongo {
- class ServiceContextNoop final : public ServiceContext {
- public:
- StorageEngine* getGlobalStorageEngine() override;
+class ServiceContextNoop final : public ServiceContext {
+public:
+ StorageEngine* getGlobalStorageEngine() override;
- void initializeGlobalStorageEngine() override;
+ void initializeGlobalStorageEngine() override;
- void shutdownGlobalStorageEngineCleanly() override;
+ void shutdownGlobalStorageEngineCleanly() override;
- void registerStorageEngine(const std::string& name,
- const StorageEngine::Factory* factory) override;
+ void registerStorageEngine(const std::string& name,
+ const StorageEngine::Factory* factory) override;
- bool isRegisteredStorageEngine(const std::string& name) override;
+ bool isRegisteredStorageEngine(const std::string& name) override;
- StorageFactoriesIterator* makeStorageFactoriesIterator() override;
+ StorageFactoriesIterator* makeStorageFactoriesIterator() override;
- bool killOperation(unsigned int opId) override;
+ bool killOperation(unsigned int opId) override;
- void killAllUserOperations(const OperationContext* txn) override;
+ void killAllUserOperations(const OperationContext* txn) override;
- void setKillAllOperations() override;
+ void setKillAllOperations() override;
- void unsetKillAllOperations() override;
+ void unsetKillAllOperations() override;
- bool getKillAllOperations() override;
+ bool getKillAllOperations() override;
- void registerKillOpListener(KillOpListenerInterface* listener) override;
+ void registerKillOpListener(KillOpListenerInterface* listener) override;
- std::unique_ptr<OperationContext> _newOpCtx(Client* client) override;
+ std::unique_ptr<OperationContext> _newOpCtx(Client* client) override;
- void setOpObserver(std::unique_ptr<OpObserver> opObserver) override;
+ void setOpObserver(std::unique_ptr<OpObserver> opObserver) override;
- OpObserver* getOpObserver() override;
+ OpObserver* getOpObserver() override;
- private:
- AtomicUInt32 _nextOpId{1};
- };
+private:
+ AtomicUInt32 _nextOpId{1};
+};
} // namespace mongo
diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp
index 17444f24d9d..e589c9a8001 100644
--- a/src/mongo/db/sorter/sorter.cpp
+++ b/src/mongo/db/sorter/sorter.cpp
@@ -63,831 +63,851 @@
#include "mongo/util/unowned_ptr.h"
namespace mongo {
- namespace sorter {
+namespace sorter {
- using std::shared_ptr;
- using namespace mongoutils;
+using std::shared_ptr;
+using namespace mongoutils;
- // We need to use the "real" errno everywhere, not GetLastError() on Windows
- inline std::string myErrnoWithDescription() {
- int errnoCopy = errno;
- StringBuilder sb;
- sb << "errno:" << errnoCopy << ' ' << strerror(errnoCopy);
- return sb.str();
- }
+// We need to use the "real" errno everywhere, not GetLastError() on Windows
+inline std::string myErrnoWithDescription() {
+ int errnoCopy = errno;
+ StringBuilder sb;
+ sb << "errno:" << errnoCopy << ' ' << strerror(errnoCopy);
+ return sb.str();
+}
- template<typename Data, typename Comparator>
- void compIsntSane(const Comparator& comp, const Data& lhs, const Data& rhs) {
- PRINT(typeid(comp).name());
- PRINT(lhs.first);
- PRINT(lhs.second);
- PRINT(rhs.first);
- PRINT(rhs.second);
- PRINT(comp(lhs, rhs));
- PRINT(comp(rhs, lhs));
- dassert(false);
- }
+template <typename Data, typename Comparator>
+void compIsntSane(const Comparator& comp, const Data& lhs, const Data& rhs) {
+ PRINT(typeid(comp).name());
+ PRINT(lhs.first);
+ PRINT(lhs.second);
+ PRINT(rhs.first);
+ PRINT(rhs.second);
+ PRINT(comp(lhs, rhs));
+ PRINT(comp(rhs, lhs));
+ dassert(false);
+}
- template<typename Data, typename Comparator>
- void dassertCompIsSane(const Comparator& comp, const Data& lhs, const Data& rhs) {
+template <typename Data, typename Comparator>
+void dassertCompIsSane(const Comparator& comp, const Data& lhs, const Data& rhs) {
#if defined(MONGO_CONFIG_DEBUG_BUILD) && !defined(_MSC_VER)
- // MSVC++ already does similar verification in debug mode in addition to using
- // algorithms that do more comparisons. Doing our own verification in addition makes
- // debug builds considerably slower without any additional safety.
-
- // test reversed comparisons
- const int regular = comp(lhs, rhs);
- if (regular == 0) {
- if (!(comp(rhs, lhs) == 0)) compIsntSane(comp, lhs, rhs);
- } else if (regular < 0) {
- if (!(comp(rhs, lhs) > 0)) compIsntSane(comp, lhs, rhs);
- } else /*regular > 0*/ {
- if (!(comp(rhs, lhs) < 0)) compIsntSane(comp, lhs, rhs);
- }
+ // MSVC++ already does similar verification in debug mode in addition to using
+ // algorithms that do more comparisons. Doing our own verification in addition makes
+ // debug builds considerably slower without any additional safety.
+
+ // test reversed comparisons
+ const int regular = comp(lhs, rhs);
+ if (regular == 0) {
+ if (!(comp(rhs, lhs) == 0))
+ compIsntSane(comp, lhs, rhs);
+ } else if (regular < 0) {
+ if (!(comp(rhs, lhs) > 0))
+ compIsntSane(comp, lhs, rhs);
+ } else /*regular > 0*/ {
+ if (!(comp(rhs, lhs) < 0))
+ compIsntSane(comp, lhs, rhs);
+ }
- // test reflexivity
- if (!(comp(lhs, lhs) == 0)) compIsntSane(comp, lhs, lhs);
- if (!(comp(rhs, rhs) == 0)) compIsntSane(comp, rhs, rhs);
+ // test reflexivity
+ if (!(comp(lhs, lhs) == 0))
+ compIsntSane(comp, lhs, lhs);
+ if (!(comp(rhs, rhs) == 0))
+ compIsntSane(comp, rhs, rhs);
#endif
- }
+}
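dassertCompIsSane() above spells out the contract a Sorter comparator must meet: it returns a negative, zero, or positive int; swapping the arguments flips the sign; and comparing an element with itself yields zero. A minimal comparator satisfying that contract, with a stand-in Data type, might look like this (sketch only, not code from this tree):

    #include <string>
    #include <utility>

    // Minimal comparator meeting the contract checked above, with a stand-in Data type:
    // return <0, 0, or >0; swapping the arguments flips the sign; and an element always
    // compares equal to itself.
    typedef std::pair<int, std::string> Data;

    struct ByKeyAscending {
        int operator()(const Data& lhs, const Data& rhs) const {
            if (lhs.first < rhs.first)
                return -1;
            if (lhs.first > rhs.first)
                return 1;
            return 0;  // equal keys compare as 0, which keeps reflexivity intact
        }
    };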
- /** Ensures a named file is deleted when this object goes out of scope */
- class FileDeleter {
- public:
- FileDeleter(const std::string& fileName) :_fileName(fileName) {}
- ~FileDeleter() {
- DESTRUCTOR_GUARD(
- boost::filesystem::remove(_fileName);
- )
- }
- private:
- const std::string _fileName;
- };
-
- /** Returns results from sorted in-memory storage */
- template <typename Key, typename Value>
- class InMemIterator : public SortIteratorInterface<Key, Value> {
- public:
- typedef std::pair<Key, Value> Data;
-
- /// No data to iterate
- InMemIterator() {}
-
- /// Only a single value
- InMemIterator(const Data& singleValue) :_data(1, singleValue) {}
-
- /// Any number of values
- template <typename Container>
- InMemIterator(const Container& input) :_data(input.begin(), input.end()) {}
-
- bool more() { return !_data.empty(); }
- Data next() {
- Data out = _data.front();
- _data.pop_front();
- return out;
- }
+/** Ensures a named file is deleted when this object goes out of scope */
+class FileDeleter {
+public:
+ FileDeleter(const std::string& fileName) : _fileName(fileName) {}
+ ~FileDeleter() {
+ DESTRUCTOR_GUARD(boost::filesystem::remove(_fileName);)
+ }
- private:
- std::deque<Data> _data;
- };
-
- /** Returns results in order from a single file */
- template <typename Key, typename Value>
- class FileIterator : public SortIteratorInterface<Key, Value> {
- public:
- typedef std::pair<typename Key::SorterDeserializeSettings
- ,typename Value::SorterDeserializeSettings
- > Settings;
- typedef std::pair<Key, Value> Data;
-
- FileIterator(const std::string& fileName,
- const Settings& settings,
- std::shared_ptr<FileDeleter> fileDeleter)
- : _settings(settings)
- , _done(false)
- , _fileName(fileName)
- , _fileDeleter(fileDeleter)
- , _file(_fileName.c_str(), std::ios::in | std::ios::binary)
- {
- massert(16814, str::stream() << "error opening file \"" << _fileName << "\": "
- << myErrnoWithDescription(),
- _file.good());
-
- massert(16815, str::stream() << "unexpected empty file: " << _fileName,
- boost::filesystem::file_size(_fileName) != 0);
- }
+private:
+ const std::string _fileName;
+};
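FileDeleter above is plain RAII: the spill file's lifetime is tied to an object so every exit path removes it, and the file iterators hold a shared_ptr to it so the file outlives all readers. A self-contained miniature of the idea, using std::remove instead of boost::filesystem only to keep the sketch standalone:

    #include <cstdio>
    #include <string>
    #include <utility>

    // Standalone miniature of the FileDeleter idea above: bind a temporary file's
    // lifetime to an object so every exit path removes it. The real class uses
    // boost::filesystem::remove inside a DESTRUCTOR_GUARD.
    class ScopedTempFile {
    public:
        explicit ScopedTempFile(std::string path) : _path(std::move(path)) {}
        ~ScopedTempFile() {
            std::remove(_path.c_str());  // best effort; a destructor must not throw
        }

    private:
        const std::string _path;
    };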
- bool more() {
- if (!_done)
- fillIfNeeded(); // may change _done
- return !_done;
- }
+/** Returns results from sorted in-memory storage */
+template <typename Key, typename Value>
+class InMemIterator : public SortIteratorInterface<Key, Value> {
+public:
+ typedef std::pair<Key, Value> Data;
- Data next() {
- verify(!_done);
- fillIfNeeded();
+ /// No data to iterate
+ InMemIterator() {}
- Data out;
- // Note: key must be read before value so can't pass directly to Data constructor
- out.first = Key::deserializeForSorter(*_reader, _settings.first);
- out.second = Value::deserializeForSorter(*_reader, _settings.second);
- return out;
- }
+ /// Only a single value
+ InMemIterator(const Data& singleValue) : _data(1, singleValue) {}
- private:
- void fillIfNeeded() {
- verify(!_done);
+ /// Any number of values
+ template <typename Container>
+ InMemIterator(const Container& input)
+ : _data(input.begin(), input.end()) {}
- if (!_reader || _reader->atEof())
- fill();
- }
+ bool more() {
+ return !_data.empty();
+ }
+ Data next() {
+ Data out = _data.front();
+ _data.pop_front();
+ return out;
+ }
+
+private:
+ std::deque<Data> _data;
+};
+
+/** Returns results in order from a single file */
+template <typename Key, typename Value>
+class FileIterator : public SortIteratorInterface<Key, Value> {
+public:
+ typedef std::pair<typename Key::SorterDeserializeSettings,
+ typename Value::SorterDeserializeSettings> Settings;
+ typedef std::pair<Key, Value> Data;
+
+ FileIterator(const std::string& fileName,
+ const Settings& settings,
+ std::shared_ptr<FileDeleter> fileDeleter)
+ : _settings(settings),
+ _done(false),
+ _fileName(fileName),
+ _fileDeleter(fileDeleter),
+ _file(_fileName.c_str(), std::ios::in | std::ios::binary) {
+ massert(16814,
+ str::stream() << "error opening file \"" << _fileName
+ << "\": " << myErrnoWithDescription(),
+ _file.good());
- void fill() {
- int32_t rawSize;
- read(&rawSize, sizeof(rawSize));
- if (_done) return;
+ massert(16815,
+ str::stream() << "unexpected empty file: " << _fileName,
+ boost::filesystem::file_size(_fileName) != 0);
+ }
- // negative size means compressed
- const bool compressed = rawSize < 0;
- const int32_t blockSize = std::abs(rawSize);
+ bool more() {
+ if (!_done)
+ fillIfNeeded(); // may change _done
+ return !_done;
+ }
- _buffer.reset(new char[blockSize]);
- read(_buffer.get(), blockSize);
- massert(16816, "file too short?", !_done);
+ Data next() {
+ verify(!_done);
+ fillIfNeeded();
- if (!compressed) {
- _reader.reset(new BufReader(_buffer.get(), blockSize));
- return;
- }
+ Data out;
+ // Note: key must be read before value so can't pass directly to Data constructor
+ out.first = Key::deserializeForSorter(*_reader, _settings.first);
+ out.second = Value::deserializeForSorter(*_reader, _settings.second);
+ return out;
+ }
- dassert(snappy::IsValidCompressedBuffer(_buffer.get(), blockSize));
+private:
+ void fillIfNeeded() {
+ verify(!_done);
- size_t uncompressedSize;
- massert(17061, "couldn't get uncompressed length",
- snappy::GetUncompressedLength(_buffer.get(), blockSize, &uncompressedSize));
+ if (!_reader || _reader->atEof())
+ fill();
+ }
- std::unique_ptr<char[]> decompressionBuffer(new char[uncompressedSize]);
- massert(17062, "decompression failed",
- snappy::RawUncompress(_buffer.get(),
- blockSize,
- decompressionBuffer.get()));
+ void fill() {
+ int32_t rawSize;
+ read(&rawSize, sizeof(rawSize));
+ if (_done)
+ return;
- // hold on to decompressed data and throw out compressed data at block exit
- _buffer.swap(decompressionBuffer);
- _reader.reset(new BufReader(_buffer.get(), uncompressedSize));
- }
+ // negative size means compressed
+ const bool compressed = rawSize < 0;
+ const int32_t blockSize = std::abs(rawSize);
- // sets _done to true on EOF - asserts on any other error
- void read(void* out, size_t size) {
- _file.read(reinterpret_cast<char*>(out), size);
- if (!_file.good()) {
- if (_file.eof()) {
- _done = true;
- return;
- }
-
- msgasserted(16817, str::stream() << "error reading file \""
- << _fileName << "\": "
- << myErrnoWithDescription());
- }
- verify(_file.gcount() == static_cast<std::streamsize>(size));
- }
+ _buffer.reset(new char[blockSize]);
+ read(_buffer.get(), blockSize);
+ massert(16816, "file too short?", !_done);
- const Settings _settings;
- bool _done;
- std::unique_ptr<char[]> _buffer;
- std::unique_ptr<BufReader> _reader;
- std::string _fileName;
- std::shared_ptr<FileDeleter> _fileDeleter; // Must outlive _file
- std::ifstream _file;
- };
-
- /** Merge-sorts results from 0 or more FileIterators */
- template <typename Key, typename Value, typename Comparator>
- class MergeIterator : public SortIteratorInterface<Key, Value> {
- public:
- typedef SortIteratorInterface<Key, Value> Input;
- typedef std::pair<Key, Value> Data;
-
-
- MergeIterator(const std::vector<std::shared_ptr<Input> >& iters,
- const SortOptions& opts,
- const Comparator& comp)
- : _opts(opts)
- , _remaining(opts.limit ? opts.limit : std::numeric_limits<unsigned long long>::max())
- , _first(true)
- , _greater(comp)
- {
- for (size_t i = 0; i < iters.size(); i++) {
- if (iters[i]->more()) {
- _heap.push_back(
- std::make_shared<Stream>(i, iters[i]->next(), iters[i]));
- }
- }
-
- if (_heap.empty()) {
- _remaining = 0;
- return;
- }
-
- std::make_heap(_heap.begin(), _heap.end(), _greater);
- std::pop_heap(_heap.begin(), _heap.end(), _greater);
- _current = _heap.back();
- _heap.pop_back();
- }
+ if (!compressed) {
+ _reader.reset(new BufReader(_buffer.get(), blockSize));
+ return;
+ }
- bool more() {
- if (_remaining > 0 && (_first || !_heap.empty() || _current->more()))
- return true;
+ dassert(snappy::IsValidCompressedBuffer(_buffer.get(), blockSize));
- // We are done so clean up resources.
- // Can't do this in next() due to lifetime guarantees of unowned Data.
- _heap.clear();
- _current.reset();
- _remaining = 0;
+ size_t uncompressedSize;
+ massert(17061,
+ "couldn't get uncompressed length",
+ snappy::GetUncompressedLength(_buffer.get(), blockSize, &uncompressedSize));
- return false;
+ std::unique_ptr<char[]> decompressionBuffer(new char[uncompressedSize]);
+ massert(17062,
+ "decompression failed",
+ snappy::RawUncompress(_buffer.get(), blockSize, decompressionBuffer.get()));
+
+ // hold on to decompressed data and throw out compressed data at block exit
+ _buffer.swap(decompressionBuffer);
+ _reader.reset(new BufReader(_buffer.get(), uncompressedSize));
+ }
+
+ // sets _done to true on EOF - asserts on any other error
+ void read(void* out, size_t size) {
+ _file.read(reinterpret_cast<char*>(out), size);
+ if (!_file.good()) {
+ if (_file.eof()) {
+ _done = true;
+ return;
}
- Data next() {
- verify(_remaining);
+ msgasserted(16817,
+ str::stream() << "error reading file \"" << _fileName
+ << "\": " << myErrnoWithDescription());
+ }
+ verify(_file.gcount() == static_cast<std::streamsize>(size));
+ }
- _remaining--;
+ const Settings _settings;
+ bool _done;
+ std::unique_ptr<char[]> _buffer;
+ std::unique_ptr<BufReader> _reader;
+ std::string _fileName;
+ std::shared_ptr<FileDeleter> _fileDeleter; // Must outlive _file
+ std::ifstream _file;
+};
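The fill()/read() pair above implies the spill-file framing: each block starts with a 4-byte signed length, a negative value marks a snappy-compressed payload whose on-disk size is the magnitude, and a positive value marks a raw block (the writer later in this file keeps the compressed form only when it is at least roughly 10% smaller). A small standalone helper expressing just that prefix rule:

    #include <cstdint>
    #include <cstdlib>

    // Standalone helper for the block prefix decoded by fill() above: negative length =
    // snappy-compressed payload, positive length = raw payload, and the magnitude is the
    // number of bytes that follow the 4-byte prefix on disk.
    struct BlockHeader {
        bool compressed;
        int32_t bytesOnDisk;
    };

    inline BlockHeader decodeBlockPrefix(int32_t rawSize) {
        BlockHeader header;
        header.compressed = rawSize < 0;         // the sign doubles as the "compressed" flag
        header.bytesOnDisk = std::abs(rawSize);  // payload length follows the prefix
        return header;
    }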
+
+/** Merge-sorts results from 0 or more FileIterators */
+template <typename Key, typename Value, typename Comparator>
+class MergeIterator : public SortIteratorInterface<Key, Value> {
+public:
+ typedef SortIteratorInterface<Key, Value> Input;
+ typedef std::pair<Key, Value> Data;
+
+
+ MergeIterator(const std::vector<std::shared_ptr<Input>>& iters,
+ const SortOptions& opts,
+ const Comparator& comp)
+ : _opts(opts),
+ _remaining(opts.limit ? opts.limit : std::numeric_limits<unsigned long long>::max()),
+ _first(true),
+ _greater(comp) {
+ for (size_t i = 0; i < iters.size(); i++) {
+ if (iters[i]->more()) {
+ _heap.push_back(std::make_shared<Stream>(i, iters[i]->next(), iters[i]));
+ }
+ }
- if (_first) {
- _first = false;
- return _current->current();
- }
+ if (_heap.empty()) {
+ _remaining = 0;
+ return;
+ }
- if (!_current->advance()) {
- verify(!_heap.empty());
+ std::make_heap(_heap.begin(), _heap.end(), _greater);
+ std::pop_heap(_heap.begin(), _heap.end(), _greater);
+ _current = _heap.back();
+ _heap.pop_back();
+ }
- std::pop_heap(_heap.begin(), _heap.end(), _greater);
- _current = _heap.back();
- _heap.pop_back();
- } else if (!_heap.empty() && _greater(_current, _heap.front())) {
- std::pop_heap(_heap.begin(), _heap.end(), _greater);
- std::swap(_current, _heap.back());
- std::push_heap(_heap.begin(), _heap.end(), _greater);
- }
+ bool more() {
+ if (_remaining > 0 && (_first || !_heap.empty() || _current->more()))
+ return true;
- return _current->current();
- }
+ // We are done so clean up resources.
+ // Can't do this in next() due to lifetime guarantees of unowned Data.
+ _heap.clear();
+ _current.reset();
+ _remaining = 0;
+ return false;
+ }
- private:
- class Stream { // Data + Iterator
- public:
- Stream(size_t fileNum, const Data& first, std::shared_ptr<Input> rest)
- : fileNum(fileNum)
- , _current(first)
- , _rest(rest)
- {}
-
- const Data& current() const { return _current; }
- bool more() { return _rest->more(); }
- bool advance() {
- if (!_rest->more())
- return false;
-
- _current = _rest->next();
- return true;
- }
-
- const size_t fileNum;
- private:
- Data _current;
- std::shared_ptr<Input> _rest;
- };
-
- class STLComparator { // uses greater rather than less-than to maintain a MinHeap
- public:
- explicit STLComparator(const Comparator& comp) : _comp(comp) {}
- bool operator () (unowned_ptr<const Stream> lhs,
- unowned_ptr<const Stream> rhs) const {
- // first compare data
- dassertCompIsSane(_comp, lhs->current(), rhs->current());
- int ret = _comp(lhs->current(), rhs->current());
- if (ret)
- return ret > 0;
-
- // then compare fileNums to ensure stability
- return lhs->fileNum > rhs->fileNum;
- }
- private:
- const Comparator _comp;
- };
-
- SortOptions _opts;
- unsigned long long _remaining;
- bool _first;
- std::shared_ptr<Stream> _current;
- std::vector<std::shared_ptr<Stream> > _heap; // MinHeap
- STLComparator _greater; // named so calls make sense
- };
-
- template <typename Key, typename Value, typename Comparator>
- class NoLimitSorter : public Sorter<Key, Value> {
- public:
- typedef std::pair<Key, Value> Data;
- typedef SortIteratorInterface<Key, Value> Iterator;
- typedef std::pair<typename Key::SorterDeserializeSettings
- ,typename Value::SorterDeserializeSettings
- > Settings;
-
- NoLimitSorter(const SortOptions& opts,
- const Comparator& comp,
- const Settings& settings = Settings())
- : _comp(comp)
- , _settings(settings)
- , _opts(opts)
- , _memUsed(0)
- { verify(_opts.limit == 0); }
-
- void add(const Key& key, const Value& val) {
- _data.push_back(std::make_pair(key, val));
-
- _memUsed += key.memUsageForSorter();
- _memUsed += val.memUsageForSorter();
-
- if (_memUsed > _opts.maxMemoryUsageBytes)
- spill();
- }
+ Data next() {
+ verify(_remaining);
- Iterator* done() {
- if (_iters.empty()) {
- sort();
- return new InMemIterator<Key, Value>(_data);
- }
+ _remaining--;
- spill();
- return Iterator::merge(_iters, _opts, _comp);
- }
+ if (_first) {
+ _first = false;
+ return _current->current();
+ }
- // TEMP these are here for compatibility. Will be replaced with a general stats API
- int numFiles() const { return _iters.size(); }
- size_t memUsed() const { return _memUsed; }
-
- private:
- class STLComparator {
- public:
- explicit STLComparator(const Comparator& comp) : _comp(comp) {}
- bool operator () (const Data& lhs, const Data& rhs) const {
- dassertCompIsSane(_comp, lhs, rhs);
- return _comp(lhs, rhs) < 0;
- }
- private:
- const Comparator& _comp;
- };
-
- void sort() {
- STLComparator less(_comp);
- std::stable_sort(_data.begin(), _data.end(), less);
-
- // Does 2x more compares than stable_sort
- // TODO test on windows
- //std::sort(_data.begin(), _data.end(), comp);
- }
+ if (!_current->advance()) {
+ verify(!_heap.empty());
- void spill() {
- if (_data.empty())
- return;
+ std::pop_heap(_heap.begin(), _heap.end(), _greater);
+ _current = _heap.back();
+ _heap.pop_back();
+ } else if (!_heap.empty() && _greater(_current, _heap.front())) {
+ std::pop_heap(_heap.begin(), _heap.end(), _greater);
+ std::swap(_current, _heap.back());
+ std::push_heap(_heap.begin(), _heap.end(), _greater);
+ }
- if (!_opts.extSortAllowed) {
- // XXX This error message is only correct for aggregation, but it is also the
- // only way this code could be hit at the moment. If the Sorter is used
- // elsewhere where extSortAllowed could possibly be false, this message will
- // need to be revisited.
- uasserted(16819, str::stream()
- << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
- << " bytes, but did not opt in to external sorting. Aborting operation."
- << " Pass allowDiskUse:true to opt in."
- );
- }
+ return _current->current();
+ }
- sort();
- SortedFileWriter<Key, Value> writer(_opts, _settings);
- for ( ; !_data.empty(); _data.pop_front()) {
- writer.addAlreadySorted(_data.front().first, _data.front().second);
- }
+private:
+ class Stream { // Data + Iterator
+ public:
+ Stream(size_t fileNum, const Data& first, std::shared_ptr<Input> rest)
+ : fileNum(fileNum), _current(first), _rest(rest) {}
- _iters.push_back(std::shared_ptr<Iterator>(writer.done()));
+ const Data& current() const {
+ return _current;
+ }
+ bool more() {
+ return _rest->more();
+ }
+ bool advance() {
+ if (!_rest->more())
+ return false;
- _memUsed = 0;
- }
+ _current = _rest->next();
+ return true;
+ }
- const Comparator _comp;
- const Settings _settings;
- SortOptions _opts;
- size_t _memUsed;
- std::deque<Data> _data; // the "current" data
- std::vector<std::shared_ptr<Iterator> > _iters; // data that has already been spilled
- };
-
- template <typename Key, typename Value, typename Comparator>
- class LimitOneSorter : public Sorter<Key, Value> {
- // Since this class is only used for limit==1, it omits all logic to
- // spill to disk and only tracks memory usage if explicitly requested.
- public:
- typedef std::pair<Key, Value> Data;
- typedef SortIteratorInterface<Key, Value> Iterator;
-
- LimitOneSorter(const SortOptions& opts, const Comparator& comp)
- : _comp(comp)
- , _haveData(false)
- { verify(opts.limit == 1); }
-
- void add(const Key& key, const Value& val) {
- Data contender(key, val);
-
- if (_haveData) {
- dassertCompIsSane(_comp, _best, contender);
- if (_comp(_best, contender) <= 0)
- return; // not good enough
- } else {
- _haveData = true;
- }
-
- _best = contender;
- }
+ const size_t fileNum;
+
+ private:
+ Data _current;
+ std::shared_ptr<Input> _rest;
+ };
+
+ class STLComparator { // uses greater rather than less-than to maintain a MinHeap
+ public:
+ explicit STLComparator(const Comparator& comp) : _comp(comp) {}
+ bool operator()(unowned_ptr<const Stream> lhs, unowned_ptr<const Stream> rhs) const {
+ // first compare data
+ dassertCompIsSane(_comp, lhs->current(), rhs->current());
+ int ret = _comp(lhs->current(), rhs->current());
+ if (ret)
+ return ret > 0;
+
+ // then compare fileNums to ensure stability
+ return lhs->fileNum > rhs->fileNum;
+ }
- Iterator* done() {
- if (_haveData) {
- return new InMemIterator<Key, Value>(_best);
- } else {
- return new InMemIterator<Key, Value>();
- }
- }
+ private:
+ const Comparator _comp;
+ };
+
+ SortOptions _opts;
+ unsigned long long _remaining;
+ bool _first;
+ std::shared_ptr<Stream> _current;
+ std::vector<std::shared_ptr<Stream>> _heap; // MinHeap
+ STLComparator _greater; // named so calls make sense
+};
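MergeIterator above is a classic k-way merge: one candidate per sorted run sits in a min-heap, the smallest is popped, ties are broken by file number so earlier runs win and the merge stays stable, and the popped run is refilled from its next element. A self-contained miniature over vectors of ints (not the template machinery above) illustrates the loop:

    #include <cstddef>
    #include <functional>
    #include <queue>
    #include <tuple>
    #include <vector>

    // Self-contained miniature of the k-way merge driving MergeIterator above.
    std::vector<int> mergeSortedRuns(const std::vector<std::vector<int> >& runs) {
        typedef std::tuple<int, size_t, size_t> Entry;  // value, run index, offset in run
        std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry> > heap;
        for (size_t r = 0; r < runs.size(); r++) {
            if (!runs[r].empty())
                heap.push(Entry(runs[r][0], r, 0));
        }

        std::vector<int> merged;
        while (!heap.empty()) {
            Entry top = heap.top();
            heap.pop();
            merged.push_back(std::get<0>(top));
            size_t run = std::get<1>(top);
            size_t next = std::get<2>(top) + 1;
            if (next < runs[run].size())
                heap.push(Entry(runs[run][next], run, next));  // refill from the same run
        }
        return merged;
    }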
+
+template <typename Key, typename Value, typename Comparator>
+class NoLimitSorter : public Sorter<Key, Value> {
+public:
+ typedef std::pair<Key, Value> Data;
+ typedef SortIteratorInterface<Key, Value> Iterator;
+ typedef std::pair<typename Key::SorterDeserializeSettings,
+ typename Value::SorterDeserializeSettings> Settings;
+
+ NoLimitSorter(const SortOptions& opts,
+ const Comparator& comp,
+ const Settings& settings = Settings())
+ : _comp(comp), _settings(settings), _opts(opts), _memUsed(0) {
+ verify(_opts.limit == 0);
+ }
- // TEMP these are here for compatibility. Will be replaced with a general stats API
- int numFiles() const { return 0; }
- size_t memUsed() const { return _best.first.memUsageForSorter()
- + _best.second.memUsageForSorter(); }
-
- private:
- const Comparator _comp;
- Data _best;
- bool _haveData; // false at start, set to true on first call to add()
- };
-
- template <typename Key, typename Value, typename Comparator>
- class TopKSorter : public Sorter<Key, Value> {
- public:
- typedef std::pair<Key, Value> Data;
- typedef SortIteratorInterface<Key, Value> Iterator;
- typedef std::pair<typename Key::SorterDeserializeSettings
- ,typename Value::SorterDeserializeSettings
- > Settings;
-
- TopKSorter(const SortOptions& opts,
- const Comparator& comp,
- const Settings& settings = Settings())
- : _comp(comp)
- , _settings(settings)
- , _opts(opts)
- , _memUsed(0)
- , _haveCutoff(false)
- , _worstCount(0)
- , _medianCount(0)
- {
- // This also *works* with limit==1 but LimitOneSorter should be used instead
- verify(_opts.limit > 1);
-
- // Preallocate a fixed sized vector of the required size if we
- // don't expect it to have a major impact on our memory budget.
- // This is the common case with small limits.
- if ((sizeof(Data) * opts.limit) < opts.maxMemoryUsageBytes / 10) {
- _data.reserve(opts.limit);
- }
- }
+ void add(const Key& key, const Value& val) {
+ _data.push_back(std::make_pair(key, val));
- void add(const Key& key, const Value& val) {
- STLComparator less(_comp);
- Data contender(key, val);
+ _memUsed += key.memUsageForSorter();
+ _memUsed += val.memUsageForSorter();
- if (_data.size() < _opts.limit) {
- if (_haveCutoff && !less(contender, _cutoff))
- return;
+ if (_memUsed > _opts.maxMemoryUsageBytes)
+ spill();
+ }
- _data.push_back(contender);
+ Iterator* done() {
+ if (_iters.empty()) {
+ sort();
+ return new InMemIterator<Key, Value>(_data);
+ }
- _memUsed += key.memUsageForSorter();
- _memUsed += val.memUsageForSorter();
+ spill();
+ return Iterator::merge(_iters, _opts, _comp);
+ }
- if (_data.size() == _opts.limit)
- std::make_heap(_data.begin(), _data.end(), less);
+ // TEMP these are here for compatibility. Will be replaced with a general stats API
+ int numFiles() const {
+ return _iters.size();
+ }
+ size_t memUsed() const {
+ return _memUsed;
+ }
- if (_memUsed > _opts.maxMemoryUsageBytes)
- spill();
+private:
+ class STLComparator {
+ public:
+ explicit STLComparator(const Comparator& comp) : _comp(comp) {}
+ bool operator()(const Data& lhs, const Data& rhs) const {
+ dassertCompIsSane(_comp, lhs, rhs);
+ return _comp(lhs, rhs) < 0;
+ }
- return;
- }
+ private:
+ const Comparator& _comp;
+ };
- verify(_data.size() == _opts.limit);
+ void sort() {
+ STLComparator less(_comp);
+ std::stable_sort(_data.begin(), _data.end(), less);
- if (!less(contender, _data.front()))
- return; // not good enough
+ // Does 2x more compares than stable_sort
+ // TODO test on windows
+ // std::sort(_data.begin(), _data.end(), comp);
+ }
- // Remove the old worst pair and insert the contender, adjusting _memUsed
+ void spill() {
+ if (_data.empty())
+ return;
- _memUsed += key.memUsageForSorter();
- _memUsed += val.memUsageForSorter();
+ if (!_opts.extSortAllowed) {
+ // XXX This error message is only correct for aggregation, but it is also the
+ // only way this code could be hit at the moment. If the Sorter is used
+ // elsewhere where extSortAllowed could possibly be false, this message will
+ // need to be revisited.
+ uasserted(16819,
+ str::stream()
+ << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
+ << " bytes, but did not opt in to external sorting. Aborting operation."
+ << " Pass allowDiskUse:true to opt in.");
+ }
- _memUsed -= _data.front().first.memUsageForSorter();
- _memUsed -= _data.front().second.memUsageForSorter();
+ sort();
- std::pop_heap(_data.begin(), _data.end(), less);
- _data.back() = contender;
- std::push_heap(_data.begin(), _data.end(), less);
+ SortedFileWriter<Key, Value> writer(_opts, _settings);
+ for (; !_data.empty(); _data.pop_front()) {
+ writer.addAlreadySorted(_data.front().first, _data.front().second);
+ }
- if (_memUsed > _opts.maxMemoryUsageBytes)
- spill();
- }
+ _iters.push_back(std::shared_ptr<Iterator>(writer.done()));
- Iterator* done() {
- if (_iters.empty()) {
- sort();
- return new InMemIterator<Key, Value>(_data);
- }
+ _memUsed = 0;
+ }
- spill();
- return Iterator::merge(_iters, _opts, _comp);
- }
+ const Comparator _comp;
+ const Settings _settings;
+ SortOptions _opts;
+ size_t _memUsed;
+ std::deque<Data> _data; // the "current" data
+ std::vector<std::shared_ptr<Iterator>> _iters; // data that has already been spilled
+};
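NoLimitSorter's policy above reduces to: buffer (key, value) pairs, track their estimated memory footprint, and once the budget is crossed, sort the batch and flush it as one on-disk run for done() to merge later. A self-contained sketch of that accumulate-and-spill step; flushRun is any callable that persists one sorted batch (SortedFileWriter plays that role in the real code), and sizeof(Data) stands in for the per-element memUsageForSorter() calls:

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Self-contained sketch of the accumulate-and-spill step described above.
    template <typename Data, typename Less, typename Flush>
    void addWithSpill(std::vector<Data>& buffer,
                      size_t& memUsed,
                      size_t budgetBytes,
                      Data item,
                      Less less,
                      Flush flushRun) {
        memUsed += sizeof(Data);
        buffer.push_back(std::move(item));
        if (memUsed > budgetBytes) {
            std::stable_sort(buffer.begin(), buffer.end(), less);
            flushRun(buffer);  // becomes one sorted on-disk run, merged later by done()
            buffer.clear();
            memUsed = 0;
        }
    }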
+
+template <typename Key, typename Value, typename Comparator>
+class LimitOneSorter : public Sorter<Key, Value> {
+ // Since this class is only used for limit==1, it omits all logic to
+ // spill to disk and only tracks memory usage if explicitly requested.
+public:
+ typedef std::pair<Key, Value> Data;
+ typedef SortIteratorInterface<Key, Value> Iterator;
+
+ LimitOneSorter(const SortOptions& opts, const Comparator& comp)
+ : _comp(comp), _haveData(false) {
+ verify(opts.limit == 1);
+ }
- // TEMP these are here for compatibility. Will be replaced with a general stats API
- int numFiles() const { return _iters.size(); }
- size_t memUsed() const { return _memUsed; }
-
- private:
- class STLComparator {
- public:
- explicit STLComparator(const Comparator& comp) : _comp(comp) {}
- bool operator () (const Data& lhs, const Data& rhs) const {
- dassertCompIsSane(_comp, lhs, rhs);
- return _comp(lhs, rhs) < 0;
- }
- private:
- const Comparator& _comp;
- };
-
- void sort() {
- STLComparator less(_comp);
-
- if (_data.size() == _opts.limit) {
- std::sort_heap(_data.begin(), _data.end(), less);
- } else {
- std::stable_sort(_data.begin(), _data.end(), less);
- }
- }
+ void add(const Key& key, const Value& val) {
+ Data contender(key, val);
- // Can only be called after _data is sorted
- void updateCutoff() {
- // Theory of operation: We want to be able to eagerly ignore values we know will not
- // be in the TopK result set by setting _cutoff to a value we know we have at least
- // K values equal to or better than. There are two values that we track to
- // potentially become the next value of _cutoff: _worstSeen and _lastMedian. When
- // one of these values becomes the new _cutoff, its associated counter is reset to 0
- // and a new value is chosen for that member the next time we spill.
- //
- // _worstSeen is the worst value we've seen so that all kept values are better than
- // (or equal to) it. This means that once _worstCount >= _opts.limit there is no
- // reason to consider values worse than _worstSeen so it can become the new _cutoff.
- // This technique is especially useful when the input is already roughly sorted (eg
- // sorting ASC on an ObjectId or Date field) since we will quickly find a cutoff
- // that will exclude most later values, making the full TopK operation including
- // the MergeIterator phase is O(K) in space and O(N + K*Log(K)) in time.
- //
- // _lastMedian was the median of the _data in the first spill() either overall or
- // following a promotion of _lastMedian to _cutoff. We count the number of kept
- // values that are better than or equal to _lastMedian in _medianCount and can
- // promote _lastMedian to _cutoff once _medianCount >=_opts.limit. Assuming
- // reasonable median selection (which should happen when the data is completely
- // unsorted), after the first K spilled values, we will keep roughly 50% of the
- // incoming values, 25% after the second K, 12.5% after the third K, etc. This means
- // that by the time we spill 3*K values, we will have seen (1*K + 2*K + 4*K) values,
- // so the expected number of kept values is O(Log(N/K) * K). The final run time if
- // using the O(K*Log(N)) merge algorithm in MergeIterator is O(N + K*Log(K) +
- // K*LogLog(N/K)) which is much closer to O(N) than O(N*Log(K)).
- //
- // This leaves a currently unoptimized worst case of data that is already roughly
- // sorted, but in the wrong direction, such that the desired results are all the
- // last ones seen. It will require O(N) space and O(N*Log(K)) time. Since this
- // should be trivially detectable, as a future optimization it might be nice to
- // detect this case and reverse the direction of input (if possible) which would
- // turn this into the best case described above.
- //
- // Pedantic notes: The time complexities above (which count number of comparisons)
- // ignore the sorting of batches prior to spilling to disk since they make it more
- // confusing without changing the results. If you want to add them back in, add an
- // extra term to each time complexity of (SPACE_COMPLEXITY * Log(BATCH_SIZE)). Also,
- // all space complexities measure disk space rather than memory since this class is
- // O(1) in memory due to the _opts.maxMemoryUsageBytes limit.
-
- STLComparator less(_comp); // less is "better" for TopK.
-
- // Pick a new _worstSeen or _lastMedian if should.
- if (_worstCount == 0 || less(_worstSeen, _data.back())) {
- _worstSeen = _data.back();
- }
- if (_medianCount == 0) {
- size_t medianIndex = _data.size() / 2; // chooses the higher if size() is even.
- _lastMedian = _data[medianIndex];
- }
-
- // Add the counters of kept objects better than or equal to _worstSeen/_lastMedian.
- _worstCount += _data.size(); // everything is better or equal
- typename std::vector<Data>::iterator firstWorseThanLastMedian =
- std::upper_bound(_data.begin(), _data.end(), _lastMedian, less);
- _medianCount += std::distance(_data.begin(), firstWorseThanLastMedian);
-
-
- // Promote _worstSeen or _lastMedian to _cutoff and reset counters if should.
- if (_worstCount >= _opts.limit) {
- if (!_haveCutoff || less(_worstSeen, _cutoff)) {
- _cutoff = _worstSeen;
- _haveCutoff = true;
- }
- _worstCount = 0;
- }
- if (_medianCount >= _opts.limit) {
- if (!_haveCutoff || less(_lastMedian, _cutoff)) {
- _cutoff = _lastMedian;
- _haveCutoff = true;
- }
- _medianCount = 0;
- }
+ if (_haveData) {
+ dassertCompIsSane(_comp, _best, contender);
+ if (_comp(_best, contender) <= 0)
+ return; // not good enough
+ } else {
+ _haveData = true;
+ }
- }
+ _best = contender;
+ }
+
+ Iterator* done() {
+ if (_haveData) {
+ return new InMemIterator<Key, Value>(_best);
+ } else {
+ return new InMemIterator<Key, Value>();
+ }
+ }
- void spill() {
- if (_data.empty())
- return;
+ // TEMP these are here for compatibility. Will be replaced with a general stats API
+ int numFiles() const {
+ return 0;
+ }
+ size_t memUsed() const {
+ return _best.first.memUsageForSorter() + _best.second.memUsageForSorter();
+ }
- if (!_opts.extSortAllowed) {
- // XXX This error message is only correct for aggregation, but it is also the
- // only way this code could be hit at the moment. If the Sorter is used
- // elsewhere where extSortAllowed could possibly be false, this message will
- // need to be revisited.
- uasserted(16820, str::stream()
- << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
- << " bytes, but did not opt in to external sorting. Aborting operation."
- << " Pass allowDiskUse:true to opt in."
- );
- }
+private:
+ const Comparator _comp;
+ Data _best;
+ bool _haveData; // false at start, set to true on first call to add()
+};
+
+template <typename Key, typename Value, typename Comparator>
+class TopKSorter : public Sorter<Key, Value> {
+public:
+ typedef std::pair<Key, Value> Data;
+ typedef SortIteratorInterface<Key, Value> Iterator;
+ typedef std::pair<typename Key::SorterDeserializeSettings,
+ typename Value::SorterDeserializeSettings> Settings;
+
+ TopKSorter(const SortOptions& opts,
+ const Comparator& comp,
+ const Settings& settings = Settings())
+ : _comp(comp),
+ _settings(settings),
+ _opts(opts),
+ _memUsed(0),
+ _haveCutoff(false),
+ _worstCount(0),
+ _medianCount(0) {
+ // This also *works* with limit==1 but LimitOneSorter should be used instead
+ verify(_opts.limit > 1);
+
+ // Preallocate a fixed sized vector of the required size if we
+ // don't expect it to have a major impact on our memory budget.
+ // This is the common case with small limits.
+ if ((sizeof(Data) * opts.limit) < opts.maxMemoryUsageBytes / 10) {
+ _data.reserve(opts.limit);
+ }
+ }
- sort();
- updateCutoff();
+ void add(const Key& key, const Value& val) {
+ STLComparator less(_comp);
+ Data contender(key, val);
- SortedFileWriter<Key, Value> writer(_opts, _settings);
- for (size_t i=0; i<_data.size(); i++) {
- writer.addAlreadySorted(_data[i].first, _data[i].second);
- }
+ if (_data.size() < _opts.limit) {
+ if (_haveCutoff && !less(contender, _cutoff))
+ return;
- // clear _data and release backing array's memory
- std::vector<Data>().swap(_data);
+ _data.push_back(contender);
- _iters.push_back(std::shared_ptr<Iterator>(writer.done()));
+ _memUsed += key.memUsageForSorter();
+ _memUsed += val.memUsageForSorter();
- _memUsed = 0;
- }
+ if (_data.size() == _opts.limit)
+ std::make_heap(_data.begin(), _data.end(), less);
+
+ if (_memUsed > _opts.maxMemoryUsageBytes)
+ spill();
- const Comparator _comp;
- const Settings _settings;
- SortOptions _opts;
- size_t _memUsed;
- std::vector<Data> _data; // the "current" data. Organized as max-heap if size == limit.
- std::vector<std::shared_ptr<Iterator> > _iters; // data that has already been spilled
-
- // See updateCutoff() for a full description of how these members are used.
- bool _haveCutoff;
- Data _cutoff; // We can definitely ignore values worse than this.
- Data _worstSeen; // The worst Data seen so far. Reset when _worstCount >= _opts.limit.
- size_t _worstCount; // Number of docs better or equal to _worstSeen kept so far.
- Data _lastMedian; // Median of a batch. Reset when _medianCount >= _opts.limit.
- size_t _medianCount; // Number of docs better or equal to _lastMedian kept so far.
- };
-
- inline unsigned nextFileNumber() {
- // This is unified across all Sorter types and instances.
- static AtomicUInt32 fileCounter;
- return fileCounter.fetchAndAdd(1);
+ return;
}
- } // namespace sorter
- //
- // SortedFileWriter
- //
+ verify(_data.size() == _opts.limit);
+ if (!less(contender, _data.front()))
+ return; // not good enough
- template <typename Key, typename Value>
- SortedFileWriter<Key, Value>::SortedFileWriter(const SortOptions& opts,
- const Settings& settings)
- : _settings(settings)
- {
- namespace str = mongoutils::str;
+ // Remove the old worst pair and insert the contender, adjusting _memUsed
- // This should be checked by consumers, but if we get here don't allow writes.
- massert(16946, "Attempting to use external sort from mongos. This is not allowed.",
- !isMongos());
+ _memUsed += key.memUsageForSorter();
+ _memUsed += val.memUsageForSorter();
- massert(17148, "Attempting to use external sort without setting SortOptions::tempDir",
- !opts.tempDir.empty());
+ _memUsed -= _data.front().first.memUsageForSorter();
+ _memUsed -= _data.front().second.memUsageForSorter();
- {
- StringBuilder sb;
- sb << opts.tempDir << "/extsort." << sorter::nextFileNumber();
- _fileName = sb.str();
- }
+ std::pop_heap(_data.begin(), _data.end(), less);
+ _data.back() = contender;
+ std::push_heap(_data.begin(), _data.end(), less);
- boost::filesystem::create_directories(opts.tempDir);
+ if (_memUsed > _opts.maxMemoryUsageBytes)
+ spill();
+ }
- _file.open(_fileName.c_str(), std::ios::binary | std::ios::out);
- massert(16818, str::stream() << "error opening file \"" << _fileName << "\": "
- << sorter::myErrnoWithDescription(),
- _file.good());
+ Iterator* done() {
+ if (_iters.empty()) {
+ sort();
+ return new InMemIterator<Key, Value>(_data);
+ }
- _fileDeleter = std::make_shared<sorter::FileDeleter>(_fileName);
+ spill();
+ return Iterator::merge(_iters, _opts, _comp);
+ }
- // throw on failure
- _file.exceptions(std::ios::failbit | std::ios::badbit | std::ios::eofbit);
+ // TEMP these are here for compatibility. Will be replaced with a general stats API
+ int numFiles() const {
+ return _iters.size();
+ }
+ size_t memUsed() const {
+ return _memUsed;
}
- template <typename Key, typename Value>
- void SortedFileWriter<Key, Value>::addAlreadySorted(const Key& key, const Value& val) {
- key.serializeForSorter(_buffer);
- val.serializeForSorter(_buffer);
+private:
+ class STLComparator {
+ public:
+ explicit STLComparator(const Comparator& comp) : _comp(comp) {}
+ bool operator()(const Data& lhs, const Data& rhs) const {
+ dassertCompIsSane(_comp, lhs, rhs);
+ return _comp(lhs, rhs) < 0;
+ }
- if (_buffer.len() > 64*1024)
- spill();
+ private:
+ const Comparator& _comp;
+ };
+
+ void sort() {
+ STLComparator less(_comp);
+
+ if (_data.size() == _opts.limit) {
+ std::sort_heap(_data.begin(), _data.end(), less);
+ } else {
+ std::stable_sort(_data.begin(), _data.end(), less);
+ }
}
- template <typename Key, typename Value>
- void SortedFileWriter<Key, Value>::spill() {
- namespace str = mongoutils::str;
+ // Can only be called after _data is sorted
+ void updateCutoff() {
+ // Theory of operation: We want to be able to eagerly ignore values we know will not
+ // be in the TopK result set by setting _cutoff to a value we know we have at least
+ // K values equal to or better than. There are two values that we track to
+ // potentially become the next value of _cutoff: _worstSeen and _lastMedian. When
+ // one of these values becomes the new _cutoff, its associated counter is reset to 0
+ // and a new value is chosen for that member the next time we spill.
+ //
+ // _worstSeen is the worst value we've seen so that all kept values are better than
+ // (or equal to) it. This means that once _worstCount >= _opts.limit there is no
+ // reason to consider values worse than _worstSeen so it can become the new _cutoff.
+        // This technique is especially useful when the input is already roughly sorted (e.g.
+        // sorting ASC on an ObjectId or Date field) since we will quickly find a cutoff
+        // that will exclude most later values, making the full TopK operation, including
+        // the MergeIterator phase, O(K) in space and O(N + K*Log(K)) in time.
+ //
+ // _lastMedian was the median of the _data in the first spill() either overall or
+ // following a promotion of _lastMedian to _cutoff. We count the number of kept
+ // values that are better than or equal to _lastMedian in _medianCount and can
+ // promote _lastMedian to _cutoff once _medianCount >=_opts.limit. Assuming
+ // reasonable median selection (which should happen when the data is completely
+ // unsorted), after the first K spilled values, we will keep roughly 50% of the
+ // incoming values, 25% after the second K, 12.5% after the third K, etc. This means
+ // that by the time we spill 3*K values, we will have seen (1*K + 2*K + 4*K) values,
+ // so the expected number of kept values is O(Log(N/K) * K). The final run time if
+ // using the O(K*Log(N)) merge algorithm in MergeIterator is O(N + K*Log(K) +
+ // K*LogLog(N/K)) which is much closer to O(N) than O(N*Log(K)).
+ //
+ // This leaves a currently unoptimized worst case of data that is already roughly
+ // sorted, but in the wrong direction, such that the desired results are all the
+ // last ones seen. It will require O(N) space and O(N*Log(K)) time. Since this
+ // should be trivially detectable, as a future optimization it might be nice to
+ // detect this case and reverse the direction of input (if possible) which would
+ // turn this into the best case described above.
+ //
+ // Pedantic notes: The time complexities above (which count number of comparisons)
+ // ignore the sorting of batches prior to spilling to disk since they make it more
+ // confusing without changing the results. If you want to add them back in, add an
+ // extra term to each time complexity of (SPACE_COMPLEXITY * Log(BATCH_SIZE)). Also,
+ // all space complexities measure disk space rather than memory since this class is
+ // O(1) in memory due to the _opts.maxMemoryUsageBytes limit.
+
+ STLComparator less(_comp); // less is "better" for TopK.
+
+        // Pick a new _worstSeen or _lastMedian if we should.
+ if (_worstCount == 0 || less(_worstSeen, _data.back())) {
+ _worstSeen = _data.back();
+ }
+ if (_medianCount == 0) {
+ size_t medianIndex = _data.size() / 2; // chooses the higher if size() is even.
+ _lastMedian = _data[medianIndex];
+ }
- if (_buffer.len() == 0)
- return;
+ // Add the counters of kept objects better than or equal to _worstSeen/_lastMedian.
+ _worstCount += _data.size(); // everything is better or equal
+ typename std::vector<Data>::iterator firstWorseThanLastMedian =
+ std::upper_bound(_data.begin(), _data.end(), _lastMedian, less);
+ _medianCount += std::distance(_data.begin(), firstWorseThanLastMedian);
- std::string compressed;
- snappy::Compress(_buffer.buf(), _buffer.len(), &compressed);
- verify(compressed.size() <= size_t(std::numeric_limits<int32_t>::max()));
-
- try {
- if (compressed.size() < size_t(_buffer.len()/10*9)) {
- const int32_t size = -int32_t(compressed.size()); // negative means compressed
- _file.write(reinterpret_cast<const char*>(&size), sizeof(size));
- _file.write(compressed.data(), compressed.size());
- } else {
- const int32_t size = _buffer.len();
- _file.write(reinterpret_cast<const char*>(&size), sizeof(size));
- _file.write(_buffer.buf(), _buffer.len());
+
+ // Promote _worstSeen or _lastMedian to _cutoff and reset counters if should.
+ if (_worstCount >= _opts.limit) {
+ if (!_haveCutoff || less(_worstSeen, _cutoff)) {
+ _cutoff = _worstSeen;
+ _haveCutoff = true;
}
- } catch (const std::exception&) {
- msgasserted(16821, str::stream() << "error writing to file \"" << _fileName << "\": "
- << sorter::myErrnoWithDescription());
+ _worstCount = 0;
+ }
+ if (_medianCount >= _opts.limit) {
+ if (!_haveCutoff || less(_lastMedian, _cutoff)) {
+ _cutoff = _lastMedian;
+ _haveCutoff = true;
+ }
+ _medianCount = 0;
}
-
- _buffer.reset();
}
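To put illustrative numbers on the estimate in the comment above: with limit K = 100 and N = 102,400 completely unsorted inputs, the keep rate roughly halves after each additional K values spilled, so the expected number of values ever kept is on the order of K * log2(N/K) = 100 * 10 = 1,000. The merge phase then works over about a thousand candidates instead of the full 102,400, which is where the O(N + K*Log(K) + K*LogLog(N/K)) bound comes from. (Figures are illustrative only and ignore constant factors.)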
- template <typename Key, typename Value>
- SortIteratorInterface<Key, Value>* SortedFileWriter<Key, Value>::done() {
- spill();
- _file.close();
- return new sorter::FileIterator<Key, Value>(_fileName, _settings, _fileDeleter);
+ void spill() {
+ if (_data.empty())
+ return;
+
+ if (!_opts.extSortAllowed) {
+ // XXX This error message is only correct for aggregation, but it is also the
+ // only way this code could be hit at the moment. If the Sorter is used
+ // elsewhere where extSortAllowed could possibly be false, this message will
+ // need to be revisited.
+ uasserted(16820,
+ str::stream()
+ << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
+ << " bytes, but did not opt in to external sorting. Aborting operation."
+ << " Pass allowDiskUse:true to opt in.");
+ }
+
+ sort();
+ updateCutoff();
+
+ SortedFileWriter<Key, Value> writer(_opts, _settings);
+ for (size_t i = 0; i < _data.size(); i++) {
+ writer.addAlreadySorted(_data[i].first, _data[i].second);
+ }
+
+ // clear _data and release backing array's memory
+ std::vector<Data>().swap(_data);
+
+ _iters.push_back(std::shared_ptr<Iterator>(writer.done()));
+
+ _memUsed = 0;
}
- //
- // Factory Functions
- //
+ const Comparator _comp;
+ const Settings _settings;
+ SortOptions _opts;
+ size_t _memUsed;
+ std::vector<Data> _data; // the "current" data. Organized as max-heap if size == limit.
+ std::vector<std::shared_ptr<Iterator>> _iters; // data that has already been spilled
+
+ // See updateCutoff() for a full description of how these members are used.
+ bool _haveCutoff;
+ Data _cutoff; // We can definitely ignore values worse than this.
+ Data _worstSeen; // The worst Data seen so far. Reset when _worstCount >= _opts.limit.
+ size_t _worstCount; // Number of docs better or equal to _worstSeen kept so far.
+ Data _lastMedian; // Median of a batch. Reset when _medianCount >= _opts.limit.
+ size_t _medianCount; // Number of docs better or equal to _lastMedian kept so far.
+};
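The add() path of TopKSorter is not part of this hunk, so the following is only a hedged sketch of how a cutoff like _cutoff is typically consulted; the member comment above ("We can definitely ignore values worse than this") suggests logic along these lines, but the actual implementation may differ.

    // Hypothetical helper (not the actual TopKSorter::add()): once a cutoff is
    // established, incoming values that do not sort before it can be dropped
    // without ever being buffered or spilled.
    template <typename Data, typename Less>
    bool shouldKeep(const Data& incoming, bool haveCutoff, const Data& cutoff, Less less) {
        // Keep everything until a cutoff exists; afterwards keep only values
        // strictly better than (sorting before) the cutoff.
        return !haveCutoff || less(incoming, cutoff);
    }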
+
+inline unsigned nextFileNumber() {
+ // This is unified across all Sorter types and instances.
+ static AtomicUInt32 fileCounter;
+ return fileCounter.fetchAndAdd(1);
+}
+} // namespace sorter
+
+//
+// SortedFileWriter
+//
+
+
+template <typename Key, typename Value>
+SortedFileWriter<Key, Value>::SortedFileWriter(const SortOptions& opts, const Settings& settings)
+ : _settings(settings) {
+ namespace str = mongoutils::str;
- template <typename Key, typename Value>
- template <typename Comparator>
- SortIteratorInterface<Key, Value>* SortIteratorInterface<Key, Value>::merge(
- const std::vector<std::shared_ptr<SortIteratorInterface> >& iters,
- const SortOptions& opts,
- const Comparator& comp) {
- return new sorter::MergeIterator<Key, Value, Comparator>(iters, opts, comp);
+ // This should be checked by consumers, but if we get here don't allow writes.
+ massert(
+ 16946, "Attempting to use external sort from mongos. This is not allowed.", !isMongos());
+
+ massert(17148,
+ "Attempting to use external sort without setting SortOptions::tempDir",
+ !opts.tempDir.empty());
+
+ {
+ StringBuilder sb;
+ sb << opts.tempDir << "/extsort." << sorter::nextFileNumber();
+ _fileName = sb.str();
}
- template <typename Key, typename Value>
- template <typename Comparator>
- Sorter<Key, Value>* Sorter<Key, Value>::make(const SortOptions& opts,
- const Comparator& comp,
- const Settings& settings) {
+ boost::filesystem::create_directories(opts.tempDir);
- // This should be checked by consumers, but if it isn't try to fail early.
- massert(16947, "Attempting to use external sort from mongos. This is not allowed.",
- !(isMongos() && opts.extSortAllowed));
+ _file.open(_fileName.c_str(), std::ios::binary | std::ios::out);
+ massert(16818,
+ str::stream() << "error opening file \"" << _fileName
+ << "\": " << sorter::myErrnoWithDescription(),
+ _file.good());
- massert(17149, "Attempting to use external sort without setting SortOptions::tempDir",
- !(opts.extSortAllowed && opts.tempDir.empty()));
+ _fileDeleter = std::make_shared<sorter::FileDeleter>(_fileName);
+
+ // throw on failure
+ _file.exceptions(std::ios::failbit | std::ios::badbit | std::ios::eofbit);
+}
+
+template <typename Key, typename Value>
+void SortedFileWriter<Key, Value>::addAlreadySorted(const Key& key, const Value& val) {
+ key.serializeForSorter(_buffer);
+ val.serializeForSorter(_buffer);
+
+ if (_buffer.len() > 64 * 1024)
+ spill();
+}
- switch (opts.limit) {
- case 0: return new sorter::NoLimitSorter<Key, Value, Comparator>(opts, comp, settings);
- case 1: return new sorter::LimitOneSorter<Key, Value, Comparator>(opts, comp);
- default: return new sorter::TopKSorter<Key, Value, Comparator>(opts, comp, settings);
+template <typename Key, typename Value>
+void SortedFileWriter<Key, Value>::spill() {
+ namespace str = mongoutils::str;
+
+ if (_buffer.len() == 0)
+ return;
+
+ std::string compressed;
+ snappy::Compress(_buffer.buf(), _buffer.len(), &compressed);
+ verify(compressed.size() <= size_t(std::numeric_limits<int32_t>::max()));
+
+ try {
+ if (compressed.size() < size_t(_buffer.len() / 10 * 9)) {
+ const int32_t size = -int32_t(compressed.size()); // negative means compressed
+ _file.write(reinterpret_cast<const char*>(&size), sizeof(size));
+ _file.write(compressed.data(), compressed.size());
+ } else {
+ const int32_t size = _buffer.len();
+ _file.write(reinterpret_cast<const char*>(&size), sizeof(size));
+ _file.write(_buffer.buf(), _buffer.len());
}
+ } catch (const std::exception&) {
+ msgasserted(16821,
+ str::stream() << "error writing to file \"" << _fileName
+ << "\": " << sorter::myErrnoWithDescription());
}
+
+ _buffer.reset();
+}
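The framing written above is a signed 32-bit length prefix per block: a negative value marks a snappy-compressed block of abs(size) bytes, a non-negative value a raw block. The matching FileIterator read path is not shown in this diff, so the snippet below is only a sketch of how such a prefix would be decoded; snappy::Uncompress is the real snappy API, while the stream handling around it is an assumption.

    // Sketch of decoding one block written by SortedFileWriter::spill().
    // Assumes `in` is positioned at a block boundary; error handling omitted.
    #include <cstdint>
    #include <fstream>
    #include <string>
    #include <snappy.h>

    std::string readBlock(std::ifstream& in) {
        int32_t size = 0;
        in.read(reinterpret_cast<char*>(&size), sizeof(size));
        const bool compressed = size < 0;
        const size_t len = compressed ? size_t(-size) : size_t(size);
        std::string raw(len, '\0');
        in.read(&raw[0], len);
        if (!compressed)
            return raw;  // stored uncompressed
        std::string out;
        snappy::Uncompress(raw.data(), raw.size(), &out);  // negative prefix => snappy block
        return out;
    }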
+
+template <typename Key, typename Value>
+SortIteratorInterface<Key, Value>* SortedFileWriter<Key, Value>::done() {
+ spill();
+ _file.close();
+ return new sorter::FileIterator<Key, Value>(_fileName, _settings, _fileDeleter);
+}
+
+//
+// Factory Functions
+//
+
+template <typename Key, typename Value>
+template <typename Comparator>
+SortIteratorInterface<Key, Value>* SortIteratorInterface<Key, Value>::merge(
+ const std::vector<std::shared_ptr<SortIteratorInterface>>& iters,
+ const SortOptions& opts,
+ const Comparator& comp) {
+ return new sorter::MergeIterator<Key, Value, Comparator>(iters, opts, comp);
+}
+
+template <typename Key, typename Value>
+template <typename Comparator>
+Sorter<Key, Value>* Sorter<Key, Value>::make(const SortOptions& opts,
+ const Comparator& comp,
+ const Settings& settings) {
+ // This should be checked by consumers, but if it isn't try to fail early.
+ massert(16947,
+ "Attempting to use external sort from mongos. This is not allowed.",
+ !(isMongos() && opts.extSortAllowed));
+
+ massert(17149,
+ "Attempting to use external sort without setting SortOptions::tempDir",
+ !(opts.extSortAllowed && opts.tempDir.empty()));
+
+ switch (opts.limit) {
+ case 0:
+ return new sorter::NoLimitSorter<Key, Value, Comparator>(opts, comp, settings);
+ case 1:
+ return new sorter::LimitOneSorter<Key, Value, Comparator>(opts, comp);
+ default:
+ return new sorter::TopKSorter<Key, Value, Comparator>(opts, comp, settings);
+ }
+}
}
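Together with the header changes below, typical use of the factory mirrors the Basic test later in this diff: build SortOptions, make a Sorter with a comparator, add() key/value pairs, then drain the iterator returned by done(). This is a minimal sketch, assuming the IntWrapper and IWComparator helpers from sorter_test.cpp and a hypothetical temp directory; it is not a verbatim excerpt.

    // Minimal usage sketch; IntWrapper, IWComparator and ASC are test helpers
    // defined later in this diff, and the TempDir path is hypothetical.
    SortOptions opts = SortOptions()
                           .MaxMemoryUsageBytes(64 * 1024 * 1024)
                           .ExtSortAllowed()
                           .TempDir("/tmp/extsort");
    std::unique_ptr<Sorter<IntWrapper, IntWrapper>> sorter(
        Sorter<IntWrapper, IntWrapper>::make(opts, IWComparator(ASC)));
    sorter->add(2, -2);
    sorter->add(1, -1);
    std::unique_ptr<SortIteratorInterface<IntWrapper, IntWrapper>> it(sorter->done());
    while (it->more()) {
        std::pair<IntWrapper, IntWrapper> kv = it->next();  // ascending by key
        // ... consume kv ...
    }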
diff --git a/src/mongo/db/sorter/sorter.h b/src/mongo/db/sorter/sorter.h
index d8a117d83ee..5d19aef8c2d 100644
--- a/src/mongo/db/sorter/sorter.h
+++ b/src/mongo/db/sorter/sorter.h
@@ -83,154 +83,148 @@
*/
namespace mongo {
- namespace sorter {
- // Everything in this namespace is internal to the sorter
- class FileDeleter;
+namespace sorter {
+// Everything in this namespace is internal to the sorter
+class FileDeleter;
+}
+
+/**
+ * Runtime options that control the Sorter's behavior
+ */
+struct SortOptions {
+ unsigned long long limit; /// number of KV pairs to be returned. 0 for no limit.
+ size_t maxMemoryUsageBytes; /// Approximate.
+ bool extSortAllowed; /// If false, uassert if more mem needed than allowed.
+ std::string tempDir; /// Directory to directly place files in.
+ /// Must be explicitly set if extSortAllowed is true.
+
+ SortOptions() : limit(0), maxMemoryUsageBytes(64 * 1024 * 1024), extSortAllowed(false) {}
+
+ /// Fluent API to support expressions like SortOptions().Limit(1000).ExtSortAllowed(true)
+
+ SortOptions& Limit(unsigned long long newLimit) {
+ limit = newLimit;
+ return *this;
+ }
+
+ SortOptions& MaxMemoryUsageBytes(size_t newMaxMemoryUsageBytes) {
+ maxMemoryUsageBytes = newMaxMemoryUsageBytes;
+ return *this;
+ }
+
+ SortOptions& ExtSortAllowed(bool newExtSortAllowed = true) {
+ extSortAllowed = newExtSortAllowed;
+ return *this;
+ }
+
+ SortOptions& TempDir(const std::string& newTempDir) {
+ tempDir = newTempDir;
+ return *this;
}
+};
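As the asserts in sorter.cpp above enforce (massert 17148 and 17149), opting in to external sorting without a temp directory is fatal, while purely in-memory sorts need neither setting. A short sketch of the two valid configurations follows; the directory path is illustrative.

    // In-memory-only sort: no opt-in, no temp directory required.
    SortOptions inMemOnly = SortOptions().Limit(100);

    // Spilling sort: both the opt-in flag and a temp directory are required.
    SortOptions spilling = SortOptions()
                               .MaxMemoryUsageBytes(32 * 1024 * 1024)
                               .ExtSortAllowed()
                               .TempDir("/var/tmp/mongo-extsort");  // hypothetical path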
+
+/// This is the output from the sorting framework
+template <typename Key, typename Value>
+class SortIteratorInterface {
+ MONGO_DISALLOW_COPYING(SortIteratorInterface);
+
+public:
+ typedef std::pair<Key, Value> Data;
+
+    // Unowned objects are only valid until the next call to any method
+
+ virtual bool more() = 0;
+ virtual std::pair<Key, Value> next() = 0;
+
+ virtual ~SortIteratorInterface() {}
+
+ /// Returns an iterator that merges the passed in iterators
+ template <typename Comparator>
+ static SortIteratorInterface* merge(
+ const std::vector<std::shared_ptr<SortIteratorInterface>>& iters,
+ const SortOptions& opts,
+ const Comparator& comp);
+
+protected:
+ SortIteratorInterface() {} // can only be constructed as a base
+};
+
+/// This is the main way to input data to the sorting framework
+template <typename Key, typename Value>
+class Sorter {
+ MONGO_DISALLOW_COPYING(Sorter);
+
+public:
+ typedef std::pair<Key, Value> Data;
+ typedef SortIteratorInterface<Key, Value> Iterator;
+ typedef std::pair<typename Key::SorterDeserializeSettings,
+ typename Value::SorterDeserializeSettings> Settings;
+
+ template <typename Comparator>
+ static Sorter* make(const SortOptions& opts,
+ const Comparator& comp,
+ const Settings& settings = Settings());
+
+ virtual void add(const Key&, const Value&) = 0;
+ virtual Iterator* done() = 0; /// Can't add more data after calling done()
+
+ virtual ~Sorter() {}
+
+ // TEMP these are here for compatibility. Will be replaced with a general stats API
+ virtual int numFiles() const = 0;
+ virtual size_t memUsed() const = 0;
+
+protected:
+ Sorter() {} // can only be constructed as a base
+};
+
+/// Writes pre-sorted data to a sorted file and hands back an Iterator over that file.
+template <typename Key, typename Value>
+class SortedFileWriter {
+ MONGO_DISALLOW_COPYING(SortedFileWriter);
+
+public:
+ typedef SortIteratorInterface<Key, Value> Iterator;
+ typedef std::pair<typename Key::SorterDeserializeSettings,
+ typename Value::SorterDeserializeSettings> Settings;
+
+ explicit SortedFileWriter(const SortOptions& opts, const Settings& settings = Settings());
+
+ void addAlreadySorted(const Key&, const Value&);
+ Iterator* done(); /// Can't add more data after calling done()
+
+private:
+ void spill();
- /**
- * Runtime options that control the Sorter's behavior
- */
- struct SortOptions {
- unsigned long long limit; /// number of KV pairs to be returned. 0 for no limit.
- size_t maxMemoryUsageBytes; /// Approximate.
- bool extSortAllowed; /// If false, uassert if more mem needed than allowed.
- std::string tempDir; /// Directory to directly place files in.
- /// Must be explicitly set if extSortAllowed is true.
-
- SortOptions()
- : limit(0)
- , maxMemoryUsageBytes(64*1024*1024)
- , extSortAllowed(false)
- {}
-
- /// Fluent API to support expressions like SortOptions().Limit(1000).ExtSortAllowed(true)
-
- SortOptions& Limit(unsigned long long newLimit) {
- limit = newLimit;
- return *this;
- }
-
- SortOptions& MaxMemoryUsageBytes(size_t newMaxMemoryUsageBytes) {
- maxMemoryUsageBytes = newMaxMemoryUsageBytes;
- return *this;
- }
-
- SortOptions& ExtSortAllowed(bool newExtSortAllowed=true) {
- extSortAllowed = newExtSortAllowed;
- return *this;
- }
-
- SortOptions& TempDir(const std::string& newTempDir) {
- tempDir = newTempDir;
- return *this;
- }
- };
-
- /// This is the output from the sorting framework
- template <typename Key, typename Value>
- class SortIteratorInterface {
- MONGO_DISALLOW_COPYING(SortIteratorInterface);
- public:
- typedef std::pair<Key, Value> Data;
-
- // Unowned objects are only valid until next call to any method
-
- virtual bool more() =0;
- virtual std::pair<Key, Value> next() =0;
-
- virtual ~SortIteratorInterface() {}
-
- /// Returns an iterator that merges the passed in iterators
- template <typename Comparator>
- static SortIteratorInterface* merge(
- const std::vector<std::shared_ptr<SortIteratorInterface> >& iters,
- const SortOptions& opts,
- const Comparator& comp);
- protected:
- SortIteratorInterface() {} // can only be constructed as a base
- };
-
- /// This is the main way to input data to the sorting framework
- template <typename Key, typename Value>
- class Sorter {
- MONGO_DISALLOW_COPYING(Sorter);
- public:
- typedef std::pair<Key, Value> Data;
- typedef SortIteratorInterface<Key, Value> Iterator;
- typedef std::pair<typename Key::SorterDeserializeSettings
- ,typename Value::SorterDeserializeSettings
- > Settings;
-
- template <typename Comparator>
- static Sorter* make(const SortOptions& opts,
- const Comparator& comp,
- const Settings& settings = Settings());
-
- virtual void add(const Key&, const Value&) =0;
- virtual Iterator* done() =0; /// Can't add more data after calling done()
-
- virtual ~Sorter() {}
-
- // TEMP these are here for compatibility. Will be replaced with a general stats API
- virtual int numFiles() const =0;
- virtual size_t memUsed() const =0;
-
- protected:
- Sorter() {} // can only be constructed as a base
- };
-
- /// Writes pre-sorted data to a sorted file and hands-back an Iterator over that file.
- template <typename Key, typename Value>
- class SortedFileWriter {
- MONGO_DISALLOW_COPYING(SortedFileWriter);
- public:
- typedef SortIteratorInterface<Key, Value> Iterator;
- typedef std::pair<typename Key::SorterDeserializeSettings
- ,typename Value::SorterDeserializeSettings
- > Settings;
-
- explicit SortedFileWriter(const SortOptions& opts,
- const Settings& settings = Settings());
-
- void addAlreadySorted(const Key&, const Value&);
- Iterator* done(); /// Can't add more data after calling done()
-
- private:
- void spill();
-
- const Settings _settings;
- std::string _fileName;
- std::shared_ptr<sorter::FileDeleter> _fileDeleter; // Must outlive _file
- std::ofstream _file;
- BufBuilder _buffer;
- };
+ const Settings _settings;
+ std::string _fileName;
+ std::shared_ptr<sorter::FileDeleter> _fileDeleter; // Must outlive _file
+ std::ofstream _file;
+ BufBuilder _buffer;
+};
}
/**
* #include "mongo/db/sorter/sorter.cpp" and call this in a single translation
* unit once for each unique set of template parameters.
*/
-#define MONGO_CREATE_SORTER(Key, Value, Comparator) \
- /* public classes */ \
- template class ::mongo::Sorter<Key, Value>; \
- template class ::mongo::SortIteratorInterface<Key, Value>; \
- template class ::mongo::SortedFileWriter<Key, Value>; \
- /* internal classes */ \
- template class ::mongo::sorter::NoLimitSorter<Key, Value, Comparator>; \
- template class ::mongo::sorter::LimitOneSorter<Key, Value, Comparator>; \
- template class ::mongo::sorter::TopKSorter<Key, Value, Comparator>; \
- template class ::mongo::sorter::MergeIterator<Key, Value, Comparator>; \
- template class ::mongo::sorter::InMemIterator<Key, Value>; \
- template class ::mongo::sorter::FileIterator<Key, Value>; \
- /* factory functions */ \
- template ::mongo::SortIteratorInterface<Key, Value>* \
- ::mongo::SortIteratorInterface<Key, Value>::merge<Comparator>( \
- const std::vector<std::shared_ptr<SortIteratorInterface> >& iters, \
- const SortOptions& opts, \
- const Comparator& comp); \
- template ::mongo::Sorter<Key, Value>* \
- ::mongo::Sorter<Key, Value>::make<Comparator>( \
- const SortOptions& opts, \
- const Comparator& comp, \
- const Settings& settings);
+#define MONGO_CREATE_SORTER(Key, Value, Comparator) \
+ /* public classes */ \
+ template class ::mongo::Sorter<Key, Value>; \
+ template class ::mongo::SortIteratorInterface<Key, Value>; \
+ template class ::mongo::SortedFileWriter<Key, Value>; \
+ /* internal classes */ \
+ template class ::mongo::sorter::NoLimitSorter<Key, Value, Comparator>; \
+ template class ::mongo::sorter::LimitOneSorter<Key, Value, Comparator>; \
+ template class ::mongo::sorter::TopKSorter<Key, Value, Comparator>; \
+ template class ::mongo::sorter::MergeIterator<Key, Value, Comparator>; \
+ template class ::mongo::sorter::InMemIterator<Key, Value>; \
+ template class ::mongo::sorter::FileIterator<Key, Value>; \
+ /* factory functions */ \
+ template ::mongo::SortIteratorInterface<Key, Value>* ::mongo:: \
+ SortIteratorInterface<Key, Value>::merge<Comparator>( \
+ const std::vector<std::shared_ptr<SortIteratorInterface>>& iters, \
+ const SortOptions& opts, \
+ const Comparator& comp); \
+ template ::mongo::Sorter<Key, Value>* ::mongo::Sorter<Key, Value>::make<Comparator>( \
+ const SortOptions& opts, const Comparator& comp, const Settings& settings);
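Per the comment above the macro, the templates are instantiated in exactly one translation unit per Key/Value/Comparator combination, which is also why sorter_test.cpp below includes sorter.cpp directly. The sketch below shows such a translation unit; MyKey, MyValue and MyComparator are hypothetical names, not types from the tree.

    // Hypothetical instantiation translation unit.
    #include "mongo/db/sorter/sorter.h"
    #include "mongo/db/sorter/sorter.cpp"  // template definitions, included exactly once

    // MyKey/MyValue must provide serializeForSorter(), deserializeForSorter(),
    // memUsageForSorter() and getOwned(); MyComparator returns <0, 0, or >0.
    MONGO_CREATE_SORTER(MyKey, MyValue, MyComparator);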
diff --git a/src/mongo/db/sorter/sorter_test.cpp b/src/mongo/db/sorter/sorter_test.cpp
index 423baa543fb..cdf43448f0b 100644
--- a/src/mongo/db/sorter/sorter_test.cpp
+++ b/src/mongo/db/sorter/sorter_test.cpp
@@ -42,511 +42,519 @@
#include "mongo/db/sorter/sorter.cpp"
namespace mongo {
- using namespace mongo::sorter;
- using std::make_shared;
- using std::pair;
+using namespace mongo::sorter;
+using std::make_shared;
+using std::pair;
+
+// Stub to avoid including the server_options library
+// TODO: This should go away once we can do these checks at compile time
+bool isMongos() {
+ return false;
+}
+
+//
+// Sorter framework testing utilities
+//
+
+class IntWrapper {
+public:
+ IntWrapper(int i = 0) : _i(i) {}
+ operator const int&() const {
+ return _i;
+ }
+
+ /// members for Sorter
+ struct SorterDeserializeSettings {}; // unused
+ void serializeForSorter(BufBuilder& buf) const {
+ buf.appendNum(_i);
+ }
+ static IntWrapper deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) {
+ return buf.read<int>();
+ }
+ int memUsageForSorter() const {
+ return sizeof(IntWrapper);
+ }
+ IntWrapper getOwned() const {
+ return *this;
+ }
+
+private:
+ int _i;
+};
+
+typedef pair<IntWrapper, IntWrapper> IWPair;
+typedef SortIteratorInterface<IntWrapper, IntWrapper> IWIterator;
+typedef Sorter<IntWrapper, IntWrapper> IWSorter;
+
+enum Direction { ASC = 1, DESC = -1 };
+class IWComparator {
+public:
+ IWComparator(Direction dir = ASC) : _dir(dir) {}
+ int operator()(const IWPair& lhs, const IWPair& rhs) const {
+ if (lhs.first == rhs.first)
+ return 0;
+ if (lhs.first < rhs.first)
+ return -1 * _dir;
+ return 1 * _dir;
+ }
+
+private:
+ Direction _dir;
+};
+
+class IntIterator : public IWIterator {
+public:
+ IntIterator(int start = 0, int stop = INT_MAX, int increment = 1)
+ : _current(start), _increment(increment), _stop(stop) {}
+ bool more() {
+ if (_increment == 0)
+ return true;
+ if (_increment > 0)
+ return _current < _stop;
+ return _current > _stop;
+ }
+ IWPair next() {
+ IWPair out(_current, -_current);
+ _current += _increment;
+ return out;
+ }
- // Stub to avoid including the server_options library
- // TODO: This should go away once we can do these checks at compile time
- bool isMongos() {
+private:
+ int _current;
+ int _increment;
+ int _stop;
+};
+
+class EmptyIterator : public IWIterator {
+public:
+ bool more() {
return false;
}
+ Data next() {
+ verify(false);
+ }
+};
- //
- // Sorter framework testing utilities
- //
+class LimitIterator : public IWIterator {
+public:
+ LimitIterator(long long limit, std::shared_ptr<IWIterator> source)
+ : _remaining(limit), _source(source) {
+ verify(limit > 0);
+ }
- class IntWrapper {
- public:
- IntWrapper(int i=0) :_i(i) {}
- operator const int& () const { return _i; }
+ bool more() {
+ return _remaining && _source->more();
+ }
+ Data next() {
+ verify(more());
+ _remaining--;
+ return _source->next();
+ }
- /// members for Sorter
- struct SorterDeserializeSettings {}; // unused
- void serializeForSorter(BufBuilder& buf) const { buf.appendNum(_i); }
- static IntWrapper deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) {
- return buf.read<int>();
+private:
+ long long _remaining;
+ std::shared_ptr<IWIterator> _source;
+};
+
+template <typename It1, typename It2>
+void _assertIteratorsEquivalent(It1 it1, It2 it2, int line) {
+ int iteration;
+ try {
+ for (iteration = 0; true; iteration++) {
+ ASSERT_EQUALS(it1->more(), it2->more());
+ ASSERT_EQUALS(it1->more(), it2->more()); // make sure more() is safe to call twice
+ if (!it1->more())
+ return;
+
+ IWPair pair1 = it1->next();
+ IWPair pair2 = it2->next();
+ ASSERT_EQUALS(pair1.first, pair2.first);
+ ASSERT_EQUALS(pair1.second, pair2.second);
}
- int memUsageForSorter() const { return sizeof(IntWrapper); }
- IntWrapper getOwned() const { return *this; }
- private:
- int _i;
- };
- typedef pair<IntWrapper, IntWrapper> IWPair;
- typedef SortIteratorInterface<IntWrapper, IntWrapper> IWIterator;
- typedef Sorter<IntWrapper, IntWrapper> IWSorter;
-
- enum Direction {ASC=1, DESC=-1};
- class IWComparator {
- public:
- IWComparator(Direction dir=ASC) :_dir(dir) {}
- int operator() (const IWPair& lhs, const IWPair& rhs) const {
- if (lhs.first == rhs.first) return 0;
- if (lhs.first < rhs.first) return -1 * _dir;
- return 1 * _dir;
+ } catch (...) {
+ mongo::unittest::log() << "Failure from line " << line << " on iteration " << iteration
+ << std::endl;
+ throw;
+ }
+}
+#define ASSERT_ITERATORS_EQUIVALENT(it1, it2) _assertIteratorsEquivalent(it1, it2, __LINE__)
+
+template <int N>
+std::shared_ptr<IWIterator> makeInMemIterator(const int(&array)[N]) {
+ std::vector<IWPair> vec;
+ for (int i = 0; i < N; i++)
+ vec.push_back(IWPair(array[i], -array[i]));
+ return std::make_shared<sorter::InMemIterator<IntWrapper, IntWrapper>>(vec);
+}
+
+template <typename IteratorPtr, int N>
+std::shared_ptr<IWIterator> mergeIterators(IteratorPtr(&array)[N],
+ Direction Dir = ASC,
+ const SortOptions& opts = SortOptions()) {
+ std::vector<std::shared_ptr<IWIterator>> vec;
+ for (int i = 0; i < N; i++)
+ vec.push_back(std::shared_ptr<IWIterator>(array[i]));
+ return std::shared_ptr<IWIterator>(IWIterator::merge(vec, opts, IWComparator(Dir)));
+}
+
+//
+// Tests for Sorter framework internals
+//
+
+class InMemIterTests {
+public:
+ void run() {
+ {
+ EmptyIterator empty;
+ sorter::InMemIterator<IntWrapper, IntWrapper> inMem;
+ ASSERT_ITERATORS_EQUIVALENT(&inMem, &empty);
}
- private:
- Direction _dir;
- };
+ {
+ static const int zeroUpTo20[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19};
+ ASSERT_ITERATORS_EQUIVALENT(makeInMemIterator(zeroUpTo20),
+ make_shared<IntIterator>(0, 20));
+ }
+ {
+            // make sure InMemIterator doesn't do any reordering on its own
+ static const int unsorted[] = {6, 3, 7, 4, 0, 9, 5, 7, 1, 8};
+ class UnsortedIter : public IWIterator {
+ public:
+ UnsortedIter() : _pos(0) {}
+ bool more() {
+ return _pos < sizeof(unsorted) / sizeof(unsorted[0]);
+ }
+ IWPair next() {
+ IWPair ret(unsorted[_pos], -unsorted[_pos]);
+ _pos++;
+ return ret;
+ }
+ size_t _pos;
+ } unsortedIter;
- class IntIterator : public IWIterator {
- public:
- IntIterator(int start=0, int stop=INT_MAX, int increment=1)
- : _current(start)
- , _increment(increment)
- , _stop(stop)
- {}
- bool more() {
- if (_increment == 0) return true;
- if (_increment > 0) return _current < _stop;
- return _current > _stop;
+ ASSERT_ITERATORS_EQUIVALENT(makeInMemIterator(unsorted),
+ static_cast<IWIterator*>(&unsortedIter));
}
- IWPair next() {
- IWPair out(_current, -_current);
- _current += _increment;
- return out;
+ }
+};
+
+class SortedFileWriterAndFileIteratorTests {
+public:
+ void run() {
+ unittest::TempDir tempDir("sortedFileWriterTests");
+ const SortOptions opts = SortOptions().TempDir(tempDir.path());
+ { // small
+ SortedFileWriter<IntWrapper, IntWrapper> sorter(opts);
+ sorter.addAlreadySorted(0, 0);
+ sorter.addAlreadySorted(1, -1);
+ sorter.addAlreadySorted(2, -2);
+ sorter.addAlreadySorted(3, -3);
+ sorter.addAlreadySorted(4, -4);
+ ASSERT_ITERATORS_EQUIVALENT(std::shared_ptr<IWIterator>(sorter.done()),
+ make_shared<IntIterator>(0, 5));
}
+ { // big
+ SortedFileWriter<IntWrapper, IntWrapper> sorter(opts);
+ for (int i = 0; i < 10 * 1000 * 1000; i++)
+ sorter.addAlreadySorted(i, -i);
- private:
- int _current;
- int _increment;
- int _stop;
- };
+ ASSERT_ITERATORS_EQUIVALENT(std::shared_ptr<IWIterator>(sorter.done()),
+ make_shared<IntIterator>(0, 10 * 1000 * 1000));
+ }
- class EmptyIterator : public IWIterator {
- public:
- bool more() { return false; }
- Data next() { verify(false); }
- };
+ ASSERT(boost::filesystem::is_empty(tempDir.path()));
+ }
+};
- class LimitIterator : public IWIterator {
- public:
- LimitIterator(long long limit, std::shared_ptr<IWIterator> source)
- : _remaining(limit)
- , _source(source)
- { verify(limit > 0); }
-
- bool more() { return _remaining && _source->more(); }
- Data next() {
- verify(more());
- _remaining--;
- return _source->next();
+
+class MergeIteratorTests {
+public:
+ void run() {
+ { // test empty (no inputs)
+ std::vector<std::shared_ptr<IWIterator>> vec;
+ std::shared_ptr<IWIterator> mergeIter(
+ IWIterator::merge(vec, SortOptions(), IWComparator()));
+ ASSERT_ITERATORS_EQUIVALENT(mergeIter, make_shared<EmptyIterator>());
}
+ { // test empty (only empty inputs)
+ std::shared_ptr<IWIterator> iterators[] = {make_shared<EmptyIterator>(),
+ make_shared<EmptyIterator>(),
+ make_shared<EmptyIterator>()};
- private:
- long long _remaining;
- std::shared_ptr<IWIterator> _source;
- };
+ ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, ASC),
+ make_shared<EmptyIterator>());
+ }
+
+ { // test ASC
+ std::shared_ptr<IWIterator> iterators[] = {
+ make_shared<IntIterator>(1, 20, 2) // 1, 3, ... 19
+ ,
+ make_shared<IntIterator>(0, 20, 2) // 0, 2, ... 18
+ };
- template <typename It1, typename It2>
- void _assertIteratorsEquivalent(It1 it1, It2 it2, int line) {
- int iteration;
- try {
- for (iteration = 0; true; iteration++) {
- ASSERT_EQUALS(it1->more(), it2->more());
- ASSERT_EQUALS(it1->more(), it2->more()); // make sure more() is safe to call twice
- if (!it1->more())
- return;
-
- IWPair pair1 = it1->next();
- IWPair pair2 = it2->next();
- ASSERT_EQUALS(pair1.first, pair2.first);
- ASSERT_EQUALS(pair1.second, pair2.second);
- }
-
- } catch (...) {
- mongo::unittest::log() <<
- "Failure from line " << line << " on iteration " << iteration << std::endl;
- throw;
+ ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, ASC),
+ make_shared<IntIterator>(0, 20, 1));
}
- }
-#define ASSERT_ITERATORS_EQUIVALENT(it1, it2) _assertIteratorsEquivalent(it1, it2, __LINE__)
- template <int N>
- std::shared_ptr<IWIterator> makeInMemIterator(const int (&array)[N]) {
- std::vector<IWPair> vec;
- for (int i=0; i<N; i++)
- vec.push_back(IWPair(array[i], -array[i]));
- return std::make_shared<sorter::InMemIterator<IntWrapper, IntWrapper> >(vec);
- }
-
- template <typename IteratorPtr, int N>
- std::shared_ptr<IWIterator> mergeIterators(IteratorPtr (&array)[N],
- Direction Dir=ASC,
- const SortOptions& opts=SortOptions()) {
- std::vector<std::shared_ptr<IWIterator> > vec;
- for (int i=0; i<N; i++)
- vec.push_back(std::shared_ptr<IWIterator>(array[i]));
- return std::shared_ptr<IWIterator>(IWIterator::merge(vec, opts, IWComparator(Dir)));
- }
-
- //
- // Tests for Sorter framework internals
- //
-
- class InMemIterTests {
- public:
- void run() {
- {
- EmptyIterator empty;
- sorter::InMemIterator<IntWrapper, IntWrapper> inMem;
- ASSERT_ITERATORS_EQUIVALENT(&inMem, &empty);
- }
- {
- static const int zeroUpTo20[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19};
- ASSERT_ITERATORS_EQUIVALENT(makeInMemIterator(zeroUpTo20),
- make_shared<IntIterator>(0,20));
- }
- {
- // make sure InMemIterator doesn't do any reordering on it's own
- static const int unsorted[] = {6,3,7,4,0,9,5,7,1,8};
- class UnsortedIter : public IWIterator {
- public:
- UnsortedIter() :_pos(0) {}
- bool more() { return _pos < sizeof(unsorted)/sizeof(unsorted[0]); }
- IWPair next() {
- IWPair ret(unsorted[_pos], -unsorted[_pos]);
- _pos++;
- return ret;
- }
- size_t _pos;
- } unsortedIter;
-
- ASSERT_ITERATORS_EQUIVALENT(makeInMemIterator(unsorted),
- static_cast<IWIterator*>(&unsortedIter));
- }
+ { // test DESC with an empty source
+ std::shared_ptr<IWIterator> iterators[] = {
+ make_shared<IntIterator>(30, 0, -3) // 30, 27, ... 3
+ ,
+ make_shared<IntIterator>(29, 0, -3) // 29, 26, ... 2
+ ,
+ make_shared<IntIterator>(28, 0, -3) // 28, 25, ... 1
+ ,
+ make_shared<EmptyIterator>()};
+
+ ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, DESC),
+ make_shared<IntIterator>(30, 0, -1));
}
- };
+ { // test Limit
+ std::shared_ptr<IWIterator> iterators[] = {
+ make_shared<IntIterator>(1, 20, 2) // 1, 3, ... 19
+ ,
+ make_shared<IntIterator>(0, 20, 2) // 0, 2, ... 18
+ };
- class SortedFileWriterAndFileIteratorTests {
- public:
- void run() {
- unittest::TempDir tempDir("sortedFileWriterTests");
- const SortOptions opts = SortOptions().TempDir(tempDir.path());
- { // small
- SortedFileWriter<IntWrapper, IntWrapper> sorter(opts);
- sorter.addAlreadySorted(0,0);
- sorter.addAlreadySorted(1,-1);
- sorter.addAlreadySorted(2,-2);
- sorter.addAlreadySorted(3,-3);
- sorter.addAlreadySorted(4,-4);
- ASSERT_ITERATORS_EQUIVALENT(std::shared_ptr<IWIterator>(sorter.done()),
- make_shared<IntIterator>(0,5));
- }
- { // big
- SortedFileWriter<IntWrapper, IntWrapper> sorter(opts);
- for (int i=0; i< 10*1000*1000; i++)
- sorter.addAlreadySorted(i,-i);
-
- ASSERT_ITERATORS_EQUIVALENT(std::shared_ptr<IWIterator>(sorter.done()),
- make_shared<IntIterator>(0,10*1000*1000));
- }
-
- ASSERT(boost::filesystem::is_empty(tempDir.path()));
+ ASSERT_ITERATORS_EQUIVALENT(
+ mergeIterators(iterators, ASC, SortOptions().Limit(10)),
+ make_shared<LimitIterator>(10, make_shared<IntIterator>(0, 20, 1)));
}
- };
+ }
+};
+namespace SorterTests {
+class Basic {
+public:
+ virtual ~Basic() {}
+ void run() {
+ unittest::TempDir tempDir("sorterTests");
+ const SortOptions opts = SortOptions().TempDir(tempDir.path());
- class MergeIteratorTests {
- public:
- void run() {
- { // test empty (no inputs)
- std::vector<std::shared_ptr<IWIterator> > vec;
- std::shared_ptr<IWIterator> mergeIter (IWIterator::merge(vec,
- SortOptions(),
- IWComparator()));
- ASSERT_ITERATORS_EQUIVALENT(mergeIter,
- make_shared<EmptyIterator>());
- }
- { // test empty (only empty inputs)
- std::shared_ptr<IWIterator> iterators[] =
- { make_shared<EmptyIterator>()
- , make_shared<EmptyIterator>()
- , make_shared<EmptyIterator>()
- };
-
- ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, ASC),
- make_shared<EmptyIterator>());
- }
-
- { // test ASC
- std::shared_ptr<IWIterator> iterators[] =
- { make_shared<IntIterator>(1, 20, 2) // 1, 3, ... 19
- , make_shared<IntIterator>(0, 20, 2) // 0, 2, ... 18
- };
-
- ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, ASC),
- make_shared<IntIterator>(0,20,1));
- }
-
- { // test DESC with an empty source
- std::shared_ptr<IWIterator> iterators[] =
- { make_shared<IntIterator>(30, 0, -3) // 30, 27, ... 3
- , make_shared<IntIterator>(29, 0, -3) // 29, 26, ... 2
- , make_shared<IntIterator>(28, 0, -3) // 28, 25, ... 1
- , make_shared<EmptyIterator>()
- };
-
- ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, DESC),
- make_shared<IntIterator>(30,0,-1));
- }
- { // test Limit
- std::shared_ptr<IWIterator> iterators[] =
- { make_shared<IntIterator>(1, 20, 2) // 1, 3, ... 19
- , make_shared<IntIterator>(0, 20, 2) // 0, 2, ... 18
- };
-
- ASSERT_ITERATORS_EQUIVALENT(
- mergeIterators(iterators, ASC, SortOptions().Limit(10)),
- make_shared<LimitIterator>(10, make_shared<IntIterator>(0,20,1)));
- }
+ { // test empty (no limit)
+ ASSERT_ITERATORS_EQUIVALENT(done(makeSorter(opts)), make_shared<EmptyIterator>());
+ }
+ { // test empty (limit 1)
+ ASSERT_ITERATORS_EQUIVALENT(done(makeSorter(SortOptions(opts).Limit(1))),
+ make_shared<EmptyIterator>());
+ }
+ { // test empty (limit 10)
+ ASSERT_ITERATORS_EQUIVALENT(done(makeSorter(SortOptions(opts).Limit(10))),
+ make_shared<EmptyIterator>());
}
- };
- namespace SorterTests {
- class Basic {
- public:
- virtual ~Basic() {}
+ { // test all data ASC
+ std::shared_ptr<IWSorter> sorter = makeSorter(opts, IWComparator(ASC));
+ addData(sorter);
+ ASSERT_ITERATORS_EQUIVALENT(done(sorter), correct());
+ }
+ { // test all data DESC
+ std::shared_ptr<IWSorter> sorter = makeSorter(opts, IWComparator(DESC));
+ addData(sorter);
+ ASSERT_ITERATORS_EQUIVALENT(done(sorter), correctReverse());
+ }
- void run() {
- unittest::TempDir tempDir("sorterTests");
- const SortOptions opts = SortOptions().TempDir(tempDir.path());
+// The debug builds are too slow to run these tests.
+// Among other things, MSVC++ makes all heap functions O(N) not O(logN).
+#if !defined(MONGO_CONFIG_DEBUG_BUILD)
+ { // merge all data ASC
+ std::shared_ptr<IWSorter> sorters[] = {makeSorter(opts, IWComparator(ASC)),
+ makeSorter(opts, IWComparator(ASC))};
- { // test empty (no limit)
- ASSERT_ITERATORS_EQUIVALENT(done(makeSorter(opts)),
- make_shared<EmptyIterator>());
- }
- { // test empty (limit 1)
- ASSERT_ITERATORS_EQUIVALENT(done(makeSorter(SortOptions(opts).Limit(1))),
- make_shared<EmptyIterator>());
- }
- { // test empty (limit 10)
- ASSERT_ITERATORS_EQUIVALENT(done(makeSorter(SortOptions(opts).Limit(10))),
- make_shared<EmptyIterator>());
- }
+ addData(sorters[0]);
+ addData(sorters[1]);
- { // test all data ASC
- std::shared_ptr<IWSorter> sorter = makeSorter(opts, IWComparator(ASC));
- addData(sorter);
- ASSERT_ITERATORS_EQUIVALENT(done(sorter), correct());
- }
- { // test all data DESC
- std::shared_ptr<IWSorter> sorter = makeSorter(opts, IWComparator(DESC));
- addData(sorter);
- ASSERT_ITERATORS_EQUIVALENT(done(sorter), correctReverse());
- }
+ std::shared_ptr<IWIterator> iters1[] = {done(sorters[0]), done(sorters[1])};
+ std::shared_ptr<IWIterator> iters2[] = {correct(), correct()};
+ ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iters1, ASC), mergeIterators(iters2, ASC));
+ }
+ { // merge all data DESC and use multiple threads to insert
+ std::shared_ptr<IWSorter> sorters[] = {makeSorter(opts, IWComparator(DESC)),
+ makeSorter(opts, IWComparator(DESC))};
- // The debug builds are too slow to run these tests.
- // Among other things, MSVC++ makes all heap functions O(N) not O(logN).
-#if !defined(MONGO_CONFIG_DEBUG_BUILD)
- { // merge all data ASC
- std::shared_ptr<IWSorter> sorters[] = {
- makeSorter(opts, IWComparator(ASC)),
- makeSorter(opts, IWComparator(ASC))
- };
-
- addData(sorters[0]);
- addData(sorters[1]);
-
- std::shared_ptr<IWIterator> iters1[] = {done(sorters[0]), done(sorters[1])};
- std::shared_ptr<IWIterator> iters2[] = {correct(), correct()};
- ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iters1, ASC),
- mergeIterators(iters2, ASC));
- }
- { // merge all data DESC and use multiple threads to insert
- std::shared_ptr<IWSorter> sorters[] = {
- makeSorter(opts, IWComparator(DESC)),
- makeSorter(opts, IWComparator(DESC))
- };
-
- stdx::thread inBackground(&Basic::addData, this, sorters[0]);
- addData(sorters[1]);
- inBackground.join();
-
- std::shared_ptr<IWIterator> iters1[] = {done(sorters[0]), done(sorters[1])};
- std::shared_ptr<IWIterator> iters2[] = {correctReverse(), correctReverse()};
- ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iters1, DESC),
- mergeIterators(iters2, DESC));
- }
-#endif
- ASSERT(boost::filesystem::is_empty(tempDir.path()));
- }
-
- // add data to the sorter
- virtual void addData(unowned_ptr<IWSorter> sorter) {
- sorter->add(2,-2);
- sorter->add(1,-1);
- sorter->add(0,0);
- sorter->add(4,-4);
- sorter->add(3,-3);
- }
-
- // returns an iterator with the correct results
- virtual std::shared_ptr<IWIterator> correct() {
- return make_shared<IntIterator>(0,5); // 0, 1, ... 4
- }
-
- // like correct but with opposite sort direction
- virtual std::shared_ptr<IWIterator> correctReverse() {
- return make_shared<IntIterator>(4,-1,-1); // 4, 3, ... 0
- }
-
- // It is safe to ignore / overwrite any part of options
- virtual SortOptions adjustSortOptions(SortOptions opts) {
- return opts;
- }
-
- private:
-
- // Make a new sorter with desired opts and comp. Opts may be ignored but not comp
- std::shared_ptr<IWSorter> makeSorter(SortOptions opts,
- IWComparator comp=IWComparator(ASC)) {
- return std::shared_ptr<IWSorter>(IWSorter::make(adjustSortOptions(opts), comp));
- }
-
- std::shared_ptr<IWIterator> done(unowned_ptr<IWSorter> sorter) {
- return std::shared_ptr<IWIterator>(sorter->done());
- }
- };
-
- class Limit : public Basic {
- virtual SortOptions adjustSortOptions(SortOptions opts) {
- return opts.Limit(5);
- }
- void addData(unowned_ptr<IWSorter> sorter) {
- sorter->add(0,0);
- sorter->add(3,-3);
- sorter->add(4,-4);
- sorter->add(2,-2);
- sorter->add(1,-1);
- sorter->add(-1,1);
- }
- virtual std::shared_ptr<IWIterator> correct() {
- return make_shared<IntIterator>(-1,4);
- }
- virtual std::shared_ptr<IWIterator> correctReverse() {
- return make_shared<IntIterator>(4,-1,-1);
- }
- };
-
- class Dupes : public Basic {
- void addData(unowned_ptr<IWSorter> sorter) {
- sorter->add(1,-1);
- sorter->add(-1,1);
- sorter->add(1,-1);
- sorter->add(-1,1);
- sorter->add(1,-1);
- sorter->add(0,0);
- sorter->add(2,-2);
- sorter->add(-1,1);
- sorter->add(2,-2);
- sorter->add(3,-3);
- }
- virtual std::shared_ptr<IWIterator> correct() {
- const int array[] = {-1,-1,-1, 0, 1,1,1, 2,2, 3};
- return makeInMemIterator(array);
- }
- virtual std::shared_ptr<IWIterator> correctReverse() {
- const int array[] = {3, 2,2, 1,1,1, 0, -1,-1,-1};
- return makeInMemIterator(array);
- }
- };
-
- template <bool Random=true>
- class LotsOfDataLittleMemory : public Basic {
- public:
- LotsOfDataLittleMemory() :_array(new int[NUM_ITEMS]) {
- for (int i=0; i<NUM_ITEMS; i++)
- _array[i] = i;
-
- if (Random)
- std::random_shuffle(_array.get(), _array.get()+NUM_ITEMS);
- }
-
- SortOptions adjustSortOptions(SortOptions opts) {
- // Make sure we use a reasonable number of files when we spill
- BOOST_STATIC_ASSERT((NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT > 50);
- BOOST_STATIC_ASSERT((NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT < 500);
-
- return opts.MaxMemoryUsageBytes(MEM_LIMIT).ExtSortAllowed();
- }
-
- void addData(unowned_ptr<IWSorter> sorter) {
- for (int i=0; i<NUM_ITEMS; i++)
- sorter->add(_array[i], -_array[i]);
-
- if (typeid(*this) == typeid(LotsOfDataLittleMemory)) {
- // don't do this check in subclasses since they may set a limit
- ASSERT_GREATER_THAN_OR_EQUALS(static_cast<size_t>(sorter->numFiles()),
- (NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT);
- }
- }
-
- virtual std::shared_ptr<IWIterator> correct() {
- return make_shared<IntIterator>(0, NUM_ITEMS);
- }
- virtual std::shared_ptr<IWIterator> correctReverse() {
- return make_shared<IntIterator>(NUM_ITEMS-1, -1, -1);
- }
-
- enum Constants {
- NUM_ITEMS = 500*1000,
- MEM_LIMIT = 64*1024,
- };
- std::unique_ptr<int[]> _array;
- };
-
-
- template <long long Limit, bool Random=true>
- class LotsOfDataWithLimit : public LotsOfDataLittleMemory<Random> {
- typedef LotsOfDataLittleMemory<Random> Parent;
- SortOptions adjustSortOptions(SortOptions opts) {
- // Make sure our tests will spill or not as desired
- BOOST_STATIC_ASSERT(MEM_LIMIT / 2 > ( 100 * sizeof(IWPair)));
- BOOST_STATIC_ASSERT(MEM_LIMIT < (5000 * sizeof(IWPair)));
- BOOST_STATIC_ASSERT(MEM_LIMIT * 2 > (5000 * sizeof(IWPair)));
-
- // Make sure we use a reasonable number of files when we spill
- BOOST_STATIC_ASSERT((Parent::NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT > 100);
- BOOST_STATIC_ASSERT((Parent::NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT < 500);
-
- return opts.MaxMemoryUsageBytes(MEM_LIMIT).ExtSortAllowed().Limit(Limit);
- }
- virtual std::shared_ptr<IWIterator> correct() {
- return make_shared<LimitIterator>(Limit, Parent::correct());
- }
- virtual std::shared_ptr<IWIterator> correctReverse() {
- return make_shared<LimitIterator>(Limit, Parent::correctReverse());
- }
- enum { MEM_LIMIT = 32*1024 };
- };
- }
-
- class SorterSuite : public mongo::unittest::Suite {
- public:
- SorterSuite() :
- Suite( "sorter" ) {
+ stdx::thread inBackground(&Basic::addData, this, sorters[0]);
+ addData(sorters[1]);
+ inBackground.join();
+
+ std::shared_ptr<IWIterator> iters1[] = {done(sorters[0]), done(sorters[1])};
+ std::shared_ptr<IWIterator> iters2[] = {correctReverse(), correctReverse()};
+ ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iters1, DESC), mergeIterators(iters2, DESC));
}
+#endif
+ ASSERT(boost::filesystem::is_empty(tempDir.path()));
+ }
+
+ // add data to the sorter
+ virtual void addData(unowned_ptr<IWSorter> sorter) {
+ sorter->add(2, -2);
+ sorter->add(1, -1);
+ sorter->add(0, 0);
+ sorter->add(4, -4);
+ sorter->add(3, -3);
+ }
+
+ // returns an iterator with the correct results
+ virtual std::shared_ptr<IWIterator> correct() {
+ return make_shared<IntIterator>(0, 5); // 0, 1, ... 4
+ }
+
+ // like correct but with opposite sort direction
+ virtual std::shared_ptr<IWIterator> correctReverse() {
+ return make_shared<IntIterator>(4, -1, -1); // 4, 3, ... 0
+ }
+
+ // It is safe to ignore / overwrite any part of options
+ virtual SortOptions adjustSortOptions(SortOptions opts) {
+ return opts;
+ }
- void setupTests() {
- add<InMemIterTests>();
- add<SortedFileWriterAndFileIteratorTests>();
- add<MergeIteratorTests>();
- add<SorterTests::Basic>();
- add<SorterTests::Limit>();
- add<SorterTests::Dupes>();
- add<SorterTests::LotsOfDataLittleMemory</*random=*/false> >();
- add<SorterTests::LotsOfDataLittleMemory</*random=*/true> >();
- add<SorterTests::LotsOfDataWithLimit<1,/*random=*/false> >(); // limit=1 is special case
- add<SorterTests::LotsOfDataWithLimit<1,/*random=*/true> >(); // limit=1 is special case
- add<SorterTests::LotsOfDataWithLimit<100,/*random=*/false> >(); // fits in mem
- add<SorterTests::LotsOfDataWithLimit<100,/*random=*/true> >(); // fits in mem
- add<SorterTests::LotsOfDataWithLimit<5000,/*random=*/false> >(); // spills
- add<SorterTests::LotsOfDataWithLimit<5000,/*random=*/true> >(); // spills
+private:
+ // Make a new sorter with desired opts and comp. Opts may be ignored but not comp
+ std::shared_ptr<IWSorter> makeSorter(SortOptions opts, IWComparator comp = IWComparator(ASC)) {
+ return std::shared_ptr<IWSorter>(IWSorter::make(adjustSortOptions(opts), comp));
+ }
+
+ std::shared_ptr<IWIterator> done(unowned_ptr<IWSorter> sorter) {
+ return std::shared_ptr<IWIterator>(sorter->done());
+ }
+};
+
+class Limit : public Basic {
+ virtual SortOptions adjustSortOptions(SortOptions opts) {
+ return opts.Limit(5);
+ }
+ void addData(unowned_ptr<IWSorter> sorter) {
+ sorter->add(0, 0);
+ sorter->add(3, -3);
+ sorter->add(4, -4);
+ sorter->add(2, -2);
+ sorter->add(1, -1);
+ sorter->add(-1, 1);
+ }
+ virtual std::shared_ptr<IWIterator> correct() {
+ return make_shared<IntIterator>(-1, 4);
+ }
+ virtual std::shared_ptr<IWIterator> correctReverse() {
+ return make_shared<IntIterator>(4, -1, -1);
+ }
+};
+
+class Dupes : public Basic {
+ void addData(unowned_ptr<IWSorter> sorter) {
+ sorter->add(1, -1);
+ sorter->add(-1, 1);
+ sorter->add(1, -1);
+ sorter->add(-1, 1);
+ sorter->add(1, -1);
+ sorter->add(0, 0);
+ sorter->add(2, -2);
+ sorter->add(-1, 1);
+ sorter->add(2, -2);
+ sorter->add(3, -3);
+ }
+ virtual std::shared_ptr<IWIterator> correct() {
+ const int array[] = {-1, -1, -1, 0, 1, 1, 1, 2, 2, 3};
+ return makeInMemIterator(array);
+ }
+ virtual std::shared_ptr<IWIterator> correctReverse() {
+ const int array[] = {3, 2, 2, 1, 1, 1, 0, -1, -1, -1};
+ return makeInMemIterator(array);
+ }
+};
+
+template <bool Random = true>
+class LotsOfDataLittleMemory : public Basic {
+public:
+ LotsOfDataLittleMemory() : _array(new int[NUM_ITEMS]) {
+ for (int i = 0; i < NUM_ITEMS; i++)
+ _array[i] = i;
+
+ if (Random)
+ std::random_shuffle(_array.get(), _array.get() + NUM_ITEMS);
+ }
+
+ SortOptions adjustSortOptions(SortOptions opts) {
+ // Make sure we use a reasonable number of files when we spill
+ BOOST_STATIC_ASSERT((NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT > 50);
+ BOOST_STATIC_ASSERT((NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT < 500);
+
+ return opts.MaxMemoryUsageBytes(MEM_LIMIT).ExtSortAllowed();
+ }
+
+ void addData(unowned_ptr<IWSorter> sorter) {
+ for (int i = 0; i < NUM_ITEMS; i++)
+ sorter->add(_array[i], -_array[i]);
+
+ if (typeid(*this) == typeid(LotsOfDataLittleMemory)) {
+ // don't do this check in subclasses since they may set a limit
+ ASSERT_GREATER_THAN_OR_EQUALS(static_cast<size_t>(sorter->numFiles()),
+ (NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT);
}
+ }
+
+ virtual std::shared_ptr<IWIterator> correct() {
+ return make_shared<IntIterator>(0, NUM_ITEMS);
+ }
+ virtual std::shared_ptr<IWIterator> correctReverse() {
+ return make_shared<IntIterator>(NUM_ITEMS - 1, -1, -1);
+ }
+
+ enum Constants {
+ NUM_ITEMS = 500 * 1000,
+ MEM_LIMIT = 64 * 1024,
};
+ std::unique_ptr<int[]> _array;
+};
+
+
+template <long long Limit, bool Random = true>
+class LotsOfDataWithLimit : public LotsOfDataLittleMemory<Random> {
+ typedef LotsOfDataLittleMemory<Random> Parent;
+ SortOptions adjustSortOptions(SortOptions opts) {
+ // Make sure our tests will spill or not as desired
+ BOOST_STATIC_ASSERT(MEM_LIMIT / 2 > (100 * sizeof(IWPair)));
+ BOOST_STATIC_ASSERT(MEM_LIMIT < (5000 * sizeof(IWPair)));
+ BOOST_STATIC_ASSERT(MEM_LIMIT * 2 > (5000 * sizeof(IWPair)));
+
+ // Make sure we use a reasonable number of files when we spill
+ BOOST_STATIC_ASSERT((Parent::NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT > 100);
+ BOOST_STATIC_ASSERT((Parent::NUM_ITEMS * sizeof(IWPair)) / MEM_LIMIT < 500);
+
+ return opts.MaxMemoryUsageBytes(MEM_LIMIT).ExtSortAllowed().Limit(Limit);
+ }
+ virtual std::shared_ptr<IWIterator> correct() {
+ return make_shared<LimitIterator>(Limit, Parent::correct());
+ }
+ virtual std::shared_ptr<IWIterator> correctReverse() {
+ return make_shared<LimitIterator>(Limit, Parent::correctReverse());
+ }
+ enum { MEM_LIMIT = 32 * 1024 };
+};
+}
+
+class SorterSuite : public mongo::unittest::Suite {
+public:
+ SorterSuite() : Suite("sorter") {}
+
+ void setupTests() {
+ add<InMemIterTests>();
+ add<SortedFileWriterAndFileIteratorTests>();
+ add<MergeIteratorTests>();
+ add<SorterTests::Basic>();
+ add<SorterTests::Limit>();
+ add<SorterTests::Dupes>();
+ add<SorterTests::LotsOfDataLittleMemory</*random=*/false>>();
+ add<SorterTests::LotsOfDataLittleMemory</*random=*/true>>();
+ add<SorterTests::LotsOfDataWithLimit<1, /*random=*/false>>(); // limit=1 is special case
+ add<SorterTests::LotsOfDataWithLimit<1, /*random=*/true>>(); // limit=1 is special case
+ add<SorterTests::LotsOfDataWithLimit<100, /*random=*/false>>(); // fits in mem
+ add<SorterTests::LotsOfDataWithLimit<100, /*random=*/true>>(); // fits in mem
+ add<SorterTests::LotsOfDataWithLimit<5000, /*random=*/false>>(); // spills
+ add<SorterTests::LotsOfDataWithLimit<5000, /*random=*/true>>(); // spills
+ }
+};
- mongo::unittest::SuiteInstance<SorterSuite> extSortTests;
+mongo::unittest::SuiteInstance<SorterSuite> extSortTests;
}
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index 96597324fa0..2e2fe6ed0e6 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -41,49 +41,48 @@
namespace mongo {
- //
- // system warnings
- //
- void logCommonStartupWarnings() {
- // each message adds a leading and a trailing newline
+//
+// system warnings
+//
+void logCommonStartupWarnings() {
+ // each message adds a leading and a trailing newline
- bool warned = false;
- {
- const char * foo = strchr(versionString , '.') + 1;
- int bar = atoi(foo);
- if ((2 * (bar / 2)) != bar) {
- log() << startupWarningsLog;
- log() << "** NOTE: This is a development version (" << versionString
- << ") of MongoDB." << startupWarningsLog;
- log() << "** Not recommended for production." << startupWarningsLog;
- warned = true;
- }
+ bool warned = false;
+ {
+ const char* foo = strchr(versionString, '.') + 1;
+ int bar = atoi(foo);
+ if ((2 * (bar / 2)) != bar) {
+ log() << startupWarningsLog;
+ log() << "** NOTE: This is a development version (" << versionString << ") of MongoDB."
+ << startupWarningsLog;
+ log() << "** Not recommended for production." << startupWarningsLog;
+ warned = true;
}
+ }
#if defined(_WIN32) && !defined(_WIN64)
- // Warn user that they are running a 32-bit app on 64-bit Windows
- BOOL wow64Process;
- BOOL retWow64 = IsWow64Process(GetCurrentProcess(), &wow64Process);
- if (retWow64 && wow64Process) {
- log() << "** NOTE: This is a 32-bit MongoDB binary running on a 64-bit operating"
- << startupWarningsLog;
- log() << "** system. Switch to a 64-bit build of MongoDB to"
- << startupWarningsLog;
- log() << "** support larger databases." << startupWarningsLog;
- warned = true;
- }
+ // Warn user that they are running a 32-bit app on 64-bit Windows
+ BOOL wow64Process;
+ BOOL retWow64 = IsWow64Process(GetCurrentProcess(), &wow64Process);
+ if (retWow64 && wow64Process) {
+ log() << "** NOTE: This is a 32-bit MongoDB binary running on a 64-bit operating"
+ << startupWarningsLog;
+ log() << "** system. Switch to a 64-bit build of MongoDB to" << startupWarningsLog;
+ log() << "** support larger databases." << startupWarningsLog;
+ warned = true;
+ }
#endif
#if !defined(_WIN32)
- if (getuid() == 0) {
- log() << "** WARNING: You are running this process as the root user, "
- << "which is not recommended." << startupWarningsLog;
- warned = true;
- }
+ if (getuid() == 0) {
+ log() << "** WARNING: You are running this process as the root user, "
+ << "which is not recommended." << startupWarningsLog;
+ warned = true;
+ }
#endif
- if (warned) {
- log() << startupWarningsLog;
- }
+ if (warned) {
+ log() << startupWarningsLog;
}
-} // namespace mongo
+}
+} // namespace mongo
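The parity check above flags odd minor versions as development builds: a versionString such as "3.1.4" yields 1 after the first dot (odd, so the note is logged), while "3.0.6" yields 0 and stays silent. A standalone sketch of the same check, which, like the original, assumes a dotted version string:

    // Sketch of the development-version check: the component after the first
    // '.' is odd for development releases, matching (2 * (bar / 2)) != bar above.
    #include <cstdlib>
    #include <cstring>

    bool isDevelopmentVersion(const char* versionString) {
        const char* afterDot = std::strchr(versionString, '.') + 1;  // e.g. "1.4" from "3.1.4"
        return (std::atoi(afterDot) % 2) != 0;
    }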
diff --git a/src/mongo/db/startup_warnings_common.h b/src/mongo/db/startup_warnings_common.h
index 9cccfd35592..ac77a2d5ce3 100644
--- a/src/mongo/db/startup_warnings_common.h
+++ b/src/mongo/db/startup_warnings_common.h
@@ -27,7 +27,7 @@
*/
namespace mongo {
- // Checks various startup conditions and logs any necessary warnings that
- // are common to both mongod and mongos processes.
- void logCommonStartupWarnings();
-} // namespace mongo
+// Checks various startup conditions and logs any necessary warnings that
+// are common to both mongod and mongos processes.
+void logCommonStartupWarnings();
+} // namespace mongo
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index ff1b1647ba0..4d73540482c 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -48,308 +48,293 @@
namespace mongo {
namespace {
- const std::string kTransparentHugePagesDirectory("/sys/kernel/mm/transparent_hugepage");
+const std::string kTransparentHugePagesDirectory("/sys/kernel/mm/transparent_hugepage");
} // namespace
- using std::ios_base;
- using std::string;
-
- // static
- StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter(
- const std::string& parameter) {
-
- return readTransparentHugePagesParameter(parameter, kTransparentHugePagesDirectory);
- }
-
- // static
- StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter(
- const std::string& parameter,
- const std::string& directory) {
-
- std::string opMode;
- try {
- boost::filesystem::path directoryPath(directory);
- if (!boost::filesystem::exists(directoryPath)) {
- return StatusWith<std::string>(ErrorCodes::NonExistentPath, str::stream()
- << "Unable to read non-existent transparent Huge Pages directory: "
- << directory);
- }
+using std::ios_base;
+using std::string;
+
+// static
+StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter(
+ const std::string& parameter) {
+ return readTransparentHugePagesParameter(parameter, kTransparentHugePagesDirectory);
+}
+
+// static
+StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter(
+ const std::string& parameter, const std::string& directory) {
+ std::string opMode;
+ try {
+ boost::filesystem::path directoryPath(directory);
+ if (!boost::filesystem::exists(directoryPath)) {
+ return StatusWith<std::string>(
+ ErrorCodes::NonExistentPath,
+ str::stream() << "Unable to read non-existent transparent Huge Pages directory: "
+ << directory);
+ }
- boost::filesystem::path parameterPath(directoryPath / parameter);
- if (!boost::filesystem::exists(parameterPath)) {
- return StatusWith<std::string>(ErrorCodes::NonExistentPath, str::stream()
- << "Unable to read non-existent transparent Huge Pages file: "
- << parameterPath.string());
- }
+ boost::filesystem::path parameterPath(directoryPath / parameter);
+ if (!boost::filesystem::exists(parameterPath)) {
+ return StatusWith<std::string>(
+ ErrorCodes::NonExistentPath,
+ str::stream() << "Unable to read non-existent transparent Huge Pages file: "
+ << parameterPath.string());
+ }
- std::string filename(parameterPath.string());
- std::ifstream ifs(filename.c_str());
- if (!ifs) {
- return StatusWith<std::string>(ErrorCodes::FileNotOpen, str::stream()
- << "Unable to open transparent Huge Pages file " << filename);
- }
+ std::string filename(parameterPath.string());
+ std::ifstream ifs(filename.c_str());
+ if (!ifs) {
+ return StatusWith<std::string>(
+ ErrorCodes::FileNotOpen,
+ str::stream() << "Unable to open transparent Huge Pages file " << filename);
+ }
- std::string line;
- if (!std::getline(ifs, line)) {
- int errorcode = errno;
- return StatusWith<std::string>(ErrorCodes::FileStreamFailed, str::stream()
- << "failed to read from " << filename << ": "
- << ((ifs.eof()) ? "EOF" : errnoWithDescription(errorcode)));
- }
+ std::string line;
+ if (!std::getline(ifs, line)) {
+ int errorcode = errno;
+ return StatusWith<std::string>(
+ ErrorCodes::FileStreamFailed,
+ str::stream() << "failed to read from " << filename << ": "
+ << ((ifs.eof()) ? "EOF" : errnoWithDescription(errorcode)));
+ }
- std::string::size_type posBegin = line.find("[");
- std::string::size_type posEnd = line.find("]");
- if (posBegin == string::npos || posEnd == string::npos ||
- posBegin >= posEnd) {
- return StatusWith<std::string>(ErrorCodes::FailedToParse, str::stream()
- << "cannot parse line: '" << line << "'");
- }
+ std::string::size_type posBegin = line.find("[");
+ std::string::size_type posEnd = line.find("]");
+ if (posBegin == string::npos || posEnd == string::npos || posBegin >= posEnd) {
+ return StatusWith<std::string>(ErrorCodes::FailedToParse,
+ str::stream() << "cannot parse line: '" << line << "'");
+ }
- opMode = line.substr(posBegin + 1, posEnd - posBegin - 1);
- if (opMode.empty()) {
- return StatusWith<std::string>(ErrorCodes::BadValue, str::stream()
- << "invalid mode in " << filename << ": '" << line << "'");
- }
+ opMode = line.substr(posBegin + 1, posEnd - posBegin - 1);
+ if (opMode.empty()) {
+ return StatusWith<std::string>(ErrorCodes::BadValue,
+ str::stream() << "invalid mode in " << filename << ": '"
+ << line << "'");
+ }
- // Check against acceptable values of opMode.
- if (opMode != "always" && opMode != "madvise" && opMode != "never") {
- return StatusWith<std::string>(ErrorCodes::BadValue, str::stream()
+ // Check against acceptable values of opMode.
+ if (opMode != "always" && opMode != "madvise" && opMode != "never") {
+ return StatusWith<std::string>(
+ ErrorCodes::BadValue,
+ str::stream()
<< "** WARNING: unrecognized transparent Huge Pages mode of operation in "
<< filename << ": '" << opMode << "''");
- }
- }
- catch (const boost::filesystem::filesystem_error& err) {
- return StatusWith<std::string>(ErrorCodes::UnknownError, str::stream()
- << "Failed to probe \"" << err.path1().string() << "\": "
- << err.code().message());
}
-
- return StatusWith<std::string>(opMode);
+ } catch (const boost::filesystem::filesystem_error& err) {
+ return StatusWith<std::string>(ErrorCodes::UnknownError,
+ str::stream() << "Failed to probe \"" << err.path1().string()
+ << "\": " << err.code().message());
}
- void logMongodStartupWarnings(const StorageGlobalParams& params) {
- logCommonStartupWarnings();
+ return StatusWith<std::string>(opMode);
+}
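The bracket parsing above extracts the active mode from a sysfs line such as "always [madvise] never". A standalone sketch of the same extraction follows; the sample line is illustrative of the /sys/kernel/mm/transparent_hugepage file format.

    // Sketch of the mode extraction in readTransparentHugePagesParameter():
    // the kernel marks the active THP mode with square brackets.
    #include <iostream>
    #include <string>

    int main() {
        const std::string line = "always [madvise] never";  // sample sysfs content
        const std::string::size_type posBegin = line.find("[");
        const std::string::size_type posEnd = line.find("]");
        if (posBegin == std::string::npos || posEnd == std::string::npos || posBegin >= posEnd) {
            std::cout << "cannot parse line" << std::endl;
            return 1;
        }
        std::cout << line.substr(posBegin + 1, posEnd - posBegin - 1) << std::endl;  // "madvise"
        return 0;
    }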
- bool warned = false;
+void logMongodStartupWarnings(const StorageGlobalParams& params) {
+ logCommonStartupWarnings();
- if (sizeof(int*) == 4) {
- log() << startupWarningsLog;
- log() << "** NOTE: This is a 32 bit MongoDB binary." << startupWarningsLog;
- log() << "** 32 bit builds are limited to less than 2GB of data "
- << "(or less with --journal)." << startupWarningsLog;
- if (!params.dur) {
- log() << "** Note that journaling defaults to off for 32 bit "
- << "and is currently off." << startupWarningsLog;
- }
- log() << "** See http://dochub.mongodb.org/core/32bit" << startupWarningsLog;
- warned = true;
- }
+ bool warned = false;
- if (!ProcessInfo::blockCheckSupported()) {
- log() << startupWarningsLog;
- log() << "** NOTE: your operating system version does not support the method that "
- << "MongoDB" << startupWarningsLog;
- log() << "** uses to detect impending page faults." << startupWarningsLog;
- log() << "** This may result in slower performance for certain use "
- << "cases" << startupWarningsLog;
- warned = true;
+ if (sizeof(int*) == 4) {
+ log() << startupWarningsLog;
+ log() << "** NOTE: This is a 32 bit MongoDB binary." << startupWarningsLog;
+ log() << "** 32 bit builds are limited to less than 2GB of data "
+ << "(or less with --journal)." << startupWarningsLog;
+ if (!params.dur) {
+ log() << "** Note that journaling defaults to off for 32 bit "
+ << "and is currently off." << startupWarningsLog;
}
+ log() << "** See http://dochub.mongodb.org/core/32bit" << startupWarningsLog;
+ warned = true;
+ }
+
+ if (!ProcessInfo::blockCheckSupported()) {
+ log() << startupWarningsLog;
+ log() << "** NOTE: your operating system version does not support the method that "
+ << "MongoDB" << startupWarningsLog;
+ log() << "** uses to detect impending page faults." << startupWarningsLog;
+ log() << "** This may result in slower performance for certain use "
+ << "cases" << startupWarningsLog;
+ warned = true;
+ }
#ifdef __linux__
- if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")) {
- log() << startupWarningsLog;
- log() << "** WARNING: You are running in OpenVZ which can cause issues on versions "
- << "of RHEL older than RHEL6." << startupWarningsLog;
- warned = true;
- }
+ if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: You are running in OpenVZ which can cause issues on versions "
+ << "of RHEL older than RHEL6." << startupWarningsLog;
+ warned = true;
+ }
- bool hasMultipleNumaNodes = false;
- try {
- hasMultipleNumaNodes = boost::filesystem::exists("/sys/devices/system/node/node1");
- } catch(boost::filesystem::filesystem_error& e) {
- log() << startupWarningsLog;
- log() << "** WARNING: Cannot detect if NUMA interleaving is enabled. "
- << "Failed to probe \"" << e.path1().string() << "\": " << e.code().message()
- << startupWarningsLog;
- }
- if (hasMultipleNumaNodes) {
- // We are on a box with a NUMA enabled kernel and more than 1 numa node (they start at
- // node0)
- // Now we look at the first line of /proc/self/numa_maps
- //
- // Bad example:
- // $ cat /proc/self/numa_maps
- // 00400000 default file=/bin/cat mapped=6 N4=6
- //
- // Good example:
- // $ numactl --interleave=all cat /proc/self/numa_maps
- // 00400000 interleave:0-7 file=/bin/cat mapped=6 N4=6
-
- std::ifstream f("/proc/self/numa_maps", std::ifstream::in);
- if (f.is_open()) {
- std::string line; //we only need the first line
- std::getline(f, line);
- if (f.fail()) {
- warning() << "failed to read from /proc/self/numa_maps: "
- << errnoWithDescription() << startupWarningsLog;
+ bool hasMultipleNumaNodes = false;
+ try {
+ hasMultipleNumaNodes = boost::filesystem::exists("/sys/devices/system/node/node1");
+ } catch (boost::filesystem::filesystem_error& e) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: Cannot detect if NUMA interleaving is enabled. "
+ << "Failed to probe \"" << e.path1().string() << "\": " << e.code().message()
+ << startupWarningsLog;
+ }
+ if (hasMultipleNumaNodes) {
+ // We are on a box with a NUMA enabled kernel and more than 1 numa node (they start at
+ // node0)
+ // Now we look at the first line of /proc/self/numa_maps
+ //
+ // Bad example:
+ // $ cat /proc/self/numa_maps
+ // 00400000 default file=/bin/cat mapped=6 N4=6
+ //
+ // Good example:
+ // $ numactl --interleave=all cat /proc/self/numa_maps
+ // 00400000 interleave:0-7 file=/bin/cat mapped=6 N4=6
+
+ std::ifstream f("/proc/self/numa_maps", std::ifstream::in);
+ if (f.is_open()) {
+ std::string line; // we only need the first line
+ std::getline(f, line);
+ if (f.fail()) {
+ warning() << "failed to read from /proc/self/numa_maps: " << errnoWithDescription()
+ << startupWarningsLog;
+ warned = true;
+ } else {
+ // skip over pointer
+ std::string::size_type where = line.find(' ');
+ if ((where == std::string::npos) || (++where == line.size())) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: cannot parse numa_maps line: '" << line << "'"
+ << startupWarningsLog;
warned = true;
}
- else {
- // skip over pointer
- std::string::size_type where = line.find(' ');
- if ((where == std::string::npos) || (++where == line.size())) {
- log() << startupWarningsLog;
- log() << "** WARNING: cannot parse numa_maps line: '" << line << "'"
- << startupWarningsLog;
- warned = true;
- }
- // if the text following the space doesn't begin with 'interleave', then
- // issue the warning.
- else if (line.find("interleave", where) != where) {
- log() << startupWarningsLog;
- log() << "** WARNING: You are running on a NUMA machine."
- << startupWarningsLog;
- log() << "** We suggest launching mongod like this to avoid "
- << "performance problems:" << startupWarningsLog;
- log() << "** numactl --interleave=all mongod [other options]"
- << startupWarningsLog;
- warned = true;
- }
+ // if the text following the space doesn't begin with 'interleave', then
+ // issue the warning.
+ else if (line.find("interleave", where) != where) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: You are running on a NUMA machine." << startupWarningsLog;
+ log() << "** We suggest launching mongod like this to avoid "
+ << "performance problems:" << startupWarningsLog;
+ log() << "** numactl --interleave=all mongod [other options]"
+ << startupWarningsLog;
+ warned = true;
}
}
}
+ }
- if (params.dur) {
- std::fstream f("/proc/sys/vm/overcommit_memory", ios_base::in);
- unsigned val;
- f >> val;
-
- if (val == 2) {
- log() << startupWarningsLog;
- log() << "** WARNING: /proc/sys/vm/overcommit_memory is " << val
- << startupWarningsLog;
- log() << "** Journaling works best with it set to 0 or 1"
- << startupWarningsLog;
- }
- }
+ if (params.dur) {
+ std::fstream f("/proc/sys/vm/overcommit_memory", ios_base::in);
+ unsigned val;
+ f >> val;
- if (boost::filesystem::exists("/proc/sys/vm/zone_reclaim_mode")){
- std::fstream f("/proc/sys/vm/zone_reclaim_mode", ios_base::in);
- unsigned val;
- f >> val;
-
- if (val != 0) {
- log() << startupWarningsLog;
- log() << "** WARNING: /proc/sys/vm/zone_reclaim_mode is " << val
- << startupWarningsLog;
- log() << "** We suggest setting it to 0" << startupWarningsLog;
- log() << "** http://www.kernel.org/doc/Documentation/sysctl/vm.txt"
- << startupWarningsLog;
- }
+ if (val == 2) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: /proc/sys/vm/overcommit_memory is " << val << startupWarningsLog;
+ log() << "** Journaling works best with it set to 0 or 1"
+ << startupWarningsLog;
}
+ }
- // Transparent Hugepages checks
- StatusWith<std::string> transparentHugePagesEnabledResult =
- StartupWarningsMongod::readTransparentHugePagesParameter("enabled");
- if (transparentHugePagesEnabledResult.isOK()) {
- if (transparentHugePagesEnabledResult.getValue() == "always") {
- log() << startupWarningsLog;
- log() << "** WARNING: " << kTransparentHugePagesDirectory
- << "/enabled is 'always'."
- << startupWarningsLog;
- log() << "** We suggest setting it to 'never'"
- << startupWarningsLog;
- warned = true;
- }
+ if (boost::filesystem::exists("/proc/sys/vm/zone_reclaim_mode")) {
+ std::fstream f("/proc/sys/vm/zone_reclaim_mode", ios_base::in);
+ unsigned val;
+ f >> val;
+
+ if (val != 0) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: /proc/sys/vm/zone_reclaim_mode is " << val << startupWarningsLog;
+ log() << "** We suggest setting it to 0" << startupWarningsLog;
+ log() << "** http://www.kernel.org/doc/Documentation/sysctl/vm.txt"
+ << startupWarningsLog;
}
- else if (transparentHugePagesEnabledResult.getStatus().code() !=
- ErrorCodes::NonExistentPath) {
- warning() << startupWarningsLog;
- warning() << transparentHugePagesEnabledResult.getStatus().reason()
- << startupWarningsLog;
+ }
+
+ // Transparent Hugepages checks
+ StatusWith<std::string> transparentHugePagesEnabledResult =
+ StartupWarningsMongod::readTransparentHugePagesParameter("enabled");
+ if (transparentHugePagesEnabledResult.isOK()) {
+ if (transparentHugePagesEnabledResult.getValue() == "always") {
+ log() << startupWarningsLog;
+ log() << "** WARNING: " << kTransparentHugePagesDirectory << "/enabled is 'always'."
+ << startupWarningsLog;
+ log() << "** We suggest setting it to 'never'" << startupWarningsLog;
warned = true;
}
+ } else if (transparentHugePagesEnabledResult.getStatus().code() !=
+ ErrorCodes::NonExistentPath) {
+ warning() << startupWarningsLog;
+ warning() << transparentHugePagesEnabledResult.getStatus().reason() << startupWarningsLog;
+ warned = true;
+ }
- StatusWith<std::string> transparentHugePagesDefragResult =
- StartupWarningsMongod::readTransparentHugePagesParameter("defrag");
- if (transparentHugePagesDefragResult.isOK()) {
- if (transparentHugePagesDefragResult.getValue() == "always") {
- log() << startupWarningsLog;
- log() << "** WARNING: " << kTransparentHugePagesDirectory
- << "/defrag is 'always'."
- << startupWarningsLog;
- log() << "** We suggest setting it to 'never'"
- << startupWarningsLog;
- warned = true;
- }
- }
- else if (transparentHugePagesDefragResult.getStatus().code() !=
- ErrorCodes::NonExistentPath) {
- warning() << startupWarningsLog;
- warning() << transparentHugePagesDefragResult.getStatus().reason()
- << startupWarningsLog;
+ StatusWith<std::string> transparentHugePagesDefragResult =
+ StartupWarningsMongod::readTransparentHugePagesParameter("defrag");
+ if (transparentHugePagesDefragResult.isOK()) {
+ if (transparentHugePagesDefragResult.getValue() == "always") {
+ log() << startupWarningsLog;
+ log() << "** WARNING: " << kTransparentHugePagesDirectory << "/defrag is 'always'."
+ << startupWarningsLog;
+ log() << "** We suggest setting it to 'never'" << startupWarningsLog;
warned = true;
}
+ } else if (transparentHugePagesDefragResult.getStatus().code() != ErrorCodes::NonExistentPath) {
+ warning() << startupWarningsLog;
+ warning() << transparentHugePagesDefragResult.getStatus().reason() << startupWarningsLog;
+ warned = true;
+ }
#endif // __linux__
#if defined(RLIMIT_NPROC) && defined(RLIMIT_NOFILE)
- //Check that # of files rlmit > 1000 , and # of processes > # of files/2
- const unsigned int minNumFiles = 1000;
- const double filesToProcsRatio = 2.0;
- struct rlimit rlnproc;
- struct rlimit rlnofile;
-
- if(!getrlimit(RLIMIT_NPROC,&rlnproc) && !getrlimit(RLIMIT_NOFILE,&rlnofile)){
- if(rlnofile.rlim_cur < minNumFiles){
- log() << startupWarningsLog;
- log() << "** WARNING: soft rlimits too low. Number of files is "
- << rlnofile.rlim_cur
- << ", should be at least " << minNumFiles << startupWarningsLog;
- }
+    // Check that # of files rlimit > 1000, and # of processes > # of files/2
+ const unsigned int minNumFiles = 1000;
+ const double filesToProcsRatio = 2.0;
+ struct rlimit rlnproc;
+ struct rlimit rlnofile;
+
+ if (!getrlimit(RLIMIT_NPROC, &rlnproc) && !getrlimit(RLIMIT_NOFILE, &rlnofile)) {
+ if (rlnofile.rlim_cur < minNumFiles) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: soft rlimits too low. Number of files is " << rlnofile.rlim_cur
+ << ", should be at least " << minNumFiles << startupWarningsLog;
+ }
- if(false){
- // juse to make things cleaner
- }
+ if (false) {
+        // just to make things cleaner
+ }
#ifdef __APPLE__
- else if(rlnproc.rlim_cur >= 709){
- // os x doesn't make it easy to go higher
- // ERH thinks its ok not to add the warning in this case 7/3/2012
- }
+ else if (rlnproc.rlim_cur >= 709) {
+        // OS X doesn't make it easy to go higher
+        // ERH thinks it's ok not to add the warning in this case 7/3/2012
+ }
#endif
- else if(rlnproc.rlim_cur < rlnofile.rlim_cur/filesToProcsRatio){
- log() << startupWarningsLog;
- log() << "** WARNING: soft rlimits too low. rlimits set to "
- << rlnproc.rlim_cur << " processes, "
- << rlnofile.rlim_cur << " files. Number of processes should be at least "
- << rlnofile.rlim_cur/filesToProcsRatio << " : "
- << 1/filesToProcsRatio << " times number of files." << startupWarningsLog;
- }
- } else {
+ else if (rlnproc.rlim_cur < rlnofile.rlim_cur / filesToProcsRatio) {
log() << startupWarningsLog;
- log() << "** WARNING: getrlimit failed. " << errnoWithDescription()
- << startupWarningsLog;
+ log() << "** WARNING: soft rlimits too low. rlimits set to " << rlnproc.rlim_cur
+ << " processes, " << rlnofile.rlim_cur
+ << " files. Number of processes should be at least "
+ << rlnofile.rlim_cur / filesToProcsRatio << " : " << 1 / filesToProcsRatio
+ << " times number of files." << startupWarningsLog;
}
+ } else {
+ log() << startupWarningsLog;
+ log() << "** WARNING: getrlimit failed. " << errnoWithDescription() << startupWarningsLog;
+ }
#endif
#ifdef _WIN32
- ProcessInfo p;
-
- if (p.hasNumaEnabled()) {
- log() << startupWarningsLog;
- log() << "** WARNING: You are running on a NUMA machine."
- << startupWarningsLog;
- log() << "** We suggest disabling NUMA in the machine BIOS "
- << startupWarningsLog;
- log() << "** by enabling interleaving to avoid performance problems. "
- << startupWarningsLog;
- log() << "** See your BIOS documentation for more information."
- << startupWarningsLog;
- warned = true;
- }
-#endif // #ifdef _WIN32
+ ProcessInfo p;
+
+ if (p.hasNumaEnabled()) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: You are running on a NUMA machine." << startupWarningsLog;
+ log() << "** We suggest disabling NUMA in the machine BIOS " << startupWarningsLog;
+ log() << "** by enabling interleaving to avoid performance problems. "
+ << startupWarningsLog;
+ log() << "** See your BIOS documentation for more information."
+ << startupWarningsLog;
+ warned = true;
+ }
+#endif // #ifdef _WIN32
- if (warned) {
- log() << startupWarningsLog;
- }
+ if (warned) {
+ log() << startupWarningsLog;
}
-} // namespace mongo
+}
+} // namespace mongo
diff --git a/src/mongo/db/startup_warnings_mongod.h b/src/mongo/db/startup_warnings_mongod.h
index f53e1ce9daa..b1dbbd21b5c 100644
--- a/src/mongo/db/startup_warnings_mongod.h
+++ b/src/mongo/db/startup_warnings_mongod.h
@@ -31,30 +31,28 @@
namespace mongo {
- struct StorageGlobalParams;
+struct StorageGlobalParams;
- class StartupWarningsMongod {
- private:
- StartupWarningsMongod();
+class StartupWarningsMongod {
+private:
+ StartupWarningsMongod();
- public:
- /**
- * Reads Transparent HugePages kernel parameter in sysfs directory.
- * Linux only.
- */
- static StatusWith<std::string> readTransparentHugePagesParameter(
- const std::string& parameter);
+public:
+ /**
+ * Reads Transparent HugePages kernel parameter in sysfs directory.
+ * Linux only.
+ */
+ static StatusWith<std::string> readTransparentHugePagesParameter(const std::string& parameter);
- /**
- * For testing only.
- * Supports alternate directory for transparent huge pages files.
- */
- static StatusWith<std::string> readTransparentHugePagesParameter(
- const std::string& parameter,
- const std::string& directory);
- };
+ /**
+ * For testing only.
+ * Supports alternate directory for transparent huge pages files.
+ */
+ static StatusWith<std::string> readTransparentHugePagesParameter(const std::string& parameter,
+ const std::string& directory);
+};
- // Checks various startup conditions and logs any necessary warnings that
- // are specific to the mongod process.
- void logMongodStartupWarnings(const StorageGlobalParams& params);
-} // namespace mongo
+// Checks various startup conditions and logs any necessary warnings that
+// are specific to the mongod process.
+void logMongodStartupWarnings(const StorageGlobalParams& params);
+} // namespace mongo
diff --git a/src/mongo/db/startup_warnings_mongod_test.cpp b/src/mongo/db/startup_warnings_mongod_test.cpp
index 633a38b811a..3c00216d47b 100644
--- a/src/mongo/db/startup_warnings_mongod_test.cpp
+++ b/src/mongo/db/startup_warnings_mongod_test.cpp
@@ -37,101 +37,101 @@
namespace {
- using mongo::unittest::TempDir;
+using mongo::unittest::TempDir;
- using namespace mongo;
+using namespace mongo;
- TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterInvalidDirectory) {
- StatusWith<std::string> result =
- StartupWarningsMongod::readTransparentHugePagesParameter("no_such_directory", "param");
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::NonExistentPath, result.getStatus().code());
- }
+TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterInvalidDirectory) {
+ StatusWith<std::string> result =
+ StartupWarningsMongod::readTransparentHugePagesParameter("no_such_directory", "param");
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::NonExistentPath, result.getStatus().code());
+}
- TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterInvalidFile) {
- TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterInvalidFile");
- StatusWith<std::string> result =
- StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::NonExistentPath, result.getStatus().code());
- }
+TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterInvalidFile) {
+ TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterInvalidFile");
+ StatusWith<std::string> result =
+ StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::NonExistentPath, result.getStatus().code());
+}
- TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterEmptyFile) {
- TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterInvalidFile");
- {
- std::string filename(tempDir.path() + "/param");
- std::ofstream(filename.c_str());
- }
- StatusWith<std::string> result =
- StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FileStreamFailed, result.getStatus().code());
+TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterEmptyFile) {
+ TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterInvalidFile");
+ {
+ std::string filename(tempDir.path() + "/param");
+ std::ofstream(filename.c_str());
}
+ StatusWith<std::string> result =
+ StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FileStreamFailed, result.getStatus().code());
+}
- TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterBlankLine) {
- TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterBlankLine");
- {
- std::string filename(tempDir.path() + "/param");
- std::ofstream ofs(filename.c_str());
- ofs << std::endl;
- }
- StatusWith<std::string> result =
- StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterBlankLine) {
+ TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterBlankLine");
+ {
+ std::string filename(tempDir.path() + "/param");
+ std::ofstream ofs(filename.c_str());
+ ofs << std::endl;
}
+ StatusWith<std::string> result =
+ StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
- TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterInvalidFormat) {
- TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterBlankLine");
- {
- std::string filename(tempDir.path() + "/param");
- std::ofstream ofs(filename.c_str());
- ofs << "always madvise never" << std::endl;
- }
- StatusWith<std::string> result =
- StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterInvalidFormat) {
+ TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterBlankLine");
+ {
+ std::string filename(tempDir.path() + "/param");
+ std::ofstream ofs(filename.c_str());
+ ofs << "always madvise never" << std::endl;
}
+ StatusWith<std::string> result =
+ StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
- TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterEmptyOpMode) {
- TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterEmptyOpMode");
- {
- std::string filename(tempDir.path() + "/param");
- std::ofstream ofs(filename.c_str());
- ofs << "always madvise [] never" << std::endl;
- }
- StatusWith<std::string> result =
- StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterEmptyOpMode) {
+ TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterEmptyOpMode");
+ {
+ std::string filename(tempDir.path() + "/param");
+ std::ofstream ofs(filename.c_str());
+ ofs << "always madvise [] never" << std::endl;
}
+ StatusWith<std::string> result =
+ StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
- TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterUnrecognizedOpMode) {
- TempDir tempDir(
- "StartupWarningsMongodTest_ReadTransparentHugePagesParameterUnrecognizedOpMode");
- {
- std::string filename(tempDir.path() + "/param");
- std::ofstream ofs(filename.c_str());
- ofs << "always madvise never [unknown]" << std::endl;
- }
- StatusWith<std::string> result =
- StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterUnrecognizedOpMode) {
+ TempDir tempDir(
+ "StartupWarningsMongodTest_ReadTransparentHugePagesParameterUnrecognizedOpMode");
+ {
+ std::string filename(tempDir.path() + "/param");
+ std::ofstream ofs(filename.c_str());
+ ofs << "always madvise never [unknown]" << std::endl;
}
+ StatusWith<std::string> result =
+ StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
+}
- TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterValidFormat) {
- TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterBlankLine");
- {
- std::string filename(tempDir.path() + "/param");
- std::ofstream ofs(filename.c_str());
- ofs << "always madvise [never]" << std::endl;
- }
- StatusWith<std::string> result =
- StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
- ASSERT_OK(result.getStatus());
- ASSERT_EQUALS("never", result.getValue());
+TEST(StartupWarningsMongodTest, ReadTransparentHugePagesParameterValidFormat) {
+ TempDir tempDir("StartupWarningsMongodTest_ReadTransparentHugePagesParameterBlankLine");
+ {
+ std::string filename(tempDir.path() + "/param");
+ std::ofstream ofs(filename.c_str());
+ ofs << "always madvise [never]" << std::endl;
}
+ StatusWith<std::string> result =
+ StartupWarningsMongod::readTransparentHugePagesParameter("param", tempDir.path());
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQUALS("never", result.getValue());
+}
} // namespace
diff --git a/src/mongo/db/stats/counters.cpp b/src/mongo/db/stats/counters.cpp
index 51330b9d647..5b67e22dd6f 100644
--- a/src/mongo/db/stats/counters.cpp
+++ b/src/mongo/db/stats/counters.cpp
@@ -39,132 +39,134 @@
namespace mongo {
- using std::endl;
+using std::endl;
- OpCounters::OpCounters() {}
+OpCounters::OpCounters() {}
- void OpCounters::incInsertInWriteLock(int n) {
- RARELY _checkWrap();
- _insert.fetchAndAdd(n);
- }
+void OpCounters::incInsertInWriteLock(int n) {
+ RARELY _checkWrap();
+ _insert.fetchAndAdd(n);
+}
- void OpCounters::gotInsert() {
- RARELY _checkWrap();
- _insert.fetchAndAdd(1);
- }
+void OpCounters::gotInsert() {
+ RARELY _checkWrap();
+ _insert.fetchAndAdd(1);
+}
- void OpCounters::gotQuery() {
- RARELY _checkWrap();
- _query.fetchAndAdd(1);
- }
+void OpCounters::gotQuery() {
+ RARELY _checkWrap();
+ _query.fetchAndAdd(1);
+}
- void OpCounters::gotUpdate() {
- RARELY _checkWrap();
- _update.fetchAndAdd(1);
- }
+void OpCounters::gotUpdate() {
+ RARELY _checkWrap();
+ _update.fetchAndAdd(1);
+}
- void OpCounters::gotDelete() {
- RARELY _checkWrap();
- _delete.fetchAndAdd(1);
- }
+void OpCounters::gotDelete() {
+ RARELY _checkWrap();
+ _delete.fetchAndAdd(1);
+}
- void OpCounters::gotGetMore() {
- RARELY _checkWrap();
- _getmore.fetchAndAdd(1);
- }
+void OpCounters::gotGetMore() {
+ RARELY _checkWrap();
+ _getmore.fetchAndAdd(1);
+}
- void OpCounters::gotCommand() {
- RARELY _checkWrap();
- _command.fetchAndAdd(1);
- }
+void OpCounters::gotCommand() {
+ RARELY _checkWrap();
+ _command.fetchAndAdd(1);
+}
- void OpCounters::gotOp( int op , bool isCommand ) {
- switch ( op ) {
- case dbInsert: /*gotInsert();*/ break; // need to handle multi-insert
+void OpCounters::gotOp(int op, bool isCommand) {
+ switch (op) {
+ case dbInsert: /*gotInsert();*/
+ break; // need to handle multi-insert
case dbQuery:
- if ( isCommand )
+ if (isCommand)
gotCommand();
else
gotQuery();
break;
- case dbUpdate: gotUpdate(); break;
- case dbDelete: gotDelete(); break;
- case dbGetMore: gotGetMore(); break;
+ case dbUpdate:
+ gotUpdate();
+ break;
+ case dbDelete:
+ gotDelete();
+ break;
+ case dbGetMore:
+ gotGetMore();
+ break;
case dbKillCursors:
case opReply:
case dbMsg:
break;
- default: log() << "OpCounters::gotOp unknown op: " << op << endl;
- }
+ default:
+ log() << "OpCounters::gotOp unknown op: " << op << endl;
}
+}
- void OpCounters::_checkWrap() {
- const unsigned MAX = 1 << 30;
-
- bool wrap =
- _insert.loadRelaxed() > MAX ||
- _query.loadRelaxed() > MAX ||
- _update.loadRelaxed() > MAX ||
- _delete.loadRelaxed() > MAX ||
- _getmore.loadRelaxed() > MAX ||
- _command.loadRelaxed() > MAX;
-
- if ( wrap ) {
- _insert.store(0);
- _query.store(0);
- _update.store(0);
- _delete.store(0);
- _getmore.store(0);
- _command.store(0);
- }
- }
+void OpCounters::_checkWrap() {
+ const unsigned MAX = 1 << 30;
- BSONObj OpCounters::getObj() const {
- BSONObjBuilder b;
- b.append( "insert" , _insert.loadRelaxed() );
- b.append( "query" , _query.loadRelaxed() );
- b.append( "update" , _update.loadRelaxed() );
- b.append( "delete" , _delete.loadRelaxed() );
- b.append( "getmore" , _getmore.loadRelaxed() );
- b.append( "command" , _command.loadRelaxed() );
- return b.obj();
- }
+ bool wrap = _insert.loadRelaxed() > MAX || _query.loadRelaxed() > MAX ||
+ _update.loadRelaxed() > MAX || _delete.loadRelaxed() > MAX ||
+ _getmore.loadRelaxed() > MAX || _command.loadRelaxed() > MAX;
- void NetworkCounter::hit( long long bytesIn , long long bytesOut ) {
- const long long MAX = 1ULL << 60;
-
- // don't care about the race as its just a counter
- bool overflow = _bytesIn > MAX || _bytesOut > MAX;
-
- if ( overflow ) {
- _lock.lock();
- _overflows++;
- _bytesIn = bytesIn;
- _bytesOut = bytesOut;
- _requests = 1;
- _lock.unlock();
- }
- else {
- _lock.lock();
- _bytesIn += bytesIn;
- _bytesOut += bytesOut;
- _requests++;
- _lock.unlock();
- }
+ if (wrap) {
+ _insert.store(0);
+ _query.store(0);
+ _update.store(0);
+ _delete.store(0);
+ _getmore.store(0);
+ _command.store(0);
}
+}
+
+BSONObj OpCounters::getObj() const {
+ BSONObjBuilder b;
+ b.append("insert", _insert.loadRelaxed());
+ b.append("query", _query.loadRelaxed());
+ b.append("update", _update.loadRelaxed());
+ b.append("delete", _delete.loadRelaxed());
+ b.append("getmore", _getmore.loadRelaxed());
+ b.append("command", _command.loadRelaxed());
+ return b.obj();
+}
+
+void NetworkCounter::hit(long long bytesIn, long long bytesOut) {
+ const long long MAX = 1ULL << 60;
- void NetworkCounter::append( BSONObjBuilder& b ) {
+    // don't care about the race as it's just a counter
+ bool overflow = _bytesIn > MAX || _bytesOut > MAX;
+
+ if (overflow) {
_lock.lock();
- b.appendNumber( "bytesIn" , _bytesIn );
- b.appendNumber( "bytesOut" , _bytesOut );
- b.appendNumber( "numRequests" , _requests );
+ _overflows++;
+ _bytesIn = bytesIn;
+ _bytesOut = bytesOut;
+ _requests = 1;
+ _lock.unlock();
+ } else {
+ _lock.lock();
+ _bytesIn += bytesIn;
+ _bytesOut += bytesOut;
+ _requests++;
_lock.unlock();
}
+}
+void NetworkCounter::append(BSONObjBuilder& b) {
+ _lock.lock();
+ b.appendNumber("bytesIn", _bytesIn);
+ b.appendNumber("bytesOut", _bytesOut);
+ b.appendNumber("numRequests", _requests);
+ _lock.unlock();
+}
- OpCounters globalOpCounters;
- OpCounters replOpCounters;
- NetworkCounter networkCounter;
+OpCounters globalOpCounters;
+OpCounters replOpCounters;
+NetworkCounter networkCounter;
}
diff --git a/src/mongo/db/stats/counters.h b/src/mongo/db/stats/counters.h
index b29ebcce618..1821f7b7f1e 100644
--- a/src/mongo/db/stats/counters.h
+++ b/src/mongo/db/stats/counters.h
@@ -38,64 +38,76 @@
namespace mongo {
- /**
- * for storing operation counters
- * note: not thread safe. ok with that for speed
- */
- class OpCounters {
- public:
+/**
+ * for storing operation counters
+ * note: not thread safe. ok with that for speed
+ */
+class OpCounters {
+public:
+ OpCounters();
+ void incInsertInWriteLock(int n);
+ void gotInsert();
+ void gotQuery();
+ void gotUpdate();
+ void gotDelete();
+ void gotGetMore();
+ void gotCommand();
+
+ void gotOp(int op, bool isCommand);
+
+ BSONObj getObj() const;
- OpCounters();
- void incInsertInWriteLock(int n);
- void gotInsert();
- void gotQuery();
- void gotUpdate();
- void gotDelete();
- void gotGetMore();
- void gotCommand();
+    // these are used by SNMP and other things; do not remove
+ const AtomicUInt32* getInsert() const {
+ return &_insert;
+ }
+ const AtomicUInt32* getQuery() const {
+ return &_query;
+ }
+ const AtomicUInt32* getUpdate() const {
+ return &_update;
+ }
+ const AtomicUInt32* getDelete() const {
+ return &_delete;
+ }
+ const AtomicUInt32* getGetMore() const {
+ return &_getmore;
+ }
+ const AtomicUInt32* getCommand() const {
+ return &_command;
+ }
- void gotOp( int op , bool isCommand );
+private:
+ void _checkWrap();
- BSONObj getObj() const;
-
- // thse are used by snmp, and other things, do not remove
- const AtomicUInt32 * getInsert() const { return &_insert; }
- const AtomicUInt32 * getQuery() const { return &_query; }
- const AtomicUInt32 * getUpdate() const { return &_update; }
- const AtomicUInt32 * getDelete() const { return &_delete; }
- const AtomicUInt32 * getGetMore() const { return &_getmore; }
- const AtomicUInt32 * getCommand() const { return &_command; }
+ // todo: there will be a lot of cache line contention on these. need to do something
+ // else eventually.
+ AtomicUInt32 _insert;
+ AtomicUInt32 _query;
+ AtomicUInt32 _update;
+ AtomicUInt32 _delete;
+ AtomicUInt32 _getmore;
+ AtomicUInt32 _command;
+};
- private:
- void _checkWrap();
-
- // todo: there will be a lot of cache line contention on these. need to do something
- // else eventually.
- AtomicUInt32 _insert;
- AtomicUInt32 _query;
- AtomicUInt32 _update;
- AtomicUInt32 _delete;
- AtomicUInt32 _getmore;
- AtomicUInt32 _command;
- };
+extern OpCounters globalOpCounters;
+extern OpCounters replOpCounters;
- extern OpCounters globalOpCounters;
- extern OpCounters replOpCounters;
+class NetworkCounter {
+public:
+ NetworkCounter() : _bytesIn(0), _bytesOut(0), _requests(0), _overflows(0) {}
+ void hit(long long bytesIn, long long bytesOut);
+ void append(BSONObjBuilder& b);
- class NetworkCounter {
- public:
- NetworkCounter() : _bytesIn(0), _bytesOut(0), _requests(0), _overflows(0) {}
- void hit( long long bytesIn , long long bytesOut );
- void append( BSONObjBuilder& b );
- private:
- long long _bytesIn;
- long long _bytesOut;
- long long _requests;
+private:
+ long long _bytesIn;
+ long long _bytesOut;
+ long long _requests;
- long long _overflows;
+ long long _overflows;
- SpinLock _lock;
- };
+ SpinLock _lock;
+};
- extern NetworkCounter networkCounter;
+extern NetworkCounter networkCounter;
}
diff --git a/src/mongo/db/stats/fill_locker_info.cpp b/src/mongo/db/stats/fill_locker_info.cpp
index 028185f0f97..9541eb5de34 100644
--- a/src/mongo/db/stats/fill_locker_info.cpp
+++ b/src/mongo/db/stats/fill_locker_info.cpp
@@ -35,46 +35,45 @@
namespace mongo {
- void fillLockerInfo(const Locker::LockerInfo& lockerInfo, BSONObjBuilder& infoBuilder) {
- // "locks" section
- BSONObjBuilder locks(infoBuilder.subobjStart("locks"));
- const size_t locksSize = lockerInfo.locks.size();
+void fillLockerInfo(const Locker::LockerInfo& lockerInfo, BSONObjBuilder& infoBuilder) {
+ // "locks" section
+ BSONObjBuilder locks(infoBuilder.subobjStart("locks"));
+ const size_t locksSize = lockerInfo.locks.size();
- // Only add the last lock of each type, and use the largest mode encountered
- LockMode modeForType[ResourceTypesCount] = { }; // default initialize to zero (min value)
- for (size_t i = 0; i < locksSize; i++) {
- const Locker::OneLock& lock = lockerInfo.locks[i];
- const ResourceType lockType = lock.resourceId.getType();
- const LockMode lockMode = std::max(lock.mode, modeForType[lockType]);
+ // Only add the last lock of each type, and use the largest mode encountered
+ LockMode modeForType[ResourceTypesCount] = {}; // default initialize to zero (min value)
+ for (size_t i = 0; i < locksSize; i++) {
+ const Locker::OneLock& lock = lockerInfo.locks[i];
+ const ResourceType lockType = lock.resourceId.getType();
+ const LockMode lockMode = std::max(lock.mode, modeForType[lockType]);
- // Check that lockerInfo is sorted on resource type
- invariant(i == 0 || lockType >= lockerInfo.locks[i - 1].resourceId.getType());
+ // Check that lockerInfo is sorted on resource type
+ invariant(i == 0 || lockType >= lockerInfo.locks[i - 1].resourceId.getType());
- if (lock.resourceId == resourceIdLocalDB) {
- locks.append("local", legacyModeName(lock.mode));
- continue;
- }
+ if (lock.resourceId == resourceIdLocalDB) {
+ locks.append("local", legacyModeName(lock.mode));
+ continue;
+ }
- modeForType[lockType] = lockMode;
+ modeForType[lockType] = lockMode;
- if (i + 1 < locksSize && lockerInfo.locks[i + 1].resourceId.getType() == lockType) {
- continue; // skip this lock as it is not the last one of its type
- }
- else {
- locks.append(resourceTypeName(lockType), legacyModeName(lockMode));
- }
+ if (i + 1 < locksSize && lockerInfo.locks[i + 1].resourceId.getType() == lockType) {
+ continue; // skip this lock as it is not the last one of its type
+ } else {
+ locks.append(resourceTypeName(lockType), legacyModeName(lockMode));
}
- locks.done();
+ }
+ locks.done();
- // "waitingForLock" section
- infoBuilder.append("waitingForLock", lockerInfo.waitingResource.isValid());
+ // "waitingForLock" section
+ infoBuilder.append("waitingForLock", lockerInfo.waitingResource.isValid());
- // "lockStats" section
- {
- BSONObjBuilder lockStats(infoBuilder.subobjStart("lockStats"));
- lockerInfo.stats.report(&lockStats);
- lockStats.done();
- }
+ // "lockStats" section
+ {
+ BSONObjBuilder lockStats(infoBuilder.subobjStart("lockStats"));
+ lockerInfo.stats.report(&lockStats);
+ lockStats.done();
}
+}
} // namespace mongo
diff --git a/src/mongo/db/stats/fill_locker_info.h b/src/mongo/db/stats/fill_locker_info.h
index 440b91b2816..03b99a2e371 100644
--- a/src/mongo/db/stats/fill_locker_info.h
+++ b/src/mongo/db/stats/fill_locker_info.h
@@ -32,10 +32,10 @@
namespace mongo {
- /**
- * Constructs a human-readable BSON from the specified LockerInfo structure.
- * The lockerInfo must be sorted.
- */
- void fillLockerInfo(const Locker::LockerInfo& lockerInfo, BSONObjBuilder& infoBuilder);
+/**
+ * Constructs a human-readable BSON from the specified LockerInfo structure.
+ * The lockerInfo must be sorted.
+ */
+void fillLockerInfo(const Locker::LockerInfo& lockerInfo, BSONObjBuilder& infoBuilder);
} // namespace mongo
diff --git a/src/mongo/db/stats/fine_clock.h b/src/mongo/db/stats/fine_clock.h
index 21f72d212a1..78762b73870 100644
--- a/src/mongo/db/stats/fine_clock.h
+++ b/src/mongo/db/stats/fine_clock.h
@@ -35,45 +35,41 @@
namespace mongo {
- /**
- * This is a nano-second precision clock. We're skipping the
- * harware TSC in favor of clock_gettime() which in some systems
- * does not involve a trip to the OS (VDSO).
- *
- * We're exporting a type WallTime that is and should remain
- * opaque. The business of getting accurate time is still ongoing
- * and we may change the internal representation of this class.
- * (http://lwn.net/Articles/388188/)
- *
- * Really, you shouldn't be using this class in hot code paths for
- * platforms you're not sure whether the overhead is low.
- */
- class FineClock {
- public:
-
- typedef timespec WallTime;
+/**
+ * This is a nano-second precision clock. We're skipping the
+ * hardware TSC in favor of clock_gettime() which in some systems
+ * does not involve a trip to the OS (VDSO).
+ *
+ * We're exporting a type WallTime that is and should remain
+ * opaque. The business of getting accurate time is still ongoing
+ * and we may change the internal representation of this class.
+ * (http://lwn.net/Articles/388188/)
+ *
+ * Really, you shouldn't be using this class in hot code paths on
+ * platforms where you're not sure the overhead is low.
+ */
+class FineClock {
+public:
+ typedef timespec WallTime;
- static WallTime now() {
- struct timespec ts;
- clock_gettime(CLOCK_MONOTONIC, &ts);
- return ts;
- }
+ static WallTime now() {
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts;
+ }
- static uint64_t diffInNanos( WallTime end, WallTime start ) {
- uint64_t diff;
- if ( end.tv_nsec < start.tv_nsec ) {
- diff = 1000000000 * ( end.tv_sec - start.tv_sec - 1);
- diff += 1000000000 + end.tv_nsec - start.tv_nsec;
- }
- else {
- diff = 1000000000 * ( end.tv_sec - start.tv_sec );
- diff += end.tv_nsec - start.tv_nsec;
- }
- return diff;
+ static uint64_t diffInNanos(WallTime end, WallTime start) {
+ uint64_t diff;
+ if (end.tv_nsec < start.tv_nsec) {
+ diff = 1000000000 * (end.tv_sec - start.tv_sec - 1);
+ diff += 1000000000 + end.tv_nsec - start.tv_nsec;
+ } else {
+ diff = 1000000000 * (end.tv_sec - start.tv_sec);
+ diff += end.tv_nsec - start.tv_nsec;
}
-
- };
+ return diff;
+ }
+};
}
#endif // DB_STATS_FINE_CLOCK_HEADER
-
diff --git a/src/mongo/db/stats/lock_server_status_section.cpp b/src/mongo/db/stats/lock_server_status_section.cpp
index c8d583d403e..e320fe04930 100644
--- a/src/mongo/db/stats/lock_server_status_section.cpp
+++ b/src/mongo/db/stats/lock_server_status_section.cpp
@@ -37,103 +37,103 @@
namespace mongo {
namespace {
- class GlobalLockServerStatusSection : public ServerStatusSection {
- public:
- GlobalLockServerStatusSection() : ServerStatusSection("globalLock") {
- _started = curTimeMillis64();
- }
-
- virtual bool includeByDefault() const { return true; }
-
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
-
- int numTotal = 0;
- int numWriteLocked = 0;
- int numReadLocked = 0;
- int numWaitingRead = 0;
- int numWaitingWrite = 0;
-
- // This returns the blocked lock states
- for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
- Client* client = cursor.next();) {
-
- invariant(client);
- ++numTotal;
- stdx::unique_lock<Client> uniqueLock(*client);
-
- const OperationContext* opCtx = client->getOperationContext();
- if (opCtx == NULL) continue;
-
- if (opCtx->lockState()->isWriteLocked()) {
- numWriteLocked++;
-
- if (opCtx->lockState()->getWaitingResource().isValid()) {
- numWaitingWrite++;
- }
+class GlobalLockServerStatusSection : public ServerStatusSection {
+public:
+ GlobalLockServerStatusSection() : ServerStatusSection("globalLock") {
+ _started = curTimeMillis64();
+ }
+
+ virtual bool includeByDefault() const {
+ return true;
+ }
+
+ virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ int numTotal = 0;
+ int numWriteLocked = 0;
+ int numReadLocked = 0;
+ int numWaitingRead = 0;
+ int numWaitingWrite = 0;
+
+ // This returns the blocked lock states
+ for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
+ Client* client = cursor.next();) {
+ invariant(client);
+ ++numTotal;
+ stdx::unique_lock<Client> uniqueLock(*client);
+
+ const OperationContext* opCtx = client->getOperationContext();
+ if (opCtx == NULL)
+ continue;
+
+ if (opCtx->lockState()->isWriteLocked()) {
+ numWriteLocked++;
+
+ if (opCtx->lockState()->getWaitingResource().isValid()) {
+ numWaitingWrite++;
}
- else if (opCtx->lockState()->isReadLocked()) {
- numReadLocked++;
+ } else if (opCtx->lockState()->isReadLocked()) {
+ numReadLocked++;
- if (opCtx->lockState()->getWaitingResource().isValid()) {
- numWaitingRead++;
- }
+ if (opCtx->lockState()->getWaitingResource().isValid()) {
+ numWaitingRead++;
}
}
+ }
- // Construct the actual return value out of the mutex
- BSONObjBuilder ret;
+ // Construct the actual return value out of the mutex
+ BSONObjBuilder ret;
- ret.append("totalTime", (long long)(1000 * (curTimeMillis64() - _started)));
+ ret.append("totalTime", (long long)(1000 * (curTimeMillis64() - _started)));
- {
- BSONObjBuilder currentQueueBuilder(ret.subobjStart("currentQueue"));
+ {
+ BSONObjBuilder currentQueueBuilder(ret.subobjStart("currentQueue"));
- currentQueueBuilder.append("total", numWaitingRead + numWaitingWrite);
- currentQueueBuilder.append("readers", numWaitingRead);
- currentQueueBuilder.append("writers", numWaitingWrite);
- currentQueueBuilder.done();
- }
+ currentQueueBuilder.append("total", numWaitingRead + numWaitingWrite);
+ currentQueueBuilder.append("readers", numWaitingRead);
+ currentQueueBuilder.append("writers", numWaitingWrite);
+ currentQueueBuilder.done();
+ }
- {
- BSONObjBuilder activeClientsBuilder(ret.subobjStart("activeClients"));
+ {
+ BSONObjBuilder activeClientsBuilder(ret.subobjStart("activeClients"));
- activeClientsBuilder.append("total", numTotal);
- activeClientsBuilder.append("readers", numReadLocked);
- activeClientsBuilder.append("writers", numWriteLocked);
- activeClientsBuilder.done();
- }
+ activeClientsBuilder.append("total", numTotal);
+ activeClientsBuilder.append("readers", numReadLocked);
+ activeClientsBuilder.append("writers", numWriteLocked);
+ activeClientsBuilder.done();
+ }
- ret.done();
+ ret.done();
- return ret.obj();
- }
+ return ret.obj();
+ }
- private:
- unsigned long long _started;
+private:
+ unsigned long long _started;
- } globalLockServerStatusSection;
+} globalLockServerStatusSection;
- class LockStatsServerStatusSection : public ServerStatusSection {
- public:
- LockStatsServerStatusSection() : ServerStatusSection("locks") { }
+class LockStatsServerStatusSection : public ServerStatusSection {
+public:
+ LockStatsServerStatusSection() : ServerStatusSection("locks") {}
- virtual bool includeByDefault() const { return true; }
+ virtual bool includeByDefault() const {
+ return true;
+ }
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
- BSONObjBuilder ret;
+ virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder ret;
- SingleThreadedLockStats stats;
- reportGlobalLockingStats(&stats);
+ SingleThreadedLockStats stats;
+ reportGlobalLockingStats(&stats);
- stats.report(&ret);
+ stats.report(&ret);
- return ret.obj();
- }
+ return ret.obj();
+ }
- } lockStatsServerStatusSection;
+} lockStatsServerStatusSection;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/stats/range_deleter_server_status.cpp b/src/mongo/db/stats/range_deleter_server_status.cpp
index e625e16f01b..817ffa444e4 100644
--- a/src/mongo/db/stats/range_deleter_server_status.cpp
+++ b/src/mongo/db/stats/range_deleter_server_status.cpp
@@ -32,69 +32,70 @@
namespace mongo {
- /**
- * Server status section for RangeDeleter.
- *
- * Sample format:
- *
- * rangeDeleter: {
- * lastDeleteStats: [
- * {
- * deleteDocs: NumberLong(5);
- * queueStart: ISODate("2014-06-11T22:45:30.221Z"),
- * queueEnd: ISODate("2014-06-11T22:45:30.221Z"),
- * deleteStart: ISODate("2014-06-11T22:45:30.221Z"),
- * deleteEnd: ISODate("2014-06-11T22:45:30.221Z"),
- * waitForReplStart: ISODate("2014-06-11T22:45:30.221Z"),
- * waitForReplEnd: ISODate("2014-06-11T22:45:30.221Z")
- * }
- * ]
- * }
- */
- class RangeDeleterServerStatusSection : public ServerStatusSection {
- public:
- RangeDeleterServerStatusSection() : ServerStatusSection( "rangeDeleter" ){}
- bool includeByDefault() const { return false; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
+/**
+ * Server status section for RangeDeleter.
+ *
+ * Sample format:
+ *
+ * rangeDeleter: {
+ * lastDeleteStats: [
+ * {
+ * deleteDocs: NumberLong(5);
+ * queueStart: ISODate("2014-06-11T22:45:30.221Z"),
+ * queueEnd: ISODate("2014-06-11T22:45:30.221Z"),
+ * deleteStart: ISODate("2014-06-11T22:45:30.221Z"),
+ * deleteEnd: ISODate("2014-06-11T22:45:30.221Z"),
+ * waitForReplStart: ISODate("2014-06-11T22:45:30.221Z"),
+ * waitForReplEnd: ISODate("2014-06-11T22:45:30.221Z")
+ * }
+ * ]
+ * }
+ */
+class RangeDeleterServerStatusSection : public ServerStatusSection {
+public:
+ RangeDeleterServerStatusSection() : ServerStatusSection("rangeDeleter") {}
+ bool includeByDefault() const {
+ return false;
+ }
- RangeDeleter* deleter = getDeleter();
- if (!deleter) {
- return BSONObj();
- }
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ RangeDeleter* deleter = getDeleter();
+ if (!deleter) {
+ return BSONObj();
+ }
- BSONObjBuilder result;
+ BSONObjBuilder result;
- OwnedPointerVector<DeleteJobStats> statsList;
- deleter->getStatsHistory(&statsList.mutableVector());
- BSONArrayBuilder oldStatsBuilder;
- for (OwnedPointerVector<DeleteJobStats>::const_iterator it = statsList.begin();
- it != statsList.end(); ++it) {
- BSONObjBuilder entryBuilder;
- entryBuilder.append("deletedDocs", (*it)->deletedDocCount);
+ OwnedPointerVector<DeleteJobStats> statsList;
+ deleter->getStatsHistory(&statsList.mutableVector());
+ BSONArrayBuilder oldStatsBuilder;
+ for (OwnedPointerVector<DeleteJobStats>::const_iterator it = statsList.begin();
+ it != statsList.end();
+ ++it) {
+ BSONObjBuilder entryBuilder;
+ entryBuilder.append("deletedDocs", (*it)->deletedDocCount);
- if ((*it)->queueEndTS > Date_t()) {
- entryBuilder.append("queueStart", (*it)->queueStartTS);
- entryBuilder.append("queueEnd", (*it)->queueEndTS);
- }
+ if ((*it)->queueEndTS > Date_t()) {
+ entryBuilder.append("queueStart", (*it)->queueStartTS);
+ entryBuilder.append("queueEnd", (*it)->queueEndTS);
+ }
- if ((*it)->deleteEndTS > Date_t()) {
- entryBuilder.append("deleteStart", (*it)->deleteStartTS);
- entryBuilder.append("deleteEnd", (*it)->deleteEndTS);
+ if ((*it)->deleteEndTS > Date_t()) {
+ entryBuilder.append("deleteStart", (*it)->deleteStartTS);
+ entryBuilder.append("deleteEnd", (*it)->deleteEndTS);
- if ((*it)->waitForReplEndTS > Date_t()) {
- entryBuilder.append("waitForReplStart", (*it)->waitForReplStartTS);
- entryBuilder.append("waitForReplEnd", (*it)->waitForReplEndTS);
- }
+ if ((*it)->waitForReplEndTS > Date_t()) {
+ entryBuilder.append("waitForReplStart", (*it)->waitForReplStartTS);
+ entryBuilder.append("waitForReplEnd", (*it)->waitForReplEndTS);
}
-
- oldStatsBuilder.append(entryBuilder.obj());
}
- result.append("lastDeleteStats", oldStatsBuilder.arr());
- return result.obj();
+ oldStatsBuilder.append(entryBuilder.obj());
}
+ result.append("lastDeleteStats", oldStatsBuilder.arr());
+
+ return result.obj();
+ }
- } rangeDeleterServerStatusSection;
+} rangeDeleterServerStatusSection;
}
diff --git a/src/mongo/db/stats/snapshots.cpp b/src/mongo/db/stats/snapshots.cpp
index 80b5a66c98f..ce14714b87d 100644
--- a/src/mongo/db/stats/snapshots.cpp
+++ b/src/mongo/db/stats/snapshots.cpp
@@ -45,83 +45,77 @@
*/
namespace mongo {
- using std::unique_ptr;
- using std::endl;
+using std::unique_ptr;
+using std::endl;
- void SnapshotData::takeSnapshot() {
- _created = curTimeMicros64();
- Top::get(getGlobalServiceContext()).cloneMap(_usage);
- }
+void SnapshotData::takeSnapshot() {
+ _created = curTimeMicros64();
+ Top::get(getGlobalServiceContext()).cloneMap(_usage);
+}
- SnapshotDelta::SnapshotDelta( const SnapshotData& older , const SnapshotData& newer )
- : _older( older ) , _newer( newer ) {
- verify( _newer._created > _older._created );
- _elapsed = _newer._created - _older._created;
- }
+SnapshotDelta::SnapshotDelta(const SnapshotData& older, const SnapshotData& newer)
+ : _older(older), _newer(newer) {
+ verify(_newer._created > _older._created);
+ _elapsed = _newer._created - _older._created;
+}
- Top::UsageMap SnapshotDelta::collectionUsageDiff() {
- verify( _newer._created > _older._created );
- Top::UsageMap u;
-
- for ( Top::UsageMap::const_iterator i=_newer._usage.begin();
- i != _newer._usage.end(); ++i ) {
- Top::UsageMap::const_iterator j = _older._usage.find(i->first);
- if (j != _older._usage.end())
- u[i->first] = Top::CollectionData( j->second , i->second );
- else
- u[i->first] = i->second;
- }
- return u;
- }
+Top::UsageMap SnapshotDelta::collectionUsageDiff() {
+ verify(_newer._created > _older._created);
+ Top::UsageMap u;
- Snapshots::Snapshots()
- : _loc(0)
- , _stored(0)
- {}
-
- const SnapshotData* Snapshots::takeSnapshot() {
- stdx::lock_guard<stdx::mutex> lk(_lock);
- _loc = ( _loc + 1 ) % kNumSnapshots;
- _snapshots[_loc].takeSnapshot();
- if ( _stored < kNumSnapshots )
- _stored++;
- return &_snapshots[_loc];
+ for (Top::UsageMap::const_iterator i = _newer._usage.begin(); i != _newer._usage.end(); ++i) {
+ Top::UsageMap::const_iterator j = _older._usage.find(i->first);
+ if (j != _older._usage.end())
+ u[i->first] = Top::CollectionData(j->second, i->second);
+ else
+ u[i->first] = i->second;
}
+ return u;
+}
- StatusWith<SnapshotDiff> Snapshots::computeDelta() {
- stdx::lock_guard<stdx::mutex> lk(_lock);
-
- // We need 2 snapshots to calculate a delta
- if (_stored < 2) {
- return StatusWith<SnapshotDiff>(ErrorCodes::BadValue,
- "Less than 2 snapshots exist");
- }
+Snapshots::Snapshots() : _loc(0), _stored(0) {}
- // The following logic depends on there being exactly 2 stored snapshots
- BOOST_STATIC_ASSERT(kNumSnapshots == 2);
+const SnapshotData* Snapshots::takeSnapshot() {
+ stdx::lock_guard<stdx::mutex> lk(_lock);
+ _loc = (_loc + 1) % kNumSnapshots;
+ _snapshots[_loc].takeSnapshot();
+ if (_stored < kNumSnapshots)
+ _stored++;
+ return &_snapshots[_loc];
+}
- // Current and previous napshot alternates between indexes 0 and 1
- int currIdx = _loc;
- int prevIdx = _loc > 0 ? 0 : 1;
- SnapshotDelta delta(_snapshots[prevIdx], _snapshots[currIdx]);
+StatusWith<SnapshotDiff> Snapshots::computeDelta() {
+ stdx::lock_guard<stdx::mutex> lk(_lock);
- return SnapshotDiff(delta.collectionUsageDiff(), delta.elapsed());
+ // We need 2 snapshots to calculate a delta
+ if (_stored < 2) {
+ return StatusWith<SnapshotDiff>(ErrorCodes::BadValue, "Less than 2 snapshots exist");
}
- void SnapshotThread::run() {
- Client::initThread("snapshot");
- while ( ! inShutdown() ) {
- try {
- statsSnapshots.takeSnapshot();
- }
- catch ( std::exception& e ) {
- log() << "ERROR in SnapshotThread: " << e.what() << endl;
- }
-
- sleepsecs(4);
+ // The following logic depends on there being exactly 2 stored snapshots
+ BOOST_STATIC_ASSERT(kNumSnapshots == 2);
+
+    // Current and previous snapshots alternate between indexes 0 and 1
+ int currIdx = _loc;
+ int prevIdx = _loc > 0 ? 0 : 1;
+ SnapshotDelta delta(_snapshots[prevIdx], _snapshots[currIdx]);
+
+ return SnapshotDiff(delta.collectionUsageDiff(), delta.elapsed());
+}
+
+void SnapshotThread::run() {
+ Client::initThread("snapshot");
+ while (!inShutdown()) {
+ try {
+ statsSnapshots.takeSnapshot();
+ } catch (std::exception& e) {
+ log() << "ERROR in SnapshotThread: " << e.what() << endl;
}
+
+ sleepsecs(4);
}
+}
- Snapshots statsSnapshots;
- SnapshotThread snapshotThread;
+Snapshots statsSnapshots;
+SnapshotThread snapshotThread;
}
diff --git a/src/mongo/db/stats/snapshots.h b/src/mongo/db/stats/snapshots.h
index 6f3b90e0cb9..f9f47d661d1 100644
--- a/src/mongo/db/stats/snapshots.h
+++ b/src/mongo/db/stats/snapshots.h
@@ -41,76 +41,76 @@
*/
namespace mongo {
- class SnapshotThread;
+class SnapshotThread;
- /**
- * stores a point in time snapshot
- * i.e. all counters at a given time
- */
- class SnapshotData {
- void takeSnapshot();
-
- unsigned long long _created;
- Top::UsageMap _usage;
-
- friend class SnapshotThread;
- friend class SnapshotDelta;
- friend class Snapshots;
- };
-
- /**
- * contains performance information for a time period
- */
- class SnapshotDelta {
- public:
- SnapshotDelta( const SnapshotData& older , const SnapshotData& newer );
-
- unsigned long long elapsed() const {
- return _elapsed;
- }
-
- Top::UsageMap collectionUsageDiff();
-
- private:
- const SnapshotData& _older;
- const SnapshotData& _newer;
-
- unsigned long long _elapsed;
- };
-
- struct SnapshotDiff {
- Top::UsageMap usageDiff;
- unsigned long long timeElapsed;
-
- SnapshotDiff() = default;
- SnapshotDiff(Top::UsageMap map, unsigned long long elapsed)
- : usageDiff(std::move(map)), timeElapsed(elapsed) {}
- };
-
- class Snapshots {
- public:
- Snapshots();
-
- const SnapshotData* takeSnapshot();
-
- StatusWith<SnapshotDiff> computeDelta();
-
- private:
- stdx::mutex _lock;
- static const int kNumSnapshots = 2;
- SnapshotData _snapshots[kNumSnapshots];
- int _loc;
- int _stored;
- };
-
- class SnapshotThread : public BackgroundJob {
- public:
- virtual std::string name() const { return "snapshot"; }
- void run();
- };
+/**
+ * stores a point in time snapshot
+ * i.e. all counters at a given time
+ */
+class SnapshotData {
+ void takeSnapshot();
- extern Snapshots statsSnapshots;
- extern SnapshotThread snapshotThread;
+ unsigned long long _created;
+ Top::UsageMap _usage;
+ friend class SnapshotThread;
+ friend class SnapshotDelta;
+ friend class Snapshots;
+};
+/**
+ * contains performance information for a time period
+ */
+class SnapshotDelta {
+public:
+ SnapshotDelta(const SnapshotData& older, const SnapshotData& newer);
+
+ unsigned long long elapsed() const {
+ return _elapsed;
+ }
+
+ Top::UsageMap collectionUsageDiff();
+
+private:
+ const SnapshotData& _older;
+ const SnapshotData& _newer;
+
+ unsigned long long _elapsed;
+};
+
+struct SnapshotDiff {
+ Top::UsageMap usageDiff;
+ unsigned long long timeElapsed;
+
+ SnapshotDiff() = default;
+ SnapshotDiff(Top::UsageMap map, unsigned long long elapsed)
+ : usageDiff(std::move(map)), timeElapsed(elapsed) {}
+};
+
+class Snapshots {
+public:
+ Snapshots();
+
+ const SnapshotData* takeSnapshot();
+
+ StatusWith<SnapshotDiff> computeDelta();
+
+private:
+ stdx::mutex _lock;
+ static const int kNumSnapshots = 2;
+ SnapshotData _snapshots[kNumSnapshots];
+ int _loc;
+ int _stored;
+};
+
+class SnapshotThread : public BackgroundJob {
+public:
+ virtual std::string name() const {
+ return "snapshot";
+ }
+ void run();
+};
+
+extern Snapshots statsSnapshots;
+extern SnapshotThread snapshotThread;
}
diff --git a/src/mongo/db/stats/snapshots_webplugins.cpp b/src/mongo/db/stats/snapshots_webplugins.cpp
index cc604c6e983..8690935866a 100644
--- a/src/mongo/db/stats/snapshots_webplugins.cpp
+++ b/src/mongo/db/stats/snapshots_webplugins.cpp
@@ -36,81 +36,83 @@
namespace mongo {
namespace {
- using namespace html;
-
- using std::fixed;
- using std::setprecision;
- using std::string;
- using std::stringstream;
-
- class DBTopStatus : public WebStatusPlugin {
- public:
- DBTopStatus() : WebStatusPlugin( "dbtop" , 50 , "(occurrences|percent of elapsed)" ) {}
-
- void display( stringstream& ss , double elapsed , const Top::UsageData& usage ) {
- ss << "<td>";
- ss << usage.count;
- ss << "</td><td>";
- double per = 100 * ((double)usage.time)/elapsed;
- if( per == (int) per )
- ss << (int) per;
- else
- ss << setprecision(1) << fixed << per;
- ss << '%';
- ss << "</td>";
+using namespace html;
+
+using std::fixed;
+using std::setprecision;
+using std::string;
+using std::stringstream;
+
+class DBTopStatus : public WebStatusPlugin {
+public:
+ DBTopStatus() : WebStatusPlugin("dbtop", 50, "(occurrences|percent of elapsed)") {}
+
+ void display(stringstream& ss, double elapsed, const Top::UsageData& usage) {
+ ss << "<td>";
+ ss << usage.count;
+ ss << "</td><td>";
+ double per = 100 * ((double)usage.time) / elapsed;
+ if (per == (int)per)
+ ss << (int)per;
+ else
+ ss << setprecision(1) << fixed << per;
+ ss << '%';
+ ss << "</td>";
+ }
+
+ void display(stringstream& ss,
+ double elapsed,
+ const string& ns,
+ const Top::CollectionData& data) {
+ if (ns != "TOTAL" && data.total.count == 0)
+ return;
+ ss << "<tr><th>" << html::escape(ns) << "</th>";
+
+ display(ss, elapsed, data.total);
+
+ display(ss, elapsed, data.readLock);
+ display(ss, elapsed, data.writeLock);
+
+ display(ss, elapsed, data.queries);
+ display(ss, elapsed, data.getmore);
+ display(ss, elapsed, data.insert);
+ display(ss, elapsed, data.update);
+ display(ss, elapsed, data.remove);
+
+ ss << "</tr>\n";
+ }
+
+ void run(OperationContext* txn, stringstream& ss) {
+ StatusWith<SnapshotDiff> diff = statsSnapshots.computeDelta();
+
+ if (!diff.isOK())
+ return;
+
+ ss << "<table border=1 cellpadding=2 cellspacing=0>";
+ ss << "<tr align='left'><th>";
+ ss << a("http://dochub.mongodb.org/core/whatisanamespace", "namespace")
+ << "NS</a></th>"
+ "<th colspan=2>total</th>"
+ "<th colspan=2>Reads</th>"
+ "<th colspan=2>Writes</th>"
+ "<th colspan=2>Queries</th>"
+ "<th colspan=2>GetMores</th>"
+ "<th colspan=2>Inserts</th>"
+ "<th colspan=2>Updates</th>"
+ "<th colspan=2>Removes</th>";
+ ss << "</tr>\n";
+
+ const Top::UsageMap& usage = diff.getValue().usageDiff;
+ unsigned long long elapsed = diff.getValue().timeElapsed;
+ for (Top::UsageMap::const_iterator i = usage.begin(); i != usage.end(); ++i) {
+ display(ss, (double)elapsed, i->first, i->second);
}
- void display( stringstream& ss , double elapsed , const string& ns , const Top::CollectionData& data ) {
- if ( ns != "TOTAL" && data.total.count == 0 )
- return;
- ss << "<tr><th>" << html::escape( ns ) << "</th>";
+ ss << "</table>";
+ }
- display( ss , elapsed , data.total );
-
- display( ss , elapsed , data.readLock );
- display( ss , elapsed , data.writeLock );
-
- display( ss , elapsed , data.queries );
- display( ss , elapsed , data.getmore );
- display( ss , elapsed , data.insert );
- display( ss , elapsed , data.update );
- display( ss , elapsed , data.remove );
-
- ss << "</tr>\n";
- }
-
- void run(OperationContext* txn, stringstream& ss) {
- StatusWith<SnapshotDiff> diff = statsSnapshots.computeDelta();
-
- if ( ! diff.isOK() )
- return;
-
- ss << "<table border=1 cellpadding=2 cellspacing=0>";
- ss << "<tr align='left'><th>";
- ss << a("http://dochub.mongodb.org/core/whatisanamespace", "namespace") <<
- "NS</a></th>"
- "<th colspan=2>total</th>"
- "<th colspan=2>Reads</th>"
- "<th colspan=2>Writes</th>"
- "<th colspan=2>Queries</th>"
- "<th colspan=2>GetMores</th>"
- "<th colspan=2>Inserts</th>"
- "<th colspan=2>Updates</th>"
- "<th colspan=2>Removes</th>";
- ss << "</tr>\n";
-
- const Top::UsageMap& usage = diff.getValue().usageDiff;
- unsigned long long elapsed = diff.getValue().timeElapsed;
- for ( Top::UsageMap::const_iterator i=usage.begin(); i != usage.end(); ++i ) {
- display( ss , (double) elapsed , i->first , i->second );
- }
-
- ss << "</table>";
-
- }
-
- virtual void init() {}
- } dbtopStatus;
+ virtual void init() {}
+} dbtopStatus;
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/stats/timer_stats.cpp b/src/mongo/db/stats/timer_stats.cpp
index 728f05cb3f3..1030d296eb4 100644
--- a/src/mongo/db/stats/timer_stats.cpp
+++ b/src/mongo/db/stats/timer_stats.cpp
@@ -31,47 +31,44 @@
namespace mongo {
- TimerHolder::TimerHolder( TimerStats* stats )
- : _stats( stats ), _recorded( false ){
- }
-
- TimerHolder::~TimerHolder() {
- if ( ! _recorded ) {
- recordMillis();
- }
- }
+TimerHolder::TimerHolder(TimerStats* stats) : _stats(stats), _recorded(false) {}
- int TimerHolder::recordMillis() {
- _recorded = true;
- if ( _stats ) {
- return _stats->record( _t );
- }
- return _t.millis();
+TimerHolder::~TimerHolder() {
+ if (!_recorded) {
+ recordMillis();
}
+}
- void TimerStats::recordMillis( int millis ) {
- scoped_spinlock lk( _lock );
- _num++;
- _totalMillis += millis;
+int TimerHolder::recordMillis() {
+ _recorded = true;
+ if (_stats) {
+ return _stats->record(_t);
}
+ return _t.millis();
+}
- int TimerStats::record( const Timer& timer ) {
- int millis = timer.millis();
- recordMillis( millis );
- return millis;
- }
+void TimerStats::recordMillis(int millis) {
+ scoped_spinlock lk(_lock);
+ _num++;
+ _totalMillis += millis;
+}
- BSONObj TimerStats::getReport() const {
- long long n, t;
- {
- scoped_spinlock lk( _lock );
- n = _num;
- t = _totalMillis;
- }
- BSONObjBuilder b(64);
- b.appendNumber( "num", n );
- b.appendNumber( "totalMillis" , t );
- return b.obj();
+int TimerStats::record(const Timer& timer) {
+ int millis = timer.millis();
+ recordMillis(millis);
+ return millis;
+}
+BSONObj TimerStats::getReport() const {
+ long long n, t;
+ {
+ scoped_spinlock lk(_lock);
+ n = _num;
+ t = _totalMillis;
}
+ BSONObjBuilder b(64);
+ b.appendNumber("num", n);
+ b.appendNumber("totalMillis", t);
+ return b.obj();
+}
}
diff --git a/src/mongo/db/stats/timer_stats.h b/src/mongo/db/stats/timer_stats.h
index 9e3c4db5e5a..a1b3ef69dfd 100644
--- a/src/mongo/db/stats/timer_stats.h
+++ b/src/mongo/db/stats/timer_stats.h
@@ -36,54 +36,58 @@
namespace mongo {
+/**
+ * Holds timing information in milliseconds;
+ * keeps track of the number of recordings and the total milliseconds
+ * so a diff can be computed
+ */
+class TimerStats {
+public:
+ void recordMillis(int millis);
+
/**
- * Holds timing information in milliseconds
- * keeps track of number of times and total milliseconds
- * so a diff can be computed
+ * @return number of millis
*/
- class TimerStats {
- public:
- void recordMillis( int millis );
+ int record(const Timer& timer);
- /**
- * @return number of millis
- */
- int record( const Timer& timer );
+ BSONObj getReport() const;
+ operator BSONObj() const {
+ return getReport();
+ }
- BSONObj getReport() const;
- operator BSONObj() const { return getReport(); }
+private:
+ mutable SpinLock _lock;
+ long long _num;
+ long long _totalMillis;
+};
- private:
- mutable SpinLock _lock;
- long long _num;
- long long _totalMillis;
- };
+/**
+ * Holds an instance of a Timer so that the time is recorded
+ * when the TimerHolder goes out of scope
+ */
+class TimerHolder {
+public:
+ /** Destructor will record to TimerStats */
+ TimerHolder(TimerStats* stats);
+ /** Will record stats if recordMillis hasn't (based on _recorded) */
+ ~TimerHolder();
/**
- * Holds an instance of a Timer such that we the time is recorded
- * when the TimerHolder goes out of scope
+ * returns elapsed millis from internal timer
*/
- class TimerHolder {
- public:
- /** Destructor will record to TimerStats */
- TimerHolder( TimerStats* stats );
- /** Will record stats if recordMillis hasn't (based on _recorded) */
- ~TimerHolder();
+ int millis() const {
+ return _t.millis();
+ }
- /**
- * returns elapsed millis from internal timer
- */
- int millis() const { return _t.millis(); }
-
- /**
- * records the time in the TimerStats and marks that we've
- * already recorded so the destructor doesn't
- */
- int recordMillis();
+ /**
+ * records the time in the TimerStats and marks that we've
+ * already recorded so the destructor doesn't
+ */
+ int recordMillis();
- private:
- TimerStats* _stats;
- bool _recorded;
- Timer _t;
- };
+private:
+ TimerStats* _stats;
+ bool _recorded;
+ Timer _t;
+};
}
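
For context, a minimal sketch of how TimerStats and TimerHolder above are typically used together; the static instance and the function it instruments are illustrative, while the member calls are the ones declared in timer_stats.h.

// Sketch only, not part of the diff: assumes timer_stats.h (above).
#include "mongo/db/stats/timer_stats.h"

namespace mongo {
// Hypothetical stats object for an operation we want to time.
static TimerStats exampleOpStats;

void doTimedWork() {
    // Records into exampleOpStats when it goes out of scope, unless
    // recordMillis() has already been called explicitly.
    TimerHolder timer(&exampleOpStats);
    // ... work being measured ...
}

BSONObj reportExampleOpStats() {
    // getReport() returns { num: <count>, totalMillis: <sum> }.
    return exampleOpStats.getReport();
}
}  // namespace mongo
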
diff --git a/src/mongo/db/stats/top.cpp b/src/mongo/db/stats/top.cpp
index 903378e1834..f4f66d42a05 100644
--- a/src/mongo/db/stats/top.cpp
+++ b/src/mongo/db/stats/top.cpp
@@ -40,86 +40,84 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::endl;
+using std::string;
+using std::stringstream;
+using std::vector;
namespace {
- const auto getTop = ServiceContext::declareDecoration<Top>();
+const auto getTop = ServiceContext::declareDecoration<Top>();
-} // namespace
+} // namespace
- Top::UsageData::UsageData( const UsageData& older, const UsageData& newer ) {
- // this won't be 100% accurate on rollovers and drop(), but at least it won't be negative
- time = (newer.time >= older.time) ? (newer.time - older.time) : newer.time;
- count = (newer.count >= older.count) ? (newer.count - older.count) : newer.count;
- }
-
- Top::CollectionData::CollectionData( const CollectionData& older, const CollectionData& newer )
- : total( older.total, newer.total ),
- readLock( older.readLock, newer.readLock ),
- writeLock( older.writeLock, newer.writeLock ),
- queries( older.queries, newer.queries ),
- getmore( older.getmore, newer.getmore ),
- insert( older.insert, newer.insert ),
- update( older.update, newer.update ),
- remove( older.remove, newer.remove ),
- commands( older.commands, newer.commands ) {
-
- }
-
- // static
- Top& Top::get(ServiceContext* service) {
- return getTop(service);
- }
+Top::UsageData::UsageData(const UsageData& older, const UsageData& newer) {
+ // this won't be 100% accurate on rollovers and drop(), but at least it won't be negative
+ time = (newer.time >= older.time) ? (newer.time - older.time) : newer.time;
+ count = (newer.count >= older.count) ? (newer.count - older.count) : newer.count;
+}
- void Top::record( StringData ns, int op, int lockType, long long micros, bool command ) {
- if ( ns[0] == '?' )
- return;
+Top::CollectionData::CollectionData(const CollectionData& older, const CollectionData& newer)
+ : total(older.total, newer.total),
+ readLock(older.readLock, newer.readLock),
+ writeLock(older.writeLock, newer.writeLock),
+ queries(older.queries, newer.queries),
+ getmore(older.getmore, newer.getmore),
+ insert(older.insert, newer.insert),
+ update(older.update, newer.update),
+ remove(older.remove, newer.remove),
+ commands(older.commands, newer.commands) {}
+
+// static
+Top& Top::get(ServiceContext* service) {
+ return getTop(service);
+}
- //cout << "record: " << ns << "\t" << op << "\t" << command << endl;
- stdx::lock_guard<SimpleMutex> lk(_lock);
+void Top::record(StringData ns, int op, int lockType, long long micros, bool command) {
+ if (ns[0] == '?')
+ return;
- if ( ( command || op == dbQuery ) && ns == _lastDropped ) {
- _lastDropped = "";
- return;
- }
+ // cout << "record: " << ns << "\t" << op << "\t" << command << endl;
+ stdx::lock_guard<SimpleMutex> lk(_lock);
- CollectionData& coll = _usage[ns];
- _record( coll, op, lockType, micros, command );
+ if ((command || op == dbQuery) && ns == _lastDropped) {
+ _lastDropped = "";
+ return;
}
- void Top::_record( CollectionData& c, int op, int lockType, long long micros, bool command ) {
- c.total.inc( micros );
+ CollectionData& coll = _usage[ns];
+ _record(coll, op, lockType, micros, command);
+}
+
+void Top::_record(CollectionData& c, int op, int lockType, long long micros, bool command) {
+ c.total.inc(micros);
- if ( lockType > 0 )
- c.writeLock.inc( micros );
- else if ( lockType < 0 )
- c.readLock.inc( micros );
+ if (lockType > 0)
+ c.writeLock.inc(micros);
+ else if (lockType < 0)
+ c.readLock.inc(micros);
- switch ( op ) {
+ switch (op) {
case 0:
// use 0 for unknown, non-specific
break;
case dbUpdate:
- c.update.inc( micros );
+ c.update.inc(micros);
break;
case dbInsert:
- c.insert.inc( micros );
+ c.insert.inc(micros);
break;
case dbQuery:
- if ( command )
- c.commands.inc( micros );
+ if (command)
+ c.commands.inc(micros);
else
- c.queries.inc( micros );
+ c.queries.inc(micros);
break;
case dbGetMore:
- c.getmore.inc( micros );
+ c.getmore.inc(micros);
break;
case dbDelete:
- c.remove.inc( micros );
+ c.remove.inc(micros);
break;
case dbKillCursors:
break;
@@ -133,62 +131,60 @@ namespace {
break;
default:
log() << "unknown op in Top::record: " << op << endl;
- }
-
- }
-
- void Top::collectionDropped( StringData ns ) {
- stdx::lock_guard<SimpleMutex> lk(_lock);
- _usage.erase(ns);
- _lastDropped = ns.toString();
}
+}
- void Top::cloneMap(Top::UsageMap& out) const {
- stdx::lock_guard<SimpleMutex> lk(_lock);
- out = _usage;
- }
+void Top::collectionDropped(StringData ns) {
+ stdx::lock_guard<SimpleMutex> lk(_lock);
+ _usage.erase(ns);
+ _lastDropped = ns.toString();
+}
- void Top::append( BSONObjBuilder& b ) {
- stdx::lock_guard<SimpleMutex> lk( _lock );
- _appendToUsageMap( b, _usage );
- }
+void Top::cloneMap(Top::UsageMap& out) const {
+ stdx::lock_guard<SimpleMutex> lk(_lock);
+ out = _usage;
+}
- void Top::_appendToUsageMap( BSONObjBuilder& b, const UsageMap& map ) const {
- // pull all the names into a vector so we can sort them for the user
+void Top::append(BSONObjBuilder& b) {
+ stdx::lock_guard<SimpleMutex> lk(_lock);
+ _appendToUsageMap(b, _usage);
+}
- vector<string> names;
- for ( UsageMap::const_iterator i = map.begin(); i != map.end(); ++i ) {
- names.push_back( i->first );
- }
+void Top::_appendToUsageMap(BSONObjBuilder& b, const UsageMap& map) const {
+ // pull all the names into a vector so we can sort them for the user
- std::sort( names.begin(), names.end() );
+ vector<string> names;
+ for (UsageMap::const_iterator i = map.begin(); i != map.end(); ++i) {
+ names.push_back(i->first);
+ }
- for ( size_t i=0; i<names.size(); i++ ) {
- BSONObjBuilder bb( b.subobjStart( names[i] ) );
+ std::sort(names.begin(), names.end());
- const CollectionData& coll = map.find(names[i])->second;
+ for (size_t i = 0; i < names.size(); i++) {
+ BSONObjBuilder bb(b.subobjStart(names[i]));
- _appendStatsEntry( b, "total", coll.total );
+ const CollectionData& coll = map.find(names[i])->second;
- _appendStatsEntry( b, "readLock", coll.readLock );
- _appendStatsEntry( b, "writeLock", coll.writeLock );
+ _appendStatsEntry(b, "total", coll.total);
- _appendStatsEntry( b, "queries", coll.queries );
- _appendStatsEntry( b, "getmore", coll.getmore );
- _appendStatsEntry( b, "insert", coll.insert );
- _appendStatsEntry( b, "update", coll.update );
- _appendStatsEntry( b, "remove", coll.remove );
- _appendStatsEntry( b, "commands", coll.commands );
+ _appendStatsEntry(b, "readLock", coll.readLock);
+ _appendStatsEntry(b, "writeLock", coll.writeLock);
- bb.done();
- }
- }
+ _appendStatsEntry(b, "queries", coll.queries);
+ _appendStatsEntry(b, "getmore", coll.getmore);
+ _appendStatsEntry(b, "insert", coll.insert);
+ _appendStatsEntry(b, "update", coll.update);
+ _appendStatsEntry(b, "remove", coll.remove);
+ _appendStatsEntry(b, "commands", coll.commands);
- void Top::_appendStatsEntry( BSONObjBuilder& b, const char * statsName, const UsageData& map ) const {
- BSONObjBuilder bb( b.subobjStart( statsName ) );
- bb.appendNumber( "time", map.time );
- bb.appendNumber( "count", map.count );
bb.done();
}
+}
+void Top::_appendStatsEntry(BSONObjBuilder& b, const char* statsName, const UsageData& map) const {
+ BSONObjBuilder bb(b.subobjStart(statsName));
+ bb.appendNumber("time", map.time);
+ bb.appendNumber("count", map.count);
+ bb.done();
+}
}
diff --git a/src/mongo/db/stats/top.h b/src/mongo/db/stats/top.h
index d71605e8ef4..56090b5c408 100644
--- a/src/mongo/db/stats/top.h
+++ b/src/mongo/db/stats/top.h
@@ -36,66 +36,65 @@
namespace mongo {
- class ServiceContext;
-
- /**
- * tracks usage by collection
- */
- class Top {
-
- public:
- static Top& get(ServiceContext* service);
-
- Top() = default;
-
- struct UsageData {
- UsageData() : time(0), count(0) {}
- UsageData( const UsageData& older, const UsageData& newer );
- long long time;
- long long count;
-
- void inc( long long micros ) {
- count++;
- time += micros;
- }
- };
-
- struct CollectionData {
- /**
- * constructs a diff
- */
- CollectionData() {}
- CollectionData( const CollectionData& older, const CollectionData& newer );
-
- UsageData total;
-
- UsageData readLock;
- UsageData writeLock;
-
- UsageData queries;
- UsageData getmore;
- UsageData insert;
- UsageData update;
- UsageData remove;
- UsageData commands;
- };
-
- typedef StringMap<CollectionData> UsageMap;
-
- public:
- void record( StringData ns, int op, int lockType, long long micros, bool command );
- void append( BSONObjBuilder& b );
- void cloneMap(UsageMap& out) const;
- void collectionDropped( StringData ns );
-
- private:
- void _appendToUsageMap( BSONObjBuilder& b, const UsageMap& map ) const;
- void _appendStatsEntry( BSONObjBuilder& b, const char * statsName, const UsageData& map ) const;
- void _record( CollectionData& c, int op, int lockType, long long micros, bool command );
-
- mutable SimpleMutex _lock;
- UsageMap _usage;
- std::string _lastDropped;
+class ServiceContext;
+
+/**
+ * tracks usage by collection
+ */
+class Top {
+public:
+ static Top& get(ServiceContext* service);
+
+ Top() = default;
+
+ struct UsageData {
+ UsageData() : time(0), count(0) {}
+ UsageData(const UsageData& older, const UsageData& newer);
+ long long time;
+ long long count;
+
+ void inc(long long micros) {
+ count++;
+ time += micros;
+ }
};
-} // namespace mongo
+ struct CollectionData {
+ /**
+ * constructs a diff
+ */
+ CollectionData() {}
+ CollectionData(const CollectionData& older, const CollectionData& newer);
+
+ UsageData total;
+
+ UsageData readLock;
+ UsageData writeLock;
+
+ UsageData queries;
+ UsageData getmore;
+ UsageData insert;
+ UsageData update;
+ UsageData remove;
+ UsageData commands;
+ };
+
+ typedef StringMap<CollectionData> UsageMap;
+
+public:
+ void record(StringData ns, int op, int lockType, long long micros, bool command);
+ void append(BSONObjBuilder& b);
+ void cloneMap(UsageMap& out) const;
+ void collectionDropped(StringData ns);
+
+private:
+ void _appendToUsageMap(BSONObjBuilder& b, const UsageMap& map) const;
+ void _appendStatsEntry(BSONObjBuilder& b, const char* statsName, const UsageData& map) const;
+ void _record(CollectionData& c, int op, int lockType, long long micros, bool command);
+
+ mutable SimpleMutex _lock;
+ UsageMap _usage;
+ std::string _lastDropped;
+};
+
+} // namespace mongo
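
A rough illustration of the Top interface above: recording a single operation and then dumping the per-collection counters. The namespace string, lock type, and 100-microsecond duration are placeholder values; dbInsert is the wire-protocol opcode constant used by top.cpp, assumed to be pulled in by an existing header.

// Sketch only, not part of the diff: assumes top.h (above) and the opcode enum.
#include "mongo/db/stats/top.h"

namespace mongo {
void recordSampleInsert(ServiceContext* service) {
    Top& top = Top::get(service);
    // ns, opcode, lockType (>0 write, <0 read, 0 none), elapsed micros, isCommand
    top.record("test.coll", dbInsert, 1, 100, false);
}

BSONObj dumpTopCounters(ServiceContext* service) {
    BSONObjBuilder b;
    Top::get(service).append(b);  // one sub-object per namespace
    return b.obj();               // e.g. {"test.coll": {total: {...}, insert: {...}, ...}}
}
}  // namespace mongo
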
diff --git a/src/mongo/db/stats/top_test.cpp b/src/mongo/db/stats/top_test.cpp
index 8aa5bc6363d..27ed6397ed6 100644
--- a/src/mongo/db/stats/top_test.cpp
+++ b/src/mongo/db/stats/top_test.cpp
@@ -33,10 +33,10 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- TEST(TopTest, CollectionDropped) {
- Top().collectionDropped("coll");
- }
+TEST(TopTest, CollectionDropped) {
+ Top().collectionDropped("coll");
+}
-} // namespace
+} // namespace
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index e1c7e527d69..976636b0bc6 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -32,169 +32,165 @@
namespace mongo {
- BSONCollectionCatalogEntry::BSONCollectionCatalogEntry( StringData ns )
- : CollectionCatalogEntry( ns ) {
- }
+BSONCollectionCatalogEntry::BSONCollectionCatalogEntry(StringData ns)
+ : CollectionCatalogEntry(ns) {}
- CollectionOptions BSONCollectionCatalogEntry::getCollectionOptions( OperationContext* txn ) const {
- MetaData md = _getMetaData( txn );
- return md.options;
- }
+CollectionOptions BSONCollectionCatalogEntry::getCollectionOptions(OperationContext* txn) const {
+ MetaData md = _getMetaData(txn);
+ return md.options;
+}
- int BSONCollectionCatalogEntry::getTotalIndexCount( OperationContext* txn ) const {
- MetaData md = _getMetaData( txn );
+int BSONCollectionCatalogEntry::getTotalIndexCount(OperationContext* txn) const {
+ MetaData md = _getMetaData(txn);
- return static_cast<int>( md.indexes.size() );
- }
+ return static_cast<int>(md.indexes.size());
+}
- int BSONCollectionCatalogEntry::getCompletedIndexCount( OperationContext* txn ) const {
- MetaData md = _getMetaData( txn );
+int BSONCollectionCatalogEntry::getCompletedIndexCount(OperationContext* txn) const {
+ MetaData md = _getMetaData(txn);
- int num = 0;
- for ( unsigned i = 0; i < md.indexes.size(); i++ ) {
- if ( md.indexes[i].ready )
- num++;
- }
- return num;
+ int num = 0;
+ for (unsigned i = 0; i < md.indexes.size(); i++) {
+ if (md.indexes[i].ready)
+ num++;
}
+ return num;
+}
- BSONObj BSONCollectionCatalogEntry::getIndexSpec( OperationContext* txn,
- StringData indexName ) const {
- MetaData md = _getMetaData( txn );
+BSONObj BSONCollectionCatalogEntry::getIndexSpec(OperationContext* txn,
+ StringData indexName) const {
+ MetaData md = _getMetaData(txn);
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- return md.indexes[offset].spec.getOwned();
- }
+ int offset = md.findIndexOffset(indexName);
+ invariant(offset >= 0);
+ return md.indexes[offset].spec.getOwned();
+}
- void BSONCollectionCatalogEntry::getAllIndexes( OperationContext* txn,
- std::vector<std::string>* names ) const {
- MetaData md = _getMetaData( txn );
+void BSONCollectionCatalogEntry::getAllIndexes(OperationContext* txn,
+ std::vector<std::string>* names) const {
+ MetaData md = _getMetaData(txn);
- for ( unsigned i = 0; i < md.indexes.size(); i++ ) {
- names->push_back( md.indexes[i].spec["name"].String() );
- }
+ for (unsigned i = 0; i < md.indexes.size(); i++) {
+ names->push_back(md.indexes[i].spec["name"].String());
}
+}
- bool BSONCollectionCatalogEntry::isIndexMultikey( OperationContext* txn,
- StringData indexName) const {
- MetaData md = _getMetaData( txn );
+bool BSONCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
+ StringData indexName) const {
+ MetaData md = _getMetaData(txn);
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- return md.indexes[offset].multikey;
- }
+ int offset = md.findIndexOffset(indexName);
+ invariant(offset >= 0);
+ return md.indexes[offset].multikey;
+}
- RecordId BSONCollectionCatalogEntry::getIndexHead( OperationContext* txn,
- StringData indexName ) const {
- MetaData md = _getMetaData( txn );
+RecordId BSONCollectionCatalogEntry::getIndexHead(OperationContext* txn,
+ StringData indexName) const {
+ MetaData md = _getMetaData(txn);
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- return md.indexes[offset].head;
- }
+ int offset = md.findIndexOffset(indexName);
+ invariant(offset >= 0);
+ return md.indexes[offset].head;
+}
- bool BSONCollectionCatalogEntry::isIndexReady( OperationContext* txn,
- StringData indexName ) const {
- MetaData md = _getMetaData( txn );
+bool BSONCollectionCatalogEntry::isIndexReady(OperationContext* txn, StringData indexName) const {
+ MetaData md = _getMetaData(txn);
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- return md.indexes[offset].ready;
- }
+ int offset = md.findIndexOffset(indexName);
+ invariant(offset >= 0);
+ return md.indexes[offset].ready;
+}
- // --------------------------
+// --------------------------
- void BSONCollectionCatalogEntry::IndexMetaData::updateTTLSetting( long long newExpireSeconds ) {
- BSONObjBuilder b;
- for ( BSONObjIterator bi( spec ); bi.more(); ) {
- BSONElement e = bi.next();
- if ( e.fieldNameStringData() == "expireAfterSeconds" ) {
- continue;
- }
- b.append( e );
+void BSONCollectionCatalogEntry::IndexMetaData::updateTTLSetting(long long newExpireSeconds) {
+ BSONObjBuilder b;
+ for (BSONObjIterator bi(spec); bi.more();) {
+ BSONElement e = bi.next();
+ if (e.fieldNameStringData() == "expireAfterSeconds") {
+ continue;
}
-
- b.append( "expireAfterSeconds", newExpireSeconds );
- spec = b.obj();
+ b.append(e);
}
- // --------------------------
+ b.append("expireAfterSeconds", newExpireSeconds);
+ spec = b.obj();
+}
- int BSONCollectionCatalogEntry::MetaData::findIndexOffset( StringData name ) const {
- for ( unsigned i = 0; i < indexes.size(); i++ )
- if ( indexes[i].name() == name )
- return i;
- return -1;
- }
+// --------------------------
- bool BSONCollectionCatalogEntry::MetaData::eraseIndex( StringData name ) {
- int indexOffset = findIndexOffset( name );
+int BSONCollectionCatalogEntry::MetaData::findIndexOffset(StringData name) const {
+ for (unsigned i = 0; i < indexes.size(); i++)
+ if (indexes[i].name() == name)
+ return i;
+ return -1;
+}
- if ( indexOffset < 0 ) {
- return false;
- }
+bool BSONCollectionCatalogEntry::MetaData::eraseIndex(StringData name) {
+ int indexOffset = findIndexOffset(name);
- indexes.erase( indexes.begin() + indexOffset );
- return true;
+ if (indexOffset < 0) {
+ return false;
}
- void BSONCollectionCatalogEntry::MetaData::rename( StringData toNS ) {
- ns = toNS.toString();
- for ( size_t i = 0; i < indexes.size(); i++ ) {
- BSONObj spec = indexes[i].spec;
- BSONObjBuilder b;
- b.append( "ns", toNS );
- b.appendElementsUnique( spec );
- indexes[i].spec = b.obj();
- }
- }
+ indexes.erase(indexes.begin() + indexOffset);
+ return true;
+}
- BSONObj BSONCollectionCatalogEntry::MetaData::toBSON() const {
+void BSONCollectionCatalogEntry::MetaData::rename(StringData toNS) {
+ ns = toNS.toString();
+ for (size_t i = 0; i < indexes.size(); i++) {
+ BSONObj spec = indexes[i].spec;
BSONObjBuilder b;
- b.append( "ns", ns );
- b.append( "options", options.toBSON() );
- {
- BSONArrayBuilder arr( b.subarrayStart( "indexes" ) );
- for ( unsigned i = 0; i < indexes.size(); i++ ) {
- BSONObjBuilder sub( arr.subobjStart() );
- sub.append( "spec", indexes[i].spec );
- sub.appendBool( "ready", indexes[i].ready );
- sub.appendBool( "multikey", indexes[i].multikey );
- sub.append( "head", static_cast<long long>(indexes[i].head.repr()) );
- sub.done();
- }
- arr.done();
+ b.append("ns", toNS);
+ b.appendElementsUnique(spec);
+ indexes[i].spec = b.obj();
+ }
+}
+
+BSONObj BSONCollectionCatalogEntry::MetaData::toBSON() const {
+ BSONObjBuilder b;
+ b.append("ns", ns);
+ b.append("options", options.toBSON());
+ {
+ BSONArrayBuilder arr(b.subarrayStart("indexes"));
+ for (unsigned i = 0; i < indexes.size(); i++) {
+ BSONObjBuilder sub(arr.subobjStart());
+ sub.append("spec", indexes[i].spec);
+ sub.appendBool("ready", indexes[i].ready);
+ sub.appendBool("multikey", indexes[i].multikey);
+ sub.append("head", static_cast<long long>(indexes[i].head.repr()));
+ sub.done();
}
- return b.obj();
+ arr.done();
}
+ return b.obj();
+}
- void BSONCollectionCatalogEntry::MetaData::parse( const BSONObj& obj ) {
- ns = obj["ns"].valuestrsafe();
+void BSONCollectionCatalogEntry::MetaData::parse(const BSONObj& obj) {
+ ns = obj["ns"].valuestrsafe();
- if ( obj["options"].isABSONObj() ) {
- options.parse( obj["options"].Obj() );
- }
+ if (obj["options"].isABSONObj()) {
+ options.parse(obj["options"].Obj());
+ }
- BSONElement e = obj["indexes"];
- if ( e.isABSONObj() ) {
- std::vector<BSONElement> entries = e.Array();
- for ( unsigned i = 0; i < entries.size(); i++ ) {
- BSONObj idx = entries[i].Obj();
- IndexMetaData imd;
- imd.spec = idx["spec"].Obj().getOwned();
- imd.ready = idx["ready"].trueValue();
- if (idx.hasField("head")) {
- imd.head = RecordId(idx["head"].Long());
- }
- else {
- imd.head = RecordId( idx["head_a"].Int(),
- idx["head_b"].Int() );
- }
- imd.multikey = idx["multikey"].trueValue();
- indexes.push_back( imd );
+ BSONElement e = obj["indexes"];
+ if (e.isABSONObj()) {
+ std::vector<BSONElement> entries = e.Array();
+ for (unsigned i = 0; i < entries.size(); i++) {
+ BSONObj idx = entries[i].Obj();
+ IndexMetaData imd;
+ imd.spec = idx["spec"].Obj().getOwned();
+ imd.ready = idx["ready"].trueValue();
+ if (idx.hasField("head")) {
+ imd.head = RecordId(idx["head"].Long());
+ } else {
+ imd.head = RecordId(idx["head_a"].Int(), idx["head_b"].Int());
}
+ imd.multikey = idx["multikey"].trueValue();
+ indexes.push_back(imd);
}
}
}
+}
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h
index 1f40eea247c..179c64591db 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.h
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.h
@@ -37,76 +37,71 @@
namespace mongo {
- /**
- * This is a helper class for any storage engine that wants to store catalog information
- * as BSON. It is totally optional to use this.
- */
- class BSONCollectionCatalogEntry : public CollectionCatalogEntry {
- public:
- BSONCollectionCatalogEntry( StringData ns );
-
- virtual ~BSONCollectionCatalogEntry(){}
-
- virtual CollectionOptions getCollectionOptions( OperationContext* txn ) const;
+/**
+ * This is a helper class for any storage engine that wants to store catalog information
+ * as BSON. It is totally optional to use this.
+ */
+class BSONCollectionCatalogEntry : public CollectionCatalogEntry {
+public:
+ BSONCollectionCatalogEntry(StringData ns);
- virtual int getTotalIndexCount( OperationContext* txn ) const;
+ virtual ~BSONCollectionCatalogEntry() {}
- virtual int getCompletedIndexCount( OperationContext* txn ) const;
+ virtual CollectionOptions getCollectionOptions(OperationContext* txn) const;
- virtual BSONObj getIndexSpec( OperationContext* txn,
- StringData idxName ) const;
+ virtual int getTotalIndexCount(OperationContext* txn) const;
- virtual void getAllIndexes( OperationContext* txn,
- std::vector<std::string>* names ) const;
+ virtual int getCompletedIndexCount(OperationContext* txn) const;
- virtual bool isIndexMultikey( OperationContext* txn,
- StringData indexName) const;
+ virtual BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const;
- virtual RecordId getIndexHead( OperationContext* txn,
- StringData indexName ) const;
+ virtual void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const;
- virtual bool isIndexReady( OperationContext* txn,
- StringData indexName ) const;
+ virtual bool isIndexMultikey(OperationContext* txn, StringData indexName) const;
- // ------ for implementors
+ virtual RecordId getIndexHead(OperationContext* txn, StringData indexName) const;
- struct IndexMetaData {
- IndexMetaData() {}
- IndexMetaData( BSONObj s, bool r, RecordId h, bool m )
- : spec( s ), ready( r ), head( h ), multikey( m ) {}
+ virtual bool isIndexReady(OperationContext* txn, StringData indexName) const;
- void updateTTLSetting( long long newExpireSeconds );
+ // ------ for implementors
- std::string name() const { return spec["name"].String(); }
+ struct IndexMetaData {
+ IndexMetaData() {}
+ IndexMetaData(BSONObj s, bool r, RecordId h, bool m)
+ : spec(s), ready(r), head(h), multikey(m) {}
- BSONObj spec;
- bool ready;
- RecordId head;
- bool multikey;
- };
+ void updateTTLSetting(long long newExpireSeconds);
- struct MetaData {
- void parse( const BSONObj& obj );
- BSONObj toBSON() const;
+ std::string name() const {
+ return spec["name"].String();
+ }
- int findIndexOffset( StringData name ) const;
+ BSONObj spec;
+ bool ready;
+ RecordId head;
+ bool multikey;
+ };
- /**
- * Removes information about an index from the MetaData. Returns true if an index
- * called name existed and was deleted, and false otherwise.
- */
- bool eraseIndex( StringData name );
+ struct MetaData {
+ void parse(const BSONObj& obj);
+ BSONObj toBSON() const;
- void rename( StringData toNS );
+ int findIndexOffset(StringData name) const;
- std::string ns;
- CollectionOptions options;
- std::vector<IndexMetaData> indexes;
- };
+ /**
+ * Removes information about an index from the MetaData. Returns true if an index
+ * called name existed and was deleted, and false otherwise.
+ */
+ bool eraseIndex(StringData name);
- protected:
- virtual MetaData _getMetaData( OperationContext* txn ) const = 0;
+ void rename(StringData toNS);
+ std::string ns;
+ CollectionOptions options;
+ std::vector<IndexMetaData> indexes;
};
+protected:
+ virtual MetaData _getMetaData(OperationContext* txn) const = 0;
+};
}
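
To make the extension point concrete, here is a hedged sketch of a catalog entry derived from the helper above. The in-memory MetaData member and the setter are assumptions; a real engine would load MetaData from its own catalog storage, and the other pure virtuals inherited from CollectionCatalogEntry are omitted for brevity.

// Sketch only, not part of the diff: assumes bson_collection_catalog_entry.h (above).
#include "mongo/db/storage/bson_collection_catalog_entry.h"

namespace mongo {
// Hypothetical entry that keeps its MetaData in memory; remaining
// CollectionCatalogEntry pure virtuals are elided.
class InMemoryCatalogEntry : public BSONCollectionCatalogEntry {
public:
    explicit InMemoryCatalogEntry(StringData ns) : BSONCollectionCatalogEntry(ns) {}

    void setMetaData(const MetaData& md) {
        _md = md;
    }

protected:
    // getCollectionOptions(), getIndexSpec(), isIndexReady(), etc. above are
    // all implemented in terms of this single hook.
    virtual MetaData _getMetaData(OperationContext* txn) const {
        return _md;
    }

private:
    MetaData _md;
};
}  // namespace mongo
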
diff --git a/src/mongo/db/storage/capped_callback.h b/src/mongo/db/storage/capped_callback.h
index 0ee4511f66a..4aa040b27a2 100644
--- a/src/mongo/db/storage/capped_callback.h
+++ b/src/mongo/db/storage/capped_callback.h
@@ -34,26 +34,25 @@
namespace mongo {
- class OperationContext;
- class RecordData;
+class OperationContext;
+class RecordData;
+
+/**
+ * When a capped collection has to delete a document, it needs a way to tell the caller
+ * what it's deleting so the caller can unindex or do any other cleanup.
+ * This is that way.
+ */
+class CappedDocumentDeleteCallback {
+public:
+ virtual ~CappedDocumentDeleteCallback() {}
/**
- * When a capped collection has to delete a document, it needs a way to tell the caller
- * what its deleting so it can unindex or do any other cleanup.
- * This is that way.
+ * This will be called right before loc is deleted when wrapping.
+ * If data is unowned, it is only valid inside of this call. If implementations wish to
+ * stash a pointer, they must copy it.
*/
- class CappedDocumentDeleteCallback {
- public:
- virtual ~CappedDocumentDeleteCallback(){}
-
- /**
- * This will be called right before loc is deleted when wrapping.
- * If data is unowned, it is only valid inside of this call. If implementations wish to
- * stash a pointer, they must copy it.
- */
- virtual Status aboutToDeleteCapped( OperationContext* txn,
- const RecordId& loc,
- RecordData data ) = 0;
- };
-
+ virtual Status aboutToDeleteCapped(OperationContext* txn,
+ const RecordId& loc,
+ RecordData data) = 0;
+};
}
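
A hedged sketch of an implementation of the callback above; the logging body and class name are illustrative, and only the override signature comes from capped_callback.h.

// Sketch only, not part of the diff: assumes capped_callback.h (above) and mongo/util/log.h.
#include "mongo/db/storage/capped_callback.h"

namespace mongo {
// Hypothetical callback that just logs which record a capped collection
// is about to reclaim; a real caller would unindex the document here.
class LoggingCappedDeleteCallback : public CappedDocumentDeleteCallback {
public:
    virtual Status aboutToDeleteCapped(OperationContext* txn,
                                       const RecordId& loc,
                                       RecordData data) {
        // 'data' may be unowned: copy it before stashing a pointer (not needed here).
        log() << "about to delete capped record " << static_cast<long long>(loc.repr());
        return Status::OK();
    }
};
}  // namespace mongo
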
diff --git a/src/mongo/db/storage/devnull/devnull_init.cpp b/src/mongo/db/storage/devnull/devnull_init.cpp
index ad22a5ca25e..1216d642e8e 100644
--- a/src/mongo/db/storage/devnull/devnull_init.cpp
+++ b/src/mongo/db/storage/devnull/devnull_init.cpp
@@ -38,38 +38,35 @@
namespace mongo {
- namespace {
- class DevNullStorageEngineFactory : public StorageEngine::Factory {
- public:
- virtual StorageEngine* create(const StorageGlobalParams& params,
- const StorageEngineLockFile& lockFile) const {
- KVStorageEngineOptions options;
- options.directoryPerDB = params.directoryperdb;
- options.forRepair = params.repair;
- return new KVStorageEngine( new DevNullKVEngine(), options );
- }
-
- virtual StringData getCanonicalName() const {
- return "devnull";
- }
-
- virtual Status validateMetadata(const StorageEngineMetadata& metadata,
- const StorageGlobalParams& params) const {
- return Status::OK();
- }
+namespace {
+class DevNullStorageEngineFactory : public StorageEngine::Factory {
+public:
+ virtual StorageEngine* create(const StorageGlobalParams& params,
+ const StorageEngineLockFile& lockFile) const {
+ KVStorageEngineOptions options;
+ options.directoryPerDB = params.directoryperdb;
+ options.forRepair = params.repair;
+ return new KVStorageEngine(new DevNullKVEngine(), options);
+ }
- virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
- return BSONObj();
- }
- };
- } // namespace
+ virtual StringData getCanonicalName() const {
+ return "devnull";
+ }
- MONGO_INITIALIZER_WITH_PREREQUISITES(DevNullEngineInit,
- ("SetGlobalEnvironment"))
- (InitializerContext* context ) {
- getGlobalServiceContext()->registerStorageEngine("devnull", new DevNullStorageEngineFactory() );
+ virtual Status validateMetadata(const StorageEngineMetadata& metadata,
+ const StorageGlobalParams& params) const {
return Status::OK();
}
-}
+ virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
+ return BSONObj();
+ }
+};
+} // namespace
+MONGO_INITIALIZER_WITH_PREREQUISITES(DevNullEngineInit, ("SetGlobalEnvironment"))
+(InitializerContext* context) {
+ getGlobalServiceContext()->registerStorageEngine("devnull", new DevNullStorageEngineFactory());
+ return Status::OK();
+}
+}
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
index 1d1d039b7a6..25ebf6a5de6 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp
@@ -38,192 +38,216 @@
namespace mongo {
- class EmptyRecordCursor final : public RecordCursor {
- public:
- boost::optional<Record> next() final { return {}; }
- boost::optional<Record> seekExact(const RecordId& id) final { return {}; }
- void savePositioned() final {}
- bool restore(OperationContext* txn) final { return true; }
- };
-
- class DevNullRecordStore : public RecordStore {
- public:
- DevNullRecordStore( StringData ns, const CollectionOptions& options )
- : RecordStore( ns ), _options( options ) {
- _numInserts = 0;
- _dummy = BSON( "_id" << 1 );
- }
-
- virtual const char* name() const { return "devnull"; }
-
- virtual void setCappedDeleteCallback(CappedDocumentDeleteCallback*){}
-
- virtual long long dataSize( OperationContext* txn ) const { return 0; }
-
- virtual long long numRecords( OperationContext* txn ) const { return 0; }
-
- virtual bool isCapped() const { return _options.capped; }
-
- virtual int64_t storageSize( OperationContext* txn,
- BSONObjBuilder* extraInfo = NULL,
- int infoLevel = 0 ) const {
- return 0;
- }
-
- virtual RecordData dataFor( OperationContext* txn, const RecordId& loc) const {
- return RecordData( _dummy.objdata(), _dummy.objsize() );
- }
-
- virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* rd ) const {
- return false;
- }
-
- virtual void deleteRecord( OperationContext* txn, const RecordId& dl ) {}
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota ) {
- _numInserts++;
- return StatusWith<RecordId>( RecordId( 6, 4 ) );
- }
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota ) {
- _numInserts++;
- return StatusWith<RecordId>( RecordId( 6, 4 ) );
- }
-
- virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
- const RecordId& oldLocation,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier ) {
- return StatusWith<RecordId>( oldLocation );
- }
-
- virtual bool updateWithDamagesSupported() const {
- return false;
- }
-
- virtual Status updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages ) {
- invariant(false);
- }
-
-
- std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final {
- return stdx::make_unique<EmptyRecordCursor>();
- }
-
- virtual Status truncate( OperationContext* txn ) { return Status::OK(); }
-
- virtual void temp_cappedTruncateAfter(OperationContext* txn,
- RecordId end,
- bool inclusive) { }
-
- virtual Status validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results, BSONObjBuilder* output ) {
- return Status::OK();
- }
-
- virtual void appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const {
- result->appendNumber( "numInserts", _numInserts );
- }
-
- virtual Status touch( OperationContext* txn, BSONObjBuilder* output ) const {
- return Status::OK();
- }
-
- virtual void updateStatsAfterRepair(OperationContext* txn,
- long long numRecords,
- long long dataSize) {
- }
-
- private:
- CollectionOptions _options;
- long long _numInserts;
- BSONObj _dummy;
- };
-
- class DevNullSortedDataBuilderInterface : public SortedDataBuilderInterface {
- MONGO_DISALLOW_COPYING(DevNullSortedDataBuilderInterface);
-
- public:
- DevNullSortedDataBuilderInterface() { }
-
- virtual Status addKey(const BSONObj& key, const RecordId& loc) {
- return Status::OK();
- }
- };
-
- class DevNullSortedDataInterface : public SortedDataInterface {
- public:
- virtual ~DevNullSortedDataInterface() { }
-
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) {
- return new DevNullSortedDataBuilderInterface();
- }
-
- virtual Status insert(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) { return Status::OK(); }
-
- virtual void unindex(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) { }
-
- virtual Status dupKeyCheck(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc) { return Status::OK(); }
-
- virtual void fullValidate(OperationContext* txn, bool full, long long* numKeysOut,
- BSONObjBuilder* output) const { }
-
- virtual bool appendCustomStats(OperationContext* txn, BSONObjBuilder* output, double scale)
- const {
- return false;
- }
-
- virtual long long getSpaceUsedBytes( OperationContext* txn ) const { return 0; }
-
- virtual bool isEmpty(OperationContext* txn) { return true; }
+class EmptyRecordCursor final : public RecordCursor {
+public:
+ boost::optional<Record> next() final {
+ return {};
+ }
+ boost::optional<Record> seekExact(const RecordId& id) final {
+ return {};
+ }
+ void savePositioned() final {}
+ bool restore(OperationContext* txn) final {
+ return true;
+ }
+};
+
+class DevNullRecordStore : public RecordStore {
+public:
+ DevNullRecordStore(StringData ns, const CollectionOptions& options)
+ : RecordStore(ns), _options(options) {
+ _numInserts = 0;
+ _dummy = BSON("_id" << 1);
+ }
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
- bool isForward) const {
- return {};
- }
+ virtual const char* name() const {
+ return "devnull";
+ }
- virtual Status initAsEmpty(OperationContext* txn) { return Status::OK(); }
- };
+ virtual void setCappedDeleteCallback(CappedDocumentDeleteCallback*) {}
+
+ virtual long long dataSize(OperationContext* txn) const {
+ return 0;
+ }
+
+ virtual long long numRecords(OperationContext* txn) const {
+ return 0;
+ }
+
+ virtual bool isCapped() const {
+ return _options.capped;
+ }
+
+ virtual int64_t storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int infoLevel = 0) const {
+ return 0;
+ }
+
+ virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const {
+ return RecordData(_dummy.objdata(), _dummy.objsize());
+ }
+
+ virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const {
+ return false;
+ }
+
+ virtual void deleteRecord(OperationContext* txn, const RecordId& dl) {}
+
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota) {
+ _numInserts++;
+ return StatusWith<RecordId>(RecordId(6, 4));
+ }
+
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota) {
+ _numInserts++;
+ return StatusWith<RecordId>(RecordId(6, 4));
+ }
+
+ virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* data,
+ int len,
+ bool enforceQuota,
+ UpdateNotifier* notifier) {
+ return StatusWith<RecordId>(oldLocation);
+ }
+
+ virtual bool updateWithDamagesSupported() const {
+ return false;
+ }
+
+ virtual Status updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages) {
+ invariant(false);
+ }
+
+
+ std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final {
+ return stdx::make_unique<EmptyRecordCursor>();
+ }
+
+ virtual Status truncate(OperationContext* txn) {
+ return Status::OK();
+ }
+ virtual void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {}
- RecordStore* DevNullKVEngine::getRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options ) {
- if ( ident == "_mdb_catalog" ) {
- return new InMemoryRecordStore( ns, &_catalogInfo );
- }
- return new DevNullRecordStore( ns, options );
+ virtual Status validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output) {
+ return Status::OK();
}
- SortedDataInterface* DevNullKVEngine::getSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc ) {
- return new DevNullSortedDataInterface();
+ virtual void appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const {
+ result->appendNumber("numInserts", _numInserts);
}
+ virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const {
+ return Status::OK();
+ }
+
+ virtual void updateStatsAfterRepair(OperationContext* txn,
+ long long numRecords,
+ long long dataSize) {}
+
+private:
+ CollectionOptions _options;
+ long long _numInserts;
+ BSONObj _dummy;
+};
+
+class DevNullSortedDataBuilderInterface : public SortedDataBuilderInterface {
+ MONGO_DISALLOW_COPYING(DevNullSortedDataBuilderInterface);
+
+public:
+ DevNullSortedDataBuilderInterface() {}
+
+ virtual Status addKey(const BSONObj& key, const RecordId& loc) {
+ return Status::OK();
+ }
+};
+
+class DevNullSortedDataInterface : public SortedDataInterface {
+public:
+ virtual ~DevNullSortedDataInterface() {}
+
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
+ return new DevNullSortedDataBuilderInterface();
+ }
+
+ virtual Status insert(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ return Status::OK();
+ }
+
+ virtual void unindex(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {}
+
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
+ return Status::OK();
+ }
+
+ virtual void fullValidate(OperationContext* txn,
+ bool full,
+ long long* numKeysOut,
+ BSONObjBuilder* output) const {}
+
+ virtual bool appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* output,
+ double scale) const {
+ return false;
+ }
+
+ virtual long long getSpaceUsedBytes(OperationContext* txn) const {
+ return 0;
+ }
+
+ virtual bool isEmpty(OperationContext* txn) {
+ return true;
+ }
+
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ bool isForward) const {
+ return {};
+ }
+
+ virtual Status initAsEmpty(OperationContext* txn) {
+ return Status::OK();
+ }
+};
+
+
+RecordStore* DevNullKVEngine::getRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) {
+ if (ident == "_mdb_catalog") {
+ return new InMemoryRecordStore(ns, &_catalogInfo);
+ }
+ return new DevNullRecordStore(ns, options);
+}
+
+SortedDataInterface* DevNullKVEngine::getSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc) {
+ return new DevNullSortedDataInterface();
+}
}
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.h b/src/mongo/db/storage/devnull/devnull_kv_engine.h
index b6d14c52399..a6d559ef35a 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.h
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.h
@@ -36,74 +36,71 @@
namespace mongo {
- class DevNullKVEngine : public KVEngine {
- public:
- virtual ~DevNullKVEngine(){}
-
- virtual RecoveryUnit* newRecoveryUnit() {
- return new RecoveryUnitNoop();
- }
-
- virtual Status createRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options ) {
- return Status::OK();
- }
-
- virtual RecordStore* getRecordStore( OperationContext* opCtx,
- StringData ns,
+class DevNullKVEngine : public KVEngine {
+public:
+ virtual ~DevNullKVEngine() {}
+
+ virtual RecoveryUnit* newRecoveryUnit() {
+ return new RecoveryUnitNoop();
+ }
+
+ virtual Status createRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) {
+ return Status::OK();
+ }
+
+ virtual RecordStore* getRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options);
+
+ virtual Status createSortedDataInterface(OperationContext* opCtx,
StringData ident,
- const CollectionOptions& options );
-
- virtual Status createSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc ) {
- return Status::OK();
- }
-
- virtual SortedDataInterface* getSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc );
-
- virtual Status dropIdent( OperationContext* opCtx,
- StringData ident ) {
- return Status::OK();
- }
-
- virtual bool supportsDocLocking() const {
- return true;
- }
-
- virtual bool supportsDirectoryPerDB() const {
- return false;
- }
-
- virtual bool isDurable() const {
- return true;
- }
-
- virtual int64_t getIdentSize( OperationContext* opCtx,
- StringData ident ) {
- return 1;
- }
-
- virtual Status repairIdent( OperationContext* opCtx,
- StringData ident ) {
- return Status::OK();
- }
-
- virtual bool hasIdent(OperationContext* opCtx, StringData ident) const {
- return true;
- }
-
- std::vector<std::string> getAllIdents( OperationContext* opCtx ) const {
- return std::vector<std::string>();
- }
-
- virtual void cleanShutdown() {};
-
- private:
- std::shared_ptr<void> _catalogInfo;
- };
+ const IndexDescriptor* desc) {
+ return Status::OK();
+ }
+
+ virtual SortedDataInterface* getSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc);
+
+ virtual Status dropIdent(OperationContext* opCtx, StringData ident) {
+ return Status::OK();
+ }
+
+ virtual bool supportsDocLocking() const {
+ return true;
+ }
+
+ virtual bool supportsDirectoryPerDB() const {
+ return false;
+ }
+
+ virtual bool isDurable() const {
+ return true;
+ }
+
+ virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident) {
+ return 1;
+ }
+
+ virtual Status repairIdent(OperationContext* opCtx, StringData ident) {
+ return Status::OK();
+ }
+
+ virtual bool hasIdent(OperationContext* opCtx, StringData ident) const {
+ return true;
+ }
+
+ std::vector<std::string> getAllIdents(OperationContext* opCtx) const {
+ return std::vector<std::string>();
+ }
+
+ virtual void cleanShutdown(){};
+
+private:
+ std::shared_ptr<void> _catalogInfo;
+};
}
diff --git a/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp b/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
index 40da9035fbd..f40dff8e7ff 100644
--- a/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_btree_impl.cpp
@@ -42,448 +42,450 @@
namespace mongo {
- using std::shared_ptr;
- using std::string;
- using std::vector;
+using std::shared_ptr;
+using std::string;
+using std::vector;
namespace {
- const int TempKeyMaxSize = 1024; // this goes away with SERVER-3372
+const int TempKeyMaxSize = 1024; // this goes away with SERVER-3372
- bool hasFieldNames(const BSONObj& obj) {
- BSONForEach(e, obj) {
- if (e.fieldName()[0])
- return true;
- }
- return false;
+bool hasFieldNames(const BSONObj& obj) {
+ BSONForEach(e, obj) {
+ if (e.fieldName()[0])
+ return true;
}
+ return false;
+}
- BSONObj stripFieldNames(const BSONObj& query) {
- if (!hasFieldNames(query))
- return query;
+BSONObj stripFieldNames(const BSONObj& query) {
+ if (!hasFieldNames(query))
+ return query;
- BSONObjBuilder bb;
- BSONForEach(e, query) {
- bb.appendAs(e, StringData());
- }
- return bb.obj();
+ BSONObjBuilder bb;
+ BSONForEach(e, query) {
+ bb.appendAs(e, StringData());
}
+ return bb.obj();
+}
+
+typedef std::set<IndexKeyEntry, IndexEntryComparison> IndexSet;
+
+// taken from btree_logic.cpp
+Status dupKeyError(const BSONObj& key) {
+ StringBuilder sb;
+ sb << "E11000 duplicate key error ";
+ // sb << "index: " << _indexName << " "; // TODO
+ sb << "dup key: " << key;
+ return Status(ErrorCodes::DuplicateKey, sb.str());
+}
+
+bool isDup(const IndexSet& data, const BSONObj& key, RecordId loc) {
+ const IndexSet::const_iterator it = data.find(IndexKeyEntry(key, RecordId()));
+ if (it == data.end())
+ return false;
- typedef std::set<IndexKeyEntry, IndexEntryComparison> IndexSet;
-
- // taken from btree_logic.cpp
- Status dupKeyError(const BSONObj& key) {
- StringBuilder sb;
- sb << "E11000 duplicate key error ";
- // sb << "index: " << _indexName << " "; // TODO
- sb << "dup key: " << key;
- return Status(ErrorCodes::DuplicateKey, sb.str());
+ // Not a dup if the entry is for the same loc.
+ return it->loc != loc;
+}
+
+class InMemoryBtreeBuilderImpl : public SortedDataBuilderInterface {
+public:
+ InMemoryBtreeBuilderImpl(IndexSet* data, long long* currentKeySize, bool dupsAllowed)
+ : _data(data),
+ _currentKeySize(currentKeySize),
+ _dupsAllowed(dupsAllowed),
+ _comparator(_data->key_comp()) {
+ invariant(_data->empty());
}
- bool isDup(const IndexSet& data, const BSONObj& key, RecordId loc) {
- const IndexSet::const_iterator it = data.find(IndexKeyEntry(key, RecordId()));
- if (it == data.end())
- return false;
+ Status addKey(const BSONObj& key, const RecordId& loc) {
+ // inserts should be in ascending (key, RecordId) order.
- // Not a dup if the entry is for the same loc.
- return it->loc != loc;
- }
-
- class InMemoryBtreeBuilderImpl : public SortedDataBuilderInterface {
- public:
- InMemoryBtreeBuilderImpl(IndexSet* data, long long* currentKeySize, bool dupsAllowed)
- : _data(data),
- _currentKeySize( currentKeySize ),
- _dupsAllowed(dupsAllowed),
- _comparator(_data->key_comp()) {
- invariant(_data->empty());
+ if (key.objsize() >= TempKeyMaxSize) {
+ return Status(ErrorCodes::KeyTooLong, "key too big");
}
- Status addKey(const BSONObj& key, const RecordId& loc) {
- // inserts should be in ascending (key, RecordId) order.
+ invariant(loc.isNormal());
+ invariant(!hasFieldNames(key));
- if ( key.objsize() >= TempKeyMaxSize ) {
- return Status(ErrorCodes::KeyTooLong, "key too big");
+ if (!_data->empty()) {
+ // Compare specified key with last inserted key, ignoring its RecordId
+ int cmp = _comparator.compare(IndexKeyEntry(key, RecordId()), *_last);
+ if (cmp < 0 || (_dupsAllowed && cmp == 0 && loc < _last->loc)) {
+ return Status(ErrorCodes::InternalError,
+ "expected ascending (key, RecordId) order in bulk builder");
+ } else if (!_dupsAllowed && cmp == 0 && loc != _last->loc) {
+ return dupKeyError(key);
}
+ }
- invariant(loc.isNormal());
- invariant(!hasFieldNames(key));
+ BSONObj owned = key.getOwned();
+ _last = _data->insert(_data->end(), IndexKeyEntry(owned, loc));
+ *_currentKeySize += key.objsize();
- if (!_data->empty()) {
- // Compare specified key with last inserted key, ignoring its RecordId
- int cmp = _comparator.compare(IndexKeyEntry(key, RecordId()), *_last);
- if (cmp < 0 || (_dupsAllowed && cmp == 0 && loc < _last->loc)) {
- return Status(ErrorCodes::InternalError,
- "expected ascending (key, RecordId) order in bulk builder");
- }
- else if (!_dupsAllowed && cmp == 0 && loc != _last->loc) {
- return dupKeyError(key);
- }
- }
+ return Status::OK();
+ }
- BSONObj owned = key.getOwned();
- _last = _data->insert(_data->end(), IndexKeyEntry(owned, loc));
- *_currentKeySize += key.objsize();
+private:
+ IndexSet* const _data;
+ long long* _currentKeySize;
+ const bool _dupsAllowed;
- return Status::OK();
- }
+ IndexEntryComparison _comparator; // used by the bulk builder to detect duplicate keys
+ IndexSet::const_iterator _last; // or (key, RecordId) ordering violations
+};
- private:
- IndexSet* const _data;
- long long* _currentKeySize;
- const bool _dupsAllowed;
+class InMemoryBtreeImpl : public SortedDataInterface {
+public:
+ InMemoryBtreeImpl(IndexSet* data) : _data(data) {
+ _currentKeySize = 0;
+ }
- IndexEntryComparison _comparator; // used by the bulk builder to detect duplicate keys
- IndexSet::const_iterator _last; // or (key, RecordId) ordering violations
- };
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
+ return new InMemoryBtreeBuilderImpl(_data, &_currentKeySize, dupsAllowed);
+ }
- class InMemoryBtreeImpl : public SortedDataInterface {
- public:
- InMemoryBtreeImpl(IndexSet* data)
- : _data(data) {
- _currentKeySize = 0;
+ virtual Status insert(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ invariant(loc.isNormal());
+ invariant(!hasFieldNames(key));
+
+ if (key.objsize() >= TempKeyMaxSize) {
+ string msg = mongoutils::str::stream()
+ << "InMemoryBtree::insert: key too large to index, failing " << ' ' << key.objsize()
+ << ' ' << key;
+ return Status(ErrorCodes::KeyTooLong, msg);
+ }
+
+ // TODO optimization: save the iterator from the dup-check to speed up insert
+ if (!dupsAllowed && isDup(*_data, key, loc))
+ return dupKeyError(key);
+
+ IndexKeyEntry entry(key.getOwned(), loc);
+ if (_data->insert(entry).second) {
+ _currentKeySize += key.objsize();
+ txn->recoveryUnit()->registerChange(new IndexChange(_data, entry, true));
}
+ return Status::OK();
+ }
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) {
- return new InMemoryBtreeBuilderImpl(_data, &_currentKeySize, dupsAllowed);
+ virtual void unindex(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ invariant(loc.isNormal());
+ invariant(!hasFieldNames(key));
+
+ IndexKeyEntry entry(key.getOwned(), loc);
+ const size_t numDeleted = _data->erase(entry);
+ invariant(numDeleted <= 1);
+ if (numDeleted == 1) {
+ _currentKeySize -= key.objsize();
+ txn->recoveryUnit()->registerChange(new IndexChange(_data, entry, false));
}
+ }
- virtual Status insert(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
+ virtual void fullValidate(OperationContext* txn,
+ bool full,
+ long long* numKeysOut,
+ BSONObjBuilder* output) const {
+ // TODO check invariants?
+ *numKeysOut = _data->size();
+ }
- invariant(loc.isNormal());
- invariant(!hasFieldNames(key));
+ virtual bool appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* output,
+ double scale) const {
+ return false;
+ }
- if ( key.objsize() >= TempKeyMaxSize ) {
- string msg = mongoutils::str::stream()
- << "InMemoryBtree::insert: key too large to index, failing "
- << ' ' << key.objsize() << ' ' << key;
- return Status(ErrorCodes::KeyTooLong, msg);
- }
+ virtual long long getSpaceUsedBytes(OperationContext* txn) const {
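+        // Estimate: key bytes accumulated at insert/unindex time plus sizeof(IndexKeyEntry)
+        // of bookkeeping per stored entry.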
+ return _currentKeySize + (sizeof(IndexKeyEntry) * _data->size());
+ }
- // TODO optimization: save the iterator from the dup-check to speed up insert
- if (!dupsAllowed && isDup(*_data, key, loc))
- return dupKeyError(key);
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
+ invariant(!hasFieldNames(key));
+ if (isDup(*_data, key, loc))
+ return dupKeyError(key);
+ return Status::OK();
+ }
- IndexKeyEntry entry(key.getOwned(), loc);
- if ( _data->insert(entry).second ) {
- _currentKeySize += key.objsize();
- txn->recoveryUnit()->registerChange(new IndexChange(_data, entry, true));
- }
- return Status::OK();
- }
+ virtual bool isEmpty(OperationContext* txn) {
+ return _data->empty();
+ }
- virtual void unindex(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
- invariant(loc.isNormal());
- invariant(!hasFieldNames(key));
-
- IndexKeyEntry entry(key.getOwned(), loc);
- const size_t numDeleted = _data->erase(entry);
- invariant(numDeleted <= 1);
- if ( numDeleted == 1 ) {
- _currentKeySize -= key.objsize();
- txn->recoveryUnit()->registerChange(new IndexChange(_data, entry, false));
- }
- }
+ virtual Status touch(OperationContext* txn) const {
+ // already in memory...
+ return Status::OK();
+ }
- virtual void fullValidate(OperationContext* txn, bool full, long long *numKeysOut,
- BSONObjBuilder* output) const {
- // TODO check invariants?
- *numKeysOut = _data->size();
- }
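+    // Cursor over the shared in-memory index set. Position is tracked with a set iterator,
+    // plus saved key/loc state so the cursor can re-seek after save/restore.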
+ class Cursor final : public SortedDataInterface::Cursor {
+ public:
+ Cursor(OperationContext* txn, const IndexSet& data, bool isForward)
+ : _txn(txn), _data(data), _forward(isForward), _it(data.end()) {}
- virtual bool appendCustomStats(OperationContext* txn, BSONObjBuilder* output, double scale)
- const {
- return false;
- }
+ boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
+ if (_lastMoveWasRestore) {
+ // Return current position rather than advancing.
+ _lastMoveWasRestore = false;
+ } else {
+ advance();
+ if (atEndPoint())
+ _isEOF = true;
+ }
- virtual long long getSpaceUsedBytes( OperationContext* txn ) const {
- return _currentKeySize + ( sizeof(IndexKeyEntry) * _data->size() );
+ if (_isEOF)
+ return {};
+ return *_it;
}
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
- invariant(!hasFieldNames(key));
- if (isDup(*_data, key, loc))
- return dupKeyError(key);
- return Status::OK();
+ void setEndPosition(const BSONObj& key, bool inclusive) override {
+ if (key.isEmpty()) {
+ // This means scan to end of index.
+ _endState = {};
+ return;
+ }
+
+        // NOTE: this uses the opposite min/max rules from a normal seek because a forward
+        // scan should land after the key if inclusive and before if exclusive.
+ _endState = EndState(stripFieldNames(key),
+ _forward == inclusive ? RecordId::max() : RecordId::min());
+ seekEndCursor();
}
- virtual bool isEmpty(OperationContext* txn) {
- return _data->empty();
+ boost::optional<IndexKeyEntry> seek(const BSONObj& key,
+ bool inclusive,
+ RequestedInfo parts) override {
+ const BSONObj query = stripFieldNames(key);
+ locate(query, _forward == inclusive ? RecordId::min() : RecordId::max());
+ _lastMoveWasRestore = false;
+ if (_isEOF)
+ return {};
+ dassert(inclusive ? compareKeys(_it->key, query) >= 0
+ : compareKeys(_it->key, query) > 0);
+ return *_it;
}
- virtual Status touch(OperationContext* txn) const{
- // already in memory...
- return Status::OK();
+ boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
+ RequestedInfo parts) override {
+ // Query encodes exclusive case so it can be treated as an inclusive query.
+ const BSONObj query = IndexEntryComparison::makeQueryObject(seekPoint, _forward);
+ locate(query, _forward ? RecordId::min() : RecordId::max());
+ _lastMoveWasRestore = false;
+ if (_isEOF)
+ return {};
+ dassert(compareKeys(_it->key, query) >= 0);
+ return *_it;
}
- class Cursor final : public SortedDataInterface::Cursor {
- public:
- Cursor(OperationContext* txn, const IndexSet& data, bool isForward)
- : _txn(txn),
- _data(data),
- _forward(isForward),
- _it(data.end())
- {}
-
- boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
- if (_lastMoveWasRestore) {
- // Return current position rather than advancing.
- _lastMoveWasRestore = false;
- }
- else {
- advance();
- if (atEndPoint()) _isEOF = true;
- }
+ void savePositioned() override {
+ // Keep original position if we haven't moved since the last restore.
+ _txn = nullptr;
+ if (_lastMoveWasRestore)
+ return;
- if (_isEOF) return {};
- return *_it;
- }
-
- void setEndPosition(const BSONObj& key, bool inclusive) override {
- if (key.isEmpty()) {
- // This means scan to end of index.
- _endState = {};
- return;
- }
-
- // NOTE: this uses the opposite min/max rules as a normal seek because a forward
- // scan should land after the key if inclusive and before if exclusive.
- _endState = EndState(stripFieldNames(key),
- _forward == inclusive ? RecordId::max() : RecordId::min());
- seekEndCursor();
+ if (_isEOF) {
+ saveUnpositioned();
+ return;
}
- boost::optional<IndexKeyEntry> seek(const BSONObj& key, bool inclusive,
- RequestedInfo parts) override {
- const BSONObj query = stripFieldNames(key);
- locate(query, _forward == inclusive ? RecordId::min() : RecordId::max());
- _lastMoveWasRestore = false;
- if (_isEOF) return {};
- dassert(inclusive ? compareKeys(_it->key, query) >= 0
- : compareKeys(_it->key, query) > 0);
- return *_it;
- }
+ _savedAtEnd = false;
+ _savedKey = _it->key.getOwned();
+ _savedLoc = _it->loc;
+ // Doing nothing with end cursor since it will do full reseek on restore.
+ }
- boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
- RequestedInfo parts) override {
- // Query encodes exclusive case so it can be treated as an inclusive query.
- const BSONObj query = IndexEntryComparison::makeQueryObject(seekPoint, _forward);
- locate(query, _forward ? RecordId::min() : RecordId::max());
- _lastMoveWasRestore = false;
- if (_isEOF) return {};
- dassert(compareKeys(_it->key, query) >= 0);
- return *_it;
- }
+ void saveUnpositioned() override {
+ _txn = nullptr;
+ _savedAtEnd = true;
+ // Doing nothing with end cursor since it will do full reseek on restore.
+ }
- void savePositioned() override {
- // Keep original position if we haven't moved since the last restore.
- _txn = nullptr;
- if (_lastMoveWasRestore) return;
+ void restore(OperationContext* txn) override {
+ _txn = txn;
- if (_isEOF) {
- saveUnpositioned();
- return;
- }
+ // Always do a full seek on restore. We cannot use our last position since index
+ // entries may have been inserted closer to our endpoint and we would need to move
+ // over them.
+ seekEndCursor();
- _savedAtEnd = false;
- _savedKey = _it->key.getOwned();
- _savedLoc = _it->loc;
- // Doing nothing with end cursor since it will do full reseek on restore.
+ if (_savedAtEnd) {
+ _isEOF = true;
+ return;
}
- void saveUnpositioned() override {
- _txn = nullptr;
- _savedAtEnd = true;
- // Doing nothing with end cursor since it will do full reseek on restore.
- }
+ // Need to find our position from the root.
+ locate(_savedKey, _savedLoc);
- void restore(OperationContext* txn) override {
- _txn = txn;
+ _lastMoveWasRestore = _isEOF // We weren't EOF but now are.
+ || _data.value_comp().compare(*_it, {_savedKey, _savedLoc}) != 0;
+ }
- // Always do a full seek on restore. We cannot use our last position since index
- // entries may have been inserted closer to our endpoint and we would need to move
- // over them.
- seekEndCursor();
+ private:
+ bool atEndPoint() const {
+ return _endState && _it == _endState->it;
+ }
- if (_savedAtEnd) {
+ // Advances once in the direction of the scan, updating _isEOF as needed.
+ // Does nothing if already _isEOF.
+ void advance() {
+ if (_isEOF)
+ return;
+ if (_forward) {
+ if (_it != _data.end())
+ ++_it;
+ if (_it == _data.end() || atEndPoint())
_isEOF = true;
- return;
+ } else {
+ if (_it == _data.begin() || _data.empty()) {
+ _isEOF = true;
+ } else {
+ --_it;
}
-
- // Need to find our position from the root.
- locate(_savedKey, _savedLoc);
-
- _lastMoveWasRestore = _isEOF // We weren't EOF but now are.
- || _data.value_comp().compare(*_it, {_savedKey, _savedLoc}) != 0;
+ if (atEndPoint())
+ _isEOF = true;
}
+ }
- private:
- bool atEndPoint() const {
- return _endState && _it == _endState->it;
+ bool atOrPastEndPointAfterSeeking() const {
+ if (_isEOF)
+ return true;
+ if (!_endState)
+ return false;
+
+ const int cmp = _data.value_comp().compare(*_it, _endState->query);
+
+ // We set up _endState->query to be in between the last in-range value and the first
+ // out-of-range value. In particular, it is constructed to never equal any legal
+ // index key.
+ dassert(cmp != 0);
+
+ if (_forward) {
+ // We may have landed after the end point.
+ return cmp > 0;
+ } else {
+ // We may have landed before the end point.
+ return cmp < 0;
}
+ }
- // Advances once in the direction of the scan, updating _isEOF as needed.
- // Does nothing if already _isEOF.
- void advance() {
- if (_isEOF) return;
- if (_forward) {
- if (_it != _data.end()) ++_it;
- if (_it == _data.end() || atEndPoint()) _isEOF = true;
- }
- else {
- if (_it == _data.begin() || _data.empty()) {
- _isEOF = true;
- }
- else {
- --_it;
- }
- if (atEndPoint()) _isEOF = true;
- }
+ void locate(const BSONObj& key, const RecordId& loc) {
+ _isEOF = false;
+ const auto query = IndexKeyEntry(key, loc);
+ _it = _data.lower_bound(query);
+ if (_forward) {
+ if (_it == _data.end())
+ _isEOF = true;
+ } else {
+ // lower_bound lands us on or after query. Reverse cursors must be on or before.
+ if (_it == _data.end() || _data.value_comp().compare(*_it, query) > 0)
+ advance(); // sets _isEOF if there is nothing more to return.
}
- bool atOrPastEndPointAfterSeeking() const {
- if (_isEOF) return true;
- if (!_endState) return false;
-
- const int cmp = _data.value_comp().compare(*_it, _endState->query);
+ if (atOrPastEndPointAfterSeeking())
+ _isEOF = true;
+ }
- // We set up _endState->query to be in between the last in-range value and the first
- // out-of-range value. In particular, it is constructed to never equal any legal
- // index key.
- dassert(cmp != 0);
+ // Returns comparison relative to direction of scan. If rhs would be seen later, returns
+ // a positive value.
+ int compareKeys(const BSONObj& lhs, const BSONObj& rhs) const {
+ int cmp = _data.value_comp().compare({lhs, RecordId()}, {rhs, RecordId()});
+ return _forward ? cmp : -cmp;
+ }
- if (_forward) {
- // We may have landed after the end point.
- return cmp > 0;
- }
- else {
- // We may have landed before the end point.
- return cmp < 0;
+ void seekEndCursor() {
+ if (!_endState || _data.empty())
+ return;
+
+ auto it = _data.lower_bound(_endState->query);
+ if (!_forward) {
+ // lower_bound lands us on or after query. Reverse cursors must be on or before.
+ if (it == _data.end() || _data.value_comp().compare(*it, _endState->query) > 0) {
+ if (it == _data.begin()) {
+ it = _data.end(); // all existing data in range.
+ } else {
+ --it;
+ }
}
}
- void locate(const BSONObj& key, const RecordId& loc) {
- _isEOF = false;
- const auto query = IndexKeyEntry(key, loc);
- _it = _data.lower_bound(query);
- if (_forward) {
- if (_it == _data.end()) _isEOF = true;
- }
- else {
- // lower_bound lands us on or after query. Reverse cursors must be on or before.
- if (_it == _data.end() || _data.value_comp().compare(*_it, query) > 0)
- advance(); // sets _isEOF if there is nothing more to return.
- }
+ if (it != _data.end())
+ dassert(compareKeys(it->key, _endState->query.key) >= 0);
+ _endState->it = it;
+ }
- if (atOrPastEndPointAfterSeeking()) _isEOF = true;
- }
+ OperationContext* _txn; // not owned
+ const IndexSet& _data;
+ const bool _forward;
+ bool _isEOF = true;
+ IndexSet::const_iterator _it;
- // Returns comparison relative to direction of scan. If rhs would be seen later, returns
- // a positive value.
- int compareKeys(const BSONObj& lhs, const BSONObj& rhs) const {
- int cmp = _data.value_comp().compare({lhs, RecordId()}, {rhs, RecordId()});
- return _forward ? cmp : -cmp;
- }
+ struct EndState {
+ EndState(BSONObj key, RecordId loc) : query(std::move(key), loc) {}
- void seekEndCursor() {
- if (!_endState || _data.empty()) return;
-
- auto it = _data.lower_bound(_endState->query);
- if (!_forward) {
- // lower_bound lands us on or after query. Reverse cursors must be on or before.
- if (it == _data.end() || _data.value_comp().compare(*it,
- _endState->query) > 0) {
- if (it == _data.begin()) {
- it = _data.end(); // all existing data in range.
- }
- else {
- --it;
- }
- }
- }
+ IndexKeyEntry query;
+ IndexSet::const_iterator it;
+ };
+ boost::optional<EndState> _endState;
- if (it != _data.end()) dassert(compareKeys(it->key, _endState->query.key) >= 0);
- _endState->it = it;
- }
+ // Used by next to decide to return current position rather than moving. Should be reset
+ // to false by any operation that moves the cursor, other than subsequent save/restore
+ // pairs.
+ bool _lastMoveWasRestore = false;
- OperationContext* _txn; // not owned
- const IndexSet& _data;
- const bool _forward;
- bool _isEOF = true;
- IndexSet::const_iterator _it;
-
- struct EndState {
- EndState(BSONObj key, RecordId loc) : query(std::move(key), loc) {}
-
- IndexKeyEntry query;
- IndexSet::const_iterator it;
- };
- boost::optional<EndState> _endState;
-
- // Used by next to decide to return current position rather than moving. Should be reset
- // to false by any operation that moves the cursor, other than subsequent save/restore
- // pairs.
- bool _lastMoveWasRestore = false;
-
- // For save/restore since _it may be invalidated during a yield.
- bool _savedAtEnd = false;
- BSONObj _savedKey;
- RecordId _savedLoc;
- };
+ // For save/restore since _it may be invalidated during a yield.
+ bool _savedAtEnd = false;
+ BSONObj _savedKey;
+ RecordId _savedLoc;
+ };
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(
- OperationContext* txn,
- bool isForward) const {
- return stdx::make_unique<Cursor>(txn, *_data, isForward);
- }
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ bool isForward) const {
+ return stdx::make_unique<Cursor>(txn, *_data, isForward);
+ }
- virtual Status initAsEmpty(OperationContext* txn) {
- // No-op
- return Status::OK();
+ virtual Status initAsEmpty(OperationContext* txn) {
+ // No-op
+ return Status::OK();
+ }
+
+private:
+ class IndexChange : public RecoveryUnit::Change {
+ public:
+ IndexChange(IndexSet* data, const IndexKeyEntry& entry, bool insert)
+ : _data(data), _entry(entry), _insert(insert) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
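+        // Undo the in-memory mutation: a rolled-back insert is erased again, and a
+        // rolled-back unindex is re-inserted.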
+ if (_insert)
+ _data->erase(_entry);
+ else
+ _data->insert(_entry);
}
private:
- class IndexChange : public RecoveryUnit::Change {
- public:
- IndexChange(IndexSet* data, const IndexKeyEntry& entry, bool insert)
- : _data(data), _entry(entry), _insert(insert)
- {}
-
- virtual void commit() {}
- virtual void rollback() {
- if (_insert)
- _data->erase(_entry);
- else
- _data->insert(_entry);
- }
-
- private:
- IndexSet* _data;
- const IndexKeyEntry _entry;
- const bool _insert;
- };
-
IndexSet* _data;
- long long _currentKeySize;
+ const IndexKeyEntry _entry;
+ const bool _insert;
};
-} // namespace
-
- // IndexCatalogEntry argument taken by non-const pointer for consistency with other Btree
- // factories. We don't actually modify it.
- SortedDataInterface* getInMemoryBtreeImpl(const Ordering& ordering,
- std::shared_ptr<void>* dataInOut) {
- invariant(dataInOut);
- if (!*dataInOut) {
- *dataInOut = std::make_shared<IndexSet>(IndexEntryComparison(ordering));
- }
- return new InMemoryBtreeImpl(static_cast<IndexSet*>(dataInOut->get()));
+
+ IndexSet* _data;
+ long long _currentKeySize;
+};
+} // namespace
+
+// IndexCatalogEntry argument taken by non-const pointer for consistency with other Btree
+// factories. We don't actually modify it.
+SortedDataInterface* getInMemoryBtreeImpl(const Ordering& ordering,
+ std::shared_ptr<void>* dataInOut) {
+ invariant(dataInOut);
+ if (!*dataInOut) {
+ *dataInOut = std::make_shared<IndexSet>(IndexEntryComparison(ordering));
}
+ return new InMemoryBtreeImpl(static_cast<IndexSet*>(dataInOut->get()));
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/in_memory/in_memory_btree_impl.h b/src/mongo/db/storage/in_memory/in_memory_btree_impl.h
index ee318312c78..ed330d40d10 100644
--- a/src/mongo/db/storage/in_memory/in_memory_btree_impl.h
+++ b/src/mongo/db/storage/in_memory/in_memory_btree_impl.h
@@ -35,13 +35,13 @@
namespace mongo {
- class IndexCatalogEntry;
-
- /**
- * Caller takes ownership.
- * All permanent data will be stored and fetch from dataInOut.
- */
- SortedDataInterface* getInMemoryBtreeImpl(const Ordering& ordering,
- std::shared_ptr<void>* dataInOut);
+class IndexCatalogEntry;
+
+/**
+ * Caller takes ownership.
+ * All permanent data will be stored and fetched from dataInOut.
+ */
+SortedDataInterface* getInMemoryBtreeImpl(const Ordering& ordering,
+ std::shared_ptr<void>* dataInOut);
} // namespace mongo
diff --git a/src/mongo/db/storage/in_memory/in_memory_btree_impl_test.cpp b/src/mongo/db/storage/in_memory/in_memory_btree_impl_test.cpp
index 867a093b3e0..719e187d548 100644
--- a/src/mongo/db/storage/in_memory/in_memory_btree_impl_test.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_btree_impl_test.cpp
@@ -38,27 +38,24 @@
namespace mongo {
- class InMemoryHarnessHelper final : public HarnessHelper {
- public:
- InMemoryHarnessHelper()
- : _order( Ordering::make( BSONObj() ) ) {
- }
+class InMemoryHarnessHelper final : public HarnessHelper {
+public:
+ InMemoryHarnessHelper() : _order(Ordering::make(BSONObj())) {}
- std::unique_ptr<SortedDataInterface> newSortedDataInterface( bool unique ) final {
- return std::unique_ptr<SortedDataInterface>(getInMemoryBtreeImpl(_order, &_data));
- }
-
- std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
- return stdx::make_unique<InMemoryRecoveryUnit>();
- }
-
- private:
- std::shared_ptr<void> _data; // used by InMemoryBtreeImpl
- Ordering _order;
- };
+ std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique) final {
+ return std::unique_ptr<SortedDataInterface>(getInMemoryBtreeImpl(_order, &_data));
+ }
- std::unique_ptr<HarnessHelper> newHarnessHelper() {
- return stdx::make_unique<InMemoryHarnessHelper>();
+ std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
+ return stdx::make_unique<InMemoryRecoveryUnit>();
}
+private:
+ std::shared_ptr<void> _data; // used by InMemoryBtreeImpl
+ Ordering _order;
+};
+
+std::unique_ptr<HarnessHelper> newHarnessHelper() {
+ return stdx::make_unique<InMemoryHarnessHelper>();
+}
}
diff --git a/src/mongo/db/storage/in_memory/in_memory_engine.cpp b/src/mongo/db/storage/in_memory/in_memory_engine.cpp
index 395d002b26a..46a12c28b4c 100644
--- a/src/mongo/db/storage/in_memory/in_memory_engine.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_engine.cpp
@@ -37,70 +37,66 @@
namespace mongo {
- RecoveryUnit* InMemoryEngine::newRecoveryUnit() {
- return new InMemoryRecoveryUnit();
- }
+RecoveryUnit* InMemoryEngine::newRecoveryUnit() {
+ return new InMemoryRecoveryUnit();
+}
- Status InMemoryEngine::createRecordStore(OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options) {
- // All work done in getRecordStore
- return Status::OK();
- }
+Status InMemoryEngine::createRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) {
+ // All work done in getRecordStore
+ return Status::OK();
+}
- RecordStore* InMemoryEngine::getRecordStore(OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (options.capped) {
- return new InMemoryRecordStore(ns,
- &_dataMap[ident],
- true,
- options.cappedSize ? options.cappedSize : 4096,
- options.cappedMaxDocs ? options.cappedMaxDocs : -1);
- }
- else {
- return new InMemoryRecordStore(ns, &_dataMap[ident]);
- }
+RecordStore* InMemoryEngine::getRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
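+    // Capped stores default to a 4096-byte size limit and no document-count limit when
+    // the options do not specify them.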
+ if (options.capped) {
+ return new InMemoryRecordStore(ns,
+ &_dataMap[ident],
+ true,
+ options.cappedSize ? options.cappedSize : 4096,
+ options.cappedMaxDocs ? options.cappedMaxDocs : -1);
+ } else {
+ return new InMemoryRecordStore(ns, &_dataMap[ident]);
}
+}
- Status InMemoryEngine::createSortedDataInterface(OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc) {
-
- // All work done in getSortedDataInterface
- return Status::OK();
- }
+Status InMemoryEngine::createSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc) {
+ // All work done in getSortedDataInterface
+ return Status::OK();
+}
- SortedDataInterface* InMemoryEngine::getSortedDataInterface(OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- return getInMemoryBtreeImpl(Ordering::make(desc->keyPattern()), &_dataMap[ident]);
- }
+SortedDataInterface* InMemoryEngine::getSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ return getInMemoryBtreeImpl(Ordering::make(desc->keyPattern()), &_dataMap[ident]);
+}
- Status InMemoryEngine::dropIdent(OperationContext* opCtx,
- StringData ident) {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- _dataMap.erase(ident);
- return Status::OK();
- }
+Status InMemoryEngine::dropIdent(OperationContext* opCtx, StringData ident) {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ _dataMap.erase(ident);
+ return Status::OK();
+}
- int64_t InMemoryEngine::getIdentSize( OperationContext* opCtx,
- StringData ident ) {
- return 1;
- }
+int64_t InMemoryEngine::getIdentSize(OperationContext* opCtx, StringData ident) {
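+    // Per-ident sizes are not tracked by this engine, so report a nominal constant.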
+ return 1;
+}
- std::vector<std::string> InMemoryEngine::getAllIdents( OperationContext* opCtx ) const {
- std::vector<std::string> all;
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- for ( DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it ) {
- all.push_back( it->first );
- }
+std::vector<std::string> InMemoryEngine::getAllIdents(OperationContext* opCtx) const {
+ std::vector<std::string> all;
+ {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ for (DataMap::const_iterator it = _dataMap.begin(); it != _dataMap.end(); ++it) {
+ all.push_back(it->first);
}
- return all;
}
+ return all;
+}
}
diff --git a/src/mongo/db/storage/in_memory/in_memory_engine.h b/src/mongo/db/storage/in_memory/in_memory_engine.h
index c7e527ec2f7..55f9c463055 100644
--- a/src/mongo/db/storage/in_memory/in_memory_engine.h
+++ b/src/mongo/db/storage/in_memory/in_memory_engine.h
@@ -36,60 +36,64 @@
namespace mongo {
- class InMemoryEngine : public KVEngine {
- public:
- virtual RecoveryUnit* newRecoveryUnit();
+class InMemoryEngine : public KVEngine {
+public:
+ virtual RecoveryUnit* newRecoveryUnit();
- virtual Status createRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options );
+ virtual Status createRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options);
- virtual RecordStore* getRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options );
+ virtual RecordStore* getRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options);
- virtual Status createSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc );
+ virtual Status createSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc);
- virtual SortedDataInterface* getSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc );
+ virtual SortedDataInterface* getSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc);
- virtual Status dropIdent( OperationContext* opCtx,
- StringData ident );
+ virtual Status dropIdent(OperationContext* opCtx, StringData ident);
- virtual bool supportsDocLocking() const { return false; }
+ virtual bool supportsDocLocking() const {
+ return false;
+ }
- virtual bool supportsDirectoryPerDB() const { return false; }
+ virtual bool supportsDirectoryPerDB() const {
+ return false;
+ }
- /**
- * This is sort of strange since "durable" has no meaning...
- */
- virtual bool isDurable() const { return true; }
+ /**
+ * This is sort of strange since "durable" has no meaning...
+ */
+ virtual bool isDurable() const {
+ return true;
+ }
- virtual int64_t getIdentSize( OperationContext* opCtx,
- StringData ident );
+ virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident);
- virtual Status repairIdent( OperationContext* opCtx,
- StringData ident ) {
- return Status::OK();
- }
+ virtual Status repairIdent(OperationContext* opCtx, StringData ident) {
+ return Status::OK();
+ }
- virtual void cleanShutdown() {};
+    virtual void cleanShutdown() {}
- virtual bool hasIdent(OperationContext* opCtx, StringData ident) const {
- return _dataMap.find(ident) != _dataMap.end();;
- }
+    virtual bool hasIdent(OperationContext* opCtx, StringData ident) const {
+        return _dataMap.find(ident) != _dataMap.end();
+    }
- std::vector<std::string> getAllIdents( OperationContext* opCtx ) const;
- private:
- typedef StringMap<std::shared_ptr<void> > DataMap;
+ std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
- mutable stdx::mutex _mutex;
- DataMap _dataMap; // All actual data is owned in here
- };
+private:
+ typedef StringMap<std::shared_ptr<void>> DataMap;
+ mutable stdx::mutex _mutex;
+ DataMap _dataMap; // All actual data is owned in here
+};
}
diff --git a/src/mongo/db/storage/in_memory/in_memory_engine_test.cpp b/src/mongo/db/storage/in_memory/in_memory_engine_test.cpp
index ec31394baae..1427810637f 100644
--- a/src/mongo/db/storage/in_memory/in_memory_engine_test.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_engine_test.cpp
@@ -33,23 +33,25 @@
namespace mongo {
- class InMemoryKVHarnessHelper : public KVHarnessHelper {
- public:
- InMemoryKVHarnessHelper() : _engine( new InMemoryEngine()) {}
-
- virtual KVEngine* restartEngine() {
- // Intentionally not restarting since the in-memory storage engine
- // does not persist data across restarts
- return _engine.get();
- }
+class InMemoryKVHarnessHelper : public KVHarnessHelper {
+public:
+ InMemoryKVHarnessHelper() : _engine(new InMemoryEngine()) {}
+
+ virtual KVEngine* restartEngine() {
+ // Intentionally not restarting since the in-memory storage engine
+ // does not persist data across restarts
+ return _engine.get();
+ }
- virtual KVEngine* getEngine() { return _engine.get(); }
+ virtual KVEngine* getEngine() {
+ return _engine.get();
+ }
- private:
- std::unique_ptr<InMemoryEngine> _engine;
- };
+private:
+ std::unique_ptr<InMemoryEngine> _engine;
+};
- KVHarnessHelper* KVHarnessHelper::create() {
- return new InMemoryKVHarnessHelper();
- }
+KVHarnessHelper* KVHarnessHelper::create() {
+ return new InMemoryKVHarnessHelper();
+}
}
diff --git a/src/mongo/db/storage/in_memory/in_memory_init.cpp b/src/mongo/db/storage/in_memory/in_memory_init.cpp
index bca7d60ef6c..c3f36f0224a 100644
--- a/src/mongo/db/storage/in_memory/in_memory_init.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_init.cpp
@@ -37,41 +37,39 @@
namespace mongo {
- namespace {
+namespace {
- class InMemoryFactory : public StorageEngine::Factory {
- public:
- virtual ~InMemoryFactory() { }
- virtual StorageEngine* create(const StorageGlobalParams& params,
- const StorageEngineLockFile& lockFile) const {
- KVStorageEngineOptions options;
- options.directoryPerDB = params.directoryperdb;
- options.forRepair = params.repair;
- return new KVStorageEngine(new InMemoryEngine(), options);
- }
-
- virtual StringData getCanonicalName() const {
- return "inMemoryExperiment";
- }
+class InMemoryFactory : public StorageEngine::Factory {
+public:
+ virtual ~InMemoryFactory() {}
+ virtual StorageEngine* create(const StorageGlobalParams& params,
+ const StorageEngineLockFile& lockFile) const {
+ KVStorageEngineOptions options;
+ options.directoryPerDB = params.directoryperdb;
+ options.forRepair = params.repair;
+ return new KVStorageEngine(new InMemoryEngine(), options);
+ }
- virtual Status validateMetadata(const StorageEngineMetadata& metadata,
- const StorageGlobalParams& params) const {
- return Status::OK();
- }
+ virtual StringData getCanonicalName() const {
+ return "inMemoryExperiment";
+ }
- virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
- return BSONObj();
- }
- };
+ virtual Status validateMetadata(const StorageEngineMetadata& metadata,
+ const StorageGlobalParams& params) const {
+ return Status::OK();
+ }
- } // namespace
+ virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
+ return BSONObj();
+ }
+};
- MONGO_INITIALIZER_WITH_PREREQUISITES(InMemoryEngineInit,
- ("SetGlobalEnvironment"))
- (InitializerContext* context) {
+} // namespace
- getGlobalServiceContext()->registerStorageEngine("inMemoryExperiment", new InMemoryFactory());
- return Status::OK();
- }
+MONGO_INITIALIZER_WITH_PREREQUISITES(InMemoryEngineInit, ("SetGlobalEnvironment"))
+(InitializerContext* context) {
+ getGlobalServiceContext()->registerStorageEngine("inMemoryExperiment", new InMemoryFactory());
+ return Status::OK();
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/in_memory/in_memory_record_store.cpp b/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
index b0c583954f6..af596f7a569 100644
--- a/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_record_store.cpp
@@ -46,584 +46,569 @@
namespace mongo {
- using std::shared_ptr;
-
- class InMemoryRecordStore::InsertChange : public RecoveryUnit::Change {
- public:
- InsertChange(Data* data, RecordId loc) :_data(data), _loc(loc) {}
- virtual void commit() {}
- virtual void rollback() {
- Records::iterator it = _data->records.find(_loc);
- if (it != _data->records.end()) {
- _data->dataSize -= it->second.size;
- _data->records.erase(it);
- }
- }
-
- private:
- Data* const _data;
- const RecordId _loc;
- };
-
- // Works for both removes and updates
- class InMemoryRecordStore::RemoveChange : public RecoveryUnit::Change {
- public:
- RemoveChange(Data* data, RecordId loc, const InMemoryRecord& rec)
- :_data(data), _loc(loc), _rec(rec)
- {}
-
- virtual void commit() {}
- virtual void rollback() {
- Records::iterator it = _data->records.find(_loc);
- if (it != _data->records.end()) {
- _data->dataSize -= it->second.size;
- }
-
- _data->dataSize += _rec.size;
- _data->records[_loc] = _rec;
+using std::shared_ptr;
+
+class InMemoryRecordStore::InsertChange : public RecoveryUnit::Change {
+public:
+ InsertChange(Data* data, RecordId loc) : _data(data), _loc(loc) {}
+ virtual void commit() {}
+ virtual void rollback() {
+ Records::iterator it = _data->records.find(_loc);
+ if (it != _data->records.end()) {
+ _data->dataSize -= it->second.size;
+ _data->records.erase(it);
}
+ }
- private:
- Data* const _data;
- const RecordId _loc;
- const InMemoryRecord _rec;
- };
-
- class InMemoryRecordStore::TruncateChange : public RecoveryUnit::Change {
- public:
- TruncateChange(Data* data) : _data(data), _dataSize(0) {
- using std::swap;
- swap(_dataSize, _data->dataSize);
- swap(_records, _data->records);
+private:
+ Data* const _data;
+ const RecordId _loc;
+};
+
+// Works for both removes and updates
+class InMemoryRecordStore::RemoveChange : public RecoveryUnit::Change {
+public:
+ RemoveChange(Data* data, RecordId loc, const InMemoryRecord& rec)
+ : _data(data), _loc(loc), _rec(rec) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ Records::iterator it = _data->records.find(_loc);
+ if (it != _data->records.end()) {
+ _data->dataSize -= it->second.size;
}
- virtual void commit() {}
- virtual void rollback() {
- using std::swap;
- swap(_dataSize, _data->dataSize);
- swap(_records, _data->records);
- }
+ _data->dataSize += _rec.size;
+ _data->records[_loc] = _rec;
+ }
- private:
- Data* const _data;
- int64_t _dataSize;
- Records _records;
- };
-
- class InMemoryRecordStore::Cursor final : public RecordCursor {
- public:
- Cursor(OperationContext* txn, const InMemoryRecordStore& rs)
- : _txn(txn)
- , _records(rs._data->records)
- , _isCapped(rs.isCapped())
- {}
-
- boost::optional<Record> next() final {
- if (_needFirstSeek) {
- _needFirstSeek = false;
- _it = _records.begin();
- }
- else if (!_lastMoveWasRestore && _it != _records.end()) {
- ++_it;
- }
- _lastMoveWasRestore = false;
+private:
+ Data* const _data;
+ const RecordId _loc;
+ const InMemoryRecord _rec;
+};
+
+class InMemoryRecordStore::TruncateChange : public RecoveryUnit::Change {
+public:
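+    // Swaps the store's contents out on construction; rollback() swaps them back in,
+    // while commit() is a no-op and lets the captured contents be discarded.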
+ TruncateChange(Data* data) : _data(data), _dataSize(0) {
+ using std::swap;
+ swap(_dataSize, _data->dataSize);
+ swap(_records, _data->records);
+ }
- if (_it == _records.end()) return {};
- return {{_it->first, _it->second.toRecordData()}};
- }
+ virtual void commit() {}
+ virtual void rollback() {
+ using std::swap;
+ swap(_dataSize, _data->dataSize);
+ swap(_records, _data->records);
+ }
- boost::optional<Record> seekExact(const RecordId& id) final {
- _lastMoveWasRestore = false;
- _needFirstSeek = false;
- _it = _records.find(id);
- if (_it == _records.end()) return {};
- return {{_it->first, _it->second.toRecordData()}};
- }
+private:
+ Data* const _data;
+ int64_t _dataSize;
+ Records _records;
+};
- void savePositioned() final {
- _txn = nullptr;
- if (!_needFirstSeek && !_lastMoveWasRestore)
- _savedId = _it == _records.end() ? RecordId() : _it->first;
- }
+class InMemoryRecordStore::Cursor final : public RecordCursor {
+public:
+ Cursor(OperationContext* txn, const InMemoryRecordStore& rs)
+ : _txn(txn), _records(rs._data->records), _isCapped(rs.isCapped()) {}
- void saveUnpositioned() final {
- _txn = nullptr;
- _savedId = RecordId();
+ boost::optional<Record> next() final {
+ if (_needFirstSeek) {
+ _needFirstSeek = false;
+ _it = _records.begin();
+ } else if (!_lastMoveWasRestore && _it != _records.end()) {
+ ++_it;
}
+ _lastMoveWasRestore = false;
- bool restore(OperationContext* txn) final {
- _txn = txn;
- if (_savedId.isNull()) {
- _it = _records.end();
- return true;
- }
+ if (_it == _records.end())
+ return {};
+ return {{_it->first, _it->second.toRecordData()}};
+ }
- _it = _records.lower_bound(_savedId);
- _lastMoveWasRestore = _it == _records.end() || _it->first != _savedId;
+ boost::optional<Record> seekExact(const RecordId& id) final {
+ _lastMoveWasRestore = false;
+ _needFirstSeek = false;
+ _it = _records.find(id);
+ if (_it == _records.end())
+ return {};
+ return {{_it->first, _it->second.toRecordData()}};
+ }
- // Capped iterators die on invalidation rather than advancing.
- return !(_isCapped && _lastMoveWasRestore);
- }
+ void savePositioned() final {
+ _txn = nullptr;
+ if (!_needFirstSeek && !_lastMoveWasRestore)
+ _savedId = _it == _records.end() ? RecordId() : _it->first;
+ }
- private:
- unowned_ptr<OperationContext> _txn;
- Records::const_iterator _it;
- bool _needFirstSeek = true;
- bool _lastMoveWasRestore = false;
- RecordId _savedId; // Location to restore() to. Null means EOF.
-
- const InMemoryRecordStore::Records& _records;
- const bool _isCapped;
- };
-
- class InMemoryRecordStore::ReverseCursor final : public RecordCursor {
- public:
- ReverseCursor(OperationContext* txn, const InMemoryRecordStore& rs)
- : _txn(txn)
- , _records(rs._data->records)
- , _isCapped(rs.isCapped())
- {}
-
- boost::optional<Record> next() final {
- if (_needFirstSeek) {
- _needFirstSeek = false;
- _it = _records.rbegin();
- }
- else if (!_lastMoveWasRestore && _it != _records.rend()) {
- ++_it;
- }
- _lastMoveWasRestore = false;
+ void saveUnpositioned() final {
+ _txn = nullptr;
+ _savedId = RecordId();
+ }
- if (_it == _records.rend()) return {};
- return {{_it->first, _it->second.toRecordData()}};
+ bool restore(OperationContext* txn) final {
+ _txn = txn;
+ if (_savedId.isNull()) {
+ _it = _records.end();
+ return true;
}
- boost::optional<Record> seekExact(const RecordId& id) final {
- _lastMoveWasRestore = false;
- _needFirstSeek = false;
-
- auto forwardIt = _records.find(id);
- if (forwardIt == _records.end()) {
- _it = _records.rend();
- return {};
- }
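+        // Re-seek to the first record at or after the saved id. If that exact record is
+        // gone, flag the move so next() returns the new position instead of skipping it.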
+ _it = _records.lower_bound(_savedId);
+ _lastMoveWasRestore = _it == _records.end() || _it->first != _savedId;
- // The reverse_iterator will point to the preceding element, so increment the base
- // iterator to make it point past the found element.
- ++forwardIt;
- _it = Records::const_reverse_iterator(forwardIt);
- dassert(_it != _records.rend());
- dassert(_it->first == id);
- return {{_it->first, _it->second.toRecordData()}};
- }
-
- void savePositioned() final {
- _txn = nullptr;
- if (!_needFirstSeek && !_lastMoveWasRestore)
- _savedId = _it == _records.rend() ? RecordId() : _it->first;
- }
+ // Capped iterators die on invalidation rather than advancing.
+ return !(_isCapped && _lastMoveWasRestore);
+ }
- void saveUnpositioned() final {
- _txn = nullptr;
- _savedId = RecordId();
- }
+private:
+ unowned_ptr<OperationContext> _txn;
+ Records::const_iterator _it;
+ bool _needFirstSeek = true;
+ bool _lastMoveWasRestore = false;
+ RecordId _savedId; // Location to restore() to. Null means EOF.
- bool restore(OperationContext* txn) final {
- _txn = txn;
- if (_savedId.isNull()) {
- _it = _records.rend();
- return true;
- }
+ const InMemoryRecordStore::Records& _records;
+ const bool _isCapped;
+};
- // Note: upper_bound returns the first entry > _savedId and reverse_iterators
- // dereference to the element before their base iterator. This combine to make this
- // dereference to the first element <= _savedId which is what we want here.
- _it = Records::const_reverse_iterator(_records.upper_bound(_savedId));
- _lastMoveWasRestore = _it == _records.rend() || _it->first != _savedId;
+class InMemoryRecordStore::ReverseCursor final : public RecordCursor {
+public:
+ ReverseCursor(OperationContext* txn, const InMemoryRecordStore& rs)
+ : _txn(txn), _records(rs._data->records), _isCapped(rs.isCapped()) {}
- // Capped iterators die on invalidation rather than advancing.
- return !(_isCapped && _lastMoveWasRestore);
+ boost::optional<Record> next() final {
+ if (_needFirstSeek) {
+ _needFirstSeek = false;
+ _it = _records.rbegin();
+ } else if (!_lastMoveWasRestore && _it != _records.rend()) {
+ ++_it;
}
+ _lastMoveWasRestore = false;
- private:
- unowned_ptr<OperationContext> _txn;
- Records::const_reverse_iterator _it;
- bool _needFirstSeek = true;
- bool _lastMoveWasRestore = false;
- RecordId _savedId; // Location to restore() to. Null means EOF.
- const InMemoryRecordStore::Records& _records;
- const bool _isCapped;
- };
+ if (_it == _records.rend())
+ return {};
+ return {{_it->first, _it->second.toRecordData()}};
+ }
+ boost::optional<Record> seekExact(const RecordId& id) final {
+ _lastMoveWasRestore = false;
+ _needFirstSeek = false;
- //
- // RecordStore
- //
-
- InMemoryRecordStore::InMemoryRecordStore(StringData ns,
- std::shared_ptr<void>* dataInOut,
- bool isCapped,
- int64_t cappedMaxSize,
- int64_t cappedMaxDocs,
- CappedDocumentDeleteCallback* cappedDeleteCallback)
- : RecordStore(ns),
- _isCapped(isCapped),
- _cappedMaxSize(cappedMaxSize),
- _cappedMaxDocs(cappedMaxDocs),
- _cappedDeleteCallback(cappedDeleteCallback),
- _data(*dataInOut ? static_cast<Data*>(dataInOut->get())
- : new Data(NamespaceString::oplog(ns))) {
- if (!*dataInOut) {
- dataInOut->reset(_data); // takes ownership
+ auto forwardIt = _records.find(id);
+ if (forwardIt == _records.end()) {
+ _it = _records.rend();
+ return {};
}
- if (_isCapped) {
- invariant(_cappedMaxSize > 0);
- invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
- }
- else {
- invariant(_cappedMaxSize == -1);
- invariant(_cappedMaxDocs == -1);
- }
+ // The reverse_iterator will point to the preceding element, so increment the base
+ // iterator to make it point past the found element.
+ ++forwardIt;
+ _it = Records::const_reverse_iterator(forwardIt);
+ dassert(_it != _records.rend());
+ dassert(_it->first == id);
+ return {{_it->first, _it->second.toRecordData()}};
}
- const char* InMemoryRecordStore::name() const { return "InMemory"; }
-
- RecordData InMemoryRecordStore::dataFor( OperationContext* txn, const RecordId& loc ) const {
- return recordFor(loc)->toRecordData();
+ void savePositioned() final {
+ _txn = nullptr;
+ if (!_needFirstSeek && !_lastMoveWasRestore)
+ _savedId = _it == _records.rend() ? RecordId() : _it->first;
}
- const InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(
- const RecordId& loc) const {
- Records::const_iterator it = _data->records.find(loc);
- if ( it == _data->records.end() ) {
- error() << "InMemoryRecordStore::recordFor cannot find record for " << ns()
- << ":" << loc;
- }
- invariant(it != _data->records.end());
- return &it->second;
+ void saveUnpositioned() final {
+ _txn = nullptr;
+ _savedId = RecordId();
}
- InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(const RecordId& loc) {
- Records::iterator it = _data->records.find(loc);
- if ( it == _data->records.end() ) {
- error() << "InMemoryRecordStore::recordFor cannot find record for " << ns()
- << ":" << loc;
+ bool restore(OperationContext* txn) final {
+ _txn = txn;
+ if (_savedId.isNull()) {
+ _it = _records.rend();
+ return true;
}
- invariant(it != _data->records.end());
- return &it->second;
- }
- bool InMemoryRecordStore::findRecord( OperationContext* txn,
- const RecordId& loc, RecordData* rd ) const {
- Records::const_iterator it = _data->records.find(loc);
- if ( it == _data->records.end() ) {
- return false;
- }
- *rd = it->second.toRecordData();
- return true;
+ // Note: upper_bound returns the first entry > _savedId and reverse_iterators
+ // dereference to the element before their base iterator. This combine to make this
+        // dereference to the element before their base iterator. These combine to make
+        // this dereference to the first element <= _savedId, which is what we want here.
+ _lastMoveWasRestore = _it == _records.rend() || _it->first != _savedId;
+
+ // Capped iterators die on invalidation rather than advancing.
+ return !(_isCapped && _lastMoveWasRestore);
}
- void InMemoryRecordStore::deleteRecord(OperationContext* txn, const RecordId& loc) {
- InMemoryRecord* rec = recordFor(loc);
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *rec));
- _data->dataSize -= rec->size;
- invariant(_data->records.erase(loc) == 1);
+private:
+ unowned_ptr<OperationContext> _txn;
+ Records::const_reverse_iterator _it;
+ bool _needFirstSeek = true;
+ bool _lastMoveWasRestore = false;
+ RecordId _savedId; // Location to restore() to. Null means EOF.
+ const InMemoryRecordStore::Records& _records;
+ const bool _isCapped;
+};
+
+
+//
+// RecordStore
+//
+
+InMemoryRecordStore::InMemoryRecordStore(StringData ns,
+ std::shared_ptr<void>* dataInOut,
+ bool isCapped,
+ int64_t cappedMaxSize,
+ int64_t cappedMaxDocs,
+ CappedDocumentDeleteCallback* cappedDeleteCallback)
+ : RecordStore(ns),
+ _isCapped(isCapped),
+ _cappedMaxSize(cappedMaxSize),
+ _cappedMaxDocs(cappedMaxDocs),
+ _cappedDeleteCallback(cappedDeleteCallback),
+ _data(*dataInOut ? static_cast<Data*>(dataInOut->get())
+ : new Data(NamespaceString::oplog(ns))) {
+ if (!*dataInOut) {
+ dataInOut->reset(_data); // takes ownership
}
- bool InMemoryRecordStore::cappedAndNeedDelete(OperationContext* txn) const {
- if (!_isCapped)
- return false;
+ if (_isCapped) {
+ invariant(_cappedMaxSize > 0);
+ invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
+ } else {
+ invariant(_cappedMaxSize == -1);
+ invariant(_cappedMaxDocs == -1);
+ }
+}
- if (_data->dataSize > _cappedMaxSize)
- return true;
+const char* InMemoryRecordStore::name() const {
+ return "InMemory";
+}
- if ((_cappedMaxDocs != -1) && (numRecords(txn) > _cappedMaxDocs))
- return true;
+RecordData InMemoryRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const {
+ return recordFor(loc)->toRecordData();
+}
+const InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(
+ const RecordId& loc) const {
+ Records::const_iterator it = _data->records.find(loc);
+ if (it == _data->records.end()) {
+ error() << "InMemoryRecordStore::recordFor cannot find record for " << ns() << ":" << loc;
+ }
+ invariant(it != _data->records.end());
+ return &it->second;
+}
+
+InMemoryRecordStore::InMemoryRecord* InMemoryRecordStore::recordFor(const RecordId& loc) {
+ Records::iterator it = _data->records.find(loc);
+ if (it == _data->records.end()) {
+ error() << "InMemoryRecordStore::recordFor cannot find record for " << ns() << ":" << loc;
+ }
+ invariant(it != _data->records.end());
+ return &it->second;
+}
+
+bool InMemoryRecordStore::findRecord(OperationContext* txn,
+ const RecordId& loc,
+ RecordData* rd) const {
+ Records::const_iterator it = _data->records.find(loc);
+ if (it == _data->records.end()) {
return false;
}
+ *rd = it->second.toRecordData();
+ return true;
+}
+
+void InMemoryRecordStore::deleteRecord(OperationContext* txn, const RecordId& loc) {
+ InMemoryRecord* rec = recordFor(loc);
+ txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *rec));
+ _data->dataSize -= rec->size;
+ invariant(_data->records.erase(loc) == 1);
+}
+
+bool InMemoryRecordStore::cappedAndNeedDelete(OperationContext* txn) const {
+ if (!_isCapped)
+ return false;
- void InMemoryRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
- while (cappedAndNeedDelete(txn)) {
- invariant(!_data->records.empty());
+ if (_data->dataSize > _cappedMaxSize)
+ return true;
- Records::iterator oldest = _data->records.begin();
- RecordId id = oldest->first;
- RecordData data = oldest->second.toRecordData();
+ if ((_cappedMaxDocs != -1) && (numRecords(txn) > _cappedMaxDocs))
+ return true;
- if (_cappedDeleteCallback)
- uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, id, data));
+ return false;
+}
- deleteRecord(txn, id);
- }
- }
+void InMemoryRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
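+    // Evict the oldest records (lowest RecordId, at the front of the map) until the
+    // capped size and document limits are satisfied again.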
+ while (cappedAndNeedDelete(txn)) {
+ invariant(!_data->records.empty());
- StatusWith<RecordId> InMemoryRecordStore::extractAndCheckLocForOplog(const char* data,
- int len) const {
- StatusWith<RecordId> status = oploghack::extractKey(data, len);
- if (!status.isOK())
- return status;
+ Records::iterator oldest = _data->records.begin();
+ RecordId id = oldest->first;
+ RecordData data = oldest->second.toRecordData();
- if (!_data->records.empty() && status.getValue() <= _data->records.rbegin()->first)
- return StatusWith<RecordId>(ErrorCodes::BadValue, "ts not higher than highest");
+ if (_cappedDeleteCallback)
+ uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, id, data));
- return status;
+ deleteRecord(txn, id);
}
+}
- StatusWith<RecordId> InMemoryRecordStore::insertRecord(OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota) {
- if (_isCapped && len > _cappedMaxSize) {
- // We use dataSize for capped rollover and we don't want to delete everything if we know
- // this won't fit.
- return StatusWith<RecordId>(ErrorCodes::BadValue,
- "object to insert exceeds cappedMaxSize");
- }
-
- InMemoryRecord rec(len);
- memcpy(rec.data.get(), data, len);
-
- RecordId loc;
- if (_data->isOplog) {
- StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
- if (!status.isOK())
- return status;
- loc = status.getValue();
- }
- else {
- loc = allocateLoc();
- }
+StatusWith<RecordId> InMemoryRecordStore::extractAndCheckLocForOplog(const char* data,
+ int len) const {
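+    // Oplog records are keyed by their timestamp (extracted by oploghack::extractKey);
+    // reject any insert whose key is not strictly greater than the current highest RecordId.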
+ StatusWith<RecordId> status = oploghack::extractKey(data, len);
+ if (!status.isOK())
+ return status;
- txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
- _data->dataSize += len;
- _data->records[loc] = rec;
+ if (!_data->records.empty() && status.getValue() <= _data->records.rbegin()->first)
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts not higher than highest");
- cappedDeleteAsNeeded(txn);
+ return status;
+}
- return StatusWith<RecordId>(loc);
+StatusWith<RecordId> InMemoryRecordStore::insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota) {
+ if (_isCapped && len > _cappedMaxSize) {
+ // We use dataSize for capped rollover and we don't want to delete everything if we know
+ // this won't fit.
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
}
- StatusWith<RecordId> InMemoryRecordStore::insertRecord(OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota) {
- const int len = doc->documentSize();
- if (_isCapped && len > _cappedMaxSize) {
- // We use dataSize for capped rollover and we don't want to delete everything if we know
- // this won't fit.
- return StatusWith<RecordId>(ErrorCodes::BadValue,
- "object to insert exceeds cappedMaxSize");
- }
-
- InMemoryRecord rec(len);
- doc->writeDocument(rec.data.get());
-
- RecordId loc;
- if (_data->isOplog) {
- StatusWith<RecordId> status = extractAndCheckLocForOplog(rec.data.get(), len);
- if (!status.isOK())
- return status;
- loc = status.getValue();
- }
- else {
- loc = allocateLoc();
- }
+ InMemoryRecord rec(len);
+ memcpy(rec.data.get(), data, len);
- txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
- _data->dataSize += len;
- _data->records[loc] = rec;
-
- cappedDeleteAsNeeded(txn);
-
- return StatusWith<RecordId>(loc);
+ RecordId loc;
+ if (_data->isOplog) {
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
+ if (!status.isOK())
+ return status;
+ loc = status.getValue();
+ } else {
+ loc = allocateLoc();
}
- StatusWith<RecordId> InMemoryRecordStore::updateRecord(OperationContext* txn,
- const RecordId& loc,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier ) {
- InMemoryRecord* oldRecord = recordFor( loc );
- int oldLen = oldRecord->size;
-
- if (_isCapped && len > oldLen) {
- return StatusWith<RecordId>( ErrorCodes::InternalError,
- "failing update: objects in a capped ns cannot grow",
- 10003 );
- }
-
- if (notifier) {
- // The in-memory KV engine uses the invalidation framework (does not support
- // doc-locking), and therefore must notify that it is updating a document.
- Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, loc);
- if (!callbackStatus.isOK()) {
- return StatusWith<RecordId>(callbackStatus);
- }
- }
+ txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
+ _data->dataSize += len;
+ _data->records[loc] = rec;
- InMemoryRecord newRecord(len);
- memcpy(newRecord.data.get(), data, len);
+ cappedDeleteAsNeeded(txn);
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
- _data->dataSize += len - oldLen;
- *oldRecord = newRecord;
+ return StatusWith<RecordId>(loc);
+}
- cappedDeleteAsNeeded(txn);
-
- return StatusWith<RecordId>(loc);
+StatusWith<RecordId> InMemoryRecordStore::insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota) {
+ const int len = doc->documentSize();
+ if (_isCapped && len > _cappedMaxSize) {
+ // We use dataSize for capped rollover and we don't want to delete everything if we know
+ // this won't fit.
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
}
- bool InMemoryRecordStore::updateWithDamagesSupported() const {
- // TODO: Currently the UpdateStage assumes that updateWithDamages will apply the
- // damages directly to the unowned BSONObj containing the record to be modified.
- // The implementation of updateWithDamages() below copies the old record to a
- // a new one and then applies the damages.
- //
- // We should be able to enable updateWithDamages() here once this assumption is
- // relaxed.
- return false;
- }
+ InMemoryRecord rec(len);
+ doc->writeDocument(rec.data.get());
- Status InMemoryRecordStore::updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages ) {
- InMemoryRecord* oldRecord = recordFor( loc );
- const int len = oldRecord->size;
-
- InMemoryRecord newRecord(len);
- memcpy(newRecord.data.get(), oldRecord->data.get(), len);
-
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
- *oldRecord = newRecord;
-
- cappedDeleteAsNeeded(txn);
-
- char* root = newRecord.data.get();
- mutablebson::DamageVector::const_iterator where = damages.begin();
- const mutablebson::DamageVector::const_iterator end = damages.end();
- for( ; where != end; ++where ) {
- const char* sourcePtr = damageSource + where->sourceOffset;
- char* targetPtr = root + where->targetOffset;
- std::memcpy(targetPtr, sourcePtr, where->size);
- }
+ RecordId loc;
+ if (_data->isOplog) {
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(rec.data.get(), len);
+ if (!status.isOK())
+ return status;
+ loc = status.getValue();
+ } else {
+ loc = allocateLoc();
+ }
- *oldRecord = newRecord;
+ txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
+ _data->dataSize += len;
+ _data->records[loc] = rec;
- return Status::OK();
- }
+ cappedDeleteAsNeeded(txn);
- std::unique_ptr<RecordCursor> InMemoryRecordStore::getCursor(OperationContext* txn,
- bool forward) const {
+ return StatusWith<RecordId>(loc);
+}
- if (forward) return stdx::make_unique<Cursor>(txn, *this);
- return stdx::make_unique<ReverseCursor>(txn, *this);
- }
+StatusWith<RecordId> InMemoryRecordStore::updateRecord(OperationContext* txn,
+ const RecordId& loc,
+ const char* data,
+ int len,
+ bool enforceQuota,
+ UpdateNotifier* notifier) {
+ InMemoryRecord* oldRecord = recordFor(loc);
+ int oldLen = oldRecord->size;
- Status InMemoryRecordStore::truncate(OperationContext* txn) {
- // Unlike other changes, TruncateChange mutates _data on construction to perform the
- // truncate
- txn->recoveryUnit()->registerChange(new TruncateChange(_data));
- return Status::OK();
+ if (_isCapped && len > oldLen) {
+ return StatusWith<RecordId>(
+ ErrorCodes::InternalError, "failing update: objects in a capped ns cannot grow", 10003);
}
- void InMemoryRecordStore::temp_cappedTruncateAfter(OperationContext* txn,
- RecordId end,
- bool inclusive) {
- Records::iterator it = inclusive ? _data->records.lower_bound(end)
- : _data->records.upper_bound(end);
- while(it != _data->records.end()) {
- txn->recoveryUnit()->registerChange(new RemoveChange(_data, it->first, it->second));
- _data->dataSize -= it->second.size;
- _data->records.erase(it++);
+ if (notifier) {
+ // The in-memory KV engine uses the invalidation framework (does not support
+ // doc-locking), and therefore must notify that it is updating a document.
+ Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, loc);
+ if (!callbackStatus.isOK()) {
+ return StatusWith<RecordId>(callbackStatus);
}
}
- Status InMemoryRecordStore::validate(OperationContext* txn,
- bool full,
- bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results,
- BSONObjBuilder* output) {
- results->valid = true;
- if (scanData && full) {
- for (Records::const_iterator it = _data->records.begin();
- it != _data->records.end(); ++it) {
- const InMemoryRecord& rec = it->second;
- size_t dataSize;
- const Status status = adaptor->validate(rec.toRecordData(), &dataSize);
- if (!status.isOK()) {
- results->valid = false;
- results->errors.push_back("invalid object detected (see logs)");
- log() << "Invalid object detected in " << _ns << ": " << status.reason();
- }
- }
- }
+ InMemoryRecord newRecord(len);
+ memcpy(newRecord.data.get(), data, len);
- output->appendNumber( "nrecords", _data->records.size() );
+ txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
+ _data->dataSize += len - oldLen;
+ *oldRecord = newRecord;
- return Status::OK();
+ cappedDeleteAsNeeded(txn);
- }
+ return StatusWith<RecordId>(loc);
+}
- void InMemoryRecordStore::appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const {
- result->appendBool( "capped", _isCapped );
- if ( _isCapped ) {
- result->appendIntOrLL( "max", _cappedMaxDocs );
- result->appendIntOrLL( "maxSize", _cappedMaxSize / scale );
- }
+bool InMemoryRecordStore::updateWithDamagesSupported() const {
+ // TODO: Currently the UpdateStage assumes that updateWithDamages will apply the
+ // damages directly to the unowned BSONObj containing the record to be modified.
+    // The implementation of updateWithDamages() below copies the old record to
+    // a new one and then applies the damages.
+ //
+ // We should be able to enable updateWithDamages() here once this assumption is
+ // relaxed.
+ return false;
+}
+
+Status InMemoryRecordStore::updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages) {
+ InMemoryRecord* oldRecord = recordFor(loc);
+ const int len = oldRecord->size;
+
+ InMemoryRecord newRecord(len);
+ memcpy(newRecord.data.get(), oldRecord->data.get(), len);
+
+ txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
+ *oldRecord = newRecord;
+
+ cappedDeleteAsNeeded(txn);
+
+ char* root = newRecord.data.get();
+ mutablebson::DamageVector::const_iterator where = damages.begin();
+ const mutablebson::DamageVector::const_iterator end = damages.end();
+ for (; where != end; ++where) {
+ const char* sourcePtr = damageSource + where->sourceOffset;
+ char* targetPtr = root + where->targetOffset;
+ std::memcpy(targetPtr, sourcePtr, where->size);
}
- Status InMemoryRecordStore::touch(OperationContext* txn, BSONObjBuilder* output) const {
- if (output) {
- output->append("numRanges", 1);
- output->append("millis", 0);
+ *oldRecord = newRecord;
+
+ return Status::OK();
+}
+
+std::unique_ptr<RecordCursor> InMemoryRecordStore::getCursor(OperationContext* txn,
+ bool forward) const {
+ if (forward)
+ return stdx::make_unique<Cursor>(txn, *this);
+ return stdx::make_unique<ReverseCursor>(txn, *this);
+}
+
+Status InMemoryRecordStore::truncate(OperationContext* txn) {
+ // Unlike other changes, TruncateChange mutates _data on construction to perform the
+ // truncate
+ txn->recoveryUnit()->registerChange(new TruncateChange(_data));
+ return Status::OK();
+}
+
+void InMemoryRecordStore::temp_cappedTruncateAfter(OperationContext* txn,
+ RecordId end,
+ bool inclusive) {
+ Records::iterator it =
+ inclusive ? _data->records.lower_bound(end) : _data->records.upper_bound(end);
+ while (it != _data->records.end()) {
+ txn->recoveryUnit()->registerChange(new RemoveChange(_data, it->first, it->second));
+ _data->dataSize -= it->second.size;
+ _data->records.erase(it++);
+ }
+}
+
+Status InMemoryRecordStore::validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output) {
+ results->valid = true;
+ if (scanData && full) {
+ for (Records::const_iterator it = _data->records.begin(); it != _data->records.end();
+ ++it) {
+ const InMemoryRecord& rec = it->second;
+ size_t dataSize;
+ const Status status = adaptor->validate(rec.toRecordData(), &dataSize);
+ if (!status.isOK()) {
+ results->valid = false;
+ results->errors.push_back("invalid object detected (see logs)");
+ log() << "Invalid object detected in " << _ns << ": " << status.reason();
+ }
}
- return Status::OK();
}
- void InMemoryRecordStore::increaseStorageSize(OperationContext* txn,
- int size, bool enforceQuota) {
- // unclear what this would mean for this class. For now, just error if called.
- invariant(!"increaseStorageSize not yet implemented");
- }
+ output->appendNumber("nrecords", _data->records.size());
- int64_t InMemoryRecordStore::storageSize(OperationContext* txn,
- BSONObjBuilder* extraInfo,
- int infoLevel) const {
- // Note: not making use of extraInfo or infoLevel since we don't have extents
- const int64_t recordOverhead = numRecords(txn) * sizeof(InMemoryRecord);
- return _data->dataSize + recordOverhead;
- }
+ return Status::OK();
+}
- RecordId InMemoryRecordStore::allocateLoc() {
- RecordId out = RecordId(_data->nextId++);
- invariant(out < RecordId::max());
- return out;
+void InMemoryRecordStore::appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const {
+ result->appendBool("capped", _isCapped);
+ if (_isCapped) {
+ result->appendIntOrLL("max", _cappedMaxDocs);
+ result->appendIntOrLL("maxSize", _cappedMaxSize / scale);
}
+}
- boost::optional<RecordId> InMemoryRecordStore::oplogStartHack(
- OperationContext* txn,
- const RecordId& startingPosition) const {
-
- if (!_data->isOplog)
- return boost::none;
-
- const Records& records = _data->records;
-
- if (records.empty())
- return RecordId();
-
- Records::const_iterator it = records.lower_bound(startingPosition);
- if (it == records.end() || it->first > startingPosition)
- --it;
-
- return it->first;
+Status InMemoryRecordStore::touch(OperationContext* txn, BSONObjBuilder* output) const {
+ if (output) {
+ output->append("numRanges", 1);
+ output->append("millis", 0);
}
-
-} // namespace mongo
+ return Status::OK();
+}
+
+void InMemoryRecordStore::increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
+ // unclear what this would mean for this class. For now, just error if called.
+ invariant(!"increaseStorageSize not yet implemented");
+}
+
+int64_t InMemoryRecordStore::storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo,
+ int infoLevel) const {
+ // Note: not making use of extraInfo or infoLevel since we don't have extents
+ const int64_t recordOverhead = numRecords(txn) * sizeof(InMemoryRecord);
+ return _data->dataSize + recordOverhead;
+}
+
+RecordId InMemoryRecordStore::allocateLoc() {
+ RecordId out = RecordId(_data->nextId++);
+ invariant(out < RecordId::max());
+ return out;
+}
+
+boost::optional<RecordId> InMemoryRecordStore::oplogStartHack(
+ OperationContext* txn, const RecordId& startingPosition) const {
+ if (!_data->isOplog)
+ return boost::none;
+
+ const Records& records = _data->records;
+
+ if (records.empty())
+ return RecordId();
+
+ Records::const_iterator it = records.lower_bound(startingPosition);
+ if (it == records.end() || it->first > startingPosition)
+ --it;
+
+ return it->first;
+}
+
+} // namespace mongo
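The updateWithDamages() path above copies the old record into a new buffer and then replays each damage entry as a raw byte copy. Below is a minimal standalone sketch of that copy-then-patch loop; the Damage struct, buffers, and offsets are simplified stand-ins for illustration, not the real mutablebson::DamageVector types.

#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Simplified stand-in for a damage event: copy `size` bytes from
// `sourceOffset` in the damage source into `targetOffset` of the record copy.
struct Damage {
    size_t sourceOffset;
    size_t targetOffset;
    size_t size;
};

int main() {
    std::string record = "hello world";       // the "old" record contents
    std::string copy = record;                 // copy first, then patch the copy
    const char* damageSource = "HW";           // replacement bytes
    std::vector<Damage> damages = {{0, 0, 1},  // 'H' -> offset 0
                                   {1, 6, 1}}; // 'W' -> offset 6

    for (const Damage& d : damages) {
        std::memcpy(&copy[d.targetOffset], damageSource + d.sourceOffset, d.size);
    }
    std::cout << copy << "\n";  // prints "Hello World"
    return 0;
}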
diff --git a/src/mongo/db/storage/in_memory/in_memory_record_store.h b/src/mongo/db/storage/in_memory/in_memory_record_store.h
index 53df7883758..bd241555394 100644
--- a/src/mongo/db/storage/in_memory/in_memory_record_store.h
+++ b/src/mongo/db/storage/in_memory/in_memory_record_store.h
@@ -38,151 +38,164 @@
namespace mongo {
- /**
- * A RecordStore that stores all data in-memory.
- *
- * @param cappedMaxSize - required if isCapped. limit uses dataSize() in this impl.
- */
- class InMemoryRecordStore : public RecordStore {
- public:
- explicit InMemoryRecordStore(StringData ns,
- std::shared_ptr<void>* dataInOut,
- bool isCapped = false,
- int64_t cappedMaxSize = -1,
- int64_t cappedMaxDocs = -1,
- CappedDocumentDeleteCallback* cappedDeleteCallback = NULL);
-
- virtual const char* name() const;
-
- virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
-
- virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* rd ) const;
-
- virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota );
+/**
+ * A RecordStore that stores all data in-memory.
+ *
+ * @param cappedMaxSize - required if isCapped. limit uses dataSize() in this impl.
+ */
+class InMemoryRecordStore : public RecordStore {
+public:
+ explicit InMemoryRecordStore(StringData ns,
+ std::shared_ptr<void>* dataInOut,
+ bool isCapped = false,
+ int64_t cappedMaxSize = -1,
+ int64_t cappedMaxDocs = -1,
+ CappedDocumentDeleteCallback* cappedDeleteCallback = NULL);
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota );
+ virtual const char* name() const;
- virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
- const RecordId& oldLocation,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier );
+ virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
- virtual bool updateWithDamagesSupported() const;
+ virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const;
- virtual Status updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages );
+ virtual void deleteRecord(OperationContext* txn, const RecordId& dl);
- std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final;
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota);
- virtual Status truncate( OperationContext* txn );
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota);
- virtual void temp_cappedTruncateAfter( OperationContext* txn, RecordId end, bool inclusive );
+ virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* data,
+ int len,
+ bool enforceQuota,
+ UpdateNotifier* notifier);
- virtual Status validate( OperationContext* txn,
- bool full,
- bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results, BSONObjBuilder* output );
+ virtual bool updateWithDamagesSupported() const;
- virtual void appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const;
+ virtual Status updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages);
- virtual Status touch( OperationContext* txn, BSONObjBuilder* output ) const;
+ std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final;
- virtual void increaseStorageSize( OperationContext* txn, int size, bool enforceQuota );
+ virtual Status truncate(OperationContext* txn);
- virtual int64_t storageSize( OperationContext* txn,
- BSONObjBuilder* extraInfo = NULL,
- int infoLevel = 0) const;
+ virtual void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
- virtual long long dataSize( OperationContext* txn ) const { return _data->dataSize; }
+ virtual Status validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output);
- virtual long long numRecords( OperationContext* txn ) const {
- return _data->records.size();
- }
+ virtual void appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const;
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
- const RecordId& startingPosition) const;
+ virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
- virtual void updateStatsAfterRepair(OperationContext* txn,
- long long numRecords,
- long long dataSize) {
- invariant(_data->records.size() == size_t(numRecords));
- _data->dataSize = dataSize;
- }
+ virtual void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota);
- protected:
- struct InMemoryRecord {
- InMemoryRecord() :size(0) {}
- InMemoryRecord(int size) :size(size), data(new char[size]) {}
+ virtual int64_t storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int infoLevel = 0) const;
- RecordData toRecordData() const { return RecordData(data.get(), size); }
+ virtual long long dataSize(OperationContext* txn) const {
+ return _data->dataSize;
+ }
- int size;
- boost::shared_array<char> data;
- };
+ virtual long long numRecords(OperationContext* txn) const {
+ return _data->records.size();
+ }
- virtual const InMemoryRecord* recordFor( const RecordId& loc ) const;
- virtual InMemoryRecord* recordFor( const RecordId& loc );
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const;
- public:
- //
- // Not in RecordStore interface
- //
+ virtual void updateStatsAfterRepair(OperationContext* txn,
+ long long numRecords,
+ long long dataSize) {
+ invariant(_data->records.size() == size_t(numRecords));
+ _data->dataSize = dataSize;
+ }
- typedef std::map<RecordId, InMemoryRecord> Records;
+protected:
+ struct InMemoryRecord {
+ InMemoryRecord() : size(0) {}
+ InMemoryRecord(int size) : size(size), data(new char[size]) {}
- bool isCapped() const { return _isCapped; }
- void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
- _cappedDeleteCallback = cb;
+ RecordData toRecordData() const {
+ return RecordData(data.get(), size);
}
- bool cappedMaxDocs() const { invariant(_isCapped); return _cappedMaxDocs; }
- bool cappedMaxSize() const { invariant(_isCapped); return _cappedMaxSize; }
- private:
- class InsertChange;
- class RemoveChange;
- class TruncateChange;
-
- class Cursor;
- class ReverseCursor;
-
- StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len) const;
-
- RecordId allocateLoc();
- bool cappedAndNeedDelete(OperationContext* txn) const;
- void cappedDeleteAsNeeded(OperationContext* txn);
-
- // TODO figure out a proper solution to metadata
- const bool _isCapped;
- const int64_t _cappedMaxSize;
- const int64_t _cappedMaxDocs;
- CappedDocumentDeleteCallback* _cappedDeleteCallback;
-
- // This is the "persistent" data.
- struct Data {
- Data(bool isOplog) :dataSize(0), nextId(1), isOplog(isOplog) {}
-
- int64_t dataSize;
- Records records;
- int64_t nextId;
- const bool isOplog;
- };
+ int size;
+ boost::shared_array<char> data;
+ };
- Data* const _data;
+ virtual const InMemoryRecord* recordFor(const RecordId& loc) const;
+ virtual InMemoryRecord* recordFor(const RecordId& loc);
+
+public:
+ //
+ // Not in RecordStore interface
+ //
+
+ typedef std::map<RecordId, InMemoryRecord> Records;
+
+ bool isCapped() const {
+ return _isCapped;
+ }
+ void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
+ _cappedDeleteCallback = cb;
+ }
+ bool cappedMaxDocs() const {
+ invariant(_isCapped);
+ return _cappedMaxDocs;
+ }
+ bool cappedMaxSize() const {
+ invariant(_isCapped);
+ return _cappedMaxSize;
+ }
+
+private:
+ class InsertChange;
+ class RemoveChange;
+ class TruncateChange;
+
+ class Cursor;
+ class ReverseCursor;
+
+ StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len) const;
+
+ RecordId allocateLoc();
+ bool cappedAndNeedDelete(OperationContext* txn) const;
+ void cappedDeleteAsNeeded(OperationContext* txn);
+
+ // TODO figure out a proper solution to metadata
+ const bool _isCapped;
+ const int64_t _cappedMaxSize;
+ const int64_t _cappedMaxDocs;
+ CappedDocumentDeleteCallback* _cappedDeleteCallback;
+
+ // This is the "persistent" data.
+ struct Data {
+ Data(bool isOplog) : dataSize(0), nextId(1), isOplog(isOplog) {}
+
+ int64_t dataSize;
+ Records records;
+ int64_t nextId;
+ const bool isOplog;
};
-} // namespace mongo
+ Data* const _data;
+};
+
+} // namespace mongo
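The header above exposes cappedMaxSize, cappedMaxDocs, and a running dataSize counter, but the body of cappedDeleteAsNeeded() is not part of this hunk. As an illustration only, a capped store keyed by an ordered map can evict from the smallest RecordId until both limits hold again; the sketch below assumes that behavior and uses plain integers and strings rather than the real types.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<int64_t, std::string> records;  // RecordId -> payload (simplified)
    int64_t dataSize = 0;
    const int64_t cappedMaxSize = 10;  // bytes
    const int64_t cappedMaxDocs = 3;

    auto insert = [&](int64_t id, const std::string& payload) {
        records[id] = payload;
        dataSize += static_cast<int64_t>(payload.size());
        // Evict the oldest (smallest id) records until both limits are satisfied.
        while (dataSize > cappedMaxSize ||
               static_cast<int64_t>(records.size()) > cappedMaxDocs) {
            auto oldest = records.begin();
            dataSize -= static_cast<int64_t>(oldest->second.size());
            records.erase(oldest);
        }
    };

    insert(1, "aaaa");
    insert(2, "bbbb");
    insert(3, "cccc");  // 12 bytes total, so record 1 is evicted
    std::cout << records.size() << " records, " << dataSize << " bytes\n";
    return 0;
}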
diff --git a/src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp b/src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp
index 42138116da9..aedbb4484db 100644
--- a/src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp
@@ -37,24 +37,22 @@
namespace mongo {
- class InMemoryHarnessHelper : public HarnessHelper {
- public:
- InMemoryHarnessHelper() {
- }
+class InMemoryHarnessHelper : public HarnessHelper {
+public:
+ InMemoryHarnessHelper() {}
- virtual RecordStore* newNonCappedRecordStore() {
- return new InMemoryRecordStore( "a.b", &data );
- }
-
- virtual RecoveryUnit* newRecoveryUnit() {
- return new InMemoryRecoveryUnit();
- }
-
- std::shared_ptr<void> data;
- };
+ virtual RecordStore* newNonCappedRecordStore() {
+ return new InMemoryRecordStore("a.b", &data);
+ }
- HarnessHelper* newHarnessHelper() {
- return new InMemoryHarnessHelper();
+ virtual RecoveryUnit* newRecoveryUnit() {
+ return new InMemoryRecoveryUnit();
}
+ std::shared_ptr<void> data;
+};
+
+HarnessHelper* newHarnessHelper() {
+ return new InMemoryHarnessHelper();
+}
}
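The harness in this test file is what lets engine-agnostic record-store tests construct a store and a recovery unit without naming a concrete engine. A rough sketch of the same factory idea follows, using simplified stand-in interfaces rather than the real RecordStore and HarnessHelper classes.

#include <iostream>
#include <memory>

// Stand-in interfaces; the real classes carry many more methods.
struct RecordStore {
    virtual ~RecordStore() = default;
    virtual const char* name() const = 0;
};
struct InMemoryStore : RecordStore {
    const char* name() const override { return "in-memory"; }
};
struct HarnessHelper {
    virtual ~HarnessHelper() = default;
    virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() = 0;
};
struct InMemoryHarnessHelper : HarnessHelper {
    std::unique_ptr<RecordStore> newNonCappedRecordStore() override {
        return std::make_unique<InMemoryStore>();
    }
};

int main() {
    // A test body written against HarnessHelper runs unchanged on any engine.
    InMemoryHarnessHelper harness;
    auto rs = harness.newNonCappedRecordStore();
    std::cout << "created record store: " << rs->name() << "\n";
    return 0;
}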
diff --git a/src/mongo/db/storage/in_memory/in_memory_recovery_unit.cpp b/src/mongo/db/storage/in_memory/in_memory_recovery_unit.cpp
index 7ccf4574d47..80999a59305 100644
--- a/src/mongo/db/storage/in_memory/in_memory_recovery_unit.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_recovery_unit.cpp
@@ -37,30 +37,28 @@
namespace mongo {
- void InMemoryRecoveryUnit::commitUnitOfWork() {
- try {
- for (Changes::iterator it = _changes.begin(), end = _changes.end(); it != end; ++it) {
- (*it)->commit();
- }
- _changes.clear();
- }
- catch (...) {
- std::terminate();
+void InMemoryRecoveryUnit::commitUnitOfWork() {
+ try {
+ for (Changes::iterator it = _changes.begin(), end = _changes.end(); it != end; ++it) {
+ (*it)->commit();
}
+ _changes.clear();
+ } catch (...) {
+ std::terminate();
}
+}
- void InMemoryRecoveryUnit::abortUnitOfWork() {
- try {
- for (Changes::reverse_iterator it = _changes.rbegin(), end = _changes.rend();
- it != end; ++it) {
- ChangePtr change = *it;
- LOG(2) << "CUSTOM ROLLBACK " << demangleName(typeid(*change));
- change->rollback();
- }
- _changes.clear();
- }
- catch (...) {
- std::terminate();
+void InMemoryRecoveryUnit::abortUnitOfWork() {
+ try {
+ for (Changes::reverse_iterator it = _changes.rbegin(), end = _changes.rend(); it != end;
+ ++it) {
+ ChangePtr change = *it;
+ LOG(2) << "CUSTOM ROLLBACK " << demangleName(typeid(*change));
+ change->rollback();
}
+ _changes.clear();
+ } catch (...) {
+ std::terminate();
}
}
+}
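commitUnitOfWork() above applies registered Change objects in registration order, and abortUnitOfWork() rolls them back newest-first. A minimal sketch of that register-then-reverse-rollback pattern, with simplified types rather than the real RecoveryUnit interface:

#include <iostream>
#include <memory>
#include <vector>

struct Change {
    virtual ~Change() = default;
    virtual void commit() = 0;
    virtual void rollback() = 0;
};

struct LoggingChange : Change {
    explicit LoggingChange(int id) : id(id) {}
    void commit() override { std::cout << "commit " << id << "\n"; }
    void rollback() override { std::cout << "rollback " << id << "\n"; }
    int id;
};

int main() {
    std::vector<std::shared_ptr<Change>> changes;
    changes.push_back(std::make_shared<LoggingChange>(1));
    changes.push_back(std::make_shared<LoggingChange>(2));

    // Abort path: undo the newest change first so earlier state is restored last.
    for (auto it = changes.rbegin(); it != changes.rend(); ++it) {
        (*it)->rollback();
    }
    changes.clear();
    return 0;
}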
diff --git a/src/mongo/db/storage/in_memory/in_memory_recovery_unit.h b/src/mongo/db/storage/in_memory/in_memory_recovery_unit.h
index 2ef0552d58e..895e364ef04 100644
--- a/src/mongo/db/storage/in_memory/in_memory_recovery_unit.h
+++ b/src/mongo/db/storage/in_memory/in_memory_recovery_unit.h
@@ -37,37 +37,39 @@
namespace mongo {
- class SortedDataInterface;
+class SortedDataInterface;
- class InMemoryRecoveryUnit : public RecoveryUnit {
- public:
- void beginUnitOfWork(OperationContext* opCtx) final { };
- void commitUnitOfWork() final;
- void abortUnitOfWork() final;
+class InMemoryRecoveryUnit : public RecoveryUnit {
+public:
+ void beginUnitOfWork(OperationContext* opCtx) final{};
+ void commitUnitOfWork() final;
+ void abortUnitOfWork() final;
- virtual bool waitUntilDurable() {
- return true;
- }
+ virtual bool waitUntilDurable() {
+ return true;
+ }
- virtual void abandonSnapshot() {}
+ virtual void abandonSnapshot() {}
- virtual void registerChange(Change* change) {
- _changes.push_back(ChangePtr(change));
- }
+ virtual void registerChange(Change* change) {
+ _changes.push_back(ChangePtr(change));
+ }
- virtual void* writingPtr(void* data, size_t len) {
- invariant(!"don't call writingPtr");
- }
+ virtual void* writingPtr(void* data, size_t len) {
+ invariant(!"don't call writingPtr");
+ }
- virtual void setRollbackWritesDisabled() {}
+ virtual void setRollbackWritesDisabled() {}
- virtual SnapshotId getSnapshotId() const { return SnapshotId(); }
+ virtual SnapshotId getSnapshotId() const {
+ return SnapshotId();
+ }
- private:
- typedef std::shared_ptr<Change> ChangePtr;
- typedef std::vector<ChangePtr> Changes;
+private:
+ typedef std::shared_ptr<Change> ChangePtr;
+ typedef std::vector<ChangePtr> Changes;
- Changes _changes;
- };
+ Changes _changes;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/index_entry_comparison.cpp b/src/mongo/db/storage/index_entry_comparison.cpp
index 4a5d4fdab1a..41a1ae709e9 100644
--- a/src/mongo/db/storage/index_entry_comparison.cpp
+++ b/src/mongo/db/storage/index_entry_comparison.cpp
@@ -34,138 +34,134 @@
namespace mongo {
- std::ostream& operator<<(std::ostream& stream, const IndexKeyEntry& entry) {
- return stream << entry.key << '@' << entry.loc;
- }
-
- // Due to the limitations of various APIs, we need to use the same type (IndexKeyEntry)
- // for both the stored data and the "query". We cheat and encode extra information in the
- // first byte of the field names in the query. This works because all stored objects should
- // have all field names empty, so their first bytes are '\0'.
- enum BehaviorIfFieldIsEqual {
- normal = '\0',
- less = 'l',
- greater = 'g',
- };
-
- bool IndexEntryComparison::operator() (const IndexKeyEntry& lhs, const IndexKeyEntry& rhs)
- const {
- // implementing in memcmp style to ease reuse of this code.
- return compare(lhs, rhs) < 0;
- }
-
- // This should behave the same as customBSONCmp from btree_logic.cpp.
- //
- // Reading the comment in the .h file is highly recommended if you need to understand what this
- // function is doing
- int IndexEntryComparison::compare(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) const {
- BSONObjIterator lhsIt(lhs.key);
- BSONObjIterator rhsIt(rhs.key);
-
- // Iterate through both BSONObjects, comparing individual elements one by one
- for (unsigned mask = 1; lhsIt.more(); mask <<= 1) {
- if (!rhsIt.more())
- return _order.descending(mask) ? -1 : 1;
-
- const BSONElement l = lhsIt.next();
- const BSONElement r = rhsIt.next();
-
- if (int cmp = l.woCompare(r, /*compareFieldNames=*/false)) {
- if (cmp == std::numeric_limits<int>::min()) {
- // can't be negated
- cmp = -1;
- }
-
- return _order.descending(mask) ? -cmp : cmp;
- }
-
- // Here is where the weirdness begins. We sometimes want to fudge the comparison
- // when a key == the query to implement exclusive ranges.
- BehaviorIfFieldIsEqual lEqBehavior = BehaviorIfFieldIsEqual(l.fieldName()[0]);
- BehaviorIfFieldIsEqual rEqBehavior = BehaviorIfFieldIsEqual(r.fieldName()[0]);
-
- if (lEqBehavior) {
- // lhs is the query, rhs is the stored data
- invariant(rEqBehavior == normal);
- return lEqBehavior == less ? -1 : 1;
- }
-
- if (rEqBehavior) {
- // rhs is the query, lhs is the stored data, so reverse the returns
- invariant(lEqBehavior == normal);
- return rEqBehavior == less ? 1 : -1;
+std::ostream& operator<<(std::ostream& stream, const IndexKeyEntry& entry) {
+ return stream << entry.key << '@' << entry.loc;
+}
+
+// Due to the limitations of various APIs, we need to use the same type (IndexKeyEntry)
+// for both the stored data and the "query". We cheat and encode extra information in the
+// first byte of the field names in the query. This works because all stored objects should
+// have all field names empty, so their first bytes are '\0'.
+enum BehaviorIfFieldIsEqual {
+ normal = '\0',
+ less = 'l',
+ greater = 'g',
+};
+
+bool IndexEntryComparison::operator()(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) const {
+ // implementing in memcmp style to ease reuse of this code.
+ return compare(lhs, rhs) < 0;
+}
+
+// This should behave the same as customBSONCmp from btree_logic.cpp.
+//
+// Reading the comment in the .h file is highly recommended if you need to understand what this
+// function is doing
+int IndexEntryComparison::compare(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) const {
+ BSONObjIterator lhsIt(lhs.key);
+ BSONObjIterator rhsIt(rhs.key);
+
+ // Iterate through both BSONObjects, comparing individual elements one by one
+ for (unsigned mask = 1; lhsIt.more(); mask <<= 1) {
+ if (!rhsIt.more())
+ return _order.descending(mask) ? -1 : 1;
+
+ const BSONElement l = lhsIt.next();
+ const BSONElement r = rhsIt.next();
+
+ if (int cmp = l.woCompare(r, /*compareFieldNames=*/false)) {
+ if (cmp == std::numeric_limits<int>::min()) {
+ // can't be negated
+ cmp = -1;
}
+ return _order.descending(mask) ? -cmp : cmp;
}
- if(rhsIt.more())
- return -1;
-
- // This means just look at the key, not the loc.
- if (lhs.loc.isNull() || rhs.loc.isNull())
- return 0;
-
- return lhs.loc.compare(rhs.loc); // is supposed to ignore ordering
- }
+ // Here is where the weirdness begins. We sometimes want to fudge the comparison
+ // when a key == the query to implement exclusive ranges.
+ BehaviorIfFieldIsEqual lEqBehavior = BehaviorIfFieldIsEqual(l.fieldName()[0]);
+ BehaviorIfFieldIsEqual rEqBehavior = BehaviorIfFieldIsEqual(r.fieldName()[0]);
- // reading the comment in the .h file is highly recommended if you need to understand what this
- // function is doing
- BSONObj IndexEntryComparison::makeQueryObject(const BSONObj& keyPrefix,
- int prefixLen,
- bool prefixExclusive,
- const std::vector<const BSONElement*>& keySuffix,
- const std::vector<bool>& suffixInclusive,
- const int cursorDirection) {
-
- // Please read the comments in the header file to see why this is done.
- // The basic idea is that we use the field name to store a byte which indicates whether
- // each field in the query object is inclusive and exclusive, and if it is exclusive, in
- // which direction.
- const char exclusiveByte = (cursorDirection == 1 ? greater : less);
-
- const StringData exclusiveFieldName(&exclusiveByte, 1);
-
- BSONObjBuilder bb;
-
- // handle the prefix
- if (prefixLen > 0) {
- BSONObjIterator it(keyPrefix);
- for (int i = 0; i < prefixLen; i++) {
- invariant(it.more());
- const BSONElement e = it.next();
-
- if (prefixExclusive && i == prefixLen - 1) {
- bb.appendAs(e, exclusiveFieldName);
- }
- else {
- bb.appendAs(e, StringData());
- }
- }
+ if (lEqBehavior) {
+ // lhs is the query, rhs is the stored data
+ invariant(rEqBehavior == normal);
+ return lEqBehavior == less ? -1 : 1;
}
- // If the prefix is exclusive then the suffix does not matter as it will never be used
- if (prefixExclusive) {
- invariant(prefixLen > 0);
- return bb.obj();
+ if (rEqBehavior) {
+ // rhs is the query, lhs is the stored data, so reverse the returns
+ invariant(lEqBehavior == normal);
+ return rEqBehavior == less ? 1 : -1;
}
+ }
- // Handle the suffix. Note that the useful parts of the suffix start at index prefixLen
- // rather than at 0.
- invariant(keySuffix.size() == suffixInclusive.size());
- for (size_t i = prefixLen; i < keySuffix.size(); i++) {
- invariant(keySuffix[i]);
- if (suffixInclusive[i]) {
- bb.appendAs(*keySuffix[i], StringData());
+ if (rhsIt.more())
+ return -1;
+
+ // This means just look at the key, not the loc.
+ if (lhs.loc.isNull() || rhs.loc.isNull())
+ return 0;
+
+ return lhs.loc.compare(rhs.loc); // is supposed to ignore ordering
+}
+
+// reading the comment in the .h file is highly recommended if you need to understand what this
+// function is doing
+BSONObj IndexEntryComparison::makeQueryObject(const BSONObj& keyPrefix,
+ int prefixLen,
+ bool prefixExclusive,
+ const std::vector<const BSONElement*>& keySuffix,
+ const std::vector<bool>& suffixInclusive,
+ const int cursorDirection) {
+ // Please read the comments in the header file to see why this is done.
+ // The basic idea is that we use the field name to store a byte which indicates whether
+ // each field in the query object is inclusive and exclusive, and if it is exclusive, in
+ // which direction.
+ const char exclusiveByte = (cursorDirection == 1 ? greater : less);
+
+ const StringData exclusiveFieldName(&exclusiveByte, 1);
+
+ BSONObjBuilder bb;
+
+ // handle the prefix
+ if (prefixLen > 0) {
+ BSONObjIterator it(keyPrefix);
+ for (int i = 0; i < prefixLen; i++) {
+ invariant(it.more());
+ const BSONElement e = it.next();
+
+ if (prefixExclusive && i == prefixLen - 1) {
+ bb.appendAs(e, exclusiveFieldName);
} else {
- bb.appendAs(*keySuffix[i], exclusiveFieldName);
-
- // If an exclusive field exists then no fields after this will matter, since an
- // exclusive field never evaluates as equal
- return bb.obj();
+ bb.appendAs(e, StringData());
}
}
+ }
+ // If the prefix is exclusive then the suffix does not matter as it will never be used
+ if (prefixExclusive) {
+ invariant(prefixLen > 0);
return bb.obj();
}
-} // namespace mongo
+ // Handle the suffix. Note that the useful parts of the suffix start at index prefixLen
+ // rather than at 0.
+ invariant(keySuffix.size() == suffixInclusive.size());
+ for (size_t i = prefixLen; i < keySuffix.size(); i++) {
+ invariant(keySuffix[i]);
+ if (suffixInclusive[i]) {
+ bb.appendAs(*keySuffix[i], StringData());
+ } else {
+ bb.appendAs(*keySuffix[i], exclusiveFieldName);
+
+ // If an exclusive field exists then no fields after this will matter, since an
+ // exclusive field never evaluates as equal
+ return bb.obj();
+ }
+ }
+
+ return bb.obj();
+}
+
+} // namespace mongo
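compare() above treats a non-NUL first byte of a field name ('l' or 'g') as a discriminator that breaks ties between a query object and stored keys with equal values. The sketch below reproduces that idea with plain ints standing in for BSONElements; it is illustrative only and ignores key-length differences.

#include <iostream>
#include <vector>

struct Field {
    int value;
    char behavior;  // '\0' = stored key, 'l' = query sorts lower, 'g' = higher
};

int compareKeys(const std::vector<Field>& lhs, const std::vector<Field>& rhs) {
    for (size_t i = 0; i < lhs.size() && i < rhs.size(); ++i) {
        if (lhs[i].value != rhs[i].value)
            return lhs[i].value < rhs[i].value ? -1 : 1;
        if (lhs[i].behavior)  // lhs is the query
            return lhs[i].behavior == 'l' ? -1 : 1;
        if (rhs[i].behavior)  // rhs is the query, so reverse the sense
            return rhs[i].behavior == 'l' ? 1 : -1;
    }
    return 0;
}

int main() {
    std::vector<Field> stored = {{5, '\0'}};
    std::vector<Field> queryAfter5 = {{5, 'g'}};  // exclusive lower bound at 5
    // Prints -1: the stored key 5 sorts before the query, so a forward seek
    // positioned by the query lands on the first key strictly greater than 5.
    std::cout << compareKeys(stored, queryAfter5) << "\n";
    return 0;
}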
diff --git a/src/mongo/db/storage/index_entry_comparison.h b/src/mongo/db/storage/index_entry_comparison.h
index dbdd15e0368..906192bb954 100644
--- a/src/mongo/db/storage/index_entry_comparison.h
+++ b/src/mongo/db/storage/index_entry_comparison.h
@@ -37,170 +37,170 @@
namespace mongo {
+/**
+ * Represents a single item in an index. An index item simply consists of a key
+ * and a disk location.
+ */
+struct IndexKeyEntry {
+ IndexKeyEntry(BSONObj key, RecordId loc) : key(std::move(key)), loc(std::move(loc)) {}
+
+ BSONObj key;
+ RecordId loc;
+};
+
+std::ostream& operator<<(std::ostream& stream, const IndexKeyEntry& entry);
+
+inline bool operator==(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) {
+ return std::tie(lhs.key, lhs.loc) == std::tie(rhs.key, rhs.loc);
+}
+
+inline bool operator!=(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) {
+ return std::tie(lhs.key, lhs.loc) != std::tie(rhs.key, rhs.loc);
+}
+
+/**
+ * Describes a query that can be compared against an IndexKeyEntry in a way that allows
+ * expressing exclusiveness on a prefix of the key. This is mostly used to express a location to
+ * seek to in an index that may not be representable as a valid key.
+ *
+ * The "key" used for comparison is the concatenation of the first 'prefixLen' elements of
+ * 'keyPrefix' followed by the last 'keySuffix.size() - prefixLen' elements of
+ * 'keySuffix'.
+ *
+ * The comparison is exclusive if either 'prefixExclusive' is true or if any value in
+ * 'suffixInclusive' at index >= 'prefixLen' is false.
+ *
+ * Portions of the key following the first exclusive part may be ignored.
+ *
+ * e.g.
+ *
+ * Suppose that
+ *
+ * keyPrefix = { "" : 1, "" : 2 }
+ * prefixLen = 1
+ * prefixExclusive = false
+ * keySuffix = [ IGNORED, { "" : 5 } ]
+ * suffixInclusive = [ IGNORED, false ]
+ *
+ * ==> key is { "" : 1, "" : 5 }
+ * with the comparison being done exclusively
+ *
+ * Suppose that
+ *
+ * keyPrefix = { "" : 1, "" : 2 }
+ * prefixLen = 1
+ * prefixExclusive = true
+ * keySuffix = IGNORED
+ * suffixInclusive = IGNORED
+ *
+ * ==> represented key is { "" : 1 }
+ * with the comparison being done exclusively
+ *
+ * 'prefixLen = 0' and 'prefixExclusive = true' are mutually incompatible.
+ *
+ * @see IndexEntryComparison::makeQueryObject
+ */
+struct IndexSeekPoint {
+ BSONObj keyPrefix;
+
+ /**
+ * Use this many fields in 'keyPrefix'.
+ */
+ int prefixLen = 0;
+
/**
- * Represents a single item in an index. An index item simply consists of a key
- * and a disk location.
+ * If true, compare exclusively on just the fields on keyPrefix and ignore the suffix.
*/
- struct IndexKeyEntry {
- IndexKeyEntry(BSONObj key, RecordId loc) :key(std::move(key)), loc(std::move(loc)) {}
+ bool prefixExclusive = false;
- BSONObj key;
- RecordId loc;
- };
+ /**
+ * Elements starting at index 'prefixLen' are logically appended to the prefix.
+ * The elements before index 'prefixLen' should be ignored.
+ */
+ std::vector<const BSONElement*> keySuffix;
- std::ostream& operator<<(std::ostream& stream, const IndexKeyEntry& entry);
+ /**
+ * If the ith element is false, ignore indexes > i in keySuffix and treat the
+ * concatenated key as exclusive.
+ * The elements before index 'prefixLen' should be ignored.
+ *
+ * Must have identical size as keySuffix.
+ */
+ std::vector<bool> suffixInclusive;
+};
- inline bool operator==(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) {
- return std::tie(lhs.key, lhs.loc) == std::tie(rhs.key, rhs.loc);
- }
+/**
+ * Compares two different IndexKeyEntry instances.
+ * The existence of compound indexes necessitates some complicated logic. This is meant to
+ * support the comparisons of IndexKeyEntries (that are stored in an index) with IndexSeekPoints
+ * (that were encoded with makeQueryObject) to support fine-grained control over whether the
+ * ranges of various keys comprising a compound index are inclusive or exclusive.
+ */
+class IndexEntryComparison {
+public:
+ IndexEntryComparison(Ordering order) : _order(order) {}
- inline bool operator!=(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) {
- return std::tie(lhs.key, lhs.loc) != std::tie(rhs.key, rhs.loc);
- }
+ bool operator()(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) const;
/**
- * Describes a query that can be compared against an IndexKeyEntry in a way that allows
- * expressing exclusiveness on a prefix of the key. This is mostly used to express a location to
- * seek to in an index that may not be representable as a valid key.
- *
- * The "key" used for comparison is the concatenation of the first 'prefixLen' elements of
- * 'keyPrefix' followed by the last 'keySuffix.size() - prefixLen' elements of
- * 'keySuffix'.
- *
- * The comparison is exclusive if either 'prefixExclusive' is true or if there are any false
- * values in 'suffixInclusive' that are false at index >= 'prefixLen'.
+ * Compares two IndexKeyEntries and returns -1 if lhs < rhs, 1 if lhs > rhs, and 0
+ * otherwise.
*
- * Portions of the key following the first exclusive part may be ignored.
- *
- * e.g.
- *
- * Suppose that
- *
- * keyPrefix = { "" : 1, "" : 2 }
- * prefixLen = 1
- * prefixExclusive = false
- * keySuffix = [ IGNORED, { "" : 5 } ]
- * suffixInclusive = [ IGNORED, false ]
- *
- * ==> key is { "" : 1, "" : 5 }
- * with the comparison being done exclusively
+ * IndexKeyEntries are compared lexicographically field by field in the BSONObj, followed by
+ * the RecordId. Either lhs or rhs (but not both) can be a query object returned by
+ * makeQueryObject(). See makeQueryObject() for a description of how its arguments affect
+ * the outcome of the comparison.
+ */
+ int compare(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) const;
+
+ /**
+ * Encodes the arguments into a query object suitable to pass in to compare().
*
- * Suppose that
+ * A query object is used for seeking an iterator to a position in a sorted index. The
+ * difference between a query object and the keys inserted into indexes is that query
+ * objects can be exclusive. This means that the first matching entry in the index is the
+ * first key in the index after the query. The meaning of "after" depends on
+ * cursorDirection.
*
- * keyPrefix = { "" : 1, "" : 2 }
- * prefixLen = 1
- * prefixExclusive = true
- * keySuffix = IGNORED
- * suffixInclusive = IGNORED
+ * The fields of the key are the combination of keyPrefix and keySuffix. The first prefixLen
+ * keys of keyPrefix are used, as well as the keys starting at the prefixLen index of
+ * keySuffix. The first prefixLen elements of keySuffix are ignored.
*
- * ==> represented key is { "" : 1 }
- * with the comparison being done exclusively
+ * If a field is marked as exclusive, then comparisons stop after that field and return
+ * either higher or lower, even if that field compares equal. If prefixExclusive is true and
+ * prefixLen is greater than 0, then the last field in the prefix is marked as exclusive. It
+ * is illegal to specify prefixExclusive as true with a prefixLen of 0. Each bool in
+ * suffixInclusive, starting at index prefixLen, indicates whether the corresponding element
+ * in keySuffix is inclusive or exclusive.
*
- * 'prefixLen = 0' and 'prefixExclusive = true' are mutually incompatible.
+ * Returned objects are for use in lookups only and should never be inserted into the
+ * database, as their format may change. The only reason this is the same type as the
+ * entries in an index is to support storage engines that require comparators that take
+ * arguments of the same type.
*
- * @see IndexEntryComparison::makeQueryObject
+     * A cursorDirection of 1 indicates a forward cursor, and -1 indicates a reverse cursor.
+     * This affects the result when the exclusive field compares equal.
*/
- struct IndexSeekPoint {
- BSONObj keyPrefix;
-
- /**
- * Use this many fields in 'keyPrefix'.
- */
- int prefixLen = 0;
-
- /**
- * If true, compare exclusively on just the fields on keyPrefix and ignore the suffix.
- */
- bool prefixExclusive = false;
-
- /**
- * Elements starting at index 'prefixLen' are logically appended to the prefix.
- * The elements before index 'prefixLen' should be ignored.
- */
- std::vector<const BSONElement*> keySuffix;
-
- /**
- * If the ith element is false, ignore indexes > i in keySuffix and treat the
- * concatenated key as exclusive.
- * The elements before index 'prefixLen' should be ignored.
- *
- * Must have identical size as keySuffix.
- */
- std::vector<bool> suffixInclusive;
- };
+ static BSONObj makeQueryObject(const BSONObj& keyPrefix,
+ int prefixLen,
+ bool prefixExclusive,
+ const std::vector<const BSONElement*>& keySuffix,
+ const std::vector<bool>& suffixInclusive,
+ const int cursorDirection);
+
+ static BSONObj makeQueryObject(const IndexSeekPoint& seekPoint, bool isForward) {
+ return makeQueryObject(seekPoint.keyPrefix,
+ seekPoint.prefixLen,
+ seekPoint.prefixExclusive,
+ seekPoint.keySuffix,
+ seekPoint.suffixInclusive,
+ isForward ? 1 : -1);
+ }
- /**
- * Compares two different IndexKeyEntry instances.
- * The existence of compound indexes necessitates some complicated logic. This is meant to
- * support the comparisons of IndexKeyEntries (that are stored in an index) with IndexSeekPoints
- * (that were encoded with makeQueryObject) to support fine-grained control over whether the
- * ranges of various keys comprising a compound index are inclusive or exclusive.
- */
- class IndexEntryComparison {
- public:
- IndexEntryComparison(Ordering order) : _order(order) {}
-
- bool operator() (const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) const;
-
- /**
- * Compares two IndexKeyEntries and returns -1 if lhs < rhs, 1 if lhs > rhs, and 0
- * otherwise.
- *
- * IndexKeyEntries are compared lexicographically field by field in the BSONObj, followed by
- * the RecordId. Either lhs or rhs (but not both) can be a query object returned by
- * makeQueryObject(). See makeQueryObject() for a description of how its arguments affect
- * the outcome of the comparison.
- */
- int compare(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) const;
-
- /**
- * Encodes the arguments into a query object suitable to pass in to compare().
- *
- * A query object is used for seeking an iterator to a position in a sorted index. The
- * difference between a query object and the keys inserted into indexes is that query
- * objects can be exclusive. This means that the first matching entry in the index is the
- * first key in the index after the query. The meaning of "after" depends on
- * cursorDirection.
- *
- * The fields of the key are the combination of keyPrefix and keySuffix. The first prefixLen
- * keys of keyPrefix are used, as well as the keys starting at the prefixLen index of
- * keySuffix. The first prefixLen elements of keySuffix are ignored.
- *
- * If a field is marked as exclusive, then comparisons stop after that field and return
- * either higher or lower, even if that field compares equal. If prefixExclusive is true and
- * prefixLen is greater than 0, then the last field in the prefix is marked as exclusive. It
- * is illegal to specify prefixExclusive as true with a prefixLen of 0. Each bool in
- * suffixInclusive, starting at index prefixLen, indicates whether the corresponding element
- * in keySuffix is inclusive or exclusive.
- *
- * Returned objects are for use in lookups only and should never be inserted into the
- * database, as their format may change. The only reason this is the same type as the
- * entries in an index is to support storage engines that require comparators that take
- * arguments of the same type.
- *
- * A cursurDirection of 1 indicates a forward cursor, and -1 indicates a reverse cursor.
- * This effects the result when the exclusive field compares equal.
- */
- static BSONObj makeQueryObject(const BSONObj& keyPrefix,
- int prefixLen,
- bool prefixExclusive,
- const std::vector<const BSONElement*>& keySuffix,
- const std::vector<bool>& suffixInclusive,
- const int cursorDirection);
-
- static BSONObj makeQueryObject(const IndexSeekPoint& seekPoint, bool isForward) {
- return makeQueryObject(seekPoint.keyPrefix,
- seekPoint.prefixLen,
- seekPoint.prefixExclusive,
- seekPoint.keySuffix,
- seekPoint.suffixInclusive,
- isForward ? 1 : -1);
- }
-
- private:
- // Ordering is used in comparison() to compare BSONElements
- const Ordering _order;
-
- }; // struct IndexEntryComparison
-
-} // namespace mongo
+private:
+ // Ordering is used in comparison() to compare BSONElements
+ const Ordering _order;
+
+}; // struct IndexEntryComparison
+
+} // namespace mongo
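The IndexSeekPoint comment above walks through how keyPrefix, prefixLen, keySuffix, and suffixInclusive combine into a single seek key. The sketch below mirrors that worked example with plain ints in place of BSON values; it only assembles the key and inclusivity flag and is not the real makeQueryObject().

#include <iostream>
#include <vector>

int main() {
    // Mirrors the first example in the comment: prefix {1, 2}, prefixLen 1,
    // suffix element 5 marked exclusive, giving key {1, 5} compared exclusively.
    std::vector<int> keyPrefix = {1, 2};
    int prefixLen = 1;
    bool prefixExclusive = false;
    std::vector<int> keySuffix = {0 /* ignored */, 5};
    std::vector<bool> suffixInclusive = {true /* ignored */, false};

    std::vector<int> seekKey(keyPrefix.begin(), keyPrefix.begin() + prefixLen);
    bool inclusive = !prefixExclusive;
    for (size_t i = prefixLen; i < keySuffix.size(); ++i) {
        seekKey.push_back(keySuffix[i]);
        if (!suffixInclusive[i]) {
            inclusive = false;
            break;  // fields after the first exclusive one never matter
        }
    }

    std::cout << "seek key: { " << seekKey[0] << ", " << seekKey[1] << " }, "
              << (inclusive ? "inclusive" : "exclusive") << "\n";
    return 0;
}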
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index 526460c1d68..3d43d93e4f2 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -43,505 +43,517 @@
namespace mongo {
- using std::string;
-
- namespace {
- typedef KeyString::TypeBits TypeBits;
-
- namespace CType {
- // canonical types namespace. (would be enum class CType: uint8_t in C++11)
- // Note 0-9 and 246-255 are disallowed and reserved for value encodings.
- // For types that encode value information in the ctype byte, the value in this list is
- // the "generic" one to be used to represent all values of that ctype, such as in the
- // encoding of fields in Objects.
- const uint8_t kMinKey = 10;
- const uint8_t kUndefined = 15;
- const uint8_t kNullish = 20;
- const uint8_t kNumeric = 30;
- const uint8_t kStringLike = 60;
- const uint8_t kObject = 70;
- const uint8_t kArray = 80;
- const uint8_t kBinData = 90;
- const uint8_t kOID = 100;
- const uint8_t kBool = 110;
- const uint8_t kDate = 120;
- const uint8_t kTimestamp = 130;
- const uint8_t kRegEx = 140;
- const uint8_t kDBRef = 150;
- const uint8_t kCode = 160;
- const uint8_t kCodeWithScope = 170;
- const uint8_t kMaxKey = 240;
-
- // These are ordered by the numeric value of the values encoded in each format.
- // Therefore each format can be considered independently without considering
- // cross-format comparisons.
- const uint8_t kNumericNaN = kNumeric + 0;
- const uint8_t kNumericNegativeLargeDouble = kNumeric + 1; // <= -2**63 including -Inf
- const uint8_t kNumericNegative8ByteInt = kNumeric + 2;
- const uint8_t kNumericNegative7ByteInt = kNumeric + 3;
- const uint8_t kNumericNegative6ByteInt = kNumeric + 4;
- const uint8_t kNumericNegative5ByteInt = kNumeric + 5;
- const uint8_t kNumericNegative4ByteInt = kNumeric + 6;
- const uint8_t kNumericNegative3ByteInt = kNumeric + 7;
- const uint8_t kNumericNegative2ByteInt = kNumeric + 8;
- const uint8_t kNumericNegative1ByteInt = kNumeric + 9;
- const uint8_t kNumericNegativeSmallDouble = kNumeric + 10; // between 0 and -1 exclusive
- const uint8_t kNumericZero = kNumeric + 11;
- const uint8_t kNumericPositiveSmallDouble = kNumeric + 12; // between 0 and 1 exclusive
- const uint8_t kNumericPositive1ByteInt = kNumeric + 13;
- const uint8_t kNumericPositive2ByteInt = kNumeric + 14;
- const uint8_t kNumericPositive3ByteInt = kNumeric + 15;
- const uint8_t kNumericPositive4ByteInt = kNumeric + 16;
- const uint8_t kNumericPositive5ByteInt = kNumeric + 17;
- const uint8_t kNumericPositive6ByteInt = kNumeric + 18;
- const uint8_t kNumericPositive7ByteInt = kNumeric + 19;
- const uint8_t kNumericPositive8ByteInt = kNumeric + 20;
- const uint8_t kNumericPositiveLargeDouble = kNumeric + 21; // >= 2**63 including +Inf
- BOOST_STATIC_ASSERT(kNumericPositiveLargeDouble < kStringLike);
-
- const uint8_t kBoolFalse = kBool + 0;
- const uint8_t kBoolTrue = kBool + 1;
- BOOST_STATIC_ASSERT(kBoolTrue < kDate);
-
- size_t numBytesForInt(uint8_t ctype) {
- if (ctype >= kNumericPositive1ByteInt) {
- dassert(ctype <= kNumericPositive8ByteInt);
- return ctype - kNumericPositive1ByteInt + 1;
- }
-
- dassert(ctype <= kNumericNegative1ByteInt);
- dassert(ctype >= kNumericNegative8ByteInt);
- return kNumericNegative1ByteInt - ctype + 1;
- }
- } // namespace CType
-
- uint8_t bsonTypeToGenericKeyStringType(BSONType type) {
- switch (type) {
- case MinKey:
- return CType::kMinKey;
-
- case EOO:
- case jstNULL:
- return CType::kNullish;
-
- case Undefined:
- return CType::kUndefined;
-
- case NumberDouble:
- case NumberInt:
- case NumberLong:
- return CType::kNumeric;
-
- case mongo::String:
- case Symbol:
- return CType::kStringLike;
-
- case Object: return CType::kObject;
- case Array: return CType::kArray;
- case BinData: return CType::kBinData;
- case jstOID: return CType::kOID;
- case Bool: return CType::kBool;
- case Date: return CType::kDate;
- case bsonTimestamp: return CType::kTimestamp;
- case RegEx: return CType::kRegEx;
- case DBRef: return CType::kDBRef;
-
- case Code: return CType::kCode;
- case CodeWScope: return CType::kCodeWithScope;
-
- case MaxKey: return CType::kMaxKey;
- default:
- invariant(false);
- }
- }
-
- // First double that isn't an int64.
- const double kMinLargeDouble = 9223372036854775808.0; // 1ULL<<63
-
- const uint8_t kEnd = 0x4;
-
- // These overlay with CType or kEnd bytes and therefor must be less/greater than all of
- // them (and their inverses). They also can't equal 0 or 255 since that would collide with
- // the encoding of NUL bytes in strings as "\x00\xff".
- const uint8_t kLess = 1;
- const uint8_t kGreater = 254;
- } // namespace
-
- // some utility functions
- namespace {
- void memcpy_flipBits(void* dst, const void* src, size_t bytes) {
- const char* input = static_cast<const char*>(src);
- char* output = static_cast<char*>(dst);
- const char* const end = input + bytes;
- while (input != end) {
- *output++ = ~(*input++);
- }
- }
-
- template <typename T> T readType(BufReader* reader, bool inverted) {
- // TODO for C++11 to static_assert that T is integral
- T t = ConstDataView(static_cast<const char*>(reader->skip(sizeof(T)))).read<T>();
- if (inverted)
- return ~t;
- return t;
- }
-
- StringData readCString(BufReader* reader) {
- const char* start = static_cast<const char*>(reader->pos());
- const char* end = static_cast<const char*>(memchr(start, 0x0, reader->remaining()));
- invariant(end);
- size_t actualBytes = end - start;
- reader->skip(1 + actualBytes);
- return StringData(start, actualBytes);
- }
-
- /**
- * scratch must be empty when passed in. It will be used if there is a NUL byte in the
- * output string. In that case the returned StringData will point into scratch, otherwise
- * it will point directly into the input buffer.
- */
- StringData readCStringWithNuls(BufReader* reader, std::string* scratch) {
- const StringData initial = readCString(reader);
- if (reader->peek<unsigned char>() != 0xFF)
- return initial; // Don't alloc or copy for simple case with no NUL bytes.
-
- scratch->append(initial.rawData(), initial.size());
- while (reader->peek<unsigned char>() == 0xFF) {
- // Each time we enter this loop it means we hit a NUL byte encoded as "\x00\xFF".
- *scratch += '\0';
- reader->skip(1);
-
- const StringData nextPart = readCString(reader);
- scratch->append(nextPart.rawData(), nextPart.size());
- }
-
- return *scratch;
- }
-
- string readInvertedCString(BufReader* reader) {
- const char* start = static_cast<const char*>(reader->pos());
- const char* end = static_cast<const char*>(memchr(start, 0xFF, reader->remaining()));
- invariant(end);
- size_t actualBytes = end - start;
- string s(start, actualBytes);
- for (size_t i = 0; i < s.size(); i++) {
- s[i] = ~s[i];
- }
- reader->skip(1 + actualBytes);
- return s;
- }
-
- string readInvertedCStringWithNuls(BufReader* reader) {
- std::string out;
- do {
- if (!out.empty()) {
- // If this isn't our first pass through the loop it means we hit an NUL byte
- // encoded as "\xFF\00" in our inverted string.
- reader->skip(1);
- out += '\xFF'; // will be flipped to '\0' with rest of out before returning.
- }
+using std::string;
+
+namespace {
+typedef KeyString::TypeBits TypeBits;
+
+namespace CType {
+// canonical types namespace. (would be enum class CType: uint8_t in C++11)
+// Note 0-9 and 246-255 are disallowed and reserved for value encodings.
+// For types that encode value information in the ctype byte, the value in this list is
+// the "generic" one to be used to represent all values of that ctype, such as in the
+// encoding of fields in Objects.
+const uint8_t kMinKey = 10;
+const uint8_t kUndefined = 15;
+const uint8_t kNullish = 20;
+const uint8_t kNumeric = 30;
+const uint8_t kStringLike = 60;
+const uint8_t kObject = 70;
+const uint8_t kArray = 80;
+const uint8_t kBinData = 90;
+const uint8_t kOID = 100;
+const uint8_t kBool = 110;
+const uint8_t kDate = 120;
+const uint8_t kTimestamp = 130;
+const uint8_t kRegEx = 140;
+const uint8_t kDBRef = 150;
+const uint8_t kCode = 160;
+const uint8_t kCodeWithScope = 170;
+const uint8_t kMaxKey = 240;
+
+// These are ordered by the numeric value of the values encoded in each format.
+// Therefore each format can be considered independently without considering
+// cross-format comparisons.
+const uint8_t kNumericNaN = kNumeric + 0;
+const uint8_t kNumericNegativeLargeDouble = kNumeric + 1; // <= -2**63 including -Inf
+const uint8_t kNumericNegative8ByteInt = kNumeric + 2;
+const uint8_t kNumericNegative7ByteInt = kNumeric + 3;
+const uint8_t kNumericNegative6ByteInt = kNumeric + 4;
+const uint8_t kNumericNegative5ByteInt = kNumeric + 5;
+const uint8_t kNumericNegative4ByteInt = kNumeric + 6;
+const uint8_t kNumericNegative3ByteInt = kNumeric + 7;
+const uint8_t kNumericNegative2ByteInt = kNumeric + 8;
+const uint8_t kNumericNegative1ByteInt = kNumeric + 9;
+const uint8_t kNumericNegativeSmallDouble = kNumeric + 10; // between 0 and -1 exclusive
+const uint8_t kNumericZero = kNumeric + 11;
+const uint8_t kNumericPositiveSmallDouble = kNumeric + 12; // between 0 and 1 exclusive
+const uint8_t kNumericPositive1ByteInt = kNumeric + 13;
+const uint8_t kNumericPositive2ByteInt = kNumeric + 14;
+const uint8_t kNumericPositive3ByteInt = kNumeric + 15;
+const uint8_t kNumericPositive4ByteInt = kNumeric + 16;
+const uint8_t kNumericPositive5ByteInt = kNumeric + 17;
+const uint8_t kNumericPositive6ByteInt = kNumeric + 18;
+const uint8_t kNumericPositive7ByteInt = kNumeric + 19;
+const uint8_t kNumericPositive8ByteInt = kNumeric + 20;
+const uint8_t kNumericPositiveLargeDouble = kNumeric + 21; // >= 2**63 including +Inf
+BOOST_STATIC_ASSERT(kNumericPositiveLargeDouble < kStringLike);
+
+const uint8_t kBoolFalse = kBool + 0;
+const uint8_t kBoolTrue = kBool + 1;
+BOOST_STATIC_ASSERT(kBoolTrue < kDate);
+
+size_t numBytesForInt(uint8_t ctype) {
+ if (ctype >= kNumericPositive1ByteInt) {
+ dassert(ctype <= kNumericPositive8ByteInt);
+ return ctype - kNumericPositive1ByteInt + 1;
+ }
- const char* start = static_cast<const char*>(reader->pos());
- const char* end = static_cast<const char*>(
- memchr(start, 0xFF, reader->remaining()));
- invariant(end);
- size_t actualBytes = end - start;
+ dassert(ctype <= kNumericNegative1ByteInt);
+ dassert(ctype >= kNumericNegative8ByteInt);
+ return kNumericNegative1ByteInt - ctype + 1;
+}
+} // namespace CType
- out.append(start, actualBytes);
- reader->skip(1 + actualBytes);
- } while (reader->peek<unsigned char>() == 0x00);
+uint8_t bsonTypeToGenericKeyStringType(BSONType type) {
+ switch (type) {
+ case MinKey:
+ return CType::kMinKey;
- for (size_t i = 0; i < out.size(); i++) {
- out[i] = ~out[i];
- }
+ case EOO:
+ case jstNULL:
+ return CType::kNullish;
- return out;
- }
- } // namespace
+ case Undefined:
+ return CType::kUndefined;
+
+ case NumberDouble:
+ case NumberInt:
+ case NumberLong:
+ return CType::kNumeric;
+
+ case mongo::String:
+ case Symbol:
+ return CType::kStringLike;
+
+ case Object:
+ return CType::kObject;
+ case Array:
+ return CType::kArray;
+ case BinData:
+ return CType::kBinData;
+ case jstOID:
+ return CType::kOID;
+ case Bool:
+ return CType::kBool;
+ case Date:
+ return CType::kDate;
+ case bsonTimestamp:
+ return CType::kTimestamp;
+ case RegEx:
+ return CType::kRegEx;
+ case DBRef:
+ return CType::kDBRef;
+
+ case Code:
+ return CType::kCode;
+ case CodeWScope:
+ return CType::kCodeWithScope;
- void KeyString::resetToKey(const BSONObj& obj, Ordering ord, RecordId recordId) {
- resetToEmpty();
- _appendAllElementsForIndexing(obj, ord, kInclusive);
- appendRecordId(recordId);
+ case MaxKey:
+ return CType::kMaxKey;
+ default:
+ invariant(false);
}
-
- void KeyString::resetToKey(const BSONObj& obj, Ordering ord, Discriminator discriminator) {
- resetToEmpty();
- _appendAllElementsForIndexing(obj, ord, discriminator);
+}
+
+// First double that isn't an int64.
+const double kMinLargeDouble = 9223372036854775808.0; // 1ULL<<63
+
+const uint8_t kEnd = 0x4;
+
+// These overlay with CType or kEnd bytes and therefore must be less/greater than all of
+// them (and their inverses). They also can't equal 0 or 255 since that would collide with
+// the encoding of NUL bytes in strings as "\x00\xff".
+const uint8_t kLess = 1;
+const uint8_t kGreater = 254;
+} // namespace
+
+// some utility functions
+namespace {
+void memcpy_flipBits(void* dst, const void* src, size_t bytes) {
+ const char* input = static_cast<const char*>(src);
+ char* output = static_cast<char*>(dst);
+ const char* const end = input + bytes;
+ while (input != end) {
+ *output++ = ~(*input++);
}
+}
+
+template <typename T>
+T readType(BufReader* reader, bool inverted) {
+ // TODO for C++11 to static_assert that T is integral
+ T t = ConstDataView(static_cast<const char*>(reader->skip(sizeof(T)))).read<T>();
+ if (inverted)
+ return ~t;
+ return t;
+}
+
+StringData readCString(BufReader* reader) {
+ const char* start = static_cast<const char*>(reader->pos());
+ const char* end = static_cast<const char*>(memchr(start, 0x0, reader->remaining()));
+ invariant(end);
+ size_t actualBytes = end - start;
+ reader->skip(1 + actualBytes);
+ return StringData(start, actualBytes);
+}
- // ----------------------------------------------------------------------
- // ----------- APPEND CODE -------------------------------------------
- // ----------------------------------------------------------------------
-
- void KeyString::_appendAllElementsForIndexing(const BSONObj& obj, Ordering ord,
- Discriminator discriminator) {
- int elemCount = 0;
- BSONObjIterator it(obj);
- while (auto elem = it.next()) {
- const int elemIdx = elemCount++;
- const bool invert = (ord.get(elemIdx) == -1);
-
- _appendBsonValue(elem, invert, NULL);
-
- dassert(elem.fieldNameSize() < 3); // fieldNameSize includes the NUL
-
- // IndexEntryComparison::makeQueryObject() encodes a discriminator in the first byte of
- // the field name. This discriminator overrides the passed in one. Normal elements only
- // have the NUL byte terminator. Entries stored in an index are not allowed to have a
- // discriminator.
- if (char ch = *elem.fieldName()) {
- // l for less / g for greater.
- invariant(ch == 'l' || ch == 'g');
- discriminator = ch == 'l' ? kExclusiveBefore : kExclusiveAfter;
- invariant(!it.more());
- }
- }
-
- // The discriminator forces this KeyString to compare Less/Greater than any KeyString with
- // the same prefix of keys. As an example, this can be used to land on the first key in the
- // index with the value "a" regardless of the RecordId. In compound indexes it can use a
- // prefix of the full key to ignore the later keys.
- switch (discriminator) {
- case kExclusiveBefore: _append(kLess, false); break;
- case kExclusiveAfter: _append(kGreater, false); break;
- case kInclusive: break; // No discriminator byte.
- }
-
- // TODO consider omitting kEnd when using a discriminator byte. It is not a storage format
- // change since keystrings with discriminators are not allowed to be stored.
- _append(kEnd, false);
+/**
+ * scratch must be empty when passed in. It will be used if there is a NUL byte in the
+ * output string. In that case the returned StringData will point into scratch, otherwise
+ * it will point directly into the input buffer.
+ */
+StringData readCStringWithNuls(BufReader* reader, std::string* scratch) {
+ const StringData initial = readCString(reader);
+ if (reader->peek<unsigned char>() != 0xFF)
+ return initial; // Don't alloc or copy for simple case with no NUL bytes.
+
+ scratch->append(initial.rawData(), initial.size());
+ while (reader->peek<unsigned char>() == 0xFF) {
+ // Each time we enter this loop it means we hit a NUL byte encoded as "\x00\xFF".
+ *scratch += '\0';
+ reader->skip(1);
+
+ const StringData nextPart = readCString(reader);
+ scratch->append(nextPart.rawData(), nextPart.size());
}
- void KeyString::appendRecordId(RecordId loc) {
- // The RecordId encoding must be able to determine the full length starting from the last
- // byte, without knowing where the first byte is since it is stored at the end of a
- // KeyString, and we need to be able to read the RecordId without decoding the whole thing.
- //
- // This encoding places a number (N) between 0 and 7 in both the high 3 bits of the first
- // byte and the low 3 bits of the last byte. This is the number of bytes between the first
- // and last byte (ie total bytes is N + 2). The remaining bits of the first and last bytes
- // are combined with the bits of the in-between bytes to store the 64-bit RecordId in
- // big-endian order. This does not encode negative RecordIds to give maximum space to
- // positive RecordIds which are the only ones that are allowed to be stored in an index.
-
- int64_t raw = loc.repr();
- if (raw < 0) {
- // Note: we encode RecordId::min() and RecordId() the same which is ok, as they are
- // never stored so they will never be compared to each other.
- invariant(raw == RecordId::min().repr());
- raw = 0;
- }
- const uint64_t value = static_cast<uint64_t>(raw);
- const int bitsNeeded = 64 - countLeadingZeros64(raw);
- const int extraBytesNeeded = bitsNeeded <= 10
- ? 0
- : ((bitsNeeded - 10) + 7) / 8; // ceil((bitsNeeded - 10) / 8)
-
- // extraBytesNeeded must fit in 3 bits.
- dassert(extraBytesNeeded >= 0 && extraBytesNeeded < 8);
-
- // firstByte combines highest 5 bits of value with extraBytesNeeded.
- const uint8_t firstByte = uint8_t((extraBytesNeeded << 5)
- | (value >> (5 + (extraBytesNeeded * 8))));
- // lastByte combines lowest 5 bits of value with extraBytesNeeded.
- const uint8_t lastByte = uint8_t((value << 3) | extraBytesNeeded);
-
- // RecordIds are never appended inverted.
- _append(firstByte, false);
- if (extraBytesNeeded) {
- const uint64_t extraBytes = endian::nativeToBig(value >> 5);
- // Only using the low-order extraBytesNeeded bytes of extraBytes.
- _appendBytes(reinterpret_cast<const char*>(&extraBytes) + sizeof(extraBytes)
- - extraBytesNeeded,
- extraBytesNeeded,
- false);
- }
- _append(lastByte, false);
+ return *scratch;
+}
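// Worked example of the "\x00\xFF" escape handled above (illustrative only, not
// part of key_string.cpp): _appendStringLike() turns the 3-byte string
// {'a', '\0', 'b'} into the 5 bytes
//
//     61 00 ff 62 00
//
// where "00 ff" encodes the embedded NUL and the final lone 00 terminates the
// string. readCStringWithNuls() reverses this: readCString() stops at the first
// 00, the peeked ff signals an escaped NUL, so "a" is copied into *scratch, a
// '\0' is appended, and reading resumes with "b".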
+
+string readInvertedCString(BufReader* reader) {
+ const char* start = static_cast<const char*>(reader->pos());
+ const char* end = static_cast<const char*>(memchr(start, 0xFF, reader->remaining()));
+ invariant(end);
+ size_t actualBytes = end - start;
+ string s(start, actualBytes);
+ for (size_t i = 0; i < s.size(); i++) {
+ s[i] = ~s[i];
}
-
- void KeyString::appendTypeBits(const TypeBits& typeBits) {
- // As an optimization, encode AllZeros as a single 0 byte.
- if (typeBits.isAllZeros()) {
- _append(uint8_t(0), false);
- return;
+ reader->skip(1 + actualBytes);
+ return s;
+}
+
+string readInvertedCStringWithNuls(BufReader* reader) {
+ std::string out;
+ do {
+ if (!out.empty()) {
+ // If this isn't our first pass through the loop it means we hit a NUL byte
+ // encoded as "\xFF\x00" in our inverted string.
+ reader->skip(1);
+ out += '\xFF'; // will be flipped to '\0' with rest of out before returning.
}
- _appendBytes(typeBits.getBuffer(), typeBits.getSize(), false);
- }
+ const char* start = static_cast<const char*>(reader->pos());
+ const char* end = static_cast<const char*>(memchr(start, 0xFF, reader->remaining()));
+ invariant(end);
+ size_t actualBytes = end - start;
- void KeyString::_appendBool(bool val, bool invert) {
- _append(val ? CType::kBoolTrue : CType::kBoolFalse, invert);
- }
+ out.append(start, actualBytes);
+ reader->skip(1 + actualBytes);
+ } while (reader->peek<unsigned char>() == 0x00);
- void KeyString::_appendDate(Date_t val, bool invert) {
- _append(CType::kDate, invert);
- // see: http://en.wikipedia.org/wiki/Offset_binary
- uint64_t encoded = static_cast<uint64_t>(val.asInt64());
- encoded ^= (1LL << 63); // flip highest bit (equivalent to bias encoding)
- _append(endian::nativeToBig(encoded), invert);
+ for (size_t i = 0; i < out.size(); i++) {
+ out[i] = ~out[i];
}
- void KeyString::_appendTimestamp(Timestamp val, bool invert) {
- _append(CType::kTimestamp, invert);
- _append(endian::nativeToBig(val.asLL()), invert);
- }
-
- void KeyString::_appendOID(OID val, bool invert) {
- _append(CType::kOID, invert);
- _appendBytes(val.view().view(), OID::kOIDSize, invert);
+ return out;
+}
+} // namespace
+
+void KeyString::resetToKey(const BSONObj& obj, Ordering ord, RecordId recordId) {
+ resetToEmpty();
+ _appendAllElementsForIndexing(obj, ord, kInclusive);
+ appendRecordId(recordId);
+}
+
+void KeyString::resetToKey(const BSONObj& obj, Ordering ord, Discriminator discriminator) {
+ resetToEmpty();
+ _appendAllElementsForIndexing(obj, ord, discriminator);
+}
+
+// ----------------------------------------------------------------------
+// ----------- APPEND CODE -------------------------------------------
+// ----------------------------------------------------------------------
+
+void KeyString::_appendAllElementsForIndexing(const BSONObj& obj,
+ Ordering ord,
+ Discriminator discriminator) {
+ int elemCount = 0;
+ BSONObjIterator it(obj);
+ while (auto elem = it.next()) {
+ const int elemIdx = elemCount++;
+ const bool invert = (ord.get(elemIdx) == -1);
+
+ _appendBsonValue(elem, invert, NULL);
+
+ dassert(elem.fieldNameSize() < 3); // fieldNameSize includes the NUL
+
+ // IndexEntryComparison::makeQueryObject() encodes a discriminator in the first byte of
+ // the field name. This discriminator overrides the passed in one. Normal elements only
+ // have the NUL byte terminator. Entries stored in an index are not allowed to have a
+ // discriminator.
+ if (char ch = *elem.fieldName()) {
+ // l for less / g for greater.
+ invariant(ch == 'l' || ch == 'g');
+ discriminator = ch == 'l' ? kExclusiveBefore : kExclusiveAfter;
+ invariant(!it.more());
+ }
}
- void KeyString::_appendString(StringData val, bool invert) {
- _typeBits.appendString();
- _append(CType::kStringLike, invert);
- _appendStringLike(val, invert);
+ // The discriminator forces this KeyString to compare Less/Greater than any KeyString with
+ // the same prefix of keys. As an example, this can be used to land on the first key in the
+ // index with the value "a" regardless of the RecordId. In compound indexes it can use a
+ // prefix of the full key to ignore the later keys.
+ switch (discriminator) {
+ case kExclusiveBefore:
+ _append(kLess, false);
+ break;
+ case kExclusiveAfter:
+ _append(kGreater, false);
+ break;
+ case kInclusive:
+ break; // No discriminator byte.
}
- void KeyString::_appendSymbol(StringData val, bool invert) {
- _typeBits.appendSymbol();
- _append(CType::kStringLike, invert); // Symbols and Strings compare equally
- _appendStringLike(val, invert);
+ // TODO consider omitting kEnd when using a discriminator byte. It is not a storage format
+ // change since keystrings with discriminators are not allowed to be stored.
+ _append(kEnd, false);
+}
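// A hedged usage sketch of the discriminator (illustrative only, not part of this
// change); it assumes KeyString is default-constructible and that the Discriminator
// enum is publicly accessible, as the surrounding code suggests:
//
//     Ordering ord = Ordering::make(BSON("field" << 1));
//
//     KeyString past;    // compares greater than every key whose field equals "a"
//     past.resetToKey(BSON("" << "a"), ord, KeyString::kExclusiveAfter);
//
//     KeyString before;  // compares less than every key whose field equals "a"
//     before.resetToKey(BSON("" << "a"), ord, KeyString::kExclusiveBefore);
//
// Seeking a storage-engine cursor to 'before' lands on the first "a" entry no
// matter what RecordId it carries, while seeking to 'past' skips every "a" entry.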
+
+void KeyString::appendRecordId(RecordId loc) {
+ // The RecordId encoding must be able to determine the full length starting from the last
+ // byte, without knowing where the first byte is since it is stored at the end of a
+ // KeyString, and we need to be able to read the RecordId without decoding the whole thing.
+ //
+ // This encoding places a number (N) between 0 and 7 in both the high 3 bits of the first
+ // byte and the low 3 bits of the last byte. This is the number of bytes between the first
+ // and last byte (ie total bytes is N + 2). The remaining bits of the first and last bytes
+ // are combined with the bits of the in-between bytes to store the 64-bit RecordId in
+ // big-endian order. This does not encode negative RecordIds to give maximum space to
+ // positive RecordIds which are the only ones that are allowed to be stored in an index.
+
+ int64_t raw = loc.repr();
+ if (raw < 0) {
+ // Note: we encode RecordId::min() and RecordId() the same which is ok, as they are
+ // never stored so they will never be compared to each other.
+ invariant(raw == RecordId::min().repr());
+ raw = 0;
}
-
- void KeyString::_appendCode(StringData val, bool invert) {
- _append(CType::kCode, invert);
- _appendStringLike(val, invert);
+ const uint64_t value = static_cast<uint64_t>(raw);
+ const int bitsNeeded = 64 - countLeadingZeros64(raw);
+ const int extraBytesNeeded =
+ bitsNeeded <= 10 ? 0 : ((bitsNeeded - 10) + 7) / 8; // ceil((bitsNeeded - 10) / 8)
+
+ // extraBytesNeeded must fit in 3 bits.
+ dassert(extraBytesNeeded >= 0 && extraBytesNeeded < 8);
+
+ // firstByte combines highest 5 bits of value with extraBytesNeeded.
+ const uint8_t firstByte =
+ uint8_t((extraBytesNeeded << 5) | (value >> (5 + (extraBytesNeeded * 8))));
+ // lastByte combines lowest 5 bits of value with extraBytesNeeded.
+ const uint8_t lastByte = uint8_t((value << 3) | extraBytesNeeded);
+
+ // RecordIds are never appended inverted.
+ _append(firstByte, false);
+ if (extraBytesNeeded) {
+ const uint64_t extraBytes = endian::nativeToBig(value >> 5);
+ // Only using the low-order extraBytesNeeded bytes of extraBytes.
+ _appendBytes(reinterpret_cast<const char*>(&extraBytes) + sizeof(extraBytes) -
+ extraBytesNeeded,
+ extraBytesNeeded,
+ false);
}
-
- void KeyString::_appendCodeWString(const BSONCodeWScope& val, bool invert) {
- _append(CType::kCodeWithScope, invert);
- _appendStringLike(val.code, invert);
- _appendBson(val.scope, invert);
+ _append(lastByte, false);
+}
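// Worked example of the encoding above (illustrative only): for RecordId(0x1234),
// bitsNeeded is 13, so extraBytesNeeded is 1 and three bytes are appended:
//     firstByte = (1 << 5) | (0x1234 >> 13)       = 0x20
//     extraByte = (0x1234 >> 5) & 0xff            = 0x91
//     lastByte  = ((0x1234 << 3) & 0xff) | 1      = 0xa1
// decodeRecordId() below reverses this: ((0x91 << 5) | (0xa1 >> 3)) == 0x1234,
// and the low 3 bits of the last byte (1) tell decodeRecordIdAtEnd() that the
// RecordId occupies the final 1 + 2 = 3 bytes of the key.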
+
+void KeyString::appendTypeBits(const TypeBits& typeBits) {
+ // As an optimization, encode AllZeros as a single 0 byte.
+ if (typeBits.isAllZeros()) {
+ _append(uint8_t(0), false);
+ return;
}
- void KeyString::_appendBinData(const BSONBinData& val, bool invert) {
- _append(CType::kBinData, invert);
- if (val.length < 0xff) {
- // size fits in one byte so use one byte to encode.
- _append(uint8_t(val.length), invert);
- }
- else {
- // Encode 0xff prefix to indicate that the size takes 4 bytes.
- _append(uint8_t(0xff), invert);
- _append(endian::nativeToBig(int32_t(val.length)), invert);
- }
- _append(uint8_t(val.type), invert);
- _appendBytes(val.data, val.length, invert);
+ _appendBytes(typeBits.getBuffer(), typeBits.getSize(), false);
+}
+
+void KeyString::_appendBool(bool val, bool invert) {
+ _append(val ? CType::kBoolTrue : CType::kBoolFalse, invert);
+}
+
+void KeyString::_appendDate(Date_t val, bool invert) {
+ _append(CType::kDate, invert);
+ // see: http://en.wikipedia.org/wiki/Offset_binary
+ uint64_t encoded = static_cast<uint64_t>(val.asInt64());
+ encoded ^= (1LL << 63); // flip highest bit (equivalent to bias encoding)
+ _append(endian::nativeToBig(encoded), invert);
+}
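// Worked example of the offset-binary transform above (illustrative only):
//     millis = -1  ->  0xffffffffffffffff ^ (1ull << 63) = 0x7fffffffffffffff
//     millis =  0  ->  0x0000000000000000 ^ (1ull << 63) = 0x8000000000000000
//     millis = +1  ->  0x0000000000000001 ^ (1ull << 63) = 0x8000000000000001
// Flipping the sign bit maps the signed range onto the unsigned range in order,
// so the big-endian bytes of 'encoded' memcmp() in the same order as the dates.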
+
+void KeyString::_appendTimestamp(Timestamp val, bool invert) {
+ _append(CType::kTimestamp, invert);
+ _append(endian::nativeToBig(val.asLL()), invert);
+}
+
+void KeyString::_appendOID(OID val, bool invert) {
+ _append(CType::kOID, invert);
+ _appendBytes(val.view().view(), OID::kOIDSize, invert);
+}
+
+void KeyString::_appendString(StringData val, bool invert) {
+ _typeBits.appendString();
+ _append(CType::kStringLike, invert);
+ _appendStringLike(val, invert);
+}
+
+void KeyString::_appendSymbol(StringData val, bool invert) {
+ _typeBits.appendSymbol();
+ _append(CType::kStringLike, invert); // Symbols and Strings compare equally
+ _appendStringLike(val, invert);
+}
+
+void KeyString::_appendCode(StringData val, bool invert) {
+ _append(CType::kCode, invert);
+ _appendStringLike(val, invert);
+}
+
+void KeyString::_appendCodeWString(const BSONCodeWScope& val, bool invert) {
+ _append(CType::kCodeWithScope, invert);
+ _appendStringLike(val.code, invert);
+ _appendBson(val.scope, invert);
+}
+
+void KeyString::_appendBinData(const BSONBinData& val, bool invert) {
+ _append(CType::kBinData, invert);
+ if (val.length < 0xff) {
+ // size fits in one byte so use one byte to encode.
+ _append(uint8_t(val.length), invert);
+ } else {
+ // Encode 0xff prefix to indicate that the size takes 4 bytes.
+ _append(uint8_t(0xff), invert);
+ _append(endian::nativeToBig(int32_t(val.length)), invert);
}
-
- void KeyString::_appendRegex(const BSONRegEx& val, bool invert) {
- _append(CType::kRegEx, invert);
- // note: NULL is not allowed in pattern or flags
- _appendBytes(val.pattern.rawData(), val.pattern.size(), invert);
- _append(int8_t(0), invert);
- _appendBytes(val.flags.rawData(), val.flags.size(), invert);
- _append(int8_t(0), invert);
+ _append(uint8_t(val.type), invert);
+ _appendBytes(val.data, val.length, invert);
+}
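// Worked example of the length encoding above (illustrative only): a 16-byte
// BinData value is appended as 0x10, the subtype byte, then the 16 data bytes;
// a 300-byte value does not fit the one-byte length, so it is appended as
// 0xff 0x00 0x00 0x01 0x2c (the escape byte plus 300 as a big-endian int32),
// then the subtype byte and the 300 data bytes.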
+
+void KeyString::_appendRegex(const BSONRegEx& val, bool invert) {
+ _append(CType::kRegEx, invert);
+ // note: NULL is not allowed in pattern or flags
+ _appendBytes(val.pattern.rawData(), val.pattern.size(), invert);
+ _append(int8_t(0), invert);
+ _appendBytes(val.flags.rawData(), val.flags.size(), invert);
+ _append(int8_t(0), invert);
+}
+
+void KeyString::_appendDBRef(const BSONDBRef& val, bool invert) {
+ _append(CType::kDBRef, invert);
+ _append(endian::nativeToBig(int32_t(val.ns.size())), invert);
+ _appendBytes(val.ns.rawData(), val.ns.size(), invert);
+ _appendBytes(val.oid.view().view(), OID::kOIDSize, invert);
+}
+
+void KeyString::_appendArray(const BSONArray& val, bool invert) {
+ _append(CType::kArray, invert);
+ BSONForEach(elem, val) {
+ // No generic ctype byte needed here since no name is encoded.
+ _appendBsonValue(elem, invert, NULL);
}
-
- void KeyString::_appendDBRef(const BSONDBRef& val, bool invert) {
- _append(CType::kDBRef, invert);
- _append(endian::nativeToBig(int32_t(val.ns.size())), invert);
- _appendBytes(val.ns.rawData(), val.ns.size(), invert);
- _appendBytes(val.oid.view().view(), OID::kOIDSize, invert);
+ _append(int8_t(0), invert);
+}
+
+void KeyString::_appendObject(const BSONObj& val, bool invert) {
+ _append(CType::kObject, invert);
+ _appendBson(val, invert);
+}
+
+void KeyString::_appendNumberDouble(const double num, bool invert) {
+ if (num == 0.0 && std::signbit(num)) {
+ _typeBits.appendNegativeZero();
+ } else {
+ _typeBits.appendNumberDouble();
}
- void KeyString::_appendArray(const BSONArray& val, bool invert) {
- _append(CType::kArray, invert);
- BSONForEach(elem, val) {
- // No generic ctype byte needed here since no name is encoded.
- _appendBsonValue(elem, invert, NULL);
- }
- _append(int8_t(0), invert);
+ // no special cases needed for Inf,
+ // see http://en.wikipedia.org/wiki/IEEE_754-1985#Positive_and_negative_infinity
+ if (std::isnan(num)) {
+ _append(CType::kNumericNaN, invert);
+ return;
}
- void KeyString::_appendObject(const BSONObj& val, bool invert) {
- _append(CType::kObject, invert);
- _appendBson(val, invert);
+ if (num == 0.0) {
+ // We are collapsing -0.0 and 0.0 to the same value here.
+ // This is correct, as IEEE-754 specifies that they compare as equal;
+ // however, it prevents round-tripping -0.0.
+ // So if you put a -0.0 in, you'll get 0.0 out.
+ // We believe this to be ok.
+ _append(CType::kNumericZero, invert);
+ return;
}
- void KeyString::_appendNumberDouble(const double num, bool invert) {
- if (num == 0.0 && std::signbit(num)) {
- _typeBits.appendNegativeZero();
- }
- else {
- _typeBits.appendNumberDouble();
- }
+ const bool isNegative = num < 0.0;
+ const double magnitude = isNegative ? -num : num;
- // no special cases needed for Inf,
- // see http://en.wikipedia.org/wiki/IEEE_754-1985#Positive_and_negative_infinity
- if (std::isnan(num)) {
- _append(CType::kNumericNaN, invert);
- return;
- }
-
- if (num == 0.0) {
- // We are collapsing -0.0 and 0.0 to the same value here.
- // This is correct as IEEE-754 specifies that they compare as equal,
- // however this prevents roundtripping -0.0.
- // So if you put a -0.0 in, you'll get 0.0 out.
- // We believe this to be ok.
- _append(CType::kNumericZero, invert);
- return;
- }
-
- const bool isNegative = num < 0.0;
- const double magnitude = isNegative ? -num : num;
-
- if (magnitude < 1.0) {
- // This includes subnormal numbers.
- _appendSmallDouble(num, invert);
- return;
- }
-
- if (magnitude < kMinLargeDouble) {
- uint64_t integerPart = uint64_t(magnitude);
- if (double(integerPart) == magnitude) {
- // No fractional part
- _appendPreshiftedIntegerPortion(integerPart << 1, isNegative, invert);
- return;
- }
+ if (magnitude < 1.0) {
+ // This includes subnormal numbers.
+ _appendSmallDouble(num, invert);
+ return;
+ }
- // There is a fractional part.
- _appendPreshiftedIntegerPortion((integerPart << 1) | 1, isNegative, invert);
-
- // Append the bytes of the mantissa that include fractional bits.
- const size_t fractionalBits = (53 - (64 - countLeadingZeros64(integerPart)));
- const size_t fractionalBytes = (fractionalBits + 7) / 8;
- dassert(fractionalBytes > 0);
- uint64_t mantissa;
- memcpy(&mantissa, &num, sizeof(mantissa));
- mantissa &= ~(uint64_t(-1) << fractionalBits); // set non-fractional bits to 0;
- mantissa = endian::nativeToBig(mantissa);
-
- const void* firstUsedByte =
- reinterpret_cast<const char*>((&mantissa) + 1) - fractionalBytes;
- _appendBytes(firstUsedByte, fractionalBytes, isNegative ? !invert : invert);
+ if (magnitude < kMinLargeDouble) {
+ uint64_t integerPart = uint64_t(magnitude);
+ if (double(integerPart) == magnitude) {
+ // No fractional part
+ _appendPreshiftedIntegerPortion(integerPart << 1, isNegative, invert);
return;
}
- _appendLargeDouble(num, invert);
+ // There is a fractional part.
+ _appendPreshiftedIntegerPortion((integerPart << 1) | 1, isNegative, invert);
+
+ // Append the bytes of the mantissa that include fractional bits.
+ const size_t fractionalBits = (53 - (64 - countLeadingZeros64(integerPart)));
+ const size_t fractionalBytes = (fractionalBits + 7) / 8;
+ dassert(fractionalBytes > 0);
+ uint64_t mantissa;
+ memcpy(&mantissa, &num, sizeof(mantissa));
+ mantissa &= ~(uint64_t(-1) << fractionalBits); // set non-fractional bits to 0;
+ mantissa = endian::nativeToBig(mantissa);
+
+ const void* firstUsedByte =
+ reinterpret_cast<const char*>((&mantissa) + 1) - fractionalBytes;
+ _appendBytes(firstUsedByte, fractionalBytes, isNegative ? !invert : invert);
+ return;
}
- void KeyString::_appendNumberLong(const long long num, bool invert) {
- _typeBits.appendNumberLong();
- _appendInteger(num, invert);
- }
+ _appendLargeDouble(num, invert);
+}
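// Worked example of the integer/fraction split above (illustrative only):
//   3.0  -> integerPart = 3 and double(3) == 3.0, so only the preshifted value
//           3 << 1 = 6 is appended; NumberInt(3) and NumberLong(3) produce the
//           same bytes and are told apart purely by the TypeBits.
//   3.5  -> integerPart = 3 again, but the preshifted value (3 << 1) | 1 = 7
//           flags a fraction, so the low-order mantissa bytes of 3.5 follow and
//           make 3.5 sort after 3.0 but before 4.0.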
- void KeyString::_appendNumberInt(const int num, bool invert) {
- _typeBits.appendNumberInt();
- _appendInteger(num, invert);
- }
+void KeyString::_appendNumberLong(const long long num, bool invert) {
+ _typeBits.appendNumberLong();
+ _appendInteger(num, invert);
+}
- void KeyString::_appendBsonValue(const BSONElement& elem,
- bool invert,
- const StringData* name) {
+void KeyString::_appendNumberInt(const int num, bool invert) {
+ _typeBits.appendNumberInt();
+ _appendInteger(num, invert);
+}
- if (name) {
- _appendBytes(name->rawData(), name->size() + 1, invert); // + 1 for NUL
- }
+void KeyString::_appendBsonValue(const BSONElement& elem, bool invert, const StringData* name) {
+ if (name) {
+ _appendBytes(name->rawData(), name->size() + 1, invert); // + 1 for NUL
+ }
- switch (elem.type()) {
+ switch (elem.type()) {
case MinKey:
case MaxKey:
case EOO:
@@ -550,10 +562,18 @@ namespace mongo {
_append(bsonTypeToGenericKeyStringType(elem.type()), invert);
break;
- case NumberDouble: _appendNumberDouble(elem._numberDouble(), invert); break;
- case String: _appendString(elem.valueStringData(), invert); break;
- case Object: _appendObject(elem.Obj(), invert); break;
- case Array: _appendArray(BSONArray(elem.Obj()), invert); break;
+ case NumberDouble:
+ _appendNumberDouble(elem._numberDouble(), invert);
+ break;
+ case String:
+ _appendString(elem.valueStringData(), invert);
+ break;
+ case Object:
+ _appendObject(elem.Obj(), invert);
+ break;
+ case Array:
+ _appendArray(BSONArray(elem.Obj()), invert);
+ break;
case BinData: {
int len;
const char* data = elem.binData(len);
@@ -561,606 +581,635 @@ namespace mongo {
break;
}
- case jstOID: _appendOID(elem.__oid(), invert); break;
- case Bool: _appendBool(elem.boolean(), invert); break;
- case Date: _appendDate(elem.date(), invert); break;
+ case jstOID:
+ _appendOID(elem.__oid(), invert);
+ break;
+ case Bool:
+ _appendBool(elem.boolean(), invert);
+ break;
+ case Date:
+ _appendDate(elem.date(), invert);
+ break;
- case RegEx: _appendRegex(BSONRegEx(elem.regex(), elem.regexFlags()), invert); break;
- case DBRef: _appendDBRef(BSONDBRef(elem.dbrefNS(), elem.dbrefOID()), invert); break;
- case Symbol: _appendSymbol(elem.valueStringData(), invert); break;
- case Code: _appendCode(elem.valueStringData(), invert); break;
+ case RegEx:
+ _appendRegex(BSONRegEx(elem.regex(), elem.regexFlags()), invert);
+ break;
+ case DBRef:
+ _appendDBRef(BSONDBRef(elem.dbrefNS(), elem.dbrefOID()), invert);
+ break;
+ case Symbol:
+ _appendSymbol(elem.valueStringData(), invert);
+ break;
+ case Code:
+ _appendCode(elem.valueStringData(), invert);
+ break;
case CodeWScope: {
- _appendCodeWString(BSONCodeWScope(StringData(elem.codeWScopeCode(),
- elem.codeWScopeCodeLen()-1),
- BSONObj(elem.codeWScopeScopeData())),
- invert);
+ _appendCodeWString(
+ BSONCodeWScope(StringData(elem.codeWScopeCode(), elem.codeWScopeCodeLen() - 1),
+ BSONObj(elem.codeWScopeScopeData())),
+ invert);
break;
}
- case NumberInt: _appendNumberInt(elem._numberInt(), invert); break;
- case bsonTimestamp: _appendTimestamp(elem.timestamp(), invert); break;
- case NumberLong: _appendNumberLong(elem._numberLong(), invert); break;
+ case NumberInt:
+ _appendNumberInt(elem._numberInt(), invert);
+ break;
+ case bsonTimestamp:
+ _appendTimestamp(elem.timestamp(), invert);
+ break;
+ case NumberLong:
+ _appendNumberLong(elem._numberLong(), invert);
+ break;
default:
invariant(false);
- }
}
+}
- /// -- lowest level
+/// -- lowest level
- void KeyString::_appendStringLike(StringData str, bool invert) {
- while (true) {
- size_t firstNul = strnlen(str.rawData(), str.size());
- // No NULs in string.
- _appendBytes(str.rawData(), firstNul, invert);
- if (firstNul == str.size() || firstNul == std::string::npos) {
- _append(int8_t(0), invert);
- break;
- }
-
- // replace "\x00" with "\x00\xFF"
- _appendBytes("\x00\xFF", 2, invert);
- str = str.substr(firstNul + 1); // skip over the NUL byte
+void KeyString::_appendStringLike(StringData str, bool invert) {
+ while (true) {
+ size_t firstNul = strnlen(str.rawData(), str.size());
+ // No NULs in string.
+ _appendBytes(str.rawData(), firstNul, invert);
+ if (firstNul == str.size() || firstNul == std::string::npos) {
+ _append(int8_t(0), invert);
+ break;
}
- }
- void KeyString::_appendBson(const BSONObj& obj, bool invert) {
- BSONForEach(elem, obj) {
- // Force the order to be based on (ctype, name, value).
- _append(bsonTypeToGenericKeyStringType(elem.type()), invert);
- StringData name = elem.fieldNameStringData();
- _appendBsonValue(elem, invert, &name);
- }
- _append(int8_t(0), invert);
+ // replace "\x00" with "\x00\xFF"
+ _appendBytes("\x00\xFF", 2, invert);
+ str = str.substr(firstNul + 1); // skip over the NUL byte
}
-
- void KeyString::_appendSmallDouble(double value, bool invert) {
- dassert(!std::isnan(value));
- dassert(value != 0.0);
-
- uint64_t data;
- memcpy(&data, &value, sizeof(data));
-
- if (value > 0) {
- _append(CType::kNumericPositiveSmallDouble, invert);
- _append(endian::nativeToBig(data), invert);
- }
- else {
- _append(CType::kNumericNegativeSmallDouble, invert);
- _append(endian::nativeToBig(data), !invert);
- }
+}
+
+void KeyString::_appendBson(const BSONObj& obj, bool invert) {
+ BSONForEach(elem, obj) {
+ // Force the order to be based on (ctype, name, value).
+ _append(bsonTypeToGenericKeyStringType(elem.type()), invert);
+ StringData name = elem.fieldNameStringData();
+ _appendBsonValue(elem, invert, &name);
+ }
+ _append(int8_t(0), invert);
+}
+
+void KeyString::_appendSmallDouble(double value, bool invert) {
+ dassert(!std::isnan(value));
+ dassert(value != 0.0);
+
+ uint64_t data;
+ memcpy(&data, &value, sizeof(data));
+
+ if (value > 0) {
+ _append(CType::kNumericPositiveSmallDouble, invert);
+ _append(endian::nativeToBig(data), invert);
+ } else {
+ _append(CType::kNumericNegativeSmallDouble, invert);
+ _append(endian::nativeToBig(data), !invert);
}
+}
- void KeyString::_appendLargeDouble(double value, bool invert) {
- dassert(!std::isnan(value));
- dassert(value != 0.0);
+void KeyString::_appendLargeDouble(double value, bool invert) {
+ dassert(!std::isnan(value));
+ dassert(value != 0.0);
- uint64_t data;
- memcpy(&data, &value, sizeof(data));
+ uint64_t data;
+ memcpy(&data, &value, sizeof(data));
- if (value > 0) {
- _append(CType::kNumericPositiveLargeDouble, invert);
- _append(endian::nativeToBig(data), invert);
- }
- else {
- _append(CType::kNumericNegativeLargeDouble, invert);
- _append(endian::nativeToBig(data), !invert);
- }
+ if (value > 0) {
+ _append(CType::kNumericPositiveLargeDouble, invert);
+ _append(endian::nativeToBig(data), invert);
+ } else {
+ _append(CType::kNumericNegativeLargeDouble, invert);
+ _append(endian::nativeToBig(data), !invert);
+ }
+}
+
+// Handles NumberLong and NumberInt which are encoded identically except for the TypeBits.
+void KeyString::_appendInteger(const long long num, bool invert) {
+ if (num == std::numeric_limits<long long>::min()) {
+ // -2**63 is exactly representable as a double and not as a positive int64.
+ // Therefore we encode it as a double.
+ dassert(-double(num) == kMinLargeDouble);
+ _appendLargeDouble(double(num), invert);
+ return;
}
- // Handles NumberLong and NumberInt which are encoded identically except for the TypeBits.
- void KeyString::_appendInteger(const long long num, bool invert) {
- if (num == std::numeric_limits<long long>::min()) {
- // -2**63 is exactly representable as a double and not as a positive int64.
- // Therefore we encode it as a double.
- dassert(-double(num) == kMinLargeDouble);
- _appendLargeDouble(double(num), invert);
- return;
- }
-
- if (num == 0) {
- _append(CType::kNumericZero, invert);
- return;
- }
-
- const bool isNegative = num < 0;
- const uint64_t magnitude = isNegative ? -num : num;
- _appendPreshiftedIntegerPortion(magnitude << 1, isNegative, invert);
+ if (num == 0) {
+ _append(CType::kNumericZero, invert);
+ return;
}
+ const bool isNegative = num < 0;
+ const uint64_t magnitude = isNegative ? -num : num;
+ _appendPreshiftedIntegerPortion(magnitude << 1, isNegative, invert);
+}
- void KeyString::_appendPreshiftedIntegerPortion(uint64_t value, bool isNegative, bool invert) {
- dassert(value != 0ull);
- dassert(value != 1ull);
- const size_t bytesNeeded = (64 - countLeadingZeros64(value) + 7) / 8;
+void KeyString::_appendPreshiftedIntegerPortion(uint64_t value, bool isNegative, bool invert) {
+ dassert(value != 0ull);
+ dassert(value != 1ull);
- // Append the low bytes of value in big endian order.
- value = endian::nativeToBig(value);
- const void* firstUsedByte = reinterpret_cast<const char*>((&value) + 1) - bytesNeeded;
+ const size_t bytesNeeded = (64 - countLeadingZeros64(value) + 7) / 8;
- if (isNegative) {
- _append(uint8_t(CType::kNumericNegative1ByteInt - (bytesNeeded - 1)), invert);
- _appendBytes(firstUsedByte, bytesNeeded, !invert);
- }
- else {
- _append(uint8_t(CType::kNumericPositive1ByteInt + (bytesNeeded - 1)), invert);
- _appendBytes(firstUsedByte, bytesNeeded, invert);
- }
- }
+ // Append the low bytes of value in big endian order.
+ value = endian::nativeToBig(value);
+ const void* firstUsedByte = reinterpret_cast<const char*>((&value) + 1) - bytesNeeded;
- template <typename T>
- void KeyString::_append(const T& thing, bool invert) {
- _appendBytes(&thing, sizeof(thing), invert);
+ if (isNegative) {
+ _append(uint8_t(CType::kNumericNegative1ByteInt - (bytesNeeded - 1)), invert);
+ _appendBytes(firstUsedByte, bytesNeeded, !invert);
+ } else {
+ _append(uint8_t(CType::kNumericPositive1ByteInt + (bytesNeeded - 1)), invert);
+ _appendBytes(firstUsedByte, bytesNeeded, invert);
}
+}
- void KeyString::_appendBytes(const void* source, size_t bytes, bool invert) {
- char* const base = _buffer.skip(bytes);
+template <typename T>
+void KeyString::_append(const T& thing, bool invert) {
+ _appendBytes(&thing, sizeof(thing), invert);
+}
- if (invert) {
- memcpy_flipBits(base, source, bytes);
+void KeyString::_appendBytes(const void* source, size_t bytes, bool invert) {
+ char* const base = _buffer.skip(bytes);
+
+ if (invert) {
+ memcpy_flipBits(base, source, bytes);
+ } else {
+ memcpy(base, source, bytes);
+ }
+}
+
+
+// ----------------------------------------------------------------------
+// ----------- DECODING CODE --------------------------------------------
+// ----------------------------------------------------------------------
+
+namespace {
+void toBsonValue(uint8_t ctype,
+ BufReader* reader,
+ TypeBits::Reader* typeBits,
+ bool inverted,
+ BSONObjBuilderValueStream* stream);
+
+void toBson(BufReader* reader, TypeBits::Reader* typeBits, bool inverted, BSONObjBuilder* builder) {
+ while (readType<uint8_t>(reader, inverted) != 0) {
+ if (inverted) {
+ std::string name = readInvertedCString(reader);
+ BSONObjBuilderValueStream& stream = *builder << name;
+ toBsonValue(readType<uint8_t>(reader, inverted), reader, typeBits, inverted, &stream);
} else {
- memcpy(base, source, bytes);
+ StringData name = readCString(reader);
+ BSONObjBuilderValueStream& stream = *builder << name;
+ toBsonValue(readType<uint8_t>(reader, inverted), reader, typeBits, inverted, &stream);
}
}
+}
+
+void toBsonValue(uint8_t ctype,
+ BufReader* reader,
+ TypeBits::Reader* typeBits,
+ bool inverted,
+ BSONObjBuilderValueStream* stream) {
+ // This is only used by the kNumeric.*ByteInt types, but needs to be declared up here
+ // since it is used across a fallthrough.
+ bool isNegative = false;
+
+ switch (ctype) {
+ case CType::kMinKey:
+ *stream << MINKEY;
+ break;
+ case CType::kMaxKey:
+ *stream << MAXKEY;
+ break;
+ case CType::kNullish:
+ *stream << BSONNULL;
+ break;
+ case CType::kUndefined:
+ *stream << BSONUndefined;
+ break;
+ case CType::kBoolTrue:
+ *stream << true;
+ break;
+ case CType::kBoolFalse:
+ *stream << false;
+ break;
- // ----------------------------------------------------------------------
- // ----------- DECODING CODE --------------------------------------------
- // ----------------------------------------------------------------------
-
- namespace {
- void toBsonValue(uint8_t ctype,
- BufReader* reader,
- TypeBits::Reader* typeBits,
- bool inverted,
- BSONObjBuilderValueStream* stream);
-
- void toBson(BufReader* reader, TypeBits::Reader* typeBits,
- bool inverted, BSONObjBuilder* builder) {
- while (readType<uint8_t>(reader, inverted) != 0) {
- if (inverted) {
- std::string name = readInvertedCString(reader);
- BSONObjBuilderValueStream& stream = *builder << name;
- toBsonValue(readType<uint8_t>(reader, inverted), reader, typeBits, inverted,
- &stream);
- }
- else {
- StringData name = readCString(reader);
- BSONObjBuilderValueStream& stream = *builder << name;
- toBsonValue(readType<uint8_t>(reader, inverted), reader, typeBits, inverted,
- &stream);
- }
- }
- }
+ case CType::kDate:
+ *stream << Date_t::fromMillisSinceEpoch(
+ endian::bigToNative(readType<uint64_t>(reader, inverted)) ^ (1LL << 63));
+ break;
- void toBsonValue(uint8_t ctype,
- BufReader* reader,
- TypeBits::Reader* typeBits,
- bool inverted,
- BSONObjBuilderValueStream* stream) {
-
- // This is only used by the kNumeric.*ByteInt types, but needs to be declared up here
- // since it is used across a fallthrough.
- bool isNegative = false;
-
- switch (ctype) {
- case CType::kMinKey: *stream << MINKEY; break;
- case CType::kMaxKey: *stream << MAXKEY; break;
- case CType::kNullish: *stream << BSONNULL; break;
- case CType::kUndefined: *stream << BSONUndefined; break;
-
- case CType::kBoolTrue: *stream << true; break;
- case CType::kBoolFalse: *stream << false; break;
-
- case CType::kDate:
- *stream << Date_t::fromMillisSinceEpoch(
- endian::bigToNative(readType<uint64_t>(reader, inverted)) ^ (1LL << 63));
- break;
-
- case CType::kTimestamp:
- *stream << Timestamp(endian::bigToNative(readType<uint64_t>(reader, inverted)));
- break;
-
- case CType::kOID:
- if (inverted) {
- char buf[OID::kOIDSize];
- memcpy_flipBits(buf, reader->skip(OID::kOIDSize), OID::kOIDSize);
- *stream << OID::from(buf);
- }
- else {
- *stream << OID::from(reader->skip(OID::kOIDSize));
- }
- break;
-
- case CType::kStringLike: {
- const uint8_t originalType = typeBits->readStringLike();
- if (inverted) {
- if (originalType == TypeBits::kString) {
- *stream << readInvertedCStringWithNuls(reader);
- }
- else {
- dassert(originalType == TypeBits::kSymbol);
- *stream << BSONSymbol(readInvertedCStringWithNuls(reader));
- }
-
- }
- else {
- std::string scratch;
- if (originalType == TypeBits::kString) {
- *stream << readCStringWithNuls(reader, &scratch);
- }
- else {
- dassert(originalType == TypeBits::kSymbol);
- *stream << BSONSymbol(readCStringWithNuls(reader, &scratch));
- }
- }
- break;
+ case CType::kTimestamp:
+ *stream << Timestamp(endian::bigToNative(readType<uint64_t>(reader, inverted)));
+ break;
+
+ case CType::kOID:
+ if (inverted) {
+ char buf[OID::kOIDSize];
+ memcpy_flipBits(buf, reader->skip(OID::kOIDSize), OID::kOIDSize);
+ *stream << OID::from(buf);
+ } else {
+ *stream << OID::from(reader->skip(OID::kOIDSize));
}
+ break;
- case CType::kCode: {
- if (inverted) {
- *stream << BSONCode(readInvertedCStringWithNuls(reader));
+ case CType::kStringLike: {
+ const uint8_t originalType = typeBits->readStringLike();
+ if (inverted) {
+ if (originalType == TypeBits::kString) {
+ *stream << readInvertedCStringWithNuls(reader);
+ } else {
+ dassert(originalType == TypeBits::kSymbol);
+ *stream << BSONSymbol(readInvertedCStringWithNuls(reader));
}
- else {
- std::string scratch;
- *stream << BSONCode(readCStringWithNuls(reader, &scratch));
+
+ } else {
+ std::string scratch;
+ if (originalType == TypeBits::kString) {
+ *stream << readCStringWithNuls(reader, &scratch);
+ } else {
+ dassert(originalType == TypeBits::kSymbol);
+ *stream << BSONSymbol(readCStringWithNuls(reader, &scratch));
}
- break;
}
+ break;
+ }
- case CType::kCodeWithScope: {
+ case CType::kCode: {
+ if (inverted) {
+ *stream << BSONCode(readInvertedCStringWithNuls(reader));
+ } else {
std::string scratch;
- StringData code; // will point to either scratch or the raw encoded bytes.
- if (inverted) {
- scratch = readInvertedCStringWithNuls(reader);
- code = scratch;
- }
- else {
- code = readCStringWithNuls(reader, &scratch);
- }
- // Not going to optimize CodeWScope.
- BSONObjBuilder scope;
- toBson(reader, typeBits, inverted, &scope);
- *stream << BSONCodeWScope(code, scope.done());
- break;
+ *stream << BSONCode(readCStringWithNuls(reader, &scratch));
}
+ break;
+ }
- case CType::kBinData: {
- size_t size = readType<uint8_t>(reader, inverted);
- if (size == 0xff) {
- // size was stored in 4 bytes.
- size = endian::bigToNative(readType<uint32_t>(reader, inverted));
- }
- BinDataType subType = BinDataType(readType<uint8_t>(reader, inverted));
- const void* ptr = reader->skip(size);
- if (!inverted) {
- *stream << BSONBinData(ptr, size, subType);
- }
- else {
- std::unique_ptr<char[]> flipped(new char[size]);
- memcpy_flipBits(flipped.get(), ptr, size);
- *stream << BSONBinData(flipped.get(), size, subType);
- }
- break;
+ case CType::kCodeWithScope: {
+ std::string scratch;
+ StringData code; // will point to either scratch or the raw encoded bytes.
+ if (inverted) {
+ scratch = readInvertedCStringWithNuls(reader);
+ code = scratch;
+ } else {
+ code = readCStringWithNuls(reader, &scratch);
}
+ // Not going to optimize CodeWScope.
+ BSONObjBuilder scope;
+ toBson(reader, typeBits, inverted, &scope);
+ *stream << BSONCodeWScope(code, scope.done());
+ break;
+ }
- case CType::kRegEx: {
- if (inverted) {
- string pattern = readInvertedCString(reader);
- string flags = readInvertedCString(reader);
- *stream << BSONRegEx(pattern, flags);
- }
- else {
- StringData pattern = readCString(reader);
- StringData flags = readCString(reader);
- *stream << BSONRegEx(pattern, flags);
- }
- break;
+ case CType::kBinData: {
+ size_t size = readType<uint8_t>(reader, inverted);
+ if (size == 0xff) {
+ // size was stored in 4 bytes.
+ size = endian::bigToNative(readType<uint32_t>(reader, inverted));
+ }
+ BinDataType subType = BinDataType(readType<uint8_t>(reader, inverted));
+ const void* ptr = reader->skip(size);
+ if (!inverted) {
+ *stream << BSONBinData(ptr, size, subType);
+ } else {
+ std::unique_ptr<char[]> flipped(new char[size]);
+ memcpy_flipBits(flipped.get(), ptr, size);
+ *stream << BSONBinData(flipped.get(), size, subType);
}
+ break;
+ }
- case CType::kDBRef: {
- size_t size = endian::bigToNative(readType<uint32_t>(reader, inverted));
- if (inverted) {
- std::unique_ptr<char[]> ns(new char[size]);
- memcpy_flipBits(ns.get(), reader->skip(size), size);
- char oidBytes[OID::kOIDSize];
- memcpy_flipBits(oidBytes, reader->skip(OID::kOIDSize), OID::kOIDSize);
- OID oid = OID::from(oidBytes);
- *stream << BSONDBRef(StringData(ns.get(), size), oid);
- }
- else {
- const char* ns = static_cast<const char*>(reader->skip(size));
- OID oid = OID::from(reader->skip(OID::kOIDSize));
- *stream << BSONDBRef(StringData(ns, size), oid);
- }
- break;
+ case CType::kRegEx: {
+ if (inverted) {
+ string pattern = readInvertedCString(reader);
+ string flags = readInvertedCString(reader);
+ *stream << BSONRegEx(pattern, flags);
+ } else {
+ StringData pattern = readCString(reader);
+ StringData flags = readCString(reader);
+ *stream << BSONRegEx(pattern, flags);
}
+ break;
+ }
- case CType::kObject: {
- BSONObjBuilder subObj(stream->subobjStart());
- toBson(reader, typeBits, inverted, &subObj);
- break;
+ case CType::kDBRef: {
+ size_t size = endian::bigToNative(readType<uint32_t>(reader, inverted));
+ if (inverted) {
+ std::unique_ptr<char[]> ns(new char[size]);
+ memcpy_flipBits(ns.get(), reader->skip(size), size);
+ char oidBytes[OID::kOIDSize];
+ memcpy_flipBits(oidBytes, reader->skip(OID::kOIDSize), OID::kOIDSize);
+ OID oid = OID::from(oidBytes);
+ *stream << BSONDBRef(StringData(ns.get(), size), oid);
+ } else {
+ const char* ns = static_cast<const char*>(reader->skip(size));
+ OID oid = OID::from(reader->skip(OID::kOIDSize));
+ *stream << BSONDBRef(StringData(ns, size), oid);
}
+ break;
+ }
- case CType::kArray: {
- BSONObjBuilder subArr(stream->subarrayStart());
- int index = 0;
- uint8_t elemType;
- while ((elemType = readType<uint8_t>(reader, inverted)) != 0) {
- toBsonValue(elemType,
- reader,
- typeBits,
- inverted,
- &(subArr << BSONObjBuilder::numStr(index++)));
- }
- break;
+ case CType::kObject: {
+ BSONObjBuilder subObj(stream->subobjStart());
+ toBson(reader, typeBits, inverted, &subObj);
+ break;
+ }
+
+ case CType::kArray: {
+ BSONObjBuilder subArr(stream->subarrayStart());
+ int index = 0;
+ uint8_t elemType;
+ while ((elemType = readType<uint8_t>(reader, inverted)) != 0) {
+ toBsonValue(elemType,
+ reader,
+ typeBits,
+ inverted,
+ &(subArr << BSONObjBuilder::numStr(index++)));
}
+ break;
+ }
- //
- // Numerics
- //
-
- case CType::kNumericNaN:
- invariant(typeBits->readNumeric() == TypeBits::kDouble);
- *stream << std::numeric_limits<double>::quiet_NaN();
- break;
-
- case CType::kNumericZero:
- switch(typeBits->readNumeric()) {
- case TypeBits::kDouble: *stream << 0.0; break;
- case TypeBits::kInt: *stream << 0; break;
- case TypeBits::kLong: *stream << 0ll; break;
- case TypeBits::kNegativeZero: *stream << -0.0; break;
- }
- break;
-
- case CType::kNumericNegativeLargeDouble:
- case CType::kNumericNegativeSmallDouble:
- inverted = !inverted;
- // fallthrough (format is the same as positive, but inverted)
-
- case CType::kNumericPositiveLargeDouble:
- case CType::kNumericPositiveSmallDouble: {
- // for these, the raw double was stored intact, including sign bit.
- const uint8_t originalType = typeBits->readNumeric();
- uint64_t encoded = readType<uint64_t>(reader, inverted);
- encoded = endian::bigToNative(encoded);
- double d;
- memcpy(&d, &encoded, sizeof(d));
-
- if (originalType == TypeBits::kDouble) {
- *stream << d;
- }
- else {
- // This can only happen for a single number.
- invariant(originalType == TypeBits::kLong);
- invariant(d == double(std::numeric_limits<long long>::min()));
- *stream << std::numeric_limits<long long>::min();
- }
+ //
+ // Numerics
+ //
- break;
+ case CType::kNumericNaN:
+ invariant(typeBits->readNumeric() == TypeBits::kDouble);
+ *stream << std::numeric_limits<double>::quiet_NaN();
+ break;
+
+ case CType::kNumericZero:
+ switch (typeBits->readNumeric()) {
+ case TypeBits::kDouble:
+ *stream << 0.0;
+ break;
+ case TypeBits::kInt:
+ *stream << 0;
+ break;
+ case TypeBits::kLong:
+ *stream << 0ll;
+ break;
+ case TypeBits::kNegativeZero:
+ *stream << -0.0;
+ break;
}
+ break;
- case CType::kNumericNegative8ByteInt:
- case CType::kNumericNegative7ByteInt:
- case CType::kNumericNegative6ByteInt:
- case CType::kNumericNegative5ByteInt:
- case CType::kNumericNegative4ByteInt:
- case CType::kNumericNegative3ByteInt:
- case CType::kNumericNegative2ByteInt:
- case CType::kNumericNegative1ByteInt:
- inverted = !inverted;
- isNegative = true;
- // fallthrough (format is the same as positive, but inverted)
-
- case CType::kNumericPositive1ByteInt:
- case CType::kNumericPositive2ByteInt:
- case CType::kNumericPositive3ByteInt:
- case CType::kNumericPositive4ByteInt:
- case CType::kNumericPositive5ByteInt:
- case CType::kNumericPositive6ByteInt:
- case CType::kNumericPositive7ByteInt:
- case CType::kNumericPositive8ByteInt: {
- const uint8_t originalType = typeBits->readNumeric();
-
- uint64_t encodedIntegerPart = 0;
- {
- size_t intBytesRemaining = CType::numBytesForInt(ctype);
- while (intBytesRemaining--) {
- encodedIntegerPart = (encodedIntegerPart << 8)
- | readType<uint8_t>(reader, inverted);
- }
- }
+ case CType::kNumericNegativeLargeDouble:
+ case CType::kNumericNegativeSmallDouble:
+ inverted = !inverted;
+ // fallthrough (format is the same as positive, but inverted)
+
+ case CType::kNumericPositiveLargeDouble:
+ case CType::kNumericPositiveSmallDouble: {
+ // for these, the raw double was stored intact, including sign bit.
+ const uint8_t originalType = typeBits->readNumeric();
+ uint64_t encoded = readType<uint64_t>(reader, inverted);
+ encoded = endian::bigToNative(encoded);
+ double d;
+ memcpy(&d, &encoded, sizeof(d));
+
+ if (originalType == TypeBits::kDouble) {
+ *stream << d;
+ } else {
+ // This can only happen for a single number.
+ invariant(originalType == TypeBits::kLong);
+ invariant(d == double(std::numeric_limits<long long>::min()));
+ *stream << std::numeric_limits<long long>::min();
+ }
- const bool haveFractionalPart = (encodedIntegerPart & 1);
- long long integerPart = encodedIntegerPart >> 1;
+ break;
+ }
- if (!haveFractionalPart) {
- if (isNegative)
- integerPart = -integerPart;
+ case CType::kNumericNegative8ByteInt:
+ case CType::kNumericNegative7ByteInt:
+ case CType::kNumericNegative6ByteInt:
+ case CType::kNumericNegative5ByteInt:
+ case CType::kNumericNegative4ByteInt:
+ case CType::kNumericNegative3ByteInt:
+ case CType::kNumericNegative2ByteInt:
+ case CType::kNumericNegative1ByteInt:
+ inverted = !inverted;
+ isNegative = true;
+ // fallthrough (format is the same as positive, but inverted)
+
+ case CType::kNumericPositive1ByteInt:
+ case CType::kNumericPositive2ByteInt:
+ case CType::kNumericPositive3ByteInt:
+ case CType::kNumericPositive4ByteInt:
+ case CType::kNumericPositive5ByteInt:
+ case CType::kNumericPositive6ByteInt:
+ case CType::kNumericPositive7ByteInt:
+ case CType::kNumericPositive8ByteInt: {
+ const uint8_t originalType = typeBits->readNumeric();
+
+ uint64_t encodedIntegerPart = 0;
+ {
+ size_t intBytesRemaining = CType::numBytesForInt(ctype);
+ while (intBytesRemaining--) {
+ encodedIntegerPart =
+ (encodedIntegerPart << 8) | readType<uint8_t>(reader, inverted);
+ }
+ }
- switch(originalType) {
- case TypeBits::kDouble: *stream << double(integerPart); break;
- case TypeBits::kInt: *stream << int(integerPart); break;
- case TypeBits::kLong: *stream << integerPart; break;
- case TypeBits::kNegativeZero: invariant(false);
- }
+ const bool haveFractionalPart = (encodedIntegerPart & 1);
+ long long integerPart = encodedIntegerPart >> 1;
+
+ if (!haveFractionalPart) {
+ if (isNegative)
+ integerPart = -integerPart;
+
+ switch (originalType) {
+ case TypeBits::kDouble:
+ *stream << double(integerPart);
+ break;
+ case TypeBits::kInt:
+ *stream << int(integerPart);
+ break;
+ case TypeBits::kLong:
+ *stream << integerPart;
+ break;
+ case TypeBits::kNegativeZero:
+ invariant(false);
+ }
+ } else {
+ // Nothing else can have a fractional part.
+ invariant(originalType == TypeBits::kDouble);
+
+ const uint64_t exponent = (64 - countLeadingZeros64(integerPart)) - 1;
+ const size_t fractionalBits = (52 - exponent);
+ const size_t fractionalBytes = (fractionalBits + 7) / 8;
+
+ // build up the bits of a double here.
+ uint64_t doubleBits = integerPart << fractionalBits;
+ doubleBits &= ~(1ull << 52); // clear implicit leading 1
+ doubleBits |= (exponent + 1023 /*bias*/) << 52;
+ if (isNegative) {
+ doubleBits |= (1ull << 63); // sign bit
}
- else {
- // Nothing else can have a fractional part.
- invariant(originalType == TypeBits::kDouble);
-
- const uint64_t exponent = (64 - countLeadingZeros64(integerPart)) - 1;
- const size_t fractionalBits = (52 - exponent);
- const size_t fractionalBytes = (fractionalBits + 7) / 8;
-
- // build up the bits of a double here.
- uint64_t doubleBits = integerPart << fractionalBits;
- doubleBits &= ~(1ull << 52); // clear implicit leading 1
- doubleBits |= (exponent + 1023/*bias*/) << 52;
- if (isNegative) {
- doubleBits |= (1ull << 63); // sign bit
- }
- for (size_t i = 0; i < fractionalBytes; i++) {
- // fold in the fractional bytes
- const uint64_t byte = readType<uint8_t>(reader, inverted);
- doubleBits |= (byte << ((fractionalBytes - i - 1) * 8));
- }
-
- double number;
- memcpy(&number, &doubleBits, sizeof(number));
- *stream << number;
+ for (size_t i = 0; i < fractionalBytes; i++) {
+ // fold in the fractional bytes
+ const uint64_t byte = readType<uint8_t>(reader, inverted);
+ doubleBits |= (byte << ((fractionalBytes - i - 1) * 8));
}
- break;
- }
- default: invariant(false);
- }
- }
- } // namespace
-
- BSONObj KeyString::toBson(const char* buffer, size_t len, Ordering ord,
- const TypeBits& typeBits) {
- BSONObjBuilder builder;
- BufReader reader(buffer, len);
- TypeBits::Reader typeBitsReader(typeBits);
- for (int i = 0; reader.remaining(); i++) {
- const bool invert = (ord.get(i) == -1);
- uint8_t ctype = readType<uint8_t>(&reader, invert);
- if (ctype == kLess || ctype == kGreater) {
- // This was just a discriminator which is logically part of the previous field. This
- // will only be encountered on queries, not in the keys stored in an index.
- // Note: this should probably affect the BSON key name of the last field, but it
- // must be read *after* the value so it isn't possible.
- ctype = readType<uint8_t>(&reader, invert);
+ double number;
+ memcpy(&number, &doubleBits, sizeof(number));
+ *stream << number;
}
- if (ctype == kEnd)
- break;
- toBsonValue(ctype, &reader, &typeBitsReader, invert, &(builder << ""));
+ break;
}
- return builder.obj();
+ default:
+ invariant(false);
}
+}
+} // namespace
+
+BSONObj KeyString::toBson(const char* buffer, size_t len, Ordering ord, const TypeBits& typeBits) {
+ BSONObjBuilder builder;
+ BufReader reader(buffer, len);
+ TypeBits::Reader typeBitsReader(typeBits);
+ for (int i = 0; reader.remaining(); i++) {
+ const bool invert = (ord.get(i) == -1);
+ uint8_t ctype = readType<uint8_t>(&reader, invert);
+ if (ctype == kLess || ctype == kGreater) {
+ // This was just a discriminator which is logically part of the previous field. This
+ // will only be encountered on queries, not in the keys stored in an index.
+ // Note: this should probably affect the BSON key name of the last field, but it
+ // must be read *after* the value so it isn't possible.
+ ctype = readType<uint8_t>(&reader, invert);
+ }
- BSONObj KeyString::toBson(StringData data, Ordering ord, const TypeBits& typeBits) {
- return toBson(data.rawData(), data.size(), ord, typeBits);
+ if (ctype == kEnd)
+ break;
+ toBsonValue(ctype, &reader, &typeBitsReader, invert, &(builder << ""));
}
-
- RecordId KeyString::decodeRecordIdAtEnd(const void* bufferRaw, size_t bufSize) {
- invariant(bufSize >= 2); // smallest possible encoding of a RecordId.
- const unsigned char* buffer = static_cast<const unsigned char*>(bufferRaw);
- const unsigned char lastByte = *(buffer + bufSize - 1);
- const size_t ridSize = 2 + (lastByte & 0x7); // stored in low 3 bits.
- invariant(bufSize >= ridSize);
- const unsigned char* firstBytePtr = buffer + bufSize - ridSize;
- BufReader reader(firstBytePtr, ridSize);
- return decodeRecordId(&reader);
+ return builder.obj();
+}
+
+BSONObj KeyString::toBson(StringData data, Ordering ord, const TypeBits& typeBits) {
+ return toBson(data.rawData(), data.size(), ord, typeBits);
+}
+
+RecordId KeyString::decodeRecordIdAtEnd(const void* bufferRaw, size_t bufSize) {
+ invariant(bufSize >= 2); // smallest possible encoding of a RecordId.
+ const unsigned char* buffer = static_cast<const unsigned char*>(bufferRaw);
+ const unsigned char lastByte = *(buffer + bufSize - 1);
+ const size_t ridSize = 2 + (lastByte & 0x7); // stored in low 3 bits.
+ invariant(bufSize >= ridSize);
+ const unsigned char* firstBytePtr = buffer + bufSize - ridSize;
+ BufReader reader(firstBytePtr, ridSize);
+ return decodeRecordId(&reader);
+}
+
+RecordId KeyString::decodeRecordId(BufReader* reader) {
+ const uint8_t firstByte = readType<uint8_t>(reader, false);
+ const uint8_t numExtraBytes = firstByte >> 5; // high 3 bits in firstByte
+ uint64_t repr = firstByte & 0x1f; // low 5 bits in firstByte
+ for (int i = 0; i < numExtraBytes; i++) {
+ repr = (repr << 8) | readType<uint8_t>(reader, false);
}
- RecordId KeyString::decodeRecordId(BufReader* reader) {
- const uint8_t firstByte = readType<uint8_t>(reader, false);
- const uint8_t numExtraBytes = firstByte >> 5; // high 3 bits in firstByte
- uint64_t repr = firstByte & 0x1f; // low 5 bits in firstByte
- for (int i = 0; i < numExtraBytes; i++) {
- repr = (repr << 8) | readType<uint8_t>(reader, false);
- }
+ const uint8_t lastByte = readType<uint8_t>(reader, false);
+ invariant((lastByte & 0x7) == numExtraBytes);
+ repr = (repr << 5) | (lastByte >> 3); // fold in high 5 bits of last byte
+ return RecordId(repr);
+}
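// Worked example (illustrative only), matching the encoding in appendRecordId():
// for the trailing bytes 0x20 0x91 0xa1, decodeRecordIdAtEnd() reads the last
// byte 0xa1, takes its low 3 bits (1), and backs up 1 + 2 = 3 bytes;
// decodeRecordId() then computes numExtraBytes = 0x20 >> 5 = 1,
// repr = (0x20 & 0x1f) = 0, folds in the extra byte to get 0x91, and finally
// (0x91 << 5) | (0xa1 >> 3) = 0x1234.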
- const uint8_t lastByte = readType<uint8_t>(reader, false);
- invariant((lastByte & 0x7) == numExtraBytes);
- repr = (repr << 5) | (lastByte >> 3); // fold in high 5 bits of last byte
- return RecordId(repr);
- }
+// ----------------------------------------------------------------------
+// --------- MISC class utils --------
+// ----------------------------------------------------------------------
- // ----------------------------------------------------------------------
- // --------- MISC class utils --------
- // ----------------------------------------------------------------------
+std::string KeyString::toString() const {
+ return toHex(getBuffer(), getSize());
+}
- std::string KeyString::toString() const {
- return toHex(getBuffer(), getSize());
- }
+int KeyString::compare(const KeyString& other) const {
+ int a = getSize();
+ int b = other.getSize();
- int KeyString::compare(const KeyString& other) const {
- int a = getSize();
- int b = other.getSize();
+ int min = std::min(a, b);
- int min = std::min(a, b);
+ int cmp = memcmp(getBuffer(), other.getBuffer(), min);
- int cmp = memcmp(getBuffer(), other.getBuffer(), min);
+ if (cmp) {
+ if (cmp < 0)
+ return -1;
+ return 1;
+ }
- if (cmp) {
- if (cmp < 0)
- return -1;
- return 1;
- }
+ // keys match
- // keys match
+ if (a == b)
+ return 0;
- if (a == b)
- return 0;
+ return a < b ? -1 : 1;
+}
- return a < b ? -1 : 1;
+void KeyString::TypeBits::resetFromBuffer(BufReader* reader) {
+ if (!reader->remaining()) {
+ // This means AllZeros state was encoded as an empty buffer.
+ reset();
+ return;
}
-
- void KeyString::TypeBits::resetFromBuffer(BufReader* reader) {
- if (!reader->remaining()) {
- // This means AllZeros state was encoded as an empty buffer.
- reset();
- return;
- }
- const uint8_t firstByte = readType<uint8_t>(reader, false);
- if (firstByte & 0x80) {
- // firstByte is the size byte.
- _isAllZeros = false; // it wouldn't be encoded like this if it was.
+ const uint8_t firstByte = readType<uint8_t>(reader, false);
+ if (firstByte & 0x80) {
+ // firstByte is the size byte.
+ _isAllZeros = false; // it wouldn't be encoded like this if it was.
- _buf[0] = firstByte;
- const uint8_t remainingBytes = getSizeByte();
- memcpy(_buf + 1, reader->skip(remainingBytes), remainingBytes);
- return;
- }
+ _buf[0] = firstByte;
+ const uint8_t remainingBytes = getSizeByte();
+ memcpy(_buf + 1, reader->skip(remainingBytes), remainingBytes);
+ return;
+ }
- // In remaining cases, firstByte is the only byte.
+ // In remaining cases, firstByte is the only byte.
- if (firstByte == 0) {
- // This means AllZeros state was encoded as a single 0 byte.
- reset();
- return;
- }
-
- _isAllZeros = false;
- setSizeByte(1);
- _buf[1] = firstByte;
+ if (firstByte == 0) {
+ // This means AllZeros state was encoded as a single 0 byte.
+ reset();
+ return;
}
- void KeyString::TypeBits::appendBit(uint8_t oneOrZero) {
- dassert(oneOrZero == 0 || oneOrZero == 1);
+ _isAllZeros = false;
+ setSizeByte(1);
+ _buf[1] = firstByte;
+}
- if (oneOrZero == 1) _isAllZeros = false;
+void KeyString::TypeBits::appendBit(uint8_t oneOrZero) {
+ dassert(oneOrZero == 0 || oneOrZero == 1);
- const uint8_t byte = (_curBit / 8) + 1;
- const uint8_t offsetInByte = _curBit % 8;
- if (offsetInByte == 0) {
- setSizeByte(byte);
- _buf[byte] = oneOrZero; // zeros bits 1-7
- }
- else {
- _buf[byte] |= (oneOrZero << offsetInByte);
- }
-
- _curBit++;
+ if (oneOrZero == 1)
+ _isAllZeros = false;
+
+ const uint8_t byte = (_curBit / 8) + 1;
+ const uint8_t offsetInByte = _curBit % 8;
+ if (offsetInByte == 0) {
+ setSizeByte(byte);
+ _buf[byte] = oneOrZero; // zeros bits 1-7
+ } else {
+ _buf[byte] |= (oneOrZero << offsetInByte);
}
- uint8_t KeyString::TypeBits::Reader::readBit() {
- if (_typeBits._isAllZeros) return 0;
+ _curBit++;
+}
- const uint8_t byte = (_curBit / 8) + 1;
- const uint8_t offsetInByte = _curBit % 8;
- _curBit++;
+uint8_t KeyString::TypeBits::Reader::readBit() {
+ if (_typeBits._isAllZeros)
+ return 0;
- dassert(byte <= _typeBits.getSizeByte());
+ const uint8_t byte = (_curBit / 8) + 1;
+ const uint8_t offsetInByte = _curBit % 8;
+ _curBit++;
- return (_typeBits._buf[byte] & (1 << offsetInByte)) ? 1 : 0;
- }
+ dassert(byte <= _typeBits.getSizeByte());
+
+ return (_typeBits._buf[byte] & (1 << offsetInByte)) ? 1 : 0;
+}
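// Worked example of the bit packing above (illustrative only): after a key whose
// only element is a NumberDouble, appendNumberDouble() has emitted the bits
// (1, 0), so _buf[1] == 0x01 and getSize() returns a single data byte ("Case 3"
// in the header's getBuffer() comment). A key containing only strings and ints
// emits nothing but zero bits, stays in the AllZeros state, and costs at most a
// single 0x00 byte when stored.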
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/key_string.h b/src/mongo/db/storage/key_string.h
index 04b341521bf..7ef8ee0e723 100644
--- a/src/mongo/db/storage/key_string.h
+++ b/src/mongo/db/storage/key_string.h
@@ -39,287 +39,315 @@
namespace mongo {
- class KeyString {
+class KeyString {
+public:
+ /**
+ * Encodes info needed to restore the original BSONTypes from a KeyString. They cannot be
+ * stored in place since we don't want them to affect the ordering (1 and 1.0 compare as
+ * equal).
+ */
+ class TypeBits {
public:
+ // Sufficient bytes to encode extra type information for any BSON key that fits in 1KB.
+ // The encoding format will need to change if we raise this limit.
+ static const uint8_t kMaxBytesNeeded = 127;
+
+ TypeBits() {
+ reset();
+ }
/**
- * Encodes info needed to restore the original BSONTypes from a KeyString. They cannot be
- * stored in place since we don't want them to affect the ordering (1 and 1.0 compare as
- * equal).
+ * If there are no bytes remaining, assumes AllZeros. Otherwise, reads bytes out of the
+ * BufReader in the format described on the getBuffer() method.
*/
- class TypeBits {
- public:
- // Sufficient bytes to encode extra type information for any BSON key that fits in 1KB.
- // The encoding format will need to change if we raise this limit.
- static const uint8_t kMaxBytesNeeded = 127;
-
- TypeBits() { reset(); }
-
- /**
- * If there are no bytes remaining, assumes AllZeros. Otherwise, reads bytes out of the
- * BufReader in the format described on the getBuffer() method.
- */
- void resetFromBuffer(BufReader* reader);
- static TypeBits fromBuffer(BufReader* reader) {
- TypeBits out;
- out.resetFromBuffer(reader);
- return out;
- }
-
- /**
- * If true, no bits have been set to one. This is true if no bits have been set at all.
- */
- bool isAllZeros() const { return _isAllZeros; }
-
- /**
- * These methods return a buffer and size which encodes all of the type bits in this
- * instance.
- *
- * Encoded format:
- * Case 1 (first byte has high bit set to 1):
- * Remaining bits of first byte encode number of follow-up bytes that are data
- * bytes. Note that _buf is always maintained in this format but these methods may
- * return one of the other formats, if possible, by skipping over the first byte.
- *
- * Case 2 (first byte is 0x0):
- * This encodes the "AllZeros" state which represents an infinite stream of bits set
- * to 0. Callers may optionally encode this case as an empty buffer if they have
- * another way to mark the end of the buffer. There are no follow-up bytes.
- *
- * Case 3 (first byte isn't 0x0 but has high bit set to 0):
- * The first byte is the only data byte. This can represent any 7-bit sequence or an
- * 8-bit sequence if the 8th bit is 0, since the 8th bit is the same as the bit that
- * is 1 if the first byte is the size byte. There are no follow-up bytes.
- *
- * Within data bytes (ie everything excluding the size byte if there is one), bits are
- * packed in from low to high.
- */
- const uint8_t* getBuffer() const { return getSize() == 1 ? _buf + 1 : _buf; }
- size_t getSize() const {
- if (_isAllZeros) { // Case 2
- dassert(_buf[1] == 0);
- return 1;
- }
-
- uint8_t rawSize = getSizeByte();
- dassert(rawSize >= 1); // 0 should be handled as isAllZeros.
- if (rawSize == 1 && !(_buf[1] & 0x80)) { // Case 3
- return 1;
- }
-
- return rawSize + 1; // Case 1
- }
-
- //
- // Everything below is only for use by KeyString.
- //
-
- // Note: No space is used if all bits are 0 so the most common cases should be 0x0.
- static const uint8_t kString = 0x0;
- static const uint8_t kSymbol = 0x1;
-
- static const uint8_t kInt = 0x0;
- static const uint8_t kDouble = 0x1;
- static const uint8_t kLong = 0x2;
- static const uint8_t kNegativeZero = 0x3; // decodes as a double
-
- void reset() {
- _curBit = 0;
- _isAllZeros = true;
- setSizeByte(0);
- _buf[1] = 0;
- }
+ void resetFromBuffer(BufReader* reader);
+ static TypeBits fromBuffer(BufReader* reader) {
+ TypeBits out;
+ out.resetFromBuffer(reader);
+ return out;
+ }
- void appendString() { appendBit(kString); }
- void appendSymbol() { appendBit(kSymbol); }
+ /**
+ * If true, no bits have been set to one. This is true if no bits have been set at all.
+ */
+ bool isAllZeros() const {
+ return _isAllZeros;
+ }
- void appendNumberDouble() { appendBit(kDouble & 1); appendBit(kDouble >> 1); }
- void appendNumberInt() { appendBit(kInt & 1); appendBit(kInt >> 1); }
- void appendNumberLong() { appendBit(kLong & 1); appendBit(kLong >> 1); }
- void appendNegativeZero() {
- appendBit(kNegativeZero & 1);
- appendBit(kNegativeZero >> 1);
+ /**
+ * These methods return a buffer and size which encodes all of the type bits in this
+ * instance.
+ *
+ * Encoded format:
+ * Case 1 (first byte has high bit set to 1):
+ * Remaining bits of first byte encode number of follow-up bytes that are data
+ * bytes. Note that _buf is always maintained in this format but these methods may
+ * return one of the other formats, if possible, by skipping over the first byte.
+ *
+ * Case 2 (first byte is 0x0):
+ * This encodes the "AllZeros" state which represents an infinite stream of bits set
+ * to 0. Callers may optionally encode this case as an empty buffer if they have
+ * another way to mark the end of the buffer. There are no follow-up bytes.
+ *
+ * Case 3 (first byte isn't 0x0 but has high bit set to 0):
+ * The first byte is the only data byte. This can represent any 7-bit sequence or an
+ * 8-bit sequence if the 8th bit is 0, since the 8th bit is the same as the bit that
+ * is 1 if the first byte is the size byte. There are no follow-up bytes.
+ *
+ * Within data bytes (ie everything excluding the size byte if there is one), bits are
+ * packed in from low to high.
+ */
+ const uint8_t* getBuffer() const {
+ return getSize() == 1 ? _buf + 1 : _buf;
+ }
+ size_t getSize() const {
+ if (_isAllZeros) { // Case 2
+ dassert(_buf[1] == 0);
+ return 1;
}
- class Reader {
- public:
- /**
- * Passed in TypeBits must outlive this Reader instance.
- */
- explicit Reader(const TypeBits& typeBits) : _curBit(0), _typeBits(typeBits) {}
-
- uint8_t readStringLike() { return readBit(); }
- uint8_t readNumeric() {
- uint8_t lowBit = readBit();
- return lowBit | (readBit() << 1);
- }
-
- private:
- uint8_t readBit();
-
- size_t _curBit;
- const TypeBits& _typeBits;
- };
-
- private:
- /**
- * size only includes data bytes, not the size byte itself.
- */
- uint8_t getSizeByte() const { return _buf[0] & 0x3f; }
- void setSizeByte(uint8_t size) {
- dassert(size < kMaxBytesNeeded);
- _buf[0] = 0x80 | size;
+ uint8_t rawSize = getSizeByte();
+ dassert(rawSize >= 1); // 0 should be handled as isAllZeros.
+ if (rawSize == 1 && !(_buf[1] & 0x80)) { // Case 3
+ return 1;
}
- void appendBit(uint8_t oneOrZero);
-
- size_t _curBit;
- bool _isAllZeros;
+ return rawSize + 1; // Case 1
+ }
- // See getBuffer()/getSize() documentation for a description of how data is encoded.
- // Currently whole buffer is copied in default copy methods. If they ever show up as hot
- // in profiling, we should add copy operations that only copy the parts of _buf that are
- // in use.
- uint8_t _buf[1/*size*/ + kMaxBytesNeeded];
- };
+ //
+ // Everything below is only for use by KeyString.
+ //
- enum Discriminator {
- kInclusive, // Anything to be stored in an index must use this.
- kExclusiveBefore,
- kExclusiveAfter,
- };
+ // Note: No space is used if all bits are 0 so the most common cases should be 0x0.
+ static const uint8_t kString = 0x0;
+ static const uint8_t kSymbol = 0x1;
- KeyString() {}
+ static const uint8_t kInt = 0x0;
+ static const uint8_t kDouble = 0x1;
+ static const uint8_t kLong = 0x2;
+ static const uint8_t kNegativeZero = 0x3; // decodes as a double
- KeyString(const BSONObj& obj, Ordering ord, RecordId recordId) {
- resetToKey(obj, ord, recordId);
+ void reset() {
+ _curBit = 0;
+ _isAllZeros = true;
+ setSizeByte(0);
+ _buf[1] = 0;
}
- KeyString(const BSONObj& obj, Ordering ord, Discriminator discriminator = kInclusive) {
- resetToKey(obj, ord, discriminator);
+ void appendString() {
+ appendBit(kString);
+ }
+ void appendSymbol() {
+ appendBit(kSymbol);
}
- explicit KeyString(RecordId rid) {
- appendRecordId(rid);
+ void appendNumberDouble() {
+ appendBit(kDouble & 1);
+ appendBit(kDouble >> 1);
+ }
+ void appendNumberInt() {
+ appendBit(kInt & 1);
+ appendBit(kInt >> 1);
+ }
+ void appendNumberLong() {
+ appendBit(kLong & 1);
+ appendBit(kLong >> 1);
+ }
+ void appendNegativeZero() {
+ appendBit(kNegativeZero & 1);
+ appendBit(kNegativeZero >> 1);
}
- static BSONObj toBson(StringData data, Ordering ord, const TypeBits& types);
- static BSONObj toBson(const char* buffer, size_t len, Ordering ord,
- const TypeBits& types);
+ class Reader {
+ public:
+ /**
+ * Passed in TypeBits must outlive this Reader instance.
+ */
+ explicit Reader(const TypeBits& typeBits) : _curBit(0), _typeBits(typeBits) {}
- /**
- * Decodes a RecordId from the end of a buffer.
- */
- static RecordId decodeRecordIdAtEnd(const void* buf, size_t size);
+ uint8_t readStringLike() {
+ return readBit();
+ }
+ uint8_t readNumeric() {
+ uint8_t lowBit = readBit();
+ return lowBit | (readBit() << 1);
+ }
- /**
- * Decodes a RecordId, consuming all bytes needed from reader.
- */
- static RecordId decodeRecordId(BufReader* reader);
+ private:
+ uint8_t readBit();
- void appendRecordId(RecordId loc);
- void appendTypeBits(const TypeBits& bits);
+ size_t _curBit;
+ const TypeBits& _typeBits;
+ };
+ private:
/**
- * Resets to an empty state.
- * Equivalent to but faster than *this = KeyString()
+ * size only includes data bytes, not the size byte itself.
*/
- void resetToEmpty() {
- _buffer.reset();
- _typeBits.reset();
+ uint8_t getSizeByte() const {
+ return _buf[0] & 0x3f;
}
-
- void resetToKey(const BSONObj& obj, Ordering ord, RecordId recordId);
- void resetToKey(const BSONObj& obj, Ordering ord, Discriminator discriminator = kInclusive);
- void resetFromBuffer(const void* buffer, size_t size) {
- _buffer.reset();
- memcpy(_buffer.skip(size), buffer, size);
+ void setSizeByte(uint8_t size) {
+ dassert(size < kMaxBytesNeeded);
+ _buf[0] = 0x80 | size;
}
- const char* getBuffer() const { return _buffer.buf(); }
- size_t getSize() const { return _buffer.len(); }
- bool isEmpty() const { return _buffer.len() == 0; }
-
- const TypeBits& getTypeBits() const { return _typeBits; }
+ void appendBit(uint8_t oneOrZero);
- int compare(const KeyString& other) const;
+ size_t _curBit;
+ bool _isAllZeros;
- /**
- * @return a hex encoding of this key
- */
- std::string toString() const;
-
- private:
-
- void _appendAllElementsForIndexing(const BSONObj& obj, Ordering ord,
- Discriminator discriminator);
-
- void _appendBool(bool val, bool invert);
- void _appendDate(Date_t val, bool invert);
- void _appendTimestamp(Timestamp val, bool invert);
- void _appendOID(OID val, bool invert);
- void _appendString(StringData val, bool invert);
- void _appendSymbol(StringData val, bool invert);
- void _appendCode(StringData val, bool invert);
- void _appendCodeWString(const BSONCodeWScope& val, bool invert);
- void _appendBinData(const BSONBinData& val, bool invert);
- void _appendRegex(const BSONRegEx& val, bool invert);
- void _appendDBRef(const BSONDBRef& val, bool invert);
- void _appendArray(const BSONArray& val, bool invert);
- void _appendObject(const BSONObj& val, bool invert);
- void _appendNumberDouble(const double num, bool invert);
- void _appendNumberLong(const long long num, bool invert);
- void _appendNumberInt(const int num, bool invert);
+ // See getBuffer()/getSize() documentation for a description of how data is encoded.
+ // Currently whole buffer is copied in default copy methods. If they ever show up as hot
+ // in profiling, we should add copy operations that only copy the parts of _buf that are
+ // in use.
+ uint8_t _buf[1 /*size*/ + kMaxBytesNeeded];
+ };
- /**
- * @param name - optional, can be NULL
- * if NULL, not included in encoding
- * if not NULL, put in after type, before value
- */
- void _appendBsonValue(const BSONElement& elem,
- bool invert,
- const StringData* name);
-
- void _appendStringLike(StringData str, bool invert);
- void _appendBson(const BSONObj& obj, bool invert);
- void _appendSmallDouble(double value, bool invert);
- void _appendLargeDouble(double value, bool invert);
- void _appendInteger(const long long num, bool invert);
- void _appendPreshiftedIntegerPortion(uint64_t value, bool isNegative, bool invert);
-
- template <typename T> void _append(const T& thing, bool invert);
- void _appendBytes(const void* source, size_t bytes, bool invert);
-
- TypeBits _typeBits;
- StackBufBuilder _buffer;
+ enum Discriminator {
+ kInclusive, // Anything to be stored in an index must use this.
+ kExclusiveBefore,
+ kExclusiveAfter,
};
- inline bool operator<(const KeyString& lhs, const KeyString& rhs) {
- return lhs.compare(rhs) < 0;
+ KeyString() {}
+
+ KeyString(const BSONObj& obj, Ordering ord, RecordId recordId) {
+ resetToKey(obj, ord, recordId);
}
- inline bool operator<=(const KeyString& lhs, const KeyString& rhs) {
- return lhs.compare(rhs) <= 0;
+ KeyString(const BSONObj& obj, Ordering ord, Discriminator discriminator = kInclusive) {
+ resetToKey(obj, ord, discriminator);
}
- inline bool operator==(const KeyString& lhs, const KeyString& rhs) {
- return lhs.compare(rhs) == 0;
+ explicit KeyString(RecordId rid) {
+ appendRecordId(rid);
}
- inline bool operator>(const KeyString& lhs, const KeyString& rhs) {
- return lhs.compare(rhs) > 0;
+ static BSONObj toBson(StringData data, Ordering ord, const TypeBits& types);
+ static BSONObj toBson(const char* buffer, size_t len, Ordering ord, const TypeBits& types);
+
+ /**
+ * Decodes a RecordId from the end of a buffer.
+ */
+ static RecordId decodeRecordIdAtEnd(const void* buf, size_t size);
+
+ /**
+ * Decodes a RecordId, consuming all bytes needed from reader.
+ */
+ static RecordId decodeRecordId(BufReader* reader);
+
+ void appendRecordId(RecordId loc);
+ void appendTypeBits(const TypeBits& bits);
+
+ /**
+ * Resets to an empty state.
+ * Equivalent to but faster than *this = KeyString()
+ */
+ void resetToEmpty() {
+ _buffer.reset();
+ _typeBits.reset();
}
- inline bool operator>=(const KeyString& lhs, const KeyString& rhs) {
- return lhs.compare(rhs) >= 0;
+ void resetToKey(const BSONObj& obj, Ordering ord, RecordId recordId);
+ void resetToKey(const BSONObj& obj, Ordering ord, Discriminator discriminator = kInclusive);
+ void resetFromBuffer(const void* buffer, size_t size) {
+ _buffer.reset();
+ memcpy(_buffer.skip(size), buffer, size);
}
- inline bool operator!=(const KeyString& lhs, const KeyString& rhs) {
- return !(lhs == rhs);
+ const char* getBuffer() const {
+ return _buffer.buf();
+ }
+ size_t getSize() const {
+ return _buffer.len();
+ }
+ bool isEmpty() const {
+ return _buffer.len() == 0;
}
- inline std::ostream& operator<<(std::ostream& stream, const KeyString& value) {
- return stream << value.toString();
+ const TypeBits& getTypeBits() const {
+ return _typeBits;
}
-} // namespace mongo
+ int compare(const KeyString& other) const;
+
+ /**
+ * @return a hex encoding of this key
+ */
+ std::string toString() const;
+
+private:
+ void _appendAllElementsForIndexing(const BSONObj& obj,
+ Ordering ord,
+ Discriminator discriminator);
+
+ void _appendBool(bool val, bool invert);
+ void _appendDate(Date_t val, bool invert);
+ void _appendTimestamp(Timestamp val, bool invert);
+ void _appendOID(OID val, bool invert);
+ void _appendString(StringData val, bool invert);
+ void _appendSymbol(StringData val, bool invert);
+ void _appendCode(StringData val, bool invert);
+ void _appendCodeWString(const BSONCodeWScope& val, bool invert);
+ void _appendBinData(const BSONBinData& val, bool invert);
+ void _appendRegex(const BSONRegEx& val, bool invert);
+ void _appendDBRef(const BSONDBRef& val, bool invert);
+ void _appendArray(const BSONArray& val, bool invert);
+ void _appendObject(const BSONObj& val, bool invert);
+ void _appendNumberDouble(const double num, bool invert);
+ void _appendNumberLong(const long long num, bool invert);
+ void _appendNumberInt(const int num, bool invert);
+
+ /**
+ * @param name - optional, can be NULL
+ * if NULL, not included in encoding
+ * if not NULL, put in after type, before value
+ */
+ void _appendBsonValue(const BSONElement& elem, bool invert, const StringData* name);
+
+ void _appendStringLike(StringData str, bool invert);
+ void _appendBson(const BSONObj& obj, bool invert);
+ void _appendSmallDouble(double value, bool invert);
+ void _appendLargeDouble(double value, bool invert);
+ void _appendInteger(const long long num, bool invert);
+ void _appendPreshiftedIntegerPortion(uint64_t value, bool isNegative, bool invert);
+
+ template <typename T>
+ void _append(const T& thing, bool invert);
+ void _appendBytes(const void* source, size_t bytes, bool invert);
+
+ TypeBits _typeBits;
+ StackBufBuilder _buffer;
+};
+
+inline bool operator<(const KeyString& lhs, const KeyString& rhs) {
+ return lhs.compare(rhs) < 0;
+}
+
+inline bool operator<=(const KeyString& lhs, const KeyString& rhs) {
+ return lhs.compare(rhs) <= 0;
+}
+
+inline bool operator==(const KeyString& lhs, const KeyString& rhs) {
+ return lhs.compare(rhs) == 0;
+}
+
+inline bool operator>(const KeyString& lhs, const KeyString& rhs) {
+ return lhs.compare(rhs) > 0;
+}
+
+inline bool operator>=(const KeyString& lhs, const KeyString& rhs) {
+ return lhs.compare(rhs) >= 0;
+}
+
+inline bool operator!=(const KeyString& lhs, const KeyString& rhs) {
+ return !(lhs == rhs);
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const KeyString& value) {
+ return stream << value.toString();
+}
+
+} // namespace mongo
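
The getBuffer()/getSize() comment above distinguishes three encoded shapes: a lone 0x0 byte for AllZeros, a single data byte when its high bit is clear, and otherwise a size byte (high bit set) followed by that many data bytes. Below is a standalone sketch of that size calculation under those assumptions; the 0x3f mask mirrors getSizeByte(), and the names are illustrative, not the mongo API.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Given a raw TypeBits-style buffer laid out as [size byte | data bytes...],
// compute where the encoded form starts and how many bytes it occupies,
// following the three cases documented for getBuffer()/getSize().
struct Encoded {
    const uint8_t* ptr;
    size_t size;
};

Encoded encodedView(const uint8_t* buf, bool isAllZeros) {
    if (isAllZeros)                          // Case 2: single 0x0 byte
        return {buf + 1, 1};                 // buf[1] is kept at 0
    const uint8_t dataBytes = buf[0] & 0x3f;
    assert(dataBytes >= 1);
    if (dataBytes == 1 && !(buf[1] & 0x80))  // Case 3: one data byte, high bit clear
        return {buf + 1, 1};
    return {buf, dataBytes + 1u};            // Case 1: size byte + data bytes
}

int main() {
    uint8_t allZeros[2] = {0x80, 0x00};  // size byte says 0 data bytes
    assert(encodedView(allZeros, true).size == 1);

    uint8_t oneByte[2] = {0x81, 0x05};  // one data byte that fits in 7 bits
    assert(encodedView(oneByte, false).size == 1);

    uint8_t twoBytes[3] = {0x82, 0xff, 0x01};  // needs the explicit size byte
    assert(encodedView(twoBytes, false).size == 3);
    return 0;
}
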
diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp
index e085be8fbb9..082d01366a9 100644
--- a/src/mongo/db/storage/key_string_test.cpp
+++ b/src/mongo/db/storage/key_string_test.cpp
@@ -61,61 +61,60 @@ TEST(KeyStringTest, Simple1) {
KeyString(b, ALL_ASCENDING, RecordId()));
}
-#define ROUNDTRIP_ORDER(x, order) do { \
- const BSONObj _orig = x; \
- const KeyString _ks(_orig, order); \
- const BSONObj _converted = toBson(_ks, order); \
- ASSERT_EQ(_converted, _orig); \
- ASSERT(_converted.binaryEqual(_orig)); \
+#define ROUNDTRIP_ORDER(x, order) \
+ do { \
+ const BSONObj _orig = x; \
+ const KeyString _ks(_orig, order); \
+ const BSONObj _converted = toBson(_ks, order); \
+ ASSERT_EQ(_converted, _orig); \
+ ASSERT(_converted.binaryEqual(_orig)); \
} while (0)
-#define ROUNDTRIP(x) do { \
- ROUNDTRIP_ORDER(x, ALL_ASCENDING); \
- ROUNDTRIP_ORDER(x, ONE_DESCENDING); \
+#define ROUNDTRIP(x) \
+ do { \
+ ROUNDTRIP_ORDER(x, ALL_ASCENDING); \
+ ROUNDTRIP_ORDER(x, ONE_DESCENDING); \
} while (0)
-#define COMPARES_SAME(_x,_y) do { \
- KeyString _xKS(_x, ONE_ASCENDING); \
- KeyString _yKS(_y, ONE_ASCENDING); \
- if (_x == _y) { \
- ASSERT_EQUALS(_xKS, _yKS); \
- } \
- else if (_x < _y) { \
- ASSERT_LESS_THAN(_xKS, _yKS); \
- } \
- else { \
- ASSERT_LESS_THAN(_yKS, _xKS); \
- } \
- \
- _xKS.resetToKey(_x, ONE_DESCENDING); \
- _yKS.resetToKey(_y, ONE_DESCENDING); \
- if (_x == _y) { \
- ASSERT_EQUALS(_xKS, _yKS); \
- } \
- else if (_x < _y) { \
- ASSERT_GREATER_THAN(_xKS, _yKS); \
- } \
- else { \
- ASSERT_GREATER_THAN(_yKS, _xKS); \
- } \
+#define COMPARES_SAME(_x, _y) \
+ do { \
+ KeyString _xKS(_x, ONE_ASCENDING); \
+ KeyString _yKS(_y, ONE_ASCENDING); \
+ if (_x == _y) { \
+ ASSERT_EQUALS(_xKS, _yKS); \
+ } else if (_x < _y) { \
+ ASSERT_LESS_THAN(_xKS, _yKS); \
+ } else { \
+ ASSERT_LESS_THAN(_yKS, _xKS); \
+ } \
+ \
+ _xKS.resetToKey(_x, ONE_DESCENDING); \
+ _yKS.resetToKey(_y, ONE_DESCENDING); \
+ if (_x == _y) { \
+ ASSERT_EQUALS(_xKS, _yKS); \
+ } else if (_x < _y) { \
+ ASSERT_GREATER_THAN(_xKS, _yKS); \
+ } else { \
+ ASSERT_GREATER_THAN(_yKS, _xKS); \
+ } \
} while (0)
TEST(KeyStringTest, ActualBytesDouble) {
// just one test like this for utter sanity
- BSONObj a = BSON("" << 5.5 );
+ BSONObj a = BSON("" << 5.5);
KeyString ks(a, ALL_ASCENDING);
log() << "size: " << ks.getSize() << " hex [" << toHex(ks.getBuffer(), ks.getSize()) << "]";
ASSERT_EQUALS(10U, ks.getSize());
- string hex = "2B" // kNumericPositive1ByteInt
- "0B" // (5 << 1) | 1
- "02000000000000" // fractional bytes of double
- "04"; // kEnd
+ string hex =
+ "2B" // kNumericPositive1ByteInt
+ "0B" // (5 << 1) | 1
+ "02000000000000" // fractional bytes of double
+ "04"; // kEnd
- ASSERT_EQUALS(hex,
- toHex(ks.getBuffer(), ks.getSize()));
+ ASSERT_EQUALS(hex, toHex(ks.getBuffer(), ks.getSize()));
ks.resetToKey(a, Ordering::make(BSON("a" << -1)));
@@ -124,23 +123,23 @@ TEST(KeyStringTest, ActualBytesDouble) {
// last byte (kEnd) doesn't get flipped
string hexFlipped;
- for ( size_t i = 0; i < hex.size()-2; i += 2 ) {
+ for (size_t i = 0; i < hex.size() - 2; i += 2) {
char c = fromHex(hex.c_str() + i);
c = ~c;
hexFlipped += toHex(&c, 1);
}
- hexFlipped += hex.substr(hex.size()-2);
+ hexFlipped += hex.substr(hex.size() - 2);
- ASSERT_EQUALS(hexFlipped,
- toHex(ks.getBuffer(), ks.getSize()));
+ ASSERT_EQUALS(hexFlipped, toHex(ks.getBuffer(), ks.getSize()));
}
TEST(KeyStringTest, AllTypesSimple) {
ROUNDTRIP(BSON("" << 5.5));
- ROUNDTRIP(BSON("" << "abc"));
+ ROUNDTRIP(BSON(""
+ << "abc"));
ROUNDTRIP(BSON("" << BSON("a" << 5)));
ROUNDTRIP(BSON("" << BSON_ARRAY("a" << 5)));
- ROUNDTRIP(BSON("" << BSONBinData( "abc", 3, bdtCustom )));
+ ROUNDTRIP(BSON("" << BSONBinData("abc", 3, bdtCustom)));
ROUNDTRIP(BSON("" << BSONUndefined));
ROUNDTRIP(BSON("" << OID("abcdefabcdefabcdefabcdef")));
ROUNDTRIP(BSON("" << true));
@@ -148,7 +147,9 @@ TEST(KeyStringTest, AllTypesSimple) {
ROUNDTRIP(BSON("" << BSONRegEx("asdf", "x")));
ROUNDTRIP(BSON("" << BSONDBRef("db.c", OID("010203040506070809101112"))));
ROUNDTRIP(BSON("" << BSONCode("abc_code")));
- ROUNDTRIP(BSON("" << BSONCodeWScope("def_code", BSON("x_scope" << "a"))));
+ ROUNDTRIP(BSON("" << BSONCodeWScope("def_code",
+ BSON("x_scope"
+ << "a"))));
ROUNDTRIP(BSON("" << 5));
ROUNDTRIP(BSON("" << Timestamp(123123, 123)));
ROUNDTRIP(BSON("" << 1235123123123LL));
@@ -176,26 +177,27 @@ TEST(KeyStringTest, Array1) {
KeyString b(emptyArray, ALL_ASCENDING, RecordId(5));
ASSERT_LESS_THAN(a, b);
}
-
}
TEST(KeyStringTest, SubDoc1) {
ROUNDTRIP(BSON("" << BSON("foo" << 2)));
- ROUNDTRIP(BSON("" << BSON("foo" << 2 << "bar" << "asd")));
+ ROUNDTRIP(BSON("" << BSON("foo" << 2 << "bar"
+ << "asd")));
ROUNDTRIP(BSON("" << BSON("foo" << BSON_ARRAY(2 << 4))));
}
TEST(KeyStringTest, SubDoc2) {
- BSONObj a = BSON("" << BSON("a" << "foo"));
+ BSONObj a = BSON("" << BSON("a"
+ << "foo"));
BSONObj b = BSON("" << BSON("b" << 5.5));
BSONObj c = BSON("" << BSON("c" << BSON("x" << 5)));
ROUNDTRIP(a);
ROUNDTRIP(b);
ROUNDTRIP(c);
- COMPARES_SAME(a,b);
- COMPARES_SAME(a,c);
- COMPARES_SAME(b,c);
+ COMPARES_SAME(a, b);
+ COMPARES_SAME(a, c);
+ COMPARES_SAME(b, c);
}
@@ -248,7 +250,6 @@ TEST(KeyStringTest, LotsOfNumbers1) {
ROUNDTRIP(BSON("" << -(static_cast<int>(x) + 1)));
ROUNDTRIP(BSON("" << -(static_cast<double>(x) + 1)));
ROUNDTRIP(BSON("" << -(static_cast<double>(x) + 1.1)));
-
}
}
@@ -264,7 +265,6 @@ TEST(KeyStringTest, LotsOfNumbers2) {
}
TEST(KeyStringTest, RecordIdOrder1) {
-
Ordering ordering = Ordering::make(BSON("a" << 1));
KeyString a(BSON("" << 5), ordering, RecordId::min());
@@ -277,11 +277,9 @@ TEST(KeyStringTest, RecordIdOrder1) {
ASSERT_LESS_THAN(b, c);
ASSERT_LESS_THAN(c, d);
ASSERT_LESS_THAN(d, e);
-
}
TEST(KeyStringTest, RecordIdOrder2) {
-
Ordering ordering = Ordering::make(BSON("a" << -1 << "b" << -1));
KeyString a(BSON("" << 5 << "" << 6), ordering, RecordId::min());
@@ -298,7 +296,6 @@ TEST(KeyStringTest, RecordIdOrder2) {
}
TEST(KeyStringTest, RecordIdOrder2Double) {
-
Ordering ordering = Ordering::make(BSON("a" << -1 << "b" << -1));
KeyString a(BSON("" << 5.0 << "" << 6.0), ordering, RecordId::min());
@@ -311,7 +308,6 @@ TEST(KeyStringTest, RecordIdOrder2Double) {
}
TEST(KeyStringTest, Timestamp) {
-
BSONObj a = BSON("" << Timestamp(0, 0));
BSONObj b = BSON("" << Timestamp(1234, 1));
BSONObj c = BSON("" << Timestamp(1234, 2));
@@ -356,20 +352,19 @@ TEST(KeyStringTest, Timestamp) {
ASSERT(kb.compare(kc) > 0);
ASSERT(kc.compare(kd) > 0);
}
-
}
TEST(KeyStringTest, AllTypesRoundtrip) {
- for ( int i = 1; i <= JSTypeMax; i++ ) {
+ for (int i = 1; i <= JSTypeMax; i++) {
{
BSONObjBuilder b;
- b.appendMinForType("", i );
+ b.appendMinForType("", i);
BSONObj o = b.obj();
ROUNDTRIP(o);
}
{
BSONObjBuilder b;
- b.appendMaxForType("", i );
+ b.appendMaxForType("", i);
BSONObj o = b.obj();
ROUNDTRIP(o);
}
@@ -382,7 +377,7 @@ const std::vector<BSONObj>& getInterestingElements() {
if (!elements.empty()) {
return elements;
}
-
+
// These are used to test strings that include NUL bytes.
const StringData ball("ball", StringData::LiteralTag());
const StringData ball00n("ball\0\0n", StringData::LiteralTag());
@@ -407,8 +402,10 @@ const std::vector<BSONObj>& getInterestingElements() {
elements.push_back(BSON("" << -2.2));
elements.push_back(BSON("" << -12312312.2123123123123));
elements.push_back(BSON("" << 12312312.2123123123123));
- elements.push_back(BSON("" << "aaa"));
- elements.push_back(BSON("" << "AAA"));
+ elements.push_back(BSON(""
+ << "aaa"));
+ elements.push_back(BSON(""
+ << "AAA"));
elements.push_back(BSON("" << ball));
elements.push_back(BSON("" << ball00n));
elements.push_back(BSON("" << BSONSymbol(ball)));
@@ -424,22 +421,22 @@ const std::vector<BSONObj>& getInterestingElements() {
elements.push_back(BSON("" << BSONCode("abc_code")));
elements.push_back(BSON("" << BSONCode(ball)));
elements.push_back(BSON("" << BSONCode(ball00n)));
- elements.push_back(BSON("" << BSONCodeWScope("def_code1", BSON("x_scope" << "a"))));
- elements.push_back(BSON("" << BSONCodeWScope("def_code2", BSON("x_scope" << "a"))));
- elements.push_back(BSON("" << BSONCodeWScope("def_code2", BSON("x_scope" << "b"))));
+ elements.push_back(BSON("" << BSONCodeWScope("def_code1",
+ BSON("x_scope"
+ << "a"))));
+ elements.push_back(BSON("" << BSONCodeWScope("def_code2",
+ BSON("x_scope"
+ << "a"))));
+ elements.push_back(BSON("" << BSONCodeWScope("def_code2",
+ BSON("x_scope"
+ << "b"))));
elements.push_back(BSON("" << BSONCodeWScope(ball, BSON("a" << 1))));
elements.push_back(BSON("" << BSONCodeWScope(ball00n, BSON("a" << 1))));
elements.push_back(BSON("" << true));
elements.push_back(BSON("" << false));
// Something that needs multiple bytes of typeBits
- elements.push_back(BSON("" << BSON_ARRAY(""
- << BSONSymbol("")
- << 0
- << 0ll
- << 0.0
- << -0.0
- )));
+ elements.push_back(BSON("" << BSON_ARRAY("" << BSONSymbol("") << 0 << 0ll << 0.0 << -0.0)));
//
// Interesting numeric cases
@@ -488,12 +485,12 @@ const std::vector<BSONObj>& getInterestingElements() {
elements.push_back(BSON("" << (-lNum + 1)));
elements.push_back(BSON("" << (-lNum - 1)));
- if (powerOfTwo <= 52) { // is dNum - 0.5 representable?
+ if (powerOfTwo <= 52) { // is dNum - 0.5 representable?
elements.push_back(BSON("" << (dNum - 0.5)));
elements.push_back(BSON("" << -(dNum - 0.5)));
}
- if (powerOfTwo <= 51) { // is dNum + 0.5 representable?
+ if (powerOfTwo <= 51) { // is dNum + 0.5 representable?
elements.push_back(BSON("" << (dNum + 0.5)));
elements.push_back(BSON("" << -(dNum + 0.5)));
}
@@ -503,8 +500,8 @@ const std::vector<BSONObj>& getInterestingElements() {
// Numbers around +/- numeric_limits<long long>::max() which can't be represented
// precisely as a double.
const long long maxLL = std::numeric_limits<long long>::max();
- const double closestAbove = 9223372036854775808.0; // 2**63
- const double closestBelow = 9223372036854774784.0; // 2**63 - epsilon
+ const double closestAbove = 9223372036854775808.0; // 2**63
+ const double closestBelow = 9223372036854774784.0; // 2**63 - epsilon
elements.push_back(BSON("" << maxLL));
elements.push_back(BSON("" << (maxLL - 1)));
@@ -521,9 +518,9 @@ const std::vector<BSONObj>& getInterestingElements() {
// Numbers around numeric_limits<long long>::min() which can be represented precisely as
// a double, but not as a positive long long.
const long long minLL = std::numeric_limits<long long>::min();
- const double closestBelow = -9223372036854777856.0; // -2**63 - epsilon
- const double equal = -9223372036854775808.0; // 2**63
- const double closestAbove = -9223372036854774784.0; // -2**63 + epsilon
+ const double closestBelow = -9223372036854777856.0; // -2**63 - epsilon
+        const double equal = -9223372036854775808.0;          // -2**63
+ const double closestAbove = -9223372036854774784.0; // -2**63 + epsilon
elements.push_back(BSON("" << minLL));
elements.push_back(BSON("" << equal));
@@ -537,44 +534,45 @@ const std::vector<BSONObj>& getInterestingElements() {
void testPermutation(const std::vector<BSONObj>& elementsOrig,
const std::vector<BSONObj>& orderings,
bool debug) {
-
// Since KeyStrings are compared using memcmp we can assume it provides a total ordering such
// that there won't be cases where (a < b && b < c && !(a < c)). This test still needs to ensure
// that it provides the *correct* total ordering.
for (size_t k = 0; k < orderings.size(); k++) {
BSONObj orderObj = orderings[k];
Ordering ordering = Ordering::make(orderObj);
- if (debug) log() << "ordering: " << orderObj;
+ if (debug)
+ log() << "ordering: " << orderObj;
std::vector<BSONObj> elements = elementsOrig;
std::stable_sort(elements.begin(), elements.end(), BSONObjCmp(orderObj));
for (size_t i = 0; i < elements.size(); i++) {
const BSONObj& o1 = elements[i];
- if (debug) log() << "\to1: " << o1;
+ if (debug)
+ log() << "\to1: " << o1;
ROUNDTRIP_ORDER(o1, ordering);
KeyString k1(o1, ordering);
- KeyString l1(BSON("l" << o1.firstElement()), ordering); // kLess
- KeyString g1(BSON("g" << o1.firstElement()), ordering); // kGreater
+ KeyString l1(BSON("l" << o1.firstElement()), ordering); // kLess
+ KeyString g1(BSON("g" << o1.firstElement()), ordering); // kGreater
ASSERT_LT(l1, k1);
ASSERT_GT(g1, k1);
if (i + 1 < elements.size()) {
const BSONObj& o2 = elements[i + 1];
- if (debug) log() << "\t\t o2: " << o2;
+ if (debug)
+ log() << "\t\t o2: " << o2;
KeyString k2(o2, ordering);
KeyString g2(BSON("g" << o2.firstElement()), ordering);
KeyString l2(BSON("l" << o2.firstElement()), ordering);
int bsonCmp = o1.woCompare(o2, ordering);
- invariant(bsonCmp <= 0); // We should be sorted...
+ invariant(bsonCmp <= 0); // We should be sorted...
if (bsonCmp == 0) {
ASSERT_EQ(k1, k2);
- }
- else {
+ } else {
ASSERT_LT(k1, k2);
}
@@ -592,8 +590,7 @@ void testPermutation(const std::vector<BSONObj>& elementsOrig,
ASSERT_EQ(g1, g2);
ASSERT_LT(l1, k2);
ASSERT_GT(g1, k2);
- }
- else {
+ } else {
// k1 is less than k2. Less(k2) and Greater(k1) should be between them.
ASSERT_LT(g1, k2);
ASSERT_GT(l2, k1);
@@ -619,7 +616,7 @@ TEST(KeyStringTest, AllPermCompare) {
}
TEST(KeyStringTest, AllPerm2Compare) {
- // This test can take over a minute without optimizations. Re-enable if you need to debug it.
+// This test can take over a minute without optimizations. Re-enable if you need to debug it.
#if !defined(MONGO_CONFIG_OPTIMIZED_BUILD)
log() << "\t\t\tskipping test on non-optimized build";
return;
@@ -654,13 +651,12 @@ TEST(KeyStringTest, AllPerm2Compare) {
testPermutation(elements, orderings, false);
}
-#define COMPARE_HELPER(LHS, RHS) \
- (((LHS) < (RHS)) ? -1 : (((LHS) == (RHS)) ? 0 : 1))
+#define COMPARE_HELPER(LHS, RHS) (((LHS) < (RHS)) ? -1 : (((LHS) == (RHS)) ? 0 : 1))
int compareLongToDouble(long long lhs, double rhs) {
if (rhs >= std::numeric_limits<long long>::max())
return -1;
- if (rhs < std::numeric_limits<long long>::min() )
+ if (rhs < std::numeric_limits<long long>::min())
return 1;
if (fabs(rhs) >= (1LL << 52)) {
@@ -670,7 +666,7 @@ int compareLongToDouble(long long lhs, double rhs) {
return COMPARE_HELPER(static_cast<double>(lhs), rhs);
}
-int compareNumbers(const BSONElement& lhs, const BSONElement& rhs ) {
+int compareNumbers(const BSONElement& lhs, const BSONElement& rhs) {
invariant(lhs.isNumber());
invariant(rhs.isNumber());
@@ -679,8 +675,7 @@ int compareNumbers(const BSONElement& lhs, const BSONElement& rhs ) {
return COMPARE_HELPER(lhs.numberLong(), rhs.numberLong());
}
return compareLongToDouble(lhs.numberLong(), rhs.Double());
- }
- else { // double
+ } else { // double
if (rhs.type() == NumberDouble) {
return COMPARE_HELPER(lhs.Double(), rhs.Double());
}
@@ -769,15 +764,13 @@ TEST(KeyStringTest, NumberOrderLots) {
const KeyString& b = *keyStrings[j];
ASSERT_EQUALS(a.compare(b), -b.compare(a));
- if (a.compare(b) != compareNumbers(numbers[i].firstElement(),
- numbers[j].firstElement())) {
+ if (a.compare(b) !=
+ compareNumbers(numbers[i].firstElement(), numbers[j].firstElement())) {
log() << numbers[i] << " " << numbers[j];
}
ASSERT_EQUALS(a.compare(b),
- compareNumbers(numbers[i].firstElement(),
- numbers[j].firstElement()));
-
+ compareNumbers(numbers[i].firstElement(), numbers[j].firstElement()));
}
}
}
@@ -786,7 +779,7 @@ TEST(KeyStringTest, RecordIds) {
for (int i = 0; i < 63; i++) {
const RecordId rid = RecordId(1ll << i);
- { // Test encoding / decoding of single RecordIds
+ { // Test encoding / decoding of single RecordIds
const KeyString ks(rid);
ASSERT_GTE(ks.getSize(), 2u);
ASSERT_LTE(ks.getSize(), 10u);
@@ -812,18 +805,21 @@ TEST(KeyStringTest, RecordIds) {
for (int j = 0; j < 63; j++) {
RecordId other = RecordId(1ll << j);
- if (rid == other) ASSERT_EQ(KeyString(rid), KeyString(other));
- if (rid < other) ASSERT_LT(KeyString(rid), KeyString(other));
- if (rid > other) ASSERT_GT(KeyString(rid), KeyString(other));
+ if (rid == other)
+ ASSERT_EQ(KeyString(rid), KeyString(other));
+ if (rid < other)
+ ASSERT_LT(KeyString(rid), KeyString(other));
+ if (rid > other)
+ ASSERT_GT(KeyString(rid), KeyString(other));
{
// Test concatenating RecordIds like in a unique index.
KeyString ks;
- ks.appendRecordId(RecordId::max()); // uses all bytes
+ ks.appendRecordId(RecordId::max()); // uses all bytes
ks.appendRecordId(rid);
- ks.appendRecordId(RecordId(0xDEADBEEF)); // uses some extra bytes
+ ks.appendRecordId(RecordId(0xDEADBEEF)); // uses some extra bytes
ks.appendRecordId(rid);
- ks.appendRecordId(RecordId(1)); // uses no extra bytes
+ ks.appendRecordId(RecordId(1)); // uses no extra bytes
ks.appendRecordId(rid);
ks.appendRecordId(other);
@@ -843,4 +839,3 @@ TEST(KeyStringTest, RecordIds) {
}
}
}
-
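
The ActualBytesDouble test above expects a descending ordering to produce the same bytes with every byte inverted except the trailing kEnd byte, so that plain memcmp sorts in the opposite direction; testPermutation then relies on memcmp providing a total order. The following is a small standalone illustration of that invert-all-but-the-terminator idea; the byte values are arbitrary and are not real KeyString encodings.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Invert every byte except the trailing terminator, the trick the test
// verifies for descending index orderings: memcmp on the flipped bytes
// yields the reverse of memcmp on the originals.
std::vector<uint8_t> flipForDescending(const std::vector<uint8_t>& key) {
    std::vector<uint8_t> out(key);
    for (size_t i = 0; i + 1 < out.size(); i++)  // leave the last (kEnd-style) byte alone
        out[i] = static_cast<uint8_t>(~out[i]);
    return out;
}

int main() {
    // Two made-up ascending-encoded keys where a < b under memcmp.
    std::vector<uint8_t> a = {0x2B, 0x0B, 0x04};
    std::vector<uint8_t> b = {0x2B, 0x0D, 0x04};
    assert(memcmp(a.data(), b.data(), a.size()) < 0);

    // After flipping, the order reverses: flipped(b) < flipped(a).
    std::vector<uint8_t> af = flipForDescending(a);
    std::vector<uint8_t> bf = flipForDescending(b);
    assert(memcmp(bf.data(), af.data(), a.size()) < 0);
    return 0;
}
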
diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp
index b24cc705226..df0a39faeee 100644
--- a/src/mongo/db/storage/kv/kv_catalog.cpp
+++ b/src/mongo/db/storage/kv/kv_catalog.cpp
@@ -45,398 +45,367 @@
namespace mongo {
namespace {
- // This is a global resource, which protects accesses to the catalog metadata (instance-wide).
- // It is never used with KVEngines that support doc-level locking so this should never conflict
- // with anything else.
- //
- // NOTE: Must be locked *before* _identLock.
- const ResourceId resourceIdCatalogMetadata(RESOURCE_METADATA, 1ULL);
+// This is a global resource, which protects accesses to the catalog metadata (instance-wide).
+// It is never used with KVEngines that support doc-level locking so this should never conflict
+// with anything else.
+//
+// NOTE: Must be locked *before* _identLock.
+const ResourceId resourceIdCatalogMetadata(RESOURCE_METADATA, 1ULL);
}
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
- class KVCatalog::AddIdentChange : public RecoveryUnit::Change {
- public:
- AddIdentChange(KVCatalog* catalog, StringData ident)
- :_catalog(catalog), _ident(ident.toString())
- {}
+class KVCatalog::AddIdentChange : public RecoveryUnit::Change {
+public:
+ AddIdentChange(KVCatalog* catalog, StringData ident)
+ : _catalog(catalog), _ident(ident.toString()) {}
- virtual void commit() {}
- virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
- _catalog->_idents.erase(_ident);
- }
+ virtual void commit() {}
+ virtual void rollback() {
+ stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
+ _catalog->_idents.erase(_ident);
+ }
- KVCatalog* const _catalog;
- const std::string _ident;
- };
+ KVCatalog* const _catalog;
+ const std::string _ident;
+};
- class KVCatalog::RemoveIdentChange : public RecoveryUnit::Change {
- public:
- RemoveIdentChange(KVCatalog* catalog, StringData ident, const Entry& entry)
- :_catalog(catalog), _ident(ident.toString()), _entry(entry)
- {}
+class KVCatalog::RemoveIdentChange : public RecoveryUnit::Change {
+public:
+ RemoveIdentChange(KVCatalog* catalog, StringData ident, const Entry& entry)
+ : _catalog(catalog), _ident(ident.toString()), _entry(entry) {}
- virtual void commit() {}
- virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
- _catalog->_idents[_ident] = _entry;
- }
+ virtual void commit() {}
+ virtual void rollback() {
+ stdx::lock_guard<stdx::mutex> lk(_catalog->_identsLock);
+ _catalog->_idents[_ident] = _entry;
+ }
+
+ KVCatalog* const _catalog;
+ const std::string _ident;
+ const Entry _entry;
+};
+
+KVCatalog::KVCatalog(RecordStore* rs,
+ bool isRsThreadSafe,
+ bool directoryPerDb,
+ bool directoryForIndexes)
+ : _rs(rs),
+ _isRsThreadSafe(isRsThreadSafe),
+ _directoryPerDb(directoryPerDb),
+ _directoryForIndexes(directoryForIndexes),
+ _rand(_newRand()) {}
+
+KVCatalog::~KVCatalog() {
+ _rs = NULL;
+}
+
+std::string KVCatalog::_newRand() {
+ return str::stream() << std::unique_ptr<SecureRandom>(SecureRandom::create())->nextInt64();
+}
- KVCatalog* const _catalog;
- const std::string _ident;
- const Entry _entry;
- };
-
- KVCatalog::KVCatalog( RecordStore* rs,
- bool isRsThreadSafe,
- bool directoryPerDb,
- bool directoryForIndexes )
- : _rs( rs )
- , _isRsThreadSafe(isRsThreadSafe)
- , _directoryPerDb(directoryPerDb)
- , _directoryForIndexes(directoryForIndexes)
- , _rand(_newRand())
- {}
-
- KVCatalog::~KVCatalog() {
- _rs = NULL;
+bool KVCatalog::_hasEntryCollidingWithRand() const {
+ // Only called from init() so don't need to lock.
+ for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
+ if (StringData(it->first).endsWith(_rand))
+ return true;
}
+ return false;
+}
- std::string KVCatalog::_newRand() {
- return str::stream()
- << std::unique_ptr<SecureRandom>(SecureRandom::create())->nextInt64();
+std::string KVCatalog::_newUniqueIdent(StringData ns, const char* kind) {
+ // If this changes to not put _rand at the end, _hasEntryCollidingWithRand will need fixing.
+ StringBuilder buf;
+ if (_directoryPerDb) {
+ buf << NamespaceString::escapeDbName(nsToDatabaseSubstring(ns)) << '/';
}
+ buf << kind;
+ buf << (_directoryForIndexes ? '/' : '-');
+ buf << _next.fetchAndAdd(1) << '-' << _rand;
+ return buf.str();
+}
- bool KVCatalog::_hasEntryCollidingWithRand() const {
- // Only called from init() so don't need to lock.
- for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
- if (StringData(it->first).endsWith(_rand))
- return true;
- }
- return false;
+void KVCatalog::init(OperationContext* opCtx) {
+ // No locking needed since called single threaded.
+ auto cursor = _rs->getCursor(opCtx);
+ while (auto record = cursor->next()) {
+ BSONObj obj = record->data.releaseToBson();
+
+ // No rollback since this is just loading already committed data.
+ string ns = obj["ns"].String();
+ string ident = obj["ident"].String();
+ _idents[ns] = Entry(ident, record->id);
}
- std::string KVCatalog::_newUniqueIdent(StringData ns, const char* kind) {
- // If this changes to not put _rand at the end, _hasEntryCollidingWithRand will need fixing.
- StringBuilder buf;
- if ( _directoryPerDb ) {
- buf << NamespaceString::escapeDbName( nsToDatabaseSubstring( ns ) ) << '/';
- }
- buf << kind;
- buf << ( _directoryForIndexes ? '/' : '-' );
- buf << _next.fetchAndAdd(1) << '-' << _rand;
- return buf.str();
+ // In the unlikely event that we have used this _rand before generate a new one.
+ while (_hasEntryCollidingWithRand()) {
+ _rand = _newRand();
}
+}
- void KVCatalog::init( OperationContext* opCtx ) {
- // No locking needed since called single threaded.
- auto cursor = _rs->getCursor(opCtx);
- while (auto record = cursor->next()) {
- BSONObj obj = record->data.releaseToBson();
+void KVCatalog::getAllCollections(std::vector<std::string>* out) const {
+ stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
+ out->push_back(it->first);
+ }
+}
- // No rollback since this is just loading already committed data.
- string ns = obj["ns"].String();
- string ident = obj["ident"].String();
- _idents[ns] = Entry(ident, record->id);
- }
+Status KVCatalog::newCollection(OperationContext* opCtx,
+ StringData ns,
+ const CollectionOptions& options) {
+ invariant(opCtx->lockState() == NULL ||
+ opCtx->lockState()->isDbLockedForMode(nsToDatabaseSubstring(ns), MODE_X));
- // In the unlikely event that we have used this _rand before generate a new one.
- while (_hasEntryCollidingWithRand()) {
- _rand = _newRand();
- }
+ std::unique_ptr<Lock::ResourceLock> rLk;
+ if (!_isRsThreadSafe && opCtx->lockState()) {
+ rLk.reset(new Lock::ResourceLock(opCtx->lockState(), resourceIdCatalogMetadata, MODE_X));
}
- void KVCatalog::getAllCollections( std::vector<std::string>* out ) const {
- stdx::lock_guard<stdx::mutex> lk( _identsLock );
- for ( NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it ) {
- out->push_back( it->first );
- }
+ const string ident = _newUniqueIdent(ns, "collection");
+
+ stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ Entry& old = _idents[ns.toString()];
+ if (!old.ident.empty()) {
+ return Status(ErrorCodes::NamespaceExists, "collection already exists");
}
- Status KVCatalog::newCollection( OperationContext* opCtx,
- StringData ns,
- const CollectionOptions& options ) {
- invariant( opCtx->lockState() == NULL ||
- opCtx->lockState()->isDbLockedForMode( nsToDatabaseSubstring(ns), MODE_X ) );
-
- std::unique_ptr<Lock::ResourceLock> rLk;
- if (!_isRsThreadSafe && opCtx->lockState()) {
- rLk.reset(new Lock::ResourceLock(opCtx->lockState(),
- resourceIdCatalogMetadata,
- MODE_X));
- }
+ opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, ns));
- const string ident = _newUniqueIdent(ns, "collection");
+ BSONObj obj;
+ {
+ BSONObjBuilder b;
+ b.append("ns", ns);
+ b.append("ident", ident);
+ BSONCollectionCatalogEntry::MetaData md;
+ md.ns = ns.toString();
+ md.options = options;
+ b.append("md", md.toBSON());
+ obj = b.obj();
+ }
- stdx::lock_guard<stdx::mutex> lk( _identsLock );
- Entry& old = _idents[ns.toString()];
- if ( !old.ident.empty() ) {
- return Status( ErrorCodes::NamespaceExists, "collection already exists" );
- }
+ StatusWith<RecordId> res = _rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), false);
+ if (!res.isOK())
+ return res.getStatus();
- opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, ns));
-
- BSONObj obj;
- {
- BSONObjBuilder b;
- b.append( "ns", ns );
- b.append( "ident", ident );
- BSONCollectionCatalogEntry::MetaData md;
- md.ns = ns.toString();
- md.options = options;
- b.append( "md", md.toBSON() );
- obj = b.obj();
- }
+ old = Entry(ident, res.getValue());
+ LOG(1) << "stored meta data for " << ns << " @ " << res.getValue();
+ return Status::OK();
+}
- StatusWith<RecordId> res = _rs->insertRecord( opCtx, obj.objdata(), obj.objsize(), false );
- if ( !res.isOK() )
- return res.getStatus();
+std::string KVCatalog::getCollectionIdent(StringData ns) const {
+ stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ NSToIdentMap::const_iterator it = _idents.find(ns.toString());
+ invariant(it != _idents.end());
+ return it->second.ident;
+}
- old = Entry( ident, res.getValue() );
- LOG(1) << "stored meta data for " << ns << " @ " << res.getValue();
- return Status::OK();
- }
+std::string KVCatalog::getIndexIdent(OperationContext* opCtx,
+ StringData ns,
+ StringData idxName) const {
+ BSONObj obj = _findEntry(opCtx, ns);
+ BSONObj idxIdent = obj["idxIdent"].Obj();
+ return idxIdent[idxName].String();
+}
- std::string KVCatalog::getCollectionIdent( StringData ns ) const {
- stdx::lock_guard<stdx::mutex> lk( _identsLock );
- NSToIdentMap::const_iterator it = _idents.find( ns.toString() );
- invariant( it != _idents.end() );
- return it->second.ident;
+BSONObj KVCatalog::_findEntry(OperationContext* opCtx, StringData ns, RecordId* out) const {
+ std::unique_ptr<Lock::ResourceLock> rLk;
+ if (!_isRsThreadSafe && opCtx->lockState()) {
+ rLk.reset(new Lock::ResourceLock(opCtx->lockState(), resourceIdCatalogMetadata, MODE_S));
}
- std::string KVCatalog::getIndexIdent( OperationContext* opCtx,
- StringData ns,
- StringData idxName ) const {
- BSONObj obj = _findEntry( opCtx, ns );
- BSONObj idxIdent = obj["idxIdent"].Obj();
- return idxIdent[idxName].String();
+ RecordId dl;
+ {
+ stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ NSToIdentMap::const_iterator it = _idents.find(ns.toString());
+ invariant(it != _idents.end());
+ dl = it->second.storedLoc;
}
- BSONObj KVCatalog::_findEntry( OperationContext* opCtx,
- StringData ns,
- RecordId* out ) const {
-
- std::unique_ptr<Lock::ResourceLock> rLk;
- if (!_isRsThreadSafe && opCtx->lockState()) {
- rLk.reset(new Lock::ResourceLock(opCtx->lockState(),
- resourceIdCatalogMetadata,
- MODE_S));
- }
+ LOG(1) << "looking up metadata for: " << ns << " @ " << dl;
+ RecordData data;
+ if (!_rs->findRecord(opCtx, dl, &data)) {
+ // since the in memory meta data isn't managed with mvcc
+        // it's possible for different transactions to see slightly
+ // different things, which is ok via the locking above.
+ return BSONObj();
+ }
- RecordId dl;
- {
- stdx::lock_guard<stdx::mutex> lk( _identsLock );
- NSToIdentMap::const_iterator it = _idents.find( ns.toString() );
- invariant( it != _idents.end() );
- dl = it->second.storedLoc;
- }
+ if (out)
+ *out = dl;
- LOG(1) << "looking up metadata for: " << ns << " @ " << dl;
- RecordData data;
- if ( !_rs->findRecord( opCtx, dl, &data ) ) {
- // since the in memory meta data isn't managed with mvcc
- // its possible for different transactions to see slightly
- // different things, which is ok via the locking above.
- return BSONObj();
- }
-
- if (out)
- *out = dl;
+ return data.releaseToBson().getOwned();
+}
- return data.releaseToBson().getOwned();
+const BSONCollectionCatalogEntry::MetaData KVCatalog::getMetaData(OperationContext* opCtx,
+ StringData ns) {
+ BSONObj obj = _findEntry(opCtx, ns);
+ LOG(3) << " fetched CCE metadata: " << obj;
+ BSONCollectionCatalogEntry::MetaData md;
+ const BSONElement mdElement = obj["md"];
+ if (mdElement.isABSONObj()) {
+ LOG(3) << "returning metadata: " << mdElement;
+ md.parse(mdElement.Obj());
}
+ return md;
+}
- const BSONCollectionCatalogEntry::MetaData KVCatalog::getMetaData( OperationContext* opCtx,
- StringData ns ) {
- BSONObj obj = _findEntry( opCtx, ns );
- LOG(3) << " fetched CCE metadata: " << obj;
- BSONCollectionCatalogEntry::MetaData md;
- const BSONElement mdElement = obj["md"];
- if ( mdElement.isABSONObj() ) {
- LOG(3) << "returning metadata: " << mdElement;
- md.parse( mdElement.Obj() );
- }
- return md;
+void KVCatalog::putMetaData(OperationContext* opCtx,
+ StringData ns,
+ BSONCollectionCatalogEntry::MetaData& md) {
+ std::unique_ptr<Lock::ResourceLock> rLk;
+ if (!_isRsThreadSafe && opCtx->lockState()) {
+ rLk.reset(new Lock::ResourceLock(opCtx->lockState(), resourceIdCatalogMetadata, MODE_X));
}
- void KVCatalog::putMetaData( OperationContext* opCtx,
- StringData ns,
- BSONCollectionCatalogEntry::MetaData& md ) {
-
- std::unique_ptr<Lock::ResourceLock> rLk;
- if (!_isRsThreadSafe && opCtx->lockState()) {
- rLk.reset(new Lock::ResourceLock(opCtx->lockState(),
- resourceIdCatalogMetadata,
- MODE_X));
+ RecordId loc;
+ BSONObj obj = _findEntry(opCtx, ns, &loc);
+
+ {
+ // rebuilt doc
+ BSONObjBuilder b;
+ b.append("md", md.toBSON());
+
+ BSONObjBuilder newIdentMap;
+ BSONObj oldIdentMap;
+ if (obj["idxIdent"].isABSONObj())
+ oldIdentMap = obj["idxIdent"].Obj();
+
+ // fix ident map
+ for (size_t i = 0; i < md.indexes.size(); i++) {
+ string name = md.indexes[i].name();
+ BSONElement e = oldIdentMap[name];
+ if (e.type() == String) {
+ newIdentMap.append(e);
+ continue;
+ }
+ // missing, create new
+ newIdentMap.append(name, _newUniqueIdent(ns, "index"));
}
+ b.append("idxIdent", newIdentMap.obj());
- RecordId loc;
- BSONObj obj = _findEntry( opCtx, ns, &loc );
-
- {
- // rebuilt doc
- BSONObjBuilder b;
- b.append( "md", md.toBSON() );
-
- BSONObjBuilder newIdentMap;
- BSONObj oldIdentMap;
- if ( obj["idxIdent"].isABSONObj() )
- oldIdentMap = obj["idxIdent"].Obj();
-
- // fix ident map
- for ( size_t i = 0; i < md.indexes.size(); i++ ) {
- string name = md.indexes[i].name();
- BSONElement e = oldIdentMap[name];
- if ( e.type() == String ) {
- newIdentMap.append( e );
- continue;
- }
- // missing, create new
- newIdentMap.append( name, _newUniqueIdent(ns, "index") );
- }
- b.append( "idxIdent", newIdentMap.obj() );
+ // add whatever is left
+ b.appendElementsUnique(obj);
+ obj = b.obj();
+ }
- // add whatever is left
- b.appendElementsUnique( obj );
- obj = b.obj();
- }
+ LOG(3) << "recording new metadata: " << obj;
+ StatusWith<RecordId> status =
+ _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL);
+ fassert(28521, status.getStatus());
+ invariant(status.getValue() == loc);
+}
- LOG(3) << "recording new metadata: " << obj;
- StatusWith<RecordId> status = _rs->updateRecord( opCtx,
- loc,
- obj.objdata(),
- obj.objsize(),
- false,
- NULL );
- fassert( 28521, status.getStatus() );
- invariant( status.getValue() == loc );
+Status KVCatalog::renameCollection(OperationContext* opCtx,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp) {
+ std::unique_ptr<Lock::ResourceLock> rLk;
+ if (!_isRsThreadSafe && opCtx->lockState()) {
+ rLk.reset(new Lock::ResourceLock(opCtx->lockState(), resourceIdCatalogMetadata, MODE_X));
}
- Status KVCatalog::renameCollection( OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
- bool stayTemp ) {
+ RecordId loc;
+ BSONObj old = _findEntry(opCtx, fromNS, &loc).getOwned();
+ {
+ BSONObjBuilder b;
- std::unique_ptr<Lock::ResourceLock> rLk;
- if (!_isRsThreadSafe && opCtx->lockState()) {
- rLk.reset(new Lock::ResourceLock(opCtx->lockState(),
- resourceIdCatalogMetadata,
- MODE_X));
- }
+ b.append("ns", toNS);
- RecordId loc;
- BSONObj old = _findEntry( opCtx, fromNS, &loc ).getOwned();
- {
- BSONObjBuilder b;
-
- b.append( "ns", toNS );
-
- BSONCollectionCatalogEntry::MetaData md;
- md.parse( old["md"].Obj() );
- md.rename( toNS );
- if ( !stayTemp )
- md.options.temp = false;
- b.append( "md", md.toBSON() );
-
- b.appendElementsUnique( old );
-
- BSONObj obj = b.obj();
- StatusWith<RecordId> status = _rs->updateRecord( opCtx,
- loc,
- obj.objdata(),
- obj.objsize(),
- false,
- NULL );
- fassert( 28522, status.getStatus() );
- invariant( status.getValue() == loc );
- }
+ BSONCollectionCatalogEntry::MetaData md;
+ md.parse(old["md"].Obj());
+ md.rename(toNS);
+ if (!stayTemp)
+ md.options.temp = false;
+ b.append("md", md.toBSON());
+
+ b.appendElementsUnique(old);
+
+ BSONObj obj = b.obj();
+ StatusWith<RecordId> status =
+ _rs->updateRecord(opCtx, loc, obj.objdata(), obj.objsize(), false, NULL);
+ fassert(28522, status.getStatus());
+ invariant(status.getValue() == loc);
+ }
- stdx::lock_guard<stdx::mutex> lk( _identsLock );
- const NSToIdentMap::iterator fromIt = _idents.find(fromNS.toString());
- invariant(fromIt != _idents.end());
+ stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ const NSToIdentMap::iterator fromIt = _idents.find(fromNS.toString());
+ invariant(fromIt != _idents.end());
- opCtx->recoveryUnit()->registerChange(new RemoveIdentChange(this, fromNS, fromIt->second));
- opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, toNS));
+ opCtx->recoveryUnit()->registerChange(new RemoveIdentChange(this, fromNS, fromIt->second));
+ opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, toNS));
- _idents.erase(fromIt);
- _idents[toNS.toString()] = Entry( old["ident"].String(), loc );
+ _idents.erase(fromIt);
+ _idents[toNS.toString()] = Entry(old["ident"].String(), loc);
- return Status::OK();
- }
+ return Status::OK();
+}
- Status KVCatalog::dropCollection( OperationContext* opCtx,
- StringData ns ) {
- invariant( opCtx->lockState() == NULL ||
- opCtx->lockState()->isDbLockedForMode( nsToDatabaseSubstring(ns), MODE_X ) );
- std::unique_ptr<Lock::ResourceLock> rLk;
- if (!_isRsThreadSafe && opCtx->lockState()) {
- rLk.reset(new Lock::ResourceLock(opCtx->lockState(),
- resourceIdCatalogMetadata,
- MODE_X));
- }
+Status KVCatalog::dropCollection(OperationContext* opCtx, StringData ns) {
+ invariant(opCtx->lockState() == NULL ||
+ opCtx->lockState()->isDbLockedForMode(nsToDatabaseSubstring(ns), MODE_X));
+ std::unique_ptr<Lock::ResourceLock> rLk;
+ if (!_isRsThreadSafe && opCtx->lockState()) {
+ rLk.reset(new Lock::ResourceLock(opCtx->lockState(), resourceIdCatalogMetadata, MODE_X));
+ }
- stdx::lock_guard<stdx::mutex> lk( _identsLock );
- const NSToIdentMap::iterator it = _idents.find(ns.toString());
- if (it == _idents.end()) {
- return Status( ErrorCodes::NamespaceNotFound, "collection not found" );
- }
+ stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ const NSToIdentMap::iterator it = _idents.find(ns.toString());
+ if (it == _idents.end()) {
+ return Status(ErrorCodes::NamespaceNotFound, "collection not found");
+ }
- opCtx->recoveryUnit()->registerChange(new RemoveIdentChange(this, ns, it->second));
+ opCtx->recoveryUnit()->registerChange(new RemoveIdentChange(this, ns, it->second));
- LOG(1) << "deleting metadata for " << ns << " @ " << it->second.storedLoc;
- _rs->deleteRecord( opCtx, it->second.storedLoc );
- _idents.erase(it);
+ LOG(1) << "deleting metadata for " << ns << " @ " << it->second.storedLoc;
+ _rs->deleteRecord(opCtx, it->second.storedLoc);
+ _idents.erase(it);
- return Status::OK();
- }
+ return Status::OK();
+}
- std::vector<std::string> KVCatalog::getAllIdentsForDB( StringData db ) const {
- std::vector<std::string> v;
+std::vector<std::string> KVCatalog::getAllIdentsForDB(StringData db) const {
+ std::vector<std::string> v;
- {
- stdx::lock_guard<stdx::mutex> lk( _identsLock );
- for ( NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it ) {
- NamespaceString ns( it->first );
- if ( ns.db() != db )
- continue;
- v.push_back( it->second.ident );
- }
+ {
+ stdx::lock_guard<stdx::mutex> lk(_identsLock);
+ for (NSToIdentMap::const_iterator it = _idents.begin(); it != _idents.end(); ++it) {
+ NamespaceString ns(it->first);
+ if (ns.db() != db)
+ continue;
+ v.push_back(it->second.ident);
}
-
- return v;
}
- std::vector<std::string> KVCatalog::getAllIdents( OperationContext* opCtx ) const {
- std::vector<std::string> v;
+ return v;
+}
- auto cursor = _rs->getCursor(opCtx);
- while (auto record = cursor->next()) {
- BSONObj obj = record->data.releaseToBson();
- v.push_back( obj["ident"].String() );
+std::vector<std::string> KVCatalog::getAllIdents(OperationContext* opCtx) const {
+ std::vector<std::string> v;
- BSONElement e = obj["idxIdent"];
- if ( !e.isABSONObj() )
- continue;
- BSONObj idxIdent = e.Obj();
+ auto cursor = _rs->getCursor(opCtx);
+ while (auto record = cursor->next()) {
+ BSONObj obj = record->data.releaseToBson();
+ v.push_back(obj["ident"].String());
- BSONObjIterator sub( idxIdent );
- while ( sub.more() ) {
- BSONElement e = sub.next();
- v.push_back( e.String() );
- }
- }
+ BSONElement e = obj["idxIdent"];
+ if (!e.isABSONObj())
+ continue;
+ BSONObj idxIdent = e.Obj();
- return v;
+ BSONObjIterator sub(idxIdent);
+ while (sub.more()) {
+ BSONElement e = sub.next();
+ v.push_back(e.String());
+ }
}
- bool KVCatalog::isUserDataIdent( StringData ident ) const {
- return
- ident.find( "index-" ) != std::string::npos ||
- ident.find( "index/" ) != std::string::npos ||
- ident.find( "collection-" ) != std::string::npos ||
- ident.find( "collection/" ) != std::string::npos;
- }
+ return v;
+}
+bool KVCatalog::isUserDataIdent(StringData ident) const {
+ return ident.find("index-") != std::string::npos || ident.find("index/") != std::string::npos ||
+ ident.find("collection-") != std::string::npos ||
+ ident.find("collection/") != std::string::npos;
+}
}
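
_newUniqueIdent() above composes idents as [<db>/]<kind><'-' or '/'><counter>-<rand>, keeping the random suffix at the end (which, per the comment in that function, the collision check depends on); isUserDataIdent() then recognizes user data by the "collection-"/"collection/" and "index-"/"index/" markers. Here is a standalone sketch of that layout; database-name escaping is omitted and the names are illustrative, not the mongo API.

#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>

// Illustrative version of the ident layout built by _newUniqueIdent():
// [<db>/]<kind><'-' or '/'><counter>-<rand>. The random suffix stays at the
// end so a collision check can simply look for it as a suffix.
std::string makeIdent(const std::string& db,
                      const char* kind,  // "collection" or "index"
                      bool directoryPerDb,
                      bool directoryForIndexes,
                      uint64_t counter,
                      const std::string& rand) {
    std::ostringstream buf;
    if (directoryPerDb)
        buf << db << '/';  // real code escapes the db name first
    buf << kind << (directoryForIndexes ? '/' : '-') << counter << '-' << rand;
    return buf.str();
}

int main() {
    // Default layout: everything in one directory, '-' separators.
    assert(makeIdent("test", "collection", false, false, 7, "123456789") ==
           "collection-7-123456789");
    // directoryPerDb plus directoryForIndexes: path-style idents.
    assert(makeIdent("test", "index", true, true, 8, "123456789") ==
           "test/index/8-123456789");
    return 0;
}
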
diff --git a/src/mongo/db/storage/kv/kv_catalog.h b/src/mongo/db/storage/kv/kv_catalog.h
index d253b9a1828..577fdba1faf 100644
--- a/src/mongo/db/storage/kv/kv_catalog.h
+++ b/src/mongo/db/storage/kv/kv_catalog.h
@@ -41,93 +41,81 @@
namespace mongo {
- class OperationContext;
- class RecordStore;
-
- class KVCatalog {
- public:
- /**
- * @param rs - does NOT take ownership
- */
- KVCatalog( RecordStore* rs,
- bool isRsThreadSafe,
- bool directoryPerDb,
- bool directoryForIndexes );
- ~KVCatalog();
-
- void init( OperationContext* opCtx );
-
- void getAllCollections( std::vector<std::string>* out ) const;
-
- /**
- * @return error or ident for instance
- */
- Status newCollection( OperationContext* opCtx,
- StringData ns,
- const CollectionOptions& options );
-
- std::string getCollectionIdent( StringData ns ) const;
-
- std::string getIndexIdent( OperationContext* opCtx,
- StringData ns,
- StringData idName ) const;
-
- const BSONCollectionCatalogEntry::MetaData getMetaData( OperationContext* opCtx,
- StringData ns );
- void putMetaData( OperationContext* opCtx,
- StringData ns,
- BSONCollectionCatalogEntry::MetaData& md );
-
- Status renameCollection( OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
- bool stayTemp );
-
- Status dropCollection( OperationContext* opCtx,
- StringData ns );
-
- std::vector<std::string> getAllIdentsForDB( StringData db ) const;
- std::vector<std::string> getAllIdents( OperationContext* opCtx ) const;
-
- bool isUserDataIdent( StringData ident ) const;
- private:
- class AddIdentChange;
- class RemoveIdentChange;
-
- BSONObj _findEntry( OperationContext* opCtx,
- StringData ns,
- RecordId* out=NULL ) const;
-
- /**
- * Generates a new unique identifier for a new "thing".
- * @param ns - the containing ns
- * @param kind - what this "thing" is, likely collection or index
- */
- std::string _newUniqueIdent(StringData ns, const char* kind);
-
- // Helpers only used by constructor and init(). Don't call from elsewhere.
- static std::string _newRand();
- bool _hasEntryCollidingWithRand() const;
-
- RecordStore* _rs; // not owned
- const bool _isRsThreadSafe;
- const bool _directoryPerDb;
- const bool _directoryForIndexes;
-
- // These two are only used for ident generation inside _newUniqueIdent.
- std::string _rand; // effectively const after init() returns
- AtomicUInt64 _next;
-
- struct Entry {
- Entry(){}
- Entry( std::string i, RecordId l )
- : ident(i), storedLoc( l ) {}
- std::string ident;
- RecordId storedLoc;
- };
- typedef std::map<std::string,Entry> NSToIdentMap;
- NSToIdentMap _idents;
- mutable stdx::mutex _identsLock;
- };
+class OperationContext;
+class RecordStore;
+
+class KVCatalog {
+public:
+ /**
+ * @param rs - does NOT take ownership
+ */
+ KVCatalog(RecordStore* rs, bool isRsThreadSafe, bool directoryPerDb, bool directoryForIndexes);
+ ~KVCatalog();
+
+ void init(OperationContext* opCtx);
+
+ void getAllCollections(std::vector<std::string>* out) const;
+
+ /**
+ * @return error or ident for instance
+ */
+ Status newCollection(OperationContext* opCtx, StringData ns, const CollectionOptions& options);
+
+ std::string getCollectionIdent(StringData ns) const;
+
+ std::string getIndexIdent(OperationContext* opCtx, StringData ns, StringData idName) const;
+
+ const BSONCollectionCatalogEntry::MetaData getMetaData(OperationContext* opCtx, StringData ns);
+ void putMetaData(OperationContext* opCtx,
+ StringData ns,
+ BSONCollectionCatalogEntry::MetaData& md);
+
+ Status renameCollection(OperationContext* opCtx,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp);
+
+ Status dropCollection(OperationContext* opCtx, StringData ns);
+ std::vector<std::string> getAllIdentsForDB(StringData db) const;
+ std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
+
+ bool isUserDataIdent(StringData ident) const;
+
+private:
+ class AddIdentChange;
+ class RemoveIdentChange;
+
+ BSONObj _findEntry(OperationContext* opCtx, StringData ns, RecordId* out = NULL) const;
+
+ /**
+ * Generates a new unique identifier for a new "thing".
+ * @param ns - the containing ns
+ * @param kind - what this "thing" is, likely collection or index
+ */
+ std::string _newUniqueIdent(StringData ns, const char* kind);
+
+ // Helpers only used by constructor and init(). Don't call from elsewhere.
+ static std::string _newRand();
+ bool _hasEntryCollidingWithRand() const;
+
+ RecordStore* _rs; // not owned
+ const bool _isRsThreadSafe;
+ const bool _directoryPerDb;
+ const bool _directoryForIndexes;
+
+ // These two are only used for ident generation inside _newUniqueIdent.
+ std::string _rand; // effectively const after init() returns
+ AtomicUInt64 _next;
+
+ struct Entry {
+ Entry() {}
+ Entry(std::string i, RecordId l) : ident(i), storedLoc(l) {}
+ std::string ident;
+ RecordId storedLoc;
+ };
+ typedef std::map<std::string, Entry> NSToIdentMap;
+ NSToIdentMap _idents;
+ mutable stdx::mutex _identsLock;
+};
}
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index 48a310a576e..ed92ccecc5f 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -36,157 +36,141 @@
namespace mongo {
- using std::string;
-
- class KVCollectionCatalogEntry::AddIndexChange : public RecoveryUnit::Change {
- public:
- AddIndexChange(OperationContext* opCtx, KVCollectionCatalogEntry* cce,
- StringData ident)
- : _opCtx(opCtx)
- , _cce(cce)
- , _ident(ident.toString())
- {}
-
- virtual void commit() {}
- virtual void rollback() {
- // Intentionally ignoring failure.
- _cce->_engine->dropIdent(_opCtx, _ident);
- }
-
- OperationContext* const _opCtx;
- KVCollectionCatalogEntry* const _cce;
- const std::string _ident;
- };
-
- class KVCollectionCatalogEntry::RemoveIndexChange : public RecoveryUnit::Change {
- public:
- RemoveIndexChange(OperationContext* opCtx, KVCollectionCatalogEntry* cce,
- StringData ident)
- : _opCtx(opCtx)
- , _cce(cce)
- , _ident(ident.toString())
- {}
-
- virtual void rollback() {}
- virtual void commit() {
- // Intentionally ignoring failure here. Since we've removed the metadata pointing to the
- // index, we should never see it again anyway.
- _cce->_engine->dropIdent(_opCtx, _ident);
- }
-
- OperationContext* const _opCtx;
- KVCollectionCatalogEntry* const _cce;
- const std::string _ident;
- };
-
-
- KVCollectionCatalogEntry::KVCollectionCatalogEntry( KVEngine* engine,
- KVCatalog* catalog,
- StringData ns,
- StringData ident,
- RecordStore* rs)
- : BSONCollectionCatalogEntry( ns ),
- _engine( engine ),
- _catalog( catalog ),
- _ident( ident.toString() ),
- _recordStore( rs ) {
- }
+using std::string;
- KVCollectionCatalogEntry::~KVCollectionCatalogEntry() {
- }
+class KVCollectionCatalogEntry::AddIndexChange : public RecoveryUnit::Change {
+public:
+ AddIndexChange(OperationContext* opCtx, KVCollectionCatalogEntry* cce, StringData ident)
+ : _opCtx(opCtx), _cce(cce), _ident(ident.toString()) {}
- bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
- StringData indexName,
- bool multikey ) {
- MetaData md = _getMetaData(txn);
-
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- if ( md.indexes[offset].multikey == multikey )
- return false;
- md.indexes[offset].multikey = multikey;
- _catalog->putMetaData( txn, ns().toString(), md );
- return true;
+ virtual void commit() {}
+ virtual void rollback() {
+ // Intentionally ignoring failure.
+ _cce->_engine->dropIdent(_opCtx, _ident);
}
- void KVCollectionCatalogEntry::setIndexHead( OperationContext* txn,
- StringData indexName,
- const RecordId& newHead ) {
- MetaData md = _getMetaData( txn );
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- md.indexes[offset].head = newHead;
- _catalog->putMetaData( txn, ns().toString(), md );
+ OperationContext* const _opCtx;
+ KVCollectionCatalogEntry* const _cce;
+ const std::string _ident;
+};
+
+class KVCollectionCatalogEntry::RemoveIndexChange : public RecoveryUnit::Change {
+public:
+ RemoveIndexChange(OperationContext* opCtx, KVCollectionCatalogEntry* cce, StringData ident)
+ : _opCtx(opCtx), _cce(cce), _ident(ident.toString()) {}
+
+ virtual void rollback() {}
+ virtual void commit() {
+ // Intentionally ignoring failure here. Since we've removed the metadata pointing to the
+ // index, we should never see it again anyway.
+ _cce->_engine->dropIdent(_opCtx, _ident);
}
- Status KVCollectionCatalogEntry::removeIndex( OperationContext* txn,
- StringData indexName ) {
- MetaData md = _getMetaData( txn );
-
- if (md.findIndexOffset(indexName) < 0)
- return Status::OK(); // never had the index so nothing to do.
+ OperationContext* const _opCtx;
+ KVCollectionCatalogEntry* const _cce;
+ const std::string _ident;
+};
+
+
+KVCollectionCatalogEntry::KVCollectionCatalogEntry(
+ KVEngine* engine, KVCatalog* catalog, StringData ns, StringData ident, RecordStore* rs)
+ : BSONCollectionCatalogEntry(ns),
+ _engine(engine),
+ _catalog(catalog),
+ _ident(ident.toString()),
+ _recordStore(rs) {}
+
+KVCollectionCatalogEntry::~KVCollectionCatalogEntry() {}
+
+bool KVCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
+ StringData indexName,
+ bool multikey) {
+ MetaData md = _getMetaData(txn);
+
+ int offset = md.findIndexOffset(indexName);
+ invariant(offset >= 0);
+ if (md.indexes[offset].multikey == multikey)
+ return false;
+ md.indexes[offset].multikey = multikey;
+ _catalog->putMetaData(txn, ns().toString(), md);
+ return true;
+}
- const string ident = _catalog->getIndexIdent( txn, ns().ns(), indexName );
+void KVCollectionCatalogEntry::setIndexHead(OperationContext* txn,
+ StringData indexName,
+ const RecordId& newHead) {
+ MetaData md = _getMetaData(txn);
+ int offset = md.findIndexOffset(indexName);
+ invariant(offset >= 0);
+ md.indexes[offset].head = newHead;
+ _catalog->putMetaData(txn, ns().toString(), md);
+}
- md.eraseIndex( indexName );
- _catalog->putMetaData( txn, ns().toString(), md );
+Status KVCollectionCatalogEntry::removeIndex(OperationContext* txn, StringData indexName) {
+ MetaData md = _getMetaData(txn);
- // Lazily remove to isolate underlying engine from rollback.
- txn->recoveryUnit()->registerChange(new RemoveIndexChange(txn, this, ident));
- return Status::OK();
- }
+ if (md.findIndexOffset(indexName) < 0)
+ return Status::OK(); // never had the index so nothing to do.
- Status KVCollectionCatalogEntry::prepareForIndexBuild( OperationContext* txn,
- const IndexDescriptor* spec ) {
- MetaData md = _getMetaData( txn );
- md.indexes.push_back( IndexMetaData( spec->infoObj(), false, RecordId(), false ) );
- _catalog->putMetaData( txn, ns().toString(), md );
+ const string ident = _catalog->getIndexIdent(txn, ns().ns(), indexName);
- string ident = _catalog->getIndexIdent( txn, ns().ns(), spec->indexName() );
+ md.eraseIndex(indexName);
+ _catalog->putMetaData(txn, ns().toString(), md);
- const Status status = _engine->createSortedDataInterface( txn, ident, spec );
- if (status.isOK()) {
- txn->recoveryUnit()->registerChange(new AddIndexChange(txn, this, ident));
- }
+ // Lazily remove to isolate underlying engine from rollback.
+ txn->recoveryUnit()->registerChange(new RemoveIndexChange(txn, this, ident));
+ return Status::OK();
+}
- return status;
- }
+Status KVCollectionCatalogEntry::prepareForIndexBuild(OperationContext* txn,
+ const IndexDescriptor* spec) {
+ MetaData md = _getMetaData(txn);
+ md.indexes.push_back(IndexMetaData(spec->infoObj(), false, RecordId(), false));
+ _catalog->putMetaData(txn, ns().toString(), md);
- void KVCollectionCatalogEntry::indexBuildSuccess( OperationContext* txn,
- StringData indexName ) {
- MetaData md = _getMetaData( txn );
- int offset = md.findIndexOffset( indexName );
- invariant( offset >= 0 );
- md.indexes[offset].ready = true;
- _catalog->putMetaData( txn, ns().toString(), md );
- }
+ string ident = _catalog->getIndexIdent(txn, ns().ns(), spec->indexName());
- void KVCollectionCatalogEntry::updateTTLSetting( OperationContext* txn,
- StringData idxName,
- long long newExpireSeconds ) {
- MetaData md = _getMetaData( txn );
- int offset = md.findIndexOffset( idxName );
- invariant( offset >= 0 );
- md.indexes[offset].updateTTLSetting( newExpireSeconds );
- _catalog->putMetaData( txn, ns().toString(), md );
+ const Status status = _engine->createSortedDataInterface(txn, ident, spec);
+ if (status.isOK()) {
+ txn->recoveryUnit()->registerChange(new AddIndexChange(txn, this, ident));
}
- void KVCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
- MetaData md = _getMetaData( txn );
- md.options.flags = newValue;
- md.options.flagsSet = true;
- _catalog->putMetaData( txn, ns().toString(), md );
- }
+ return status;
+}
- void KVCollectionCatalogEntry::updateValidator(OperationContext* txn,
- const BSONObj& validator) {
- MetaData md = _getMetaData(txn);
- md.options.validator = validator;
- _catalog->putMetaData(txn, ns().toString(), md);
- }
+void KVCollectionCatalogEntry::indexBuildSuccess(OperationContext* txn, StringData indexName) {
+ MetaData md = _getMetaData(txn);
+ int offset = md.findIndexOffset(indexName);
+ invariant(offset >= 0);
+ md.indexes[offset].ready = true;
+ _catalog->putMetaData(txn, ns().toString(), md);
+}
- BSONCollectionCatalogEntry::MetaData KVCollectionCatalogEntry::_getMetaData( OperationContext* txn ) const {
- return _catalog->getMetaData( txn, ns().toString() );
- }
+void KVCollectionCatalogEntry::updateTTLSetting(OperationContext* txn,
+ StringData idxName,
+ long long newExpireSeconds) {
+ MetaData md = _getMetaData(txn);
+ int offset = md.findIndexOffset(idxName);
+ invariant(offset >= 0);
+ md.indexes[offset].updateTTLSetting(newExpireSeconds);
+ _catalog->putMetaData(txn, ns().toString(), md);
+}
+void KVCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
+ MetaData md = _getMetaData(txn);
+ md.options.flags = newValue;
+ md.options.flagsSet = true;
+ _catalog->putMetaData(txn, ns().toString(), md);
+}
+
+void KVCollectionCatalogEntry::updateValidator(OperationContext* txn, const BSONObj& validator) {
+ MetaData md = _getMetaData(txn);
+ md.options.validator = validator;
+ _catalog->putMetaData(txn, ns().toString(), md);
+}
+
+BSONCollectionCatalogEntry::MetaData KVCollectionCatalogEntry::_getMetaData(
+ OperationContext* txn) const {
+ return _catalog->getMetaData(txn, ns().toString());
+}
}
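
The AddIndexChange/RemoveIndexChange classes above use the RecoveryUnit::Change pattern: a newly created ident is dropped only if the unit of work rolls back, while a removed ident is dropped only once the unit of work commits, so the engine never has to undo a drop. The following is a self-contained sketch of that pattern under simplified assumptions; Change, UnitOfWork, AddIdentChange, RemoveIdentChange and the drop lambda are illustrative stand-ins, not the MongoDB classes.

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Illustrative analogue of RecoveryUnit::Change: subclasses decide what to do
// when the surrounding unit of work commits or rolls back.
struct Change {
    virtual ~Change() {}
    virtual void commit() {}
    virtual void rollback() {}
};

// Drop a newly created ident only if the unit of work rolls back.
struct AddIdentChange : Change {
    std::string ident;
    std::function<void(const std::string&)> drop;
    void rollback() override { drop(ident); }
};

// Drop a removed ident only once the unit of work commits, so a rollback can
// simply keep using it.
struct RemoveIdentChange : Change {
    std::string ident;
    std::function<void(const std::string&)> drop;
    void commit() override { drop(ident); }
};

// Minimal unit of work: registered changes are committed in order, or rolled
// back in reverse order if the unit of work is destroyed without committing.
class UnitOfWork {
public:
    void registerChange(std::unique_ptr<Change> c) { _changes.push_back(std::move(c)); }
    void commit() {
        for (auto& c : _changes) c->commit();
        _changes.clear();
    }
    ~UnitOfWork() {
        for (auto it = _changes.rbegin(); it != _changes.rend(); ++it) (*it)->rollback();
    }
private:
    std::vector<std::unique_ptr<Change>> _changes;
};

int main() {
    auto drop = [](const std::string& ident) { std::cout << "dropIdent " << ident << '\n'; };
    {
        UnitOfWork uow;  // never committed: the created ident is dropped on rollback
        auto add = std::make_unique<AddIdentChange>();
        add->ident = "index-7";
        add->drop = drop;
        uow.registerChange(std::move(add));
    }
    {
        UnitOfWork uow;  // committed: the removed ident is dropped only here
        auto rem = std::make_unique<RemoveIdentChange>();
        rem->ident = "collection-3";
        rem->drop = drop;
        uow.registerChange(std::move(rem));
        uow.commit();
    }
}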
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
index 963c9d623f9..285db3754f7 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
@@ -36,60 +36,57 @@
namespace mongo {
- class KVCatalog;
- class KVEngine;
+class KVCatalog;
+class KVEngine;
- class KVCollectionCatalogEntry final : public BSONCollectionCatalogEntry {
- public:
- KVCollectionCatalogEntry( KVEngine* engine,
- KVCatalog* catalog,
- StringData ns,
- StringData ident,
- RecordStore* rs );
+class KVCollectionCatalogEntry final : public BSONCollectionCatalogEntry {
+public:
+ KVCollectionCatalogEntry(
+ KVEngine* engine, KVCatalog* catalog, StringData ns, StringData ident, RecordStore* rs);
- ~KVCollectionCatalogEntry() final;
+ ~KVCollectionCatalogEntry() final;
- int getMaxAllowedIndexes() const final { return 64; };
-
- bool setIndexIsMultikey(OperationContext* txn,
- StringData indexName,
- bool multikey = true) final;
+ int getMaxAllowedIndexes() const final {
+ return 64;
+ };
- void setIndexHead( OperationContext* txn,
- StringData indexName,
- const RecordId& newHead ) final;
+ bool setIndexIsMultikey(OperationContext* txn,
+ StringData indexName,
+ bool multikey = true) final;
- Status removeIndex( OperationContext* txn,
- StringData indexName ) final;
+ void setIndexHead(OperationContext* txn, StringData indexName, const RecordId& newHead) final;
- Status prepareForIndexBuild( OperationContext* txn,
- const IndexDescriptor* spec ) final;
+ Status removeIndex(OperationContext* txn, StringData indexName) final;
- void indexBuildSuccess( OperationContext* txn,
- StringData indexName ) final;
+ Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) final;
- void updateTTLSetting( OperationContext* txn,
- StringData idxName,
- long long newExpireSeconds ) final;
+ void indexBuildSuccess(OperationContext* txn, StringData indexName) final;
- void updateFlags(OperationContext* txn, int newValue) final;
+ void updateTTLSetting(OperationContext* txn,
+ StringData idxName,
+ long long newExpireSeconds) final;
- void updateValidator(OperationContext* txn, const BSONObj& validator) final;
+ void updateFlags(OperationContext* txn, int newValue) final;
- RecordStore* getRecordStore() { return _recordStore.get(); }
- const RecordStore* getRecordStore() const { return _recordStore.get(); }
+ void updateValidator(OperationContext* txn, const BSONObj& validator) final;
- protected:
- MetaData _getMetaData( OperationContext* txn ) const final;
+ RecordStore* getRecordStore() {
+ return _recordStore.get();
+ }
+ const RecordStore* getRecordStore() const {
+ return _recordStore.get();
+ }
- private:
- class AddIndexChange;
- class RemoveIndexChange;
+protected:
+ MetaData _getMetaData(OperationContext* txn) const final;
- KVEngine* _engine; // not owned
- KVCatalog* _catalog; // not owned
- std::string _ident;
- std::unique_ptr<RecordStore> _recordStore; // owned
- };
+private:
+ class AddIndexChange;
+ class RemoveIndexChange;
+ KVEngine* _engine; // not owned
+ KVCatalog* _catalog; // not owned
+ std::string _ident;
+ std::unique_ptr<RecordStore> _recordStore; // owned
+};
}
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
index a72aa8a12b6..b88eeb6bc20 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry.cpp
@@ -38,333 +38,313 @@
namespace mongo {
- using std::string;
- using std::vector;
-
- class KVDatabaseCatalogEntry::AddCollectionChange : public RecoveryUnit::Change {
- public:
- AddCollectionChange(OperationContext* opCtx, KVDatabaseCatalogEntry* dce,
- StringData collection, StringData ident,
- bool dropOnRollback)
- : _opCtx(opCtx)
- , _dce(dce)
- , _collection(collection.toString())
- , _ident(ident.toString())
- , _dropOnRollback(dropOnRollback)
- {}
-
- virtual void commit() {}
- virtual void rollback() {
- if (_dropOnRollback) {
- // Intentionally ignoring failure
- _dce->_engine->getEngine()->dropIdent(_opCtx, _ident);
- }
-
- const CollectionMap::iterator it = _dce->_collections.find(_collection);
- if (it != _dce->_collections.end()) {
- delete it->second;
- _dce->_collections.erase(it);
- }
+using std::string;
+using std::vector;
+
+class KVDatabaseCatalogEntry::AddCollectionChange : public RecoveryUnit::Change {
+public:
+ AddCollectionChange(OperationContext* opCtx,
+ KVDatabaseCatalogEntry* dce,
+ StringData collection,
+ StringData ident,
+ bool dropOnRollback)
+ : _opCtx(opCtx),
+ _dce(dce),
+ _collection(collection.toString()),
+ _ident(ident.toString()),
+ _dropOnRollback(dropOnRollback) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ if (_dropOnRollback) {
+ // Intentionally ignoring failure
+ _dce->_engine->getEngine()->dropIdent(_opCtx, _ident);
}
- OperationContext* const _opCtx;
- KVDatabaseCatalogEntry* const _dce;
- const std::string _collection;
- const std::string _ident;
- const bool _dropOnRollback;
- };
-
- class KVDatabaseCatalogEntry::RemoveCollectionChange : public RecoveryUnit::Change {
- public:
- RemoveCollectionChange(OperationContext* opCtx, KVDatabaseCatalogEntry* dce,
- StringData collection, StringData ident,
- KVCollectionCatalogEntry* entry, bool dropOnCommit)
- : _opCtx(opCtx)
- , _dce(dce)
- , _collection(collection.toString())
- , _ident(ident.toString())
- , _entry(entry)
- , _dropOnCommit(dropOnCommit)
- {}
-
- virtual void commit() {
- delete _entry;
-
- // Intentionally ignoring failure here. Since we've removed the metadata pointing to the
- // collection, we should never see it again anyway.
- if (_dropOnCommit)
- _dce->_engine->getEngine()->dropIdent( _opCtx, _ident );
- }
-
- virtual void rollback() {
- _dce->_collections[_collection] = _entry;
- }
-
- OperationContext* const _opCtx;
- KVDatabaseCatalogEntry* const _dce;
- const std::string _collection;
- const std::string _ident;
- KVCollectionCatalogEntry* const _entry;
- const bool _dropOnCommit;
- };
-
- KVDatabaseCatalogEntry::KVDatabaseCatalogEntry( StringData db, KVStorageEngine* engine )
- : DatabaseCatalogEntry( db ), _engine( engine ) {
-
- }
-
- KVDatabaseCatalogEntry::~KVDatabaseCatalogEntry() {
- for ( CollectionMap::const_iterator it = _collections.begin(); it != _collections.end(); ++it ) {
+ const CollectionMap::iterator it = _dce->_collections.find(_collection);
+ if (it != _dce->_collections.end()) {
delete it->second;
+ _dce->_collections.erase(it);
}
- _collections.clear();
}
- bool KVDatabaseCatalogEntry::exists() const {
- return !isEmpty();
+ OperationContext* const _opCtx;
+ KVDatabaseCatalogEntry* const _dce;
+ const std::string _collection;
+ const std::string _ident;
+ const bool _dropOnRollback;
+};
+
+class KVDatabaseCatalogEntry::RemoveCollectionChange : public RecoveryUnit::Change {
+public:
+ RemoveCollectionChange(OperationContext* opCtx,
+ KVDatabaseCatalogEntry* dce,
+ StringData collection,
+ StringData ident,
+ KVCollectionCatalogEntry* entry,
+ bool dropOnCommit)
+ : _opCtx(opCtx),
+ _dce(dce),
+ _collection(collection.toString()),
+ _ident(ident.toString()),
+ _entry(entry),
+ _dropOnCommit(dropOnCommit) {}
+
+ virtual void commit() {
+ delete _entry;
+
+ // Intentionally ignoring failure here. Since we've removed the metadata pointing to the
+ // collection, we should never see it again anyway.
+ if (_dropOnCommit)
+ _dce->_engine->getEngine()->dropIdent(_opCtx, _ident);
}
- bool KVDatabaseCatalogEntry::isEmpty() const {
- return _collections.empty();
+ virtual void rollback() {
+ _dce->_collections[_collection] = _entry;
}
- bool KVDatabaseCatalogEntry::hasUserData() const {
- return !isEmpty();
- }
+ OperationContext* const _opCtx;
+ KVDatabaseCatalogEntry* const _dce;
+ const std::string _collection;
+ const std::string _ident;
+ KVCollectionCatalogEntry* const _entry;
+ const bool _dropOnCommit;
+};
- int64_t KVDatabaseCatalogEntry::sizeOnDisk( OperationContext* opCtx ) const {
- int64_t size = 0;
+KVDatabaseCatalogEntry::KVDatabaseCatalogEntry(StringData db, KVStorageEngine* engine)
+ : DatabaseCatalogEntry(db), _engine(engine) {}
- for ( CollectionMap::const_iterator it = _collections.begin(); it != _collections.end(); ++it ) {
- const KVCollectionCatalogEntry* coll = it->second;
- if ( !coll )
- continue;
- size += coll->getRecordStore()->storageSize( opCtx );
+KVDatabaseCatalogEntry::~KVDatabaseCatalogEntry() {
+ for (CollectionMap::const_iterator it = _collections.begin(); it != _collections.end(); ++it) {
+ delete it->second;
+ }
+ _collections.clear();
+}
- vector<string> indexNames;
- coll->getAllIndexes( opCtx, &indexNames );
+bool KVDatabaseCatalogEntry::exists() const {
+ return !isEmpty();
+}
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- string ident = _engine->getCatalog()->getIndexIdent( opCtx,
- coll->ns().ns(),
- indexNames[i] );
- size += _engine->getEngine()->getIdentSize( opCtx, ident );
- }
- }
+bool KVDatabaseCatalogEntry::isEmpty() const {
+ return _collections.empty();
+}
- return size;
- }
+bool KVDatabaseCatalogEntry::hasUserData() const {
+ return !isEmpty();
+}
- void KVDatabaseCatalogEntry::appendExtraStats( OperationContext* opCtx,
- BSONObjBuilder* out,
- double scale ) const {
- }
+int64_t KVDatabaseCatalogEntry::sizeOnDisk(OperationContext* opCtx) const {
+ int64_t size = 0;
- bool KVDatabaseCatalogEntry::currentFilesCompatible( OperationContext* opCtx ) const {
- // todo
- return true;
- }
+ for (CollectionMap::const_iterator it = _collections.begin(); it != _collections.end(); ++it) {
+ const KVCollectionCatalogEntry* coll = it->second;
+ if (!coll)
+ continue;
+ size += coll->getRecordStore()->storageSize(opCtx);
- void KVDatabaseCatalogEntry::getCollectionNamespaces( std::list<std::string>* out ) const {
- for (CollectionMap::const_iterator it = _collections.begin();
- it != _collections.end();
- ++it) {
+ vector<string> indexNames;
+ coll->getAllIndexes(opCtx, &indexNames);
- out->push_back( it->first );
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ string ident =
+ _engine->getCatalog()->getIndexIdent(opCtx, coll->ns().ns(), indexNames[i]);
+ size += _engine->getEngine()->getIdentSize(opCtx, ident);
}
}
- CollectionCatalogEntry* KVDatabaseCatalogEntry::getCollectionCatalogEntry(
- StringData ns ) const {
-
- CollectionMap::const_iterator it = _collections.find( ns.toString() );
- if (it == _collections.end()) {
- return NULL;
- }
+ return size;
+}
- return it->second;
- }
+void KVDatabaseCatalogEntry::appendExtraStats(OperationContext* opCtx,
+ BSONObjBuilder* out,
+ double scale) const {}
- RecordStore* KVDatabaseCatalogEntry::getRecordStore( StringData ns ) const {
- CollectionMap::const_iterator it = _collections.find( ns.toString() );
- if (it == _collections.end()) {
- return NULL;
- }
+bool KVDatabaseCatalogEntry::currentFilesCompatible(OperationContext* opCtx) const {
+ // todo
+ return true;
+}
- return it->second->getRecordStore();
+void KVDatabaseCatalogEntry::getCollectionNamespaces(std::list<std::string>* out) const {
+ for (CollectionMap::const_iterator it = _collections.begin(); it != _collections.end(); ++it) {
+ out->push_back(it->first);
}
+}
- Status KVDatabaseCatalogEntry::createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options,
- bool allocateDefaultSpace ) {
+CollectionCatalogEntry* KVDatabaseCatalogEntry::getCollectionCatalogEntry(StringData ns) const {
+ CollectionMap::const_iterator it = _collections.find(ns.toString());
+ if (it == _collections.end()) {
+ return NULL;
+ }
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ return it->second;
+}
- if (ns.empty()) {
- return Status(ErrorCodes::BadValue, "Collection namespace cannot be empty");
- }
+RecordStore* KVDatabaseCatalogEntry::getRecordStore(StringData ns) const {
+ CollectionMap::const_iterator it = _collections.find(ns.toString());
+ if (it == _collections.end()) {
+ return NULL;
+ }
- if (_collections.count(ns.toString())) {
- invariant(_collections[ns.toString()]);
- return Status(ErrorCodes::NamespaceExists, "collection already exists");
- }
+ return it->second->getRecordStore();
+}
- // need to create it
- Status status = _engine->getCatalog()->newCollection( txn, ns, options );
- if ( !status.isOK() )
- return status;
+Status KVDatabaseCatalogEntry::createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options,
+ bool allocateDefaultSpace) {
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- string ident = _engine->getCatalog()->getCollectionIdent( ns );
+ if (ns.empty()) {
+ return Status(ErrorCodes::BadValue, "Collection namespace cannot be empty");
+ }
- status = _engine->getEngine()->createRecordStore( txn, ns, ident, options );
- if ( !status.isOK() )
- return status;
+ if (_collections.count(ns.toString())) {
+ invariant(_collections[ns.toString()]);
+ return Status(ErrorCodes::NamespaceExists, "collection already exists");
+ }
- RecordStore* rs = _engine->getEngine()->getRecordStore( txn, ns, ident, options );
- invariant( rs );
+ // need to create it
+ Status status = _engine->getCatalog()->newCollection(txn, ns, options);
+ if (!status.isOK())
+ return status;
- txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, ns, ident, true));
- _collections[ns.toString()] =
- new KVCollectionCatalogEntry( _engine->getEngine(), _engine->getCatalog(),
- ns, ident, rs );
+ string ident = _engine->getCatalog()->getCollectionIdent(ns);
- return Status::OK();
- }
+ status = _engine->getEngine()->createRecordStore(txn, ns, ident, options);
+ if (!status.isOK())
+ return status;
- void KVDatabaseCatalogEntry::initCollection( OperationContext* opCtx,
- const std::string& ns,
- bool forRepair ) {
- invariant(!_collections.count(ns));
+ RecordStore* rs = _engine->getEngine()->getRecordStore(txn, ns, ident, options);
+ invariant(rs);
- const std::string ident = _engine->getCatalog()->getCollectionIdent( ns );
+ txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, ns, ident, true));
+ _collections[ns.toString()] =
+ new KVCollectionCatalogEntry(_engine->getEngine(), _engine->getCatalog(), ns, ident, rs);
- RecordStore* rs;
- if (forRepair) {
- // Using a NULL rs since we don't want to open this record store before it has been
- // repaired. This also ensures that if we try to use it, it will blow up.
- rs = NULL;
- }
- else {
- BSONCollectionCatalogEntry::MetaData md = _engine->getCatalog()->getMetaData(opCtx, ns);
- rs = _engine->getEngine()->getRecordStore( opCtx, ns, ident, md.options );
- invariant( rs );
- }
+ return Status::OK();
+}
- // No change registration since this is only for committed collections
- _collections[ns] = new KVCollectionCatalogEntry( _engine->getEngine(),
- _engine->getCatalog(),
- ns,
- ident,
- rs );
+void KVDatabaseCatalogEntry::initCollection(OperationContext* opCtx,
+ const std::string& ns,
+ bool forRepair) {
+ invariant(!_collections.count(ns));
+
+ const std::string ident = _engine->getCatalog()->getCollectionIdent(ns);
+
+ RecordStore* rs;
+ if (forRepair) {
+ // Using a NULL rs since we don't want to open this record store before it has been
+ // repaired. This also ensures that if we try to use it, it will blow up.
+ rs = NULL;
+ } else {
+ BSONCollectionCatalogEntry::MetaData md = _engine->getCatalog()->getMetaData(opCtx, ns);
+ rs = _engine->getEngine()->getRecordStore(opCtx, ns, ident, md.options);
+ invariant(rs);
}
- void KVDatabaseCatalogEntry::reinitCollectionAfterRepair(OperationContext* opCtx,
- const std::string& ns) {
- // Get rid of the old entry.
- CollectionMap::iterator it = _collections.find(ns);
- invariant(it != _collections.end());
- delete it->second;
- _collections.erase(it);
+ // No change registration since this is only for committed collections
+ _collections[ns] =
+ new KVCollectionCatalogEntry(_engine->getEngine(), _engine->getCatalog(), ns, ident, rs);
+}
- // Now reopen fully initialized.
- initCollection(opCtx, ns, false);
- }
+void KVDatabaseCatalogEntry::reinitCollectionAfterRepair(OperationContext* opCtx,
+ const std::string& ns) {
+ // Get rid of the old entry.
+ CollectionMap::iterator it = _collections.find(ns);
+ invariant(it != _collections.end());
+ delete it->second;
+ _collections.erase(it);
- Status KVDatabaseCatalogEntry::renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp ) {
+ // Now reopen fully initialized.
+ initCollection(opCtx, ns, false);
+}
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+Status KVDatabaseCatalogEntry::renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp) {
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- RecordStore* originalRS = NULL;
+ RecordStore* originalRS = NULL;
- CollectionMap::const_iterator it = _collections.find( fromNS.toString() );
- if (it == _collections.end()) {
- return Status(ErrorCodes::NamespaceNotFound, "rename cannot find collection");
- }
+ CollectionMap::const_iterator it = _collections.find(fromNS.toString());
+ if (it == _collections.end()) {
+ return Status(ErrorCodes::NamespaceNotFound, "rename cannot find collection");
+ }
- originalRS = it->second->getRecordStore();
+ originalRS = it->second->getRecordStore();
- it = _collections.find( toNS.toString() );
- if (it != _collections.end()) {
- return Status(ErrorCodes::NamespaceExists, "for rename to already exists");
- }
+ it = _collections.find(toNS.toString());
+ if (it != _collections.end()) {
+ return Status(ErrorCodes::NamespaceExists, "for rename to already exists");
+ }
- const std::string identFrom = _engine->getCatalog()->getCollectionIdent( fromNS );
+ const std::string identFrom = _engine->getCatalog()->getCollectionIdent(fromNS);
- Status status = _engine->getEngine()->okToRename( txn, fromNS, toNS, identFrom, originalRS );
- if ( !status.isOK() )
- return status;
+ Status status = _engine->getEngine()->okToRename(txn, fromNS, toNS, identFrom, originalRS);
+ if (!status.isOK())
+ return status;
- status = _engine->getCatalog()->renameCollection( txn, fromNS, toNS, stayTemp );
- if ( !status.isOK() )
- return status;
+ status = _engine->getCatalog()->renameCollection(txn, fromNS, toNS, stayTemp);
+ if (!status.isOK())
+ return status;
- const std::string identTo = _engine->getCatalog()->getCollectionIdent( toNS );
+ const std::string identTo = _engine->getCatalog()->getCollectionIdent(toNS);
- invariant( identFrom == identTo );
+ invariant(identFrom == identTo);
- BSONCollectionCatalogEntry::MetaData md = _engine->getCatalog()->getMetaData( txn, toNS );
- RecordStore* rs = _engine->getEngine()->getRecordStore( txn, toNS, identTo, md.options );
+ BSONCollectionCatalogEntry::MetaData md = _engine->getCatalog()->getMetaData(txn, toNS);
+ RecordStore* rs = _engine->getEngine()->getRecordStore(txn, toNS, identTo, md.options);
- const CollectionMap::iterator itFrom = _collections.find(fromNS.toString());
- invariant(itFrom != _collections.end());
- txn->recoveryUnit()->registerChange(new RemoveCollectionChange(txn, this, fromNS, identFrom,
- itFrom->second, false));
- _collections.erase(itFrom);
+ const CollectionMap::iterator itFrom = _collections.find(fromNS.toString());
+ invariant(itFrom != _collections.end());
+ txn->recoveryUnit()->registerChange(
+ new RemoveCollectionChange(txn, this, fromNS, identFrom, itFrom->second, false));
+ _collections.erase(itFrom);
- txn->recoveryUnit()->registerChange(
- new AddCollectionChange(txn, this, toNS, identTo, false));
- _collections[toNS.toString()] =
- new KVCollectionCatalogEntry( _engine->getEngine(), _engine->getCatalog(),
- toNS, identTo, rs );
+ txn->recoveryUnit()->registerChange(new AddCollectionChange(txn, this, toNS, identTo, false));
+ _collections[toNS.toString()] = new KVCollectionCatalogEntry(
+ _engine->getEngine(), _engine->getCatalog(), toNS, identTo, rs);
- return Status::OK();
- }
+ return Status::OK();
+}
- Status KVDatabaseCatalogEntry::dropCollection(OperationContext* opCtx, StringData ns) {
- invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
+Status KVDatabaseCatalogEntry::dropCollection(OperationContext* opCtx, StringData ns) {
+ invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X));
- CollectionMap::const_iterator it = _collections.find( ns.toString() );
- if (it == _collections.end()) {
-        return Status(ErrorCodes::NamespaceNotFound, "cannot find collection to drop");
- }
+ CollectionMap::const_iterator it = _collections.find(ns.toString());
+ if (it == _collections.end()) {
+        return Status(ErrorCodes::NamespaceNotFound, "cannot find collection to drop");
+ }
- KVCollectionCatalogEntry* const entry = it->second;
+ KVCollectionCatalogEntry* const entry = it->second;
- invariant(entry->getTotalIndexCount(opCtx) == entry->getCompletedIndexCount(opCtx));
+ invariant(entry->getTotalIndexCount(opCtx) == entry->getCompletedIndexCount(opCtx));
- {
- std::vector<std::string> indexNames;
- entry->getAllIndexes( opCtx, &indexNames );
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- entry->removeIndex( opCtx, indexNames[i] );
- }
+ {
+ std::vector<std::string> indexNames;
+ entry->getAllIndexes(opCtx, &indexNames);
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ entry->removeIndex(opCtx, indexNames[i]);
}
+ }
- invariant( entry->getTotalIndexCount( opCtx ) == 0 );
-
- const std::string ident = _engine->getCatalog()->getCollectionIdent(ns);
+ invariant(entry->getTotalIndexCount(opCtx) == 0);
- Status status = _engine->getCatalog()->dropCollection(opCtx, ns);
- if (!status.isOK()) {
- return status;
- }
+ const std::string ident = _engine->getCatalog()->getCollectionIdent(ns);
- // This will lazily delete the KVCollectionCatalogEntry and notify the storageEngine to
- // drop the collection only on WUOW::commit().
- opCtx->recoveryUnit()->registerChange(new RemoveCollectionChange(opCtx,
- this,
- ns,
- ident,
- it->second,
- true));
+ Status status = _engine->getCatalog()->dropCollection(opCtx, ns);
+ if (!status.isOK()) {
+ return status;
+ }
- _collections.erase( ns.toString() );
+ // This will lazily delete the KVCollectionCatalogEntry and notify the storageEngine to
+ // drop the collection only on WUOW::commit().
+ opCtx->recoveryUnit()->registerChange(
+ new RemoveCollectionChange(opCtx, this, ns, ident, it->second, true));
- return Status::OK();
- }
+ _collections.erase(ns.toString());
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry.h b/src/mongo/db/storage/kv/kv_database_catalog_entry.h
index 2bda9cc9afa..8cfd35f503b 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry.h
@@ -37,69 +37,66 @@
namespace mongo {
- class KVCollectionCatalogEntry;
- class KVStorageEngine;
+class KVCollectionCatalogEntry;
+class KVStorageEngine;
- class KVDatabaseCatalogEntry : public DatabaseCatalogEntry {
- public:
- KVDatabaseCatalogEntry( StringData db, KVStorageEngine* engine );
- virtual ~KVDatabaseCatalogEntry();
+class KVDatabaseCatalogEntry : public DatabaseCatalogEntry {
+public:
+ KVDatabaseCatalogEntry(StringData db, KVStorageEngine* engine);
+ virtual ~KVDatabaseCatalogEntry();
- virtual bool exists() const;
- virtual bool isEmpty() const;
- virtual bool hasUserData() const;
+ virtual bool exists() const;
+ virtual bool isEmpty() const;
+ virtual bool hasUserData() const;
- virtual int64_t sizeOnDisk( OperationContext* opCtx ) const;
+ virtual int64_t sizeOnDisk(OperationContext* opCtx) const;
- virtual void appendExtraStats( OperationContext* opCtx,
- BSONObjBuilder* out,
- double scale ) const;
+ virtual void appendExtraStats(OperationContext* opCtx, BSONObjBuilder* out, double scale) const;
- virtual bool isOlderThan24( OperationContext* opCtx ) const { return false; }
- virtual void markIndexSafe24AndUp( OperationContext* opCtx ) {}
+ virtual bool isOlderThan24(OperationContext* opCtx) const {
+ return false;
+ }
+ virtual void markIndexSafe24AndUp(OperationContext* opCtx) {}
- virtual bool currentFilesCompatible( OperationContext* opCtx ) const;
+ virtual bool currentFilesCompatible(OperationContext* opCtx) const;
- virtual void getCollectionNamespaces( std::list<std::string>* out ) const;
+ virtual void getCollectionNamespaces(std::list<std::string>* out) const;
- virtual CollectionCatalogEntry* getCollectionCatalogEntry( StringData ns ) const;
+ virtual CollectionCatalogEntry* getCollectionCatalogEntry(StringData ns) const;
- virtual RecordStore* getRecordStore( StringData ns ) const;
+ virtual RecordStore* getRecordStore(StringData ns) const;
- virtual IndexAccessMethod* getIndex( OperationContext* txn,
- const CollectionCatalogEntry* collection,
- IndexCatalogEntry* index );
+ virtual IndexAccessMethod* getIndex(OperationContext* txn,
+ const CollectionCatalogEntry* collection,
+ IndexCatalogEntry* index);
- virtual Status createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options,
- bool allocateDefaultSpace );
+ virtual Status createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options,
+ bool allocateDefaultSpace);
- virtual Status renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp );
+ virtual Status renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp);
- virtual Status dropCollection( OperationContext* opCtx,
- StringData ns );
+ virtual Status dropCollection(OperationContext* opCtx, StringData ns);
- // --------------
+ // --------------
- void initCollection( OperationContext* opCtx,
- const std::string& ns,
- bool forRepair );
+ void initCollection(OperationContext* opCtx, const std::string& ns, bool forRepair);
- void initCollectionBeforeRepair(OperationContext* opCtx, const std::string& ns);
- void reinitCollectionAfterRepair(OperationContext* opCtx, const std::string& ns);
+ void initCollectionBeforeRepair(OperationContext* opCtx, const std::string& ns);
+ void reinitCollectionAfterRepair(OperationContext* opCtx, const std::string& ns);
- private:
- class AddCollectionChange;
- class RemoveCollectionChange;
+private:
+ class AddCollectionChange;
+ class RemoveCollectionChange;
- typedef std::map<std::string, KVCollectionCatalogEntry*> CollectionMap;
+ typedef std::map<std::string, KVCollectionCatalogEntry*> CollectionMap;
- KVStorageEngine* const _engine; // not owned here
- CollectionMap _collections;
- };
+ KVStorageEngine* const _engine; // not owned here
+ CollectionMap _collections;
+};
}
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_get_index.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_get_index.cpp
index 3ca8f8e5631..4df30c2b98a 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_get_index.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_get_index.cpp
@@ -49,40 +49,37 @@
namespace mongo {
- IndexAccessMethod* KVDatabaseCatalogEntry::getIndex( OperationContext* txn,
- const CollectionCatalogEntry* collection,
- IndexCatalogEntry* index ) {
- IndexDescriptor* desc = index->descriptor();
+IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* txn,
+ const CollectionCatalogEntry* collection,
+ IndexCatalogEntry* index) {
+ IndexDescriptor* desc = index->descriptor();
- const string& type = desc->getAccessMethodName();
+ const string& type = desc->getAccessMethodName();
- string ident = _engine->getCatalog()->getIndexIdent( txn,
- collection->ns().ns(),
- desc->indexName() );
+ string ident =
+ _engine->getCatalog()->getIndexIdent(txn, collection->ns().ns(), desc->indexName());
- SortedDataInterface* sdi =
- _engine->getEngine()->getSortedDataInterface( txn, ident, desc );
+ SortedDataInterface* sdi = _engine->getEngine()->getSortedDataInterface(txn, ident, desc);
- if ("" == type)
- return new BtreeAccessMethod( index, sdi );
+ if ("" == type)
+ return new BtreeAccessMethod(index, sdi);
- if (IndexNames::HASHED == type)
- return new HashAccessMethod( index, sdi );
+ if (IndexNames::HASHED == type)
+ return new HashAccessMethod(index, sdi);
- if (IndexNames::GEO_2DSPHERE == type)
- return new S2AccessMethod( index, sdi );
+ if (IndexNames::GEO_2DSPHERE == type)
+ return new S2AccessMethod(index, sdi);
- if (IndexNames::TEXT == type)
- return new FTSAccessMethod( index, sdi );
+ if (IndexNames::TEXT == type)
+ return new FTSAccessMethod(index, sdi);
- if (IndexNames::GEO_HAYSTACK == type)
- return new HaystackAccessMethod( index, sdi );
+ if (IndexNames::GEO_HAYSTACK == type)
+ return new HaystackAccessMethod(index, sdi);
- if (IndexNames::GEO_2D == type)
- return new TwoDAccessMethod( index, sdi );
-
- log() << "Can't find index for keyPattern " << desc->keyPattern();
- invariant( false );
- }
+ if (IndexNames::GEO_2D == type)
+ return new TwoDAccessMethod(index, sdi);
+ log() << "Can't find index for keyPattern " << desc->keyPattern();
+ invariant(false);
+}
}
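
KVDatabaseCatalogEntry::getIndex above selects a concrete IndexAccessMethod by comparing the access-method name against each known index type in turn. A hypothetical table-driven version of the same dispatch is sketched below with standalone types; AccessMethod, makeAccessMethod and the registry map are illustrative, not the actual MongoDB factory.

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Illustrative stand-ins for the concrete access methods created in getIndex.
struct AccessMethod {
    virtual ~AccessMethod() {}
    virtual const char* name() const = 0;
};
struct BtreeAM : AccessMethod { const char* name() const override { return "btree"; } };
struct HashedAM : AccessMethod { const char* name() const override { return "hashed"; } };
struct TextAM : AccessMethod { const char* name() const override { return "text"; } };

// Table-driven dispatch from access-method name to factory, an alternative to
// the if-chain used in getIndex; unknown types yield a null pointer.
std::unique_ptr<AccessMethod> makeAccessMethod(const std::string& type) {
    static const std::map<std::string, std::function<std::unique_ptr<AccessMethod>()>> registry = {
        {"", []() -> std::unique_ptr<AccessMethod> { return std::make_unique<BtreeAM>(); }},
        {"hashed", []() -> std::unique_ptr<AccessMethod> { return std::make_unique<HashedAM>(); }},
        {"text", []() -> std::unique_ptr<AccessMethod> { return std::make_unique<TextAM>(); }},
    };
    auto it = registry.find(type);
    if (it == registry.end())
        return nullptr;
    return it->second();
}

int main() {
    if (auto am = makeAccessMethod("hashed"))
        std::cout << am->name() << '\n';
}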
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_get_index_mock.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_get_index_mock.cpp
index 7ab8760db3a..6b453609a24 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_get_index_mock.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_get_index_mock.cpp
@@ -34,11 +34,10 @@
namespace mongo {
- // Used to satisfy link dependencies in unit test - not invoked.
- IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* txn,
- const CollectionCatalogEntry* collection,
- IndexCatalogEntry* index) {
- invariant( false );
- }
-
+// Used to satisfy link dependencies in unit test - not invoked.
+IndexAccessMethod* KVDatabaseCatalogEntry::getIndex(OperationContext* txn,
+ const CollectionCatalogEntry* collection,
+ IndexCatalogEntry* index) {
+ invariant(false);
+}
}
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp
index 798add81b64..f9027d721c8 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_test.cpp
@@ -39,57 +39,57 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- TEST(KVDatabaseCatalogEntryTest, CreateCollectionValidNamespace) {
- KVStorageEngine storageEngine(new DevNullKVEngine());
- storageEngine.finishInit();
- KVDatabaseCatalogEntry dbEntry("mydb", &storageEngine);
- OperationContextNoop ctx;
- ASSERT_OK(dbEntry.createCollection(&ctx, "mydb.mycoll", CollectionOptions(), true));
- std::list<std::string> collectionNamespaces;
- dbEntry.getCollectionNamespaces(&collectionNamespaces);
- ASSERT_FALSE(collectionNamespaces.empty());
- }
+TEST(KVDatabaseCatalogEntryTest, CreateCollectionValidNamespace) {
+ KVStorageEngine storageEngine(new DevNullKVEngine());
+ storageEngine.finishInit();
+ KVDatabaseCatalogEntry dbEntry("mydb", &storageEngine);
+ OperationContextNoop ctx;
+ ASSERT_OK(dbEntry.createCollection(&ctx, "mydb.mycoll", CollectionOptions(), true));
+ std::list<std::string> collectionNamespaces;
+ dbEntry.getCollectionNamespaces(&collectionNamespaces);
+ ASSERT_FALSE(collectionNamespaces.empty());
+}
- TEST(KVDatabaseCatalogEntryTest, CreateCollectionEmptyNamespace) {
- KVStorageEngine storageEngine(new DevNullKVEngine());
- storageEngine.finishInit();
- KVDatabaseCatalogEntry dbEntry("mydb", &storageEngine);
- OperationContextNoop ctx;
- ASSERT_NOT_OK(dbEntry.createCollection(&ctx, "", CollectionOptions(), true));
- std::list<std::string> collectionNamespaces;
- dbEntry.getCollectionNamespaces(&collectionNamespaces);
- ASSERT_TRUE(collectionNamespaces.empty());
- }
+TEST(KVDatabaseCatalogEntryTest, CreateCollectionEmptyNamespace) {
+ KVStorageEngine storageEngine(new DevNullKVEngine());
+ storageEngine.finishInit();
+ KVDatabaseCatalogEntry dbEntry("mydb", &storageEngine);
+ OperationContextNoop ctx;
+ ASSERT_NOT_OK(dbEntry.createCollection(&ctx, "", CollectionOptions(), true));
+ std::list<std::string> collectionNamespaces;
+ dbEntry.getCollectionNamespaces(&collectionNamespaces);
+ ASSERT_TRUE(collectionNamespaces.empty());
+}
- /**
- * Derived class of devnull KV engine where createRecordStore is overridden to fail
- * on an empty namespace (provided by the test).
- */
- class InvalidRecordStoreKVEngine : public DevNullKVEngine {
- public:
- virtual Status createRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options ) {
- if (ns == "fail.me") {
- return Status(ErrorCodes::BadValue, "failed to create record store");
- }
- return DevNullKVEngine::createRecordStore(opCtx, ns, ident, options);
+/**
+ * Derived class of devnull KV engine where createRecordStore is overridden to fail
+ * on an empty namespace (provided by the test).
+ */
+class InvalidRecordStoreKVEngine : public DevNullKVEngine {
+public:
+ virtual Status createRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) {
+ if (ns == "fail.me") {
+ return Status(ErrorCodes::BadValue, "failed to create record store");
}
- };
-
- // After createCollection fails, collection namespaces should remain empty.
- TEST(KVDatabaseCatalogEntryTest, CreateCollectionInvalidRecordStore) {
- KVStorageEngine storageEngine(new InvalidRecordStoreKVEngine());
- storageEngine.finishInit();
- KVDatabaseCatalogEntry dbEntry("fail", &storageEngine);
- OperationContextNoop ctx;
- ASSERT_NOT_OK(dbEntry.createCollection(&ctx, "fail.me", CollectionOptions(), true));
- std::list<std::string> collectionNamespaces;
- dbEntry.getCollectionNamespaces(&collectionNamespaces);
- ASSERT_TRUE(collectionNamespaces.empty());
+ return DevNullKVEngine::createRecordStore(opCtx, ns, ident, options);
}
+};
+
+// After createCollection fails, collection namespaces should remain empty.
+TEST(KVDatabaseCatalogEntryTest, CreateCollectionInvalidRecordStore) {
+ KVStorageEngine storageEngine(new InvalidRecordStoreKVEngine());
+ storageEngine.finishInit();
+ KVDatabaseCatalogEntry dbEntry("fail", &storageEngine);
+ OperationContextNoop ctx;
+ ASSERT_NOT_OK(dbEntry.createCollection(&ctx, "fail.me", CollectionOptions(), true));
+ std::list<std::string> collectionNamespaces;
+ dbEntry.getCollectionNamespaces(&collectionNamespaces);
+ ASSERT_TRUE(collectionNamespaces.empty());
+}
} // namespace
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index ccc127c1abd..bf73380e4e9 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -39,103 +39,100 @@
namespace mongo {
- class IndexDescriptor;
- class OperationContext;
- class RecordStore;
- class RecoveryUnit;
- class SortedDataInterface;
-
- class KVEngine {
- public:
-
- virtual RecoveryUnit* newRecoveryUnit() = 0;
-
- // ---------
-
- /**
- * Caller takes ownership
- * Having multiple out for the same ns is a rules violation;
- * Calling on a non-created ident is invalid and may crash.
- */
- virtual RecordStore* getRecordStore( OperationContext* opCtx,
- StringData ns,
+class IndexDescriptor;
+class OperationContext;
+class RecordStore;
+class RecoveryUnit;
+class SortedDataInterface;
+
+class KVEngine {
+public:
+ virtual RecoveryUnit* newRecoveryUnit() = 0;
+
+ // ---------
+
+ /**
+ * Caller takes ownership
+ * Having multiple out for the same ns is a rules violation;
+ * Calling on a non-created ident is invalid and may crash.
+ */
+ virtual RecordStore* getRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) = 0;
+
+ virtual SortedDataInterface* getSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc) = 0;
+
+ //
+ // The create and drop methods on KVEngine are not transactional. Transactional semantics
+ // are provided by the KVStorageEngine code that calls these. For example, drop will be
+ // called if a create is rolled back. A higher-level drop operation will only propagate to a
+ // drop call on the KVEngine once the WUOW commits. Therefore drops will never be rolled
+ // back and it is safe to immediately reclaim storage.
+ //
+
+ virtual Status createRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) = 0;
+
+ virtual Status createSortedDataInterface(OperationContext* opCtx,
StringData ident,
- const CollectionOptions& options ) = 0;
-
- virtual SortedDataInterface* getSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc ) = 0;
-
- //
- // The create and drop methods on KVEngine are not transactional. Transactional semantics
- // are provided by the KVStorageEngine code that calls these. For example, drop will be
- // called if a create is rolled back. A higher-level drop operation will only propagate to a
- // drop call on the KVEngine once the WUOW commits. Therefore drops will never be rolled
- // back and it is safe to immediately reclaim storage.
- //
-
- virtual Status createRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options ) = 0;
-
- virtual Status createSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc ) = 0;
-
- virtual int64_t getIdentSize( OperationContext* opCtx,
- StringData ident ) = 0;
-
- virtual Status repairIdent( OperationContext* opCtx,
- StringData ident ) = 0;
-
- virtual Status dropIdent( OperationContext* opCtx,
- StringData ident ) = 0;
-
- // optional
- virtual int flushAllFiles( bool sync ) { return 0; }
-
- virtual bool isDurable() const = 0;
-
- /**
- * This must not change over the lifetime of the engine.
- */
- virtual bool supportsDocLocking() const = 0;
-
- /**
- * Returns true if storage engine supports --directoryperdb.
- * See:
- * http://docs.mongodb.org/manual/reference/program/mongod/#cmdoption--directoryperdb
- */
- virtual bool supportsDirectoryPerDB() const = 0;
-
- virtual Status okToRename( OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
- StringData ident,
- const RecordStore* originalRecordStore ) const {
- return Status::OK();
- }
-
- virtual bool hasIdent(OperationContext* opCtx, StringData ident) const = 0;
-
- virtual std::vector<std::string> getAllIdents( OperationContext* opCtx ) const = 0;
-
- /**
- * This method will be called before there is a clean shutdown. Storage engines should
- * override this method if they have clean-up to do that is different from unclean shutdown.
- * MongoDB will not call into the storage subsystem after calling this function.
- *
- * There is intentionally no uncleanShutdown().
- */
- virtual void cleanShutdown() = 0;
-
- /**
- * The destructor will never be called from mongod, but may be called from tests.
- * Engines may assume that this will only be called in the case of clean shutdown, even if
- * cleanShutdown() hasn't been called.
- */
- virtual ~KVEngine() {}
- };
-
+ const IndexDescriptor* desc) = 0;
+
+ virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident) = 0;
+
+ virtual Status repairIdent(OperationContext* opCtx, StringData ident) = 0;
+
+ virtual Status dropIdent(OperationContext* opCtx, StringData ident) = 0;
+
+ // optional
+ virtual int flushAllFiles(bool sync) {
+ return 0;
+ }
+
+ virtual bool isDurable() const = 0;
+
+ /**
+ * This must not change over the lifetime of the engine.
+ */
+ virtual bool supportsDocLocking() const = 0;
+
+ /**
+ * Returns true if storage engine supports --directoryperdb.
+ * See:
+ * http://docs.mongodb.org/manual/reference/program/mongod/#cmdoption--directoryperdb
+ */
+ virtual bool supportsDirectoryPerDB() const = 0;
+
+ virtual Status okToRename(OperationContext* opCtx,
+ StringData fromNS,
+ StringData toNS,
+ StringData ident,
+ const RecordStore* originalRecordStore) const {
+ return Status::OK();
+ }
+
+ virtual bool hasIdent(OperationContext* opCtx, StringData ident) const = 0;
+
+ virtual std::vector<std::string> getAllIdents(OperationContext* opCtx) const = 0;
+
+ /**
+ * This method will be called before there is a clean shutdown. Storage engines should
+ * override this method if they have clean-up to do that is different from unclean shutdown.
+ * MongoDB will not call into the storage subsystem after calling this function.
+ *
+ * There is intentionally no uncleanShutdown().
+ */
+ virtual void cleanShutdown() = 0;
+
+ /**
+ * The destructor will never be called from mongod, but may be called from tests.
+ * Engines may assume that this will only be called in the case of clean shutdown, even if
+ * cleanShutdown() hasn't been called.
+ */
+ virtual ~KVEngine() {}
+};
}
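
The KVEngine interface above documents that its create and drop methods are not transactional: the KVStorageEngine layer only issues a drop once the unit of work has committed, so the engine may reclaim storage immediately. A toy, clearly non-MongoDB sketch of an ident registry honoring that contract follows; IdentRegistry and its method names are assumed names for illustration only.

#include <iostream>
#include <set>
#include <string>
#include <vector>

// Toy, non-transactional ident registry: create/drop take effect immediately,
// and the caller is responsible for deferring drops until the owning unit of
// work has committed, as described in the comment block above.
class IdentRegistry {
public:
    bool createIdent(const std::string& ident) { return _idents.insert(ident).second; }
    bool dropIdent(const std::string& ident) { return _idents.erase(ident) > 0; }  // reclaims immediately
    bool hasIdent(const std::string& ident) const { return _idents.count(ident) > 0; }
    std::vector<std::string> getAllIdents() const {
        return std::vector<std::string>(_idents.begin(), _idents.end());
    }
private:
    std::set<std::string> _idents;
};

int main() {
    IdentRegistry engine;
    engine.createIdent("collection-0");
    engine.createIdent("index-1");
    engine.dropIdent("index-1");  // never rolled back, per the contract above
    for (const auto& ident : engine.getAllIdents())
        std::cout << ident << '\n';
}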
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 36aeefcf2c4..c225fb7ada3 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -41,372 +41,366 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- namespace {
- class MyOperationContext : public OperationContextNoop {
- public:
- MyOperationContext( KVEngine* engine )
- : OperationContextNoop( engine->newRecoveryUnit() ) {
- }
- };
- }
-
- TEST( KVEngineTestHarness, SimpleRS1 ) {
- unique_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
- KVEngine* engine = helper->getEngine();
- ASSERT( engine );
-
- string ns = "a.b";
- unique_ptr<RecordStore> rs;
- {
- MyOperationContext opCtx( engine );
- ASSERT_OK( engine->createRecordStore( &opCtx, ns, ns, CollectionOptions() ) );
- rs.reset( engine->getRecordStore( &opCtx, ns, ns, CollectionOptions() ) );
- ASSERT( rs );
- }
-
-
- RecordId loc;
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- StatusWith<RecordId> res = rs->insertRecord( &opCtx, "abc", 4, false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
-
- {
- MyOperationContext opCtx( engine );
- ASSERT_EQUALS( string("abc"), rs->dataFor( &opCtx, loc ).data() );
- }
-
- {
- MyOperationContext opCtx( engine );
- std::vector<std::string> all = engine->getAllIdents( &opCtx );
- ASSERT_EQUALS( 1U, all.size() );
- ASSERT_EQUALS( ns, all[0] );
- }
+using std::unique_ptr;
+using std::string;
+
+namespace {
+class MyOperationContext : public OperationContextNoop {
+public:
+ MyOperationContext(KVEngine* engine) : OperationContextNoop(engine->newRecoveryUnit()) {}
+};
+}
+TEST(KVEngineTestHarness, SimpleRS1) {
+ unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
+ KVEngine* engine = helper->getEngine();
+ ASSERT(engine);
+
+ string ns = "a.b";
+ unique_ptr<RecordStore> rs;
+ {
+ MyOperationContext opCtx(engine);
+ ASSERT_OK(engine->createRecordStore(&opCtx, ns, ns, CollectionOptions()));
+ rs.reset(engine->getRecordStore(&opCtx, ns, ns, CollectionOptions()));
+ ASSERT(rs);
}
- TEST( KVEngineTestHarness, Restart1 ) {
- unique_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
- KVEngine* engine = helper->getEngine();
- ASSERT( engine );
-
- string ns = "a.b";
-
- // 'loc' holds location of "abc" and is referenced after restarting engine.
- RecordId loc;
- {
- unique_ptr<RecordStore> rs;
- {
- MyOperationContext opCtx( engine );
- ASSERT_OK( engine->createRecordStore( &opCtx, ns, ns, CollectionOptions() ) );
- rs.reset( engine->getRecordStore( &opCtx, ns, ns, CollectionOptions() ) );
- ASSERT( rs );
- }
-
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- StatusWith<RecordId> res = rs->insertRecord( &opCtx, "abc", 4, false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
-
- {
- MyOperationContext opCtx( engine );
- ASSERT_EQUALS( string("abc"), rs->dataFor( &opCtx, loc ).data() );
- }
- }
- engine = helper->restartEngine();
+ RecordId loc;
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ StatusWith<RecordId> res = rs->insertRecord(&opCtx, "abc", 4, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
+ }
- {
- unique_ptr<RecordStore> rs;
- MyOperationContext opCtx( engine );
- rs.reset( engine->getRecordStore( &opCtx, ns, ns, CollectionOptions() ) );
- ASSERT_EQUALS( string("abc"), rs->dataFor( &opCtx, loc ).data() );
- }
+ {
+ MyOperationContext opCtx(engine);
+ ASSERT_EQUALS(string("abc"), rs->dataFor(&opCtx, loc).data());
+ }
+ {
+ MyOperationContext opCtx(engine);
+ std::vector<std::string> all = engine->getAllIdents(&opCtx);
+ ASSERT_EQUALS(1U, all.size());
+ ASSERT_EQUALS(ns, all[0]);
}
+}
+TEST(KVEngineTestHarness, Restart1) {
+ unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
+ KVEngine* engine = helper->getEngine();
+ ASSERT(engine);
- TEST( KVEngineTestHarness, SimpleSorted1 ) {
- unique_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
- KVEngine* engine = helper->getEngine();
- ASSERT( engine );
+ string ns = "a.b";
- string ident = "abc";
- IndexDescriptor desc( NULL, "", BSON( "key" << BSON( "a" << 1 ) ) );
- unique_ptr<SortedDataInterface> sorted;
+ // 'loc' holds location of "abc" and is referenced after restarting engine.
+ RecordId loc;
+ {
+ unique_ptr<RecordStore> rs;
{
- MyOperationContext opCtx( engine );
- ASSERT_OK( engine->createSortedDataInterface( &opCtx, ident, &desc ) );
- sorted.reset( engine->getSortedDataInterface( &opCtx, ident, &desc ) );
- ASSERT( sorted );
+ MyOperationContext opCtx(engine);
+ ASSERT_OK(engine->createRecordStore(&opCtx, ns, ns, CollectionOptions()));
+ rs.reset(engine->getRecordStore(&opCtx, ns, ns, CollectionOptions()));
+ ASSERT(rs);
}
{
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( sorted->insert( &opCtx, BSON( "" << 5 ), RecordId( 6, 4 ), true ) );
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ StatusWith<RecordId> res = rs->insertRecord(&opCtx, "abc", 4, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
uow.commit();
}
{
- MyOperationContext opCtx( engine );
- ASSERT_EQUALS( 1, sorted->numEntries( &opCtx ) );
+ MyOperationContext opCtx(engine);
+ ASSERT_EQUALS(string("abc"), rs->dataFor(&opCtx, loc).data());
}
-
}
- TEST( KVCatalogTest, Coll1 ) {
- unique_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
- KVEngine* engine = helper->getEngine();
+ engine = helper->restartEngine();
+ {
unique_ptr<RecordStore> rs;
- unique_ptr<KVCatalog> catalog;
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- catalog.reset( new KVCatalog( rs.get(), true, false, false) );
- uow.commit();
- }
+ MyOperationContext opCtx(engine);
+ rs.reset(engine->getRecordStore(&opCtx, ns, ns, CollectionOptions()));
+ ASSERT_EQUALS(string("abc"), rs->dataFor(&opCtx, loc).data());
+ }
+}
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
- ASSERT_NOT_EQUALS( "a.b", catalog->getCollectionIdent( "a.b" ) );
- uow.commit();
- }
- string ident = catalog->getCollectionIdent( "a.b" );
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- catalog.reset( new KVCatalog( rs.get(), true, false, false) );
- catalog->init( &opCtx );
- uow.commit();
- }
- ASSERT_EQUALS( ident, catalog->getCollectionIdent( "a.b" ) );
+TEST(KVEngineTestHarness, SimpleSorted1) {
+ unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
+ KVEngine* engine = helper->getEngine();
+ ASSERT(engine);
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- catalog->dropCollection( &opCtx, "a.b" );
- catalog->newCollection( &opCtx, "a.b", CollectionOptions() );
- uow.commit();
- }
- ASSERT_NOT_EQUALS( ident, catalog->getCollectionIdent( "a.b" ) );
+ string ident = "abc";
+ IndexDescriptor desc(NULL, "", BSON("key" << BSON("a" << 1)));
+ unique_ptr<SortedDataInterface> sorted;
+ {
+ MyOperationContext opCtx(engine);
+ ASSERT_OK(engine->createSortedDataInterface(&opCtx, ident, &desc));
+ sorted.reset(engine->getSortedDataInterface(&opCtx, ident, &desc));
+ ASSERT(sorted);
}
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(sorted->insert(&opCtx, BSON("" << 5), RecordId(6, 4), true));
+ uow.commit();
+ }
- TEST( KVCatalogTest, Idx1 ) {
- unique_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
- KVEngine* engine = helper->getEngine();
-
- unique_ptr<RecordStore> rs;
- unique_ptr<KVCatalog> catalog;
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- catalog.reset( new KVCatalog( rs.get(), true, false, false) );
- uow.commit();
- }
-
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
- ASSERT_NOT_EQUALS( "a.b", catalog->getCollectionIdent( "a.b" ) );
- ASSERT_TRUE( catalog->isUserDataIdent( catalog->getCollectionIdent( "a.b" ) ) );
- uow.commit();
- }
+ {
+ MyOperationContext opCtx(engine);
+ ASSERT_EQUALS(1, sorted->numEntries(&opCtx));
+ }
+}
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
-
- BSONCollectionCatalogEntry::MetaData md;
- md.ns ="a.b";
- md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
- false,
- RecordId(),
- false ) );
- catalog->putMetaData( &opCtx, "a.b", md );
- uow.commit();
- }
+TEST(KVCatalogTest, Coll1) {
+ unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
+ KVEngine* engine = helper->getEngine();
+
+ unique_ptr<RecordStore> rs;
+ unique_ptr<KVCatalog> catalog;
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ rs.reset(engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ catalog.reset(new KVCatalog(rs.get(), true, false, false));
+ uow.commit();
+ }
- string idxIndent;
- {
- MyOperationContext opCtx( engine );
- idxIndent = catalog->getIndexIdent( &opCtx, "a.b", "foo" );
- }
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(catalog->newCollection(&opCtx, "a.b", CollectionOptions()));
+ ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
+ uow.commit();
+ }
- {
- MyOperationContext opCtx( engine );
- ASSERT_EQUALS( idxIndent, catalog->getIndexIdent( &opCtx, "a.b", "foo" ) );
- ASSERT_TRUE( catalog->isUserDataIdent( catalog->getIndexIdent( &opCtx, "a.b", "foo" ) ) );
- }
+ string ident = catalog->getCollectionIdent("a.b");
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ catalog.reset(new KVCatalog(rs.get(), true, false, false));
+ catalog->init(&opCtx);
+ uow.commit();
+ }
+ ASSERT_EQUALS(ident, catalog->getCollectionIdent("a.b"));
+
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ catalog->dropCollection(&opCtx, "a.b");
+ catalog->newCollection(&opCtx, "a.b", CollectionOptions());
+ uow.commit();
+ }
+ ASSERT_NOT_EQUALS(ident, catalog->getCollectionIdent("a.b"));
+}
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
-
- BSONCollectionCatalogEntry::MetaData md;
- md.ns ="a.b";
- catalog->putMetaData( &opCtx, "a.b", md ); // remove index
- md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
- false,
- RecordId(),
- false ) );
- catalog->putMetaData( &opCtx, "a.b", md );
- uow.commit();
- }
- {
- MyOperationContext opCtx( engine );
- ASSERT_NOT_EQUALS( idxIndent, catalog->getIndexIdent( &opCtx, "a.b", "foo" ) );
- }
+TEST(KVCatalogTest, Idx1) {
+ unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
+ KVEngine* engine = helper->getEngine();
+ unique_ptr<RecordStore> rs;
+ unique_ptr<KVCatalog> catalog;
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ rs.reset(engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ catalog.reset(new KVCatalog(rs.get(), true, false, false));
+ uow.commit();
}
- TEST( KVCatalogTest, DirectoryPerDb1 ) {
- unique_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
- KVEngine* engine = helper->getEngine();
-
- unique_ptr<RecordStore> rs;
- unique_ptr<KVCatalog> catalog;
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- catalog.reset( new KVCatalog( rs.get(), true, true, false) );
- uow.commit();
- }
-
- { // collection
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
- ASSERT_STRING_CONTAINS( catalog->getCollectionIdent( "a.b" ), "a/" );
- ASSERT_TRUE( catalog->isUserDataIdent( catalog->getCollectionIdent( "a.b" ) ) );
- uow.commit();
- }
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(catalog->newCollection(&opCtx, "a.b", CollectionOptions()));
+ ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ uow.commit();
+ }
- { // index
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
-
- BSONCollectionCatalogEntry::MetaData md;
- md.ns ="a.b";
- md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
- false,
- RecordId(),
- false ) );
- catalog->putMetaData( &opCtx, "a.b", md );
- ASSERT_STRING_CONTAINS( catalog->getIndexIdent( &opCtx, "a.b", "foo" ), "a/" );
- ASSERT_TRUE( catalog->isUserDataIdent( catalog->getIndexIdent( &opCtx, "a.b", "foo" ) ) );
- uow.commit();
- }
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+
+ BSONCollectionCatalogEntry::MetaData md;
+ md.ns = "a.b";
+ md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
+ << "foo"),
+ false,
+ RecordId(),
+ false));
+ catalog->putMetaData(&opCtx, "a.b", md);
+ uow.commit();
+ }
+ string idxIndent;
+ {
+ MyOperationContext opCtx(engine);
+ idxIndent = catalog->getIndexIdent(&opCtx, "a.b", "foo");
}
- TEST( KVCatalogTest, Split1 ) {
- unique_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
- KVEngine* engine = helper->getEngine();
+ {
+ MyOperationContext opCtx(engine);
+ ASSERT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
+ }
- unique_ptr<RecordStore> rs;
- unique_ptr<KVCatalog> catalog;
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- catalog.reset( new KVCatalog( rs.get(), true, false, true) );
- uow.commit();
- }
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+
+ BSONCollectionCatalogEntry::MetaData md;
+ md.ns = "a.b";
+ catalog->putMetaData(&opCtx, "a.b", md); // remove index
+ md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
+ << "foo"),
+ false,
+ RecordId(),
+ false));
+ catalog->putMetaData(&opCtx, "a.b", md);
+ uow.commit();
+ }
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
- ASSERT_STRING_CONTAINS( catalog->getCollectionIdent( "a.b" ), "collection/" );
- ASSERT_TRUE( catalog->isUserDataIdent( catalog->getCollectionIdent( "a.b" ) ) );
- uow.commit();
- }
+ {
+ MyOperationContext opCtx(engine);
+ ASSERT_NOT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
+ }
+}
- { // index
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
-
- BSONCollectionCatalogEntry::MetaData md;
- md.ns ="a.b";
- md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
- false,
- RecordId(),
- false ) );
- catalog->putMetaData( &opCtx, "a.b", md );
- ASSERT_STRING_CONTAINS( catalog->getIndexIdent( &opCtx, "a.b", "foo" ), "index/" );
- ASSERT_TRUE( catalog->isUserDataIdent( catalog->getIndexIdent( &opCtx, "a.b", "foo" ) ) );
- uow.commit();
- }
+TEST(KVCatalogTest, DirectoryPerDb1) {
+ unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
+ KVEngine* engine = helper->getEngine();
+
+ unique_ptr<RecordStore> rs;
+ unique_ptr<KVCatalog> catalog;
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ rs.reset(engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ catalog.reset(new KVCatalog(rs.get(), true, true, false));
+ uow.commit();
+ }
+ { // collection
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(catalog->newCollection(&opCtx, "a.b", CollectionOptions()));
+ ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ uow.commit();
}
- TEST( KVCatalogTest, DirectoryPerAndSplit1 ) {
- unique_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
- KVEngine* engine = helper->getEngine();
+ { // index
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+
+ BSONCollectionCatalogEntry::MetaData md;
+ md.ns = "a.b";
+ md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
+ << "foo"),
+ false,
+ RecordId(),
+ false));
+ catalog->putMetaData(&opCtx, "a.b", md);
+ ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "a/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
+ uow.commit();
+ }
+}
- unique_ptr<RecordStore> rs;
- unique_ptr<KVCatalog> catalog;
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
- catalog.reset( new KVCatalog( rs.get(), true, true, true) );
- uow.commit();
- }
+TEST(KVCatalogTest, Split1) {
+ unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
+ KVEngine* engine = helper->getEngine();
+
+ unique_ptr<RecordStore> rs;
+ unique_ptr<KVCatalog> catalog;
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ rs.reset(engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ catalog.reset(new KVCatalog(rs.get(), true, false, true));
+ uow.commit();
+ }
- {
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
- ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
- ASSERT_STRING_CONTAINS( catalog->getCollectionIdent( "a.b" ), "a/collection/" );
- ASSERT_TRUE( catalog->isUserDataIdent( catalog->getCollectionIdent( "a.b" ) ) );
- uow.commit();
- }
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(catalog->newCollection(&opCtx, "a.b", CollectionOptions()));
+ ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "collection/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ uow.commit();
+ }
- { // index
- MyOperationContext opCtx( engine );
- WriteUnitOfWork uow( &opCtx );
-
- BSONCollectionCatalogEntry::MetaData md;
- md.ns ="a.b";
- md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
- false,
- RecordId(),
- false ) );
- catalog->putMetaData( &opCtx, "a.b", md );
- ASSERT_STRING_CONTAINS( catalog->getIndexIdent( &opCtx, "a.b", "foo" ), "a/index/" );
- ASSERT_TRUE( catalog->isUserDataIdent( catalog->getIndexIdent( &opCtx, "a.b", "foo" ) ) );
- uow.commit();
- }
+ { // index
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+
+ BSONCollectionCatalogEntry::MetaData md;
+ md.ns = "a.b";
+ md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
+ << "foo"),
+ false,
+ RecordId(),
+ false));
+ catalog->putMetaData(&opCtx, "a.b", md);
+ ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "index/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
+ uow.commit();
+ }
+}
+TEST(KVCatalogTest, DirectoryPerAndSplit1) {
+ unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
+ KVEngine* engine = helper->getEngine();
+
+ unique_ptr<RecordStore> rs;
+ unique_ptr<KVCatalog> catalog;
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ rs.reset(engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
+ catalog.reset(new KVCatalog(rs.get(), true, true, true));
+ uow.commit();
}
+ {
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+ ASSERT_OK(catalog->newCollection(&opCtx, "a.b", CollectionOptions()));
+ ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/collection/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
+ uow.commit();
+ }
+ { // index
+ MyOperationContext opCtx(engine);
+ WriteUnitOfWork uow(&opCtx);
+
+ BSONCollectionCatalogEntry::MetaData md;
+ md.ns = "a.b";
+ md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
+ << "foo"),
+ false,
+ RecordId(),
+ false));
+ catalog->putMetaData(&opCtx, "a.b", md);
+ ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "a/index/");
+ ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
+ uow.commit();
+ }
+}
}
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.h b/src/mongo/db/storage/kv/kv_engine_test_harness.h
index fd828681cc7..15ed43a1249 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.h
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.h
@@ -33,15 +33,15 @@
#include "mongo/db/storage/kv/kv_engine.h"
namespace mongo {
- class KVHarnessHelper {
- public:
- virtual ~KVHarnessHelper(){}
+class KVHarnessHelper {
+public:
+ virtual ~KVHarnessHelper() {}
- // returns same thing for entire life
- virtual KVEngine* getEngine() = 0;
+ // Returns the same engine for the entire lifetime of this helper.
+ virtual KVEngine* getEngine() = 0;
- virtual KVEngine* restartEngine() = 0;
+ virtual KVEngine* restartEngine() = 0;
- static KVHarnessHelper* create();
- };
+ static KVHarnessHelper* create();
+};
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 15a17987ff7..8e4d63f843d 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -41,238 +41,222 @@
namespace mongo {
- using std::string;
- using std::vector;
+using std::string;
+using std::vector;
- namespace {
- const std::string catalogInfo = "_mdb_catalog";
- }
+namespace {
+const std::string catalogInfo = "_mdb_catalog";
+}
- class KVStorageEngine::RemoveDBChange : public RecoveryUnit::Change {
- public:
- RemoveDBChange(KVStorageEngine* engine, StringData db, KVDatabaseCatalogEntry* entry)
- : _engine(engine)
- , _db(db.toString())
- , _entry(entry)
- {}
+class KVStorageEngine::RemoveDBChange : public RecoveryUnit::Change {
+public:
+ RemoveDBChange(KVStorageEngine* engine, StringData db, KVDatabaseCatalogEntry* entry)
+ : _engine(engine), _db(db.toString()), _entry(entry) {}
- virtual void commit() {
- delete _entry;
- }
+ virtual void commit() {
+ delete _entry;
+ }
- virtual void rollback() {
- stdx::lock_guard<stdx::mutex> lk(_engine->_dbsLock);
- _engine->_dbs[_db] = _entry;
- }
+ virtual void rollback() {
+ stdx::lock_guard<stdx::mutex> lk(_engine->_dbsLock);
+ _engine->_dbs[_db] = _entry;
+ }
+
+ KVStorageEngine* const _engine;
+ const std::string _db;
+ KVDatabaseCatalogEntry* const _entry;
+};
- KVStorageEngine* const _engine;
- const std::string _db;
- KVDatabaseCatalogEntry* const _entry;
- };
+KVStorageEngine::KVStorageEngine(KVEngine* engine, const KVStorageEngineOptions& options)
+ : _options(options), _engine(engine), _supportsDocLocking(_engine->supportsDocLocking()) {
+ uassert(28601,
+ "Storage engine does not support --directoryperdb",
+ !(options.directoryPerDB && !engine->supportsDirectoryPerDB()));
- KVStorageEngine::KVStorageEngine( KVEngine* engine,
- const KVStorageEngineOptions& options )
- : _options( options )
- , _engine( engine )
- , _supportsDocLocking(_engine->supportsDocLocking()) {
+ OperationContextNoop opCtx(_engine->newRecoveryUnit());
- uassert(28601, "Storage engine does not support --directoryperdb",
- !(options.directoryPerDB && !engine->supportsDirectoryPerDB()));
+ if (options.forRepair && engine->hasIdent(&opCtx, catalogInfo)) {
+ log() << "Repairing catalog metadata";
+ // TODO should also validate all BSON in the catalog.
+ engine->repairIdent(&opCtx, catalogInfo);
+ }
- OperationContextNoop opCtx( _engine->newRecoveryUnit() );
+ {
+ WriteUnitOfWork uow(&opCtx);
- if (options.forRepair && engine->hasIdent(&opCtx, catalogInfo)) {
- log() << "Repairing catalog metadata";
- // TODO should also validate all BSON in the catalog.
- engine->repairIdent(&opCtx, catalogInfo);
+ Status status =
+ _engine->createRecordStore(&opCtx, catalogInfo, catalogInfo, CollectionOptions());
+ // BadValue is usually caused by invalid configuration string.
+ // We still fassert() but without a stack trace.
+ if (status.code() == ErrorCodes::BadValue) {
+ fassertFailedNoTrace(28562);
}
-
- {
- WriteUnitOfWork uow( &opCtx );
-
- Status status = _engine->createRecordStore( &opCtx,
- catalogInfo,
- catalogInfo,
- CollectionOptions() );
- // BadValue is usually caused by invalid configuration string.
- // We still fassert() but without a stack trace.
- if (status.code() == ErrorCodes::BadValue) {
- fassertFailedNoTrace(28562);
- }
- fassert( 28520, status );
-
- _catalogRecordStore.reset( _engine->getRecordStore( &opCtx,
- catalogInfo,
- catalogInfo,
- CollectionOptions() ) );
- _catalog.reset( new KVCatalog( _catalogRecordStore.get(),
- _supportsDocLocking,
- _options.directoryPerDB,
- _options.directoryForIndexes) );
- _catalog->init( &opCtx );
-
- std::vector<std::string> collections;
- _catalog->getAllCollections( &collections );
-
- for ( size_t i = 0; i < collections.size(); i++ ) {
- std::string coll = collections[i];
- NamespaceString nss( coll );
- string dbName = nss.db().toString();
-
- // No rollback since this is only for committed dbs.
- KVDatabaseCatalogEntry*& db = _dbs[dbName];
- if ( !db ) {
- db = new KVDatabaseCatalogEntry( dbName, this );
- }
-
- db->initCollection( &opCtx, coll, options.forRepair );
+ fassert(28520, status);
+
+ _catalogRecordStore.reset(
+ _engine->getRecordStore(&opCtx, catalogInfo, catalogInfo, CollectionOptions()));
+ _catalog.reset(new KVCatalog(_catalogRecordStore.get(),
+ _supportsDocLocking,
+ _options.directoryPerDB,
+ _options.directoryForIndexes));
+ _catalog->init(&opCtx);
+
+ std::vector<std::string> collections;
+ _catalog->getAllCollections(&collections);
+
+ for (size_t i = 0; i < collections.size(); i++) {
+ std::string coll = collections[i];
+ NamespaceString nss(coll);
+ string dbName = nss.db().toString();
+
+ // No rollback since this is only for committed dbs.
+ KVDatabaseCatalogEntry*& db = _dbs[dbName];
+ if (!db) {
+ db = new KVDatabaseCatalogEntry(dbName, this);
}
- uow.commit();
+ db->initCollection(&opCtx, coll, options.forRepair);
}
- opCtx.recoveryUnit()->abandonSnapshot();
+ uow.commit();
+ }
+
+ opCtx.recoveryUnit()->abandonSnapshot();
- // now clean up orphaned idents
+ // now clean up orphaned idents
+ {
+ // get all idents
+ std::set<std::string> allIdents;
{
- // get all idents
- std::set<std::string> allIdents;
- {
- std::vector<std::string> v = _engine->getAllIdents( &opCtx );
- allIdents.insert( v.begin(), v.end() );
- allIdents.erase( catalogInfo );
- }
+ std::vector<std::string> v = _engine->getAllIdents(&opCtx);
+ allIdents.insert(v.begin(), v.end());
+ allIdents.erase(catalogInfo);
+ }
- // remove ones still in use
- {
- vector<string> idents = _catalog->getAllIdents( &opCtx );
- for ( size_t i = 0; i < idents.size(); i++ ) {
- allIdents.erase( idents[i] );
- }
+ // remove ones still in use
+ {
+ vector<string> idents = _catalog->getAllIdents(&opCtx);
+ for (size_t i = 0; i < idents.size(); i++) {
+ allIdents.erase(idents[i]);
}
+ }
- for ( std::set<std::string>::const_iterator it = allIdents.begin();
- it != allIdents.end();
- ++it ) {
- const std::string& toRemove = *it;
- if ( !_catalog->isUserDataIdent( toRemove ) )
- continue;
- log() << "dropping unused ident: " << toRemove;
- WriteUnitOfWork wuow( &opCtx );
- _engine->dropIdent( &opCtx, toRemove );
- wuow.commit();
- }
+ for (std::set<std::string>::const_iterator it = allIdents.begin(); it != allIdents.end();
+ ++it) {
+ const std::string& toRemove = *it;
+ if (!_catalog->isUserDataIdent(toRemove))
+ continue;
+ log() << "dropping unused ident: " << toRemove;
+ WriteUnitOfWork wuow(&opCtx);
+ _engine->dropIdent(&opCtx, toRemove);
+ wuow.commit();
}
+ }
+}
+void KVStorageEngine::cleanShutdown() {
+ for (DBMap::const_iterator it = _dbs.begin(); it != _dbs.end(); ++it) {
+ delete it->second;
}
+ _dbs.clear();
- void KVStorageEngine::cleanShutdown() {
+ _catalog.reset(NULL);
+ _catalogRecordStore.reset(NULL);
- for ( DBMap::const_iterator it = _dbs.begin(); it != _dbs.end(); ++it ) {
- delete it->second;
- }
- _dbs.clear();
+ _engine->cleanShutdown();
+ // intentionally not deleting _engine
+}
- _catalog.reset( NULL );
- _catalogRecordStore.reset( NULL );
+KVStorageEngine::~KVStorageEngine() {}
- _engine->cleanShutdown();
- // intentionally not deleting _engine
- }
+void KVStorageEngine::finishInit() {}
- KVStorageEngine::~KVStorageEngine() {
+RecoveryUnit* KVStorageEngine::newRecoveryUnit() {
+ if (!_engine) {
+ // shutdown
+ return NULL;
}
+ return _engine->newRecoveryUnit();
+}
- void KVStorageEngine::finishInit() {
- }
-
- RecoveryUnit* KVStorageEngine::newRecoveryUnit() {
- if ( !_engine ) {
- // shutdown
- return NULL;
- }
- return _engine->newRecoveryUnit();
+void KVStorageEngine::listDatabases(std::vector<std::string>* out) const {
+ stdx::lock_guard<stdx::mutex> lk(_dbsLock);
+ for (DBMap::const_iterator it = _dbs.begin(); it != _dbs.end(); ++it) {
+ if (it->second->isEmpty())
+ continue;
+ out->push_back(it->first);
}
+}
- void KVStorageEngine::listDatabases( std::vector<std::string>* out ) const {
- stdx::lock_guard<stdx::mutex> lk( _dbsLock );
- for ( DBMap::const_iterator it = _dbs.begin(); it != _dbs.end(); ++it ) {
- if ( it->second->isEmpty() )
- continue;
- out->push_back( it->first );
- }
+DatabaseCatalogEntry* KVStorageEngine::getDatabaseCatalogEntry(OperationContext* opCtx,
+ StringData dbName) {
+ stdx::lock_guard<stdx::mutex> lk(_dbsLock);
+ KVDatabaseCatalogEntry*& db = _dbs[dbName.toString()];
+ if (!db) {
+ // Not registering change since db creation is implicit and never rolled back.
+ db = new KVDatabaseCatalogEntry(dbName, this);
}
+ return db;
+}
- DatabaseCatalogEntry* KVStorageEngine::getDatabaseCatalogEntry( OperationContext* opCtx,
- StringData dbName ) {
- stdx::lock_guard<stdx::mutex> lk( _dbsLock );
- KVDatabaseCatalogEntry*& db = _dbs[dbName.toString()];
- if ( !db ) {
- // Not registering change since db creation is implicit and never rolled back.
- db = new KVDatabaseCatalogEntry( dbName, this );
- }
- return db;
- }
+Status KVStorageEngine::closeDatabase(OperationContext* txn, StringData db) {
+ // This is ok to be a no-op as there is no database layer in kv.
+ return Status::OK();
+}
- Status KVStorageEngine::closeDatabase( OperationContext* txn, StringData db ) {
- // This is ok to be a no-op as there is no database layer in kv.
- return Status::OK();
+Status KVStorageEngine::dropDatabase(OperationContext* txn, StringData db) {
+ KVDatabaseCatalogEntry* entry;
+ {
+ stdx::lock_guard<stdx::mutex> lk(_dbsLock);
+ DBMap::const_iterator it = _dbs.find(db.toString());
+ if (it == _dbs.end())
+ return Status(ErrorCodes::NamespaceNotFound, "db not found to drop");
+ entry = it->second;
}
- Status KVStorageEngine::dropDatabase( OperationContext* txn, StringData db ) {
-
- KVDatabaseCatalogEntry* entry;
- {
- stdx::lock_guard<stdx::mutex> lk( _dbsLock );
- DBMap::const_iterator it = _dbs.find( db.toString() );
- if ( it == _dbs.end() )
- return Status( ErrorCodes::NamespaceNotFound, "db not found to drop" );
- entry = it->second;
- }
-
- // This is called outside of a WUOW since MMAPv1 has unfortunate behavior around dropping
- // databases. We need to create one here since we want db dropping to all-or-nothing
- // wherever possible. Eventually we want to move this up so that it can include the logOp
- // inside of the WUOW, but that would require making DB dropping happen inside the Dur
- // system for MMAPv1.
- WriteUnitOfWork wuow(txn);
-
- std::list<std::string> toDrop;
- entry->getCollectionNamespaces( &toDrop );
+ // This is called outside of a WUOW since MMAPv1 has unfortunate behavior around dropping
+ // databases. We need to create one here since we want db dropping to be all-or-nothing
+ // wherever possible. Eventually we want to move this up so that it can include the logOp
+ // inside of the WUOW, but that would require making DB dropping happen inside the Dur
+ // system for MMAPv1.
+ WriteUnitOfWork wuow(txn);
- for ( std::list<std::string>::iterator it = toDrop.begin(); it != toDrop.end(); ++it ) {
- string coll = *it;
- entry->dropCollection( txn, coll );
- }
- toDrop.clear();
- entry->getCollectionNamespaces( &toDrop );
- invariant( toDrop.empty() );
-
- {
- stdx::lock_guard<stdx::mutex> lk( _dbsLock );
- txn->recoveryUnit()->registerChange(new RemoveDBChange(this, db, entry));
- _dbs.erase( db.toString() );
- }
+ std::list<std::string> toDrop;
+ entry->getCollectionNamespaces(&toDrop);
- wuow.commit();
- return Status::OK();
+ for (std::list<std::string>::iterator it = toDrop.begin(); it != toDrop.end(); ++it) {
+ string coll = *it;
+ entry->dropCollection(txn, coll);
}
-
- int KVStorageEngine::flushAllFiles( bool sync ) {
- return _engine->flushAllFiles( sync );
+ toDrop.clear();
+ entry->getCollectionNamespaces(&toDrop);
+ invariant(toDrop.empty());
+
+ {
+ stdx::lock_guard<stdx::mutex> lk(_dbsLock);
+ txn->recoveryUnit()->registerChange(new RemoveDBChange(this, db, entry));
+ _dbs.erase(db.toString());
}
- bool KVStorageEngine::isDurable() const {
- return _engine->isDurable();
- }
+ wuow.commit();
+ return Status::OK();
+}
- Status KVStorageEngine::repairRecordStore(OperationContext* txn, const std::string& ns) {
- Status status = _engine->repairIdent(txn, _catalog->getCollectionIdent(ns));
- if (!status.isOK())
- return status;
+int KVStorageEngine::flushAllFiles(bool sync) {
+ return _engine->flushAllFiles(sync);
+}
- _dbs[nsToDatabase(ns)]->reinitCollectionAfterRepair(txn, ns);
- return Status::OK();
- }
+bool KVStorageEngine::isDurable() const {
+ return _engine->isDurable();
+}
+
+Status KVStorageEngine::repairRecordStore(OperationContext* txn, const std::string& ns) {
+ Status status = _engine->repairIdent(txn, _catalog->getCollectionIdent(ns));
+ if (!status.isOK())
+ return status;
+
+ _dbs[nsToDatabase(ns)]->reinitCollectionAfterRepair(txn, ns);
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index 3159180d193..08836f6745b 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -40,77 +40,83 @@
namespace mongo {
- class KVCatalog;
- class KVEngine;
- class KVDatabaseCatalogEntry;
+class KVCatalog;
+class KVEngine;
+class KVDatabaseCatalogEntry;
- struct KVStorageEngineOptions {
- KVStorageEngineOptions() :
- directoryPerDB(false),
- directoryForIndexes(false),
- forRepair(false) {}
+struct KVStorageEngineOptions {
+ KVStorageEngineOptions()
+ : directoryPerDB(false), directoryForIndexes(false), forRepair(false) {}
- bool directoryPerDB;
- bool directoryForIndexes;
- bool forRepair;
- };
+ bool directoryPerDB;
+ bool directoryForIndexes;
+ bool forRepair;
+};
- class KVStorageEngine : public StorageEngine {
- public:
- /**
- * @param engine - owneership passes to me
- */
- KVStorageEngine( KVEngine* engine,
- const KVStorageEngineOptions& options = KVStorageEngineOptions() );
- virtual ~KVStorageEngine();
+class KVStorageEngine : public StorageEngine {
+public:
+ /**
+ * @param engine - ownership passes to me
+ */
+ KVStorageEngine(KVEngine* engine,
+ const KVStorageEngineOptions& options = KVStorageEngineOptions());
+ virtual ~KVStorageEngine();
- virtual void finishInit();
+ virtual void finishInit();
- virtual RecoveryUnit* newRecoveryUnit();
+ virtual RecoveryUnit* newRecoveryUnit();
- virtual void listDatabases( std::vector<std::string>* out ) const;
+ virtual void listDatabases(std::vector<std::string>* out) const;
- virtual DatabaseCatalogEntry* getDatabaseCatalogEntry( OperationContext* opCtx,
- StringData db );
+ virtual DatabaseCatalogEntry* getDatabaseCatalogEntry(OperationContext* opCtx, StringData db);
- virtual bool supportsDocLocking() const { return _supportsDocLocking; }
+ virtual bool supportsDocLocking() const {
+ return _supportsDocLocking;
+ }
- virtual Status closeDatabase( OperationContext* txn, StringData db );
+ virtual Status closeDatabase(OperationContext* txn, StringData db);
- virtual Status dropDatabase( OperationContext* txn, StringData db );
+ virtual Status dropDatabase(OperationContext* txn, StringData db);
- virtual int flushAllFiles( bool sync );
+ virtual int flushAllFiles(bool sync);
- virtual bool isDurable() const;
+ virtual bool isDurable() const;
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns);
+ virtual Status repairRecordStore(OperationContext* txn, const std::string& ns);
- virtual void cleanShutdown();
+ virtual void cleanShutdown();
- // ------ kv ------
+ // ------ kv ------
- KVEngine* getEngine() { return _engine.get(); }
- const KVEngine* getEngine() const { return _engine.get(); }
+ KVEngine* getEngine() {
+ return _engine.get();
+ }
+ const KVEngine* getEngine() const {
+ return _engine.get();
+ }
- KVCatalog* getCatalog() { return _catalog.get(); }
- const KVCatalog* getCatalog() const { return _catalog.get(); }
+ KVCatalog* getCatalog() {
+ return _catalog.get();
+ }
+ const KVCatalog* getCatalog() const {
+ return _catalog.get();
+ }
- private:
- class RemoveDBChange;
+private:
+ class RemoveDBChange;
- KVStorageEngineOptions _options;
+ KVStorageEngineOptions _options;
- // This must be the first member so it is destroyed last.
- std::unique_ptr<KVEngine> _engine;
+ // This must be the first member so it is destroyed last.
+ std::unique_ptr<KVEngine> _engine;
- const bool _supportsDocLocking;
+ const bool _supportsDocLocking;
- std::unique_ptr<RecordStore> _catalogRecordStore;
- std::unique_ptr<KVCatalog> _catalog;
-
- typedef std::map<std::string,KVDatabaseCatalogEntry*> DBMap;
- DBMap _dbs;
- mutable stdx::mutex _dbsLock;
- };
+ std::unique_ptr<RecordStore> _catalogRecordStore;
+ std::unique_ptr<KVCatalog> _catalog;
+ typedef std::map<std::string, KVDatabaseCatalogEntry*> DBMap;
+ DBMap _dbs;
+ mutable stdx::mutex _dbsLock;
+};
}
diff --git a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
index 96b16e59f4a..8742f25e285 100644
--- a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
+++ b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
@@ -37,135 +37,136 @@
namespace mongo {
- using std::endl;
+using std::endl;
- AlignedBuilder::AlignedBuilder(unsigned initSize) {
- _len = 0;
- _malloc(initSize);
- uassert(13584, "out of memory AlignedBuilder", _p._allocationAddress);
- }
+AlignedBuilder::AlignedBuilder(unsigned initSize) {
+ _len = 0;
+ _malloc(initSize);
+ uassert(13584, "out of memory AlignedBuilder", _p._allocationAddress);
+}
- BOOST_STATIC_ASSERT(sizeof(void*) == sizeof(size_t));
+BOOST_STATIC_ASSERT(sizeof(void*) == sizeof(size_t));
- /** reset for a re-use. shrinks if > 128MB */
- void AlignedBuilder::reset() {
- _len = 0;
- RARELY {
- const unsigned sizeCap = 128*1024*1024;
- if (_p._size > sizeCap)
- _realloc(sizeCap, _len);
- }
+/** reset for a re-use. shrinks if > 128MB */
+void AlignedBuilder::reset() {
+ _len = 0;
+ RARELY {
+ const unsigned sizeCap = 128 * 1024 * 1024;
+ if (_p._size > sizeCap)
+ _realloc(sizeCap, _len);
}
+}
- /** reset with a hint as to the upcoming needed size specified */
- void AlignedBuilder::reset(unsigned sz) {
- _len = 0;
- unsigned Q = 32 * 1024 * 1024 - 1;
- unsigned want = (sz+Q) & (~Q);
- if( _p._size == want ) {
+/** reset with a hint as to the upcoming needed size specified */
+void AlignedBuilder::reset(unsigned sz) {
+ _len = 0;
+ unsigned Q = 32 * 1024 * 1024 - 1;
+ unsigned want = (sz + Q) & (~Q);
+ if (_p._size == want) {
+ return;
+ }
+ if (_p._size > want) {
+ if (_p._size <= 64 * 1024 * 1024)
return;
- }
- if( _p._size > want ) {
- if( _p._size <= 64 * 1024 * 1024 )
- return;
- bool downsize = false;
- RARELY { downsize = true; }
- if( !downsize )
- return;
+ bool downsize = false;
+ RARELY {
+ downsize = true;
}
- _realloc(want, _len);
- }
-
- void AlignedBuilder::mallocSelfAligned(unsigned sz) {
- verify( sz == _p._size );
- void *p = malloc(sz + Alignment - 1);
- _p._allocationAddress = p;
- size_t s = (size_t) p;
- size_t sold = s;
- s += Alignment - 1;
- s = (s/Alignment)*Alignment;
- verify( s >= sold ); // beginning
- verify( (s + sz) <= (sold + sz + Alignment - 1) ); //end
- _p._data = (char *) s;
+ if (!downsize)
+ return;
}
+ _realloc(want, _len);
+}
- /* "slow"/infrequent portion of 'grow()' */
- void NOINLINE_DECL AlignedBuilder::growReallocate(unsigned oldLen) {
- const unsigned MB = 1024*1024;
- const unsigned kMaxSize = (sizeof(int*) == 4) ? 512*MB : 2000*MB;
- const unsigned kWarnSize = (sizeof(int*) == 4) ? 256*MB : 512*MB;
+void AlignedBuilder::mallocSelfAligned(unsigned sz) {
+ verify(sz == _p._size);
+ void* p = malloc(sz + Alignment - 1);
+ _p._allocationAddress = p;
+ size_t s = (size_t)p;
+ size_t sold = s;
+ s += Alignment - 1;
+ s = (s / Alignment) * Alignment;
+ verify(s >= sold); // beginning
+ verify((s + sz) <= (sold + sz + Alignment - 1)); // end
+ _p._data = (char*)s;
+}
- const unsigned oldSize = _p._size;
+/* "slow"/infrequent portion of 'grow()' */
+void NOINLINE_DECL AlignedBuilder::growReallocate(unsigned oldLen) {
+ const unsigned MB = 1024 * 1024;
+ const unsigned kMaxSize = (sizeof(int*) == 4) ? 512 * MB : 2000 * MB;
+ const unsigned kWarnSize = (sizeof(int*) == 4) ? 256 * MB : 512 * MB;
- // Warn for unexpectedly large buffer
- wassert(_len <= kWarnSize);
+ const unsigned oldSize = _p._size;
- // Check validity of requested size
- invariant(_len > oldSize);
- if (_len > kMaxSize) {
- log() << "error writing journal: too much uncommitted data (" << _len << " bytes)";
- log() << "shutting down immediately to avoid corruption";
- fassert(28614, _len <= kMaxSize);
- }
+ // Warn for unexpectedly large buffer
+ wassert(_len <= kWarnSize);
- // Use smaller maximum for debug builds, as we should never be close the the maximum
- dassert(_len <= 256*MB);
+ // Check validity of requested size
+ invariant(_len > oldSize);
+ if (_len > kMaxSize) {
+ log() << "error writing journal: too much uncommitted data (" << _len << " bytes)";
+ log() << "shutting down immediately to avoid corruption";
+ fassert(28614, _len <= kMaxSize);
+ }
- // Compute newSize by doubling the existing maximum size until the maximum is reached
- invariant(oldSize > 0);
- uint64_t newSize = oldSize; // use 64 bits to defend against accidental overflow
- while (newSize < _len) {
- newSize *= 2;
- }
+ // Use smaller maximum for debug builds, as we should never be close to the maximum
+ dassert(_len <= 256 * MB);
- if (newSize > kMaxSize) {
- newSize = kMaxSize;
- }
+ // Compute newSize by doubling the existing maximum size until the maximum is reached
+ invariant(oldSize > 0);
+ uint64_t newSize = oldSize; // use 64 bits to defend against accidental overflow
+ while (newSize < _len) {
+ newSize *= 2;
+ }
- _realloc(newSize, oldLen);
+ if (newSize > kMaxSize) {
+ newSize = kMaxSize;
}
- void AlignedBuilder::_malloc(unsigned sz) {
- _p._size = sz;
+ _realloc(newSize, oldLen);
+}
+
+void AlignedBuilder::_malloc(unsigned sz) {
+ _p._size = sz;
#if defined(_WIN32)
- void *p = VirtualAlloc(0, sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
- _p._allocationAddress = p;
- _p._data = (char *) p;
+ void* p = VirtualAlloc(0, sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ _p._allocationAddress = p;
+ _p._data = (char*)p;
#elif defined(__linux__)
- // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be tested on solaris.
- // so for now, linux only for this.
- void *p = 0;
- int res = posix_memalign(&p, Alignment, sz);
- massert(13524, "out of memory AlignedBuilder", res == 0);
- _p._allocationAddress = p;
- _p._data = (char *) p;
+ // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be tested on solaris.
+ // so for now, linux only for this.
+ void* p = 0;
+ int res = posix_memalign(&p, Alignment, sz);
+ massert(13524, "out of memory AlignedBuilder", res == 0);
+ _p._allocationAddress = p;
+ _p._data = (char*)p;
#else
- mallocSelfAligned(sz);
- verify( ((size_t) _p._data) % Alignment == 0 );
+ mallocSelfAligned(sz);
+ verify(((size_t)_p._data) % Alignment == 0);
#endif
- }
+}
- void AlignedBuilder::_realloc(unsigned newSize, unsigned oldLen) {
- // posix_memalign alignment is not maintained on reallocs, so we can't use realloc().
- AllocationInfo old = _p;
- _malloc(newSize);
- verify( oldLen <= _len );
- memcpy(_p._data, old._data, oldLen);
- _free(old._allocationAddress);
- }
+void AlignedBuilder::_realloc(unsigned newSize, unsigned oldLen) {
+ // posix_memalign alignment is not maintained on reallocs, so we can't use realloc().
+ AllocationInfo old = _p;
+ _malloc(newSize);
+ verify(oldLen <= _len);
+ memcpy(_p._data, old._data, oldLen);
+ _free(old._allocationAddress);
+}
- void AlignedBuilder::_free(void *p) {
+void AlignedBuilder::_free(void* p) {
#if defined(_WIN32)
- VirtualFree(p, 0, MEM_RELEASE);
+ VirtualFree(p, 0, MEM_RELEASE);
#else
- free(p);
+ free(p);
#endif
- }
-
- void AlignedBuilder::kill() {
- _free(_p._allocationAddress);
- _p._allocationAddress = 0;
- _p._data = 0;
- }
+}
+void AlignedBuilder::kill() {
+ _free(_p._allocationAddress);
+ _p._allocationAddress = 0;
+ _p._data = 0;
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/aligned_builder.h b/src/mongo/db/storage/mmap_v1/aligned_builder.h
index fb184424b66..f43cbee7d5d 100644
--- a/src/mongo/db/storage/mmap_v1/aligned_builder.h
+++ b/src/mongo/db/storage/mmap_v1/aligned_builder.h
@@ -33,104 +33,117 @@
namespace mongo {
- /** a page-aligned BufBuilder. */
- class AlignedBuilder {
- public:
- AlignedBuilder(unsigned init_size);
- ~AlignedBuilder() { kill(); }
-
- /** reset with a hint as to the upcoming needed size specified */
- void reset(unsigned sz);
-
- /** reset for a re-use. shrinks if > 128MB */
- void reset();
-
- /** note this may be deallocated (realloced) if you keep writing or reset(). */
- const char* buf() const { return _p._data; }
-
- /** leave room for some stuff later
- @return offset in the buffer that was our current position
- */
- size_t skip(unsigned n) {
- unsigned l = len();
- grow(n);
- return l;
+/** a page-aligned BufBuilder. */
+class AlignedBuilder {
+public:
+ AlignedBuilder(unsigned init_size);
+ ~AlignedBuilder() {
+ kill();
+ }
+
+ /** reset with a hint as to the upcoming needed size specified */
+ void reset(unsigned sz);
+
+ /** reset for a re-use. shrinks if > 128MB */
+ void reset();
+
+ /** note this may be deallocated (realloced) if you keep writing or reset(). */
+ const char* buf() const {
+ return _p._data;
+ }
+
+ /** leave room for some stuff later
+ @return offset in the buffer that was our current position
+ */
+ size_t skip(unsigned n) {
+ unsigned l = len();
+ grow(n);
+ return l;
+ }
+
+ /** if buffer grows pointer no longer valid */
+ char* atOfs(unsigned ofs) {
+ return _p._data + ofs;
+ }
+
+ /** if buffer grows pointer no longer valid */
+ char* cur() {
+ return _p._data + _len;
+ }
+
+ void appendChar(char j) {
+ *((char*)grow(sizeof(char))) = j;
+ }
+ void appendNum(char j) {
+ *((char*)grow(sizeof(char))) = j;
+ }
+ void appendNum(short j) {
+ *((short*)grow(sizeof(short))) = j;
+ }
+ void appendNum(int j) {
+ *((int*)grow(sizeof(int))) = j;
+ }
+ void appendNum(unsigned j) {
+ *((unsigned*)grow(sizeof(unsigned))) = j;
+ }
+ void appendNum(bool j) {
+ *((bool*)grow(sizeof(bool))) = j;
+ }
+ void appendNum(double j) {
+ *((double*)grow(sizeof(double))) = j;
+ }
+ void appendNum(long long j) {
+ *((long long*)grow(sizeof(long long))) = j;
+ }
+ void appendNum(unsigned long long j) {
+ *((unsigned long long*)grow(sizeof(unsigned long long))) = j;
+ }
+
+ void appendBuf(const void* src, size_t len) {
+ memcpy(grow((unsigned)len), src, len);
+ }
+
+ template <class T>
+ void appendStruct(const T& s) {
+ appendBuf(&s, sizeof(T));
+ }
+
+ void appendStr(StringData str, bool includeEOO = true) {
+ const unsigned len = str.size() + (includeEOO ? 1 : 0);
+ verify(len < (unsigned)BSONObjMaxUserSize);
+ str.copyTo(grow(len), includeEOO);
+ }
+
+ /** @return the in-use length */
+ unsigned len() const {
+ return _len;
+ }
+
+private:
+ static const unsigned Alignment = 8192;
+
+ /** returns the pre-grow write position */
+ inline char* grow(unsigned by) {
+ unsigned oldlen = _len;
+ _len += by;
+ if (MONGO_unlikely(_len > _p._size)) {
+ growReallocate(oldlen);
}
-
- /** if buffer grows pointer no longer valid */
- char* atOfs(unsigned ofs) { return _p._data + ofs; }
-
- /** if buffer grows pointer no longer valid */
- char* cur() { return _p._data + _len; }
-
- void appendChar(char j) {
- *((char*)grow(sizeof(char))) = j;
- }
- void appendNum(char j) {
- *((char*)grow(sizeof(char))) = j;
- }
- void appendNum(short j) {
- *((short*)grow(sizeof(short))) = j;
- }
- void appendNum(int j) {
- *((int*)grow(sizeof(int))) = j;
- }
- void appendNum(unsigned j) {
- *((unsigned*)grow(sizeof(unsigned))) = j;
- }
- void appendNum(bool j) {
- *((bool*)grow(sizeof(bool))) = j;
- }
- void appendNum(double j) {
- *((double*)grow(sizeof(double))) = j;
- }
- void appendNum(long long j) {
- *((long long*)grow(sizeof(long long))) = j;
- }
- void appendNum(unsigned long long j) {
- *((unsigned long long*)grow(sizeof(unsigned long long))) = j;
- }
-
- void appendBuf(const void *src, size_t len) { memcpy(grow((unsigned) len), src, len); }
-
- template<class T>
- void appendStruct(const T& s) { appendBuf(&s, sizeof(T)); }
-
- void appendStr(StringData str , bool includeEOO = true ) {
- const unsigned len = str.size() + ( includeEOO ? 1 : 0 );
- verify( len < (unsigned) BSONObjMaxUserSize );
- str.copyTo( grow(len), includeEOO );
- }
-
- /** @return the in-use length */
- unsigned len() const { return _len; }
-
- private:
- static const unsigned Alignment = 8192;
-
- /** returns the pre-grow write position */
- inline char* grow(unsigned by) {
- unsigned oldlen = _len;
- _len += by;
- if (MONGO_unlikely( _len > _p._size )) {
- growReallocate(oldlen);
- }
- return _p._data + oldlen;
- }
-
- void growReallocate(unsigned oldLenInUse);
- void kill();
- void mallocSelfAligned(unsigned sz);
- void _malloc(unsigned sz);
- void _realloc(unsigned newSize, unsigned oldLenInUse);
- void _free(void*);
-
- struct AllocationInfo {
- char *_data;
- void *_allocationAddress;
- unsigned _size;
- } _p;
- unsigned _len; // bytes in use
- };
-
+ return _p._data + oldlen;
+ }
+
+ void growReallocate(unsigned oldLenInUse);
+ void kill();
+ void mallocSelfAligned(unsigned sz);
+ void _malloc(unsigned sz);
+ void _realloc(unsigned newSize, unsigned oldLenInUse);
+ void _free(void*);
+
+ struct AllocationInfo {
+ char* _data;
+ void* _allocationAddress;
+ unsigned _size;
+ } _p;
+ unsigned _len; // bytes in use
+};
}
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
index 422a6441e9a..ce1aa117fef 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface.cpp
@@ -39,340 +39,335 @@
namespace mongo {
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::vector;
+
+template <class OnDiskFormat>
+class BtreeBuilderInterfaceImpl final : public SortedDataBuilderInterface {
+public:
+ BtreeBuilderInterfaceImpl(OperationContext* trans,
+ typename BtreeLogic<OnDiskFormat>::Builder* builder)
+ : _builder(builder), _trans(trans) {}
+
+ Status addKey(const BSONObj& key, const RecordId& loc) {
+ return _builder->addKey(key, DiskLoc::fromRecordId(loc));
+ }
- template <class OnDiskFormat>
- class BtreeBuilderInterfaceImpl final : public SortedDataBuilderInterface {
- public:
- BtreeBuilderInterfaceImpl(OperationContext* trans,
- typename BtreeLogic<OnDiskFormat>::Builder* builder)
- : _builder(builder), _trans(trans) { }
+private:
+ std::unique_ptr<typename BtreeLogic<OnDiskFormat>::Builder> _builder;
+
+ // Not owned here.
+ OperationContext* _trans;
+};
+
+template <class OnDiskFormat>
+class BtreeInterfaceImpl final : public SortedDataInterface {
+public:
+ BtreeInterfaceImpl(HeadManager* headManager,
+ RecordStore* recordStore,
+ SavedCursorRegistry* cursorRegistry,
+ const Ordering& ordering,
+ const string& indexName) {
+ _btree.reset(new BtreeLogic<OnDiskFormat>(
+ headManager, recordStore, cursorRegistry, ordering, indexName));
+ }
- Status addKey(const BSONObj& key, const RecordId& loc) {
- return _builder->addKey(key, DiskLoc::fromRecordId(loc));
- }
+ virtual ~BtreeInterfaceImpl() {}
- private:
- std::unique_ptr<typename BtreeLogic<OnDiskFormat>::Builder> _builder;
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) {
+ return new BtreeBuilderInterfaceImpl<OnDiskFormat>(txn,
+ _btree->newBuilder(txn, dupsAllowed));
+ }
- // Not owned here.
- OperationContext* _trans;
- };
+ virtual Status insert(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ return _btree->insert(txn, key, DiskLoc::fromRecordId(loc), dupsAllowed);
+ }
- template <class OnDiskFormat>
- class BtreeInterfaceImpl final : public SortedDataInterface {
- public:
- BtreeInterfaceImpl(HeadManager* headManager,
- RecordStore* recordStore,
- SavedCursorRegistry* cursorRegistry,
- const Ordering& ordering,
- const string& indexName) {
- _btree.reset(new BtreeLogic<OnDiskFormat>(headManager,
- recordStore,
- cursorRegistry,
- ordering,
- indexName));
- }
+ virtual void unindex(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ _btree->unindex(txn, key, DiskLoc::fromRecordId(loc));
+ }
- virtual ~BtreeInterfaceImpl() { }
+ virtual void fullValidate(OperationContext* txn,
+ bool full,
+ long long* numKeysOut,
+ BSONObjBuilder* output) const {
+ *numKeysOut = _btree->fullValidate(txn, NULL, false, false, 0);
+ }
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) {
+ virtual bool appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* output,
+ double scale) const {
+ return false;
+ }
- return new BtreeBuilderInterfaceImpl<OnDiskFormat>(
- txn, _btree->newBuilder(txn, dupsAllowed));
- }
+ virtual long long getSpaceUsedBytes(OperationContext* txn) const {
+ return _btree->getRecordStore()->dataSize(txn);
+ }
- virtual Status insert(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) {
+ return _btree->dupKeyCheck(txn, key, DiskLoc::fromRecordId(loc));
+ }
- return _btree->insert(txn, key, DiskLoc::fromRecordId(loc), dupsAllowed);
- }
+ virtual bool isEmpty(OperationContext* txn) {
+ return _btree->isEmpty(txn);
+ }
- virtual void unindex(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
+ virtual Status touch(OperationContext* txn) const {
+ return _btree->touch(txn);
+ }
- _btree->unindex(txn, key, DiskLoc::fromRecordId(loc));
- }
+ class Cursor final : public SortedDataInterface::Cursor {
+ public:
+ Cursor(OperationContext* txn, const BtreeLogic<OnDiskFormat>* btree, bool forward)
+ : _txn(txn), _btree(btree), _direction(forward ? 1 : -1), _ofs(0) {}
+
+ boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
+ if (isEOF())
+ return {};
+ if (_lastMoveWasRestore) {
+ // Return current position rather than advancing.
+ _lastMoveWasRestore = false;
+ } else {
+ _btree->advance(_txn, &_bucket, &_ofs, _direction);
+ }
- virtual void fullValidate(OperationContext* txn, bool full, long long *numKeysOut,
- BSONObjBuilder* output) const {
- *numKeysOut = _btree->fullValidate(txn, NULL, false, false, 0);
+ if (atEndPoint())
+ markEOF();
+ return curr(parts);
}
- virtual bool appendCustomStats(OperationContext* txn, BSONObjBuilder* output, double scale)
- const {
- return false;
- }
+ void setEndPosition(const BSONObj& key, bool inclusive) override {
+ if (key.isEmpty()) {
+ // This means scan to end of index.
+ _endState = {};
+ return;
+ }
- virtual long long getSpaceUsedBytes( OperationContext* txn ) const {
- return _btree->getRecordStore()->dataSize( txn );
+ _endState = {{key, inclusive}};
+ seekEndCursor(); // Completes initialization of _endState.
}
- virtual Status dupKeyCheck(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc) {
- return _btree->dupKeyCheck(txn, key, DiskLoc::fromRecordId(loc));
- }
+ boost::optional<IndexKeyEntry> seek(const BSONObj& key,
+ bool inclusive,
+ RequestedInfo parts) override {
+ locate(key, inclusive == forward() ? RecordId::min() : RecordId::max());
+ _lastMoveWasRestore = false;
- virtual bool isEmpty(OperationContext* txn) {
- return _btree->isEmpty(txn);
+ if (isEOF())
+ return {};
+ dassert(inclusive ? compareKeys(getKey(), key) >= 0 : compareKeys(getKey(), key) > 0);
+ return curr(parts);
}
- virtual Status touch(OperationContext* txn) const{
- return _btree->touch(txn);
- }
- class Cursor final : public SortedDataInterface::Cursor {
- public:
- Cursor(OperationContext* txn,
- const BtreeLogic<OnDiskFormat>* btree,
- bool forward)
- : _txn(txn),
- _btree(btree),
- _direction(forward ? 1 : -1),
- _ofs(0)
- {}
-
- boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
- if (isEOF()) return {};
- if (_lastMoveWasRestore) {
- // Return current position rather than advancing.
- _lastMoveWasRestore = false;
- }
- else {
- _btree->advance(_txn, &_bucket, &_ofs, _direction);
- }
+ boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
+ RequestedInfo parts) override {
+ bool canUseAdvanceTo = false;
+ if (!isEOF()) {
+ int cmp = _btree->customBSONCmp(getKey(), seekPoint, _direction);
- if (atEndPoint()) markEOF();
- return curr(parts);
+ // advanceTo requires that we are positioned "earlier" in the index than the
+ // seek point, in scan order.
+ canUseAdvanceTo = forward() ? cmp < 0 : cmp > 0;
}
- void setEndPosition(const BSONObj& key, bool inclusive) override {
- if (key.isEmpty()) {
- // This means scan to end of index.
- _endState = {};
- return;
- }
- _endState = {{key, inclusive}};
- seekEndCursor(); // Completes initialization of _endState.
+ if (canUseAdvanceTo) {
+ // This takes advantage of current location.
+ _btree->advanceTo(_txn, &_bucket, &_ofs, seekPoint, _direction);
+ } else {
+ // Start at root.
+ _bucket = _btree->getHead(_txn);
+ _ofs = 0;
+ _btree->customLocate(_txn, &_bucket, &_ofs, seekPoint, _direction);
}
- boost::optional<IndexKeyEntry> seek(const BSONObj& key, bool inclusive,
- RequestedInfo parts) override {
- locate(key, inclusive == forward() ? RecordId::min() : RecordId::max());
- _lastMoveWasRestore = false;
-
- if (isEOF()) return {};
- dassert(inclusive ? compareKeys(getKey(), key) >= 0
- : compareKeys(getKey(), key) > 0);
- return curr(parts);
- }
+ _lastMoveWasRestore = false;
+ if (atOrPastEndPointAfterSeeking())
+ markEOF();
+ return curr(parts);
+ }
- boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
- RequestedInfo parts) override {
- bool canUseAdvanceTo = false;
- if (!isEOF()) {
- int cmp = _btree->customBSONCmp(getKey(), seekPoint, _direction);
-
- // advanceTo requires that we are positioned "earlier" in the index than the
- // seek point, in scan order.
- canUseAdvanceTo = forward() ? cmp < 0 : cmp > 0;
- }
+ void savePositioned() override {
+ _txn = nullptr;
+ if (!_lastMoveWasRestore)
+ _savedEOF = isEOF();
- if (canUseAdvanceTo) {
- // This takes advantage of current location.
- _btree->advanceTo(_txn, &_bucket, &_ofs, seekPoint, _direction);
- }
- else {
- // Start at root.
- _bucket = _btree->getHead(_txn);
- _ofs = 0;
- _btree->customLocate(_txn, &_bucket, &_ofs, seekPoint, _direction);
+ if (!isEOF()) {
+ _saved.bucket = _bucket;
+ _btree->savedCursors()->registerCursor(&_saved);
+ // Don't want to change saved position if we only moved during restore.
+ if (!_lastMoveWasRestore) {
+ _saved.key = getKey().getOwned();
+ _saved.loc = getDiskLoc();
}
+ }
+ // Doing nothing with end cursor since it will do full reseek on restore.
+ }
- _lastMoveWasRestore = false;
+ void saveUnpositioned() override {
+ _txn = nullptr;
+ // Don't leak our registration if savePositioned() was previously called.
+ if (!_saved.bucket.isNull())
+ _btree->savedCursors()->unregisterCursor(&_saved);
- if (atOrPastEndPointAfterSeeking()) markEOF();
- return curr(parts);
- }
+ _saved.bucket = DiskLoc();
+ _savedEOF = true;
+ }
- void savePositioned() override {
- _txn = nullptr;
+ void restore(OperationContext* txn) override {
+ // guard against accidental double restore
+ invariant(!_txn);
+ _txn = txn;
- if (!_lastMoveWasRestore) _savedEOF = isEOF();
+ // Always do a full seek on restore. We cannot use our last position since index
+ // entries may have been inserted closer to our endpoint and we would need to move
+ // over them.
+ seekEndCursor();
- if (!isEOF()) {
- _saved.bucket = _bucket;
- _btree->savedCursors()->registerCursor(&_saved);
- // Don't want to change saved position if we only moved during restore.
- if (!_lastMoveWasRestore) {
- _saved.key = getKey().getOwned();
- _saved.loc = getDiskLoc();
- }
- }
- // Doing nothing with end cursor since it will do full reseek on restore.
+ if (_savedEOF) {
+ markEOF();
+ return;
}
- void saveUnpositioned() override {
- _txn = nullptr;
- // Don't leak our registration if savePositioned() was previously called.
- if (!_saved.bucket.isNull()) _btree->savedCursors()->unregisterCursor(&_saved);
-
- _saved.bucket = DiskLoc();
- _savedEOF = true;
+ if (_btree->savedCursors()->unregisterCursor(&_saved)) {
+ // We can use the fast restore mechanism.
+ _btree->restorePosition(_txn, _saved.key, _saved.loc, _direction, &_bucket, &_ofs);
+ } else {
+ // Need to find our position from the root.
+ locate(_saved.key, _saved.loc.toRecordId());
}
- void restore(OperationContext* txn) override {
- // guard against accidental double restore
- invariant(!_txn);
- _txn = txn;
+ _lastMoveWasRestore = isEOF() // We weren't EOF but now are.
+ || getDiskLoc() != _saved.loc || compareKeys(getKey(), _saved.key) != 0;
+ }
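restore() always re-seeks the end cursor and, when the saved entry no longer exists, leaves the cursor on the nearest entry past it while setting _lastMoveWasRestore, so that the next call to next() returns that entry instead of skipping it. A hedged sketch of the caller-side sequence this supports; the wrapper function, the yield itself, and the exact headers are assumptions for illustration, only the cursor calls come from the interface changed in this patch:

    #include "mongo/db/operation_context.h"
    #include "mongo/db/storage/sorted_data_interface.h"

    namespace mongo {
    // Hypothetical yield point: save the position, drop locks, then restore.
    boost::optional<IndexKeyEntry> resumeAfterYield(SortedDataInterface::Cursor* cursor,
                                                    OperationContext* txn) {
        cursor->savePositioned();  // registers the position with the saved-cursor registry
        // ... locks released here; other operations may insert or delete index keys ...
        cursor->restore(txn);  // fast restore if the bucket survived, otherwise re-locate
        // If the saved entry was deleted, this returns its successor in scan order
        // rather than skipping past it, thanks to _lastMoveWasRestore.
        return cursor->next(SortedDataInterface::Cursor::kKeyAndLoc);
    }
    }  // namespace mongo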
- // Always do a full seek on restore. We cannot use our last position since index
- // entries may have been inserted closer to our endpoint and we would need to move
- // over them.
- seekEndCursor();
+ private:
+ bool isEOF() const {
+ return _bucket.isNull();
+ }
+ void markEOF() {
+ _bucket = DiskLoc();
+ }
- if (_savedEOF) {
- markEOF();
- return;
- }
+ boost::optional<IndexKeyEntry> curr(RequestedInfo parts) {
+ if (isEOF())
+ return {};
+ return {{(parts & kWantKey) ? getKey() : BSONObj(),
+ (parts & kWantLoc) ? getDiskLoc().toRecordId() : RecordId()}};
+ }
- if (_btree->savedCursors()->unregisterCursor(&_saved)) {
- // We can use the fast restore mechanism.
- _btree->restorePosition(_txn, _saved.key, _saved.loc, _direction,
- &_bucket, &_ofs);
- }
- else {
- // Need to find our position from the root.
- locate(_saved.key, _saved.loc.toRecordId());
- }
+ bool atEndPoint() const {
+ return _endState && _bucket == _endState->bucket && (isEOF() || _ofs == _endState->ofs);
+ }
- _lastMoveWasRestore = isEOF() // We weren't EOF but now are.
- || getDiskLoc() != _saved.loc
- || compareKeys(getKey(), _saved.key) != 0;
- }
+ bool atOrPastEndPointAfterSeeking() const {
+ if (!_endState)
+ return false;
+ if (isEOF())
+ return true;
- private:
- bool isEOF() const { return _bucket.isNull(); }
- void markEOF() { _bucket = DiskLoc(); }
+ int cmp = compareKeys(getKey(), _endState->key);
+ return _endState->inclusive ? cmp > 0 : cmp >= 0;
+ }
- boost::optional<IndexKeyEntry> curr(RequestedInfo parts) {
- if (isEOF()) return {};
- return {{(parts & kWantKey) ? getKey() : BSONObj(),
- (parts & kWantLoc) ? getDiskLoc().toRecordId() : RecordId()}};
- }
+ void locate(const BSONObj& key, const RecordId& loc) {
+ _btree->locate(_txn, key, DiskLoc::fromRecordId(loc), _direction, &_ofs, &_bucket);
+ if (atOrPastEndPointAfterSeeking())
+ markEOF();
+ }
- bool atEndPoint() const {
- return _endState
- && _bucket == _endState->bucket
- && (isEOF() || _ofs == _endState->ofs);
- }
+ // Returns comparison relative to direction of scan. If lhs would be seen later in the
+ // scan than rhs, a positive value is returned.
+ int compareKeys(const BSONObj& lhs, const BSONObj& rhs) const {
+ int cmp = lhs.woCompare(rhs, _btree->ordering(), /*considerFieldName*/ false);
+ return forward() ? cmp : -cmp;
+ }
- bool atOrPastEndPointAfterSeeking() const {
- if (!_endState) return false;
- if (isEOF()) return true;
-
- int cmp = compareKeys(getKey(), _endState->key);
- return _endState->inclusive ? cmp > 0 : cmp >= 0;
- }
+ BSONObj getKey() const {
+ return _btree->getKey(_txn, _bucket, _ofs);
+ }
+ DiskLoc getDiskLoc() const {
+ return _btree->getDiskLoc(_txn, _bucket, _ofs);
+ }
- void locate(const BSONObj& key, const RecordId& loc) {
- _btree->locate(_txn, key, DiskLoc::fromRecordId(loc), _direction, &_ofs, &_bucket);
- if (atOrPastEndPointAfterSeeking()) markEOF();
- }
+ void seekEndCursor() {
+ if (!_endState)
+ return;
+ _btree->locate(_txn,
+ _endState->key,
+ forward() == _endState->inclusive ? DiskLoc::max() : DiskLoc::min(),
+ _direction,
+ &_endState->ofs,
+ &_endState->bucket); // pure out params.
+ }
- // Returns comparison relative to direction of scan. If rhs would be seen later, returns
- // a positive value.
- int compareKeys(const BSONObj& lhs, const BSONObj& rhs) const {
- int cmp = lhs.woCompare(rhs, _btree->ordering(), /*considerFieldName*/false);
- return forward() ? cmp : -cmp;
- }
+ bool forward() const {
+ return _direction == 1;
+ }
- BSONObj getKey() const { return _btree->getKey(_txn, _bucket, _ofs); }
- DiskLoc getDiskLoc() const { return _btree->getDiskLoc(_txn, _bucket, _ofs); }
+ OperationContext* _txn; // not owned
+ const BtreeLogic<OnDiskFormat>* const _btree;
+ const int _direction;
- void seekEndCursor() {
- if (!_endState) return;
- _btree->locate(_txn,
- _endState->key,
- forward() == _endState->inclusive ? DiskLoc::max() : DiskLoc::min(),
- _direction,
- &_endState->ofs, &_endState->bucket); // pure out params.
- }
+ DiskLoc _bucket;
+ int _ofs;
- bool forward() const { return _direction == 1; }
-
- OperationContext* _txn; // not owned
- const BtreeLogic<OnDiskFormat>* const _btree;
- const int _direction;
-
- DiskLoc _bucket;
- int _ofs;
-
- struct EndState {
- BSONObj key;
- bool inclusive;
- DiskLoc bucket;
- int ofs;
- };
- boost::optional<EndState> _endState;
-
- // Used by next to decide to return current position rather than moving. Should be reset
- // to false by any operation that moves the cursor, other than subsequent save/restore
- // pairs.
- bool _lastMoveWasRestore = false;
-
- // Only used by save/restore() if _bucket is non-Null.
- bool _savedEOF = false;
- SavedCursorRegistry::SavedCursor _saved;
+ struct EndState {
+ BSONObj key;
+ bool inclusive;
+ DiskLoc bucket;
+ int ofs;
};
+ boost::optional<EndState> _endState;
- virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(
- OperationContext* txn,
- bool isForward = true) const {
- return stdx::make_unique<Cursor>(txn, _btree.get(), isForward);
- }
+ // Used by next to decide to return current position rather than moving. Should be reset
+ // to false by any operation that moves the cursor, other than subsequent save/restore
+ // pairs.
+ bool _lastMoveWasRestore = false;
- virtual Status initAsEmpty(OperationContext* txn) {
- return _btree->initAsEmpty(txn);
- }
-
- private:
- unique_ptr<BtreeLogic<OnDiskFormat> > _btree;
+ // Only used by save/restore() if _bucket is non-Null.
+ bool _savedEOF = false;
+ SavedCursorRegistry::SavedCursor _saved;
};
-} // namespace
-
- SortedDataInterface* getMMAPV1Interface(HeadManager* headManager,
- RecordStore* recordStore,
- SavedCursorRegistry* cursorRegistry,
- const Ordering& ordering,
- const string& indexName,
- int version) {
- if (0 == version) {
- return new BtreeInterfaceImpl<BtreeLayoutV0>(headManager,
- recordStore,
- cursorRegistry,
- ordering,
- indexName);
- }
- else {
- invariant(1 == version);
- return new BtreeInterfaceImpl<BtreeLayoutV1>(headManager,
- recordStore,
- cursorRegistry,
- ordering,
- indexName);
- }
+
+ virtual std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ bool isForward = true) const {
+ return stdx::make_unique<Cursor>(txn, _btree.get(), isForward);
+ }
+
+ virtual Status initAsEmpty(OperationContext* txn) {
+ return _btree->initAsEmpty(txn);
+ }
+
+private:
+ unique_ptr<BtreeLogic<OnDiskFormat>> _btree;
+};
+} // namespace
+
+SortedDataInterface* getMMAPV1Interface(HeadManager* headManager,
+ RecordStore* recordStore,
+ SavedCursorRegistry* cursorRegistry,
+ const Ordering& ordering,
+ const string& indexName,
+ int version) {
+ if (0 == version) {
+ return new BtreeInterfaceImpl<BtreeLayoutV0>(
+ headManager, recordStore, cursorRegistry, ordering, indexName);
+ } else {
+ invariant(1 == version);
+ return new BtreeInterfaceImpl<BtreeLayoutV1>(
+ headManager, recordStore, cursorRegistry, ordering, indexName);
}
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface.h b/src/mongo/db/storage/mmap_v1/btree/btree_interface.h
index cb2cdd21125..b5814c8a1f5 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface.h
@@ -39,12 +39,12 @@
#pragma once
namespace mongo {
- class SavedCursorRegistry;
+class SavedCursorRegistry;
- SortedDataInterface* getMMAPV1Interface(HeadManager* headManager,
- RecordStore* recordStore,
- SavedCursorRegistry* cursorRegistry,
- const Ordering& ordering,
- const std::string& indexName,
- int version);
+SortedDataInterface* getMMAPV1Interface(HeadManager* headManager,
+ RecordStore* recordStore,
+ SavedCursorRegistry* cursorRegistry,
+ const Ordering& ordering,
+ const std::string& indexName,
+ int version);
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp
index 23f649bfcaa..1272ea4d080 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_interface_test.cpp
@@ -35,40 +35,32 @@
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
- class MyHarnessHelper final : public HarnessHelper {
- public:
- MyHarnessHelper()
- : _recordStore("a.b"),
- _order(Ordering::make(BSONObj())) {
- }
+class MyHarnessHelper final : public HarnessHelper {
+public:
+ MyHarnessHelper() : _recordStore("a.b"), _order(Ordering::make(BSONObj())) {}
- std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique) final {
- std::unique_ptr<SortedDataInterface> sorted(getMMAPV1Interface(&_headManager,
- &_recordStore,
- &_cursorRegistry,
- _order,
- "a_1",
- 1));
- OperationContextNoop op;
- massertStatusOK(sorted->initAsEmpty(&op));
- return sorted;
- }
-
- std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
- return stdx::make_unique<HeapRecordStoreBtreeRecoveryUnit>();
- }
-
- private:
- TestHeadManager _headManager;
- HeapRecordStoreBtree _recordStore;
- SavedCursorRegistry _cursorRegistry;
- Ordering _order;
- };
+ std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique) final {
+ std::unique_ptr<SortedDataInterface> sorted(
+ getMMAPV1Interface(&_headManager, &_recordStore, &_cursorRegistry, _order, "a_1", 1));
+ OperationContextNoop op;
+ massertStatusOK(sorted->initAsEmpty(&op));
+ return sorted;
+ }
- std::unique_ptr<HarnessHelper> newHarnessHelper() {
- return stdx::make_unique<MyHarnessHelper>();
+ std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
+ return stdx::make_unique<HeapRecordStoreBtreeRecoveryUnit>();
}
+private:
+ TestHeadManager _headManager;
+ HeapRecordStoreBtree _recordStore;
+ SavedCursorRegistry _cursorRegistry;
+ Ordering _order;
+};
+
+std::unique_ptr<HarnessHelper> newHarnessHelper() {
+ return stdx::make_unique<MyHarnessHelper>();
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index 1afe24331cf..11e31b3fce7 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -42,2383 +42,2299 @@
namespace mongo {
- using std::unique_ptr;
- using std::dec;
- using std::endl;
- using std::hex;
- using std::make_pair;
- using std::pair;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- // BtreeLogic::Builder algorithm
- //
- // Phase 1:
- // Handled by caller. Extracts keys from raw documents and puts them in external sorter
- //
- // Phase 2 (the addKeys phase):
- // Add all keys to buckets. When a bucket gets full, pop the highest key (setting the
- // nextChild pointer of the bucket to the prevChild of the popped key), add the popped key to
- // a parent bucket, and create a new right sibling bucket to add the new key to. If the parent
- // bucket is full, this same operation is performed on the parent and all full ancestors. If
- // we get to the root and it is full, a new root is created above the current root. When
- // creating a new right sibling, it is set as its parent's nextChild as all keys in the right
- // sibling will be higher than all keys currently in the parent.
-
- //
- // Public Builder logic
- //
-
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::Builder*
- BtreeLogic<BtreeLayout>::newBuilder(OperationContext* txn, bool dupsAllowed) {
- return new Builder(this, txn, dupsAllowed);
- }
-
- template <class BtreeLayout>
- BtreeLogic<BtreeLayout>::Builder::Builder(BtreeLogic* logic,
- OperationContext* txn,
- bool dupsAllowed)
- : _logic(logic),
- _dupsAllowed(dupsAllowed),
- _txn(txn) {
-
- // The normal bulk building path calls initAsEmpty, so we already have an empty root bucket.
- // This isn't the case in some unit tests that use the Builder directly rather than going
- // through an IndexAccessMethod.
- _rightLeafLoc = DiskLoc::fromRecordId(_logic->_headManager->getHead(txn));
- if (_rightLeafLoc.isNull()) {
- _rightLeafLoc = _logic->_addBucket(txn);
- _logic->_headManager->setHead(_txn, _rightLeafLoc.toRecordId());
- }
-
- // must be empty when starting
- invariant(_getBucket(_rightLeafLoc)->n == 0);
- }
-
- template <class BtreeLayout>
- class BtreeLogic<BtreeLayout>::Builder::SetRightLeafLocChange : public RecoveryUnit::Change {
- public:
- SetRightLeafLocChange(Builder* builder, DiskLoc oldLoc)
- : _builder(builder)
- , _oldLoc(oldLoc)
- {}
-
- virtual void commit() {}
- virtual void rollback() { _builder->_rightLeafLoc = _oldLoc; }
-
- Builder* _builder;
- const DiskLoc _oldLoc;
- };
-
- template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::Builder::addKey(const BSONObj& keyObj, const DiskLoc& loc) {
- unique_ptr<KeyDataOwnedType> key(new KeyDataOwnedType(keyObj));
-
- if (key->dataSize() > BtreeLayout::KeyMax) {
- string msg = str::stream() << "Btree::insert: key too large to index, failing "
- << _logic->_indexName
- << ' ' << key->dataSize() << ' ' << key->toString();
- log() << msg << endl;
- return Status(ErrorCodes::KeyTooLong, msg);
- }
+using std::unique_ptr;
+using std::dec;
+using std::endl;
+using std::hex;
+using std::make_pair;
+using std::pair;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+// BtreeLogic::Builder algorithm
+//
+// Phase 1:
+// Handled by caller. Extracts keys from raw documents and puts them in external sorter
+//
+// Phase 2 (the addKeys phase):
+// Add all keys to buckets. When a bucket gets full, pop the highest key (setting the
+// nextChild pointer of the bucket to the prevChild of the popped key), add the popped key to
+// a parent bucket, and create a new right sibling bucket to add the new key to. If the parent
+// bucket is full, this same operation is performed on the parent and all full ancestors. If
+// we get to the root and it is full, a new root is created above the current root. When
+// creating a new right sibling, it is set as its parent's nextChild as all keys in the right
+// sibling will be higher than all keys currently in the parent.
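The phase-2 cascade described above can be pictured with a deliberately simplified, single-parent-level model; the types, sizes, and key values below are toys chosen for illustration and are not the on-disk format:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // One parent level over a row of leaves; the real code recurses upward as
    // many levels as needed. Keys must arrive in sorted order.
    struct ToyLeaf {
        std::vector<int> keys;
    };

    struct ToyTree {
        static constexpr std::size_t kLeafMax = 4;
        std::vector<ToyLeaf> leaves;  // the rightmost leaf is the insert target
        std::vector<int> separators;  // parent-level keys, one per finished leaf

        void addKey(int key) {
            if (leaves.empty())
                leaves.emplace_back();
            ToyLeaf& right = leaves.back();
            if (right.keys.size() < kLeafMax) {
                right.keys.push_back(key);
                return;
            }
            // The leaf is full: pop its highest key up into the parent and
            // start a new right sibling, as described for buckets above.
            separators.push_back(right.keys.back());
            right.keys.pop_back();
            leaves.emplace_back();
            leaves.back().keys.push_back(key);
        }
    };

    int main() {
        ToyTree tree;
        for (int k = 0; k < 10; ++k)
            tree.addKey(k);
        assert(tree.leaves.size() == 3);  // {0,1,2} {4,5,6} {8,9}; 3 and 7 were promoted
        assert((tree.separators == std::vector<int>{3, 7}));
        return 0;
    }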
+
+//
+// Public Builder logic
+//
+
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::Builder* BtreeLogic<BtreeLayout>::newBuilder(
+ OperationContext* txn, bool dupsAllowed) {
+ return new Builder(this, txn, dupsAllowed);
+}
+
+template <class BtreeLayout>
+BtreeLogic<BtreeLayout>::Builder::Builder(BtreeLogic* logic,
+ OperationContext* txn,
+ bool dupsAllowed)
+ : _logic(logic), _dupsAllowed(dupsAllowed), _txn(txn) {
+ // The normal bulk building path calls initAsEmpty, so we already have an empty root bucket.
+ // This isn't the case in some unit tests that use the Builder directly rather than going
+ // through an IndexAccessMethod.
+ _rightLeafLoc = DiskLoc::fromRecordId(_logic->_headManager->getHead(txn));
+ if (_rightLeafLoc.isNull()) {
+ _rightLeafLoc = _logic->_addBucket(txn);
+ _logic->_headManager->setHead(_txn, _rightLeafLoc.toRecordId());
+ }
+
+ // must be empty when starting
+ invariant(_getBucket(_rightLeafLoc)->n == 0);
+}
+
+template <class BtreeLayout>
+class BtreeLogic<BtreeLayout>::Builder::SetRightLeafLocChange : public RecoveryUnit::Change {
+public:
+ SetRightLeafLocChange(Builder* builder, DiskLoc oldLoc) : _builder(builder), _oldLoc(oldLoc) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ _builder->_rightLeafLoc = _oldLoc;
+ }
+
+ Builder* _builder;
+ const DiskLoc _oldLoc;
+};
+
+template <class BtreeLayout>
+Status BtreeLogic<BtreeLayout>::Builder::addKey(const BSONObj& keyObj, const DiskLoc& loc) {
+ unique_ptr<KeyDataOwnedType> key(new KeyDataOwnedType(keyObj));
+
+ if (key->dataSize() > BtreeLayout::KeyMax) {
+ string msg = str::stream() << "Btree::insert: key too large to index, failing "
+ << _logic->_indexName << ' ' << key->dataSize() << ' '
+ << key->toString();
+ log() << msg << endl;
+ return Status(ErrorCodes::KeyTooLong, msg);
+ }
+
+ // If we have a previous key to compare to...
+ if (_keyLast.get()) {
+ int cmp = _keyLast->woCompare(*key, _logic->_ordering);
+
+ // This shouldn't happen ever. We expect keys in sorted order.
+ if (cmp > 0) {
+ return Status(ErrorCodes::InternalError, "Bad key order in btree builder");
+ }
+
+ // This, on the other hand, can easily happen: the input may contain duplicate keys.
+ if (!_dupsAllowed && (cmp == 0)) {
+ return Status(ErrorCodes::DuplicateKey, _logic->dupKeyError(*_keyLast));
+ }
+ }
+
+ BucketType* rightLeaf = _getModifiableBucket(_rightLeafLoc);
+ if (!_logic->pushBack(rightLeaf, loc, *key, DiskLoc())) {
+ // bucket was full, so split and try with the new node.
+ _txn->recoveryUnit()->registerChange(new SetRightLeafLocChange(this, _rightLeafLoc));
+ _rightLeafLoc = newBucket(rightLeaf, _rightLeafLoc);
+ rightLeaf = _getModifiableBucket(_rightLeafLoc);
+ invariant(_logic->pushBack(rightLeaf, loc, *key, DiskLoc()));
+ }
+
+ _keyLast = std::move(key);
+ return Status::OK();
+}
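For context, a hedged sketch of how a bulk load might drive this method; the wrapper function, its parameters, and the header path are assumptions made for illustration, while newBuilder() and addKey() are the entry points shown above:

    #include <memory>
    #include <utility>
    #include <vector>

    #include "mongo/db/storage/mmap_v1/btree/btree_logic.h"

    namespace mongo {
    // Hypothetical phase-2 driver: keys are assumed to arrive already sorted by
    // the external sorter (phase 1). Out-of-order input surfaces as InternalError;
    // duplicates surface as DuplicateKey when dupsAllowed is false.
    Status bulkLoad(OperationContext* txn,
                    BtreeLogic<BtreeLayoutV1>* btree,
                    const std::vector<std::pair<BSONObj, DiskLoc>>& sortedKeys,
                    bool dupsAllowed) {
        std::unique_ptr<BtreeLogic<BtreeLayoutV1>::Builder> builder(
            btree->newBuilder(txn, dupsAllowed));
        for (const auto& entry : sortedKeys) {
            Status status = builder->addKey(entry.first, entry.second);
            if (!status.isOK())
                return status;
        }
        return Status::OK();
    }
    }  // namespace mongo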
+
+//
+// Private Builder logic
+//
+
+template <class BtreeLayout>
+DiskLoc BtreeLogic<BtreeLayout>::Builder::newBucket(BucketType* leftSib, DiskLoc leftSibLoc) {
+ invariant(leftSib->n >= 2); // Guaranteed by sufficiently small KeyMax.
+
+ if (leftSib->parent.isNull()) {
+ // Making a new root
+ invariant(leftSibLoc.toRecordId() == _logic->_headManager->getHead(_txn));
+ const DiskLoc newRootLoc = _logic->_addBucket(_txn);
+ leftSib->parent = newRootLoc;
+ _logic->_headManager->setHead(_txn, newRootLoc.toRecordId());
+
+ // Set the newRoot's nextChild to point to leftSib for the invariant below.
+ BucketType* newRoot = _getBucket(newRootLoc);
+ *_txn->recoveryUnit()->writing(&newRoot->nextChild) = leftSibLoc;
+ }
+
+ DiskLoc parentLoc = leftSib->parent;
+ BucketType* parent = _getModifiableBucket(parentLoc);
+
+ // For the pushBack below to be correct, leftSib must be the right-most child of parent.
+ invariant(parent->nextChild == leftSibLoc);
+
+ // Pull right-most key out of leftSib and move to parent, splitting parent if necessary.
+ // Note that popBack() handles setting leftSib's nextChild to the former prevChildNode of
+ // the popped key.
+ KeyDataType key;
+ DiskLoc val;
+ _logic->popBack(leftSib, &val, &key);
+ if (!_logic->pushBack(parent, val, key, leftSibLoc)) {
+ // parent is full, so split it.
+ parentLoc = newBucket(parent, parentLoc);
+ parent = _getModifiableBucket(parentLoc);
+ invariant(_logic->pushBack(parent, val, key, leftSibLoc));
+ leftSib->parent = parentLoc;
+ }
+
+ // Create a new bucket to the right of leftSib and set its parent pointer and the downward
+ // nextChild pointer from the parent.
+ DiskLoc newBucketLoc = _logic->_addBucket(_txn);
+ BucketType* newBucket = _getBucket(newBucketLoc);
+ *_txn->recoveryUnit()->writing(&newBucket->parent) = parentLoc;
+ *_txn->recoveryUnit()->writing(&parent->nextChild) = newBucketLoc;
+ return newBucketLoc;
+}
+
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::BucketType*
+BtreeLogic<BtreeLayout>::Builder::_getModifiableBucket(DiskLoc loc) {
+ return _logic->btreemod(_txn, _logic->getBucket(_txn, loc));
+}
+
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::Builder::_getBucket(
+ DiskLoc loc) {
+ return _logic->getBucket(_txn, loc);
+}
+
+//
+// BtreeLogic logic
+//
+
+// static
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::FullKey BtreeLogic<BtreeLayout>::getFullKey(
+ const BucketType* bucket, int i) {
+ if (i >= bucket->n) {
+ int code = 13000;
+ massert(code,
+ (string) "invalid keyNode: " + BSON("i" << i << "n" << bucket->n).jsonString(),
+ i < bucket->n);
+ }
+ return FullKey(bucket, i);
+}
+
+// static
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::KeyHeaderType& BtreeLogic<BtreeLayout>::getKeyHeader(
+ BucketType* bucket, int i) {
+ return ((KeyHeaderType*)bucket->data)[i];
+}
+
+// static
+template <class BtreeLayout>
+const typename BtreeLogic<BtreeLayout>::KeyHeaderType& BtreeLogic<BtreeLayout>::getKeyHeader(
+ const BucketType* bucket, int i) {
+ return ((const KeyHeaderType*)bucket->data)[i];
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::markUnused(BucketType* bucket, int keyPos) {
+ invariant(keyPos >= 0 && keyPos < bucket->n);
+ getKeyHeader(bucket, keyPos).setUnused();
+}
+
+template <class BtreeLayout>
+char* BtreeLogic<BtreeLayout>::dataAt(BucketType* bucket, short ofs) {
+ return bucket->data + ofs;
+}
+
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::btreemod(
+ OperationContext* txn, BucketType* bucket) {
+ txn->recoveryUnit()->writingPtr(bucket, BtreeLayout::BucketSize);
+ return bucket;
+}
+
+template <class BtreeLayout>
+int BtreeLogic<BtreeLayout>::totalDataSize(BucketType* bucket) {
+ return (int)(BtreeLayout::BucketSize - (bucket->data - (char*)bucket));
+}
+
+// We define this value as the maximum number of bytes such that, if we have
+// fewer than this many bytes, we must be able to either merge with or receive
+// keys from any neighboring node. If our utilization goes below this value we
+// know we can bring up the utilization with a simple operation. Ignoring the
+// 90/10 split policy which is sometimes employed and our 'unused' nodes, this
+// is a lower bound on bucket utilization for non root buckets.
+//
+// Note that the exact value here depends on the implementation of
+// _rebalancedSeparatorPos(). The conditions for lowWaterMark - 1 are as
+// follows: We know we cannot merge with the neighbor, so the total data size
+// for us, the neighbor, and the separator must be at least
+// BucketType::bodySize() + 1. We must be able to accept one key of any
+// allowed size, so our size plus storage for that additional key must be
+// <= BucketType::bodySize() / 2. This way, with the extra key we'll have a
+// new bucket data size < half the total data size and by the implementation
+// of _rebalancedSeparatorPos() the key must be added.
+template <class BtreeLayout>
+int BtreeLogic<BtreeLayout>::lowWaterMark() {
+ return BtreeLayout::BucketBodySize / 2 - BtreeLayout::KeyMax - sizeof(KeyHeaderType) + 1;
+}
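To make the bound concrete, here is a small self-contained check using hypothetical layout constants (not the real BtreeLayout values), verifying the property the comment relies on: a bucket just below the low water mark can still absorb one maximally sized key while staying within half of the body size.

    #include <cassert>
    #include <cstdio>

    int main() {
        // Hypothetical constants, chosen only to make the arithmetic concrete.
        const int bucketBodySize = 8192;
        const int keyMax = 1024;
        const int keyHeaderSize = 16;

        // Mirrors lowWaterMark(): below this many used bytes a bucket can always
        // merge with, or receive a key from, a neighboring bucket.
        const int lowWaterMark = bucketBodySize / 2 - keyMax - keyHeaderSize + 1;
        std::printf("lowWaterMark = %d bytes\n", lowWaterMark);  // prints 3057

        // A bucket at lowWaterMark - 1 bytes plus one maximal key and its header
        // still fits in half the body size, so per the reasoning above the key
        // must be accepted rather than forcing a merge.
        assert((lowWaterMark - 1) + keyMax + keyHeaderSize <= bucketBodySize / 2);
        return 0;
    }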
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::init(BucketType* bucket) {
+ BtreeLayout::initBucket(bucket);
+ bucket->parent.Null();
+ bucket->nextChild.Null();
+ bucket->flags = Packed;
+ bucket->n = 0;
+ bucket->emptySize = totalDataSize(bucket);
+ bucket->topSize = 0;
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::_unalloc(BucketType* bucket, int bytes) {
+ bucket->topSize -= bytes;
+ bucket->emptySize += bytes;
+}
- // If we have a previous key to compare to...
- if (_keyLast.get()) {
- int cmp = _keyLast->woCompare(*key, _logic->_ordering);
-
- // This shouldn't happen ever. We expect keys in sorted order.
- if (cmp > 0) {
- return Status(ErrorCodes::InternalError, "Bad key order in btree builder");
- }
+/**
+ * We allocate space from the end of the buffer for data. The keynodes grow from the front.
+ */
+template <class BtreeLayout>
+int BtreeLogic<BtreeLayout>::_alloc(BucketType* bucket, int bytes) {
+ invariant(bucket->emptySize >= bytes);
+ bucket->topSize += bytes;
+ bucket->emptySize -= bytes;
+ int ofs = totalDataSize(bucket) - bucket->topSize;
+ invariant(ofs > 0);
+ return ofs;
+}
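A toy model of the layout this allocator maintains: fixed-size key headers grow from the front of the data area, key bytes are carved off the back, and emptySize tracks the gap in between. The struct and numbers are illustrative, not the real BucketType, and the header bookkeeping done by pushBack() is omitted:

    #include <cassert>
    #include <cstring>

    struct ToyBucket {
        static const int kDataSize = 64;
        int topSize = 0;            // bytes used by key data at the back
        int emptySize = kDataSize;  // unused gap in the middle
        char data[kDataSize];
    };

    // Mirrors _alloc(): reserve 'bytes' at the back and return the offset of
    // the newly reserved region within data[].
    int toyAlloc(ToyBucket* bucket, int bytes) {
        assert(bucket->emptySize >= bytes);
        bucket->topSize += bytes;
        bucket->emptySize -= bytes;
        return ToyBucket::kDataSize - bucket->topSize;
    }

    int main() {
        ToyBucket b;
        int ofs = toyAlloc(&b, 8);  // the first key's data lands at the very end
        std::memcpy(b.data + ofs, "abcdefg", 8);
        assert(ofs == 56 && b.emptySize == 56);
        return 0;
    }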
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::setNotPacked(BucketType* bucket) {
+ bucket->flags &= ~Packed;
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::setPacked(BucketType* bucket) {
+ bucket->flags |= Packed;
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::_delKeyAtPos(BucketType* bucket, int keypos, bool mayEmpty) {
+ invariant(keypos >= 0 && keypos <= bucket->n);
+ invariant(childLocForPos(bucket, keypos).isNull());
+ invariant((mayEmpty && bucket->n > 0) || bucket->n > 1 || bucket->nextChild.isNull());
+
+ bucket->emptySize += sizeof(KeyHeaderType);
+ bucket->n--;
+
+ for (int j = keypos; j < bucket->n; j++) {
+ getKeyHeader(bucket, j) = getKeyHeader(bucket, j + 1);
+ }
+
+ setNotPacked(bucket);
+}
- // This could easily happen..
- if (!_dupsAllowed && (cmp == 0)) {
- return Status(ErrorCodes::DuplicateKey, _logic->dupKeyError(*_keyLast));
- }
- }
-
- BucketType* rightLeaf = _getModifiableBucket(_rightLeafLoc);
- if (!_logic->pushBack(rightLeaf, loc, *key, DiskLoc())) {
- // bucket was full, so split and try with the new node.
- _txn->recoveryUnit()->registerChange(new SetRightLeafLocChange(this, _rightLeafLoc));
- _rightLeafLoc = newBucket(rightLeaf, _rightLeafLoc);
- rightLeaf = _getModifiableBucket(_rightLeafLoc);
- invariant(_logic->pushBack(rightLeaf, loc, *key, DiskLoc()));
- }
+/**
+ * Pull rightmost key from the bucket and set its prevChild pointer to be the nextChild for the
+ * whole bucket. It is assumed that caller already has the old value of the nextChild
+ * pointer and is about to add a pointer to it elsewhere in the tree.
+ *
+ * This is only used by BtreeLogic::Builder. Think very hard (and change this comment) before
+ * using it anywhere else.
+ *
+ * WARNING: The keyDataOut that is filled out by this function points to newly unalloced memory
+ * inside of this bucket. It only remains valid until the next write to this bucket.
+ */
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::popBack(BucketType* bucket,
+ DiskLoc* recordLocOut,
+ KeyDataType* keyDataOut) {
+ massert(17435, "n==0 in btree popBack()", bucket->n > 0);
+
+ invariant(getKeyHeader(bucket, bucket->n - 1).isUsed());
+
+ FullKey kn = getFullKey(bucket, bucket->n - 1);
+ *recordLocOut = kn.recordLoc;
+ keyDataOut->assign(kn.data);
+ int keysize = kn.data.dataSize();
+
+ // The left/prev child of the node we are popping now goes in to the nextChild slot as all
+ // of its keys are greater than all remaining keys in this node.
+ bucket->nextChild = kn.prevChildBucket;
+ bucket->n--;
+
+ // This is risky because the keyDataOut we filled out above will now point to this newly
+ // unalloced memory.
+ bucket->emptySize += sizeof(KeyHeaderType);
+ _unalloc(bucket, keysize);
+}
- _keyLast = std::move(key);
- return Status::OK();
+/**
+ * Add a key. Must be > all existing. Be careful to set next ptr right.
+ */
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::pushBack(BucketType* bucket,
+ const DiskLoc recordLoc,
+ const KeyDataType& key,
+ const DiskLoc prevChild) {
+ int bytesNeeded = key.dataSize() + sizeof(KeyHeaderType);
+ if (bytesNeeded > bucket->emptySize) {
+ return false;
}
+ invariant(bytesNeeded <= bucket->emptySize);
- //
- // Private Builder logic
- //
-
- template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::Builder::newBucket(BucketType* leftSib,
- DiskLoc leftSibLoc) {
- invariant(leftSib->n >= 2); // Guaranteed by sufficiently small KeyMax.
-
- if (leftSib->parent.isNull()) {
- // Making a new root
- invariant(leftSibLoc.toRecordId() == _logic->_headManager->getHead(_txn));
- const DiskLoc newRootLoc = _logic->_addBucket(_txn);
- leftSib->parent = newRootLoc;
- _logic->_headManager->setHead(_txn, newRootLoc.toRecordId());
-
- // Set the newRoot's nextChild to point to leftSib for the invariant below.
- BucketType* newRoot = _getBucket(newRootLoc);
- *_txn->recoveryUnit()->writing(&newRoot->nextChild) = leftSibLoc;
- }
-
- DiskLoc parentLoc = leftSib->parent;
- BucketType* parent = _getModifiableBucket(parentLoc);
-
- // For the pushBack below to be correct, leftSib must be the right-most child of parent.
- invariant(parent->nextChild == leftSibLoc);
-
- // Pull right-most key out of leftSib and move to parent, splitting parent if necessary.
- // Note that popBack() handles setting leftSib's nextChild to the former prevChildNode of
- // the popped key.
- KeyDataType key;
- DiskLoc val;
- _logic->popBack(leftSib, &val, &key);
- if (!_logic->pushBack(parent, val, key, leftSibLoc)) {
- // parent is full, so split it.
- parentLoc = newBucket(parent, parentLoc);
- parent = _getModifiableBucket(parentLoc);
- invariant(_logic->pushBack(parent, val, key, leftSibLoc));
- leftSib->parent = parentLoc;
+ if (bucket->n) {
+ const FullKey klast = getFullKey(bucket, bucket->n - 1);
+ if (klast.data.woCompare(key, _ordering) > 0) {
+ log() << "btree bucket corrupt? "
+ "consider reindexing or running validate command" << endl;
+ log() << " klast: " << klast.data.toString() << endl;
+ log() << " key: " << key.toString() << endl;
+ invariant(false);
}
-
- // Create a new bucket to the right of leftSib and set its parent pointer and the downward
- // nextChild pointer from the parent.
- DiskLoc newBucketLoc = _logic->_addBucket(_txn);
- BucketType* newBucket = _getBucket(newBucketLoc);
- *_txn->recoveryUnit()->writing(&newBucket->parent) = parentLoc;
- *_txn->recoveryUnit()->writing(&parent->nextChild) = newBucketLoc;
- return newBucketLoc;
}
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::Builder::_getModifiableBucket(DiskLoc loc) {
- return _logic->btreemod(_txn, _logic->getBucket(_txn, loc));
- }
+ bucket->emptySize -= sizeof(KeyHeaderType);
+ KeyHeaderType& kn = getKeyHeader(bucket, bucket->n++);
+ kn.prevChildBucket = prevChild;
+ kn.recordLoc = recordLoc;
+ kn.setKeyDataOfs((short)_alloc(bucket, key.dataSize()));
+ short ofs = kn.keyDataOfs();
+ char* p = dataAt(bucket, ofs);
+ memcpy(p, key.data(), key.dataSize());
+ return true;
+}
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::Builder::_getBucket(DiskLoc loc) {
- return _logic->getBucket(_txn, loc);
- }
+/**
+ * Durability note:
+ *
+ * We do separate intent declarations herein. Arguably one could just declare the whole bucket
+ * given we do group commits. This is something we could investigate later as to what is
+ * faster.
+ **/
- //
- // BtreeLogic logic
- //
+/**
+ * Insert a key in a bucket with no complexity -- no splits required
+ * Returns false if a split is required.
+ */
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int& keypos,
+ const KeyDataType& key,
+ const DiskLoc recordLoc) {
+ invariant(bucket->n < 1024);
+ invariant(keypos >= 0 && keypos <= bucket->n);
- // static
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::FullKey
- BtreeLogic<BtreeLayout>::getFullKey(const BucketType* bucket, int i) {
- if (i >= bucket->n) {
- int code = 13000;
- massert(code,
- (string)"invalid keyNode: " + BSON( "i" << i << "n" << bucket->n ).jsonString(),
- i < bucket->n );
+ int bytesNeeded = key.dataSize() + sizeof(KeyHeaderType);
+ if (bytesNeeded > bucket->emptySize) {
+ _pack(txn, bucket, bucketLoc, keypos);
+ if (bytesNeeded > bucket->emptySize) {
+ return false;
}
- return FullKey(bucket, i);
}
- // static
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::KeyHeaderType&
- BtreeLogic<BtreeLayout>::getKeyHeader(BucketType* bucket, int i) {
- return ((KeyHeaderType*)bucket->data)[i];
- }
+ invariant(getBucket(txn, bucketLoc) == bucket);
- // static
- template <class BtreeLayout>
- const typename BtreeLogic<BtreeLayout>::KeyHeaderType&
- BtreeLogic<BtreeLayout>::getKeyHeader(const BucketType* bucket, int i) {
- return ((const KeyHeaderType*)bucket->data)[i];
- }
+ {
+ // declare that we will write to [k(keypos),k(n)]
+ char* start = reinterpret_cast<char*>(&getKeyHeader(bucket, keypos));
+ char* end = reinterpret_cast<char*>(&getKeyHeader(bucket, bucket->n + 1));
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::markUnused(BucketType* bucket, int keyPos) {
- invariant(keyPos >= 0 && keyPos < bucket->n);
- getKeyHeader(bucket, keyPos).setUnused();
+ // Declare that we will write to [k(keypos),k(n)]
+ txn->recoveryUnit()->writingPtr(start, end - start);
}
- template <class BtreeLayout>
- char* BtreeLogic<BtreeLayout>::dataAt(BucketType* bucket, short ofs) {
- return bucket->data + ofs;
+ // e.g. for n==3, keypos==2
+ // 1 4 9 -> 1 4 _ 9
+ for (int j = bucket->n; j > keypos; j--) {
+ getKeyHeader(bucket, j) = getKeyHeader(bucket, j - 1);
}
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::btreemod(OperationContext* txn, BucketType* bucket) {
- txn->recoveryUnit()->writingPtr(bucket, BtreeLayout::BucketSize);
- return bucket;
- }
-
- template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::totalDataSize(BucketType* bucket) {
- return (int) (BtreeLayout::BucketSize - (bucket->data - (char*)bucket));
- }
-
- // We define this value as the maximum number of bytes such that, if we have
- // fewer than this many bytes, we must be able to either merge with or receive
- // keys from any neighboring node. If our utilization goes below this value we
- // know we can bring up the utilization with a simple operation. Ignoring the
- // 90/10 split policy which is sometimes employed and our 'unused' nodes, this
- // is a lower bound on bucket utilization for non root buckets.
- //
- // Note that the exact value here depends on the implementation of
- // _rebalancedSeparatorPos(). The conditions for lowWaterMark - 1 are as
- // follows: We know we cannot merge with the neighbor, so the total data size
- // for us, the neighbor, and the separator must be at least
- // BucketType::bodySize() + 1. We must be able to accept one key of any
- // allowed size, so our size plus storage for that additional key must be
- // <= BucketType::bodySize() / 2. This way, with the extra key we'll have a
- // new bucket data size < half the total data size and by the implementation
- // of _rebalancedSeparatorPos() the key must be added.
- template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::lowWaterMark() {
- return BtreeLayout::BucketBodySize / 2 - BtreeLayout::KeyMax - sizeof(KeyHeaderType) + 1;
- }
-
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::init(BucketType* bucket) {
- BtreeLayout::initBucket(bucket);
- bucket->parent.Null();
- bucket->nextChild.Null();
- bucket->flags = Packed;
- bucket->n = 0;
- bucket->emptySize = totalDataSize(bucket);
- bucket->topSize = 0;
- }
-
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::_unalloc(BucketType* bucket, int bytes) {
- bucket->topSize -= bytes;
- bucket->emptySize += bytes;
- }
+ size_t writeLen = sizeof(bucket->emptySize) + sizeof(bucket->topSize) + sizeof(bucket->n);
+ txn->recoveryUnit()->writingPtr(&bucket->emptySize, writeLen);
+ bucket->emptySize -= sizeof(KeyHeaderType);
+ bucket->n++;
- /**
- * We allocate space from the end of the buffer for data. The keynodes grow from the front.
- */
- template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::_alloc(BucketType* bucket, int bytes) {
- invariant(bucket->emptySize >= bytes);
- bucket->topSize += bytes;
- bucket->emptySize -= bytes;
- int ofs = totalDataSize(bucket) - bucket->topSize;
- invariant(ofs > 0);
- return ofs;
- }
+ // This _KeyNode was marked for writing above.
+ KeyHeaderType& kn = getKeyHeader(bucket, keypos);
+ kn.prevChildBucket.Null();
+ kn.recordLoc = recordLoc;
+ kn.setKeyDataOfs((short)_alloc(bucket, key.dataSize()));
+ char* p = dataAt(bucket, kn.keyDataOfs());
+ txn->recoveryUnit()->writingPtr(p, key.dataSize());
+ memcpy(p, key.data(), key.dataSize());
+ return true;
+}
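The header shuffle above (the "1 4 9 -> 1 4 _ 9" example) is easy to model on its own; a toy version using ints as stand-ins for the key headers:

    #include <cassert>
    #include <vector>

    // Toy version of the shift performed by basicInsert(): open a hole at keypos
    // by copying headers upward, then drop the new key into the hole.
    void toyBasicInsert(std::vector<int>& headers, int keypos, int key) {
        headers.push_back(0);  // grow by one slot (the bucket->n++ step)
        for (int j = static_cast<int>(headers.size()) - 1; j > keypos; --j)
            headers[j] = headers[j - 1];
        headers[keypos] = key;
    }

    int main() {
        std::vector<int> h{1, 4, 9};
        toyBasicInsert(h, 2, 7);  // n==3, keypos==2: 1 4 9 -> 1 4 7 9
        assert((h == std::vector<int>{1, 4, 7, 9}));
        return 0;
    }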
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::setNotPacked(BucketType* bucket) {
- bucket->flags &= ~Packed;
- }
+/**
+ * With this implementation, refPos == 0 disregards effect of refPos. index > 0 prevents
+ * creation of an empty bucket.
+ */
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::mayDropKey(BucketType* bucket, int index, int refPos) {
+ return index > 0 && (index != refPos) && getKeyHeader(bucket, index).isUnused() &&
+ getKeyHeader(bucket, index).prevChildBucket.isNull();
+}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::setPacked(BucketType* bucket) {
- bucket->flags |= Packed;
+template <class BtreeLayout>
+int BtreeLogic<BtreeLayout>::_packedDataSize(BucketType* bucket, int refPos) {
+ if (bucket->flags & Packed) {
+ return BtreeLayout::BucketSize - bucket->emptySize - BucketType::HeaderSize;
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::_delKeyAtPos(BucketType* bucket, int keypos, bool mayEmpty) {
- invariant(keypos >= 0 && keypos <= bucket->n);
- invariant(childLocForPos(bucket, keypos).isNull());
- invariant((mayEmpty && bucket->n > 0) || bucket->n > 1 || bucket->nextChild.isNull());
-
- bucket->emptySize += sizeof(KeyHeaderType);
- bucket->n--;
-
- for (int j = keypos; j < bucket->n; j++) {
- getKeyHeader(bucket, j) = getKeyHeader(bucket, j + 1);
+ int size = 0;
+ for (int j = 0; j < bucket->n; ++j) {
+ if (mayDropKey(bucket, j, refPos)) {
+ continue;
}
-
- setNotPacked(bucket);
- }
-
- /**
- * Pull rightmost key from the bucket and set its prevChild pointer to be the nextChild for the
- * whole bucket. It is assumed that caller already has the old value of the nextChild
- * pointer and is about to add a pointer to it elsewhere in the tree.
- *
- * This is only used by BtreeLogic::Builder. Think very hard (and change this comment) before
- * using it anywhere else.
- *
- * WARNING: The keyDataOut that is filled out by this function points to newly unalloced memory
- * inside of this bucket. It only remains valid until the next write to this bucket.
- */
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::popBack(BucketType* bucket,
- DiskLoc* recordLocOut,
- KeyDataType* keyDataOut) {
-
- massert(17435, "n==0 in btree popBack()", bucket->n > 0 );
-
- invariant(getKeyHeader(bucket, bucket->n - 1).isUsed());
-
- FullKey kn = getFullKey(bucket, bucket->n - 1);
- *recordLocOut = kn.recordLoc;
- keyDataOut->assign(kn.data);
- int keysize = kn.data.dataSize();
-
- // The left/prev child of the node we are popping now goes in to the nextChild slot as all
- // of its keys are greater than all remaining keys in this node.
- bucket->nextChild = kn.prevChildBucket;
- bucket->n--;
-
- // This is risky because the keyDataOut we filled out above will now point to this newly
- // unalloced memory.
- bucket->emptySize += sizeof(KeyHeaderType);
- _unalloc(bucket, keysize);
- }
-
- /**
- * Add a key. Must be > all existing. Be careful to set next ptr right.
- */
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::pushBack(BucketType* bucket,
- const DiskLoc recordLoc,
- const KeyDataType& key,
- const DiskLoc prevChild) {
-
- int bytesNeeded = key.dataSize() + sizeof(KeyHeaderType);
- if (bytesNeeded > bucket->emptySize) {
- return false;
- }
- invariant(bytesNeeded <= bucket->emptySize);
-
- if (bucket->n) {
- const FullKey klast = getFullKey(bucket, bucket->n - 1);
- if (klast.data.woCompare(key, _ordering) > 0) {
- log() << "btree bucket corrupt? "
- "consider reindexing or running validate command" << endl;
- log() << " klast: " << klast.data.toString() << endl;
- log() << " key: " << key.toString() << endl;
- invariant(false);
- }
- }
-
- bucket->emptySize -= sizeof(KeyHeaderType);
- KeyHeaderType& kn = getKeyHeader(bucket, bucket->n++);
- kn.prevChildBucket = prevChild;
- kn.recordLoc = recordLoc;
- kn.setKeyDataOfs((short)_alloc(bucket, key.dataSize()));
- short ofs = kn.keyDataOfs();
- char *p = dataAt(bucket, ofs);
- memcpy(p, key.data(), key.dataSize());
- return true;
+ size += getFullKey(bucket, j).data.dataSize() + sizeof(KeyHeaderType);
}
- /**
- * Durability note:
- *
- * We do separate intent declarations herein. Arguably one could just declare the whole bucket
- * given we do group commits. This is something we could investigate later as to what is
- * faster.
- **/
-
- /**
- * Insert a key in a bucket with no complexity -- no splits required
- * Returns false if a split is required.
- */
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::basicInsert(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int& keypos,
- const KeyDataType& key,
- const DiskLoc recordLoc) {
- invariant(bucket->n < 1024);
- invariant(keypos >= 0 && keypos <= bucket->n);
+ return size;
+}
- int bytesNeeded = key.dataSize() + sizeof(KeyHeaderType);
- if (bytesNeeded > bucket->emptySize) {
- _pack(txn, bucket, bucketLoc, keypos);
- if (bytesNeeded > bucket->emptySize) {
- return false;
- }
- }
-
- invariant(getBucket(txn, bucketLoc) == bucket);
-
- {
- // declare that we will write to [k(keypos),k(n)]
- char* start = reinterpret_cast<char*>(&getKeyHeader(bucket, keypos));
- char* end = reinterpret_cast<char*>(&getKeyHeader(bucket, bucket->n + 1));
-
- // Declare that we will write to [k(keypos),k(n)]
- txn->recoveryUnit()->writingPtr(start, end - start);
- }
-
- // e.g. for n==3, keypos==2
- // 1 4 9 -> 1 4 _ 9
- for (int j = bucket->n; j > keypos; j--) {
- getKeyHeader(bucket, j) = getKeyHeader(bucket, j - 1);
- }
-
- size_t writeLen = sizeof(bucket->emptySize) + sizeof(bucket->topSize) + sizeof(bucket->n);
- txn->recoveryUnit()->writingPtr(&bucket->emptySize, writeLen);
- bucket->emptySize -= sizeof(KeyHeaderType);
- bucket->n++;
-
- // This _KeyNode was marked for writing above.
- KeyHeaderType& kn = getKeyHeader(bucket, keypos);
- kn.prevChildBucket.Null();
- kn.recordLoc = recordLoc;
- kn.setKeyDataOfs((short) _alloc(bucket, key.dataSize()));
- char *p = dataAt(bucket, kn.keyDataOfs());
- txn->recoveryUnit()->writingPtr(p, key.dataSize());
- memcpy(p, key.data(), key.dataSize());
- return true;
- }
-
- /**
- * With this implementation, refPos == 0 disregards effect of refPos. index > 0 prevents
- * creation of an empty bucket.
- */
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::mayDropKey(BucketType* bucket, int index, int refPos) {
- return index > 0
- && (index != refPos)
- && getKeyHeader(bucket, index).isUnused()
- && getKeyHeader(bucket, index).prevChildBucket.isNull();
- }
-
- template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::_packedDataSize(BucketType* bucket, int refPos) {
- if (bucket->flags & Packed) {
- return BtreeLayout::BucketSize - bucket->emptySize - BucketType::HeaderSize;
- }
-
- int size = 0;
- for (int j = 0; j < bucket->n; ++j) {
- if (mayDropKey(bucket, j, refPos)) {
- continue;
- }
- size += getFullKey(bucket, j).data.dataSize() + sizeof(KeyHeaderType);
- }
+/**
+ * When we delete things, we just leave empty space until the node is full and then we repack
+ * it.
+ */
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::_pack(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc thisLoc,
+ int& refPos) {
+ invariant(getBucket(txn, thisLoc) == bucket);
- return size;
+ if (bucket->flags & Packed) {
+ return;
}
- /**
- * When we delete things, we just leave empty space until the node is full and then we repack
- * it.
- */
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::_pack(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc thisLoc,
- int &refPos) {
-
- invariant(getBucket(txn, thisLoc) == bucket);
+ _packReadyForMod(btreemod(txn, bucket), refPos);
+}
- if (bucket->flags & Packed) {
- return;
- }
-
- _packReadyForMod(btreemod(txn, bucket), refPos);
+/**
+ * Version when write intent already declared.
+ */
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::_packReadyForMod(BucketType* bucket, int& refPos) {
+ if (bucket->flags & Packed) {
+ return;
}
- /**
- * Version when write intent already declared.
- */
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::_packReadyForMod(BucketType* bucket, int &refPos) {
- if (bucket->flags & Packed) {
- return;
- }
-
- int tdz = totalDataSize(bucket);
- char temp[BtreeLayout::BucketSize];
- int ofs = tdz;
- bucket->topSize = 0;
-
- int i = 0;
- for (int j = 0; j < bucket->n; j++) {
- if (mayDropKey(bucket, j, refPos)) {
- // key is unused and has no children - drop it
- continue;
- }
-
- if (i != j) {
- if (refPos == j) {
- // i < j so j will never be refPos again
- refPos = i;
- }
- getKeyHeader(bucket, i) = getKeyHeader(bucket, j);
- }
+ int tdz = totalDataSize(bucket);
+ char temp[BtreeLayout::BucketSize];
+ int ofs = tdz;
+ bucket->topSize = 0;
- short ofsold = getKeyHeader(bucket, i).keyDataOfs();
- int sz = getFullKey(bucket, i).data.dataSize();
- ofs -= sz;
- bucket->topSize += sz;
- memcpy(temp + ofs, dataAt(bucket, ofsold), sz);
- getKeyHeader(bucket, i).setKeyDataOfsSavingUse(ofs);
- ++i;
+ int i = 0;
+ for (int j = 0; j < bucket->n; j++) {
+ if (mayDropKey(bucket, j, refPos)) {
+ // key is unused and has no children - drop it
+ continue;
}
- if (refPos == bucket->n) {
- refPos = i;
- }
-
- bucket->n = i;
- int dataUsed = tdz - ofs;
- memcpy(bucket->data + ofs, temp + ofs, dataUsed);
-
- bucket->emptySize = tdz - dataUsed - bucket->n * sizeof(KeyHeaderType);
- int foo = bucket->emptySize;
- invariant( foo >= 0 );
- setPacked(bucket);
- assertValid(_indexName, bucket, _ordering);
- }
-
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::truncateTo(BucketType* bucket,
- int N,
- int &refPos) {
- bucket->n = N;
- setNotPacked(bucket);
- _packReadyForMod(bucket, refPos);
- }
-
- /**
- * In the standard btree algorithm, we would split based on the
- * existing keys _and_ the new key. But that's more work to
- * implement, so we split the existing keys and then add the new key.
- *
- * There are several published heuristic algorithms for doing splits, but basically what you
- * want are (1) even balancing between the two sides and (2) a small split key so the parent can
- * have a larger branching factor.
- *
- * We just have a simple algorithm right now: if a key includes the halfway point (or 10% way
- * point) in terms of bytes, split on that key; otherwise split on the key immediately to the
- * left of the halfway point (or 10% point).
- *
- * This function is expected to be called on a packed bucket.
- */
- template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::splitPos(BucketType* bucket, int keypos) {
- invariant(bucket->n > 2);
- int split = 0;
- int rightSize = 0;
-
- // When splitting a btree node, if the new key is greater than all the other keys, we should
- // not do an even split, but a 90/10 split. see SERVER-983. TODO I think we only want to
- // do the 90% split on the rhs node of the tree.
- int rightSizeLimit = (bucket->topSize + sizeof(KeyHeaderType) * bucket->n)
- / (keypos == bucket->n ? 10 : 2);
-
- for (int i = bucket->n - 1; i > -1; --i) {
- rightSize += getFullKey(bucket, i).data.dataSize() + sizeof(KeyHeaderType);
- if (rightSize > rightSizeLimit) {
- split = i;
- break;
+ if (i != j) {
+ if (refPos == j) {
+ // i < j so j will never be refPos again
+ refPos = i;
}
+ getKeyHeader(bucket, i) = getKeyHeader(bucket, j);
}
- // safeguards - we must not create an empty bucket
- if (split < 1) {
- split = 1;
- }
- else if (split > bucket->n - 2) {
- split = bucket->n - 2;
- }
-
- return split;
+ short ofsold = getKeyHeader(bucket, i).keyDataOfs();
+ int sz = getFullKey(bucket, i).data.dataSize();
+ ofs -= sz;
+ bucket->topSize += sz;
+ memcpy(temp + ofs, dataAt(bucket, ofsold), sz);
+ getKeyHeader(bucket, i).setKeyDataOfsSavingUse(ofs);
+ ++i;
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::reserveKeysFront(BucketType* bucket, int nAdd) {
- invariant(bucket->emptySize >= int(sizeof(KeyHeaderType) * nAdd));
- bucket->emptySize -= sizeof(KeyHeaderType) * nAdd;
- for (int i = bucket->n - 1; i > -1; --i) {
- getKeyHeader(bucket, i + nAdd) = getKeyHeader(bucket, i);
- }
- bucket->n += nAdd;
- }
-
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::setKey(BucketType* bucket,
- int i,
- const DiskLoc recordLoc,
- const KeyDataType& key,
- const DiskLoc prevChildBucket) {
- KeyHeaderType &kn = getKeyHeader(bucket, i);
- kn.recordLoc = recordLoc;
- kn.prevChildBucket = prevChildBucket;
- short ofs = (short) _alloc(bucket, key.dataSize());
- kn.setKeyDataOfs(ofs);
- char *p = dataAt(bucket, ofs);
- memcpy(p, key.data(), key.dataSize());
- }
-
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::dropFront(BucketType* bucket,
- int nDrop,
- int &refpos) {
- for (int i = nDrop; i < bucket->n; ++i) {
- getKeyHeader(bucket, i - nDrop) = getKeyHeader(bucket, i);
- }
- bucket->n -= nDrop;
- setNotPacked(bucket);
- _packReadyForMod(bucket, refpos );
+ if (refPos == bucket->n) {
+ refPos = i;
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
- DiskLoc* locInOut,
- int* keyOfsInOut,
- const IndexSeekPoint& seekPoint,
- int direction) const {
- pair<DiskLoc, int> unused;
+ bucket->n = i;
+ int dataUsed = tdz - ofs;
+ memcpy(bucket->data + ofs, temp + ofs, dataUsed);
- customLocate(txn, locInOut, keyOfsInOut, seekPoint, direction, unused);
- skipUnusedKeys(txn, locInOut, keyOfsInOut, direction);
- }
+ bucket->emptySize = tdz - dataUsed - bucket->n * sizeof(KeyHeaderType);
+ int foo = bucket->emptySize;
+ invariant(foo >= 0);
+ setPacked(bucket);
+ assertValid(_indexName, bucket, _ordering);
+}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
- DiskLoc* bucketLocInOut,
- int* posInOut,
- int direction) const {
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::truncateTo(BucketType* bucket, int N, int& refPos) {
+ bucket->n = N;
+ setNotPacked(bucket);
+ _packReadyForMod(bucket, refPos);
+}
- *bucketLocInOut = advance(txn, *bucketLocInOut, posInOut, direction);
- skipUnusedKeys(txn, bucketLocInOut, posInOut, direction);
+/**
+ * In the standard btree algorithm, we would split based on the
+ * existing keys _and_ the new key. But that's more work to
+ * implement, so we split the existing keys and then add the new key.
+ *
+ * There are several published heuristic algorithms for doing splits, but basically what you
+ * want are (1) even balancing between the two sides and (2) a small split key so the parent can
+ * have a larger branching factor.
+ *
+ * We just have a simple algorithm right now: if a key includes the halfway point (or 10% way
+ * point) in terms of bytes, split on that key; otherwise split on the key immediately to the
+ * left of the halfway point (or 10% point).
+ *
+ * This function is expected to be called on a packed bucket.
+ */
+template <class BtreeLayout>
+int BtreeLogic<BtreeLayout>::splitPos(BucketType* bucket, int keypos) {
+ invariant(bucket->n > 2);
+ int split = 0;
+ int rightSize = 0;
+
+ // When splitting a btree node, if the new key is greater than all the other keys, we should
+ // not do an even split, but a 90/10 split. see SERVER-983. TODO I think we only want to
+ // do the 90% split on the rhs node of the tree.
+ int rightSizeLimit =
+ (bucket->topSize + sizeof(KeyHeaderType) * bucket->n) / (keypos == bucket->n ? 10 : 2);
+
+ for (int i = bucket->n - 1; i > -1; --i) {
+ rightSize += getFullKey(bucket, i).data.dataSize() + sizeof(KeyHeaderType);
+ if (rightSize > rightSizeLimit) {
+ split = i;
+ break;
+ }
+ }
+
+ // safeguards - we must not create an empty bucket
+ if (split < 1) {
+ split = 1;
+ } else if (split > bucket->n - 2) {
+ split = bucket->n - 2;
+ }
+
+ return split;
+}
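A standalone sketch of the same heuristic over hypothetical key sizes, contrasting the even split chosen for a middle insert with the 90/10 split chosen when the new key would land past the rightmost position (the ascending-insert case from SERVER-983):

    #include <cstdio>
    #include <vector>

    // keySizes[i] is the on-disk byte cost of key i (header included); keypos is
    // where the new key would be inserted. Mirrors the splitPos() walk above.
    int toySplitPos(const std::vector<int>& keySizes, int keypos) {
        const int n = static_cast<int>(keySizes.size());
        int total = 0;
        for (int size : keySizes)
            total += size;

        // Rightmost insert -> keep ~90% on the left; otherwise split evenly.
        const int rightSizeLimit = total / (keypos == n ? 10 : 2);

        int split = 0;
        int rightSize = 0;
        for (int i = n - 1; i >= 0; --i) {
            rightSize += keySizes[i];
            if (rightSize > rightSizeLimit) {
                split = i;
                break;
            }
        }
        // Never leave either side empty.
        if (split < 1)
            split = 1;
        else if (split > n - 2)
            split = n - 2;
        return split;
    }

    int main() {
        std::vector<int> sizes(10, 100);  // ten equally sized keys
        std::printf("middle insert    -> split at %d\n", toySplitPos(sizes, 5));   // 4
        std::printf("rightmost insert -> split at %d\n", toySplitPos(sizes, 10));  // 8
        return 0;
    }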
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::reserveKeysFront(BucketType* bucket, int nAdd) {
+ invariant(bucket->emptySize >= int(sizeof(KeyHeaderType) * nAdd));
+ bucket->emptySize -= sizeof(KeyHeaderType) * nAdd;
+ for (int i = bucket->n - 1; i > -1; --i) {
+ getKeyHeader(bucket, i + nAdd) = getKeyHeader(bucket, i);
+ }
+ bucket->n += nAdd;
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::setKey(BucketType* bucket,
+ int i,
+ const DiskLoc recordLoc,
+ const KeyDataType& key,
+ const DiskLoc prevChildBucket) {
+ KeyHeaderType& kn = getKeyHeader(bucket, i);
+ kn.recordLoc = recordLoc;
+ kn.prevChildBucket = prevChildBucket;
+ short ofs = (short)_alloc(bucket, key.dataSize());
+ kn.setKeyDataOfs(ofs);
+ char* p = dataAt(bucket, ofs);
+ memcpy(p, key.data(), key.dataSize());
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::dropFront(BucketType* bucket, int nDrop, int& refpos) {
+ for (int i = nDrop; i < bucket->n; ++i) {
+ getKeyHeader(bucket, i - nDrop) = getKeyHeader(bucket, i);
+ }
+ bucket->n -= nDrop;
+ setNotPacked(bucket);
+ _packReadyForMod(bucket, refpos);
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
+ DiskLoc* locInOut,
+ int* keyOfsInOut,
+ const IndexSeekPoint& seekPoint,
+ int direction) const {
+ pair<DiskLoc, int> unused;
+
+ customLocate(txn, locInOut, keyOfsInOut, seekPoint, direction, unused);
+ skipUnusedKeys(txn, locInOut, keyOfsInOut, direction);
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
+ DiskLoc* bucketLocInOut,
+ int* posInOut,
+ int direction) const {
+ *bucketLocInOut = advance(txn, *bucketLocInOut, posInOut, direction);
+ skipUnusedKeys(txn, bucketLocInOut, posInOut, direction);
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::skipUnusedKeys(OperationContext* txn,
+ DiskLoc* loc,
+ int* pos,
+ int direction) const {
+ while (!loc->isNull() && !keyIsUsed(txn, *loc, *pos)) {
+ *loc = advance(txn, *loc, pos, direction);
}
+}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::skipUnusedKeys(OperationContext* txn,
- DiskLoc* loc,
- int* pos,
- int direction) const {
- while (!loc->isNull() && !keyIsUsed(txn, *loc, *pos)) {
- *loc = advance(txn, *loc, pos, direction);
- }
- }
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* txn,
+ DiskLoc* thisLocInOut,
+ int* keyOfsInOut,
+ const IndexSeekPoint& seekPoint,
+ int direction) const {
+ advanceToImpl(txn, thisLocInOut, keyOfsInOut, seekPoint, direction);
+ skipUnusedKeys(txn, thisLocInOut, keyOfsInOut, direction);
+}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::advanceTo(OperationContext* txn,
+/**
+ * Find the smallest (forward scan) or biggest (reverse scan) value greater-or-equal /
+ * less-or-equal to the seek point.
+ *
+ * On entry, the key at thisLoc + keyOfs must be strictly less than (forward) or strictly
+ * greater than (reverse) the seek point.
+ *
+ * All the direction checks below allowed me to refactor the code, but possibly separate forward
+ * and reverse implementations would be more efficient
+ */
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
DiskLoc* thisLocInOut,
int* keyOfsInOut,
const IndexSeekPoint& seekPoint,
int direction) const {
+ BucketType* bucket = getBucket(txn, *thisLocInOut);
- advanceToImpl(txn, thisLocInOut, keyOfsInOut, seekPoint, direction);
- skipUnusedKeys(txn, thisLocInOut, keyOfsInOut, direction);
- }
-
- /**
- * find smallest/biggest value greater-equal/less-equal than specified
- *
- * starting thisLoc + keyOfs will be strictly less than/strictly greater than
- * keyBegin/keyBeginLen/keyEnd
- *
- * All the direction checks below allowed me to refactor the code, but possibly separate forward
- * and reverse implementations would be more efficient
- */
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::advanceToImpl(OperationContext* txn,
- DiskLoc* thisLocInOut,
- int* keyOfsInOut,
- const IndexSeekPoint& seekPoint,
- int direction) const {
-
- BucketType* bucket = getBucket(txn, *thisLocInOut);
-
- int l, h;
- bool dontGoUp;
-
- if (direction > 0) {
- l = *keyOfsInOut;
- h = bucket->n - 1;
- int cmpResult = customBSONCmp(getFullKey(bucket, h).data.toBson(),
- seekPoint,
- direction);
- dontGoUp = (cmpResult >= 0);
- }
- else {
- l = 0;
- h = *keyOfsInOut;
- int cmpResult = customBSONCmp(getFullKey(bucket, l).data.toBson(),
- seekPoint,
- direction);
- dontGoUp = (cmpResult <= 0);
- }
+ int l, h;
+ bool dontGoUp;
- pair<DiskLoc, int> bestParent;
-
- if (dontGoUp) {
- // this comparison result assures h > l
- if (!customFind(txn,
- l,
- h,
- seekPoint,
- direction,
- thisLocInOut,
- keyOfsInOut,
- bestParent)) {
- return;
- }
+ if (direction > 0) {
+ l = *keyOfsInOut;
+ h = bucket->n - 1;
+ int cmpResult = customBSONCmp(getFullKey(bucket, h).data.toBson(), seekPoint, direction);
+ dontGoUp = (cmpResult >= 0);
+ } else {
+ l = 0;
+ h = *keyOfsInOut;
+ int cmpResult = customBSONCmp(getFullKey(bucket, l).data.toBson(), seekPoint, direction);
+ dontGoUp = (cmpResult <= 0);
+ }
+
+ pair<DiskLoc, int> bestParent;
+
+ if (dontGoUp) {
+ // this comparison result assures h > l
+ if (!customFind(txn, l, h, seekPoint, direction, thisLocInOut, keyOfsInOut, bestParent)) {
+ return;
}
- else {
- // go up parents until rightmost/leftmost node is >=/<= target or at top
- while (!bucket->parent.isNull()) {
- *thisLocInOut = bucket->parent;
- bucket = getBucket(txn,
- *thisLocInOut);
-
- if (direction > 0) {
- if (customBSONCmp(getFullKey(bucket, bucket->n - 1).data.toBson(),
- seekPoint,
- direction) >= 0 ) {
- break;
- }
+ } else {
+ // go up parents until rightmost/leftmost node is >=/<= target or at top
+ while (!bucket->parent.isNull()) {
+ *thisLocInOut = bucket->parent;
+ bucket = getBucket(txn, *thisLocInOut);
+
+ if (direction > 0) {
+ if (customBSONCmp(getFullKey(bucket, bucket->n - 1).data.toBson(),
+ seekPoint,
+ direction) >= 0) {
+ break;
}
- else {
- if (customBSONCmp(getFullKey(bucket, 0).data.toBson(),
- seekPoint,
- direction) <= 0) {
- break;
- }
+ } else {
+ if (customBSONCmp(getFullKey(bucket, 0).data.toBson(), seekPoint, direction) <= 0) {
+ break;
}
}
}
-
- customLocate(txn, thisLocInOut, keyOfsInOut, seekPoint, direction, bestParent);
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
- DiskLoc* locInOut,
- int* keyOfsInOut,
- const IndexSeekPoint& seekPoint,
- int direction,
- pair<DiskLoc, int>& bestParent) const {
+ customLocate(txn, thisLocInOut, keyOfsInOut, seekPoint, direction, bestParent);
+}
- BucketType* bucket = getBucket(txn, *locInOut);
-
- if (0 == bucket->n) {
- *locInOut = DiskLoc();
- return;
- }
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::customLocate(OperationContext* txn,
+ DiskLoc* locInOut,
+ int* keyOfsInOut,
+ const IndexSeekPoint& seekPoint,
+ int direction,
+ pair<DiskLoc, int>& bestParent) const {
+ BucketType* bucket = getBucket(txn, *locInOut);
- // go down until find smallest/biggest >=/<= target
- for (;;) {
- int l = 0;
- int h = bucket->n - 1;
+ if (0 == bucket->n) {
+ *locInOut = DiskLoc();
+ return;
+ }
- // +direction: 0, -direction: h
- int z = (direction > 0) ? 0 : h;
+ // go down until find smallest/biggest >=/<= target
+ for (;;) {
+ int l = 0;
+ int h = bucket->n - 1;
- // leftmost/rightmost key may possibly be >=/<= search key
- int res = customBSONCmp(getFullKey(bucket, z).data.toBson(), seekPoint, direction);
- if (direction * res >= 0) {
- DiskLoc next;
- *keyOfsInOut = z;
+ // +direction: 0, -direction: h
+ int z = (direction > 0) ? 0 : h;
- if (direction > 0) {
- dassert(z == 0);
- next = getKeyHeader(bucket, 0).prevChildBucket;
- }
- else {
- next = bucket->nextChild;
- }
+ // leftmost/rightmost key may possibly be >=/<= search key
+ int res = customBSONCmp(getFullKey(bucket, z).data.toBson(), seekPoint, direction);
+ if (direction * res >= 0) {
+ DiskLoc next;
+ *keyOfsInOut = z;
- if (!next.isNull()) {
- bestParent = pair<DiskLoc, int>(*locInOut, *keyOfsInOut);
- *locInOut = next;
- bucket = getBucket(txn, *locInOut);
- continue;
- }
- else {
- return;
- }
+ if (direction > 0) {
+ dassert(z == 0);
+ next = getKeyHeader(bucket, 0).prevChildBucket;
+ } else {
+ next = bucket->nextChild;
}
- res = customBSONCmp(getFullKey(bucket, h - z).data.toBson(), seekPoint, direction);
- if (direction * res < 0) {
- DiskLoc next;
- if (direction > 0) {
- next = bucket->nextChild;
- }
- else {
- next = getKeyHeader(bucket, 0).prevChildBucket;
- }
+ if (!next.isNull()) {
+ bestParent = pair<DiskLoc, int>(*locInOut, *keyOfsInOut);
+ *locInOut = next;
+ bucket = getBucket(txn, *locInOut);
+ continue;
+ } else {
+ return;
+ }
+ }
- if (next.isNull()) {
- // if bestParent is null, we've hit the end and locInOut gets set to DiskLoc()
- *locInOut = bestParent.first;
- *keyOfsInOut = bestParent.second;
- return;
- }
- else {
- *locInOut = next;
- bucket = getBucket(txn, *locInOut);
- continue;
- }
+ res = customBSONCmp(getFullKey(bucket, h - z).data.toBson(), seekPoint, direction);
+ if (direction * res < 0) {
+ DiskLoc next;
+ if (direction > 0) {
+ next = bucket->nextChild;
+ } else {
+ next = getKeyHeader(bucket, 0).prevChildBucket;
}
- if (!customFind(txn,
- l,
- h,
- seekPoint,
- direction,
- locInOut,
- keyOfsInOut,
- bestParent)) {
+ if (next.isNull()) {
+ // if bestParent is null, we've hit the end and locInOut gets set to DiskLoc()
+ *locInOut = bestParent.first;
+ *keyOfsInOut = bestParent.second;
return;
+ } else {
+ *locInOut = next;
+ bucket = getBucket(txn, *locInOut);
+ continue;
}
-
- bucket = getBucket(txn, *locInOut);
}
- }
-
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::customFind(OperationContext* txn,
- int low,
- int high,
- const IndexSeekPoint& seekPoint,
- int direction,
- DiskLoc* thisLocInOut,
- int* keyOfsInOut,
- pair<DiskLoc, int>& bestParent) const {
- const BucketType* bucket = getBucket(txn, *thisLocInOut);
+ if (!customFind(txn, l, h, seekPoint, direction, locInOut, keyOfsInOut, bestParent)) {
+ return;
+ }
- for (;;) {
- if (low + 1 == high) {
- *keyOfsInOut = (direction > 0) ? high : low;
- DiskLoc next = getKeyHeader(bucket, high).prevChildBucket;
- if (!next.isNull()) {
- bestParent = make_pair(*thisLocInOut, *keyOfsInOut);
- *thisLocInOut = next;
- return true;
- }
- else {
- return false;
- }
+ bucket = getBucket(txn, *locInOut);
+ }
+}
+
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::customFind(OperationContext* txn,
+ int low,
+ int high,
+ const IndexSeekPoint& seekPoint,
+ int direction,
+ DiskLoc* thisLocInOut,
+ int* keyOfsInOut,
+ pair<DiskLoc, int>& bestParent) const {
+ const BucketType* bucket = getBucket(txn, *thisLocInOut);
+
+ for (;;) {
+ if (low + 1 == high) {
+ *keyOfsInOut = (direction > 0) ? high : low;
+ DiskLoc next = getKeyHeader(bucket, high).prevChildBucket;
+ if (!next.isNull()) {
+ bestParent = make_pair(*thisLocInOut, *keyOfsInOut);
+ *thisLocInOut = next;
+ return true;
+ } else {
+ return false;
}
+ }
- int middle = low + (high - low) / 2;
+ int middle = low + (high - low) / 2;
- int cmp = customBSONCmp(getFullKey(bucket, middle).data.toBson(), seekPoint, direction);
- if (cmp < 0) {
+ int cmp = customBSONCmp(getFullKey(bucket, middle).data.toBson(), seekPoint, direction);
+ if (cmp < 0) {
+ low = middle;
+ } else if (cmp > 0) {
+ high = middle;
+ } else {
+ if (direction < 0) {
low = middle;
- }
- else if (cmp > 0) {
+ } else {
high = middle;
}
- else {
- if (direction < 0) {
- low = middle;
- }
- else {
- high = middle;
- }
- }
}
}
+}
- /**
- * NOTE: Currently the Ordering implementation assumes a compound index will not have more keys
- * than an unsigned variable has bits. The same assumption is used in the implementation below
- * with respect to the 'mask' variable.
- *
- * 'l' is a regular bsonobj
- *
- * 'rBegin' is composed partly of an existing bsonobj, and the remaining keys are taken from a
- * vector of elements that frequently changes
- *
- * see https://jira.mongodb.org/browse/SERVER-371
- */
- // static
- template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::customBSONCmp(const BSONObj& left,
- const IndexSeekPoint& right,
- int direction) const {
- // XXX: make this readable
- dassert(right.keySuffix.size() == right.suffixInclusive.size());
-
- BSONObjIterator ll( left );
- BSONObjIterator rr( right.keyPrefix );
- unsigned mask = 1;
- size_t i = 0;
- for( ; i < size_t(right.prefixLen); ++i, mask <<= 1 ) {
- BSONElement lll = ll.next();
- BSONElement rrr = rr.next();
-
- int x = lll.woCompare( rrr, false );
- if ( _ordering.descending( mask ) )
- x = -x;
- if ( x != 0 )
- return x;
- }
- if (right.prefixExclusive) {
+/**
+ * NOTE: Currently the Ordering implementation assumes a compound index will not have more keys
+ * than an unsigned variable has bits. The same assumption is used in the implementation below
+ * with respect to the 'mask' variable.
+ *
+ * 'left' is a regular BSONObj.
+ *
+ * 'right' (the IndexSeekPoint) is composed partly of an existing BSONObj, and the remaining
+ * keys are taken from a vector of elements that frequently changes.
+ *
+ * see https://jira.mongodb.org/browse/SERVER-371
+ */
+// static
+template <class BtreeLayout>
+int BtreeLogic<BtreeLayout>::customBSONCmp(const BSONObj& left,
+ const IndexSeekPoint& right,
+ int direction) const {
+ // XXX: make this readable
+ dassert(right.keySuffix.size() == right.suffixInclusive.size());
+
+ BSONObjIterator ll(left);
+ BSONObjIterator rr(right.keyPrefix);
+ unsigned mask = 1;
+ size_t i = 0;
+ for (; i < size_t(right.prefixLen); ++i, mask <<= 1) {
+ BSONElement lll = ll.next();
+ BSONElement rrr = rr.next();
+
+ int x = lll.woCompare(rrr, false);
+ if (_ordering.descending(mask))
+ x = -x;
+ if (x != 0)
+ return x;
+ }
+ if (right.prefixExclusive) {
+ return -direction;
+ }
+ for (; i < right.keySuffix.size(); ++i, mask <<= 1) {
+ if (!ll.more())
+ return -direction;
+
+ BSONElement lll = ll.next();
+ BSONElement rrr = *right.keySuffix[i];
+ int x = lll.woCompare(rrr, false);
+ if (_ordering.descending(mask))
+ x = -x;
+ if (x != 0)
+ return x;
+ if (!right.suffixInclusive[i]) {
return -direction;
}
- for( ; i < right.keySuffix.size(); ++i, mask <<= 1 ) {
- if (!ll.more())
- return -direction;
-
- BSONElement lll = ll.next();
- BSONElement rrr = *right.keySuffix[i];
- int x = lll.woCompare( rrr, false );
- if ( _ordering.descending( mask ) )
- x = -x;
- if ( x != 0 )
- return x;
- if ( !right.suffixInclusive[i] ) {
- return -direction;
- }
- }
- return ll.more() ? direction : 0;
}
+ return ll.more() ? direction : 0;
+}
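A simplified, standalone model of the comparison contract documented above, assuming ascending order and using ints instead of BSONElements (SeekPoint and seekCmp are illustrative names, not the real types): the key is compared field by field against the prefix, an exclusive prefix or suffix bound makes an equal key sort "before" the target in the scan direction, and a key with extra trailing fields sorts "after" it.

#include <cassert>
#include <vector>

struct SeekPoint {
    std::vector<int> prefix;         // fields compared exactly first
    bool prefixExclusive = false;    // if true, keys equal on the prefix sort before the target
    std::vector<int> suffix;         // remaining per-field bounds
    std::vector<bool> suffixInclusive;
};

// Returns <0, 0, >0 like the original; 'direction' is +1 for forward scans, -1 for reverse.
// Assumes the key has at least as many fields as the prefix.
int seekCmp(const std::vector<int>& key, const SeekPoint& sp, int direction) {
    std::size_t i = 0;
    for (; i < sp.prefix.size(); ++i) {
        if (key[i] != sp.prefix[i])
            return key[i] < sp.prefix[i] ? -1 : 1;
    }
    if (sp.prefixExclusive)
        return -direction;
    for (std::size_t j = 0; j < sp.suffix.size(); ++j, ++i) {
        if (i >= key.size())
            return -direction;
        if (key[i] != sp.suffix[j])
            return key[i] < sp.suffix[j] ? -1 : 1;
        if (!sp.suffixInclusive[j])
            return -direction;
    }
    return i < key.size() ? direction : 0;
}

int main() {
    SeekPoint sp{{5}, false, {10}, {false}};  // seek {a: 5, b: > 10} going forward
    assert(seekCmp({5, 10}, sp, 1) < 0);      // equal on an exclusive bound: still "before"
    assert(seekCmp({5, 11}, sp, 1) > 0);
    assert(seekCmp({4, 99}, sp, 1) < 0);
    return 0;
}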
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::exists(OperationContext* txn, const KeyDataType& key) const {
- int position = 0;
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::exists(OperationContext* txn, const KeyDataType& key) const {
+ int position = 0;
- // Find the DiskLoc
- bool found;
+ // Find the DiskLoc
+ bool found;
- DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
+ DiskLoc bucket = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
- while (!bucket.isNull()) {
- FullKey fullKey = getFullKey(getBucket(txn, bucket), position);
- if (fullKey.header.isUsed()) {
- return fullKey.data.woEqual(key);
- }
- bucket = advance(txn, bucket, &position, 1);
+ while (!bucket.isNull()) {
+ FullKey fullKey = getFullKey(getBucket(txn, bucket), position);
+ if (fullKey.header.isUsed()) {
+ return fullKey.data.woEqual(key);
}
-
- return false;
+ bucket = advance(txn, bucket, &position, 1);
}
- template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* txn,
- const BSONObj& key,
- const DiskLoc& loc) const {
- KeyDataOwnedType theKey(key);
- if (!wouldCreateDup(txn, theKey, loc)) {
- return Status::OK();
- }
+ return false;
+}
- return Status(ErrorCodes::DuplicateKey, dupKeyError(theKey));
+template <class BtreeLayout>
+Status BtreeLogic<BtreeLayout>::dupKeyCheck(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& loc) const {
+ KeyDataOwnedType theKey(key);
+ if (!wouldCreateDup(txn, theKey, loc)) {
+ return Status::OK();
}
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* txn,
- const KeyDataType& key,
- const DiskLoc self) const {
- int position;
- bool found;
-
- DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
+ return Status(ErrorCodes::DuplicateKey, dupKeyError(theKey));
+}
- while (!posLoc.isNull()) {
- FullKey fullKey = getFullKey(getBucket(txn, posLoc), position);
- if (fullKey.header.isUsed()) {
- // TODO: we may not need fullKey.data until we know fullKey.header.isUsed() here
- // and elsewhere.
- if (fullKey.data.woEqual(key)) {
- return fullKey.recordLoc != self;
- }
- break;
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::wouldCreateDup(OperationContext* txn,
+ const KeyDataType& key,
+ const DiskLoc self) const {
+ int position;
+ bool found;
+
+ DiskLoc posLoc = _locate(txn, getRootLoc(txn), key, &position, &found, DiskLoc::min(), 1);
+
+ while (!posLoc.isNull()) {
+ FullKey fullKey = getFullKey(getBucket(txn, posLoc), position);
+ if (fullKey.header.isUsed()) {
+ // TODO: we may not need fullKey.data until we know fullKey.header.isUsed() here
+ // and elsewhere.
+ if (fullKey.data.woEqual(key)) {
+ return fullKey.recordLoc != self;
}
-
- posLoc = advance(txn, posLoc, &position, 1);
+ break;
}
- return false;
+
+ posLoc = advance(txn, posLoc, &position, 1);
}
+ return false;
+}
- template <class BtreeLayout>
- string BtreeLogic<BtreeLayout>::dupKeyError(const KeyDataType& key) const {
- stringstream ss;
- ss << "E11000 duplicate key error ";
- ss << "index: " << _indexName << " ";
- ss << "dup key: " << key.toString();
- return ss.str();
- }
-
- /**
- * Find a key within this btree bucket.
- *
- * When duplicate keys are allowed, we use the DiskLoc of the record as if it were part of the
- * key. That assures that even when there are many duplicates (e.g., 1 million) for a key, our
- * performance is still good.
- *
- * assertIfDup: if the key exists (ignoring the recordLoc), uassert
- *
- * pos: for existing keys k0...kn-1.
- * returns # it goes BEFORE. so key[pos-1] < key < key[pos]
- * returns n if it goes after the last existing key.
- * note result might be an Unused location!
- */
- template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
- BucketType* bucket,
- const KeyDataType& key,
- const DiskLoc& recordLoc,
- bool errorIfDup,
- int* keyPositionOut,
- bool* foundOut) const {
-
- // XXX: fix the ctor for DiskLoc56bit so we can just convert w/o assignment operator
- LocType genericRecordLoc;
- genericRecordLoc = recordLoc;
-
- bool dupsCheckedYet = false;
-
- int low = 0;
- int high = bucket->n - 1;
- int middle = (low + high) / 2;
-
- while (low <= high) {
- FullKey fullKey = getFullKey(bucket, middle);
- int cmp = key.woCompare(fullKey.data, _ordering);
-
- // The key data is the same.
- if (0 == cmp) {
- // Found the key in this bucket. If we're checking for dups...
- if (errorIfDup) {
- if (fullKey.header.isUnused()) {
- // It's ok that the key is there if it is unused. We need to check that
- // there aren't other entries for the key then. as it is very rare that
- // we get here, we don't put any coding effort in here to make this
- // particularly fast
- if (!dupsCheckedYet) {
- // This is expensive and we only want to do it once(? -- when would
- // it happen twice).
- dupsCheckedYet = true;
- if (exists(txn, key)) {
- if (wouldCreateDup(txn, key, genericRecordLoc)) {
- return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
- }
- else {
- return Status(ErrorCodes::DuplicateKeyValue,
- "key/value already in index");
- }
+template <class BtreeLayout>
+string BtreeLogic<BtreeLayout>::dupKeyError(const KeyDataType& key) const {
+ stringstream ss;
+ ss << "E11000 duplicate key error ";
+ ss << "index: " << _indexName << " ";
+ ss << "dup key: " << key.toString();
+ return ss.str();
+}
+
+/**
+ * Find a key within this btree bucket.
+ *
+ * When duplicate keys are allowed, we use the DiskLoc of the record as if it were part of the
+ * key. That assures that even when there are many duplicates (e.g., 1 million) for a key, our
+ * performance is still good.
+ *
+ * errorIfDup: if the key exists (ignoring the recordLoc), a DuplicateKey error is returned
+ *
+ * pos: for existing keys k0...kn-1.
+ * returns # it goes BEFORE. so key[pos-1] < key < key[pos]
+ * returns n if it goes after the last existing key.
+ * note result might be an Unused location!
+ */
+template <class BtreeLayout>
+Status BtreeLogic<BtreeLayout>::_find(OperationContext* txn,
+ BucketType* bucket,
+ const KeyDataType& key,
+ const DiskLoc& recordLoc,
+ bool errorIfDup,
+ int* keyPositionOut,
+ bool* foundOut) const {
+ // XXX: fix the ctor for DiskLoc56bit so we can just convert w/o assignment operator
+ LocType genericRecordLoc;
+ genericRecordLoc = recordLoc;
+
+ bool dupsCheckedYet = false;
+
+ int low = 0;
+ int high = bucket->n - 1;
+ int middle = (low + high) / 2;
+
+ while (low <= high) {
+ FullKey fullKey = getFullKey(bucket, middle);
+ int cmp = key.woCompare(fullKey.data, _ordering);
+
+ // The key data is the same.
+ if (0 == cmp) {
+ // Found the key in this bucket. If we're checking for dups...
+ if (errorIfDup) {
+ if (fullKey.header.isUnused()) {
+ // It's ok that the key is there if it is unused. We need to check that
+                    // there aren't other entries for the key then. As it is very rare that
+ // we get here, we don't put any coding effort in here to make this
+ // particularly fast
+ if (!dupsCheckedYet) {
+ // This is expensive and we only want to do it once(? -- when would
+ // it happen twice).
+ dupsCheckedYet = true;
+ if (exists(txn, key)) {
+ if (wouldCreateDup(txn, key, genericRecordLoc)) {
+ return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
+ } else {
+ return Status(ErrorCodes::DuplicateKeyValue,
+ "key/value already in index");
}
}
}
- else {
- if (fullKey.recordLoc == recordLoc) {
- return Status(ErrorCodes::DuplicateKeyValue,
- "key/value already in index");
- }
- else {
- return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
- }
+ } else {
+ if (fullKey.recordLoc == recordLoc) {
+ return Status(ErrorCodes::DuplicateKeyValue, "key/value already in index");
+ } else {
+ return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
}
}
+ }
- // If we're here dup keys are allowed, or the key is a dup but unused.
- LocType recordLocCopy = fullKey.recordLoc;
-
- // We clear this bit so we can test equality without the used bit messing us up.
- // XXX: document this
- // XXX: kill this GETOFS stuff
- recordLocCopy.GETOFS() &= ~1;
+ // If we're here dup keys are allowed, or the key is a dup but unused.
+ LocType recordLocCopy = fullKey.recordLoc;
- // Set 'cmp' to the comparison w/the DiskLoc and fall through below.
- cmp = recordLoc.compare(recordLocCopy);
- }
+ // We clear this bit so we can test equality without the used bit messing us up.
+ // XXX: document this
+ // XXX: kill this GETOFS stuff
+ recordLocCopy.GETOFS() &= ~1;
- if (cmp < 0) {
- high = middle - 1;
- }
- else if (cmp > 0) {
- low = middle + 1;
- }
- else {
- // Found it!
- *keyPositionOut = middle;
- *foundOut = true;
- return Status::OK();
- }
+ // Set 'cmp' to the comparison w/the DiskLoc and fall through below.
+ cmp = recordLoc.compare(recordLocCopy);
+ }
- middle = (low + high) / 2;
+ if (cmp < 0) {
+ high = middle - 1;
+ } else if (cmp > 0) {
+ low = middle + 1;
+ } else {
+ // Found it!
+ *keyPositionOut = middle;
+ *foundOut = true;
+ return Status::OK();
}
- // Not found.
- *keyPositionOut = low;
+ middle = (low + high) / 2;
+ }
+
+ // Not found.
+ *keyPositionOut = low;
- // Some debugging checks.
- if (low != bucket->n) {
- wassert(key.woCompare(getFullKey(bucket, low).data, _ordering) <= 0);
+ // Some debugging checks.
+ if (low != bucket->n) {
+ wassert(key.woCompare(getFullKey(bucket, low).data, _ordering) <= 0);
- if (low > 0) {
- if (getFullKey(bucket, low - 1).data.woCompare(key, _ordering) > 0) {
- DEV {
- log() << key.toString() << endl;
- log() << getFullKey(bucket, low - 1).data.toString() << endl;
- }
- wassert(false);
+ if (low > 0) {
+ if (getFullKey(bucket, low - 1).data.woCompare(key, _ordering) > 0) {
+ DEV {
+ log() << key.toString() << endl;
+ log() << getFullKey(bucket, low - 1).data.toString() << endl;
}
+ wassert(false);
}
}
-
- *foundOut = false;
- return Status::OK();
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::delBucket(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc) {
- invariant(bucketLoc != getRootLoc(txn));
-
- _cursorRegistry->invalidateCursorsForBucket(bucketLoc);
-
- BucketType* p = getBucket(txn, bucket->parent);
- int parentIdx = indexInParent(txn, bucket, bucketLoc);
- *txn->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
- deallocBucket(txn, bucket, bucketLoc);
- }
+ *foundOut = false;
+ return Status::OK();
+}
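A standalone sketch of the search order _find() relies on, with std::pair<int, int> standing in for (key, recordLoc) and the duplicate-key error handling omitted: entries in a bucket are ordered by key and then by record location, so the binary search breaks key ties on the location and reports the insertion point when the entry is absent.

#include <cassert>
#include <utility>
#include <vector>

using Entry = std::pair<int, int>;  // (key, recordLoc)

// Returns true and sets posOut to the match, or returns false and sets posOut to the
// insertion point (the slot the new entry would go before), like *keyPositionOut above.
bool findInBucket(const std::vector<Entry>& bucket, const Entry& target, int* posOut) {
    int low = 0;
    int high = static_cast<int>(bucket.size()) - 1;
    while (low <= high) {
        int middle = (low + high) / 2;
        int cmp = (target.first != bucket[middle].first)
            ? (target.first < bucket[middle].first ? -1 : 1)
            : (target.second == bucket[middle].second
                   ? 0
                   : (target.second < bucket[middle].second ? -1 : 1));
        if (cmp < 0) {
            high = middle - 1;
        } else if (cmp > 0) {
            low = middle + 1;
        } else {
            *posOut = middle;
            return true;
        }
    }
    *posOut = low;  // not found: this is where the entry would be inserted
    return false;
}

int main() {
    std::vector<Entry> bucket = {{1, 7}, {2, 3}, {2, 9}, {5, 1}};
    int pos = -1;
    assert(findInBucket(bucket, {2, 9}, &pos) && pos == 2);
    assert(!findInBucket(bucket, {2, 5}, &pos) && pos == 2);  // goes before {2, 9}
    assert(!findInBucket(bucket, {9, 0}, &pos) && pos == 4);  // after the last key
    return 0;
}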
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::deallocBucket(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc) {
- bucket->n = BtreeLayout::INVALID_N_SENTINEL;
- bucket->parent.Null();
- _recordStore->deleteRecord(txn, bucketLoc.toRecordId());
- }
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::delBucket(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc) {
+ invariant(bucketLoc != getRootLoc(txn));
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::restorePosition(OperationContext* txn,
- const BSONObj& savedKey,
- const DiskLoc& savedLoc,
- int direction,
- DiskLoc* bucketLocInOut,
- int* keyOffsetInOut) const {
+ _cursorRegistry->invalidateCursorsForBucket(bucketLoc);
- // The caller has to ensure validity of the saved cursor using the SavedCursorRegistry
- BucketType* bucket = getBucket(txn, *bucketLocInOut);
- invariant(bucket);
- invariant(BtreeLayout::INVALID_N_SENTINEL != bucket->n);
+ BucketType* p = getBucket(txn, bucket->parent);
+ int parentIdx = indexInParent(txn, bucket, bucketLoc);
+ *txn->recoveryUnit()->writing(&childLocForPos(p, parentIdx)) = DiskLoc();
+ deallocBucket(txn, bucket, bucketLoc);
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::deallocBucket(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc) {
+ bucket->n = BtreeLayout::INVALID_N_SENTINEL;
+ bucket->parent.Null();
+ _recordStore->deleteRecord(txn, bucketLoc.toRecordId());
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::restorePosition(OperationContext* txn,
+ const BSONObj& savedKey,
+ const DiskLoc& savedLoc,
+ int direction,
+ DiskLoc* bucketLocInOut,
+ int* keyOffsetInOut) const {
+ // The caller has to ensure validity of the saved cursor using the SavedCursorRegistry
+ BucketType* bucket = getBucket(txn, *bucketLocInOut);
+ invariant(bucket);
+ invariant(BtreeLayout::INVALID_N_SENTINEL != bucket->n);
+
+ if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
+ skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
+ return;
+ }
+
+ if (*keyOffsetInOut > 0) {
+ (*keyOffsetInOut)--;
if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
return;
}
+ }
- if (*keyOffsetInOut > 0) {
- (*keyOffsetInOut)--;
- if (_keyIsAt(savedKey, savedLoc, bucket, *keyOffsetInOut)) {
- skipUnusedKeys(txn, bucketLocInOut, keyOffsetInOut, direction);
- return;
- }
- }
+ locate(txn, savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
+}
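A small sketch of the restorePosition() probing strategy on a plain sorted vector (restorePos is a made-up name; std::lower_bound stands in for locate()): first check whether the saved entry is still at the saved offset, then at offset - 1 in case one entry in front of it was removed, and only fall back to a full relocation when both probes miss.

#include <algorithm>
#include <cassert>
#include <vector>

int restorePos(const std::vector<int>& keys, int savedKey, int savedOffset) {
    auto isAt = [&](int ofs) {
        return ofs >= 0 && ofs < static_cast<int>(keys.size()) && keys[ofs] == savedKey;
    };
    if (isAt(savedOffset))
        return savedOffset;  // cheap case: nothing moved
    if (savedOffset > 0 && isAt(savedOffset - 1))
        return savedOffset - 1;  // one key in front of us went away
    // Full relocation: first position not less than the saved key.
    return static_cast<int>(std::lower_bound(keys.begin(), keys.end(), savedKey) - keys.begin());
}

int main() {
    std::vector<int> keys = {10, 20, 30, 40};
    assert(restorePos(keys, 30, 2) == 2);  // unchanged
    assert(restorePos(keys, 30, 3) == 2);  // a key before it was removed
    assert(restorePos(keys, 25, 3) == 2);  // gone entirely: relocate to its successor
    return 0;
}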
- locate(txn, savedKey, savedLoc, direction, keyOffsetInOut, bucketLocInOut);
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::_keyIsAt(const BSONObj& savedKey,
+ const DiskLoc& savedLoc,
+ BucketType* bucket,
+ int keyPos) const {
+ if (keyPos >= bucket->n) {
+ return false;
}
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::_keyIsAt(const BSONObj& savedKey,
- const DiskLoc& savedLoc,
- BucketType* bucket,
- int keyPos) const {
- if (keyPos >= bucket->n) {
- return false;
- }
-
- FullKey key = getFullKey(bucket, keyPos);
- if (!key.data.toBson().binaryEqual(savedKey)) {
- return false;
- }
- return key.header.recordLoc == savedLoc;
+ FullKey key = getFullKey(bucket, keyPos);
+ if (!key.data.toBson().binaryEqual(savedKey)) {
+ return false;
}
+ return key.header.recordLoc == savedLoc;
+}
- /**
- * May delete the bucket 'bucket' rendering 'bucketLoc' invalid.
- */
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int p) {
- invariant(bucket->n > 0);
- DiskLoc left = childLocForPos(bucket, p);
- if (bucket->n == 1) {
- if (left.isNull() && bucket->nextChild.isNull()) {
- _delKeyAtPos(bucket, p);
- if (isHead(bucket)) {
- // we don't delete the top bucket ever
- }
- else {
- if (!mayBalanceWithNeighbors(txn, bucket, bucketLoc)) {
- // An empty bucket is only allowed as a txnient state. If
- // there are no neighbors to balance with, we delete ourself.
- // This condition is only expected in legacy btrees.
- delBucket(txn, bucket, bucketLoc);
- }
+/**
+ * May delete the bucket 'bucket' rendering 'bucketLoc' invalid.
+ */
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::delKeyAtPos(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int p) {
+ invariant(bucket->n > 0);
+ DiskLoc left = childLocForPos(bucket, p);
+ if (bucket->n == 1) {
+ if (left.isNull() && bucket->nextChild.isNull()) {
+ _delKeyAtPos(bucket, p);
+ if (isHead(bucket)) {
+ // we don't delete the top bucket ever
+ } else {
+ if (!mayBalanceWithNeighbors(txn, bucket, bucketLoc)) {
+                    // An empty bucket is only allowed as a transient state. If
+                    // there are no neighbors to balance with, we delete ourselves.
+ // This condition is only expected in legacy btrees.
+ delBucket(txn, bucket, bucketLoc);
}
- return;
}
- deleteInternalKey(txn, bucket, bucketLoc, p);
return;
}
-
- if (left.isNull()) {
- _delKeyAtPos(bucket, p);
- mayBalanceWithNeighbors(txn, bucket, bucketLoc);
- }
- else {
- deleteInternalKey(txn, bucket, bucketLoc, p);
- }
+ deleteInternalKey(txn, bucket, bucketLoc, p);
+ return;
}
- /**
- * This function replaces the specified key (k) by either the prev or next key in the btree
- * (k'). We require that k have either a left or right child. If k has a left child, we set k'
- * to the prev key of k, which must be a leaf present in the left child. If k does not have a
- * left child, we set k' to the next key of k, which must be a leaf present in the right child.
- * When we replace k with k', we copy k' over k (which may cause a split) and then remove k'
- * from its original location. Because k' is stored in a descendent of k, replacing k by k'
- * will not modify the storage location of the original k', and we can easily remove k' from its
- * original location.
- *
- * This function is only needed in cases where k has a left or right child; in other cases a
- * simpler key removal implementation is possible.
- *
- * NOTE on noncompliant BtreeBuilder btrees: It is possible (though likely rare) for btrees
- * created by BtreeBuilder to have k' that is not a leaf, see SERVER-2732. These cases are
- * handled in the same manner as described in the "legacy btree structures" note below.
- *
- * NOTE on legacy btree structures: In legacy btrees, k' can be a nonleaf. In such a case we
- * 'delete' k by marking it as an unused node rather than replacing it with k'. Also, k' may be
- * a leaf but marked as an unused node. In such a case we replace k by k', preserving the key's
- * unused marking. This function is only expected to mark a key as unused when handling a
- * legacy btree.
- */
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int keypos) {
- DiskLoc lchild = childLocForPos(bucket, keypos);
- DiskLoc rchild = childLocForPos(bucket, keypos + 1);
- invariant(!lchild.isNull() || !rchild.isNull());
- int advanceDirection = lchild.isNull() ? 1 : -1;
- int advanceKeyOfs = keypos;
- DiskLoc advanceLoc = advance(txn, bucketLoc, &advanceKeyOfs, advanceDirection);
- // advanceLoc must be a descentant of thisLoc, because thisLoc has a
- // child in the proper direction and all descendants of thisLoc must be
- // nonempty because they are not the root.
- BucketType* advanceBucket = getBucket(txn, advanceLoc);
-
- if (!childLocForPos(advanceBucket, advanceKeyOfs).isNull()
- || !childLocForPos(advanceBucket, advanceKeyOfs + 1).isNull()) {
-
- markUnused(bucket, keypos);
- return;
- }
-
- FullKey kn = getFullKey(advanceBucket, advanceKeyOfs);
- // Because advanceLoc is a descendant of thisLoc, updating thisLoc will
- // not affect packing or keys of advanceLoc and kn will be stable
- // during the following setInternalKey()
- setInternalKey(txn, bucket, bucketLoc, keypos, kn.recordLoc, kn.data,
- childLocForPos(bucket, keypos),
- childLocForPos(bucket, keypos + 1));
- delKeyAtPos(txn, btreemod(txn, advanceBucket), advanceLoc, advanceKeyOfs);
+ if (left.isNull()) {
+ _delKeyAtPos(bucket, p);
+ mayBalanceWithNeighbors(txn, bucket, bucketLoc);
+ } else {
+ deleteInternalKey(txn, bucket, bucketLoc, p);
}
+}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::replaceWithNextChild(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc) {
-
- invariant(bucket->n == 0 && !bucket->nextChild.isNull() );
- if (bucket->parent.isNull()) {
- invariant(getRootLoc(txn) == bucketLoc);
- _headManager->setHead(txn, bucket->nextChild.toRecordId());
- }
- else {
- BucketType* parentBucket = getBucket(txn, bucket->parent);
- int bucketIndexInParent = indexInParent(txn, bucket, bucketLoc);
- *txn->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
- bucket->nextChild;
- }
-
- *txn->recoveryUnit()->writing(&getBucket(txn, bucket->nextChild)->parent) = bucket->parent;
- _cursorRegistry->invalidateCursorsForBucket(bucketLoc);
- deallocBucket(txn, bucket, bucketLoc);
- }
-
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
+/**
+ * This function replaces the specified key (k) by either the prev or next key in the btree
+ * (k'). We require that k have either a left or right child. If k has a left child, we set k'
+ * to the prev key of k, which must be a leaf present in the left child. If k does not have a
+ * left child, we set k' to the next key of k, which must be a leaf present in the right child.
+ * When we replace k with k', we copy k' over k (which may cause a split) and then remove k'
+ * from its original location. Because k' is stored in a descendant of k, replacing k by k'
+ * will not modify the storage location of the original k', and we can easily remove k' from its
+ * original location.
+ *
+ * This function is only needed in cases where k has a left or right child; in other cases a
+ * simpler key removal implementation is possible.
+ *
+ * NOTE on noncompliant BtreeBuilder btrees: It is possible (though likely rare) for btrees
+ * created by BtreeBuilder to have k' that is not a leaf, see SERVER-2732. These cases are
+ * handled in the same manner as described in the "legacy btree structures" note below.
+ *
+ * NOTE on legacy btree structures: In legacy btrees, k' can be a nonleaf. In such a case we
+ * 'delete' k by marking it as an unused node rather than replacing it with k'. Also, k' may be
+ * a leaf but marked as an unused node. In such a case we replace k by k', preserving the key's
+ * unused marking. This function is only expected to mark a key as unused when handling a
+ * legacy btree.
+ */
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::deleteInternalKey(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int keypos) {
+ DiskLoc lchild = childLocForPos(bucket, keypos);
+ DiskLoc rchild = childLocForPos(bucket, keypos + 1);
+ invariant(!lchild.isNull() || !rchild.isNull());
+ int advanceDirection = lchild.isNull() ? 1 : -1;
+ int advanceKeyOfs = keypos;
+ DiskLoc advanceLoc = advance(txn, bucketLoc, &advanceKeyOfs, advanceDirection);
+    // advanceLoc must be a descendant of thisLoc, because thisLoc has a
+ // child in the proper direction and all descendants of thisLoc must be
+ // nonempty because they are not the root.
+ BucketType* advanceBucket = getBucket(txn, advanceLoc);
+
+ if (!childLocForPos(advanceBucket, advanceKeyOfs).isNull() ||
+ !childLocForPos(advanceBucket, advanceKeyOfs + 1).isNull()) {
+ markUnused(bucket, keypos);
+ return;
+ }
+
+ FullKey kn = getFullKey(advanceBucket, advanceKeyOfs);
+ // Because advanceLoc is a descendant of thisLoc, updating thisLoc will
+ // not affect packing or keys of advanceLoc and kn will be stable
+ // during the following setInternalKey()
+ setInternalKey(txn,
+ bucket,
+ bucketLoc,
+ keypos,
+ kn.recordLoc,
+ kn.data,
+ childLocForPos(bucket, keypos),
+ childLocForPos(bucket, keypos + 1));
+ delKeyAtPos(txn, btreemod(txn, advanceBucket), advanceLoc, advanceKeyOfs);
+}
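For intuition, here is the same replace-with-a-descendant trick on an ordinary binary search tree rather than a btree bucket (a sketch only; the real function also handles the unused-key and legacy cases described above): an interior key with two children is overwritten by its in-order successor, and the successor is then deleted from its original, easier position.

#include <cassert>
#include <memory>

struct Node {
    int key;
    std::unique_ptr<Node> left, right;
    explicit Node(int k) : key(k) {}
};

void insert(std::unique_ptr<Node>& root, int k) {
    if (!root)
        root = std::make_unique<Node>(k);
    else
        insert(k < root->key ? root->left : root->right, k);
}

void erase(std::unique_ptr<Node>& root, int k) {
    if (!root)
        return;
    if (k < root->key) { erase(root->left, k); return; }
    if (k > root->key) { erase(root->right, k); return; }
    if (!root->left)  { root = std::move(root->right); return; }
    if (!root->right) { root = std::move(root->left); return; }
    // Two children: copy the in-order successor over this node, then delete it
    // from the right subtree, where it has at most one child.
    Node* succ = root->right.get();
    while (succ->left)
        succ = succ->left.get();
    root->key = succ->key;
    erase(root->right, succ->key);
}

bool contains(const Node* n, int k) {
    while (n && n->key != k)
        n = (k < n->key ? n->left : n->right).get();
    return n != nullptr;
}

int main() {
    std::unique_ptr<Node> root;
    for (int k : {50, 30, 70, 20, 40, 60, 80})
        insert(root, k);
    erase(root, 50);  // internal node with two children
    assert(!contains(root.get(), 50));
    assert(contains(root.get(), 60) && contains(root.get(), 40));
    return 0;
}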
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::replaceWithNextChild(OperationContext* txn,
BucketType* bucket,
- const DiskLoc bucketLoc,
- const int leftIndex) {
- invariant(leftIndex >= 0 && leftIndex < bucket->n);
+ const DiskLoc bucketLoc) {
+ invariant(bucket->n == 0 && !bucket->nextChild.isNull());
+ if (bucket->parent.isNull()) {
+ invariant(getRootLoc(txn) == bucketLoc);
+ _headManager->setHead(txn, bucket->nextChild.toRecordId());
+ } else {
+ BucketType* parentBucket = getBucket(txn, bucket->parent);
+ int bucketIndexInParent = indexInParent(txn, bucket, bucketLoc);
+ *txn->recoveryUnit()->writing(&childLocForPos(parentBucket, bucketIndexInParent)) =
+ bucket->nextChild;
+ }
+
+ *txn->recoveryUnit()->writing(&getBucket(txn, bucket->nextChild)->parent) = bucket->parent;
+ _cursorRegistry->invalidateCursorsForBucket(bucketLoc);
+ deallocBucket(txn, bucket, bucketLoc);
+}
+
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::canMergeChildren(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ const int leftIndex) {
+ invariant(leftIndex >= 0 && leftIndex < bucket->n);
- DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
- DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);
+ DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
+ DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);
- if (leftNodeLoc.isNull() || rightNodeLoc.isNull()) {
- return false;
- }
+ if (leftNodeLoc.isNull() || rightNodeLoc.isNull()) {
+ return false;
+ }
- int pos = 0;
+ int pos = 0;
- BucketType* leftBucket = getBucket(txn, leftNodeLoc);
- BucketType* rightBucket = getBucket(txn, rightNodeLoc);
+ BucketType* leftBucket = getBucket(txn, leftNodeLoc);
+ BucketType* rightBucket = getBucket(txn, rightNodeLoc);
- int sum = BucketType::HeaderSize
- + _packedDataSize(leftBucket, pos)
- + _packedDataSize(rightBucket, pos)
- + getFullKey(bucket, leftIndex).data.dataSize()
- + sizeof(KeyHeaderType);
+ int sum = BucketType::HeaderSize + _packedDataSize(leftBucket, pos) +
+ _packedDataSize(rightBucket, pos) + getFullKey(bucket, leftIndex).data.dataSize() +
+ sizeof(KeyHeaderType);
- return sum <= BtreeLayout::BucketSize;
- }
+ return sum <= BtreeLayout::BucketSize;
+}
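A toy version of the space check above, with made-up constants standing in for BtreeLayout::BucketSize, BucketType::HeaderSize and sizeof(KeyHeaderType): a merge is allowed only when both children's packed bytes, the separator key pulled down from the parent, and one extra key header all fit in a single bucket.

#include <cassert>

constexpr int kBucketSize = 8192;   // stand-in for BtreeLayout::BucketSize
constexpr int kHeaderSize = 64;     // stand-in for BucketType::HeaderSize
constexpr int kKeyHeaderSize = 16;  // stand-in for sizeof(KeyHeaderType)

bool canMerge(int leftPackedBytes, int rightPackedBytes, int separatorKeyBytes) {
    int merged = kHeaderSize + leftPackedBytes + rightPackedBytes +
                 separatorKeyBytes + kKeyHeaderSize;
    return merged <= kBucketSize;
}

int main() {
    assert(canMerge(3000, 3000, 100));   // plenty of room after the merge
    assert(!canMerge(4500, 4500, 100));  // would overflow the bucket
    return 0;
}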
- /**
- * This implementation must respect the meaning and value of lowWaterMark. Also see comments in
- * splitPos().
- */
- template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* txn,
- BucketType* bucket,
- int leftIndex) {
- int split = -1;
- int rightSize = 0;
+/**
+ * This implementation must respect the meaning and value of lowWaterMark. Also see comments in
+ * splitPos().
+ */
+template <class BtreeLayout>
+int BtreeLogic<BtreeLayout>::_rebalancedSeparatorPos(OperationContext* txn,
+ BucketType* bucket,
+ int leftIndex) {
+ int split = -1;
+ int rightSize = 0;
- const BucketType* l = childForPos(txn, bucket, leftIndex);
- const BucketType* r = childForPos(txn, bucket, leftIndex + 1);
+ const BucketType* l = childForPos(txn, bucket, leftIndex);
+ const BucketType* r = childForPos(txn, bucket, leftIndex + 1);
- int KNS = sizeof(KeyHeaderType);
- int rightSizeLimit = ( l->topSize
- + l->n * KNS
- + getFullKey(bucket, leftIndex).data.dataSize()
- + KNS
- + r->topSize
- + r->n * KNS ) / 2;
+ int KNS = sizeof(KeyHeaderType);
+ int rightSizeLimit = (l->topSize + l->n * KNS + getFullKey(bucket, leftIndex).data.dataSize() +
+ KNS + r->topSize + r->n * KNS) /
+ 2;
- // This constraint should be ensured by only calling this function
- // if we go below the low water mark.
- invariant(rightSizeLimit < BtreeLayout::BucketBodySize);
+ // This constraint should be ensured by only calling this function
+ // if we go below the low water mark.
+ invariant(rightSizeLimit < BtreeLayout::BucketBodySize);
- for (int i = r->n - 1; i > -1; --i) {
- rightSize += getFullKey(r, i).data.dataSize() + KNS;
- if (rightSize > rightSizeLimit) {
- split = l->n + 1 + i;
- break;
- }
+ for (int i = r->n - 1; i > -1; --i) {
+ rightSize += getFullKey(r, i).data.dataSize() + KNS;
+ if (rightSize > rightSizeLimit) {
+ split = l->n + 1 + i;
+ break;
}
+ }
- if (split == -1) {
- rightSize += getFullKey(bucket, leftIndex).data.dataSize() + KNS;
- if (rightSize > rightSizeLimit) {
- split = l->n;
- }
+ if (split == -1) {
+ rightSize += getFullKey(bucket, leftIndex).data.dataSize() + KNS;
+ if (rightSize > rightSizeLimit) {
+ split = l->n;
}
+ }
- if (split == -1) {
- for (int i = l->n - 1; i > -1; --i) {
- rightSize += getFullKey(l, i).data.dataSize() + KNS;
- if (rightSize > rightSizeLimit) {
- split = i;
- break;
- }
+ if (split == -1) {
+ for (int i = l->n - 1; i > -1; --i) {
+ rightSize += getFullKey(l, i).data.dataSize() + KNS;
+ if (rightSize > rightSizeLimit) {
+ split = i;
+ break;
}
}
+ }
- // safeguards - we must not create an empty bucket
- if (split < 1) {
- split = 1;
- }
- else if (split > l->n + 1 + r->n - 2) {
- split = l->n + 1 + r->n - 2;
- }
-
- return split;
+ // safeguards - we must not create an empty bucket
+ if (split < 1) {
+ split = 1;
+ } else if (split > l->n + 1 + r->n - 2) {
+ split = l->n + 1 + r->n - 2;
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int leftIndex) {
+ return split;
+}
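A simplified sketch of the separator selection, flattening the left child's keys, the old separator and the right child's keys into one vector of byte sizes and ignoring the per-bucket header and topSize bookkeeping (rebalancedSeparator is an illustrative name): walk the sequence from the right accumulating sizes until the right-hand side holds just over half of the total, and clamp the result so neither side ends up empty.

#include <cassert>
#include <vector>

// keySizes holds the byte size of each key in the combined sequence:
// the left child's keys, then the old separator, then the right child's keys.
int rebalancedSeparator(const std::vector<int>& keySizes) {
    const int n = static_cast<int>(keySizes.size());
    int total = 0;
    for (int s : keySizes)
        total += s;
    const int rightSizeLimit = total / 2;

    int split = -1;
    int rightSize = 0;
    for (int i = n - 1; i >= 0; --i) {
        rightSize += keySizes[i];
        if (rightSize > rightSizeLimit) {
            split = i;  // this key becomes the new separator; keys after it go right
            break;
        }
    }

    // Safeguards, mirroring the original: never create an empty bucket on either side.
    if (split < 1)
        split = 1;
    if (split > n - 2)
        split = n - 2;
    return split;
}

int main() {
    assert(rebalancedSeparator({100, 100, 100, 100}) == 1);
    assert(rebalancedSeparator({10, 10, 10, 300}) == 2);  // clamped so a key stays on the right
    return 0;
}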
- DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
- DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::doMergeChildren(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int leftIndex) {
+ DiskLoc leftNodeLoc = childLocForPos(bucket, leftIndex);
+ DiskLoc rightNodeLoc = childLocForPos(bucket, leftIndex + 1);
- BucketType* l = btreemod(txn, getBucket(txn, leftNodeLoc));
- BucketType* r = btreemod(txn, getBucket(txn, rightNodeLoc));
+ BucketType* l = btreemod(txn, getBucket(txn, leftNodeLoc));
+ BucketType* r = btreemod(txn, getBucket(txn, rightNodeLoc));
- int pos = 0;
- _packReadyForMod(l, pos);
- _packReadyForMod(r, pos);
+ int pos = 0;
+ _packReadyForMod(l, pos);
+ _packReadyForMod(r, pos);
- // We know the additional keys below will fit in l because canMergeChildren() must be true.
- int oldLNum = l->n;
- // left child's right child becomes old parent key's left child
- FullKey knLeft = getFullKey(bucket, leftIndex);
- invariant(pushBack(l, knLeft.recordLoc, knLeft.data, l->nextChild));
+ // We know the additional keys below will fit in l because canMergeChildren() must be true.
+ int oldLNum = l->n;
+ // left child's right child becomes old parent key's left child
+ FullKey knLeft = getFullKey(bucket, leftIndex);
+ invariant(pushBack(l, knLeft.recordLoc, knLeft.data, l->nextChild));
- for (int i = 0; i < r->n; ++i) {
- FullKey kn = getFullKey(r, i);
- invariant(pushBack(l, kn.recordLoc, kn.data, kn.prevChildBucket));
- }
+ for (int i = 0; i < r->n; ++i) {
+ FullKey kn = getFullKey(r, i);
+ invariant(pushBack(l, kn.recordLoc, kn.data, kn.prevChildBucket));
+ }
- l->nextChild = r->nextChild;
- fixParentPtrs(txn, l, leftNodeLoc, oldLNum);
- delBucket(txn, r, rightNodeLoc);
+ l->nextChild = r->nextChild;
+ fixParentPtrs(txn, l, leftNodeLoc, oldLNum);
+ delBucket(txn, r, rightNodeLoc);
- childLocForPos(bucket, leftIndex + 1) = leftNodeLoc;
- childLocForPos(bucket, leftIndex) = DiskLoc();
- _delKeyAtPos(bucket, leftIndex, true);
+ childLocForPos(bucket, leftIndex + 1) = leftNodeLoc;
+ childLocForPos(bucket, leftIndex) = DiskLoc();
+ _delKeyAtPos(bucket, leftIndex, true);
- if (bucket->n == 0) {
- // Will trash bucket and bucketLoc.
- //
- // TODO To ensure all leaves are of equal height, we should ensure this is only called
- // on the root.
- replaceWithNextChild(txn, bucket, bucketLoc);
- }
- else {
- mayBalanceWithNeighbors(txn, bucket, bucketLoc);
- }
+ if (bucket->n == 0) {
+ // Will trash bucket and bucketLoc.
+ //
+ // TODO To ensure all leaves are of equal height, we should ensure this is only called
+ // on the root.
+ replaceWithNextChild(txn, bucket, bucketLoc);
+ } else {
+ mayBalanceWithNeighbors(txn, bucket, bucketLoc);
}
+}
- template <class BtreeLayout>
- int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc) const {
- invariant(!bucket->parent.isNull());
- const BucketType* p = getBucket(txn, bucket->parent);
- if (p->nextChild == bucketLoc) {
- return p->n;
- }
+template <class BtreeLayout>
+int BtreeLogic<BtreeLayout>::indexInParent(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc) const {
+ invariant(!bucket->parent.isNull());
+ const BucketType* p = getBucket(txn, bucket->parent);
+ if (p->nextChild == bucketLoc) {
+ return p->n;
+ }
- for (int i = 0; i < p->n; ++i) {
- if (getKeyHeader(p, i).prevChildBucket == bucketLoc) {
- return i;
- }
+ for (int i = 0; i < p->n; ++i) {
+ if (getKeyHeader(p, i).prevChildBucket == bucketLoc) {
+ return i;
}
-
- log() << "ERROR: can't find ref to child bucket.\n";
- log() << "child: " << bucketLoc << "\n";
- //dump();
- log() << "Parent: " << bucket->parent << "\n";
- //p->dump();
- invariant(false);
- return -1; // just to compile
}
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::tryBalanceChildren(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int leftIndex) {
-
- // If we can merge, then we must merge rather than balance to preserve bucket utilization
- // constraints.
- if (canMergeChildren(txn, bucket, bucketLoc, leftIndex)) {
- return false;
- }
+ log() << "ERROR: can't find ref to child bucket.\n";
+ log() << "child: " << bucketLoc << "\n";
+ // dump();
+ log() << "Parent: " << bucket->parent << "\n";
+ // p->dump();
+ invariant(false);
+ return -1; // just to compile
+}
- doBalanceChildren(txn, btreemod(txn, bucket), bucketLoc, leftIndex);
- return true;
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::tryBalanceChildren(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int leftIndex) {
+ // If we can merge, then we must merge rather than balance to preserve bucket utilization
+ // constraints.
+ if (canMergeChildren(txn, bucket, bucketLoc, leftIndex)) {
+ return false;
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int leftIndex,
- int split,
- BucketType* l,
- const DiskLoc lchild,
- BucketType* r,
- const DiskLoc rchild) {
-
- // TODO maybe do some audits the same way pushBack() does? As a precondition, rchild + the
- // old separator are <= half a body size, and lchild is at most completely full. Based on
- // the value of split, rchild will get <= half of the total bytes which is at most 75% of a
- // full body. So rchild will have room for the following keys:
- int rAdd = l->n - split;
- reserveKeysFront(r, rAdd);
-
- for (int i = split + 1, j = 0; i < l->n; ++i, ++j) {
- FullKey kn = getFullKey(l, i);
- setKey(r, j, kn.recordLoc, kn.data, kn.prevChildBucket);
- }
+ doBalanceChildren(txn, btreemod(txn, bucket), bucketLoc, leftIndex);
+ return true;
+}
- FullKey leftIndexKN = getFullKey(bucket, leftIndex);
- setKey(r, rAdd - 1, leftIndexKN.recordLoc, leftIndexKN.data, l->nextChild);
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::doBalanceLeftToRight(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int leftIndex,
+ int split,
+ BucketType* l,
+ const DiskLoc lchild,
+ BucketType* r,
+ const DiskLoc rchild) {
+ // TODO maybe do some audits the same way pushBack() does? As a precondition, rchild + the
+ // old separator are <= half a body size, and lchild is at most completely full. Based on
+ // the value of split, rchild will get <= half of the total bytes which is at most 75% of a
+ // full body. So rchild will have room for the following keys:
+ int rAdd = l->n - split;
+ reserveKeysFront(r, rAdd);
+
+ for (int i = split + 1, j = 0; i < l->n; ++i, ++j) {
+ FullKey kn = getFullKey(l, i);
+ setKey(r, j, kn.recordLoc, kn.data, kn.prevChildBucket);
+ }
+
+ FullKey leftIndexKN = getFullKey(bucket, leftIndex);
+ setKey(r, rAdd - 1, leftIndexKN.recordLoc, leftIndexKN.data, l->nextChild);
+
+ fixParentPtrs(txn, r, rchild, 0, rAdd - 1);
+
+ FullKey kn = getFullKey(l, split);
+ l->nextChild = kn.prevChildBucket;
+
+ // Because lchild is a descendant of thisLoc, updating thisLoc will not affect packing or
+ // keys of lchild and kn will be stable during the following setInternalKey()
+ setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
+
+ // lchild and rchild cannot be merged, so there must be >0 (actually more) keys to the left
+ // of split.
+ int zeropos = 0;
+ truncateTo(l, split, zeropos);
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int leftIndex,
+ int split,
+ BucketType* l,
+ const DiskLoc lchild,
+ BucketType* r,
+ const DiskLoc rchild) {
+ // As a precondition, lchild + the old separator are <= half a body size,
+ // and rchild is at most completely full. Based on the value of split,
+ // lchild will get less than half of the total bytes which is at most 75%
+ // of a full body. So lchild will have room for the following keys:
+ int lN = l->n;
+
+ {
+ // left child's right child becomes old parent key's left child
+ FullKey kn = getFullKey(bucket, leftIndex);
+ invariant(pushBack(l, kn.recordLoc, kn.data, l->nextChild));
+ }
- fixParentPtrs(txn, r, rchild, 0, rAdd - 1);
+ for (int i = 0; i < split - lN - 1; ++i) {
+ FullKey kn = getFullKey(r, i);
+ invariant(pushBack(l, kn.recordLoc, kn.data, kn.prevChildBucket));
+ }
- FullKey kn = getFullKey(l, split);
+ {
+ FullKey kn = getFullKey(r, split - lN - 1);
l->nextChild = kn.prevChildBucket;
-
- // Because lchild is a descendant of thisLoc, updating thisLoc will not affect packing or
- // keys of lchild and kn will be stable during the following setInternalKey()
+ // Child lN was lchild's old nextChild, and don't need to fix that one.
+ fixParentPtrs(txn, l, lchild, lN + 1, l->n);
+ // Because rchild is a descendant of thisLoc, updating thisLoc will
+ // not affect packing or keys of rchild and kn will be stable
+ // during the following setInternalKey()
setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
-
- // lchild and rchild cannot be merged, so there must be >0 (actually more) keys to the left
- // of split.
- int zeropos = 0;
- truncateTo(l, split, zeropos);
- }
-
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::doBalanceRightToLeft(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int leftIndex,
- int split,
- BucketType* l,
- const DiskLoc lchild,
- BucketType* r,
- const DiskLoc rchild) {
- // As a precondition, lchild + the old separator are <= half a body size,
- // and rchild is at most completely full. Based on the value of split,
- // lchild will get less than half of the total bytes which is at most 75%
- // of a full body. So lchild will have room for the following keys:
- int lN = l->n;
-
- {
- // left child's right child becomes old parent key's left child
- FullKey kn = getFullKey(bucket, leftIndex);
- invariant(pushBack(l, kn.recordLoc, kn.data, l->nextChild));
- }
-
- for (int i = 0; i < split - lN - 1; ++i) {
- FullKey kn = getFullKey(r, i);
- invariant(pushBack(l, kn.recordLoc, kn.data, kn.prevChildBucket));
- }
-
- {
- FullKey kn = getFullKey(r, split - lN - 1);
- l->nextChild = kn.prevChildBucket;
- // Child lN was lchild's old nextChild, and don't need to fix that one.
- fixParentPtrs(txn, l, lchild, lN + 1, l->n);
- // Because rchild is a descendant of thisLoc, updating thisLoc will
- // not affect packing or keys of rchild and kn will be stable
- // during the following setInternalKey()
- setInternalKey(txn, bucket, bucketLoc, leftIndex, kn.recordLoc, kn.data, lchild, rchild);
- }
-
- // lchild and rchild cannot be merged, so there must be >0 (actually more)
- // keys to the right of split.
- int zeropos = 0;
- dropFront(r, split - lN, zeropos);
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int leftIndex) {
-
- DiskLoc lchild = childLocForPos(bucket, leftIndex);
- DiskLoc rchild = childLocForPos(bucket, leftIndex + 1);
-
- int zeropos = 0;
- BucketType* l = btreemod(txn, getBucket(txn, lchild));
- _packReadyForMod(l, zeropos);
+ // lchild and rchild cannot be merged, so there must be >0 (actually more)
+ // keys to the right of split.
+ int zeropos = 0;
+ dropFront(r, split - lN, zeropos);
+}
- BucketType* r = btreemod(txn, getBucket(txn, rchild));
- _packReadyForMod(r, zeropos);
-
- int split = _rebalancedSeparatorPos(txn, bucket, leftIndex);
-
- // By definition, if we are below the low water mark and cannot merge
- // then we must actively balance.
- invariant(split != l->n);
- if (split < l->n) {
- doBalanceLeftToRight(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
- }
- else {
- doBalanceRightToLeft(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
- }
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::doBalanceChildren(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int leftIndex) {
+ DiskLoc lchild = childLocForPos(bucket, leftIndex);
+ DiskLoc rchild = childLocForPos(bucket, leftIndex + 1);
+
+ int zeropos = 0;
+ BucketType* l = btreemod(txn, getBucket(txn, lchild));
+ _packReadyForMod(l, zeropos);
+
+ BucketType* r = btreemod(txn, getBucket(txn, rchild));
+ _packReadyForMod(r, zeropos);
+
+ int split = _rebalancedSeparatorPos(txn, bucket, leftIndex);
+
+ // By definition, if we are below the low water mark and cannot merge
+ // then we must actively balance.
+ invariant(split != l->n);
+ if (split < l->n) {
+ doBalanceLeftToRight(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
+ } else {
+ doBalanceRightToLeft(txn, bucket, bucketLoc, leftIndex, split, l, lchild, r, rchild);
+ }
+}
+
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc) {
+ if (bucket->parent.isNull()) {
+ return false;
}
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::mayBalanceWithNeighbors(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc) {
- if (bucket->parent.isNull()) {
- return false;
- }
-
- if (_packedDataSize(bucket, 0) >= lowWaterMark()) {
- return false;
- }
-
- BucketType* p = getBucket(txn, bucket->parent);
- int parentIdx = indexInParent(txn, bucket, bucketLoc);
-
- // TODO will missing neighbor case be possible long term? Should we try to merge/balance
- // somehow in that case if so?
- bool mayBalanceRight = (parentIdx < p->n) && !childLocForPos(p, parentIdx + 1).isNull();
- bool mayBalanceLeft = ( parentIdx > 0 ) && !childLocForPos(p, parentIdx - 1).isNull();
-
- // Balance if possible on one side - we merge only if absolutely necessary to preserve btree
- // bucket utilization constraints since that's a more heavy duty operation (especially if we
- // must re-split later).
- if (mayBalanceRight && tryBalanceChildren(txn, p, bucket->parent, parentIdx)) {
- return true;
- }
-
- if (mayBalanceLeft && tryBalanceChildren(txn, p, bucket->parent, parentIdx - 1)) {
- return true;
- }
-
- BucketType* pm = btreemod(txn, getBucket(txn, bucket->parent));
- if (mayBalanceRight) {
- doMergeChildren(txn, pm, bucket->parent, parentIdx);
- return true;
- }
- else if (mayBalanceLeft) {
- doMergeChildren(txn, pm, bucket->parent, parentIdx - 1);
- return true;
- }
-
+ if (_packedDataSize(bucket, 0) >= lowWaterMark()) {
return false;
}
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::unindex(OperationContext* txn,
- const BSONObj& key,
- const DiskLoc& recordLoc) {
- int pos;
- bool found = false;
- KeyDataOwnedType ownedKey(key);
+ BucketType* p = getBucket(txn, bucket->parent);
+ int parentIdx = indexInParent(txn, bucket, bucketLoc);
- DiskLoc loc = _locate(txn, getRootLoc(txn), ownedKey, &pos, &found, recordLoc, 1);
- if (found) {
- BucketType* bucket = btreemod(txn, getBucket(txn, loc));
- delKeyAtPos(txn, bucket, loc, pos);
- assertValid(_indexName, getRoot(txn), _ordering);
- }
- return found;
- }
+ // TODO will missing neighbor case be possible long term? Should we try to merge/balance
+ // somehow in that case if so?
+ bool mayBalanceRight = (parentIdx < p->n) && !childLocForPos(p, parentIdx + 1).isNull();
+ bool mayBalanceLeft = (parentIdx > 0) && !childLocForPos(p, parentIdx - 1).isNull();
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* txn) const {
- return getRoot(txn)->n == 0;
+ // Balance if possible on one side - we merge only if absolutely necessary to preserve btree
+ // bucket utilization constraints since that's a more heavy duty operation (especially if we
+ // must re-split later).
+ if (mayBalanceRight && tryBalanceChildren(txn, p, bucket->parent, parentIdx)) {
+ return true;
}
- /**
- * This can cause a lot of additional page writes when we assign buckets to different parents.
- * Maybe get rid of parent ptrs?
- */
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int firstIndex,
- int lastIndex) {
-
- invariant(getBucket(txn, bucketLoc) == bucket);
-
- if (lastIndex == -1) {
- lastIndex = bucket->n;
- }
-
- for (int i = firstIndex; i <= lastIndex; i++) {
- const DiskLoc childLoc = childLocForPos(bucket, i);
- if (!childLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, childLoc)->parent) = bucketLoc;
- }
- }
+ if (mayBalanceLeft && tryBalanceChildren(txn, p, bucket->parent, parentIdx - 1)) {
+ return true;
}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int keypos,
- const DiskLoc recordLoc,
- const KeyDataType& key,
- const DiskLoc lchild,
- const DiskLoc rchild) {
- childLocForPos(bucket, keypos).Null();
- // This may leave the bucket empty (n == 0) which is ok only as a txnient state. In the
- // instant case, the implementation of insertHere behaves correctly when n == 0 and as a
- // side effect increments n.
- _delKeyAtPos(bucket, keypos, true);
-
- // Ensure we do not orphan neighbor's old child.
- invariant(childLocForPos(bucket, keypos ) == rchild);
-
- // Just set temporarily - required to pass validation in insertHere()
- childLocForPos(bucket, keypos) = lchild;
-
- insertHere(txn, bucketLoc, keypos, key, recordLoc, lchild, rchild);
- }
-
- /**
- * insert a key in this bucket, splitting if necessary.
- *
- * @keypos - where to insert the key in range 0..n. 0=make leftmost, n=make rightmost. NOTE
- * this function may free some data, and as a result the value passed for keypos may be invalid
- * after calling insertHere()
- *
- * Some of the write intent signaling below relies on the implementation of the optimized write
- * intent code in basicInsert().
- */
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
- const DiskLoc bucketLoc,
- int pos,
- const KeyDataType& key,
- const DiskLoc recordLoc,
- const DiskLoc leftChildLoc,
- const DiskLoc rightChildLoc) {
+ BucketType* pm = btreemod(txn, getBucket(txn, bucket->parent));
+ if (mayBalanceRight) {
+ doMergeChildren(txn, pm, bucket->parent, parentIdx);
+ return true;
+ } else if (mayBalanceLeft) {
+ doMergeChildren(txn, pm, bucket->parent, parentIdx - 1);
+ return true;
+ }
- BucketType* bucket = getBucket(txn, bucketLoc);
+ return false;
+}
- if (!basicInsert(txn, bucket, bucketLoc, pos, key, recordLoc)) {
- // If basicInsert() fails, the bucket will be packed as required by split().
- split(txn, btreemod(txn, bucket), bucketLoc, pos, recordLoc, key, leftChildLoc, rightChildLoc);
- return;
- }
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::unindex(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& recordLoc) {
+ int pos;
+ bool found = false;
+ KeyDataOwnedType ownedKey(key);
- KeyHeaderType* kn = &getKeyHeader(bucket, pos);
- if (pos + 1 == bucket->n) {
- // It's the last key.
- if (bucket->nextChild != leftChildLoc) {
- // XXX log more
- invariant(false);
- }
- kn->prevChildBucket = bucket->nextChild;
- invariant(kn->prevChildBucket == leftChildLoc);
- *txn->recoveryUnit()->writing(&bucket->nextChild) = rightChildLoc;
- if (!rightChildLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
- }
- }
- else {
- kn->prevChildBucket = leftChildLoc;
- if (getKeyHeader(bucket, pos + 1).prevChildBucket != leftChildLoc) {
- // XXX: log more
- invariant(false);
- }
- const LocType *pc = &getKeyHeader(bucket, pos + 1).prevChildBucket;
- // Intent declared in basicInsert()
- *const_cast<LocType*>(pc) = rightChildLoc;
- if (!rightChildLoc.isNull()) {
- *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
- }
- }
+ DiskLoc loc = _locate(txn, getRootLoc(txn), ownedKey, &pos, &found, recordLoc, 1);
+ if (found) {
+ BucketType* bucket = btreemod(txn, getBucket(txn, loc));
+ delKeyAtPos(txn, bucket, loc, pos);
+ assertValid(_indexName, getRoot(txn), _ordering);
}
+ return found;
+}
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int keypos,
- const DiskLoc recordLoc,
- const KeyDataType& key,
- const DiskLoc lchild,
- const DiskLoc rchild) {
-
- int split = splitPos(bucket, keypos);
- DiskLoc rLoc = _addBucket(txn);
- BucketType* r = btreemod(txn, getBucket(txn, rLoc));
-
- for (int i = split + 1; i < bucket->n; i++) {
- FullKey kn = getFullKey(bucket, i);
- invariant(pushBack(r, kn.recordLoc, kn.data, kn.prevChildBucket));
- }
- r->nextChild = bucket->nextChild;
- assertValid(_indexName, r, _ordering);
-
- r = NULL;
- fixParentPtrs(txn, getBucket(txn, rLoc), rLoc);
-
- FullKey splitkey = getFullKey(bucket, split);
- // splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
- bucket->nextChild = splitkey.prevChildBucket;
-
- // Because thisLoc is a descendant of parent, updating parent will not affect packing or
- // keys of thisLoc and splitkey will be stable during the following:
-
- if (bucket->parent.isNull()) {
- // promote splitkey to a parent this->node make a new parent if we were the root
- DiskLoc L = _addBucket(txn);
- BucketType* p = btreemod(txn, getBucket(txn, L));
- invariant(pushBack(p, splitkey.recordLoc, splitkey.data, bucketLoc));
- p->nextChild = rLoc;
- assertValid(_indexName, p, _ordering);
- bucket->parent = L;
- _headManager->setHead(txn, L.toRecordId());
- *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
- }
- else {
- // set this before calling _insert - if it splits it will do fixParent() logic and
- // change the value.
- *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
- _insert(txn,
- getBucket(txn, bucket->parent),
- bucket->parent,
- splitkey.data,
- splitkey.recordLoc,
- true, // dupsallowed
- bucketLoc,
- rLoc);
- }
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::isEmpty(OperationContext* txn) const {
+ return getRoot(txn)->n == 0;
+}
- int newpos = keypos;
- // note this may trash splitkey.key. thus we had to promote it before finishing up here.
- truncateTo(bucket, split, newpos);
+/**
+ * This can cause a lot of additional page writes when we assign buckets to different parents.
+ * Maybe get rid of parent ptrs?
+ */
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::fixParentPtrs(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int firstIndex,
+ int lastIndex) {
+ invariant(getBucket(txn, bucketLoc) == bucket);
- // add our this->new key, there is room this->now
- if (keypos <= split) {
- insertHere(txn, bucketLoc, newpos, key, recordLoc, lchild, rchild);
- }
- else {
- int kp = keypos - split - 1;
- invariant(kp >= 0);
- insertHere(txn, rLoc, kp, key, recordLoc, lchild, rchild);
- }
+ if (lastIndex == -1) {
+ lastIndex = bucket->n;
}
- class DummyDocWriter : public DocWriter {
- public:
- DummyDocWriter(size_t sz) : _sz(sz) { }
- virtual void writeDocument(char* buf) const { /* no-op */ }
- virtual size_t documentSize() const { return _sz; }
- private:
- size_t _sz;
- };
-
- template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* txn) {
- if (!_headManager->getHead(txn).isNull()) {
- return Status(ErrorCodes::InternalError, "index already initialized");
+ for (int i = firstIndex; i <= lastIndex; i++) {
+ const DiskLoc childLoc = childLocForPos(bucket, i);
+ if (!childLoc.isNull()) {
+ *txn->recoveryUnit()->writing(&getBucket(txn, childLoc)->parent) = bucketLoc;
}
-
- _headManager->setHead(txn, _addBucket(txn).toRecordId());
- return Status::OK();
}
+}
- template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::_addBucket(OperationContext* txn) {
- DummyDocWriter docWriter(BtreeLayout::BucketSize);
- StatusWith<RecordId> loc = _recordStore->insertRecord(txn, &docWriter, false);
- // XXX: remove this(?) or turn into massert or sanely bubble it back up.
- uassertStatusOK(loc.getStatus());
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::setInternalKey(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int keypos,
+ const DiskLoc recordLoc,
+ const KeyDataType& key,
+ const DiskLoc lchild,
+ const DiskLoc rchild) {
+ childLocForPos(bucket, keypos).Null();
+    // This may leave the bucket empty (n == 0), which is ok only as a transient state. In the
+    // instant case, the implementation of insertHere behaves correctly when n == 0 and as a
+    // side effect increments n.
+ _delKeyAtPos(bucket, keypos, true);
- // this is a new bucket, not referenced by anyone, probably don't need this lock
- BucketType* b = btreemod(txn, getBucket(txn, loc.getValue()));
- init(b);
- return DiskLoc::fromRecordId(loc.getValue());
- }
+ // Ensure we do not orphan neighbor's old child.
+ invariant(childLocForPos(bucket, keypos) == rchild);
- // static
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::dumpBucket(const BucketType* bucket, int indentLength) {
- log() << "BUCKET n:" << bucket->n << ", parent:" << hex << bucket->parent.getOfs() << dec;
+ // Just set temporarily - required to pass validation in insertHere()
+ childLocForPos(bucket, keypos) = lchild;
- const string indent = string(indentLength, ' ');
+ insertHere(txn, bucketLoc, keypos, key, recordLoc, lchild, rchild);
+}
- for (int i = 0; i < bucket->n; i++) {
- log() << '\n' << indent;
- FullKey k = getFullKey(bucket, i);
- string ks = k.data.toString();
- log() << " " << hex << k.prevChildBucket.getOfs() << "<-- prevChildBucket for " << i << '\n';
- log() << indent << " " << i << ' ' << ks.substr(0, 30)
- << " Loc:" << k.recordLoc.toString() << dec;
- if (getKeyHeader(bucket, i).isUnused()) {
- log() << " UNUSED";
- }
+/**
+ * insert a key in this bucket, splitting if necessary.
+ *
+ * @keypos - where to insert the key in range 0..n. 0=make leftmost, n=make rightmost. NOTE
+ * this function may free some data, and as a result the value passed for keypos may be invalid
+ * after calling insertHere()
+ *
+ * Some of the write intent signaling below relies on the implementation of the optimized write
+ * intent code in basicInsert().
+ */
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::insertHere(OperationContext* txn,
+ const DiskLoc bucketLoc,
+ int pos,
+ const KeyDataType& key,
+ const DiskLoc recordLoc,
+ const DiskLoc leftChildLoc,
+ const DiskLoc rightChildLoc) {
+ BucketType* bucket = getBucket(txn, bucketLoc);
+
+ if (!basicInsert(txn, bucket, bucketLoc, pos, key, recordLoc)) {
+ // If basicInsert() fails, the bucket will be packed as required by split().
+ split(txn,
+ btreemod(txn, bucket),
+ bucketLoc,
+ pos,
+ recordLoc,
+ key,
+ leftChildLoc,
+ rightChildLoc);
+ return;
+ }
+
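+    // basicInsert() has made room for the new key header at 'pos'; now wire up the child
+    // pointers: the new key's left child becomes leftChildLoc, and the slot immediately to its
+    // right (the next key's prevChildBucket, or nextChild if the new key is the last one)
+    // becomes rightChildLoc, whose parent pointer is updated in turn.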
+ KeyHeaderType* kn = &getKeyHeader(bucket, pos);
+ if (pos + 1 == bucket->n) {
+ // It's the last key.
+ if (bucket->nextChild != leftChildLoc) {
+ // XXX log more
+ invariant(false);
}
-
- log() << "\n" << indent << " " << hex << bucket->nextChild.getOfs() << dec << endl;
- }
-
- template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::getDiskLoc(OperationContext* txn,
- const DiskLoc& bucketLoc,
- const int keyOffset) const {
- invariant(!bucketLoc.isNull());
- BucketType* bucket = getBucket(txn, bucketLoc);
- return getKeyHeader(bucket, keyOffset).recordLoc;
- }
-
- template <class BtreeLayout>
- BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* txn,
- const DiskLoc& bucketLoc,
- const int keyOffset) const {
- invariant(!bucketLoc.isNull());
- BucketType* bucket = getBucket(txn, bucketLoc);
- int n = bucket->n;
- invariant(n != BtreeLayout::INVALID_N_SENTINEL);
- invariant(n >= 0);
- invariant(n < 10000);
- invariant(n != 0xffff);
-
- invariant(keyOffset >= 0);
- invariant(keyOffset < n);
-
- // XXX: should we really return an empty obj if keyOffset>=n?
- if (keyOffset >= n) {
- return BSONObj();
+ kn->prevChildBucket = bucket->nextChild;
+ invariant(kn->prevChildBucket == leftChildLoc);
+ *txn->recoveryUnit()->writing(&bucket->nextChild) = rightChildLoc;
+ if (!rightChildLoc.isNull()) {
+ *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
}
- else {
- return getFullKey(bucket, keyOffset).data.toBson();
+ } else {
+ kn->prevChildBucket = leftChildLoc;
+ if (getKeyHeader(bucket, pos + 1).prevChildBucket != leftChildLoc) {
+ // XXX: log more
+ invariant(false);
}
- }
+ const LocType* pc = &getKeyHeader(bucket, pos + 1).prevChildBucket;
+ // Intent declared in basicInsert()
+ *const_cast<LocType*>(pc) = rightChildLoc;
+ if (!rightChildLoc.isNull()) {
+ *txn->recoveryUnit()->writing(&getBucket(txn, rightChildLoc)->parent) = bucketLoc;
+ }
+ }
+}
+
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::split(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int keypos,
+ const DiskLoc recordLoc,
+ const KeyDataType& key,
+ const DiskLoc lchild,
+ const DiskLoc rchild) {
+ int split = splitPos(bucket, keypos);
+ DiskLoc rLoc = _addBucket(txn);
+ BucketType* r = btreemod(txn, getBucket(txn, rLoc));
+
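+    // Keys to the right of the split point are copied into the new right bucket; the key at the
+    // split point itself is promoted into the parent further below.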
+ for (int i = split + 1; i < bucket->n; i++) {
+ FullKey kn = getFullKey(bucket, i);
+ invariant(pushBack(r, kn.recordLoc, kn.data, kn.prevChildBucket));
+ }
+ r->nextChild = bucket->nextChild;
+ assertValid(_indexName, r, _ordering);
+
+ r = NULL;
+ fixParentPtrs(txn, getBucket(txn, rLoc), rLoc);
+
+ FullKey splitkey = getFullKey(bucket, split);
+    // splitkey gets promoted; its children will be thisLoc (l) and rLoc (r)
+ bucket->nextChild = splitkey.prevChildBucket;
+
+    // Because thisLoc is a descendant of parent, updating parent will not affect packing or
+    // keys of thisLoc, and splitkey will be stable during the following:
+
+ if (bucket->parent.isNull()) {
+        // promote splitkey to a parent node; make a new parent if we were the root
+ DiskLoc L = _addBucket(txn);
+ BucketType* p = btreemod(txn, getBucket(txn, L));
+ invariant(pushBack(p, splitkey.recordLoc, splitkey.data, bucketLoc));
+ p->nextChild = rLoc;
+ assertValid(_indexName, p, _ordering);
+ bucket->parent = L;
+ _headManager->setHead(txn, L.toRecordId());
+ *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
+ } else {
+ // set this before calling _insert - if it splits it will do fixParent() logic and
+ // change the value.
+ *txn->recoveryUnit()->writing(&getBucket(txn, rLoc)->parent) = bucket->parent;
+ _insert(txn,
+ getBucket(txn, bucket->parent),
+ bucket->parent,
+ splitkey.data,
+ splitkey.recordLoc,
+ true, // dupsallowed
+ bucketLoc,
+ rLoc);
+ }
+
+ int newpos = keypos;
+    // Note this may trash splitkey.key; thus we had to promote it before finishing up here.
+ truncateTo(bucket, split, newpos);
+
+    // add our new key; there is room now
+ if (keypos <= split) {
+ insertHere(txn, bucketLoc, newpos, key, recordLoc, lchild, rchild);
+ } else {
+ int kp = keypos - split - 1;
+ invariant(kp >= 0);
+ insertHere(txn, rLoc, kp, key, recordLoc, lchild, rchild);
+ }
+}
+
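+// DummyDocWriter reserves 'sz' bytes in the record store without writing any payload;
+// _addBucket() below uses it to allocate BucketSize bytes and then formats the new bucket in
+// place via init().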
+class DummyDocWriter : public DocWriter {
+public:
+ DummyDocWriter(size_t sz) : _sz(sz) {}
+ virtual void writeDocument(char* buf) const { /* no-op */
+ }
+ virtual size_t documentSize() const {
+ return _sz;
+ }
+
+private:
+ size_t _sz;
+};
+
+template <class BtreeLayout>
+Status BtreeLogic<BtreeLayout>::initAsEmpty(OperationContext* txn) {
+ if (!_headManager->getHead(txn).isNull()) {
+ return Status(ErrorCodes::InternalError, "index already initialized");
+ }
+
+ _headManager->setHead(txn, _addBucket(txn).toRecordId());
+ return Status::OK();
+}
+
+template <class BtreeLayout>
+DiskLoc BtreeLogic<BtreeLayout>::_addBucket(OperationContext* txn) {
+ DummyDocWriter docWriter(BtreeLayout::BucketSize);
+ StatusWith<RecordId> loc = _recordStore->insertRecord(txn, &docWriter, false);
+ // XXX: remove this(?) or turn into massert or sanely bubble it back up.
+ uassertStatusOK(loc.getStatus());
+
+ // this is a new bucket, not referenced by anyone, probably don't need this lock
+ BucketType* b = btreemod(txn, getBucket(txn, loc.getValue()));
+ init(b);
+ return DiskLoc::fromRecordId(loc.getValue());
+}
+
+// static
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::dumpBucket(const BucketType* bucket, int indentLength) {
+ log() << "BUCKET n:" << bucket->n << ", parent:" << hex << bucket->parent.getOfs() << dec;
+
+ const string indent = string(indentLength, ' ');
+
+ for (int i = 0; i < bucket->n; i++) {
+ log() << '\n' << indent;
+ FullKey k = getFullKey(bucket, i);
+ string ks = k.data.toString();
+ log() << " " << hex << k.prevChildBucket.getOfs() << "<-- prevChildBucket for " << i
+ << '\n';
+ log() << indent << " " << i << ' ' << ks.substr(0, 30)
+ << " Loc:" << k.recordLoc.toString() << dec;
+ if (getKeyHeader(bucket, i).isUnused()) {
+ log() << " UNUSED";
+ }
+ }
+
+ log() << "\n" << indent << " " << hex << bucket->nextChild.getOfs() << dec << endl;
+}
+
+template <class BtreeLayout>
+DiskLoc BtreeLogic<BtreeLayout>::getDiskLoc(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const int keyOffset) const {
+ invariant(!bucketLoc.isNull());
+ BucketType* bucket = getBucket(txn, bucketLoc);
+ return getKeyHeader(bucket, keyOffset).recordLoc;
+}
+
+template <class BtreeLayout>
+BSONObj BtreeLogic<BtreeLayout>::getKey(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const int keyOffset) const {
+ invariant(!bucketLoc.isNull());
+ BucketType* bucket = getBucket(txn, bucketLoc);
+ int n = bucket->n;
+ invariant(n != BtreeLayout::INVALID_N_SENTINEL);
+ invariant(n >= 0);
+ invariant(n < 10000);
+ invariant(n != 0xffff);
+
+ invariant(keyOffset >= 0);
+ invariant(keyOffset < n);
+
+ // XXX: should we really return an empty obj if keyOffset>=n?
+ if (keyOffset >= n) {
+ return BSONObj();
+ } else {
+ return getFullKey(bucket, keyOffset).data.toBson();
+ }
+}
+
+template <class BtreeLayout>
+Status BtreeLogic<BtreeLayout>::touch(OperationContext* txn) const {
+ return _recordStore->touch(txn, NULL);
+}
+
+template <class BtreeLayout>
+long long BtreeLogic<BtreeLayout>::fullValidate(OperationContext* txn,
+ long long* unusedCount,
+ bool strict,
+ bool dumpBuckets,
+ unsigned depth) const {
+ return _fullValidate(txn, getRootLoc(txn), unusedCount, strict, dumpBuckets, depth);
+}
+
+template <class BtreeLayout>
+long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
+ const DiskLoc bucketLoc,
+ long long* unusedCount,
+ bool strict,
+ bool dumpBuckets,
+ unsigned depth) const {
+ BucketType* bucket = getBucket(txn, bucketLoc);
+ assertValid(_indexName, bucket, _ordering, true);
- template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::touch(OperationContext* txn) const {
- return _recordStore->touch( txn, NULL );
+ if (dumpBuckets) {
+ log() << bucketLoc.toString() << ' ';
+ dumpBucket(bucket, depth);
}
- template <class BtreeLayout>
- long long BtreeLogic<BtreeLayout>::fullValidate(OperationContext* txn,
- long long *unusedCount,
- bool strict,
- bool dumpBuckets,
- unsigned depth) const {
- return _fullValidate(txn, getRootLoc(txn), unusedCount, strict, dumpBuckets, depth);
- }
+ long long keyCount = 0;
- template <class BtreeLayout>
- long long BtreeLogic<BtreeLayout>::_fullValidate(OperationContext* txn,
- const DiskLoc bucketLoc,
- long long *unusedCount,
- bool strict,
- bool dumpBuckets,
- unsigned depth) const {
- BucketType* bucket = getBucket(txn, bucketLoc);
- assertValid(_indexName, bucket, _ordering, true);
+ for (int i = 0; i < bucket->n; i++) {
+ KeyHeaderType& kn = getKeyHeader(bucket, i);
- if (dumpBuckets) {
- log() << bucketLoc.toString() << ' ';
- dumpBucket(bucket, depth);
+ if (kn.isUsed()) {
+ keyCount++;
+ } else if (NULL != unusedCount) {
+ ++(*unusedCount);
}
- long long keyCount = 0;
-
- for (int i = 0; i < bucket->n; i++) {
- KeyHeaderType& kn = getKeyHeader(bucket, i);
-
- if (kn.isUsed()) {
- keyCount++;
- }
- else if (NULL != unusedCount) {
- ++(*unusedCount);
- }
-
- if (!kn.prevChildBucket.isNull()) {
- DiskLoc left = kn.prevChildBucket;
- BucketType* b = getBucket(txn, left);
-
- if (strict) {
- invariant(b->parent == bucketLoc);
- }
- else {
- wassert(b->parent == bucketLoc);
- }
-
- keyCount += _fullValidate(txn, left, unusedCount, strict, dumpBuckets, depth + 1);
- }
- }
+ if (!kn.prevChildBucket.isNull()) {
+ DiskLoc left = kn.prevChildBucket;
+ BucketType* b = getBucket(txn, left);
- if (!bucket->nextChild.isNull()) {
- BucketType* b = getBucket(txn, bucket->nextChild);
if (strict) {
invariant(b->parent == bucketLoc);
- }
- else {
+ } else {
wassert(b->parent == bucketLoc);
}
- keyCount += _fullValidate(txn, bucket->nextChild, unusedCount, strict, dumpBuckets, depth + 1);
+ keyCount += _fullValidate(txn, left, unusedCount, strict, dumpBuckets, depth + 1);
+ }
+ }
+
+ if (!bucket->nextChild.isNull()) {
+ BucketType* b = getBucket(txn, bucket->nextChild);
+ if (strict) {
+ invariant(b->parent == bucketLoc);
+ } else {
+ wassert(b->parent == bucketLoc);
}
- return keyCount;
+ keyCount +=
+ _fullValidate(txn, bucket->nextChild, unusedCount, strict, dumpBuckets, depth + 1);
}
- // XXX: remove this(?) used to not dump every key in assertValid.
- int nDumped = 0;
+ return keyCount;
+}
- // static
- template <class BtreeLayout>
- void BtreeLogic<BtreeLayout>::assertValid(const std::string& ns,
- BucketType* bucket,
- const Ordering& ordering,
- bool force) {
- if (!force) {
- return;
- }
+// XXX: remove this(?) Used so that we do not dump every key in assertValid.
+int nDumped = 0;
- // this is very slow so don't do often
- {
- static int _k;
- if (++_k % 128) {
- return;
- }
+// static
+template <class BtreeLayout>
+void BtreeLogic<BtreeLayout>::assertValid(const std::string& ns,
+ BucketType* bucket,
+ const Ordering& ordering,
+ bool force) {
+ if (!force) {
+ return;
+ }
+
+ // this is very slow so don't do often
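+    // (only roughly one call in 128 falls through to the checks below)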
+ {
+ static int _k;
+ if (++_k % 128) {
+ return;
}
+ }
- DEV {
- // slow:
- for (int i = 0; i < bucket->n - 1; i++) {
- FullKey firstKey = getFullKey(bucket, i);
- FullKey secondKey = getFullKey(bucket, i + 1);
- int z = firstKey.data.woCompare(secondKey.data, ordering);
- if (z > 0) {
- log() << "ERROR: btree key order corrupt. Keys:" << endl;
- if (++nDumped < 5) {
- for (int j = 0; j < bucket->n; j++) {
- log() << " " << getFullKey(bucket, j).data.toString() << endl;
- }
- dumpBucket(bucket);
+ DEV {
+ // slow:
+ for (int i = 0; i < bucket->n - 1; i++) {
+ FullKey firstKey = getFullKey(bucket, i);
+ FullKey secondKey = getFullKey(bucket, i + 1);
+ int z = firstKey.data.woCompare(secondKey.data, ordering);
+ if (z > 0) {
+ log() << "ERROR: btree key order corrupt. Keys:" << endl;
+ if (++nDumped < 5) {
+ for (int j = 0; j < bucket->n; j++) {
+ log() << " " << getFullKey(bucket, j).data.toString() << endl;
}
- wassert(false);
- break;
+ dumpBucket(bucket);
}
- else if (z == 0) {
- if (!(firstKey.header.recordLoc < secondKey.header.recordLoc)) {
- log() << "ERROR: btree key order corrupt (recordlocs wrong):" << endl;
- log() << " k(" << i << ")" << firstKey.data.toString()
- << " RL:" << firstKey.header.recordLoc.toString() << endl;
- log() << " k(" << i + 1 << ")" << secondKey.data.toString()
- << " RL:" << secondKey.header.recordLoc.toString() << endl;
- wassert(firstKey.header.recordLoc < secondKey.header.recordLoc);
- }
+ wassert(false);
+ break;
+ } else if (z == 0) {
+ if (!(firstKey.header.recordLoc < secondKey.header.recordLoc)) {
+ log() << "ERROR: btree key order corrupt (recordlocs wrong):" << endl;
+ log() << " k(" << i << ")" << firstKey.data.toString()
+ << " RL:" << firstKey.header.recordLoc.toString() << endl;
+ log() << " k(" << i + 1 << ")" << secondKey.data.toString()
+ << " RL:" << secondKey.header.recordLoc.toString() << endl;
+ wassert(firstKey.header.recordLoc < secondKey.header.recordLoc);
}
}
}
- else {
- //faster:
- if (bucket->n > 1) {
- FullKey k1 = getFullKey(bucket, 0);
- FullKey k2 = getFullKey(bucket, bucket->n - 1);
- int z = k1.data.woCompare(k2.data, ordering);
- //wassert( z <= 0 );
- if (z > 0) {
- log() << "Btree keys out of order in collection " << ns;
- ONCE {
- dumpBucket(bucket);
- }
- invariant(false);
+ }
+ else {
+ // faster:
+ if (bucket->n > 1) {
+ FullKey k1 = getFullKey(bucket, 0);
+ FullKey k2 = getFullKey(bucket, bucket->n - 1);
+ int z = k1.data.woCompare(k2.data, ordering);
+ // wassert( z <= 0 );
+ if (z > 0) {
+ log() << "Btree keys out of order in collection " << ns;
+ ONCE {
+ dumpBucket(bucket);
}
+ invariant(false);
}
}
}
+}
- template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::insert(OperationContext* txn,
- const BSONObj& rawKey,
- const DiskLoc& value,
- bool dupsAllowed) {
- KeyDataOwnedType key(rawKey);
+template <class BtreeLayout>
+Status BtreeLogic<BtreeLayout>::insert(OperationContext* txn,
+ const BSONObj& rawKey,
+ const DiskLoc& value,
+ bool dupsAllowed) {
+ KeyDataOwnedType key(rawKey);
- if (key.dataSize() > BtreeLayout::KeyMax) {
- string msg = str::stream() << "Btree::insert: key too large to index, failing "
- << _indexName << ' '
- << key.dataSize() << ' ' << key.toString();
- return Status(ErrorCodes::KeyTooLong, msg);
- }
-
- Status status = _insert(txn,
- getRoot(txn),
- getRootLoc(txn),
- key,
- value,
- dupsAllowed,
- DiskLoc(),
- DiskLoc());
-
- assertValid(_indexName, getRoot(txn), _ordering);
- return status;
+ if (key.dataSize() > BtreeLayout::KeyMax) {
+ string msg = str::stream() << "Btree::insert: key too large to index, failing "
+ << _indexName << ' ' << key.dataSize() << ' ' << key.toString();
+ return Status(ErrorCodes::KeyTooLong, msg);
}
- template <class BtreeLayout>
- Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- const KeyDataType& key,
- const DiskLoc recordLoc,
- bool dupsAllowed,
- const DiskLoc leftChild,
- const DiskLoc rightChild) {
- invariant( key.dataSize() > 0 );
-
- int pos;
- bool found;
- Status findStatus = _find(txn, bucket, key, recordLoc, !dupsAllowed, &pos, &found);
- if (!findStatus.isOK()) {
- return findStatus;
- }
+ Status status =
+ _insert(txn, getRoot(txn), getRootLoc(txn), key, value, dupsAllowed, DiskLoc(), DiskLoc());
- if (found) {
- KeyHeaderType& header = getKeyHeader(bucket, pos);
- if (header.isUnused()) {
- LOG(4) << "btree _insert: reusing unused key" << endl;
- massert(17433, "_insert: reuse key but lchild is not null", leftChild.isNull());
- massert(17434, "_insert: reuse key but rchild is not null", rightChild.isNull());
- txn->recoveryUnit()->writing(&header)->setUsed();
- return Status::OK();
- }
- // The logic in _find() prohibits finding and returning a position if the 'used' bit
- // in the header is set and dups are disallowed.
- invariant(dupsAllowed);
- return Status(ErrorCodes::DuplicateKeyValue, "key/value already in index");
- }
+ assertValid(_indexName, getRoot(txn), _ordering);
+ return status;
+}
- DiskLoc childLoc = childLocForPos(bucket, pos);
-
- // In current usage, rightChild is NULL for a new key and is not NULL when we are
- // promoting a split key. These are the only two cases where _insert() is called
- // currently.
- if (childLoc.isNull() || !rightChild.isNull()) {
- insertHere(txn, bucketLoc, pos, key, recordLoc, leftChild, rightChild);
+template <class BtreeLayout>
+Status BtreeLogic<BtreeLayout>::_insert(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ const KeyDataType& key,
+ const DiskLoc recordLoc,
+ bool dupsAllowed,
+ const DiskLoc leftChild,
+ const DiskLoc rightChild) {
+ invariant(key.dataSize() > 0);
+
+ int pos;
+ bool found;
+ Status findStatus = _find(txn, bucket, key, recordLoc, !dupsAllowed, &pos, &found);
+ if (!findStatus.isOK()) {
+ return findStatus;
+ }
+
+ if (found) {
+ KeyHeaderType& header = getKeyHeader(bucket, pos);
+ if (header.isUnused()) {
+ LOG(4) << "btree _insert: reusing unused key" << endl;
+ massert(17433, "_insert: reuse key but lchild is not null", leftChild.isNull());
+ massert(17434, "_insert: reuse key but rchild is not null", rightChild.isNull());
+ txn->recoveryUnit()->writing(&header)->setUsed();
return Status::OK();
}
- else {
- return _insert(txn,
- getBucket(txn, childLoc),
- childLoc,
- key,
- recordLoc,
- dupsAllowed,
- DiskLoc(),
- DiskLoc());
- }
+ // The logic in _find() prohibits finding and returning a position if the 'used' bit
+ // in the header is set and dups are disallowed.
+ invariant(dupsAllowed);
+ return Status(ErrorCodes::DuplicateKeyValue, "key/value already in index");
}
- template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
- const DiskLoc& bucketLoc,
- int* posInOut,
- int direction) const {
- BucketType* bucket = getBucket(txn, bucketLoc);
-
- if (*posInOut < 0 || *posInOut >= bucket->n ) {
- log() << "ASSERT failure advancing btree bucket" << endl;
- log() << " thisLoc: " << bucketLoc.toString() << endl;
- log() << " keyOfs: " << *posInOut << " n:" << bucket->n << " direction: " << direction << endl;
- // log() << bucketSummary() << endl;
- invariant(false);
- }
+ DiskLoc childLoc = childLocForPos(bucket, pos);
- // XXX document
- int adj = direction < 0 ? 1 : 0;
- int ko = *posInOut + direction;
-
- // Look down if we need to.
- DiskLoc nextDownLoc = childLocForPos(bucket, ko + adj);
- BucketType* nextDown = getBucket(txn, nextDownLoc);
- if (NULL != nextDown) {
- for (;;) {
- if (direction > 0) {
- *posInOut = 0;
- }
- else {
- *posInOut = nextDown->n - 1;
- }
- DiskLoc newNextDownLoc = childLocForPos(nextDown, *posInOut + adj);
- BucketType* newNextDownBucket = getBucket(txn, newNextDownLoc);
- if (NULL == newNextDownBucket) {
- break;
- }
- nextDownLoc = newNextDownLoc;
- nextDown = newNextDownBucket;
- }
- return nextDownLoc;
- }
+ // In current usage, rightChild is NULL for a new key and is not NULL when we are
+ // promoting a split key. These are the only two cases where _insert() is called
+ // currently.
+ if (childLoc.isNull() || !rightChild.isNull()) {
+ insertHere(txn, bucketLoc, pos, key, recordLoc, leftChild, rightChild);
+ return Status::OK();
+ } else {
+ return _insert(txn,
+ getBucket(txn, childLoc),
+ childLoc,
+ key,
+ recordLoc,
+ dupsAllowed,
+ DiskLoc(),
+ DiskLoc());
+ }
+}
+
+template <class BtreeLayout>
+DiskLoc BtreeLogic<BtreeLayout>::advance(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ int* posInOut,
+ int direction) const {
+ BucketType* bucket = getBucket(txn, bucketLoc);
+
+ if (*posInOut < 0 || *posInOut >= bucket->n) {
+ log() << "ASSERT failure advancing btree bucket" << endl;
+ log() << " thisLoc: " << bucketLoc.toString() << endl;
+ log() << " keyOfs: " << *posInOut << " n:" << bucket->n << " direction: " << direction
+ << endl;
+ // log() << bucketSummary() << endl;
+ invariant(false);
+ }
- // Looking down isn't the right choice, move forward.
- if (ko < bucket->n && ko >= 0) {
- *posInOut = ko;
- return bucketLoc;
- }
+    // 'adj' selects the child slot on the side we are moving toward: when advancing forward we
+    // descend into the child just after the current key; when advancing backward (adj == 1) we
+    // descend into the child just before it.
+ int adj = direction < 0 ? 1 : 0;
+ int ko = *posInOut + direction;
- // Hit the end of the bucket, move up and over.
- DiskLoc childLoc = bucketLoc;
- DiskLoc ancestor = getBucket(txn, bucketLoc)->parent;
+ // Look down if we need to.
+ DiskLoc nextDownLoc = childLocForPos(bucket, ko + adj);
+ BucketType* nextDown = getBucket(txn, nextDownLoc);
+ if (NULL != nextDown) {
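+        // Walk down to the extreme descendant on the side we are moving toward: the leftmost
+        // position when advancing forward, the rightmost when advancing backward.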
for (;;) {
- if (ancestor.isNull()) {
- break;
+ if (direction > 0) {
+ *posInOut = 0;
+ } else {
+ *posInOut = nextDown->n - 1;
}
- BucketType* an = getBucket(txn, ancestor);
- for (int i = 0; i < an->n; i++) {
- if (childLocForPos(an, i + adj) == childLoc) {
- *posInOut = i;
- return ancestor;
- }
+ DiskLoc newNextDownLoc = childLocForPos(nextDown, *posInOut + adj);
+ BucketType* newNextDownBucket = getBucket(txn, newNextDownLoc);
+ if (NULL == newNextDownBucket) {
+ break;
}
- invariant(direction < 0 || an->nextChild == childLoc);
- // parent exhausted also, keep going up
- childLoc = ancestor;
- ancestor = an->parent;
+ nextDownLoc = newNextDownLoc;
+ nextDown = newNextDownBucket;
}
+ return nextDownLoc;
+ }
- return DiskLoc();
+ // Looking down isn't the right choice, move forward.
+ if (ko < bucket->n && ko >= 0) {
+ *posInOut = ko;
+ return bucketLoc;
}
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::keyIsUsed(OperationContext* txn,
- const DiskLoc& loc,
- const int& pos) const {
- return getKeyHeader(getBucket(txn, loc), pos).isUsed();
+ // Hit the end of the bucket, move up and over.
+ DiskLoc childLoc = bucketLoc;
+ DiskLoc ancestor = getBucket(txn, bucketLoc)->parent;
+ for (;;) {
+ if (ancestor.isNull()) {
+ break;
+ }
+ BucketType* an = getBucket(txn, ancestor);
+ for (int i = 0; i < an->n; i++) {
+ if (childLocForPos(an, i + adj) == childLoc) {
+ *posInOut = i;
+ return ancestor;
+ }
+ }
+ invariant(direction < 0 || an->nextChild == childLoc);
+ // parent exhausted also, keep going up
+ childLoc = ancestor;
+ ancestor = an->parent;
}
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
- const BSONObj& key,
- const DiskLoc& recordLoc,
- const int direction,
- int* posOut,
- DiskLoc* bucketLocOut) const {
- // Clear out any data.
- *posOut = 0;
- *bucketLocOut = DiskLoc();
+ return DiskLoc();
+}
- bool found = false;
- KeyDataOwnedType owned(key);
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::keyIsUsed(OperationContext* txn,
+ const DiskLoc& loc,
+ const int& pos) const {
+ return getKeyHeader(getBucket(txn, loc), pos).isUsed();
+}
- *bucketLocOut = _locate(txn, getRootLoc(txn), owned, posOut, &found, recordLoc, direction);
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::locate(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& recordLoc,
+ const int direction,
+ int* posOut,
+ DiskLoc* bucketLocOut) const {
+ // Clear out any data.
+ *posOut = 0;
+ *bucketLocOut = DiskLoc();
- if (!found) {
- return false;
- }
+ bool found = false;
+ KeyDataOwnedType owned(key);
- skipUnusedKeys(txn, bucketLocOut, posOut, direction);
+ *bucketLocOut = _locate(txn, getRootLoc(txn), owned, posOut, &found, recordLoc, direction);
- return found;
+ if (!found) {
+ return false;
}
- /**
- * Recursively walk down the btree, looking for a match of key and recordLoc.
- * Caller should have acquired lock on bucketLoc.
- */
- template <class BtreeLayout>
- DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
- const DiskLoc& bucketLoc,
- const KeyDataType& key,
- int* posOut,
- bool* foundOut,
- const DiskLoc& recordLoc,
- const int direction) const {
- int position;
- BucketType* bucket = getBucket(txn, bucketLoc);
- // XXX: owned to not owned conversion(?)
- _find(txn, bucket, key, recordLoc, false, &position, foundOut);
-
- // Look in our current bucket.
- if (*foundOut) {
- *posOut = position;
- return bucketLoc;
- }
+ skipUnusedKeys(txn, bucketLocOut, posOut, direction);
- // Not in our current bucket. 'position' tells us where there may be a child.
- DiskLoc childLoc = childLocForPos(bucket, position);
+ return found;
+}
- if (!childLoc.isNull()) {
- DiskLoc inChild = _locate(txn, childLoc, key, posOut, foundOut, recordLoc, direction);
- if (!inChild.isNull()) {
- return inChild;
- }
+/**
+ * Recursively walk down the btree, looking for a match of key and recordLoc.
+ * Caller should have acquired lock on bucketLoc.
+ */
+template <class BtreeLayout>
+DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const KeyDataType& key,
+ int* posOut,
+ bool* foundOut,
+ const DiskLoc& recordLoc,
+ const int direction) const {
+ int position;
+ BucketType* bucket = getBucket(txn, bucketLoc);
+ // XXX: owned to not owned conversion(?)
+ _find(txn, bucket, key, recordLoc, false, &position, foundOut);
+
+ // Look in our current bucket.
+ if (*foundOut) {
+ *posOut = position;
+ return bucketLoc;
+ }
+
+ // Not in our current bucket. 'position' tells us where there may be a child.
+ DiskLoc childLoc = childLocForPos(bucket, position);
+
+ if (!childLoc.isNull()) {
+ DiskLoc inChild = _locate(txn, childLoc, key, posOut, foundOut, recordLoc, direction);
+ if (!inChild.isNull()) {
+ return inChild;
}
+ }
- *posOut = position;
+ *posOut = position;
- if (direction < 0) {
- // The key *would* go to our left.
- (*posOut)--;
- if (-1 == *posOut) {
- // But there's no space for that in our bucket.
- return DiskLoc();
- }
- else {
- return bucketLoc;
- }
+ if (direction < 0) {
+ // The key *would* go to our left.
+ (*posOut)--;
+ if (-1 == *posOut) {
+ // But there's no space for that in our bucket.
+ return DiskLoc();
+ } else {
+ return bucketLoc;
}
- else {
- // The key would go to our right...
- if (bucket->n == *posOut) {
- return DiskLoc();
- }
- else {
- // But only if there is space.
- return bucketLoc;
- }
+ } else {
+ // The key would go to our right...
+ if (bucket->n == *posOut) {
+ return DiskLoc();
+ } else {
+ // But only if there is space.
+ return bucketLoc;
}
}
+}
- // TODO relcoate
- template <class BtreeLayout>
- bool BtreeLogic<BtreeLayout>::isHead(BucketType* bucket) {
- return bucket->parent.isNull();
- }
+// TODO relocate
+template <class BtreeLayout>
+bool BtreeLogic<BtreeLayout>::isHead(BucketType* bucket) {
+ return bucket->parent.isNull();
+}
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::getBucket(OperationContext* txn, const RecordId id) const {
- if (id.isNull()) {
- return NULL;
- }
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::getBucket(
+ OperationContext* txn, const RecordId id) const {
+ if (id.isNull()) {
+ return NULL;
+ }
- RecordData recordData = _recordStore->dataFor(txn, id);
+ RecordData recordData = _recordStore->dataFor(txn, id);
- // we need to be working on the raw bytes, not a transient copy
- invariant(!recordData.isOwned());
+ // we need to be working on the raw bytes, not a transient copy
+ invariant(!recordData.isOwned());
- return reinterpret_cast<BucketType*>(const_cast<char*>(recordData.data()));
- }
+ return reinterpret_cast<BucketType*>(const_cast<char*>(recordData.data()));
+}
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::getRoot(OperationContext* txn) const {
- return getBucket(txn, _headManager->getHead(txn));
- }
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::getRoot(
+ OperationContext* txn) const {
+ return getBucket(txn, _headManager->getHead(txn));
+}
- template <class BtreeLayout>
- DiskLoc
- BtreeLogic<BtreeLayout>::getRootLoc(OperationContext* txn) const {
- return DiskLoc::fromRecordId(_headManager->getHead(txn));
- }
+template <class BtreeLayout>
+DiskLoc BtreeLogic<BtreeLayout>::getRootLoc(OperationContext* txn) const {
+ return DiskLoc::fromRecordId(_headManager->getHead(txn));
+}
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::BucketType*
- BtreeLogic<BtreeLayout>::childForPos(OperationContext* txn, BucketType* bucket, int pos) const {
- DiskLoc loc = childLocForPos(bucket, pos);
- return getBucket(txn, loc);
- }
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::BucketType* BtreeLogic<BtreeLayout>::childForPos(
+ OperationContext* txn, BucketType* bucket, int pos) const {
+ DiskLoc loc = childLocForPos(bucket, pos);
+ return getBucket(txn, loc);
+}
- template <class BtreeLayout>
- typename BtreeLogic<BtreeLayout>::LocType&
- BtreeLogic<BtreeLayout>::childLocForPos(BucketType* bucket, int pos) {
- if (bucket->n == pos) {
- return bucket->nextChild;
- }
- else {
- return getKeyHeader(bucket, pos).prevChildBucket;
- }
+template <class BtreeLayout>
+typename BtreeLogic<BtreeLayout>::LocType& BtreeLogic<BtreeLayout>::childLocForPos(
+ BucketType* bucket, int pos) {
+ if (bucket->n == pos) {
+ return bucket->nextChild;
+ } else {
+ return getKeyHeader(bucket, pos).prevChildBucket;
}
+}
- //
- // And, template stuff.
- //
+//
+// And, template stuff.
+//
- // V0 format.
- template struct FixedWidthKey<DiskLoc>;
- template class BtreeLogic<BtreeLayoutV0>;
+// V0 format.
+template struct FixedWidthKey<DiskLoc>;
+template class BtreeLogic<BtreeLayoutV0>;
- // V1 format.
- template struct FixedWidthKey<DiskLoc56Bit>;
- template class BtreeLogic<BtreeLayoutV1>;
+// V1 format.
+template struct FixedWidthKey<DiskLoc56Bit>;
+template class BtreeLogic<BtreeLayoutV1>;
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.h b/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
index 48a307f3b4d..3c742170bcd 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.h
@@ -41,539 +41,522 @@
namespace mongo {
- class RecordStore;
- class SavedCursorRegistry;
+class RecordStore;
+class SavedCursorRegistry;
- // Used for unit-testing only
- template <class BtreeLayout> class BtreeLogicTestBase;
- template <class BtreeLayout> class ArtificialTreeBuilder;
-
- /**
- * This is the logic for manipulating the Btree. It is (mostly) independent of the on-disk
- * format.
- */
- template <class BtreeLayout>
- class BtreeLogic {
- public:
- // AKA _keyNode
- typedef typename BtreeLayout::FixedWidthKeyType KeyHeaderType;
-
- // AKA Key
- typedef typename BtreeLayout::KeyType KeyDataType;
+// Used for unit-testing only
+template <class BtreeLayout>
+class BtreeLogicTestBase;
+template <class BtreeLayout>
+class ArtificialTreeBuilder;
- // AKA KeyOwned
- typedef typename BtreeLayout::KeyOwnedType KeyDataOwnedType;
+/**
+ * This is the logic for manipulating the Btree. It is (mostly) independent of the on-disk
+ * format.
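+ *
+ * Illustrative usage (a sketch only, not compiled as part of the build; it assumes 'txn' is
+ * the current OperationContext and that a HeadManager, RecordStore, SavedCursorRegistry and
+ * Ordering for the index have already been set up):
+ *
+ *     BtreeLogic<BtreeLayoutV1> btree(headManager, recordStore, cursors, ordering, "test_idx");
+ *     uassertStatusOK(btree.initAsEmpty(txn));
+ *     uassertStatusOK(btree.insert(txn, BSON("" << 1), recordLoc, true));  // dupsAllowed=true
+ *     int pos;
+ *     DiskLoc bucketLoc;
+ *     bool found = btree.locate(txn, BSON("" << 1), recordLoc, 1, &pos, &bucketLoc);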
+ */
+template <class BtreeLayout>
+class BtreeLogic {
+public:
+ // AKA _keyNode
+ typedef typename BtreeLayout::FixedWidthKeyType KeyHeaderType;
- // AKA Loc
- typedef typename BtreeLayout::LocType LocType;
+ // AKA Key
+ typedef typename BtreeLayout::KeyType KeyDataType;
- // AKA BucketBasics or BtreeBucket, either one.
- typedef typename BtreeLayout::BucketType BucketType;
+ // AKA KeyOwned
+ typedef typename BtreeLayout::KeyOwnedType KeyDataOwnedType;
- /**
- * 'head' manages the catalog information.
- * 'store' allocates and frees buckets.
- * 'ordering' is meta-information we store in the catalog.
- * 'indexName' is a string identifying the index that we use to print errors with.
- */
- BtreeLogic(HeadManager* head,
- RecordStore* store,
- SavedCursorRegistry* cursors,
- const Ordering& ordering,
- const std::string& indexName)
- : _headManager(head),
- _recordStore(store),
- _cursorRegistry(cursors),
- _ordering(ordering),
- _indexName(indexName) {
- }
+ // AKA Loc
+ typedef typename BtreeLayout::LocType LocType;
- //
- // Public-facing
- //
+ // AKA BucketBasics or BtreeBucket, either one.
+ typedef typename BtreeLayout::BucketType BucketType;
- class Builder {
- public:
- typedef typename BtreeLayout::KeyOwnedType KeyDataOwnedType;
- typedef typename BtreeLayout::KeyType KeyDataType;
+ /**
+ * 'head' manages the catalog information.
+ * 'store' allocates and frees buckets.
+ * 'ordering' is meta-information we store in the catalog.
+ * 'indexName' is a string identifying the index that we use to print errors with.
+ */
+ BtreeLogic(HeadManager* head,
+ RecordStore* store,
+ SavedCursorRegistry* cursors,
+ const Ordering& ordering,
+ const std::string& indexName)
+ : _headManager(head),
+ _recordStore(store),
+ _cursorRegistry(cursors),
+ _ordering(ordering),
+ _indexName(indexName) {}
+
+ //
+ // Public-facing
+ //
+
+ class Builder {
+ public:
+ typedef typename BtreeLayout::KeyOwnedType KeyDataOwnedType;
+ typedef typename BtreeLayout::KeyType KeyDataType;
- Status addKey(const BSONObj& key, const DiskLoc& loc);
+ Status addKey(const BSONObj& key, const DiskLoc& loc);
- private:
- friend class BtreeLogic;
+ private:
+ friend class BtreeLogic;
- class SetRightLeafLocChange;
+ class SetRightLeafLocChange;
- Builder(BtreeLogic* logic, OperationContext* txn, bool dupsAllowed);
+ Builder(BtreeLogic* logic, OperationContext* txn, bool dupsAllowed);
- /**
- * Creates and returns a new empty bucket to the right of leftSib, maintaining the
- * internal consistency of the tree. leftSib must be the right-most child of its parent
- * or it must be the root.
- */
- DiskLoc newBucket(BucketType* leftSib, DiskLoc leftSibLoc);
+ /**
+ * Creates and returns a new empty bucket to the right of leftSib, maintaining the
+ * internal consistency of the tree. leftSib must be the right-most child of its parent
+ * or it must be the root.
+ */
+ DiskLoc newBucket(BucketType* leftSib, DiskLoc leftSibLoc);
- BucketType* _getModifiableBucket(DiskLoc loc);
- BucketType* _getBucket(DiskLoc loc);
+ BucketType* _getModifiableBucket(DiskLoc loc);
+ BucketType* _getBucket(DiskLoc loc);
- // Not owned.
- BtreeLogic* _logic;
+ // Not owned.
+ BtreeLogic* _logic;
- DiskLoc _rightLeafLoc; // DiskLoc of right-most (highest) leaf bucket.
- bool _dupsAllowed;
- std::unique_ptr<KeyDataOwnedType> _keyLast;
+ DiskLoc _rightLeafLoc; // DiskLoc of right-most (highest) leaf bucket.
+ bool _dupsAllowed;
+ std::unique_ptr<KeyDataOwnedType> _keyLast;
- // Not owned.
- OperationContext* _txn;
- };
+ // Not owned.
+ OperationContext* _txn;
+ };
- /**
- * Caller owns the returned pointer.
- * 'this' must outlive the returned pointer.
- */
- Builder* newBuilder(OperationContext* txn, bool dupsAllowed);
+ /**
+ * Caller owns the returned pointer.
+ * 'this' must outlive the returned pointer.
+ */
+ Builder* newBuilder(OperationContext* txn, bool dupsAllowed);
- Status dupKeyCheck(OperationContext* txn,
- const BSONObj& key,
- const DiskLoc& loc) const;
+ Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const DiskLoc& loc) const;
- Status insert(OperationContext* txn,
- const BSONObj& rawKey,
- const DiskLoc& value,
- bool dupsAllowed);
+ Status insert(OperationContext* txn,
+ const BSONObj& rawKey,
+ const DiskLoc& value,
+ bool dupsAllowed);
- /**
- * Navigates down the tree and locates the bucket and position containing a record with
- * the specified <key, recordLoc> combination.
- *
- * @return true if the exact <key, recordLoc> was found. Otherwise, false and the
- * bucketLocOut would contain the bucket containing key which is before or after the
- * searched one (dependent on the direction).
- */
- bool locate(OperationContext* txn,
- const BSONObj& key,
- const DiskLoc& recordLoc,
- const int direction,
- int* posOut,
- DiskLoc* bucketLocOut) const;
+ /**
+ * Navigates down the tree and locates the bucket and position containing a record with
+ * the specified <key, recordLoc> combination.
+ *
+     * @return true if the exact <key, recordLoc> was found. Otherwise returns false, and
+     * bucketLocOut contains the bucket holding the key immediately before or after the
+     * searched one (depending on the direction).
+ */
+ bool locate(OperationContext* txn,
+ const BSONObj& key,
+ const DiskLoc& recordLoc,
+ const int direction,
+ int* posOut,
+ DiskLoc* bucketLocOut) const;
- void advance(OperationContext* txn,
- DiskLoc* bucketLocInOut,
- int* posInOut,
- int direction) const;
+ void advance(OperationContext* txn,
+ DiskLoc* bucketLocInOut,
+ int* posInOut,
+ int direction) const;
- bool exists(OperationContext* txn, const KeyDataType& key) const;
+ bool exists(OperationContext* txn, const KeyDataType& key) const;
- bool unindex(OperationContext* txn,
- const BSONObj& key,
- const DiskLoc& recordLoc);
+ bool unindex(OperationContext* txn, const BSONObj& key, const DiskLoc& recordLoc);
- bool isEmpty(OperationContext* txn) const;
+ bool isEmpty(OperationContext* txn) const;
- long long fullValidate(OperationContext*,
- long long *unusedCount,
- bool strict,
- bool dumpBuckets,
- unsigned depth) const;
+ long long fullValidate(OperationContext*,
+ long long* unusedCount,
+ bool strict,
+ bool dumpBuckets,
+ unsigned depth) const;
- DiskLoc getDiskLoc(OperationContext* txn,
- const DiskLoc& bucketLoc,
- const int keyOffset) const;
+ DiskLoc getDiskLoc(OperationContext* txn, const DiskLoc& bucketLoc, const int keyOffset) const;
- BSONObj getKey(OperationContext* txn,
- const DiskLoc& bucketLoc,
- const int keyOffset) const;
+ BSONObj getKey(OperationContext* txn, const DiskLoc& bucketLoc, const int keyOffset) const;
- DiskLoc getHead(OperationContext* txn) const {
- return DiskLoc::fromRecordId(_headManager->getHead(txn));
- }
+ DiskLoc getHead(OperationContext* txn) const {
+ return DiskLoc::fromRecordId(_headManager->getHead(txn));
+ }
- Status touch(OperationContext* txn) const;
+ Status touch(OperationContext* txn) const;
- //
- // Composite key navigation methods
- //
+ //
+ // Composite key navigation methods
+ //
- void customLocate(OperationContext* txn,
- DiskLoc* locInOut,
- int* keyOfsInOut,
- const IndexSeekPoint& seekPoint,
- int direction) const;
+ void customLocate(OperationContext* txn,
+ DiskLoc* locInOut,
+ int* keyOfsInOut,
+ const IndexSeekPoint& seekPoint,
+ int direction) const;
- void advanceTo(OperationContext*,
- DiskLoc* thisLocInOut,
- int* keyOfsInOut,
- const IndexSeekPoint& seekPoint,
- int direction) const;
+ void advanceTo(OperationContext*,
+ DiskLoc* thisLocInOut,
+ int* keyOfsInOut,
+ const IndexSeekPoint& seekPoint,
+ int direction) const;
- void restorePosition(OperationContext* txn,
- const BSONObj& savedKey,
- const DiskLoc& savedLoc,
- int direction,
- DiskLoc* bucketInOut,
- int* keyOffsetInOut) const;
+ void restorePosition(OperationContext* txn,
+ const BSONObj& savedKey,
+ const DiskLoc& savedLoc,
+ int direction,
+ DiskLoc* bucketInOut,
+ int* keyOffsetInOut) const;
- //
- // Creation and deletion
- //
+ //
+ // Creation and deletion
+ //
- /**
- * Returns OK if the index was uninitialized before, error status otherwise.
- */
- Status initAsEmpty(OperationContext* txn);
+ /**
+ * Returns OK if the index was uninitialized before, error status otherwise.
+ */
+ Status initAsEmpty(OperationContext* txn);
- //
- // Size constants
- //
+ //
+ // Size constants
+ //
- const RecordStore* getRecordStore() const { return _recordStore; }
+ const RecordStore* getRecordStore() const {
+ return _recordStore;
+ }
- SavedCursorRegistry* savedCursors() const { return _cursorRegistry; }
+ SavedCursorRegistry* savedCursors() const {
+ return _cursorRegistry;
+ }
- static int lowWaterMark();
-
- Ordering ordering() const { return _ordering; }
+ static int lowWaterMark();
- int customBSONCmp(const BSONObj& inIndex_left,
- const IndexSeekPoint& seekPoint_right,
- int direction) const;
+ Ordering ordering() const {
+ return _ordering;
+ }
- private:
- friend class BtreeLogic::Builder;
+ int customBSONCmp(const BSONObj& inIndex_left,
+ const IndexSeekPoint& seekPoint_right,
+ int direction) const;
- // Used for unit-testing only
- friend class BtreeLogicTestBase<BtreeLayout>;
- friend class ArtificialTreeBuilder<BtreeLayout>;
+private:
+ friend class BtreeLogic::Builder;
- /**
- * This is an in memory wrapper for the variable length data associated with a
- * KeyHeaderType. It points to on-disk data but is not itself on-disk data.
- *
- * This object and its BSONObj 'key' will become invalid if the KeyHeaderType data that owns
- * this it is moved within the btree. In general, a KeyWrapper should not be expected to be
- * valid after a write.
- */
- struct FullKey {
- FullKey(const BucketType* bucket, int i)
- : header(getKeyHeader(bucket, i)),
- prevChildBucket(header.prevChildBucket),
- recordLoc(header.recordLoc),
- data(bucket->data + header.keyDataOfs()) { }
+ // Used for unit-testing only
+ friend class BtreeLogicTestBase<BtreeLayout>;
+ friend class ArtificialTreeBuilder<BtreeLayout>;
- // This is actually a reference to something on-disk.
- const KeyHeaderType& header;
+ /**
+ * This is an in memory wrapper for the variable length data associated with a
+ * KeyHeaderType. It points to on-disk data but is not itself on-disk data.
+ *
+ * This object and its BSONObj 'key' will become invalid if the KeyHeaderType data that owns
+ * this is moved within the btree. In general, a KeyWrapper should not be expected to be
+ * valid after a write.
+ */
+ struct FullKey {
+ FullKey(const BucketType* bucket, int i)
+ : header(getKeyHeader(bucket, i)),
+ prevChildBucket(header.prevChildBucket),
+ recordLoc(header.recordLoc),
+ data(bucket->data + header.keyDataOfs()) {}
+
+ // This is actually a reference to something on-disk.
+ const KeyHeaderType& header;
+
+ // These are actually in 'header'.
+ const LocType& prevChildBucket;
+ const LocType& recordLoc;
+
+ // This is *not* memory-mapped but its members point to something on-disk.
+ KeyDataType data;
+ };
- // These are actually in 'header'.
- const LocType& prevChildBucket;
- const LocType& recordLoc;
+ //
+ // Functions that depend on the templated type info but nothing in 'this'.
+ //
- // This is *not* memory-mapped but its members point to something on-disk.
- KeyDataType data;
- };
+ static LocType& childLocForPos(BucketType* bucket, int pos);
- //
- // Functions that depend on the templated type info but nothing in 'this'.
- //
+ static FullKey getFullKey(const BucketType* bucket, int i);
- static LocType& childLocForPos(BucketType* bucket, int pos);
+ static KeyHeaderType& getKeyHeader(BucketType* bucket, int i);
- static FullKey getFullKey(const BucketType* bucket, int i);
+ static const KeyHeaderType& getKeyHeader(const BucketType* bucket, int i);
- static KeyHeaderType& getKeyHeader(BucketType* bucket, int i);
+ static char* dataAt(BucketType* bucket, short ofs);
- static const KeyHeaderType& getKeyHeader(const BucketType* bucket, int i);
+ static void markUnused(BucketType* bucket, int keypos);
- static char* dataAt(BucketType* bucket, short ofs);
+ static int totalDataSize(BucketType* bucket);
- static void markUnused(BucketType* bucket, int keypos);
+ static void init(BucketType* bucket);
- static int totalDataSize(BucketType* bucket);
+ static int _alloc(BucketType* bucket, int bytes);
- static void init(BucketType* bucket);
+ static void _unalloc(BucketType* bucket, int bytes);
- static int _alloc(BucketType* bucket, int bytes);
+ static void _delKeyAtPos(BucketType* bucket, int keypos, bool mayEmpty = false);
- static void _unalloc(BucketType* bucket, int bytes);
+ static void popBack(BucketType* bucket, DiskLoc* recordLocOut, KeyDataType* keyDataOut);
- static void _delKeyAtPos(BucketType* bucket, int keypos, bool mayEmpty = false);
+ static bool mayDropKey(BucketType* bucket, int index, int refPos);
- static void popBack(BucketType* bucket, DiskLoc* recordLocOut, KeyDataType *keyDataOut);
+ static int _packedDataSize(BucketType* bucket, int refPos);
- static bool mayDropKey(BucketType* bucket, int index, int refPos);
+ static void setPacked(BucketType* bucket);
- static int _packedDataSize(BucketType* bucket, int refPos);
+ static void setNotPacked(BucketType* bucket);
- static void setPacked(BucketType* bucket);
+ static BucketType* btreemod(OperationContext* txn, BucketType* bucket);
- static void setNotPacked(BucketType* bucket);
+ static int splitPos(BucketType* bucket, int keypos);
- static BucketType* btreemod(OperationContext* txn, BucketType* bucket);
+ static void reserveKeysFront(BucketType* bucket, int nAdd);
- static int splitPos(BucketType* bucket, int keypos);
+ static void setKey(BucketType* bucket,
+ int i,
+ const DiskLoc recordLoc,
+ const KeyDataType& key,
+ const DiskLoc prevChildBucket);
- static void reserveKeysFront(BucketType* bucket, int nAdd);
+ static bool isHead(BucketType* bucket);
- static void setKey(BucketType* bucket,
- int i,
- const DiskLoc recordLoc,
- const KeyDataType &key,
- const DiskLoc prevChildBucket);
+ static void dumpBucket(const BucketType* bucket, int indentLength = 0);
- static bool isHead(BucketType* bucket);
+ static void assertValid(const std::string& ns,
+ BucketType* bucket,
+ const Ordering& ordering,
+ bool force = false);
- static void dumpBucket(const BucketType* bucket, int indentLength = 0);
+ //
+ // 'this'-specific helpers (require record store, catalog information, or ordering, or type
+ // information).
+ //
- static void assertValid(const std::string& ns,
- BucketType* bucket,
- const Ordering& ordering,
- bool force = false);
+ bool basicInsert(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int& keypos,
+ const KeyDataType& key,
+ const DiskLoc recordLoc);
+
+ void dropFront(BucketType* bucket, int nDrop, int& refpos);
+
+ void _pack(OperationContext* txn, BucketType* bucket, const DiskLoc thisLoc, int& refPos);
+
+ void customLocate(OperationContext* txn,
+ DiskLoc* locInOut,
+ int* keyOfsInOut,
+ const IndexSeekPoint& seekPoint,
+ int direction,
+ std::pair<DiskLoc, int>& bestParent) const;
+
+ Status _find(OperationContext* txn,
+ BucketType* bucket,
+ const KeyDataType& key,
+ const DiskLoc& recordLoc,
+ bool errorIfDup,
+ int* keyPositionOut,
+ bool* foundOut) const;
+
+ bool customFind(OperationContext* txn,
+ int low,
+ int high,
+ const IndexSeekPoint& seekPoint,
+ int direction,
+ DiskLoc* thisLocInOut,
+ int* keyOfsInOut,
+ std::pair<DiskLoc, int>& bestParent) const;
+
+ void advanceToImpl(OperationContext* txn,
+ DiskLoc* thisLocInOut,
+ int* keyOfsInOut,
+ const IndexSeekPoint& seekPoint,
+ int direction) const;
- //
- // 'this'-specific helpers (require record store, catalog information, or ordering, or type
- // information).
- //
+ bool wouldCreateDup(OperationContext* txn, const KeyDataType& key, const DiskLoc self) const;
- bool basicInsert(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int& keypos,
- const KeyDataType& key,
- const DiskLoc recordLoc);
+ bool keyIsUsed(OperationContext* txn, const DiskLoc& loc, const int& pos) const;
- void dropFront(BucketType* bucket, int nDrop, int& refpos);
+ void skipUnusedKeys(OperationContext* txn, DiskLoc* loc, int* pos, int direction) const;
- void _pack(OperationContext* txn, BucketType* bucket, const DiskLoc thisLoc, int &refPos);
+ DiskLoc advance(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ int* posInOut,
+ int direction) const;
- void customLocate(OperationContext* txn,
- DiskLoc* locInOut,
- int* keyOfsInOut,
- const IndexSeekPoint& seekPoint,
- int direction,
- std::pair<DiskLoc, int>& bestParent) const;
+ DiskLoc _locate(OperationContext* txn,
+ const DiskLoc& bucketLoc,
+ const KeyDataType& key,
+ int* posOut,
+ bool* foundOut,
+ const DiskLoc& recordLoc,
+ const int direction) const;
- Status _find(OperationContext* txn,
- BucketType* bucket,
- const KeyDataType& key,
- const DiskLoc& recordLoc,
- bool errorIfDup,
- int* keyPositionOut,
- bool* foundOut) const;
-
- bool customFind(OperationContext* txn,
- int low,
- int high,
- const IndexSeekPoint& seekPoint,
- int direction,
- DiskLoc* thisLocInOut,
- int* keyOfsInOut,
- std::pair<DiskLoc, int>& bestParent) const;
-
- void advanceToImpl(OperationContext* txn,
- DiskLoc* thisLocInOut,
- int* keyOfsInOut,
- const IndexSeekPoint& seekPoint,
- int direction) const;
-
- bool wouldCreateDup(OperationContext* txn,
- const KeyDataType& key,
- const DiskLoc self) const;
-
- bool keyIsUsed(OperationContext* txn, const DiskLoc& loc, const int& pos) const;
-
- void skipUnusedKeys(OperationContext* txn,
- DiskLoc* loc,
- int* pos,
- int direction) const;
-
- DiskLoc advance(OperationContext* txn,
- const DiskLoc& bucketLoc,
- int* posInOut,
- int direction) const;
-
- DiskLoc _locate(OperationContext* txn,
- const DiskLoc& bucketLoc,
- const KeyDataType& key,
- int* posOut,
- bool* foundOut,
- const DiskLoc& recordLoc,
- const int direction) const;
+ long long _fullValidate(OperationContext* txn,
+ const DiskLoc bucketLoc,
+ long long* unusedCount,
+ bool strict,
+ bool dumpBuckets,
+ unsigned depth) const;
- long long _fullValidate(OperationContext* txn,
- const DiskLoc bucketLoc,
- long long *unusedCount,
- bool strict,
- bool dumpBuckets,
- unsigned depth) const ;
+ DiskLoc _addBucket(OperationContext* txn);
- DiskLoc _addBucket(OperationContext* txn);
+ bool canMergeChildren(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ const int leftIndex);
- bool canMergeChildren(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- const int leftIndex);
+ // has to look in children of 'bucket' and requires record store
+ int _rebalancedSeparatorPos(OperationContext* txn, BucketType* bucket, int leftIndex);
- // has to look in children of 'bucket' and requires record store
- int _rebalancedSeparatorPos(OperationContext* txn,
- BucketType* bucket,
- int leftIndex);
+ void _packReadyForMod(BucketType* bucket, int& refPos);
- void _packReadyForMod(BucketType* bucket, int &refPos);
+ void truncateTo(BucketType* bucket, int N, int& refPos);
- void truncateTo(BucketType* bucket, int N, int &refPos);
+ void split(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int keypos,
+ const DiskLoc recordLoc,
+ const KeyDataType& key,
+ const DiskLoc lchild,
+ const DiskLoc rchild);
- void split(OperationContext* txn,
+ Status _insert(OperationContext* txn,
BucketType* bucket,
const DiskLoc bucketLoc,
- int keypos,
- const DiskLoc recordLoc,
const KeyDataType& key,
- const DiskLoc lchild,
- const DiskLoc rchild);
-
- Status _insert(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- const KeyDataType& key,
- const DiskLoc recordLoc,
- bool dupsAllowed,
- const DiskLoc leftChild,
- const DiskLoc rightChild);
-
- // TODO take a BucketType*?
- void insertHere(OperationContext* txn,
+ const DiskLoc recordLoc,
+ bool dupsAllowed,
+ const DiskLoc leftChild,
+ const DiskLoc rightChild);
+
+ // TODO take a BucketType*?
+ void insertHere(OperationContext* txn,
+ const DiskLoc bucketLoc,
+ int pos,
+ const KeyDataType& key,
+ const DiskLoc recordLoc,
+ const DiskLoc leftChild,
+ const DiskLoc rightChild);
+
+ std::string dupKeyError(const KeyDataType& key) const;
+
+ void setInternalKey(OperationContext* txn,
+ BucketType* bucket,
const DiskLoc bucketLoc,
- int pos,
- const KeyDataType& key,
+ int keypos,
const DiskLoc recordLoc,
- const DiskLoc leftChild,
- const DiskLoc rightChild);
+ const KeyDataType& key,
+ const DiskLoc lchild,
+ const DiskLoc rchild);
- std::string dupKeyError(const KeyDataType& key) const;
+ void fixParentPtrs(OperationContext* trans,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int firstIndex = 0,
+ int lastIndex = -1);
- void setInternalKey(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int keypos,
- const DiskLoc recordLoc,
- const KeyDataType& key,
- const DiskLoc lchild,
- const DiskLoc rchild);
+ bool mayBalanceWithNeighbors(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc);
- void fixParentPtrs(OperationContext* trans,
+ void doBalanceChildren(OperationContext* txn,
BucketType* bucket,
const DiskLoc bucketLoc,
- int firstIndex = 0,
- int lastIndex = -1);
-
- bool mayBalanceWithNeighbors(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
-
- void doBalanceChildren(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int leftIndex);
-
- void doBalanceLeftToRight(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc thisLoc,
- int leftIndex,
- int split,
- BucketType* l,
- const DiskLoc lchild,
- BucketType* r,
- const DiskLoc rchild);
-
- void doBalanceRightToLeft(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc thisLoc,
- int leftIndex,
- int split,
- BucketType* l,
- const DiskLoc lchild,
- BucketType* r,
- const DiskLoc rchild);
-
- bool tryBalanceChildren(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int leftIndex);
-
- int indexInParent(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc) const;
-
- void doMergeChildren(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int leftIndex);
+ int leftIndex);
- void replaceWithNextChild(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc);
+ void doBalanceLeftToRight(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc thisLoc,
+ int leftIndex,
+ int split,
+ BucketType* l,
+ const DiskLoc lchild,
+ BucketType* r,
+ const DiskLoc rchild);
+
+ void doBalanceRightToLeft(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc thisLoc,
+ int leftIndex,
+ int split,
+ BucketType* l,
+ const DiskLoc lchild,
+ BucketType* r,
+ const DiskLoc rchild);
+
+ bool tryBalanceChildren(OperationContext* txn,
+ BucketType* bucket,
+ const DiskLoc bucketLoc,
+ int leftIndex);
- void deleteInternalKey(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc,
- int keypos);
+ int indexInParent(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc) const;
- void delKeyAtPos(OperationContext* txn,
+ void doMergeChildren(OperationContext* txn,
BucketType* bucket,
const DiskLoc bucketLoc,
- int p);
+ int leftIndex);
- void delBucket(OperationContext* txn,
- BucketType* bucket,
- const DiskLoc bucketLoc);
+ void replaceWithNextChild(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
- void deallocBucket(OperationContext* txn,
+ void deleteInternalKey(OperationContext* txn,
BucketType* bucket,
- const DiskLoc bucketLoc);
+ const DiskLoc bucketLoc,
+ int keypos);
- bool _keyIsAt(const BSONObj& savedKey,
- const DiskLoc& savedLoc,
- BucketType* bucket,
- int keyPos) const;
+ void delKeyAtPos(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc, int p);
- /**
- * Tries to push key into bucket. Return false if it can't because key doesn't fit.
- *
- * bucket must be declared as writable by the caller.
- * The new key/recordLoc pair must be higher than any others in bucket.
- *
- * TODO needs 'this' for _ordering for sanity check
- */
- bool pushBack(BucketType* bucket,
- const DiskLoc recordLoc,
- const KeyDataType& key,
- const DiskLoc prevChild);
+ void delBucket(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
+ void deallocBucket(OperationContext* txn, BucketType* bucket, const DiskLoc bucketLoc);
- BucketType* childForPos(OperationContext* txn, BucketType* bucket, int pos) const;
+ bool _keyIsAt(const BSONObj& savedKey,
+ const DiskLoc& savedLoc,
+ BucketType* bucket,
+ int keyPos) const;
- BucketType* getBucket(OperationContext* txn, const DiskLoc dl) const {
- return getBucket(txn, dl.toRecordId());
- }
- BucketType* getBucket(OperationContext* txn, const RecordId dl) const;
+ /**
+ * Tries to push 'key' into 'bucket'. Returns false if it can't because the key doesn't fit.
+ *
+ * bucket must be declared as writable by the caller.
+ * The new key/recordLoc pair must be higher than any others in bucket.
+ *
+ * TODO needs 'this' for _ordering for sanity check
+ */
+ bool pushBack(BucketType* bucket,
+ const DiskLoc recordLoc,
+ const KeyDataType& key,
+ const DiskLoc prevChild);
- BucketType* getRoot(OperationContext* txn) const;
- DiskLoc getRootLoc(OperationContext* txn) const;
+ BucketType* childForPos(OperationContext* txn, BucketType* bucket, int pos) const;
- //
- // Data
- //
+ BucketType* getBucket(OperationContext* txn, const DiskLoc dl) const {
+ return getBucket(txn, dl.toRecordId());
+ }
+ BucketType* getBucket(OperationContext* txn, const RecordId dl) const;
- // Not owned here.
- HeadManager* _headManager;
+ BucketType* getRoot(OperationContext* txn) const;
- // Not owned here.
- RecordStore* _recordStore;
+ DiskLoc getRootLoc(OperationContext* txn) const;
- // Not owned Here.
- SavedCursorRegistry* _cursorRegistry;
+ //
+ // Data
+ //
- Ordering _ordering;
+ // Not owned here.
+ HeadManager* _headManager;
- std::string _indexName;
- };
+ // Not owned here.
+ RecordStore* _recordStore;
+
+ // Not owned here.
+ SavedCursorRegistry* _cursorRegistry;
+
+ Ordering _ordering;
+
+ std::string _indexName;
+};
} // namespace mongo
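The pushBack() contract documented in the header hunk above (an append succeeds only when the key fits in the bucket's remaining space and sorts after every key already present) can be sketched independently of the on-disk layout. The following is a minimal standalone sketch; SimpleBucket, kBucketCapacity, and pushBackSketch are illustrative assumptions, not the actual BtreeLogic/BucketType types.

// Minimal sketch of the documented pushBack contract: reject a key that does
// not fit, and require that appended keys arrive in ascending order.
// All names and sizes here are assumptions for illustration only.
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

namespace {

const std::size_t kBucketCapacity = 64;  // assumed byte budget per bucket

struct SimpleBucket {
    std::vector<std::string> keys;  // kept in ascending order
    std::size_t bytesUsed = 0;
};

// Returns false (leaving the bucket untouched) when the key does not fit;
// the caller is responsible for only appending keys that sort last.
bool pushBackSketch(SimpleBucket* bucket, const std::string& key) {
    assert(bucket->keys.empty() || bucket->keys.back() < key);
    if (bucket->bytesUsed + key.size() > kBucketCapacity) {
        return false;  // caller must split or allocate a new bucket
    }
    bucket->keys.push_back(key);
    bucket->bytesUsed += key.size();
    return true;
}

}  // namespace

int main() {
    SimpleBucket bucket;
    assert(pushBackSketch(&bucket, "aaaa"));
    assert(pushBackSketch(&bucket, "bbbb"));
    // An oversized key is rejected rather than truncated.
    assert(!pushBackSketch(&bucket, std::string(100, 'z')));
    return 0;
}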
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index 1c0bd1c1505..b4e42196c99 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -43,2070 +43,2244 @@
namespace mongo {
- using std::string;
+using std::string;
+
+/**
+ * This class is made a friend of BtreeLogic so that the tests can access whatever private
+ * methods they need.
+ */
+template <class BtreeLayoutType>
+class BtreeLogicTestBase {
+public:
+ typedef typename BtreeLayoutType::BucketType BucketType;
+ typedef typename BtreeLayoutType::FixedWidthKeyType FixedWidthKeyType;
+
+ typedef typename BtreeLogic<BtreeLayoutType>::FullKey FullKey;
+ typedef typename BtreeLogic<BtreeLayoutType>::KeyDataOwnedType KeyDataOwnedType;
+
+ BtreeLogicTestBase() : _helper(BSON("TheKey" << 1)) {}
+
+ virtual ~BtreeLogicTestBase() {}
+
+protected:
+ void checkValidNumKeys(int nKeys) {
+ OperationContextNoop txn;
+ ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ }
+
+ Status insert(const BSONObj& key, const DiskLoc dl, bool dupsAllowed = true) {
+ OperationContextNoop txn;
+ return _helper.btree.insert(&txn, key, dl, dupsAllowed);
+ }
+
+ bool unindex(const BSONObj& key) {
+ OperationContextNoop txn;
+ return _helper.btree.unindex(&txn, key, _helper.dummyDiskLoc);
+ }
+
+ void locate(const BSONObj& key,
+ int expectedPos,
+ bool expectedFound,
+ const RecordId& expectedLocation,
+ int direction) {
+ return locate(
+ key, expectedPos, expectedFound, DiskLoc::fromRecordId(expectedLocation), direction);
+ }
+ void locate(const BSONObj& key,
+ int expectedPos,
+ bool expectedFound,
+ const DiskLoc& expectedLocation,
+ int direction) {
+ int pos;
+ DiskLoc loc;
+ OperationContextNoop txn;
+ ASSERT_EQUALS(expectedFound,
+ _helper.btree.locate(&txn, key, _helper.dummyDiskLoc, direction, &pos, &loc));
+ ASSERT_EQUALS(expectedLocation, loc);
+ ASSERT_EQUALS(expectedPos, pos);
+ }
+
+ const BucketType* child(const BucketType* bucket, int i) const {
+ verify(i <= bucket->n);
+
+ DiskLoc diskLoc;
+ if (i == bucket->n) {
+ diskLoc = bucket->nextChild;
+ } else {
+ FullKey fullKey = BtreeLogic<BtreeLayoutType>::getFullKey(bucket, i);
+ diskLoc = fullKey.prevChildBucket;
+ }
+
+ verify(!diskLoc.isNull());
+
+ return _helper.btree.getBucket(NULL, diskLoc);
+ }
+
+ BucketType* head() const {
+ OperationContextNoop txn;
+ return _helper.btree.getBucket(&txn, _helper.headManager.getHead(&txn));
+ }
+
+ void forcePackBucket(const RecordId bucketLoc) {
+ BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
+
+ bucket->topSize += bucket->emptySize;
+ bucket->emptySize = 0;
+ BtreeLogic<BtreeLayoutType>::setNotPacked(bucket);
+ }
+
+ void truncateBucket(BucketType* bucket, int N, int& refPos) {
+ _helper.btree.truncateTo(bucket, N, refPos);
+ }
+
+ int bucketPackedDataSize(BucketType* bucket, int refPos) {
+ return _helper.btree._packedDataSize(bucket, refPos);
+ }
+
+ int bucketRebalancedSeparatorPos(const RecordId bucketLoc, int leftIndex) {
+ BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
+ OperationContextNoop txn;
+ return _helper.btree._rebalancedSeparatorPos(&txn, bucket, leftIndex);
+ }
+
+ FullKey getKey(const RecordId bucketLoc, int pos) const {
+ const BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
+ return BtreeLogic<BtreeLayoutType>::getFullKey(bucket, pos);
+ }
+
+ void markKeyUnused(const DiskLoc bucketLoc, int keyPos) {
+ BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
+ invariant(keyPos >= 0 && keyPos < bucket->n);
+
+ _helper.btree.getKeyHeader(bucket, keyPos).setUnused();
+ }
+
+ DiskLoc newBucket() {
+ OperationContextNoop txn;
+ return _helper.btree._addBucket(&txn);
+ }
/**
- * This class is made friend of BtreeLogic so we can add whatever private method accesses we
- * need to it, to be used by the tests.
+ * Sets the nextChild pointer for the bucket at the specified location.
*/
- template<class BtreeLayoutType>
- class BtreeLogicTestBase {
- public:
- typedef typename BtreeLayoutType::BucketType BucketType;
- typedef typename BtreeLayoutType::FixedWidthKeyType FixedWidthKeyType;
+ void setBucketNextChild(const DiskLoc bucketLoc, const DiskLoc nextChild) {
+ OperationContextNoop txn;
- typedef typename BtreeLogic<BtreeLayoutType>::FullKey FullKey;
- typedef typename BtreeLogic<BtreeLayoutType>::KeyDataOwnedType KeyDataOwnedType;
+ BucketType* bucket = _helper.btree.getBucket(&txn, bucketLoc);
+ bucket->nextChild = nextChild;
- BtreeLogicTestBase() : _helper(BSON("TheKey" << 1)) {
+ _helper.btree.fixParentPtrs(&txn, bucket, bucketLoc);
+ }
- }
+protected:
+ BtreeLogicTestHelper<BtreeLayoutType> _helper;
+};
- virtual ~BtreeLogicTestBase() {
+//
+// TESTS
+//
- }
+template <class OnDiskFormat>
+class SimpleCreate : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ this->checkValidNumKeys(0);
+ }
+};
+
+template <class OnDiskFormat>
+class SimpleInsertDelete : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ BSONObj key = simpleKey('z');
+ this->insert(key, this->_helper.dummyDiskLoc);
+
+ this->checkValidNumKeys(1);
+ this->locate(key, 0, true, this->_helper.headManager.getHead(&txn), 1);
+
+ this->unindex(key);
+
+ this->checkValidNumKeys(0);
+ this->locate(key, 0, false, DiskLoc(), 1);
+ }
+};
+
+template <class OnDiskFormat>
+class SplitUnevenBucketBase : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ for (int i = 0; i < 10; ++i) {
+ BSONObj shortKey = simpleKey(shortToken(i), 1);
+ this->insert(shortKey, this->_helper.dummyDiskLoc);
+
+ BSONObj longKey = simpleKey(longToken(i), 800);
+ this->insert(longKey, this->_helper.dummyDiskLoc);
+ }
+
+ this->checkValidNumKeys(20);
+ ASSERT_EQUALS(1, this->head()->n);
+ checkSplit();
+ }
+
+protected:
+ virtual char shortToken(int i) const = 0;
+ virtual char longToken(int i) const = 0;
+ virtual void checkSplit() = 0;
+
+ static char leftToken(int i) {
+ return 'a' + i;
+ }
+
+ static char rightToken(int i) {
+ return 'z' - i;
+ }
+};
+
+template <class OnDiskFormat>
+class SplitRightHeavyBucket : public SplitUnevenBucketBase<OnDiskFormat> {
+private:
+ virtual char shortToken(int i) const {
+ return this->leftToken(i);
+ }
+ virtual char longToken(int i) const {
+ return this->rightToken(i);
+ }
+ virtual void checkSplit() {
+ ASSERT_EQUALS(15, this->child(this->head(), 0)->n);
+ ASSERT_EQUALS(4, this->child(this->head(), 1)->n);
+ }
+};
+
+template <class OnDiskFormat>
+class SplitLeftHeavyBucket : public SplitUnevenBucketBase<OnDiskFormat> {
+private:
+ virtual char shortToken(int i) const {
+ return this->rightToken(i);
+ }
+ virtual char longToken(int i) const {
+ return this->leftToken(i);
+ }
+ virtual void checkSplit() {
+ ASSERT_EQUALS(4, this->child(this->head(), 0)->n);
+ ASSERT_EQUALS(15, this->child(this->head(), 1)->n);
+ }
+};
+
+template <class OnDiskFormat>
+class MissingLocate : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ for (int i = 0; i < 3; ++i) {
+ BSONObj k = simpleKey('b' + 2 * i);
+ this->insert(k, this->_helper.dummyDiskLoc);
+ }
+
+ locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&txn));
+ locateExtended(1, 'c', 'd', this->_helper.headManager.getHead(&txn));
+ locateExtended(1, 'e', 'f', this->_helper.headManager.getHead(&txn));
+ locateExtended(1, 'g', 'g' + 1, RecordId()); // of course, 'h' isn't in the index.
+
+ // old behavior
+ // locateExtended( -1, 'a', 'b', dl() );
+ // locateExtended( -1, 'c', 'd', dl() );
+ // locateExtended( -1, 'e', 'f', dl() );
+ // locateExtended( -1, 'g', 'f', dl() );
+
+ locateExtended(-1, 'a', 'a' - 1, RecordId()); // of course, 'a' - 1 isn't in the index
+ locateExtended(-1, 'c', 'b', this->_helper.headManager.getHead(&txn));
+ locateExtended(-1, 'e', 'd', this->_helper.headManager.getHead(&txn));
+ locateExtended(-1, 'g', 'f', this->_helper.headManager.getHead(&txn));
+ }
+
+private:
+ void locateExtended(int direction, char token, char expectedMatch, RecordId expectedLocation) {
+ const BSONObj k = simpleKey(token);
+ int expectedPos = (expectedMatch - 'b') / 2;
+
+ this->locate(k, expectedPos, false, expectedLocation, direction);
+ }
+};
+
+template <class OnDiskFormat>
+class MissingLocateMultiBucket : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc);
+
+ // This causes split
+ this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc);
+
+ int pos;
+ DiskLoc loc;
+
+ // 'E' is the split point and should be in the head; the rest should be ~50/50
+ const BSONObj splitPoint = simpleKey('E', 800);
+ this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
+ ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc.toRecordId());
+ ASSERT_EQUALS(0, pos);
+
+ // Find the one before 'E'
+ int largePos;
+ DiskLoc largeLoc;
+ this->_helper.btree.locate(
+ &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
+ this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
+
+ // Find the one after 'E'
+ int smallPos;
+ DiskLoc smallLoc;
+ this->_helper.btree.locate(
+ &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
+ this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
+
+ ASSERT_NOT_EQUALS(smallLoc, largeLoc);
+ ASSERT_NOT_EQUALS(smallLoc, loc);
+ ASSERT_NOT_EQUALS(largeLoc, loc);
+ }
+};
- protected:
- void checkValidNumKeys(int nKeys) {
- OperationContextNoop txn;
- ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(&txn, NULL, true, false, 0));
- }
+/**
+ * Validates that adding keys incrementally produces buckets that are 90%/10% full.
+ */
+template <class OnDiskFormat>
+class SERVER983 : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc);
+
+ // This will cause split
+ this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc);
+
+ int pos;
+ DiskLoc loc;
+
+ // 'H' is the maximum 'large' interval key; 90% should be < 'H' and 10% larger
+ const BSONObj splitPoint = simpleKey('H', 800);
+ this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
+ ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc.toRecordId());
+ ASSERT_EQUALS(0, pos);
+
+ // Find the one before 'H'
+ int largePos;
+ DiskLoc largeLoc;
+ this->_helper.btree.locate(
+ &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
+ this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
+
+ // Find the one after 'H'
+ int smallPos;
+ DiskLoc smallLoc;
+ this->_helper.btree.locate(
+ &txn, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
+ this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
+
+ ASSERT_NOT_EQUALS(smallLoc, largeLoc);
+ ASSERT_NOT_EQUALS(smallLoc, loc);
+ ASSERT_NOT_EQUALS(largeLoc, loc);
+ }
+};
+
+template <class OnDiskFormat>
+class DontReuseUnused : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ for (int i = 0; i < 10; ++i) {
+ const BSONObj k = simpleKey('b' + 2 * i, 800);
+ this->insert(k, this->_helper.dummyDiskLoc);
+ }
+
+ const BSONObj root = simpleKey('p', 800);
+ this->unindex(root);
+
+ this->insert(root, this->_helper.dummyDiskLoc);
+ this->locate(root, 0, true, this->head()->nextChild, 1);
+ }
+};
+
+template <class OnDiskFormat>
+class MergeBucketsTestBase : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ for (int i = 0; i < 10; ++i) {
+ const BSONObj k = simpleKey('b' + 2 * i, 800);
+ this->insert(k, this->_helper.dummyDiskLoc);
+ }
+
+ // numRecords() - 1, because this->_helper.dummyDiskLoc is actually in the record store too
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL) - 1);
+
+ long long expectedCount = 10 - unindexKeys();
+ ASSERT_EQUALS(1, this->_helper.recordStore.numRecords(NULL) - 1);
+
+ long long unusedCount = 0;
+ ASSERT_EQUALS(expectedCount,
+ this->_helper.btree.fullValidate(&txn, &unusedCount, true, false, 0));
+ ASSERT_EQUALS(0, unusedCount);
+ }
+
+protected:
+ virtual int unindexKeys() = 0;
+};
+
+template <class OnDiskFormat>
+class MergeBucketsLeft : public MergeBucketsTestBase<OnDiskFormat> {
+ virtual int unindexKeys() {
+ BSONObj k = simpleKey('b', 800);
+ this->unindex(k);
+
+ k = simpleKey('b' + 2, 800);
+ this->unindex(k);
+
+ k = simpleKey('b' + 4, 800);
+ this->unindex(k);
+
+ k = simpleKey('b' + 6, 800);
+ this->unindex(k);
+
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class MergeBucketsRight : public MergeBucketsTestBase<OnDiskFormat> {
+ virtual int unindexKeys() {
+ const BSONObj k = simpleKey('b' + 2 * 9, 800);
+ this->unindex(k);
+ return 1;
+ }
+};
+
+template <class OnDiskFormat>
+class MergeBucketsDontReplaceHead : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ for (int i = 0; i < 18; ++i) {
+ const BSONObj k = simpleKey('a' + i, 800);
+ this->insert(k, this->_helper.dummyDiskLoc);
+ }
+
+ // numRecords(NULL) - 1, because this->_helper.dummyDiskLoc is actually in the record store too
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL) - 1);
+
+ const BSONObj k = simpleKey('a' + 17, 800);
+ this->unindex(k);
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL) - 1);
+
+ long long unusedCount = 0;
+ ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&txn, &unusedCount, true, false, 0));
+ ASSERT_EQUALS(0, unusedCount);
+ }
+};
- Status insert(const BSONObj &key, const DiskLoc dl, bool dupsAllowed = true) {
- OperationContextNoop txn;
- return _helper.btree.insert(&txn, key, dl, dupsAllowed);
- }
+template <class OnDiskFormat>
+class MergeBucketsDelInternal : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- bool unindex(const BSONObj &key) {
- OperationContextNoop txn;
- return _helper.btree.unindex(&txn, key, _helper.dummyDiskLoc);
- }
+ builder.makeTree("{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}");
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- void locate(const BSONObj &key,
- int expectedPos,
- bool expectedFound,
- const RecordId &expectedLocation,
- int direction) {
- return locate(key, expectedPos, expectedFound, DiskLoc::fromRecordId(expectedLocation),
- direction);
- }
- void locate(const BSONObj &key,
- int expectedPos,
- bool expectedFound,
- const DiskLoc &expectedLocation,
- int direction) {
- int pos;
- DiskLoc loc;
- OperationContextNoop txn;
- ASSERT_EQUALS(expectedFound,
- _helper.btree.locate(&txn, key, _helper.dummyDiskLoc, direction, &pos, &loc));
- ASSERT_EQUALS(expectedLocation, loc);
- ASSERT_EQUALS(expectedPos, pos);
- }
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- const BucketType* child(const BucketType* bucket, int i) const {
- verify(i <= bucket->n);
+ const BSONObj k = BSON(""
+ << "bb");
+ verify(this->unindex(k));
- DiskLoc diskLoc;
- if (i == bucket->n) {
- diskLoc = bucket->nextChild;
- }
- else {
- FullKey fullKey = BtreeLogic<BtreeLayoutType>::getFullKey(bucket, i);
- diskLoc = fullKey.prevChildBucket;
- }
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- verify(!diskLoc.isNull());
+ // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
- return _helper.btree.getBucket(NULL, diskLoc);
- }
+ builder.checkStructure("{b:{a:null},d:{c:null},f:{e:null},_:{g:null}}");
+ }
+};
- BucketType* head() const {
- OperationContextNoop txn;
- return _helper.btree.getBucket(&txn, _helper.headManager.getHead(&txn));
- }
+template <class OnDiskFormat>
+class MergeBucketsRightNull : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- void forcePackBucket(const RecordId bucketLoc) {
- BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
+ builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}");
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- bucket->topSize += bucket->emptySize;
- bucket->emptySize = 0;
- BtreeLogic<BtreeLayoutType>::setNotPacked(bucket);
- }
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- void truncateBucket(BucketType* bucket, int N, int &refPos) {
- _helper.btree.truncateTo(bucket, N, refPos);
- }
+ const BSONObj k = BSON(""
+ << "bb");
+ verify(this->unindex(k));
- int bucketPackedDataSize(BucketType* bucket, int refPos) {
- return _helper.btree._packedDataSize(bucket, refPos);
- }
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- int bucketRebalancedSeparatorPos(const RecordId bucketLoc, int leftIndex) {
- BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
- OperationContextNoop txn;
- return _helper.btree._rebalancedSeparatorPos(&txn, bucket, leftIndex);
- }
+ // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
- FullKey getKey(const RecordId bucketLoc, int pos) const {
- const BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
- return BtreeLogic<BtreeLayoutType>::getFullKey(bucket, pos);
- }
+ builder.checkStructure("{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}");
+ }
+};
- void markKeyUnused(const DiskLoc bucketLoc, int keyPos) {
- BucketType* bucket = _helper.btree.getBucket(NULL, bucketLoc);
- invariant(keyPos >= 0 && keyPos < bucket->n);
+// This comment was here during porting; it is not clear what it means:
+//
+// "Not yet handling this case"
+template <class OnDiskFormat>
+class DontMergeSingleBucket : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- _helper.btree.getKeyHeader(bucket, keyPos).setUnused();
- }
+ builder.makeTree("{d:{b:{a:null},c:null}}");
- DiskLoc newBucket() {
- OperationContextNoop txn;
- return _helper.btree._addBucket(&txn);
- }
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- /**
- * Sets the nextChild pointer for the bucket at the specified location.
- */
- void setBucketNextChild(const DiskLoc bucketLoc, const DiskLoc nextChild) {
- OperationContextNoop txn;
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- BucketType* bucket = _helper.btree.getBucket(&txn, bucketLoc);
- bucket->nextChild = nextChild;
+ const BSONObj k = BSON(""
+ << "c");
+ verify(this->unindex(k));
- _helper.btree.fixParentPtrs(&txn, bucket, bucketLoc);
- }
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- protected:
- BtreeLogicTestHelper<BtreeLayoutType> _helper;
- };
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- //
- // TESTS
- //
+ builder.checkStructure("{d:{b:{a:null}}}");
+ }
+};
- template<class OnDiskFormat>
- class SimpleCreate : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+template <class OnDiskFormat>
+class ParentMergeNonRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- this->checkValidNumKeys(0);
- }
- };
+ builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}");
- template<class OnDiskFormat>
- class SimpleInsertDelete : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- BSONObj key = simpleKey('z');
- this->insert(key, this->_helper.dummyDiskLoc);
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- this->checkValidNumKeys(1);
- this->locate(key, 0, true, this->_helper.headManager.getHead(&txn), 1);
+ const BSONObj k = BSON(""
+ << "bb");
+ verify(this->unindex(k));
- this->unindex(key);
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- this->checkValidNumKeys(0);
- this->locate(key, 0, false, DiskLoc(), 1);
- }
- };
+ // Child does not currently replace parent in this case. Also, the tree
+ // has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class SplitUnevenBucketBase : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ builder.checkStructure("{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}");
+ }
+};
- for (int i = 0; i < 10; ++i) {
- BSONObj shortKey = simpleKey(shortToken(i), 1);
- this->insert(shortKey, this->_helper.dummyDiskLoc);
+template <class OnDiskFormat>
+class ParentMergeNonRightToRight : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- BSONObj longKey = simpleKey(longToken(i), 800);
- this->insert(longKey, this->_helper.dummyDiskLoc);
- }
+ builder.makeTree("{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}");
- this->checkValidNumKeys(20);
- ASSERT_EQUALS(1, this->head()->n);
- checkSplit();
- }
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- protected:
- virtual char shortToken(int i) const = 0;
- virtual char longToken(int i) const = 0;
- virtual void checkSplit() = 0;
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- static char leftToken(int i) {
- return 'a' + i;
- }
+ const BSONObj k = BSON(""
+ << "ff");
+ verify(this->unindex(k));
- static char rightToken(int i) {
- return 'z' - i;
- }
- };
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- template<class OnDiskFormat>
- class SplitRightHeavyBucket : public SplitUnevenBucketBase<OnDiskFormat> {
- private:
- virtual char shortToken(int i) const {
- return this->leftToken(i);
- }
- virtual char longToken(int i) const {
- return this->rightToken(i);
- }
- virtual void checkSplit() {
- ASSERT_EQUALS(15, this->child(this->head(), 0)->n);
- ASSERT_EQUALS(4, this->child(this->head(), 1)->n);
- }
- };
+ // Child does not currently replace parent in this case. Also, the tree
+ // has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class SplitLeftHeavyBucket : public SplitUnevenBucketBase<OnDiskFormat> {
- private:
- virtual char shortToken(int i) const {
- return this->rightToken(i);
- }
- virtual char longToken(int i) const {
- return this->leftToken(i);
- }
- virtual void checkSplit() {
- ASSERT_EQUALS(4, this->child(this->head(), 0)->n);
- ASSERT_EQUALS(15, this->child(this->head(), 1)->n);
- }
- };
+ builder.checkStructure("{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}");
+ }
+};
- template<class OnDiskFormat>
- class MissingLocate : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+template <class OnDiskFormat>
+class CantMergeRightNoMerge : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- for (int i = 0; i < 3; ++i) {
- BSONObj k = simpleKey('b' + 2 * i);
- this->insert(k, this->_helper.dummyDiskLoc);
- }
-
- locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&txn));
- locateExtended(1, 'c', 'd', this->_helper.headManager.getHead(&txn));
- locateExtended(1, 'e', 'f', this->_helper.headManager.getHead(&txn));
- locateExtended(1, 'g', 'g' + 1, RecordId()); // of course, 'h' isn't in the index.
-
- // old behavior
- // locateExtended( -1, 'a', 'b', dl() );
- // locateExtended( -1, 'c', 'd', dl() );
- // locateExtended( -1, 'e', 'f', dl() );
- // locateExtended( -1, 'g', 'f', dl() );
-
- locateExtended(-1, 'a', 'a' - 1, RecordId()); // of course, 'a' - 1 isn't in the index
- locateExtended(-1, 'c', 'b', this->_helper.headManager.getHead(&txn));
- locateExtended(-1, 'e', 'd', this->_helper.headManager.getHead(&txn));
- locateExtended(-1, 'g', 'f', this->_helper.headManager.getHead(&txn));
- }
+ builder.makeTree(
+ "{d:{b:{a:null},bb:null,cc:{c:null}},"
+ "dd:null,"
+ "_:{f:{e:null},h:{g:null}}}");
- private:
- void locateExtended(
- int direction, char token, char expectedMatch, RecordId expectedLocation) {
- const BSONObj k = simpleKey(token);
- int expectedPos = (expectedMatch - 'b') / 2;
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- this->locate(k, expectedPos, false, expectedLocation, direction);
- }
- };
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class MissingLocateMultiBucket : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
-
- this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc);
-
- // This causes split
- this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc);
-
- int pos;
- DiskLoc loc;
-
- // 'E' is the split point and should be in the head the rest should be ~50/50
- const BSONObj splitPoint = simpleKey('E', 800);
- this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
- ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc.toRecordId());
- ASSERT_EQUALS(0, pos);
-
- // Find the one before 'E'
- int largePos;
- DiskLoc largeLoc;
- this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
- this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
-
- // Find the one after 'E'
- int smallPos;
- DiskLoc smallLoc;
- this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
- this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
-
- ASSERT_NOT_EQUALS(smallLoc, largeLoc);
- ASSERT_NOT_EQUALS(smallLoc, loc);
- ASSERT_NOT_EQUALS(largeLoc, loc);
- }
- };
+ const BSONObj k = BSON(""
+ << "bb");
+ verify(this->unindex(k));
- /**
- * Validates that adding keys incrementally produces buckets, which are 90%/10% full.
- */
- template<class OnDiskFormat>
- class SERVER983 : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
-
- this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc);
-
- // This will cause split
- this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc);
-
- int pos;
- DiskLoc loc;
-
- // 'H' is the maximum 'large' interval key, 90% should be < 'H' and 10% larger
- const BSONObj splitPoint = simpleKey('H', 800);
- this->_helper.btree.locate(&txn, splitPoint, this->_helper.dummyDiskLoc, 1, &pos, &loc);
- ASSERT_EQUALS(this->_helper.headManager.getHead(&txn), loc.toRecordId());
- ASSERT_EQUALS(0, pos);
-
- // Find the one before 'H'
- int largePos;
- DiskLoc largeLoc;
- this->_helper.btree.locate(&txn,
- splitPoint, this->_helper.dummyDiskLoc, 1, &largePos, &largeLoc);
- this->_helper.btree.advance(&txn, &largeLoc, &largePos, -1);
-
- // Find the one after 'H'
- int smallPos;
- DiskLoc smallLoc;
- this->_helper.btree.locate(&txn,
- splitPoint, this->_helper.dummyDiskLoc, 1, &smallPos, &smallLoc);
- this->_helper.btree.advance(&txn, &smallLoc, &smallPos, 1);
-
- ASSERT_NOT_EQUALS(smallLoc, largeLoc);
- ASSERT_NOT_EQUALS(smallLoc, loc);
- ASSERT_NOT_EQUALS(largeLoc, loc);
- }
- };
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- template<class OnDiskFormat>
- class DontReuseUnused : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- for (int i = 0; i < 10; ++i) {
- const BSONObj k = simpleKey('b' + 2 * i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
- }
+ builder.checkStructure(
+ "{d:{b:{a:null},cc:{c:null}},"
+ "dd:null,"
+ "_:{f:{e:null},h:{g:null}}}");
+ }
+};
- const BSONObj root = simpleKey('p', 800);
- this->unindex(root);
+template <class OnDiskFormat>
+class CantMergeLeftNoMerge : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- this->insert(root, this->_helper.dummyDiskLoc);
- this->locate(root, 0, true, this->head()->nextChild, 1);
- }
- };
+ builder.makeTree("{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}");
- template<class OnDiskFormat>
- class MergeBucketsTestBase : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- for (int i = 0; i < 10; ++i) {
- const BSONObj k = simpleKey('b' + 2 * i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
- }
-
- // numRecords() - 1, because this->_helper.dummyDiskLoc is actually in the record store too
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL) - 1);
+ // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
- long long expectedCount = 10 - unindexKeys();
- ASSERT_EQUALS(1, this->_helper.recordStore.numRecords(NULL) - 1);
+ const BSONObj k = BSON(""
+ << "g");
+ verify(this->unindex(k));
- long long unusedCount = 0;
- ASSERT_EQUALS(expectedCount, this->_helper.btree.fullValidate(&txn, &unusedCount, true, false, 0));
- ASSERT_EQUALS(0, unusedCount);
- }
+ ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- protected:
- virtual int unindexKeys() = 0;
- };
+ // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class MergeBucketsLeft : public MergeBucketsTestBase<OnDiskFormat> {
- virtual int unindexKeys() {
- BSONObj k = simpleKey('b', 800);
- this->unindex(k);
+ builder.checkStructure("{c:{b:{a:null}},d:null,_:{f:{e:null}}}");
+ }
+};
- k = simpleKey('b' + 2, 800);
- this->unindex(k);
+template <class OnDiskFormat>
+class MergeOption : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- k = simpleKey('b' + 4, 800);
- this->unindex(k);
+ builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}");
- k = simpleKey('b' + 6, 800);
- this->unindex(k);
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- return 4;
- }
- };
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class MergeBucketsRight : public MergeBucketsTestBase<OnDiskFormat> {
- virtual int unindexKeys() {
- const BSONObj k = simpleKey('b' + 2 * 9, 800);
- this->unindex(k);
- return 1;
- }
- };
+ const BSONObj k = BSON(""
+ << "ee");
+ verify(this->unindex(k));
- template<class OnDiskFormat>
- class MergeBucketsDontReplaceHead : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- for (int i = 0; i < 18; ++i) {
- const BSONObj k = simpleKey('a' + i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
- }
+ // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
- // numRecords(NULL) - 1, because fixedDiskLoc is actually in the record store too
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL) - 1);
+ builder.checkStructure("{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}");
+ }
+};
- const BSONObj k = simpleKey('a' + 17, 800);
- this->unindex(k);
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL) - 1);
+template <class OnDiskFormat>
+class ForceMergeLeft : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- long long unusedCount = 0;
- ASSERT_EQUALS(17, this->_helper.btree.fullValidate(&txn, &unusedCount, true, false, 0));
- ASSERT_EQUALS(0, unusedCount);
- }
- };
+ builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}");
- template<class OnDiskFormat>
- class MergeBucketsDelInternal : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- builder.makeTree("{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}");
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
+ const BSONObj k = BSON(""
+ << "ee");
+ verify(this->unindex(k));
- const BSONObj k = BSON("" << "bb");
- verify(this->unindex(k));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
- // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
+ builder.checkStructure("{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}");
+ }
+};
- builder.checkStructure("{b:{a:null},d:{c:null},f:{e:null},_:{g:null}}");
- }
- };
+template <class OnDiskFormat>
+class ForceMergeRight : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- template<class OnDiskFormat>
- class MergeBucketsRightNull : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ builder.makeTree("{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}");
- builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}");
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
- const BSONObj k = BSON("" << "bb");
- verify(this->unindex(k));
+ const BSONObj k = BSON(""
+ << "ee");
+ verify(this->unindex(k));
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
- builder.checkStructure("{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}");
- }
- };
+ builder.checkStructure("{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}");
+ }
+};
- // This comment was here during porting, not sure what it means:
- //
- // "Not yet handling this case"
- template<class OnDiskFormat>
- class DontMergeSingleBucket : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+template <class OnDiskFormat>
+class RecursiveMerge : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- builder.makeTree("{d:{b:{a:null},c:null}}");
+ builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}");
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
- const BSONObj k = BSON("" << "c");
- verify(this->unindex(k));
+ const BSONObj k = BSON(""
+ << "c");
+ verify(this->unindex(k));
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- builder.checkStructure("{d:{b:{a:null}}}");
- }
- };
+ // Height is not currently reduced in this case
+ builder.checkStructure("{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}");
+ }
+};
- template<class OnDiskFormat>
- class ParentMergeNonRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+template <class OnDiskFormat>
+class RecursiveMergeRightBucket : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}");
+ builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
- const BSONObj k = BSON("" << "bb");
- verify(this->unindex(k));
+ const BSONObj k = BSON(""
+ << "c");
+ verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // Child does not currently replace parent in this case. Also, the tree
- // has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- builder.checkStructure("{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}");
- }
- };
+ builder.checkStructure("{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}");
+ }
+};
- template<class OnDiskFormat>
- class ParentMergeNonRightToRight : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+template <class OnDiskFormat>
+class RecursiveMergeDoubleRightBucket : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- builder.makeTree("{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}");
+ builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}");
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
- const BSONObj k = BSON("" << "ff");
- verify(this->unindex(k));
+ const BSONObj k = BSON(""
+ << "c");
+ verify(this->unindex(k));
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // Child does not currently replace parent in this case. Also, the tree
- // has 6 buckets + 1 for the this->_helper.dummyDiskLoc.
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- builder.checkStructure("{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}");
- }
- };
+ // no recursion currently in this case
+ builder.checkStructure("{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}");
+ }
+};
- template<class OnDiskFormat>
- class CantMergeRightNoMerge : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+template <class OnDiskFormat>
+class MergeSizeTestBase : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ MergeSizeTestBase() : _count(0) {}
- builder.makeTree("{d:{b:{a:null},bb:null,cc:{c:null}},"
- "dd:null,"
- "_:{f:{e:null},h:{g:null}}}");
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
+ const BSONObj& topKey = biggestKey('m');
- const BSONObj k = BSON("" << "bb");
- verify(this->unindex(k));
+ DiskLoc leftChild = this->newBucket();
+ builder.push(
+ DiskLoc::fromRecordId(this->_helper.headManager.getHead(&txn)), topKey, leftChild);
+ _count++;
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ DiskLoc rightChild = this->newBucket();
+ this->setBucketNextChild(DiskLoc::fromRecordId(this->_helper.headManager.getHead(&txn)),
+ rightChild);
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
+ _count += builder.fillBucketToExactSize(leftChild, leftSize(), 'a');
+ _count += builder.fillBucketToExactSize(rightChild, rightSize(), 'n');
- builder.checkStructure("{d:{b:{a:null},cc:{c:null}},"
- "dd:null,"
- "_:{f:{e:null},h:{g:null}}}");
+ ASSERT(leftAdditional() <= 2);
+ if (leftAdditional() >= 2) {
+ builder.push(leftChild, bigKey('k'), DiskLoc());
}
- };
-
- template<class OnDiskFormat>
- class CantMergeLeftNoMerge : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}");
-
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << "g");
- verify(this->unindex(k));
-
- ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{c:{b:{a:null}},d:null,_:{f:{e:null}}}");
+ if (leftAdditional() >= 1) {
+ builder.push(leftChild, bigKey('l'), DiskLoc());
}
- };
-
- template<class OnDiskFormat>
- class MergeOption : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}");
-
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << "ee");
- verify(this->unindex(k));
-
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}");
+ ASSERT(rightAdditional() <= 2);
+ if (rightAdditional() >= 2) {
+ builder.push(rightChild, bigKey('y'), DiskLoc());
}
- };
-
- template<class OnDiskFormat>
- class ForceMergeLeft : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}");
-
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << "ee");
- verify(this->unindex(k));
-
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}");
- }
- };
-
- template<class OnDiskFormat>
- class ForceMergeRight : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}");
-
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 7 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(8, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << "ee");
- verify(this->unindex(k));
-
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}");
- }
- };
-
- template<class OnDiskFormat>
- class RecursiveMerge : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}");
-
- ASSERT_EQUALS(10, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << "c");
- verify(this->unindex(k));
-
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
-
- // Height is not currently reduced in this case
- builder.checkStructure("{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}");
+ if (rightAdditional() >= 1) {
+ builder.push(rightChild, bigKey('z'), DiskLoc());
}
- };
-
- template<class OnDiskFormat>
- class RecursiveMergeRightBucket : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}");
-
- ASSERT_EQUALS(9, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << "c");
- verify(this->unindex(k));
-
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}");
- }
- };
-
- template<class OnDiskFormat>
- class RecursiveMergeDoubleRightBucket : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}");
-
- ASSERT_EQUALS(8, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << "c");
- verify(this->unindex(k));
-
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
-
- // no recursion currently in this case
- builder.checkStructure("{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}");
- }
- };
-
- template<class OnDiskFormat>
- class MergeSizeTestBase : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- MergeSizeTestBase() : _count(0) {
-
- }
-
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
-
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- const BSONObj& topKey = biggestKey('m');
-
- DiskLoc leftChild = this->newBucket();
- builder.push(DiskLoc::fromRecordId(this->_helper.headManager.getHead(&txn)), topKey,
- leftChild);
- _count++;
-
- DiskLoc rightChild = this->newBucket();
- this->setBucketNextChild(DiskLoc::fromRecordId(this->_helper.headManager.getHead(&txn)),
- rightChild);
-
- _count += builder.fillBucketToExactSize(leftChild, leftSize(), 'a');
- _count += builder.fillBucketToExactSize(rightChild, rightSize(), 'n');
-
- ASSERT(leftAdditional() <= 2);
- if (leftAdditional() >= 2) {
- builder.push(leftChild, bigKey('k'), DiskLoc());
- }
- if (leftAdditional() >= 1) {
- builder.push(leftChild, bigKey('l'), DiskLoc());
- }
-
- ASSERT(rightAdditional() <= 2);
- if (rightAdditional() >= 2) {
- builder.push(rightChild, bigKey('y'), DiskLoc());
- }
- if (rightAdditional() >= 1) {
- builder.push(rightChild, bigKey('z'), DiskLoc());
- }
-
- _count += leftAdditional() + rightAdditional();
-
- initCheck();
-
- const char *keys = delKeys();
- for (const char *i = keys; *i; ++i) {
- long long unused = 0;
- ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- ASSERT_EQUALS(0, unused);
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+ _count += leftAdditional() + rightAdditional();
- const BSONObj k = bigKey(*i);
- this->unindex(k);
-
- --_count;
- }
+ initCheck();
+ const char* keys = delKeys();
+ for (const char* i = keys; *i; ++i) {
long long unused = 0;
ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
ASSERT_EQUALS(0, unused);
- validate();
-
- if (!merge()) {
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- }
- else {
- // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
- }
- }
-
- protected:
- virtual int leftAdditional() const { return 2; }
- virtual int rightAdditional() const { return 2; }
- virtual void initCheck() {}
- virtual void validate() {}
- virtual int leftSize() const = 0;
- virtual int rightSize() const = 0;
- virtual const char * delKeys() const { return "klyz"; }
- virtual bool merge() const { return true; }
-
- static BSONObj bigKey(char a) {
- return simpleKey(a, 801);
- }
-
- static BSONObj biggestKey(char a) {
- int size = OnDiskFormat::KeyMax - bigSize() + 801;
- return simpleKey(a, size);
- }
-
- static int bigSize() {
- return typename BtreeLogicTestBase<OnDiskFormat>::KeyDataOwnedType(bigKey('a')).dataSize();
- }
-
- static int biggestSize() {
- return typename BtreeLogicTestBase<OnDiskFormat>::KeyDataOwnedType(biggestKey('a')).dataSize();
- }
-
- int _count;
- };
-
- template<class OnDiskFormat>
- class MergeSizeJustRightRight : public MergeSizeTestBase<OnDiskFormat> {
- protected:
- virtual int rightSize() const {
- return BtreeLogic<OnDiskFormat>::lowWaterMark() - 1;
- }
-
- virtual int leftSize() const {
- return OnDiskFormat::BucketBodySize -
- MergeSizeTestBase<OnDiskFormat>::biggestSize() -
- sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType) -
- (BtreeLogic<OnDiskFormat>::lowWaterMark() - 1);
- }
- };
-
- template<class OnDiskFormat>
- class MergeSizeJustRightLeft : public MergeSizeTestBase<OnDiskFormat> {
- protected:
- virtual int leftSize() const {
- return BtreeLogic<OnDiskFormat>::lowWaterMark() - 1;
- }
-
- virtual int rightSize() const {
- return OnDiskFormat::BucketBodySize -
- MergeSizeTestBase<OnDiskFormat>::biggestSize() -
- sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType) -
- (BtreeLogic<OnDiskFormat>::lowWaterMark() - 1);
- }
-
- virtual const char * delKeys() const { return "yzkl"; }
- };
-
- template<class OnDiskFormat>
- class MergeSizeRight : public MergeSizeJustRightRight<OnDiskFormat> {
- virtual int rightSize() const { return MergeSizeJustRightRight<OnDiskFormat>::rightSize() - 1; }
- virtual int leftSize() const { return MergeSizeJustRightRight<OnDiskFormat>::leftSize() + 1; }
- };
-
- template<class OnDiskFormat>
- class MergeSizeLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
- virtual int rightSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() + 1; }
- virtual int leftSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() - 1; }
- };
-
- template<class OnDiskFormat>
- class NoMergeBelowMarkRight : public MergeSizeJustRightRight<OnDiskFormat> {
- virtual int rightSize() const { return MergeSizeJustRightRight<OnDiskFormat>::rightSize() + 1; }
- virtual int leftSize() const { return MergeSizeJustRightRight<OnDiskFormat>::leftSize() - 1; }
- virtual bool merge() const { return false; }
- };
-
- template<class OnDiskFormat>
- class NoMergeBelowMarkLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
- virtual int rightSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() - 1; }
- virtual int leftSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() + 1; }
- virtual bool merge() const { return false; }
- };
-
- template<class OnDiskFormat>
- class MergeSizeRightTooBig : public MergeSizeJustRightLeft<OnDiskFormat> {
- virtual int rightSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() + 1; }
- virtual bool merge() const { return false; }
- };
-
- template<class OnDiskFormat>
- class MergeSizeLeftTooBig : public MergeSizeJustRightRight<OnDiskFormat> {
- virtual int leftSize() const { return MergeSizeJustRightRight<OnDiskFormat>::leftSize() + 1; }
- virtual bool merge() const { return false; }
- };
-
- template<class OnDiskFormat>
- class MergeRightEmpty : public MergeSizeTestBase<OnDiskFormat> {
- protected:
- virtual int rightAdditional() const { return 1; }
- virtual int leftAdditional() const { return 1; }
- virtual const char * delKeys() const { return "lz"; }
- virtual int rightSize() const { return 0; }
- virtual int leftSize() const {
- return OnDiskFormat::BucketBodySize -
- MergeSizeTestBase<OnDiskFormat>::biggestSize() -
- sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType);
- }
- };
-
- template<class OnDiskFormat>
- class MergeMinRightEmpty : public MergeSizeTestBase<OnDiskFormat> {
- protected:
- virtual int rightAdditional() const { return 1; }
- virtual int leftAdditional() const { return 0; }
- virtual const char * delKeys() const { return "z"; }
- virtual int rightSize() const { return 0; }
- virtual int leftSize() const {
- return MergeSizeTestBase<OnDiskFormat>::bigSize() +
- sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType);
- }
- };
-
- template<class OnDiskFormat>
- class MergeLeftEmpty : public MergeSizeTestBase<OnDiskFormat> {
- protected:
- virtual int rightAdditional() const { return 1; }
- virtual int leftAdditional() const { return 1; }
- virtual const char * delKeys() const { return "zl"; }
- virtual int leftSize() const { return 0; }
- virtual int rightSize() const {
- return OnDiskFormat::BucketBodySize -
- MergeSizeTestBase<OnDiskFormat>::biggestSize() -
- sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType);
- }
- };
-
- template<class OnDiskFormat>
- class MergeMinLeftEmpty : public MergeSizeTestBase<OnDiskFormat> {
- protected:
- virtual int leftAdditional() const { return 1; }
- virtual int rightAdditional() const { return 0; }
- virtual const char * delKeys() const { return "l"; }
- virtual int leftSize() const { return 0; }
- virtual int rightSize() const {
- return MergeSizeTestBase<OnDiskFormat>::bigSize() +
- sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType);
- }
- };
-
- template<class OnDiskFormat>
- class BalanceRightEmpty : public MergeRightEmpty<OnDiskFormat> {
- protected:
- virtual int leftSize() const {
- return OnDiskFormat::BucketBodySize -
- MergeSizeTestBase<OnDiskFormat>::biggestSize() -
- sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType) + 1;
- }
-
- virtual bool merge() const { return false; }
-
- virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
- }
-
- virtual void validate() {
- OperationContextNoop txn;
- ASSERT_NOT_EQUALS(_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
- }
-
- private:
- BSONObj _oldTop;
- };
-
- template<class OnDiskFormat>
- class BalanceLeftEmpty : public MergeLeftEmpty<OnDiskFormat> {
- protected:
- virtual int rightSize() const {
- return OnDiskFormat::BucketBodySize -
- MergeSizeTestBase<OnDiskFormat>::biggestSize() -
- sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType) + 1;
- }
-
- virtual bool merge() const { return false; }
-
- virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
- }
-
- virtual void validate() {
- OperationContextNoop txn;
- ASSERT_TRUE(_oldTop != this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
- }
-
- private:
- BSONObj _oldTop;
- };
-
- template<class OnDiskFormat>
- class BalanceOneLeftToRight : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
- "b:{$20:null,$30:null,$40:null,$50:null,a:null},"
- "_:{c:null}}");
-
- ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << bigNumString(0x40, 800));
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
- "b:{$10:null,$20:null,$30:null,$50:null,a:null},"
- "_:{c:null}}");
- }
- };
-
- template<class OnDiskFormat>
- class BalanceOneRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{$10:{$1:null,$2:null,$3:null,$4:null},"
- "b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},"
- "_:{c:null}}");
-
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << bigNumString(0x3, 800));
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{$20:{$1:null,$2:null,$4:null,$10:null},"
- "b:{$30:null,$40:null,$50:null,$60:null,$70:null},"
- "_:{c:null}}");
- }
- };
-
- template<class OnDiskFormat>
- class BalanceThreeLeftToRight : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},"
- "$9:{$8:null},$11:{$10:null},$13:{$12:null},_:{$14:null}},"
- "b:{$30:null,$40:{$35:null},$50:{$45:null}},"
- "_:{c:null}}");
-
- ASSERT_EQUALS(23, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << bigNumString(0x30, 800));
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{$9:{$1:{$0:null},$3:{$2:null},"
- "$5:{$4:null},$7:{$6:null},_:{$8:null}},"
- "b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},"
- "$40:{$35:null},$50:{$45:null}},"
- "_:{c:null}}");
- }
- };
-
- template<class OnDiskFormat>
- class BalanceThreeRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},"
- "b:{$30:{$25:null},$40:{$35:null},$50:{$45:null},$60:{$55:null},"
- "$70:{$65:null},$80:{$75:null},"
- "$90:{$85:null},$100:{$95:null}},"
- "_:{c:null}}");
-
- ASSERT_EQUALS(25, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << bigNumString(0x5, 800));
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(24, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},"
- "$30:{$25:null},$40:{$35:null},_:{$45:null}},"
- "b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},"
- "$90:{$85:null},$100:{$95:null}},"
- "_:{c:null}}");
- }
- };
-
- template<class OnDiskFormat>
- class BalanceSingleParentKey : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
- "_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
-
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- const BSONObj k = BSON("" << bigNumString(0x40, 800));
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
- "_:{$10:null,$20:null,$30:null,$50:null,a:null}}");
- }
- };
-
- template<class OnDiskFormat>
- class PackEmptyBucket : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{a:null}");
-
- const BSONObj k = BSON("" << "a");
- ASSERT(this->unindex(k));
-
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
-
- typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
-
- ASSERT_EQUALS(0, headBucket->n);
- ASSERT_FALSE(headBucket->flags & Packed);
-
- int unused = 0;
- this->truncateBucket(headBucket, 0, unused);
-
- ASSERT_EQUALS(0, headBucket->n);
- ASSERT_EQUALS(0, headBucket->topSize);
- ASSERT_EQUALS((int)OnDiskFormat::BucketBodySize, headBucket->emptySize);
- ASSERT_TRUE(headBucket->flags & Packed);
- }
- };
-
- template<class OnDiskFormat>
- class PackedDataSizeEmptyBucket : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree("{a:null}");
-
- const BSONObj k = BSON("" << "a");
- ASSERT(this->unindex(k));
-
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
-
- typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
+ const BSONObj k = bigKey(*i);
+ this->unindex(k);
- ASSERT_EQUALS(0, headBucket->n);
- ASSERT_FALSE(headBucket->flags & Packed);
- ASSERT_EQUALS(0, this->bucketPackedDataSize(headBucket, 0));
- ASSERT_FALSE(headBucket->flags & Packed);
+ --_count;
}
- };
- template<class OnDiskFormat>
- class BalanceSingleParentKeyPackParent : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ long long unused = 0;
+ ASSERT_EQUALS(_count, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(0, unused);
- builder.makeTree("{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
- "_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
-
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ validate();
+ if (!merge()) {
// The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
-
- // force parent pack
- this->forcePackBucket(this->_helper.headManager.getHead(&txn));
-
- const BSONObj k = BSON("" << bigNumString(0x40, 800));
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
- "_:{$10:null,$20:null,$30:null,$50:null,a:null}}");
- }
- };
-
- template<class OnDiskFormat>
- class BalanceSplitParent : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree(
- "{$10$10:{$1:null,$2:null,$3:null,$4:null},"
- "$100:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null},"
- "$200:null,$300:null,$400:null,$500:null,$600:null,"
- "$700:null,$800:null,$900:null,_:{c:null}}");
-
- ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
-
- const BSONObj k = BSON("" << bigNumString(0x3, 800));
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(21, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
-
- // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
-
- builder.checkStructure("{$500:{ $30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},"
- "$100:{$40:null,$50:null,$60:null,$70:null,$80:null},"
- "$200:null,$300:null,$400:null},"
- "_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}");
- }
- };
-
- template<class OnDiskFormat>
- class RebalancedSeparatorBase : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
-
- builder.makeTree(treeSpec());
- modTree();
-
- ASSERT_EQUALS(expectedSeparator(),
- this->bucketRebalancedSeparatorPos(
- this->_helper.headManager.getHead(&txn), 0));
- }
-
- virtual string treeSpec() const = 0;
- virtual int expectedSeparator() const = 0;
- virtual void modTree() {}
- };
-
- template<class OnDiskFormat>
- class EvenRebalanceLeft : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$7:{$1:null,$2$31f:null,$3:null,"
- "$4$31f:null,$5:null,$6:null},"
- "_:{$8:null,$9:null,$10$31e:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class EvenRebalanceLeftCusp : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const {
- return "{$6:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null},"
- "_:{$7:null,$8:null,$9$31e:null,$10:null}}";
- }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class EvenRebalanceRight : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$3:{$1:null,$2$31f:null},_:{$4$31f:null,$5:null,$6:null,$7:null,$8$31e:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class EvenRebalanceRightCusp : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$4$31f:{$1:null,$2$31f:null,$3:null},_:{$5:null,$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class EvenRebalanceCenter : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$5:{$1:null,$2$31f:null,$3:null,$4$31f:null},_:{$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class OddRebalanceLeft : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$6$31f:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$7:null,$8:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class OddRebalanceRight : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$4:{$1:null,$2:null,$3:null},_:{$5:null,$6:null,$7:null,$8$31f:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class OddRebalanceCenter : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$5:{$1:null,$2:null,$3:null,$4:null},_:{$6:null,$7:null,$8:null,$9:null,$10$31f:null}}"; }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class RebalanceEmptyRight : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$a:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null,$7:null,$8:null,$9:null},_:{$b:null}}"; }
- virtual void modTree() {
- BSONObj k = BSON("" << bigNumString(0xb, 800));
- ASSERT(this->unindex(k));
- }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class RebalanceEmptyLeft : public RebalancedSeparatorBase<OnDiskFormat> {
- virtual string treeSpec() const { return "{$a:{$1:null},_:{$11:null,$12:null,$13:null,$14:null,$15:null,$16:null,$17:null,$18:null,$19:null}}"; }
- virtual void modTree() {
- BSONObj k = BSON("" << bigNumString(0x1, 800));
- ASSERT(this->unindex(k));
- }
- virtual int expectedSeparator() const { return 4; }
- };
-
- template<class OnDiskFormat>
- class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight<OnDiskFormat> {
- virtual int rightSize() const { return MergeSizeJustRightRight<OnDiskFormat>::rightSize() + 1; }
-
- virtual void initCheck() {
- OperationContextNoop txn;
- _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
- }
-
- virtual void validate() {
- OperationContextNoop txn;
- ASSERT_EQUALS(_oldTop, this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
- }
-
- virtual bool merge() const { return false; }
-
- protected:
- BSONObj _oldTop;
- };
-
- template<class OnDiskFormat>
- class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight<OnDiskFormat> {
- virtual int rightSize() const { return MergeSizeJustRightRight<OnDiskFormat>::rightSize(); }
- virtual int leftSize() const { return MergeSizeJustRightRight<OnDiskFormat>::leftSize() + 1; }
-
- virtual void validate() {
- OperationContextNoop txn;
- // Different top means we rebalanced
- ASSERT_NOT_EQUALS(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
- }
- };
-
- template<class OnDiskFormat>
- class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
- virtual int leftSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() + 1; }
- virtual void initCheck() {
- OperationContextNoop txn;
- this->_oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ } else {
+ // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
}
-
- virtual void validate() {
- OperationContextNoop txn;
- ASSERT_EQUALS(this->_oldTop,
+ }
+
+protected:
+ virtual int leftAdditional() const {
+ return 2;
+ }
+ virtual int rightAdditional() const {
+ return 2;
+ }
+ virtual void initCheck() {}
+ virtual void validate() {}
+ virtual int leftSize() const = 0;
+ virtual int rightSize() const = 0;
+ virtual const char* delKeys() const {
+ return "klyz";
+ }
+ virtual bool merge() const {
+ return true;
+ }
+
+ static BSONObj bigKey(char a) {
+ return simpleKey(a, 801);
+ }
+
+ static BSONObj biggestKey(char a) {
+ int size = OnDiskFormat::KeyMax - bigSize() + 801;
+ return simpleKey(a, size);
+ }
+
+ static int bigSize() {
+ return typename BtreeLogicTestBase<OnDiskFormat>::KeyDataOwnedType(bigKey('a')).dataSize();
+ }
+
+ static int biggestSize() {
+ return
+ typename BtreeLogicTestBase<OnDiskFormat>::KeyDataOwnedType(biggestKey('a')).dataSize();
+ }
+
+ int _count;
+};
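// ---------------------------------------------------------------------------
// Editorial sketch, not part of this diff: MergeSizeTestBase above leaves only
// leftSize() and rightSize() pure virtual, so a minimal variant just supplies
// those two byte counts and inherits the defaults (two extra keys per child,
// delete order "klyz", merge() == true). The class name and the chosen sizes
// here are hypothetical; whether its assertions actually hold depends on the
// OnDiskFormat constants.
// ---------------------------------------------------------------------------
// template <class OnDiskFormat>
// class MergeSizeExampleSketch : public MergeSizeTestBase<OnDiskFormat> {
// protected:
//     virtual int leftSize() const {
//         return BtreeLogic<OnDiskFormat>::lowWaterMark() - 1;
//     }
//     virtual int rightSize() const {
//         return BtreeLogic<OnDiskFormat>::lowWaterMark() - 1;
//     }
// };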
+
+template <class OnDiskFormat>
+class MergeSizeJustRightRight : public MergeSizeTestBase<OnDiskFormat> {
+protected:
+ virtual int rightSize() const {
+ return BtreeLogic<OnDiskFormat>::lowWaterMark() - 1;
+ }
+
+ virtual int leftSize() const {
+ return OnDiskFormat::BucketBodySize - MergeSizeTestBase<OnDiskFormat>::biggestSize() -
+ sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType) -
+ (BtreeLogic<OnDiskFormat>::lowWaterMark() - 1);
+ }
+};
+
+template <class OnDiskFormat>
+class MergeSizeJustRightLeft : public MergeSizeTestBase<OnDiskFormat> {
+protected:
+ virtual int leftSize() const {
+ return BtreeLogic<OnDiskFormat>::lowWaterMark() - 1;
+ }
+
+ virtual int rightSize() const {
+ return OnDiskFormat::BucketBodySize - MergeSizeTestBase<OnDiskFormat>::biggestSize() -
+ sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType) -
+ (BtreeLogic<OnDiskFormat>::lowWaterMark() - 1);
+ }
+
+ virtual const char* delKeys() const {
+ return "yzkl";
+ }
+};
+
+template <class OnDiskFormat>
+class MergeSizeRight : public MergeSizeJustRightRight<OnDiskFormat> {
+ virtual int rightSize() const {
+ return MergeSizeJustRightRight<OnDiskFormat>::rightSize() - 1;
+ }
+ virtual int leftSize() const {
+ return MergeSizeJustRightRight<OnDiskFormat>::leftSize() + 1;
+ }
+};
+
+template <class OnDiskFormat>
+class MergeSizeLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
+ virtual int rightSize() const {
+ return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() + 1;
+ }
+ virtual int leftSize() const {
+ return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() - 1;
+ }
+};
+
+template <class OnDiskFormat>
+class NoMergeBelowMarkRight : public MergeSizeJustRightRight<OnDiskFormat> {
+ virtual int rightSize() const {
+ return MergeSizeJustRightRight<OnDiskFormat>::rightSize() + 1;
+ }
+ virtual int leftSize() const {
+ return MergeSizeJustRightRight<OnDiskFormat>::leftSize() - 1;
+ }
+ virtual bool merge() const {
+ return false;
+ }
+};
+
+template <class OnDiskFormat>
+class NoMergeBelowMarkLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
+ virtual int rightSize() const {
+ return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() - 1;
+ }
+ virtual int leftSize() const {
+ return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() + 1;
+ }
+ virtual bool merge() const {
+ return false;
+ }
+};
+
+template <class OnDiskFormat>
+class MergeSizeRightTooBig : public MergeSizeJustRightLeft<OnDiskFormat> {
+ virtual int rightSize() const {
+ return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() + 1;
+ }
+ virtual bool merge() const {
+ return false;
+ }
+};
+
+template <class OnDiskFormat>
+class MergeSizeLeftTooBig : public MergeSizeJustRightRight<OnDiskFormat> {
+ virtual int leftSize() const {
+ return MergeSizeJustRightRight<OnDiskFormat>::leftSize() + 1;
+ }
+ virtual bool merge() const {
+ return false;
+ }
+};
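// ---------------------------------------------------------------------------
// Editorial sketch, not part of this diff: the arithmetic that the
// MergeSizeJustRight*, MergeSize{Left,Right}, NoMergeBelowMark* and
// MergeSize*TooBig variants above are probing, using made-up byte values (the
// real ones come from OnDiskFormat and BtreeLogic<OnDiskFormat>::lowWaterMark()).
// As I read these tests, a merge is only attempted once a child falls below
// the low water mark, and only succeeds if both children plus the separator
// key pulled down from the parent fit in a single bucket body.
// ---------------------------------------------------------------------------
// #include <cassert>
//
// int main() {
//     const int bucketBodySize = 1000;  // usable bytes in one bucket (made up)
//     const int separatorSize = 120;    // biggestSize(): key pulled down from the parent (made up)
//     const int keyHeaderSize = 16;     // sizeof(FixedWidthKeyType) (made up)
//     const int lowWaterMark = 300;     // merge threshold (made up)
//
//     // MergeSizeJustRightRight: right child just below the mark, left child
//     // sized so the pair exactly fills one bucket -> merge succeeds.
//     int rightSize = lowWaterMark - 1;
//     int leftSize = bucketBodySize - separatorSize - keyHeaderSize - rightSize;
//     assert(rightSize < lowWaterMark);
//     assert(leftSize + rightSize + separatorSize + keyHeaderSize == bucketBodySize);
//
//     // NoMergeBelowMarkRight: moving one byte from left to right keeps the
//     // right child at the mark, so no merge is even attempted.
//     assert((rightSize + 1) >= lowWaterMark);
//
//     // MergeSizeLeftTooBig: one extra byte on the left and the pair no longer
//     // fits in a single bucket, so the attempted merge is rejected.
//     assert((leftSize + 1) + rightSize + separatorSize + keyHeaderSize > bucketBodySize);
//     return 0;
// }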
+
+template <class OnDiskFormat>
+class MergeRightEmpty : public MergeSizeTestBase<OnDiskFormat> {
+protected:
+ virtual int rightAdditional() const {
+ return 1;
+ }
+ virtual int leftAdditional() const {
+ return 1;
+ }
+ virtual const char* delKeys() const {
+ return "lz";
+ }
+ virtual int rightSize() const {
+ return 0;
+ }
+ virtual int leftSize() const {
+ return OnDiskFormat::BucketBodySize - MergeSizeTestBase<OnDiskFormat>::biggestSize() -
+ sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType);
+ }
+};
+
+template <class OnDiskFormat>
+class MergeMinRightEmpty : public MergeSizeTestBase<OnDiskFormat> {
+protected:
+ virtual int rightAdditional() const {
+ return 1;
+ }
+ virtual int leftAdditional() const {
+ return 0;
+ }
+ virtual const char* delKeys() const {
+ return "z";
+ }
+ virtual int rightSize() const {
+ return 0;
+ }
+ virtual int leftSize() const {
+ return MergeSizeTestBase<OnDiskFormat>::bigSize() +
+ sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType);
+ }
+};
+
+template <class OnDiskFormat>
+class MergeLeftEmpty : public MergeSizeTestBase<OnDiskFormat> {
+protected:
+ virtual int rightAdditional() const {
+ return 1;
+ }
+ virtual int leftAdditional() const {
+ return 1;
+ }
+ virtual const char* delKeys() const {
+ return "zl";
+ }
+ virtual int leftSize() const {
+ return 0;
+ }
+ virtual int rightSize() const {
+ return OnDiskFormat::BucketBodySize - MergeSizeTestBase<OnDiskFormat>::biggestSize() -
+ sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType);
+ }
+};
+
+template <class OnDiskFormat>
+class MergeMinLeftEmpty : public MergeSizeTestBase<OnDiskFormat> {
+protected:
+ virtual int leftAdditional() const {
+ return 1;
+ }
+ virtual int rightAdditional() const {
+ return 0;
+ }
+ virtual const char* delKeys() const {
+ return "l";
+ }
+ virtual int leftSize() const {
+ return 0;
+ }
+ virtual int rightSize() const {
+ return MergeSizeTestBase<OnDiskFormat>::bigSize() +
+ sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType);
+ }
+};
+
+template <class OnDiskFormat>
+class BalanceRightEmpty : public MergeRightEmpty<OnDiskFormat> {
+protected:
+ virtual int leftSize() const {
+ return OnDiskFormat::BucketBodySize - MergeSizeTestBase<OnDiskFormat>::biggestSize() -
+ sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType) + 1;
+ }
+
+ virtual bool merge() const {
+ return false;
+ }
+
+ virtual void initCheck() {
+ OperationContextNoop txn;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ }
+
+ virtual void validate() {
+ OperationContextNoop txn;
+ ASSERT_NOT_EQUALS(_oldTop,
this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
- }
- virtual bool merge() const { return false; }
+ }
+
+private:
+ BSONObj _oldTop;
+};
+
+template <class OnDiskFormat>
+class BalanceLeftEmpty : public MergeLeftEmpty<OnDiskFormat> {
+protected:
+ virtual int rightSize() const {
+ return OnDiskFormat::BucketBodySize - MergeSizeTestBase<OnDiskFormat>::biggestSize() -
+ sizeof(typename BtreeLogicTestBase<OnDiskFormat>::FixedWidthKeyType) + 1;
+ }
+
+ virtual bool merge() const {
+ return false;
+ }
+
+ virtual void initCheck() {
+ OperationContextNoop txn;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ }
- protected:
- BSONObj _oldTop;
- };
+ virtual void validate() {
+ OperationContextNoop txn;
+ ASSERT_TRUE(_oldTop !=
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ }
+
+private:
+ BSONObj _oldTop;
+};
+
+template <class OnDiskFormat>
+class BalanceOneLeftToRight : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+
+ builder.makeTree(
+ "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
+ "b:{$20:null,$30:null,$40:null,$50:null,a:null},"
+ "_:{c:null}}");
+
+ ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+
+ const BSONObj k = BSON("" << bigNumString(0x40, 800));
+ ASSERT(this->unindex(k));
+
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+
+ builder.checkStructure(
+ "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
+ "b:{$10:null,$20:null,$30:null,$50:null,a:null},"
+ "_:{c:null}}");
+ }
+};
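// ---------------------------------------------------------------------------
// Editorial note, not part of this diff: reading the makeTree() /
// checkStructure() specs used by these balance tests. As I read them, each
// brace pair describes one bucket, each "key:child" entry is a key whose value
// is the child bucket hanging to its left (null when it has none), and "_"
// names the bucket's rightmost child. The "$NN" keys appear to line up with
// bigNumString(0xNN, ...): the test above deletes
// BSON("" << bigNumString(0x40, 800)) and, correspondingly, "$40" disappears
// from the expected post-delete structure. The "$10$10" / "$2$31f" forms carry
// an extra suffix whose exact meaning is not shown in this hunk.
// ---------------------------------------------------------------------------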
+
+template <class OnDiskFormat>
+class BalanceOneRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+
+ builder.makeTree(
+ "{$10:{$1:null,$2:null,$3:null,$4:null},"
+ "b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},"
+ "_:{c:null}}");
+
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+
+ const BSONObj k = BSON("" << bigNumString(0x3, 800));
+ ASSERT(this->unindex(k));
+
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+
+ builder.checkStructure(
+ "{$20:{$1:null,$2:null,$4:null,$10:null},"
+ "b:{$30:null,$40:null,$50:null,$60:null,$70:null},"
+ "_:{c:null}}");
+ }
+};
+
+template <class OnDiskFormat>
+class BalanceThreeLeftToRight : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+
+ builder.makeTree(
+ "{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},"
+ "$9:{$8:null},$11:{$10:null},$13:{$12:null},_:{$14:null}},"
+ "b:{$30:null,$40:{$35:null},$50:{$45:null}},"
+ "_:{c:null}}");
+
+ ASSERT_EQUALS(23, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
+
+ const BSONObj k = BSON("" << bigNumString(0x30, 800));
+ ASSERT(this->unindex(k));
- template<class OnDiskFormat>
- class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft<OnDiskFormat> {
- virtual int leftSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::leftSize(); }
- virtual int rightSize() const { return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() + 1; }
+ ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 14 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(15, this->_helper.recordStore.numRecords(NULL));
- virtual void validate() {
- OperationContextNoop txn;
- // Different top means we rebalanced
- ASSERT_NOT_EQUALS(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
- }
- };
+ builder.checkStructure(
+ "{$9:{$1:{$0:null},$3:{$2:null},"
+ "$5:{$4:null},$7:{$6:null},_:{$8:null}},"
+ "b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},"
+ "$40:{$35:null},$50:{$45:null}},"
+ "_:{c:null}}");
+ }
+};
+
+template <class OnDiskFormat>
+class BalanceThreeRightToLeft : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- template<class OnDiskFormat>
- class PreferBalanceLeft : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ builder.makeTree(
+ "{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},"
+ "b:{$30:{$25:null},$40:{$35:null},$50:{$45:null},$60:{$55:null},"
+ "$70:{$65:null},$80:{$75:null},"
+ "$90:{$85:null},$100:{$95:null}},"
+ "_:{c:null}}");
- builder.makeTree("{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
- "$20:{$11:null,$12:null,$13:null,$14:null},"
- "_:{$30:null}}");
+ ASSERT_EQUALS(25, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ // The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
+
+ const BSONObj k = BSON("" << bigNumString(0x5, 800));
+ ASSERT(this->unindex(k));
+
+ ASSERT_EQUALS(24, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 15 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(16, this->_helper.recordStore.numRecords(NULL));
+
+ builder.checkStructure(
+ "{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},"
+ "$30:{$25:null},$40:{$35:null},_:{$45:null}},"
+ "b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},"
+ "$90:{$85:null},$100:{$95:null}},"
+ "_:{c:null}}");
+ }
+};
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+template <class OnDiskFormat>
+class BalanceSingleParentKey : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- const BSONObj k = BSON("" << bigNumString(0x12, 800));
- ASSERT(this->unindex(k));
+ builder.makeTree(
+ "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
+ "_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- builder.checkStructure("{$5:{$1:null,$2:null,$3:null,$4:null},"
- "$20:{$6:null,$10:null,$11:null,$13:null,$14:null},"
- "_:{$30:null}}");
- }
- };
+ const BSONObj k = BSON("" << bigNumString(0x40, 800));
+ ASSERT(this->unindex(k));
- template<class OnDiskFormat>
- class PreferBalanceRight : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+
+ builder.checkStructure(
+ "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
+ "_:{$10:null,$20:null,$30:null,$50:null,a:null}}");
+ }
+};
+
+template <class OnDiskFormat>
+class PackEmptyBucket : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- builder.makeTree("{$10:{$1:null},"
- "$20:{$11:null,$12:null,$13:null,$14:null},"
- "_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}");
+ builder.makeTree("{a:null}");
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ const BSONObj k = BSON(""
+ << "a");
+ ASSERT(this->unindex(k));
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ this->forcePackBucket(this->_helper.headManager.getHead(&txn));
- const BSONObj k = BSON("" << bigNumString(0x12, 800));
- ASSERT(this->unindex(k));
+ typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ ASSERT_EQUALS(0, headBucket->n);
+ ASSERT_FALSE(headBucket->flags & Packed);
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ int unused = 0;
+ this->truncateBucket(headBucket, 0, unused);
+
+ ASSERT_EQUALS(0, headBucket->n);
+ ASSERT_EQUALS(0, headBucket->topSize);
+ ASSERT_EQUALS((int)OnDiskFormat::BucketBodySize, headBucket->emptySize);
+ ASSERT_TRUE(headBucket->flags & Packed);
+ }
+};
+
+template <class OnDiskFormat>
+class PackedDataSizeEmptyBucket : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- builder.checkStructure("{$10:{$1:null},"
- "$31:{$11:null,$13:null,$14:null,$20:null},"
- "_:{$32:null,$33:null,$34:null,$35:null,$36:null}}");
- }
- };
+ builder.makeTree("{a:null}");
- template<class OnDiskFormat>
- class RecursiveMergeThenBalance : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ const BSONObj k = BSON(""
+ << "a");
+ ASSERT(this->unindex(k));
- builder.makeTree("{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},"
- "_:{$20:null,$30:null,$40:null,$50:null,"
- "$60:null,$70:null,$80:null,$90:null}}");
+ this->forcePackBucket(this->_helper.headManager.getHead(&txn));
- ASSERT_EQUALS(15, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ typename BtreeLogicTestBase<OnDiskFormat>::BucketType* headBucket = this->head();
- // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, headBucket->n);
+ ASSERT_FALSE(headBucket->flags & Packed);
+ ASSERT_EQUALS(0, this->bucketPackedDataSize(headBucket, 0));
+ ASSERT_FALSE(headBucket->flags & Packed);
+ }
+};
+
+template <class OnDiskFormat>
+class BalanceSingleParentKeyPackParent : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+
+ builder.makeTree(
+ "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
+ "_:{$20:null,$30:null,$40:null,$50:null,a:null}}");
+
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+
+ // force parent pack
+ this->forcePackBucket(this->_helper.headManager.getHead(&txn));
+
+ const BSONObj k = BSON("" << bigNumString(0x40, 800));
+ ASSERT(this->unindex(k));
+
+ ASSERT_EQUALS(11, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+
+ builder.checkStructure(
+ "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},"
+ "_:{$10:null,$20:null,$30:null,$50:null,a:null}}");
+ }
+};
+
+template <class OnDiskFormat>
+class BalanceSplitParent : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+
+ builder.makeTree(
+ "{$10$10:{$1:null,$2:null,$3:null,$4:null},"
+ "$100:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null},"
+ "$200:null,$300:null,$400:null,$500:null,$600:null,"
+ "$700:null,$800:null,$900:null,_:{c:null}}");
+
+ ASSERT_EQUALS(22, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+
+ const BSONObj k = BSON("" << bigNumString(0x3, 800));
+ ASSERT(this->unindex(k));
+
+ ASSERT_EQUALS(21, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+
+ // The tree has 6 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(7, this->_helper.recordStore.numRecords(NULL));
+
+ builder.checkStructure(
+ "{$500:{ $30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},"
+ "$100:{$40:null,$50:null,$60:null,$70:null,$80:null},"
+ "$200:null,$300:null,$400:null},"
+ "_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}");
+ }
+};
+
+template <class OnDiskFormat>
+class RebalancedSeparatorBase : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+
+ builder.makeTree(treeSpec());
+ modTree();
+
+ ASSERT_EQUALS(
+ expectedSeparator(),
+ this->bucketRebalancedSeparatorPos(this->_helper.headManager.getHead(&txn), 0));
+ }
+
+ virtual string treeSpec() const = 0;
+ virtual int expectedSeparator() const = 0;
+ virtual void modTree() {}
+};
+
+template <class OnDiskFormat>
+class EvenRebalanceLeft : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$7:{$1:null,$2$31f:null,$3:null,"
+ "$4$31f:null,$5:null,$6:null},"
+ "_:{$8:null,$9:null,$10$31e:null}}";
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class EvenRebalanceLeftCusp : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$6:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null},"
+ "_:{$7:null,$8:null,$9$31e:null,$10:null}}";
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class EvenRebalanceRight : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$3:{$1:null,$2$31f:null},_:{$4$31f:null,$5:null,$6:null,$7:null,$8$31e:null,$9:"
+ "null,$10:null}}";
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class EvenRebalanceRightCusp : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$4$31f:{$1:null,$2$31f:null,$3:null},_:{$5:null,$6:null,$7$31e:null,$8:null,$9:"
+ "null,$10:null}}";
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class EvenRebalanceCenter : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$5:{$1:null,$2$31f:null,$3:null,$4$31f:null},_:{$6:null,$7$31e:null,$8:null,$9:"
+ "null,$10:null}}";
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class OddRebalanceLeft : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$6$31f:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$7:null,$8:null,$9:null,$10:"
+ "null}}";
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class OddRebalanceRight : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$4:{$1:null,$2:null,$3:null},_:{$5:null,$6:null,$7:null,$8$31f:null,$9:null,$10:"
+ "null}}";
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class OddRebalanceCenter : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$5:{$1:null,$2:null,$3:null,$4:null},_:{$6:null,$7:null,$8:null,$9:null,$10$31f:"
+ "null}}";
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class RebalanceEmptyRight : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$a:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null,$7:null,$8:null,$9:null},_:{$"
+ "b:null}}";
+ }
+ virtual void modTree() {
+ BSONObj k = BSON("" << bigNumString(0xb, 800));
+ ASSERT(this->unindex(k));
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class RebalanceEmptyLeft : public RebalancedSeparatorBase<OnDiskFormat> {
+ virtual string treeSpec() const {
+ return "{$a:{$1:null},_:{$11:null,$12:null,$13:null,$14:null,$15:null,$16:null,$17:null,$"
+ "18:null,$19:null}}";
+ }
+ virtual void modTree() {
+ BSONObj k = BSON("" << bigNumString(0x1, 800));
+ ASSERT(this->unindex(k));
+ }
+ virtual int expectedSeparator() const {
+ return 4;
+ }
+};
+
+template <class OnDiskFormat>
+class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight<OnDiskFormat> {
+ virtual int rightSize() const {
+ return MergeSizeJustRightRight<OnDiskFormat>::rightSize() + 1;
+ }
+
+ virtual void initCheck() {
+ OperationContextNoop txn;
+ _oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ }
+
+ virtual void validate() {
+ OperationContextNoop txn;
+ ASSERT_EQUALS(_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ }
+
+ virtual bool merge() const {
+ return false;
+ }
+
+protected:
+ BSONObj _oldTop;
+};
+
+template <class OnDiskFormat>
+class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight<OnDiskFormat> {
+ virtual int rightSize() const {
+ return MergeSizeJustRightRight<OnDiskFormat>::rightSize();
+ }
+ virtual int leftSize() const {
+ return MergeSizeJustRightRight<OnDiskFormat>::leftSize() + 1;
+ }
+
+ virtual void validate() {
+ OperationContextNoop txn;
+ // Different top means we rebalanced
+ ASSERT_NOT_EQUALS(this->_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ }
+};
+
+template <class OnDiskFormat>
+class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
+ virtual int leftSize() const {
+ return MergeSizeJustRightLeft<OnDiskFormat>::leftSize() + 1;
+ }
+ virtual void initCheck() {
+ OperationContextNoop txn;
+ this->_oldTop = this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson();
+ }
+
+ virtual void validate() {
+ OperationContextNoop txn;
+ ASSERT_EQUALS(this->_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ }
+ virtual bool merge() const {
+ return false;
+ }
+
+protected:
+ BSONObj _oldTop;
+};
+
+template <class OnDiskFormat>
+class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft<OnDiskFormat> {
+ virtual int leftSize() const {
+ return MergeSizeJustRightLeft<OnDiskFormat>::leftSize();
+ }
+ virtual int rightSize() const {
+ return MergeSizeJustRightLeft<OnDiskFormat>::rightSize() + 1;
+ }
+
+ virtual void validate() {
+ OperationContextNoop txn;
+ // Different top means we rebalanced
+ ASSERT_NOT_EQUALS(this->_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ }
+};
- const BSONObj k = BSON("" << bigNumString(0x7, 800));
- ASSERT(this->unindex(k));
+template <class OnDiskFormat>
+class PreferBalanceLeft : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ builder.makeTree(
+ "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},"
+ "$20:{$11:null,$12:null,$13:null,$14:null},"
+ "_:{$30:null}}");
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- builder.checkStructure(
- "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},"
- "_:{$50:null,$60:null,$70:null,$80:null,$90:null}}");
- }
- };
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class DelEmptyNoNeighbors : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ const BSONObj k = BSON("" << bigNumString(0x12, 800));
+ ASSERT(this->unindex(k));
- builder.makeTree("{b:{a:null}}");
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
+ builder.checkStructure(
+ "{$5:{$1:null,$2:null,$3:null,$4:null},"
+ "$20:{$6:null,$10:null,$11:null,$13:null,$14:null},"
+ "_:{$30:null}}");
+ }
+};
- const BSONObj k = BSON("" << "a");
- ASSERT(this->unindex(k));
+template <class OnDiskFormat>
+class PreferBalanceRight : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ builder.makeTree(
+ "{$10:{$1:null},"
+ "$20:{$11:null,$12:null,$13:null,$14:null},"
+ "_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}");
- // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- builder.checkStructure("{b:null}");
- }
- };
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class DelEmptyEmptyNeighbors : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ const BSONObj k = BSON("" << bigNumString(0x12, 800));
+ ASSERT(this->unindex(k));
- builder.makeTree("{a:null,c:{b:null},d:null}");
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
+ builder.checkStructure(
+ "{$10:{$1:null},"
+ "$31:{$11:null,$13:null,$14:null,$20:null},"
+ "_:{$32:null,$33:null,$34:null,$35:null,$36:null}}");
+ }
+};
- const BSONObj k = BSON("" << "b");
- ASSERT(this->unindex(k));
+template <class OnDiskFormat>
+class RecursiveMergeThenBalance : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
+ builder.makeTree(
+ "{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},"
+ "_:{$20:null,$30:null,$40:null,$50:null,"
+ "$60:null,$70:null,$80:null,$90:null}}");
- // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(15, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- builder.checkStructure("{a:null,c:null,d:null}");
- }
- };
+ // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class DelInternal : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ const BSONObj k = BSON("" << bigNumString(0x7, 800));
+ ASSERT(this->unindex(k));
- builder.makeTree("{a:null,c:{b:null},d:null}");
+ ASSERT_EQUALS(14, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- long long unused = 0;
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ builder.checkStructure(
+ "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},"
+ "_:{$50:null,$60:null,$70:null,$80:null,$90:null}}");
+ }
+};
- const BSONObj k = BSON("" << "c");
- ASSERT(this->unindex(k));
+template <class OnDiskFormat>
+class DelEmptyNoNeighbors : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ builder.makeTree("{b:{a:null}}");
- // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- builder.checkStructure("{a:null,b:null,d:null}");
- }
- };
+ // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
- template<class OnDiskFormat>
- class DelInternalReplaceWithUnused : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ const BSONObj k = BSON(""
+ << "a");
+ ASSERT(this->unindex(k));
- builder.makeTree("{a:null,c:{b:null},d:null}");
+ ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- const DiskLoc prevChildBucket =
- this->getKey(this->_helper.headManager.getHead(&txn), 1).prevChildBucket;
- this->markKeyUnused(prevChildBucket, 0);
+ // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
- long long unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ builder.checkStructure("{b:null}");
+ }
+};
- // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(1, unused);
+template <class OnDiskFormat>
+class DelEmptyEmptyNeighbors : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- const BSONObj k = BSON("" << "c");
- ASSERT(this->unindex(k));
+ builder.makeTree("{a:null,c:{b:null},d:null}");
- unused = 0;
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(1, unused);
+ // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
- // doesn't discriminate between used and unused
- builder.checkStructure("{a:null,b:null,d:null}");
- }
- };
+ const BSONObj k = BSON(""
+ << "b");
+ ASSERT(this->unindex(k));
- template<class OnDiskFormat>
- class DelInternalReplaceRight : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, NULL, true, false, 0));
- builder.makeTree("{a:null,_:{b:null}}");
+ // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
- long long unused = 0;
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ builder.checkStructure("{a:null,c:null,d:null}");
+ }
+};
- // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+template <class OnDiskFormat>
+class DelInternal : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- const BSONObj k = BSON("" << "a");
- ASSERT(this->unindex(k));
+ builder.makeTree("{a:null,c:{b:null},d:null}");
- unused = 0;
- ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ long long unused = 0;
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- builder.checkStructure("{b:null}");
- }
- };
+ const BSONObj k = BSON(""
+ << "c");
+ ASSERT(this->unindex(k));
- template<class OnDiskFormat>
- class DelInternalPromoteKey : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- builder.makeTree("{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}");
+ // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- long long unused = 0;
- ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ builder.checkStructure("{a:null,b:null,d:null}");
+ }
+};
- // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+template <class OnDiskFormat>
+class DelInternalReplaceWithUnused : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- const BSONObj k = BSON("" << "y");
- ASSERT(this->unindex(k));
+ builder.makeTree("{a:null,c:{b:null},d:null}");
- unused = 0;
- ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ const DiskLoc prevChildBucket =
+ this->getKey(this->_helper.headManager.getHead(&txn), 1).prevChildBucket;
+ this->markKeyUnused(prevChildBucket, 0);
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ long long unused = 0;
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- builder.checkStructure("{a:null,e:{c:{b:null},d:null},z:null}");
- }
- };
+ // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(1, unused);
- template<class OnDiskFormat>
- class DelInternalPromoteRightKey : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ const BSONObj k = BSON(""
+ << "c");
+ ASSERT(this->unindex(k));
- builder.makeTree("{a:null,_:{e:{c:null},_:{f:null}}}");
+ unused = 0;
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- long long unused = 0;
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(1, unused);
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ // doesn't discriminate between used and unused
+ builder.checkStructure("{a:null,b:null,d:null}");
+ }
+};
- const BSONObj k = BSON("" << "a");
- ASSERT(this->unindex(k));
+template <class OnDiskFormat>
+class DelInternalReplaceRight : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ builder.makeTree("{a:null,_:{b:null}}");
- // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ long long unused = 0;
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- builder.checkStructure("{c:null,_:{e:null,f:null}}");
- }
- };
+ // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- template<class OnDiskFormat>
- class DelInternalReplacementPrevNonNull : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ const BSONObj k = BSON(""
+ << "a");
+ ASSERT(this->unindex(k));
- builder.makeTree("{a:null,d:{c:{b:null}},e:null}");
+ unused = 0;
+ ASSERT_EQUALS(1, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- long long unused = 0;
- ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ // The tree has 1 bucket + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ builder.checkStructure("{b:null}");
+ }
+};
- const BSONObj k = BSON("" << "d");
- ASSERT(this->unindex(k));
+template <class OnDiskFormat>
+class DelInternalPromoteKey : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ builder.makeTree("{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}");
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(1, unused);
+ long long unused = 0;
+ ASSERT_EQUALS(7, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- builder.checkStructure("{a:null,d:{c:{b:null}},e:null}");
+ // The tree has 5 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(6, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- // Check 'unused' key
- ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 1).recordLoc.getOfs() & 1);
- }
- };
+ const BSONObj k = BSON(""
+ << "y");
+ ASSERT(this->unindex(k));
- template<class OnDiskFormat>
- class DelInternalReplacementNextNonNull : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ unused = 0;
+ ASSERT_EQUALS(6, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- builder.makeTree("{a:null,_:{c:null,_:{d:null}}}");
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- long long unused = 0;
- ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ builder.checkStructure("{a:null,e:{c:{b:null},d:null},z:null}");
+ }
+};
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
-
- const BSONObj k = BSON("" << "a");
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
-
- // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(1, unused);
-
- builder.checkStructure("{a:null,_:{c:null,_:{d:null}}}");
+template <class OnDiskFormat>
+class DelInternalPromoteRightKey : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- // Check 'unused' key
- ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 0).recordLoc.getOfs() & 1);
- }
- };
+ builder.makeTree("{a:null,_:{e:{c:null},_:{f:null}}}");
- template<class OnDiskFormat>
- class DelInternalSplitPromoteLeft : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ long long unused = 0;
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- builder.makeTree("{$10:null,$20:null,$30$10:{$25:{$23:null},_:{$27:null}},"
- "$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}");
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- long long unused = 0;
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ const BSONObj k = BSON(""
+ << "a");
+ ASSERT(this->unindex(k));
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
-
- const BSONObj k = BSON("" << bigNumString(0x30, 0x10));
- ASSERT(this->unindex(k));
-
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
-
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ unused = 0;
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- builder.checkStructure("{$60:{$10:null,$20:null,"
- "$27:{$23:null,$25:null},$40:null,$50:null},"
- "_:{$70:null,$80:null,$90:null,$100:null}}");
- }
- };
+ // The tree has 2 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- template<class OnDiskFormat>
- class DelInternalSplitPromoteRight : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ builder.checkStructure("{c:null,_:{e:null,f:null}}");
+ }
+};
- builder.makeTree("{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,"
- "$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}");
+template <class OnDiskFormat>
+class DelInternalReplacementPrevNonNull : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
- long long unused = 0;
- ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ builder.makeTree("{a:null,d:{c:{b:null}},e:null}");
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ long long unused = 0;
+ ASSERT_EQUALS(5, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- const BSONObj k = BSON("" << bigNumString(0x100, 0x10));
- ASSERT(this->unindex(k));
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+ const BSONObj k = BSON(""
+ << "d");
+ ASSERT(this->unindex(k));
- // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
- ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
- ASSERT_EQUALS(0, unused);
+ ASSERT_EQUALS(4, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- builder.checkStructure(
- "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},"
- "_:{$90:null,$97:{$93:null,$95:null}}}");
- }
- };
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(1, unused);
- template<class OnDiskFormat>
- class LocateEmptyForward : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
-
- BSONObj key1 = simpleKey('a');
- this->insert(key1, this->_helper.dummyDiskLoc);
- BSONObj key2 = simpleKey('b');
- this->insert(key2, this->_helper.dummyDiskLoc);
- BSONObj key3 = simpleKey('c');
- this->insert(key3, this->_helper.dummyDiskLoc);
-
- this->checkValidNumKeys(3);
- this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&txn), 1);
- }
- };
+ builder.checkStructure("{a:null,d:{c:{b:null}},e:null}");
- template<class OnDiskFormat>
- class LocateEmptyReverse : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
-
- BSONObj key1 = simpleKey('a');
- this->insert(key1, this->_helper.dummyDiskLoc);
- BSONObj key2 = simpleKey('b');
- this->insert(key2, this->_helper.dummyDiskLoc);
- BSONObj key3 = simpleKey('c');
- this->insert(key3, this->_helper.dummyDiskLoc);
-
- this->checkValidNumKeys(3);
- this->locate(BSONObj(), -1, false, DiskLoc(), -1);
- }
- };
+ // Check 'unused' key
+ ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 1).recordLoc.getOfs() & 1);
+ }
+};
- template<class OnDiskFormat>
- class DuplicateKeys : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- OperationContextNoop txn;
- this->_helper.btree.initAsEmpty(&txn);
-
- BSONObj key1 = simpleKey('z');
- ASSERT_OK(this->insert(key1, this->_helper.dummyDiskLoc, true));
- this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
-
- // Attempt to insert a dup key/value.
- ASSERT_EQUALS(ErrorCodes::DuplicateKeyValue,
- this->insert(key1, this->_helper.dummyDiskLoc, true));
- this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
-
- // Attempt to insert a dup key/value with dupsAllowed=false.
- ASSERT_EQUALS(ErrorCodes::DuplicateKeyValue,
- this->insert(key1, this->_helper.dummyDiskLoc, false));
- this->checkValidNumKeys(1);
- this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
-
- // Add another record to produce another diskloc.
- StatusWith<RecordId> s = this->_helper.recordStore.insertRecord(&txn, "a", 1, false);
-
- ASSERT_TRUE(s.isOK());
- ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
-
- const DiskLoc dummyDiskLoc2 = DiskLoc::fromRecordId(s.getValue());
-
- // Attempt to insert a dup key but this time with a different value.
- ASSERT_EQUALS(ErrorCodes::DuplicateKey, this->insert(key1, dummyDiskLoc2, false));
- this->checkValidNumKeys(1);
-
- // Insert a dup key with dupsAllowed=true, should succeed.
- ASSERT_OK(this->insert(key1, dummyDiskLoc2, true));
- this->checkValidNumKeys(2);
-
- // Clean up.
- this->_helper.recordStore.deleteRecord(&txn, s.getValue());
- ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
- }
- };
+template <class OnDiskFormat>
+class DelInternalReplacementNextNonNull : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+ builder.makeTree("{a:null,_:{c:null,_:{d:null}}}");
- /* This test requires the entire server to be linked-in and it is better implemented using
- the JS framework. Disabling here and will put in jsCore.
+ long long unused = 0;
+ ASSERT_EQUALS(3, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
- template<class OnDiskFormat>
- class SignedZeroDuplication : public BtreeLogicTestBase<OnDiskFormat> {
- public:
- void run() {
- ASSERT_EQUALS(0.0, -0.0);
- DBDirectClient c;
-
- static const string ns("unittests.SignedZeroDuplication");
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
- c.ensureIndex(ns, BSON("b" << 1), true);
- c.insert(ns, BSON("b" << 0.0));
- c.insert(ns, BSON("b" << 1.0));
- c.update(ns, BSON("b" << 1.0), BSON("b" << -0.0));
+ const BSONObj k = BSON(""
+ << "a");
+ ASSERT(this->unindex(k));
- ASSERT_EQUALS(1U, c.count(ns, BSON("b" << 0.0)));
- }
- };
- */
+ ASSERT_EQUALS(2, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+
+ // The tree has 3 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(4, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(1, unused);
+
+ builder.checkStructure("{a:null,_:{c:null,_:{d:null}}}");
+
+ // Check 'unused' key
+ ASSERT(this->getKey(this->_helper.headManager.getHead(&txn), 0).recordLoc.getOfs() & 1);
+ }
+};
+
+template <class OnDiskFormat>
+class DelInternalSplitPromoteLeft : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+
+ builder.makeTree(
+ "{$10:null,$20:null,$30$10:{$25:{$23:null},_:{$27:null}},"
+ "$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}");
+
+ long long unused = 0;
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
+
+ const BSONObj k = BSON("" << bigNumString(0x30, 0x10));
+ ASSERT(this->unindex(k));
+
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
+
+ builder.checkStructure(
+ "{$60:{$10:null,$20:null,"
+ "$27:{$23:null,$25:null},$40:null,$50:null},"
+ "_:{$70:null,$80:null,$90:null,$100:null}}");
+ }
+};
+
+template <class OnDiskFormat>
+class DelInternalSplitPromoteRight : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ ArtificialTreeBuilder<OnDiskFormat> builder(&txn, &this->_helper);
+
+ builder.makeTree(
+ "{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,"
+ "$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}");
+
+ long long unused = 0;
+ ASSERT_EQUALS(13, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
+
+ const BSONObj k = BSON("" << bigNumString(0x100, 0x10));
+ ASSERT(this->unindex(k));
+
+ ASSERT_EQUALS(12, this->_helper.btree.fullValidate(&txn, &unused, true, false, 0));
+
+ // The tree has 4 buckets + 1 for the this->_helper.dummyDiskLoc
+ ASSERT_EQUALS(5, this->_helper.recordStore.numRecords(NULL));
+ ASSERT_EQUALS(0, unused);
+
+ builder.checkStructure(
+ "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},"
+ "_:{$90:null,$97:{$93:null,$95:null}}}");
+ }
+};
+
+template <class OnDiskFormat>
+class LocateEmptyForward : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ BSONObj key1 = simpleKey('a');
+ this->insert(key1, this->_helper.dummyDiskLoc);
+ BSONObj key2 = simpleKey('b');
+ this->insert(key2, this->_helper.dummyDiskLoc);
+ BSONObj key3 = simpleKey('c');
+ this->insert(key3, this->_helper.dummyDiskLoc);
+
+ this->checkValidNumKeys(3);
+ this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&txn), 1);
+ }
+};
+
+template <class OnDiskFormat>
+class LocateEmptyReverse : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ BSONObj key1 = simpleKey('a');
+ this->insert(key1, this->_helper.dummyDiskLoc);
+ BSONObj key2 = simpleKey('b');
+ this->insert(key2, this->_helper.dummyDiskLoc);
+ BSONObj key3 = simpleKey('c');
+ this->insert(key3, this->_helper.dummyDiskLoc);
+
+ this->checkValidNumKeys(3);
+ this->locate(BSONObj(), -1, false, DiskLoc(), -1);
+ }
+};
+
+template <class OnDiskFormat>
+class DuplicateKeys : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ BSONObj key1 = simpleKey('z');
+ ASSERT_OK(this->insert(key1, this->_helper.dummyDiskLoc, true));
+ this->checkValidNumKeys(1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+
+ // Attempt to insert a dup key/value.
+ ASSERT_EQUALS(ErrorCodes::DuplicateKeyValue,
+ this->insert(key1, this->_helper.dummyDiskLoc, true));
+ this->checkValidNumKeys(1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+
+ // Attempt to insert a dup key/value with dupsAllowed=false.
+ ASSERT_EQUALS(ErrorCodes::DuplicateKeyValue,
+ this->insert(key1, this->_helper.dummyDiskLoc, false));
+ this->checkValidNumKeys(1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+
+ // Add another record to produce another diskloc.
+ StatusWith<RecordId> s = this->_helper.recordStore.insertRecord(&txn, "a", 1, false);
+
+ ASSERT_TRUE(s.isOK());
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
+
+ const DiskLoc dummyDiskLoc2 = DiskLoc::fromRecordId(s.getValue());
+
+ // Attempt to insert a dup key but this time with a different value.
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey, this->insert(key1, dummyDiskLoc2, false));
+ this->checkValidNumKeys(1);
+
+ // Insert a dup key with dupsAllowed=true, should succeed.
+ ASSERT_OK(this->insert(key1, dummyDiskLoc2, true));
+ this->checkValidNumKeys(2);
+
+ // Clean up.
+ this->_helper.recordStore.deleteRecord(&txn, s.getValue());
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
+ }
+};
+
+
+/* This test requires the entire server to be linked in and is better implemented using
+ the JS framework. It is disabled here and will be added to jsCore.
+
+template<class OnDiskFormat>
+class SignedZeroDuplication : public BtreeLogicTestBase<OnDiskFormat> {
+public:
+ void run() {
+ ASSERT_EQUALS(0.0, -0.0);
+ DBDirectClient c;
+
+ static const string ns("unittests.SignedZeroDuplication");
+
+ c.ensureIndex(ns, BSON("b" << 1), true);
+ c.insert(ns, BSON("b" << 0.0));
+ c.insert(ns, BSON("b" << 1.0));
+ c.update(ns, BSON("b" << 1.0), BSON("b" << -0.0));
+
+ ASSERT_EQUALS(1U, c.count(ns, BSON("b" << 0.0)));
+ }
+};
+*/
/*
// QUERY_MIGRATION: port later
@@ -2217,111 +2391,107 @@ namespace mongo {
};
*/
- //
- // TEST SUITE DEFINITION
- //
-
- template<class OnDiskFormat>
- class BtreeLogicTestSuite : public unittest::Suite {
- public:
- BtreeLogicTestSuite(const std::string& name) : Suite(name) {
-
- }
-
- void setupTests() {
- add< SimpleCreate<OnDiskFormat> >();
- add< SimpleInsertDelete<OnDiskFormat> >();
- add< SplitRightHeavyBucket<OnDiskFormat> >();
- add< SplitLeftHeavyBucket<OnDiskFormat> >();
- add< MissingLocate<OnDiskFormat> >();
- add< MissingLocateMultiBucket<OnDiskFormat> >();
- add< SERVER983<OnDiskFormat> >();
- add< DontReuseUnused<OnDiskFormat> >();
- add< MergeBucketsLeft<OnDiskFormat> >();
- add< MergeBucketsRight<OnDiskFormat> >();
- add< MergeBucketsDontReplaceHead<OnDiskFormat> >();
- add< MergeBucketsDelInternal<OnDiskFormat> >();
- add< MergeBucketsRightNull<OnDiskFormat> >();
- add< DontMergeSingleBucket<OnDiskFormat> >();
- add< ParentMergeNonRightToLeft<OnDiskFormat> >();
- add< ParentMergeNonRightToRight<OnDiskFormat> >();
- add< CantMergeRightNoMerge<OnDiskFormat> >();
- add< CantMergeLeftNoMerge<OnDiskFormat> >();
- add< MergeOption<OnDiskFormat> >();
- add< ForceMergeLeft<OnDiskFormat> >();
- add< ForceMergeRight<OnDiskFormat> >();
- add< RecursiveMerge<OnDiskFormat> >();
- add< RecursiveMergeRightBucket<OnDiskFormat> >();
- add< RecursiveMergeDoubleRightBucket<OnDiskFormat> >();
-
- add< MergeSizeJustRightRight<OnDiskFormat> >();
- add< MergeSizeJustRightLeft<OnDiskFormat> >();
- add< MergeSizeRight<OnDiskFormat> >();
- add< MergeSizeLeft<OnDiskFormat> >();
- add< NoMergeBelowMarkRight<OnDiskFormat> >();
- add< NoMergeBelowMarkLeft<OnDiskFormat> >();
- add< MergeSizeRightTooBig<OnDiskFormat> >();
- add< MergeSizeLeftTooBig<OnDiskFormat> >();
- add< MergeRightEmpty<OnDiskFormat> >();
- add< MergeMinRightEmpty<OnDiskFormat> >();
- add< MergeLeftEmpty<OnDiskFormat> >();
- add< MergeMinLeftEmpty<OnDiskFormat> >();
- add< BalanceRightEmpty<OnDiskFormat> >();
- add< BalanceLeftEmpty<OnDiskFormat> >();
-
- add< BalanceOneLeftToRight<OnDiskFormat> >();
- add< BalanceOneRightToLeft<OnDiskFormat> >();
- add< BalanceThreeLeftToRight<OnDiskFormat> >();
- add< BalanceThreeRightToLeft<OnDiskFormat> >();
- add< BalanceSingleParentKey<OnDiskFormat> >();
-
- add< PackEmptyBucket<OnDiskFormat> >();
- add< PackedDataSizeEmptyBucket<OnDiskFormat> >();
-
- add< BalanceSingleParentKeyPackParent<OnDiskFormat> >();
- add< BalanceSplitParent<OnDiskFormat> >();
- add< EvenRebalanceLeft<OnDiskFormat> >();
- add< EvenRebalanceLeftCusp<OnDiskFormat> >();
- add< EvenRebalanceRight<OnDiskFormat> >();
- add< EvenRebalanceRightCusp<OnDiskFormat> >();
- add< EvenRebalanceCenter<OnDiskFormat> >();
- add< OddRebalanceLeft<OnDiskFormat> >();
- add< OddRebalanceRight<OnDiskFormat> >();
- add< OddRebalanceCenter<OnDiskFormat> >();
- add< RebalanceEmptyRight<OnDiskFormat> >();
- add< RebalanceEmptyLeft<OnDiskFormat> >();
-
- add< NoMoveAtLowWaterMarkRight<OnDiskFormat> >();
- add< MoveBelowLowWaterMarkRight<OnDiskFormat> >();
- add< NoMoveAtLowWaterMarkLeft<OnDiskFormat> >();
- add< MoveBelowLowWaterMarkLeft<OnDiskFormat> >();
-
- add< PreferBalanceLeft<OnDiskFormat> >();
- add< PreferBalanceRight<OnDiskFormat> >();
- add< RecursiveMergeThenBalance<OnDiskFormat> >();
- add< DelEmptyNoNeighbors<OnDiskFormat> >();
- add< DelEmptyEmptyNeighbors<OnDiskFormat> >();
- add< DelInternal<OnDiskFormat> >();
- add< DelInternalReplaceWithUnused<OnDiskFormat> >();
- add< DelInternalReplaceRight<OnDiskFormat> >();
- add< DelInternalPromoteKey<OnDiskFormat> >();
- add< DelInternalPromoteRightKey<OnDiskFormat> >();
- add< DelInternalReplacementPrevNonNull<OnDiskFormat> >();
- add< DelInternalReplacementNextNonNull<OnDiskFormat> >();
- add< DelInternalSplitPromoteLeft<OnDiskFormat> >();
- add< DelInternalSplitPromoteRight<OnDiskFormat> >();
-
- add< LocateEmptyForward<OnDiskFormat> >();
- add< LocateEmptyReverse<OnDiskFormat> >();
-
- add< DuplicateKeys<OnDiskFormat> >();
- }
- };
-
- // Test suite for both V0 and V1
- static unittest::SuiteInstance< BtreeLogicTestSuite<BtreeLayoutV0> > SUITE_V0(
- "BTreeLogicTests_V0");
+//
+// TEST SUITE DEFINITION
+//
- static unittest::SuiteInstance< BtreeLogicTestSuite<BtreeLayoutV1> > SUITE_V1(
- "BTreeLogicTests_V1");
+template <class OnDiskFormat>
+class BtreeLogicTestSuite : public unittest::Suite {
+public:
+ BtreeLogicTestSuite(const std::string& name) : Suite(name) {}
+
+ void setupTests() {
+ add<SimpleCreate<OnDiskFormat>>();
+ add<SimpleInsertDelete<OnDiskFormat>>();
+ add<SplitRightHeavyBucket<OnDiskFormat>>();
+ add<SplitLeftHeavyBucket<OnDiskFormat>>();
+ add<MissingLocate<OnDiskFormat>>();
+ add<MissingLocateMultiBucket<OnDiskFormat>>();
+ add<SERVER983<OnDiskFormat>>();
+ add<DontReuseUnused<OnDiskFormat>>();
+ add<MergeBucketsLeft<OnDiskFormat>>();
+ add<MergeBucketsRight<OnDiskFormat>>();
+ add<MergeBucketsDontReplaceHead<OnDiskFormat>>();
+ add<MergeBucketsDelInternal<OnDiskFormat>>();
+ add<MergeBucketsRightNull<OnDiskFormat>>();
+ add<DontMergeSingleBucket<OnDiskFormat>>();
+ add<ParentMergeNonRightToLeft<OnDiskFormat>>();
+ add<ParentMergeNonRightToRight<OnDiskFormat>>();
+ add<CantMergeRightNoMerge<OnDiskFormat>>();
+ add<CantMergeLeftNoMerge<OnDiskFormat>>();
+ add<MergeOption<OnDiskFormat>>();
+ add<ForceMergeLeft<OnDiskFormat>>();
+ add<ForceMergeRight<OnDiskFormat>>();
+ add<RecursiveMerge<OnDiskFormat>>();
+ add<RecursiveMergeRightBucket<OnDiskFormat>>();
+ add<RecursiveMergeDoubleRightBucket<OnDiskFormat>>();
+
+ add<MergeSizeJustRightRight<OnDiskFormat>>();
+ add<MergeSizeJustRightLeft<OnDiskFormat>>();
+ add<MergeSizeRight<OnDiskFormat>>();
+ add<MergeSizeLeft<OnDiskFormat>>();
+ add<NoMergeBelowMarkRight<OnDiskFormat>>();
+ add<NoMergeBelowMarkLeft<OnDiskFormat>>();
+ add<MergeSizeRightTooBig<OnDiskFormat>>();
+ add<MergeSizeLeftTooBig<OnDiskFormat>>();
+ add<MergeRightEmpty<OnDiskFormat>>();
+ add<MergeMinRightEmpty<OnDiskFormat>>();
+ add<MergeLeftEmpty<OnDiskFormat>>();
+ add<MergeMinLeftEmpty<OnDiskFormat>>();
+ add<BalanceRightEmpty<OnDiskFormat>>();
+ add<BalanceLeftEmpty<OnDiskFormat>>();
+
+ add<BalanceOneLeftToRight<OnDiskFormat>>();
+ add<BalanceOneRightToLeft<OnDiskFormat>>();
+ add<BalanceThreeLeftToRight<OnDiskFormat>>();
+ add<BalanceThreeRightToLeft<OnDiskFormat>>();
+ add<BalanceSingleParentKey<OnDiskFormat>>();
+
+ add<PackEmptyBucket<OnDiskFormat>>();
+ add<PackedDataSizeEmptyBucket<OnDiskFormat>>();
+
+ add<BalanceSingleParentKeyPackParent<OnDiskFormat>>();
+ add<BalanceSplitParent<OnDiskFormat>>();
+ add<EvenRebalanceLeft<OnDiskFormat>>();
+ add<EvenRebalanceLeftCusp<OnDiskFormat>>();
+ add<EvenRebalanceRight<OnDiskFormat>>();
+ add<EvenRebalanceRightCusp<OnDiskFormat>>();
+ add<EvenRebalanceCenter<OnDiskFormat>>();
+ add<OddRebalanceLeft<OnDiskFormat>>();
+ add<OddRebalanceRight<OnDiskFormat>>();
+ add<OddRebalanceCenter<OnDiskFormat>>();
+ add<RebalanceEmptyRight<OnDiskFormat>>();
+ add<RebalanceEmptyLeft<OnDiskFormat>>();
+
+ add<NoMoveAtLowWaterMarkRight<OnDiskFormat>>();
+ add<MoveBelowLowWaterMarkRight<OnDiskFormat>>();
+ add<NoMoveAtLowWaterMarkLeft<OnDiskFormat>>();
+ add<MoveBelowLowWaterMarkLeft<OnDiskFormat>>();
+
+ add<PreferBalanceLeft<OnDiskFormat>>();
+ add<PreferBalanceRight<OnDiskFormat>>();
+ add<RecursiveMergeThenBalance<OnDiskFormat>>();
+ add<DelEmptyNoNeighbors<OnDiskFormat>>();
+ add<DelEmptyEmptyNeighbors<OnDiskFormat>>();
+ add<DelInternal<OnDiskFormat>>();
+ add<DelInternalReplaceWithUnused<OnDiskFormat>>();
+ add<DelInternalReplaceRight<OnDiskFormat>>();
+ add<DelInternalPromoteKey<OnDiskFormat>>();
+ add<DelInternalPromoteRightKey<OnDiskFormat>>();
+ add<DelInternalReplacementPrevNonNull<OnDiskFormat>>();
+ add<DelInternalReplacementNextNonNull<OnDiskFormat>>();
+ add<DelInternalSplitPromoteLeft<OnDiskFormat>>();
+ add<DelInternalSplitPromoteRight<OnDiskFormat>>();
+
+ add<LocateEmptyForward<OnDiskFormat>>();
+ add<LocateEmptyReverse<OnDiskFormat>>();
+
+ add<DuplicateKeys<OnDiskFormat>>();
+ }
+};
+
+// Test suite for both V0 and V1
+static unittest::SuiteInstance<BtreeLogicTestSuite<BtreeLayoutV0>> SUITE_V0("BTreeLogicTests_V0");
+
+static unittest::SuiteInstance<BtreeLogicTestSuite<BtreeLayoutV1>> SUITE_V1("BTreeLogicTests_V1");
}
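For reference when reading the tests above: the JSON-like strings passed to ArtificialTreeBuilder::makeTree describe one bucket per object. Each field name becomes a key in that bucket, a nested object becomes that key's left child bucket, and the reserved field name "_" fills the bucket's nextChild (rightmost child) pointer; see the makeTree implementation in btree_test_help.cpp later in this patch. A minimal usage sketch (not from the patch itself), assuming the V1 layout and an ascending single-field ordering -- the ordering object below is an illustrative assumption:

    // Illustrative only -- mirrors DelEmptyNoNeighbors above; relies on the
    // test helpers defined later in this patch.
    OperationContextNoop txn;
    BtreeLogicTestHelper<BtreeLayoutV1> helper(BSON("a" << 1));   // ordering is an assumed example
    ArtificialTreeBuilder<BtreeLayoutV1> builder(&txn, &helper);

    // Root bucket holds the key "b"; its left child bucket holds the key "a".
    builder.makeTree("{b:{a:null}}");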
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
index 15997d5681c..91b7141e7ed 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.cpp
@@ -37,23 +37,23 @@
namespace mongo {
- void DiskLoc56Bit::operator=(const DiskLoc& loc) {
- ofs = loc.getOfs();
- int la = loc.a();
- if (la == DiskLoc::max().a()) {
- invariant(ofs == DiskLoc::max().getOfs());
- la = OurMaxA;
- }
- invariant( la <= OurMaxA ); // must fit in 3 bytes
- if( la < 0 ) {
- if ( la != -1 ) {
- log() << "btree diskloc isn't negative 1: " << la << std::endl;
- invariant ( la == -1 );
- }
- la = 0;
- ofs = OurNullOfs;
+void DiskLoc56Bit::operator=(const DiskLoc& loc) {
+ ofs = loc.getOfs();
+ int la = loc.a();
+ if (la == DiskLoc::max().a()) {
+ invariant(ofs == DiskLoc::max().getOfs());
+ la = OurMaxA;
+ }
+ invariant(la <= OurMaxA); // must fit in 3 bytes
+ if (la < 0) {
+ if (la != -1) {
+ log() << "btree diskloc isn't negative 1: " << la << std::endl;
+ invariant(la == -1);
}
- memcpy(_a, &la, 3); // endian
+ la = 0;
+ ofs = OurNullOfs;
}
+ memcpy(_a, &la, 3); // endian
+}
} // namespace mongo
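The operator= above packs a DiskLoc's 3-byte file number and 4-byte offset into 56 bits; toLongLong() in btree_ondisk.h (later in this patch) reassembles them with the file number in the high bytes so comparisons group locations by file. A standalone sketch of that packing, assuming a little-endian host as the "// endian" comments do -- simplified stand-in types, not the patch's own code:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Packed56 {
        int32_t ofs;              // low 4 bytes: offset within the data file
        unsigned char a[3];       // next 3 bytes: file number, must fit in 24 bits

        void set(int32_t fileNum, int32_t offset) {
            assert(fileNum >= 0 && fileNum <= 0xffffff);   // mirrors invariant(la <= OurMaxA)
            ofs = offset;
            std::memcpy(a, &fileNum, 3);                   // keep only the low 3 bytes, like operator= above
        }

        uint64_t toLongLong() const {
            uint64_t fileNum = 0;
            std::memcpy(&fileNum, a, 3);
            // The file number occupies the most significant populated bytes, so
            // sorting by this value groups locations from the same file together.
            return (fileNum << 32) | static_cast<uint32_t>(ofs);
        }
    };

    int main() {
        Packed56 loc;
        loc.set(/*fileNum=*/5, /*offset=*/4096);
        assert(loc.toLongLong() == ((5ULL << 32) | 4096ULL));
        return 0;
    }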
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.h b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.h
index a5ddec6bccd..3238ec64179 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_ondisk.h
@@ -34,337 +34,342 @@
namespace mongo {
- const int OldBucketSize = 8192;
+const int OldBucketSize = 8192;
+//
+// On-disk index format
+//
+
+#pragma pack(1)
+/**
+ * This is the fixed width data component for storage of a key within a bucket. It contains an
+ * offset pointer to the variable width bson data component. This may be 'unused', please see
+ * below.
+ *
+ * Why is this templated on Loc? Because V0 and V1 have different size DiskLoc(s) but otherwise
+ * the same layout.
+ */
+template <class LocType>
+struct FixedWidthKey {
//
- // On-disk index format
+ // Data
//
-#pragma pack(1)
/**
- * This is the fixed width data component for storage of a key within a bucket. It contains an
- * offset pointer to the variable width bson data component. This may be 'unused', please see
- * below.
- *
- * Why is this templated on Loc? Because V0 and V1 have different size DiskLoc(s) but otherwise
- * the same layout.
+ * The 'left' child bucket of this key. If this is the i-th key, it points to the i index
+ * child bucket.
*/
- template <class LocType>
- struct FixedWidthKey {
- //
- // Data
- //
-
- /**
- * The 'left' child bucket of this key. If this is the i-th key, it points to the i index
- * child bucket.
- */
- LocType prevChildBucket;
-
- /**
- * The location of the record associated with this key.
- */
- LocType recordLoc;
-
- /**
- * Offset within current bucket of the variable width bson key for this _KeyNode.
- */
- unsigned short _kdo;
-
- //
- // Accessors / mutators
- //
-
- short keyDataOfs() const {
- return static_cast<short>(_kdo);
- }
+ LocType prevChildBucket;
- void setKeyDataOfs(short s) {
- _kdo = s;
- invariant(s>=0);
- }
+ /**
+ * The location of the record associated with this key.
+ */
+ LocType recordLoc;
- void setKeyDataOfsSavingUse(short s) {
- // XXX kill this func
- setKeyDataOfs(s);
- }
+ /**
+ * Offset within current bucket of the variable width bson key for this _KeyNode.
+ */
+ unsigned short _kdo;
- /**
- * Unused keys are not returned by read operations. Keys may be marked
- * as unused in cases where it is difficult to delete them while
- * maintaining the constraints required of a btree.
- *
- * Setting ofs to odd is the sentinel for unused, as real recordLoc's
- * are always even numbers. Note we need to keep its value basically
- * the same as we use the recordLoc as part of the key in the index
- * (to handle duplicate keys efficiently).
- *
- * Flagging keys as unused is a feature that is being phased out in favor
- * of deleting the keys outright. The current btree implementation is
- * not expected to mark a key as unused in a non legacy btree.
- */
- void setUnused() {
- recordLoc.GETOFS() |= 1;
- }
+ //
+ // Accessors / mutators
+ //
- void setUsed() { recordLoc.GETOFS() &= ~1; }
+ short keyDataOfs() const {
+ return static_cast<short>(_kdo);
+ }
- int isUnused() const {
- return recordLoc.getOfs() & 1;
- }
+ void setKeyDataOfs(short s) {
+ _kdo = s;
+ invariant(s >= 0);
+ }
- int isUsed() const {
- return !isUnused();
- }
- };
+ void setKeyDataOfsSavingUse(short s) {
+ // XXX kill this func
+ setKeyDataOfs(s);
+ }
/**
- * This structure represents header data for a btree bucket. An object of
- * this type is typically allocated inside of a buffer of size BucketSize,
- * resulting in a full bucket with an appropriate header.
+ * Unused keys are not returned by read operations. Keys may be marked
+ * as unused in cases where it is difficult to delete them while
+ * maintaining the constraints required of a btree.
*
- * The body of a btree bucket contains an array of _KeyNode objects starting
- * from its lowest indexed bytes and growing to higher indexed bytes. The
- * body also contains variable width bson keys, which are allocated from the
- * highest indexed bytes toward lower indexed bytes.
+ * Setting ofs to odd is the sentinel for unused, as real recordLoc's
+ * are always even numbers. Note we need to keep its value basically
+ * the same as we use the recordLoc as part of the key in the index
+ * (to handle duplicate keys efficiently).
*
- * |hhhh|kkkkkkk--------bbbbbbbbbbbuuubbbuubbb|
- * h = header data
- * k = KeyNode data
- * - = empty space
- * b = bson key data
- * u = unused (old) bson key data, that may be garbage collected
+ * Flagging keys as unused is a feature that is being phased out in favor
+ * of deleting the keys outright. The current btree implementation is
+ * not expected to mark a key as unused in a non-legacy btree.
*/
- struct BtreeBucketV0 {
- /**
- * Parent bucket of this bucket, which isNull() for the root bucket.
- */
- DiskLoc parent;
+ void setUnused() {
+ recordLoc.GETOFS() |= 1;
+ }
- /**
- * Given that there are n keys, this is the n index child.
- */
- DiskLoc nextChild;
+ void setUsed() {
+ recordLoc.GETOFS() &= ~1;
+ }
- /**
- * Can be reused, value is 8192 in current pdfile version Apr2010
- */
- unsigned short _wasSize;
+ int isUnused() const {
+ return recordLoc.getOfs() & 1;
+ }
- /**
- * zero
- */
- unsigned short _reserved1;
+ int isUsed() const {
+ return !isUnused();
+ }
+};
- int flags;
+/**
+ * This structure represents header data for a btree bucket. An object of
+ * this type is typically allocated inside of a buffer of size BucketSize,
+ * resulting in a full bucket with an appropriate header.
+ *
+ * The body of a btree bucket contains an array of _KeyNode objects starting
+ * from its lowest indexed bytes and growing to higher indexed bytes. The
+ * body also contains variable width bson keys, which are allocated from the
+ * highest indexed bytes toward lower indexed bytes.
+ *
+ * |hhhh|kkkkkkk--------bbbbbbbbbbbuuubbbuubbb|
+ * h = header data
+ * k = KeyNode data
+ * - = empty space
+ * b = bson key data
+ * u = unused (old) bson key data, that may be garbage collected
+ */
+struct BtreeBucketV0 {
+ /**
+ * Parent bucket of this bucket, which isNull() for the root bucket.
+ */
+ DiskLoc parent;
- /** basicInsert() assumes the next three members are consecutive and in this order: */
+ /**
+ * Given that there are n keys, this is the n index child.
+ */
+ DiskLoc nextChild;
- /** Size of the empty region. */
- int emptySize;
+ /**
+ * Can be reused; the value is 8192 in the current pdfile version (Apr 2010)
+ */
+ unsigned short _wasSize;
- /** Size used for bson storage, including storage of old keys. */
- int topSize;
+ /**
+ * zero
+ */
+ unsigned short _reserved1;
- /* Number of keys in the bucket. */
- int n;
+ int flags;
- int reserved;
+ /** basicInsert() assumes the next three members are consecutive and in this order: */
- /* Beginning of the bucket's body */
- char data[4];
+ /** Size of the empty region. */
+ int emptySize;
- // Precalculated size constants
- enum { HeaderSize = 40 };
- };
+ /** Size used for bson storage, including storage of old keys. */
+ int topSize;
- // BtreeBucketV0 is part of the on-disk format, so it should never be changed
- BOOST_STATIC_ASSERT(
- sizeof(BtreeBucketV0) - sizeof(static_cast<BtreeBucketV0*>(NULL)->data)
- == BtreeBucketV0::HeaderSize);
+ /* Number of keys in the bucket. */
+ int n;
- /**
- * A variant of DiskLoc Used by the V1 bucket type.
- */
- struct DiskLoc56Bit {
- //
- // Data
- //
+ int reserved;
- int ofs;
+ /* Beginning of the bucket's body */
+ char data[4];
- unsigned char _a[3];
+ // Precalculated size constants
+ enum { HeaderSize = 40 };
+};
- //
- // Accessors XXX rename these, this is terrible
- //
+// BtreeBucketV0 is part of the on-disk format, so it should never be changed
+BOOST_STATIC_ASSERT(sizeof(BtreeBucketV0) - sizeof(static_cast<BtreeBucketV0*>(NULL)->data) ==
+ BtreeBucketV0::HeaderSize);
- int& GETOFS() { return ofs; }
+/**
+ * A variant of DiskLoc used by the V1 bucket type.
+ */
+struct DiskLoc56Bit {
+ //
+ // Data
+ //
- int getOfs() const { return ofs; }
+ int ofs;
- //
- // Comparison
- //
+ unsigned char _a[3];
- bool isNull() const { return ofs < 0; }
+ //
+ // Accessors XXX rename these, this is terrible
+ //
- unsigned long long toLongLong() const {
- // endian
- unsigned long long result = ofs;
- char* cursor = reinterpret_cast<char *>(&result);
- *reinterpret_cast<uint16_t*>(cursor + 4) = *reinterpret_cast<const uint16_t*>(&_a[0]);
- *reinterpret_cast<uint8_t*>(cursor + 6) = *reinterpret_cast<const uint8_t*>(&_a[2]);
- *reinterpret_cast<uint8_t*>(cursor + 7) = uint8_t(0);
- return result;
- }
+ int& GETOFS() {
+ return ofs;
+ }
- bool operator<(const DiskLoc56Bit& rhs) const {
- // the orderering of dup keys in btrees isn't too critical, but we'd like to put items
- // that are close together on disk close together in the tree, so we do want the file #
- // to be the most significant bytes
- return toLongLong() < rhs.toLongLong();
- }
+ int getOfs() const {
+ return ofs;
+ }
- int compare(const DiskLoc56Bit& rhs) const {
- unsigned long long a = toLongLong();
- unsigned long long b = rhs.toLongLong();
- if ( a < b ) {
- return -1;
- }
- else {
- return a == b ? 0 : 1;
- }
- }
+ //
+ // Comparison
+ //
- bool operator==(const DiskLoc56Bit& rhs) const {
- return toLongLong() == rhs.toLongLong();
+ bool isNull() const {
+ return ofs < 0;
+ }
+
+ unsigned long long toLongLong() const {
+ // endian
+ unsigned long long result = ofs;
+ char* cursor = reinterpret_cast<char*>(&result);
+ *reinterpret_cast<uint16_t*>(cursor + 4) = *reinterpret_cast<const uint16_t*>(&_a[0]);
+ *reinterpret_cast<uint8_t*>(cursor + 6) = *reinterpret_cast<const uint8_t*>(&_a[2]);
+ *reinterpret_cast<uint8_t*>(cursor + 7) = uint8_t(0);
+ return result;
+ }
+
+ bool operator<(const DiskLoc56Bit& rhs) const {
+ // the ordering of dup keys in btrees isn't too critical, but we'd like to put items
+ // that are close together on disk close together in the tree, so we do want the file #
+ // to be the most significant bytes
+ return toLongLong() < rhs.toLongLong();
+ }
+
+ int compare(const DiskLoc56Bit& rhs) const {
+ unsigned long long a = toLongLong();
+ unsigned long long b = rhs.toLongLong();
+ if (a < b) {
+ return -1;
+ } else {
+ return a == b ? 0 : 1;
}
+ }
- bool operator!=(const DiskLoc56Bit& rhs) const {
- return toLongLong() != rhs.toLongLong();
- }
+ bool operator==(const DiskLoc56Bit& rhs) const {
+ return toLongLong() == rhs.toLongLong();
+ }
- bool operator==(const DiskLoc& rhs) const {
- return DiskLoc(*this) == rhs;
- }
+ bool operator!=(const DiskLoc56Bit& rhs) const {
+ return toLongLong() != rhs.toLongLong();
+ }
- bool operator!=(const DiskLoc& rhs) const {
- return !(*this==rhs);
- }
+ bool operator==(const DiskLoc& rhs) const {
+ return DiskLoc(*this) == rhs;
+ }
- //
- // Mutation
- //
+ bool operator!=(const DiskLoc& rhs) const {
+ return !(*this == rhs);
+ }
- enum {
- OurNullOfs = -2, // first bit of offsets used in _KeyNode we don't use -1 here
- OurMaxA = 0xffffff, // highest 3-byte value
- };
+ //
+ // Mutation
+ //
- void Null() {
- ofs = OurNullOfs;
- _a[0] = _a[1] = _a[2] = 0;
- }
+ enum {
+ OurNullOfs = -2, // first bit of offsets used in _KeyNode we don't use -1 here
+ OurMaxA = 0xffffff, // highest 3-byte value
+ };
- void operator=(const DiskLoc& loc);
+ void Null() {
+ ofs = OurNullOfs;
+ _a[0] = _a[1] = _a[2] = 0;
+ }
- //
- // Type Conversion
- //
+ void operator=(const DiskLoc& loc);
- RecordId toRecordId() const {
- return DiskLoc(*this).toRecordId();
- }
+ //
+ // Type Conversion
+ //
- operator DiskLoc() const {
- // endian
- if( isNull() ) return DiskLoc();
- unsigned a = *((unsigned *) (_a-1));
- return DiskLoc(a >> 8, ofs);
- }
+ RecordId toRecordId() const {
+ return DiskLoc(*this).toRecordId();
+ }
- std::string toString() const { return DiskLoc(*this).toString(); }
- };
+ operator DiskLoc() const {
+ // endian
+ if (isNull())
+ return DiskLoc();
+ unsigned a = *((unsigned*)(_a - 1));
+ return DiskLoc(a >> 8, ofs);
+ }
- struct BtreeBucketV1 {
- /** Parent bucket of this bucket, which isNull() for the root bucket. */
- DiskLoc56Bit parent;
+ std::string toString() const {
+ return DiskLoc(*this).toString();
+ }
+};
- /** Given that there are n keys, this is the n index child. */
- DiskLoc56Bit nextChild;
+struct BtreeBucketV1 {
+ /** Parent bucket of this bucket, which isNull() for the root bucket. */
+ DiskLoc56Bit parent;
- unsigned short flags;
+ /** Given that there are n keys, this is the n index child. */
+ DiskLoc56Bit nextChild;
- /** Size of the empty region. */
- unsigned short emptySize;
+ unsigned short flags;
- /** Size used for bson storage, including storage of old keys. */
- unsigned short topSize;
+ /** Size of the empty region. */
+ unsigned short emptySize;
- /* Number of keys in the bucket. */
- unsigned short n;
+ /** Size used for bson storage, including storage of old keys. */
+ unsigned short topSize;
- /* Beginning of the bucket's body */
- char data[4];
+ /* Number of keys in the bucket. */
+ unsigned short n;
- // Precalculated size constants
- enum { HeaderSize = 22 };
- };
+ /* Beginning of the bucket's body */
+ char data[4];
- // BtreeBucketV1 is part of the on-disk format, so it should never be changed
- BOOST_STATIC_ASSERT(
- sizeof(BtreeBucketV1) - sizeof(static_cast<BtreeBucketV1*>(NULL)->data)
- == BtreeBucketV1::HeaderSize);
+ // Precalculated size constants
+ enum { HeaderSize = 22 };
+};
- enum Flags {
- Packed = 1
- };
+// BtreeBucketV1 is part of the on-disk format, so it should never be changed
+BOOST_STATIC_ASSERT(sizeof(BtreeBucketV1) - sizeof(static_cast<BtreeBucketV1*>(NULL)->data) ==
+ BtreeBucketV1::HeaderSize);
- struct BtreeLayoutV0 {
- typedef FixedWidthKey<DiskLoc> FixedWidthKeyType;
- typedef DiskLoc LocType;
- typedef KeyBson KeyType;
- typedef KeyBson KeyOwnedType;
- typedef BtreeBucketV0 BucketType;
+enum Flags { Packed = 1 };
- enum { BucketSize = 8192,
- BucketBodySize = BucketSize - BucketType::HeaderSize
- };
+struct BtreeLayoutV0 {
+ typedef FixedWidthKey<DiskLoc> FixedWidthKeyType;
+ typedef DiskLoc LocType;
+ typedef KeyBson KeyType;
+ typedef KeyBson KeyOwnedType;
+ typedef BtreeBucketV0 BucketType;
- // largest key size we allow. note we very much need to support bigger keys (somehow) in
- // the future.
+ enum { BucketSize = 8192, BucketBodySize = BucketSize - BucketType::HeaderSize };
- static const int KeyMax = OldBucketSize / 10;
+ // largest key size we allow. note we very much need to support bigger keys (somehow) in
+ // the future.
- // A sentinel value sometimes used to identify a deallocated bucket.
- static const int INVALID_N_SENTINEL = -1;
+ static const int KeyMax = OldBucketSize / 10;
- static void initBucket(BucketType* bucket) {
- bucket->_reserved1 = 0;
- bucket->_wasSize = BucketSize;
- bucket->reserved = 0;
- }
- };
+ // A sentinel value sometimes used to identify a deallocated bucket.
+ static const int INVALID_N_SENTINEL = -1;
+
+ static void initBucket(BucketType* bucket) {
+ bucket->_reserved1 = 0;
+ bucket->_wasSize = BucketSize;
+ bucket->reserved = 0;
+ }
+};
- struct BtreeLayoutV1 {
- typedef FixedWidthKey<DiskLoc56Bit> FixedWidthKeyType;
- typedef KeyV1 KeyType;
- typedef KeyV1Owned KeyOwnedType;
- typedef DiskLoc56Bit LocType;
- typedef BtreeBucketV1 BucketType;
+struct BtreeLayoutV1 {
+ typedef FixedWidthKey<DiskLoc56Bit> FixedWidthKeyType;
+ typedef KeyV1 KeyType;
+ typedef KeyV1Owned KeyOwnedType;
+ typedef DiskLoc56Bit LocType;
+ typedef BtreeBucketV1 BucketType;
- enum { BucketSize = 8192 - 16, // The -16 is to leave room for the MmapV1RecordHeader header
- BucketBodySize = BucketSize - BucketType::HeaderSize
- };
+ enum {
+ BucketSize = 8192 - 16, // The -16 is to leave room for the MmapV1RecordHeader header
+ BucketBodySize = BucketSize - BucketType::HeaderSize
+ };
- static const int KeyMax = 1024;
+ static const int KeyMax = 1024;
- // A sentinel value sometimes used to identify a deallocated bucket.
- static const unsigned short INVALID_N_SENTINEL = 0xffff;
+ // A sentinel value sometimes used to identify a deallocated bucket.
+ static const unsigned short INVALID_N_SENTINEL = 0xffff;
- static void initBucket(BucketType* bucket) { }
- };
+ static void initBucket(BucketType* bucket) {}
+};
#pragma pack()
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
index fe0cdf7e82e..760095898be 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.cpp
@@ -37,210 +37,203 @@
namespace mongo {
- using std::string;
-
- string bigNumString(long long n, int len) {
- char sub[17];
- sprintf(sub, "%.16llx", n);
- string val(len, ' ');
- for (int i = 0; i < len; ++i) {
- val[i] = sub[i % 16];
- }
- return val;
- }
-
- BSONObj simpleKey(char c, int n) {
- BSONObjBuilder builder;
- string val(n, c);
- builder.append("a", val);
- return builder.obj();
+using std::string;
+
+string bigNumString(long long n, int len) {
+ char sub[17];
+ sprintf(sub, "%.16llx", n);
+ string val(len, ' ');
+ for (int i = 0; i < len; ++i) {
+ val[i] = sub[i % 16];
}
+ return val;
+}
- //
- // BtreeLogicTestHelper
- //
-
- template <class OnDiskFormat>
- BtreeLogicTestHelper<OnDiskFormat>::BtreeLogicTestHelper(const BSONObj& order)
- : recordStore("TestRecordStore"),
- btree(&headManager,
- &recordStore,
- &cursorRegistry,
- Ordering::make(order),
- "TestIndex") {
- static const string randomData("RandomStuff");
-
- // Generate a valid record location for a "fake" record, which we will repeatedly use
- // thoughout the tests.
- OperationContextNoop txn;
- StatusWith<RecordId> s =
- recordStore.insertRecord(&txn, randomData.c_str(), randomData.length(), false);
-
- ASSERT_TRUE(s.isOK());
- ASSERT_EQUALS(1, recordStore.numRecords(NULL));
-
- dummyDiskLoc = DiskLoc::fromRecordId(s.getValue());
- }
+BSONObj simpleKey(char c, int n) {
+ BSONObjBuilder builder;
+ string val(n, c);
+ builder.append("a", val);
+ return builder.obj();
+}
+//
+// BtreeLogicTestHelper
+//
- //
- // ArtificialTreeBuilder
- //
+template <class OnDiskFormat>
+BtreeLogicTestHelper<OnDiskFormat>::BtreeLogicTestHelper(const BSONObj& order)
+ : recordStore("TestRecordStore"),
+ btree(&headManager, &recordStore, &cursorRegistry, Ordering::make(order), "TestIndex") {
+ static const string randomData("RandomStuff");
- template <class OnDiskFormat>
- void ArtificialTreeBuilder<OnDiskFormat>::makeTree(const string &spec) {
- _helper->headManager.setHead(_txn, makeTree(fromjson(spec)).toRecordId());
- }
+ // Generate a valid record location for a "fake" record, which we will repeatedly use
+    // throughout the tests.
+ OperationContextNoop txn;
+ StatusWith<RecordId> s =
+ recordStore.insertRecord(&txn, randomData.c_str(), randomData.length(), false);
- template <class OnDiskFormat>
- DiskLoc ArtificialTreeBuilder<OnDiskFormat>::makeTree(const BSONObj &spec) {
- DiskLoc bucketLoc = _helper->btree._addBucket(_txn);
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
-
- BSONObjIterator i(spec);
- while (i.more()) {
- BSONElement e = i.next();
- DiskLoc child;
- if (e.type() == Object) {
- child = makeTree(e.embeddedObject());
- }
-
- if (e.fieldName() == string("_")) {
- bucket->nextChild = child;
- }
- else {
- KeyDataOwnedType key(BSON("" << expectedKey(e.fieldName())));
- invariant(_helper->btree.pushBack(bucket, _helper->dummyDiskLoc, key, child));
- }
- }
+ ASSERT_TRUE(s.isOK());
+ ASSERT_EQUALS(1, recordStore.numRecords(NULL));
- _helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
- return bucketLoc;
- }
+ dummyDiskLoc = DiskLoc::fromRecordId(s.getValue());
+}
- template <class OnDiskFormat>
- void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(const string &spec) const {
- checkStructure(fromjson(spec), DiskLoc::fromRecordId(_helper->headManager.getHead(_txn)));
- }
- template <class OnDiskFormat>
- void ArtificialTreeBuilder<OnDiskFormat>::push(
- const DiskLoc bucketLoc, const BSONObj& key, const DiskLoc child) {
- KeyDataOwnedType k(key);
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+//
+// ArtificialTreeBuilder
+//
- invariant(_helper->btree.pushBack(bucket, _helper->dummyDiskLoc, k, child));
- _helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
- }
+template <class OnDiskFormat>
+void ArtificialTreeBuilder<OnDiskFormat>::makeTree(const string& spec) {
+ _helper->headManager.setHead(_txn, makeTree(fromjson(spec)).toRecordId());
+}
- template <class OnDiskFormat>
- void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(
- const BSONObj &spec, const DiskLoc node) const {
- BucketType* bucket = _helper->btree.getBucket(_txn, node);
-
- BSONObjIterator j(spec);
- for (int i = 0; i < bucket->n; ++i) {
- ASSERT(j.more());
- BSONElement e = j.next();
- KeyHeaderType kn = BtreeLogic<OnDiskFormat>::getKeyHeader(bucket, i);
- string expected = expectedKey(e.fieldName());
- ASSERT(isPresent(BSON("" << expected), 1));
- ASSERT(isPresent(BSON("" << expected), -1));
-
- // ASSERT_EQUALS(expected, kn.key.toBson().firstElement().valuestr());
- if (kn.prevChildBucket.isNull()) {
- ASSERT(e.type() == jstNULL);
- }
- else {
- ASSERT(e.type() == Object);
- checkStructure(e.embeddedObject(), kn.prevChildBucket);
- }
+template <class OnDiskFormat>
+DiskLoc ArtificialTreeBuilder<OnDiskFormat>::makeTree(const BSONObj& spec) {
+ DiskLoc bucketLoc = _helper->btree._addBucket(_txn);
+ BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+
+ BSONObjIterator i(spec);
+ while (i.more()) {
+ BSONElement e = i.next();
+ DiskLoc child;
+ if (e.type() == Object) {
+ child = makeTree(e.embeddedObject());
}
- if (bucket->nextChild.isNull()) {
- // maybe should allow '_' field with null value?
- ASSERT(!j.more());
- }
- else {
- BSONElement e = j.next();
- ASSERT_EQUALS(string("_"), e.fieldName());
- ASSERT(e.type() == Object);
- checkStructure(e.embeddedObject(), bucket->nextChild);
- }
- ASSERT(!j.more());
- }
- template <class OnDiskFormat>
- bool ArtificialTreeBuilder<OnDiskFormat>::isPresent(const BSONObj &key, int direction) const {
- int pos;
- DiskLoc loc;
- OperationContextNoop txn;
- return _helper->btree.locate(&txn, key, _helper->dummyDiskLoc, direction, &pos, &loc);
+ if (e.fieldName() == string("_")) {
+ bucket->nextChild = child;
+ } else {
+ KeyDataOwnedType key(BSON("" << expectedKey(e.fieldName())));
+ invariant(_helper->btree.pushBack(bucket, _helper->dummyDiskLoc, key, child));
+ }
}
- // Static
- template <class OnDiskFormat>
- string ArtificialTreeBuilder<OnDiskFormat>::expectedKey(const char *spec) {
- if (spec[0] != '$') {
- return spec;
- }
- char *endPtr;
+ _helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
+ return bucketLoc;
+}
- // parsing a long long is a pain, so just allow shorter keys for now
- unsigned long long num = strtol(spec + 1, &endPtr, 16);
- int len = 800;
- if (*endPtr == '$') {
- len = strtol(endPtr + 1, 0, 16);
- }
+template <class OnDiskFormat>
+void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(const string& spec) const {
+ checkStructure(fromjson(spec), DiskLoc::fromRecordId(_helper->headManager.getHead(_txn)));
+}
- return bigNumString(num, len);
- }
+template <class OnDiskFormat>
+void ArtificialTreeBuilder<OnDiskFormat>::push(const DiskLoc bucketLoc,
+ const BSONObj& key,
+ const DiskLoc child) {
+ KeyDataOwnedType k(key);
+ BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
- template <class OnDiskFormat>
- int ArtificialTreeBuilder<OnDiskFormat>::fillBucketToExactSize(
- const DiskLoc bucketLoc, int targetSize, char startKey) {
- ASSERT_FALSE(bucketLoc.isNull());
+ invariant(_helper->btree.pushBack(bucket, _helper->dummyDiskLoc, k, child));
+ _helper->btree.fixParentPtrs(_txn, bucket, bucketLoc);
+}
- BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
- ASSERT_EQUALS(0, bucket->n);
+template <class OnDiskFormat>
+void ArtificialTreeBuilder<OnDiskFormat>::checkStructure(const BSONObj& spec,
+ const DiskLoc node) const {
+ BucketType* bucket = _helper->btree.getBucket(_txn, node);
+
+ BSONObjIterator j(spec);
+ for (int i = 0; i < bucket->n; ++i) {
+ ASSERT(j.more());
+ BSONElement e = j.next();
+ KeyHeaderType kn = BtreeLogic<OnDiskFormat>::getKeyHeader(bucket, i);
+ string expected = expectedKey(e.fieldName());
+ ASSERT(isPresent(BSON("" << expected), 1));
+ ASSERT(isPresent(BSON("" << expected), -1));
+
+ // ASSERT_EQUALS(expected, kn.key.toBson().firstElement().valuestr());
+ if (kn.prevChildBucket.isNull()) {
+ ASSERT(e.type() == jstNULL);
+ } else {
+ ASSERT(e.type() == Object);
+ checkStructure(e.embeddedObject(), kn.prevChildBucket);
+ }
+ }
+ if (bucket->nextChild.isNull()) {
+ // maybe should allow '_' field with null value?
+ ASSERT(!j.more());
+ } else {
+ BSONElement e = j.next();
+ ASSERT_EQUALS(string("_"), e.fieldName());
+ ASSERT(e.type() == Object);
+ checkStructure(e.embeddedObject(), bucket->nextChild);
+ }
+ ASSERT(!j.more());
+}
- static const int bigSize = KeyDataOwnedType(simpleKey('a', 801)).dataSize();
+template <class OnDiskFormat>
+bool ArtificialTreeBuilder<OnDiskFormat>::isPresent(const BSONObj& key, int direction) const {
+ int pos;
+ DiskLoc loc;
+ OperationContextNoop txn;
+ return _helper->btree.locate(&txn, key, _helper->dummyDiskLoc, direction, &pos, &loc);
+}
- int size = 0;
- int keyCount = 0;
- while (size < targetSize) {
- int space = targetSize - size;
- int nextSize = space - sizeof(FixedWidthKeyType);
- verify(nextSize > 0);
+// Static
+template <class OnDiskFormat>
+string ArtificialTreeBuilder<OnDiskFormat>::expectedKey(const char* spec) {
+ if (spec[0] != '$') {
+ return spec;
+ }
+ char* endPtr;
- BSONObj newKey;
- if (nextSize >= bigSize) {
- newKey = simpleKey(startKey++, 801);
- }
- else {
- newKey = simpleKey(startKey++, nextSize - (bigSize - 801));
- }
+ // parsing a long long is a pain, so just allow shorter keys for now
+ unsigned long long num = strtol(spec + 1, &endPtr, 16);
+ int len = 800;
+ if (*endPtr == '$') {
+ len = strtol(endPtr + 1, 0, 16);
+ }
- push(bucketLoc, newKey, DiskLoc());
+ return bigNumString(num, len);
+}
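A worked example of the '$'-prefixed key-spec syntax handled above (values derived from expectedKey() and bigNumString() in this file; illustrative only):

    // expectedKey("ab")   -> "ab"                  no leading '$': returned verbatim
    // expectedKey("$2")   -> bigNumString(2, 800)  default length 800, cycling "0000000000000002"
    // expectedKey("$2$4") -> "0000"                hex length after the second '$' overrides the default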
- size += KeyDataOwnedType(newKey).dataSize() +
- sizeof(FixedWidthKeyType);
- keyCount += 1;
+template <class OnDiskFormat>
+int ArtificialTreeBuilder<OnDiskFormat>::fillBucketToExactSize(const DiskLoc bucketLoc,
+ int targetSize,
+ char startKey) {
+ ASSERT_FALSE(bucketLoc.isNull());
+
+ BucketType* bucket = _helper->btree.getBucket(_txn, bucketLoc);
+ ASSERT_EQUALS(0, bucket->n);
+
+ static const int bigSize = KeyDataOwnedType(simpleKey('a', 801)).dataSize();
+
+ int size = 0;
+ int keyCount = 0;
+ while (size < targetSize) {
+ int space = targetSize - size;
+ int nextSize = space - sizeof(FixedWidthKeyType);
+ verify(nextSize > 0);
+
+ BSONObj newKey;
+ if (nextSize >= bigSize) {
+ newKey = simpleKey(startKey++, 801);
+ } else {
+ newKey = simpleKey(startKey++, nextSize - (bigSize - 801));
}
- ASSERT_EQUALS(_helper->btree._packedDataSize(bucket, 0), targetSize);
+ push(bucketLoc, newKey, DiskLoc());
- return keyCount;
+ size += KeyDataOwnedType(newKey).dataSize() + sizeof(FixedWidthKeyType);
+ keyCount += 1;
}
- //
- // This causes actual code to be generated for the usages of the templates in this file.
- //
+ ASSERT_EQUALS(_helper->btree._packedDataSize(bucket, 0), targetSize);
+
+ return keyCount;
+}
+
+//
+// This causes actual code to be generated for the usages of the templates in this file.
+//
- // V0 format.
- template struct BtreeLogicTestHelper<BtreeLayoutV0>;
- template class ArtificialTreeBuilder<BtreeLayoutV0>;
+// V0 format.
+template struct BtreeLogicTestHelper<BtreeLayoutV0>;
+template class ArtificialTreeBuilder<BtreeLayoutV0>;
- // V1 format.
- template struct BtreeLogicTestHelper<BtreeLayoutV1>;
- template class ArtificialTreeBuilder<BtreeLayoutV1>;
+// V1 format.
+template struct BtreeLogicTestHelper<BtreeLayoutV1>;
+template class ArtificialTreeBuilder<BtreeLayoutV1>;
}
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
index b282e72d827..5aeec516528 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_test_help.h
@@ -37,118 +37,114 @@
namespace mongo {
+/**
+ * Generates a string of the specified length containing repeated concatenation of the
+ * hexadecimal representation of the input value.
+ */
+std::string bigNumString(long long n, int len);
+
+/**
+ * Generates key on a field 'a', with the specified number of repetitions of the character.
+ */
+BSONObj simpleKey(char c, int n = 1);
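Illustrative expected values for these two helpers, inferred from their definitions in btree_test_help.cpp (a sketch, not assertions made by this change):

    // bigNumString(0xabc, 20) == "0000000000000abc0000"  // 16-char hex form of n, cycled out to length 20
    // simpleKey('a', 3)       == BSON("a" << "aaa")       // field "a" holding three repetitions of 'a'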
+
+/**
+ * Simple head manager, which performs no validity checking or persistence.
+ */
+class TestHeadManager : public HeadManager {
+public:
+ virtual const RecordId getHead(OperationContext* txn) const {
+ return _head;
+ }
+
+ virtual void setHead(OperationContext* txn, const RecordId newHead) {
+ _head = newHead;
+ }
+
+private:
+ RecordId _head;
+};
+
+
+/**
+ * This structure encapsulates a Btree and all the infrastructure needed by it (head manager,
+ * record store and a valid disk location to use by the tests).
+ */
+template <class OnDiskFormat>
+struct BtreeLogicTestHelper {
+ BtreeLogicTestHelper(const BSONObj& order);
+
+ // Everything needed for a fully-functional Btree logic
+ TestHeadManager headManager;
+ HeapRecordStoreBtree recordStore;
+ SavedCursorRegistry cursorRegistry;
+ BtreeLogic<OnDiskFormat> btree;
+ DiskLoc dummyDiskLoc;
+};
+
+
+/**
+ * Tool to construct custom tree shapes for tests.
+ */
+template <class OnDiskFormat>
+class ArtificialTreeBuilder {
+public:
+ typedef typename BtreeLogic<OnDiskFormat>::BucketType BucketType;
+ typedef typename BtreeLogic<OnDiskFormat>::KeyDataOwnedType KeyDataOwnedType;
+ typedef typename BtreeLogic<OnDiskFormat>::KeyHeaderType KeyHeaderType;
+
+ typedef typename OnDiskFormat::FixedWidthKeyType FixedWidthKeyType;
+
/**
- * Generates a string of the specified length containing repeated concatenation of the
- * hexadecimal representation of the input value.
+ * The tree builder wraps around the passed-in helper and will invoke methods on it. It
+ * does not do any cleanup, so constructing multiple trees over the same helper will
+ * cause leaked records.
*/
- std::string bigNumString(long long n, int len);
+ ArtificialTreeBuilder(OperationContext* txn, BtreeLogicTestHelper<OnDiskFormat>* helper)
+ : _txn(txn), _helper(helper) {}
/**
- * Generates key on a field 'a', with the specified number of repetitions of the character.
+ * Causes the specified tree shape to be built on the associated helper and the tree's
+ * root installed as the head. Uses a custom JSON-based language with the following
+ * syntax:
+ *
+ * Btree := BTreeBucket
+ * BtreeBucket := { Child_1_Key: <BtreeBucket | null>,
+ * Child_2_Key: <BtreeBucket | null>,
+ * ...,
+ * _: <BtreeBucket | null> }
+ *
+ * The _ key name specifies the content of the nextChild pointer. The value null means
+ * use a fixed disk loc.
*/
- BSONObj simpleKey(char c, int n = 1);
+ void makeTree(const std::string& spec);
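A minimal usage sketch of the spec language documented above; the spec string and resulting shape are hypothetical, but the calls match the declarations in this header:

    OperationContextNoop txn;
    BtreeLogicTestHelper<BtreeLayoutV1> helper(BSON("a" << 1));
    ArtificialTreeBuilder<BtreeLayoutV1> builder(&txn, &helper);
    // Root bucket holds keys "b" and "d"; "a" and "c" sit in their left child buckets,
    // and "e" sits in the bucket reached through the root's nextChild pointer.
    builder.makeTree("{b: {a: null}, d: {c: null}, _: {e: null}}");
    builder.checkStructure("{b: {a: null}, d: {c: null}, _: {e: null}}");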
/**
- * Simple head manager, which performs no validity checking or persistence.
+ * Validates that the structure of the Btree in the helper matches the specification.
*/
- class TestHeadManager : public HeadManager {
- public:
- virtual const RecordId getHead( OperationContext* txn ) const {
- return _head;
- }
-
- virtual void setHead(OperationContext* txn, const RecordId newHead) {
- _head = newHead;
- }
-
- private:
- RecordId _head;
- };
+ void checkStructure(const std::string& spec) const;
+ /**
+     * Adds the given key to the bucket and fixes up the child pointers.
+ */
+ void push(const DiskLoc bucketLoc, const BSONObj& key, const DiskLoc child);
/**
- * This structure encapsulates a Btree and all the infrastructure needed by it (head manager,
- * record store and a valid disk location to use by the tests).
+ * @return The number of keys inserted.
*/
- template <class OnDiskFormat>
- struct BtreeLogicTestHelper {
- BtreeLogicTestHelper(const BSONObj& order);
+ int fillBucketToExactSize(const DiskLoc bucketLoc, int targetSize, char startKey);
- // Everything needed for a fully-functional Btree logic
- TestHeadManager headManager;
- HeapRecordStoreBtree recordStore;
- SavedCursorRegistry cursorRegistry;
- BtreeLogic<OnDiskFormat> btree;
- DiskLoc dummyDiskLoc;
- };
+private:
+ DiskLoc makeTree(const BSONObj& spec);
+ void checkStructure(const BSONObj& spec, const DiskLoc node) const;
- /**
- * Tool to construct custom tree shapes for tests.
- */
- template <class OnDiskFormat>
- class ArtificialTreeBuilder {
- public:
-
- typedef typename BtreeLogic<OnDiskFormat>::BucketType BucketType;
- typedef typename BtreeLogic<OnDiskFormat>::KeyDataOwnedType KeyDataOwnedType;
- typedef typename BtreeLogic<OnDiskFormat>::KeyHeaderType KeyHeaderType;
-
- typedef typename OnDiskFormat::FixedWidthKeyType FixedWidthKeyType;
-
- /**
- * The tree builder wraps around the passed-in helper and will invoke methods on it. It
- * does not do any cleanup, so constructing multiple trees over the same helper will
- * cause leaked records.
- */
- ArtificialTreeBuilder(OperationContext* txn,
- BtreeLogicTestHelper<OnDiskFormat>* helper)
- : _txn(txn), _helper(helper) {
-
- }
-
- /**
- * Causes the specified tree shape to be built on the associated helper and the tree's
- * root installed as the head. Uses a custom JSON-based language with the following
- * syntax:
- *
- * Btree := BTreeBucket
- * BtreeBucket := { Child_1_Key: <BtreeBucket | null>,
- * Child_2_Key: <BtreeBucket | null>,
- * ...,
- * _: <BtreeBucket | null> }
- *
- * The _ key name specifies the content of the nextChild pointer. The value null means
- * use a fixed disk loc.
- */
- void makeTree(const std::string& spec);
-
- /**
- * Validates that the structure of the Btree in the helper matches the specification.
- */
- void checkStructure(const std::string& spec) const;
-
- /**
- * Adds the following key to the bucket and fixes up the child pointers.
- */
- void push(const DiskLoc bucketLoc, const BSONObj& key, const DiskLoc child);
-
- /**
- * @return The number of keys inserted.
- */
- int fillBucketToExactSize(const DiskLoc bucketLoc, int targetSize, char startKey);
-
- private:
- DiskLoc makeTree(const BSONObj& spec);
-
- void checkStructure(const BSONObj& spec, const DiskLoc node) const;
-
- bool isPresent(const BSONObj& key, int direction) const;
-
- static std::string expectedKey(const char* spec);
-
- OperationContext* _txn;
- BtreeLogicTestHelper<OnDiskFormat>* _helper;
- };
-
-} // namespace mongo
+ bool isPresent(const BSONObj& key, int direction) const;
+
+ static std::string expectedKey(const char* spec);
+
+ OperationContext* _txn;
+ BtreeLogicTestHelper<OnDiskFormat>* _helper;
+};
+
+} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.cpp b/src/mongo/db/storage/mmap_v1/btree/key.cpp
index 5cc1afbdc69..cbb89d8fab9 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/key.cpp
@@ -39,26 +39,26 @@
namespace mongo {
- using std::endl;
- using std::numeric_limits;
- using std::min;
+using std::endl;
+using std::numeric_limits;
+using std::min;
- extern const Ordering nullOrdering = Ordering::make(BSONObj());
+extern const Ordering nullOrdering = Ordering::make(BSONObj());
- // KeyBson is for V0 (version #0) indexes
+// KeyBson is for V0 (version #0) indexes
- int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o);
+int oldCompare(const BSONObj& l, const BSONObj& r, const Ordering& o);
- // "old" = pre signed dates & such; i.e. btree V0
- /* must be same canon type when called */
- int oldCompareElementValues(const BSONElement& l, const BSONElement& r) {
- dassert( l.canonicalType() == r.canonicalType() );
- int f;
- double x;
+// "old" = pre signed dates & such; i.e. btree V0
+/* must be same canon type when called */
+int oldCompareElementValues(const BSONElement& l, const BSONElement& r) {
+ dassert(l.canonicalType() == r.canonicalType());
+ int f;
+ double x;
- switch ( l.type() ) {
+ switch (l.type()) {
case EOO:
- case Undefined: // EOO and Undefined are same canonicalType
+ case Undefined: // EOO and Undefined are same canonicalType
case jstNULL:
case MaxKey:
case MinKey:
@@ -75,35 +75,36 @@ namespace mongo {
return lULL == rULL ? 0 : 1;
}
case NumberLong:
- if( r.type() == NumberLong ) {
+ if (r.type() == NumberLong) {
long long L = l._numberLong();
long long R = r._numberLong();
- if( L < R ) return -1;
- if( L == R ) return 0;
+ if (L < R)
+ return -1;
+ if (L == R)
+ return 0;
return 1;
}
- // else fall through
+ // else fall through
case NumberInt:
case NumberDouble: {
double left = l.number();
double right = r.number();
- bool lNan = !( left <= numeric_limits< double >::max() &&
- left >= -numeric_limits< double >::max() );
- bool rNan = !( right <= numeric_limits< double >::max() &&
- right >= -numeric_limits< double >::max() );
- if ( lNan ) {
- if ( rNan ) {
+ bool lNan =
+ !(left <= numeric_limits<double>::max() && left >= -numeric_limits<double>::max());
+ bool rNan = !(right <= numeric_limits<double>::max() &&
+ right >= -numeric_limits<double>::max());
+ if (lNan) {
+ if (rNan) {
return 0;
- }
- else {
+ } else {
return -1;
}
- }
- else if ( rNan ) {
+ } else if (rNan) {
return 1;
}
x = left - right;
- if ( x < 0 ) return -1;
+ if (x < 0)
+ return -1;
return x == 0 ? 0 : 1;
}
case jstOID:
@@ -119,562 +120,569 @@ namespace mongo {
case DBRef: {
int lsz = l.valuesize();
int rsz = r.valuesize();
- if ( lsz - rsz != 0 ) return lsz - rsz;
+ if (lsz - rsz != 0)
+ return lsz - rsz;
return memcmp(l.value(), r.value(), lsz);
}
case BinData: {
- int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
+ int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
int rsz = r.objsize();
- if ( lsz - rsz != 0 ) return lsz - rsz;
- return memcmp(l.value()+4, r.value()+4, lsz+1);
+ if (lsz - rsz != 0)
+ return lsz - rsz;
+ return memcmp(l.value() + 4, r.value() + 4, lsz + 1);
}
case RegEx: {
int c = strcmp(l.regex(), r.regex());
- if ( c )
+ if (c)
return c;
return strcmp(l.regexFlags(), r.regexFlags());
}
- case CodeWScope : {
+ case CodeWScope: {
f = l.canonicalType() - r.canonicalType();
- if ( f )
+ if (f)
return f;
- f = strcmp( l.codeWScopeCode() , r.codeWScopeCode() );
- if ( f )
+ f = strcmp(l.codeWScopeCode(), r.codeWScopeCode());
+ if (f)
return f;
- f = strcmp( l.codeWScopeScopeDataUnsafe() , r.codeWScopeScopeDataUnsafe() );
- if ( f )
+ f = strcmp(l.codeWScopeScopeDataUnsafe(), r.codeWScopeScopeDataUnsafe());
+ if (f)
return f;
return 0;
}
default:
- log() << "oldCompareElementValues: bad type " << (int) l.type() << endl;
+ log() << "oldCompareElementValues: bad type " << (int)l.type() << endl;
verify(false);
- }
- return -1;
- }
-
- int oldElemCompare(const BSONElement&l , const BSONElement& r) {
- int lt = (int) l.canonicalType();
- int rt = (int) r.canonicalType();
- int x = lt - rt;
- if( x )
- return x;
- return oldCompareElementValues(l, r);
}
-
- // pre signed dates & such
- int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o) {
- BSONObjIterator i(l);
- BSONObjIterator j(r);
- unsigned mask = 1;
- while ( 1 ) {
- // so far, equal...
-
- BSONElement l = i.next();
- BSONElement r = j.next();
- if ( l.eoo() )
- return r.eoo() ? 0 : -1;
- if ( r.eoo() )
- return 1;
-
- int x;
- {
- x = oldElemCompare(l, r);
- if( o.descending(mask) )
- x = -x;
- }
- if ( x != 0 )
- return x;
- mask <<= 1;
+ return -1;
+}
+
+int oldElemCompare(const BSONElement& l, const BSONElement& r) {
+ int lt = (int)l.canonicalType();
+ int rt = (int)r.canonicalType();
+ int x = lt - rt;
+ if (x)
+ return x;
+ return oldCompareElementValues(l, r);
+}
+
+// pre signed dates & such
+int oldCompare(const BSONObj& l, const BSONObj& r, const Ordering& o) {
+ BSONObjIterator i(l);
+ BSONObjIterator j(r);
+ unsigned mask = 1;
+ while (1) {
+ // so far, equal...
+
+ BSONElement l = i.next();
+ BSONElement r = j.next();
+ if (l.eoo())
+ return r.eoo() ? 0 : -1;
+ if (r.eoo())
+ return 1;
+
+ int x;
+ {
+ x = oldElemCompare(l, r);
+ if (o.descending(mask))
+ x = -x;
}
- return -1;
- }
-
- /* old style compares:
- - dates are unsigned
- - strings no nulls
- */
- int KeyBson::woCompare(const KeyBson& r, const Ordering &o) const {
- return oldCompare(_o, r._o, o);
- }
-
- // woEqual could be made faster than woCompare but this is for backward compatibility so not worth a big effort
- bool KeyBson::woEqual(const KeyBson& r) const {
- return oldCompare(_o, r._o, nullOrdering) == 0;
- }
-
- // [ ][HASMORE][x][y][canontype_4bits]
- enum CanonicalsEtc {
- cminkey=1,
- cnull=2,
- cdouble=4,
- cstring=6,
- cbindata=7,
- coid=8,
- cfalse=10,
- ctrue=11,
- cdate=12,
- cmaxkey=14,
- cCANONTYPEMASK = 0xf,
- cY = 0x10,
- cint = cY | cdouble,
- cX = 0x20,
- clong = cX | cdouble,
- cHASMORE = 0x40,
- cNOTUSED = 0x80 // but see IsBSON sentinel - this bit not usable without great care
- };
-
- // bindata bson type
- const unsigned BinDataLenMask = 0xf0; // lengths are powers of 2 of this value
- const unsigned BinDataTypeMask = 0x0f; // 0-7 as you would expect, 8-15 are 128+value. see BinDataType.
- const int BinDataLenMax = 32;
- const int BinDataLengthToCode[] = {
- 0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
- 0x80, -1/*9*/, 0x90/*10*/, -1/*11*/, 0xa0/*12*/, -1/*13*/, 0xb0/*14*/, -1/*15*/,
- 0xc0/*16*/, -1, -1, -1, 0xd0/*20*/, -1, -1, -1,
- 0xe0/*24*/, -1, -1, -1, -1, -1, -1, -1,
- 0xf0/*32*/
- };
- const int BinDataCodeToLength[] = {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 32
- };
-
- int binDataCodeToLength(int codeByte) {
- return BinDataCodeToLength[codeByte >> 4];
- }
-
- /** object cannot be represented in compact format. so store in traditional bson format
- with a leading sentinel byte IsBSON to indicate it's in that format.
-
- Given that the KeyV1Owned constructor already grabbed a bufbuilder, we reuse it here
- so that we don't have to do an extra malloc.
- */
- void KeyV1Owned::traditional(const BSONObj& obj) {
- b.reset();
- b.appendUChar(IsBSON);
- b.appendBuf(obj.objdata(), obj.objsize());
- _keyData = (const unsigned char *) b.buf();
- }
-
- KeyV1Owned::KeyV1Owned(const KeyV1& rhs) {
- b.appendBuf( rhs.data(), rhs.dataSize() );
- _keyData = (const unsigned char *) b.buf();
- dassert( b.len() == dataSize() ); // check datasize method is correct
- dassert( (*_keyData & cNOTUSED) == 0 );
+ if (x != 0)
+ return x;
+ mask <<= 1;
}
-
- // fromBSON to Key format
- KeyV1Owned::KeyV1Owned(const BSONObj& obj) {
- BSONObj::iterator i(obj);
- unsigned char bits = 0;
- while( 1 ) {
- BSONElement e = i.next();
- if( i.more() )
- bits |= cHASMORE;
- switch( e.type() ) {
+ return -1;
+}
+
+/* old style compares:
+ - dates are unsigned
+ - strings no nulls
+*/
+int KeyBson::woCompare(const KeyBson& r, const Ordering& o) const {
+ return oldCompare(_o, r._o, o);
+}
+
+// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a big effort
+bool KeyBson::woEqual(const KeyBson& r) const {
+ return oldCompare(_o, r._o, nullOrdering) == 0;
+}
+
+// [ ][HASMORE][x][y][canontype_4bits]
+enum CanonicalsEtc {
+ cminkey = 1,
+ cnull = 2,
+ cdouble = 4,
+ cstring = 6,
+ cbindata = 7,
+ coid = 8,
+ cfalse = 10,
+ ctrue = 11,
+ cdate = 12,
+ cmaxkey = 14,
+ cCANONTYPEMASK = 0xf,
+ cY = 0x10,
+ cint = cY | cdouble,
+ cX = 0x20,
+ clong = cX | cdouble,
+ cHASMORE = 0x40,
+ cNOTUSED = 0x80 // but see IsBSON sentinel - this bit not usable without great care
+};
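A self-contained sketch of how these bits combine into the per-element type byte (constants copied from the enum above; illustrative only):

    #include <cassert>
    int main() {
        const unsigned cdouble = 4, cstring = 6, cCANONTYPEMASK = 0xf;
        const unsigned cY = 0x10, cX = 0x20, cint = cY | cdouble, clong = cX | cdouble;
        const unsigned cHASMORE = 0x40;
        assert((cstring | cHASMORE) == 0x46);         // a string element with more elements following
        assert((cint & cCANONTYPEMASK) == cdouble);   // ints canonicalize to double...
        assert((clong & cCANONTYPEMASK) == cdouble);  // ...and so do longs
        // cNOTUSED (0x80) is never set on a compact key, which is what lets the
        // 0xff IsBSON sentinel be detected with a single OR in KeyV1::woCompare().
        return 0;
    }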
+
+// bindata bson type
+const unsigned BinDataLenMask = 0xf0; // lengths are powers of 2 of this value
+const unsigned BinDataTypeMask =
+ 0x0f; // 0-7 as you would expect, 8-15 are 128+value. see BinDataType.
+const int BinDataLenMax = 32;
+const int BinDataLengthToCode[] = {
+ 0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60,
+ 0x70, 0x80, -1 /*9*/, 0x90 /*10*/, -1 /*11*/, 0xa0 /*12*/, -1 /*13*/,
+ 0xb0 /*14*/, -1 /*15*/, 0xc0 /*16*/, -1, -1, -1, 0xd0 /*20*/,
+ -1, -1, -1, 0xe0 /*24*/, -1, -1, -1,
+ -1, -1, -1, -1, 0xf0 /*32*/
+};
+const int BinDataCodeToLength[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 32};
+
+int binDataCodeToLength(int codeByte) {
+ return BinDataCodeToLength[codeByte >> 4];
+}
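A worked round trip through these tables (illustrative):

    // A 16-byte BinData payload (e.g. an MD5 hash) encodes as BinDataLengthToCode[16] == 0xc0.
    // Decoding reverses it: binDataCodeToLength(0xc0) == BinDataCodeToLength[0xc0 >> 4]
    //                                                 == BinDataCodeToLength[12] == 16.
    // Lengths with no code (9, 11, 13, ...) map to -1, so such values fall back to traditional BSON.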
+
+/** object cannot be represented in compact format. so store in traditional bson format
+ with a leading sentinel byte IsBSON to indicate it's in that format.
+
+ Given that the KeyV1Owned constructor already grabbed a bufbuilder, we reuse it here
+ so that we don't have to do an extra malloc.
+*/
+void KeyV1Owned::traditional(const BSONObj& obj) {
+ b.reset();
+ b.appendUChar(IsBSON);
+ b.appendBuf(obj.objdata(), obj.objsize());
+ _keyData = (const unsigned char*)b.buf();
+}
+
+KeyV1Owned::KeyV1Owned(const KeyV1& rhs) {
+ b.appendBuf(rhs.data(), rhs.dataSize());
+ _keyData = (const unsigned char*)b.buf();
+ dassert(b.len() == dataSize()); // check datasize method is correct
+ dassert((*_keyData & cNOTUSED) == 0);
+}
+
+// fromBSON to Key format
+KeyV1Owned::KeyV1Owned(const BSONObj& obj) {
+ BSONObj::iterator i(obj);
+ unsigned char bits = 0;
+ while (1) {
+ BSONElement e = i.next();
+ if (i.more())
+ bits |= cHASMORE;
+ switch (e.type()) {
case MinKey:
- b.appendUChar(cminkey|bits);
+ b.appendUChar(cminkey | bits);
break;
case jstNULL:
- b.appendUChar(cnull|bits);
+ b.appendUChar(cnull | bits);
break;
case MaxKey:
- b.appendUChar(cmaxkey|bits);
+ b.appendUChar(cmaxkey | bits);
break;
case Bool:
- b.appendUChar( (e.boolean()?ctrue:cfalse) | bits );
+ b.appendUChar((e.boolean() ? ctrue : cfalse) | bits);
break;
case jstOID:
- b.appendUChar(coid|bits);
+ b.appendUChar(coid | bits);
b.appendBuf(e.__oid().view().view(), OID::kOIDSize);
break;
- case BinData:
- {
- int t = e.binDataType();
- // 0-7 and 0x80 to 0x87 are supported by Key
- if( (t & 0x78) == 0 && t != ByteArrayDeprecated ) {
- int len;
- const char * d = e.binData(len);
- if( len <= BinDataLenMax ) {
- int code = BinDataLengthToCode[len];
- if( code >= 0 ) {
- if( t >= 128 )
- t = (t-128) | 0x08;
- dassert( (code&t) == 0 );
- b.appendUChar( cbindata|bits );
- b.appendUChar( code | t );
- b.appendBuf(d, len);
- break;
- }
+ case BinData: {
+ int t = e.binDataType();
+ // 0-7 and 0x80 to 0x87 are supported by Key
+ if ((t & 0x78) == 0 && t != ByteArrayDeprecated) {
+ int len;
+ const char* d = e.binData(len);
+ if (len <= BinDataLenMax) {
+ int code = BinDataLengthToCode[len];
+ if (code >= 0) {
+ if (t >= 128)
+ t = (t - 128) | 0x08;
+ dassert((code & t) == 0);
+ b.appendUChar(cbindata | bits);
+ b.appendUChar(code | t);
+ b.appendBuf(d, len);
+ break;
}
}
- traditional(obj);
- return;
}
+ traditional(obj);
+ return;
+ }
case Date:
- b.appendUChar(cdate|bits);
+ b.appendUChar(cdate | bits);
b.appendStruct(e.date());
break;
- case String:
- {
- b.appendUChar(cstring|bits);
- // note we do not store the terminating null, to save space.
- unsigned x = (unsigned) e.valuestrsize() - 1;
- if( x > 255 ) {
- traditional(obj);
- return;
- }
- b.appendUChar(x);
- b.appendBuf(e.valuestr(), x);
- break;
+ case String: {
+ b.appendUChar(cstring | bits);
+ // note we do not store the terminating null, to save space.
+ unsigned x = (unsigned)e.valuestrsize() - 1;
+ if (x > 255) {
+ traditional(obj);
+ return;
}
+ b.appendUChar(x);
+ b.appendBuf(e.valuestr(), x);
+ break;
+ }
case NumberInt:
- b.appendUChar(cint|bits);
- b.appendNum((double) e._numberInt());
+ b.appendUChar(cint | bits);
+ b.appendNum((double)e._numberInt());
break;
- case NumberLong:
- {
- long long n = e._numberLong();
- long long m = 2LL << 52;
- DEV {
- long long d = m-1;
- verify( ((long long) ((double) -d)) == -d );
- }
- if( n >= m || n <= -m ) {
- // can't represent exactly as a double
- traditional(obj);
- return;
- }
- b.appendUChar(clong|bits);
- b.appendNum((double) n);
- break;
+ case NumberLong: {
+ long long n = e._numberLong();
+ long long m = 2LL << 52;
+ DEV {
+ long long d = m - 1;
+ verify(((long long)((double)-d)) == -d);
}
- case NumberDouble:
- {
- double d = e._numberDouble();
- if( std::isnan(d) ) {
- traditional(obj);
- return;
- }
- b.appendUChar(cdouble|bits);
- b.appendNum(d);
- break;
+ if (n >= m || n <= -m) {
+ // can't represent exactly as a double
+ traditional(obj);
+ return;
+ }
+ b.appendUChar(clong | bits);
+ b.appendNum((double)n);
+ break;
+ }
+ case NumberDouble: {
+ double d = e._numberDouble();
+ if (std::isnan(d)) {
+ traditional(obj);
+ return;
}
+ b.appendUChar(cdouble | bits);
+ b.appendNum(d);
+ break;
+ }
default:
// if other types involved, store as traditional BSON
traditional(obj);
return;
- }
- if( !i.more() )
- break;
- bits = 0;
}
- _keyData = (const unsigned char *) b.buf();
- dassert( b.len() == dataSize() ); // check datasize method is correct
- dassert( (*_keyData & cNOTUSED) == 0 );
+ if (!i.more())
+ break;
+ bits = 0;
}
-
- BSONObj KeyV1::toBson() const {
- verify( _keyData != 0 );
- if( !isCompactFormat() )
- return bson();
-
- BSONObjBuilder b(512);
- const unsigned char *p = _keyData;
- while( 1 ) {
- unsigned bits = *p++;
-
- switch( bits & 0x3f ) {
- case cminkey: b.appendMinKey(""); break;
- case cnull: b.appendNull(""); break;
- case cfalse: b.appendBool("", false); break;
- case ctrue: b.appendBool("", true); break;
- case cmaxkey:
- b.appendMaxKey("");
- break;
- case cstring:
- {
- unsigned sz = *p++;
- // we build the element ourself as we have to null terminate it
- BufBuilder &bb = b.bb();
- bb.appendNum((char) String);
- bb.appendUChar(0); // fieldname ""
- bb.appendNum(sz+1);
- bb.appendBuf(p, sz);
- bb.appendUChar(0); // null char at end of string
- p += sz;
- break;
- }
- case coid:
- {
- OID oid = OID::from(p);
- b.appendOID("", &oid);
- p += OID::kOIDSize;
- break;
- }
- case cbindata:
- {
- int len = binDataCodeToLength(*p);
- int subtype = (*p) & BinDataTypeMask;
- if( subtype & 0x8 ) {
- subtype = (subtype & 0x7) | 0x80;
- }
- b.appendBinData("", len, (BinDataType) subtype, ++p);
- p += len;
- break;
- }
- case cdate:
- b.appendDate("", (Date_t&) *p);
- p += 8;
- break;
- case cdouble:
- b.append("", (double&) *p);
- p += sizeof(double);
- break;
- case cint:
- b.append("", static_cast< int >((reinterpret_cast< const PackedDouble& >(*p)).d));
- p += sizeof(double);
- break;
- case clong:
- b.append("", static_cast< long long>((reinterpret_cast< const PackedDouble& >(*p)).d));
- p += sizeof(double);
- break;
- default:
- verify(false);
- }
-
- if( (bits & cHASMORE) == 0 )
+ _keyData = (const unsigned char*)b.buf();
+ dassert(b.len() == dataSize()); // check datasize method is correct
+ dassert((*_keyData & cNOTUSED) == 0);
+}
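To make the space saving concrete, a sketch of the compact layout produced by the switch above for two simple single-field keys (byte values are illustrative, derived from the cases shown):

    // {"": "abc"} is 15 bytes as a BSONObj but 5 bytes in compact form:
    //   0x06          cstring, cHASMORE clear (last element)
    //   0x03          string length; the terminating null is not stored
    //   'a' 'b' 'c'   payload
    // {"": 42} (NumberInt) becomes 0x14 (cint) followed by 42.0 as an 8-byte double: 9 bytes.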
+
+BSONObj KeyV1::toBson() const {
+ verify(_keyData != 0);
+ if (!isCompactFormat())
+ return bson();
+
+ BSONObjBuilder b(512);
+ const unsigned char* p = _keyData;
+ while (1) {
+ unsigned bits = *p++;
+
+ switch (bits & 0x3f) {
+ case cminkey:
+ b.appendMinKey("");
break;
- }
- return b.obj();
- }
-
- static int compare(const unsigned char *&l, const unsigned char *&r) {
- int lt = (*l & cCANONTYPEMASK);
- int rt = (*r & cCANONTYPEMASK);
- int x = lt - rt;
- if( x )
- return x;
-
- l++; r++;
-
- // same type
- switch( lt ) {
- case cdouble:
- {
- double L = (reinterpret_cast< const PackedDouble* >(l))->d;
- double R = (reinterpret_cast< const PackedDouble* >(r))->d;
- if( L < R )
- return -1;
- if( L != R )
- return 1;
- l += 8; r += 8;
+ case cnull:
+ b.appendNull("");
+ break;
+ case cfalse:
+ b.appendBool("", false);
+ break;
+ case ctrue:
+ b.appendBool("", true);
+ break;
+ case cmaxkey:
+ b.appendMaxKey("");
+ break;
+ case cstring: {
+ unsigned sz = *p++;
+ // we build the element ourself as we have to null terminate it
+ BufBuilder& bb = b.bb();
+ bb.appendNum((char)String);
+ bb.appendUChar(0); // fieldname ""
+ bb.appendNum(sz + 1);
+ bb.appendBuf(p, sz);
+ bb.appendUChar(0); // null char at end of string
+ p += sz;
break;
}
- case cstring:
- {
- int lsz = *l;
- int rsz = *r;
- int common = min(lsz, rsz);
- l++; r++; // skip the size byte
- // use memcmp as we (will) allow zeros in UTF8 strings
- int res = memcmp(l, r, common);
- if( res )
- return res;
- // longer string is the greater one
- int diff = lsz-rsz;
- if( diff )
- return diff;
- l += lsz; r += lsz;
+ case coid: {
+ OID oid = OID::from(p);
+ b.appendOID("", &oid);
+ p += OID::kOIDSize;
break;
}
- case cbindata:
- {
- int L = *l;
- int R = *r;
- int llen = binDataCodeToLength(L);
- int diff = L-R; // checks length and subtype simultaneously
- if( diff ) {
- // unfortunately nibbles are backwards to do subtype and len in one check (could bit swap...)
- int rlen = binDataCodeToLength(R);
- if( llen != rlen )
- return llen - rlen;
- return diff;
+ case cbindata: {
+ int len = binDataCodeToLength(*p);
+ int subtype = (*p) & BinDataTypeMask;
+ if (subtype & 0x8) {
+ subtype = (subtype & 0x7) | 0x80;
}
- // same length, same type
- l++; r++;
- int res = memcmp(l, r, llen);
- if( res )
- return res;
- l += llen; r += llen;
+ b.appendBinData("", len, (BinDataType)subtype, ++p);
+ p += len;
break;
}
- case cdate:
- {
- long long L = *((long long *) l);
- long long R = *((long long *) r);
- if( L < R )
- return -1;
- if( L > R )
- return 1;
- l += 8; r += 8;
+ case cdate:
+ b.appendDate("", (Date_t&)*p);
+ p += 8;
break;
- }
- case coid:
- {
- int res = memcmp(l, r, OID::kOIDSize);
- if( res )
- return res;
- l += OID::kOIDSize; r += OID::kOIDSize;
+ case cdouble:
+ b.append("", (double&)*p);
+ p += sizeof(double);
+ break;
+ case cint:
+ b.append("", static_cast<int>((reinterpret_cast<const PackedDouble&>(*p)).d));
+ p += sizeof(double);
break;
+ case clong:
+ b.append("", static_cast<long long>((reinterpret_cast<const PackedDouble&>(*p)).d));
+ p += sizeof(double);
+ break;
+ default:
+ verify(false);
+ }
+
+ if ((bits & cHASMORE) == 0)
+ break;
+ }
+ return b.obj();
+}
+
+static int compare(const unsigned char*& l, const unsigned char*& r) {
+ int lt = (*l & cCANONTYPEMASK);
+ int rt = (*r & cCANONTYPEMASK);
+ int x = lt - rt;
+ if (x)
+ return x;
+
+ l++;
+ r++;
+
+ // same type
+ switch (lt) {
+ case cdouble: {
+ double L = (reinterpret_cast<const PackedDouble*>(l))->d;
+ double R = (reinterpret_cast<const PackedDouble*>(r))->d;
+ if (L < R)
+ return -1;
+ if (L != R)
+ return 1;
+ l += 8;
+ r += 8;
+ break;
+ }
+ case cstring: {
+ int lsz = *l;
+ int rsz = *r;
+ int common = min(lsz, rsz);
+ l++;
+ r++; // skip the size byte
+ // use memcmp as we (will) allow zeros in UTF8 strings
+ int res = memcmp(l, r, common);
+ if (res)
+ return res;
+ // longer string is the greater one
+ int diff = lsz - rsz;
+ if (diff)
+ return diff;
+ l += lsz;
+ r += lsz;
+ break;
+ }
+ case cbindata: {
+ int L = *l;
+ int R = *r;
+ int llen = binDataCodeToLength(L);
+ int diff = L - R; // checks length and subtype simultaneously
+ if (diff) {
+ // unfortunately nibbles are backwards to do subtype and len in one check (could bit swap...)
+ int rlen = binDataCodeToLength(R);
+ if (llen != rlen)
+ return llen - rlen;
+ return diff;
}
+ // same length, same type
+ l++;
+ r++;
+ int res = memcmp(l, r, llen);
+ if (res)
+ return res;
+ l += llen;
+ r += llen;
+ break;
+ }
+ case cdate: {
+ long long L = *((long long*)l);
+ long long R = *((long long*)r);
+ if (L < R)
+ return -1;
+ if (L > R)
+ return 1;
+ l += 8;
+ r += 8;
+ break;
+ }
+ case coid: {
+ int res = memcmp(l, r, OID::kOIDSize);
+ if (res)
+ return res;
+ l += OID::kOIDSize;
+ r += OID::kOIDSize;
+ break;
+ }
default:
// all the others are a match -- e.g. null == null
;
- }
-
- return 0;
- }
-
- // at least one of this and right are traditional BSON format
- int NOINLINE_DECL KeyV1::compareHybrid(const KeyV1& right, const Ordering& order) const {
- BSONObj L = toBson();
- BSONObj R = right.toBson();
- return L.woCompare(R, order, /*considerfieldname*/false);
}
- int KeyV1::woCompare(const KeyV1& right, const Ordering &order) const {
- const unsigned char *l = _keyData;
- const unsigned char *r = right._keyData;
-
- if( (*l|*r) == IsBSON ) // only can do this if cNOTUSED maintained
- return compareHybrid(right, order);
-
- unsigned mask = 1;
- while( 1 ) {
- char lval = *l;
- char rval = *r;
- {
- int x = compare(l, r); // updates l and r pointers
- if( x ) {
- if( order.descending(mask) )
- x = -x;
- return x;
- }
- }
-
- {
- int x = ((int)(lval & cHASMORE)) - ((int)(rval & cHASMORE));
- if( x )
- return x;
- if( (lval & cHASMORE) == 0 )
- break;
+ return 0;
+}
+
+// at least one of this and right are traditional BSON format
+int NOINLINE_DECL KeyV1::compareHybrid(const KeyV1& right, const Ordering& order) const {
+ BSONObj L = toBson();
+ BSONObj R = right.toBson();
+ return L.woCompare(R, order, /*considerfieldname*/ false);
+}
+
+int KeyV1::woCompare(const KeyV1& right, const Ordering& order) const {
+ const unsigned char* l = _keyData;
+ const unsigned char* r = right._keyData;
+
+ if ((*l | *r) == IsBSON) // only can do this if cNOTUSED maintained
+ return compareHybrid(right, order);
+
+ unsigned mask = 1;
+ while (1) {
+ char lval = *l;
+ char rval = *r;
+ {
+ int x = compare(l, r); // updates l and r pointers
+ if (x) {
+ if (order.descending(mask))
+ x = -x;
+ return x;
}
+ }
- mask <<= 1;
+ {
+ int x = ((int)(lval & cHASMORE)) - ((int)(rval & cHASMORE));
+ if (x)
+ return x;
+ if ((lval & cHASMORE) == 0)
+ break;
}
- return 0;
+ mask <<= 1;
}
- static unsigned sizes[] = {
- 0,
- 1, //cminkey=1,
- 1, //cnull=2,
- 0,
- 9, //cdouble=4,
- 0,
- 0, //cstring=6,
- 0,
- 13, //coid=8,
- 0,
- 1, //cfalse=10,
- 1, //ctrue=11,
- 9, //cdate=12,
- 0,
- 1, //cmaxkey=14,
- 0
- };
-
- inline unsigned sizeOfElement(const unsigned char *p) {
- unsigned type = *p & cCANONTYPEMASK;
- unsigned sz = sizes[type];
- if( sz == 0 ) {
- if( type == cstring ) {
- sz = ((unsigned) p[1]) + 2;
- }
- else {
- verify( type == cbindata );
- sz = binDataCodeToLength(p[1]) + 2;
- }
+ return 0;
+}
+
+static unsigned sizes[] = {0,
+ 1, // cminkey=1,
+ 1, // cnull=2,
+ 0,
+ 9, // cdouble=4,
+ 0,
+ 0, // cstring=6,
+ 0,
+ 13, // coid=8,
+ 0,
+ 1, // cfalse=10,
+ 1, // ctrue=11,
+ 9, // cdate=12,
+ 0,
+ 1, // cmaxkey=14,
+ 0};
+
+inline unsigned sizeOfElement(const unsigned char* p) {
+ unsigned type = *p & cCANONTYPEMASK;
+ unsigned sz = sizes[type];
+ if (sz == 0) {
+ if (type == cstring) {
+ sz = ((unsigned)p[1]) + 2;
+ } else {
+ verify(type == cbindata);
+ sz = binDataCodeToLength(p[1]) + 2;
}
- return sz;
}
+ return sz;
+}
- int KeyV1::dataSize() const {
- const unsigned char *p = _keyData;
- if( !isCompactFormat() ) {
- return bson().objsize() + 1;
- }
-
- bool more;
- do {
- unsigned z = sizeOfElement(p);
- more = (*p & cHASMORE) != 0;
- p += z;
- } while( more );
- return p - _keyData;
+int KeyV1::dataSize() const {
+ const unsigned char* p = _keyData;
+ if (!isCompactFormat()) {
+ return bson().objsize() + 1;
}
- bool KeyV1::woEqual(const KeyV1& right) const {
- const unsigned char *l = _keyData;
- const unsigned char *r = right._keyData;
-
- if( (*l|*r) == IsBSON ) {
- return toBson().equal(right.toBson());
- }
+ bool more;
+ do {
+ unsigned z = sizeOfElement(p);
+ more = (*p & cHASMORE) != 0;
+ p += z;
+ } while (more);
+ return p - _keyData;
+}
+
+bool KeyV1::woEqual(const KeyV1& right) const {
+ const unsigned char* l = _keyData;
+ const unsigned char* r = right._keyData;
+
+ if ((*l | *r) == IsBSON) {
+ return toBson().equal(right.toBson());
+ }
- while( 1 ) {
- char lval = *l;
- char rval = *r;
- if( (lval&(cCANONTYPEMASK|cHASMORE)) != (rval&(cCANONTYPEMASK|cHASMORE)) )
- return false;
- l++; r++;
- switch( lval&cCANONTYPEMASK ) {
+ while (1) {
+ char lval = *l;
+ char rval = *r;
+ if ((lval & (cCANONTYPEMASK | cHASMORE)) != (rval & (cCANONTYPEMASK | cHASMORE)))
+ return false;
+ l++;
+ r++;
+ switch (lval & cCANONTYPEMASK) {
case coid:
- if( *((unsigned*) l) != *((unsigned*) r) )
+ if (*((unsigned*)l) != *((unsigned*)r))
return false;
- l += 4; r += 4;
+ l += 4;
+ r += 4;
case cdate:
- if( *((unsigned long long *) l) != *((unsigned long long *) r) )
+ if (*((unsigned long long*)l) != *((unsigned long long*)r))
return false;
- l += 8; r += 8;
+ l += 8;
+ r += 8;
break;
case cdouble:
- if( (reinterpret_cast< const PackedDouble* > (l))->d != (reinterpret_cast< const PackedDouble* >(r))->d )
+ if ((reinterpret_cast<const PackedDouble*>(l))->d !=
+ (reinterpret_cast<const PackedDouble*>(r))->d)
return false;
- l += 8; r += 8;
+ l += 8;
+ r += 8;
break;
- case cstring:
- {
- if( *l != *r )
- return false; // not same length
- unsigned sz = ((unsigned) *l) + 1;
- if( memcmp(l, r, sz) )
- return false;
- l += sz; r += sz;
- break;
- }
- case cbindata:
- {
- if( *l != *r )
- return false; // len or subtype mismatch
- int len = binDataCodeToLength(*l) + 1;
- if( memcmp(l, r, len) )
- return false;
- l += len; r += len;
- break;
- }
+ case cstring: {
+ if (*l != *r)
+ return false; // not same length
+ unsigned sz = ((unsigned)*l) + 1;
+ if (memcmp(l, r, sz))
+ return false;
+ l += sz;
+ r += sz;
+ break;
+ }
+ case cbindata: {
+ if (*l != *r)
+ return false; // len or subtype mismatch
+ int len = binDataCodeToLength(*l) + 1;
+ if (memcmp(l, r, len))
+ return false;
+ l += len;
+ r += len;
+ break;
+ }
case cminkey:
case cnull:
case cfalse:
@@ -683,23 +691,23 @@ namespace mongo {
break;
default:
verify(false);
- }
- if( (lval&cHASMORE) == 0 )
- break;
}
- return true;
+ if ((lval & cHASMORE) == 0)
+ break;
}
-
- struct CmpUnitTest : public StartupTest {
- void run() {
- char a[2];
- char b[2];
- a[0] = -3;
- a[1] = 0;
- b[0] = 3;
- b[1] = 0;
- verify( strcmp(a,b)>0 && memcmp(a,b,2)>0 );
- }
- } cunittest;
+ return true;
+}
+
+struct CmpUnitTest : public StartupTest {
+ void run() {
+ char a[2];
+ char b[2];
+ a[0] = -3;
+ a[1] = 0;
+ b[0] = 3;
+ b[1] = 0;
+ verify(strcmp(a, b) > 0 && memcmp(a, b, 2) > 0);
+ }
+} cunittest;
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.h b/src/mongo/db/storage/mmap_v1/btree/key.h
index 7f886552067..4787d83281a 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.h
+++ b/src/mongo/db/storage/mmap_v1/btree/key.h
@@ -35,97 +35,132 @@
namespace mongo {
- /** Key class for precomputing a small format index key that is denser than a traditional BSONObj.
+/** Key class for precomputing a small format index key that is denser than a traditional BSONObj.
- KeyBson is a legacy wrapper implementation for old BSONObj style keys for v:0 indexes.
+ KeyBson is a legacy wrapper implementation for old BSONObj style keys for v:0 indexes.
- KeyV1 is the new implementation.
+ KeyV1 is the new implementation.
+*/
+class KeyBson /* "KeyV0" */ {
+public:
+ KeyBson() {}
+ explicit KeyBson(const char* keyData) : _o(keyData) {}
+ explicit KeyBson(const BSONObj& obj) : _o(obj) {}
+ int woCompare(const KeyBson& r, const Ordering& o) const;
+ BSONObj toBson() const {
+ return _o;
+ }
+ std::string toString() const {
+ return _o.toString();
+ }
+ int dataSize() const {
+ return _o.objsize();
+ }
+ const char* data() const {
+ return _o.objdata();
+ }
+ BSONElement _firstElement() const {
+ return _o.firstElement();
+ }
+ bool isCompactFormat() const {
+ return false;
+ }
+ bool woEqual(const KeyBson& r) const;
+ void assign(const KeyBson& rhs) {
+ *this = rhs;
+ }
+ bool isValid() const {
+ return true;
+ }
+
+private:
+ BSONObj _o;
+};
+
+class KeyV1Owned;
+
+// corresponding to BtreeData_V1
+class KeyV1 {
+ void operator=(
+ const KeyV1&); // disallowed just to make people be careful as we don't own the buffer
+ KeyV1(
+ const KeyV1Owned&); // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+public:
+ KeyV1() {
+ _keyData = 0;
+ }
+ ~KeyV1() {
+ DEV _keyData = (const unsigned char*)1;
+ }
+
+ KeyV1(const KeyV1& rhs) : _keyData(rhs._keyData) {
+ dassert(_keyData > (const unsigned char*)1);
+ }
+
+ // explicit version of operator= to be safe
+ void assign(const KeyV1& rhs) {
+ _keyData = rhs._keyData;
+ }
+
+ /** @param keyData can be a buffer containing data in either BSON format, OR in KeyV1 format.
+ when BSON, we are just a wrapper
*/
- class KeyBson /* "KeyV0" */ {
- public:
- KeyBson() { }
- explicit KeyBson(const char *keyData) : _o(keyData) { }
- explicit KeyBson(const BSONObj& obj) : _o(obj) { }
- int woCompare(const KeyBson& r, const Ordering &o) const;
- BSONObj toBson() const { return _o; }
- std::string toString() const { return _o.toString(); }
- int dataSize() const { return _o.objsize(); }
- const char * data() const { return _o.objdata(); }
- BSONElement _firstElement() const { return _o.firstElement(); }
- bool isCompactFormat() const { return false; }
- bool woEqual(const KeyBson& r) const;
- void assign(const KeyBson& rhs) { *this = rhs; }
- bool isValid() const { return true; }
- private:
- BSONObj _o;
- };
-
- class KeyV1Owned;
-
- // corresponding to BtreeData_V1
- class KeyV1 {
- void operator=(const KeyV1&); // disallowed just to make people be careful as we don't own the buffer
- KeyV1(const KeyV1Owned&); // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
- public:
- KeyV1() { _keyData = 0; }
- ~KeyV1() { DEV _keyData = (const unsigned char *) 1; }
-
- KeyV1(const KeyV1& rhs) : _keyData(rhs._keyData) {
- dassert( _keyData > (const unsigned char *) 1 );
- }
-
- // explicit version of operator= to be safe
- void assign(const KeyV1& rhs) {
- _keyData = rhs._keyData;
- }
-
- /** @param keyData can be a buffer containing data in either BSON format, OR in KeyV1 format.
- when BSON, we are just a wrapper
- */
- explicit KeyV1(const char *keyData) : _keyData((unsigned char *) keyData) { }
-
- int woCompare(const KeyV1& r, const Ordering &o) const;
- bool woEqual(const KeyV1& r) const;
- BSONObj toBson() const;
- std::string toString() const { return toBson().toString(); }
-
- /** get the key data we want to store in the btree bucket */
- const char * data() const { return (const char *) _keyData; }
-
- /** @return size of data() */
- int dataSize() const;
-
- /** only used by geo, which always has bson keys */
- BSONElement _firstElement() const { return bson().firstElement(); }
- bool isCompactFormat() const { return *_keyData != IsBSON; }
-
- bool isValid() const { return _keyData > (const unsigned char*)1; }
- protected:
- enum { IsBSON = 0xff };
- const unsigned char *_keyData;
- BSONObj bson() const {
- dassert( !isCompactFormat() );
- return BSONObj((const char *) _keyData+1);
- }
- private:
- int compareHybrid(const KeyV1& right, const Ordering& order) const;
- };
-
- class KeyV1Owned : public KeyV1 {
- void operator=(const KeyV1Owned&);
- public:
- /** @obj a BSON object to be translated to KeyV1 format. If the object isn't
- representable in KeyV1 format (which happens, intentionally, at times)
- it will stay as bson herein.
- */
- KeyV1Owned(const BSONObj& obj);
-
- /** makes a copy (memcpy's the whole thing) */
- KeyV1Owned(const KeyV1& rhs);
-
- private:
- StackBufBuilder b;
- void traditional(const BSONObj& obj); // store as traditional bson not as compact format
- };
+ explicit KeyV1(const char* keyData) : _keyData((unsigned char*)keyData) {}
+
+ int woCompare(const KeyV1& r, const Ordering& o) const;
+ bool woEqual(const KeyV1& r) const;
+ BSONObj toBson() const;
+ std::string toString() const {
+ return toBson().toString();
+ }
+
+ /** get the key data we want to store in the btree bucket */
+ const char* data() const {
+ return (const char*)_keyData;
+ }
+
+ /** @return size of data() */
+ int dataSize() const;
+
+ /** only used by geo, which always has bson keys */
+ BSONElement _firstElement() const {
+ return bson().firstElement();
+ }
+ bool isCompactFormat() const {
+ return *_keyData != IsBSON;
+ }
+
+ bool isValid() const {
+ return _keyData > (const unsigned char*)1;
+ }
+
+protected:
+ enum { IsBSON = 0xff };
+ const unsigned char* _keyData;
+ BSONObj bson() const {
+ dassert(!isCompactFormat());
+ return BSONObj((const char*)_keyData + 1);
+ }
+
+private:
+ int compareHybrid(const KeyV1& right, const Ordering& order) const;
+};
+
+class KeyV1Owned : public KeyV1 {
+ void operator=(const KeyV1Owned&);
+public:
+ /** @obj a BSON object to be translated to KeyV1 format. If the object isn't
+ representable in KeyV1 format (which happens, intentionally, at times)
+ it will stay as bson herein.
+ */
+ KeyV1Owned(const BSONObj& obj);
+
+ /** makes a copy (memcpy's the whole thing) */
+ KeyV1Owned(const KeyV1& rhs);
+
+private:
+ StackBufBuilder b;
+ void traditional(const BSONObj& obj); // store as traditional bson not as compact format
+};
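A minimal usage sketch consistent with these declarations (hypothetical values, not code from this change):

    KeyV1Owned a(BSON("" << 1));
    KeyV1Owned b(BSON("" << 2));
    const Ordering ascending = Ordering::make(BSONObj());
    invariant(a.woCompare(b, ascending) < 0);  // 1 sorts before 2
    invariant(!a.woEqual(b));
    BSONObj roundTripped = a.toBson();         // rebuilds {"": 1} from the compact form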
};
diff --git a/src/mongo/db/storage/mmap_v1/catalog/hashtab.cpp b/src/mongo/db/storage/mmap_v1/catalog/hashtab.cpp
index 9c86a4fffba..df766917fac 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/hashtab.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/hashtab.cpp
@@ -36,52 +36,50 @@
namespace mongo {
- int NamespaceHashTable::_find(const Namespace& k, bool& found) const {
- found = false;
- int h = k.hash();
- int i = h % n;
- int start = i;
- int chain = 0;
- int firstNonUsed = -1;
- while ( 1 ) {
- if ( !_nodes(i).inUse() ) {
- if ( firstNonUsed < 0 )
- firstNonUsed = i;
- }
-
- if ( _nodes(i).hash == h && _nodes(i).key == k ) {
- if ( chain >= 200 )
- log() << "warning: hashtable " << _name << " long chain " << std::endl;
- found = true;
- return i;
- }
- chain++;
- i = (i+1) % n;
- if ( i == start ) {
- // shouldn't get here / defensive for infinite loops
- log() << "error: hashtable " << _name << " is full n:" << n << std::endl;
- return -1;
- }
- if( chain >= maxChain ) {
- if ( firstNonUsed >= 0 )
- return firstNonUsed;
- log() << "error: hashtable " << _name << " max chain reached:" << maxChain << std::endl;
- return -1;
- }
+int NamespaceHashTable::_find(const Namespace& k, bool& found) const {
+ found = false;
+ int h = k.hash();
+ int i = h % n;
+ int start = i;
+ int chain = 0;
+ int firstNonUsed = -1;
+ while (1) {
+ if (!_nodes(i).inUse()) {
+ if (firstNonUsed < 0)
+ firstNonUsed = i;
}
- }
- /* buf must be all zeroes on initialization. */
- NamespaceHashTable::NamespaceHashTable(void* buf, int buflen, const char* name)
- : _name(name),
- _buf(buf) {
-
- n = buflen / sizeof(Node);
- if ((n & 1) == 0) {
- n--;
+ if (_nodes(i).hash == h && _nodes(i).key == k) {
+ if (chain >= 200)
+ log() << "warning: hashtable " << _name << " long chain " << std::endl;
+ found = true;
+ return i;
+ }
+ chain++;
+ i = (i + 1) % n;
+ if (i == start) {
+ // shouldn't get here / defensive for infinite loops
+ log() << "error: hashtable " << _name << " is full n:" << n << std::endl;
+ return -1;
}
+ if (chain >= maxChain) {
+ if (firstNonUsed >= 0)
+ return firstNonUsed;
+ log() << "error: hashtable " << _name << " max chain reached:" << maxChain << std::endl;
+ return -1;
+ }
+ }
+}
- maxChain = (int)(n * 0.05);
+/* buf must be all zeroes on initialization. */
+NamespaceHashTable::NamespaceHashTable(void* buf, int buflen, const char* name)
+ : _name(name), _buf(buf) {
+ n = buflen / sizeof(Node);
+ if ((n & 1) == 0) {
+ n--;
}
+ maxChain = (int)(n * 0.05);
+}
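Back-of-the-envelope sizing implied by this constructor, assuming the default 16MB .ns file (sizeof(Node) == 628 per the static assert in hashtab.h; the numbers are illustrative):

    // n        = 16777216 / 628 = 26715 buckets (already odd, so kept as-is)
    // maxChain = (int)(26715 * 0.05) = 1335 linear probes before _find() falls back to the
    //            first unused slot it saw, or gives up with an error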
+
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
index b4ab9d858fa..286de349138 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
@@ -35,105 +35,103 @@
namespace mongo {
- /**
- * Simple, fixed size hash table used for namespace mapping (effectively the contents of the
- * MMAP V1 .ns file). Uses a contiguous block of memory, so you can put it in a memory mapped
- * file very easily.
- */
- class NamespaceHashTable {
- MONGO_DISALLOW_COPYING(NamespaceHashTable);
- public:
-
- typedef stdx::function< void(const Namespace& k, NamespaceDetails& v) > IteratorCallback;
-
-
- /* buf must be all zeroes on initialization. */
- NamespaceHashTable(void* buf, int buflen, const char *name);
-
- NamespaceDetails* get(const Namespace& k) const {
- bool found;
- int i = _find(k, found);
- if (found) {
- return &_nodes(i).value;
- }
+/**
+ * Simple, fixed size hash table used for namespace mapping (effectively the contents of the
+ * MMAP V1 .ns file). Uses a contiguous block of memory, so you can put it in a memory mapped
+ * file very easily.
+ */
+class NamespaceHashTable {
+ MONGO_DISALLOW_COPYING(NamespaceHashTable);
- return 0;
- }
+public:
+ typedef stdx::function<void(const Namespace& k, NamespaceDetails& v)> IteratorCallback;
- void kill(OperationContext* txn, const Namespace& k) {
- bool found;
- int i = _find(k, found);
- if ( i >= 0 && found ) {
- Node* n = &_nodes(i);
- n = txn->recoveryUnit()->writing(n);
- n->key.kill();
- n->setUnused();
- }
- }
- /** returns false if too full */
- bool put(OperationContext* txn, const Namespace& k, const NamespaceDetails& value) {
- bool found;
- int i = _find(k, found);
- if (i < 0)
- return false;
-
- Node* n = txn->recoveryUnit()->writing(&_nodes(i));
- if (!found) {
- n->key = k;
- n->hash = k.hash();
- }
- else {
- invariant(n->hash == k.hash());
- }
+ /* buf must be all zeroes on initialization. */
+ NamespaceHashTable(void* buf, int buflen, const char* name);
- n->value = value;
- return true;
+ NamespaceDetails* get(const Namespace& k) const {
+ bool found;
+ int i = _find(k, found);
+ if (found) {
+ return &_nodes(i).value;
}
- void iterAll(IteratorCallback callback) {
- for (int i = 0; i < n; i++) {
- if (_nodes(i).inUse()) {
- callback(_nodes(i).key, _nodes(i).value);
- }
- }
+ return 0;
+ }
+
+ void kill(OperationContext* txn, const Namespace& k) {
+ bool found;
+ int i = _find(k, found);
+ if (i >= 0 && found) {
+ Node* n = &_nodes(i);
+ n = txn->recoveryUnit()->writing(n);
+ n->key.kill();
+ n->setUnused();
+ }
+ }
+
+ /** returns false if too full */
+ bool put(OperationContext* txn, const Namespace& k, const NamespaceDetails& value) {
+ bool found;
+ int i = _find(k, found);
+ if (i < 0)
+ return false;
+
+ Node* n = txn->recoveryUnit()->writing(&_nodes(i));
+ if (!found) {
+ n->key = k;
+ n->hash = k.hash();
+ } else {
+ invariant(n->hash == k.hash());
}
+ n->value = value;
+ return true;
+ }
+
+ void iterAll(IteratorCallback callback) {
+ for (int i = 0; i < n; i++) {
+ if (_nodes(i).inUse()) {
+ callback(_nodes(i).key, _nodes(i).value);
+ }
+ }
+ }
- private:
+private:
#pragma pack(1)
- struct Node {
- int hash;
- Namespace key;
- NamespaceDetails value;
+ struct Node {
+ int hash;
+ Namespace key;
+ NamespaceDetails value;
- bool inUse() const {
- return hash != 0;
- }
+ bool inUse() const {
+ return hash != 0;
+ }
- void setUnused() {
- hash = 0;
- }
- };
+ void setUnused() {
+ hash = 0;
+ }
+ };
#pragma pack()
- BOOST_STATIC_ASSERT(sizeof(Node) == 628);
+ BOOST_STATIC_ASSERT(sizeof(Node) == 628);
- int _find(const Namespace& k, bool& found) const;
+ int _find(const Namespace& k, bool& found) const;
- Node& _nodes(int i) const {
- Node *nodes = (Node *)_buf;
- return nodes[i];
- }
+ Node& _nodes(int i) const {
+ Node* nodes = (Node*)_buf;
+ return nodes[i];
+ }
- const char* _name;
- void* const _buf;
+ const char* _name;
+ void* const _buf;
- int n; // number of hashtable buckets
- int maxChain;
- };
+ int n; // number of hashtable buckets
+ int maxChain;
+};
-} // namespace mongo
+} // namespace mongo
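
The header above and the _find() implementation earlier in this diff describe a plain open-addressing table: a slot is unused while its stored hash is zero, lookups probe linearly from hash(k) % n, and probing gives up once the chain length passes maxChain (5% of the bucket count), at which point put() reports the table as too full. The standalone sketch below reproduces that probing scheme with illustrative types (std::string keys, int values) rather than the real Namespace/NamespaceDetails structures; it is only meant to make the lookup rules concrete.

#include <iostream>
#include <string>
#include <vector>

// Sketch of the NamespaceHashTable probing rules, with illustrative types.
class ProbingTable {
public:
    explicit ProbingTable(int buckets) : _nodes(buckets), _maxChain(buckets / 20) {}

    // Returns false if the chain limit is hit before a usable slot is found ("too full").
    bool put(const std::string& k, int v) {
        bool found;
        int i = find(k, found);
        if (i < 0)
            return false;
        _nodes[i].hash = hashOf(k);  // non-zero hash marks the slot as in use
        _nodes[i].key = k;
        _nodes[i].value = v;
        return true;
    }

    const int* get(const std::string& k) const {
        bool found;
        int i = find(k, found);
        return found ? &_nodes[i].value : nullptr;
    }

private:
    struct Node {
        unsigned hash = 0;  // 0 means "unused", mirroring Node::inUse() above
        std::string key;
        int value = 0;
    };

    static unsigned hashOf(const std::string& s) {
        unsigned x = 0;
        for (unsigned char c : s)
            x = x * 131 + c;
        return (x & 0x7fffffff) | 0x8000000;  // forced non-zero, like Namespace::hash()
    }

    // Linear probing: a matching slot wins; otherwise the first free slot seen; otherwise -1.
    int find(const std::string& k, bool& found) const {
        found = false;
        const unsigned h = hashOf(k);
        const int n = static_cast<int>(_nodes.size());
        int i = static_cast<int>(h % n);
        int firstNonUsed = -1;
        for (int chain = 0; chain < _maxChain; ++chain, i = (i + 1) % n) {
            if (_nodes[i].hash == 0) {
                if (firstNonUsed < 0)
                    firstNonUsed = i;
            } else if (_nodes[i].hash == h && _nodes[i].key == k) {
                found = true;
                return i;
            }
        }
        return firstNonUsed;  // -1 signals "max chain reached", as in _find()
    }

    std::vector<Node> _nodes;
    int _maxChain;
};

int main() {
    ProbingTable t(101);  // 101 buckets gives a maxChain of 5, matching n * 0.05
    t.put("test.foo", 1);
    t.put("test.bar", 2);
    std::cout << *t.get("test.foo") << " " << *t.get("test.bar") << std::endl;  // prints "1 2"
    return 0;
}
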
diff --git a/src/mongo/db/storage/mmap_v1/catalog/index_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/index_details.cpp
index bc9cc3ee791..fa9093196f8 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/index_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/index_details.cpp
@@ -32,9 +32,8 @@
namespace mongo {
- void IndexDetails::_reset() {
- head.setInvalid();
- info.setInvalid();
- }
-
+void IndexDetails::_reset() {
+ head.setInvalid();
+ info.setInvalid();
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/index_details.h b/src/mongo/db/storage/mmap_v1/catalog/index_details.h
index 8b343d2ee66..1ee5387c57c 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/index_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/index_details.h
@@ -34,38 +34,37 @@
namespace mongo {
- /* Details about a particular index. There is one of these effectively for each object in
- system.namespaces (although this also includes the head pointer, which is not in that
- collection).
+/* Details about a particular index. There is one of these effectively for each object in
+ system.namespaces (although this also includes the head pointer, which is not in that
+ collection).
- This is an internal part of the catalog. Nothing outside of the catalog should use this.
+ This is an internal part of the catalog. Nothing outside of the catalog should use this.
- ** MemoryMapped in NamespaceDetails ** (i.e., this is on disk data)
- */
+ ** MemoryMapped in NamespaceDetails ** (i.e., this is on disk data)
+ */
#pragma pack(1)
- struct IndexDetails {
- /**
- * btree head disk location
- */
- DiskLoc head;
-
- /* Location of index info object. Format:
+struct IndexDetails {
+ /**
+ * btree head disk location
+ */
+ DiskLoc head;
- { name:"nameofindex", ns:"parentnsname", key: {keypattobject}
- [, unique: <bool>, background: <bool>, v:<version>]
- }
+ /* Location of index info object. Format:
- This object is in the system.indexes collection. Note that since we
- have a pointer to the object here, the object in system.indexes MUST NEVER MOVE.
- */
- DiskLoc info;
+ { name:"nameofindex", ns:"parentnsname", key: {keypattobject}
+ [, unique: <bool>, background: <bool>, v:<version>]
+ }
- /**
- * makes head and info invalid
- */
- void _reset();
+ This object is in the system.indexes collection. Note that since we
+ have a pointer to the object here, the object in system.indexes MUST NEVER MOVE.
+ */
+ DiskLoc info;
- };
+ /**
+ * makes head and info invalid
+ */
+ void _reset();
+};
#pragma pack()
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace-inl.h b/src/mongo/db/storage/mmap_v1/catalog/namespace-inl.h
index 318106dc5a7..6ed1bd661ca 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace-inl.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace-inl.h
@@ -36,43 +36,44 @@
namespace mongo {
- inline Namespace& Namespace::operator=(StringData ns) {
- // we fill the remaining space with all zeroes here. as the full Namespace struct is in
- // the datafiles (the .ns files specifically), that is helpful as then they are deterministic
- // in the bytes they have for a given sequence of operations. that makes testing and debugging
- // the data files easier.
- //
- // if profiling indicates this method is a significant bottleneck, we could have a version we
- // use for reads which does not fill with zeroes, and keep the zeroing behavior on writes.
- //
- memset( buf, 0, sizeof(buf) );
- uassert( 10080 , "ns name too long, max size is 127 bytes", ns.size() <= MaxNsLen);
- uassert( 17380 , "ns name can't contain embedded '\0' byte", ns.find('\0') == std::string::npos);
- ns.copyTo( buf, true );
- return *this;
- }
+inline Namespace& Namespace::operator=(StringData ns) {
+ // we fill the remaining space with all zeroes here. as the full Namespace struct is in
+ // the datafiles (the .ns files specifically), that is helpful as then they are deterministic
+ // in the bytes they have for a given sequence of operations. that makes testing and debugging
+ // the data files easier.
+ //
+ // if profiling indicates this method is a significant bottleneck, we could have a version we
+ // use for reads which does not fill with zeroes, and keep the zeroing behavior on writes.
+ //
+ memset(buf, 0, sizeof(buf));
+ uassert(10080, "ns name too long, max size is 127 bytes", ns.size() <= MaxNsLen);
+ uassert(17380, "ns name can't contain embedded '\0' byte", ns.find('\0') == std::string::npos);
+ ns.copyTo(buf, true);
+ return *this;
+}
- inline std::string Namespace::extraName(int i) const {
- char ex[] = "$extra";
- ex[5] += i;
- std::string s = std::string(buf) + ex;
- massert( 10348 , "$extra: ns name too long", s.size() <= MaxNsLen);
- return s;
- }
+inline std::string Namespace::extraName(int i) const {
+ char ex[] = "$extra";
+ ex[5] += i;
+ std::string s = std::string(buf) + ex;
+ massert(10348, "$extra: ns name too long", s.size() <= MaxNsLen);
+ return s;
+}
- inline bool Namespace::isExtra() const {
- const char *p = strstr(buf, "$extr");
- return p && p[5] && p[6] == 0; //==0 important in case an index uses name "$extra_1" for example
- }
+inline bool Namespace::isExtra() const {
+ const char* p = strstr(buf, "$extr");
+ return p && p[5] &&
+ p[6] == 0; //==0 important in case an index uses name "$extra_1" for example
+}
- inline int Namespace::hash() const {
- unsigned x = 0;
- const char *p = buf;
- while ( *p ) {
- x = x * 131 + *p;
- p++;
- }
- return (x & 0x7fffffff) | 0x8000000; // must be > 0
+inline int Namespace::hash() const {
+ unsigned x = 0;
+ const char* p = buf;
+ while (*p) {
+ x = x * 131 + *p;
+ p++;
}
+ return (x & 0x7fffffff) | 0x8000000; // must be > 0
+}
} // namespace mongo
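
The two helpers worth pausing on here are extraName() and isExtra(): an extra block's name is built by bumping the last character of the literal "$extra" (so the second block ends in "$extrb"), and isExtra() recognizes exactly that shape, "$extr" followed by one trailing character, which is why an index named something like "$extra_1" is not mistaken for an extra block. A small standalone restatement of that rule, using plain std::string instead of the fixed-width Namespace class:

#include <cassert>
#include <cstring>
#include <iostream>
#include <string>

// Illustrative restatement of the $extra naming rule from extraName()/isExtra() above.
std::string extraName(const std::string& ns, int i) {
    char ex[] = "$extra";
    ex[5] += i;  // i == 0 -> "$extra", i == 1 -> "$extrb", ...
    return ns + ex;
}

bool isExtra(const char* buf) {
    const char* p = std::strstr(buf, "$extr");
    // The name must end with "$extr" plus exactly one character, so "$extra_1" is not extra.
    return p && p[5] && p[6] == 0;
}

int main() {
    const std::string first = extraName("test.foo", 0);
    const std::string second = extraName("test.foo", 1);
    std::cout << first << " " << second << std::endl;  // test.foo$extra test.foo$extrb
    assert(isExtra(first.c_str()));
    assert(isExtra(second.c_str()));
    assert(!isExtra("test.foo$extra_1"));  // the index-name case the comment warns about
    return 0;
}
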
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace.cpp
index 374761fe386..c9dec65d520 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace.cpp
@@ -37,13 +37,12 @@
#include "mongo/db/namespace_string.h"
namespace mongo {
- namespace {
- BOOST_STATIC_ASSERT( sizeof(Namespace) == 128 );
- BOOST_STATIC_ASSERT( Namespace::MaxNsLenWithNUL == MaxDatabaseNameLen );
- BOOST_STATIC_ASSERT((int)Namespace::MaxNsLenWithNUL == (int)NamespaceString::MaxNsLenWithNUL);
- BOOST_STATIC_ASSERT((int)Namespace::MaxNsLen == (int)NamespaceString::MaxNsLen);
- // Note the typo.
- BOOST_STATIC_ASSERT((int)Namespace::MaxNsColletionLen == (int)NamespaceString::MaxNsCollectionLen);
- }
+namespace {
+BOOST_STATIC_ASSERT(sizeof(Namespace) == 128);
+BOOST_STATIC_ASSERT(Namespace::MaxNsLenWithNUL == MaxDatabaseNameLen);
+BOOST_STATIC_ASSERT((int)Namespace::MaxNsLenWithNUL == (int)NamespaceString::MaxNsLenWithNUL);
+BOOST_STATIC_ASSERT((int)Namespace::MaxNsLen == (int)NamespaceString::MaxNsLen);
+// Note the typo.
+BOOST_STATIC_ASSERT((int)Namespace::MaxNsColletionLen == (int)NamespaceString::MaxNsCollectionLen);
+}
}
-
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace.h b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
index 556e7adf889..f93112de47f 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
@@ -38,55 +38,77 @@
namespace mongo {
#pragma pack(1)
- /**
- * This is used for storing a namespace on disk in a fixed witdh form
- * it should only be used for that, not for passing internally
- * for that, please use NamespaceString
- */
- class Namespace {
- public:
- Namespace(StringData ns) { *this = ns; }
- Namespace& operator=(StringData ns);
-
- void kill() { buf[0] = 0x7f; }
-
- bool operator==(const char *r) const { return strcmp(buf, r) == 0; }
- bool operator==(const Namespace& r) const { return strcmp(buf, r.buf) == 0; }
- bool operator!=(const char *r) const { return strcmp(buf, r) != 0; }
- bool operator!=(const Namespace& r) const { return strcmp(buf, r.buf) != 0; }
-
- bool hasDollarSign() const { return strchr( buf , '$' ) != NULL; }
-
- int hash() const; // value returned is always > 0
-
- size_t size() const { return strlen( buf ); }
-
- std::string toString() const { return buf; }
- operator std::string() const { return buf; }
-
- /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more than 10 indexes
- (more than 10 IndexDetails). It's a bit hacky because of this late addition with backward
- file support. */
- std::string extraName(int i) const;
- bool isExtra() const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
-
- enum MaxNsLenValue {
- // Maximum possible length of name any namespace, including special ones like $extra.
- // This includes rum for the NUL byte so it can be used when sizing buffers.
- MaxNsLenWithNUL = 128,
-
- // MaxNsLenWithNUL excluding the NUL byte. Use this when comparing std::string lengths.
- MaxNsLen = MaxNsLenWithNUL - 1,
-
- // Maximum allowed length of fully qualified namespace name of any real collection.
- // Does not include NUL so it can be directly compared to std::string lengths.
- MaxNsColletionLen = MaxNsLen - 7/*strlen(".$extra")*/,
- };
- private:
- char buf[MaxNsLenWithNUL];
+/**
+ * This is used for storing a namespace on disk in a fixed width form.
+ * It should only be used for that, not for passing a namespace around internally;
+ * for that, please use NamespaceString.
+ */
+class Namespace {
+public:
+ Namespace(StringData ns) {
+ *this = ns;
+ }
+ Namespace& operator=(StringData ns);
+
+ void kill() {
+ buf[0] = 0x7f;
+ }
+
+ bool operator==(const char* r) const {
+ return strcmp(buf, r) == 0;
+ }
+ bool operator==(const Namespace& r) const {
+ return strcmp(buf, r.buf) == 0;
+ }
+ bool operator!=(const char* r) const {
+ return strcmp(buf, r) != 0;
+ }
+ bool operator!=(const Namespace& r) const {
+ return strcmp(buf, r.buf) != 0;
+ }
+
+ bool hasDollarSign() const {
+ return strchr(buf, '$') != NULL;
+ }
+
+ int hash() const; // value returned is always > 0
+
+ size_t size() const {
+ return strlen(buf);
+ }
+
+ std::string toString() const {
+ return buf;
+ }
+ operator std::string() const {
+ return buf;
+ }
+
+ /* NamespaceDetails::Extra was added after the fact to allow chaining of data blocks to support more than 10 indexes
+ (more than 10 IndexDetails). It's a bit hacky because of this late addition with backward
+ file support. */
+ std::string extraName(int i) const;
+ bool isExtra()
+ const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
+
+ enum MaxNsLenValue {
+ // Maximum possible length of the name of any namespace, including special ones like $extra.
+ // This includes room for the NUL byte so it can be used when sizing buffers.
+ MaxNsLenWithNUL = 128,
+
+ // MaxNsLenWithNUL excluding the NUL byte. Use this when comparing std::string lengths.
+ MaxNsLen = MaxNsLenWithNUL - 1,
+
+ // Maximum allowed length of fully qualified namespace name of any real collection.
+ // Does not include NUL so it can be directly compared to std::string lengths.
+ MaxNsColletionLen = MaxNsLen - 7 /*strlen(".$extra")*/,
};
+
+private:
+ char buf[MaxNsLenWithNUL];
+};
#pragma pack()
-} // namespace mongo
+} // namespace mongo
#include "mongo/db/storage/mmap_v1/catalog/namespace-inl.h"
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index 38fa8a7ae00..538a4500906 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -51,195 +51,193 @@
namespace mongo {
- NamespaceDetails::NamespaceDetails( const DiskLoc &loc, bool capped ) {
- BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
-
- /* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
- firstExtent = lastExtent = capExtent = loc;
- stats.datasize = stats.nrecords = 0;
- lastExtentSize = 0;
- nIndexes = 0;
- isCapped = capped;
- maxDocsInCapped = 0x7fffffff; // no limit (value is for pre-v2.3.2 compatibility)
- paddingFactorOldDoNotUse = 1.0;
- systemFlagsOldDoNotUse = 0;
- userFlags = 0;
- capFirstNewRecord = DiskLoc();
- // Signal that we are on first allocation iteration through extents.
- capFirstNewRecord.setInvalid();
- // For capped case, signal that we are doing initial extent allocation.
- if ( capped ) {
- // WAS: cappedLastDelRecLastExtent().setInvalid();
- deletedListSmall[1].setInvalid();
- }
- verify( sizeof(_dataFileVersion) == 2 );
- _dataFileVersion = 0;
- _indexFileVersion = 0;
- multiKeyIndexBits = 0;
- _reservedA = 0;
- _extraOffset = 0;
- indexBuildsInProgress = 0;
- memset(_reserved, 0, sizeof(_reserved));
+NamespaceDetails::NamespaceDetails(const DiskLoc& loc, bool capped) {
+ BOOST_STATIC_ASSERT(sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails));
+
+ /* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
+ firstExtent = lastExtent = capExtent = loc;
+ stats.datasize = stats.nrecords = 0;
+ lastExtentSize = 0;
+ nIndexes = 0;
+ isCapped = capped;
+ maxDocsInCapped = 0x7fffffff; // no limit (value is for pre-v2.3.2 compatibility)
+ paddingFactorOldDoNotUse = 1.0;
+ systemFlagsOldDoNotUse = 0;
+ userFlags = 0;
+ capFirstNewRecord = DiskLoc();
+ // Signal that we are on first allocation iteration through extents.
+ capFirstNewRecord.setInvalid();
+ // For capped case, signal that we are doing initial extent allocation.
+ if (capped) {
+ // WAS: cappedLastDelRecLastExtent().setInvalid();
+ deletedListSmall[1].setInvalid();
}
-
- NamespaceDetails::Extra* NamespaceDetails::allocExtra( OperationContext* txn,
- StringData ns,
- NamespaceIndex& ni,
- int nindexessofar) {
-
- // Namespace details must always be changed under an exclusive DB lock
- const NamespaceString nss(ns);
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
-
- int i = (nindexessofar - NIndexesBase) / NIndexesExtra;
- verify( i >= 0 && i <= 1 );
-
- Namespace fullns( ns );
- Namespace extrans( fullns.extraName(i) ); // throws UserException if ns name too long
-
- massert( 10350, "allocExtra: base ns missing?", this );
- massert( 10351, "allocExtra: extra already exists", ni.details(extrans) == 0 );
-
- Extra temp;
- temp.init();
-
- ni.add_ns( txn, extrans, reinterpret_cast<NamespaceDetails*>( &temp ) );
- Extra* e = reinterpret_cast<NamespaceDetails::Extra*>( ni.details( extrans ) );
-
- long ofs = e->ofsFrom(this);
- if( i == 0 ) {
- verify( _extraOffset == 0 );
- *txn->recoveryUnit()->writing(&_extraOffset) = ofs;
- verify( extra() == e );
- }
- else {
- Extra *hd = extra();
- verify( hd->next(this) == 0 );
- hd->setNext(txn, ofs);
- }
- return e;
+ verify(sizeof(_dataFileVersion) == 2);
+ _dataFileVersion = 0;
+ _indexFileVersion = 0;
+ multiKeyIndexBits = 0;
+ _reservedA = 0;
+ _extraOffset = 0;
+ indexBuildsInProgress = 0;
+ memset(_reserved, 0, sizeof(_reserved));
+}
+
+NamespaceDetails::Extra* NamespaceDetails::allocExtra(OperationContext* txn,
+ StringData ns,
+ NamespaceIndex& ni,
+ int nindexessofar) {
+ // Namespace details must always be changed under an exclusive DB lock
+ const NamespaceString nss(ns);
+ invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+
+ int i = (nindexessofar - NIndexesBase) / NIndexesExtra;
+ verify(i >= 0 && i <= 1);
+
+ Namespace fullns(ns);
+ Namespace extrans(fullns.extraName(i)); // throws UserException if ns name too long
+
+ massert(10350, "allocExtra: base ns missing?", this);
+ massert(10351, "allocExtra: extra already exists", ni.details(extrans) == 0);
+
+ Extra temp;
+ temp.init();
+
+ ni.add_ns(txn, extrans, reinterpret_cast<NamespaceDetails*>(&temp));
+ Extra* e = reinterpret_cast<NamespaceDetails::Extra*>(ni.details(extrans));
+
+ long ofs = e->ofsFrom(this);
+ if (i == 0) {
+ verify(_extraOffset == 0);
+ *txn->recoveryUnit()->writing(&_extraOffset) = ofs;
+ verify(extra() == e);
+ } else {
+ Extra* hd = extra();
+ verify(hd->next(this) == 0);
+ hd->setNext(txn, ofs);
}
+ return e;
+}
- IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpected) {
- if( idxNo < NIndexesBase ) {
- IndexDetails& id = _indexes[idxNo];
- return id;
- }
- Extra *e = extra();
- if ( ! e ) {
- if ( missingExpected )
- throw MsgAssertionException( 13283 , "Missing Extra" );
- massert(14045, "missing Extra", e);
- }
- int i = idxNo - NIndexesBase;
- if( i >= NIndexesExtra ) {
- e = e->next(this);
- if ( ! e ) {
- if ( missingExpected )
- throw MsgAssertionException( 14823 , "missing extra" );
- massert(14824, "missing Extra", e);
- }
- i -= NIndexesExtra;
- }
- return e->details[i];
+IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpected) {
+ if (idxNo < NIndexesBase) {
+ IndexDetails& id = _indexes[idxNo];
+ return id;
}
-
-
- const IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpected) const {
- if( idxNo < NIndexesBase ) {
- const IndexDetails& id = _indexes[idxNo];
- return id;
- }
- const Extra *e = extra();
- if ( ! e ) {
- if ( missingExpected )
- throw MsgAssertionException( 17421 , "Missing Extra" );
- massert(17422, "missing Extra", e);
- }
- int i = idxNo - NIndexesBase;
- if( i >= NIndexesExtra ) {
- e = e->next(this);
- if ( ! e ) {
- if ( missingExpected )
- throw MsgAssertionException( 17423 , "missing extra" );
- massert(17424, "missing Extra", e);
- }
- i -= NIndexesExtra;
+ Extra* e = extra();
+ if (!e) {
+ if (missingExpected)
+ throw MsgAssertionException(13283, "Missing Extra");
+ massert(14045, "missing Extra", e);
+ }
+ int i = idxNo - NIndexesBase;
+ if (i >= NIndexesExtra) {
+ e = e->next(this);
+ if (!e) {
+ if (missingExpected)
+ throw MsgAssertionException(14823, "missing extra");
+ massert(14824, "missing Extra", e);
}
- return e->details[i];
+ i -= NIndexesExtra;
}
+ return e->details[i];
+}
- NamespaceDetails::IndexIterator::IndexIterator(const NamespaceDetails *_d,
- bool includeBackgroundInProgress) {
- d = _d;
- i = 0;
- n = d->nIndexes;
- if ( includeBackgroundInProgress )
- n += d->indexBuildsInProgress;
- }
- // must be called when renaming a NS to fix up extra
- void NamespaceDetails::copyingFrom( OperationContext* txn,
- StringData thisns,
- NamespaceIndex& ni,
- NamespaceDetails* src) {
- _extraOffset = 0; // we are a copy -- the old value is wrong. fixing it up below.
- Extra *se = src->extra();
- int n = NIndexesBase;
- if( se ) {
- Extra *e = allocExtra(txn, thisns, ni, n);
- while( 1 ) {
- n += NIndexesExtra;
- e->copy(this, *se);
- se = se->next(src);
- if( se == 0 ) break;
- Extra *nxt = allocExtra(txn, thisns, ni, n);
- e->setNext( txn, nxt->ofsFrom(this) );
- e = nxt;
- }
- verify( _extraOffset );
- }
+const IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpected) const {
+ if (idxNo < NIndexesBase) {
+ const IndexDetails& id = _indexes[idxNo];
+ return id;
}
-
- NamespaceDetails* NamespaceDetails::writingWithoutExtra( OperationContext* txn ) {
- return txn->recoveryUnit()->writing( this );
+ const Extra* e = extra();
+ if (!e) {
+ if (missingExpected)
+ throw MsgAssertionException(17421, "Missing Extra");
+ massert(17422, "missing Extra", e);
}
-
-
- // XXX - this method should go away
- NamespaceDetails *NamespaceDetails::writingWithExtra( OperationContext* txn ) {
- for( Extra *e = extra(); e; e = e->next( this ) ) {
- txn->recoveryUnit()->writing( e );
+ int i = idxNo - NIndexesBase;
+ if (i >= NIndexesExtra) {
+ e = e->next(this);
+ if (!e) {
+ if (missingExpected)
+ throw MsgAssertionException(17423, "missing extra");
+ massert(17424, "missing Extra", e);
}
- return writingWithoutExtra( txn );
+ i -= NIndexesExtra;
}
-
- void NamespaceDetails::setMaxCappedDocs( OperationContext* txn, long long max ) {
- massert( 16499,
- "max in a capped collection has to be < 2^31 or -1",
- CollectionOptions::validMaxCappedDocs( &max ) );
- maxDocsInCapped = max;
+ return e->details[i];
+}
+
+NamespaceDetails::IndexIterator::IndexIterator(const NamespaceDetails* _d,
+ bool includeBackgroundInProgress) {
+ d = _d;
+ i = 0;
+ n = d->nIndexes;
+ if (includeBackgroundInProgress)
+ n += d->indexBuildsInProgress;
+}
+
+// must be called when renaming a NS to fix up extra
+void NamespaceDetails::copyingFrom(OperationContext* txn,
+ StringData thisns,
+ NamespaceIndex& ni,
+ NamespaceDetails* src) {
+ _extraOffset = 0; // we are a copy -- the old value is wrong. fixing it up below.
+ Extra* se = src->extra();
+ int n = NIndexesBase;
+ if (se) {
+ Extra* e = allocExtra(txn, thisns, ni, n);
+ while (1) {
+ n += NIndexesExtra;
+ e->copy(this, *se);
+ se = se->next(src);
+ if (se == 0)
+ break;
+ Extra* nxt = allocExtra(txn, thisns, ni, n);
+ e->setNext(txn, nxt->ofsFrom(this));
+ e = nxt;
+ }
+ verify(_extraOffset);
}
+}
- /* ------------------------------------------------------------------------- */
+NamespaceDetails* NamespaceDetails::writingWithoutExtra(OperationContext* txn) {
+ return txn->recoveryUnit()->writing(this);
+}
- int NamespaceDetails::_catalogFindIndexByName(OperationContext* txn,
- const Collection* coll,
- StringData name,
- bool includeBackgroundInProgress) const {
- IndexIterator i = ii(includeBackgroundInProgress);
- while( i.more() ) {
- const BSONObj obj = coll->docFor(txn, i.next().info.toRecordId()).value();
- if ( name == obj.getStringField("name") )
- return i.pos()-1;
- }
- return -1;
+// XXX - this method should go away
+NamespaceDetails* NamespaceDetails::writingWithExtra(OperationContext* txn) {
+ for (Extra* e = extra(); e; e = e->next(this)) {
+ txn->recoveryUnit()->writing(e);
}
-
- void NamespaceDetails::Extra::setNext( OperationContext* txn,
- long ofs ) {
- *txn->recoveryUnit()->writing(&_next) = ofs;
+ return writingWithoutExtra(txn);
+}
+
+void NamespaceDetails::setMaxCappedDocs(OperationContext* txn, long long max) {
+ massert(16499,
+ "max in a capped collection has to be < 2^31 or -1",
+ CollectionOptions::validMaxCappedDocs(&max));
+ maxDocsInCapped = max;
+}
+
+/* ------------------------------------------------------------------------- */
+
+
+int NamespaceDetails::_catalogFindIndexByName(OperationContext* txn,
+ const Collection* coll,
+ StringData name,
+ bool includeBackgroundInProgress) const {
+ IndexIterator i = ii(includeBackgroundInProgress);
+ while (i.more()) {
+ const BSONObj obj = coll->docFor(txn, i.next().info.toRecordId()).value();
+ if (name == obj.getStringField("name"))
+ return i.pos() - 1;
}
+ return -1;
+}
+
+void NamespaceDetails::Extra::setNext(OperationContext* txn, long ofs) {
+ *txn->recoveryUnit()->writing(&_next) = ofs;
+}
-} // namespace mongo
+} // namespace mongo
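
A pattern that runs through allocExtra(), extra() and Extra::setNext() above is that the chain of Extra blocks is never linked by pointers: each hop is a byte offset relative to the owning object (_extraOffset, then Extra::_next), so the links stay valid wherever the .ns file happens to be memory mapped. The sketch below shows that self-relative offset idea in isolation, with made-up struct names rather than the real on-disk layout:

#include <iostream>
#include <new>

// Illustrative self-relative offset link: reach a second block through a byte offset
// from the base object instead of storing a raw pointer.
struct ExtraBlock {
    long long next = 0;  // 0 means "no next"; otherwise bytes relative to the base object
    int payload = 0;
};

struct BaseRecord {
    long long extraOfs = 0;  // bytes from this object to its first ExtraBlock, 0 if none

    ExtraBlock* extra() {
        if (extraOfs == 0)
            return nullptr;
        return reinterpret_cast<ExtraBlock*>(reinterpret_cast<char*>(this) + extraOfs);
    }
    void link(ExtraBlock* b) {
        extraOfs = reinterpret_cast<char*>(b) - reinterpret_cast<char*>(this);
    }
};

int main() {
    // Pretend this buffer is a memory-mapped file: a base record plus one extra block.
    alignas(8) char file[4096] = {};
    BaseRecord* base = new (file) BaseRecord();
    ExtraBlock* blk = new (file + 512) ExtraBlock();
    blk->payload = 42;

    base->link(blk);
    std::cout << "offset=" << base->extraOfs << " payload=" << base->extra()->payload
              << std::endl;  // offset=512 payload=42
    return 0;
}
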
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index 9011d6d27f3..5002bf267c7 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -35,200 +35,216 @@
namespace mongo {
- class Collection;
- class NamespaceIndex;
- class OperationContext;
+class Collection;
+class NamespaceIndex;
+class OperationContext;
#pragma pack(1)
- /* NamespaceDetails : this is the "header" for a collection that has all its details.
- It's in the .ns file and this is a memory mapped region (thus the pack pragma above).
+/* NamespaceDetails : this is the "header" for a collection that has all its details.
+ It's in the .ns file and this is a memory mapped region (thus the pack pragma above).
+*/
+class NamespaceDetails {
+public:
+ enum { NIndexesMax = 64, NIndexesExtra = 30, NIndexesBase = 10 };
+
+ // deleted lists -- linked lists of deleted records -- are placed in 'buckets' of various
+ // sizes so you can look for a deleted record of about the right size. These buckets are
+ // split into small and large groups for compatibility with old versions.
+ static const int SmallBuckets = 18;
+ static const int LargeBuckets = 8;
+
+
+ /*-------- data fields, as present on disk : */
+
+ DiskLoc firstExtent;
+ DiskLoc lastExtent;
+
+ /* NOTE: capped collections v1 override the meaning of deletedList.
+ deletedList[0] points to a list of free records (DeletedRecord's) for all extents in
+ the capped namespace.
+ deletedList[1] points to the last record in the prev extent. When the "current extent"
+ changes, this value is updated. !deletedList[1].isValid() when this value is not
+ yet computed.
*/
- class NamespaceDetails {
- public:
- enum { NIndexesMax = 64, NIndexesExtra = 30, NIndexesBase = 10 };
+ DiskLoc deletedListSmall[SmallBuckets];
+ DiskLoc deletedListLegacyGrabBag; // old implementations put records of multiple sizes here.
- // deleted lists -- linked lists of deleted records -- are placed in 'buckets' of various
- // sizes so you can look for a deleted record of about the right size. These buckets are
- // split into small and large groups for compatibility with old versions.
- static const int SmallBuckets = 18;
- static const int LargeBuckets = 8;
+ // ofs 168 (8 byte aligned)
+ struct Stats {
+ // datasize and nrecords MUST be adjacent -- code assumes this!
+ long long datasize; // this includes padding, but not record headers
+ long long nrecords;
+ } stats;
- /*-------- data fields, as present on disk : */
+ int lastExtentSize;
- DiskLoc firstExtent;
- DiskLoc lastExtent;
+ int nIndexes;
- /* NOTE: capped collections v1 override the meaning of deletedList.
- deletedList[0] points to a list of free records (DeletedRecord's) for all extents in
- the capped namespace.
- deletedList[1] points to the last record in the prev extent. When the "current extent"
- changes, this value is updated. !deletedList[1].isValid() when this value is not
- yet computed.
- */
- DiskLoc deletedListSmall[SmallBuckets];
- DiskLoc deletedListLegacyGrabBag; // old implementations put records of multiple sizes here.
+ // ofs 192
+ IndexDetails _indexes[NIndexesBase];
- // ofs 168 (8 byte aligned)
- struct Stats {
- // datasize and nrecords MUST Be adjacent code assumes!
- long long datasize; // this includes padding, but not record headers
- long long nrecords;
- } stats;
+public:
+ // ofs 352 (16 byte aligned)
+ int isCapped; // there is wasted space here if I'm right (ERH)
+ int maxDocsInCapped; // max # of objects for a capped table, -1 for inf.
- int lastExtentSize;
+ double paddingFactorOldDoNotUse;
+ // ofs 368 (16)
+ int systemFlagsOldDoNotUse; // things that the system sets/cares about
- int nIndexes;
+ DiskLoc capExtent; // the "current" extent we're writing to for a capped collection
+ DiskLoc capFirstNewRecord;
- // ofs 192
- IndexDetails _indexes[NIndexesBase];
+ unsigned short
+ _dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ unsigned short _indexFileVersion;
- public:
- // ofs 352 (16 byte aligned)
- int isCapped; // there is wasted space here if I'm right (ERH)
+ unsigned long long multiKeyIndexBits;
- int maxDocsInCapped; // max # of objects for a capped table, -1 for inf.
+ // ofs 400 (16)
+ unsigned long long _reservedA;
+ long long _extraOffset; // where the $extra info is located (bytes relative to this)
- double paddingFactorOldDoNotUse;
- // ofs 368 (16)
- int systemFlagsOldDoNotUse; // things that the system sets/cares about
+public:
+ int indexBuildsInProgress; // Number of indexes currently being built
- DiskLoc capExtent; // the "current" extent we're writing too for a capped collection
- DiskLoc capFirstNewRecord;
+ int userFlags;
- unsigned short _dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
- unsigned short _indexFileVersion;
+ DiskLoc deletedListLarge[LargeBuckets];
- unsigned long long multiKeyIndexBits;
+ // Think carefully before using this. We need at least 8 bytes reserved to leave room for a
+ // DiskLoc pointing to more data (eg in a dummy MmapV1RecordHeader or Extent). There is still _reservedA
+ // above, but these are the final two reserved 8-byte regions.
+ char _reserved[8];
+ /*-------- end data 496 bytes */
+public:
+ explicit NamespaceDetails(const DiskLoc& loc, bool _capped);
- // ofs 400 (16)
- unsigned long long _reservedA;
- long long _extraOffset; // where the $extra info is located (bytes relative to this)
+ class Extra {
+ long long _next;
public:
- int indexBuildsInProgress; // Number of indexes currently being built
-
- int userFlags;
+ IndexDetails details[NIndexesExtra];
- DiskLoc deletedListLarge[LargeBuckets];
+ private:
+ unsigned reserved2;
+ unsigned reserved3;
+ Extra(const Extra&) {
+ verify(false);
+ }
+ Extra& operator=(const Extra& r) {
+ verify(false);
+ return *this;
+ }
- // Think carefully before using this. We need at least 8 bytes reserved to leave room for a
- // DiskLoc pointing to more data (eg in a dummy MmapV1RecordHeader or Extent). There is still _reservedA
- // above, but these are the final two reserved 8-byte regions.
- char _reserved[8];
- /*-------- end data 496 bytes */
public:
- explicit NamespaceDetails( const DiskLoc &loc, bool _capped );
-
- class Extra {
- long long _next;
- public:
- IndexDetails details[NIndexesExtra];
- private:
- unsigned reserved2;
- unsigned reserved3;
- Extra(const Extra&) { verify(false); }
- Extra& operator=(const Extra& r) { verify(false); return *this; }
- public:
- Extra() { }
- long ofsFrom(NamespaceDetails *d) {
- return ((char *) this) - ((char *) d);
- }
- void init() { memset(this, 0, sizeof(Extra)); }
- Extra* next(const NamespaceDetails *d) const {
- if( _next == 0 ) return 0;
- return (Extra*) (((char *) d) + _next);
- }
- void setNext(OperationContext* txn, long ofs);
- void copy(NamespaceDetails *d, const Extra& e) {
- memcpy(this, &e, sizeof(Extra));
- _next = 0;
- }
- };
- Extra* extra() const {
- if( _extraOffset == 0 ) return 0;
- return (Extra *) (((char *) this) + _extraOffset);
+ Extra() {}
+ long ofsFrom(NamespaceDetails* d) {
+ return ((char*)this) - ((char*)d);
}
- /* add extra space for indexes when more than 10 */
- Extra* allocExtra( OperationContext* txn,
- StringData ns,
- NamespaceIndex& ni,
- int nindexessofar );
-
- void copyingFrom( OperationContext* txn,
- StringData thisns,
- NamespaceIndex& ni,
- NamespaceDetails *src); // must be called when renaming a NS to fix up extra
-
+ void init() {
+ memset(this, 0, sizeof(Extra));
+ }
+ Extra* next(const NamespaceDetails* d) const {
+ if (_next == 0)
+ return 0;
+ return (Extra*)(((char*)d) + _next);
+ }
+ void setNext(OperationContext* txn, long ofs);
+ void copy(NamespaceDetails* d, const Extra& e) {
+ memcpy(this, &e, sizeof(Extra));
+ _next = 0;
+ }
+ };
+ Extra* extra() const {
+ if (_extraOffset == 0)
+ return 0;
+ return (Extra*)(((char*)this) + _extraOffset);
+ }
+ /* add extra space for indexes when more than 10 */
+ Extra* allocExtra(OperationContext* txn, StringData ns, NamespaceIndex& ni, int nindexessofar);
+
+ void copyingFrom(OperationContext* txn,
+ StringData thisns,
+ NamespaceIndex& ni,
+ NamespaceDetails* src); // must be called when renaming a NS to fix up extra
+
+public:
+ void setMaxCappedDocs(OperationContext* txn, long long max);
+
+ enum UserFlags {
+ Flag_UsePowerOf2Sizes = 1 << 0,
+ Flag_NoPadding = 1 << 1,
+ };
+
+ IndexDetails& idx(int idxNo, bool missingExpected = false);
+ const IndexDetails& idx(int idxNo, bool missingExpected = false) const;
+
+ class IndexIterator {
public:
- void setMaxCappedDocs( OperationContext* txn, long long max );
-
- enum UserFlags {
- Flag_UsePowerOf2Sizes = 1 << 0,
- Flag_NoPadding = 1 << 1,
- };
-
- IndexDetails& idx(int idxNo, bool missingExpected = false );
- const IndexDetails& idx(int idxNo, bool missingExpected = false ) const;
-
- class IndexIterator {
- public:
- int pos() { return i; } // note this is the next one to come
- bool more() { return i < n; }
- const IndexDetails& next() { return d->idx(i++); }
- private:
- friend class NamespaceDetails;
- int i, n;
- const NamespaceDetails *d;
- IndexIterator(const NamespaceDetails *_d, bool includeBackgroundInProgress);
- };
-
- IndexIterator ii( bool includeBackgroundInProgress = false ) const {
- return IndexIterator(this, includeBackgroundInProgress);
+ int pos() {
+ return i;
+ } // note this is the next one to come
+ bool more() {
+ return i < n;
+ }
+ const IndexDetails& next() {
+ return d->idx(i++);
}
-
- /**
- * This fetches the IndexDetails for the next empty index slot. The caller must populate
- * returned object. This handles allocating extra index space, if necessary.
- */
- IndexDetails& getNextIndexDetails(OperationContext* txn, Collection* collection);
-
- NamespaceDetails *writingWithoutExtra( OperationContext* txn );
-
- /** Make all linked Extra objects writeable as well */
- NamespaceDetails *writingWithExtra( OperationContext* txn );
-
- /**
- * Returns the offset of the specified index name within the array of indexes. Must be
- * passed-in the owning collection to resolve the index record entries to objects.
- *
- * @return > 0 if index name was found, -1 otherwise.
- */
- int _catalogFindIndexByName(OperationContext* txn,
- const Collection* coll,
- StringData name,
- bool includeBackgroundInProgress) const;
private:
-
- /**
- * swaps all meta data for 2 indexes
- * a and b are 2 index ids, whose contents will be swapped
- * must have a lock on the entire collection to do this
- */
- void swapIndex( OperationContext* txn, int a, int b );
-
- friend class IndexCatalog;
- friend class IndexCatalogEntry;
-
- /** Update cappedLastDelRecLastExtent() after capExtent changed in cappedTruncateAfter() */
- void cappedTruncateLastDelUpdate();
- BOOST_STATIC_ASSERT( NIndexesMax <= NIndexesBase + NIndexesExtra*2 );
- BOOST_STATIC_ASSERT( NIndexesMax <= 64 ); // multiKey bits
- BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) == 496 );
- }; // NamespaceDetails
- BOOST_STATIC_ASSERT( sizeof(NamespaceDetails) == 496 );
+ friend class NamespaceDetails;
+ int i, n;
+ const NamespaceDetails* d;
+ IndexIterator(const NamespaceDetails* _d, bool includeBackgroundInProgress);
+ };
+
+ IndexIterator ii(bool includeBackgroundInProgress = false) const {
+ return IndexIterator(this, includeBackgroundInProgress);
+ }
+
+ /**
+ * This fetches the IndexDetails for the next empty index slot. The caller must populate
+ * returned object. This handles allocating extra index space, if necessary.
+ */
+ IndexDetails& getNextIndexDetails(OperationContext* txn, Collection* collection);
+
+ NamespaceDetails* writingWithoutExtra(OperationContext* txn);
+
+ /** Make all linked Extra objects writeable as well */
+ NamespaceDetails* writingWithExtra(OperationContext* txn);
+
+ /**
+ * Returns the offset of the specified index name within the array of indexes. Must be
+ * passed-in the owning collection to resolve the index record entries to objects.
+ *
+ * @return >= 0 if index name was found, -1 otherwise.
+ */
+ int _catalogFindIndexByName(OperationContext* txn,
+ const Collection* coll,
+ StringData name,
+ bool includeBackgroundInProgress) const;
+
+private:
+ /**
+ * swaps all meta data for 2 indexes
+ * a and b are 2 index ids, whose contents will be swapped
+ * must have a lock on the entire collection to do this
+ */
+ void swapIndex(OperationContext* txn, int a, int b);
+
+ friend class IndexCatalog;
+ friend class IndexCatalogEntry;
+
+ /** Update cappedLastDelRecLastExtent() after capExtent changed in cappedTruncateAfter() */
+ void cappedTruncateLastDelUpdate();
+ BOOST_STATIC_ASSERT(NIndexesMax <= NIndexesBase + NIndexesExtra * 2);
+ BOOST_STATIC_ASSERT(NIndexesMax <= 64); // multiKey bits
+ BOOST_STATIC_ASSERT(sizeof(NamespaceDetails::Extra) == 496);
+}; // NamespaceDetails
+BOOST_STATIC_ASSERT(sizeof(NamespaceDetails) == 496);
#pragma pack()
-} // namespace mongo
+} // namespace mongo
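
multiKeyIndexBits, together with the NIndexesMax <= 64 assertion just above, is a per-index bitset: bit i records whether index i is multikey, and the catalog entry code later in this diff flips individual bits with a 1ULL << idxNo mask (OR to set, AND with the inverted mask to clear). A minimal sketch of that bookkeeping, detached from the recovery-unit writing calls:

#include <cassert>
#include <cstdint>

// Illustrative per-index multikey bitset: bit i corresponds to index number i.
struct MultikeyBits {
    std::uint64_t bits = 0;

    // Returns false if the bit already had the requested value (the "shortcut" case).
    bool set(int idxNo, bool multikey) {
        const std::uint64_t mask = 1ULL << idxNo;
        if (multikey) {
            if (bits & mask)
                return false;
            bits |= mask;  // set bit idxNo
        } else {
            if (!(bits & mask))
                return false;
            bits &= ~mask;  // clear bit idxNo, leaving every other bit alone
        }
        return true;
    }

    bool isMultikey(int idxNo) const {
        return (bits & (1ULL << idxNo)) != 0;
    }
};

int main() {
    MultikeyBits mk;
    assert(mk.set(3, true));   // first transition reports a change
    assert(!mk.set(3, true));  // already set: shortcut, no change
    assert(mk.isMultikey(3));
    assert(mk.set(3, false));
    assert(!mk.isMultikey(3));
    return 0;
}
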
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
index 1d3fef7b918..7e79cfdca9d 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
@@ -43,359 +43,350 @@
namespace mongo {
- using std::string;
-
- NamespaceDetailsCollectionCatalogEntry::NamespaceDetailsCollectionCatalogEntry(
- StringData ns,
- NamespaceDetails* details,
- RecordStore* namespacesRecordStore,
- RecordStore* indexRecordStore,
- MMAPV1DatabaseCatalogEntry* db )
- : CollectionCatalogEntry( ns ),
- _details( details ),
- _namespacesRecordStore(namespacesRecordStore),
- _indexRecordStore( indexRecordStore ),
- _db( db ) {
- }
-
- CollectionOptions NamespaceDetailsCollectionCatalogEntry::getCollectionOptions(OperationContext* txn) const {
- CollectionOptions options = _db->getCollectionOptions( txn, ns().ns() );
-
- if (options.flagsSet) {
- if (options.flags != _details->userFlags) {
- warning() << "system.namespaces and NamespaceDetails disagree about userFlags."
- << " system.namespaces: " << options.flags
- << " NamespaceDetails: " << _details->userFlags;
- dassert(options.flags == _details->userFlags);
- }
+using std::string;
+
+NamespaceDetailsCollectionCatalogEntry::NamespaceDetailsCollectionCatalogEntry(
+ StringData ns,
+ NamespaceDetails* details,
+ RecordStore* namespacesRecordStore,
+ RecordStore* indexRecordStore,
+ MMAPV1DatabaseCatalogEntry* db)
+ : CollectionCatalogEntry(ns),
+ _details(details),
+ _namespacesRecordStore(namespacesRecordStore),
+ _indexRecordStore(indexRecordStore),
+ _db(db) {}
+
+CollectionOptions NamespaceDetailsCollectionCatalogEntry::getCollectionOptions(
+ OperationContext* txn) const {
+ CollectionOptions options = _db->getCollectionOptions(txn, ns().ns());
+
+ if (options.flagsSet) {
+ if (options.flags != _details->userFlags) {
+ warning() << "system.namespaces and NamespaceDetails disagree about userFlags."
+ << " system.namespaces: " << options.flags
+ << " NamespaceDetails: " << _details->userFlags;
+ dassert(options.flags == _details->userFlags);
}
-
- // Fill in the actual flags from the NamespaceDetails.
- // Leaving flagsSet alone since it indicates whether the user actively set the flags.
- options.flags = _details->userFlags;
-
- return options;
}
- int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount( OperationContext* txn ) const {
- return _details->nIndexes + _details->indexBuildsInProgress;
- }
+ // Fill in the actual flags from the NamespaceDetails.
+ // Leaving flagsSet alone since it indicates whether the user actively set the flags.
+ options.flags = _details->userFlags;
- int NamespaceDetailsCollectionCatalogEntry::getCompletedIndexCount( OperationContext* txn ) const {
- return _details->nIndexes;
- }
+ return options;
+}
- int NamespaceDetailsCollectionCatalogEntry::getMaxAllowedIndexes() const {
- return NamespaceDetails::NIndexesMax;
- }
+int NamespaceDetailsCollectionCatalogEntry::getTotalIndexCount(OperationContext* txn) const {
+ return _details->nIndexes + _details->indexBuildsInProgress;
+}
- void NamespaceDetailsCollectionCatalogEntry::getAllIndexes( OperationContext* txn,
- std::vector<std::string>* names ) const {
- NamespaceDetails::IndexIterator i = _details->ii( true );
- while ( i.more() ) {
- const IndexDetails& id = i.next();
- const BSONObj obj = _indexRecordStore->dataFor( txn, id.info.toRecordId() ).toBson();
- names->push_back( obj.getStringField("name") );
- }
- }
+int NamespaceDetailsCollectionCatalogEntry::getCompletedIndexCount(OperationContext* txn) const {
+ return _details->nIndexes;
+}
- bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
- StringData idxName) const {
- int idxNo = _findIndexNumber( txn, idxName );
- invariant( idxNo >= 0 );
- return isIndexMultikey( idxNo );
- }
+int NamespaceDetailsCollectionCatalogEntry::getMaxAllowedIndexes() const {
+ return NamespaceDetails::NIndexesMax;
+}
- bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(int idxNo) const {
- return (_details->multiKeyIndexBits & (((unsigned long long) 1) << idxNo)) != 0;
+void NamespaceDetailsCollectionCatalogEntry::getAllIndexes(OperationContext* txn,
+ std::vector<std::string>* names) const {
+ NamespaceDetails::IndexIterator i = _details->ii(true);
+ while (i.more()) {
+ const IndexDetails& id = i.next();
+ const BSONObj obj = _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+ names->push_back(obj.getStringField("name"));
}
+}
- bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
- StringData indexName,
- bool multikey ) {
+bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(OperationContext* txn,
+ StringData idxName) const {
+ int idxNo = _findIndexNumber(txn, idxName);
+ invariant(idxNo >= 0);
+ return isIndexMultikey(idxNo);
+}
- int idxNo = _findIndexNumber( txn, indexName );
- invariant( idxNo >= 0 );
- return setIndexIsMultikey( txn, idxNo, multikey );
- }
+bool NamespaceDetailsCollectionCatalogEntry::isIndexMultikey(int idxNo) const {
+ return (_details->multiKeyIndexBits & (((unsigned long long)1) << idxNo)) != 0;
+}
- bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
- int idxNo,
- bool multikey ) {
- unsigned long long mask = 1ULL << idxNo;
+bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
+ StringData indexName,
+ bool multikey) {
+ int idxNo = _findIndexNumber(txn, indexName);
+ invariant(idxNo >= 0);
+ return setIndexIsMultikey(txn, idxNo, multikey);
+}
- if (multikey) {
- // Shortcut if the bit is already set correctly
- if (_details->multiKeyIndexBits & mask) {
- return false;
- }
+bool NamespaceDetailsCollectionCatalogEntry::setIndexIsMultikey(OperationContext* txn,
+ int idxNo,
+ bool multikey) {
+ unsigned long long mask = 1ULL << idxNo;
- *txn->recoveryUnit()->writing(&_details->multiKeyIndexBits) |= mask;
+ if (multikey) {
+ // Shortcut if the bit is already set correctly
+ if (_details->multiKeyIndexBits & mask) {
+ return false;
}
- else {
- // Shortcut if the bit is already set correctly
- if (!(_details->multiKeyIndexBits & mask)) {
- return false;
- }
-
- // Invert mask: all 1's except a 0 at the ith bit
- mask = ~mask;
- *txn->recoveryUnit()->writing(&_details->multiKeyIndexBits) &= mask;
+
+ *txn->recoveryUnit()->writing(&_details->multiKeyIndexBits) |= mask;
+ } else {
+ // Shortcut if the bit is already set correctly
+ if (!(_details->multiKeyIndexBits & mask)) {
+ return false;
}
- return true;
+ // Invert mask: all 1's except a 0 at the ith bit
+ mask = ~mask;
+ *txn->recoveryUnit()->writing(&_details->multiKeyIndexBits) &= mask;
}
- RecordId NamespaceDetailsCollectionCatalogEntry::getIndexHead(OperationContext* txn,
- StringData idxName) const {
- int idxNo = _findIndexNumber( txn, idxName );
- invariant( idxNo >= 0 );
- return _details->idx( idxNo ).head.toRecordId();
- }
+ return true;
+}
- BSONObj NamespaceDetailsCollectionCatalogEntry::getIndexSpec( OperationContext* txn,
- StringData idxName ) const {
- int idxNo = _findIndexNumber( txn, idxName );
- invariant( idxNo >= 0 );
- const IndexDetails& id = _details->idx( idxNo );
- return _indexRecordStore->dataFor( txn, id.info.toRecordId() ).toBson();
- }
+RecordId NamespaceDetailsCollectionCatalogEntry::getIndexHead(OperationContext* txn,
+ StringData idxName) const {
+ int idxNo = _findIndexNumber(txn, idxName);
+ invariant(idxNo >= 0);
+ return _details->idx(idxNo).head.toRecordId();
+}
- void NamespaceDetailsCollectionCatalogEntry::setIndexHead( OperationContext* txn,
- StringData idxName,
- const RecordId& newHead ) {
- int idxNo = _findIndexNumber( txn, idxName );
- invariant( idxNo >= 0 );
- *txn->recoveryUnit()->writing(&_details->idx(idxNo).head) = DiskLoc::fromRecordId(newHead);
- }
+BSONObj NamespaceDetailsCollectionCatalogEntry::getIndexSpec(OperationContext* txn,
+ StringData idxName) const {
+ int idxNo = _findIndexNumber(txn, idxName);
+ invariant(idxNo >= 0);
+ const IndexDetails& id = _details->idx(idxNo);
+ return _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+}
- bool NamespaceDetailsCollectionCatalogEntry::isIndexReady( OperationContext* txn,
- StringData idxName ) const {
- int idxNo = _findIndexNumber( txn, idxName );
- invariant( idxNo >= 0 );
- return idxNo < getCompletedIndexCount( txn );
- }
+void NamespaceDetailsCollectionCatalogEntry::setIndexHead(OperationContext* txn,
+ StringData idxName,
+ const RecordId& newHead) {
+ int idxNo = _findIndexNumber(txn, idxName);
+ invariant(idxNo >= 0);
+ *txn->recoveryUnit()->writing(&_details->idx(idxNo).head) = DiskLoc::fromRecordId(newHead);
+}
- int NamespaceDetailsCollectionCatalogEntry::_findIndexNumber( OperationContext* txn,
- StringData idxName ) const {
- NamespaceDetails::IndexIterator i = _details->ii( true );
- while ( i.more() ) {
- const IndexDetails& id = i.next();
- int idxNo = i.pos() - 1;
- const BSONObj obj = _indexRecordStore->dataFor( txn, id.info.toRecordId() ).toBson();
- if ( idxName == obj.getStringField("name") )
- return idxNo;
- }
- return -1;
+bool NamespaceDetailsCollectionCatalogEntry::isIndexReady(OperationContext* txn,
+ StringData idxName) const {
+ int idxNo = _findIndexNumber(txn, idxName);
+ invariant(idxNo >= 0);
+ return idxNo < getCompletedIndexCount(txn);
+}
+
+int NamespaceDetailsCollectionCatalogEntry::_findIndexNumber(OperationContext* txn,
+ StringData idxName) const {
+ NamespaceDetails::IndexIterator i = _details->ii(true);
+ while (i.more()) {
+ const IndexDetails& id = i.next();
+ int idxNo = i.pos() - 1;
+ const BSONObj obj = _indexRecordStore->dataFor(txn, id.info.toRecordId()).toBson();
+ if (idxName == obj.getStringField("name"))
+ return idxNo;
}
+ return -1;
+}
+
+/* remove bit from a bit array - actually remove its slot, not a clear
+ note: this function does not work with x == 63 -- that is ok
+ but keep in mind in the future if max indexes were extended to
+ exactly 64 it would be a problem
+*/
+unsigned long long removeAndSlideBit(unsigned long long b, int x) {
+ unsigned long long tmp = b;
+ return (tmp & ((((unsigned long long)1) << x) - 1)) | ((tmp >> (x + 1)) << x);
+}
- /* remove bit from a bit array - actually remove its slot, not a clear
- note: this function does not work with x == 63 -- that is ok
- but keep in mind in the future if max indexes were extended to
- exactly 64 it would be a problem
- */
- unsigned long long removeAndSlideBit(unsigned long long b, int x) {
- unsigned long long tmp = b;
- return
- (tmp & ((((unsigned long long) 1) << x)-1)) |
- ((tmp >> (x+1)) << x);
+class IndexUpdateTest : public StartupTest {
+public:
+ void run() {
+ verify(removeAndSlideBit(1, 0) == 0);
+ verify(removeAndSlideBit(2, 0) == 1);
+ verify(removeAndSlideBit(2, 1) == 0);
+ verify(removeAndSlideBit(255, 1) == 127);
+ verify(removeAndSlideBit(21, 2) == 9);
+ verify(removeAndSlideBit(0x4000000000000001ULL, 62) == 1);
}
+} iu_unittest;
- class IndexUpdateTest : public StartupTest {
- public:
- void run() {
- verify( removeAndSlideBit(1, 0) == 0 );
- verify( removeAndSlideBit(2, 0) == 1 );
- verify( removeAndSlideBit(2, 1) == 0 );
- verify( removeAndSlideBit(255, 1) == 127 );
- verify( removeAndSlideBit(21, 2) == 9 );
- verify( removeAndSlideBit(0x4000000000000001ULL, 62) == 1 );
- }
- } iu_unittest;
+Status NamespaceDetailsCollectionCatalogEntry::removeIndex(OperationContext* txn,
+ StringData indexName) {
+ int idxNo = _findIndexNumber(txn, indexName);
+ if (idxNo < 0)
+ return Status(ErrorCodes::NamespaceNotFound, "index not found to remove");
- Status NamespaceDetailsCollectionCatalogEntry::removeIndex( OperationContext* txn,
- StringData indexName ) {
- int idxNo = _findIndexNumber( txn, indexName );
- if ( idxNo < 0 )
- return Status( ErrorCodes::NamespaceNotFound, "index not found to remove" );
+ RecordId infoLocation = _details->idx(idxNo).info.toRecordId();
- RecordId infoLocation = _details->idx( idxNo ).info.toRecordId();
+ { // sanity check
+ BSONObj info = _indexRecordStore->dataFor(txn, infoLocation).toBson();
+ invariant(info["name"].String() == indexName);
+ }
- { // sanity check
- BSONObj info = _indexRecordStore->dataFor( txn, infoLocation ).toBson();
- invariant( info["name"].String() == indexName );
+ { // drop the namespace
+ string indexNamespace = IndexDescriptor::makeIndexNamespace(ns().ns(), indexName);
+ Status status = _db->dropCollection(txn, indexNamespace);
+ if (!status.isOK()) {
+ return status;
}
+ }
- { // drop the namespace
- string indexNamespace = IndexDescriptor::makeIndexNamespace( ns().ns(), indexName );
- Status status = _db->dropCollection( txn, indexNamespace );
- if ( !status.isOK() ) {
- return status;
- }
- }
+ { // all info in the .ns file
+ NamespaceDetails* d = _details->writingWithExtra(txn);
- { // all info in the .ns file
- NamespaceDetails* d = _details->writingWithExtra( txn );
+ // fix the _multiKeyIndexBits, by moving all bits above me down one
+ d->multiKeyIndexBits = removeAndSlideBit(d->multiKeyIndexBits, idxNo);
- // fix the _multiKeyIndexBits, by moving all bits above me down one
- d->multiKeyIndexBits = removeAndSlideBit(d->multiKeyIndexBits, idxNo);
+ if (idxNo >= d->nIndexes)
+ d->indexBuildsInProgress--;
+ else
+ d->nIndexes--;
- if ( idxNo >= d->nIndexes )
- d->indexBuildsInProgress--;
- else
- d->nIndexes--;
+ for (int i = idxNo; i < getTotalIndexCount(txn); i++)
+ d->idx(i) = d->idx(i + 1);
- for ( int i = idxNo; i < getTotalIndexCount( txn ); i++ )
- d->idx(i) = d->idx(i+1);
+ d->idx(getTotalIndexCount(txn)) = IndexDetails();
+ }
- d->idx( getTotalIndexCount( txn ) ) = IndexDetails();
- }
+ // remove from system.indexes
+ _indexRecordStore->deleteRecord(txn, infoLocation);
- // remove from system.indexes
- _indexRecordStore->deleteRecord( txn, infoLocation );
+ return Status::OK();
+}
- return Status::OK();
+Status NamespaceDetailsCollectionCatalogEntry::prepareForIndexBuild(OperationContext* txn,
+ const IndexDescriptor* desc) {
+ BSONObj spec = desc->infoObj();
+ // 1) entry in system.indexes
+ StatusWith<RecordId> systemIndexesEntry =
+ _indexRecordStore->insertRecord(txn, spec.objdata(), spec.objsize(), false);
+ if (!systemIndexesEntry.isOK())
+ return systemIndexesEntry.getStatus();
+
+ // 2) NamespaceDetails mods
+ IndexDetails* id;
+ try {
+ id = &_details->idx(getTotalIndexCount(txn), true);
+ } catch (DBException&) {
+ _details->allocExtra(txn, ns().ns(), _db->_namespaceIndex, getTotalIndexCount(txn));
+ id = &_details->idx(getTotalIndexCount(txn), false);
}
- Status NamespaceDetailsCollectionCatalogEntry::prepareForIndexBuild( OperationContext* txn,
- const IndexDescriptor* desc ) {
- BSONObj spec = desc->infoObj();
- // 1) entry in system.indexs
- StatusWith<RecordId> systemIndexesEntry = _indexRecordStore->insertRecord( txn,
- spec.objdata(),
- spec.objsize(),
- false );
- if ( !systemIndexesEntry.isOK() )
- return systemIndexesEntry.getStatus();
-
- // 2) NamespaceDetails mods
- IndexDetails *id;
- try {
- id = &_details->idx(getTotalIndexCount( txn ), true);
- }
- catch( DBException& ) {
- _details->allocExtra(txn,
- ns().ns(),
- _db->_namespaceIndex,
- getTotalIndexCount( txn ));
- id = &_details->idx(getTotalIndexCount( txn ), false);
- }
-
- const DiskLoc infoLoc = DiskLoc::fromRecordId(systemIndexesEntry.getValue());
- *txn->recoveryUnit()->writing( &id->info ) = infoLoc;
- *txn->recoveryUnit()->writing( &id->head ) = DiskLoc();
+ const DiskLoc infoLoc = DiskLoc::fromRecordId(systemIndexesEntry.getValue());
+ *txn->recoveryUnit()->writing(&id->info) = infoLoc;
+ *txn->recoveryUnit()->writing(&id->head) = DiskLoc();
- txn->recoveryUnit()->writingInt( _details->indexBuildsInProgress ) += 1;
+ txn->recoveryUnit()->writingInt(_details->indexBuildsInProgress) += 1;
- // 3) indexes entry in .ns file and system.namespaces
- _db->createNamespaceForIndex(txn, desc->indexNamespace());
+ // 3) indexes entry in .ns file and system.namespaces
+ _db->createNamespaceForIndex(txn, desc->indexNamespace());
- return Status::OK();
- }
+ return Status::OK();
+}
- void NamespaceDetailsCollectionCatalogEntry::indexBuildSuccess( OperationContext* txn,
- StringData indexName ) {
- int idxNo = _findIndexNumber( txn, indexName );
- fassert( 17202, idxNo >= 0 );
+void NamespaceDetailsCollectionCatalogEntry::indexBuildSuccess(OperationContext* txn,
+ StringData indexName) {
+ int idxNo = _findIndexNumber(txn, indexName);
+ fassert(17202, idxNo >= 0);
- // Make sure the newly created index is relocated to nIndexes, if it isn't already there
- if ( idxNo != getCompletedIndexCount( txn ) ) {
- int toIdxNo = getCompletedIndexCount( txn );
+ // Make sure the newly created index is relocated to nIndexes, if it isn't already there
+ if (idxNo != getCompletedIndexCount(txn)) {
+ int toIdxNo = getCompletedIndexCount(txn);
- //_details->swapIndex( txn, idxNo, toIdxNo );
+ //_details->swapIndex( txn, idxNo, toIdxNo );
- // flip main meta data
- IndexDetails temp = _details->idx(idxNo);
- *txn->recoveryUnit()->writing(&_details->idx(idxNo)) = _details->idx(toIdxNo);
- *txn->recoveryUnit()->writing(&_details->idx(toIdxNo)) = temp;
+ // flip main meta data
+ IndexDetails temp = _details->idx(idxNo);
+ *txn->recoveryUnit()->writing(&_details->idx(idxNo)) = _details->idx(toIdxNo);
+ *txn->recoveryUnit()->writing(&_details->idx(toIdxNo)) = temp;
- // flip multi key bits
- bool tempMultikey = isIndexMultikey(idxNo);
- setIndexIsMultikey( txn, idxNo, isIndexMultikey(toIdxNo) );
- setIndexIsMultikey( txn, toIdxNo, tempMultikey );
+ // flip multi key bits
+ bool tempMultikey = isIndexMultikey(idxNo);
+ setIndexIsMultikey(txn, idxNo, isIndexMultikey(toIdxNo));
+ setIndexIsMultikey(txn, toIdxNo, tempMultikey);
- idxNo = toIdxNo;
- invariant( (idxNo = _findIndexNumber( txn, indexName )) );
- }
+ idxNo = toIdxNo;
+ invariant((idxNo = _findIndexNumber(txn, indexName)));
+ }
- txn->recoveryUnit()->writingInt( _details->indexBuildsInProgress ) -= 1;
- txn->recoveryUnit()->writingInt( _details->nIndexes ) += 1;
+ txn->recoveryUnit()->writingInt(_details->indexBuildsInProgress) -= 1;
+ txn->recoveryUnit()->writingInt(_details->nIndexes) += 1;
- invariant( isIndexReady( txn, indexName ) );
- }
+ invariant(isIndexReady(txn, indexName));
+}
- void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting( OperationContext* txn,
- StringData idxName,
- long long newExpireSeconds ) {
- int idx = _findIndexNumber( txn, idxName );
- invariant( idx >= 0 );
+void NamespaceDetailsCollectionCatalogEntry::updateTTLSetting(OperationContext* txn,
+ StringData idxName,
+ long long newExpireSeconds) {
+ int idx = _findIndexNumber(txn, idxName);
+ invariant(idx >= 0);
- IndexDetails& indexDetails = _details->idx( idx );
+ IndexDetails& indexDetails = _details->idx(idx);
- BSONObj obj = _indexRecordStore->dataFor( txn, indexDetails.info.toRecordId() ).toBson();
- const BSONElement oldExpireSecs = obj.getField("expireAfterSeconds");
+ BSONObj obj = _indexRecordStore->dataFor(txn, indexDetails.info.toRecordId()).toBson();
+ const BSONElement oldExpireSecs = obj.getField("expireAfterSeconds");
- // Important that we set the new value in-place. We are writing directly to the
- // object here so must be careful not to overwrite with a longer numeric type.
+ // Important that we set the new value in-place. We are writing directly to the
+ // object here so must be careful not to overwrite with a longer numeric type.
- char* nonConstPtr = const_cast<char*>(oldExpireSecs.value());
- switch( oldExpireSecs.type() ) {
+ char* nonConstPtr = const_cast<char*>(oldExpireSecs.value());
+ switch (oldExpireSecs.type()) {
case EOO:
- massert( 16631, "index does not have an 'expireAfterSeconds' field", false );
+ massert(16631, "index does not have an 'expireAfterSeconds' field", false);
break;
case NumberInt:
*txn->recoveryUnit()->writing(reinterpret_cast<int*>(nonConstPtr)) = newExpireSeconds;
break;
case NumberDouble:
- *txn->recoveryUnit()->writing(reinterpret_cast<double*>(nonConstPtr)) = newExpireSeconds;
+ *txn->recoveryUnit()->writing(reinterpret_cast<double*>(nonConstPtr)) =
+ newExpireSeconds;
break;
case NumberLong:
- *txn->recoveryUnit()->writing(reinterpret_cast<long long*>(nonConstPtr)) = newExpireSeconds;
+ *txn->recoveryUnit()->writing(reinterpret_cast<long long*>(nonConstPtr)) =
+ newExpireSeconds;
break;
default:
- massert( 16632, "current 'expireAfterSeconds' is not a number", false );
- }
+ massert(16632, "current 'expireAfterSeconds' is not a number", false);
}
+}
namespace {
- void updateSystemNamespaces(OperationContext* txn, RecordStore* namespaces,
- const NamespaceString& ns, const BSONObj& update) {
-
- if (!namespaces)
- return;
-
- auto cursor = namespaces->getCursor(txn);
- while (auto record = cursor->next()) {
- BSONObj oldEntry = record->data.releaseToBson();
- BSONElement e = oldEntry["name"];
- if (e.type() != String)
- continue;
-
- if (e.String() != ns.ns())
- continue;
-
- const BSONObj newEntry = applyUpdateOperators(oldEntry, update);
- StatusWith<RecordId> result = namespaces->updateRecord(txn, record->id,
- newEntry.objdata(),
- newEntry.objsize(),
- false, NULL);
- fassert(17486, result.getStatus());
- return;
- }
- fassertFailed(17488);
+void updateSystemNamespaces(OperationContext* txn,
+ RecordStore* namespaces,
+ const NamespaceString& ns,
+ const BSONObj& update) {
+ if (!namespaces)
+ return;
+
+ auto cursor = namespaces->getCursor(txn);
+ while (auto record = cursor->next()) {
+ BSONObj oldEntry = record->data.releaseToBson();
+ BSONElement e = oldEntry["name"];
+ if (e.type() != String)
+ continue;
+
+ if (e.String() != ns.ns())
+ continue;
+
+ const BSONObj newEntry = applyUpdateOperators(oldEntry, update);
+ StatusWith<RecordId> result = namespaces->updateRecord(
+ txn, record->id, newEntry.objdata(), newEntry.objsize(), false, NULL);
+ fassert(17486, result.getStatus());
+ return;
}
+ fassertFailed(17488);
+}
}
- void NamespaceDetailsCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
- NamespaceDetailsRSV1MetaData md(ns().ns(), _details);
- md.replaceUserFlags(txn, newValue);
- updateSystemNamespaces(txn, _namespacesRecordStore, ns(),
- BSON("$set" << BSON("options.flags" << newValue)));
- }
+void NamespaceDetailsCollectionCatalogEntry::updateFlags(OperationContext* txn, int newValue) {
+ NamespaceDetailsRSV1MetaData md(ns().ns(), _details);
+ md.replaceUserFlags(txn, newValue);
+ updateSystemNamespaces(
+ txn, _namespacesRecordStore, ns(), BSON("$set" << BSON("options.flags" << newValue)));
+}
- void NamespaceDetailsCollectionCatalogEntry::updateValidator(OperationContext* txn,
- const BSONObj& validator) {
- updateSystemNamespaces(txn, _namespacesRecordStore, ns(),
- BSON("$set" << BSON("options.validator" << validator)));
- }
+void NamespaceDetailsCollectionCatalogEntry::updateValidator(OperationContext* txn,
+ const BSONObj& validator) {
+ updateSystemNamespaces(
+ txn, _namespacesRecordStore, ns(), BSON("$set" << BSON("options.validator" << validator)));
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
index 9080c24c776..2d6751345d6 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
@@ -37,84 +37,73 @@
namespace mongo {
- class NamespaceDetails;
+class NamespaceDetails;
- class MMAPV1DatabaseCatalogEntry;;
- class RecordStore;
- class OperationContext;
+class MMAPV1DatabaseCatalogEntry;
+;
+class RecordStore;
+class OperationContext;
- class NamespaceDetailsCollectionCatalogEntry : public CollectionCatalogEntry {
- public:
- NamespaceDetailsCollectionCatalogEntry( StringData ns,
- NamespaceDetails* details,
- RecordStore* namespacesRecordStore,
- RecordStore* indexRecordStore,
- MMAPV1DatabaseCatalogEntry* db );
+class NamespaceDetailsCollectionCatalogEntry : public CollectionCatalogEntry {
+public:
+ NamespaceDetailsCollectionCatalogEntry(StringData ns,
+ NamespaceDetails* details,
+ RecordStore* namespacesRecordStore,
+ RecordStore* indexRecordStore,
+ MMAPV1DatabaseCatalogEntry* db);
- ~NamespaceDetailsCollectionCatalogEntry(){}
+ ~NamespaceDetailsCollectionCatalogEntry() {}
- CollectionOptions getCollectionOptions(OperationContext* txn) const final;
+ CollectionOptions getCollectionOptions(OperationContext* txn) const final;
- int getTotalIndexCount(OperationContext* txn) const final;
+ int getTotalIndexCount(OperationContext* txn) const final;
- int getCompletedIndexCount(OperationContext* txn) const final;
+ int getCompletedIndexCount(OperationContext* txn) const final;
- int getMaxAllowedIndexes() const final;
+ int getMaxAllowedIndexes() const final;
- void getAllIndexes( OperationContext* txn,
- std::vector<std::string>* names ) const final;
+ void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const final;
- BSONObj getIndexSpec( OperationContext* txn,
- StringData idxName ) const final;
+ BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const final;
- bool isIndexMultikey(OperationContext* txn,
- StringData indexName) const final;
- bool isIndexMultikey(int idxNo) const;
+ bool isIndexMultikey(OperationContext* txn, StringData indexName) const final;
+ bool isIndexMultikey(int idxNo) const;
- bool setIndexIsMultikey(OperationContext* txn,
- int idxNo,
- bool multikey = true);
- bool setIndexIsMultikey(OperationContext* txn,
- StringData indexName,
- bool multikey = true) final;
+ bool setIndexIsMultikey(OperationContext* txn, int idxNo, bool multikey = true);
+ bool setIndexIsMultikey(OperationContext* txn,
+ StringData indexName,
+ bool multikey = true) final;
- RecordId getIndexHead( OperationContext* txn,
- StringData indexName ) const final;
+ RecordId getIndexHead(OperationContext* txn, StringData indexName) const final;
- void setIndexHead( OperationContext* txn,
- StringData indexName,
- const RecordId& newHead ) final;
+ void setIndexHead(OperationContext* txn, StringData indexName, const RecordId& newHead) final;
- bool isIndexReady( OperationContext* txn,
- StringData indexName ) const final;
+ bool isIndexReady(OperationContext* txn, StringData indexName) const final;
- Status removeIndex( OperationContext* txn,
- StringData indexName ) final;
+ Status removeIndex(OperationContext* txn, StringData indexName) final;
- Status prepareForIndexBuild( OperationContext* txn,
- const IndexDescriptor* spec ) final;
+ Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) final;
- void indexBuildSuccess( OperationContext* txn,
- StringData indexName ) final;
+ void indexBuildSuccess(OperationContext* txn, StringData indexName) final;
- void updateTTLSetting( OperationContext* txn,
- StringData idxName,
- long long newExpireSeconds ) final;
+ void updateTTLSetting(OperationContext* txn,
+ StringData idxName,
+ long long newExpireSeconds) final;
- void updateFlags(OperationContext* txn, int newValue) final;
+ void updateFlags(OperationContext* txn, int newValue) final;
- void updateValidator(OperationContext* txn, const BSONObj& validator) final;
+ void updateValidator(OperationContext* txn, const BSONObj& validator) final;
- // not part of interface, but available to my storage engine
+ // not part of interface, but available to my storage engine
- int _findIndexNumber( OperationContext* txn, StringData indexName) const;
+ int _findIndexNumber(OperationContext* txn, StringData indexName) const;
- private:
- NamespaceDetails* _details;
- RecordStore* _namespacesRecordStore;
- RecordStore* _indexRecordStore;
- MMAPV1DatabaseCatalogEntry* _db;
+private:
+ NamespaceDetails* _details;
+ RecordStore* _namespacesRecordStore;
+ RecordStore* _indexRecordStore;
+ MMAPV1DatabaseCatalogEntry* _db;
- friend class MMAPV1DatabaseCatalogEntry;
- };
+ friend class MMAPV1DatabaseCatalogEntry;
+};
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
index 5c95ec2bbc7..51fc1c1ed75 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.cpp
@@ -35,169 +35,165 @@
namespace mongo {
- using std::unique_ptr;
- using std::numeric_limits;
+using std::unique_ptr;
+using std::numeric_limits;
- BOOST_STATIC_ASSERT(RecordStoreV1Base::Buckets
- == NamespaceDetails::SmallBuckets + NamespaceDetails::LargeBuckets);
+BOOST_STATIC_ASSERT(RecordStoreV1Base::Buckets ==
+ NamespaceDetails::SmallBuckets + NamespaceDetails::LargeBuckets);
- NamespaceDetailsRSV1MetaData::NamespaceDetailsRSV1MetaData( StringData ns,
- NamespaceDetails* details )
- : _ns( ns.toString() ),
- _details( details ) {
- }
-
- const DiskLoc& NamespaceDetailsRSV1MetaData::capExtent() const {
- return _details->capExtent;
- }
+NamespaceDetailsRSV1MetaData::NamespaceDetailsRSV1MetaData(StringData ns, NamespaceDetails* details)
+ : _ns(ns.toString()), _details(details) {}
- void NamespaceDetailsRSV1MetaData::setCapExtent( OperationContext* txn, const DiskLoc& loc ) {
- *txn->recoveryUnit()->writing( &_details->capExtent ) = loc;
- }
+const DiskLoc& NamespaceDetailsRSV1MetaData::capExtent() const {
+ return _details->capExtent;
+}
- const DiskLoc& NamespaceDetailsRSV1MetaData::capFirstNewRecord() const {
- return _details->capFirstNewRecord;
- }
+void NamespaceDetailsRSV1MetaData::setCapExtent(OperationContext* txn, const DiskLoc& loc) {
+ *txn->recoveryUnit()->writing(&_details->capExtent) = loc;
+}
- void NamespaceDetailsRSV1MetaData::setCapFirstNewRecord( OperationContext* txn,
- const DiskLoc& loc ) {
- *txn->recoveryUnit()->writing( &_details->capFirstNewRecord ) = loc;
- }
+const DiskLoc& NamespaceDetailsRSV1MetaData::capFirstNewRecord() const {
+ return _details->capFirstNewRecord;
+}
- bool NamespaceDetailsRSV1MetaData::capLooped() const {
- return _details->capFirstNewRecord.isValid();
- }
+void NamespaceDetailsRSV1MetaData::setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) {
+ *txn->recoveryUnit()->writing(&_details->capFirstNewRecord) = loc;
+}
- long long NamespaceDetailsRSV1MetaData::dataSize() const {
- return _details->stats.datasize;
- }
- long long NamespaceDetailsRSV1MetaData::numRecords() const {
- return _details->stats.nrecords;
- }
+bool NamespaceDetailsRSV1MetaData::capLooped() const {
+ return _details->capFirstNewRecord.isValid();
+}
- void NamespaceDetailsRSV1MetaData::incrementStats( OperationContext* txn,
- long long dataSizeIncrement,
- long long numRecordsIncrement ) {
- // durability todo : this could be a bit annoying / slow to record constantly
- NamespaceDetails::Stats* s = txn->recoveryUnit()->writing( &_details->stats );
- s->datasize += dataSizeIncrement;
- s->nrecords += numRecordsIncrement;
- }
+long long NamespaceDetailsRSV1MetaData::dataSize() const {
+ return _details->stats.datasize;
+}
+long long NamespaceDetailsRSV1MetaData::numRecords() const {
+ return _details->stats.nrecords;
+}
- void NamespaceDetailsRSV1MetaData::setStats( OperationContext* txn,
- long long dataSize,
- long long numRecords ) {
- NamespaceDetails::Stats* s = txn->recoveryUnit()->writing( &_details->stats );
- s->datasize = dataSize;
- s->nrecords = numRecords;
- }
+void NamespaceDetailsRSV1MetaData::incrementStats(OperationContext* txn,
+ long long dataSizeIncrement,
+ long long numRecordsIncrement) {
+ // durability todo : this could be a bit annoying / slow to record constantly
+ NamespaceDetails::Stats* s = txn->recoveryUnit()->writing(&_details->stats);
+ s->datasize += dataSizeIncrement;
+ s->nrecords += numRecordsIncrement;
+}
- DiskLoc NamespaceDetailsRSV1MetaData::deletedListEntry( int bucket ) const {
- invariant(bucket >= 0 && bucket < RecordStoreV1Base::Buckets);
- const DiskLoc head = (bucket < NamespaceDetails::SmallBuckets)
- ? _details->deletedListSmall[bucket]
- : _details->deletedListLarge[bucket - NamespaceDetails::SmallBuckets];
+void NamespaceDetailsRSV1MetaData::setStats(OperationContext* txn,
+ long long dataSize,
+ long long numRecords) {
+ NamespaceDetails::Stats* s = txn->recoveryUnit()->writing(&_details->stats);
+ s->datasize = dataSize;
+ s->nrecords = numRecords;
+}
- if (head == DiskLoc(0,0)) {
- // This will happen the first time we use a "large" bucket since they were previously
- // zero-initialized.
- return DiskLoc();
- }
+DiskLoc NamespaceDetailsRSV1MetaData::deletedListEntry(int bucket) const {
+ invariant(bucket >= 0 && bucket < RecordStoreV1Base::Buckets);
+ const DiskLoc head = (bucket < NamespaceDetails::SmallBuckets)
+ ? _details->deletedListSmall[bucket]
+ : _details->deletedListLarge[bucket - NamespaceDetails::SmallBuckets];
- return head;
+ if (head == DiskLoc(0, 0)) {
+ // This will happen the first time we use a "large" bucket since they were previously
+ // zero-initialized.
+ return DiskLoc();
}
- void NamespaceDetailsRSV1MetaData::setDeletedListEntry( OperationContext* txn,
- int bucket,
- const DiskLoc& loc ) {
- DiskLoc* head = (bucket < NamespaceDetails::SmallBuckets)
- ? &_details->deletedListSmall[bucket]
- : &_details->deletedListLarge[bucket - NamespaceDetails::SmallBuckets];
- *txn->recoveryUnit()->writing( head ) = loc;
- }
+ return head;
+}
- DiskLoc NamespaceDetailsRSV1MetaData::deletedListLegacyGrabBag() const {
- return _details->deletedListLegacyGrabBag;
- }
+void NamespaceDetailsRSV1MetaData::setDeletedListEntry(OperationContext* txn,
+ int bucket,
+ const DiskLoc& loc) {
+ DiskLoc* head = (bucket < NamespaceDetails::SmallBuckets)
+ ? &_details->deletedListSmall[bucket]
+ : &_details->deletedListLarge[bucket - NamespaceDetails::SmallBuckets];
+ *txn->recoveryUnit()->writing(head) = loc;
+}
- void NamespaceDetailsRSV1MetaData::setDeletedListLegacyGrabBag(OperationContext* txn,
- const DiskLoc& loc) {
- *txn->recoveryUnit()->writing(&_details->deletedListLegacyGrabBag) = loc;
- }
+DiskLoc NamespaceDetailsRSV1MetaData::deletedListLegacyGrabBag() const {
+ return _details->deletedListLegacyGrabBag;
+}
- void NamespaceDetailsRSV1MetaData::orphanDeletedList( OperationContext* txn ) {
- for( int i = 0; i < RecordStoreV1Base::Buckets; i++ ) {
- setDeletedListEntry( txn, i, DiskLoc() );
- }
- setDeletedListLegacyGrabBag(txn, DiskLoc());
- }
+void NamespaceDetailsRSV1MetaData::setDeletedListLegacyGrabBag(OperationContext* txn,
+ const DiskLoc& loc) {
+ *txn->recoveryUnit()->writing(&_details->deletedListLegacyGrabBag) = loc;
+}
- const DiskLoc& NamespaceDetailsRSV1MetaData::firstExtent( OperationContext* txn ) const {
- return _details->firstExtent;
+void NamespaceDetailsRSV1MetaData::orphanDeletedList(OperationContext* txn) {
+ for (int i = 0; i < RecordStoreV1Base::Buckets; i++) {
+ setDeletedListEntry(txn, i, DiskLoc());
}
+ setDeletedListLegacyGrabBag(txn, DiskLoc());
+}
- void NamespaceDetailsRSV1MetaData::setFirstExtent( OperationContext* txn, const DiskLoc& loc ) {
- *txn->recoveryUnit()->writing( &_details->firstExtent ) = loc;
- }
+const DiskLoc& NamespaceDetailsRSV1MetaData::firstExtent(OperationContext* txn) const {
+ return _details->firstExtent;
+}
- const DiskLoc& NamespaceDetailsRSV1MetaData::lastExtent( OperationContext* txn ) const {
- return _details->lastExtent;
- }
+void NamespaceDetailsRSV1MetaData::setFirstExtent(OperationContext* txn, const DiskLoc& loc) {
+ *txn->recoveryUnit()->writing(&_details->firstExtent) = loc;
+}
- void NamespaceDetailsRSV1MetaData::setLastExtent( OperationContext* txn, const DiskLoc& loc ) {
- *txn->recoveryUnit()->writing( &_details->lastExtent ) = loc;
- }
+const DiskLoc& NamespaceDetailsRSV1MetaData::lastExtent(OperationContext* txn) const {
+ return _details->lastExtent;
+}
- bool NamespaceDetailsRSV1MetaData::isCapped() const {
- return _details->isCapped;
- }
+void NamespaceDetailsRSV1MetaData::setLastExtent(OperationContext* txn, const DiskLoc& loc) {
+ *txn->recoveryUnit()->writing(&_details->lastExtent) = loc;
+}
- bool NamespaceDetailsRSV1MetaData::isUserFlagSet( int flag ) const {
- return _details->userFlags & flag;
- }
+bool NamespaceDetailsRSV1MetaData::isCapped() const {
+ return _details->isCapped;
+}
- int NamespaceDetailsRSV1MetaData::userFlags() const {
- return _details->userFlags;
- }
+bool NamespaceDetailsRSV1MetaData::isUserFlagSet(int flag) const {
+ return _details->userFlags & flag;
+}
- bool NamespaceDetailsRSV1MetaData::setUserFlag( OperationContext* txn, int flag ) {
- if ( ( _details->userFlags & flag ) == flag )
- return false;
+int NamespaceDetailsRSV1MetaData::userFlags() const {
+ return _details->userFlags;
+}
- txn->recoveryUnit()->writingInt( _details->userFlags) |= flag;
- return true;
- }
+bool NamespaceDetailsRSV1MetaData::setUserFlag(OperationContext* txn, int flag) {
+ if ((_details->userFlags & flag) == flag)
+ return false;
- bool NamespaceDetailsRSV1MetaData::clearUserFlag( OperationContext* txn, int flag ) {
- if ( ( _details->userFlags & flag ) == 0 )
- return false;
+ txn->recoveryUnit()->writingInt(_details->userFlags) |= flag;
+ return true;
+}
- txn->recoveryUnit()->writingInt(_details->userFlags) &= ~flag;
- return true;
- }
+bool NamespaceDetailsRSV1MetaData::clearUserFlag(OperationContext* txn, int flag) {
+ if ((_details->userFlags & flag) == 0)
+ return false;
- bool NamespaceDetailsRSV1MetaData::replaceUserFlags( OperationContext* txn, int flags ) {
- if ( _details->userFlags == flags )
- return false;
+ txn->recoveryUnit()->writingInt(_details->userFlags) &= ~flag;
+ return true;
+}
- txn->recoveryUnit()->writingInt(_details->userFlags) = flags;
- return true;
- }
+bool NamespaceDetailsRSV1MetaData::replaceUserFlags(OperationContext* txn, int flags) {
+ if (_details->userFlags == flags)
+ return false;
- int NamespaceDetailsRSV1MetaData::lastExtentSize( OperationContext* txn ) const {
- return _details->lastExtentSize;
- }
+ txn->recoveryUnit()->writingInt(_details->userFlags) = flags;
+ return true;
+}
- void NamespaceDetailsRSV1MetaData::setLastExtentSize( OperationContext* txn, int newMax ) {
- if ( _details->lastExtentSize == newMax )
- return;
- txn->recoveryUnit()->writingInt(_details->lastExtentSize) = newMax;
- }
+int NamespaceDetailsRSV1MetaData::lastExtentSize(OperationContext* txn) const {
+ return _details->lastExtentSize;
+}
- long long NamespaceDetailsRSV1MetaData::maxCappedDocs() const {
- invariant( _details->isCapped );
- if ( _details->maxDocsInCapped == 0x7fffffff )
- return numeric_limits<long long>::max();
- return _details->maxDocsInCapped;
- }
+void NamespaceDetailsRSV1MetaData::setLastExtentSize(OperationContext* txn, int newMax) {
+ if (_details->lastExtentSize == newMax)
+ return;
+ txn->recoveryUnit()->writingInt(_details->lastExtentSize) = newMax;
+}
+
+long long NamespaceDetailsRSV1MetaData::maxCappedDocs() const {
+ invariant(_details->isCapped);
+ if (_details->maxDocsInCapped == 0x7fffffff)
+ return numeric_limits<long long>::max();
+ return _details->maxDocsInCapped;
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
index 5bc9c475506..a6fde4807b5 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_rsv1_metadata.h
@@ -38,70 +38,65 @@
namespace mongo {
- class RecordStore;
+class RecordStore;
- /*
- * NOTE: NamespaceDetails will become a struct
- * all dur, etc... will move here
- */
- class NamespaceDetailsRSV1MetaData : public RecordStoreV1MetaData {
- public:
- explicit NamespaceDetailsRSV1MetaData( StringData ns, NamespaceDetails* details);
-
- virtual ~NamespaceDetailsRSV1MetaData(){}
+/*
+ * NOTE: NamespaceDetails will become a struct
+ * all dur, etc... will move here
+ */
+class NamespaceDetailsRSV1MetaData : public RecordStoreV1MetaData {
+public:
+ explicit NamespaceDetailsRSV1MetaData(StringData ns, NamespaceDetails* details);
- virtual const DiskLoc& capExtent() const;
- virtual void setCapExtent( OperationContext* txn, const DiskLoc& loc );
+ virtual ~NamespaceDetailsRSV1MetaData() {}
- virtual const DiskLoc& capFirstNewRecord() const;
- virtual void setCapFirstNewRecord( OperationContext* txn, const DiskLoc& loc );
+ virtual const DiskLoc& capExtent() const;
+ virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc);
- virtual bool capLooped() const;
+ virtual const DiskLoc& capFirstNewRecord() const;
+ virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc);
- virtual long long dataSize() const;
- virtual long long numRecords() const;
+ virtual bool capLooped() const;
- virtual void incrementStats( OperationContext* txn,
- long long dataSizeIncrement,
- long long numRecordsIncrement );
+ virtual long long dataSize() const;
+ virtual long long numRecords() const;
- virtual void setStats( OperationContext* txn,
- long long dataSize,
- long long numRecords );
+ virtual void incrementStats(OperationContext* txn,
+ long long dataSizeIncrement,
+ long long numRecordsIncrement);
- virtual DiskLoc deletedListEntry( int bucket ) const;
- virtual void setDeletedListEntry( OperationContext* txn,
- int bucket,
- const DiskLoc& loc );
+ virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords);
- virtual DiskLoc deletedListLegacyGrabBag() const;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc);
+ virtual DiskLoc deletedListEntry(int bucket) const;
+ virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc);
- virtual void orphanDeletedList(OperationContext* txn);
+ virtual DiskLoc deletedListLegacyGrabBag() const;
+ virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc);
- virtual const DiskLoc& firstExtent( OperationContext* txn ) const;
- virtual void setFirstExtent( OperationContext* txn, const DiskLoc& loc );
+ virtual void orphanDeletedList(OperationContext* txn);
- virtual const DiskLoc& lastExtent( OperationContext* txn ) const;
- virtual void setLastExtent( OperationContext* txn, const DiskLoc& loc );
+ virtual const DiskLoc& firstExtent(OperationContext* txn) const;
+ virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc);
- virtual bool isCapped() const;
+ virtual const DiskLoc& lastExtent(OperationContext* txn) const;
+ virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc);
- virtual bool isUserFlagSet( int flag ) const;
- virtual int userFlags() const;
- virtual bool setUserFlag( OperationContext* txn, int flag );
- virtual bool clearUserFlag( OperationContext* txn, int flag );
- virtual bool replaceUserFlags( OperationContext* txn, int flags );
+ virtual bool isCapped() const;
- virtual int lastExtentSize( OperationContext* txn ) const;
- virtual void setLastExtentSize( OperationContext* txn, int newMax );
+ virtual bool isUserFlagSet(int flag) const;
+ virtual int userFlags() const;
+ virtual bool setUserFlag(OperationContext* txn, int flag);
+ virtual bool clearUserFlag(OperationContext* txn, int flag);
+ virtual bool replaceUserFlags(OperationContext* txn, int flags);
- virtual long long maxCappedDocs() const;
+ virtual int lastExtentSize(OperationContext* txn) const;
+ virtual void setLastExtentSize(OperationContext* txn, int newMax);
- private:
- std::string _ns;
- NamespaceDetails* _details;
- RecordStore* _namespaceRecordStore;
- };
+ virtual long long maxCappedDocs() const;
+private:
+ std::string _ns;
+ NamespaceDetails* _details;
+ RecordStore* _namespaceRecordStore;
+};
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
index 8f1bb505197..12e90d2db57 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.cpp
@@ -47,211 +47,194 @@
namespace mongo {
- using std::endl;
- using std::list;
- using std::string;
+using std::endl;
+using std::list;
+using std::string;
- NamespaceIndex::NamespaceIndex(const std::string& dir, const std::string& database)
- : _dir(dir),
- _database(database),
- _ht(nullptr) {
+NamespaceIndex::NamespaceIndex(const std::string& dir, const std::string& database)
+ : _dir(dir), _database(database), _ht(nullptr) {}
- }
-
- NamespaceIndex::~NamespaceIndex() {
-
- }
-
- NamespaceDetails* NamespaceIndex::details(StringData ns) const {
- const Namespace n(ns);
- return details(n);
- }
+NamespaceIndex::~NamespaceIndex() {}
- NamespaceDetails* NamespaceIndex::details(const Namespace& ns) const {
- return _ht->get(ns);
- }
-
- void NamespaceIndex::add_ns( OperationContext* txn,
- StringData ns, const DiskLoc& loc, bool capped) {
- NamespaceDetails details( loc, capped );
- add_ns( txn, ns, &details );
- }
+NamespaceDetails* NamespaceIndex::details(StringData ns) const {
+ const Namespace n(ns);
+ return details(n);
+}
- void NamespaceIndex::add_ns( OperationContext* txn,
- StringData ns,
- const NamespaceDetails* details ) {
- Namespace n(ns);
- add_ns( txn, n, details );
- }
+NamespaceDetails* NamespaceIndex::details(const Namespace& ns) const {
+ return _ht->get(ns);
+}
- void NamespaceIndex::add_ns( OperationContext* txn,
- const Namespace& ns,
- const NamespaceDetails* details ) {
- const NamespaceString nss(ns.toString());
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+void NamespaceIndex::add_ns(OperationContext* txn, StringData ns, const DiskLoc& loc, bool capped) {
+ NamespaceDetails details(loc, capped);
+ add_ns(txn, ns, &details);
+}
- massert(17315, "no . in ns", nsIsFull(nss.toString()));
+void NamespaceIndex::add_ns(OperationContext* txn, StringData ns, const NamespaceDetails* details) {
+ Namespace n(ns);
+ add_ns(txn, n, details);
+}
- uassert(10081, "too many namespaces/collections", _ht->put(txn, ns, *details));
- }
+void NamespaceIndex::add_ns(OperationContext* txn,
+ const Namespace& ns,
+ const NamespaceDetails* details) {
+ const NamespaceString nss(ns.toString());
+ invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
- void NamespaceIndex::kill_ns( OperationContext* txn, StringData ns) {
- const NamespaceString nss(ns.toString());
- invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+ massert(17315, "no . in ns", nsIsFull(nss.toString()));
- const Namespace n(ns);
- _ht->kill(txn, n);
+ uassert(10081, "too many namespaces/collections", _ht->put(txn, ns, *details));
+}
- if (ns.size() <= Namespace::MaxNsColletionLen) {
- // Larger namespace names don't have room for $extras so they can't exist. The code
- // below would cause an "$extra: ns too large" error and stacktrace to be printed to the
- // log even though everything is fine.
- for( int i = 0; i<=1; i++ ) {
- try {
- Namespace extra(n.extraName(i));
- _ht->kill(txn, extra);
- }
- catch(DBException&) {
- LOG(3) << "caught exception in kill_ns" << endl;
- }
+void NamespaceIndex::kill_ns(OperationContext* txn, StringData ns) {
+ const NamespaceString nss(ns.toString());
+ invariant(txn->lockState()->isDbLockedForMode(nss.db(), MODE_X));
+
+ const Namespace n(ns);
+ _ht->kill(txn, n);
+
+ if (ns.size() <= Namespace::MaxNsColletionLen) {
+ // Larger namespace names don't have room for $extras so they can't exist. The code
+ // below would cause an "$extra: ns too large" error and stacktrace to be printed to the
+ // log even though everything is fine.
+ for (int i = 0; i <= 1; i++) {
+ try {
+ Namespace extra(n.extraName(i));
+ _ht->kill(txn, extra);
+ } catch (DBException&) {
+ LOG(3) << "caught exception in kill_ns" << endl;
}
}
}
+}
- bool NamespaceIndex::pathExists() const {
- return boost::filesystem::exists(path());
- }
-
- boost::filesystem::path NamespaceIndex::path() const {
- boost::filesystem::path ret( _dir );
- if (storageGlobalParams.directoryperdb)
- ret /= _database;
- ret /= ( _database + ".ns" );
- return ret;
- }
+bool NamespaceIndex::pathExists() const {
+ return boost::filesystem::exists(path());
+}
- static void namespaceGetNamespacesCallback( const Namespace& k , NamespaceDetails& v , list<string>* l ) {
- if ( ! k.hasDollarSign() || k == "local.oplog.$main" ) {
- // we call out local.oplog.$main specifically as its the only "normal"
- // collection that has a $, so we make sure it gets added
- l->push_back( k.toString() );
- }
- }
+boost::filesystem::path NamespaceIndex::path() const {
+ boost::filesystem::path ret(_dir);
+ if (storageGlobalParams.directoryperdb)
+ ret /= _database;
+ ret /= (_database + ".ns");
+ return ret;
+}
- void NamespaceIndex::getCollectionNamespaces( list<string>* tofill ) const {
- _ht->iterAll(stdx::bind(namespaceGetNamespacesCallback,
- stdx::placeholders::_1,
- stdx::placeholders::_2,
- tofill));
+static void namespaceGetNamespacesCallback(const Namespace& k,
+ NamespaceDetails& v,
+ list<string>* l) {
+ if (!k.hasDollarSign() || k == "local.oplog.$main") {
+        // we call out local.oplog.$main specifically as it's the only "normal"
+ // collection that has a $, so we make sure it gets added
+ l->push_back(k.toString());
}
+}
- void NamespaceIndex::maybeMkdir() const {
- if (!storageGlobalParams.directoryperdb)
- return;
- boost::filesystem::path dir( _dir );
- dir /= _database;
- if ( !boost::filesystem::exists( dir ) )
- MONGO_ASSERT_ON_EXCEPTION_WITH_MSG( boost::filesystem::create_directory( dir ), "create dir for db " );
- }
+void NamespaceIndex::getCollectionNamespaces(list<string>* tofill) const {
+ _ht->iterAll(stdx::bind(
+ namespaceGetNamespacesCallback, stdx::placeholders::_1, stdx::placeholders::_2, tofill));
+}
- void NamespaceIndex::init(OperationContext* txn) {
- invariant(!_ht.get());
+void NamespaceIndex::maybeMkdir() const {
+ if (!storageGlobalParams.directoryperdb)
+ return;
+ boost::filesystem::path dir(_dir);
+ dir /= _database;
+ if (!boost::filesystem::exists(dir))
+ MONGO_ASSERT_ON_EXCEPTION_WITH_MSG(boost::filesystem::create_directory(dir),
+ "create dir for db ");
+}
- unsigned long long len = 0;
+void NamespaceIndex::init(OperationContext* txn) {
+ invariant(!_ht.get());
- const boost::filesystem::path nsPath = path();
- const std::string pathString = nsPath.string();
+ unsigned long long len = 0;
- void* p = 0;
+ const boost::filesystem::path nsPath = path();
+ const std::string pathString = nsPath.string();
- if (boost::filesystem::exists(nsPath)) {
- if (_f.open(pathString, true)) {
- len = _f.length();
+ void* p = 0;
- if (len % (1024 * 1024) != 0) {
- StringBuilder sb;
- sb << "Invalid length: " << len
- << " for .ns file: " << pathString << ". Cannot open database";
+ if (boost::filesystem::exists(nsPath)) {
+ if (_f.open(pathString, true)) {
+ len = _f.length();
- log() << sb.str();
- uassert(10079, sb.str(), len % (1024 * 1024) == 0);
- }
+ if (len % (1024 * 1024) != 0) {
+ StringBuilder sb;
+ sb << "Invalid length: " << len << " for .ns file: " << pathString
+ << ". Cannot open database";
- p = _f.getView();
+ log() << sb.str();
+ uassert(10079, sb.str(), len % (1024 * 1024) == 0);
}
+
+ p = _f.getView();
}
- else {
- // use mmapv1GlobalOptions.lenForNewNsFiles, we are making a new database
- massert(10343,
- "bad mmapv1GlobalOptions.lenForNewNsFiles",
- mmapv1GlobalOptions.lenForNewNsFiles >= 1024*1024);
+ } else {
+ // use mmapv1GlobalOptions.lenForNewNsFiles, we are making a new database
+ massert(10343,
+ "bad mmapv1GlobalOptions.lenForNewNsFiles",
+ mmapv1GlobalOptions.lenForNewNsFiles >= 1024 * 1024);
- maybeMkdir();
+ maybeMkdir();
- unsigned long long l = mmapv1GlobalOptions.lenForNewNsFiles;
- log() << "allocating new ns file " << pathString << ", filling with zeroes..." << endl;
+ unsigned long long l = mmapv1GlobalOptions.lenForNewNsFiles;
+ log() << "allocating new ns file " << pathString << ", filling with zeroes..." << endl;
- {
- // Due to SERVER-15369 we need to explicitly write zero-bytes to the NS file.
- const unsigned long long kBlockSize = 1024*1024;
- invariant(l % kBlockSize == 0); // ns files can only be multiples of 1MB
- const std::vector<char> zeros(kBlockSize, 0);
+ {
+ // Due to SERVER-15369 we need to explicitly write zero-bytes to the NS file.
+ const unsigned long long kBlockSize = 1024 * 1024;
+ invariant(l % kBlockSize == 0); // ns files can only be multiples of 1MB
+ const std::vector<char> zeros(kBlockSize, 0);
- File file;
- file.open(pathString.c_str());
+ File file;
+ file.open(pathString.c_str());
- massert(18825,
- str::stream() << "couldn't create file " << pathString,
- file.is_open());
+ massert(18825, str::stream() << "couldn't create file " << pathString, file.is_open());
- for (fileofs ofs = 0; ofs < l && !file.bad(); ofs += kBlockSize) {
- file.write(ofs, &zeros[0], kBlockSize);
- }
+ for (fileofs ofs = 0; ofs < l && !file.bad(); ofs += kBlockSize) {
+ file.write(ofs, &zeros[0], kBlockSize);
+ }
- if (file.bad()) {
- try {
- boost::filesystem::remove(pathString);
- } catch (const std::exception& e) {
- StringBuilder ss;
- ss << "error removing file: " << e.what();
- massert(18909, ss.str(), 0);
- }
- }
- else {
- file.fsync();
+ if (file.bad()) {
+ try {
+ boost::filesystem::remove(pathString);
+ } catch (const std::exception& e) {
+ StringBuilder ss;
+ ss << "error removing file: " << e.what();
+ massert(18909, ss.str(), 0);
}
-
- massert(18826,
- str::stream() << "failure writing file " << pathString,
- !file.bad());
+ } else {
+ file.fsync();
}
- if (_f.create(pathString, l, true)) {
- // The writes done in this function must not be rolled back. This will leave the
- // file empty, but available for future use. That is why we go directly to the
- // global dur dirty list rather than going through the OperationContext.
- getDur().createdFile(pathString, l);
+ massert(18826, str::stream() << "failure writing file " << pathString, !file.bad());
+ }
- // Commit the journal and all changes to disk so that even if exceptions occur
- // during subsequent initialization, we won't have uncommited changes during file
- // close.
- getDur().commitNow(txn);
+ if (_f.create(pathString, l, true)) {
+ // The writes done in this function must not be rolled back. This will leave the
+ // file empty, but available for future use. That is why we go directly to the
+ // global dur dirty list rather than going through the OperationContext.
+ getDur().createdFile(pathString, l);
- len = l;
- invariant(len == mmapv1GlobalOptions.lenForNewNsFiles);
+ // Commit the journal and all changes to disk so that even if exceptions occur
+            // during subsequent initialization, we won't have uncommitted changes during file
+ // close.
+ getDur().commitNow(txn);
- p = _f.getView();
- }
- }
+ len = l;
+ invariant(len == mmapv1GlobalOptions.lenForNewNsFiles);
- if (p == 0) {
- severe() << "error couldn't open file " << pathString << " terminating" << endl;
- invariant(false);
+ p = _f.getView();
}
+ }
- invariant(len <= 0x7fffffff);
- _ht.reset(new NamespaceHashTable(p, (int) len, "namespace index"));
+ if (p == 0) {
+ severe() << "error couldn't open file " << pathString << " terminating" << endl;
+ invariant(false);
}
+ invariant(len <= 0x7fffffff);
+ _ht.reset(new NamespaceHashTable(p, (int)len, "namespace index"));
+}
}
-
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
index 44f429311ba..53d162bc601 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_index.h
@@ -40,53 +40,53 @@
namespace mongo {
- class NamespaceDetails;
- class NamespaceHashTable;
- class OperationContext;
+class NamespaceDetails;
+class NamespaceHashTable;
+class OperationContext;
- /* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog"
- if you will: at least the core parts. (Additional info in system.* collections.)
- */
- class NamespaceIndex {
- MONGO_DISALLOW_COPYING(NamespaceIndex);
- public:
- NamespaceIndex(const std::string& dir, const std::string& database);
- ~NamespaceIndex();
+/* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog"
+ if you will: at least the core parts. (Additional info in system.* collections.)
+*/
+class NamespaceIndex {
+ MONGO_DISALLOW_COPYING(NamespaceIndex);
- /* returns true if the file represented by this file exists on disk */
- bool pathExists() const;
+public:
+ NamespaceIndex(const std::string& dir, const std::string& database);
+ ~NamespaceIndex();
- void init(OperationContext* txn);
+ /* returns true if the file represented by this file exists on disk */
+ bool pathExists() const;
- void add_ns( OperationContext* txn,
- StringData ns, const DiskLoc& loc, bool capped);
- void add_ns( OperationContext* txn,
- StringData ns, const NamespaceDetails* details );
- void add_ns( OperationContext* txn,
- const Namespace& ns, const NamespaceDetails* details );
+ void init(OperationContext* txn);
- NamespaceDetails* details(StringData ns) const;
- NamespaceDetails* details(const Namespace& ns) const;
+ void add_ns(OperationContext* txn, StringData ns, const DiskLoc& loc, bool capped);
+ void add_ns(OperationContext* txn, StringData ns, const NamespaceDetails* details);
+ void add_ns(OperationContext* txn, const Namespace& ns, const NamespaceDetails* details);
- void kill_ns( OperationContext* txn,
- StringData ns);
+ NamespaceDetails* details(StringData ns) const;
+ NamespaceDetails* details(const Namespace& ns) const;
- bool allocated() const { return _ht.get() != 0; }
+ void kill_ns(OperationContext* txn, StringData ns);
- void getCollectionNamespaces( std::list<std::string>* tofill ) const;
+ bool allocated() const {
+ return _ht.get() != 0;
+ }
- boost::filesystem::path path() const;
+ void getCollectionNamespaces(std::list<std::string>* tofill) const;
- unsigned long long fileLength() const { return _f.length(); }
+ boost::filesystem::path path() const;
- private:
- void maybeMkdir() const;
+ unsigned long long fileLength() const {
+ return _f.length();
+ }
- const std::string _dir;
- const std::string _database;
+private:
+ void maybeMkdir() const;
- DurableMappedFile _f;
- std::unique_ptr<NamespaceHashTable> _ht;
- };
+ const std::string _dir;
+ const std::string _database;
+ DurableMappedFile _f;
+ std::unique_ptr<NamespaceHashTable> _ht;
+};
}
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_test.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_test.cpp
index 6a0edb79ea4..85cd79be43b 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_test.cpp
@@ -34,36 +34,35 @@
namespace mongo {
- using std::string;
+using std::string;
- TEST( NamespaceTest, Basics ) {
- Namespace foo( "foo.bar" );
- Namespace bar( "bar.foo" );
+TEST(NamespaceTest, Basics) {
+ Namespace foo("foo.bar");
+ Namespace bar("bar.foo");
- ASSERT_EQUALS( foo.toString(), foo.toString() );
- ASSERT_EQUALS( foo.hash(), foo.hash() );
+ ASSERT_EQUALS(foo.toString(), foo.toString());
+ ASSERT_EQUALS(foo.hash(), foo.hash());
- ASSERT_NOT_EQUALS( foo.hash(), bar.hash() );
+ ASSERT_NOT_EQUALS(foo.hash(), bar.hash());
- ASSERT( foo == foo );
- ASSERT( !( foo != foo ) );
- ASSERT( foo != bar );
- ASSERT( !( foo == bar ) );
- }
-
- TEST( NamespaceTest, ExtraName ) {
- Namespace foo( "foo.bar" );
- ASSERT_FALSE( foo.isExtra() );
+ ASSERT(foo == foo);
+ ASSERT(!(foo != foo));
+ ASSERT(foo != bar);
+ ASSERT(!(foo == bar));
+}
- string str0 = foo.extraName( 0 );
- ASSERT_EQUALS( "foo.bar$extra", str0 );
- Namespace ex0( str0 );
- ASSERT_TRUE( ex0.isExtra() );
+TEST(NamespaceTest, ExtraName) {
+ Namespace foo("foo.bar");
+ ASSERT_FALSE(foo.isExtra());
- string str1 = foo.extraName( 1 );
- ASSERT_EQUALS( "foo.bar$extrb", str1 );
- Namespace ex1( str1 );
- ASSERT_TRUE( ex1.isExtra() );
+ string str0 = foo.extraName(0);
+ ASSERT_EQUALS("foo.bar$extra", str0);
+ Namespace ex0(str0);
+ ASSERT_TRUE(ex0.isExtra());
- }
+ string str1 = foo.extraName(1);
+ ASSERT_EQUALS("foo.bar$extrb", str1);
+ Namespace ex1(str1);
+ ASSERT_TRUE(ex1.isExtra());
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/compress.cpp b/src/mongo/db/storage/mmap_v1/compress.cpp
index bae8bc5acba..8f8dce527ed 100644
--- a/src/mongo/db/storage/mmap_v1/compress.cpp
+++ b/src/mongo/db/storage/mmap_v1/compress.cpp
@@ -36,24 +36,22 @@
namespace mongo {
- void rawCompress(const char* input,
- size_t input_length,
- char* compressed,
- size_t* compressed_length)
- {
- snappy::RawCompress(input, input_length, compressed, compressed_length);
- }
-
- size_t maxCompressedLength(size_t source_len) {
- return snappy::MaxCompressedLength(source_len);
- }
-
- size_t compress(const char* input, size_t input_length, std::string* output) {
- return snappy::Compress(input, input_length, output);
- }
-
- bool uncompress(const char* compressed, size_t compressed_length, std::string* uncompressed) {
- return snappy::Uncompress(compressed, compressed_length, uncompressed);
- }
+void rawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length) {
+ snappy::RawCompress(input, input_length, compressed, compressed_length);
+}
+
+size_t maxCompressedLength(size_t source_len) {
+ return snappy::MaxCompressedLength(source_len);
+}
+size_t compress(const char* input, size_t input_length, std::string* output) {
+ return snappy::Compress(input, input_length, output);
+}
+
+bool uncompress(const char* compressed, size_t compressed_length, std::string* uncompressed) {
+ return snappy::Uncompress(compressed, compressed_length, uncompressed);
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/compress.h b/src/mongo/db/storage/mmap_v1/compress.h
index b8afa4d90c5..8ff828a93a6 100644
--- a/src/mongo/db/storage/mmap_v1/compress.h
+++ b/src/mongo/db/storage/mmap_v1/compress.h
@@ -32,18 +32,15 @@
#include <string>
-namespace mongo {
+namespace mongo {
- size_t compress(const char* input, size_t input_length, std::string* output);
+size_t compress(const char* input, size_t input_length, std::string* output);
- bool uncompress(const char* compressed, size_t compressed_length, std::string* uncompressed);
-
- size_t maxCompressedLength(size_t source_len);
- void rawCompress(const char* input,
- size_t input_length,
- char* compressed,
- size_t* compressed_length);
+bool uncompress(const char* compressed, size_t compressed_length, std::string* uncompressed);
+size_t maxCompressedLength(size_t source_len);
+void rawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length);
}
-
-
diff --git a/src/mongo/db/storage/mmap_v1/data_file.cpp b/src/mongo/db/storage/mmap_v1/data_file.cpp
index 15fbaba024d..90f6b71b7c6 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file.cpp
@@ -47,216 +47,201 @@
namespace mongo {
- using std::endl;
+using std::endl;
namespace {
- void data_file_check(void *_mb) {
- if (sizeof(char *) == 4) {
- uassert(10084,
- "can't map file memory - mongo requires 64 bit build for larger datasets",
- _mb != NULL);
- }
- else {
- uassert(10085, "can't map file memory", _mb != NULL);
- }
+void data_file_check(void* _mb) {
+ if (sizeof(char*) == 4) {
+ uassert(10084,
+ "can't map file memory - mongo requires 64 bit build for larger datasets",
+ _mb != NULL);
+ } else {
+ uassert(10085, "can't map file memory", _mb != NULL);
}
+}
-} // namespace
+} // namespace
- BOOST_STATIC_ASSERT(DataFileHeader::HeaderSize == 8192);
- BOOST_STATIC_ASSERT(sizeof(static_cast<DataFileHeader*>(NULL)->data) == 4);
- BOOST_STATIC_ASSERT(
- sizeof(DataFileHeader) - sizeof(static_cast<DataFileHeader*>(NULL)->data)
- == DataFileHeader::HeaderSize);
+BOOST_STATIC_ASSERT(DataFileHeader::HeaderSize == 8192);
+BOOST_STATIC_ASSERT(sizeof(static_cast<DataFileHeader*>(NULL)->data) == 4);
+BOOST_STATIC_ASSERT(sizeof(DataFileHeader) - sizeof(static_cast<DataFileHeader*>(NULL)->data) ==
+ DataFileHeader::HeaderSize);
- int DataFile::maxSize() {
- if ( sizeof( int* ) == 4 ) {
- return 512 * 1024 * 1024;
- }
- else if (mmapv1GlobalOptions.smallfiles) {
- return 0x7ff00000 >> 2;
- }
- else {
- return 0x7ff00000;
- }
+int DataFile::maxSize() {
+ if (sizeof(int*) == 4) {
+ return 512 * 1024 * 1024;
+ } else if (mmapv1GlobalOptions.smallfiles) {
+ return 0x7ff00000 >> 2;
+ } else {
+ return 0x7ff00000;
}
+}
+
+NOINLINE_DECL void DataFile::badOfs(int ofs) const {
+ msgasserted(13440,
+ str::stream() << "bad offset:" << ofs << " accessing file: " << mmf.filename()
+ << ". See http://dochub.mongodb.org/core/data-recovery");
+}
- NOINLINE_DECL void DataFile::badOfs(int ofs) const {
- msgasserted(13440, str::stream() << "bad offset:" << ofs
- << " accessing file: " << mmf.filename()
- << ". See http://dochub.mongodb.org/core/data-recovery");
+int DataFile::_defaultSize() const {
+ int size;
+
+ if (_fileNo <= 4) {
+ size = (64 * 1024 * 1024) << _fileNo;
+ } else {
+ size = 0x7ff00000;
}
- int DataFile::_defaultSize() const {
- int size;
+ if (mmapv1GlobalOptions.smallfiles) {
+ size = size >> 2;
+ }
- if (_fileNo <= 4) {
- size = (64 * 1024 * 1024) << _fileNo;
- }
- else {
- size = 0x7ff00000;
- }
+ return size;
+}
- if (mmapv1GlobalOptions.smallfiles) {
- size = size >> 2;
- }
+/** @return Status::OK() if found and opened. If uninitialized (prealloc only), does not open. */
+Status DataFile::openExisting(const char* filename) {
+ invariant(_mb == 0);
- return size;
+ if (!boost::filesystem::exists(filename)) {
+ return Status(ErrorCodes::InvalidPath, "DataFile::openExisting - file does not exist");
}
- /** @return true if found and opened. if uninitialized (prealloc only) does not open. */
- Status DataFile::openExisting(const char *filename) {
- invariant(_mb == 0);
-
- if (!boost::filesystem::exists(filename)) {
- return Status(ErrorCodes::InvalidPath, "DataFile::openExisting - file does not exist");
- }
+ if (!mmf.open(filename, false)) {
+ return Status(ErrorCodes::InternalError, "DataFile::openExisting - mmf.open failed");
+ }
- if (!mmf.open(filename, false)) {
- return Status(ErrorCodes::InternalError, "DataFile::openExisting - mmf.open failed");
- }
+ // The mapped view of the file should never be NULL if the open call above succeeded.
+ _mb = mmf.getView();
+ invariant(_mb);
- // The mapped view of the file should never be NULL if the open call above succeeded.
- _mb = mmf.getView();
- invariant(_mb);
+ const uint64_t sz = mmf.length();
+ invariant(sz <= 0x7fffffff);
+ invariant(sz % 4096 == 0);
- const uint64_t sz = mmf.length();
- invariant(sz <= 0x7fffffff);
- invariant(sz % 4096 == 0);
-
- if (sz < 64*1024*1024 && !mmapv1GlobalOptions.smallfiles) {
- if( sz >= 16*1024*1024 && sz % (1024*1024) == 0 ) {
- log() << "info openExisting file size " << sz
- << " but mmapv1GlobalOptions.smallfiles=false: "
- << filename << endl;
- }
- else {
- log() << "openExisting size " << sz << " less than minimum file size expectation "
- << filename << endl;
- verify(false);
- }
+ if (sz < 64 * 1024 * 1024 && !mmapv1GlobalOptions.smallfiles) {
+ if (sz >= 16 * 1024 * 1024 && sz % (1024 * 1024) == 0) {
+ log() << "info openExisting file size " << sz
+ << " but mmapv1GlobalOptions.smallfiles=false: " << filename << endl;
+ } else {
+ log() << "openExisting size " << sz << " less than minimum file size expectation "
+ << filename << endl;
+ verify(false);
}
-
- data_file_check(_mb);
- return Status::OK();
}
- void DataFile::open( OperationContext* txn,
- const char *filename,
- int minSize,
- bool preallocateOnly ) {
-
- long size = _defaultSize();
-
- while (size < minSize) {
- if (size < maxSize() / 2) {
- size *= 2;
- }
- else {
- size = maxSize();
- break;
- }
- }
+ data_file_check(_mb);
+ return Status::OK();
+}
+
+void DataFile::open(OperationContext* txn,
+ const char* filename,
+ int minSize,
+ bool preallocateOnly) {
+ long size = _defaultSize();
- if (size > maxSize()) {
+ while (size < minSize) {
+ if (size < maxSize() / 2) {
+ size *= 2;
+ } else {
size = maxSize();
+ break;
}
+ }
- invariant(size >= 64 * 1024 * 1024 || mmapv1GlobalOptions.smallfiles);
- invariant( size % 4096 == 0 );
+ if (size > maxSize()) {
+ size = maxSize();
+ }
- if ( preallocateOnly ) {
- if (mmapv1GlobalOptions.prealloc) {
- FileAllocator::get()->requestAllocation( filename, size );
- }
- return;
- }
+ invariant(size >= 64 * 1024 * 1024 || mmapv1GlobalOptions.smallfiles);
+ invariant(size % 4096 == 0);
- {
- invariant(_mb == 0);
- unsigned long long sz = size;
- if (mmf.create(filename, sz, false)) {
- _mb = mmf.getView();
- }
+ if (preallocateOnly) {
+ if (mmapv1GlobalOptions.prealloc) {
+ FileAllocator::get()->requestAllocation(filename, size);
+ }
+ return;
+ }
- invariant(sz <= 0x7fffffff);
- size = (int)sz;
+ {
+ invariant(_mb == 0);
+ unsigned long long sz = size;
+ if (mmf.create(filename, sz, false)) {
+ _mb = mmf.getView();
}
- data_file_check(_mb);
- header()->init(txn, _fileNo, size, filename);
+ invariant(sz <= 0x7fffffff);
+ size = (int)sz;
}
- void DataFile::flush( bool sync ) {
- mmf.flush( sync );
- }
+ data_file_check(_mb);
+ header()->init(txn, _fileNo, size, filename);
+}
- DiskLoc DataFile::allocExtentArea( OperationContext* txn, int size ) {
- // The header would be NULL if file open failed. However, if file open failed we should
- // never be entering here.
- invariant(header());
- invariant(size <= header()->unusedLength);
+void DataFile::flush(bool sync) {
+ mmf.flush(sync);
+}
- int offset = header()->unused.getOfs();
+DiskLoc DataFile::allocExtentArea(OperationContext* txn, int size) {
+ // The header would be NULL if file open failed. However, if file open failed we should
+ // never be entering here.
+ invariant(header());
+ invariant(size <= header()->unusedLength);
- DataFileHeader *h = header();
- *txn->recoveryUnit()->writing(&h->unused) = DiskLoc(_fileNo, offset + size);
- txn->recoveryUnit()->writingInt(h->unusedLength) = h->unusedLength - size;
+ int offset = header()->unused.getOfs();
- return DiskLoc(_fileNo, offset);
- }
+ DataFileHeader* h = header();
+ *txn->recoveryUnit()->writing(&h->unused) = DiskLoc(_fileNo, offset + size);
+ txn->recoveryUnit()->writingInt(h->unusedLength) = h->unusedLength - size;
- // -------------------------------------------------------------------------------
-
- void DataFileHeader::init(OperationContext* txn,
- int fileno,
- int filelength,
- const char* filename) {
-
- if (uninitialized()) {
- DEV log() << "datafileheader::init initializing " << filename << " n:" << fileno << endl;
-
- massert(13640,
- str::stream() << "DataFileHeader looks corrupt at file open filelength:"
- << filelength << " fileno:" << fileno,
- filelength > 32768);
-
- // The writes done in this function must not be rolled back. If the containing
- // UnitOfWork rolls back it should roll back to the state *after* these writes. This
- // will leave the file empty, but available for future use. That is why we go directly
- // to the global dur dirty list rather than going through the RecoveryUnit.
- getDur().createdFile(filename, filelength);
-
- typedef std::pair<void*, unsigned> Intent;
- std::vector<Intent> intent;
- intent.push_back(std::make_pair(this, sizeof(DataFileHeader)));
- privateViews.makeWritable(this, sizeof(DataFileHeader));
- getDur().declareWriteIntents(intent);
-
- fileLength = filelength;
- version = DataFileVersion::defaultForNewFiles();
- unused.set(fileno, HeaderSize);
- unusedLength = fileLength - HeaderSize - 16;
- freeListStart.Null();
- freeListEnd.Null();
- }
- else {
- checkUpgrade(txn);
- }
- }
+ return DiskLoc(_fileNo, offset);
+}
- void DataFileHeader::checkUpgrade(OperationContext* txn) {
- if ( freeListStart == DiskLoc(0, 0) ) {
- // we are upgrading from 2.4 to 2.6
- invariant(freeListEnd == DiskLoc(0, 0)); // both start and end should be (0,0) or real
- WriteUnitOfWork wunit(txn);
- *txn->recoveryUnit()->writing( &freeListStart ) = DiskLoc();
- *txn->recoveryUnit()->writing( &freeListEnd ) = DiskLoc();
- wunit.commit();
- }
+// -------------------------------------------------------------------------------
+
+void DataFileHeader::init(OperationContext* txn, int fileno, int filelength, const char* filename) {
+ if (uninitialized()) {
+ DEV log() << "datafileheader::init initializing " << filename << " n:" << fileno << endl;
+
+ massert(13640,
+ str::stream() << "DataFileHeader looks corrupt at file open filelength:"
+ << filelength << " fileno:" << fileno,
+ filelength > 32768);
+
+ // The writes done in this function must not be rolled back. If the containing
+ // UnitOfWork rolls back it should roll back to the state *after* these writes. This
+ // will leave the file empty, but available for future use. That is why we go directly
+ // to the global dur dirty list rather than going through the RecoveryUnit.
+ getDur().createdFile(filename, filelength);
+
+ typedef std::pair<void*, unsigned> Intent;
+ std::vector<Intent> intent;
+ intent.push_back(std::make_pair(this, sizeof(DataFileHeader)));
+ privateViews.makeWritable(this, sizeof(DataFileHeader));
+ getDur().declareWriteIntents(intent);
+
+ fileLength = filelength;
+ version = DataFileVersion::defaultForNewFiles();
+ unused.set(fileno, HeaderSize);
+ unusedLength = fileLength - HeaderSize - 16;
+ freeListStart.Null();
+ freeListEnd.Null();
+ } else {
+ checkUpgrade(txn);
}
+}
+void DataFileHeader::checkUpgrade(OperationContext* txn) {
+ if (freeListStart == DiskLoc(0, 0)) {
+ // we are upgrading from 2.4 to 2.6
+ invariant(freeListEnd == DiskLoc(0, 0)); // both start and end should be (0,0) or real
+ WriteUnitOfWork wunit(txn);
+ *txn->recoveryUnit()->writing(&freeListStart) = DiskLoc();
+ *txn->recoveryUnit()->writing(&freeListEnd) = DiskLoc();
+ wunit.commit();
+ }
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/data_file.h b/src/mongo/db/storage/mmap_v1/data_file.h
index 6eddb092478..ed6e08e7931 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.h
+++ b/src/mongo/db/storage/mmap_v1/data_file.h
@@ -35,158 +35,181 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
#pragma pack(1)
- class DataFileVersion {
- public:
- DataFileVersion(uint32_t major, uint32_t minor) :_major(major), _minor(minor) {}
-
- static DataFileVersion defaultForNewFiles() {
- return DataFileVersion(kCurrentMajor, kIndexes24AndNewer
- | kMayHave28Freelist
- );
- }
-
- bool isCompatibleWithCurrentCode() const {
- if (_major != kCurrentMajor)
- return false;
-
- if (_minor & ~kUsedMinorFlagsMask)
- return false;
-
- const uint32_t indexCleanliness = _minor & kIndexPluginMask;
- if (indexCleanliness != kIndexes24AndNewer && indexCleanliness != kIndexes22AndOlder)
- return false;
-
- // We are compatible with either setting of kMayHave28Freelist.
-
- return true;
- }
-
- bool is24IndexClean() const { return (_minor & kIndexPluginMask) == kIndexes24AndNewer; }
- void setIs24IndexClean() { _minor = ((_minor & ~kIndexPluginMask) | kIndexes24AndNewer); }
-
- bool mayHave28Freelist() const { return _minor & kMayHave28Freelist; }
- void setMayHave28Freelist() { _minor |= kMayHave28Freelist; }
-
- uint32_t majorRaw() const { return _major; }
- uint32_t minorRaw() const { return _minor; }
-
- private:
- static const uint32_t kCurrentMajor = 4;
-
- // minor layout:
- // first 4 bits - index plugin cleanliness.
- // see IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded for details
- // 5th bit - 1 if started with 3.0-style freelist implementation (SERVER-14081)
- // 6th through 31st bit - reserved and must be set to 0.
- static const uint32_t kIndexPluginMask = 0xf;
- static const uint32_t kIndexes22AndOlder = 5;
- static const uint32_t kIndexes24AndNewer = 6;
-
- static const uint32_t kMayHave28Freelist = (1 << 4);
-
- // All set bits we know about are covered by this mask.
- static const uint32_t kUsedMinorFlagsMask = 0x1f;
-
- uint32_t _major;
- uint32_t _minor;
- };
-
- // Note: Intentionally not defining relational operators for DataFileVersion as there is no
- // total ordering of all versions now that '_minor' is used as a bit vector.
+class DataFileVersion {
+public:
+ DataFileVersion(uint32_t major, uint32_t minor) : _major(major), _minor(minor) {}
+
+ static DataFileVersion defaultForNewFiles() {
+ return DataFileVersion(kCurrentMajor, kIndexes24AndNewer | kMayHave28Freelist);
+ }
+
+ bool isCompatibleWithCurrentCode() const {
+ if (_major != kCurrentMajor)
+ return false;
+
+ if (_minor & ~kUsedMinorFlagsMask)
+ return false;
+
+ const uint32_t indexCleanliness = _minor & kIndexPluginMask;
+ if (indexCleanliness != kIndexes24AndNewer && indexCleanliness != kIndexes22AndOlder)
+ return false;
+
+ // We are compatible with either setting of kMayHave28Freelist.
+
+ return true;
+ }
+
+ bool is24IndexClean() const {
+ return (_minor & kIndexPluginMask) == kIndexes24AndNewer;
+ }
+ void setIs24IndexClean() {
+ _minor = ((_minor & ~kIndexPluginMask) | kIndexes24AndNewer);
+ }
+
+ bool mayHave28Freelist() const {
+ return _minor & kMayHave28Freelist;
+ }
+ void setMayHave28Freelist() {
+ _minor |= kMayHave28Freelist;
+ }
+
+ uint32_t majorRaw() const {
+ return _major;
+ }
+ uint32_t minorRaw() const {
+ return _minor;
+ }
+
+private:
+ static const uint32_t kCurrentMajor = 4;
+
+ // minor layout:
+ // first 4 bits - index plugin cleanliness.
+ // see IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded for details
+ // 5th bit - 1 if started with 3.0-style freelist implementation (SERVER-14081)
+ // 6th through 31st bit - reserved and must be set to 0.
+ static const uint32_t kIndexPluginMask = 0xf;
+ static const uint32_t kIndexes22AndOlder = 5;
+ static const uint32_t kIndexes24AndNewer = 6;
+
+ static const uint32_t kMayHave28Freelist = (1 << 4);
+
+ // All set bits we know about are covered by this mask.
+ static const uint32_t kUsedMinorFlagsMask = 0x1f;
+
+ uint32_t _major;
+ uint32_t _minor;
+};
+
+// Note: Intentionally not defining relational operators for DataFileVersion as there is no
+// total ordering of all versions now that '_minor' is used as a bit vector.
#pragma pack()
- /* a datafile - i.e. the "dbname.<#>" files :
-
- ----------------------
- DataFileHeader
- ----------------------
- Extent (for a particular namespace)
- MmapV1RecordHeader
- ...
- MmapV1RecordHeader (some chained for unused space)
- ----------------------
- more Extents...
- ----------------------
- */
+/* a datafile - i.e. the "dbname.<#>" files :
+
+ ----------------------
+ DataFileHeader
+ ----------------------
+ Extent (for a particular namespace)
+ MmapV1RecordHeader
+ ...
+ MmapV1RecordHeader (some chained for unused space)
+ ----------------------
+ more Extents...
+ ----------------------
+*/
#pragma pack(1)
- class DataFileHeader {
- public:
- DataFileVersion version;
- int fileLength;
- DiskLoc unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
- int unusedLength;
- DiskLoc freeListStart;
- DiskLoc freeListEnd;
- char reserved[8192 - 4*4 - 8*3];
+class DataFileHeader {
+public:
+ DataFileVersion version;
+ int fileLength;
+ DiskLoc
+ unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
+ int unusedLength;
+ DiskLoc freeListStart;
+ DiskLoc freeListEnd;
+ char reserved[8192 - 4 * 4 - 8 * 3];
- char data[4]; // first extent starts here
+ char data[4]; // first extent starts here
- enum { HeaderSize = 8192 };
+ enum { HeaderSize = 8192 };
- bool uninitialized() const { return version.majorRaw() == 0; }
+ bool uninitialized() const {
+ return version.majorRaw() == 0;
+ }
- void init(OperationContext* txn, int fileno, int filelength, const char* filename);
+ void init(OperationContext* txn, int fileno, int filelength, const char* filename);
- void checkUpgrade(OperationContext* txn);
+ void checkUpgrade(OperationContext* txn);
- bool isEmpty() const {
- return uninitialized() || ( unusedLength == fileLength - HeaderSize - 16 );
- }
- };
+ bool isEmpty() const {
+ return uninitialized() || (unusedLength == fileLength - HeaderSize - 16);
+ }
+};
#pragma pack()
- class DataFile {
- public:
- DataFile(int fn) : _fileNo(fn), _mb(NULL) {
-
- }
-
- /** @return true if found and opened. if uninitialized (prealloc only) does not open. */
- Status openExisting(const char *filename );
-
- /** creates if DNE */
- void open(OperationContext* txn,
- const char *filename,
- int requestedDataSize = 0,
- bool preallocateOnly = false);
+class DataFile {
+public:
+ DataFile(int fn) : _fileNo(fn), _mb(NULL) {}
- DiskLoc allocExtentArea( OperationContext* txn, int size );
+    /** @return true if found and opened. If uninitialized (prealloc only), does not open. */
+ Status openExisting(const char* filename);
- DataFileHeader* getHeader() { return header(); }
- const DataFileHeader* getHeader() const { return header(); }
+ /** creates if DNE */
+ void open(OperationContext* txn,
+ const char* filename,
+ int requestedDataSize = 0,
+ bool preallocateOnly = false);
- HANDLE getFd() { return mmf.getFd(); }
- unsigned long long length() const { return mmf.length(); }
+ DiskLoc allocExtentArea(OperationContext* txn, int size);
- /* return max size an extent may be */
- static int maxSize();
+ DataFileHeader* getHeader() {
+ return header();
+ }
+ const DataFileHeader* getHeader() const {
+ return header();
+ }
- /** fsync */
- void flush( bool sync );
+ HANDLE getFd() {
+ return mmf.getFd();
+ }
+ unsigned long long length() const {
+ return mmf.length();
+ }
- private:
- friend class MmapV1ExtentManager;
+ /* return max size an extent may be */
+ static int maxSize();
+ /** fsync */
+ void flush(bool sync);
- void badOfs(int) const;
- int _defaultSize() const;
+private:
+ friend class MmapV1ExtentManager;
- void grow(DiskLoc dl, int size);
- char* p() const { return (char *) _mb; }
- DataFileHeader* header() { return static_cast<DataFileHeader*>( _mb ); }
- const DataFileHeader* header() const { return static_cast<DataFileHeader*>( _mb ); }
+ void badOfs(int) const;
+ int _defaultSize() const;
+ void grow(DiskLoc dl, int size);
- const int _fileNo;
+ char* p() const {
+ return (char*)_mb;
+ }
+ DataFileHeader* header() {
+ return static_cast<DataFileHeader*>(_mb);
+ }
+ const DataFileHeader* header() const {
+ return static_cast<DataFileHeader*>(_mb);
+ }
- DurableMappedFile mmf;
- void *_mb; // the memory mapped view
- };
+ const int _fileNo;
+ DurableMappedFile mmf;
+ void* _mb; // the memory mapped view
+};
}
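A standalone sketch (not part of the patch) of the '_minor' bit-vector checks performed by DataFileVersion::isCompatibleWithCurrentCode() above: the low four bits carry the index-plugin state, bit 4 carries the 2.8 freelist flag, and any other set bit means the file was written by newer, unknown code. The constants are copied from the header; minorIsCompatible() is a hypothetical free function for illustration.

#include <cassert>
#include <cstdint>

namespace {
const uint32_t kIndexPluginMask = 0xf;
const uint32_t kIndexes22AndOlder = 5;
const uint32_t kIndexes24AndNewer = 6;
const uint32_t kMayHave28Freelist = 1u << 4;
const uint32_t kUsedMinorFlagsMask = 0x1f;

bool minorIsCompatible(uint32_t minor) {
    if (minor & ~kUsedMinorFlagsMask)
        return false;  // a reserved bit is set, so newer, unknown code wrote this file
    const uint32_t indexCleanliness = minor & kIndexPluginMask;
    return indexCleanliness == kIndexes24AndNewer || indexCleanliness == kIndexes22AndOlder;
}
}  // namespace

int main() {
    assert(minorIsCompatible(kIndexes24AndNewer | kMayHave28Freelist));
    assert(!minorIsCompatible(kIndexes24AndNewer | (1u << 6)));  // reserved bit set
    return 0;
}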
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
index 9579278ded1..013877cb08b 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
@@ -44,95 +44,90 @@
namespace mongo {
- using std::endl;
+using std::endl;
- DataFileSync dataFileSync;
+DataFileSync dataFileSync;
- DataFileSync::DataFileSync()
- : ServerStatusSection( "backgroundFlushing" ),
- _total_time( 0 ),
- _flushes( 0 ),
- _last() {
+DataFileSync::DataFileSync()
+ : ServerStatusSection("backgroundFlushing"), _total_time(0), _flushes(0), _last() {}
- }
-
- void DataFileSync::run() {
- Client::initThread( name().c_str() );
+void DataFileSync::run() {
+ Client::initThread(name().c_str());
+ if (storageGlobalParams.syncdelay == 0) {
+ log() << "warning: --syncdelay 0 is not recommended and can have strange performance"
+ << endl;
+ } else if (storageGlobalParams.syncdelay == 1) {
+ log() << "--syncdelay 1" << endl;
+ } else if (storageGlobalParams.syncdelay != 60) {
+ LOG(1) << "--syncdelay " << storageGlobalParams.syncdelay << endl;
+ }
+ int time_flushing = 0;
+ while (!inShutdown()) {
+ _diaglog.flush();
if (storageGlobalParams.syncdelay == 0) {
- log() << "warning: --syncdelay 0 is not recommended and can have strange performance" << endl;
- }
- else if (storageGlobalParams.syncdelay == 1) {
- log() << "--syncdelay 1" << endl;
+ // in case at some point we add an option to change at runtime
+ sleepsecs(5);
+ continue;
}
- else if (storageGlobalParams.syncdelay != 60) {
- LOG(1) << "--syncdelay " << storageGlobalParams.syncdelay << endl;
- }
- int time_flushing = 0;
- while ( ! inShutdown() ) {
- _diaglog.flush();
- if (storageGlobalParams.syncdelay == 0) {
- // in case at some point we add an option to change at runtime
- sleepsecs(5);
- continue;
- }
-
- sleepmillis((long long) std::max(0.0, (storageGlobalParams.syncdelay * 1000) - time_flushing));
-
- if ( inShutdown() ) {
- // occasional issue trying to flush during shutdown when sleep interrupted
- break;
- }
-
- Date_t start = jsTime();
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- int numFiles = storageEngine->flushAllFiles( true );
- time_flushing = (jsTime() - start).count();
-
- _flushed(time_flushing);
-
- if( shouldLog(logger::LogSeverity::Debug(1)) || time_flushing >= 10000 ) {
- log() << "flushing mmaps took " << time_flushing << "ms " << " for " << numFiles << " files" << endl;
- }
- }
- }
- BSONObj DataFileSync::generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
- if (!running()) {
- return BSONObj();
+ sleepmillis(
+ (long long)std::max(0.0, (storageGlobalParams.syncdelay * 1000) - time_flushing));
+
+ if (inShutdown()) {
+ // occasional issue trying to flush during shutdown when sleep interrupted
+ break;
}
- BSONObjBuilder b;
- b.appendNumber( "flushes" , _flushes );
- b.appendNumber( "total_ms" , _total_time );
- b.appendNumber( "average_ms" , (_flushes ? (_total_time / double(_flushes)) : 0.0) );
- b.appendNumber( "last_ms" , _last_time );
- b.append("last_finished", _last);
- return b.obj();
+ Date_t start = jsTime();
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ int numFiles = storageEngine->flushAllFiles(true);
+ time_flushing = (jsTime() - start).count();
+
+ _flushed(time_flushing);
+
+ if (shouldLog(logger::LogSeverity::Debug(1)) || time_flushing >= 10000) {
+ log() << "flushing mmaps took " << time_flushing << "ms "
+ << " for " << numFiles << " files" << endl;
+ }
}
+}
- void DataFileSync::_flushed(int ms) {
- _flushes++;
- _total_time += ms;
- _last_time = ms;
- _last = jsTime();
+BSONObj DataFileSync::generateSection(OperationContext* txn,
+ const BSONElement& configElement) const {
+ if (!running()) {
+ return BSONObj();
}
+ BSONObjBuilder b;
+ b.appendNumber("flushes", _flushes);
+ b.appendNumber("total_ms", _total_time);
+ b.appendNumber("average_ms", (_flushes ? (_total_time / double(_flushes)) : 0.0));
+ b.appendNumber("last_ms", _last_time);
+ b.append("last_finished", _last);
+ return b.obj();
+}
+
+void DataFileSync::_flushed(int ms) {
+ _flushes++;
+ _total_time += ms;
+ _last_time = ms;
+ _last = jsTime();
+}
- class MemJournalServerStatusMetric : public ServerStatusMetric {
- public:
- MemJournalServerStatusMetric() : ServerStatusMetric(".mem.mapped") {}
- virtual void appendAtLeaf( BSONObjBuilder& b ) const {
- int m = static_cast<int>(MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ));
- b.appendNumber( "mapped" , m );
- if (storageGlobalParams.dur) {
- m *= 2;
- b.appendNumber( "mappedWithJournal" , m );
- }
+class MemJournalServerStatusMetric : public ServerStatusMetric {
+public:
+ MemJournalServerStatusMetric() : ServerStatusMetric(".mem.mapped") {}
+ virtual void appendAtLeaf(BSONObjBuilder& b) const {
+ int m = static_cast<int>(MemoryMappedFile::totalMappedLength() / (1024 * 1024));
+ b.appendNumber("mapped", m);
+ if (storageGlobalParams.dur) {
+ m *= 2;
+ b.appendNumber("mappedWithJournal", m);
}
+ }
- } memJournalServerStatusMetric;
+} memJournalServerStatusMetric;
}
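A minimal standalone sketch (not the server code) of the pacing used by DataFileSync::run() above: each iteration sleeps for syncdelay seconds minus the time the previous flush took, clamped at zero, so flushes start roughly every syncdelay seconds regardless of how long each flush runs. The values below are illustrative only.

#include <algorithm>
#include <iostream>

int main() {
    const double syncdelaySeconds = 60.0;  // default --syncdelay
    int timeFlushingMs = 0;                // duration of the previous flush

    for (int i = 0; i < 3; i++) {
        const long long sleepMs =
            (long long)std::max(0.0, syncdelaySeconds * 1000 - timeFlushingMs);
        std::cout << "sleep " << sleepMs << "ms before the next flush\n";

        timeFlushingMs = 1500;  // pretend the flush itself took 1500ms this cycle
    }
    return 0;
}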
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.h b/src/mongo/db/storage/mmap_v1/data_file_sync.h
index a92f55b64f8..b204fdad019 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.h
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.h
@@ -33,30 +33,32 @@
namespace mongo {
- /**
- * does background async flushes of mmapped files
- */
- class DataFileSync : public BackgroundJob , public ServerStatusSection {
- public:
- DataFileSync();
-
- virtual bool includeByDefault() const { return true; }
- virtual std::string name() const { return "DataFileSync"; }
+/**
+ * does background async flushes of mmapped files
+ */
+class DataFileSync : public BackgroundJob, public ServerStatusSection {
+public:
+ DataFileSync();
- void run();
+ virtual bool includeByDefault() const {
+ return true;
+ }
+ virtual std::string name() const {
+ return "DataFileSync";
+ }
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const;
+ void run();
- private:
- void _flushed(int ms);
+ virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
- long long _total_time;
- long long _flushes;
- int _last_time;
- Date_t _last;
+private:
+ void _flushed(int ms);
- };
+ long long _total_time;
+ long long _flushes;
+ int _last_time;
+ Date_t _last;
+};
- extern DataFileSync dataFileSync;
+extern DataFileSync dataFileSync;
}
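A standalone sketch (not the server code) of the bookkeeping behind the "backgroundFlushing" section declared above, assuming the obvious semantics of the private fields: _flushed() accumulates per-flush totals, and the average_ms value in generateSection() is derived lazily from them.

#include <cassert>

struct FlushStats {
    long long total_time = 0;  // mirrors _total_time
    long long flushes = 0;     // mirrors _flushes
    int last_time = 0;         // mirrors _last_time

    void flushed(int ms) {  // what _flushed(ms) records
        flushes++;
        total_time += ms;
        last_time = ms;
    }

    double averageMs() const {  // the average_ms field reported by generateSection()
        return flushes ? total_time / double(flushes) : 0.0;
    }
};

int main() {
    FlushStats s;
    s.flushed(100);
    s.flushed(300);
    assert(s.averageMs() == 200.0 && s.last_time == 300);
    return 0;
}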
diff --git a/src/mongo/db/storage/mmap_v1/diskloc.h b/src/mongo/db/storage/mmap_v1/diskloc.h
index 9d3adc64da7..662daf074d5 100644
--- a/src/mongo/db/storage/mmap_v1/diskloc.h
+++ b/src/mongo/db/storage/mmap_v1/diskloc.h
@@ -43,149 +43,176 @@
namespace mongo {
- template< class Version > class BtreeBucket;
+template <class Version>
+class BtreeBucket;
#pragma pack(1)
- /** represents a disk location/offset on disk in a database. 64 bits.
- it is assumed these will be passed around by value a lot so don't do anything to make them large
- (such as adding a virtual function)
- */
- class DiskLoc {
- int _a; // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
- int ofs;
-
- public:
-
- enum SentinelValues {
- /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
- NullOfs = -1,
-
- // Caps the number of files that may be allocated in a database, allowing about 32TB of
- // data per db. Note that the DiskLoc and DiskLoc56Bit types supports more files than
- // this value, as does the data storage format.
- MaxFiles=16000,
-
- // How invalid DiskLocs are represented in RecordIds.
- InvalidRepr = -2LL,
- };
-
- DiskLoc(int a, int Ofs) : _a(a), ofs(Ofs) { }
- DiskLoc() { Null(); }
-
- // Minimum allowed DiskLoc. No MmapV1RecordHeader may begin at this location because file and extent
- // headers must precede Records in a file.
- static DiskLoc min() { return DiskLoc(0, 0); }
-
- // Maximum allowed DiskLoc.
- // No MmapV1RecordHeader may begin at this location because the minimum size of a MmapV1RecordHeader is larger than
- // one byte. Also, the last bit is not able to be used because mmapv1 uses that for "used".
- static DiskLoc max() { return DiskLoc(0x7fffffff, 0x7ffffffe); }
-
- bool questionable() const {
- return ofs < -1 ||
- _a < -1 ||
- _a > 524288;
- }
+/** represents a disk location/offset on disk in a database. 64 bits.
+ it is assumed these will be passed around by value a lot so don't do anything to make them large
+ (such as adding a virtual function)
+ */
+class DiskLoc {
+    int _a;  // this will be volume, file #, etc., but it is a logical value; it could be anything depending on the storage engine
+ int ofs;
+
+public:
+ enum SentinelValues {
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ NullOfs = -1,
+
+ // Caps the number of files that may be allocated in a database, allowing about 32TB of
+        // data per db. Note that the DiskLoc and DiskLoc56Bit types support more files than
+ // this value, as does the data storage format.
+ MaxFiles = 16000,
+
+ // How invalid DiskLocs are represented in RecordIds.
+ InvalidRepr = -2LL,
+ };
- bool isNull() const { return _a == -1; }
- DiskLoc& Null() {
- _a = -1;
- ofs = 0; /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
- return *this;
- }
- void assertOk() const { verify(!isNull()); }
- DiskLoc& setInvalid() {
- _a = -2;
- ofs = 0;
- return *this;
- }
- bool isValid() const { return _a != -2; }
-
- std::string toString() const {
- if ( isNull() )
- return "null";
- std::stringstream ss;
- ss << _a << ':' << std::hex << ofs;
- return ss.str();
- }
+ DiskLoc(int a, int Ofs) : _a(a), ofs(Ofs) {}
+ DiskLoc() {
+ Null();
+ }
- BSONObj toBSONObj() const { return BSON( "file" << _a << "offset" << ofs ); }
+ // Minimum allowed DiskLoc. No MmapV1RecordHeader may begin at this location because file and extent
+ // headers must precede Records in a file.
+ static DiskLoc min() {
+ return DiskLoc(0, 0);
+ }
- int a() const { return _a; }
+ // Maximum allowed DiskLoc.
+ // No MmapV1RecordHeader may begin at this location because the minimum size of a MmapV1RecordHeader is larger than
+ // one byte. Also, the last bit is not able to be used because mmapv1 uses that for "used".
+ static DiskLoc max() {
+ return DiskLoc(0x7fffffff, 0x7ffffffe);
+ }
- int& GETOFS() { return ofs; }
- int getOfs() const { return ofs; }
- void set(int a, int b) {
- _a=a;
- ofs=b;
- }
+ bool questionable() const {
+ return ofs < -1 || _a < -1 || _a > 524288;
+ }
- void inc(int amt) {
- verify( !isNull() );
- ofs += amt;
- }
+ bool isNull() const {
+ return _a == -1;
+ }
+ DiskLoc& Null() {
+ _a = -1;
+ ofs =
+ 0; /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ return *this;
+ }
+ void assertOk() const {
+ verify(!isNull());
+ }
+ DiskLoc& setInvalid() {
+ _a = -2;
+ ofs = 0;
+ return *this;
+ }
+ bool isValid() const {
+ return _a != -2;
+ }
- bool sameFile(DiskLoc b) {
- return _a== b._a;
- }
+ std::string toString() const {
+ if (isNull())
+ return "null";
+ std::stringstream ss;
+ ss << _a << ':' << std::hex << ofs;
+ return ss.str();
+ }
- bool operator==(const DiskLoc& b) const {
- return _a==b._a&& ofs == b.ofs;
- }
- bool operator!=(const DiskLoc& b) const {
- return !(*this==b);
- }
- int compare(const DiskLoc& b) const {
- int x = _a - b._a;
- if ( x )
- return x;
- return ofs - b.ofs;
- }
+ BSONObj toBSONObj() const {
+ return BSON("file" << _a << "offset" << ofs);
+ }
- static DiskLoc fromRecordId(RecordId id) {
- if (id.isNormal())
- return DiskLoc((id.repr() >> 32), uint32_t(id.repr()));
+ int a() const {
+ return _a;
+ }
- if (id.isNull())
- return DiskLoc();
+ int& GETOFS() {
+ return ofs;
+ }
+ int getOfs() const {
+ return ofs;
+ }
+ void set(int a, int b) {
+ _a = a;
+ ofs = b;
+ }
- if (id == RecordId::max())
- return DiskLoc::max();
+ void inc(int amt) {
+ verify(!isNull());
+ ofs += amt;
+ }
- if (id == RecordId::min())
- return DiskLoc::min();
+ bool sameFile(DiskLoc b) {
+ return _a == b._a;
+ }
- dassert(id.repr() == InvalidRepr);
- return DiskLoc().setInvalid();
- }
+ bool operator==(const DiskLoc& b) const {
+ return _a == b._a && ofs == b.ofs;
+ }
+ bool operator!=(const DiskLoc& b) const {
+ return !(*this == b);
+ }
+ int compare(const DiskLoc& b) const {
+ int x = _a - b._a;
+ if (x)
+ return x;
+ return ofs - b.ofs;
+ }
+
+ static DiskLoc fromRecordId(RecordId id) {
+ if (id.isNormal())
+ return DiskLoc((id.repr() >> 32), uint32_t(id.repr()));
- RecordId toRecordId() const {
- if (_a >= 0) {
- if (*this == DiskLoc::min())
- return RecordId::min();
+ if (id.isNull())
+ return DiskLoc();
- if (*this == DiskLoc::max())
- return RecordId::max();
+ if (id == RecordId::max())
+ return DiskLoc::max();
- return RecordId(uint64_t(_a) << 32 | uint32_t(ofs));
- }
+ if (id == RecordId::min())
+ return DiskLoc::min();
+
+ dassert(id.repr() == InvalidRepr);
+ return DiskLoc().setInvalid();
+ }
- if (isNull())
- return RecordId();
+ RecordId toRecordId() const {
+ if (_a >= 0) {
+ if (*this == DiskLoc::min())
+ return RecordId::min();
- dassert(!isValid());
- return RecordId(InvalidRepr);
+ if (*this == DiskLoc::max())
+ return RecordId::max();
+
+ return RecordId(uint64_t(_a) << 32 | uint32_t(ofs));
}
- };
-#pragma pack()
- inline bool operator< (const DiskLoc& rhs, const DiskLoc& lhs) { return rhs.compare(lhs) < 0; }
- inline bool operator<=(const DiskLoc& rhs, const DiskLoc& lhs) { return rhs.compare(lhs) <= 0; }
- inline bool operator> (const DiskLoc& rhs, const DiskLoc& lhs) { return rhs.compare(lhs) > 0; }
- inline bool operator>=(const DiskLoc& rhs, const DiskLoc& lhs) { return rhs.compare(lhs) >= 0; }
+ if (isNull())
+ return RecordId();
- inline std::ostream& operator<<( std::ostream &stream, const DiskLoc &loc ) {
- return stream << loc.toString();
+ dassert(!isValid());
+ return RecordId(InvalidRepr);
}
+};
+#pragma pack()
-} // namespace mongo
+inline bool operator<(const DiskLoc& rhs, const DiskLoc& lhs) {
+ return rhs.compare(lhs) < 0;
+}
+inline bool operator<=(const DiskLoc& rhs, const DiskLoc& lhs) {
+ return rhs.compare(lhs) <= 0;
+}
+inline bool operator>(const DiskLoc& rhs, const DiskLoc& lhs) {
+ return rhs.compare(lhs) > 0;
+}
+inline bool operator>=(const DiskLoc& rhs, const DiskLoc& lhs) {
+ return rhs.compare(lhs) >= 0;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const DiskLoc& loc) {
+ return stream << loc.toString();
+}
+
+} // namespace mongo
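A standalone sketch (not part of the header above) of the 64-bit packing used by DiskLoc::toRecordId() and DiskLoc::fromRecordId(): for normal locations the file number occupies the high 32 bits and the offset the low 32 bits, so the pair round-trips losslessly.

#include <cassert>
#include <cstdint>

int main() {
    const int fileNo = 7;
    const int ofs = 0x2000;

    const uint64_t repr = (uint64_t(fileNo) << 32) | uint32_t(ofs);  // toRecordId()

    const int fileBack = int(repr >> 32);     // fromRecordId(): file part
    const int ofsBack = int(uint32_t(repr));  // fromRecordId(): offset part

    assert(fileBack == fileNo && ofsBack == ofs);
    return 0;
}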
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index a596bba061f..21c729eea17 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -38,15 +38,15 @@
have to handle falling behind which would use too much ram (going back into a read lock would suffice to stop that).
for now (1.7.5/1.8.0) we are in read lock which is not ideal.
WRITETODATAFILES
- actually write to the database data files in this phase. currently done by memcpy'ing the writes back to
- the non-private MMF. alternatively one could write to the files the traditional way; however the way our
+ actually write to the database data files in this phase. currently done by memcpy'ing the writes back to
+ the non-private MMF. alternatively one could write to the files the traditional way; however the way our
storage engine works that isn't any faster (actually measured a tiny bit slower).
REMAPPRIVATEVIEW
we could in a write lock quickly flip readers back to the main view, then stay in read lock and do our real
remapping. with many files (e.g., 1000), remapping could be time consuming (several ms), so we don't want
to be too frequent.
there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
- be required. so doing these remaps fractionally is helpful.
+ be required. so doing these remaps fractionally is helpful.
mutexes:
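The fractional remapping mentioned in the comment above is implemented by remapPrivateViewImpl() later in this file; a standalone sketch (not the server code) of its round-robin scheduling: each pass remaps only about sz * fraction files, clamped to [1, sz], and remembers where to resume so successive passes walk the whole file set.

#include <cassert>

int main() {
    const unsigned sz = 1000;     // number of mapped files
    const double fraction = 0.1;  // remap ~10% of them per pass
    unsigned startAt = 0;         // persists across passes, like remapFileToStartAt

    for (int pass = 0; pass < 3; pass++) {
        unsigned ntodo = (unsigned)(sz * fraction);
        if (ntodo < 1)
            ntodo = 1;
        if (ntodo > sz)
            ntodo = sz;

        // Remap files [startAt, startAt + ntodo) modulo sz, then advance the cursor.
        startAt = (startAt + ntodo) % sz;
    }
    assert(startAt == 300);  // three passes of 100 files each
    return 0;
}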
@@ -99,820 +99,788 @@
namespace mongo {
- using std::endl;
- using std::fixed;
- using std::hex;
- using std::set;
- using std::setprecision;
- using std::setw;
- using std::string;
- using std::stringstream;
+using std::endl;
+using std::fixed;
+using std::hex;
+using std::set;
+using std::setprecision;
+using std::setw;
+using std::string;
+using std::stringstream;
namespace dur {
namespace {
- // Used to activate the flush thread
- stdx::mutex flushMutex;
- stdx::condition_variable flushRequested;
+// Used to activate the flush thread
+stdx::mutex flushMutex;
+stdx::condition_variable flushRequested;
- // This is waited on for getlasterror acknowledgements. It means that data has been written to
- // the journal, but not necessarily applied to the shared view, so it is all right to
- // acknowledge the user operation, but NOT all right to delete the journal files for example.
- NotifyAll commitNotify;
+// This is waited on for getlasterror acknowledgements. It means that data has been written to
+// the journal, but not necessarily applied to the shared view, so it is all right to
+// acknowledge the user operation, but NOT all right to delete the journal files for example.
+NotifyAll commitNotify;
- // This is waited on for complete flush. It means that data has been both written to journal
- // and applied to the shared view, so it is allowed to delete the journal files. Used for
- // fsync:true, close DB, shutdown acknowledgements.
- NotifyAll applyToDataFilesNotify;
+// This is waited on for complete flush. It means that data has been both written to journal
+// and applied to the shared view, so it is allowed to delete the journal files. Used for
+// fsync:true, close DB, shutdown acknowledgements.
+NotifyAll applyToDataFilesNotify;
- // When set, the flush thread will exit
- AtomicUInt32 shutdownRequested(0);
+// When set, the flush thread will exit
+AtomicUInt32 shutdownRequested(0);
- enum {
- // How many commit cycles to do before considering doing a remap
- NumCommitsBeforeRemap = 10,
+enum {
+ // How many commit cycles to do before considering doing a remap
+ NumCommitsBeforeRemap = 10,
- // How many outstanding journal flushes should be allowed before applying writer back
- // pressure. Size of 1 allows two journal blocks to be in the process of being written -
- // one on the journal writer's buffer and one blocked waiting to be picked up.
- NumAsyncJournalWrites = 1,
- };
+ // How many outstanding journal flushes should be allowed before applying writer back
+ // pressure. Size of 1 allows two journal blocks to be in the process of being written -
+ // one on the journal writer's buffer and one blocked waiting to be picked up.
+ NumAsyncJournalWrites = 1,
+};
- // Remap loop state
- unsigned remapFileToStartAt;
+// Remap loop state
+unsigned remapFileToStartAt;
- // How frequently to reset the durability statistics
- enum { DurStatsResetIntervalMillis = 3 * 1000 };
+// How frequently to reset the durability statistics
+enum { DurStatsResetIntervalMillis = 3 * 1000 };
- // Size sanity checks
- BOOST_STATIC_ASSERT(UncommittedBytesLimit > BSONObjMaxInternalSize * 3);
- BOOST_STATIC_ASSERT(sizeof(void*) == 4 || UncommittedBytesLimit > BSONObjMaxInternalSize * 6);
+// Size sanity checks
+BOOST_STATIC_ASSERT(UncommittedBytesLimit > BSONObjMaxInternalSize * 3);
+BOOST_STATIC_ASSERT(sizeof(void*) == 4 || UncommittedBytesLimit > BSONObjMaxInternalSize * 6);
- /**
- * MMAP V1 durability server status section.
- */
- class DurSSS : public ServerStatusSection {
- public:
- DurSSS() : ServerStatusSection("dur") {
+/**
+ * MMAP V1 durability server status section.
+ */
+class DurSSS : public ServerStatusSection {
+public:
+ DurSSS() : ServerStatusSection("dur") {}
- }
+ virtual bool includeByDefault() const {
+ return true;
+ }
- virtual bool includeByDefault() const { return true; }
+ virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ if (!getDur().isDurable()) {
+ return BSONObj();
+ }
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
+ return dur::stats.asObj();
+ }
- if (!getDur().isDurable()) {
- return BSONObj();
- }
+} durSSS;
- return dur::stats.asObj();
- }
- } durSSS;
+/**
+ * A no-op durability interface. Used for the case when journaling is not enabled.
+ */
+class NonDurableImpl : public DurableInterface {
+public:
+ NonDurableImpl() {}
+ // DurableInterface virtual methods
+ virtual void* writingPtr(void* x, unsigned len) {
+ return x;
+ }
+ virtual void declareWriteIntent(void*, unsigned) {}
+ virtual void declareWriteIntents(const std::vector<std::pair<void*, unsigned>>& intents) {}
+ virtual void createdFile(const std::string& filename, unsigned long long len) {}
+ virtual bool waitUntilDurable() {
+ return false;
+ }
+ virtual bool commitNow(OperationContext* txn) {
+ return false;
+ }
+ virtual bool commitIfNeeded() {
+ return false;
+ }
+ virtual void syncDataAndTruncateJournal(OperationContext* txn) {}
+ virtual bool isDurable() const {
+ return false;
+ }
+ virtual void closingFileNotification() {}
+ virtual void commitAndStopDurThread() {}
+};
- /**
- * A no-op durability interface. Used for the case when journaling is not enabled.
- */
- class NonDurableImpl : public DurableInterface {
- public:
- NonDurableImpl() { }
- // DurableInterface virtual methods
- virtual void* writingPtr(void *x, unsigned len) { return x; }
- virtual void declareWriteIntent(void*, unsigned) { }
- virtual void declareWriteIntents(const std::vector<std::pair<void*, unsigned> >& intents) {
+/**
+ * The actual durability interface, when journaling is enabled.
+ */
+class DurableImpl : public DurableInterface {
+public:
+ DurableImpl() {}
+
+ // DurableInterface virtual methods
+ virtual void declareWriteIntents(const std::vector<std::pair<void*, unsigned>>& intents);
+ virtual void createdFile(const std::string& filename, unsigned long long len);
+ virtual bool waitUntilDurable();
+ virtual bool commitNow(OperationContext* txn);
+ virtual bool commitIfNeeded();
+ virtual void syncDataAndTruncateJournal(OperationContext* txn);
+ virtual bool isDurable() const {
+ return true;
+ }
+ virtual void closingFileNotification();
+ virtual void commitAndStopDurThread();
- }
- virtual void createdFile(const std::string& filename, unsigned long long len) { }
- virtual bool waitUntilDurable() { return false; }
- virtual bool commitNow(OperationContext* txn) { return false; }
- virtual bool commitIfNeeded() { return false; }
- virtual void syncDataAndTruncateJournal(OperationContext* txn) {}
- virtual bool isDurable() const { return false; }
- virtual void closingFileNotification() { }
- virtual void commitAndStopDurThread() { }
- };
-
-
- /**
- * The actual durability interface, when journaling is enabled.
- */
- class DurableImpl : public DurableInterface {
- public:
- DurableImpl() { }
-
- // DurableInterface virtual methods
- virtual void declareWriteIntents(const std::vector<std::pair<void*, unsigned> >& intents);
- virtual void createdFile(const std::string& filename, unsigned long long len);
- virtual bool waitUntilDurable();
- virtual bool commitNow(OperationContext* txn);
- virtual bool commitIfNeeded();
- virtual void syncDataAndTruncateJournal(OperationContext* txn);
- virtual bool isDurable() const { return true; }
- virtual void closingFileNotification();
- virtual void commitAndStopDurThread();
-
- void start();
-
- private:
- stdx::thread _durThreadHandle;
- };
-
-
- /**
- * Diagnostic to check that the private view and the non-private view are in sync after
- * applying the journal changes. This function is very slow and only runs when paranoid checks
- * are enabled.
- *
- * Must be called under at least S flush lock to ensure that there are no concurrent writes
- * happening.
- */
- void debugValidateFileMapsMatch(const DurableMappedFile* mmf) {
- const unsigned char *p = (const unsigned char *)mmf->getView();
- const unsigned char *w = (const unsigned char *)mmf->view_write();
-
- // Ignore pre-allocated files that are not fully created yet
- if (!p || !w) {
- return;
- }
+ void start();
- if (memcmp(p, w, (unsigned)mmf->length()) == 0) {
- return;
- }
+private:
+ stdx::thread _durThreadHandle;
+};
- unsigned low = 0xffffffff;
- unsigned high = 0;
- log() << "DurParanoid mismatch in " << mmf->filename();
+/**
+ * Diagnostic to check that the private view and the non-private view are in sync after
+ * applying the journal changes. This function is very slow and only runs when paranoid checks
+ * are enabled.
+ *
+ * Must be called under at least S flush lock to ensure that there are no concurrent writes
+ * happening.
+ */
+void debugValidateFileMapsMatch(const DurableMappedFile* mmf) {
+ const unsigned char* p = (const unsigned char*)mmf->getView();
+ const unsigned char* w = (const unsigned char*)mmf->view_write();
- int logged = 0;
- unsigned lastMismatch = 0xffffffff;
+ // Ignore pre-allocated files that are not fully created yet
+ if (!p || !w) {
+ return;
+ }
- for (unsigned i = 0; i < mmf->length(); i++) {
- if (p[i] != w[i]) {
+ if (memcmp(p, w, (unsigned)mmf->length()) == 0) {
+ return;
+ }
- if (lastMismatch != 0xffffffff && lastMismatch + 1 != i) {
- // Separate blocks of mismatches
- log() << std::endl;
- }
+ unsigned low = 0xffffffff;
+ unsigned high = 0;
- lastMismatch = i;
+ log() << "DurParanoid mismatch in " << mmf->filename();
- if (++logged < 60) {
- if (logged == 1) {
- // For .ns files to find offset in record
- log() << "ofs % 628 = 0x" << hex << (i % 628) << endl;
- }
+ int logged = 0;
+ unsigned lastMismatch = 0xffffffff;
- stringstream ss;
- ss << "mismatch ofs:" << hex << i
- << "\tfilemap:" << setw(2) << (unsigned)w[i]
- << "\tprivmap:" << setw(2) << (unsigned)p[i];
+ for (unsigned i = 0; i < mmf->length(); i++) {
+ if (p[i] != w[i]) {
+ if (lastMismatch != 0xffffffff && lastMismatch + 1 != i) {
+ // Separate blocks of mismatches
+ log() << std::endl;
+ }
- if (p[i] > 32 && p[i] <= 126) {
- ss << '\t' << p[i];
- }
+ lastMismatch = i;
- log() << ss.str() << endl;
+ if (++logged < 60) {
+ if (logged == 1) {
+ // For .ns files to find offset in record
+ log() << "ofs % 628 = 0x" << hex << (i % 628) << endl;
}
- if (logged == 60) {
- log() << "..." << endl;
+ stringstream ss;
+ ss << "mismatch ofs:" << hex << i << "\tfilemap:" << setw(2) << (unsigned)w[i]
+ << "\tprivmap:" << setw(2) << (unsigned)p[i];
+
+ if (p[i] > 32 && p[i] <= 126) {
+ ss << '\t' << p[i];
}
- if (i < low) low = i;
- if (i > high) high = i;
+ log() << ss.str() << endl;
}
+
+ if (logged == 60) {
+ log() << "..." << endl;
+ }
+
+ if (i < low)
+ low = i;
+ if (i > high)
+ high = i;
}
+ }
- if (low != 0xffffffff) {
- std::stringstream ss;
- ss << "journal error warning views mismatch " << mmf->filename() << ' '
- << hex << low << ".." << high
- << " len:" << high - low + 1;
+ if (low != 0xffffffff) {
+ std::stringstream ss;
+ ss << "journal error warning views mismatch " << mmf->filename() << ' ' << hex << low
+ << ".." << high << " len:" << high - low + 1;
- log() << ss.str() << endl;
- log() << "priv loc: " << (void*)(p + low) << ' ' << endl;
+ log() << ss.str() << endl;
+ log() << "priv loc: " << (void*)(p + low) << ' ' << endl;
- severe() << "Written data does not match in-memory view. Missing WriteIntent?";
- invariant(false);
- }
+ severe() << "Written data does not match in-memory view. Missing WriteIntent?";
+ invariant(false);
}
+}
- /**
- * Main code of the remap private view function.
- */
- void remapPrivateViewImpl(double fraction) {
- LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
-
- // There is no way that the set of files can change while we are in this method, because
- // we hold the flush lock in X mode. For files to go away, a database needs to be dropped,
- // which means acquiring the flush lock in at least IX mode.
- //
- // However, the record fetcher logic unfortunately operates without any locks and on
- // Windows and Solaris remap is not atomic and there is a window where the record fetcher
- // might get an access violation. That's why we acquire the mongo files mutex here in X
- // mode and the record fetcher takes in in S-mode (see MmapV1RecordFetcher for more
- // detail).
- //
- // See SERVER-5723 for performance improvement.
- // See SERVER-5680 to see why this code is necessary on Windows.
- // See SERVER-8795 to see why this code is necessary on Solaris.
+/**
+ * Main code of the remap private view function.
+ */
+void remapPrivateViewImpl(double fraction) {
+ LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
+
+// There is no way that the set of files can change while we are in this method, because
+// we hold the flush lock in X mode. For files to go away, a database needs to be dropped,
+// which means acquiring the flush lock in at least IX mode.
+//
+// However, the record fetcher logic unfortunately operates without any locks and on
+// Windows and Solaris remap is not atomic and there is a window where the record fetcher
+// might get an access violation. That's why we acquire the mongo files mutex here in X
+// mode and the record fetcher takes it in S-mode (see MmapV1RecordFetcher for more
+// detail).
+//
+// See SERVER-5723 for performance improvement.
+// See SERVER-5680 to see why this code is necessary on Windows.
+// See SERVER-8795 to see why this code is necessary on Solaris.
#if defined(_WIN32) || defined(__sun)
- LockMongoFilesExclusive lk;
+ LockMongoFilesExclusive lk;
#else
- LockMongoFilesShared lk;
+ LockMongoFilesShared lk;
#endif
- std::set<MongoFile*>& files = MongoFile::getAllFiles();
+ std::set<MongoFile*>& files = MongoFile::getAllFiles();
- const unsigned sz = files.size();
- if (sz == 0) {
- return;
- }
+ const unsigned sz = files.size();
+ if (sz == 0) {
+ return;
+ }
- unsigned ntodo = (unsigned) (sz * fraction);
- if( ntodo < 1 ) ntodo = 1;
- if( ntodo > sz ) ntodo = sz;
+ unsigned ntodo = (unsigned)(sz * fraction);
+ if (ntodo < 1)
+ ntodo = 1;
+ if (ntodo > sz)
+ ntodo = sz;
+
+ const set<MongoFile*>::iterator b = files.begin();
+ const set<MongoFile*>::iterator e = files.end();
+ set<MongoFile*>::iterator i = b;
+
+ // Skip to our starting position as remembered from the last remap cycle
+ for (unsigned x = 0; x < remapFileToStartAt; x++) {
+ i++;
+ if (i == e)
+ i = b;
+ }
- const set<MongoFile*>::iterator b = files.begin();
- const set<MongoFile*>::iterator e = files.end();
- set<MongoFile*>::iterator i = b;
+ // Mark where to start on the next cycle
+ const unsigned startedAt = remapFileToStartAt;
+ remapFileToStartAt = (remapFileToStartAt + ntodo) % sz;
- // Skip to our starting position as remembered from the last remap cycle
- for (unsigned x = 0; x < remapFileToStartAt; x++) {
- i++;
- if (i == e) i = b;
- }
+ Timer t;
- // Mark where to start on the next cycle
- const unsigned startedAt = remapFileToStartAt;
- remapFileToStartAt = (remapFileToStartAt + ntodo) % sz;
+ for (unsigned x = 0; x < ntodo; x++) {
+ if ((*i)->isDurableMappedFile()) {
+ DurableMappedFile* const mmf = (DurableMappedFile*)*i;
- Timer t;
+ // Sanity check that the contents of the shared and the private view match so we
+ // don't end up overwriting data.
+ if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalParanoid) {
+ debugValidateFileMapsMatch(mmf);
+ }
- for (unsigned x = 0; x < ntodo; x++) {
- if ((*i)->isDurableMappedFile()) {
- DurableMappedFile* const mmf = (DurableMappedFile*) *i;
+ if (mmf->willNeedRemap()) {
+ mmf->remapThePrivateView();
+ }
- // Sanity check that the contents of the shared and the private view match so we
- // don't end up overwriting data.
- if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalParanoid) {
- debugValidateFileMapsMatch(mmf);
- }
+ i++;
- if (mmf->willNeedRemap()) {
- mmf->remapThePrivateView();
- }
+ if (i == e)
+ i = b;
+ }
+ }
- i++;
+ LOG(3) << "journal REMAPPRIVATEVIEW done startedAt: " << startedAt << " n:" << ntodo << ' '
+ << t.millis() << "ms";
+}
- if (i == e) i = b;
- }
- }
- LOG(3) << "journal REMAPPRIVATEVIEW done startedAt: " << startedAt << " n:" << ntodo
- << ' ' << t.millis() << "ms";
- }
+// One instance of each durability interface
+DurableImpl durableImpl;
+NonDurableImpl nonDurableImpl;
+} // namespace
- // One instance of each durability interface
- DurableImpl durableImpl;
- NonDurableImpl nonDurableImpl;
-} // namespace
+// Declared in dur_preplogbuffer.cpp
+void PREPLOGBUFFER(JSectHeader& outHeader, AlignedBuilder& outBuffer);
+// Declared in dur_journal.cpp
+boost::filesystem::path getJournalDir();
+void preallocateFiles();
- // Declared in dur_preplogbuffer.cpp
- void PREPLOGBUFFER(JSectHeader& outHeader, AlignedBuilder& outBuffer);
+// Forward declaration
+static void durThread();
- // Declared in dur_journal.cpp
- boost::filesystem::path getJournalDir();
- void preallocateFiles();
+// Durability activity statistics
+Stats stats;
- // Forward declaration
- static void durThread();
+// Reference to the write intents tracking object
+CommitJob commitJob;
- // Durability activity statistics
- Stats stats;
+// Reference to the active durability interface
+DurableInterface* DurableInterface::_impl(&nonDurableImpl);
- // Reference to the write intents tracking object
- CommitJob commitJob;
- // Reference to the active durability interface
- DurableInterface* DurableInterface::_impl(&nonDurableImpl);
+//
+// Stats
+//
+Stats::Stats() : _currIdx(0) {}
- //
- // Stats
- //
+void Stats::reset() {
+ // Seal the current metrics
+ _stats[_currIdx]._durationMillis = _stats[_currIdx].getCurrentDurationMillis();
- Stats::Stats() : _currIdx(0) {
+ // Use a new metric
+ const unsigned newCurrIdx = (_currIdx + 1) % (sizeof(_stats) / sizeof(_stats[0]));
+ _stats[newCurrIdx].reset();
- }
+ _currIdx = newCurrIdx;
+}
- void Stats::reset() {
- // Seal the current metrics
- _stats[_currIdx]._durationMillis = _stats[_currIdx].getCurrentDurationMillis();
+BSONObj Stats::asObj() const {
+ // Use the previous statistic
+ const S& stats = _stats[(_currIdx - 1) % (sizeof(_stats) / sizeof(_stats[0]))];
- // Use a new metric
- const unsigned newCurrIdx = (_currIdx + 1) % (sizeof(_stats) / sizeof(_stats[0]));
- _stats[newCurrIdx].reset();
+ BSONObjBuilder builder;
+ stats._asObj(&builder);
- _currIdx = newCurrIdx;
- }
+ return builder.obj();
+}
- BSONObj Stats::asObj() const {
- // Use the previous statistic
- const S& stats = _stats[(_currIdx - 1) % (sizeof(_stats) / sizeof(_stats[0]))];
+void Stats::S::reset() {
+ memset(this, 0, sizeof(*this));
+ _startTimeMicros = curTimeMicros64();
+}
- BSONObjBuilder builder;
- stats._asObj(&builder);
+std::string Stats::S::_CSVHeader() const {
+ return "cmts\t jrnMB\t wrDFMB\t cIWLk\t early\t prpLgB\t wrToJ\t wrToDF\t rmpPrVw";
+}
- return builder.obj();
- }
+std::string Stats::S::_asCSV() const {
+ stringstream ss;
+ ss << setprecision(2) << _commits << '\t' << _journaledBytes / 1000000.0 << '\t'
+ << _writeToDataFilesBytes / 1000000.0 << '\t' << _commitsInWriteLock << '\t' << 0 << '\t'
+ << (unsigned)(_prepLogBufferMicros / 1000) << '\t'
+ << (unsigned)(_writeToJournalMicros / 1000) << '\t'
+ << (unsigned)(_writeToDataFilesMicros / 1000) << '\t'
+ << (unsigned)(_remapPrivateViewMicros / 1000) << '\t' << (unsigned)(_commitsMicros / 1000)
+ << '\t' << (unsigned)(_commitsInWriteLockMicros / 1000) << '\t';
- void Stats::S::reset() {
- memset(this, 0, sizeof(*this));
- _startTimeMicros = curTimeMicros64();
- }
+ return ss.str();
+}
- std::string Stats::S::_CSVHeader() const {
- return "cmts\t jrnMB\t wrDFMB\t cIWLk\t early\t prpLgB\t wrToJ\t wrToDF\t rmpPrVw";
- }
+void Stats::S::_asObj(BSONObjBuilder* builder) const {
+ BSONObjBuilder& b = *builder;
+ b << "commits" << _commits << "journaledMB" << _journaledBytes / 1000000.0
+ << "writeToDataFilesMB" << _writeToDataFilesBytes / 1000000.0 << "compression"
+ << _journaledBytes / (_uncompressedBytes + 1.0) << "commitsInWriteLock" << _commitsInWriteLock
+ << "earlyCommits" << 0 << "timeMs"
+ << BSON("dt" << _durationMillis << "prepLogBuffer" << (unsigned)(_prepLogBufferMicros / 1000)
+ << "writeToJournal" << (unsigned)(_writeToJournalMicros / 1000)
+ << "writeToDataFiles" << (unsigned)(_writeToDataFilesMicros / 1000)
+ << "remapPrivateView" << (unsigned)(_remapPrivateViewMicros / 1000) << "commits"
+ << (unsigned)(_commitsMicros / 1000) << "commitsInWriteLock"
+ << (unsigned)(_commitsInWriteLockMicros / 1000));
- std::string Stats::S::_asCSV() const {
- stringstream ss;
- ss << setprecision(2)
- << _commits << '\t'
- << _journaledBytes / 1000000.0 << '\t'
- << _writeToDataFilesBytes / 1000000.0 << '\t'
- << _commitsInWriteLock << '\t'
- << 0 << '\t'
- << (unsigned) (_prepLogBufferMicros / 1000) << '\t'
- << (unsigned) (_writeToJournalMicros / 1000) << '\t'
- << (unsigned) (_writeToDataFilesMicros / 1000) << '\t'
- << (unsigned) (_remapPrivateViewMicros / 1000) << '\t'
- << (unsigned) (_commitsMicros / 1000) << '\t'
- << (unsigned) (_commitsInWriteLockMicros / 1000) << '\t';
-
- return ss.str();
+ if (mmapv1GlobalOptions.journalCommitInterval != 0) {
+ b << "journalCommitIntervalMs" << mmapv1GlobalOptions.journalCommitInterval;
}
+}
- void Stats::S::_asObj(BSONObjBuilder* builder) const {
- BSONObjBuilder& b = *builder;
- b << "commits" << _commits
- << "journaledMB" << _journaledBytes / 1000000.0
- << "writeToDataFilesMB" << _writeToDataFilesBytes / 1000000.0
- << "compression" << _journaledBytes / (_uncompressedBytes + 1.0)
- << "commitsInWriteLock" << _commitsInWriteLock
- << "earlyCommits" << 0
- << "timeMs" << BSON("dt" << _durationMillis <<
- "prepLogBuffer" << (unsigned) (_prepLogBufferMicros / 1000) <<
- "writeToJournal" << (unsigned) (_writeToJournalMicros / 1000) <<
- "writeToDataFiles" << (unsigned) (_writeToDataFilesMicros / 1000) <<
- "remapPrivateView" << (unsigned) (_remapPrivateViewMicros / 1000) <<
- "commits" << (unsigned)(_commitsMicros / 1000) <<
- "commitsInWriteLock"
- << (unsigned)(_commitsInWriteLockMicros / 1000));
-
- if (mmapv1GlobalOptions.journalCommitInterval != 0) {
- b << "journalCommitIntervalMs" << mmapv1GlobalOptions.journalCommitInterval;
- }
- }
+//
+// DurableInterface
+//
- //
- // DurableInterface
- //
+DurableInterface::DurableInterface() {}
- DurableInterface::DurableInterface() {
+DurableInterface::~DurableInterface() {}
- }
- DurableInterface::~DurableInterface() {
+//
+// DurableImpl
+//
- }
+bool DurableImpl::commitNow(OperationContext* txn) {
+ NotifyAll::When when = commitNotify.now();
+ AutoYieldFlushLockForMMAPV1Commit flushLockYield(txn->lockState());
- //
- // DurableImpl
- //
+ // There is always just one waiting anyways
+ flushRequested.notify_one();
- bool DurableImpl::commitNow(OperationContext* txn) {
- NotifyAll::When when = commitNotify.now();
+ // commitNotify.waitFor ensures that whatever was scheduled for journaling before this
+ // call has been persisted to the journal file. This does not mean that this data has been
+ // applied to the shared view yet though, that's why we wait for applyToDataFilesNotify.
+ applyToDataFilesNotify.waitFor(when);
- AutoYieldFlushLockForMMAPV1Commit flushLockYield(txn->lockState());
+ return true;
+}
- // There is always just one waiting anyways
- flushRequested.notify_one();
+bool DurableImpl::waitUntilDurable() {
+ commitNotify.awaitBeyondNow();
+ return true;
+}
- // commitNotify.waitFor ensures that whatever was scheduled for journaling before this
- // call has been persisted to the journal file. This does not mean that this data has been
- // applied to the shared view yet though, that's why we wait for applyToDataFilesNotify.
- applyToDataFilesNotify.waitFor(when);
+void DurableImpl::createdFile(const std::string& filename, unsigned long long len) {
+ std::shared_ptr<DurOp> op(new FileCreatedOp(filename, len));
+ commitJob.noteOp(op);
+}
- return true;
- }
- bool DurableImpl::waitUntilDurable() {
- commitNotify.awaitBeyondNow();
- return true;
+void DurableImpl::declareWriteIntents(const std::vector<std::pair<void*, unsigned>>& intents) {
+ typedef std::vector<std::pair<void*, unsigned>> Intents;
+ stdx::lock_guard<SimpleMutex> lk(commitJob.groupCommitMutex);
+ for (Intents::const_iterator it(intents.begin()), end(intents.end()); it != end; ++it) {
+ commitJob.note(it->first, it->second);
}
+}
- void DurableImpl::createdFile(const std::string& filename, unsigned long long len) {
- std::shared_ptr<DurOp> op(new FileCreatedOp(filename, len));
- commitJob.noteOp(op);
+bool DurableImpl::commitIfNeeded() {
+ if (MONGO_likely(commitJob.bytes() < UncommittedBytesLimit)) {
+ return false;
}
+ // Just wake up the flush thread
+ flushRequested.notify_one();
+ return true;
+}
- void DurableImpl::declareWriteIntents(
- const std::vector<std::pair<void*, unsigned> >& intents) {
- typedef std::vector<std::pair<void*, unsigned> > Intents;
- stdx::lock_guard<SimpleMutex> lk(commitJob.groupCommitMutex);
- for (Intents::const_iterator it(intents.begin()), end(intents.end()); it != end; ++it) {
- commitJob.note(it->first, it->second);
- }
- }
-
- bool DurableImpl::commitIfNeeded() {
- if (MONGO_likely(commitJob.bytes() < UncommittedBytesLimit)) {
- return false;
- }
+void DurableImpl::syncDataAndTruncateJournal(OperationContext* txn) {
+ invariant(txn->lockState()->isW());
- // Just wake up the flush thread
- flushRequested.notify_one();
- return true;
- }
+ // Once this returns, all the outstanding journal has been applied to the data files and
+ // so it's safe to do the flushAll/journalCleanup below.
+ commitNow(txn);
- void DurableImpl::syncDataAndTruncateJournal(OperationContext* txn) {
- invariant(txn->lockState()->isW());
+ // Flush the shared view to disk.
+ MongoFile::flushAll(true);
- // Once this returns, all the outstanding journal has been applied to the data files and
- // so it's safe to do the flushAll/journalCleanup below.
- commitNow(txn);
+ // Once the shared view has been flushed, we do not need the journal files anymore.
+ journalCleanup(true);
- // Flush the shared view to disk.
- MongoFile::flushAll(true);
+ // Double check post-conditions
+ invariant(!haveJournalFiles());
+}
- // Once the shared view has been flushed, we do not need the journal files anymore.
- journalCleanup(true);
+void DurableImpl::closingFileNotification() {
+ if (commitJob.hasWritten()) {
+ severe() << "journal warning files are closing outside locks with writes pending";
- // Double check post-conditions
- invariant(!haveJournalFiles());
+ // File is closing while there are unwritten changes
+ invariant(false);
}
+}
- void DurableImpl::closingFileNotification() {
- if (commitJob.hasWritten()) {
- severe() << "journal warning files are closing outside locks with writes pending";
+void DurableImpl::commitAndStopDurThread() {
+ NotifyAll::When when = commitNotify.now();
- // File is closing while there are unwritten changes
- invariant(false);
- }
- }
+ // There is always just one waiting anyways
+ flushRequested.notify_one();
- void DurableImpl::commitAndStopDurThread() {
- NotifyAll::When when = commitNotify.now();
+ // commitNotify.waitFor ensures that whatever was scheduled for journaling before this
+ // call has been persisted to the journal file. This does not mean that this data has been
+ // applied to the shared view yet though, that's why we wait for applyToDataFilesNotify.
+ applyToDataFilesNotify.waitFor(when);
- // There is always just one waiting anyways
- flushRequested.notify_one();
+ // Flush the shared view to disk.
+ MongoFile::flushAll(true);
- // commitNotify.waitFor ensures that whatever was scheduled for journaling before this
- // call has been persisted to the journal file. This does not mean that this data has been
- // applied to the shared view yet though, that's why we wait for applyToDataFilesNotify.
- applyToDataFilesNotify.waitFor(when);
+ // Once the shared view has been flushed, we do not need the journal files anymore.
+ journalCleanup(true);
- // Flush the shared view to disk.
- MongoFile::flushAll(true);
+ // Double check post-conditions
+ invariant(!haveJournalFiles());
- // Once the shared view has been flushed, we do not need the journal files anymore.
- journalCleanup(true);
+ shutdownRequested.store(1);
- // Double check post-conditions
- invariant(!haveJournalFiles());
+ // Wait for the durability thread to terminate
+ log() << "Terminating durability thread ...";
+ _durThreadHandle.join();
+}
- shutdownRequested.store(1);
+void DurableImpl::start() {
+ // Start the durability thread
+ stdx::thread t(durThread);
+ _durThreadHandle.swap(t);
+}
- // Wait for the durability thread to terminate
- log() << "Terminating durability thread ...";
- _durThreadHandle.join();
- }
- void DurableImpl::start() {
- // Start the durability thread
- stdx::thread t(durThread);
- _durThreadHandle.swap(t);
+/**
+ * Remaps the private view from the shared view so that it does not consume too much
+ * copy-on-write/swap space. Must only be called after the in-memory journal has been flushed
+ * to disk and applied on top of the shared view.
+ *
+ * @param fraction Value between (0, 1] indicating what fraction of the memory to remap.
+ * Remapping too much or too frequently incurs copy-on-write page fault cost.
+ */
+static void remapPrivateView(double fraction) {
+ // Remapping private views must occur after WRITETODATAFILES otherwise we wouldn't see any
+ // newly written data on reads.
+ invariant(!commitJob.hasWritten());
+
+ try {
+ Timer t;
+ remapPrivateViewImpl(fraction);
+ stats.curr()->_remapPrivateViewMicros += t.micros();
+
+ LOG(4) << "remapPrivateView end";
+ return;
+ } catch (DBException& e) {
+ severe() << "dbexception in remapPrivateView causing immediate shutdown: " << e.toString();
+ } catch (std::ios_base::failure& e) {
+ severe() << "ios_base exception in remapPrivateView causing immediate shutdown: "
+ << e.what();
+ } catch (std::bad_alloc& e) {
+ severe() << "bad_alloc exception in remapPrivateView causing immediate shutdown: "
+ << e.what();
+ } catch (std::exception& e) {
+ severe() << "exception in remapPrivateView causing immediate shutdown: " << e.what();
+ } catch (...) {
+ severe() << "unknown exception in remapPrivateView causing immediate shutdown: ";
}
+ invariant(false);
+}
- /**
- * Remaps the private view from the shared view so that it does not consume too much
- * copy-on-write/swap space. Must only be called after the in-memory journal has been flushed
- * to disk and applied on top of the shared view.
- *
- * @param fraction Value between (0, 1] indicating what fraction of the memory to remap.
- * Remapping too much or too frequently incurs copy-on-write page fault cost.
- */
- static void remapPrivateView(double fraction) {
- // Remapping private views must occur after WRITETODATAFILES otherwise we wouldn't see any
- // newly written data on reads.
- invariant(!commitJob.hasWritten());
- try {
- Timer t;
- remapPrivateViewImpl(fraction);
- stats.curr()->_remapPrivateViewMicros += t.micros();
+/**
+ * The main durability thread loop. There is a single instance of this function running.
+ */
+static void durThread() {
+ Client::initThread("durability");
- LOG(4) << "remapPrivateView end";
- return;
- }
- catch (DBException& e) {
- severe() << "dbexception in remapPrivateView causing immediate shutdown: "
- << e.toString();
- }
- catch (std::ios_base::failure& e) {
- severe() << "ios_base exception in remapPrivateView causing immediate shutdown: "
- << e.what();
- }
- catch (std::bad_alloc& e) {
- severe() << "bad_alloc exception in remapPrivateView causing immediate shutdown: "
- << e.what();
- }
- catch (std::exception& e) {
- severe() << "exception in remapPrivateView causing immediate shutdown: "
- << e.what();
- }
- catch (...) {
- severe() << "unknown exception in remapPrivateView causing immediate shutdown: ";
- }
+ log() << "Durability thread started";
- invariant(false);
+ bool samePartition = true;
+ try {
+ const std::string dbpathDir = boost::filesystem::path(storageGlobalParams.dbpath).string();
+ samePartition = onSamePartition(getJournalDir().string(), dbpathDir);
+ } catch (...) {
}
+ // Spawn the journal writer thread
+ JournalWriter journalWriter(&commitNotify, &applyToDataFilesNotify, NumAsyncJournalWrites);
+ journalWriter.start();
- /**
- * The main durability thread loop. There is a single instance of this function running.
- */
- static void durThread() {
- Client::initThread("durability");
+ // Used as an estimate of how much / how fast to remap
+ uint64_t commitCounter(0);
+ uint64_t estimatedPrivateMapSize(0);
+ uint64_t remapLastTimestamp(0);
- log() << "Durability thread started";
-
- bool samePartition = true;
- try {
- const std::string dbpathDir =
- boost::filesystem::path(storageGlobalParams.dbpath).string();
- samePartition = onSamePartition(getJournalDir().string(), dbpathDir);
+ while (shutdownRequested.loadRelaxed() == 0) {
+ unsigned ms = mmapv1GlobalOptions.journalCommitInterval;
+ if (ms == 0) {
+ ms = samePartition ? 100 : 30;
}
- catch(...) {
- }
+ // +1 so it never goes down to zero
+ const unsigned oneThird = (ms / 3) + 1;
- // Spawn the journal writer thread
- JournalWriter journalWriter(&commitNotify, &applyToDataFilesNotify, NumAsyncJournalWrites);
- journalWriter.start();
+ // Reset the stats based on the reset interval
+ if (stats.curr()->getCurrentDurationMillis() > DurStatsResetIntervalMillis) {
+ stats.reset();
+ }
- // Used as an estimate of how much / how fast to remap
- uint64_t commitCounter(0);
- uint64_t estimatedPrivateMapSize(0);
- uint64_t remapLastTimestamp(0);
+ try {
+ stdx::unique_lock<stdx::mutex> lock(flushMutex);
- while (shutdownRequested.loadRelaxed() == 0) {
- unsigned ms = mmapv1GlobalOptions.journalCommitInterval;
- if (ms == 0) {
- ms = samePartition ? 100 : 30;
- }
+ for (unsigned i = 0; i <= 2; i++) {
+ if (boost::cv_status::no_timeout ==
+ flushRequested.wait_for(lock, Milliseconds(oneThird))) {
+ // Someone forced a flush
+ break;
+ }
- // +1 so it never goes down to zero
- const unsigned oneThird = (ms / 3) + 1;
+ if (commitNotify.nWaiting()) {
+ // One or more getLastError j:true is pending
+ break;
+ }
- // Reset the stats based on the reset interval
- if (stats.curr()->getCurrentDurationMillis() > DurStatsResetIntervalMillis) {
- stats.reset();
+ if (commitJob.bytes() > UncommittedBytesLimit / 2) {
+ // The number of written bytes is growing
+ break;
+ }
}
- try {
- stdx::unique_lock<stdx::mutex> lock(flushMutex);
+ // The commit logic itself
+ LOG(4) << "groupCommit begin";
- for (unsigned i = 0; i <= 2; i++) {
- if (boost::cv_status::no_timeout == flushRequested.wait_for(
- lock, Milliseconds(oneThird))) {
- // Someone forced a flush
- break;
- }
+ Timer t;
- if (commitNotify.nWaiting()) {
- // One or more getLastError j:true is pending
- break;
+ OperationContextImpl txn;
+ AutoAcquireFlushLockForMMAPV1Commit autoFlushLock(txn.lockState());
+
+ // We need to snapshot the commitNumber after the flush lock has been obtained,
+ // because at this point we know that we have a stable snapshot of the data.
+ const NotifyAll::When commitNumber(commitNotify.now());
+
+ LOG(4) << "Processing commit number " << commitNumber;
+
+ if (!commitJob.hasWritten()) {
+ // We do not need the journal lock anymore. Free it here, for the really
+ // unlikely possibility that the writeBuffer command below blocks.
+ autoFlushLock.release();
+
+                // getlasterror request could have come after the data was already committed.
+ // No need to call committingReset though, because we have not done any
+ // writes (hasWritten == false).
+ JournalWriter::Buffer* const buffer = journalWriter.newBuffer();
+ buffer->setNoop();
+
+ journalWriter.writeBuffer(buffer, commitNumber);
+ } else {
+ // This copies all the in-memory changes into the journal writer's buffer.
+ JournalWriter::Buffer* const buffer = journalWriter.newBuffer();
+ PREPLOGBUFFER(buffer->getHeader(), buffer->getBuilder());
+
+ estimatedPrivateMapSize += commitJob.bytes();
+ commitCounter++;
+
+ // Now that the write intents have been copied to the buffer, the commit job is
+ // free to be reused. We need to reset the commit job's contents while under
+ // the S flush lock, because otherwise someone might have done a write and this
+ // would wipe out their changes without ever being committed.
+ commitJob.committingReset();
+
+ double systemMemoryPressurePercentage =
+ ProcessInfo::getSystemMemoryPressurePercentage();
+
+ // Now that the in-memory modifications have been collected, we can potentially
+ // release the flush lock if remap is not necessary.
+ // When we remap due to memory pressure, we look at two criteria
+ // 1. If the amount of 4k pages touched exceeds 512 MB,
+ // a reasonable estimate of memory pressure on Linux.
+ // 2. Check if the amount of free memory on the machine is running low,
+                //    since #1 underestimates the memory pressure on Windows, where
+                //    commits happen in 64MB chunks.
+ const bool shouldRemap = (estimatedPrivateMapSize >= UncommittedBytesLimit) ||
+ (systemMemoryPressurePercentage > 0.0) ||
+ (commitCounter % NumCommitsBeforeRemap == 0) ||
+ (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalAlwaysRemap);
+
+ double remapFraction = 0.0;
+
+ if (shouldRemap) {
+ // We want to remap all private views about every 2 seconds. There could be
+ // ~1000 views so we do a little each pass. There will be copy on write
+ // faults after remapping, so doing a little bit at a time will avoid big
+ // load spikes when the pages are touched.
+ //
+ // TODO: Instead of the time-based logic above, consider using ProcessInfo
+ // and watching for getResidentSize to drop, which is more precise.
+ remapFraction = (curTimeMicros64() - remapLastTimestamp) / 2000000.0;
+
+ if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalAlwaysRemap) {
+ remapFraction = 1;
+ } else {
+ // We don't want to get close to the UncommittedBytesLimit
+ const double remapMemFraction =
+ estimatedPrivateMapSize / ((double)UncommittedBytesLimit);
+
+ remapFraction = std::max(remapMemFraction, remapFraction);
+
+ remapFraction = std::max(systemMemoryPressurePercentage, remapFraction);
}
+ } else {
+ LOG(4) << "Early release flush lock";
- if (commitJob.bytes() > UncommittedBytesLimit / 2) {
- // The number of written bytes is growing
- break;
- }
+                    // We will not be doing a remap so drop the flush lock. That way we will be
+                    // doing the journal I/O outside of the lock, so other threads can proceed.
+ invariant(!shouldRemap);
+ autoFlushLock.release();
}
- // The commit logic itself
- LOG(4) << "groupCommit begin";
+ // Request async I/O to the journal. This may block.
+ journalWriter.writeBuffer(buffer, commitNumber);
+
+ // Data has now been written to the shared view. If remap was requested, we
+ // would still be holding the S flush lock here, so just upgrade it and
+ // perform the remap.
+ if (shouldRemap) {
+ // Need to wait for the previously scheduled journal writes to complete
+ // before any remap is attempted.
+ journalWriter.flush();
+ journalWriter.assertIdle();
+
+ // Upgrading the journal lock to flush stops all activity on the system,
+ // because we will be remapping memory and we don't want readers to be
+                    // accessing it. Technically this step could be avoided on systems that
+                    // support atomic remap.
+ autoFlushLock.upgradeFlushLockToExclusive();
+ remapPrivateView(remapFraction);
- Timer t;
-
- OperationContextImpl txn;
- AutoAcquireFlushLockForMMAPV1Commit autoFlushLock(txn.lockState());
-
- // We need to snapshot the commitNumber after the flush lock has been obtained,
- // because at this point we know that we have a stable snapshot of the data.
- const NotifyAll::When commitNumber(commitNotify.now());
-
- LOG(4) << "Processing commit number " << commitNumber;
-
- if (!commitJob.hasWritten()) {
- // We do not need the journal lock anymore. Free it here, for the really
- // unlikely possibility that the writeBuffer command below blocks.
autoFlushLock.release();
- // getlasterror request could have came after the data was already committed.
- // No need to call committingReset though, because we have not done any
- // writes (hasWritten == false).
- JournalWriter::Buffer* const buffer = journalWriter.newBuffer();
- buffer->setNoop();
-
- journalWriter.writeBuffer(buffer, commitNumber);
- }
- else {
- // This copies all the in-memory changes into the journal writer's buffer.
- JournalWriter::Buffer* const buffer = journalWriter.newBuffer();
- PREPLOGBUFFER(buffer->getHeader(), buffer->getBuilder());
-
- estimatedPrivateMapSize += commitJob.bytes();
- commitCounter++;
-
- // Now that the write intents have been copied to the buffer, the commit job is
- // free to be reused. We need to reset the commit job's contents while under
- // the S flush lock, because otherwise someone might have done a write and this
- // would wipe out their changes without ever being committed.
- commitJob.committingReset();
-
- double systemMemoryPressurePercentage =
- ProcessInfo::getSystemMemoryPressurePercentage();
-
- // Now that the in-memory modifications have been collected, we can potentially
- // release the flush lock if remap is not necessary.
- // When we remap due to memory pressure, we look at two criteria
- // 1. If the amount of 4k pages touched exceeds 512 MB,
- // a reasonable estimate of memory pressure on Linux.
- // 2. Check if the amount of free memory on the machine is running low,
- // since #1 is underestimates the memory pressure on Windows since
- // commits in 64MB chunks.
- const bool shouldRemap =
- (estimatedPrivateMapSize >= UncommittedBytesLimit) ||
- (systemMemoryPressurePercentage > 0.0) ||
- (commitCounter % NumCommitsBeforeRemap == 0) ||
- (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalAlwaysRemap);
-
- double remapFraction = 0.0;
-
- if (shouldRemap) {
- // We want to remap all private views about every 2 seconds. There could be
- // ~1000 views so we do a little each pass. There will be copy on write
- // faults after remapping, so doing a little bit at a time will avoid big
- // load spikes when the pages are touched.
- //
- // TODO: Instead of the time-based logic above, consider using ProcessInfo
- // and watching for getResidentSize to drop, which is more precise.
- remapFraction = (curTimeMicros64() - remapLastTimestamp) / 2000000.0;
-
- if (mmapv1GlobalOptions.journalOptions &
- MMAPV1Options::JournalAlwaysRemap) {
- remapFraction = 1;
- }
- else {
- // We don't want to get close to the UncommittedBytesLimit
- const double remapMemFraction =
- estimatedPrivateMapSize / ((double)UncommittedBytesLimit);
-
- remapFraction = std::max(remapMemFraction, remapFraction);
-
- remapFraction = std::max(systemMemoryPressurePercentage, remapFraction);
- }
- }
- else {
- LOG(4) << "Early release flush lock";
-
- // We will not be doing a remap so drop the flush lock. That way we will be
- // doing the journal I/O outside of lock, so other threads can proceed.
- invariant(!shouldRemap);
- autoFlushLock.release();
- }
+ // Reset the private map estimate outside of the lock
+ estimatedPrivateMapSize = 0;
+ remapLastTimestamp = curTimeMicros64();
- // Request async I/O to the journal. This may block.
- journalWriter.writeBuffer(buffer, commitNumber);
-
- // Data has now been written to the shared view. If remap was requested, we
- // would still be holding the S flush lock here, so just upgrade it and
- // perform the remap.
- if (shouldRemap) {
- // Need to wait for the previously scheduled journal writes to complete
- // before any remap is attempted.
- journalWriter.flush();
- journalWriter.assertIdle();
-
- // Upgrading the journal lock to flush stops all activity on the system,
- // because we will be remapping memory and we don't want readers to be
- // accessing it. Technically this step could be avoided on systems, which
- // support atomic remap.
- autoFlushLock.upgradeFlushLockToExclusive();
- remapPrivateView(remapFraction);
-
- autoFlushLock.release();
-
- // Reset the private map estimate outside of the lock
- estimatedPrivateMapSize = 0;
- remapLastTimestamp = curTimeMicros64();
-
- stats.curr()->_commitsInWriteLock++;
- stats.curr()->_commitsInWriteLockMicros += t.micros();
- }
+ stats.curr()->_commitsInWriteLock++;
+ stats.curr()->_commitsInWriteLockMicros += t.micros();
}
-
- stats.curr()->_commits++;
- stats.curr()->_commitsMicros += t.micros();
-
- LOG(4) << "groupCommit end";
- }
- catch (DBException& e) {
- severe() << "dbexception in durThread causing immediate shutdown: "
- << e.toString();
- invariant(false);
- }
- catch (std::ios_base::failure& e) {
- severe() << "ios_base exception in durThread causing immediate shutdown: "
- << e.what();
- invariant(false);
}
- catch (std::bad_alloc& e) {
- severe() << "bad_alloc exception in durThread causing immediate shutdown: "
- << e.what();
- invariant(false);
- }
- catch (std::exception& e) {
- severe() << "exception in durThread causing immediate shutdown: "
- << e.what();
- invariant(false);
- }
- catch (...) {
- severe() << "unhandled exception in durThread causing immediate shutdown";
- invariant(false);
- }
- }
- // Stops the journal thread and ensures everything was written
- invariant(!commitJob.hasWritten());
+ stats.curr()->_commits++;
+ stats.curr()->_commitsMicros += t.micros();
- journalWriter.flush();
- journalWriter.shutdown();
-
- log() << "Durability thread stopped";
+ LOG(4) << "groupCommit end";
+ } catch (DBException& e) {
+ severe() << "dbexception in durThread causing immediate shutdown: " << e.toString();
+ invariant(false);
+ } catch (std::ios_base::failure& e) {
+ severe() << "ios_base exception in durThread causing immediate shutdown: " << e.what();
+ invariant(false);
+ } catch (std::bad_alloc& e) {
+ severe() << "bad_alloc exception in durThread causing immediate shutdown: " << e.what();
+ invariant(false);
+ } catch (std::exception& e) {
+ severe() << "exception in durThread causing immediate shutdown: " << e.what();
+ invariant(false);
+ } catch (...) {
+ severe() << "unhandled exception in durThread causing immediate shutdown";
+ invariant(false);
+ }
}
+ // Stops the journal thread and ensures everything was written
+ invariant(!commitJob.hasWritten());
- /**
- * Invoked at server startup. Recovers the database by replaying journal files and then
- * starts the durability thread.
- */
- void startup() {
- if (!storageGlobalParams.dur) {
- return;
- }
+ journalWriter.flush();
+ journalWriter.shutdown();
- journalMakeDir();
+ log() << "Durability thread stopped";
+}
- try {
- replayJournalFilesAtStartup();
- }
- catch (DBException& e) {
- severe() << "dbexception during recovery: " << e.toString();
- throw;
- }
- catch (std::exception& e) {
- severe() << "std::exception during recovery: " << e.what();
- throw;
- }
- catch (...) {
- severe() << "exception during recovery";
- throw;
- }
- preallocateFiles();
+/**
+ * Invoked at server startup. Recovers the database by replaying journal files and then
+ * starts the durability thread.
+ */
+void startup() {
+ if (!storageGlobalParams.dur) {
+ return;
+ }
- durableImpl.start();
- DurableInterface::_impl = &durableImpl;
+ journalMakeDir();
+
+ try {
+ replayJournalFilesAtStartup();
+ } catch (DBException& e) {
+ severe() << "dbexception during recovery: " << e.toString();
+ throw;
+ } catch (std::exception& e) {
+ severe() << "std::exception during recovery: " << e.what();
+ throw;
+ } catch (...) {
+ severe() << "exception during recovery";
+ throw;
}
-} // namespace dur
-} // namespace mongo
+ preallocateFiles();
+
+ durableImpl.start();
+ DurableInterface::_impl = &durableImpl;
+}
+
+} // namespace dur
+} // namespace mongo
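
A note on the reformatted durThread() loop above: it splits the configured journalCommitInterval into thirds and wakes early when a flush has been forced, when a getLastError j:true waiter is pending, or when the uncommitted byte count passes half of UncommittedBytesLimit. The following is only a minimal sketch of that pacing, with invented names (CommitPacer, waitForNextCommit) and none of the real locking or stats:

    // Illustrative only: a stripped-down version of the pacing at the top of durThread(),
    // using plain std primitives and made-up names.
    #include <chrono>
    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    struct CommitPacer {
        std::mutex mtx;
        std::condition_variable flushRequested;
        bool forced = false;               // set by a thread that demands an immediate flush
        int jTrueWaiters = 0;              // pending getLastError j:true acknowledgements
        std::size_t uncommittedBytes = 0;  // bytes noted since the last group commit

        // Blocks until it is time to attempt the next group commit.
        void waitForNextCommit(unsigned intervalMs, std::size_t uncommittedLimit) {
            const unsigned oneThird = (intervalMs / 3) + 1;  // +1 so it never becomes zero
            std::unique_lock<std::mutex> lock(mtx);
            for (unsigned i = 0; i <= 2; i++) {
                if (flushRequested.wait_for(
                        lock, std::chrono::milliseconds(oneThird), [&] { return forced; })) {
                    break;  // someone forced a flush
                }
                if (jTrueWaiters > 0)
                    break;  // a j:true waiter needs the commit now
                if (uncommittedBytes > uncommittedLimit / 2)
                    break;  // the amount of uncommitted data is growing
            }
            forced = false;
        }
    };
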
diff --git a/src/mongo/db/storage/mmap_v1/dur.h b/src/mongo/db/storage/mmap_v1/dur.h
index 2915ece1439..7cfd46fada3 100644
--- a/src/mongo/db/storage/mmap_v1/dur.h
+++ b/src/mongo/db/storage/mmap_v1/dur.h
@@ -35,126 +35,130 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
namespace dur {
- // a smaller limit is likely better on 32 bit
- const unsigned UncommittedBytesLimit = (sizeof(void*) == 4) ? 50 * 1024 * 1024 : 512 * 1024 * 1024;
+// a smaller limit is likely better on 32 bit
+const unsigned UncommittedBytesLimit = (sizeof(void*) == 4) ? 50 * 1024 * 1024 : 512 * 1024 * 1024;
- class DurableInterface {
- MONGO_DISALLOW_COPYING(DurableInterface);
- public:
- virtual ~DurableInterface();
+class DurableInterface {
+ MONGO_DISALLOW_COPYING(DurableInterface);
- /**
- * Declare that a file has been created. Normally writes are applied only after journaling
- * for safety. But here the file is created first, and the journal will just replay the
- * creation if the create didn't happen due to a crash.
- */
- virtual void createdFile(const std::string& filename, unsigned long long len) = 0;
+public:
+ virtual ~DurableInterface();
- // Declare write intents. Use these methods to declare "i'm about to write to x and it
- // should be logged for redo."
- //
- // Failure to call declare write intents is checked in MONGO_CONFIG_DEBUG_BUILD mode by
- // using a read only mapped view (i.e., you'll segfault if the code is covered in that
- // situation). The debug check doesn't verify that your length is correct though.
- virtual void declareWriteIntents(
- const std::vector<std::pair<void*, unsigned> >& intents) = 0;
+ /**
+ * Declare that a file has been created. Normally writes are applied only after journaling
+ * for safety. But here the file is created first, and the journal will just replay the
+ * creation if the create didn't happen due to a crash.
+ */
+ virtual void createdFile(const std::string& filename, unsigned long long len) = 0;
- /** Wait for acknowledgement of the next group commit.
- @return true if --dur is on. There will be delay.
- @return false if --dur is off.
- */
- virtual bool waitUntilDurable() = 0;
+ // Declare write intents. Use these methods to declare "i'm about to write to x and it
+ // should be logged for redo."
+ //
+    // Failure to declare write intents is checked in MONGO_CONFIG_DEBUG_BUILD mode by
+ // using a read only mapped view (i.e., you'll segfault if the code is covered in that
+ // situation). The debug check doesn't verify that your length is correct though.
+ virtual void declareWriteIntents(const std::vector<std::pair<void*, unsigned>>& intents) = 0;
- /** Commit immediately.
+ /** Wait for acknowledgement of the next group commit.
+ @return true if --dur is on. There will be delay.
+ @return false if --dur is off.
+ */
+ virtual bool waitUntilDurable() = 0;
- Generally, you do not want to do this often, as highly granular committing may affect
- performance.
+ /** Commit immediately.
- Does not return until the commit is complete.
+ Generally, you do not want to do this often, as highly granular committing may affect
+ performance.
- You must be at least read locked when you call this. Ideally, you are not write locked
- and then read operations can occur concurrently.
+ Does not return until the commit is complete.
- Do not use this. Use commitIfNeeded() instead.
+ You must be at least read locked when you call this. Ideally, you are not write locked
+ and then read operations can occur concurrently.
- @return true if --dur is on.
- @return false if --dur is off. (in which case there is action)
- */
- virtual bool commitNow(OperationContext* txn) = 0;
+ Do not use this. Use commitIfNeeded() instead.
- /** Commit if enough bytes have been modified. Current threshold is 50MB
+ @return true if --dur is on.
+ @return false if --dur is off. (in which case there is action)
+ */
+ virtual bool commitNow(OperationContext* txn) = 0;
- The idea is that long running write operations that don't yield
- (like creating an index or update with $atomic) can call this
- whenever the db is in a sane state and it will prevent commits
- from growing too large.
- @return true if commited
- */
- virtual bool commitIfNeeded() = 0;
+ /** Commit if enough bytes have been modified. Current threshold is 50MB
+ The idea is that long running write operations that don't yield
+ (like creating an index or update with $atomic) can call this
+ whenever the db is in a sane state and it will prevent commits
+ from growing too large.
+        @return true if committed
+ */
+ virtual bool commitIfNeeded() = 0;
- /**
- * Called when a DurableMappedFile is closing. Asserts that there are no unwritten changes,
- * because that would mean journal replay on recovery would try to write to non-existent
- * files and fail.
- */
- virtual void closingFileNotification() = 0;
- /**
- * Invoked at clean shutdown time. Performs one last commit/flush and terminates the
- * flush thread.
- *
- * Must be called under the global X lock.
- */
- virtual void commitAndStopDurThread() = 0;
+ /**
+ * Called when a DurableMappedFile is closing. Asserts that there are no unwritten changes,
+ * because that would mean journal replay on recovery would try to write to non-existent
+ * files and fail.
+ */
+ virtual void closingFileNotification() = 0;
- /**
- * Commits pending changes, flushes all changes to main data files, then removes the
- * journal.
- *
- * WARNING: Data *must* be in a crash-recoverable state when this is called and must
- * not be inside of a write unit of work.
- *
- * This is useful as a "barrier" to ensure that writes before this call will never go
- * through recovery and be applied to files that have had changes made after this call
- * applied.
- */
- virtual void syncDataAndTruncateJournal(OperationContext* txn) = 0;
+ /**
+ * Invoked at clean shutdown time. Performs one last commit/flush and terminates the
+ * flush thread.
+ *
+ * Must be called under the global X lock.
+ */
+ virtual void commitAndStopDurThread() = 0;
- virtual bool isDurable() const = 0;
+ /**
+ * Commits pending changes, flushes all changes to main data files, then removes the
+ * journal.
+ *
+ * WARNING: Data *must* be in a crash-recoverable state when this is called and must
+ * not be inside of a write unit of work.
+ *
+ * This is useful as a "barrier" to ensure that writes before this call will never go
+ * through recovery and be applied to files that have had changes made after this call
+ * applied.
+ */
+ virtual void syncDataAndTruncateJournal(OperationContext* txn) = 0;
- static DurableInterface& getDur() { return *_impl; }
+ virtual bool isDurable() const = 0;
- protected:
- DurableInterface();
+ static DurableInterface& getDur() {
+ return *_impl;
+ }
- private:
- friend void startup();
+protected:
+ DurableInterface();
- static DurableInterface* _impl;
- };
+private:
+ friend void startup();
+ static DurableInterface* _impl;
+};
- /**
- * Called during startup to startup the durability module.
- * Does nothing if storageGlobalParams.dur is false
- */
- void startup();
-} // namespace dur
+/**
+ * Called during startup to start up the durability module.
+ * Does nothing if storageGlobalParams.dur is false.
+ */
+void startup();
+} // namespace dur
- /**
- * Provides a reference to the active durability interface.
- *
- * TODO: The only reason this is an inline function is that tests try to link it and fail if
- * the MMAP V1 engine is not included.
- */
- inline dur::DurableInterface& getDur() { return dur::DurableInterface::getDur(); }
-} // namespace mongo
+/**
+ * Provides a reference to the active durability interface.
+ *
+ * TODO: The only reason this is an inline function is that tests try to link it and fail if
+ * the MMAP V1 engine is not included.
+ */
+inline dur::DurableInterface& getDur() {
+ return dur::DurableInterface::getDur();
+}
+
+} // namespace mongo
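
The reformatted DurableInterface above is driven in two steps: declare write intents before touching the private view, then rely on group commit (or commitIfNeeded() from long-running writers) to journal them. The function below is a hypothetical caller written only against this header; zeroRegionDurably and its arguments are invented for illustration, and the real write paths go through the recovery-unit helpers instead of calling the interface directly:

    #include <cstring>
    #include <utility>
    #include <vector>

    #include "mongo/db/storage/mmap_v1/dur.h"

    void zeroRegionDurably(char* privateViewPtr, unsigned len) {
        // "I am about to write to [privateViewPtr, privateViewPtr + len); log it for redo."
        std::vector<std::pair<void*, unsigned>> intents;
        intents.push_back(std::make_pair(static_cast<void*>(privateViewPtr), len));
        mongo::getDur().declareWriteIntents(intents);

        // The actual modification happens against the private (copy-on-write) view.
        std::memset(privateViewPtr, 0, len);

        // Long-running writers call this periodically so a single group commit never
        // accumulates much more than UncommittedBytesLimit of dirty data.
        mongo::getDur().commitIfNeeded();
    }
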
diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
index 27e7681b17c..aff01c1c7bf 100644
--- a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
@@ -44,83 +44,76 @@
namespace mongo {
- using std::shared_ptr;
- using std::endl;
- using std::max;
- using std::min;
+using std::shared_ptr;
+using std::endl;
+using std::max;
+using std::min;
namespace dur {
- void WriteIntent::absorb(const WriteIntent& other) {
- dassert(overlaps(other));
+void WriteIntent::absorb(const WriteIntent& other) {
+ dassert(overlaps(other));
- void* newStart = min(start(), other.start());
- p = max(p, other.p);
- len = (char*)p - (char*)newStart;
+ void* newStart = min(start(), other.start());
+ p = max(p, other.p);
+ len = (char*)p - (char*)newStart;
- dassert(contains(other));
- }
+ dassert(contains(other));
+}
- CommitJob::CommitJob() :
- _hasWritten(false),
- _lastNotedPos(0),
- _bytes(0) {
+CommitJob::CommitJob() : _hasWritten(false), _lastNotedPos(0), _bytes(0) {}
- }
+CommitJob::~CommitJob() {}
- CommitJob::~CommitJob() {
+void CommitJob::noteOp(shared_ptr<DurOp> p) {
+ stdx::lock_guard<SimpleMutex> lk(groupCommitMutex);
+ _hasWritten = true;
+ _durOps.push_back(p);
+}
- }
-
- void CommitJob::noteOp(shared_ptr<DurOp> p) {
- stdx::lock_guard<SimpleMutex> lk(groupCommitMutex);
- _hasWritten = true;
- _durOps.push_back(p);
- }
+void CommitJob::note(void* p, int len) {
+ _hasWritten = true;
- void CommitJob::note(void* p, int len) {
- _hasWritten = true;
+ if (!_alreadyNoted.checkAndSet(p, len)) {
+ // Remember intent. We will journal it in a bit.
+ _insertWriteIntent(p, len);
- if (!_alreadyNoted.checkAndSet(p, len)) {
- // Remember intent. We will journal it in a bit.
- _insertWriteIntent(p, len);
+ // Round off to page address (4KB).
+ const size_t x = ((size_t)p) & ~0xfff;
- // Round off to page address (4KB).
- const size_t x = ((size_t)p) & ~0xfff;
+ if (x != _lastNotedPos) {
+ _lastNotedPos = x;
- if (x != _lastNotedPos) {
- _lastNotedPos = x;
+ // Add the full page amount
+ _bytes += (len + 4095) & ~0xfff;
- // Add the full page amount
- _bytes += (len + 4095) & ~0xfff;
+ if (_bytes > UncommittedBytesLimit * 3) {
+ _complains++;
- if (_bytes > UncommittedBytesLimit * 3) {
- _complains++;
+ // Throttle logging
+ if (_complains < 100 || (curTimeMillis64() - _lastComplainMs >= 60000)) {
+ _lastComplainMs = curTimeMillis64();
- // Throttle logging
- if (_complains < 100 || (curTimeMillis64() - _lastComplainMs >= 60000)) {
- _lastComplainMs = curTimeMillis64();
+ warning() << "DR102 too much data written uncommitted (" << _bytes / 1000000.0
+ << "MB)";
- warning() << "DR102 too much data written uncommitted ("
- << _bytes / 1000000.0 << "MB)";
-
- if (_complains < 10 || _complains % 10 == 0) {
- printStackTrace();
- }
+ if (_complains < 10 || _complains % 10 == 0) {
+ printStackTrace();
}
}
}
}
}
-
- void CommitJob::committingReset() {
- _hasWritten = false;
- _alreadyNoted.clear();
- _intents.clear();
- _durOps.clear();
- _bytes = 0;
- }
-
-} // namespace "dur"
-} // namespace "mongo"
+}
+
+void CommitJob::committingReset() {
+ _hasWritten = false;
+ _alreadyNoted.clear();
+ _intents.clear();
+ _durOps.clear();
+ _bytes = 0;
+}
+
+} // namespace "dur"
+} // namespace "mongo"
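
For the byte accounting in CommitJob::note() above: the written address is rounded down to its 4KB page with the ~0xfff mask, and the length is rounded up to whole pages with (len + 4095) & ~0xfff before being added to the uncommitted total. A small self-contained illustration of that arithmetic, with an arbitrary address and length:

    #include <cstddef>
    #include <cstdio>

    int main() {
        const std::size_t p = 0x7f3a12345678;                    // an address inside the private view
        const std::size_t pageStart = p & ~std::size_t(0xfff);   // round down to the 4KB page
        const int len = 5000;                                    // bytes about to be written
        const int charged = (len + 4095) & ~0xfff;               // round up to whole pages -> 8192

        std::printf("page start: %#zx, bytes charged: %d\n", pageStart, charged);
        // The estimate is deliberately coarse: it only charges when the page of the write
        // differs from the previously noted one, which is good enough for deciding when
        // to warn (DR102) and when to commit early.
        return 0;
    }
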
diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.h b/src/mongo/db/storage/mmap_v1/dur_commitjob.h
index b2d07c3b293..8261b613c57 100644
--- a/src/mongo/db/storage/mmap_v1/dur_commitjob.h
+++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.h
@@ -35,179 +35,191 @@
namespace mongo {
namespace dur {
- typedef std::vector<std::shared_ptr<DurOp> > DurOpsVector;
+typedef std::vector<std::shared_ptr<DurOp>> DurOpsVector;
- /**
- * Declaration of an intent to write to a region of a memory mapped view. We store the end
- * rather than the start pointer to make operator < faster since that is heavily used in
- * set lookup.
- */
- struct WriteIntent {
- WriteIntent() : p(0) { }
- WriteIntent(void *a, unsigned b) : p((char*)a + b), len(b) { }
+/**
+ * Declaration of an intent to write to a region of a memory mapped view. We store the end
+ * rather than the start pointer to make operator < faster since that is heavily used in
+ * set lookup.
+ */
+struct WriteIntent {
+ WriteIntent() : p(0) {}
+ WriteIntent(void* a, unsigned b) : p((char*)a + b), len(b) {}
+
+ void* start() const {
+ return (char*)p - len;
+ }
+ void* end() const {
+ return p;
+ }
+ unsigned length() const {
+ return len;
+ }
+ bool operator<(const WriteIntent& rhs) const {
+ return end() < rhs.end();
+ }
+
+ bool overlaps(const WriteIntent& rhs) const {
+ return (start() <= rhs.end() && end() >= rhs.start());
+ }
+
+ bool contains(const WriteIntent& rhs) const {
+ return (start() <= rhs.start() && end() >= rhs.end());
+ }
+
+ // merge into me:
+ void absorb(const WriteIntent& other);
+
+ friend std::ostream& operator<<(std::ostream& out, const WriteIntent& wi) {
+ return (out << "p: " << wi.p << " end: " << wi.end() << " len: " << wi.len);
+ }
+
+private:
+ void* p; // intent to write up to p
+ unsigned len; // up to this len
+};
+
+typedef std::vector<WriteIntent> WriteIntentsVector;
- void* start() const { return (char*)p - len; }
- void* end() const { return p; }
- unsigned length() const { return len; }
- bool operator < (const WriteIntent& rhs) const { return end() < rhs.end(); }
- bool overlaps(const WriteIntent& rhs) const {
- return (start() <= rhs.end() && end() >= rhs.start());
- }
+/**
+ * Bitmap to remember things we have already marked for journaling. False negatives are ok
+ * if infrequent, since they only impact performance, not correctness.
+ */
+template <int Prime>
+class Already {
+ MONGO_DISALLOW_COPYING(Already);
+
+public:
+ Already() {
+ clear();
+ }
+
+ void clear() {
+ memset(this, 0, sizeof(*this));
+ }
- bool contains(const WriteIntent& rhs) const {
- return (start() <= rhs.start() && end() >= rhs.end());
+ /**
+ * Checks if we have Already recorded/indicated our write intent for this region of
+ * memory and automatically upgrades the length if the length was shorter previously.
+ *
+ * @return true if already indicated.
+ */
+ bool checkAndSet(void* p, int len) {
+ const unsigned x = hashPointer(p);
+ std::pair<void*, int>& nd = nodes[x % Prime];
+
+ if (nd.first == p) {
+ if (nd.second < len) {
+ nd.second = len;
+ return false; // haven't indicated this len yet
+ }
+ return true; // already indicated
}
- // merge into me:
- void absorb(const WriteIntent& other);
-
- friend std::ostream& operator << (std::ostream& out, const WriteIntent& wi) {
- return (out << "p: " << wi.p << " end: " << wi.end() << " len: " << wi.len);
+ nd.first = p;
+ nd.second = len;
+ return false; // a new set
+ }
+
+private:
+ static unsigned hashPointer(void* v) {
+ unsigned x = 0;
+ unsigned char* p = (unsigned char*)&v;
+ for (unsigned i = 0; i < sizeof(void*); i++) {
+ x = x * 131 + p[i];
}
+ return x;
+ }
+
+ std::pair<void*, int> nodes[Prime];
+};
- private:
- void *p; // intent to write up to p
- unsigned len; // up to this len
- };
- typedef std::vector<WriteIntent> WriteIntentsVector;
+/**
+ * Tracks all write operations on the private view so they can be journaled.
+ */
+class CommitJob {
+ MONGO_DISALLOW_COPYING(CommitJob);
+public:
+ CommitJob();
+ ~CommitJob();
/**
- * Bitmap to remember things we have already marked for journaling. False negatives are ok
- * if infrequent, since they impact performance.
+ * Note an operation other than a "basic write".
*/
- template<int Prime>
- class Already {
- MONGO_DISALLOW_COPYING(Already);
- public:
- Already() {
- clear();
- }
-
- void clear() {
- memset(this, 0, sizeof(*this));
- }
-
- /**
- * Checks if we have Already recorded/indicated our write intent for this region of
- * memory and automatically upgrades the length if the length was shorter previously.
- *
- * @return true if already indicated.
- */
- bool checkAndSet(void* p, int len) {
- const unsigned x = hashPointer(p);
- std::pair<void*, int>& nd = nodes[x % Prime];
-
- if (nd.first == p) {
- if (nd.second < len) {
- nd.second = len;
- return false; // haven't indicated this len yet
- }
- return true; // already indicated
- }
-
- nd.first = p;
- nd.second = len;
- return false; // a new set
- }
+ void noteOp(std::shared_ptr<DurOp> p);
- private:
-
- static unsigned hashPointer(void *v) {
- unsigned x = 0;
- unsigned char *p = (unsigned char *)&v;
- for (unsigned i = 0; i < sizeof(void*); i++) {
- x = x * 131 + p[i];
- }
- return x;
- }
+ /**
+ * Record/note an intent to write.
+ *
+ * NOTE: Not thread safe. Requires the mutex to be locked.
+ */
+ void note(void* p, int len);
- std::pair<void*, int> nodes[Prime];
- };
+ /**
+ * When this value is false we don't have to do any group commit.
+ */
+ bool hasWritten() const {
+ return _hasWritten;
+ }
+ /**
+ * We use the commitjob object over and over, calling committingReset() rather than
+ * reconstructing.
+ */
+ void committingReset();
/**
- * Tracks all write operations on the private view so they can be journaled.
+     * We check how much has been written and, if it is getting to be a lot, we commit sooner.
*/
- class CommitJob {
- MONGO_DISALLOW_COPYING(CommitJob);
- public:
- CommitJob();
- ~CommitJob();
-
- /**
- * Note an operation other than a "basic write".
- */
- void noteOp(std::shared_ptr<DurOp> p);
-
- /**
- * Record/note an intent to write.
- *
- * NOTE: Not thread safe. Requires the mutex to be locked.
- */
- void note(void* p, int len);
-
- /**
- * When this value is false we don't have to do any group commit.
- */
- bool hasWritten() const { return _hasWritten; }
-
- /**
- * We use the commitjob object over and over, calling committingReset() rather than
- * reconstructing.
- */
- void committingReset();
-
- /**
- * We check how much written and if it is getting to be a lot, we commit sooner.
- */
- size_t bytes() const { return _bytes; }
-
- /**
- * Sorts the internal list of write intents so that overlapping and duplicate items can be
- * merged. We do the sort here so the caller receives something they must keep const from
- * their POV.
- */
- const WriteIntentsVector& getIntentsSorted() {
- sort(_intents.begin(), _intents.end());
- return _intents;
- }
+ size_t bytes() const {
+ return _bytes;
+ }
- const DurOpsVector& ops() const {
- return _durOps;
- }
+ /**
+ * Sorts the internal list of write intents so that overlapping and duplicate items can be
+ * merged. We do the sort here so the caller receives something they must keep const from
+ * their POV.
+ */
+ const WriteIntentsVector& getIntentsSorted() {
+ sort(_intents.begin(), _intents.end());
+ return _intents;
+ }
- SimpleMutex groupCommitMutex;
+ const DurOpsVector& ops() const {
+ return _durOps;
+ }
- private:
+ SimpleMutex groupCommitMutex;
- void _insertWriteIntent(void* p, int len) {
- _intents.push_back(WriteIntent(p, len));
- wassert(_intents.size() < 2000000);
- }
+private:
+ void _insertWriteIntent(void* p, int len) {
+ _intents.push_back(WriteIntent(p, len));
+ wassert(_intents.size() < 2000000);
+ }
- // Whether we put write intents or durops
- bool _hasWritten;
+ // Whether we put write intents or durops
+ bool _hasWritten;
- // Write intents along with a bitmask for whether we have already noted them
- Already<127> _alreadyNoted;
- WriteIntentsVector _intents;
+ // Write intents along with a bitmask for whether we have already noted them
+ Already<127> _alreadyNoted;
+ WriteIntentsVector _intents;
- // All the ops other than basic writes
- DurOpsVector _durOps;
+ // All the ops other than basic writes
+ DurOpsVector _durOps;
- // Used to count the private map used bytes. Note that _lastNotedPos doesn't reset with
- // each commit, but that is ok we aren't being that precise.
- size_t _lastNotedPos;
- size_t _bytes;
+ // Used to count the private map used bytes. Note that _lastNotedPos doesn't reset with
+ // each commit, but that is ok we aren't being that precise.
+ size_t _lastNotedPos;
+ size_t _bytes;
- // Warning logging for large commits
- uint64_t _lastComplainMs;
- unsigned _complains;
- };
+ // Warning logging for large commits
+ uint64_t _lastComplainMs;
+ unsigned _complains;
+};
-} // namespace "dur"
-} // namespace "mongo"
+} // namespace "dur"
+} // namespace "mongo"
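
The Already<Prime> template above acts as a fixed-size, lossy pointer cache: each pointer hashes to exactly one of Prime slots, so a collision simply evicts the previous entry, and the only cost of the resulting false negative is a redundant write intent. A hypothetical usage sketch, assuming dur_commitjob.h is on the include path:

    #include "mongo/db/storage/mmap_v1/dur_commitjob.h"

    void alreadyExample() {
        mongo::dur::Already<127> seen;
        char region[256];

        bool a = seen.checkAndSet(region, 64);   // false: first time, caller notes the intent
        bool b = seen.checkAndSet(region, 64);   // true: same pointer, length already covered
        bool c = seen.checkAndSet(region, 128);  // false: longer write, slot upgraded, note again

        (void)a;
        (void)b;
        (void)c;
    }
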
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.cpp b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
index a76ade46128..66c88e3e156 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
@@ -58,7 +58,7 @@
#include "mongo/util/hex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
-#include "mongo/util/net/listen.h" // getelapsedtimemillis
+#include "mongo/util/net/listen.h" // getelapsedtimemillis
#include "mongo/util/progress_meter.h"
#include "mongo/util/timer.h"
@@ -66,732 +66,727 @@ using namespace mongoutils;
namespace mongo {
- using std::endl;
- using std::hex;
- using std::string;
+using std::endl;
+using std::hex;
+using std::string;
- class AlignedBuilder;
+class AlignedBuilder;
- namespace dur {
- // Rotate after reaching this data size in a journal (j._<n>) file
- // We use a smaller size for 32 bit as the journal is mmapped during recovery (only)
- // Note if you take a set of datafiles, including journal files, from 32->64 or vice-versa, it must
- // work. (and should as-is)
- // --smallfiles makes the limit small.
+namespace dur {
+// Rotate after reaching this data size in a journal (j._<n>) file
+// We use a smaller size for 32 bit as the journal is mmapped during recovery (only)
+// Note if you take a set of datafiles, including journal files, from 32->64 or vice-versa, it must
+// work. (and should as-is)
+// --smallfiles makes the limit small.
#if defined(MONGO_CONFIG_DEBUG_BUILD)
- unsigned long long DataLimitPerJournalFile = 128 * 1024 * 1024;
+unsigned long long DataLimitPerJournalFile = 128 * 1024 * 1024;
#elif defined(__APPLE__)
- // assuming a developer box if OS X
- unsigned long long DataLimitPerJournalFile = 256 * 1024 * 1024;
+// assuming a developer box if OS X
+unsigned long long DataLimitPerJournalFile = 256 * 1024 * 1024;
#else
- unsigned long long DataLimitPerJournalFile = (sizeof(void*)==4) ? 256 * 1024 * 1024 : 1 * 1024 * 1024 * 1024;
+unsigned long long DataLimitPerJournalFile =
+ (sizeof(void*) == 4) ? 256 * 1024 * 1024 : 1 * 1024 * 1024 * 1024;
#endif
- MONGO_INITIALIZER(InitializeJournalingParams)(InitializerContext* context) {
- if (mmapv1GlobalOptions.smallfiles == true) {
- verify(dur::DataLimitPerJournalFile >= 128 * 1024 * 1024);
- dur::DataLimitPerJournalFile = 128 * 1024 * 1024;
- }
- return Status::OK();
- }
+MONGO_INITIALIZER(InitializeJournalingParams)(InitializerContext* context) {
+ if (mmapv1GlobalOptions.smallfiles == true) {
+ verify(dur::DataLimitPerJournalFile >= 128 * 1024 * 1024);
+ dur::DataLimitPerJournalFile = 128 * 1024 * 1024;
+ }
+ return Status::OK();
+}
- BOOST_STATIC_ASSERT( sizeof(Checksum) == 16 );
- BOOST_STATIC_ASSERT( sizeof(JHeader) == 8192 );
- BOOST_STATIC_ASSERT( sizeof(JSectHeader) == 20 );
- BOOST_STATIC_ASSERT( sizeof(JSectFooter) == 32 );
- BOOST_STATIC_ASSERT( sizeof(JEntry) == 12 );
- BOOST_STATIC_ASSERT( sizeof(LSNFile) == 88 );
+BOOST_STATIC_ASSERT(sizeof(Checksum) == 16);
+BOOST_STATIC_ASSERT(sizeof(JHeader) == 8192);
+BOOST_STATIC_ASSERT(sizeof(JSectHeader) == 20);
+BOOST_STATIC_ASSERT(sizeof(JSectFooter) == 32);
+BOOST_STATIC_ASSERT(sizeof(JEntry) == 12);
+BOOST_STATIC_ASSERT(sizeof(LSNFile) == 88);
- bool usingPreallocate = false;
+bool usingPreallocate = false;
- void removeOldJournalFile(boost::filesystem::path p);
+void removeOldJournalFile(boost::filesystem::path p);
- boost::filesystem::path getJournalDir() {
- boost::filesystem::path p(storageGlobalParams.dbpath);
- p /= "journal";
- return p;
- }
+boost::filesystem::path getJournalDir() {
+ boost::filesystem::path p(storageGlobalParams.dbpath);
+ p /= "journal";
+ return p;
+}
- boost::filesystem::path lsnPath() {
- return getJournalDir()/"lsn";
- }
+boost::filesystem::path lsnPath() {
+ return getJournalDir() / "lsn";
+}
- /** this should be called when something really bad happens so that we can flag appropriately
- */
- void journalingFailure(const char *msg) {
- /** todo:
- (1) don't log too much
- (2) make an indicator in the journal dir that something bad happened.
- (2b) refuse to do a recovery startup if that is there without manual override.
- */
- log() << "journaling failure/error: " << msg << endl;
- verify(false);
- }
+/** this should be called when something really bad happens so that we can flag appropriately
+*/
+void journalingFailure(const char* msg) {
+ /** todo:
+ (1) don't log too much
+ (2) make an indicator in the journal dir that something bad happened.
+ (2b) refuse to do a recovery startup if that is there without manual override.
+ */
+ log() << "journaling failure/error: " << msg << endl;
+ verify(false);
+}
- JSectFooter::JSectFooter() {
- memset(this, 0, sizeof(*this));
- sentinel = JEntry::OpCode_Footer;
- }
+JSectFooter::JSectFooter() {
+ memset(this, 0, sizeof(*this));
+ sentinel = JEntry::OpCode_Footer;
+}
- JSectFooter::JSectFooter(const void* begin, int len) { // needs buffer to compute hash
- sentinel = JEntry::OpCode_Footer;
- reserved = 0;
- magic[0] = magic[1] = magic[2] = magic[3] = '\n';
+JSectFooter::JSectFooter(const void* begin, int len) { // needs buffer to compute hash
+ sentinel = JEntry::OpCode_Footer;
+ reserved = 0;
+ magic[0] = magic[1] = magic[2] = magic[3] = '\n';
- Checksum c;
- c.gen(begin, (unsigned) len);
- memcpy(hash, c.bytes, sizeof(hash));
- }
+ Checksum c;
+ c.gen(begin, (unsigned)len);
+ memcpy(hash, c.bytes, sizeof(hash));
+}
- bool JSectFooter::checkHash(const void* begin, int len) const {
- if( !magicOk() ) {
- log() << "journal footer not valid" << endl;
- return false;
- }
- Checksum c;
- c.gen(begin, len);
- DEV log() << "checkHash len:" << len << " hash:" << toHex(hash, 16) << " current:" << toHex(c.bytes, 16) << endl;
- if( memcmp(hash, c.bytes, sizeof(hash)) == 0 )
- return true;
- log() << "journal checkHash mismatch, got: " << toHex(c.bytes, 16) << " expected: " << toHex(hash,16) << endl;
- return false;
- }
+bool JSectFooter::checkHash(const void* begin, int len) const {
+ if (!magicOk()) {
+ log() << "journal footer not valid" << endl;
+ return false;
+ }
+ Checksum c;
+ c.gen(begin, len);
+ DEV log() << "checkHash len:" << len << " hash:" << toHex(hash, 16)
+ << " current:" << toHex(c.bytes, 16) << endl;
+ if (memcmp(hash, c.bytes, sizeof(hash)) == 0)
+ return true;
+ log() << "journal checkHash mismatch, got: " << toHex(c.bytes, 16)
+ << " expected: " << toHex(hash, 16) << endl;
+ return false;
+}
- namespace {
- SecureRandom* mySecureRandom = NULL;
- stdx::mutex mySecureRandomMutex;
- int64_t getMySecureRandomNumber() {
- stdx::lock_guard<stdx::mutex> lk( mySecureRandomMutex );
- if ( ! mySecureRandom )
- mySecureRandom = SecureRandom::create();
- return mySecureRandom->nextInt64();
- }
- }
+namespace {
+SecureRandom* mySecureRandom = NULL;
+stdx::mutex mySecureRandomMutex;
+int64_t getMySecureRandomNumber() {
+ stdx::lock_guard<stdx::mutex> lk(mySecureRandomMutex);
+ if (!mySecureRandom)
+ mySecureRandom = SecureRandom::create();
+ return mySecureRandom->nextInt64();
+}
+}
- JHeader::JHeader(string fname) {
- magic[0] = 'j'; magic[1] = '\n';
- _version = CurrentVersion;
- memset(ts, 0, sizeof(ts));
- time_t t = time(0);
- strncpy(ts, time_t_to_String_short(t).c_str(), sizeof(ts)-1);
- memset(dbpath, 0, sizeof(dbpath));
- strncpy(dbpath, fname.c_str(), sizeof(dbpath)-1);
- {
- fileId = t&0xffffffff;
- fileId |= static_cast<unsigned long long>( getMySecureRandomNumber() ) << 32;
- }
- memset(reserved3, 0, sizeof(reserved3));
- txt2[0] = txt2[1] = '\n';
- n1 = n2 = n3 = n4 = '\n';
- }
+JHeader::JHeader(string fname) {
+ magic[0] = 'j';
+ magic[1] = '\n';
+ _version = CurrentVersion;
+ memset(ts, 0, sizeof(ts));
+ time_t t = time(0);
+ strncpy(ts, time_t_to_String_short(t).c_str(), sizeof(ts) - 1);
+ memset(dbpath, 0, sizeof(dbpath));
+ strncpy(dbpath, fname.c_str(), sizeof(dbpath) - 1);
+ {
+ fileId = t & 0xffffffff;
+ fileId |= static_cast<unsigned long long>(getMySecureRandomNumber()) << 32;
+ }
+ memset(reserved3, 0, sizeof(reserved3));
+ txt2[0] = txt2[1] = '\n';
+ n1 = n2 = n3 = n4 = '\n';
+}
- Journal j;
+Journal j;
- const unsigned long long LsnShutdownSentinel = ~((unsigned long long)0);
+const unsigned long long LsnShutdownSentinel = ~((unsigned long long)0);
- Journal::Journal() {
- _written = 0;
- _nextFileNumber = 0;
- _curLogFile = 0;
- _curFileId = 0;
- _preFlushTime = 0;
- _lastFlushTime = 0;
- _writeToLSNNeeded = false;
- }
+Journal::Journal() {
+ _written = 0;
+ _nextFileNumber = 0;
+ _curLogFile = 0;
+ _curFileId = 0;
+ _preFlushTime = 0;
+ _lastFlushTime = 0;
+ _writeToLSNNeeded = false;
+}
- boost::filesystem::path Journal::getFilePathFor(int filenumber) const {
- boost::filesystem::path p(dir);
- p /= string(str::stream() << "j._" << filenumber);
- return p;
- }
+boost::filesystem::path Journal::getFilePathFor(int filenumber) const {
+ boost::filesystem::path p(dir);
+ p /= string(str::stream() << "j._" << filenumber);
+ return p;
+}
- /** never throws
- @param anyFiles by default we only look at j._* files. If anyFiles is true, return true
- if there are any files in the journal directory. acquirePathLock() uses this to
- make sure that the journal directory is mounted.
- @return true if journal dir is not empty
- */
- bool haveJournalFiles(bool anyFiles) {
- try {
- boost::filesystem::path jdir = getJournalDir();
- if ( !boost::filesystem::exists( jdir ) )
- return false;
-
- for ( boost::filesystem::directory_iterator i( jdir );
- i != boost::filesystem::directory_iterator();
- ++i ) {
- string fileName = boost::filesystem::path(*i).leaf().string();
- if( anyFiles || str::startsWith(fileName, "j._") )
- return true;
- }
- }
- catch(const std::exception& e) {
- log() << "Unable to check for journal files due to: " << e.what() << endl;
- }
+/** never throws
+ @param anyFiles by default we only look at j._* files. If anyFiles is true, return true
+ if there are any files in the journal directory. acquirePathLock() uses this to
+ make sure that the journal directory is mounted.
+ @return true if journal dir is not empty
+*/
+bool haveJournalFiles(bool anyFiles) {
+ try {
+ boost::filesystem::path jdir = getJournalDir();
+ if (!boost::filesystem::exists(jdir))
return false;
+
+ for (boost::filesystem::directory_iterator i(jdir);
+ i != boost::filesystem::directory_iterator();
+ ++i) {
+ string fileName = boost::filesystem::path(*i).leaf().string();
+ if (anyFiles || str::startsWith(fileName, "j._"))
+ return true;
}
+ } catch (const std::exception& e) {
+ log() << "Unable to check for journal files due to: " << e.what() << endl;
+ }
+ return false;
+}
- /** throws */
- void removeJournalFiles() {
- log() << "removeJournalFiles" << endl;
- try {
- for ( boost::filesystem::directory_iterator i( getJournalDir() );
- i != boost::filesystem::directory_iterator();
- ++i ) {
- string fileName = boost::filesystem::path(*i).leaf().string();
- if( str::startsWith(fileName, "j._") ) {
- try {
- removeOldJournalFile(*i);
- }
- catch(std::exception& e) {
- log() << "couldn't remove " << fileName << ' ' << e.what() << endl;
- throw;
- }
- }
- }
+/** throws */
+void removeJournalFiles() {
+ log() << "removeJournalFiles" << endl;
+ try {
+ for (boost::filesystem::directory_iterator i(getJournalDir());
+ i != boost::filesystem::directory_iterator();
+ ++i) {
+ string fileName = boost::filesystem::path(*i).leaf().string();
+ if (str::startsWith(fileName, "j._")) {
try {
- boost::filesystem::remove(lsnPath());
- }
- catch(...) {
- // std::exception details logged in catch below
- log() << "couldn't remove " << lsnPath().string() << endl;
+ removeOldJournalFile(*i);
+ } catch (std::exception& e) {
+ log() << "couldn't remove " << fileName << ' ' << e.what() << endl;
throw;
}
}
- catch( std::exception& e ) {
- log() << "error removing journal files " << e.what() << endl;
- throw;
- }
- verify(!haveJournalFiles());
-
- flushMyDirectory(getJournalDir() / "file"); // flushes parent of argument (in this case journal dir)
-
- LOG(1) << "removeJournalFiles end" << endl;
}
+ try {
+ boost::filesystem::remove(lsnPath());
+ } catch (...) {
+ // std::exception details logged in catch below
+ log() << "couldn't remove " << lsnPath().string() << endl;
+ throw;
+ }
+ } catch (std::exception& e) {
+ log() << "error removing journal files " << e.what() << endl;
+ throw;
+ }
+ verify(!haveJournalFiles());
- /** at clean shutdown */
- bool okToCleanUp = false; // successful recovery would set this to true
- void Journal::cleanup(bool _log) {
- if( !okToCleanUp )
- return;
+ flushMyDirectory(getJournalDir() /
+ "file"); // flushes parent of argument (in this case journal dir)
- if( _log )
- log() << "journalCleanup..." << endl;
- try {
- stdx::lock_guard<SimpleMutex> lk(_curLogFileMutex);
- closeCurrentJournalFile();
- removeJournalFiles();
- }
- catch(std::exception& e) {
- log() << "error couldn't remove journal file during shutdown " << e.what() << endl;
- throw;
- }
- }
- void journalCleanup(bool log) { j.cleanup(log); }
+ LOG(1) << "removeJournalFiles end" << endl;
+}
- bool _preallocateIsFaster() {
- bool faster = false;
- boost::filesystem::path p = getJournalDir() / "tempLatencyTest";
- if (boost::filesystem::exists(p)) {
- try {
- remove(p);
- }
- catch(const std::exception& e) {
- log() << "Unable to remove temporary file due to: " << e.what() << endl;
- }
- }
- try {
- AlignedBuilder b(8192);
- int millis[2];
- const int N = 50;
- for( int pass = 0; pass < 2; pass++ ) {
- LogFile f(p.string());
- Timer t;
- for( int i = 0 ; i < N; i++ ) {
- f.synchronousAppend(b.buf(), 8192);
- }
- millis[pass] = t.millis();
- // second time through, file exists and is prealloc case
- }
- int diff = millis[0] - millis[1];
- if( diff > 2 * N ) {
- // at least 2ms faster for prealloc case?
- faster = true;
- log() << "preallocateIsFaster=true " << diff / (1.0*N) << endl;
- }
- }
- catch (const std::exception& e) {
- log() << "info preallocateIsFaster couldn't run due to: " << e.what()
- << "; returning false" << endl;
- }
- if (boost::filesystem::exists(p)) {
- try {
- remove(p);
- }
- catch(const std::exception& e) {
- log() << "Unable to remove temporary file due to: " << e.what() << endl;
- }
- }
- return faster;
+/** at clean shutdown */
+bool okToCleanUp = false; // successful recovery would set this to true
+void Journal::cleanup(bool _log) {
+ if (!okToCleanUp)
+ return;
+
+ if (_log)
+ log() << "journalCleanup..." << endl;
+ try {
+ stdx::lock_guard<SimpleMutex> lk(_curLogFileMutex);
+ closeCurrentJournalFile();
+ removeJournalFiles();
+ } catch (std::exception& e) {
+ log() << "error couldn't remove journal file during shutdown " << e.what() << endl;
+ throw;
+ }
+}
+void journalCleanup(bool log) {
+ j.cleanup(log);
+}
+
+bool _preallocateIsFaster() {
+ bool faster = false;
+ boost::filesystem::path p = getJournalDir() / "tempLatencyTest";
+ if (boost::filesystem::exists(p)) {
+ try {
+ remove(p);
+ } catch (const std::exception& e) {
+ log() << "Unable to remove temporary file due to: " << e.what() << endl;
}
- bool preallocateIsFaster() {
+ }
+ try {
+ AlignedBuilder b(8192);
+ int millis[2];
+ const int N = 50;
+ for (int pass = 0; pass < 2; pass++) {
+ LogFile f(p.string());
Timer t;
- bool res = false;
- if( _preallocateIsFaster() && _preallocateIsFaster() ) {
- // maybe system is just super busy at the moment? sleep a second to let it calm down.
- // deciding to to prealloc is a medium big decision:
- sleepsecs(1);
- res = _preallocateIsFaster();
+ for (int i = 0; i < N; i++) {
+ f.synchronousAppend(b.buf(), 8192);
}
- if( t.millis() > 3000 )
- log() << "preallocateIsFaster check took " << t.millis()/1000.0 << " secs" << endl;
- return res;
+ millis[pass] = t.millis();
+ // second time through, file exists and is prealloc case
}
-
- // throws
- void preallocateFile(boost::filesystem::path p, unsigned long long len) {
- if( exists(p) )
- return;
-
- log() << "preallocating a journal file " << p.string() << endl;
-
- const unsigned BLKSZ = 1024 * 1024;
- verify( len % BLKSZ == 0 );
-
- AlignedBuilder b(BLKSZ);
- memset((void*)b.buf(), 0, BLKSZ);
-
- ProgressMeter m(len, 3/*secs*/, 10/*hits between time check (once every 6.4MB)*/);
- m.setName("File Preallocator Progress");
-
- File f;
- f.open( p.string().c_str() , /*read-only*/false , /*direct-io*/false );
- verify( f.is_open() );
- fileofs loc = 0;
- while ( loc < len ) {
- f.write( loc , b.buf() , BLKSZ );
- loc += BLKSZ;
- m.hit(BLKSZ);
- }
- verify( loc == len );
- f.fsync();
+ int diff = millis[0] - millis[1];
+ if (diff > 2 * N) {
+ // at least 2ms faster for prealloc case?
+ faster = true;
+ log() << "preallocateIsFaster=true " << diff / (1.0 * N) << endl;
}
-
- const int NUM_PREALLOC_FILES = 3;
- inline boost::filesystem::path preallocPath(int n) {
- verify(n >= 0);
- verify(n < NUM_PREALLOC_FILES);
- string fn = str::stream() << "prealloc." << n;
- return getJournalDir() / fn;
+ } catch (const std::exception& e) {
+ log() << "info preallocateIsFaster couldn't run due to: " << e.what() << "; returning false"
+ << endl;
+ }
+ if (boost::filesystem::exists(p)) {
+ try {
+ remove(p);
+ } catch (const std::exception& e) {
+ log() << "Unable to remove temporary file due to: " << e.what() << endl;
}
+ }
+ return faster;
+}
+bool preallocateIsFaster() {
+ Timer t;
+ bool res = false;
+ if (_preallocateIsFaster() && _preallocateIsFaster()) {
+ // maybe system is just super busy at the moment? sleep a second to let it calm down.
+        // deciding to prealloc is a fairly big decision:
+ sleepsecs(1);
+ res = _preallocateIsFaster();
+ }
+ if (t.millis() > 3000)
+ log() << "preallocateIsFaster check took " << t.millis() / 1000.0 << " secs" << endl;
+ return res;
+}
- // throws
- void _preallocateFiles() {
- for( int i = 0; i < NUM_PREALLOC_FILES; i++ ) {
- boost::filesystem::path filepath = preallocPath(i);
+// throws
+void preallocateFile(boost::filesystem::path p, unsigned long long len) {
+ if (exists(p))
+ return;
- unsigned long long limit = DataLimitPerJournalFile;
- if( kDebugBuild && i == 1 ) {
- // moving 32->64, the prealloc files would be short. that is "ok", but we
- // want to exercise that case, so we force exercising here when
- // MONGO_CONFIG_DEBUG_BUILD is set by arbitrarily stopping prealloc at a
- // low limit for a file. also we want to be able to change in the future
- // the constant without a lot of work anyway.
- limit = 16 * 1024 * 1024;
- }
- preallocateFile(filepath, limit);
- }
- }
+ log() << "preallocating a journal file " << p.string() << endl;
- void checkFreeSpace() {
- unsigned long long spaceNeeded = static_cast<unsigned long long>(3 * DataLimitPerJournalFile * 1.1); // add 10% for headroom
- unsigned long long freeSpace = File::freeSpace(getJournalDir().string());
- unsigned long long prealloced = 0;
- for( int i = 0; i < NUM_PREALLOC_FILES; i++ ) {
- boost::filesystem::path filepath = preallocPath(i);
- if (exists(filepath))
- prealloced += file_size(filepath);
- }
+ const unsigned BLKSZ = 1024 * 1024;
+ verify(len % BLKSZ == 0);
- if (freeSpace + prealloced < spaceNeeded) {
- log() << endl;
- error() << "Insufficient free space for journal files" << endl;
- log() << "Please make at least " << spaceNeeded/(1024*1024) << "MB available in " << getJournalDir().string() << " or use --smallfiles" << endl;
- log() << endl;
- throw UserException(15926, "Insufficient free space for journals");
- }
- }
+ AlignedBuilder b(BLKSZ);
+ memset((void*)b.buf(), 0, BLKSZ);
- void preallocateFiles() {
- if (!(mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalNoCheckSpace))
- checkFreeSpace();
+ ProgressMeter m(len, 3 /*secs*/, 10 /*hits between time check (once every 6.4MB)*/);
+ m.setName("File Preallocator Progress");
- if( exists(preallocPath(0)) || // if enabled previously, keep using
- exists(preallocPath(1)) ||
- (mmapv1GlobalOptions.preallocj && preallocateIsFaster()) ) {
- usingPreallocate = true;
- try {
- _preallocateFiles();
- }
- catch (const std::exception& e) {
- log() << "warning caught exception (" << e.what()
- << ") in preallocateFiles, continuing" << endl;
- }
- }
- j.open();
- }
+ File f;
+ f.open(p.string().c_str(), /*read-only*/ false, /*direct-io*/ false);
+ verify(f.is_open());
+ fileofs loc = 0;
+ while (loc < len) {
+ f.write(loc, b.buf(), BLKSZ);
+ loc += BLKSZ;
+ m.hit(BLKSZ);
+ }
+ verify(loc == len);
+ f.fsync();
+}
- void removeOldJournalFile(boost::filesystem::path p) {
- if( usingPreallocate ) {
- try {
- for( int i = 0; i < NUM_PREALLOC_FILES; i++ ) {
- boost::filesystem::path filepath = preallocPath(i);
- if( !boost::filesystem::exists(filepath) ) {
- // we can recycle this file into this prealloc file location
- boost::filesystem::path temppath = filepath.string() + ".temp";
- boost::filesystem::rename(p, temppath);
- {
- // zero the header
- File f;
- f.open(temppath.string().c_str(), false, false);
- char buf[8192];
- memset(buf, 0, 8192);
- f.write(0, buf, 8192);
- f.truncate(DataLimitPerJournalFile);
- f.fsync();
- }
- boost::filesystem::rename(temppath, filepath);
- return;
- }
- }
- } catch (const std::exception& e) {
- log() << "warning exception in dur::removeOldJournalFile " << p.string()
- << ": " << e.what() << endl;
- // fall through and try to delete the file
- }
- }
+const int NUM_PREALLOC_FILES = 3;
+inline boost::filesystem::path preallocPath(int n) {
+ verify(n >= 0);
+ verify(n < NUM_PREALLOC_FILES);
+ string fn = str::stream() << "prealloc." << n;
+ return getJournalDir() / fn;
+}
- // already have 3 prealloc files, so delete this file
- try {
- boost::filesystem::remove(p);
- }
- catch (const std::exception& e) {
- log() << "warning exception removing " << p.string() << ": " << e.what() << endl;
- }
- }
+// throws
+void _preallocateFiles() {
+ for (int i = 0; i < NUM_PREALLOC_FILES; i++) {
+ boost::filesystem::path filepath = preallocPath(i);
+
+ unsigned long long limit = DataLimitPerJournalFile;
+ if (kDebugBuild && i == 1) {
+ // moving 32->64, the prealloc files would be short. that is "ok", but we
+ // want to exercise that case, so we force exercising here when
+ // MONGO_CONFIG_DEBUG_BUILD is set by arbitrarily stopping prealloc at a
+            // low limit for a file. We also want to be able to change the constant
+            // in the future without a lot of work.
+ limit = 16 * 1024 * 1024;
+ }
+ preallocateFile(filepath, limit);
+ }
+}
- // find a prealloc.<n> file, presumably to take and use
- boost::filesystem::path findPrealloced() {
- try {
- for( int i = 0; i < NUM_PREALLOC_FILES; i++ ) {
- boost::filesystem::path filepath = preallocPath(i);
- if( boost::filesystem::exists(filepath) )
- return filepath;
- }
- } catch (const std::exception& e) {
- log() << "warning exception in dur::findPrealloced(): " << e.what() << endl;
- }
- return boost::filesystem::path();
- }
+void checkFreeSpace() {
+ unsigned long long spaceNeeded =
+ static_cast<unsigned long long>(3 * DataLimitPerJournalFile * 1.1); // add 10% for headroom
+ unsigned long long freeSpace = File::freeSpace(getJournalDir().string());
+ unsigned long long prealloced = 0;
+ for (int i = 0; i < NUM_PREALLOC_FILES; i++) {
+ boost::filesystem::path filepath = preallocPath(i);
+ if (exists(filepath))
+ prealloced += file_size(filepath);
+ }
- /** assure journal/ dir exists. throws. call during startup. */
- void journalMakeDir() {
- j.init();
+ if (freeSpace + prealloced < spaceNeeded) {
+ log() << endl;
+ error() << "Insufficient free space for journal files" << endl;
+ log() << "Please make at least " << spaceNeeded / (1024 * 1024) << "MB available in "
+ << getJournalDir().string() << " or use --smallfiles" << endl;
+ log() << endl;
+ throw UserException(15926, "Insufficient free space for journals");
+ }
+}
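
For a rough sense of scale only (the real DataLimitPerJournalFile depends on the build and on --smallfiles; 1GB per file is purely an assumption here), the headroom check above works out roughly as follows:

    // Illustrative arithmetic only; the 1GB per-file limit is an assumption.
    unsigned long long assumedLimit = 1024ULL * 1024 * 1024;
    unsigned long long spaceNeeded = static_cast<unsigned long long>(3 * assumedLimit * 1.1);
    // spaceNeeded / (1024 * 1024) == 3379, i.e. roughly 3.3GB must be free
    // (or already held by existing prealloc files) before startup proceeds.
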
- boost::filesystem::path p = getJournalDir();
- j.dir = p.string();
- log() << "journal dir=" << j.dir << endl;
- if( !boost::filesystem::exists(j.dir) ) {
- try {
- boost::filesystem::create_directory(j.dir);
- }
- catch(std::exception& e) {
- log() << "error creating directory " << j.dir << ' ' << e.what() << endl;
- throw;
- }
- }
+void preallocateFiles() {
+ if (!(mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalNoCheckSpace))
+ checkFreeSpace();
+
+ if (exists(preallocPath(0)) || // if enabled previously, keep using
+ exists(preallocPath(1)) ||
+ (mmapv1GlobalOptions.preallocj && preallocateIsFaster())) {
+ usingPreallocate = true;
+ try {
+ _preallocateFiles();
+ } catch (const std::exception& e) {
+ log() << "warning caught exception (" << e.what() << ") in preallocateFiles, continuing"
+ << endl;
}
+ }
+ j.open();
+}
- void Journal::_open() {
- _curFileId = 0;
- verify( _curLogFile == 0 );
- boost::filesystem::path fname = getFilePathFor(_nextFileNumber);
-
- // if we have a prealloced file, use it
- {
- boost::filesystem::path p = findPrealloced();
- if( !p.empty() ) {
- try {
- {
- // JHeader::fileId must be updated before renaming to be race-safe
- LogFile f(p.string());
- JHeader h(p.string());
- AlignedBuilder b(8192);
- b.appendStruct(h);
- f.synchronousAppend(b.buf(), b.len());
- }
- boost::filesystem::rename(p, fname);
- }
- catch (const std::exception& e) {
- log() << "warning couldn't write to / rename file " << p.string()
- << ": " << e.what() << endl;
+void removeOldJournalFile(boost::filesystem::path p) {
+ if (usingPreallocate) {
+ try {
+ for (int i = 0; i < NUM_PREALLOC_FILES; i++) {
+ boost::filesystem::path filepath = preallocPath(i);
+ if (!boost::filesystem::exists(filepath)) {
+ // we can recycle this file into this prealloc file location
+ boost::filesystem::path temppath = filepath.string() + ".temp";
+ boost::filesystem::rename(p, temppath);
+ {
+ // zero the header
+ File f;
+ f.open(temppath.string().c_str(), false, false);
+ char buf[8192];
+ memset(buf, 0, 8192);
+ f.write(0, buf, 8192);
+ f.truncate(DataLimitPerJournalFile);
+ f.fsync();
}
+ boost::filesystem::rename(temppath, filepath);
+ return;
}
}
-
- _curLogFile = new LogFile(fname.string());
- _nextFileNumber++;
- {
- JHeader h(fname.string());
- _curFileId = h.fileId;
- verify(_curFileId);
- AlignedBuilder b(8192);
- b.appendStruct(h);
- _curLogFile->synchronousAppend(b.buf(), b.len());
- }
+ } catch (const std::exception& e) {
+ log() << "warning exception in dur::removeOldJournalFile " << p.string() << ": "
+ << e.what() << endl;
+ // fall through and try to delete the file
}
+ }
- void Journal::init() {
- verify( _curLogFile == 0 );
- MongoFile::notifyPreFlush = preFlush;
- MongoFile::notifyPostFlush = postFlush;
- }
+ // already have 3 prealloc files, so delete this file
+ try {
+ boost::filesystem::remove(p);
+ } catch (const std::exception& e) {
+ log() << "warning exception removing " << p.string() << ": " << e.what() << endl;
+ }
+}
- void Journal::open() {
- verify( MongoFile::notifyPreFlush == preFlush );
- stdx::lock_guard<SimpleMutex> lk(_curLogFileMutex);
- _open();
+// find a prealloc.<n> file, presumably to take and use
+boost::filesystem::path findPrealloced() {
+ try {
+ for (int i = 0; i < NUM_PREALLOC_FILES; i++) {
+ boost::filesystem::path filepath = preallocPath(i);
+ if (boost::filesystem::exists(filepath))
+ return filepath;
}
+ } catch (const std::exception& e) {
+ log() << "warning exception in dur::findPrealloced(): " << e.what() << endl;
+ }
+ return boost::filesystem::path();
+}
- void LSNFile::set(unsigned long long x) {
- memset(this, 0, sizeof(*this));
- lsn = x;
- checkbytes = ~x;
- }
+/** assure journal/ dir exists. throws. call during startup. */
+void journalMakeDir() {
+ j.init();
- /** logs details of the situation, and returns 0, if anything surprising in the LSNFile
- if something highly surprising, throws to abort
- */
- unsigned long long LSNFile::get() {
- uassert(13614, str::stream() << "unexpected version number of lsn file in journal/ directory got: " << ver , ver == 0);
- if( ~lsn != checkbytes ) {
- log() << "lsnfile not valid. recovery will be from log start. lsn: " << hex << lsn << " checkbytes: " << hex << checkbytes << endl;
- return 0;
- }
- return lsn;
+ boost::filesystem::path p = getJournalDir();
+ j.dir = p.string();
+ log() << "journal dir=" << j.dir << endl;
+ if (!boost::filesystem::exists(j.dir)) {
+ try {
+ boost::filesystem::create_directory(j.dir);
+ } catch (std::exception& e) {
+ log() << "error creating directory " << j.dir << ' ' << e.what() << endl;
+ throw;
}
+ }
+}
- /** called during recovery (the error message text below assumes that)
- */
- unsigned long long journalReadLSN() {
- if( !exists(lsnPath()) ) {
- log() << "info no lsn file in journal/ directory" << endl;
- return 0;
- }
+void Journal::_open() {
+ _curFileId = 0;
+ verify(_curLogFile == 0);
+ boost::filesystem::path fname = getFilePathFor(_nextFileNumber);
+ // if we have a prealloced file, use it
+ {
+ boost::filesystem::path p = findPrealloced();
+ if (!p.empty()) {
try {
- // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
- // however, given we actually close the file when writing, that seems unlikely.
- LSNFile L;
- File f;
- f.open(lsnPath().string().c_str());
- verify(f.is_open());
- if( f.len() == 0 ) {
- // this could be 'normal' if we crashed at the right moment
- log() << "info lsn file is zero bytes long" << endl;
- return 0;
+ {
+ // JHeader::fileId must be updated before renaming to be race-safe
+ LogFile f(p.string());
+ JHeader h(p.string());
+ AlignedBuilder b(8192);
+ b.appendStruct(h);
+ f.synchronousAppend(b.buf(), b.len());
}
- f.read(0,(char*)&L, sizeof(L));
- unsigned long long lsn = L.get();
- return lsn;
- }
- catch(std::exception& e) {
- uasserted(13611, str::stream() << "can't read lsn file in journal directory : " << e.what());
+ boost::filesystem::rename(p, fname);
+ } catch (const std::exception& e) {
+ log() << "warning couldn't write to / rename file " << p.string() << ": "
+ << e.what() << endl;
}
- return 0;
}
+ }
- unsigned long long getLastDataFileFlushTime() {
- return j.lastFlushTime();
- }
+ _curLogFile = new LogFile(fname.string());
+ _nextFileNumber++;
+ {
+ JHeader h(fname.string());
+ _curFileId = h.fileId;
+ verify(_curFileId);
+ AlignedBuilder b(8192);
+ b.appendStruct(h);
+ _curLogFile->synchronousAppend(b.buf(), b.len());
+ }
+}
- /** remember "last sequence number" to speed recoveries
- concurrency: called by durThread only.
- */
- void Journal::updateLSNFile() {
- if( !_writeToLSNNeeded )
- return;
- _writeToLSNNeeded = false;
- try {
- // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
- // however, given we actually close the file, that seems unlikely.
- File f;
- f.open(lsnPath().string().c_str());
- if( !f.is_open() ) {
- // can get 0 if an i/o error
- log() << "warning: open of lsn file failed" << endl;
- return;
- }
- LOG(1) << "lsn set " << _lastFlushTime << endl;
- LSNFile lsnf;
- lsnf.set(_lastFlushTime);
- f.write(0, (char*)&lsnf, sizeof(lsnf));
- // do we want to fsync here? if we do it probably needs to be async so the durthread
- // is not delayed.
- }
- catch(std::exception& e) {
- log() << "warning: write to lsn file failed " << e.what() << endl;
- // keep running (ignore the error). recovery will be slow.
- }
- }
+void Journal::init() {
+ verify(_curLogFile == 0);
+ MongoFile::notifyPreFlush = preFlush;
+ MongoFile::notifyPostFlush = postFlush;
+}
- void Journal::preFlush() {
- j._preFlushTime = Listener::getElapsedTimeMillis();
- }
+void Journal::open() {
+ verify(MongoFile::notifyPreFlush == preFlush);
+ stdx::lock_guard<SimpleMutex> lk(_curLogFileMutex);
+ _open();
+}
- void Journal::postFlush() {
- j._lastFlushTime = j._preFlushTime;
- j._writeToLSNNeeded = true;
- }
+void LSNFile::set(unsigned long long x) {
+ memset(this, 0, sizeof(*this));
+ lsn = x;
+ checkbytes = ~x;
+}
- // call from within _curLogFileMutex
- void Journal::closeCurrentJournalFile() {
- if (!_curLogFile)
- return;
+/** logs details of the situation and returns 0 if anything surprising is found in the LSNFile;
+    if something highly surprising is found, throws to abort
+*/
+unsigned long long LSNFile::get() {
+ uassert(
+ 13614,
+ str::stream() << "unexpected version number of lsn file in journal/ directory got: " << ver,
+ ver == 0);
+ if (~lsn != checkbytes) {
+ log() << "lsnfile not valid. recovery will be from log start. lsn: " << hex << lsn
+ << " checkbytes: " << hex << checkbytes << endl;
+ return 0;
+ }
+ return lsn;
+}
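
LSNFile::set and LSNFile::get pair the stored lsn with its bitwise complement as a cheap torn-write check. A minimal self-contained sketch of the same idea (the names here are illustrative, not the server's types):

    // Complement-based integrity check, as used by LSNFile above.
    #include <cstdint>
    #include <cstring>

    struct LsnRecord {
        uint64_t lsn;
        uint64_t checkbytes;
    };

    LsnRecord makeRecord(uint64_t lsn) {
        LsnRecord r;
        std::memset(&r, 0, sizeof(r));
        r.lsn = lsn;
        r.checkbytes = ~lsn;  // complement stored alongside the value
        return r;
    }

    // Returns 0 ("recover from the start of the log") when the record looks
    // torn or corrupted, i.e. the complement no longer matches.
    uint64_t readRecord(const LsnRecord& r) {
        return (~r.lsn == r.checkbytes) ? r.lsn : 0;
    }
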
- JFile jf;
- jf.filename = _curLogFile->_name;
- jf.lastEventTimeMs = Listener::getElapsedTimeMillis();
- _oldJournalFiles.push_back(jf);
+/** called during recovery (the error message text below assumes that)
+*/
+unsigned long long journalReadLSN() {
+ if (!exists(lsnPath())) {
+ log() << "info no lsn file in journal/ directory" << endl;
+ return 0;
+ }
- delete _curLogFile; // close
- _curLogFile = 0;
- _written = 0;
+ try {
+ // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
+ // however, given we actually close the file when writing, that seems unlikely.
+ LSNFile L;
+ File f;
+ f.open(lsnPath().string().c_str());
+ verify(f.is_open());
+ if (f.len() == 0) {
+ // this could be 'normal' if we crashed at the right moment
+ log() << "info lsn file is zero bytes long" << endl;
+ return 0;
}
+ f.read(0, (char*)&L, sizeof(L));
+ unsigned long long lsn = L.get();
+ return lsn;
+ } catch (std::exception& e) {
+ uasserted(13611,
+ str::stream() << "can't read lsn file in journal directory : " << e.what());
+ }
+ return 0;
+}
- /** remove older journal files.
- be in _curLogFileMutex but not dbMutex when calling
- */
- void Journal::removeUnneededJournalFiles() {
- while( !_oldJournalFiles.empty() ) {
- JFile f = _oldJournalFiles.front();
-
- if( f.lastEventTimeMs < _lastFlushTime + ExtraKeepTimeMs ) {
- // eligible for deletion
- boost::filesystem::path p( f.filename );
- log() << "old journal file will be removed: " << f.filename << endl;
- removeOldJournalFile(p);
- }
- else {
- break;
- }
+unsigned long long getLastDataFileFlushTime() {
+ return j.lastFlushTime();
+}
- _oldJournalFiles.pop_front();
- }
- }
+/** remember "last sequence number" to speed recoveries
+ concurrency: called by durThread only.
+*/
+void Journal::updateLSNFile() {
+ if (!_writeToLSNNeeded)
+ return;
+ _writeToLSNNeeded = false;
+ try {
+ // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
+ // however, given we actually close the file, that seems unlikely.
+ File f;
+ f.open(lsnPath().string().c_str());
+ if (!f.is_open()) {
+ // can get 0 if an i/o error
+ log() << "warning: open of lsn file failed" << endl;
+ return;
+ }
+ LOG(1) << "lsn set " << _lastFlushTime << endl;
+ LSNFile lsnf;
+ lsnf.set(_lastFlushTime);
+ f.write(0, (char*)&lsnf, sizeof(lsnf));
+ // do we want to fsync here? if we do it probably needs to be async so the durthread
+ // is not delayed.
+ } catch (std::exception& e) {
+ log() << "warning: write to lsn file failed " << e.what() << endl;
+ // keep running (ignore the error). recovery will be slow.
+ }
+}
- void Journal::_rotate() {
+void Journal::preFlush() {
+ j._preFlushTime = Listener::getElapsedTimeMillis();
+}
- if ( inShutdown() || !_curLogFile )
- return;
+void Journal::postFlush() {
+ j._lastFlushTime = j._preFlushTime;
+ j._writeToLSNNeeded = true;
+}
- j.updateLSNFile();
+// call from within _curLogFileMutex
+void Journal::closeCurrentJournalFile() {
+ if (!_curLogFile)
+ return;
- if( _curLogFile && _written < DataLimitPerJournalFile )
- return;
+ JFile jf;
+ jf.filename = _curLogFile->_name;
+ jf.lastEventTimeMs = Listener::getElapsedTimeMillis();
+ _oldJournalFiles.push_back(jf);
- if( _curLogFile ) {
- _curLogFile->truncate();
- closeCurrentJournalFile();
- removeUnneededJournalFiles();
- }
+ delete _curLogFile; // close
+ _curLogFile = 0;
+ _written = 0;
+}
- try {
- Timer t;
- _open();
- int ms = t.millis();
- if( ms >= 200 ) {
- log() << "DR101 latency warning on journal file open " << ms << "ms" << endl;
- }
- }
- catch(std::exception& e) {
- log() << "warning exception opening journal file " << e.what() << endl;
- throw;
- }
- }
+/** remove older journal files.
+ be in _curLogFileMutex but not dbMutex when calling
+*/
+void Journal::removeUnneededJournalFiles() {
+ while (!_oldJournalFiles.empty()) {
+ JFile f = _oldJournalFiles.front();
- /** write (append) the buffer we have built to the journal and fsync it.
- outside of dbMutex lock as this could be slow.
- @param uncompressed - a buffer that will be written to the journal after compression
- will not return until on disk
- */
- void WRITETOJOURNAL(const JSectHeader& h, const AlignedBuilder& uncompressed) {
- Timer t;
- j.journal(h, uncompressed);
- stats.curr()->_writeToJournalMicros += t.micros();
+ if (f.lastEventTimeMs < _lastFlushTime + ExtraKeepTimeMs) {
+ // eligible for deletion
+ boost::filesystem::path p(f.filename);
+ log() << "old journal file will be removed: " << f.filename << endl;
+ removeOldJournalFile(p);
+ } else {
+ break;
}
- void Journal::journal(const JSectHeader& h, const AlignedBuilder& uncompressed) {
- static AlignedBuilder b(32*1024*1024);
- /* buffer to journal will be
- JSectHeader
- compressed operations
- JSectFooter
- */
- const unsigned headTailSize = sizeof(JSectHeader) + sizeof(JSectFooter);
- const unsigned max = maxCompressedLength(uncompressed.len()) + headTailSize;
- b.reset(max);
-
- {
- dassert( h.sectionLen() == (unsigned) 0xffffffff ); // we will backfill later
- b.appendStruct(h);
- }
+ _oldJournalFiles.pop_front();
+ }
+}
- size_t compressedLength = 0;
- rawCompress(uncompressed.buf(), uncompressed.len(), b.cur(), &compressedLength);
- verify( compressedLength < 0xffffffff );
- verify( compressedLength < max );
- b.skip(compressedLength);
-
- // footer
- unsigned L = 0xffffffff;
- {
- // pad to alignment, and set the total section length in the JSectHeader
- verify( 0xffffe000 == (~(Alignment-1)) );
- unsigned lenUnpadded = b.len() + sizeof(JSectFooter);
- L = (lenUnpadded + Alignment-1) & (~(Alignment-1));
- dassert( L >= lenUnpadded );
-
- ((JSectHeader*)b.atOfs(0))->setSectionLen(lenUnpadded);
-
- JSectFooter f(b.buf(), b.len()); // computes checksum
- b.appendStruct(f);
- dassert( b.len() == lenUnpadded );
-
- b.skip(L - lenUnpadded);
- dassert( b.len() % Alignment == 0 );
- }
+void Journal::_rotate() {
+ if (inShutdown() || !_curLogFile)
+ return;
- try {
- stdx::lock_guard<SimpleMutex> lk(_curLogFileMutex);
-
- // must already be open -- so that _curFileId is correct for previous buffer building
- verify( _curLogFile );
-
- stats.curr()->_uncompressedBytes += uncompressed.len();
- unsigned w = b.len();
- _written += w;
- verify( w <= L );
- stats.curr()->_journaledBytes += L;
- _curLogFile->synchronousAppend((const void *) b.buf(), L);
- _rotate();
- }
- catch(std::exception& e) {
- log() << "error exception in dur::journal " << e.what() << endl;
- throw;
- }
+ j.updateLSNFile();
+
+ if (_curLogFile && _written < DataLimitPerJournalFile)
+ return;
+
+ if (_curLogFile) {
+ _curLogFile->truncate();
+ closeCurrentJournalFile();
+ removeUnneededJournalFiles();
+ }
+
+ try {
+ Timer t;
+ _open();
+ int ms = t.millis();
+ if (ms >= 200) {
+ log() << "DR101 latency warning on journal file open " << ms << "ms" << endl;
}
+ } catch (std::exception& e) {
+ log() << "warning exception opening journal file " << e.what() << endl;
+ throw;
+ }
+}
+
+/** write (append) the buffer we have built to the journal and fsync it.
+ outside of dbMutex lock as this could be slow.
+ @param uncompressed - a buffer that will be written to the journal after compression
+ will not return until on disk
+*/
+void WRITETOJOURNAL(const JSectHeader& h, const AlignedBuilder& uncompressed) {
+ Timer t;
+ j.journal(h, uncompressed);
+ stats.curr()->_writeToJournalMicros += t.micros();
+}
+void Journal::journal(const JSectHeader& h, const AlignedBuilder& uncompressed) {
+ static AlignedBuilder b(32 * 1024 * 1024);
+ /* buffer to journal will be
+ JSectHeader
+ compressed operations
+ JSectFooter
+ */
+ const unsigned headTailSize = sizeof(JSectHeader) + sizeof(JSectFooter);
+ const unsigned max = maxCompressedLength(uncompressed.len()) + headTailSize;
+ b.reset(max);
+
+ {
+ dassert(h.sectionLen() == (unsigned)0xffffffff); // we will backfill later
+ b.appendStruct(h);
}
+
+ size_t compressedLength = 0;
+ rawCompress(uncompressed.buf(), uncompressed.len(), b.cur(), &compressedLength);
+ verify(compressedLength < 0xffffffff);
+ verify(compressedLength < max);
+ b.skip(compressedLength);
+
+ // footer
+ unsigned L = 0xffffffff;
+ {
+ // pad to alignment, and set the total section length in the JSectHeader
+ verify(0xffffe000 == (~(Alignment - 1)));
+ unsigned lenUnpadded = b.len() + sizeof(JSectFooter);
+ L = (lenUnpadded + Alignment - 1) & (~(Alignment - 1));
+ dassert(L >= lenUnpadded);
+
+ ((JSectHeader*)b.atOfs(0))->setSectionLen(lenUnpadded);
+
+ JSectFooter f(b.buf(), b.len()); // computes checksum
+ b.appendStruct(f);
+ dassert(b.len() == lenUnpadded);
+
+ b.skip(L - lenUnpadded);
+ dassert(b.len() % Alignment == 0);
+ }
+
+ try {
+ stdx::lock_guard<SimpleMutex> lk(_curLogFileMutex);
+
+ // must already be open -- so that _curFileId is correct for previous buffer building
+ verify(_curLogFile);
+
+ stats.curr()->_uncompressedBytes += uncompressed.len();
+ unsigned w = b.len();
+ _written += w;
+ verify(w <= L);
+ stats.curr()->_journaledBytes += L;
+ _curLogFile->synchronousAppend((const void*)b.buf(), L);
+ _rotate();
+ } catch (std::exception& e) {
+ log() << "error exception in dur::journal " << e.what() << endl;
+ throw;
+ }
+}
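
Journal::journal above writes a 0xffffffff placeholder for the section length, appends the compressed body and footer, and only then backfills the real length before the synchronous append. A tiny hedged sketch of that reserve-and-backfill pattern on a plain byte buffer (the layout is illustrative, not the journal format):

    // Reserve-and-backfill sketch: write a placeholder length first, append the
    // payload, then patch the real length at offset 0.
    #include <cstdint>
    #include <cstring>
    #include <vector>

    int main() {
        std::vector<char> buf(sizeof(uint32_t));
        const uint32_t placeholder = 0xffffffff;  // length not known yet
        std::memcpy(buf.data(), &placeholder, sizeof(placeholder));

        const char payload[] = "compressed section body";
        buf.insert(buf.end(), payload, payload + sizeof(payload));

        const uint32_t realLen = static_cast<uint32_t>(buf.size());
        std::memcpy(buf.data(), &realLen, sizeof(realLen));  // backfill the length field
        return 0;
    }
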
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.h b/src/mongo/db/storage/mmap_v1/dur_journal.h
index 469732c59a2..07def586090 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.h
@@ -32,59 +32,58 @@
namespace mongo {
- class AlignedBuilder;
- class JSectHeader;
+class AlignedBuilder;
+class JSectHeader;
- namespace dur {
+namespace dur {
- /** true if ok to cleanup journal files at termination. otherwise, files journal will be retained.
- */
- extern bool okToCleanUp;
-
- /** at termination after db files closed & fsynced
- also after recovery
- closes and removes journal files
- @param log report in log that we are cleaning up if we actually do any work
- */
- void journalCleanup(bool log = false);
+/** true if ok to clean up journal files at termination. otherwise, journal files will be retained.
+*/
+extern bool okToCleanUp;
- /** assure journal/ dir exists. throws */
- void journalMakeDir();
+/** at termination after db files closed & fsynced
+ also after recovery
+ closes and removes journal files
+ @param log report in log that we are cleaning up if we actually do any work
+*/
+void journalCleanup(bool log = false);
- /** check if time to rotate files; assure a file is open.
- done separately from the journal() call as we can do this part
- outside of lock.
- only called by durThread.
- */
- void journalRotate();
+/** assure journal/ dir exists. throws */
+void journalMakeDir();
- /** flag that something has gone wrong during writing to the journal
- (not for recovery mode)
- */
- void journalingFailure(const char *msg);
+/** check if time to rotate files; assure a file is open.
+ done separately from the journal() call as we can do this part
+ outside of lock.
+ only called by durThread.
+ */
+void journalRotate();
- /** read lsn from disk from the last run before doing recovery */
- unsigned long long journalReadLSN();
+/** flag that something has gone wrong during writing to the journal
+ (not for recovery mode)
+*/
+void journalingFailure(const char* msg);
- unsigned long long getLastDataFileFlushTime();
+/** read lsn from disk from the last run before doing recovery */
+unsigned long long journalReadLSN();
- /** never throws.
- @param anyFiles by default we only look at j._* files. If anyFiles is true, return true
- if there are any files in the journal directory. acquirePathLock() uses this to
- make sure that the journal directory is mounted.
- @return true if there are any journal files in the journal dir.
- */
- bool haveJournalFiles(bool anyFiles=false);
+unsigned long long getLastDataFileFlushTime();
- /**
- * Writes the specified uncompressed buffer to the journal.
- */
- void WRITETOJOURNAL(const JSectHeader& h, const AlignedBuilder& uncompressed);
+/** never throws.
+ @param anyFiles by default we only look at j._* files. If anyFiles is true, return true
+ if there are any files in the journal directory. acquirePathLock() uses this to
+ make sure that the journal directory is mounted.
+ @return true if there are any journal files in the journal dir.
+*/
+bool haveJournalFiles(bool anyFiles = false);
- // in case disk controller buffers writes
- const long long ExtraKeepTimeMs = 10000;
+/**
+ * Writes the specified uncompressed buffer to the journal.
+ */
+void WRITETOJOURNAL(const JSectHeader& h, const AlignedBuilder& uncompressed);
- const unsigned JournalCommitIntervalDefault = 100;
+// in case disk controller buffers writes
+const long long ExtraKeepTimeMs = 10000;
- }
+const unsigned JournalCommitIntervalDefault = 100;
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
index 4c6eb8ec8cc..971f2aa0e60 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal_writer.cpp
@@ -47,268 +47,251 @@ namespace dur {
namespace {
- /**
- * Apply the writes back to the non-private MMF after they are for certain in the journal.
- *
- * (1) TODO we don't need to write back everything every group commit. We MUST write back that
- * which is going to be a remapped on its private view - but that might not be all views.
- *
- * (2) TODO should we do this using N threads? Would be quite easy see Hackenberg paper table
- * 5 and 6. 2 threads might be a good balance.
- */
- void WRITETODATAFILES(const JSectHeader& h, const AlignedBuilder& uncompressed) {
- Timer t;
-
- LOG(4) << "WRITETODATAFILES BEGIN";
-
- RecoveryJob::get().processSection(&h, uncompressed.buf(), uncompressed.len(), NULL);
-
- const long long m = t.micros();
- stats.curr()->_writeToDataFilesMicros += m;
-
- LOG(4) << "journal WRITETODATAFILES " << m / 1000.0 << "ms";
- }
-
-} // namespace
-
+/**
+ * Apply the writes back to the non-private MMF after they are for certain in the journal.
+ *
+ * (1) TODO we don't need to write back everything every group commit. We MUST write back that
+ * which is going to be remapped on its private view - but that might not be all views.
+ *
+ * (2) TODO should we do this using N threads? Would be quite easy; see Hackenberg paper tables
+ * 5 and 6. 2 threads might be a good balance.
+ */
+void WRITETODATAFILES(const JSectHeader& h, const AlignedBuilder& uncompressed) {
+ Timer t;
- /**
- * Used inside the journal writer thread to ensure that used buffers are cleaned up properly.
- */
- class BufferGuard {
- MONGO_DISALLOW_COPYING(BufferGuard);
- public:
- BufferGuard(JournalWriter::Buffer* buffer, JournalWriter::BufferQueue* bufferQueue)
- : _buffer(buffer),
- _bufferQueue(bufferQueue) {
+ LOG(4) << "WRITETODATAFILES BEGIN";
- }
+ RecoveryJob::get().processSection(&h, uncompressed.buf(), uncompressed.len(), NULL);
- ~BufferGuard() {
- // This buffer is done. Reset and remove it from the journal queue and put it on
- // the ready queue.
- _buffer->_reset();
+ const long long m = t.micros();
+ stats.curr()->_writeToDataFilesMicros += m;
- // This should never block. Otherwise we will stall the journaling pipeline
- // permanently and cause deadlock.
- invariant(_bufferQueue->count() < _bufferQueue->maxSize());
- _bufferQueue->push(_buffer);
- }
+ LOG(4) << "journal WRITETODATAFILES " << m / 1000.0 << "ms";
+}
- private:
- // Buffer that this scoped object is managing. Owned until destruction time. Then, the
- // bufferQueue owns it.
- JournalWriter::Buffer* const _buffer;
+} // namespace
- // Queue where the buffer should be returned to at destruction time. Not owned.
- JournalWriter::BufferQueue* const _bufferQueue;
- };
+/**
+ * Used inside the journal writer thread to ensure that used buffers are cleaned up properly.
+ */
+class BufferGuard {
+ MONGO_DISALLOW_COPYING(BufferGuard);
+
+public:
+ BufferGuard(JournalWriter::Buffer* buffer, JournalWriter::BufferQueue* bufferQueue)
+ : _buffer(buffer), _bufferQueue(bufferQueue) {}
+
+ ~BufferGuard() {
+ // This buffer is done. Reset and remove it from the journal queue and put it on
+ // the ready queue.
+ _buffer->_reset();
+
+ // This should never block. Otherwise we will stall the journaling pipeline
+ // permanently and cause deadlock.
+ invariant(_bufferQueue->count() < _bufferQueue->maxSize());
+ _bufferQueue->push(_buffer);
+ }
- //
- // JournalWriter
- //
+private:
+ // Buffer that this scoped object is managing. Owned until destruction time. Then, the
+ // bufferQueue owns it.
+ JournalWriter::Buffer* const _buffer;
+
+ // Queue where the buffer should be returned to at destruction time. Not owned.
+ JournalWriter::BufferQueue* const _bufferQueue;
+};
+
+
+//
+// JournalWriter
+//
+
+JournalWriter::JournalWriter(NotifyAll* commitNotify,
+ NotifyAll* applyToDataFilesNotify,
+ size_t numBuffers)
+ : _commitNotify(commitNotify),
+ _applyToDataFilesNotify(applyToDataFilesNotify),
+ _shutdownRequested(false),
+ _journalQueue(numBuffers),
+ _lastCommitNumber(0),
+ _readyQueue(numBuffers) {
+ invariant(_journalQueue.maxSize() == _readyQueue.maxSize());
+}
+
+JournalWriter::~JournalWriter() {
+ // Never close the journal writer with outstanding or unaccounted writes
+ invariant(_journalQueue.empty());
+ invariant(_readyQueue.empty());
+}
+
+void JournalWriter::start() {
+ // Do not allow reuse
+ invariant(!_shutdownRequested);
+
+ // Pre-allocate the journal buffers and push them on the ready queue
+ for (size_t i = 0; i < _readyQueue.maxSize(); i++) {
+ _readyQueue.push(new Buffer(InitialBufferSizeBytes));
+ }
- JournalWriter::JournalWriter(NotifyAll* commitNotify,
- NotifyAll* applyToDataFilesNotify,
- size_t numBuffers)
- : _commitNotify(commitNotify),
- _applyToDataFilesNotify(applyToDataFilesNotify),
- _shutdownRequested(false),
- _journalQueue(numBuffers),
- _lastCommitNumber(0),
- _readyQueue(numBuffers) {
+ // Start the thread
+ stdx::thread t(stdx::bind(&JournalWriter::_journalWriterThread, this));
+ _journalWriterThreadHandle.swap(t);
+}
- invariant(_journalQueue.maxSize() == _readyQueue.maxSize());
- }
+void JournalWriter::shutdown() {
+ // There is no reason to call shutdown multiple times
+ invariant(!_shutdownRequested);
+ _shutdownRequested = true;
- JournalWriter::~JournalWriter() {
- // Never close the journal writer with outstanding or unaccounted writes
- invariant(_journalQueue.empty());
- invariant(_readyQueue.empty());
- }
+ // Never terminate the journal writer with outstanding or unaccounted writes
+ assertIdle();
- void JournalWriter::start() {
- // Do not allow reuse
- invariant(!_shutdownRequested);
+ Buffer* const shutdownBuffer = newBuffer();
+ shutdownBuffer->_setShutdown();
- // Pre-allocate the journal buffers and push them on the ready queue
- for (size_t i = 0; i < _readyQueue.maxSize(); i++) {
- _readyQueue.push(new Buffer(InitialBufferSizeBytes));
- }
+ // This will terminate the journal thread. No need to specify commit number, since we are
+ // shutting down and nothing will be notified anyways.
+ writeBuffer(shutdownBuffer, 0);
- // Start the thread
- stdx::thread t(stdx::bind(&JournalWriter::_journalWriterThread, this));
- _journalWriterThreadHandle.swap(t);
- }
+ // Ensure the journal thread has stopped and everything accounted for.
+ _journalWriterThreadHandle.join();
+ assertIdle();
- void JournalWriter::shutdown() {
- // There is no reason to call shutdown multiple times
- invariant(!_shutdownRequested);
- _shutdownRequested = true;
-
- // Never terminate the journal writer with outstanding or unaccounted writes
- assertIdle();
-
- Buffer* const shutdownBuffer = newBuffer();
- shutdownBuffer->_setShutdown();
-
- // This will terminate the journal thread. No need to specify commit number, since we are
- // shutting down and nothing will be notified anyways.
- writeBuffer(shutdownBuffer, 0);
-
- // Ensure the journal thread has stopped and everything accounted for.
- _journalWriterThreadHandle.join();
- assertIdle();
-
- // Delete the buffers (this deallocates the journal buffer memory)
- while (!_readyQueue.empty()) {
- Buffer* const buffer = _readyQueue.blockingPop();
- delete buffer;
- }
+ // Delete the buffers (this deallocates the journal buffer memory)
+ while (!_readyQueue.empty()) {
+ Buffer* const buffer = _readyQueue.blockingPop();
+ delete buffer;
}
+}
- void JournalWriter::assertIdle() {
- // All buffers are in the ready queue means there is nothing pending.
- invariant(_journalQueue.empty());
- invariant(_readyQueue.count() == _readyQueue.maxSize());
- }
+void JournalWriter::assertIdle() {
+ // All buffers are in the ready queue means there is nothing pending.
+ invariant(_journalQueue.empty());
+ invariant(_readyQueue.count() == _readyQueue.maxSize());
+}
- JournalWriter::Buffer* JournalWriter::newBuffer() {
- Buffer* const buffer = _readyQueue.blockingPop();
- buffer->_assertEmpty();
+JournalWriter::Buffer* JournalWriter::newBuffer() {
+ Buffer* const buffer = _readyQueue.blockingPop();
+ buffer->_assertEmpty();
- return buffer;
- }
+ return buffer;
+}
- void JournalWriter::writeBuffer(Buffer* buffer, NotifyAll::When commitNumber) {
- invariant(buffer->_commitNumber == 0);
- invariant((commitNumber > _lastCommitNumber) ||
- (buffer->_isShutdown && (commitNumber == 0)));
+void JournalWriter::writeBuffer(Buffer* buffer, NotifyAll::When commitNumber) {
+ invariant(buffer->_commitNumber == 0);
+ invariant((commitNumber > _lastCommitNumber) || (buffer->_isShutdown && (commitNumber == 0)));
- buffer->_commitNumber = commitNumber;
+ buffer->_commitNumber = commitNumber;
- _journalQueue.push(buffer);
- }
+ _journalQueue.push(buffer);
+}
- void JournalWriter::flush() {
- std::vector<Buffer*> buffers;
+void JournalWriter::flush() {
+ std::vector<Buffer*> buffers;
- // Pop the expected number of buffers from the ready queue. This will block until all
- // in-progress buffers have completed.
- for (size_t i = 0; i < _readyQueue.maxSize(); i++) {
- buffers.push_back(_readyQueue.blockingPop());
- }
+ // Pop the expected number of buffers from the ready queue. This will block until all
+ // in-progress buffers have completed.
+ for (size_t i = 0; i < _readyQueue.maxSize(); i++) {
+ buffers.push_back(_readyQueue.blockingPop());
+ }
- // Put them back in to restore the original state.
- for (size_t i = 0; i < buffers.size(); i++) {
- _readyQueue.push(buffers[i]);
- }
+ // Put them back in to restore the original state.
+ for (size_t i = 0; i < buffers.size(); i++) {
+ _readyQueue.push(buffers[i]);
}
+}
- void JournalWriter::_journalWriterThread() {
- Client::initThread("journal writer");
+void JournalWriter::_journalWriterThread() {
+ Client::initThread("journal writer");
- log() << "Journal writer thread started";
+ log() << "Journal writer thread started";
- try {
- while (true) {
- Buffer* const buffer = _journalQueue.blockingPop();
- BufferGuard bufferGuard(buffer, &_readyQueue);
+ try {
+ while (true) {
+ Buffer* const buffer = _journalQueue.blockingPop();
+ BufferGuard bufferGuard(buffer, &_readyQueue);
- if (buffer->_isShutdown) {
- invariant(buffer->_builder.len() == 0);
+ if (buffer->_isShutdown) {
+ invariant(buffer->_builder.len() == 0);
- // The journal writer thread is terminating. Nothing to notify or write.
- break;
- }
+ // The journal writer thread is terminating. Nothing to notify or write.
+ break;
+ }
- if (buffer->_isNoop) {
- invariant(buffer->_builder.len() == 0);
+ if (buffer->_isNoop) {
+ invariant(buffer->_builder.len() == 0);
- // There's nothing to be writen, but we still need to notify this commit number
- _commitNotify->notifyAll(buffer->_commitNumber);
- _applyToDataFilesNotify->notifyAll(buffer->_commitNumber);
- continue;
- }
+                // There's nothing to be written, but we still need to notify this commit number
+ _commitNotify->notifyAll(buffer->_commitNumber);
+ _applyToDataFilesNotify->notifyAll(buffer->_commitNumber);
+ continue;
+ }
- LOG(4) << "Journaling commit number " << buffer->_commitNumber
- << " (journal file " << buffer->_header.fileId
- << ", sequence " << buffer->_header.seqNumber
- << ", size " << buffer->_builder.len() << " bytes)";
+ LOG(4) << "Journaling commit number " << buffer->_commitNumber << " (journal file "
+ << buffer->_header.fileId << ", sequence " << buffer->_header.seqNumber
+ << ", size " << buffer->_builder.len() << " bytes)";
- // This performs synchronous I/O to the journal file and will block.
- WRITETOJOURNAL(buffer->_header, buffer->_builder);
+ // This performs synchronous I/O to the journal file and will block.
+ WRITETOJOURNAL(buffer->_header, buffer->_builder);
- // Data is now persisted in the journal, which is sufficient for acknowledging
- // getLastError
- _commitNotify->notifyAll(buffer->_commitNumber);
+ // Data is now persisted in the journal, which is sufficient for acknowledging
+ // getLastError
+ _commitNotify->notifyAll(buffer->_commitNumber);
- // Apply the journal entries on top of the shared view so that when flush is
- // requested it would write the latest.
- WRITETODATAFILES(buffer->_header, buffer->_builder);
+ // Apply the journal entries on top of the shared view so that when flush is
+ // requested it would write the latest.
+ WRITETODATAFILES(buffer->_header, buffer->_builder);
- // Data is now persisted on the shared view, so notify any potential journal file
- // cleanup waiters.
- _applyToDataFilesNotify->notifyAll(buffer->_commitNumber);
- }
+ // Data is now persisted on the shared view, so notify any potential journal file
+ // cleanup waiters.
+ _applyToDataFilesNotify->notifyAll(buffer->_commitNumber);
}
- catch (const DBException& e) {
- severe() << "dbexception in journalWriterThread causing immediate shutdown: "
- << e.toString();
- invariant(false);
- }
- catch (const std::ios_base::failure& e) {
- severe() << "ios_base exception in journalWriterThread causing immediate shutdown: "
- << e.what();
- invariant(false);
- }
- catch (const std::bad_alloc& e) {
- severe() << "bad_alloc exception in journalWriterThread causing immediate shutdown: "
- << e.what();
- invariant(false);
- }
- catch (const std::exception& e) {
- severe() << "exception in journalWriterThread causing immediate shutdown: "
- << e.what();
- invariant(false);
- }
- catch (...) {
- severe() << "unhandled exception in journalWriterThread causing immediate shutdown";
- invariant(false);
- }
-
- log() << "Journal writer thread stopped";
+ } catch (const DBException& e) {
+ severe() << "dbexception in journalWriterThread causing immediate shutdown: "
+ << e.toString();
+ invariant(false);
+ } catch (const std::ios_base::failure& e) {
+ severe() << "ios_base exception in journalWriterThread causing immediate shutdown: "
+ << e.what();
+ invariant(false);
+ } catch (const std::bad_alloc& e) {
+ severe() << "bad_alloc exception in journalWriterThread causing immediate shutdown: "
+ << e.what();
+ invariant(false);
+ } catch (const std::exception& e) {
+ severe() << "exception in journalWriterThread causing immediate shutdown: " << e.what();
+ invariant(false);
+ } catch (...) {
+ severe() << "unhandled exception in journalWriterThread causing immediate shutdown";
+ invariant(false);
}
+ log() << "Journal writer thread stopped";
+}
- //
- // Buffer
- //
-
- JournalWriter::Buffer::Buffer(size_t initialSize)
- : _commitNumber(0),
- _isNoop(false),
- _isShutdown(false),
- _header(),
- _builder(initialSize) {
- }
+//
+// Buffer
+//
- JournalWriter::Buffer::~Buffer() {
- _assertEmpty();
- }
+JournalWriter::Buffer::Buffer(size_t initialSize)
+ : _commitNumber(0), _isNoop(false), _isShutdown(false), _header(), _builder(initialSize) {}
- void JournalWriter::Buffer::_assertEmpty() {
- invariant(_commitNumber == 0);
- invariant(_builder.len() == 0);
- }
+JournalWriter::Buffer::~Buffer() {
+ _assertEmpty();
+}
- void JournalWriter::Buffer::_reset() {
- _commitNumber = 0;
- _isNoop = false;
- _builder.reset();
- }
+void JournalWriter::Buffer::_assertEmpty() {
+ invariant(_commitNumber == 0);
+ invariant(_builder.len() == 0);
+}
+
+void JournalWriter::Buffer::_reset() {
+ _commitNumber = 0;
+ _isNoop = false;
+ _builder.reset();
+}
-} // namespace dur
-} // namespace mongo
+} // namespace dur
+} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal_writer.h b/src/mongo/db/storage/mmap_v1/dur_journal_writer.h
index 6ac91de6532..2f738cbb380 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal_writer.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journal_writer.h
@@ -38,150 +38,158 @@
namespace mongo {
namespace dur {
+/**
+ * Manages the thread and queues used for writing the journal to disk and notifies parties
+ * that are waiting on the write concern.
+ *
+ * NOTE: Not thread-safe and must not be used from more than one thread.
+ */
+class JournalWriter {
+ MONGO_DISALLOW_COPYING(JournalWriter);
+
+public:
/**
- * Manages the thread and queues used for writing the journal to disk and notify parties with
- * are waiting on the write concern.
- *
- * NOTE: Not thread-safe and must not be used from more than one thread.
+ * Stores the memory and the header for a complete journal buffer which is pending to be
+ * written by the journal writer thread.
*/
- class JournalWriter {
- MONGO_DISALLOW_COPYING(JournalWriter);
+ class Buffer {
public:
+ Buffer(size_t initialSize);
+ ~Buffer();
+
+ JSectHeader& getHeader() {
+ return _header;
+ }
+ AlignedBuilder& getBuilder() {
+ return _builder;
+ }
- /**
- * Stores the memory and the header for a complete journal buffer which is pending to be
- * written by the journal writer thread.
- */
- class Buffer {
- public:
- Buffer(size_t initialSize);
- ~Buffer();
-
- JSectHeader& getHeader() { return _header; }
- AlignedBuilder& getBuilder() { return _builder; }
-
- void setNoop() { _isNoop = true; }
-
- private:
- friend class BufferGuard;
- friend class JournalWriter;
-
-
- void _assertEmpty();
- void _reset();
- void _setShutdown() { _isShutdown = true; }
-
- // Specifies the commit number which flushing this buffer would notify. This value is
- // zero, if there is no data to be flushed or if the buffer is noop/shutdown.
- NotifyAll::When _commitNumber;
-
- // Special buffer that's posted when there is nothing to be written to the journal,
- // but we want to order a notification so it happens after all other writes have
- // completed.
- bool _isNoop;
-
- // Special buffer that's posted when the receiving thread must terminate. This should
- // be the last entry posted to the queue and the commit number should be zero.
- bool _isShutdown;
-
- JSectHeader _header;
- AlignedBuilder _builder;
- };
-
-
- /**
- * Initializes the journal writer.
- *
- * @param commitNotify Notification object to be called after journal entries have been
- * written to disk. The caller retains ownership and the notify object must outlive
- * the journal writer object.
- * @param applyToDataFilesNotify Notification object to be called after journal entries
- * have been applied to the shared view. This means that if the shared view were to be
- * flushed at this point, the journal files before this point are not necessary. The
- * caller retains ownership and the notify object must outlive the journal writer
- * object.
- * @param numBuffers How many buffers to create to hold outstanding writes. If there are
- * more than this number of journal writes that have not completed, the write calls
- * will block.
- */
- JournalWriter(NotifyAll* commitNotify, NotifyAll* applyToDataFilesNotify, size_t numBuffers);
- ~JournalWriter();
-
- /**
- * Allocates buffer memory and starts the journal writer thread.
- */
- void start();
-
- /**
- * Terminates the journal writer thread and frees memory for the buffers. Must not be
- * called if there are any pending journal writes.
- */
- void shutdown();
-
- /**
- * Asserts that there are no pending journal writes.
- */
- void assertIdle();
-
- /**
- * Obtains a new empty buffer into which a journal entry should be written.
- *
- * This method may block if there are no free buffers.
- *
- * The caller does not own the buffer and needs to "return" it to the writer by calling
- * writeBuffer. Buffers with data on them should never be discarded until they are written.
- */
- Buffer* newBuffer();
-
- /**
- * Requests that the specified buffer be written asynchronously.
- *
- * This method may block if there are too many outstanding unwritten buffers.
- *
- * @param buffer Buffer entry to be written. The buffer object must not be used anymore
- * after it has been given to this function.
- * @param commitNumber What commit number to be notified once the buffer has been written
- * to disk.
- */
- void writeBuffer(Buffer* buffer, NotifyAll::When commitNumber);
-
- /**
- * Ensures that all previously submitted write requests complete. This call is blocking.
- */
- void flush();
+ void setNoop() {
+ _isNoop = true;
+ }
private:
friend class BufferGuard;
+ friend class JournalWriter;
- typedef BlockingQueue<Buffer*> BufferQueue;
- // Start all buffers with 4MB of size
- enum { InitialBufferSizeBytes = 4 * 1024 * 1024 };
+ void _assertEmpty();
+ void _reset();
+ void _setShutdown() {
+ _isShutdown = true;
+ }
+ // Specifies the commit number which flushing this buffer would notify. This value is
+ // zero, if there is no data to be flushed or if the buffer is noop/shutdown.
+ NotifyAll::When _commitNumber;
- void _journalWriterThread();
+ // Special buffer that's posted when there is nothing to be written to the journal,
+ // but we want to order a notification so it happens after all other writes have
+ // completed.
+ bool _isNoop;
+ // Special buffer that's posted when the receiving thread must terminate. This should
+ // be the last entry posted to the queue and the commit number should be zero.
+ bool _isShutdown;
- // This gets notified as journal buffers are written. It is not owned and needs to outlive
- // the journal writer object.
- NotifyAll* const _commitNotify;
+ JSectHeader _header;
+ AlignedBuilder _builder;
+ };
- // This gets notified as journal buffers are done being applied to the shared view
- NotifyAll* const _applyToDataFilesNotify;
- // Wraps and controls the journal writer thread
- stdx::thread _journalWriterThreadHandle;
+ /**
+ * Initializes the journal writer.
+ *
+ * @param commitNotify Notification object to be called after journal entries have been
+ * written to disk. The caller retains ownership and the notify object must outlive
+ * the journal writer object.
+ * @param applyToDataFilesNotify Notification object to be called after journal entries
+ * have been applied to the shared view. This means that if the shared view were to be
+ * flushed at this point, the journal files before this point are not necessary. The
+ * caller retains ownership and the notify object must outlive the journal writer
+ * object.
+ * @param numBuffers How many buffers to create to hold outstanding writes. If there are
+ * more than this number of journal writes that have not completed, the write calls
+ * will block.
+ */
+ JournalWriter(NotifyAll* commitNotify, NotifyAll* applyToDataFilesNotify, size_t numBuffers);
+ ~JournalWriter();
- // Indicates that shutdown has been requested. Used for idempotency of the shutdown call.
- bool _shutdownRequested;
+ /**
+ * Allocates buffer memory and starts the journal writer thread.
+ */
+ void start();
- // Queue of buffers, which need to be written by the journal writer thread
- BufferQueue _journalQueue;
- NotifyAll::When _lastCommitNumber;
+ /**
+ * Terminates the journal writer thread and frees memory for the buffers. Must not be
+ * called if there are any pending journal writes.
+ */
+ void shutdown();
- // Queue of buffers, whose write has been completed by the journal writer thread.
- BufferQueue _readyQueue;
- };
+ /**
+ * Asserts that there are no pending journal writes.
+ */
+ void assertIdle();
+
+ /**
+ * Obtains a new empty buffer into which a journal entry should be written.
+ *
+ * This method may block if there are no free buffers.
+ *
+ * The caller does not own the buffer and needs to "return" it to the writer by calling
+ * writeBuffer. Buffers with data on them should never be discarded until they are written.
+ */
+ Buffer* newBuffer();
+
+ /**
+ * Requests that the specified buffer be written asynchronously.
+ *
+ * This method may block if there are too many outstanding unwritten buffers.
+ *
+ * @param buffer Buffer entry to be written. The buffer object must not be used anymore
+ * after it has been given to this function.
+ * @param commitNumber What commit number to be notified once the buffer has been written
+ * to disk.
+ */
+ void writeBuffer(Buffer* buffer, NotifyAll::When commitNumber);
+
+ /**
+ * Ensures that all previously submitted write requests complete. This call is blocking.
+ */
+ void flush();
+
+private:
+ friend class BufferGuard;
+
+ typedef BlockingQueue<Buffer*> BufferQueue;
+
+ // Start all buffers with 4MB of size
+ enum { InitialBufferSizeBytes = 4 * 1024 * 1024 };
+
+
+ void _journalWriterThread();
+
+
+ // This gets notified as journal buffers are written. It is not owned and needs to outlive
+ // the journal writer object.
+ NotifyAll* const _commitNotify;
+
+ // This gets notified as journal buffers are done being applied to the shared view
+ NotifyAll* const _applyToDataFilesNotify;
+
+ // Wraps and controls the journal writer thread
+ stdx::thread _journalWriterThreadHandle;
+
+ // Indicates that shutdown has been requested. Used for idempotency of the shutdown call.
+ bool _shutdownRequested;
+
+ // Queue of buffers, which need to be written by the journal writer thread
+ BufferQueue _journalQueue;
+ NotifyAll::When _lastCommitNumber;
+
+ // Queue of buffers, whose write has been completed by the journal writer thread.
+ BufferQueue _readyQueue;
+};
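
The class above circulates a fixed pool of buffers between a ready queue (free buffers) and a work queue (buffers waiting for the writer thread); that is why newBuffer only blocks when every buffer is in flight. A hedged, generic sketch of that two-queue pattern follows; it is not the actual JournalWriter API, just the shape of it:

    // Two-queue buffer recycling sketch: a fixed pool of "buffers" circulates
    // between a ready queue (free) and a work queue (to be written by a single
    // writer thread). Everything here is illustrative.
    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <string>
    #include <thread>
    #include <vector>

    template <typename T>
    class SimpleBlockingQueue {
    public:
        void push(T v) {
            std::lock_guard<std::mutex> lk(_m);
            _q.push(std::move(v));
            _cv.notify_one();
        }
        T pop() {
            std::unique_lock<std::mutex> lk(_m);
            _cv.wait(lk, [&] { return !_q.empty(); });
            T v = std::move(_q.front());
            _q.pop();
            return v;
        }

    private:
        std::mutex _m;
        std::condition_variable _cv;
        std::queue<T> _q;
    };

    int main() {
        SimpleBlockingQueue<std::string*> ready, work;
        std::vector<std::string> pool(4);  // pre-allocated "buffers"
        for (auto& b : pool)
            ready.push(&b);

        std::thread writer([&] {
            while (std::string* b = work.pop()) {  // nullptr acts as the shutdown marker
                // ... write *b to the journal file here ...
                b->clear();      // reset, then return to the pool
                ready.push(b);
            }
        });

        std::string* b = ready.pop();  // like newBuffer(): blocks only if the pool is empty
        *b = "group commit payload";
        work.push(b);                  // like writeBuffer(): hand off to the writer thread

        work.push(nullptr);            // shutdown marker, analogous to the shutdown buffer
        writer.join();
        return 0;
    }
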
-} // namespace dur
-} // namespace mongo
+} // namespace dur
+} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalformat.h b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
index 80ea90bd78a..3c31c2686dd 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalformat.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
@@ -37,155 +37,181 @@
namespace mongo {
- namespace dur {
+namespace dur {
- const unsigned Alignment = 8192;
+const unsigned Alignment = 8192;
#pragma pack(1)
- /** beginning header for a journal/j._<n> file
- there is nothing important int this header at this time. except perhaps version #.
- */
- struct JHeader {
- JHeader() { }
- JHeader(std::string fname);
+/** beginning header for a journal/j._<n> file
+    there is nothing important in this header at this time, except perhaps the version #.
+*/
+struct JHeader {
+ JHeader() {}
+ JHeader(std::string fname);
- char magic[2]; // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or something...
+ char magic
+ [2]; // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or something...
- // x4142 is asci--readable if you look at the file with head/less -- thus the starting values were near
- // that. simply incrementing the version # is safe on a fwd basis.
+// x4142 is ascii-readable if you look at the file with head/less -- thus the starting values were near
+// that. simply incrementing the version # is safe on a fwd basis.
#if defined(_NOCOMPRESS)
- enum { CurrentVersion = 0x4148 };
+ enum { CurrentVersion = 0x4148 };
#else
- enum { CurrentVersion = 0x4149 };
+ enum { CurrentVersion = 0x4149 };
#endif
- unsigned short _version;
-
- // these are just for diagnostic ease (make header more useful as plain text)
- char n1; // '\n'
- char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
- char n2; // '\n'
- char dbpath[128]; // path/filename of this file for human reading and diagnostics. not used by code.
- char n3, n4; // '\n', '\n'
-
- unsigned long long fileId; // unique identifier that will be in each JSectHeader. important as we recycle prealloced files
-
- char reserved3[8026]; // 8KB total for the file header
- char txt2[2]; // "\n\n" at the end
-
- bool versionOk() const { return _version == CurrentVersion; }
- bool valid() const { return magic[0] == 'j' && txt2[1] == '\n' && fileId; }
- };
-
- /** "Section" header. A section corresponds to a group commit.
- len is length of the entire section including header and footer.
- header and footer are not compressed, just the stuff in between.
- */
- struct JSectHeader {
- private:
- unsigned _sectionLen; // unpadded length in bytes of the whole section
- public:
- unsigned long long seqNumber; // sequence number that can be used on recovery to not do too much work
- unsigned long long fileId; // matches JHeader::fileId
- unsigned sectionLen() const { return _sectionLen; }
-
- // we store the unpadded length so we can use that when we uncompress. to
- // get the true total size this must be rounded up to the Alignment.
- void setSectionLen(unsigned lenUnpadded) { _sectionLen = lenUnpadded; }
-
- unsigned sectionLenWithPadding() const {
- unsigned x = (sectionLen() + (Alignment-1)) & (~(Alignment-1));
- dassert( x % Alignment == 0 );
- return x;
- }
- };
-
- /** an individual write operation within a group commit section. Either the entire section should
- be applied, or nothing. (We check the md5 for the whole section before doing anything on recovery.)
- */
- struct JEntry {
- enum OpCodes {
- OpCode_Footer = 0xffffffff,
- OpCode_DbContext = 0xfffffffe,
- OpCode_FileCreated = 0xfffffffd,
- OpCode_DropDb = 0xfffffffc,
- OpCode_Min = 0xfffff000
- };
- union {
- unsigned len; // length in bytes of the data of the JEntry. does not include the JEntry header
- OpCodes opcode;
- };
-
- unsigned ofs; // offset in file
-
- // sentinel and masks for _fileNo
- enum {
- DotNsSuffix = 0x7fffffff, // ".ns" file
- LocalDbBit = 0x80000000 // assuming "local" db instead of using the JDbContext
- };
- int _fileNo; // high bit is set to indicate it should be the <dbpath>/local database
- // char data[len] follows
-
- const char * srcData() const {
- const int *i = &_fileNo;
- return (const char *) (i+1);
- }
-
- int getFileNo() const { return _fileNo & (~LocalDbBit); }
- void setFileNo(int f) { _fileNo = f; }
- bool isNsSuffix() const { return getFileNo() == DotNsSuffix; }
-
- void setLocalDbContextBit() { _fileNo |= LocalDbBit; }
- bool isLocalDbContext() const { return _fileNo & LocalDbBit; }
- void clearLocalDbContextBit() { _fileNo = getFileNo(); }
-
- static std::string suffix(int fileno) {
- if( fileno == DotNsSuffix ) return "ns";
- std::stringstream ss;
- ss << fileno;
- return ss.str();
- }
- };
-
- /** group commit section footer. md5 is a key field. */
- struct JSectFooter {
- JSectFooter();
- JSectFooter(const void* begin, int len); // needs buffer to compute hash
- unsigned sentinel;
- unsigned char hash[16];
- unsigned long long reserved;
- char magic[4]; // "\n\n\n\n"
-
- /** used by recovery to see if buffer is valid
- @param begin the buffer
- @param len buffer len
- @return true if buffer looks valid
- */
- bool checkHash(const void* begin, int len) const;
-
- bool magicOk() const { return *((unsigned*)magic) == 0x0a0a0a0a; }
- };
-
- /** declares "the next entry(s) are for this database / file path prefix" */
- struct JDbContext {
- JDbContext() : sentinel(JEntry::OpCode_DbContext) { }
- const unsigned sentinel; // compare to JEntry::len -- zero is our sentinel
- //char dbname[];
- };
-
- /** "last sequence number" */
- struct LSNFile {
- unsigned ver;
- unsigned reserved2;
- unsigned long long lsn;
- unsigned long long checkbytes;
- unsigned long long reserved[8];
-
- void set(unsigned long long lsn);
- unsigned long long get();
- };
+ unsigned short _version;
-#pragma pack()
+ // these are just for diagnostic ease (make header more useful as plain text)
+ char n1; // '\n'
+ char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
+ char n2; // '\n'
+ char dbpath
+ [128]; // path/filename of this file for human reading and diagnostics. not used by code.
+ char n3, n4; // '\n', '\n'
+
+ unsigned long long
+ fileId; // unique identifier that will be in each JSectHeader. important as we recycle prealloced files
+
+ char reserved3[8026]; // 8KB total for the file header
+ char txt2[2]; // "\n\n" at the end
+
+ bool versionOk() const {
+ return _version == CurrentVersion;
+ }
+ bool valid() const {
+ return magic[0] == 'j' && txt2[1] == '\n' && fileId;
+ }
+};
+
+/** "Section" header. A section corresponds to a group commit.
+ len is length of the entire section including header and footer.
+ header and footer are not compressed, just the stuff in between.
+*/
+struct JSectHeader {
+private:
+ unsigned _sectionLen; // unpadded length in bytes of the whole section
+public:
+ unsigned long long
+ seqNumber; // sequence number that can be used on recovery to not do too much work
+ unsigned long long fileId; // matches JHeader::fileId
+ unsigned sectionLen() const {
+ return _sectionLen;
+ }
+
+ // we store the unpadded length so we can use that when we uncompress. to
+ // get the true total size this must be rounded up to the Alignment.
+ void setSectionLen(unsigned lenUnpadded) {
+ _sectionLen = lenUnpadded;
+ }
+
+ unsigned sectionLenWithPadding() const {
+ unsigned x = (sectionLen() + (Alignment - 1)) & (~(Alignment - 1));
+ dassert(x % Alignment == 0);
+ return x;
+ }
+};
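
sectionLenWithPadding() rounds the unpadded length up to the 8192-byte Alignment with a power-of-two mask. A tiny sketch of the same rounding, with the alignment value assumed:

    // Round-up-to-alignment sketch; the mask trick requires a power-of-two alignment.
    unsigned roundUp(unsigned lenUnpadded, unsigned alignment) {
        return (lenUnpadded + alignment - 1) & ~(alignment - 1);
    }
    // e.g. roundUp(1, 8192) == 8192, roundUp(8192, 8192) == 8192,
    //      roundUp(8193, 8192) == 16384
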
+/** an individual write operation within a group commit section. Either the entire section should
+ be applied, or nothing. (We check the md5 for the whole section before doing anything on recovery.)
+*/
+struct JEntry {
+ enum OpCodes {
+ OpCode_Footer = 0xffffffff,
+ OpCode_DbContext = 0xfffffffe,
+ OpCode_FileCreated = 0xfffffffd,
+ OpCode_DropDb = 0xfffffffc,
+ OpCode_Min = 0xfffff000
+ };
+ union {
+ unsigned
+ len; // length in bytes of the data of the JEntry. does not include the JEntry header
+ OpCodes opcode;
+ };
+
+ unsigned ofs; // offset in file
+
+ // sentinel and masks for _fileNo
+ enum {
+ DotNsSuffix = 0x7fffffff, // ".ns" file
+ LocalDbBit = 0x80000000 // assuming "local" db instead of using the JDbContext
+ };
+ int _fileNo; // high bit is set to indicate it should be the <dbpath>/local database
+ // char data[len] follows
+
+ const char* srcData() const {
+ const int* i = &_fileNo;
+ return (const char*)(i + 1);
+ }
+
+ int getFileNo() const {
+ return _fileNo & (~LocalDbBit);
+ }
+ void setFileNo(int f) {
+ _fileNo = f;
+ }
+ bool isNsSuffix() const {
+ return getFileNo() == DotNsSuffix;
+ }
+
+ void setLocalDbContextBit() {
+ _fileNo |= LocalDbBit;
+ }
+ bool isLocalDbContext() const {
+ return _fileNo & LocalDbBit;
+ }
+ void clearLocalDbContextBit() {
+ _fileNo = getFileNo();
}
+ static std::string suffix(int fileno) {
+ if (fileno == DotNsSuffix)
+ return "ns";
+ std::stringstream ss;
+ ss << fileno;
+ return ss.str();
+ }
+};
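
JEntry packs a "this belongs to the local database" flag into the high bit of _fileNo, while the remaining bits carry the file number. A minimal sketch of that bit packing (values are illustrative):

    // High-bit flag packing, mirroring JEntry::LocalDbBit / getFileNo().
    #include <cassert>

    int main() {
        const unsigned kLocalDbBit = 0x80000000u;  // mirrors JEntry::LocalDbBit
        unsigned fileNo = 7;
        fileNo |= kLocalDbBit;                // setLocalDbContextBit()
        assert((fileNo & kLocalDbBit) != 0);  // isLocalDbContext()
        assert((fileNo & ~kLocalDbBit) == 7u);  // getFileNo()
        return 0;
    }
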
+
+/** group commit section footer. md5 is a key field. */
+struct JSectFooter {
+ JSectFooter();
+ JSectFooter(const void* begin, int len); // needs buffer to compute hash
+ unsigned sentinel;
+ unsigned char hash[16];
+ unsigned long long reserved;
+ char magic[4]; // "\n\n\n\n"
+
+ /** used by recovery to see if buffer is valid
+ @param begin the buffer
+ @param len buffer len
+ @return true if buffer looks valid
+ */
+ bool checkHash(const void* begin, int len) const;
+
+ bool magicOk() const {
+ return *((unsigned*)magic) == 0x0a0a0a0a;
+ }
+};
+
+/** declares "the next entry(s) are for this database / file path prefix" */
+struct JDbContext {
+ JDbContext() : sentinel(JEntry::OpCode_DbContext) {}
+ const unsigned sentinel; // compare to JEntry::len -- zero is our sentinel
+ // char dbname[];
+};
+
+/** "last sequence number" */
+struct LSNFile {
+ unsigned ver;
+ unsigned reserved2;
+ unsigned long long lsn;
+ unsigned long long checkbytes;
+ unsigned long long reserved[8];
+
+ void set(unsigned long long lsn);
+ unsigned long long get();
+};
+
+#pragma pack()
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
index 365f38aec71..86a2d19de97 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
@@ -34,80 +34,84 @@
#include "mongo/db/storage/mmap_v1/logfile.h"
namespace mongo {
- namespace dur {
+namespace dur {
- /** the writeahead journal for durability */
- class Journal {
- public:
- std::string dir; // set by journalMakeDir() during initialization
+/** the writeahead journal for durability */
+class Journal {
+public:
+ std::string dir; // set by journalMakeDir() during initialization
- Journal();
+ Journal();
- /** call during startup by journalMakeDir() */
- void init();
+ /** call during startup by journalMakeDir() */
+ void init();
- /** check if time to rotate files. assure a file is open.
- done separately from the journal() call as we can do this part
- outside of lock.
- thread: durThread()
- */
- void rotate();
+ /** check if time to rotate files. assure a file is open.
+ done separately from the journal() call as we can do this part
+ outside of lock.
+ thread: durThread()
+ */
+ void rotate();
- /** append to the journal file
- */
- void journal(const JSectHeader& h, const AlignedBuilder& b);
+ /** append to the journal file
+ */
+ void journal(const JSectHeader& h, const AlignedBuilder& b);
- boost::filesystem::path getFilePathFor(int filenumber) const;
+ boost::filesystem::path getFilePathFor(int filenumber) const;
- unsigned long long lastFlushTime() const { return _lastFlushTime; }
- void cleanup(bool log); // closes and removes journal files
-
- unsigned long long curFileId() const { return _curFileId; }
-
- void assureLogFileOpen() {
- stdx::lock_guard<SimpleMutex> lk(_curLogFileMutex);
- if( _curLogFile == 0 )
- _open();
- }
-
- /** open a journal file to journal operations to. */
- void open();
-
- private:
- /** check if time to rotate files. assure a file is open.
- * internally called with every commit
- */
- void _rotate();
-
- void _open();
- void closeCurrentJournalFile();
- void removeUnneededJournalFiles();
-
- unsigned long long _written; // bytes written so far to the current journal (log) file
- unsigned _nextFileNumber;
-
- SimpleMutex _curLogFileMutex;
-
- LogFile *_curLogFile; // use _curLogFileMutex
- unsigned long long _curFileId; // current file id see JHeader::fileId
-
- struct JFile {
- std::string filename;
- unsigned long long lastEventTimeMs;
- };
-
- // files which have been closed but not unlinked (rotated out) yet
- // ordered oldest to newest
- std::list<JFile> _oldJournalFiles; // use _curLogFileMutex
+ unsigned long long lastFlushTime() const {
+ return _lastFlushTime;
+ }
+ void cleanup(bool log); // closes and removes journal files
- // lsn related
- static void preFlush();
- static void postFlush();
- unsigned long long _preFlushTime;
- unsigned long long _lastFlushTime; // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
- bool _writeToLSNNeeded;
- void updateLSNFile();
- };
+ unsigned long long curFileId() const {
+ return _curFileId;
+ }
+ void assureLogFileOpen() {
+ stdx::lock_guard<SimpleMutex> lk(_curLogFileMutex);
+ if (_curLogFile == 0)
+ _open();
}
+
+ /** open a journal file to journal operations to. */
+ void open();
+
+private:
+ /** check if time to rotate files. assure a file is open.
+ * internally called with every commit
+ */
+ void _rotate();
+
+ void _open();
+ void closeCurrentJournalFile();
+ void removeUnneededJournalFiles();
+
+ unsigned long long _written; // bytes written so far to the current journal (log) file
+ unsigned _nextFileNumber;
+
+ SimpleMutex _curLogFileMutex;
+
+ LogFile* _curLogFile; // use _curLogFileMutex
+ unsigned long long _curFileId; // current file id see JHeader::fileId
+
+ struct JFile {
+ std::string filename;
+ unsigned long long lastEventTimeMs;
+ };
+
+ // files which have been closed but not unlinked (rotated out) yet
+ // ordered oldest to newest
+ std::list<JFile> _oldJournalFiles; // use _curLogFileMutex
+
+ // lsn related
+ static void preFlush();
+ static void postFlush();
+ unsigned long long _preFlushTime;
+ unsigned long long
+ _lastFlushTime; // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+ bool _writeToLSNNeeded;
+ void updateLSNFile();
+};
+}
}
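Journal::assureLogFileOpen() above follows a simple lazy-open pattern: take _curLogFileMutex, then call _open() only if no log file is currently open. A minimal standalone sketch of that pattern under stated assumptions (LogFileStub and the unique_ptr bookkeeping are stand-ins, not the real LogFile handling):

#include <memory>
#include <mutex>

struct LogFileStub {};  // stands in for mongo::LogFile

class LazyJournalSketch {
public:
    // Mirrors Journal::assureLogFileOpen(): serialize on the mutex, open on first use.
    void assureLogFileOpen() {
        std::lock_guard<std::mutex> lk(_curLogFileMutex);
        if (!_curLogFile)
            _open();
    }

private:
    void _open() {
        // The real Journal::_open() creates the next journal file and records its fileId.
        _curLogFile.reset(new LogFileStub());
    }

    std::mutex _curLogFileMutex;
    std::unique_ptr<LogFileStub> _curLogFile;
};

int main() {
    LazyJournalSketch j;
    j.assureLogFileOpen();  // opens on first call
    j.assureLogFileOpen();  // no-op afterwards
    return 0;
}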
diff --git a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
index 171254eb946..dc9d7fb2b7a 100644
--- a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
@@ -53,152 +53,147 @@
namespace mongo {
- using std::endl;
- using std::min;
- using std::stringstream;
+using std::endl;
+using std::min;
+using std::stringstream;
- namespace dur {
+namespace dur {
- extern Journal j;
- extern CommitJob commitJob;
+extern Journal j;
+extern CommitJob commitJob;
- const RelativePath local = RelativePath::fromRelativePath("local");
+const RelativePath local = RelativePath::fromRelativePath("local");
- static DurableMappedFile* findMMF_inlock(void *ptr, size_t &ofs) {
- DurableMappedFile *f = privateViews.find_inlock(ptr, ofs);
- if( f == 0 ) {
- error() << "findMMF_inlock failed " << privateViews.numberOfViews_inlock() << endl;
- printStackTrace(); // we want a stack trace and the assert below didn't print a trace once in the real world - not sure why
- stringstream ss;
- ss << "view pointer cannot be resolved " << std::hex << (size_t) ptr;
- journalingFailure(ss.str().c_str()); // asserts, which then abends
- }
- return f;
- }
-
- /** put the basic write operation into the buffer (bb) to be journaled */
- static void prepBasicWrite_inlock(AlignedBuilder&bb, const WriteIntent *i, RelativePath& lastDbPath) {
- size_t ofs = 1;
- DurableMappedFile *mmf = findMMF_inlock(i->start(), /*out*/ofs);
+static DurableMappedFile* findMMF_inlock(void* ptr, size_t& ofs) {
+ DurableMappedFile* f = privateViews.find_inlock(ptr, ofs);
+ if (f == 0) {
+ error() << "findMMF_inlock failed " << privateViews.numberOfViews_inlock() << endl;
+ printStackTrace(); // we want a stack trace and the assert below didn't print a trace once in the real world - not sure why
+ stringstream ss;
+ ss << "view pointer cannot be resolved " << std::hex << (size_t)ptr;
+ journalingFailure(ss.str().c_str()); // asserts, which then abends
+ }
+ return f;
+}
- if( MONGO_unlikely(!mmf->willNeedRemap()) ) {
- // tag this mmf as needed a remap of its private view later.
- // usually it will already be dirty/already set, so we do the if above first
- // to avoid possibility of cpu cache line contention
- mmf->setWillNeedRemap();
- }
+/** put the basic write operation into the buffer (bb) to be journaled */
+static void prepBasicWrite_inlock(AlignedBuilder& bb,
+ const WriteIntent* i,
+ RelativePath& lastDbPath) {
+ size_t ofs = 1;
+ DurableMappedFile* mmf = findMMF_inlock(i->start(), /*out*/ ofs);
+
+ if (MONGO_unlikely(!mmf->willNeedRemap())) {
+ // tag this mmf as needing a remap of its private view later.
+ // usually it will already be dirty/already set, so we do the if above first
+ // to avoid possibility of cpu cache line contention
+ mmf->setWillNeedRemap();
+ }
- // since we have already looked up the mmf, we go ahead and remember the write view location
- // so we don't have to find the DurableMappedFile again later in WRITETODATAFILES()
- //
- // this was for WRITETODATAFILES_Impl2 so commented out now
- //
- /*
- dassert( i->w_ptr == 0 );
- i->w_ptr = ((char*)mmf->view_write()) + ofs;
- */
-
- JEntry e;
- e.len = min(i->length(), (unsigned)(mmf->length() - ofs)); //don't write past end of file
- verify( ofs <= 0x80000000 );
- e.ofs = (unsigned) ofs;
- e.setFileNo( mmf->fileSuffixNo() );
-
- if( mmf->relativePath() == local ) {
- e.setLocalDbContextBit();
- }
- else if( mmf->relativePath() != lastDbPath ) {
- lastDbPath = mmf->relativePath();
- JDbContext c;
- bb.appendStruct(c);
- bb.appendStr(lastDbPath.toString());
- }
+ // since we have already looked up the mmf, we go ahead and remember the write view location
+ // so we don't have to find the DurableMappedFile again later in WRITETODATAFILES()
+ //
+ // this was for WRITETODATAFILES_Impl2 so commented out now
+ //
+ /*
+ dassert( i->w_ptr == 0 );
+ i->w_ptr = ((char*)mmf->view_write()) + ofs;
+ */
+
+ JEntry e;
+ e.len = min(i->length(), (unsigned)(mmf->length() - ofs)); // don't write past end of file
+ verify(ofs <= 0x80000000);
+ e.ofs = (unsigned)ofs;
+ e.setFileNo(mmf->fileSuffixNo());
+
+ if (mmf->relativePath() == local) {
+ e.setLocalDbContextBit();
+ } else if (mmf->relativePath() != lastDbPath) {
+ lastDbPath = mmf->relativePath();
+ JDbContext c;
+ bb.appendStruct(c);
+ bb.appendStr(lastDbPath.toString());
+ }
- bb.appendStruct(e);
- bb.appendBuf(i->start(), e.len);
+ bb.appendStruct(e);
+ bb.appendBuf(i->start(), e.len);
- if (MONGO_unlikely(e.len != (unsigned)i->length())) {
- log() << "journal info splitting prepBasicWrite at boundary" << endl;
+ if (MONGO_unlikely(e.len != (unsigned)i->length())) {
+ log() << "journal info splitting prepBasicWrite at boundary" << endl;
- // This only happens if we write to the last byte in a file and
- // the fist byte in another file that is mapped adjacently. I
- // think most OSs leave at least a one page gap between
- // mappings, but better to be safe.
+ // This only happens if we write to the last byte in a file and
+ // the first byte in another file that is mapped adjacently. I
+ // think most OSs leave at least a one page gap between
+ // mappings, but better to be safe.
- WriteIntent next ((char*)i->start() + e.len, i->length() - e.len);
- prepBasicWrite_inlock(bb, &next, lastDbPath);
- }
- }
+ WriteIntent next((char*)i->start() + e.len, i->length() - e.len);
+ prepBasicWrite_inlock(bb, &next, lastDbPath);
+ }
+}
- /** basic write ops / write intents. note there is no particular order to these : if we have
- two writes to the same location during the group commit interval, it is likely
- (although not assured) that it is journaled here once.
- */
- static void prepBasicWrites(AlignedBuilder& bb, const std::vector<WriteIntent>& intents) {
- stdx::lock_guard<stdx::mutex> lk(privateViews._mutex());
-
- // Each time write intents switch to a different database we journal a JDbContext.
- // Switches will be rare as we sort by memory location first and we batch commit.
- RelativePath lastDbPath;
-
- invariant(!intents.empty());
-
- WriteIntent last;
- for (std::vector<WriteIntent>::const_iterator i = intents.begin();
- i != intents.end();
- i++) {
-
- if( i->start() < last.end() ) {
- // overlaps
- last.absorb(*i);
- }
- else {
- // discontinuous
- if (i != intents.begin()) {
- prepBasicWrite_inlock(bb, &last, lastDbPath);
- }
-
- last = *i;
- }
+/** basic write ops / write intents. note there is no particular order to these : if we have
+ two writes to the same location during the group commit interval, it is likely
+ (although not assured) that it is journaled here once.
+*/
+static void prepBasicWrites(AlignedBuilder& bb, const std::vector<WriteIntent>& intents) {
+ stdx::lock_guard<stdx::mutex> lk(privateViews._mutex());
+
+ // Each time write intents switch to a different database we journal a JDbContext.
+ // Switches will be rare as we sort by memory location first and we batch commit.
+ RelativePath lastDbPath;
+
+ invariant(!intents.empty());
+
+ WriteIntent last;
+ for (std::vector<WriteIntent>::const_iterator i = intents.begin(); i != intents.end(); i++) {
+ if (i->start() < last.end()) {
+ // overlaps
+ last.absorb(*i);
+ } else {
+ // discontinuous
+ if (i != intents.begin()) {
+ prepBasicWrite_inlock(bb, &last, lastDbPath);
}
- prepBasicWrite_inlock(bb, &last, lastDbPath);
+ last = *i;
}
+ }
- /** we will build an output buffer ourself and then use O_DIRECT
- we could be in read lock for this
- caller handles locking
- @return partially populated sectheader and _ab set
- */
- static void _PREPLOGBUFFER(JSectHeader& h, AlignedBuilder& bb) {
- // Add the JSectHeader
-
- // Invalidate the total length, we will fill it in later.
- h.setSectionLen(0xffffffff);
- h.seqNumber = getLastDataFileFlushTime();
- h.fileId = j.curFileId();
-
- // Ops other than basic writes (DurOp's) go first
- const std::vector<std::shared_ptr<DurOp> >& durOps = commitJob.ops();
- for (std::vector<std::shared_ptr<DurOp> >::const_iterator i = durOps.begin();
- i != durOps.end();
- i++) {
-
- (*i)->serialize(bb);
- }
+ prepBasicWrite_inlock(bb, &last, lastDbPath);
+}
- // Write intents
- const std::vector<WriteIntent>& intents = commitJob.getIntentsSorted();
- if (!intents.empty()) {
- prepBasicWrites(bb, intents);
- }
- }
+/** we will build an output buffer ourselves and then use O_DIRECT
+ we could be in read lock for this
+ caller handles locking
+ @return partially populated sectheader and _ab set
+*/
+static void _PREPLOGBUFFER(JSectHeader& h, AlignedBuilder& bb) {
+ // Add the JSectHeader
+
+ // Invalidate the total length, we will fill it in later.
+ h.setSectionLen(0xffffffff);
+ h.seqNumber = getLastDataFileFlushTime();
+ h.fileId = j.curFileId();
+
+ // Ops other than basic writes (DurOp's) go first
+ const std::vector<std::shared_ptr<DurOp>>& durOps = commitJob.ops();
+ for (std::vector<std::shared_ptr<DurOp>>::const_iterator i = durOps.begin(); i != durOps.end();
+ i++) {
+ (*i)->serialize(bb);
+ }
- void PREPLOGBUFFER(/*out*/ JSectHeader& outHeader, AlignedBuilder& outBuffer) {
- Timer t;
- j.assureLogFileOpen(); // so fileId is set
- _PREPLOGBUFFER(outHeader, outBuffer);
- stats.curr()->_prepLogBufferMicros += t.micros();
- }
+ // Write intents
+ const std::vector<WriteIntent>& intents = commitJob.getIntentsSorted();
+ if (!intents.empty()) {
+ prepBasicWrites(bb, intents);
}
}
+
+void PREPLOGBUFFER(/*out*/ JSectHeader& outHeader, AlignedBuilder& outBuffer) {
+ Timer t;
+ j.assureLogFileOpen(); // so fileId is set
+ _PREPLOGBUFFER(outHeader, outBuffer);
+ stats.curr()->_prepLogBufferMicros += t.micros();
+}
+}
+}
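prepBasicWrites() above walks write intents that have been sorted by memory location and absorbs any intent that overlaps the previous one, so a region touched twice in the same group commit is usually journaled once. A minimal standalone sketch of that coalescing step, assuming intents are plain (start, length) pairs already sorted by start address (not the real WriteIntent class):

#include <cassert>
#include <utility>
#include <vector>

typedef std::pair<const char*, unsigned> Intent;  // (start address, length)

// Merge overlapping intents; the input must be sorted by start address, mirroring
// how prepBasicWrites() absorbs a WriteIntent that starts before the previous one ends.
std::vector<Intent> coalesce(const std::vector<Intent>& sorted) {
    std::vector<Intent> out;
    for (size_t i = 0; i < sorted.size(); i++) {
        const char* prevEnd = out.empty() ? 0 : out.back().first + out.back().second;
        if (!out.empty() && sorted[i].first < prevEnd) {
            // overlaps: extend the previous intent to cover both
            const char* newEnd = sorted[i].first + sorted[i].second;
            if (newEnd > prevEnd)
                out.back().second = unsigned(newEnd - out.back().first);
        } else {
            // discontinuous: start a new intent
            out.push_back(sorted[i]);
        }
    }
    return out;
}

int main() {
    char buf[100];
    std::vector<Intent> in;
    in.push_back(Intent(buf + 0, 10));
    in.push_back(Intent(buf + 5, 10));  // overlaps the first -> merged into one 15-byte intent
    in.push_back(Intent(buf + 40, 4));  // discontinuous -> kept separate
    std::vector<Intent> out = coalesce(in);
    assert(out.size() == 2);
    assert(out[0].second == 15);
    return 0;
}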
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index bfd023affab..a6958ad1aec 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -58,571 +58,564 @@
namespace mongo {
- using std::shared_ptr;
- using std::unique_ptr;
- using std::endl;
- using std::hex;
- using std::map;
- using std::pair;
- using std::setw;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- /**
- * Thrown when a journal section is corrupt. This is considered OK as long as it occurs while
- * processing the last file. Processing stops at the first corrupt section.
- *
- * Any logging about the nature of the corruption should happen before throwing as this class
- * contains no data.
- */
- class JournalSectionCorruptException {};
+using std::shared_ptr;
+using std::unique_ptr;
+using std::endl;
+using std::hex;
+using std::map;
+using std::pair;
+using std::setw;
+using std::string;
+using std::stringstream;
+using std::vector;
- namespace dur {
+/**
+ * Thrown when a journal section is corrupt. This is considered OK as long as it occurs while
+ * processing the last file. Processing stops at the first corrupt section.
+ *
+ * Any logging about the nature of the corruption should happen before throwing as this class
+ * contains no data.
+ */
+class JournalSectionCorruptException {};
- // The singleton recovery job object
- RecoveryJob& RecoveryJob::_instance = *(new RecoveryJob());
+namespace dur {
+// The singleton recovery job object
+RecoveryJob& RecoveryJob::_instance = *(new RecoveryJob());
- void removeJournalFiles();
- boost::filesystem::path getJournalDir();
+void removeJournalFiles();
+boost::filesystem::path getJournalDir();
- struct ParsedJournalEntry { /*copyable*/
- ParsedJournalEntry() : e(0) { }
- // relative path of database for the operation.
- // might be a pointer into mmaped Journal file
- const char *dbName;
+struct ParsedJournalEntry {/*copyable*/
+ ParsedJournalEntry() : e(0) {}
- // those are pointers into the memory mapped journal file
- const JEntry *e; // local db sentinel is already parsed out here into dbName
+ // relative path of database for the operation.
+ // might be a pointer into the mmapped journal file
+ const char* dbName;
- // if not one of the two simple JEntry's above, this is the operation:
- std::shared_ptr<DurOp> op;
- };
+ // those are pointers into the memory mapped journal file
+ const JEntry* e; // local db sentinel is already parsed out here into dbName
+ // if not one of the two simple JEntry's above, this is the operation:
+ std::shared_ptr<DurOp> op;
+};
- /**
- * Get journal filenames, in order. Throws if unexpected content found.
- */
- static void getFiles(boost::filesystem::path dir, vector<boost::filesystem::path>& files) {
- map<unsigned,boost::filesystem::path> m;
- for ( boost::filesystem::directory_iterator i( dir );
- i != boost::filesystem::directory_iterator();
- ++i ) {
- boost::filesystem::path filepath = *i;
- string fileName = boost::filesystem::path(*i).leaf().string();
- if( str::startsWith(fileName, "j._") ) {
- unsigned u = str::toUnsigned( str::after(fileName, '_') );
- if( m.count(u) ) {
- uasserted(13531, str::stream() << "unexpected files in journal directory " << dir.string() << " : " << fileName);
- }
- m.insert( pair<unsigned,boost::filesystem::path>(u,filepath) );
- }
- }
- for( map<unsigned,boost::filesystem::path>::iterator i = m.begin(); i != m.end(); ++i ) {
- if( i != m.begin() && m.count(i->first - 1) == 0 ) {
- uasserted(13532,
- str::stream() << "unexpected file in journal directory " << dir.string()
- << " : " << boost::filesystem::path(i->second).leaf().string() << " : can't find its preceding file");
- }
- files.push_back(i->second);
+
+/**
+ * Get journal filenames, in order. Throws if unexpected content found.
+ */
+static void getFiles(boost::filesystem::path dir, vector<boost::filesystem::path>& files) {
+ map<unsigned, boost::filesystem::path> m;
+ for (boost::filesystem::directory_iterator i(dir); i != boost::filesystem::directory_iterator();
+ ++i) {
+ boost::filesystem::path filepath = *i;
+ string fileName = boost::filesystem::path(*i).leaf().string();
+ if (str::startsWith(fileName, "j._")) {
+ unsigned u = str::toUnsigned(str::after(fileName, '_'));
+ if (m.count(u)) {
+ uasserted(13531,
+ str::stream() << "unexpected files in journal directory " << dir.string()
+ << " : " << fileName);
}
+ m.insert(pair<unsigned, boost::filesystem::path>(u, filepath));
}
+ }
+ for (map<unsigned, boost::filesystem::path>::iterator i = m.begin(); i != m.end(); ++i) {
+ if (i != m.begin() && m.count(i->first - 1) == 0) {
+ uasserted(13532,
+ str::stream() << "unexpected file in journal directory " << dir.string()
+ << " : " << boost::filesystem::path(i->second).leaf().string()
+ << " : can't find its preceding file");
+ }
+ files.push_back(i->second);
+ }
+}
- /** read through the memory mapped data of a journal file (journal/j._<n> file)
- throws
- */
- class JournalSectionIterator {
- MONGO_DISALLOW_COPYING(JournalSectionIterator);
- public:
- JournalSectionIterator(const JSectHeader& h,
- const void *compressed,
- unsigned compressedLen,
- bool doDurOpsRecovering)
- : _h(h),
- _lastDbName(0),
- _doDurOps(doDurOpsRecovering) {
-
- verify(doDurOpsRecovering);
-
- if (!uncompress((const char *)compressed, compressedLen, &_uncompressed)) {
- // We check the checksum before we uncompress, but this may still fail as the
- // checksum isn't foolproof.
- log() << "couldn't uncompress journal section" << endl;
- throw JournalSectionCorruptException();
- }
-
- const char *p = _uncompressed.c_str();
- verify(compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader));
-
- _entries = unique_ptr<BufReader>(new BufReader(p, _uncompressed.size()));
- }
+/** read through the memory mapped data of a journal file (journal/j._<n> file)
+ throws
+*/
+class JournalSectionIterator {
+ MONGO_DISALLOW_COPYING(JournalSectionIterator);
+
+public:
+ JournalSectionIterator(const JSectHeader& h,
+ const void* compressed,
+ unsigned compressedLen,
+ bool doDurOpsRecovering)
+ : _h(h), _lastDbName(0), _doDurOps(doDurOpsRecovering) {
+ verify(doDurOpsRecovering);
+
+ if (!uncompress((const char*)compressed, compressedLen, &_uncompressed)) {
+ // We check the checksum before we uncompress, but this may still fail as the
+ // checksum isn't foolproof.
+ log() << "couldn't uncompress journal section" << endl;
+ throw JournalSectionCorruptException();
+ }
- // We work with the uncompressed buffer when doing a WRITETODATAFILES (for speed)
- JournalSectionIterator(const JSectHeader &h, const void *p, unsigned len)
- : _entries(new BufReader((const char *)p, len)),
- _h(h),
- _lastDbName(0),
- _doDurOps(false) {
+ const char* p = _uncompressed.c_str();
+ verify(compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader));
- }
+ _entries = unique_ptr<BufReader>(new BufReader(p, _uncompressed.size()));
+ }
- bool atEof() const { return _entries->atEof(); }
+ // We work with the uncompressed buffer when doing a WRITETODATAFILES (for speed)
+ JournalSectionIterator(const JSectHeader& h, const void* p, unsigned len)
+ : _entries(new BufReader((const char*)p, len)), _h(h), _lastDbName(0), _doDurOps(false) {}
- unsigned long long seqNumber() const { return _h.seqNumber; }
+ bool atEof() const {
+ return _entries->atEof();
+ }
- /** get the next entry from the log. this function parses and combines JDbContext and JEntry's.
- * throws on premature end of section.
- */
- void next(ParsedJournalEntry& e) {
- unsigned lenOrOpCode;
- _entries->read(lenOrOpCode);
+ unsigned long long seqNumber() const {
+ return _h.seqNumber;
+ }
- if (lenOrOpCode > JEntry::OpCode_Min) {
- switch( lenOrOpCode ) {
+ /** get the next entry from the log. this function parses and combines JDbContext and JEntry's.
+ * throws on premature end of section.
+ */
+ void next(ParsedJournalEntry& e) {
+ unsigned lenOrOpCode;
+ _entries->read(lenOrOpCode);
- case JEntry::OpCode_Footer: {
- verify( false );
- }
+ if (lenOrOpCode > JEntry::OpCode_Min) {
+ switch (lenOrOpCode) {
+ case JEntry::OpCode_Footer: {
+ verify(false);
+ }
- case JEntry::OpCode_FileCreated:
- case JEntry::OpCode_DropDb: {
- e.dbName = 0;
- std::shared_ptr<DurOp> op = DurOp::read(lenOrOpCode, *_entries);
- if (_doDurOps) {
- e.op = op;
- }
- return;
+ case JEntry::OpCode_FileCreated:
+ case JEntry::OpCode_DropDb: {
+ e.dbName = 0;
+ std::shared_ptr<DurOp> op = DurOp::read(lenOrOpCode, *_entries);
+ if (_doDurOps) {
+ e.op = op;
}
+ return;
+ }
- case JEntry::OpCode_DbContext: {
- _lastDbName = (const char*) _entries->pos();
- const unsigned limit = _entries->remaining();
- const unsigned len = strnlen(_lastDbName, limit);
- if (_lastDbName[len] != '\0') {
- log() << "problem processing journal file during recovery";
- throw JournalSectionCorruptException();
- }
-
- _entries->skip(len+1); // skip '\0' too
- _entries->read(lenOrOpCode); // read this for the fall through
+ case JEntry::OpCode_DbContext: {
+ _lastDbName = (const char*)_entries->pos();
+ const unsigned limit = _entries->remaining();
+ const unsigned len = strnlen(_lastDbName, limit);
+ if (_lastDbName[len] != '\0') {
+ log() << "problem processing journal file during recovery";
+ throw JournalSectionCorruptException();
}
- // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
- default:
- // fall through
- ;
- }
+ _entries->skip(len + 1); // skip '\0' too
+ _entries->read(lenOrOpCode); // read this for the fall through
}
+ // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
- // JEntry - a basic write
- verify( lenOrOpCode && lenOrOpCode < JEntry::OpCode_Min );
- _entries->rewind(4);
- e.e = (JEntry *) _entries->skip(sizeof(JEntry));
- e.dbName = e.e->isLocalDbContext() ? "local" : _lastDbName;
- verify( e.e->len == lenOrOpCode );
- _entries->skip(e.e->len);
+ default:
+ // fall through
+ ;
}
+ }
-
- private:
- unique_ptr<BufReader> _entries;
- const JSectHeader _h;
- const char *_lastDbName; // pointer into mmaped journal file
- const bool _doDurOps;
- string _uncompressed;
- };
-
-
- static string fileName(const char* dbName, int fileNo) {
+ // JEntry - a basic write
+ verify(lenOrOpCode && lenOrOpCode < JEntry::OpCode_Min);
+ _entries->rewind(4);
+ e.e = (JEntry*)_entries->skip(sizeof(JEntry));
+ e.dbName = e.e->isLocalDbContext() ? "local" : _lastDbName;
+ verify(e.e->len == lenOrOpCode);
+ _entries->skip(e.e->len);
+ }
+
+
+private:
+ unique_ptr<BufReader> _entries;
+ const JSectHeader _h;
+ const char* _lastDbName; // pointer into mmapped journal file
+ const bool _doDurOps;
+ string _uncompressed;
+};
+
+
+static string fileName(const char* dbName, int fileNo) {
+ stringstream ss;
+ ss << dbName << '.';
+ verify(fileNo >= 0);
+ if (fileNo == JEntry::DotNsSuffix)
+ ss << "ns";
+ else
+ ss << fileNo;
+
+ // relative name -> full path name
+ boost::filesystem::path full(storageGlobalParams.dbpath);
+ full /= ss.str();
+ return full.string();
+}
+
+
+RecoveryJob::RecoveryJob()
+ : _recovering(false), _lastDataSyncedFromLastRun(0), _lastSeqMentionedInConsoleLog(1) {}
+
+RecoveryJob::~RecoveryJob() {
+ DESTRUCTOR_GUARD(if (!_mmfs.empty()) {} close();)
+}
+
+void RecoveryJob::close() {
+ stdx::lock_guard<stdx::mutex> lk(_mx);
+ _close();
+}
+
+void RecoveryJob::_close() {
+ MongoFile::flushAll(true);
+ _mmfs.clear();
+}
+
+RecoveryJob::Last::Last() : mmf(NULL), fileNo(-1) {
+ // Make sure the files list does not change from underneath
+ LockMongoFilesShared::assertAtLeastReadLocked();
+}
+
+DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& entry,
+ RecoveryJob& rj) {
+ int num = entry.e->getFileNo();
+ if (num == fileNo && entry.dbName == dbName)
+ return mmf;
+
+ string fn = fileName(entry.dbName, num);
+ MongoFile* file;
+ {
+ MongoFileFinder finder; // must release lock before creating new DurableMappedFile
+ file = finder.findByPath(fn);
+ }
+
+ if (file) {
+ verify(file->isDurableMappedFile());
+ mmf = (DurableMappedFile*)file;
+ } else {
+ if (!rj._recovering) {
+ log() << "journal error applying writes, file " << fn << " is not open" << endl;
+ verify(false);
+ }
+ std::shared_ptr<DurableMappedFile> sp(new DurableMappedFile);
+ verify(sp->open(fn, false));
+ rj._mmfs.push_back(sp);
+ mmf = sp.get();
+ }
+
+ // we do this last so that if an exception were thrown, there isn't any wrong memory
+ dbName = entry.dbName;
+ fileNo = num;
+ return mmf;
+}
+
+void RecoveryJob::write(Last& last, const ParsedJournalEntry& entry) {
+ // TODO(mathias): look into making some of these dasserts
+ verify(entry.e);
+ verify(entry.dbName);
+
+ DurableMappedFile* mmf = last.newEntry(entry, *this);
+
+ if ((entry.e->ofs + entry.e->len) <= mmf->length()) {
+ verify(mmf->view_write());
+ verify(entry.e->srcData());
+
+ void* dest = (char*)mmf->view_write() + entry.e->ofs;
+ memcpy(dest, entry.e->srcData(), entry.e->len);
+ stats.curr()->_writeToDataFilesBytes += entry.e->len;
+ } else {
+ massert(13622, "Trying to write past end of file in WRITETODATAFILES", _recovering);
+ }
+}
+
+void RecoveryJob::applyEntry(Last& last, const ParsedJournalEntry& entry, bool apply, bool dump) {
+ if (entry.e) {
+ if (dump) {
stringstream ss;
- ss << dbName << '.';
- verify( fileNo >= 0 );
- if( fileNo == JEntry::DotNsSuffix )
+ ss << " BASICWRITE " << setw(20) << entry.dbName << '.';
+ if (entry.e->isNsSuffix())
ss << "ns";
else
- ss << fileNo;
-
- // relative name -> full path name
- boost::filesystem::path full(storageGlobalParams.dbpath);
- full /= ss.str();
- return full.string();
- }
-
-
- RecoveryJob::RecoveryJob()
- : _recovering(false),
- _lastDataSyncedFromLastRun(0),
- _lastSeqMentionedInConsoleLog(1) {
-
- }
-
- RecoveryJob::~RecoveryJob() {
- DESTRUCTOR_GUARD(
- if (!_mmfs.empty()) {}
- close();
- )
+ ss << setw(2) << entry.e->getFileNo();
+ ss << ' ' << setw(6) << entry.e->len << ' '
+ << /*hex << setw(8) << (size_t) fqe.srcData << dec <<*/
+ " " << hexdump(entry.e->srcData(), entry.e->len);
+ log() << ss.str() << endl;
}
-
- void RecoveryJob::close() {
- stdx::lock_guard<stdx::mutex> lk(_mx);
- _close();
+ if (apply) {
+ write(last, entry);
}
-
- void RecoveryJob::_close() {
- MongoFile::flushAll(true);
- _mmfs.clear();
+ } else if (entry.op) {
+ // a DurOp subclass operation
+ if (dump) {
+ log() << " OP " << entry.op->toString() << endl;
}
-
- RecoveryJob::Last::Last() : mmf(NULL), fileNo(-1) {
- // Make sure the files list does not change from underneath
- LockMongoFilesShared::assertAtLeastReadLocked();
- }
-
- DurableMappedFile* RecoveryJob::Last::newEntry(const dur::ParsedJournalEntry& entry, RecoveryJob& rj) {
- int num = entry.e->getFileNo();
- if( num == fileNo && entry.dbName == dbName )
- return mmf;
-
- string fn = fileName(entry.dbName, num);
- MongoFile *file;
- {
- MongoFileFinder finder; // must release lock before creating new DurableMappedFile
- file = finder.findByPath(fn);
- }
-
- if (file) {
- verify(file->isDurableMappedFile());
- mmf = (DurableMappedFile*)file;
- }
- else {
- if( !rj._recovering ) {
- log() << "journal error applying writes, file " << fn << " is not open" << endl;
- verify(false);
- }
- std::shared_ptr<DurableMappedFile> sp (new DurableMappedFile);
- verify(sp->open(fn, false));
- rj._mmfs.push_back(sp);
- mmf = sp.get();
+ if (apply) {
+ if (entry.op->needFilesClosed()) {
+ _close(); // locked in processSection
}
-
- // we do this last so that if an exception were thrown, there isn't any wrong memory
- dbName = entry.dbName;
- fileNo = num;
- return mmf;
+ entry.op->replay();
}
-
- void RecoveryJob::write(Last& last, const ParsedJournalEntry& entry) {
- //TODO(mathias): look into making some of these dasserts
- verify(entry.e);
- verify(entry.dbName);
-
- DurableMappedFile *mmf = last.newEntry(entry, *this);
-
- if ((entry.e->ofs + entry.e->len) <= mmf->length()) {
- verify(mmf->view_write());
- verify(entry.e->srcData());
-
- void* dest = (char*)mmf->view_write() + entry.e->ofs;
- memcpy(dest, entry.e->srcData(), entry.e->len);
- stats.curr()->_writeToDataFilesBytes += entry.e->len;
- }
- else {
- massert(13622, "Trying to write past end of file in WRITETODATAFILES", _recovering);
- }
+ }
+}
+
+void RecoveryJob::applyEntries(const vector<ParsedJournalEntry>& entries) {
+ const bool apply = (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) == 0;
+ const bool dump = (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal);
+
+ if (dump) {
+ log() << "BEGIN section" << endl;
+ }
+
+ Last last;
+ for (vector<ParsedJournalEntry>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
+ applyEntry(last, *i, apply, dump);
+ }
+
+ if (dump) {
+ log() << "END section" << endl;
+ }
+}
+
+void RecoveryJob::processSection(const JSectHeader* h,
+ const void* p,
+ unsigned len,
+ const JSectFooter* f) {
+ LockMongoFilesShared lkFiles; // for RecoveryJob::Last
+ stdx::lock_guard<stdx::mutex> lk(_mx);
+
+ // Check the footer checksum before doing anything else.
+ if (_recovering) {
+ verify(((const char*)h) + sizeof(JSectHeader) == p);
+ if (!f->checkHash(h, len + sizeof(JSectHeader))) {
+ log() << "journal section checksum doesn't match";
+ throw JournalSectionCorruptException();
}
-
- void RecoveryJob::applyEntry(Last& last, const ParsedJournalEntry& entry, bool apply, bool dump) {
- if( entry.e ) {
- if( dump ) {
- stringstream ss;
- ss << " BASICWRITE " << setw(20) << entry.dbName << '.';
- if( entry.e->isNsSuffix() )
- ss << "ns";
- else
- ss << setw(2) << entry.e->getFileNo();
- ss << ' ' << setw(6) << entry.e->len << ' ' << /*hex << setw(8) << (size_t) fqe.srcData << dec <<*/
- " " << hexdump(entry.e->srcData(), entry.e->len);
- log() << ss.str() << endl;
- }
- if( apply ) {
- write(last, entry);
- }
- }
- else if(entry.op) {
- // a DurOp subclass operation
- if( dump ) {
- log() << " OP " << entry.op->toString() << endl;
- }
- if( apply ) {
- if( entry.op->needFilesClosed() ) {
- _close(); // locked in processSection
- }
- entry.op->replay();
- }
+ }
+
+ if (_recovering && _lastDataSyncedFromLastRun > h->seqNumber + ExtraKeepTimeMs) {
+ if (h->seqNumber != _lastSeqMentionedInConsoleLog) {
+ static int n;
+ if (++n < 10) {
+ log() << "recover skipping application of section seq:" << h->seqNumber
+ << " < lsn:" << _lastDataSyncedFromLastRun << endl;
+ } else if (n == 10) {
+ log() << "recover skipping application of section more..." << endl;
}
+ _lastSeqMentionedInConsoleLog = h->seqNumber;
}
-
- void RecoveryJob::applyEntries(const vector<ParsedJournalEntry> &entries) {
- const bool apply =
- (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) == 0;
- const bool dump =
- (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal);
-
- if (dump) {
- log() << "BEGIN section" << endl;
- }
-
- Last last;
- for (vector<ParsedJournalEntry>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
- applyEntry(last, *i, apply, dump);
- }
-
- if (dump) {
- log() << "END section" << endl;
- }
- }
-
- void RecoveryJob::processSection(const JSectHeader *h, const void *p, unsigned len, const JSectFooter *f) {
- LockMongoFilesShared lkFiles; // for RecoveryJob::Last
- stdx::lock_guard<stdx::mutex> lk(_mx);
-
- // Check the footer checksum before doing anything else.
- if (_recovering) {
- verify( ((const char *)h) + sizeof(JSectHeader) == p );
- if (!f->checkHash(h, len + sizeof(JSectHeader))) {
- log() << "journal section checksum doesn't match";
- throw JournalSectionCorruptException();
- }
- }
-
- if( _recovering && _lastDataSyncedFromLastRun > h->seqNumber + ExtraKeepTimeMs ) {
- if( h->seqNumber != _lastSeqMentionedInConsoleLog ) {
- static int n;
- if( ++n < 10 ) {
- log() << "recover skipping application of section seq:" << h->seqNumber << " < lsn:" << _lastDataSyncedFromLastRun << endl;
+ return;
+ }
+
+ unique_ptr<JournalSectionIterator> i;
+ if (_recovering) {
+ i = unique_ptr<JournalSectionIterator>(new JournalSectionIterator(*h, p, len, _recovering));
+ } else {
+ i = unique_ptr<JournalSectionIterator>(
+ new JournalSectionIterator(*h, /*after header*/ p, /*w/out header*/ len));
+ }
+
+ // we use a static so that we don't have to reallocate every time through. occasionally we
+ // go back to a small allocation so that if there were a spiky growth it won't stick forever.
+ static vector<ParsedJournalEntry> entries;
+ entries.clear();
+ /** TEMP uncomment
+ RARELY OCCASIONALLY {
+ if( entries.capacity() > 2048 ) {
+ entries.shrink_to_fit();
+ entries.reserve(2048);
}
- else if( n == 10 ) {
- log() << "recover skipping application of section more..." << endl;
- }
- _lastSeqMentionedInConsoleLog = h->seqNumber;
- }
- return;
- }
-
- unique_ptr<JournalSectionIterator> i;
- if( _recovering ) {
- i = unique_ptr<JournalSectionIterator>(new JournalSectionIterator(*h, p, len, _recovering));
- }
- else {
- i = unique_ptr<JournalSectionIterator>(new JournalSectionIterator(*h, /*after header*/p, /*w/out header*/len));
- }
-
- // we use a static so that we don't have to reallocate every time through. occasionally we
- // go back to a small allocation so that if there were a spiky growth it won't stick forever.
- static vector<ParsedJournalEntry> entries;
- entries.clear();
-/** TEMP uncomment
- RARELY OCCASIONALLY {
- if( entries.capacity() > 2048 ) {
- entries.shrink_to_fit();
- entries.reserve(2048);
}
- }
+ */
+
+ // first read all entries to make sure this section is valid
+ ParsedJournalEntry e;
+ while (!i->atEof()) {
+ i->next(e);
+ entries.push_back(e);
+ }
+
+ // got all the entries for one group commit. apply them:
+ applyEntries(entries);
+}
+
+/** apply a specific journal file, that is already mmap'd
+ @param p start of the memory mapped file
+ @return true if this is detected to be the last file (ends abruptly)
*/
-
- // first read all entries to make sure this section is valid
- ParsedJournalEntry e;
- while( !i->atEof() ) {
- i->next(e);
- entries.push_back(e);
+bool RecoveryJob::processFileBuffer(const void* p, unsigned len) {
+ try {
+ unsigned long long fileId;
+ BufReader br(p, len);
+
+ {
+ // read file header
+ JHeader h;
+ br.read(h);
+
+ if (!h.valid()) {
+ log() << "Journal file header invalid. This could indicate corruption, or "
+ << "an unclean shutdown while writing the first section in a journal "
+ << "file.";
+ throw JournalSectionCorruptException();
}
- // got all the entries for one group commit. apply them:
- applyEntries(entries);
- }
-
- /** apply a specific journal file, that is already mmap'd
- @param p start of the memory mapped file
- @return true if this is detected to be the last file (ends abruptly)
- */
- bool RecoveryJob::processFileBuffer(const void *p, unsigned len) {
- try {
- unsigned long long fileId;
- BufReader br(p,len);
-
- {
- // read file header
- JHeader h;
- br.read(h);
-
- if (!h.valid()) {
- log() << "Journal file header invalid. This could indicate corruption, or "
- << "an unclean shutdown while writing the first section in a journal "
- << "file.";
- throw JournalSectionCorruptException();
- }
-
- if( !h.versionOk() ) {
- log() << "journal file version number mismatch got:" << hex << h._version
- << " expected:" << hex << (unsigned) JHeader::CurrentVersion
- << ". if you have just upgraded, recover with old version of mongod, terminate cleanly, then upgrade."
- << endl;
- // Not using JournalSectionCurruptException as we don't want to ignore
- // journal files on upgrade.
- uasserted(13536, str::stream() << "journal version number mismatch " << h._version);
- }
- fileId = h.fileId;
- if (mmapv1GlobalOptions.journalOptions &
- MMAPV1Options::JournalDumpJournal) {
- log() << "JHeader::fileId=" << fileId << endl;
- }
- }
-
- // read sections
- while ( !br.atEof() ) {
- JSectHeader h;
- br.peek(h);
- if( h.fileId != fileId ) {
- if (kDebugBuild || (mmapv1GlobalOptions.journalOptions &
- MMAPV1Options::JournalDumpJournal)) {
- log() << "Ending processFileBuffer at differing fileId want:" << fileId << " got:" << h.fileId << endl;
- log() << " sect len:" << h.sectionLen() << " seqnum:" << h.seqNumber << endl;
- }
- return true;
- }
- unsigned slen = h.sectionLen();
- unsigned dataLen = slen - sizeof(JSectHeader) - sizeof(JSectFooter);
- const char *hdr = (const char *) br.skip(h.sectionLenWithPadding());
- const char *data = hdr + sizeof(JSectHeader);
- const char *footer = data + dataLen;
- processSection((const JSectHeader*) hdr, data, dataLen, (const JSectFooter*) footer);
-
- // ctrl c check
- uassert(ErrorCodes::Interrupted, "interrupted during journal recovery", !inShutdown());
- }
+ if (!h.versionOk()) {
+ log() << "journal file version number mismatch got:" << hex << h._version
+ << " expected:" << hex << (unsigned)JHeader::CurrentVersion
+ << ". if you have just upgraded, recover with old version of mongod, "
+ "terminate cleanly, then upgrade." << endl;
+ // Not using JournalSectionCorruptException as we don't want to ignore
+ // journal files on upgrade.
+ uasserted(13536, str::stream() << "journal version number mismatch " << h._version);
}
- catch (const BufReader::eof&) {
- if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal)
- log() << "ABRUPT END" << endl;
- return true; // abrupt end
+ fileId = h.fileId;
+ if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal) {
+ log() << "JHeader::fileId=" << fileId << endl;
}
- catch (const JournalSectionCorruptException&) {
- if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal)
- log() << "ABRUPT END" << endl;
- return true; // abrupt end
- }
-
- return false; // non-abrupt end
}
- /** apply a specific journal file */
- bool RecoveryJob::processFile(boost::filesystem::path journalfile) {
- log() << "recover " << journalfile.string() << endl;
-
- try {
- if( boost::filesystem::file_size( journalfile.string() ) == 0 ) {
- log() << "recover info " << journalfile.string() << " has zero length" << endl;
- return true;
+ // read sections
+ while (!br.atEof()) {
+ JSectHeader h;
+ br.peek(h);
+ if (h.fileId != fileId) {
+ if (kDebugBuild ||
+ (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal)) {
+ log() << "Ending processFileBuffer at differing fileId want:" << fileId
+ << " got:" << h.fileId << endl;
+ log() << " sect len:" << h.sectionLen() << " seqnum:" << h.seqNumber << endl;
}
- } catch(...) {
- // if something weird like a permissions problem keep going so the massert down below can happen (presumably)
- log() << "recover exception checking filesize" << endl;
+ return true;
}
-
- MemoryMappedFile f;
- void *p = f.mapWithOptions(journalfile.string().c_str(), MongoFile::READONLY | MongoFile::SEQUENTIAL);
- massert(13544, str::stream() << "recover error couldn't open " << journalfile.string(), p);
- return processFileBuffer(p, (unsigned) f.length());
+ unsigned slen = h.sectionLen();
+ unsigned dataLen = slen - sizeof(JSectHeader) - sizeof(JSectFooter);
+ const char* hdr = (const char*)br.skip(h.sectionLenWithPadding());
+ const char* data = hdr + sizeof(JSectHeader);
+ const char* footer = data + dataLen;
+ processSection((const JSectHeader*)hdr, data, dataLen, (const JSectFooter*)footer);
+
+ // ctrl c check
+ uassert(ErrorCodes::Interrupted, "interrupted during journal recovery", !inShutdown());
}
-
- /** @param files all the j._0 style files we need to apply for recovery */
- void RecoveryJob::go(vector<boost::filesystem::path>& files) {
- log() << "recover begin" << endl;
- LockMongoFilesExclusive lkFiles; // for RecoveryJob::Last
- _recovering = true;
-
- // load the last sequence number synced to the datafiles on disk before the last crash
- _lastDataSyncedFromLastRun = journalReadLSN();
- log() << "recover lsn: " << _lastDataSyncedFromLastRun << endl;
-
- for( unsigned i = 0; i != files.size(); ++i ) {
- bool abruptEnd = processFile(files[i]);
- if( abruptEnd && i+1 < files.size() ) {
- log() << "recover error: abrupt end to file " << files[i].string() << ", yet it isn't the last journal file" << endl;
- close();
- uasserted(13535, "recover abrupt journal file end");
- }
- }
-
- close();
-
- if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) {
- uasserted(13545, str::stream() << "--durOptions "
- << (int) MMAPV1Options::JournalScanOnly
- << " (scan only) specified");
- }
-
- log() << "recover cleaning up" << endl;
- removeJournalFiles();
- log() << "recover done" << endl;
- okToCleanUp = true;
- _recovering = false;
+ } catch (const BufReader::eof&) {
+ if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal)
+ log() << "ABRUPT END" << endl;
+ return true; // abrupt end
+ } catch (const JournalSectionCorruptException&) {
+ if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalDumpJournal)
+ log() << "ABRUPT END" << endl;
+ return true; // abrupt end
+ }
+
+ return false; // non-abrupt end
+}
+
+/** apply a specific journal file */
+bool RecoveryJob::processFile(boost::filesystem::path journalfile) {
+ log() << "recover " << journalfile.string() << endl;
+
+ try {
+ if (boost::filesystem::file_size(journalfile.string()) == 0) {
+ log() << "recover info " << journalfile.string() << " has zero length" << endl;
+ return true;
}
-
- void _recover() {
- verify(storageGlobalParams.dur);
-
- boost::filesystem::path p = getJournalDir();
- if( !exists(p) ) {
- log() << "directory " << p.string() << " does not exist, there will be no recovery startup step" << endl;
- okToCleanUp = true;
- return;
- }
-
- vector<boost::filesystem::path> journalFiles;
- getFiles(p, journalFiles);
-
- if( journalFiles.empty() ) {
- log() << "recover : no journal files present, no recovery needed" << endl;
- okToCleanUp = true;
- return;
- }
-
- RecoveryJob::get().go(journalFiles);
- }
-
- /** recover from a crash
- called during startup
- throws on error
- */
- void replayJournalFilesAtStartup() {
- // we use a lock so that exitCleanly will wait for us
- // to finish (or at least to notice what is up and stop)
- OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite lk(txn.lockState());
-
- _recover(); // throws on interruption
+ } catch (...) {
+ // if something weird happens, like a permissions problem, keep going so the massert down below can happen (presumably)
+ log() << "recover exception checking filesize" << endl;
+ }
+
+ MemoryMappedFile f;
+ void* p =
+ f.mapWithOptions(journalfile.string().c_str(), MongoFile::READONLY | MongoFile::SEQUENTIAL);
+ massert(13544, str::stream() << "recover error couldn't open " << journalfile.string(), p);
+ return processFileBuffer(p, (unsigned)f.length());
+}
+
+/** @param files all the j._0 style files we need to apply for recovery */
+void RecoveryJob::go(vector<boost::filesystem::path>& files) {
+ log() << "recover begin" << endl;
+ LockMongoFilesExclusive lkFiles; // for RecoveryJob::Last
+ _recovering = true;
+
+ // load the last sequence number synced to the datafiles on disk before the last crash
+ _lastDataSyncedFromLastRun = journalReadLSN();
+ log() << "recover lsn: " << _lastDataSyncedFromLastRun << endl;
+
+ for (unsigned i = 0; i != files.size(); ++i) {
+ bool abruptEnd = processFile(files[i]);
+ if (abruptEnd && i + 1 < files.size()) {
+ log() << "recover error: abrupt end to file " << files[i].string()
+ << ", yet it isn't the last journal file" << endl;
+ close();
+ uasserted(13535, "recover abrupt journal file end");
}
-
- struct BufReaderY { int a,b; };
- class BufReaderUnitTest : public StartupTest {
- public:
- void run() {
- BufReader r((void*) "abcdabcdabcd", 12);
- char x;
- BufReaderY y;
- r.read(x); //cout << x; // a
- verify( x == 'a' );
- r.read(y);
- r.read(x);
- verify( x == 'b' );
- }
- } brunittest;
-
- } // namespace dur
-} // namespace mongo
-
+ }
+
+ close();
+
+ if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalScanOnly) {
+ uasserted(13545,
+ str::stream() << "--durOptions " << (int)MMAPV1Options::JournalScanOnly
+ << " (scan only) specified");
+ }
+
+ log() << "recover cleaning up" << endl;
+ removeJournalFiles();
+ log() << "recover done" << endl;
+ okToCleanUp = true;
+ _recovering = false;
+}
+
+void _recover() {
+ verify(storageGlobalParams.dur);
+
+ boost::filesystem::path p = getJournalDir();
+ if (!exists(p)) {
+ log() << "directory " << p.string()
+ << " does not exist, there will be no recovery startup step" << endl;
+ okToCleanUp = true;
+ return;
+ }
+
+ vector<boost::filesystem::path> journalFiles;
+ getFiles(p, journalFiles);
+
+ if (journalFiles.empty()) {
+ log() << "recover : no journal files present, no recovery needed" << endl;
+ okToCleanUp = true;
+ return;
+ }
+
+ RecoveryJob::get().go(journalFiles);
+}
+
+/** recover from a crash
+ called during startup
+ throws on error
+*/
+void replayJournalFilesAtStartup() {
+ // we use a lock so that exitCleanly will wait for us
+ // to finish (or at least to notice what is up and stop)
+ OperationContextImpl txn;
+ ScopedTransaction transaction(&txn, MODE_X);
+ Lock::GlobalWrite lk(txn.lockState());
+
+ _recover(); // throws on interruption
+}
+
+struct BufReaderY {
+ int a, b;
+};
+class BufReaderUnitTest : public StartupTest {
+public:
+ void run() {
+ BufReader r((void*)"abcdabcdabcd", 12);
+ char x;
+ BufReaderY y;
+ r.read(x); // cout << x; // a
+ verify(x == 'a');
+ r.read(y);
+ r.read(x);
+ verify(x == 'b');
+ }
+} brunittest;
+
+} // namespace dur
+} // namespace mongo
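JournalSectionIterator::next() above decides what kind of record it is looking at from the first 32-bit word alone: values above JEntry::OpCode_Min are opcodes (footer, db context, file created, drop db), and any smaller non-zero value is the data length of a basic write. A minimal standalone sketch of that dispatch; the opcode values are copied from JEntry, while classify() and the RecordKind names are hypothetical:

#include <cassert>

// Opcode values copied from JEntry.
enum OpCodes {
    OpCode_Footer = 0xffffffff,
    OpCode_DbContext = 0xfffffffe,
    OpCode_FileCreated = 0xfffffffd,
    OpCode_DropDb = 0xfffffffc,
    OpCode_Min = 0xfffff000
};

enum RecordKind { kBasicWrite, kOpCode, kInvalid };

// Mirrors the dispatch at the top of JournalSectionIterator::next(): a leading word above
// OpCode_Min is an opcode; otherwise it is the length of the JEntry data that follows.
RecordKind classify(unsigned lenOrOpCode) {
    if (lenOrOpCode > OpCode_Min)
        return kOpCode;
    if (lenOrOpCode != 0 && lenOrOpCode < OpCode_Min)
        return kBasicWrite;
    return kInvalid;  // zero, or exactly OpCode_Min: rejected by verify() in the real code
}

int main() {
    assert(classify(OpCode_DbContext) == kOpCode);
    assert(classify(OpCode_Footer) == kOpCode);
    assert(classify(512) == kBasicWrite);  // a 512-byte basic write
    assert(classify(0) == kInvalid);
    return 0;
}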
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.h b/src/mongo/db/storage/mmap_v1/dur_recover.h
index 886f278a66a..e05e7926215 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.h
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.h
@@ -38,67 +38,69 @@
namespace mongo {
- class DurableMappedFile;
+class DurableMappedFile;
- namespace dur {
+namespace dur {
- struct ParsedJournalEntry;
+struct ParsedJournalEntry;
- /** call go() to execute a recovery from existing journal files.
- */
- class RecoveryJob {
- MONGO_DISALLOW_COPYING(RecoveryJob);
- public:
- RecoveryJob();
- ~RecoveryJob();
+/** call go() to execute a recovery from existing journal files.
+ */
+class RecoveryJob {
+ MONGO_DISALLOW_COPYING(RecoveryJob);
- void go(std::vector<boost::filesystem::path>& files);
+public:
+ RecoveryJob();
+ ~RecoveryJob();
- /** @param data data between header and footer. compressed if recovering. */
- void processSection(const JSectHeader *h, const void *data, unsigned len, const JSectFooter *f);
+ void go(std::vector<boost::filesystem::path>& files);
- // locks and calls _close()
- void close();
+ /** @param data data between header and footer. compressed if recovering. */
+ void processSection(const JSectHeader* h, const void* data, unsigned len, const JSectFooter* f);
- static RecoveryJob& get() { return _instance; }
+ // locks and calls _close()
+ void close();
- private:
+ static RecoveryJob& get() {
+ return _instance;
+ }
- class Last {
- public:
- Last();
- DurableMappedFile* newEntry(const ParsedJournalEntry&, RecoveryJob&);
+private:
+ class Last {
+ public:
+ Last();
+ DurableMappedFile* newEntry(const ParsedJournalEntry&, RecoveryJob&);
- private:
- DurableMappedFile* mmf;
- std::string dbName;
- int fileNo;
- };
+ private:
+ DurableMappedFile* mmf;
+ std::string dbName;
+ int fileNo;
+ };
- void write(Last& last, const ParsedJournalEntry& entry); // actually writes to the file
- void applyEntry(Last& last, const ParsedJournalEntry& entry, bool apply, bool dump);
- void applyEntries(const std::vector<ParsedJournalEntry> &entries);
- bool processFileBuffer(const void *, unsigned len);
- bool processFile(boost::filesystem::path journalfile);
- void _close(); // doesn't lock
+ void write(Last& last, const ParsedJournalEntry& entry); // actually writes to the file
+ void applyEntry(Last& last, const ParsedJournalEntry& entry, bool apply, bool dump);
+ void applyEntries(const std::vector<ParsedJournalEntry>& entries);
+ bool processFileBuffer(const void*, unsigned len);
+ bool processFile(boost::filesystem::path journalfile);
+ void _close(); // doesn't lock
- // Set of memory mapped files and a mutex to protect them
- stdx::mutex _mx;
- std::list<std::shared_ptr<DurableMappedFile> > _mmfs;
+ // Set of memory mapped files and a mutex to protect them
+ stdx::mutex _mx;
+ std::list<std::shared_ptr<DurableMappedFile>> _mmfs;
- // Are we in recovery or WRITETODATAFILES
- bool _recovering;
+ // Are we in recovery or WRITETODATAFILES
+ bool _recovering;
- unsigned long long _lastDataSyncedFromLastRun;
- unsigned long long _lastSeqMentionedInConsoleLog;
+ unsigned long long _lastDataSyncedFromLastRun;
+ unsigned long long _lastSeqMentionedInConsoleLog;
- static RecoveryJob& _instance;
- };
+ static RecoveryJob& _instance;
+};
- void replayJournalFilesAtStartup();
- }
+void replayJournalFilesAtStartup();
+}
}
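RecoveryJob::go(), declared above and defined in dur_recover.cpp, applies the journal files in order and tolerates an abruptly ending file only if it is the last one; an abrupt end anywhere earlier aborts recovery with uassert 13535. A minimal standalone sketch of that loop under stated assumptions (processFileStub and applyJournalFiles are hypothetical stand-ins for processFile() and go()):

#include <cassert>
#include <stdexcept>
#include <string>
#include <vector>

// Stand-in for RecoveryJob::processFile(): returns true when the file ends abruptly.
bool processFileStub(const std::string& name) {
    return name == "j._2";  // pretend j._2 was cut off mid-section
}

// Mirrors the loop in RecoveryJob::go(): an abrupt end is only acceptable in the last file.
void applyJournalFiles(const std::vector<std::string>& files) {
    for (size_t i = 0; i != files.size(); ++i) {
        bool abruptEnd = processFileStub(files[i]);
        if (abruptEnd && i + 1 < files.size())
            throw std::runtime_error("recover abrupt journal file end");  // uasserted(13535, ...)
    }
}

int main() {
    std::vector<std::string> ok;
    ok.push_back("j._0");
    ok.push_back("j._1");
    ok.push_back("j._2");
    applyJournalFiles(ok);  // fine: only the last file ends abruptly

    std::vector<std::string> bad;
    bad.push_back("j._2");  // abrupt file followed by another journal file
    bad.push_back("j._3");
    bool threw = false;
    try {
        applyJournalFiles(bad);
    } catch (const std::runtime_error&) {
        threw = true;
    }
    assert(threw);
    return 0;
}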
diff --git a/src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp b/src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp
index e826277e7ff..0c9f58988e2 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recovery_unit.cpp
@@ -45,284 +45,272 @@
namespace mongo {
- DurRecoveryUnit::DurRecoveryUnit()
- : _writeCount(0), _writeBytes(0), _inUnitOfWork(false), _rollbackWritesDisabled(false) {
- }
+DurRecoveryUnit::DurRecoveryUnit()
+ : _writeCount(0), _writeBytes(0), _inUnitOfWork(false), _rollbackWritesDisabled(false) {}
- void DurRecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
- invariant(!_inUnitOfWork);
- _inUnitOfWork = true;
- }
+void DurRecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
+ invariant(!_inUnitOfWork);
+ _inUnitOfWork = true;
+}
- void DurRecoveryUnit::commitUnitOfWork() {
- invariant(_inUnitOfWork);
+void DurRecoveryUnit::commitUnitOfWork() {
+ invariant(_inUnitOfWork);
- commitChanges();
+ commitChanges();
- // global journal flush opportunity
- getDur().commitIfNeeded();
+ // global journal flush opportunity
+ getDur().commitIfNeeded();
- resetChanges();
- }
+ resetChanges();
+}
- void DurRecoveryUnit::abortUnitOfWork() {
- invariant(_inUnitOfWork);
+void DurRecoveryUnit::abortUnitOfWork() {
+ invariant(_inUnitOfWork);
- rollbackChanges();
- resetChanges();
- }
+ rollbackChanges();
+ resetChanges();
+}
- void DurRecoveryUnit::abandonSnapshot() {
- invariant(!_inUnitOfWork);
- // no-op since we have no transaction
- }
+void DurRecoveryUnit::abandonSnapshot() {
+ invariant(!_inUnitOfWork);
+ // no-op since we have no transaction
+}
- void DurRecoveryUnit::commitChanges() {
- if (getDur().isDurable())
- markWritesForJournaling();
+void DurRecoveryUnit::commitChanges() {
+ if (getDur().isDurable())
+ markWritesForJournaling();
- try {
- for (Changes::const_iterator it = _changes.begin(), end = _changes.end();
- it != end; ++it) {
- (*it)->commit();
- }
- }
- catch (...) {
- std::terminate();
+ try {
+ for (Changes::const_iterator it = _changes.begin(), end = _changes.end(); it != end; ++it) {
+ (*it)->commit();
}
+ } catch (...) {
+ std::terminate();
}
-
- void DurRecoveryUnit::markWritesForJournaling() {
- if (!_writeCount)
- return;
-
- typedef std::pair<void*, unsigned> Intent;
- std::vector<Intent> intents;
- const size_t numStoredWrites = _initialWrites.size() + _mergedWrites.size();
- intents.reserve(numStoredWrites);
-
- // Show very large units of work at LOG(1) level as they may hint at performance issues
- const int logLevel = (_writeCount > 100*1000 || _writeBytes > 50*1024*1024) ? 1 : 3;
-
- LOG(logLevel) << _writeCount << " writes (" << _writeBytes / 1024 << " kB) covered by "
- << numStoredWrites << " pre-images ("
- << _preimageBuffer.size() / 1024 << " kB) ";
-
- // orders the initial, unmerged writes, by address so we can coalesce overlapping and
- // adjacent writes
- std::sort(_initialWrites.begin(), _initialWrites.end());
-
- if (!_initialWrites.empty()) {
- intents.push_back(std::make_pair(_initialWrites.front().addr,
- _initialWrites.front().len));
- for (InitialWrites::iterator it = (_initialWrites.begin() + 1),
- end = _initialWrites.end();
- it != end;
- ++it) {
- Intent& lastIntent = intents.back();
- char* lastEnd = static_cast<char*>(lastIntent.first) + lastIntent.second;
- if (it->addr <= lastEnd) {
- // overlapping or adjacent, so extend.
- ptrdiff_t extendedLen = (it->end()) - static_cast<char*>(lastIntent.first);
- lastIntent.second = std::max(lastIntent.second, unsigned(extendedLen));
- }
- else {
- // not overlapping, so create a new intent
- intents.push_back(std::make_pair(it->addr, it->len));
- }
+}
+
+void DurRecoveryUnit::markWritesForJournaling() {
+ if (!_writeCount)
+ return;
+
+ typedef std::pair<void*, unsigned> Intent;
+ std::vector<Intent> intents;
+ const size_t numStoredWrites = _initialWrites.size() + _mergedWrites.size();
+ intents.reserve(numStoredWrites);
+
+ // Show very large units of work at LOG(1) level as they may hint at performance issues
+ const int logLevel = (_writeCount > 100 * 1000 || _writeBytes > 50 * 1024 * 1024) ? 1 : 3;
+
+ LOG(logLevel) << _writeCount << " writes (" << _writeBytes / 1024 << " kB) covered by "
+ << numStoredWrites << " pre-images (" << _preimageBuffer.size() / 1024 << " kB) ";
+
+ // orders the initial, unmerged writes, by address so we can coalesce overlapping and
+ // adjacent writes
+ std::sort(_initialWrites.begin(), _initialWrites.end());
+
+ if (!_initialWrites.empty()) {
+ intents.push_back(std::make_pair(_initialWrites.front().addr, _initialWrites.front().len));
+ for (InitialWrites::iterator it = (_initialWrites.begin() + 1), end = _initialWrites.end();
+ it != end;
+ ++it) {
+ Intent& lastIntent = intents.back();
+ char* lastEnd = static_cast<char*>(lastIntent.first) + lastIntent.second;
+ if (it->addr <= lastEnd) {
+ // overlapping or adjacent, so extend.
+ ptrdiff_t extendedLen = (it->end()) - static_cast<char*>(lastIntent.first);
+ lastIntent.second = std::max(lastIntent.second, unsigned(extendedLen));
+ } else {
+ // not overlapping, so create a new intent
+ intents.push_back(std::make_pair(it->addr, it->len));
}
}
+ }
- MergedWrites::iterator it = _mergedWrites.begin();
- if (it != _mergedWrites.end()) {
- intents.push_back(std::make_pair(it->addr, it->len));
- while (++it != _mergedWrites.end()) {
- // Check the property that write intents are sorted and don't overlap.
- invariant(it->addr >= intents.back().first);
- Intent& lastIntent = intents.back();
- char* lastEnd = static_cast<char*>(lastIntent.first) + lastIntent.second;
- if (it->addr == lastEnd) {
- // adjacent, so extend.
- lastIntent.second += it->len;
- }
- else {
- // not overlapping, so create a new intent
- invariant(it->addr > lastEnd);
- intents.push_back(std::make_pair(it->addr, it->len));
- }
+ MergedWrites::iterator it = _mergedWrites.begin();
+ if (it != _mergedWrites.end()) {
+ intents.push_back(std::make_pair(it->addr, it->len));
+ while (++it != _mergedWrites.end()) {
+ // Check the property that write intents are sorted and don't overlap.
+ invariant(it->addr >= intents.back().first);
+ Intent& lastIntent = intents.back();
+ char* lastEnd = static_cast<char*>(lastIntent.first) + lastIntent.second;
+ if (it->addr == lastEnd) {
+ // adjacent, so extend.
+ lastIntent.second += it->len;
+ } else {
+ // not overlapping, so create a new intent
+ invariant(it->addr > lastEnd);
+ intents.push_back(std::make_pair(it->addr, it->len));
}
}
- LOG(logLevel) << _mergedWrites.size() << " pre-images " << "coalesced into "
- << intents.size() << " write intents";
-
- getDur().declareWriteIntents(intents);
- }
-
- void DurRecoveryUnit::resetChanges() {
- _writeCount = 0;
- _writeBytes = 0;
- _initialWrites.clear();
- _mergedWrites.clear();
- _changes.clear();
- _preimageBuffer.clear();
- _rollbackWritesDisabled = false;
- _inUnitOfWork = false;
}
-
- void DurRecoveryUnit::rollbackChanges() {
- // First rollback disk writes, then Changes. This matches behavior in other storage engines
- // that either rollback a transaction or don't write a writebatch.
-
- if (_rollbackWritesDisabled) {
- LOG(2) << " ***** NOT ROLLING BACK " << _writeCount << " disk writes";
+ LOG(logLevel) << _mergedWrites.size() << " pre-images "
+ << "coalesced into " << intents.size() << " write intents";
+
+ getDur().declareWriteIntents(intents);
+}
+
+void DurRecoveryUnit::resetChanges() {
+ _writeCount = 0;
+ _writeBytes = 0;
+ _initialWrites.clear();
+ _mergedWrites.clear();
+ _changes.clear();
+ _preimageBuffer.clear();
+ _rollbackWritesDisabled = false;
+ _inUnitOfWork = false;
+}
+
+void DurRecoveryUnit::rollbackChanges() {
+    // First roll back disk writes, then Changes. This matches behavior in other storage engines
+    // that either roll back a transaction or don't write a writebatch.
+
+ if (_rollbackWritesDisabled) {
+ LOG(2) << " ***** NOT ROLLING BACK " << _writeCount << " disk writes";
+ } else {
+ LOG(2) << " ***** ROLLING BACK " << _writeCount << " disk writes";
+
+ // First roll back the merged writes. These have no overlap or ordering requirement
+ // other than needing to be rolled back before all _initialWrites.
+ for (MergedWrites::iterator it = _mergedWrites.begin(); it != _mergedWrites.end(); ++it) {
+ _preimageBuffer.copy(it->addr, it->len, it->offset);
}
- else {
- LOG(2) << " ***** ROLLING BACK " << _writeCount << " disk writes";
-
- // First roll back the merged writes. These have no overlap or ordering requirement
- // other than needing to be rolled back before all _initialWrites.
- for (MergedWrites::iterator it = _mergedWrites.begin();
- it != _mergedWrites.end();
- ++it) {
- _preimageBuffer.copy(it->addr, it->len, it->offset);
- }
- // Then roll back the initial writes in LIFO order, as these might have overlaps.
- for (InitialWrites::reverse_iterator rit = _initialWrites.rbegin();
- rit != _initialWrites.rend();
- ++rit) {
- _preimageBuffer.copy(rit->addr, rit->len, rit->offset);
- }
+ // Then roll back the initial writes in LIFO order, as these might have overlaps.
+ for (InitialWrites::reverse_iterator rit = _initialWrites.rbegin();
+ rit != _initialWrites.rend();
+ ++rit) {
+ _preimageBuffer.copy(rit->addr, rit->len, rit->offset);
}
+ }
- LOG(2) << " ***** ROLLING BACK " << (_changes.size()) << " custom changes";
+ LOG(2) << " ***** ROLLING BACK " << (_changes.size()) << " custom changes";
- try {
- for (int i = _changes.size() - 1; i >= 0; i--) {
- LOG(2) << "CUSTOM ROLLBACK " << demangleName(typeid(*_changes[i]));
- _changes[i]->rollback();
- }
- }
- catch (...) {
- std::terminate();
+ try {
+ for (int i = _changes.size() - 1; i >= 0; i--) {
+ LOG(2) << "CUSTOM ROLLBACK " << demangleName(typeid(*_changes[i]));
+ _changes[i]->rollback();
}
+ } catch (...) {
+ std::terminate();
}
+}
- bool DurRecoveryUnit::waitUntilDurable() {
- invariant(!_inUnitOfWork);
- return getDur().waitUntilDurable();
- }
+bool DurRecoveryUnit::waitUntilDurable() {
+ invariant(!_inUnitOfWork);
+ return getDur().waitUntilDurable();
+}
- void DurRecoveryUnit::mergingWritingPtr(char* addr, size_t len) {
- // The invariant is that all writes are non-overlapping and non-empty. So, a single
- // writingPtr call may result in a number of new segments added. At this point, we cannot
- // in general merge adjacent writes, as that would require inefficient operations on the
- // preimage buffer.
+void DurRecoveryUnit::mergingWritingPtr(char* addr, size_t len) {
+ // The invariant is that all writes are non-overlapping and non-empty. So, a single
+ // writingPtr call may result in a number of new segments added. At this point, we cannot
+ // in general merge adjacent writes, as that would require inefficient operations on the
+ // preimage buffer.
- MergedWrites::iterator coveringWrite = _mergedWrites.upper_bound(Write(addr, 0, 0));
+ MergedWrites::iterator coveringWrite = _mergedWrites.upper_bound(Write(addr, 0, 0));
- char* const end = addr + len;
- while (addr < end) {
- dassert(coveringWrite == _mergedWrites.end() || coveringWrite->end() > addr);
+ char* const end = addr + len;
+ while (addr < end) {
+ dassert(coveringWrite == _mergedWrites.end() || coveringWrite->end() > addr);
- // Determine whether addr[0] is already covered by a write or not.
- // If covered, adjust addr and len to exclude the covered run from addr[0] onwards.
+ // Determine whether addr[0] is already covered by a write or not.
+ // If covered, adjust addr and len to exclude the covered run from addr[0] onwards.
- if (coveringWrite != _mergedWrites.end()) {
- char* const cwEnd = coveringWrite->end();
+ if (coveringWrite != _mergedWrites.end()) {
+ char* const cwEnd = coveringWrite->end();
- if (coveringWrite->addr <= addr) {
- // If the begin of the covering write at or before addr[0], addr[0] is covered.
- // While the existing pre-image will not generally be the same as the data
- // being written now, during rollback only the oldest pre-image matters.
+ if (coveringWrite->addr <= addr) {
+                // If the covering write begins at or before addr[0], addr[0] is covered.
+ // While the existing pre-image will not generally be the same as the data
+ // being written now, during rollback only the oldest pre-image matters.
- if (end <= cwEnd) {
- break; // fully covered
- }
-
- addr = cwEnd;
- coveringWrite++;
- dassert(coveringWrite == _mergedWrites.end() || coveringWrite->addr >= cwEnd);
+ if (end <= cwEnd) {
+ break; // fully covered
}
- }
- dassert(coveringWrite == _mergedWrites.end() || coveringWrite->end() > addr);
- // If the next coveringWrite overlaps, adjust the end of the uncovered region.
- char* uncoveredEnd = end;
- if (coveringWrite != _mergedWrites.end() && coveringWrite->addr < end) {
- uncoveredEnd = coveringWrite->addr;
+ addr = cwEnd;
+ coveringWrite++;
+ dassert(coveringWrite == _mergedWrites.end() || coveringWrite->addr >= cwEnd);
}
+ }
+ dassert(coveringWrite == _mergedWrites.end() || coveringWrite->end() > addr);
- const size_t uncoveredLen = uncoveredEnd - addr;
- if (uncoveredLen) {
- // We are writing to a region that hasn't been declared previously.
- _mergedWrites.insert(Write(addr, uncoveredLen, _preimageBuffer.size()));
+ // If the next coveringWrite overlaps, adjust the end of the uncovered region.
+ char* uncoveredEnd = end;
+ if (coveringWrite != _mergedWrites.end() && coveringWrite->addr < end) {
+ uncoveredEnd = coveringWrite->addr;
+ }
- // Windows requires us to adjust the address space *before* we write to anything.
- privateViews.makeWritable(addr, uncoveredLen);
+ const size_t uncoveredLen = uncoveredEnd - addr;
+ if (uncoveredLen) {
+ // We are writing to a region that hasn't been declared previously.
+ _mergedWrites.insert(Write(addr, uncoveredLen, _preimageBuffer.size()));
- if (!_rollbackWritesDisabled) {
- _preimageBuffer.append(addr, uncoveredLen);
- }
- addr = uncoveredEnd;
+ // Windows requires us to adjust the address space *before* we write to anything.
+ privateViews.makeWritable(addr, uncoveredLen);
+
+ if (!_rollbackWritesDisabled) {
+ _preimageBuffer.append(addr, uncoveredLen);
}
+ addr = uncoveredEnd;
}
}
+}
- void* DurRecoveryUnit::writingPtr(void* addr, size_t len) {
- invariant(_inUnitOfWork);
-
- if (len == 0) {
- return addr; // Don't need to do anything for empty ranges.
- }
+void* DurRecoveryUnit::writingPtr(void* addr, size_t len) {
+ invariant(_inUnitOfWork);
- invariant(len < size_t(std::numeric_limits<int>::max()));
+ if (len == 0) {
+ return addr; // Don't need to do anything for empty ranges.
+ }
- _writeCount++;
- _writeBytes += len;
- char* const data = static_cast<char*>(addr);
+ invariant(len < size_t(std::numeric_limits<int>::max()));
- // The initial writes are stored in a faster, but less memory-efficient way. This will
- // typically be enough for simple operations, where the extra cost of incremental
- // coalescing and merging would be too much. For larger writes, more redundancy is
- // is expected, so the cost of checking for duplicates is offset by savings in copying
- // and allocating preimage buffers. Total memory use of the preimage buffer may be up to
- // kMaxUnmergedPreimageBytes larger than the amount memory covered by the write intents.
+ _writeCount++;
+ _writeBytes += len;
+ char* const data = static_cast<char*>(addr);
- const size_t kMaxUnmergedPreimageBytes = kDebugBuild ? 16*1024 : 10*1024*1024;
+ // The initial writes are stored in a faster, but less memory-efficient way. This will
+ // typically be enough for simple operations, where the extra cost of incremental
+    // coalescing and merging would be too much. For larger writes, more redundancy is
+    // expected, so the cost of checking for duplicates is offset by savings in copying
+    // and allocating preimage buffers. Total memory use of the preimage buffer may be up to
+    // kMaxUnmergedPreimageBytes larger than the amount of memory covered by the write intents.
- if (_preimageBuffer.size() + len > kMaxUnmergedPreimageBytes) {
- mergingWritingPtr(data, len);
+ const size_t kMaxUnmergedPreimageBytes = kDebugBuild ? 16 * 1024 : 10 * 1024 * 1024;
- // After a merged write, no more initial writes can occur or there would be an
- // ordering violation during rollback. So, ensure that the if-condition will be true
- // for any future write regardless of length. This is true now because
- // mergingWritingPtr also will store its first write in _preimageBuffer as well.
- invariant(_preimageBuffer.size() >= kMaxUnmergedPreimageBytes);
+ if (_preimageBuffer.size() + len > kMaxUnmergedPreimageBytes) {
+ mergingWritingPtr(data, len);
- return addr;
- }
+ // After a merged write, no more initial writes can occur or there would be an
+ // ordering violation during rollback. So, ensure that the if-condition will be true
+        // for any future write regardless of length. This is true now because
+        // mergingWritingPtr also stores its first write in _preimageBuffer.
+ invariant(_preimageBuffer.size() >= kMaxUnmergedPreimageBytes);
- // Windows requires us to adjust the address space *before* we write to anything.
- privateViews.makeWritable(data, len);
+ return addr;
+ }
- _initialWrites.push_back(Write(data, len, _preimageBuffer.size()));
+ // Windows requires us to adjust the address space *before* we write to anything.
+ privateViews.makeWritable(data, len);
- if (!_rollbackWritesDisabled) {
- _preimageBuffer.append(data, len);
- }
+ _initialWrites.push_back(Write(data, len, _preimageBuffer.size()));
- return addr;
+ if (!_rollbackWritesDisabled) {
+ _preimageBuffer.append(data, len);
}
- void DurRecoveryUnit::setRollbackWritesDisabled() {
- invariant(_inUnitOfWork);
- _rollbackWritesDisabled = true;
- }
+ return addr;
+}
- void DurRecoveryUnit::registerChange(Change* change) {
- invariant(_inUnitOfWork);
- _changes.push_back(change);
- }
+void DurRecoveryUnit::setRollbackWritesDisabled() {
+ invariant(_inUnitOfWork);
+ _rollbackWritesDisabled = true;
+}
+
+void DurRecoveryUnit::registerChange(Change* change) {
+ invariant(_inUnitOfWork);
+ _changes.push_back(change);
+}
} // namespace mongo
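
As context for the coalescing loop in markWritesForJournaling above, here is a minimal standalone sketch of the same idea: sort the recorded writes by start address, then either extend the last intent (when the next write overlaps it or is adjacent) or open a new one. The Range and coalesce names are illustrative only and are not part of this patch.

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

struct Range {
    char* addr;
    unsigned len;
    char* end() const { return addr + len; }
    bool operator<(const Range& rhs) const { return addr < rhs.addr; }
};

// Merges overlapping or adjacent ranges into a minimal set of (address, length) intents.
std::vector<std::pair<void*, unsigned> > coalesce(std::vector<Range> writes) {
    std::vector<std::pair<void*, unsigned> > intents;
    if (writes.empty())
        return intents;
    std::sort(writes.begin(), writes.end());
    intents.push_back(std::make_pair(static_cast<void*>(writes.front().addr), writes.front().len));
    for (std::size_t i = 1; i < writes.size(); i++) {
        std::pair<void*, unsigned>& last = intents.back();
        char* lastEnd = static_cast<char*>(last.first) + last.second;
        if (writes[i].addr <= lastEnd) {
            // Overlapping or adjacent: extend the previous intent to cover both.
            std::ptrdiff_t extended = writes[i].end() - static_cast<char*>(last.first);
            last.second = std::max(last.second, static_cast<unsigned>(extended));
        } else {
            // Disjoint: start a new intent.
            intents.push_back(std::make_pair(static_cast<void*>(writes[i].addr), writes[i].len));
        }
    }
    return intents;
}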
diff --git a/src/mongo/db/storage/mmap_v1/dur_recovery_unit.h b/src/mongo/db/storage/mmap_v1/dur_recovery_unit.h
index d26032e8f26..52f717d29b2 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recovery_unit.h
+++ b/src/mongo/db/storage/mmap_v1/dur_recovery_unit.h
@@ -39,127 +39,131 @@
namespace mongo {
+/**
+ * Just pass through to getDur().
+ */
+class DurRecoveryUnit : public RecoveryUnit {
+public:
+ DurRecoveryUnit();
+
+ void beginUnitOfWork(OperationContext* opCtx) final;
+ void commitUnitOfWork() final;
+ void abortUnitOfWork() final;
+
+ virtual bool waitUntilDurable();
+
+ virtual void abandonSnapshot();
+
+ // The recovery unit takes ownership of change.
+ virtual void registerChange(Change* change);
+
+ virtual void* writingPtr(void* addr, size_t len);
+
+ virtual void setRollbackWritesDisabled();
+
+ virtual SnapshotId getSnapshotId() const {
+ return SnapshotId();
+ }
+
+private:
/**
- * Just pass through to getDur().
+ * Marks writes for journaling, if enabled, and then commits all other Changes in order.
+ * Returns with empty _initialWrites, _mergedWrites, _changes and _preimageBuffer, but
+ * does not reset the _rollbackWritesDisabled or _mustRollback flags. This leaves the
+ * RecoveryUnit ready for more changes that may be committed or rolled back.
*/
- class DurRecoveryUnit : public RecoveryUnit {
- public:
- DurRecoveryUnit();
-
- void beginUnitOfWork(OperationContext* opCtx) final;
- void commitUnitOfWork() final;
- void abortUnitOfWork() final;
-
- virtual bool waitUntilDurable();
-
- virtual void abandonSnapshot();
-
- // The recovery unit takes ownership of change.
- virtual void registerChange(Change* change);
-
- virtual void* writingPtr(void* addr, size_t len);
-
- virtual void setRollbackWritesDisabled();
-
- virtual SnapshotId getSnapshotId() const { return SnapshotId(); }
-
- private:
- /**
- * Marks writes for journaling, if enabled, and then commits all other Changes in order.
- * Returns with empty _initialWrites, _mergedWrites, _changes and _preimageBuffer, but
- * does not reset the _rollbackWritesDisabled or _mustRollback flags. This leaves the
- * RecoveryUnit ready for more changes that may be committed or rolled back.
- */
- void commitChanges();
-
- /**
- * Creates a list of write intents to be journaled, and hands it of to the active
- * DurabilityInterface.
- */
- void markWritesForJournaling();
-
- /**
- * Restores state by rolling back all writes using the saved pre-images, and then
- * rolling back all other Changes in LIFO order. Resets internal state.
- */
- void rollbackChanges();
-
-
- /**
- * Version of writingPtr that checks existing writes for overlap and only stores those
- * changes not yet covered by an existing write intent and pre-image.
- */
- void mergingWritingPtr(char* data, size_t len);
-
- /**
- * Reset to a clean state without any uncommitted changes or write.
- */
- void resetChanges();
-
- // Changes are ordered from oldest to newest.
- typedef OwnedPointerVector<Change> Changes;
- Changes _changes;
-
-
- // Number of pending uncommitted writes. Incremented even if new write is fully covered by
- // existing writes.
- size_t _writeCount;
- // Total size of the pending uncommitted writes.
- size_t _writeBytes;
-
- /**
- * These are memory writes inside the mmapv1 mmap-ed files. A pointer past the end is just
- * instead of a pointer to the beginning for the benefit of MergedWrites.
- */
- struct Write {
- Write(char* addr, int len, int offset) : addr(addr), len(len), offset(offset) { }
- Write(const Write& rhs) : addr(rhs.addr), len(rhs.len), offset(rhs.offset) { }
- Write() : addr(0), len(0), offset(0) { }
- bool operator< (const Write& rhs) const { return addr < rhs.addr; }
-
- struct compareEnd {
- bool operator() (const Write& lhs, const Write& rhs) const {
- return lhs.addr + lhs.len < rhs.addr + rhs.len;
- }
- };
-
- char* end() const {
- return addr + len;
- }
+ void commitChanges();
+
+ /**
+     * Creates a list of write intents to be journaled, and hands it off to the active
+ * DurabilityInterface.
+ */
+ void markWritesForJournaling();
+
+ /**
+ * Restores state by rolling back all writes using the saved pre-images, and then
+ * rolling back all other Changes in LIFO order. Resets internal state.
+ */
+ void rollbackChanges();
+
- char* addr;
- int len;
- int offset; // index into _preimageBuffer
+ /**
+ * Version of writingPtr that checks existing writes for overlap and only stores those
+ * changes not yet covered by an existing write intent and pre-image.
+ */
+ void mergingWritingPtr(char* data, size_t len);
+
+ /**
+     * Resets to a clean state without any uncommitted changes or writes.
+ */
+ void resetChanges();
+
+ // Changes are ordered from oldest to newest.
+ typedef OwnedPointerVector<Change> Changes;
+ Changes _changes;
+
+
+ // Number of pending uncommitted writes. Incremented even if new write is fully covered by
+ // existing writes.
+ size_t _writeCount;
+ // Total size of the pending uncommitted writes.
+ size_t _writeBytes;
+
+ /**
+     * These are memory writes inside the mmapv1 mmap-ed files. Ordering uses a pointer past
+     * the end instead of a pointer to the beginning, for the benefit of MergedWrites.
+ */
+ struct Write {
+ Write(char* addr, int len, int offset) : addr(addr), len(len), offset(offset) {}
+ Write(const Write& rhs) : addr(rhs.addr), len(rhs.len), offset(rhs.offset) {}
+ Write() : addr(0), len(0), offset(0) {}
+ bool operator<(const Write& rhs) const {
+ return addr < rhs.addr;
+ }
+
+ struct compareEnd {
+ bool operator()(const Write& lhs, const Write& rhs) const {
+ return lhs.addr + lhs.len < rhs.addr + rhs.len;
+ }
};
- /**
- * Writes are ordered by ending address, so MergedWrites::upper_bound() can find the first
- * overlapping write, if any. Overlapping and duplicate regions are forbidden, as rollback
- * of MergedChanges undoes changes by address rather than LIFO order. In addition, empty
- * regions are not allowed. Storing writes by age does not work well for large indexed
- * arrays, as coalescing is needed to bound the size of the preimage buffer.
- */
- typedef std::set<Write, Write::compareEnd> MergedWrites;
- MergedWrites _mergedWrites;
-
- // Generally it's more efficient to just store pre-images unconditionally and then
- // sort/eliminate duplicates at commit time. However, this can lead to excessive memory
- // use in cases involving large indexes arrays, where the same memory is written many
- // times. To keep the speed for the general case and bound memory use, the first few MB of
- // pre-images are stored unconditionally, but once the threshold has been exceeded, the
- // remainder is stored in a more space-efficient datastructure.
- typedef std::vector<Write> InitialWrites;
- InitialWrites _initialWrites;
-
- std::string _preimageBuffer;
-
- bool _inUnitOfWork;
-
-
- // Default is false.
- // If true, no preimages are tracked. If rollback is subsequently attempted, the process
- // will abort.
- bool _rollbackWritesDisabled;
+ char* end() const {
+ return addr + len;
+ }
+
+ char* addr;
+ int len;
+ int offset; // index into _preimageBuffer
};
+ /**
+ * Writes are ordered by ending address, so MergedWrites::upper_bound() can find the first
+ * overlapping write, if any. Overlapping and duplicate regions are forbidden, as rollback
+ * of MergedChanges undoes changes by address rather than LIFO order. In addition, empty
+ * regions are not allowed. Storing writes by age does not work well for large indexed
+ * arrays, as coalescing is needed to bound the size of the preimage buffer.
+ */
+ typedef std::set<Write, Write::compareEnd> MergedWrites;
+ MergedWrites _mergedWrites;
+
+ // Generally it's more efficient to just store pre-images unconditionally and then
+ // sort/eliminate duplicates at commit time. However, this can lead to excessive memory
+    // use in cases involving large index arrays, where the same memory is written many
+    // times. To keep the speed for the general case and bound memory use, the first few MB of
+    // pre-images are stored unconditionally, but once the threshold has been exceeded, the
+    // remainder is stored in a more space-efficient data structure.
+ typedef std::vector<Write> InitialWrites;
+ InitialWrites _initialWrites;
+
+ std::string _preimageBuffer;
+
+ bool _inUnitOfWork;
+
+
+ // Default is false.
+ // If true, no preimages are tracked. If rollback is subsequently attempted, the process
+ // will abort.
+ bool _rollbackWritesDisabled;
+};
+
} // namespace mongo
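
The MergedWrites comment above describes ordering writes by ending address so that upper_bound() finds the first write that could overlap a given pointer. Below is a minimal sketch of that lookup, assuming non-empty, non-overlapping stored regions; W, CompareEnd and firstPossiblyCovering are illustrative names, not part of the patch.

#include <set>

struct W {
    char* addr;
    int len;
    char* end() const { return addr + len; }

    // Order by ending address so upper_bound() on a zero-length probe at p returns
    // the first stored write whose end() is strictly greater than p.
    struct CompareEnd {
        bool operator()(const W& lhs, const W& rhs) const {
            return lhs.addr + lhs.len < rhs.addr + rhs.len;
        }
    };
};

typedef std::set<W, W::CompareEnd> EndOrdered;

// Finds the only element that could cover or overlap p, given that stored regions are
// non-empty and non-overlapping (mirrors the upper_bound(Write(addr, 0, 0)) call above).
EndOrdered::iterator firstPossiblyCovering(EndOrdered& writes, char* p) {
    W probe = {p, 0};  // zero-length probe: its end() is p itself
    return writes.upper_bound(probe);
}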
diff --git a/src/mongo/db/storage/mmap_v1/dur_stats.h b/src/mongo/db/storage/mmap_v1/dur_stats.h
index 27532e9ee59..8ec6f8c024f 100644
--- a/src/mongo/db/storage/mmap_v1/dur_stats.h
+++ b/src/mongo/db/storage/mmap_v1/dur_stats.h
@@ -31,61 +31,64 @@
#include "mongo/db/jsobj.h"
namespace mongo {
- namespace dur {
+namespace dur {
- /** journaling stats. the model here is that the commit thread is the only writer, and that reads are
- uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter overhead.
- */
- struct Stats {
-
- struct S {
- std::string _CSVHeader() const;
- std::string _asCSV() const;
+/** Journaling stats. The model here is that the commit thread is the only writer, and that reads are
+    uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter overhead.
+*/
+struct Stats {
+ struct S {
+ std::string _CSVHeader() const;
+ std::string _asCSV() const;
- void _asObj(BSONObjBuilder* builder) const;
+ void _asObj(BSONObjBuilder* builder) const;
- void reset();
+ void reset();
- uint64_t getCurrentDurationMillis() const {
- return ((curTimeMicros64() - _startTimeMicros) / 1000);
- }
+ uint64_t getCurrentDurationMillis() const {
+ return ((curTimeMicros64() - _startTimeMicros) / 1000);
+ }
- // Not reported. Internal use only.
- uint64_t _startTimeMicros;
+ // Not reported. Internal use only.
+ uint64_t _startTimeMicros;
- // Reported statistics
- unsigned _durationMillis;
+ // Reported statistics
+ unsigned _durationMillis;
- unsigned _commits;
- unsigned _commitsInWriteLock;
+ unsigned _commits;
+ unsigned _commitsInWriteLock;
- uint64_t _journaledBytes;
- uint64_t _uncompressedBytes;
- uint64_t _writeToDataFilesBytes;
+ uint64_t _journaledBytes;
+ uint64_t _uncompressedBytes;
+ uint64_t _writeToDataFilesBytes;
- uint64_t _prepLogBufferMicros;
- uint64_t _writeToJournalMicros;
- uint64_t _writeToDataFilesMicros;
- uint64_t _remapPrivateViewMicros;
- uint64_t _commitsMicros;
- uint64_t _commitsInWriteLockMicros;
- };
+ uint64_t _prepLogBufferMicros;
+ uint64_t _writeToJournalMicros;
+ uint64_t _writeToDataFilesMicros;
+ uint64_t _remapPrivateViewMicros;
+ uint64_t _commitsMicros;
+ uint64_t _commitsInWriteLockMicros;
+ };
- Stats();
- void reset();
+ Stats();
+ void reset();
- BSONObj asObj() const;
+ BSONObj asObj() const;
- const S* curr() const { return &_stats[_currIdx]; }
- S* curr() { return &_stats[_currIdx]; }
+ const S* curr() const {
+ return &_stats[_currIdx];
+ }
+ S* curr() {
+ return &_stats[_currIdx];
+ }
- private:
- S _stats[5];
- unsigned _currIdx;
- };
+private:
+ S _stats[5];
+ unsigned _currIdx;
+};
- extern Stats stats;
- }
+extern Stats stats;
+}
}
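
For reference, a minimal sketch of the rotating-snapshot pattern behind Stats::curr() and its fixed _stats[5] array: the single commit-thread writer fills the current slot and periodically advances to the next one, keeping older slots readable for reporting. Snapshot and RotatingStats are assumed names used only for illustration.

#include <cstring>

// One reporting window of counters; reset() zeroes all fields (POD members only).
struct Snapshot {
    unsigned commits;
    unsigned long long journaledBytes;
    void reset() { std::memset(this, 0, sizeof(*this)); }
};

class RotatingStats {
public:
    RotatingStats() : _currIdx(0) {
        for (int i = 0; i < kSlots; i++)
            _slots[i].reset();
    }

    // The commit thread writes into the current slot only.
    Snapshot* curr() { return &_slots[_currIdx]; }
    const Snapshot* curr() const { return &_slots[_currIdx]; }

    // Advance to the next slot and clear it, leaving older slots readable for reporting.
    void rotate() {
        _currIdx = (_currIdx + 1) % kSlots;
        _slots[_currIdx].reset();
    }

private:
    static const int kSlots = 5;
    Snapshot _slots[kSlots];
    int _currIdx;
};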
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index e32c0b15ffe..fad28753372 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -53,258 +53,260 @@ using namespace mongoutils;
namespace mongo {
- using std::dec;
- using std::endl;
- using std::hex;
- using std::map;
- using std::pair;
- using std::string;
-
- void DurableMappedFile::remapThePrivateView() {
- verify(storageGlobalParams.dur);
-
- _willNeedRemap = false;
-
- // todo 1.9 : it turns out we require that we always remap to the same address.
- // so the remove / add isn't necessary and can be removed?
- void *old = _view_private;
- //privateViews.remove(_view_private);
- _view_private = remapPrivateView(_view_private);
- //privateViews.add(_view_private, this);
- fassert( 16112, _view_private == old );
- }
+using std::dec;
+using std::endl;
+using std::hex;
+using std::map;
+using std::pair;
+using std::string;
+
+void DurableMappedFile::remapThePrivateView() {
+ verify(storageGlobalParams.dur);
+
+ _willNeedRemap = false;
+
+ // todo 1.9 : it turns out we require that we always remap to the same address.
+ // so the remove / add isn't necessary and can be removed?
+ void* old = _view_private;
+ // privateViews.remove(_view_private);
+ _view_private = remapPrivateView(_view_private);
+ // privateViews.add(_view_private, this);
+ fassert(16112, _view_private == old);
+}
- /** register view. threadsafe */
- void PointerToDurableMappedFile::add_inlock(void *view, DurableMappedFile *f) {
- verify(view);
- verify(f);
- clearWritableBits_inlock(view, f->length());
- _views.insert(pair<void*, DurableMappedFile*>(view, f));
- }
+/** register view. threadsafe */
+void PointerToDurableMappedFile::add_inlock(void* view, DurableMappedFile* f) {
+ verify(view);
+ verify(f);
+ clearWritableBits_inlock(view, f->length());
+ _views.insert(pair<void*, DurableMappedFile*>(view, f));
+}
- /** de-register view. threadsafe */
- void PointerToDurableMappedFile::remove(void *view, size_t len) {
- if( view ) {
- stdx::lock_guard<stdx::mutex> lk(_m);
- clearWritableBits_inlock(view, len);
- _views.erase(view);
- }
+/** de-register view. threadsafe */
+void PointerToDurableMappedFile::remove(void* view, size_t len) {
+ if (view) {
+ stdx::lock_guard<stdx::mutex> lk(_m);
+ clearWritableBits_inlock(view, len);
+ _views.erase(view);
}
+}
#ifdef _WIN32
- void PointerToDurableMappedFile::clearWritableBits(void *privateView, size_t len) {
- stdx::lock_guard<stdx::mutex> lk(_m);
- clearWritableBits_inlock(privateView, len);
- }
+void PointerToDurableMappedFile::clearWritableBits(void* privateView, size_t len) {
+ stdx::lock_guard<stdx::mutex> lk(_m);
+ clearWritableBits_inlock(privateView, len);
+}
- /** notification on unmapping so we can clear writable bits */
- void PointerToDurableMappedFile::clearWritableBits_inlock(void *privateView, size_t len) {
- for (unsigned i = reinterpret_cast<size_t>(privateView) / MemoryMappedCOWBitset::ChunkSize;
- i <= (reinterpret_cast<size_t>(privateView) + len) / MemoryMappedCOWBitset::ChunkSize;
- ++i) {
- writable.clear(i);
- dassert(!writable.get(i));
- }
+/** notification on unmapping so we can clear writable bits */
+void PointerToDurableMappedFile::clearWritableBits_inlock(void* privateView, size_t len) {
+ for (unsigned i = reinterpret_cast<size_t>(privateView) / MemoryMappedCOWBitset::ChunkSize;
+ i <= (reinterpret_cast<size_t>(privateView) + len) / MemoryMappedCOWBitset::ChunkSize;
+ ++i) {
+ writable.clear(i);
+ dassert(!writable.get(i));
}
+}
- extern stdx::mutex mapViewMutex;
+extern stdx::mutex mapViewMutex;
- __declspec(noinline) void PointerToDurableMappedFile::makeChunkWritable(size_t chunkno) {
- stdx::lock_guard<stdx::mutex> lkPrivateViews(_m);
+__declspec(noinline) void PointerToDurableMappedFile::makeChunkWritable(size_t chunkno) {
+ stdx::lock_guard<stdx::mutex> lkPrivateViews(_m);
- if (writable.get(chunkno)) // double check lock
- return;
+ if (writable.get(chunkno)) // double check lock
+ return;
- // remap all maps in this chunk.
- // common case is a single map, but could have more than one with smallfiles or .ns files
- size_t chunkStart = chunkno * MemoryMappedCOWBitset::ChunkSize;
- size_t chunkNext = chunkStart + MemoryMappedCOWBitset::ChunkSize;
+ // remap all maps in this chunk.
+ // common case is a single map, but could have more than one with smallfiles or .ns files
+ size_t chunkStart = chunkno * MemoryMappedCOWBitset::ChunkSize;
+ size_t chunkNext = chunkStart + MemoryMappedCOWBitset::ChunkSize;
- stdx::lock_guard<stdx::mutex> lkMapView(mapViewMutex);
+ stdx::lock_guard<stdx::mutex> lkMapView(mapViewMutex);
- map<void*, DurableMappedFile*>::iterator i = _views.upper_bound((void*)(chunkNext - 1));
- while (1) {
- const pair<void*, DurableMappedFile*> x = *(--i);
- DurableMappedFile *mmf = x.second;
- if (mmf == 0)
- break;
+ map<void*, DurableMappedFile*>::iterator i = _views.upper_bound((void*)(chunkNext - 1));
+ while (1) {
+ const pair<void*, DurableMappedFile*> x = *(--i);
+ DurableMappedFile* mmf = x.second;
+ if (mmf == 0)
+ break;
- size_t viewStart = reinterpret_cast<size_t>(x.first);
- size_t viewEnd = viewStart + mmf->length();
- if (viewEnd <= chunkStart)
- break;
+ size_t viewStart = reinterpret_cast<size_t>(x.first);
+ size_t viewEnd = viewStart + mmf->length();
+ if (viewEnd <= chunkStart)
+ break;
- size_t protectStart = std::max(viewStart, chunkStart);
- dassert(protectStart < chunkNext);
+ size_t protectStart = std::max(viewStart, chunkStart);
+ dassert(protectStart < chunkNext);
- size_t protectEnd = std::min(viewEnd, chunkNext);
- size_t protectSize = protectEnd - protectStart;
- dassert(protectSize > 0 && protectSize <= MemoryMappedCOWBitset::ChunkSize);
+ size_t protectEnd = std::min(viewEnd, chunkNext);
+ size_t protectSize = protectEnd - protectStart;
+ dassert(protectSize > 0 && protectSize <= MemoryMappedCOWBitset::ChunkSize);
- DWORD oldProtection;
- bool ok = VirtualProtect(reinterpret_cast<void*>(protectStart),
- protectSize,
- PAGE_WRITECOPY,
- &oldProtection);
- if (!ok) {
- DWORD dosError = GetLastError();
+ DWORD oldProtection;
+ bool ok = VirtualProtect(
+ reinterpret_cast<void*>(protectStart), protectSize, PAGE_WRITECOPY, &oldProtection);
+ if (!ok) {
+ DWORD dosError = GetLastError();
- if (dosError == ERROR_COMMITMENT_LIMIT) {
- // System has run out of memory between physical RAM & page file, tell the user
- BSONObjBuilder bb;
+ if (dosError == ERROR_COMMITMENT_LIMIT) {
+ // System has run out of memory between physical RAM & page file, tell the user
+ BSONObjBuilder bb;
- ProcessInfo p;
- p.getExtraInfo(bb);
+ ProcessInfo p;
+ p.getExtraInfo(bb);
- severe() << "MongoDB has exhausted the system memory capacity.";
- severe() << "Current Memory Status: " << bb.obj().toString();
- }
+ severe() << "MongoDB has exhausted the system memory capacity.";
+ severe() << "Current Memory Status: " << bb.obj().toString();
+ }
- severe() << "VirtualProtect for " << mmf->filename()
- << " chunk " << chunkno
- << " failed with " << errnoWithDescription(dosError)
- << " (chunk size is " << protectSize
- << ", address is " << hex << protectStart << dec << ")"
- << " in mongo::makeChunkWritable, terminating"
- << endl;
+ severe() << "VirtualProtect for " << mmf->filename() << " chunk " << chunkno
+ << " failed with " << errnoWithDescription(dosError) << " (chunk size is "
+ << protectSize << ", address is " << hex << protectStart << dec << ")"
+ << " in mongo::makeChunkWritable, terminating" << endl;
- fassertFailed(16362);
- }
+ fassertFailed(16362);
}
-
- writable.set(chunkno);
}
+
+ writable.set(chunkno);
+}
#else
- void PointerToDurableMappedFile::clearWritableBits(void *privateView, size_t len) {
- }
+void PointerToDurableMappedFile::clearWritableBits(void* privateView, size_t len) {}
- void PointerToDurableMappedFile::clearWritableBits_inlock(void *privateView, size_t len) {
- }
+void PointerToDurableMappedFile::clearWritableBits_inlock(void* privateView, size_t len) {}
#endif
- PointerToDurableMappedFile::PointerToDurableMappedFile() {
+PointerToDurableMappedFile::PointerToDurableMappedFile() {
#if defined(SIZE_MAX)
- size_t max = SIZE_MAX;
+ size_t max = SIZE_MAX;
#else
- size_t max = ~((size_t)0);
+ size_t max = ~((size_t)0);
#endif
- verify( max > (size_t) this ); // just checking that no one redef'd SIZE_MAX and that it is sane
+ verify(max > (size_t) this); // just checking that no one redef'd SIZE_MAX and that it is sane
- // this way we don't need any boundary checking in _find()
- _views.insert( pair<void*,DurableMappedFile*>((void*)0,(DurableMappedFile*)0) );
- _views.insert( pair<void*,DurableMappedFile*>((void*)max,(DurableMappedFile*)0) );
- }
+ // this way we don't need any boundary checking in _find()
+ _views.insert(pair<void*, DurableMappedFile*>((void*)0, (DurableMappedFile*)0));
+ _views.insert(pair<void*, DurableMappedFile*>((void*)max, (DurableMappedFile*)0));
+}
- /** underscore version of find is for when you are already locked
- @param ofs out return our offset in the view
- @return the DurableMappedFile to which this pointer belongs
- */
- DurableMappedFile* PointerToDurableMappedFile::find_inlock(void *p, /*out*/ size_t& ofs) {
- //
- // .................memory..........................
- // v1 p v2
- // [--------------------] [-------]
- //
- // e.g., _find(p) == v1
- //
- const pair<void*,DurableMappedFile*> x = *(--_views.upper_bound(p));
- DurableMappedFile *mmf = x.second;
- if( mmf ) {
- size_t o = ((char *)p) - ((char*)x.first);
- if( o < mmf->length() ) {
- ofs = o;
- return mmf;
- }
+/** underscore version of find is for when you are already locked
+ @param ofs out return our offset in the view
+ @return the DurableMappedFile to which this pointer belongs
+*/
+DurableMappedFile* PointerToDurableMappedFile::find_inlock(void* p, /*out*/ size_t& ofs) {
+ //
+ // .................memory..........................
+ // v1 p v2
+ // [--------------------] [-------]
+ //
+ // e.g., _find(p) == v1
+ //
+ const pair<void*, DurableMappedFile*> x = *(--_views.upper_bound(p));
+ DurableMappedFile* mmf = x.second;
+ if (mmf) {
+ size_t o = ((char*)p) - ((char*)x.first);
+ if (o < mmf->length()) {
+ ofs = o;
+ return mmf;
}
- return 0;
}
+ return 0;
+}
- /** find associated MMF object for a given pointer.
- threadsafe
- @param ofs out returns offset into the view of the pointer, if found.
- @return the DurableMappedFile to which this pointer belongs. null if not found.
- */
- DurableMappedFile* PointerToDurableMappedFile::find(void *p, /*out*/ size_t& ofs) {
- stdx::lock_guard<stdx::mutex> lk(_m);
- return find_inlock(p, ofs);
- }
-
- PointerToDurableMappedFile privateViews;
-
- // here so that it is precomputed...
- void DurableMappedFile::setPath(const std::string& f) {
- string suffix;
- string prefix;
- bool ok = str::rSplitOn(f, '.', prefix, suffix);
- uassert(13520, str::stream() << "DurableMappedFile only supports filenames in a certain format " << f, ok);
- if( suffix == "ns" )
- _fileSuffixNo = dur::JEntry::DotNsSuffix;
- else
- _fileSuffixNo = (int) str::toUnsigned(suffix);
+/** find associated MMF object for a given pointer.
+ threadsafe
+ @param ofs out returns offset into the view of the pointer, if found.
+ @return the DurableMappedFile to which this pointer belongs. null if not found.
+*/
+DurableMappedFile* PointerToDurableMappedFile::find(void* p, /*out*/ size_t& ofs) {
+ stdx::lock_guard<stdx::mutex> lk(_m);
+ return find_inlock(p, ofs);
+}
- _p = RelativePath::fromFullPath(storageGlobalParams.dbpath, prefix);
- }
+PointerToDurableMappedFile privateViews;
+
+// here so that it is precomputed...
+void DurableMappedFile::setPath(const std::string& f) {
+ string suffix;
+ string prefix;
+ bool ok = str::rSplitOn(f, '.', prefix, suffix);
+ uassert(13520,
+ str::stream() << "DurableMappedFile only supports filenames in a certain format " << f,
+ ok);
+ if (suffix == "ns")
+ _fileSuffixNo = dur::JEntry::DotNsSuffix;
+ else
+ _fileSuffixNo = (int)str::toUnsigned(suffix);
+
+ _p = RelativePath::fromFullPath(storageGlobalParams.dbpath, prefix);
+}
- bool DurableMappedFile::open(const std::string& fname, bool sequentialHint) {
- LOG(3) << "mmf open " << fname;
- invariant(!_view_write);
+bool DurableMappedFile::open(const std::string& fname, bool sequentialHint) {
+ LOG(3) << "mmf open " << fname;
+ invariant(!_view_write);
- setPath(fname);
- _view_write = mapWithOptions(fname.c_str(), sequentialHint ? SEQUENTIAL : 0);
- return finishOpening();
- }
+ setPath(fname);
+ _view_write = mapWithOptions(fname.c_str(), sequentialHint ? SEQUENTIAL : 0);
+ return finishOpening();
+}
- bool DurableMappedFile::create(const std::string& fname, unsigned long long& len, bool sequentialHint) {
- LOG(3) << "mmf create " << fname;
- invariant(!_view_write);
+bool DurableMappedFile::create(const std::string& fname,
+ unsigned long long& len,
+ bool sequentialHint) {
+ LOG(3) << "mmf create " << fname;
+ invariant(!_view_write);
- setPath(fname);
- _view_write = map(fname.c_str(), len, sequentialHint ? SEQUENTIAL : 0);
- return finishOpening();
- }
+ setPath(fname);
+ _view_write = map(fname.c_str(), len, sequentialHint ? SEQUENTIAL : 0);
+ return finishOpening();
+}
- bool DurableMappedFile::finishOpening() {
- LOG(3) << "mmf finishOpening " << (void*) _view_write << ' ' << filename() << " len:" << length();
- if( _view_write ) {
- if (storageGlobalParams.dur) {
- stdx::lock_guard<stdx::mutex> lk2(privateViews._mutex());
-
- _view_private = createPrivateMap();
- if( _view_private == 0 ) {
- msgasserted(13636, str::stream() << "file " << filename() << " open/create failed in createPrivateMap (look in log for more information)");
- }
- privateViews.add_inlock(_view_private, this); // note that testIntent builds use this, even though it points to view_write then...
- }
- else {
- _view_private = _view_write;
+bool DurableMappedFile::finishOpening() {
+ LOG(3) << "mmf finishOpening " << (void*)_view_write << ' ' << filename()
+ << " len:" << length();
+ if (_view_write) {
+ if (storageGlobalParams.dur) {
+ stdx::lock_guard<stdx::mutex> lk2(privateViews._mutex());
+
+ _view_private = createPrivateMap();
+ if (_view_private == 0) {
+ msgasserted(13636,
+ str::stream() << "file " << filename() << " open/create failed "
+ "in createPrivateMap "
+ "(look in log for "
+ "more information)");
}
- return true;
+ privateViews.add_inlock(
+ _view_private,
+ this); // note that testIntent builds use this, even though it points to view_write then...
+ } else {
+ _view_private = _view_write;
}
- return false;
+ return true;
}
+ return false;
+}
- DurableMappedFile::DurableMappedFile() : _willNeedRemap(false) {
- _view_write = _view_private = 0;
- }
+DurableMappedFile::DurableMappedFile() : _willNeedRemap(false) {
+ _view_write = _view_private = 0;
+}
- DurableMappedFile::~DurableMappedFile() {
- try {
- LOG(3) << "mmf close " << filename();
+DurableMappedFile::~DurableMappedFile() {
+ try {
+ LOG(3) << "mmf close " << filename();
- // If _view_private was not set, this means file open failed
- if (_view_private) {
- // Notify the durability system that we are closing a file so it can ensure we
- // will not have journaled operations with no corresponding file.
- getDur().closingFileNotification();
- }
+ // If _view_private was not set, this means file open failed
+ if (_view_private) {
+ // Notify the durability system that we are closing a file so it can ensure we
+ // will not have journaled operations with no corresponding file.
+ getDur().closingFileNotification();
+ }
- LockMongoFilesExclusive lk;
- privateViews.remove(_view_private, length());
+ LockMongoFilesExclusive lk;
+ privateViews.remove(_view_private, length());
- MemoryMappedFile::close();
- }
- catch (...) {
- error() << "exception in ~DurableMappedFile";
- }
+ MemoryMappedFile::close();
+ } catch (...) {
+ error() << "exception in ~DurableMappedFile";
}
}
+}
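
find_inlock above relies on sentinel entries at address 0 and at the maximum address so that --upper_bound(p) is always a valid iterator. Here is a small sketch of that lookup under the same assumption; Region, RegionMap, initSentinels and findRegion are illustrative names, not part of the patch.

#include <cstddef>
#include <cstdint>
#include <map>

struct Region {
    std::size_t length;
};

typedef std::map<void*, Region*> RegionMap;

// Sentinel entries at address 0 and at the maximum address guarantee that
// --upper_bound(p) is always valid, so no explicit bounds checks are needed.
void initSentinels(RegionMap& views) {
    views.insert(std::make_pair(static_cast<void*>(0), static_cast<Region*>(0)));
    views.insert(std::make_pair(reinterpret_cast<void*>(SIZE_MAX), static_cast<Region*>(0)));
}

// Resolves a pointer to the region containing it, returning the offset into that region.
Region* findRegion(RegionMap& views, void* p, std::size_t& ofsOut) {
    RegionMap::iterator it = views.upper_bound(p);
    --it;  // greatest key <= p; the sentinel at address 0 keeps this valid
    Region* r = it->second;
    if (r) {
        std::size_t o = static_cast<char*>(p) - static_cast<char*>(it->first);
        if (o < r->length) {
            ofsOut = o;
            return r;
        }
    }
    return 0;  // p lies past the end of the nearest region, or we hit a sentinel
}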
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
index c4cfb5a6131..02906f112fe 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
@@ -37,220 +37,245 @@
namespace mongo {
- /** DurableMappedFile adds some layers atop memory mapped files - specifically our handling of private views & such.
- if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class,
- not this.
- */
- class DurableMappedFile : private MemoryMappedFile {
- protected:
- virtual void* viewForFlushing() { return _view_write; }
+/** DurableMappedFile adds some layers atop memory mapped files - specifically our handling of private views & such.
+    If you don't care about journaling/durability (temp sort files & such), use the
+    MemoryMappedFile class, not this.
+*/
+class DurableMappedFile : private MemoryMappedFile {
+protected:
+ virtual void* viewForFlushing() {
+ return _view_write;
+ }
- public:
- DurableMappedFile();
- virtual ~DurableMappedFile();
+public:
+ DurableMappedFile();
+ virtual ~DurableMappedFile();
- /** @return true if opened ok. */
- bool open(const std::string& fname, bool sequentialHint /*typically we open with this false*/);
+ /** @return true if opened ok. */
+ bool open(const std::string& fname, bool sequentialHint /*typically we open with this false*/);
- /** @return file length */
- unsigned long long length() const { return MemoryMappedFile::length(); }
+ /** @return file length */
+ unsigned long long length() const {
+ return MemoryMappedFile::length();
+ }
- std::string filename() const { return MemoryMappedFile::filename(); }
+ std::string filename() const {
+ return MemoryMappedFile::filename();
+ }
- void flush(bool sync) { MemoryMappedFile::flush(sync); }
+ void flush(bool sync) {
+ MemoryMappedFile::flush(sync);
+ }
- /* Creates with length if DNE, otherwise uses existing file length,
- passed length.
- @param sequentialHint if true will be sequentially accessed
- @return true for ok
- */
- bool create(const std::string& fname, unsigned long long& len, bool sequentialHint);
+    /* Creates the file with the passed length if it does not exist; otherwise uses the
+       existing file length and updates the passed length.
+ @param sequentialHint if true will be sequentially accessed
+ @return true for ok
+ */
+ bool create(const std::string& fname, unsigned long long& len, bool sequentialHint);
- /* Get the "standard" view (which is the private one).
- @return the private view.
- */
- void* getView() const { return _view_private; }
-
- /* Get the "write" view (which is required for writing).
- @return the write view.
- */
- void* view_write() const { return _view_write; }
+ /* Get the "standard" view (which is the private one).
+ @return the private view.
+ */
+ void* getView() const {
+ return _view_private;
+ }
- /** for a filename a/b/c.3
- filePath() is "a/b/c"
- fileSuffixNo() is 3
- if the suffix is "ns", fileSuffixNo -1
- */
- const RelativePath& relativePath() const {
- DEV verify( !_p._p.empty() );
- return _p;
- }
+ /* Get the "write" view (which is required for writing).
+ @return the write view.
+ */
+ void* view_write() const {
+ return _view_write;
+ }
- int fileSuffixNo() const { return _fileSuffixNo; }
- HANDLE getFd() { return MemoryMappedFile::getFd(); }
+ /** for a filename a/b/c.3
+ filePath() is "a/b/c"
+ fileSuffixNo() is 3
+        if the suffix is "ns", fileSuffixNo() is -1
+ */
+ const RelativePath& relativePath() const {
+ DEV verify(!_p._p.empty());
+ return _p;
+ }
- /** true if we have written.
- set in PREPLOGBUFFER, it is NOT set immediately on write intent declaration.
- reset to false in REMAPPRIVATEVIEW
- */
- bool willNeedRemap() { return _willNeedRemap; }
- void setWillNeedRemap() { _willNeedRemap = true; }
+ int fileSuffixNo() const {
+ return _fileSuffixNo;
+ }
+ HANDLE getFd() {
+ return MemoryMappedFile::getFd();
+ }
- void remapThePrivateView();
+ /** true if we have written.
+ set in PREPLOGBUFFER, it is NOT set immediately on write intent declaration.
+ reset to false in REMAPPRIVATEVIEW
+ */
+ bool willNeedRemap() {
+ return _willNeedRemap;
+ }
+ void setWillNeedRemap() {
+ _willNeedRemap = true;
+ }
- virtual bool isDurableMappedFile() { return true; }
+ void remapThePrivateView();
- private:
+ virtual bool isDurableMappedFile() {
+ return true;
+ }
- void *_view_write;
- void *_view_private;
- bool _willNeedRemap;
- RelativePath _p; // e.g. "somepath/dbname"
- int _fileSuffixNo; // e.g. 3. -1="ns"
+private:
+ void* _view_write;
+ void* _view_private;
+ bool _willNeedRemap;
+ RelativePath _p; // e.g. "somepath/dbname"
+ int _fileSuffixNo; // e.g. 3. -1="ns"
- void setPath(const std::string& pathAndFileName);
- bool finishOpening();
- };
+ void setPath(const std::string& pathAndFileName);
+ bool finishOpening();
+};
#ifdef _WIN32
- // Simple array based bitset to track COW chunks in memory mapped files on Windows
- // A chunk is a 64MB granular region in virtual memory that we mark as COW everytime we need
- // to write to a memory mapped files on Windows
- //
- class MemoryMappedCOWBitset {
- MONGO_DISALLOW_COPYING(MemoryMappedCOWBitset);
- public:
- // Size of the chunks we mark Copy-On-Write with VirtualProtect
- static const unsigned long long ChunkSize = 64 * 1024 * 1024;
-
- // Number of chunks we store in our bitset which are really 32-bit ints
- static const unsigned long long NChunks = 64 * 1024;
-
- // Total Virtual Memory space we can cover with the bitset
- static const unsigned long long MaxChunkMemory = ChunkSize * NChunks
- * sizeof(unsigned int) * 8;
-
- // Size in bytes of the bitset we allocate
- static const unsigned long long MaxChunkBytes = NChunks * sizeof(unsigned int);
-
- // 128 TB Virtual Memory space in Windows 8.1/2012 R2, 8TB before
- static const unsigned long long MaxWinMemory =
- 128ULL * 1024 * 1024 * 1024 * 1024;
-
- // Make sure that the chunk memory covers the Max Windows user process VM space
- static_assert(MaxChunkMemory == MaxWinMemory,
- "Need a larger bitset to cover max process VM space");
- public:
- MemoryMappedCOWBitset() {
- static_assert(MemoryMappedCOWBitset::MaxChunkBytes == sizeof(bits),
- "Validate our predicted bitset size is correct");
- }
+// Simple array-based bitset to track COW chunks in memory mapped files on Windows.
+// A chunk is a 64MB granular region in virtual memory that we mark as COW every time we need
+// to write to a memory mapped file on Windows.
+//
+class MemoryMappedCOWBitset {
+ MONGO_DISALLOW_COPYING(MemoryMappedCOWBitset);
- bool get(uintptr_t i) const {
- uintptr_t x = i / 32;
- verify(x < MemoryMappedCOWBitset::NChunks);
- return (bits[x].loadRelaxed() & (1 << (i % 32))) != 0;
- }
+public:
+ // Size of the chunks we mark Copy-On-Write with VirtualProtect
+ static const unsigned long long ChunkSize = 64 * 1024 * 1024;
- // Note: assumes caller holds privateViews.mutex
- void set(uintptr_t i) {
- uintptr_t x = i / 32;
- verify(x < MemoryMappedCOWBitset::NChunks);
- bits[x].store( bits[x].loadRelaxed() | (1 << (i % 32)));
- }
+ // Number of chunks we store in our bitset which are really 32-bit ints
+ static const unsigned long long NChunks = 64 * 1024;
- // Note: assumes caller holds privateViews.mutex
- void clear(uintptr_t i) {
- uintptr_t x = i / 32;
- verify(x < MemoryMappedCOWBitset::NChunks);
- bits[x].store(bits[x].loadRelaxed() & ~(1 << (i % 32)));
- }
+ // Total Virtual Memory space we can cover with the bitset
+ static const unsigned long long MaxChunkMemory = ChunkSize * NChunks * sizeof(unsigned int) * 8;
+
+ // Size in bytes of the bitset we allocate
+ static const unsigned long long MaxChunkBytes = NChunks * sizeof(unsigned int);
+
+ // 128 TB Virtual Memory space in Windows 8.1/2012 R2, 8TB before
+ static const unsigned long long MaxWinMemory = 128ULL * 1024 * 1024 * 1024 * 1024;
+
+ // Make sure that the chunk memory covers the Max Windows user process VM space
+ static_assert(MaxChunkMemory == MaxWinMemory,
+ "Need a larger bitset to cover max process VM space");
+
+public:
+ MemoryMappedCOWBitset() {
+ static_assert(MemoryMappedCOWBitset::MaxChunkBytes == sizeof(bits),
+ "Validate our predicted bitset size is correct");
+ }
+
+ bool get(uintptr_t i) const {
+ uintptr_t x = i / 32;
+ verify(x < MemoryMappedCOWBitset::NChunks);
+ return (bits[x].loadRelaxed() & (1 << (i % 32))) != 0;
+ }
+
+ // Note: assumes caller holds privateViews.mutex
+ void set(uintptr_t i) {
+ uintptr_t x = i / 32;
+ verify(x < MemoryMappedCOWBitset::NChunks);
+ bits[x].store(bits[x].loadRelaxed() | (1 << (i % 32)));
+ }
- private:
- // atomic as we are doing double check locking
- AtomicUInt32 bits[MemoryMappedCOWBitset::NChunks];
- };
+ // Note: assumes caller holds privateViews.mutex
+ void clear(uintptr_t i) {
+ uintptr_t x = i / 32;
+ verify(x < MemoryMappedCOWBitset::NChunks);
+ bits[x].store(bits[x].loadRelaxed() & ~(1 << (i % 32)));
+ }
+
+private:
+ // atomic as we are doing double check locking
+ AtomicUInt32 bits[MemoryMappedCOWBitset::NChunks];
+};
#endif
- /** for durability support we want to be able to map pointers to specific DurableMappedFile objects.
- */
- class PointerToDurableMappedFile {
- MONGO_DISALLOW_COPYING(PointerToDurableMappedFile);
- public:
- PointerToDurableMappedFile();
+/** for durability support we want to be able to map pointers to specific DurableMappedFile objects.
+*/
+class PointerToDurableMappedFile {
+ MONGO_DISALLOW_COPYING(PointerToDurableMappedFile);
- /** register view.
- not-threadsafe, caller must hold _mutex()
- */
- void add_inlock(void *view, DurableMappedFile *f);
+public:
+ PointerToDurableMappedFile();
- /** de-register view.
- threadsafe
- */
- void remove(void *view, size_t length);
+ /** register view.
+ not-threadsafe, caller must hold _mutex()
+ */
+ void add_inlock(void* view, DurableMappedFile* f);
- /** find associated MMF object for a given pointer.
- threadsafe
- @param ofs out returns offset into the view of the pointer, if found.
- @return the DurableMappedFile to which this pointer belongs. null if not found.
+ /** de-register view.
+ threadsafe
*/
- DurableMappedFile* find(void *p, /*out*/ size_t& ofs);
+ void remove(void* view, size_t length);
- /** for doing many finds in a row with one lock operation */
- stdx::mutex& _mutex() { return _m; }
+ /** find associated MMF object for a given pointer.
+ threadsafe
+ @param ofs out returns offset into the view of the pointer, if found.
+ @return the DurableMappedFile to which this pointer belongs. null if not found.
+ */
+ DurableMappedFile* find(void* p, /*out*/ size_t& ofs);
- /** not-threadsafe, caller must hold _mutex() */
- DurableMappedFile* find_inlock(void *p, /*out*/ size_t& ofs);
+ /** for doing many finds in a row with one lock operation */
+ stdx::mutex& _mutex() {
+ return _m;
+ }
+
+ /** not-threadsafe, caller must hold _mutex() */
+ DurableMappedFile* find_inlock(void* p, /*out*/ size_t& ofs);
- /** not-threadsafe, caller must hold _mutex() */
- unsigned numberOfViews_inlock() const { return _views.size(); }
+ /** not-threadsafe, caller must hold _mutex() */
+ unsigned numberOfViews_inlock() const {
+ return _views.size();
+ }
- /** make the private map range writable (necessary for our windows implementation) */
- void makeWritable(void *, unsigned len);
+ /** make the private map range writable (necessary for our windows implementation) */
+ void makeWritable(void*, unsigned len);
- void clearWritableBits(void *privateView, size_t len);
+ void clearWritableBits(void* privateView, size_t len);
- private:
- void clearWritableBits_inlock(void *privateView, size_t len);
+private:
+ void clearWritableBits_inlock(void* privateView, size_t len);
#ifdef _WIN32
- void makeChunkWritable(size_t chunkno);
+ void makeChunkWritable(size_t chunkno);
#endif
- private:
- // PointerToDurableMappedFile Mutex
- //
- // Protects:
- // Protects internal consistency of data structure
- // Lock Ordering:
- // Must be taken before MapViewMutex if both are taken to prevent deadlocks
- stdx::mutex _m;
- std::map<void*, DurableMappedFile*> _views;
+private:
+ // PointerToDurableMappedFile Mutex
+ //
+ // Protects:
+ // Protects internal consistency of data structure
+ // Lock Ordering:
+ // Must be taken before MapViewMutex if both are taken to prevent deadlocks
+ stdx::mutex _m;
+ std::map<void*, DurableMappedFile*> _views;
#ifdef _WIN32
- // Tracks which memory mapped regions are marked as Copy on Write
- MemoryMappedCOWBitset writable;
+ // Tracks which memory mapped regions are marked as Copy on Write
+ MemoryMappedCOWBitset writable;
#endif
- };
+};
#ifdef _WIN32
- inline void PointerToDurableMappedFile::makeWritable(void *privateView, unsigned len) {
- size_t p = reinterpret_cast<size_t>(privateView);
- unsigned a = p / MemoryMappedCOWBitset::ChunkSize;
- unsigned b = (p + len) / MemoryMappedCOWBitset::ChunkSize;
-
- for (unsigned i = a; i <= b; i++) {
- if (!writable.get(i)) {
- makeChunkWritable(i);
- }
+inline void PointerToDurableMappedFile::makeWritable(void* privateView, unsigned len) {
+ size_t p = reinterpret_cast<size_t>(privateView);
+ unsigned a = p / MemoryMappedCOWBitset::ChunkSize;
+ unsigned b = (p + len) / MemoryMappedCOWBitset::ChunkSize;
+
+ for (unsigned i = a; i <= b; i++) {
+ if (!writable.get(i)) {
+ makeChunkWritable(i);
}
}
+}
#else
- inline void PointerToDurableMappedFile::makeWritable(void *_p, unsigned len) {
- }
+inline void PointerToDurableMappedFile::makeWritable(void* _p, unsigned len) {}
#endif
- // allows a pointer into any private view of a DurableMappedFile to be resolved to the DurableMappedFile object
- extern PointerToDurableMappedFile privateViews;
+// allows a pointer into any private view of a DurableMappedFile to be resolved to the DurableMappedFile object
+extern PointerToDurableMappedFile privateViews;
}
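
The MemoryMappedCOWBitset above packs one bit per 64MB chunk into 32-bit words. Below is a non-atomic sketch of the underlying index arithmetic (the real class uses AtomicUInt32 for double-checked locking); kChunkSize, chunkNo, testBit, setBit and clearBit are illustrative names, not part of the patch.

#include <cstddef>
#include <cstdint>

// 64MB chunk granularity, matching the ChunkSize constant above.
const unsigned long long kChunkSize = 64ULL * 1024 * 1024;

// Map an address to its chunk number, then to a (word, bit) position in a flat bitset.
inline std::size_t chunkNo(const void* p) {
    return static_cast<std::size_t>(reinterpret_cast<std::uintptr_t>(p) / kChunkSize);
}

inline bool testBit(const std::uint32_t* bits, std::size_t chunk) {
    return (bits[chunk / 32] & (1u << (chunk % 32))) != 0;
}

inline void setBit(std::uint32_t* bits, std::size_t chunk) {
    bits[chunk / 32] |= (1u << (chunk % 32));
}

inline void clearBit(std::uint32_t* bits, std::size_t chunk) {
    bits[chunk / 32] &= ~(1u << (chunk % 32));
}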
diff --git a/src/mongo/db/storage/mmap_v1/durop.cpp b/src/mongo/db/storage/mmap_v1/durop.cpp
index 2a049596593..8efd7720c3e 100644
--- a/src/mongo/db/storage/mmap_v1/durop.cpp
+++ b/src/mongo/db/storage/mmap_v1/durop.cpp
@@ -47,134 +47,133 @@
namespace mongo {
- using std::unique_ptr;
- using std::shared_ptr;
- using std::endl;
- using std::string;
-
- namespace dur {
-
- /** read a durop from journal file referenced by br.
- @param opcode the opcode which has already been written from the bufreader
- */
- shared_ptr<DurOp> DurOp::read(unsigned opcode, BufReader& br) {
- shared_ptr<DurOp> op;
- switch( opcode ) {
- case JEntry::OpCode_FileCreated:
- op = shared_ptr<DurOp>( new FileCreatedOp(br) );
- break;
- case JEntry::OpCode_DropDb:
- op = shared_ptr<DurOp>( new DropDbOp(br) );
- break;
- default:
- massert(13546, (str::stream() << "journal recover: unrecognized opcode in journal " << opcode), false);
- }
- return op;
- }
+using std::unique_ptr;
+using std::shared_ptr;
+using std::endl;
+using std::string;
- void DurOp::serialize(AlignedBuilder& ab) {
- ab.appendNum(_opcode);
- _serialize(ab);
- }
+namespace dur {
- DropDbOp::DropDbOp(BufReader& log) : DurOp(JEntry::OpCode_DropDb) {
- unsigned long long reserved;
- log.read(reserved);
- log.read(reserved);
- log.readStr(_db);
- string reservedStr;
- log.readStr(reservedStr);
- }
+/** read a durop from journal file referenced by br.
+ @param opcode the opcode which has already been read from the bufreader
+*/
+shared_ptr<DurOp> DurOp::read(unsigned opcode, BufReader& br) {
+ shared_ptr<DurOp> op;
+ switch (opcode) {
+ case JEntry::OpCode_FileCreated:
+ op = shared_ptr<DurOp>(new FileCreatedOp(br));
+ break;
+ case JEntry::OpCode_DropDb:
+ op = shared_ptr<DurOp>(new DropDbOp(br));
+ break;
+ default:
+ massert(13546,
+ (str::stream() << "journal recover: unrecognized opcode in journal " << opcode),
+ false);
+ }
+ return op;
+}
- void DropDbOp::_serialize(AlignedBuilder& ab) {
- ab.appendNum((unsigned long long) 0); // reserved for future use
- ab.appendNum((unsigned long long) 0); // reserved for future use
- ab.appendStr(_db);
- ab.appendStr(""); // reserved
- }
+void DurOp::serialize(AlignedBuilder& ab) {
+ ab.appendNum(_opcode);
+ _serialize(ab);
+}
- /** throws */
- void DropDbOp::replay() {
- log() << "recover replay drop db " << _db << endl;
- _deleteDataFiles(_db);
- }
+DropDbOp::DropDbOp(BufReader& log) : DurOp(JEntry::OpCode_DropDb) {
+ unsigned long long reserved;
+ log.read(reserved);
+ log.read(reserved);
+ log.readStr(_db);
+ string reservedStr;
+ log.readStr(reservedStr);
+}
- FileCreatedOp::FileCreatedOp(const std::string& f, unsigned long long l) :
- DurOp(JEntry::OpCode_FileCreated) {
- _p = RelativePath::fromFullPath(storageGlobalParams.dbpath, f);
- _len = l;
- }
+void DropDbOp::_serialize(AlignedBuilder& ab) {
+ ab.appendNum((unsigned long long)0); // reserved for future use
+ ab.appendNum((unsigned long long)0); // reserved for future use
+ ab.appendStr(_db);
+ ab.appendStr(""); // reserved
+}
- FileCreatedOp::FileCreatedOp(BufReader& log) : DurOp(JEntry::OpCode_FileCreated) {
- unsigned long long reserved;
- log.read(reserved);
- log.read(reserved);
- log.read(_len); // size of file, not length of name
- string s;
- log.readStr(s);
- _p._p = s;
- }
+/** throws */
+void DropDbOp::replay() {
+ log() << "recover replay drop db " << _db << endl;
+ _deleteDataFiles(_db);
+}
- void FileCreatedOp::_serialize(AlignedBuilder& ab) {
- ab.appendNum((unsigned long long) 0); // reserved for future use
- ab.appendNum((unsigned long long) 0); // reserved for future use
- ab.appendNum(_len);
- ab.appendStr(_p.toString());
- }
+FileCreatedOp::FileCreatedOp(const std::string& f, unsigned long long l)
+ : DurOp(JEntry::OpCode_FileCreated) {
+ _p = RelativePath::fromFullPath(storageGlobalParams.dbpath, f);
+ _len = l;
+}
- string FileCreatedOp::toString() {
- return str::stream() << "FileCreatedOp " << _p.toString() << ' ' << _len/1024.0/1024.0 << "MB";
- }
+FileCreatedOp::FileCreatedOp(BufReader& log) : DurOp(JEntry::OpCode_FileCreated) {
+ unsigned long long reserved;
+ log.read(reserved);
+ log.read(reserved);
+ log.read(_len); // size of file, not length of name
+ string s;
+ log.readStr(s);
+ _p._p = s;
+}
- // if an operation deletes or creates a file (or moves etc.), it may need files closed.
- bool FileCreatedOp::needFilesClosed() {
- return boost::filesystem::exists( _p.asFullPath() );
- }
+void FileCreatedOp::_serialize(AlignedBuilder& ab) {
+ ab.appendNum((unsigned long long)0); // reserved for future use
+ ab.appendNum((unsigned long long)0); // reserved for future use
+ ab.appendNum(_len);
+ ab.appendStr(_p.toString());
+}
- void FileCreatedOp::replay() {
- // i believe the code assumes new files are filled with zeros. thus we have to recreate the file,
- // or rewrite at least, even if it were the right length. perhaps one day we should change that
- // although easier to avoid defects if we assume it is zeros perhaps.
- string full = _p.asFullPath();
- if( boost::filesystem::exists(full) ) {
- try {
- boost::filesystem::remove(full);
- }
- catch(std::exception& e) {
- LOG(1) << "recover info FileCreateOp::replay unlink " << e.what() << endl;
- }
- }
-
- log() << "recover create file " << full << ' ' << _len/1024.0/1024.0 << "MB" << endl;
- if( boost::filesystem::exists(full) ) {
- // first delete if exists.
- try {
- boost::filesystem::remove(full);
- }
- catch(...) {
- log() << "warning could not delete file " << full << endl;
- }
- }
- ensureParentDirCreated(full);
- File f;
- f.open(full.c_str());
- massert(13547, str::stream() << "recover couldn't create file " << full, f.is_open());
- unsigned long long left = _len;
- const unsigned blksz = 64 * 1024;
- unique_ptr<char[]> v( new char[blksz] );
- memset( v.get(), 0, blksz );
- fileofs ofs = 0;
- while( left ) {
- unsigned long long w = left < blksz ? left : blksz;
- f.write(ofs, v.get(), (unsigned) w);
- left -= w;
- ofs += w;
- }
- f.fsync();
- flushMyDirectory(full);
- massert(13628, str::stream() << "recover failure writing file " << full, !f.bad() );
- }
+string FileCreatedOp::toString() {
+ return str::stream() << "FileCreatedOp " << _p.toString() << ' ' << _len / 1024.0 / 1024.0
+ << "MB";
+}
+// If an operation deletes, creates, or moves a file, it may require all files to be closed first.
+bool FileCreatedOp::needFilesClosed() {
+ return boost::filesystem::exists(_p.asFullPath());
+}
+
+void FileCreatedOp::replay() {
+    // The code assumes new files are filled with zeros, so we have to recreate (or at least
+    // rewrite) the file even if it already has the right length. That assumption could be
+    // relaxed one day, but it is easier to avoid defects if we can rely on zeroed files.
+ string full = _p.asFullPath();
+ if (boost::filesystem::exists(full)) {
+ try {
+ boost::filesystem::remove(full);
+ } catch (std::exception& e) {
+            LOG(1) << "recover info FileCreatedOp::replay unlink " << e.what() << endl;
+ }
}
+ log() << "recover create file " << full << ' ' << _len / 1024.0 / 1024.0 << "MB" << endl;
+ if (boost::filesystem::exists(full)) {
+ // first delete if exists.
+ try {
+ boost::filesystem::remove(full);
+ } catch (...) {
+ log() << "warning could not delete file " << full << endl;
+ }
+ }
+ ensureParentDirCreated(full);
+ File f;
+ f.open(full.c_str());
+ massert(13547, str::stream() << "recover couldn't create file " << full, f.is_open());
+ unsigned long long left = _len;
+ const unsigned blksz = 64 * 1024;
+ unique_ptr<char[]> v(new char[blksz]);
+ memset(v.get(), 0, blksz);
+ fileofs ofs = 0;
+ while (left) {
+ unsigned long long w = left < blksz ? left : blksz;
+ f.write(ofs, v.get(), (unsigned)w);
+ left -= w;
+ ofs += w;
+ }
+ f.fsync();
+ flushMyDirectory(full);
+ massert(13628, str::stream() << "recover failure writing file " << full, !f.bad());
+}
+}
}
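
The DurOp::read() dispatch above is the heart of non-basic journal replay: each special entry is written as an opcode followed by an op-specific payload, and recovery reconstructs the op from the opcode. A minimal standalone sketch of that pattern follows; the Opcode value and Op types are hypothetical stand-ins, not the mongo classes.

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <stdexcept>
    #include <string>

    // Hypothetical stand-in for a journaled "drop database" operation.
    enum Opcode : uint32_t { kOpDropDb = 0xffffffed };

    struct Op {
        virtual ~Op() = default;
        virtual void replay() const = 0;  // applied during recovery
    };

    struct DropDbSketch : Op {
        std::string db = "test";
        void replay() const override {
            std::cout << "replay drop db " << db << "\n";
        }
    };

    // Dispatch on an opcode that the caller has already consumed from the journal.
    std::shared_ptr<Op> readOp(uint32_t opcode) {
        switch (opcode) {
            case kOpDropDb:
                return std::make_shared<DropDbSketch>();
            default:
                throw std::runtime_error("journal recover: unrecognized opcode");
        }
    }

    int main() {
        readOp(kOpDropDb)->replay();
        return 0;
    }

The real DurOp::serialize() writes the opcode before the payload, so the writer and a reader like the one above stay symmetric.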
diff --git a/src/mongo/db/storage/mmap_v1/durop.h b/src/mongo/db/storage/mmap_v1/durop.h
index 9ebddb3dfc0..a798f210616 100644
--- a/src/mongo/db/storage/mmap_v1/durop.h
+++ b/src/mongo/db/storage/mmap_v1/durop.h
@@ -37,86 +37,93 @@
namespace mongo {
- class AlignedBuilder;
-
- namespace dur {
-
- /** DurOp - Operations we journal that aren't just basic writes.
- *
- * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
- * We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
- * them (we don't want a vtable for example there).
- *
- * For each op we want to journal, we define a subclass.
- */
- class DurOp { /* copyable */
- public:
- // @param opcode a sentinel value near max unsigned which uniquely identifies the operation.
- // @see dur::JEntry
- DurOp(unsigned opcode) : _opcode(opcode) { }
-
- virtual ~DurOp() { }
-
- /** serialize the op out to a builder which will then be written (presumably) to the journal */
- void serialize(AlignedBuilder& ab);
-
- /** read a durop from journal file referenced by br.
- @param opcode the opcode which has already been written from the bufreader
- */
- static std::shared_ptr<DurOp> read(unsigned opcode, BufReader& br);
-
- /** replay the operation (during recovery)
- throws
-
- For now, these are not replayed during the normal WRITETODATAFILES phase, since these
- operations are handled in other parts of the code. At some point this may change.
- */
- virtual void replay() = 0;
-
- virtual std::string toString() = 0;
-
- /** if the op requires all file to be closed before doing its work, returns true. */
- virtual bool needFilesClosed() { return false; }
-
- protected:
- /** DurOp will have already written the opcode for you */
- virtual void _serialize(AlignedBuilder& ab) = 0;
-
- private:
- const unsigned _opcode;
- };
-
- /** indicates creation of a new file */
- class FileCreatedOp : public DurOp {
- public:
- FileCreatedOp(BufReader& log);
- /** param f filename to create with path */
- FileCreatedOp(const std::string& f, unsigned long long l);
- virtual void replay();
- virtual std::string toString();
- virtual bool needFilesClosed();
- protected:
- virtual void _serialize(AlignedBuilder& ab);
- private:
- RelativePath _p;
- unsigned long long _len; // size of file, not length of name
- };
-
- /** record drop of a database */
- class DropDbOp : public DurOp {
- public:
- DropDbOp(BufReader& log);
- DropDbOp(const std::string& db) :
- DurOp(JEntry::OpCode_DropDb), _db(db) { }
- virtual void replay();
- virtual std::string toString() { return std::string("DropDbOp ") + _db; }
- virtual bool needFilesClosed() { return true; }
- protected:
- virtual void _serialize(AlignedBuilder& ab);
- private:
- std::string _db;
- };
+class AlignedBuilder;
+
+namespace dur {
+
+/** DurOp - Operations we journal that aren't just basic writes.
+ *
+ * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
+ * We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
+ * them (we don't want a vtable for example there).
+ *
+ * For each op we want to journal, we define a subclass.
+ */
+class DurOp {/* copyable */
+public:
+ // @param opcode a sentinel value near max unsigned which uniquely identifies the operation.
+ // @see dur::JEntry
+ DurOp(unsigned opcode) : _opcode(opcode) {}
+
+ virtual ~DurOp() {}
+
+ /** serialize the op out to a builder which will then be written (presumably) to the journal */
+ void serialize(AlignedBuilder& ab);
+
+    /** read a DurOp from the journal file referenced by br.
+        @param opcode the opcode, which has already been read from the BufReader
+    */
+ static std::shared_ptr<DurOp> read(unsigned opcode, BufReader& br);
+
+ /** replay the operation (during recovery)
+ throws
+
+ For now, these are not replayed during the normal WRITETODATAFILES phase, since these
+ operations are handled in other parts of the code. At some point this may change.
+ */
+ virtual void replay() = 0;
+
+ virtual std::string toString() = 0;
+
+    /** if the op requires all files to be closed before doing its work, returns true. */
+ virtual bool needFilesClosed() {
+ return false;
+ }
+protected:
+ /** DurOp will have already written the opcode for you */
+ virtual void _serialize(AlignedBuilder& ab) = 0;
+
+private:
+ const unsigned _opcode;
+};
+
+/** indicates creation of a new file */
+class FileCreatedOp : public DurOp {
+public:
+ FileCreatedOp(BufReader& log);
+    /** @param f filename (including path) of the file to create */
+ FileCreatedOp(const std::string& f, unsigned long long l);
+ virtual void replay();
+ virtual std::string toString();
+ virtual bool needFilesClosed();
+
+protected:
+ virtual void _serialize(AlignedBuilder& ab);
+
+private:
+ RelativePath _p;
+ unsigned long long _len; // size of file, not length of name
+};
+
+/** record drop of a database */
+class DropDbOp : public DurOp {
+public:
+ DropDbOp(BufReader& log);
+ DropDbOp(const std::string& db) : DurOp(JEntry::OpCode_DropDb), _db(db) {}
+ virtual void replay();
+ virtual std::string toString() {
+ return std::string("DropDbOp ") + _db;
+ }
+ virtual bool needFilesClosed() {
+ return true;
}
+protected:
+ virtual void _serialize(AlignedBuilder& ab);
+
+private:
+ std::string _db;
+};
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/extent.cpp b/src/mongo/db/storage/mmap_v1/extent.cpp
index 905e4d28a9e..7f6d41cde80 100644
--- a/src/mongo/db/storage/mmap_v1/extent.cpp
+++ b/src/mongo/db/storage/mmap_v1/extent.cpp
@@ -36,82 +36,70 @@
namespace mongo {
- using std::iostream;
- using std::string;
- using std::vector;
+using std::iostream;
+using std::string;
+using std::vector;
- BOOST_STATIC_ASSERT( sizeof(Extent)-4 == 48+128 );
+BOOST_STATIC_ASSERT(sizeof(Extent) - 4 == 48 + 128);
- BSONObj Extent::dump() const {
- return BSON( "loc" << myLoc.toString()
- << "xnext" << xnext.toString()
- << "xprev" << xprev.toString()
- << "nsdiag" << nsDiagnostic.toString()
- << "size" << length
- << "firstRecord"
- << firstRecord.toString()
- << "lastRecord" << lastRecord.toString() );
- }
+BSONObj Extent::dump() const {
+ return BSON("loc" << myLoc.toString() << "xnext" << xnext.toString() << "xprev"
+ << xprev.toString() << "nsdiag" << nsDiagnostic.toString() << "size" << length
+ << "firstRecord" << firstRecord.toString() << "lastRecord"
+ << lastRecord.toString());
+}
- void Extent::dump(iostream& s) const {
- s << " loc:" << myLoc.toString()
- << " xnext:" << xnext.toString()
- << " xprev:" << xprev.toString() << '\n';
- s << " nsdiag:" << nsDiagnostic.toString() << '\n';
- s << " size:" << length
- << " firstRecord:" << firstRecord.toString()
- << " lastRecord:" << lastRecord.toString() << '\n';
- }
+void Extent::dump(iostream& s) const {
+ s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString()
+ << " xprev:" << xprev.toString() << '\n';
+ s << " nsdiag:" << nsDiagnostic.toString() << '\n';
+ s << " size:" << length << " firstRecord:" << firstRecord.toString()
+ << " lastRecord:" << lastRecord.toString() << '\n';
+}
- bool Extent::validates(const DiskLoc diskLoc, vector<string>* errors) const {
- bool extentOk = true;
- if (magic != extentSignature) {
- if (errors) {
- StringBuilder sb;
- sb << "bad extent signature " << integerToHex(magic)
- << " in extent " << diskLoc.toString();
- errors->push_back( sb.str() );
- }
- extentOk = false;
+bool Extent::validates(const DiskLoc diskLoc, vector<string>* errors) const {
+ bool extentOk = true;
+ if (magic != extentSignature) {
+ if (errors) {
+ StringBuilder sb;
+ sb << "bad extent signature " << integerToHex(magic) << " in extent "
+ << diskLoc.toString();
+ errors->push_back(sb.str());
}
- if (myLoc != diskLoc) {
- if (errors) {
- StringBuilder sb;
- sb << "extent " << diskLoc.toString()
- << " self-pointer is " << myLoc.toString();
- errors->push_back( sb.str() );
- }
- extentOk = false;
+ extentOk = false;
+ }
+ if (myLoc != diskLoc) {
+ if (errors) {
+ StringBuilder sb;
+ sb << "extent " << diskLoc.toString() << " self-pointer is " << myLoc.toString();
+ errors->push_back(sb.str());
}
- if (firstRecord.isNull() != lastRecord.isNull()) {
- if (errors) {
- StringBuilder sb;
- if (firstRecord.isNull()) {
- sb << "in extent " << diskLoc.toString()
- << ", firstRecord is null but lastRecord is "
- << lastRecord.toString();
- }
- else {
- sb << "in extent " << diskLoc.toString()
- << ", firstRecord is " << firstRecord.toString()
- << " but lastRecord is null";
- }
- errors->push_back( sb.str() );
+ extentOk = false;
+ }
+ if (firstRecord.isNull() != lastRecord.isNull()) {
+ if (errors) {
+ StringBuilder sb;
+ if (firstRecord.isNull()) {
+ sb << "in extent " << diskLoc.toString()
+ << ", firstRecord is null but lastRecord is " << lastRecord.toString();
+ } else {
+ sb << "in extent " << diskLoc.toString() << ", firstRecord is "
+ << firstRecord.toString() << " but lastRecord is null";
}
- extentOk = false;
+ errors->push_back(sb.str());
}
- static const int minSize = 0x1000;
- if (length < minSize) {
- if (errors) {
- StringBuilder sb;
- sb << "length of extent " << diskLoc.toString()
- << " is " << length
- << ", which is less than minimum length of " << minSize;
- errors->push_back( sb.str() );
- }
- extentOk = false;
+ extentOk = false;
+ }
+ static const int minSize = 0x1000;
+ if (length < minSize) {
+ if (errors) {
+ StringBuilder sb;
+ sb << "length of extent " << diskLoc.toString() << " is " << length
+ << ", which is less than minimum length of " << minSize;
+ errors->push_back(sb.str());
}
- return extentOk;
+ extentOk = false;
}
-
+ return extentOk;
+}
}
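
Extent::validates() above reduces to four independent invariants: the magic signature, the self-pointer, agreement between firstRecord and lastRecord null-ness, and a 4KB minimum length. A simplified, self-contained illustration of the same checks, using a hypothetical SimpleExtent rather than the packed on-disk struct:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct SimpleExtent {
        uint32_t magic;      // expected to equal kSignature
        int64_t selfOffset;  // where the extent believes it lives
        int length;          // total size, including the header
        bool hasFirstRecord;
        bool hasLastRecord;
    };

    constexpr uint32_t kSignature = 0x41424344;
    constexpr int kMinLength = 0x1000;

    // Mirrors the checks in Extent::validates(): every failure is reported,
    // and the extent is only "ok" if all four invariants hold.
    bool validates(const SimpleExtent& e, int64_t actualOffset, std::vector<std::string>* errors) {
        bool ok = true;
        if (e.magic != kSignature) {
            if (errors) errors->push_back("bad extent signature");
            ok = false;
        }
        if (e.selfOffset != actualOffset) {
            if (errors) errors->push_back("extent self-pointer does not match its location");
            ok = false;
        }
        if (e.hasFirstRecord != e.hasLastRecord) {
            if (errors) errors->push_back("firstRecord/lastRecord null-ness disagrees");
            ok = false;
        }
        if (e.length < kMinLength) {
            if (errors) errors->push_back("extent shorter than the 4KB minimum");
            ok = false;
        }
        return ok;
    }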
diff --git a/src/mongo/db/storage/mmap_v1/extent.h b/src/mongo/db/storage/mmap_v1/extent.h
index a25d34c49e0..9d6d3935346 100644
--- a/src/mongo/db/storage/mmap_v1/extent.h
+++ b/src/mongo/db/storage/mmap_v1/extent.h
@@ -39,45 +39,50 @@
namespace mongo {
- /* extents are datafile regions where all the records within the region
- belong to the same namespace.
+/* extents are datafile regions where all the records within the region
+ belong to the same namespace.
- (11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
- (11:12:55 AM) dm10gen: and that is placed on the free list
- */
+   When an extent is allocated, all of its empty space is put into one big DeletedRecord,
+   which is then placed on the free list.
+*/
#pragma pack(1)
- struct Extent {
- enum { extentSignature = 0x41424344 };
- unsigned magic;
- DiskLoc myLoc;
+struct Extent {
+ enum { extentSignature = 0x41424344 };
+ unsigned magic;
+ DiskLoc myLoc;
- /* next/prev extent for this namespace */
- DiskLoc xnext;
- DiskLoc xprev;
+ /* next/prev extent for this namespace */
+ DiskLoc xnext;
+ DiskLoc xprev;
- /* which namespace this extent is for. this is just for troubleshooting really
- and won't even be correct if the collection were renamed!
- */
- Namespace nsDiagnostic;
+ /* which namespace this extent is for. this is just for troubleshooting really
+ and won't even be correct if the collection were renamed!
+ */
+ Namespace nsDiagnostic;
- int length; /* size of the extent, including these fields */
- DiskLoc firstRecord;
- DiskLoc lastRecord;
- char _extentData[4];
+ int length; /* size of the extent, including these fields */
+ DiskLoc firstRecord;
+ DiskLoc lastRecord;
+ char _extentData[4];
- // -----
+ // -----
- bool validates(const DiskLoc diskLoc, std::vector<std::string>* errors = NULL) const;
+ bool validates(const DiskLoc diskLoc, std::vector<std::string>* errors = NULL) const;
- BSONObj dump() const;
+ BSONObj dump() const;
- void dump(std::iostream& s) const;
+ void dump(std::iostream& s) const;
- bool isOk() const { return magic == extentSignature; }
- void assertOk() const { verify(isOk()); }
+ bool isOk() const {
+ return magic == extentSignature;
+ }
+ void assertOk() const {
+ verify(isOk());
+ }
- static int HeaderSize() { return sizeof(Extent)-4; }
- };
+ static int HeaderSize() {
+ return sizeof(Extent) - 4;
+ }
+};
#pragma pack()
-
}
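
The BOOST_STATIC_ASSERT in extent.cpp pins this packed layout: HeaderSize() is sizeof(Extent) - 4 because the trailing 4-byte _extentData overlaps the first record. A minimal sketch of the same layout-pinning technique with a standard static_assert, on a hypothetical struct rather than the real Extent:

    #include <cstdint>

    #pragma pack(1)
    struct DiskHeaderSketch {
        uint32_t magic;         // on-disk signature
        char fixedFields[172];  // stands in for the DiskLocs, Namespace, length, etc.
        char firstRecord[4];    // overlaps the first record's data
    };
    #pragma pack()

    // Same guarantee as BOOST_STATIC_ASSERT(sizeof(Extent) - 4 == 48 + 128):
    // if a field is added or reordered, the build fails instead of corrupting data files.
    static_assert(sizeof(DiskHeaderSketch) - 4 == 48 + 128,
                  "on-disk header layout must not change");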
diff --git a/src/mongo/db/storage/mmap_v1/extent_manager.cpp b/src/mongo/db/storage/mmap_v1/extent_manager.cpp
index 8efc2cbc50f..15222fac01a 100644
--- a/src/mongo/db/storage/mmap_v1/extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/extent_manager.cpp
@@ -34,66 +34,64 @@
namespace mongo {
- int ExtentManager::quantizeExtentSize( int size ) const {
+int ExtentManager::quantizeExtentSize(int size) const {
+ if (size == maxSize()) {
+ // no point doing quantizing for the entire file
+ return size;
+ }
- if ( size == maxSize() ) {
- // no point doing quantizing for the entire file
- return size;
- }
+ invariant(size <= maxSize());
- invariant( size <= maxSize() );
+ // make sizes align with VM page size
+ int newSize = (size + 0xfff) & 0xfffff000;
- // make sizes align with VM page size
- int newSize = (size + 0xfff) & 0xfffff000;
+ if (newSize > maxSize()) {
+ return maxSize();
+ }
- if ( newSize > maxSize() ) {
- return maxSize();
- }
+ if (newSize < minSize()) {
+ return minSize();
+ }
- if ( newSize < minSize() ) {
- return minSize();
- }
+ return newSize;
+}
- return newSize;
+int ExtentManager::followupSize(int len, int lastExtentLen) const {
+ invariant(len < maxSize());
+ int x = initialSize(len);
+ // changed from 1.20 to 1.35 in v2.1.x to get to larger extent size faster
+ int y = (int)(lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.35);
+ int sz = y > x ? y : x;
+
+ if (sz < lastExtentLen) {
+ // this means there was an int overflow
+ // so we should turn it into maxSize
+ return maxSize();
+ } else if (sz > maxSize()) {
+ return maxSize();
}
- int ExtentManager::followupSize( int len, int lastExtentLen ) const {
- invariant( len < maxSize() );
- int x = initialSize(len);
- // changed from 1.20 to 1.35 in v2.1.x to get to larger extent size faster
- int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.35);
- int sz = y > x ? y : x;
-
- if ( sz < lastExtentLen ) {
- // this means there was an int overflow
- // so we should turn it into maxSize
- return maxSize();
- }
- else if ( sz > maxSize() ) {
- return maxSize();
- }
-
- sz = quantizeExtentSize( sz );
- verify( sz >= len );
-
- return sz;
- }
+ sz = quantizeExtentSize(sz);
+ verify(sz >= len);
- int ExtentManager::initialSize( int len ) const {
- invariant( len <= maxSize() );
+ return sz;
+}
- long long sz = len * 16;
- if ( len < 1000 )
- sz = len * 64;
+int ExtentManager::initialSize(int len) const {
+ invariant(len <= maxSize());
- if ( sz >= maxSize() )
- return maxSize();
+ long long sz = len * 16;
+ if (len < 1000)
+ sz = len * 64;
- if ( sz <= minSize() )
- return minSize();
+ if (sz >= maxSize())
+ return maxSize();
- int z = ExtentManager::quantizeExtentSize( sz );
- verify( z >= len );
- return z;
- }
+ if (sz <= minSize())
+ return minSize();
+
+ int z = ExtentManager::quantizeExtentSize(sz);
+ verify(z >= len);
+ return z;
+}
}
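
The sizing rules above are easy to exercise in isolation: quantizeExtentSize() rounds a request up to a 4KB boundary and clamps it to the manager's bounds, while followupSize() grows extents by roughly 4x until they pass 4MB and by 1.35x afterwards. A standalone sketch of the rounding/clamping step, with a hypothetical maximum standing in for maxSize():

    #include <algorithm>
    #include <cassert>

    constexpr int kMinExtent = 0x1000;      // 4KB, as in minSize()
    constexpr int kMaxExtent = 0x7ff00000;  // hypothetical cap standing in for maxSize()

    // Round up to a 4KB boundary, then clamp to [kMinExtent, kMaxExtent].
    int quantize(int size) {
        if (size == kMaxExtent)
            return size;  // no point quantizing a whole-file request
        int rounded = (size + 0xfff) & 0xfffff000;
        return std::min(std::max(rounded, kMinExtent), kMaxExtent);
    }

    int main() {
        assert(quantize(1) == 0x1000);       // tiny requests get the minimum
        assert(quantize(0x1001) == 0x2000);  // always rounds *up* to the next page
        assert(quantize(0x2000) == 0x2000);  // already-aligned sizes are unchanged
        return 0;
    }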
diff --git a/src/mongo/db/storage/mmap_v1/extent_manager.h b/src/mongo/db/storage/mmap_v1/extent_manager.h
index 54191faa2cf..6151f8e11a2 100644
--- a/src/mongo/db/storage/mmap_v1/extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/extent_manager.h
@@ -40,141 +40,141 @@
namespace mongo {
- class DataFile;
- class MmapV1RecordHeader;
- class RecordFetcher;
- class OperationContext;
+class DataFile;
+class MmapV1RecordHeader;
+class RecordFetcher;
+class OperationContext;
- struct Extent;
+struct Extent;
+
+/**
+ * ExtentManager basics
+ * - one per database
+ * - responsible for managing <db>.# files
+ * - NOT responsible for .ns file
+ * - gives out extents
+ * - responsible for figuring out how to get a new extent
+ * - can use any method it wants to do so
+ * - this structure is NOT stored on disk
+ * - files will not be removed from the EM
+ * - extent size and loc are immutable
+ * - this class is thread safe, once constructed and init()-ialized
+ */
+class ExtentManager {
+ MONGO_DISALLOW_COPYING(ExtentManager);
+
+public:
+ ExtentManager() {}
+
+ virtual ~ExtentManager() {}
+
+ /**
+ * opens all current files
+ */
+ virtual Status init(OperationContext* txn) = 0;
+
+ virtual int numFiles() const = 0;
+ virtual long long fileSize() const = 0;
+
+ // must call Extent::reuse on the returned extent
+ virtual DiskLoc allocateExtent(OperationContext* txn,
+ bool capped,
+ int size,
+ bool enforceQuota) = 0;
+
+ /**
+ * firstExt has to be == lastExt or a chain
+ */
+ virtual void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) = 0;
/**
- * ExtentManager basics
- * - one per database
- * - responsible for managing <db>.# files
- * - NOT responsible for .ns file
- * - gives out extents
- * - responsible for figuring out how to get a new extent
- * - can use any method it wants to do so
- * - this structure is NOT stored on disk
- * - files will not be removed from the EM
- * - extent size and loc are immutable
- * - this class is thread safe, once constructed and init()-ialized
+ * frees a single extent
+ * ignores all fields in the Extent except: magic, myLoc, length
*/
- class ExtentManager {
- MONGO_DISALLOW_COPYING( ExtentManager );
+ virtual void freeExtent(OperationContext* txn, DiskLoc extent) = 0;
+ /**
+     * Retrieve statistics on the free list managed by this ExtentManager.
+ * @param numExtents - non-null pointer to an int that will receive the number of extents
+ * @param totalFreeSizeBytes - non-null pointer to an int64_t receiving the total free
+ * space in the free list.
+ */
+ virtual void freeListStats(OperationContext* txn,
+ int* numExtents,
+ int64_t* totalFreeSizeBytes) const = 0;
+
+ /**
+ * @param loc - has to be for a specific MmapV1RecordHeader
+ * Note(erh): this sadly cannot be removed.
+ * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an offset
+ * from an extent. This intrinsically links an original record store to the original extent
+ * manager.
+ */
+ virtual MmapV1RecordHeader* recordForV1(const DiskLoc& loc) const = 0;
+
+ /**
+ * The extent manager tracks accesses to DiskLocs. This returns non-NULL if the DiskLoc has
+ * been recently accessed, and therefore has likely been paged into physical memory.
+ * Returns nullptr if the DiskLoc is Null.
+ *
+ */
+ virtual std::unique_ptr<RecordFetcher> recordNeedsFetch(const DiskLoc& loc) const = 0;
+
+ /**
+ * @param loc - has to be for a specific MmapV1RecordHeader (not an Extent)
+ * Note(erh) see comment on recordFor
+ */
+ virtual Extent* extentForV1(const DiskLoc& loc) const = 0;
+
+ /**
+ * @param loc - has to be for a specific MmapV1RecordHeader (not an Extent)
+ * Note(erh) see comment on recordFor
+ */
+ virtual DiskLoc extentLocForV1(const DiskLoc& loc) const = 0;
+
+ /**
+ * @param loc - has to be for a specific Extent
+ */
+ virtual Extent* getExtent(const DiskLoc& loc, bool doSanityCheck = true) const = 0;
+
+ /**
+ * @return maximum size of an Extent
+ */
+ virtual int maxSize() const = 0;
+
+ /**
+ * @return minimum size of an Extent
+ */
+ virtual int minSize() const {
+ return 0x1000;
+ }
+
+ /**
+ * @param recordLen length of record we need
+ * @param lastExt size of last extent which is a factor in next extent size
+ */
+ virtual int followupSize(int recordLen, int lastExtentLen) const;
+
+ /** get a suggested size for the first extent in a namespace
+ * @param recordLen length of record we need to insert
+ */
+ virtual int initialSize(int recordLen) const;
+
+ /**
+ * quantizes extent size to >= min + page boundary
+ */
+ virtual int quantizeExtentSize(int size) const;
+
+ // see cacheHint methods
+ enum HintType { Sequential, Random };
+ class CacheHint {
public:
- ExtentManager(){}
-
- virtual ~ExtentManager(){}
-
- /**
- * opens all current files
- */
- virtual Status init(OperationContext* txn) = 0;
-
- virtual int numFiles() const = 0;
- virtual long long fileSize() const = 0;
-
- // must call Extent::reuse on the returned extent
- virtual DiskLoc allocateExtent( OperationContext* txn,
- bool capped,
- int size,
- bool enforceQuota ) = 0;
-
- /**
- * firstExt has to be == lastExt or a chain
- */
- virtual void freeExtents( OperationContext* txn,
- DiskLoc firstExt, DiskLoc lastExt ) = 0;
-
- /**
- * frees a single extent
- * ignores all fields in the Extent except: magic, myLoc, length
- */
- virtual void freeExtent( OperationContext* txn, DiskLoc extent ) = 0;
-
- /**
- * Retrieve statistics on the the free list managed by this ExtentManger.
- * @param numExtents - non-null pointer to an int that will receive the number of extents
- * @param totalFreeSizeBytes - non-null pointer to an int64_t receiving the total free
- * space in the free list.
- */
- virtual void freeListStats(OperationContext* txn,
- int* numExtents,
- int64_t* totalFreeSizeBytes) const = 0;
-
- /**
- * @param loc - has to be for a specific MmapV1RecordHeader
- * Note(erh): this sadly cannot be removed.
- * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an offset
- * from an extent. This intrinsically links an original record store to the original extent
- * manager.
- */
- virtual MmapV1RecordHeader* recordForV1( const DiskLoc& loc ) const = 0;
-
- /**
- * The extent manager tracks accesses to DiskLocs. This returns non-NULL if the DiskLoc has
- * been recently accessed, and therefore has likely been paged into physical memory.
- * Returns nullptr if the DiskLoc is Null.
- *
- */
- virtual std::unique_ptr<RecordFetcher> recordNeedsFetch( const DiskLoc& loc ) const = 0;
-
- /**
- * @param loc - has to be for a specific MmapV1RecordHeader (not an Extent)
- * Note(erh) see comment on recordFor
- */
- virtual Extent* extentForV1( const DiskLoc& loc ) const = 0;
-
- /**
- * @param loc - has to be for a specific MmapV1RecordHeader (not an Extent)
- * Note(erh) see comment on recordFor
- */
- virtual DiskLoc extentLocForV1( const DiskLoc& loc ) const = 0;
-
- /**
- * @param loc - has to be for a specific Extent
- */
- virtual Extent* getExtent( const DiskLoc& loc, bool doSanityCheck = true ) const = 0;
-
- /**
- * @return maximum size of an Extent
- */
- virtual int maxSize() const = 0;
-
- /**
- * @return minimum size of an Extent
- */
- virtual int minSize() const { return 0x1000; }
-
- /**
- * @param recordLen length of record we need
- * @param lastExt size of last extent which is a factor in next extent size
- */
- virtual int followupSize( int recordLen, int lastExtentLen ) const;
-
- /** get a suggested size for the first extent in a namespace
- * @param recordLen length of record we need to insert
- */
- virtual int initialSize( int recordLen ) const;
-
- /**
- * quantizes extent size to >= min + page boundary
- */
- virtual int quantizeExtentSize( int size ) const;
-
- // see cacheHint methods
- enum HintType { Sequential, Random };
- class CacheHint {
- public:
- virtual ~CacheHint(){}
- };
- /**
- * Tell the system that for this extent, it will have this kind of disk access.
- * Caller takes owernship of CacheHint
- */
- virtual CacheHint* cacheHint( const DiskLoc& extentLoc, const HintType& hint ) = 0;
+ virtual ~CacheHint() {}
};
-
+ /**
+     * Tell the system what kind of disk access pattern to expect for this extent.
+     * The caller takes ownership of the returned CacheHint.
+ */
+ virtual CacheHint* cacheHint(const DiskLoc& extentLoc, const HintType& hint) = 0;
+};
}
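
The followupSize()/initialSize() contract declared above is a geometric growth schedule, implemented in extent_manager.cpp. A small standalone sketch of just the growth step; the cap is hypothetical and the 4.0/1.35 factors are taken from that implementation:

    #include <cassert>

    constexpr int kCap = 0x40000000;  // hypothetical maximum extent size

    // Next extent is ~4x the last one until extents pass 4MB, then ~1.35x,
    // clamped so it never exceeds the cap (the real code also enforces a floor
    // of initialSize(recordLen) and re-quantizes the result).
    int nextExtentSize(int lastExtentLen) {
        double factor = lastExtentLen < 4000000 ? 4.0 : 1.35;
        long long next = static_cast<long long>(lastExtentLen * factor);
        return next > kCap ? kCap : static_cast<int>(next);
    }

    int main() {
        assert(nextExtentSize(1 << 20) == (4 << 20));                // 1MB -> 4MB
        assert(nextExtentSize(8 << 20) == int((8 << 20) * 1.35));    // past 4MB, grow slower
        return 0;
    }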
diff --git a/src/mongo/db/storage/mmap_v1/file_allocator.cpp b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
index bedd7d9e03d..0500ad43a83 100644
--- a/src/mongo/db/storage/mmap_v1/file_allocator.cpp
+++ b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
@@ -38,16 +38,16 @@
#include <fcntl.h>
#if defined(__FreeBSD__)
-# include <sys/param.h>
-# include <sys/mount.h>
+#include <sys/param.h>
+#include <sys/mount.h>
#endif
#if defined(__linux__)
-# include <sys/vfs.h>
+#include <sys/vfs.h>
#endif
#if defined(_WIN32)
-# include <io.h>
+#include <io.h>
#endif
#include "mongo/db/storage/paths.h"
@@ -71,402 +71,410 @@ using namespace mongoutils;
namespace mongo {
- using std::endl;
- using std::list;
- using std::string;
- using std::stringstream;
+using std::endl;
+using std::list;
+using std::string;
+using std::stringstream;
- // unique number for temporary file names
- unsigned long long FileAllocator::_uniqueNumber = 0;
- static SimpleMutex _uniqueNumberMutex;
+// unique number for temporary file names
+unsigned long long FileAllocator::_uniqueNumber = 0;
+static SimpleMutex _uniqueNumberMutex;
- MONGO_FP_DECLARE(allocateDiskFull);
+MONGO_FP_DECLARE(allocateDiskFull);
- /**
- * Aliases for Win32 CRT functions
- */
+/**
+ * Aliases for Win32 CRT functions
+ */
#if defined(_WIN32)
- static inline long lseek(int fd, long offset, int origin) { return _lseek(fd, offset, origin); }
- static inline int write(int fd, const void *data, int count) { return _write(fd, data, count); }
- static inline int close(int fd) { return _close(fd); }
-
- typedef BOOL (CALLBACK *GetVolumeInformationByHandleWPtr)(HANDLE, LPWSTR, DWORD, LPDWORD, LPDWORD, LPDWORD, LPWSTR, DWORD);
- GetVolumeInformationByHandleWPtr GetVolumeInformationByHandleWFunc;
-
- MONGO_INITIALIZER(InitGetVolumeInformationByHandleW)(InitializerContext *context) {
- HMODULE kernelLib = LoadLibraryA("kernel32.dll");
- if (kernelLib) {
- GetVolumeInformationByHandleWFunc = reinterpret_cast<GetVolumeInformationByHandleWPtr>
- (GetProcAddress(kernelLib, "GetVolumeInformationByHandleW"));
- }
- return Status::OK();
+static inline long lseek(int fd, long offset, int origin) {
+ return _lseek(fd, offset, origin);
+}
+static inline int write(int fd, const void* data, int count) {
+ return _write(fd, data, count);
+}
+static inline int close(int fd) {
+ return _close(fd);
+}
+
+typedef BOOL(CALLBACK* GetVolumeInformationByHandleWPtr)(
+ HANDLE, LPWSTR, DWORD, LPDWORD, LPDWORD, LPDWORD, LPWSTR, DWORD);
+GetVolumeInformationByHandleWPtr GetVolumeInformationByHandleWFunc;
+
+MONGO_INITIALIZER(InitGetVolumeInformationByHandleW)(InitializerContext* context) {
+ HMODULE kernelLib = LoadLibraryA("kernel32.dll");
+ if (kernelLib) {
+ GetVolumeInformationByHandleWFunc = reinterpret_cast<GetVolumeInformationByHandleWPtr>(
+ GetProcAddress(kernelLib, "GetVolumeInformationByHandleW"));
}
+ return Status::OK();
+}
#endif
- boost::filesystem::path ensureParentDirCreated(const boost::filesystem::path& p){
- const boost::filesystem::path parent = p.branch_path();
+boost::filesystem::path ensureParentDirCreated(const boost::filesystem::path& p) {
+ const boost::filesystem::path parent = p.branch_path();
- if (! boost::filesystem::exists(parent)){
- ensureParentDirCreated(parent);
- log() << "creating directory " << parent.string() << endl;
- boost::filesystem::create_directory(parent);
- flushMyDirectory(parent); // flushes grandparent to ensure parent exists after crash
- }
-
- verify(boost::filesystem::is_directory(parent));
- return parent;
+ if (!boost::filesystem::exists(parent)) {
+ ensureParentDirCreated(parent);
+ log() << "creating directory " << parent.string() << endl;
+ boost::filesystem::create_directory(parent);
+ flushMyDirectory(parent); // flushes grandparent to ensure parent exists after crash
}
- FileAllocator::FileAllocator() : _failed() {}
+ verify(boost::filesystem::is_directory(parent));
+ return parent;
+}
+FileAllocator::FileAllocator() : _failed() {}
- void FileAllocator::start() {
- stdx::thread t( stdx::bind( &FileAllocator::run , this ) );
- }
- void FileAllocator::requestAllocation( const string &name, long &size ) {
- stdx::lock_guard<stdx::mutex> lk( _pendingMutex );
- if ( _failed )
- return;
- long oldSize = prevSize( name );
- if ( oldSize != -1 ) {
- size = oldSize;
- return;
- }
- _pending.push_back( name );
- _pendingSize[ name ] = size;
- _pendingUpdated.notify_all();
- }
-
- void FileAllocator::allocateAsap( const string &name, unsigned long long &size ) {
- stdx::unique_lock<stdx::mutex> lk( _pendingMutex );
-
- // In case the allocator is in failed state, check once before starting so that subsequent
- // requests for the same database would fail fast after the first one has failed.
- checkFailure();
-
- long oldSize = prevSize( name );
- if ( oldSize != -1 ) {
- size = oldSize;
- if ( !inProgress( name ) )
- return;
- }
- checkFailure();
- _pendingSize[ name ] = size;
- if ( _pending.size() == 0 )
- _pending.push_back( name );
- else if ( _pending.front() != name ) {
- _pending.remove( name );
- list< string >::iterator i = _pending.begin();
- ++i;
- _pending.insert( i, name );
- }
- _pendingUpdated.notify_all();
- while( inProgress( name ) ) {
- checkFailure();
- _pendingUpdated.wait(lk);
- }
+void FileAllocator::start() {
+ stdx::thread t(stdx::bind(&FileAllocator::run, this));
+}
+void FileAllocator::requestAllocation(const string& name, long& size) {
+ stdx::lock_guard<stdx::mutex> lk(_pendingMutex);
+ if (_failed)
+ return;
+ long oldSize = prevSize(name);
+ if (oldSize != -1) {
+ size = oldSize;
+ return;
}
-
- void FileAllocator::waitUntilFinished() const {
- if ( _failed )
+ _pending.push_back(name);
+ _pendingSize[name] = size;
+ _pendingUpdated.notify_all();
+}
+
+void FileAllocator::allocateAsap(const string& name, unsigned long long& size) {
+ stdx::unique_lock<stdx::mutex> lk(_pendingMutex);
+
+ // In case the allocator is in failed state, check once before starting so that subsequent
+ // requests for the same database would fail fast after the first one has failed.
+ checkFailure();
+
+ long oldSize = prevSize(name);
+ if (oldSize != -1) {
+ size = oldSize;
+ if (!inProgress(name))
return;
- stdx::unique_lock<stdx::mutex> lk( _pendingMutex );
- while( _pending.size() != 0 )
- _pendingUpdated.wait(lk);
}
-
- // TODO: pull this out to per-OS files once they exist
- static bool useSparseFiles(int fd) {
-
+ checkFailure();
+ _pendingSize[name] = size;
+ if (_pending.size() == 0)
+ _pending.push_back(name);
+ else if (_pending.front() != name) {
+ _pending.remove(name);
+ list<string>::iterator i = _pending.begin();
+ ++i;
+ _pending.insert(i, name);
+ }
+ _pendingUpdated.notify_all();
+ while (inProgress(name)) {
+ checkFailure();
+ _pendingUpdated.wait(lk);
+ }
+}
+
+void FileAllocator::waitUntilFinished() const {
+ if (_failed)
+ return;
+ stdx::unique_lock<stdx::mutex> lk(_pendingMutex);
+ while (_pending.size() != 0)
+ _pendingUpdated.wait(lk);
+}
+
+// TODO: pull this out to per-OS files once they exist
+static bool useSparseFiles(int fd) {
#if defined(__linux__) || defined(__FreeBSD__)
- struct statfs fs_stats;
- int ret = fstatfs(fd, &fs_stats);
- uassert(16062, "fstatfs failed: " + errnoWithDescription(), ret == 0);
+ struct statfs fs_stats;
+ int ret = fstatfs(fd, &fs_stats);
+ uassert(16062, "fstatfs failed: " + errnoWithDescription(), ret == 0);
#endif
#if defined(__linux__)
// these are from <linux/magic.h> but that isn't available on all systems
-# define NFS_SUPER_MAGIC 0x6969
-# define TMPFS_MAGIC 0x01021994
+#define NFS_SUPER_MAGIC 0x6969
+#define TMPFS_MAGIC 0x01021994
- return (fs_stats.f_type == NFS_SUPER_MAGIC)
- || (fs_stats.f_type == TMPFS_MAGIC)
- ;
+ return (fs_stats.f_type == NFS_SUPER_MAGIC) || (fs_stats.f_type == TMPFS_MAGIC);
#elif defined(__FreeBSD__)
- return (str::equals(fs_stats.f_fstypename, "zfs") ||
+ return (str::equals(fs_stats.f_fstypename, "zfs") ||
str::equals(fs_stats.f_fstypename, "nfs") ||
str::equals(fs_stats.f_fstypename, "oldnfs"));
#elif defined(__sun)
- // assume using ZFS which is copy-on-write so no benefit to zero-filling
- // TODO: check which fs we are using like we do elsewhere
- return true;
+ // assume using ZFS which is copy-on-write so no benefit to zero-filling
+ // TODO: check which fs we are using like we do elsewhere
+ return true;
#else
- return false;
+ return false;
#endif
- }
+}
#if defined(_WIN32)
- static bool isFileOnNTFSVolume(int fd) {
- if (!GetVolumeInformationByHandleWFunc) {
- warning() << "Could not retrieve pointer to GetVolumeInformationByHandleW function";
- return false;
- }
-
- HANDLE fileHandle = (HANDLE)_get_osfhandle(fd);
- if (fileHandle == INVALID_HANDLE_VALUE) {
- warning() << "_get_osfhandle() failed with " << _strerror(NULL);
- return false;
- }
+static bool isFileOnNTFSVolume(int fd) {
+ if (!GetVolumeInformationByHandleWFunc) {
+ warning() << "Could not retrieve pointer to GetVolumeInformationByHandleW function";
+ return false;
+ }
- WCHAR fileSystemName[MAX_PATH + 1];
- if (!GetVolumeInformationByHandleWFunc(fileHandle, NULL, 0, NULL, 0, NULL, fileSystemName, sizeof(fileSystemName))) {
- DWORD gle = GetLastError();
- warning() << "GetVolumeInformationByHandleW failed with " << errnoWithDescription(gle);
- return false;
- }
+ HANDLE fileHandle = (HANDLE)_get_osfhandle(fd);
+ if (fileHandle == INVALID_HANDLE_VALUE) {
+ warning() << "_get_osfhandle() failed with " << _strerror(NULL);
+ return false;
+ }
- return lstrcmpW(fileSystemName, L"NTFS") == 0;
+ WCHAR fileSystemName[MAX_PATH + 1];
+ if (!GetVolumeInformationByHandleWFunc(
+ fileHandle, NULL, 0, NULL, 0, NULL, fileSystemName, sizeof(fileSystemName))) {
+ DWORD gle = GetLastError();
+ warning() << "GetVolumeInformationByHandleW failed with " << errnoWithDescription(gle);
+ return false;
}
+
+ return lstrcmpW(fileSystemName, L"NTFS") == 0;
+}
#endif
- void FileAllocator::ensureLength(int fd , long size) {
- // Test running out of disk scenarios
- if (MONGO_FAIL_POINT(allocateDiskFull)) {
- uasserted( 10444 , "File allocation failed due to failpoint.");
- }
+void FileAllocator::ensureLength(int fd, long size) {
+ // Test running out of disk scenarios
+ if (MONGO_FAIL_POINT(allocateDiskFull)) {
+ uasserted(10444, "File allocation failed due to failpoint.");
+ }
#if !defined(_WIN32)
- if (useSparseFiles(fd)) {
- LOG(1) << "using ftruncate to create a sparse file" << endl;
- int ret = ftruncate(fd, size);
- uassert(16063, "ftruncate failed: " + errnoWithDescription(), ret == 0);
- return;
- }
+ if (useSparseFiles(fd)) {
+ LOG(1) << "using ftruncate to create a sparse file" << endl;
+ int ret = ftruncate(fd, size);
+ uassert(16063, "ftruncate failed: " + errnoWithDescription(), ret == 0);
+ return;
+ }
#endif
#if defined(__linux__)
- int ret = posix_fallocate(fd,0,size);
- if ( ret == 0 )
- return;
+ int ret = posix_fallocate(fd, 0, size);
+ if (ret == 0)
+ return;
- log() << "FileAllocator: posix_fallocate failed: " << errnoWithDescription( ret ) << " falling back" << endl;
+ log() << "FileAllocator: posix_fallocate failed: " << errnoWithDescription(ret)
+ << " falling back" << endl;
#endif
- off_t filelen = lseek( fd, 0, SEEK_END );
- if ( filelen < size ) {
- if (filelen != 0) {
- stringstream ss;
- ss << "failure creating new datafile; lseek failed for fd " << fd << " with errno: " << errnoWithDescription();
- uassert( 10440 , ss.str(), filelen == 0 );
- }
- // Check for end of disk.
-
- uassert( 10441 , str::stream() << "Unable to allocate new file of size " << size << ' ' << errnoWithDescription(),
- size - 1 == lseek(fd, size - 1, SEEK_SET) );
- uassert( 10442 , str::stream() << "Unable to allocate new file of size " << size << ' ' << errnoWithDescription(),
- 1 == write(fd, "", 1) );
-
- // File expansion is completed here. Do not do the zeroing out on OS-es where there
- // is no risk of triggering allocation-related bugs such as
- // http://support.microsoft.com/kb/2731284.
- //
- if (!ProcessInfo::isDataFileZeroingNeeded()) {
- return;
- }
+ off_t filelen = lseek(fd, 0, SEEK_END);
+ if (filelen < size) {
+ if (filelen != 0) {
+ stringstream ss;
+ ss << "failure creating new datafile; lseek failed for fd " << fd
+ << " with errno: " << errnoWithDescription();
+ uassert(10440, ss.str(), filelen == 0);
+ }
+ // Check for end of disk.
+
+ uassert(10441,
+ str::stream() << "Unable to allocate new file of size " << size << ' '
+ << errnoWithDescription(),
+ size - 1 == lseek(fd, size - 1, SEEK_SET));
+ uassert(10442,
+ str::stream() << "Unable to allocate new file of size " << size << ' '
+ << errnoWithDescription(),
+ 1 == write(fd, "", 1));
+
+ // File expansion is completed here. Do not do the zeroing out on OS-es where there
+ // is no risk of triggering allocation-related bugs such as
+ // http://support.microsoft.com/kb/2731284.
+ //
+ if (!ProcessInfo::isDataFileZeroingNeeded()) {
+ return;
+ }
#if defined(_WIN32)
- if (!isFileOnNTFSVolume(fd)) {
- log() << "No need to zero out datafile on non-NTFS volume" << endl;
- return;
- }
-#endif
-
- lseek(fd, 0, SEEK_SET);
-
- const long z = 256 * 1024;
- const std::unique_ptr<char[]> buf_holder (new char[z]);
- char* buf = buf_holder.get();
- memset(buf, 0, z);
- long left = size;
- while ( left > 0 ) {
- long towrite = left;
- if ( towrite > z )
- towrite = z;
-
- int written = write( fd , buf , towrite );
- uassert( 10443 , errnoWithPrefix("FileAllocator: file write failed" ), written > 0 );
- left -= written;
- }
+ if (!isFileOnNTFSVolume(fd)) {
+ log() << "No need to zero out datafile on non-NTFS volume" << endl;
+ return;
}
- }
+#endif
- void FileAllocator::checkFailure() {
- if (_failed) {
- // we want to log the problem (diskfull.js expects it) but we do not want to dump a stack tracke
- msgassertedNoTrace( 12520, "new file allocation failure" );
+ lseek(fd, 0, SEEK_SET);
+
+ const long z = 256 * 1024;
+ const std::unique_ptr<char[]> buf_holder(new char[z]);
+ char* buf = buf_holder.get();
+ memset(buf, 0, z);
+ long left = size;
+ while (left > 0) {
+ long towrite = left;
+ if (towrite > z)
+ towrite = z;
+
+ int written = write(fd, buf, towrite);
+ uassert(10443, errnoWithPrefix("FileAllocator: file write failed"), written > 0);
+ left -= written;
}
}
+}
- long FileAllocator::prevSize( const string &name ) const {
- if ( _pendingSize.count( name ) > 0 )
- return _pendingSize[ name ];
- if ( boost::filesystem::exists( name ) )
- return boost::filesystem::file_size( name );
- return -1;
+void FileAllocator::checkFailure() {
+ if (_failed) {
+        // we want to log the problem (diskfull.js expects it) but we do not want to dump a stack trace
+ msgassertedNoTrace(12520, "new file allocation failure");
}
-
- // caller must hold _pendingMutex lock.
- bool FileAllocator::inProgress( const string &name ) const {
- for( list< string >::const_iterator i = _pending.begin(); i != _pending.end(); ++i )
- if ( *i == name )
- return true;
- return false;
- }
-
- string FileAllocator::makeTempFileName( boost::filesystem::path root ) {
- while( 1 ) {
- boost::filesystem::path p = root / "_tmp";
- stringstream ss;
- unsigned long long thisUniqueNumber;
- {
- // increment temporary file name counter
- // TODO: SERVER-6055 -- Unify temporary file name selection
- stdx::lock_guard<SimpleMutex> lk(_uniqueNumberMutex);
- thisUniqueNumber = _uniqueNumber;
- ++_uniqueNumber;
- }
- ss << thisUniqueNumber;
- p /= ss.str();
- string fn = p.string();
- if( !boost::filesystem::exists(p) )
- return fn;
- }
- return "";
- }
-
- void FileAllocator::run( FileAllocator * fa ) {
- setThreadName( "FileAllocator" );
+}
+
+long FileAllocator::prevSize(const string& name) const {
+ if (_pendingSize.count(name) > 0)
+ return _pendingSize[name];
+ if (boost::filesystem::exists(name))
+ return boost::filesystem::file_size(name);
+ return -1;
+}
+
+// caller must hold _pendingMutex lock.
+bool FileAllocator::inProgress(const string& name) const {
+ for (list<string>::const_iterator i = _pending.begin(); i != _pending.end(); ++i)
+ if (*i == name)
+ return true;
+ return false;
+}
+
+string FileAllocator::makeTempFileName(boost::filesystem::path root) {
+ while (1) {
+ boost::filesystem::path p = root / "_tmp";
+ stringstream ss;
+ unsigned long long thisUniqueNumber;
{
- // initialize unique temporary file name counter
+ // increment temporary file name counter
// TODO: SERVER-6055 -- Unify temporary file name selection
stdx::lock_guard<SimpleMutex> lk(_uniqueNumberMutex);
- _uniqueNumber = curTimeMicros64();
+ thisUniqueNumber = _uniqueNumber;
+ ++_uniqueNumber;
}
- while( 1 ) {
+ ss << thisUniqueNumber;
+ p /= ss.str();
+ string fn = p.string();
+ if (!boost::filesystem::exists(p))
+ return fn;
+ }
+ return "";
+}
+
+void FileAllocator::run(FileAllocator* fa) {
+ setThreadName("FileAllocator");
+ {
+ // initialize unique temporary file name counter
+ // TODO: SERVER-6055 -- Unify temporary file name selection
+ stdx::lock_guard<SimpleMutex> lk(_uniqueNumberMutex);
+ _uniqueNumber = curTimeMicros64();
+ }
+ while (1) {
+ {
+ stdx::unique_lock<stdx::mutex> lk(fa->_pendingMutex);
+ if (fa->_pending.size() == 0)
+ fa->_pendingUpdated.wait(lk);
+ }
+ while (1) {
+ string name;
+ long size = 0;
{
- stdx::unique_lock<stdx::mutex> lk( fa->_pendingMutex );
- if ( fa->_pending.size() == 0 )
- fa->_pendingUpdated.wait(lk);
+ stdx::lock_guard<stdx::mutex> lk(fa->_pendingMutex);
+ if (fa->_pending.size() == 0)
+ break;
+ name = fa->_pending.front();
+ size = fa->_pendingSize[name];
}
- while( 1 ) {
- string name;
- long size = 0;
- {
- stdx::lock_guard<stdx::mutex> lk( fa->_pendingMutex );
- if ( fa->_pending.size() == 0 )
- break;
- name = fa->_pending.front();
- size = fa->_pendingSize[ name ];
- }
- string tmp;
- long fd = 0;
- try {
- log() << "allocating new datafile " << name << ", filling with zeroes..." << endl;
-
- boost::filesystem::path parent = ensureParentDirCreated(name);
- tmp = fa->makeTempFileName( parent );
- ensureParentDirCreated(tmp);
+ string tmp;
+ long fd = 0;
+ try {
+ log() << "allocating new datafile " << name << ", filling with zeroes..." << endl;
+
+ boost::filesystem::path parent = ensureParentDirCreated(name);
+ tmp = fa->makeTempFileName(parent);
+ ensureParentDirCreated(tmp);
#if defined(_WIN32)
- fd = _open( tmp.c_str(), _O_RDWR | _O_CREAT | O_NOATIME, _S_IREAD | _S_IWRITE );
+ fd = _open(tmp.c_str(), _O_RDWR | _O_CREAT | O_NOATIME, _S_IREAD | _S_IWRITE);
#else
- fd = open(tmp.c_str(), O_CREAT | O_RDWR | O_NOATIME, S_IRUSR | S_IWUSR);
+ fd = open(tmp.c_str(), O_CREAT | O_RDWR | O_NOATIME, S_IRUSR | S_IWUSR);
#endif
- if ( fd < 0 ) {
- log() << "FileAllocator: couldn't create " << name << " (" << tmp << ") " << errnoWithDescription() << endl;
- uasserted(10439, "");
- }
+ if (fd < 0) {
+ log() << "FileAllocator: couldn't create " << name << " (" << tmp << ") "
+ << errnoWithDescription() << endl;
+ uasserted(10439, "");
+ }
#if defined(POSIX_FADV_DONTNEED)
- if( posix_fadvise(fd, 0, size, POSIX_FADV_DONTNEED) ) {
- log() << "warning: posix_fadvise fails " << name << " (" << tmp << ") " << errnoWithDescription() << endl;
- }
+ if (posix_fadvise(fd, 0, size, POSIX_FADV_DONTNEED)) {
+ log() << "warning: posix_fadvise fails " << name << " (" << tmp << ") "
+ << errnoWithDescription() << endl;
+ }
#endif
- Timer t;
+ Timer t;
- /* make sure the file is the full desired length */
- ensureLength( fd , size );
+ /* make sure the file is the full desired length */
+ ensureLength(fd, size);
- close( fd );
- fd = 0;
+ close(fd);
+ fd = 0;
- if( rename(tmp.c_str(), name.c_str()) ) {
- const string& errStr = errnoWithDescription();
- const string& errMessage = str::stream()
- << "error: couldn't rename " << tmp
- << " to " << name << ' ' << errStr;
- msgasserted(13653, errMessage);
- }
- flushMyDirectory(name);
-
- log() << "done allocating datafile " << name << ", "
- << "size: " << size/1024/1024 << "MB, "
- << " took " << ((double)t.millis())/1000.0 << " secs"
- << endl;
-
- // no longer in a failed state. allow new writers.
- fa->_failed = false;
+ if (rename(tmp.c_str(), name.c_str())) {
+ const string& errStr = errnoWithDescription();
+ const string& errMessage = str::stream() << "error: couldn't rename " << tmp
+ << " to " << name << ' ' << errStr;
+ msgasserted(13653, errMessage);
}
- catch ( const std::exception& e ) {
- log() << "error: failed to allocate new file: " << name
- << " size: " << size << ' ' << e.what()
- << ". will try again in 10 seconds" << endl;
- if ( fd > 0 )
- close( fd );
- try {
- if ( ! tmp.empty() )
- boost::filesystem::remove( tmp );
- boost::filesystem::remove( name );
- } catch ( const std::exception& e ) {
- log() << "error removing files: " << e.what() << endl;
- }
-
- {
- stdx::lock_guard<stdx::mutex> lk(fa->_pendingMutex);
- fa->_failed = true;
-
- // TODO: Should we remove the file from pending?
- fa->_pendingUpdated.notify_all();
- }
-
-
- sleepsecs(10);
- continue;
+ flushMyDirectory(name);
+
+ log() << "done allocating datafile " << name << ", "
+ << "size: " << size / 1024 / 1024 << "MB, "
+ << " took " << ((double)t.millis()) / 1000.0 << " secs" << endl;
+
+ // no longer in a failed state. allow new writers.
+ fa->_failed = false;
+ } catch (const std::exception& e) {
+ log() << "error: failed to allocate new file: " << name << " size: " << size << ' '
+ << e.what() << ". will try again in 10 seconds" << endl;
+ if (fd > 0)
+ close(fd);
+ try {
+ if (!tmp.empty())
+ boost::filesystem::remove(tmp);
+ boost::filesystem::remove(name);
+ } catch (const std::exception& e) {
+ log() << "error removing files: " << e.what() << endl;
}
{
- stdx::lock_guard<stdx::mutex> lk( fa->_pendingMutex );
- fa->_pendingSize.erase( name );
- fa->_pending.pop_front();
+ stdx::lock_guard<stdx::mutex> lk(fa->_pendingMutex);
+ fa->_failed = true;
+
+ // TODO: Should we remove the file from pending?
fa->_pendingUpdated.notify_all();
}
+
+
+ sleepsecs(10);
+ continue;
+ }
+
+ {
+ stdx::lock_guard<stdx::mutex> lk(fa->_pendingMutex);
+ fa->_pendingSize.erase(name);
+ fa->_pending.pop_front();
+ fa->_pendingUpdated.notify_all();
}
}
}
+}
- FileAllocator* FileAllocator::_instance = 0;
+FileAllocator* FileAllocator::_instance = 0;
- FileAllocator* FileAllocator::get(){
- if ( ! _instance )
- _instance = new FileAllocator();
- return _instance;
- }
+FileAllocator* FileAllocator::get() {
+ if (!_instance)
+ _instance = new FileAllocator();
+ return _instance;
+}
-} // namespace mongo
+} // namespace mongo
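
FileAllocator::ensureLength() above takes the cheap paths when it can (ftruncate for sparse-friendly filesystems, posix_fallocate on Linux) and only falls back to extending the file and streaming zeros when it must. A POSIX-only sketch of that fallback path, assuming neither fast path is available; the helper name and exception-based error handling are simplifications, not the mongo code:

    #include <cstring>
    #include <fcntl.h>
    #include <memory>
    #include <stdexcept>
    #include <string>
    #include <sys/stat.h>
    #include <unistd.h>

    // Extend the file to `size` bytes, then overwrite it with zeros in 256KB blocks.
    void zeroFill(const std::string& path, long size) {
        int fd = ::open(path.c_str(), O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
        if (fd < 0)
            throw std::runtime_error("open failed");
        // Writing one byte at offset size-1 detects a full disk before the long fill starts.
        if (::lseek(fd, size - 1, SEEK_SET) != size - 1 || ::write(fd, "", 1) != 1) {
            ::close(fd);
            throw std::runtime_error("could not extend file; disk may be full");
        }
        ::lseek(fd, 0, SEEK_SET);
        const long blk = 256 * 1024;
        std::unique_ptr<char[]> buf(new char[blk]);
        std::memset(buf.get(), 0, blk);
        for (long left = size; left > 0;) {
            long n = left < blk ? left : blk;
            ssize_t written = ::write(fd, buf.get(), n);
            if (written <= 0) {
                ::close(fd);
                throw std::runtime_error("zero-fill write failed");
            }
            left -= written;
        }
        ::fsync(fd);
        ::close(fd);
    }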
diff --git a/src/mongo/db/storage/mmap_v1/file_allocator.h b/src/mongo/db/storage/mmap_v1/file_allocator.h
index d3f9b6cceda..e3e4ad55881 100644
--- a/src/mongo/db/storage/mmap_v1/file_allocator.h
+++ b/src/mongo/db/storage/mmap_v1/file_allocator.h
@@ -37,73 +37,71 @@
namespace mongo {
+/*
+ * Handles allocation of contiguous files on disk. Allocation may be
+ * requested asynchronously or synchronously.
+ * singleton
+ */
+class FileAllocator {
+ MONGO_DISALLOW_COPYING(FileAllocator);
/*
- * Handles allocation of contiguous files on disk. Allocation may be
- * requested asynchronously or synchronously.
- * singleton
+ * The public functions may not be called concurrently. The allocation
+ * functions may be called multiple times per file, but only the first
+ * size specified per file will be used.
+ */
+public:
+ void start();
+
+ /**
+ * May be called if file exists. If file exists, or its allocation has
+ * been requested, size is updated to match existing file size.
*/
- class FileAllocator {
- MONGO_DISALLOW_COPYING(FileAllocator);
- /*
- * The public functions may not be called concurrently. The allocation
- * functions may be called multiple times per file, but only the first
- * size specified per file will be used.
- */
- public:
- void start();
-
- /**
- * May be called if file exists. If file exists, or its allocation has
- * been requested, size is updated to match existing file size.
- */
- void requestAllocation( const std::string &name, long &size );
+ void requestAllocation(const std::string& name, long& size);
- /**
- * Returns when file has been allocated. If file exists, size is
- * updated to match existing file size.
- */
- void allocateAsap( const std::string &name, unsigned long long &size );
-
- void waitUntilFinished() const;
+ /**
+ * Returns when file has been allocated. If file exists, size is
+ * updated to match existing file size.
+ */
+ void allocateAsap(const std::string& name, unsigned long long& size);
- static void ensureLength(int fd, long size);
+ void waitUntilFinished() const;
- /** @return the singleton */
- static FileAllocator * get();
-
- private:
+ static void ensureLength(int fd, long size);
- FileAllocator();
+ /** @return the singleton */
+ static FileAllocator* get();
- void checkFailure();
+private:
+ FileAllocator();
- // caller must hold pendingMutex_ lock. Returns size if allocated or
- // allocation requested, -1 otherwise.
- long prevSize( const std::string &name ) const;
+ void checkFailure();
- // caller must hold pendingMutex_ lock.
- bool inProgress( const std::string &name ) const;
+ // caller must hold pendingMutex_ lock. Returns size if allocated or
+ // allocation requested, -1 otherwise.
+ long prevSize(const std::string& name) const;
- /** called from the worked thread */
- static void run( FileAllocator * fa );
+ // caller must hold pendingMutex_ lock.
+ bool inProgress(const std::string& name) const;
- // generate a unique name for temporary files
- std::string makeTempFileName( boost::filesystem::path root );
+    /** called from the worker thread */
+ static void run(FileAllocator* fa);
- mutable stdx::mutex _pendingMutex;
- mutable stdx::condition_variable _pendingUpdated;
+ // generate a unique name for temporary files
+ std::string makeTempFileName(boost::filesystem::path root);
- std::list< std::string > _pending;
- mutable std::map< std::string, long > _pendingSize;
+ mutable stdx::mutex _pendingMutex;
+ mutable stdx::condition_variable _pendingUpdated;
- // unique number for temporary files
- static unsigned long long _uniqueNumber;
+ std::list<std::string> _pending;
+ mutable std::map<std::string, long> _pendingSize;
- bool _failed;
+ // unique number for temporary files
+ static unsigned long long _uniqueNumber;
- static FileAllocator* _instance;
+ bool _failed;
- };
+ static FileAllocator* _instance;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
index dfe51554836..934f9807628 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.cpp
@@ -40,117 +40,117 @@
namespace mongo {
- RecordData HeapRecordStoreBtree::dataFor(OperationContext* txn, const RecordId& loc) const {
- Records::const_iterator it = _records.find(loc);
- invariant(it != _records.end());
- const MmapV1RecordHeader& rec = it->second;
-
- return RecordData(rec.data.get(), rec.dataSize);
- }
-
- bool HeapRecordStoreBtree::findRecord(OperationContext* txn,
- const RecordId& loc, RecordData* out) const {
- Records::const_iterator it = _records.find(loc);
- if ( it == _records.end() )
- return false;
- const MmapV1RecordHeader& rec = it->second;
- *out = RecordData(rec.data.get(), rec.dataSize);
- return true;
- }
-
- void HeapRecordStoreBtree::deleteRecord(OperationContext* txn, const RecordId& loc) {
- invariant(_records.erase(loc) == 1);
- }
-
- StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota) {
- MmapV1RecordHeader rec(len);
- memcpy(rec.data.get(), data, len);
-
- const RecordId loc = allocateLoc();
- _records[loc] = rec;
-
- HeapRecordStoreBtreeRecoveryUnit::notifyInsert( txn, this, loc );
-
- return StatusWith<RecordId>(loc);
+RecordData HeapRecordStoreBtree::dataFor(OperationContext* txn, const RecordId& loc) const {
+ Records::const_iterator it = _records.find(loc);
+ invariant(it != _records.end());
+ const MmapV1RecordHeader& rec = it->second;
+
+ return RecordData(rec.data.get(), rec.dataSize);
+}
+
+bool HeapRecordStoreBtree::findRecord(OperationContext* txn,
+ const RecordId& loc,
+ RecordData* out) const {
+ Records::const_iterator it = _records.find(loc);
+ if (it == _records.end())
+ return false;
+ const MmapV1RecordHeader& rec = it->second;
+ *out = RecordData(rec.data.get(), rec.dataSize);
+ return true;
+}
+
+void HeapRecordStoreBtree::deleteRecord(OperationContext* txn, const RecordId& loc) {
+ invariant(_records.erase(loc) == 1);
+}
+
+StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota) {
+ MmapV1RecordHeader rec(len);
+ memcpy(rec.data.get(), data, len);
+
+ const RecordId loc = allocateLoc();
+ _records[loc] = rec;
+
+ HeapRecordStoreBtreeRecoveryUnit::notifyInsert(txn, this, loc);
+
+ return StatusWith<RecordId>(loc);
+}
+
+StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota) {
+ MmapV1RecordHeader rec(doc->documentSize());
+ doc->writeDocument(rec.data.get());
+
+ const RecordId loc = allocateLoc();
+ _records[loc] = rec;
+
+ HeapRecordStoreBtreeRecoveryUnit::notifyInsert(txn, this, loc);
+
+ return StatusWith<RecordId>(loc);
+}
+
+RecordId HeapRecordStoreBtree::allocateLoc() {
+ const int64_t id = _nextId++;
+ // This is a hack, but both the high and low order bits of RecordId offset must be 0, and the
+ // file must fit in 23 bits. This gives us a total of 30 + 23 == 53 bits.
+ invariant(id < (1LL << 53));
+ RecordId dl(int(id >> 30), int((id << 1) & ~(1 << 31)));
+ invariant((dl.repr() & 0x1) == 0);
+ return dl;
+}
+
+Status HeapRecordStoreBtree::touch(OperationContext* txn, BSONObjBuilder* output) const {
+ // not currently called from the tests, but called from btree_logic.h
+ return Status::OK();
+}
+
+// ---------------------------
+
+void HeapRecordStoreBtreeRecoveryUnit::commitUnitOfWork() {
+ _insertions.clear();
+ _mods.clear();
+}
+
+void HeapRecordStoreBtreeRecoveryUnit::abortUnitOfWork() {
+ // reverse in case we write same area twice
+ for (size_t i = _mods.size(); i > 0; i--) {
+ ModEntry& e = _mods[i - 1];
+ memcpy(e.data, e.old.get(), e.len);
}
- StatusWith<RecordId> HeapRecordStoreBtree::insertRecord(OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota) {
- MmapV1RecordHeader rec(doc->documentSize());
- doc->writeDocument(rec.data.get());
+ invariant(_insertions.size() == 0); // todo
+}
- const RecordId loc = allocateLoc();
- _records[loc] = rec;
+void* HeapRecordStoreBtreeRecoveryUnit::writingPtr(void* data, size_t len) {
+ ModEntry e = {data, len, boost::shared_array<char>(new char[len])};
+ memcpy(e.old.get(), data, len);
+ _mods.push_back(e);
+ return data;
+}
- HeapRecordStoreBtreeRecoveryUnit::notifyInsert( txn, this, loc );
+void HeapRecordStoreBtreeRecoveryUnit::notifyInsert(HeapRecordStoreBtree* rs, const RecordId& loc) {
+ InsertEntry e = {rs, loc};
+ _insertions.push_back(e);
+}
- return StatusWith<RecordId>(loc);
- }
+void HeapRecordStoreBtreeRecoveryUnit::notifyInsert(OperationContext* ctx,
+ HeapRecordStoreBtree* rs,
+ const RecordId& loc) {
+ if (!ctx)
+ return;
- RecordId HeapRecordStoreBtree::allocateLoc() {
- const int64_t id = _nextId++;
- // This is a hack, but both the high and low order bits of RecordId offset must be 0, and the
- // file must fit in 23 bits. This gives us a total of 30 + 23 == 53 bits.
- invariant(id < (1LL << 53));
- RecordId dl(int(id >> 30), int((id << 1) & ~(1<<31)));
- invariant( (dl.repr() & 0x1) == 0 );
- return dl;
- }
-
- Status HeapRecordStoreBtree::touch(OperationContext* txn, BSONObjBuilder* output) const {
- // not currently called from the tests, but called from btree_logic.h
- return Status::OK();
- }
+ // This dynamic_cast has semantics, should change ideally.
+ HeapRecordStoreBtreeRecoveryUnit* ru =
+ dynamic_cast<HeapRecordStoreBtreeRecoveryUnit*>(ctx->recoveryUnit());
- // ---------------------------
+ if (!ru)
+ return;
- void HeapRecordStoreBtreeRecoveryUnit::commitUnitOfWork() {
- _insertions.clear();
- _mods.clear();
- }
-
- void HeapRecordStoreBtreeRecoveryUnit::abortUnitOfWork() {
- // reverse in case we write same area twice
- for ( size_t i = _mods.size(); i > 0; i-- ) {
- ModEntry& e = _mods[i-1];
- memcpy( e.data, e.old.get(), e.len );
- }
-
- invariant( _insertions.size() == 0 ); // todo
- }
-
- void* HeapRecordStoreBtreeRecoveryUnit::writingPtr(void* data, size_t len) {
- ModEntry e = { data, len, boost::shared_array<char>( new char[len] ) };
- memcpy( e.old.get(), data, len );
- _mods.push_back( e );
- return data;
- }
-
- void HeapRecordStoreBtreeRecoveryUnit::notifyInsert( HeapRecordStoreBtree* rs,
- const RecordId& loc ) {
- InsertEntry e = { rs, loc };
- _insertions.push_back( e );
- }
-
- void HeapRecordStoreBtreeRecoveryUnit::notifyInsert( OperationContext* ctx,
- HeapRecordStoreBtree* rs,
- const RecordId& loc ) {
- if ( !ctx )
- return;
-
- // This dynamic_cast has semantics, should change ideally.
- HeapRecordStoreBtreeRecoveryUnit* ru =
- dynamic_cast<HeapRecordStoreBtreeRecoveryUnit*>( ctx->recoveryUnit() );
-
- if ( !ru )
- return;
-
- ru->notifyInsert( rs, loc );
- }
+ ru->notifyInsert(rs, loc);
+}
-} // namespace mongo
+} // namespace mongo
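
The allocateLoc() hack above packs a monotonically increasing counter into a (file, offset) pair: the low 30 bits become the offset, shifted left one so bit 0 stays clear, and the remaining bits become the file number, which must fit in 23 bits, hence the 30 + 23 == 53 limit. A standalone sketch of the same arithmetic, using a hypothetical FakeLoc struct instead of the real RecordId:

#include <cassert>
#include <cstdint>

// Hypothetical FakeLoc stands in for the real RecordId; only the bit layout
// from allocateLoc() above is reproduced.
struct FakeLoc {
    int file;    // top 23 bits of the counter
    int offset;  // low 30 bits of the counter, shifted so bit 0 stays clear
};

FakeLoc packCounter(std::int64_t id) {
    assert(id < (1LL << 53));  // 30 offset bits + 23 file bits == 53 usable bits
    FakeLoc loc;
    loc.file = static_cast<int>(id >> 30);
    loc.offset = static_cast<int>((id << 1) & 0x7FFFFFFF);  // same mask as ~(1 << 31) above
    assert((loc.offset & 0x1) == 0);  // the low bit of the offset is always 0
    return loc;
}
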
diff --git a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
index c44dcf3f473..aa193549440 100644
--- a/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
+++ b/src/mongo/db/storage/mmap_v1/heap_record_store_btree.h
@@ -38,174 +38,190 @@
namespace mongo {
- /**
- * A RecordStore that stores all data on the heap. This implementation contains only the
- * functionality necessary to test btree.
- */
- class HeapRecordStoreBtree : public RecordStore {
- struct MmapV1RecordHeader;
-
- public:
- // RecordId(0,0) isn't valid for records.
- explicit HeapRecordStoreBtree(StringData ns): RecordStore(ns), _nextId(1) { }
-
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
-
- virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const;
-
- virtual void deleteRecord(OperationContext* txn, const RecordId& dl);
-
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota);
-
- virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota);
-
- virtual long long numRecords( OperationContext* txn ) const { return _records.size(); }
-
- virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
-
- // public methods below here are not necessary to test btree, and will crash when called.
-
- // ------------------------------
-
- virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
- const RecordId& oldLocation,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier) {
- invariant(false);
- }
-
- virtual bool updateWithDamagesSupported() const {
- return true;
- }
-
- virtual Status updateWithDamages(OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages) {
- invariant(false);
- }
-
- std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final {
- invariant(false);
- }
-
-
- virtual Status truncate(OperationContext* txn) { invariant(false); }
-
- virtual void temp_cappedTruncateAfter(OperationContext* txn,
- RecordId end,
- bool inclusive) {
- invariant(false);
- }
-
- virtual bool compactSupported() const { invariant(false); }
-
- virtual Status validate(OperationContext* txn,
- bool full,
- bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results, BSONObjBuilder* output) {
- invariant(false);
- }
+/**
+ * A RecordStore that stores all data on the heap. This implementation contains only the
+ * functionality necessary to test btree.
+ */
+class HeapRecordStoreBtree : public RecordStore {
+ struct MmapV1RecordHeader;
+
+public:
+ // RecordId(0,0) isn't valid for records.
+ explicit HeapRecordStoreBtree(StringData ns) : RecordStore(ns), _nextId(1) {}
+
+ virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
+
+ virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const;
+
+ virtual void deleteRecord(OperationContext* txn, const RecordId& dl);
+
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota);
+
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota);
+
+ virtual long long numRecords(OperationContext* txn) const {
+ return _records.size();
+ }
+
+ virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
+
+ // public methods below here are not necessary to test btree, and will crash when called.
+
+ // ------------------------------
+
+ virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* data,
+ int len,
+ bool enforceQuota,
+ UpdateNotifier* notifier) {
+ invariant(false);
+ }
+
+ virtual bool updateWithDamagesSupported() const {
+ return true;
+ }
+
+ virtual Status updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages) {
+ invariant(false);
+ }
+
+ std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final {
+ invariant(false);
+ }
+
+
+ virtual Status truncate(OperationContext* txn) {
+ invariant(false);
+ }
+
+ virtual void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
+ invariant(false);
+ }
+
+ virtual bool compactSupported() const {
+ invariant(false);
+ }
+
+ virtual Status validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output) {
+ invariant(false);
+ }
+
+ virtual void appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const {
+ invariant(false);
+ }
+
+ virtual void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
+ invariant(false);
+ }
+
+ virtual int64_t storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int infoLevel = 0) const {
+ invariant(false);
+ }
+
+ virtual long long dataSize(OperationContext* txn) const {
+ invariant(false);
+ }
+
+ virtual MmapV1RecordHeader* recordFor(const RecordId& loc) const {
+ invariant(false);
+ }
+
+ virtual bool isCapped() const {
+ invariant(false);
+ }
+
+ virtual const char* name() const {
+ invariant(false);
+ }
+
+ virtual void updateStatsAfterRepair(OperationContext* txn,
+ long long numRecords,
+ long long dataSize) {
+ invariant(false);
+ }
+ // more things that we actually care about below
+
+private:
+ struct MmapV1RecordHeader {
+ MmapV1RecordHeader() : dataSize(-1), data() {}
+ explicit MmapV1RecordHeader(int size) : dataSize(size), data(new char[size]) {}
+
+ int dataSize;
+ boost::shared_array<char> data;
+ };
- virtual void appendCustomStats(OperationContext* txn,
- BSONObjBuilder* result,
- double scale) const {
- invariant(false);
- }
+ RecordId allocateLoc();
- virtual void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
- invariant(false);
- }
+ typedef std::map<RecordId, HeapRecordStoreBtree::MmapV1RecordHeader> Records;
+ Records _records;
+ int64_t _nextId;
+};
- virtual int64_t storageSize(OperationContext* txn,
- BSONObjBuilder* extraInfo = NULL,
- int infoLevel = 0) const {
- invariant(false);
- }
+/**
+ * A RecoveryUnit for HeapRecordStoreBtree, this is for testing btree only.
+ */
+class HeapRecordStoreBtreeRecoveryUnit : public RecoveryUnit {
+public:
+ void beginUnitOfWork(OperationContext* opCtx) final{};
+ void commitUnitOfWork() final;
+ void abortUnitOfWork() final;
- virtual long long dataSize(OperationContext* txn) const { invariant(false); }
+ virtual bool waitUntilDurable() {
+ return true;
+ }
- virtual MmapV1RecordHeader* recordFor(const RecordId& loc) const { invariant(false); }
+ virtual void abandonSnapshot() {}
- virtual bool isCapped() const { invariant(false); }
+ virtual void registerChange(Change* change) {
+ change->commit();
+ delete change;
+ }
- virtual const char* name() const { invariant(false); }
+ virtual void* writingPtr(void* data, size_t len);
- virtual void updateStatsAfterRepair(OperationContext* txn,
- long long numRecords,
- long long dataSize) {
- invariant(false);
- }
- // more things that we actually care about below
+ virtual void setRollbackWritesDisabled() {}
- private:
- struct MmapV1RecordHeader {
- MmapV1RecordHeader(): dataSize(-1), data() { }
- explicit MmapV1RecordHeader(int size): dataSize(size), data(new char[size]) { }
+ virtual SnapshotId getSnapshotId() const {
+ return SnapshotId();
+ }
- int dataSize;
- boost::shared_array<char> data;
- };
+ // -----------------------
- RecordId allocateLoc();
+ void notifyInsert(HeapRecordStoreBtree* rs, const RecordId& loc);
+ static void notifyInsert(OperationContext* ctx, HeapRecordStoreBtree* rs, const RecordId& loc);
- typedef std::map<RecordId, HeapRecordStoreBtree::MmapV1RecordHeader> Records;
- Records _records;
- int64_t _nextId;
+private:
+ struct InsertEntry {
+ HeapRecordStoreBtree* rs;
+ RecordId loc;
};
+ std::vector<InsertEntry> _insertions;
- /**
- * A RecoveryUnit for HeapRecordStoreBtree, this is for testing btree only.
- */
- class HeapRecordStoreBtreeRecoveryUnit : public RecoveryUnit {
- public:
- void beginUnitOfWork(OperationContext* opCtx) final { };
- void commitUnitOfWork() final;
- void abortUnitOfWork() final;
-
- virtual bool waitUntilDurable() { return true; }
-
- virtual void abandonSnapshot() {}
-
- virtual void registerChange(Change* change) {
- change->commit();
- delete change;
- }
-
- virtual void* writingPtr(void* data, size_t len);
-
- virtual void setRollbackWritesDisabled() {}
-
- virtual SnapshotId getSnapshotId() const { return SnapshotId(); }
-
- // -----------------------
-
- void notifyInsert( HeapRecordStoreBtree* rs, const RecordId& loc );
- static void notifyInsert( OperationContext* ctx,
- HeapRecordStoreBtree* rs, const RecordId& loc );
-
- private:
- struct InsertEntry {
- HeapRecordStoreBtree* rs;
- RecordId loc;
- };
- std::vector<InsertEntry> _insertions;
-
- struct ModEntry {
- void* data;
- size_t len;
- boost::shared_array<char> old;
- };
- std::vector<ModEntry> _mods;
+ struct ModEntry {
+ void* data;
+ size_t len;
+ boost::shared_array<char> old;
};
+ std::vector<ModEntry> _mods;
+};
-} // namespace mongo
+} // namespace mongo
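
HeapRecordStoreBtreeRecoveryUnit above implements rollback by snapshotting bytes in writingPtr() and copying them back, newest first, in abortUnitOfWork(). A minimal standalone sketch of that undo scheme, with hypothetical names and std::vector<char> in place of boost::shared_array:

#include <cstddef>
#include <cstring>
#include <vector>

// One undo record per declared write; std::vector<char> stands in for the
// boost::shared_array snapshot used above.
struct Mod {
    void* data;             // where the caller is about to write
    std::vector<char> old;  // the bytes as they were before the write
};

class UndoLog {
public:
    // Equivalent of writingPtr(): snapshot the region, then let the caller write.
    void* writing(void* data, std::size_t len) {
        Mod m;
        m.data = data;
        m.old.assign(static_cast<char*>(data), static_cast<char*>(data) + len);
        _mods.push_back(m);
        return data;
    }

    // Equivalent of abortUnitOfWork(): restore newest-first, so a region that
    // was written twice ends up with its oldest bytes.
    void rollback() {
        for (std::size_t i = _mods.size(); i > 0; i--)
            std::memcpy(_mods[i - 1].data, _mods[i - 1].old.data(), _mods[i - 1].old.size());
        _mods.clear();
    }

    // Equivalent of commitUnitOfWork(): nothing left to undo.
    void commit() {
        _mods.clear();
    }

private:
    std::vector<Mod> _mods;
};
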
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index 8c29741ed7e..0f21961d459 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -54,102 +54,108 @@
namespace mongo {
- using std::max;
- using std::min;
- using std::string;
- using std::stringstream;
+using std::max;
+using std::min;
+using std::string;
+using std::stringstream;
- namespace dur {
- boost::filesystem::path getJournalDir();
- }
+namespace dur {
+boost::filesystem::path getJournalDir();
+}
- // Testing-only, enabled via command line
- class JournalLatencyTestCmd : public Command {
- public:
- JournalLatencyTestCmd() : Command( "journalLatencyTest" ) {}
+// Testing-only, enabled via command line
+class JournalLatencyTestCmd : public Command {
+public:
+ JournalLatencyTestCmd() : Command("journalLatencyTest") {}
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual void help(stringstream& h) const { h << "test how long to write and fsync to a test file in the journal/ directory"; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- boost::filesystem::path p = dur::getJournalDir();
- p /= "journalLatencyTest";
-
- // remove file if already present
- try {
- boost::filesystem::remove(p);
- }
- catch(...) { }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual void help(stringstream& h) const {
+ h << "test how long to write and fsync to a test file in the journal/ directory";
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::filesystem::path p = dur::getJournalDir();
+ p /= "journalLatencyTest";
- BSONObjBuilder bb[2];
- for( int pass = 0; pass < 2; pass++ ) {
- LogFile f(p.string());
- AlignedBuilder b(1024 * 1024);
- {
- Timer t;
- for( int i = 0 ; i < 100; i++ ) {
- f.synchronousAppend(b.buf(), 8192);
- }
- bb[pass].append("8KB", t.millis() / 100.0);
- }
- {
- const int N = 50;
- Timer t2;
- long long x = 0;
- for( int i = 0 ; i < N; i++ ) {
- Timer t;
- f.synchronousAppend(b.buf(), 8192);
- x += t.micros();
- sleepmillis(4);
- }
- long long y = t2.micros() - 4*N*1000;
- // not really trusting the timer granularity on all platforms so whichever is higher of x and y
- bb[pass].append("8KBWithPauses", max(x,y) / (N*1000.0));
+ // remove file if already present
+ try {
+ boost::filesystem::remove(p);
+ } catch (...) {
+ }
+
+ BSONObjBuilder bb[2];
+ for (int pass = 0; pass < 2; pass++) {
+ LogFile f(p.string());
+ AlignedBuilder b(1024 * 1024);
+ {
+ Timer t;
+ for (int i = 0; i < 100; i++) {
+ f.synchronousAppend(b.buf(), 8192);
}
- {
+ bb[pass].append("8KB", t.millis() / 100.0);
+ }
+ {
+ const int N = 50;
+ Timer t2;
+ long long x = 0;
+ for (int i = 0; i < N; i++) {
Timer t;
- for( int i = 0 ; i < 20; i++ ) {
- f.synchronousAppend(b.buf(), 1024 * 1024);
- }
- bb[pass].append("1MB", t.millis() / 20.0);
+ f.synchronousAppend(b.buf(), 8192);
+ x += t.micros();
+ sleepmillis(4);
}
- // second time around, we are prealloced.
+ long long y = t2.micros() - 4 * N * 1000;
+ // not really trusting the timer granularity on all platforms so whichever is higher of x and y
+ bb[pass].append("8KBWithPauses", max(x, y) / (N * 1000.0));
}
- result.append("timeMillis", bb[0].obj());
- result.append("timeMillisWithPrealloc", bb[1].obj());
-
- try {
- remove(p);
- }
- catch(...) { }
-
- try {
- result.append("onSamePartition", onSamePartition(dur::getJournalDir().string(),
- storageGlobalParams.dbpath));
+ {
+ Timer t;
+ for (int i = 0; i < 20; i++) {
+ f.synchronousAppend(b.buf(), 1024 * 1024);
+ }
+ bb[pass].append("1MB", t.millis() / 20.0);
}
- catch(...) { }
-
- return 1;
- }
- };
- MONGO_INITIALIZER(RegisterJournalLatencyTestCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new JournalLatencyTestCmd();
+ // second time around, we are prealloced.
}
- return Status::OK();
- }
+ result.append("timeMillis", bb[0].obj());
+ result.append("timeMillisWithPrealloc", bb[1].obj());
+ try {
+ remove(p);
+ } catch (...) {
+ }
+ try {
+ result.append(
+ "onSamePartition",
+ onSamePartition(dur::getJournalDir().string(), storageGlobalParams.dbpath));
+ } catch (...) {
+ }
+ return 1;
+ }
+};
+MONGO_INITIALIZER(RegisterJournalLatencyTestCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new JournalLatencyTestCmd();
+ }
+ return Status::OK();
+}
}
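
The "8KBWithPauses" pass above derives two estimates of per-append latency: x sums a per-call timer, while y subtracts the known sleep time from the wall clock, and the larger of the two is reported because fine-grained timer granularity is not trusted on every platform. A standalone sketch of that measurement, where doOneAppend() is a hypothetical stand-in for f.synchronousAppend(...):

#include <algorithm>
#include <chrono>
#include <thread>

// Two latency estimates for N timed appends separated by 4 ms pauses; report
// whichever is larger, mirroring the 8KBWithPauses logic above.
double timedAppendsWithPauses(int N, void (*doOneAppend)()) {
    using Clock = std::chrono::steady_clock;
    long long x = 0;  // microseconds measured inside each append
    const auto start = Clock::now();
    for (int i = 0; i < N; i++) {
        const auto t0 = Clock::now();
        doOneAppend();
        x += std::chrono::duration_cast<std::chrono::microseconds>(Clock::now() - t0).count();
        std::this_thread::sleep_for(std::chrono::milliseconds(4));
    }
    const long long total =
        std::chrono::duration_cast<std::chrono::microseconds>(Clock::now() - start).count();
    const long long y = total - 4LL * N * 1000;  // wall clock minus the deliberate pauses
    return std::max(x, y) / (N * 1000.0);        // average milliseconds per append
}
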
diff --git a/src/mongo/db/storage/mmap_v1/logfile.cpp b/src/mongo/db/storage/mmap_v1/logfile.cpp
index 8aa5e32626f..62c3b61bc73 100644
--- a/src/mongo/db/storage/mmap_v1/logfile.cpp
+++ b/src/mongo/db/storage/mmap_v1/logfile.cpp
@@ -53,83 +53,84 @@ using std::string;
namespace mongo {
- LogFile::LogFile(const std::string& name, bool readwrite) : _name(name) {
- _fd = CreateFile(
- toNativeString(name.c_str()).c_str(),
- (readwrite?GENERIC_READ:0)|GENERIC_WRITE,
- FILE_SHARE_READ,
- NULL,
- OPEN_ALWAYS,
- FILE_FLAG_NO_BUFFERING,
- NULL);
- if( _fd == INVALID_HANDLE_VALUE ) {
- DWORD e = GetLastError();
- uasserted(13518, str::stream() << "couldn't open file " << name << " for writing " << errnoWithDescription(e));
- }
- SetFilePointer(_fd, 0, 0, FILE_BEGIN);
+LogFile::LogFile(const std::string& name, bool readwrite) : _name(name) {
+ _fd = CreateFile(toNativeString(name.c_str()).c_str(),
+ (readwrite ? GENERIC_READ : 0) | GENERIC_WRITE,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_ALWAYS,
+ FILE_FLAG_NO_BUFFERING,
+ NULL);
+ if (_fd == INVALID_HANDLE_VALUE) {
+ DWORD e = GetLastError();
+ uasserted(13518,
+ str::stream() << "couldn't open file " << name << " for writing "
+ << errnoWithDescription(e));
}
+ SetFilePointer(_fd, 0, 0, FILE_BEGIN);
+}
- LogFile::~LogFile() {
- if( _fd != INVALID_HANDLE_VALUE )
- CloseHandle(_fd);
- }
+LogFile::~LogFile() {
+ if (_fd != INVALID_HANDLE_VALUE)
+ CloseHandle(_fd);
+}
- void LogFile::truncate() {
- verify(_fd != INVALID_HANDLE_VALUE);
+void LogFile::truncate() {
+ verify(_fd != INVALID_HANDLE_VALUE);
- if (!SetEndOfFile(_fd)){
- msgasserted(15871, "Couldn't truncate file: " + errnoWithDescription());
- }
+ if (!SetEndOfFile(_fd)) {
+ msgasserted(15871, "Couldn't truncate file: " + errnoWithDescription());
}
+}
- void LogFile::writeAt(unsigned long long offset, const void *_buf, size_t _len) {
-// TODO 64 bit offsets
- OVERLAPPED o;
- memset(&o,0,sizeof(o));
- (unsigned long long&) o.Offset = offset;
- BOOL ok= WriteFile(_fd, _buf, _len, 0, &o);
- verify(ok);
- }
+void LogFile::writeAt(unsigned long long offset, const void* _buf, size_t _len) {
+ // TODO 64 bit offsets
+ OVERLAPPED o;
+ memset(&o, 0, sizeof(o));
+ (unsigned long long&)o.Offset = offset;
+ BOOL ok = WriteFile(_fd, _buf, _len, 0, &o);
+ verify(ok);
+}
- void LogFile::readAt(unsigned long long offset, void *_buf, size_t _len) {
-// TODO 64 bit offsets
- OVERLAPPED o;
- memset(&o,0,sizeof(o));
- (unsigned long long&) o.Offset = offset;
- DWORD nr;
- BOOL ok = ReadFile(_fd, _buf, _len, &nr, &o);
- if( !ok ) {
- string e = errnoWithDescription();
- //DWORD e = GetLastError();
- log() << "LogFile readAt(" << offset << ") len:" << _len << "errno:" << e << endl;
- verify(false);
- }
+void LogFile::readAt(unsigned long long offset, void* _buf, size_t _len) {
+ // TODO 64 bit offsets
+ OVERLAPPED o;
+ memset(&o, 0, sizeof(o));
+ (unsigned long long&)o.Offset = offset;
+ DWORD nr;
+ BOOL ok = ReadFile(_fd, _buf, _len, &nr, &o);
+ if (!ok) {
+ string e = errnoWithDescription();
+ // DWORD e = GetLastError();
+ log() << "LogFile readAt(" << offset << ") len:" << _len << "errno:" << e << endl;
+ verify(false);
}
+}
- void LogFile::synchronousAppend(const void *_buf, size_t _len) {
- const size_t BlockSize = 8 * 1024 * 1024;
- verify(_fd);
- verify(_len % g_minOSPageSizeBytes == 0);
- const char *buf = (const char *) _buf;
- size_t left = _len;
- while( left ) {
- size_t toWrite = std::min(left, BlockSize);
- DWORD written;
- if( !WriteFile(_fd, buf, toWrite, &written, NULL) ) {
- DWORD e = GetLastError();
- if( e == 87 )
- msgasserted(13519, "error 87 appending to file - invalid parameter");
- else
- uasserted(13517, str::stream() << "error appending to file " << _name << ' ' << _len << ' ' << toWrite << ' ' << errnoWithDescription(e));
- }
- else {
- dassert( written == toWrite );
- }
- left -= written;
- buf += written;
+void LogFile::synchronousAppend(const void* _buf, size_t _len) {
+ const size_t BlockSize = 8 * 1024 * 1024;
+ verify(_fd);
+ verify(_len % g_minOSPageSizeBytes == 0);
+ const char* buf = (const char*)_buf;
+ size_t left = _len;
+ while (left) {
+ size_t toWrite = std::min(left, BlockSize);
+ DWORD written;
+ if (!WriteFile(_fd, buf, toWrite, &written, NULL)) {
+ DWORD e = GetLastError();
+ if (e == 87)
+ msgasserted(13519, "error 87 appending to file - invalid parameter");
+ else
+ uasserted(13517,
+ str::stream() << "error appending to file " << _name << ' ' << _len << ' '
+ << toWrite << ' ' << errnoWithDescription(e));
+ } else {
+ dassert(written == toWrite);
}
+ left -= written;
+ buf += written;
}
-
+}
}
#else
@@ -147,124 +148,123 @@ namespace mongo {
namespace mongo {
- LogFile::LogFile(const std::string& name, bool readwrite) : _name(name) {
- int options = O_CREAT
- | (readwrite?O_RDWR:O_WRONLY)
+LogFile::LogFile(const std::string& name, bool readwrite) : _name(name) {
+ int options = O_CREAT | (readwrite ? O_RDWR : O_WRONLY)
#if defined(O_DIRECT)
- | O_DIRECT
+ | O_DIRECT
#endif
#if defined(O_NOATIME)
- | O_NOATIME
+ | O_NOATIME
#endif
- ;
+ ;
- _fd = open(name.c_str(), options, S_IRUSR | S_IWUSR);
- _blkSize = g_minOSPageSizeBytes;
+ _fd = open(name.c_str(), options, S_IRUSR | S_IWUSR);
+ _blkSize = g_minOSPageSizeBytes;
#if defined(O_DIRECT)
- _direct = true;
- if( _fd < 0 ) {
- _direct = false;
- options &= ~O_DIRECT;
- _fd = open(name.c_str(), options, S_IRUSR | S_IWUSR);
- }
+ _direct = true;
+ if (_fd < 0) {
+ _direct = false;
+ options &= ~O_DIRECT;
+ _fd = open(name.c_str(), options, S_IRUSR | S_IWUSR);
+ }
#ifdef __linux__
- ssize_t tmpBlkSize = ioctl(_fd, BLKBSZGET);
- // TODO: We need some sanity checking on tmpBlkSize even if ioctl() did not fail.
- if (tmpBlkSize > 0) {
- _blkSize = (size_t)tmpBlkSize;
- }
+ ssize_t tmpBlkSize = ioctl(_fd, BLKBSZGET);
+ // TODO: We need some sanity checking on tmpBlkSize even if ioctl() did not fail.
+ if (tmpBlkSize > 0) {
+ _blkSize = (size_t)tmpBlkSize;
+ }
#endif
#else
- _direct = false;
+ _direct = false;
#endif
- if( _fd < 0 ) {
- uasserted(13516, str::stream() << "couldn't open file " << name << " for writing " << errnoWithDescription());
- }
-
- flushMyDirectory(name);
+ if (_fd < 0) {
+ uasserted(13516,
+ str::stream() << "couldn't open file " << name << " for writing "
+ << errnoWithDescription());
}
- LogFile::~LogFile() {
- if( _fd >= 0 )
- close(_fd);
- _fd = -1;
- }
+ flushMyDirectory(name);
+}
- void LogFile::truncate() {
- verify(_fd >= 0);
+LogFile::~LogFile() {
+ if (_fd >= 0)
+ close(_fd);
+ _fd = -1;
+}
- BOOST_STATIC_ASSERT(sizeof(off_t) == 8); // we don't want overflow here
- const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek
- if (ftruncate(_fd, pos) != 0){
- msgasserted(15873, "Couldn't truncate file: " + errnoWithDescription());
- }
+void LogFile::truncate() {
+ verify(_fd >= 0);
- fsync(_fd);
+ BOOST_STATIC_ASSERT(sizeof(off_t) == 8); // we don't want overflow here
+ const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek
+ if (ftruncate(_fd, pos) != 0) {
+ msgasserted(15873, "Couldn't truncate file: " + errnoWithDescription());
}
- void LogFile::writeAt(unsigned long long offset, const void *buf, size_t len) {
- verify(((size_t)buf) % g_minOSPageSizeBytes == 0); // aligned
- ssize_t written = pwrite(_fd, buf, len, offset);
- if( written != (ssize_t) len ) {
- log() << "writeAt fails " << errnoWithDescription() << endl;
- }
+ fsync(_fd);
+}
+
+void LogFile::writeAt(unsigned long long offset, const void* buf, size_t len) {
+ verify(((size_t)buf) % g_minOSPageSizeBytes == 0); // aligned
+ ssize_t written = pwrite(_fd, buf, len, offset);
+ if (written != (ssize_t)len) {
+ log() << "writeAt fails " << errnoWithDescription() << endl;
+ }
#if defined(__linux__)
- fdatasync(_fd);
+ fdatasync(_fd);
#else
- fsync(_fd);
+ fsync(_fd);
#endif
- }
-
- void LogFile::readAt(unsigned long long offset, void *_buf, size_t _len) {
- verify(((size_t)_buf) % g_minOSPageSizeBytes == 0); // aligned
- ssize_t rd = pread(_fd, _buf, _len, offset);
- verify( rd != -1 );
- }
+}
- void LogFile::synchronousAppend(const void *b, size_t len) {
+void LogFile::readAt(unsigned long long offset, void* _buf, size_t _len) {
+ verify(((size_t)_buf) % g_minOSPageSizeBytes == 0); // aligned
+ ssize_t rd = pread(_fd, _buf, _len, offset);
+ verify(rd != -1);
+}
- const char *buf = static_cast<const char *>( b );
- ssize_t charsToWrite = static_cast<ssize_t>( len );
+void LogFile::synchronousAppend(const void* b, size_t len) {
+ const char* buf = static_cast<const char*>(b);
+ ssize_t charsToWrite = static_cast<ssize_t>(len);
- fassert( 16144, charsToWrite >= 0 );
- fassert( 16142, _fd >= 0 );
- fassert( 16143, reinterpret_cast<size_t>( buf ) % _blkSize == 0 ); // aligned
+ fassert(16144, charsToWrite >= 0);
+ fassert(16142, _fd >= 0);
+ fassert(16143, reinterpret_cast<size_t>(buf) % _blkSize == 0); // aligned
#ifdef POSIX_FADV_DONTNEED
- const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek, just get current position
+ const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek, just get current position
#endif
- while ( charsToWrite > 0 ) {
- const ssize_t written = write( _fd, buf, static_cast<size_t>( charsToWrite ) );
- if ( -1 == written ) {
- log() << "LogFile::synchronousAppend failed with " << charsToWrite
- << " bytes unwritten out of " << len << " bytes; b=" << b << ' '
- << errnoWithDescription() << std::endl;
- fassertFailed( 13515 );
- }
- buf += written;
- charsToWrite -= written;
+ while (charsToWrite > 0) {
+ const ssize_t written = write(_fd, buf, static_cast<size_t>(charsToWrite));
+ if (-1 == written) {
+ log() << "LogFile::synchronousAppend failed with " << charsToWrite
+ << " bytes unwritten out of " << len << " bytes; b=" << b << ' '
+ << errnoWithDescription() << std::endl;
+ fassertFailed(13515);
}
+ buf += written;
+ charsToWrite -= written;
+ }
- if(
+ if (
#if defined(__linux__)
- fdatasync(_fd) < 0
+ fdatasync(_fd) < 0
#else
- fsync(_fd)
+ fsync(_fd)
#endif
- ) {
- log() << "error appending to file on fsync " << ' ' << errnoWithDescription();
- fassertFailed( 13514 );
- }
+ ) {
+ log() << "error appending to file on fsync " << ' ' << errnoWithDescription();
+ fassertFailed(13514);
+ }
#ifdef POSIX_FADV_DONTNEED
- if (!_direct)
- posix_fadvise(_fd, pos, len, POSIX_FADV_DONTNEED);
+ if (!_direct)
+ posix_fadvise(_fd, pos, len, POSIX_FADV_DONTNEED);
#endif
- }
-
+}
}
#endif
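
The POSIX synchronousAppend() above loops because write() may return a short count, then forces the bytes out with fdatasync() on Linux (fsync() elsewhere). A simplified sketch of that loop, assuming the caller has already satisfied the alignment preconditions asserted above:

#include <unistd.h>

#include <cstdio>

// Keep writing until the whole buffer is on its way to disk, then sync it;
// a short write() is not an error, it just means "call me again".
bool appendAllAndSync(int fd, const char* buf, ssize_t len) {
    while (len > 0) {
        const ssize_t written = write(fd, buf, static_cast<size_t>(len));
        if (written == -1) {
            std::perror("write");
            return false;
        }
        buf += written;
        len -= written;
    }
#if defined(__linux__)
    return fdatasync(fd) == 0;  // data-only sync, as in synchronousAppend() above
#else
    return fsync(fd) == 0;
#endif
}
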
diff --git a/src/mongo/db/storage/mmap_v1/logfile.h b/src/mongo/db/storage/mmap_v1/logfile.h
index 278b9c162aa..4a3bb5535e2 100644
--- a/src/mongo/db/storage/mmap_v1/logfile.h
+++ b/src/mongo/db/storage/mmap_v1/logfile.h
@@ -35,43 +35,42 @@
namespace mongo {
- class LogFile {
- public:
- /** create the file and open. must not already exist.
- throws UserAssertion on i/o error
- */
- LogFile(const std::string& name, bool readwrite = false);
+class LogFile {
+public:
+ /** create the file and open. must not already exist.
+ throws UserAssertion on i/o error
+ */
+ LogFile(const std::string& name, bool readwrite = false);
- /** closes */
- ~LogFile();
+ /** closes */
+ ~LogFile();
- /** append to file. does not return until sync'd. uses direct i/o when possible.
- throws UserAssertion on an i/o error
- note direct i/o may have alignment requirements
- */
- void synchronousAppend(const void *buf, size_t len);
+ /** append to file. does not return until sync'd. uses direct i/o when possible.
+ throws UserAssertion on an i/o error
+ note direct i/o may have alignment requirements
+ */
+ void synchronousAppend(const void* buf, size_t len);
- /** write at specified offset. must be aligned. noreturn until physically written. thread safe */
- void writeAt(unsigned long long offset, const void *_bug, size_t _len);
+ /** write at specified offset. must be aligned. noreturn until physically written. thread safe */
+ void writeAt(unsigned long long offset, const void* _bug, size_t _len);
- void readAt(unsigned long long offset, void *_buf, size_t _len);
+ void readAt(unsigned long long offset, void* _buf, size_t _len);
- const std::string _name;
+ const std::string _name;
- void truncate(); // Removes extra data after current position
+ void truncate(); // Removes extra data after current position
- private:
+private:
#if defined(_WIN32)
- typedef HANDLE fd_type;
+ typedef HANDLE fd_type;
#else
- typedef int fd_type;
+ typedef int fd_type;
#endif
- fd_type _fd;
- bool _direct; // are we using direct I/O
-
- // Block size, in case of direct I/O we need to test alignment against the page size,
- // which can be different than 4kB.
- size_t _blkSize;
- };
+ fd_type _fd;
+ bool _direct; // are we using direct I/O
+ // Block size, in case of direct I/O we need to test alignment against the page size,
+ // which can be different than 4kB.
+ size_t _blkSize;
+};
}
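
A hypothetical, POSIX-flavoured caller of the LogFile interface declared above; the buffer is page aligned and the length a page multiple, since the implementation may use direct I/O:

#include <stdlib.h>

#include <cstring>
#include <string>

#include "mongo/db/storage/mmap_v1/logfile.h"

// Append one zeroed, page-aligned block to a freshly created log file.
void writeOneBlock(const std::string& path, size_t pageSize) {
    mongo::LogFile lf(path);              // creates the file and opens it
    void* buf = NULL;
    if (posix_memalign(&buf, pageSize, pageSize) != 0)
        return;
    std::memset(buf, 0, pageSize);
    lf.synchronousAppend(buf, pageSize);  // does not return until the bytes are synced
    free(buf);
}
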
diff --git a/src/mongo/db/storage/mmap_v1/mmap.cpp b/src/mongo/db/storage/mmap_v1/mmap.cpp
index e9519fc7d94..57559d3038e 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap.cpp
@@ -46,213 +46,220 @@
namespace mongo {
- using std::endl;
- using std::map;
- using std::set;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- void minOSPageSizeBytesTest(size_t minOSPageSizeBytes) {
- fassert( 16325, minOSPageSizeBytes > 0 );
- fassert( 16326, minOSPageSizeBytes < 1000000 );
- // check to see if the page size is a power of 2
- fassert( 16327, (minOSPageSizeBytes & (minOSPageSizeBytes - 1)) == 0);
- }
+using std::endl;
+using std::map;
+using std::set;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+void minOSPageSizeBytesTest(size_t minOSPageSizeBytes) {
+ fassert(16325, minOSPageSizeBytes > 0);
+ fassert(16326, minOSPageSizeBytes < 1000000);
+ // check to see if the page size is a power of 2
+ fassert(16327, (minOSPageSizeBytes & (minOSPageSizeBytes - 1)) == 0);
+}
namespace {
- set<MongoFile*> mmfiles;
- map<string,MongoFile*> pathToFile;
+set<MongoFile*> mmfiles;
+map<string, MongoFile*> pathToFile;
} // namespace
- /* Create. Must not exist.
- @param zero fill file with zeros when true
- */
- void* MemoryMappedFile::create(const std::string& filename, unsigned long long len, bool zero) {
- uassert( 13468, string("can't create file already exists ") + filename, ! boost::filesystem::exists(filename) );
- void *p = map(filename.c_str(), len);
- if( p && zero ) {
- size_t sz = (size_t) len;
- verify( len == sz );
- memset(p, 0, sz);
- }
- return p;
+/* Create. Must not exist.
+@param zero fill file with zeros when true
+*/
+void* MemoryMappedFile::create(const std::string& filename, unsigned long long len, bool zero) {
+ uassert(13468,
+ string("can't create file already exists ") + filename,
+ !boost::filesystem::exists(filename));
+ void* p = map(filename.c_str(), len);
+ if (p && zero) {
+ size_t sz = (size_t)len;
+ verify(len == sz);
+ memset(p, 0, sz);
}
-
- /*static*/ void MemoryMappedFile::updateLength( const char *filename, unsigned long long &length ) {
- if ( !boost::filesystem::exists( filename ) )
- return;
- // make sure we map full length if preexisting file.
- boost::uintmax_t l = boost::filesystem::file_size( filename );
- length = l;
+ return p;
+}
+
+/*static*/ void MemoryMappedFile::updateLength(const char* filename, unsigned long long& length) {
+ if (!boost::filesystem::exists(filename))
+ return;
+ // make sure we map full length if preexisting file.
+ boost::uintmax_t l = boost::filesystem::file_size(filename);
+ length = l;
+}
+
+void* MemoryMappedFile::map(const char* filename) {
+ unsigned long long l;
+ try {
+ l = boost::filesystem::file_size(filename);
+ } catch (boost::filesystem::filesystem_error& e) {
+ uasserted(15922,
+ mongoutils::str::stream() << "couldn't get file length when opening mapping "
+ << filename << ' ' << e.what());
}
-
- void* MemoryMappedFile::map(const char *filename) {
- unsigned long long l;
- try {
- l = boost::filesystem::file_size( filename );
- }
- catch(boost::filesystem::filesystem_error& e) {
- uasserted(15922, mongoutils::str::stream() << "couldn't get file length when opening mapping " << filename << ' ' << e.what() );
- }
- return map( filename , l );
+ return map(filename, l);
+}
+void* MemoryMappedFile::mapWithOptions(const char* filename, int options) {
+ unsigned long long l;
+ try {
+ l = boost::filesystem::file_size(filename);
+ } catch (boost::filesystem::filesystem_error& e) {
+ uasserted(15923,
+ mongoutils::str::stream() << "couldn't get file length when opening mapping "
+ << filename << ' ' << e.what());
}
- void* MemoryMappedFile::mapWithOptions(const char *filename, int options) {
- unsigned long long l;
- try {
- l = boost::filesystem::file_size( filename );
- }
- catch(boost::filesystem::filesystem_error& e) {
- uasserted(15923, mongoutils::str::stream() << "couldn't get file length when opening mapping " << filename << ' ' << e.what() );
- }
- return map( filename , l, options );
+ return map(filename, l, options);
+}
+
+/* --- MongoFile -------------------------------------------------
+ this is the administrative stuff
+*/
+
+RWLockRecursiveNongreedy LockMongoFilesShared::mmmutex("mmmutex", 10 * 60 * 1000 /* 10 minutes */);
+unsigned LockMongoFilesShared::era = 99; // note this rolls over
+
+set<MongoFile*>& MongoFile::getAllFiles() {
+ return mmfiles;
+}
+
+/* subclass must call in destructor (or at close).
+ removes this from pathToFile and other maps
+ safe to call more than once, albeit might be wasted work
+ ideal to call close to the close, if the close is well before object destruction
+*/
+void MongoFile::destroyed() {
+ LockMongoFilesShared::assertExclusivelyLocked();
+ mmfiles.erase(this);
+ pathToFile.erase(filename());
+}
+
+/*static*/
+void MongoFile::closeAllFiles(stringstream& message) {
+ static int closingAllFiles = 0;
+ if (closingAllFiles) {
+ message << "warning closingAllFiles=" << closingAllFiles << endl;
+ return;
}
+ ++closingAllFiles;
- /* --- MongoFile -------------------------------------------------
- this is the administrative stuff
- */
-
- RWLockRecursiveNongreedy LockMongoFilesShared::mmmutex("mmmutex",10*60*1000 /* 10 minutes */);
- unsigned LockMongoFilesShared::era = 99; // note this rolls over
-
- set<MongoFile*>& MongoFile::getAllFiles() { return mmfiles; }
+ LockMongoFilesExclusive lk;
- /* subclass must call in destructor (or at close).
- removes this from pathToFile and other maps
- safe to call more than once, albeit might be wasted work
- ideal to call close to the close, if the close is well before object destruction
- */
- void MongoFile::destroyed() {
- LockMongoFilesShared::assertExclusivelyLocked();
- mmfiles.erase(this);
- pathToFile.erase( filename() );
+ ProgressMeter pm(mmfiles.size(), 2, 1, "files", "File Closing Progress");
+ set<MongoFile*> temp = mmfiles;
+ for (set<MongoFile*>::iterator i = temp.begin(); i != temp.end(); i++) {
+ (*i)->close(); // close() now removes from mmfiles
+ pm.hit();
}
+ message << "closeAllFiles() finished";
+ --closingAllFiles;
+}
- /*static*/
- void MongoFile::closeAllFiles( stringstream &message ) {
- static int closingAllFiles = 0;
- if ( closingAllFiles ) {
- message << "warning closingAllFiles=" << closingAllFiles << endl;
- return;
- }
- ++closingAllFiles;
+/*static*/ long long MongoFile::totalMappedLength() {
+ unsigned long long total = 0;
- LockMongoFilesExclusive lk;
+ LockMongoFilesShared lk;
- ProgressMeter pm(mmfiles.size(), 2, 1, "files", "File Closing Progress");
- set<MongoFile*> temp = mmfiles;
- for ( set<MongoFile*>::iterator i = temp.begin(); i != temp.end(); i++ ) {
- (*i)->close(); // close() now removes from mmfiles
- pm.hit();
- }
- message << "closeAllFiles() finished";
- --closingAllFiles;
- }
+ for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++)
+ total += (*i)->length();
- /*static*/ long long MongoFile::totalMappedLength() {
- unsigned long long total = 0;
+ return total;
+}
- LockMongoFilesShared lk;
-
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
- total += (*i)->length();
-
- return total;
- }
+void nullFunc() {}
- void nullFunc() { }
+// callback notifications
+void (*MongoFile::notifyPreFlush)() = nullFunc;
+void (*MongoFile::notifyPostFlush)() = nullFunc;
- // callback notifications
- void (*MongoFile::notifyPreFlush)() = nullFunc;
- void (*MongoFile::notifyPostFlush)() = nullFunc;
+/*static*/ int MongoFile::flushAll(bool sync) {
+ if (sync)
+ notifyPreFlush();
+ int x = _flushAll(sync);
+ if (sync)
+ notifyPostFlush();
+ return x;
+}
- /*static*/ int MongoFile::flushAll( bool sync ) {
- if ( sync ) notifyPreFlush();
- int x = _flushAll(sync);
- if ( sync ) notifyPostFlush();
- return x;
- }
+/*static*/ int MongoFile::_flushAll(bool sync) {
+ if (!sync) {
+ int num = 0;
+ LockMongoFilesShared lk;
+ for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
+ num++;
+ MongoFile* mmf = *i;
+ if (!mmf)
+ continue;
- /*static*/ int MongoFile::_flushAll( bool sync ) {
- if ( ! sync ) {
- int num = 0;
- LockMongoFilesShared lk;
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
- num++;
- MongoFile * mmf = *i;
- if ( ! mmf )
- continue;
-
- mmf->flush( sync );
- }
- return num;
+ mmf->flush(sync);
}
+ return num;
+ }
- // want to do it sync
-
- // get a thread-safe Flushable object for each file first in a single lock
- // so that we can iterate and flush without doing any locking here
- OwnedPointerVector<Flushable> thingsToFlushWrapper;
- vector<Flushable*>& thingsToFlush = thingsToFlushWrapper.mutableVector();
- {
- LockMongoFilesShared lk;
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
- MongoFile* mmf = *i;
- if ( !mmf )
- continue;
- thingsToFlush.push_back( mmf->prepareFlush() );
- }
- }
+ // want to do it sync
- for ( size_t i = 0; i < thingsToFlush.size(); i++ ) {
- thingsToFlush[i]->flush();
+ // get a thread-safe Flushable object for each file first in a single lock
+ // so that we can iterate and flush without doing any locking here
+ OwnedPointerVector<Flushable> thingsToFlushWrapper;
+ vector<Flushable*>& thingsToFlush = thingsToFlushWrapper.mutableVector();
+ {
+ LockMongoFilesShared lk;
+ for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
+ MongoFile* mmf = *i;
+ if (!mmf)
+ continue;
+ thingsToFlush.push_back(mmf->prepareFlush());
}
-
- return thingsToFlush.size();
- }
-
- void MongoFile::created() {
- LockMongoFilesExclusive lk;
- mmfiles.insert(this);
}
- void MongoFile::setFilename(const std::string& fn) {
- LockMongoFilesExclusive lk;
- verify( _filename.empty() );
- _filename = boost::filesystem::absolute(fn).generic_string();
- MongoFile *&ptf = pathToFile[_filename];
- massert(13617, "MongoFile : multiple opens of same filename", ptf == 0);
- ptf = this;
+ for (size_t i = 0; i < thingsToFlush.size(); i++) {
+ thingsToFlush[i]->flush();
}
- MongoFile* MongoFileFinder::findByPath(const std::string& path) const {
- return mapFindWithDefault(pathToFile,
- boost::filesystem::absolute(path).generic_string(),
- static_cast<MongoFile*>(NULL));
+ return thingsToFlush.size();
+}
+
+void MongoFile::created() {
+ LockMongoFilesExclusive lk;
+ mmfiles.insert(this);
+}
+
+void MongoFile::setFilename(const std::string& fn) {
+ LockMongoFilesExclusive lk;
+ verify(_filename.empty());
+ _filename = boost::filesystem::absolute(fn).generic_string();
+ MongoFile*& ptf = pathToFile[_filename];
+ massert(13617, "MongoFile : multiple opens of same filename", ptf == 0);
+ ptf = this;
+}
+
+MongoFile* MongoFileFinder::findByPath(const std::string& path) const {
+ return mapFindWithDefault(pathToFile,
+ boost::filesystem::absolute(path).generic_string(),
+ static_cast<MongoFile*>(NULL));
+}
+
+
+void printMemInfo(const char* where) {
+ LogstreamBuilder out = log();
+ out << "mem info: ";
+ if (where)
+ out << where << " ";
+
+ ProcessInfo pi;
+ if (!pi.supported()) {
+ out << " not supported";
+ return;
}
+ out << "vsize: " << pi.getVirtualMemorySize() << " resident: " << pi.getResidentSize()
+ << " mapped: " << (MemoryMappedFile::totalMappedLength() / (1024 * 1024));
+}
- void printMemInfo( const char * where ) {
- LogstreamBuilder out = log();
- out << "mem info: ";
- if ( where )
- out << where << " ";
-
- ProcessInfo pi;
- if ( ! pi.supported() ) {
- out << " not supported";
- return;
- }
-
- out << "vsize: " << pi.getVirtualMemorySize()
- << " resident: " << pi.getResidentSize()
- << " mapped: " << ( MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ) );
- }
-
- void dataSyncFailedHandler() {
- log() << "error syncing data to disk, probably a disk error";
- log() << " shutting down immediately to avoid corruption";
- fassertFailed( 17346 );
- }
+void dataSyncFailedHandler() {
+ log() << "error syncing data to disk, probably a disk error";
+ log() << " shutting down immediately to avoid corruption";
+ fassertFailed(17346);
+}
-} // namespace mongo
+} // namespace mongo
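
The synchronous branch of MongoFile::_flushAll() above holds the shared lock only long enough to collect one Flushable per file, then performs the flushes with no lock held. A standalone sketch of that pattern, with hypothetical stand-ins for MongoFile/Flushable and a plain std::mutex in place of LockMongoFilesShared:

#include <cstddef>
#include <memory>
#include <mutex>
#include <set>
#include <vector>

// Hypothetical stand-ins for MongoFile and MongoFile::Flushable.
struct FlushHandle {
    virtual ~FlushHandle() {}
    virtual void flush() = 0;
};
struct File {
    virtual ~File() {}
    virtual FlushHandle* prepareFlush() = 0;
};

// Collect the flush handles under the lock, flush them with the lock released.
std::size_t flushAllSync(std::mutex& filesMutex, const std::set<File*>& files) {
    std::vector<std::unique_ptr<FlushHandle>> handles;
    {
        std::lock_guard<std::mutex> lk(filesMutex);  // stands in for LockMongoFilesShared
        for (File* f : files)
            if (f)
                handles.emplace_back(f->prepareFlush());
    }
    for (auto& h : handles)
        h->flush();  // potentially slow I/O, done without holding the lock
    return handles.size();
}
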
diff --git a/src/mongo/db/storage/mmap_v1/mmap.h b/src/mongo/db/storage/mmap_v1/mmap.h
index f70b64c96eb..ae9a0796a4b 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.h
+++ b/src/mongo/db/storage/mmap_v1/mmap.h
@@ -38,225 +38,249 @@
namespace mongo {
#if !defined(_WIN32)
- typedef int HANDLE;
+typedef int HANDLE;
#endif
- extern const size_t g_minOSPageSizeBytes;
- void minOSPageSizeBytesTest(size_t minOSPageSizeBytes); // lame-o
+extern const size_t g_minOSPageSizeBytes;
+void minOSPageSizeBytesTest(size_t minOSPageSizeBytes); // lame-o
- // call this if syncing data fails
- void dataSyncFailedHandler();
+// call this if syncing data fails
+void dataSyncFailedHandler();
- class MAdvise {
- MONGO_DISALLOW_COPYING(MAdvise);
- public:
- enum Advice { Sequential=1 , Random=2 };
- MAdvise(void *p, unsigned len, Advice a);
- ~MAdvise(); // destructor resets the range to MADV_NORMAL
- private:
- void *_p;
- unsigned _len;
- };
+class MAdvise {
+ MONGO_DISALLOW_COPYING(MAdvise);
- // lock order: lock dbMutex before this if you lock both
- class LockMongoFilesShared {
- friend class LockMongoFilesExclusive;
- static RWLockRecursiveNongreedy mmmutex;
- static unsigned era;
- RWLockRecursive::Shared lk;
- public:
- LockMongoFilesShared() : lk(mmmutex) { }
+public:
+ enum Advice { Sequential = 1, Random = 2 };
+ MAdvise(void* p, unsigned len, Advice a);
+ ~MAdvise(); // destructor resets the range to MADV_NORMAL
+private:
+ void* _p;
+ unsigned _len;
+};
- /** era changes anytime memory maps come and go. thus you can use this as a cheap way to check
- if nothing has changed since the last time you locked. Of course you must be shared locked
- at the time of this call, otherwise someone could be in progress.
+// lock order: lock dbMutex before this if you lock both
+class LockMongoFilesShared {
+ friend class LockMongoFilesExclusive;
+ static RWLockRecursiveNongreedy mmmutex;
+ static unsigned era;
+ RWLockRecursive::Shared lk;
- This is used for yielding; see PageFaultException::touch().
- */
- static unsigned getEra() { return era; }
+public:
+ LockMongoFilesShared() : lk(mmmutex) {}
- static void assertExclusivelyLocked() { mmmutex.assertExclusivelyLocked(); }
- static void assertAtLeastReadLocked() { mmmutex.assertAtLeastReadLocked(); }
- };
+ /** era changes anytime memory maps come and go. thus you can use this as a cheap way to check
+ if nothing has changed since the last time you locked. Of course you must be shared locked
+ at the time of this call, otherwise someone could be in progress.
+
+ This is used for yielding; see PageFaultException::touch().
+ */
+ static unsigned getEra() {
+ return era;
+ }
+
+ static void assertExclusivelyLocked() {
+ mmmutex.assertExclusivelyLocked();
+ }
+ static void assertAtLeastReadLocked() {
+ mmmutex.assertAtLeastReadLocked();
+ }
+};
+
+class LockMongoFilesExclusive {
+ RWLockRecursive::Exclusive lk;
- class LockMongoFilesExclusive {
- RWLockRecursive::Exclusive lk;
+public:
+ LockMongoFilesExclusive() : lk(LockMongoFilesShared::mmmutex) {
+ LockMongoFilesShared::era++;
+ }
+};
+
+/* the administrative-ish stuff here */
+class MongoFile {
+ MONGO_DISALLOW_COPYING(MongoFile);
+
+public:
+ /** Flushable has to fail nicely if the underlying object gets killed */
+ class Flushable {
public:
- LockMongoFilesExclusive() : lk(LockMongoFilesShared::mmmutex) {
- LockMongoFilesShared::era++;
- }
+ virtual ~Flushable() {}
+ virtual void flush() = 0;
};
- /* the administrative-ish stuff here */
- class MongoFile {
- MONGO_DISALLOW_COPYING(MongoFile);
- public:
- /** Flushable has to fail nicely if the underlying object gets killed */
- class Flushable {
- public:
- virtual ~Flushable() {}
- virtual void flush() = 0;
- };
-
- MongoFile() {}
- virtual ~MongoFile() {}
-
- enum Options {
- SEQUENTIAL = 1, // hint - e.g. FILE_FLAG_SEQUENTIAL_SCAN on windows
- READONLY = 2 // not contractually guaranteed, but if specified the impl has option to fault writes
- };
-
- /** @param fun is called for each MongoFile.
- called from within a mutex that MongoFile uses. so be careful not to deadlock.
- */
- template < class F >
- static void forEach( F fun );
+ MongoFile() {}
+ virtual ~MongoFile() {}
- /** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically.
-*/
- static std::set<MongoFile*>& getAllFiles();
+ enum Options {
+ SEQUENTIAL = 1, // hint - e.g. FILE_FLAG_SEQUENTIAL_SCAN on windows
+ READONLY =
+ 2 // not contractually guaranteed, but if specified the impl has option to fault writes
+ };
- // callbacks if you need them
- static void (*notifyPreFlush)();
- static void (*notifyPostFlush)();
+ /** @param fun is called for each MongoFile.
+ called from within a mutex that MongoFile uses. so be careful not to deadlock.
+ */
+ template <class F>
+ static void forEach(F fun);
- static int flushAll( bool sync ); // returns n flushed
- static long long totalMappedLength();
- static void closeAllFiles( std::stringstream &message );
+ /** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically.
+*/
+ static std::set<MongoFile*>& getAllFiles();
- virtual bool isDurableMappedFile() { return false; }
+ // callbacks if you need them
+ static void (*notifyPreFlush)();
+ static void (*notifyPostFlush)();
- std::string filename() const { return _filename; }
- void setFilename(const std::string& fn);
+ static int flushAll(bool sync); // returns n flushed
+ static long long totalMappedLength();
+ static void closeAllFiles(std::stringstream& message);
- virtual uint64_t getUniqueId() const = 0;
+ virtual bool isDurableMappedFile() {
+ return false;
+ }
- private:
- std::string _filename;
- static int _flushAll( bool sync ); // returns n flushed
- protected:
- virtual void close() = 0;
- virtual void flush(bool sync) = 0;
- /**
- * returns a thread safe object that you can call flush on
- * Flushable has to fail nicely if the underlying object gets killed
- */
- virtual Flushable * prepareFlush() = 0;
+ std::string filename() const {
+ return _filename;
+ }
+ void setFilename(const std::string& fn);
+
+ virtual uint64_t getUniqueId() const = 0;
+
+private:
+ std::string _filename;
+ static int _flushAll(bool sync); // returns n flushed
+protected:
+ virtual void close() = 0;
+ virtual void flush(bool sync) = 0;
+ /**
+ * returns a thread safe object that you can call flush on
+ * Flushable has to fail nicely if the underlying object gets killed
+ */
+ virtual Flushable* prepareFlush() = 0;
+
+ void created(); /* subclass must call after create */
+
+ /* subclass must call in destructor (or at close).
+ removes this from pathToFile and other maps
+ safe to call more than once, albeit might be wasted work
+ ideal to call close to the close, if the close is well before object destruction
+ */
+ void destroyed();
- void created(); /* subclass must call after create */
+ virtual unsigned long long length() const = 0;
+};
- /* subclass must call in destructor (or at close).
- removes this from pathToFile and other maps
- safe to call more than once, albeit might be wasted work
- ideal to call close to the close, if the close is well before object destruction
- */
- void destroyed();
+/** look up a MMF by filename. scoped mutex locking convention.
+ example:
+ MMFFinderByName finder;
+ DurableMappedFile *a = finder.find("file_name_a");
+ DurableMappedFile *b = finder.find("file_name_b");
+*/
+class MongoFileFinder {
+ MONGO_DISALLOW_COPYING(MongoFileFinder);
- virtual unsigned long long length() const = 0;
- };
+public:
+ MongoFileFinder() {}
- /** look up a MMF by filename. scoped mutex locking convention.
- example:
- MMFFinderByName finder;
- DurableMappedFile *a = finder.find("file_name_a");
- DurableMappedFile *b = finder.find("file_name_b");
+ /** @return The MongoFile object associated with the specified file name. If no file is open
+ with the specified name, returns null.
*/
- class MongoFileFinder {
- MONGO_DISALLOW_COPYING(MongoFileFinder);
- public:
- MongoFileFinder() { }
+ MongoFile* findByPath(const std::string& path) const;
+
+private:
+ LockMongoFilesShared _lk;
+};
+
+class MemoryMappedFile : public MongoFile {
+protected:
+ virtual void* viewForFlushing() {
+ if (views.size() == 0)
+ return 0;
+ verify(views.size() == 1);
+ return views[0];
+ }
- /** @return The MongoFile object associated with the specified file name. If no file is open
- with the specified name, returns null.
- */
- MongoFile* findByPath(const std::string& path) const;
+public:
+ MemoryMappedFile();
- private:
- LockMongoFilesShared _lk;
- };
+ virtual ~MemoryMappedFile() {
+ LockMongoFilesExclusive lk;
+ close();
+ }
- class MemoryMappedFile : public MongoFile {
- protected:
- virtual void* viewForFlushing() {
- if( views.size() == 0 )
- return 0;
- verify( views.size() == 1 );
- return views[0];
- }
- public:
- MemoryMappedFile();
+ virtual void close();
- virtual ~MemoryMappedFile() {
- LockMongoFilesExclusive lk;
- close();
- }
+ // Throws exception if file doesn't exist. (dm may2010: not sure if this is always true?)
+ void* map(const char* filename);
- virtual void close();
+ /** @param options see MongoFile::Options
+ */
+ void* mapWithOptions(const char* filename, int options);
- // Throws exception if file doesn't exist. (dm may2010: not sure if this is always true?)
- void* map(const char *filename);
+ /* Creates with length if DNE, otherwise uses existing file length,
+ passed length.
+ @param options MongoFile::Options bits
+ */
+ void* map(const char* filename, unsigned long long& length, int options = 0);
- /** @param options see MongoFile::Options
- */
- void* mapWithOptions(const char *filename, int options);
+ /* Create. Must not exist.
+ @param zero fill file with zeros when true
+ */
+ void* create(const std::string& filename, unsigned long long len, bool zero);
- /* Creates with length if DNE, otherwise uses existing file length,
- passed length.
- @param options MongoFile::Options bits
- */
- void* map(const char *filename, unsigned long long &length, int options = 0 );
+ void flush(bool sync);
+ virtual Flushable* prepareFlush();
- /* Create. Must not exist.
- @param zero fill file with zeros when true
+ long shortLength() const {
+ return (long)len;
+ }
+ unsigned long long length() const {
+ return len;
+ }
+ HANDLE getFd() const {
+ return fd;
+ }
+ /** create a new view with the specified properties.
+ automatically cleaned up upon close/destruction of the MemoryMappedFile object.
*/
- void* create(const std::string& filename, unsigned long long len, bool zero);
-
- void flush(bool sync);
- virtual Flushable * prepareFlush();
-
- long shortLength() const { return (long) len; }
- unsigned long long length() const { return len; }
- HANDLE getFd() const { return fd; }
- /** create a new view with the specified properties.
- automatically cleaned up upon close/destruction of the MemoryMappedFile object.
- */
- void* createReadOnlyMap();
- void* createPrivateMap();
-
- virtual uint64_t getUniqueId() const { return _uniqueId; }
-
- private:
- static void updateLength( const char *filename, unsigned long long &length );
-
- HANDLE fd;
- HANDLE maphandle;
- std::vector<void *> views;
- unsigned long long len;
- const uint64_t _uniqueId;
-#ifdef _WIN32
- // flush Mutex
- //
- // Protects:
- // Prevent flush() and close() from concurrently running.
- // It ensures close() cannot complete while flush() is running
- // Lock Ordering:
- // LockMongoFilesShared must be taken before _flushMutex if both are taken
- stdx::mutex _flushMutex;
-#endif
+ void* createReadOnlyMap();
+ void* createPrivateMap();
- protected:
+ virtual uint64_t getUniqueId() const {
+ return _uniqueId;
+ }
- /** close the current private view and open a new replacement */
- void* remapPrivateView(void *oldPrivateAddr);
- };
+private:
+ static void updateLength(const char* filename, unsigned long long& length);
- /** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */
- template < class F >
- inline void MongoFile::forEach( F p ) {
- LockMongoFilesShared lklk;
- const std::set<MongoFile*>& mmfiles = MongoFile::getAllFiles();
- for ( std::set<MongoFile*>::const_iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
- p(*i);
- }
+ HANDLE fd;
+ HANDLE maphandle;
+ std::vector<void*> views;
+ unsigned long long len;
+ const uint64_t _uniqueId;
+#ifdef _WIN32
+ // flush Mutex
+ //
+ // Protects:
+ // Prevent flush() and close() from concurrently running.
+ // It ensures close() cannot complete while flush() is running
+ // Lock Ordering:
+ // LockMongoFilesShared must be taken before _flushMutex if both are taken
+ stdx::mutex _flushMutex;
+#endif
-} // namespace mongo
+protected:
+ /** close the current private view and open a new replacement */
+ void* remapPrivateView(void* oldPrivateAddr);
+};
+
+/** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */
+template <class F>
+inline void MongoFile::forEach(F p) {
+ LockMongoFilesShared lklk;
+ const std::set<MongoFile*>& mmfiles = MongoFile::getAllFiles();
+ for (std::set<MongoFile*>::const_iterator i = mmfiles.begin(); i != mmfiles.end(); i++)
+ p(*i);
+}
+
+} // namespace mongo
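
A hypothetical caller of the MongoFile::forEach() and totalMappedLength() interface declared above (not code from this repository); the functor runs under MongoFile's internal mutex, so it should stay cheap and take no locks of its own:

#include <iostream>

#include "mongo/db/storage/mmap_v1/mmap.h"

// Print every currently mapped file plus the total mapped size.
void dumpMappedFiles() {
    mongo::MongoFile::forEach([](mongo::MongoFile* f) { std::cout << f->filename() << '\n'; });
    std::cout << "total mapped bytes: " << mongo::MongoFile::totalMappedLength() << '\n';
}
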
diff --git a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
index f7dffae468f..a673d3e5fde 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
@@ -53,38 +53,37 @@ using std::vector;
using namespace mongoutils;
namespace {
- mongo::AtomicUInt64 mmfNextId(0);
+mongo::AtomicUInt64 mmfNextId(0);
}
namespace mongo {
- static size_t fetchMinOSPageSizeBytes() {
- size_t minOSPageSizeBytes = sysconf( _SC_PAGESIZE );
- minOSPageSizeBytesTest(minOSPageSizeBytes);
- return minOSPageSizeBytes;
- }
- const size_t g_minOSPageSizeBytes = fetchMinOSPageSizeBytes();
-
-
-
- MemoryMappedFile::MemoryMappedFile() : _uniqueId(mmfNextId.fetchAndAdd(1)) {
- fd = 0;
- maphandle = 0;
- len = 0;
- created();
- }
+static size_t fetchMinOSPageSizeBytes() {
+ size_t minOSPageSizeBytes = sysconf(_SC_PAGESIZE);
+ minOSPageSizeBytesTest(minOSPageSizeBytes);
+ return minOSPageSizeBytes;
+}
+const size_t g_minOSPageSizeBytes = fetchMinOSPageSizeBytes();
- void MemoryMappedFile::close() {
- LockMongoFilesShared::assertExclusivelyLocked();
- for( vector<void*>::iterator i = views.begin(); i != views.end(); i++ ) {
- munmap(*i,len);
- }
- views.clear();
- if ( fd )
- ::close(fd);
- fd = 0;
- destroyed(); // cleans up from the master list of mmaps
+MemoryMappedFile::MemoryMappedFile() : _uniqueId(mmfNextId.fetchAndAdd(1)) {
+ fd = 0;
+ maphandle = 0;
+ len = 0;
+ created();
+}
+
+void MemoryMappedFile::close() {
+ LockMongoFilesShared::assertExclusivelyLocked();
+ for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
+ munmap(*i, len);
}
+ views.clear();
+
+ if (fd)
+ ::close(fd);
+ fd = 0;
+ destroyed(); // cleans up from the master list of mmaps
+}
#ifndef O_NOATIME
#define O_NOATIME (0)
@@ -94,231 +93,234 @@ namespace mongo {
#define MAP_NORESERVE (0)
#endif
- namespace {
- void* _pageAlign( void* p ) {
- return (void*)((int64_t)p & ~(g_minOSPageSizeBytes-1));
+namespace {
+void* _pageAlign(void* p) {
+ return (void*)((int64_t)p & ~(g_minOSPageSizeBytes - 1));
+}
+
+class PageAlignTest : public StartupTest {
+public:
+ void run() {
+ {
+ int64_t x = g_minOSPageSizeBytes + 123;
+ void* y = _pageAlign(reinterpret_cast<void*>(x));
+ invariant(g_minOSPageSizeBytes == reinterpret_cast<size_t>(y));
}
+ {
+ int64_t a = static_cast<uint64_t>(numeric_limits<int>::max());
+ a = a / g_minOSPageSizeBytes;
+ a = a * g_minOSPageSizeBytes;
+ // a should now be page aligned
- class PageAlignTest : public StartupTest {
- public:
- void run() {
- {
- int64_t x = g_minOSPageSizeBytes + 123;
- void* y = _pageAlign( reinterpret_cast<void*>( x ) );
- invariant( g_minOSPageSizeBytes == reinterpret_cast<size_t>(y) );
- }
- {
- int64_t a = static_cast<uint64_t>( numeric_limits<int>::max() );
- a = a / g_minOSPageSizeBytes;
- a = a * g_minOSPageSizeBytes;
- // a should now be page aligned
-
- // b is not page aligned
- int64_t b = a + 123;
-
- void* y = _pageAlign( reinterpret_cast<void*>( b ) );
- invariant( a == reinterpret_cast<int64_t>(y) );
- }
+ // b is not page aligned
+ int64_t b = a + 123;
- }
- } pageAlignTest;
+ void* y = _pageAlign(reinterpret_cast<void*>(b));
+ invariant(a == reinterpret_cast<int64_t>(y));
+ }
}
+} pageAlignTest;
+}
#if defined(__sun)
- MAdvise::MAdvise(void *,unsigned, Advice) { }
- MAdvise::~MAdvise() { }
+MAdvise::MAdvise(void*, unsigned, Advice) {}
+MAdvise::~MAdvise() {}
#else
- MAdvise::MAdvise(void *p, unsigned len, Advice a) {
+MAdvise::MAdvise(void* p, unsigned len, Advice a) {
+ _p = _pageAlign(p);
- _p = _pageAlign( p );
+ _len = len + static_cast<unsigned>(reinterpret_cast<size_t>(p) - reinterpret_cast<size_t>(_p));
- _len = len + static_cast<unsigned>( reinterpret_cast<size_t>(p) -
- reinterpret_cast<size_t>(_p) );
-
- int advice = 0;
- switch ( a ) {
+ int advice = 0;
+ switch (a) {
case Sequential:
advice = MADV_SEQUENTIAL;
break;
case Random:
advice = MADV_RANDOM;
break;
- }
-
- if ( madvise(_p,_len,advice ) ) {
- error() << "madvise failed: " << errnoWithDescription();
- }
-
}
- MAdvise::~MAdvise() {
- madvise(_p,_len,MADV_NORMAL);
+
+ if (madvise(_p, _len, advice)) {
+ error() << "madvise failed: " << errnoWithDescription();
}
+}
+MAdvise::~MAdvise() {
+ madvise(_p, _len, MADV_NORMAL);
+}
#endif
- void* MemoryMappedFile::map(const char *filename, unsigned long long &length, int options) {
- // length may be updated by callee.
- setFilename(filename);
- FileAllocator::get()->allocateAsap( filename, length );
- len = length;
+void* MemoryMappedFile::map(const char* filename, unsigned long long& length, int options) {
+ // length may be updated by callee.
+ setFilename(filename);
+ FileAllocator::get()->allocateAsap(filename, length);
+ len = length;
- massert( 10446 , str::stream() << "mmap: can't map area of size 0 file: " << filename, length > 0 );
+ massert(
+ 10446, str::stream() << "mmap: can't map area of size 0 file: " << filename, length > 0);
- fd = open(filename, O_RDWR | O_NOATIME);
- if ( fd <= 0 ) {
- log() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
- fd = 0; // our sentinel for not opened
- return 0;
- }
+ fd = open(filename, O_RDWR | O_NOATIME);
+ if (fd <= 0) {
+ log() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
+ fd = 0; // our sentinel for not opened
+ return 0;
+ }
- unsigned long long filelen = lseek(fd, 0, SEEK_END);
- uassert(10447, str::stream() << "map file alloc failed, wanted: " << length << " filelen: " << filelen << ' ' << sizeof(size_t), filelen == length );
- lseek( fd, 0, SEEK_SET );
-
- void * view = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
- if ( view == MAP_FAILED ) {
- error() << " mmap() failed for " << filename << " len:" << length << " " << errnoWithDescription() << endl;
- if ( errno == ENOMEM ) {
- if( sizeof(void*) == 4 )
- error() << "mmap failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
- else
- error() << "mmap failed with out of memory. (64 bit build)" << endl;
- }
- return 0;
+ unsigned long long filelen = lseek(fd, 0, SEEK_END);
+ uassert(10447,
+ str::stream() << "map file alloc failed, wanted: " << length << " filelen: " << filelen
+ << ' ' << sizeof(size_t),
+ filelen == length);
+ lseek(fd, 0, SEEK_SET);
+
+ void* view = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (view == MAP_FAILED) {
+ error() << " mmap() failed for " << filename << " len:" << length << " "
+ << errnoWithDescription() << endl;
+ if (errno == ENOMEM) {
+ if (sizeof(void*) == 4)
+ error() << "mmap failed with out of memory. You are using a 32-bit build and "
+ "probably need to upgrade to 64" << endl;
+ else
+ error() << "mmap failed with out of memory. (64 bit build)" << endl;
}
+ return 0;
+ }
#if defined(__sun)
#warning madvise not supported on solaris yet
#else
- if ( options & SEQUENTIAL ) {
- if ( madvise( view , length , MADV_SEQUENTIAL ) ) {
- warning() << "map: madvise failed for " << filename << ' ' << errnoWithDescription() << endl;
- }
+ if (options & SEQUENTIAL) {
+ if (madvise(view, length, MADV_SEQUENTIAL)) {
+ warning() << "map: madvise failed for " << filename << ' ' << errnoWithDescription()
+ << endl;
}
+ }
#endif
- views.push_back( view );
+ views.push_back(view);
- return view;
- }
+ return view;
+}
- void* MemoryMappedFile::createReadOnlyMap() {
- void * x = mmap( /*start*/0 , len , PROT_READ , MAP_SHARED , fd , 0 );
- if( x == MAP_FAILED ) {
- if ( errno == ENOMEM ) {
- if( sizeof(void*) == 4 )
- error() << "mmap ro failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
- else
- error() << "mmap ro failed with out of memory. (64 bit build)" << endl;
- }
- return 0;
+void* MemoryMappedFile::createReadOnlyMap() {
+ void* x = mmap(/*start*/ 0, len, PROT_READ, MAP_SHARED, fd, 0);
+ if (x == MAP_FAILED) {
+ if (errno == ENOMEM) {
+ if (sizeof(void*) == 4)
+ error() << "mmap ro failed with out of memory. You are using a 32-bit build and "
+ "probably need to upgrade to 64" << endl;
+ else
+ error() << "mmap ro failed with out of memory. (64 bit build)" << endl;
}
- return x;
+ return 0;
}
+ return x;
+}
- void* MemoryMappedFile::createPrivateMap() {
- void * x = mmap( /*start*/0 , len , PROT_READ|PROT_WRITE , MAP_PRIVATE|MAP_NORESERVE , fd , 0 );
- if( x == MAP_FAILED ) {
- if ( errno == ENOMEM ) {
- if( sizeof(void*) == 4 ) {
- error() << "mmap private failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
- }
- else {
- error() << "mmap private failed with out of memory. (64 bit build)" << endl;
- }
+void* MemoryMappedFile::createPrivateMap() {
+ void* x = mmap(/*start*/ 0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
+ if (x == MAP_FAILED) {
+ if (errno == ENOMEM) {
+ if (sizeof(void*) == 4) {
+ error() << "mmap private failed with out of memory. You are using a 32-bit build "
+ "and probably need to upgrade to 64" << endl;
+ } else {
+ error() << "mmap private failed with out of memory. (64 bit build)" << endl;
}
- else {
- error() << "mmap private failed " << errnoWithDescription() << endl;
- }
- return 0;
+ } else {
+ error() << "mmap private failed " << errnoWithDescription() << endl;
}
-
- views.push_back(x);
- return x;
+ return 0;
}
- void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) {
-#if defined(__sun) // SERVER-8795
- LockMongoFilesExclusive lockMongoFiles;
+ views.push_back(x);
+ return x;
+}
+
+void* MemoryMappedFile::remapPrivateView(void* oldPrivateAddr) {
+#if defined(__sun) // SERVER-8795
+ LockMongoFilesExclusive lockMongoFiles;
#endif
- // don't unmap, just mmap over the old region
- void * x = mmap( oldPrivateAddr, len , PROT_READ|PROT_WRITE , MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED , fd , 0 );
- if( x == MAP_FAILED ) {
- int err = errno;
- error() << "13601 Couldn't remap private view: " << errnoWithDescription(err) << endl;
- log() << "aborting" << endl;
- printMemInfo();
- abort();
- }
- verify( x == oldPrivateAddr );
- return x;
+ // don't unmap, just mmap over the old region
+ void* x = mmap(oldPrivateAddr,
+ len,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_NORESERVE | MAP_FIXED,
+ fd,
+ 0);
+ if (x == MAP_FAILED) {
+ int err = errno;
+ error() << "13601 Couldn't remap private view: " << errnoWithDescription(err) << endl;
+ log() << "aborting" << endl;
+ printMemInfo();
+ abort();
}
+ verify(x == oldPrivateAddr);
+ return x;
+}
- void MemoryMappedFile::flush(bool sync) {
- if ( views.empty() || fd == 0 )
- return;
+void MemoryMappedFile::flush(bool sync) {
+ if (views.empty() || fd == 0)
+ return;
- bool useFsync = sync && !ProcessInfo::preferMsyncOverFSync();
+ bool useFsync = sync && !ProcessInfo::preferMsyncOverFSync();
- if ( useFsync ?
- fsync(fd) != 0 :
- msync(viewForFlushing(), len, sync ? MS_SYNC : MS_ASYNC) ) {
- // msync failed, this is very bad
- log() << (useFsync ? "fsync failed: " : "msync failed: ") << errnoWithDescription()
- << " file: " << filename() << endl;
- dataSyncFailedHandler();
- }
+ if (useFsync ? fsync(fd) != 0 : msync(viewForFlushing(), len, sync ? MS_SYNC : MS_ASYNC)) {
+ // msync/fsync failed, this is very bad
+ log() << (useFsync ? "fsync failed: " : "msync failed: ") << errnoWithDescription()
+ << " file: " << filename() << endl;
+ dataSyncFailedHandler();
}
+}
- class PosixFlushable : public MemoryMappedFile::Flushable {
- public:
- PosixFlushable( MemoryMappedFile* theFile, void* view , HANDLE fd , long len)
- : _theFile( theFile ), _view( view ), _fd(fd), _len(len), _id(_theFile->getUniqueId()) {
- }
-
- void flush() {
- if ( _view == NULL || _fd == 0 )
- return;
-
- if ( ProcessInfo::preferMsyncOverFSync() ?
- msync(_view, _len, MS_SYNC ) == 0 :
- fsync(_fd) == 0 ) {
- return;
- }
+class PosixFlushable : public MemoryMappedFile::Flushable {
+public:
+ PosixFlushable(MemoryMappedFile* theFile, void* view, HANDLE fd, long len)
+ : _theFile(theFile), _view(view), _fd(fd), _len(len), _id(_theFile->getUniqueId()) {}
- if ( errno == EBADF ) {
- // ok, we were unlocked, so this file was closed
- return;
- }
+ void flush() {
+ if (_view == NULL || _fd == 0)
+ return;
- // some error, lets see if we're supposed to exist
- LockMongoFilesShared mmfilesLock;
- std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
- std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
- if ( (it == mmfs.end()) || ((*it)->getUniqueId() != _id) ) {
- log() << "msync failed with: " << errnoWithDescription()
- << " but file doesn't exist anymore, so ignoring";
- // this was deleted while we were unlocked
- return;
- }
+ if (ProcessInfo::preferMsyncOverFSync() ? msync(_view, _len, MS_SYNC) == 0
+ : fsync(_fd) == 0) {
+ return;
+ }
- // we got an error, and we still exist, so this is bad, we fail
- log() << "msync " << errnoWithDescription() << endl;
- dataSyncFailedHandler();
+ if (errno == EBADF) {
+ // ok, we were unlocked, so this file was closed
+ return;
}
- MemoryMappedFile* _theFile;
- void * _view;
- HANDLE _fd;
- long _len;
- const uint64_t _id;
- };
+ // some error, let's see if we're supposed to exist
+ LockMongoFilesShared mmfilesLock;
+ std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
+ std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
+ if ((it == mmfs.end()) || ((*it)->getUniqueId() != _id)) {
+ log() << "msync failed with: " << errnoWithDescription()
+ << " but file doesn't exist anymore, so ignoring";
+ // this was deleted while we were unlocked
+ return;
+ }
- MemoryMappedFile::Flushable * MemoryMappedFile::prepareFlush() {
- return new PosixFlushable( this, viewForFlushing(), fd, len);
+ // we got an error and the file still exists, so this is bad; fail
+ log() << "msync " << errnoWithDescription() << endl;
+ dataSyncFailedHandler();
}
+ MemoryMappedFile* _theFile;
+ void* _view;
+ HANDLE _fd;
+ long _len;
+ const uint64_t _id;
+};
+
+MemoryMappedFile::Flushable* MemoryMappedFile::prepareFlush() {
+ return new PosixFlushable(this, viewForFlushing(), fd, len);
+}
-} // namespace mongo
+} // namespace mongo
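For context, a standalone sketch of the align-down arithmetic that _pageAlign() uses above: clear the low-order bits of the address. The 4096-byte page size is an assumed example value; the real code queries the page size via sysconf into g_minOSPageSizeBytes.

#include <cstdint>
#include <cstdio>

// Round a pointer down to the start of its page by clearing the low-order
// bits. Requires pageSize to be a power of two.
static void* pageAlignDown(void* p, std::size_t pageSize) {
    return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(p) & ~(pageSize - 1));
}

int main() {
    const std::size_t pageSize = 4096;
    void* p = reinterpret_cast<void*>(pageSize + 123);
    // Prints the unaligned address followed by its page-aligned counterpart.
    std::printf("%p -> %p\n", p, pageAlignDown(p, pageSize));
    return 0;
}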
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 6cc9d9cef73..8cdbd4ad7a4 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -55,844 +55,782 @@
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
namespace {
- /**
- * Declaration for the "newCollectionsUsePowerOf2Sizes" server parameter, which is now
- * deprecated in 3.0.
- * Note that:
- * - setting to true performs a no-op.
- * - setting to false will fail.
- */
- class NewCollectionsUsePowerOf2SizesParameter : public ExportedServerParameter<bool> {
- public:
- NewCollectionsUsePowerOf2SizesParameter() :
- ExportedServerParameter<bool>(ServerParameterSet::getGlobal(),
- "newCollectionsUsePowerOf2Sizes",
- &newCollectionsUsePowerOf2SizesFlag,
- true,
- true),
- newCollectionsUsePowerOf2SizesFlag(true) {
-
+/**
+ * Declaration for the "newCollectionsUsePowerOf2Sizes" server parameter, which is now
+ * deprecated in 3.0.
+ * Note that:
+ * - setting to true performs a no-op.
+ * - setting to false will fail.
+ */
+class NewCollectionsUsePowerOf2SizesParameter : public ExportedServerParameter<bool> {
+public:
+ NewCollectionsUsePowerOf2SizesParameter()
+ : ExportedServerParameter<bool>(ServerParameterSet::getGlobal(),
+ "newCollectionsUsePowerOf2Sizes",
+ &newCollectionsUsePowerOf2SizesFlag,
+ true,
+ true),
+ newCollectionsUsePowerOf2SizesFlag(true) {}
+
+ virtual Status validate(const bool& potentialNewValue) {
+ if (!potentialNewValue) {
+ return Status(ErrorCodes::BadValue,
+ "newCollectionsUsePowerOf2Sizes cannot be set to false. "
+ "Use noPadding instead during createCollection.");
}
- virtual Status validate(const bool& potentialNewValue) {
- if (!potentialNewValue) {
- return Status(ErrorCodes::BadValue,
- "newCollectionsUsePowerOf2Sizes cannot be set to false. "
- "Use noPadding instead during createCollection.");
- }
+ return Status::OK();
+ }
- return Status::OK();
- }
+private:
+ // Unused, needed for server parameter.
+ bool newCollectionsUsePowerOf2SizesFlag;
- private:
- // Unused, needed for server parameter.
- bool newCollectionsUsePowerOf2SizesFlag;
+} exportedNewCollectionsUsePowerOf2SizesParameter;
- } exportedNewCollectionsUsePowerOf2SizesParameter;
+int _massageExtentSize(const ExtentManager* em, long long size) {
+ if (size < em->minSize())
+ return em->minSize();
+ if (size > em->maxSize())
+ return em->maxSize();
- int _massageExtentSize(const ExtentManager* em, long long size) {
- if (size < em->minSize())
- return em->minSize();
- if (size > em->maxSize())
- return em->maxSize();
+ return static_cast<int>(size);
+}
- return static_cast<int>(size);
- }
+} // namespace
-} // namespace
+/**
+ * Registers the insertion of a new entry in the _collections cache with the RecoveryUnit,
+ * allowing for rollback.
+ */
+class MMAPV1DatabaseCatalogEntry::EntryInsertion : public RecoveryUnit::Change {
+public:
+ EntryInsertion(StringData ns, MMAPV1DatabaseCatalogEntry* entry)
+ : _ns(ns.toString()), _entry(entry) {}
- /**
- * Registers the insertion of a new entry in the _collections cache with the RecoveryUnit,
- * allowing for rollback.
- */
- class MMAPV1DatabaseCatalogEntry::EntryInsertion : public RecoveryUnit::Change {
- public:
- EntryInsertion(StringData ns, MMAPV1DatabaseCatalogEntry* entry)
- : _ns(ns.toString()), _entry(entry) { }
+ void rollback() {
+ _entry->_removeFromCache(NULL, _ns);
+ }
- void rollback() {
- _entry->_removeFromCache(NULL, _ns);
- }
+ void commit() {}
- void commit() { }
- private:
- const std::string _ns;
- MMAPV1DatabaseCatalogEntry* const _entry;
- };
-
- /**
- * Registers the removal of an entry from the _collections cache with the RecoveryUnit,
- * delaying actual deletion of the information until the change is commited. This allows
- * for easy rollback.
- */
- class MMAPV1DatabaseCatalogEntry::EntryRemoval : public RecoveryUnit::Change {
- public:
- // Rollback removing the collection from the cache. Takes ownership of the cachedEntry,
- // and will delete it if removal is final.
- EntryRemoval(StringData ns,
- MMAPV1DatabaseCatalogEntry* catalogEntry,
- Entry *cachedEntry)
- : _ns(ns.toString()), _catalogEntry(catalogEntry), _cachedEntry(cachedEntry) { }
-
- void rollback() {
- _catalogEntry->_collections[_ns] = _cachedEntry;
- }
+private:
+ const std::string _ns;
+ MMAPV1DatabaseCatalogEntry* const _entry;
+};
- void commit() {
- delete _cachedEntry;
- }
+/**
+ * Registers the removal of an entry from the _collections cache with the RecoveryUnit,
+ * delaying actual deletion of the information until the change is committed. This allows
+ * for easy rollback.
+ */
+class MMAPV1DatabaseCatalogEntry::EntryRemoval : public RecoveryUnit::Change {
+public:
+ // Rollback removing the collection from the cache. Takes ownership of the cachedEntry,
+ // and will delete it if removal is final.
+ EntryRemoval(StringData ns, MMAPV1DatabaseCatalogEntry* catalogEntry, Entry* cachedEntry)
+ : _ns(ns.toString()), _catalogEntry(catalogEntry), _cachedEntry(cachedEntry) {}
+
+ void rollback() {
+ _catalogEntry->_collections[_ns] = _cachedEntry;
+ }
- private:
- const std::string _ns;
- MMAPV1DatabaseCatalogEntry* const _catalogEntry;
- Entry* const _cachedEntry;
- };
-
- MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry( OperationContext* txn,
- StringData name,
- StringData path,
- bool directoryPerDB,
- bool transient )
- : DatabaseCatalogEntry( name ),
- _path( path.toString() ),
- _namespaceIndex(_path, name.toString()),
- _extentManager(name, path, directoryPerDB) {
-
- invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
-
- try {
- // First init the .ns file. If this fails, we may leak the .ns file, but this is OK
- // because subsequent openDB will go through this code path again.
- _namespaceIndex.init(txn);
-
- // Initialize the extent manager. This will create the first data file (.0) if needed
- // and if this fails we would leak the .ns file above. Leaking the .ns or .0 file is
- // acceptable, because subsequent openDB calls will exercise the code path again.
- Status s = _extentManager.init(txn);
- if (!s.isOK()) {
- msgasserted(16966, str::stream() << "_extentManager.init failed: " << s.toString());
- }
+ void commit() {
+ delete _cachedEntry;
+ }
- // This is the actual loading of the on-disk structures into cache.
- _init( txn );
- }
- catch (const DBException& dbe) {
- warning() << "database " << path << " " << name
- << " could not be opened due to DBException " << dbe.getCode() << ": "
- << dbe.what();
- throw;
- }
- catch (const std::exception& e) {
- warning() << "database " << path << " " << name
- << " could not be opened " << e.what();
- throw;
- }
+private:
+ const std::string _ns;
+ MMAPV1DatabaseCatalogEntry* const _catalogEntry;
+ Entry* const _cachedEntry;
+};
+
+MMAPV1DatabaseCatalogEntry::MMAPV1DatabaseCatalogEntry(
+ OperationContext* txn, StringData name, StringData path, bool directoryPerDB, bool transient)
+ : DatabaseCatalogEntry(name),
+ _path(path.toString()),
+ _namespaceIndex(_path, name.toString()),
+ _extentManager(name, path, directoryPerDB) {
+ invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
+
+ try {
+ // First init the .ns file. If this fails, we may leak the .ns file, but this is OK
+ // because subsequent openDB will go through this code path again.
+ _namespaceIndex.init(txn);
+
+ // Initialize the extent manager. This will create the first data file (.0) if needed
+ // and if this fails we would leak the .ns file above. Leaking the .ns or .0 file is
+ // acceptable, because subsequent openDB calls will exercise the code path again.
+ Status s = _extentManager.init(txn);
+ if (!s.isOK()) {
+ msgasserted(16966, str::stream() << "_extentManager.init failed: " << s.toString());
+ }
+
+ // This is the actual loading of the on-disk structures into cache.
+ _init(txn);
+ } catch (const DBException& dbe) {
+ warning() << "database " << path << " " << name
+ << " could not be opened due to DBException " << dbe.getCode() << ": "
+ << dbe.what();
+ throw;
+ } catch (const std::exception& e) {
+ warning() << "database " << path << " " << name << " could not be opened " << e.what();
+ throw;
}
+}
- MMAPV1DatabaseCatalogEntry::~MMAPV1DatabaseCatalogEntry() {
- for ( CollectionMap::const_iterator i = _collections.begin();
- i != _collections.end();
- ++i ) {
- delete i->second;
- }
- _collections.clear();
+MMAPV1DatabaseCatalogEntry::~MMAPV1DatabaseCatalogEntry() {
+ for (CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i) {
+ delete i->second;
}
+ _collections.clear();
+}
- intmax_t dbSize( const string& database ); // from repair_database.cpp
+intmax_t dbSize(const string& database); // from repair_database.cpp
- int64_t MMAPV1DatabaseCatalogEntry::sizeOnDisk( OperationContext* opCtx ) const {
- return static_cast<int64_t>( dbSize( name() ) );
+int64_t MMAPV1DatabaseCatalogEntry::sizeOnDisk(OperationContext* opCtx) const {
+ return static_cast<int64_t>(dbSize(name()));
+}
+
+void MMAPV1DatabaseCatalogEntry::_removeFromCache(RecoveryUnit* ru, StringData ns) {
+ CollectionMap::iterator i = _collections.find(ns.toString());
+ if (i == _collections.end()) {
+ return;
}
- void MMAPV1DatabaseCatalogEntry::_removeFromCache(RecoveryUnit* ru,
- StringData ns) {
- CollectionMap::iterator i = _collections.find(ns.toString());
- if (i == _collections.end()) {
- return;
- }
+ // If there is an operation context, register a rollback to restore the cache entry
+ if (ru) {
+ ru->registerChange(new EntryRemoval(ns, this, i->second));
+ } else {
+ delete i->second;
+ }
+ _collections.erase(i);
+}
- // If there is an operation context, register a rollback to restore the cache entry
- if (ru) {
- ru->registerChange(new EntryRemoval(ns, this, i->second));
- }
- else {
- delete i->second;
- }
- _collections.erase(i);
+Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* txn, StringData ns) {
+ invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ _removeFromCache(txn->recoveryUnit(), ns);
+
+ NamespaceDetails* details = _namespaceIndex.details(ns);
+
+ if (!details) {
+ return Status(ErrorCodes::NamespaceNotFound, str::stream() << "ns not found: " << ns);
}
- Status MMAPV1DatabaseCatalogEntry::dropCollection(OperationContext* txn, StringData ns) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- _removeFromCache(txn->recoveryUnit(), ns);
+ invariant(details->nIndexes == 0); // TODO: delete instead?
+ invariant(details->indexBuildsInProgress == 0); // TODO: delete instead?
- NamespaceDetails* details = _namespaceIndex.details( ns );
+ _removeNamespaceFromNamespaceCollection(txn, ns);
- if ( !details ) {
- return Status( ErrorCodes::NamespaceNotFound, str::stream() << "ns not found: " << ns );
- }
+ // free extents
+ if (!details->firstExtent.isNull()) {
+ _extentManager.freeExtents(txn, details->firstExtent, details->lastExtent);
+ *txn->recoveryUnit()->writing(&details->firstExtent) = DiskLoc().setInvalid();
+ *txn->recoveryUnit()->writing(&details->lastExtent) = DiskLoc().setInvalid();
+ }
- invariant( details->nIndexes == 0 ); // TODO: delete instead?
- invariant( details->indexBuildsInProgress == 0 ); // TODO: delete instead?
+ // remove from the catalog hashtable
+ _namespaceIndex.kill_ns(txn, ns);
- _removeNamespaceFromNamespaceCollection( txn, ns );
+ return Status::OK();
+}
- // free extents
- if( !details->firstExtent.isNull() ) {
- _extentManager.freeExtents(txn, details->firstExtent, details->lastExtent);
- *txn->recoveryUnit()->writing( &details->firstExtent ) = DiskLoc().setInvalid();
- *txn->recoveryUnit()->writing( &details->lastExtent ) = DiskLoc().setInvalid();
- }
- // remove from the catalog hashtable
- _namespaceIndex.kill_ns( txn, ns );
+Status MMAPV1DatabaseCatalogEntry::renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp) {
+ Status s = _renameSingleNamespace(txn, fromNS, toNS, stayTemp);
+ if (!s.isOK())
+ return s;
- return Status::OK();
- }
+ NamespaceDetails* details = _namespaceIndex.details(toNS);
+ invariant(details);
+ RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore();
+ auto cursor = systemIndexRecordStore->getCursor(txn);
+ while (auto record = cursor->next()) {
+ BSONObj oldIndexSpec = record->data.releaseToBson();
+ if (fromNS != oldIndexSpec["ns"].valuestrsafe())
+ continue;
- Status MMAPV1DatabaseCatalogEntry::renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp ) {
- Status s = _renameSingleNamespace( txn, fromNS, toNS, stayTemp );
- if ( !s.isOK() )
- return s;
-
- NamespaceDetails* details = _namespaceIndex.details( toNS );
- invariant( details );
-
- RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore();
- auto cursor = systemIndexRecordStore->getCursor(txn);
- while (auto record = cursor->next()) {
- BSONObj oldIndexSpec = record->data.releaseToBson();
- if ( fromNS != oldIndexSpec["ns"].valuestrsafe() )
- continue;
-
- BSONObj newIndexSpec;
- {
- BSONObjBuilder b;
- BSONObjIterator i( oldIndexSpec );
- while( i.more() ) {
- BSONElement e = i.next();
- if ( strcmp( e.fieldName(), "ns" ) != 0 )
- b.append( e );
- else
- b << "ns" << toNS;
- }
- newIndexSpec = b.obj();
+ BSONObj newIndexSpec;
+ {
+ BSONObjBuilder b;
+ BSONObjIterator i(oldIndexSpec);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (strcmp(e.fieldName(), "ns") != 0)
+ b.append(e);
+ else
+ b << "ns" << toNS;
}
+ newIndexSpec = b.obj();
+ }
- StatusWith<RecordId> newIndexSpecLoc =
- systemIndexRecordStore->insertRecord( txn,
- newIndexSpec.objdata(),
- newIndexSpec.objsize(),
- false );
- if ( !newIndexSpecLoc.isOK() )
- return newIndexSpecLoc.getStatus();
-
- const string& indexName = oldIndexSpec.getStringField( "name" );
-
- {
- // fix IndexDetails pointer
- NamespaceDetailsCollectionCatalogEntry ce( toNS,
- details,
- _getNamespaceRecordStore(),
- systemIndexRecordStore,
- this );
- int indexI = ce._findIndexNumber( txn, indexName );
-
- IndexDetails& indexDetails = details->idx(indexI);
- *txn->recoveryUnit()->writing(&indexDetails.info) =
- DiskLoc::fromRecordId(newIndexSpecLoc.getValue());
- }
+ StatusWith<RecordId> newIndexSpecLoc = systemIndexRecordStore->insertRecord(
+ txn, newIndexSpec.objdata(), newIndexSpec.objsize(), false);
+ if (!newIndexSpecLoc.isOK())
+ return newIndexSpecLoc.getStatus();
- {
- // move underlying namespac
- string oldIndexNs = IndexDescriptor::makeIndexNamespace( fromNS, indexName );
- string newIndexNs = IndexDescriptor::makeIndexNamespace( toNS, indexName );
+ const string& indexName = oldIndexSpec.getStringField("name");
- Status s = _renameSingleNamespace( txn, oldIndexNs, newIndexNs, false );
- if ( !s.isOK() )
- return s;
- }
+ {
+ // fix IndexDetails pointer
+ NamespaceDetailsCollectionCatalogEntry ce(
+ toNS, details, _getNamespaceRecordStore(), systemIndexRecordStore, this);
+ int indexI = ce._findIndexNumber(txn, indexName);
- systemIndexRecordStore->deleteRecord( txn, record->id );
+ IndexDetails& indexDetails = details->idx(indexI);
+ *txn->recoveryUnit()->writing(&indexDetails.info) =
+ DiskLoc::fromRecordId(newIndexSpecLoc.getValue());
}
- return Status::OK();
+ {
+ // move the underlying namespace
+ string oldIndexNs = IndexDescriptor::makeIndexNamespace(fromNS, indexName);
+ string newIndexNs = IndexDescriptor::makeIndexNamespace(toNS, indexName);
+
+ Status s = _renameSingleNamespace(txn, oldIndexNs, newIndexNs, false);
+ if (!s.isOK())
+ return s;
+ }
+
+ systemIndexRecordStore->deleteRecord(txn, record->id);
}
- Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp ) {
- // some sanity checking
- NamespaceDetails* fromDetails = _namespaceIndex.details( fromNS );
- if ( !fromDetails )
- return Status( ErrorCodes::BadValue, "from namespace doesn't exist" );
+ return Status::OK();
+}
- if ( _namespaceIndex.details( toNS ) )
- return Status( ErrorCodes::BadValue, "to namespace already exists" );
+Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp) {
+ // some sanity checking
+ NamespaceDetails* fromDetails = _namespaceIndex.details(fromNS);
+ if (!fromDetails)
+ return Status(ErrorCodes::BadValue, "from namespace doesn't exist");
- _removeFromCache(txn->recoveryUnit(), fromNS);
+ if (_namespaceIndex.details(toNS))
+ return Status(ErrorCodes::BadValue, "to namespace already exists");
- // at this point, we haven't done anything destructive yet
+ _removeFromCache(txn->recoveryUnit(), fromNS);
- // ----
- // actually start moving
- // ----
+ // at this point, we haven't done anything destructive yet
- // this could throw, but if it does we're ok
- _namespaceIndex.add_ns( txn, toNS, fromDetails );
- NamespaceDetails* toDetails = _namespaceIndex.details( toNS );
+ // ----
+ // actually start moving
+ // ----
- try {
- toDetails->copyingFrom(txn,
- toNS,
- _namespaceIndex,
- fromDetails); // fixes extraOffset
- }
- catch( DBException& ) {
- // could end up here if .ns is full - if so try to clean up / roll back a little
- _namespaceIndex.kill_ns( txn, toNS );
- throw;
- }
+ // this could throw, but if it does we're ok
+ _namespaceIndex.add_ns(txn, toNS, fromDetails);
+ NamespaceDetails* toDetails = _namespaceIndex.details(toNS);
- // at this point, code .ns stuff moved
+ try {
+ toDetails->copyingFrom(txn, toNS, _namespaceIndex, fromDetails); // fixes extraOffset
+ } catch (DBException&) {
+ // could end up here if .ns is full - if so try to clean up / roll back a little
+ _namespaceIndex.kill_ns(txn, toNS);
+ throw;
+ }
- _namespaceIndex.kill_ns( txn, fromNS );
- fromDetails = NULL;
+ // at this point, the .ns metadata has been moved
- // fix system.namespaces
- BSONObj newSpec;
- RecordId oldSpecLocation;
- {
+ _namespaceIndex.kill_ns(txn, fromNS);
+ fromDetails = NULL;
- BSONObj oldSpec;
- {
- RecordStoreV1Base* rs = _getNamespaceRecordStore();
- auto cursor = rs->getCursor(txn);
- while (auto record = cursor->next()) {
- BSONObj entry = record->data.releaseToBson();
- if ( fromNS == entry["name"].String() ) {
- oldSpecLocation = record->id;
- oldSpec = entry.getOwned();
- break;
- }
+ // fix system.namespaces
+ BSONObj newSpec;
+ RecordId oldSpecLocation;
+ {
+ BSONObj oldSpec;
+ {
+ RecordStoreV1Base* rs = _getNamespaceRecordStore();
+ auto cursor = rs->getCursor(txn);
+ while (auto record = cursor->next()) {
+ BSONObj entry = record->data.releaseToBson();
+ if (fromNS == entry["name"].String()) {
+ oldSpecLocation = record->id;
+ oldSpec = entry.getOwned();
+ break;
}
}
- invariant( !oldSpec.isEmpty() );
- invariant( !oldSpecLocation.isNull() );
+ }
+ invariant(!oldSpec.isEmpty());
+ invariant(!oldSpecLocation.isNull());
- BSONObjBuilder b;
- BSONObjIterator i( oldSpec.getObjectField( "options" ) );
- while( i.more() ) {
- BSONElement e = i.next();
- if ( strcmp( e.fieldName(), "create" ) != 0 ) {
- if (stayTemp || (strcmp(e.fieldName(), "temp") != 0))
- b.append( e );
- }
- else {
- b << "create" << toNS;
- }
+ BSONObjBuilder b;
+ BSONObjIterator i(oldSpec.getObjectField("options"));
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (strcmp(e.fieldName(), "create") != 0) {
+ if (stayTemp || (strcmp(e.fieldName(), "temp") != 0))
+ b.append(e);
+ } else {
+ b << "create" << toNS;
}
- newSpec = b.obj();
}
+ newSpec = b.obj();
+ }
- _addNamespaceToNamespaceCollection( txn, toNS, newSpec.isEmpty() ? 0 : &newSpec );
-
- _getNamespaceRecordStore()->deleteRecord( txn, oldSpecLocation );
+ _addNamespaceToNamespaceCollection(txn, toNS, newSpec.isEmpty() ? 0 : &newSpec);
- Entry*& entry = _collections[toNS.toString()];
- invariant( entry == NULL );
- txn->recoveryUnit()->registerChange(new EntryInsertion(toNS, this));
- entry = new Entry();
- _insertInCache(txn, toNS, entry);
+ _getNamespaceRecordStore()->deleteRecord(txn, oldSpecLocation);
- return Status::OK();
- }
+ Entry*& entry = _collections[toNS.toString()];
+ invariant(entry == NULL);
+ txn->recoveryUnit()->registerChange(new EntryInsertion(toNS, this));
+ entry = new Entry();
+ _insertInCache(txn, toNS, entry);
- void MMAPV1DatabaseCatalogEntry::appendExtraStats( OperationContext* opCtx,
- BSONObjBuilder* output,
- double scale ) const {
- if ( isEmpty() ) {
- output->appendNumber( "fileSize", 0 );
- }
- else {
- output->appendNumber( "fileSize", _extentManager.fileSize() / scale );
- output->appendNumber( "nsSizeMB", static_cast<int>( _namespaceIndex.fileLength() /
- ( 1024 * 1024 ) ) );
+ return Status::OK();
+}
- int freeListSize = 0;
- int64_t freeListSpace = 0;
- _extentManager.freeListStats(opCtx, &freeListSize, &freeListSpace);
+void MMAPV1DatabaseCatalogEntry::appendExtraStats(OperationContext* opCtx,
+ BSONObjBuilder* output,
+ double scale) const {
+ if (isEmpty()) {
+ output->appendNumber("fileSize", 0);
+ } else {
+ output->appendNumber("fileSize", _extentManager.fileSize() / scale);
+ output->appendNumber("nsSizeMB",
+ static_cast<int>(_namespaceIndex.fileLength() / (1024 * 1024)));
- BSONObjBuilder extentFreeList( output->subobjStart( "extentFreeList" ) );
- extentFreeList.append( "num", freeListSize );
- extentFreeList.appendNumber( "totalSize",
- static_cast<long long>( freeListSpace / scale ) );
- extentFreeList.done();
+ int freeListSize = 0;
+ int64_t freeListSpace = 0;
+ _extentManager.freeListStats(opCtx, &freeListSize, &freeListSpace);
- {
+ BSONObjBuilder extentFreeList(output->subobjStart("extentFreeList"));
+ extentFreeList.append("num", freeListSize);
+ extentFreeList.appendNumber("totalSize", static_cast<long long>(freeListSpace / scale));
+ extentFreeList.done();
- const DataFileVersion version = _extentManager.getFileFormat(opCtx);
+ {
+ const DataFileVersion version = _extentManager.getFileFormat(opCtx);
- BSONObjBuilder dataFileVersion( output->subobjStart( "dataFileVersion" ) );
- dataFileVersion.append( "major", version.majorRaw() );
- dataFileVersion.append( "minor", version.minorRaw() );
- dataFileVersion.done();
- }
+ BSONObjBuilder dataFileVersion(output->subobjStart("dataFileVersion"));
+ dataFileVersion.append("major", version.majorRaw());
+ dataFileVersion.append("minor", version.minorRaw());
+ dataFileVersion.done();
}
-
}
+}
- bool MMAPV1DatabaseCatalogEntry::isOlderThan24( OperationContext* opCtx ) const {
- if ( _extentManager.numFiles() == 0 )
- return false;
+bool MMAPV1DatabaseCatalogEntry::isOlderThan24(OperationContext* opCtx) const {
+ if (_extentManager.numFiles() == 0)
+ return false;
- const DataFileVersion version = _extentManager.getFileFormat(opCtx);
+ const DataFileVersion version = _extentManager.getFileFormat(opCtx);
- invariant(version.isCompatibleWithCurrentCode());
+ invariant(version.isCompatibleWithCurrentCode());
- return !version.is24IndexClean();
- }
+ return !version.is24IndexClean();
+}
- void MMAPV1DatabaseCatalogEntry::markIndexSafe24AndUp( OperationContext* opCtx ) {
- if ( _extentManager.numFiles() == 0 )
- return;
+void MMAPV1DatabaseCatalogEntry::markIndexSafe24AndUp(OperationContext* opCtx) {
+ if (_extentManager.numFiles() == 0)
+ return;
- DataFileVersion version = _extentManager.getFileFormat(opCtx);
+ DataFileVersion version = _extentManager.getFileFormat(opCtx);
- invariant(version.isCompatibleWithCurrentCode());
+ invariant(version.isCompatibleWithCurrentCode());
- if (version.is24IndexClean())
- return; // nothing to do
+ if (version.is24IndexClean())
+ return; // nothing to do
- version.setIs24IndexClean();
- _extentManager.setFileFormat(opCtx, version);
- }
+ version.setIs24IndexClean();
+ _extentManager.setFileFormat(opCtx, version);
+}
- bool MMAPV1DatabaseCatalogEntry::currentFilesCompatible( OperationContext* opCtx ) const {
- if ( _extentManager.numFiles() == 0 )
- return true;
+bool MMAPV1DatabaseCatalogEntry::currentFilesCompatible(OperationContext* opCtx) const {
+ if (_extentManager.numFiles() == 0)
+ return true;
- return _extentManager.getOpenFile( 0 )->getHeader()->version.isCompatibleWithCurrentCode();
- }
+ return _extentManager.getOpenFile(0)->getHeader()->version.isCompatibleWithCurrentCode();
+}
- void MMAPV1DatabaseCatalogEntry::getCollectionNamespaces( std::list<std::string>* tofill ) const {
- _namespaceIndex.getCollectionNamespaces( tofill );
- }
+void MMAPV1DatabaseCatalogEntry::getCollectionNamespaces(std::list<std::string>* tofill) const {
+ _namespaceIndex.getCollectionNamespaces(tofill);
+}
- void MMAPV1DatabaseCatalogEntry::_ensureSystemCollection(OperationContext* txn,
- StringData ns) {
-
- NamespaceDetails* details = _namespaceIndex.details(ns);
- if (details) {
- return;
- }
- _namespaceIndex.add_ns( txn, ns, DiskLoc(), false );
+void MMAPV1DatabaseCatalogEntry::_ensureSystemCollection(OperationContext* txn, StringData ns) {
+ NamespaceDetails* details = _namespaceIndex.details(ns);
+ if (details) {
+ return;
}
+ _namespaceIndex.add_ns(txn, ns, DiskLoc(), false);
+}
- void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
- WriteUnitOfWork wunit(txn);
+void MMAPV1DatabaseCatalogEntry::_init(OperationContext* txn) {
+ WriteUnitOfWork wunit(txn);
- // Upgrade freelist
- const NamespaceString oldFreeList(name(), "$freelist");
- NamespaceDetails* freeListDetails = _namespaceIndex.details(oldFreeList.ns());
- if (freeListDetails) {
- if (!freeListDetails->firstExtent.isNull()) {
- _extentManager.freeExtents(txn,
- freeListDetails->firstExtent,
- freeListDetails->lastExtent);
- }
-
- _namespaceIndex.kill_ns(txn, oldFreeList.ns());
+ // Upgrade freelist
+ const NamespaceString oldFreeList(name(), "$freelist");
+ NamespaceDetails* freeListDetails = _namespaceIndex.details(oldFreeList.ns());
+ if (freeListDetails) {
+ if (!freeListDetails->firstExtent.isNull()) {
+ _extentManager.freeExtents(
+ txn, freeListDetails->firstExtent, freeListDetails->lastExtent);
}
- DataFileVersion version = _extentManager.getFileFormat(txn);
- if (version.isCompatibleWithCurrentCode() && !version.mayHave28Freelist()) {
- // Any DB that can be opened and written to gets this flag set.
- version.setMayHave28Freelist();
- _extentManager.setFileFormat(txn, version);
- }
+ _namespaceIndex.kill_ns(txn, oldFreeList.ns());
+ }
- const NamespaceString nsi(name(), "system.indexes");
- const NamespaceString nsn(name(), "system.namespaces");
+ DataFileVersion version = _extentManager.getFileFormat(txn);
+ if (version.isCompatibleWithCurrentCode() && !version.mayHave28Freelist()) {
+ // Any DB that can be opened and written to gets this flag set.
+ version.setMayHave28Freelist();
+ _extentManager.setFileFormat(txn, version);
+ }
- bool isSystemNamespacesGoingToBeNew = _namespaceIndex.details(nsn.toString()) == NULL;
- bool isSystemIndexesGoingToBeNew = _namespaceIndex.details(nsi.toString()) == NULL;
+ const NamespaceString nsi(name(), "system.indexes");
+ const NamespaceString nsn(name(), "system.namespaces");
- _ensureSystemCollection(txn, nsn.toString());
- _ensureSystemCollection(txn, nsi.toString());
+ bool isSystemNamespacesGoingToBeNew = _namespaceIndex.details(nsn.toString()) == NULL;
+ bool isSystemIndexesGoingToBeNew = _namespaceIndex.details(nsi.toString()) == NULL;
- if (isSystemNamespacesGoingToBeNew) {
- txn->recoveryUnit()->registerChange(new EntryInsertion(nsn.toString(), this));
- }
- if (isSystemIndexesGoingToBeNew) {
- txn->recoveryUnit()->registerChange(new EntryInsertion(nsi.toString(), this));
- }
+ _ensureSystemCollection(txn, nsn.toString());
+ _ensureSystemCollection(txn, nsi.toString());
- Entry*& indexEntry = _collections[nsi.toString()];
- Entry*& nsEntry = _collections[nsn.toString()];
+ if (isSystemNamespacesGoingToBeNew) {
+ txn->recoveryUnit()->registerChange(new EntryInsertion(nsn.toString(), this));
+ }
+ if (isSystemIndexesGoingToBeNew) {
+ txn->recoveryUnit()->registerChange(new EntryInsertion(nsi.toString(), this));
+ }
- NamespaceDetails* const indexDetails = _namespaceIndex.details(nsi.toString());
- NamespaceDetails* const nsDetails = _namespaceIndex.details(nsn.toString());
+ Entry*& indexEntry = _collections[nsi.toString()];
+ Entry*& nsEntry = _collections[nsn.toString()];
- // order has to be:
- // 1) ns rs
- // 2) i rs
- // 3) catalog entries
+ NamespaceDetails* const indexDetails = _namespaceIndex.details(nsi.toString());
+ NamespaceDetails* const nsDetails = _namespaceIndex.details(nsn.toString());
- if (!nsEntry) {
- nsEntry = new Entry();
+ // order has to be:
+ // 1) the system.namespaces record store
+ // 2) the system.indexes record store
+ // 3) the catalog entries
- NamespaceDetailsRSV1MetaData* md = new NamespaceDetailsRSV1MetaData(nsn.toString(),
- nsDetails);
- nsEntry->recordStore.reset(new SimpleRecordStoreV1(txn,
- nsn.toString(),
- md,
- &_extentManager,
- false));
- }
+ if (!nsEntry) {
+ nsEntry = new Entry();
- if (!indexEntry) {
- indexEntry = new Entry();
+ NamespaceDetailsRSV1MetaData* md =
+ new NamespaceDetailsRSV1MetaData(nsn.toString(), nsDetails);
+ nsEntry->recordStore.reset(
+ new SimpleRecordStoreV1(txn, nsn.toString(), md, &_extentManager, false));
+ }
- NamespaceDetailsRSV1MetaData* md =
- new NamespaceDetailsRSV1MetaData(nsi.toString(), indexDetails);
+ if (!indexEntry) {
+ indexEntry = new Entry();
- indexEntry->recordStore.reset(new SimpleRecordStoreV1(txn,
- nsi.toString(),
- md,
- &_extentManager,
- true));
- }
+ NamespaceDetailsRSV1MetaData* md =
+ new NamespaceDetailsRSV1MetaData(nsi.toString(), indexDetails);
- if (isSystemIndexesGoingToBeNew) {
- _addNamespaceToNamespaceCollection(txn, nsi.toString(), NULL);
- }
+ indexEntry->recordStore.reset(
+ new SimpleRecordStoreV1(txn, nsi.toString(), md, &_extentManager, true));
+ }
- if (!nsEntry->catalogEntry) {
- nsEntry->catalogEntry.reset(
- new NamespaceDetailsCollectionCatalogEntry(nsn.toString(),
- nsDetails,
- nsEntry->recordStore.get(),
- indexEntry->recordStore.get(),
- this));
- }
+ if (isSystemIndexesGoingToBeNew) {
+ _addNamespaceToNamespaceCollection(txn, nsi.toString(), NULL);
+ }
- if (!indexEntry->catalogEntry) {
- indexEntry->catalogEntry.reset(
- new NamespaceDetailsCollectionCatalogEntry(nsi.toString(),
- indexDetails,
- nsEntry->recordStore.get(),
- indexEntry->recordStore.get(),
- this));
- }
+ if (!nsEntry->catalogEntry) {
+ nsEntry->catalogEntry.reset(
+ new NamespaceDetailsCollectionCatalogEntry(nsn.toString(),
+ nsDetails,
+ nsEntry->recordStore.get(),
+ indexEntry->recordStore.get(),
+ this));
+ }
- wunit.commit();
+ if (!indexEntry->catalogEntry) {
+ indexEntry->catalogEntry.reset(
+ new NamespaceDetailsCollectionCatalogEntry(nsi.toString(),
+ indexDetails,
+ nsEntry->recordStore.get(),
+ indexEntry->recordStore.get(),
+ this));
+ }
- // Now put everything in the cache of namespaces. None of the operations below do any
- // transactional operations.
- std::list<std::string> namespaces;
- _namespaceIndex.getCollectionNamespaces(&namespaces);
+ wunit.commit();
- for (std::list<std::string>::const_iterator i = namespaces.begin();
- i != namespaces.end(); // we add to the list in the loop so can't cache end().
- i++) {
+ // Now put everything in the cache of namespaces. None of the operations below do any
+ // transactional operations.
+ std::list<std::string> namespaces;
+ _namespaceIndex.getCollectionNamespaces(&namespaces);
- const std::string& ns = *i;
- Entry*& entry = _collections[ns];
+ for (std::list<std::string>::const_iterator i = namespaces.begin();
+ i != namespaces.end(); // we add to the list in the loop so can't cache end().
+ i++) {
+ const std::string& ns = *i;
+ Entry*& entry = _collections[ns];
- // The two cases where entry is not null is for system.indexes and system.namespaces,
- // which we manually instantiated above. It is OK to skip these two collections,
- // because they don't have indexes on them anyway.
- if (entry) {
- continue;
- }
+ // The two cases where the entry is not null are system.indexes and system.namespaces,
+ // which we manually instantiated above. It is OK to skip these two collections,
+ // because they don't have indexes on them anyway.
+ if (entry) {
+ continue;
+ }
- entry = new Entry();
- _insertInCache(txn, ns, entry);
+ entry = new Entry();
+ _insertInCache(txn, ns, entry);
- // Add the indexes on this namespace to the list of namespaces to load.
- std::vector<std::string> indexNames;
- entry->catalogEntry->getAllIndexes(txn, &indexNames);
+ // Add the indexes on this namespace to the list of namespaces to load.
+ std::vector<std::string> indexNames;
+ entry->catalogEntry->getAllIndexes(txn, &indexNames);
- for (size_t i = 0; i < indexNames.size(); i++) {
- namespaces.push_back(IndexDescriptor::makeIndexNamespace(ns, indexNames[i]));
- }
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ namespaces.push_back(IndexDescriptor::makeIndexNamespace(ns, indexNames[i]));
}
}
+}
- Status MMAPV1DatabaseCatalogEntry::createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options,
- bool allocateDefaultSpace ) {
- if ( _namespaceIndex.details( ns ) ) {
- return Status( ErrorCodes::NamespaceExists,
- str::stream() << "namespace already exists: " << ns );
- }
+Status MMAPV1DatabaseCatalogEntry::createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options,
+ bool allocateDefaultSpace) {
+ if (_namespaceIndex.details(ns)) {
+ return Status(ErrorCodes::NamespaceExists,
+ str::stream() << "namespace already exists: " << ns);
+ }
- BSONObj optionsAsBSON = options.toBSON();
- _addNamespaceToNamespaceCollection( txn, ns, &optionsAsBSON );
+ BSONObj optionsAsBSON = options.toBSON();
+ _addNamespaceToNamespaceCollection(txn, ns, &optionsAsBSON);
- _namespaceIndex.add_ns( txn, ns, DiskLoc(), options.capped );
- NamespaceDetails* details = _namespaceIndex.details(ns);
+ _namespaceIndex.add_ns(txn, ns, DiskLoc(), options.capped);
+ NamespaceDetails* details = _namespaceIndex.details(ns);
- // Set the flags.
- NamespaceDetailsRSV1MetaData(ns, details).replaceUserFlags(txn, options.flags);
+ // Set the flags.
+ NamespaceDetailsRSV1MetaData(ns, details).replaceUserFlags(txn, options.flags);
- if (options.capped && options.cappedMaxDocs > 0) {
- txn->recoveryUnit()->writingInt( details->maxDocsInCapped ) = options.cappedMaxDocs;
- }
-
- Entry*& entry = _collections[ns.toString()];
- invariant( !entry );
- txn->recoveryUnit()->registerChange(new EntryInsertion(ns, this));
- entry = new Entry();
- _insertInCache(txn, ns, entry);
+ if (options.capped && options.cappedMaxDocs > 0) {
+ txn->recoveryUnit()->writingInt(details->maxDocsInCapped) = options.cappedMaxDocs;
+ }
- if ( allocateDefaultSpace ) {
- RecordStoreV1Base* rs = _getRecordStore( ns );
- if ( options.initialNumExtents > 0 ) {
- int size = _massageExtentSize( &_extentManager, options.cappedSize );
- for ( int i = 0; i < options.initialNumExtents; i++ ) {
- rs->increaseStorageSize( txn, size, false );
- }
- }
- else if ( !options.initialExtentSizes.empty() ) {
- for ( size_t i = 0; i < options.initialExtentSizes.size(); i++ ) {
- int size = options.initialExtentSizes[i];
- size = _massageExtentSize( &_extentManager, size );
- rs->increaseStorageSize( txn, size, false );
- }
+ Entry*& entry = _collections[ns.toString()];
+ invariant(!entry);
+ txn->recoveryUnit()->registerChange(new EntryInsertion(ns, this));
+ entry = new Entry();
+ _insertInCache(txn, ns, entry);
+
+ if (allocateDefaultSpace) {
+ RecordStoreV1Base* rs = _getRecordStore(ns);
+ if (options.initialNumExtents > 0) {
+ int size = _massageExtentSize(&_extentManager, options.cappedSize);
+ for (int i = 0; i < options.initialNumExtents; i++) {
+ rs->increaseStorageSize(txn, size, false);
}
- else if ( options.capped ) {
- // normal
- do {
- // Must do this at least once, otherwise we leave the collection with no
- // extents, which is invalid.
- int sz = _massageExtentSize( &_extentManager,
- options.cappedSize - rs->storageSize(txn) );
- sz &= 0xffffff00;
- rs->increaseStorageSize( txn, sz, false );
- } while( rs->storageSize(txn) < options.cappedSize );
- }
- else {
- rs->increaseStorageSize( txn, _extentManager.initialSize( 128 ), false );
+ } else if (!options.initialExtentSizes.empty()) {
+ for (size_t i = 0; i < options.initialExtentSizes.size(); i++) {
+ int size = options.initialExtentSizes[i];
+ size = _massageExtentSize(&_extentManager, size);
+ rs->increaseStorageSize(txn, size, false);
}
+ } else if (options.capped) {
+ // normal
+ do {
+ // Must do this at least once, otherwise we leave the collection with no
+ // extents, which is invalid.
+ int sz =
+ _massageExtentSize(&_extentManager, options.cappedSize - rs->storageSize(txn));
+ sz &= 0xffffff00;
+ rs->increaseStorageSize(txn, sz, false);
+ } while (rs->storageSize(txn) < options.cappedSize);
+ } else {
+ rs->increaseStorageSize(txn, _extentManager.initialSize(128), false);
}
-
- return Status::OK();
}
- void MMAPV1DatabaseCatalogEntry::createNamespaceForIndex(OperationContext* txn,
- StringData name) {
- // This is a simplified form of createCollection.
- invariant(!_namespaceIndex.details(name));
-
- _addNamespaceToNamespaceCollection(txn, name, NULL);
- _namespaceIndex.add_ns(txn, name, DiskLoc(), false);
+ return Status::OK();
+}
- Entry*& entry = _collections[name.toString()];
- invariant( !entry );
- txn->recoveryUnit()->registerChange(new EntryInsertion(name, this));
- entry = new Entry();
- _insertInCache(txn, name, entry);
- }
+void MMAPV1DatabaseCatalogEntry::createNamespaceForIndex(OperationContext* txn, StringData name) {
+ // This is a simplified form of createCollection.
+ invariant(!_namespaceIndex.details(name));
- CollectionCatalogEntry* MMAPV1DatabaseCatalogEntry::getCollectionCatalogEntry(
- StringData ns ) const {
+ _addNamespaceToNamespaceCollection(txn, name, NULL);
+ _namespaceIndex.add_ns(txn, name, DiskLoc(), false);
- CollectionMap::const_iterator i = _collections.find( ns.toString() );
- if (i == _collections.end()) {
- return NULL;
- }
+ Entry*& entry = _collections[name.toString()];
+ invariant(!entry);
+ txn->recoveryUnit()->registerChange(new EntryInsertion(name, this));
+ entry = new Entry();
+ _insertInCache(txn, name, entry);
+}
- invariant( i->second->catalogEntry.get() );
- return i->second->catalogEntry.get();
+CollectionCatalogEntry* MMAPV1DatabaseCatalogEntry::getCollectionCatalogEntry(StringData ns) const {
+ CollectionMap::const_iterator i = _collections.find(ns.toString());
+ if (i == _collections.end()) {
+ return NULL;
}
- void MMAPV1DatabaseCatalogEntry::_insertInCache(OperationContext* txn,
- StringData ns,
- Entry* entry) {
+ invariant(i->second->catalogEntry.get());
+ return i->second->catalogEntry.get();
+}
- NamespaceDetails* details = _namespaceIndex.details(ns);
- invariant(details);
+void MMAPV1DatabaseCatalogEntry::_insertInCache(OperationContext* txn,
+ StringData ns,
+ Entry* entry) {
+ NamespaceDetails* details = _namespaceIndex.details(ns);
+ invariant(details);
- entry->catalogEntry.reset(
- new NamespaceDetailsCollectionCatalogEntry(ns,
- details,
- _getNamespaceRecordStore(),
- _getIndexRecordStore(),
- this));
+ entry->catalogEntry.reset(new NamespaceDetailsCollectionCatalogEntry(
+ ns, details, _getNamespaceRecordStore(), _getIndexRecordStore(), this));
- unique_ptr<NamespaceDetailsRSV1MetaData> md(new NamespaceDetailsRSV1MetaData(ns, details));
- const NamespaceString nss(ns);
+ unique_ptr<NamespaceDetailsRSV1MetaData> md(new NamespaceDetailsRSV1MetaData(ns, details));
+ const NamespaceString nss(ns);
- if (details->isCapped) {
- entry->recordStore.reset(new CappedRecordStoreV1(txn,
- NULL,
- ns,
- md.release(),
- &_extentManager,
- nss.coll() == "system.indexes"));
- }
- else {
- entry->recordStore.reset(new SimpleRecordStoreV1(txn,
- ns,
- md.release(),
- &_extentManager,
- nss.coll() == "system.indexes"));
- }
+ if (details->isCapped) {
+ entry->recordStore.reset(new CappedRecordStoreV1(
+ txn, NULL, ns, md.release(), &_extentManager, nss.coll() == "system.indexes"));
+ } else {
+ entry->recordStore.reset(new SimpleRecordStoreV1(
+ txn, ns, md.release(), &_extentManager, nss.coll() == "system.indexes"));
}
+}
- RecordStore* MMAPV1DatabaseCatalogEntry::getRecordStore( StringData ns ) const {
- return _getRecordStore( ns );
+RecordStore* MMAPV1DatabaseCatalogEntry::getRecordStore(StringData ns) const {
+ return _getRecordStore(ns);
+}
+
+RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getRecordStore(StringData ns) const {
+ CollectionMap::const_iterator i = _collections.find(ns.toString());
+ if (i == _collections.end()) {
+ return NULL;
}
- RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getRecordStore( StringData ns ) const {
- CollectionMap::const_iterator i = _collections.find( ns.toString() );
- if (i == _collections.end()) {
- return NULL;
- }
+ invariant(i->second->recordStore.get());
+ return i->second->recordStore.get();
+}
- invariant( i->second->recordStore.get() );
- return i->second->recordStore.get();
- }
+IndexAccessMethod* MMAPV1DatabaseCatalogEntry::getIndex(OperationContext* txn,
+ const CollectionCatalogEntry* collection,
+ IndexCatalogEntry* entry) {
+ const string& type = entry->descriptor()->getAccessMethodName();
- IndexAccessMethod* MMAPV1DatabaseCatalogEntry::getIndex( OperationContext* txn,
- const CollectionCatalogEntry* collection,
- IndexCatalogEntry* entry ) {
- const string& type = entry->descriptor()->getAccessMethodName();
+ string ns = collection->ns().ns();
- string ns = collection->ns().ns();
+ RecordStoreV1Base* rs = _getRecordStore(entry->descriptor()->indexNamespace());
+ invariant(rs);
- RecordStoreV1Base* rs = _getRecordStore(entry->descriptor()->indexNamespace());
- invariant(rs);
+ std::unique_ptr<SortedDataInterface> btree(
+ getMMAPV1Interface(entry->headManager(),
+ rs,
+ &rs->savedCursors,
+ entry->ordering(),
+ entry->descriptor()->indexNamespace(),
+ entry->descriptor()->version()));
- std::unique_ptr<SortedDataInterface> btree(
- getMMAPV1Interface(entry->headManager(),
- rs,
- &rs->savedCursors,
- entry->ordering(),
- entry->descriptor()->indexNamespace(),
- entry->descriptor()->version()));
+ if (IndexNames::HASHED == type)
+ return new HashAccessMethod(entry, btree.release());
- if (IndexNames::HASHED == type)
- return new HashAccessMethod( entry, btree.release() );
+ if (IndexNames::GEO_2DSPHERE == type)
+ return new S2AccessMethod(entry, btree.release());
- if (IndexNames::GEO_2DSPHERE == type)
- return new S2AccessMethod( entry, btree.release() );
+ if (IndexNames::TEXT == type)
+ return new FTSAccessMethod(entry, btree.release());
- if (IndexNames::TEXT == type)
- return new FTSAccessMethod( entry, btree.release() );
+ if (IndexNames::GEO_HAYSTACK == type)
+ return new HaystackAccessMethod(entry, btree.release());
- if (IndexNames::GEO_HAYSTACK == type)
- return new HaystackAccessMethod( entry, btree.release() );
+ if ("" == type)
+ return new BtreeAccessMethod(entry, btree.release());
- if ("" == type)
- return new BtreeAccessMethod( entry, btree.release() );
+ if (IndexNames::GEO_2D == type)
+ return new TwoDAccessMethod(entry, btree.release());
- if (IndexNames::GEO_2D == type)
- return new TwoDAccessMethod( entry, btree.release() );
+ log() << "Can't find index for keyPattern " << entry->descriptor()->keyPattern();
+ fassertFailed(17489);
+}
- log() << "Can't find index for keyPattern " << entry->descriptor()->keyPattern();
- fassertFailed(17489);
- }
+RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getIndexRecordStore() {
+ const NamespaceString nss(name(), "system.indexes");
+ Entry* entry = _collections[nss.toString()];
+ invariant(entry);
- RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getIndexRecordStore() {
- const NamespaceString nss(name(), "system.indexes");
- Entry* entry = _collections[nss.toString()];
- invariant( entry );
+ return entry->recordStore.get();
+}
- return entry->recordStore.get();
- }
+RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getNamespaceRecordStore() const {
+ const NamespaceString nss(name(), "system.namespaces");
+ CollectionMap::const_iterator i = _collections.find(nss.toString());
+ invariant(i != _collections.end());
- RecordStoreV1Base* MMAPV1DatabaseCatalogEntry::_getNamespaceRecordStore() const {
- const NamespaceString nss( name(), "system.namespaces" );
- CollectionMap::const_iterator i = _collections.find( nss.toString() );
- invariant( i != _collections.end() );
+ return i->second->recordStore.get();
+}
- return i->second->recordStore.get();
+void MMAPV1DatabaseCatalogEntry::_addNamespaceToNamespaceCollection(OperationContext* txn,
+ StringData ns,
+ const BSONObj* options) {
+ if (nsToCollectionSubstring(ns) == "system.namespaces") {
+ // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
+ return;
}
- void MMAPV1DatabaseCatalogEntry::_addNamespaceToNamespaceCollection(OperationContext* txn,
- StringData ns,
- const BSONObj* options) {
+ BSONObjBuilder b;
+ b.append("name", ns);
+ if (options && !options->isEmpty()) {
+ b.append("options", *options);
+ }
- if (nsToCollectionSubstring(ns) == "system.namespaces") {
- // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
- return;
- }
+ const BSONObj obj = b.done();
- BSONObjBuilder b;
- b.append("name", ns);
- if (options && !options->isEmpty()) {
- b.append("options", *options);
- }
+ RecordStoreV1Base* rs = _getNamespaceRecordStore();
+ invariant(rs);
- const BSONObj obj = b.done();
+ StatusWith<RecordId> loc = rs->insertRecord(txn, obj.objdata(), obj.objsize(), false);
+ massertStatusOK(loc.getStatus());
+}
- RecordStoreV1Base* rs = _getNamespaceRecordStore();
- invariant( rs );
-
- StatusWith<RecordId> loc = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
- massertStatusOK( loc.getStatus() );
+void MMAPV1DatabaseCatalogEntry::_removeNamespaceFromNamespaceCollection(OperationContext* txn,
+ StringData ns) {
+ if (nsToCollectionSubstring(ns) == "system.namespaces") {
+ // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
+ return;
}
- void MMAPV1DatabaseCatalogEntry::_removeNamespaceFromNamespaceCollection(
- OperationContext* txn,
- StringData ns ) {
+ RecordStoreV1Base* rs = _getNamespaceRecordStore();
+ invariant(rs);
- if ( nsToCollectionSubstring( ns ) == "system.namespaces" ) {
- // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
- return;
- }
-
- RecordStoreV1Base* rs = _getNamespaceRecordStore();
- invariant( rs );
-
- auto cursor = rs->getCursor(txn);
- while (auto record = cursor->next()) {
- BSONObj entry = record->data.releaseToBson();
- BSONElement name = entry["name"];
- if ( name.type() == String && name.String() == ns ) {
- rs->deleteRecord( txn, record->id );
- break;
- }
+ auto cursor = rs->getCursor(txn);
+ while (auto record = cursor->next()) {
+ BSONObj entry = record->data.releaseToBson();
+ BSONElement name = entry["name"];
+ if (name.type() == String && name.String() == ns) {
+ rs->deleteRecord(txn, record->id);
+ break;
}
}
+}
- CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions( OperationContext* txn,
- StringData ns ) const {
- if ( nsToCollectionSubstring( ns ) == "system.namespaces" ) {
- return CollectionOptions();
- }
+CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions(OperationContext* txn,
+ StringData ns) const {
+ if (nsToCollectionSubstring(ns) == "system.namespaces") {
+ return CollectionOptions();
+ }
- RecordStoreV1Base* rs = _getNamespaceRecordStore();
- invariant( rs );
-
- auto cursor = rs->getCursor(txn);
- while (auto record = cursor->next()) {
- BSONObj entry = record->data.releaseToBson();
- BSONElement name = entry["name"];
- if ( name.type() == String && name.String() == ns ) {
- CollectionOptions options;
- if ( entry["options"].isABSONObj() ) {
- Status status = options.parse( entry["options"].Obj() );
- fassert( 18523, status );
- }
- return options;
+ RecordStoreV1Base* rs = _getNamespaceRecordStore();
+ invariant(rs);
+
+ auto cursor = rs->getCursor(txn);
+ while (auto record = cursor->next()) {
+ BSONObj entry = record->data.releaseToBson();
+ BSONElement name = entry["name"];
+ if (name.type() == String && name.String() == ns) {
+ CollectionOptions options;
+ if (entry["options"].isABSONObj()) {
+ Status status = options.parse(entry["options"].Obj());
+ fassert(18523, status);
}
+ return options;
}
-
- return CollectionOptions();
}
-} // namespace mongo
+
+ return CollectionOptions();
+}
+} // namespace mongo
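
getIndex() above keeps the existing dispatch on the index access-method name (hashed, 2dsphere, text, haystack, 2d, or the empty string for a plain btree) and fasserts on anything else. A minimal standalone sketch of that dispatch-by-name pattern follows; the names are purely illustrative and none of the MongoDB types are used.

    // Sketch: map an access-method name to a factory, as getIndex() does with its
    // chain of string comparisons. Illustrative only; not MongoDB code.
    #include <functional>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct AccessMethod {
        virtual ~AccessMethod() = default;
        virtual std::string name() const = 0;
    };
    struct BtreeMethod : AccessMethod {
        std::string name() const override { return "btree"; }
    };
    struct HashedMethod : AccessMethod {
        std::string name() const override { return "hashed"; }
    };

    // Factory table keyed by the access-method name; the empty string maps to the
    // default btree, mirroring the 'if ("" == type)' branch above.
    std::unique_ptr<AccessMethod> makeAccessMethod(const std::string& type) {
        static const std::map<std::string, std::function<std::unique_ptr<AccessMethod>()>> factories = {
            {"", [] { return std::make_unique<BtreeMethod>(); }},
            {"hashed", [] { return std::make_unique<HashedMethod>(); }},
        };
        auto it = factories.find(type);
        if (it == factories.end()) {
            return nullptr;  // the real code fasserts on an unknown type
        }
        return it->second();
    }

    int main() {
        std::cout << makeAccessMethod("hashed")->name() << "\n";  // prints "hashed"
        std::cout << makeAccessMethod("")->name() << "\n";        // prints "btree"
    }
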
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
index 1db5e8a1f87..2a922d3d89c 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
@@ -39,145 +39,150 @@
namespace mongo {
- class CollectionCatalogEntry;
- struct CollectionOptions;
- class IndexAccessMethod;
- class IndexCatalogEntry;
- class IndexDescriptor;
- class RecordStore;
- class RecordStoreV1Base;
- class RecoveryUnit;
- class OperationContext;
-
- class MMAPV1DatabaseCatalogEntry : public DatabaseCatalogEntry {
- public:
- MMAPV1DatabaseCatalogEntry( OperationContext* txn,
- StringData name,
- StringData path,
- bool directoryperdb,
- bool transient );
-
- virtual ~MMAPV1DatabaseCatalogEntry();
-
- // these two seem the same and yet different
- // TODO(ERH): consolidate into one ideally
- virtual bool exists() const { return _namespaceIndex.pathExists(); }
- virtual bool isEmpty() const { return !_namespaceIndex.allocated(); }
- virtual bool hasUserData() const {
- // The two collections which exist and can't be removed are:
- // system.indexes
- // system.namespaces
- return _collections.size() > 2;
- }
-
- virtual int64_t sizeOnDisk( OperationContext* opCtx ) const;
-
- virtual bool isOlderThan24( OperationContext* opCtx ) const;
- virtual void markIndexSafe24AndUp( OperationContext* opCtx );
-
- virtual bool currentFilesCompatible( OperationContext* opCtx ) const;
+class CollectionCatalogEntry;
+struct CollectionOptions;
+class IndexAccessMethod;
+class IndexCatalogEntry;
+class IndexDescriptor;
+class RecordStore;
+class RecordStoreV1Base;
+class RecoveryUnit;
+class OperationContext;
+
+class MMAPV1DatabaseCatalogEntry : public DatabaseCatalogEntry {
+public:
+ MMAPV1DatabaseCatalogEntry(OperationContext* txn,
+ StringData name,
+ StringData path,
+ bool directoryperdb,
+ bool transient);
+
+ virtual ~MMAPV1DatabaseCatalogEntry();
+
+ // these two seem the same and yet different
+ // TODO(ERH): consolidate into one ideally
+ virtual bool exists() const {
+ return _namespaceIndex.pathExists();
+ }
+ virtual bool isEmpty() const {
+ return !_namespaceIndex.allocated();
+ }
+ virtual bool hasUserData() const {
+ // The two collections which exist and can't be removed are:
+ // system.indexes
+ // system.namespaces
+ return _collections.size() > 2;
+ }
+
+ virtual int64_t sizeOnDisk(OperationContext* opCtx) const;
+
+ virtual bool isOlderThan24(OperationContext* opCtx) const;
+ virtual void markIndexSafe24AndUp(OperationContext* opCtx);
+
+ virtual bool currentFilesCompatible(OperationContext* opCtx) const;
+
+ virtual void appendExtraStats(OperationContext* opCtx, BSONObjBuilder* out, double scale) const;
+
+ Status createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options,
+ bool allocateDefaultSpace);
+
+ Status dropCollection(OperationContext* txn, StringData ns);
+
+ Status renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp);
+
+ void getCollectionNamespaces(std::list<std::string>* tofill) const;
+
+ /**
+ * will return NULL if ns does not exist
+ */
+ CollectionCatalogEntry* getCollectionCatalogEntry(StringData ns) const;
+
+ RecordStore* getRecordStore(StringData ns) const;
+
+ IndexAccessMethod* getIndex(OperationContext* txn,
+ const CollectionCatalogEntry* collection,
+ IndexCatalogEntry* index);
+
+ const MmapV1ExtentManager* getExtentManager() const {
+ return &_extentManager;
+ }
+ MmapV1ExtentManager* getExtentManager() {
+ return &_extentManager;
+ }
+
+ CollectionOptions getCollectionOptions(OperationContext* txn, StringData ns) const;
+
+ /**
+ * Creates a CollectionCatalogEntry in the form of an index rather than a collection.
+ * MMAPv1 puts both indexes and collections into CCEs. A namespace named 'name' must not
+ * exist.
+ */
+ void createNamespaceForIndex(OperationContext* txn, StringData name);
+
+private:
+ class EntryInsertion;
+ class EntryRemoval;
+
+ friend class NamespaceDetailsCollectionCatalogEntry;
+
+ // The _collections map is a cache for efficiently looking up namespace information. Access
+ // to the cache is protected by holding the appropriate DB lock. Regular operations
+ // (insert/update/delete/query) hold intent locks on the database and they access the cache
+ // directly. Metadata operations, such as create db/collection, etc acquire exclusive lock
+ // on the database, which protects against concurrent readers of the cache.
+ //
+ // Once initialized, the cache must remain consistent with the data in the memory-mapped
+ // database files through _removeFromCache and _insertInCache. These methods use the
+ // RecoveryUnit to ensure correct handling of rollback.
+
+ struct Entry {
+ std::unique_ptr<CollectionCatalogEntry> catalogEntry;
+ std::unique_ptr<RecordStoreV1Base> recordStore;
+ };
- virtual void appendExtraStats( OperationContext* opCtx,
- BSONObjBuilder* out,
- double scale ) const;
-
- Status createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options,
- bool allocateDefaultSpace );
-
- Status dropCollection( OperationContext* txn, StringData ns );
-
- Status renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp );
-
- void getCollectionNamespaces( std::list<std::string>* tofill ) const;
-
- /**
- * will return NULL if ns does not exist
- */
- CollectionCatalogEntry* getCollectionCatalogEntry( StringData ns ) const;
-
- RecordStore* getRecordStore( StringData ns ) const;
-
- IndexAccessMethod* getIndex( OperationContext* txn,
- const CollectionCatalogEntry* collection,
- IndexCatalogEntry* index );
-
- const MmapV1ExtentManager* getExtentManager() const { return &_extentManager; }
- MmapV1ExtentManager* getExtentManager() { return &_extentManager; }
-
- CollectionOptions getCollectionOptions( OperationContext* txn,
- StringData ns ) const;
-
- /**
- * Creates a CollectionCatalogEntry in the form of an index rather than a collection.
- * MMAPv1 puts both indexes and collections into CCEs. A namespace named 'name' must not
- * exist.
- */
- void createNamespaceForIndex(OperationContext* txn, StringData name);
-
- private:
- class EntryInsertion;
- class EntryRemoval;
-
- friend class NamespaceDetailsCollectionCatalogEntry;
-
- // The _collections map is a cache for efficiently looking up namespace information. Access
- // to the cache is protected by holding the appropriate DB lock. Regular operations
- // (insert/update/delete/query) hold intent locks on the database and they access the cache
- // directly. Metadata operations, such as create db/collection, etc acquire exclusive lock
- // on the database, which protects against concurrent readers of the cache.
- //
- // Once initialized, the cache must remain consistent with the data in the memory-mapped
- // database files through _removeFromCache and _insertInCache. These methods use the
- // RecoveryUnit to ensure correct handling of rollback.
-
- struct Entry {
- std::unique_ptr<CollectionCatalogEntry> catalogEntry;
- std::unique_ptr<RecordStoreV1Base> recordStore;
- };
-
- typedef std::map<std::string, Entry*> CollectionMap;
+ typedef std::map<std::string, Entry*> CollectionMap;
- RecordStoreV1Base* _getIndexRecordStore();
- RecordStoreV1Base* _getNamespaceRecordStore() const;
- RecordStoreV1Base* _getRecordStore(StringData ns) const;
+ RecordStoreV1Base* _getIndexRecordStore();
+ RecordStoreV1Base* _getNamespaceRecordStore() const;
+ RecordStoreV1Base* _getRecordStore(StringData ns) const;
- void _addNamespaceToNamespaceCollection(OperationContext* txn,
- StringData ns,
- const BSONObj* options);
+ void _addNamespaceToNamespaceCollection(OperationContext* txn,
+ StringData ns,
+ const BSONObj* options);
- void _removeNamespaceFromNamespaceCollection(OperationContext* txn, StringData ns);
+ void _removeNamespaceFromNamespaceCollection(OperationContext* txn, StringData ns);
- Status _renameSingleNamespace( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp );
+ Status _renameSingleNamespace(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp);
- void _ensureSystemCollection(OperationContext* txn, StringData ns);
+ void _ensureSystemCollection(OperationContext* txn, StringData ns);
- void _init( OperationContext* txn );
+ void _init(OperationContext* txn);
- /**
- * Populate the _collections cache.
- */
- void _insertInCache(OperationContext* opCtx, StringData ns, Entry* entry);
+ /**
+ * Populate the _collections cache.
+ */
+ void _insertInCache(OperationContext* opCtx, StringData ns, Entry* entry);
- /**
- * Drop cached information for specified namespace. If a RecoveryUnit is specified,
- * use it to allow rollback. When ru is null, removal is unconditional.
- */
- void _removeFromCache(RecoveryUnit* ru, StringData ns);
+ /**
+ * Drop cached information for specified namespace. If a RecoveryUnit is specified,
+ * use it to allow rollback. When ru is null, removal is unconditional.
+ */
+ void _removeFromCache(RecoveryUnit* ru, StringData ns);
- const std::string _path;
+ const std::string _path;
- NamespaceIndex _namespaceIndex;
- MmapV1ExtentManager _extentManager;
- CollectionMap _collections;
- };
+ NamespaceIndex _namespaceIndex;
+ MmapV1ExtentManager _extentManager;
+ CollectionMap _collections;
+};
}
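
The header above documents the _collections cache: a map from namespace string to an Entry that owns both the catalog entry and the record store, with lookups returning NULL for unknown namespaces. A minimal standalone sketch of that pattern is below; the stub types are illustrative, not MongoDB code.

    // Sketch: namespace-keyed cache of owned entries, with a NULL-on-miss lookup
    // like _getRecordStore() and an insert that asserts the namespace was not
    // already cached. Illustrative only.
    #include <cassert>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct RecordStoreStub {
        explicit RecordStoreStub(std::string ns) : ns(std::move(ns)) {}
        std::string ns;
    };

    struct Entry {
        std::unique_ptr<RecordStoreStub> recordStore;
    };

    class CollectionCache {
    public:
        // Returns NULL when the namespace is unknown, like _getRecordStore().
        RecordStoreStub* getRecordStore(const std::string& ns) const {
            auto it = _collections.find(ns);
            if (it == _collections.end())
                return nullptr;
            return it->second->recordStore.get();
        }

        // Asserts that we are not overwriting an existing cache entry.
        void insert(const std::string& ns, std::unique_ptr<Entry> entry) {
            bool inserted = _collections.emplace(ns, std::move(entry)).second;
            assert(inserted);
            (void)inserted;
        }

    private:
        std::map<std::string, std::unique_ptr<Entry>> _collections;
    };

    int main() {
        CollectionCache cache;
        auto entry = std::make_unique<Entry>();
        entry->recordStore = std::make_unique<RecordStoreStub>("test.system.namespaces");
        cache.insert("test.system.namespaces", std::move(entry));
        std::cout << (cache.getRecordStore("test.foo") == nullptr) << "\n";  // 1: not cached
        std::cout << cache.getRecordStore("test.system.namespaces")->ns << "\n";
    }
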
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index b4550f135db..b1fd028a1d5 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -53,304 +53,300 @@
namespace mongo {
- using std::endl;
- using std::ifstream;
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::endl;
+using std::ifstream;
+using std::string;
+using std::stringstream;
+using std::vector;
namespace {
#if !defined(__sun)
- // if doingRepair is true don't consider unclean shutdown an error
- void acquirePathLock(MMAPV1Engine* storageEngine,
- bool doingRepair,
- const StorageEngineLockFile& lockFile) {
- string name = lockFile.getFilespec();
- bool oldFile = lockFile.createdByUncleanShutdown();
-
- if ( oldFile ) {
- // we check this here because we want to see if we can get the lock
- // if we can't, then its probably just another mongod running
-
- string errmsg;
- if (doingRepair && dur::haveJournalFiles()) {
- errmsg = "************** \n"
- "You specified --repair but there are dirty journal files. Please\n"
- "restart without --repair to allow the journal files to be replayed.\n"
- "If you wish to repair all databases, please shutdown cleanly and\n"
- "run with --repair again.\n"
- "**************";
- }
- else if (storageGlobalParams.dur) {
- if (!dur::haveJournalFiles(/*anyFiles=*/true)) {
- // Passing anyFiles=true as we are trying to protect against starting in an
- // unclean state with the journal directory unmounted. If there are any files,
- // even prealloc files, then it means that it is mounted so we can continue.
- // Previously there was an issue (SERVER-5056) where we would fail to start up
- // if killed during prealloc.
-
- vector<string> dbnames;
- storageEngine->listDatabases( &dbnames );
-
- if ( dbnames.size() == 0 ) {
- // this means that mongod crashed
- // between initial startup and when journaling was initialized
- // it is safe to continue
- }
- else {
- errmsg = str::stream()
- << "************** \n"
- << "old lock file: " << name << ". probably means unclean shutdown,\n"
- << "but there are no journal files to recover.\n"
- << "this is likely human error or filesystem corruption.\n"
- << "please make sure that your journal directory is mounted.\n"
- << "found " << dbnames.size() << " dbs.\n"
- << "see: http://dochub.mongodb.org/core/repair for more information\n"
- << "*************";
- }
-
- }
- }
- else {
- if (!dur::haveJournalFiles() && !doingRepair) {
+// if doingRepair is true don't consider unclean shutdown an error
+void acquirePathLock(MMAPV1Engine* storageEngine,
+ bool doingRepair,
+ const StorageEngineLockFile& lockFile) {
+ string name = lockFile.getFilespec();
+ bool oldFile = lockFile.createdByUncleanShutdown();
+
+ if (oldFile) {
+ // we check this here because we want to see if we can get the lock
+        // if we can't, then it's probably just another mongod running
+
+ string errmsg;
+ if (doingRepair && dur::haveJournalFiles()) {
+ errmsg =
+ "************** \n"
+ "You specified --repair but there are dirty journal files. Please\n"
+ "restart without --repair to allow the journal files to be replayed.\n"
+ "If you wish to repair all databases, please shutdown cleanly and\n"
+ "run with --repair again.\n"
+ "**************";
+ } else if (storageGlobalParams.dur) {
+ if (!dur::haveJournalFiles(/*anyFiles=*/true)) {
+ // Passing anyFiles=true as we are trying to protect against starting in an
+ // unclean state with the journal directory unmounted. If there are any files,
+ // even prealloc files, then it means that it is mounted so we can continue.
+ // Previously there was an issue (SERVER-5056) where we would fail to start up
+ // if killed during prealloc.
+
+ vector<string> dbnames;
+ storageEngine->listDatabases(&dbnames);
+
+ if (dbnames.size() == 0) {
+ // this means that mongod crashed
+ // between initial startup and when journaling was initialized
+ // it is safe to continue
+ } else {
errmsg = str::stream()
- << "************** \n"
- << "Unclean shutdown detected.\n"
- << "Please visit http://dochub.mongodb.org/core/repair for recovery instructions.\n"
- << "*************";
+ << "************** \n"
+ << "old lock file: " << name << ". probably means unclean shutdown,\n"
+ << "but there are no journal files to recover.\n"
+ << "this is likely human error or filesystem corruption.\n"
+ << "please make sure that your journal directory is mounted.\n"
+ << "found " << dbnames.size() << " dbs.\n"
+ << "see: http://dochub.mongodb.org/core/repair for more information\n"
+ << "*************";
}
}
-
- if (!errmsg.empty()) {
- log() << errmsg << endl;
- uassert( 12596 , "old lock file" , 0 );
+ } else {
+ if (!dur::haveJournalFiles() && !doingRepair) {
+ errmsg = str::stream() << "************** \n"
+ << "Unclean shutdown detected.\n"
+ << "Please visit http://dochub.mongodb.org/core/repair for "
+ "recovery instructions.\n"
+ << "*************";
}
}
- // Not related to lock file, but this is where we handle unclean shutdown
- if (!storageGlobalParams.dur && dur::haveJournalFiles()) {
- log() << "**************" << endl;
- log() << "Error: journal files are present in journal directory, yet starting without journaling enabled." << endl;
- log() << "It is recommended that you start with journaling enabled so that recovery may occur." << endl;
- log() << "**************" << endl;
- uasserted(13597, "can't start without --journal enabled when journal/ files are present");
+ if (!errmsg.empty()) {
+ log() << errmsg << endl;
+ uassert(12596, "old lock file", 0);
}
}
+
+ // Not related to lock file, but this is where we handle unclean shutdown
+ if (!storageGlobalParams.dur && dur::haveJournalFiles()) {
+ log() << "**************" << endl;
+ log() << "Error: journal files are present in journal directory, yet starting without "
+ "journaling enabled." << endl;
+ log() << "It is recommended that you start with journaling enabled so that recovery may "
+ "occur." << endl;
+ log() << "**************" << endl;
+ uasserted(13597, "can't start without --journal enabled when journal/ files are present");
+ }
+}
#else
- void acquirePathLock(MMAPV1Engine* storageEngine,
- bool doingRepair,
- const StorageEngineLockFile& lockFile) {
- // TODO - this is very bad that the code above not running here.
-
- // Not related to lock file, but this is where we handle unclean shutdown
- if (!storageGlobalParams.dur && dur::haveJournalFiles()) {
- log() << "**************" << endl;
- log() << "Error: journal files are present in journal directory, yet starting without --journal enabled." << endl;
- log() << "It is recommended that you start with journaling enabled so that recovery may occur." << endl;
- log() << "Alternatively (not recommended), you can backup everything, then delete the journal files, and run --repair" << endl;
- log() << "**************" << endl;
- uasserted(13618, "can't start without --journal enabled when journal/ files are present");
- }
+void acquirePathLock(MMAPV1Engine* storageEngine,
+ bool doingRepair,
+ const StorageEngineLockFile& lockFile) {
+    // TODO - it is very bad that the code above is not running here.
+
+ // Not related to lock file, but this is where we handle unclean shutdown
+ if (!storageGlobalParams.dur && dur::haveJournalFiles()) {
+ log() << "**************" << endl;
+ log() << "Error: journal files are present in journal directory, yet starting without "
+ "--journal enabled." << endl;
+ log() << "It is recommended that you start with journaling enabled so that recovery may "
+ "occur." << endl;
+ log() << "Alternatively (not recommended), you can backup everything, then delete the "
+ "journal files, and run --repair" << endl;
+ log() << "**************" << endl;
+ uasserted(13618, "can't start without --journal enabled when journal/ files are present");
}
+}
#endif // !defined(__sun)
- /// warn if readahead > 256KB (gridfs chunk size)
- void checkReadAhead(const string& dir) {
+/// warn if readahead > 256KB (gridfs chunk size)
+void checkReadAhead(const string& dir) {
#ifdef __linux__
- try {
- const dev_t dev = getPartition(dir);
-
- // This path handles the case where the filesystem uses the whole device (including LVM)
- string path = str::stream() <<
- "/sys/dev/block/" << major(dev) << ':' << minor(dev) << "/queue/read_ahead_kb";
-
- if (!boost::filesystem::exists(path)){
- // This path handles the case where the filesystem is on a partition.
- path = str::stream()
- << "/sys/dev/block/" << major(dev) << ':' << minor(dev) // this is a symlink
- << "/.." // parent directory of a partition is for the whole device
- << "/queue/read_ahead_kb";
- }
+ try {
+ const dev_t dev = getPartition(dir);
+
+ // This path handles the case where the filesystem uses the whole device (including LVM)
+ string path = str::stream() << "/sys/dev/block/" << major(dev) << ':' << minor(dev)
+ << "/queue/read_ahead_kb";
+
+ if (!boost::filesystem::exists(path)) {
+ // This path handles the case where the filesystem is on a partition.
+ path =
+ str::stream() << "/sys/dev/block/" << major(dev) << ':'
+ << minor(dev) // this is a symlink
+ << "/.." // parent directory of a partition is for the whole device
+ << "/queue/read_ahead_kb";
+ }
- if (boost::filesystem::exists(path)) {
- ifstream file (path.c_str());
- if (file.is_open()) {
- int kb;
- file >> kb;
- if (kb > 256) {
- log() << startupWarningsLog;
+ if (boost::filesystem::exists(path)) {
+ ifstream file(path.c_str());
+ if (file.is_open()) {
+ int kb;
+ file >> kb;
+ if (kb > 256) {
+ log() << startupWarningsLog;
- log() << "** WARNING: Readahead for " << dir << " is set to " << kb << "KB"
- << startupWarningsLog;
+ log() << "** WARNING: Readahead for " << dir << " is set to " << kb << "KB"
+ << startupWarningsLog;
- log() << "** We suggest setting it to 256KB (512 sectors) or less"
- << startupWarningsLog;
+ log() << "** We suggest setting it to 256KB (512 sectors) or less"
+ << startupWarningsLog;
- log() << "** http://dochub.mongodb.org/core/readahead"
- << startupWarningsLog;
- }
+ log() << "** http://dochub.mongodb.org/core/readahead"
+ << startupWarningsLog;
}
}
}
- catch (const std::exception& e) {
- log() << "unable to validate readahead settings due to error: " << e.what()
- << startupWarningsLog;
- log() << "for more information, see http://dochub.mongodb.org/core/readahead"
- << startupWarningsLog;
- }
-#endif // __linux__
+ } catch (const std::exception& e) {
+ log() << "unable to validate readahead settings due to error: " << e.what()
+ << startupWarningsLog;
+ log() << "for more information, see http://dochub.mongodb.org/core/readahead"
+ << startupWarningsLog;
}
+#endif // __linux__
+}
- // This is unrelated to the _tmp directory in dbpath.
- void clearTmpFiles() {
- boost::filesystem::path path(storageGlobalParams.dbpath);
- for ( boost::filesystem::directory_iterator i( path );
- i != boost::filesystem::directory_iterator(); ++i ) {
- string fileName = boost::filesystem::path(*i).leaf().string();
- if ( boost::filesystem::is_directory( *i ) &&
- fileName.length() && fileName[ 0 ] == '$' )
- boost::filesystem::remove_all( *i );
- }
+// This is unrelated to the _tmp directory in dbpath.
+void clearTmpFiles() {
+ boost::filesystem::path path(storageGlobalParams.dbpath);
+ for (boost::filesystem::directory_iterator i(path);
+ i != boost::filesystem::directory_iterator();
+ ++i) {
+ string fileName = boost::filesystem::path(*i).leaf().string();
+ if (boost::filesystem::is_directory(*i) && fileName.length() && fileName[0] == '$')
+ boost::filesystem::remove_all(*i);
}
-} // namespace
+}
+} // namespace
- MMAPV1Engine::MMAPV1Engine(const StorageEngineLockFile& lockFile) {
- // TODO check non-journal subdirs if using directory-per-db
- checkReadAhead(storageGlobalParams.dbpath);
+MMAPV1Engine::MMAPV1Engine(const StorageEngineLockFile& lockFile) {
+ // TODO check non-journal subdirs if using directory-per-db
+ checkReadAhead(storageGlobalParams.dbpath);
- acquirePathLock(this, storageGlobalParams.repair, lockFile);
+ acquirePathLock(this, storageGlobalParams.repair, lockFile);
- FileAllocator::get()->start();
+ FileAllocator::get()->start();
- MONGO_ASSERT_ON_EXCEPTION_WITH_MSG( clearTmpFiles(), "clear tmp files" );
- }
+ MONGO_ASSERT_ON_EXCEPTION_WITH_MSG(clearTmpFiles(), "clear tmp files");
+}
- void MMAPV1Engine::finishInit() {
- dataFileSync.go();
+void MMAPV1Engine::finishInit() {
+ dataFileSync.go();
- // Replays the journal (if needed) and starts the background thread. This requires the
- // ability to create OperationContexts.
- dur::startup();
- }
+ // Replays the journal (if needed) and starts the background thread. This requires the
+ // ability to create OperationContexts.
+ dur::startup();
+}
- MMAPV1Engine::~MMAPV1Engine() {
- for ( EntryMap::const_iterator it = _entryMap.begin(); it != _entryMap.end(); ++it ) {
- delete it->second;
- }
- _entryMap.clear();
+MMAPV1Engine::~MMAPV1Engine() {
+ for (EntryMap::const_iterator it = _entryMap.begin(); it != _entryMap.end(); ++it) {
+ delete it->second;
}
+ _entryMap.clear();
+}
- RecoveryUnit* MMAPV1Engine::newRecoveryUnit() {
- return new DurRecoveryUnit();
- }
+RecoveryUnit* MMAPV1Engine::newRecoveryUnit() {
+ return new DurRecoveryUnit();
+}
- void MMAPV1Engine::listDatabases( std::vector<std::string>* out ) const {
- _listDatabases( storageGlobalParams.dbpath, out );
- }
+void MMAPV1Engine::listDatabases(std::vector<std::string>* out) const {
+ _listDatabases(storageGlobalParams.dbpath, out);
+}
- DatabaseCatalogEntry* MMAPV1Engine::getDatabaseCatalogEntry( OperationContext* opCtx,
- StringData db ) {
- {
- stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
- EntryMap::const_iterator iter = _entryMap.find(db.toString());
- if (iter != _entryMap.end()) {
- return iter->second;
- }
+DatabaseCatalogEntry* MMAPV1Engine::getDatabaseCatalogEntry(OperationContext* opCtx,
+ StringData db) {
+ {
+ stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
+ EntryMap::const_iterator iter = _entryMap.find(db.toString());
+ if (iter != _entryMap.end()) {
+ return iter->second;
}
+ }
- // This is an on-demand database create/open. At this point, we are locked under X lock for
- // the database (MMAPV1DatabaseCatalogEntry's constructor checks that) so no two threads
- // can be creating the same database concurrenty. We need to create the database outside of
- // the _entryMapMutex so we do not deadlock (see SERVER-15880).
- MMAPV1DatabaseCatalogEntry* entry =
- new MMAPV1DatabaseCatalogEntry(opCtx,
- db,
- storageGlobalParams.dbpath,
- storageGlobalParams.directoryperdb,
- false);
+ // This is an on-demand database create/open. At this point, we are locked under X lock for
+ // the database (MMAPV1DatabaseCatalogEntry's constructor checks that) so no two threads
+    // can be creating the same database concurrently. We need to create the database outside of
+ // the _entryMapMutex so we do not deadlock (see SERVER-15880).
+ MMAPV1DatabaseCatalogEntry* entry = new MMAPV1DatabaseCatalogEntry(
+ opCtx, db, storageGlobalParams.dbpath, storageGlobalParams.directoryperdb, false);
- stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
+ stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
- // Sanity check that we are not overwriting something
- invariant(_entryMap.insert(EntryMap::value_type(db.toString(), entry)).second);
+ // Sanity check that we are not overwriting something
+ invariant(_entryMap.insert(EntryMap::value_type(db.toString(), entry)).second);
- return entry;
- }
+ return entry;
+}
- Status MMAPV1Engine::closeDatabase( OperationContext* txn, StringData db ) {
- // Before the files are closed, flush any potentially outstanding changes, which might
- // reference this database. Otherwise we will assert when subsequent applications of the
- // global journal entries occur, which happen to have write intents for the removed files.
- getDur().syncDataAndTruncateJournal(txn);
-
- stdx::lock_guard<stdx::mutex> lk( _entryMapMutex );
- MMAPV1DatabaseCatalogEntry* entry = _entryMap[db.toString()];
- delete entry;
- _entryMap.erase( db.toString() );
- return Status::OK();
- }
+Status MMAPV1Engine::closeDatabase(OperationContext* txn, StringData db) {
+ // Before the files are closed, flush any potentially outstanding changes, which might
+ // reference this database. Otherwise we will assert when subsequent applications of the
+ // global journal entries occur, which happen to have write intents for the removed files.
+ getDur().syncDataAndTruncateJournal(txn);
+
+ stdx::lock_guard<stdx::mutex> lk(_entryMapMutex);
+ MMAPV1DatabaseCatalogEntry* entry = _entryMap[db.toString()];
+ delete entry;
+ _entryMap.erase(db.toString());
+ return Status::OK();
+}
- Status MMAPV1Engine::dropDatabase( OperationContext* txn, StringData db ) {
- Status status = closeDatabase( txn, db );
- if ( !status.isOK() )
- return status;
+Status MMAPV1Engine::dropDatabase(OperationContext* txn, StringData db) {
+ Status status = closeDatabase(txn, db);
+ if (!status.isOK())
+ return status;
- _deleteDataFiles( db.toString() );
+ _deleteDataFiles(db.toString());
- return Status::OK();
- }
+ return Status::OK();
+}
- void MMAPV1Engine::_listDatabases( const std::string& directory,
- std::vector<std::string>* out ) {
- boost::filesystem::path path( directory );
- for ( boost::filesystem::directory_iterator i( path );
- i != boost::filesystem::directory_iterator();
- ++i ) {
- if (storageGlobalParams.directoryperdb) {
- boost::filesystem::path p = *i;
- string dbName = p.leaf().string();
- p /= ( dbName + ".ns" );
- if ( exists( p ) )
- out->push_back( dbName );
- }
- else {
- string fileName = boost::filesystem::path(*i).leaf().string();
- if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" )
- out->push_back( fileName.substr( 0, fileName.length() - 3 ) );
- }
+void MMAPV1Engine::_listDatabases(const std::string& directory, std::vector<std::string>* out) {
+ boost::filesystem::path path(directory);
+ for (boost::filesystem::directory_iterator i(path);
+ i != boost::filesystem::directory_iterator();
+ ++i) {
+ if (storageGlobalParams.directoryperdb) {
+ boost::filesystem::path p = *i;
+ string dbName = p.leaf().string();
+ p /= (dbName + ".ns");
+ if (exists(p))
+ out->push_back(dbName);
+ } else {
+ string fileName = boost::filesystem::path(*i).leaf().string();
+ if (fileName.length() > 3 && fileName.substr(fileName.length() - 3, 3) == ".ns")
+ out->push_back(fileName.substr(0, fileName.length() - 3));
}
}
+}
- int MMAPV1Engine::flushAllFiles( bool sync ) {
- return MongoFile::flushAll( sync );
- }
-
- bool MMAPV1Engine::isDurable() const {
- return getDur().isDurable();
- }
+int MMAPV1Engine::flushAllFiles(bool sync) {
+ return MongoFile::flushAll(sync);
+}
- RecordAccessTracker& MMAPV1Engine::getRecordAccessTracker() {
- return _recordAccessTracker;
- }
+bool MMAPV1Engine::isDurable() const {
+ return getDur().isDurable();
+}
- void MMAPV1Engine::cleanShutdown() {
- // wait until file preallocation finishes
- // we would only hang here if the file_allocator code generates a
- // synchronous signal, which we don't expect
- log() << "shutdown: waiting for fs preallocator..." << endl;
- FileAllocator::get()->waitUntilFinished();
+RecordAccessTracker& MMAPV1Engine::getRecordAccessTracker() {
+ return _recordAccessTracker;
+}
- if (storageGlobalParams.dur) {
- log() << "shutdown: final commit..." << endl;
+void MMAPV1Engine::cleanShutdown() {
+ // wait until file preallocation finishes
+ // we would only hang here if the file_allocator code generates a
+ // synchronous signal, which we don't expect
+ log() << "shutdown: waiting for fs preallocator..." << endl;
+ FileAllocator::get()->waitUntilFinished();
- getDur().commitAndStopDurThread();
- }
+ if (storageGlobalParams.dur) {
+ log() << "shutdown: final commit..." << endl;
- log() << "shutdown: closing all files..." << endl;
- stringstream ss3;
- MemoryMappedFile::closeAllFiles( ss3 );
- log() << ss3.str() << endl;
+ getDur().commitAndStopDurThread();
}
+
+ log() << "shutdown: closing all files..." << endl;
+ stringstream ss3;
+ MemoryMappedFile::closeAllFiles(ss3);
+ log() << ss3.str() << endl;
+}
}
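
getDatabaseCatalogEntry() above opens databases on demand: it checks the map under _entryMapMutex, constructs the MMAPV1DatabaseCatalogEntry outside the mutex (to avoid the deadlock noted in SERVER-15880), then re-locks and inserts with a sanity check. A minimal standalone sketch of that construct-outside-the-lock pattern follows, with illustrative names and standard-library types only; unlike the real code, it has no exclusive DB lock backing the uniqueness assertion.

    // Sketch: look up under the mutex, build the expensive object with the mutex
    // released, then re-lock and insert. Illustrative only; not MongoDB code.
    #include <cassert>
    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    struct DbEntryStub {
        explicit DbEntryStub(std::string name) : name(std::move(name)) {}
        std::string name;
    };

    class EngineStub {
    public:
        DbEntryStub* getDatabaseEntry(const std::string& db) {
            {
                std::lock_guard<std::mutex> lk(_mutex);
                auto it = _entries.find(db);
                if (it != _entries.end())
                    return it->second.get();
            }

            // Construct outside the mutex so a slow open cannot block other lookups.
            auto entry = std::make_unique<DbEntryStub>(db);

            std::lock_guard<std::mutex> lk(_mutex);
            auto result = _entries.emplace(db, std::move(entry));
            assert(result.second);  // sanity check that we are not overwriting something
            return result.first->second.get();
        }

    private:
        std::mutex _mutex;
        std::map<std::string, std::unique_ptr<DbEntryStub>> _entries;
    };

    int main() {
        EngineStub engine;
        DbEntryStub* first = engine.getDatabaseEntry("test");
        DbEntryStub* second = engine.getDatabaseEntry("test");
        return first == second ? 0 : 1;  // same cached entry on the second call
    }
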
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
index 4141794c426..25c38500831 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
@@ -38,68 +38,70 @@
namespace mongo {
- class MMAPV1DatabaseCatalogEntry;
+class MMAPV1DatabaseCatalogEntry;
- class MMAPV1Engine : public StorageEngine {
- public:
- MMAPV1Engine(const StorageEngineLockFile& lockFile);
- virtual ~MMAPV1Engine();
+class MMAPV1Engine : public StorageEngine {
+public:
+ MMAPV1Engine(const StorageEngineLockFile& lockFile);
+ virtual ~MMAPV1Engine();
- void finishInit();
+ void finishInit();
- RecoveryUnit* newRecoveryUnit();
- void listDatabases( std::vector<std::string>* out ) const;
- int flushAllFiles( bool sync );
+ RecoveryUnit* newRecoveryUnit();
+ void listDatabases(std::vector<std::string>* out) const;
+ int flushAllFiles(bool sync);
- DatabaseCatalogEntry* getDatabaseCatalogEntry( OperationContext* opCtx,
- StringData db );
+ DatabaseCatalogEntry* getDatabaseCatalogEntry(OperationContext* opCtx, StringData db);
- virtual bool supportsDocLocking() const { return false; }
- virtual bool isMmapV1() const { return true; }
+ virtual bool supportsDocLocking() const {
+ return false;
+ }
+ virtual bool isMmapV1() const {
+ return true;
+ }
- virtual bool isDurable() const;
+ virtual bool isDurable() const;
- virtual Status closeDatabase(OperationContext* txn, StringData db);
+ virtual Status closeDatabase(OperationContext* txn, StringData db);
- virtual Status dropDatabase(OperationContext* txn, StringData db);
+ virtual Status dropDatabase(OperationContext* txn, StringData db);
- virtual void cleanShutdown();
+ virtual void cleanShutdown();
- // Callers should use repairDatabase instead.
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns) {
- return Status(ErrorCodes::InternalError, "MMAPv1 doesn't support repairRecordStore");
- }
+ // Callers should use repairDatabase instead.
+ virtual Status repairRecordStore(OperationContext* txn, const std::string& ns) {
+ return Status(ErrorCodes::InternalError, "MMAPv1 doesn't support repairRecordStore");
+ }
- // MMAPv1 specific (non-virtual)
- Status repairDatabase( OperationContext* txn,
- const std::string& dbName,
- bool preserveClonedFilesOnFailure,
- bool backupOriginalFiles );
+ // MMAPv1 specific (non-virtual)
+ Status repairDatabase(OperationContext* txn,
+ const std::string& dbName,
+ bool preserveClonedFilesOnFailure,
+ bool backupOriginalFiles);
- /**
- * Gets a reference to the abstraction used by MMAP v1 to track recently used memory
- * addresses.
- *
- * MMAPv1 specific (non-virtual). This is non-const because callers are allowed to use
- * the returned reference to modify the RecordAccessTracker.
- *
- * The RecordAccessTracker is thread-safe (it uses its own mutex internally).
- */
- RecordAccessTracker& getRecordAccessTracker();
+ /**
+ * Gets a reference to the abstraction used by MMAP v1 to track recently used memory
+ * addresses.
+ *
+ * MMAPv1 specific (non-virtual). This is non-const because callers are allowed to use
+ * the returned reference to modify the RecordAccessTracker.
+ *
+ * The RecordAccessTracker is thread-safe (it uses its own mutex internally).
+ */
+ RecordAccessTracker& getRecordAccessTracker();
- private:
- static void _listDatabases( const std::string& directory,
- std::vector<std::string>* out );
+private:
+ static void _listDatabases(const std::string& directory, std::vector<std::string>* out);
- stdx::mutex _entryMapMutex;
- typedef std::map<std::string,MMAPV1DatabaseCatalogEntry*> EntryMap;
- EntryMap _entryMap;
+ stdx::mutex _entryMapMutex;
+ typedef std::map<std::string, MMAPV1DatabaseCatalogEntry*> EntryMap;
+ EntryMap _entryMap;
- // A record access tracker is essentially a large table which tracks recently used
- // addresses. It is used when higher layers (e.g. the query system) need to ask
- // the storage engine whether data is likely in physical memory.
- RecordAccessTracker _recordAccessTracker;
- };
+ // A record access tracker is essentially a large table which tracks recently used
+ // addresses. It is used when higher layers (e.g. the query system) need to ask
+ // the storage engine whether data is likely in physical memory.
+ RecordAccessTracker _recordAccessTracker;
+};
- void _deleteDataFiles(const std::string& database);
+void _deleteDataFiles(const std::string& database);
}
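
The RecordAccessTracker declared above is described as a table of recently used addresses that higher layers query to decide whether data is likely in physical memory; the extent manager code below uses its checkAccessedAndMark() in recordNeedsFetch() for exactly that. A minimal standalone sketch of the idea follows; the page size and the set-based bookkeeping are assumptions for illustration, not the real implementation.

    // Sketch: remember which pages were touched recently; report whether an address
    // was already known so the caller can decide if a fetch (page fault) is likely.
    #include <cstdint>
    #include <iostream>
    #include <unordered_set>

    class AccessTrackerSketch {
    public:
        // Returns true if the page containing 'addr' was already marked; marks it either way.
        bool checkAccessedAndMark(const void* addr) {
            const std::uintptr_t page = reinterpret_cast<std::uintptr_t>(addr) / kPageSize;
            return !_recentPages.insert(page).second;
        }

    private:
        static constexpr std::uintptr_t kPageSize = 4096;  // assumed page size
        std::unordered_set<std::uintptr_t> _recentPages;
    };

    int main() {
        AccessTrackerSketch tracker;
        int x = 0;
        std::cout << tracker.checkAccessedAndMark(&x) << "\n";  // 0: first touch, fetch likely needed
        std::cout << tracker.checkAccessedAndMark(&x) << "\n";  // 1: recently accessed
    }
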
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index ed4f160e1a9..69d80422e66 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -55,632 +55,612 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::max;
- using std::string;
- using std::stringstream;
-
- // Turn on this failpoint to force the system to yield for a fetch. Setting to "alwaysOn"
- // will cause yields for fetching to occur on every 'kNeedsFetchFailFreq'th call to
- // recordNeedsFetch().
- static const int kNeedsFetchFailFreq = 2;
- static Counter64 needsFetchFailCounter;
- MONGO_FP_DECLARE(recordNeedsFetchFail);
-
- // Used to make sure the compiler doesn't get too smart on us when we're
- // trying to touch records.
- volatile int __record_touch_dummy = 1;
-
- class MmapV1RecordFetcher : public RecordFetcher {
- MONGO_DISALLOW_COPYING(MmapV1RecordFetcher);
- public:
- explicit MmapV1RecordFetcher(const MmapV1RecordHeader* record)
- : _record(record) { }
-
- virtual void setup() {
- invariant(!_filesLock.get());
- _filesLock.reset(new LockMongoFilesShared());
- }
+using std::unique_ptr;
+using std::endl;
+using std::max;
+using std::string;
+using std::stringstream;
+
+// Turn on this failpoint to force the system to yield for a fetch. Setting to "alwaysOn"
+// will cause yields for fetching to occur on every 'kNeedsFetchFailFreq'th call to
+// recordNeedsFetch().
+static const int kNeedsFetchFailFreq = 2;
+static Counter64 needsFetchFailCounter;
+MONGO_FP_DECLARE(recordNeedsFetchFail);
+
+// Used to make sure the compiler doesn't get too smart on us when we're
+// trying to touch records.
+volatile int __record_touch_dummy = 1;
+
+class MmapV1RecordFetcher : public RecordFetcher {
+ MONGO_DISALLOW_COPYING(MmapV1RecordFetcher);
+
+public:
+ explicit MmapV1RecordFetcher(const MmapV1RecordHeader* record) : _record(record) {}
+
+ virtual void setup() {
+ invariant(!_filesLock.get());
+ _filesLock.reset(new LockMongoFilesShared());
+ }
+
+ virtual void fetch() {
+ // It's only legal to touch the record while we're holding a lock on the data files.
+ invariant(_filesLock.get());
+
+ const char* recordChar = reinterpret_cast<const char*>(_record);
+
+        // Here's where we actually dereference a pointer into the record. This is where
+        // we expect a page fault to occur, so we should do this out of the lock.
+ __record_touch_dummy += *recordChar;
+
+ // We're not going to touch the record anymore, so we can give up our
+ // lock on mongo files. We do this here because we have to release the
+ // lock on mongo files prior to reacquiring lock mgr locks.
+ _filesLock.reset();
+ }
+
+private:
+ // The record which needs to be touched in order to page fault. Not owned by us.
+ const MmapV1RecordHeader* _record;
+
+ // This ensures that our MmapV1RecordHeader* does not drop out from under our feet before
+ // we dereference it.
+ std::unique_ptr<LockMongoFilesShared> _filesLock;
+};
+
+MmapV1ExtentManager::MmapV1ExtentManager(StringData dbname, StringData path, bool directoryPerDB)
+ : _dbname(dbname.toString()),
+ _path(path.toString()),
+ _directoryPerDB(directoryPerDB),
+ _rid(RESOURCE_METADATA, dbname) {
+ StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
+ invariant(engine->isMmapV1());
+ MMAPV1Engine* mmapEngine = static_cast<MMAPV1Engine*>(engine);
+ _recordAccessTracker = &mmapEngine->getRecordAccessTracker();
+}
- virtual void fetch() {
- // It's only legal to touch the record while we're holding a lock on the data files.
- invariant(_filesLock.get());
+boost::filesystem::path MmapV1ExtentManager::_fileName(int n) const {
+ stringstream ss;
+ ss << _dbname << '.' << n;
+ boost::filesystem::path fullName(_path);
+ if (_directoryPerDB)
+ fullName /= _dbname;
+ fullName /= ss.str();
+ return fullName;
+}
- const char* recordChar = reinterpret_cast<const char*>(_record);
- // Here's where we actually deference a pointer into the record. This is where
- // we expect a page fault to occur, so we should this out of the lock.
- __record_touch_dummy += *recordChar;
+Status MmapV1ExtentManager::init(OperationContext* txn) {
+ invariant(_files.empty());
- // We're not going to touch the record anymore, so we can give up our
- // lock on mongo files. We do this here because we have to release the
- // lock on mongo files prior to reacquiring lock mgr locks.
- _filesLock.reset();
+ for (int n = 0; n < DiskLoc::MaxFiles; n++) {
+ const boost::filesystem::path fullName = _fileName(n);
+ if (!boost::filesystem::exists(fullName)) {
+ break;
}
- private:
- // The record which needs to be touched in order to page fault. Not owned by us.
- const MmapV1RecordHeader* _record;
-
- // This ensures that our MmapV1RecordHeader* does not drop out from under our feet before
- // we dereference it.
- std::unique_ptr<LockMongoFilesShared> _filesLock;
- };
-
- MmapV1ExtentManager::MmapV1ExtentManager(StringData dbname,
- StringData path,
- bool directoryPerDB)
- : _dbname(dbname.toString()),
- _path(path.toString()),
- _directoryPerDB(directoryPerDB),
- _rid(RESOURCE_METADATA, dbname) {
- StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
- invariant(engine->isMmapV1());
- MMAPV1Engine* mmapEngine = static_cast<MMAPV1Engine*>(engine);
- _recordAccessTracker = &mmapEngine->getRecordAccessTracker();
- }
+ const std::string fullNameString = fullName.string();
- boost::filesystem::path MmapV1ExtentManager::_fileName(int n) const {
- stringstream ss;
- ss << _dbname << '.' << n;
- boost::filesystem::path fullName( _path );
- if ( _directoryPerDB )
- fullName /= _dbname;
- fullName /= ss.str();
- return fullName;
- }
-
-
- Status MmapV1ExtentManager::init(OperationContext* txn) {
- invariant(_files.empty());
-
- for (int n = 0; n < DiskLoc::MaxFiles; n++) {
- const boost::filesystem::path fullName = _fileName(n);
- if (!boost::filesystem::exists(fullName)) {
+ {
+ // If the file is uninitialized we exit the loop because it is just prealloced. We
+ // do this on a bare File object rather than using the DataFile because closing a
+ // DataFile triggers dur::closingFileNotification() which is fatal if there are any
+ // pending writes. Therefore we must only open files that we know we want to keep.
+ File preview;
+ preview.open(fullNameString.c_str(), /*readOnly*/ true);
+ invariant(preview.is_open());
+
+ // File can't be initialized if too small.
+ if (preview.len() < sizeof(DataFileHeader)) {
break;
}
- const std::string fullNameString = fullName.string();
-
- {
- // If the file is uninitialized we exit the loop because it is just prealloced. We
- // do this on a bare File object rather than using the DataFile because closing a
- // DataFile triggers dur::closingFileNotification() which is fatal if there are any
- // pending writes. Therefore we must only open files that we know we want to keep.
- File preview;
- preview.open(fullNameString.c_str(), /*readOnly*/ true);
- invariant(preview.is_open());
-
- // File can't be initialized if too small.
- if (preview.len() < sizeof(DataFileHeader)) {
- break;
- }
-
- // This is the equivalent of DataFileHeader::uninitialized().
- int version;
- preview.read(0, reinterpret_cast<char*>(&version), sizeof(version));
- invariant(!preview.bad());
- if (version == 0) {
- break;
- }
- }
-
- unique_ptr<DataFile> df(new DataFile(n));
-
- Status s = df->openExisting(fullNameString.c_str());
- if (!s.isOK()) {
- return s;
+ // This is the equivalent of DataFileHeader::uninitialized().
+ int version;
+ preview.read(0, reinterpret_cast<char*>(&version), sizeof(version));
+ invariant(!preview.bad());
+ if (version == 0) {
+ break;
}
+ }
- invariant(!df->getHeader()->uninitialized());
-
- // We only checkUpgrade on files that we are keeping, not preallocs.
- df->getHeader()->checkUpgrade(txn);
+ unique_ptr<DataFile> df(new DataFile(n));
- _files.push_back( df.release() );
+ Status s = df->openExisting(fullNameString.c_str());
+ if (!s.isOK()) {
+ return s;
}
- // If this is a new database being created, instantiate the first file and one extent so
- // we can have a coherent database.
- if (_files.empty()) {
- WriteUnitOfWork wuow(txn);
- _createExtent(txn, initialSize(128), false);
- wuow.commit();
+ invariant(!df->getHeader()->uninitialized());
- // Commit the journal and all changes to disk so that even if exceptions occur during
- // subsequent initialization, we won't have uncommited changes during file close.
- getDur().commitNow(txn);
- }
+ // We only checkUpgrade on files that we are keeping, not preallocs.
+ df->getHeader()->checkUpgrade(txn);
- return Status::OK();
+ _files.push_back(df.release());
}
- const DataFile* MmapV1ExtentManager::_getOpenFile(int fileId) const {
- if (fileId < 0 || fileId >= _files.size()) {
- log() << "_getOpenFile() invalid file index requested " << fileId;
- invariant(false);
- }
+ // If this is a new database being created, instantiate the first file and one extent so
+ // we can have a coherent database.
+ if (_files.empty()) {
+ WriteUnitOfWork wuow(txn);
+ _createExtent(txn, initialSize(128), false);
+ wuow.commit();
- return _files[fileId];
+ // Commit the journal and all changes to disk so that even if exceptions occur during
+        // subsequent initialization, we won't have uncommitted changes during file close.
+ getDur().commitNow(txn);
}
- DataFile* MmapV1ExtentManager::_getOpenFile(int fileId) {
- if (fileId < 0 || fileId >= _files.size()) {
- log() << "_getOpenFile() invalid file index requested " << fileId;
- invariant(false);
- }
+ return Status::OK();
+}
- return _files[fileId];
+const DataFile* MmapV1ExtentManager::_getOpenFile(int fileId) const {
+ if (fileId < 0 || fileId >= _files.size()) {
+ log() << "_getOpenFile() invalid file index requested " << fileId;
+ invariant(false);
}
- DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
- int sizeNeeded,
- bool preallocateNextFile) {
-
- // Database must be stable and we need to be in some sort of an update operation in order
- // to add a new file.
- invariant(txn->lockState()->isDbLockedForMode(_dbname, MODE_IX));
+ return _files[fileId];
+}
- const int allocFileId = _files.size();
+DataFile* MmapV1ExtentManager::_getOpenFile(int fileId) {
+ if (fileId < 0 || fileId >= _files.size()) {
+ log() << "_getOpenFile() invalid file index requested " << fileId;
+ invariant(false);
+ }
- int minSize = 0;
- if (allocFileId > 0) {
- // Make the next file at least as large as the previous
- minSize = _files[allocFileId - 1]->getHeader()->fileLength;
- }
+ return _files[fileId];
+}
- if (minSize < sizeNeeded + DataFileHeader::HeaderSize) {
- minSize = sizeNeeded + DataFileHeader::HeaderSize;
- }
+DataFile* MmapV1ExtentManager::_addAFile(OperationContext* txn,
+ int sizeNeeded,
+ bool preallocateNextFile) {
+ // Database must be stable and we need to be in some sort of an update operation in order
+ // to add a new file.
+ invariant(txn->lockState()->isDbLockedForMode(_dbname, MODE_IX));
- {
- unique_ptr<DataFile> allocFile(new DataFile(allocFileId));
- const string allocFileName = _fileName(allocFileId).string();
+ const int allocFileId = _files.size();
- Timer t;
+ int minSize = 0;
+ if (allocFileId > 0) {
+ // Make the next file at least as large as the previous
+ minSize = _files[allocFileId - 1]->getHeader()->fileLength;
+ }
- allocFile->open(txn, allocFileName.c_str(), minSize, false);
- if (t.seconds() > 1) {
- log() << "MmapV1ExtentManager took "
- << t.seconds()
- << " seconds to open: "
- << allocFileName;
- }
+ if (minSize < sizeNeeded + DataFileHeader::HeaderSize) {
+ minSize = sizeNeeded + DataFileHeader::HeaderSize;
+ }
- // It's all good
- _files.push_back(allocFile.release());
- }
+ {
+ unique_ptr<DataFile> allocFile(new DataFile(allocFileId));
+ const string allocFileName = _fileName(allocFileId).string();
- // Preallocate is asynchronous
- if (preallocateNextFile) {
- unique_ptr<DataFile> nextFile(new DataFile(allocFileId + 1));
- const string nextFileName = _fileName(allocFileId + 1).string();
+ Timer t;
- nextFile->open(txn, nextFileName.c_str(), minSize, false);
+ allocFile->open(txn, allocFileName.c_str(), minSize, false);
+ if (t.seconds() > 1) {
+ log() << "MmapV1ExtentManager took " << t.seconds()
+ << " seconds to open: " << allocFileName;
}
- // Returns the last file added
- return _files[allocFileId];
- }
-
- int MmapV1ExtentManager::numFiles() const {
- return _files.size();
+ // It's all good
+ _files.push_back(allocFile.release());
}
- long long MmapV1ExtentManager::fileSize() const {
- long long size = 0;
- for (int n = 0; boost::filesystem::exists(_fileName(n)); n++) {
- size += boost::filesystem::file_size(_fileName(n));
- }
+ // Preallocate is asynchronous
+ if (preallocateNextFile) {
+ unique_ptr<DataFile> nextFile(new DataFile(allocFileId + 1));
+ const string nextFileName = _fileName(allocFileId + 1).string();
- return size;
+ nextFile->open(txn, nextFileName.c_str(), minSize, false);
}
- MmapV1RecordHeader* MmapV1ExtentManager::_recordForV1( const DiskLoc& loc ) const {
- loc.assertOk();
- const DataFile* df = _getOpenFile( loc.a() );
+ // Returns the last file added
+ return _files[allocFileId];
+}
- int ofs = loc.getOfs();
- if ( ofs < DataFileHeader::HeaderSize ) {
- df->badOfs(ofs); // will msgassert - external call to keep out of the normal code path
- }
+int MmapV1ExtentManager::numFiles() const {
+ return _files.size();
+}
- return reinterpret_cast<MmapV1RecordHeader*>( df->p() + ofs );
+long long MmapV1ExtentManager::fileSize() const {
+ long long size = 0;
+ for (int n = 0; boost::filesystem::exists(_fileName(n)); n++) {
+ size += boost::filesystem::file_size(_fileName(n));
}
- MmapV1RecordHeader* MmapV1ExtentManager::recordForV1( const DiskLoc& loc ) const {
- MmapV1RecordHeader* record = _recordForV1( loc );
- _recordAccessTracker->markAccessed( record );
- return record;
- }
+ return size;
+}
- std::unique_ptr<RecordFetcher> MmapV1ExtentManager::recordNeedsFetch(const DiskLoc& loc) const {
- if (loc.isNull()) return {};
- MmapV1RecordHeader* record = _recordForV1( loc );
+MmapV1RecordHeader* MmapV1ExtentManager::_recordForV1(const DiskLoc& loc) const {
+ loc.assertOk();
+ const DataFile* df = _getOpenFile(loc.a());
- // For testing: if failpoint is enabled we randomly request fetches without
- // going to the RecordAccessTracker.
- if ( MONGO_FAIL_POINT( recordNeedsFetchFail ) ) {
- needsFetchFailCounter.increment();
- if ( ( needsFetchFailCounter.get() % kNeedsFetchFailFreq ) == 0 ) {
- return stdx::make_unique<MmapV1RecordFetcher>( record );
- }
- }
+ int ofs = loc.getOfs();
+ if (ofs < DataFileHeader::HeaderSize) {
+ df->badOfs(ofs); // will msgassert - external call to keep out of the normal code path
+ }
- if ( !_recordAccessTracker->checkAccessedAndMark( record ) ) {
- return stdx::make_unique<MmapV1RecordFetcher>( record );
- }
+ return reinterpret_cast<MmapV1RecordHeader*>(df->p() + ofs);
+}
+
+MmapV1RecordHeader* MmapV1ExtentManager::recordForV1(const DiskLoc& loc) const {
+ MmapV1RecordHeader* record = _recordForV1(loc);
+ _recordAccessTracker->markAccessed(record);
+ return record;
+}
+std::unique_ptr<RecordFetcher> MmapV1ExtentManager::recordNeedsFetch(const DiskLoc& loc) const {
+ if (loc.isNull())
return {};
+ MmapV1RecordHeader* record = _recordForV1(loc);
+
+ // For testing: if failpoint is enabled we randomly request fetches without
+ // going to the RecordAccessTracker.
+ if (MONGO_FAIL_POINT(recordNeedsFetchFail)) {
+ needsFetchFailCounter.increment();
+ if ((needsFetchFailCounter.get() % kNeedsFetchFailFreq) == 0) {
+ return stdx::make_unique<MmapV1RecordFetcher>(record);
+ }
}
- DiskLoc MmapV1ExtentManager::extentLocForV1( const DiskLoc& loc ) const {
- MmapV1RecordHeader* record = recordForV1( loc );
- return DiskLoc( loc.a(), record->extentOfs() );
+ if (!_recordAccessTracker->checkAccessedAndMark(record)) {
+ return stdx::make_unique<MmapV1RecordFetcher>(record);
}
- Extent* MmapV1ExtentManager::extentForV1( const DiskLoc& loc ) const {
- DiskLoc extentLoc = extentLocForV1( loc );
- return getExtent( extentLoc );
- }
+ return {};
+}
- Extent* MmapV1ExtentManager::getExtent( const DiskLoc& loc, bool doSanityCheck ) const {
- loc.assertOk();
- Extent* e = reinterpret_cast<Extent*>( _getOpenFile( loc.a() )->p() + loc.getOfs() );
- if ( doSanityCheck )
- e->assertOk();
+DiskLoc MmapV1ExtentManager::extentLocForV1(const DiskLoc& loc) const {
+ MmapV1RecordHeader* record = recordForV1(loc);
+ return DiskLoc(loc.a(), record->extentOfs());
+}
- _recordAccessTracker->markAccessed( e );
+Extent* MmapV1ExtentManager::extentForV1(const DiskLoc& loc) const {
+ DiskLoc extentLoc = extentLocForV1(loc);
+ return getExtent(extentLoc);
+}
- return e;
- }
+Extent* MmapV1ExtentManager::getExtent(const DiskLoc& loc, bool doSanityCheck) const {
+ loc.assertOk();
+ Extent* e = reinterpret_cast<Extent*>(_getOpenFile(loc.a())->p() + loc.getOfs());
+ if (doSanityCheck)
+ e->assertOk();
- void _checkQuota( bool enforceQuota, int fileNo ) {
- if ( !enforceQuota )
- return;
+ _recordAccessTracker->markAccessed(e);
- if ( fileNo < mmapv1GlobalOptions.quotaFiles )
- return;
+ return e;
+}
- uasserted(12501, "quota exceeded");
- }
+void _checkQuota(bool enforceQuota, int fileNo) {
+ if (!enforceQuota)
+ return;
- int MmapV1ExtentManager::maxSize() const {
- return DataFile::maxSize() - DataFileHeader::HeaderSize - 16;
- }
+ if (fileNo < mmapv1GlobalOptions.quotaFiles)
+ return;
- DiskLoc MmapV1ExtentManager::_createExtentInFile( OperationContext* txn,
- int fileNo,
- DataFile* f,
- int size,
- bool enforceQuota ) {
+ uasserted(12501, "quota exceeded");
+}
- _checkQuota( enforceQuota, fileNo - 1 );
+int MmapV1ExtentManager::maxSize() const {
+ return DataFile::maxSize() - DataFileHeader::HeaderSize - 16;
+}
- massert( 10358, "bad new extent size", size >= minSize() && size <= maxSize() );
+DiskLoc MmapV1ExtentManager::_createExtentInFile(
+ OperationContext* txn, int fileNo, DataFile* f, int size, bool enforceQuota) {
+ _checkQuota(enforceQuota, fileNo - 1);
- DiskLoc loc = f->allocExtentArea( txn, size );
- loc.assertOk();
+ massert(10358, "bad new extent size", size >= minSize() && size <= maxSize());
- Extent *e = getExtent( loc, false );
- verify( e );
+ DiskLoc loc = f->allocExtentArea(txn, size);
+ loc.assertOk();
- *txn->recoveryUnit()->writing(&e->magic) = Extent::extentSignature;
- *txn->recoveryUnit()->writing(&e->myLoc) = loc;
- *txn->recoveryUnit()->writing(&e->length) = size;
+ Extent* e = getExtent(loc, false);
+ verify(e);
- return loc;
- }
+ *txn->recoveryUnit()->writing(&e->magic) = Extent::extentSignature;
+ *txn->recoveryUnit()->writing(&e->myLoc) = loc;
+ *txn->recoveryUnit()->writing(&e->length) = size;
+ return loc;
+}
- DiskLoc MmapV1ExtentManager::_createExtent( OperationContext* txn,
- int size,
- bool enforceQuota ) {
- size = quantizeExtentSize( size );
- if ( size > maxSize() )
- size = maxSize();
+DiskLoc MmapV1ExtentManager::_createExtent(OperationContext* txn, int size, bool enforceQuota) {
+ size = quantizeExtentSize(size);
- verify( size < DataFile::maxSize() );
+ if (size > maxSize())
+ size = maxSize();
- for ( int i = numFiles() - 1; i >= 0; i-- ) {
- DataFile* f = _getOpenFile(i);
- invariant(f);
+ verify(size < DataFile::maxSize());
- if ( f->getHeader()->unusedLength >= size ) {
- return _createExtentInFile( txn, i, f, size, enforceQuota );
- }
- }
+ for (int i = numFiles() - 1; i >= 0; i--) {
+ DataFile* f = _getOpenFile(i);
+ invariant(f);
- _checkQuota( enforceQuota, numFiles() );
+ if (f->getHeader()->unusedLength >= size) {
+ return _createExtentInFile(txn, i, f, size, enforceQuota);
+ }
+ }
- // no space in an existing file
- // allocate files until we either get one big enough or hit maxSize
- for ( int i = 0; i < 8; i++ ) {
- DataFile* f = _addAFile( txn, size, false );
+ _checkQuota(enforceQuota, numFiles());
- if ( f->getHeader()->unusedLength >= size ) {
- return _createExtentInFile( txn, numFiles() - 1, f, size, enforceQuota );
- }
+ // no space in an existing file
+ // allocate files until we either get one big enough or hit maxSize
+ for (int i = 0; i < 8; i++) {
+ DataFile* f = _addAFile(txn, size, false);
+ if (f->getHeader()->unusedLength >= size) {
+ return _createExtentInFile(txn, numFiles() - 1, f, size, enforceQuota);
}
-
- // callers don't check for null return code, so assert
- msgasserted(14810, "couldn't allocate space for a new extent" );
}
- DiskLoc MmapV1ExtentManager::_allocFromFreeList( OperationContext* txn,
- int approxSize,
- bool capped ) {
- // setup extent constraints
-
- int low, high;
- if ( capped ) {
- // be strict about the size
- low = approxSize;
- if ( low > 2048 ) low -= 256;
- high = (int) (approxSize * 1.05) + 256;
- }
- else {
- low = (int) (approxSize * 0.8);
- high = (int) (approxSize * 1.4);
- }
- if ( high <= 0 ) {
- // overflowed
- high = max(approxSize, maxSize());
- }
- if ( high <= minSize() ) {
- // the minimum extent size is 4097
- high = minSize() + 1;
- }
-
- // scan free list looking for something suitable
+ // callers don't check for null return code, so assert
+ msgasserted(14810, "couldn't allocate space for a new extent");
+}
- int n = 0;
- Extent *best = 0;
- int bestDiff = 0x7fffffff;
- {
- Timer t;
- DiskLoc L = _getFreeListStart();
- while( !L.isNull() ) {
- Extent* e = getExtent( L );
- if ( e->length >= low && e->length <= high ) {
- int diff = abs(e->length - approxSize);
- if ( diff < bestDiff ) {
- bestDiff = diff;
- best = e;
- if ( ((double) diff) / approxSize < 0.1 ) {
- // close enough
- break;
- }
- if ( t.seconds() >= 2 ) {
- // have spent lots of time in write lock, and we are in [low,high], so close enough
- // could come into play if extent freelist is very long
- break;
- }
+DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
+ int approxSize,
+ bool capped) {
+ // setup extent constraints
+
+ int low, high;
+ if (capped) {
+ // be strict about the size
+ low = approxSize;
+ if (low > 2048)
+ low -= 256;
+ high = (int)(approxSize * 1.05) + 256;
+ } else {
+ low = (int)(approxSize * 0.8);
+ high = (int)(approxSize * 1.4);
+ }
+ if (high <= 0) {
+ // overflowed
+ high = max(approxSize, maxSize());
+ }
+ if (high <= minSize()) {
+ // the minimum extent size is 4097
+ high = minSize() + 1;
+ }
+
+ // scan free list looking for something suitable
+
+ int n = 0;
+ Extent* best = 0;
+ int bestDiff = 0x7fffffff;
+ {
+ Timer t;
+ DiskLoc L = _getFreeListStart();
+ while (!L.isNull()) {
+ Extent* e = getExtent(L);
+ if (e->length >= low && e->length <= high) {
+ int diff = abs(e->length - approxSize);
+ if (diff < bestDiff) {
+ bestDiff = diff;
+ best = e;
+ if (((double)diff) / approxSize < 0.1) {
+ // close enough
+ break;
}
- else {
- OCCASIONALLY {
- if ( high < 64 * 1024 && t.seconds() >= 2 ) {
- // be less picky if it is taking a long time
- high = 64 * 1024;
- }
+ if (t.seconds() >= 2) {
+ // have spent lots of time in write lock, and we are in [low,high], so close enough
+ // could come into play if extent freelist is very long
+ break;
+ }
+ } else {
+ OCCASIONALLY {
+ if (high < 64 * 1024 && t.seconds() >= 2) {
+ // be less picky if it is taking a long time
+ high = 64 * 1024;
}
}
}
- L = e->xnext;
- ++n;
- }
- if ( t.seconds() >= 10 ) {
- log() << "warning: slow scan in allocFromFreeList (in write lock)" << endl;
}
+ L = e->xnext;
+ ++n;
}
+ if (t.seconds() >= 10) {
+ log() << "warning: slow scan in allocFromFreeList (in write lock)" << endl;
+ }
+ }
- if ( n > 128 ) { LOG( n < 512 ? 1 : 0 ) << "warning: newExtent " << n << " scanned\n"; }
-
- if ( !best )
- return DiskLoc();
-
- // remove from the free list
- if ( !best->xprev.isNull() )
- *txn->recoveryUnit()->writing(&getExtent( best->xprev )->xnext) = best->xnext;
- if ( !best->xnext.isNull() )
- *txn->recoveryUnit()->writing(&getExtent( best->xnext )->xprev) = best->xprev;
- if ( _getFreeListStart() == best->myLoc )
- _setFreeListStart( txn, best->xnext );
- if ( _getFreeListEnd() == best->myLoc )
- _setFreeListEnd( txn, best->xprev );
-
- return best->myLoc;
+ if (n > 128) {
+ LOG(n < 512 ? 1 : 0) << "warning: newExtent " << n << " scanned\n";
}
- DiskLoc MmapV1ExtentManager::allocateExtent(OperationContext* txn,
- bool capped,
- int size,
- bool enforceQuota) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
- bool fromFreeList = true;
- DiskLoc eloc = _allocFromFreeList( txn, size, capped );
- if ( eloc.isNull() ) {
- fromFreeList = false;
- eloc = _createExtent( txn, size, enforceQuota );
- }
+ if (!best)
+ return DiskLoc();
- invariant( !eloc.isNull() );
- invariant( eloc.isValid() );
+ // remove from the free list
+ if (!best->xprev.isNull())
+ *txn->recoveryUnit()->writing(&getExtent(best->xprev)->xnext) = best->xnext;
+ if (!best->xnext.isNull())
+ *txn->recoveryUnit()->writing(&getExtent(best->xnext)->xprev) = best->xprev;
+ if (_getFreeListStart() == best->myLoc)
+ _setFreeListStart(txn, best->xnext);
+ if (_getFreeListEnd() == best->myLoc)
+ _setFreeListEnd(txn, best->xprev);
- LOG(1) << "MmapV1ExtentManager::allocateExtent"
- << " desiredSize:" << size
- << " fromFreeList: " << fromFreeList
- << " eloc: " << eloc;
+ return best->myLoc;
+}
- return eloc;
+DiskLoc MmapV1ExtentManager::allocateExtent(OperationContext* txn,
+ bool capped,
+ int size,
+ bool enforceQuota) {
+ Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+ bool fromFreeList = true;
+ DiskLoc eloc = _allocFromFreeList(txn, size, capped);
+ if (eloc.isNull()) {
+ fromFreeList = false;
+ eloc = _createExtent(txn, size, enforceQuota);
}
- void MmapV1ExtentManager::freeExtent(OperationContext* txn, DiskLoc firstExt ) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
- Extent* e = getExtent( firstExt );
- txn->recoveryUnit()->writing( &e->xnext )->Null();
- txn->recoveryUnit()->writing( &e->xprev )->Null();
- txn->recoveryUnit()->writing( &e->firstRecord )->Null();
- txn->recoveryUnit()->writing( &e->lastRecord )->Null();
-
-
- if( _getFreeListStart().isNull() ) {
- _setFreeListStart( txn, firstExt );
- _setFreeListEnd( txn, firstExt );
- }
- else {
- DiskLoc a = _getFreeListStart();
- invariant( getExtent( a )->xprev.isNull() );
- *txn->recoveryUnit()->writing( &getExtent( a )->xprev ) = firstExt;
- *txn->recoveryUnit()->writing( &getExtent( firstExt )->xnext ) = a;
- _setFreeListStart( txn, firstExt );
- }
+ invariant(!eloc.isNull());
+ invariant(eloc.isValid());
- }
+ LOG(1) << "MmapV1ExtentManager::allocateExtent"
+ << " desiredSize:" << size << " fromFreeList: " << fromFreeList << " eloc: " << eloc;
- void MmapV1ExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+ return eloc;
+}
- if ( firstExt.isNull() && lastExt.isNull() )
- return;
+void MmapV1ExtentManager::freeExtent(OperationContext* txn, DiskLoc firstExt) {
+ Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
+ Extent* e = getExtent(firstExt);
+ txn->recoveryUnit()->writing(&e->xnext)->Null();
+ txn->recoveryUnit()->writing(&e->xprev)->Null();
+ txn->recoveryUnit()->writing(&e->firstRecord)->Null();
+ txn->recoveryUnit()->writing(&e->lastRecord)->Null();
- {
- verify( !firstExt.isNull() && !lastExt.isNull() );
- Extent *f = getExtent( firstExt );
- Extent *l = getExtent( lastExt );
- verify( f->xprev.isNull() );
- verify( l->xnext.isNull() );
- verify( f==l || !f->xnext.isNull() );
- verify( f==l || !l->xprev.isNull() );
- }
- if( _getFreeListStart().isNull() ) {
- _setFreeListStart( txn, firstExt );
- _setFreeListEnd( txn, lastExt );
- }
- else {
- DiskLoc a = _getFreeListStart();
- invariant( getExtent( a )->xprev.isNull() );
- *txn->recoveryUnit()->writing( &getExtent( a )->xprev ) = lastExt;
- *txn->recoveryUnit()->writing( &getExtent( lastExt )->xnext ) = a;
- _setFreeListStart( txn, firstExt );
- }
+ if (_getFreeListStart().isNull()) {
+ _setFreeListStart(txn, firstExt);
+ _setFreeListEnd(txn, firstExt);
+ } else {
+ DiskLoc a = _getFreeListStart();
+ invariant(getExtent(a)->xprev.isNull());
+ *txn->recoveryUnit()->writing(&getExtent(a)->xprev) = firstExt;
+ *txn->recoveryUnit()->writing(&getExtent(firstExt)->xnext) = a;
+ _setFreeListStart(txn, firstExt);
}
+}
- DiskLoc MmapV1ExtentManager::_getFreeListStart() const {
- if ( _files.empty() )
- return DiskLoc();
- const DataFile* file = _getOpenFile(0);
- return file->header()->freeListStart;
- }
+void MmapV1ExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) {
+ Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_X);
- DiskLoc MmapV1ExtentManager::_getFreeListEnd() const {
- if ( _files.empty() )
- return DiskLoc();
- const DataFile* file = _getOpenFile(0);
- return file->header()->freeListEnd;
- }
+ if (firstExt.isNull() && lastExt.isNull())
+ return;
- void MmapV1ExtentManager::_setFreeListStart( OperationContext* txn, DiskLoc loc ) {
- invariant( !_files.empty() );
- DataFile* file = _files[0];
- *txn->recoveryUnit()->writing( &file->header()->freeListStart ) = loc;
+ {
+ verify(!firstExt.isNull() && !lastExt.isNull());
+ Extent* f = getExtent(firstExt);
+ Extent* l = getExtent(lastExt);
+ verify(f->xprev.isNull());
+ verify(l->xnext.isNull());
+ verify(f == l || !f->xnext.isNull());
+ verify(f == l || !l->xprev.isNull());
}
- void MmapV1ExtentManager::_setFreeListEnd( OperationContext* txn, DiskLoc loc ) {
- invariant( !_files.empty() );
- DataFile* file = _files[0];
- *txn->recoveryUnit()->writing( &file->header()->freeListEnd ) = loc;
+ if (_getFreeListStart().isNull()) {
+ _setFreeListStart(txn, firstExt);
+ _setFreeListEnd(txn, lastExt);
+ } else {
+ DiskLoc a = _getFreeListStart();
+ invariant(getExtent(a)->xprev.isNull());
+ *txn->recoveryUnit()->writing(&getExtent(a)->xprev) = lastExt;
+ *txn->recoveryUnit()->writing(&getExtent(lastExt)->xnext) = a;
+ _setFreeListStart(txn, firstExt);
}
+}
- void MmapV1ExtentManager::freeListStats(OperationContext* txn,
- int* numExtents,
- int64_t* totalFreeSizeBytes) const {
- Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_S);
+DiskLoc MmapV1ExtentManager::_getFreeListStart() const {
+ if (_files.empty())
+ return DiskLoc();
+ const DataFile* file = _getOpenFile(0);
+ return file->header()->freeListStart;
+}
- invariant(numExtents);
- invariant(totalFreeSizeBytes);
+DiskLoc MmapV1ExtentManager::_getFreeListEnd() const {
+ if (_files.empty())
+ return DiskLoc();
+ const DataFile* file = _getOpenFile(0);
+ return file->header()->freeListEnd;
+}
- *numExtents = 0;
- *totalFreeSizeBytes = 0;
+void MmapV1ExtentManager::_setFreeListStart(OperationContext* txn, DiskLoc loc) {
+ invariant(!_files.empty());
+ DataFile* file = _files[0];
+ *txn->recoveryUnit()->writing(&file->header()->freeListStart) = loc;
+}
- DiskLoc a = _getFreeListStart();
- while( !a.isNull() ) {
- Extent *e = getExtent( a );
- (*numExtents)++;
- (*totalFreeSizeBytes) += e->length;
- a = e->xnext;
- }
+void MmapV1ExtentManager::_setFreeListEnd(OperationContext* txn, DiskLoc loc) {
+ invariant(!_files.empty());
+ DataFile* file = _files[0];
+ *txn->recoveryUnit()->writing(&file->header()->freeListEnd) = loc;
+}
- }
+void MmapV1ExtentManager::freeListStats(OperationContext* txn,
+ int* numExtents,
+ int64_t* totalFreeSizeBytes) const {
+ Lock::ResourceLock rlk(txn->lockState(), _rid, MODE_S);
- void MmapV1ExtentManager::printFreeList() const {
- log() << "dump freelist " << _dbname << endl;
+ invariant(numExtents);
+ invariant(totalFreeSizeBytes);
- DiskLoc a = _getFreeListStart();
- while( !a.isNull() ) {
- Extent *e = getExtent( a );
- log() << " extent " << a.toString()
- << " len:" << e->length
- << " prev:" << e->xprev.toString() << endl;
- a = e->xnext;
- }
+ *numExtents = 0;
+ *totalFreeSizeBytes = 0;
- log() << "end freelist" << endl;
+ DiskLoc a = _getFreeListStart();
+ while (!a.isNull()) {
+ Extent* e = getExtent(a);
+ (*numExtents)++;
+ (*totalFreeSizeBytes) += e->length;
+ a = e->xnext;
}
+}
- namespace {
- class CacheHintMadvise : public ExtentManager::CacheHint {
- public:
- CacheHintMadvise(void *p, unsigned len, MAdvise::Advice a)
- : _advice( p, len, a ) {
- }
- private:
- MAdvise _advice;
- };
- }
+void MmapV1ExtentManager::printFreeList() const {
+ log() << "dump freelist " << _dbname << endl;
- ExtentManager::CacheHint* MmapV1ExtentManager::cacheHint( const DiskLoc& extentLoc,
- const ExtentManager::HintType& hint ) {
- invariant ( hint == Sequential );
- Extent* e = getExtent( extentLoc );
- return new CacheHintMadvise( reinterpret_cast<void*>( e ),
- e->length,
- MAdvise::Sequential );
+ DiskLoc a = _getFreeListStart();
+ while (!a.isNull()) {
+ Extent* e = getExtent(a);
+ log() << " extent " << a.toString() << " len:" << e->length
+ << " prev:" << e->xprev.toString() << endl;
+ a = e->xnext;
}
- MmapV1ExtentManager::FilesArray::~FilesArray() {
- for (int i = 0; i < size(); i++) {
- delete _files[i];
- }
- }
+ log() << "end freelist" << endl;
+}
- void MmapV1ExtentManager::FilesArray::push_back(DataFile* val) {
- stdx::lock_guard<stdx::mutex> lk(_writersMutex);
- const int n = _size.load();
- invariant(n < DiskLoc::MaxFiles);
- // Note ordering: _size update must come after updating the _files array
- _files[n] = val;
- _size.store(n + 1);
- }
+namespace {
+class CacheHintMadvise : public ExtentManager::CacheHint {
+public:
+ CacheHintMadvise(void* p, unsigned len, MAdvise::Advice a) : _advice(p, len, a) {}
+
+private:
+ MAdvise _advice;
+};
+}
- DataFileVersion MmapV1ExtentManager::getFileFormat(OperationContext* txn) const {
- if ( numFiles() == 0 )
- return DataFileVersion(0, 0);
+ExtentManager::CacheHint* MmapV1ExtentManager::cacheHint(const DiskLoc& extentLoc,
+ const ExtentManager::HintType& hint) {
+ invariant(hint == Sequential);
+ Extent* e = getExtent(extentLoc);
+ return new CacheHintMadvise(reinterpret_cast<void*>(e), e->length, MAdvise::Sequential);
+}
- // We explicitly only look at the first file.
- return _getOpenFile(0)->getHeader()->version;
+MmapV1ExtentManager::FilesArray::~FilesArray() {
+ for (int i = 0; i < size(); i++) {
+ delete _files[i];
}
+}
- void MmapV1ExtentManager::setFileFormat(OperationContext* txn, DataFileVersion newVersion) {
- invariant(numFiles() > 0);
+void MmapV1ExtentManager::FilesArray::push_back(DataFile* val) {
+ stdx::lock_guard<stdx::mutex> lk(_writersMutex);
+ const int n = _size.load();
+ invariant(n < DiskLoc::MaxFiles);
+ // Note ordering: _size update must come after updating the _files array
+ _files[n] = val;
+ _size.store(n + 1);
+}
- DataFile* df = _getOpenFile(0);
- invariant(df);
+DataFileVersion MmapV1ExtentManager::getFileFormat(OperationContext* txn) const {
+ if (numFiles() == 0)
+ return DataFileVersion(0, 0);
- *txn->recoveryUnit()->writing(&df->getHeader()->version) = newVersion;
- }
+ // We explicitly only look at the first file.
+ return _getOpenFile(0)->getHeader()->version;
+}
+
+void MmapV1ExtentManager::setFileFormat(OperationContext* txn, DataFileVersion newVersion) {
+ invariant(numFiles() > 0);
+
+ DataFile* df = _getOpenFile(0);
+ invariant(df);
+
+ *txn->recoveryUnit()->writing(&df->getHeader()->version) = newVersion;
+}
}
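
The _allocFromFreeList hunk above builds a [low, high] acceptance window before walking the free list. Below is a minimal standalone sketch of that window computation; the helper name and the minSize/maxSize parameters are illustrative stand-ins for the manager's own methods, not MongoDB APIs.

#include <algorithm>
#include <utility>

// Illustrative sketch only: mirrors the window computation in _allocFromFreeList.
std::pair<int, int> freeListWindow(int approxSize, bool capped, int minSize, int maxSize) {
    int low, high;
    if (capped) {
        // capped collections are strict about the size
        low = approxSize;
        if (low > 2048)
            low -= 256;
        high = static_cast<int>(approxSize * 1.05) + 256;
    } else {
        low = static_cast<int>(approxSize * 0.8);
        high = static_cast<int>(approxSize * 1.4);
    }
    if (high <= 0) {
        // the multiplication overflowed
        high = std::max(approxSize, maxSize);
    }
    if (high <= minSize) {
        // never go below the minimum extent size
        high = minSize + 1;
    }
    return {low, high};
}

// For a non-capped request of 1MB (1048576 bytes) this yields [838860, 1468006];
// the scan then prefers the free extent whose length is closest to approxSize and
// stops early once the relative difference drops below 10%.
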
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
index 8253d0f87a3..1f7a0963aa1 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
@@ -45,204 +45,198 @@
namespace mongo {
- class DataFile;
- class DataFileVersion;
- class MmapV1RecordHeader;
- class OperationContext;
+class DataFile;
+class DataFileVersion;
+class MmapV1RecordHeader;
+class OperationContext;
- struct Extent;
+struct Extent;
+/**
+ * ExtentManager basics
+ * - one per database
+ * - responsible for managing <db>.# files
+ * - NOT responsible for .ns file
+ * - gives out extents
+ * - responsible for figuring out how to get a new extent
+ * - can use any method it wants to do so
+ * - this structure is NOT stored on disk
+ * - this class is thread safe, except as indicated below
+ *
+ * Implementation:
+ * - ExtentManager holds a preallocated list of DataFile
+ * - files will not be removed from the EM, so _files access can be lock-free
+ * - extent size and loc are immutable
+ * - Any non-const public operation on an ExtentManager acquires a MODE_X lock on its
+ *   RESOURCE_MMAPv1_EXTENT_MANAGER resource from the lock manager, which extends its
+ *   lifetime through any WriteUnitOfWork that might need rollback. Private methods are only
+ *   called from public ones.
+ */
+class MmapV1ExtentManager : public ExtentManager {
+ MONGO_DISALLOW_COPYING(MmapV1ExtentManager);
+
+public:
/**
- * ExtentManager basics
- * - one per database
- * - responsible for managing <db>.# files
- * - NOT responsible for .ns file
- * - gives out extents
- * - responsible for figuring out how to get a new extent
- * - can use any method it wants to do so
- * - this structure is NOT stored on disk
- * - this class is thread safe, except as indicated below
- *
- * Implementation:
- * - ExtentManager holds a preallocated list of DataFile
- * - files will not be removed from the EM, so _files access can be lock-free
- * - extent size and loc are immutable
- * - Any non-const public operations on an ExtentManager will acquire an MODE_X lock on its
- * RESOURCE_MMAPv1_EXTENT_MANAGER resource from the lock-manager, which will extend life
- * to during WriteUnitOfWorks that might need rollback. Private methods will only
- * be called from public ones.
+ * @param freeListDetails this is a reference into the .ns file
+ * while a bit odd, this is not a layer violation as extents
+ * are a peer to the .ns file, without any layering
*/
- class MmapV1ExtentManager : public ExtentManager {
- MONGO_DISALLOW_COPYING( MmapV1ExtentManager );
- public:
- /**
- * @param freeListDetails this is a reference into the .ns file
- * while a bit odd, this is not a layer violation as extents
- * are a peer to the .ns file, without any layering
- */
- MmapV1ExtentManager(StringData dbname, StringData path,
- bool directoryPerDB);
+ MmapV1ExtentManager(StringData dbname, StringData path, bool directoryPerDB);
- /**
- * opens all current files, not thread safe
- */
- Status init(OperationContext* txn);
+ /**
+ * opens all current files, not thread safe
+ */
+ Status init(OperationContext* txn);
- int numFiles() const;
- long long fileSize() const;
+ int numFiles() const;
+ long long fileSize() const;
- // must call Extent::reuse on the returned extent
- DiskLoc allocateExtent( OperationContext* txn,
- bool capped,
- int size,
- bool enforceQuota );
+ // must call Extent::reuse on the returned extent
+ DiskLoc allocateExtent(OperationContext* txn, bool capped, int size, bool enforceQuota);
- /**
- * firstExt has to be == lastExt or a chain
- */
- void freeExtents( OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt );
+ /**
+ * firstExt has to be == lastExt or a chain
+ */
+ void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt);
- /**
- * frees a single extent
- * ignores all fields in the Extent except: magic, myLoc, length
- */
- void freeExtent( OperationContext* txn, DiskLoc extent );
+ /**
+ * frees a single extent
+ * ignores all fields in the Extent except: magic, myLoc, length
+ */
+ void freeExtent(OperationContext* txn, DiskLoc extent);
- // For debug only: not thread safe
- void printFreeList() const;
+ // For debug only: not thread safe
+ void printFreeList() const;
- void freeListStats(OperationContext* txn,
- int* numExtents,
- int64_t* totalFreeSizeBytes) const;
+ void freeListStats(OperationContext* txn, int* numExtents, int64_t* totalFreeSizeBytes) const;
- /**
- * @param loc - has to be for a specific MmapV1RecordHeader
- * Note(erh): this sadly cannot be removed.
- * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an offset
- * from an extent. This intrinsically links an original record store to the original extent
- * manager.
- */
- MmapV1RecordHeader* recordForV1( const DiskLoc& loc ) const;
+ /**
+ * @param loc - has to be for a specific MmapV1RecordHeader
+ * Note(erh): this sadly cannot be removed.
+ * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an offset
+ * from an extent. This intrinsically links an original record store to the original extent
+ * manager.
+ */
+ MmapV1RecordHeader* recordForV1(const DiskLoc& loc) const;
- std::unique_ptr<RecordFetcher> recordNeedsFetch( const DiskLoc& loc ) const final;
+ std::unique_ptr<RecordFetcher> recordNeedsFetch(const DiskLoc& loc) const final;
- /**
- * @param loc - has to be for a specific MmapV1RecordHeader (not an Extent)
- * Note(erh) see comment on recordFor
- */
- Extent* extentForV1( const DiskLoc& loc ) const;
+ /**
+ * @param loc - has to be for a specific MmapV1RecordHeader (not an Extent)
+ * Note(erh) see comment on recordFor
+ */
+ Extent* extentForV1(const DiskLoc& loc) const;
- /**
- * @param loc - has to be for a specific MmapV1RecordHeader (not an Extent)
- * Note(erh) see comment on recordFor
- */
- DiskLoc extentLocForV1( const DiskLoc& loc ) const;
+ /**
+ * @param loc - has to be for a specific MmapV1RecordHeader (not an Extent)
+ * Note(erh) see comment on recordFor
+ */
+ DiskLoc extentLocForV1(const DiskLoc& loc) const;
- /**
- * @param loc - has to be for a specific Extent
- */
- Extent* getExtent( const DiskLoc& loc, bool doSanityCheck = true ) const;
+ /**
+ * @param loc - has to be for a specific Extent
+ */
+ Extent* getExtent(const DiskLoc& loc, bool doSanityCheck = true) const;
- /**
- * Not thread safe, requires a database exclusive lock
- */
- DataFileVersion getFileFormat(OperationContext* txn) const;
- void setFileFormat(OperationContext* txn, DataFileVersion newVersion);
+ /**
+ * Not thread safe, requires a database exclusive lock
+ */
+ DataFileVersion getFileFormat(OperationContext* txn) const;
+ void setFileFormat(OperationContext* txn, DataFileVersion newVersion);
- const DataFile* getOpenFile( int n ) const { return _getOpenFile( n ); }
+ const DataFile* getOpenFile(int n) const {
+ return _getOpenFile(n);
+ }
- virtual int maxSize() const;
+ virtual int maxSize() const;
- virtual CacheHint* cacheHint( const DiskLoc& extentLoc, const HintType& hint );
+ virtual CacheHint* cacheHint(const DiskLoc& extentLoc, const HintType& hint);
- private:
- /**
- * will return NULL if nothing suitable in free list
- */
- DiskLoc _allocFromFreeList( OperationContext* txn, int approxSize, bool capped );
+private:
+ /**
+ * will return NULL if nothing suitable in free list
+ */
+ DiskLoc _allocFromFreeList(OperationContext* txn, int approxSize, bool capped);
- /* allocate a new Extent, does not check free list
- */
- DiskLoc _createExtent( OperationContext* txn, int approxSize, bool enforceQuota );
+ /* allocate a new Extent, does not check free list
+ */
+ DiskLoc _createExtent(OperationContext* txn, int approxSize, bool enforceQuota);
- DataFile* _addAFile( OperationContext* txn, int sizeNeeded, bool preallocateNextFile );
+ DataFile* _addAFile(OperationContext* txn, int sizeNeeded, bool preallocateNextFile);
- /**
- * Shared record retrieval logic used by the public recordForV1() and likelyInPhysicalMem()
- * above.
- */
- MmapV1RecordHeader* _recordForV1( const DiskLoc& loc ) const;
+ /**
+ * Shared record retrieval logic used by the public recordForV1() and likelyInPhysicalMem()
+ * above.
+ */
+ MmapV1RecordHeader* _recordForV1(const DiskLoc& loc) const;
- DiskLoc _getFreeListStart() const;
- DiskLoc _getFreeListEnd() const;
- void _setFreeListStart( OperationContext* txn, DiskLoc loc );
- void _setFreeListEnd( OperationContext* txn, DiskLoc loc );
+ DiskLoc _getFreeListStart() const;
+ DiskLoc _getFreeListEnd() const;
+ void _setFreeListStart(OperationContext* txn, DiskLoc loc);
+ void _setFreeListEnd(OperationContext* txn, DiskLoc loc);
- const DataFile* _getOpenFile(int fileId) const;
- DataFile* _getOpenFile(int fileId);
+ const DataFile* _getOpenFile(int fileId) const;
+ DataFile* _getOpenFile(int fileId);
- DiskLoc _createExtentInFile( OperationContext* txn,
- int fileNo,
- DataFile* f,
- int size,
- bool enforceQuota );
+ DiskLoc _createExtentInFile(
+ OperationContext* txn, int fileNo, DataFile* f, int size, bool enforceQuota);
- boost::filesystem::path _fileName(int n) const;
+ boost::filesystem::path _fileName(int n) const;
-// -----
+ // -----
- const std::string _dbname; // i.e. "test"
- const std::string _path; // i.e. "/data/db"
- const bool _directoryPerDB;
- const ResourceId _rid;
+ const std::string _dbname; // i.e. "test"
+ const std::string _path; // i.e. "/data/db"
+ const bool _directoryPerDB;
+ const ResourceId _rid;
- // This reference points into the MMAPv1 engine and is only valid as long as the
- // engine is valid. Not owned here.
- RecordAccessTracker* _recordAccessTracker;
+ // This reference points into the MMAPv1 engine and is only valid as long as the
+ // engine is valid. Not owned here.
+ RecordAccessTracker* _recordAccessTracker;
+
+ /**
+ * Simple wrapper around an array object to allow append-only modification of the array,
+ * as well as concurrent read-accesses. This class has a minimal interface to keep
+ * implementation simple and easy to modify.
+ */
+ class FilesArray {
+ public:
+ FilesArray() : _size(0) {}
+ ~FilesArray();
+
+ /**
+ * Returns file at location 'n' in the array, with 'n' less than number of files added.
+ * Will always return the same pointer for a given file.
+ */
+ DataFile* operator[](int n) const {
+ invariant(n >= 0 && n < size());
+ return _files[n];
+ }
/**
- * Simple wrapper around an array object to allow append-only modification of the array,
- * as well as concurrent read-accesses. This class has a minimal interface to keep
- * implementation simple and easy to modify.
+ * Returns true iff no files were added
*/
- class FilesArray {
- public:
- FilesArray() : _size(0) { }
- ~FilesArray();
-
- /**
- * Returns file at location 'n' in the array, with 'n' less than number of files added.
- * Will always return the same pointer for a given file.
- */
- DataFile* operator[](int n) const {
- invariant(n >= 0 && n < size());
- return _files[n];
- }
-
- /**
- * Returns true iff no files were added
- */
- bool empty() const {
- return size() == 0;
- }
-
- /**
- * Returns number of files added to the array
- */
- int size() const {
- return _size.load();
- }
-
- // Appends val to the array, taking ownership of its pointer
- void push_back(DataFile* val);
-
- private:
- stdx::mutex _writersMutex;
- AtomicInt32 _size; // number of files in the array
- DataFile* _files[DiskLoc::MaxFiles];
- };
-
- FilesArray _files;
+ bool empty() const {
+ return size() == 0;
+ }
+
+ /**
+ * Returns number of files added to the array
+ */
+ int size() const {
+ return _size.load();
+ }
+
+ // Appends val to the array, taking ownership of its pointer
+ void push_back(DataFile* val);
+
+ private:
+ stdx::mutex _writersMutex;
+ AtomicInt32 _size; // number of files in the array
+ DataFile* _files[DiskLoc::MaxFiles];
};
+
+ FilesArray _files;
+};
}
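
FilesArray, declared above, lets readers index the array without taking the writers' mutex because push_back publishes the new size only after the slot has been written. A rough generic sketch of the same append-only pattern, using standard-library types rather than the MongoDB primitives (AppendOnlyArray is an invented name):

#include <atomic>
#include <mutex>

// Sketch of the append-only publication pattern used by FilesArray; not MongoDB code.
template <typename T, int MaxEntries>
class AppendOnlyArray {
public:
    T* operator[](int n) const {
        // No mutex needed: n < size() implies the slot was written before
        // the size was published.
        return _entries[n];
    }
    int size() const {
        return _size.load();
    }
    void push_back(T* val) {
        std::lock_guard<std::mutex> lk(_writersMutex);  // writers serialize among themselves
        const int n = _size.load();
        _entries[n] = val;   // write the slot first...
        _size.store(n + 1);  // ...then publish it, as FilesArray::push_back does
    }

private:
    std::mutex _writersMutex;
    std::atomic<int> _size{0};
    T* _entries[MaxEntries];
};
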
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_init.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_init.cpp
index 29fb1bc8c97..920a6d89182 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_init.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_init.cpp
@@ -38,46 +38,44 @@
namespace mongo {
- namespace {
+namespace {
- class MMAPV1Factory : public StorageEngine::Factory {
- public:
- virtual ~MMAPV1Factory() { }
- virtual StorageEngine* create(const StorageGlobalParams& params,
- const StorageEngineLockFile& lockFile) const {
- return new MMAPV1Engine(lockFile);
- }
-
- virtual StringData getCanonicalName() const {
- return "mmapv1";
- }
+class MMAPV1Factory : public StorageEngine::Factory {
+public:
+ virtual ~MMAPV1Factory() {}
+ virtual StorageEngine* create(const StorageGlobalParams& params,
+ const StorageEngineLockFile& lockFile) const {
+ return new MMAPV1Engine(lockFile);
+ }
- virtual Status validateMetadata(const StorageEngineMetadata& metadata,
- const StorageGlobalParams& params) const {
- Status status = metadata.validateStorageEngineOption(
- "directoryPerDB", params.directoryperdb);
- if (!status.isOK()) {
- return status;
- }
+ virtual StringData getCanonicalName() const {
+ return "mmapv1";
+ }
- return Status::OK();
- }
+ virtual Status validateMetadata(const StorageEngineMetadata& metadata,
+ const StorageGlobalParams& params) const {
+ Status status =
+ metadata.validateStorageEngineOption("directoryPerDB", params.directoryperdb);
+ if (!status.isOK()) {
+ return status;
+ }
- virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
- BSONObjBuilder builder;
- builder.appendBool("directoryPerDB", params.directoryperdb);
- return builder.obj();
- }
- };
+ return Status::OK();
+ }
- } // namespace
+ virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
+ BSONObjBuilder builder;
+ builder.appendBool("directoryPerDB", params.directoryperdb);
+ return builder.obj();
+ }
+};
- MONGO_INITIALIZER_WITH_PREREQUISITES(MMAPV1EngineInit,
- ("SetGlobalEnvironment"))
- (InitializerContext* context) {
+} // namespace
- getGlobalServiceContext()->registerStorageEngine("mmapv1", new MMAPV1Factory());
- return Status::OK();
- }
+MONGO_INITIALIZER_WITH_PREREQUISITES(MMAPV1EngineInit, ("SetGlobalEnvironment"))
+(InitializerContext* context) {
+ getGlobalServiceContext()->registerStorageEngine("mmapv1", new MMAPV1Factory());
+ return Status::OK();
+}
} // namespace mongo
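
The factory above exposes createMetadataOptions and validateMetadata. The sketch below shows how the two plausibly round-trip between a first start and a later restart; the flow is inferred rather than taken from this diff, MMAPV1Factory actually lives in an anonymous namespace, and the dbpath is illustrative.

// Assumed flow, mirroring the factory methods shown above; headers omitted.
void metadataRoundTripSketch() {
    StorageGlobalParams params;
    params.directoryperdb = true;

    MMAPV1Factory factory;
    BSONObj recorded = factory.createMetadataOptions(params);  // {directoryPerDB: true}

    StorageEngineMetadata metadata("/data/db");  // path is illustrative
    metadata.setStorageEngineOptions(recorded);

    // Restarting later without --directoryperdb should fail validation:
    params.directoryperdb = false;
    Status status = factory.validateMetadata(metadata, params);
    // expect status.code() == ErrorCodes::InvalidOptions
}
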
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
index 62ecdde5aa1..d5323f1b398 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
@@ -38,93 +38,89 @@
namespace {
- using namespace mongo;
-
- class MMAPV1FactoryTest : public mongo::unittest::Test {
- private:
- virtual void setUp() {
- ServiceContext* globalEnv = getGlobalServiceContext();
- ASSERT_TRUE(globalEnv);
- ASSERT_TRUE(getGlobalServiceContext()->isRegisteredStorageEngine("mmapv1"));
- std::unique_ptr<StorageFactoriesIterator> sfi(getGlobalServiceContext()->
- makeStorageFactoriesIterator());
- ASSERT_TRUE(sfi);
- bool found = false;
- while (sfi->more()) {
- const StorageEngine::Factory* currentFactory = sfi->next();
- if (currentFactory->getCanonicalName() == "mmapv1") {
- found = true;
- factory = currentFactory;
- break;
- }
+using namespace mongo;
+
+class MMAPV1FactoryTest : public mongo::unittest::Test {
+private:
+ virtual void setUp() {
+ ServiceContext* globalEnv = getGlobalServiceContext();
+ ASSERT_TRUE(globalEnv);
+ ASSERT_TRUE(getGlobalServiceContext()->isRegisteredStorageEngine("mmapv1"));
+ std::unique_ptr<StorageFactoriesIterator> sfi(
+ getGlobalServiceContext()->makeStorageFactoriesIterator());
+ ASSERT_TRUE(sfi);
+ bool found = false;
+ while (sfi->more()) {
+ const StorageEngine::Factory* currentFactory = sfi->next();
+ if (currentFactory->getCanonicalName() == "mmapv1") {
+ found = true;
+ factory = currentFactory;
+ break;
}
- ASSERT_TRUE(found);
- }
-
- virtual void tearDown() {
- factory = NULL;
- }
-
- protected:
- const StorageEngine::Factory* factory;
- };
-
- void _testValidateMetadata(const StorageEngine::Factory* factory,
- const BSONObj& metadataOptions,
- bool directoryPerDB,
- ErrorCodes::Error expectedCode) {
- // It is fine to specify an invalid data directory for the metadata
- // as long as we do not invoke read() or write().
- StorageEngineMetadata metadata("no_such_directory");
- metadata.setStorageEngineOptions(metadataOptions);
-
- StorageGlobalParams storageOptions;
- storageOptions.directoryperdb = directoryPerDB;
-
- Status status = factory->validateMetadata(metadata, storageOptions);
- if (expectedCode != status.code()) {
- FAIL(str::stream()
- << "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode) << " but got "
- << status.toString()
- << " instead. metadataOptions: " << metadataOptions
- << "; directoryPerDB: " << directoryPerDB);
}
+ ASSERT_TRUE(found);
}
- // Do not validate fields that are not present in metadata.
- TEST_F(MMAPV1FactoryTest, ValidateMetadataEmptyOptions) {
- _testValidateMetadata(factory, BSONObj(), false, ErrorCodes::OK);
- _testValidateMetadata(factory, BSONObj(), true, ErrorCodes::OK);
- }
-
- TEST_F(MMAPV1FactoryTest, ValidateMetadataDirectoryPerDB) {
- _testValidateMetadata(factory, fromjson("{directoryPerDB: 123}"), false,
- ErrorCodes::FailedToParse);
- _testValidateMetadata(factory, fromjson("{directoryPerDB: false}"), false,
- ErrorCodes::OK);
- _testValidateMetadata(factory, fromjson("{directoryPerDB: false}"), true,
- ErrorCodes::InvalidOptions);
- _testValidateMetadata(factory, fromjson("{directoryPerDB: true}"), false,
- ErrorCodes::InvalidOptions);
- _testValidateMetadata(factory, fromjson("{directoryPerDB: true}"), true,
- ErrorCodes::OK);
- }
-
- void _testCreateMetadataOptions(const StorageEngine::Factory* factory,
- bool directoryPerDB) {
- StorageGlobalParams storageOptions;
- storageOptions.directoryperdb = directoryPerDB;
-
- BSONObj metadataOptions = factory->createMetadataOptions(storageOptions);
- BSONElement directoryPerDBElement = metadataOptions.getField("directoryPerDB");
- ASSERT_TRUE(directoryPerDBElement.isBoolean());
- ASSERT_EQUALS(directoryPerDB, directoryPerDBElement.boolean());
+ virtual void tearDown() {
+ factory = NULL;
}
- TEST_F(MMAPV1FactoryTest, CreateMetadataOptions) {
- _testCreateMetadataOptions(factory, false);
- _testCreateMetadataOptions(factory, true);
+protected:
+ const StorageEngine::Factory* factory;
+};
+
+void _testValidateMetadata(const StorageEngine::Factory* factory,
+ const BSONObj& metadataOptions,
+ bool directoryPerDB,
+ ErrorCodes::Error expectedCode) {
+ // It is fine to specify an invalid data directory for the metadata
+ // as long as we do not invoke read() or write().
+ StorageEngineMetadata metadata("no_such_directory");
+ metadata.setStorageEngineOptions(metadataOptions);
+
+ StorageGlobalParams storageOptions;
+ storageOptions.directoryperdb = directoryPerDB;
+
+ Status status = factory->validateMetadata(metadata, storageOptions);
+ if (expectedCode != status.code()) {
+ FAIL(str::stream()
+ << "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
+ << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
+ << " instead. metadataOptions: " << metadataOptions
+ << "; directoryPerDB: " << directoryPerDB);
}
+}
+
+// Do not validate fields that are not present in metadata.
+TEST_F(MMAPV1FactoryTest, ValidateMetadataEmptyOptions) {
+ _testValidateMetadata(factory, BSONObj(), false, ErrorCodes::OK);
+ _testValidateMetadata(factory, BSONObj(), true, ErrorCodes::OK);
+}
+
+TEST_F(MMAPV1FactoryTest, ValidateMetadataDirectoryPerDB) {
+ _testValidateMetadata(
+ factory, fromjson("{directoryPerDB: 123}"), false, ErrorCodes::FailedToParse);
+ _testValidateMetadata(factory, fromjson("{directoryPerDB: false}"), false, ErrorCodes::OK);
+ _testValidateMetadata(
+ factory, fromjson("{directoryPerDB: false}"), true, ErrorCodes::InvalidOptions);
+ _testValidateMetadata(
+ factory, fromjson("{directoryPerDB: true}"), false, ErrorCodes::InvalidOptions);
+ _testValidateMetadata(factory, fromjson("{directoryPerDB: true}"), true, ErrorCodes::OK);
+}
+
+void _testCreateMetadataOptions(const StorageEngine::Factory* factory, bool directoryPerDB) {
+ StorageGlobalParams storageOptions;
+ storageOptions.directoryperdb = directoryPerDB;
+
+ BSONObj metadataOptions = factory->createMetadataOptions(storageOptions);
+ BSONElement directoryPerDBElement = metadataOptions.getField("directoryPerDB");
+ ASSERT_TRUE(directoryPerDBElement.isBoolean());
+ ASSERT_EQUALS(directoryPerDB, directoryPerDBElement.boolean());
+}
+
+TEST_F(MMAPV1FactoryTest, CreateMetadataOptions) {
+ _testCreateMetadataOptions(factory, false);
+ _testCreateMetadataOptions(factory, true);
+}
} // namespace
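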
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_options.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_options.cpp
index aa5168ea2c6..554a5eafe37 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_options.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_options.cpp
@@ -35,65 +35,65 @@
namespace mongo {
- MMAPV1Options mmapv1GlobalOptions;
+MMAPV1Options mmapv1GlobalOptions;
- /**
- * Specify an integer between 1 and 500 signifying the number of milliseconds (ms)
- * between journal commits.
- */
- class JournalCommitIntervalSetting : public ServerParameter {
- public:
- JournalCommitIntervalSetting() :
- ServerParameter(ServerParameterSet::getGlobal(), "journalCommitInterval",
- false, // allowedToChangeAtStartup
- true // allowedToChangeAtRuntime
- ) {}
+/**
+ * Specify an integer between 1 and 500 signifying the number of milliseconds (ms)
+ * between journal commits.
+ */
+class JournalCommitIntervalSetting : public ServerParameter {
+public:
+ JournalCommitIntervalSetting()
+ : ServerParameter(ServerParameterSet::getGlobal(),
+ "journalCommitInterval",
+ false, // allowedToChangeAtStartup
+ true // allowedToChangeAtRuntime
+ ) {}
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b << name << mmapv1GlobalOptions.journalCommitInterval;
- }
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b << name << mmapv1GlobalOptions.journalCommitInterval;
+ }
- virtual Status set(const BSONElement& newValueElement) {
- long long newValue;
- if (!newValueElement.isNumber()) {
- StringBuilder sb;
- sb << "Expected number type for journalCommitInterval via setParameter command: "
- << newValueElement;
- return Status(ErrorCodes::BadValue, sb.str());
- }
- if (newValueElement.type() == NumberDouble &&
- (newValueElement.numberDouble() - newValueElement.numberLong()) > 0) {
- StringBuilder sb;
- sb << "journalCommitInterval must be a whole number: "
- << newValueElement;
- return Status(ErrorCodes::BadValue, sb.str());
- }
- newValue = newValueElement.numberLong();
- if (newValue <= 1 || newValue >= 500) {
- StringBuilder sb;
- sb << "journalCommitInterval must be between 1 and 500, but attempted to set to: "
- << newValue;
- return Status(ErrorCodes::BadValue, sb.str());
- }
- mmapv1GlobalOptions.journalCommitInterval = static_cast<unsigned>(newValue);
- return Status::OK();
+ virtual Status set(const BSONElement& newValueElement) {
+ long long newValue;
+ if (!newValueElement.isNumber()) {
+ StringBuilder sb;
+ sb << "Expected number type for journalCommitInterval via setParameter command: "
+ << newValueElement;
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
+ if (newValueElement.type() == NumberDouble &&
+ (newValueElement.numberDouble() - newValueElement.numberLong()) > 0) {
+ StringBuilder sb;
+ sb << "journalCommitInterval must be a whole number: " << newValueElement;
+ return Status(ErrorCodes::BadValue, sb.str());
}
+ newValue = newValueElement.numberLong();
+ if (newValue <= 1 || newValue >= 500) {
+ StringBuilder sb;
+ sb << "journalCommitInterval must be between 1 and 500, but attempted to set to: "
+ << newValue;
+ return Status(ErrorCodes::BadValue, sb.str());
+ }
+ mmapv1GlobalOptions.journalCommitInterval = static_cast<unsigned>(newValue);
+ return Status::OK();
+ }
- virtual Status setFromString(const std::string& str) {
- unsigned newValue;
- Status status = parseNumberFromString(str, &newValue);
- if (!status.isOK()) {
- return status;
- }
- if (newValue <= 1 || newValue >= 500) {
- StringBuilder sb;
- sb << "journalCommitInterval must be between 1 and 500, but attempted to set to: "
- << newValue;
- return Status(ErrorCodes::BadValue, sb.str());
- }
- mmapv1GlobalOptions.journalCommitInterval = newValue;
- return Status::OK();
+ virtual Status setFromString(const std::string& str) {
+ unsigned newValue;
+ Status status = parseNumberFromString(str, &newValue);
+ if (!status.isOK()) {
+ return status;
+ }
+ if (newValue <= 1 || newValue >= 500) {
+ StringBuilder sb;
+ sb << "journalCommitInterval must be between 1 and 500, but attempted to set to: "
+ << newValue;
+ return Status(ErrorCodes::BadValue, sb.str());
}
- } journalCommitIntervalSetting;
+ mmapv1GlobalOptions.journalCommitInterval = newValue;
+ return Status::OK();
+ }
+} journalCommitIntervalSetting;
-} // namespace mongo
+} // namespace mongo
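
Both set() and setFromString() above apply the same strict bounds, so the values accepted through setParameter are 2 through 499 even though the surrounding comments speak of 1 to 500. A tiny illustrative helper (not part of the source) capturing that check:

bool isValidJournalCommitInterval(long long value) {
    return value > 1 && value < 500;  // mirrors the checks in set()/setFromString()
}
// isValidJournalCommitInterval(1)   -> false
// isValidJournalCommitInterval(2)   -> true
// isValidJournalCommitInterval(499) -> true
// isValidJournalCommitInterval(500) -> false
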
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_options.h b/src/mongo/db/storage/mmap_v1/mmap_v1_options.h
index f70dea73af7..d94d46c449e 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_options.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_options.h
@@ -37,60 +37,59 @@
namespace mongo {
- struct MMAPV1Options {
+struct MMAPV1Options {
+ MMAPV1Options()
+ : lenForNewNsFiles(16 * 1024 * 1024),
+ preallocj(true),
+ prealloc(false),
+ journalCommitInterval(0), // 0 means use default
+ quota(false),
+ quotaFiles(8) {}
- MMAPV1Options() :
- lenForNewNsFiles(16 * 1024 * 1024),
- preallocj(true),
- prealloc(false),
- journalCommitInterval(0), // 0 means use default
- quota(false),
- quotaFiles(8) {}
+ // --nssize
+ // Specifies the default size for namespace files, which are files that end in .ns.
+ // Each collection and index counts as a namespace.
+ unsigned lenForNewNsFiles;
- // --nssize
- // Specifies the default size for namespace files, which are files that end in .ns.
- // Each collection and index counts as a namespace.
- unsigned lenForNewNsFiles;
+ bool preallocj; // --nopreallocj no preallocation of journal files
+ bool prealloc; // --noprealloc no preallocation of data files
+ bool smallfiles; // --smallfiles allocate smaller data files
- bool preallocj; // --nopreallocj no preallocation of journal files
- bool prealloc; // --noprealloc no preallocation of data files
- bool smallfiles; // --smallfiles allocate smaller data files
+ // --journalCommitInterval
+ // The maximum amount of time the mongod process allows between journal operations.
+ // Values can range from 2 to 300 milliseconds. Lower values increase the durability
+ // of the journal, at the expense of disk performance.
+ unsigned journalCommitInterval; // group/batch commit interval ms
- // --journalCommitInterval
- // The maximum amount of time the mongod process allows between journal operations.
- // Values can range from 2 to 300 milliseconds. Lower values increase the durability
- // of the journal, at the expense of disk performance.
- unsigned journalCommitInterval; // group/batch commit interval ms
-
- // --journalOptions 7 dump journal and terminate without doing anything further
- // --journalOptions 4 recover and terminate without listening
- enum { // bits to be ORed
- JournalDumpJournal = 1, // dump diagnostics on the journal during recovery
- JournalScanOnly = 2, // don't do any real work, just scan and dump if dump
- // specified
- JournalRecoverOnly = 4, // terminate after recovery step
- JournalParanoid = 8, // paranoid mode enables extra checks
- JournalAlwaysCommit = 16, // do a group commit every time the writelock is released
- JournalAlwaysRemap = 32, // remap the private view after every group commit
- // (may lag to the next write lock acquisition,
- // but will do all files then)
- JournalNoCheckSpace = 64 // don't check that there is enough room for journal files
- // before startup (for diskfull tests)
- };
- int journalOptions; // --journalOptions <n> for debugging
+ // --journalOptions 7 dump journal and terminate without doing anything further
+ // --journalOptions 4 recover and terminate without listening
+ enum { // bits to be ORed
+ JournalDumpJournal = 1, // dump diagnostics on the journal during recovery
+ JournalScanOnly = 2, // don't do any real work, just scan and dump if dump
+ // specified
+ JournalRecoverOnly = 4, // terminate after recovery step
+ JournalParanoid = 8, // paranoid mode enables extra checks
+ JournalAlwaysCommit = 16, // do a group commit every time the writelock is released
+ JournalAlwaysRemap = 32, // remap the private view after every group commit
+ // (may lag to the next write lock acquisition,
+ // but will do all files then)
+ JournalNoCheckSpace = 64 // don't check that there is enough room for journal files
+ // before startup (for diskfull tests)
+ };
+ int journalOptions; // --journalOptions <n> for debugging
- // --quota
- // Enables a maximum limit for the number data files each database can have.
- // When running with the --quota option, MongoDB has a maximum of 8 data files
- // per database. Adjust the quota with --quotaFiles.
- bool quota;
+ // --quota
+    // Enables a maximum limit for the number of data files each database can have.
+ // When running with the --quota option, MongoDB has a maximum of 8 data files
+ // per database. Adjust the quota with --quotaFiles.
+ bool quota;
- // --quotaFiles
- // Modifies the limit on the number of data files per database.
- // --quotaFiles option requires that you set --quota.
- int quotaFiles; // --quotaFiles
- };
+ // --quotaFiles
+ // Modifies the limit on the number of data files per database.
+ // --quotaFiles option requires that you set --quota.
+ int quotaFiles; // --quotaFiles
+};
- extern MMAPV1Options mmapv1GlobalOptions;
+extern MMAPV1Options mmapv1GlobalOptions;
-} // namespace mongo
+} // namespace mongo
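
journalOptions, declared above, is a bit mask assembled from the enum in MMAPV1Options. Assuming mmap_v1_options.h is included, the two values called out in its comments decompose as follows (illustrative snippet):

const int kDumpAndTerminate = MMAPV1Options::JournalDumpJournal |   // 1
                              MMAPV1Options::JournalScanOnly |      // 2
                              MMAPV1Options::JournalRecoverOnly;    // 4, so 7 == --journalOptions 7
const int kRecoverAndTerminate = MMAPV1Options::JournalRecoverOnly; // 4 == --journalOptions 4
// e.g. mmapv1GlobalOptions.journalOptions = kDumpAndTerminate;  // for a diagnostic run
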
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
index 6e2e54d56c5..5a00a5a9a7f 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
@@ -41,58 +41,41 @@
namespace mongo {
- class MyHarnessHelper : public HarnessHelper {
- public:
- MyHarnessHelper() {
- }
+class MyHarnessHelper : public HarnessHelper {
+public:
+ MyHarnessHelper() {}
- virtual RecordStore* newNonCappedRecordStore() {
- OperationContextNoop txn;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- md->setUserFlag( &txn, CollectionOptions::Flag_NoPadding );
- SimpleRecordStoreV1* rs = new SimpleRecordStoreV1( &txn,
- "a.b",
- md,
- &_em,
- false );
- return rs;
- }
-
- virtual RecordStore* newCappedRecordStore( int64_t cappedMaxSize,
- int64_t cappedMaxDocs ) {
- OperationContextNoop txn;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- CappedRecordStoreV1* rs = new CappedRecordStoreV1( &txn,
- NULL,
- "a.b",
- md,
- &_em,
- false );
-
- LocAndSize records[] = {
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 1000},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &_em, md);
+ virtual RecordStore* newNonCappedRecordStore() {
+ OperationContextNoop txn;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
+ SimpleRecordStoreV1* rs = new SimpleRecordStoreV1(&txn, "a.b", md, &_em, false);
+ return rs;
+ }
- return rs;
- }
+ virtual RecordStore* newCappedRecordStore(int64_t cappedMaxSize, int64_t cappedMaxDocs) {
+ OperationContextNoop txn;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ CappedRecordStoreV1* rs = new CappedRecordStoreV1(&txn, NULL, "a.b", md, &_em, false);
- virtual RecoveryUnit* newRecoveryUnit() {
- return new RecoveryUnitNoop();
- }
+ LocAndSize records[] = {{}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
+ initializeV1RS(&txn, records, drecs, NULL, &_em, md);
- private:
- DummyExtentManager _em;
- };
+ return rs;
+ }
- HarnessHelper* newHarnessHelper() {
- return new MyHarnessHelper();
+ virtual RecoveryUnit* newRecoveryUnit() {
+ return new RecoveryUnitNoop();
}
+private:
+ DummyExtentManager _em;
+};
+
+HarnessHelper* newHarnessHelper() {
+ return new MyHarnessHelper();
+}
}
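
The harness above plugs the MMAPv1 record stores into the shared record-store test suite. A rough usage sketch of how such a helper is typically consumed follows; the shared test driver is not part of this diff, so the exact shape is assumed (headers omitted):

void harnessUsageSketch() {
    std::unique_ptr<HarnessHelper> harness(newHarnessHelper());
    std::unique_ptr<RecordStore> rs(harness->newNonCappedRecordStore());
    std::unique_ptr<RecoveryUnit> ru(harness->newRecoveryUnit());
    // The generic tests build an OperationContext around 'ru' and then insert,
    // read back and delete records through 'rs'.
}
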
diff --git a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
index 2969028575a..88abedd9c77 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
@@ -43,498 +43,472 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::vector;
+using std::endl;
+using std::string;
+using std::vector;
- namespace {
- mongo::AtomicUInt64 mmfNextId(0);
- }
+namespace {
+mongo::AtomicUInt64 mmfNextId(0);
+}
- static size_t fetchMinOSPageSizeBytes() {
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- size_t minOSPageSizeBytes = si.dwPageSize;
- minOSPageSizeBytesTest(minOSPageSizeBytes);
- return minOSPageSizeBytes;
- }
- const size_t g_minOSPageSizeBytes = fetchMinOSPageSizeBytes();
-
- // MapViewMutex
- //
- // Protects:
- // 1. Ensures all MapViewOfFile/UnMapViewOfFile operations are serialized to reduce chance of
- // "address in use" errors (error code 487)
- // - These errors can still occur if the memory is used for other purposes
- // (stack storage, heap)
- // 2. Prevents calls to VirtualProtect while we remapping files.
- // Lock Ordering:
- // - If taken, must be after previewViews._m to prevent deadlocks
- stdx::mutex mapViewMutex;
-
- MAdvise::MAdvise(void *,unsigned, Advice) { }
- MAdvise::~MAdvise() { }
-
- const unsigned long long memoryMappedFileLocationFloor = 256LL * 1024LL * 1024LL * 1024LL;
- static unsigned long long _nextMemoryMappedFileLocation = memoryMappedFileLocationFloor;
-
- // nextMemoryMappedFileLocationMutex
- //
- // Protects:
- // Windows 64-bit specific allocation of virtual memory regions for
- // placing memory mapped files in memory
- // Lock Ordering:
- // No restrictions
- static SimpleMutex _nextMemoryMappedFileLocationMutex;
-
- unsigned long long AlignNumber(unsigned long long number, unsigned long long granularity)
- {
- return (number + granularity - 1) & ~(granularity - 1);
+static size_t fetchMinOSPageSizeBytes() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ size_t minOSPageSizeBytes = si.dwPageSize;
+ minOSPageSizeBytesTest(minOSPageSizeBytes);
+ return minOSPageSizeBytes;
+}
+const size_t g_minOSPageSizeBytes = fetchMinOSPageSizeBytes();
+
+// MapViewMutex
+//
+// Protects:
+// 1. Ensures all MapViewOfFile/UnMapViewOfFile operations are serialized to reduce chance of
+// "address in use" errors (error code 487)
+// - These errors can still occur if the memory is used for other purposes
+// (stack storage, heap)
+// 2. Prevents calls to VirtualProtect while we are remapping files.
+// Lock Ordering:
+// - If taken, must be after previewViews._m to prevent deadlocks
+stdx::mutex mapViewMutex;
+
+MAdvise::MAdvise(void*, unsigned, Advice) {}
+MAdvise::~MAdvise() {}
+
+const unsigned long long memoryMappedFileLocationFloor = 256LL * 1024LL * 1024LL * 1024LL;
+static unsigned long long _nextMemoryMappedFileLocation = memoryMappedFileLocationFloor;
+
+// nextMemoryMappedFileLocationMutex
+//
+// Protects:
+// Windows 64-bit specific allocation of virtual memory regions for
+// placing memory mapped files in memory
+// Lock Ordering:
+// No restrictions
+static SimpleMutex _nextMemoryMappedFileLocationMutex;
+
+unsigned long long AlignNumber(unsigned long long number, unsigned long long granularity) {
+ return (number + granularity - 1) & ~(granularity - 1);
+}
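// Worked example (illustrative, not part of the source): AlignNumber rounds 'number'
// up to the next multiple of 'granularity', assuming granularity is a power of two.
//   AlignNumber(5 * 1024 * 1024,     64 * 1024) == 5 * 1024 * 1024
//   AlignNumber(5 * 1024 * 1024 + 1, 64 * 1024) == 5 * 1024 * 1024 + 64 * 1024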
+
+static void* getNextMemoryMappedFileLocation(unsigned long long mmfSize) {
+ if (4 == sizeof(void*)) {
+ return 0;
}
+ stdx::lock_guard<SimpleMutex> lk(_nextMemoryMappedFileLocationMutex);
- static void* getNextMemoryMappedFileLocation(unsigned long long mmfSize) {
- if (4 == sizeof(void*)) {
- return 0;
- }
- stdx::lock_guard<SimpleMutex> lk(_nextMemoryMappedFileLocationMutex);
+ static unsigned long long granularity = 0;
- static unsigned long long granularity = 0;
+ if (0 == granularity) {
+ SYSTEM_INFO systemInfo;
+ GetSystemInfo(&systemInfo);
+ granularity = static_cast<unsigned long long>(systemInfo.dwAllocationGranularity);
+ }
- if (0 == granularity) {
- SYSTEM_INFO systemInfo;
- GetSystemInfo(&systemInfo);
- granularity = static_cast<unsigned long long>(systemInfo.dwAllocationGranularity);
- }
+ unsigned long long thisMemoryMappedFileLocation = _nextMemoryMappedFileLocation;
- unsigned long long thisMemoryMappedFileLocation = _nextMemoryMappedFileLocation;
+ int current_retry = 1;
- int current_retry = 1;
+ while (true) {
+ MEMORY_BASIC_INFORMATION memInfo;
- while (true) {
- MEMORY_BASIC_INFORMATION memInfo;
-
- if (VirtualQuery(reinterpret_cast<LPCVOID>(thisMemoryMappedFileLocation),
- &memInfo, sizeof(memInfo)) == 0) {
- DWORD gle = GetLastError();
-
- // If we exceed the limits of Virtual Memory
- // - 8TB before Windows 8.1/2012 R2, 128 TB after
- // restart scanning from our memory mapped floor once more
- // This is a linear scan of regions, not of every VM page
- if (gle == ERROR_INVALID_PARAMETER && current_retry == 1) {
- thisMemoryMappedFileLocation = memoryMappedFileLocationFloor;
- ++current_retry;
- continue;
- }
+ if (VirtualQuery(reinterpret_cast<LPCVOID>(thisMemoryMappedFileLocation),
+ &memInfo,
+ sizeof(memInfo)) == 0) {
+ DWORD gle = GetLastError();
- log() << "VirtualQuery of " << thisMemoryMappedFileLocation
- << " failed with error " << errnoWithDescription(gle);
- fassertFailed(17484);
+ // If we exceed the limits of Virtual Memory
+ // - 8TB before Windows 8.1/2012 R2, 128 TB after
+ // restart scanning from our memory mapped floor once more
+ // This is a linear scan of regions, not of every VM page
+ if (gle == ERROR_INVALID_PARAMETER && current_retry == 1) {
+ thisMemoryMappedFileLocation = memoryMappedFileLocationFloor;
+ ++current_retry;
+ continue;
}
- // Free memory regions that we can use for memory map files
- // 1. Marked MEM_FREE, not MEM_RESERVE
- // 2. Marked as PAGE_NOACCESS, not anything else
- if (memInfo.Protect == PAGE_NOACCESS &&
- memInfo.State == MEM_FREE &&
- memInfo.RegionSize > mmfSize)
- break;
-
- thisMemoryMappedFileLocation = reinterpret_cast<unsigned long long>(memInfo.BaseAddress)
- + memInfo.RegionSize;
+ log() << "VirtualQuery of " << thisMemoryMappedFileLocation << " failed with error "
+ << errnoWithDescription(gle);
+ fassertFailed(17484);
}
- _nextMemoryMappedFileLocation = thisMemoryMappedFileLocation
- + AlignNumber(mmfSize, granularity);
+ // Free memory regions that we can use for memory map files
+ // 1. Marked MEM_FREE, not MEM_RESERVE
+ // 2. Marked as PAGE_NOACCESS, not anything else
+ if (memInfo.Protect == PAGE_NOACCESS && memInfo.State == MEM_FREE &&
+ memInfo.RegionSize > mmfSize)
+ break;
- return reinterpret_cast<void*>(static_cast<uintptr_t>(thisMemoryMappedFileLocation));
+ thisMemoryMappedFileLocation =
+ reinterpret_cast<unsigned long long>(memInfo.BaseAddress) + memInfo.RegionSize;
}
- MemoryMappedFile::MemoryMappedFile()
- : _uniqueId(mmfNextId.fetchAndAdd(1)),
- fd(0),
- maphandle(0),
- len(0) {
+ _nextMemoryMappedFileLocation =
+ thisMemoryMappedFileLocation + AlignNumber(mmfSize, granularity);
- created();
- }
+ return reinterpret_cast<void*>(static_cast<uintptr_t>(thisMemoryMappedFileLocation));
+}
- void MemoryMappedFile::close() {
- LockMongoFilesShared::assertExclusivelyLocked();
+MemoryMappedFile::MemoryMappedFile()
+ : _uniqueId(mmfNextId.fetchAndAdd(1)), fd(0), maphandle(0), len(0) {
+ created();
+}
- // Prevent flush and close from concurrently running
- stdx::lock_guard<stdx::mutex> lk(_flushMutex);
+void MemoryMappedFile::close() {
+ LockMongoFilesShared::assertExclusivelyLocked();
- {
- stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
+ // Prevent flush and close from concurrently running
+ stdx::lock_guard<stdx::mutex> lk(_flushMutex);
- for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
- UnmapViewOfFile(*i);
- }
- }
+ {
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
- views.clear();
- if ( maphandle )
- CloseHandle(maphandle);
- maphandle = 0;
- if ( fd )
- CloseHandle(fd);
- fd = 0;
- destroyed(); // cleans up from the master list of mmaps
+ for (vector<void*>::iterator i = views.begin(); i != views.end(); i++) {
+ UnmapViewOfFile(*i);
+ }
}
- unsigned long long mapped = 0;
+ views.clear();
+ if (maphandle)
+ CloseHandle(maphandle);
+ maphandle = 0;
+ if (fd)
+ CloseHandle(fd);
+ fd = 0;
+ destroyed(); // cleans up from the master list of mmaps
+}
- void* MemoryMappedFile::createReadOnlyMap() {
- verify( maphandle );
+unsigned long long mapped = 0;
- stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
+void* MemoryMappedFile::createReadOnlyMap() {
+ verify(maphandle);
- void* readOnlyMapAddress = NULL;
- int current_retry = 0;
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
- while (true) {
+ void* readOnlyMapAddress = NULL;
+ int current_retry = 0;
- LPVOID thisAddress = getNextMemoryMappedFileLocation(len);
+ while (true) {
+ LPVOID thisAddress = getNextMemoryMappedFileLocation(len);
- readOnlyMapAddress = MapViewOfFileEx(
- maphandle, // file mapping handle
- FILE_MAP_READ, // access
- 0, 0, // file offset, high and low
- 0, // bytes to map, 0 == all
- thisAddress); // address to place file
+ readOnlyMapAddress = MapViewOfFileEx(maphandle, // file mapping handle
+ FILE_MAP_READ, // access
+ 0,
+ 0, // file offset, high and low
+ 0, // bytes to map, 0 == all
+ thisAddress); // address to place file
- if (0 == readOnlyMapAddress) {
- DWORD dosError = GetLastError();
-
- ++current_retry;
-
- // If we failed to allocate a memory mapped file, try again in case we picked
- // an address that Windows is also trying to use for some other VM allocations
- if (dosError == ERROR_INVALID_ADDRESS && current_retry < 5) {
- continue;
- }
+ if (0 == readOnlyMapAddress) {
+ DWORD dosError = GetLastError();
- log() << "MapViewOfFileEx for " << filename()
- << " at address " << thisAddress
- << " failed with error " << errnoWithDescription(dosError)
- << " (file size is " << len << ")"
- << " in MemoryMappedFile::createReadOnlyMap"
- << endl;
+ ++current_retry;
- fassertFailed(16165);
+ // If we failed to allocate a memory mapped file, try again in case we picked
+ // an address that Windows is also trying to use for some other VM allocations
+ if (dosError == ERROR_INVALID_ADDRESS && current_retry < 5) {
+ continue;
}
- break;
+ log() << "MapViewOfFileEx for " << filename() << " at address " << thisAddress
+ << " failed with error " << errnoWithDescription(dosError) << " (file size is "
+ << len << ")"
+ << " in MemoryMappedFile::createReadOnlyMap" << endl;
+
+ fassertFailed(16165);
}
- views.push_back( readOnlyMapAddress );
- return readOnlyMapAddress;
+ break;
}
- void* MemoryMappedFile::map(const char *filenameIn, unsigned long long &length, int options) {
- verify( fd == 0 && len == 0 ); // can't open more than once
- setFilename(filenameIn);
- FileAllocator::get()->allocateAsap( filenameIn, length );
- /* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary perhaps. */
- char filename[256];
- strncpy(filename, filenameIn, 255);
- filename[255] = 0;
- {
- size_t len = strlen( filename );
- for ( size_t i=len-1; i>=0; i-- ) {
- if ( filename[i] == '/' ||
- filename[i] == '\\' )
- break;
+ views.push_back(readOnlyMapAddress);
+ return readOnlyMapAddress;
+}
- if ( filename[i] == ':' )
- filename[i] = '_';
- }
+void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length, int options) {
+ verify(fd == 0 && len == 0); // can't open more than once
+ setFilename(filenameIn);
+ FileAllocator::get()->allocateAsap(filenameIn, length);
+ /* big hack here: Babble uses db names with colons, which doesn't seem to work on Windows. Temporary, perhaps. */
+ char filename[256];
+ strncpy(filename, filenameIn, 255);
+ filename[255] = 0;
+ {
+ size_t len = strlen(filename);
+ // Scan backward with a signed index so the loop ends once i reaches -1.
+ for (int i = static_cast<int>(len) - 1; i >= 0; i--) {
+ if (filename[i] == '/' || filename[i] == '\\')
+ break;
+
+ if (filename[i] == ':')
+ filename[i] = '_';
}
+ }
- updateLength( filename, length );
+ updateLength(filename, length);
- {
- DWORD createOptions = FILE_ATTRIBUTE_NORMAL;
- if ( options & SEQUENTIAL )
- createOptions |= FILE_FLAG_SEQUENTIAL_SCAN;
- DWORD rw = GENERIC_READ | GENERIC_WRITE;
- fd = CreateFileW(
- toWideString(filename).c_str(),
- rw, // desired access
- FILE_SHARE_WRITE | FILE_SHARE_READ, // share mode
- NULL, // security
- OPEN_ALWAYS, // create disposition
- createOptions , // flags
- NULL); // hTempl
- if ( fd == INVALID_HANDLE_VALUE ) {
- DWORD dosError = GetLastError();
- log() << "CreateFileW for " << filename
- << " failed with " << errnoWithDescription( dosError )
- << " (file size is " << length << ")"
- << " in MemoryMappedFile::map"
- << endl;
- return 0;
- }
+ {
+ DWORD createOptions = FILE_ATTRIBUTE_NORMAL;
+ if (options & SEQUENTIAL)
+ createOptions |= FILE_FLAG_SEQUENTIAL_SCAN;
+ DWORD rw = GENERIC_READ | GENERIC_WRITE;
+ fd = CreateFileW(toWideString(filename).c_str(),
+ rw, // desired access
+ FILE_SHARE_WRITE | FILE_SHARE_READ, // share mode
+ NULL, // security
+ OPEN_ALWAYS, // create disposition
+ createOptions, // flags
+ NULL); // hTempl
+ if (fd == INVALID_HANDLE_VALUE) {
+ DWORD dosError = GetLastError();
+ log() << "CreateFileW for " << filename << " failed with "
+ << errnoWithDescription(dosError) << " (file size is " << length << ")"
+ << " in MemoryMappedFile::map" << endl;
+ return 0;
}
+ }
- mapped += length;
+ mapped += length;
- {
- DWORD flProtect = PAGE_READWRITE; //(options & READONLY)?PAGE_READONLY:PAGE_READWRITE;
- maphandle = CreateFileMappingW(fd, NULL, flProtect,
- length >> 32 /*maxsizehigh*/,
- (unsigned) length /*maxsizelow*/,
- NULL/*lpName*/);
- if ( maphandle == NULL ) {
- DWORD dosError = GetLastError();
- log() << "CreateFileMappingW for " << filename
- << " failed with " << errnoWithDescription( dosError )
- << " (file size is " << length << ")"
- << " in MemoryMappedFile::map"
- << endl;
- close();
- fassertFailed( 16225 );
- }
+ {
+ DWORD flProtect = PAGE_READWRITE; //(options & READONLY)?PAGE_READONLY:PAGE_READWRITE;
+ maphandle = CreateFileMappingW(fd,
+ NULL,
+ flProtect,
+ length >> 32 /*maxsizehigh*/,
+ (unsigned)length /*maxsizelow*/,
+ NULL /*lpName*/);
+ if (maphandle == NULL) {
+ DWORD dosError = GetLastError();
+ log() << "CreateFileMappingW for " << filename << " failed with "
+ << errnoWithDescription(dosError) << " (file size is " << length << ")"
+ << " in MemoryMappedFile::map" << endl;
+ close();
+ fassertFailed(16225);
}
+ }
- void *view = 0;
- {
- stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
- DWORD access = ( options & READONLY ) ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS;
-
- int current_retry = 0;
- while (true) {
+ void* view = 0;
+ {
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
+ DWORD access = (options & READONLY) ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS;
- LPVOID thisAddress = getNextMemoryMappedFileLocation(length);
+ int current_retry = 0;
+ while (true) {
+ LPVOID thisAddress = getNextMemoryMappedFileLocation(length);
- view = MapViewOfFileEx(
- maphandle, // file mapping handle
- access, // access
- 0, 0, // file offset, high and low
- 0, // bytes to map, 0 == all
- thisAddress); // address to place file
+ view = MapViewOfFileEx(maphandle, // file mapping handle
+ access, // access
+ 0,
+ 0, // file offset, high and low
+ 0, // bytes to map, 0 == all
+ thisAddress); // address to place file
- if (view == 0) {
- DWORD dosError = GetLastError();
+ if (view == 0) {
+ DWORD dosError = GetLastError();
- ++current_retry;
+ ++current_retry;
- // If we failed to allocate a memory mapped file, try again in case we picked
- // an address that Windows is also trying to use for some other VM allocations
- if (dosError == ERROR_INVALID_ADDRESS && current_retry < 5) {
- continue;
- }
+ // If we failed to allocate a memory mapped file, try again in case we picked
+ // an address that Windows is also trying to use for some other VM allocations
+ if (dosError == ERROR_INVALID_ADDRESS && current_retry < 5) {
+ continue;
+ }
#ifndef _WIN64
- // Warn user that if they are running a 32-bit app on 64-bit Windows
- if (dosError == ERROR_NOT_ENOUGH_MEMORY) {
- BOOL wow64Process;
- BOOL retWow64 = IsWow64Process(GetCurrentProcess(), &wow64Process);
- if (retWow64 && wow64Process) {
- log() << "This is a 32-bit MongoDB binary running on a 64-bit"
- " operating system that has run out of virtual memory for"
- " databases. Switch to a 64-bit build of MongoDB to open"
- " the databases.";
- }
+ // Warn the user if they are running a 32-bit app on 64-bit Windows
+ if (dosError == ERROR_NOT_ENOUGH_MEMORY) {
+ BOOL wow64Process;
+ BOOL retWow64 = IsWow64Process(GetCurrentProcess(), &wow64Process);
+ if (retWow64 && wow64Process) {
+ log() << "This is a 32-bit MongoDB binary running on a 64-bit"
+ " operating system that has run out of virtual memory for"
+ " databases. Switch to a 64-bit build of MongoDB to open"
+ " the databases.";
}
+ }
#endif
- log() << "MapViewOfFileEx for " << filename
- << " at address " << thisAddress
- << " failed with " << errnoWithDescription(dosError)
- << " (file size is " << length << ")"
- << " in MemoryMappedFile::map"
- << endl;
-
- close();
- fassertFailed(16166);
- }
+ log() << "MapViewOfFileEx for " << filename << " at address " << thisAddress
+ << " failed with " << errnoWithDescription(dosError) << " (file size is "
+ << length << ")"
+ << " in MemoryMappedFile::map" << endl;
- break;
+ close();
+ fassertFailed(16166);
}
- }
- views.push_back(view);
- len = length;
- return view;
+ break;
+ }
}
- extern stdx::mutex mapViewMutex;
-
- void* MemoryMappedFile::createPrivateMap() {
- verify( maphandle );
-
- stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
+ views.push_back(view);
+ len = length;
+ return view;
+}
- LPVOID thisAddress = getNextMemoryMappedFileLocation( len );
+extern stdx::mutex mapViewMutex;
- void* privateMapAddress = NULL;
- int current_retry = 0;
+void* MemoryMappedFile::createPrivateMap() {
+ verify(maphandle);
- while (true) {
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
- privateMapAddress = MapViewOfFileEx(
- maphandle, // file mapping handle
- FILE_MAP_READ, // access
- 0, 0, // file offset, high and low
- 0, // bytes to map, 0 == all
- thisAddress); // address to place file
+ LPVOID thisAddress = getNextMemoryMappedFileLocation(len);
- if (privateMapAddress == 0) {
- DWORD dosError = GetLastError();
+ void* privateMapAddress = NULL;
+ int current_retry = 0;
- ++current_retry;
+ while (true) {
+ privateMapAddress = MapViewOfFileEx(maphandle, // file mapping handle
+ FILE_MAP_READ, // access
+ 0,
+ 0, // file offset, high and low
+ 0, // bytes to map, 0 == all
+ thisAddress); // address to place file
- // If we failed to allocate a memory mapped file, try again in case we picked
- // an address that Windows is also trying to use for some other VM allocations
- if (dosError == ERROR_INVALID_ADDRESS && current_retry < 5) {
- continue;
- }
+ if (privateMapAddress == 0) {
+ DWORD dosError = GetLastError();
- log() << "MapViewOfFileEx for " << filename()
- << " failed with error " << errnoWithDescription(dosError)
- << " (file size is " << len << ")"
- << " in MemoryMappedFile::createPrivateMap"
- << endl;
+ ++current_retry;
- fassertFailed(16167);
+ // If we failed to allocate a memory mapped file, try again in case we picked
+ // an address that Windows is also trying to use for some other VM allocations
+ if (dosError == ERROR_INVALID_ADDRESS && current_retry < 5) {
+ continue;
}
- break;
+ log() << "MapViewOfFileEx for " << filename() << " failed with error "
+ << errnoWithDescription(dosError) << " (file size is " << len << ")"
+ << " in MemoryMappedFile::createPrivateMap" << endl;
+
+ fassertFailed(16167);
}
- views.push_back( privateMapAddress );
- return privateMapAddress;
+ break;
}
- void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) {
- LockMongoFilesExclusive lockMongoFiles;
+ views.push_back(privateMapAddress);
+ return privateMapAddress;
+}
- privateViews.clearWritableBits(oldPrivateAddr, len);
+void* MemoryMappedFile::remapPrivateView(void* oldPrivateAddr) {
+ LockMongoFilesExclusive lockMongoFiles;
- stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
+ privateViews.clearWritableBits(oldPrivateAddr, len);
- if( !UnmapViewOfFile(oldPrivateAddr) ) {
- DWORD dosError = GetLastError();
- log() << "UnMapViewOfFile for " << filename()
- << " failed with error " << errnoWithDescription( dosError )
- << " in MemoryMappedFile::remapPrivateView"
- << endl;
- fassertFailed( 16168 );
- }
+ stdx::lock_guard<stdx::mutex> lk(mapViewMutex);
- void* newPrivateView = MapViewOfFileEx(
- maphandle, // file mapping handle
- FILE_MAP_READ, // access
- 0, 0, // file offset, high and low
- 0, // bytes to map, 0 == all
- oldPrivateAddr ); // we want the same address we had before
- if ( 0 == newPrivateView ) {
- DWORD dosError = GetLastError();
- log() << "MapViewOfFileEx for " << filename()
- << " failed with error " << errnoWithDescription( dosError )
- << " (file size is " << len << ")"
- << " in MemoryMappedFile::remapPrivateView"
- << endl;
- }
- fassert( 16148, newPrivateView == oldPrivateAddr );
- return newPrivateView;
+ if (!UnmapViewOfFile(oldPrivateAddr)) {
+ DWORD dosError = GetLastError();
+ log() << "UnMapViewOfFile for " << filename() << " failed with error "
+ << errnoWithDescription(dosError) << " in MemoryMappedFile::remapPrivateView" << endl;
+ fassertFailed(16168);
}
- class WindowsFlushable : public MemoryMappedFile::Flushable {
- public:
- WindowsFlushable( MemoryMappedFile* theFile,
- void * view,
- HANDLE fd,
- const uint64_t id,
- const std::string& filename,
- stdx::mutex& flushMutex )
- : _theFile(theFile), _view(view), _fd(fd), _id(id), _filename(filename),
- _flushMutex(flushMutex)
- {}
-
- void flush() {
- if (!_view || !_fd)
- return;
+ void* newPrivateView =
+ MapViewOfFileEx(maphandle, // file mapping handle
+ FILE_MAP_READ, // access
+ 0,
+ 0, // file offset, high and low
+ 0, // bytes to map, 0 == all
+ oldPrivateAddr); // we want the same address we had before
+ if (0 == newPrivateView) {
+ DWORD dosError = GetLastError();
+ log() << "MapViewOfFileEx for " << filename() << " failed with error "
+ << errnoWithDescription(dosError) << " (file size is " << len << ")"
+ << " in MemoryMappedFile::remapPrivateView" << endl;
+ }
+ fassert(16148, newPrivateView == oldPrivateAddr);
+ return newPrivateView;
+}
- {
- LockMongoFilesShared mmfilesLock;
+class WindowsFlushable : public MemoryMappedFile::Flushable {
+public:
+ WindowsFlushable(MemoryMappedFile* theFile,
+ void* view,
+ HANDLE fd,
+ const uint64_t id,
+ const std::string& filename,
+ stdx::mutex& flushMutex)
+ : _theFile(theFile),
+ _view(view),
+ _fd(fd),
+ _id(id),
+ _filename(filename),
+ _flushMutex(flushMutex) {}
+
+ void flush() {
+ if (!_view || !_fd)
+ return;
- std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
- std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
- if ( it == mmfs.end() || (*it)->getUniqueId() != _id ) {
- // this was deleted while we were unlocked
- return;
- }
+ {
+ LockMongoFilesShared mmfilesLock;
- // Hold the flush mutex to ensure the file is not closed during flush
- _flushMutex.lock();
+ std::set<MongoFile*> mmfs = MongoFile::getAllFiles();
+ std::set<MongoFile*>::const_iterator it = mmfs.find(_theFile);
+ if (it == mmfs.end() || (*it)->getUniqueId() != _id) {
+ // this was deleted while we were unlocked
+ return;
}
- stdx::lock_guard<stdx::mutex> lk(_flushMutex, stdx::adopt_lock);
-
- int loopCount = 0;
- bool success = false;
- bool timeout = false;
- int dosError = ERROR_SUCCESS;
- const int maximumTimeInSeconds = 60 * 15;
- Timer t;
- while ( !success && !timeout ) {
- ++loopCount;
- success = FALSE != FlushViewOfFile( _view, 0 );
- if ( !success ) {
- dosError = GetLastError();
- if ( dosError != ERROR_LOCK_VIOLATION ) {
- break;
- }
- timeout = t.seconds() > maximumTimeInSeconds;
- }
- }
- if ( success && loopCount > 1 ) {
- log() << "FlushViewOfFile for " << _filename
- << " succeeded after " << loopCount
- << " attempts taking " << t.millis()
- << "ms" << endl;
- }
- else if ( !success ) {
- log() << "FlushViewOfFile for " << _filename
- << " failed with error " << dosError
- << " after " << loopCount
- << " attempts taking " << t.millis()
- << "ms" << endl;
- // Abort here to avoid data corruption
- fassert(16387, false);
- }
+ // Hold the flush mutex to ensure the file is not closed during flush
+ _flushMutex.lock();
+ }
- success = FALSE != FlushFileBuffers(_fd);
+ stdx::lock_guard<stdx::mutex> lk(_flushMutex, stdx::adopt_lock);
+
+ int loopCount = 0;
+ bool success = false;
+ bool timeout = false;
+ int dosError = ERROR_SUCCESS;
+ const int maximumTimeInSeconds = 60 * 15;
+ Timer t;
+ while (!success && !timeout) {
+ ++loopCount;
+ success = FALSE != FlushViewOfFile(_view, 0);
if (!success) {
- int err = GetLastError();
- log() << "FlushFileBuffers failed: " << errnoWithDescription( err )
- << " file: " << _filename << endl;
- dataSyncFailedHandler();
+ dosError = GetLastError();
+ if (dosError != ERROR_LOCK_VIOLATION) {
+ break;
+ }
+ timeout = t.seconds() > maximumTimeInSeconds;
}
}
+ if (success && loopCount > 1) {
+ log() << "FlushViewOfFile for " << _filename << " succeeded after " << loopCount
+ << " attempts taking " << t.millis() << "ms" << endl;
+ } else if (!success) {
+ log() << "FlushViewOfFile for " << _filename << " failed with error " << dosError
+ << " after " << loopCount << " attempts taking " << t.millis() << "ms" << endl;
+ // Abort here to avoid data corruption
+ fassert(16387, false);
+ }
- MemoryMappedFile* _theFile; // this may be deleted while we are running
- void * _view;
- HANDLE _fd;
- const uint64_t _id;
- string _filename;
- stdx::mutex& _flushMutex;
- };
-
- void MemoryMappedFile::flush(bool sync) {
- uassert(13056, "Async flushing not supported on windows", sync);
- if( !views.empty() ) {
- WindowsFlushable f(this, viewForFlushing(), fd, _uniqueId, filename(), _flushMutex);
- f.flush();
+ success = FALSE != FlushFileBuffers(_fd);
+ if (!success) {
+ int err = GetLastError();
+ log() << "FlushFileBuffers failed: " << errnoWithDescription(err)
+ << " file: " << _filename << endl;
+ dataSyncFailedHandler();
}
}
- MemoryMappedFile::Flushable * MemoryMappedFile::prepareFlush() {
- return new WindowsFlushable(this, viewForFlushing(), fd, _uniqueId,
- filename(), _flushMutex);
+ MemoryMappedFile* _theFile; // this may be deleted while we are running
+ void* _view;
+ HANDLE _fd;
+ const uint64_t _id;
+ string _filename;
+ stdx::mutex& _flushMutex;
+};
+
+void MemoryMappedFile::flush(bool sync) {
+ uassert(13056, "Async flushing not supported on windows", sync);
+ if (!views.empty()) {
+ WindowsFlushable f(this, viewForFlushing(), fd, _uniqueId, filename(), _flushMutex);
+ f.flush();
}
+}
+MemoryMappedFile::Flushable* MemoryMappedFile::prepareFlush() {
+ return new WindowsFlushable(this, viewForFlushing(), fd, _uniqueId, filename(), _flushMutex);
+}
}
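A note on the address arithmetic above: AlignNumber() rounds a mapping size up to the next multiple of the allocation granularity with the usual mask trick, and getNextMemoryMappedFileLocation() advances the shared cursor by that aligned amount. A minimal standalone sketch of the rounding, assuming the granularity is a power of two (dwAllocationGranularity is 64 KB on current Windows); the values are illustrative only:

#include <cassert>

unsigned long long alignNumber(unsigned long long number, unsigned long long granularity) {
    // Round 'number' up to the next multiple of 'granularity' (must be a power of two).
    return (number + granularity - 1) & ~(granularity - 1);
}

int main() {
    const unsigned long long granularity = 64 * 1024;             // typical dwAllocationGranularity
    assert(alignNumber(1, granularity) == 64 * 1024);              // rounds up
    assert(alignNumber(64 * 1024, granularity) == 64 * 1024);      // already aligned, unchanged
    assert(alignNumber(64 * 1024 + 1, granularity) == 128 * 1024); // next multiple
    return 0;
}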
diff --git a/src/mongo/db/storage/mmap_v1/record.h b/src/mongo/db/storage/mmap_v1/record.h
index 38c0cfd7085..0f3f9ebcdd4 100644
--- a/src/mongo/db/storage/mmap_v1/record.h
+++ b/src/mongo/db/storage/mmap_v1/record.h
@@ -37,98 +37,141 @@
namespace mongo {
- class DeletedRecord;
-
- /* MmapV1RecordHeader is a record in a datafile. DeletedRecord is similar but for deleted space.
-
- *11:03:20 AM) dm10gen: regarding extentOfs...
- (11:03:42 AM) dm10gen: an extent is a continugous disk area, which contains many Records and DeleteRecords
- (11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (64 bit total)
- (11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
- (11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
- (11:04:33 AM) dm10gen: see class DiskLoc for more info
- (11:04:43 AM) dm10gen: so that is how MmapV1RecordHeader::myExtent() works
- (11:04:53 AM) dm10gen: on an alloc(), when we build a new MmapV1RecordHeader, we must populate its extentOfs then
- */
+class DeletedRecord;
+
+/* MmapV1RecordHeader is a record in a datafile. DeletedRecord is similar but for deleted space.
+
+(11:03:20 AM) dm10gen: regarding extentOfs...
+(11:03:42 AM) dm10gen: an extent is a contiguous disk area, which contains many Records and DeleteRecords
+(11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (64 bit total)
+(11:04:16 AM) dm10gen: to keep the headers small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
+(11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
+(11:04:33 AM) dm10gen: see class DiskLoc for more info
+(11:04:43 AM) dm10gen: so that is how MmapV1RecordHeader::myExtent() works
+(11:04:53 AM) dm10gen: on an alloc(), when we build a new MmapV1RecordHeader, we must populate its extentOfs then
+*/
#pragma pack(1)
- class MmapV1RecordHeader {
- public:
- enum HeaderSizeValue { HeaderSize = 16 };
-
- int lengthWithHeaders() const { return _lengthWithHeaders; }
- int& lengthWithHeaders() { return _lengthWithHeaders; }
-
- int extentOfs() const { return _extentOfs; }
- int& extentOfs() { return _extentOfs; }
-
- int nextOfs() const { return _nextOfs; }
- int& nextOfs() { return _nextOfs; }
-
- int prevOfs() const { return _prevOfs; }
- int& prevOfs() { return _prevOfs; }
-
- const char* data() const { return _data; }
- char* data() { return _data; }
-
- // XXX remove
- const char* dataNoThrowing() const { return _data; }
- char* dataNoThrowing() { return _data; }
-
- int netLength() const { return _netLength(); }
-
- /* use this when a record is deleted. basically a union with next/prev fields */
- DeletedRecord& asDeleted() { return *((DeletedRecord*) this); }
-
- DiskLoc myExtentLoc(const DiskLoc& myLoc) const { return DiskLoc(myLoc.a(), extentOfs() ); }
-
- struct NP {
- int nextOfs;
- int prevOfs;
- };
-
- NP* np() { return (NP*) &_nextOfs; }
-
- RecordData toRecordData() const { return RecordData(_data, _netLength()); }
-
- private:
-
- int _netLength() const { return _lengthWithHeaders - HeaderSize; }
-
- int _lengthWithHeaders;
- int _extentOfs;
- int _nextOfs;
- int _prevOfs;
-
- /** be careful when referencing this that your write intent was correct */
- char _data[4];
-
- public:
- static bool MemoryTrackingEnabled;
-
+class MmapV1RecordHeader {
+public:
+ enum HeaderSizeValue { HeaderSize = 16 };
+
+ int lengthWithHeaders() const {
+ return _lengthWithHeaders;
+ }
+ int& lengthWithHeaders() {
+ return _lengthWithHeaders;
+ }
+
+ int extentOfs() const {
+ return _extentOfs;
+ }
+ int& extentOfs() {
+ return _extentOfs;
+ }
+
+ int nextOfs() const {
+ return _nextOfs;
+ }
+ int& nextOfs() {
+ return _nextOfs;
+ }
+
+ int prevOfs() const {
+ return _prevOfs;
+ }
+ int& prevOfs() {
+ return _prevOfs;
+ }
+
+ const char* data() const {
+ return _data;
+ }
+ char* data() {
+ return _data;
+ }
+
+ // XXX remove
+ const char* dataNoThrowing() const {
+ return _data;
+ }
+ char* dataNoThrowing() {
+ return _data;
+ }
+
+ int netLength() const {
+ return _netLength();
+ }
+
+ /* use this when a record is deleted. basically a union with next/prev fields */
+ DeletedRecord& asDeleted() {
+ return *((DeletedRecord*)this);
+ }
+
+ DiskLoc myExtentLoc(const DiskLoc& myLoc) const {
+ return DiskLoc(myLoc.a(), extentOfs());
+ }
+
+ struct NP {
+ int nextOfs;
+ int prevOfs;
};
-#pragma pack()
-
- // TODO: this probably moves to record_store.h
- class DeletedRecord {
- public:
- int lengthWithHeaders() const { return _lengthWithHeaders; }
- int& lengthWithHeaders() { return _lengthWithHeaders; }
+ NP* np() {
+ return (NP*)&_nextOfs;
+ }
- int extentOfs() const { return _extentOfs; }
- int& extentOfs() { return _extentOfs; }
+ RecordData toRecordData() const {
+ return RecordData(_data, _netLength());
+ }
- // TODO: we need to not const_cast here but problem is DiskLoc::writing
- DiskLoc& nextDeleted() const { return const_cast<DiskLoc&>(_nextDeleted); }
+private:
+ int _netLength() const {
+ return _lengthWithHeaders - HeaderSize;
+ }
- private:
- int _lengthWithHeaders;
+ int _lengthWithHeaders;
+ int _extentOfs;
+ int _nextOfs;
+ int _prevOfs;
- int _extentOfs;
+ /** be careful when referencing this that your write intent was correct */
+ char _data[4];
- DiskLoc _nextDeleted;
- };
+public:
+ static bool MemoryTrackingEnabled;
+};
+#pragma pack()
- BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
+// TODO: this probably moves to record_store.h
+class DeletedRecord {
+public:
+ int lengthWithHeaders() const {
+ return _lengthWithHeaders;
+ }
+ int& lengthWithHeaders() {
+ return _lengthWithHeaders;
+ }
+
+ int extentOfs() const {
+ return _extentOfs;
+ }
+ int& extentOfs() {
+ return _extentOfs;
+ }
+
+ // TODO: we should not const_cast here, but the problem is DiskLoc::writing
+ DiskLoc& nextDeleted() const {
+ return const_cast<DiskLoc&>(_nextDeleted);
+ }
+
+private:
+ int _lengthWithHeaders;
+
+ int _extentOfs;
+
+ DiskLoc _nextDeleted;
+};
+
+BOOST_STATIC_ASSERT(16 == sizeof(DeletedRecord));
} // namespace mongo
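For orientation, the reformatted header above keeps the on-disk layout unchanged: HeaderSize is 16 bytes, made up of the four leading int fields, and netLength() is lengthWithHeaders() minus that header. A minimal standalone sketch of the same layout, assuming 4-byte int as on the platforms mmap_v1 targets; the struct name is illustrative, not the real class:

#include <cstddef>

#pragma pack(1)
struct RecordHeaderLayout {
    int lengthWithHeaders;  // total record length, header included
    int extentOfs;          // offset of the owning extent within the same file
    int nextOfs;            // offset of the next record
    int prevOfs;            // offset of the previous record
    char data[4];           // first bytes of the payload
};
#pragma pack()

static_assert(sizeof(int) == 4, "layout sketch assumes 4-byte int");
static_assert(offsetof(RecordHeaderLayout, data) == 16, "payload starts after the 16-byte header");

int main() {
    RecordHeaderLayout r = {};
    r.lengthWithHeaders = 64;
    const int netLength = r.lengthWithHeaders - 16;  // mirrors MmapV1RecordHeader::netLength()
    return netLength == 48 ? 0 : 1;
}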
diff --git a/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp b/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
index ab77ad69b08..ee13b62d456 100644
--- a/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
@@ -42,312 +42,305 @@
namespace mongo {
- namespace {
+namespace {
- static bool blockSupported = false;
+static bool blockSupported = false;
- MONGO_INITIALIZER_WITH_PREREQUISITES(RecordBlockSupported,
- ("SystemInfo"))(InitializerContext* cx) {
- blockSupported = ProcessInfo::blockCheckSupported();
- return Status::OK();
- }
+MONGO_INITIALIZER_WITH_PREREQUISITES(RecordBlockSupported, ("SystemInfo"))(InitializerContext* cx) {
+ blockSupported = ProcessInfo::blockCheckSupported();
+ return Status::OK();
+}
- int hash(size_t region) {
- return
- abs( ( ( 7 + (int)(region & 0xFFFF) )
- * ( 11 + (int)( ( region >> 16 ) & 0xFFFF ) )
+int hash(size_t region) {
+ return abs(((7 + (int)(region & 0xFFFF)) * (11 + (int)((region >> 16) & 0xFFFF))
#if defined(_WIN64) || defined(__amd64__)
- * ( 13 + (int)( ( region >> 32 ) & 0xFFFF ) )
- * ( 17 + (int)( ( region >> 48 ) & 0xFFFF ) )
+ *
+ (13 + (int)((region >> 32) & 0xFFFF)) * (17 + (int)((region >> 48) & 0xFFFF))
#endif
- ) % RecordAccessTracker::SliceSize );
- }
+ ) %
+ RecordAccessTracker::SliceSize);
+}
- int bigHash(size_t region) {
- return hash(region) % RecordAccessTracker::BigHashSize;
- }
+int bigHash(size_t region) {
+ return hash(region) % RecordAccessTracker::BigHashSize;
+}
- namespace PointerTable {
-
- /* A "superpage" is a group of 16 contiguous pages that differ
- * only in the low-order 16 bits. This means that there is
- * enough room in the low-order bits to store a bitmap for each
- * page in the superpage.
- */
- static const size_t superpageMask = ~0xffffLL;
- static const size_t superpageShift = 16;
- static const size_t pageSelectorMask = 0xf000LL; // selects a page in a superpage
- static const int pageSelectorShift = 12;
-
- // Tunables
- static const int capacity = 128; // in superpages
- static const int bucketSize = 4; // half cache line
- static const int buckets = capacity/bucketSize;
-
- struct Data {
- /** organized similar to a CPU cache
- * bucketSize-way set associative
- * least-recently-inserted replacement policy
- */
- size_t _table[buckets][bucketSize];
- long long _lastReset; // time in millis
- };
-
- void reset(Data* data) {
- memset(data->_table, 0, sizeof(data->_table));
- data->_lastReset = Listener::getElapsedTimeMillis();
- }
-
- inline void resetIfNeeded( Data* data ) {
- const long long now = Listener::getElapsedTimeMillis();
- if (MONGO_unlikely((now - data->_lastReset) >
- RecordAccessTracker::RotateTimeSecs*1000)) {
- reset(data);
- }
- }
-
- inline size_t pageBitOf(size_t ptr) {
- return 1LL << ((ptr & pageSelectorMask) >> pageSelectorShift);
- }
-
- inline size_t superpageOf(size_t ptr) {
- return ptr & superpageMask;
- }
-
- inline size_t bucketFor(size_t ptr) {
- return (ptr >> superpageShift) % buckets;
- }
-
- inline bool haveSeenPage(size_t superpage, size_t ptr) {
- return superpage & pageBitOf(ptr);
- }
-
- inline void markPageSeen(size_t& superpage, size_t ptr) {
- superpage |= pageBitOf(ptr);
- }
-
- /** call this to check a page has been seen yet. */
- inline bool seen(Data* data, size_t ptr) {
- resetIfNeeded(data);
-
- // A bucket contains 4 superpages each containing 16 contiguous pages
- // See above for a more detailed explanation of superpages
- size_t* bucket = data->_table[bucketFor(ptr)];
-
- for (int i = 0; i < bucketSize; i++) {
- if (superpageOf(ptr) == superpageOf(bucket[i])) {
- if (haveSeenPage(bucket[i], ptr))
- return true;
-
- markPageSeen(bucket[i], ptr);
- return false;
- }
- }
-
- // superpage isn't in thread-local cache
- // slide bucket forward and add new superpage at front
- for (int i = bucketSize-1; i > 0; i--)
- bucket[i] = bucket[i-1];
-
- bucket[0] = superpageOf(ptr);
- markPageSeen(bucket[0], ptr);
-
- return false;
- }
-
- Data* getData();
-
- }; // namespace PointerTable
-
- } // namespace
-
- //
- // Slice
- //
-
- RecordAccessTracker::Slice::Slice() {
- reset();
- }
+namespace PointerTable {
- void RecordAccessTracker::Slice::reset() {
- memset(_data, 0, sizeof(_data));
- _lastReset = time(0);
+/* A "superpage" is a group of 16 contiguous pages that differ
+ * only in the low-order 16 bits. This means that there is
+ * enough room in the low-order bits to store a bitmap for each
+ * page in the superpage.
+ */
+static const size_t superpageMask = ~0xffffLL;
+static const size_t superpageShift = 16;
+static const size_t pageSelectorMask = 0xf000LL; // selects a page in a superpage
+static const int pageSelectorShift = 12;
+
+// Tunables
+static const int capacity = 128; // in superpages
+static const int bucketSize = 4; // half cache line
+static const int buckets = capacity / bucketSize;
+
+struct Data {
+ /** organized similarly to a CPU cache
+ * bucketSize-way set associative
+ * least-recently-inserted replacement policy
+ */
+ size_t _table[buckets][bucketSize];
+ long long _lastReset; // time in millis
+};
+
+void reset(Data* data) {
+ memset(data->_table, 0, sizeof(data->_table));
+ data->_lastReset = Listener::getElapsedTimeMillis();
+}
+
+inline void resetIfNeeded(Data* data) {
+ const long long now = Listener::getElapsedTimeMillis();
+ if (MONGO_unlikely((now - data->_lastReset) > RecordAccessTracker::RotateTimeSecs * 1000)) {
+ reset(data);
}
+}
- RecordAccessTracker::State RecordAccessTracker::Slice::get(int regionHash,
- size_t region,
- short offset) {
- DEV verify(hash(region) == regionHash);
+inline size_t pageBitOf(size_t ptr) {
+ return 1LL << ((ptr & pageSelectorMask) >> pageSelectorShift);
+}
- Entry* e = _get(regionHash, region, false);
- if (!e)
- return Unk;
+inline size_t superpageOf(size_t ptr) {
+ return ptr & superpageMask;
+}
- return (e->value & ( 1ULL << offset ) ) ? In : Out;
- }
+inline size_t bucketFor(size_t ptr) {
+ return (ptr >> superpageShift) % buckets;
+}
- bool RecordAccessTracker::Slice::put(int regionHash, size_t region, short offset) {
- DEV verify(hash(region) == regionHash);
+inline bool haveSeenPage(size_t superpage, size_t ptr) {
+ return superpage & pageBitOf(ptr);
+}
- Entry* e = _get(regionHash, region, true);
- if (!e)
- return false;
+inline void markPageSeen(size_t& superpage, size_t ptr) {
+ superpage |= pageBitOf(ptr);
+}
- e->value |= 1ULL << offset;
- return true;
- }
+/** call this to check a page has been seen yet. */
+inline bool seen(Data* data, size_t ptr) {
+ resetIfNeeded(data);
+
+ // A bucket contains 4 superpages each containing 16 contiguous pages
+ // See above for a more detailed explanation of superpages
+ size_t* bucket = data->_table[bucketFor(ptr)];
+
+ for (int i = 0; i < bucketSize; i++) {
+ if (superpageOf(ptr) == superpageOf(bucket[i])) {
+ if (haveSeenPage(bucket[i], ptr))
+ return true;
- time_t RecordAccessTracker::Slice::lastReset() const {
- return _lastReset;
+ markPageSeen(bucket[i], ptr);
+ return false;
+ }
}
- RecordAccessTracker::Entry* RecordAccessTracker::Slice::_get(int start,
- size_t region,
- bool add) {
- for (int i = 0; i < MaxChain; i++) {
- int bucket = (start + i) % SliceSize;
+ // superpage isn't in thread-local cache
+ // slide bucket forward and add new superpage at front
+ for (int i = bucketSize - 1; i > 0; i--)
+ bucket[i] = bucket[i - 1];
- if (_data[bucket].region == 0) {
- if (!add)
- return NULL;
+ bucket[0] = superpageOf(ptr);
+ markPageSeen(bucket[0], ptr);
- _data[bucket].region = region;
- return &_data[bucket];
- }
+ return false;
+}
- if (_data[bucket].region == region) {
- return &_data[bucket];
- }
- }
+Data* getData();
- return NULL;
- }
+}; // namespace PointerTable
- //
- // Rolling
- //
+} // namespace
- RecordAccessTracker::Rolling::Rolling() {
- _curSlice = 0;
- _lastRotate = Listener::getElapsedTimeMillis();
- }
+//
+// Slice
+//
- bool RecordAccessTracker::Rolling::access(size_t region, short offset, bool doHalf) {
- int regionHash = hash(region);
+RecordAccessTracker::Slice::Slice() {
+ reset();
+}
- stdx::lock_guard<SimpleMutex> lk(_lock);
+void RecordAccessTracker::Slice::reset() {
+ memset(_data, 0, sizeof(_data));
+ _lastReset = time(0);
+}
- static int rarelyCount = 0;
- if (rarelyCount++ % (2048 / BigHashSize) == 0) {
- long long now = Listener::getElapsedTimeMillis();
+RecordAccessTracker::State RecordAccessTracker::Slice::get(int regionHash,
+ size_t region,
+ short offset) {
+ DEV verify(hash(region) == regionHash);
- if (now - _lastRotate > (1000 * RotateTimeSecs)) {
- _rotate();
- }
- }
+ Entry* e = _get(regionHash, region, false);
+ if (!e)
+ return Unk;
- for (int i = 0; i < NumSlices / (doHalf ? 2 : 1); i++) {
- int pos = (_curSlice + i) % NumSlices;
- State s = _slices[pos].get(regionHash, region, offset);
+ return (e->value & (1ULL << offset)) ? In : Out;
+}
- if (s == In)
- return true;
+bool RecordAccessTracker::Slice::put(int regionHash, size_t region, short offset) {
+ DEV verify(hash(region) == regionHash);
+
+ Entry* e = _get(regionHash, region, true);
+ if (!e)
+ return false;
+
+ e->value |= 1ULL << offset;
+ return true;
+}
+
+time_t RecordAccessTracker::Slice::lastReset() const {
+ return _lastReset;
+}
+
+RecordAccessTracker::Entry* RecordAccessTracker::Slice::_get(int start, size_t region, bool add) {
+ for (int i = 0; i < MaxChain; i++) {
+ int bucket = (start + i) % SliceSize;
- if (s == Out) {
- _slices[pos].put(regionHash, region, offset);
- return false;
- }
+ if (_data[bucket].region == 0) {
+ if (!add)
+ return NULL;
+
+ _data[bucket].region = region;
+ return &_data[bucket];
+ }
+
+ if (_data[bucket].region == region) {
+ return &_data[bucket];
}
+ }
+
+ return NULL;
+}
+
+//
+// Rolling
+//
+
+RecordAccessTracker::Rolling::Rolling() {
+ _curSlice = 0;
+ _lastRotate = Listener::getElapsedTimeMillis();
+}
+
+bool RecordAccessTracker::Rolling::access(size_t region, short offset, bool doHalf) {
+ int regionHash = hash(region);
- // we weren't in any slice
- // so add to cur
- if (!_slices[_curSlice].put(regionHash, region, offset)) {
+ stdx::lock_guard<SimpleMutex> lk(_lock);
+
+ static int rarelyCount = 0;
+ if (rarelyCount++ % (2048 / BigHashSize) == 0) {
+ long long now = Listener::getElapsedTimeMillis();
+
+ if (now - _lastRotate > (1000 * RotateTimeSecs)) {
_rotate();
- _slices[_curSlice].put(regionHash, region, offset);
}
- return false;
}
- void RecordAccessTracker::Rolling::_rotate() {
- _curSlice = (_curSlice + 1) % NumSlices;
- _slices[_curSlice].reset();
- _lastRotate = Listener::getElapsedTimeMillis();
+ for (int i = 0; i < NumSlices / (doHalf ? 2 : 1); i++) {
+ int pos = (_curSlice + i) % NumSlices;
+ State s = _slices[pos].get(regionHash, region, offset);
+
+ if (s == In)
+ return true;
+
+ if (s == Out) {
+ _slices[pos].put(regionHash, region, offset);
+ return false;
+ }
+ }
+
+ // we weren't in any slice
+ // so add to cur
+ if (!_slices[_curSlice].put(regionHash, region, offset)) {
+ _rotate();
+ _slices[_curSlice].put(regionHash, region, offset);
}
+ return false;
+}
- // These need to be outside the ps namespace due to the way they are defined
+void RecordAccessTracker::Rolling::_rotate() {
+ _curSlice = (_curSlice + 1) % NumSlices;
+ _slices[_curSlice].reset();
+ _lastRotate = Listener::getElapsedTimeMillis();
+}
+
+// These need to be outside the ps namespace due to the way they are defined
#if defined(MONGO_CONFIG_HAVE___THREAD)
- __thread PointerTable::Data _pointerTableData;
- PointerTable::Data* PointerTable::getData() {
- return &_pointerTableData;
- }
+__thread PointerTable::Data _pointerTableData;
+PointerTable::Data* PointerTable::getData() {
+ return &_pointerTableData;
+}
#elif defined(MONGO_CONFIG_HAVE___DECLSPEC_THREAD)
- __declspec( thread ) PointerTable::Data _pointerTableData;
- PointerTable::Data* PointerTable::getData() {
- return &_pointerTableData;
- }
+__declspec(thread) PointerTable::Data _pointerTableData;
+PointerTable::Data* PointerTable::getData() {
+ return &_pointerTableData;
+}
#else
- TSP_DEFINE(PointerTable::Data, _pointerTableData);
- PointerTable::Data* PointerTable::getData() {
- return _pointerTableData.getMake();
- }
+TSP_DEFINE(PointerTable::Data, _pointerTableData);
+PointerTable::Data* PointerTable::getData() {
+ return _pointerTableData.getMake();
+}
#endif
- //
- // RecordAccessTracker
- //
+//
+// RecordAccessTracker
+//
- RecordAccessTracker::RecordAccessTracker()
- : _blockSupported(blockSupported) {
- reset();
- }
+RecordAccessTracker::RecordAccessTracker() : _blockSupported(blockSupported) {
+ reset();
+}
- void RecordAccessTracker::reset() {
- PointerTable::reset(PointerTable::getData());
- _rollingTable.reset(new Rolling[BigHashSize]);
- }
+void RecordAccessTracker::reset() {
+ PointerTable::reset(PointerTable::getData());
+ _rollingTable.reset(new Rolling[BigHashSize]);
+}
- void RecordAccessTracker::markAccessed(const void* record) {
- const size_t page = reinterpret_cast<size_t>(record) >> 12;
- const size_t region = page >> 6;
- const size_t offset = page & 0x3f;
+void RecordAccessTracker::markAccessed(const void* record) {
+ const size_t page = reinterpret_cast<size_t>(record) >> 12;
+ const size_t region = page >> 6;
+ const size_t offset = page & 0x3f;
- const bool seen = PointerTable::seen(PointerTable::getData(),
- reinterpret_cast<size_t>(record));
- if (!seen) {
- _rollingTable[bigHash(region)].access(region, offset , true);
- }
+ const bool seen = PointerTable::seen(PointerTable::getData(), reinterpret_cast<size_t>(record));
+ if (!seen) {
+ _rollingTable[bigHash(region)].access(region, offset, true);
}
+}
- bool RecordAccessTracker::checkAccessedAndMark(const void* record) {
- const size_t page = reinterpret_cast<size_t>(record) >> 12;
- const size_t region = page >> 6;
- const size_t offset = page & 0x3f;
-
- // This is like the "L1 cache". If we're a miss then we fall through and check the
- // "L2 cache". If we're still a miss, then we defer to a system-specific system
- // call (or give up and return false if deferring to the system call is not enabled).
- if (PointerTable::seen(PointerTable::getData(), reinterpret_cast<size_t>(record))) {
- return true;
- }
-
- // We were a miss in the PointerTable. See if we can find 'record' in the Rolling table.
- if (_rollingTable[bigHash(region)].access(region, offset, false)) {
- return true;
- }
+bool RecordAccessTracker::checkAccessedAndMark(const void* record) {
+ const size_t page = reinterpret_cast<size_t>(record) >> 12;
+ const size_t region = page >> 6;
+ const size_t offset = page & 0x3f;
- if (!_blockSupported) {
- // This means we don't fall back to a system call. Instead we assume things aren't
- // in memory. This could mean that we yield too much, but this is much better
- // than the alternative of not yielding through a page fault.
- return false;
- }
+ // This is like the "L1 cache". If we're a miss then we fall through and check the
+ // "L2 cache". If we're still a miss, then we defer to a system-specific system
+ // call (or give up and return false if deferring to the system call is not enabled).
+ if (PointerTable::seen(PointerTable::getData(), reinterpret_cast<size_t>(record))) {
+ return true;
+ }
- return ProcessInfo::blockInMemory(const_cast<void*>(record));
+ // We were a miss in the PointerTable. See if we can find 'record' in the Rolling table.
+ if (_rollingTable[bigHash(region)].access(region, offset, false)) {
+ return true;
}
- void RecordAccessTracker::disableSystemBlockInMemCheck() {
- _blockSupported = false;
+ if (!_blockSupported) {
+ // This means we don't fall back to a system call. Instead we assume things aren't
+ // in memory. This could mean that we yield too much, but this is much better
+ // than the alternative of not yielding and then blocking on a page fault.
+ return false;
}
-} // namespace mongo
+ return ProcessInfo::blockInMemory(const_cast<void*>(record));
+}
+
+void RecordAccessTracker::disableSystemBlockInMemCheck() {
+ _blockSupported = false;
+}
+
+} // namespace mongo
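The tracker above keys everything off a fixed decomposition of the record address: the low 12 bits select a byte within a 4 KB page, the next 6 bits pick one of the 64 pages whose bits live in a single Entry::value, and the remaining bits name the region that gets hashed into a Slice. A minimal standalone sketch of that arithmetic, using the same shifts as markAccessed(); the address is an arbitrary example:

#include <cassert>
#include <cstddef>

int main() {
    const size_t addr = 0x12345678;     // example record address
    const size_t page = addr >> 12;     // 4 KB pages
    const size_t region = page >> 6;    // 64 pages per region
    const size_t offset = page & 0x3f;  // bit position inside the region's 64-bit mask

    assert(page == 0x12345);
    assert(region == 0x48d);            // 0x12345 >> 6
    assert(offset == 0x05);             // 0x12345 & 0x3f
    return 0;
}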
diff --git a/src/mongo/db/storage/mmap_v1/record_access_tracker.h b/src/mongo/db/storage/mmap_v1/record_access_tracker.h
index aa98e22230e..a1cb7ab2187 100644
--- a/src/mongo/db/storage/mmap_v1/record_access_tracker.h
+++ b/src/mongo/db/storage/mmap_v1/record_access_tracker.h
@@ -33,127 +33,126 @@
namespace mongo {
- class MmapV1RecordHeader;
+class MmapV1RecordHeader;
+
+/**
+ * Used to implement likelyInPhysicalMemory() for the MMAP v1 storage engine. Since
+ * MMAP v1 holds exclusive collection-level locks, it should yield the locks during a
+ * page fault. The RecordAccessTracker is used to guess at which records are in memory,
+ * so that a yield can be requested unless we're sure that the record has been
+ * recently accessed.
+ */
+class RecordAccessTracker {
+ MONGO_DISALLOW_COPYING(RecordAccessTracker);
+
+public:
+ RecordAccessTracker();
+
+ enum Constants {
+ SliceSize = 1024,
+ MaxChain = 20, // intentionally very low
+ NumSlices = 10,
+ RotateTimeSecs = 90,
+ BigHashSize = 128
+ };
/**
- * Used to implement likelyInPhysicalMemory() for the MMAP v1 storage engine. Since
- * MMAP v1 holds exclusive collection-level locks, it should yield the locks during a
- * page fault. The RecordAccessTracker is used to guess at which records are in memory,
- * so that a yield can be requested unless we're sure that the record has been
- * recently accessed.
+ * Informs this record access tracker that 'record' has been accessed.
*/
- class RecordAccessTracker {
- MONGO_DISALLOW_COPYING(RecordAccessTracker);
- public:
- RecordAccessTracker();
+ void markAccessed(const void* record);
- enum Constants {
- SliceSize = 1024,
- MaxChain = 20, // intentionally very low
- NumSlices = 10,
- RotateTimeSecs = 90,
- BigHashSize = 128
- };
+ /**
+ * @return whether or not 'record' has been marked as accessed recently. A return value
+ * of true means that 'record' is likely in physical memory.
+ *
+ * Also has the side effect of marking 'record' as accessed.
+ */
+ bool checkAccessedAndMark(const void* record);
- /**
- * Informs this record access tracker that 'record' has been accessed.
- */
- void markAccessed(const void* record);
+ /**
+ * Clears out any history of record accesses.
+ */
+ void reset();
- /**
- * @return whether or not 'record' has been marked as accessed recently. A return value
- * of true means that 'record' is likely in physical memory.
- *
- * Also has the side effect of marking 'record' as accessed.
- */
- bool checkAccessedAndMark(const void* record);
+ //
+ // For testing.
+ //
+
+ /**
+ * The checkAccessedAndMark() implementation falls back to making a system call if it
+ * appears that the record is not in physical memory. Use this method to disable
+ * the fallback for testing.
+ */
+ void disableSystemBlockInMemCheck();
+
+private:
+ enum State { In, Out, Unk };
+
+ struct Entry {
+ size_t region;
+ unsigned long long value;
+ };
+
+ /**
+ * Simple hash map from region -> status.
+ * This constitutes a single slice of time.
+ * It does chaining, but with very short chains.
+ */
+ class Slice {
+ public:
+ Slice();
- /**
- * Clears out any history of record accesses.
- */
void reset();
- //
- // For testing.
- //
+ State get(int regionHash, size_t region, short offset);
/**
- * The accessedRecently() implementation falls back to making a system call if it
- * appears that the record is not in physical memory. Use this method to disable
- * the fallback for testing.
+ * @return true if added, false if full
*/
- void disableSystemBlockInMemCheck();
+ bool put(int regionHash, size_t region, short offset);
+
+ time_t lastReset() const;
private:
- enum State {
- In, Out, Unk
- };
+ Entry* _get(int start, size_t region, bool add);
+
+ Entry _data[SliceSize];
+ time_t _lastReset;
+ };
- struct Entry {
- size_t region;
- unsigned long long value;
- };
+ /**
+ * This contains many slices of time.
+ * The idea is that you put memory status in the current time slice,
+ * and after a certain period of time it rolls off, so we check again.
+ */
+ class Rolling {
+ public:
+ Rolling();
/**
- * simple hash map for region -> status
- * this constitutes a single region of time
- * it does chaining, but very short chains
+ * After this call, we assume the page is in RAM.
+ *
+ * @param doHalf if this is a known good access, only consult the first half of the slices.
+ *
+ * @return whether we know the page is in RAM
*/
- class Slice {
- public:
- Slice();
-
- void reset();
-
- State get(int regionHash, size_t region, short offset);
+ bool access(size_t region, short offset, bool doHalf);
- /**
- * @return true if added, false if full
- */
- bool put(int regionHash, size_t region, short offset);
+ private:
+ void _rotate();
- time_t lastReset() const;
+ int _curSlice;
+ long long _lastRotate;
+ Slice _slices[NumSlices];
- private:
- Entry* _get(int start, size_t region, bool add);
+ SimpleMutex _lock;
+ };
- Entry _data[SliceSize];
- time_t _lastReset;
- };
+ // Should this record tracker fallback to making a system call?
+ bool _blockSupported;
- /**
- * this contains many slices of times
- * the idea you put mem status in the current time slice
- * and then after a certain period of time, it rolls off so we check again
- */
- class Rolling {
- public:
- Rolling();
-
- /**
- * After this call, we assume the page is in RAM.
- *
- * @param doHalf if this is a known good access, want to put in first half.
- *
- * @return whether we know the page is in RAM
- */
- bool access(size_t region, short offset, bool doHalf);
-
- private:
- void _rotate();
-
- int _curSlice;
- long long _lastRotate;
- Slice _slices[NumSlices];
-
- SimpleMutex _lock;
- };
-
- // Should this record tracker fallback to making a system call?
- bool _blockSupported;
-
- // An array of Rolling instances for tracking record accesses.
- std::unique_ptr<Rolling[]> _rollingTable;
- };
+ // An array of Rolling instances for tracking record accesses.
+ std::unique_ptr<Rolling[]> _rollingTable;
+};
-} // namespace
+} // namespace mongo
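The intended call pattern, per the class comment, is to consult the tracker before touching a record and to yield the exclusive locks only when the record is probably not resident. A minimal sketch of such a caller, assuming hypothetical yieldLocks() and readRecord() stand-ins for the real lock manager and data access:

#include "mongo/db/storage/mmap_v1/record_access_tracker.h"

namespace {

inline void yieldLocks() {}             // hypothetical: release and reacquire the collection locks
inline void readRecord(const void*) {}  // hypothetical: actually touch the record's bytes

void fetchWithYield(mongo::RecordAccessTracker& tracker, const void* rec) {
    // If the tracker has not seen this page recently, a page fault is likely,
    // so give up the locks before faulting the page in.
    if (!tracker.checkAccessedAndMark(rec)) {
        yieldLocks();
    }
    readRecord(rec);
}

}  // namespace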
diff --git a/src/mongo/db/storage/mmap_v1/record_access_tracker_test.cpp b/src/mongo/db/storage/mmap_v1/record_access_tracker_test.cpp
index 92147a24c55..7cc766f2b13 100644
--- a/src/mongo/db/storage/mmap_v1/record_access_tracker_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_access_tracker_test.cpp
@@ -35,108 +35,108 @@ using namespace mongo;
namespace {
- const void* pointerOf(int data) {
- return reinterpret_cast<const void*>(data);
- }
-
- TEST(RecordAccessTrackerTest, TouchRecordTwice) {
- RecordAccessTracker tracker;
- tracker.disableSystemBlockInMemCheck();
-
- const void* record = pointerOf(0x10003);
-
- ASSERT_FALSE(tracker.checkAccessedAndMark(record));
- ASSERT_TRUE(tracker.checkAccessedAndMark(record));
- }
-
- TEST(RecordAccessTrackerTest, TouchPageTwice) {
- RecordAccessTracker tracker;
- tracker.disableSystemBlockInMemCheck();
-
- const void* firstRecord = pointerOf(0x10003);
- const void* secondRecord = pointerOf(0x10004);
-
- ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecord));
- ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecord));
- ASSERT_TRUE(tracker.checkAccessedAndMark(firstRecord));
- ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecord));
- }
-
- TEST(RecordAccessTrackerTest, TouchTwoPagesTwice) {
- RecordAccessTracker tracker;
- tracker.disableSystemBlockInMemCheck();
-
- const void* firstRecordFirstPage = pointerOf(0x11000);
- const void* secondRecordFirstPage = pointerOf(0x11100);
-
- const void* firstRecordSecondPage = pointerOf(0x12000);
- const void* secondRecordSecondPage = pointerOf(0x12100);
-
- ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordFirstPage));
- ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordSecondPage));
- ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordFirstPage));
- ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordSecondPage));
- }
-
- // Tests RecordAccessTracker::reset().
- TEST(RecordAccessTrackerTest, TouchTwoPagesTwiceWithReset) {
- RecordAccessTracker tracker;
- tracker.disableSystemBlockInMemCheck();
-
- const void* firstRecordFirstPage = pointerOf(0x11000);
- const void* secondRecordFirstPage = pointerOf(0x11100);
-
- const void* firstRecordSecondPage = pointerOf(0x12000);
- const void* secondRecordSecondPage = pointerOf(0x12100);
-
- ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordFirstPage));
- ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordSecondPage));
- ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordFirstPage));
- ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordSecondPage));
-
- // Now reset and make sure things look as though we have a fresh RecordAccessTracker.
- tracker.reset();
- ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordFirstPage));
- ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordSecondPage));
- ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordFirstPage));
- ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordSecondPage));
+const void* pointerOf(int data) {
+ return reinterpret_cast<const void*>(data);
+}
+
+TEST(RecordAccessTrackerTest, TouchRecordTwice) {
+ RecordAccessTracker tracker;
+ tracker.disableSystemBlockInMemCheck();
+
+ const void* record = pointerOf(0x10003);
+
+ ASSERT_FALSE(tracker.checkAccessedAndMark(record));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(record));
+}
+
+TEST(RecordAccessTrackerTest, TouchPageTwice) {
+ RecordAccessTracker tracker;
+ tracker.disableSystemBlockInMemCheck();
+
+ const void* firstRecord = pointerOf(0x10003);
+ const void* secondRecord = pointerOf(0x10004);
+
+ ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecord));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecord));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(firstRecord));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecord));
+}
+
+TEST(RecordAccessTrackerTest, TouchTwoPagesTwice) {
+ RecordAccessTracker tracker;
+ tracker.disableSystemBlockInMemCheck();
+
+ const void* firstRecordFirstPage = pointerOf(0x11000);
+ const void* secondRecordFirstPage = pointerOf(0x11100);
+
+ const void* firstRecordSecondPage = pointerOf(0x12000);
+ const void* secondRecordSecondPage = pointerOf(0x12100);
+
+ ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordFirstPage));
+ ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordSecondPage));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordFirstPage));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordSecondPage));
+}
+
+// Tests RecordAccessTracker::reset().
+TEST(RecordAccessTrackerTest, TouchTwoPagesTwiceWithReset) {
+ RecordAccessTracker tracker;
+ tracker.disableSystemBlockInMemCheck();
+
+ const void* firstRecordFirstPage = pointerOf(0x11000);
+ const void* secondRecordFirstPage = pointerOf(0x11100);
+
+ const void* firstRecordSecondPage = pointerOf(0x12000);
+ const void* secondRecordSecondPage = pointerOf(0x12100);
+
+ ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordFirstPage));
+ ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordSecondPage));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordFirstPage));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordSecondPage));
+
+ // Now reset and make sure things look as though we have a fresh RecordAccessTracker.
+ tracker.reset();
+ ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordFirstPage));
+ ASSERT_FALSE(tracker.checkAccessedAndMark(firstRecordSecondPage));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordFirstPage));
+ ASSERT_TRUE(tracker.checkAccessedAndMark(secondRecordSecondPage));
+}
+
+// Tests RecordAccessTracker::markAccessed().
+TEST(RecordAccessTrackerTest, AccessTest) {
+ RecordAccessTracker tracker;
+ tracker.disableSystemBlockInMemCheck();
+
+ // Mark the first page in superpage 3 as accessed.
+ const void* record = pointerOf(0x30000);
+ tracker.markAccessed(record);
+
+ // Test that all remaining addresses in the page give true when asked whether they are
+ // recently accessed.
+ for (int i = 0x30001; i < 0x31000; i++) {
+ const void* touchedPageRecord = pointerOf(i);
+ ASSERT_TRUE(tracker.checkAccessedAndMark(touchedPageRecord));
}
-
- // Tests RecordAccessTracker::markAccessed().
- TEST(RecordAccessTrackerTest, AccessTest) {
- RecordAccessTracker tracker;
- tracker.disableSystemBlockInMemCheck();
-
- // Mark the first page in superpage 3 as accessed.
- const void* record = pointerOf(0x30000);
- tracker.markAccessed(record);
-
- // Test that all remaining addresses in the page give true when asked whether they are
- // recently accessed.
- for (int i = 0x30001; i < 0x31000; i++) {
- const void* touchedPageRecord = pointerOf(i);
- ASSERT_TRUE(tracker.checkAccessedAndMark(touchedPageRecord));
- }
+}
+
+// Touch pages in 128 separate superpages, and make sure that they all are reported as
+// recently accessed.
+TEST(RecordAccessTrackerTest, Access128Superpages) {
+ RecordAccessTracker tracker;
+ tracker.disableSystemBlockInMemCheck();
+
+ // Touch the pages.
+ for (int i = 0x00000; i < 0x800000; i += 0x10000) {
+ const void* touchedPageRecord = pointerOf(i);
+ tracker.markAccessed(touchedPageRecord);
}
- // Touch pages in 128 separate superpages, and make sure that they all are reported as
- // recently accessed.
- TEST(RecordAccessTrackerTest, Access128Superpages) {
- RecordAccessTracker tracker;
- tracker.disableSystemBlockInMemCheck();
-
- // Touch the pages.
- for (int i = 0x00000; i < 0x800000; i += 0x10000) {
- const void* touchedPageRecord = pointerOf(i);
- tracker.markAccessed(touchedPageRecord);
- }
-
- // Ensure we know that the pages have all been touched.
- for (int i = 0x00000; i < 0x800000; i += 0x10000) {
- // It should be fine if there is an offset of, say, 0xA, into the page.
- const void* touchedPageRecord = pointerOf(i + 0xA);
- ASSERT_TRUE(tracker.checkAccessedAndMark(touchedPageRecord));
- }
+ // Ensure we know that the pages have all been touched.
+ for (int i = 0x00000; i < 0x800000; i += 0x10000) {
+ // It should be fine if there is an offset of, say, 0xA, into the page.
+ const void* touchedPageRecord = pointerOf(i + 0xA);
+ ASSERT_TRUE(tracker.checkAccessedAndMark(touchedPageRecord));
}
+}
} // namespace
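A side note on Access128Superpages above: the 0x10000 stride is what makes every touch land in a distinct superpage. With superpageMask = ~0xffff a superpage spans 64 KB (16 pages of 4 KB), so the addresses 0x00000, 0x10000, ..., 0x7F0000 cover 128 different superpages, matching the PointerTable capacity of 128. A minimal sketch of that count, reusing the same mask value; the loop bounds mirror the test:

#include <cassert>
#include <cstddef>
#include <set>

int main() {
    const size_t superpageMask = ~static_cast<size_t>(0xffff);  // same value as PointerTable::superpageMask
    std::set<size_t> superpages;
    for (size_t addr = 0x00000; addr < 0x800000; addr += 0x10000) {
        superpages.insert(addr & superpageMask);
    }
    assert(superpages.size() == 128);  // one distinct superpage per touched address
    return 0;
}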
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index 5862a44a144..cc8cf582ffe 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -48,935 +48,920 @@
namespace mongo {
- using std::unique_ptr;
- using std::set;
- using std::string;
-
- /* Deleted list buckets are used to quickly locate free space based on size. Each bucket
- contains records up to that size (meaning a record with a size exactly equal to
- bucketSizes[n] would go into bucket n+1).
- */
- const int RecordStoreV1Base::bucketSizes[] = {
- 0x20, 0x40, 0x80, 0x100, // 32, 64, 128, 256
- 0x200, 0x400, 0x800, 0x1000, // 512, 1K, 2K, 4K
- 0x2000, 0x4000, 0x8000, 0x10000, // 8K, 16K, 32K, 64K
- 0x20000, 0x40000, 0x80000, 0x100000, // 128K, 256K, 512K, 1M
- 0x200000, 0x400000, 0x600000, 0x800000, // 2M, 4M, 6M, 8M
- 0xA00000, 0xC00000, 0xE00000, // 10M, 12M, 14M,
- MaxAllowedAllocation, // 16.5M
- MaxAllowedAllocation + 1, // Only MaxAllowedAllocation sized records go here.
- INT_MAX, // "oversized" bucket for unused parts of extents.
- };
-
- // If this fails, it means that bucketSizes doesn't have the correct number of entries.
- BOOST_STATIC_ASSERT(sizeof(RecordStoreV1Base::bucketSizes)
- / sizeof(RecordStoreV1Base::bucketSizes[0])
- == RecordStoreV1Base::Buckets);
-
- SavedCursorRegistry::~SavedCursorRegistry() {
- for (SavedCursorSet::iterator it = _cursors.begin(); it != _cursors.end(); it++) {
- (*it)->_registry = NULL; // prevent SavedCursor destructor from accessing this
- }
+using std::unique_ptr;
+using std::set;
+using std::string;
+
+/* Deleted list buckets are used to quickly locate free space based on size. Each bucket
+ contains records up to that size (meaning a record with a size exactly equal to
+ bucketSizes[n] would go into bucket n+1).
+*/
+const int RecordStoreV1Base::bucketSizes[] = {
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100, // 32, 64, 128, 256
+ 0x200,
+ 0x400,
+ 0x800,
+ 0x1000, // 512, 1K, 2K, 4K
+ 0x2000,
+ 0x4000,
+ 0x8000,
+ 0x10000, // 8K, 16K, 32K, 64K
+ 0x20000,
+ 0x40000,
+ 0x80000,
+ 0x100000, // 128K, 256K, 512K, 1M
+ 0x200000,
+ 0x400000,
+ 0x600000,
+ 0x800000, // 2M, 4M, 6M, 8M
+ 0xA00000,
+ 0xC00000,
+ 0xE00000, // 10M, 12M, 14M,
+ MaxAllowedAllocation, // 16.5M
+ MaxAllowedAllocation + 1, // Only MaxAllowedAllocation sized records go here.
+ INT_MAX, // "oversized" bucket for unused parts of extents.
+};
+
+// If this fails, it means that bucketSizes doesn't have the correct number of entries.
+BOOST_STATIC_ASSERT(sizeof(RecordStoreV1Base::bucketSizes) /
+ sizeof(RecordStoreV1Base::bucketSizes[0]) ==
+ RecordStoreV1Base::Buckets);
+
+SavedCursorRegistry::~SavedCursorRegistry() {
+ for (SavedCursorSet::iterator it = _cursors.begin(); it != _cursors.end(); it++) {
+ (*it)->_registry = NULL; // prevent SavedCursor destructor from accessing this
}
+}
- void SavedCursorRegistry::registerCursor(SavedCursor* cursor) {
- invariant(!cursor->_registry);
- cursor->_registry = this;
- scoped_spinlock lock(_mutex);
- _cursors.insert(cursor);
- }
+void SavedCursorRegistry::registerCursor(SavedCursor* cursor) {
+ invariant(!cursor->_registry);
+ cursor->_registry = this;
+ scoped_spinlock lock(_mutex);
+ _cursors.insert(cursor);
+}
- bool SavedCursorRegistry::unregisterCursor(SavedCursor* cursor) {
- if (!cursor->_registry) {
- return false;
- }
- invariant(cursor->_registry == this);
- cursor->_registry = NULL;
- scoped_spinlock lock(_mutex);
- invariant(_cursors.erase(cursor));
- return true;
- }
-
- void SavedCursorRegistry::invalidateCursorsForBucket(DiskLoc bucket) {
- // While this is not strictly necessary as an exclusive collection lock will be held,
- // it's cleaner to just make the SavedCursorRegistry thread-safe. Spinlock is OK here.
- scoped_spinlock lock(_mutex);
- for (SavedCursorSet::iterator it = _cursors.begin(); it != _cursors.end();) {
- if ((*it)->bucket == bucket) {
- (*it)->_registry = NULL; // prevent ~SavedCursor from trying to unregister
- _cursors.erase(it++);
- }
- else {
- it++;
- }
- }
+bool SavedCursorRegistry::unregisterCursor(SavedCursor* cursor) {
+ if (!cursor->_registry) {
+ return false;
}
+ invariant(cursor->_registry == this);
+ cursor->_registry = NULL;
+ scoped_spinlock lock(_mutex);
+ invariant(_cursors.erase(cursor));
+ return true;
+}
- RecordStoreV1Base::RecordStoreV1Base( StringData ns,
- RecordStoreV1MetaData* details,
- ExtentManager* em,
- bool isSystemIndexes )
- : RecordStore( ns ),
- _details( details ),
- _extentManager( em ),
- _isSystemIndexes( isSystemIndexes ) {
+void SavedCursorRegistry::invalidateCursorsForBucket(DiskLoc bucket) {
+ // While this is not strictly necessary as an exclusive collection lock will be held,
+ // it's cleaner to just make the SavedCursorRegistry thread-safe. Spinlock is OK here.
+ scoped_spinlock lock(_mutex);
+ for (SavedCursorSet::iterator it = _cursors.begin(); it != _cursors.end();) {
+ if ((*it)->bucket == bucket) {
+ (*it)->_registry = NULL; // prevent ~SavedCursor from trying to unregister
+ _cursors.erase(it++);
+ } else {
+ it++;
+ }
}
+}
- RecordStoreV1Base::~RecordStoreV1Base() {
- }
+RecordStoreV1Base::RecordStoreV1Base(StringData ns,
+ RecordStoreV1MetaData* details,
+ ExtentManager* em,
+ bool isSystemIndexes)
+ : RecordStore(ns), _details(details), _extentManager(em), _isSystemIndexes(isSystemIndexes) {}
+RecordStoreV1Base::~RecordStoreV1Base() {}
- int64_t RecordStoreV1Base::storageSize( OperationContext* txn,
- BSONObjBuilder* extraInfo,
- int level ) const {
- BSONArrayBuilder extentInfo;
- int64_t total = 0;
- int n = 0;
+int64_t RecordStoreV1Base::storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo,
+ int level) const {
+ BSONArrayBuilder extentInfo;
- DiskLoc cur = _details->firstExtent(txn);
+ int64_t total = 0;
+ int n = 0;
- while ( !cur.isNull() ) {
- Extent* e = _extentManager->getExtent( cur );
+ DiskLoc cur = _details->firstExtent(txn);
- total += e->length;
- n++;
+ while (!cur.isNull()) {
+ Extent* e = _extentManager->getExtent(cur);
- if ( extraInfo && level > 0 ) {
- extentInfo.append( BSON( "len" << e->length << "loc: " << e->myLoc.toBSONObj() ) );
- }
- cur = e->xnext;
- }
+ total += e->length;
+ n++;
- if ( extraInfo ) {
- extraInfo->append( "numExtents", n );
- if ( level > 0 )
- extraInfo->append( "extents", extentInfo.arr() );
+ if (extraInfo && level > 0) {
+ extentInfo.append(BSON("len" << e->length << "loc: " << e->myLoc.toBSONObj()));
}
-
- return total;
+ cur = e->xnext;
}
- RecordData RecordStoreV1Base::dataFor( OperationContext* txn, const RecordId& loc ) const {
- return recordFor(DiskLoc::fromRecordId(loc))->toRecordData();
+ if (extraInfo) {
+ extraInfo->append("numExtents", n);
+ if (level > 0)
+ extraInfo->append("extents", extentInfo.arr());
}
- bool RecordStoreV1Base::findRecord( OperationContext* txn,
- const RecordId& loc, RecordData* rd ) const {
- // this is a bit odd, as the semantics of using the storage engine imply it _has_ to be.
- // And in fact we can't actually check.
- // So we assume the best.
- MmapV1RecordHeader* rec = recordFor(DiskLoc::fromRecordId(loc));
- if ( !rec ) {
- return false;
- }
- *rd = rec->toRecordData();
- return true;
- }
+ return total;
+}
- MmapV1RecordHeader* RecordStoreV1Base::recordFor( const DiskLoc& loc ) const {
- return _extentManager->recordForV1( loc );
- }
+RecordData RecordStoreV1Base::dataFor(OperationContext* txn, const RecordId& loc) const {
+ return recordFor(DiskLoc::fromRecordId(loc))->toRecordData();
+}
- const DeletedRecord* RecordStoreV1Base::deletedRecordFor( const DiskLoc& loc ) const {
- invariant( loc.a() != -1 );
- return reinterpret_cast<const DeletedRecord*>( recordFor( loc ) );
- }
+bool RecordStoreV1Base::findRecord(OperationContext* txn,
+ const RecordId& loc,
+ RecordData* rd) const {
+ // this is a bit odd, as the semantics of using the storage engine imply it _has_ to be.
+ // And in fact we can't actually check.
+ // So we assume the best.
+ MmapV1RecordHeader* rec = recordFor(DiskLoc::fromRecordId(loc));
+ if (!rec) {
+ return false;
+ }
+ *rd = rec->toRecordData();
+ return true;
+}
- DeletedRecord* RecordStoreV1Base::drec( const DiskLoc& loc ) const {
- invariant( loc.a() != -1 );
- return reinterpret_cast<DeletedRecord*>( recordFor( loc ) );
- }
+MmapV1RecordHeader* RecordStoreV1Base::recordFor(const DiskLoc& loc) const {
+ return _extentManager->recordForV1(loc);
+}
- Extent* RecordStoreV1Base::_getExtent( OperationContext* txn, const DiskLoc& loc ) const {
- return _extentManager->getExtent( loc );
- }
+const DeletedRecord* RecordStoreV1Base::deletedRecordFor(const DiskLoc& loc) const {
+ invariant(loc.a() != -1);
+ return reinterpret_cast<const DeletedRecord*>(recordFor(loc));
+}
- DiskLoc RecordStoreV1Base::_getExtentLocForRecord( OperationContext* txn, const DiskLoc& loc ) const {
- return _extentManager->extentLocForV1( loc );
- }
+DeletedRecord* RecordStoreV1Base::drec(const DiskLoc& loc) const {
+ invariant(loc.a() != -1);
+ return reinterpret_cast<DeletedRecord*>(recordFor(loc));
+}
+Extent* RecordStoreV1Base::_getExtent(OperationContext* txn, const DiskLoc& loc) const {
+ return _extentManager->getExtent(loc);
+}
- DiskLoc RecordStoreV1Base::getNextRecord( OperationContext* txn, const DiskLoc& loc ) const {
- DiskLoc next = getNextRecordInExtent( txn, loc );
- if ( !next.isNull() ) {
- return next;
- }
+DiskLoc RecordStoreV1Base::_getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const {
+ return _extentManager->extentLocForV1(loc);
+}
- // now traverse extents
- Extent* e = _getExtent( txn, _getExtentLocForRecord(txn, loc) );
- while ( 1 ) {
- if ( e->xnext.isNull() )
- return DiskLoc(); // end of collection
- e = _getExtent( txn, e->xnext );
- if ( !e->firstRecord.isNull() )
- break;
- // entire extent could be empty, keep looking
- }
- return e->firstRecord;
+DiskLoc RecordStoreV1Base::getNextRecord(OperationContext* txn, const DiskLoc& loc) const {
+ DiskLoc next = getNextRecordInExtent(txn, loc);
+ if (!next.isNull()) {
+ return next;
}
- DiskLoc RecordStoreV1Base::getPrevRecord( OperationContext* txn, const DiskLoc& loc ) const {
- DiskLoc prev = getPrevRecordInExtent( txn, loc );
- if ( !prev.isNull() ) {
- return prev;
- }
+ // now traverse extents
- // now traverse extents
+ Extent* e = _getExtent(txn, _getExtentLocForRecord(txn, loc));
+ while (1) {
+ if (e->xnext.isNull())
+ return DiskLoc(); // end of collection
+ e = _getExtent(txn, e->xnext);
+ if (!e->firstRecord.isNull())
+ break;
+ // entire extent could be empty, keep looking
+ }
+ return e->firstRecord;
+}
- Extent *e = _getExtent(txn, _getExtentLocForRecord(txn, loc));
- while ( 1 ) {
- if ( e->xprev.isNull() )
- return DiskLoc(); // end of collection
- e = _getExtent( txn, e->xprev );
- if ( !e->firstRecord.isNull() )
- break;
- // entire extent could be empty, keep looking
- }
- return e->lastRecord;
-
- }
-
- DiskLoc RecordStoreV1Base::_findFirstSpot( OperationContext* txn,
- const DiskLoc& extDiskLoc,
- Extent* e ) {
- DiskLoc emptyLoc = extDiskLoc;
- emptyLoc.inc( Extent::HeaderSize() );
- int delRecLength = e->length - Extent::HeaderSize();
- if ( delRecLength >= 32*1024 && _ns.find('$') != string::npos && !isCapped() ) {
- // probably an index. so skip forward to keep its records page aligned
- int& ofs = emptyLoc.GETOFS();
- int newOfs = (ofs + 0xfff) & ~0xfff;
- delRecLength -= (newOfs-ofs);
- dassert( delRecLength > 0 );
- ofs = newOfs;
- }
+DiskLoc RecordStoreV1Base::getPrevRecord(OperationContext* txn, const DiskLoc& loc) const {
+ DiskLoc prev = getPrevRecordInExtent(txn, loc);
+ if (!prev.isNull()) {
+ return prev;
+ }
- DeletedRecord* empty = txn->recoveryUnit()->writing(drec(emptyLoc));
- empty->lengthWithHeaders() = delRecLength;
- empty->extentOfs() = e->myLoc.getOfs();
- empty->nextDeleted().Null();
- return emptyLoc;
+ // now traverse extents
+ Extent* e = _getExtent(txn, _getExtentLocForRecord(txn, loc));
+ while (1) {
+ if (e->xprev.isNull())
+ return DiskLoc(); // end of collection
+ e = _getExtent(txn, e->xprev);
+ if (!e->firstRecord.isNull())
+ break;
+ // entire extent could be empty, keep looking
}
+ return e->lastRecord;
+}
- DiskLoc RecordStoreV1Base::getNextRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const {
- int nextOffset = recordFor( loc )->nextOfs();
-
- if ( nextOffset == DiskLoc::NullOfs )
- return DiskLoc();
+DiskLoc RecordStoreV1Base::_findFirstSpot(OperationContext* txn,
+ const DiskLoc& extDiskLoc,
+ Extent* e) {
+ DiskLoc emptyLoc = extDiskLoc;
+ emptyLoc.inc(Extent::HeaderSize());
+ int delRecLength = e->length - Extent::HeaderSize();
+ if (delRecLength >= 32 * 1024 && _ns.find('$') != string::npos && !isCapped()) {
+ // probably an index. so skip forward to keep its records page aligned
+ int& ofs = emptyLoc.GETOFS();
+ int newOfs = (ofs + 0xfff) & ~0xfff;
+ delRecLength -= (newOfs - ofs);
+ dassert(delRecLength > 0);
+ ofs = newOfs;
+ }
+
+ DeletedRecord* empty = txn->recoveryUnit()->writing(drec(emptyLoc));
+ empty->lengthWithHeaders() = delRecLength;
+ empty->extentOfs() = e->myLoc.getOfs();
+ empty->nextDeleted().Null();
+ return emptyLoc;
+}
- fassert( 17441, abs(nextOffset) >= 8 ); // defensive
- DiskLoc result( loc.a(), nextOffset );
- return result;
- }
+DiskLoc RecordStoreV1Base::getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const {
+ int nextOffset = recordFor(loc)->nextOfs();
- DiskLoc RecordStoreV1Base::getPrevRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const {
- int prevOffset = recordFor( loc )->prevOfs();
+ if (nextOffset == DiskLoc::NullOfs)
+ return DiskLoc();
- if ( prevOffset == DiskLoc::NullOfs )
- return DiskLoc();
+ fassert(17441, abs(nextOffset) >= 8); // defensive
+ DiskLoc result(loc.a(), nextOffset);
+ return result;
+}
- fassert( 17442, abs(prevOffset) >= 8 ); // defensive
- DiskLoc result( loc.a(), prevOffset );
- return result;
- }
+DiskLoc RecordStoreV1Base::getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const {
+ int prevOffset = recordFor(loc)->prevOfs();
- StatusWith<RecordId> RecordStoreV1Base::insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota ) {
- int docSize = doc->documentSize();
- if ( docSize < 4 ) {
- return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be >= 4 bytes");
- }
- const int lenWHdr = docSize + MmapV1RecordHeader::HeaderSize;
- if ( lenWHdr > MaxAllowedAllocation ) {
- return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
- }
- const int lenToAlloc = (doc->addPadding() && shouldPadInserts())
- ? quantizeAllocationSpace(lenWHdr)
- : lenWHdr;
+ if (prevOffset == DiskLoc::NullOfs)
+ return DiskLoc();
- StatusWith<DiskLoc> loc = allocRecord( txn, lenToAlloc, enforceQuota );
- if ( !loc.isOK() )
- return StatusWith<RecordId>(loc.getStatus());
+ fassert(17442, abs(prevOffset) >= 8); // defensive
+ DiskLoc result(loc.a(), prevOffset);
+ return result;
+}
- MmapV1RecordHeader *r = recordFor( loc.getValue() );
- fassert( 17319, r->lengthWithHeaders() >= lenWHdr );
+StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota) {
+ int docSize = doc->documentSize();
+ if (docSize < 4) {
+ return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be >= 4 bytes");
+ }
+ const int lenWHdr = docSize + MmapV1RecordHeader::HeaderSize;
+ if (lenWHdr > MaxAllowedAllocation) {
+ return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
+ }
+ const int lenToAlloc =
+ (doc->addPadding() && shouldPadInserts()) ? quantizeAllocationSpace(lenWHdr) : lenWHdr;
- r = reinterpret_cast<MmapV1RecordHeader*>( txn->recoveryUnit()->writingPtr(r, lenWHdr) );
- doc->writeDocument( r->data() );
+ StatusWith<DiskLoc> loc = allocRecord(txn, lenToAlloc, enforceQuota);
+ if (!loc.isOK())
+ return StatusWith<RecordId>(loc.getStatus());
- _addRecordToRecListInExtent(txn, r, loc.getValue());
+ MmapV1RecordHeader* r = recordFor(loc.getValue());
+ fassert(17319, r->lengthWithHeaders() >= lenWHdr);
- _details->incrementStats( txn, r->netLength(), 1 );
+ r = reinterpret_cast<MmapV1RecordHeader*>(txn->recoveryUnit()->writingPtr(r, lenWHdr));
+ doc->writeDocument(r->data());
- return StatusWith<RecordId>(loc.getValue().toRecordId());
- }
+ _addRecordToRecListInExtent(txn, r, loc.getValue());
+ _details->incrementStats(txn, r->netLength(), 1);
- StatusWith<RecordId> RecordStoreV1Base::insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota ) {
- if ( len < 4 ) {
- return StatusWith<RecordId>( ErrorCodes::InvalidLength, "record has to be >= 4 bytes" );
- }
+ return StatusWith<RecordId>(loc.getValue().toRecordId());
+}
- if ( len + MmapV1RecordHeader::HeaderSize > MaxAllowedAllocation ) {
- return StatusWith<RecordId>( ErrorCodes::InvalidLength, "record has to be <= 16.5MB" );
- }
- return _insertRecord( txn, data, len, enforceQuota );
+StatusWith<RecordId> RecordStoreV1Base::insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota) {
+ if (len < 4) {
+ return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be >= 4 bytes");
}
- StatusWith<RecordId> RecordStoreV1Base::_insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota ) {
+ if (len + MmapV1RecordHeader::HeaderSize > MaxAllowedAllocation) {
+ return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
+ }
- const int lenWHdr = len + MmapV1RecordHeader::HeaderSize;
- const int lenToAlloc = shouldPadInserts() ? quantizeAllocationSpace(lenWHdr)
- : lenWHdr;
- fassert( 17208, lenToAlloc >= lenWHdr );
+ return _insertRecord(txn, data, len, enforceQuota);
+}
- StatusWith<DiskLoc> loc = allocRecord( txn, lenToAlloc, enforceQuota );
- if ( !loc.isOK() )
- return StatusWith<RecordId>(loc.getStatus());
+StatusWith<RecordId> RecordStoreV1Base::_insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota) {
+ const int lenWHdr = len + MmapV1RecordHeader::HeaderSize;
+ const int lenToAlloc = shouldPadInserts() ? quantizeAllocationSpace(lenWHdr) : lenWHdr;
+ fassert(17208, lenToAlloc >= lenWHdr);
- MmapV1RecordHeader *r = recordFor( loc.getValue() );
- fassert( 17210, r->lengthWithHeaders() >= lenWHdr );
+ StatusWith<DiskLoc> loc = allocRecord(txn, lenToAlloc, enforceQuota);
+ if (!loc.isOK())
+ return StatusWith<RecordId>(loc.getStatus());
- // copy the data
- r = reinterpret_cast<MmapV1RecordHeader*>( txn->recoveryUnit()->writingPtr(r, lenWHdr) );
- memcpy( r->data(), data, len );
+ MmapV1RecordHeader* r = recordFor(loc.getValue());
+ fassert(17210, r->lengthWithHeaders() >= lenWHdr);
- _addRecordToRecListInExtent(txn, r, loc.getValue());
+ // copy the data
+ r = reinterpret_cast<MmapV1RecordHeader*>(txn->recoveryUnit()->writingPtr(r, lenWHdr));
+ memcpy(r->data(), data, len);
- _details->incrementStats( txn, r->netLength(), 1 );
+ _addRecordToRecListInExtent(txn, r, loc.getValue());
- return StatusWith<RecordId>(loc.getValue().toRecordId());
- }
+ _details->incrementStats(txn, r->netLength(), 1);
- StatusWith<RecordId> RecordStoreV1Base::updateRecord( OperationContext* txn,
- const RecordId& oldLocation,
- const char* data,
- int dataSize,
- bool enforceQuota,
- UpdateNotifier* notifier ) {
- MmapV1RecordHeader* oldRecord = recordFor( DiskLoc::fromRecordId(oldLocation) );
- if ( oldRecord->netLength() >= dataSize ) {
- // Make sure to notify other queries before we do an in-place update.
- if ( notifier ) {
- Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace( txn,
- oldLocation );
- if ( !callbackStatus.isOK() )
- return StatusWith<RecordId>( callbackStatus );
- }
+ return StatusWith<RecordId>(loc.getValue().toRecordId());
+}
- // we fit
- memcpy( txn->recoveryUnit()->writingPtr( oldRecord->data(), dataSize ), data, dataSize );
- return StatusWith<RecordId>( oldLocation );
+StatusWith<RecordId> RecordStoreV1Base::updateRecord(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* data,
+ int dataSize,
+ bool enforceQuota,
+ UpdateNotifier* notifier) {
+ MmapV1RecordHeader* oldRecord = recordFor(DiskLoc::fromRecordId(oldLocation));
+ if (oldRecord->netLength() >= dataSize) {
+ // Make sure to notify other queries before we do an in-place update.
+ if (notifier) {
+ Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, oldLocation);
+ if (!callbackStatus.isOK())
+ return StatusWith<RecordId>(callbackStatus);
}
- if ( isCapped() )
- return StatusWith<RecordId>( ErrorCodes::InternalError,
- "failing update: objects in a capped ns cannot grow",
- 10003 );
-
- // we have to move
- if ( dataSize + MmapV1RecordHeader::HeaderSize > MaxAllowedAllocation ) {
- return StatusWith<RecordId>( ErrorCodes::InvalidLength, "record has to be <= 16.5MB" );
- }
+ // we fit
+ memcpy(txn->recoveryUnit()->writingPtr(oldRecord->data(), dataSize), data, dataSize);
+ return StatusWith<RecordId>(oldLocation);
+ }
- StatusWith<RecordId> newLocation = _insertRecord( txn, data, dataSize, enforceQuota );
- if ( !newLocation.isOK() )
- return newLocation;
-
- // insert worked, so we delete old record
- if ( notifier ) {
- Status moveStatus = notifier->recordStoreGoingToMove( txn,
- oldLocation,
- oldRecord->data(),
- oldRecord->netLength() );
- if ( !moveStatus.isOK() )
- return StatusWith<RecordId>( moveStatus );
- }
+ if (isCapped())
+ return StatusWith<RecordId>(
+ ErrorCodes::InternalError, "failing update: objects in a capped ns cannot grow", 10003);
- deleteRecord( txn, oldLocation );
+ // we have to move
+ if (dataSize + MmapV1RecordHeader::HeaderSize > MaxAllowedAllocation) {
+ return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
+ }
+ StatusWith<RecordId> newLocation = _insertRecord(txn, data, dataSize, enforceQuota);
+ if (!newLocation.isOK())
return newLocation;
- }
- bool RecordStoreV1Base::updateWithDamagesSupported() const {
- return true;
+ // insert worked, so we delete old record
+ if (notifier) {
+ Status moveStatus = notifier->recordStoreGoingToMove(
+ txn, oldLocation, oldRecord->data(), oldRecord->netLength());
+ if (!moveStatus.isOK())
+ return StatusWith<RecordId>(moveStatus);
}
- Status RecordStoreV1Base::updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages ) {
- MmapV1RecordHeader* rec = recordFor( DiskLoc::fromRecordId(loc) );
- char* root = rec->data();
+ deleteRecord(txn, oldLocation);
- // All updates were in place. Apply them via durability and writing pointer.
- mutablebson::DamageVector::const_iterator where = damages.begin();
- const mutablebson::DamageVector::const_iterator end = damages.end();
- for( ; where != end; ++where ) {
- const char* sourcePtr = damageSource + where->sourceOffset;
- void* targetPtr = txn->recoveryUnit()->writingPtr(root + where->targetOffset, where->size);
- std::memcpy(targetPtr, sourcePtr, where->size);
- }
+ return newLocation;
+}
- return Status::OK();
- }
+bool RecordStoreV1Base::updateWithDamagesSupported() const {
+ return true;
+}
- void RecordStoreV1Base::deleteRecord( OperationContext* txn, const RecordId& rid ) {
- const DiskLoc dl = DiskLoc::fromRecordId(rid);
+Status RecordStoreV1Base::updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages) {
+ MmapV1RecordHeader* rec = recordFor(DiskLoc::fromRecordId(loc));
+ char* root = rec->data();
- MmapV1RecordHeader* todelete = recordFor( dl );
- invariant( todelete->netLength() >= 4 ); // this is required for defensive code
+ // All updates were in place. Apply them via durability and writing pointer.
+ mutablebson::DamageVector::const_iterator where = damages.begin();
+ const mutablebson::DamageVector::const_iterator end = damages.end();
+ for (; where != end; ++where) {
+ const char* sourcePtr = damageSource + where->sourceOffset;
+ void* targetPtr = txn->recoveryUnit()->writingPtr(root + where->targetOffset, where->size);
+ std::memcpy(targetPtr, sourcePtr, where->size);
+ }
- /* remove ourself from the record next/prev chain */
- {
- if ( todelete->prevOfs() != DiskLoc::NullOfs ) {
- DiskLoc prev = getPrevRecordInExtent( txn, dl );
- MmapV1RecordHeader* prevRecord = recordFor( prev );
- txn->recoveryUnit()->writingInt( prevRecord->nextOfs() ) = todelete->nextOfs();
- }
+ return Status::OK();
+}
- if ( todelete->nextOfs() != DiskLoc::NullOfs ) {
- DiskLoc next = getNextRecord( txn, dl );
- MmapV1RecordHeader* nextRecord = recordFor( next );
- txn->recoveryUnit()->writingInt( nextRecord->prevOfs() ) = todelete->prevOfs();
- }
- }
+void RecordStoreV1Base::deleteRecord(OperationContext* txn, const RecordId& rid) {
+ const DiskLoc dl = DiskLoc::fromRecordId(rid);
- /* remove ourself from extent pointers */
- {
- DiskLoc extentLoc = todelete->myExtentLoc(dl);
- Extent *e = _getExtent( txn, extentLoc );
- if ( e->firstRecord == dl ) {
- txn->recoveryUnit()->writing(&e->firstRecord);
- if ( todelete->nextOfs() == DiskLoc::NullOfs )
- e->firstRecord.Null();
- else
- e->firstRecord.set(dl.a(), todelete->nextOfs() );
- }
- if ( e->lastRecord == dl ) {
- txn->recoveryUnit()->writing(&e->lastRecord);
- if ( todelete->prevOfs() == DiskLoc::NullOfs )
- e->lastRecord.Null();
- else
- e->lastRecord.set(dl.a(), todelete->prevOfs() );
- }
- }
+ MmapV1RecordHeader* todelete = recordFor(dl);
+ invariant(todelete->netLength() >= 4); // this is required for defensive code
- /* add to the free list */
- {
- _details->incrementStats( txn, -1 * todelete->netLength(), -1 );
-
- if ( _isSystemIndexes ) {
- /* temp: if in system.indexes, don't reuse, and zero out: we want to be
- careful until validated more, as IndexDetails has pointers
- to this disk location. so an incorrectly done remove would cause
- a lot of problems.
- */
- memset( txn->recoveryUnit()->writingPtr(todelete, todelete->lengthWithHeaders() ),
- 0, todelete->lengthWithHeaders() );
- }
- else {
- // this is defensive so we can detect if we are still using a location
- // that was deleted
- memset(txn->recoveryUnit()->writingPtr(todelete->data(), 4), 0xee, 4);
- addDeletedRec(txn, dl);
- }
+ /* remove ourself from the record next/prev chain */
+ {
+ if (todelete->prevOfs() != DiskLoc::NullOfs) {
+ DiskLoc prev = getPrevRecordInExtent(txn, dl);
+ MmapV1RecordHeader* prevRecord = recordFor(prev);
+ txn->recoveryUnit()->writingInt(prevRecord->nextOfs()) = todelete->nextOfs();
}
+ if (todelete->nextOfs() != DiskLoc::NullOfs) {
+ DiskLoc next = getNextRecord(txn, dl);
+ MmapV1RecordHeader* nextRecord = recordFor(next);
+ txn->recoveryUnit()->writingInt(nextRecord->prevOfs()) = todelete->prevOfs();
+ }
}
- std::unique_ptr<RecordCursor> RecordStoreV1Base::getCursorForRepair(
- OperationContext* txn) const {
- return stdx::make_unique<RecordStoreV1RepairCursor>(txn, this);
- }
-
- void RecordStoreV1Base::_addRecordToRecListInExtent(OperationContext* txn,
- MmapV1RecordHeader *r,
- DiskLoc loc) {
- dassert( recordFor(loc) == r );
- DiskLoc extentLoc = _getExtentLocForRecord( txn, loc );
- Extent *e = _getExtent( txn, extentLoc );
- if ( e->lastRecord.isNull() ) {
- *txn->recoveryUnit()->writing(&e->firstRecord) = loc;
- *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
- r->prevOfs() = r->nextOfs() = DiskLoc::NullOfs;
+ /* remove ourself from extent pointers */
+ {
+ DiskLoc extentLoc = todelete->myExtentLoc(dl);
+ Extent* e = _getExtent(txn, extentLoc);
+ if (e->firstRecord == dl) {
+ txn->recoveryUnit()->writing(&e->firstRecord);
+ if (todelete->nextOfs() == DiskLoc::NullOfs)
+ e->firstRecord.Null();
+ else
+ e->firstRecord.set(dl.a(), todelete->nextOfs());
}
- else {
- MmapV1RecordHeader *oldlast = recordFor(e->lastRecord);
- r->prevOfs() = e->lastRecord.getOfs();
- r->nextOfs() = DiskLoc::NullOfs;
- txn->recoveryUnit()->writingInt(oldlast->nextOfs()) = loc.getOfs();
- *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
+ if (e->lastRecord == dl) {
+ txn->recoveryUnit()->writing(&e->lastRecord);
+ if (todelete->prevOfs() == DiskLoc::NullOfs)
+ e->lastRecord.Null();
+ else
+ e->lastRecord.set(dl.a(), todelete->prevOfs());
}
}
- void RecordStoreV1Base::increaseStorageSize( OperationContext* txn,
- int size,
- bool enforceQuota ) {
- DiskLoc eloc = _extentManager->allocateExtent( txn,
- isCapped(),
- size,
- enforceQuota );
- Extent *e = _extentManager->getExtent( eloc );
- invariant( e );
-
- *txn->recoveryUnit()->writing( &e->nsDiagnostic ) = _ns;
-
- txn->recoveryUnit()->writing( &e->xnext )->Null();
- txn->recoveryUnit()->writing( &e->xprev )->Null();
- txn->recoveryUnit()->writing( &e->firstRecord )->Null();
- txn->recoveryUnit()->writing( &e->lastRecord )->Null();
-
- DiskLoc emptyLoc = _findFirstSpot( txn, eloc, e );
-
- if ( _details->lastExtent(txn).isNull() ) {
- invariant( _details->firstExtent(txn).isNull() );
- _details->setFirstExtent( txn, eloc );
- _details->setLastExtent( txn, eloc );
- _details->setCapExtent( txn, eloc );
- invariant( e->xprev.isNull() );
- invariant( e->xnext.isNull() );
- }
- else {
- invariant( !_details->firstExtent(txn).isNull() );
- *txn->recoveryUnit()->writing(&e->xprev) = _details->lastExtent(txn);
- *txn->recoveryUnit()->writing(&_extentManager->getExtent(_details->lastExtent(txn))->xnext) = eloc;
- _details->setLastExtent( txn, eloc );
+ /* add to the free list */
+ {
+ _details->incrementStats(txn, -1 * todelete->netLength(), -1);
+
+ if (_isSystemIndexes) {
+ /* temp: if in system.indexes, don't reuse, and zero out: we want to be
+ careful until validated more, as IndexDetails has pointers
+ to this disk location. so an incorrectly done remove would cause
+ a lot of problems.
+ */
+ memset(txn->recoveryUnit()->writingPtr(todelete, todelete->lengthWithHeaders()),
+ 0,
+ todelete->lengthWithHeaders());
+ } else {
+ // this is defensive so we can detect if we are still using a location
+ // that was deleted
+ memset(txn->recoveryUnit()->writingPtr(todelete->data(), 4), 0xee, 4);
+ addDeletedRec(txn, dl);
}
+ }
+}
- _details->setLastExtentSize( txn, e->length );
+std::unique_ptr<RecordCursor> RecordStoreV1Base::getCursorForRepair(OperationContext* txn) const {
+ return stdx::make_unique<RecordStoreV1RepairCursor>(txn, this);
+}
- addDeletedRec(txn, emptyLoc);
+void RecordStoreV1Base::_addRecordToRecListInExtent(OperationContext* txn,
+ MmapV1RecordHeader* r,
+ DiskLoc loc) {
+ dassert(recordFor(loc) == r);
+ DiskLoc extentLoc = _getExtentLocForRecord(txn, loc);
+ Extent* e = _getExtent(txn, extentLoc);
+ if (e->lastRecord.isNull()) {
+ *txn->recoveryUnit()->writing(&e->firstRecord) = loc;
+ *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
+ r->prevOfs() = r->nextOfs() = DiskLoc::NullOfs;
+ } else {
+ MmapV1RecordHeader* oldlast = recordFor(e->lastRecord);
+ r->prevOfs() = e->lastRecord.getOfs();
+ r->nextOfs() = DiskLoc::NullOfs;
+ txn->recoveryUnit()->writingInt(oldlast->nextOfs()) = loc.getOfs();
+ *txn->recoveryUnit()->writing(&e->lastRecord) = loc;
}
+}
- Status RecordStoreV1Base::validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results, BSONObjBuilder* output ) {
+void RecordStoreV1Base::increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
+ DiskLoc eloc = _extentManager->allocateExtent(txn, isCapped(), size, enforceQuota);
+ Extent* e = _extentManager->getExtent(eloc);
+ invariant(e);
- // 1) basic status that require no iteration
- // 2) extent level info
- // 3) check extent start and end
- // 4) check each non-deleted record
- // 5) check deleted list
+ *txn->recoveryUnit()->writing(&e->nsDiagnostic) = _ns;
- // -------------
+ txn->recoveryUnit()->writing(&e->xnext)->Null();
+ txn->recoveryUnit()->writing(&e->xprev)->Null();
+ txn->recoveryUnit()->writing(&e->firstRecord)->Null();
+ txn->recoveryUnit()->writing(&e->lastRecord)->Null();
- // 1111111111111111111
- if ( isCapped() ){
- output->appendBool("capped", true);
- output->appendNumber("max", _details->maxCappedDocs());
- }
+ DiskLoc emptyLoc = _findFirstSpot(txn, eloc, e);
- output->appendNumber("datasize", _details->dataSize());
- output->appendNumber("nrecords", _details->numRecords());
- output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
-
- if ( _details->firstExtent(txn).isNull() )
- output->append( "firstExtent", "null" );
- else
- output->append( "firstExtent",
- str::stream() << _details->firstExtent(txn).toString()
- << " ns:"
- << _getExtent( txn, _details->firstExtent(txn) )->nsDiagnostic.toString());
- if ( _details->lastExtent(txn).isNull() )
- output->append( "lastExtent", "null" );
- else
- output->append( "lastExtent", str::stream() << _details->lastExtent(txn).toString()
- << " ns:"
- << _getExtent( txn, _details->lastExtent(txn) )->nsDiagnostic.toString());
-
- // 22222222222222222222222222
- { // validate extent basics
- BSONArrayBuilder extentData;
- int extentCount = 0;
- DiskLoc extentDiskLoc;
- try {
- if ( !_details->firstExtent(txn).isNull() ) {
- _getExtent( txn, _details->firstExtent(txn) )->assertOk();
- _getExtent( txn, _details->lastExtent(txn) )->assertOk();
- }
+ if (_details->lastExtent(txn).isNull()) {
+ invariant(_details->firstExtent(txn).isNull());
+ _details->setFirstExtent(txn, eloc);
+ _details->setLastExtent(txn, eloc);
+ _details->setCapExtent(txn, eloc);
+ invariant(e->xprev.isNull());
+ invariant(e->xnext.isNull());
+ } else {
+ invariant(!_details->firstExtent(txn).isNull());
+ *txn->recoveryUnit()->writing(&e->xprev) = _details->lastExtent(txn);
+ *txn->recoveryUnit()->writing(
+ &_extentManager->getExtent(_details->lastExtent(txn))->xnext) = eloc;
+ _details->setLastExtent(txn, eloc);
+ }
- extentDiskLoc = _details->firstExtent(txn);
- while (!extentDiskLoc.isNull()) {
- Extent* thisExtent = _getExtent( txn, extentDiskLoc );
- if (full) {
- extentData << thisExtent->dump();
- }
- if (!thisExtent->validates(extentDiskLoc, &results->errors)) {
- results->valid = false;
- }
- DiskLoc nextDiskLoc = thisExtent->xnext;
+ _details->setLastExtentSize(txn, e->length);
- if (extentCount > 0 && !nextDiskLoc.isNull()
- && _getExtent( txn, nextDiskLoc )->xprev != extentDiskLoc) {
- StringBuilder sb;
- sb << "'xprev' pointer " << _getExtent( txn, nextDiskLoc )->xprev.toString()
- << " in extent " << nextDiskLoc.toString()
- << " does not point to extent " << extentDiskLoc.toString();
- results->errors.push_back( sb.str() );
- results->valid = false;
- }
- if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(txn)) {
- StringBuilder sb;
- sb << "'lastExtent' pointer " << _details->lastExtent(txn).toString()
- << " does not point to last extent in list " << extentDiskLoc.toString();
- results->errors.push_back( sb.str() );
- results->valid = false;
- }
- extentDiskLoc = nextDiskLoc;
- extentCount++;
- txn->checkForInterrupt();
- }
- }
- catch (const DBException& e) {
- StringBuilder sb;
- sb << "exception validating extent " << extentCount
- << ": " << e.what();
- results->errors.push_back( sb.str() );
- results->valid = false;
- return Status::OK();
- }
- output->append("extentCount", extentCount);
+ addDeletedRec(txn, emptyLoc);
+}
- if ( full )
- output->appendArray( "extents" , extentData.arr() );
+Status RecordStoreV1Base::validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output) {
+ // 1) basic status that require no iteration
+ // 2) extent level info
+ // 3) check extent start and end
+ // 4) check each non-deleted record
+ // 5) check deleted list
+
+ // -------------
+
+ // 1111111111111111111
+ if (isCapped()) {
+ output->appendBool("capped", true);
+ output->appendNumber("max", _details->maxCappedDocs());
+ }
+
+ output->appendNumber("datasize", _details->dataSize());
+ output->appendNumber("nrecords", _details->numRecords());
+ output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
+
+ if (_details->firstExtent(txn).isNull())
+ output->append("firstExtent", "null");
+ else
+ output->append("firstExtent",
+ str::stream()
+ << _details->firstExtent(txn).toString() << " ns:"
+ << _getExtent(txn, _details->firstExtent(txn))->nsDiagnostic.toString());
+ if (_details->lastExtent(txn).isNull())
+ output->append("lastExtent", "null");
+ else
+ output->append("lastExtent",
+ str::stream()
+ << _details->lastExtent(txn).toString() << " ns:"
+ << _getExtent(txn, _details->lastExtent(txn))->nsDiagnostic.toString());
+
+ // 22222222222222222222222222
+ { // validate extent basics
+ BSONArrayBuilder extentData;
+ int extentCount = 0;
+ DiskLoc extentDiskLoc;
+ try {
+ if (!_details->firstExtent(txn).isNull()) {
+ _getExtent(txn, _details->firstExtent(txn))->assertOk();
+ _getExtent(txn, _details->lastExtent(txn))->assertOk();
+ }
+ extentDiskLoc = _details->firstExtent(txn);
+ while (!extentDiskLoc.isNull()) {
+ Extent* thisExtent = _getExtent(txn, extentDiskLoc);
+ if (full) {
+ extentData << thisExtent->dump();
+ }
+ if (!thisExtent->validates(extentDiskLoc, &results->errors)) {
+ results->valid = false;
+ }
+ DiskLoc nextDiskLoc = thisExtent->xnext;
+
+ if (extentCount > 0 && !nextDiskLoc.isNull() &&
+ _getExtent(txn, nextDiskLoc)->xprev != extentDiskLoc) {
+ StringBuilder sb;
+ sb << "'xprev' pointer " << _getExtent(txn, nextDiskLoc)->xprev.toString()
+ << " in extent " << nextDiskLoc.toString() << " does not point to extent "
+ << extentDiskLoc.toString();
+ results->errors.push_back(sb.str());
+ results->valid = false;
+ }
+ if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(txn)) {
+ StringBuilder sb;
+ sb << "'lastExtent' pointer " << _details->lastExtent(txn).toString()
+ << " does not point to last extent in list " << extentDiskLoc.toString();
+ results->errors.push_back(sb.str());
+ results->valid = false;
+ }
+ extentDiskLoc = nextDiskLoc;
+ extentCount++;
+ txn->checkForInterrupt();
+ }
+ } catch (const DBException& e) {
+ StringBuilder sb;
+ sb << "exception validating extent " << extentCount << ": " << e.what();
+ results->errors.push_back(sb.str());
+ results->valid = false;
+ return Status::OK();
}
+ output->append("extentCount", extentCount);
+
+ if (full)
+ output->appendArray("extents", extentData.arr());
+ }
+ try {
+ // 333333333333333333333333333
+ bool testingLastExtent = false;
try {
- // 333333333333333333333333333
- bool testingLastExtent = false;
- try {
- DiskLoc firstExtentLoc = _details->firstExtent(txn);
- if (firstExtentLoc.isNull()) {
- // this is ok
+ DiskLoc firstExtentLoc = _details->firstExtent(txn);
+ if (firstExtentLoc.isNull()) {
+ // this is ok
+ } else {
+ output->append("firstExtentDetails", _getExtent(txn, firstExtentLoc)->dump());
+ if (!_getExtent(txn, firstExtentLoc)->xprev.isNull()) {
+ StringBuilder sb;
+ sb << "'xprev' pointer in 'firstExtent' "
+ << _details->firstExtent(txn).toString() << " is "
+ << _getExtent(txn, firstExtentLoc)->xprev.toString() << ", should be null";
+ results->errors.push_back(sb.str());
+ results->valid = false;
}
- else {
- output->append("firstExtentDetails", _getExtent(txn, firstExtentLoc)->dump());
- if (!_getExtent(txn, firstExtentLoc)->xprev.isNull()) {
+ }
+ testingLastExtent = true;
+ DiskLoc lastExtentLoc = _details->lastExtent(txn);
+ if (lastExtentLoc.isNull()) {
+ // this is ok
+ } else {
+ if (firstExtentLoc != lastExtentLoc) {
+ output->append("lastExtentDetails", _getExtent(txn, lastExtentLoc)->dump());
+ if (!_getExtent(txn, lastExtentLoc)->xnext.isNull()) {
StringBuilder sb;
- sb << "'xprev' pointer in 'firstExtent' " << _details->firstExtent(txn).toString()
- << " is " << _getExtent(txn, firstExtentLoc)->xprev.toString()
+ sb << "'xnext' pointer in 'lastExtent' " << lastExtentLoc.toString()
+ << " is " << _getExtent(txn, lastExtentLoc)->xnext.toString()
<< ", should be null";
- results->errors.push_back( sb.str() );
+ results->errors.push_back(sb.str());
results->valid = false;
}
}
- testingLastExtent = true;
- DiskLoc lastExtentLoc = _details->lastExtent(txn);
- if (lastExtentLoc.isNull()) {
- // this is ok
- }
- else {
- if (firstExtentLoc != lastExtentLoc) {
- output->append("lastExtentDetails", _getExtent(txn, lastExtentLoc)->dump());
- if (!_getExtent(txn, lastExtentLoc)->xnext.isNull()) {
- StringBuilder sb;
- sb << "'xnext' pointer in 'lastExtent' " << lastExtentLoc.toString()
- << " is " << _getExtent(txn, lastExtentLoc)->xnext.toString()
- << ", should be null";
- results->errors.push_back( sb.str() );
- results->valid = false;
- }
- }
- }
- }
- catch (const DBException& e) {
- StringBuilder sb;
- sb << "exception processing '"
- << (testingLastExtent ? "lastExtent" : "firstExtent")
- << "': " << e.what();
- results->errors.push_back( sb.str() );
- results->valid = false;
}
+ } catch (const DBException& e) {
+ StringBuilder sb;
+ sb << "exception processing '" << (testingLastExtent ? "lastExtent" : "firstExtent")
+ << "': " << e.what();
+ results->errors.push_back(sb.str());
+ results->valid = false;
+ }
- // 4444444444444444444444444
-
- set<DiskLoc> recs;
- if( scanData ) {
- int n = 0;
- int nInvalid = 0;
- long long nQuantizedSize = 0;
- long long len = 0;
- long long nlen = 0;
- long long bsonLen = 0;
- int outOfOrder = 0;
- DiskLoc dl_last;
-
- auto cursor = getCursor(txn);
- while (auto record = cursor->next()) {
- const auto dl = DiskLoc::fromRecordId(record->id);
- n++;
-
- if ( n < 1000000 )
- recs.insert(dl);
- if ( isCapped() ) {
- if ( dl < dl_last )
- outOfOrder++;
- dl_last = dl;
- }
-
- MmapV1RecordHeader *r = recordFor(dl);
- len += r->lengthWithHeaders();
- nlen += r->netLength();
+ // 4444444444444444444444444
+
+ set<DiskLoc> recs;
+ if (scanData) {
+ int n = 0;
+ int nInvalid = 0;
+ long long nQuantizedSize = 0;
+ long long len = 0;
+ long long nlen = 0;
+ long long bsonLen = 0;
+ int outOfOrder = 0;
+ DiskLoc dl_last;
+
+ auto cursor = getCursor(txn);
+ while (auto record = cursor->next()) {
+ const auto dl = DiskLoc::fromRecordId(record->id);
+ n++;
+
+ if (n < 1000000)
+ recs.insert(dl);
+ if (isCapped()) {
+ if (dl < dl_last)
+ outOfOrder++;
+ dl_last = dl;
+ }
- if ( isQuantized( r->lengthWithHeaders() ) ) {
- // Count the number of records having a size consistent with
- // the quantizeAllocationSpace quantization implementation.
- ++nQuantizedSize;
- }
+ MmapV1RecordHeader* r = recordFor(dl);
+ len += r->lengthWithHeaders();
+ nlen += r->netLength();
- if (full){
- size_t dataSize = 0;
- const Status status = adaptor->validate( r->toRecordData(), &dataSize );
- if (!status.isOK()) {
- results->valid = false;
- if (nInvalid == 0) // only log once;
- results->errors.push_back( "invalid object detected (see logs)" );
-
- nInvalid++;
- log() << "Invalid object detected in " << _ns
- << ": " << status.reason();
- }
- else {
- bsonLen += dataSize;
- }
- }
+ if (isQuantized(r->lengthWithHeaders())) {
+ // Count the number of records having a size consistent with
+ // the quantizeAllocationSpace quantization implementation.
+ ++nQuantizedSize;
}
- if ( isCapped() && !_details->capLooped() ) {
- output->append("cappedOutOfOrder", outOfOrder);
- if ( outOfOrder > 1 ) {
+ if (full) {
+ size_t dataSize = 0;
+ const Status status = adaptor->validate(r->toRecordData(), &dataSize);
+ if (!status.isOK()) {
results->valid = false;
- results->errors.push_back( "too many out of order records" );
+ if (nInvalid == 0) // only log once;
+ results->errors.push_back("invalid object detected (see logs)");
+
+ nInvalid++;
+ log() << "Invalid object detected in " << _ns << ": " << status.reason();
+ } else {
+ bsonLen += dataSize;
}
}
- output->append("objectsFound", n);
+ }
- if (full) {
- output->append("invalidObjects", nInvalid);
+ if (isCapped() && !_details->capLooped()) {
+ output->append("cappedOutOfOrder", outOfOrder);
+ if (outOfOrder > 1) {
+ results->valid = false;
+ results->errors.push_back("too many out of order records");
}
+ }
+ output->append("objectsFound", n);
- output->appendNumber("nQuantizedSize", nQuantizedSize);
- output->appendNumber("bytesWithHeaders", len);
- output->appendNumber("bytesWithoutHeaders", nlen);
+ if (full) {
+ output->append("invalidObjects", nInvalid);
+ }
- if (full) {
- output->appendNumber("bytesBson", bsonLen);
- }
- } // end scanData
+ output->appendNumber("nQuantizedSize", nQuantizedSize);
+ output->appendNumber("bytesWithHeaders", len);
+ output->appendNumber("bytesWithoutHeaders", nlen);
- // 55555555555555555555555555
- BSONArrayBuilder deletedListArray;
- for ( int i = 0; i < Buckets; i++ ) {
- deletedListArray << _details->deletedListEntry(i).isNull();
+ if (full) {
+ output->appendNumber("bytesBson", bsonLen);
}
+ } // end scanData
+
+ // 55555555555555555555555555
+ BSONArrayBuilder deletedListArray;
+ for (int i = 0; i < Buckets; i++) {
+ deletedListArray << _details->deletedListEntry(i).isNull();
+ }
- int ndel = 0;
- long long delSize = 0;
- BSONArrayBuilder delBucketSizes;
- int incorrect = 0;
- for ( int i = 0; i < Buckets; i++ ) {
- DiskLoc loc = _details->deletedListEntry(i);
- try {
- int k = 0;
- while ( !loc.isNull() ) {
- if ( recs.count(loc) )
- incorrect++;
- ndel++;
-
- if ( loc.questionable() ) {
- if( isCapped() && !loc.isValid() && i == 1 ) {
- /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
- see comments in namespace.h
- */
- break;
- }
-
- string err( str::stream() << "bad pointer in deleted record list: "
- << loc.toString()
- << " bucket: " << i
- << " k: " << k );
- results->errors.push_back( err );
- results->valid = false;
+ int ndel = 0;
+ long long delSize = 0;
+ BSONArrayBuilder delBucketSizes;
+ int incorrect = 0;
+ for (int i = 0; i < Buckets; i++) {
+ DiskLoc loc = _details->deletedListEntry(i);
+ try {
+ int k = 0;
+ while (!loc.isNull()) {
+ if (recs.count(loc))
+ incorrect++;
+ ndel++;
+
+ if (loc.questionable()) {
+ if (isCapped() && !loc.isValid() && i == 1) {
+ /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
+ see comments in namespace.h
+ */
break;
}
- const DeletedRecord* d = deletedRecordFor(loc);
- delSize += d->lengthWithHeaders();
- loc = d->nextDeleted();
- k++;
- txn->checkForInterrupt();
+ string err(str::stream()
+ << "bad pointer in deleted record list: " << loc.toString()
+ << " bucket: " << i << " k: " << k);
+ results->errors.push_back(err);
+ results->valid = false;
+ break;
}
- delBucketSizes << k;
- }
- catch (...) {
- results->errors.push_back( (string)"exception in deleted chain for bucket " +
- BSONObjBuilder::numStr(i) );
- results->valid = false;
- }
- }
- output->appendNumber("deletedCount", ndel);
- output->appendNumber("deletedSize", delSize);
- if ( full ) {
- output->append( "delBucketSizes", delBucketSizes.arr() );
- }
- if ( incorrect ) {
- results->errors.push_back( BSONObjBuilder::numStr(incorrect) +
- " records from datafile are in deleted list" );
+ const DeletedRecord* d = deletedRecordFor(loc);
+ delSize += d->lengthWithHeaders();
+ loc = d->nextDeleted();
+ k++;
+ txn->checkForInterrupt();
+ }
+ delBucketSizes << k;
+ } catch (...) {
+ results->errors.push_back((string) "exception in deleted chain for bucket " +
+ BSONObjBuilder::numStr(i));
results->valid = false;
}
-
}
- catch (AssertionException) {
- results->errors.push_back( "exception during validate" );
- results->valid = false;
+ output->appendNumber("deletedCount", ndel);
+ output->appendNumber("deletedSize", delSize);
+ if (full) {
+ output->append("delBucketSizes", delBucketSizes.arr());
}
- return Status::OK();
- }
-
- void RecordStoreV1Base::appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const {
- result->append( "lastExtentSize", _details->lastExtentSize(txn) / scale );
- result->append( "paddingFactor", 1.0 ); // hard coded
- result->append( "paddingFactorNote", "paddingFactor is unused and unmaintained in 3.0. It "
- "remains hard coded to 1.0 for compatibility only." );
- result->append( "userFlags", _details->userFlags() );
- result->appendBool( "capped", isCapped() );
- if ( isCapped() ) {
- result->appendNumber( "max", _details->maxCappedDocs() );
- result->appendNumber( "maxSize", static_cast<long long>(storageSize(txn, NULL, 0) /
- scale) );
+ if (incorrect) {
+ results->errors.push_back(BSONObjBuilder::numStr(incorrect) +
+ " records from datafile are in deleted list");
+ results->valid = false;
}
+
+ } catch (AssertionException) {
+ results->errors.push_back("exception during validate");
+ results->valid = false;
}
+ return Status::OK();
+}
- namespace {
- struct touch_location {
- const char* root;
- size_t length;
- };
+void RecordStoreV1Base::appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const {
+ result->append("lastExtentSize", _details->lastExtentSize(txn) / scale);
+ result->append("paddingFactor", 1.0); // hard coded
+ result->append("paddingFactorNote",
+ "paddingFactor is unused and unmaintained in 3.0. It "
+ "remains hard coded to 1.0 for compatibility only.");
+ result->append("userFlags", _details->userFlags());
+ result->appendBool("capped", isCapped());
+ if (isCapped()) {
+ result->appendNumber("max", _details->maxCappedDocs());
+ result->appendNumber("maxSize", static_cast<long long>(storageSize(txn, NULL, 0) / scale));
}
+}
- Status RecordStoreV1Base::touch( OperationContext* txn, BSONObjBuilder* output ) const {
- Timer t;
- std::vector<touch_location> ranges;
- {
- DiskLoc nextLoc = _details->firstExtent(txn);
- Extent* ext = nextLoc.isNull() ? NULL : _getExtent( txn, nextLoc );
- while ( ext ) {
- touch_location tl;
- tl.root = reinterpret_cast<const char*>(ext);
- tl.length = ext->length;
- ranges.push_back(tl);
+namespace {
+struct touch_location {
+ const char* root;
+ size_t length;
+};
+}
- nextLoc = ext->xnext;
- if ( nextLoc.isNull() )
- ext = NULL;
- else
- ext = _getExtent( txn, nextLoc );
- }
- }
+Status RecordStoreV1Base::touch(OperationContext* txn, BSONObjBuilder* output) const {
+ Timer t;
- std::string progress_msg = "touch " + std::string(txn->getNS()) + " extents";
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeterHolder pm(*txn->setMessage_inlock(progress_msg.c_str(),
- "Touch Progress",
- ranges.size()));
- lk.unlock();
-
- for ( std::vector<touch_location>::iterator it = ranges.begin(); it != ranges.end(); ++it ) {
- touch_pages( it->root, it->length );
- pm.hit();
- txn->checkForInterrupt();
- }
- pm.finished();
+ std::vector<touch_location> ranges;
+ {
+ DiskLoc nextLoc = _details->firstExtent(txn);
+ Extent* ext = nextLoc.isNull() ? NULL : _getExtent(txn, nextLoc);
+ while (ext) {
+ touch_location tl;
+ tl.root = reinterpret_cast<const char*>(ext);
+ tl.length = ext->length;
+ ranges.push_back(tl);
- if ( output ) {
- output->append( "numRanges", static_cast<int>( ranges.size() ) );
- output->append( "millis", t.millis() );
+ nextLoc = ext->xnext;
+ if (nextLoc.isNull())
+ ext = NULL;
+ else
+ ext = _getExtent(txn, nextLoc);
}
-
- return Status::OK();
}
- boost::optional<Record> RecordStoreV1Base::IntraExtentIterator::next() {
- if (_curr.isNull()) return {};
- auto out = _curr.toRecordId();
- advance();
- return {{out, _rs->dataFor(_txn, out)}};
+ std::string progress_msg = "touch " + std::string(txn->getNS()) + " extents";
+ stdx::unique_lock<Client> lk(*txn->getClient());
+ ProgressMeterHolder pm(
+ *txn->setMessage_inlock(progress_msg.c_str(), "Touch Progress", ranges.size()));
+ lk.unlock();
+
+ for (std::vector<touch_location>::iterator it = ranges.begin(); it != ranges.end(); ++it) {
+ touch_pages(it->root, it->length);
+ pm.hit();
+ txn->checkForInterrupt();
}
+ pm.finished();
- boost::optional<Record> RecordStoreV1Base::IntraExtentIterator::seekExact(const RecordId& id) {
- invariant(!"seekExact not supported");
+ if (output) {
+ output->append("numRanges", static_cast<int>(ranges.size()));
+ output->append("millis", t.millis());
}
- void RecordStoreV1Base::IntraExtentIterator::advance() {
- if (_curr.isNull())
- return;
+ return Status::OK();
+}
- const MmapV1RecordHeader* rec = recordFor(_curr);
- const int nextOfs = _forward ? rec->nextOfs() : rec->prevOfs();
- _curr = (nextOfs == DiskLoc::NullOfs ? DiskLoc() : DiskLoc(_curr.a(), nextOfs));
- }
+boost::optional<Record> RecordStoreV1Base::IntraExtentIterator::next() {
+ if (_curr.isNull())
+ return {};
+ auto out = _curr.toRecordId();
+ advance();
+ return {{out, _rs->dataFor(_txn, out)}};
+}
- void RecordStoreV1Base::IntraExtentIterator::invalidate(const RecordId& rid) {
- if (rid == _curr.toRecordId()) {
- advance();
- }
- }
+boost::optional<Record> RecordStoreV1Base::IntraExtentIterator::seekExact(const RecordId& id) {
+ invariant(!"seekExact not supported");
+}
+
+void RecordStoreV1Base::IntraExtentIterator::advance() {
+ if (_curr.isNull())
+ return;
- std::unique_ptr<RecordFetcher> RecordStoreV1Base::IntraExtentIterator::fetcherForNext() const {
- return _rs->_extentManager->recordNeedsFetch(_curr);
+ const MmapV1RecordHeader* rec = recordFor(_curr);
+ const int nextOfs = _forward ? rec->nextOfs() : rec->prevOfs();
+ _curr = (nextOfs == DiskLoc::NullOfs ? DiskLoc() : DiskLoc(_curr.a(), nextOfs));
+}
+
+void RecordStoreV1Base::IntraExtentIterator::invalidate(const RecordId& rid) {
+ if (rid == _curr.toRecordId()) {
+ advance();
}
+}
- int RecordStoreV1Base::quantizeAllocationSpace(int allocSize) {
- invariant(allocSize <= MaxAllowedAllocation);
- for ( int i = 0; i < Buckets - 2; i++ ) { // last two bucketSizes are invalid
- if ( bucketSizes[i] >= allocSize ) {
- // Return the size of the first bucket sized >= the requested size.
- return bucketSizes[i];
- }
+std::unique_ptr<RecordFetcher> RecordStoreV1Base::IntraExtentIterator::fetcherForNext() const {
+ return _rs->_extentManager->recordNeedsFetch(_curr);
+}
+
+int RecordStoreV1Base::quantizeAllocationSpace(int allocSize) {
+ invariant(allocSize <= MaxAllowedAllocation);
+ for (int i = 0; i < Buckets - 2; i++) { // last two bucketSizes are invalid
+ if (bucketSizes[i] >= allocSize) {
+ // Return the size of the first bucket sized >= the requested size.
+ return bucketSizes[i];
}
- invariant(false); // prior invariant means we should find something.
}
+ invariant(false); // prior invariant means we should find something.
+}
- bool RecordStoreV1Base::isQuantized(int recordSize) {
- if (recordSize > MaxAllowedAllocation)
- return false;
+bool RecordStoreV1Base::isQuantized(int recordSize) {
+ if (recordSize > MaxAllowedAllocation)
+ return false;
- return recordSize == quantizeAllocationSpace(recordSize);
- }
+ return recordSize == quantizeAllocationSpace(recordSize);
+}
- int RecordStoreV1Base::bucket(int size) {
- for ( int i = 0; i < Buckets; i++ ) {
- if ( bucketSizes[i] > size ) {
- // Return the first bucket sized _larger_ than the requested size. This is important
- // since we want all records in a bucket to be >= the quantized size, therefore the
- // quantized size must be the smallest allowed record per bucket.
- return i;
- }
+int RecordStoreV1Base::bucket(int size) {
+ for (int i = 0; i < Buckets; i++) {
+ if (bucketSizes[i] > size) {
+ // Return the first bucket sized _larger_ than the requested size. This is important
+ // since we want all records in a bucket to be >= the quantized size, therefore the
+ // quantized size must be the smallest allowed record per bucket.
+ return i;
}
- // Technically, this is reachable if size == INT_MAX, but it would be an error to pass that
- // in anyway since it would be impossible to have a record that large given the file and
- // extent headers.
- invariant(false);
}
+ // Technically, this is reachable if size == INT_MAX, but it would be an error to pass that
+ // in anyway since it would be impossible to have a record that large given the file and
+ // extent headers.
+ invariant(false);
+}
}
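(Aside, not part of the patch: quantizeAllocationSpace() and bucket() above perform two slightly different lookups over the same bucketSizes table — the first returns the smallest bucket size that is >= the requested allocation, the second returns the index of the first bucket size strictly greater than a record's size. Below is a standalone C++ sketch with a trimmed-down table, just to make the >= versus > distinction concrete; the table values here are illustrative, not the full production list.)

#include <cassert>
#include <iostream>

namespace {

// Trimmed-down stand-in for RecordStoreV1Base::bucketSizes.
const int kBucketSizes[] = {0x20, 0x40, 0x80, 0x100, 0x200, 0x400};
const int kNumBuckets = sizeof(kBucketSizes) / sizeof(kBucketSizes[0]);

// Smallest bucket size >= the requested allocation (mirrors quantizeAllocationSpace):
// every record placed in a bucket is therefore at least the quantized size.
int quantize(int allocSize) {
    for (int i = 0; i < kNumBuckets; i++) {
        if (kBucketSizes[i] >= allocSize)
            return kBucketSizes[i];
    }
    assert(false);
    return -1;
}

// Index of the first bucket size strictly greater than the record (mirrors bucket()):
// a record exactly equal to kBucketSizes[n] lands in bucket n + 1.
int bucketFor(int recordSize) {
    for (int i = 0; i < kNumBuckets; i++) {
        if (kBucketSizes[i] > recordSize)
            return i;
    }
    assert(false);
    return -1;
}

}  // namespace

int main() {
    std::cout << quantize(300) << std::endl;    // 512: smallest entry >= 300
    std::cout << bucketFor(0x100) << std::endl; // 4: a 256-byte record goes in the next bucket up
    return 0;
}

This asymmetry is also why, per the comments in the table above, a record of exactly MaxAllowedAllocation bytes falls into the dedicated MaxAllowedAllocation + 1 bucket, while the final INT_MAX entry only ever collects unused extent tails.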
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index 4e1aa8de338..5c0437cce56 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -38,312 +38,319 @@
namespace mongo {
- class DeletedRecord;
- class DocWriter;
- class ExtentManager;
- class MmapV1RecordHeader;
- class OperationContext;
+class DeletedRecord;
+class DocWriter;
+class ExtentManager;
+class MmapV1RecordHeader;
+class OperationContext;
- struct Extent;
+struct Extent;
- class RecordStoreV1MetaData {
- public:
- virtual ~RecordStoreV1MetaData(){}
+class RecordStoreV1MetaData {
+public:
+ virtual ~RecordStoreV1MetaData() {}
- virtual const DiskLoc& capExtent() const = 0;
- virtual void setCapExtent( OperationContext* txn, const DiskLoc& loc ) = 0;
+ virtual const DiskLoc& capExtent() const = 0;
+ virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc) = 0;
- virtual const DiskLoc& capFirstNewRecord() const = 0;
- virtual void setCapFirstNewRecord( OperationContext* txn, const DiskLoc& loc ) = 0;
+ virtual const DiskLoc& capFirstNewRecord() const = 0;
+ virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) = 0;
- bool capLooped() const { return capFirstNewRecord().isValid(); }
+ bool capLooped() const {
+ return capFirstNewRecord().isValid();
+ }
- virtual long long dataSize() const = 0;
- virtual long long numRecords() const = 0;
+ virtual long long dataSize() const = 0;
+ virtual long long numRecords() const = 0;
- virtual void incrementStats( OperationContext* txn,
- long long dataSizeIncrement,
- long long numRecordsIncrement ) = 0;
+ virtual void incrementStats(OperationContext* txn,
+ long long dataSizeIncrement,
+ long long numRecordsIncrement) = 0;
- virtual void setStats( OperationContext* txn,
- long long dataSize,
- long long numRecords ) = 0;
+ virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords) = 0;
- virtual DiskLoc deletedListEntry( int bucket ) const = 0;
- virtual void setDeletedListEntry( OperationContext* txn,
- int bucket,
- const DiskLoc& loc ) = 0;
+ virtual DiskLoc deletedListEntry(int bucket) const = 0;
+ virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc) = 0;
- virtual DiskLoc deletedListLegacyGrabBag() const = 0;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc) = 0;
+ virtual DiskLoc deletedListLegacyGrabBag() const = 0;
+ virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc) = 0;
- virtual void orphanDeletedList(OperationContext* txn) = 0;
+ virtual void orphanDeletedList(OperationContext* txn) = 0;
- virtual const DiskLoc& firstExtent( OperationContext* txn ) const = 0;
- virtual void setFirstExtent( OperationContext* txn, const DiskLoc& loc ) = 0;
+ virtual const DiskLoc& firstExtent(OperationContext* txn) const = 0;
+ virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc) = 0;
- virtual const DiskLoc& lastExtent( OperationContext* txn ) const = 0;
- virtual void setLastExtent( OperationContext* txn, const DiskLoc& loc ) = 0;
+ virtual const DiskLoc& lastExtent(OperationContext* txn) const = 0;
+ virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc) = 0;
- virtual bool isCapped() const = 0;
+ virtual bool isCapped() const = 0;
- virtual bool isUserFlagSet( int flag ) const = 0;
- virtual int userFlags() const = 0;
- virtual bool setUserFlag( OperationContext* txn, int flag ) = 0;
- virtual bool clearUserFlag( OperationContext* txn, int flag ) = 0;
- virtual bool replaceUserFlags( OperationContext* txn, int flags ) = 0;
+ virtual bool isUserFlagSet(int flag) const = 0;
+ virtual int userFlags() const = 0;
+ virtual bool setUserFlag(OperationContext* txn, int flag) = 0;
+ virtual bool clearUserFlag(OperationContext* txn, int flag) = 0;
+ virtual bool replaceUserFlags(OperationContext* txn, int flags) = 0;
- virtual int lastExtentSize( OperationContext* txn) const = 0;
- virtual void setLastExtentSize( OperationContext* txn, int newMax ) = 0;
+ virtual int lastExtentSize(OperationContext* txn) const = 0;
+ virtual void setLastExtentSize(OperationContext* txn, int newMax) = 0;
- virtual long long maxCappedDocs() const = 0;
-
- };
+ virtual long long maxCappedDocs() const = 0;
+};
+/**
+ * Class that stores active cursors that have been saved (as part of yielding) to
+ * allow them to be invalidated if the thing they pointed at goes away. The registry is
+ * thread-safe, as readers may concurrently register and remove their cursors. Contention is
+ * expected to be very low, as yielding is infrequent. This logically belongs to the
+ * RecordStore, but is not contained in it to facilitate unit testing.
+ */
+class SavedCursorRegistry {
+public:
/**
- * Class that stores active cursors that have been saved (as part of yielding) to
- * allow them to be invalidated if the thing they pointed at goes away. The registry is
- * thread-safe, as readers may concurrently register and remove their cursors. Contention is
- * expected to be very low, as yielding is infrequent. This logically belongs to the
- * RecordStore, but is not contained in it to facilitate unit testing.
+ * The destructor ensures the cursor is unregistered when an exception is thrown.
+ * Note that the SavedCursor may outlive the registry it was saved in.
*/
- class SavedCursorRegistry {
- public:
- /**
- * The destructor ensures the cursor is unregistered when an exception is thrown.
- * Note that the SavedCursor may outlive the registry it was saved in.
- */
- struct SavedCursor {
- SavedCursor() : _registry(NULL) { }
- virtual ~SavedCursor() { if (_registry) _registry->unregisterCursor(this); }
- DiskLoc bucket;
- BSONObj key;
- DiskLoc loc;
-
- private:
- friend class SavedCursorRegistry;
- // Non-null iff registered. Accessed by owner or writer with MODE_X collection lock
- SavedCursorRegistry* _registry;
- };
-
- ~SavedCursorRegistry();
-
- /**
- * Adds given saved cursor to SavedCursorRegistry. Doesn't take ownership.
- */
- void registerCursor(SavedCursor* cursor);
-
- /**
- * Removes given saved cursor. Returns true if the cursor was still present, and false
- * if it had already been removed due to invalidation. Doesn't take ownership.
- */
- bool unregisterCursor(SavedCursor* cursor);
-
- /**
- * When a btree-bucket disappears due to merge/split or similar, this invalidates all
- * cursors that point at the same bucket by removing them from the registry.
- */
- void invalidateCursorsForBucket(DiskLoc bucket);
+ struct SavedCursor {
+ SavedCursor() : _registry(NULL) {}
+ virtual ~SavedCursor() {
+ if (_registry)
+ _registry->unregisterCursor(this);
+ }
+ DiskLoc bucket;
+ BSONObj key;
+ DiskLoc loc;
private:
- SpinLock _mutex;
- typedef unordered_set<SavedCursor *> SavedCursorSet; // SavedCursor pointers not owned here
- SavedCursorSet _cursors;
+ friend class SavedCursorRegistry;
+ // Non-null iff registered. Accessed by owner or writer with MODE_X collection lock
+ SavedCursorRegistry* _registry;
};
- class RecordStoreV1Base : public RecordStore {
- public:
-
- static const int Buckets = 26;
- static const int MaxAllowedAllocation = 16*1024*1024 + 512*1024;
+ ~SavedCursorRegistry();
- static const int bucketSizes[];
+ /**
+ * Adds given saved cursor to SavedCursorRegistry. Doesn't take ownership.
+ */
+ void registerCursor(SavedCursor* cursor);
- // ------------
+ /**
+ * Removes given saved cursor. Returns true if the cursor was still present, and false
+ * if it had already been removed due to invalidation. Doesn't take ownership.
+ */
+ bool unregisterCursor(SavedCursor* cursor);
- class IntraExtentIterator;
+ /**
+ * When a btree-bucket disappears due to merge/split or similar, this invalidates all
+ * cursors that point at the same bucket by removing them from the registry.
+ */
+ void invalidateCursorsForBucket(DiskLoc bucket);
- /**
- * @param details - takes ownership
- * @param em - does NOT take ownership
- */
- RecordStoreV1Base(StringData ns,
- RecordStoreV1MetaData* details,
- ExtentManager* em,
- bool isSystemIndexes);
+private:
+ SpinLock _mutex;
+ typedef unordered_set<SavedCursor*> SavedCursorSet; // SavedCursor pointers not owned here
+ SavedCursorSet _cursors;
+};
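Illustrative note (not part of the diff): the class above only declares the save/invalidate protocol. The following is a simplified, self-contained analogue of that protocol -- it is not mongo's implementation; DiskLoc is replaced by a plain int bucket id, there is no SpinLock, and the type names are invented for this sketch.

    #include <cassert>
    #include <unordered_set>

    struct ToyRegistry;

    struct ToySavedCursor {
        int bucket = 0;
        ToyRegistry* registry = nullptr;  // non-null iff currently registered
        ~ToySavedCursor();
    };

    struct ToyRegistry {
        std::unordered_set<ToySavedCursor*> cursors;  // pointers not owned here

        void registerCursor(ToySavedCursor* c) {
            cursors.insert(c);
            c->registry = this;
        }

        // Returns true if the cursor was still present, false if invalidation removed it.
        bool unregisterCursor(ToySavedCursor* c) {
            c->registry = nullptr;
            return cursors.erase(c) > 0;
        }

        // Drop every saved cursor that points at 'bucket'.
        void invalidateCursorsForBucket(int bucket) {
            for (auto it = cursors.begin(); it != cursors.end();) {
                if ((*it)->bucket == bucket) {
                    (*it)->registry = nullptr;
                    it = cursors.erase(it);
                } else {
                    ++it;
                }
            }
        }
    };

    ToySavedCursor::~ToySavedCursor() {
        if (registry)
            registry->unregisterCursor(this);  // mirrors SavedCursor's destructor
    }

    int main() {
        ToyRegistry reg;
        ToySavedCursor saved;
        saved.bucket = 7;
        reg.registerCursor(&saved);              // save position before yielding
        reg.invalidateCursorsForBucket(7);       // the btree bucket went away
        assert(!reg.unregisterCursor(&saved));   // restore observes the invalidation
        return 0;
    }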
- virtual ~RecordStoreV1Base();
+class RecordStoreV1Base : public RecordStore {
+public:
+ static const int Buckets = 26;
+ static const int MaxAllowedAllocation = 16 * 1024 * 1024 + 512 * 1024;
- virtual long long dataSize( OperationContext* txn ) const { return _details->dataSize(); }
- virtual long long numRecords( OperationContext* txn ) const { return _details->numRecords(); }
+ static const int bucketSizes[];
- virtual int64_t storageSize( OperationContext* txn,
- BSONObjBuilder* extraInfo = NULL,
- int level = 0 ) const;
+ // ------------
- virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
+ class IntraExtentIterator;
- virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* rd ) const;
+ /**
+ * @param details - takes ownership
+ * @param em - does NOT take ownership
+ */
+ RecordStoreV1Base(StringData ns,
+ RecordStoreV1MetaData* details,
+ ExtentManager* em,
+ bool isSystemIndexes);
- void deleteRecord( OperationContext* txn,
- const RecordId& dl );
+ virtual ~RecordStoreV1Base();
- StatusWith<RecordId> insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota );
+ virtual long long dataSize(OperationContext* txn) const {
+ return _details->dataSize();
+ }
+ virtual long long numRecords(OperationContext* txn) const {
+ return _details->numRecords();
+ }
- StatusWith<RecordId> insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota );
+ virtual int64_t storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int level = 0) const;
- virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
- const RecordId& oldLocation,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier );
+ virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
- virtual bool updateWithDamagesSupported() const;
+ virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* rd) const;
- virtual Status updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages );
+ void deleteRecord(OperationContext* txn, const RecordId& dl);
- virtual std::unique_ptr<RecordCursor> getCursorForRepair( OperationContext* txn ) const;
+ StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota);
- void increaseStorageSize( OperationContext* txn, int size, bool enforceQuota );
+ StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota);
- virtual Status validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results, BSONObjBuilder* output );
+ virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* data,
+ int len,
+ bool enforceQuota,
+ UpdateNotifier* notifier);
- virtual void appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const;
+ virtual bool updateWithDamagesSupported() const;
- virtual Status touch( OperationContext* txn, BSONObjBuilder* output ) const;
+ virtual Status updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages);
- const RecordStoreV1MetaData* details() const { return _details.get(); }
+ virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* txn) const;
- // This keeps track of cursors saved during yielding, for invalidation purposes.
- SavedCursorRegistry savedCursors;
+ void increaseStorageSize(OperationContext* txn, int size, bool enforceQuota);
- DiskLoc getExtentLocForRecord( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual Status validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output);
- DiskLoc getNextRecord( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc getPrevRecord( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual void appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const;
- DiskLoc getNextRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc getPrevRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const;
- /**
- * Quantize 'minSize' to the nearest allocation size.
- */
- static int quantizeAllocationSpace(int minSize);
+ const RecordStoreV1MetaData* details() const {
+ return _details.get();
+ }
- static bool isQuantized(int recordSize);
+ // This keeps track of cursors saved during yielding, for invalidation purposes.
+ SavedCursorRegistry savedCursors;
- /* return which "deleted bucket" for this size object */
- static int bucket(int size);
+ DiskLoc getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const;
- virtual void updateStatsAfterRepair(OperationContext* txn,
- long long numRecords,
- long long dataSize) {
- invariant(false); // MMAPv1 has its own repair which doesn't call this.
- }
- protected:
+ DiskLoc getNextRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc getPrevRecord(OperationContext* txn, const DiskLoc& loc) const;
- virtual MmapV1RecordHeader* recordFor( const DiskLoc& loc ) const;
+ DiskLoc getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
- const DeletedRecord* deletedRecordFor( const DiskLoc& loc ) const;
+ /**
+ * Quantize 'minSize' to the nearest allocation size.
+ */
+ static int quantizeAllocationSpace(int minSize);
- virtual bool isCapped() const = 0;
+ static bool isQuantized(int recordSize);
- virtual bool shouldPadInserts() const = 0;
+ /* return which "deleted bucket" for this size object */
+ static int bucket(int size);
- virtual StatusWith<DiskLoc> allocRecord( OperationContext* txn,
- int lengthWithHeaders,
- bool enforceQuota ) = 0;
+ virtual void updateStatsAfterRepair(OperationContext* txn,
+ long long numRecords,
+ long long dataSize) {
+ invariant(false); // MMAPv1 has its own repair which doesn't call this.
+ }
- // TODO: document, remove, what have you
- virtual void addDeletedRec( OperationContext* txn, const DiskLoc& dloc) = 0;
+protected:
+ virtual MmapV1RecordHeader* recordFor(const DiskLoc& loc) const;
- // TODO: another sad one
- virtual DeletedRecord* drec( const DiskLoc& loc ) const;
+ const DeletedRecord* deletedRecordFor(const DiskLoc& loc) const;
- // just a wrapper for _extentManager->getExtent( loc );
- Extent* _getExtent( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual bool isCapped() const = 0;
- DiskLoc _getExtentLocForRecord( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual bool shouldPadInserts() const = 0;
- DiskLoc _getNextRecord( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc _getPrevRecord( OperationContext* txn, const DiskLoc& loc ) const;
+ virtual StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ int lengthWithHeaders,
+ bool enforceQuota) = 0;
- DiskLoc _getNextRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
- DiskLoc _getPrevRecordInExtent( OperationContext* txn, const DiskLoc& loc ) const;
+ // TODO: document, remove, what have you
+ virtual void addDeletedRec(OperationContext* txn, const DiskLoc& dloc) = 0;
- /**
- * finds the first suitable DiskLoc for data
- * will return the DiskLoc of a newly created DeletedRecord
- */
- DiskLoc _findFirstSpot( OperationContext* txn, const DiskLoc& extDiskLoc, Extent* e );
+ // TODO: another sad one
+ virtual DeletedRecord* drec(const DiskLoc& loc) const;
- /** add a record to the end of the linked list chain within this extent.
- require: you must have already declared write intent for the record header.
- */
- void _addRecordToRecListInExtent(OperationContext* txn, MmapV1RecordHeader* r, DiskLoc loc);
+ // just a wrapper for _extentManager->getExtent( loc );
+ Extent* _getExtent(OperationContext* txn, const DiskLoc& loc) const;
- /**
- * internal
- * doesn't check inputs or change padding
- */
- StatusWith<RecordId> _insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota );
+ DiskLoc _getExtentLocForRecord(OperationContext* txn, const DiskLoc& loc) const;
- std::unique_ptr<RecordStoreV1MetaData> _details;
- ExtentManager* _extentManager;
- bool _isSystemIndexes;
+ DiskLoc _getNextRecord(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc _getPrevRecord(OperationContext* txn, const DiskLoc& loc) const;
- friend class RecordStoreV1RepairCursor;
- };
+ DiskLoc _getNextRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
+ DiskLoc _getPrevRecordInExtent(OperationContext* txn, const DiskLoc& loc) const;
/**
- * Iterates over all records within a single extent.
- *
- * EOF at end of extent, even if there are more extents.
+ * finds the first suitable DiskLoc for data
+ * will return the DiskLoc of a newly created DeletedRecord
*/
- class RecordStoreV1Base::IntraExtentIterator final : public RecordCursor {
- public:
- IntraExtentIterator(OperationContext* txn,
- DiskLoc start,
- const RecordStoreV1Base* rs,
- bool forward = true)
- : _txn(txn), _curr(start), _rs(rs), _forward(forward) {}
-
- boost::optional<Record> next() final;
- boost::optional<Record> seekExact(const RecordId& id) final;
- void invalidate(const RecordId& dl) final;
- void savePositioned() final {}
- bool restore(OperationContext* txn) final { return true; }
- std::unique_ptr<RecordFetcher> fetcherForNext() const final;
+ DiskLoc _findFirstSpot(OperationContext* txn, const DiskLoc& extDiskLoc, Extent* e);
- private:
- virtual const MmapV1RecordHeader* recordFor( const DiskLoc& loc ) const {
- return _rs->recordFor(loc);
- }
+ /** add a record to the end of the linked list chain within this extent.
+ require: you must have already declared write intent for the record header.
+ */
+ void _addRecordToRecListInExtent(OperationContext* txn, MmapV1RecordHeader* r, DiskLoc loc);
+
+ /**
+ * internal
+ * doesn't check inputs or change padding
+ */
+ StatusWith<RecordId> _insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota);
- void advance();
+ std::unique_ptr<RecordStoreV1MetaData> _details;
+ ExtentManager* _extentManager;
+ bool _isSystemIndexes;
- OperationContext* _txn;
- DiskLoc _curr;
- const RecordStoreV1Base* _rs;
- bool _forward;
- };
+ friend class RecordStoreV1RepairCursor;
+};
+/**
+ * Iterates over all records within a single extent.
+ *
+ * EOF at end of extent, even if there are more extents.
+ */
+class RecordStoreV1Base::IntraExtentIterator final : public RecordCursor {
+public:
+ IntraExtentIterator(OperationContext* txn,
+ DiskLoc start,
+ const RecordStoreV1Base* rs,
+ bool forward = true)
+ : _txn(txn), _curr(start), _rs(rs), _forward(forward) {}
+
+ boost::optional<Record> next() final;
+ boost::optional<Record> seekExact(const RecordId& id) final;
+ void invalidate(const RecordId& dl) final;
+ void savePositioned() final {}
+ bool restore(OperationContext* txn) final {
+ return true;
+ }
+ std::unique_ptr<RecordFetcher> fetcherForNext() const final;
+
+private:
+ virtual const MmapV1RecordHeader* recordFor(const DiskLoc& loc) const {
+ return _rs->recordFor(loc);
+ }
+
+ void advance();
+
+ OperationContext* _txn;
+ DiskLoc _curr;
+ const RecordStoreV1Base* _rs;
+ bool _forward;
+};
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index a41dd66ab1e..2674861bdb1 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -62,658 +62,630 @@
namespace mongo {
- using std::dec;
- using std::endl;
- using std::hex;
- using std::vector;
-
- CappedRecordStoreV1::CappedRecordStoreV1( OperationContext* txn,
- CappedDocumentDeleteCallback* collection,
- StringData ns,
- RecordStoreV1MetaData* details,
- ExtentManager* em,
- bool isSystemIndexes )
- : RecordStoreV1Base( ns, details, em, isSystemIndexes ),
- _deleteCallback( collection ) {
-
- DiskLoc extentLoc = details->firstExtent(txn);
- while ( !extentLoc.isNull() ) {
- _extentAdvice.push_back( _extentManager->cacheHint( extentLoc,
- ExtentManager::Sequential ) );
- Extent* extent = em->getExtent( extentLoc );
- extentLoc = extent->xnext;
- }
-
- // this is for VERY VERY old versions of capped collections
- cappedCheckMigrate(txn);
+using std::dec;
+using std::endl;
+using std::hex;
+using std::vector;
+
+CappedRecordStoreV1::CappedRecordStoreV1(OperationContext* txn,
+ CappedDocumentDeleteCallback* collection,
+ StringData ns,
+ RecordStoreV1MetaData* details,
+ ExtentManager* em,
+ bool isSystemIndexes)
+ : RecordStoreV1Base(ns, details, em, isSystemIndexes), _deleteCallback(collection) {
+ DiskLoc extentLoc = details->firstExtent(txn);
+ while (!extentLoc.isNull()) {
+ _extentAdvice.push_back(_extentManager->cacheHint(extentLoc, ExtentManager::Sequential));
+ Extent* extent = em->getExtent(extentLoc);
+ extentLoc = extent->xnext;
}
- CappedRecordStoreV1::~CappedRecordStoreV1() {
- }
+ // this is for VERY VERY old versions of capped collections
+ cappedCheckMigrate(txn);
+}
- StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord( OperationContext* txn,
- int lenToAlloc,
- bool enforceQuota ) {
- {
- // align very slightly.
- lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
- }
+CappedRecordStoreV1::~CappedRecordStoreV1() {}
- if ( lenToAlloc > theCapExtent()->length ) {
- // the extent check is a way to try and improve performance
- // since we have to iterate all the extents (for now) to get
- // storage size
- if ( lenToAlloc > storageSize(txn) ) {
- return StatusWith<DiskLoc>( ErrorCodes::DocTooLargeForCapped,
- mongoutils::str::stream()
- << "document is larger than capped size "
- << lenToAlloc << " > " << storageSize(txn),
- 16328 );
- }
+StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
+ int lenToAlloc,
+ bool enforceQuota) {
+ {
+ // align very slightly.
+ lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
+ }
+ if (lenToAlloc > theCapExtent()->length) {
+ // the extent check is a way to try and improve performance
+ // since we have to iterate all the extents (for now) to get
+ // storage size
+ if (lenToAlloc > storageSize(txn)) {
+ return StatusWith<DiskLoc>(ErrorCodes::DocTooLargeForCapped,
+ mongoutils::str::stream()
+ << "document is larger than capped size " << lenToAlloc
+ << " > " << storageSize(txn),
+ 16328);
}
- DiskLoc loc;
- { // do allocation
-
- // signal done allocating new extents.
- if ( !cappedLastDelRecLastExtent().isValid() )
- setLastDelRecLastExtent( txn, DiskLoc() );
+ }
+ DiskLoc loc;
+ { // do allocation
- invariant( lenToAlloc < 400000000 );
- int passes = 0;
+ // signal done allocating new extents.
+ if (!cappedLastDelRecLastExtent().isValid())
+ setLastDelRecLastExtent(txn, DiskLoc());
- // delete records until we have room and the max # objects limit achieved.
+ invariant(lenToAlloc < 400000000);
+ int passes = 0;
- /* this fails on a rename -- that is ok but must keep commented out */
- //invariant( theCapExtent()->ns == ns );
+ // delete records until we have room and the max # objects limit achieved.
- theCapExtent()->assertOk();
- DiskLoc firstEmptyExtent; // This prevents us from infinite looping.
- while ( 1 ) {
- if ( _details->numRecords() < _details->maxCappedDocs() ) {
- loc = __capAlloc( txn, lenToAlloc );
- if ( !loc.isNull() )
- break;
- }
+ /* this fails on a rename -- that is ok but must keep commented out */
+ // invariant( theCapExtent()->ns == ns );
- // If on first iteration through extents, don't delete anything.
- if ( !_details->capFirstNewRecord().isValid() ) {
- advanceCapExtent( txn, _ns );
+ theCapExtent()->assertOk();
+ DiskLoc firstEmptyExtent; // This prevents us from infinite looping.
+ while (1) {
+ if (_details->numRecords() < _details->maxCappedDocs()) {
+ loc = __capAlloc(txn, lenToAlloc);
+ if (!loc.isNull())
+ break;
+ }
- if ( _details->capExtent() != _details->firstExtent(txn) )
- _details->setCapFirstNewRecord( txn, DiskLoc().setInvalid() );
- // else signal done with first iteration through extents.
- continue;
- }
+ // If on first iteration through extents, don't delete anything.
+ if (!_details->capFirstNewRecord().isValid()) {
+ advanceCapExtent(txn, _ns);
- if ( !_details->capFirstNewRecord().isNull() &&
- theCapExtent()->firstRecord == _details->capFirstNewRecord() ) {
- // We've deleted all records that were allocated on the previous
- // iteration through this extent.
- advanceCapExtent( txn, _ns );
- continue;
- }
+ if (_details->capExtent() != _details->firstExtent(txn))
+ _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
+ // else signal done with first iteration through extents.
+ continue;
+ }
- if ( theCapExtent()->firstRecord.isNull() ) {
- if ( firstEmptyExtent.isNull() )
- firstEmptyExtent = _details->capExtent();
- advanceCapExtent( txn, _ns );
- if ( firstEmptyExtent == _details->capExtent() ) {
- // All records have been deleted but there is still no room for this record.
- // Nothing we can do but fail.
- _maybeComplain( txn, lenToAlloc );
- return StatusWith<DiskLoc>(
- ErrorCodes::DocTooLargeForCapped,
- str::stream() << "document doesn't fit in capped collection."
- << " size: " << lenToAlloc
- << " storageSize:" << storageSize(txn),
- 28575);
- }
- continue;
- }
+ if (!_details->capFirstNewRecord().isNull() &&
+ theCapExtent()->firstRecord == _details->capFirstNewRecord()) {
+ // We've deleted all records that were allocated on the previous
+ // iteration through this extent.
+ advanceCapExtent(txn, _ns);
+ continue;
+ }
- const RecordId fr = theCapExtent()->firstRecord.toRecordId();
- Status status = _deleteCallback->aboutToDeleteCapped( txn, fr, dataFor(txn, fr) );
- if ( !status.isOK() )
- return StatusWith<DiskLoc>( status );
- deleteRecord( txn, fr );
-
- _compact(txn);
- if ((++passes % 5000) == 0) {
- StringBuilder sb;
- log() << "passes = " << passes << " in CappedRecordStoreV1::allocRecord:"
- << " ns: " << _ns
- << ", lenToAlloc: " << lenToAlloc
- << ", maxCappedDocs: " << _details->maxCappedDocs()
- << ", nrecords: " << _details->numRecords()
- << ", datasize: " << _details->dataSize()
- << ". Continuing to delete old records to make room.";
+ if (theCapExtent()->firstRecord.isNull()) {
+ if (firstEmptyExtent.isNull())
+ firstEmptyExtent = _details->capExtent();
+ advanceCapExtent(txn, _ns);
+ if (firstEmptyExtent == _details->capExtent()) {
+ // All records have been deleted but there is still no room for this record.
+ // Nothing we can do but fail.
+ _maybeComplain(txn, lenToAlloc);
+ return StatusWith<DiskLoc>(ErrorCodes::DocTooLargeForCapped,
+ str::stream()
+ << "document doesn't fit in capped collection."
+ << " size: " << lenToAlloc
+ << " storageSize:" << storageSize(txn),
+ 28575);
}
+ continue;
}
- // Remember first record allocated on this iteration through capExtent.
- if ( _details->capFirstNewRecord().isValid() && _details->capFirstNewRecord().isNull() )
- _details->setCapFirstNewRecord( txn, loc );
+ const RecordId fr = theCapExtent()->firstRecord.toRecordId();
+ Status status = _deleteCallback->aboutToDeleteCapped(txn, fr, dataFor(txn, fr));
+ if (!status.isOK())
+ return StatusWith<DiskLoc>(status);
+ deleteRecord(txn, fr);
+
+ _compact(txn);
+ if ((++passes % 5000) == 0) {
+ StringBuilder sb;
+ log() << "passes = " << passes << " in CappedRecordStoreV1::allocRecord:"
+ << " ns: " << _ns << ", lenToAlloc: " << lenToAlloc
+ << ", maxCappedDocs: " << _details->maxCappedDocs()
+ << ", nrecords: " << _details->numRecords()
+ << ", datasize: " << _details->dataSize()
+ << ". Continuing to delete old records to make room.";
+ }
}
- invariant( !loc.isNull() );
+ // Remember first record allocated on this iteration through capExtent.
+ if (_details->capFirstNewRecord().isValid() && _details->capFirstNewRecord().isNull())
+ _details->setCapFirstNewRecord(txn, loc);
+ }
- // possibly slice up if we've allocated too much space
+ invariant(!loc.isNull());
- DeletedRecord *r = drec( loc );
+ // possibly slice up if we've allocated too much space
- /* note we want to grab from the front so our next pointers on disk tend
- to go in a forward direction which is important for performance. */
- int regionlen = r->lengthWithHeaders();
- invariant( r->extentOfs() < loc.getOfs() );
+ DeletedRecord* r = drec(loc);
- int left = regionlen - lenToAlloc;
+ /* note we want to grab from the front so our next pointers on disk tend
+ to go in a forward direction which is important for performance. */
+ int regionlen = r->lengthWithHeaders();
+ invariant(r->extentOfs() < loc.getOfs());
- /* split off some for further use. */
- txn->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
- DiskLoc newDelLoc = loc;
- newDelLoc.inc(lenToAlloc);
- DeletedRecord* newDel = drec( newDelLoc );
- DeletedRecord* newDelW = txn->recoveryUnit()->writing(newDel);
- newDelW->extentOfs() = r->extentOfs();
- newDelW->lengthWithHeaders() = left;
- newDelW->nextDeleted().Null();
+ int left = regionlen - lenToAlloc;
- addDeletedRec(txn, newDelLoc);
+ /* split off some for further use. */
+ txn->recoveryUnit()->writingInt(r->lengthWithHeaders()) = lenToAlloc;
+ DiskLoc newDelLoc = loc;
+ newDelLoc.inc(lenToAlloc);
+ DeletedRecord* newDel = drec(newDelLoc);
+ DeletedRecord* newDelW = txn->recoveryUnit()->writing(newDel);
+ newDelW->extentOfs() = r->extentOfs();
+ newDelW->lengthWithHeaders() = left;
+ newDelW->nextDeleted().Null();
- return StatusWith<DiskLoc>( loc );
- }
+ addDeletedRec(txn, newDelLoc);
- Status CappedRecordStoreV1::truncate(OperationContext* txn) {
- setLastDelRecLastExtent( txn, DiskLoc() );
- setListOfAllDeletedRecords( txn, DiskLoc() );
-
- // preserve firstExtent/lastExtent
- _details->setCapExtent( txn, _details->firstExtent(txn) );
- _details->setStats( txn, 0, 0 );
- // preserve lastExtentSize
- // nIndexes preserve 0
- // capped preserve true
- // max preserve
- // paddingFactor is unused
- _details->setCapFirstNewRecord( txn, DiskLoc().setInvalid() );
- setLastDelRecLastExtent( txn, DiskLoc().setInvalid() );
- // dataFileVersion preserve
- // indexFileVersion preserve
-
- // Reset all existing extents and recreate the deleted list.
- Extent* ext;
- for( DiskLoc extLoc = _details->firstExtent(txn);
- !extLoc.isNull();
- extLoc = ext->xnext ) {
- ext = _extentManager->getExtent(extLoc);
-
- txn->recoveryUnit()->writing( &ext->firstRecord )->Null();
- txn->recoveryUnit()->writing( &ext->lastRecord )->Null();
-
- addDeletedRec( txn, _findFirstSpot( txn, extLoc, ext ) );
- }
+ return StatusWith<DiskLoc>(loc);
+}
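Illustrative note (not part of the diff): the tail of allocRecord() carves the requested length out of the front of the deleted region it found and turns the remainder into a new DeletedRecord. The standalone sketch below walks that arithmetic with plain ints standing in for on-disk offsets; all numbers are made up.

    #include <cassert>

    int main() {
        int lenToAlloc = 221;
        lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;  // same 4-byte alignment as above
        assert(lenToAlloc == 224);

        int regionOfs = 4096;  // offset of the deleted region we grabbed
        int regionlen = 1024;  // its lengthWithHeaders()

        int left = regionlen - lenToAlloc;       // what remains after carving off the record
        int newDelOfs = regionOfs + lenToAlloc;  // the leftover becomes a new DeletedRecord

        assert(left == 800);
        assert(newDelOfs == 4320);
        return 0;
    }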
- return Status::OK();
+Status CappedRecordStoreV1::truncate(OperationContext* txn) {
+ setLastDelRecLastExtent(txn, DiskLoc());
+ setListOfAllDeletedRecords(txn, DiskLoc());
+
+ // preserve firstExtent/lastExtent
+ _details->setCapExtent(txn, _details->firstExtent(txn));
+ _details->setStats(txn, 0, 0);
+ // preserve lastExtentSize
+ // nIndexes preserve 0
+ // capped preserve true
+ // max preserve
+ // paddingFactor is unused
+ _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
+ setLastDelRecLastExtent(txn, DiskLoc().setInvalid());
+ // dataFileVersion preserve
+ // indexFileVersion preserve
+
+ // Reset all existing extents and recreate the deleted list.
+ Extent* ext;
+ for (DiskLoc extLoc = _details->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
+ ext = _extentManager->getExtent(extLoc);
+
+ txn->recoveryUnit()->writing(&ext->firstRecord)->Null();
+ txn->recoveryUnit()->writing(&ext->lastRecord)->Null();
+
+ addDeletedRec(txn, _findFirstSpot(txn, extLoc, ext));
}
- void CappedRecordStoreV1::temp_cappedTruncateAfter( OperationContext* txn,
- RecordId end,
- bool inclusive ) {
- cappedTruncateAfter( txn, _ns.c_str(), DiskLoc::fromRecordId(end), inclusive );
- }
+ return Status::OK();
+}
- /* combine adjacent deleted records *for the current extent* of the capped collection
+void CappedRecordStoreV1::temp_cappedTruncateAfter(OperationContext* txn,
+ RecordId end,
+ bool inclusive) {
+ cappedTruncateAfter(txn, _ns.c_str(), DiskLoc::fromRecordId(end), inclusive);
+}
- this is O(n^2) but we call it for capped tables where typically n==1 or 2!
- (or 3...there will be a little unused sliver at the end of the extent.)
- */
- void CappedRecordStoreV1::_compact(OperationContext* txn) {
- DDD( "CappedRecordStoreV1::compact enter" );
+/* combine adjacent deleted records *for the current extent* of the capped collection
- vector<DiskLoc> drecs;
+ this is O(n^2) but we call it for capped tables where typically n==1 or 2!
+ (or 3...there will be a little unused sliver at the end of the extent.)
+*/
+void CappedRecordStoreV1::_compact(OperationContext* txn) {
+ DDD("CappedRecordStoreV1::compact enter");
- // Pull out capExtent's DRs from deletedList
- DiskLoc i = cappedFirstDeletedInCurExtent();
- for (; !i.isNull() && inCapExtent( i ); i = deletedRecordFor( i )->nextDeleted() ) {
- DDD( "\t" << i );
- drecs.push_back( i );
- }
+ vector<DiskLoc> drecs;
+
+ // Pull out capExtent's DRs from deletedList
+ DiskLoc i = cappedFirstDeletedInCurExtent();
+ for (; !i.isNull() && inCapExtent(i); i = deletedRecordFor(i)->nextDeleted()) {
+ DDD("\t" << i);
+ drecs.push_back(i);
+ }
- setFirstDeletedInCurExtent( txn, i );
+ setFirstDeletedInCurExtent(txn, i);
- std::sort( drecs.begin(), drecs.end() );
- DDD( "\t drecs.size(): " << drecs.size() );
+ std::sort(drecs.begin(), drecs.end());
+ DDD("\t drecs.size(): " << drecs.size());
- vector<DiskLoc>::const_iterator j = drecs.begin();
- invariant( j != drecs.end() );
- DiskLoc a = *j;
- while ( 1 ) {
+ vector<DiskLoc>::const_iterator j = drecs.begin();
+ invariant(j != drecs.end());
+ DiskLoc a = *j;
+ while (1) {
+ j++;
+ if (j == drecs.end()) {
+ DDD("\t compact adddelrec");
+ addDeletedRec(txn, a);
+ break;
+ }
+ DiskLoc b = *j;
+ while (a.a() == b.a() && a.getOfs() + drec(a)->lengthWithHeaders() == b.getOfs()) {
+ // a & b are adjacent. merge.
+ txn->recoveryUnit()->writingInt(drec(a)->lengthWithHeaders()) +=
+ drec(b)->lengthWithHeaders();
j++;
- if ( j == drecs.end() ) {
- DDD( "\t compact adddelrec" );
+ if (j == drecs.end()) {
+ DDD("\t compact adddelrec2");
addDeletedRec(txn, a);
- break;
+ return;
}
- DiskLoc b = *j;
- while ( a.a() == b.a() &&
- a.getOfs() + drec( a )->lengthWithHeaders() == b.getOfs() ) {
-
- // a & b are adjacent. merge.
- txn->recoveryUnit()->writingInt( drec(a)->lengthWithHeaders() ) += drec(b)->lengthWithHeaders();
- j++;
- if ( j == drecs.end() ) {
- DDD( "\t compact adddelrec2" );
- addDeletedRec(txn, a);
- return;
- }
- b = *j;
- }
- DDD( "\t compact adddelrec3" );
- addDeletedRec(txn, a);
- a = b;
+ b = *j;
}
-
- }
-
- DiskLoc CappedRecordStoreV1::cappedFirstDeletedInCurExtent() const {
- if ( cappedLastDelRecLastExtent().isNull() )
- return cappedListOfAllDeletedRecords();
- else
- return drec(cappedLastDelRecLastExtent())->nextDeleted();
+ DDD("\t compact adddelrec3");
+ addDeletedRec(txn, a);
+ a = b;
}
+}
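Illustrative note (not part of the diff): _compact() merges two deleted records only when they sit in the same file and the first one ends exactly where the second begins. The self-contained sketch below shows that adjacency test with a tiny stand-in for DiskLoc/DeletedRecord; the type and field names are invented for this sketch.

    #include <cassert>

    struct ToyDeletedRec {
        int file;  // stands in for DiskLoc::a()
        int ofs;   // stands in for DiskLoc::getOfs()
        int len;   // stands in for lengthWithHeaders()
    };

    bool adjacent(const ToyDeletedRec& a, const ToyDeletedRec& b) {
        return a.file == b.file && a.ofs + a.len == b.ofs;
    }

    int main() {
        ToyDeletedRec a{0, 1000, 96};
        ToyDeletedRec b{0, 1096, 64};
        assert(adjacent(a, b));  // a ends at 1096, exactly where b starts
        a.len += b.len;          // merge, as _compact() does via writingInt()
        assert(a.len == 160);
        assert(!adjacent(a, ToyDeletedRec{1, 1160, 32}));  // different file: no merge
        return 0;
    }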
- void CappedRecordStoreV1::setFirstDeletedInCurExtent( OperationContext* txn,
- const DiskLoc& loc ) {
- if ( cappedLastDelRecLastExtent().isNull() )
- setListOfAllDeletedRecords( txn, loc );
- else
- *txn->recoveryUnit()->writing( &drec(cappedLastDelRecLastExtent())->nextDeleted() ) = loc;
- }
+DiskLoc CappedRecordStoreV1::cappedFirstDeletedInCurExtent() const {
+ if (cappedLastDelRecLastExtent().isNull())
+ return cappedListOfAllDeletedRecords();
+ else
+ return drec(cappedLastDelRecLastExtent())->nextDeleted();
+}
- void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* txn) {
- // migrate old RecordStoreV1MetaData format
- if ( _details->capExtent().a() == 0 && _details->capExtent().getOfs() == 0 ) {
- WriteUnitOfWork wunit(txn);
- _details->setCapFirstNewRecord( txn, DiskLoc().setInvalid() );
- // put all the DeletedRecords in cappedListOfAllDeletedRecords()
- for ( int i = 1; i < Buckets; ++i ) {
- DiskLoc first = _details->deletedListEntry( i );
- if ( first.isNull() )
- continue;
- DiskLoc last = first;
- for (; !drec(last)->nextDeleted().isNull(); last = drec(last)->nextDeleted() );
- *txn->recoveryUnit()->writing(&drec(last)->nextDeleted()) = cappedListOfAllDeletedRecords();
- setListOfAllDeletedRecords( txn, first );
- _details->setDeletedListEntry(txn, i, DiskLoc());
- }
- // NOTE cappedLastDelRecLastExtent() set to DiskLoc() in above
+void CappedRecordStoreV1::setFirstDeletedInCurExtent(OperationContext* txn, const DiskLoc& loc) {
+ if (cappedLastDelRecLastExtent().isNull())
+ setListOfAllDeletedRecords(txn, loc);
+ else
+ *txn->recoveryUnit()->writing(&drec(cappedLastDelRecLastExtent())->nextDeleted()) = loc;
+}
- // Last, in case we're killed before getting here
- _details->setCapExtent( txn, _details->firstExtent(txn) );
- wunit.commit();
+void CappedRecordStoreV1::cappedCheckMigrate(OperationContext* txn) {
+ // migrate old RecordStoreV1MetaData format
+ if (_details->capExtent().a() == 0 && _details->capExtent().getOfs() == 0) {
+ WriteUnitOfWork wunit(txn);
+ _details->setCapFirstNewRecord(txn, DiskLoc().setInvalid());
+ // put all the DeletedRecords in cappedListOfAllDeletedRecords()
+ for (int i = 1; i < Buckets; ++i) {
+ DiskLoc first = _details->deletedListEntry(i);
+ if (first.isNull())
+ continue;
+ DiskLoc last = first;
+ for (; !drec(last)->nextDeleted().isNull(); last = drec(last)->nextDeleted())
+ ;
+ *txn->recoveryUnit()->writing(&drec(last)->nextDeleted()) =
+ cappedListOfAllDeletedRecords();
+ setListOfAllDeletedRecords(txn, first);
+ _details->setDeletedListEntry(txn, i, DiskLoc());
}
+ // NOTE cappedLastDelRecLastExtent() set to DiskLoc() in above
+
+ // Last, in case we're killed before getting here
+ _details->setCapExtent(txn, _details->firstExtent(txn));
+ wunit.commit();
}
+}
- bool CappedRecordStoreV1::inCapExtent( const DiskLoc &dl ) const {
- invariant( !dl.isNull() );
+bool CappedRecordStoreV1::inCapExtent(const DiskLoc& dl) const {
+ invariant(!dl.isNull());
- if ( dl.a() != _details->capExtent().a() )
- return false;
+ if (dl.a() != _details->capExtent().a())
+ return false;
- if ( dl.getOfs() < _details->capExtent().getOfs() )
- return false;
+ if (dl.getOfs() < _details->capExtent().getOfs())
+ return false;
- const Extent* e = theCapExtent();
- int end = _details->capExtent().getOfs() + e->length;
- return dl.getOfs() <= end;
- }
+ const Extent* e = theCapExtent();
+ int end = _details->capExtent().getOfs() + e->length;
+ return dl.getOfs() <= end;
+}
- bool CappedRecordStoreV1::nextIsInCapExtent( const DiskLoc &dl ) const {
- invariant( !dl.isNull() );
- DiskLoc next = drec(dl)->nextDeleted();
- if ( next.isNull() )
- return false;
- return inCapExtent( next );
+bool CappedRecordStoreV1::nextIsInCapExtent(const DiskLoc& dl) const {
+ invariant(!dl.isNull());
+ DiskLoc next = drec(dl)->nextDeleted();
+ if (next.isNull())
+ return false;
+ return inCapExtent(next);
+}
+
+void CappedRecordStoreV1::advanceCapExtent(OperationContext* txn, StringData ns) {
+ // We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
+ // (or DiskLoc() if new capExtent == firstExtent)
+ if (_details->capExtent() == _details->lastExtent(txn))
+ setLastDelRecLastExtent(txn, DiskLoc());
+ else {
+ DiskLoc i = cappedFirstDeletedInCurExtent();
+ for (; !i.isNull() && nextIsInCapExtent(i); i = drec(i)->nextDeleted())
+ ;
+ setLastDelRecLastExtent(txn, i);
}
- void CappedRecordStoreV1::advanceCapExtent( OperationContext* txn, StringData ns ) {
- // We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
- // (or DiskLoc() if new capExtent == firstExtent)
- if ( _details->capExtent() == _details->lastExtent(txn) )
- setLastDelRecLastExtent( txn, DiskLoc() );
- else {
- DiskLoc i = cappedFirstDeletedInCurExtent();
- for (; !i.isNull() && nextIsInCapExtent( i ); i = drec(i)->nextDeleted() );
- setLastDelRecLastExtent( txn, i );
- }
+ _details->setCapExtent(
+ txn, theCapExtent()->xnext.isNull() ? _details->firstExtent(txn) : theCapExtent()->xnext);
- _details->setCapExtent( txn,
- theCapExtent()->xnext.isNull() ? _details->firstExtent(txn)
- : theCapExtent()->xnext );
+ /* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
+ // dassert( theCapExtent()->ns == ns );
- /* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
- //dassert( theCapExtent()->ns == ns );
+ theCapExtent()->assertOk();
+ _details->setCapFirstNewRecord(txn, DiskLoc());
+}
- theCapExtent()->assertOk();
- _details->setCapFirstNewRecord( txn, DiskLoc() );
+DiskLoc CappedRecordStoreV1::__capAlloc(OperationContext* txn, int len) {
+ DiskLoc prev = cappedLastDelRecLastExtent();
+ DiskLoc i = cappedFirstDeletedInCurExtent();
+ DiskLoc ret;
+ for (; !i.isNull() && inCapExtent(i); prev = i, i = drec(i)->nextDeleted()) {
+ // We need to keep at least one DR per extent in cappedListOfAllDeletedRecords(),
+ // so make sure there's space to create a DR at the end.
+ if (drec(i)->lengthWithHeaders() >= len + 24) {
+ ret = i;
+ break;
+ }
}
- DiskLoc CappedRecordStoreV1::__capAlloc( OperationContext* txn, int len ) {
- DiskLoc prev = cappedLastDelRecLastExtent();
- DiskLoc i = cappedFirstDeletedInCurExtent();
- DiskLoc ret;
- for (; !i.isNull() && inCapExtent( i ); prev = i, i = drec(i)->nextDeleted() ) {
- // We need to keep at least one DR per extent in cappedListOfAllDeletedRecords(),
- // so make sure there's space to create a DR at the end.
- if ( drec(i)->lengthWithHeaders() >= len + 24 ) {
- ret = i;
- break;
- }
- }
+ /* unlink ourself from the deleted list */
+ if (!ret.isNull()) {
+ if (prev.isNull())
+ setListOfAllDeletedRecords(txn, drec(ret)->nextDeleted());
+ else
+ *txn->recoveryUnit()->writing(&drec(prev)->nextDeleted()) = drec(ret)->nextDeleted();
+ *txn->recoveryUnit()->writing(&drec(ret)->nextDeleted()) =
+ DiskLoc().setInvalid(); // defensive.
+ invariant(drec(ret)->extentOfs() < ret.getOfs());
+ }
- /* unlink ourself from the deleted list */
- if ( !ret.isNull() ) {
- if ( prev.isNull() )
- setListOfAllDeletedRecords( txn, drec(ret)->nextDeleted() );
- else
- *txn->recoveryUnit()->writing(&drec(prev)->nextDeleted()) = drec(ret)->nextDeleted();
- *txn->recoveryUnit()->writing(&drec(ret)->nextDeleted()) = DiskLoc().setInvalid(); // defensive.
- invariant( drec(ret)->extentOfs() < ret.getOfs() );
- }
+ return ret;
+}
- return ret;
+void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* txn) {
+ if (_details->capExtent() == _details->firstExtent(txn)) {
+ // Only one extent of the collection is in use, so there
+ // is no deleted record in a previous extent, so nullify
+ // cappedLastDelRecLastExtent().
+ setLastDelRecLastExtent(txn, DiskLoc());
+ } else {
+ // Scan through all deleted records in the collection
+ // until the last deleted record for the extent prior
+ // to the new capExtent is found. Then set
+ // cappedLastDelRecLastExtent() to that deleted record.
+ DiskLoc i = cappedListOfAllDeletedRecords();
+ for (; !drec(i)->nextDeleted().isNull() && !inCapExtent(drec(i)->nextDeleted());
+ i = drec(i)->nextDeleted())
+ ;
+ // In our capped storage model, every extent must have at least one
+ // deleted record. Here we check that 'i' is not the last deleted
+ // record. (We expect that there will be deleted records in the new
+ // capExtent as well.)
+ invariant(!drec(i)->nextDeleted().isNull());
+ setLastDelRecLastExtent(txn, i);
}
+}
- void CappedRecordStoreV1::cappedTruncateLastDelUpdate(OperationContext* txn) {
- if ( _details->capExtent() == _details->firstExtent(txn) ) {
- // Only one extent of the collection is in use, so there
- // is no deleted record in a previous extent, so nullify
- // cappedLastDelRecLastExtent().
- setLastDelRecLastExtent( txn, DiskLoc() );
+void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
+ const char* ns,
+ DiskLoc end,
+ bool inclusive) {
+ invariant(cappedLastDelRecLastExtent().isValid());
+
+ // We iteratively remove the newest document until the newest document
+ // is 'end', then we remove 'end' if requested.
+ bool foundLast = false;
+ while (1) {
+ if (foundLast) {
+ // 'end' has been found and removed, so break.
+ break;
}
- else {
- // Scan through all deleted records in the collection
- // until the last deleted record for the extent prior
- // to the new capExtent is found. Then set
- // cappedLastDelRecLastExtent() to that deleted record.
- DiskLoc i = cappedListOfAllDeletedRecords();
- for( ;
- !drec(i)->nextDeleted().isNull() &&
- !inCapExtent( drec(i)->nextDeleted() );
- i = drec(i)->nextDeleted() );
- // In our capped storage model, every extent must have at least one
- // deleted record. Here we check that 'i' is not the last deleted
- // record. (We expect that there will be deleted records in the new
- // capExtent as well.)
- invariant( !drec(i)->nextDeleted().isNull() );
- setLastDelRecLastExtent( txn, i );
- }
- }
-
- void CappedRecordStoreV1::cappedTruncateAfter(OperationContext* txn,
- const char* ns,
- DiskLoc end,
- bool inclusive) {
- invariant( cappedLastDelRecLastExtent().isValid() );
-
- // We iteratively remove the newest document until the newest document
- // is 'end', then we remove 'end' if requested.
- bool foundLast = false;
- while( 1 ) {
- if ( foundLast ) {
- // 'end' has been found and removed, so break.
+ // 'curr' will point to the newest document in the collection.
+ const DiskLoc curr = theCapExtent()->lastRecord;
+ const RecordId currId = curr.toRecordId();
+ invariant(!curr.isNull());
+ if (curr == end) {
+ if (inclusive) {
+ // 'end' has been found, so break next iteration.
+ foundLast = true;
+ } else {
+ // 'end' has been found, so break.
break;
}
- // 'curr' will point to the newest document in the collection.
- const DiskLoc curr = theCapExtent()->lastRecord;
- const RecordId currId = curr.toRecordId();
- invariant( !curr.isNull() );
- if ( curr == end ) {
- if ( inclusive ) {
- // 'end' has been found, so break next iteration.
- foundLast = true;
- }
- else {
- // 'end' has been found, so break.
- break;
- }
- }
-
- // TODO The algorithm used in this function cannot generate an
- // empty collection, but we could call emptyCappedCollection() in
- // this case instead of asserting.
- uassert( 13415, "emptying the collection is not allowed", _details->numRecords() > 1 );
-
- WriteUnitOfWork wunit(txn);
- // Delete the newest record, and coalesce the new deleted
- // record with existing deleted records.
- Status status = _deleteCallback->aboutToDeleteCapped(txn, currId, dataFor(txn, currId));
- uassertStatusOK( status );
- deleteRecord( txn, currId );
- _compact(txn);
-
- // This is the case where we have not yet had to remove any
- // documents to make room for other documents, and we are allocating
- // documents from free space in fresh extents instead of reusing
- // space from familiar extents.
- if ( !_details->capLooped() ) {
-
- // We just removed the last record from the 'capExtent', and
- // the 'capExtent' can't be empty, so we set 'capExtent' to
- // capExtent's prev extent.
- if ( theCapExtent()->lastRecord.isNull() ) {
- invariant( !theCapExtent()->xprev.isNull() );
- // NOTE Because we didn't delete the last document, and
- // capLooped() is false, capExtent is not the first extent
- // so xprev will be nonnull.
- _details->setCapExtent( txn, theCapExtent()->xprev );
- theCapExtent()->assertOk();
-
- // update cappedLastDelRecLastExtent()
- cappedTruncateLastDelUpdate(txn);
- }
- wunit.commit();
- continue;
- }
-
- // This is the case where capLooped() is true, and we just deleted
- // from capExtent, and we just deleted capFirstNewRecord, which was
- // the last record on the fresh side of capExtent.
- // NOTE In this comparison, curr and potentially capFirstNewRecord
- // may point to invalid data, but we can still compare the
- // references themselves.
- if ( curr == _details->capFirstNewRecord() ) {
-
- // Set 'capExtent' to the first nonempty extent prior to the
- // initial capExtent. There must be such an extent because we
- // have not deleted the last document in the collection. It is
- // possible that all extents other than the capExtent are empty.
- // In this case we will keep the initial capExtent and specify
- // that all records contained within are on the fresh rather than
- // stale side of the extent.
- DiskLoc newCapExtent = _details->capExtent();
- do {
- // Find the previous extent, looping if necessary.
- newCapExtent = ( newCapExtent == _details->firstExtent(txn) ) ?
- _details->lastExtent(txn) :
- _extentManager->getExtent(newCapExtent)->xprev;
- _extentManager->getExtent(newCapExtent)->assertOk();
- }
- while ( _extentManager->getExtent(newCapExtent)->firstRecord.isNull() );
- _details->setCapExtent( txn, newCapExtent );
+ }
- // Place all documents in the new capExtent on the fresh side
- // of the capExtent by setting capFirstNewRecord to the first
- // document in the new capExtent.
- _details->setCapFirstNewRecord( txn, theCapExtent()->firstRecord );
+ // TODO The algorithm used in this function cannot generate an
+ // empty collection, but we could call emptyCappedCollection() in
+ // this case instead of asserting.
+ uassert(13415, "emptying the collection is not allowed", _details->numRecords() > 1);
+
+ WriteUnitOfWork wunit(txn);
+ // Delete the newest record, and coalesce the new deleted
+ // record with existing deleted records.
+ Status status = _deleteCallback->aboutToDeleteCapped(txn, currId, dataFor(txn, currId));
+ uassertStatusOK(status);
+ deleteRecord(txn, currId);
+ _compact(txn);
+
+ // This is the case where we have not yet had to remove any
+ // documents to make room for other documents, and we are allocating
+ // documents from free space in fresh extents instead of reusing
+ // space from familiar extents.
+ if (!_details->capLooped()) {
+ // We just removed the last record from the 'capExtent', and
+ // the 'capExtent' can't be empty, so we set 'capExtent' to
+ // capExtent's prev extent.
+ if (theCapExtent()->lastRecord.isNull()) {
+ invariant(!theCapExtent()->xprev.isNull());
+ // NOTE Because we didn't delete the last document, and
+ // capLooped() is false, capExtent is not the first extent
+ // so xprev will be nonnull.
+ _details->setCapExtent(txn, theCapExtent()->xprev);
+ theCapExtent()->assertOk();
// update cappedLastDelRecLastExtent()
cappedTruncateLastDelUpdate(txn);
}
-
wunit.commit();
+ continue;
}
- }
- DiskLoc CappedRecordStoreV1::cappedListOfAllDeletedRecords() const {
- return _details->deletedListEntry(0);
- }
+ // This is the case where capLooped() is true, and we just deleted
+ // from capExtent, and we just deleted capFirstNewRecord, which was
+ // the last record on the fresh side of capExtent.
+ // NOTE In this comparison, curr and potentially capFirstNewRecord
+ // may point to invalid data, but we can still compare the
+ // references themselves.
+ if (curr == _details->capFirstNewRecord()) {
+ // Set 'capExtent' to the first nonempty extent prior to the
+ // initial capExtent. There must be such an extent because we
+ // have not deleted the last document in the collection. It is
+ // possible that all extents other than the capExtent are empty.
+ // In this case we will keep the initial capExtent and specify
+ // that all records contained within are on the fresh rather than
+ // stale side of the extent.
+ DiskLoc newCapExtent = _details->capExtent();
+ do {
+ // Find the previous extent, looping if necessary.
+ newCapExtent = (newCapExtent == _details->firstExtent(txn))
+ ? _details->lastExtent(txn)
+ : _extentManager->getExtent(newCapExtent)->xprev;
+ _extentManager->getExtent(newCapExtent)->assertOk();
+ } while (_extentManager->getExtent(newCapExtent)->firstRecord.isNull());
+ _details->setCapExtent(txn, newCapExtent);
+
+ // Place all documents in the new capExtent on the fresh side
+ // of the capExtent by setting capFirstNewRecord to the first
+ // document in the new capExtent.
+ _details->setCapFirstNewRecord(txn, theCapExtent()->firstRecord);
+
+ // update cappedLastDelRecLastExtent()
+ cappedTruncateLastDelUpdate(txn);
+ }
- void CappedRecordStoreV1::setListOfAllDeletedRecords( OperationContext* txn,
- const DiskLoc& loc ) {
- return _details->setDeletedListEntry(txn, 0, loc);
+ wunit.commit();
}
+}
- DiskLoc CappedRecordStoreV1::cappedLastDelRecLastExtent() const {
- return _details->deletedListEntry(1);
- }
+DiskLoc CappedRecordStoreV1::cappedListOfAllDeletedRecords() const {
+ return _details->deletedListEntry(0);
+}
- void CappedRecordStoreV1::setLastDelRecLastExtent( OperationContext* txn,
- const DiskLoc& loc ) {
- return _details->setDeletedListEntry(txn, 1, loc);
- }
+void CappedRecordStoreV1::setListOfAllDeletedRecords(OperationContext* txn, const DiskLoc& loc) {
+ return _details->setDeletedListEntry(txn, 0, loc);
+}
- Extent* CappedRecordStoreV1::theCapExtent() const {
- return _extentManager->getExtent(_details->capExtent());
- }
+DiskLoc CappedRecordStoreV1::cappedLastDelRecLastExtent() const {
+ return _details->deletedListEntry(1);
+}
- void CappedRecordStoreV1::addDeletedRec( OperationContext* txn, const DiskLoc& dloc ) {
- DeletedRecord* d = txn->recoveryUnit()->writing( drec( dloc ) );
-
- if ( !cappedLastDelRecLastExtent().isValid() ) {
- // Initial extent allocation. Insert at end.
- d->nextDeleted() = DiskLoc();
- if ( cappedListOfAllDeletedRecords().isNull() )
- setListOfAllDeletedRecords( txn, dloc );
- else {
- DiskLoc i = cappedListOfAllDeletedRecords();
- for (; !drec(i)->nextDeleted().isNull(); i = drec(i)->nextDeleted() )
- ;
- *txn->recoveryUnit()->writing(&drec(i)->nextDeleted()) = dloc;
- }
- }
+void CappedRecordStoreV1::setLastDelRecLastExtent(OperationContext* txn, const DiskLoc& loc) {
+ return _details->setDeletedListEntry(txn, 1, loc);
+}
+
+Extent* CappedRecordStoreV1::theCapExtent() const {
+ return _extentManager->getExtent(_details->capExtent());
+}
+
+void CappedRecordStoreV1::addDeletedRec(OperationContext* txn, const DiskLoc& dloc) {
+ DeletedRecord* d = txn->recoveryUnit()->writing(drec(dloc));
+
+ if (!cappedLastDelRecLastExtent().isValid()) {
+ // Initial extent allocation. Insert at end.
+ d->nextDeleted() = DiskLoc();
+ if (cappedListOfAllDeletedRecords().isNull())
+ setListOfAllDeletedRecords(txn, dloc);
else {
- d->nextDeleted() = cappedFirstDeletedInCurExtent();
- setFirstDeletedInCurExtent( txn, dloc );
- // always _compact() after this so order doesn't matter
+ DiskLoc i = cappedListOfAllDeletedRecords();
+ for (; !drec(i)->nextDeleted().isNull(); i = drec(i)->nextDeleted())
+ ;
+ *txn->recoveryUnit()->writing(&drec(i)->nextDeleted()) = dloc;
}
+ } else {
+ d->nextDeleted() = cappedFirstDeletedInCurExtent();
+ setFirstDeletedInCurExtent(txn, dloc);
+ // always _compact() after this so order doesn't matter
}
+}
- std::unique_ptr<RecordCursor> CappedRecordStoreV1::getCursor(OperationContext* txn,
- bool forward) const {
-
- return stdx::make_unique<CappedRecordStoreV1Iterator>(txn, this, forward);
- }
+std::unique_ptr<RecordCursor> CappedRecordStoreV1::getCursor(OperationContext* txn,
+ bool forward) const {
+ return stdx::make_unique<CappedRecordStoreV1Iterator>(txn, this, forward);
+}
- vector<std::unique_ptr<RecordCursor>> CappedRecordStoreV1::getManyCursors(
- OperationContext* txn) const {
- vector<std::unique_ptr<RecordCursor>> cursors;
+vector<std::unique_ptr<RecordCursor>> CappedRecordStoreV1::getManyCursors(
+ OperationContext* txn) const {
+ vector<std::unique_ptr<RecordCursor>> cursors;
- if (!_details->capLooped()) {
- // if we haven't looped yet, just spit out all extents (same as non-capped impl)
- const Extent* ext;
- for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
- ext = _getExtent(txn, extLoc);
- if (ext->firstRecord.isNull())
- continue;
+ if (!_details->capLooped()) {
+ // if we haven't looped yet, just spit out all extents (same as non-capped impl)
+ const Extent* ext;
+ for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
+ ext = _getExtent(txn, extLoc);
+ if (ext->firstRecord.isNull())
+ continue;
- cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
- }
+ cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
+ txn, ext->firstRecord, this));
}
- else {
- // if we've looped we need to iterate the extents, starting and ending with the
- // capExtent
- const DiskLoc capExtent = details()->capExtent();
- invariant(!capExtent.isNull());
- invariant(capExtent.isValid());
-
- // First do the "old" portion of capExtent if there is any
- DiskLoc extLoc = capExtent;
- {
- const Extent* ext = _getExtent(txn, extLoc);
- if (ext->firstRecord != details()->capFirstNewRecord()) {
- // this means there is old data in capExtent
- cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
- }
-
- extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
- }
-
- // Next handle all the other extents
- while (extLoc != capExtent) {
- const Extent* ext = _getExtent(txn, extLoc);
+ } else {
+ // if we've looped we need to iterate the extents, starting and ending with the
+ // capExtent
+ const DiskLoc capExtent = details()->capExtent();
+ invariant(!capExtent.isNull());
+ invariant(capExtent.isValid());
+
+ // First do the "old" portion of capExtent if there is any
+ DiskLoc extLoc = capExtent;
+ {
+ const Extent* ext = _getExtent(txn, extLoc);
+ if (ext->firstRecord != details()->capFirstNewRecord()) {
+ // this means there is old data in capExtent
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, ext->firstRecord, this));
-
- extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
+ txn, ext->firstRecord, this));
}
- // Finally handle the "new" data in the capExtent
+ extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
+ }
+
+ // Next handle all the other extents
+ while (extLoc != capExtent) {
+ const Extent* ext = _getExtent(txn, extLoc);
cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
- txn, details()->capFirstNewRecord(), this));
+ txn, ext->firstRecord, this));
+
+ extLoc = ext->xnext.isNull() ? details()->firstExtent(txn) : ext->xnext;
}
- return cursors;
+ // Finally handle the "new" data in the capExtent
+ cursors.push_back(stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(
+ txn, details()->capFirstNewRecord(), this));
}
- void CappedRecordStoreV1::_maybeComplain( OperationContext* txn, int len ) const {
- RARELY {
- std::stringstream buf;
- buf << "couldn't make room for record len: " << len << " in capped ns " << _ns << '\n';
- buf << "numRecords: " << numRecords(txn) << '\n';
- int i = 0;
- for ( DiskLoc e = _details->firstExtent(txn);
- !e.isNull();
- e = _extentManager->getExtent( e )->xnext, ++i ) {
- buf << " Extent " << i;
- if ( e == _details->capExtent() )
- buf << " (capExtent)";
- buf << ' ' << e;
- buf << '\n';
-
- buf << " magic: " << hex << _extentManager->getExtent( e )->magic << dec
- << " extent->ns: " << _extentManager->getExtent( e )->nsDiagnostic.toString()
- << '\n';
- buf << " fr: " << _extentManager->getExtent( e )->firstRecord.toString()
- << " lr: " << _extentManager->getExtent( e )->lastRecord.toString()
- << " extent->len: " << _extentManager->getExtent( e )->length << '\n';
- }
-
- warning() << buf.str();
+ return cursors;
+}
- // assume it is unusually large record; if not, something is broken
- fassert( 17438, len * 5 > _details->lastExtentSize(txn) );
+void CappedRecordStoreV1::_maybeComplain(OperationContext* txn, int len) const {
+ RARELY {
+ std::stringstream buf;
+ buf << "couldn't make room for record len: " << len << " in capped ns " << _ns << '\n';
+ buf << "numRecords: " << numRecords(txn) << '\n';
+ int i = 0;
+ for (DiskLoc e = _details->firstExtent(txn); !e.isNull();
+ e = _extentManager->getExtent(e)->xnext, ++i) {
+ buf << " Extent " << i;
+ if (e == _details->capExtent())
+ buf << " (capExtent)";
+ buf << ' ' << e;
+ buf << '\n';
+
+ buf << " magic: " << hex << _extentManager->getExtent(e)->magic << dec
+ << " extent->ns: " << _extentManager->getExtent(e)->nsDiagnostic.toString() << '\n';
+ buf << " fr: " << _extentManager->getExtent(e)->firstRecord.toString()
+ << " lr: " << _extentManager->getExtent(e)->lastRecord.toString()
+ << " extent->len: " << _extentManager->getExtent(e)->length << '\n';
}
- }
-
- DiskLoc CappedRecordStoreV1::firstRecord( OperationContext* txn,
- const DiskLoc &startExtent ) const {
- for (DiskLoc i = startExtent.isNull() ? _details->firstExtent(txn) : startExtent;
- !i.isNull();
- i = _extentManager->getExtent( i )->xnext ) {
- Extent* e = _extentManager->getExtent( i );
+ warning() << buf.str();
- if ( !e->firstRecord.isNull() )
- return e->firstRecord;
- }
- return DiskLoc();
+        // assume it is an unusually large record; if not, something is broken
+ fassert(17438, len * 5 > _details->lastExtentSize(txn));
}
+}
- DiskLoc CappedRecordStoreV1::lastRecord( OperationContext* txn,
- const DiskLoc &startExtent ) const {
- for (DiskLoc i = startExtent.isNull() ? _details->lastExtent(txn) : startExtent;
- !i.isNull();
- i = _extentManager->getExtent( i )->xprev ) {
+DiskLoc CappedRecordStoreV1::firstRecord(OperationContext* txn, const DiskLoc& startExtent) const {
+ for (DiskLoc i = startExtent.isNull() ? _details->firstExtent(txn) : startExtent; !i.isNull();
+ i = _extentManager->getExtent(i)->xnext) {
+ Extent* e = _extentManager->getExtent(i);
- Extent* e = _extentManager->getExtent( i );
- if ( !e->lastRecord.isNull() )
- return e->lastRecord;
- }
- return DiskLoc();
+ if (!e->firstRecord.isNull())
+ return e->firstRecord;
}
+ return DiskLoc();
+}
+DiskLoc CappedRecordStoreV1::lastRecord(OperationContext* txn, const DiskLoc& startExtent) const {
+ for (DiskLoc i = startExtent.isNull() ? _details->lastExtent(txn) : startExtent; !i.isNull();
+ i = _extentManager->getExtent(i)->xprev) {
+ Extent* e = _extentManager->getExtent(i);
+ if (!e->lastRecord.isNull())
+ return e->lastRecord;
+ }
+ return DiskLoc();
+}
}
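
For reference, the addDeletedRec() hunk near the top of this file maintains a singly linked free list of DeletedRecords: while the collection has not yet looped, a freed record is appended to the tail of the list of all deleted records (the loop that walks drec(i)->nextDeleted()); once looped, it is pushed onto the front of the current cap extent's deleted list and a later _compact() restores ordering. Below is a minimal, self-contained sketch of that tail append; it is not MongoDB source, and DeletedRecordNode and appendToDeletedList are invented names used only for illustration.

// Hypothetical stand-in for a DeletedRecord header; only the forward link matters here.
struct DeletedRecordNode {
    DeletedRecordNode* nextDeleted = nullptr;
};

// Append 'node' to the end of the free list rooted at 'head', mirroring the walk over
// drec(i)->nextDeleted() above. The real code performs the final pointer store through
// txn->recoveryUnit()->writing() so the change is journaled; that is elided here.
void appendToDeletedList(DeletedRecordNode*& head, DeletedRecordNode* node) {
    node->nextDeleted = nullptr;
    if (head == nullptr) {
        head = node;  // empty list: the freed record becomes the whole list
        return;
    }
    DeletedRecordNode* i = head;
    while (i->nextDeleted != nullptr)  // find the current tail
        i = i->nextDeleted;
    i->nextDeleted = node;
}
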
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
index 186de786f37..83105fe8ff9 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.h
@@ -38,95 +38,92 @@
namespace mongo {
- class CappedRecordStoreV1 final : public RecordStoreV1Base {
- public:
- CappedRecordStoreV1( OperationContext* txn,
- CappedDocumentDeleteCallback* collection,
- StringData ns,
- RecordStoreV1MetaData* details,
- ExtentManager* em,
- bool isSystemIndexes );
-
- ~CappedRecordStoreV1() final;
-
- const char* name() const final { return "CappedRecordStoreV1"; }
-
- Status truncate(OperationContext* txn) final;
-
- /**
- * Truncate documents newer than the document at 'end' from the capped
- * collection. The collection cannot be completely emptied using this
- * function. An assertion will be thrown if that is attempted.
- * @param inclusive - Truncate 'end' as well iff true
- * XXX: this will go away soon, just needed to move for now
- */
- void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) final;
-
- std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final;
-
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(
- OperationContext* txn) const final;
-
- // Start from firstExtent by default.
- DiskLoc firstRecord( OperationContext* txn,
- const DiskLoc &startExtent = DiskLoc() ) const;
- // Start from lastExtent by default.
- DiskLoc lastRecord( OperationContext* txn,
- const DiskLoc &startExtent = DiskLoc() ) const;
-
- protected:
-
- bool isCapped() const final { return true; }
- bool shouldPadInserts() const final { return false; }
-
- void setCappedDeleteCallback( CappedDocumentDeleteCallback* cb ) final {
- _deleteCallback = cb;
- }
-
- StatusWith<DiskLoc> allocRecord( OperationContext* txn,
- int lengthWithHeaders,
- bool enforceQuota ) final;
-
- void addDeletedRec(OperationContext* txn, const DiskLoc& dloc) final;
-
- private:
- // -- start copy from cap.cpp --
- void _compact(OperationContext* txn);
- DiskLoc cappedFirstDeletedInCurExtent() const;
- void setFirstDeletedInCurExtent( OperationContext* txn, const DiskLoc& loc );
- void cappedCheckMigrate(OperationContext* txn);
- DiskLoc __capAlloc( OperationContext* txn, int len );
- bool inCapExtent( const DiskLoc &dl ) const;
- DiskLoc cappedListOfAllDeletedRecords() const;
- DiskLoc cappedLastDelRecLastExtent() const;
- void setListOfAllDeletedRecords( OperationContext* txn, const DiskLoc& loc );
- void setLastDelRecLastExtent( OperationContext* txn, const DiskLoc& loc );
- Extent *theCapExtent() const;
- bool nextIsInCapExtent( const DiskLoc &dl ) const;
- void advanceCapExtent( OperationContext* txn, StringData ns );
- void cappedTruncateLastDelUpdate(OperationContext* txn);
-
- /**
- * Truncate documents newer than the document at 'end' from the capped
- * collection. The collection cannot be completely emptied using this
- * function. An assertion will be thrown if that is attempted.
- * @param inclusive - Truncate 'end' as well iff true
- */
- void cappedTruncateAfter(OperationContext* txn,
- const char* ns,
- DiskLoc end,
- bool inclusive);
-
- void _maybeComplain( OperationContext* txn, int len ) const;
-
- // -- end copy from cap.cpp --
-
- CappedDocumentDeleteCallback* _deleteCallback;
-
- OwnedPointerVector<ExtentManager::CacheHint> _extentAdvice;
-
- friend class CappedRecordStoreV1Iterator;
- };
-
-
+class CappedRecordStoreV1 final : public RecordStoreV1Base {
+public:
+ CappedRecordStoreV1(OperationContext* txn,
+ CappedDocumentDeleteCallback* collection,
+ StringData ns,
+ RecordStoreV1MetaData* details,
+ ExtentManager* em,
+ bool isSystemIndexes);
+
+ ~CappedRecordStoreV1() final;
+
+ const char* name() const final {
+ return "CappedRecordStoreV1";
+ }
+
+ Status truncate(OperationContext* txn) final;
+
+ /**
+ * Truncate documents newer than the document at 'end' from the capped
+ * collection. The collection cannot be completely emptied using this
+ * function. An assertion will be thrown if that is attempted.
+ * @param inclusive - Truncate 'end' as well iff true
+ * XXX: this will go away soon, just needed to move for now
+ */
+ void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) final;
+
+ std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final;
+
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+
+ // Start from firstExtent by default.
+ DiskLoc firstRecord(OperationContext* txn, const DiskLoc& startExtent = DiskLoc()) const;
+ // Start from lastExtent by default.
+ DiskLoc lastRecord(OperationContext* txn, const DiskLoc& startExtent = DiskLoc()) const;
+
+protected:
+ bool isCapped() const final {
+ return true;
+ }
+ bool shouldPadInserts() const final {
+ return false;
+ }
+
+ void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) final {
+ _deleteCallback = cb;
+ }
+
+ StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ int lengthWithHeaders,
+ bool enforceQuota) final;
+
+ void addDeletedRec(OperationContext* txn, const DiskLoc& dloc) final;
+
+private:
+ // -- start copy from cap.cpp --
+ void _compact(OperationContext* txn);
+ DiskLoc cappedFirstDeletedInCurExtent() const;
+ void setFirstDeletedInCurExtent(OperationContext* txn, const DiskLoc& loc);
+ void cappedCheckMigrate(OperationContext* txn);
+ DiskLoc __capAlloc(OperationContext* txn, int len);
+ bool inCapExtent(const DiskLoc& dl) const;
+ DiskLoc cappedListOfAllDeletedRecords() const;
+ DiskLoc cappedLastDelRecLastExtent() const;
+ void setListOfAllDeletedRecords(OperationContext* txn, const DiskLoc& loc);
+ void setLastDelRecLastExtent(OperationContext* txn, const DiskLoc& loc);
+ Extent* theCapExtent() const;
+ bool nextIsInCapExtent(const DiskLoc& dl) const;
+ void advanceCapExtent(OperationContext* txn, StringData ns);
+ void cappedTruncateLastDelUpdate(OperationContext* txn);
+
+ /**
+ * Truncate documents newer than the document at 'end' from the capped
+ * collection. The collection cannot be completely emptied using this
+ * function. An assertion will be thrown if that is attempted.
+ * @param inclusive - Truncate 'end' as well iff true
+ */
+ void cappedTruncateAfter(OperationContext* txn, const char* ns, DiskLoc end, bool inclusive);
+
+ void _maybeComplain(OperationContext* txn, int len) const;
+
+ // -- end copy from cap.cpp --
+
+ CappedDocumentDeleteCallback* _deleteCallback;
+
+ OwnedPointerVector<ExtentManager::CacheHint> _extentAdvice;
+
+ friend class CappedRecordStoreV1Iterator;
+};
}
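
The header above is only reflowed, but it is the surface a caller sees, so a hedged usage sketch may help. It assumes Record exposes 'id' and 'data' members (as the brace-initialization in the .cpp suggests) and that 'processRecord' plus the surrounding mongo headers exist in the caller's context; it is not code from this change.

// Drain every record reachable through the per-extent cursors. getManyCursors() gives no
// cross-cursor ordering guarantee; use getCursor() when insertion order matters.
void scanAllExtents(OperationContext* txn, const CappedRecordStoreV1& rs) {
    std::vector<std::unique_ptr<RecordCursor>> cursors = rs.getManyCursors(txn);
    for (auto& cursor : cursors) {
        while (boost::optional<Record> rec = cursor->next()) {
            processRecord(rec->id, rec->data);  // hypothetical consumer
        }
    }
}
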
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
index ea77d224488..353a7f39c0c 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.cpp
@@ -36,181 +36,181 @@
namespace mongo {
- //
- // Capped collection traversal
- //
- CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* txn,
- const CappedRecordStoreV1* collection,
- bool forward)
- : _txn(txn), _recordStore(collection), _forward(forward) {
-
- const RecordStoreV1MetaData* nsd = _recordStore->details();
-
- // If a start position isn't specified, we fill one out from the start of the
- // collection.
- if (_forward) {
- // Going forwards.
- if (!nsd->capLooped()) {
- // If our capped collection doesn't loop around, the first record is easy.
- _curr = collection->firstRecord(_txn);
- }
- else {
- // Our capped collection has "looped' around.
- // Copied verbatim from ForwardCappedCursor::init.
- // TODO ELABORATE
- _curr = _getExtent( nsd->capExtent() )->firstRecord;
- if (!_curr.isNull() && _curr == nsd->capFirstNewRecord()) {
- _curr = _getExtent( nsd->capExtent() )->lastRecord;
- _curr = nextLoop(_curr);
- }
- }
- }
- else {
- // Going backwards
- if (!nsd->capLooped()) {
- // Start at the end.
- _curr = collection->lastRecord(_txn);
- }
- else {
- _curr = _getExtent( nsd->capExtent() )->lastRecord;
+//
+// Capped collection traversal
+//
+CappedRecordStoreV1Iterator::CappedRecordStoreV1Iterator(OperationContext* txn,
+ const CappedRecordStoreV1* collection,
+ bool forward)
+ : _txn(txn), _recordStore(collection), _forward(forward) {
+ const RecordStoreV1MetaData* nsd = _recordStore->details();
+
+ // If a start position isn't specified, we fill one out from the start of the
+ // collection.
+ if (_forward) {
+ // Going forwards.
+ if (!nsd->capLooped()) {
+ // If our capped collection doesn't loop around, the first record is easy.
+ _curr = collection->firstRecord(_txn);
+ } else {
+            // Our capped collection has "looped" around.
+ // Copied verbatim from ForwardCappedCursor::init.
+ // TODO ELABORATE
+ _curr = _getExtent(nsd->capExtent())->firstRecord;
+ if (!_curr.isNull() && _curr == nsd->capFirstNewRecord()) {
+ _curr = _getExtent(nsd->capExtent())->lastRecord;
+ _curr = nextLoop(_curr);
}
}
- }
-
- boost::optional<Record> CappedRecordStoreV1Iterator::next() {
- if (isEOF()) return {};
- auto toReturn = _curr.toRecordId();
- _curr = getNextCapped(_curr);
- return {{toReturn, _recordStore->RecordStore::dataFor(_txn, toReturn)}};
- }
-
- boost::optional<Record> CappedRecordStoreV1Iterator::seekExact(const RecordId& id) {
- _curr = getNextCapped(DiskLoc::fromRecordId(id));
- return {{id, _recordStore->RecordStore::dataFor(_txn, id)}};
- }
-
- void CappedRecordStoreV1Iterator::invalidate(const RecordId& id) {
- const DiskLoc dl = DiskLoc::fromRecordId(id);
- if (dl == _curr) {
- // We *could* move to the next thing, since there is actually a next
- // thing, but according to clientcursor.cpp:
- // "note we cannot advance here. if this condition occurs, writes to the oplog
- // have "caught" the reader. skipping ahead, the reader would miss potentially
- // important data."
- _curr = DiskLoc();
- _killedByInvalidate = true;
+ } else {
+ // Going backwards
+ if (!nsd->capLooped()) {
+ // Start at the end.
+ _curr = collection->lastRecord(_txn);
+ } else {
+ _curr = _getExtent(nsd->capExtent())->lastRecord;
}
}
-
- void CappedRecordStoreV1Iterator::savePositioned() {
- _txn = nullptr;
+}
+
+boost::optional<Record> CappedRecordStoreV1Iterator::next() {
+ if (isEOF())
+ return {};
+ auto toReturn = _curr.toRecordId();
+ _curr = getNextCapped(_curr);
+ return {{toReturn, _recordStore->RecordStore::dataFor(_txn, toReturn)}};
+}
+
+boost::optional<Record> CappedRecordStoreV1Iterator::seekExact(const RecordId& id) {
+ _curr = getNextCapped(DiskLoc::fromRecordId(id));
+ return {{id, _recordStore->RecordStore::dataFor(_txn, id)}};
+}
+
+void CappedRecordStoreV1Iterator::invalidate(const RecordId& id) {
+ const DiskLoc dl = DiskLoc::fromRecordId(id);
+ if (dl == _curr) {
+ // We *could* move to the next thing, since there is actually a next
+ // thing, but according to clientcursor.cpp:
+ // "note we cannot advance here. if this condition occurs, writes to the oplog
+ // have "caught" the reader. skipping ahead, the reader would miss potentially
+ // important data."
+ _curr = DiskLoc();
+ _killedByInvalidate = true;
}
+}
- bool CappedRecordStoreV1Iterator::restore(OperationContext* txn) {
- _txn = txn;
- return !_killedByInvalidate;
- }
+void CappedRecordStoreV1Iterator::savePositioned() {
+ _txn = nullptr;
+}
- DiskLoc CappedRecordStoreV1Iterator::getNextCapped(const DiskLoc& dl) {
- invariant(!dl.isNull());
- const RecordStoreV1MetaData* details = _recordStore->details();
+bool CappedRecordStoreV1Iterator::restore(OperationContext* txn) {
+ _txn = txn;
+ return !_killedByInvalidate;
+}
- if (_forward) {
- // If it's not looped, it's easy.
- if (!_recordStore->details()->capLooped()) {
- return _getNextRecord( dl );
- }
+DiskLoc CappedRecordStoreV1Iterator::getNextCapped(const DiskLoc& dl) {
+ invariant(!dl.isNull());
+ const RecordStoreV1MetaData* details = _recordStore->details();
- // TODO ELABORATE
- // EOF.
- if (dl == _getExtent( details->capExtent() )->lastRecord) {
- return DiskLoc();
- }
+ if (_forward) {
+ // If it's not looped, it's easy.
+ if (!_recordStore->details()->capLooped()) {
+ return _getNextRecord(dl);
+ }
- DiskLoc ret = nextLoop(dl);
+ // TODO ELABORATE
+ // EOF.
+ if (dl == _getExtent(details->capExtent())->lastRecord) {
+ return DiskLoc();
+ }
- // If we become capFirstNewRecord from same extent, advance to next extent.
- if (ret == details->capFirstNewRecord() && ret != _getExtent( details->capExtent() )->firstRecord) {
- ret = nextLoop(_getExtent( details->capExtent() )->lastRecord);
- }
+ DiskLoc ret = nextLoop(dl);
- // If we have just gotten to beginning of capExtent, skip to capFirstNewRecord
- if (ret == _getExtent( details->capExtent() )->firstRecord) { ret = details->capFirstNewRecord(); }
+ // If we become capFirstNewRecord from same extent, advance to next extent.
+ if (ret == details->capFirstNewRecord() &&
+ ret != _getExtent(details->capExtent())->firstRecord) {
+ ret = nextLoop(_getExtent(details->capExtent())->lastRecord);
+ }
- return ret;
+ // If we have just gotten to beginning of capExtent, skip to capFirstNewRecord
+ if (ret == _getExtent(details->capExtent())->firstRecord) {
+ ret = details->capFirstNewRecord();
}
- else {
- if (!details->capLooped()) { return _getPrevRecord( dl ); }
- // TODO ELABORATE
- // Last record
- if (details->capFirstNewRecord() == _getExtent( details->capExtent() )->firstRecord) {
- if (dl == nextLoop(_getExtent( details->capExtent() )->lastRecord)) {
- return DiskLoc();
- }
- }
- else {
- if (dl == _getExtent( details->capExtent() )->firstRecord) { return DiskLoc(); }
- }
+ return ret;
+ } else {
+ if (!details->capLooped()) {
+ return _getPrevRecord(dl);
+ }
- DiskLoc ret;
- // If we are capFirstNewRecord, advance to prev extent, otherwise just get prev.
- if (dl == details->capFirstNewRecord()) {
- ret = prevLoop(_getExtent( details->capExtent() )->firstRecord);
- }
- else {
- ret = prevLoop(dl);
+ // TODO ELABORATE
+ // Last record
+ if (details->capFirstNewRecord() == _getExtent(details->capExtent())->firstRecord) {
+ if (dl == nextLoop(_getExtent(details->capExtent())->lastRecord)) {
+ return DiskLoc();
}
-
- // If we just became last in cap extent, advance past capFirstNewRecord
- // (We know ext(capExtent)->firstRecord != capFirstNewRecord, since would
- // have returned DiskLoc() earlier otherwise.)
- if (ret == _getExtent( details->capExtent() )->lastRecord) {
- ret = _getPrevRecord( details->capFirstNewRecord() );
+ } else {
+ if (dl == _getExtent(details->capExtent())->firstRecord) {
+ return DiskLoc();
}
+ }
- return ret;
+ DiskLoc ret;
+ // If we are capFirstNewRecord, advance to prev extent, otherwise just get prev.
+ if (dl == details->capFirstNewRecord()) {
+ ret = prevLoop(_getExtent(details->capExtent())->firstRecord);
+ } else {
+ ret = prevLoop(dl);
}
- }
- DiskLoc CappedRecordStoreV1Iterator::nextLoop(const DiskLoc& prev) {
- // TODO ELABORATE
- DiskLoc next = _getNextRecord( prev );
- if (!next.isNull()) {
- return next;
+ // If we just became last in cap extent, advance past capFirstNewRecord
+ // (We know ext(capExtent)->firstRecord != capFirstNewRecord, since would
+ // have returned DiskLoc() earlier otherwise.)
+ if (ret == _getExtent(details->capExtent())->lastRecord) {
+ ret = _getPrevRecord(details->capFirstNewRecord());
}
- return _recordStore->firstRecord(_txn);
+
+ return ret;
}
+}
- DiskLoc CappedRecordStoreV1Iterator::prevLoop(const DiskLoc& curr) {
- // TODO ELABORATE
- DiskLoc prev = _getPrevRecord( curr );
- if (!prev.isNull()) {
- return prev;
- }
- return _recordStore->lastRecord(_txn);
+DiskLoc CappedRecordStoreV1Iterator::nextLoop(const DiskLoc& prev) {
+ // TODO ELABORATE
+ DiskLoc next = _getNextRecord(prev);
+ if (!next.isNull()) {
+ return next;
+ }
+ return _recordStore->firstRecord(_txn);
+}
+
+DiskLoc CappedRecordStoreV1Iterator::prevLoop(const DiskLoc& curr) {
+ // TODO ELABORATE
+ DiskLoc prev = _getPrevRecord(curr);
+ if (!prev.isNull()) {
+ return prev;
}
+ return _recordStore->lastRecord(_txn);
+}
- Extent* CappedRecordStoreV1Iterator::_getExtent( const DiskLoc& loc ) {
- return _recordStore->_extentManager->getExtent( loc );
- }
+Extent* CappedRecordStoreV1Iterator::_getExtent(const DiskLoc& loc) {
+ return _recordStore->_extentManager->getExtent(loc);
+}
- DiskLoc CappedRecordStoreV1Iterator::_getNextRecord( const DiskLoc& loc ) {
- return _recordStore->getNextRecord( _txn, loc );
- }
+DiskLoc CappedRecordStoreV1Iterator::_getNextRecord(const DiskLoc& loc) {
+ return _recordStore->getNextRecord(_txn, loc);
+}
- DiskLoc CappedRecordStoreV1Iterator::_getPrevRecord( const DiskLoc& loc ) {
- return _recordStore->getPrevRecord( _txn, loc );
- }
+DiskLoc CappedRecordStoreV1Iterator::_getPrevRecord(const DiskLoc& loc) {
+ return _recordStore->getPrevRecord(_txn, loc);
+}
- std::unique_ptr<RecordFetcher> CappedRecordStoreV1Iterator::fetcherForNext() const {
- return _recordStore->_extentManager->recordNeedsFetch(_curr);
- }
+std::unique_ptr<RecordFetcher> CappedRecordStoreV1Iterator::fetcherForNext() const {
+ return _recordStore->_extentManager->recordNeedsFetch(_curr);
+}
- std::unique_ptr<RecordFetcher> CappedRecordStoreV1Iterator::fetcherForId(
- const RecordId& id) const {
- return _recordStore->_extentManager->recordNeedsFetch(DiskLoc::fromRecordId(id));
- }
+std::unique_ptr<RecordFetcher> CappedRecordStoreV1Iterator::fetcherForId(const RecordId& id) const {
+ return _recordStore->_extentManager->recordNeedsFetch(DiskLoc::fromRecordId(id));
+}
} // namespace mongo
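
The substantive behavior in this file is the wrap-around traversal: nextLoop() takes the natural successor and falls back to firstRecord() at the end of the chain, and prevLoop() mirrors it with lastRecord(). A tiny self-contained model of those two helpers over an index range follows; it is purely illustrative and not MongoDB code.

#include <cstddef>
#include <vector>

// Model of nextLoop(): advance to the successor, or wrap to the first element,
// the way the iterator falls back to _recordStore->firstRecord(_txn).
std::size_t nextLoop(const std::vector<int>& records, std::size_t prev) {
    return (prev + 1 < records.size()) ? prev + 1 : 0;
}

// Model of prevLoop(): step back to the predecessor, or wrap to the last element,
// matching the fallback to _recordStore->lastRecord(_txn).
std::size_t prevLoop(const std::vector<int>& records, std::size_t curr) {
    return (curr > 0) ? curr - 1 : records.size() - 1;
}
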
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
index de2b6fda5e3..0a366d9921a 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h
@@ -33,58 +33,60 @@
namespace mongo {
- class CappedRecordStoreV1;
+class CappedRecordStoreV1;
- struct Extent;
+struct Extent;
+
+/**
+ * This class iterates over a capped collection identified by 'ns'.
+ * The collection must exist when the constructor is called.
+ */
+class CappedRecordStoreV1Iterator final : public RecordCursor {
+public:
+ CappedRecordStoreV1Iterator(OperationContext* txn,
+ const CappedRecordStoreV1* collection,
+ bool forward);
+
+ boost::optional<Record> next() final;
+ boost::optional<Record> seekExact(const RecordId& id) final;
+ void savePositioned() final;
+ bool restore(OperationContext* txn) final;
+ void invalidate(const RecordId& dl) final;
+ std::unique_ptr<RecordFetcher> fetcherForNext() const final;
+ std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const final;
+
+private:
+ void advance();
+ bool isEOF() {
+ return _curr.isNull();
+ }
/**
- * This class iterates over a capped collection identified by 'ns'.
- * The collection must exist when the constructor is called.
+ * Internal collection navigation helper methods.
*/
- class CappedRecordStoreV1Iterator final : public RecordCursor {
- public:
- CappedRecordStoreV1Iterator( OperationContext* txn,
- const CappedRecordStoreV1* collection,
- bool forward );
-
- boost::optional<Record> next() final;
- boost::optional<Record> seekExact(const RecordId& id) final;
- void savePositioned() final;
- bool restore(OperationContext* txn) final;
- void invalidate(const RecordId& dl) final;
- std::unique_ptr<RecordFetcher> fetcherForNext() const final;
- std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const final;
-
- private:
- void advance();
- bool isEOF() { return _curr.isNull(); }
-
- /**
- * Internal collection navigation helper methods.
- */
- DiskLoc getNextCapped(const DiskLoc& dl);
- DiskLoc prevLoop(const DiskLoc& curr);
- DiskLoc nextLoop(const DiskLoc& prev);
-
- // some helpers - these move to RecordStore probably
- Extent* _getExtent( const DiskLoc& loc );
- DiskLoc _getNextRecord( const DiskLoc& loc );
- DiskLoc _getPrevRecord( const DiskLoc& loc );
-
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
- // The collection we're iterating over.
- const CappedRecordStoreV1* const _recordStore;
-
- // The result returned on the next call to getNext().
- DiskLoc _curr;
-
- const bool _forward;
-
- // If invalidate kills the DiskLoc we need to move forward, we kill the iterator. See the
- // comment in the body of invalidate(...).
- bool _killedByInvalidate = false;
- };
+ DiskLoc getNextCapped(const DiskLoc& dl);
+ DiskLoc prevLoop(const DiskLoc& curr);
+ DiskLoc nextLoop(const DiskLoc& prev);
+
+    // some helpers - these should probably move to RecordStore
+ Extent* _getExtent(const DiskLoc& loc);
+ DiskLoc _getNextRecord(const DiskLoc& loc);
+ DiskLoc _getPrevRecord(const DiskLoc& loc);
+
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
+
+ // The collection we're iterating over.
+ const CappedRecordStoreV1* const _recordStore;
+
+ // The result returned on the next call to getNext().
+ DiskLoc _curr;
+
+ const bool _forward;
+
+ // If invalidate kills the DiskLoc we need to move forward, we kill the iterator. See the
+ // comment in the body of invalidate(...).
+ bool _killedByInvalidate = false;
+};
} // namespace mongo
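
One non-obvious contract in this class is the save/restore/invalidate interplay: invalidate() on the current position deliberately does not advance (see the clientcursor.cpp quotation in the .cpp file) but sets _killedByInvalidate, so the next restore() returns false. Below is a hedged caller-side sketch, assuming 'cursor' comes from CappedRecordStoreV1::getCursor() and 'consume' is a hypothetical callback; the yield machinery is elided.

// Read records while periodically yielding. If the capped writer catches up and
// invalidates our position, restore() returns false and we stop rather than skip data.
bool readWithYields(OperationContext* txn, RecordCursor* cursor) {
    while (boost::optional<Record> rec = cursor->next()) {
        consume(rec->id, rec->data);   // hypothetical consumer

        cursor->savePositioned();      // detach from the transaction before yielding
        // ... locks/snapshot released and reacquired here ...
        if (!cursor->restore(txn)) {
            return false;              // cursor was killed by invalidate()
        }
    }
    return true;
}
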
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
index 0c369587f9b..1089d243467 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
@@ -42,773 +42,671 @@ using namespace mongo;
namespace {
- using std::string;
- using std::vector;
-
- // Provides data to be inserted. Must be large enough for largest possible record.
- // Should be in BSS so unused portions should be free.
- char zeros[20*1024*1024] = {};
-
- class DummyCappedDocumentDeleteCallback : public CappedDocumentDeleteCallback {
- public:
- Status aboutToDeleteCapped( OperationContext* txn, const RecordId& loc, RecordData data) {
- deleted.push_back( DiskLoc::fromRecordId(loc) );
- return Status::OK();
- }
- vector<DiskLoc> deleted;
- };
-
- void simpleInsertTest( const char* buf, int size ) {
-
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
-
- string myns = "test.simple1";
- CappedRecordStoreV1 rs( &txn, &cb, myns, md, &em, false );
+using std::string;
+using std::vector;
+
+// Provides data to be inserted. Must be large enough for largest possible record.
+// Should be in BSS so unused portions should be free.
+char zeros[20 * 1024 * 1024] = {};
+
+class DummyCappedDocumentDeleteCallback : public CappedDocumentDeleteCallback {
+public:
+ Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data) {
+ deleted.push_back(DiskLoc::fromRecordId(loc));
+ return Status::OK();
+ }
+ vector<DiskLoc> deleted;
+};
- rs.increaseStorageSize( &txn, 1024, false );
+void simpleInsertTest(const char* buf, int size) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
- ASSERT_NOT_OK( rs.insertRecord( &txn, buf, 3, 1000 ).getStatus() );
+ string myns = "test.simple1";
+ CappedRecordStoreV1 rs(&txn, &cb, myns, md, &em, false);
- rs.insertRecord( &txn, buf, size, 10000 );
+ rs.increaseStorageSize(&txn, 1024, false);
- {
- BSONObjBuilder b;
- int64_t storageSize = rs.storageSize( &txn, &b );
- BSONObj obj = b.obj();
- ASSERT_EQUALS( 1, obj["numExtents"].numberInt() );
- ASSERT_EQUALS( storageSize, em.quantizeExtentSize( 1024 ) );
- }
+ ASSERT_NOT_OK(rs.insertRecord(&txn, buf, 3, 1000).getStatus());
- for ( int i = 0; i < 1000; i++ ) {
- ASSERT_OK( rs.insertRecord( &txn, buf, size, 10000 ).getStatus() );
- }
+ rs.insertRecord(&txn, buf, size, 10000);
- long long start = md->numRecords();
- for ( int i = 0; i < 1000; i++ ) {
- ASSERT_OK( rs.insertRecord( &txn, buf, size, 10000 ).getStatus() );
- }
- ASSERT_EQUALS( start, md->numRecords() );
- ASSERT_GREATER_THAN( start, 100 );
- ASSERT_LESS_THAN( start, 1000 );
+ {
+ BSONObjBuilder b;
+ int64_t storageSize = rs.storageSize(&txn, &b);
+ BSONObj obj = b.obj();
+ ASSERT_EQUALS(1, obj["numExtents"].numberInt());
+ ASSERT_EQUALS(storageSize, em.quantizeExtentSize(1024));
}
- TEST(CappedRecordStoreV1, SimpleInsertSize4) {
- simpleInsertTest("abcd", 4);
- }
- TEST(CappedRecordStoreV1, SimpleInsertSize8) {
- simpleInsertTest("abcdefgh", 8);
+ for (int i = 0; i < 1000; i++) {
+ ASSERT_OK(rs.insertRecord(&txn, buf, size, 10000).getStatus());
}
- TEST(CappedRecordStoreV1, EmptySingleExtent) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
-
- {
- LocAndSize records[] = {
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 1000},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
-
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1100), 900},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
- }
+ long long start = md->numRecords();
+ for (int i = 0; i < 1000; i++) {
+ ASSERT_OK(rs.insertRecord(&txn, buf, size, 10000).getStatus());
}
+ ASSERT_EQUALS(start, md->numRecords());
+ ASSERT_GREATER_THAN(start, 100);
+ ASSERT_LESS_THAN(start, 1000);
+}
- TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
-
- {
- LocAndSize records[] = {
- {DiskLoc(0, 1000), 100},
- {DiskLoc(0, 1100), 100},
- {DiskLoc(0, 1200), 100},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1500), 50},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
-
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+TEST(CappedRecordStoreV1, SimpleInsertSize4) {
+ simpleInsertTest("abcd", 4);
+}
+TEST(CappedRecordStoreV1, SimpleInsertSize8) {
+ simpleInsertTest("abcdefgh", 8);
+}
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1200), 100}, // first old record
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100}, // last old record
- {DiskLoc(0, 1000), 100}, // first new record
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1100), 100}, // gap after newest record XXX this is probably a bug
- {DiskLoc(0, 1500), 50}, // gap at end of extent
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
- }
+TEST(CappedRecordStoreV1, EmptySingleExtent) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ LocAndSize records[] = {{}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
}
- TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- {
- LocAndSize records[] = {
- {DiskLoc(0, 1000), 100},
- {DiskLoc(0, 1100), 100},
- {DiskLoc(0, 1200), 100},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1500), 50},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
-
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1200), 100}, // first old record
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100}, // last old record
- {DiskLoc(0, 1000), 100}, // first new record
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1100), 100}, // gap after newest record XXX this is probably a bug
- {DiskLoc(0, 1500), 50}, // gap at end of extent
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
- }
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 100}, {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1100), 900}, {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
}
+}
- /**
- * Current code always tries to leave 24 bytes to create a DeletedRecord.
- */
- TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ LocAndSize records[] = {{DiskLoc(0, 1000), 100},
+ {DiskLoc(0, 1100), 100},
+ {DiskLoc(0, 1200), 100},
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(0, 1400), 100},
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1500), 50}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ }
- {
- LocAndSize records[] = {
- {DiskLoc(0, 1000), 100},
- {DiskLoc(0, 1100), 100},
- {DiskLoc(0, 1200), 100},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1500), 123},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(0, 1400), 100}, // last old record
+ {DiskLoc(0, 1000), 100}, // first new record
+ {}};
+ LocAndSize drecs[] = {
+ {DiskLoc(0, 1100), 100}, // gap after newest record XXX this is probably a bug
+ {DiskLoc(0, 1500), 50}, // gap at end of extent
+ {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
+ }
+}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ LocAndSize records[] = {{DiskLoc(0, 1000), 100},
+ {DiskLoc(0, 1100), 100},
+ {DiskLoc(0, 1200), 100},
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(0, 1400), 100},
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1500), 50}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ }
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1200), 100}, // first old record
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100}, // last old record
- {DiskLoc(0, 1000), 100}, // first new record
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1100), 100}, // gap after newest record
- {DiskLoc(0, 1500), 123}, // gap at end of extent
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
- }
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(0, 1400), 100}, // last old record
+ {DiskLoc(0, 1000), 100}, // first new record
+ {}};
+ LocAndSize drecs[] = {
+ {DiskLoc(0, 1100), 100}, // gap after newest record XXX this is probably a bug
+ {DiskLoc(0, 1500), 50}, // gap at end of extent
+ {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
+}
- TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+/**
+ * Current code always tries to leave 24 bytes to create a DeletedRecord.
+ */
+TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ LocAndSize records[] = {{DiskLoc(0, 1000), 100},
+ {DiskLoc(0, 1100), 100},
+ {DiskLoc(0, 1200), 100},
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(0, 1400), 100},
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1500), 123}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ }
- {
- LocAndSize records[] = {
- {DiskLoc(0, 1000), 100},
- {DiskLoc(0, 1100), 100},
- {DiskLoc(0, 1200), 100},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1500), 124},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(0, 1400), 100}, // last old record
+ {DiskLoc(0, 1000), 100}, // first new record
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1100), 100}, // gap after newest record
+ {DiskLoc(0, 1500), 123}, // gap at end of extent
+ {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
+ }
+}
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ LocAndSize records[] = {{DiskLoc(0, 1000), 100},
+ {DiskLoc(0, 1100), 100},
+ {DiskLoc(0, 1200), 100},
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(0, 1400), 100},
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1500), 124}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ }
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 100},
- {DiskLoc(0, 1100), 100},
- {DiskLoc(0, 1200), 100},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(0, 1400), 100},
- {DiskLoc(0, 1500), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1600), 24}, // gap at end of extent
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
- }
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 100},
+ {DiskLoc(0, 1100), 100},
+ {DiskLoc(0, 1200), 100},
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(0, 1400), 100},
+ {DiskLoc(0, 1500), 100},
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1600), 24}, // gap at end of extent
+ {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
}
+}
- TEST(CappedRecordStoreV1, MoveToSecondExtentUnLooped) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+TEST(CappedRecordStoreV1, MoveToSecondExtentUnLooped) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ // Two extents, each with 1000 bytes.
+ LocAndSize records[] = {
+ {DiskLoc(0, 1000), 500}, {DiskLoc(0, 1500), 300}, {DiskLoc(0, 1800), 100}, {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1000), 1000}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ }
- {
- // Two extents, each with 1000 bytes.
- LocAndSize records[] = {
- {DiskLoc(0, 1000), 500},
- {DiskLoc(0, 1500), 300},
- {DiskLoc(0, 1800), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1900), 100},
- {DiskLoc(1, 1000), 1000},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
+ {DiskLoc(0, 1500), 300},
+ {DiskLoc(0, 1800), 100},
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 500},
- {DiskLoc(0, 1500), 300},
- {DiskLoc(0, 1800), 100},
-
- {DiskLoc(1, 1000), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1900), 100},
- {DiskLoc(1, 1100), 900},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
- }
+ {DiskLoc(1, 1000), 100},
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1100), 900}, {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc().setInvalid()); // unlooped
}
+}
- TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ // Two extents, each with 1000 bytes.
+ LocAndSize records[] = {{DiskLoc(0, 1800), 100}, // old
+ {DiskLoc(0, 1000), 500}, // first new
+ {DiskLoc(0, 1500), 400},
+
+ {DiskLoc(1, 1000), 300},
+ {DiskLoc(1, 1300), 600},
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1900), 100}, {DiskLoc(1, 1900), 100}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ }
- {
- // Two extents, each with 1000 bytes.
- LocAndSize records[] = {
- {DiskLoc(0, 1800), 100}, // old
- {DiskLoc(0, 1000), 500}, // first new
- {DiskLoc(0, 1500), 400},
-
- {DiskLoc(1, 1000), 300},
- {DiskLoc(1, 1300), 600},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1900), 100},
- {DiskLoc(1, 1900), 100},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc(0, 1000));
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
+ rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
+ {DiskLoc(0, 1500), 400},
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 500},
- {DiskLoc(0, 1500), 400},
-
- {DiskLoc(1, 1300), 600}, // old
- {DiskLoc(1, 1000), 200}, // first new
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1800), 200},
- {DiskLoc(1, 1200), 100},
- {DiskLoc(1, 1900), 100},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(1, 1000));
- }
+ {DiskLoc(1, 1300), 600}, // old
+ {DiskLoc(1, 1000), 200}, // first new
+ {}};
+ LocAndSize drecs[] = {
+ {DiskLoc(0, 1800), 200}, {DiskLoc(1, 1200), 100}, {DiskLoc(1, 1900), 100}, {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(1, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(1, 1000));
}
+}
- // Larger than storageSize (fails early)
- TEST(CappedRecordStoreV1, OversizedRecordHuge) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+// Larger than storageSize (fails early)
+TEST(CappedRecordStoreV1, OversizedRecordHuge) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ LocAndSize records[] = {{}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
+ }
- {
- LocAndSize records[] = {
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 1000},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
+ StatusWith<RecordId> status = rs.insertRecord(&txn, zeros, 16000, false);
+ ASSERT_EQUALS(status.getStatus(), ErrorCodes::DocTooLargeForCapped);
+ ASSERT_EQUALS(status.getStatus().location(), 16328);
+}
- StatusWith<RecordId> status = rs.insertRecord(&txn, zeros, 16000, false);
- ASSERT_EQUALS(status.getStatus(), ErrorCodes::DocTooLargeForCapped);
- ASSERT_EQUALS(status.getStatus().location(), 16328);
+// Smaller than storageSize, but larger than usable space (fails late)
+TEST(CappedRecordStoreV1, OversizedRecordMedium) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ LocAndSize records[] = {{}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
}
- // Smaller than storageSize, but larger than usable space (fails late)
- TEST(CappedRecordStoreV1, OversizedRecordMedium) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ StatusWith<RecordId> status =
+ rs.insertRecord(&txn, zeros, 1004 - MmapV1RecordHeader::HeaderSize, false);
+ ASSERT_EQUALS(status.getStatus(), ErrorCodes::DocTooLargeForCapped);
+ ASSERT_EQUALS(status.getStatus().location(), 28575);
+}
- {
- LocAndSize records[] = {
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 1000},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
+//
+// XXX The CappedRecordStoreV1Scrambler suite of tests describes existing behavior that is less
+// than ideal. Any improved implementation will need to be able to handle a collection that has
+// been scrambled like this.
+//
- StatusWith<RecordId> status = rs.insertRecord(&txn, zeros, 1004 - MmapV1RecordHeader::HeaderSize, false);
- ASSERT_EQUALS(status.getStatus(), ErrorCodes::DocTooLargeForCapped);
- ASSERT_EQUALS(status.getStatus().location(), 28575);
+/**
+ * This is a minimal example that shows the current allocator laying out records out-of-order.
+ */
+TEST(CappedRecordStoreV1Scrambler, Minimal) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ // Starting with a single empty 1000 byte extent.
+ LocAndSize records[] = {{}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
}
- //
- // XXX The CappedRecordStoreV1Scrambler suite of tests describe existing behavior that is less
- // than ideal. Any improved implementation will need to be able to handle a collection that has
- // been scrambled like this.
- //
-
- /**
- * This is a minimal example that shows the current allocator laying out records out-of-order.
- */
- TEST(CappedRecordStoreV1Scrambler, Minimal) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
-
- {
- // Starting with a single empty 1000 byte extent.
- LocAndSize records[] = {
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 1000},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
- }
-
- rs.insertRecord(&txn, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 400 - MmapV1RecordHeader::HeaderSize, false); // won't fit at end so wraps
- rs.insertRecord(&txn, zeros, 120 - MmapV1RecordHeader::HeaderSize, false); // fits at end
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false); // fits in earlier hole
+ rs.insertRecord(&txn, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(
+ &txn, zeros, 400 - MmapV1RecordHeader::HeaderSize, false); // won't fit at end so wraps
+ rs.insertRecord(&txn, zeros, 120 - MmapV1RecordHeader::HeaderSize, false); // fits at end
+ rs.insertRecord(
+ &txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false); // fits in earlier hole
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1500), 300}, // 2nd insert
+ {DiskLoc(0, 1000), 400}, // 3rd (1st new)
+ {DiskLoc(0, 1800), 120}, // 4th
+ {DiskLoc(0, 1400), 60}, // 5th
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1460), 40}, {DiskLoc(0, 1920), 80}, {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
+ }
+}
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1500), 300}, // 2nd insert
- {DiskLoc(0, 1000), 400}, // 3rd (1st new)
- {DiskLoc(0, 1800), 120}, // 4th
- {DiskLoc(0, 1400), 60}, // 5th
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1460), 40},
- {DiskLoc(0, 1920), 80},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
- }
+/**
+ * This tests a specially crafted set of inserts that scrambles a capped collection in a way
+ * that leaves 4 deleted records in a single extent.
+ */
+TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+
+ {
+ // Starting with a single empty 1000 byte extent.
+ LocAndSize records[] = {{}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&txn, records, drecs, NULL, &em, md);
}
- /**
- * This tests a specially crafted set of inserts that scrambles a capped collection in a way
- * that leaves 4 deleted records in a single extent.
- */
- TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( true, 0 );
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs(&txn, &cb, "test.foo", md, &em, false);
+ // This list of sizes was empirically generated to achieve this outcome. Don't think too
+ // much about them.
+ rs.insertRecord(&txn, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 304 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 56 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 104 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 36 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&txn, zeros, 64 - MmapV1RecordHeader::HeaderSize, false);
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1148), 148},
+ {DiskLoc(0, 1936), 40},
+ {DiskLoc(0, 1712), 40},
+ {DiskLoc(0, 1296), 36},
+ {DiskLoc(0, 1752), 100},
+ {DiskLoc(0, 1332), 96},
+ {DiskLoc(0, 1428), 200},
+ {DiskLoc(0, 1852), 60},
+ {DiskLoc(0, 1000), 64}, // (1st new)
+ {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1064), 84},
+ {DiskLoc(0, 1976), 24},
+ {DiskLoc(0, 1912), 24},
+ {DiskLoc(0, 1628), 84},
+ {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
+ ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
+ }
+}
- {
- // Starting with a single empty 1000 byte extent.
- LocAndSize records[] = {
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 1000},
- {}
- };
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, records, drecs, NULL, &em, md);
+//
+// The CappedRecordStoreV1QueryStage tests exercise some nitty-gritty capped
+// collection details. They were ported and polished from pdfiletests.cpp.
+//
+
+class CollscanHelper {
+public:
+ CollscanHelper(int nExtents)
+ : md(new DummyRecordStoreV1MetaData(true, 0)), rs(&txn, &cb, ns(), md, &em, false) {
+ LocAndSize recs[] = {{}};
+ LocAndSize drecs[8];
+ ASSERT_LESS_THAN(nExtents, 8);
+ for (int j = 0; j < nExtents; ++j) {
+ drecs[j].loc = DiskLoc(j, 1000);
+ drecs[j].size = 1000;
}
+ drecs[nExtents].loc = DiskLoc();
+ drecs[nExtents].size = 0;
- // This list of sizes was empirically generated to achieve this outcome. Don't think too
- // much about them.
- rs.insertRecord(&txn, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 304 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 56 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 104 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 36 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&txn, zeros, 64 - MmapV1RecordHeader::HeaderSize, false);
+ md->setCapExtent(&txn, DiskLoc(0, 0));
+ md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
+ initializeV1RS(&txn, recs, drecs, NULL, &em, md);
+ }
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1148), 148},
- {DiskLoc(0, 1936), 40},
- {DiskLoc(0, 1712), 40},
- {DiskLoc(0, 1296), 36},
- {DiskLoc(0, 1752), 100},
- {DiskLoc(0, 1332), 96},
- {DiskLoc(0, 1428), 200},
- {DiskLoc(0, 1852), 60},
- {DiskLoc(0, 1000), 64}, // (1st new)
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1064), 84},
- {DiskLoc(0, 1976), 24},
- {DiskLoc(0, 1912), 24},
- {DiskLoc(0, 1628), 84},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- ASSERT_EQUALS(md->capExtent(), DiskLoc(0, 0));
- ASSERT_EQUALS(md->capFirstNewRecord(), DiskLoc(0, 1000));
+ // Insert bypasses standard alloc/insert routines to use the extent we want.
+ // TODO: Directly declare resulting record store state instead of procedurally creating it
+ DiskLoc insert(const DiskLoc& ext, int i) {
+ // Copied verbatim.
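+        // Builds a one-field document {a: i}, appends it at the end of extent 'ext', and
+        // hand-splices it into the extent's record list (firstRecord/lastRecord plus the
+        // records' nextOfs/prevOfs offsets), bypassing the normal allocation path.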
+ BSONObjBuilder b;
+ b.append("a", i);
+ BSONObj o = b.done();
+ int len = o.objsize();
+ Extent* e = em.getExtent(ext);
+ e = txn.recoveryUnit()->writing(e);
+ int ofs;
+ if (e->lastRecord.isNull()) {
+ ofs = ext.getOfs() + (e->_extentData - (char*)e);
+ } else {
+ ofs = e->lastRecord.getOfs() + em.recordForV1(e->lastRecord)->lengthWithHeaders();
}
+ DiskLoc dl(ext.a(), ofs);
+ MmapV1RecordHeader* r = em.recordForV1(dl);
+ r = (MmapV1RecordHeader*)txn.recoveryUnit()->writingPtr(
+ r, MmapV1RecordHeader::HeaderSize + len);
+ r->lengthWithHeaders() = MmapV1RecordHeader::HeaderSize + len;
+ r->extentOfs() = e->myLoc.getOfs();
+ r->nextOfs() = DiskLoc::NullOfs;
+ r->prevOfs() = e->lastRecord.isNull() ? DiskLoc::NullOfs : e->lastRecord.getOfs();
+ memcpy(r->data(), o.objdata(), len);
+ if (e->firstRecord.isNull())
+ e->firstRecord = dl;
+ else
+ txn.recoveryUnit()->writingInt(em.recordForV1(e->lastRecord)->nextOfs()) = ofs;
+ e->lastRecord = dl;
+ return dl;
}
- //
- // The CappedRecordStoreV1QueryStage tests some nitty-gritty capped
- // collection details. Ported and polished from pdfiletests.cpp.
- //
-
- class CollscanHelper {
- public:
- CollscanHelper(int nExtents)
- : md(new DummyRecordStoreV1MetaData( true, 0 )),
- rs(&txn, &cb, ns(), md, &em, false)
+ // TODO: Directly assert the desired record store state instead of just walking it
+ void walkAndCount(int expectedCount) {
+ // Walk the collection going forward.
{
- LocAndSize recs[] = {
- {}
- };
- LocAndSize drecs[8];
- ASSERT_LESS_THAN(nExtents, 8);
- for (int j = 0; j < nExtents; ++j) {
- drecs[j].loc = DiskLoc(j, 1000);
- drecs[j].size = 1000;
+ CappedRecordStoreV1Iterator cursor(&txn, &rs, /*forward=*/true);
+ int resultCount = 0;
+ while (auto record = cursor.next()) {
+ ++resultCount;
}
- drecs[nExtents].loc = DiskLoc();
- drecs[nExtents].size = 0;
- md->setCapExtent(&txn, DiskLoc(0, 0));
- md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid()); // unlooped
- initializeV1RS(&txn, recs, drecs, NULL, &em, md);
+ ASSERT_EQUALS(resultCount, expectedCount);
}
- // Insert bypasses standard alloc/insert routines to use the extent we want.
- // TODO: Directly declare resulting record store state instead of procedurally creating it
- DiskLoc insert( const DiskLoc& ext, int i ) {
- // Copied verbatim.
- BSONObjBuilder b;
- b.append( "a", i );
- BSONObj o = b.done();
- int len = o.objsize();
- Extent *e = em.getExtent(ext);
- e = txn.recoveryUnit()->writing(e);
- int ofs;
- if ( e->lastRecord.isNull() ) {
- ofs = ext.getOfs() + ( e->_extentData - (char *)e );
- }
- else {
- ofs = e->lastRecord.getOfs()
- + em.recordForV1(e->lastRecord)->lengthWithHeaders();
- }
- DiskLoc dl( ext.a(), ofs );
- MmapV1RecordHeader *r = em.recordForV1(dl);
- r = (MmapV1RecordHeader*) txn.recoveryUnit()->writingPtr(r, MmapV1RecordHeader::HeaderSize + len);
- r->lengthWithHeaders() = MmapV1RecordHeader::HeaderSize + len;
- r->extentOfs() = e->myLoc.getOfs();
- r->nextOfs() = DiskLoc::NullOfs;
- r->prevOfs() = e->lastRecord.isNull() ? DiskLoc::NullOfs : e->lastRecord.getOfs();
- memcpy( r->data(), o.objdata(), len );
- if ( e->firstRecord.isNull() )
- e->firstRecord = dl;
- else
- txn.recoveryUnit()->writingInt(em.recordForV1(e->lastRecord)->nextOfs()) = ofs;
- e->lastRecord = dl;
- return dl;
- }
-
- // TODO: Directly assert the desired record store state instead of just walking it
- void walkAndCount (int expectedCount) {
- // Walk the collection going forward.
- {
- CappedRecordStoreV1Iterator cursor(&txn, &rs, /*forward=*/true);
- int resultCount = 0;
- while (auto record = cursor.next()) {
- ++resultCount;
- }
-
- ASSERT_EQUALS(resultCount, expectedCount);
+ // Walk the collection going backwards.
+ {
+ CappedRecordStoreV1Iterator cursor(&txn, &rs, /*forward=*/false);
+ int resultCount = expectedCount;
+ while (auto record = cursor.next()) {
+ --resultCount;
}
- // Walk the collection going backwards.
- {
- CappedRecordStoreV1Iterator cursor(&txn, &rs, /*forward=*/false);
- int resultCount = expectedCount;
- while (auto record = cursor.next()) {
- --resultCount;
- }
-
- ASSERT_EQUALS(resultCount, 0);
- }
+ ASSERT_EQUALS(resultCount, 0);
}
+ }
- static const char *ns() { return "unittests.QueryStageCollectionScanCapped"; }
+ static const char* ns() {
+ return "unittests.QueryStageCollectionScanCapped";
+ }
- OperationContextNoop txn;
- DummyRecordStoreV1MetaData* md;
- DummyExtentManager em;
+ OperationContextNoop txn;
+ DummyRecordStoreV1MetaData* md;
+ DummyExtentManager em;
- private:
- DummyCappedDocumentDeleteCallback cb;
- CappedRecordStoreV1 rs;
- };
+private:
+ DummyCappedDocumentDeleteCallback cb;
+ CappedRecordStoreV1 rs;
+};
- TEST(CappedRecordStoreV1QueryStage, CollscanCappedBase) {
- CollscanHelper h(1);
- h.walkAndCount(0);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanCappedBase) {
+ CollscanHelper h(1);
+ h.walkAndCount(0);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanEmptyLooped) {
- CollscanHelper h(1);
- h.md->setCapFirstNewRecord( &h.txn, DiskLoc() );
- h.walkAndCount(0);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanEmptyLooped) {
+ CollscanHelper h(1);
+ h.md->setCapFirstNewRecord(&h.txn, DiskLoc());
+ h.walkAndCount(0);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanEmptyMultiExtentLooped) {
- CollscanHelper h(3);
- h.md->setCapFirstNewRecord( &h.txn, DiskLoc() );
- h.walkAndCount(0);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanEmptyMultiExtentLooped) {
+ CollscanHelper h(3);
+ h.md->setCapFirstNewRecord(&h.txn, DiskLoc());
+ h.walkAndCount(0);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanSingle) {
- CollscanHelper h(1);
+TEST(CappedRecordStoreV1QueryStage, CollscanSingle) {
+ CollscanHelper h(1);
- h.md->setCapFirstNewRecord(&h.txn, h.insert( h.md->capExtent(), 0 ));
- h.walkAndCount(1);
- }
+ h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 0));
+ h.walkAndCount(1);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanNewCapFirst) {
- CollscanHelper h(1);
- DiskLoc x = h.insert(h.md->capExtent(), 0 );
- h.md->setCapFirstNewRecord( &h.txn, x );
- h.insert(h.md->capExtent(), 1 );
- h.walkAndCount(2);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanNewCapFirst) {
+ CollscanHelper h(1);
+ DiskLoc x = h.insert(h.md->capExtent(), 0);
+ h.md->setCapFirstNewRecord(&h.txn, x);
+ h.insert(h.md->capExtent(), 1);
+ h.walkAndCount(2);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanNewCapMiddle) {
- CollscanHelper h(1);
- h.insert(h.md->capExtent(), 0 );
- h.md->setCapFirstNewRecord(&h.txn, h.insert( h.md->capExtent(), 1 ) );
- h.insert( h.md->capExtent(), 2 );
- h.walkAndCount(3);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanNewCapMiddle) {
+ CollscanHelper h(1);
+ h.insert(h.md->capExtent(), 0);
+ h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 1));
+ h.insert(h.md->capExtent(), 2);
+ h.walkAndCount(3);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanFirstExtent) {
- CollscanHelper h(2);
- h.insert(h.md->capExtent(), 0 );
- h.insert(h.md->lastExtent(&h.txn), 1 );
- h.md->setCapFirstNewRecord(&h.txn, h.insert( h.md->capExtent(), 2 ) );
- h.insert( h.md->capExtent(), 3 );
- h.walkAndCount(4);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanFirstExtent) {
+ CollscanHelper h(2);
+ h.insert(h.md->capExtent(), 0);
+ h.insert(h.md->lastExtent(&h.txn), 1);
+ h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.insert(h.md->capExtent(), 3);
+ h.walkAndCount(4);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanLastExtent) {
- CollscanHelper h(2);
- h.md->setCapExtent( &h.txn, h.md->lastExtent(&h.txn) );
- h.insert( h.md->capExtent(), 0 );
- h.insert( h.md->firstExtent(&h.txn), 1 );
- h.md->setCapFirstNewRecord( &h.txn, h.insert( h.md->capExtent(), 2 ) );
- h.insert( h.md->capExtent(), 3 );
- h.walkAndCount(4);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanLastExtent) {
+ CollscanHelper h(2);
+ h.md->setCapExtent(&h.txn, h.md->lastExtent(&h.txn));
+ h.insert(h.md->capExtent(), 0);
+ h.insert(h.md->firstExtent(&h.txn), 1);
+ h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.insert(h.md->capExtent(), 3);
+ h.walkAndCount(4);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanMidExtent) {
- CollscanHelper h(3);
- h.md->setCapExtent( &h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext );
- h.insert( h.md->capExtent(), 0 );
- h.insert( h.md->lastExtent(&h.txn), 1 );
- h.insert( h.md->firstExtent(&h.txn), 2 );
- h.md->setCapFirstNewRecord( &h.txn, h.insert( h.md->capExtent(), 3 ) );
- h.insert( h.md->capExtent(), 4 );
- h.walkAndCount(5);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanMidExtent) {
+ CollscanHelper h(3);
+ h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
+ h.insert(h.md->capExtent(), 0);
+ h.insert(h.md->lastExtent(&h.txn), 1);
+ h.insert(h.md->firstExtent(&h.txn), 2);
+ h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 3));
+ h.insert(h.md->capExtent(), 4);
+ h.walkAndCount(5);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanAloneInExtent) {
- CollscanHelper h(3);
- h.md->setCapExtent( &h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext );
- h.insert( h.md->lastExtent(&h.txn), 0 );
- h.insert( h.md->firstExtent(&h.txn), 1 );
- h.md->setCapFirstNewRecord( &h.txn, h.insert( h.md->capExtent(), 2 ) );
- h.walkAndCount(3);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanAloneInExtent) {
+ CollscanHelper h(3);
+ h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
+ h.insert(h.md->lastExtent(&h.txn), 0);
+ h.insert(h.md->firstExtent(&h.txn), 1);
+ h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.walkAndCount(3);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanFirstInExtent) {
- CollscanHelper h(3);
- h.md->setCapExtent( &h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext );
- h.insert( h.md->lastExtent(&h.txn), 0 );
- h.insert( h.md->firstExtent(&h.txn), 1 );
- h.md->setCapFirstNewRecord( &h.txn, h.insert( h.md->capExtent(), 2 ) );
- h.insert( h.md->capExtent(), 3 );
- h.walkAndCount(4);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanFirstInExtent) {
+ CollscanHelper h(3);
+ h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
+ h.insert(h.md->lastExtent(&h.txn), 0);
+ h.insert(h.md->firstExtent(&h.txn), 1);
+ h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 2));
+ h.insert(h.md->capExtent(), 3);
+ h.walkAndCount(4);
+}
- TEST(CappedRecordStoreV1QueryStage, CollscanLastInExtent) {
- CollscanHelper h(3);
- h.md->setCapExtent( &h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext );
- h.insert( h.md->capExtent(), 0 );
- h.insert( h.md->lastExtent(&h.txn), 1 );
- h.insert( h.md->firstExtent(&h.txn), 2 );
- h.md->setCapFirstNewRecord( &h.txn, h.insert( h.md->capExtent(), 3 ) );
- h.walkAndCount(4);
- }
+TEST(CappedRecordStoreV1QueryStage, CollscanLastInExtent) {
+ CollscanHelper h(3);
+ h.md->setCapExtent(&h.txn, h.em.getExtent(h.md->firstExtent(&h.txn))->xnext);
+ h.insert(h.md->capExtent(), 0);
+ h.insert(h.md->lastExtent(&h.txn), 1);
+ h.insert(h.md->firstExtent(&h.txn), 2);
+ h.md->setCapFirstNewRecord(&h.txn, h.insert(h.md->capExtent(), 3));
+ h.walkAndCount(4);
+}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
index a4cb9977fe3..728f07d6013 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.cpp
@@ -38,49 +38,47 @@
namespace mongo {
- using std::endl;
-
- RecordStoreV1RepairCursor::RecordStoreV1RepairCursor(OperationContext* txn,
- const RecordStoreV1Base* recordStore)
- : _txn(txn), _recordStore(recordStore), _stage(FORWARD_SCAN) {
-
- // Position the iterator at the first record
- //
- advance();
- }
-
- boost::optional<Record> RecordStoreV1RepairCursor::next() {
- if (_currRecord.isNull()) return {};
- auto out = _currRecord.toRecordId();
- advance();
- return {{out, _recordStore->dataFor(_txn, out)}};
- }
-
- boost::optional<Record> RecordStoreV1RepairCursor::seekExact(const RecordId& id) {
- invariant(!"seekExact not supported");
- }
-
- void RecordStoreV1RepairCursor::advance() {
- const ExtentManager* em = _recordStore->_extentManager;
-
- while (true) {
- if (_currRecord.isNull()) {
-
- if (!_advanceToNextValidExtent()) {
- return;
- }
+using std::endl;
+
+RecordStoreV1RepairCursor::RecordStoreV1RepairCursor(OperationContext* txn,
+ const RecordStoreV1Base* recordStore)
+ : _txn(txn), _recordStore(recordStore), _stage(FORWARD_SCAN) {
+ // Position the iterator at the first record
+ //
+ advance();
+}
+
+boost::optional<Record> RecordStoreV1RepairCursor::next() {
+ if (_currRecord.isNull())
+ return {};
+ auto out = _currRecord.toRecordId();
+ advance();
+ return {{out, _recordStore->dataFor(_txn, out)}};
+}
+
+boost::optional<Record> RecordStoreV1RepairCursor::seekExact(const RecordId& id) {
+ invariant(!"seekExact not supported");
+}
+
+void RecordStoreV1RepairCursor::advance() {
+ const ExtentManager* em = _recordStore->_extentManager;
+
+ while (true) {
+ if (_currRecord.isNull()) {
+ if (!_advanceToNextValidExtent()) {
+ return;
+ }
- _seenInCurrentExtent.clear();
+ _seenInCurrentExtent.clear();
- // Otherwise _advanceToNextValidExtent would have returned false
- //
- invariant(!_currExtent.isNull());
+ // Otherwise _advanceToNextValidExtent would have returned false
+ //
+ invariant(!_currExtent.isNull());
- const Extent* e = em->getExtent(_currExtent, false);
- _currRecord = (FORWARD_SCAN == _stage ? e->firstRecord : e->lastRecord);
- }
- else {
- switch (_stage) {
+ const Extent* e = em->getExtent(_currExtent, false);
+ _currRecord = (FORWARD_SCAN == _stage ? e->firstRecord : e->lastRecord);
+ } else {
+ switch (_stage) {
case FORWARD_SCAN:
_currRecord = _recordStore->getNextRecordInExtent(_txn, _currRecord);
break;
@@ -90,37 +88,37 @@ namespace mongo {
default:
invariant(!"This should never be reached.");
break;
- }
- }
-
- if (_currRecord.isNull()) {
- continue;
}
+ }
- // Validate the contents of the record's disk location and deduplicate
- //
- if (!_seenInCurrentExtent.insert(_currRecord).second) {
- error() << "infinite loop in extent, seen: " << _currRecord << " before" << endl;
- _currRecord = DiskLoc();
- continue;
- }
+ if (_currRecord.isNull()) {
+ continue;
+ }
- if (_currRecord.getOfs() <= 0){
- error() << "offset is 0 for record which should be impossible" << endl;
- _currRecord = DiskLoc();
- continue;
- }
+ // Validate the contents of the record's disk location and deduplicate
+ //
+ if (!_seenInCurrentExtent.insert(_currRecord).second) {
+ error() << "infinite loop in extent, seen: " << _currRecord << " before" << endl;
+ _currRecord = DiskLoc();
+ continue;
+ }
- return;
+ if (_currRecord.getOfs() <= 0) {
+ error() << "offset is 0 for record which should be impossible" << endl;
+ _currRecord = DiskLoc();
+ continue;
}
+
+ return;
}
+}
- bool RecordStoreV1RepairCursor::_advanceToNextValidExtent() {
- const ExtentManager* em = _recordStore->_extentManager;
+bool RecordStoreV1RepairCursor::_advanceToNextValidExtent() {
+ const ExtentManager* em = _recordStore->_extentManager;
- while (true) {
- if (_currExtent.isNull()) {
- switch (_stage) {
+ while (true) {
+ if (_currExtent.isNull()) {
+ switch (_stage) {
case FORWARD_SCAN:
_currExtent = _recordStore->details()->firstExtent(_txn);
break;
@@ -130,35 +128,34 @@ namespace mongo {
default:
invariant(DONE == _stage);
return false;
- }
- }
- else {
- // If _currExtent is not NULL, then it must point to a valid extent, so no extra
- // checks here.
- //
- const Extent* e = em->getExtent(_currExtent, false);
- _currExtent = (FORWARD_SCAN == _stage ? e->xnext : e->xprev);
}
-
- bool hasNextExtent = !_currExtent.isNull();
-
- // Sanity checks for the extent's disk location
+ } else {
+ // If _currExtent is not NULL, then it must point to a valid extent, so no extra
+ // checks here.
//
- if (hasNextExtent && (!_currExtent.isValid() || (_currExtent.getOfs() < 0))) {
- error() << "Invalid extent location: " << _currExtent << endl;
+ const Extent* e = em->getExtent(_currExtent, false);
+ _currExtent = (FORWARD_SCAN == _stage ? e->xnext : e->xprev);
+ }
- // Switch the direction of scan
- //
- hasNextExtent = false;
- }
+ bool hasNextExtent = !_currExtent.isNull();
- if (hasNextExtent) {
- break;
- }
+ // Sanity checks for the extent's disk location
+ //
+ if (hasNextExtent && (!_currExtent.isValid() || (_currExtent.getOfs() < 0))) {
+ error() << "Invalid extent location: " << _currExtent << endl;
- // Swap the direction of scan and loop again
+ // Switch the direction of scan
//
- switch (_stage) {
+ hasNextExtent = false;
+ }
+
+ if (hasNextExtent) {
+ break;
+ }
+
+ // Swap the direction of scan and loop again
+ //
+ switch (_stage) {
case FORWARD_SCAN:
_stage = BACKWARD_SCAN;
break;
@@ -168,49 +165,48 @@ namespace mongo {
default:
invariant(!"This should never be reached.");
break;
- }
-
- _currExtent = DiskLoc();
}
+ _currExtent = DiskLoc();
+ }
- // Check _currExtent's contents for validity, but do not count is as failure if they
- // don't check out.
- //
- const Extent* e = em->getExtent(_currExtent, false);
- if (!e->isOk()){
- warning() << "Extent not ok magic: " << e->magic << " going to try to continue"
- << endl;
- }
-
- log() << (FORWARD_SCAN == _stage ? "FORWARD" : "BACKWARD") << " Extent loc: "
- << _currExtent << ", length: " << e->length << endl;
- return true;
+    // Check _currExtent's contents for validity, but do not count it as a failure if they
+    // don't check out.
+ //
+ const Extent* e = em->getExtent(_currExtent, false);
+ if (!e->isOk()) {
+ warning() << "Extent not ok magic: " << e->magic << " going to try to continue" << endl;
}
- void RecordStoreV1RepairCursor::invalidate(const RecordId& id) {
- // If we see this record again it probably means it was reinserted rather than an infinite
- // loop. If we do loop, we should quickly hit another seen record that hasn't been
- // invalidated.
- DiskLoc dl = DiskLoc::fromRecordId(id);
- _seenInCurrentExtent.erase(dl);
+ log() << (FORWARD_SCAN == _stage ? "FORWARD" : "BACKWARD") << " Extent loc: " << _currExtent
+ << ", length: " << e->length << endl;
- if (_currRecord == dl) {
- // The DiskLoc being invalidated is also the one pointed at by this iterator. We
- // advance the iterator so it's not pointing at invalid data.
- advance();
+ return true;
+}
- if (_currRecord == dl) {
- // Even after advancing the iterator, we're still pointing at the DiskLoc being
- // invalidated. This is expected when 'dl' is the last DiskLoc in the FORWARD scan,
- // and the initial call to getNext() moves the iterator to the first loc in the
- // BACKWARDS scan.
- advance();
- }
+void RecordStoreV1RepairCursor::invalidate(const RecordId& id) {
+ // If we see this record again it probably means it was reinserted rather than an infinite
+ // loop. If we do loop, we should quickly hit another seen record that hasn't been
+ // invalidated.
+ DiskLoc dl = DiskLoc::fromRecordId(id);
+ _seenInCurrentExtent.erase(dl);
+
+ if (_currRecord == dl) {
+ // The DiskLoc being invalidated is also the one pointed at by this iterator. We
+ // advance the iterator so it's not pointing at invalid data.
+ advance();
- invariant(_currRecord != dl);
+ if (_currRecord == dl) {
+ // Even after advancing the iterator, we're still pointing at the DiskLoc being
+ // invalidated. This is expected when 'dl' is the last DiskLoc in the FORWARD scan,
+ // and the initial call to getNext() moves the iterator to the first loc in the
+ // BACKWARDS scan.
+ advance();
}
+
+ invariant(_currRecord != dl);
}
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
index 6b93ad5941a..def5178ad8e 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
@@ -35,63 +35,60 @@
namespace mongo {
+/**
+ * This iterator will go over the collection twice - once going forward (first extent -> last
+ * extent) and once backwards in an attempt to salvage potentially corrupted or unreachable
+ * records. It is used by the mongodump --repair option.
+ */
+class RecordStoreV1RepairCursor final : public RecordCursor {
+public:
+ RecordStoreV1RepairCursor(OperationContext* txn, const RecordStoreV1Base* recordStore);
+
+ boost::optional<Record> next() final;
+ boost::optional<Record> seekExact(const RecordId& id) final;
+ void invalidate(const RecordId& dl);
+ void savePositioned() final {
+ _txn = nullptr;
+ }
+ bool restore(OperationContext* txn) final {
+ _txn = txn;
+ return true;
+ }
+
+ // Explicitly not supporting fetcherForNext(). The expected use case for this class is a
+ // special offline operation where there are no concurrent operations, so it would be better
+ // to take the pagefault inline with the operation.
+
+private:
+ void advance();
+
/**
- * This iterator will go over the collection twice - once going forward (first extent -> last
- * extent) and once backwards in an attempt to salvage potentially corrupted or unreachable
- * records. It is used by the mongodump --repair option.
+ * Based on the direction of scan, finds the next valid (un-corrupted) extent in the chain
+ * and sets _currExtent to point to that.
+ *
+ * @return true if valid extent was found (_currExtent will not be null)
+ * false otherwise and _currExtent will be null
*/
- class RecordStoreV1RepairCursor final : public RecordCursor {
- public:
- RecordStoreV1RepairCursor(OperationContext* txn,
- const RecordStoreV1Base* recordStore);
-
- boost::optional<Record> next() final;
- boost::optional<Record> seekExact(const RecordId& id) final;
- void invalidate(const RecordId& dl);
- void savePositioned() final { _txn = nullptr; }
- bool restore(OperationContext* txn) final {
- _txn = txn;
- return true;
- }
-
- // Explicitly not supporting fetcherForNext(). The expected use case for this class is a
- // special offline operation where there are no concurrent operations, so it would be better
- // to take the pagefault inline with the operation.
-
- private:
- void advance();
-
- /**
- * Based on the direction of scan, finds the next valid (un-corrupted) extent in the chain
- * and sets _currExtent to point to that.
- *
- * @return true if valid extent was found (_currExtent will not be null)
- * false otherwise and _currExtent will be null
- */
- bool _advanceToNextValidExtent();
-
- // transactional context for read locks. Not owned by us
- OperationContext* _txn;
-
- // Reference to the owning RecordStore. The store must not be deleted while there are
- // active iterators on it.
- //
- const RecordStoreV1Base* _recordStore;
-
- DiskLoc _currExtent;
- DiskLoc _currRecord;
-
- enum Stage {
- FORWARD_SCAN = 0,
- BACKWARD_SCAN = 1,
- DONE = 2
- };
-
- Stage _stage;
-
- // Used to find cycles within an extent. Cleared after each extent has been processed.
- //
- std::set<DiskLoc> _seenInCurrentExtent;
- };
+ bool _advanceToNextValidExtent();
+
+ // transactional context for read locks. Not owned by us
+ OperationContext* _txn;
+
+ // Reference to the owning RecordStore. The store must not be deleted while there are
+ // active iterators on it.
+ //
+ const RecordStoreV1Base* _recordStore;
+
+ DiskLoc _currExtent;
+ DiskLoc _currRecord;
+
+ enum Stage { FORWARD_SCAN = 0, BACKWARD_SCAN = 1, DONE = 2 };
+
+ Stage _stage;
+
+ // Used to find cycles within an extent. Cleared after each extent has been processed.
+ //
+ std::set<DiskLoc> _seenInCurrentExtent;
+};
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index 029883254bd..5948553b9af 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -53,447 +53,431 @@
namespace mongo {
- using std::endl;
- using std::vector;
-
- static Counter64 freelistAllocs;
- static Counter64 freelistBucketExhausted;
- static Counter64 freelistIterations;
-
- // TODO figure out what to do about these.
- static ServerStatusMetricField<Counter64> dFreelist1( "storage.freelist.search.requests",
- &freelistAllocs );
-
- static ServerStatusMetricField<Counter64> dFreelist2( "storage.freelist.search.bucketExhausted",
- &freelistBucketExhausted );
-
- static ServerStatusMetricField<Counter64> dFreelist3( "storage.freelist.search.scanned",
- &freelistIterations );
-
- SimpleRecordStoreV1::SimpleRecordStoreV1( OperationContext* txn,
- StringData ns,
- RecordStoreV1MetaData* details,
- ExtentManager* em,
- bool isSystemIndexes )
- : RecordStoreV1Base( ns, details, em, isSystemIndexes ) {
-
- invariant( !details->isCapped() );
- _normalCollection = NamespaceString::normal( ns );
- }
+using std::endl;
+using std::vector;
+
+static Counter64 freelistAllocs;
+static Counter64 freelistBucketExhausted;
+static Counter64 freelistIterations;
+
+// TODO figure out what to do about these.
+static ServerStatusMetricField<Counter64> dFreelist1("storage.freelist.search.requests",
+ &freelistAllocs);
+
+static ServerStatusMetricField<Counter64> dFreelist2("storage.freelist.search.bucketExhausted",
+ &freelistBucketExhausted);
+
+static ServerStatusMetricField<Counter64> dFreelist3("storage.freelist.search.scanned",
+ &freelistIterations);
+
+SimpleRecordStoreV1::SimpleRecordStoreV1(OperationContext* txn,
+ StringData ns,
+ RecordStoreV1MetaData* details,
+ ExtentManager* em,
+ bool isSystemIndexes)
+ : RecordStoreV1Base(ns, details, em, isSystemIndexes) {
+ invariant(!details->isCapped());
+ _normalCollection = NamespaceString::normal(ns);
+}
- SimpleRecordStoreV1::~SimpleRecordStoreV1() {
+SimpleRecordStoreV1::~SimpleRecordStoreV1() {}
+
+DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents(OperationContext* txn, int lenToAllocRaw) {
+ // Slowly drain the deletedListLegacyGrabBag by popping one record off and putting it in the
+ // correct deleted list each time we try to allocate a new record. This ensures we won't
+ // orphan any data when upgrading from old versions, without needing a long upgrade phase.
+ // This is done before we try to allocate the new record so we can take advantage of the new
+ // space immediately.
+ {
+ const DiskLoc head = _details->deletedListLegacyGrabBag();
+ if (!head.isNull()) {
+ _details->setDeletedListLegacyGrabBag(txn, drec(head)->nextDeleted());
+ addDeletedRec(txn, head);
+ }
}
- DiskLoc SimpleRecordStoreV1::_allocFromExistingExtents( OperationContext* txn,
- int lenToAllocRaw ) {
-
- // Slowly drain the deletedListLegacyGrabBag by popping one record off and putting it in the
- // correct deleted list each time we try to allocate a new record. This ensures we won't
- // orphan any data when upgrading from old versions, without needing a long upgrade phase.
- // This is done before we try to allocate the new record so we can take advantage of the new
- // space immediately.
- {
- const DiskLoc head = _details->deletedListLegacyGrabBag();
- if (!head.isNull()) {
- _details->setDeletedListLegacyGrabBag(txn, drec(head)->nextDeleted());
- addDeletedRec(txn, head);
+ // align size up to a multiple of 4
+ const int lenToAlloc = (lenToAllocRaw + (4 - 1)) & ~(4 - 1);
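+    // e.g. a raw length of 13 rounds up to (13 + 3) & ~3 == 16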
+
+ freelistAllocs.increment();
+ DiskLoc loc;
+ DeletedRecord* dr = NULL;
+ {
+ int myBucket;
+ for (myBucket = bucket(lenToAlloc); myBucket < Buckets; myBucket++) {
+ // Only look at the first entry in each bucket. This works because we are either
+ // quantizing or allocating fixed-size blocks.
+ const DiskLoc head = _details->deletedListEntry(myBucket);
+ if (head.isNull())
+ continue;
+ DeletedRecord* const candidate = drec(head);
+ if (candidate->lengthWithHeaders() >= lenToAlloc) {
+ loc = head;
+ dr = candidate;
+ break;
}
}
- // align size up to a multiple of 4
- const int lenToAlloc = (lenToAllocRaw + (4-1)) & ~(4-1);
+ if (!dr)
+ return DiskLoc(); // no space
- freelistAllocs.increment();
- DiskLoc loc;
- DeletedRecord* dr = NULL;
- {
-
- int myBucket;
- for (myBucket = bucket(lenToAlloc); myBucket < Buckets; myBucket++) {
- // Only look at the first entry in each bucket. This works because we are either
- // quantizing or allocating fixed-size blocks.
- const DiskLoc head = _details->deletedListEntry(myBucket);
- if (head.isNull()) continue;
- DeletedRecord* const candidate = drec(head);
- if (candidate->lengthWithHeaders() >= lenToAlloc) {
- loc = head;
- dr = candidate;
- break;
- }
- }
-
- if (!dr)
- return DiskLoc(); // no space
-
- // Unlink ourself from the deleted list
- _details->setDeletedListEntry(txn, myBucket, dr->nextDeleted());
- *txn->recoveryUnit()->writing(&dr->nextDeleted()) = DiskLoc().setInvalid(); // defensive
- }
+ // Unlink ourself from the deleted list
+ _details->setDeletedListEntry(txn, myBucket, dr->nextDeleted());
+ *txn->recoveryUnit()->writing(&dr->nextDeleted()) = DiskLoc().setInvalid(); // defensive
+ }
- invariant( dr->extentOfs() < loc.getOfs() );
+ invariant(dr->extentOfs() < loc.getOfs());
- // Split the deleted record if it has at least as much left over space as our smallest
- // allocation size. Otherwise, just take the whole DeletedRecord.
- const int remainingLength = dr->lengthWithHeaders() - lenToAlloc;
- if (remainingLength >= bucketSizes[0]) {
- txn->recoveryUnit()->writingInt(dr->lengthWithHeaders()) = lenToAlloc;
- const DiskLoc newDelLoc = DiskLoc(loc.a(), loc.getOfs() + lenToAlloc);
- DeletedRecord* newDel = txn->recoveryUnit()->writing(drec(newDelLoc));
- newDel->extentOfs() = dr->extentOfs();
- newDel->lengthWithHeaders() = remainingLength;
- newDel->nextDeleted().Null();
+ // Split the deleted record if it has at least as much left over space as our smallest
+ // allocation size. Otherwise, just take the whole DeletedRecord.
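+    // For example, a 256-byte DeletedRecord serving a 96-byte allocation leaves a 160-byte
+    // remainder that is put back on the freelist (assuming 160 >= bucketSizes[0]).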
+ const int remainingLength = dr->lengthWithHeaders() - lenToAlloc;
+ if (remainingLength >= bucketSizes[0]) {
+ txn->recoveryUnit()->writingInt(dr->lengthWithHeaders()) = lenToAlloc;
+ const DiskLoc newDelLoc = DiskLoc(loc.a(), loc.getOfs() + lenToAlloc);
+ DeletedRecord* newDel = txn->recoveryUnit()->writing(drec(newDelLoc));
+ newDel->extentOfs() = dr->extentOfs();
+ newDel->lengthWithHeaders() = remainingLength;
+ newDel->nextDeleted().Null();
- addDeletedRec(txn, newDelLoc);
- }
-
- return loc;
+ addDeletedRec(txn, newDelLoc);
}
- StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord( OperationContext* txn,
- int lengthWithHeaders,
- bool enforceQuota ) {
- if (lengthWithHeaders > MaxAllowedAllocation) {
- return StatusWith<DiskLoc>(
- ErrorCodes::InvalidLength,
- str::stream() << "Attempting to allocate a record larger than maximum size: "
- << lengthWithHeaders << " > 16.5MB");
- }
+ return loc;
+}
- DiskLoc loc = _allocFromExistingExtents( txn, lengthWithHeaders );
- if ( !loc.isNull() )
- return StatusWith<DiskLoc>( loc );
+StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
+ int lengthWithHeaders,
+ bool enforceQuota) {
+ if (lengthWithHeaders > MaxAllowedAllocation) {
+ return StatusWith<DiskLoc>(
+ ErrorCodes::InvalidLength,
+ str::stream() << "Attempting to allocate a record larger than maximum size: "
+ << lengthWithHeaders << " > 16.5MB");
+ }
- LOG(1) << "allocating new extent";
+ DiskLoc loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ if (!loc.isNull())
+ return StatusWith<DiskLoc>(loc);
- increaseStorageSize( txn,
- _extentManager->followupSize( lengthWithHeaders,
- _details->lastExtentSize(txn)),
- enforceQuota );
+ LOG(1) << "allocating new extent";
- loc = _allocFromExistingExtents( txn, lengthWithHeaders );
- if ( !loc.isNull() ) {
- // got on first try
- return StatusWith<DiskLoc>( loc );
- }
+ increaseStorageSize(
+ txn,
+ _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(txn)),
+ enforceQuota);
- log() << "warning: alloc() failed after allocating new extent. "
- << "lengthWithHeaders: " << lengthWithHeaders << " last extent size:"
- << _details->lastExtentSize(txn) << "; trying again";
+ loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ if (!loc.isNull()) {
+ // got on first try
+ return StatusWith<DiskLoc>(loc);
+ }
- for ( int z = 0; z < 10 && lengthWithHeaders > _details->lastExtentSize(txn); z++ ) {
- log() << "try #" << z << endl;
+ log() << "warning: alloc() failed after allocating new extent. "
+ << "lengthWithHeaders: " << lengthWithHeaders
+ << " last extent size:" << _details->lastExtentSize(txn) << "; trying again";
- increaseStorageSize( txn,
- _extentManager->followupSize( lengthWithHeaders,
- _details->lastExtentSize(txn)),
- enforceQuota );
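+    // Keep growing the collection, up to 10 attempts, until the newest extent is large
+    // enough for the record, retrying the allocation after each growth.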
+ for (int z = 0; z < 10 && lengthWithHeaders > _details->lastExtentSize(txn); z++) {
+ log() << "try #" << z << endl;
- loc = _allocFromExistingExtents( txn, lengthWithHeaders );
- if ( ! loc.isNull() )
- return StatusWith<DiskLoc>( loc );
- }
+ increaseStorageSize(
+ txn,
+ _extentManager->followupSize(lengthWithHeaders, _details->lastExtentSize(txn)),
+ enforceQuota);
- return StatusWith<DiskLoc>( ErrorCodes::InternalError, "cannot allocate space" );
+ loc = _allocFromExistingExtents(txn, lengthWithHeaders);
+ if (!loc.isNull())
+ return StatusWith<DiskLoc>(loc);
}
- Status SimpleRecordStoreV1::truncate(OperationContext* txn) {
- const DiskLoc firstExtLoc = _details->firstExtent(txn);
- if (firstExtLoc.isNull() || !firstExtLoc.isValid()) {
- // Already empty
- return Status::OK();
- }
-
- // Free all extents except the first.
- Extent* firstExt = _extentManager->getExtent(firstExtLoc);
- if (!firstExt->xnext.isNull()) {
- const DiskLoc extNextLoc = firstExt->xnext;
- const DiskLoc oldLastExtLoc = _details->lastExtent(txn);
- Extent* const nextExt = _extentManager->getExtent(extNextLoc);
+ return StatusWith<DiskLoc>(ErrorCodes::InternalError, "cannot allocate space");
+}
- // Unlink other extents;
- *txn->recoveryUnit()->writing(&nextExt->xprev) = DiskLoc();
- *txn->recoveryUnit()->writing(&firstExt->xnext) = DiskLoc();
- _details->setLastExtent(txn, firstExtLoc);
- _details->setLastExtentSize(txn, firstExt->length);
+Status SimpleRecordStoreV1::truncate(OperationContext* txn) {
+ const DiskLoc firstExtLoc = _details->firstExtent(txn);
+ if (firstExtLoc.isNull() || !firstExtLoc.isValid()) {
+ // Already empty
+ return Status::OK();
+ }
- _extentManager->freeExtents(txn, extNextLoc, oldLastExtLoc);
- }
+ // Free all extents except the first.
+ Extent* firstExt = _extentManager->getExtent(firstExtLoc);
+ if (!firstExt->xnext.isNull()) {
+ const DiskLoc extNextLoc = firstExt->xnext;
+ const DiskLoc oldLastExtLoc = _details->lastExtent(txn);
+ Extent* const nextExt = _extentManager->getExtent(extNextLoc);
- // Make the first (now only) extent a single large deleted record.
- *txn->recoveryUnit()->writing(&firstExt->firstRecord) = DiskLoc();
- *txn->recoveryUnit()->writing(&firstExt->lastRecord) = DiskLoc();
- _details->orphanDeletedList(txn);
- addDeletedRec(txn, _findFirstSpot(txn, firstExtLoc, firstExt));
+        // Unlink the other extents.
+ *txn->recoveryUnit()->writing(&nextExt->xprev) = DiskLoc();
+ *txn->recoveryUnit()->writing(&firstExt->xnext) = DiskLoc();
+ _details->setLastExtent(txn, firstExtLoc);
+ _details->setLastExtentSize(txn, firstExt->length);
- // Make stats reflect that there are now no documents in this record store.
- _details->setStats(txn, 0, 0);
-
- return Status::OK();
+ _extentManager->freeExtents(txn, extNextLoc, oldLastExtLoc);
}
- void SimpleRecordStoreV1::addDeletedRec( OperationContext* txn, const DiskLoc& dloc ) {
- DeletedRecord* d = drec( dloc );
+ // Make the first (now only) extent a single large deleted record.
+ *txn->recoveryUnit()->writing(&firstExt->firstRecord) = DiskLoc();
+ *txn->recoveryUnit()->writing(&firstExt->lastRecord) = DiskLoc();
+ _details->orphanDeletedList(txn);
+ addDeletedRec(txn, _findFirstSpot(txn, firstExtLoc, firstExt));
- int b = bucket(d->lengthWithHeaders());
- *txn->recoveryUnit()->writing(&d->nextDeleted()) = _details->deletedListEntry(b);
- _details->setDeletedListEntry(txn, b, dloc);
- }
+ // Make stats reflect that there are now no documents in this record store.
+ _details->setStats(txn, 0, 0);
- std::unique_ptr<RecordCursor> SimpleRecordStoreV1::getCursor(OperationContext* txn,
- bool forward) const {
- return stdx::make_unique<SimpleRecordStoreV1Iterator>( txn, this, forward );
- }
+ return Status::OK();
+}
- vector<std::unique_ptr<RecordCursor>> SimpleRecordStoreV1::getManyCursors(
- OperationContext* txn) const {
- vector<std::unique_ptr<RecordCursor>> cursors;
- const Extent* ext;
- for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
- ext = _getExtent(txn, extLoc);
- if (ext->firstRecord.isNull())
- continue;
- cursors.push_back(
- stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(txn,
- ext->firstRecord,
- this));
- }
+void SimpleRecordStoreV1::addDeletedRec(OperationContext* txn, const DiskLoc& dloc) {
+ DeletedRecord* d = drec(dloc);
- return cursors;
- }
+ int b = bucket(d->lengthWithHeaders());
+ *txn->recoveryUnit()->writing(&d->nextDeleted()) = _details->deletedListEntry(b);
+ _details->setDeletedListEntry(txn, b, dloc);
+}
- class CompactDocWriter : public DocWriter {
- public:
- /**
- * param allocationSize - allocation size WITH header
- */
- CompactDocWriter( const MmapV1RecordHeader* rec, unsigned dataSize, size_t allocationSize )
- : _rec( rec ),
- _dataSize( dataSize ),
- _allocationSize( allocationSize ) {
- }
+std::unique_ptr<RecordCursor> SimpleRecordStoreV1::getCursor(OperationContext* txn,
+ bool forward) const {
+ return stdx::make_unique<SimpleRecordStoreV1Iterator>(txn, this, forward);
+}
- virtual ~CompactDocWriter() {}
+vector<std::unique_ptr<RecordCursor>> SimpleRecordStoreV1::getManyCursors(
+ OperationContext* txn) const {
+ vector<std::unique_ptr<RecordCursor>> cursors;
+ const Extent* ext;
+ for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
+ ext = _getExtent(txn, extLoc);
+ if (ext->firstRecord.isNull())
+ continue;
+ cursors.push_back(
+ stdx::make_unique<RecordStoreV1Base::IntraExtentIterator>(txn, ext->firstRecord, this));
+ }
- virtual void writeDocument( char* buf ) const {
- memcpy( buf, _rec->data(), _dataSize );
- }
+ return cursors;
+}
- virtual size_t documentSize() const {
- return _allocationSize - MmapV1RecordHeader::HeaderSize;
- }
+class CompactDocWriter : public DocWriter {
+public:
+ /**
+ * param allocationSize - allocation size WITH header
+ */
+ CompactDocWriter(const MmapV1RecordHeader* rec, unsigned dataSize, size_t allocationSize)
+ : _rec(rec), _dataSize(dataSize), _allocationSize(allocationSize) {}
- virtual bool addPadding() const {
- return false;
- }
+ virtual ~CompactDocWriter() {}
- private:
- const MmapV1RecordHeader* _rec;
- size_t _dataSize;
- size_t _allocationSize;
- };
+ virtual void writeDocument(char* buf) const {
+ memcpy(buf, _rec->data(), _dataSize);
+ }
- void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
- const DiskLoc extentLoc,
- int extentNumber,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* compactOptions,
- CompactStats* stats ) {
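+    // Report the padded allocation size (minus the record header) so the insert path
+    // reserves the full 'allocationSize', even though writeDocument() copies only '_dataSize'.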
+ virtual size_t documentSize() const {
+ return _allocationSize - MmapV1RecordHeader::HeaderSize;
+ }
- log() << "compact begin extent #" << extentNumber
- << " for namespace " << _ns << " " << extentLoc;
+ virtual bool addPadding() const {
+ return false;
+ }
- unsigned oldObjSize = 0; // we'll report what the old padding was
- unsigned oldObjSizeWithPadding = 0;
+private:
+ const MmapV1RecordHeader* _rec;
+ size_t _dataSize;
+ size_t _allocationSize;
+};
- Extent* const sourceExtent = _extentManager->getExtent( extentLoc );
- sourceExtent->assertOk();
- fassert( 17437, sourceExtent->validates(extentLoc) );
+void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
+ const DiskLoc extentLoc,
+ int extentNumber,
+ RecordStoreCompactAdaptor* adaptor,
+ const CompactOptions* compactOptions,
+ CompactStats* stats) {
+ log() << "compact begin extent #" << extentNumber << " for namespace " << _ns << " "
+ << extentLoc;
+
+ unsigned oldObjSize = 0; // we'll report what the old padding was
+ unsigned oldObjSizeWithPadding = 0;
+
+ Extent* const sourceExtent = _extentManager->getExtent(extentLoc);
+ sourceExtent->assertOk();
+ fassert(17437, sourceExtent->validates(extentLoc));
+
+ {
+ // The next/prev MmapV1RecordHeader pointers within the Extent might not be in order so we first
+ // page in the whole Extent sequentially.
+ // TODO benchmark on slow storage to verify this is measurably faster.
+ log() << "compact paging in len=" << sourceExtent->length / 1000000.0 << "MB" << endl;
+ Timer t;
+ size_t length = sourceExtent->length;
+
+ touch_pages(reinterpret_cast<const char*>(sourceExtent), length);
+ int ms = t.millis();
+ if (ms > 1000)
+ log() << "compact end paging in " << ms << "ms "
+ << sourceExtent->length / 1000000.0 / t.seconds() << "MB/sec" << endl;
+ }
- {
- // The next/prev MmapV1RecordHeader pointers within the Extent might not be in order so we first
- // page in the whole Extent sequentially.
- // TODO benchmark on slow storage to verify this is measurably faster.
- log() << "compact paging in len=" << sourceExtent->length/1000000.0 << "MB" << endl;
- Timer t;
- size_t length = sourceExtent->length;
-
- touch_pages( reinterpret_cast<const char*>(sourceExtent), length );
- int ms = t.millis();
- if( ms > 1000 )
- log() << "compact end paging in " << ms << "ms "
- << sourceExtent->length/1000000.0/t.seconds() << "MB/sec" << endl;
- }
+ {
+ // Move each MmapV1RecordHeader out of this extent and insert it in to the "new" extents.
+ log() << "compact copying records" << endl;
+ long long totalNetSize = 0;
+ long long nrecords = 0;
+ DiskLoc nextSourceLoc = sourceExtent->firstRecord;
+ while (!nextSourceLoc.isNull()) {
+ txn->checkForInterrupt();
- {
- // Move each MmapV1RecordHeader out of this extent and insert it in to the "new" extents.
- log() << "compact copying records" << endl;
- long long totalNetSize = 0;
- long long nrecords = 0;
- DiskLoc nextSourceLoc = sourceExtent->firstRecord;
- while (!nextSourceLoc.isNull()) {
- txn->checkForInterrupt();
-
- WriteUnitOfWork wunit(txn);
- MmapV1RecordHeader* recOld = recordFor(nextSourceLoc);
- RecordData oldData = recOld->toRecordData();
- nextSourceLoc = getNextRecordInExtent(txn, nextSourceLoc);
-
- if ( compactOptions->validateDocuments && !adaptor->isDataValid( oldData ) ) {
- // object is corrupt!
- log() << "compact removing corrupt document!";
- stats->corruptDocuments++;
- }
- else {
- // How much data is in the record. Excludes padding and MmapV1RecordHeader headers.
- const unsigned rawDataSize = adaptor->dataSize( oldData );
-
- nrecords++;
- oldObjSize += rawDataSize;
- oldObjSizeWithPadding += recOld->netLength();
-
- // Allocation sizes include the headers and possibly some padding.
- const unsigned minAllocationSize = rawDataSize + MmapV1RecordHeader::HeaderSize;
- unsigned allocationSize = minAllocationSize;
- switch( compactOptions->paddingMode ) {
- case CompactOptions::NONE: // default padding
+ WriteUnitOfWork wunit(txn);
+ MmapV1RecordHeader* recOld = recordFor(nextSourceLoc);
+ RecordData oldData = recOld->toRecordData();
+ nextSourceLoc = getNextRecordInExtent(txn, nextSourceLoc);
+
+ if (compactOptions->validateDocuments && !adaptor->isDataValid(oldData)) {
+ // object is corrupt!
+ log() << "compact removing corrupt document!";
+ stats->corruptDocuments++;
+ } else {
+ // How much data is in the record. Excludes padding and MmapV1RecordHeader headers.
+ const unsigned rawDataSize = adaptor->dataSize(oldData);
+
+ nrecords++;
+ oldObjSize += rawDataSize;
+ oldObjSizeWithPadding += recOld->netLength();
+
+ // Allocation sizes include the headers and possibly some padding.
+ const unsigned minAllocationSize = rawDataSize + MmapV1RecordHeader::HeaderSize;
+ unsigned allocationSize = minAllocationSize;
+ switch (compactOptions->paddingMode) {
+ case CompactOptions::NONE: // default padding
if (shouldPadInserts()) {
allocationSize = quantizeAllocationSpace(minAllocationSize);
}
break;
- case CompactOptions::PRESERVE: // keep original padding
+ case CompactOptions::PRESERVE: // keep original padding
allocationSize = recOld->lengthWithHeaders();
break;
- case CompactOptions::MANUAL: // user specified how much padding to use
+ case CompactOptions::MANUAL: // user specified how much padding to use
allocationSize = compactOptions->computeRecordSize(minAllocationSize);
- if (allocationSize < minAllocationSize
- || allocationSize > BSONObjMaxUserSize / 2 ) {
+ if (allocationSize < minAllocationSize ||
+ allocationSize > BSONObjMaxUserSize / 2) {
allocationSize = minAllocationSize;
}
break;
- }
- invariant(allocationSize >= minAllocationSize);
-
- // Copy the data to a new record. Because we orphaned the record freelist at the
- // start of the compact, this insert will allocate a record in a new extent.
- // See the comment in compact() for more details.
- CompactDocWriter writer( recOld, rawDataSize, allocationSize );
- StatusWith<RecordId> status = insertRecord( txn, &writer, false );
- uassertStatusOK( status.getStatus() );
- const MmapV1RecordHeader* newRec = recordFor(DiskLoc::fromRecordId(status.getValue()));
- invariant(unsigned(newRec->netLength()) >= rawDataSize);
- totalNetSize += newRec->netLength();
-
- // Tells the caller that the record has been moved, so it can do things such as
- // add it to indexes.
- adaptor->inserted(newRec->toRecordData(), status.getValue());
- }
-
- // Remove the old record from the linked list of records withing the sourceExtent.
- // The old record is not added to the freelist as we will be freeing the whole
- // extent at the end.
- *txn->recoveryUnit()->writing(&sourceExtent->firstRecord) = nextSourceLoc;
- if (nextSourceLoc.isNull()) {
- // Just moved the last record out of the extent. Mark extent as empty.
- *txn->recoveryUnit()->writing(&sourceExtent->lastRecord) = DiskLoc();
}
- else {
- MmapV1RecordHeader* newFirstRecord = recordFor(nextSourceLoc);
- txn->recoveryUnit()->writingInt(newFirstRecord->prevOfs()) = DiskLoc::NullOfs;
- }
-
- // Adjust the stats to reflect the removal of the old record. The insert above
- // handled adjusting the stats for the new record.
- _details->incrementStats(txn, -(recOld->netLength()), -1);
-
- wunit.commit();
+ invariant(allocationSize >= minAllocationSize);
+
+ // Copy the data to a new record. Because we orphaned the record freelist at the
+ // start of the compact, this insert will allocate a record in a new extent.
+ // See the comment in compact() for more details.
+ CompactDocWriter writer(recOld, rawDataSize, allocationSize);
+ StatusWith<RecordId> status = insertRecord(txn, &writer, false);
+ uassertStatusOK(status.getStatus());
+ const MmapV1RecordHeader* newRec =
+ recordFor(DiskLoc::fromRecordId(status.getValue()));
+ invariant(unsigned(newRec->netLength()) >= rawDataSize);
+ totalNetSize += newRec->netLength();
+
+ // Tells the caller that the record has been moved, so it can do things such as
+ // add it to indexes.
+ adaptor->inserted(newRec->toRecordData(), status.getValue());
}
- // The extent must now be empty.
- invariant(sourceExtent->firstRecord.isNull());
- invariant(sourceExtent->lastRecord.isNull());
+            // Remove the old record from the linked list of records within the sourceExtent.
+ // The old record is not added to the freelist as we will be freeing the whole
+ // extent at the end.
+ *txn->recoveryUnit()->writing(&sourceExtent->firstRecord) = nextSourceLoc;
+ if (nextSourceLoc.isNull()) {
+ // Just moved the last record out of the extent. Mark extent as empty.
+ *txn->recoveryUnit()->writing(&sourceExtent->lastRecord) = DiskLoc();
+ } else {
+ MmapV1RecordHeader* newFirstRecord = recordFor(nextSourceLoc);
+ txn->recoveryUnit()->writingInt(newFirstRecord->prevOfs()) = DiskLoc::NullOfs;
+ }
- // We are still the first extent, but we must not be the only extent.
- invariant( _details->firstExtent(txn) == extentLoc );
- invariant( _details->lastExtent(txn) != extentLoc );
+ // Adjust the stats to reflect the removal of the old record. The insert above
+ // handled adjusting the stats for the new record.
+ _details->incrementStats(txn, -(recOld->netLength()), -1);
- // Remove the newly emptied sourceExtent from the extent linked list and return it to
- // the extent manager.
- WriteUnitOfWork wunit(txn);
- const DiskLoc newFirst = sourceExtent->xnext;
- _details->setFirstExtent( txn, newFirst );
- *txn->recoveryUnit()->writing(&_extentManager->getExtent( newFirst )->xprev) = DiskLoc();
- _extentManager->freeExtent( txn, extentLoc );
wunit.commit();
-
- {
- const double oldPadding = oldObjSize ? double(oldObjSizeWithPadding) / oldObjSize
- : 1.0; // defining 0/0 as 1 for this.
-
- log() << "compact finished extent #" << extentNumber << " containing " << nrecords
- << " documents (" << totalNetSize / (1024*1024.0) << "MB)"
- << " oldPadding: " << oldPadding;
- }
}
- }
+ // The extent must now be empty.
+ invariant(sourceExtent->firstRecord.isNull());
+ invariant(sourceExtent->lastRecord.isNull());
- Status SimpleRecordStoreV1::compact( OperationContext* txn,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* options,
- CompactStats* stats ) {
-
- std::vector<DiskLoc> extents;
- for( DiskLoc extLocation = _details->firstExtent(txn);
- !extLocation.isNull();
- extLocation = _extentManager->getExtent( extLocation )->xnext ) {
- extents.push_back( extLocation );
- }
- log() << "compact " << extents.size() << " extents";
+ // We are still the first extent, but we must not be the only extent.
+ invariant(_details->firstExtent(txn) == extentLoc);
+ invariant(_details->lastExtent(txn) != extentLoc);
- {
- WriteUnitOfWork wunit(txn);
- // Orphaning the deleted lists ensures that all inserts go to new extents rather than
- // the ones that existed before starting the compact. If we abort the operation before
- // completion, any free space in the old extents will be leaked and never reused unless
- // the collection is compacted again or dropped. This is considered an acceptable
- // failure mode as no data will be lost.
- log() << "compact orphan deleted lists" << endl;
- _details->orphanDeletedList(txn);
-
- // Start over from scratch with our extent sizing and growth
- _details->setLastExtentSize( txn, 0 );
-
- // create a new extent so new records go there
- increaseStorageSize( txn, _details->lastExtentSize(txn), true );
- wunit.commit();
- }
+ // Remove the newly emptied sourceExtent from the extent linked list and return it to
+ // the extent manager.
+ WriteUnitOfWork wunit(txn);
+ const DiskLoc newFirst = sourceExtent->xnext;
+ _details->setFirstExtent(txn, newFirst);
+ *txn->recoveryUnit()->writing(&_extentManager->getExtent(newFirst)->xprev) = DiskLoc();
+ _extentManager->freeExtent(txn, extentLoc);
+ wunit.commit();
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeterHolder pm(*txn->setMessage_inlock("compact extent",
- "Extent Compacting Progress",
- extents.size()));
- lk.unlock();
+ {
+ const double oldPadding = oldObjSize ? double(oldObjSizeWithPadding) / oldObjSize
+ : 1.0; // defining 0/0 as 1 for this.
- // Go through all old extents and move each record to a new set of extents.
- int extentNumber = 0;
- for( std::vector<DiskLoc>::iterator it = extents.begin(); it != extents.end(); it++ ) {
- txn->checkForInterrupt();
- invariant(_details->firstExtent(txn) == *it);
- // empties and removes the first extent
- _compactExtent(txn, *it, extentNumber++, adaptor, options, stats );
- invariant(_details->firstExtent(txn) != *it);
- pm.hit();
+ log() << "compact finished extent #" << extentNumber << " containing " << nrecords
+ << " documents (" << totalNetSize / (1024 * 1024.0) << "MB)"
+ << " oldPadding: " << oldPadding;
}
+ }
+}
- invariant( _extentManager->getExtent( _details->firstExtent(txn) )->xprev.isNull() );
- invariant( _extentManager->getExtent( _details->lastExtent(txn) )->xnext.isNull() );
+Status SimpleRecordStoreV1::compact(OperationContext* txn,
+ RecordStoreCompactAdaptor* adaptor,
+ const CompactOptions* options,
+ CompactStats* stats) {
+ std::vector<DiskLoc> extents;
+ for (DiskLoc extLocation = _details->firstExtent(txn); !extLocation.isNull();
+ extLocation = _extentManager->getExtent(extLocation)->xnext) {
+ extents.push_back(extLocation);
+ }
+ log() << "compact " << extents.size() << " extents";
+
+ {
+ WriteUnitOfWork wunit(txn);
+ // Orphaning the deleted lists ensures that all inserts go to new extents rather than
+ // the ones that existed before starting the compact. If we abort the operation before
+ // completion, any free space in the old extents will be leaked and never reused unless
+ // the collection is compacted again or dropped. This is considered an acceptable
+ // failure mode as no data will be lost.
+ log() << "compact orphan deleted lists" << endl;
+ _details->orphanDeletedList(txn);
- // indexes will do their own progress meter
- pm.finished();
+ // Start over from scratch with our extent sizing and growth
+ _details->setLastExtentSize(txn, 0);
- return Status::OK();
+ // create a new extent so new records go there
+ increaseStorageSize(txn, _details->lastExtentSize(txn), true);
+ wunit.commit();
}
+ stdx::unique_lock<Client> lk(*txn->getClient());
+ ProgressMeterHolder pm(
+ *txn->setMessage_inlock("compact extent", "Extent Compacting Progress", extents.size()));
+ lk.unlock();
+
+ // Go through all old extents and move each record to a new set of extents.
+ int extentNumber = 0;
+ for (std::vector<DiskLoc>::iterator it = extents.begin(); it != extents.end(); it++) {
+ txn->checkForInterrupt();
+ invariant(_details->firstExtent(txn) == *it);
+ // empties and removes the first extent
+ _compactExtent(txn, *it, extentNumber++, adaptor, options, stats);
+ invariant(_details->firstExtent(txn) != *it);
+ pm.hit();
+ }
+
+ invariant(_extentManager->getExtent(_details->firstExtent(txn))->xprev.isNull());
+ invariant(_extentManager->getExtent(_details->lastExtent(txn))->xnext.isNull());
+
+ // indexes will do their own progress meter
+ pm.finished();
+
+ return Status::OK();
+}
}
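
The per-extent log line above reports an "oldPadding" figure. A minimal standalone sketch of that arithmetic (the helper name is made up for illustration; only the formula comes from the patch):

    double oldPaddingRatio(long long oldObjSize, long long oldObjSizeWithPadding) {
        // Ratio of record space used (BSON plus per-record padding) to raw BSON size.
        // 0/0 is defined as 1.0 so that an empty extent reports no padding.
        return oldObjSize ? static_cast<double>(oldObjSizeWithPadding) / oldObjSize : 1.0;
    }

    // Example: an extent holding 1,000,000 bytes of documents in 1,300,000 bytes of
    // records is logged as "oldPadding: 1.3".
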
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
index a108305492a..9ab6ba86f78 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.h
@@ -36,65 +36,70 @@
namespace mongo {
- class SimpleRecordStoreV1Cursor;
-
- // used by index and original collections
- class SimpleRecordStoreV1 : public RecordStoreV1Base {
- public:
- SimpleRecordStoreV1( OperationContext* txn,
- StringData ns,
- RecordStoreV1MetaData* details,
- ExtentManager* em,
- bool isSystemIndexes );
-
- virtual ~SimpleRecordStoreV1();
-
- const char* name() const { return "SimpleRecordStoreV1"; }
-
- std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final;
-
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(
- OperationContext* txn) const final;
-
- virtual Status truncate(OperationContext* txn);
-
- virtual void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
- invariant(!"cappedTruncateAfter not supported");
- }
-
- virtual bool compactSupported() const { return true; }
- virtual bool compactsInPlace() const { return false; }
- virtual Status compact( OperationContext* txn,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* options,
- CompactStats* stats );
-
- protected:
- virtual bool isCapped() const { return false; }
- virtual bool shouldPadInserts() const {
- return !_details->isUserFlagSet(CollectionOptions::Flag_NoPadding);
- }
-
- virtual StatusWith<DiskLoc> allocRecord( OperationContext* txn,
- int lengthWithHeaders,
- bool enforceQuota );
-
- virtual void addDeletedRec(OperationContext* txn,
- const DiskLoc& dloc);
- private:
- DiskLoc _allocFromExistingExtents( OperationContext* txn,
- int lengthWithHeaders );
-
- void _compactExtent(OperationContext* txn,
- const DiskLoc diskloc,
- int extentNumber,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* compactOptions,
- CompactStats* stats );
-
- bool _normalCollection;
-
- friend class SimpleRecordStoreV1Iterator;
- };
-
+class SimpleRecordStoreV1Cursor;
+
+// Used both by indexes and by regular (non-capped) collections.
+class SimpleRecordStoreV1 : public RecordStoreV1Base {
+public:
+ SimpleRecordStoreV1(OperationContext* txn,
+ StringData ns,
+ RecordStoreV1MetaData* details,
+ ExtentManager* em,
+ bool isSystemIndexes);
+
+ virtual ~SimpleRecordStoreV1();
+
+ const char* name() const {
+ return "SimpleRecordStoreV1";
+ }
+
+ std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final;
+
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+
+ virtual Status truncate(OperationContext* txn);
+
+ virtual void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
+ invariant(!"cappedTruncateAfter not supported");
+ }
+
+ virtual bool compactSupported() const {
+ return true;
+ }
+ virtual bool compactsInPlace() const {
+ return false;
+ }
+ virtual Status compact(OperationContext* txn,
+ RecordStoreCompactAdaptor* adaptor,
+ const CompactOptions* options,
+ CompactStats* stats);
+
+protected:
+ virtual bool isCapped() const {
+ return false;
+ }
+ virtual bool shouldPadInserts() const {
+ return !_details->isUserFlagSet(CollectionOptions::Flag_NoPadding);
+ }
+
+ virtual StatusWith<DiskLoc> allocRecord(OperationContext* txn,
+ int lengthWithHeaders,
+ bool enforceQuota);
+
+ virtual void addDeletedRec(OperationContext* txn, const DiskLoc& dloc);
+
+private:
+ DiskLoc _allocFromExistingExtents(OperationContext* txn, int lengthWithHeaders);
+
+ void _compactExtent(OperationContext* txn,
+ const DiskLoc diskloc,
+ int extentNumber,
+ RecordStoreCompactAdaptor* adaptor,
+ const CompactOptions* compactOptions,
+ CompactStats* stats);
+
+ bool _normalCollection;
+
+ friend class SimpleRecordStoreV1Iterator;
+};
}
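
shouldPadInserts() above is what ties the CollectionOptions::Flag_NoPadding user flag to allocation quantization. A hedged sketch of the observable effect, reusing the test doubles (OperationContextNoop, DummyExtentManager, DummyRecordStoreV1MetaData, docForRecordSize) defined in record_store_v1_simple_test.cpp further down; the test name is invented and the expected sizes mirror the AllocQuantized / AllocNonQuantized tests:

    TEST(SimpleRecordStoreV1, PaddingFlagSketch) {
        OperationContextNoop txn;
        DummyExtentManager em;
        BSONObj obj = docForRecordSize(300);

        // Default flags: shouldPadInserts() is true, so a 300-byte record is quantized to 512.
        DummyRecordStoreV1MetaData* paddedMd = new DummyRecordStoreV1MetaData(false, 0);
        SimpleRecordStoreV1 padded(&txn, "test.padded", paddedMd, &em, false);
        StatusWith<RecordId> r1 = padded.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
        ASSERT_EQUALS(512,
                      padded.dataFor(&txn, r1.getValue()).size() + MmapV1RecordHeader::HeaderSize);

        // With Flag_NoPadding set, shouldPadInserts() is false and the allocation stays at 300.
        DummyRecordStoreV1MetaData* noPadMd = new DummyRecordStoreV1MetaData(false, 0);
        noPadMd->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
        SimpleRecordStoreV1 noPad(&txn, "test.nopad", noPadMd, &em, false);
        StatusWith<RecordId> r2 = noPad.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
        ASSERT_EQUALS(300,
                      noPad.dataFor(&txn, r2.getValue()).size() + MmapV1RecordHeader::HeaderSize);
    }
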
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
index ec1e51abe02..babfbcf26ea 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.cpp
@@ -35,100 +35,94 @@
namespace mongo {
- //
- // Regular / non-capped collection traversal
- //
-
- SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* txn,
- const SimpleRecordStoreV1* collection,
- bool forward)
- : _txn(txn)
- , _recordStore(collection)
- , _forward(forward) {
-
- // Eagerly seek to first Record on creation since it is cheap.
- const ExtentManager* em = _recordStore->_extentManager;
- if ( _recordStore->details()->firstExtent(txn).isNull() ) {
- // nothing in the collection
- verify( _recordStore->details()->lastExtent(txn).isNull() );
+//
+// Regular / non-capped collection traversal
+//
+
+SimpleRecordStoreV1Iterator::SimpleRecordStoreV1Iterator(OperationContext* txn,
+ const SimpleRecordStoreV1* collection,
+ bool forward)
+ : _txn(txn), _recordStore(collection), _forward(forward) {
+ // Eagerly seek to first Record on creation since it is cheap.
+ const ExtentManager* em = _recordStore->_extentManager;
+ if (_recordStore->details()->firstExtent(txn).isNull()) {
+ // nothing in the collection
+ verify(_recordStore->details()->lastExtent(txn).isNull());
+ } else if (_forward) {
+ // Find a non-empty extent and start with the first record in it.
+ Extent* e = em->getExtent(_recordStore->details()->firstExtent(txn));
+
+ while (e->firstRecord.isNull() && !e->xnext.isNull()) {
+ e = em->getExtent(e->xnext);
}
- else if (_forward) {
- // Find a non-empty extent and start with the first record in it.
- Extent* e = em->getExtent( _recordStore->details()->firstExtent(txn) );
- while (e->firstRecord.isNull() && !e->xnext.isNull()) {
- e = em->getExtent( e->xnext );
- }
-
- // _curr may be set to DiskLoc() here if e->lastRecord isNull but there is no
- // valid e->xnext
- _curr = e->firstRecord;
- }
- else {
- // Walk backwards, skipping empty extents, and use the last record in the first
- // non-empty extent we see.
- Extent* e = em->getExtent( _recordStore->details()->lastExtent(txn) );
-
- // TODO ELABORATE
- // Does one of e->lastRecord.isNull(), e.firstRecord.isNull() imply the other?
- while (e->lastRecord.isNull() && !e->xprev.isNull()) {
- e = em->getExtent( e->xprev );
- }
-
- // _curr may be set to DiskLoc() here if e->lastRecord isNull but there is no
- // valid e->xprev
- _curr = e->lastRecord;
+ // _curr may be set to DiskLoc() here if e->firstRecord is null but there is no
+ // valid e->xnext
+ _curr = e->firstRecord;
+ } else {
+ // Walk backwards, skipping empty extents, and use the last record in the first
+ // non-empty extent we see.
+ Extent* e = em->getExtent(_recordStore->details()->lastExtent(txn));
+
+ // TODO ELABORATE
+ // Does one of e->lastRecord.isNull(), e->firstRecord.isNull() imply the other?
+ while (e->lastRecord.isNull() && !e->xprev.isNull()) {
+ e = em->getExtent(e->xprev);
}
- }
- boost::optional<Record> SimpleRecordStoreV1Iterator::next() {
- if (isEOF()) return {};
- auto toReturn = _curr.toRecordId();
- advance();
- return {{toReturn, _recordStore->RecordStore::dataFor(_txn, toReturn)}};
+ // _curr may be set to DiskLoc() here if e->lastRecord isNull but there is no
+ // valid e->xprev
+ _curr = e->lastRecord;
}
+}
- boost::optional<Record> SimpleRecordStoreV1Iterator::seekExact(const RecordId& id) {
- _curr = DiskLoc::fromRecordId(id);
- advance();
- return {{id, _recordStore->RecordStore::dataFor(_txn, id)}};
- }
+boost::optional<Record> SimpleRecordStoreV1Iterator::next() {
+ if (isEOF())
+ return {};
+ auto toReturn = _curr.toRecordId();
+ advance();
+ return {{toReturn, _recordStore->RecordStore::dataFor(_txn, toReturn)}};
+}
- void SimpleRecordStoreV1Iterator::advance() {
- // Move to the next thing.
- if (!isEOF()) {
- if (_forward) {
- _curr = _recordStore->getNextRecord( _txn, _curr );
- }
- else {
- _curr = _recordStore->getPrevRecord( _txn, _curr );
- }
- }
- }
+boost::optional<Record> SimpleRecordStoreV1Iterator::seekExact(const RecordId& id) {
+ _curr = DiskLoc::fromRecordId(id);
+ advance();
+ return {{id, _recordStore->RecordStore::dataFor(_txn, id)}};
+}
- void SimpleRecordStoreV1Iterator::invalidate(const RecordId& dl) {
- // Just move past the thing being deleted.
- if (dl == _curr.toRecordId()) {
- advance();
+void SimpleRecordStoreV1Iterator::advance() {
+ // Move to the next thing.
+ if (!isEOF()) {
+ if (_forward) {
+ _curr = _recordStore->getNextRecord(_txn, _curr);
+ } else {
+ _curr = _recordStore->getPrevRecord(_txn, _curr);
}
}
+}
- void SimpleRecordStoreV1Iterator::savePositioned() {
- _txn = nullptr;
+void SimpleRecordStoreV1Iterator::invalidate(const RecordId& dl) {
+ // Just move past the thing being deleted.
+ if (dl == _curr.toRecordId()) {
+ advance();
}
+}
- bool SimpleRecordStoreV1Iterator::restore(OperationContext* txn) {
- _txn = txn;
- // if the collection is dropped, then the cursor should be destroyed
- return true;
- }
+void SimpleRecordStoreV1Iterator::savePositioned() {
+ _txn = nullptr;
+}
- std::unique_ptr<RecordFetcher> SimpleRecordStoreV1Iterator::fetcherForNext() const {
- return _recordStore->_extentManager->recordNeedsFetch(_curr);
- }
+bool SimpleRecordStoreV1Iterator::restore(OperationContext* txn) {
+ _txn = txn;
+ // if the collection is dropped, then the cursor should be destroyed
+ return true;
+}
- std::unique_ptr<RecordFetcher> SimpleRecordStoreV1Iterator::fetcherForId(
- const RecordId& id) const {
- return _recordStore->_extentManager->recordNeedsFetch(DiskLoc::fromRecordId(id));
- }
+std::unique_ptr<RecordFetcher> SimpleRecordStoreV1Iterator::fetcherForNext() const {
+ return _recordStore->_extentManager->recordNeedsFetch(_curr);
+}
+
+std::unique_ptr<RecordFetcher> SimpleRecordStoreV1Iterator::fetcherForId(const RecordId& id) const {
+ return _recordStore->_extentManager->recordNeedsFetch(DiskLoc::fromRecordId(id));
+}
}
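
A hedged usage sketch of the cursor implemented above; the scan function and the yield point are assumptions, while next(), savePositioned() and restore() are the methods shown in this file:

    void scanForward(OperationContext* txn, const SimpleRecordStoreV1* rs) {
        SimpleRecordStoreV1Iterator cursor(txn, rs, /*forward=*/true);

        while (boost::optional<Record> record = cursor.next()) {
            // ... inspect *record ...

            // Around a yield the owner detaches the cursor from the operation context and
            // reattaches it afterwards; restore() returns true while the cursor stays valid.
            cursor.savePositioned();
            // ... locks released and reacquired here ...
            invariant(cursor.restore(txn));
        }
    }
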
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
index c19c0c386b3..91b0088bf72 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h
@@ -33,39 +33,41 @@
namespace mongo {
- class SimpleRecordStoreV1;
+class SimpleRecordStoreV1;
- /**
- * This class iterates over a non-capped collection identified by 'ns'.
- * The collection must exist when the constructor is called.
- *
- * If start is not DiskLoc(), the iteration begins at that DiskLoc.
- */
- class SimpleRecordStoreV1Iterator final : public RecordCursor {
- public:
- SimpleRecordStoreV1Iterator( OperationContext* txn,
- const SimpleRecordStoreV1* records,
- bool forward);
+/**
+ * This class iterates over a non-capped SimpleRecordStoreV1 in the direction passed to
+ * the constructor. The collection must exist when the constructor is called.
+ *
+ * The cursor is eagerly positioned on the first (or last) record at construction time.
+ */
+class SimpleRecordStoreV1Iterator final : public RecordCursor {
+public:
+ SimpleRecordStoreV1Iterator(OperationContext* txn,
+ const SimpleRecordStoreV1* records,
+ bool forward);
- boost::optional<Record> next() final;
- boost::optional<Record> seekExact(const RecordId& id) final;
- void savePositioned() final;
- bool restore(OperationContext* txn) final;
- void invalidate(const RecordId& dl) final;
- std::unique_ptr<RecordFetcher> fetcherForNext() const final;
- std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const final;
+ boost::optional<Record> next() final;
+ boost::optional<Record> seekExact(const RecordId& id) final;
+ void savePositioned() final;
+ bool restore(OperationContext* txn) final;
+ void invalidate(const RecordId& dl) final;
+ std::unique_ptr<RecordFetcher> fetcherForNext() const final;
+ std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const final;
- private:
- void advance();
- bool isEOF() { return _curr.isNull(); }
+private:
+ void advance();
+ bool isEOF() {
+ return _curr.isNull();
+ }
- // for getNext, not owned
- OperationContext* _txn;
+ // Not owned; used to fetch record data in next().
+ OperationContext* _txn;
- // The result returned on the next call to getNext().
- DiskLoc _curr;
- const SimpleRecordStoreV1* const _recordStore;
- const bool _forward;
- };
+ // The location returned by the next call to next().
+ DiskLoc _curr;
+ const SimpleRecordStoreV1* const _recordStore;
+ const bool _forward;
+};
} // namespace mongo
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
index 21ffdf6ef2b..e4e85168b01 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
@@ -40,501 +40,413 @@ using namespace mongo;
namespace {
- using std::string;
-
- TEST( SimpleRecordStoreV1, quantizeAllocationSpaceSimple ) {
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(33), 64);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000), 1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10001), 16*1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(100000), 128*1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000001), 1024*1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10000000), 10*1024*1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(14*1024*1024 - 1), 14*1024*1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(14*1024*1024), 14*1024*1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(14*1024*1024 + 1),
- 16*1024*1024 + 512*1024);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(16*1024*1024 + 512*1024),
- 16*1024*1024 + 512*1024);
- }
+using std::string;
+
+TEST(SimpleRecordStoreV1, quantizeAllocationSpaceSimple) {
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(33), 64);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000), 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10001), 16 * 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(100000), 128 * 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1000001), 1024 * 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(10000000), 10 * 1024 * 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(14 * 1024 * 1024 - 1),
+ 14 * 1024 * 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(14 * 1024 * 1024), 14 * 1024 * 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(14 * 1024 * 1024 + 1),
+ 16 * 1024 * 1024 + 512 * 1024);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(16 * 1024 * 1024 + 512 * 1024),
+ 16 * 1024 * 1024 + 512 * 1024);
+}
- TEST( SimpleRecordStoreV1, quantizeAllocationMinMaxBound ) {
- const int maxSize = RecordStoreV1Base::MaxAllowedAllocation;
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1), 32);
- ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(maxSize), maxSize);
- }
+TEST(SimpleRecordStoreV1, quantizeAllocationMinMaxBound) {
+ const int maxSize = RecordStoreV1Base::MaxAllowedAllocation;
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(1), 32);
+ ASSERT_EQUALS(RecordStoreV1Base::quantizeAllocationSpace(maxSize), maxSize);
+}
- /**
- * Tests quantization of sizes around all valid bucket sizes.
- */
- TEST( SimpleRecordStoreV1, quantizeAroundBucketSizes ) {
- for (int bucket = 0; bucket < RecordStoreV1Base::Buckets - 2; bucket++) {
- const int size = RecordStoreV1Base::bucketSizes[bucket];
- const int nextSize = RecordStoreV1Base::bucketSizes[bucket + 1];
-
- // size - 1 is quantized to size.
- ASSERT_EQUALS( size,
- RecordStoreV1Base::quantizeAllocationSpace( size - 1 ) );
-
- // size is quantized to size.
- ASSERT_EQUALS( size,
- RecordStoreV1Base::quantizeAllocationSpace( size ) );
-
- // size + 1 is quantized to nextSize (if it is a valid allocation)
- if (size + 1 <= RecordStoreV1Base::MaxAllowedAllocation) {
- ASSERT_EQUALS( nextSize,
- RecordStoreV1Base::quantizeAllocationSpace( size + 1 ) );
- }
+/**
+ * Tests quantization of sizes around all valid bucket sizes.
+ */
+TEST(SimpleRecordStoreV1, quantizeAroundBucketSizes) {
+ for (int bucket = 0; bucket < RecordStoreV1Base::Buckets - 2; bucket++) {
+ const int size = RecordStoreV1Base::bucketSizes[bucket];
+ const int nextSize = RecordStoreV1Base::bucketSizes[bucket + 1];
+
+ // size - 1 is quantized to size.
+ ASSERT_EQUALS(size, RecordStoreV1Base::quantizeAllocationSpace(size - 1));
+
+ // size is quantized to size.
+ ASSERT_EQUALS(size, RecordStoreV1Base::quantizeAllocationSpace(size));
+
+ // size + 1 is quantized to nextSize (if it is a valid allocation)
+ if (size + 1 <= RecordStoreV1Base::MaxAllowedAllocation) {
+ ASSERT_EQUALS(nextSize, RecordStoreV1Base::quantizeAllocationSpace(size + 1));
}
}
+}
+
+BSONObj docForRecordSize(int size) {
+ BSONObjBuilder b;
+ b.append("_id", 5);
+ b.append("x", string(size - MmapV1RecordHeader::HeaderSize - 22, 'x'));
+ BSONObj x = b.obj();
+ ASSERT_EQUALS(MmapV1RecordHeader::HeaderSize + x.objsize(), size);
+ return x;
+}
+
+class BsonDocWriter : public DocWriter {
+public:
+ BsonDocWriter(const BSONObj& obj, bool padding) : _obj(obj), _padding(padding) {}
- BSONObj docForRecordSize( int size ) {
- BSONObjBuilder b;
- b.append( "_id", 5 );
- b.append( "x", string( size - MmapV1RecordHeader::HeaderSize - 22, 'x' ) );
- BSONObj x = b.obj();
- ASSERT_EQUALS( MmapV1RecordHeader::HeaderSize + x.objsize(), size );
- return x;
+ virtual void writeDocument(char* buf) const {
+ memcpy(buf, _obj.objdata(), _obj.objsize());
+ }
+ virtual size_t documentSize() const {
+ return _obj.objsize();
+ }
+ virtual bool addPadding() const {
+ return _padding;
}
- class BsonDocWriter : public DocWriter {
- public:
- BsonDocWriter(const BSONObj& obj, bool padding) : _obj(obj), _padding(padding) {}
+private:
+ BSONObj _obj;
+ bool _padding;
+};
- virtual void writeDocument(char* buf) const { memcpy(buf, _obj.objdata(), _obj.objsize()); }
- virtual size_t documentSize() const { return _obj.objsize(); }
- virtual bool addPadding() const { return _padding; }
+/** alloc() quantizes the requested size using quantizeAllocationSpace() rules. */
+TEST(SimpleRecordStoreV1, AllocQuantized) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- private:
- BSONObj _obj;
- bool _padding;
- };
+ string myns = "test.AllocQuantized";
+ SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
- /** alloc() quantizes the requested size using quantizeAllocationSpace() rules. */
- TEST(SimpleRecordStoreV1, AllocQuantized) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
+ BSONObj obj = docForRecordSize(300);
+ StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ ASSERT(result.isOK());
- string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs( &txn, myns, md, &em, false );
+ // The length of the allocated record is quantized.
+ ASSERT_EQUALS(512, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+}
- BSONObj obj = docForRecordSize( 300 );
- StatusWith<RecordId> result = rs.insertRecord( &txn, obj.objdata(), obj.objsize(), false);
- ASSERT( result.isOK() );
+TEST(SimpleRecordStoreV1, AllocNonQuantized) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
- // The length of the allocated record is quantized.
- ASSERT_EQUALS( 512 , rs.dataFor( &txn, result.getValue() ).size() + MmapV1RecordHeader::HeaderSize );
- }
+ string myns = "test.AllocQuantized";
+ SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
- TEST(SimpleRecordStoreV1, AllocNonQuantized) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
+ BSONObj obj = docForRecordSize(300);
+ StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ ASSERT(result.isOK());
- string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs( &txn, myns, md, &em, false );
+ // With Flag_NoPadding set, the length of the allocated record is not quantized.
+ ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+}
- BSONObj obj = docForRecordSize( 300 );
- StatusWith<RecordId> result = rs.insertRecord( &txn, obj.objdata(), obj.objsize(), false);
- ASSERT( result.isOK() );
+TEST(SimpleRecordStoreV1, AllocNonQuantizedStillAligned) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
- // The length of the allocated record is quantized.
- ASSERT_EQUALS( 300 , rs.dataFor( &txn, result.getValue() ).size() + MmapV1RecordHeader::HeaderSize );
- }
+ string myns = "test.AllocQuantized";
+ SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
- TEST(SimpleRecordStoreV1, AllocNonQuantizedStillAligned) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
+ BSONObj obj = docForRecordSize(298);
+ StatusWith<RecordId> result = rs.insertRecord(&txn, obj.objdata(), obj.objsize(), false);
+ ASSERT(result.isOK());
- string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs( &txn, myns, md, &em, false );
+ // With Flag_NoPadding set, the length is not quantized, but it is aligned up to 4 bytes (298 becomes 300).
+ ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+}
- BSONObj obj = docForRecordSize( 298 );
- StatusWith<RecordId> result = rs.insertRecord( &txn, obj.objdata(), obj.objsize(), false);
- ASSERT( result.isOK() );
+/** alloc() quantizes the requested size if DocWriter::addPadding() returns true. */
+TEST(SimpleRecordStoreV1, AllocQuantizedWithDocWriter) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- // The length of the allocated record is quantized.
- ASSERT_EQUALS( 300 , rs.dataFor( &txn, result.getValue() ).size() + MmapV1RecordHeader::HeaderSize );
- }
+ string myns = "test.AllocQuantized";
+ SimpleRecordStoreV1 rs(&txn, myns, md, &em, false);
- /** alloc() quantizes the requested size if DocWriter::addPadding() returns true. */
- TEST(SimpleRecordStoreV1, AllocQuantizedWithDocWriter) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
+ BsonDocWriter docWriter(docForRecordSize(300), true);
+ StatusWith<RecordId> result = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT(result.isOK());
- string myns = "test.AllocQuantized";
- SimpleRecordStoreV1 rs( &txn, myns, md, &em, false );
+ // The length of the allocated record is quantized.
+ ASSERT_EQUALS(512, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+}
- BsonDocWriter docWriter(docForRecordSize( 300 ), true);
- StatusWith<RecordId> result = rs.insertRecord(&txn, &docWriter, false);
- ASSERT( result.isOK() );
+/**
+ * alloc() does not quantize records if DocWriter::addPadding() returns false
+ */
+TEST(SimpleRecordStoreV1, AllocNonQuantizedDocWriter) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- // The length of the allocated record is quantized.
- ASSERT_EQUALS( 512 , rs.dataFor( &txn, result.getValue() ).size() + MmapV1RecordHeader::HeaderSize );
- }
+ string myns = "test.AllocIndexNamespaceNotQuantized";
+ SimpleRecordStoreV1 rs(&txn, myns + "$x", md, &em, false);
- /**
- * alloc() does not quantize records if DocWriter::addPadding() returns false
- */
- TEST(SimpleRecordStoreV1, AllocNonQuantizedDocWriter) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
+ BsonDocWriter docWriter(docForRecordSize(300), false);
+ StatusWith<RecordId> result = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT(result.isOK());
- string myns = "test.AllocIndexNamespaceNotQuantized";
- SimpleRecordStoreV1 rs( &txn, myns + "$x", md, &em, false );
+ // The length of the allocated record is not quantized.
+ ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+}
+
+/** alloc() aligns record sizes up to 4 bytes even if DocWriter::addPadding returns false. */
+TEST(SimpleRecordStoreV1, AllocAlignedDocWriter) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
- BsonDocWriter docWriter(docForRecordSize( 300 ), false);
- StatusWith<RecordId> result = rs.insertRecord(&txn, &docWriter, false);
- ASSERT( result.isOK() );
+ string myns = "test.AllocIndexNamespaceNotQuantized";
+ SimpleRecordStoreV1 rs(&txn, myns + "$x", md, &em, false);
- // The length of the allocated record is not quantized.
- ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + MmapV1RecordHeader::HeaderSize );
+ BsonDocWriter docWriter(docForRecordSize(298), false);
+ StatusWith<RecordId> result = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT(result.isOK());
+ ASSERT_EQUALS(300, rs.dataFor(&txn, result.getValue()).size() + MmapV1RecordHeader::HeaderSize);
+}
+/**
+ * alloc() with a quantized size doesn't split the deleted record when too little room would be left over.
+ */
+TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithoutSplit) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+
+ {
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 512 + 31}, {}};
+ initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
}
- /** alloc() aligns record sizes up to 4 bytes even if DocWriter::addPadding returns false. */
- TEST(SimpleRecordStoreV1, AllocAlignedDocWriter) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
+ BsonDocWriter docWriter(docForRecordSize(300), true);
+ StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT_OK(actualLocation.getStatus());
- string myns = "test.AllocIndexNamespaceNotQuantized";
- SimpleRecordStoreV1 rs( &txn, myns + "$x", md, &em, false );
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 512 + 31}, {}};
+ LocAndSize drecs[] = {{}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
+ }
+}
- BsonDocWriter docWriter(docForRecordSize( 298 ), false);
- StatusWith<RecordId> result = rs.insertRecord(&txn, &docWriter, false);
- ASSERT( result.isOK() );
+/**
+ * alloc() with a quantized size splits the deleted record when enough room is left over.
+ */
+TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithSplit) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+
+ {
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 512 + 32}, {}};
+ initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
+ }
+
+ BsonDocWriter docWriter(docForRecordSize(300), true);
+ StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT_OK(actualLocation.getStatus());
- ASSERT_EQUALS( 300, rs.dataFor( &txn, result.getValue() ).size() + MmapV1RecordHeader::HeaderSize );
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 512}, {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1512), 32}, {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
}
- /**
- * alloc() with quantized size doesn't split if enough room left over.
- */
- TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithoutSplit) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 512 + 31},
- {}
- };
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
- }
+}
- BsonDocWriter docWriter(docForRecordSize( 300 ), true);
- StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
- ASSERT_OK( actualLocation.getStatus() );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 512 + 31},
- {}
- };
- LocAndSize drecs[] = {
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- }
+/**
+ * alloc() with a non-quantized size doesn't split the deleted record when too little room would be left over.
+ */
+TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithoutSplit) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+
+ {
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 331}, {}};
+ initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
}
- /**
- * alloc() with quantized size splits if enough room left over.
- */
- TEST(SimpleRecordStoreV1, AllocUseQuantizedDeletedRecordWithSplit) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 512 + 32},
- {}
- };
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
- }
+ BsonDocWriter docWriter(docForRecordSize(300), false);
+ StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT_OK(actualLocation.getStatus());
- BsonDocWriter docWriter(docForRecordSize( 300 ), true);
- StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
- ASSERT_OK( actualLocation.getStatus() );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 512},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1512), 32},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- }
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 331}, {}};
+ LocAndSize drecs[] = {{}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
}
+}
- /**
- * alloc() with non quantized size doesn't split if enough room left over.
- */
- TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithoutSplit) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 331},
- {}
- };
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
- }
-
- BsonDocWriter docWriter(docForRecordSize( 300 ), false);
- StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
- ASSERT_OK( actualLocation.getStatus() );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 331},
- {}
- };
- LocAndSize drecs[] = {
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- }
+/**
+ * alloc() with a non-quantized size splits the deleted record when enough room is left over.
+ */
+TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithSplit) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+
+ {
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 332}, {}};
+ initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
}
- /**
- * alloc() with non quantized size splits if enough room left over.
- */
- TEST(SimpleRecordStoreV1, AllocUseNonQuantizedDeletedRecordWithSplit) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 332},
- {}
- };
- initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
- }
+ BsonDocWriter docWriter(docForRecordSize(300), false);
+ StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT_OK(actualLocation.getStatus());
- BsonDocWriter docWriter(docForRecordSize( 300 ), false);
- StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
- ASSERT_OK( actualLocation.getStatus() );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 300},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1300), 32},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- }
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 300}, {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1300), 32}, {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
}
+}
- /**
- * alloc() will use from the legacy grab bag if it can.
- */
- TEST(SimpleRecordStoreV1, GrabBagIsUsed) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize drecs[] = {
- {}
- };
- LocAndSize grabBag[] = {
- {DiskLoc(0, 1000), 4*1024*1024},
- {DiskLoc(1, 1000), 4*1024*1024},
- {}
- };
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
- }
+/**
+ * alloc() will allocate from the legacy grab bag when it can.
+ */
+TEST(SimpleRecordStoreV1, GrabBagIsUsed) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+
+ {
+ LocAndSize drecs[] = {{}};
+ LocAndSize grabBag[] = {
+ {DiskLoc(0, 1000), 4 * 1024 * 1024}, {DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
+ initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ }
- BsonDocWriter docWriter(docForRecordSize( 256 ), false);
- StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
- ASSERT_OK( actualLocation.getStatus() );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 256},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1256), 4*1024*1024 - 256},
- {}
- };
- LocAndSize grabBag[] = {
- {DiskLoc(1, 1000), 4*1024*1024},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
- }
+ BsonDocWriter docWriter(docForRecordSize(256), false);
+ StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT_OK(actualLocation.getStatus());
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 256}, {}};
+ LocAndSize drecs[] = {{DiskLoc(0, 1256), 4 * 1024 * 1024 - 256}, {}};
+ LocAndSize grabBag[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
+ assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
}
+}
- /**
- * alloc() will pull from the legacy grab bag even if it isn't needed.
- */
- TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnneeded) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 1000},
- {}
- };
- LocAndSize grabBag[] = {
- {DiskLoc(1, 1000), 4*1024*1024},
- {DiskLoc(2, 1000), 4*1024*1024},
- {}
- };
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
- }
+/**
+ * alloc() will pull from the legacy grab bag even if it isn't needed.
+ */
+TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnneeded) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+
+ {
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
+ LocAndSize grabBag[] = {
+ {DiskLoc(1, 1000), 4 * 1024 * 1024}, {DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
+ initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ }
- BsonDocWriter docWriter(docForRecordSize( 1000 ), false);
- StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
- ASSERT_OK( actualLocation.getStatus() );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 1000},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(1, 1000), 4*1024*1024},
- {}
- };
- LocAndSize grabBag[] = {
- {DiskLoc(2, 1000), 4*1024*1024},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
- }
+ BsonDocWriter docWriter(docForRecordSize(1000), false);
+ StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT_OK(actualLocation.getStatus());
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 1000}, {}};
+ LocAndSize drecs[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
+ LocAndSize grabBag[] = {{DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
+ assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
}
+}
- /**
- * alloc() will pull from the legacy grab bag even if it can't be used
- */
- TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnusable) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize drecs[] = {
- {DiskLoc(0, 1000), 8*1024*1024},
- {}
- };
- LocAndSize grabBag[] = {
- {DiskLoc(1, 1000), 4*1024*1024},
- {DiskLoc(2, 1000), 4*1024*1024},
- {}
- };
- initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
- }
+/**
+ * alloc() will pull from the legacy grab bag even if the entry can't be used.
+ */
+TEST(SimpleRecordStoreV1, GrabBagIsPoppedEvenIfUnusable) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+
+ {
+ LocAndSize drecs[] = {{DiskLoc(0, 1000), 8 * 1024 * 1024}, {}};
+ LocAndSize grabBag[] = {
+ {DiskLoc(1, 1000), 4 * 1024 * 1024}, {DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
+ initializeV1RS(&txn, NULL, drecs, grabBag, &em, md);
+ }
- BsonDocWriter docWriter(docForRecordSize( 8*1024*1024 ), false);
- StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
- ASSERT_OK( actualLocation.getStatus() );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 8*1024*1024},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(1, 1000), 4*1024*1024},
- {}
- };
- LocAndSize grabBag[] = {
- {DiskLoc(2, 1000), 4*1024*1024},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
- }
+ BsonDocWriter docWriter(docForRecordSize(8 * 1024 * 1024), false);
+ StatusWith<RecordId> actualLocation = rs.insertRecord(&txn, &docWriter, false);
+ ASSERT_OK(actualLocation.getStatus());
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 8 * 1024 * 1024}, {}};
+ LocAndSize drecs[] = {{DiskLoc(1, 1000), 4 * 1024 * 1024}, {}};
+ LocAndSize grabBag[] = {{DiskLoc(2, 1000), 4 * 1024 * 1024}, {}};
+ assertStateV1RS(&txn, recs, drecs, grabBag, &em, md);
}
+}
+
+// -----------------
+
+TEST(SimpleRecordStoreV1, FullSimple1) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
- // -----------------
-
- TEST( SimpleRecordStoreV1, FullSimple1 ) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn,
- "test.foo",
- md,
- &em,
- false );
-
-
- ASSERT_EQUALS( 0, md->numRecords() );
- StatusWith<RecordId> result = rs.insertRecord( &txn, "abc", 4, 1000 );
- ASSERT_TRUE( result.isOK() );
- ASSERT_EQUALS( 1, md->numRecords() );
- RecordData recordData = rs.dataFor( &txn, result.getValue() );
- ASSERT_EQUALS( string("abc"), string(recordData.data()) );
+
+ ASSERT_EQUALS(0, md->numRecords());
+ StatusWith<RecordId> result = rs.insertRecord(&txn, "abc", 4, 1000);
+ ASSERT_TRUE(result.isOK());
+ ASSERT_EQUALS(1, md->numRecords());
+ RecordData recordData = rs.dataFor(&txn, result.getValue());
+ ASSERT_EQUALS(string("abc"), string(recordData.data()));
+}
+
+// -----------------
+
+TEST(SimpleRecordStoreV1, Truncate) {
+ OperationContextNoop txn;
+ DummyExtentManager em;
+ DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
+ SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);
+
+ {
+ LocAndSize recs[] = {{DiskLoc(0, 1000), 100},
+ {DiskLoc(0, 1100), 100},
+ {DiskLoc(0, 1300), 100},
+ {DiskLoc(2, 1100), 100},
+ {}};
+ LocAndSize drecs[] = {
+ {DiskLoc(0, 1200), 100}, {DiskLoc(2, 1000), 100}, {DiskLoc(1, 1000), 1000}, {}};
+
+ initializeV1RS(&txn, recs, drecs, NULL, &em, md);
+
+ ASSERT_EQUALS(em.getExtent(DiskLoc(0, 0))->length, em.minSize());
}
- // -----------------
-
- TEST( SimpleRecordStoreV1, Truncate ) {
- OperationContextNoop txn;
- DummyExtentManager em;
- DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData( false, 0 );
- SimpleRecordStoreV1 rs( &txn, "test.foo", md, &em, false );
-
- {
- LocAndSize recs[] = {
- {DiskLoc(0, 1000), 100},
- {DiskLoc(0, 1100), 100},
- {DiskLoc(0, 1300), 100},
- {DiskLoc(2, 1100), 100},
- {}
- };
- LocAndSize drecs[] = {
- {DiskLoc(0, 1200), 100},
- {DiskLoc(2, 1000), 100},
- {DiskLoc(1, 1000), 1000},
- {}
- };
-
- initializeV1RS(&txn, recs, drecs, NULL, &em, md);
-
- ASSERT_EQUALS(em.getExtent(DiskLoc(0, 0))->length, em.minSize());
- }
+ rs.truncate(&txn);
- rs.truncate(&txn);
-
- {
- LocAndSize recs[] = {
- {}
- };
- LocAndSize drecs[] = {
- // One extent filled with a single deleted record.
- {DiskLoc(0, Extent::HeaderSize()), em.minSize() - Extent::HeaderSize()},
- {}
- };
- assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
- }
+ {
+ LocAndSize recs[] = {{}};
+ LocAndSize drecs[] = {
+ // One extent filled with a single deleted record.
+ {DiskLoc(0, Extent::HeaderSize()), em.minSize() - Extent::HeaderSize()},
+ {}};
+ assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
}
}
+}
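
The tests above all follow the same harness pattern: seed an exact on-disk layout with initializeV1RS(), perform a single operation, then check the resulting record and deleted-record lists with assertStateV1RS(). A hedged template for adding another case (the test name and the expected layout are illustrative, derived from the quantization rules asserted earlier rather than from the patch):

    TEST(SimpleRecordStoreV1, ReuseExactlySizedDeletedRecord) {
        OperationContextNoop txn;
        DummyExtentManager em;
        DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
        SimpleRecordStoreV1 rs(&txn, "test.foo", md, &em, false);

        {
            // One free 1024-byte slot and nothing else.
            LocAndSize drecs[] = {{DiskLoc(0, 1000), 1024}, {}};
            initializeV1RS(&txn, NULL, drecs, NULL, &em, md);
        }

        // A padded insert that quantizes to exactly 1024 bytes should consume the whole
        // slot without leaving a remainder to split off.
        BsonDocWriter docWriter(docForRecordSize(1024), true);
        ASSERT_OK(rs.insertRecord(&txn, &docWriter, false).getStatus());

        {
            LocAndSize recs[] = {{DiskLoc(0, 1000), 1024}, {}};
            LocAndSize drecs[] = {{}};
            assertStateV1RS(&txn, recs, drecs, NULL, &em, md);
        }
    }
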
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
index 7bfaee1867e..12801124b95 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
@@ -47,631 +47,609 @@
namespace mongo {
- using std::numeric_limits;
-
- DummyRecordStoreV1MetaData::DummyRecordStoreV1MetaData( bool capped, int userFlags ) {
- _dataSize = 0;
- _numRecords = 0;
- _capped = capped;
- _userFlags = userFlags;
- _lastExtentSize = 0;
- _paddingFactor = 1;
- _maxCappedDocs = numeric_limits<long long>::max();
- _capFirstNewRecord.setInvalid();
- if ( _capped ) {
- // copied from NamespaceDetails::NamespaceDetails()
- setDeletedListEntry( NULL, 1, DiskLoc().setInvalid() );
- }
- }
-
- const DiskLoc& DummyRecordStoreV1MetaData::capExtent() const {
- return _capExtent;
- }
-
- void DummyRecordStoreV1MetaData::setCapExtent( OperationContext* txn,
- const DiskLoc& loc ) {
- _capExtent = loc;
- }
-
- const DiskLoc& DummyRecordStoreV1MetaData::capFirstNewRecord() const {
- return _capFirstNewRecord;
- }
-
- void DummyRecordStoreV1MetaData::setCapFirstNewRecord( OperationContext* txn,
- const DiskLoc& loc ) {
- _capFirstNewRecord = loc;
- }
-
- long long DummyRecordStoreV1MetaData::dataSize() const {
- return _dataSize;
+using std::numeric_limits;
+
+DummyRecordStoreV1MetaData::DummyRecordStoreV1MetaData(bool capped, int userFlags) {
+ _dataSize = 0;
+ _numRecords = 0;
+ _capped = capped;
+ _userFlags = userFlags;
+ _lastExtentSize = 0;
+ _paddingFactor = 1;
+ _maxCappedDocs = numeric_limits<long long>::max();
+ _capFirstNewRecord.setInvalid();
+ if (_capped) {
+ // copied from NamespaceDetails::NamespaceDetails()
+ setDeletedListEntry(NULL, 1, DiskLoc().setInvalid());
}
+}
- long long DummyRecordStoreV1MetaData::numRecords() const {
- return _numRecords;
- }
+const DiskLoc& DummyRecordStoreV1MetaData::capExtent() const {
+ return _capExtent;
+}
- void DummyRecordStoreV1MetaData::incrementStats( OperationContext* txn,
- long long dataSizeIncrement,
- long long numRecordsIncrement ) {
- _dataSize += dataSizeIncrement;
- _numRecords += numRecordsIncrement;
- }
+void DummyRecordStoreV1MetaData::setCapExtent(OperationContext* txn, const DiskLoc& loc) {
+ _capExtent = loc;
+}
- void DummyRecordStoreV1MetaData::setStats( OperationContext* txn,
- long long dataSize,
- long long numRecords ) {
- _dataSize = dataSize;
- _numRecords = numRecords;
- }
+const DiskLoc& DummyRecordStoreV1MetaData::capFirstNewRecord() const {
+ return _capFirstNewRecord;
+}
- namespace {
- DiskLoc myNull;
- }
+void DummyRecordStoreV1MetaData::setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc) {
+ _capFirstNewRecord = loc;
+}
- DiskLoc DummyRecordStoreV1MetaData::deletedListEntry( int bucket ) const {
- invariant( bucket >= 0 );
- if ( static_cast<size_t>( bucket ) >= _deletedLists.size() )
- return myNull;
- return _deletedLists[bucket];
- }
+long long DummyRecordStoreV1MetaData::dataSize() const {
+ return _dataSize;
+}
- void DummyRecordStoreV1MetaData::setDeletedListEntry( OperationContext* txn,
- int bucket,
- const DiskLoc& loc ) {
- invariant( bucket >= 0 );
- invariant( bucket < 1000 );
- while ( static_cast<size_t>( bucket ) >= _deletedLists.size() )
- _deletedLists.push_back( DiskLoc() );
- _deletedLists[bucket] = loc;
- }
+long long DummyRecordStoreV1MetaData::numRecords() const {
+ return _numRecords;
+}
- DiskLoc DummyRecordStoreV1MetaData::deletedListLegacyGrabBag() const {
- return _deletedListLegacyGrabBag;
- }
+void DummyRecordStoreV1MetaData::incrementStats(OperationContext* txn,
+ long long dataSizeIncrement,
+ long long numRecordsIncrement) {
+ _dataSize += dataSizeIncrement;
+ _numRecords += numRecordsIncrement;
+}
- void DummyRecordStoreV1MetaData::setDeletedListLegacyGrabBag(OperationContext* txn,
- const DiskLoc& loc) {
- _deletedListLegacyGrabBag = loc;
- }
+void DummyRecordStoreV1MetaData::setStats(OperationContext* txn,
+ long long dataSize,
+ long long numRecords) {
+ _dataSize = dataSize;
+ _numRecords = numRecords;
+}
- void DummyRecordStoreV1MetaData::orphanDeletedList(OperationContext* txn) {
- // They will be recreated on demand.
- _deletedLists.clear();
- }
+namespace {
+DiskLoc myNull;
+}
- const DiskLoc& DummyRecordStoreV1MetaData::firstExtent(OperationContext* txn) const {
- return _firstExtent;
- }
+DiskLoc DummyRecordStoreV1MetaData::deletedListEntry(int bucket) const {
+ invariant(bucket >= 0);
+ if (static_cast<size_t>(bucket) >= _deletedLists.size())
+ return myNull;
+ return _deletedLists[bucket];
+}
- void DummyRecordStoreV1MetaData::setFirstExtent( OperationContext* txn,
- const DiskLoc& loc ) {
- _firstExtent = loc;
- }
+void DummyRecordStoreV1MetaData::setDeletedListEntry(OperationContext* txn,
+ int bucket,
+ const DiskLoc& loc) {
+ invariant(bucket >= 0);
+ invariant(bucket < 1000);
+ while (static_cast<size_t>(bucket) >= _deletedLists.size())
+ _deletedLists.push_back(DiskLoc());
+ _deletedLists[bucket] = loc;
+}
- const DiskLoc& DummyRecordStoreV1MetaData::lastExtent(OperationContext* txn) const {
- return _lastExtent;
- }
+DiskLoc DummyRecordStoreV1MetaData::deletedListLegacyGrabBag() const {
+ return _deletedListLegacyGrabBag;
+}
- void DummyRecordStoreV1MetaData::setLastExtent( OperationContext* txn,
- const DiskLoc& loc ) {
- _lastExtent = loc;
- }
+void DummyRecordStoreV1MetaData::setDeletedListLegacyGrabBag(OperationContext* txn,
+ const DiskLoc& loc) {
+ _deletedListLegacyGrabBag = loc;
+}
- bool DummyRecordStoreV1MetaData::isCapped() const {
- return _capped;
- }
+void DummyRecordStoreV1MetaData::orphanDeletedList(OperationContext* txn) {
+ // They will be recreated on demand.
+ _deletedLists.clear();
+}
- bool DummyRecordStoreV1MetaData::isUserFlagSet( int flag ) const {
- return _userFlags & flag;
- }
+const DiskLoc& DummyRecordStoreV1MetaData::firstExtent(OperationContext* txn) const {
+ return _firstExtent;
+}
- bool DummyRecordStoreV1MetaData::setUserFlag( OperationContext* txn, int flag ) {
- if ( ( _userFlags & flag ) == flag )
- return false;
+void DummyRecordStoreV1MetaData::setFirstExtent(OperationContext* txn, const DiskLoc& loc) {
+ _firstExtent = loc;
+}
- _userFlags |= flag;
- return true;
+const DiskLoc& DummyRecordStoreV1MetaData::lastExtent(OperationContext* txn) const {
+ return _lastExtent;
+}
- }
- bool DummyRecordStoreV1MetaData::clearUserFlag( OperationContext* txn, int flag ) {
- if ( ( _userFlags & flag ) == 0 )
- return false;
+void DummyRecordStoreV1MetaData::setLastExtent(OperationContext* txn, const DiskLoc& loc) {
+ _lastExtent = loc;
+}
- _userFlags &= ~flag;
- return true;
+bool DummyRecordStoreV1MetaData::isCapped() const {
+ return _capped;
+}
- }
- bool DummyRecordStoreV1MetaData::replaceUserFlags( OperationContext* txn, int flags ) {
- if ( _userFlags == flags )
- return false;
- _userFlags = flags;
- return true;
- }
+bool DummyRecordStoreV1MetaData::isUserFlagSet(int flag) const {
+ return _userFlags & flag;
+}
+bool DummyRecordStoreV1MetaData::setUserFlag(OperationContext* txn, int flag) {
+ if ((_userFlags & flag) == flag)
+ return false;
- int DummyRecordStoreV1MetaData::lastExtentSize(OperationContext* txn) const {
- return _lastExtentSize;
- }
+ _userFlags |= flag;
+ return true;
+}
+bool DummyRecordStoreV1MetaData::clearUserFlag(OperationContext* txn, int flag) {
+ if ((_userFlags & flag) == 0)
+ return false;
- void DummyRecordStoreV1MetaData::setLastExtentSize( OperationContext* txn, int newMax ) {
- _lastExtentSize = newMax;
- }
+ _userFlags &= ~flag;
+ return true;
+}
+bool DummyRecordStoreV1MetaData::replaceUserFlags(OperationContext* txn, int flags) {
+ if (_userFlags == flags)
+ return false;
+ _userFlags = flags;
+ return true;
+}
- long long DummyRecordStoreV1MetaData::maxCappedDocs() const {
- return _maxCappedDocs;
- }
- // -----------------------------------------
+int DummyRecordStoreV1MetaData::lastExtentSize(OperationContext* txn) const {
+ return _lastExtentSize;
+}
- DummyExtentManager::~DummyExtentManager() {
- for ( size_t i = 0; i < _extents.size(); i++ ) {
- if ( _extents[i].data )
- free( _extents[i].data );
- }
- }
+void DummyRecordStoreV1MetaData::setLastExtentSize(OperationContext* txn, int newMax) {
+ _lastExtentSize = newMax;
+}
- Status DummyExtentManager::init(OperationContext* txn) {
- return Status::OK();
- }
+long long DummyRecordStoreV1MetaData::maxCappedDocs() const {
+ return _maxCappedDocs;
+}
- int DummyExtentManager::numFiles() const {
- return static_cast<int>( _extents.size() );
- }
+// -----------------------------------------
- long long DummyExtentManager::fileSize() const {
- invariant( false );
- return -1;
+DummyExtentManager::~DummyExtentManager() {
+ for (size_t i = 0; i < _extents.size(); i++) {
+ if (_extents[i].data)
+ free(_extents[i].data);
}
+}
- DiskLoc DummyExtentManager::allocateExtent( OperationContext* txn,
- bool capped,
- int size,
- bool enforceQuota ) {
- size = quantizeExtentSize( size );
+Status DummyExtentManager::init(OperationContext* txn) {
+ return Status::OK();
+}
- ExtentInfo info;
- info.data = static_cast<char*>( mongoMalloc( size ) );
- info.length = size;
+int DummyExtentManager::numFiles() const {
+ return static_cast<int>(_extents.size());
+}
- DiskLoc loc( _extents.size(), 0 );
- _extents.push_back( info );
+long long DummyExtentManager::fileSize() const {
+ invariant(false);
+ return -1;
+}
- Extent* e = getExtent( loc, false );
- e->magic = Extent::extentSignature;
- e->myLoc = loc;
- e->xnext.Null();
- e->xprev.Null();
- e->length = size;
- e->firstRecord.Null();
- e->lastRecord.Null();
+DiskLoc DummyExtentManager::allocateExtent(OperationContext* txn,
+ bool capped,
+ int size,
+ bool enforceQuota) {
+ size = quantizeExtentSize(size);
+
+ ExtentInfo info;
+ info.data = static_cast<char*>(mongoMalloc(size));
+ info.length = size;
+
+ DiskLoc loc(_extents.size(), 0);
+ _extents.push_back(info);
+
+ Extent* e = getExtent(loc, false);
+ e->magic = Extent::extentSignature;
+ e->myLoc = loc;
+ e->xnext.Null();
+ e->xprev.Null();
+ e->length = size;
+ e->firstRecord.Null();
+ e->lastRecord.Null();
+
+ return loc;
+}
- return loc;
+void DummyExtentManager::freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt) {
+ // XXX
+}
- }
+void DummyExtentManager::freeExtent(OperationContext* txn, DiskLoc extent) {
+ // XXX
+}
+void DummyExtentManager::freeListStats(OperationContext* txn,
+ int* numExtents,
+ int64_t* totalFreeSizeBytes) const {
+ invariant(false);
+}
- void DummyExtentManager::freeExtents( OperationContext* txn,
- DiskLoc firstExt, DiskLoc lastExt ) {
- // XXX
- }
+std::unique_ptr<RecordFetcher> DummyExtentManager::recordNeedsFetch(const DiskLoc& loc) const {
+ return {};
+}
- void DummyExtentManager::freeExtent( OperationContext* txn, DiskLoc extent ) {
- // XXX
- }
- void DummyExtentManager::freeListStats(OperationContext* txn,
- int* numExtents,
- int64_t* totalFreeSizeBytes) const {
- invariant(false);
- }
+MmapV1RecordHeader* DummyExtentManager::recordForV1(const DiskLoc& loc) const {
+ if (static_cast<size_t>(loc.a()) >= _extents.size())
+ return NULL;
+ if (static_cast<size_t>(loc.getOfs()) >= _extents[loc.a()].length)
+ return NULL;
+ char* root = _extents[loc.a()].data;
+ return reinterpret_cast<MmapV1RecordHeader*>(root + loc.getOfs());
+}
- std::unique_ptr<RecordFetcher> DummyExtentManager::recordNeedsFetch(const DiskLoc& loc) const {
- return {};
- }
+Extent* DummyExtentManager::extentForV1(const DiskLoc& loc) const {
+ invariant(false);
+}
- MmapV1RecordHeader* DummyExtentManager::recordForV1( const DiskLoc& loc ) const {
- if ( static_cast<size_t>( loc.a() ) >= _extents.size() )
- return NULL;
- if ( static_cast<size_t>( loc.getOfs() ) >= _extents[loc.a()].length )
- return NULL;
- char* root = _extents[loc.a()].data;
- return reinterpret_cast<MmapV1RecordHeader*>( root + loc.getOfs() );
- }
+DiskLoc DummyExtentManager::extentLocForV1(const DiskLoc& loc) const {
+ return DiskLoc(loc.a(), 0);
+}
- Extent* DummyExtentManager::extentForV1( const DiskLoc& loc ) const {
- invariant( false );
- }
+Extent* DummyExtentManager::getExtent(const DiskLoc& loc, bool doSanityCheck) const {
+ invariant(!loc.isNull());
+ invariant(static_cast<size_t>(loc.a()) < _extents.size());
+ invariant(loc.getOfs() == 0);
+ Extent* ext = reinterpret_cast<Extent*>(_extents[loc.a()].data);
+ if (doSanityCheck)
+ ext->assertOk();
+ return ext;
+}
- DiskLoc DummyExtentManager::extentLocForV1( const DiskLoc& loc ) const {
- return DiskLoc( loc.a(), 0 );
- }
+int DummyExtentManager::maxSize() const {
+ return 1024 * 1024 * 64;
+}
- Extent* DummyExtentManager::getExtent( const DiskLoc& loc, bool doSanityCheck ) const {
- invariant( !loc.isNull() );
- invariant( static_cast<size_t>( loc.a() ) < _extents.size() );
- invariant( loc.getOfs() == 0 );
- Extent* ext = reinterpret_cast<Extent*>( _extents[loc.a()].data );
- if (doSanityCheck)
- ext->assertOk();
- return ext;
- }
+DummyExtentManager::CacheHint* DummyExtentManager::cacheHint(const DiskLoc& extentLoc,
+ const HintType& hint) {
+ return new CacheHint();
+}
- int DummyExtentManager::maxSize() const {
- return 1024 * 1024 * 64;
- }
+namespace {
+void accumulateExtentSizeRequirements(const LocAndSize* las, std::map<int, size_t>* sizes) {
+ if (!las)
+ return;
- DummyExtentManager::CacheHint* DummyExtentManager::cacheHint( const DiskLoc& extentLoc, const HintType& hint ) {
- return new CacheHint();
- }
+ while (!las->loc.isNull()) {
+ // We require passed in offsets to be > 1000 to leave room for Extent headers.
+ invariant(Extent::HeaderSize() < 1000);
+ invariant(las->loc.getOfs() >= 1000);
-namespace {
- void accumulateExtentSizeRequirements(const LocAndSize* las, std::map<int, size_t>* sizes) {
- if (!las)
- return;
-
- while (!las->loc.isNull()) {
- // We require passed in offsets to be > 1000 to leave room for Extent headers.
- invariant(Extent::HeaderSize() < 1000);
- invariant(las->loc.getOfs() >= 1000);
-
- const size_t end = las->loc.getOfs() + las->size;
- size_t& sizeNeeded = (*sizes)[las->loc.a()];
- sizeNeeded = std::max(sizeNeeded, end);
- las++;
- }
+ const size_t end = las->loc.getOfs() + las->size;
+ size_t& sizeNeeded = (*sizes)[las->loc.a()];
+ sizeNeeded = std::max(sizeNeeded, end);
+ las++;
}
+}
- void printRecList(OperationContext* txn,
- const ExtentManager* em,
- const RecordStoreV1MetaData* md) {
- log() << " *** BEGIN ACTUAL RECORD LIST *** ";
- DiskLoc extLoc = md->firstExtent(txn);
- std::set<DiskLoc> seenLocs;
- while (!extLoc.isNull()) {
- Extent* ext = em->getExtent(extLoc, true);
- DiskLoc actualLoc = ext->firstRecord;
- while (!actualLoc.isNull()) {
- const MmapV1RecordHeader* actualRec = em->recordForV1(actualLoc);
- const int actualSize = actualRec->lengthWithHeaders();
-
- log() << "loc: " << actualLoc // <--hex
- << " (" << actualLoc.getOfs() << ")"
- << " size: " << actualSize
- << " prev: " << actualRec->prevOfs()
- << " next: " << actualRec->nextOfs()
- << (actualLoc == md->capFirstNewRecord() ? " (CAP_FIRST_NEW)" : "")
- ;
-
- const bool foundCycle = !seenLocs.insert(actualLoc).second;
- invariant(!foundCycle);
-
- const int nextOfs = actualRec->nextOfs();
- actualLoc = (nextOfs == DiskLoc::NullOfs ? DiskLoc()
- : DiskLoc(actualLoc.a(), nextOfs));
- }
- extLoc = ext->xnext;
+void printRecList(OperationContext* txn, const ExtentManager* em, const RecordStoreV1MetaData* md) {
+ log() << " *** BEGIN ACTUAL RECORD LIST *** ";
+ DiskLoc extLoc = md->firstExtent(txn);
+ std::set<DiskLoc> seenLocs;
+ while (!extLoc.isNull()) {
+ Extent* ext = em->getExtent(extLoc, true);
+ DiskLoc actualLoc = ext->firstRecord;
+ while (!actualLoc.isNull()) {
+ const MmapV1RecordHeader* actualRec = em->recordForV1(actualLoc);
+ const int actualSize = actualRec->lengthWithHeaders();
+
+ log() << "loc: " << actualLoc // <--hex
+ << " (" << actualLoc.getOfs() << ")"
+ << " size: " << actualSize << " prev: " << actualRec->prevOfs()
+ << " next: " << actualRec->nextOfs()
+ << (actualLoc == md->capFirstNewRecord() ? " (CAP_FIRST_NEW)" : "");
+
+ const bool foundCycle = !seenLocs.insert(actualLoc).second;
+ invariant(!foundCycle);
+
+ const int nextOfs = actualRec->nextOfs();
+ actualLoc = (nextOfs == DiskLoc::NullOfs ? DiskLoc() : DiskLoc(actualLoc.a(), nextOfs));
}
- log() << " *** END ACTUAL RECORD LIST *** ";
+ extLoc = ext->xnext;
}
+ log() << " *** END ACTUAL RECORD LIST *** ";
+}
- void printDRecList(const ExtentManager* em, const RecordStoreV1MetaData* md) {
- log() << " *** BEGIN ACTUAL DELETED RECORD LIST *** ";
- std::set<DiskLoc> seenLocs;
- for (int bucketIdx = 0; bucketIdx < RecordStoreV1Base::Buckets; bucketIdx++) {
- DiskLoc actualLoc = md->deletedListEntry(bucketIdx);
- while (!actualLoc.isNull()) {
- const DeletedRecord* actualDrec = &em->recordForV1(actualLoc)->asDeleted();
- const int actualSize = actualDrec->lengthWithHeaders();
+void printDRecList(const ExtentManager* em, const RecordStoreV1MetaData* md) {
+ log() << " *** BEGIN ACTUAL DELETED RECORD LIST *** ";
+ std::set<DiskLoc> seenLocs;
+ for (int bucketIdx = 0; bucketIdx < RecordStoreV1Base::Buckets; bucketIdx++) {
+ DiskLoc actualLoc = md->deletedListEntry(bucketIdx);
+ while (!actualLoc.isNull()) {
+ const DeletedRecord* actualDrec = &em->recordForV1(actualLoc)->asDeleted();
+ const int actualSize = actualDrec->lengthWithHeaders();
- log() << "loc: " << actualLoc // <--hex
- << " (" << actualLoc.getOfs() << ")"
- << " size: " << actualSize
- << " bucket: " << bucketIdx
- << " next: " << actualDrec->nextDeleted();
+ log() << "loc: " << actualLoc // <--hex
+ << " (" << actualLoc.getOfs() << ")"
+ << " size: " << actualSize << " bucket: " << bucketIdx
+ << " next: " << actualDrec->nextDeleted();
- const bool foundCycle = !seenLocs.insert(actualLoc).second;
- invariant(!foundCycle);
+ const bool foundCycle = !seenLocs.insert(actualLoc).second;
+ invariant(!foundCycle);
- actualLoc = actualDrec->nextDeleted();
- }
-
- // Only print bucket 0 in capped collections since it contains all deleted records
- if (md->isCapped())
- break;
+ actualLoc = actualDrec->nextDeleted();
}
- log() << " *** END ACTUAL DELETED RECORD LIST *** ";
+
+ // Only print bucket 0 in capped collections since it contains all deleted records
+ if (md->isCapped())
+ break;
}
+ log() << " *** END ACTUAL DELETED RECORD LIST *** ";
+}
}
- void initializeV1RS(OperationContext* txn,
- const LocAndSize* records,
- const LocAndSize* drecs,
- const LocAndSize* legacyGrabBag,
- DummyExtentManager* em,
- DummyRecordStoreV1MetaData* md) {
- invariant(records || drecs); // if both are NULL nothing is being created...
-
- // Need to start with a blank slate
- invariant(em->numFiles() == 0);
- invariant(md->firstExtent(txn).isNull());
-
- // pre-allocate extents (even extents that aren't part of this RS)
- {
- typedef std::map<int, size_t> ExtentSizes;
- ExtentSizes extentSizes;
- accumulateExtentSizeRequirements(records, &extentSizes);
- accumulateExtentSizeRequirements(drecs, &extentSizes);
- accumulateExtentSizeRequirements(legacyGrabBag, &extentSizes);
- invariant(!extentSizes.empty());
-
- const int maxExtent = extentSizes.rbegin()->first;
- for (int i = 0; i <= maxExtent; i++) {
- const size_t size = extentSizes.count(i) ? extentSizes[i] : 0;
- const DiskLoc loc = em->allocateExtent(txn, md->isCapped(), size, 0);
-
- // This function and assertState depend on these details of DummyExtentManager
- invariant(loc.a() == i);
- invariant(loc.getOfs() == 0);
- }
-
- // link together extents that should be part of this RS
- md->setFirstExtent(txn, DiskLoc(extentSizes.begin()->first, 0));
- md->setLastExtent(txn, DiskLoc(extentSizes.rbegin()->first, 0));
- for (ExtentSizes::iterator it = extentSizes.begin();
- boost::next(it) != extentSizes.end(); /* ++it */ ) {
- const int a = it->first;
- ++it;
- const int b = it->first;
- em->getExtent(DiskLoc(a, 0))->xnext = DiskLoc(b, 0);
- em->getExtent(DiskLoc(b, 0))->xprev = DiskLoc(a, 0);
- }
+void initializeV1RS(OperationContext* txn,
+ const LocAndSize* records,
+ const LocAndSize* drecs,
+ const LocAndSize* legacyGrabBag,
+ DummyExtentManager* em,
+ DummyRecordStoreV1MetaData* md) {
+ invariant(records || drecs); // if both are NULL nothing is being created...
+
+ // Need to start with a blank slate
+ invariant(em->numFiles() == 0);
+ invariant(md->firstExtent(txn).isNull());
+
+ // pre-allocate extents (even extents that aren't part of this RS)
+ {
+ typedef std::map<int, size_t> ExtentSizes;
+ ExtentSizes extentSizes;
+ accumulateExtentSizeRequirements(records, &extentSizes);
+ accumulateExtentSizeRequirements(drecs, &extentSizes);
+ accumulateExtentSizeRequirements(legacyGrabBag, &extentSizes);
+ invariant(!extentSizes.empty());
+
+ const int maxExtent = extentSizes.rbegin()->first;
+ for (int i = 0; i <= maxExtent; i++) {
+ const size_t size = extentSizes.count(i) ? extentSizes[i] : 0;
+ const DiskLoc loc = em->allocateExtent(txn, md->isCapped(), size, 0);
+
+ // This function and assertState depend on these details of DummyExtentManager
+ invariant(loc.a() == i);
+ invariant(loc.getOfs() == 0);
+ }
- // This signals "done allocating new extents".
- if (md->isCapped())
- md->setDeletedListEntry(txn, 1, DiskLoc());
+ // link together extents that should be part of this RS
+ md->setFirstExtent(txn, DiskLoc(extentSizes.begin()->first, 0));
+ md->setLastExtent(txn, DiskLoc(extentSizes.rbegin()->first, 0));
+ for (ExtentSizes::iterator it = extentSizes.begin(); boost::next(it) != extentSizes.end();
+ /* ++it */) {
+ const int a = it->first;
+ ++it;
+ const int b = it->first;
+ em->getExtent(DiskLoc(a, 0))->xnext = DiskLoc(b, 0);
+ em->getExtent(DiskLoc(b, 0))->xprev = DiskLoc(a, 0);
}
- if (records && !records[0].loc.isNull()) {
- int recIdx = 0;
- DiskLoc extLoc = md->firstExtent(txn);
- while (!extLoc.isNull()) {
- Extent* ext = em->getExtent(extLoc);
- int prevOfs = DiskLoc::NullOfs;
- while (extLoc.a() == records[recIdx].loc.a()) { // for all records in this extent
- const DiskLoc loc = records[recIdx].loc;
- const int size = records[recIdx].size;;
- invariant(size >= MmapV1RecordHeader::HeaderSize);
+ // This signals "done allocating new extents".
+ if (md->isCapped())
+ md->setDeletedListEntry(txn, 1, DiskLoc());
+ }
- md->incrementStats(txn, size - MmapV1RecordHeader::HeaderSize, 1);
+ if (records && !records[0].loc.isNull()) {
+ int recIdx = 0;
+ DiskLoc extLoc = md->firstExtent(txn);
+ while (!extLoc.isNull()) {
+ Extent* ext = em->getExtent(extLoc);
+ int prevOfs = DiskLoc::NullOfs;
+ while (extLoc.a() == records[recIdx].loc.a()) { // for all records in this extent
+ const DiskLoc loc = records[recIdx].loc;
+                const int size = records[recIdx].size;
+ invariant(size >= MmapV1RecordHeader::HeaderSize);
- if (ext->firstRecord.isNull())
- ext->firstRecord = loc;
+ md->incrementStats(txn, size - MmapV1RecordHeader::HeaderSize, 1);
- MmapV1RecordHeader* rec = em->recordForV1(loc);
- rec->lengthWithHeaders() = size;
- rec->extentOfs() = 0;
+ if (ext->firstRecord.isNull())
+ ext->firstRecord = loc;
- rec->prevOfs() = prevOfs;
- prevOfs = loc.getOfs();
+ MmapV1RecordHeader* rec = em->recordForV1(loc);
+ rec->lengthWithHeaders() = size;
+ rec->extentOfs() = 0;
- const DiskLoc nextLoc = records[recIdx + 1].loc;
- if (nextLoc.a() == loc.a()) { // if next is in same extent
- rec->nextOfs() = nextLoc.getOfs();
- }
- else {
- rec->nextOfs() = DiskLoc::NullOfs;
- ext->lastRecord = loc;
- }
+ rec->prevOfs() = prevOfs;
+ prevOfs = loc.getOfs();
- recIdx++;
+ const DiskLoc nextLoc = records[recIdx + 1].loc;
+ if (nextLoc.a() == loc.a()) { // if next is in same extent
+ rec->nextOfs() = nextLoc.getOfs();
+ } else {
+ rec->nextOfs() = DiskLoc::NullOfs;
+ ext->lastRecord = loc;
}
- extLoc = ext->xnext;
+
+ recIdx++;
}
- invariant(records[recIdx].loc.isNull());
+ extLoc = ext->xnext;
}
-
- if (drecs && !drecs[0].loc.isNull()) {
- int drecIdx = 0;
- DiskLoc* prevNextPtr = NULL;
- int lastBucket = -1;
- while (!drecs[drecIdx].loc.isNull()) {
- const DiskLoc loc = drecs[drecIdx].loc;
- const int size = drecs[drecIdx].size;
- invariant(size >= MmapV1RecordHeader::HeaderSize);
- const int bucket = RecordStoreV1Base::bucket(size);
-
- if (md->isCapped()) {
- // All drecs form a single list in bucket 0
- if (prevNextPtr == NULL) {
- md->setDeletedListEntry(txn, 0, loc);
- }
- else {
- *prevNextPtr = loc;
- }
-
- if (loc.a() < md->capExtent().a()
- && drecs[drecIdx + 1].loc.a() == md->capExtent().a()) {
- // Bucket 1 is known as cappedLastDelRecLastExtent
- md->setDeletedListEntry(txn, 1, loc);
- }
- }
- else if (bucket != lastBucket) {
- invariant(bucket > lastBucket); // if this fails, drecs weren't sorted by bucket
- md->setDeletedListEntry(txn, bucket, loc);
- lastBucket = bucket;
- }
- else {
+ invariant(records[recIdx].loc.isNull());
+ }
+
+ if (drecs && !drecs[0].loc.isNull()) {
+ int drecIdx = 0;
+ DiskLoc* prevNextPtr = NULL;
+ int lastBucket = -1;
+ while (!drecs[drecIdx].loc.isNull()) {
+ const DiskLoc loc = drecs[drecIdx].loc;
+ const int size = drecs[drecIdx].size;
+ invariant(size >= MmapV1RecordHeader::HeaderSize);
+ const int bucket = RecordStoreV1Base::bucket(size);
+
+ if (md->isCapped()) {
+ // All drecs form a single list in bucket 0
+ if (prevNextPtr == NULL) {
+ md->setDeletedListEntry(txn, 0, loc);
+ } else {
*prevNextPtr = loc;
}
- DeletedRecord* drec = &em->recordForV1(loc)->asDeleted();
- drec->lengthWithHeaders() = size;
- drec->extentOfs() = 0;
- drec->nextDeleted() = DiskLoc();
- prevNextPtr = &drec->nextDeleted();
-
- drecIdx++;
+ if (loc.a() < md->capExtent().a() &&
+ drecs[drecIdx + 1].loc.a() == md->capExtent().a()) {
+ // Bucket 1 is known as cappedLastDelRecLastExtent
+ md->setDeletedListEntry(txn, 1, loc);
+ }
+ } else if (bucket != lastBucket) {
+ invariant(bucket > lastBucket); // if this fails, drecs weren't sorted by bucket
+ md->setDeletedListEntry(txn, bucket, loc);
+ lastBucket = bucket;
+ } else {
+ *prevNextPtr = loc;
}
- }
- if (legacyGrabBag && !legacyGrabBag[0].loc.isNull()) {
- invariant(!md->isCapped()); // capped should have an empty legacy grab bag.
+ DeletedRecord* drec = &em->recordForV1(loc)->asDeleted();
+ drec->lengthWithHeaders() = size;
+ drec->extentOfs() = 0;
+ drec->nextDeleted() = DiskLoc();
+ prevNextPtr = &drec->nextDeleted();
- int grabBagIdx = 0;
- DiskLoc* prevNextPtr = NULL;
- while (!legacyGrabBag[grabBagIdx].loc.isNull()) {
- const DiskLoc loc = legacyGrabBag[grabBagIdx].loc;
- const int size = legacyGrabBag[grabBagIdx].size;
- invariant(size >= MmapV1RecordHeader::HeaderSize);
+ drecIdx++;
+ }
+ }
- if (grabBagIdx == 0) {
- md->setDeletedListLegacyGrabBag(txn, loc);
- }
- else {
- *prevNextPtr = loc;
- }
+ if (legacyGrabBag && !legacyGrabBag[0].loc.isNull()) {
+ invariant(!md->isCapped()); // capped should have an empty legacy grab bag.
- DeletedRecord* drec = &em->recordForV1(loc)->asDeleted();
- drec->lengthWithHeaders() = size;
- drec->extentOfs() = 0;
- drec->nextDeleted() = DiskLoc();
- prevNextPtr = &drec->nextDeleted();
+ int grabBagIdx = 0;
+ DiskLoc* prevNextPtr = NULL;
+ while (!legacyGrabBag[grabBagIdx].loc.isNull()) {
+ const DiskLoc loc = legacyGrabBag[grabBagIdx].loc;
+ const int size = legacyGrabBag[grabBagIdx].size;
+ invariant(size >= MmapV1RecordHeader::HeaderSize);
- grabBagIdx++;
+ if (grabBagIdx == 0) {
+ md->setDeletedListLegacyGrabBag(txn, loc);
+ } else {
+ *prevNextPtr = loc;
}
- }
- // Make sure we set everything up as requested.
- assertStateV1RS(txn, records, drecs, legacyGrabBag, em, md);
+ DeletedRecord* drec = &em->recordForV1(loc)->asDeleted();
+ drec->lengthWithHeaders() = size;
+ drec->extentOfs() = 0;
+ drec->nextDeleted() = DiskLoc();
+ prevNextPtr = &drec->nextDeleted();
+
+ grabBagIdx++;
+ }
}
- void assertStateV1RS(OperationContext* txn,
- const LocAndSize* records,
- const LocAndSize* drecs,
- const LocAndSize* legacyGrabBag,
- const ExtentManager* em,
- const DummyRecordStoreV1MetaData* md) {
- invariant(records || drecs); // if both are NULL nothing is being asserted...
-
- try {
- if (records) {
- long long dataSize = 0;
- long long numRecs = 0;
-
- int recIdx = 0;
-
- DiskLoc extLoc = md->firstExtent(txn);
- while (!extLoc.isNull()) { // for each Extent
- Extent* ext = em->getExtent(extLoc, true);
- int expectedPrevOfs = DiskLoc::NullOfs;
- DiskLoc actualLoc = ext->firstRecord;
- while (!actualLoc.isNull()) { // for each MmapV1RecordHeader in this Extent
- const MmapV1RecordHeader* actualRec = em->recordForV1(actualLoc);
- const int actualSize = actualRec->lengthWithHeaders();
-
- dataSize += actualSize - MmapV1RecordHeader::HeaderSize;
- numRecs += 1;
-
- ASSERT_EQUALS(actualLoc, records[recIdx].loc);
- ASSERT_EQUALS(actualSize, records[recIdx].size);
-
- ASSERT_EQUALS(actualRec->extentOfs(), extLoc.getOfs());
- ASSERT_EQUALS(actualRec->prevOfs(), expectedPrevOfs);
- expectedPrevOfs = actualLoc.getOfs();
-
- recIdx++;
- const int nextOfs = actualRec->nextOfs();
- actualLoc = (nextOfs == DiskLoc::NullOfs ? DiskLoc()
- : DiskLoc(actualLoc.a(), nextOfs));
- }
+ // Make sure we set everything up as requested.
+ assertStateV1RS(txn, records, drecs, legacyGrabBag, em, md);
+}
- if (ext->xnext.isNull()) {
- ASSERT_EQUALS(md->lastExtent(txn), extLoc);
- }
+void assertStateV1RS(OperationContext* txn,
+ const LocAndSize* records,
+ const LocAndSize* drecs,
+ const LocAndSize* legacyGrabBag,
+ const ExtentManager* em,
+ const DummyRecordStoreV1MetaData* md) {
+ invariant(records || drecs); // if both are NULL nothing is being asserted...
- extLoc = ext->xnext;
- }
+ try {
+ if (records) {
+ long long dataSize = 0;
+ long long numRecs = 0;
- // both the expected and actual record lists must be done at this point
- ASSERT_EQUALS(records[recIdx].loc, DiskLoc());
+ int recIdx = 0;
- ASSERT_EQUALS(dataSize, md->dataSize());
- ASSERT_EQUALS(numRecs, md->numRecords());
- }
+ DiskLoc extLoc = md->firstExtent(txn);
+ while (!extLoc.isNull()) { // for each Extent
+ Extent* ext = em->getExtent(extLoc, true);
+ int expectedPrevOfs = DiskLoc::NullOfs;
+ DiskLoc actualLoc = ext->firstRecord;
+ while (!actualLoc.isNull()) { // for each MmapV1RecordHeader in this Extent
+ const MmapV1RecordHeader* actualRec = em->recordForV1(actualLoc);
+ const int actualSize = actualRec->lengthWithHeaders();
- if (drecs) {
- int drecIdx = 0;
- for (int bucketIdx = 0; bucketIdx < RecordStoreV1Base::Buckets; bucketIdx++) {
- DiskLoc actualLoc = md->deletedListEntry(bucketIdx);
-
- if (md->isCapped() && bucketIdx == 1) {
- // In capped collections, the 2nd bucket (index 1) points to the drec before
- // the first drec in the capExtent. If the capExtent is the first Extent,
- // it should be Null.
-
- if (md->capExtent() == md->firstExtent(txn)) {
- ASSERT_EQUALS(actualLoc, DiskLoc());
- }
- else {
- ASSERT_NOT_EQUALS(actualLoc.a(), md->capExtent().a());
- const DeletedRecord* actualDrec =
- &em->recordForV1(actualLoc)->asDeleted();
- ASSERT_EQUALS(actualDrec->nextDeleted().a(), md->capExtent().a());
- }
-
- // Don't do normal checking of bucket 1 in capped collections. Checking
- // other buckets to verify that they are Null.
- continue;
- }
+ dataSize += actualSize - MmapV1RecordHeader::HeaderSize;
+ numRecs += 1;
- while (!actualLoc.isNull()) {
- const DeletedRecord* actualDrec = &em->recordForV1(actualLoc)->asDeleted();
- const int actualSize = actualDrec->lengthWithHeaders();
+ ASSERT_EQUALS(actualLoc, records[recIdx].loc);
+ ASSERT_EQUALS(actualSize, records[recIdx].size);
+
+ ASSERT_EQUALS(actualRec->extentOfs(), extLoc.getOfs());
+ ASSERT_EQUALS(actualRec->prevOfs(), expectedPrevOfs);
+ expectedPrevOfs = actualLoc.getOfs();
+
+ recIdx++;
+ const int nextOfs = actualRec->nextOfs();
+ actualLoc =
+ (nextOfs == DiskLoc::NullOfs ? DiskLoc() : DiskLoc(actualLoc.a(), nextOfs));
+ }
+
+ if (ext->xnext.isNull()) {
+ ASSERT_EQUALS(md->lastExtent(txn), extLoc);
+ }
- ASSERT_EQUALS(actualLoc, drecs[drecIdx].loc);
- ASSERT_EQUALS(actualSize, drecs[drecIdx].size);
+ extLoc = ext->xnext;
+ }
- // Make sure the drec is correct
- ASSERT_EQUALS(actualDrec->extentOfs(), 0);
+ // both the expected and actual record lists must be done at this point
+ ASSERT_EQUALS(records[recIdx].loc, DiskLoc());
- // in capped collections all drecs are linked into a single list in bucket 0
- ASSERT_EQUALS(bucketIdx, md->isCapped()
- ? 0
- : RecordStoreV1Base::bucket(actualSize));
+ ASSERT_EQUALS(dataSize, md->dataSize());
+ ASSERT_EQUALS(numRecs, md->numRecords());
+ }
- drecIdx++;
- actualLoc = actualDrec->nextDeleted();
+ if (drecs) {
+ int drecIdx = 0;
+ for (int bucketIdx = 0; bucketIdx < RecordStoreV1Base::Buckets; bucketIdx++) {
+ DiskLoc actualLoc = md->deletedListEntry(bucketIdx);
+
+ if (md->isCapped() && bucketIdx == 1) {
+ // In capped collections, the 2nd bucket (index 1) points to the drec before
+ // the first drec in the capExtent. If the capExtent is the first Extent,
+ // it should be Null.
+
+ if (md->capExtent() == md->firstExtent(txn)) {
+ ASSERT_EQUALS(actualLoc, DiskLoc());
+ } else {
+ ASSERT_NOT_EQUALS(actualLoc.a(), md->capExtent().a());
+ const DeletedRecord* actualDrec = &em->recordForV1(actualLoc)->asDeleted();
+ ASSERT_EQUALS(actualDrec->nextDeleted().a(), md->capExtent().a());
}
+
+                    // Don't do normal checking of bucket 1 in capped collections; the other
+                    // buckets are still checked to verify that they are Null.
+ continue;
}
- // both the expected and actual deleted lists must be done at this point
- ASSERT_EQUALS(drecs[drecIdx].loc, DiskLoc());
- }
- if (legacyGrabBag) {
- int grabBagIdx = 0;
- DiskLoc actualLoc = md->deletedListLegacyGrabBag();
while (!actualLoc.isNull()) {
const DeletedRecord* actualDrec = &em->recordForV1(actualLoc)->asDeleted();
const int actualSize = actualDrec->lengthWithHeaders();
- ASSERT_EQUALS(actualLoc, legacyGrabBag[grabBagIdx].loc);
- ASSERT_EQUALS(actualSize, legacyGrabBag[grabBagIdx].size);
+ ASSERT_EQUALS(actualLoc, drecs[drecIdx].loc);
+ ASSERT_EQUALS(actualSize, drecs[drecIdx].size);
+
+ // Make sure the drec is correct
+ ASSERT_EQUALS(actualDrec->extentOfs(), 0);
+
+ // in capped collections all drecs are linked into a single list in bucket 0
+ ASSERT_EQUALS(bucketIdx,
+ md->isCapped() ? 0 : RecordStoreV1Base::bucket(actualSize));
- grabBagIdx++;
+ drecIdx++;
actualLoc = actualDrec->nextDeleted();
}
-
- // both the expected and actual deleted lists must be done at this point
- ASSERT_EQUALS(legacyGrabBag[grabBagIdx].loc, DiskLoc());
- }
- else {
- // Unless a test is actually using the grabBag it should be empty
- ASSERT_EQUALS(md->deletedListLegacyGrabBag(), DiskLoc());
}
+ // both the expected and actual deleted lists must be done at this point
+ ASSERT_EQUALS(drecs[drecIdx].loc, DiskLoc());
}
- catch (...) {
- // If a test fails, provide extra info to make debugging easier
- printRecList(txn, em, md);
- printDRecList(em, md);
- throw;
+
+ if (legacyGrabBag) {
+ int grabBagIdx = 0;
+ DiskLoc actualLoc = md->deletedListLegacyGrabBag();
+ while (!actualLoc.isNull()) {
+ const DeletedRecord* actualDrec = &em->recordForV1(actualLoc)->asDeleted();
+ const int actualSize = actualDrec->lengthWithHeaders();
+
+ ASSERT_EQUALS(actualLoc, legacyGrabBag[grabBagIdx].loc);
+ ASSERT_EQUALS(actualSize, legacyGrabBag[grabBagIdx].size);
+
+ grabBagIdx++;
+ actualLoc = actualDrec->nextDeleted();
+ }
+
+ // both the expected and actual deleted lists must be done at this point
+ ASSERT_EQUALS(legacyGrabBag[grabBagIdx].loc, DiskLoc());
+ } else {
+ // Unless a test is actually using the grabBag it should be empty
+ ASSERT_EQUALS(md->deletedListLegacyGrabBag(), DiskLoc());
}
+ } catch (...) {
+ // If a test fails, provide extra info to make debugging easier
+ printRecList(txn, em, md);
+ printDRecList(em, md);
+ throw;
}
}
+}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
index f37969c1ca6..0a038f9e9f3 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.h
@@ -37,169 +37,162 @@
namespace mongo {
- class DummyRecordStoreV1MetaData : public RecordStoreV1MetaData {
- public:
- DummyRecordStoreV1MetaData( bool capped, int userFlags );
- virtual ~DummyRecordStoreV1MetaData(){}
+class DummyRecordStoreV1MetaData : public RecordStoreV1MetaData {
+public:
+ DummyRecordStoreV1MetaData(bool capped, int userFlags);
+ virtual ~DummyRecordStoreV1MetaData() {}
- virtual const DiskLoc& capExtent() const;
- virtual void setCapExtent( OperationContext* txn, const DiskLoc& loc );
+ virtual const DiskLoc& capExtent() const;
+ virtual void setCapExtent(OperationContext* txn, const DiskLoc& loc);
- virtual const DiskLoc& capFirstNewRecord() const;
- virtual void setCapFirstNewRecord( OperationContext* txn, const DiskLoc& loc );
+ virtual const DiskLoc& capFirstNewRecord() const;
+ virtual void setCapFirstNewRecord(OperationContext* txn, const DiskLoc& loc);
- virtual long long dataSize() const;
- virtual long long numRecords() const;
+ virtual long long dataSize() const;
+ virtual long long numRecords() const;
- virtual void incrementStats( OperationContext* txn,
- long long dataSizeIncrement,
- long long numRecordsIncrement );
+ virtual void incrementStats(OperationContext* txn,
+ long long dataSizeIncrement,
+ long long numRecordsIncrement);
- virtual void setStats( OperationContext* txn,
- long long dataSize,
- long long numRecords );
+ virtual void setStats(OperationContext* txn, long long dataSize, long long numRecords);
- virtual DiskLoc deletedListEntry( int bucket ) const;
- virtual void setDeletedListEntry( OperationContext* txn,
- int bucket,
- const DiskLoc& loc );
+ virtual DiskLoc deletedListEntry(int bucket) const;
+ virtual void setDeletedListEntry(OperationContext* txn, int bucket, const DiskLoc& loc);
- virtual DiskLoc deletedListLegacyGrabBag() const;
- virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc);
+ virtual DiskLoc deletedListLegacyGrabBag() const;
+ virtual void setDeletedListLegacyGrabBag(OperationContext* txn, const DiskLoc& loc);
- virtual void orphanDeletedList(OperationContext* txn);
+ virtual void orphanDeletedList(OperationContext* txn);
- virtual const DiskLoc& firstExtent( OperationContext* txn ) const;
- virtual void setFirstExtent( OperationContext* txn, const DiskLoc& loc );
+ virtual const DiskLoc& firstExtent(OperationContext* txn) const;
+ virtual void setFirstExtent(OperationContext* txn, const DiskLoc& loc);
- virtual const DiskLoc& lastExtent( OperationContext* txn ) const;
- virtual void setLastExtent( OperationContext* txn, const DiskLoc& loc );
+ virtual const DiskLoc& lastExtent(OperationContext* txn) const;
+ virtual void setLastExtent(OperationContext* txn, const DiskLoc& loc);
- virtual bool isCapped() const;
+ virtual bool isCapped() const;
- virtual bool isUserFlagSet( int flag ) const;
- virtual int userFlags() const { return _userFlags; }
- virtual bool setUserFlag( OperationContext* txn, int flag );
- virtual bool clearUserFlag( OperationContext* txn, int flag );
- virtual bool replaceUserFlags( OperationContext* txn, int flags );
+ virtual bool isUserFlagSet(int flag) const;
+ virtual int userFlags() const {
+ return _userFlags;
+ }
+ virtual bool setUserFlag(OperationContext* txn, int flag);
+ virtual bool clearUserFlag(OperationContext* txn, int flag);
+ virtual bool replaceUserFlags(OperationContext* txn, int flags);
- virtual int lastExtentSize( OperationContext* txn ) const;
- virtual void setLastExtentSize( OperationContext* txn, int newMax );
+ virtual int lastExtentSize(OperationContext* txn) const;
+ virtual void setLastExtentSize(OperationContext* txn, int newMax);
- virtual long long maxCappedDocs() const;
+ virtual long long maxCappedDocs() const;
- protected:
+protected:
+ DiskLoc _capExtent;
+ DiskLoc _capFirstNewRecord;
- DiskLoc _capExtent;
- DiskLoc _capFirstNewRecord;
+ long long _dataSize;
+ long long _numRecords;
- long long _dataSize;
- long long _numRecords;
+ DiskLoc _firstExtent;
+ DiskLoc _lastExtent;
- DiskLoc _firstExtent;
- DiskLoc _lastExtent;
+ bool _capped;
+ int _userFlags;
+ long long _maxCappedDocs;
- bool _capped;
- int _userFlags;
- long long _maxCappedDocs;
+ int _lastExtentSize;
+ double _paddingFactor;
- int _lastExtentSize;
- double _paddingFactor;
+ std::vector<DiskLoc> _deletedLists;
+ DiskLoc _deletedListLegacyGrabBag;
+};
- std::vector<DiskLoc> _deletedLists;
- DiskLoc _deletedListLegacyGrabBag;
- };
+class DummyExtentManager : public ExtentManager {
+public:
+ virtual ~DummyExtentManager();
- class DummyExtentManager : public ExtentManager {
- public:
- virtual ~DummyExtentManager();
+ virtual Status init(OperationContext* txn);
- virtual Status init(OperationContext* txn);
+ virtual int numFiles() const;
+ virtual long long fileSize() const;
- virtual int numFiles() const;
- virtual long long fileSize() const;
+ virtual DiskLoc allocateExtent(OperationContext* txn, bool capped, int size, bool enforceQuota);
- virtual DiskLoc allocateExtent( OperationContext* txn,
- bool capped,
- int size,
- bool enforceQuota );
+ virtual void freeExtents(OperationContext* txn, DiskLoc firstExt, DiskLoc lastExt);
- virtual void freeExtents( OperationContext* txn,
- DiskLoc firstExt, DiskLoc lastExt );
+ virtual void freeExtent(OperationContext* txn, DiskLoc extent);
- virtual void freeExtent( OperationContext* txn, DiskLoc extent );
+ virtual void freeListStats(OperationContext* txn,
+ int* numExtents,
+ int64_t* totalFreeSizeBytes) const;
- virtual void freeListStats(OperationContext* txn,
- int* numExtents,
- int64_t* totalFreeSizeBytes) const;
+ virtual MmapV1RecordHeader* recordForV1(const DiskLoc& loc) const;
- virtual MmapV1RecordHeader* recordForV1( const DiskLoc& loc ) const;
+ virtual std::unique_ptr<RecordFetcher> recordNeedsFetch(const DiskLoc& loc) const final;
- virtual std::unique_ptr<RecordFetcher> recordNeedsFetch( const DiskLoc& loc ) const final;
+ virtual Extent* extentForV1(const DiskLoc& loc) const;
- virtual Extent* extentForV1( const DiskLoc& loc ) const;
+ virtual DiskLoc extentLocForV1(const DiskLoc& loc) const;
- virtual DiskLoc extentLocForV1( const DiskLoc& loc ) const;
+ virtual Extent* getExtent(const DiskLoc& loc, bool doSanityCheck = true) const;
- virtual Extent* getExtent( const DiskLoc& loc, bool doSanityCheck = true ) const;
+ virtual int maxSize() const;
- virtual int maxSize() const;
+ virtual CacheHint* cacheHint(const DiskLoc& extentLoc, const HintType& hint);
- virtual CacheHint* cacheHint( const DiskLoc& extentLoc, const HintType& hint );
+protected:
+ struct ExtentInfo {
+ char* data;
+ size_t length;
+ };
- protected:
- struct ExtentInfo {
- char* data;
- size_t length;
- };
+ std::vector<ExtentInfo> _extents;
+};
- std::vector<ExtentInfo> _extents;
- };
-
- struct LocAndSize {
- DiskLoc loc;
- int size; // with headers
- };
+struct LocAndSize {
+ DiskLoc loc;
+ int size; // with headers
+};
- /**
- * Creates a V1 storage/mmap_v1 with the passed in records and DeletedRecords (drecs).
- *
- * List of LocAndSize are terminated by a Null DiskLoc. Passing a NULL pointer is shorthand for
- * an empty list. Each extent gets it's own DiskLoc file number. DiskLoc Offsets must be > 1000.
- *
- * records must be sorted by extent/file. offsets within an extent can be in any order.
- *
- * In a simple RS, drecs must be grouped into size-buckets, but the ordering within the size
- * buckets is up to you.
- *
- * In a capped collection, all drecs form a single list and must be grouped by extent, with each
- * extent having at least one drec. capFirstNewRecord() and capExtent() *must* be correctly set
- * on md before calling.
- *
- * You are responsible for ensuring the records and drecs don't overlap.
- *
- * ExtentManager and MetaData must both be empty.
- */
- void initializeV1RS(OperationContext* txn,
- const LocAndSize* records,
- const LocAndSize* drecs,
- const LocAndSize* legacyGrabBag,
- DummyExtentManager* em,
- DummyRecordStoreV1MetaData* md);
-
- /**
- * Asserts that the V1RecordStore defined by md has the passed in records and drecs in the
- * correct order.
- *
- * List of LocAndSize are terminated by a Null DiskLoc. Passing a NULL pointer means don't check
- * that list.
- */
- void assertStateV1RS(OperationContext* txn,
- const LocAndSize* records,
- const LocAndSize* drecs,
- const LocAndSize* legacyGrabBag,
- const ExtentManager* em,
- const DummyRecordStoreV1MetaData* md);
+/**
+ * Creates a V1 (storage/mmap_v1) record store with the passed-in records and DeletedRecords
+ * (drecs).
+ *
+ * Lists of LocAndSize are terminated by a Null DiskLoc. Passing a NULL pointer is shorthand for
+ * an empty list. Each extent gets its own DiskLoc file number. DiskLoc offsets must be > 1000.
+ *
+ * records must be sorted by extent/file. offsets within an extent can be in any order.
+ *
+ * In a simple RS, drecs must be grouped into size-buckets, but the ordering within the size
+ * buckets is up to you.
+ *
+ * In a capped collection, all drecs form a single list and must be grouped by extent, with each
+ * extent having at least one drec. capFirstNewRecord() and capExtent() *must* be correctly set
+ * on md before calling.
+ *
+ * You are responsible for ensuring the records and drecs don't overlap.
+ *
+ * ExtentManager and MetaData must both be empty.
+ */
+void initializeV1RS(OperationContext* txn,
+ const LocAndSize* records,
+ const LocAndSize* drecs,
+ const LocAndSize* legacyGrabBag,
+ DummyExtentManager* em,
+ DummyRecordStoreV1MetaData* md);
+
+/**
+ * Asserts that the V1RecordStore defined by md has the passed in records and drecs in the
+ * correct order.
+ *
+ * Lists of LocAndSize are terminated by a Null DiskLoc. Passing a NULL pointer means don't check
+ * that list.
+ */
+void assertStateV1RS(OperationContext* txn,
+ const LocAndSize* records,
+ const LocAndSize* drecs,
+ const LocAndSize* legacyGrabBag,
+ const ExtentManager* em,
+ const DummyRecordStoreV1MetaData* md);
} // namespace mongo
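
A minimal usage sketch of these two helpers (hypothetical locations and sizes; `txn` is assumed to be an OperationContext* already available in the test, and each array ends with a value-initialized entry whose Null DiskLoc terminates the list, per the contract above):

    // Hypothetical layout: one extent (file 0) holding a single 100-byte record
    // at offset 1000 and one 100-byte deleted record at offset 1100.
    const LocAndSize records[] = {{DiskLoc(0, 1000), 100}, {}};
    const LocAndSize drecs[] = {{DiskLoc(0, 1100), 100}, {}};

    DummyExtentManager em;
    DummyRecordStoreV1MetaData md(false /*capped*/, 0 /*userFlags*/);
    initializeV1RS(txn, records, drecs, NULL, &em, &md);

    // ... exercise the record store built on top of em/md ...

    // Verify the layout still matches what was requested.
    assertStateV1RS(txn, records, drecs, NULL, &em, &md);
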
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index aa83636ae6b..6db0a4e15da 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -55,427 +55,415 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::map;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- typedef boost::filesystem::path Path;
-
- // inheritable class to implement an operation that may be applied to all
- // files in a database using _applyOpToDataFiles()
- class FileOp {
- public:
- virtual ~FileOp() {}
- // Return true if file exists and operation successful
- virtual bool apply( const boost::filesystem::path &p ) = 0;
- virtual const char * op() const = 0;
- };
-
- void _applyOpToDataFiles(const string& database, FileOp &fo, bool afterAllocator = false,
- const string& path = storageGlobalParams.dbpath);
-
- void _deleteDataFiles(const std::string& database) {
- if (storageGlobalParams.directoryperdb) {
- FileAllocator::get()->waitUntilFinished();
- MONGO_ASSERT_ON_EXCEPTION_WITH_MSG(
- boost::filesystem::remove_all(
- boost::filesystem::path(storageGlobalParams.dbpath) / database),
- "delete data files with a directoryperdb");
- return;
- }
- class : public FileOp {
- virtual bool apply( const boost::filesystem::path &p ) {
- return boost::filesystem::remove( p );
- }
- virtual const char * op() const {
- return "remove";
- }
- } deleter;
- _applyOpToDataFiles( database, deleter, true );
+using std::unique_ptr;
+using std::endl;
+using std::map;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+typedef boost::filesystem::path Path;
+
+// inheritable class to implement an operation that may be applied to all
+// files in a database using _applyOpToDataFiles()
+class FileOp {
+public:
+ virtual ~FileOp() {}
+ // Return true if file exists and operation successful
+ virtual bool apply(const boost::filesystem::path& p) = 0;
+ virtual const char* op() const = 0;
+};
+
+void _applyOpToDataFiles(const string& database,
+ FileOp& fo,
+ bool afterAllocator = false,
+ const string& path = storageGlobalParams.dbpath);
+
+void _deleteDataFiles(const std::string& database) {
+ if (storageGlobalParams.directoryperdb) {
+ FileAllocator::get()->waitUntilFinished();
+ MONGO_ASSERT_ON_EXCEPTION_WITH_MSG(
+ boost::filesystem::remove_all(boost::filesystem::path(storageGlobalParams.dbpath) /
+ database),
+ "delete data files with a directoryperdb");
+ return;
}
-
- void boostRenameWrapper( const Path &from, const Path &to ) {
- try {
- boost::filesystem::rename( from, to );
+ class : public FileOp {
+ virtual bool apply(const boost::filesystem::path& p) {
+ return boost::filesystem::remove(p);
}
- catch ( const boost::filesystem::filesystem_error & ) {
- // boost rename doesn't work across partitions
- boost::filesystem::copy_file( from, to);
- boost::filesystem::remove( from );
+ virtual const char* op() const {
+ return "remove";
}
}
+ deleter;
+ _applyOpToDataFiles(database, deleter, true);
+}
- // back up original database files to 'temp' dir
- void _renameForBackup( const std::string& database, const Path &reservedPath ) {
- Path newPath( reservedPath );
- if (storageGlobalParams.directoryperdb)
- newPath /= database;
- class Renamer : public FileOp {
- public:
- Renamer( const Path &newPath ) : newPath_( newPath ) {}
- private:
- const boost::filesystem::path &newPath_;
- virtual bool apply( const Path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- boostRenameWrapper( p, newPath_ / ( p.leaf().string() + ".bak" ) );
- return true;
- }
- virtual const char * op() const {
- return "renaming";
- }
- } renamer( newPath );
- _applyOpToDataFiles( database, renamer, true );
+void boostRenameWrapper(const Path& from, const Path& to) {
+ try {
+ boost::filesystem::rename(from, to);
+ } catch (const boost::filesystem::filesystem_error&) {
+ // boost rename doesn't work across partitions
+ boost::filesystem::copy_file(from, to);
+ boost::filesystem::remove(from);
}
+}
- intmax_t dbSize( const string& database ) {
- class SizeAccumulator : public FileOp {
- public:
- SizeAccumulator() : totalSize_( 0 ) {}
- intmax_t size() const {
- return totalSize_;
- }
- private:
- virtual bool apply( const boost::filesystem::path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- totalSize_ += boost::filesystem::file_size( p );
- return true;
- }
- virtual const char *op() const {
- return "checking size";
- }
- intmax_t totalSize_;
- };
- SizeAccumulator sa;
- _applyOpToDataFiles( database, sa );
- return sa.size();
- }
+// back up original database files to 'temp' dir
+void _renameForBackup(const std::string& database, const Path& reservedPath) {
+ Path newPath(reservedPath);
+ if (storageGlobalParams.directoryperdb)
+ newPath /= database;
+ class Renamer : public FileOp {
+ public:
+ Renamer(const Path& newPath) : newPath_(newPath) {}
- // move temp files to standard data dir
- void _replaceWithRecovered( const string& database, const char *reservedPathString ) {
- Path newPath(storageGlobalParams.dbpath);
- if (storageGlobalParams.directoryperdb)
- newPath /= database;
- class Replacer : public FileOp {
- public:
- Replacer( const Path &newPath ) : newPath_( newPath ) {}
- private:
- const boost::filesystem::path &newPath_;
- virtual bool apply( const Path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- boostRenameWrapper( p, newPath_ / p.leaf() );
- return true;
- }
- virtual const char * op() const {
- return "renaming";
- }
- } replacer( newPath );
- _applyOpToDataFiles( database, replacer, true, reservedPathString );
- }
+ private:
+ const boost::filesystem::path& newPath_;
+ virtual bool apply(const Path& p) {
+ if (!boost::filesystem::exists(p))
+ return false;
+ boostRenameWrapper(p, newPath_ / (p.leaf().string() + ".bak"));
+ return true;
+ }
+ virtual const char* op() const {
+ return "renaming";
+ }
+ } renamer(newPath);
+ _applyOpToDataFiles(database, renamer, true);
+}
- // generate a directory name for storing temp data files
- Path uniqueReservedPath( const char *prefix ) {
- Path repairPath = Path(storageGlobalParams.repairpath);
- Path reservedPath;
- int i = 0;
- bool exists = false;
- do {
- stringstream ss;
- ss << prefix << "_repairDatabase_" << i++;
- reservedPath = repairPath / ss.str();
- MONGO_ASSERT_ON_EXCEPTION( exists = boost::filesystem::exists( reservedPath ) );
+intmax_t dbSize(const string& database) {
+ class SizeAccumulator : public FileOp {
+ public:
+ SizeAccumulator() : totalSize_(0) {}
+ intmax_t size() const {
+ return totalSize_;
}
- while ( exists );
- return reservedPath;
- }
- void _applyOpToDataFiles( const string& database, FileOp &fo, bool afterAllocator, const string& path ) {
- if ( afterAllocator )
- FileAllocator::get()->waitUntilFinished();
- string c = database;
- c += '.';
- boost::filesystem::path p(path);
- if (storageGlobalParams.directoryperdb)
- p /= database;
- boost::filesystem::path q;
- q = p / (c+"ns");
- bool ok = false;
- MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply( q ) );
- if ( ok ) {
- LOG(2) << fo.op() << " file " << q.string() << endl;
+ private:
+ virtual bool apply(const boost::filesystem::path& p) {
+ if (!boost::filesystem::exists(p))
+ return false;
+ totalSize_ += boost::filesystem::file_size(p);
+ return true;
}
- int i = 0;
- int extra = 10; // should not be necessary, this is defensive in case there are missing files
- while ( 1 ) {
- verify( i <= DiskLoc::MaxFiles );
- stringstream ss;
- ss << c << i;
- q = p / ss.str();
- MONGO_ASSERT_ON_EXCEPTION( ok = fo.apply(q) );
- if ( ok ) {
- if ( extra != 10 ) {
- LOG(1) << fo.op() << " file " << q.string() << endl;
- log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
- }
- }
- else if ( --extra <= 0 )
- break;
- i++;
+ virtual const char* op() const {
+ return "checking size";
}
- }
+ intmax_t totalSize_;
+ };
+ SizeAccumulator sa;
+ _applyOpToDataFiles(database, sa);
+ return sa.size();
+}
- class RepairFileDeleter {
+// move temp files to standard data dir
+void _replaceWithRecovered(const string& database, const char* reservedPathString) {
+ Path newPath(storageGlobalParams.dbpath);
+ if (storageGlobalParams.directoryperdb)
+ newPath /= database;
+ class Replacer : public FileOp {
public:
- RepairFileDeleter( OperationContext* txn,
- const string& dbName,
- const string& pathString,
- const Path& path )
- : _txn(txn),
- _dbName( dbName ),
- _pathString( pathString ),
- _path( path ),
- _success( false ) {
+ Replacer(const Path& newPath) : newPath_(newPath) {}
+
+ private:
+ const boost::filesystem::path& newPath_;
+ virtual bool apply(const Path& p) {
+ if (!boost::filesystem::exists(p))
+ return false;
+ boostRenameWrapper(p, newPath_ / p.leaf());
+ return true;
}
+ virtual const char* op() const {
+ return "renaming";
+ }
+ } replacer(newPath);
+ _applyOpToDataFiles(database, replacer, true, reservedPathString);
+}
+
+// generate a directory name for storing temp data files
+Path uniqueReservedPath(const char* prefix) {
+ Path repairPath = Path(storageGlobalParams.repairpath);
+ Path reservedPath;
+ int i = 0;
+ bool exists = false;
+ do {
+ stringstream ss;
+ ss << prefix << "_repairDatabase_" << i++;
+ reservedPath = repairPath / ss.str();
+ MONGO_ASSERT_ON_EXCEPTION(exists = boost::filesystem::exists(reservedPath));
+ } while (exists);
+ return reservedPath;
+}
- ~RepairFileDeleter() {
- if ( _success )
- return;
+void _applyOpToDataFiles(const string& database,
+ FileOp& fo,
+ bool afterAllocator,
+ const string& path) {
+ if (afterAllocator)
+ FileAllocator::get()->waitUntilFinished();
+ string c = database;
+ c += '.';
+ boost::filesystem::path p(path);
+ if (storageGlobalParams.directoryperdb)
+ p /= database;
+ boost::filesystem::path q;
+ q = p / (c + "ns");
+ bool ok = false;
+ MONGO_ASSERT_ON_EXCEPTION(ok = fo.apply(q));
+ if (ok) {
+ LOG(2) << fo.op() << " file " << q.string() << endl;
+ }
+ int i = 0;
+ int extra = 10; // should not be necessary, this is defensive in case there are missing files
+ while (1) {
+ verify(i <= DiskLoc::MaxFiles);
+ stringstream ss;
+ ss << c << i;
+ q = p / ss.str();
+ MONGO_ASSERT_ON_EXCEPTION(ok = fo.apply(q));
+ if (ok) {
+ if (extra != 10) {
+ LOG(1) << fo.op() << " file " << q.string() << endl;
+ log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
+ }
+ } else if (--extra <= 0)
+ break;
+ i++;
+ }
+}
- log() << "cleaning up failed repair "
- << "db: " << _dbName << " path: " << _pathString;
+class RepairFileDeleter {
+public:
+ RepairFileDeleter(OperationContext* txn,
+ const string& dbName,
+ const string& pathString,
+ const Path& path)
+ : _txn(txn), _dbName(dbName), _pathString(pathString), _path(path), _success(false) {}
- try {
- getDur().syncDataAndTruncateJournal(_txn);
+ ~RepairFileDeleter() {
+ if (_success)
+ return;
- // need both in case journaling is disabled
- MongoFile::flushAll(true);
+ log() << "cleaning up failed repair "
+ << "db: " << _dbName << " path: " << _pathString;
- MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( _path ) );
- }
- catch ( DBException& e ) {
- error() << "RepairFileDeleter failed to cleanup: " << e;
- error() << "aborting";
- fassertFailed( 17402 );
- }
- }
+ try {
+ getDur().syncDataAndTruncateJournal(_txn);
+
+ // need both in case journaling is disabled
+ MongoFile::flushAll(true);
- void success() {
- _success = true;
+ MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(_path));
+ } catch (DBException& e) {
+ error() << "RepairFileDeleter failed to cleanup: " << e;
+ error() << "aborting";
+ fassertFailed(17402);
}
+ }
- private:
- OperationContext* _txn;
- string _dbName;
- string _pathString;
- Path _path;
- bool _success;
- };
+ void success() {
+ _success = true;
+ }
- Status MMAPV1Engine::repairDatabase( OperationContext* txn,
- const std::string& dbName,
- bool preserveClonedFilesOnFailure,
- bool backupOriginalFiles ) {
- unique_ptr<RepairFileDeleter> repairFileDeleter;
+private:
+ OperationContext* _txn;
+ string _dbName;
+ string _pathString;
+ Path _path;
+ bool _success;
+};
+
+Status MMAPV1Engine::repairDatabase(OperationContext* txn,
+ const std::string& dbName,
+ bool preserveClonedFilesOnFailure,
+ bool backupOriginalFiles) {
+ unique_ptr<RepairFileDeleter> repairFileDeleter;
+
+ // Must be done before and after repair
+ getDur().syncDataAndTruncateJournal(txn);
+
+ intmax_t totalSize = dbSize(dbName);
+ intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);
+
+ if (freeSize > -1 && freeSize < totalSize) {
+ return Status(ErrorCodes::OutOfDiskSpace,
+ str::stream()
+ << "Cannot repair database " << dbName << " having size: " << totalSize
+ << " (bytes) because free disk space is: " << freeSize << " (bytes)");
+ }
- // Must be done before and after repair
- getDur().syncDataAndTruncateJournal(txn);
+ txn->checkForInterrupt();
- intmax_t totalSize = dbSize( dbName );
- intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath);
+ Path reservedPath = uniqueReservedPath(
+ (preserveClonedFilesOnFailure || backupOriginalFiles) ? "backup" : "_tmp");
+ bool created = false;
+ MONGO_ASSERT_ON_EXCEPTION(created = boost::filesystem::create_directory(reservedPath));
+ invariant(created);
+ string reservedPathString = reservedPath.string();
- if ( freeSize > -1 && freeSize < totalSize ) {
- return Status( ErrorCodes::OutOfDiskSpace,
- str::stream() << "Cannot repair database " << dbName
- << " having size: " << totalSize
- << " (bytes) because free disk space is: " << freeSize << " (bytes)" );
- }
+ if (!preserveClonedFilesOnFailure)
+ repairFileDeleter.reset(
+ new RepairFileDeleter(txn, dbName, reservedPathString, reservedPath));
- txn->checkForInterrupt();
+ {
+ Database* originalDatabase = dbHolder().openDb(txn, dbName);
+ if (originalDatabase == NULL) {
+ return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
+ }
- Path reservedPath =
- uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
- "backup" : "_tmp" );
- bool created = false;
- MONGO_ASSERT_ON_EXCEPTION( created = boost::filesystem::create_directory( reservedPath ) );
- invariant( created );
- string reservedPathString = reservedPath.string();
+ unique_ptr<MMAPV1DatabaseCatalogEntry> dbEntry;
+ unique_ptr<Database> tempDatabase;
- if ( !preserveClonedFilesOnFailure )
- repairFileDeleter.reset( new RepairFileDeleter( txn,
- dbName,
- reservedPathString,
- reservedPath ) );
+ // Must call this before MMAPV1DatabaseCatalogEntry's destructor closes the DB files
+ ON_BLOCK_EXIT(&dur::DurableInterface::syncDataAndTruncateJournal, &getDur(), txn);
{
- Database* originalDatabase = dbHolder().openDb(txn, dbName);
- if (originalDatabase == NULL) {
- return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair");
- }
-
- unique_ptr<MMAPV1DatabaseCatalogEntry> dbEntry;
- unique_ptr<Database> tempDatabase;
+ dbEntry.reset(new MMAPV1DatabaseCatalogEntry(
+ txn, dbName, reservedPathString, storageGlobalParams.directoryperdb, true));
+ tempDatabase.reset(new Database(txn, dbName, dbEntry.get()));
+ }
- // Must call this before MMAPV1DatabaseCatalogEntry's destructor closes the DB files
- ON_BLOCK_EXIT(&dur::DurableInterface::syncDataAndTruncateJournal, &getDur(), txn);
+ map<string, CollectionOptions> namespacesToCopy;
+ {
+ string ns = dbName + ".system.namespaces";
+ OldClientContext ctx(txn, ns);
+ Collection* coll = originalDatabase->getCollection(ns);
+ if (coll) {
+ auto cursor = coll->getCursor(txn);
+ while (auto record = cursor->next()) {
+ BSONObj obj = record->data.releaseToBson();
- {
- dbEntry.reset(new MMAPV1DatabaseCatalogEntry(txn,
- dbName,
- reservedPathString,
- storageGlobalParams.directoryperdb,
- true));
- tempDatabase.reset( new Database(txn, dbName, dbEntry.get()));
- }
+ string ns = obj["name"].String();
- map<string,CollectionOptions> namespacesToCopy;
- {
- string ns = dbName + ".system.namespaces";
- OldClientContext ctx(txn, ns );
- Collection* coll = originalDatabase->getCollection( ns );
- if ( coll ) {
- auto cursor = coll->getCursor(txn);
- while (auto record = cursor->next()) {
- BSONObj obj = record->data.releaseToBson();
-
- string ns = obj["name"].String();
-
- NamespaceString nss( ns );
- if ( nss.isSystem() ) {
- if ( nss.isSystemDotIndexes() )
- continue;
- if ( nss.coll() == "system.namespaces" )
- continue;
- }
-
- if ( !nss.isNormal() )
+ NamespaceString nss(ns);
+ if (nss.isSystem()) {
+ if (nss.isSystemDotIndexes())
continue;
+ if (nss.coll() == "system.namespaces")
+ continue;
+ }
+
+ if (!nss.isNormal())
+ continue;
- CollectionOptions options;
- if ( obj["options"].isABSONObj() ) {
- Status status = options.parse( obj["options"].Obj() );
- if ( !status.isOK() )
- return status;
- }
- namespacesToCopy[ns] = options;
+ CollectionOptions options;
+ if (obj["options"].isABSONObj()) {
+ Status status = options.parse(obj["options"].Obj());
+ if (!status.isOK())
+ return status;
}
+ namespacesToCopy[ns] = options;
}
}
+ }
- for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin();
- i != namespacesToCopy.end();
- ++i ) {
- string ns = i->first;
- CollectionOptions options = i->second;
-
- Collection* tempCollection = NULL;
- {
- WriteUnitOfWork wunit(txn);
- tempCollection = tempDatabase->createCollection(txn, ns, options, false);
- wunit.commit();
- }
+ for (map<string, CollectionOptions>::const_iterator i = namespacesToCopy.begin();
+ i != namespacesToCopy.end();
+ ++i) {
+ string ns = i->first;
+ CollectionOptions options = i->second;
- OldClientContext readContext(txn, ns, originalDatabase);
- Collection* originalCollection = originalDatabase->getCollection( ns );
- invariant( originalCollection );
-
- // data
-
- // TODO SERVER-14812 add a mode that drops duplicates rather than failing
- MultiIndexBlock indexer(txn, tempCollection );
- {
- vector<BSONObj> indexes;
- IndexCatalog::IndexIterator ii =
- originalCollection->getIndexCatalog()->getIndexIterator( txn, false );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- indexes.push_back( desc->infoObj() );
- }
+ Collection* tempCollection = NULL;
+ {
+ WriteUnitOfWork wunit(txn);
+ tempCollection = tempDatabase->createCollection(txn, ns, options, false);
+ wunit.commit();
+ }
- Status status = indexer.init( indexes );
- if (!status.isOK()) {
- return status;
- }
- }
+ OldClientContext readContext(txn, ns, originalDatabase);
+ Collection* originalCollection = originalDatabase->getCollection(ns);
+ invariant(originalCollection);
- auto cursor = originalCollection->getCursor(txn);
- while (auto record = cursor->next()) {
- BSONObj doc = record->data.releaseToBson();
-
- WriteUnitOfWork wunit(txn);
- StatusWith<RecordId> result = tempCollection->insertDocument(txn,
- doc,
- &indexer,
- false);
- if ( !result.isOK() )
- return result.getStatus();
-
- wunit.commit();
- txn->checkForInterrupt();
- }
-
- Status status = indexer.doneInserting();
- if (!status.isOK())
- return status;
+ // data
- {
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
+ // TODO SERVER-14812 add a mode that drops duplicates rather than failing
+ MultiIndexBlock indexer(txn, tempCollection);
+ {
+ vector<BSONObj> indexes;
+ IndexCatalog::IndexIterator ii =
+ originalCollection->getIndexCatalog()->getIndexIterator(txn, false);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ indexes.push_back(desc->infoObj());
}
+ Status status = indexer.init(indexes);
+ if (!status.isOK()) {
+ return status;
+ }
}
- getDur().syncDataAndTruncateJournal(txn);
+ auto cursor = originalCollection->getCursor(txn);
+ while (auto record = cursor->next()) {
+ BSONObj doc = record->data.releaseToBson();
- // need both in case journaling is disabled
- MongoFile::flushAll(true);
+ WriteUnitOfWork wunit(txn);
+ StatusWith<RecordId> result =
+ tempCollection->insertDocument(txn, doc, &indexer, false);
+ if (!result.isOK())
+ return result.getStatus();
+
+ wunit.commit();
+ txn->checkForInterrupt();
+ }
+
+ Status status = indexer.doneInserting();
+ if (!status.isOK())
+ return status;
- txn->checkForInterrupt();
+ {
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
+ }
}
- // at this point if we abort, we don't want to delete new files
- // as they might be the only copies
+ getDur().syncDataAndTruncateJournal(txn);
- if ( repairFileDeleter.get() )
- repairFileDeleter->success();
+ // need both in case journaling is disabled
+ MongoFile::flushAll(true);
- // Close the database so we can rename/delete the original data files
- dbHolder().close(txn, dbName);
+ txn->checkForInterrupt();
+ }
- if ( backupOriginalFiles ) {
- _renameForBackup( dbName, reservedPath );
- }
- else {
- // first make new directory before deleting data
- Path newDir = Path(storageGlobalParams.dbpath) / dbName;
- MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
+ // at this point if we abort, we don't want to delete new files
+ // as they might be the only copies
- // this deletes old files
- _deleteDataFiles( dbName );
+ if (repairFileDeleter.get())
+ repairFileDeleter->success();
- if ( !boost::filesystem::exists(newDir) ) {
- // we deleted because of directoryperdb
- // re-create
- MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
- }
- }
+ // Close the database so we can rename/delete the original data files
+ dbHolder().close(txn, dbName);
+
+ if (backupOriginalFiles) {
+ _renameForBackup(dbName, reservedPath);
+ } else {
+ // first make new directory before deleting data
+ Path newDir = Path(storageGlobalParams.dbpath) / dbName;
+ MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
- _replaceWithRecovered( dbName, reservedPathString.c_str() );
+ // this deletes old files
+ _deleteDataFiles(dbName);
- if (!backupOriginalFiles) {
- MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(reservedPath));
+ if (!boost::filesystem::exists(newDir)) {
+ // we deleted because of directoryperdb
+ // re-create
+ MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir));
}
+ }
- // Reopen the database so it's discoverable
- dbHolder().openDb(txn, dbName);
+ _replaceWithRecovered(dbName, reservedPathString.c_str());
- return Status::OK();
+ if (!backupOriginalFiles) {
+ MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::remove_all(reservedPath));
}
+ // Reopen the database so it's discoverable
+ dbHolder().openDb(txn, dbName);
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/storage/oplog_hack.cpp b/src/mongo/db/storage/oplog_hack.cpp
index b25268e4271..e42946e5611 100644
--- a/src/mongo/db/storage/oplog_hack.cpp
+++ b/src/mongo/db/storage/oplog_hack.cpp
@@ -42,40 +42,40 @@
namespace mongo {
namespace oploghack {
- StatusWith<RecordId> keyForOptime(const Timestamp& opTime) {
- // Make sure secs and inc wouldn't be negative if treated as signed. This ensures that they
- // don't sort differently when put in a RecordId. It also avoids issues with Null/Invalid
- // RecordIds
- if (opTime.getSecs() > uint32_t(std::numeric_limits<int32_t>::max()))
- return StatusWith<RecordId>(ErrorCodes::BadValue, "ts secs too high");
+StatusWith<RecordId> keyForOptime(const Timestamp& opTime) {
+ // Make sure secs and inc wouldn't be negative if treated as signed. This ensures that they
+ // don't sort differently when put in a RecordId. It also avoids issues with Null/Invalid
+ // RecordIds
+ if (opTime.getSecs() > uint32_t(std::numeric_limits<int32_t>::max()))
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts secs too high");
- if (opTime.getInc() > uint32_t(std::numeric_limits<int32_t>::max()))
- return StatusWith<RecordId>(ErrorCodes::BadValue, "ts inc too high");
+ if (opTime.getInc() > uint32_t(std::numeric_limits<int32_t>::max()))
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts inc too high");
- const RecordId out = RecordId(opTime.getSecs(), opTime.getInc());
- if (out <= RecordId::min())
- return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too low");
- if (out >= RecordId::max())
- return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too high");
+ const RecordId out = RecordId(opTime.getSecs(), opTime.getInc());
+ if (out <= RecordId::min())
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too low");
+ if (out >= RecordId::max())
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts too high");
- return StatusWith<RecordId>(out);
- }
+ return StatusWith<RecordId>(out);
+}
- /**
- * data and len must be the arguments from RecordStore::insert() on an oplog collection.
- */
- StatusWith<RecordId> extractKey(const char* data, int len) {
- DEV invariant(validateBSON(data, len).isOK());
+/**
+ * data and len must be the arguments from RecordStore::insert() on an oplog collection.
+ */
+StatusWith<RecordId> extractKey(const char* data, int len) {
+ DEV invariant(validateBSON(data, len).isOK());
- const BSONObj obj(data);
- const BSONElement elem = obj["ts"];
- if (elem.eoo())
- return StatusWith<RecordId>(ErrorCodes::BadValue, "no ts field");
- if (elem.type() != bsonTimestamp)
- return StatusWith<RecordId>(ErrorCodes::BadValue, "ts must be a Timestamp");
+ const BSONObj obj(data);
+ const BSONElement elem = obj["ts"];
+ if (elem.eoo())
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "no ts field");
+ if (elem.type() != bsonTimestamp)
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "ts must be a Timestamp");
- return keyForOptime(elem.timestamp());
- }
+ return keyForOptime(elem.timestamp());
+}
} // namespace oploghack
-} // namespace mongo
+} // namespace mongo
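Illustration (not part of the change above): a minimal caller-side sketch of how extractKey()/keyForOptime() are meant to be used when inserting into an oplog record store. The function name exampleKeyForOplogDoc and the exact include paths are assumptions made for the sketch, not code from this tree.

#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/record_id.h"
#include "mongo/db/storage/oplog_hack.h"

namespace {

// Derive the RecordId key for an oplog document that is about to be inserted.
// extractKey() checks that a "ts" Timestamp field exists and forwards to
// keyForOptime(), which rejects secs/inc values that would be negative when
// treated as signed 32-bit ints (they would sort incorrectly in a RecordId).
mongo::StatusWith<mongo::RecordId> exampleKeyForOplogDoc(const mongo::BSONObj& doc) {
    return mongo::oploghack::extractKey(doc.objdata(), doc.objsize());
}

}  // namespace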
diff --git a/src/mongo/db/storage/oplog_hack.h b/src/mongo/db/storage/oplog_hack.h
index 92138959304..11f9e07264f 100644
--- a/src/mongo/db/storage/oplog_hack.h
+++ b/src/mongo/db/storage/oplog_hack.h
@@ -32,21 +32,21 @@
#include "mongo/base/status_with.h"
namespace mongo {
- class RecordId;
- class Timestamp;
+class RecordId;
+class Timestamp;
namespace oploghack {
- /**
- * Converts Timestamp to a RecordId in an unspecified manor that is safe to use as the key to
- * in a RecordStore.
- */
- StatusWith<RecordId> keyForOptime(const Timestamp& opTime);
+/**
+ * Converts Timestamp to a RecordId in an unspecified manner that is safe to use as the key
+ * in a RecordStore.
+ */
+StatusWith<RecordId> keyForOptime(const Timestamp& opTime);
- /**
- * data and len must be the arguments from RecordStore::insert() on an oplog collection.
- */
- StatusWith<RecordId> extractKey(const char* data, int len);
+/**
+ * data and len must be the arguments from RecordStore::insert() on an oplog collection.
+ */
+StatusWith<RecordId> extractKey(const char* data, int len);
} // namespace oploghack
} // namespace mongo
diff --git a/src/mongo/db/storage/paths.cpp b/src/mongo/db/storage/paths.cpp
index cb2913c6b06..2f6fb4d4a77 100644
--- a/src/mongo/db/storage/paths.cpp
+++ b/src/mongo/db/storage/paths.cpp
@@ -35,79 +35,79 @@
namespace mongo {
- /** from a full path */
- RelativePath RelativePath::fromFullPath(boost::filesystem::path dbp,
- boost::filesystem::path f) {
- // filesystem::path normalizes / and backslash
- std::string fullpath = f.string();
- std::string relative = str::after(fullpath, dbp.string());
- if( relative.empty() ) {
- log() << "warning file is not under db path? " << fullpath << ' ' << dbp.string();
- RelativePath rp;
- rp._p = fullpath;
- return rp;
- }
- if( str::startsWith(relative, "/") || str::startsWith(relative, "\\") ) {
- relative.erase(0, 1);
- }
+/** from a full path */
+RelativePath RelativePath::fromFullPath(boost::filesystem::path dbp, boost::filesystem::path f) {
+ // filesystem::path normalizes / and backslash
+ std::string fullpath = f.string();
+ std::string relative = str::after(fullpath, dbp.string());
+ if (relative.empty()) {
+ log() << "warning file is not under db path? " << fullpath << ' ' << dbp.string();
RelativePath rp;
- rp._p = relative;
+ rp._p = fullpath;
return rp;
}
+ if (str::startsWith(relative, "/") || str::startsWith(relative, "\\")) {
+ relative.erase(0, 1);
+ }
+ RelativePath rp;
+ rp._p = relative;
+ return rp;
+}
- dev_t getPartition(const std::string& path){
- struct stat stats;
-
- if (stat(path.c_str(), &stats) != 0){
- uasserted(13646, str::stream() << "stat() failed for file: " << path << " " << errnoWithDescription());
- }
+dev_t getPartition(const std::string& path) {
+ struct stat stats;
- return stats.st_dev;
+ if (stat(path.c_str(), &stats) != 0) {
+ uasserted(13646,
+ str::stream() << "stat() failed for file: " << path << " "
+ << errnoWithDescription());
}
- void flushMyDirectory(const boost::filesystem::path& file) {
-#ifdef __linux__ // this isn't needed elsewhere
- static bool _warnedAboutFilesystem = false;
- // if called without a fully qualified path it asserts; that makes mongoperf fail.
- // so make a warning. need a better solution longer term.
- // massert(13652, str::stream() << "Couldn't find parent dir for file: " << file.string(),);
- if (!file.has_branch_path()) {
- log() << "warning flushMyDirectory couldn't find parent dir for file: "
- << file.string();
- return;
- }
+ return stats.st_dev;
+}
+
+void flushMyDirectory(const boost::filesystem::path& file) {
+#ifdef __linux__ // this isn't needed elsewhere
+ static bool _warnedAboutFilesystem = false;
+ // if called without a fully qualified path it asserts; that makes mongoperf fail.
+ // so make a warning. need a better solution longer term.
+ // massert(13652, str::stream() << "Couldn't find parent dir for file: " << file.string(),);
+ if (!file.has_branch_path()) {
+ log() << "warning flushMyDirectory couldn't find parent dir for file: " << file.string();
+ return;
+ }
- boost::filesystem::path dir = file.branch_path(); // parent_path in new boosts
+ boost::filesystem::path dir = file.branch_path(); // parent_path in new boosts
- LOG(1) << "flushing directory " << dir.string();
+ LOG(1) << "flushing directory " << dir.string();
- int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
- massert(13650, str::stream() << "Couldn't open directory '" << dir.string()
- << "' for flushing: " << errnoWithDescription(),
- fd >= 0);
- if (fsync(fd) != 0) {
- int e = errno;
- if (e == EINVAL) { // indicates filesystem does not support synchronization
- if (!_warnedAboutFilesystem) {
- log() << "\tWARNING: This file system is not supported. For further information"
- << " see:"
- << startupWarningsLog;
- log() << "\t\t\thttp://dochub.mongodb.org/core/unsupported-filesystems"
- << startupWarningsLog;
- log() << "\t\tPlease notify MongoDB, Inc. if an unlisted filesystem generated "
- << "this warning." << startupWarningsLog;
- _warnedAboutFilesystem = true;
- }
- }
- else {
- close(fd);
- massert(13651, str::stream() << "Couldn't fsync directory '" << dir.string()
- << "': " << errnoWithDescription(e),
- false);
+ int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
+ massert(13650,
+ str::stream() << "Couldn't open directory '" << dir.string()
+ << "' for flushing: " << errnoWithDescription(),
+ fd >= 0);
+ if (fsync(fd) != 0) {
+ int e = errno;
+ if (e == EINVAL) { // indicates filesystem does not support synchronization
+ if (!_warnedAboutFilesystem) {
+ log() << "\tWARNING: This file system is not supported. For further information"
+ << " see:" << startupWarningsLog;
+ log() << "\t\t\thttp://dochub.mongodb.org/core/unsupported-filesystems"
+ << startupWarningsLog;
+ log() << "\t\tPlease notify MongoDB, Inc. if an unlisted filesystem generated "
+ << "this warning." << startupWarningsLog;
+ _warnedAboutFilesystem = true;
}
+ } else {
+ close(fd);
+ massert(13651,
+ str::stream() << "Couldn't fsync directory '" << dir.string()
+ << "': " << errnoWithDescription(e),
+ false);
}
- close(fd);
-#endif
}
+ close(fd);
+#endif
+}
}
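Illustration (not part of the change above): flushMyDirectory() implements the usual POSIX idiom of fsync'ing the parent directory after creating or renaming a file so the directory entry itself becomes durable. Below is a self-contained sketch of that idiom with error handling reduced to a return code; syncParentDir is a hypothetical name, not a function from this tree.

#include <fcntl.h>
#include <unistd.h>
#include <string>

// fsync the directory so a newly created/renamed entry inside it is durable.
// Returns false if the directory cannot be opened or synced; on filesystems
// that cannot sync directories, fsync() fails with EINVAL (see the warning
// emitted by flushMyDirectory above).
bool syncParentDir(const std::string& dirPath) {
    int fd = ::open(dirPath.c_str(), O_RDONLY);
    if (fd < 0)
        return false;
    bool ok = (::fsync(fd) == 0);
    ::close(fd);
    return ok;
}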
diff --git a/src/mongo/db/storage/paths.h b/src/mongo/db/storage/paths.h
index 8286c920566..e662ab41bf9 100644
--- a/src/mongo/db/storage/paths.h
+++ b/src/mongo/db/storage/paths.h
@@ -40,54 +40,61 @@
#include "mongo/db/storage_options.h"
namespace mongo {
-
- using namespace mongoutils;
-
- /** this is very much like a boost::path. however, we define a new type to get some type
- checking. if you want to say 'my param MUST be a relative path", use this.
- */
- struct RelativePath {
- std::string _p;
-
- bool empty() const { return _p.empty(); }
-
- static RelativePath fromRelativePath(const std::string& f) {
- RelativePath rp;
- rp._p = f;
- return rp;
- }
-
- /**
- * Returns path relative to 'dbpath' from a full path 'f'.
- */
- static RelativePath fromFullPath(boost::filesystem::path dbpath,
- boost::filesystem::path f);
-
- std::string toString() const { return _p; }
-
- bool operator!=(const RelativePath& r) const { return _p != r._p; }
- bool operator==(const RelativePath& r) const { return _p == r._p; }
- bool operator<(const RelativePath& r) const { return _p < r._p; }
-
- std::string asFullPath() const {
- boost::filesystem::path x(storageGlobalParams.dbpath);
- x /= _p;
- return x.string();
- }
-
- };
-
- dev_t getPartition(const std::string& path);
-
- inline bool onSamePartition(const std::string& path1, const std::string& path2){
- dev_t dev1 = getPartition(path1);
- dev_t dev2 = getPartition(path2);
-
- return dev1 == dev2;
+
+using namespace mongoutils;
+
+/** this is very much like a boost::path. however, we define a new type to get some type
+   checking. if you want to say 'my param MUST be a relative path', use this.
+*/
+struct RelativePath {
+ std::string _p;
+
+ bool empty() const {
+ return _p.empty();
+ }
+
+ static RelativePath fromRelativePath(const std::string& f) {
+ RelativePath rp;
+ rp._p = f;
+ return rp;
}
- void flushMyDirectory(const boost::filesystem::path& file);
+ /**
+ * Returns path relative to 'dbpath' from a full path 'f'.
+ */
+ static RelativePath fromFullPath(boost::filesystem::path dbpath, boost::filesystem::path f);
+
+ std::string toString() const {
+ return _p;
+ }
+
+ bool operator!=(const RelativePath& r) const {
+ return _p != r._p;
+ }
+ bool operator==(const RelativePath& r) const {
+ return _p == r._p;
+ }
+ bool operator<(const RelativePath& r) const {
+ return _p < r._p;
+ }
+
+ std::string asFullPath() const {
+ boost::filesystem::path x(storageGlobalParams.dbpath);
+ x /= _p;
+ return x.string();
+ }
+};
+
+dev_t getPartition(const std::string& path);
+
+inline bool onSamePartition(const std::string& path1, const std::string& path2) {
+ dev_t dev1 = getPartition(path1);
+ dev_t dev2 = getPartition(path2);
+
+ return dev1 == dev2;
+}
- boost::filesystem::path ensureParentDirCreated(const boost::filesystem::path& p);
+void flushMyDirectory(const boost::filesystem::path& file);
+boost::filesystem::path ensureParentDirCreated(const boost::filesystem::path& p);
}
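Illustration (not part of the change above): a small usage sketch for the helpers declared in this header. It assumes a boost version with parent_path() and that both paths exist (getPartition() calls stat()); the function name is hypothetical.

#include <string>
#include "mongo/db/storage/paths.h"

// Store the dbpath-relative name of 'dataFile' in '*rp' and report whether the
// file sits on the same partition as the dbpath; a cross-partition move would
// have to copy the data rather than rename it.
bool exampleRelativeAndSamePartition(const boost::filesystem::path& dataFile,
                                     mongo::RelativePath* rp) {
    *rp = mongo::RelativePath::fromFullPath(mongo::storageGlobalParams.dbpath, dataFile);
    return mongo::onSamePartition(mongo::storageGlobalParams.dbpath,
                                  dataFile.parent_path().string());
}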
diff --git a/src/mongo/db/storage/record_data.h b/src/mongo/db/storage/record_data.h
index 612408f84c6..03409d911ea 100644
--- a/src/mongo/db/storage/record_data.h
+++ b/src/mongo/db/storage/record_data.h
@@ -35,58 +35,69 @@
namespace mongo {
- // TODO: Does this need to have move support?
- /**
- * A replacement for the Record class. This class represents data in a record store.
- * The _dataPtr attribute is used to manage memory ownership. If _dataPtr is NULL, then
- * the memory pointed to by _data is owned by the RecordStore. If _dataPtr is not NULL, then
- * it must point to the same array as _data.
- */
- class RecordData {
- public:
- RecordData() : _data( NULL ), _size( 0 ) {}
- RecordData(const char* data, int size): _data(data), _size(size) { }
-
- RecordData(SharedBuffer ownedData, int size)
- : _data(ownedData.get()), _size(size), _ownedData(std::move(ownedData)) {
- }
-
- const char* data() const { return _data; }
-
- int size() const { return _size; }
-
- /**
- * Returns true if this owns its own memory, and false otherwise
- */
- bool isOwned() const { return _ownedData.get(); }
-
- SharedBuffer releaseBuffer() {
- return std::move(_ownedData);
- }
-
- BSONObj toBson() const { return isOwned() ? BSONObj(_ownedData) : BSONObj(_data); }
-
- BSONObj releaseToBson() { return isOwned() ? BSONObj(releaseBuffer()) : BSONObj(_data); }
-
- // TODO uncomment once we require compilers that support overloading for rvalue this.
- // BSONObj toBson() && { return releaseToBson(); }
+// TODO: Does this need to have move support?
+/**
+ * A replacement for the Record class. This class represents data in a record store.
+ * The _dataPtr attribute is used to manage memory ownership. If _dataPtr is NULL, then
+ * the memory pointed to by _data is owned by the RecordStore. If _dataPtr is not NULL, then
+ * it must point to the same array as _data.
+ */
+class RecordData {
+public:
+ RecordData() : _data(NULL), _size(0) {}
+ RecordData(const char* data, int size) : _data(data), _size(size) {}
- RecordData getOwned() const {
- if (isOwned()) return *this;
- auto buffer = SharedBuffer::allocate(_size);
- memcpy(buffer.get(), _data, _size);
- return RecordData(buffer, _size);
- }
+ RecordData(SharedBuffer ownedData, int size)
+ : _data(ownedData.get()), _size(size), _ownedData(std::move(ownedData)) {}
- void makeOwned() {
- if (isOwned()) return;
- *this = getOwned();
- }
+ const char* data() const {
+ return _data;
+ }
- private:
- const char* _data;
- int _size;
- SharedBuffer _ownedData;
- };
+ int size() const {
+ return _size;
+ }
-} // namespace mongo
+ /**
+ * Returns true if this owns its own memory, and false otherwise
+ */
+ bool isOwned() const {
+ return _ownedData.get();
+ }
+
+ SharedBuffer releaseBuffer() {
+ return std::move(_ownedData);
+ }
+
+ BSONObj toBson() const {
+ return isOwned() ? BSONObj(_ownedData) : BSONObj(_data);
+ }
+
+ BSONObj releaseToBson() {
+ return isOwned() ? BSONObj(releaseBuffer()) : BSONObj(_data);
+ }
+
+ // TODO uncomment once we require compilers that support overloading for rvalue this.
+ // BSONObj toBson() && { return releaseToBson(); }
+
+ RecordData getOwned() const {
+ if (isOwned())
+ return *this;
+ auto buffer = SharedBuffer::allocate(_size);
+ memcpy(buffer.get(), _data, _size);
+ return RecordData(buffer, _size);
+ }
+
+ void makeOwned() {
+ if (isOwned())
+ return;
+ *this = getOwned();
+ }
+
+private:
+ const char* _data;
+ int _size;
+ SharedBuffer _ownedData;
+};
+
+} // namespace mongo
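Illustration (not part of the change above): the ownership contract in a nutshell. Unowned RecordData borrows memory from the RecordStore, so callers copy it into the SharedBuffer (getOwned()/makeOwned()) before the cursor or collection lock goes away. The function name below is hypothetical.

#include "mongo/db/storage/record_data.h"

// Take a RecordData that may still point into storage-engine memory and return
// a copy that stays valid after the cursor is destroyed or the lock released.
mongo::RecordData keepPastLock(const mongo::RecordData& borrowed) {
    mongo::RecordData owned = borrowed.getOwned();  // no-op copy if already owned
    // owned.isOwned() is now true; owned.toBson() remains valid later on.
    return owned;
}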
diff --git a/src/mongo/db/storage/record_fetcher.h b/src/mongo/db/storage/record_fetcher.h
index 9255e805b10..66c626ea4d5 100644
--- a/src/mongo/db/storage/record_fetcher.h
+++ b/src/mongo/db/storage/record_fetcher.h
@@ -30,26 +30,26 @@
namespace mongo {
+/**
+ * Used for yielding while data is fetched from disk.
+ *
+ * @see RecordStore::recordNeedsFetch
+ */
+class RecordFetcher {
+public:
+ virtual ~RecordFetcher() {}
+
/**
- * Used for yielding while data is fetched from disk.
- *
- * @see RecordStore::recordNeedsFetch
+ * Performs any setup which is needed prior to yielding locks.
*/
- class RecordFetcher {
- public:
- virtual ~RecordFetcher() { }
+ virtual void setup() = 0;
- /**
- * Performs any setup which is needed prior to yielding locks.
- */
- virtual void setup() = 0;
-
- /**
- * Called after locks are yielded in order to bring data into memory.
- *
- * Should not be called more than once.
- */
- virtual void fetch() = 0;
- };
+ /**
+ * Called after locks are yielded in order to bring data into memory.
+ *
+ * Should not be called more than once.
+ */
+ virtual void fetch() = 0;
+};
-} // namespace mongo
+} // namespace mongo
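Illustration (not part of the change above): the caller-side sequence this interface implies. setup() must run while the locks are still held; fetch() runs after they are yielded so the page fault happens outside the lock. The yieldLocks/restoreLocks callbacks are placeholders for whatever lock-yielding mechanism the caller has, not APIs from this tree.

#include <functional>
#include <memory>
#include "mongo/db/storage/record_fetcher.h"

void pageInOutsideOfLocks(std::unique_ptr<mongo::RecordFetcher> fetcher,
                          const std::function<void()>& yieldLocks,
                          const std::function<void()>& restoreLocks) {
    if (!fetcher)
        return;            // storage engine says no fetch is needed; stay locked
    fetcher->setup();      // must happen before the locks are given up
    yieldLocks();
    fetcher->fetch();      // bring the record into memory without holding locks
    restoreLocks();
}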
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 5f4285995df..1f5a3f27556 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -41,491 +41,504 @@
namespace mongo {
- class CappedDocumentDeleteCallback;
- class Collection;
- struct CompactOptions;
- struct CompactStats;
- class DocWriter;
- class MAdvise;
- class NamespaceDetails;
- class OperationContext;
- class RecordFetcher;
-
- class RecordStoreCompactAdaptor;
- class RecordStore;
-
- struct ValidateResults;
- class ValidateAdaptor;
+class CappedDocumentDeleteCallback;
+class Collection;
+struct CompactOptions;
+struct CompactStats;
+class DocWriter;
+class MAdvise;
+class NamespaceDetails;
+class OperationContext;
+class RecordFetcher;
+
+class RecordStoreCompactAdaptor;
+class RecordStore;
+
+struct ValidateResults;
+class ValidateAdaptor;
+
+/**
+ * Allows inserting a Record "in-place" without creating a copy ahead of time.
+ */
+class DocWriter {
+public:
+ virtual ~DocWriter() {}
+ virtual void writeDocument(char* buf) const = 0;
+ virtual size_t documentSize() const = 0;
+ virtual bool addPadding() const {
+ return true;
+ }
+};
+
+/**
+ * @see RecordStore::updateRecord
+ */
+class UpdateNotifier {
+public:
+ virtual ~UpdateNotifier() {}
+ virtual Status recordStoreGoingToMove(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* oldBuffer,
+ size_t oldSize) = 0;
+ virtual Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) = 0;
+};
+
+/**
+ * The data items stored in a RecordStore.
+ */
+struct Record {
+ RecordId id;
+ RecordData data;
+};
+
+/**
+ * Retrieves Records from a RecordStore.
+ *
+ * A cursor is constructed with a direction flag with the following effects:
+ * - The direction that next() moves.
+ * - If a restore cannot return to the saved position, cursors will be positioned on the
+ * closest position *after* the query in the direction of the scan.
+ *
+ * A cursor is tied to a transaction, such as the OperationContext or a WriteUnitOfWork
+ * inside that context. Any cursor acquired inside a transaction is invalid outside
+ * of that transaction, instead use the save and restore methods to reestablish the cursor.
+ *
+ * Any method other than invalidate and the save methods may throw WriteConflict exception. If
+ * that happens, the cursor may not be used again until it has been saved and successfully
+ * restored. If next() or restore() throw a WCE the cursor's position will be the same as before
+ * the call (strong exception guarantee). All other methods leave the cursor in a valid state
+ * but with an unspecified position (basic exception guarantee). If any exception other than
+ * WCE is thrown, the cursor must be destroyed, which is guaranteed not to leak any resources.
+ *
+ * Any returned unowned BSON is only valid until the next call to any method on this
+ * interface.
+ *
+ * Implementations may override any default implementation if they can provide a more
+ * efficient implementation.
+ */
+class RecordCursor {
+public:
+ virtual ~RecordCursor() = default;
/**
- * Allows inserting a Record "in-place" without creating a copy ahead of time.
+ * Moves forward and returns the new data or boost::none if there is no more data.
+ * Continues returning boost::none once it reaches EOF.
*/
- class DocWriter {
- public:
- virtual ~DocWriter() {}
- virtual void writeDocument( char* buf ) const = 0;
- virtual size_t documentSize() const = 0;
- virtual bool addPadding() const { return true; }
- };
+ virtual boost::optional<Record> next() = 0;
+
+ //
+ // Seeking
+ //
+ // Warning: MMAPv1 cannot detect if RecordIds are valid. Therefore callers should only pass
+ // potentially deleted RecordIds to seek methods if they know that MMAPv1 is not the current
+ // storage engine. All new storage engines must support detecting the existence of Records.
+ //
/**
- * @see RecordStore::updateRecord
+ * Seeks to a Record with the provided id.
+ *
+ * If an exact match can't be found, boost::none will be returned and the resulting position
+ * of the cursor is unspecified.
+ */
+ virtual boost::optional<Record> seekExact(const RecordId& id) = 0;
+
+ //
+ // Saving and restoring state
+ //
+
+ /**
+ * Prepares for state changes in underlying data in a way that allows the cursor's
+ * current position to be restored.
+ *
+ * It is safe to call savePositioned multiple times in a row.
+ * No other method (excluding destructor) may be called until successfully restored.
*/
- class UpdateNotifier {
- public:
- virtual ~UpdateNotifier(){}
- virtual Status recordStoreGoingToMove( OperationContext* txn,
- const RecordId& oldLocation,
- const char* oldBuffer,
- size_t oldSize ) = 0;
- virtual Status recordStoreGoingToUpdateInPlace( OperationContext* txn,
- const RecordId& loc ) = 0;
- };
+ virtual void savePositioned() = 0;
/**
- * The data items stored in a RecordStore.
+ * Prepares for state changes in underlying data without necessarily saving the current
+ * state.
+ *
+ * The cursor's position when restored is unspecified. Caller is expected to seek rather
+ * than call next() following the restore.
+ *
+ * It is safe to call saveUnpositioned multiple times in a row.
+ * No other method (excluding destructor) may be called until successfully restored.
+ */
+ virtual void saveUnpositioned() {
+ savePositioned();
+ }
+
+ /**
+ * Recovers from potential state changes in underlying data.
+ *
+ * Returns false if it is invalid to continue using this iterator. This usually means that
+ * capped deletes have caught up to the position of this iterator and continuing could
+ * result in missed data.
+ *
+ * If the former position no longer exists, but it is safe to continue iterating, the
+ * following call to next() will return the next closest position in the direction of the
+ * scan, if any.
+ *
+ * This handles restoring after either savePositioned() or saveUnpositioned().
+ */
+ virtual bool restore(OperationContext* txn) = 0;
+
+ /**
+ * Inform the cursor that this id is being invalidated.
+ * Must be called between save and restore.
+ *
+ * WARNING: Storage engines other than MMAPv1 should not depend on this being called.
+ */
+ virtual void invalidate(const RecordId& id){};
+
+ //
+ // RecordFetchers
+ //
+ // Storage engines which do not support document-level locking hold locks at collection or
+ // database granularity. As an optimization, these locks can be yielded when a record needs
+ // to be fetched from secondary storage. If this method returns non-NULL, then it indicates
+ // that the query system layer should yield its locks, following the protocol defined by the
+ // RecordFetcher class, so that a potential page fault is triggered out of the lock.
+ //
+ // Storage engines which support document-level locking need not implement this.
+ //
+ // TODO see if these can be replaced by WriteConflictException.
+ //
+
+ /**
+ * Returns a RecordFetcher if needed for a call to next() or none if unneeded.
+ */
+ virtual std::unique_ptr<RecordFetcher> fetcherForNext() const {
+ return {};
+ }
+
+ /**
+ * Returns a RecordFetcher if needed to fetch the provided Record or none if unneeded.
+ */
+ virtual std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const {
+ return {};
+ }
+};
+
+/**
+ * A RecordStore provides an abstraction used for storing documents in a collection,
+ * or entries in an index. In storage engines implementing the KVEngine, record stores
+ * are also used for implementing catalogs.
+ *
+ * Many methods take an OperationContext parameter. This contains the RecoveryUnit, with
+ * all RecordStore specific transaction information, as well as the LockState. Methods that take
+ * an OperationContext may throw a WriteConflictException.
+ */
+class RecordStore {
+ MONGO_DISALLOW_COPYING(RecordStore);
+
+public:
+ RecordStore(StringData ns) : _ns(ns.toString()) {}
+
+ virtual ~RecordStore() {}
+
+ // META
+
+ // name of the RecordStore implementation
+ virtual const char* name() const = 0;
+
+ virtual const std::string& ns() const {
+ return _ns;
+ }
+
+ /**
+ * The dataSize is an approximation of the sum of the sizes (in bytes) of the
+ * documents or entries in the recordStore.
+ */
+ virtual long long dataSize(OperationContext* txn) const = 0;
+
+ /**
+ * Total number of records in the RecordStore. You may need to cache it, so this call
+ * takes constant time, as it is called often.
+ */
+ virtual long long numRecords(OperationContext* txn) const = 0;
+
+ virtual bool isCapped() const = 0;
+
+ virtual void setCappedDeleteCallback(CappedDocumentDeleteCallback*) {
+ invariant(false);
+ }
+
+ /**
+ * @param extraInfo - optional more debug info
+ * @param level - optional, level of debug info to put in (higher is more)
+ * @return total estimate size (in bytes) on stable storage
+ */
+ virtual int64_t storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int infoLevel = 0) const = 0;
+
+ // CRUD related
+
+ /**
+ * Get the RecordData at loc, which must exist.
+ *
+ * If unowned data is returned, it is valid until the next modification of this Record or
+ * the lock on this collection is released.
+ *
+ * In general, prefer findRecord or RecordCursor::seekExact since they can tell you if a
+ * record has been removed.
*/
- struct Record {
- RecordId id;
+ virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const {
RecordData data;
- };
+ invariant(findRecord(txn, loc, &data));
+ return data;
+ }
/**
- * Retrieves Records from a RecordStore.
+ * @param out - If the record exists, the contents of this are set.
+ * @return true iff there is a Record for loc
*
- * A cursor is constructed with a direction flag with the following effects:
- * - The direction that next() moves.
- * - If a restore cannot return to the saved position, cursors will be positioned on the
- * closest position *after* the query in the direction of the scan.
+ * If unowned data is returned, it is valid until the next modification of this Record or
+ * the lock on this collection is released.
*
- * A cursor is tied to a transaction, such as the OperationContext or a WriteUnitOfWork
- * inside that context. Any cursor acquired inside a transaction is invalid outside
- * of that transaction, instead use the save and restore methods to reestablish the cursor.
+ * In general prefer RecordCursor::seekExact since it can avoid copying data in more
+ * storageEngines.
*
- * Any method other than invalidate and the save methods may throw WriteConflict exception. If
- * that happens, the cursor may not be used again until it has been saved and successfully
- * restored. If next() or restore() throw a WCE the cursor's position will be the same as before
- * the call (strong exception guarantee). All other methods leave the cursor in a valid state
- * but with an unspecified position (basic exception guarantee). If any exception other than
- * WCE is thrown, the cursor must be destroyed, which is guaranteed not to leak any resources.
+ * Warning: MMAPv1 cannot detect if RecordIds are valid. Therefore callers should only pass
+ * potentially deleted RecordIds to seek methods if they know that MMAPv1 is not the current
+ * storage engine. All new storage engines must support detecting the existence of Records.
+ */
+ virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const {
+ auto cursor = getCursor(txn);
+ auto record = cursor->seekExact(loc);
+ if (!record)
+ return false;
+
+ record->data.makeOwned(); // Unowned data expires when cursor goes out of scope.
+ *out = std::move(record->data);
+ return true;
+ }
+
+ virtual void deleteRecord(OperationContext* txn, const RecordId& dl) = 0;
+
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota) = 0;
+
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota) = 0;
+
+ /**
+ * @param notifier - Only used by record stores which do not support doc-locking.
+ * In the case of a document move, this is called after the document
+ * has been written to the new location, but before it is deleted from
+ * the old location.
+ * In the case of an in-place update, this is called just before the
+ * in-place write occurs.
+ * @return Status or RecordId, RecordId might be different
+ */
+ virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* data,
+ int len,
+ bool enforceQuota,
+ UpdateNotifier* notifier) = 0;
+
+ /**
+ * @return Returns 'false' if this record store does not implement
+ * 'updateWithDamages'. If this method returns false, 'updateWithDamages' must not be
+ * called, and all updates must be routed through 'updateRecord' above. This allows the
+ * update framework to avoid doing the work of damage tracking if the underlying record
+ * store cannot utilize that information.
+ */
+ virtual bool updateWithDamagesSupported() const = 0;
+
+ virtual Status updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages) = 0;
+
+ /**
+ * Returns a new cursor over this record store.
*
- * Any returned unowned BSON is only valid until the next call to any method on this
- * interface.
+ * The cursor is logically positioned before the first (or last if !forward) Record in the
+ * collection so that Record will be returned on the first call to next(). Implementations
+ * are allowed to lazily seek to the first Record when next() is called rather than doing
+ * it on construction.
+ */
+ virtual std::unique_ptr<RecordCursor> getCursor(OperationContext* txn,
+ bool forward = true) const = 0;
+
+ /**
+ * Constructs a cursor over a potentially corrupted store, which can be used to salvage
+ * damaged records. The iterator might return every record in the store if all of them
+ * are reachable and not corrupted. Returns NULL if not supported.
*
- * Implementations may override any default implementation if they can provide a more
- * efficient implementation.
+ * Repair cursors are only required to support forward scanning, so it is illegal to call
+ * seekExact() on the returned cursor.
*/
- class RecordCursor {
- public:
- virtual ~RecordCursor() = default;
-
- /**
- * Moves forward and returns the new data or boost::none if there is no more data.
- * Continues returning boost::none once it reaches EOF.
- */
- virtual boost::optional<Record> next() = 0;
-
- //
- // Seeking
- //
- // Warning: MMAPv1 cannot detect if RecordIds are valid. Therefore callers should only pass
- // potentially deleted RecordIds to seek methods if they know that MMAPv1 is not the current
- // storage engine. All new storage engines must support detecting the existence of Records.
- //
-
- /**
- * Seeks to a Record with the provided id.
- *
- * If an exact match can't be found, boost::none will be returned and the resulting position
- * of the cursor is unspecified.
- */
- virtual boost::optional<Record> seekExact(const RecordId& id) = 0;
-
- //
- // Saving and restoring state
- //
-
- /**
- * Prepares for state changes in underlying data in a way that allows the cursor's
- * current position to be restored.
- *
- * It is safe to call savePositioned multiple times in a row.
- * No other method (excluding destructor) may be called until successfully restored.
- */
- virtual void savePositioned() = 0;
-
- /**
- * Prepares for state changes in underlying data without necessarily saving the current
- * state.
- *
- * The cursor's position when restored is unspecified. Caller is expected to seek rather
- * than call next() following the restore.
- *
- * It is safe to call saveUnpositioned multiple times in a row.
- * No other method (excluding destructor) may be called until successfully restored.
- */
- virtual void saveUnpositioned() { savePositioned(); }
-
- /**
- * Recovers from potential state changes in underlying data.
- *
- * Returns false if it is invalid to continue using this iterator. This usually means that
- * capped deletes have caught up to the position of this iterator and continuing could
- * result in missed data.
- *
- * If the former position no longer exists, but it is safe to continue iterating, the
- * following call to next() will return the next closest position in the direction of the
- * scan, if any.
- *
- * This handles restoring after either savePositioned() or saveUnpositioned().
- */
- virtual bool restore(OperationContext* txn) = 0;
-
- /**
- * Inform the cursor that this id is being invalidated.
- * Must be called between save and restore.
- *
- * WARNING: Storage engines other than MMAPv1 should not depend on this being called.
- */
- virtual void invalidate(const RecordId& id) {};
-
- //
- // RecordFetchers
- //
- // Storage engines which do not support document-level locking hold locks at collection or
- // database granularity. As an optimization, these locks can be yielded when a record needs
- // to be fetched from secondary storage. If this method returns non-NULL, then it indicates
- // that the query system layer should yield its locks, following the protocol defined by the
- // RecordFetcher class, so that a potential page fault is triggered out of the lock.
- //
- // Storage engines which support document-level locking need not implement this.
- //
- // TODO see if these can be replaced by WriteConflictException.
- //
-
- /**
- * Returns a RecordFetcher if needed for a call to next() or none if unneeded.
- */
- virtual std::unique_ptr<RecordFetcher> fetcherForNext() const { return {}; }
-
- /**
- * Returns a RecordFetcher if needed to fetch the provided Record or none if unneeded.
- */
- virtual std::unique_ptr<RecordFetcher> fetcherForId(const RecordId& id) const { return {}; }
- };
+ virtual std::unique_ptr<RecordCursor> getCursorForRepair(OperationContext* txn) const {
+ return {};
+ }
/**
- * A RecordStore provides an abstraction used for storing documents in a collection,
- * or entries in an index. In storage engines implementing the KVEngine, record stores
- * are also used for implementing catalogs.
+ * Returns many RecordCursors that partition the RecordStore into many disjoint sets.
+ * Iterating all returned RecordCursors is equivalent to iterating the full store.
*
- * Many methods take an OperationContext parameter. This contains the RecoveryUnit, with
- * all RecordStore specific transaction information, as well as the LockState. Methods that take
- * an OperationContext may throw a WriteConflictException.
+ * Partition cursors are only required to support forward scanning, so it is illegal to call
+ * seekExact() on any of the returned cursors.
+ *
+ * WARNING: the first call to restore() on each cursor may (but is not guaranteed to) be on
+ * a different RecoveryUnit than the initial save. This will be made more sane as part of
+ * SERVER-17364.
+ */
+ virtual std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const {
+ std::vector<std::unique_ptr<RecordCursor>> out(1);
+ out[0] = getCursor(txn);
+ return out;
+ }
+
+ // higher level
+
+
+ /**
+ * removes all Records
*/
- class RecordStore {
- MONGO_DISALLOW_COPYING(RecordStore);
- public:
- RecordStore( StringData ns ) : _ns(ns.toString()) { }
-
- virtual ~RecordStore() { }
-
- // META
-
- // name of the RecordStore implementation
- virtual const char* name() const = 0;
-
- virtual const std::string& ns() const { return _ns; }
-
- /**
- * The dataSize is an approximation of the sum of the sizes (in bytes) of the
- * documents or entries in the recordStore.
- */
- virtual long long dataSize(OperationContext* txn) const = 0;
-
- /**
- * Total number of record in the RecordStore. You may need to cache it, so this call
- * takes constant time, as it is called often.
- */
- virtual long long numRecords(OperationContext* txn) const = 0;
-
- virtual bool isCapped() const = 0;
-
- virtual void setCappedDeleteCallback(CappedDocumentDeleteCallback*) {invariant( false );}
-
- /**
- * @param extraInfo - optional more debug info
- * @param level - optional, level of debug info to put in (higher is more)
- * @return total estimate size (in bytes) on stable storage
- */
- virtual int64_t storageSize( OperationContext* txn,
- BSONObjBuilder* extraInfo = NULL,
- int infoLevel = 0 ) const = 0;
-
- // CRUD related
-
- /**
- * Get the RecordData at loc, which must exist.
- *
- * If unowned data is returned, it is valid until the next modification of this Record or
- * the lock on this collection is released.
- *
- * In general, prefer findRecord or RecordCursor::seekExact since they can tell you if a
- * record has been removed.
- */
- virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const {
- RecordData data;
- invariant(findRecord(txn, loc, &data));
- return data;
- }
-
- /**
- * @param out - If the record exists, the contents of this are set.
- * @return true iff there is a Record for loc
- *
- * If unowned data is returned, it is valid until the next modification of this Record or
- * the lock on this collection is released.
- *
- * In general prefer RecordCursor::seekExact since it can avoid copying data in more
- * storageEngines.
- *
- * Warning: MMAPv1 cannot detect if RecordIds are valid. Therefore callers should only pass
- * potentially deleted RecordIds to seek methods if they know that MMAPv1 is not the current
- * storage engine. All new storage engines must support detecting the existence of Records.
- */
- virtual bool findRecord(OperationContext* txn,
- const RecordId& loc,
- RecordData* out) const {
- auto cursor = getCursor(txn);
- auto record = cursor->seekExact(loc);
- if (!record) return false;
-
- record->data.makeOwned(); // Unowned data expires when cursor goes out of scope.
- *out = std::move(record->data);
- return true;
- }
-
- virtual void deleteRecord( OperationContext* txn, const RecordId& dl ) = 0;
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota ) = 0;
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota ) = 0;
-
- /**
- * @param notifier - Only used by record stores which do not support doc-locking.
- * In the case of a document move, this is called after the document
- * has been written to the new location, but before it is deleted from
- * the old location.
- * In the case of an in-place update, this is called just before the
- * in-place write occurs.
- * @return Status or RecordId, RecordId might be different
- */
- virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
- const RecordId& oldLocation,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier ) = 0;
-
- /**
- * @return Returns 'false' if this record store does not implement
- * 'updatewithDamages'. If this method returns false, 'updateWithDamages' must not be
- * called, and all updates must be routed through 'updateRecord' above. This allows the
- * update framework to avoid doing the work of damage tracking if the underlying record
- * store cannot utilize that information.
- */
- virtual bool updateWithDamagesSupported() const = 0;
-
- virtual Status updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages ) = 0;
-
- /**
- * Returns a new cursor over this record store.
- *
- * The cursor is logically positioned before the first (or last if !forward) Record in the
- * collection so that Record will be returned on the first call to next(). Implementations
- * are allowed to lazily seek to the first Record when next() is called rather than doing
- * it on construction.
- */
- virtual std::unique_ptr<RecordCursor> getCursor(OperationContext* txn,
- bool forward = true) const = 0;
-
- /**
- * Constructs a cursor over a potentially corrupted store, which can be used to salvage
- * damaged records. The iterator might return every record in the store if all of them
- * are reachable and not corrupted. Returns NULL if not supported.
- *
- * Repair cursors are only required to support forward scanning, so it is illegal to call
- * seekExact() on the returned cursor.
- */
- virtual std::unique_ptr<RecordCursor> getCursorForRepair( OperationContext* txn ) const {
- return {};
- }
-
- /**
- * Returns many RecordCursors that partition the RecordStore into many disjoint sets.
- * Iterating all returned RecordCursors is equivalent to iterating the full store.
- *
- * Partition cursors are only required to support forward scanning, so it is illegal to call
- * seekExact() on any of the returned cursors.
- *
- * WARNING: the first call to restore() on each cursor may (but is not guaranteed to) be on
- * a different RecoveryUnit than the initial save. This will be made more sane as part of
- * SERVER-17364.
- */
- virtual std::vector<std::unique_ptr<RecordCursor>> getManyCursors(
- OperationContext* txn) const {
- std::vector<std::unique_ptr<RecordCursor>> out(1);
- out[0] = getCursor(txn);
- return out;
- }
-
- // higher level
-
-
- /**
- * removes all Records
- */
- virtual Status truncate( OperationContext* txn ) = 0;
-
- /**
- * Truncate documents newer than the document at 'end' from the capped
- * collection. The collection cannot be completely emptied using this
- * function. An assertion will be thrown if that is attempted.
- * @param inclusive - Truncate 'end' as well iff true
- * XXX: this will go away soon, just needed to move for now
- */
- virtual void temp_cappedTruncateAfter(OperationContext* txn,
- RecordId end,
- bool inclusive) = 0;
-
- /**
- * does this RecordStore support the compact operation?
- *
- * If you return true, you must provide implementations of all compact methods.
- */
- virtual bool compactSupported() const { return false; }
-
- /**
- * Does compact() leave RecordIds alone or can they change.
- *
- * Only called if compactSupported() returns true.
- */
- virtual bool compactsInPlace() const { invariant(false); }
-
- /**
- * Attempt to reduce the storage space used by this RecordStore.
- *
- * Only called if compactSupported() returns true.
- * No RecordStoreCompactAdaptor will be passed if compactsInPlace() returns true.
- */
- virtual Status compact( OperationContext* txn,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* options,
- CompactStats* stats ) {
- invariant(false);
- }
-
- /**
- * @param full - does more checks
- * @param scanData - scans each document
- * @return OK if the validate run successfully
- * OK will be returned even if corruption is found
- * deatils will be in result
- */
- virtual Status validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results, BSONObjBuilder* output ) = 0;
-
- /**
- * @param scaleSize - amount by which to scale size metrics
- * appends any custom stats from the RecordStore or other unique stats
- */
- virtual void appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const = 0;
-
- /**
- * Load all data into cache.
- * What cache depends on implementation.
- *
- * If the underlying storage engine does not support the operation,
- * returns ErrorCodes::CommandNotSupported
- *
- * @param output (optional) - where to put detailed stats
- */
- virtual Status touch( OperationContext* txn, BSONObjBuilder* output ) const {
- return Status(ErrorCodes::CommandNotSupported,
- "this storage engine does not support touch");
- }
-
- /**
- * Return the RecordId of an oplog entry as close to startingPosition as possible without
- * being higher. If there are no entries <= startingPosition, return RecordId().
- *
- * If you don't implement the oplogStartHack, just use the default implementation which
- * returns boost::none.
- */
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
- const RecordId& startingPosition) const {
- return boost::none;
- }
-
- /**
- * When we write to an oplog, we call this so that if the storage engine
- * supports doc locking, it can manage the visibility of oplog entries to ensure
- * they are ordered.
- */
- virtual Status oplogDiskLocRegister( OperationContext* txn,
- const Timestamp& opTime ) {
- return Status::OK();
- }
-
- /**
- * Called after a repair operation is run with the recomputed numRecords and dataSize.
- */
- virtual void updateStatsAfterRepair(OperationContext* txn,
- long long numRecords,
- long long dataSize) = 0;
-
- protected:
- std::string _ns;
- };
-
- class RecordStoreCompactAdaptor {
- public:
- virtual ~RecordStoreCompactAdaptor(){}
- virtual bool isDataValid( const RecordData& recData ) = 0;
- virtual size_t dataSize( const RecordData& recData ) = 0;
- virtual void inserted( const RecordData& recData, const RecordId& newLocation ) = 0;
- };
-
- struct ValidateResults {
- ValidateResults() {
- valid = true;
- }
- bool valid;
- std::vector<std::string> errors;
- };
+ virtual Status truncate(OperationContext* txn) = 0;
/**
- * This is so when a RecordStore is validating all records
- * it can call back to someone to check if a record is valid.
- * The actual data contained in a Record is totally opaque to the implementation.
+ * Truncate documents newer than the document at 'end' from the capped
+ * collection. The collection cannot be completely emptied using this
+ * function. An assertion will be thrown if that is attempted.
+ * @param inclusive - Truncate 'end' as well iff true
+ * XXX: this will go away soon, just needed to move for now
*/
- class ValidateAdaptor {
- public:
- virtual ~ValidateAdaptor(){}
+ virtual void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) = 0;
- virtual Status validate( const RecordData& recordData, size_t* dataSize ) = 0;
- };
+ /**
+ * does this RecordStore support the compact operation?
+ *
+ * If you return true, you must provide implementations of all compact methods.
+ */
+ virtual bool compactSupported() const {
+ return false;
+ }
+
+ /**
+ * Does compact() leave RecordIds alone or can they change.
+ *
+ * Only called if compactSupported() returns true.
+ */
+ virtual bool compactsInPlace() const {
+ invariant(false);
+ }
+
+ /**
+ * Attempt to reduce the storage space used by this RecordStore.
+ *
+ * Only called if compactSupported() returns true.
+ * No RecordStoreCompactAdaptor will be passed if compactsInPlace() returns true.
+ */
+ virtual Status compact(OperationContext* txn,
+ RecordStoreCompactAdaptor* adaptor,
+ const CompactOptions* options,
+ CompactStats* stats) {
+ invariant(false);
+ }
+
+ /**
+ * @param full - does more checks
+ * @param scanData - scans each document
+ * @return OK if the validate ran successfully
+ * OK will be returned even if corruption is found
+ * details will be in 'results'
+ */
+ virtual Status validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output) = 0;
+
+ /**
+ * @param scaleSize - amount by which to scale size metrics
+ * appends any custom stats from the RecordStore or other unique stats
+ */
+ virtual void appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const = 0;
+
+ /**
+ * Load all data into cache.
+ * What cache depends on implementation.
+ *
+ * If the underlying storage engine does not support the operation,
+ * returns ErrorCodes::CommandNotSupported
+ *
+ * @param output (optional) - where to put detailed stats
+ */
+ virtual Status touch(OperationContext* txn, BSONObjBuilder* output) const {
+ return Status(ErrorCodes::CommandNotSupported,
+ "this storage engine does not support touch");
+ }
+
+ /**
+ * Return the RecordId of an oplog entry as close to startingPosition as possible without
+ * being higher. If there are no entries <= startingPosition, return RecordId().
+ *
+ * If you don't implement the oplogStartHack, just use the default implementation which
+ * returns boost::none.
+ */
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const {
+ return boost::none;
+ }
+
+ /**
+ * When we write to an oplog, we call this so that if the storage engine
+ * supports doc locking, it can manage the visibility of oplog entries to ensure
+ * they are ordered.
+ */
+ virtual Status oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) {
+ return Status::OK();
+ }
+
+ /**
+ * Called after a repair operation is run with the recomputed numRecords and dataSize.
+ */
+ virtual void updateStatsAfterRepair(OperationContext* txn,
+ long long numRecords,
+ long long dataSize) = 0;
+
+protected:
+ std::string _ns;
+};
+
+class RecordStoreCompactAdaptor {
+public:
+ virtual ~RecordStoreCompactAdaptor() {}
+ virtual bool isDataValid(const RecordData& recData) = 0;
+ virtual size_t dataSize(const RecordData& recData) = 0;
+ virtual void inserted(const RecordData& recData, const RecordId& newLocation) = 0;
+};
+
+struct ValidateResults {
+ ValidateResults() {
+ valid = true;
+ }
+ bool valid;
+ std::vector<std::string> errors;
+};
+
+/**
+ * This is so when a RecordStore is validating all records
+ * it can call back to someone to check if a record is valid.
+ * The actual data contained in a Record is totally opaque to the implementation.
+ */
+class ValidateAdaptor {
+public:
+ virtual ~ValidateAdaptor() {}
+
+ virtual Status validate(const RecordData& recordData, size_t* dataSize) = 0;
+};
}
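Illustration (not part of the change above): a minimal DocWriter against the interface declared in this header, writing an already-serialized BSON object in place so insertRecord(txn, &writer, enforceQuota) can size the allocation from documentSize(). The class name and includes are assumptions made for the sketch.

#include <cstring>
#include "mongo/bson/bsonobj.h"
#include "mongo/db/storage/record_store.h"

class BSONObjDocWriter : public mongo::DocWriter {
public:
    explicit BSONObjDocWriter(const mongo::BSONObj& obj) : _obj(obj) {}

    void writeDocument(char* buf) const final {
        // The record store guarantees 'buf' has at least documentSize() bytes.
        std::memcpy(buf, _obj.objdata(), _obj.objsize());
    }
    size_t documentSize() const final {
        return static_cast<size_t>(_obj.objsize());
    }
    // addPadding() keeps its default of true, letting the engine pad if it wants.

private:
    mongo::BSONObj _obj;
};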
diff --git a/src/mongo/db/storage/record_store_test_datafor.cpp b/src/mongo/db/storage/record_store_test_datafor.cpp
index 7af9534acb5..4bb9a0fbca1 100644
--- a/src/mongo/db/storage/record_store_test_datafor.cpp
+++ b/src/mongo/db/storage/record_store_test_datafor.cpp
@@ -41,98 +41,94 @@ using std::stringstream;
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
- // Insert a record and verify its contents by calling dataFor()
- // on the returned RecordId.
- TEST( RecordStoreTestHarness, DataFor ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record and verify its contents by calling dataFor()
+// on the returned RecordId.
+TEST(RecordStoreTestHarness, DataFor) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- string data = "record-";
- RecordId loc;
+ string data = "record-";
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- RecordData record = rs->dataFor( opCtx.get(), loc );
- ASSERT_EQUALS( data.size() + 1, static_cast<size_t>( record.size() ) );
- ASSERT_EQUALS( data, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), loc);
+ ASSERT_EQUALS(data.size() + 1, static_cast<size_t>(record.size()));
+ ASSERT_EQUALS(data, record.data());
}
}
+}
- // Insert multiple records and verify their contents by calling dataFor()
- // on each of the returned RecordIds.
- TEST( RecordStoreTestHarness, DataForMultiple ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert multiple records and verify their contents by calling dataFor()
+// on each of the returned RecordIds.
+TEST(RecordStoreTestHarness, DataForMultiple) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record----" << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ uow.commit();
}
+ }
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record----" << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
- }
+ stringstream ss;
+ ss << "record----" << i;
+ string data = ss.str();
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record----" << i;
- string data = ss.str();
-
- RecordData record = rs->dataFor( opCtx.get(), locs[i] );
- ASSERT_EQUALS( data.size() + 1, static_cast<size_t>( record.size() ) );
- ASSERT_EQUALS( data, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), locs[i]);
+ ASSERT_EQUALS(data.size() + 1, static_cast<size_t>(record.size()));
+ ASSERT_EQUALS(data, record.data());
}
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_datasize.cpp b/src/mongo/db/storage/record_store_test_datasize.cpp
index 247adb92189..b39d62d27b3 100644
--- a/src/mongo/db/storage/record_store_test_datasize.cpp
+++ b/src/mongo/db/storage/record_store_test_datasize.cpp
@@ -39,61 +39,59 @@ using std::stringstream;
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
- // Verify that an empty collection takes up no space.
- TEST( RecordStoreTestHarness, DataSizeEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Verify that an empty collection takes up no space.
+TEST(RecordStoreTestHarness, DataSizeEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( rs->dataSize( opCtx.get() ) == 0 );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
}
- // Verify that a nonempty collection takes up some space.
- TEST( RecordStoreTestHarness, DataSizeNonEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(rs->dataSize(opCtx.get()) == 0);
+ }
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+// Verify that a nonempty collection takes up some space.
+TEST(RecordStoreTestHarness, DataSizeNonEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( rs->dataSize( opCtx.get() ) > 0 );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(rs->dataSize(opCtx.get()) > 0);
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_deleterecord.cpp b/src/mongo/db/storage/record_store_test_deleterecord.cpp
index d2a978d733f..87249d51a17 100644
--- a/src/mongo/db/storage/record_store_test_deleterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_deleterecord.cpp
@@ -41,102 +41,98 @@ using std::stringstream;
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
- // Insert a record and try to delete it.
- TEST( RecordStoreTestHarness, DeleteRecord ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record and try to delete it.
+TEST(RecordStoreTestHarness, DeleteRecord) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
- string data = "my record";
- RecordId loc;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ string data = "my record";
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- rs->deleteRecord( opCtx.get(), loc );
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ rs->deleteRecord(opCtx.get(), loc);
+ uow.commit();
}
}
- // Insert multiple records and try to delete them.
- TEST( RecordStoreTestHarness, DeleteMultipleRecords ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+// Insert multiple records and try to delete them.
+TEST(RecordStoreTestHarness, DeleteMultipleRecords) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ uow.commit();
}
+ }
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- rs->deleteRecord( opCtx.get(), locs[i] );
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ rs->deleteRecord(opCtx.get(), locs[i]);
+ uow.commit();
}
}
-} // namespace mongo
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_docwriter.h b/src/mongo/db/storage/record_store_test_docwriter.h
index b52cfd97335..b6032f2509a 100644
--- a/src/mongo/db/storage/record_store_test_docwriter.h
+++ b/src/mongo/db/storage/record_store_test_docwriter.h
@@ -35,26 +35,28 @@
namespace mongo {
namespace {
- class StringDocWriter : public DocWriter {
- public:
- StringDocWriter( const std::string &data, bool padding )
- : _data( data ), _padding( padding ) {
- }
+class StringDocWriter : public DocWriter {
+public:
+ StringDocWriter(const std::string& data, bool padding) : _data(data), _padding(padding) {}
- ~StringDocWriter() { }
+ ~StringDocWriter() {}
- void writeDocument( char *buf ) const {
- memcpy( buf, _data.c_str(), documentSize() );
- }
+ void writeDocument(char* buf) const {
+ memcpy(buf, _data.c_str(), documentSize());
+ }
- size_t documentSize() const { return _data.size() + 1; }
+ size_t documentSize() const {
+ return _data.size() + 1;
+ }
- bool addPadding() const { return _padding; }
+ bool addPadding() const {
+ return _padding;
+ }
- private:
- std::string _data;
- bool _padding;
- };
+private:
+ std::string _data;
+ bool _padding;
+};
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index b3a4495b2e1..b448bc7d1c0 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -36,407 +36,395 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
- TEST( RecordStoreTestHarness, Simple1 ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+TEST(RecordStoreTestHarness, Simple1) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- string s = "eliot was here";
+ string s = "eliot was here";
- RecordId loc1;
+ RecordId loc1;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
- uow.commit();
- }
-
- ASSERT_EQUALS( s, rs->dataFor( opCtx.get(), loc1 ).data() );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc1 = res.getValue();
+ uow.commit();
}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( s, rs->dataFor( opCtx.get(), loc1 ).data() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
-
- RecordData rd;
- ASSERT( !rs->findRecord( opCtx.get(), RecordId(111,17), &rd ) );
- ASSERT( rd.data() == NULL );
+ ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data());
+ }
- ASSERT( rs->findRecord( opCtx.get(), loc1, &rd ) );
- ASSERT_EQUALS( s, rd.data() );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
+ RecordData rd;
+ ASSERT(!rs->findRecord(opCtx.get(), RecordId(111, 17), &rd));
+ ASSERT(rd.data() == NULL);
- }
+ ASSERT(rs->findRecord(opCtx.get(), loc1, &rd));
+ ASSERT_EQUALS(s, rd.data());
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
}
- namespace {
- class DummyDocWriter : public DocWriter {
- public:
- virtual ~DummyDocWriter(){}
- virtual void writeDocument( char* buf ) const {
- memcpy( buf, "eliot", 6 );
- }
- virtual size_t documentSize() const { return 6; }
- virtual bool addPadding() const { return false; }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, rs->numRecords(opCtx.get()));
+ }
+}
- };
+namespace {
+class DummyDocWriter : public DocWriter {
+public:
+ virtual ~DummyDocWriter() {}
+ virtual void writeDocument(char* buf) const {
+ memcpy(buf, "eliot", 6);
+ }
+ virtual size_t documentSize() const {
+ return 6;
+ }
+ virtual bool addPadding() const {
+ return false;
}
+};
+}
- TEST( RecordStoreTestHarness, Simple1InsertDocWroter ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- RecordId loc1;
+TEST(RecordStoreTestHarness, Simple1InsertDocWroter) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- {
- WriteUnitOfWork uow( opCtx.get() );
- DummyDocWriter dw;
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), &dw, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
- uow.commit();
- }
+ RecordId loc1;
- ASSERT_EQUALS( string("eliot"), rs->dataFor( opCtx.get(), loc1 ).data() );
- }
- }
-
- TEST( RecordStoreTestHarness, Delete1 ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ DummyDocWriter dw;
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), &dw, false);
+ ASSERT_OK(res.getStatus());
+ loc1 = res.getValue();
+ uow.commit();
}
- string s = "eliot was here";
+ ASSERT_EQUALS(string("eliot"), rs->dataFor(opCtx.get(), loc1).data());
+ }
+}
- RecordId loc;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+TEST(RecordStoreTestHarness, Delete1) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- ASSERT_EQUALS( s, rs->dataFor( opCtx.get(), loc ).data() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- }
+ string s = "eliot was here";
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- {
- WriteUnitOfWork uow( opCtx.get() );
- rs->deleteRecord( opCtx.get(), loc );
- uow.commit();
- }
-
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc).data());
+ }
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
}
- TEST( RecordStoreTestHarness, Delete2 ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ rs->deleteRecord(opCtx.get(), loc);
+ uow.commit();
}
- string s = "eliot was here";
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+}
- RecordId loc;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
- ASSERT_OK( res.getStatus() );
- res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+TEST(RecordStoreTestHarness, Delete2) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( s, rs->dataFor( opCtx.get(), loc ).data() );
- ASSERT_EQUALS( 2, rs->numRecords( opCtx.get() ) );
- }
+ string s = "eliot was here";
+
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- rs->deleteRecord( opCtx.get(), loc );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ res = rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
}
- TEST( RecordStoreTestHarness, Update1 ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc).data());
+ ASSERT_EQUALS(2, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ rs->deleteRecord(opCtx.get(), loc);
+ uow.commit();
}
+ }
+}
- string s1 = "eliot was here";
- string s2 = "eliot was here again";
+TEST(RecordStoreTestHarness, Update1) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- RecordId loc;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- s1.c_str(), s1.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- }
+ string s1 = "eliot was here";
+ string s2 = "eliot was here again";
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( s1, rs->dataFor( opCtx.get(), loc ).data() );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), s1.c_str(), s1.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->updateRecord( opCtx.get(), loc,
- s2.c_str(), s2.size() + 1,
- false, NULL );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
-
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(s1, rs->dataFor(opCtx.get(), loc).data());
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- ASSERT_EQUALS( s2, rs->dataFor( opCtx.get(), loc ).data() );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->updateRecord(opCtx.get(), loc, s2.c_str(), s2.size() + 1, false, NULL);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
-
}
- TEST( RecordStoreTestHarness, UpdateInPlace1 ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ ASSERT_EQUALS(s2, rs->dataFor(opCtx.get(), loc).data());
+ }
+}
- if (!rs->updateWithDamagesSupported())
- return;
+TEST(RecordStoreTestHarness, UpdateInPlace1) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- string s1 = "aaa111bbb";
- string s2 = "aaa222bbb";
+ if (!rs->updateWithDamagesSupported())
+ return;
- RecordId loc;
- const RecordData s1Rec(s1.c_str(), s1.size() + 1);
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- s1Rec.data(),
- s1Rec.size(),
- -1 );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
-
- }
+ string s1 = "aaa111bbb";
+ string s2 = "aaa222bbb";
+ RecordId loc;
+ const RecordData s1Rec(s1.c_str(), s1.size() + 1);
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( s1, rs->dataFor( opCtx.get(), loc ).data() );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), s1Rec.data(), s1Rec.size(), -1);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- const char* damageSource = "222";
- mutablebson::DamageVector dv;
- dv.push_back( mutablebson::DamageEvent() );
- dv[0].sourceOffset = 0;
- dv[0].targetOffset = 3;
- dv[0].size = 3;
- Status res = rs->updateWithDamages( opCtx.get(),
- loc,
- s1Rec,
- damageSource,
- dv );
- ASSERT_OK( res );
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(s1, rs->dataFor(opCtx.get(), loc).data());
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( s2, rs->dataFor( opCtx.get(), loc ).data() );
+ WriteUnitOfWork uow(opCtx.get());
+ const char* damageSource = "222";
+ mutablebson::DamageVector dv;
+ dv.push_back(mutablebson::DamageEvent());
+ dv[0].sourceOffset = 0;
+ dv[0].targetOffset = 3;
+ dv[0].size = 3;
+ Status res = rs->updateWithDamages(opCtx.get(), loc, s1Rec, damageSource, dv);
+ ASSERT_OK(res);
+ uow.commit();
}
}
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(s2, rs->dataFor(opCtx.get(), loc).data());
+ }
+}
- TEST( RecordStoreTestHarness, Truncate1 ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
- string s = "eliot was here";
- RecordId loc;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+TEST(RecordStoreTestHarness, Truncate1) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ string s = "eliot was here";
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( s, rs->dataFor( opCtx.get(), loc ).data() );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- rs->truncate( opCtx.get() );
- uow.commit();
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc).data());
+ }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ rs->truncate(opCtx.get());
+ uow.commit();
}
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
}
+}
- TEST( RecordStoreTestHarness, Cursor1 ) {
- const int N = 10;
+TEST(RecordStoreTestHarness, Cursor1) {
+ const int N = 10;
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- for ( int i = 0; i < N; i++ ) {
- string s = str::stream() << "eliot" << i;
- ASSERT_OK( rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false ).getStatus() );
- }
- uow.commit();
+ WriteUnitOfWork uow(opCtx.get());
+ for (int i = 0; i < N; i++) {
+ string s = str::stream() << "eliot" << i;
+ ASSERT_OK(
+ rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false).getStatus());
}
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( N, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(N, rs->numRecords(opCtx.get()));
+ }
- {
- int x = 0;
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(opCtx.get());
- while (auto record = cursor->next()) {
- string s = str::stream() << "eliot" << x++;
- ASSERT_EQUALS(s, record->data.data());
- }
- ASSERT_EQUALS( N, x );
- ASSERT(!cursor->next());
+ {
+ int x = 0;
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(opCtx.get());
+ while (auto record = cursor->next()) {
+ string s = str::stream() << "eliot" << x++;
+ ASSERT_EQUALS(s, record->data.data());
}
+ ASSERT_EQUALS(N, x);
+ ASSERT(!cursor->next());
+ }
- {
- int x = N;
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(opCtx.get(), false);
- while (auto record = cursor->next()) {
- string s = str::stream() << "eliot" << --x;
- ASSERT_EQUALS(s, record->data.data());
- }
- ASSERT_EQUALS( 0, x );
- ASSERT(!cursor->next());
+ {
+ int x = N;
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(opCtx.get(), false);
+ while (auto record = cursor->next()) {
+ string s = str::stream() << "eliot" << --x;
+ ASSERT_EQUALS(s, record->data.data());
}
-
+ ASSERT_EQUALS(0, x);
+ ASSERT(!cursor->next());
}
-
+}
}
diff --git a/src/mongo/db/storage/record_store_test_harness.h b/src/mongo/db/storage/record_store_test_harness.h
index 003e7c398a1..e3cd758e545 100644
--- a/src/mongo/db/storage/record_store_test_harness.h
+++ b/src/mongo/db/storage/record_store_test_harness.h
@@ -35,25 +35,25 @@
namespace mongo {
- class RecordStore;
- class RecoveryUnit;
+class RecordStore;
+class RecoveryUnit;
- class HarnessHelper {
- public:
- HarnessHelper() : _serviceContext(), _client(_serviceContext.makeClient("hh")) {}
- virtual ~HarnessHelper(){}
+class HarnessHelper {
+public:
+ HarnessHelper() : _serviceContext(), _client(_serviceContext.makeClient("hh")) {}
+ virtual ~HarnessHelper() {}
- virtual RecordStore* newNonCappedRecordStore() = 0;
- virtual RecoveryUnit* newRecoveryUnit() = 0;
+ virtual RecordStore* newNonCappedRecordStore() = 0;
+ virtual RecoveryUnit* newRecoveryUnit() = 0;
- virtual OperationContext* newOperationContext() {
- return new OperationContextNoop(_client.get(), 1, newRecoveryUnit());
- }
+ virtual OperationContext* newOperationContext() {
+ return new OperationContextNoop(_client.get(), 1, newRecoveryUnit());
+ }
- private:
- ServiceContextNoop _serviceContext;
- ServiceContext::UniqueClient _client;
- };
+private:
+ ServiceContextNoop _serviceContext;
+ ServiceContext::UniqueClient _client;
+};
- HarnessHelper* newHarnessHelper();
+HarnessHelper* newHarnessHelper();
}
diff --git a/src/mongo/db/storage/record_store_test_insertrecord.cpp b/src/mongo/db/storage/record_store_test_insertrecord.cpp
index 18f72f8bab7..4f62256c8e1 100644
--- a/src/mongo/db/storage/record_store_test_insertrecord.cpp
+++ b/src/mongo/db/storage/record_store_test_insertrecord.cpp
@@ -42,144 +42,136 @@ using std::stringstream;
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
- // Insert a record and verify the number of entries in the collection is 1.
- TEST( RecordStoreTestHarness, InsertRecord ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record and verify the number of entries in the collection is 1.
+TEST(RecordStoreTestHarness, InsertRecord) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- string data = "my record";
- RecordId loc;
+ string data = "my record";
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
}
+}
- // Insert multiple records and verify the number of entries in the collection
- // equals the number that were inserted.
- TEST( RecordStoreTestHarness, InsertMultipleRecords ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert multiple records and verify the number of entries in the collection
+// equals the number that were inserted.
+TEST(RecordStoreTestHarness, InsertMultipleRecords) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ uow.commit();
}
}
- // Insert a record using a DocWriter and verify the number of entries
- // in the collection is 1.
- TEST( RecordStoreTestHarness, InsertRecordUsingDocWriter ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+// Insert a record using a DocWriter and verify the number of entries
+// in the collection is 1.
+TEST(RecordStoreTestHarness, InsertRecordUsingDocWriter) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- RecordId loc;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- StringDocWriter docWriter( "my record", false );
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- &docWriter,
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
+ StringDocWriter docWriter("my record", false);
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), &docWriter, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
}
- // Insert multiple records using a DocWriter and verify the number of entries
- // in the collection equals the number that were inserted.
- TEST( RecordStoreTestHarness, InsertMultipleRecordsUsingDocWriter ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+// Insert multiple records using a DocWriter and verify the number of entries
+// in the collection equals the number that were inserted.
+TEST(RecordStoreTestHarness, InsertMultipleRecordsUsingDocWriter) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- StringDocWriter docWriter( ss.str(), false );
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- &docWriter,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ StringDocWriter docWriter(ss.str(), false);
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), &docWriter, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ uow.commit();
}
}
-} // namespace mongo
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_manyiter.cpp b/src/mongo/db/storage/record_store_test_manyiter.cpp
index ec896be50a0..842e2681abc 100644
--- a/src/mongo/db/storage/record_store_test_manyiter.cpp
+++ b/src/mongo/db/storage/record_store_test_manyiter.cpp
@@ -43,74 +43,72 @@ using std::vector;
namespace mongo {
- using std::unique_ptr;
+using std::unique_ptr;
- // Create multiple iterators over an empty record store.
- TEST( RecordStoreTestHarness, GetManyIteratorsEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Create multiple iterators over an empty record store.
+TEST(RecordStoreTestHarness, GetManyIteratorsEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- for (auto&& cursor : rs->getManyCursors(opCtx.get())) {
- ASSERT(!cursor->next());
- ASSERT(!cursor->next());
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ for (auto&& cursor : rs->getManyCursors(opCtx.get())) {
+ ASSERT(!cursor->next());
+ ASSERT(!cursor->next());
}
}
+}
- // Create multiple iterators over a nonempty record store.
- TEST( RecordStoreTestHarness, GetManyIteratorsNonEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+// Create multiple iterators over a nonempty record store.
+TEST(RecordStoreTestHarness, GetManyIteratorsNonEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ uow.commit();
}
+ }
- set<RecordId> remain( locs, locs + nToInsert );
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- for (auto&& cursor : rs->getManyCursors(opCtx.get())) {
- while (auto record = cursor->next()) {
- ASSERT_EQ(remain.erase(record->id), size_t(1));
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- ASSERT(!cursor->next());
+ set<RecordId> remain(locs, locs + nToInsert);
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ for (auto&& cursor : rs->getManyCursors(opCtx.get())) {
+ while (auto record = cursor->next()) {
+ ASSERT_EQ(remain.erase(record->id), size_t(1));
}
- ASSERT( remain.empty() );
+
+ ASSERT(!cursor->next());
}
+ ASSERT(remain.empty());
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_recorditer.cpp b/src/mongo/db/storage/record_store_test_recorditer.cpp
index a4f33287ee3..cfd1a9b9c24 100644
--- a/src/mongo/db/storage/record_store_test_recorditer.cpp
+++ b/src/mongo/db/storage/record_store_test_recorditer.cpp
@@ -44,359 +44,345 @@ using std::stringstream;
namespace mongo {
- // Insert multiple records and iterate through them in the forward direction.
- // When curr() or getNext() is called on an iterator positioned at EOF,
- // the iterator returns RecordId() and stays at EOF.
- TEST( RecordStoreTestHarness, IterateOverMultipleRecords ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert multiple records and iterate through them in the forward direction.
+// When curr() or getNext() is called on an iterator positioned at EOF,
+// the iterator returns RecordId() and stays at EOF.
+TEST(RecordStoreTestHarness, IterateOverMultipleRecords) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ std::string datas[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ datas[i] = data;
+ uow.commit();
}
+ }
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- std::string datas[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- datas[i] = data;
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ std::sort(locs, locs + nToInsert); // inserted records may not be in RecordId order
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(opCtx.get());
+ for (int i = 0; i < nToInsert; i++) {
+ const auto record = cursor->next();
+ ASSERT(record);
+ ASSERT_EQUALS(locs[i], record->id);
+ ASSERT_EQUALS(datas[i], record->data.data());
}
+ ASSERT(!cursor->next());
+ }
+}
+
+// Insert multiple records and iterate through them in the reverse direction.
+// When curr() or getNext() is called on an iterator positioned at EOF,
+// the iterator returns RecordId() and stays at EOF.
+TEST(RecordStoreTestHarness, IterateOverMultipleRecordsReversed) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ std::string datas[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(opCtx.get());
- for ( int i = 0; i < nToInsert; i++ ) {
- const auto record = cursor->next();
- ASSERT(record);
- ASSERT_EQUALS( locs[i], record->id );
- ASSERT_EQUALS( datas[i], record->data.data() );
- }
- ASSERT(!cursor->next());
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ datas[i] = data;
+ uow.commit();
}
}
- // Insert multiple records and iterate through them in the reverse direction.
- // When curr() or getNext() is called on an iterator positioned at EOF,
- // the iterator returns RecordId() and stays at EOF.
- TEST( RecordStoreTestHarness, IterateOverMultipleRecordsReversed ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ std::sort(locs, locs + nToInsert); // inserted records may not be in RecordId order
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- std::string datas[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- datas[i] = data;
- uow.commit();
- }
+ auto cursor = rs->getCursor(opCtx.get(), false);
+ for (int i = nToInsert - 1; i >= 0; i--) {
+ const auto record = cursor->next();
+ ASSERT(record);
+ ASSERT_EQUALS(locs[i], record->id);
+ ASSERT_EQUALS(datas[i], record->data.data());
}
+ ASSERT(!cursor->next());
+ }
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
- }
+// Insert multiple records and try to create a forward iterator
+// starting at an interior position.
+TEST(RecordStoreTestHarness, IterateStartFromMiddle) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ std::string datas[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- auto cursor = rs->getCursor(opCtx.get(), false);
- for ( int i = nToInsert - 1; i >= 0; i-- ) {
- const auto record = cursor->next();
- ASSERT(record);
- ASSERT_EQUALS( locs[i], record->id );
- ASSERT_EQUALS( datas[i], record->data.data() );
- }
- ASSERT(!cursor->next());
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ datas[i] = data;
+ uow.commit();
}
}
- // Insert multiple records and try to create a forward iterator
- // starting at an interior position.
- TEST( RecordStoreTestHarness, IterateStartFromMiddle ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ std::sort(locs, locs + nToInsert); // inserted records may not be in RecordId order
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+
+ int start = nToInsert / 2;
+ auto cursor = rs->getCursor(opCtx.get());
+ for (int i = start; i < nToInsert; i++) {
+ const auto record = (i == start) ? cursor->seekExact(locs[i]) : cursor->next();
+ ASSERT(record);
+ ASSERT_EQUALS(locs[i], record->id);
+ ASSERT_EQUALS(datas[i], record->data.data());
}
+ ASSERT(!cursor->next());
+ }
+}
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- std::string datas[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- datas[i] = data;
- uow.commit();
- }
- }
+// Insert multiple records and try to create a reverse iterator
+// starting at an interior position.
+TEST(RecordStoreTestHarness, IterateStartFromMiddleReversed) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ std::string datas[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- int start = nToInsert / 2;
- auto cursor = rs->getCursor(opCtx.get());
- for ( int i = start; i < nToInsert; i++ ) {
- const auto record = (i == start) ? cursor->seekExact(locs[i]) : cursor->next();
- ASSERT(record);
- ASSERT_EQUALS( locs[i], record->id );
- ASSERT_EQUALS( datas[i], record->data.data() );
- }
- ASSERT(!cursor->next());
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ datas[i] = data;
+ uow.commit();
}
}
- // Insert multiple records and try to create a reverse iterator
- // starting at an interior position.
- TEST( RecordStoreTestHarness, IterateStartFromMiddleReversed ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- std::string datas[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- datas[i] = data;
- uow.commit();
- }
+ std::sort(locs, locs + nToInsert); // inserted records may not be in RecordId order
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+
+ int start = nToInsert / 2;
+ auto cursor = rs->getCursor(opCtx.get(), false);
+ for (int i = start; i >= 0; i--) {
+ const auto record = (i == start) ? cursor->seekExact(locs[i]) : cursor->next();
+ ASSERT(record);
+ ASSERT_EQUALS(locs[i], record->id);
+ ASSERT_EQUALS(datas[i], record->data.data());
}
+ ASSERT(!cursor->next());
+ }
+}
+
+// Insert several records, and iterate to the end. Ensure that the record iterator
+// is EOF. Add an additional record, saving and restoring the iterator state, and check
+// that the iterator remains EOF.
+TEST(RecordStoreTestHarness, RecordIteratorEOF) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ std::string datas[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
- }
+ StringBuilder sb;
+ sb << "record " << i;
+ string data = sb.str();
- std::sort( locs, locs + nToInsert ); // inserted records may not be in RecordId order
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- int start = nToInsert / 2;
- auto cursor = rs->getCursor(opCtx.get(), false);
- for ( int i = start; i >= 0; i-- ) {
- const auto record = (i == start) ? cursor->seekExact(locs[i]) : cursor->next();
- ASSERT(record);
- ASSERT_EQUALS( locs[i], record->id );
- ASSERT_EQUALS( datas[i], record->data.data() );
- }
- ASSERT(!cursor->next());
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ datas[i] = data;
+ uow.commit();
}
}
- // Insert several records, and iterate to the end. Ensure that the record iterator
- // is EOF. Add an additional record, saving and restoring the iterator state, and check
- // that the iterator remains EOF.
- TEST( RecordStoreTestHarness, RecordIteratorEOF ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- std::string datas[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- StringBuilder sb;
- sb << "record " << i;
- string data = sb.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- datas[i] = data;
- uow.commit();
- }
- }
+ // Get a forward iterator starting at the beginning of the record store.
+ auto cursor = rs->getCursor(opCtx.get());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ // Iterate, checking EOF along the way.
+ for (int i = 0; i < nToInsert; i++) {
+ const auto record = cursor->next();
+ ASSERT(record);
+ ASSERT_EQUALS(locs[i], record->id);
+ ASSERT_EQUALS(datas[i], record->data.data());
}
+ ASSERT(!cursor->next());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- // Get a forward iterator starting at the beginning of the record store.
- auto cursor = rs->getCursor(opCtx.get());
+ // Add a record and ensure we're still EOF.
+ cursor->savePositioned();
- // Iterate, checking EOF along the way.
- for ( int i = 0; i < nToInsert; i++ ) {
- const auto record = cursor->next();
- ASSERT(record);
- ASSERT_EQUALS( locs[i], record->id );
- ASSERT_EQUALS( datas[i], record->data.data() );
- }
- ASSERT(!cursor->next());
+ StringBuilder sb;
+ sb << "record " << nToInsert + 1;
+ string data = sb.str();
- // Add a record and ensure we're still EOF.
- cursor->savePositioned();
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
- StringBuilder sb;
- sb << "record " << nToInsert + 1;
- string data = sb.str();
+ ASSERT(cursor->restore(opCtx.get()));
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
+ // Iterator should still be EOF.
+ ASSERT(!cursor->next());
+ ASSERT(!cursor->next());
+ }
+}
- ASSERT( cursor->restore( opCtx.get() ) );
+// Test calling savePositioned and restore after each call to next
+TEST(RecordStoreTestHarness, RecordIteratorSavePositionedRestore) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- // Iterator should still be EOF.
- ASSERT(!cursor->next());
- ASSERT(!cursor->next());
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
}
- // Test calling savePositioned and restore after each call to next
- TEST( RecordStoreTestHarness, RecordIteratorSavePositionedRestore ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ std::string datas[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- std::string datas[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- StringBuilder sb;
- sb << "record " << i;
- string data = sb.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- datas[i] = data;
- uow.commit();
- }
- }
+ StringBuilder sb;
+ sb << "record " << i;
+ string data = sb.str();
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ datas[i] = data;
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
-
- // Get a forward iterator starting at the beginning of the record store.
- auto cursor = rs->getCursor(opCtx.get());
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- // Iterate, checking EOF along the way.
- for ( int i = 0; i < nToInsert; i++ ) {
- cursor->savePositioned();
- cursor->savePositioned(); // It is legal to save twice in a row.
- cursor->restore(opCtx.get());
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- const auto record = cursor->next();
- ASSERT(record);
- ASSERT_EQUALS( locs[i], record->id );
- ASSERT_EQUALS( datas[i], record->data.data() );
- }
+ // Get a forward iterator starting at the beginning of the record store.
+ auto cursor = rs->getCursor(opCtx.get());
+ // Iterate, checking EOF along the way.
+ for (int i = 0; i < nToInsert; i++) {
cursor->savePositioned();
- cursor->savePositioned(); // It is legal to save twice in a row.
+ cursor->savePositioned(); // It is legal to save twice in a row.
cursor->restore(opCtx.get());
- ASSERT(!cursor->next());
+ const auto record = cursor->next();
+ ASSERT(record);
+ ASSERT_EQUALS(locs[i], record->id);
+ ASSERT_EQUALS(datas[i], record->data.data());
}
+
+ cursor->savePositioned();
+ cursor->savePositioned(); // It is legal to save twice in a row.
+ cursor->restore(opCtx.get());
+
+ ASSERT(!cursor->next());
}
+}
-} // namespace mongo
+} // namespace mongo
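The savePositioned()/restore() assertions above encode the cursor yield contract these storage-engine tests rely on: a caller may save the cursor (even repeatedly), release and reacquire its locks, then restore and keep iterating without skipping or repeating records. A minimal sketch of that pattern, assuming the RecordStore/cursor interfaces exercised in the tests above (headers for OperationContext, RecordStore, and Record are omitted, and the scanAll helper and visit callback are hypothetical names, not part of the harness):

    #include <functional>

    // Hypothetical helper: full scan that saves/restores the cursor around each
    // record, as a real query plan would across yield points.
    void scanAll(OperationContext* opCtx,
                 RecordStore* rs,
                 const std::function<void(const Record&)>& visit) {
        auto cursor = rs->getCursor(opCtx);      // forward cursor from the start
        while (auto record = cursor->next()) {   // empty result means EOF
            visit(*record);                      // record->id / record->data, as in the tests
            cursor->savePositioned();            // legal to call more than once in a row
            // ...locks could be released and reacquired here...
            cursor->restore(opCtx);              // must succeed before calling next() again
        }
    }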
diff --git a/src/mongo/db/storage/record_store_test_recordstore.cpp b/src/mongo/db/storage/record_store_test_recordstore.cpp
index 14b21c15aac..2e2fb9c3bfd 100644
--- a/src/mongo/db/storage/record_store_test_recordstore.cpp
+++ b/src/mongo/db/storage/record_store_test_recordstore.cpp
@@ -39,33 +39,33 @@ using std::string;
namespace mongo {
- // Verify that the name of the record store is not NULL and nonempty.
- TEST ( RecordStoreTestHarness, RecordStoreName ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Verify that the name of the record store is not NULL and nonempty.
+TEST(RecordStoreTestHarness, RecordStoreName) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- const char *name = rs->name();
- ASSERT( name != NULL && name[0] != '\0' );
- }
+ {
+ const char* name = rs->name();
+ ASSERT(name != NULL && name[0] != '\0');
}
+}
- // Verify that the namespace of the record store is nonempty.
- TEST( RecordStoreTestHarness, Namespace ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Verify that the namespace of the record store is nonempty.
+TEST(RecordStoreTestHarness, Namespace) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- string ns = rs->ns();
- ASSERT( ns[0] != '\0' );
- }
+ {
+ string ns = rs->ns();
+ ASSERT(ns[0] != '\0');
}
+}
- // Call isCapped() on a non-capped collection and verify the result is false.
- TEST( RecordStoreTestHarness, IsNotCapped ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
- ASSERT( !rs->isCapped() );
- }
+// Call isCapped() on a non-capped collection and verify the result is false.
+TEST(RecordStoreTestHarness, IsNotCapped) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+ ASSERT(!rs->isCapped());
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_repairiter.cpp b/src/mongo/db/storage/record_store_test_repairiter.cpp
index b3e11e137d4..56abdcb6b14 100644
--- a/src/mongo/db/storage/record_store_test_repairiter.cpp
+++ b/src/mongo/db/storage/record_store_test_repairiter.cpp
@@ -43,128 +43,126 @@ using std::stringstream;
namespace mongo {
- // Create an iterator for repairing an empty record store.
- TEST( RecordStoreTestHarness, GetIteratorForRepairEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Create an iterator for repairing an empty record store.
+TEST(RecordStoreTestHarness, GetIteratorForRepairEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursorForRepair(opCtx.get());
+ // returns NULL if getCursorForRepair is not supported
+ if (!cursor) {
+ return;
}
+ ASSERT(!cursor->next());
+ }
+}
+
+// Insert multiple records and create an iterator for repairing the record store,
+// even though it has not been corrupted.
+TEST(RecordStoreTestHarness, GetIteratorForRepairNonEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursorForRepair( opCtx.get() );
- // returns NULL if getCursorForRepair is not supported
- if (!cursor) {
- return;
- }
- ASSERT(!cursor->next());
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ uow.commit();
}
}
- // Insert multiple records and create an iterator for repairing the record store,
- // even though the it has not been corrupted.
- TEST( RecordStoreTestHarness, GetIteratorForRepairNonEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ set<RecordId> remain(locs, locs + nToInsert);
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursorForRepair(opCtx.get());
+ // returns NULL if getCursorForRepair is not supported
+ if (!cursor) {
+ return;
}
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- uow.commit();
- }
+ while (auto record = cursor->next()) {
+ remain.erase(record->id); // can happen more than once per doc
}
+ ASSERT(remain.empty());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
- }
+ ASSERT(!cursor->next());
+ }
+}
+
+// Insert a single record. Create a repair iterator pointing to that single record.
+// Then invalidate the record and ensure that the repair iterator responds correctly.
+// See SERVER-16300.
+TEST(RecordStoreTestHarness, GetIteratorForRepairInvalidateSingleton) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQ(0, rs->numRecords(opCtx.get()));
+ }
- set<RecordId> remain( locs, locs + nToInsert );
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursorForRepair( opCtx.get() );
- // returns NULL if getCursorForRepair is not supported
- if (!cursor) {
- return;
- }
-
- while (auto record = cursor->next()) {
- remain.erase(record->id); // can happen more than once per doc
- }
- ASSERT( remain.empty() );
-
- ASSERT(!cursor->next());
- }
+ // Insert one record.
+ RecordId idToInvalidate;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "some data", 10, false);
+ ASSERT_OK(res.getStatus());
+ idToInvalidate = res.getValue();
+ uow.commit();
}
- // Insert a single record. Create a repair iterator pointing to that single record.
- // Then invalidate the record and ensure that the repair iterator responds correctly.
- // See SERVER-16300.
- TEST(RecordStoreTestHarness, GetIteratorForRepairInvalidateSingleton) {
- unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
- unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+ // Double-check that the record store has one record in it now.
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQ(1, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- ASSERT_EQ(0, rs->numRecords(opCtx.get()));
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursorForRepair(opCtx.get());
+ // returns NULL if getCursorForRepair is not supported
+ if (!cursor) {
+ return;
}
- // Insert one record.
- RecordId idToInvalidate;
- {
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- WriteUnitOfWork uow(opCtx.get());
- StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "some data", 10, false);
- ASSERT_OK(res.getStatus());
- idToInvalidate = res.getValue();
- uow.commit();
- }
+ // We should be pointing at the only record in the store.
- // Double-check that the record store has one record in it now.
- {
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- ASSERT_EQ(1, rs->numRecords(opCtx.get()));
- }
+ // Invalidate the record we're pointing at.
+ cursor->savePositioned();
+ cursor->invalidate(idToInvalidate);
+ cursor->restore(opCtx.get());
- {
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- auto cursor = rs->getCursorForRepair( opCtx.get() );
- // returns NULL if getCursorForRepair is not supported
- if (!cursor) {
- return;
- }
-
- // We should be pointing at the only record in the store.
-
- // Invalidate the record we're pointing at.
- cursor->savePositioned();
- cursor->invalidate(idToInvalidate);
- cursor->restore(opCtx.get());
-
- // Iterator should be EOF now because the only thing in the collection got deleted.
- ASSERT(!cursor->next());
- }
+ // Iterator should be EOF now because the only thing in the collection got deleted.
+ ASSERT(!cursor->next());
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_storagesize.cpp b/src/mongo/db/storage/record_store_test_storagesize.cpp
index a0d5cfab024..61303b7967d 100644
--- a/src/mongo/db/storage/record_store_test_storagesize.cpp
+++ b/src/mongo/db/storage/record_store_test_storagesize.cpp
@@ -40,43 +40,41 @@ using std::stringstream;
namespace mongo {
- // Verify that a nonempty collection maybe takes up some space on disk.
- TEST( RecordStoreTestHarness, StorageSizeNonEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Verify that a nonempty collection may take up some space on disk.
+TEST(RecordStoreTestHarness, StorageSizeNonEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ {
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( rs->storageSize( opCtx.get(), NULL ) >= 0 );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(rs->storageSize(opCtx.get(), NULL) >= 0);
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_touch.cpp b/src/mongo/db/storage/record_store_test_touch.cpp
index 80b8909611c..c765bc2638b 100644
--- a/src/mongo/db/storage/record_store_test_touch.cpp
+++ b/src/mongo/db/storage/record_store_test_touch.cpp
@@ -40,131 +40,127 @@ using std::stringstream;
namespace mongo {
- // Verify that calling touch() on an empty collection returns an OK status.
- TEST( RecordStoreTestHarness, TouchEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- BSONObjBuilder stats;
- Status status = rs->touch( opCtx.get(), &stats );
- ASSERT( status.isOK() || status.code() == ErrorCodes::CommandNotSupported );
- }
- }
+// Verify that calling touch() on an empty collection returns an OK status.
+TEST(RecordStoreTestHarness, TouchEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
}
- // Insert multiple records, and verify that calling touch() on a nonempty collection
- // returns an OK status.
- TEST( RecordStoreTestHarness, TouchNonEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ BSONObjBuilder stats;
+ Status status = rs->touch(opCtx.get(), &stats);
+ ASSERT(status.isOK() || status.code() == ErrorCodes::CommandNotSupported);
}
+ }
+}
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
- }
+// Insert multiple records, and verify that calling touch() on a nonempty collection
+// returns an OK status.
+TEST(RecordStoreTestHarness, TouchNonEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- BSONObjBuilder stats;
- // XXX does not verify the collection was loaded into cache
- // (even if supported by storage engine)
- Status status = rs->touch( opCtx.get(), &stats );
- ASSERT( status.isOK() || status.code() == ErrorCodes::CommandNotSupported );
- }
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
}
- // Verify that calling touch() on an empty collection returns an OK status,
- // even when NULL is passed in for the stats output.
- TEST( RecordStoreTestHarness, TouchEmptyWithNullStats ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ BSONObjBuilder stats;
+ // XXX does not verify the collection was loaded into cache
+ // (even if supported by storage engine)
+ Status status = rs->touch(opCtx.get(), &stats);
+ ASSERT(status.isOK() || status.code() == ErrorCodes::CommandNotSupported);
}
+ }
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- Status status = rs->touch( opCtx.get(), NULL /* stats output */ );
- ASSERT( status.isOK() || status.code() == ErrorCodes::CommandNotSupported );
- }
+// Verify that calling touch() on an empty collection returns an OK status,
+// even when NULL is passed in for the stats output.
+TEST(RecordStoreTestHarness, TouchEmptyWithNullStats) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
}
- // Insert multiple records, and verify that calling touch() on a nonempty collection
- // returns an OK status, even when NULL is passed in for the stats output.
- TEST( RecordStoreTestHarness, TouchNonEmptyWithNullStats ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ Status status = rs->touch(opCtx.get(), NULL /* stats output */);
+ ASSERT(status.isOK() || status.code() == ErrorCodes::CommandNotSupported);
+ }
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+// Insert multiple records, and verify that calling touch() on a nonempty collection
+// returns an OK status, even when NULL is passed in for the stats output.
+TEST(RecordStoreTestHarness, TouchNonEmptyWithNullStats) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- // XXX does not verify the collection was loaded into cache
- // (even if supported by storage engine)
- Status status = rs->touch( opCtx.get(), NULL /* stats output */ );
- ASSERT( status.isOK() || status.code() == ErrorCodes::CommandNotSupported );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ // XXX does not verify the collection was loaded into cache
+ // (even if supported by storage engine)
+ Status status = rs->touch(opCtx.get(), NULL /* stats output */);
+ ASSERT(status.isOK() || status.code() == ErrorCodes::CommandNotSupported);
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_truncate.cpp b/src/mongo/db/storage/record_store_test_truncate.cpp
index 08870cc141f..b38ee3cb806 100644
--- a/src/mongo/db/storage/record_store_test_truncate.cpp
+++ b/src/mongo/db/storage/record_store_test_truncate.cpp
@@ -40,78 +40,76 @@ using std::stringstream;
namespace mongo {
- // Verify that calling truncate() on an already empty collection returns an OK status.
- TEST( RecordStoreTestHarness, TruncateEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( rs->truncate( opCtx.get() ) );
- uow.commit();
- }
- }
+// Verify that calling truncate() on an already empty collection returns an OK status.
+TEST(RecordStoreTestHarness, TruncateEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(rs->truncate(opCtx.get()));
+ uow.commit();
}
}
- // Insert multiple records, and verify that calling truncate() on a nonempty collection
- // removes all of them and returns an OK status.
- TEST( RecordStoreTestHarness, TruncateNonEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+// Insert multiple records, and verify that calling truncate() on a nonempty collection
+// removes all of them and returns an OK status.
+TEST(RecordStoreTestHarness, TruncateNonEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( rs->truncate( opCtx.get() ) );
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(rs->truncate(opCtx.get()));
+ uow.commit();
}
}
-} // namespace mongo
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp
index 260ce0e7e9e..0d7c9433503 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp
@@ -43,202 +43,184 @@ using std::stringstream;
namespace mongo {
- // Insert a record and try to update it.
- TEST( RecordStoreTestHarness, UpdateRecord ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record and try to update it.
+TEST(RecordStoreTestHarness, UpdateRecord) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ string data = "my record";
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- string data = "my record";
- RecordId loc;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+ data = "updated record-";
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
-
- data = "updated record-";
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
- loc,
- data.c_str(),
- data.size() + 1,
- false,
- NULL );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->updateRecord(opCtx.get(), loc, data.c_str(), data.size() + 1, false, NULL);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- RecordData record = rs->dataFor( opCtx.get(), loc );
- ASSERT_EQUALS( data.size() + 1, static_cast<size_t>( record.size() ) );
- ASSERT_EQUALS( data, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), loc);
+ ASSERT_EQUALS(data.size() + 1, static_cast<size_t>(record.size()));
+ ASSERT_EQUALS(data, record.data());
}
}
+}
+
+// Insert multiple records and try to update them.
+TEST(RecordStoreTestHarness, UpdateMultipleRecords) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- // Insert multiple records and try to update them.
- TEST( RecordStoreTestHarness, UpdateMultipleRecords ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ const int nToInsert = 10;
+ RecordId locs[nToInsert];
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ stringstream ss;
+ ss << "record " << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ uow.commit();
}
+ }
- const int nToInsert = 10;
- RecordId locs[nToInsert];
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "record " << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- uow.commit();
- }
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
+ }
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
- }
-
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "update record-" << i;
- string data = ss.str();
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
- locs[i],
- data.c_str(),
- data.size() + 1,
- false,
- NULL );
- ASSERT_OK( res.getStatus() );
- locs[i] = res.getValue();
- uow.commit();
- }
+ stringstream ss;
+ ss << "update record-" << i;
+ string data = ss.str();
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->updateRecord(opCtx.get(), locs[i], data.c_str(), data.size() + 1, false, NULL);
+ ASSERT_OK(res.getStatus());
+ locs[i] = res.getValue();
+ uow.commit();
}
+ }
- for ( int i = 0; i < nToInsert; i++ ) {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- stringstream ss;
- ss << "update record-" << i;
- string data = ss.str();
+ for (int i = 0; i < nToInsert; i++) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ {
+ stringstream ss;
+ ss << "update record-" << i;
+ string data = ss.str();
- RecordData record = rs->dataFor( opCtx.get(), locs[i] );
- ASSERT_EQUALS( data.size() + 1, static_cast<size_t>( record.size() ) );
- ASSERT_EQUALS( data, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), locs[i]);
+ ASSERT_EQUALS(data.size() + 1, static_cast<size_t>(record.size()));
+ ASSERT_EQUALS(data, record.data());
}
}
+}
- // Insert a record, try to update it, and examine how the UpdateNotifier is called.
- TEST( RecordStoreTestHarness, UpdateRecordWithMoveNotifier ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record, try to update it, and examine how the UpdateNotifier is called.
+TEST(RecordStoreTestHarness, UpdateRecordWithMoveNotifier) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- string oldData = "my record";
- RecordId loc;
+ string oldData = "my record";
+ RecordId loc;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- oldData.c_str(),
- oldData.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ rs->insertRecord(opCtx.get(), oldData.c_str(), oldData.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
- string newData = "my updated record--";
+ string newData = "my updated record--";
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- UpdateNotifierSpy umn( opCtx.get(), loc, oldData.c_str(), oldData.size() );
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->updateRecord( opCtx.get(),
- loc,
- newData.c_str(),
- newData.size() + 1,
- false,
- &umn );
- ASSERT_OK( res.getStatus() );
- // UpdateNotifier::recordStoreGoingToMove() called only if
- // the RecordId for the record changes
- if ( loc == res.getValue() ) {
- ASSERT_EQUALS( 0, umn.numMoveCallbacks() );
- // Only MMAP v1 is required to use the UpdateNotifier for in-place updates,
- // so the number of callbacks is expected to be 0 for non-MMAP storage engines.
- ASSERT_GTE( 1, umn.numInPlaceCallbacks() );
- } else {
- ASSERT_EQUALS( 1, umn.numMoveCallbacks() );
- ASSERT_EQUALS( 0, umn.numInPlaceCallbacks() );
- }
- loc = res.getValue();
- uow.commit();
+ UpdateNotifierSpy umn(opCtx.get(), loc, oldData.c_str(), oldData.size());
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->updateRecord(
+ opCtx.get(), loc, newData.c_str(), newData.size() + 1, false, &umn);
+ ASSERT_OK(res.getStatus());
+ // UpdateNotifier::recordStoreGoingToMove() called only if
+ // the RecordId for the record changes
+ if (loc == res.getValue()) {
+ ASSERT_EQUALS(0, umn.numMoveCallbacks());
+ // Only MMAP v1 is required to use the UpdateNotifier for in-place updates,
+ // so the number of callbacks is expected to be 0 for non-MMAP storage engines.
+ ASSERT_GTE(1, umn.numInPlaceCallbacks());
+ } else {
+ ASSERT_EQUALS(1, umn.numMoveCallbacks());
+ ASSERT_EQUALS(0, umn.numInPlaceCallbacks());
}
+ loc = res.getValue();
+ uow.commit();
}
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- RecordData record = rs->dataFor( opCtx.get(), loc );
- ASSERT_EQUALS( newData.size() + 1, static_cast<size_t>( record.size() ) );
- ASSERT_EQUALS( newData, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), loc);
+ ASSERT_EQUALS(newData.size() + 1, static_cast<size_t>(record.size()));
+ ASSERT_EQUALS(newData, record.data());
}
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.h b/src/mongo/db/storage/record_store_test_updaterecord.h
index 36000c6fc21..f82feb6b592 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.h
+++ b/src/mongo/db/storage/record_store_test_updaterecord.h
@@ -38,51 +38,48 @@
namespace mongo {
namespace {
- class UpdateNotifierSpy : public UpdateNotifier {
- public:
- UpdateNotifierSpy( OperationContext* txn, const RecordId &loc,
- const char *buf, size_t size )
- : _txn( txn ),
- _loc( loc ),
- _data( buf, size ),
- nMoveCalls( 0 ),
- nInPlaceCalls( 0 ) {
- }
+class UpdateNotifierSpy : public UpdateNotifier {
+public:
+ UpdateNotifierSpy(OperationContext* txn, const RecordId& loc, const char* buf, size_t size)
+ : _txn(txn), _loc(loc), _data(buf, size), nMoveCalls(0), nInPlaceCalls(0) {}
- ~UpdateNotifierSpy() { }
+ ~UpdateNotifierSpy() {}
- Status recordStoreGoingToMove( OperationContext *txn,
- const RecordId &oldLocation,
- const char *oldBuffer,
- size_t oldSize ) {
- nMoveCalls++;
- ASSERT_EQUALS( _txn, txn );
- ASSERT_EQUALS( _loc, oldLocation );
- ASSERT_EQUALS( _data, oldBuffer );
- return Status::OK();
- }
+ Status recordStoreGoingToMove(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* oldBuffer,
+ size_t oldSize) {
+ nMoveCalls++;
+ ASSERT_EQUALS(_txn, txn);
+ ASSERT_EQUALS(_loc, oldLocation);
+ ASSERT_EQUALS(_data, oldBuffer);
+ return Status::OK();
+ }
- Status recordStoreGoingToUpdateInPlace( OperationContext* txn,
- const RecordId& loc ) {
- nInPlaceCalls++;
- ASSERT_EQUALS( _txn, txn );
- ASSERT_EQUALS( _loc, loc );
- return Status::OK();
- }
+ Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
+ nInPlaceCalls++;
+ ASSERT_EQUALS(_txn, txn);
+ ASSERT_EQUALS(_loc, loc);
+ return Status::OK();
+ }
- int numMoveCallbacks() const { return nMoveCalls; }
+ int numMoveCallbacks() const {
+ return nMoveCalls;
+ }
- int numInPlaceCallbacks() const { return nInPlaceCalls; }
+ int numInPlaceCallbacks() const {
+ return nInPlaceCalls;
+ }
- private:
- OperationContext *_txn;
- RecordId _loc;
- std::string _data;
+private:
+ OperationContext* _txn;
+ RecordId _loc;
+ std::string _data;
- // To verify the number of callbacks to the notifier.
- int nMoveCalls;
- int nInPlaceCalls;
- };
+ // To verify the number of callbacks to the notifier.
+ int nMoveCalls;
+ int nInPlaceCalls;
+};
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
index f5dc1353f15..701d5fd58f0 100644
--- a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
+++ b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp
@@ -41,251 +41,239 @@ using std::string;
namespace mongo {
- // Insert a record and try to perform an in-place update on it.
- TEST( RecordStoreTestHarness, UpdateWithDamages ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record and try to perform an in-place update on it.
+TEST(RecordStoreTestHarness, UpdateWithDamages) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- if (!rs->updateWithDamagesSupported())
- return;
+ if (!rs->updateWithDamagesSupported())
+ return;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- string data = "00010111";
- RecordId loc;
- const RecordData rec(data.c_str(), data.size() + 1);
+ string data = "00010111";
+ RecordId loc;
+ const RecordData rec(data.c_str(), data.size() + 1);
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- rec.data(),
- rec.size(),
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), rec.data(), rec.size(), false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- mutablebson::DamageVector dv( 3 );
- dv[0].sourceOffset = 5;
- dv[0].targetOffset = 0;
- dv[0].size = 2;
- dv[1].sourceOffset = 3;
- dv[1].targetOffset = 2;
- dv[1].size = 3;
- dv[2].sourceOffset = 0;
- dv[2].targetOffset = 5;
- dv[2].size = 3;
-
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) );
- uow.commit();
- }
+ mutablebson::DamageVector dv(3);
+ dv[0].sourceOffset = 5;
+ dv[0].targetOffset = 0;
+ dv[0].size = 2;
+ dv[1].sourceOffset = 3;
+ dv[1].targetOffset = 2;
+ dv[1].size = 3;
+ dv[2].sourceOffset = 0;
+ dv[2].targetOffset = 5;
+ dv[2].size = 3;
+
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(rs->updateWithDamages(opCtx.get(), loc, rec, data.c_str(), dv));
+ uow.commit();
}
+ }
- data = "11101000";
+ data = "11101000";
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- RecordData record = rs->dataFor( opCtx.get(), loc );
- ASSERT_EQUALS( data, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), loc);
+ ASSERT_EQUALS(data, record.data());
}
}
+}
- // Insert a record and try to perform an in-place update on it with a DamageVector
- // containing overlapping DamageEvents.
- TEST( RecordStoreTestHarness, UpdateWithOverlappingDamageEvents ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record and try to perform an in-place update on it with a DamageVector
+// containing overlapping DamageEvents.
+TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEvents) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- if (!rs->updateWithDamagesSupported())
- return;
+ if (!rs->updateWithDamagesSupported())
+ return;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- string data = "00010111";
- RecordId loc;
- const RecordData rec(data.c_str(), data.size() + 1);
+ string data = "00010111";
+ RecordId loc;
+ const RecordData rec(data.c_str(), data.size() + 1);
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- rec.data(),
- rec.size(),
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), rec.data(), rec.size(), false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- mutablebson::DamageVector dv( 2 );
- dv[0].sourceOffset = 3;
- dv[0].targetOffset = 0;
- dv[0].size = 5;
- dv[1].sourceOffset = 0;
- dv[1].targetOffset = 3;
- dv[1].size = 5;
-
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) );
- uow.commit();
- }
+ mutablebson::DamageVector dv(2);
+ dv[0].sourceOffset = 3;
+ dv[0].targetOffset = 0;
+ dv[0].size = 5;
+ dv[1].sourceOffset = 0;
+ dv[1].targetOffset = 3;
+ dv[1].size = 5;
+
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(rs->updateWithDamages(opCtx.get(), loc, rec, data.c_str(), dv));
+ uow.commit();
}
+ }
- data = "10100010";
+ data = "10100010";
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- RecordData record = rs->dataFor( opCtx.get(), loc );
- ASSERT_EQUALS( data, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), loc);
+ ASSERT_EQUALS(data, record.data());
}
}
+}
- // Insert a record and try to perform an in-place update on it with a DamageVector
- // containing overlapping DamageEvents. The changes should be applied in the order
- // specified by the DamageVector, and not -- for instance -- by the targetOffset.
- TEST( RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record and try to perform an in-place update on it with a DamageVector
+// containing overlapping DamageEvents. The changes should be applied in the order
+// specified by the DamageVector, and not -- for instance -- by the targetOffset.
+TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- if (!rs->updateWithDamagesSupported())
- return;
+ if (!rs->updateWithDamagesSupported())
+ return;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- string data = "00010111";
- RecordId loc;
- const RecordData rec(data.c_str(), data.size() + 1);
+ string data = "00010111";
+ RecordId loc;
+ const RecordData rec(data.c_str(), data.size() + 1);
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- rec.data(),
- rec.size(),
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), rec.data(), rec.size(), false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- mutablebson::DamageVector dv( 2 );
- dv[0].sourceOffset = 0;
- dv[0].targetOffset = 3;
- dv[0].size = 5;
- dv[1].sourceOffset = 3;
- dv[1].targetOffset = 0;
- dv[1].size = 5;
-
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) );
- uow.commit();
- }
+ mutablebson::DamageVector dv(2);
+ dv[0].sourceOffset = 0;
+ dv[0].targetOffset = 3;
+ dv[0].size = 5;
+ dv[1].sourceOffset = 3;
+ dv[1].targetOffset = 0;
+ dv[1].size = 5;
+
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(rs->updateWithDamages(opCtx.get(), loc, rec, data.c_str(), dv));
+ uow.commit();
}
+ }
- data = "10111010";
+ data = "10111010";
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- RecordData record = rs->dataFor( opCtx.get(), loc );
- ASSERT_EQUALS( data, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), loc);
+ ASSERT_EQUALS(data, record.data());
}
}
+}
- // Insert a record and try to call updateWithDamages() with an empty DamageVector.
- TEST( RecordStoreTestHarness, UpdateWithNoDamages ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Insert a record and try to call updateWithDamages() with an empty DamageVector.
+TEST(RecordStoreTestHarness, UpdateWithNoDamages) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- if (!rs->updateWithDamagesSupported())
- return;
+ if (!rs->updateWithDamagesSupported())
+ return;
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- string data = "my record";
- RecordId loc;
- const RecordData rec(data.c_str(), data.size() + 1);
+ string data = "my record";
+ RecordId loc;
+ const RecordData rec(data.c_str(), data.size() + 1);
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
- rec.data(),
- rec.size(),
- false );
- ASSERT_OK( res.getStatus() );
- loc = res.getValue();
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), rec.data(), rec.size(), false);
+ ASSERT_OK(res.getStatus());
+ loc = res.getValue();
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- mutablebson::DamageVector dv;
-
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, "", dv ) );
- uow.commit();
- }
+ mutablebson::DamageVector dv;
+
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(rs->updateWithDamages(opCtx.get(), loc, rec, "", dv));
+ uow.commit();
}
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- RecordData record = rs->dataFor( opCtx.get(), loc );
- ASSERT_EQUALS( data, record.data() );
- }
+ RecordData record = rs->dataFor(opCtx.get(), loc);
+ ASSERT_EQUALS(data, record.data());
}
}
+}
-} // namespace mongo
+} // namespace mongo
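
The updateWithDamages() tests above depend on how a DamageVector is applied. A minimal sketch, assuming only the DamageEvent fields the tests use (sourceOffset, targetOffset, size) and that each event copies bytes from a separate damage-source buffer into the record; applyDamages() is a hypothetical helper for illustration, not part of the RecordStore API.

#include <cstring>
#include <string>
#include <vector>

// Hypothetical stand-in for mutablebson::DamageEvent, limited to the fields used above.
struct DamageEvent {
    size_t sourceOffset;
    size_t targetOffset;
    size_t size;
};

// Apply each event in vector order: copy 'size' bytes from the damage source into the record.
std::string applyDamages(std::string record,
                         const char* source,
                         const std::vector<DamageEvent>& dv) {
    for (const DamageEvent& e : dv) {
        std::memcpy(&record[e.targetOffset], source + e.sourceOffset, e.size);
    }
    return record;
}

With the record and source both "00010111" and the reversed events from the test ({0 -> 3, 5 bytes} then {3 -> 0, 5 bytes}), applying in vector order yields "10111010", the value the test asserts; applying by ascending targetOffset instead would yield "10100010", which is why the ordering guarantee matters.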
diff --git a/src/mongo/db/storage/record_store_test_validate.cpp b/src/mongo/db/storage/record_store_test_validate.cpp
index 89453520073..71790a376a3 100644
--- a/src/mongo/db/storage/record_store_test_validate.cpp
+++ b/src/mongo/db/storage/record_store_test_validate.cpp
@@ -41,211 +41,211 @@ using std::string;
namespace mongo {
namespace {
- // Verify that calling validate() on an empty collection returns an OK status.
- // When either of `full` or `scanData` are false, the ValidateAdaptor
- // should not be used.
- TEST( RecordStoreTestHarness, ValidateEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- ValidateAdaptorSpy adaptor;
- ValidateResults results;
- BSONObjBuilder stats;
- ASSERT_OK( rs->validate( opCtx.get(),
- false, // full validate
- false, // scan data
- &adaptor,
- &results,
- &stats ) );
- ASSERT( results.valid );
- ASSERT( results.errors.empty() );
- }
- }
+// Verify that calling validate() on an empty collection returns an OK status.
+// When either of `full` or `scanData` are false, the ValidateAdaptor
+// should not be used.
+TEST(RecordStoreTestHarness, ValidateEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
}
- // Verify that calling validate() on an empty collection returns an OK status.
- // When either of `full` or `scanData` are false, the ValidateAdaptor
- // should not be used.
- TEST( RecordStoreTestHarness, ValidateEmptyAndScanData ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
-
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- ValidateAdaptorSpy adaptor;
- ValidateResults results;
- BSONObjBuilder stats;
- ASSERT_OK( rs->validate( opCtx.get(),
- false, // full validate
- true, // scan data
- &adaptor,
- &results,
- &stats ) );
- ASSERT( results.valid );
- ASSERT( results.errors.empty() );
- }
+ ValidateAdaptorSpy adaptor;
+ ValidateResults results;
+ BSONObjBuilder stats;
+ ASSERT_OK(rs->validate(opCtx.get(),
+ false, // full validate
+ false, // scan data
+ &adaptor,
+ &results,
+ &stats));
+ ASSERT(results.valid);
+ ASSERT(results.errors.empty());
}
}
+}
+
+// Verify that calling validate() on an empty collection returns an OK status.
+// When either of `full` or `scanData` are false, the ValidateAdaptor
+// should not be used.
+TEST(RecordStoreTestHarness, ValidateEmptyAndScanData) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
- // Verify that calling validate() on an empty collection returns an OK status.
- // When either of `full` or `scanData` are false, the ValidateAdaptor
- // should not be used.
- TEST( RecordStoreTestHarness, FullValidateEmpty ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
+ ValidateAdaptorSpy adaptor;
+ ValidateResults results;
+ BSONObjBuilder stats;
+ ASSERT_OK(rs->validate(opCtx.get(),
+ false, // full validate
+ true, // scan data
+ &adaptor,
+ &results,
+ &stats));
+ ASSERT(results.valid);
+ ASSERT(results.errors.empty());
}
+ }
+}
+
+// Verify that calling validate() on an empty collection returns an OK status.
+// When either of `full` or `scanData` are false, the ValidateAdaptor
+// should not be used.
+TEST(RecordStoreTestHarness, FullValidateEmpty) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- ValidateAdaptorSpy adaptor;
- ValidateResults results;
- BSONObjBuilder stats;
- ASSERT_OK( rs->validate( opCtx.get(),
- true, // full validate
- false, // scan data
- &adaptor,
- &results,
- &stats ) );
- ASSERT( results.valid );
- ASSERT( results.errors.empty() );
- }
+ ValidateAdaptorSpy adaptor;
+ ValidateResults results;
+ BSONObjBuilder stats;
+ ASSERT_OK(rs->validate(opCtx.get(),
+ true, // full validate
+ false, // scan data
+ &adaptor,
+ &results,
+ &stats));
+ ASSERT(results.valid);
+ ASSERT(results.errors.empty());
}
}
+}
- // Verify that calling validate() on an empty collection returns an OK status.
- TEST( RecordStoreTestHarness, FullValidateEmptyAndScanData ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+// Verify that calling validate() on an empty collection returns an OK status.
+TEST(RecordStoreTestHarness, FullValidateEmptyAndScanData) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
+ }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- ValidateAdaptorSpy adaptor;
- ValidateResults results;
- BSONObjBuilder stats;
- ASSERT_OK( rs->validate( opCtx.get(),
- true, // full validate
- true, // scan data
- &adaptor,
- &results,
- &stats ) );
- ASSERT( results.valid );
- ASSERT( results.errors.empty() );
- }
+ ValidateAdaptorSpy adaptor;
+ ValidateResults results;
+ BSONObjBuilder stats;
+ ASSERT_OK(rs->validate(opCtx.get(),
+ true, // full validate
+ true, // scan data
+ &adaptor,
+ &results,
+ &stats));
+ ASSERT(results.valid);
+ ASSERT(results.errors.empty());
}
}
-
- // Insert multiple records, and verify that calling validate() on a nonempty collection
- // returns an OK status. When either of `full` or `scanData` are false, the ValidateAdaptor
- // should not be used.
- TEST_F( ValidateTest, ValidateNonEmpty ) {
+}
+
+// Insert multiple records, and verify that calling validate() on a nonempty collection
+// returns an OK status. When either of `full` or `scanData` are false, the ValidateAdaptor
+// should not be used.
+TEST_F(ValidateTest, ValidateNonEmpty) {
+ {
+ unique_ptr<OperationContext> opCtx(newOperationContext());
{
- unique_ptr<OperationContext> opCtx( newOperationContext() );
- {
- ValidateAdaptorSpy adaptor;
- ValidateResults results;
- BSONObjBuilder stats;
- ASSERT_OK( getRecordStore().validate( opCtx.get(),
- false, // full validate
- false, // scan data
- &adaptor,
- &results,
- &stats ) );
- ASSERT( results.valid );
- ASSERT( results.errors.empty() );
- }
+ ValidateAdaptorSpy adaptor;
+ ValidateResults results;
+ BSONObjBuilder stats;
+ ASSERT_OK(getRecordStore().validate(opCtx.get(),
+ false, // full validate
+ false, // scan data
+ &adaptor,
+ &results,
+ &stats));
+ ASSERT(results.valid);
+ ASSERT(results.errors.empty());
}
}
-
- // Insert multiple records, and verify that calling validate() on a nonempty collection
- // returns an OK status. When either of `full` or `scanData` are false, the ValidateAdaptor
- // should not be used.
- TEST_F( ValidateTest, ValidateAndScanDataNonEmpty ) {
+}
+
+// Insert multiple records, and verify that calling validate() on a nonempty collection
+// returns an OK status. When either of `full` or `scanData` are false, the ValidateAdaptor
+// should not be used.
+TEST_F(ValidateTest, ValidateAndScanDataNonEmpty) {
+ {
+ unique_ptr<OperationContext> opCtx(newOperationContext());
{
- unique_ptr<OperationContext> opCtx( newOperationContext() );
- {
- ValidateAdaptorSpy adaptor;
- ValidateResults results;
- BSONObjBuilder stats;
- ASSERT_OK( getRecordStore().validate( opCtx.get(),
- false, // full validate
- true, // scan data
- &adaptor,
- &results,
- &stats ) );
- ASSERT( results.valid );
- ASSERT( results.errors.empty() );
- }
+ ValidateAdaptorSpy adaptor;
+ ValidateResults results;
+ BSONObjBuilder stats;
+ ASSERT_OK(getRecordStore().validate(opCtx.get(),
+ false, // full validate
+ true, // scan data
+ &adaptor,
+ &results,
+ &stats));
+ ASSERT(results.valid);
+ ASSERT(results.errors.empty());
}
}
-
- // Insert multiple records, and verify that calling validate() on a nonempty collection
- // returns an OK status. When either of `full` or `scanData` are false, the ValidateAdaptor
- // should not be used.
- TEST_F( ValidateTest, FullValidateNonEmpty ) {
+}
+
+// Insert multiple records, and verify that calling validate() on a nonempty collection
+// returns an OK status. When either of `full` or `scanData` are false, the ValidateAdaptor
+// should not be used.
+TEST_F(ValidateTest, FullValidateNonEmpty) {
+ {
+ unique_ptr<OperationContext> opCtx(newOperationContext());
{
- unique_ptr<OperationContext> opCtx( newOperationContext() );
- {
- ValidateAdaptorSpy adaptor;
- ValidateResults results;
- BSONObjBuilder stats;
- ASSERT_OK( getRecordStore().validate( opCtx.get(),
- true, // full validate
- false, // scan data
- &adaptor,
- &results,
- &stats ) );
- ASSERT( results.valid );
- ASSERT( results.errors.empty() );
- }
+ ValidateAdaptorSpy adaptor;
+ ValidateResults results;
+ BSONObjBuilder stats;
+ ASSERT_OK(getRecordStore().validate(opCtx.get(),
+ true, // full validate
+ false, // scan data
+ &adaptor,
+ &results,
+ &stats));
+ ASSERT(results.valid);
+ ASSERT(results.errors.empty());
}
}
+}
- // Insert multiple records, and verify that calling validate() on a nonempty collection
- // returns an OK status.
- TEST_F( ValidateTest, FullValidateNonEmptyAndScanData ) {
+// Insert multiple records, and verify that calling validate() on a nonempty collection
+// returns an OK status.
+TEST_F(ValidateTest, FullValidateNonEmptyAndScanData) {
+ {
+ unique_ptr<OperationContext> opCtx(newOperationContext());
{
- unique_ptr<OperationContext> opCtx( newOperationContext() );
- {
- ValidateAdaptorSpy adaptor( getInsertedRecords() );
- ValidateResults results;
- BSONObjBuilder stats;
- ASSERT_OK( getRecordStore().validate( opCtx.get(),
- true, // full validate
- true, // scan data
- &adaptor,
- &results,
- &stats ) );
- ASSERT( adaptor.allValidated() );
- ASSERT( results.valid );
- ASSERT( results.errors.empty() );
- }
+ ValidateAdaptorSpy adaptor(getInsertedRecords());
+ ValidateResults results;
+ BSONObjBuilder stats;
+ ASSERT_OK(getRecordStore().validate(opCtx.get(),
+ true, // full validate
+ true, // scan data
+ &adaptor,
+ &results,
+ &stats));
+ ASSERT(adaptor.allValidated());
+ ASSERT(results.valid);
+ ASSERT(results.errors.empty());
}
}
+}
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
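
The comments on these tests spell out the contract: the ValidateAdaptor callback is consulted only when both full and scanData are true. A minimal sketch of an adaptor with the same signature as the ValidateAdaptorSpy defined in record_store_test_validate.h below; CountingValidateAdaptor is a hypothetical name used only for illustration.

// Hypothetical adaptor that counts the records handed to it; per the test comments
// above, it should only be invoked by validate(full=true, scanData=true).
class CountingValidateAdaptor : public ValidateAdaptor {
public:
    Status validate(const RecordData& recordData, size_t* dataSize) {
        ++_seen;
        *dataSize = recordData.size();
        return Status::OK();
    }

    int seen() const {
        return _seen;
    }

private:
    int _seen = 0;
};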
diff --git a/src/mongo/db/storage/record_store_test_validate.h b/src/mongo/db/storage/record_store_test_validate.h
index e8ff30acfa3..e7b435e4738 100644
--- a/src/mongo/db/storage/record_store_test_validate.h
+++ b/src/mongo/db/storage/record_store_test_validate.h
@@ -39,81 +39,81 @@
namespace mongo {
namespace {
- class ValidateAdaptorSpy : public ValidateAdaptor {
- public:
- ValidateAdaptorSpy() { }
+class ValidateAdaptorSpy : public ValidateAdaptor {
+public:
+ ValidateAdaptorSpy() {}
- ValidateAdaptorSpy( const std::set<std::string> &remain )
- : _remain( remain ) {
- }
+ ValidateAdaptorSpy(const std::set<std::string>& remain) : _remain(remain) {}
- ~ValidateAdaptorSpy() { }
+ ~ValidateAdaptorSpy() {}
- Status validate( const RecordData &recordData, size_t *dataSize ) {
- std::string s( recordData.data() );
- ASSERT( 1 == _remain.erase( s ) );
+ Status validate(const RecordData& recordData, size_t* dataSize) {
+ std::string s(recordData.data());
+ ASSERT(1 == _remain.erase(s));
- *dataSize = recordData.size();
- return Status::OK();
- }
+ *dataSize = recordData.size();
+ return Status::OK();
+ }
- bool allValidated() { return _remain.empty(); }
+ bool allValidated() {
+ return _remain.empty();
+ }
- private:
- std::set<std::string> _remain; // initially contains all inserted records
- };
+private:
+ std::set<std::string> _remain; // initially contains all inserted records
+};
- class ValidateTest : public mongo::unittest::Test {
- public:
- ValidateTest()
- : _harnessHelper( newHarnessHelper() ),
- _rs( _harnessHelper->newNonCappedRecordStore() ) {
- }
+class ValidateTest : public mongo::unittest::Test {
+public:
+ ValidateTest()
+ : _harnessHelper(newHarnessHelper()), _rs(_harnessHelper->newNonCappedRecordStore()) {}
- OperationContext* newOperationContext() {
- return _harnessHelper->newOperationContext();
- }
+ OperationContext* newOperationContext() {
+ return _harnessHelper->newOperationContext();
+ }
- RecordStore& getRecordStore() { return *_rs; }
+ RecordStore& getRecordStore() {
+ return *_rs;
+ }
- const std::set<std::string>& getInsertedRecords() { return _remain; }
+ const std::set<std::string>& getInsertedRecords() {
+ return _remain;
+ }
- void setUp() {
- {
- std::unique_ptr<OperationContext> opCtx( newOperationContext() );
- ASSERT_EQUALS( 0, _rs->numRecords( opCtx.get() ) );
- }
-
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- std::unique_ptr<OperationContext> opCtx( newOperationContext() );
- {
- std::stringstream ss;
- ss << "record " << i;
- std::string data = ss.str();
- ASSERT( _remain.insert( data ).second );
-
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = _rs->insertRecord( opCtx.get(),
- data.c_str(),
- data.size() + 1,
- false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
- }
+ void setUp() {
+ {
+ std::unique_ptr<OperationContext> opCtx(newOperationContext());
+ ASSERT_EQUALS(0, _rs->numRecords(opCtx.get()));
+ }
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ std::unique_ptr<OperationContext> opCtx(newOperationContext());
{
- std::unique_ptr<OperationContext> opCtx( newOperationContext() );
- ASSERT_EQUALS( nToInsert, _rs->numRecords( opCtx.get() ) );
+ std::stringstream ss;
+ ss << "record " << i;
+ std::string data = ss.str();
+ ASSERT(_remain.insert(data).second);
+
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res =
+ _rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
}
- private:
- std::unique_ptr<HarnessHelper> _harnessHelper;
- std::unique_ptr<RecordStore> _rs;
- std::set<std::string> _remain;
- };
+ {
+ std::unique_ptr<OperationContext> opCtx(newOperationContext());
+ ASSERT_EQUALS(nToInsert, _rs->numRecords(opCtx.get()));
+ }
+ }
+
+private:
+ std::unique_ptr<HarnessHelper> _harnessHelper;
+ std::unique_ptr<RecordStore> _rs;
+ std::set<std::string> _remain;
+};
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/storage/recovery_unit.h b/src/mongo/db/storage/recovery_unit.h
index 77bb7a33dd1..d671516fcbd 100644
--- a/src/mongo/db/storage/recovery_unit.h
+++ b/src/mongo/db/storage/recovery_unit.h
@@ -37,134 +37,135 @@
namespace mongo {
- class BSONObjBuilder;
- class OperationContext;
+class BSONObjBuilder;
+class OperationContext;
+
+/**
+ * A RecoveryUnit is responsible for ensuring that data is persisted.
+ * All on-disk information must be mutated through this interface.
+ */
+class RecoveryUnit {
+ MONGO_DISALLOW_COPYING(RecoveryUnit);
+
+public:
+ virtual ~RecoveryUnit() {}
+
+ virtual void reportState(BSONObjBuilder* b) const {}
+
+ virtual void beingReleasedFromOperationContext() {}
+ virtual void beingSetOnOperationContext() {}
+
+ /**
+ * These should be called through WriteUnitOfWork rather than directly.
+ *
+ * A call to 'beginUnitOfWork' marks the beginning of a unit of work. Each call to
+ * 'beginUnitOfWork' must be matched with exactly one call to either 'commitUnitOfWork' or
+     * 'abortUnitOfWork'. When 'abortUnitOfWork' is called, all changes made since the
+     * beginning of the unit of work will be rolled back.
+ */
+ virtual void beginUnitOfWork(OperationContext* opCtx) = 0;
+ virtual void commitUnitOfWork() = 0;
+ virtual void abortUnitOfWork() = 0;
+
+ /**
+ * Waits until all writes prior to this call are durable. Returns true, unless the storage
+ * engine cannot guarantee durability, which should never happen when isDurable() returned
+ * true.
+ */
+ virtual bool waitUntilDurable() = 0;
+
+ /**
+ * This is a hint to the engine that this transaction is going to call waitUntilDurable at
+ * the end. This should be called before any work is done so that transactions can be
+ * configured correctly.
+ */
+ virtual void goingToWaitUntilDurable() {}
+
+ /**
+ * When this is called, if there is an open transaction, it is closed. On return no
+ * transaction is active. This cannot be called inside of a WriteUnitOfWork, and should
+ * fail if it is.
+ */
+ virtual void abandonSnapshot() = 0;
+
+ virtual SnapshotId getSnapshotId() const = 0;
/**
- * A RecoveryUnit is responsible for ensuring that data is persisted.
- * All on-disk information must be mutated through this interface.
+ * A Change is an action that is registerChange()'d while a WriteUnitOfWork exists. The
+ * change is either rollback()'d or commit()'d when the WriteUnitOfWork goes out of scope.
+ *
+ * Neither rollback() nor commit() may fail or throw exceptions.
+ *
+ * Change implementors are responsible for handling their own locking, and must be aware
+ * that rollback() and commit() may be called after resources with a shorter lifetime than
+ * the WriteUnitOfWork have been freed. Each registered change will be committed or rolled
+ * back once.
*/
- class RecoveryUnit {
- MONGO_DISALLOW_COPYING(RecoveryUnit);
+ class Change {
public:
- virtual ~RecoveryUnit() { }
-
- virtual void reportState( BSONObjBuilder* b ) const { }
-
- virtual void beingReleasedFromOperationContext() {}
- virtual void beingSetOnOperationContext() {}
-
- /**
- * These should be called through WriteUnitOfWork rather than directly.
- *
- * A call to 'beginUnitOfWork' marks the beginning of a unit of work. Each call to
- * 'beginUnitOfWork' must be matched with exactly one call to either 'commitUnitOfWork' or
- * 'abortUnitOfWork'. When 'abortUnitOfWork' is called, all changes made since the begin
- * of the unit of work will be rolled back.
- */
- virtual void beginUnitOfWork(OperationContext* opCtx) = 0;
- virtual void commitUnitOfWork() = 0;
- virtual void abortUnitOfWork() = 0;
-
- /**
- * Waits until all writes prior to this call are durable. Returns true, unless the storage
- * engine cannot guarantee durability, which should never happen when isDurable() returned
- * true.
- */
- virtual bool waitUntilDurable() = 0;
-
- /**
- * This is a hint to the engine that this transaction is going to call waitUntilDurable at
- * the end. This should be called before any work is done so that transactions can be
- * configured correctly.
- */
- virtual void goingToWaitUntilDurable() { }
-
- /**
- * When this is called, if there is an open transaction, it is closed. On return no
- * transaction is active. This cannot be called inside of a WriteUnitOfWork, and should
- * fail if it is.
- */
- virtual void abandonSnapshot() = 0;
-
- virtual SnapshotId getSnapshotId() const = 0;
-
- /**
- * A Change is an action that is registerChange()'d while a WriteUnitOfWork exists. The
- * change is either rollback()'d or commit()'d when the WriteUnitOfWork goes out of scope.
- *
- * Neither rollback() nor commit() may fail or throw exceptions.
- *
- * Change implementors are responsible for handling their own locking, and must be aware
- * that rollback() and commit() may be called after resources with a shorter lifetime than
- * the WriteUnitOfWork have been freed. Each registered change will be committed or rolled
- * back once.
- */
- class Change {
- public:
- virtual ~Change() { }
-
- virtual void rollback() = 0;
- virtual void commit() = 0;
- };
-
- /**
- * The RecoveryUnit takes ownership of the change. The commitUnitOfWork() method calls the
- * commit() method of each registered change in order of registration. The endUnitOfWork()
- * method calls the rollback() method of each registered Change in reverse order of
- * registration. Either will unregister and delete the changes.
- *
- * The registerChange() method may only be called when a WriteUnitOfWork is active, and
- * may not be called during commit or rollback.
- */
- virtual void registerChange(Change* change) = 0;
-
- //
- // The remaining methods probably belong on DurRecoveryUnit rather than on the interface.
- //
-
- /**
- * Declare that the data at [x, x + len) is being written.
- */
- virtual void* writingPtr(void* data, size_t len) = 0;
-
- //
- // Syntactic sugar
- //
-
- /**
- * Declare write intent for an int
- */
- inline int& writingInt(int& d) {
- return *writing(&d);
- }
-
- /**
- * A templated helper for writingPtr.
- */
- template <typename T>
- inline T* writing(T* x) {
- writingPtr(x, sizeof(T));
- return x;
- }
-
- /**
- * Sets a flag that declares this RecoveryUnit will skip rolling back writes, for the
- * duration of the current outermost WriteUnitOfWork. This function can only be called
- * between a pair of unnested beginUnitOfWork() / endUnitOfWork() calls.
- * The flag is cleared when endUnitOfWork() is called.
- * While the flag is set, rollback will skip rolling back writes, but custom rollback
- * change functions are still called. Clearly, this functionality should only be used when
- * writing to temporary collections that can be cleaned up externally. For example,
- * foreground index builds write to a temporary collection; if something goes wrong that
- * normally requires a rollback, we can instead clean up the index by dropping the entire
- * index.
- * Setting the flag may permit increased performance.
- */
- virtual void setRollbackWritesDisabled() = 0;
-
- protected:
- RecoveryUnit() { }
+ virtual ~Change() {}
+
+ virtual void rollback() = 0;
+ virtual void commit() = 0;
};
+ /**
+ * The RecoveryUnit takes ownership of the change. The commitUnitOfWork() method calls the
+ * commit() method of each registered change in order of registration. The endUnitOfWork()
+ * method calls the rollback() method of each registered Change in reverse order of
+ * registration. Either will unregister and delete the changes.
+ *
+ * The registerChange() method may only be called when a WriteUnitOfWork is active, and
+ * may not be called during commit or rollback.
+ */
+ virtual void registerChange(Change* change) = 0;
+
+ //
+ // The remaining methods probably belong on DurRecoveryUnit rather than on the interface.
+ //
+
+ /**
+ * Declare that the data at [x, x + len) is being written.
+ */
+ virtual void* writingPtr(void* data, size_t len) = 0;
+
+ //
+ // Syntactic sugar
+ //
+
+ /**
+ * Declare write intent for an int
+ */
+ inline int& writingInt(int& d) {
+ return *writing(&d);
+ }
+
+ /**
+ * A templated helper for writingPtr.
+ */
+ template <typename T>
+ inline T* writing(T* x) {
+ writingPtr(x, sizeof(T));
+ return x;
+ }
+
+ /**
+ * Sets a flag that declares this RecoveryUnit will skip rolling back writes, for the
+ * duration of the current outermost WriteUnitOfWork. This function can only be called
+ * between a pair of unnested beginUnitOfWork() / endUnitOfWork() calls.
+ * The flag is cleared when endUnitOfWork() is called.
+ * While the flag is set, rollback will skip rolling back writes, but custom rollback
+ * change functions are still called. Clearly, this functionality should only be used when
+ * writing to temporary collections that can be cleaned up externally. For example,
+ * foreground index builds write to a temporary collection; if something goes wrong that
+ * normally requires a rollback, we can instead clean up the index by dropping the entire
+ * index.
+ * Setting the flag may permit increased performance.
+ */
+ virtual void setRollbackWritesDisabled() = 0;
+
+protected:
+ RecoveryUnit() {}
+};
+
} // namespace mongo
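
A minimal sketch of the Change contract documented in this header, assuming an OperationContext that exposes its RecoveryUnit via recoveryUnit() and the WriteUnitOfWork wrapper used by the tests elsewhere in this diff; CounterIncrementChange and incrementInUnitOfWork are hypothetical names for illustration only.

// Hypothetical Change: keep an in-memory counter consistent with a unit of work.
// Ownership passes to the RecoveryUnit via registerChange(), as documented above.
class CounterIncrementChange : public RecoveryUnit::Change {
public:
    explicit CounterIncrementChange(long long* counter) : _counter(counter) {}

    void commit() final {
        // Nothing to undo; the increment performed below is kept.
    }

    void rollback() final {
        --*_counter;  // must not throw; reverses the speculative increment
    }

private:
    long long* _counter;
};

void incrementInUnitOfWork(OperationContext* txn, long long* counter) {
    WriteUnitOfWork wuow(txn);
    ++*counter;
    txn->recoveryUnit()->registerChange(new CounterIncrementChange(counter));
    // ... matching on-disk writes would go here ...
    wuow.commit();  // registered changes are commit()'d in registration order;
                    // an abandoned WriteUnitOfWork runs rollback() in reverse order
}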
diff --git a/src/mongo/db/storage/recovery_unit_noop.h b/src/mongo/db/storage/recovery_unit_noop.h
index c324bc330a8..d8f7e69adb1 100644
--- a/src/mongo/db/storage/recovery_unit_noop.h
+++ b/src/mongo/db/storage/recovery_unit_noop.h
@@ -35,53 +35,53 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
- class RecoveryUnitNoop : public RecoveryUnit {
- public:
- void beginUnitOfWork(OperationContext* opCtx) final {}
- void commitUnitOfWork() final {
- for (auto& change : _changes) {
- try {
- change->commit();
- }
- catch (...) {
- std::terminate();
- }
+class RecoveryUnitNoop : public RecoveryUnit {
+public:
+ void beginUnitOfWork(OperationContext* opCtx) final {}
+ void commitUnitOfWork() final {
+ for (auto& change : _changes) {
+ try {
+ change->commit();
+ } catch (...) {
+ std::terminate();
}
- _changes.clear();
}
- void abortUnitOfWork() final {
- for (auto it = _changes.rbegin(); it != _changes.rend(); ++it) {
- try {
- (*it)->rollback();
- }
- catch (...) {
- std::terminate();
- }
+ _changes.clear();
+ }
+ void abortUnitOfWork() final {
+ for (auto it = _changes.rbegin(); it != _changes.rend(); ++it) {
+ try {
+ (*it)->rollback();
+ } catch (...) {
+ std::terminate();
}
- _changes.clear();
}
+ _changes.clear();
+ }
- virtual void abandonSnapshot() {}
+ virtual void abandonSnapshot() {}
- virtual bool waitUntilDurable() {
- return true;
- }
+ virtual bool waitUntilDurable() {
+ return true;
+ }
- virtual void registerChange(Change* change) {
- _changes.push_back(std::unique_ptr<Change>(change));
- }
+ virtual void registerChange(Change* change) {
+ _changes.push_back(std::unique_ptr<Change>(change));
+ }
- virtual void* writingPtr(void* data, size_t len) {
- return data;
- }
- virtual void setRollbackWritesDisabled() {}
+ virtual void* writingPtr(void* data, size_t len) {
+ return data;
+ }
+ virtual void setRollbackWritesDisabled() {}
- virtual SnapshotId getSnapshotId() const { return SnapshotId(); }
+ virtual SnapshotId getSnapshotId() const {
+ return SnapshotId();
+ }
- private:
- std::vector<std::unique_ptr<Change>> _changes;
- };
+private:
+ std::vector<std::unique_ptr<Change>> _changes;
+};
} // namespace mongo
diff --git a/src/mongo/db/storage/snapshot.h b/src/mongo/db/storage/snapshot.h
index 5d432ba3cbc..6ce5b57e51d 100644
--- a/src/mongo/db/storage/snapshot.h
+++ b/src/mongo/db/storage/snapshot.h
@@ -34,56 +34,60 @@
namespace mongo {
- class SnapshotId {
- static const uint64_t kNullId = 0;
- public:
- SnapshotId()
- : _id(kNullId) {
- }
-
- // 0 is NULL
- explicit SnapshotId(uint64_t id)
- : _id(id) {
- invariant(id != kNullId);
- }
-
- bool isNull() const { return _id == kNullId; }
-
- bool operator==(const SnapshotId& other) const {
- return _id == other._id;
- }
-
- bool operator!=(const SnapshotId& other) const {
- return _id != other._id;
- }
-
- private:
- uint64_t _id;
- };
-
- template<typename T>
- class Snapshotted {
- public:
- Snapshotted()
- : _id(), _value() {
- }
-
- Snapshotted(SnapshotId id, const T& value ) :
- _id(id), _value(value) {
- }
-
- void reset() {
- *this = Snapshotted();
- }
-
- void setValue(const T& t) { _value = t; }
-
- SnapshotId snapshotId() const { return _id; }
- const T& value() const { return _value; }
- T& value() { return _value; }
-
- private:
- SnapshotId _id;
- T _value;
- };
+class SnapshotId {
+ static const uint64_t kNullId = 0;
+
+public:
+ SnapshotId() : _id(kNullId) {}
+
+ // 0 is NULL
+ explicit SnapshotId(uint64_t id) : _id(id) {
+ invariant(id != kNullId);
+ }
+
+ bool isNull() const {
+ return _id == kNullId;
+ }
+
+ bool operator==(const SnapshotId& other) const {
+ return _id == other._id;
+ }
+
+ bool operator!=(const SnapshotId& other) const {
+ return _id != other._id;
+ }
+
+private:
+ uint64_t _id;
+};
+
+template <typename T>
+class Snapshotted {
+public:
+ Snapshotted() : _id(), _value() {}
+
+ Snapshotted(SnapshotId id, const T& value) : _id(id), _value(value) {}
+
+ void reset() {
+ *this = Snapshotted();
+ }
+
+ void setValue(const T& t) {
+ _value = t;
+ }
+
+ SnapshotId snapshotId() const {
+ return _id;
+ }
+ const T& value() const {
+ return _value;
+ }
+ T& value() {
+ return _value;
+ }
+
+private:
+ SnapshotId _id;
+ T _value;
+};
}
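
A short sketch of how SnapshotId and Snapshotted<T> are meant to pair a value with the snapshot it was read under, assuming the RecoveryUnit::getSnapshotId() accessor declared earlier in this diff and the RecordStore::dataFor()/RecordData::toBson() calls used by the harness tests; fetchDocument() and isStillCurrent() are hypothetical helpers.

// Hypothetical read path: tag the fetched BSONObj with the snapshot it was read under.
Snapshotted<BSONObj> fetchDocument(OperationContext* txn, RecordStore* rs, const RecordId& loc) {
    BSONObj obj = rs->dataFor(txn, loc).toBson();
    return Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(), obj);
}

// A consumer can later check whether the value still belongs to the active snapshot
// and re-fetch if the snapshot has changed.
bool isStillCurrent(OperationContext* txn, const Snapshotted<BSONObj>& doc) {
    return doc.snapshotId() == txn->recoveryUnit()->getSnapshotId();
}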
diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h
index 006fa7ff4dd..2836c7c4814 100644
--- a/src/mongo/db/storage/sorted_data_interface.h
+++ b/src/mongo/db/storage/sorted_data_interface.h
@@ -39,336 +39,337 @@
namespace mongo {
- class BSONObjBuilder;
- class BucketDeletionNotification;
- class SortedDataBuilderInterface;
+class BSONObjBuilder;
+class BucketDeletionNotification;
+class SortedDataBuilderInterface;
+
+/**
+ * This interface is a work in progress. Notes below:
+ *
+ * This interface began as the SortedDataInterface, a way to hide the fact that there were two
+ * on-disk formats for the btree. With the introduction of other storage engines, this
+ * interface was generalized to provide access to sorted data. Specifically:
+ *
+ * 1. Many other storage engines provide different Btree(-ish) implementations. This interface
+ * could allow those interfaces to avoid storing btree buckets in an already sorted structure.
+ *
+ * TODO: See if there is actually a performance gain.
+ *
+ * 2. The existing btree implementation is written to assume that if it modifies a record it is
+ * modifying the underlying record. This interface is an attempt to work around that.
+ *
+ * TODO: See if this actually works.
+ */
+class SortedDataInterface {
+public:
+ virtual ~SortedDataInterface() {}
+
+ //
+ // Data changes
+ //
/**
- * This interface is a work in progress. Notes below:
+ * Return a bulk builder for 'this' index.
*
- * This interface began as the SortedDataInterface, a way to hide the fact that there were two
- * on-disk formats for the btree. With the introduction of other storage engines, this
- * interface was generalized to provide access to sorted data. Specifically:
+ * Implementations can assume that 'this' index outlives its bulk
+ * builder.
*
- * 1. Many other storage engines provide different Btree(-ish) implementations. This interface
- * could allow those interfaces to avoid storing btree buckets in an already sorted structure.
+ * @param txn the transaction under which keys are added to 'this' index
+ * @param dupsAllowed true if duplicate keys are allowed, and false
+ * otherwise
*
- * TODO: See if there is actually a performance gain.
+ * @return caller takes ownership
+ */
+ virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) = 0;
+
+ /**
+ * Insert an entry into the index with the specified key and RecordId.
+ *
+ * @param txn the transaction under which the insert takes place
+ * @param dupsAllowed true if duplicate keys are allowed, and false
+ * otherwise
*
- * 2. The existing btree implementation is written to assume that if it modifies a record it is
- * modifying the underlying record. This interface is an attempt to work around that.
+ * @return Status::OK() if the insert succeeded,
*
- * TODO: See if this actually works.
+ * ErrorCodes::DuplicateKey if 'key' already exists in 'this' index
+ * at a RecordId other than 'loc' and duplicates were not allowed
*/
- class SortedDataInterface {
- public:
- virtual ~SortedDataInterface() { }
+ virtual Status insert(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) = 0;
- //
- // Data changes
- //
+ /**
+ * Remove the entry from the index with the specified key and RecordId.
+ *
+ * @param txn the transaction under which the remove takes place
+ * @param dupsAllowed true if duplicate keys are allowed, and false
+ * otherwise
+ */
+ virtual void unindex(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) = 0;
- /**
- * Return a bulk builder for 'this' index.
- *
- * Implementations can assume that 'this' index outlives its bulk
- * builder.
- *
- * @param txn the transaction under which keys are added to 'this' index
- * @param dupsAllowed true if duplicate keys are allowed, and false
- * otherwise
- *
- * @return caller takes ownership
- */
- virtual SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) = 0;
+ /**
+ * Return ErrorCodes::DuplicateKey if 'key' already exists in 'this'
+ * index at a RecordId other than 'loc', and Status::OK() otherwise.
+ *
+ * @param txn the transaction under which this operation takes place
+ *
+ * TODO: Hide this by exposing an update method?
+ */
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc) = 0;
+ //
+ // Information about the tree
+ //
+
+ /**
+ * 'output' is used to store results of validate when 'full' is true.
+ * If 'full' is false, 'output' may be NULL.
+ *
+ * TODO: expose full set of args for testing?
+ */
+ virtual void fullValidate(OperationContext* txn,
+ bool full,
+ long long* numKeysOut,
+ BSONObjBuilder* output) const = 0;
+
+ virtual bool appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* output,
+ double scale) const = 0;
+
+
+ /**
+ * Return the number of bytes consumed by 'this' index.
+ *
+ * @param txn the transaction under which this operation takes place
+ *
+ * @see IndexAccessMethod::getSpaceUsedBytes
+ */
+ virtual long long getSpaceUsedBytes(OperationContext* txn) const = 0;
+
+ /**
+ * Return true if 'this' index is empty, and false otherwise.
+ */
+ virtual bool isEmpty(OperationContext* txn) = 0;
+
+ /**
+ * Attempt to bring the entirety of 'this' index into memory.
+ *
+ * If the underlying storage engine does not support the operation,
+ * returns ErrorCodes::CommandNotSupported
+ *
+ * @return Status::OK()
+ */
+ virtual Status touch(OperationContext* txn) const {
+ return Status(ErrorCodes::CommandNotSupported,
+ "this storage engine does not support touch");
+ }
+
+ /**
+ * Return the number of entries in 'this' index.
+ *
+ * The default implementation should be overridden with a more
+ * efficient one if at all possible.
+ */
+ virtual long long numEntries(OperationContext* txn) const {
+ long long x = -1;
+ fullValidate(txn, false, &x, NULL);
+ return x;
+ }
+
+ /**
+ * Navigates over the sorted data.
+ *
+ * A cursor is constructed with a direction flag with the following effects:
+ * - The direction that next() moves.
+ * - If a seek method hits an exact match on key, forward cursors will be positioned on
+ * the first value for that key, reverse cursors on the last.
+ * - If a seek method or restore does not hit an exact match, cursors will be
+ * positioned on the closest position *after* the query in the direction of the
+ * search.
+ * - The end position is on the "far" side of the query. In a forward cursor that means
+ * that it is the lowest value for the key if the end is exclusive or the first entry
+ * past the key if the end is inclusive or there are no exact matches.
+ *
+ * A cursor is tied to a transaction, such as the OperationContext or a WriteUnitOfWork
+ * inside that context. Any cursor acquired inside a transaction is invalid outside
+ * of that transaction, instead use the save and restore methods to reestablish the cursor.
+ *
+ * Any method other than the save methods may throw WriteConflict exception. If that
+ * happens, the cursor may not be used again until it has been saved and successfully
+ * restored. If next() or restore() throw a WCE the cursor's position will be the same as
+ * before the call (strong exception guarantee). All other methods leave the cursor in a
+ * valid state but with an unspecified position (basic exception guarantee). All methods
+ * only provide the basic guarantee for exceptions other than WCE.
+ *
+ * Any returned unowned BSON is only valid until the next call to any method on this
+ * interface. The implementations must assume that passed-in unowned BSON is only valid for
+ * the duration of the call.
+ *
+ * Implementations may override any default implementation if they can provide a more
+ * efficient implementation.
+ */
+ class Cursor {
+ public:
/**
- * Insert an entry into the index with the specified key and RecordId.
- *
- * @param txn the transaction under which the insert takes place
- * @param dupsAllowed true if duplicate keys are allowed, and false
- * otherwise
+ * Tells methods that return an IndexKeyEntry what part of the data the caller is
+ * interested in.
*
- * @return Status::OK() if the insert succeeded,
+ * Methods returning an engaged optional<T> will only return null RecordIds or empty
+ * BSONObjs if they have been explicitly left out of the request.
*
- * ErrorCodes::DuplicateKey if 'key' already exists in 'this' index
- * at a RecordId other than 'loc' and duplicates were not allowed
+ * Implementations are allowed to return more data than requested, but not less.
*/
- virtual Status insert(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) = 0;
+ enum RequestedInfo {
+ // Only usable part of the return is whether it is engaged or not.
+ kJustExistance = 0,
+ // Key must be filled in.
+ kWantKey = 1,
+            // Loc must be filled in.
+ kWantLoc = 2,
+ // Both must be returned.
+ kKeyAndLoc = kWantKey | kWantLoc,
+ };
+
+ virtual ~Cursor() = default;
+
/**
- * Remove the entry from the index with the specified key and RecordId.
+ * Sets the position to stop scanning. An empty key unsets the end position.
*
- * @param txn the transaction under which the remove takes place
- * @param dupsAllowed true if duplicate keys are allowed, and false
- * otherwise
+ * If next() hits this position, or a seek method attempts to seek past it they
+ * unposition the cursor and return boost::none.
+ *
+ * Setting the end position should be done before seeking since the current position, if
+ * any, isn't checked.
*/
- virtual void unindex(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) = 0;
+ virtual void setEndPosition(const BSONObj& key, bool inclusive) = 0;
/**
- * Return ErrorCodes::DuplicateKey if 'key' already exists in 'this'
- * index at a RecordId other than 'loc', and Status::OK() otherwise.
- *
- * @param txn the transaction under which this operation takes place
- *
- * TODO: Hide this by exposing an update method?
+ * Moves forward and returns the new data or boost::none if there is no more data.
+ * If not positioned, returns boost::none.
*/
- virtual Status dupKeyCheck(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc) = 0;
+ virtual boost::optional<IndexKeyEntry> next(RequestedInfo parts = kKeyAndLoc) = 0;
//
- // Information about the tree
+ // Seeking
//
/**
- * 'output' is used to store results of validate when 'full' is true.
- * If 'full' is false, 'output' may be NULL.
+ * Seeks to the provided key and returns current position.
*
- * TODO: expose full set of args for testing?
+ * TODO consider removing once IndexSeekPoint has been cleaned up a bit. In particular,
+ * need a way to specify use whole keyPrefix and nothing else and to support the
+ * combination of empty and exclusive. Should also make it easier to construct for the
+ * common cases.
*/
- virtual void fullValidate(OperationContext* txn, bool full, long long* numKeysOut,
- BSONObjBuilder* output) const = 0;
-
- virtual bool appendCustomStats(OperationContext* txn, BSONObjBuilder* output, double scale)
- const = 0;
-
+ virtual boost::optional<IndexKeyEntry> seek(const BSONObj& key,
+ bool inclusive,
+ RequestedInfo parts = kKeyAndLoc) = 0;
/**
- * Return the number of bytes consumed by 'this' index.
+ * Seeks to the position described by seekPoint and returns the current position.
*
- * @param txn the transaction under which this operation takes place
- *
- * @see IndexAccessMethod::getSpaceUsedBytes
+ * NOTE: most implementations should just pass seekPoint to
+ * IndexEntryComparison::makeQueryObject().
*/
- virtual long long getSpaceUsedBytes( OperationContext* txn ) const = 0;
+ virtual boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
+ RequestedInfo parts = kKeyAndLoc) = 0;
/**
- * Return true if 'this' index is empty, and false otherwise.
+ * Seeks to a key with a hint to the implementation that you only want exact matches. If
+ * an exact match can't be found, boost::none will be returned and the resulting
+ * position of the cursor is unspecified.
*/
- virtual bool isEmpty(OperationContext* txn) = 0;
+ virtual boost::optional<IndexKeyEntry> seekExact(const BSONObj& key,
+ RequestedInfo parts = kKeyAndLoc) {
+ auto kv = seek(key, true, kKeyAndLoc);
+ if (kv && kv->key.woCompare(key, BSONObj(), /*considerFieldNames*/ false) == 0)
+ return kv;
+ return {};
+ }
+
+ //
+ // Saving and restoring state
+ //
/**
- * Attempt to bring the entirety of 'this' index into memory.
- *
- * If the underlying storage engine does not support the operation,
- * returns ErrorCodes::CommandNotSupported
+ * Prepares for state changes in underlying data in a way that allows the cursor's
+ * current position to be restored.
*
- * @return Status::OK()
+ * It is safe to call savePositioned multiple times in a row.
+ * No other method (excluding destructor) may be called until successfully restored.
*/
- virtual Status touch(OperationContext* txn) const {
- return Status(ErrorCodes::CommandNotSupported,
- "this storage engine does not support touch");
- }
+ virtual void savePositioned() = 0;
/**
- * Return the number of entries in 'this' index.
+ * Prepares for state changes in underlying data without necessarily saving the current
+ * state.
*
- * The default implementation should be overridden with a more
- * efficient one if at all possible.
+ * The cursor's position when restored is unspecified. Caller is expected to seek
+ * following the restore.
+ *
+ * It is safe to call saveUnpositioned multiple times in a row.
+ * No other method (excluding destructor) may be called until successfully restored.
*/
- virtual long long numEntries( OperationContext* txn ) const {
- long long x = -1;
- fullValidate(txn, false, &x, NULL);
- return x;
+ virtual void saveUnpositioned() {
+ savePositioned();
}
/**
- * Navigates over the sorted data.
- *
- * A cursor is constructed with a direction flag with the following effects:
- * - The direction that next() moves.
- * - If a seek method hits an exact match on key, forward cursors will be positioned on
- * the first value for that key, reverse cursors on the last.
- * - If a seek method or restore does not hit an exact match, cursors will be
- * positioned on the closest position *after* the query in the direction of the
- * search.
- * - The end position is on the "far" side of the query. In a forward cursor that means
- * that it is the lowest value for the key if the end is exclusive or the first entry
- * past the key if the end is inclusive or there are no exact matches.
- *
- * A cursor is tied to a transaction, such as the OperationContext or a WriteUnitOfWork
- * inside that context. Any cursor acquired inside a transaction is invalid outside
- * of that transaction, instead use the save and restore methods to reestablish the cursor.
+ * Recovers from potential state changes in underlying data.
*
- * Any method other than the save methods may throw WriteConflict exception. If that
- * happens, the cursor may not be used again until it has been saved and successfully
- * restored. If next() or restore() throw a WCE the cursor's position will be the same as
- * before the call (strong exception guarantee). All other methods leave the cursor in a
- * valid state but with an unspecified position (basic exception guarantee). All methods
- * only provide the basic guarantee for exceptions other than WCE.
+ * If the former position no longer exists, a following call to next() will return the
+ * next closest position in the direction of the scan, if any.
*
- * Any returned unowned BSON is only valid until the next call to any method on this
- * interface. The implementations must assume that passed-in unowned BSON is only valid for
- * the duration of the call.
- *
- * Implementations may override any default implementation if they can provide a more
- * efficient implementation.
+ * This handles restoring after either savePositioned() or saveUnpositioned().
*/
- class Cursor {
- public:
-
- /**
- * Tells methods that return an IndexKeyEntry what part of the data the caller is
- * interested in.
- *
- * Methods returning an engaged optional<T> will only return null RecordIds or empty
- * BSONObjs if they have been explicitly left out of the request.
- *
- * Implementations are allowed to return more data than requested, but not less.
- */
- enum RequestedInfo {
- // Only usable part of the return is whether it is engaged or not.
- kJustExistance = 0,
- // Key must be filled in.
- kWantKey = 1,
- // Loc must be fulled in.
- kWantLoc = 2,
- // Both must be returned.
- kKeyAndLoc = kWantKey | kWantLoc,
- };
-
- virtual ~Cursor() = default;
-
-
- /**
- * Sets the position to stop scanning. An empty key unsets the end position.
- *
- * If next() hits this position, or a seek method attempts to seek past it they
- * unposition the cursor and return boost::none.
- *
- * Setting the end position should be done before seeking since the current position, if
- * any, isn't checked.
- */
- virtual void setEndPosition(const BSONObj& key, bool inclusive) = 0;
-
- /**
- * Moves forward and returns the new data or boost::none if there is no more data.
- * If not positioned, returns boost::none.
- */
- virtual boost::optional<IndexKeyEntry> next(RequestedInfo parts = kKeyAndLoc) = 0;
-
- //
- // Seeking
- //
-
- /**
- * Seeks to the provided key and returns current position.
- *
- * TODO consider removing once IndexSeekPoint has been cleaned up a bit. In particular,
- * need a way to specify use whole keyPrefix and nothing else and to support the
- * combination of empty and exclusive. Should also make it easier to construct for the
- * common cases.
- */
- virtual boost::optional<IndexKeyEntry> seek(const BSONObj& key,
- bool inclusive,
- RequestedInfo parts = kKeyAndLoc) = 0;
-
- /**
- * Seeks to the position described by seekPoint and returns the current position.
- *
- * NOTE: most implementations should just pass seekPoint to
- * IndexEntryComparison::makeQueryObject().
- */
- virtual boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
- RequestedInfo parts = kKeyAndLoc) = 0;
-
- /**
- * Seeks to a key with a hint to the implementation that you only want exact matches. If
- * an exact match can't be found, boost::none will be returned and the resulting
- * position of the cursor is unspecified.
- */
- virtual boost::optional<IndexKeyEntry> seekExact(const BSONObj& key,
- RequestedInfo parts = kKeyAndLoc) {
- auto kv = seek(key, true, kKeyAndLoc);
- if (kv && kv->key.woCompare(key, BSONObj(), /*considerFieldNames*/false) == 0)
- return kv;
- return {};
- }
-
- //
- // Saving and restoring state
- //
-
- /**
- * Prepares for state changes in underlying data in a way that allows the cursor's
- * current position to be restored.
- *
- * It is safe to call savePositioned multiple times in a row.
- * No other method (excluding destructor) may be called until successfully restored.
- */
- virtual void savePositioned() = 0;
-
- /**
- * Prepares for state changes in underlying data without necessarily saving the current
- * state.
- *
- * The cursor's position when restored is unspecified. Caller is expected to seek
- * following the restore.
- *
- * It is safe to call saveUnpositioned multiple times in a row.
- * No other method (excluding destructor) may be called until successfully restored.
- */
- virtual void saveUnpositioned() { savePositioned(); }
-
- /**
- * Recovers from potential state changes in underlying data.
- *
- * If the former position no longer exists, a following call to next() will return the
- * next closest position in the direction of the scan, if any.
- *
- * This handles restoring after either savePositioned() or saveUnpositioned().
- */
- virtual void restore(OperationContext* txn) = 0;
- };
+ virtual void restore(OperationContext* txn) = 0;
+ };
- /**
- * Returns an unpositioned cursor over 'this' index.
- *
- * Implementations can assume that 'this' index outlives all cursors it produces.
- */
- virtual std::unique_ptr<Cursor> newCursor(OperationContext* txn,
- bool isForward = true) const = 0;
+ /**
+ * Returns an unpositioned cursor over 'this' index.
+ *
+ * Implementations can assume that 'this' index outlives all cursors it produces.
+ */
+ virtual std::unique_ptr<Cursor> newCursor(OperationContext* txn,
+ bool isForward = true) const = 0;
- //
- // Index creation
- //
+ //
+ // Index creation
+ //
- virtual Status initAsEmpty(OperationContext* txn) = 0;
- };
+ virtual Status initAsEmpty(OperationContext* txn) = 0;
+};
+
+/**
+ * A version-hiding wrapper around the bulk builder for the Btree.
+ */
+class SortedDataBuilderInterface {
+public:
+ virtual ~SortedDataBuilderInterface() {}
/**
- * A version-hiding wrapper around the bulk builder for the Btree.
+ * Adds 'key' to intermediate storage.
+ *
+ * 'key' must be > or >= the last key passed to this function (depends on _dupsAllowed). If
+ * this is violated an error Status (ErrorCodes::InternalError) will be returned.
*/
- class SortedDataBuilderInterface {
- public:
- virtual ~SortedDataBuilderInterface() { }
+ virtual Status addKey(const BSONObj& key, const RecordId& loc) = 0;
- /**
- * Adds 'key' to intermediate storage.
- *
- * 'key' must be > or >= the last key passed to this function (depends on _dupsAllowed). If
- * this is violated an error Status (ErrorCodes::InternalError) will be returned.
- */
- virtual Status addKey(const BSONObj& key, const RecordId& loc) = 0;
-
- /**
- * Do any necessary work to finish building the tree.
- *
- * The default implementation may be used if no commit phase is necessary because addKey
- * always leaves the tree in a valid state.
- *
- * This is called outside of any WriteUnitOfWork to allow implementations to split this up
- * into multiple units.
- */
- virtual void commit(bool mayInterrupt) {}
- };
+ /**
+ * Do any necessary work to finish building the tree.
+ *
+ * The default implementation may be used if no commit phase is necessary because addKey
+ * always leaves the tree in a valid state.
+ *
+ * This is called outside of any WriteUnitOfWork to allow implementations to split this up
+ * into multiple units.
+ */
+ virtual void commit(bool mayInterrupt) {}
+};
} // namespace mongo
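
The bulk-builder and cursor contracts in this header read more concretely as a usage sketch. The keys and RecordIds below are made up and loadAndScan() is a hypothetical function; it leans only on behavior documented above: addKey() requires ordered keys, getBulkBuilder() returns an owned pointer, and a forward cursor's inclusive seek() lands on the first entry not less than the query, with next() returning boost::none once the scan is exhausted.

// Hypothetical sketch: bulk-load two entries, then scan the index forward from the first key.
void loadAndScan(OperationContext* txn, SortedDataInterface* sorted) {
    {
        std::unique_ptr<SortedDataBuilderInterface> builder(
            sorted->getBulkBuilder(txn, false /* dupsAllowed */));
        Status s = builder->addKey(BSON("" << 1), RecordId(1));  // keys must arrive in order
        invariant(s.isOK());
        s = builder->addKey(BSON("" << 2), RecordId(2));
        invariant(s.isOK());
        builder->commit(false /* mayInterrupt */);
    }

    auto cursor = sorted->newCursor(txn);  // forward cursor by default
    for (auto entry = cursor->seek(BSON("" << 1), true /* inclusive */); entry;
         entry = cursor->next()) {
        // entry->key and entry->loc are valid here until the next call on the cursor.
    }
}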
diff --git a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp
index cb1f8fa0953..754bed1d757 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp
@@ -37,165 +37,165 @@
namespace mongo {
- // Add a key using a bulk builder.
- TEST( SortedDataInterface, BuilderAddKey ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataBuilderInterface> builder(
- sorted->getBulkBuilder( opCtx.get(), true ) );
-
- ASSERT_OK( builder->addKey( key1, loc1 ) );
- builder->commit( false );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
- }
-
- // Add a compound key using a bulk builder.
- TEST( SortedDataInterface, BuilderAddCompoundKey ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataBuilderInterface> builder(
- sorted->getBulkBuilder( opCtx.get(), true ) );
-
- ASSERT_OK( builder->addKey( compoundKey1a, loc1 ) );
- builder->commit( false );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
- }
-
- // Add the same key multiple times using a bulk builder and verify that
- // the returned status is ErrorCodes::DuplicateKey when duplicates are
- // not allowed.
- TEST( SortedDataInterface, BuilderAddSameKey ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataBuilderInterface> builder(
- sorted->getBulkBuilder( opCtx.get(), false ) );
-
- ASSERT_OK( builder->addKey( key1, loc1 ) );
- ASSERT_EQUALS( ErrorCodes::DuplicateKey, builder->addKey( key1, loc2 ) );
- builder->commit( false );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
- }
-
- // Add the same key multiple times using a bulk builder and verify that
- // the returned status is OK when duplicates are allowed.
- TEST( SortedDataInterface, BuilderAddSameKeyWithDupsAllowed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataBuilderInterface> builder(
- sorted->getBulkBuilder( opCtx.get(), true /* allow duplicates */ ) );
-
- ASSERT_OK( builder->addKey( key1, loc1 ) );
- ASSERT_OK( builder->addKey( key1, loc2 ) );
- builder->commit( false );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
- }
-
- // Add multiple keys using a bulk builder.
- TEST( SortedDataInterface, BuilderAddMultipleKeys ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataBuilderInterface> builder(
- sorted->getBulkBuilder( opCtx.get(), true ) );
-
- ASSERT_OK( builder->addKey( key1, loc1 ) );
- ASSERT_OK( builder->addKey( key2, loc2 ) );
- ASSERT_OK( builder->addKey( key3, loc3 ) );
- builder->commit( false );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
- }
-
- // Add multiple compound keys using a bulk builder.
- TEST( SortedDataInterface, BuilderAddMultipleCompoundKeys ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataBuilderInterface> builder(
- sorted->getBulkBuilder( opCtx.get(), true ) );
-
- ASSERT_OK( builder->addKey( compoundKey1a, loc1 ) );
- ASSERT_OK( builder->addKey( compoundKey1b, loc2 ) );
- ASSERT_OK( builder->addKey( compoundKey1c, loc4 ) );
- ASSERT_OK( builder->addKey( compoundKey2b, loc3 ) );
- ASSERT_OK( builder->addKey( compoundKey3a, loc5 ) );
- builder->commit( false );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 5, sorted->numEntries( opCtx.get() ) );
- }
- }
-
-} // namespace mongo
+// Add a key using a bulk builder.
+TEST(SortedDataInterface, BuilderAddKey) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), true));
+
+ ASSERT_OK(builder->addKey(key1, loc1));
+ builder->commit(false);
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
+
+// Add a compound key using a bulk builder.
+TEST(SortedDataInterface, BuilderAddCompoundKey) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), true));
+
+ ASSERT_OK(builder->addKey(compoundKey1a, loc1));
+ builder->commit(false);
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
+
+// Add the same key multiple times using a bulk builder and verify that
+// the returned status is ErrorCodes::DuplicateKey when duplicates are
+// not allowed.
+TEST(SortedDataInterface, BuilderAddSameKey) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), false));
+
+ ASSERT_OK(builder->addKey(key1, loc1));
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey, builder->addKey(key1, loc2));
+ builder->commit(false);
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
+
+// Add the same key multiple times using a bulk builder and verify that
+// the returned status is OK when duplicates are allowed.
+TEST(SortedDataInterface, BuilderAddSameKeyWithDupsAllowed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), true /* allow duplicates */));
+
+ ASSERT_OK(builder->addKey(key1, loc1));
+ ASSERT_OK(builder->addKey(key1, loc2));
+ builder->commit(false);
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+}
+
+// Add multiple keys using a bulk builder.
+TEST(SortedDataInterface, BuilderAddMultipleKeys) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), true));
+
+ ASSERT_OK(builder->addKey(key1, loc1));
+ ASSERT_OK(builder->addKey(key2, loc2));
+ ASSERT_OK(builder->addKey(key3, loc3));
+ builder->commit(false);
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
+}
+
+// Add multiple compound keys using a bulk builder.
+TEST(SortedDataInterface, BuilderAddMultipleCompoundKeys) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataBuilderInterface> builder(
+ sorted->getBulkBuilder(opCtx.get(), true));
+
+ ASSERT_OK(builder->addKey(compoundKey1a, loc1));
+ ASSERT_OK(builder->addKey(compoundKey1b, loc2));
+ ASSERT_OK(builder->addKey(compoundKey1c, loc4));
+ ASSERT_OK(builder->addKey(compoundKey2b, loc3));
+ ASSERT_OK(builder->addKey(compoundKey3a, loc5));
+ builder->commit(false);
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(5, sorted->numEntries(opCtx.get()));
+ }
+}
+
+} // namespace mongo
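The BuilderAddSameKey and BuilderAddSameKeyWithDupsAllowed tests above differ only in the uniqueness and dupsAllowed flags; a condensed, illustrative contrast (not part of the diff, reusing the same fixtures) is:

    // Unique index, getBulkBuilder(..., false): the second addKey() of key1 is rejected.
    ASSERT_OK(uniqueBuilder->addKey(key1, loc1));
    ASSERT_EQUALS(ErrorCodes::DuplicateKey, uniqueBuilder->addKey(key1, loc2));

    // Non-unique index, getBulkBuilder(..., true): the same key may map to two RecordIds.
    ASSERT_OK(dupBuilder->addKey(key1, loc1));
    ASSERT_OK(dupBuilder->addKey(key1, loc2));

Here uniqueBuilder and dupBuilder are placeholder names for builders obtained exactly as in the two tests above.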
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
index d4a99333fdc..3b49ed4bded 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp
@@ -37,130 +37,132 @@
namespace mongo {
- // Verify that a forward cursor is positioned at EOF when the index is empty.
- TEST( SortedDataInterface, CursorIsEOFWhenEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Verify that a forward cursor is positioned at EOF when the index is empty.
+TEST(SortedDataInterface, CursorIsEOFWhenEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ ASSERT(!cursor->seek(minKey, true));
- ASSERT( !cursor->seek(minKey, true) );
-
- // Cursor at EOF should remain at EOF when advanced
- ASSERT( !cursor->next() );
- }
+ // Cursor at EOF should remain at EOF when advanced
+ ASSERT(!cursor->next());
}
+}
- // Verify that a reverse cursor is positioned at EOF when the index is empty.
- TEST( SortedDataInterface, CursorIsEOFWhenEmptyReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Verify that a reverse cursor is positioned at EOF when the index is empty.
+TEST(SortedDataInterface, CursorIsEOFWhenEmptyReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT( !cursor->seek( maxKey, true ) );
+ ASSERT(!cursor->seek(maxKey, true));
- // Cursor at EOF should remain at EOF when advanced
- ASSERT( !cursor->next() );
- }
+ // Cursor at EOF should remain at EOF when advanced
+ ASSERT(!cursor->next());
}
+}
- // Call advance() on a forward cursor until it is exhausted.
- // When a cursor positioned at EOF is advanced, it stays at EOF.
- TEST( SortedDataInterface, ExhaustCursor ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Call advance() on a forward cursor until it is exhausted.
+// When a cursor positioned at EOF is advanced, it stays at EOF.
+TEST(SortedDataInterface, ExhaustCursor) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ BSONObj key = BSON("" << i);
+ RecordId loc(42, i * 2);
+ ASSERT_OK(sorted->insert(opCtx.get(), key, loc, true));
+ uow.commit();
}
+ }
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- BSONObj key = BSON( "" << i );
- RecordId loc( 42, i * 2 );
- ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ for (int i = 0; i < nToInsert; i++) {
+ auto entry = i == 0 ? cursor->seek(minKey, true) : cursor->next();
+ ASSERT_EQ(entry, IndexKeyEntry(BSON("" << i), RecordId(42, i * 2)));
}
+ ASSERT(!cursor->next());
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- for ( int i = 0; i < nToInsert; i++ ) {
- auto entry = i == 0 ? cursor->seek(minKey, true) : cursor->next();
- ASSERT_EQ(entry, IndexKeyEntry(BSON("" << i), RecordId(42, i * 2)));
- }
- ASSERT( !cursor->next() );
-
- // Cursor at EOF should remain at EOF when advanced
- ASSERT( !cursor->next() );
- }
+ // Cursor at EOF should remain at EOF when advanced
+ ASSERT(!cursor->next());
}
+}
+
+// Call advance() on a reverse cursor until it is exhausted.
+// When a cursor positioned at EOF is advanced, it stays at EOF.
+TEST(SortedDataInterface, ExhaustCursorReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- // Call advance() on a reverse cursor until it is exhausted.
- // When a cursor positioned at EOF is advanced, it stays at EOF.
- TEST( SortedDataInterface, ExhaustCursorReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ BSONObj key = BSON("" << i);
+ RecordId loc(42, i * 2);
+ ASSERT_OK(sorted->insert(opCtx.get(), key, loc, true));
+ uow.commit();
}
+ }
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- BSONObj key = BSON( "" << i );
- RecordId loc( 42, i * 2 );
- ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ for (int i = nToInsert - 1; i >= 0; i--) {
+ auto entry = (i == nToInsert - 1) ? cursor->seek(maxKey, true) : cursor->next();
+ ASSERT_EQ(entry, IndexKeyEntry(BSON("" << i), RecordId(42, i * 2)));
}
+ ASSERT(!cursor->next());
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- for ( int i = nToInsert - 1; i >= 0; i-- ) {
- auto entry = (i == nToInsert - 1) ? cursor->seek(maxKey, true) : cursor->next();
- ASSERT_EQ(entry, IndexKeyEntry(BSON("" << i), RecordId(42, i * 2)));
- }
- ASSERT( !cursor->next() );
-
- // Cursor at EOF should remain at EOF when advanced
- ASSERT( !cursor->next() );
- }
+ // Cursor at EOF should remain at EOF when advanced
+ ASSERT(!cursor->next());
}
+}
-} // namespace mongo
+} // namespace mongo
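A condensed sketch of the scan pattern the cursor tests above exercise (illustrative, not part of the diff): an inclusive seek from minKey positions a forward cursor on the first entry, next() walks the remaining entries, and once the cursor reaches EOF further next() calls keep returning an empty result.

    const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));

    // seek()/next() yield an optional entry (compared against boost::none in the
    // tests above), so the result can drive the loop condition directly.
    for (auto entry = cursor->seek(minKey, true); entry; entry = cursor->next()) {
        // The entry pairs the BSONObj key with its RecordId, as constructed in the tests.
    }

    // A cursor at EOF stays at EOF when advanced again.
    ASSERT(!cursor->next());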
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
index 2b094626ac6..d36e84a8a66 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp
@@ -37,614 +37,620 @@
namespace mongo {
- // Insert multiple single-field keys and advance to each of them
- // using a forward cursor by specifying their exact key. When
- // advanceTo() is called on a duplicate key, the cursor is
- // positioned at the first occurrence of that key in ascending
- // order by RecordId.
- TEST( SortedDataInterface, AdvanceTo ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+// Insert multiple single-field keys and advance to each of them
+// using a forward cursor by specifying their exact key. When
+// advanceTo() is called on a duplicate key, the cursor is
+// positioned at the first occurrence of that key in ascending
+// order by RecordId.
+TEST(SortedDataInterface, AdvanceTo) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc2, true /* allow duplicates */ ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc3, true /* allow duplicates */ ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc4, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc5, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc2, true /* allow duplicates */));
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc3, true /* allow duplicates */));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc4, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc5, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 5, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(5, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- IndexSeekPoint seekPoint;
- seekPoint.keyPrefix = key1;
- seekPoint.prefixLen = 1;
- seekPoint.prefixExclusive = false;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ IndexSeekPoint seekPoint;
+ seekPoint.keyPrefix = key1;
+ seekPoint.prefixLen = 1;
+ seekPoint.prefixExclusive = false;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- seekPoint.keyPrefix = key2;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc4));
+ seekPoint.keyPrefix = key2;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc4));
- seekPoint.keyPrefix = key3;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc5));
+ seekPoint.keyPrefix = key3;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc5));
- seekPoint.keyPrefix = key4;
- ASSERT_EQ(cursor->seek(seekPoint), boost::none);
- }
+ seekPoint.keyPrefix = key4;
+ ASSERT_EQ(cursor->seek(seekPoint), boost::none);
+ }
+}
+
+// Insert multiple single-field keys and advance to each of them
+// using a reverse cursor by specifying their exact key. When
+// advanceTo() is called on a duplicate key, the cursor is
+// positioned at the first occurrence of that key in descending
+// order by RecordId (last occurrence in index order).
+TEST(SortedDataInterface, AdvanceToReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert multiple single-field keys and advance to each of them
- // using a reverse cursor by specifying their exact key. When
- // advanceTo() is called on a duplicate key, the cursor is
- // positioned at the first occurrence of that key in descending
- // order by RecordId (last occurrence in index order).
- TEST( SortedDataInterface, AdvanceToReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc4, true /* allow duplicates */ ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc5, true /* allow duplicates */ ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc4, true /* allow duplicates */));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc5, true /* allow duplicates */));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 5, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(5, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc5));
+ ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc5));
- IndexSeekPoint seekPoint;
- seekPoint.keyPrefix = key3;
- seekPoint.prefixLen = 1;
- seekPoint.prefixExclusive = false;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc5));
+ IndexSeekPoint seekPoint;
+ seekPoint.keyPrefix = key3;
+ seekPoint.prefixLen = 1;
+ seekPoint.prefixExclusive = false;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc5));
- seekPoint.keyPrefix = key2;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc2));
+ seekPoint.keyPrefix = key2;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc2));
- seekPoint.keyPrefix = key1;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ seekPoint.keyPrefix = key1;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- seekPoint.keyPrefix = key0;
- ASSERT_EQ(cursor->seek(seekPoint), boost::none);
- }
+ seekPoint.keyPrefix = key0;
+ ASSERT_EQ(cursor->seek(seekPoint), boost::none);
+ }
+}
+
+// Insert two single-field keys, then seek a forward cursor to the larger one then seek behind
+// the smaller one. Ending position is on the smaller one since a seek describes where to go
+// and should not be affected by current position.
+TEST(SortedDataInterface, AdvanceToKeyBeforeCursorPosition) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert two single-field keys, then seek a forward cursor to the larger one then seek behind
- // the smaller one. Ending position is on the smaller one since a seek describes where to go
- // and should not be effected by current position.
- TEST( SortedDataInterface, AdvanceToKeyBeforeCursorPosition ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- IndexSeekPoint seekPoint;
- seekPoint.keyPrefix = key0;
- seekPoint.prefixLen = 1;
- seekPoint.prefixExclusive = false;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ IndexSeekPoint seekPoint;
+ seekPoint.keyPrefix = key0;
+ seekPoint.prefixLen = 1;
+ seekPoint.prefixExclusive = false;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- seekPoint.prefixExclusive = true;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- }
+ seekPoint.prefixExclusive = true;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ }
+}
+
+// Insert two single-field keys, then seek a reverse cursor to the smaller one then seek behind
+// the larger one. Ending position is on the larger one since a seek describes where to go
+// and should not be affected by current position.
+TEST(SortedDataInterface, AdvanceToKeyAfterCursorPositionReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert two single-field keys, then seek a reverse cursor to the smaller one then seek behind
- // the larger one. Ending position is on the larger one since a seek describes where to go
- // and should not be effected by current position.
- TEST( SortedDataInterface, AdvanceToKeyAfterCursorPositionReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
- IndexSeekPoint seekPoint;
- seekPoint.keyPrefix = key3;
- seekPoint.prefixLen = 1;
- seekPoint.prefixExclusive = false;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc2));
+ IndexSeekPoint seekPoint;
+ seekPoint.keyPrefix = key3;
+ seekPoint.prefixLen = 1;
+ seekPoint.prefixExclusive = false;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc2));
- seekPoint.prefixExclusive = true;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc2));
- }
+ seekPoint.prefixExclusive = true;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc2));
+ }
+}
+
+// Insert a single-field key and advance to EOF using a forward cursor
+// by specifying that exact key. When seek() is called with the key
+// where the cursor is positioned (and it is the first entry for that key),
+// the cursor should remain at its current position. An exclusive seek will
+// position the cursor on the next position, which may be EOF.
+TEST(SortedDataInterface, AdvanceToKeyAtCursorPosition) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert a single-field key and advance to EOF using a forward cursor
- // by specifying that exact key. When seek() is called with the key
- // where the cursor is positioned (and it is the first entry for that key),
- // the cursor should remain at its current position. An exclusive seek will
- // position the cursor on the next position, which may be EOF.
- TEST( SortedDataInterface, AdvanceToKeyAtCursorPosition ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- IndexSeekPoint seekPoint;
- seekPoint.keyPrefix = key1;
- seekPoint.prefixLen = 1;
- seekPoint.prefixExclusive = false;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ IndexSeekPoint seekPoint;
+ seekPoint.keyPrefix = key1;
+ seekPoint.prefixLen = 1;
+ seekPoint.prefixExclusive = false;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- seekPoint.prefixExclusive = true;
- ASSERT_EQ(cursor->seek(seekPoint), boost::none);
- }
+ seekPoint.prefixExclusive = true;
+ ASSERT_EQ(cursor->seek(seekPoint), boost::none);
+ }
+}
+
+// Insert a single-field key and advance to EOF using a reverse cursor
+// by specifying that exact key. When seek() is called with the key
+// where the cursor is positioned (and it is the first entry for that key),
+// the cursor should remain at its current position. An exclusive seek will
+// position the cursor on the next position, which may be EOF.
+TEST(SortedDataInterface, AdvanceToKeyAtCursorPositionReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert a single-field key and advance to EOF using a reverse cursor
- // by specifying that exact key. When seek() is called with the key
- // where the cursor is positioned (and it is the first entry for that key),
- // the cursor should remain at its current position. An exclusive seek will
- // position the cursor on the next position, which may be EOF.
- TEST( SortedDataInterface, AdvanceToKeyAtCursorPositionReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- IndexSeekPoint seekPoint;
- seekPoint.keyPrefix = key1;
- seekPoint.prefixLen = 1;
- seekPoint.prefixExclusive = false;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ IndexSeekPoint seekPoint;
+ seekPoint.keyPrefix = key1;
+ seekPoint.prefixLen = 1;
+ seekPoint.prefixExclusive = false;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- seekPoint.prefixExclusive = true;
- ASSERT_EQ(cursor->seek(seekPoint), boost::none);
- }
+ seekPoint.prefixExclusive = true;
+ ASSERT_EQ(cursor->seek(seekPoint), boost::none);
+ }
+}
+
+// Insert multiple single-field keys and advance to each of them using
+// a forward cursor by specifying a key that comes immediately before.
+// When advanceTo() is called in non-inclusive mode, the cursor is
+// positioned at the key that comes after the one specified.
+TEST(SortedDataInterface, AdvanceToExclusive) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert multiple single-field keys and advance to each of them using
- // a forward cursor by specifying a key that comes immediately before.
- // When advanceTo() is called in non-inclusive mode, the cursor is
- // positioned at the key that comes after the one specified.
- TEST( SortedDataInterface, AdvanceToExclusive ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc2, true /* allow duplicates */ ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc3, true /* allow duplicates */ ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc4, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc5, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc2, true /* allow duplicates */));
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc3, true /* allow duplicates */));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc4, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc5, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 5, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(5, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- IndexSeekPoint seekPoint;
- seekPoint.keyPrefix = key1;
- seekPoint.prefixLen = 1;
- seekPoint.prefixExclusive = true;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc4));
+ IndexSeekPoint seekPoint;
+ seekPoint.keyPrefix = key1;
+ seekPoint.prefixLen = 1;
+ seekPoint.prefixExclusive = true;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc4));
- seekPoint.keyPrefix = key2;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc5));
+ seekPoint.keyPrefix = key2;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc5));
- seekPoint.keyPrefix = key3;
- ASSERT_EQ(cursor->seek(seekPoint), boost::none);
+ seekPoint.keyPrefix = key3;
+ ASSERT_EQ(cursor->seek(seekPoint), boost::none);
- seekPoint.keyPrefix = key4;
- ASSERT_EQ(cursor->seek(seekPoint), boost::none);
- }
+ seekPoint.keyPrefix = key4;
+ ASSERT_EQ(cursor->seek(seekPoint), boost::none);
+ }
+}
+
+// Insert multiple single-field keys and advance to each of them using
+// a reverse cursor by specifying a key that comes immediately after.
+// When advanceTo() is called in non-inclusive mode, the cursor is
+// positioned at the key that comes before the one specified.
+TEST(SortedDataInterface, AdvanceToExclusiveReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert multiple single-field keys and advance to each of them using
- // a reverse cursor by specifying a key that comes immediately after.
- // When advanceTo() is called in non-inclusive mode, the cursor is
- // positioned at the key that comes before the one specified.
- TEST( SortedDataInterface, AdvanceToExclusiveReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc4, true /* allow duplicates */ ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc5, true /* allow duplicates */ ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc4, true /* allow duplicates */));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc5, true /* allow duplicates */));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 5, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(5, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc5));
+ ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc5));
- IndexSeekPoint seekPoint;
- seekPoint.keyPrefix = key3;
- seekPoint.prefixLen = 1;
- seekPoint.prefixExclusive = true;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc2));
+ IndexSeekPoint seekPoint;
+ seekPoint.keyPrefix = key3;
+ seekPoint.prefixLen = 1;
+ seekPoint.prefixExclusive = true;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key2, loc2));
- seekPoint.keyPrefix = key2;
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ seekPoint.keyPrefix = key2;
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- seekPoint.keyPrefix = key1;
- ASSERT_EQ(cursor->seek(seekPoint), boost::none);
+ seekPoint.keyPrefix = key1;
+ ASSERT_EQ(cursor->seek(seekPoint), boost::none);
- seekPoint.keyPrefix = key0;
- ASSERT_EQ(cursor->seek(seekPoint), boost::none);
- }
+ seekPoint.keyPrefix = key0;
+ ASSERT_EQ(cursor->seek(seekPoint), boost::none);
}
+}
- // Insert multiple, non-consecutive, single-field keys and advance to
- // each of them using a forward cursor by specifying a key between their
- // exact key and the current position of the cursor.
- TEST( SortedDataInterface, AdvanceToIndirect ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Insert multiple, non-consecutive, single-field keys and advance to
+// each of them using a forward cursor by specifying a key between their
+// exact key and the current position of the cursor.
+TEST(SortedDataInterface, AdvanceToIndirect) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- BSONObj unusedKey = key6; // larger than any inserted key
+ BSONObj unusedKey = key6; // larger than any inserted key
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key5, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key5, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- IndexSeekPoint seekPoint;
- seekPoint.prefixLen = 0;
- BSONElement suffix0;
- seekPoint.keySuffix = {&suffix0};
- seekPoint.suffixInclusive = {true};
+ IndexSeekPoint seekPoint;
+ seekPoint.prefixLen = 0;
+ BSONElement suffix0;
+ seekPoint.keySuffix = {&suffix0};
+ seekPoint.suffixInclusive = {true};
- suffix0 = key2.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc2));
+ suffix0 = key2.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc2));
- suffix0 = key4.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key5, loc3));
- }
+ suffix0 = key4.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key5, loc3));
}
+}
- // Insert multiple, non-consecutive, single-field keys and advance to
- // each of them using a reverse cursor by specifying a key between their
- // exact key and the current position of the cursor.
- TEST( SortedDataInterface, AdvanceToIndirectReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Insert multiple, non-consecutive, single-field keys and advance to
+// each of them using a reverse cursor by specifying a key between their
+// exact key and the current position of the cursor.
+TEST(SortedDataInterface, AdvanceToIndirectReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- BSONObj unusedKey = key0; // smaller than any inserted key
+ BSONObj unusedKey = key0; // smaller than any inserted key
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key5, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key5, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(key5, true), IndexKeyEntry(key5, loc3));
+ ASSERT_EQ(cursor->seek(key5, true), IndexKeyEntry(key5, loc3));
- IndexSeekPoint seekPoint;
- seekPoint.prefixLen = 0;
- BSONElement suffix0;
- seekPoint.keySuffix = {&suffix0};
- seekPoint.suffixInclusive = {true};
+ IndexSeekPoint seekPoint;
+ seekPoint.prefixLen = 0;
+ BSONElement suffix0;
+ seekPoint.keySuffix = {&suffix0};
+ seekPoint.suffixInclusive = {true};
- suffix0 = key4.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc2));
+ suffix0 = key4.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc2));
- suffix0 = key2.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- }
+ suffix0 = key2.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ }
+}
+
+// Insert multiple, non-consecutive, single-field keys and advance to
+// each of them using a forward cursor by specifying a key between their
+// exact key and the current position of the cursor. When advanceTo()
+// is called in non-inclusive mode, the cursor is positioned at the key
+// that comes after the one specified.
+TEST(SortedDataInterface, AdvanceToIndirectExclusive) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ BSONObj unusedKey = key6; // larger than any inserted key
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert multiple, non-consecutive, single-field keys and advance to
- // each of them using a forward cursor by specifying a key between their
- // exact key and the current position of the cursor. When advanceTo()
- // is called in non-inclusive mode, the cursor is positioned at the key
- // that comes after the one specified.
- TEST( SortedDataInterface, AdvanceToIndirectExclusive ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- BSONObj unusedKey = key6; // larger than any inserted key
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key5, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key5, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- IndexSeekPoint seekPoint;
- seekPoint.prefixLen = 0;
- BSONElement suffix0;
- seekPoint.keySuffix = {&suffix0};
- seekPoint.suffixInclusive = {false};
+ IndexSeekPoint seekPoint;
+ seekPoint.prefixLen = 0;
+ BSONElement suffix0;
+ seekPoint.keySuffix = {&suffix0};
+ seekPoint.suffixInclusive = {false};
- suffix0 = key2.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc2));
+ suffix0 = key2.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc2));
- suffix0 = key4.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key5, loc3));
+ suffix0 = key4.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key5, loc3));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- suffix0 = key3.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key5, loc3));
- }
+ suffix0 = key3.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key5, loc3));
+ }
+}
+
+// Insert multiple, non-consecutive, single-field keys and advance to
+// each of them using a reverse cursor by specifying a key between their
+// exact key and the current position of the cursor. When advanceTo()
+// is called in non-inclusive mode, the cursor is positioned at the key
+// that comes before the one specified.
+TEST(SortedDataInterface, AdvanceToIndirectExclusiveReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ BSONObj unusedKey = key0; // smaller than any inserted key
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert multiple, non-consecutive, single-field keys and advance to
- // each of them using a reverse cursor by specifying a key between their
- // exact key and the current position of the cursor. When advanceTo()
- // is called in non-inclusive mode, the cursor is positioned at the key
- // that comes before the one specified.
- TEST( SortedDataInterface, AdvanceToIndirectExclusiveReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- BSONObj unusedKey = key0; // smaller than any inserted key
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key5, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key5, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(key5, true), IndexKeyEntry(key5, loc3));
+ ASSERT_EQ(cursor->seek(key5, true), IndexKeyEntry(key5, loc3));
- IndexSeekPoint seekPoint;
- seekPoint.prefixLen = 0;
- BSONElement suffix0;
- seekPoint.keySuffix = {&suffix0};
- seekPoint.suffixInclusive = {false};
+ IndexSeekPoint seekPoint;
+ seekPoint.prefixLen = 0;
+ BSONElement suffix0;
+ seekPoint.keySuffix = {&suffix0};
+ seekPoint.suffixInclusive = {false};
- suffix0 = key4.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc2));
+ suffix0 = key4.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key3, loc2));
- suffix0 = key2.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
+ suffix0 = key2.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->seek(key5, true), IndexKeyEntry(key5, loc3));
+ ASSERT_EQ(cursor->seek(key5, true), IndexKeyEntry(key5, loc3));
- suffix0 = key3.firstElement();
- ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
- }
+ suffix0 = key3.firstElement();
+ ASSERT_EQ(cursor->seek(seekPoint), IndexKeyEntry(key1, loc1));
}
+}
-} // namespace mongo
+} // namespace mongo
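Editor's note on the IndexSeekPoint pattern exercised in the tests above: the seek point is built with prefixLen = 0 and a single key suffix, and suffixInclusive decides whether the cursor lands on the supplied element or strictly past it. A minimal sketch of that call sequence, assuming a forward cursor and a BSONObj key like those in the tests ('someKey' and 'cursor' are illustrative placeholders, not identifiers from this diff):

    // Position the cursor at the first entry after someKey; flip
    // suffixInclusive to {true} to land on someKey itself if present.
    IndexSeekPoint seekPoint;
    seekPoint.prefixLen = 0;              // take no prefix columns from a prior key
    BSONElement suffix0;
    seekPoint.keySuffix = {&suffix0};     // a single suffix element drives the comparison
    seekPoint.suffixInclusive = {false};  // false: exclusive, stop strictly past the element
    suffix0 = someKey.firstElement();     // 'someKey' is a placeholder BSONObj
    auto entry = cursor->seek(seekPoint); // optional entry, empty when out of range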
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
index a2d4609c82c..190f707c4c0 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
@@ -34,380 +34,384 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- // Tests setEndPosition with next().
- void testSetEndPosition_Next_Forward(bool unique, bool inclusive) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
- {key4, loc1},
- {key5, loc1},
+// Tests setEndPosition with next().
+void testSetEndPosition_Next_Forward(bool unique, bool inclusive) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(
+ unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
});
- // Dup key on end point. Illegal for unique indexes.
- if (!unique) insertToIndex(opCtx, sorted, {{key3, loc2}});
+ // Dup key on end point. Illegal for unique indexes.
+ if (!unique)
+ insertToIndex(opCtx, sorted, {{key3, loc2}});
- auto cursor = sorted->newCursor(opCtx.get());
- cursor->setEndPosition(key3, inclusive);
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
- if (inclusive) {
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
- if (!unique) ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc2));
- }
- ASSERT_EQ(cursor->next(), boost::none);
- ASSERT_EQ(cursor->next(), boost::none); // don't resurrect.
- }
- TEST(SortedDataInterface, SetEndPosition_Next_Forward_Unique_Inclusive) {
- testSetEndPosition_Next_Forward(true, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Next_Forward_Unique_Exclusive) {
- testSetEndPosition_Next_Forward(true, false);
- }
- TEST(SortedDataInterface, SetEndPosition_Next_Forward_Standard_Inclusive) {
- testSetEndPosition_Next_Forward(false, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Next_Forward_Standard_Exclusive) {
- testSetEndPosition_Next_Forward(false, false);
- }
-
- void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
- {key4, loc1},
- {key5, loc1},
- });
-
- // Dup key on end point. Illegal for unique indexes.
- if (!unique) insertToIndex(opCtx, sorted, {{key3, loc2}});
-
- auto cursor = sorted->newCursor(opCtx.get(), false);
- cursor->setEndPosition(key3, inclusive);
-
- ASSERT_EQ(cursor->seek(key5, true), IndexKeyEntry(key5, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key4, loc1));
- if (inclusive) {
- if (!unique) ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
- }
- ASSERT_EQ(cursor->next(), boost::none);
- ASSERT_EQ(cursor->next(), boost::none); // don't resurrect.
- }
- TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Unique_Inclusive) {
- testSetEndPosition_Next_Reverse(true, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Unique_Exclusive) {
- testSetEndPosition_Next_Reverse(true, false);
- }
- TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Standard_Inclusive) {
- testSetEndPosition_Next_Reverse(false, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Standard_Exclusive) {
- testSetEndPosition_Next_Reverse(false, false);
- }
-
- // Tests setEndPosition with seek() and seekExact().
- void testSetEndPosition_Seek_Forward(bool unique, bool inclusive) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- // No key2
- {key3, loc1},
- {key4, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get());
- cursor->setEndPosition(key3, inclusive);
-
- // Directly seeking past end is considered out of range.
- ASSERT_EQ(cursor->seek(key4, true), boost::none);
- ASSERT_EQ(cursor->seekExact(key4), boost::none);
-
- // Seeking to key3 directly or indirectly is only returned if endPosition is inclusive.
- auto maybeKey3 = inclusive ? boost::make_optional(IndexKeyEntry(key3, loc1)) : boost::none;
-
- // direct
- ASSERT_EQ(cursor->seek(key3, true), maybeKey3);
- ASSERT_EQ(cursor->seekExact(key3), maybeKey3);
-
- // indirect
- ASSERT_EQ(cursor->seek(key2, true), maybeKey3);
-
- cursor->saveUnpositioned();
- removeFromIndex(opCtx, sorted, {{key3, loc1}});
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->seek(key2, true), boost::none);
- ASSERT_EQ(cursor->seek(key3, true), boost::none);
- }
- TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Unique_Inclusive) {
- testSetEndPosition_Seek_Forward(true, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Unique_Exclusive) {
- testSetEndPosition_Seek_Forward(true, false);
- }
- TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Standard_Inclusive) {
- testSetEndPosition_Seek_Forward(false, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Standard_Exclusive) {
- testSetEndPosition_Seek_Forward(false, false);
- }
-
- void testSetEndPosition_Seek_Reverse(bool unique, bool inclusive) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- // No key3
- {key4, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get(), false);
- cursor->setEndPosition(key2, inclusive);
-
- // Directly seeking past end is considered out of range.
- ASSERT_EQ(cursor->seek(key1, true), boost::none);
- ASSERT_EQ(cursor->seekExact(key1), boost::none);
-
- // Seeking to key2 directly or indirectly is only returned if endPosition is inclusive.
- auto maybeKey2 = inclusive ? boost::make_optional(IndexKeyEntry(key2, loc1)) : boost::none;
-
- // direct
- ASSERT_EQ(cursor->seek(key2, true), maybeKey2);
- ASSERT_EQ(cursor->seekExact(key2), maybeKey2);
-
- // indirect
- ASSERT_EQ(cursor->seek(key3, true), maybeKey2);
-
- cursor->saveUnpositioned();
- removeFromIndex(opCtx, sorted, {{key2, loc1}});
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->seek(key3, true), boost::none);
- ASSERT_EQ(cursor->seek(key2, true), boost::none);
- }
- TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Unique_Inclusive) {
- testSetEndPosition_Seek_Reverse(true, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Unique_Exclusive) {
- testSetEndPosition_Seek_Reverse(true, false);
- }
- TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Standard_Inclusive) {
- testSetEndPosition_Seek_Reverse(false, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Standard_Exclusive) {
- testSetEndPosition_Seek_Reverse(false, false);
- }
-
- // Test that restore never lands on the wrong side of the endPosition.
- void testSetEndPosition_Restore_Forward(bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
- {key4, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get());
- cursor->setEndPosition(key3, false); // Should never see key3 or key4.
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
-
- cursor->savePositioned();
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {
- {key2, loc1},
- {key3, loc1},
- });
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->next(), boost::none);
- }
- TEST(SortedDataInterface, SetEndPosition_Restore_Forward_Unique) {
- testSetEndPosition_Restore_Forward(true);
- }
- TEST(SortedDataInterface, SetEndPosition_Restore_Forward_Standard) {
- testSetEndPosition_Restore_Forward(false);
- }
-
- void testSetEndPosition_Restore_Reverse(bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
- {key4, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get(), false);
- cursor->setEndPosition(key2, false); // Should never see key1 or key2.
-
- ASSERT_EQ(cursor->seek(key4, true), IndexKeyEntry(key4, loc1));
-
- cursor->savePositioned();
- cursor->restore(opCtx.get());
+ auto cursor = sorted->newCursor(opCtx.get());
+ cursor->setEndPosition(key3, inclusive);
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+ if (inclusive) {
ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {
- {key2, loc1},
- {key3, loc1},
- });
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->next(), boost::none);
- }
- TEST(SortedDataInterface, SetEndPosition_Restore_Reverse_Unique) {
- testSetEndPosition_Restore_Reverse(true);
- }
- TEST(SortedDataInterface, SetEndPosition_Restore_Reverse_Standard) {
- testSetEndPosition_Restore_Reverse(false);
- }
-
- // Test that restore always updates the end cursor if one is used. Some storage engines use a
- // cursor positioned at the first out-of-range document and have next() check if the current
- // position is the same as the end cursor. End cursor maintenance cannot be directly tested
- // (since implementations are free not to use end cursors) but implementations that incorrectly
- // restore end cursors would tend to fail this test.
- void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key4, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get());
- cursor->setEndPosition(key2, true);
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
-
- // A potential source of bugs is not restoring end cursor with saveUnpositioned().
- cursor->saveUnpositioned();
- insertToIndex(opCtx, sorted, {
- {key2, loc1}, // in range
- {key3, loc1}, // out of range
- });
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
- TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Forward_Unique) {
- testSetEndPosition_RestoreEndCursor_Forward(true);
- }
- TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Forward_Standard) {
- testSetEndPosition_RestoreEndCursor_Forward(false);
- }
-
- void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key4, loc1},
+ if (!unique)
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc2));
+ }
+ ASSERT_EQ(cursor->next(), boost::none);
+ ASSERT_EQ(cursor->next(), boost::none); // don't resurrect.
+}
+TEST(SortedDataInterface, SetEndPosition_Next_Forward_Unique_Inclusive) {
+ testSetEndPosition_Next_Forward(true, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Next_Forward_Unique_Exclusive) {
+ testSetEndPosition_Next_Forward(true, false);
+}
+TEST(SortedDataInterface, SetEndPosition_Next_Forward_Standard_Inclusive) {
+ testSetEndPosition_Next_Forward(false, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Next_Forward_Standard_Exclusive) {
+ testSetEndPosition_Next_Forward(false, false);
+}
+
+void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(
+ unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
});
- auto cursor = sorted->newCursor(opCtx.get(), false);
- cursor->setEndPosition(key3, true);
-
- ASSERT_EQ(cursor->seek(key4, true), IndexKeyEntry(key4, loc1));
+ // Dup key on end point. Illegal for unique indexes.
+ if (!unique)
+ insertToIndex(opCtx, sorted, {{key3, loc2}});
- cursor->saveUnpositioned();
- insertToIndex(opCtx, sorted, {
- {key2, loc1}, // in range
- {key3, loc1}, // out of range
- });
- cursor->restore(opCtx.get()); // must restore end cursor even with saveUnpositioned().
+ auto cursor = sorted->newCursor(opCtx.get(), false);
+ cursor->setEndPosition(key3, inclusive);
- ASSERT_EQ(cursor->seek(key4, true), IndexKeyEntry(key4, loc1));
+ ASSERT_EQ(cursor->seek(key5, true), IndexKeyEntry(key5, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key4, loc1));
+ if (inclusive) {
+ if (!unique)
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc2));
ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
- TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Standard) {
- testSetEndPosition_RestoreEndCursor_Reverse(true);
- }
- TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Unique) {
- testSetEndPosition_RestoreEndCursor_Reverse(false);
}
-
- // setEndPosition with empty BSONObj is supposed to mean "no end position", regardless of
- // inclusive flag or direction.
- void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
+ ASSERT_EQ(cursor->next(), boost::none);
+ ASSERT_EQ(cursor->next(), boost::none); // don't resurrect.
+}
+TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Unique_Inclusive) {
+ testSetEndPosition_Next_Reverse(true, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Unique_Exclusive) {
+ testSetEndPosition_Next_Reverse(true, false);
+}
+TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Standard_Inclusive) {
+ testSetEndPosition_Next_Reverse(false, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Standard_Exclusive) {
+ testSetEndPosition_Next_Reverse(false, false);
+}
+
+// Tests setEndPosition with seek() and seekExact().
+void testSetEndPosition_Seek_Forward(bool unique, bool inclusive) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1},
+ // No key2
+ {key3, loc1},
+ {key4, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get());
+ cursor->setEndPosition(key3, inclusive);
+
+ // Directly seeking past end is considered out of range.
+ ASSERT_EQ(cursor->seek(key4, true), boost::none);
+ ASSERT_EQ(cursor->seekExact(key4), boost::none);
+
+ // Seeking to key3 directly or indirectly is only returned if endPosition is inclusive.
+ auto maybeKey3 = inclusive ? boost::make_optional(IndexKeyEntry(key3, loc1)) : boost::none;
+
+ // direct
+ ASSERT_EQ(cursor->seek(key3, true), maybeKey3);
+ ASSERT_EQ(cursor->seekExact(key3), maybeKey3);
+
+ // indirect
+ ASSERT_EQ(cursor->seek(key2, true), maybeKey3);
+
+ cursor->saveUnpositioned();
+ removeFromIndex(opCtx, sorted, {{key3, loc1}});
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->seek(key2, true), boost::none);
+ ASSERT_EQ(cursor->seek(key3, true), boost::none);
+}
+TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Unique_Inclusive) {
+ testSetEndPosition_Seek_Forward(true, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Unique_Exclusive) {
+ testSetEndPosition_Seek_Forward(true, false);
+}
+TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Standard_Inclusive) {
+ testSetEndPosition_Seek_Forward(false, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Standard_Exclusive) {
+ testSetEndPosition_Seek_Forward(false, false);
+}
+
+void testSetEndPosition_Seek_Reverse(bool unique, bool inclusive) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1},
+ {key2, loc1},
+ // No key3
+ {key4, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), false);
+ cursor->setEndPosition(key2, inclusive);
+
+ // Directly seeking past end is considered out of range.
+ ASSERT_EQ(cursor->seek(key1, true), boost::none);
+ ASSERT_EQ(cursor->seekExact(key1), boost::none);
+
+ // Seeking to key2 directly or indirectly is only returned if endPosition is inclusive.
+ auto maybeKey2 = inclusive ? boost::make_optional(IndexKeyEntry(key2, loc1)) : boost::none;
+
+ // direct
+ ASSERT_EQ(cursor->seek(key2, true), maybeKey2);
+ ASSERT_EQ(cursor->seekExact(key2), maybeKey2);
+
+ // indirect
+ ASSERT_EQ(cursor->seek(key3, true), maybeKey2);
+
+ cursor->saveUnpositioned();
+ removeFromIndex(opCtx, sorted, {{key2, loc1}});
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->seek(key3, true), boost::none);
+ ASSERT_EQ(cursor->seek(key2, true), boost::none);
+}
+TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Unique_Inclusive) {
+ testSetEndPosition_Seek_Reverse(true, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Unique_Exclusive) {
+ testSetEndPosition_Seek_Reverse(true, false);
+}
+TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Standard_Inclusive) {
+ testSetEndPosition_Seek_Reverse(false, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Standard_Exclusive) {
+ testSetEndPosition_Seek_Reverse(false, false);
+}
+
+// Test that restore never lands on the wrong side of the endPosition.
+void testSetEndPosition_Restore_Forward(bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(
+ unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
});
- auto cursor = sorted->newCursor(opCtx.get());
- cursor->setEndPosition(BSONObj(), inclusive);
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
- TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Unique_Inclusive) {
- testSetEndPosition_Empty_Forward(true, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Unique_Exclusive) {
- testSetEndPosition_Empty_Forward(true, false);
- }
- TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Inclusive) {
- testSetEndPosition_Empty_Forward(false, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Exclusive) {
- testSetEndPosition_Empty_Forward(false, false);
- }
-
- void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
+ auto cursor = sorted->newCursor(opCtx.get());
+ cursor->setEndPosition(key3, false); // Should never see key3 or key4.
+
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+
+ cursor->savePositioned();
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx,
+ sorted,
+ {
+ {key2, loc1}, {key3, loc1},
+ });
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+TEST(SortedDataInterface, SetEndPosition_Restore_Forward_Unique) {
+ testSetEndPosition_Restore_Forward(true);
+}
+TEST(SortedDataInterface, SetEndPosition_Restore_Forward_Standard) {
+ testSetEndPosition_Restore_Forward(false);
+}
+
+void testSetEndPosition_Restore_Reverse(bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(
+ unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
});
- auto cursor = sorted->newCursor(opCtx.get(), false);
- cursor->setEndPosition(BSONObj(), inclusive);
-
- ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
- TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Unique_Inclusive) {
- testSetEndPosition_Empty_Reverse(true, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Unique_Exclusive) {
- testSetEndPosition_Empty_Reverse(true, false);
- }
- TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Standard_Inclusive) {
- testSetEndPosition_Empty_Reverse(false, true);
- }
- TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Standard_Exclusive) {
- testSetEndPosition_Empty_Reverse(false, false);
- }
-} // namespace mongo
+ auto cursor = sorted->newCursor(opCtx.get(), false);
+ cursor->setEndPosition(key2, false); // Should never see key1 or key2.
+
+ ASSERT_EQ(cursor->seek(key4, true), IndexKeyEntry(key4, loc1));
+
+ cursor->savePositioned();
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx,
+ sorted,
+ {
+ {key2, loc1}, {key3, loc1},
+ });
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+TEST(SortedDataInterface, SetEndPosition_Restore_Reverse_Unique) {
+ testSetEndPosition_Restore_Reverse(true);
+}
+TEST(SortedDataInterface, SetEndPosition_Restore_Reverse_Standard) {
+ testSetEndPosition_Restore_Reverse(false);
+}
+
+// Test that restore always updates the end cursor if one is used. Some storage engines use a
+// cursor positioned at the first out-of-range document and have next() check if the current
+// position is the same as the end cursor. End cursor maintenance cannot be directly tested
+// (since implementations are free not to use end cursors) but implementations that incorrectly
+// restore end cursors would tend to fail this test.
+void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key4, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get());
+ cursor->setEndPosition(key2, true);
+
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+
+ // A potential source of bugs is not restoring end cursor with saveUnpositioned().
+ cursor->saveUnpositioned();
+ insertToIndex(opCtx,
+ sorted,
+ {
+ {key2, loc1}, // in range
+ {key3, loc1}, // out of range
+ });
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Forward_Unique) {
+ testSetEndPosition_RestoreEndCursor_Forward(true);
+}
+TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Forward_Standard) {
+ testSetEndPosition_RestoreEndCursor_Forward(false);
+}
+
+void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key4, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), false);
+ cursor->setEndPosition(key3, true);
+
+ ASSERT_EQ(cursor->seek(key4, true), IndexKeyEntry(key4, loc1));
+
+ cursor->saveUnpositioned();
+ insertToIndex(opCtx,
+ sorted,
+ {
+                      {key2, loc1},  // out of range
+                      {key3, loc1},  // in range
+ });
+ cursor->restore(opCtx.get()); // must restore end cursor even with saveUnpositioned().
+
+ ASSERT_EQ(cursor->seek(key4, true), IndexKeyEntry(key4, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Unique) {
+    testSetEndPosition_RestoreEndCursor_Reverse(true);
+}
+TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Standard) {
+    testSetEndPosition_RestoreEndCursor_Reverse(false);
+}
+
+// setEndPosition with empty BSONObj is supposed to mean "no end position", regardless of
+// inclusive flag or direction.
+void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get());
+ cursor->setEndPosition(BSONObj(), inclusive);
+
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Unique_Inclusive) {
+ testSetEndPosition_Empty_Forward(true, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Unique_Exclusive) {
+ testSetEndPosition_Empty_Forward(true, false);
+}
+TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Inclusive) {
+ testSetEndPosition_Empty_Forward(false, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Exclusive) {
+ testSetEndPosition_Empty_Forward(false, false);
+}
+
+void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), false);
+ cursor->setEndPosition(BSONObj(), inclusive);
+
+ ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Unique_Inclusive) {
+ testSetEndPosition_Empty_Reverse(true, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Unique_Exclusive) {
+ testSetEndPosition_Empty_Reverse(true, false);
+}
+TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Standard_Inclusive) {
+ testSetEndPosition_Empty_Reverse(false, true);
+}
+TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Standard_Exclusive) {
+ testSetEndPosition_Empty_Reverse(false, false);
+}
+} // namespace mongo
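Editor's note on the setEndPosition behaviour covered in the file above: the end key bounds every subsequent seek() and next(), the inclusive flag decides whether the bound itself is returned, and an empty BSONObj clears the bound. A minimal sketch of a bounded forward scan in the same style as these tests (the harness names 'sorted', 'opCtx', 'key1', and 'key3' stand in for objects set up elsewhere and are not part of this diff):

    // Scan the inclusive range [key1, key3]; with inclusive = false the
    // iteration would stop before key3 instead.
    auto cursor = sorted->newCursor(opCtx.get());
    cursor->setEndPosition(key3, /*inclusive*/ true);
    for (auto entry = cursor->seek(key1, true); entry; entry = cursor->next()) {
        // process *entry; next() returns boost::none once the end position is passed
    }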
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
index b69a39a15fd..80e71b57da2 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp
@@ -37,591 +37,608 @@
namespace mongo {
- // Insert a key and try to locate it using a forward cursor
- // by specifying its exact key and RecordId.
- TEST( SortedDataInterface, Locate ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT( !cursor->seek( key1, true ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
- }
+// Insert a key and try to locate it using a forward cursor
+// by specifying its exact key and RecordId.
+TEST(SortedDataInterface, Locate) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT(!cursor->seek(key1, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
}
}
- // Insert a key and try to locate it using a reverse cursor
- // by specifying its exact key and RecordId.
- TEST( SortedDataInterface, LocateReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- ASSERT( !cursor->seek( key1, true ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
- }
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert a key and try to locate it using a reverse cursor
+// by specifying its exact key and RecordId.
+TEST(SortedDataInterface, LocateReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ ASSERT(!cursor->seek(key1, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
}
}
- // Insert a compound key and try to locate it using a forward cursor
- // by specifying its exact key and RecordId.
- TEST( SortedDataInterface, LocateCompoundKey ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT( !cursor->seek( compoundKey1a, true ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, true ) );
- uow.commit();
- }
- }
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert a compound key and try to locate it using a forward cursor
+// by specifying its exact key and RecordId.
+TEST(SortedDataInterface, LocateCompoundKey) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT(!cursor->seek(compoundKey1a, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
-
- ASSERT_EQ(cursor->seek(compoundKey1a, true), IndexKeyEntry(compoundKey1a, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, true));
+ uow.commit();
}
}
- // Insert a compound key and try to locate it using a reverse cursor
- // by specifying its exact key and RecordId.
- TEST( SortedDataInterface, LocateCompoundKeyReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- ASSERT( !cursor->seek( compoundKey1a, true ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, true ) );
- uow.commit();
- }
- }
+ ASSERT_EQ(cursor->seek(compoundKey1a, true), IndexKeyEntry(compoundKey1a, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert a compound key and try to locate it using a reverse cursor
+// by specifying its exact key and RecordId.
+TEST(SortedDataInterface, LocateCompoundKeyReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ ASSERT(!cursor->seek(compoundKey1a, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
-
- ASSERT_EQ(cursor->seek(compoundKey1a, true), IndexKeyEntry(compoundKey1a, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, true));
+ uow.commit();
}
}
- // Insert multiple keys and try to locate them using a forward cursor
- // by specifying their exact key and RecordId.
- TEST( SortedDataInterface, LocateMultiple ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT( !cursor->seek( key1, true ) );
- }
+ ASSERT_EQ(cursor->seek(compoundKey1a, true), IndexKeyEntry(compoundKey1a, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert multiple keys and try to locate them using a forward cursor
+// by specifying their exact key and RecordId.
+TEST(SortedDataInterface, LocateMultiple) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT(!cursor->seek(key1, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc3));
- ASSERT_EQ(cursor->next(), boost::none);
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc3));
+ ASSERT_EQ(cursor->next(), boost::none);
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc3));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc3));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert multiple keys and try to locate them using a reverse cursor
+// by specifying their exact key and RecordId.
+TEST(SortedDataInterface, LocateMultipleReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ ASSERT(!cursor->seek(key3, true));
}
- // Insert multiple keys and try to locate them using a reverse cursor
- // by specifying their exact key and RecordId.
- TEST( SortedDataInterface, LocateMultipleReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- ASSERT( !cursor->seek( key3, true ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
- ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc3));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc3));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert multiple compound keys and try to locate them using a forward cursor
+// by specifying their exact key and RecordId.
+TEST(SortedDataInterface, LocateMultipleCompoundKeys) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT(!cursor->seek(compoundKey1a, true));
}
- // Insert multiple compound keys and try to locate them using a forward cursor
- // by specifying their exact key and RecordId.
- TEST( SortedDataInterface, LocateMultipleCompoundKeys ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT( !cursor->seek( compoundKey1a, true ) );
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1b, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey2b, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1b, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey2b, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
-
- ASSERT_EQ(cursor->seek(compoundKey1a, true), IndexKeyEntry(compoundKey1a, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey2b, loc3));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1c, loc4, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey3a, loc5, true ) );
- uow.commit();
- }
- }
+ ASSERT_EQ(cursor->seek(compoundKey1a, true), IndexKeyEntry(compoundKey1a, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey2b, loc3));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
-
- ASSERT_EQ(cursor->seek(compoundKey1a, true), IndexKeyEntry(compoundKey1a, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1c, loc4));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey2b, loc3));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey3a, loc5));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1c, loc4, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey3a, loc5, true));
+ uow.commit();
}
}
- // Insert multiple compound keys and try to locate them using a reverse cursor
- // by specifying their exact key and RecordId.
- TEST( SortedDataInterface, LocateMultipleCompoundKeysReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- ASSERT( !cursor->seek( compoundKey3a, true ) );
- }
+ ASSERT_EQ(cursor->seek(compoundKey1a, true), IndexKeyEntry(compoundKey1a, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1c, loc4));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey2b, loc3));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey3a, loc5));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert multiple compound keys and try to locate them using a reverse cursor
+// by specifying their exact key and RecordId.
+TEST(SortedDataInterface, LocateMultipleCompoundKeysReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ ASSERT(!cursor->seek(compoundKey3a, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1b, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey2b, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1b, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey2b, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
-
- ASSERT_EQ(cursor->seek(compoundKey2b, true), IndexKeyEntry(compoundKey2b, loc3));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1a, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1c, loc4, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey3a, loc5, true ) );
- uow.commit();
- }
- }
+ ASSERT_EQ(cursor->seek(compoundKey2b, true), IndexKeyEntry(compoundKey2b, loc3));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1a, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
-
- ASSERT_EQ(cursor->seek(compoundKey3a, true), IndexKeyEntry(compoundKey3a, loc5));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey2b, loc3));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1c, loc4));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1a, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1c, loc4, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey3a, loc5, true));
+ uow.commit();
}
}
- // Insert multiple keys and try to locate them using a forward cursor
- // by specifying either a smaller key or RecordId.
- TEST( SortedDataInterface, LocateIndirect ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT( !cursor->seek( key1, true ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+
+ ASSERT_EQ(cursor->seek(compoundKey3a, true), IndexKeyEntry(compoundKey3a, loc5));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey2b, loc3));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1c, loc4));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1a, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert multiple keys and try to locate them using a forward cursor
+// by specifying either a smaller key or RecordId.
+TEST(SortedDataInterface, LocateIndirect) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT(!cursor->seek(key1, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
-
- ASSERT_EQ(cursor->seek(key1, false), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, true ) );
- uow.commit();
- }
- }
+ ASSERT_EQ(cursor->seek(key1, false), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc3));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, true));
+ uow.commit();
}
}
- // Insert multiple keys and try to locate them using a reverse cursor
- // by specifying either a larger key or RecordId.
- TEST( SortedDataInterface, LocateIndirectReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- ASSERT( !cursor->seek( key3, true ) );
- }
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc3));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert multiple keys and try to locate them using a reverse cursor
+// by specifying either a larger key or RecordId.
+TEST(SortedDataInterface, LocateIndirectReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ ASSERT(!cursor->seek(key3, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
-
- ASSERT_EQ(cursor->seek(key2, false), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, true ) );
- uow.commit();
- }
- }
+ ASSERT_EQ(cursor->seek(key2, false), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
-
- ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc3));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, true));
+ uow.commit();
}
}
- // Insert multiple compound keys and try to locate them using a forward cursor
- // by specifying either a smaller key or RecordId.
- TEST( SortedDataInterface, LocateIndirectCompoundKeys ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT( !cursor->seek( compoundKey1a, true ) );
- }
+ ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc3));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert multiple compound keys and try to locate them using a forward cursor
+// by specifying either a smaller key or RecordId.
+TEST(SortedDataInterface, LocateIndirectCompoundKeys) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT(!cursor->seek(compoundKey1a, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1b, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey2b, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1b, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey2b, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
-
- ASSERT_EQ(cursor->seek(compoundKey1a, false), IndexKeyEntry(compoundKey1b, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey2b, loc3));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1c, loc4, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey3a, loc5, true ) );
- uow.commit();
- }
- }
+ ASSERT_EQ(cursor->seek(compoundKey1a, false), IndexKeyEntry(compoundKey1b, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey2b, loc3));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
-
- ASSERT_EQ(cursor->seek(compoundKey2a, true), IndexKeyEntry(compoundKey2b, loc3));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey3a, loc5));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1c, loc4, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey3a, loc5, true));
+ uow.commit();
}
}
- // Insert multiple compound keys and try to locate them using a reverse cursor
- // by specifying either a larger key or RecordId.
- TEST( SortedDataInterface, LocateIndirectCompoundKeysReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- ASSERT( !cursor->seek( compoundKey3a, true ) );
- }
+ ASSERT_EQ(cursor->seek(compoundKey2a, true), IndexKeyEntry(compoundKey2b, loc3));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey3a, loc5));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
+
+// Insert multiple compound keys and try to locate them using a reverse cursor
+// by specifying either a larger key or RecordId.
+TEST(SortedDataInterface, LocateIndirectCompoundKeysReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ ASSERT(!cursor->seek(compoundKey3a, true));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1b, loc2, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey2b, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1b, loc2, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey2b, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(compoundKey2b, false), IndexKeyEntry(compoundKey1b, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1a, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ ASSERT_EQ(cursor->seek(compoundKey2b, false), IndexKeyEntry(compoundKey1b, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1a, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1c, loc4, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey3a, loc5, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1c, loc4, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey3a, loc5, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT_EQ(cursor->seek(compoundKey1d, true), IndexKeyEntry(compoundKey1c, loc4));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1a, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ ASSERT_EQ(cursor->seek(compoundKey1d, true), IndexKeyEntry(compoundKey1c, loc4));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1b, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(compoundKey1a, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
}
+}
- // Call locate on a forward cursor of an empty index and verify that the cursor
- // is positioned at EOF.
- TEST( SortedDataInterface, LocateEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Call locate on a forward cursor of an empty index and verify that the cursor
+// is positioned at EOF.
+TEST(SortedDataInterface, LocateEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT( !cursor->seek( BSONObj(), true ) );
- ASSERT( !cursor->next() );
- }
+ ASSERT(!cursor->seek(BSONObj(), true));
+ ASSERT(!cursor->next());
}
+}
- // Call locate on a reverse cursor of an empty index and verify that the cursor
- // is positioned at EOF.
- TEST( SortedDataInterface, LocateEmptyReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Call locate on a reverse cursor of an empty index and verify that the cursor
+// is positioned at EOF.
+TEST(SortedDataInterface, LocateEmptyReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
- ASSERT( !cursor->seek( BSONObj(), true ) );
- ASSERT( !cursor->next() );
- }
+ ASSERT(!cursor->seek(BSONObj(), true));
+ ASSERT(!cursor->next());
}
+}
-} // namespace mongo
+} // namespace mongo
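
The locate tests above all exercise the same cursor contract: seek(key, inclusive) positions a forward cursor on the first entry not less than the key (or strictly greater when inclusive is false), and next() walks entries in order until it yields boost::none. A minimal standalone sketch of that contract, using only the C++ standard library with a std::multimap standing in for the storage engine, and with hypothetical names (SimpleCursor, Entry) rather than the real test harness, looks roughly like this:

#include <iostream>
#include <map>
#include <optional>
#include <string>

struct Entry {
    std::string key;
    long recordId;
};

class SimpleCursor {
public:
    explicit SimpleCursor(const std::multimap<std::string, long>& index)
        : _index(index), _it(index.end()) {}

    // Position on the first entry >= key (inclusive) or > key (exclusive).
    std::optional<Entry> seek(const std::string& key, bool inclusive) {
        _it = inclusive ? _index.lower_bound(key) : _index.upper_bound(key);
        return current();
    }

    // Advance one entry; returns nothing once the end is reached.
    std::optional<Entry> next() {
        if (_it != _index.end())
            ++_it;
        return current();
    }

private:
    std::optional<Entry> current() const {
        if (_it == _index.end())
            return std::nullopt;
        return Entry{_it->first, _it->second};
    }

    const std::multimap<std::string, long>& _index;
    std::multimap<std::string, long>::const_iterator _it;
};

int main() {
    std::multimap<std::string, long> index{{"a", 1}, {"b", 2}, {"c", 3}};
    SimpleCursor cursor(index);

    // An exclusive seek on "a" lands on the next key, "b" (cf. LocateIndirect).
    std::optional<Entry> entry = cursor.seek("a", /*inclusive=*/false);
    std::cout << entry->key << "\n";  // prints "b"

    // Walking forward reaches "c" and then the end, the moral equivalent of
    // ASSERT_EQ(cursor->next(), boost::none) in the tests above.
    while ((entry = cursor.next()))
        std::cout << entry->key << "\n";
    return 0;
}

A reverse cursor is the mirror image: it seeks to the last entry not greater than the key and steps backwards, which is what the *Reversed tests assert.
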
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
index aa1b2adfe00..679bb3f8c8b 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
@@ -37,480 +37,481 @@
namespace mongo {
- // Insert multiple keys and try to iterate through all of them
- // using a forward cursor while calling savePosition() and
- // restorePosition() in succession.
- TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursor ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- BSONObj key = BSON( "" << i );
- RecordId loc( 42, i * 2 );
- ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
- uow.commit();
- }
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- int i = 0;
- for (auto entry = cursor->seek(minKey, true); entry; i++, entry = cursor->next()) {
- ASSERT_LT(i, nToInsert);
- ASSERT_EQ(entry, IndexKeyEntry(BSON( "" << i), RecordId(42, i * 2)));
-
- cursor->savePositioned();
- cursor->restore( opCtx.get() );
- }
- ASSERT( !cursor->next() );
- ASSERT_EQ(i, nToInsert);
- }
+// Insert multiple keys and try to iterate through all of them
+// using a forward cursor while calling savePositioned() and
+// restore() in succession.
+TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursor) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert multiple keys and try to iterate through all of them
- // using a reverse cursor while calling savePosition() and
- // restorePosition() in succession.
- TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursorReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ BSONObj key = BSON("" << i);
+ RecordId loc(42, i * 2);
+ ASSERT_OK(sorted->insert(opCtx.get(), key, loc, true));
+ uow.commit();
}
+ }
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- BSONObj key = BSON( "" << i );
- RecordId loc( 42, i * 2 );
- ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ int i = 0;
+ for (auto entry = cursor->seek(minKey, true); entry; i++, entry = cursor->next()) {
+ ASSERT_LT(i, nToInsert);
+ ASSERT_EQ(entry, IndexKeyEntry(BSON("" << i), RecordId(42, i * 2)));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- int i = nToInsert - 1;
- for (auto entry = cursor->seek(maxKey, true); entry; i--, entry = cursor->next()) {
- ASSERT_GTE(i, 0);
- ASSERT_EQ(entry, IndexKeyEntry(BSON( "" << i), RecordId(42, i * 2)));
-
- cursor->savePositioned();
- cursor->restore( opCtx.get() );
- }
- ASSERT( !cursor->next() );
- ASSERT_EQ(i, -1);
+ cursor->savePositioned();
+ cursor->restore(opCtx.get());
}
+ ASSERT(!cursor->next());
+ ASSERT_EQ(i, nToInsert);
+ }
+}
+
+// Insert multiple keys and try to iterate through all of them
+// using a reverse cursor while calling savePositioned() and
+// restore() in succession.
+TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert the same key multiple times and try to iterate through each
- // occurrence using a forward cursor while calling savePosition() and
- // restorePosition() in succession. Verify that the RecordId is saved
- // as part of the current position of the cursor.
- TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeys ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ BSONObj key = BSON("" << i);
+ RecordId loc(42, i * 2);
+ ASSERT_OK(sorted->insert(opCtx.get(), key, loc, true));
+ uow.commit();
}
+ }
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- RecordId loc( 42, i * 2 );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc, true /* allow duplicates */ ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ int i = nToInsert - 1;
+ for (auto entry = cursor->seek(maxKey, true); entry; i--, entry = cursor->next()) {
+ ASSERT_GTE(i, 0);
+ ASSERT_EQ(entry, IndexKeyEntry(BSON("" << i), RecordId(42, i * 2)));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- int i = 0;
- for (auto entry = cursor->seek(minKey, true); entry; i++, entry = cursor->next()) {
- ASSERT_LT(i, nToInsert);
- ASSERT_EQ(entry, IndexKeyEntry(key1, RecordId(42, i * 2)));
-
- cursor->savePositioned();
- cursor->restore( opCtx.get() );
- }
- ASSERT( !cursor->next() );
- ASSERT_EQ(i, nToInsert);
+ cursor->savePositioned();
+ cursor->restore(opCtx.get());
}
+ ASSERT(!cursor->next());
+ ASSERT_EQ(i, -1);
+ }
+}
+
+// Insert the same key multiple times and try to iterate through each
+// occurrence using a forward cursor while calling savePositioned() and
+// restore() in succession. Verify that the RecordId is saved
+// as part of the current position of the cursor.
+TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeys) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Insert the same key multiple times and try to iterate through each
- // occurrence using a reverse cursor while calling savePosition() and
- // restorePosition() in succession. Verify that the RecordId is saved
- // as part of the current position of the cursor.
- TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeysReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ RecordId loc(42, i * 2);
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc, true /* allow duplicates */));
+ uow.commit();
}
+ }
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- RecordId loc( 42, i * 2 );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc, true /* allow duplicates */ ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ int i = 0;
+ for (auto entry = cursor->seek(minKey, true); entry; i++, entry = cursor->next()) {
+ ASSERT_LT(i, nToInsert);
+ ASSERT_EQ(entry, IndexKeyEntry(key1, RecordId(42, i * 2)));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- int i = nToInsert - 1;
- for (auto entry = cursor->seek(maxKey, true); entry; i--, entry = cursor->next()) {
- ASSERT_GTE(i, 0);
- ASSERT_EQ(entry, IndexKeyEntry(key1, RecordId(42, i * 2)));
-
- cursor->savePositioned();
- cursor->restore( opCtx.get() );
- }
- ASSERT( !cursor->next() );
- ASSERT_EQ(i, -1);
+ cursor->savePositioned();
+ cursor->restore(opCtx.get());
}
+ ASSERT(!cursor->next());
+ ASSERT_EQ(i, nToInsert);
+ }
+}
+
+// Insert the same key multiple times and try to iterate through each
+// occurrence using a reverse cursor while calling savePositioned() and
+// restore() in succession. Verify that the RecordId is saved
+// as part of the current position of the cursor.
+TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeysReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Call savePosition() on a forward cursor without ever calling restorePosition().
- // May be useful to run this test under valgrind to verify there are no leaks.
- TEST( SortedDataInterface, SavePositionWithoutRestore ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
-
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ RecordId loc(42, i * 2);
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc, true /* allow duplicates */));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ int i = nToInsert - 1;
+ for (auto entry = cursor->seek(maxKey, true); entry; i--, entry = cursor->next()) {
+ ASSERT_GTE(i, 0);
+ ASSERT_EQ(entry, IndexKeyEntry(key1, RecordId(42, i * 2)));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
cursor->savePositioned();
+ cursor->restore(opCtx.get());
}
+ ASSERT(!cursor->next());
+ ASSERT_EQ(i, -1);
}
+}
- // Call savePosition() on a reverse cursor without ever calling restorePosition().
- // May be useful to run this test under valgrind to verify there are no leaks.
- TEST( SortedDataInterface, SavePositionWithoutRestoreReversed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
- }
+// Call savePositioned() on a forward cursor without ever calling restore().
+// May be useful to run this test under valgrind to verify there are no leaks.
+TEST(SortedDataInterface, SavePositionWithoutRestore) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- cursor->savePositioned();
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
+ uow.commit();
}
}
-
- // Ensure that restore lands as close as possible to original position, even if data inserted
- // while saved.
- void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key3, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get(), forward);
- const auto seekPoint = forward ? key1 : key3;
-
- ASSERT_EQ(cursor->seek(seekPoint, true), IndexKeyEntry(seekPoint, loc1));
- cursor->savePositioned();
- insertToIndex(opCtx, sorted, {{key2, loc1}});
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Forward_Unique) {
- testSaveAndRestorePositionSeesNewInserts(true, true);
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
}
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Forward_Standard) {
- testSaveAndRestorePositionSeesNewInserts(true, false);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Reverse_Unique) {
- testSaveAndRestorePositionSeesNewInserts(false, true);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Reverse_Standard) {
- testSaveAndRestorePositionSeesNewInserts(false, false);
- }
-
- // Ensure that repeated restores lands as close as possible to original position, even if data
- // inserted while saved and the current position removed.
- void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key3, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get(), forward);
- const auto seekPoint = forward ? key1 : key3;
-
- ASSERT_EQ(cursor->seek(seekPoint, true), IndexKeyEntry(seekPoint, loc1));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {{key1, loc1}});
- cursor->restore(opCtx.get());
- // The restore may have seeked since it can't return to the saved position.
-
- cursor->savePositioned(); // Should still save originally saved key as "current position".
- insertToIndex(opCtx, sorted, {{key2, loc1}});
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Forward_Unique) {
- testSaveAndRestorePositionSeesNewInsertsAfterRemove(true, true);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Forward_Standard) {
- testSaveAndRestorePositionSeesNewInsertsAfterRemove(true, false);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Reverse_Unique) {
- testSaveAndRestorePositionSeesNewInsertsAfterRemove(false, true);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Reverse_Standard) {
- testSaveAndRestorePositionSeesNewInsertsAfterRemove(false, false);
}
+}
- // Ensure that repeated restores lands as close as possible to original position, even if data
- // inserted while saved and the current position removed in a way that temporarily makes the
- // cursor EOF.
- void testSaveAndRestorePositionSeesNewInsertsAfterEOF(bool forward, bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(false, {
- {key1, loc1},
- });
+// Call savePositioned() on a reverse cursor without ever calling restore().
+// May be useful to run this test under valgrind to verify there are no leaks.
+TEST(SortedDataInterface, SavePositionWithoutRestoreReversed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- auto cursor = sorted->newCursor(opCtx.get(), forward);
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
- // next() would return EOF now.
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {{key1, loc1}});
- cursor->restore(opCtx.get());
- // The restore may have seeked to EOF.
-
- auto insertPoint = forward ? key2 : key0;
- cursor->savePositioned(); // Should still save key1 as "current position".
- insertToIndex(opCtx, sorted, {{insertPoint, loc1}});
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(insertPoint, loc1));
- }
-
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Forward_Unique) {
- testSaveAndRestorePositionSeesNewInsertsAfterEOF(true, true);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Forward_Standard) {
- testSaveAndRestorePositionSeesNewInsertsAfterEOF(true, false);
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_Unique) {
- testSaveAndRestorePositionSeesNewInsertsAfterEOF(false, true);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_Standard) {
- testSaveAndRestorePositionSeesNewInsertsAfterEOF(false, false);
- }
-
- // Make sure we restore to a RecordId at or ahead of save point if same key.
- void testSaveAndRestorePositionConsidersRecordId_Forward(bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get());
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {{key1, loc1}});
- insertToIndex(opCtx, sorted, {{key1, loc2}});
- cursor->restore(opCtx.get()); // Lands on inserted key.
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc2));
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {{key1, loc2}});
- insertToIndex(opCtx, sorted, {{key1, loc1}});
- cursor->restore(opCtx.get()); // Lands after inserted.
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {{key2, loc1}});
- cursor->restore(opCtx.get());
-
- cursor->savePositioned();
- insertToIndex(opCtx, sorted, {{key2, loc1}});
- cursor->restore(opCtx.get()); // Lands at same point as initial save.
-
- // Advances from restore point since restore didn't move position.
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
- }
- TEST(SortedDataInterface, SaveAndRestorePositionConsidersRecordId_Forward_Standard) {
- testSaveAndRestorePositionConsidersRecordId_Forward(false);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionConsidersRecordId_Forward_Unique) {
- testSaveAndRestorePositionConsidersRecordId_Forward(true);
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
+ }
}
-
- // Make sure we restore to a RecordId at or ahead of save point if same key on reverse cursor.
- void testSaveAndRestorePositionConsidersRecordId_Reverse(bool unique) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key0, loc1},
- {key1, loc1},
- {key2, loc2},
- });
-
- auto cursor = sorted->newCursor(opCtx.get(), false);
- ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {{key2, loc2}});
- insertToIndex(opCtx, sorted, {{key2, loc1}});
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {{key2, loc1}});
- insertToIndex(opCtx, sorted, {{key2, loc2}});
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
-
- cursor->savePositioned();
- removeFromIndex(opCtx, sorted, {{key1, loc1}});
- cursor->restore(opCtx.get());
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
cursor->savePositioned();
- insertToIndex(opCtx, sorted, {{key1, loc1}});
- cursor->restore(opCtx.get()); // Lands at same point as initial save.
-
- // Advances from restore point since restore didn't move position.
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key0, loc1));
- }
- TEST(SortedDataInterface, SaveAndRestorePositionConsidersRecordId_Reverse_Standard) {
- testSaveAndRestorePositionConsidersRecordId_Reverse(false);
- }
- TEST(SortedDataInterface, SaveAndRestorePositionConsidersRecordId_Reverse_Unique) {
- testSaveAndRestorePositionConsidersRecordId_Reverse(true);
}
+}
+
+// Ensure that restore lands as close as possible to original position, even if data inserted
+// while saved.
+void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key3, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), forward);
+ const auto seekPoint = forward ? key1 : key3;
+
+ ASSERT_EQ(cursor->seek(seekPoint, true), IndexKeyEntry(seekPoint, loc1));
+
+ cursor->savePositioned();
+ insertToIndex(opCtx, sorted, {{key2, loc1}});
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Forward_Unique) {
+ testSaveAndRestorePositionSeesNewInserts(true, true);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Forward_Standard) {
+ testSaveAndRestorePositionSeesNewInserts(true, false);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Reverse_Unique) {
+ testSaveAndRestorePositionSeesNewInserts(false, true);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Reverse_Standard) {
+ testSaveAndRestorePositionSeesNewInserts(false, false);
+}
+
+// Ensure that repeated restores land as close as possible to original position, even if data
+// inserted while saved and the current position removed.
+void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key3, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), forward);
+ const auto seekPoint = forward ? key1 : key3;
+
+ ASSERT_EQ(cursor->seek(seekPoint, true), IndexKeyEntry(seekPoint, loc1));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx, sorted, {{key1, loc1}});
+ cursor->restore(opCtx.get());
+ // The restore may have seeked since it can't return to the saved position.
+
+ cursor->savePositioned(); // Should still save originally saved key as "current position".
+ insertToIndex(opCtx, sorted, {{key2, loc1}});
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Forward_Unique) {
+ testSaveAndRestorePositionSeesNewInsertsAfterRemove(true, true);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Forward_Standard) {
+ testSaveAndRestorePositionSeesNewInsertsAfterRemove(true, false);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Reverse_Unique) {
+ testSaveAndRestorePositionSeesNewInsertsAfterRemove(false, true);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Reverse_Standard) {
+ testSaveAndRestorePositionSeesNewInsertsAfterRemove(false, false);
+}
+
+// Ensure that repeated restores land as close as possible to original position, even if data
+// inserted while saved and the current position removed in a way that temporarily makes the
+// cursor EOF.
+void testSaveAndRestorePositionSeesNewInsertsAfterEOF(bool forward, bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(false,
+ {
+ {key1, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), forward);
+
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+ // next() would return EOF now.
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx, sorted, {{key1, loc1}});
+ cursor->restore(opCtx.get());
+ // The restore may have seeked to EOF.
+
+ auto insertPoint = forward ? key2 : key0;
+ cursor->savePositioned(); // Should still save key1 as "current position".
+ insertToIndex(opCtx, sorted, {{insertPoint, loc1}});
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(insertPoint, loc1));
+}
+
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Forward_Unique) {
+ testSaveAndRestorePositionSeesNewInsertsAfterEOF(true, true);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Forward_Standard) {
+ testSaveAndRestorePositionSeesNewInsertsAfterEOF(true, false);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_Unique) {
+ testSaveAndRestorePositionSeesNewInsertsAfterEOF(false, true);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_Standard) {
+ testSaveAndRestorePositionSeesNewInsertsAfterEOF(false, false);
+}
+
+// Make sure we restore to a RecordId at or ahead of save point if same key.
+void testSaveAndRestorePositionConsidersRecordId_Forward(bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get());
+
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx, sorted, {{key1, loc1}});
+ insertToIndex(opCtx, sorted, {{key1, loc2}});
+ cursor->restore(opCtx.get()); // Lands on inserted key.
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc2));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx, sorted, {{key1, loc2}});
+ insertToIndex(opCtx, sorted, {{key1, loc1}});
+ cursor->restore(opCtx.get()); // Lands after inserted.
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx, sorted, {{key2, loc1}});
+ cursor->restore(opCtx.get());
+
+ cursor->savePositioned();
+ insertToIndex(opCtx, sorted, {{key2, loc1}});
+ cursor->restore(opCtx.get()); // Lands at same point as initial save.
+
+ // Advances from restore point since restore didn't move position.
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
+}
+TEST(SortedDataInterface, SaveAndRestorePositionConsidersRecordId_Forward_Standard) {
+ testSaveAndRestorePositionConsidersRecordId_Forward(false);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionConsidersRecordId_Forward_Unique) {
+ testSaveAndRestorePositionConsidersRecordId_Forward(true);
+}
+
+// Make sure we restore to a RecordId at or ahead of save point if same key on reverse cursor.
+void testSaveAndRestorePositionConsidersRecordId_Reverse(bool unique) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key0, loc1}, {key1, loc1}, {key2, loc2},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), false);
+
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc2));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx, sorted, {{key2, loc2}});
+ insertToIndex(opCtx, sorted, {{key2, loc1}});
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx, sorted, {{key2, loc1}});
+ insertToIndex(opCtx, sorted, {{key2, loc2}});
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
+
+ cursor->savePositioned();
+ removeFromIndex(opCtx, sorted, {{key1, loc1}});
+ cursor->restore(opCtx.get());
+
+ cursor->savePositioned();
+ insertToIndex(opCtx, sorted, {{key1, loc1}});
+ cursor->restore(opCtx.get()); // Lands at same point as initial save.
+
+ // Advances from restore point since restore didn't move position.
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key0, loc1));
+}
+TEST(SortedDataInterface, SaveAndRestorePositionConsidersRecordId_Reverse_Standard) {
+ testSaveAndRestorePositionConsidersRecordId_Reverse(false);
+}
+TEST(SortedDataInterface, SaveAndRestorePositionConsidersRecordId_Reverse_Unique) {
+ testSaveAndRestorePositionConsidersRecordId_Reverse(true);
+}
+
+// Ensure that SaveUnpositioned allows later use of the cursor.
+TEST(SortedDataInterface, SaveUnpositionedAndRestore) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(false,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get());
+
+ ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc1));
+
+ cursor->saveUnpositioned();
+ removeFromIndex(opCtx, sorted, {{key2, loc1}});
+ cursor->restore(opCtx.get());
- // Ensure that SaveUnpositioned allows later use of the cursor.
- TEST(SortedDataInterface, SaveUnpositionedAndRestore) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(false, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get());
-
- ASSERT_EQ(cursor->seek(key2, true), IndexKeyEntry(key2, loc1));
-
- cursor->saveUnpositioned();
- removeFromIndex(opCtx, sorted, {{key2, loc1}});
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
-
- cursor->saveUnpositioned();
- cursor->restore(opCtx.get());
-
- ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc1));
- }
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, loc1));
-} // namespace mongo
+ cursor->saveUnpositioned();
+ cursor->restore(opCtx.get());
+
+ ASSERT_EQ(cursor->seek(key3, true), IndexKeyEntry(key3, loc1));
+}
+
+} // namespace mongo
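
The save/restore tests above pin down the contract for savePositioned() and restore(): restoring re-establishes the cursor at the saved entry if it still exists, or otherwise at the next entry in cursor order, so keys inserted while the cursor was saved become visible to the next() that follows. A rough standalone model of that behaviour, with a hypothetical ToyCursor over a std::set, and with RecordId, OperationContext, and WriteUnitOfWork deliberately omitted, is sketched below:

#include <cassert>
#include <optional>
#include <set>
#include <string>

class ToyCursor {
public:
    explicit ToyCursor(std::set<std::string>& index) : _index(index), _it(index.end()) {}

    std::optional<std::string> seek(const std::string& key) {
        _it = _index.lower_bound(key);
        return current();
    }

    std::optional<std::string> next() {
        if (_it != _index.end())
            ++_it;
        return current();
    }

    // Remember the current key so the position can be re-established later,
    // mirroring savePositioned() in the interface under test.
    void savePositioned() {
        _saved = current();
    }

    // Land on the saved key if it still exists, otherwise on the next key after
    // it, mirroring what the SaveAndRestore* tests assert for restore().
    void restore() {
        _it = _saved ? _index.lower_bound(*_saved) : _index.end();
    }

private:
    std::optional<std::string> current() const {
        return _it == _index.end() ? std::nullopt
                                   : std::optional<std::string>(*_it);
    }

    std::set<std::string>& _index;
    std::set<std::string>::iterator _it;
    std::optional<std::string> _saved;
};

int main() {
    std::set<std::string> index{"key1", "key3"};
    ToyCursor cursor(index);

    assert(cursor.seek("key1") == "key1");

    cursor.savePositioned();
    index.insert("key2");  // insert while the cursor is saved
    cursor.restore();

    // As in SaveAndRestorePositionSeesNewInserts, the newly inserted key is
    // visible to the next() call that follows the restore.
    assert(cursor.next() == "key2");
    return 0;
}

The real interface also keys the saved position on the RecordId, which is what the ConsidersRecordId tests above check and what this sketch leaves out.
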
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
index 926b23b5237..c767dbee859 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
@@ -34,106 +34,104 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- // Tests seekExact when it hits something.
- void testSeekExact_Hit(bool unique, bool forward) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- {key2, loc1},
- {key3, loc1},
+// Tests seekExact when it hits something.
+void testSeekExact_Hit(bool unique, bool forward) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), forward);
+
+ ASSERT_EQ(cursor->seekExact(key2), IndexKeyEntry(key2, loc1));
+
+ // Make sure iterating works. We may consider loosening this requirement if it is a hardship
+ // for some storage engines.
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(forward ? key3 : key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+TEST(SortedDataInterface, SeekExact_Hit_Unique_Forward) {
+ testSeekExact_Hit(true, true);
+}
+TEST(SortedDataInterface, SeekExact_Hit_Unique_Reverse) {
+ testSeekExact_Hit(true, false);
+}
+TEST(SortedDataInterface, SeekExact_Hit_Standard_Forward) {
+ testSeekExact_Hit(false, true);
+}
+TEST(SortedDataInterface, SeekExact_Hit_Standard_Reverse) {
+ testSeekExact_Hit(false, false);
+}
+
+// Tests seekExact when it doesn't hit the query.
+void testSeekExact_Miss(bool unique, bool forward) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1},
+ // No key2.
+ {key3, loc1},
+ });
+
+ auto cursor = sorted->newCursor(opCtx.get(), forward);
+
+ ASSERT_EQ(cursor->seekExact(key2), boost::none);
+
+ // Not testing iteration since the cursor's position following a failed seekExact is
+ // undefined. However, you must be able to seek somewhere else.
+ ASSERT_EQ(cursor->seekExact(key1), IndexKeyEntry(key1, loc1));
+}
+TEST(SortedDataInterface, SeekExact_Miss_Unique_Forward) {
+ testSeekExact_Miss(true, true);
+}
+TEST(SortedDataInterface, SeekExact_Miss_Unique_Reverse) {
+ testSeekExact_Miss(true, false);
+}
+TEST(SortedDataInterface, SeekExact_Miss_Standard_Forward) {
+ testSeekExact_Miss(false, true);
+}
+TEST(SortedDataInterface, SeekExact_Miss_Standard_Reverse) {
+ testSeekExact_Miss(false, false);
+}
+
+// Tests seekExact on forward cursor when it hits something with dup keys. Doesn't make sense
+// for unique indexes.
+TEST(SortedDataInterface, SeekExact_HitWithDups_Forward) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(
+ false,
+ {
+ {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
});
- auto cursor = sorted->newCursor(opCtx.get(), forward);
-
- ASSERT_EQ(cursor->seekExact(key2), IndexKeyEntry(key2, loc1));
-
- // Make sure iterating works. We may consider loosening this requirement if it is a hardship
- // for some storage engines.
- ASSERT_EQ(cursor->next(), IndexKeyEntry(forward ? key3 : key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
- TEST(SortedDataInterface, SeekExact_Hit_Unique_Forward) {
- testSeekExact_Hit(true, true);
- }
- TEST(SortedDataInterface, SeekExact_Hit_Unique_Reverse) {
- testSeekExact_Hit(true, false);
- }
- TEST(SortedDataInterface, SeekExact_Hit_Standard_Forward) {
- testSeekExact_Hit(false, true);
- }
- TEST(SortedDataInterface, SeekExact_Hit_Standard_Reverse) {
- testSeekExact_Hit(false, false);
- }
-
- // Tests seekExact when it doesn't hit the query.
- void testSeekExact_Miss(bool unique, bool forward) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique, {
- {key1, loc1},
- // No key2.
- {key3, loc1},
+ auto cursor = sorted->newCursor(opCtx.get());
+
+ ASSERT_EQ(cursor->seekExact(key2), IndexKeyEntry(key2, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+
+// Tests seekExact on reverse cursor when it hits something with dup keys. Doesn't make sense
+// for unique indexes.
+TEST(SortedDataInterface, SeekExact_HitWithDups_Reverse) {
+ auto harnessHelper = newHarnessHelper();
+ auto opCtx = harnessHelper->newOperationContext();
+ auto sorted = harnessHelper->newSortedDataInterface(
+ false,
+ {
+ {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
});
- auto cursor = sorted->newCursor(opCtx.get(), forward);
-
- ASSERT_EQ(cursor->seekExact(key2), boost::none);
-
- // Not testing iteration since the cursors position following a failed seekExact is
- // undefined. However, you must be able to seek somewhere else.
- ASSERT_EQ(cursor->seekExact(key1), IndexKeyEntry(key1, loc1));
- }
- TEST(SortedDataInterface, SeekExact_Miss_Unique_Forward) {
- testSeekExact_Miss(true, true);
- }
- TEST(SortedDataInterface, SeekExact_Miss_Unique_Reverse) {
- testSeekExact_Miss(true, false);
- }
- TEST(SortedDataInterface, SeekExact_Miss_Standard_Forward) {
- testSeekExact_Miss(false, true);
- }
- TEST(SortedDataInterface, SeekExact_Miss_Standard_Reverse) {
- testSeekExact_Miss(false, false);
- }
-
- // Tests seekExact on forward cursor when it hits something with dup keys. Doesn't make sense
- // for unique indexes.
- TEST(SortedDataInterface, SeekExact_HitWithDups_Forward) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(false, {
- {key1, loc1},
- {key2, loc1},
- {key2, loc2},
- {key3, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get());
-
- ASSERT_EQ(cursor->seekExact(key2), IndexKeyEntry(key2, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key3, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
-
- // Tests seekExact on reverse cursor when it hits something with dup keys. Doesn't make sense
- // for unique indexes.
- TEST(SortedDataInterface, SeekExact_HitWithDups_Reverse) {
- auto harnessHelper = newHarnessHelper();
- auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(false, {
- {key1, loc1},
- {key2, loc1},
- {key2, loc2},
- {key3, loc1},
- });
-
- auto cursor = sorted->newCursor(opCtx.get(), false);
+ auto cursor = sorted->newCursor(opCtx.get(), false);
- ASSERT_EQ(cursor->seekExact(key2), IndexKeyEntry(key2, loc2));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
- ASSERT_EQ(cursor->next(), boost::none);
- }
-} // namespace mongo
+ ASSERT_EQ(cursor->seekExact(key2), IndexKeyEntry(key2, loc2));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(key1, loc1));
+ ASSERT_EQ(cursor->next(), boost::none);
+}
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp b/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
index 0d31abe6f19..1c069da9ebe 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp
@@ -37,132 +37,132 @@
namespace mongo {
- // Insert a key and verify that dupKeyCheck() returns a non-OK status for
- // the same key. When dupKeyCheck() is called with the exact (key, RecordId)
- // pair that was inserted, it should still return an OK status.
- TEST( SortedDataInterface, DupKeyCheckAfterInsert ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+// Insert a key and verify that dupKeyCheck() returns a non-OK status for
+// the same key. When dupKeyCheck() is called with the exact (key, RecordId)
+// pair that was inserted, it should still return an OK status.
+TEST(SortedDataInterface, DupKeyCheckAfterInsert) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->dupKeyCheck( opCtx.get(), key1, loc1 ) );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::min() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), key1, loc1));
+ ASSERT_NOT_OK(sorted->dupKeyCheck(opCtx.get(), key1, RecordId::min()));
+ uow.commit();
}
}
+}
- // Verify that dupKeyCheck() returns an OK status for a key that does
- // not exist in the index.
- TEST( SortedDataInterface, DupKeyCheckEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+// Verify that dupKeyCheck() returns an OK status for a key that does
+// not exist in the index.
+TEST(SortedDataInterface, DupKeyCheckEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->dupKeyCheck( opCtx.get(), key1, loc1 ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), key1, loc1));
+ uow.commit();
}
}
+}
- // Insert a key and verify that dupKeyCheck() acknowledges the duplicate key, even
- // when the insert key is located at a RecordId that comes after the one specified.
- TEST( SortedDataInterface, DupKeyCheckWhenDiskLocBefore ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+// Insert a key and verify that dupKeyCheck() acknowledges the duplicate key, even
+// when the inserted key is located at a RecordId that comes after the one specified.
+TEST(SortedDataInterface, DupKeyCheckWhenDiskLocBefore) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::min() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_NOT_OK(sorted->dupKeyCheck(opCtx.get(), key1, RecordId::min()));
+ uow.commit();
}
}
+}
- // Insert a key and verify that dupKeyCheck() acknowledges the duplicate key, even
- // when the insert key is located at a RecordId that comes before the one specified.
- TEST( SortedDataInterface, DupKeyCheckWhenDiskLocAfter ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+// Insert a key and verify that dupKeyCheck() acknowledges the duplicate key, even
+// when the inserted key is located at a RecordId that comes before the one specified.
+TEST(SortedDataInterface, DupKeyCheckWhenDiskLocAfter) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_NOT_OK( sorted->dupKeyCheck( opCtx.get(), key1, RecordId::max() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_NOT_OK(sorted->dupKeyCheck(opCtx.get(), key1, RecordId::max()));
+ uow.commit();
}
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
index a7ee8544dc0..6f1b4575a09 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp
@@ -37,41 +37,41 @@
namespace mongo {
- // Insert multiple keys and verify that fullValidate() either sets
- // the `numKeysOut` as the number of entries in the index, or as -1.
- TEST( SortedDataInterface, FullValidate ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Insert multiple keys and verify that fullValidate() sets
+// `numKeysOut` either to the number of entries in the index or to -1.
+TEST(SortedDataInterface, FullValidate) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- BSONObj key = BSON( "" << i );
- RecordId loc( 42, i * 2 );
- ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ BSONObj key = BSON("" << i);
+ RecordId loc(42, i * 2);
+ ASSERT_OK(sorted->insert(opCtx.get(), key, loc, true));
+ uow.commit();
}
+ }
- {
- long long numKeysOut;
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- sorted->fullValidate(opCtx.get(), false, &numKeysOut, NULL);
- // fullValidate() can set numKeysOut as the number of existing keys or -1.
- ASSERT( numKeysOut == nToInsert || numKeysOut == -1 );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
+ }
+
+ {
+ long long numKeysOut;
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ sorted->fullValidate(opCtx.get(), false, &numKeysOut, NULL);
+ // fullValidate() can set numKeysOut to the number of existing keys or to -1.
+ ASSERT(numKeysOut == nToInsert || numKeysOut == -1);
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index 286c3038f8c..13929c7eacc 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -37,535 +37,524 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
- std::unique_ptr<SortedDataInterface> HarnessHelper::newSortedDataInterface(
- bool unique,
- std::initializer_list<IndexKeyEntry> toInsert) {
- invariant(std::is_sorted(toInsert.begin(), toInsert.end(),
- IndexEntryComparison(Ordering::make(BSONObj()))));
-
- auto index = newSortedDataInterface(unique);
- insertToIndex(this, index, toInsert);
- return index;
- }
-
- void insertToIndex(unowned_ptr<OperationContext> txn,
- unowned_ptr<SortedDataInterface> index,
- std::initializer_list<IndexKeyEntry> toInsert) {
- WriteUnitOfWork wuow(txn);
- for (auto&& entry : toInsert) {
- ASSERT_OK(index->insert(txn, entry.key, entry.loc, true));
- }
- wuow.commit();
+std::unique_ptr<SortedDataInterface> HarnessHelper::newSortedDataInterface(
+ bool unique, std::initializer_list<IndexKeyEntry> toInsert) {
+ invariant(std::is_sorted(
+ toInsert.begin(), toInsert.end(), IndexEntryComparison(Ordering::make(BSONObj()))));
+
+ auto index = newSortedDataInterface(unique);
+ insertToIndex(this, index, toInsert);
+ return index;
+}
+
+void insertToIndex(unowned_ptr<OperationContext> txn,
+ unowned_ptr<SortedDataInterface> index,
+ std::initializer_list<IndexKeyEntry> toInsert) {
+ WriteUnitOfWork wuow(txn);
+ for (auto&& entry : toInsert) {
+ ASSERT_OK(index->insert(txn, entry.key, entry.loc, true));
}
-
- void removeFromIndex(unowned_ptr<OperationContext> txn,
- unowned_ptr<SortedDataInterface> index,
- std::initializer_list<IndexKeyEntry> toRemove) {
- WriteUnitOfWork wuow(txn);
- for (auto&& entry : toRemove) {
- index->unindex(txn, entry.key, entry.loc, true);
- }
- wuow.commit();
+ wuow.commit();
+}
+
+void removeFromIndex(unowned_ptr<OperationContext> txn,
+ unowned_ptr<SortedDataInterface> index,
+ std::initializer_list<IndexKeyEntry> toRemove) {
+ WriteUnitOfWork wuow(txn);
+ for (auto&& entry : toRemove) {
+ index->unindex(txn, entry.key, entry.loc, true);
}
+ wuow.commit();
+}
- TEST( SortedDataInterface, InsertWithDups1 ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+TEST(SortedDataInterface, InsertWithDups1) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 2 ), true );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), true);
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 6, 2 ), true );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(6, 2), true);
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
- long long x = 0;
- sorted->fullValidate(opCtx.get(), false, &x, NULL);
- ASSERT_EQUALS( 2, x );
- }
+ long long x = 0;
+ sorted->fullValidate(opCtx.get(), false, &x, NULL);
+ ASSERT_EQUALS(2, x);
}
+}
- TEST( SortedDataInterface, InsertWithDups2 ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, InsertWithDups2) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true);
+ uow.commit();
}
}
- TEST( SortedDataInterface, InsertWithDups3AndRollback ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+}
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, InsertWithDups3AndRollback) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
- // no commit
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true);
+ // no commit
}
}
- TEST( SortedDataInterface, InsertNoDups1 ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), false );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, InsertNoDups1) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId( 5, 20 ), false );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), false);
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 2), RecordId(5, 20), false);
+ uow.commit();
}
-
}
- TEST( SortedDataInterface, InsertNoDups2 ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+}
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 2 ), false );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, InsertNoDups2) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 4 ), false );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), false);
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 4), false);
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
}
+}
- TEST( SortedDataInterface, Unindex1 ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, Unindex1) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 20 ), true );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), BSON("" << 1), RecordId(5, 20), true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 2 ), RecordId( 5, 18 ), true );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), BSON("" << 2), RecordId(5, 18), true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
+}
- TEST( SortedDataInterface, Unindex2Rollback ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, Unindex2Rollback) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), BSON( "" << 1 ), RecordId( 5, 18 ), true );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- // no commit
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ // no commit
}
-
}
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
- TEST( SortedDataInterface, CursorIterate1 ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
- int N = 5;
- for ( int i = 0; i < N; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << i ), RecordId( 5, i * 2 ), true ) );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, CursorIterate1) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ int N = 5;
+ for (int i = 0; i < N; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- int n = 0;
- for (auto entry = cursor->seek(BSONObj(), true); entry; entry = cursor->next()) {
- ASSERT_EQ(entry, IndexKeyEntry(BSON("" << n), RecordId(5, n * 2)));
- n++;
- }
- ASSERT_EQUALS( N, n );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), BSON("" << i), RecordId(5, i * 2), true));
+ uow.commit();
}
-
-
}
- TEST( SortedDataInterface, CursorIterate1WithSaveRestore ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- int N = 5;
- for ( int i = 0; i < N; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << i ), RecordId( 5, i * 2 ), true );
- uow.commit();
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ int n = 0;
+ for (auto entry = cursor->seek(BSONObj(), true); entry; entry = cursor->next()) {
+ ASSERT_EQ(entry, IndexKeyEntry(BSON("" << n), RecordId(5, n * 2)));
+ n++;
}
+ ASSERT_EQUALS(N, n);
+ }
+}
+
+TEST(SortedDataInterface, CursorIterate1WithSaveRestore) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ int N = 5;
+ for (int i = 0; i < N; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- int n = 0;
- for (auto entry = cursor->seek(BSONObj(), true); entry; entry = cursor->next()) {
- ASSERT_EQ(entry, IndexKeyEntry(BSON("" << n), RecordId(5, n * 2)));
- n++;
- cursor->savePositioned();
- cursor->restore( opCtx.get() );
- }
- ASSERT_EQUALS( N, n );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << i), RecordId(5, i * 2), true);
+ uow.commit();
}
-
}
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ int n = 0;
+ for (auto entry = cursor->seek(BSONObj(), true); entry; entry = cursor->next()) {
+ ASSERT_EQ(entry, IndexKeyEntry(BSON("" << n), RecordId(5, n * 2)));
+ n++;
+ cursor->savePositioned();
+ cursor->restore(opCtx.get());
+ }
+ ASSERT_EQUALS(N, n);
+ }
+}
- TEST( SortedDataInterface, CursorIterateAllDupKeysWithSaveRestore ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
- int N = 5;
- for ( int i = 0; i < N; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->insert( opCtx.get(), BSON( "" << 5 ), RecordId( 5, i * 2 ), true );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, CursorIterateAllDupKeysWithSaveRestore) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ int N = 5;
+ for (int i = 0; i < N; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- int n = 0;
- for (auto entry = cursor->seek(BSONObj(), true); entry; entry = cursor->next()) {
- ASSERT_EQ(entry, IndexKeyEntry(BSON("" << 5), RecordId(5, n * 2)));
- n++;
- cursor->savePositioned();
- cursor->restore( opCtx.get() );
- }
- ASSERT_EQUALS( N, n );
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->insert(opCtx.get(), BSON("" << 5), RecordId(5, i * 2), true);
+ uow.commit();
}
-
}
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ int n = 0;
+ for (auto entry = cursor->seek(BSONObj(), true); entry; entry = cursor->next()) {
+ ASSERT_EQ(entry, IndexKeyEntry(BSON("" << 5), RecordId(5, n * 2)));
+ n++;
+ cursor->savePositioned();
+ cursor->restore(opCtx.get());
+ }
+ ASSERT_EQUALS(N, n);
+ }
+}
- TEST( SortedDataInterface, Locate1 ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- BSONObj key = BSON( "" << 1 );
- RecordId loc( 5, 16 );
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT( !cursor->seek( key, true ) );
- }
+TEST(SortedDataInterface, Locate1) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- Status res = sorted->insert( opCtx.get(), key, loc, true );
- ASSERT_OK( res );
- uow.commit();
- }
- }
+ BSONObj key = BSON("" << 1);
+ RecordId loc(5, 16);
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT_EQ(cursor->seek(key, true), IndexKeyEntry(key, loc));
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT(!cursor->seek(key, true));
}
- TEST( SortedDataInterface, Locate2 ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
-
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId(1,4), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), RecordId(1,6), true ) );
- uow.commit();
- }
- }
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT_EQ(cursor->seek(BSON("a" << 2), true),
- IndexKeyEntry(BSON("" << 2), RecordId(1, 4)));
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 3), RecordId(1, 6)));
- ASSERT_EQ(cursor->next(), boost::none);
+ WriteUnitOfWork uow(opCtx.get());
+ Status res = sorted->insert(opCtx.get(), key, loc, true);
+ ASSERT_OK(res);
+ uow.commit();
}
}
- TEST( SortedDataInterface, Locate2Empty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key, true), IndexKeyEntry(key, loc));
+ }
+}
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
-
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 1 ), RecordId(1,2), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 2 ), RecordId(1,4), true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), BSON( "" << 3 ), RecordId(1,6), true ) );
- uow.commit();
- }
- }
+TEST(SortedDataInterface, Locate2) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get()) );
- ASSERT_EQ(cursor->seek(BSONObj(), true), IndexKeyEntry(BSON("" << 1), RecordId(1, 2)));
- }
+ WriteUnitOfWork uow(opCtx.get());
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- const std::unique_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor(opCtx.get(), false) );
- ASSERT_EQ(cursor->seek(BSONObj(), false), boost::none);
+ ASSERT_OK(sorted->insert(opCtx.get(), BSON("" << 1), RecordId(1, 2), true));
+ ASSERT_OK(sorted->insert(opCtx.get(), BSON("" << 2), RecordId(1, 4), true));
+ ASSERT_OK(sorted->insert(opCtx.get(), BSON("" << 3), RecordId(1, 6), true));
+ uow.commit();
}
-
}
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(BSON("a" << 2), true), IndexKeyEntry(BSON("" << 2), RecordId(1, 4)));
- TEST( SortedDataInterface, Locate3Descending ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 3), RecordId(1, 6)));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
+}
- auto buildEntry = [](int i) { return IndexKeyEntry(BSON("" << i), RecordId(1, i*2)); };
+TEST(SortedDataInterface, Locate2Empty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- for ( int i = 0; i < 10; i++ ) {
- if ( i == 6 )
- continue;
- WriteUnitOfWork uow( opCtx.get() );
- auto entry = buildEntry(i);
- ASSERT_OK( sorted->insert( opCtx.get(), entry.key, entry.loc, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+
+ ASSERT_OK(sorted->insert(opCtx.get(), BSON("" << 1), RecordId(1, 2), true));
+ ASSERT_OK(sorted->insert(opCtx.get(), BSON("" << 2), RecordId(1, 4), true));
+ ASSERT_OK(sorted->insert(opCtx.get(), BSON("" << 3), RecordId(1, 6), true));
+ uow.commit();
}
+ }
+ {
const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get(), true));
- ASSERT_EQ(cursor->seek(BSON("" << 5), true), buildEntry(5));
- ASSERT_EQ(cursor->next(), buildEntry(7));
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(BSONObj(), true), IndexKeyEntry(BSON("" << 1), RecordId(1, 2)));
+ }
- cursor = sorted->newCursor(opCtx.get(), /*forward*/false);
- ASSERT_EQ(cursor->seek(BSON("" << 5), /*inclusive*/false), buildEntry(4));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get(), false));
+ ASSERT_EQ(cursor->seek(BSONObj(), false), boost::none);
+ }
+}
- cursor = sorted->newCursor(opCtx.get(), /*forward*/false);
- ASSERT_EQ(cursor->seek(BSON("" << 5), /*inclusive*/true), buildEntry(5));
- ASSERT_EQ(cursor->next(), buildEntry(4));
- cursor = sorted->newCursor(opCtx.get(), /*forward*/false);
- ASSERT_EQ(cursor->seek(BSON("" << 5), /*inclusive*/false), buildEntry(4));
- ASSERT_EQ(cursor->next(), buildEntry(3));
+TEST(SortedDataInterface, Locate3Descending) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- cursor = sorted->newCursor(opCtx.get(), /*forward*/false);
- ASSERT_EQ(cursor->seek(BSON("" << 6), /*inclusive*/true), buildEntry(5));
- ASSERT_EQ(cursor->next(), buildEntry(4));
+ auto buildEntry = [](int i) { return IndexKeyEntry(BSON("" << i), RecordId(1, i * 2)); };
- cursor = sorted->newCursor(opCtx.get(), /*forward*/false);
- ASSERT_EQ(cursor->seek(BSON("" << 500), /*inclusive*/true), buildEntry(9));
- ASSERT_EQ(cursor->next(), buildEntry(8));
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ for (int i = 0; i < 10; i++) {
+ if (i == 6)
+ continue;
+ WriteUnitOfWork uow(opCtx.get());
+ auto entry = buildEntry(i);
+ ASSERT_OK(sorted->insert(opCtx.get(), entry.key, entry.loc, true));
+ uow.commit();
+ }
}
- TEST( SortedDataInterface, Locate4 ) {
- auto harnessHelper = newHarnessHelper();
- auto sorted = harnessHelper->newSortedDataInterface(false, {
- {BSON("" << 1), RecordId(1, 2)},
- {BSON("" << 1), RecordId(1, 4)},
- {BSON("" << 1), RecordId(1, 6)},
- {BSON("" << 2), RecordId(1, 8)},
- });
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get(), true));
+ ASSERT_EQ(cursor->seek(BSON("" << 5), true), buildEntry(5));
+ ASSERT_EQ(cursor->next(), buildEntry(7));
+
+ cursor = sorted->newCursor(opCtx.get(), /*forward*/ false);
+ ASSERT_EQ(cursor->seek(BSON("" << 5), /*inclusive*/ false), buildEntry(4));
+
+ cursor = sorted->newCursor(opCtx.get(), /*forward*/ false);
+ ASSERT_EQ(cursor->seek(BSON("" << 5), /*inclusive*/ true), buildEntry(5));
+ ASSERT_EQ(cursor->next(), buildEntry(4));
+
+ cursor = sorted->newCursor(opCtx.get(), /*forward*/ false);
+ ASSERT_EQ(cursor->seek(BSON("" << 5), /*inclusive*/ false), buildEntry(4));
+ ASSERT_EQ(cursor->next(), buildEntry(3));
+
+ cursor = sorted->newCursor(opCtx.get(), /*forward*/ false);
+ ASSERT_EQ(cursor->seek(BSON("" << 6), /*inclusive*/ true), buildEntry(5));
+ ASSERT_EQ(cursor->next(), buildEntry(4));
+
+ cursor = sorted->newCursor(opCtx.get(), /*forward*/ false);
+ ASSERT_EQ(cursor->seek(BSON("" << 500), /*inclusive*/ true), buildEntry(9));
+ ASSERT_EQ(cursor->next(), buildEntry(8));
+}
+
+TEST(SortedDataInterface, Locate4) {
+ auto harnessHelper = newHarnessHelper();
+ auto sorted = harnessHelper->newSortedDataInterface(false,
+ {
+ {BSON("" << 1), RecordId(1, 2)},
+ {BSON("" << 1), RecordId(1, 4)},
+ {BSON("" << 1), RecordId(1, 6)},
+ {BSON("" << 2), RecordId(1, 8)},
+ });
+
+ {
+ auto opCtx = harnessHelper->newOperationContext();
+ auto cursor = sorted->newCursor(opCtx.get());
+ ASSERT_EQ(cursor->seek(BSON("a" << 1), true), IndexKeyEntry(BSON("" << 1), RecordId(1, 2)));
+
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 1), RecordId(1, 4)));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 1), RecordId(1, 6)));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 2), RecordId(1, 8)));
+ ASSERT_EQ(cursor->next(), boost::none);
+ }
- {
- auto opCtx = harnessHelper->newOperationContext();
- auto cursor = sorted->newCursor(opCtx.get());
- ASSERT_EQ(cursor->seek(BSON("a" << 1), true),
- IndexKeyEntry(BSON("" << 1), RecordId(1, 2)));
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 1), RecordId(1, 4)));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 1), RecordId(1, 6)));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 2), RecordId(1, 8)));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ {
+ auto opCtx = harnessHelper->newOperationContext();
+ auto cursor = sorted->newCursor(opCtx.get(), false);
+ ASSERT_EQ(cursor->seek(BSON("a" << 1), true), IndexKeyEntry(BSON("" << 1), RecordId(1, 6)));
- {
- auto opCtx = harnessHelper->newOperationContext();
- auto cursor = sorted->newCursor(opCtx.get(), false);
- ASSERT_EQ(cursor->seek(BSON("a" << 1), true),
- IndexKeyEntry(BSON("" << 1), RecordId(1, 6)));
-
- ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 1), RecordId(1, 4)));
- ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 1), RecordId(1, 2)));
- ASSERT_EQ(cursor->next(), boost::none);
- }
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 1), RecordId(1, 4)));
+ ASSERT_EQ(cursor->next(), IndexKeyEntry(BSON("" << 1), RecordId(1, 2)));
+ ASSERT_EQ(cursor->next(), boost::none);
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h
index 3905b120507..e90f6606e86 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.h
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h
@@ -42,90 +42,99 @@
namespace mongo {
- const BSONObj key0 = BSON( "" << 0 );
- const BSONObj key1 = BSON( "" << 1 );
- const BSONObj key2 = BSON( "" << 2 );
- const BSONObj key3 = BSON( "" << 3 );
- const BSONObj key4 = BSON( "" << 4 );
- const BSONObj key5 = BSON( "" << 5 );
- const BSONObj key6 = BSON( "" << 6 );
-
- const BSONObj compoundKey1a = BSON( "" << 1 << "" << "a" );
- const BSONObj compoundKey1b = BSON( "" << 1 << "" << "b" );
- const BSONObj compoundKey1c = BSON( "" << 1 << "" << "c" );
- const BSONObj compoundKey1d = BSON( "" << 1 << "" << "d" );
- const BSONObj compoundKey2a = BSON( "" << 2 << "" << "a" );
- const BSONObj compoundKey2b = BSON( "" << 2 << "" << "b" );
- const BSONObj compoundKey2c = BSON( "" << 2 << "" << "c" );
- const BSONObj compoundKey3a = BSON( "" << 3 << "" << "a" );
- const BSONObj compoundKey3b = BSON( "" << 3 << "" << "b" );
- const BSONObj compoundKey3c = BSON( "" << 3 << "" << "c" );
-
- const RecordId loc1( 0, 42 );
- const RecordId loc2( 0, 44 );
- const RecordId loc3( 0, 46 );
- const RecordId loc4( 0, 48 );
- const RecordId loc5( 0, 50 );
- const RecordId loc6( 0, 52 );
- const RecordId loc7( 0, 54 );
- const RecordId loc8( 0, 56 );
-
- class RecoveryUnit;
-
- class HarnessHelper {
- public:
- HarnessHelper(){}
- virtual ~HarnessHelper() = default;
-
- virtual std::unique_ptr<SortedDataInterface> newSortedDataInterface( bool unique ) = 0;
- virtual std::unique_ptr<RecoveryUnit> newRecoveryUnit() = 0;
-
- virtual std::unique_ptr<OperationContext> newOperationContext() {
- return stdx::make_unique<OperationContextNoop>(newRecoveryUnit().release());
- }
-
- /**
- * Creates a new SDI with some initial data.
- *
- * For clarity to readers, toInsert must be sorted.
- */
- std::unique_ptr<SortedDataInterface> newSortedDataInterface(
- bool unique,
- std::initializer_list<IndexKeyEntry> toInsert);
- };
-
- /**
- * Inserts all entries in toInsert into index.
- * ASSERT_OKs the inserts.
- * Always uses dupsAllowed=true.
- *
- * Should be used for declaring and changing conditions, not for testing inserts.
- */
- void insertToIndex(unowned_ptr<OperationContext> txn,
- unowned_ptr<SortedDataInterface> index,
- std::initializer_list<IndexKeyEntry> toInsert);
-
- inline void insertToIndex(unowned_ptr<HarnessHelper> harness,
- unowned_ptr<SortedDataInterface> index,
- std::initializer_list<IndexKeyEntry> toInsert) {
- insertToIndex(harness->newOperationContext(), index, toInsert);
+const BSONObj key0 = BSON("" << 0);
+const BSONObj key1 = BSON("" << 1);
+const BSONObj key2 = BSON("" << 2);
+const BSONObj key3 = BSON("" << 3);
+const BSONObj key4 = BSON("" << 4);
+const BSONObj key5 = BSON("" << 5);
+const BSONObj key6 = BSON("" << 6);
+
+const BSONObj compoundKey1a = BSON("" << 1 << ""
+ << "a");
+const BSONObj compoundKey1b = BSON("" << 1 << ""
+ << "b");
+const BSONObj compoundKey1c = BSON("" << 1 << ""
+ << "c");
+const BSONObj compoundKey1d = BSON("" << 1 << ""
+ << "d");
+const BSONObj compoundKey2a = BSON("" << 2 << ""
+ << "a");
+const BSONObj compoundKey2b = BSON("" << 2 << ""
+ << "b");
+const BSONObj compoundKey2c = BSON("" << 2 << ""
+ << "c");
+const BSONObj compoundKey3a = BSON("" << 3 << ""
+ << "a");
+const BSONObj compoundKey3b = BSON("" << 3 << ""
+ << "b");
+const BSONObj compoundKey3c = BSON("" << 3 << ""
+ << "c");
+
+const RecordId loc1(0, 42);
+const RecordId loc2(0, 44);
+const RecordId loc3(0, 46);
+const RecordId loc4(0, 48);
+const RecordId loc5(0, 50);
+const RecordId loc6(0, 52);
+const RecordId loc7(0, 54);
+const RecordId loc8(0, 56);
+
+class RecoveryUnit;
+
+class HarnessHelper {
+public:
+ HarnessHelper() {}
+ virtual ~HarnessHelper() = default;
+
+ virtual std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique) = 0;
+ virtual std::unique_ptr<RecoveryUnit> newRecoveryUnit() = 0;
+
+ virtual std::unique_ptr<OperationContext> newOperationContext() {
+ return stdx::make_unique<OperationContextNoop>(newRecoveryUnit().release());
}
/**
- * Removes all entries in toRemove from index.
- * Always uses dupsAllowed=true.
+ * Creates a new SDI with some initial data.
*
- * Should be used for declaring and changing conditions, not for testing removes.
+ * For clarity to readers, toInsert must be sorted.
*/
- void removeFromIndex(unowned_ptr<OperationContext> txn,
- unowned_ptr<SortedDataInterface> index,
- std::initializer_list<IndexKeyEntry> toRemove);
-
- inline void removeFromIndex(unowned_ptr<HarnessHelper> harness,
- unowned_ptr<SortedDataInterface> index,
- std::initializer_list<IndexKeyEntry> toRemove) {
- removeFromIndex(harness->newOperationContext(), index, toRemove);
- }
+ std::unique_ptr<SortedDataInterface> newSortedDataInterface(
+ bool unique, std::initializer_list<IndexKeyEntry> toInsert);
+};
+
+/**
+ * Inserts all entries in toInsert into index.
+ * ASSERT_OKs the inserts.
+ * Always uses dupsAllowed=true.
+ *
+ * Should be used for declaring and changing conditions, not for testing inserts.
+ */
+void insertToIndex(unowned_ptr<OperationContext> txn,
+ unowned_ptr<SortedDataInterface> index,
+ std::initializer_list<IndexKeyEntry> toInsert);
+
+inline void insertToIndex(unowned_ptr<HarnessHelper> harness,
+ unowned_ptr<SortedDataInterface> index,
+ std::initializer_list<IndexKeyEntry> toInsert) {
+ insertToIndex(harness->newOperationContext(), index, toInsert);
+}
+
+/**
+ * Removes all entries in toRemove from index.
+ * Always uses dupsAllowed=true.
+ *
+ * Should be used for declaring and changing conditions, not for testing removes.
+ */
+void removeFromIndex(unowned_ptr<OperationContext> txn,
+ unowned_ptr<SortedDataInterface> index,
+ std::initializer_list<IndexKeyEntry> toRemove);
+
+inline void removeFromIndex(unowned_ptr<HarnessHelper> harness,
+ unowned_ptr<SortedDataInterface> index,
+ std::initializer_list<IndexKeyEntry> toRemove) {
+ removeFromIndex(harness->newOperationContext(), index, toRemove);
+}
- std::unique_ptr<HarnessHelper> newHarnessHelper();
+std::unique_ptr<HarnessHelper> newHarnessHelper();
}
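For readers orienting themselves in this reformat, the harness declarations above read most easily next to a usage sketch. The test below is illustrative only and is not part of the change: its name and key/loc choices are hypothetical, and it assumes the unowned_ptr parameters accept the unique_ptrs shown, as the inline helpers above already rely on.

TEST(SortedDataInterface, HarnessHelperUsageSketch) {
    const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());

    // Build a unique index that already contains two sorted entries.
    const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(
        true, {IndexKeyEntry(key1, loc1), IndexKeyEntry(key2, loc2)}));

    // Adjust index state as a precondition, without asserting on each individual write.
    insertToIndex(harnessHelper, sorted, {IndexKeyEntry(key3, loc3)});
    removeFromIndex(harnessHelper, sorted, {IndexKeyEntry(key1, loc1)});

    const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
    ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
}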
diff --git a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
index 2aa254f2f3f..71ec797cc17 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp
@@ -37,325 +37,327 @@
namespace mongo {
- // Insert a key and verify that the number of entries in the index equals 1.
- TEST( SortedDataInterface, Insert ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+// Insert a key and verify that the number of entries in the index equals 1.
+TEST(SortedDataInterface, Insert) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
}
+}
- // Insert a compound key and verify that the number of entries in the index equals 1.
- TEST( SortedDataInterface, InsertCompoundKey ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Insert a compound key and verify that the number of entries in the index equals 1.
+TEST(SortedDataInterface, InsertCompoundKey) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, true));
+ uow.commit();
}
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
+
+// Insert multiple, distinct keys at the same RecordId and verify that the
+// number of entries in the index equals the number that were inserted, even
+// when duplicates are not allowed.
+TEST(SortedDataInterface, InsertSameDiskLoc) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc1, true));
+ uow.commit();
}
}
- // Insert multiple, distinct keys at the same RecordId and verify that the
- // number of entries in the index equals the number that were inserted, even
- // when duplicates are not allowed.
- TEST( SortedDataInterface, InsertSameDiskLoc ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc1, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc1, true ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
+}
+
+// Insert multiple, distinct keys at the same RecordId and verify that the
+// number of entries in the index equals the number that were inserted, even
+// when duplicates are allowed.
+TEST(SortedDataInterface, InsertSameDiskLocWithDupsAllowed) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc1, true /* allow duplicates */));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc1, true ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc1, true /* allow duplicates */));
+ uow.commit();
}
}
- // Insert multiple, distinct keys at the same RecordId and verify that the
- // number of entries in the index equals the number that were inserted, even
- // when duplicates are allowed.
- TEST( SortedDataInterface, InsertSameDiskLocWithDupsAllowed ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
+}
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+// Insert the same key multiple times and verify that only 1 entry exists
+// in the index when duplicates are not allowed.
+TEST(SortedDataInterface, InsertSameKey) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc1, true /* allow duplicates */ ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
+ ASSERT_NOT_OK(sorted->insert(opCtx.get(), key1, loc2, false));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc1, true /* allow duplicates */ ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_NOT_OK(sorted->insert(opCtx.get(), key1, loc2, false));
+ uow.commit();
}
}
- // Insert the same key multiple times and verify that only 1 entry exists
- // in the index when duplicates are not allowed.
- TEST( SortedDataInterface, InsertSameKey ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+}
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+namespace {
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) );
- ASSERT_NOT_OK( sorted->insert( opCtx.get(), key1, loc2, false ) );
- uow.commit();
- }
- }
+// Insert the same key multiple times and verify that all entries exist
+// in the index when duplicates are allowed. Since it is illegal to open a cursor on a unique
+// index while the unique constraint is violated, this is tested by running the test 3 times,
+// removing all but one loc each time and verifying that the correct loc remains.
+void _testInsertSameKeyWithDupsAllowed(const RecordId locs[3]) {
+ for (int keeper = 0; keeper < 3; keeper++) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(
+ harnessHelper->newSortedDataInterface(true));
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_NOT_OK( sorted->insert( opCtx.get(), key1, loc2, false ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, locs[0], false));
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, locs[1], true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, locs[2], true));
uow.commit();
}
}
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
- }
-
-namespace {
-
- // Insert the same key multiple times and verify that all entries exists
- // in the index when duplicates are allowed. Since it is illegal to open a cursor to an unique
- // index while the unique constraint is violated, this is tested by running the test 3 times,
- // removing all but one loc each time and verifying the correct loc remains.
- void _testInsertSameKeyWithDupsAllowed(const RecordId locs[3]) {
- for (int keeper = 0; keeper < 3; keeper++) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
-
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK(sorted->insert(opCtx.get(), key1, locs[0], false));
- ASSERT_OK(sorted->insert(opCtx.get(), key1, locs[1], true));
- ASSERT_OK(sorted->insert(opCtx.get(), key1, locs[2], true));
- uow.commit();
- }
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- for (int i = 0; i < 3; i++) {
- if (i != keeper) {
- sorted->unindex(opCtx.get(), key1, locs[i], true);
- }
+ WriteUnitOfWork uow(opCtx.get());
+ for (int i = 0; i < 3; i++) {
+ if (i != keeper) {
+ sorted->unindex(opCtx.get(), key1, locs[i], true);
}
- uow.commit();
}
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
- const std::unique_ptr<SortedDataInterface::Cursor> cursor(sorted->newCursor(opCtx.get()));
- ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, locs[keeper]));
- }
+ const std::unique_ptr<SortedDataInterface::Cursor> cursor(
+ sorted->newCursor(opCtx.get()));
+ ASSERT_EQ(cursor->seek(key1, true), IndexKeyEntry(key1, locs[keeper]));
}
}
+}
} // namespace
- TEST( SortedDataInterface, InsertSameKeyWithDupsAllowedLocsAscending ) {
- const RecordId locs[3] = {loc1, loc2, loc3};
- _testInsertSameKeyWithDupsAllowed(locs);
+TEST(SortedDataInterface, InsertSameKeyWithDupsAllowedLocsAscending) {
+ const RecordId locs[3] = {loc1, loc2, loc3};
+ _testInsertSameKeyWithDupsAllowed(locs);
+}
+
+TEST(SortedDataInterface, InsertSameKeyWithDupsAllowedLocsDescending) {
+ const RecordId locs[3] = {loc3, loc2, loc1};
+ _testInsertSameKeyWithDupsAllowed(locs);
+}
+
+// Insert multiple keys and verify that the number of entries
+// in the index equals the number that were inserted.
+TEST(SortedDataInterface, InsertMultiple) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- TEST( SortedDataInterface, InsertSameKeyWithDupsAllowedLocsDescending ) {
- const RecordId locs[3] = {loc3, loc2, loc1};
- _testInsertSameKeyWithDupsAllowed(locs);
- }
-
- // Insert multiple keys and verify that the number of entries
- // in the index equals the number that were inserted.
- TEST( SortedDataInterface, InsertMultiple ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
-
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, false ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, false));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, false ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, false));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
}
+}
- // Insert multiple compound keys and verify that the number of entries
- // in the index equals the number that were inserted.
- TEST( SortedDataInterface, InsertMultipleCompoundKeys ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+// Insert multiple compound keys and verify that the number of entries
+// in the index equals the number that were inserted.
+TEST(SortedDataInterface, InsertMultipleCompoundKeys) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, false ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1b, loc2, false ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey2b, loc3, false ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1b, loc2, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey2b, loc3, false));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1c, loc4, false ) );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey3a, loc5, false ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1c, loc4, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey3a, loc5, false));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 5, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(5, sorted->numEntries(opCtx.get()));
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_isempty.cpp b/src/mongo/db/storage/sorted_data_interface_test_isempty.cpp
index 8dcef9a3770..16ea19ca815 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_isempty.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_isempty.cpp
@@ -37,46 +37,46 @@
namespace mongo {
- // Verify that isEmpty() returns true when the index is empty,
- // returns false when a key is inserted, and returns true again
- // when that is unindex.
- TEST( SortedDataInterface, IsEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+// Verify that isEmpty() returns true when the index is empty,
+// returns false when a key is inserted, and returns true again
+// when that key is unindexed.
+TEST(SortedDataInterface, IsEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( !sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(!sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key1, loc1, false );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key1, loc1, false);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_rollback.cpp b/src/mongo/db/storage/sorted_data_interface_test_rollback.cpp
index 4b1125e5f07..c99627bf4d1 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_rollback.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_rollback.cpp
@@ -37,119 +37,119 @@
namespace mongo {
- // Insert multiple keys and verify that omitting the commit()
- // on the WriteUnitOfWork causes the changes to not become visible.
- TEST( SortedDataInterface, InsertWithoutCommit ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) );
- // no commit
- }
- }
+// Insert multiple keys and verify that omitting the commit()
+// on the WriteUnitOfWork prevents the changes from becoming visible.
+TEST(SortedDataInterface, InsertWithoutCommit) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
+ // no commit
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc1, false ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc2, false ) );
- // no commit
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc1, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc2, false));
+ // no commit
}
}
- // Insert multiple keys, then unindex those same keys and verify that
- // omitting the commit() on the WriteUnitOfWork causes the changes to
- // not become visible.
- TEST( SortedDataInterface, UnindexWithoutCommit ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+}
+
+// Insert multiple keys, then unindex those same keys and verify that
+// omitting the commit() on the WriteUnitOfWork prevents the changes from
+// becoming visible.
+TEST(SortedDataInterface, UnindexWithoutCommit) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key2, loc2, true );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- // no commit
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key2, loc2, true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ // no commit
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key1, loc1, true );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- sorted->unindex( opCtx.get(), key3, loc3, true );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- // no commit
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key1, loc1, true);
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ sorted->unindex(opCtx.get(), key3, loc3, true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ // no commit
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp b/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
index 49914ff161c..64171093fb2 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp
@@ -37,67 +37,67 @@
namespace mongo {
- // Verify that an empty index takes up no space.
- TEST( SortedDataInterface, GetSpaceUsedBytesEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Verify that an empty index takes up no space.
+TEST(SortedDataInterface, GetSpaceUsedBytesEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
-
- // SERVER-15416 mmapv1 test harness does not use SimpleRecordStoreV1 as its record store
- // and HeapRecordStoreBtree::dataSize does not have an actual implementation
- // {
- // const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- // ASSERT( sorted->getSpaceUsedBytes( opCtx.get() ) == 0 );
- // }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
- // Verify that a nonempty index takes up some space.
- TEST( SortedDataInterface, GetSpaceUsedBytesNonEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+ // SERVER-15416 mmapv1 test harness does not use SimpleRecordStoreV1 as its record store
+ // and HeapRecordStoreBtree::dataSize does not have an actual implementation
+ // {
+ // const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ // ASSERT( sorted->getSpaceUsedBytes( opCtx.get() ) == 0 );
+ // }
+}
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+// Verify that a nonempty index takes up some space.
+TEST(SortedDataInterface, GetSpaceUsedBytesNonEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- int nToInsert = 10;
- for ( int i = 0; i < nToInsert; i++ ) {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- BSONObj key = BSON( "" << i );
- RecordId loc( 42, i * 2 );
- ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) );
- uow.commit();
- }
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ int nToInsert = 10;
+ for (int i = 0; i < nToInsert; i++) {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ BSONObj key = BSON("" << i);
+ RecordId loc(42, i * 2);
+ ASSERT_OK(sorted->insert(opCtx.get(), key, loc, true));
+ uow.commit();
}
+ }
- // SERVER-15416 mmapv1 test harness does not use SimpleRecordStoreV1 as its record store
- // and HeapRecordStoreBtree::dataSize does not have an actual implementation
- // long long spaceUsedBytes;
- // {
- // const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- // spaceUsedBytes = sorted->getSpaceUsedBytes( opCtx.get() );
- // ASSERT( spaceUsedBytes > 0 );
- // }
-
- // {
- // // getSpaceUsedBytes() returns the same value when called multiple times
- // // and there were not interleaved write operations.
- // const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- // ASSERT_EQUALS( spaceUsedBytes, sorted->getSpaceUsedBytes( opCtx.get() ) );
- // ASSERT_EQUALS( spaceUsedBytes, sorted->getSpaceUsedBytes( opCtx.get() ) );
- // }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
}
-} // namespace mongo
+ // SERVER-15416 mmapv1 test harness does not use SimpleRecordStoreV1 as its record store
+ // and HeapRecordStoreBtree::dataSize does not have an actual implementation
+ // long long spaceUsedBytes;
+ // {
+ // const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ // spaceUsedBytes = sorted->getSpaceUsedBytes( opCtx.get() );
+ // ASSERT( spaceUsedBytes > 0 );
+ // }
+
+ // {
+ // // getSpaceUsedBytes() returns the same value when called multiple times
+ // // and there were not interleaved write operations.
+ // const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ // ASSERT_EQUALS( spaceUsedBytes, sorted->getSpaceUsedBytes( opCtx.get() ) );
+ // ASSERT_EQUALS( spaceUsedBytes, sorted->getSpaceUsedBytes( opCtx.get() ) );
+ // }
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_touch.cpp b/src/mongo/db/storage/sorted_data_interface_test_touch.cpp
index 83a3c314ef9..07ec83fb9c7 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_touch.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_touch.cpp
@@ -37,51 +37,51 @@
namespace mongo {
- // Verify that calling touch() on an empty index returns an OK status.
- TEST( SortedDataInterface, TouchEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Verify that calling touch() on an empty index returns an OK status.
+TEST(SortedDataInterface, TouchEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- Status status = sorted->touch( opCtx.get() );
- ASSERT( status.isOK() || status.code() == ErrorCodes::CommandNotSupported );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ Status status = sorted->touch(opCtx.get());
+ ASSERT(status.isOK() || status.code() == ErrorCodes::CommandNotSupported);
}
+}
- // Verify that calling touch() on a nonempty index returns an OK status.
- TEST( SortedDataInterface, TouchNonEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( true ) );
+// Verify that calling touch() on a nonempty index returns an OK status.
+TEST(SortedDataInterface, TouchNonEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(true));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, false ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, false ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, false));
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, false));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(3, sorted->numEntries(opCtx.get()));
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- // XXX does not verify the index was brought into memory
- // (even if supported by storage engine)
- Status status = sorted->touch( opCtx.get() );
- ASSERT( status.isOK() || status.code() == ErrorCodes::CommandNotSupported );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ // XXX does not verify the index was brought into memory
+ // (even if supported by storage engine)
+ Status status = sorted->touch(opCtx.get());
+ ASSERT(status.isOK() || status.code() == ErrorCodes::CommandNotSupported);
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp b/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp
index b15d1e883e2..d1101a90d12 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp
@@ -37,249 +37,249 @@
namespace mongo {
- // Insert a key and verify that it can be unindexed.
- TEST( SortedDataInterface, Unindex ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
-
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+// Insert a key and verify that it can be unindexed.
+TEST(SortedDataInterface, Unindex) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
+
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key1, loc1, true );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key1, loc1, true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
+}
- // Insert a compound key and verify that it can be unindexed.
- TEST( SortedDataInterface, UnindexCompoundKey ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Insert a compound key and verify that it can be unindexed.
+TEST(SortedDataInterface, UnindexCompoundKey) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), compoundKey1a, loc1, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), compoundKey1a, loc1, true );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), compoundKey1a, loc1, true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
+}
- // Insert multiple, distinct keys and verify that they can be unindexed.
- TEST( SortedDataInterface, UnindexMultipleDistinct ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Insert multiple, distinct keys and verify that they can be unindexed.
+TEST(SortedDataInterface, UnindexMultipleDistinct) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key2, loc2, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key2, loc2, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key2, loc2, true );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key2, loc2, true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key3, loc3, true ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key3, loc3, true));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key1, loc1, true );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- sorted->unindex( opCtx.get(), key3, loc3, true );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key1, loc1, true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ sorted->unindex(opCtx.get(), key3, loc3, true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
+}
- // Insert the same key multiple times and verify that each occurrence can be unindexed.
- TEST( SortedDataInterface, UnindexMultipleSameKey ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Insert the same key multiple times and verify that each occurrence can be unindexed.
+TEST(SortedDataInterface, UnindexMultipleSameKey) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc2, true /* allow duplicates */ ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc1, true));
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc2, true /* allow duplicates */));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key1, loc2, true );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key1, loc2, true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- ASSERT_OK( sorted->insert( opCtx.get(), key1, loc3, true /* allow duplicates */ ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ ASSERT_OK(sorted->insert(opCtx.get(), key1, loc3, true /* allow duplicates */));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(2, sorted->numEntries(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key1, loc1, true);
- ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
- sorted->unindex( opCtx.get(), key1, loc3, true );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key1, loc1, true);
+ ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
+ sorted->unindex(opCtx.get(), key1, loc3, true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
}
+ }
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
}
+}
- // Call unindex() on a nonexistent key and verify the result is false.
- TEST( SortedDataInterface, UnindexEmpty ) {
- const std::unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- const std::unique_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );
+// Call unindex() on a nonexistent key and verify the result is false.
+TEST(SortedDataInterface, UnindexEmpty) {
+ const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));
- {
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ }
+ {
+ const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const std::unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- sorted->unindex( opCtx.get(), key1, loc1, true );
- ASSERT( sorted->isEmpty( opCtx.get() ) );
- uow.commit();
- }
+ WriteUnitOfWork uow(opCtx.get());
+ sorted->unindex(opCtx.get(), key1, loc1, true);
+ ASSERT(sorted->isEmpty(opCtx.get()));
+ uow.commit();
}
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 85008da0f5c..67614562b72 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -39,180 +39,183 @@
namespace mongo {
- class DatabaseCatalogEntry;
- class OperationContext;
- class RecoveryUnit;
- struct StorageGlobalParams;
- class StorageEngineLockFile;
- class StorageEngineMetadata;
+class DatabaseCatalogEntry;
+class OperationContext;
+class RecoveryUnit;
+struct StorageGlobalParams;
+class StorageEngineLockFile;
+class StorageEngineMetadata;
+/**
+ * The StorageEngine class is the top level interface for creating a new storage
+ * engine. All StorageEngine(s) must be registered by calling registerFactory in order
+ * to possibly be activated.
+ */
+class StorageEngine {
+public:
/**
- * The StorageEngine class is the top level interface for creating a new storage
- * engine. All StorageEngine(s) must be registered by calling registerFactory in order
- * to possibly be activated.
+ * The interface for creating new instances of storage engines.
+ *
+ * A storage engine provides an instance of this class (along with an associated
+ * name) to the global environment, which then sets the global storage engine
+ * according to the provided configuration parameter.
*/
- class StorageEngine {
+ class Factory {
public:
+ virtual ~Factory() {}
/**
- * The interface for creating new instances of storage engines.
- *
- * A storage engine provides an instance of this class (along with an associated
- * name) to the global environment, which then sets the global storage engine
- * according to the provided configuration parameter.
- */
- class Factory {
- public:
- virtual ~Factory() { }
-
- /**
- * Return a new instance of the StorageEngine. Caller owns the returned pointer.
- */
- virtual StorageEngine* create(const StorageGlobalParams& params,
- const StorageEngineLockFile& lockFile) const = 0;
-
- /**
- * Returns the name of the storage engine.
- *
- * Implementations that change the value of the returned string can cause
- * data file incompatibilities.
- */
- virtual StringData getCanonicalName() const = 0;
-
- /**
- * Validates creation options for a collection in the StorageEngine.
- * Returns an error if the creation options are not valid.
- *
- * Default implementation only accepts empty objects (no options).
- */
- virtual Status validateCollectionStorageOptions(const BSONObj& options) const {
- if (options.isEmpty()) return Status::OK();
- return Status(ErrorCodes::InvalidOptions,
- str::stream() << "storage engine " << getCanonicalName()
- << " does not support any collection storage options");
- }
-
- /**
- * Validates creation options for an index in the StorageEngine.
- * Returns an error if the creation options are not valid.
- *
- * Default implementation only accepts empty objects (no options).
- */
- virtual Status validateIndexStorageOptions(const BSONObj& options) const {
- if (options.isEmpty()) return Status::OK();
- return Status(ErrorCodes::InvalidOptions,
- str::stream() << "storage engine " << getCanonicalName()
- << " does not support any index storage options");
- }
-
- /**
- * Validates existing metadata in the data directory against startup options.
- * Returns an error if the storage engine initialization should not proceed
- * due to any inconsistencies between the current startup options and the creation
- * options stored in the metadata.
- */
- virtual Status validateMetadata(const StorageEngineMetadata& metadata,
- const StorageGlobalParams& params) const = 0;
-
- /**
- * Returns a new document suitable for storing in the data directory metadata.
- * This document will be used by validateMetadata() to check startup options
- * on restart.
- */
- virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const = 0;
- };
-
- /**
- * Called after the globalStorageEngine pointer has been set up, before any other methods
- * are called. Any initialization work that requires the ability to create OperationContexts
- * should be done here rather than in the constructor.
+ * Return a new instance of the StorageEngine. Caller owns the returned pointer.
*/
- virtual void finishInit() {}
+ virtual StorageEngine* create(const StorageGlobalParams& params,
+ const StorageEngineLockFile& lockFile) const = 0;
/**
- * Returns a new interface to the storage engine's recovery unit. The recovery
- * unit is the durability interface. For details, see recovery_unit.h
+ * Returns the name of the storage engine.
*
- * Caller owns the returned pointer.
+ * Implementations that change the value of the returned string can cause
+ * data file incompatibilities.
*/
- virtual RecoveryUnit* newRecoveryUnit() = 0;
+ virtual StringData getCanonicalName() const = 0;
/**
- * List the databases stored in this storage engine.
+ * Validates creation options for a collection in the StorageEngine.
+ * Returns an error if the creation options are not valid.
*
- * XXX: why doesn't this take OpCtx?
+ * Default implementation only accepts empty objects (no options).
*/
- virtual void listDatabases( std::vector<std::string>* out ) const = 0;
+ virtual Status validateCollectionStorageOptions(const BSONObj& options) const {
+ if (options.isEmpty())
+ return Status::OK();
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << "storage engine " << getCanonicalName()
+ << " does not support any collection storage options");
+ }
/**
- * Return the DatabaseCatalogEntry that describes the database indicated by 'db'.
+ * Validates creation options for an index in the StorageEngine.
+ * Returns an error if the creation options are not valid.
*
- * StorageEngine owns returned pointer.
- * It should not be deleted by any caller.
+ * Default implementation only accepts empty objects (no options).
*/
- virtual DatabaseCatalogEntry* getDatabaseCatalogEntry( OperationContext* opCtx,
- StringData db ) = 0;
+ virtual Status validateIndexStorageOptions(const BSONObj& options) const {
+ if (options.isEmpty())
+ return Status::OK();
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << "storage engine " << getCanonicalName()
+ << " does not support any index storage options");
+ }
/**
- * Returns whether the storage engine supports its own locking locking below the collection
- * level. If the engine returns true, MongoDB will acquire intent locks down to the
- * collection level and will assume that the engine will ensure consistency at the level of
- * documents. If false, MongoDB will lock the entire collection in Shared/Exclusive mode
- * for read/write operations respectively.
+ * Validates existing metadata in the data directory against startup options.
+ * Returns an error if the storage engine initialization should not proceed
+ * due to any inconsistencies between the current startup options and the creation
+ * options stored in the metadata.
*/
- virtual bool supportsDocLocking() const = 0;
+ virtual Status validateMetadata(const StorageEngineMetadata& metadata,
+ const StorageGlobalParams& params) const = 0;
/**
- * Returns whether the engine supports a journalling concept or not.
+ * Returns a new document suitable for storing in the data directory metadata.
+ * This document will be used by validateMetadata() to check startup options
+ * on restart.
*/
- virtual bool isDurable() const = 0;
+ virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const = 0;
+ };
- /**
- * Only MMAPv1 should override this and return true to trigger MMAPv1-specific behavior.
- */
- virtual bool isMmapV1() const { return false; }
+ /**
+ * Called after the globalStorageEngine pointer has been set up, before any other methods
+ * are called. Any initialization work that requires the ability to create OperationContexts
+ * should be done here rather than in the constructor.
+ */
+ virtual void finishInit() {}
- /**
- * Closes all file handles associated with a database.
- */
- virtual Status closeDatabase( OperationContext* txn, StringData db ) = 0;
+ /**
+ * Returns a new interface to the storage engine's recovery unit. The recovery
+ * unit is the durability interface. For details, see recovery_unit.h
+ *
+ * Caller owns the returned pointer.
+ */
+ virtual RecoveryUnit* newRecoveryUnit() = 0;
- /**
- * Deletes all data and metadata for a database.
- */
- virtual Status dropDatabase( OperationContext* txn, StringData db ) = 0;
+ /**
+ * List the databases stored in this storage engine.
+ *
+ * XXX: why doesn't this take OpCtx?
+ */
+ virtual void listDatabases(std::vector<std::string>* out) const = 0;
- /**
- * @return number of files flushed
- */
- virtual int flushAllFiles( bool sync ) = 0;
+ /**
+ * Return the DatabaseCatalogEntry that describes the database indicated by 'db'.
+ *
+ * StorageEngine owns returned pointer.
+ * It should not be deleted by any caller.
+ */
+ virtual DatabaseCatalogEntry* getDatabaseCatalogEntry(OperationContext* opCtx,
+ StringData db) = 0;
- /**
- * Recover as much data as possible from a potentially corrupt RecordStore.
- * This only recovers the record data, not indexes or anything else.
- *
- * Generally, this method should not be called directly except by the repairDatabase()
- * free function.
- *
- * NOTE: MMAPv1 does not support this method and has its own repairDatabase() method.
- */
- virtual Status repairRecordStore(OperationContext* txn, const std::string& ns) = 0;
+ /**
+     * Returns whether the storage engine supports its own locking below the collection
+ * level. If the engine returns true, MongoDB will acquire intent locks down to the
+ * collection level and will assume that the engine will ensure consistency at the level of
+ * documents. If false, MongoDB will lock the entire collection in Shared/Exclusive mode
+ * for read/write operations respectively.
+ */
+ virtual bool supportsDocLocking() const = 0;
- /**
- * This method will be called before there is a clean shutdown. Storage engines should
- * override this method if they have clean-up to do that is different from unclean shutdown.
- * MongoDB will not call into the storage subsystem after calling this function.
- *
- * On error, the storage engine should assert and crash.
- * There is intentionally no uncleanShutdown().
- */
- virtual void cleanShutdown() = 0;
+ /**
+ * Returns whether the engine supports a journalling concept or not.
+ */
+ virtual bool isDurable() const = 0;
- protected:
- /**
- * The destructor will never be called. See cleanShutdown instead.
- */
- virtual ~StorageEngine() {}
- };
+ /**
+ * Only MMAPv1 should override this and return true to trigger MMAPv1-specific behavior.
+ */
+ virtual bool isMmapV1() const {
+ return false;
+ }
+
+ /**
+ * Closes all file handles associated with a database.
+ */
+ virtual Status closeDatabase(OperationContext* txn, StringData db) = 0;
+
+ /**
+ * Deletes all data and metadata for a database.
+ */
+ virtual Status dropDatabase(OperationContext* txn, StringData db) = 0;
+
+ /**
+ * @return number of files flushed
+ */
+ virtual int flushAllFiles(bool sync) = 0;
+
+ /**
+ * Recover as much data as possible from a potentially corrupt RecordStore.
+ * This only recovers the record data, not indexes or anything else.
+ *
+ * Generally, this method should not be called directly except by the repairDatabase()
+ * free function.
+ *
+ * NOTE: MMAPv1 does not support this method and has its own repairDatabase() method.
+ */
+ virtual Status repairRecordStore(OperationContext* txn, const std::string& ns) = 0;
+
+ /**
+ * This method will be called before there is a clean shutdown. Storage engines should
+ * override this method if they have clean-up to do that is different from unclean shutdown.
+ * MongoDB will not call into the storage subsystem after calling this function.
+ *
+ * On error, the storage engine should assert and crash.
+ * There is intentionally no uncleanShutdown().
+ */
+ virtual void cleanShutdown() = 0;
+
+protected:
+ /**
+ * The destructor will never be called. See cleanShutdown instead.
+ */
+ virtual ~StorageEngine() {}
+};
} // namespace mongo
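
To make the Factory contract above concrete, here is a minimal sketch of a factory for a hypothetical engine. "ExampleEngine" and the placeholder return values are assumptions for illustration only, not part of this change; only the virtuals declared in storage_engine.h are exercised, and the inherited defaults for validateCollectionStorageOptions()/validateIndexStorageOptions() are kept.

// Illustrative sketch only -- the engine named here is hypothetical.
#include "mongo/db/storage/storage_engine.h"

namespace example {

class ExampleEngineFactory : public mongo::StorageEngine::Factory {
public:
    virtual ~ExampleEngineFactory() {}

    // Caller owns the returned pointer, per the contract documented above.
    virtual mongo::StorageEngine* create(const mongo::StorageGlobalParams& params,
                                         const mongo::StorageEngineLockFile& lockFile) const {
        // A real factory would construct and return its engine here; this sketch has none.
        return nullptr;
    }

    // This name is persisted in the data directory metadata; changing it would make
    // existing data directories unreadable by the engine.
    virtual mongo::StringData getCanonicalName() const {
        return "exampleEngine";
    }

    // A real engine would compare the creation options recorded in 'metadata' against
    // the current startup options in 'params'; this sketch accepts everything.
    virtual mongo::Status validateMetadata(const mongo::StorageEngineMetadata& metadata,
                                           const mongo::StorageGlobalParams& params) const {
        return mongo::Status::OK();
    }

    // Whatever is returned here is stored in storage.bson and handed back to
    // validateMetadata() on the next startup.
    virtual mongo::BSONObj createMetadataOptions(const mongo::StorageGlobalParams& params) const {
        return mongo::BSONObj();
    }
};

}  // namespace example
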
diff --git a/src/mongo/db/storage/storage_engine_lock_file.h b/src/mongo/db/storage/storage_engine_lock_file.h
index 2cb14ac77d3..24e2359396e 100644
--- a/src/mongo/db/storage/storage_engine_lock_file.h
+++ b/src/mongo/db/storage/storage_engine_lock_file.h
@@ -36,60 +36,60 @@
namespace mongo {
- class StorageEngineLockFile {
- MONGO_DISALLOW_COPYING(StorageEngineLockFile);
- public:
-
- /**
- * Checks existing lock file, if present, to see if it contains data from a previous
- * unclean shutdown. A clean shutdown should have produced a zero length lock file.
- * Uses open() to read existing lock file or create new file.
- * Uses boost::filesystem to check lock file so may throw boost::exception.
- */
- StorageEngineLockFile(const std::string& dbpath);
-
- virtual ~StorageEngineLockFile();
-
- /**
- * Returns the path to the lock file.
- */
- std::string getFilespec() const;
-
- /**
- * Returns true if lock file was not zeroed out due to previous unclean shutdown.
- * This state is evaluated at object initialization to allow storage engine
- * to make decisions on recovery based on this information after open() has been called.
- */
- bool createdByUncleanShutdown() const;
-
- /**
- * Opens and locks 'mongod.lock' in 'dbpath' directory.
- */
- Status open();
-
- /**
- * Closes lock file handles.
- */
- void close();
-
- /**
- * Writes current process ID to file.
- * Fails if lock file has not been opened.
- */
- Status writePid();
-
- /**
- * Truncates file contents and releases file locks.
- */
- void clearPidAndUnlock();
-
- private:
- std::string _dbpath;
- std::string _filespec;
- bool _uncleanShutdown;
-
- class LockFileHandle;
- std::unique_ptr<LockFileHandle> _lockFileHandle;
- };
+class StorageEngineLockFile {
+ MONGO_DISALLOW_COPYING(StorageEngineLockFile);
+
+public:
+ /**
+ * Checks existing lock file, if present, to see if it contains data from a previous
+ * unclean shutdown. A clean shutdown should have produced a zero length lock file.
+ * Uses open() to read existing lock file or create new file.
+ * Uses boost::filesystem to check lock file so may throw boost::exception.
+ */
+ StorageEngineLockFile(const std::string& dbpath);
+
+ virtual ~StorageEngineLockFile();
+
+ /**
+ * Returns the path to the lock file.
+ */
+ std::string getFilespec() const;
+
+ /**
+ * Returns true if lock file was not zeroed out due to previous unclean shutdown.
+ * This state is evaluated at object initialization to allow storage engine
+ * to make decisions on recovery based on this information after open() has been called.
+ */
+ bool createdByUncleanShutdown() const;
+
+ /**
+ * Opens and locks 'mongod.lock' in 'dbpath' directory.
+ */
+ Status open();
+
+ /**
+ * Closes lock file handles.
+ */
+ void close();
+
+ /**
+ * Writes current process ID to file.
+ * Fails if lock file has not been opened.
+ */
+ Status writePid();
+
+ /**
+ * Truncates file contents and releases file locks.
+ */
+ void clearPidAndUnlock();
+
+private:
+ std::string _dbpath;
+ std::string _filespec;
+ bool _uncleanShutdown;
+
+ class LockFileHandle;
+ std::unique_ptr<LockFileHandle> _lockFileHandle;
+};
} // namespace mongo
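
A minimal sketch of the lifecycle this class is meant to support, using only the methods declared above; the surrounding startup/shutdown wiring is simplified and error handling is abbreviated.

// Sketch of the intended call sequence; not actual server startup code.
#include <iostream>
#include "mongo/base/status.h"
#include "mongo/db/storage/storage_engine_lock_file.h"

mongo::Status lockDbpath(mongo::StorageEngineLockFile& lockFile) {
    if (lockFile.createdByUncleanShutdown()) {
        // A non-empty mongod.lock was left behind: the previous shutdown was not clean,
        // so the storage engine may decide to run recovery.
        std::cout << "unclean shutdown detected via " << lockFile.getFilespec() << std::endl;
    }
    mongo::Status status = lockFile.open();  // creates/opens and locks mongod.lock
    if (!status.isOK()) {
        return status;  // e.g. DBPathInUse when another mongod already holds the lock
    }
    return lockFile.writePid();  // truncates the file and records the current pid
}

void unlockDbpath(mongo::StorageEngineLockFile& lockFile) {
    lockFile.clearPidAndUnlock();  // leaves a zero-length lock file for the next start
    lockFile.close();
}
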
diff --git a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
index f7f12871053..66f477232d2 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
@@ -50,150 +50,153 @@ namespace mongo {
namespace {
- const std::string kLockFileBasename = "mongod.lock";
+const std::string kLockFileBasename = "mongod.lock";
} // namespace
- class StorageEngineLockFile::LockFileHandle {
- public:
- static const int kInvalidFd = -1;
- LockFileHandle() : _fd(kInvalidFd) { }
- bool isValid() const { return _fd != kInvalidFd; }
- void clear() { _fd = kInvalidFd; }
- int _fd;
- };
-
- StorageEngineLockFile::StorageEngineLockFile(const std::string& dbpath)
- : _dbpath(dbpath),
- _filespec((boost::filesystem::path(_dbpath) / kLockFileBasename).string()),
- _uncleanShutdown(boost::filesystem::exists(_filespec) &&
- boost::filesystem::file_size(_filespec) > 0),
- _lockFileHandle(new LockFileHandle()) {
+class StorageEngineLockFile::LockFileHandle {
+public:
+ static const int kInvalidFd = -1;
+ LockFileHandle() : _fd(kInvalidFd) {}
+ bool isValid() const {
+ return _fd != kInvalidFd;
}
-
- StorageEngineLockFile::~StorageEngineLockFile() { }
-
- std::string StorageEngineLockFile::getFilespec() const {
- return _filespec;
- }
-
- bool StorageEngineLockFile::createdByUncleanShutdown() const {
- return _uncleanShutdown;
+ void clear() {
+ _fd = kInvalidFd;
}
-
- Status StorageEngineLockFile::open() {
- try {
- if (!boost::filesystem::exists(_dbpath)) {
- return Status(ErrorCodes::NonExistentPath, str::stream()
- << "Data directory " << _dbpath << " not found.");
- }
- }
- catch (const std::exception& ex) {
- return Status(ErrorCodes::UnknownError, str::stream()
- << "Unable to check existence of data directory "
- << _dbpath << ": " << ex.what());
+ int _fd;
+};
+
+StorageEngineLockFile::StorageEngineLockFile(const std::string& dbpath)
+ : _dbpath(dbpath),
+ _filespec((boost::filesystem::path(_dbpath) / kLockFileBasename).string()),
+ _uncleanShutdown(boost::filesystem::exists(_filespec) &&
+ boost::filesystem::file_size(_filespec) > 0),
+ _lockFileHandle(new LockFileHandle()) {}
+
+StorageEngineLockFile::~StorageEngineLockFile() {}
+
+std::string StorageEngineLockFile::getFilespec() const {
+ return _filespec;
+}
+
+bool StorageEngineLockFile::createdByUncleanShutdown() const {
+ return _uncleanShutdown;
+}
+
+Status StorageEngineLockFile::open() {
+ try {
+ if (!boost::filesystem::exists(_dbpath)) {
+ return Status(ErrorCodes::NonExistentPath,
+ str::stream() << "Data directory " << _dbpath << " not found.");
}
+ } catch (const std::exception& ex) {
+ return Status(ErrorCodes::UnknownError,
+ str::stream() << "Unable to check existence of data directory " << _dbpath
+ << ": " << ex.what());
+ }
- int lockFile = ::open(_filespec.c_str(), O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
- if (lockFile < 0) {
- int errorcode = errno;
- return Status(ErrorCodes::DBPathInUse, str::stream()
- << "Unable to create/open lock file: "
- << _filespec << ' ' << errnoWithDescription(errorcode)
- << " Is a mongod instance already running?");
- }
+ int lockFile = ::open(_filespec.c_str(), O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
+ if (lockFile < 0) {
+ int errorcode = errno;
+ return Status(ErrorCodes::DBPathInUse,
+ str::stream() << "Unable to create/open lock file: " << _filespec << ' '
+ << errnoWithDescription(errorcode)
+ << " Is a mongod instance already running?");
+ }
#if !defined(__sun)
- int ret = ::flock(lockFile, LOCK_EX | LOCK_NB);
+ int ret = ::flock(lockFile, LOCK_EX | LOCK_NB);
#else
- struct flock fileLockInfo = {0};
- fileLockInfo.l_type = F_WRLCK;
- fileLockInfo.l_whence = SEEK_SET;
- int ret = ::fcntl(lockFile, F_SETLK, &fileLockInfo);
+ struct flock fileLockInfo = {0};
+ fileLockInfo.l_type = F_WRLCK;
+ fileLockInfo.l_whence = SEEK_SET;
+ int ret = ::fcntl(lockFile, F_SETLK, &fileLockInfo);
#endif // !defined(__sun)
- if (ret != 0) {
- int errorcode = errno;
- ::close(lockFile);
- return Status(ErrorCodes::DBPathInUse, str::stream()
- << "Unable to lock file: "
- << _filespec << ' ' << errnoWithDescription(errorcode)
- << ". Is a mongod instance already running?");
- }
- _lockFileHandle->_fd = lockFile;
- return Status::OK();
+ if (ret != 0) {
+ int errorcode = errno;
+ ::close(lockFile);
+ return Status(ErrorCodes::DBPathInUse,
+ str::stream() << "Unable to lock file: " << _filespec << ' '
+ << errnoWithDescription(errorcode)
+ << ". Is a mongod instance already running?");
}
+ _lockFileHandle->_fd = lockFile;
+ return Status::OK();
+}
- void StorageEngineLockFile::close() {
- if (!_lockFileHandle->isValid()) {
- return;
- }
- ::close(_lockFileHandle->_fd);
- _lockFileHandle->clear();
+void StorageEngineLockFile::close() {
+ if (!_lockFileHandle->isValid()) {
+ return;
+ }
+ ::close(_lockFileHandle->_fd);
+ _lockFileHandle->clear();
+}
+
+Status StorageEngineLockFile::writePid() {
+ if (!_lockFileHandle->isValid()) {
+ return Status(ErrorCodes::FileNotOpen,
+ str::stream() << "Unable to write process ID to " << _filespec
+ << " because file has not been opened.");
}
- Status StorageEngineLockFile::writePid() {
- if (!_lockFileHandle->isValid()) {
- return Status(ErrorCodes::FileNotOpen, str::stream()
- << "Unable to write process ID to " << _filespec
- << " because file has not been opened.");
- }
-
- if (::ftruncate(_lockFileHandle->_fd, 0)) {
- int errorcode = errno;
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to write process id to file (ftruncate failed): "
- << _filespec << ' ' << errnoWithDescription(errorcode));
- }
+ if (::ftruncate(_lockFileHandle->_fd, 0)) {
+ int errorcode = errno;
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to write process id to file (ftruncate failed): "
+ << _filespec << ' ' << errnoWithDescription(errorcode));
+ }
- ProcessId pid = ProcessId::getCurrent();
- std::stringstream ss;
- ss << pid << std::endl;
- std::string pidStr = ss.str();
- int bytesWritten = ::write(_lockFileHandle->_fd, pidStr.c_str(), pidStr.size());
- if (bytesWritten < 0) {
- int errorcode = errno;
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to write process id " << pid.toString() << " to file: "
- << _filespec << ' ' << errnoWithDescription(errorcode));
+ ProcessId pid = ProcessId::getCurrent();
+ std::stringstream ss;
+ ss << pid << std::endl;
+ std::string pidStr = ss.str();
+ int bytesWritten = ::write(_lockFileHandle->_fd, pidStr.c_str(), pidStr.size());
+ if (bytesWritten < 0) {
+ int errorcode = errno;
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to write process id " << pid.toString()
+ << " to file: " << _filespec << ' '
+ << errnoWithDescription(errorcode));
+
+ } else if (bytesWritten == 0) {
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to write process id " << pid.toString()
+ << " to file: " << _filespec << " no data written.");
+ }
- }
- else if (bytesWritten == 0) {
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to write process id " << pid.toString() << " to file: "
- << _filespec << " no data written.");
- }
+ if (::fsync(_lockFileHandle->_fd)) {
+ int errorcode = errno;
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to write process id " << pid.toString()
+ << " to file (fsync failed): " << _filespec << ' '
+ << errnoWithDescription(errorcode));
+ }
- if (::fsync(_lockFileHandle->_fd)) {
- int errorcode = errno;
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to write process id " << pid.toString() << " to file (fsync failed): "
- << _filespec << ' ' << errnoWithDescription(errorcode));
- }
+ flushMyDirectory(_filespec);
- flushMyDirectory(_filespec);
+ return Status::OK();
+}
- return Status::OK();
+void StorageEngineLockFile::clearPidAndUnlock() {
+ if (!_lockFileHandle->isValid()) {
+ return;
+ }
+ log() << "shutdown: removing fs lock...";
+ // This ought to be an unlink(), but Eliot says the last
+ // time that was attempted, there was a race condition
+ // with acquirePathLock().
+ if (::ftruncate(_lockFileHandle->_fd, 0)) {
+ int errorcode = errno;
+ log() << "couldn't remove fs lock " << errnoWithDescription(errorcode);
}
-
- void StorageEngineLockFile::clearPidAndUnlock() {
- if (!_lockFileHandle->isValid()) {
- return;
- }
- log() << "shutdown: removing fs lock...";
- // This ought to be an unlink(), but Eliot says the last
- // time that was attempted, there was a race condition
- // with acquirePathLock().
- if(::ftruncate(_lockFileHandle->_fd, 0)) {
- int errorcode = errno;
- log() << "couldn't remove fs lock " << errnoWithDescription(errorcode);
- }
#if !defined(__sun)
- ::flock(_lockFileHandle->_fd, LOCK_UN);
+ ::flock(_lockFileHandle->_fd, LOCK_UN);
#else
- struct flock fileLockInfo = {0};
- fileLockInfo.l_type = F_UNLCK;
- fileLockInfo.l_whence = SEEK_SET;
- ::fcntl(_lockFileHandle->_fd, F_SETLK, &fileLockInfo);
+ struct flock fileLockInfo = {0};
+ fileLockInfo.l_type = F_UNLCK;
+ fileLockInfo.l_whence = SEEK_SET;
+ ::fcntl(_lockFileHandle->_fd, F_SETLK, &fileLockInfo);
#endif // !defined(__sun)
- }
+}
} // namespace mongo
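
The POSIX implementation above relies on a non-blocking exclusive file lock: flock(LOCK_EX | LOCK_NB) on most platforms, and fcntl(F_SETLK) on Solaris, which lacks flock(). The following standalone demo (not MongoDB code) shows the flock() behavior in isolation: a second process running it against the same file fails immediately instead of blocking.

// Standalone POSIX demo of a non-blocking exclusive flock().
#include <cstdio>
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

int main() {
    int fd = ::open("example.lock", O_RDWR | O_CREAT, 0644);
    if (fd < 0) {
        std::perror("open");
        return 1;
    }
    if (::flock(fd, LOCK_EX | LOCK_NB) != 0) {
        // A second process running this program lands here while the first holds the lock.
        std::perror("flock");
        ::close(fd);
        return 1;
    }
    std::puts("lock acquired; press Enter to release");
    std::getchar();
    ::flock(fd, LOCK_UN);
    ::close(fd);
    return 0;
}
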
diff --git a/src/mongo/db/storage/storage_engine_lock_file_test.cpp b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
index 7ed69bc1477..9312b7f7c3a 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_test.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
@@ -39,134 +39,134 @@
namespace {
- using std::string;
- using mongo::unittest::TempDir;
+using std::string;
+using mongo::unittest::TempDir;
- using namespace mongo;
+using namespace mongo;
- TEST(StorageEngineLockFileTest, UncleanShutdownNoExistingFile) {
- TempDir tempDir("StorageEngineLockFileTest_UncleanShutdownNoExistingFile");
- StorageEngineLockFile lockFile(tempDir.path());
- ASSERT_FALSE(lockFile.createdByUncleanShutdown());
- }
-
- TEST(StorageEngineLockFileTest, UncleanShutdownEmptyExistingFile) {
- TempDir tempDir("StorageEngineLockFileTest_UncleanShutdownEmptyExistingFile");
- {
- std::string filename(tempDir.path() + "/mongod.lock");
- std::ofstream(filename.c_str());
- }
- StorageEngineLockFile lockFile(tempDir.path());
- ASSERT_FALSE(lockFile.createdByUncleanShutdown());
- }
-
- TEST(StorageEngineLockFileTest, UncleanShutdownNonEmptyExistingFile) {
- TempDir tempDir("StorageEngineLockFileTest_UncleanShutdownNonEmptyExistingFile");
- {
- std::string filename(tempDir.path() + "/mongod.lock");
- std::ofstream ofs(filename.c_str());
- ofs << 12345 << std::endl;
- }
- StorageEngineLockFile lockFile(tempDir.path());
- ASSERT_TRUE(lockFile.createdByUncleanShutdown());
- }
+TEST(StorageEngineLockFileTest, UncleanShutdownNoExistingFile) {
+ TempDir tempDir("StorageEngineLockFileTest_UncleanShutdownNoExistingFile");
+ StorageEngineLockFile lockFile(tempDir.path());
+ ASSERT_FALSE(lockFile.createdByUncleanShutdown());
+}
- TEST(StorageEngineLockFileTest, OpenInvalidDirectory) {
- StorageEngineLockFile lockFile("no_such_directory");
- ASSERT_EQUALS((boost::filesystem::path("no_such_directory") / "mongod.lock").string(),
- lockFile.getFilespec());
- Status status = lockFile.open();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::NonExistentPath, status.code());
- }
-
- // Cause ::open() to fail by providing a regular file instead of a directory for 'dbpath'.
- TEST(StorageEngineLockFileTest, OpenInvalidFilename) {
- TempDir tempDir("StorageEngineLockFileTest_OpenInvalidFilename");
- std::string filename(tempDir.path() + "/some_file");
+TEST(StorageEngineLockFileTest, UncleanShutdownEmptyExistingFile) {
+ TempDir tempDir("StorageEngineLockFileTest_UncleanShutdownEmptyExistingFile");
+ {
+ std::string filename(tempDir.path() + "/mongod.lock");
std::ofstream(filename.c_str());
- StorageEngineLockFile lockFile(filename);
- Status status = lockFile.open();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::DBPathInUse, status.code());
}
-
- TEST(StorageEngineLockFileTest, OpenNoExistingLockFile) {
- TempDir tempDir("StorageEngineLockFileTest_OpenNoExistingLockFile");
- StorageEngineLockFile lockFile(tempDir.path());
- ASSERT_OK(lockFile.open());
- lockFile.close();
+ StorageEngineLockFile lockFile(tempDir.path());
+ ASSERT_FALSE(lockFile.createdByUncleanShutdown());
+}
+
+TEST(StorageEngineLockFileTest, UncleanShutdownNonEmptyExistingFile) {
+ TempDir tempDir("StorageEngineLockFileTest_UncleanShutdownNonEmptyExistingFile");
+ {
+ std::string filename(tempDir.path() + "/mongod.lock");
+ std::ofstream ofs(filename.c_str());
+ ofs << 12345 << std::endl;
}
-
- TEST(StorageEngineLockFileTest, OpenEmptyLockFile) {
- TempDir tempDir("StorageEngineLockFileTest_OpenEmptyLockFile");
- StorageEngineLockFile lockFile(tempDir.path());
- std::string filename(lockFile.getFilespec());
- std::ofstream(filename.c_str());
- ASSERT_OK(lockFile.open());
- lockFile.close();
- }
-
- TEST(StorageEngineLockFileTest, WritePidFileNotOpened) {
- TempDir tempDir("StorageEngineLockFileTest_WritePidFileNotOpened");
- StorageEngineLockFile lockFile(tempDir.path());
- Status status = lockFile.writePid();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::FileNotOpen, status.code());
- }
-
- TEST(StorageEngineLockFileTest, WritePidFileOpened) {
- TempDir tempDir("StorageEngineLockFileTest_WritePidFileOpened");
- StorageEngineLockFile lockFile(tempDir.path());
- ASSERT_OK(lockFile.open());
- ASSERT_OK(lockFile.writePid());
- lockFile.close();
-
- // Read PID from lock file.
- std::string filename(lockFile.getFilespec());
- std::ifstream ifs(filename.c_str());
- int64_t pidFromLockFile = 0;
- ASSERT_TRUE(ifs >> pidFromLockFile);
- ASSERT_EQUALS(ProcessId::getCurrent().asInt64(), pidFromLockFile);
- }
-
- // Existing data in lock file must be removed before writing process ID.
- TEST(StorageEngineLockFileTest, WritePidTruncateExistingFile) {
- TempDir tempDir("StorageEngineLockFileTest_WritePidTruncateExistingFile");
- StorageEngineLockFile lockFile(tempDir.path());
- {
- std::string filename(tempDir.path() + "/mongod.lock");
- std::ofstream ofs(filename.c_str());
- std::string currentPidStr = ProcessId::getCurrent().toString();
- ASSERT_FALSE(currentPidStr.empty());
- ofs << std::string(currentPidStr.size() * 100, 'X') << std::endl;
- }
- ASSERT_OK(lockFile.open());
- ASSERT_OK(lockFile.writePid());
- lockFile.close();
-
- // Read PID from lock file.
- std::string filename(lockFile.getFilespec());
- std::ifstream ifs(filename.c_str());
- int64_t pidFromLockFile = 0;
- ASSERT_TRUE(ifs >> pidFromLockFile);
- ASSERT_EQUALS(ProcessId::getCurrent().asInt64(), pidFromLockFile);
-
- // There should not be any data in the file after the process ID.
- std::string extraData;
- ASSERT_FALSE(ifs >> extraData);
- }
-
- TEST(StorageEngineLockFileTest, ClearPidAndUnlock) {
- TempDir tempDir("StorageEngineLockFileTest_ClearPidAndUnlock");
- StorageEngineLockFile lockFile(tempDir.path());
- ASSERT_OK(lockFile.open());
- ASSERT_OK(lockFile.writePid());
-
- // Clear lock file contents.
- lockFile.clearPidAndUnlock();
- ASSERT_TRUE(boost::filesystem::exists(lockFile.getFilespec()));
- ASSERT_EQUALS(0U, boost::filesystem::file_size(lockFile.getFilespec()));
+ StorageEngineLockFile lockFile(tempDir.path());
+ ASSERT_TRUE(lockFile.createdByUncleanShutdown());
+}
+
+TEST(StorageEngineLockFileTest, OpenInvalidDirectory) {
+ StorageEngineLockFile lockFile("no_such_directory");
+ ASSERT_EQUALS((boost::filesystem::path("no_such_directory") / "mongod.lock").string(),
+ lockFile.getFilespec());
+ Status status = lockFile.open();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::NonExistentPath, status.code());
+}
+
+// Cause ::open() to fail by providing a regular file instead of a directory for 'dbpath'.
+TEST(StorageEngineLockFileTest, OpenInvalidFilename) {
+ TempDir tempDir("StorageEngineLockFileTest_OpenInvalidFilename");
+ std::string filename(tempDir.path() + "/some_file");
+ std::ofstream(filename.c_str());
+ StorageEngineLockFile lockFile(filename);
+ Status status = lockFile.open();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::DBPathInUse, status.code());
+}
+
+TEST(StorageEngineLockFileTest, OpenNoExistingLockFile) {
+ TempDir tempDir("StorageEngineLockFileTest_OpenNoExistingLockFile");
+ StorageEngineLockFile lockFile(tempDir.path());
+ ASSERT_OK(lockFile.open());
+ lockFile.close();
+}
+
+TEST(StorageEngineLockFileTest, OpenEmptyLockFile) {
+ TempDir tempDir("StorageEngineLockFileTest_OpenEmptyLockFile");
+ StorageEngineLockFile lockFile(tempDir.path());
+ std::string filename(lockFile.getFilespec());
+ std::ofstream(filename.c_str());
+ ASSERT_OK(lockFile.open());
+ lockFile.close();
+}
+
+TEST(StorageEngineLockFileTest, WritePidFileNotOpened) {
+ TempDir tempDir("StorageEngineLockFileTest_WritePidFileNotOpened");
+ StorageEngineLockFile lockFile(tempDir.path());
+ Status status = lockFile.writePid();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::FileNotOpen, status.code());
+}
+
+TEST(StorageEngineLockFileTest, WritePidFileOpened) {
+ TempDir tempDir("StorageEngineLockFileTest_WritePidFileOpened");
+ StorageEngineLockFile lockFile(tempDir.path());
+ ASSERT_OK(lockFile.open());
+ ASSERT_OK(lockFile.writePid());
+ lockFile.close();
+
+ // Read PID from lock file.
+ std::string filename(lockFile.getFilespec());
+ std::ifstream ifs(filename.c_str());
+ int64_t pidFromLockFile = 0;
+ ASSERT_TRUE(ifs >> pidFromLockFile);
+ ASSERT_EQUALS(ProcessId::getCurrent().asInt64(), pidFromLockFile);
+}
+
+// Existing data in lock file must be removed before writing process ID.
+TEST(StorageEngineLockFileTest, WritePidTruncateExistingFile) {
+ TempDir tempDir("StorageEngineLockFileTest_WritePidTruncateExistingFile");
+ StorageEngineLockFile lockFile(tempDir.path());
+ {
+ std::string filename(tempDir.path() + "/mongod.lock");
+ std::ofstream ofs(filename.c_str());
+ std::string currentPidStr = ProcessId::getCurrent().toString();
+ ASSERT_FALSE(currentPidStr.empty());
+ ofs << std::string(currentPidStr.size() * 100, 'X') << std::endl;
}
+ ASSERT_OK(lockFile.open());
+ ASSERT_OK(lockFile.writePid());
+ lockFile.close();
+
+ // Read PID from lock file.
+ std::string filename(lockFile.getFilespec());
+ std::ifstream ifs(filename.c_str());
+ int64_t pidFromLockFile = 0;
+ ASSERT_TRUE(ifs >> pidFromLockFile);
+ ASSERT_EQUALS(ProcessId::getCurrent().asInt64(), pidFromLockFile);
+
+ // There should not be any data in the file after the process ID.
+ std::string extraData;
+ ASSERT_FALSE(ifs >> extraData);
+}
+
+TEST(StorageEngineLockFileTest, ClearPidAndUnlock) {
+ TempDir tempDir("StorageEngineLockFileTest_ClearPidAndUnlock");
+ StorageEngineLockFile lockFile(tempDir.path());
+ ASSERT_OK(lockFile.open());
+ ASSERT_OK(lockFile.writePid());
+
+ // Clear lock file contents.
+ lockFile.clearPidAndUnlock();
+ ASSERT_TRUE(boost::filesystem::exists(lockFile.getFilespec()));
+ ASSERT_EQUALS(0U, boost::filesystem::file_size(lockFile.getFilespec()));
+}
} // namespace
diff --git a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
index 41fc74ea736..6a3d69a3e2a 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
@@ -45,146 +45,152 @@ namespace mongo {
namespace {
- const std::string kLockFileBasename = "mongod.lock";
-
- Status _truncateFile(HANDLE handle) {
- invariant(handle != INVALID_HANDLE_VALUE);
-
- LARGE_INTEGER largeint;
- largeint.QuadPart = 0;
- if (::SetFilePointerEx(handle, largeint, NULL, FILE_BEGIN) == FALSE) {
- int errorcode = GetLastError();
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to truncate lock file (SetFilePointerEx failed) "
- << errnoWithDescription(errorcode));
- }
-
- if (::SetEndOfFile(handle) == FALSE) {
- int errorcode = GetLastError();
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to truncate lock file (SetEndOfFile failed) "
- << errnoWithDescription(errorcode));
- }
-
- return Status::OK();
+const std::string kLockFileBasename = "mongod.lock";
+
+Status _truncateFile(HANDLE handle) {
+ invariant(handle != INVALID_HANDLE_VALUE);
+
+ LARGE_INTEGER largeint;
+ largeint.QuadPart = 0;
+ if (::SetFilePointerEx(handle, largeint, NULL, FILE_BEGIN) == FALSE) {
+ int errorcode = GetLastError();
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to truncate lock file (SetFilePointerEx failed) "
+ << errnoWithDescription(errorcode));
}
-} // namespace
-
- class StorageEngineLockFile::LockFileHandle {
- public:
- LockFileHandle() : _handle(INVALID_HANDLE_VALUE) { }
- bool isValid() const { return _handle != INVALID_HANDLE_VALUE; }
- void clear() { _handle = INVALID_HANDLE_VALUE; }
- HANDLE _handle;
- };
-
- StorageEngineLockFile::StorageEngineLockFile(const std::string& dbpath)
- : _dbpath(dbpath),
- _filespec((boost::filesystem::path(_dbpath) / kLockFileBasename).string()),
- _uncleanShutdown(boost::filesystem::exists(_filespec) &&
- boost::filesystem::file_size(_filespec) > 0),
- _lockFileHandle(new LockFileHandle()) {
+ if (::SetEndOfFile(handle) == FALSE) {
+ int errorcode = GetLastError();
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to truncate lock file (SetEndOfFile failed) "
+ << errnoWithDescription(errorcode));
}
- StorageEngineLockFile::~StorageEngineLockFile() { }
+ return Status::OK();
+}
- std::string StorageEngineLockFile::getFilespec() const {
- return _filespec;
- }
+} // namespace
- bool StorageEngineLockFile::createdByUncleanShutdown() const {
- return _uncleanShutdown;
+class StorageEngineLockFile::LockFileHandle {
+public:
+ LockFileHandle() : _handle(INVALID_HANDLE_VALUE) {}
+ bool isValid() const {
+ return _handle != INVALID_HANDLE_VALUE;
}
-
- Status StorageEngineLockFile::open() {
- try {
- if (!boost::filesystem::exists(_dbpath)) {
- return Status(ErrorCodes::NonExistentPath, str::stream()
- << "Data directory " << _dbpath << " not found.");
- }
- }
- catch (const std::exception& ex) {
- return Status(ErrorCodes::UnknownError, str::stream()
- << "Unable to check existence of data directory "
- << _dbpath << ": " << ex.what());
+ void clear() {
+ _handle = INVALID_HANDLE_VALUE;
+ }
+ HANDLE _handle;
+};
+
+StorageEngineLockFile::StorageEngineLockFile(const std::string& dbpath)
+ : _dbpath(dbpath),
+ _filespec((boost::filesystem::path(_dbpath) / kLockFileBasename).string()),
+ _uncleanShutdown(boost::filesystem::exists(_filespec) &&
+ boost::filesystem::file_size(_filespec) > 0),
+ _lockFileHandle(new LockFileHandle()) {}
+
+StorageEngineLockFile::~StorageEngineLockFile() {}
+
+std::string StorageEngineLockFile::getFilespec() const {
+ return _filespec;
+}
+
+bool StorageEngineLockFile::createdByUncleanShutdown() const {
+ return _uncleanShutdown;
+}
+
+Status StorageEngineLockFile::open() {
+ try {
+ if (!boost::filesystem::exists(_dbpath)) {
+ return Status(ErrorCodes::NonExistentPath,
+ str::stream() << "Data directory " << _dbpath << " not found.");
}
+ } catch (const std::exception& ex) {
+ return Status(ErrorCodes::UnknownError,
+ str::stream() << "Unable to check existence of data directory " << _dbpath
+ << ": " << ex.what());
+ }
- HANDLE lockFileHandle = CreateFileA(_filespec.c_str(), GENERIC_READ | GENERIC_WRITE,
- 0 /* do not allow anyone else access */, NULL,
- OPEN_ALWAYS /* success if fh can open */, 0, NULL);
-
- if (lockFileHandle == INVALID_HANDLE_VALUE) {
- int errorcode = GetLastError();
- return Status(ErrorCodes::DBPathInUse, str::stream()
- << "Unable to create/open lock file: " << _filespec << ' '
- << errnoWithDescription(errorcode)
- << ". Is a mongod instance already running?");
- }
- _lockFileHandle->_handle = lockFileHandle;
- return Status::OK();
+ HANDLE lockFileHandle = CreateFileA(_filespec.c_str(),
+ GENERIC_READ | GENERIC_WRITE,
+ 0 /* do not allow anyone else access */,
+ NULL,
+ OPEN_ALWAYS /* success if fh can open */,
+ 0,
+ NULL);
+
+ if (lockFileHandle == INVALID_HANDLE_VALUE) {
+ int errorcode = GetLastError();
+ return Status(ErrorCodes::DBPathInUse,
+ str::stream() << "Unable to create/open lock file: " << _filespec << ' '
+ << errnoWithDescription(errorcode)
+ << ". Is a mongod instance already running?");
}
+ _lockFileHandle->_handle = lockFileHandle;
+ return Status::OK();
+}
- void StorageEngineLockFile::close() {
- if (!_lockFileHandle->isValid()) {
- return;
- }
- CloseHandle(_lockFileHandle->_handle);
- _lockFileHandle->clear();
+void StorageEngineLockFile::close() {
+ if (!_lockFileHandle->isValid()) {
+ return;
+ }
+ CloseHandle(_lockFileHandle->_handle);
+ _lockFileHandle->clear();
+}
+
+Status StorageEngineLockFile::writePid() {
+ if (!_lockFileHandle->isValid()) {
+ return Status(ErrorCodes::FileNotOpen,
+ str::stream() << "Unable to write process ID to " << _filespec
+ << " because file has not been opened.");
}
- Status StorageEngineLockFile::writePid() {
- if (!_lockFileHandle->isValid()) {
- return Status(ErrorCodes::FileNotOpen, str::stream()
- << "Unable to write process ID to " << _filespec
- << " because file has not been opened.");
- }
+ Status status = _truncateFile(_lockFileHandle->_handle);
+ if (!status.isOK()) {
+ return status;
+ }
- Status status = _truncateFile(_lockFileHandle->_handle);
- if (!status.isOK()) {
- return status;
- }
+ ProcessId pid = ProcessId::getCurrent();
+ std::stringstream ss;
+ ss << pid << std::endl;
+ std::string pidStr = ss.str();
+ DWORD bytesWritten = 0;
+ if (::WriteFile(_lockFileHandle->_handle,
+ static_cast<LPCVOID>(pidStr.c_str()),
+ static_cast<DWORD>(pidStr.size()),
+ &bytesWritten,
+ NULL) == FALSE) {
+ int errorcode = GetLastError();
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to write process id " << pid.toString()
+ << " to file: " << _filespec << ' '
+ << errnoWithDescription(errorcode));
+ } else if (bytesWritten == 0) {
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to write process id " << pid.toString()
+ << " to file: " << _filespec << " no data written.");
+ }
- ProcessId pid = ProcessId::getCurrent();
- std::stringstream ss;
- ss << pid << std::endl;
- std::string pidStr = ss.str();
- DWORD bytesWritten = 0;
- if (::WriteFile(_lockFileHandle->_handle,
- static_cast<LPCVOID>(pidStr.c_str()),
- static_cast<DWORD>(pidStr.size()),
- &bytesWritten,
- NULL) == FALSE) {
- int errorcode = GetLastError();
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to write process id " << pid.toString() << " to file: "
- << _filespec << ' ' << errnoWithDescription(errorcode));
- }
- else if (bytesWritten == 0) {
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to write process id " << pid.toString() << " to file: "
- << _filespec << " no data written.");
- }
+ ::FlushFileBuffers(_lockFileHandle->_handle);
- ::FlushFileBuffers(_lockFileHandle->_handle);
+ return Status::OK();
+}
- return Status::OK();
+void StorageEngineLockFile::clearPidAndUnlock() {
+ if (!_lockFileHandle->isValid()) {
+ return;
}
-
- void StorageEngineLockFile::clearPidAndUnlock() {
- if (!_lockFileHandle->isValid()) {
- return;
- }
- log() << "shutdown: removing fs lock...";
- // This ought to be an unlink(), but Eliot says the last
- // time that was attempted, there was a race condition
- // with acquirePathLock().
- Status status = _truncateFile(_lockFileHandle->_handle);
- if (!status.isOK()) {
- log() << "couldn't remove fs lock " << status.toString();
- }
- CloseHandle(_lockFileHandle->_handle);
- _lockFileHandle->clear();
+ log() << "shutdown: removing fs lock...";
+ // This ought to be an unlink(), but Eliot says the last
+ // time that was attempted, there was a race condition
+ // with acquirePathLock().
+ Status status = _truncateFile(_lockFileHandle->_handle);
+ if (!status.isOK()) {
+ log() << "couldn't remove fs lock " << status.toString();
}
+ CloseHandle(_lockFileHandle->_handle);
+ _lockFileHandle->clear();
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/storage_engine_metadata.cpp b/src/mongo/db/storage/storage_engine_metadata.cpp
index 2881c41d689..0a6dc42582e 100644
--- a/src/mongo/db/storage/storage_engine_metadata.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata.cpp
@@ -49,221 +49,218 @@ namespace mongo {
namespace {
- const std::string kMetadataBasename = "storage.bson";
-
- /**
- * Returns true if local.ns is found in 'directory' or 'directory'/local/.
- */
- bool containsMMapV1LocalNsFile(const std::string& directory) {
- boost::filesystem::path directoryPath(directory);
- return boost::filesystem::exists(directoryPath / "local.ns") ||
- boost::filesystem::exists((directoryPath / "local") / "local.ns");
- }
+const std::string kMetadataBasename = "storage.bson";
+
+/**
+ * Returns true if local.ns is found in 'directory' or 'directory'/local/.
+ */
+bool containsMMapV1LocalNsFile(const std::string& directory) {
+ boost::filesystem::path directoryPath(directory);
+ return boost::filesystem::exists(directoryPath / "local.ns") ||
+ boost::filesystem::exists((directoryPath / "local") / "local.ns");
+}
} // namespace
- // static
- std::unique_ptr<StorageEngineMetadata> StorageEngineMetadata::forPath(
- const std::string& dbpath) {
- std::unique_ptr<StorageEngineMetadata> metadata;
- if (boost::filesystem::exists(boost::filesystem::path(dbpath) / kMetadataBasename)) {
- metadata.reset(new StorageEngineMetadata(dbpath));
- Status status = metadata->read();
- if (!status.isOK()) {
- error() << "Unable to read the storage engine metadata file: " << status;
- fassertFailed(28661);
- }
+// static
+std::unique_ptr<StorageEngineMetadata> StorageEngineMetadata::forPath(const std::string& dbpath) {
+ std::unique_ptr<StorageEngineMetadata> metadata;
+ if (boost::filesystem::exists(boost::filesystem::path(dbpath) / kMetadataBasename)) {
+ metadata.reset(new StorageEngineMetadata(dbpath));
+ Status status = metadata->read();
+ if (!status.isOK()) {
+ error() << "Unable to read the storage engine metadata file: " << status;
+ fassertFailed(28661);
}
- return metadata;
}
-
- // static
- boost::optional<std::string> StorageEngineMetadata::getStorageEngineForPath(
- const std::string& dbpath) {
- if (auto metadata = StorageEngineMetadata::forPath(dbpath)) {
- return {metadata->getStorageEngine()};
- }
-
- // Fallback to checking for MMAPv1-specific files to handle upgrades from before the
- // storage.bson metadata file was introduced in 3.0.
- if (containsMMapV1LocalNsFile(dbpath)) {
- return {std::string("mmapv1")};
- }
- return {};
+ return metadata;
+}
+
+// static
+boost::optional<std::string> StorageEngineMetadata::getStorageEngineForPath(
+ const std::string& dbpath) {
+ if (auto metadata = StorageEngineMetadata::forPath(dbpath)) {
+ return {metadata->getStorageEngine()};
}
- StorageEngineMetadata::StorageEngineMetadata(const std::string& dbpath)
- : _dbpath(dbpath) {
- reset();
+ // Fallback to checking for MMAPv1-specific files to handle upgrades from before the
+ // storage.bson metadata file was introduced in 3.0.
+ if (containsMMapV1LocalNsFile(dbpath)) {
+ return {std::string("mmapv1")};
}
+ return {};
+}
- StorageEngineMetadata::~StorageEngineMetadata() { }
+StorageEngineMetadata::StorageEngineMetadata(const std::string& dbpath) : _dbpath(dbpath) {
+ reset();
+}
- void StorageEngineMetadata::reset() {
- _storageEngine.clear();
- _storageEngineOptions = BSONObj();
- }
+StorageEngineMetadata::~StorageEngineMetadata() {}
- const std::string& StorageEngineMetadata::getStorageEngine() const {
- return _storageEngine;
- }
+void StorageEngineMetadata::reset() {
+ _storageEngine.clear();
+ _storageEngineOptions = BSONObj();
+}
- const BSONObj& StorageEngineMetadata::getStorageEngineOptions() const {
- return _storageEngineOptions;
- }
+const std::string& StorageEngineMetadata::getStorageEngine() const {
+ return _storageEngine;
+}
- void StorageEngineMetadata::setStorageEngine(const std::string& storageEngine) {
- _storageEngine = storageEngine;
- }
+const BSONObj& StorageEngineMetadata::getStorageEngineOptions() const {
+ return _storageEngineOptions;
+}
- void StorageEngineMetadata::setStorageEngineOptions(const BSONObj& storageEngineOptions) {
- _storageEngineOptions = storageEngineOptions.getOwned();
- }
+void StorageEngineMetadata::setStorageEngine(const std::string& storageEngine) {
+ _storageEngine = storageEngine;
+}
- Status StorageEngineMetadata::read() {
- reset();
+void StorageEngineMetadata::setStorageEngineOptions(const BSONObj& storageEngineOptions) {
+ _storageEngineOptions = storageEngineOptions.getOwned();
+}
- boost::filesystem::path metadataPath =
- boost::filesystem::path(_dbpath) / kMetadataBasename;
+Status StorageEngineMetadata::read() {
+ reset();
- if (!boost::filesystem::exists(metadataPath)) {
- return Status(ErrorCodes::NonExistentPath, str::stream()
- << "Metadata file " << metadataPath.string() << " not found.");
- }
+ boost::filesystem::path metadataPath = boost::filesystem::path(_dbpath) / kMetadataBasename;
- boost::uintmax_t fileSize = boost::filesystem::file_size(metadataPath);
- if (fileSize == 0) {
- return Status(ErrorCodes::InvalidPath, str::stream()
- << "Metadata file " << metadataPath.string() << " cannot be empty.");
- }
- if (fileSize == static_cast<boost::uintmax_t>(-1)) {
- return Status(ErrorCodes::InvalidPath, str::stream()
- << "Unable to determine size of metadata file " << metadataPath.string());
- }
+ if (!boost::filesystem::exists(metadataPath)) {
+ return Status(ErrorCodes::NonExistentPath,
+ str::stream() << "Metadata file " << metadataPath.string() << " not found.");
+ }
+
+ boost::uintmax_t fileSize = boost::filesystem::file_size(metadataPath);
+ if (fileSize == 0) {
+ return Status(ErrorCodes::InvalidPath,
+ str::stream() << "Metadata file " << metadataPath.string()
+ << " cannot be empty.");
+ }
+ if (fileSize == static_cast<boost::uintmax_t>(-1)) {
+ return Status(ErrorCodes::InvalidPath,
+ str::stream() << "Unable to determine size of metadata file "
+ << metadataPath.string());
+ }
- std::vector<char> buffer(fileSize);
- std::string filename = metadataPath.string();
- try {
- std::ifstream ifs(filename.c_str(), std::ios_base::in | std::ios_base::binary);
- if (!ifs) {
- return Status(ErrorCodes::FileNotOpen, str::stream()
- << "Failed to read metadata from " << filename);
+ std::vector<char> buffer(fileSize);
+ std::string filename = metadataPath.string();
+ try {
+ std::ifstream ifs(filename.c_str(), std::ios_base::in | std::ios_base::binary);
+ if (!ifs) {
+ return Status(ErrorCodes::FileNotOpen,
+ str::stream() << "Failed to read metadata from " << filename);
}
// Read BSON from file
ifs.read(&buffer[0], buffer.size());
if (!ifs) {
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unable to read BSON data from " << filename);
- }
- }
- catch (const std::exception& ex) {
- return Status(ErrorCodes::FileStreamFailed, str::stream()
- << "Unexpected error reading BSON data from " << filename
- << ": " << ex.what());
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unable to read BSON data from " << filename);
}
+ } catch (const std::exception& ex) {
+ return Status(ErrorCodes::FileStreamFailed,
+ str::stream() << "Unexpected error reading BSON data from " << filename
+ << ": " << ex.what());
+ }
- BSONObj obj;
- try {
- obj = BSONObj(&buffer[0]);
- }
- catch (DBException& ex) {
- return Status(ErrorCodes::FailedToParse, str::stream()
- << "Failed to convert data in " << filename
- << " to BSON: " << ex.what());
- }
+ BSONObj obj;
+ try {
+ obj = BSONObj(&buffer[0]);
+ } catch (DBException& ex) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Failed to convert data in " << filename
+ << " to BSON: " << ex.what());
+ }
- // Validate 'storage.engine' field.
- BSONElement storageEngineElement = obj.getFieldDotted("storage.engine");
- if (storageEngineElement.type() != mongo::String) {
- return Status(ErrorCodes::FailedToParse, str::stream()
- << "The 'storage.engine' field in metadata must be a string: "
- << storageEngineElement.toString());
- }
+ // Validate 'storage.engine' field.
+ BSONElement storageEngineElement = obj.getFieldDotted("storage.engine");
+ if (storageEngineElement.type() != mongo::String) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "The 'storage.engine' field in metadata must be a string: "
+ << storageEngineElement.toString());
+ }
- // Extract storage engine name from 'storage.engine' node.
- std::string storageEngine = storageEngineElement.String();
- if (storageEngine.empty()) {
- return Status(ErrorCodes::FailedToParse,
- "The 'storage.engine' field in metadata cannot be empty string.");
- }
- _storageEngine = storageEngine;
+ // Extract storage engine name from 'storage.engine' node.
+ std::string storageEngine = storageEngineElement.String();
+ if (storageEngine.empty()) {
+ return Status(ErrorCodes::FailedToParse,
+ "The 'storage.engine' field in metadata cannot be empty string.");
+ }
+ _storageEngine = storageEngine;
- // Read storage engine options generated by storage engine factory from startup options.
- BSONElement storageEngineOptionsElement = obj.getFieldDotted("storage.options");
- if (!storageEngineOptionsElement.eoo()) {
- if (!storageEngineOptionsElement.isABSONObj()) {
- return Status(ErrorCodes::FailedToParse, str::stream()
+ // Read storage engine options generated by storage engine factory from startup options.
+ BSONElement storageEngineOptionsElement = obj.getFieldDotted("storage.options");
+ if (!storageEngineOptionsElement.eoo()) {
+ if (!storageEngineOptionsElement.isABSONObj()) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream()
<< "The 'storage.options' field in metadata must be a string: "
<< storageEngineOptionsElement.toString());
- }
- setStorageEngineOptions(storageEngineOptionsElement.Obj());
}
+ setStorageEngineOptions(storageEngineOptionsElement.Obj());
+ }
- return Status::OK();
+ return Status::OK();
+}
+
+Status StorageEngineMetadata::write() const {
+ if (_storageEngine.empty()) {
+ return Status(ErrorCodes::BadValue,
+ "Cannot write empty storage engine name to metadata file.");
}
- Status StorageEngineMetadata::write() const {
- if (_storageEngine.empty()) {
- return Status(ErrorCodes::BadValue,
- "Cannot write empty storage engine name to metadata file.");
+ boost::filesystem::path metadataTempPath =
+ boost::filesystem::path(_dbpath) / (kMetadataBasename + ".tmp");
+ {
+ std::string filenameTemp = metadataTempPath.string();
+ std::ofstream ofs(filenameTemp.c_str(), std::ios_base::out | std::ios_base::binary);
+ if (!ofs) {
+ return Status(ErrorCodes::FileNotOpen,
+ str::stream() << "Failed to write metadata to " << filenameTemp);
}
- boost::filesystem::path metadataTempPath =
- boost::filesystem::path(_dbpath) / (kMetadataBasename + ".tmp");
- {
- std::string filenameTemp = metadataTempPath.string();
- std::ofstream ofs(filenameTemp.c_str(), std::ios_base::out | std::ios_base::binary);
- if (!ofs) {
- return Status(ErrorCodes::FileNotOpen, str::stream()
- << "Failed to write metadata to " << filenameTemp);
- }
-
- BSONObj obj = BSON("storage"
- << BSON("engine" << _storageEngine << "options" << _storageEngineOptions));
- ofs.write(obj.objdata(), obj.objsize());
- if (!ofs) {
- return Status(ErrorCodes::InternalError, str::stream()
- << "Failed to write BSON data to " << filenameTemp);
- }
+ BSONObj obj = BSON(
+ "storage" << BSON("engine" << _storageEngine << "options" << _storageEngineOptions));
+ ofs.write(obj.objdata(), obj.objsize());
+ if (!ofs) {
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "Failed to write BSON data to " << filenameTemp);
}
+ }
- // Rename temporary file to actual metadata file.
- boost::filesystem::path metadataPath =
- boost::filesystem::path(_dbpath) / kMetadataBasename;
- try {
- boost::filesystem::rename(metadataTempPath, metadataPath);
- }
- catch (const std::exception& ex) {
- return Status(ErrorCodes::FileRenameFailed, str::stream()
- << "Unexpected error while renaming temporary metadata file "
- << metadataTempPath.string() << " to " << metadataPath.string()
- << ": " << ex.what());
- }
+ // Rename temporary file to actual metadata file.
+ boost::filesystem::path metadataPath = boost::filesystem::path(_dbpath) / kMetadataBasename;
+ try {
+ boost::filesystem::rename(metadataTempPath, metadataPath);
+ } catch (const std::exception& ex) {
+ return Status(ErrorCodes::FileRenameFailed,
+ str::stream() << "Unexpected error while renaming temporary metadata file "
+ << metadataTempPath.string() << " to " << metadataPath.string()
+ << ": " << ex.what());
+ }
+ return Status::OK();
+}
+
+template <>
+Status StorageEngineMetadata::validateStorageEngineOption<bool>(StringData fieldName,
+ bool expectedValue) const {
+ BSONElement element = _storageEngineOptions.getField(fieldName);
+ if (element.eoo()) {
return Status::OK();
}
-
- template <>
- Status StorageEngineMetadata::validateStorageEngineOption<bool>(StringData fieldName,
- bool expectedValue) const {
- BSONElement element = _storageEngineOptions.getField(fieldName);
- if (element.eoo()) {
- return Status::OK();
- }
- if (!element.isBoolean()) {
- return Status(ErrorCodes::FailedToParse, str::stream()
- << "Expected boolean field " << fieldName << " but got "
- << typeName(element.type()) << " instead: " << element);
- }
- if (element.boolean() == expectedValue) {
- return Status::OK();
- }
- return Status(ErrorCodes::InvalidOptions, str::stream()
- << "Requested option conflicts with current storage engine option for "
+ if (!element.isBoolean()) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Expected boolean field " << fieldName << " but got "
+ << typeName(element.type()) << " instead: " << element);
+ }
+ if (element.boolean() == expectedValue) {
+ return Status::OK();
+ }
+ return Status(
+ ErrorCodes::InvalidOptions,
+ str::stream() << "Requested option conflicts with current storage engine option for "
<< fieldName << "; you requested " << (expectedValue ? "true" : "false")
<< " but the current server storage is already set to "
<< (element.boolean() ? "true" : "false") << " and cannot be changed");
- }
+}
} // namespace mongo
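A minimal caller-side sketch of the <bool> specialization defined above; the field name "directoryPerDB" and the wrapper function are illustrative assumptions, not taken from this change.

#include "mongo/db/storage/storage_engine_metadata.h"

// Sketch: a storage engine factory could use the metadata recorded in
// 'storage.bson' to reject a startup flag that conflicts with how the dbpath
// was originally created. Only the boolean specialization is defined above,
// so the second argument must be a bool.
mongo::Status checkRecordedOption(const mongo::StorageEngineMetadata& metadata,
                                  bool requestedDirectoryPerDB) {
    return metadata.validateStorageEngineOption("directoryPerDB", requestedDirectoryPerDB);
}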
diff --git a/src/mongo/db/storage/storage_engine_metadata.h b/src/mongo/db/storage/storage_engine_metadata.h
index a4dafdf9bfa..03873a851a9 100644
--- a/src/mongo/db/storage/storage_engine_metadata.h
+++ b/src/mongo/db/storage/storage_engine_metadata.h
@@ -38,83 +38,82 @@
namespace mongo {
+/**
+ * This reads and writes the storage engine metadata file 'storage.bson'
+ * in the data directory (See --dbpath).
+ * 'storage.engine' is the only mandatory field in the BSON metadata file.
+ * Fields other than 'storage.engine' are ignored.
+ */
+class StorageEngineMetadata {
+ MONGO_DISALLOW_COPYING(StorageEngineMetadata);
+
+public:
+ /**
+ * Returns a metadata object describing the storage engine that backs the data files
+     * contained in 'dbpath', or nullptr otherwise.
+ */
+ static std::unique_ptr<StorageEngineMetadata> forPath(const std::string& dbpath);
+
+ /**
+ * Returns the name of the storage engine that backs the data files contained in 'dbpath',
+     * or boost::none otherwise.
+ */
+ static boost::optional<std::string> getStorageEngineForPath(const std::string& dbpath);
+
+ /**
+ * Sets fields to defaults.
+     * Use read() to load metadata from file.
+ */
+ StorageEngineMetadata(const std::string& dbpath);
+
+ virtual ~StorageEngineMetadata();
+
+ /**
+ * Returns name of storage engine in metadata.
+ */
+ const std::string& getStorageEngine() const;
+
+ /**
+ * Returns storage engine options in metadata.
+ */
+ const BSONObj& getStorageEngineOptions() const;
+
+ /**
+ * Sets name of storage engine in metadata.
+ */
+ void setStorageEngine(const std::string& storageEngine);
+
+ /**
+ * Sets storage engine options in metadata.
+ */
+ void setStorageEngineOptions(const BSONObj& storageEngineOptions);
+
+ /**
+ * Resets fields to default values.
+ */
+ void reset();
+
+ /**
+ * Reads metadata from 'storage.bson' in 'dbpath' directory.
+ */
+ Status read();
+
+ /**
+ * Writes metadata to file.
+ */
+ Status write() const;
+
/**
- * This reads and write the storage engine metadata file 'storage.bson'
- * in the data directory (See --dbpath).
- * 'storage.engine' is the only mandatory field in the BSON metadata file.
- * Fields other than 'storage.engine' are ignored.
+ * Validates a single field in the storage engine options.
+ * Currently, only boolean fields are supported.
*/
- class StorageEngineMetadata {
- MONGO_DISALLOW_COPYING(StorageEngineMetadata);
-
- public:
-
- /**
- * Returns a metadata object describing the storage engine that backs the data files
- * contained in 'dbpath', and nullptr otherwise.
- */
- static std::unique_ptr<StorageEngineMetadata> forPath(const std::string& dbpath);
-
- /**
- * Returns the name of the storage engine that backs the data files contained in 'dbpath',
- * and none otherwise.
- */
- static boost::optional<std::string> getStorageEngineForPath(const std::string& dbpath);
-
- /**
- * Sets fields to defaults.
- * Use read() load metadata from file.
- */
- StorageEngineMetadata(const std::string& dbpath);
-
- virtual ~StorageEngineMetadata();
-
- /**
- * Returns name of storage engine in metadata.
- */
- const std::string& getStorageEngine() const;
-
- /**
- * Returns storage engine options in metadata.
- */
- const BSONObj& getStorageEngineOptions() const;
-
- /**
- * Sets name of storage engine in metadata.
- */
- void setStorageEngine(const std::string& storageEngine);
-
- /**
- * Sets storage engine options in metadata.
- */
- void setStorageEngineOptions(const BSONObj& storageEngineOptions);
-
- /**
- * Resets fields to default values.
- */
- void reset();
-
- /**
- * Reads metadata from 'storage.bson' in 'dbpath' directory.
- */
- Status read();
-
- /**
- * Writes metadata to file.
- */
- Status write() const;
-
- /**
- * Validates a single field in the storage engine options.
- * Currently, only boolean fields are supported.
- */
- template <typename T>
- Status validateStorageEngineOption(StringData fieldName, T expectedValue) const;
-
- private:
- std::string _dbpath;
- std::string _storageEngine;
- BSONObj _storageEngineOptions;
- };
+ template <typename T>
+ Status validateStorageEngineOption(StringData fieldName, T expectedValue) const;
+
+private:
+ std::string _dbpath;
+ std::string _storageEngine;
+ BSONObj _storageEngineOptions;
+};
} // namespace mongo
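A minimal usage sketch of the interface declared above, assuming only these declarations; the dbpath literal is illustrative.

#include <iostream>
#include <memory>
#include <string>

#include "mongo/db/storage/storage_engine_metadata.h"

int main() {
    const std::string dbpath = "/data/db";  // illustrative path only

    // forPath() returns nullptr when no usable 'storage.bson' exists in dbpath.
    std::unique_ptr<mongo::StorageEngineMetadata> metadata =
        mongo::StorageEngineMetadata::forPath(dbpath);
    if (!metadata) {
        std::cout << "no storage engine metadata under " << dbpath << std::endl;
        return 0;
    }
    std::cout << "engine: " << metadata->getStorageEngine() << std::endl;
    std::cout << "options: " << metadata->getStorageEngineOptions().toString() << std::endl;
    return 0;
}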
diff --git a/src/mongo/db/storage/storage_engine_metadata_test.cpp b/src/mongo/db/storage/storage_engine_metadata_test.cpp
index 27508dfe7a5..0f0326a2161 100644
--- a/src/mongo/db/storage/storage_engine_metadata_test.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata_test.cpp
@@ -43,261 +43,261 @@
namespace {
- using std::string;
- using mongo::unittest::TempDir;
+using std::string;
+using mongo::unittest::TempDir;
- using namespace mongo;
+using namespace mongo;
- TEST(StorageEngineMetadataTest, ReadNonExistentMetadataFile) {
- StorageEngineMetadata metadata("no_such_directory");
- Status status = metadata.read();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::NonExistentPath, status.code());
- }
+TEST(StorageEngineMetadataTest, ReadNonExistentMetadataFile) {
+ StorageEngineMetadata metadata("no_such_directory");
+ Status status = metadata.read();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::NonExistentPath, status.code());
+}
- TEST(StorageEngineMetadataTest, WriteToNonexistentDirectory) {
- ASSERT_NOT_OK(StorageEngineMetadata("no_such_directory").write());
- }
+TEST(StorageEngineMetadataTest, WriteToNonexistentDirectory) {
+ ASSERT_NOT_OK(StorageEngineMetadata("no_such_directory").write());
+}
- TEST(StorageEngineMetadataTest, InvalidMetadataFileNotBSON) {
- TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileNotBSON");
- {
- std::string filename(tempDir.path() + "/storage.bson");
- std::ofstream ofs(filename.c_str());
- // BSON document of size -1 and EOO as first element.
- BSONObj obj = fromjson("{x: 1}");
- ofs.write("\xff\xff\xff\xff", 4);
- ofs.write(obj.objdata()+4, obj.objsize()-4);
- ofs.flush();
- }
- {
- StorageEngineMetadata metadata(tempDir.path());
- ASSERT_NOT_OK(metadata.read());
- }
+TEST(StorageEngineMetadataTest, InvalidMetadataFileNotBSON) {
+ TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileNotBSON");
+ {
+ std::string filename(tempDir.path() + "/storage.bson");
+ std::ofstream ofs(filename.c_str());
+ // BSON document of size -1 and EOO as first element.
+ BSONObj obj = fromjson("{x: 1}");
+ ofs.write("\xff\xff\xff\xff", 4);
+ ofs.write(obj.objdata() + 4, obj.objsize() - 4);
+ ofs.flush();
}
-
- TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageFieldMissing) {
- TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileStorageFieldMissing");
- {
- std::string filename(tempDir.path() + "/storage.bson");
- std::ofstream ofs(filename.c_str(), std::ios_base::out | std::ios_base::binary);
- BSONObj obj = fromjson("{missing_storage_field: 123}");
- ofs.write(obj.objdata(), obj.objsize());
- ofs.flush();
- }
- {
- StorageEngineMetadata metadata(tempDir.path());
- ASSERT_NOT_OK(metadata.read());
- }
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ ASSERT_NOT_OK(metadata.read());
}
+}
- TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageNodeNotObject) {
- TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileStorageNodeNotObject");
- {
- std::string filename(tempDir.path() + "/storage.bson");
- std::ofstream ofs(filename.c_str());
- BSONObj obj = fromjson("{storage: 123}");
- ofs.write(obj.objdata(), obj.objsize());
- ofs.flush();
- }
- {
- StorageEngineMetadata metadata(tempDir.path());
- ASSERT_NOT_OK(metadata.read());
- }
+TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageFieldMissing) {
+ TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileStorageFieldMissing");
+ {
+ std::string filename(tempDir.path() + "/storage.bson");
+ std::ofstream ofs(filename.c_str(), std::ios_base::out | std::ios_base::binary);
+ BSONObj obj = fromjson("{missing_storage_field: 123}");
+ ofs.write(obj.objdata(), obj.objsize());
+ ofs.flush();
}
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ ASSERT_NOT_OK(metadata.read());
+ }
+}
- TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageEngineFieldMissing) {
- TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileStorageEngineFieldMissing");
- {
- std::string filename(tempDir.path() + "/storage.bson");
- std::ofstream ofs(filename.c_str());
- BSONObj obj = fromjson("{storage: {}}");
- ofs.write(obj.objdata(), obj.objsize());
- ofs.flush();
- }
- {
- StorageEngineMetadata metadata(tempDir.path());
- ASSERT_NOT_OK(metadata.read());
- }
+TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageNodeNotObject) {
+ TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileStorageNodeNotObject");
+ {
+ std::string filename(tempDir.path() + "/storage.bson");
+ std::ofstream ofs(filename.c_str());
+ BSONObj obj = fromjson("{storage: 123}");
+ ofs.write(obj.objdata(), obj.objsize());
+ ofs.flush();
}
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ ASSERT_NOT_OK(metadata.read());
+ }
+}
- TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageEngineFieldNotString) {
- TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileStorageEngineFieldNotString");
- {
- std::string filename(tempDir.path() + "/storage.bson");
- std::ofstream ofs(filename.c_str());
- BSONObj obj = fromjson("{storage: {engine: 123}}");
- ofs.write(obj.objdata(), obj.objsize());
- ofs.flush();
- }
- {
- StorageEngineMetadata metadata(tempDir.path());
- ASSERT_NOT_OK(metadata.read());
- }
+TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageEngineFieldMissing) {
+ TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileStorageEngineFieldMissing");
+ {
+ std::string filename(tempDir.path() + "/storage.bson");
+ std::ofstream ofs(filename.c_str());
+ BSONObj obj = fromjson("{storage: {}}");
+ ofs.write(obj.objdata(), obj.objsize());
+ ofs.flush();
+ }
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ ASSERT_NOT_OK(metadata.read());
}
+}
- TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageEngineOptionsFieldNotObject) {
- TempDir tempDir("StorageEngineMetadataTest_IgnoreUnknownField");
- {
- std::string filename(tempDir.path() + "/storage.bson");
- std::ofstream ofs(filename.c_str());
- BSONObj obj = fromjson("{storage: {engine: \"storageEngine1\", options: 123}}");
- ofs.write(obj.objdata(), obj.objsize());
- ofs.flush();
- }
- {
- StorageEngineMetadata metadata(tempDir.path());
- ASSERT_NOT_OK(metadata.read());
- }
+TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageEngineFieldNotString) {
+ TempDir tempDir("StorageEngineMetadataTest_InvalidMetadataFileStorageEngineFieldNotString");
+ {
+ std::string filename(tempDir.path() + "/storage.bson");
+ std::ofstream ofs(filename.c_str());
+ BSONObj obj = fromjson("{storage: {engine: 123}}");
+ ofs.write(obj.objdata(), obj.objsize());
+ ofs.flush();
}
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ ASSERT_NOT_OK(metadata.read());
+ }
+}
- // Metadata parser should ignore unknown metadata fields.
- TEST(StorageEngineMetadataTest, IgnoreUnknownField) {
- TempDir tempDir("StorageEngineMetadataTest_IgnoreUnknownField");
- {
- std::string filename(tempDir.path() + "/storage.bson");
- std::ofstream ofs(filename.c_str());
- BSONObj obj = fromjson("{storage: {engine: \"storageEngine1\", unknown_field: 123}}");
- ofs.write(obj.objdata(), obj.objsize());
- ofs.flush();
- }
- {
- StorageEngineMetadata metadata(tempDir.path());
- ASSERT_OK(metadata.read());
- ASSERT_EQUALS("storageEngine1", metadata.getStorageEngine());
- ASSERT_TRUE(metadata.getStorageEngineOptions().isEmpty());
- }
+TEST(StorageEngineMetadataTest, InvalidMetadataFileStorageEngineOptionsFieldNotObject) {
+ TempDir tempDir("StorageEngineMetadataTest_IgnoreUnknownField");
+ {
+ std::string filename(tempDir.path() + "/storage.bson");
+ std::ofstream ofs(filename.c_str());
+ BSONObj obj = fromjson("{storage: {engine: \"storageEngine1\", options: 123}}");
+ ofs.write(obj.objdata(), obj.objsize());
+ ofs.flush();
}
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ ASSERT_NOT_OK(metadata.read());
+ }
+}
- TEST(StorageEngineMetadataTest, WriteEmptyStorageEngineName) {
- TempDir tempDir("StorageEngineMetadataTest_WriteEmptyStorageEngineName");
+// Metadata parser should ignore unknown metadata fields.
+TEST(StorageEngineMetadataTest, IgnoreUnknownField) {
+ TempDir tempDir("StorageEngineMetadataTest_IgnoreUnknownField");
+ {
+ std::string filename(tempDir.path() + "/storage.bson");
+ std::ofstream ofs(filename.c_str());
+ BSONObj obj = fromjson("{storage: {engine: \"storageEngine1\", unknown_field: 123}}");
+ ofs.write(obj.objdata(), obj.objsize());
+ ofs.flush();
+ }
+ {
StorageEngineMetadata metadata(tempDir.path());
- ASSERT_EQUALS("", metadata.getStorageEngine());
- // Write empty storage engine name to metadata file.
- ASSERT_NOT_OK(metadata.write());
+ ASSERT_OK(metadata.read());
+ ASSERT_EQUALS("storageEngine1", metadata.getStorageEngine());
+ ASSERT_TRUE(metadata.getStorageEngineOptions().isEmpty());
}
+}
- TEST(StorageEngineMetadataTest, Roundtrip) {
- TempDir tempDir("StorageEngineMetadataTest_Roundtrip");
- BSONObj options = fromjson("{x: 1}");
- {
- StorageEngineMetadata metadata(tempDir.path());
- metadata.setStorageEngine("storageEngine1");
- metadata.setStorageEngineOptions(options);
- ASSERT_OK(metadata.write());
- }
- // Read back storage engine name.
- {
- StorageEngineMetadata metadata(tempDir.path());
- ASSERT_OK(metadata.read());
- ASSERT_EQUALS("storageEngine1", metadata.getStorageEngine());
- ASSERT_EQUALS(options, metadata.getStorageEngineOptions());
+TEST(StorageEngineMetadataTest, WriteEmptyStorageEngineName) {
+ TempDir tempDir("StorageEngineMetadataTest_WriteEmptyStorageEngineName");
+ StorageEngineMetadata metadata(tempDir.path());
+ ASSERT_EQUALS("", metadata.getStorageEngine());
+ // Write empty storage engine name to metadata file.
+ ASSERT_NOT_OK(metadata.write());
+}
- metadata.reset();
- ASSERT_TRUE(metadata.getStorageEngine().empty());
- ASSERT_TRUE(metadata.getStorageEngineOptions().isEmpty());
- }
+TEST(StorageEngineMetadataTest, Roundtrip) {
+ TempDir tempDir("StorageEngineMetadataTest_Roundtrip");
+ BSONObj options = fromjson("{x: 1}");
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ metadata.setStorageEngine("storageEngine1");
+ metadata.setStorageEngineOptions(options);
+ ASSERT_OK(metadata.write());
}
+ // Read back storage engine name.
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ ASSERT_OK(metadata.read());
+ ASSERT_EQUALS("storageEngine1", metadata.getStorageEngine());
+ ASSERT_EQUALS(options, metadata.getStorageEngineOptions());
- TEST(StorageEngineMetadataTest, ValidateStorageEngineOption) {
- // It is fine to provide an invalid data directory as long as we do not
- // call read() or write().
- StorageEngineMetadata metadata("no_such_directory");
- BSONObj options = fromjson("{x: true, y: false, z: 123}");
- metadata.setStorageEngineOptions(options);
+ metadata.reset();
+ ASSERT_TRUE(metadata.getStorageEngine().empty());
+ ASSERT_TRUE(metadata.getStorageEngineOptions().isEmpty());
+ }
+}
- // Non-existent field.
- ASSERT_OK(metadata.validateStorageEngineOption("w", true));
- ASSERT_OK(metadata.validateStorageEngineOption("w", false));
+TEST(StorageEngineMetadataTest, ValidateStorageEngineOption) {
+ // It is fine to provide an invalid data directory as long as we do not
+ // call read() or write().
+ StorageEngineMetadata metadata("no_such_directory");
+ BSONObj options = fromjson("{x: true, y: false, z: 123}");
+ metadata.setStorageEngineOptions(options);
- // Non-boolean field.
- Status status = metadata.validateStorageEngineOption("z", true);
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
- status = metadata.validateStorageEngineOption("z", false);
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
+ // Non-existent field.
+ ASSERT_OK(metadata.validateStorageEngineOption("w", true));
+ ASSERT_OK(metadata.validateStorageEngineOption("w", false));
- // Boolean fields.
- ASSERT_OK(metadata.validateStorageEngineOption("x", true));
- status = metadata.validateStorageEngineOption("x", false);
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::InvalidOptions, status.code());
+ // Non-boolean field.
+ Status status = metadata.validateStorageEngineOption("z", true);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
+ status = metadata.validateStorageEngineOption("z", false);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
- ASSERT_OK(metadata.validateStorageEngineOption("y", false));
- status = metadata.validateStorageEngineOption("y", true);
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::InvalidOptions, status.code());
- }
+ // Boolean fields.
+ ASSERT_OK(metadata.validateStorageEngineOption("x", true));
+ status = metadata.validateStorageEngineOption("x", false);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, status.code());
- // Do not override the active storage engine when the data directory is empty.
- TEST(StorageEngineMetadataTest, StorageEngineForPath_EmptyDirectory) {
- TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_EmptyDirectory");
- auto storageEngine = StorageEngineMetadata::getStorageEngineForPath(tempDir.path());
- ASSERT_FALSE(storageEngine);
- }
+ ASSERT_OK(metadata.validateStorageEngineOption("y", false));
+ status = metadata.validateStorageEngineOption("y", true);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, status.code());
+}
- // Override the active storage engine with "mmapv1" when the data directory contains local.ns.
- TEST(StorageEngineMetadataTest, StorageEngineForPath_DataFilesExist) {
- TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_DataFilesExist");
- {
- std::string filename(tempDir.path() + "/local.ns");
- std::ofstream ofs(filename.c_str());
- ofs << "unused data" << std::endl;
- }
- ASSERT_EQUALS(std::string("mmapv1"),
- StorageEngineMetadata::getStorageEngineForPath(tempDir.path()));
+// Do not override the active storage engine when the data directory is empty.
+TEST(StorageEngineMetadataTest, StorageEngineForPath_EmptyDirectory) {
+ TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_EmptyDirectory");
+ auto storageEngine = StorageEngineMetadata::getStorageEngineForPath(tempDir.path());
+ ASSERT_FALSE(storageEngine);
+}
+
+// Override the active storage engine with "mmapv1" when the data directory contains local.ns.
+TEST(StorageEngineMetadataTest, StorageEngineForPath_DataFilesExist) {
+ TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_DataFilesExist");
+ {
+ std::string filename(tempDir.path() + "/local.ns");
+ std::ofstream ofs(filename.c_str());
+ ofs << "unused data" << std::endl;
}
+ ASSERT_EQUALS(std::string("mmapv1"),
+ StorageEngineMetadata::getStorageEngineForPath(tempDir.path()));
+}
- // Override the active storage engine with "mmapv1" when the data directory contains
- // local/local.ns.
- TEST(StorageEngineMetadataTest, StorageEngineForPath_DataFilesExist_DirPerDB) {
- TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_DataFilesExist_DirPerDB");
- {
- boost::filesystem::create_directory(tempDir.path() + "/local");
- std::string filename(tempDir.path() + "/local/local.ns");
- std::ofstream ofs(filename.c_str());
- ofs << "unused data" << std::endl;
- }
- ASSERT_EQUALS(std::string("mmapv1"),
- StorageEngineMetadata::getStorageEngineForPath(tempDir.path()));
+// Override the active storage engine with "mmapv1" when the data directory contains
+// local/local.ns.
+TEST(StorageEngineMetadataTest, StorageEngineForPath_DataFilesExist_DirPerDB) {
+ TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_DataFilesExist_DirPerDB");
+ {
+ boost::filesystem::create_directory(tempDir.path() + "/local");
+ std::string filename(tempDir.path() + "/local/local.ns");
+ std::ofstream ofs(filename.c_str());
+ ofs << "unused data" << std::endl;
}
+ ASSERT_EQUALS(std::string("mmapv1"),
+ StorageEngineMetadata::getStorageEngineForPath(tempDir.path()));
+}
- // Do not override the active storage engine when the data directory is nonempty, but does not
- // contain either local.ns or local/local.ns.
- TEST(StorageEngineMetadataTest, StorageEngineForPath_NoDataFilesExist) {
- TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_NoDataFilesExist");
- {
- std::string filename(tempDir.path() + "/user_data.txt");
- std::ofstream ofs(filename.c_str());
- ofs << "unused data" << std::endl;
- }
- auto storageEngine = StorageEngineMetadata::getStorageEngineForPath(tempDir.path());
- ASSERT_FALSE(storageEngine);
+// Do not override the active storage engine when the data directory is nonempty, but does not
+// contain either local.ns or local/local.ns.
+TEST(StorageEngineMetadataTest, StorageEngineForPath_NoDataFilesExist) {
+ TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_NoDataFilesExist");
+ {
+ std::string filename(tempDir.path() + "/user_data.txt");
+ std::ofstream ofs(filename.c_str());
+ ofs << "unused data" << std::endl;
}
+ auto storageEngine = StorageEngineMetadata::getStorageEngineForPath(tempDir.path());
+ ASSERT_FALSE(storageEngine);
+}
- // Override the active storage engine with "mmapv1" when the metadata file specifies "mmapv1".
- TEST(StorageEngineMetadataTest, StorageEngineForPath_MetadataFile_mmapv1) {
- TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_MetadataFile_mmapv1");
- {
- StorageEngineMetadata metadata(tempDir.path());
- metadata.setStorageEngine("mmapv1");
- ASSERT_OK(metadata.write());
- }
- ASSERT_EQUALS(std::string("mmapv1"),
- StorageEngineMetadata::getStorageEngineForPath(tempDir.path()));
+// Override the active storage engine with "mmapv1" when the metadata file specifies "mmapv1".
+TEST(StorageEngineMetadataTest, StorageEngineForPath_MetadataFile_mmapv1) {
+ TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_MetadataFile_mmapv1");
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ metadata.setStorageEngine("mmapv1");
+ ASSERT_OK(metadata.write());
}
+ ASSERT_EQUALS(std::string("mmapv1"),
+ StorageEngineMetadata::getStorageEngineForPath(tempDir.path()));
+}
- // Override the active storage engine whatever the metadata file specifies.
- TEST(StorageEngineMetadataTest, StorageEngineForPath_MetadataFile_someEngine) {
- TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_MetadataFile_someEngine");
- {
- StorageEngineMetadata metadata(tempDir.path());
- metadata.setStorageEngine("someEngine");
- ASSERT_OK(metadata.write());
- }
- ASSERT_EQUALS(std::string("someEngine"),
- StorageEngineMetadata::getStorageEngineForPath(tempDir.path()));
+// Override the active storage engine with whatever the metadata file specifies.
+TEST(StorageEngineMetadataTest, StorageEngineForPath_MetadataFile_someEngine) {
+ TempDir tempDir("StorageEngineMetadataTest_StorageEngineForPath_MetadataFile_someEngine");
+ {
+ StorageEngineMetadata metadata(tempDir.path());
+ metadata.setStorageEngine("someEngine");
+ ASSERT_OK(metadata.write());
}
+ ASSERT_EQUALS(std::string("someEngine"),
+ StorageEngineMetadata::getStorageEngineForPath(tempDir.path()));
+}
} // namespace
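The cases above fix the observable behaviour of StorageEngineMetadata::getStorageEngineForPath(); the helper below only restates those expectations as a sketch. It is not the implementation, and these tests do not establish precedence when several signals are present at once.

#include <string>
#include <boost/optional.hpp>

// Sketch restating the expectations asserted by the tests above, case by case.
boost::optional<std::string> expectedEngineFor(bool hasMetadataFile,
                                               const std::string& metadataEngine,
                                               bool hasLegacyLocalNs) {
    if (hasMetadataFile)
        return metadataEngine;         // MetadataFile_mmapv1 / MetadataFile_someEngine
    if (hasLegacyLocalNs)
        return std::string("mmapv1");  // DataFilesExist / DataFilesExist_DirPerDB
    return boost::none;                // EmptyDirectory / NoDataFilesExist
}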
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index 4e46afd5a84..a17f5e494d5 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -36,22 +36,21 @@ namespace mongo {
// TODO: Does this belong here?
namespace {
- class StorageSSS : public ServerStatusSection {
- public:
- StorageSSS() : ServerStatusSection( "storageEngine" ) {
- }
+class StorageSSS : public ServerStatusSection {
+public:
+ StorageSSS() : ServerStatusSection("storageEngine") {}
- virtual ~StorageSSS() {}
+ virtual ~StorageSSS() {}
- virtual bool includeByDefault() const { return true; }
+ virtual bool includeByDefault() const {
+ return true;
+ }
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
+ virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ return BSON("name" << storageGlobalParams.engine);
+ }
- return BSON( "name" << storageGlobalParams.engine );
- }
-
- } storageSSS;
+} storageSSS;
} // namespace
} // namespace mongo
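For reference, the section registered above contributes one sub-document to serverStatus output; a sketch of its shape (the value depends on the running configuration):

// serverStatus (relevant fragment only, as produced by generateSection() above):
//   "storageEngine" : { "name" : <storageGlobalParams.engine> }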
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp
index a6f685418e6..12388fbfaa6 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp
@@ -38,39 +38,37 @@
namespace mongo {
- /* Make a WiredTigerCustomizationHooks pointer a decoration on the global ServiceContext */
- MONGO_INITIALIZER_WITH_PREREQUISITES(SetWiredTigerCustomizationHooks,
- ("SetGlobalEnvironment"))
- (InitializerContext* context) {
- auto customizationHooks = stdx::make_unique<EmptyWiredTigerCustomizationHooks>();
- WiredTigerCustomizationHooks::set(getGlobalServiceContext(), std::move(customizationHooks));
+/* Make a WiredTigerCustomizationHooks pointer a decoration on the global ServiceContext */
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetWiredTigerCustomizationHooks, ("SetGlobalEnvironment"))
+(InitializerContext* context) {
+ auto customizationHooks = stdx::make_unique<EmptyWiredTigerCustomizationHooks>();
+ WiredTigerCustomizationHooks::set(getGlobalServiceContext(), std::move(customizationHooks));
- return Status::OK();
- }
+ return Status::OK();
+}
- namespace {
- const auto getCustomizationHooks =
- ServiceContext::declareDecoration<std::unique_ptr<WiredTigerCustomizationHooks>>();
- } // namespace
+namespace {
+const auto getCustomizationHooks =
+ ServiceContext::declareDecoration<std::unique_ptr<WiredTigerCustomizationHooks>>();
+} // namespace
- void WiredTigerCustomizationHooks::set(
- ServiceContext* service,
- std::unique_ptr<WiredTigerCustomizationHooks> custHooks) {
- auto& hooks = getCustomizationHooks(service);
- invariant(custHooks);
- hooks = std::move(custHooks);
- }
+void WiredTigerCustomizationHooks::set(ServiceContext* service,
+ std::unique_ptr<WiredTigerCustomizationHooks> custHooks) {
+ auto& hooks = getCustomizationHooks(service);
+ invariant(custHooks);
+ hooks = std::move(custHooks);
+}
- WiredTigerCustomizationHooks* WiredTigerCustomizationHooks::get(ServiceContext* service) {
- return getCustomizationHooks(service).get();
- }
+WiredTigerCustomizationHooks* WiredTigerCustomizationHooks::get(ServiceContext* service) {
+ return getCustomizationHooks(service).get();
+}
- EmptyWiredTigerCustomizationHooks::~EmptyWiredTigerCustomizationHooks() {}
+EmptyWiredTigerCustomizationHooks::~EmptyWiredTigerCustomizationHooks() {}
- void EmptyWiredTigerCustomizationHooks::appendUID(BSONObjBuilder* builder) {}
+void EmptyWiredTigerCustomizationHooks::appendUID(BSONObjBuilder* builder) {}
- std::string EmptyWiredTigerCustomizationHooks::getOpenConfig(StringData tableName) {
- return "";
- }
+std::string EmptyWiredTigerCustomizationHooks::getOpenConfig(StringData tableName) {
+ return "";
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h b/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h
index e826a8971b8..8eeed66b49a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h
@@ -36,38 +36,37 @@
#include "mongo/db/jsobj.h"
namespace mongo {
- class StringData;
- class ServiceContext;
+class StringData;
+class ServiceContext;
- class WiredTigerCustomizationHooks {
- public:
- static void set(ServiceContext* service,
- std::unique_ptr<WiredTigerCustomizationHooks> custHooks);
+class WiredTigerCustomizationHooks {
+public:
+ static void set(ServiceContext* service,
+ std::unique_ptr<WiredTigerCustomizationHooks> custHooks);
- static WiredTigerCustomizationHooks* get(ServiceContext* service);
+ static WiredTigerCustomizationHooks* get(ServiceContext* service);
- virtual ~WiredTigerCustomizationHooks() = default;
+ virtual ~WiredTigerCustomizationHooks() = default;
- /**
- * Appends additional configuration sub object(s) to the BSONObjbuilder builder.
- */
- virtual void appendUID(BSONObjBuilder* builder) = 0;
+ /**
+     * Appends additional configuration sub object(s) to the BSONObjBuilder builder.
+ */
+ virtual void appendUID(BSONObjBuilder* builder) = 0;
- /**
- * Gets the WiredTiger encryption configuration string for the
- * provided table name
- */
- virtual std::string getOpenConfig(StringData tableName) = 0;
- };
+ /**
+ * Gets the WiredTiger encryption configuration string for the
+ * provided table name
+ */
+ virtual std::string getOpenConfig(StringData tableName) = 0;
+};
- // Empty default implementation of the abstract class WiredTigerCustomizationHooks
- class EmptyWiredTigerCustomizationHooks : public WiredTigerCustomizationHooks {
+// Empty default implementation of the abstract class WiredTigerCustomizationHooks
+class EmptyWiredTigerCustomizationHooks : public WiredTigerCustomizationHooks {
+public:
+ ~EmptyWiredTigerCustomizationHooks() override;
- public:
- ~EmptyWiredTigerCustomizationHooks() override;
+ void appendUID(BSONObjBuilder* builder) override;
- void appendUID(BSONObjBuilder* builder) override;
-
- std::string getOpenConfig(StringData tableName) override;
- };
-} // namespace mongo
+ std::string getOpenConfig(StringData tableName) override;
+};
+} // namespace mongo
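A hedged sketch of a concrete implementation against the interface above, installed through set(); the class name, the appended "uid" value, and the stdx memory include path are illustrative assumptions.

#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h"
#include "mongo/stdx/memory.h"

namespace {
class ExampleCustomizationHooks : public mongo::WiredTigerCustomizationHooks {
public:
    // Adds an extra field to whatever configuration object is being built.
    void appendUID(mongo::BSONObjBuilder* builder) override {
        builder->append("uid", "example-uid");  // illustrative value only
    }
    // No per-table WiredTiger configuration in this sketch.
    std::string getOpenConfig(mongo::StringData tableName) override {
        return "";
    }
};
}  // namespace

// Replaces the default EmptyWiredTigerCustomizationHooks decoration on the
// given ServiceContext, mirroring the initializer in the .cpp above.
void installExampleHooks(mongo::ServiceContext* service) {
    mongo::WiredTigerCustomizationHooks::set(
        service, mongo::stdx::make_unique<ExampleCustomizationHooks>());
}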
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
index 24cd3aa82d6..01aa0d3bbc1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
@@ -38,126 +38,122 @@
namespace mongo {
- WiredTigerGlobalOptions wiredTigerGlobalOptions;
-
- Status WiredTigerGlobalOptions::add(moe::OptionSection* options) {
- moe::OptionSection wiredTigerOptions("WiredTiger options");
-
- // WiredTiger storage engine options
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.cacheSizeGB",
- "wiredTigerCacheSizeGB",
- moe::Int,
- "maximum amount of memory to allocate for cache; "
- "defaults to 1/2 of physical RAM")
- .validRange(1,10000);
- wiredTigerOptions.addOptionChaining(
- "storage.wiredTiger.engineConfig.statisticsLogDelaySecs",
- "wiredTigerStatisticsLogDelaySecs",
- moe::Int,
- "seconds to wait between each write to a statistics file in the dbpath; "
- "0 means do not log statistics")
- .validRange(0, 100000)
- .setDefault(moe::Value(0));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.journalCompressor",
- "wiredTigerJournalCompressor",
- moe::String,
- "use a compressor for log records [none|snappy|zlib]")
- .format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
- .setDefault(moe::Value(std::string("snappy")));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.directoryForIndexes",
- "wiredTigerDirectoryForIndexes",
- moe::Switch,
- "Put indexes and data in different directories");
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.configString",
- "wiredTigerEngineConfigString",
- moe::String,
- "WiredTiger storage engine custom "
- "configuration settings")
- .hidden();
-
- // WiredTiger collection options
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.collectionConfig.blockCompressor",
- "wiredTigerCollectionBlockCompressor",
- moe::String,
- "block compression algorithm for collection data "
- "[none|snappy|zlib]")
- .format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
- .setDefault(moe::Value(std::string("snappy")));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.collectionConfig.configString",
- "wiredTigerCollectionConfigString",
- moe::String,
- "WiredTiger custom collection configuration settings")
- .hidden();
-
-
- // WiredTiger index options
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.indexConfig.prefixCompression",
- "wiredTigerIndexPrefixCompression",
- moe::Bool,
- "use prefix compression on row-store leaf pages")
- .setDefault(moe::Value(true));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.indexConfig.configString",
- "wiredTigerIndexConfigString",
- moe::String,
- "WiredTiger custom index configuration settings")
- .hidden();
-
- return options->addSection(wiredTigerOptions);
+WiredTigerGlobalOptions wiredTigerGlobalOptions;
+
+Status WiredTigerGlobalOptions::add(moe::OptionSection* options) {
+ moe::OptionSection wiredTigerOptions("WiredTiger options");
+
+ // WiredTiger storage engine options
+ wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.cacheSizeGB",
+ "wiredTigerCacheSizeGB",
+ moe::Int,
+ "maximum amount of memory to allocate for cache; "
+ "defaults to 1/2 of physical RAM").validRange(1, 10000);
+ wiredTigerOptions.addOptionChaining(
+ "storage.wiredTiger.engineConfig.statisticsLogDelaySecs",
+ "wiredTigerStatisticsLogDelaySecs",
+ moe::Int,
+ "seconds to wait between each write to a statistics file in the dbpath; "
+ "0 means do not log statistics")
+ .validRange(0, 100000)
+ .setDefault(moe::Value(0));
+ wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.journalCompressor",
+ "wiredTigerJournalCompressor",
+ moe::String,
+ "use a compressor for log records [none|snappy|zlib]")
+ .format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
+ .setDefault(moe::Value(std::string("snappy")));
+ wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.directoryForIndexes",
+ "wiredTigerDirectoryForIndexes",
+ moe::Switch,
+ "Put indexes and data in different directories");
+ wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.configString",
+ "wiredTigerEngineConfigString",
+ moe::String,
+ "WiredTiger storage engine custom "
+ "configuration settings").hidden();
+
+ // WiredTiger collection options
+ wiredTigerOptions.addOptionChaining("storage.wiredTiger.collectionConfig.blockCompressor",
+ "wiredTigerCollectionBlockCompressor",
+ moe::String,
+ "block compression algorithm for collection data "
+ "[none|snappy|zlib]")
+ .format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
+ .setDefault(moe::Value(std::string("snappy")));
+ wiredTigerOptions.addOptionChaining("storage.wiredTiger.collectionConfig.configString",
+ "wiredTigerCollectionConfigString",
+ moe::String,
+ "WiredTiger custom collection configuration settings")
+ .hidden();
+
+
+ // WiredTiger index options
+ wiredTigerOptions.addOptionChaining("storage.wiredTiger.indexConfig.prefixCompression",
+ "wiredTigerIndexPrefixCompression",
+ moe::Bool,
+ "use prefix compression on row-store leaf pages")
+ .setDefault(moe::Value(true));
+ wiredTigerOptions.addOptionChaining("storage.wiredTiger.indexConfig.configString",
+ "wiredTigerIndexConfigString",
+ moe::String,
+ "WiredTiger custom index configuration settings").hidden();
+
+ return options->addSection(wiredTigerOptions);
+}
+
+Status WiredTigerGlobalOptions::store(const moe::Environment& params,
+ const std::vector<std::string>& args) {
+ // WiredTiger storage engine options
+ if (params.count("storage.wiredTiger.engineConfig.cacheSizeGB")) {
+ wiredTigerGlobalOptions.cacheSizeGB =
+ params["storage.wiredTiger.engineConfig.cacheSizeGB"].as<int>();
+ }
+ if (params.count("storage.syncPeriodSecs")) {
+ wiredTigerGlobalOptions.checkpointDelaySecs =
+ static_cast<size_t>(params["storage.syncPeriodSecs"].as<double>());
+ }
+ if (params.count("storage.wiredTiger.engineConfig.statisticsLogDelaySecs")) {
+ wiredTigerGlobalOptions.statisticsLogDelaySecs =
+ params["storage.wiredTiger.engineConfig.statisticsLogDelaySecs"].as<int>();
+ }
+ if (params.count("storage.wiredTiger.engineConfig.journalCompressor")) {
+ wiredTigerGlobalOptions.journalCompressor =
+ params["storage.wiredTiger.engineConfig.journalCompressor"].as<std::string>();
+ }
+ if (params.count("storage.wiredTiger.engineConfig.directoryForIndexes")) {
+ wiredTigerGlobalOptions.directoryForIndexes =
+ params["storage.wiredTiger.engineConfig.directoryForIndexes"].as<bool>();
+ }
+ if (params.count("storage.wiredTiger.engineConfig.configString")) {
+ wiredTigerGlobalOptions.engineConfig =
+ params["storage.wiredTiger.engineConfig.configString"].as<std::string>();
+ log() << "Engine custom option: " << wiredTigerGlobalOptions.engineConfig;
}
- Status WiredTigerGlobalOptions::store(const moe::Environment& params,
- const std::vector<std::string>& args) {
-
- // WiredTiger storage engine options
- if (params.count("storage.wiredTiger.engineConfig.cacheSizeGB")) {
- wiredTigerGlobalOptions.cacheSizeGB =
- params["storage.wiredTiger.engineConfig.cacheSizeGB"].as<int>();
- }
- if (params.count("storage.syncPeriodSecs")) {
- wiredTigerGlobalOptions.checkpointDelaySecs =
- static_cast<size_t>(params["storage.syncPeriodSecs"].as<double>());
- }
- if (params.count("storage.wiredTiger.engineConfig.statisticsLogDelaySecs")) {
- wiredTigerGlobalOptions.statisticsLogDelaySecs =
- params["storage.wiredTiger.engineConfig.statisticsLogDelaySecs"].as<int>();
- }
- if (params.count("storage.wiredTiger.engineConfig.journalCompressor")) {
- wiredTigerGlobalOptions.journalCompressor =
- params["storage.wiredTiger.engineConfig.journalCompressor"].as<std::string>();
- }
- if (params.count("storage.wiredTiger.engineConfig.directoryForIndexes")) {
- wiredTigerGlobalOptions.directoryForIndexes =
- params["storage.wiredTiger.engineConfig.directoryForIndexes"].as<bool>();
- }
- if (params.count("storage.wiredTiger.engineConfig.configString")) {
- wiredTigerGlobalOptions.engineConfig =
- params["storage.wiredTiger.engineConfig.configString"].as<std::string>();
- log() << "Engine custom option: " << wiredTigerGlobalOptions.engineConfig;
- }
-
- // WiredTiger collection options
- if (params.count("storage.wiredTiger.collectionConfig.blockCompressor")) {
- wiredTigerGlobalOptions.collectionBlockCompressor =
- params["storage.wiredTiger.collectionConfig.blockCompressor"].as<std::string>();
- }
- if (params.count("storage.wiredTiger.collectionConfig.configString")) {
- wiredTigerGlobalOptions.collectionConfig =
- params["storage.wiredTiger.collectionConfig.configString"].as<std::string>();
- log() << "Collection custom option: " << wiredTigerGlobalOptions.collectionConfig;
- }
-
- // WiredTiger index options
- if (params.count("storage.wiredTiger.indexConfig.prefixCompression")) {
- wiredTigerGlobalOptions.useIndexPrefixCompression =
- params["storage.wiredTiger.indexConfig.prefixCompression"].as<bool>();
- }
- if (params.count("storage.wiredTiger.indexConfig.configString")) {
- wiredTigerGlobalOptions.indexConfig =
- params["storage.wiredTiger.indexConfig.configString"].as<std::string>();
- log() << "Index custom option: " << wiredTigerGlobalOptions.indexConfig;
- }
-
- return Status::OK();
+ // WiredTiger collection options
+ if (params.count("storage.wiredTiger.collectionConfig.blockCompressor")) {
+ wiredTigerGlobalOptions.collectionBlockCompressor =
+ params["storage.wiredTiger.collectionConfig.blockCompressor"].as<std::string>();
}
+ if (params.count("storage.wiredTiger.collectionConfig.configString")) {
+ wiredTigerGlobalOptions.collectionConfig =
+ params["storage.wiredTiger.collectionConfig.configString"].as<std::string>();
+ log() << "Collection custom option: " << wiredTigerGlobalOptions.collectionConfig;
+ }
+
+ // WiredTiger index options
+ if (params.count("storage.wiredTiger.indexConfig.prefixCompression")) {
+ wiredTigerGlobalOptions.useIndexPrefixCompression =
+ params["storage.wiredTiger.indexConfig.prefixCompression"].as<bool>();
+ }
+ if (params.count("storage.wiredTiger.indexConfig.configString")) {
+ wiredTigerGlobalOptions.indexConfig =
+ params["storage.wiredTiger.indexConfig.configString"].as<std::string>();
+ log() << "Index custom option: " << wiredTigerGlobalOptions.indexConfig;
+ }
+
+ return Status::OK();
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h
index 622d3d61b03..9e2307ff0fa 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h
@@ -35,37 +35,35 @@
namespace mongo {
- namespace moe = mongo::optionenvironment;
+namespace moe = mongo::optionenvironment;
- class WiredTigerGlobalOptions {
- public:
- WiredTigerGlobalOptions() : cacheSizeGB(0),
- checkpointDelaySecs(0),
- statisticsLogDelaySecs(0),
- directoryForIndexes(false),
- useCollectionPrefixCompression(false),
- useIndexPrefixCompression(false)
- {};
+class WiredTigerGlobalOptions {
+public:
+ WiredTigerGlobalOptions()
+ : cacheSizeGB(0),
+ checkpointDelaySecs(0),
+ statisticsLogDelaySecs(0),
+ directoryForIndexes(false),
+ useCollectionPrefixCompression(false),
+ useIndexPrefixCompression(false){};
- Status add(moe::OptionSection* options);
- Status store(const moe::Environment& params, const std::vector<std::string>& args);
+ Status add(moe::OptionSection* options);
+ Status store(const moe::Environment& params, const std::vector<std::string>& args);
- size_t cacheSizeGB;
- size_t checkpointDelaySecs;
- size_t statisticsLogDelaySecs;
- std::string journalCompressor;
- bool directoryForIndexes;
- std::string engineConfig;
+ size_t cacheSizeGB;
+ size_t checkpointDelaySecs;
+ size_t statisticsLogDelaySecs;
+ std::string journalCompressor;
+ bool directoryForIndexes;
+ std::string engineConfig;
- std::string collectionBlockCompressor;
- std::string indexBlockCompressor;
- bool useCollectionPrefixCompression;
- bool useIndexPrefixCompression;
- std::string collectionConfig;
- std::string indexConfig;
-
- };
-
- extern WiredTigerGlobalOptions wiredTigerGlobalOptions;
+ std::string collectionBlockCompressor;
+ std::string indexBlockCompressor;
+ bool useCollectionPrefixCompression;
+ bool useIndexPrefixCompression;
+ std::string collectionConfig;
+ std::string indexConfig;
+};
+extern WiredTigerGlobalOptions wiredTigerGlobalOptions;
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 402dc13a0d1..a5abbf61137 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -59,1119 +59,1110 @@
#if TRACING_ENABLED
#define TRACE_CURSOR log() << "WT index (" << (const void*)&_idx << ") "
-#define TRACE_INDEX log() << "WT index (" << (const void*)this << ") "
+#define TRACE_INDEX log() << "WT index (" << (const void*) this << ") "
#else
-#define TRACE_CURSOR if ( 0 ) log()
-#define TRACE_INDEX if ( 0 ) log()
+#define TRACE_CURSOR \
+ if (0) \
+ log()
+#define TRACE_INDEX \
+ if (0) \
+ log()
#endif
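// The "if (0) log()" definitions above keep TRACE_CURSOR and TRACE_INDEX valid as
// stream expressions (e.g. TRACE_INDEX << "...") when tracing is disabled: the
// operator<< chain still type-checks but is never executed, and the compiler is
// free to discard it.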
namespace mongo {
namespace {
- using std::string;
- using std::vector;
+using std::string;
+using std::vector;
- static const int TempKeyMaxSize = 1024; // this goes away with SERVER-3372
+static const int TempKeyMaxSize = 1024; // this goes away with SERVER-3372
- static const WiredTigerItem emptyItem(NULL, 0);
+static const WiredTigerItem emptyItem(NULL, 0);
- static const int kMinimumIndexVersion = 6;
- static const int kCurrentIndexVersion = 6; // New indexes use this by default.
- static const int kMaximumIndexVersion = 6;
- BOOST_STATIC_ASSERT(kCurrentIndexVersion >= kMinimumIndexVersion);
- BOOST_STATIC_ASSERT(kCurrentIndexVersion <= kMaximumIndexVersion);
+static const int kMinimumIndexVersion = 6;
+static const int kCurrentIndexVersion = 6; // New indexes use this by default.
+static const int kMaximumIndexVersion = 6;
+BOOST_STATIC_ASSERT(kCurrentIndexVersion >= kMinimumIndexVersion);
+BOOST_STATIC_ASSERT(kCurrentIndexVersion <= kMaximumIndexVersion);
- bool hasFieldNames(const BSONObj& obj) {
- BSONForEach(e, obj) {
- if (e.fieldName()[0])
- return true;
- }
- return false;
+bool hasFieldNames(const BSONObj& obj) {
+ BSONForEach(e, obj) {
+ if (e.fieldName()[0])
+ return true;
}
+ return false;
+}
- BSONObj stripFieldNames(const BSONObj& query) {
- if (!hasFieldNames(query))
- return query;
+BSONObj stripFieldNames(const BSONObj& query) {
+ if (!hasFieldNames(query))
+ return query;
- BSONObjBuilder bb;
- BSONForEach(e, query) {
- bb.appendAs(e, StringData());
- }
- return bb.obj();
+ BSONObjBuilder bb;
+ BSONForEach(e, query) {
+ bb.appendAs(e, StringData());
}
-
- Status checkKeySize(const BSONObj& key) {
- if ( key.objsize() >= TempKeyMaxSize ) {
- string msg = mongoutils::str::stream()
- << "WiredTigerIndex::insert: key too large to index, failing "
- << ' ' << key.objsize() << ' ' << key;
- return Status(ErrorCodes::KeyTooLong, msg);
- }
- return Status::OK();
+ return bb.obj();
+}
+
+Status checkKeySize(const BSONObj& key) {
+ if (key.objsize() >= TempKeyMaxSize) {
+ string msg = mongoutils::str::stream()
+ << "WiredTigerIndex::insert: key too large to index, failing " << ' ' << key.objsize()
+ << ' ' << key;
+ return Status(ErrorCodes::KeyTooLong, msg);
}
-
-} // namespace
-
- Status WiredTigerIndex::dupKeyError(const BSONObj& key) {
- StringBuilder sb;
- sb << "E11000 duplicate key error";
- sb << " collection: " << _collectionNamespace;
- sb << " index: " << _indexName;
- sb << " dup key: " << key;
- return Status(ErrorCodes::DuplicateKey, sb.str());
- }
-
- // static
- StatusWith<std::string> WiredTigerIndex::parseIndexOptions(const BSONObj& options) {
- StringBuilder ss;
- BSONForEach(elem, options) {
- if (elem.fieldNameStringData() == "configString") {
- if (elem.type() != String) {
- return StatusWith<std::string>(ErrorCodes::TypeMismatch, str::stream()
- << "configString must be a string. "
- << "Not adding 'configString' value "
- << elem << " to index configuration");
- }
- ss << elem.valueStringData() << ',';
- }
- else {
- // Return error on first unrecognized field.
- return StatusWith<std::string>(ErrorCodes::InvalidOptions, str::stream()
- << '\'' << elem.fieldNameStringData() << '\''
- << " is not a supported option.");
+ return Status::OK();
+}
+
+} // namespace
+
+Status WiredTigerIndex::dupKeyError(const BSONObj& key) {
+ StringBuilder sb;
+ sb << "E11000 duplicate key error";
+ sb << " collection: " << _collectionNamespace;
+ sb << " index: " << _indexName;
+ sb << " dup key: " << key;
+ return Status(ErrorCodes::DuplicateKey, sb.str());
+}
+
+// static
+StatusWith<std::string> WiredTigerIndex::parseIndexOptions(const BSONObj& options) {
+ StringBuilder ss;
+ BSONForEach(elem, options) {
+ if (elem.fieldNameStringData() == "configString") {
+ if (elem.type() != String) {
+ return StatusWith<std::string>(ErrorCodes::TypeMismatch,
+ str::stream() << "configString must be a string. "
+ << "Not adding 'configString' value "
+ << elem << " to index configuration");
}
+ ss << elem.valueStringData() << ',';
+ } else {
+ // Return error on first unrecognized field.
+ return StatusWith<std::string>(ErrorCodes::InvalidOptions,
+ str::stream() << '\'' << elem.fieldNameStringData()
+ << '\'' << " is not a supported option.");
}
- return StatusWith<std::string>(ss.str());
}
-
- // static
- StatusWith<std::string> WiredTigerIndex::generateCreateString(const std::string& extraConfig,
- const IndexDescriptor& desc) {
- str::stream ss;
-
- // Separate out a prefix and suffix in the default string. User configuration will override
- // values in the prefix, but not values in the suffix. Page sizes are chosen so that index
- // keys (up to 1024 bytes) will not overflow.
- ss << "type=file,internal_page_max=16k,leaf_page_max=16k,";
- ss << "checksum=on,";
- if (wiredTigerGlobalOptions.useIndexPrefixCompression) {
- ss << "prefix_compression=true,";
- }
-
- ss << "block_compressor=" << wiredTigerGlobalOptions.indexBlockCompressor << ",";
- ss << WiredTigerCustomizationHooks::get(
- getGlobalServiceContext())->getOpenConfig(desc.parentNS());
- ss << extraConfig;
-
- // Validate configuration object.
- // Raise an error about unrecognized fields that may be introduced in newer versions of
- // this storage engine.
- // Ensure that 'configString' field is a string. Raise an error if this is not the case.
- BSONElement storageEngineElement = desc.getInfoElement("storageEngine");
- if (storageEngineElement.isABSONObj()) {
- BSONObj storageEngine = storageEngineElement.Obj();
- StatusWith<std::string> parseStatus =
- parseIndexOptions(storageEngine.getObjectField(kWiredTigerEngineName));
- if (!parseStatus.isOK()) {
- return parseStatus;
- }
- if (!parseStatus.getValue().empty()) {
- ss << "," << parseStatus.getValue();
- }
- }
-
- // WARNING: No user-specified config can appear below this line. These options are required
- // for correct behavior of the server.
-
- // Indexes need to store the metadata for collation to work as expected.
- ss << ",key_format=u,value_format=u";
-
- // Index metadata
- ss << ",app_metadata=("
- << "formatVersion=" << kCurrentIndexVersion << ','
- << "infoObj=" << desc.infoObj().jsonString()
- << "),";
-
- LOG(3) << "index create string: " << ss.ss.str();
- return StatusWith<std::string>(ss);
- }
-
- int WiredTigerIndex::Create(OperationContext* txn,
- const std::string& uri,
- const std::string& config) {
- WT_SESSION* s = WiredTigerRecoveryUnit::get( txn )->getSession(txn)->getSession();
- LOG(1) << "create uri: " << uri << " config: " << config;
- return s->create(s, uri.c_str(), config.c_str());
+ return StatusWith<std::string>(ss.str());
+}
+
+// static
+StatusWith<std::string> WiredTigerIndex::generateCreateString(const std::string& extraConfig,
+ const IndexDescriptor& desc) {
+ str::stream ss;
+
+ // Separate out a prefix and suffix in the default string. User configuration will override
+ // values in the prefix, but not values in the suffix. Page sizes are chosen so that index
+ // keys (up to 1024 bytes) will not overflow.
+ ss << "type=file,internal_page_max=16k,leaf_page_max=16k,";
+ ss << "checksum=on,";
+ if (wiredTigerGlobalOptions.useIndexPrefixCompression) {
+ ss << "prefix_compression=true,";
}
- WiredTigerIndex::WiredTigerIndex(OperationContext* ctx,
- const std::string& uri,
- const IndexDescriptor* desc)
- : _ordering(Ordering::make(desc->keyPattern())),
- _uri( uri ),
- _instanceId( WiredTigerSession::genCursorId() ),
- _collectionNamespace( desc->parentNS() ),
- _indexName( desc->indexName() ){
-
- Status versionStatus =
- WiredTigerUtil::checkApplicationMetadataFormatVersion(ctx,
- uri,
- kMinimumIndexVersion,
- kMaximumIndexVersion);
- if (!versionStatus.isOK()) {
- fassertFailedWithStatusNoTrace(28579, versionStatus);
+ ss << "block_compressor=" << wiredTigerGlobalOptions.indexBlockCompressor << ",";
+ ss << WiredTigerCustomizationHooks::get(getGlobalServiceContext())
+ ->getOpenConfig(desc.parentNS());
+ ss << extraConfig;
+
+ // Validate configuration object.
+ // Raise an error about unrecognized fields that may be introduced in newer versions of
+ // this storage engine.
+ // Ensure that 'configString' field is a string. Raise an error if this is not the case.
+ BSONElement storageEngineElement = desc.getInfoElement("storageEngine");
+ if (storageEngineElement.isABSONObj()) {
+ BSONObj storageEngine = storageEngineElement.Obj();
+ StatusWith<std::string> parseStatus =
+ parseIndexOptions(storageEngine.getObjectField(kWiredTigerEngineName));
+ if (!parseStatus.isOK()) {
+ return parseStatus;
+ }
+ if (!parseStatus.getValue().empty()) {
+ ss << "," << parseStatus.getValue();
}
}
- Status WiredTigerIndex::insert(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
- invariant(loc.isNormal());
- dassert(!hasFieldNames(key));
-
- Status s = checkKeySize(key);
- if (!s.isOK())
- return s;
-
- WiredTigerCursor curwrap(_uri, _instanceId, false, txn);
- curwrap.assertInActiveTxn();
- WT_CURSOR *c = curwrap.get();
-
- return _insert( c, key, loc, dupsAllowed );
- }
-
- void WiredTigerIndex::unindex(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed ) {
- invariant(loc.isNormal());
- dassert(!hasFieldNames(key));
-
- WiredTigerCursor curwrap(_uri, _instanceId, false, txn);
- curwrap.assertInActiveTxn();
- WT_CURSOR *c = curwrap.get();
- invariant( c );
-
- _unindex( c, key, loc, dupsAllowed );
+ // WARNING: No user-specified config can appear below this line. These options are required
+ // for correct behavior of the server.
+
+ // Indexes need to store the metadata for collation to work as expected.
+ ss << ",key_format=u,value_format=u";
+
+ // Index metadata
+ ss << ",app_metadata=("
+ << "formatVersion=" << kCurrentIndexVersion << ','
+ << "infoObj=" << desc.infoObj().jsonString() << "),";
+
+ LOG(3) << "index create string: " << ss.ss.str();
+ return StatusWith<std::string>(ss);
+}
+
+int WiredTigerIndex::Create(OperationContext* txn,
+ const std::string& uri,
+ const std::string& config) {
+ WT_SESSION* s = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+ LOG(1) << "create uri: " << uri << " config: " << config;
+ return s->create(s, uri.c_str(), config.c_str());
+}
+
+WiredTigerIndex::WiredTigerIndex(OperationContext* ctx,
+ const std::string& uri,
+ const IndexDescriptor* desc)
+ : _ordering(Ordering::make(desc->keyPattern())),
+ _uri(uri),
+ _instanceId(WiredTigerSession::genCursorId()),
+ _collectionNamespace(desc->parentNS()),
+ _indexName(desc->indexName()) {
+ Status versionStatus = WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ ctx, uri, kMinimumIndexVersion, kMaximumIndexVersion);
+ if (!versionStatus.isOK()) {
+ fassertFailedWithStatusNoTrace(28579, versionStatus);
}
-
- void WiredTigerIndex::fullValidate(OperationContext* txn, bool full, long long *numKeysOut,
- BSONObjBuilder* output) const {
- {
- std::vector<std::string> errors;
- int err = WiredTigerUtil::verifyTable(txn, _uri, output ? &errors : NULL);
- if (err == EBUSY) {
- const char* msg = "verify() returned EBUSY. Not treating as invalid.";
- warning() << msg;
- if (output) {
- if (!errors.empty()) {
- *output << "errors" << errors;
- }
- *output << "warning" << msg;
- }
- }
- else if (err) {
- std::string msg = str::stream()
- << "verify() returned " << wiredtiger_strerror(err) << ". "
- << "This indicates structural damage. "
- << "Not examining individual index entries.";
- error() << msg;
- if (output) {
- errors.push_back(msg);
+}
+
+Status WiredTigerIndex::insert(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ invariant(loc.isNormal());
+ dassert(!hasFieldNames(key));
+
+ Status s = checkKeySize(key);
+ if (!s.isOK())
+ return s;
+
+ WiredTigerCursor curwrap(_uri, _instanceId, false, txn);
+ curwrap.assertInActiveTxn();
+ WT_CURSOR* c = curwrap.get();
+
+ return _insert(c, key, loc, dupsAllowed);
+}
+
+void WiredTigerIndex::unindex(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ invariant(loc.isNormal());
+ dassert(!hasFieldNames(key));
+
+ WiredTigerCursor curwrap(_uri, _instanceId, false, txn);
+ curwrap.assertInActiveTxn();
+ WT_CURSOR* c = curwrap.get();
+ invariant(c);
+
+ _unindex(c, key, loc, dupsAllowed);
+}
+
+void WiredTigerIndex::fullValidate(OperationContext* txn,
+ bool full,
+ long long* numKeysOut,
+ BSONObjBuilder* output) const {
+ {
+ std::vector<std::string> errors;
+ int err = WiredTigerUtil::verifyTable(txn, _uri, output ? &errors : NULL);
+ if (err == EBUSY) {
+ const char* msg = "verify() returned EBUSY. Not treating as invalid.";
+ warning() << msg;
+ if (output) {
+ if (!errors.empty()) {
*output << "errors" << errors;
- *output << "valid" << false;
}
- return;
+ *output << "warning" << msg;
+ }
+ } else if (err) {
+ std::string msg = str::stream() << "verify() returned " << wiredtiger_strerror(err)
+ << ". "
+ << "This indicates structural damage. "
+ << "Not examining individual index entries.";
+ error() << msg;
+ if (output) {
+ errors.push_back(msg);
+ *output << "errors" << errors;
+ *output << "valid" << false;
}
+ return;
}
+ }
- if (output) *output << "valid" << true;
-
- auto cursor = newCursor(txn);
- long long count = 0;
- TRACE_INDEX << " fullValidate";
-
- const auto requestedInfo = TRACING_ENABLED ? Cursor::kKeyAndLoc : Cursor::kJustExistance;
- for (auto kv = cursor->seek(BSONObj(), true, requestedInfo); kv; kv = cursor->next()) {
- TRACE_INDEX << "\t" << kv->key << ' ' << kv->loc;
- count++;
- }
+ if (output)
+ *output << "valid" << true;
- if ( numKeysOut ) {
- *numKeysOut = count;
- }
+ auto cursor = newCursor(txn);
+ long long count = 0;
+ TRACE_INDEX << " fullValidate";
- // Nothing further to do if 'full' validation is not requested.
- if (!full) {
- return;
- }
+ const auto requestedInfo = TRACING_ENABLED ? Cursor::kKeyAndLoc : Cursor::kJustExistance;
+ for (auto kv = cursor->seek(BSONObj(), true, requestedInfo); kv; kv = cursor->next()) {
+ TRACE_INDEX << "\t" << kv->key << ' ' << kv->loc;
+ count++;
+ }
- invariant(output);
+ if (numKeysOut) {
+ *numKeysOut = count;
}
- bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
- BSONObjBuilder* output,
- double scale) const {
+ // Nothing further to do if 'full' validation is not requested.
+ if (!full) {
+ return;
+ }
- {
- BSONObjBuilder metadata(output->subobjStart("metadata"));
- Status status = WiredTigerUtil::getApplicationMetadata(txn, uri(), &metadata);
- if (!status.isOK()) {
- metadata.append("error", "unable to retrieve metadata");
- metadata.append("code", static_cast<int>(status.code()));
- metadata.append("reason", status.reason());
- }
- }
- std::string type, sourceURI;
- WiredTigerUtil::fetchTypeAndSourceURI(txn, _uri, &type, &sourceURI);
- StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(txn, sourceURI);
- StringData creationStringName("creationString");
- if (!metadataResult.isOK()) {
- BSONObjBuilder creationString(output->subobjStart(creationStringName));
- creationString.append("error", "unable to retrieve creation config");
- creationString.append("code", static_cast<int>(metadataResult.getStatus().code()));
- creationString.append("reason", metadataResult.getStatus().reason());
- }
- else {
- output->append(creationStringName, metadataResult.getValue());
- // Type can be "lsm" or "file"
- output->append("type", type);
- }
+ invariant(output);
+}
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
- WT_SESSION* s = session->getSession();
- Status status = WiredTigerUtil::exportTableToBSON(s, "statistics:" + uri(),
- "statistics=(fast)", output);
+bool WiredTigerIndex::appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* output,
+ double scale) const {
+ {
+ BSONObjBuilder metadata(output->subobjStart("metadata"));
+ Status status = WiredTigerUtil::getApplicationMetadata(txn, uri(), &metadata);
if (!status.isOK()) {
- output->append("error", "unable to retrieve statistics");
- output->append("code", static_cast<int>(status.code()));
- output->append("reason", status.reason());
+ metadata.append("error", "unable to retrieve metadata");
+ metadata.append("code", static_cast<int>(status.code()));
+ metadata.append("reason", status.reason());
}
- return true;
}
-
- Status WiredTigerIndex::dupKeyCheck( OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc) {
- invariant(!hasFieldNames(key));
- invariant(unique());
-
- WiredTigerCursor curwrap(_uri, _instanceId, false, txn);
- WT_CURSOR *c = curwrap.get();
-
- if ( isDup(c, key, loc) )
- return dupKeyError(key);
- return Status::OK();
+ std::string type, sourceURI;
+ WiredTigerUtil::fetchTypeAndSourceURI(txn, _uri, &type, &sourceURI);
+ StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(txn, sourceURI);
+ StringData creationStringName("creationString");
+ if (!metadataResult.isOK()) {
+ BSONObjBuilder creationString(output->subobjStart(creationStringName));
+ creationString.append("error", "unable to retrieve creation config");
+ creationString.append("code", static_cast<int>(metadataResult.getStatus().code()));
+ creationString.append("reason", metadataResult.getStatus().reason());
+ } else {
+ output->append(creationStringName, metadataResult.getValue());
+ // Type can be "lsm" or "file"
+ output->append("type", type);
}
- bool WiredTigerIndex::isEmpty(OperationContext* txn) {
- WiredTigerCursor curwrap(_uri, _instanceId, false, txn);
- WT_CURSOR *c = curwrap.get();
- if (!c)
- return true;
- int ret = WT_OP_CHECK(c->next(c));
- if (ret == WT_NOTFOUND)
- return true;
- invariantWTOK(ret);
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ WT_SESSION* s = session->getSession();
+ Status status =
+ WiredTigerUtil::exportTableToBSON(s, "statistics:" + uri(), "statistics=(fast)", output);
+ if (!status.isOK()) {
+ output->append("error", "unable to retrieve statistics");
+ output->append("code", static_cast<int>(status.code()));
+ output->append("reason", status.reason());
+ }
+ return true;
+}
+
+Status WiredTigerIndex::dupKeyCheck(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc) {
+ invariant(!hasFieldNames(key));
+ invariant(unique());
+
+ WiredTigerCursor curwrap(_uri, _instanceId, false, txn);
+ WT_CURSOR* c = curwrap.get();
+
+ if (isDup(c, key, loc))
+ return dupKeyError(key);
+ return Status::OK();
+}
+
+bool WiredTigerIndex::isEmpty(OperationContext* txn) {
+ WiredTigerCursor curwrap(_uri, _instanceId, false, txn);
+ WT_CURSOR* c = curwrap.get();
+ if (!c)
+ return true;
+ int ret = WT_OP_CHECK(c->next(c));
+ if (ret == WT_NOTFOUND)
+ return true;
+ invariantWTOK(ret);
+ return false;
+}
+
+long long WiredTigerIndex::getSpaceUsedBytes(OperationContext* txn) const {
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ return static_cast<long long>(WiredTigerUtil::getIdentSize(session->getSession(), _uri));
+}
+
+bool WiredTigerIndex::isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& loc) {
+ invariant(unique());
+ // First check whether the key exists.
+ KeyString data(key, _ordering);
+ WiredTigerItem item(data.getBuffer(), data.getSize());
+ c->set_key(c, item.Get());
+ int ret = WT_OP_CHECK(c->search(c));
+ if (ret == WT_NOTFOUND) {
return false;
}
+ invariantWTOK(ret);
+
+ // If the key exists, check if we already have this loc at this key. If so, we don't
+ // consider that to be a dup.
+ WT_ITEM value;
+ invariantWTOK(c->get_value(c, &value));
+ BufReader br(value.data, value.size);
+ while (br.remaining()) {
+ if (KeyString::decodeRecordId(&br) == loc)
+ return false;
- long long WiredTigerIndex::getSpaceUsedBytes( OperationContext* txn ) const {
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
- return static_cast<long long>( WiredTigerUtil::getIdentSize( session->getSession(),
- _uri ) );
+ KeyString::TypeBits::fromBuffer(&br); // Just calling this to advance reader.
}
+ return true;
+}
- bool WiredTigerIndex::isDup(WT_CURSOR *c, const BSONObj& key, const RecordId& loc ) {
- invariant( unique() );
- // First check whether the key exists.
- KeyString data( key, _ordering );
- WiredTigerItem item( data.getBuffer(), data.getSize() );
- c->set_key( c, item.Get() );
- int ret = WT_OP_CHECK(c->search(c));
- if (ret == WT_NOTFOUND) {
- return false;
- }
- invariantWTOK( ret );
-
- // If the key exists, check if we already have this loc at this key. If so, we don't
- // consider that to be a dup.
- WT_ITEM value;
- invariantWTOK( c->get_value(c,&value) );
- BufReader br(value.data, value.size);
- while (br.remaining()) {
- if (KeyString::decodeRecordId(&br) == loc)
- return false;
-
- KeyString::TypeBits::fromBuffer(&br); // Just calling this to advance reader.
- }
- return true;
- }
+Status WiredTigerIndex::initAsEmpty(OperationContext* txn) {
+ // No-op
+ return Status::OK();
+}
- Status WiredTigerIndex::initAsEmpty(OperationContext* txn) {
- // No-op
- return Status::OK();
+/**
+ * Base class for WiredTigerIndex bulk builders.
+ *
+ * Manages the bulk cursor used by bulk builders.
+ */
+class WiredTigerIndex::BulkBuilder : public SortedDataBuilderInterface {
+public:
+ BulkBuilder(WiredTigerIndex* idx, OperationContext* txn)
+ : _ordering(idx->_ordering),
+ _txn(txn),
+ _session(WiredTigerRecoveryUnit::get(_txn)->getSessionCache()->getSession()),
+ _cursor(openBulkCursor(idx)) {}
+
+ ~BulkBuilder() {
+ _cursor->close(_cursor);
+ WiredTigerRecoveryUnit::get(_txn)->getSessionCache()->releaseSession(_session);
}
- /**
- * Base class for WiredTigerIndex bulk builders.
- *
- * Manages the bulk cursor used by bulk builders.
- */
- class WiredTigerIndex::BulkBuilder : public SortedDataBuilderInterface {
- public:
- BulkBuilder(WiredTigerIndex* idx, OperationContext* txn)
- : _ordering(idx->_ordering)
- , _txn(txn)
- , _session(WiredTigerRecoveryUnit::get(_txn)->getSessionCache()->getSession())
- , _cursor(openBulkCursor(idx))
- {}
-
- ~BulkBuilder() {
- _cursor->close(_cursor);
- WiredTigerRecoveryUnit::get(_txn)->getSessionCache()->releaseSession(_session);
- }
-
- protected:
- WT_CURSOR* openBulkCursor(WiredTigerIndex* idx) {
- // Open cursors can cause bulk open_cursor to fail with EBUSY.
- // TODO any other cases that could cause EBUSY?
- WiredTigerSession* outerSession = WiredTigerRecoveryUnit::get(_txn)->getSession(_txn);
- outerSession->closeAllCursors();
-
- // Not using cursor cache since we need to set "bulk".
- WT_CURSOR* cursor;
- // We use our own session to ensure we aren't in a transaction.
- WT_SESSION* session = _session->getSession();
- int err = session->open_cursor(session, idx->uri().c_str(), NULL, "bulk", &cursor);
- if (!err)
- return cursor;
-
- warning() << "failed to create WiredTiger bulk cursor: " << wiredtiger_strerror(err);
- warning() << "falling back to non-bulk cursor for index " << idx->uri();
-
- invariantWTOK(session->open_cursor(session, idx->uri().c_str(), NULL, NULL, &cursor));
+protected:
+ WT_CURSOR* openBulkCursor(WiredTigerIndex* idx) {
+ // Open cursors can cause bulk open_cursor to fail with EBUSY.
+ // TODO any other cases that could cause EBUSY?
+ WiredTigerSession* outerSession = WiredTigerRecoveryUnit::get(_txn)->getSession(_txn);
+ outerSession->closeAllCursors();
+
+ // Not using cursor cache since we need to set "bulk".
+ WT_CURSOR* cursor;
+ // We use our own session to ensure we aren't in a transaction.
+ WT_SESSION* session = _session->getSession();
+ int err = session->open_cursor(session, idx->uri().c_str(), NULL, "bulk", &cursor);
+ if (!err)
return cursor;
- }
- const Ordering _ordering;
- OperationContext* const _txn;
- WiredTigerSession* const _session;
- WT_CURSOR* const _cursor;
- };
+ warning() << "failed to create WiredTiger bulk cursor: " << wiredtiger_strerror(err);
+ warning() << "falling back to non-bulk cursor for index " << idx->uri();
- /**
- * Bulk builds a non-unique index.
- */
- class WiredTigerIndex::StandardBulkBuilder : public BulkBuilder {
- public:
- StandardBulkBuilder(WiredTigerIndex* idx, OperationContext* txn)
- : BulkBuilder(idx, txn), _idx(idx) {
- }
-
- Status addKey(const BSONObj& key, const RecordId& loc) {
- {
- const Status s = checkKeySize(key);
- if (!s.isOK())
- return s;
- }
-
- KeyString data( key, _idx->_ordering, loc );
+ invariantWTOK(session->open_cursor(session, idx->uri().c_str(), NULL, NULL, &cursor));
+ return cursor;
+ }
- // Can't use WiredTigerCursor since we aren't using the cache.
- WiredTigerItem item(data.getBuffer(), data.getSize());
- _cursor->set_key(_cursor, item.Get() );
+ const Ordering _ordering;
+ OperationContext* const _txn;
+ WiredTigerSession* const _session;
+ WT_CURSOR* const _cursor;
+};
- WiredTigerItem valueItem =
- data.getTypeBits().isAllZeros() ? emptyItem
- : WiredTigerItem(data.getTypeBits().getBuffer(),
- data.getTypeBits().getSize());
+/**
+ * Bulk builds a non-unique index.
+ */
+class WiredTigerIndex::StandardBulkBuilder : public BulkBuilder {
+public:
+ StandardBulkBuilder(WiredTigerIndex* idx, OperationContext* txn)
+ : BulkBuilder(idx, txn), _idx(idx) {}
- _cursor->set_value(_cursor, valueItem.Get());
+ Status addKey(const BSONObj& key, const RecordId& loc) {
+ {
+ const Status s = checkKeySize(key);
+ if (!s.isOK())
+ return s;
+ }
- invariantWTOK(_cursor->insert(_cursor));
+ KeyString data(key, _idx->_ordering, loc);
- return Status::OK();
- }
+ // Can't use WiredTigerCursor since we aren't using the cache.
+ WiredTigerItem item(data.getBuffer(), data.getSize());
+ _cursor->set_key(_cursor, item.Get());
- void commit(bool mayInterrupt) {
- // TODO do we still need this?
- // this is bizarre, but required as part of the contract
- WriteUnitOfWork uow( _txn );
- uow.commit();
- }
+ WiredTigerItem valueItem = data.getTypeBits().isAllZeros()
+ ? emptyItem
+ : WiredTigerItem(data.getTypeBits().getBuffer(), data.getTypeBits().getSize());
- private:
- WiredTigerIndex* _idx;
- };
+ _cursor->set_value(_cursor, valueItem.Get());
- /**
- * Bulk builds a unique index.
- *
- * In order to support unique indexes in dupsAllowed mode this class only does an actual insert
- * after it sees a key after the one we are trying to insert. This allows us to gather up all
- * duplicate locs and insert them all together. This is necessary since bulk cursors can only
- * append data.
- */
- class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder {
- public:
- UniqueBulkBuilder(WiredTigerIndex* idx, OperationContext* txn, bool dupsAllowed)
- : BulkBuilder(idx, txn), _idx(idx), _dupsAllowed(dupsAllowed) {
- }
+ invariantWTOK(_cursor->insert(_cursor));
- Status addKey(const BSONObj& newKey, const RecordId& loc) {
- {
- const Status s = checkKeySize(newKey);
- if (!s.isOK())
- return s;
- }
+ return Status::OK();
+ }
- const int cmp = newKey.woCompare(_key, _ordering);
- if (cmp != 0) {
- if (!_key.isEmpty()) { // _key.isEmpty() is only true on the first call to addKey().
- invariant(cmp > 0); // newKey must be > the last key
- // We are done with dups of the last key so we can insert it now.
- doInsert();
- }
- invariant(_records.empty());
- }
- else {
- // Dup found!
- if (!_dupsAllowed) {
- return _idx->dupKeyError(newKey);
- }
+ void commit(bool mayInterrupt) {
+ // TODO do we still need this?
+ // this is bizarre, but required as part of the contract
+ WriteUnitOfWork uow(_txn);
+ uow.commit();
+ }
- // If we get here, we are in the weird mode where dups are allowed on a unique
- // index, so add ourselves to the list of duplicate locs. This also replaces the
- // _key which is correct since any dups seen later are likely to be newer.
- }
+private:
+ WiredTigerIndex* _idx;
+};
- _key = newKey.getOwned();
- _keyString.resetToKey(_key, _idx->ordering());
- _records.push_back(std::make_pair(loc, _keyString.getTypeBits()));
+/**
+ * Bulk builds a unique index.
+ *
+ * In order to support unique indexes in dupsAllowed mode this class only does an actual insert
+ * after it sees a key after the one we are trying to insert. This allows us to gather up all
+ * duplicate locs and insert them all together. This is necessary since bulk cursors can only
+ * append data.
+ */
+class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder {
+public:
+ UniqueBulkBuilder(WiredTigerIndex* idx, OperationContext* txn, bool dupsAllowed)
+ : BulkBuilder(idx, txn), _idx(idx), _dupsAllowed(dupsAllowed) {}
- return Status::OK();
+ Status addKey(const BSONObj& newKey, const RecordId& loc) {
+ {
+ const Status s = checkKeySize(newKey);
+ if (!s.isOK())
+ return s;
}
- void commit(bool mayInterrupt) {
- WriteUnitOfWork uow( _txn );
- if (!_records.empty()) {
- // This handles inserting the last unique key.
+ const int cmp = newKey.woCompare(_key, _ordering);
+ if (cmp != 0) {
+ if (!_key.isEmpty()) { // _key.isEmpty() is only true on the first call to addKey().
+ invariant(cmp > 0); // newKey must be > the last key
+ // We are done with dups of the last key so we can insert it now.
doInsert();
}
- uow.commit();
- }
-
- private:
- void doInsert() {
- invariant(!_records.empty());
-
- KeyString value;
- for (size_t i = 0; i < _records.size(); i++) {
- value.appendRecordId(_records[i].first);
- // When there is only one record, we can omit AllZeros TypeBits. Otherwise they need
- // to be included.
- if (!(_records[i].second.isAllZeros() && _records.size() == 1)) {
- value.appendTypeBits(_records[i].second);
- }
+ invariant(_records.empty());
+ } else {
+ // Dup found!
+ if (!_dupsAllowed) {
+ return _idx->dupKeyError(newKey);
}
-
- WiredTigerItem keyItem( _keyString.getBuffer(), _keyString.getSize() );
- WiredTigerItem valueItem(value.getBuffer(), value.getSize());
-
- _cursor->set_key(_cursor, keyItem.Get());
- _cursor->set_value(_cursor, valueItem.Get());
- invariantWTOK(_cursor->insert(_cursor));
-
- _records.clear();
+ // If we get here, we are in the weird mode where dups are allowed on a unique
+ // index, so add ourselves to the list of duplicate locs. This also replaces the
+ // _key which is correct since any dups seen later are likely to be newer.
}
- WiredTigerIndex* _idx;
- const bool _dupsAllowed;
- BSONObj _key;
- KeyString _keyString;
- std::vector<std::pair<RecordId, KeyString::TypeBits> > _records;
- };
+ _key = newKey.getOwned();
+ _keyString.resetToKey(_key, _idx->ordering());
+ _records.push_back(std::make_pair(loc, _keyString.getTypeBits()));
-namespace {
+ return Status::OK();
+ }
- /**
- * Implements the basic WT_CURSOR functionality used by both unique and standard indexes.
- */
- class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor {
- public:
- WiredTigerIndexCursorBase(const WiredTigerIndex& idx, OperationContext *txn, bool forward)
- : _txn(txn),
- _cursor(idx.uri(), idx.instanceId(), false, txn),
- _idx(idx),
- _forward(forward) {
+ void commit(bool mayInterrupt) {
+ WriteUnitOfWork uow(_txn);
+ if (!_records.empty()) {
+ // This handles inserting the last unique key.
+ doInsert();
}
+ uow.commit();
+ }
- boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
- // Advance on a cursor at the end is a no-op
- if (_eof) return {};
-
- if (!_lastMoveWasRestore) advanceWTCursor();
- updatePosition();
- return curr(parts);
- }
+private:
+ void doInsert() {
+ invariant(!_records.empty());
- void setEndPosition(const BSONObj& key, bool inclusive) override {
- TRACE_CURSOR << "setEndPosition inclusive: " << inclusive << ' ' << key;
- if (key.isEmpty()) {
- // This means scan to end of index.
- _endPosition.reset();
- return;
+ KeyString value;
+ for (size_t i = 0; i < _records.size(); i++) {
+ value.appendRecordId(_records[i].first);
+ // When there is only one record, we can omit AllZeros TypeBits. Otherwise they need
+ // to be included.
+ if (!(_records[i].second.isAllZeros() && _records.size() == 1)) {
+ value.appendTypeBits(_records[i].second);
}
-
- // NOTE: this uses the opposite rules as a normal seek because a forward scan should
- // end after the key if inclusive and before if exclusive.
- const auto discriminator = _forward == inclusive ? KeyString::kExclusiveAfter
- : KeyString::kExclusiveBefore;
- _endPosition = stdx::make_unique<KeyString>();
- _endPosition->resetToKey(stripFieldNames(key), _idx.ordering(), discriminator);
}
- boost::optional<IndexKeyEntry> seek(const BSONObj& key, bool inclusive,
- RequestedInfo parts) override {
- const BSONObj finalKey = stripFieldNames(key);
- const auto discriminator = _forward == inclusive ? KeyString::kExclusiveBefore
- : KeyString::kExclusiveAfter;
-
- // By using a discriminator other than kInclusive, there is no need to distinguish
- // unique vs non-unique key formats since both start with the key.
- _query.resetToKey(finalKey, _idx.ordering(), discriminator);
- seekWTCursor(_query);
- updatePosition();
- return curr(parts);
- }
+ WiredTigerItem keyItem(_keyString.getBuffer(), _keyString.getSize());
+ WiredTigerItem valueItem(value.getBuffer(), value.getSize());
- boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
- RequestedInfo parts) override {
- // TODO: don't go to a bson obj then to a KeyString, go straight
- BSONObj key = IndexEntryComparison::makeQueryObject(seekPoint, _forward);
-
- // makeQueryObject handles the discriminator in the real exclusive cases.
- const auto discriminator = _forward ? KeyString::kExclusiveBefore
- : KeyString::kExclusiveAfter;
- _query.resetToKey(key, _idx.ordering(), discriminator);
- seekWTCursor(_query);
- updatePosition();
- return curr(parts);
- }
+ _cursor->set_key(_cursor, keyItem.Get());
+ _cursor->set_value(_cursor, valueItem.Get());
- void savePositioned() override {
- if (!_txn) return; // still saved
+ invariantWTOK(_cursor->insert(_cursor));
- _savedForCheck = _txn->recoveryUnit();
+ _records.clear();
+ }
- if (!wt_keeptxnopen()) {
- try {
- _cursor.reset();
- }
- catch (const WriteConflictException& wce) {
- // Ignore since this is only called when we are about to kill our transaction
- // anyway.
- }
+ WiredTigerIndex* _idx;
+ const bool _dupsAllowed;
+ BSONObj _key;
+ KeyString _keyString;
+ std::vector<std::pair<RecordId, KeyString::TypeBits>> _records;
+};
- // Our saved position is wherever we were when we last called updatePosition().
- // Any partially completed repositions should not effect our saved position.
- }
+namespace {
- _txn = NULL;
- }
+/**
+ * Implements the basic WT_CURSOR functionality used by both unique and standard indexes.
+ */
+class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor {
+public:
+ WiredTigerIndexCursorBase(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
+ : _txn(txn),
+ _cursor(idx.uri(), idx.instanceId(), false, txn),
+ _idx(idx),
+ _forward(forward) {}
+
+ boost::optional<IndexKeyEntry> next(RequestedInfo parts) override {
+ // Advance on a cursor at the end is a no-op
+ if (_eof)
+ return {};
+
+ if (!_lastMoveWasRestore)
+ advanceWTCursor();
+ updatePosition();
+ return curr(parts);
+ }
- void saveUnpositioned() override {
- savePositioned();
- _eof = true;
+ void setEndPosition(const BSONObj& key, bool inclusive) override {
+ TRACE_CURSOR << "setEndPosition inclusive: " << inclusive << ' ' << key;
+ if (key.isEmpty()) {
+ // This means scan to end of index.
+ _endPosition.reset();
+ return;
}
- void restore(OperationContext *txn) override {
- // Update the session handle with our new operation context.
- invariant( _savedForCheck == txn->recoveryUnit() );
- _txn = txn;
-
- if (!wt_keeptxnopen()) {
- if (!_eof) {
- // Ensure an active session exists, so any restored cursors will bind to it
- WiredTigerRecoveryUnit::get(txn)->getSession(txn);
- _lastMoveWasRestore = !seekWTCursor(_key);
- TRACE_CURSOR << "restore _lastMoveWasRestore:" << _lastMoveWasRestore;
- }
- }
- }
+ // NOTE: this uses the opposite rules as a normal seek because a forward scan should
+ // end after the key if inclusive and before if exclusive.
+ const auto discriminator =
+ _forward == inclusive ? KeyString::kExclusiveAfter : KeyString::kExclusiveBefore;
+ _endPosition = stdx::make_unique<KeyString>();
+ _endPosition->resetToKey(stripFieldNames(key), _idx.ordering(), discriminator);
+ }
- protected:
- // Called after _key has been filled in. Must not throw WriteConflictException.
- virtual void updateLocAndTypeBits() = 0;
+ boost::optional<IndexKeyEntry> seek(const BSONObj& key,
+ bool inclusive,
+ RequestedInfo parts) override {
+ const BSONObj finalKey = stripFieldNames(key);
+ const auto discriminator =
+ _forward == inclusive ? KeyString::kExclusiveBefore : KeyString::kExclusiveAfter;
+
+ // By using a discriminator other than kInclusive, there is no need to distinguish
+ // unique vs non-unique key formats since both start with the key.
+ _query.resetToKey(finalKey, _idx.ordering(), discriminator);
+ seekWTCursor(_query);
+ updatePosition();
+ return curr(parts);
+ }
- boost::optional<IndexKeyEntry> curr(RequestedInfo parts) const {
- if (_eof) return {};
+ boost::optional<IndexKeyEntry> seek(const IndexSeekPoint& seekPoint,
+ RequestedInfo parts) override {
+ // TODO: don't go to a bson obj then to a KeyString, go straight
+ BSONObj key = IndexEntryComparison::makeQueryObject(seekPoint, _forward);
+
+ // makeQueryObject handles the discriminator in the real exclusive cases.
+ const auto discriminator =
+ _forward ? KeyString::kExclusiveBefore : KeyString::kExclusiveAfter;
+ _query.resetToKey(key, _idx.ordering(), discriminator);
+ seekWTCursor(_query);
+ updatePosition();
+ return curr(parts);
+ }
- dassert(!atOrPastEndPointAfterSeeking());
- dassert(!_loc.isNull());
+ void savePositioned() override {
+ if (!_txn)
+ return; // still saved
- BSONObj bson;
- if (TRACING_ENABLED || (parts & kWantKey)) {
- bson = KeyString::toBson(_key.getBuffer(), _key.getSize(), _idx.ordering(),
- _typeBits);
+ _savedForCheck = _txn->recoveryUnit();
- TRACE_CURSOR << " returning " << bson << ' ' << _loc;
+ if (!wt_keeptxnopen()) {
+ try {
+ _cursor.reset();
+ } catch (const WriteConflictException& wce) {
+ // Ignore since this is only called when we are about to kill our transaction
+ // anyway.
}
- return {{std::move(bson), _loc}};
+ // Our saved position is wherever we were when we last called updatePosition().
+            // Any partially completed repositions should not affect our saved position.
}
- bool atOrPastEndPointAfterSeeking() const {
- if (_eof) return true;
- if (!_endPosition) return false;
-
- const int cmp = _key.compare(*_endPosition);
-
- // We set up _endPosition to be in between the last in-range value and the first
- // out-of-range value. In particular, it is constructed to never equal any legal index
- // key.
- dassert(cmp != 0);
+ _txn = NULL;
+ }
- if (_forward) {
- // We may have landed after the end point.
- return cmp > 0;
- }
- else {
- // We may have landed before the end point.
- return cmp < 0;
- }
- }
+ void saveUnpositioned() override {
+ savePositioned();
+ _eof = true;
+ }
- void advanceWTCursor() {
- WT_CURSOR *c = _cursor.get();
- int ret = WT_OP_CHECK(_forward ? c->next(c) : c->prev(c));
- if ( ret == WT_NOTFOUND ) {
- _cursorAtEof = true;
- return;
+ void restore(OperationContext* txn) override {
+ // Update the session handle with our new operation context.
+ invariant(_savedForCheck == txn->recoveryUnit());
+ _txn = txn;
+
+ if (!wt_keeptxnopen()) {
+ if (!_eof) {
+ // Ensure an active session exists, so any restored cursors will bind to it
+ WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ _lastMoveWasRestore = !seekWTCursor(_key);
+ TRACE_CURSOR << "restore _lastMoveWasRestore:" << _lastMoveWasRestore;
}
- invariantWTOK(ret);
- _cursorAtEof = false;
}
+ }
- // Seeks to query. Returns true on exact match.
- bool seekWTCursor(const KeyString& query) {
- WT_CURSOR *c = _cursor.get();
-
- int cmp = -1;
- const WiredTigerItem keyItem(query.getBuffer(), query.getSize());
- c->set_key(c, keyItem.Get());
-
- int ret = WT_OP_CHECK(c->search_near(c, &cmp));
- if ( ret == WT_NOTFOUND ) {
- _cursorAtEof = true;
- TRACE_CURSOR << "\t not found";
- return false;
- }
- invariantWTOK( ret );
- _cursorAtEof = false;
+protected:
+ // Called after _key has been filled in. Must not throw WriteConflictException.
+ virtual void updateLocAndTypeBits() = 0;
- TRACE_CURSOR << "\t cmp: " << cmp;
+ boost::optional<IndexKeyEntry> curr(RequestedInfo parts) const {
+ if (_eof)
+ return {};
- if (cmp == 0) {
- // Found it!
- return true;
- }
+ dassert(!atOrPastEndPointAfterSeeking());
+ dassert(!_loc.isNull());
- // Make sure we land on a matching key (after/before for forward/reverse).
- if (_forward ? cmp < 0 : cmp > 0) {
- advanceWTCursor();
- }
+ BSONObj bson;
+ if (TRACING_ENABLED || (parts & kWantKey)) {
+ bson = KeyString::toBson(_key.getBuffer(), _key.getSize(), _idx.ordering(), _typeBits);
- return false;
+ TRACE_CURSOR << " returning " << bson << ' ' << _loc;
}
- /**
- * This must be called after moving the cursor to update our cached position. It should not
- * be called after a restore that did not restore to original state since that does not
- * logically move the cursor until the following call to next().
- */
- void updatePosition() {
- _lastMoveWasRestore = false;
- if (_cursorAtEof) {
- _eof = true;
- _loc = RecordId();
- return;
- }
+ return {{std::move(bson), _loc}};
+ }
- _eof = false;
+ bool atOrPastEndPointAfterSeeking() const {
+ if (_eof)
+ return true;
+ if (!_endPosition)
+ return false;
- WT_CURSOR *c = _cursor.get();
- WT_ITEM item;
- invariantWTOK(c->get_key(c, &item));
- _key.resetFromBuffer(item.data, item.size);
+ const int cmp = _key.compare(*_endPosition);
- if (atOrPastEndPointAfterSeeking()) {
- _eof = true;
- return;
- }
+ // We set up _endPosition to be in between the last in-range value and the first
+ // out-of-range value. In particular, it is constructed to never equal any legal index
+ // key.
+ dassert(cmp != 0);
- updateLocAndTypeBits();
+ if (_forward) {
+ // We may have landed after the end point.
+ return cmp > 0;
+ } else {
+ // We may have landed before the end point.
+ return cmp < 0;
}
+ }
- OperationContext *_txn;
- WiredTigerCursor _cursor;
- const WiredTigerIndex& _idx; // not owned
- const bool _forward;
-
- // Ensures we have the same RU at restore time.
- RecoveryUnit* _savedForCheck;
-
- // These are where this cursor instance is. They are not changed in the face of a failing
- // next().
- KeyString _key;
- KeyString::TypeBits _typeBits;
- RecordId _loc;
- bool _eof = false;
-
- // This differs from _eof in that it always reflects the result of the most recent call to
- // reposition _cursor.
- bool _cursorAtEof = false;
-
- // Used by next to decide to return current position rather than moving. Should be reset to
- // false by any operation that moves the cursor, other than subsequent save/restore pairs.
- bool _lastMoveWasRestore = false;
+ void advanceWTCursor() {
+ WT_CURSOR* c = _cursor.get();
+ int ret = WT_OP_CHECK(_forward ? c->next(c) : c->prev(c));
+ if (ret == WT_NOTFOUND) {
+ _cursorAtEof = true;
+ return;
+ }
+ invariantWTOK(ret);
+ _cursorAtEof = false;
+ }
- KeyString _query;
+ // Seeks to query. Returns true on exact match.
+ bool seekWTCursor(const KeyString& query) {
+ WT_CURSOR* c = _cursor.get();
- std::unique_ptr<KeyString> _endPosition;
- };
+ int cmp = -1;
+ const WiredTigerItem keyItem(query.getBuffer(), query.getSize());
+ c->set_key(c, keyItem.Get());
- class WiredTigerIndexStandardCursor final : public WiredTigerIndexCursorBase {
- public:
- WiredTigerIndexStandardCursor(const WiredTigerIndex& idx, OperationContext *txn,
- bool forward)
- : WiredTigerIndexCursorBase(idx, txn, forward) {
+ int ret = WT_OP_CHECK(c->search_near(c, &cmp));
+ if (ret == WT_NOTFOUND) {
+ _cursorAtEof = true;
+ TRACE_CURSOR << "\t not found";
+ return false;
}
+ invariantWTOK(ret);
+ _cursorAtEof = false;
- void updateLocAndTypeBits() override {
- _loc = KeyString::decodeRecordIdAtEnd(_key.getBuffer(), _key.getSize());
+ TRACE_CURSOR << "\t cmp: " << cmp;
- WT_CURSOR *c = _cursor.get();
- WT_ITEM item;
- invariantWTOK( c->get_value(c, &item ) );
- BufReader br(item.data, item.size);
- _typeBits.resetFromBuffer(&br);
+ if (cmp == 0) {
+ // Found it!
+ return true;
}
- };
- class WiredTigerIndexUniqueCursor final : public WiredTigerIndexCursorBase {
- public:
- WiredTigerIndexUniqueCursor(const WiredTigerIndex& idx, OperationContext *txn, bool forward)
- : WiredTigerIndexCursorBase(idx, txn, forward) {
+ // Make sure we land on a matching key (after/before for forward/reverse).
+ if (_forward ? cmp < 0 : cmp > 0) {
+ advanceWTCursor();
}
- void restore(OperationContext *txn) override {
- WiredTigerIndexCursorBase::restore(txn);
+ return false;
+ }
- // In addition to seeking to the correct key, we also need to make sure that the loc is
- // on the correct side of _loc.
- if (_lastMoveWasRestore) return; // We are on a different key so no need to check loc.
- if (_eof) return;
+ /**
+ * This must be called after moving the cursor to update our cached position. It should not
+ * be called after a restore that did not restore to original state since that does not
+ * logically move the cursor until the following call to next().
+ */
+ void updatePosition() {
+ _lastMoveWasRestore = false;
+ if (_cursorAtEof) {
+ _eof = true;
+ _loc = RecordId();
+ return;
+ }
- // If we get here we need to look at the actual RecordId for this key and make sure we
- // are supposed to see it.
- WT_CURSOR *c = _cursor.get();
- WT_ITEM item;
- invariantWTOK( c->get_value(c, &item ) );
+ _eof = false;
- BufReader br(item.data, item.size);
- RecordId locInIndex = KeyString::decodeRecordId(&br);
+ WT_CURSOR* c = _cursor.get();
+ WT_ITEM item;
+ invariantWTOK(c->get_key(c, &item));
+ _key.resetFromBuffer(item.data, item.size);
- TRACE_CURSOR << "restore"
- << " _loc:" << _loc
- << " locInIndex:" << locInIndex;
+ if (atOrPastEndPointAfterSeeking()) {
+ _eof = true;
+ return;
+ }
- if (locInIndex == _loc) return;
+ updateLocAndTypeBits();
+ }
- _lastMoveWasRestore = true;
- if ( _forward && (locInIndex < _loc)) advanceWTCursor();
- if (!_forward && (locInIndex > _loc)) advanceWTCursor();
- }
+ OperationContext* _txn;
+ WiredTigerCursor _cursor;
+ const WiredTigerIndex& _idx; // not owned
+ const bool _forward;
- void updateLocAndTypeBits() override {
- // We assume that cursors can only ever see unique indexes in their "pristine" state,
- // where no duplicates are possible. The cases where dups are allowed should hold
- // sufficient locks to ensure that no cursor ever sees them.
- WT_CURSOR *c = _cursor.get();
- WT_ITEM item;
- invariantWTOK( c->get_value(c, &item ) );
-
- BufReader br(item.data, item.size);
- _loc = KeyString::decodeRecordId(&br);
- _typeBits.resetFromBuffer(&br);
-
- if (!br.atEof()) {
- severe() << "Unique index cursor seeing multiple records for key "
- << curr(kWantKey)->key;
- fassertFailed(28608);
- }
- }
+ // Ensures we have the same RU at restore time.
+ RecoveryUnit* _savedForCheck;
- boost::optional<IndexKeyEntry> seekExact(const BSONObj& key, RequestedInfo parts) override {
- _query.resetToKey(stripFieldNames(key), _idx.ordering());
- const WiredTigerItem keyItem(_query.getBuffer(), _query.getSize());
+ // These are where this cursor instance is. They are not changed in the face of a failing
+ // next().
+ KeyString _key;
+ KeyString::TypeBits _typeBits;
+ RecordId _loc;
+ bool _eof = false;
- WT_CURSOR* c = _cursor.get();
- c->set_key(c, keyItem.Get());
+ // This differs from _eof in that it always reflects the result of the most recent call to
+ // reposition _cursor.
+ bool _cursorAtEof = false;
- // Using search rather than search_near.
- int ret = WT_OP_CHECK(c->search(c));
- if (ret != WT_NOTFOUND) invariantWTOK(ret);
- _cursorAtEof = ret == WT_NOTFOUND;
- updatePosition();
- dassert(_eof || _key.compare(_query) == 0);
- return curr(parts);
- }
- };
+ // Used by next to decide to return current position rather than moving. Should be reset to
+ // false by any operation that moves the cursor, other than subsequent save/restore pairs.
+ bool _lastMoveWasRestore = false;
-} // namespace
+ KeyString _query;
- WiredTigerIndexUnique::WiredTigerIndexUnique( OperationContext* ctx,
- const std::string& uri,
- const IndexDescriptor* desc )
- : WiredTigerIndex( ctx, uri, desc ) {
- }
+ std::unique_ptr<KeyString> _endPosition;
+};
- std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexUnique::newCursor(
- OperationContext* txn,
- bool forward) const {
- return stdx::make_unique<WiredTigerIndexUniqueCursor>(*this, txn, forward);
- }
+class WiredTigerIndexStandardCursor final : public WiredTigerIndexCursorBase {
+public:
+ WiredTigerIndexStandardCursor(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
+ : WiredTigerIndexCursorBase(idx, txn, forward) {}
+
+ void updateLocAndTypeBits() override {
+ _loc = KeyString::decodeRecordIdAtEnd(_key.getBuffer(), _key.getSize());
- SortedDataBuilderInterface* WiredTigerIndexUnique::getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) {
- return new UniqueBulkBuilder(this, txn, dupsAllowed);
+ WT_CURSOR* c = _cursor.get();
+ WT_ITEM item;
+ invariantWTOK(c->get_value(c, &item));
+ BufReader br(item.data, item.size);
+ _typeBits.resetFromBuffer(&br);
}
+};
- Status WiredTigerIndexUnique::_insert( WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed ) {
+class WiredTigerIndexUniqueCursor final : public WiredTigerIndexCursorBase {
+public:
+ WiredTigerIndexUniqueCursor(const WiredTigerIndex& idx, OperationContext* txn, bool forward)
+ : WiredTigerIndexCursorBase(idx, txn, forward) {}
- const KeyString data( key, _ordering );
- WiredTigerItem keyItem( data.getBuffer(), data.getSize() );
+ void restore(OperationContext* txn) override {
+ WiredTigerIndexCursorBase::restore(txn);
- KeyString value(loc);
- if (!data.getTypeBits().isAllZeros())
- value.appendTypeBits(data.getTypeBits());
+ // In addition to seeking to the correct key, we also need to make sure that the loc is
+ // on the correct side of _loc.
+ if (_lastMoveWasRestore)
+ return; // We are on a different key so no need to check loc.
+ if (_eof)
+ return;
- WiredTigerItem valueItem(value.getBuffer(), value.getSize());
- c->set_key( c, keyItem.Get() );
- c->set_value( c, valueItem.Get() );
- int ret = WT_OP_CHECK(c->insert(c));
+ // If we get here we need to look at the actual RecordId for this key and make sure we
+ // are supposed to see it.
+ WT_CURSOR* c = _cursor.get();
+ WT_ITEM item;
+ invariantWTOK(c->get_value(c, &item));
- if ( ret != WT_DUPLICATE_KEY ) {
- return wtRCToStatus( ret );
- }
+ BufReader br(item.data, item.size);
+ RecordId locInIndex = KeyString::decodeRecordId(&br);
- // we might be in weird mode where there might be multiple values
- // we put them all in the "list"
- // Note that we can't omit AllZeros when there are multiple locs for a value. When we remove
- // down to a single value, it will be cleaned up.
- ret = WT_OP_CHECK(c->search(c));
- invariantWTOK( ret );
-
- WT_ITEM old;
- invariantWTOK( c->get_value(c, &old ) );
-
- bool insertedLoc = false;
-
- value.resetToEmpty();
- BufReader br(old.data, old.size);
- while (br.remaining()) {
- RecordId locInIndex = KeyString::decodeRecordId(&br);
- if (loc == locInIndex)
- return Status::OK(); // already in index
-
- if (!insertedLoc && loc < locInIndex) {
- value.appendRecordId(loc);
- value.appendTypeBits(data.getTypeBits());
- insertedLoc = true;
- }
+ TRACE_CURSOR << "restore"
+ << " _loc:" << _loc << " locInIndex:" << locInIndex;
- // Copy from old to new value
- value.appendRecordId(locInIndex);
- value.appendTypeBits(KeyString::TypeBits::fromBuffer(&br));
- }
+ if (locInIndex == _loc)
+ return;
- if (!dupsAllowed)
- return dupKeyError(key);
+ _lastMoveWasRestore = true;
+ if (_forward && (locInIndex < _loc))
+ advanceWTCursor();
+ if (!_forward && (locInIndex > _loc))
+ advanceWTCursor();
+ }
- if (!insertedLoc) {
- // This loc is higher than all currently in the index for this key
- value.appendRecordId(loc);
- value.appendTypeBits(data.getTypeBits());
+ void updateLocAndTypeBits() override {
+ // We assume that cursors can only ever see unique indexes in their "pristine" state,
+ // where no duplicates are possible. The cases where dups are allowed should hold
+ // sufficient locks to ensure that no cursor ever sees them.
+ WT_CURSOR* c = _cursor.get();
+ WT_ITEM item;
+ invariantWTOK(c->get_value(c, &item));
+
+ BufReader br(item.data, item.size);
+ _loc = KeyString::decodeRecordId(&br);
+ _typeBits.resetFromBuffer(&br);
+
+ if (!br.atEof()) {
+ severe() << "Unique index cursor seeing multiple records for key "
+ << curr(kWantKey)->key;
+ fassertFailed(28608);
}
-
- valueItem = WiredTigerItem(value.getBuffer(), value.getSize());
- c->set_value( c, valueItem.Get() );
- return wtRCToStatus( c->update( c ) );
}
- void WiredTigerIndexUnique::_unindex( WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed ) {
- KeyString data( key, _ordering );
- WiredTigerItem keyItem( data.getBuffer(), data.getSize() );
- c->set_key( c, keyItem.Get() );
-
- if ( !dupsAllowed ) {
- // nice and clear
- int ret = WT_OP_CHECK(c->remove(c));
- if (ret == WT_NOTFOUND) {
- return;
- }
- invariantWTOK(ret);
- return;
- }
+ boost::optional<IndexKeyEntry> seekExact(const BSONObj& key, RequestedInfo parts) override {
+ _query.resetToKey(stripFieldNames(key), _idx.ordering());
+ const WiredTigerItem keyItem(_query.getBuffer(), _query.getSize());
- // dups are allowed, so we have to deal with a vector of RecordIds.
+ WT_CURSOR* c = _cursor.get();
+ c->set_key(c, keyItem.Get());
+ // Using search rather than search_near.
int ret = WT_OP_CHECK(c->search(c));
- if ( ret == WT_NOTFOUND )
- return;
- invariantWTOK( ret );
-
- WT_ITEM old;
- invariantWTOK( c->get_value(c, &old ) );
-
- bool foundLoc = false;
- std::vector<std::pair<RecordId, KeyString::TypeBits> > records;
-
- BufReader br(old.data, old.size);
- while (br.remaining()) {
- RecordId locInIndex = KeyString::decodeRecordId(&br);
- KeyString::TypeBits typeBits = KeyString::TypeBits::fromBuffer(&br);
+ if (ret != WT_NOTFOUND)
+ invariantWTOK(ret);
+ _cursorAtEof = ret == WT_NOTFOUND;
+ updatePosition();
+ dassert(_eof || _key.compare(_query) == 0);
+ return curr(parts);
+ }
+};
+
+} // namespace
+
+WiredTigerIndexUnique::WiredTigerIndexUnique(OperationContext* ctx,
+ const std::string& uri,
+ const IndexDescriptor* desc)
+ : WiredTigerIndex(ctx, uri, desc) {}
+
+std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexUnique::newCursor(OperationContext* txn,
+ bool forward) const {
+ return stdx::make_unique<WiredTigerIndexUniqueCursor>(*this, txn, forward);
+}
+
+SortedDataBuilderInterface* WiredTigerIndexUnique::getBulkBuilder(OperationContext* txn,
+ bool dupsAllowed) {
+ return new UniqueBulkBuilder(this, txn, dupsAllowed);
+}
+
+Status WiredTigerIndexUnique::_insert(WT_CURSOR* c,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ const KeyString data(key, _ordering);
+ WiredTigerItem keyItem(data.getBuffer(), data.getSize());
+
+ KeyString value(loc);
+ if (!data.getTypeBits().isAllZeros())
+ value.appendTypeBits(data.getTypeBits());
+
+ WiredTigerItem valueItem(value.getBuffer(), value.getSize());
+ c->set_key(c, keyItem.Get());
+ c->set_value(c, valueItem.Get());
+ int ret = WT_OP_CHECK(c->insert(c));
+
+ if (ret != WT_DUPLICATE_KEY) {
+ return wtRCToStatus(ret);
+ }
- if (loc == locInIndex) {
- if (records.empty() && !br.remaining()) {
- // This is the common case: we are removing the only loc for this key.
- // Remove the whole entry.
- invariantWTOK(WT_OP_CHECK(c->remove(c)));
- return;
- }
+ // we might be in weird mode where there might be multiple values
+ // we put them all in the "list"
+ // Note that we can't omit AllZeros when there are multiple locs for a value. When we remove
+ // down to a single value, it will be cleaned up.
+ ret = WT_OP_CHECK(c->search(c));
+ invariantWTOK(ret);
- foundLoc = true;
- continue;
- }
+ WT_ITEM old;
+ invariantWTOK(c->get_value(c, &old));
- records.push_back(std::make_pair(locInIndex, typeBits));
- }
+ bool insertedLoc = false;
- if (!foundLoc) {
- warning().stream() << loc << " not found in the index for key " << key;
- return; // nothing to do
- }
+ value.resetToEmpty();
+ BufReader br(old.data, old.size);
+ while (br.remaining()) {
+ RecordId locInIndex = KeyString::decodeRecordId(&br);
+ if (loc == locInIndex)
+ return Status::OK(); // already in index
- // Put other locs for this key back in the index.
- KeyString newValue;
- invariant(!records.empty());
- for (size_t i = 0; i < records.size(); i++) {
- newValue.appendRecordId(records[i].first);
- // When there is only one record, we can omit AllZeros TypeBits. Otherwise they need
- // to be included.
- if (!(records[i].second.isAllZeros() && records.size() == 1)) {
- newValue.appendTypeBits(records[i].second);
- }
+ if (!insertedLoc && loc < locInIndex) {
+ value.appendRecordId(loc);
+ value.appendTypeBits(data.getTypeBits());
+ insertedLoc = true;
}
- WiredTigerItem valueItem = WiredTigerItem(newValue.getBuffer(), newValue.getSize());
- c->set_value( c, valueItem.Get() );
- invariantWTOK( c->update( c ) );
+ // Copy from old to new value
+ value.appendRecordId(locInIndex);
+ value.appendTypeBits(KeyString::TypeBits::fromBuffer(&br));
}
- // ------------------------------
+ if (!dupsAllowed)
+ return dupKeyError(key);
- WiredTigerIndexStandard::WiredTigerIndexStandard( OperationContext* ctx,
- const std::string& uri,
- const IndexDescriptor* desc )
- : WiredTigerIndex( ctx, uri, desc ) {
+ if (!insertedLoc) {
+ // This loc is higher than all currently in the index for this key
+ value.appendRecordId(loc);
+ value.appendTypeBits(data.getTypeBits());
}
- std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexStandard::newCursor(
- OperationContext* txn,
- bool forward) const {
- return stdx::make_unique<WiredTigerIndexStandardCursor>(*this, txn, forward);
+ valueItem = WiredTigerItem(value.getBuffer(), value.getSize());
+ c->set_value(c, valueItem.Get());
+ return wtRCToStatus(c->update(c));
+}
+
+void WiredTigerIndexUnique::_unindex(WT_CURSOR* c,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ KeyString data(key, _ordering);
+ WiredTigerItem keyItem(data.getBuffer(), data.getSize());
+ c->set_key(c, keyItem.Get());
+
+ if (!dupsAllowed) {
+ // nice and clear
+ int ret = WT_OP_CHECK(c->remove(c));
+ if (ret == WT_NOTFOUND) {
+ return;
+ }
+ invariantWTOK(ret);
+ return;
}
- SortedDataBuilderInterface* WiredTigerIndexStandard::getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) {
- // We aren't unique so dups better be allowed.
- invariant(dupsAllowed);
- return new StandardBulkBuilder(this, txn);
- }
+ // dups are allowed, so we have to deal with a vector of RecordIds.
- Status WiredTigerIndexStandard::_insert( WT_CURSOR* c,
- const BSONObj& keyBson,
- const RecordId& loc,
- bool dupsAllowed ) {
- invariant( dupsAllowed );
+ int ret = WT_OP_CHECK(c->search(c));
+ if (ret == WT_NOTFOUND)
+ return;
+ invariantWTOK(ret);
- TRACE_INDEX << " key: " << keyBson << " loc: " << loc;
+ WT_ITEM old;
+ invariantWTOK(c->get_value(c, &old));
- KeyString key( keyBson, _ordering, loc );
- WiredTigerItem keyItem( key.getBuffer(), key.getSize() );
+ bool foundLoc = false;
+ std::vector<std::pair<RecordId, KeyString::TypeBits>> records;
- WiredTigerItem valueItem =
- key.getTypeBits().isAllZeros() ? emptyItem
- : WiredTigerItem(key.getTypeBits().getBuffer(),
- key.getTypeBits().getSize());
+ BufReader br(old.data, old.size);
+ while (br.remaining()) {
+ RecordId locInIndex = KeyString::decodeRecordId(&br);
+ KeyString::TypeBits typeBits = KeyString::TypeBits::fromBuffer(&br);
- c->set_key(c, keyItem.Get());
- c->set_value(c, valueItem.Get());
- int ret = WT_OP_CHECK(c->insert(c));
-
- if ( ret != WT_DUPLICATE_KEY )
- return wtRCToStatus( ret );
- // If the record was already in the index, we just return OK.
- // This can happen, for example, when building a background index while documents are being
- // written and reindexed.
- return Status::OK();
- }
+ if (loc == locInIndex) {
+ if (records.empty() && !br.remaining()) {
+ // This is the common case: we are removing the only loc for this key.
+ // Remove the whole entry.
+ invariantWTOK(WT_OP_CHECK(c->remove(c)));
+ return;
+ }
- void WiredTigerIndexStandard::_unindex( WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed ) {
- invariant( dupsAllowed );
- KeyString data( key, _ordering, loc );
- WiredTigerItem item( data.getBuffer(), data.getSize() );
- c->set_key(c, item.Get() );
- int ret = WT_OP_CHECK(c->remove(c));
- if (ret != WT_NOTFOUND) {
- invariantWTOK(ret);
+ foundLoc = true;
+ continue;
}
+
+ records.push_back(std::make_pair(locInIndex, typeBits));
+ }
+
+ if (!foundLoc) {
+ warning().stream() << loc << " not found in the index for key " << key;
+ return; // nothing to do
}
- // ---------------- for compatability with rc4 and previous ------
+ // Put other locs for this key back in the index.
+ KeyString newValue;
+ invariant(!records.empty());
+ for (size_t i = 0; i < records.size(); i++) {
+ newValue.appendRecordId(records[i].first);
+ // When there is only one record, we can omit AllZeros TypeBits. Otherwise they need
+ // to be included.
+ if (!(records[i].second.isAllZeros() && records.size() == 1)) {
+ newValue.appendTypeBits(records[i].second);
+ }
+ }
- int index_collator_customize(WT_COLLATOR *coll,
- WT_SESSION *s,
- const char *uri,
- WT_CONFIG_ITEM *metadata,
- WT_COLLATOR **collp) {
- fassertFailedWithStatusNoTrace(28580,
- Status(ErrorCodes::UnsupportedFormat, str::stream()
+ WiredTigerItem valueItem = WiredTigerItem(newValue.getBuffer(), newValue.getSize());
+ c->set_value(c, valueItem.Get());
+ invariantWTOK(c->update(c));
+}
+
+// ------------------------------
+
+WiredTigerIndexStandard::WiredTigerIndexStandard(OperationContext* ctx,
+ const std::string& uri,
+ const IndexDescriptor* desc)
+ : WiredTigerIndex(ctx, uri, desc) {}
+
+std::unique_ptr<SortedDataInterface::Cursor> WiredTigerIndexStandard::newCursor(
+ OperationContext* txn, bool forward) const {
+ return stdx::make_unique<WiredTigerIndexStandardCursor>(*this, txn, forward);
+}
+
+SortedDataBuilderInterface* WiredTigerIndexStandard::getBulkBuilder(OperationContext* txn,
+ bool dupsAllowed) {
+ // We aren't unique so dups better be allowed.
+ invariant(dupsAllowed);
+ return new StandardBulkBuilder(this, txn);
+}
+
+Status WiredTigerIndexStandard::_insert(WT_CURSOR* c,
+ const BSONObj& keyBson,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ invariant(dupsAllowed);
+
+ TRACE_INDEX << " key: " << keyBson << " loc: " << loc;
+
+ KeyString key(keyBson, _ordering, loc);
+ WiredTigerItem keyItem(key.getBuffer(), key.getSize());
+
+ WiredTigerItem valueItem = key.getTypeBits().isAllZeros()
+ ? emptyItem
+ : WiredTigerItem(key.getTypeBits().getBuffer(), key.getTypeBits().getSize());
+
+ c->set_key(c, keyItem.Get());
+ c->set_value(c, valueItem.Get());
+ int ret = WT_OP_CHECK(c->insert(c));
+
+ if (ret != WT_DUPLICATE_KEY)
+ return wtRCToStatus(ret);
+ // If the record was already in the index, we just return OK.
+ // This can happen, for example, when building a background index while documents are being
+ // written and reindexed.
+ return Status::OK();
+}
+
+void WiredTigerIndexStandard::_unindex(WT_CURSOR* c,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) {
+ invariant(dupsAllowed);
+ KeyString data(key, _ordering, loc);
+ WiredTigerItem item(data.getBuffer(), data.getSize());
+ c->set_key(c, item.Get());
+ int ret = WT_OP_CHECK(c->remove(c));
+ if (ret != WT_NOTFOUND) {
+ invariantWTOK(ret);
+ }
+}
+
+// ---------------- for compatibility with rc4 and previous ------
+
+int index_collator_customize(WT_COLLATOR* coll,
+ WT_SESSION* s,
+ const char* uri,
+ WT_CONFIG_ITEM* metadata,
+ WT_COLLATOR** collp) {
+ fassertFailedWithStatusNoTrace(28580,
+ Status(ErrorCodes::UnsupportedFormat,
+ str::stream()
<< "Found an index from an unsupported RC version."
<< " Please restart with --repair to fix."));
- }
+}
- extern "C" MONGO_COMPILER_API_EXPORT int index_collator_extension(WT_CONNECTION *conn,
- WT_CONFIG_ARG *cfg) {
- static WT_COLLATOR idx_static;
+extern "C" MONGO_COMPILER_API_EXPORT int index_collator_extension(WT_CONNECTION* conn,
+ WT_CONFIG_ARG* cfg) {
+ static WT_COLLATOR idx_static;
- idx_static.customize = index_collator_customize;
- return conn->add_collator(conn, "mongo_index", &idx_static, NULL);
-
- }
+ idx_static.customize = index_collator_customize;
+ return conn->add_collator(conn, "mongo_index", &idx_static, NULL);
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index c2b3d42538f..1d61b2b3c9e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -37,156 +37,154 @@
namespace mongo {
- class IndexCatalogEntry;
- class IndexDescriptor;
- struct WiredTigerItem;
-
- class WiredTigerIndex : public SortedDataInterface {
- public:
-
- /**
- * Parses index options for wired tiger configuration string suitable for table creation.
- * The document 'options' is typically obtained from the 'storageEngine.wiredTiger' field
- * of an IndexDescriptor's info object.
- */
- static StatusWith<std::string> parseIndexOptions(const BSONObj& options);
-
- /**
- * Creates a configuration string suitable for 'config' parameter in WT_SESSION::create().
- * Configuration string is constructed from:
- * built-in defaults
- * 'extraConfig'
- * storageEngine.wiredTiger.configString in index descriptor's info object.
- * Performs simple validation on the supplied parameters.
- * Returns error status if validation fails.
- * Note that even if this function returns an OK status, WT_SESSION:create() may still
- * fail with the constructed configuration string.
- */
- static StatusWith<std::string> generateCreateString(const std::string& extraConfig,
- const IndexDescriptor& desc);
-
- /**
- * Creates a WiredTiger table suitable for implementing a MongoDB index.
- * 'config' should be created with generateCreateString().
- */
- static int Create(OperationContext* txn,
+class IndexCatalogEntry;
+class IndexDescriptor;
+struct WiredTigerItem;
+
+class WiredTigerIndex : public SortedDataInterface {
+public:
+ /**
+     * Parses index options into a WiredTiger configuration string suitable for table creation.
+ * The document 'options' is typically obtained from the 'storageEngine.wiredTiger' field
+ * of an IndexDescriptor's info object.
+ */
+ static StatusWith<std::string> parseIndexOptions(const BSONObj& options);
+
+ /**
+ * Creates a configuration string suitable for 'config' parameter in WT_SESSION::create().
+ * Configuration string is constructed from:
+ * built-in defaults
+ * 'extraConfig'
+ * storageEngine.wiredTiger.configString in index descriptor's info object.
+ * Performs simple validation on the supplied parameters.
+ * Returns error status if validation fails.
+     * Note that even if this function returns an OK status, WT_SESSION::create() may still
+ * fail with the constructed configuration string.
+ */
+ static StatusWith<std::string> generateCreateString(const std::string& extraConfig,
+ const IndexDescriptor& desc);
+
+ /**
+ * Creates a WiredTiger table suitable for implementing a MongoDB index.
+ * 'config' should be created with generateCreateString().
+ */
+ static int Create(OperationContext* txn, const std::string& uri, const std::string& config);
+
+ /**
+ * @param unique - If this is a unique index.
+     * Note: even if unique, it may be allowed to be non-unique at times.
+ */
+ WiredTigerIndex(OperationContext* ctx, const std::string& uri, const IndexDescriptor* desc);
+
+ virtual Status insert(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed);
+
+ virtual void unindex(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed);
+
+ virtual void fullValidate(OperationContext* txn,
+ bool full,
+ long long* numKeysOut,
+ BSONObjBuilder* output) const;
+ virtual bool appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* output,
+ double scale) const;
+ virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc);
+
+ virtual bool isEmpty(OperationContext* txn);
+
+ virtual long long getSpaceUsedBytes(OperationContext* txn) const;
+
+ bool isDup(WT_CURSOR* c, const BSONObj& key, const RecordId& loc);
+
+ virtual Status initAsEmpty(OperationContext* txn);
+
+ const std::string& uri() const {
+ return _uri;
+ }
+
+ uint64_t instanceId() const {
+ return _instanceId;
+ }
+ Ordering ordering() const {
+ return _ordering;
+ }
+
+ virtual bool unique() const = 0;
+
+ Status dupKeyError(const BSONObj& key);
+
+protected:
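+    // Implemented by WiredTigerIndexUnique / WiredTigerIndexStandard; insert() and unindex()
+    // open a cursor and delegate the actual key insert/remove to these hooks.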
+ virtual Status _insert(WT_CURSOR* c,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) = 0;
+
+ virtual void _unindex(WT_CURSOR* c,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) = 0;
+
+ class BulkBuilder;
+ class StandardBulkBuilder;
+ class UniqueBulkBuilder;
+
+ const Ordering _ordering;
+ std::string _uri;
+ uint64_t _instanceId;
+ std::string _collectionNamespace;
+ std::string _indexName;
+};
+
+
+class WiredTigerIndexUnique : public WiredTigerIndex {
+public:
+ WiredTigerIndexUnique(OperationContext* ctx,
const std::string& uri,
- const std::string& config);
+ const IndexDescriptor* desc);
- /**
- * @param unique - If this is a unique index.
- * Note: even if unique, it may be allowed ot be non-unique at times.
- */
- WiredTigerIndex(OperationContext* ctx,
- const std::string& uri,
- const IndexDescriptor* desc);
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ bool forward) const override;
- virtual Status insert(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed);
+ SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) override;
- virtual void unindex(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed);
+ bool unique() const override {
+ return true;
+ }
- virtual void fullValidate(OperationContext* txn, bool full, long long *numKeysOut,
- BSONObjBuilder* output) const;
- virtual bool appendCustomStats(OperationContext* txn, BSONObjBuilder* output, double scale)
- const;
- virtual Status dupKeyCheck(OperationContext* txn, const BSONObj& key, const RecordId& loc);
+ Status _insert(WT_CURSOR* c,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) override;
- virtual bool isEmpty(OperationContext* txn);
+ void _unindex(WT_CURSOR* c, const BSONObj& key, const RecordId& loc, bool dupsAllowed) override;
+};
- virtual long long getSpaceUsedBytes( OperationContext* txn ) const;
+class WiredTigerIndexStandard : public WiredTigerIndex {
+public:
+ WiredTigerIndexStandard(OperationContext* ctx,
+ const std::string& uri,
+ const IndexDescriptor* desc);
- bool isDup(WT_CURSOR *c, const BSONObj& key, const RecordId& loc );
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ bool forward) const override;
- virtual Status initAsEmpty(OperationContext* txn);
+ SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn, bool dupsAllowed) override;
- const std::string& uri() const { return _uri; }
+ bool unique() const override {
+ return false;
+ }
- uint64_t instanceId() const { return _instanceId; }
- Ordering ordering() const { return _ordering; }
+ Status _insert(WT_CURSOR* c,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed) override;
- virtual bool unique() const = 0;
+ void _unindex(WT_CURSOR* c, const BSONObj& key, const RecordId& loc, bool dupsAllowed) override;
+};
- Status dupKeyError(const BSONObj& key);
-
- protected:
-
- virtual Status _insert( WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed ) = 0;
-
- virtual void _unindex( WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed ) = 0;
-
- class BulkBuilder;
- class StandardBulkBuilder;
- class UniqueBulkBuilder;
-
- const Ordering _ordering;
- std::string _uri;
- uint64_t _instanceId;
- std::string _collectionNamespace;
- std::string _indexName;
- };
-
-
- class WiredTigerIndexUnique : public WiredTigerIndex {
- public:
- WiredTigerIndexUnique( OperationContext* ctx,
- const std::string& uri,
- const IndexDescriptor* desc );
-
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
- bool forward) const override;
-
- SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) override;
-
- bool unique() const override { return true; }
-
- Status _insert(WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) override;
-
- void _unindex(WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) override;
- };
-
- class WiredTigerIndexStandard : public WiredTigerIndex {
- public:
- WiredTigerIndexStandard( OperationContext* ctx,
- const std::string& uri,
- const IndexDescriptor* desc );
-
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
- bool forward) const override;
-
- SortedDataBuilderInterface* getBulkBuilder(OperationContext* txn,
- bool dupsAllowed) override;
-
- bool unique() const override { return false; }
-
- Status _insert(WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) override;
-
- void _unindex(WT_CURSOR* c,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) override;
-
- };
-
-} // namespace
+} // namespace
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
index f7e308e6cf2..ea468aa5d76 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
@@ -47,96 +47,95 @@
namespace mongo {
- using std::string;
+using std::string;
- class MyHarnessHelper final : public HarnessHelper {
- public:
- MyHarnessHelper() : _dbpath( "wt_test" ), _conn( NULL ) {
+class MyHarnessHelper final : public HarnessHelper {
+public:
+ MyHarnessHelper() : _dbpath("wt_test"), _conn(NULL) {
+ const char* config = "create,cache_size=1G,";
+ int ret = wiredtiger_open(_dbpath.path().c_str(), NULL, config, &_conn);
+ invariantWTOK(ret);
- const char* config = "create,cache_size=1G,";
- int ret = wiredtiger_open( _dbpath.path().c_str(), NULL, config, &_conn);
- invariantWTOK( ret );
-
- _sessionCache = new WiredTigerSessionCache( _conn );
- }
-
- ~MyHarnessHelper() final {
- delete _sessionCache;
- _conn->close(_conn, NULL);
- }
-
- std::unique_ptr<SortedDataInterface> newSortedDataInterface( bool unique ) final {
- std::string ns = "test.wt";
- OperationContextNoop txn( newRecoveryUnit().release() );
-
- BSONObj spec = BSON( "key" << BSON( "a" << 1 ) <<
- "name" << "testIndex" <<
- "ns" << ns );
-
- IndexDescriptor desc( NULL, "", spec );
-
- StatusWith<std::string> result = WiredTigerIndex::generateCreateString("", desc);
- ASSERT_OK(result.getStatus());
-
- string uri = "table:" + ns;
- invariantWTOK( WiredTigerIndex::Create(&txn, uri, result.getValue()));
+ _sessionCache = new WiredTigerSessionCache(_conn);
+ }
- if ( unique )
- return stdx::make_unique<WiredTigerIndexUnique>( &txn, uri, &desc );
- return stdx::make_unique<WiredTigerIndexStandard>( &txn, uri, &desc );
- }
+ ~MyHarnessHelper() final {
+ delete _sessionCache;
+ _conn->close(_conn, NULL);
+ }
- std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
- return stdx::make_unique<WiredTigerRecoveryUnit>( _sessionCache );
- }
+ std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique) final {
+ std::string ns = "test.wt";
+ OperationContextNoop txn(newRecoveryUnit().release());
- private:
- unittest::TempDir _dbpath;
- WT_CONNECTION* _conn;
- WiredTigerSessionCache* _sessionCache;
- };
+ BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
+ << "testIndex"
+ << "ns" << ns);
- std::unique_ptr<HarnessHelper> newHarnessHelper() {
- return stdx::make_unique<MyHarnessHelper>();
- }
+ IndexDescriptor desc(NULL, "", spec);
- TEST(WiredTigerIndexTest, GenerateCreateStringEmptyDocument) {
- BSONObj spec = fromjson("{}");
- StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
+ StatusWith<std::string> result = WiredTigerIndex::generateCreateString("", desc);
ASSERT_OK(result.getStatus());
- ASSERT_EQ(result.getValue(), ""); // "," would also be valid.
- }
- TEST(WiredTigerIndexTest, GenerateCreateStringUnknownField) {
- BSONObj spec = fromjson("{unknownField: 1}");
- StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
- const Status& status = result.getStatus();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::InvalidOptions, status);
- }
+ string uri = "table:" + ns;
+ invariantWTOK(WiredTigerIndex::Create(&txn, uri, result.getValue()));
- TEST(WiredTigerIndexTest, GenerateCreateStringNonStringConfig) {
- BSONObj spec = fromjson("{configString: 12345}");
- StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
- const Status& status = result.getStatus();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+ if (unique)
+ return stdx::make_unique<WiredTigerIndexUnique>(&txn, uri, &desc);
+ return stdx::make_unique<WiredTigerIndexStandard>(&txn, uri, &desc);
}
- TEST(WiredTigerIndexTest, GenerateCreateStringEmptyConfigString) {
- BSONObj spec = fromjson("{configString: ''}");
- StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
- ASSERT_OK(result.getStatus());
- ASSERT_EQ(result.getValue(), ","); // "" would also be valid.
+ std::unique_ptr<RecoveryUnit> newRecoveryUnit() final {
+ return stdx::make_unique<WiredTigerRecoveryUnit>(_sessionCache);
}
- TEST(WiredTigerIndexTest, GenerateCreateStringValidConfigFormat) {
- // TODO eventually this should fail since "abc" is not a valid WT option.
- BSONObj spec = fromjson("{configString: 'abc=def'}");
- StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
- const Status& status = result.getStatus();
- ASSERT_OK(status);
- ASSERT_EQ(result.getValue(), "abc=def,");
- }
+private:
+ unittest::TempDir _dbpath;
+ WT_CONNECTION* _conn;
+ WiredTigerSessionCache* _sessionCache;
+};
+
+std::unique_ptr<HarnessHelper> newHarnessHelper() {
+ return stdx::make_unique<MyHarnessHelper>();
+}
+
+TEST(WiredTigerIndexTest, GenerateCreateStringEmptyDocument) {
+ BSONObj spec = fromjson("{}");
+ StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQ(result.getValue(), ""); // "," would also be valid.
+}
+
+TEST(WiredTigerIndexTest, GenerateCreateStringUnknownField) {
+ BSONObj spec = fromjson("{unknownField: 1}");
+ StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
+ const Status& status = result.getStatus();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, status);
+}
+
+TEST(WiredTigerIndexTest, GenerateCreateStringNonStringConfig) {
+ BSONObj spec = fromjson("{configString: 12345}");
+ StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
+ const Status& status = result.getStatus();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
+
+TEST(WiredTigerIndexTest, GenerateCreateStringEmptyConfigString) {
+ BSONObj spec = fromjson("{configString: ''}");
+ StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQ(result.getValue(), ","); // "" would also be valid.
+}
+
+TEST(WiredTigerIndexTest, GenerateCreateStringValidConfigFormat) {
+ // TODO eventually this should fail since "abc" is not a valid WT option.
+ BSONObj spec = fromjson("{configString: 'abc=def'}");
+ StatusWith<std::string> result = WiredTigerIndex::parseIndexOptions(spec);
+ const Status& status = result.getStatus();
+ ASSERT_OK(status);
+ ASSERT_EQ(result.getValue(), "abc=def,");
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
index 23d69d934c2..45888019c49 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
@@ -52,81 +52,74 @@
namespace mongo {
- namespace {
- class WiredTigerFactory : public StorageEngine::Factory {
- public:
- virtual ~WiredTigerFactory(){}
- virtual StorageEngine* create(const StorageGlobalParams& params,
- const StorageEngineLockFile& lockFile) const {
- if (lockFile.createdByUncleanShutdown()) {
- warning() << "Recovering data from the last clean checkpoint.";
- }
- WiredTigerKVEngine* kv = new WiredTigerKVEngine( params.dbpath,
- wiredTigerGlobalOptions.engineConfig,
- params.dur,
- params.repair );
- kv->setRecordStoreExtraOptions( wiredTigerGlobalOptions.collectionConfig );
- kv->setSortedDataInterfaceExtraOptions( wiredTigerGlobalOptions.indexConfig );
- // Intentionally leaked.
- new WiredTigerServerStatusSection(kv);
- new WiredTigerEngineRuntimeConfigParameter(kv);
-
- KVStorageEngineOptions options;
- options.directoryPerDB = params.directoryperdb;
- options.directoryForIndexes = wiredTigerGlobalOptions.directoryForIndexes;
- options.forRepair = params.repair;
- return new KVStorageEngine( kv, options );
- }
-
- virtual StringData getCanonicalName() const {
- return kWiredTigerEngineName;
- }
-
- virtual Status validateCollectionStorageOptions(const BSONObj& options) const {
- return WiredTigerRecordStore::parseOptionsField(options).getStatus();
- }
-
- virtual Status validateIndexStorageOptions(const BSONObj& options) const {
- return WiredTigerIndex::parseIndexOptions(options).getStatus();
- }
-
- virtual Status validateMetadata(const StorageEngineMetadata& metadata,
- const StorageGlobalParams& params) const {
- Status status = metadata.validateStorageEngineOption(
- "directoryPerDB", params.directoryperdb);
- if (!status.isOK()) {
- return status;
- }
-
- status = metadata.validateStorageEngineOption(
- "directoryForIndexes", wiredTigerGlobalOptions.directoryForIndexes);
- if (!status.isOK()) {
- return status;
- }
+namespace {
+class WiredTigerFactory : public StorageEngine::Factory {
+public:
+ virtual ~WiredTigerFactory() {}
+ virtual StorageEngine* create(const StorageGlobalParams& params,
+ const StorageEngineLockFile& lockFile) const {
+ if (lockFile.createdByUncleanShutdown()) {
+ warning() << "Recovering data from the last clean checkpoint.";
+ }
+ WiredTigerKVEngine* kv = new WiredTigerKVEngine(
+ params.dbpath, wiredTigerGlobalOptions.engineConfig, params.dur, params.repair);
+ kv->setRecordStoreExtraOptions(wiredTigerGlobalOptions.collectionConfig);
+ kv->setSortedDataInterfaceExtraOptions(wiredTigerGlobalOptions.indexConfig);
+ // Intentionally leaked.
+ new WiredTigerServerStatusSection(kv);
+ new WiredTigerEngineRuntimeConfigParameter(kv);
+
+ KVStorageEngineOptions options;
+ options.directoryPerDB = params.directoryperdb;
+ options.directoryForIndexes = wiredTigerGlobalOptions.directoryForIndexes;
+ options.forRepair = params.repair;
+ return new KVStorageEngine(kv, options);
+ }
- return Status::OK();
- }
+ virtual StringData getCanonicalName() const {
+ return kWiredTigerEngineName;
+ }
- virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
- BSONObjBuilder builder;
- builder.appendBool("directoryPerDB", params.directoryperdb);
- builder.appendBool("directoryForIndexes",
- wiredTigerGlobalOptions.directoryForIndexes);
- WiredTigerCustomizationHooks::get(getGlobalServiceContext())->appendUID(&builder);
- return builder.obj();
- }
+ virtual Status validateCollectionStorageOptions(const BSONObj& options) const {
+ return WiredTigerRecordStore::parseOptionsField(options).getStatus();
+ }
- };
- } // namespace
+ virtual Status validateIndexStorageOptions(const BSONObj& options) const {
+ return WiredTigerIndex::parseIndexOptions(options).getStatus();
+ }
- MONGO_INITIALIZER_WITH_PREREQUISITES(WiredTigerEngineInit,
- ("SetGlobalEnvironment"))
- (InitializerContext* context ) {
+ virtual Status validateMetadata(const StorageEngineMetadata& metadata,
+ const StorageGlobalParams& params) const {
+ Status status =
+ metadata.validateStorageEngineOption("directoryPerDB", params.directoryperdb);
+ if (!status.isOK()) {
+ return status;
+ }
- getGlobalServiceContext()->registerStorageEngine(kWiredTigerEngineName,
- new WiredTigerFactory());
+ status = metadata.validateStorageEngineOption("directoryForIndexes",
+ wiredTigerGlobalOptions.directoryForIndexes);
+ if (!status.isOK()) {
+ return status;
+ }
return Status::OK();
}
+ virtual BSONObj createMetadataOptions(const StorageGlobalParams& params) const {
+ BSONObjBuilder builder;
+ builder.appendBool("directoryPerDB", params.directoryperdb);
+ builder.appendBool("directoryForIndexes", wiredTigerGlobalOptions.directoryForIndexes);
+ WiredTigerCustomizationHooks::get(getGlobalServiceContext())->appendUID(&builder);
+ return builder.obj();
+ }
+};
+} // namespace
+
+MONGO_INITIALIZER_WITH_PREREQUISITES(WiredTigerEngineInit, ("SetGlobalEnvironment"))
+(InitializerContext* context) {
+ getGlobalServiceContext()->registerStorageEngine(kWiredTigerEngineName,
+ new WiredTigerFactory());
+
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
index 2f3ddf38518..1ace8c8c019 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
@@ -40,125 +40,122 @@
namespace {
- using namespace mongo;
-
- class WiredTigerFactoryTest : public mongo::unittest::Test {
- private:
- virtual void setUp() {
- ServiceContext* globalEnv = getGlobalServiceContext();
- ASSERT_TRUE(globalEnv);
- ASSERT_TRUE(getGlobalServiceContext()->isRegisteredStorageEngine(kWiredTigerEngineName));
- std::unique_ptr<StorageFactoriesIterator> sfi(getGlobalServiceContext()->
- makeStorageFactoriesIterator());
- ASSERT_TRUE(sfi);
- bool found = false;
- while (sfi->more()) {
- const StorageEngine::Factory* currentFactory = sfi->next();
- if (currentFactory->getCanonicalName() == kWiredTigerEngineName) {
- found = true;
- factory = currentFactory;
- break;
- }
+using namespace mongo;
+
+class WiredTigerFactoryTest : public mongo::unittest::Test {
+private:
+ virtual void setUp() {
+ ServiceContext* globalEnv = getGlobalServiceContext();
+ ASSERT_TRUE(globalEnv);
+ ASSERT_TRUE(getGlobalServiceContext()->isRegisteredStorageEngine(kWiredTigerEngineName));
+ std::unique_ptr<StorageFactoriesIterator> sfi(
+ getGlobalServiceContext()->makeStorageFactoriesIterator());
+ ASSERT_TRUE(sfi);
+ bool found = false;
+ while (sfi->more()) {
+ const StorageEngine::Factory* currentFactory = sfi->next();
+ if (currentFactory->getCanonicalName() == kWiredTigerEngineName) {
found = true;
+ factory = currentFactory;
+ break;
}
- ASSERT_TRUE(found);
- _oldOptions = wiredTigerGlobalOptions;
- }
-
- virtual void tearDown() {
- wiredTigerGlobalOptions = _oldOptions;
- factory = NULL;
- }
-
- WiredTigerGlobalOptions _oldOptions;
-
- protected:
- const StorageEngine::Factory* factory;
- };
-
- void _testValidateMetadata(const StorageEngine::Factory* factory,
- const BSONObj& metadataOptions,
- bool directoryPerDB,
- bool directoryForIndexes,
- ErrorCodes::Error expectedCode) {
- // It is fine to specify an invalid data directory for the metadata
- // as long as we do not invoke read() or write().
- StorageEngineMetadata metadata("no_such_directory");
- metadata.setStorageEngineOptions(metadataOptions);
-
- StorageGlobalParams storageOptions;
- storageOptions.directoryperdb = directoryPerDB;
- wiredTigerGlobalOptions.directoryForIndexes = directoryForIndexes;
-
- Status status = factory->validateMetadata(metadata, storageOptions);
- if (expectedCode != status.code()) {
- FAIL(str::stream()
- << "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode) << " but got "
- << status.toString()
- << " instead. metadataOptions: " << metadataOptions
- << "; directoryPerDB: " << directoryPerDB
- << "; directoryForIndexes: " << directoryForIndexes);
+ found = true;
}
+ ASSERT_TRUE(found);
+ _oldOptions = wiredTigerGlobalOptions;
}
- // Do not validate fields that are not present in metadata.
- TEST_F(WiredTigerFactoryTest, ValidateMetadataEmptyOptions) {
- _testValidateMetadata(factory, BSONObj(), false, false, ErrorCodes::OK);
- _testValidateMetadata(factory, BSONObj(), false, true, ErrorCodes::OK);
- _testValidateMetadata(factory, BSONObj(), true, false, ErrorCodes::OK);
- _testValidateMetadata(factory, BSONObj(), false, false, ErrorCodes::OK);
- }
-
- TEST_F(WiredTigerFactoryTest, ValidateMetadataDirectoryPerDB) {
- _testValidateMetadata(factory, fromjson("{directoryPerDB: 123}"), false, false,
- ErrorCodes::FailedToParse);
- _testValidateMetadata(factory, fromjson("{directoryPerDB: false}"), false, false,
- ErrorCodes::OK);
- _testValidateMetadata(factory, fromjson("{directoryPerDB: false}"), true, false,
- ErrorCodes::InvalidOptions);
- _testValidateMetadata(factory, fromjson("{directoryPerDB: true}"), false, false,
- ErrorCodes::InvalidOptions);
- _testValidateMetadata(factory, fromjson("{directoryPerDB: true}"), true, false,
- ErrorCodes::OK);
- }
-
- TEST_F(WiredTigerFactoryTest, ValidateMetadataDirectoryForIndexes) {
- _testValidateMetadata(factory, fromjson("{directoryForIndexes: 123}"), false, false,
- ErrorCodes::FailedToParse);
- _testValidateMetadata(factory, fromjson("{directoryForIndexes: false}"), false, false,
- ErrorCodes::OK);
- _testValidateMetadata(factory, fromjson("{directoryForIndexes: false}"), false, true,
- ErrorCodes::InvalidOptions);
- _testValidateMetadata(factory, fromjson("{directoryForIndexes: true}"), false, false,
- ErrorCodes::InvalidOptions);
- _testValidateMetadata(factory, fromjson("{directoryForIndexes: true}"), true, true,
- ErrorCodes::OK);
- }
-
- void _testCreateMetadataOptions(const StorageEngine::Factory* factory,
- bool directoryPerDB,
- bool directoryForIndexes) {
- StorageGlobalParams storageOptions;
- storageOptions.directoryperdb = directoryPerDB;
- wiredTigerGlobalOptions.directoryForIndexes = directoryForIndexes;
-
- BSONObj metadataOptions = factory->createMetadataOptions(storageOptions);
-
- BSONElement directoryPerDBElement = metadataOptions.getField("directoryPerDB");
- ASSERT_TRUE(directoryPerDBElement.isBoolean());
- ASSERT_EQUALS(directoryPerDB, directoryPerDBElement.boolean());
-
- BSONElement directoryForIndexesElement = metadataOptions.getField("directoryForIndexes");
- ASSERT_TRUE(directoryForIndexesElement.isBoolean());
- ASSERT_EQUALS(directoryForIndexes, directoryForIndexesElement.boolean());
+ virtual void tearDown() {
+ wiredTigerGlobalOptions = _oldOptions;
+ factory = NULL;
}
- TEST_F(WiredTigerFactoryTest, CreateMetadataOptions) {
- _testCreateMetadataOptions(factory, false, false);
- _testCreateMetadataOptions(factory, false, true);
- _testCreateMetadataOptions(factory, true, false);
- _testCreateMetadataOptions(factory, true, true);
+ WiredTigerGlobalOptions _oldOptions;
+
+protected:
+ const StorageEngine::Factory* factory;
+};
+
+void _testValidateMetadata(const StorageEngine::Factory* factory,
+ const BSONObj& metadataOptions,
+ bool directoryPerDB,
+ bool directoryForIndexes,
+ ErrorCodes::Error expectedCode) {
+ // It is fine to specify an invalid data directory for the metadata
+ // as long as we do not invoke read() or write().
+ StorageEngineMetadata metadata("no_such_directory");
+ metadata.setStorageEngineOptions(metadataOptions);
+
+ StorageGlobalParams storageOptions;
+ storageOptions.directoryperdb = directoryPerDB;
+ wiredTigerGlobalOptions.directoryForIndexes = directoryForIndexes;
+
+ Status status = factory->validateMetadata(metadata, storageOptions);
+ if (expectedCode != status.code()) {
+ FAIL(str::stream()
+ << "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
+ << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
+ << " instead. metadataOptions: " << metadataOptions << "; directoryPerDB: "
+ << directoryPerDB << "; directoryForIndexes: " << directoryForIndexes);
}
+}
+
+// Do not validate fields that are not present in metadata.
+TEST_F(WiredTigerFactoryTest, ValidateMetadataEmptyOptions) {
+ _testValidateMetadata(factory, BSONObj(), false, false, ErrorCodes::OK);
+ _testValidateMetadata(factory, BSONObj(), false, true, ErrorCodes::OK);
+ _testValidateMetadata(factory, BSONObj(), true, false, ErrorCodes::OK);
+    _testValidateMetadata(factory, BSONObj(), true, true, ErrorCodes::OK);
+}
+
+TEST_F(WiredTigerFactoryTest, ValidateMetadataDirectoryPerDB) {
+ _testValidateMetadata(
+ factory, fromjson("{directoryPerDB: 123}"), false, false, ErrorCodes::FailedToParse);
+ _testValidateMetadata(
+ factory, fromjson("{directoryPerDB: false}"), false, false, ErrorCodes::OK);
+ _testValidateMetadata(
+ factory, fromjson("{directoryPerDB: false}"), true, false, ErrorCodes::InvalidOptions);
+ _testValidateMetadata(
+ factory, fromjson("{directoryPerDB: true}"), false, false, ErrorCodes::InvalidOptions);
+ _testValidateMetadata(factory, fromjson("{directoryPerDB: true}"), true, false, ErrorCodes::OK);
+}
+
+TEST_F(WiredTigerFactoryTest, ValidateMetadataDirectoryForIndexes) {
+ _testValidateMetadata(
+ factory, fromjson("{directoryForIndexes: 123}"), false, false, ErrorCodes::FailedToParse);
+ _testValidateMetadata(
+ factory, fromjson("{directoryForIndexes: false}"), false, false, ErrorCodes::OK);
+ _testValidateMetadata(
+ factory, fromjson("{directoryForIndexes: false}"), false, true, ErrorCodes::InvalidOptions);
+ _testValidateMetadata(
+ factory, fromjson("{directoryForIndexes: true}"), false, false, ErrorCodes::InvalidOptions);
+ _testValidateMetadata(
+ factory, fromjson("{directoryForIndexes: true}"), true, true, ErrorCodes::OK);
+}
+
+void _testCreateMetadataOptions(const StorageEngine::Factory* factory,
+ bool directoryPerDB,
+ bool directoryForIndexes) {
+ StorageGlobalParams storageOptions;
+ storageOptions.directoryperdb = directoryPerDB;
+ wiredTigerGlobalOptions.directoryForIndexes = directoryForIndexes;
+
+ BSONObj metadataOptions = factory->createMetadataOptions(storageOptions);
+
+ BSONElement directoryPerDBElement = metadataOptions.getField("directoryPerDB");
+ ASSERT_TRUE(directoryPerDBElement.isBoolean());
+ ASSERT_EQUALS(directoryPerDB, directoryPerDBElement.boolean());
+
+ BSONElement directoryForIndexesElement = metadataOptions.getField("directoryForIndexes");
+ ASSERT_TRUE(directoryForIndexesElement.isBoolean());
+ ASSERT_EQUALS(directoryForIndexes, directoryForIndexesElement.boolean());
+}
+
+TEST_F(WiredTigerFactoryTest, CreateMetadataOptions) {
+ _testCreateMetadataOptions(factory, false, false);
+ _testCreateMetadataOptions(factory, false, true);
+ _testCreateMetadataOptions(factory, true, false);
+ _testCreateMetadataOptions(factory, true, true);
+}
} // namespace
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index f123ff1fffd..291797c11a8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -57,426 +57,417 @@
namespace mongo {
- using std::set;
- using std::string;
-
-
- WiredTigerKVEngine::WiredTigerKVEngine( const std::string& path,
- const std::string& extraOpenOptions,
- bool durable,
- bool repair )
- : _eventHandler(WiredTigerUtil::defaultEventHandlers()),
- _path( path ),
- _durable( durable ),
- _sizeStorerSyncTracker( 100000, 60 * 1000 ) {
-
- size_t cacheSizeGB = wiredTigerGlobalOptions.cacheSizeGB;
- if (cacheSizeGB == 0) {
- // Since the user didn't provide a cache size, choose a reasonable default value.
- ProcessInfo pi;
- unsigned long long memSizeMB = pi.getMemSizeMB();
- if ( memSizeMB > 0 ) {
- double cacheMB = memSizeMB / 2;
- cacheSizeGB = static_cast<size_t>( cacheMB / 1024 );
- if ( cacheSizeGB < 1 )
- cacheSizeGB = 1;
- }
+using std::set;
+using std::string;
+
+
+WiredTigerKVEngine::WiredTigerKVEngine(const std::string& path,
+ const std::string& extraOpenOptions,
+ bool durable,
+ bool repair)
+ : _eventHandler(WiredTigerUtil::defaultEventHandlers()),
+ _path(path),
+ _durable(durable),
+ _sizeStorerSyncTracker(100000, 60 * 1000) {
+ size_t cacheSizeGB = wiredTigerGlobalOptions.cacheSizeGB;
+ if (cacheSizeGB == 0) {
+ // Since the user didn't provide a cache size, choose a reasonable default value.
+ ProcessInfo pi;
+ unsigned long long memSizeMB = pi.getMemSizeMB();
+ if (memSizeMB > 0) {
+ double cacheMB = memSizeMB / 2;
+ cacheSizeGB = static_cast<size_t>(cacheMB / 1024);
+ if (cacheSizeGB < 1)
+ cacheSizeGB = 1;
}
+ }
- if ( _durable ) {
- boost::filesystem::path journalPath = path;
- journalPath /= "journal";
- if ( !boost::filesystem::exists( journalPath ) ) {
- try {
- boost::filesystem::create_directory( journalPath );
- }
- catch( std::exception& e) {
- log() << "error creating journal dir " << journalPath.string() << ' ' << e.what();
- throw;
- }
+ if (_durable) {
+ boost::filesystem::path journalPath = path;
+ journalPath /= "journal";
+ if (!boost::filesystem::exists(journalPath)) {
+ try {
+ boost::filesystem::create_directory(journalPath);
+ } catch (std::exception& e) {
+ log() << "error creating journal dir " << journalPath.string() << ' ' << e.what();
+ throw;
}
}
+ }
- std::stringstream ss;
- ss << "create,";
- ss << "cache_size=" << cacheSizeGB << "G,";
- ss << "session_max=20000,";
- ss << "eviction=(threads_max=4),";
- ss << "statistics=(fast),";
- if ( _durable ) {
- ss << "log=(enabled=true,archive=true,path=journal,compressor=";
- ss << wiredTigerGlobalOptions.journalCompressor << "),";
- }
- ss << "file_manager=(close_idle_time=100000),"; //~28 hours, will put better fix in 3.1.x
- ss << "checkpoint=(wait=" << wiredTigerGlobalOptions.checkpointDelaySecs;
- ss << ",log_size=2GB),";
- ss << "statistics_log=(wait=" << wiredTigerGlobalOptions.statisticsLogDelaySecs << "),";
- ss << WiredTigerCustomizationHooks::get(
- getGlobalServiceContext())->getOpenConfig("metadata");
- ss << extraOpenOptions;
- string config = ss.str();
- log() << "wiredtiger_open config: " << config;
- int ret = wiredtiger_open(path.c_str(), &_eventHandler, config.c_str(), &_conn);
- // Invalid argument (EINVAL) is usually caused by invalid configuration string.
- // We still fassert() but without a stack trace.
- if (ret == EINVAL) {
- fassertFailedNoTrace(28561);
- }
- else if (ret != 0) {
- Status s(wtRCToStatus(ret));
- msgassertedNoTrace(28595, s.reason());
- }
+ std::stringstream ss;
+ ss << "create,";
+ ss << "cache_size=" << cacheSizeGB << "G,";
+ ss << "session_max=20000,";
+ ss << "eviction=(threads_max=4),";
+ ss << "statistics=(fast),";
+ if (_durable) {
+ ss << "log=(enabled=true,archive=true,path=journal,compressor=";
+ ss << wiredTigerGlobalOptions.journalCompressor << "),";
+ }
+ ss << "file_manager=(close_idle_time=100000),"; //~28 hours, will put better fix in 3.1.x
+ ss << "checkpoint=(wait=" << wiredTigerGlobalOptions.checkpointDelaySecs;
+ ss << ",log_size=2GB),";
+ ss << "statistics_log=(wait=" << wiredTigerGlobalOptions.statisticsLogDelaySecs << "),";
+ ss << WiredTigerCustomizationHooks::get(getGlobalServiceContext())->getOpenConfig("metadata");
+ ss << extraOpenOptions;
+ string config = ss.str();
+ log() << "wiredtiger_open config: " << config;
+ int ret = wiredtiger_open(path.c_str(), &_eventHandler, config.c_str(), &_conn);
+ // Invalid argument (EINVAL) is usually caused by invalid configuration string.
+ // We still fassert() but without a stack trace.
+ if (ret == EINVAL) {
+ fassertFailedNoTrace(28561);
+ } else if (ret != 0) {
+ Status s(wtRCToStatus(ret));
+ msgassertedNoTrace(28595, s.reason());
+ }
- _sessionCache.reset( new WiredTigerSessionCache( this ) );
+ _sessionCache.reset(new WiredTigerSessionCache(this));
- _sizeStorerUri = "table:sizeStorer";
- {
- WiredTigerSession session(_conn);
- if (repair && _hasUri(session.getSession(), _sizeStorerUri)) {
- log() << "Repairing size cache";
- fassertNoTrace(28577, _salvageIfNeeded(_sizeStorerUri.c_str()));
- }
- _sizeStorer.reset(new WiredTigerSizeStorer(_conn, _sizeStorerUri));
- _sizeStorer->fillCache();
+ _sizeStorerUri = "table:sizeStorer";
+ {
+ WiredTigerSession session(_conn);
+ if (repair && _hasUri(session.getSession(), _sizeStorerUri)) {
+ log() << "Repairing size cache";
+ fassertNoTrace(28577, _salvageIfNeeded(_sizeStorerUri.c_str()));
}
+ _sizeStorer.reset(new WiredTigerSizeStorer(_conn, _sizeStorerUri));
+ _sizeStorer->fillCache();
}
+}
- WiredTigerKVEngine::~WiredTigerKVEngine() {
- if (_conn) {
- cleanShutdown();
- }
-
- _sessionCache.reset( NULL );
+WiredTigerKVEngine::~WiredTigerKVEngine() {
+ if (_conn) {
+ cleanShutdown();
}
- void WiredTigerKVEngine::cleanShutdown() {
- log() << "WiredTigerKVEngine shutting down";
- syncSizeInfo(true);
- if (_conn) {
- // these must be the last things we do before _conn->close();
- _sizeStorer.reset( NULL );
- _sessionCache->shuttingDown();
+ _sessionCache.reset(NULL);
+}
+
+void WiredTigerKVEngine::cleanShutdown() {
+ log() << "WiredTigerKVEngine shutting down";
+ syncSizeInfo(true);
+ if (_conn) {
+ // these must be the last things we do before _conn->close();
+ _sizeStorer.reset(NULL);
+ _sessionCache->shuttingDown();
#if !__has_feature(address_sanitizer)
- const char* config = "leak_memory=true";
+ const char* config = "leak_memory=true";
#else
- const char* config = NULL;
+ const char* config = NULL;
#endif
- invariantWTOK( _conn->close(_conn, config) );
- _conn = NULL;
- }
- }
-
- Status WiredTigerKVEngine::okToRename( OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
- StringData ident,
- const RecordStore* originalRecordStore ) const {
- _sizeStorer->storeToCache(_uri( ident ),
- originalRecordStore->numRecords( opCtx ),
- originalRecordStore->dataSize( opCtx ) );
- syncSizeInfo(true);
- return Status::OK();
+ invariantWTOK(_conn->close(_conn, config));
+ _conn = NULL;
}
+}
- int64_t WiredTigerKVEngine::getIdentSize( OperationContext* opCtx,
- StringData ident ) {
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
- return WiredTigerUtil::getIdentSize(session->getSession(), _uri(ident) );
- }
+Status WiredTigerKVEngine::okToRename(OperationContext* opCtx,
+ StringData fromNS,
+ StringData toNS,
+ StringData ident,
+ const RecordStore* originalRecordStore) const {
+ _sizeStorer->storeToCache(
+ _uri(ident), originalRecordStore->numRecords(opCtx), originalRecordStore->dataSize(opCtx));
+ syncSizeInfo(true);
+ return Status::OK();
+}
- Status WiredTigerKVEngine::repairIdent( OperationContext* opCtx,
- StringData ident ) {
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
- session->closeAllCursors();
- string uri = _uri(ident);
- return _salvageIfNeeded(uri.c_str());
- }
+int64_t WiredTigerKVEngine::getIdentSize(OperationContext* opCtx, StringData ident) {
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
+ return WiredTigerUtil::getIdentSize(session->getSession(), _uri(ident));
+}
- Status WiredTigerKVEngine::_salvageIfNeeded(const char* uri) {
- // Using a side session to avoid transactional issues
- WiredTigerSession sessionWrapper(_conn);
- WT_SESSION* session = sessionWrapper.getSession();
+Status WiredTigerKVEngine::repairIdent(OperationContext* opCtx, StringData ident) {
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx);
+ session->closeAllCursors();
+ string uri = _uri(ident);
+ return _salvageIfNeeded(uri.c_str());
+}
- int rc = (session->verify)(session, uri, NULL);
- if (rc == 0) {
- log() << "Verify succeeded on uri " << uri << ". Not salvaging.";
- return Status::OK();
- }
+Status WiredTigerKVEngine::_salvageIfNeeded(const char* uri) {
+ // Using a side session to avoid transactional issues
+ WiredTigerSession sessionWrapper(_conn);
+ WT_SESSION* session = sessionWrapper.getSession();
- if (rc == EBUSY) {
- // SERVER-16457: verify and salvage are occasionally failing with EBUSY. For now we
- // lie and return OK to avoid breaking tests. This block should go away when that ticket
- // is resolved.
- error() << "Verify on " << uri << " failed with EBUSY. Assuming no salvage is needed.";
- return Status::OK();
- }
+ int rc = (session->verify)(session, uri, NULL);
+ if (rc == 0) {
+ log() << "Verify succeeded on uri " << uri << ". Not salvaging.";
+ return Status::OK();
+ }
- // TODO need to cleanup the sizeStorer cache after salvaging.
- log() << "Verify failed on uri " << uri << ". Running a salvage operation.";
- return wtRCToStatus(session->salvage(session, uri, NULL), "Salvage failed:");
+ if (rc == EBUSY) {
+ // SERVER-16457: verify and salvage are occasionally failing with EBUSY. For now we
+ // lie and return OK to avoid breaking tests. This block should go away when that ticket
+ // is resolved.
+ error() << "Verify on " << uri << " failed with EBUSY. Assuming no salvage is needed.";
+ return Status::OK();
}
- int WiredTigerKVEngine::flushAllFiles( bool sync ) {
- LOG(1) << "WiredTigerKVEngine::flushAllFiles";
- syncSizeInfo(true);
+    // TODO: clean up the sizeStorer cache after salvaging.
+ log() << "Verify failed on uri " << uri << ". Running a salvage operation.";
+ return wtRCToStatus(session->salvage(session, uri, NULL), "Salvage failed:");
+}
- WiredTigerSession session(_conn);
- WT_SESSION* s = session.getSession();
- invariantWTOK( s->checkpoint(s, NULL ) );
+int WiredTigerKVEngine::flushAllFiles(bool sync) {
+ LOG(1) << "WiredTigerKVEngine::flushAllFiles";
+ syncSizeInfo(true);
- return 1;
- }
+ WiredTigerSession session(_conn);
+ WT_SESSION* s = session.getSession();
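+    // Take a full WiredTiger checkpoint so everything written so far is durable on disk.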
+ invariantWTOK(s->checkpoint(s, NULL));
- void WiredTigerKVEngine::syncSizeInfo( bool sync ) const {
- if ( !_sizeStorer )
- return;
+ return 1;
+}
- try {
- _sizeStorer->syncCache(sync);
- }
- catch (const WriteConflictException&) {
- // ignore, we'll try again later.
- }
- }
+void WiredTigerKVEngine::syncSizeInfo(bool sync) const {
+ if (!_sizeStorer)
+ return;
- RecoveryUnit* WiredTigerKVEngine::newRecoveryUnit() {
- return new WiredTigerRecoveryUnit( _sessionCache.get() );
+ try {
+ _sizeStorer->syncCache(sync);
+ } catch (const WriteConflictException&) {
+ // ignore, we'll try again later.
}
+}
- void WiredTigerKVEngine::setRecordStoreExtraOptions( const std::string& options ) {
- _rsOptions = options;
- }
+RecoveryUnit* WiredTigerKVEngine::newRecoveryUnit() {
+ return new WiredTigerRecoveryUnit(_sessionCache.get());
+}
- void WiredTigerKVEngine::setSortedDataInterfaceExtraOptions( const std::string& options ) {
- _indexOptions = options;
- }
+void WiredTigerKVEngine::setRecordStoreExtraOptions(const std::string& options) {
+ _rsOptions = options;
+}
- Status WiredTigerKVEngine::createRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options ) {
- _checkIdentPath( ident );
- WiredTigerSession session(_conn);
+void WiredTigerKVEngine::setSortedDataInterfaceExtraOptions(const std::string& options) {
+ _indexOptions = options;
+}
- StatusWith<std::string> result =
- WiredTigerRecordStore::generateCreateString(ns, options, _rsOptions);
- if (!result.isOK()) {
- return result.getStatus();
- }
- std::string config = result.getValue();
+Status WiredTigerKVEngine::createRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) {
+ _checkIdentPath(ident);
+ WiredTigerSession session(_conn);
+
+ StatusWith<std::string> result =
+ WiredTigerRecordStore::generateCreateString(ns, options, _rsOptions);
+ if (!result.isOK()) {
+ return result.getStatus();
+ }
+ std::string config = result.getValue();
+
+ string uri = _uri(ident);
+ WT_SESSION* s = session.getSession();
+ LOG(2) << "WiredTigerKVEngine::createRecordStore uri: " << uri << " config: " << config;
+ return wtRCToStatus(s->create(s, uri.c_str(), config.c_str()));
+}
- string uri = _uri( ident );
- WT_SESSION* s = session.getSession();
- LOG(2) << "WiredTigerKVEngine::createRecordStore uri: " << uri << " config: " << config;
- return wtRCToStatus( s->create( s, uri.c_str(), config.c_str() ) );
+RecordStore* WiredTigerKVEngine::getRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options) {
+ if (options.capped) {
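+        // Unset capped options fall back to a 4096-byte size limit and no document-count
+        // limit (-1).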
+ return new WiredTigerRecordStore(opCtx,
+ ns,
+ _uri(ident),
+ options.capped,
+ options.cappedSize ? options.cappedSize : 4096,
+ options.cappedMaxDocs ? options.cappedMaxDocs : -1,
+ NULL,
+ _sizeStorer.get());
+ } else {
+ return new WiredTigerRecordStore(
+ opCtx, ns, _uri(ident), false, -1, -1, NULL, _sizeStorer.get());
}
+}
+
+string WiredTigerKVEngine::_uri(StringData ident) const {
+ return string("table:") + ident.toString();
+}
- RecordStore* WiredTigerKVEngine::getRecordStore( OperationContext* opCtx,
- StringData ns,
+Status WiredTigerKVEngine::createSortedDataInterface(OperationContext* opCtx,
StringData ident,
- const CollectionOptions& options ) {
-
- if (options.capped) {
- return new WiredTigerRecordStore(opCtx, ns, _uri(ident), options.capped,
- options.cappedSize ? options.cappedSize : 4096,
- options.cappedMaxDocs ? options.cappedMaxDocs : -1,
- NULL,
- _sizeStorer.get() );
- }
- else {
- return new WiredTigerRecordStore(opCtx, ns, _uri(ident),
- false, -1, -1, NULL, _sizeStorer.get() );
- }
+ const IndexDescriptor* desc) {
+ _checkIdentPath(ident);
+ StatusWith<std::string> result = WiredTigerIndex::generateCreateString(_indexOptions, *desc);
+ if (!result.isOK()) {
+ return result.getStatus();
}
- string WiredTigerKVEngine::_uri( StringData ident ) const {
- return string("table:") + ident.toString();
- }
+ std::string config = result.getValue();
- Status WiredTigerKVEngine::createSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc ) {
- _checkIdentPath( ident );
- StatusWith<std::string> result =
- WiredTigerIndex::generateCreateString(_indexOptions, *desc);
- if (!result.isOK()) {
- return result.getStatus();
- }
-
- std::string config = result.getValue();
+ LOG(2) << "WiredTigerKVEngine::createSortedDataInterface ident: " << ident
+ << " config: " << config;
+ return wtRCToStatus(WiredTigerIndex::Create(opCtx, _uri(ident), config));
+}
- LOG(2) << "WiredTigerKVEngine::createSortedDataInterface ident: " << ident
- << " config: " << config;
- return wtRCToStatus(WiredTigerIndex::Create(opCtx, _uri(ident), config));
- }
+SortedDataInterface* WiredTigerKVEngine::getSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc) {
+ if (desc->unique())
+ return new WiredTigerIndexUnique(opCtx, _uri(ident), desc);
+ return new WiredTigerIndexStandard(opCtx, _uri(ident), desc);
+}
- SortedDataInterface* WiredTigerKVEngine::getSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc ) {
- if ( desc->unique() )
- return new WiredTigerIndexUnique( opCtx, _uri( ident ), desc );
- return new WiredTigerIndexStandard( opCtx, _uri( ident ), desc );
- }
+Status WiredTigerKVEngine::dropIdent(OperationContext* opCtx, StringData ident) {
+ _drop(ident);
+ return Status::OK();
+}
- Status WiredTigerKVEngine::dropIdent( OperationContext* opCtx,
- StringData ident ) {
- _drop( ident );
- return Status::OK();
- }
+bool WiredTigerKVEngine::_drop(StringData ident) {
+ string uri = _uri(ident);
- bool WiredTigerKVEngine::_drop( StringData ident ) {
- string uri = _uri( ident );
+ WiredTigerSession session(_conn);
- WiredTigerSession session(_conn);
+ int ret = session.getSession()->drop(session.getSession(), uri.c_str(), "force");
+ LOG(1) << "WT drop of " << uri << " res " << ret;
- int ret = session.getSession()->drop( session.getSession(), uri.c_str(), "force" );
- LOG(1) << "WT drop of " << uri << " res " << ret;
+ if (ret == 0) {
+ // yay, it worked
+ return true;
+ }
- if ( ret == 0 ) {
- // yay, it worked
- return true;
+ if (ret == EBUSY) {
+ // this is expected, queue it up
+ {
+ stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ _identToDrop.insert(uri);
}
+ _sessionCache->closeAll();
+ return false;
+ }
- if ( ret == EBUSY ) {
- // this is expected, queue it up
- {
- stdx::lock_guard<stdx::mutex> lk( _identToDropMutex );
- _identToDrop.insert( uri );
- }
- _sessionCache->closeAll();
- return false;
- }
+ invariantWTOK(ret);
+ return false;
+}
- invariantWTOK( ret );
- return false;
+bool WiredTigerKVEngine::haveDropsQueued() const {
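+    // Opportunistically sync the size-storer cache whenever its sync interval has elapsed.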
+ if (_sizeStorerSyncTracker.intervalHasElapsed()) {
+ _sizeStorerSyncTracker.resetLastTime();
+ syncSizeInfo(false);
}
+ stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ return !_identToDrop.empty();
+}
- bool WiredTigerKVEngine::haveDropsQueued() const {
- if ( _sizeStorerSyncTracker.intervalHasElapsed() ) {
- _sizeStorerSyncTracker.resetLastTime();
- syncSizeInfo(false);
- }
- stdx::lock_guard<stdx::mutex> lk( _identToDropMutex );
- return !_identToDrop.empty();
+void WiredTigerKVEngine::dropAllQueued() {
+ set<string> mine;
+ {
+ stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ mine = _identToDrop;
}
- void WiredTigerKVEngine::dropAllQueued() {
- set<string> mine;
- {
- stdx::lock_guard<stdx::mutex> lk( _identToDropMutex );
- mine = _identToDrop;
- }
+ set<string> deleted;
- set<string> deleted;
+ {
+ WiredTigerSession session(_conn);
+ for (set<string>::const_iterator it = mine.begin(); it != mine.end(); ++it) {
+ string uri = *it;
+ int ret = session.getSession()->drop(session.getSession(), uri.c_str(), "force");
+ LOG(1) << "WT queued drop of " << uri << " res " << ret;
- {
- WiredTigerSession session(_conn);
- for ( set<string>::const_iterator it = mine.begin(); it != mine.end(); ++it ) {
- string uri = *it;
- int ret = session.getSession()->drop( session.getSession(), uri.c_str(), "force" );
- LOG(1) << "WT queued drop of " << uri << " res " << ret;
-
- if ( ret == 0 ) {
- deleted.insert( uri );
- continue;
- }
-
- if ( ret == EBUSY ) {
- // leave in qeuue
- continue;
- }
-
- invariantWTOK( ret );
+ if (ret == 0) {
+ deleted.insert(uri);
+ continue;
}
- }
- {
- stdx::lock_guard<stdx::mutex> lk( _identToDropMutex );
- for ( set<string>::const_iterator it = deleted.begin(); it != deleted.end(); ++it ) {
- _identToDrop.erase( *it );
+ if (ret == EBUSY) {
+                // leave in queue
+ continue;
}
- }
- }
- bool WiredTigerKVEngine::supportsDocLocking() const {
- return true;
- }
-
- bool WiredTigerKVEngine::supportsDirectoryPerDB() const {
- return true;
+ invariantWTOK(ret);
+ }
}
- bool WiredTigerKVEngine::hasIdent(OperationContext* opCtx, StringData ident) const {
- return _hasUri(WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession(),
- _uri(ident));
+ {
+ stdx::lock_guard<stdx::mutex> lk(_identToDropMutex);
+ for (set<string>::const_iterator it = deleted.begin(); it != deleted.end(); ++it) {
+ _identToDrop.erase(*it);
+ }
}
+}
- bool WiredTigerKVEngine::_hasUri(WT_SESSION* session, const std::string& uri) const {
- // can't use WiredTigerCursor since this is called from constructor.
- WT_CURSOR* c = NULL;
- int ret = session->open_cursor(session, "metadata:", NULL, NULL, &c);
- if (ret == ENOENT) return false;
- invariantWTOK(ret);
- ON_BLOCK_EXIT(c->close, c);
+bool WiredTigerKVEngine::supportsDocLocking() const {
+ return true;
+}
- c->set_key(c, uri.c_str());
- return c->search(c) == 0;
- }
+bool WiredTigerKVEngine::supportsDirectoryPerDB() const {
+ return true;
+}
- std::vector<std::string> WiredTigerKVEngine::getAllIdents( OperationContext* opCtx ) const {
- std::vector<std::string> all;
- WiredTigerCursor cursor( "metadata:", WiredTigerSession::kMetadataCursorId, false, opCtx );
- WT_CURSOR* c = cursor.get();
- if ( !c )
- return all;
-
- while ( c->next(c) == 0 ) {
- const char* raw;
- c->get_key(c, &raw );
- StringData key(raw);
- size_t idx = key.find( ':' );
- if ( idx == string::npos )
- continue;
- StringData type = key.substr( 0, idx );
- if ( type != "table" )
- continue;
+bool WiredTigerKVEngine::hasIdent(OperationContext* opCtx, StringData ident) const {
+ return _hasUri(WiredTigerRecoveryUnit::get(opCtx)->getSession(opCtx)->getSession(),
+ _uri(ident));
+}
- StringData ident = key.substr(idx+1);
- if ( ident == "sizeStorer" )
- continue;
+bool WiredTigerKVEngine::_hasUri(WT_SESSION* session, const std::string& uri) const {
+    // Can't use WiredTigerCursor since this can be called from the constructor.
+ WT_CURSOR* c = NULL;
+ int ret = session->open_cursor(session, "metadata:", NULL, NULL, &c);
+ if (ret == ENOENT)
+ return false;
+ invariantWTOK(ret);
+ ON_BLOCK_EXIT(c->close, c);
- all.push_back( ident.toString() );
- }
+ c->set_key(c, uri.c_str());
+ return c->search(c) == 0;
+}
+std::vector<std::string> WiredTigerKVEngine::getAllIdents(OperationContext* opCtx) const {
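+    // Scan the WT metadata cursor for "table:" entries and return their idents, skipping the
+    // internal sizeStorer table.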
+ std::vector<std::string> all;
+ WiredTigerCursor cursor("metadata:", WiredTigerSession::kMetadataCursorId, false, opCtx);
+ WT_CURSOR* c = cursor.get();
+ if (!c)
return all;
- }
- int WiredTigerKVEngine::reconfigure(const char* str) {
- return _conn->reconfigure(_conn, str);
+ while (c->next(c) == 0) {
+ const char* raw;
+ c->get_key(c, &raw);
+ StringData key(raw);
+ size_t idx = key.find(':');
+ if (idx == string::npos)
+ continue;
+ StringData type = key.substr(0, idx);
+ if (type != "table")
+ continue;
+
+ StringData ident = key.substr(idx + 1);
+ if (ident == "sizeStorer")
+ continue;
+
+ all.push_back(ident.toString());
}
- void WiredTigerKVEngine::_checkIdentPath( StringData ident ) {
- size_t start = 0;
- size_t idx;
- while ( ( idx = ident.find( '/', start ) ) != string::npos ) {
- StringData dir = ident.substr( 0, idx );
-
- boost::filesystem::path subdir = _path;
- subdir /= dir.toString();
- if ( !boost::filesystem::exists( subdir ) ) {
- LOG(1) << "creating subdirectory: " << dir;
- try {
- boost::filesystem::create_directory( subdir );
- }
- catch (const std::exception& e) {
- error() << "error creating path " << subdir.string() << ' ' << e.what();
- throw;
- }
- }
+ return all;
+}
+
+int WiredTigerKVEngine::reconfigure(const char* str) {
+ return _conn->reconfigure(_conn, str);
+}
- start = idx + 1;
+void WiredTigerKVEngine::_checkIdentPath(StringData ident) {
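+    // Idents may contain '/' path separators; create any missing subdirectories under the
+    // dbpath so WiredTiger can create the underlying file.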
+ size_t start = 0;
+ size_t idx;
+ while ((idx = ident.find('/', start)) != string::npos) {
+ StringData dir = ident.substr(0, idx);
+
+ boost::filesystem::path subdir = _path;
+ subdir /= dir.toString();
+ if (!boost::filesystem::exists(subdir)) {
+ LOG(1) << "creating subdirectory: " << dir;
+ try {
+ boost::filesystem::create_directory(subdir);
+ } catch (const std::exception& e) {
+ error() << "error creating path " << subdir.string() << ' ' << e.what();
+ throw;
+ }
}
+
+ start = idx + 1;
}
}
+}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index dd40faa4ccf..cf90624d564 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -44,113 +44,112 @@
namespace mongo {
- class WiredTigerSessionCache;
- class WiredTigerSizeStorer;
+class WiredTigerSessionCache;
+class WiredTigerSizeStorer;
- class WiredTigerKVEngine : public KVEngine {
- public:
- WiredTigerKVEngine( const std::string& path,
- const std::string& extraOpenOptions = "",
- bool durable = true,
- bool repair = false );
- virtual ~WiredTigerKVEngine();
+class WiredTigerKVEngine : public KVEngine {
+public:
+ WiredTigerKVEngine(const std::string& path,
+ const std::string& extraOpenOptions = "",
+ bool durable = true,
+ bool repair = false);
+ virtual ~WiredTigerKVEngine();
- void setRecordStoreExtraOptions( const std::string& options );
- void setSortedDataInterfaceExtraOptions( const std::string& options );
+ void setRecordStoreExtraOptions(const std::string& options);
+ void setSortedDataInterfaceExtraOptions(const std::string& options);
- virtual bool supportsDocLocking() const;
+ virtual bool supportsDocLocking() const;
- virtual bool supportsDirectoryPerDB() const;
+ virtual bool supportsDirectoryPerDB() const;
- virtual bool isDurable() const { return _durable; }
+ virtual bool isDurable() const {
+ return _durable;
+ }
- virtual RecoveryUnit* newRecoveryUnit();
+ virtual RecoveryUnit* newRecoveryUnit();
- virtual Status createRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options );
+ virtual Status createRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options);
- virtual RecordStore* getRecordStore( OperationContext* opCtx,
- StringData ns,
- StringData ident,
- const CollectionOptions& options );
-
- virtual Status createSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc );
+ virtual RecordStore* getRecordStore(OperationContext* opCtx,
+ StringData ns,
+ StringData ident,
+ const CollectionOptions& options);
- virtual SortedDataInterface* getSortedDataInterface( OperationContext* opCtx,
- StringData ident,
- const IndexDescriptor* desc );
-
- virtual Status dropIdent( OperationContext* opCtx,
- StringData ident );
+ virtual Status createSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc);
- virtual Status okToRename( OperationContext* opCtx,
- StringData fromNS,
- StringData toNS,
- StringData ident,
- const RecordStore* originalRecordStore ) const;
+ virtual SortedDataInterface* getSortedDataInterface(OperationContext* opCtx,
+ StringData ident,
+ const IndexDescriptor* desc);
- virtual int flushAllFiles( bool sync );
+ virtual Status dropIdent(OperationContext* opCtx, StringData ident);
- virtual int64_t getIdentSize( OperationContext* opCtx,
- StringData ident );
+ virtual Status okToRename(OperationContext* opCtx,
+ StringData fromNS,
+ StringData toNS,
+ StringData ident,
+ const RecordStore* originalRecordStore) const;
- virtual Status repairIdent( OperationContext* opCtx,
- StringData ident );
+ virtual int flushAllFiles(bool sync);
- virtual bool hasIdent(OperationContext* opCtx, StringData ident) const;
+ virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident);
- std::vector<std::string> getAllIdents( OperationContext* opCtx ) const;
+ virtual Status repairIdent(OperationContext* opCtx, StringData ident);
- virtual void cleanShutdown();
+ virtual bool hasIdent(OperationContext* opCtx, StringData ident) const;
- // wiredtiger specific
- // Calls WT_CONNECTION::reconfigure on the underlying WT_CONNECTION
- // held by this class
- int reconfigure(const char* str);
+ std::vector<std::string> getAllIdents(OperationContext* opCtx) const;
- WT_CONNECTION* getConnection() { return _conn; }
- void dropAllQueued();
- bool haveDropsQueued() const;
+ virtual void cleanShutdown();
- void syncSizeInfo(bool sync) const;
+    // WiredTiger-specific:
+    // Calls WT_CONNECTION::reconfigure on the underlying WT_CONNECTION held by this class.
+ int reconfigure(const char* str);
- /**
- * Initializes a background job to remove excess documents in the oplog collections.
- * This applies to the capped collections in the local.oplog.* namespaces (specifically
- * local.oplog.rs for replica sets and local.oplog.$main for master/slave replication).
- * Returns true if a background job is running for the namespace.
- */
- static bool initRsOplogBackgroundThread(StringData ns);
+ WT_CONNECTION* getConnection() {
+ return _conn;
+ }
+ void dropAllQueued();
+ bool haveDropsQueued() const;
- private:
+ void syncSizeInfo(bool sync) const;
- Status _salvageIfNeeded(const char* uri);
- void _checkIdentPath( StringData ident );
+ /**
+ * Initializes a background job to remove excess documents in the oplog collections.
+ * This applies to the capped collections in the local.oplog.* namespaces (specifically
+ * local.oplog.rs for replica sets and local.oplog.$main for master/slave replication).
+ * Returns true if a background job is running for the namespace.
+ */
+ static bool initRsOplogBackgroundThread(StringData ns);
- bool _hasUri(WT_SESSION* session, const std::string& uri) const;
+private:
+ Status _salvageIfNeeded(const char* uri);
+ void _checkIdentPath(StringData ident);
- std::string _uri( StringData ident ) const;
- bool _drop( StringData ident );
+ bool _hasUri(WT_SESSION* session, const std::string& uri) const;
- WT_CONNECTION* _conn;
- WT_EVENT_HANDLER _eventHandler;
- std::unique_ptr<WiredTigerSessionCache> _sessionCache;
- std::string _path;
- bool _durable;
+ std::string _uri(StringData ident) const;
+ bool _drop(StringData ident);
- std::string _rsOptions;
- std::string _indexOptions;
+ WT_CONNECTION* _conn;
+ WT_EVENT_HANDLER _eventHandler;
+ std::unique_ptr<WiredTigerSessionCache> _sessionCache;
+ std::string _path;
+ bool _durable;
- std::set<std::string> _identToDrop;
- mutable stdx::mutex _identToDropMutex;
+ std::string _rsOptions;
+ std::string _indexOptions;
- std::unique_ptr<WiredTigerSizeStorer> _sizeStorer;
- std::string _sizeStorerUri;
- mutable ElapsedTracker _sizeStorerSyncTracker;
- };
+ std::set<std::string> _identToDrop;
+ mutable stdx::mutex _identToDropMutex;
+ std::unique_ptr<WiredTigerSizeStorer> _sizeStorer;
+ std::string _sizeStorerUri;
+ mutable ElapsedTracker _sizeStorerSyncTracker;
+};
}
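The reconfigure(const char* str) member declared above forwards a configuration string to WT_CONNECTION::reconfigure on the connection held by the engine. A minimal standalone sketch of that underlying WiredTiger C API call, outside MongoDB; the "wt-demo" home directory (assumed to already exist) and the cache_size values are placeholder assumptions:

    #include <cstdio>
    #include <wiredtiger.h>

    int main() {
        WT_CONNECTION* conn = nullptr;
        // Open (or create) a database under ./wt-demo with a 1GB cache.
        int ret = wiredtiger_open("wt-demo", nullptr, "create,cache_size=1GB", &conn);
        if (ret != 0) {
            std::fprintf(stderr, "open failed: %s\n", wiredtiger_strerror(ret));
            return 1;
        }
        // Live reconfiguration of the open connection -- the call the engine's
        // reconfigure(str) wrapper ultimately makes.
        ret = conn->reconfigure(conn, "cache_size=2GB");
        if (ret != 0) {
            std::fprintf(stderr, "reconfigure failed: %s\n", wiredtiger_strerror(ret));
        }
        conn->close(conn, nullptr);
        return ret == 0 ? 0 : 1;
    }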
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
index db944d575d2..576d121bb6c 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
@@ -36,31 +36,32 @@
namespace mongo {
- class WiredTigerKVHarnessHelper : public KVHarnessHelper {
- public:
- WiredTigerKVHarnessHelper()
- : _dbpath( "wt-kv-harness" ) {
- _engine.reset( new WiredTigerKVEngine( _dbpath.path() ) );
- }
+class WiredTigerKVHarnessHelper : public KVHarnessHelper {
+public:
+ WiredTigerKVHarnessHelper() : _dbpath("wt-kv-harness") {
+ _engine.reset(new WiredTigerKVEngine(_dbpath.path()));
+ }
- virtual ~WiredTigerKVHarnessHelper() {
- _engine.reset( NULL );
- }
+ virtual ~WiredTigerKVHarnessHelper() {
+ _engine.reset(NULL);
+ }
- virtual KVEngine* restartEngine() {
- _engine.reset( NULL );
- _engine.reset( new WiredTigerKVEngine( _dbpath.path() ) );
- return _engine.get();
- }
+ virtual KVEngine* restartEngine() {
+ _engine.reset(NULL);
+ _engine.reset(new WiredTigerKVEngine(_dbpath.path()));
+ return _engine.get();
+ }
- virtual KVEngine* getEngine() { return _engine.get(); }
+ virtual KVEngine* getEngine() {
+ return _engine.get();
+ }
- private:
- unittest::TempDir _dbpath;
- std::unique_ptr<WiredTigerKVEngine> _engine;
- };
+private:
+ unittest::TempDir _dbpath;
+ std::unique_ptr<WiredTigerKVEngine> _engine;
+};
- KVHarnessHelper* KVHarnessHelper::create() {
- return new WiredTigerKVHarnessHelper();
- }
+KVHarnessHelper* KVHarnessHelper::create() {
+ return new WiredTigerKVHarnessHelper();
+}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
index 5cbd0ba2873..589706daed4 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp
@@ -37,22 +37,21 @@
namespace mongo {
- MONGO_MODULE_STARTUP_OPTIONS_REGISTER(WiredTigerOptions)(InitializerContext* context) {
- return wiredTigerGlobalOptions.add(&moe::startupOptions);
- }
+MONGO_MODULE_STARTUP_OPTIONS_REGISTER(WiredTigerOptions)(InitializerContext* context) {
+ return wiredTigerGlobalOptions.add(&moe::startupOptions);
+}
- MONGO_STARTUP_OPTIONS_VALIDATE(WiredTigerOptions)(InitializerContext* context) {
- return Status::OK();
- }
+MONGO_STARTUP_OPTIONS_VALIDATE(WiredTigerOptions)(InitializerContext* context) {
+ return Status::OK();
+}
- MONGO_STARTUP_OPTIONS_STORE(WiredTigerOptions)(InitializerContext* context) {
- Status ret = wiredTigerGlobalOptions.store(moe::startupOptionsParsed, context->args());
- if (!ret.isOK()) {
- std::cerr << ret.toString() << std::endl;
- std::cerr << "try '" << context->args()[0] << " --help' for more information"
- << std::endl;
- ::_exit(EXIT_BADOPTIONS);
- }
- return Status::OK();
+MONGO_STARTUP_OPTIONS_STORE(WiredTigerOptions)(InitializerContext* context) {
+ Status ret = wiredTigerGlobalOptions.store(moe::startupOptionsParsed, context->args());
+ if (!ret.isOK()) {
+ std::cerr << ret.toString() << std::endl;
+ std::cerr << "try '" << context->args()[0] << " --help' for more information" << std::endl;
+ ::_exit(EXIT_BADOPTIONS);
}
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
index 5be812178dc..49ef155b51d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
@@ -41,23 +41,25 @@ using std::string;
WiredTigerEngineRuntimeConfigParameter::WiredTigerEngineRuntimeConfigParameter(
WiredTigerKVEngine* engine)
- : ServerParameter(ServerParameterSet::getGlobal(),
- "wiredTigerEngineRuntimeConfig", false, true),
- _engine(engine) {}
+ : ServerParameter(
+ ServerParameterSet::getGlobal(), "wiredTigerEngineRuntimeConfig", false, true),
+ _engine(engine) {}
-void WiredTigerEngineRuntimeConfigParameter::append(OperationContext* txn, BSONObjBuilder& b,
- const std::string& name) {
+void WiredTigerEngineRuntimeConfigParameter::append(OperationContext* txn,
+ BSONObjBuilder& b,
+ const std::string& name) {
b << name << "";
}
Status WiredTigerEngineRuntimeConfigParameter::set(const BSONElement& newValueElement) {
try {
return setFromString(newValueElement.String());
- }
- catch (MsgAssertionException msg) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for wiredTigerEngineRuntimeConfig via setParameter command: "
+ } catch (MsgAssertionException msg) {
+ return Status(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for wiredTigerEngineRuntimeConfig via setParameter command: "
<< newValueElement);
}
}
@@ -65,18 +67,19 @@ Status WiredTigerEngineRuntimeConfigParameter::set(const BSONElement& newValueEl
Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string& str) {
size_t pos = str.find('\0');
if (pos != std::string::npos) {
- return Status(ErrorCodes::BadValue, (str::stream() <<
- "WiredTiger configuration strings cannot have embedded null characters. "
- "Embedded null found at position " << pos));
+ return Status(ErrorCodes::BadValue,
+ (str::stream()
+ << "WiredTiger configuration strings cannot have embedded null characters. "
+ "Embedded null found at position " << pos));
}
log() << "Reconfiguring WiredTiger storage engine with config string: \"" << str << "\"";
int ret = _engine->reconfigure(str.c_str());
if (ret != 0) {
- string result = (mongoutils::str::stream() <<
- "WiredTiger reconfiguration failed with error code (" << ret << "): "
- << wiredtiger_strerror(ret));
+ string result =
+ (mongoutils::str::stream() << "WiredTiger reconfiguration failed with error code ("
+ << ret << "): " << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
@@ -84,5 +87,4 @@ Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string&
return Status::OK();
}
-
}
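setFromString() in the hunk above rejects strings with embedded NUL bytes, hands the rest to the engine's reconfigure(), and reports failures via wiredtiger_strerror. A hedged C++ sketch of the same validate-then-reconfigure pattern applied directly to a WT_CONNECTION; the helper name applyRuntimeConfig and its signature are invented for illustration:

    #include <string>
    #include <wiredtiger.h>

    // Hypothetical helper mirroring the checks above; not part of MongoDB or WiredTiger.
    bool applyRuntimeConfig(WT_CONNECTION* conn, const std::string& config, std::string* errMsg) {
        // WiredTiger configuration strings cannot contain embedded null characters.
        if (config.find('\0') != std::string::npos) {
            *errMsg = "configuration strings cannot have embedded null characters";
            return false;
        }
        int ret = conn->reconfigure(conn, config.c_str());
        if (ret != 0) {
            *errMsg = "reconfiguration failed with error code (" + std::to_string(ret) +
                "): " + wiredtiger_strerror(ret);
            return false;
        }
        return true;
    }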
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
index 4198a851ce3..6742f76be99 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.h
@@ -32,21 +32,21 @@
namespace mongo {
- /**
- * WT_CONNECTION::reconfigure get/setParameter support
- */
- class WiredTigerEngineRuntimeConfigParameter : public ServerParameter {
- MONGO_DISALLOW_COPYING(WiredTigerEngineRuntimeConfigParameter);
- public:
- explicit WiredTigerEngineRuntimeConfigParameter(WiredTigerKVEngine* engine);
+/**
+ * WT_CONNECTION::reconfigure get/setParameter support
+ */
+class WiredTigerEngineRuntimeConfigParameter : public ServerParameter {
+ MONGO_DISALLOW_COPYING(WiredTigerEngineRuntimeConfigParameter);
+
+public:
+ explicit WiredTigerEngineRuntimeConfigParameter(WiredTigerKVEngine* engine);
- virtual void append(OperationContext* txn, BSONObjBuilder& b,
- const std::string& name);
- virtual Status set(const BSONElement& newValueElement);
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name);
+ virtual Status set(const BSONElement& newValueElement);
- virtual Status setFromString(const std::string& str);
+ virtual Status setFromString(const std::string& str);
- private:
- WiredTigerKVEngine* _engine;
- };
+private:
+ WiredTigerKVEngine* _engine;
+};
}
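The wiredtiger_record_store.cpp hunk that follows builds WiredTiger create strings ending in the required suffix key_format=q,value_format=u, i.e. signed 64-bit record ids mapped to opaque byte-array values. A minimal sketch, assuming an already-open WT_CONNECTION, of creating and inserting into a table with exactly those formats; the table name "table:demo" and the function demo() are illustrative only:

    #include <cstdint>
    #include <wiredtiger.h>

    // Illustrative only: a table using the record store's formats (signed 64-bit
    // keys, opaque byte-array values). Error handling is omitted for brevity.
    void demo(WT_CONNECTION* conn) {
        WT_SESSION* session = nullptr;
        conn->open_session(conn, nullptr, nullptr, &session);
        session->create(session, "table:demo", "key_format=q,value_format=u");

        WT_CURSOR* cursor = nullptr;
        session->open_cursor(session, "table:demo", nullptr, nullptr, &cursor);

        const char doc[] = "raw record bytes";
        WT_ITEM value = {};
        value.data = doc;
        value.size = sizeof(doc);

        cursor->set_key(cursor, static_cast<int64_t>(1));  // 'q': int64 key, like a RecordId
        cursor->set_value(cursor, &value);                 // 'u': WT_ITEM byte array
        cursor->insert(cursor);

        cursor->close(cursor);
        session->close(session, nullptr);
    }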
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 4970869e1e5..6e96e241bd6 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -64,1164 +64,1142 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
namespace {
- static const int kMinimumRecordStoreVersion = 1;
- static const int kCurrentRecordStoreVersion = 1; // New record stores use this by default.
- static const int kMaximumRecordStoreVersion = 1;
- BOOST_STATIC_ASSERT(kCurrentRecordStoreVersion >= kMinimumRecordStoreVersion);
- BOOST_STATIC_ASSERT(kCurrentRecordStoreVersion <= kMaximumRecordStoreVersion);
+static const int kMinimumRecordStoreVersion = 1;
+static const int kCurrentRecordStoreVersion = 1; // New record stores use this by default.
+static const int kMaximumRecordStoreVersion = 1;
+BOOST_STATIC_ASSERT(kCurrentRecordStoreVersion >= kMinimumRecordStoreVersion);
+BOOST_STATIC_ASSERT(kCurrentRecordStoreVersion <= kMaximumRecordStoreVersion);
- bool shouldUseOplogHack(OperationContext* opCtx, const std::string& uri) {
- StatusWith<BSONObj> appMetadata = WiredTigerUtil::getApplicationMetadata(opCtx, uri);
- if (!appMetadata.isOK()) {
- return false;
- }
-
- return (appMetadata.getValue().getIntField("oplogKeyExtractionVersion") == 1);
+bool shouldUseOplogHack(OperationContext* opCtx, const std::string& uri) {
+ StatusWith<BSONObj> appMetadata = WiredTigerUtil::getApplicationMetadata(opCtx, uri);
+ if (!appMetadata.isOK()) {
+ return false;
}
-} // namespace
-
- MONGO_FP_DECLARE(WTWriteConflictException);
-
- const std::string kWiredTigerEngineName = "wiredTiger";
-
- class WiredTigerRecordStore::Cursor final : public RecordCursor {
- public:
- Cursor(OperationContext* txn,
- const WiredTigerRecordStore& rs,
- bool forward = true,
- bool forParallelCollectionScan = false)
- : _rs(rs)
- , _txn(txn)
- , _forward(forward)
- , _forParallelCollectionScan(forParallelCollectionScan)
- , _cursor(new WiredTigerCursor(rs.getURI(), rs.instanceId(), true, txn))
- , _readUntilForOplog(WiredTigerRecoveryUnit::get(txn)->getOplogReadTill())
- {}
-
- boost::optional<Record> next() final {
- if (_eof) return {};
-
- WT_CURSOR* c = _cursor->get();
- {
- // Nothing after the next line can throw WCEs.
- // Note that an unpositioned (or eof) WT_CURSOR returns the first/last entry in the
- // table when you call next/prev.
- int advanceRet = WT_OP_CHECK(_forward ? c->next(c) : c->prev(c));
- if (advanceRet == WT_NOTFOUND) {
- _eof = true;
- return {};
- }
- invariantWTOK(advanceRet);
- }
+ return (appMetadata.getValue().getIntField("oplogKeyExtractionVersion") == 1);
+}
- int64_t key;
- invariantWTOK(c->get_key(c, &key));
- const RecordId id = _fromKey(key);
+} // namespace
- if (!isVisible(id)) {
- _eof = true;
- return {};
- }
+MONGO_FP_DECLARE(WTWriteConflictException);
- WT_ITEM value;
- invariantWTOK(c->get_value(c, &value));
- auto data = RecordData(static_cast<const char*>(value.data), value.size);
- data.makeOwned(); // TODO delete this line once safe.
+const std::string kWiredTigerEngineName = "wiredTiger";
- _lastReturnedId = id;
- return {{id, std::move(data)}};
- }
+class WiredTigerRecordStore::Cursor final : public RecordCursor {
+public:
+ Cursor(OperationContext* txn,
+ const WiredTigerRecordStore& rs,
+ bool forward = true,
+ bool forParallelCollectionScan = false)
+ : _rs(rs),
+ _txn(txn),
+ _forward(forward),
+ _forParallelCollectionScan(forParallelCollectionScan),
+ _cursor(new WiredTigerCursor(rs.getURI(), rs.instanceId(), true, txn)),
+ _readUntilForOplog(WiredTigerRecoveryUnit::get(txn)->getOplogReadTill()) {}
- boost::optional<Record> seekExact(const RecordId& id) final {
- if (!isVisible(id)) {
- _eof = true;
- return {};
- }
+ boost::optional<Record> next() final {
+ if (_eof)
+ return {};
- WT_CURSOR* c = _cursor->get();
- c->set_key(c, _makeKey(id));
+ WT_CURSOR* c = _cursor->get();
+ {
// Nothing after the next line can throw WCEs.
- int seekRet = WT_OP_CHECK(c->search(c));
- if (seekRet == WT_NOTFOUND) {
+ // Note that an unpositioned (or eof) WT_CURSOR returns the first/last entry in the
+ // table when you call next/prev.
+ int advanceRet = WT_OP_CHECK(_forward ? c->next(c) : c->prev(c));
+ if (advanceRet == WT_NOTFOUND) {
_eof = true;
return {};
}
- invariantWTOK(seekRet);
+ invariantWTOK(advanceRet);
+ }
- WT_ITEM value;
- invariantWTOK(c->get_value(c, &value));
- auto data = RecordData(static_cast<const char*>(value.data), value.size);
- data.makeOwned(); // TODO delete this line once safe.
+ int64_t key;
+ invariantWTOK(c->get_key(c, &key));
+ const RecordId id = _fromKey(key);
- _lastReturnedId = id;
- return {{id, std::move(data)}};
+ if (!isVisible(id)) {
+ _eof = true;
+ return {};
}
- void savePositioned() final {
- // It must be safe to call save() twice in a row without calling restore().
- if (!_txn) return;
-
- // the cursor and recoveryUnit are valid on restore
- // so we just record the recoveryUnit to make sure
- _savedRecoveryUnit = _txn->recoveryUnit();
- if ( _cursor && !wt_keeptxnopen() ) {
- try {
- _cursor->reset();
- }
- catch (const WriteConflictException& wce) {
- // Ignore since this is only called when we are about to kill our transaction
- // anyway.
- }
- }
+ WT_ITEM value;
+ invariantWTOK(c->get_value(c, &value));
+ auto data = RecordData(static_cast<const char*>(value.data), value.size);
+ data.makeOwned(); // TODO delete this line once safe.
- if (_forParallelCollectionScan) {
- // Delete the cursor since we may come back to a different RecoveryUnit
- _cursor.reset();
- }
- _txn = nullptr;
- }
+ _lastReturnedId = id;
+ return {{id, std::move(data)}};
+ }
- void saveUnpositioned() final {
- savePositioned();
- _lastReturnedId = RecordId();
+ boost::optional<Record> seekExact(const RecordId& id) final {
+ if (!isVisible(id)) {
+ _eof = true;
+ return {};
}
- bool restore(OperationContext* txn) final {
- _txn = txn;
+ WT_CURSOR* c = _cursor->get();
+ c->set_key(c, _makeKey(id));
+ // Nothing after the next line can throw WCEs.
+ int seekRet = WT_OP_CHECK(c->search(c));
+ if (seekRet == WT_NOTFOUND) {
+ _eof = true;
+ return {};
+ }
+ invariantWTOK(seekRet);
- // If we've hit EOF, then this iterator is done and need not be restored.
- if (_eof) return true;
+ WT_ITEM value;
+ invariantWTOK(c->get_value(c, &value));
+ auto data = RecordData(static_cast<const char*>(value.data), value.size);
+ data.makeOwned(); // TODO delete this line once safe.
- bool needRestore = false;
+ _lastReturnedId = id;
+ return {{id, std::move(data)}};
+ }
- if (_forParallelCollectionScan) {
- needRestore = true;
- _savedRecoveryUnit = txn->recoveryUnit();
- _cursor.reset( new WiredTigerCursor( _rs.getURI(), _rs.instanceId(), true, txn ) );
- _forParallelCollectionScan = false; // we only do this the first time
+ void savePositioned() final {
+ // It must be safe to call save() twice in a row without calling restore().
+ if (!_txn)
+ return;
+
+ // the cursor and recoveryUnit are valid on restore
+ // so we just record the recoveryUnit to make sure
+ _savedRecoveryUnit = _txn->recoveryUnit();
+ if (_cursor && !wt_keeptxnopen()) {
+ try {
+ _cursor->reset();
+ } catch (const WriteConflictException& wce) {
+ // Ignore since this is only called when we are about to kill our transaction
+ // anyway.
}
- invariant( _savedRecoveryUnit == txn->recoveryUnit() );
-
- if (!needRestore && wt_keeptxnopen()) return true;
- if (_lastReturnedId.isNull()) return true;
+ }
- // This will ensure an active session exists, so any restored cursors will bind to it
- invariant(WiredTigerRecoveryUnit::get(txn)->getSession(txn) == _cursor->getSession());
+ if (_forParallelCollectionScan) {
+ // Delete the cursor since we may come back to a different RecoveryUnit
+ _cursor.reset();
+ }
+ _txn = nullptr;
+ }
- WT_CURSOR* c = _cursor->get();
- c->set_key(c, _makeKey(_lastReturnedId));
+ void saveUnpositioned() final {
+ savePositioned();
+ _lastReturnedId = RecordId();
+ }
- int cmp;
- int ret = WT_OP_CHECK(c->search_near(c, &cmp));
- if (ret == WT_NOTFOUND) {
- _eof = true;
- return !_rs._isCapped;
- }
- invariantWTOK(ret);
+ bool restore(OperationContext* txn) final {
+ _txn = txn;
- if (cmp == 0) return true; // Landed right where we left off.
+ // If we've hit EOF, then this iterator is done and need not be restored.
+ if (_eof)
+ return true;
- if (_rs._isCapped) {
- // Doc was deleted either by cappedDeleteAsNeeded() or cappedTruncateAfter().
- // It is important that we error out in this case so that consumers don't
- // silently get 'holes' when scanning capped collections. We don't make
- // this guarantee for normal collections so it is ok to skip ahead in that case.
- _eof = true;
- return false;
- }
+ bool needRestore = false;
- if (_forward && cmp > 0) {
- // We landed after where we were. Move back one so that next() will return this
- // document.
- ret = WT_OP_CHECK(c->prev(c));
- }
- else if (!_forward && cmp < 0) {
- // Do the opposite for reverse cursors.
- ret = WT_OP_CHECK(c->next(c));
- }
- if (ret != WT_NOTFOUND) invariantWTOK(ret);
+ if (_forParallelCollectionScan) {
+ needRestore = true;
+ _savedRecoveryUnit = txn->recoveryUnit();
+ _cursor.reset(new WiredTigerCursor(_rs.getURI(), _rs.instanceId(), true, txn));
+ _forParallelCollectionScan = false; // we only do this the first time
+ }
+ invariant(_savedRecoveryUnit == txn->recoveryUnit());
+ if (!needRestore && wt_keeptxnopen())
+ return true;
+ if (_lastReturnedId.isNull())
return true;
- }
- private:
- bool isVisible(const RecordId& id) {
- if (!_rs._isCapped) return true;
+ // This will ensure an active session exists, so any restored cursors will bind to it
+ invariant(WiredTigerRecoveryUnit::get(txn)->getSession(txn) == _cursor->getSession());
- if ( _readUntilForOplog.isNull() || !_rs._isOplog ) {
- // this is the normal capped case
- return !_rs.isCappedHidden(id);
- }
+ WT_CURSOR* c = _cursor->get();
+ c->set_key(c, _makeKey(_lastReturnedId));
- // this is for oplogs
- if (id == _readUntilForOplog) {
- // we allow if its been committed already
- return !_rs.isCappedHidden(id);
- }
+ int cmp;
+ int ret = WT_OP_CHECK(c->search_near(c, &cmp));
+ if (ret == WT_NOTFOUND) {
+ _eof = true;
+ return !_rs._isCapped;
+ }
+ invariantWTOK(ret);
+
+ if (cmp == 0)
+ return true; // Landed right where we left off.
- return id < _readUntilForOplog;
+ if (_rs._isCapped) {
+ // Doc was deleted either by cappedDeleteAsNeeded() or cappedTruncateAfter().
+ // It is important that we error out in this case so that consumers don't
+ // silently get 'holes' when scanning capped collections. We don't make
+ // this guarantee for normal collections so it is ok to skip ahead in that case.
+ _eof = true;
+ return false;
}
- const WiredTigerRecordStore& _rs;
- OperationContext* _txn;
- RecoveryUnit* _savedRecoveryUnit; // only used to sanity check between save/restore.
- const bool _forward;
- bool _forParallelCollectionScan; // This can go away once SERVER-17364 is resolved.
- std::unique_ptr<WiredTigerCursor> _cursor;
- bool _eof = false;
- RecordId _lastReturnedId;
- const RecordId _readUntilForOplog;
- };
-
- StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj options) {
- StringBuilder ss;
- BSONForEach(elem, options) {
- if (elem.fieldNameStringData() == "configString") {
- if (elem.type() != String) {
- return StatusWith<std::string>(ErrorCodes::TypeMismatch, str::stream()
- << "storageEngine.wiredTiger.configString "
- << "must be a string. "
- << "Not adding 'configString' value "
- << elem << " to collection configuration");
- }
- ss << elem.valueStringData() << ',';
- }
- else {
- // Return error on first unrecognized field.
- return StatusWith<std::string>(ErrorCodes::InvalidOptions, str::stream()
- << '\'' << elem.fieldNameStringData() << '\''
- << " is not a supported option in "
- << "storageEngine.wiredTiger");
- }
+ if (_forward && cmp > 0) {
+ // We landed after where we were. Move back one so that next() will return this
+ // document.
+ ret = WT_OP_CHECK(c->prev(c));
+ } else if (!_forward && cmp < 0) {
+ // Do the opposite for reverse cursors.
+ ret = WT_OP_CHECK(c->next(c));
}
- return StatusWith<std::string>(ss.str());
+ if (ret != WT_NOTFOUND)
+ invariantWTOK(ret);
+
+ return true;
}
- // static
- StatusWith<std::string> WiredTigerRecordStore::generateCreateString(
- StringData ns,
- const CollectionOptions& options,
- StringData extraStrings) {
+private:
+ bool isVisible(const RecordId& id) {
+ if (!_rs._isCapped)
+ return true;
- // Separate out a prefix and suffix in the default string. User configuration will
- // override values in the prefix, but not values in the suffix.
- str::stream ss;
- ss << "type=file,";
- // Setting this larger than 10m can hurt latencies and throughput degradation if this
- // is the oplog. See SERVER-16247
- ss << "memory_page_max=10m,";
- // Choose a higher split percent, since most usage is append only. Allow some space
- // for workloads where updates increase the size of documents.
- ss << "split_pct=90,";
- ss << "leaf_value_max=64MB,";
- ss << "checksum=on,";
- if (wiredTigerGlobalOptions.useCollectionPrefixCompression) {
- ss << "prefix_compression,";
+ if (_readUntilForOplog.isNull() || !_rs._isOplog) {
+ // this is the normal capped case
+ return !_rs.isCappedHidden(id);
}
- ss << "block_compressor=" << wiredTigerGlobalOptions.collectionBlockCompressor << ",";
+ // this is for oplogs
+ if (id == _readUntilForOplog) {
+ // we allow it if it's been committed already
+ return !_rs.isCappedHidden(id);
+ }
- ss << WiredTigerCustomizationHooks::get(getGlobalServiceContext())->getOpenConfig(ns);
+ return id < _readUntilForOplog;
+ }
- ss << extraStrings << ",";
+ const WiredTigerRecordStore& _rs;
+ OperationContext* _txn;
+ RecoveryUnit* _savedRecoveryUnit; // only used to sanity check between save/restore.
+ const bool _forward;
+ bool _forParallelCollectionScan; // This can go away once SERVER-17364 is resolved.
+ std::unique_ptr<WiredTigerCursor> _cursor;
+ bool _eof = false;
+ RecordId _lastReturnedId;
+ const RecordId _readUntilForOplog;
+};
+
+StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj options) {
+ StringBuilder ss;
+ BSONForEach(elem, options) {
+ if (elem.fieldNameStringData() == "configString") {
+ if (elem.type() != String) {
+ return StatusWith<std::string>(ErrorCodes::TypeMismatch,
+ str::stream()
+ << "storageEngine.wiredTiger.configString "
+ << "must be a string. "
+ << "Not adding 'configString' value " << elem
+ << " to collection configuration");
+ }
+ ss << elem.valueStringData() << ',';
+ } else {
+ // Return error on first unrecognized field.
+ return StatusWith<std::string>(ErrorCodes::InvalidOptions,
+ str::stream() << '\'' << elem.fieldNameStringData()
+ << '\'' << " is not a supported option in "
+ << "storageEngine.wiredTiger");
+ }
+ }
+ return StatusWith<std::string>(ss.str());
+}
- StatusWith<std::string> customOptions =
- parseOptionsField(options.storageEngine.getObjectField(kWiredTigerEngineName));
- if (!customOptions.isOK())
- return customOptions;
+// static
+StatusWith<std::string> WiredTigerRecordStore::generateCreateString(
+ StringData ns, const CollectionOptions& options, StringData extraStrings) {
+ // Separate out a prefix and suffix in the default string. User configuration will
+ // override values in the prefix, but not values in the suffix.
+ str::stream ss;
+ ss << "type=file,";
+ // Setting this larger than 10m can hurt latency and degrade throughput if this
+ // is the oplog. See SERVER-16247
+ ss << "memory_page_max=10m,";
+ // Choose a higher split percent, since most usage is append only. Allow some space
+ // for workloads where updates increase the size of documents.
+ ss << "split_pct=90,";
+ ss << "leaf_value_max=64MB,";
+ ss << "checksum=on,";
+ if (wiredTigerGlobalOptions.useCollectionPrefixCompression) {
+ ss << "prefix_compression,";
+ }
- ss << customOptions.getValue();
+ ss << "block_compressor=" << wiredTigerGlobalOptions.collectionBlockCompressor << ",";
- if ( NamespaceString::oplog(ns) ) {
- // force file for oplog
- ss << "type=file,";
- // Tune down to 10m. See SERVER-16247
- ss << "memory_page_max=10m,";
- }
+ ss << WiredTigerCustomizationHooks::get(getGlobalServiceContext())->getOpenConfig(ns);
- // WARNING: No user-specified config can appear below this line. These options are required
- // for correct behavior of the server.
+ ss << extraStrings << ",";
- ss << "key_format=q,value_format=u";
+ StatusWith<std::string> customOptions =
+ parseOptionsField(options.storageEngine.getObjectField(kWiredTigerEngineName));
+ if (!customOptions.isOK())
+ return customOptions;
- // Record store metadata
- ss << ",app_metadata=(formatVersion=" << kCurrentRecordStoreVersion;
- if (NamespaceString::oplog(ns)) {
- ss << ",oplogKeyExtractionVersion=1";
- }
- ss << ")";
+ ss << customOptions.getValue();
- return StatusWith<std::string>(ss);
+ if (NamespaceString::oplog(ns)) {
+ // force file for oplog
+ ss << "type=file,";
+ // Tune down to 10m. See SERVER-16247
+ ss << "memory_page_max=10m,";
}
- WiredTigerRecordStore::WiredTigerRecordStore(OperationContext* ctx,
- StringData ns,
- StringData uri,
- bool isCapped,
- int64_t cappedMaxSize,
- int64_t cappedMaxDocs,
- CappedDocumentDeleteCallback* cappedDeleteCallback,
- WiredTigerSizeStorer* sizeStorer)
- : RecordStore( ns ),
- _uri( uri.toString() ),
- _instanceId( WiredTigerSession::genCursorId() ),
- _isCapped( isCapped ),
- _isOplog( NamespaceString::oplog( ns ) ),
- _cappedMaxSize( cappedMaxSize ),
- _cappedMaxSizeSlack( std::min(cappedMaxSize/10, int64_t(16*1024*1024)) ),
- _cappedMaxDocs( cappedMaxDocs ),
- _cappedSleep(0),
- _cappedSleepMS(0),
- _cappedDeleteCallback( cappedDeleteCallback ),
- _cappedDeleteCheckCount(0),
- _useOplogHack(shouldUseOplogHack(ctx, _uri)),
- _sizeStorer( sizeStorer ),
- _sizeStorerCounter(0),
- _shuttingDown(false)
- {
- Status versionStatus = WiredTigerUtil::checkApplicationMetadataFormatVersion(
- ctx, uri, kMinimumRecordStoreVersion, kMaximumRecordStoreVersion);
- if (!versionStatus.isOK()) {
- fassertFailedWithStatusNoTrace(28548, versionStatus);
- }
+ // WARNING: No user-specified config can appear below this line. These options are required
+ // for correct behavior of the server.
- if (_isCapped) {
- invariant(_cappedMaxSize > 0);
- invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
- }
- else {
- invariant(_cappedMaxSize == -1);
- invariant(_cappedMaxDocs == -1);
- }
+ ss << "key_format=q,value_format=u";
- // Find the largest RecordId currently in use and estimate the number of records.
- Cursor cursor(ctx, *this, /*forward=*/false);
- if (auto record = cursor.next()) {
- int64_t max = _makeKey(record->id);
- _oplog_highestSeen = record->id;
- _nextIdNum.store( 1 + max );
-
- if ( _sizeStorer ) {
- long long numRecords;
- long long dataSize;
- _sizeStorer->loadFromCache( uri, &numRecords, &dataSize );
- _numRecords.store( numRecords );
- _dataSize.store( dataSize );
- _sizeStorer->onCreate( this, numRecords, dataSize );
- }
+ // Record store metadata
+ ss << ",app_metadata=(formatVersion=" << kCurrentRecordStoreVersion;
+ if (NamespaceString::oplog(ns)) {
+ ss << ",oplogKeyExtractionVersion=1";
+ }
+ ss << ")";
- else {
- LOG(1) << "Doing scan of collection " << ns << " to get size and count info";
+ return StatusWith<std::string>(ss);
+}
- _numRecords.store(0);
- _dataSize.store(0);
+WiredTigerRecordStore::WiredTigerRecordStore(OperationContext* ctx,
+ StringData ns,
+ StringData uri,
+ bool isCapped,
+ int64_t cappedMaxSize,
+ int64_t cappedMaxDocs,
+ CappedDocumentDeleteCallback* cappedDeleteCallback,
+ WiredTigerSizeStorer* sizeStorer)
+ : RecordStore(ns),
+ _uri(uri.toString()),
+ _instanceId(WiredTigerSession::genCursorId()),
+ _isCapped(isCapped),
+ _isOplog(NamespaceString::oplog(ns)),
+ _cappedMaxSize(cappedMaxSize),
+ _cappedMaxSizeSlack(std::min(cappedMaxSize / 10, int64_t(16 * 1024 * 1024))),
+ _cappedMaxDocs(cappedMaxDocs),
+ _cappedSleep(0),
+ _cappedSleepMS(0),
+ _cappedDeleteCallback(cappedDeleteCallback),
+ _cappedDeleteCheckCount(0),
+ _useOplogHack(shouldUseOplogHack(ctx, _uri)),
+ _sizeStorer(sizeStorer),
+ _sizeStorerCounter(0),
+ _shuttingDown(false) {
+ Status versionStatus = WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ ctx, uri, kMinimumRecordStoreVersion, kMaximumRecordStoreVersion);
+ if (!versionStatus.isOK()) {
+ fassertFailedWithStatusNoTrace(28548, versionStatus);
+ }
- do {
- _numRecords.fetchAndAdd(1);
- _dataSize.fetchAndAdd(record->data.size());
- } while ((record = cursor.next()));
+ if (_isCapped) {
+ invariant(_cappedMaxSize > 0);
+ invariant(_cappedMaxDocs == -1 || _cappedMaxDocs > 0);
+ } else {
+ invariant(_cappedMaxSize == -1);
+ invariant(_cappedMaxDocs == -1);
+ }
- if ( _sizeStorer ) {
- _sizeStorer->storeToCache( _uri, _numRecords.load(), _dataSize.load() );
- }
- }
+ // Find the largest RecordId currently in use and estimate the number of records.
+ Cursor cursor(ctx, *this, /*forward=*/false);
+ if (auto record = cursor.next()) {
+ int64_t max = _makeKey(record->id);
+ _oplog_highestSeen = record->id;
+ _nextIdNum.store(1 + max);
+
+ if (_sizeStorer) {
+ long long numRecords;
+ long long dataSize;
+ _sizeStorer->loadFromCache(uri, &numRecords, &dataSize);
+ _numRecords.store(numRecords);
+ _dataSize.store(dataSize);
+ _sizeStorer->onCreate(this, numRecords, dataSize);
}
+
else {
- _dataSize.store(0);
- _numRecords.store(0);
- // Need to start at 1 so we are always higher than RecordId::min()
- _nextIdNum.store( 1 );
- if ( sizeStorer )
- _sizeStorer->onCreate( this, 0, 0 );
- }
+ LOG(1) << "Doing scan of collection " << ns << " to get size and count info";
- _hasBackgroundThread = WiredTigerKVEngine::initRsOplogBackgroundThread(ns);
- }
+ _numRecords.store(0);
+ _dataSize.store(0);
- WiredTigerRecordStore::~WiredTigerRecordStore() {
- {
- stdx::lock_guard<stdx::timed_mutex> lk(_cappedDeleterMutex);
- _shuttingDown = true;
- }
+ do {
+ _numRecords.fetchAndAdd(1);
+ _dataSize.fetchAndAdd(record->data.size());
+ } while ((record = cursor.next()));
- LOG(1) << "~WiredTigerRecordStore for: " << ns();
- if ( _sizeStorer ) {
- _sizeStorer->onDestroy( this );
+ if (_sizeStorer) {
+ _sizeStorer->storeToCache(_uri, _numRecords.load(), _dataSize.load());
+ }
}
+ } else {
+ _dataSize.store(0);
+ _numRecords.store(0);
+ // Need to start at 1 so we are always higher than RecordId::min()
+ _nextIdNum.store(1);
+ if (sizeStorer)
+ _sizeStorer->onCreate(this, 0, 0);
}
- const char* WiredTigerRecordStore::name() const {
- return kWiredTigerEngineName.c_str();
- }
+ _hasBackgroundThread = WiredTigerKVEngine::initRsOplogBackgroundThread(ns);
+}
- bool WiredTigerRecordStore::inShutdown() const {
+WiredTigerRecordStore::~WiredTigerRecordStore() {
+ {
stdx::lock_guard<stdx::timed_mutex> lk(_cappedDeleterMutex);
- return _shuttingDown;
+ _shuttingDown = true;
}
- long long WiredTigerRecordStore::dataSize( OperationContext *txn ) const {
- return _dataSize.load();
+ LOG(1) << "~WiredTigerRecordStore for: " << ns();
+ if (_sizeStorer) {
+ _sizeStorer->onDestroy(this);
}
+}
- long long WiredTigerRecordStore::numRecords( OperationContext *txn ) const {
- return _numRecords.load();
- }
+const char* WiredTigerRecordStore::name() const {
+ return kWiredTigerEngineName.c_str();
+}
- bool WiredTigerRecordStore::isCapped() const {
- return _isCapped;
- }
+bool WiredTigerRecordStore::inShutdown() const {
+ stdx::lock_guard<stdx::timed_mutex> lk(_cappedDeleterMutex);
+ return _shuttingDown;
+}
- int64_t WiredTigerRecordStore::cappedMaxDocs() const {
- invariant(_isCapped);
- return _cappedMaxDocs;
- }
+long long WiredTigerRecordStore::dataSize(OperationContext* txn) const {
+ return _dataSize.load();
+}
- int64_t WiredTigerRecordStore::cappedMaxSize() const {
- invariant(_isCapped);
- return _cappedMaxSize;
- }
+long long WiredTigerRecordStore::numRecords(OperationContext* txn) const {
+ return _numRecords.load();
+}
- int64_t WiredTigerRecordStore::storageSize( OperationContext* txn,
- BSONObjBuilder* extraInfo,
- int infoLevel ) const {
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
- StatusWith<int64_t> result = WiredTigerUtil::getStatisticsValueAs<int64_t>(
- session->getSession(),
- "statistics:" + getURI(), "statistics=(size)", WT_STAT_DSRC_BLOCK_SIZE);
- uassertStatusOK(result.getStatus());
+bool WiredTigerRecordStore::isCapped() const {
+ return _isCapped;
+}
- int64_t size = result.getValue();
+int64_t WiredTigerRecordStore::cappedMaxDocs() const {
+ invariant(_isCapped);
+ return _cappedMaxDocs;
+}
- if ( size == 0 && _isCapped ) {
- // Many things assume an empty capped collection still takes up space.
- return 1;
- }
- return size;
+int64_t WiredTigerRecordStore::cappedMaxSize() const {
+ invariant(_isCapped);
+ return _cappedMaxSize;
+}
+
+int64_t WiredTigerRecordStore::storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo,
+ int infoLevel) const {
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ StatusWith<int64_t> result =
+ WiredTigerUtil::getStatisticsValueAs<int64_t>(session->getSession(),
+ "statistics:" + getURI(),
+ "statistics=(size)",
+ WT_STAT_DSRC_BLOCK_SIZE);
+ uassertStatusOK(result.getStatus());
+
+ int64_t size = result.getValue();
+
+ if (size == 0 && _isCapped) {
+ // Many things assume an empty capped collection still takes up space.
+ return 1;
}
+ return size;
+}
- // Retrieve the value from a positioned cursor.
- RecordData WiredTigerRecordStore::_getData(const WiredTigerCursor& cursor) const {
- WT_ITEM value;
- int ret = cursor->get_value(cursor.get(), &value);
- invariantWTOK(ret);
+// Retrieve the value from a positioned cursor.
+RecordData WiredTigerRecordStore::_getData(const WiredTigerCursor& cursor) const {
+ WT_ITEM value;
+ int ret = cursor->get_value(cursor.get(), &value);
+ invariantWTOK(ret);
- SharedBuffer data = SharedBuffer::allocate(value.size);
- memcpy( data.get(), value.data, value.size );
- return RecordData(data, value.size);
- }
+ SharedBuffer data = SharedBuffer::allocate(value.size);
+ memcpy(data.get(), value.data, value.size);
+ return RecordData(data, value.size);
+}
- RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const {
- // ownership passes to the shared_array created below
- WiredTigerCursor curwrap( _uri, _instanceId, true, txn);
- WT_CURSOR *c = curwrap.get();
- invariant( c );
- c->set_key(c, _makeKey(loc));
- int ret = WT_OP_CHECK(c->search(c));
- massert(28556, "Didn't find RecordId in WiredTigerRecordStore", ret != WT_NOTFOUND);
- invariantWTOK(ret);
- return _getData(curwrap);
- }
+RecordData WiredTigerRecordStore::dataFor(OperationContext* txn, const RecordId& loc) const {
+ // ownership passes to the shared_array created below
+ WiredTigerCursor curwrap(_uri, _instanceId, true, txn);
+ WT_CURSOR* c = curwrap.get();
+ invariant(c);
+ c->set_key(c, _makeKey(loc));
+ int ret = WT_OP_CHECK(c->search(c));
+ massert(28556, "Didn't find RecordId in WiredTigerRecordStore", ret != WT_NOTFOUND);
+ invariantWTOK(ret);
+ return _getData(curwrap);
+}
- bool WiredTigerRecordStore::findRecord( OperationContext* txn,
- const RecordId& loc, RecordData* out ) const {
- WiredTigerCursor curwrap( _uri, _instanceId, true, txn);
- WT_CURSOR *c = curwrap.get();
- invariant( c );
- c->set_key(c, _makeKey(loc));
- int ret = WT_OP_CHECK(c->search(c));
- if (ret == WT_NOTFOUND) {
- return false;
- }
- invariantWTOK(ret);
- *out = _getData(curwrap);
- return true;
+bool WiredTigerRecordStore::findRecord(OperationContext* txn,
+ const RecordId& loc,
+ RecordData* out) const {
+ WiredTigerCursor curwrap(_uri, _instanceId, true, txn);
+ WT_CURSOR* c = curwrap.get();
+ invariant(c);
+ c->set_key(c, _makeKey(loc));
+ int ret = WT_OP_CHECK(c->search(c));
+ if (ret == WT_NOTFOUND) {
+ return false;
}
+ invariantWTOK(ret);
+ *out = _getData(curwrap);
+ return true;
+}
- void WiredTigerRecordStore::deleteRecord( OperationContext* txn, const RecordId& loc ) {
- WiredTigerCursor cursor( _uri, _instanceId, true, txn );
- cursor.assertInActiveTxn();
- WT_CURSOR *c = cursor.get();
- c->set_key(c, _makeKey(loc));
- int ret = WT_OP_CHECK(c->search(c));
- invariantWTOK(ret);
+void WiredTigerRecordStore::deleteRecord(OperationContext* txn, const RecordId& loc) {
+ WiredTigerCursor cursor(_uri, _instanceId, true, txn);
+ cursor.assertInActiveTxn();
+ WT_CURSOR* c = cursor.get();
+ c->set_key(c, _makeKey(loc));
+ int ret = WT_OP_CHECK(c->search(c));
+ invariantWTOK(ret);
- WT_ITEM old_value;
- ret = c->get_value(c, &old_value);
- invariantWTOK(ret);
+ WT_ITEM old_value;
+ ret = c->get_value(c, &old_value);
+ invariantWTOK(ret);
- int old_length = old_value.size;
+ int old_length = old_value.size;
- ret = WT_OP_CHECK(c->remove(c));
- invariantWTOK(ret);
+ ret = WT_OP_CHECK(c->remove(c));
+ invariantWTOK(ret);
- _changeNumRecords(txn, -1);
- _increaseDataSize(txn, -old_length);
- }
+ _changeNumRecords(txn, -1);
+ _increaseDataSize(txn, -old_length);
+}
- bool WiredTigerRecordStore::cappedAndNeedDelete() const {
- if (!_isCapped)
- return false;
+bool WiredTigerRecordStore::cappedAndNeedDelete() const {
+ if (!_isCapped)
+ return false;
- if (_dataSize.load() >= _cappedMaxSize)
- return true;
+ if (_dataSize.load() >= _cappedMaxSize)
+ return true;
- if ((_cappedMaxDocs != -1) && (_numRecords.load() > _cappedMaxDocs))
- return true;
+ if ((_cappedMaxDocs != -1) && (_numRecords.load() > _cappedMaxDocs))
+ return true;
- return false;
- }
+ return false;
+}
- int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
- const RecordId& justInserted) {
+int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
+ const RecordId& justInserted) {
+ // We only want to do the checks occasionally as they are expensive.
+ // This variable isn't thread safe, but has loose semantics anyway.
+ dassert(!_isOplog || _cappedMaxDocs == -1);
- // We only want to do the checks occasionally as they are expensive.
- // This variable isn't thread safe, but has loose semantics anyway.
- dassert( !_isOplog || _cappedMaxDocs == -1 );
+ if (!cappedAndNeedDelete())
+ return 0;
- if (!cappedAndNeedDelete())
- return 0;
+ // ensure only one thread at a time can do deletes, otherwise they'll conflict.
+ stdx::unique_lock<stdx::timed_mutex> lock(_cappedDeleterMutex, stdx::defer_lock);
- // ensure only one thread at a time can do deletes, otherwise they'll conflict.
- stdx::unique_lock<stdx::timed_mutex> lock(_cappedDeleterMutex, stdx::defer_lock);
+ if (_cappedMaxDocs != -1) {
+ lock.lock(); // Max docs has to be exact, so have to check every time.
+ } else if (_hasBackgroundThread) {
+ // We are foreground, and there is a background thread,
- if (_cappedMaxDocs != -1) {
- lock.lock(); // Max docs has to be exact, so have to check every time.
+ // Check if we need some back pressure.
+ if ((_dataSize.load() - _cappedMaxSize) < _cappedMaxSizeSlack) {
+ return 0;
}
- else if(_hasBackgroundThread) {
- // We are foreground, and there is a background thread,
- // Check if we need some back pressure.
- if ((_dataSize.load() - _cappedMaxSize) < _cappedMaxSizeSlack) {
+ // Back pressure needed!
+ // We're not actually going to delete anything, but we're going to synchronize
+ // on the deleter thread.
+ // Don't wait forever: we're in a transaction, we could block eviction.
+ if (!lock.try_lock()) {
+ Date_t before = Date_t::now();
+ (void)lock.timed_lock(boost::posix_time::millisec(200));
+ stdx::chrono::milliseconds delay = Date_t::now() - before;
+ _cappedSleep.fetchAndAdd(1);
+ _cappedSleepMS.fetchAndAdd(delay.count());
+ }
+ return 0;
+ } else {
+ if (!lock.try_lock()) {
+ // Someone else is deleting old records. Apply back-pressure if too far behind,
+ // otherwise continue.
+ if ((_dataSize.load() - _cappedMaxSize) < _cappedMaxSizeSlack)
return 0;
- }
- // Back pressure needed!
- // We're not actually going to delete anything, but we're going to syncronize
- // on the deleter thread.
// Don't wait forever: we're in a transaction, we could block eviction.
- if (!lock.try_lock()) {
- Date_t before = Date_t::now();
- (void)lock.timed_lock(boost::posix_time::millisec(200));
- stdx::chrono::milliseconds delay = Date_t::now() - before;
- _cappedSleep.fetchAndAdd(1);
- _cappedSleepMS.fetchAndAdd(delay.count());
- }
- return 0;
- }
- else {
- if (!lock.try_lock()) {
- // Someone else is deleting old records. Apply back-pressure if too far behind,
- // otherwise continue.
- if ((_dataSize.load() - _cappedMaxSize) < _cappedMaxSizeSlack)
- return 0;
-
- // Don't wait forever: we're in a transaction, we could block eviction.
- Date_t before = Date_t::now();
- bool gotLock = lock.timed_lock(boost::posix_time::millisec(200));
- stdx::chrono::milliseconds delay = Date_t::now() - before;
- _cappedSleep.fetchAndAdd(1);
- _cappedSleepMS.fetchAndAdd(delay.count());
- if (!gotLock)
- return 0;
-
- // If we already waited, let someone else do cleanup unless we are significantly
- // over the limit.
- if ((_dataSize.load() - _cappedMaxSize) < (2 * _cappedMaxSizeSlack))
- return 0;
- }
- }
+ Date_t before = Date_t::now();
+ bool gotLock = lock.timed_lock(boost::posix_time::millisec(200));
+ stdx::chrono::milliseconds delay = Date_t::now() - before;
+ _cappedSleep.fetchAndAdd(1);
+ _cappedSleepMS.fetchAndAdd(delay.count());
+ if (!gotLock)
+ return 0;
- return cappedDeleteAsNeeded_inlock(txn, justInserted);
+ // If we already waited, let someone else do cleanup unless we are significantly
+ // over the limit.
+ if ((_dataSize.load() - _cappedMaxSize) < (2 * _cappedMaxSizeSlack))
+ return 0;
+ }
}
- int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn,
- const RecordId& justInserted) {
- // we do this is a side transaction in case it aborts
- WiredTigerRecoveryUnit* realRecoveryUnit =
- checked_cast<WiredTigerRecoveryUnit*>( txn->releaseRecoveryUnit() );
- invariant( realRecoveryUnit );
- WiredTigerSessionCache* sc = realRecoveryUnit->getSessionCache();
- OperationContext::RecoveryUnitState const realRUstate =
- txn->setRecoveryUnit(new WiredTigerRecoveryUnit(sc),
- OperationContext::kNotInUnitOfWork);
-
- WiredTigerRecoveryUnit::get(txn)->markNoTicketRequired(); // realRecoveryUnit already has
- WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
-
- int64_t dataSize = _dataSize.load();
- int64_t numRecords = _numRecords.load();
-
- int64_t sizeOverCap = (dataSize > _cappedMaxSize) ? dataSize - _cappedMaxSize : 0;
- int64_t sizeSaved = 0;
- int64_t docsOverCap = 0, docsRemoved = 0;
- if (_cappedMaxDocs != -1 && numRecords > _cappedMaxDocs)
- docsOverCap = numRecords - _cappedMaxDocs;
-
- try {
- WriteUnitOfWork wuow(txn);
-
- WiredTigerCursor curwrap( _uri, _instanceId, true, txn);
- WT_CURSOR *c = curwrap.get();
- RecordId newestOld;
- int ret = 0;
- while ((sizeSaved < sizeOverCap || docsRemoved < docsOverCap) &&
- (docsRemoved < 20000) &&
- (ret = WT_OP_CHECK(c->next(c))) == 0) {
-
- int64_t key;
- ret = c->get_key(c, &key);
- invariantWTOK(ret);
+ return cappedDeleteAsNeeded_inlock(txn, justInserted);
+}
+
+int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn,
+ const RecordId& justInserted) {
+ // we do this in a side transaction in case it aborts
+ WiredTigerRecoveryUnit* realRecoveryUnit =
+ checked_cast<WiredTigerRecoveryUnit*>(txn->releaseRecoveryUnit());
+ invariant(realRecoveryUnit);
+ WiredTigerSessionCache* sc = realRecoveryUnit->getSessionCache();
+ OperationContext::RecoveryUnitState const realRUstate =
+ txn->setRecoveryUnit(new WiredTigerRecoveryUnit(sc), OperationContext::kNotInUnitOfWork);
+
+ WiredTigerRecoveryUnit::get(txn)->markNoTicketRequired(); // realRecoveryUnit already has a ticket
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+
+ int64_t dataSize = _dataSize.load();
+ int64_t numRecords = _numRecords.load();
+
+ int64_t sizeOverCap = (dataSize > _cappedMaxSize) ? dataSize - _cappedMaxSize : 0;
+ int64_t sizeSaved = 0;
+ int64_t docsOverCap = 0, docsRemoved = 0;
+ if (_cappedMaxDocs != -1 && numRecords > _cappedMaxDocs)
+ docsOverCap = numRecords - _cappedMaxDocs;
+
+ try {
+ WriteUnitOfWork wuow(txn);
+
+ WiredTigerCursor curwrap(_uri, _instanceId, true, txn);
+ WT_CURSOR* c = curwrap.get();
+ RecordId newestOld;
+ int ret = 0;
+ while ((sizeSaved < sizeOverCap || docsRemoved < docsOverCap) && (docsRemoved < 20000) &&
+ (ret = WT_OP_CHECK(c->next(c))) == 0) {
+ int64_t key;
+ ret = c->get_key(c, &key);
+ invariantWTOK(ret);
- // don't go past the record we just inserted
- newestOld = _fromKey(key);
- if ( newestOld >= justInserted ) // TODO: use oldest uncommitted instead
- break;
+ // don't go past the record we just inserted
+ newestOld = _fromKey(key);
+ if (newestOld >= justInserted) // TODO: use oldest uncommitted instead
+ break;
- if ( _shuttingDown )
- break;
+ if (_shuttingDown)
+ break;
- WT_ITEM old_value;
- invariantWTOK(c->get_value(c, &old_value));
+ WT_ITEM old_value;
+ invariantWTOK(c->get_value(c, &old_value));
- ++docsRemoved;
- sizeSaved += old_value.size;
+ ++docsRemoved;
+ sizeSaved += old_value.size;
- if ( _cappedDeleteCallback ) {
- uassertStatusOK(
- _cappedDeleteCallback->aboutToDeleteCapped(
- txn,
- newestOld,
- RecordData(static_cast<const char*>(old_value.data), old_value.size)));
- }
+ if (_cappedDeleteCallback) {
+ uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(
+ txn,
+ newestOld,
+ RecordData(static_cast<const char*>(old_value.data), old_value.size)));
}
+ }
- if (ret != WT_NOTFOUND) {
- invariantWTOK(ret);
+ if (ret != WT_NOTFOUND) {
+ invariantWTOK(ret);
+ }
+
+ if (docsRemoved > 0) {
+ // if we scanned to the end of the collection or past our insert, go back one
+ if (ret == WT_NOTFOUND || newestOld >= justInserted) {
+ ret = WT_OP_CHECK(c->prev(c));
}
+ invariantWTOK(ret);
- if (docsRemoved > 0) {
- // if we scanned to the end of the collection or past our insert, go back one
- if (ret == WT_NOTFOUND || newestOld >= justInserted) {
- ret = WT_OP_CHECK(c->prev(c));
- }
- invariantWTOK(ret);
+ WiredTigerCursor startWrap(_uri, _instanceId, true, txn);
+ WT_CURSOR* start = startWrap.get();
+ ret = WT_OP_CHECK(start->next(start));
+ invariantWTOK(ret);
- WiredTigerCursor startWrap( _uri, _instanceId, true, txn);
- WT_CURSOR* start = startWrap.get();
- ret = WT_OP_CHECK(start->next(start));
+ ret = session->truncate(session, NULL, start, c, NULL);
+ if (ret == ENOENT || ret == WT_NOTFOUND) {
+ // TODO we should remove this case once SERVER-17141 is resolved
+ log() << "Soft failure truncating capped collection. Will try again later.";
+ docsRemoved = 0;
+ } else {
invariantWTOK(ret);
-
- ret = session->truncate(session, NULL, start, c, NULL);
- if (ret == ENOENT || ret == WT_NOTFOUND) {
- // TODO we should remove this case once SERVER-17141 is resolved
- log() << "Soft failure truncating capped collection. Will try again later.";
- docsRemoved = 0;
- }
- else {
- invariantWTOK(ret);
- _changeNumRecords(txn, -docsRemoved);
- _increaseDataSize(txn, -sizeSaved);
- wuow.commit();
- }
+ _changeNumRecords(txn, -docsRemoved);
+ _increaseDataSize(txn, -sizeSaved);
+ wuow.commit();
}
}
- catch ( const WriteConflictException& wce ) {
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
- log() << "got conflict truncating capped, ignoring";
- return 0;
- }
- catch ( ... ) {
- delete txn->releaseRecoveryUnit();
- txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
- throw;
- }
-
+ } catch (const WriteConflictException& wce) {
delete txn->releaseRecoveryUnit();
txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
- return docsRemoved;
- }
-
- StatusWith<RecordId> WiredTigerRecordStore::extractAndCheckLocForOplog(const char* data,
- int len) {
- return oploghack::extractKey(data, len);
+ log() << "got conflict truncating capped, ignoring";
+ return 0;
+ } catch (...) {
+ delete txn->releaseRecoveryUnit();
+ txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
+ throw;
}
- StatusWith<RecordId> WiredTigerRecordStore::insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota ) {
- if ( _isCapped && len > _cappedMaxSize ) {
- return StatusWith<RecordId>( ErrorCodes::BadValue,
- "object to insert exceeds cappedMaxSize" );
- }
-
- RecordId loc;
- if ( _useOplogHack ) {
- StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
- if (!status.isOK())
- return status;
- loc = status.getValue();
- if ( loc > _oplog_highestSeen ) {
- stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
- if ( loc > _oplog_highestSeen ) {
- _oplog_highestSeen = loc;
- }
- }
- }
- else if ( _isCapped ) {
- stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
- loc = _nextId();
- _addUncommitedDiskLoc_inlock( txn, loc );
- }
- else {
- loc = _nextId();
- }
-
- WiredTigerCursor curwrap( _uri, _instanceId, true, txn);
- curwrap.assertInActiveTxn();
- WT_CURSOR *c = curwrap.get();
- invariant( c );
-
- c->set_key(c, _makeKey(loc));
- WiredTigerItem value(data, len);
- c->set_value(c, value.Get());
- int ret = WT_OP_CHECK(c->insert(c));
- if (ret) {
- return StatusWith<RecordId>(wtRCToStatus(ret, "WiredTigerRecordStore::insertRecord"));
- }
-
- _changeNumRecords( txn, 1 );
- _increaseDataSize( txn, len );
+ delete txn->releaseRecoveryUnit();
+ txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
+ return docsRemoved;
+}
- cappedDeleteAsNeeded(txn, loc);
+StatusWith<RecordId> WiredTigerRecordStore::extractAndCheckLocForOplog(const char* data, int len) {
+ return oploghack::extractKey(data, len);
+}
- return StatusWith<RecordId>( loc );
+StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota) {
+ if (_isCapped && len > _cappedMaxSize) {
+ return StatusWith<RecordId>(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
}
- void WiredTigerRecordStore::dealtWithCappedLoc( const RecordId& loc ) {
- stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
- SortedDiskLocs::iterator it = std::find(_uncommittedDiskLocs.begin(),
- _uncommittedDiskLocs.end(),
- loc);
- invariant(it != _uncommittedDiskLocs.end());
- _uncommittedDiskLocs.erase(it);
+ RecordId loc;
+ if (_useOplogHack) {
+ StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
+ if (!status.isOK())
+ return status;
+ loc = status.getValue();
+ if (loc > _oplog_highestSeen) {
+ stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
+ if (loc > _oplog_highestSeen) {
+ _oplog_highestSeen = loc;
+ }
+ }
+ } else if (_isCapped) {
+ stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
+ loc = _nextId();
+ _addUncommitedDiskLoc_inlock(txn, loc);
+ } else {
+ loc = _nextId();
}
- bool WiredTigerRecordStore::isCappedHidden( const RecordId& loc ) const {
- stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
- if (_uncommittedDiskLocs.empty()) {
- return false;
- }
- return _uncommittedDiskLocs.front() <= loc;
+ WiredTigerCursor curwrap(_uri, _instanceId, true, txn);
+ curwrap.assertInActiveTxn();
+ WT_CURSOR* c = curwrap.get();
+ invariant(c);
+
+ c->set_key(c, _makeKey(loc));
+ WiredTigerItem value(data, len);
+ c->set_value(c, value.Get());
+ int ret = WT_OP_CHECK(c->insert(c));
+ if (ret) {
+ return StatusWith<RecordId>(wtRCToStatus(ret, "WiredTigerRecordStore::insertRecord"));
}
- StatusWith<RecordId> WiredTigerRecordStore::insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota ) {
- const int len = doc->documentSize();
+ _changeNumRecords(txn, 1);
+ _increaseDataSize(txn, len);
- boost::shared_array<char> buf( new char[len] );
- doc->writeDocument( buf.get() );
+ cappedDeleteAsNeeded(txn, loc);
- return insertRecord( txn, buf.get(), len, enforceQuota );
- }
+ return StatusWith<RecordId>(loc);
+}
- StatusWith<RecordId> WiredTigerRecordStore::updateRecord( OperationContext* txn,
- const RecordId& loc,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier ) {
- WiredTigerCursor curwrap( _uri, _instanceId, true, txn);
- curwrap.assertInActiveTxn();
- WT_CURSOR *c = curwrap.get();
- invariant( c );
- c->set_key(c, _makeKey(loc));
- int ret = WT_OP_CHECK(c->search(c));
- invariantWTOK(ret);
+void WiredTigerRecordStore::dealtWithCappedLoc(const RecordId& loc) {
+ stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
+ SortedDiskLocs::iterator it =
+ std::find(_uncommittedDiskLocs.begin(), _uncommittedDiskLocs.end(), loc);
+ invariant(it != _uncommittedDiskLocs.end());
+ _uncommittedDiskLocs.erase(it);
+}
- WT_ITEM old_value;
- ret = c->get_value(c, &old_value);
- invariantWTOK(ret);
+bool WiredTigerRecordStore::isCappedHidden(const RecordId& loc) const {
+ stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
+ if (_uncommittedDiskLocs.empty()) {
+ return false;
+ }
+ return _uncommittedDiskLocs.front() <= loc;
+}
- int old_length = old_value.size;
+StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota) {
+ const int len = doc->documentSize();
- c->set_key(c, _makeKey(loc));
- WiredTigerItem value(data, len);
- c->set_value(c, value.Get());
- ret = WT_OP_CHECK(c->insert(c));
- invariantWTOK(ret);
+ boost::shared_array<char> buf(new char[len]);
+ doc->writeDocument(buf.get());
- _increaseDataSize(txn, len - old_length);
+ return insertRecord(txn, buf.get(), len, enforceQuota);
+}
- cappedDeleteAsNeeded(txn, loc);
+StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn,
+ const RecordId& loc,
+ const char* data,
+ int len,
+ bool enforceQuota,
+ UpdateNotifier* notifier) {
+ WiredTigerCursor curwrap(_uri, _instanceId, true, txn);
+ curwrap.assertInActiveTxn();
+ WT_CURSOR* c = curwrap.get();
+ invariant(c);
+ c->set_key(c, _makeKey(loc));
+ int ret = WT_OP_CHECK(c->search(c));
+ invariantWTOK(ret);
+
+ WT_ITEM old_value;
+ ret = c->get_value(c, &old_value);
+ invariantWTOK(ret);
+
+ int old_length = old_value.size;
+
+ c->set_key(c, _makeKey(loc));
+ WiredTigerItem value(data, len);
+ c->set_value(c, value.Get());
+ ret = WT_OP_CHECK(c->insert(c));
+ invariantWTOK(ret);
+
+ _increaseDataSize(txn, len - old_length);
+
+ cappedDeleteAsNeeded(txn, loc);
+
+ return StatusWith<RecordId>(loc);
+}
- return StatusWith<RecordId>( loc );
- }
+bool WiredTigerRecordStore::updateWithDamagesSupported() const {
+ return false;
+}
- bool WiredTigerRecordStore::updateWithDamagesSupported() const {
- return false;
- }
+Status WiredTigerRecordStore::updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages) {
+ invariant(false);
+}
- Status WiredTigerRecordStore::updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages ) {
- invariant(false);
+void WiredTigerRecordStore::_oplogSetStartHack(WiredTigerRecoveryUnit* wru) const {
+ stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
+ if (_uncommittedDiskLocs.empty()) {
+ wru->setOplogReadTill(_oplog_highestSeen);
+ } else {
+ wru->setOplogReadTill(_uncommittedDiskLocs.front());
}
+}
- void WiredTigerRecordStore::_oplogSetStartHack( WiredTigerRecoveryUnit* wru ) const {
- stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
- if ( _uncommittedDiskLocs.empty() ) {
- wru->setOplogReadTill( _oplog_highestSeen );
- }
- else {
- wru->setOplogReadTill( _uncommittedDiskLocs.front() );
+std::unique_ptr<RecordCursor> WiredTigerRecordStore::getCursor(OperationContext* txn,
+ bool forward) const {
+ if (_isOplog && forward) {
+ WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
+ if (!wru->inActiveTxn() || wru->getOplogReadTill().isNull()) {
+ // if we don't have a session, we have no snapshot, so we can update our view
+ _oplogSetStartHack(wru);
}
}
- std::unique_ptr<RecordCursor> WiredTigerRecordStore::getCursor(OperationContext* txn,
- bool forward) const {
-
- if ( _isOplog && forward ) {
- WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
- if ( !wru->inActiveTxn() || wru->getOplogReadTill().isNull() ) {
- // if we don't have a session, we have no snapshot, so we can update our view
- _oplogSetStartHack( wru );
- }
- }
+ return stdx::make_unique<Cursor>(txn, *this, forward);
+}
- return stdx::make_unique<Cursor>(txn, *this, forward);
- }
+std::vector<std::unique_ptr<RecordCursor>> WiredTigerRecordStore::getManyCursors(
+ OperationContext* txn) const {
+ std::vector<std::unique_ptr<RecordCursor>> cursors(1);
+ cursors[0] = stdx::make_unique<Cursor>(txn,
+ *this,
+ /*forward=*/true,
+ /*forParallelCollectionScan=*/true);
+ return cursors;
+}
- std::vector<std::unique_ptr<RecordCursor>> WiredTigerRecordStore::getManyCursors(
- OperationContext* txn) const {
- std::vector<std::unique_ptr<RecordCursor>> cursors(1);
- cursors[0] = stdx::make_unique<Cursor>(txn, *this, /*forward=*/true,
- /*forParallelCollectionScan=*/true);
- return cursors;
+Status WiredTigerRecordStore::truncate(OperationContext* txn) {
+ WiredTigerCursor startWrap(_uri, _instanceId, true, txn);
+ WT_CURSOR* start = startWrap.get();
+ int ret = WT_OP_CHECK(start->next(start));
+ // Empty collections don't have anything to truncate.
+ if (ret == WT_NOTFOUND) {
+ return Status::OK();
}
+ invariantWTOK(ret);
- Status WiredTigerRecordStore::truncate( OperationContext* txn ) {
- WiredTigerCursor startWrap( _uri, _instanceId, true, txn);
- WT_CURSOR* start = startWrap.get();
- int ret = WT_OP_CHECK(start->next(start));
- //Empty collections don't have anything to truncate.
- if (ret == WT_NOTFOUND) {
- return Status::OK();
- }
- invariantWTOK(ret);
+ WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
+ invariantWTOK(WT_OP_CHECK(session->truncate(session, NULL, start, NULL, NULL)));
+ _changeNumRecords(txn, -numRecords(txn));
+ _increaseDataSize(txn, -dataSize(txn));
- WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
- invariantWTOK(WT_OP_CHECK(session->truncate(session, NULL, start, NULL, NULL)));
- _changeNumRecords(txn, -numRecords(txn));
- _increaseDataSize(txn, -dataSize(txn));
+ return Status::OK();
+}
- return Status::OK();
- }
+Status WiredTigerRecordStore::compact(OperationContext* txn,
+ RecordStoreCompactAdaptor* adaptor,
+ const CompactOptions* options,
+ CompactStats* stats) {
+ WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
+ WiredTigerSession* session = cache->getSession();
+ WT_SESSION* s = session->getSession();
+ int ret = s->compact(s, getURI().c_str(), "timeout=0");
+ invariantWTOK(ret);
+ cache->releaseSession(session);
+ return Status::OK();
+}
- Status WiredTigerRecordStore::compact( OperationContext* txn,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* options,
- CompactStats* stats ) {
- WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
- WiredTigerSession* session = cache->getSession();
- WT_SESSION *s = session->getSession();
- int ret = s->compact(s, getURI().c_str(), "timeout=0");
- invariantWTOK(ret);
- cache->releaseSession(session);
- return Status::OK();
+Status WiredTigerRecordStore::validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output) {
+ {
+ int err = WiredTigerUtil::verifyTable(txn, _uri, &results->errors);
+ if (err == EBUSY) {
+ const char* msg = "verify() returned EBUSY. Not treating as invalid.";
+ warning() << msg;
+ results->errors.push_back(msg);
+ } else if (err) {
+ std::string msg = str::stream() << "verify() returned " << wiredtiger_strerror(err)
+ << ". "
+ << "This indicates structural damage. "
+ << "Not examining individual documents.";
+ error() << msg;
+ results->errors.push_back(msg);
+ results->valid = false;
+ return Status::OK();
+ }
}
- Status WiredTigerRecordStore::validate( OperationContext* txn,
- bool full,
- bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results,
- BSONObjBuilder* output ) {
-
- {
- int err = WiredTigerUtil::verifyTable(txn, _uri, &results->errors);
- if (err == EBUSY) {
- const char* msg = "verify() returned EBUSY. Not treating as invalid.";
- warning() << msg;
- results->errors.push_back(msg);
- }
- else if (err) {
- std::string msg = str::stream()
- << "verify() returned " << wiredtiger_strerror(err) << ". "
- << "This indicates structural damage. "
- << "Not examining individual documents.";
- error() << msg;
- results->errors.push_back(msg);
+ long long nrecords = 0;
+ long long dataSizeTotal = 0;
+ results->valid = true;
+ Cursor cursor(txn, *this, true);
+ while (auto record = cursor.next()) {
+ ++nrecords;
+ if (full && scanData) {
+ size_t dataSize;
+ Status status = adaptor->validate(record->data, &dataSize);
+ if (!status.isOK()) {
results->valid = false;
- return Status::OK();
+ results->errors.push_back(str::stream() << record->id << " is corrupted");
}
+ dataSizeTotal += static_cast<long long>(dataSize);
}
+ }
- long long nrecords = 0;
- long long dataSizeTotal = 0;
- results->valid = true;
- Cursor cursor(txn, *this, true);
- while (auto record = cursor.next()) {
- ++nrecords;
- if ( full && scanData ) {
- size_t dataSize;
- Status status = adaptor->validate( record->data, &dataSize );
- if ( !status.isOK() ) {
- results->valid = false;
- results->errors.push_back( str::stream() << record->id << " is corrupted" );
- }
- dataSizeTotal += static_cast<long long>(dataSize);
- }
+ if (_sizeStorer && full && scanData && results->valid) {
+ if (nrecords != _numRecords.load() || dataSizeTotal != _dataSize.load()) {
+ warning() << _uri << ": Existing record and data size counters (" << _numRecords.load()
+ << " records " << _dataSize.load() << " bytes) "
+ << "are inconsistent with full validation results (" << nrecords
+ << " records " << dataSizeTotal << " bytes). "
+ << "Updating counters with new values.";
}
- if (_sizeStorer && full && scanData && results->valid) {
- if (nrecords != _numRecords.load() || dataSizeTotal != _dataSize.load()) {
- warning() << _uri << ": Existing record and data size counters ("
- << _numRecords.load() << " records " << _dataSize.load() << " bytes) "
- << "are inconsistent with full validation results ("
- << nrecords << " records " << dataSizeTotal << " bytes). "
- << "Updating counters with new values.";
- }
-
- _numRecords.store(nrecords);
- _dataSize.store(dataSizeTotal);
-
- long long oldNumRecords;
- long long oldDataSize;
- _sizeStorer->loadFromCache(_uri, &oldNumRecords, &oldDataSize);
- if (nrecords != oldNumRecords || dataSizeTotal != oldDataSize) {
- warning() << _uri << ": Existing data in size storer ("
- << oldNumRecords << " records " << oldDataSize << " bytes) "
- << "is inconsistent with full validation results ("
- << _numRecords.load() << " records " << _dataSize.load() << " bytes). "
- << "Updating size storer with new values.";
- }
-
- _sizeStorer->storeToCache(_uri, _numRecords.load(), _dataSize.load());
+ _numRecords.store(nrecords);
+ _dataSize.store(dataSizeTotal);
+
+ long long oldNumRecords;
+ long long oldDataSize;
+ _sizeStorer->loadFromCache(_uri, &oldNumRecords, &oldDataSize);
+ if (nrecords != oldNumRecords || dataSizeTotal != oldDataSize) {
+ warning() << _uri << ": Existing data in size storer (" << oldNumRecords << " records "
+ << oldDataSize << " bytes) "
+ << "is inconsistent with full validation results (" << _numRecords.load()
+ << " records " << _dataSize.load() << " bytes). "
+ << "Updating size storer with new values.";
}
- output->appendNumber( "nrecords", nrecords );
- return Status::OK();
+ _sizeStorer->storeToCache(_uri, _numRecords.load(), _dataSize.load());
}
- void WiredTigerRecordStore::appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const {
- result->appendBool( "capped", _isCapped );
- if ( _isCapped ) {
- result->appendIntOrLL("max", _cappedMaxDocs );
- result->appendIntOrLL("maxSize", static_cast<long long>(_cappedMaxSize / scale) );
- result->appendIntOrLL("sleepCount", _cappedSleep.load());
- result->appendIntOrLL("sleepMS", _cappedSleepMS.load());
- }
- WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
- WT_SESSION* s = session->getSession();
- BSONObjBuilder bob(result->subobjStart(kWiredTigerEngineName));
- {
- BSONObjBuilder metadata(bob.subobjStart("metadata"));
- Status status = WiredTigerUtil::getApplicationMetadata(txn, getURI(), &metadata);
- if (!status.isOK()) {
- metadata.append("error", "unable to retrieve metadata");
- metadata.append("code", static_cast<int>(status.code()));
- metadata.append("reason", status.reason());
- }
- }
-
- std::string type, sourceURI;
- WiredTigerUtil::fetchTypeAndSourceURI(txn, _uri, &type, &sourceURI);
- StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(txn, sourceURI);
- StringData creationStringName("creationString");
- if (!metadataResult.isOK()) {
- BSONObjBuilder creationString(bob.subobjStart(creationStringName));
- creationString.append("error", "unable to retrieve creation config");
- creationString.append("code", static_cast<int>(metadataResult.getStatus().code()));
- creationString.append("reason", metadataResult.getStatus().reason());
- }
- else {
- bob.append("creationString", metadataResult.getValue());
- // Type can be "lsm" or "file"
- bob.append("type", type);
- }
+ output->appendNumber("nrecords", nrecords);
+ return Status::OK();
+}
- Status status = WiredTigerUtil::exportTableToBSON(s, "statistics:" + getURI(),
- "statistics=(fast)", &bob);
+void WiredTigerRecordStore::appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const {
+ result->appendBool("capped", _isCapped);
+ if (_isCapped) {
+ result->appendIntOrLL("max", _cappedMaxDocs);
+ result->appendIntOrLL("maxSize", static_cast<long long>(_cappedMaxSize / scale));
+ result->appendIntOrLL("sleepCount", _cappedSleep.load());
+ result->appendIntOrLL("sleepMS", _cappedSleepMS.load());
+ }
+ WiredTigerSession* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn);
+ WT_SESSION* s = session->getSession();
+ BSONObjBuilder bob(result->subobjStart(kWiredTigerEngineName));
+ {
+ BSONObjBuilder metadata(bob.subobjStart("metadata"));
+ Status status = WiredTigerUtil::getApplicationMetadata(txn, getURI(), &metadata);
if (!status.isOK()) {
- bob.append("error", "unable to retrieve statistics");
- bob.append("code", static_cast<int>(status.code()));
- bob.append("reason", status.reason());
+ metadata.append("error", "unable to retrieve metadata");
+ metadata.append("code", static_cast<int>(status.code()));
+ metadata.append("reason", status.reason());
}
-
}
- Status WiredTigerRecordStore::oplogDiskLocRegister( OperationContext* txn,
- const Timestamp& opTime ) {
- StatusWith<RecordId> loc = oploghack::keyForOptime( opTime );
- if ( !loc.isOK() )
- return loc.getStatus();
+ std::string type, sourceURI;
+ WiredTigerUtil::fetchTypeAndSourceURI(txn, _uri, &type, &sourceURI);
+ StatusWith<std::string> metadataResult = WiredTigerUtil::getMetadata(txn, sourceURI);
+ StringData creationStringName("creationString");
+ if (!metadataResult.isOK()) {
+ BSONObjBuilder creationString(bob.subobjStart(creationStringName));
+ creationString.append("error", "unable to retrieve creation config");
+ creationString.append("code", static_cast<int>(metadataResult.getStatus().code()));
+ creationString.append("reason", metadataResult.getStatus().reason());
+ } else {
+ bob.append("creationString", metadataResult.getValue());
+ // Type can be "lsm" or "file"
+ bob.append("type", type);
+ }
- stdx::lock_guard<stdx::mutex> lk( _uncommittedDiskLocsMutex );
- _addUncommitedDiskLoc_inlock( txn, loc.getValue() );
- return Status::OK();
+ Status status =
+ WiredTigerUtil::exportTableToBSON(s, "statistics:" + getURI(), "statistics=(fast)", &bob);
+ if (!status.isOK()) {
+ bob.append("error", "unable to retrieve statistics");
+ bob.append("code", static_cast<int>(status.code()));
+ bob.append("reason", status.reason());
}
+}
- class WiredTigerRecordStore::CappedInsertChange : public RecoveryUnit::Change {
- public:
- CappedInsertChange( WiredTigerRecordStore* rs, const RecordId& loc )
- : _rs( rs ), _loc( loc ) {
- }
+Status WiredTigerRecordStore::oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) {
+ StatusWith<RecordId> loc = oploghack::keyForOptime(opTime);
+ if (!loc.isOK())
+ return loc.getStatus();
- virtual void commit() {
- _rs->dealtWithCappedLoc( _loc );
- }
+ stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
+ _addUncommitedDiskLoc_inlock(txn, loc.getValue());
+ return Status::OK();
+}
- virtual void rollback() {
- _rs->dealtWithCappedLoc( _loc );
- }
+class WiredTigerRecordStore::CappedInsertChange : public RecoveryUnit::Change {
+public:
+ CappedInsertChange(WiredTigerRecordStore* rs, const RecordId& loc) : _rs(rs), _loc(loc) {}
- private:
- WiredTigerRecordStore* _rs;
- RecordId _loc;
- };
-
- void WiredTigerRecordStore::_addUncommitedDiskLoc_inlock( OperationContext* txn,
- const RecordId& loc ) {
- // todo: make this a dassert at some point
- invariant( _uncommittedDiskLocs.empty() ||
- _uncommittedDiskLocs.back() < loc );
- _uncommittedDiskLocs.push_back( loc );
- txn->recoveryUnit()->registerChange( new CappedInsertChange( this, loc ) );
- _oplog_highestSeen = loc;
+ virtual void commit() {
+ _rs->dealtWithCappedLoc(_loc);
}
- boost::optional<RecordId> WiredTigerRecordStore::oplogStartHack(
- OperationContext* txn,
- const RecordId& startingPosition) const {
-
- if (!_useOplogHack)
- return boost::none;
-
- {
- WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
- _oplogSetStartHack( wru );
- }
+ virtual void rollback() {
+ _rs->dealtWithCappedLoc(_loc);
+ }
- WiredTigerCursor cursor(_uri, _instanceId, true, txn);
- WT_CURSOR* c = cursor.get();
+private:
+ WiredTigerRecordStore* _rs;
+ RecordId _loc;
+};
+
+void WiredTigerRecordStore::_addUncommitedDiskLoc_inlock(OperationContext* txn,
+ const RecordId& loc) {
+ // todo: make this a dassert at some point
+ invariant(_uncommittedDiskLocs.empty() || _uncommittedDiskLocs.back() < loc);
+ _uncommittedDiskLocs.push_back(loc);
+ txn->recoveryUnit()->registerChange(new CappedInsertChange(this, loc));
+ _oplog_highestSeen = loc;
+}
- int cmp;
- c->set_key(c, _makeKey(startingPosition));
- int ret = WT_OP_CHECK(c->search_near(c, &cmp));
- if (ret == 0 && cmp > 0) ret = c->prev(c); // landed one higher than startingPosition
- if (ret == WT_NOTFOUND) return RecordId(); // nothing <= startingPosition
- invariantWTOK(ret);
+boost::optional<RecordId> WiredTigerRecordStore::oplogStartHack(
+ OperationContext* txn, const RecordId& startingPosition) const {
+ if (!_useOplogHack)
+ return boost::none;
- int64_t key;
- ret = c->get_key(c, &key);
- invariantWTOK(ret);
- return _fromKey(key);
+ {
+ WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(txn);
+ _oplogSetStartHack(wru);
}
- void WiredTigerRecordStore::updateStatsAfterRepair(OperationContext* txn,
- long long numRecords,
- long long dataSize) {
- _numRecords.store(numRecords);
- _dataSize.store(dataSize);
- _sizeStorer->storeToCache(_uri, numRecords, dataSize);
- }
+ WiredTigerCursor cursor(_uri, _instanceId, true, txn);
+ WT_CURSOR* c = cursor.get();
+
+ int cmp;
+ c->set_key(c, _makeKey(startingPosition));
+ int ret = WT_OP_CHECK(c->search_near(c, &cmp));
+ if (ret == 0 && cmp > 0)
+ ret = c->prev(c); // landed one higher than startingPosition
+ if (ret == WT_NOTFOUND)
+ return RecordId(); // nothing <= startingPosition
+ invariantWTOK(ret);
+
+ int64_t key;
+ ret = c->get_key(c, &key);
+ invariantWTOK(ret);
+ return _fromKey(key);
+}
- RecordId WiredTigerRecordStore::_nextId() {
- invariant(!_useOplogHack);
- RecordId out = RecordId(_nextIdNum.fetchAndAdd(1));
- invariant(out.isNormal());
- return out;
- }
+void WiredTigerRecordStore::updateStatsAfterRepair(OperationContext* txn,
+ long long numRecords,
+ long long dataSize) {
+ _numRecords.store(numRecords);
+ _dataSize.store(dataSize);
+ _sizeStorer->storeToCache(_uri, numRecords, dataSize);
+}
- WiredTigerRecoveryUnit* WiredTigerRecordStore::_getRecoveryUnit( OperationContext* txn ) {
- return checked_cast<WiredTigerRecoveryUnit*>( txn->recoveryUnit() );
- }
+RecordId WiredTigerRecordStore::_nextId() {
+ invariant(!_useOplogHack);
+ RecordId out = RecordId(_nextIdNum.fetchAndAdd(1));
+ invariant(out.isNormal());
+ return out;
+}
- class WiredTigerRecordStore::NumRecordsChange : public RecoveryUnit::Change {
- public:
- NumRecordsChange(WiredTigerRecordStore* rs, int64_t diff) :_rs(rs), _diff(diff) {}
- virtual void commit() {}
- virtual void rollback() {
- _rs->_numRecords.fetchAndAdd( -_diff );
- }
+WiredTigerRecoveryUnit* WiredTigerRecordStore::_getRecoveryUnit(OperationContext* txn) {
+ return checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit());
+}
- private:
- WiredTigerRecordStore* _rs;
- int64_t _diff;
- };
-
- void WiredTigerRecordStore::_changeNumRecords( OperationContext* txn, int64_t diff ) {
- txn->recoveryUnit()->registerChange(new NumRecordsChange(this, diff));
- if ( diff > 0 ) {
- if ( _numRecords.fetchAndAdd( diff ) < diff )
- _numRecords.store( diff );
- } else if ( _numRecords.fetchAndAdd( diff ) < 0 ) {
- _numRecords.store( 0 );
- }
+class WiredTigerRecordStore::NumRecordsChange : public RecoveryUnit::Change {
+public:
+ NumRecordsChange(WiredTigerRecordStore* rs, int64_t diff) : _rs(rs), _diff(diff) {}
+ virtual void commit() {}
+ virtual void rollback() {
+ _rs->_numRecords.fetchAndAdd(-_diff);
}
- class WiredTigerRecordStore::DataSizeChange : public RecoveryUnit::Change {
- public:
- DataSizeChange(WiredTigerRecordStore* rs, int amount) :_rs(rs), _amount(amount) {}
- virtual void commit() {}
- virtual void rollback() {
- _rs->_increaseDataSize( NULL, -_amount );
- }
+private:
+ WiredTigerRecordStore* _rs;
+ int64_t _diff;
+};
+
+void WiredTigerRecordStore::_changeNumRecords(OperationContext* txn, int64_t diff) {
+ txn->recoveryUnit()->registerChange(new NumRecordsChange(this, diff));
+ if (diff > 0) {
+ if (_numRecords.fetchAndAdd(diff) < diff)
+ _numRecords.store(diff);
+ } else if (_numRecords.fetchAndAdd(diff) < 0) {
+ _numRecords.store(0);
+ }
+}
- private:
- WiredTigerRecordStore* _rs;
- bool _amount;
- };
+class WiredTigerRecordStore::DataSizeChange : public RecoveryUnit::Change {
+public:
+ DataSizeChange(WiredTigerRecordStore* rs, int amount) : _rs(rs), _amount(amount) {}
+ virtual void commit() {}
+ virtual void rollback() {
+ _rs->_increaseDataSize(NULL, -_amount);
+ }
- void WiredTigerRecordStore::_increaseDataSize( OperationContext* txn, int amount ) {
- if ( txn )
- txn->recoveryUnit()->registerChange(new DataSizeChange(this, amount));
+private:
+ WiredTigerRecordStore* _rs;
+ bool _amount;
+};
- if ( _dataSize.fetchAndAdd(amount) < 0 ) {
- if ( amount > 0 ) {
- _dataSize.store( amount );
- }
- else {
- _dataSize.store( 0 );
- }
- }
+void WiredTigerRecordStore::_increaseDataSize(OperationContext* txn, int amount) {
+ if (txn)
+ txn->recoveryUnit()->registerChange(new DataSizeChange(this, amount));
- if ( _sizeStorer && _sizeStorerCounter++ % 1000 == 0 ) {
- _sizeStorer->storeToCache( _uri, _numRecords.load(), _dataSize.load() );
+ if (_dataSize.fetchAndAdd(amount) < 0) {
+ if (amount > 0) {
+ _dataSize.store(amount);
+ } else {
+ _dataSize.store(0);
}
}
- int64_t WiredTigerRecordStore::_makeKey( const RecordId& loc ) {
- return loc.repr();
- }
- RecordId WiredTigerRecordStore::_fromKey( int64_t key ) {
- return RecordId(key);
+ if (_sizeStorer && _sizeStorerCounter++ % 1000 == 0) {
+ _sizeStorer->storeToCache(_uri, _numRecords.load(), _dataSize.load());
}
+}
- void WiredTigerRecordStore::temp_cappedTruncateAfter( OperationContext* txn,
- RecordId end,
- bool inclusive ) {
- WriteUnitOfWork wuow(txn);
- Cursor cursor(txn, *this);
- while (auto record = cursor.next()) {
- RecordId loc = record->id;
- if ( end < loc || ( inclusive && end == loc ) ) {
- deleteRecord( txn, loc );
- }
+int64_t WiredTigerRecordStore::_makeKey(const RecordId& loc) {
+ return loc.repr();
+}
+RecordId WiredTigerRecordStore::_fromKey(int64_t key) {
+ return RecordId(key);
+}
+
+void WiredTigerRecordStore::temp_cappedTruncateAfter(OperationContext* txn,
+ RecordId end,
+ bool inclusive) {
+ WriteUnitOfWork wuow(txn);
+ Cursor cursor(txn, *this);
+ while (auto record = cursor.next()) {
+ RecordId loc = record->id;
+ if (end < loc || (inclusive && end == loc)) {
+ deleteRecord(txn, loc);
}
- wuow.commit();
}
+ wuow.commit();
+}
}
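
The reformatted .cpp above relies throughout on the RecoveryUnit::Change pattern to keep the in-memory record and data-size counters consistent with the storage transaction: the counter is bumped eagerly, and a small Change object registered with the recovery unit reverses the bump if the write unit of work rolls back (see NumRecordsChange and DataSizeChange). A minimal, self-contained sketch of that idea follows; Change, Counter and Undo here are illustrative stand-ins, not MongoDB types.

#include <atomic>
#include <cstdint>
#include <memory>
#include <vector>

// Stand-in for RecoveryUnit::Change: commit() is a no-op because the counter
// was already adjusted eagerly; rollback() undoes that eager adjustment.
struct Change {
    virtual ~Change() {}
    virtual void commit() = 0;
    virtual void rollback() = 0;
};

class Counter {
public:
    // Adjust the counter now and register an undo that runs only on rollback.
    void add(int64_t diff, std::vector<std::unique_ptr<Change>>* txnChanges) {
        struct Undo : Change {
            Undo(std::atomic<int64_t>* v, int64_t d) : _v(v), _d(d) {}
            void commit() override {}                // nothing left to do
            void rollback() override { *_v -= _d; }  // revert the eager update
            std::atomic<int64_t>* _v;
            int64_t _d;
        };
        _value += diff;  // eager, like _numRecords.fetchAndAdd in the diff
        txnChanges->push_back(std::make_unique<Undo>(&_value, diff));
    }

    int64_t get() const { return _value; }

private:
    std::atomic<int64_t> _value{0};
};

When every registered change is committed the eager value simply stands; when the transaction rolls back, running the changes in reverse restores the previous count, which is exactly how _changeNumRecords and _increaseDataSize stay honest without taking a lock.
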
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 00fd04d42b9..1aa0dec93e7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -49,217 +49,225 @@
namespace mongo {
- class RecoveryUnit;
- class WiredTigerCursor;
- class WiredTigerRecoveryUnit;
- class WiredTigerSizeStorer;
-
- extern const std::string kWiredTigerEngineName;
-
- class WiredTigerRecordStore : public RecordStore {
- public:
-
- /**
- * Parses collections options for wired tiger configuration string for table creation.
- * The document 'options' is typically obtained from the 'wiredTiger' field of
- * CollectionOptions::storageEngine.
- */
- static StatusWith<std::string> parseOptionsField(const BSONObj options);
-
- /**
- * Creates a configuration string suitable for 'config' parameter in WT_SESSION::create().
- * Configuration string is constructed from:
- * built-in defaults
- * storageEngine.wiredTiger.configString in 'options'
- * 'extraStrings'
- * Performs simple validation on the supplied parameters.
- * Returns error status if validation fails.
- * Note that even if this function returns an OK status, WT_SESSION:create() may still
- * fail with the constructed configuration string.
- */
- static StatusWith<std::string> generateCreateString(StringData ns,
- const CollectionOptions &options,
- StringData extraStrings);
-
- WiredTigerRecordStore(OperationContext* txn,
- StringData ns,
- StringData uri,
- bool isCapped = false,
- int64_t cappedMaxSize = -1,
- int64_t cappedMaxDocs = -1,
- CappedDocumentDeleteCallback* cappedDeleteCallback = NULL,
- WiredTigerSizeStorer* sizeStorer = NULL );
+class RecoveryUnit;
+class WiredTigerCursor;
+class WiredTigerRecoveryUnit;
+class WiredTigerSizeStorer;
+
+extern const std::string kWiredTigerEngineName;
+
+class WiredTigerRecordStore : public RecordStore {
+public:
+ /**
+ * Parses collections options for wired tiger configuration string for table creation.
+ * The document 'options' is typically obtained from the 'wiredTiger' field of
+ * CollectionOptions::storageEngine.
+ */
+ static StatusWith<std::string> parseOptionsField(const BSONObj options);
+
+ /**
+ * Creates a configuration string suitable for 'config' parameter in WT_SESSION::create().
+ * Configuration string is constructed from:
+ * built-in defaults
+ * storageEngine.wiredTiger.configString in 'options'
+ * 'extraStrings'
+ * Performs simple validation on the supplied parameters.
+ * Returns error status if validation fails.
+ * Note that even if this function returns an OK status, WT_SESSION:create() may still
+ * fail with the constructed configuration string.
+ */
+ static StatusWith<std::string> generateCreateString(StringData ns,
+ const CollectionOptions& options,
+ StringData extraStrings);
+
+ WiredTigerRecordStore(OperationContext* txn,
+ StringData ns,
+ StringData uri,
+ bool isCapped = false,
+ int64_t cappedMaxSize = -1,
+ int64_t cappedMaxDocs = -1,
+ CappedDocumentDeleteCallback* cappedDeleteCallback = NULL,
+ WiredTigerSizeStorer* sizeStorer = NULL);
+
+ virtual ~WiredTigerRecordStore();
+
+ // name of the RecordStore implementation
+ virtual const char* name() const;
+
+ virtual long long dataSize(OperationContext* txn) const;
+
+ virtual long long numRecords(OperationContext* txn) const;
+
+ virtual bool isCapped() const;
+
+ virtual int64_t storageSize(OperationContext* txn,
+ BSONObjBuilder* extraInfo = NULL,
+ int infoLevel = 0) const;
+
+ // CRUD related
+
+ virtual RecordData dataFor(OperationContext* txn, const RecordId& loc) const;
+
+ virtual bool findRecord(OperationContext* txn, const RecordId& loc, RecordData* out) const;
+
+ virtual void deleteRecord(OperationContext* txn, const RecordId& dl);
+
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const char* data,
+ int len,
+ bool enforceQuota);
+
+ virtual StatusWith<RecordId> insertRecord(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota);
+
+ virtual StatusWith<RecordId> updateRecord(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* data,
+ int len,
+ bool enforceQuota,
+ UpdateNotifier* notifier);
+
+ virtual bool updateWithDamagesSupported() const;
+
+ virtual Status updateWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const RecordData& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages);
+
+ std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final;
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const final;
+
+ virtual Status truncate(OperationContext* txn);
+
+ virtual bool compactSupported() const {
+ return true;
+ }
+ virtual bool compactsInPlace() const {
+ return true;
+ }
+
+ virtual Status compact(OperationContext* txn,
+ RecordStoreCompactAdaptor* adaptor,
+ const CompactOptions* options,
+ CompactStats* stats);
+
+ virtual Status validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateAdaptor* adaptor,
+ ValidateResults* results,
+ BSONObjBuilder* output);
+
+ virtual void appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* result,
+ double scale) const;
+
+ virtual void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+
+ virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
+ const RecordId& startingPosition) const;
+
+ virtual Status oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime);
+
+ virtual void updateStatsAfterRepair(OperationContext* txn,
+ long long numRecords,
+ long long dataSize);
+
+ bool isOplog() const {
+ return _isOplog;
+ }
+ bool usingOplogHack() const {
+ return _useOplogHack;
+ }
- virtual ~WiredTigerRecordStore();
+ void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
+ _cappedDeleteCallback = cb;
+ }
+ int64_t cappedMaxDocs() const;
+ int64_t cappedMaxSize() const;
- // name of the RecordStore implementation
- virtual const char* name() const;
+ const std::string& getURI() const {
+ return _uri;
+ }
+ uint64_t instanceId() const {
+ return _instanceId;
+ }
- virtual long long dataSize( OperationContext *txn ) const;
-
- virtual long long numRecords( OperationContext* txn ) const;
-
- virtual bool isCapped() const;
-
- virtual int64_t storageSize( OperationContext* txn,
- BSONObjBuilder* extraInfo = NULL,
- int infoLevel = 0 ) const;
-
- // CRUD related
-
- virtual RecordData dataFor( OperationContext* txn, const RecordId& loc ) const;
-
- virtual bool findRecord( OperationContext* txn, const RecordId& loc, RecordData* out ) const;
-
- virtual void deleteRecord( OperationContext* txn, const RecordId& dl );
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const char* data,
- int len,
- bool enforceQuota );
-
- virtual StatusWith<RecordId> insertRecord( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota );
-
- virtual StatusWith<RecordId> updateRecord( OperationContext* txn,
- const RecordId& oldLocation,
- const char* data,
- int len,
- bool enforceQuota,
- UpdateNotifier* notifier );
-
- virtual bool updateWithDamagesSupported() const;
-
- virtual Status updateWithDamages( OperationContext* txn,
- const RecordId& loc,
- const RecordData& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages );
-
- std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward) const final;
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(
- OperationContext* txn) const final;
-
- virtual Status truncate( OperationContext* txn );
-
- virtual bool compactSupported() const { return true; }
- virtual bool compactsInPlace() const { return true; }
-
- virtual Status compact( OperationContext* txn,
- RecordStoreCompactAdaptor* adaptor,
- const CompactOptions* options,
- CompactStats* stats );
-
- virtual Status validate( OperationContext* txn,
- bool full,
- bool scanData,
- ValidateAdaptor* adaptor,
- ValidateResults* results,
- BSONObjBuilder* output );
-
- virtual void appendCustomStats( OperationContext* txn,
- BSONObjBuilder* result,
- double scale ) const;
-
- virtual void temp_cappedTruncateAfter(OperationContext* txn,
- RecordId end,
- bool inclusive);
-
- virtual boost::optional<RecordId> oplogStartHack(OperationContext* txn,
- const RecordId& startingPosition) const;
-
- virtual Status oplogDiskLocRegister( OperationContext* txn,
- const Timestamp& opTime );
-
- virtual void updateStatsAfterRepair(OperationContext* txn,
- long long numRecords,
- long long dataSize);
-
- bool isOplog() const { return _isOplog; }
- bool usingOplogHack() const { return _useOplogHack; }
-
- void setCappedDeleteCallback(CappedDocumentDeleteCallback* cb) {
- _cappedDeleteCallback = cb;
- }
- int64_t cappedMaxDocs() const;
- int64_t cappedMaxSize() const;
-
- const std::string& getURI() const { return _uri; }
- uint64_t instanceId() const { return _instanceId; }
-
- void setSizeStorer( WiredTigerSizeStorer* ss ) { _sizeStorer = ss; }
-
- void dealtWithCappedLoc( const RecordId& loc );
- bool isCappedHidden( const RecordId& loc ) const;
-
- bool inShutdown() const;
- int64_t cappedDeleteAsNeeded(OperationContext* txn,
- const RecordId& justInserted);
-
- int64_t cappedDeleteAsNeeded_inlock(OperationContext* txn,
- const RecordId& justInserted);
-
- stdx::timed_mutex& cappedDeleterMutex() { return _cappedDeleterMutex; }
-
- private:
- class Cursor;
-
- class CappedInsertChange;
- class NumRecordsChange;
- class DataSizeChange;
-
- static WiredTigerRecoveryUnit* _getRecoveryUnit( OperationContext* txn );
-
- static int64_t _makeKey(const RecordId &loc);
- static RecordId _fromKey(int64_t k);
-
- void _addUncommitedDiskLoc_inlock( OperationContext* txn, const RecordId& loc );
-
- RecordId _nextId();
- void _setId(RecordId loc);
- bool cappedAndNeedDelete() const;
- void _changeNumRecords(OperationContext* txn, int64_t diff);
- void _increaseDataSize(OperationContext* txn, int amount);
- RecordData _getData( const WiredTigerCursor& cursor) const;
- StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len);
- void _oplogSetStartHack( WiredTigerRecoveryUnit* wru ) const;
-
- const std::string _uri;
- const uint64_t _instanceId; // not persisted
-
- // The capped settings should not be updated once operations have started
- const bool _isCapped;
- const bool _isOplog;
- const int64_t _cappedMaxSize;
- const int64_t _cappedMaxSizeSlack; // when to start applying backpressure
- const int64_t _cappedMaxDocs;
- AtomicInt64 _cappedSleep;
- AtomicInt64 _cappedSleepMS;
- CappedDocumentDeleteCallback* _cappedDeleteCallback;
- int _cappedDeleteCheckCount; // see comment in ::cappedDeleteAsNeeded
- mutable stdx::timed_mutex _cappedDeleterMutex; // see comment in ::cappedDeleteAsNeeded
-
- const bool _useOplogHack;
-
- typedef std::vector<RecordId> SortedDiskLocs;
- SortedDiskLocs _uncommittedDiskLocs;
- RecordId _oplog_visibleTo;
- RecordId _oplog_highestSeen;
- mutable stdx::mutex _uncommittedDiskLocsMutex;
-
- AtomicInt64 _nextIdNum;
- AtomicInt64 _dataSize;
- AtomicInt64 _numRecords;
-
- WiredTigerSizeStorer* _sizeStorer; // not owned, can be NULL
- int _sizeStorerCounter;
-
- bool _shuttingDown;
- bool _hasBackgroundThread;
- };
-
- // WT failpoint to throw write conflict exceptions randomly
- MONGO_FP_FORWARD_DECLARE(WTWriteConflictException);
+ void setSizeStorer(WiredTigerSizeStorer* ss) {
+ _sizeStorer = ss;
+ }
+
+ void dealtWithCappedLoc(const RecordId& loc);
+ bool isCappedHidden(const RecordId& loc) const;
+
+ bool inShutdown() const;
+ int64_t cappedDeleteAsNeeded(OperationContext* txn, const RecordId& justInserted);
+
+ int64_t cappedDeleteAsNeeded_inlock(OperationContext* txn, const RecordId& justInserted);
+
+ stdx::timed_mutex& cappedDeleterMutex() {
+ return _cappedDeleterMutex;
+ }
+
+private:
+ class Cursor;
+
+ class CappedInsertChange;
+ class NumRecordsChange;
+ class DataSizeChange;
+
+ static WiredTigerRecoveryUnit* _getRecoveryUnit(OperationContext* txn);
+
+ static int64_t _makeKey(const RecordId& loc);
+ static RecordId _fromKey(int64_t k);
+
+ void _addUncommitedDiskLoc_inlock(OperationContext* txn, const RecordId& loc);
+
+ RecordId _nextId();
+ void _setId(RecordId loc);
+ bool cappedAndNeedDelete() const;
+ void _changeNumRecords(OperationContext* txn, int64_t diff);
+ void _increaseDataSize(OperationContext* txn, int amount);
+ RecordData _getData(const WiredTigerCursor& cursor) const;
+ StatusWith<RecordId> extractAndCheckLocForOplog(const char* data, int len);
+ void _oplogSetStartHack(WiredTigerRecoveryUnit* wru) const;
+
+ const std::string _uri;
+ const uint64_t _instanceId; // not persisted
+
+ // The capped settings should not be updated once operations have started
+ const bool _isCapped;
+ const bool _isOplog;
+ const int64_t _cappedMaxSize;
+ const int64_t _cappedMaxSizeSlack; // when to start applying backpressure
+ const int64_t _cappedMaxDocs;
+ AtomicInt64 _cappedSleep;
+ AtomicInt64 _cappedSleepMS;
+ CappedDocumentDeleteCallback* _cappedDeleteCallback;
+ int _cappedDeleteCheckCount; // see comment in ::cappedDeleteAsNeeded
+ mutable stdx::timed_mutex _cappedDeleterMutex; // see comment in ::cappedDeleteAsNeeded
+
+ const bool _useOplogHack;
+
+ typedef std::vector<RecordId> SortedDiskLocs;
+ SortedDiskLocs _uncommittedDiskLocs;
+ RecordId _oplog_visibleTo;
+ RecordId _oplog_highestSeen;
+ mutable stdx::mutex _uncommittedDiskLocsMutex;
+
+ AtomicInt64 _nextIdNum;
+ AtomicInt64 _dataSize;
+ AtomicInt64 _numRecords;
+
+ WiredTigerSizeStorer* _sizeStorer; // not owned, can be NULL
+ int _sizeStorerCounter;
+ bool _shuttingDown;
+ bool _hasBackgroundThread;
+};
+
+// WT failpoint to throw write conflict exceptions randomly
+MONGO_FP_FORWARD_DECLARE(WTWriteConflictException);
}
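
The header above also exposes the oplog visibility bookkeeping (_uncommittedDiskLocs, _oplog_highestSeen, _oplogSetStartHack) that forward oplog cursors depend on: a reader may scan up to the earliest record id that has been allocated but not yet committed, or up to the highest id seen when nothing is pending. A rough sketch of that rule under simplified locking; OplogVisibility and its method names are hypothetical, not part of the MongoDB API.

#include <cstdint>
#include <deque>
#include <mutex>

// Tracks ids handed out to in-flight inserts so readers never see a "hole".
class OplogVisibility {
public:
    void onAllocate(int64_t id) {
        std::lock_guard<std::mutex> lk(_m);
        _pending.push_back(id);  // ids are allocated in increasing order
        _highestSeen = id;
    }

    void onDurable(int64_t id) {  // called once the insert commits or rolls back
        std::lock_guard<std::mutex> lk(_m);
        for (auto it = _pending.begin(); it != _pending.end(); ++it) {
            if (*it == id) {
                _pending.erase(it);
                break;
            }
        }
    }

    // Forward scans may read up to (but not past) the first pending id.
    int64_t readTill() const {
        std::lock_guard<std::mutex> lk(_m);
        return _pending.empty() ? _highestSeen : _pending.front();
    }

private:
    mutable std::mutex _m;
    std::deque<int64_t> _pending;
    int64_t _highestSeen = 0;
};

In the real store, CappedInsertChange::commit() and rollback() both call dealtWithCappedLoc for the inserted id, which plays the role of onDurable in this sketch, and _oplogSetStartHack applies the readTill rule before a forward cursor is handed out.
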
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mock.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mock.cpp
index 3a2a3d8e5d3..f83e1e63e5c 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mock.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mock.cpp
@@ -39,13 +39,13 @@
namespace mongo {
- // static
- bool WiredTigerKVEngine::initRsOplogBackgroundThread(StringData ns) {
- return false;
- }
+// static
+bool WiredTigerKVEngine::initRsOplogBackgroundThread(StringData ns) {
+ return false;
+}
- MONGO_INITIALIZER(SetGlobalEnvironment)(InitializerContext* context) {
- setGlobalServiceContext(stdx::make_unique<ServiceContextNoop>());
- return Status::OK();
- }
+MONGO_INITIALIZER(SetGlobalEnvironment)(InitializerContext* context) {
+ setGlobalServiceContext(stdx::make_unique<ServiceContextNoop>());
+ return Status::OK();
+}
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
index ef3f4007ecd..dae37303017 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_mongod.cpp
@@ -52,121 +52,117 @@
namespace mongo {
- namespace {
+namespace {
- std::set<NamespaceString> _backgroundThreadNamespaces;
- stdx::mutex _backgroundThreadMutex;
+std::set<NamespaceString> _backgroundThreadNamespaces;
+stdx::mutex _backgroundThreadMutex;
- class WiredTigerRecordStoreThread : public BackgroundJob {
- public:
- WiredTigerRecordStoreThread(const NamespaceString& ns)
- : BackgroundJob(true /* deleteSelf */), _ns(ns) {
- _name = std::string("WiredTigerRecordStoreThread for ") + _ns.toString();
- }
+class WiredTigerRecordStoreThread : public BackgroundJob {
+public:
+ WiredTigerRecordStoreThread(const NamespaceString& ns)
+ : BackgroundJob(true /* deleteSelf */), _ns(ns) {
+ _name = std::string("WiredTigerRecordStoreThread for ") + _ns.toString();
+ }
+
+ virtual std::string name() const {
+ return _name;
+ }
+
+ /**
+ * @return Number of documents deleted.
+ */
+ int64_t _deleteExcessDocuments() {
+ if (!getGlobalServiceContext()->getGlobalStorageEngine()) {
+ LOG(1) << "no global storage engine yet";
+ return 0;
+ }
+
+ OperationContextImpl txn;
+ checked_cast<WiredTigerRecoveryUnit*>(txn.recoveryUnit())->markNoTicketRequired();
+
+ try {
+ ScopedTransaction transaction(&txn, MODE_IX);
- virtual std::string name() const {
- return _name;
+ AutoGetDb autoDb(&txn, _ns.db(), MODE_IX);
+ Database* db = autoDb.getDb();
+ if (!db) {
+ LOG(2) << "no local database yet";
+ return 0;
}
- /**
- * @return Number of documents deleted.
- */
- int64_t _deleteExcessDocuments() {
- if (!getGlobalServiceContext()->getGlobalStorageEngine()) {
- LOG(1) << "no global storage engine yet";
- return 0;
- }
-
- OperationContextImpl txn;
- checked_cast<WiredTigerRecoveryUnit*>(txn.recoveryUnit())->markNoTicketRequired();
-
- try {
- ScopedTransaction transaction(&txn, MODE_IX);
-
- AutoGetDb autoDb(&txn, _ns.db(), MODE_IX);
- Database* db = autoDb.getDb();
- if (!db) {
- LOG(2) << "no local database yet";
- return 0;
- }
-
- Lock::CollectionLock collectionLock(txn.lockState(), _ns.ns(), MODE_IX);
- Collection* collection = db->getCollection(_ns);
- if (!collection) {
- LOG(2) << "no collection " << _ns;
- return 0;
- }
-
- OldClientContext ctx(&txn, _ns, false);
- WiredTigerRecordStore* rs =
- checked_cast<WiredTigerRecordStore*>(collection->getRecordStore());
- WriteUnitOfWork wuow(&txn);
- stdx::lock_guard<stdx::timed_mutex> lock(rs->cappedDeleterMutex());
- int64_t removed = rs->cappedDeleteAsNeeded_inlock(&txn, RecordId::max());
- wuow.commit();
- return removed;
- }
- catch (const std::exception& e) {
- severe() << "error in WiredTigerRecordStoreThread: " << e.what();
- fassertFailedNoTrace(!"error in WiredTigerRecordStoreThread");
- }
- catch (...) {
- fassertFailedNoTrace(!"unknown error in WiredTigerRecordStoreThread");
- }
+ Lock::CollectionLock collectionLock(txn.lockState(), _ns.ns(), MODE_IX);
+ Collection* collection = db->getCollection(_ns);
+ if (!collection) {
+ LOG(2) << "no collection " << _ns;
+ return 0;
}
- virtual void run() {
- Client::initThread(_name.c_str());
-
- while (!inShutdown()) {
- int64_t removed = _deleteExcessDocuments();
- LOG(2) << "WiredTigerRecordStoreThread deleted " << removed;
- if (removed == 0) {
- // If we removed 0 documents, sleep a bit in case we're on a laptop
- // or something to be nice.
- sleepmillis(1000);
- }
- else if(removed < 1000) {
- // 1000 is the batch size, so we didn't even do a full batch,
- // which is the most efficient.
- sleepmillis(10);
- }
- }
-
- log() << "shutting down";
+ OldClientContext ctx(&txn, _ns, false);
+ WiredTigerRecordStore* rs =
+ checked_cast<WiredTigerRecordStore*>(collection->getRecordStore());
+ WriteUnitOfWork wuow(&txn);
+ stdx::lock_guard<stdx::timed_mutex> lock(rs->cappedDeleterMutex());
+ int64_t removed = rs->cappedDeleteAsNeeded_inlock(&txn, RecordId::max());
+ wuow.commit();
+ return removed;
+ } catch (const std::exception& e) {
+ severe() << "error in WiredTigerRecordStoreThread: " << e.what();
+ fassertFailedNoTrace(!"error in WiredTigerRecordStoreThread");
+ } catch (...) {
+ fassertFailedNoTrace(!"unknown error in WiredTigerRecordStoreThread");
+ }
+ }
+
+ virtual void run() {
+ Client::initThread(_name.c_str());
+
+ while (!inShutdown()) {
+ int64_t removed = _deleteExcessDocuments();
+ LOG(2) << "WiredTigerRecordStoreThread deleted " << removed;
+ if (removed == 0) {
+ // If we removed 0 documents, sleep a bit in case we're on a laptop
+ // or something to be nice.
+ sleepmillis(1000);
+ } else if (removed < 1000) {
+ // 1000 is the batch size, so we didn't even do a full batch,
+ // which is the most efficient.
+ sleepmillis(10);
}
+ }
- private:
- NamespaceString _ns;
- std::string _name;
- };
+ log() << "shutting down";
+ }
- } // namespace
+private:
+ NamespaceString _ns;
+ std::string _name;
+};
- // static
- bool WiredTigerKVEngine::initRsOplogBackgroundThread(StringData ns) {
- if (!NamespaceString::oplog(ns)) {
- return false;
- }
+} // namespace
- if (storageGlobalParams.repair) {
- LOG(1) << "not starting WiredTigerRecordStoreThread for " << ns
- << " because we are in repair";
- return false;
- }
+// static
+bool WiredTigerKVEngine::initRsOplogBackgroundThread(StringData ns) {
+ if (!NamespaceString::oplog(ns)) {
+ return false;
+ }
- stdx::lock_guard<stdx::mutex> lock(_backgroundThreadMutex);
- NamespaceString nss(ns);
- if (_backgroundThreadNamespaces.count(nss)) {
- log() << "WiredTigerRecordStoreThread " << ns << " already started";
- }
- else {
- log() << "Starting WiredTigerRecordStoreThread " << ns;
- BackgroundJob* backgroundThread = new WiredTigerRecordStoreThread(nss);
- backgroundThread->go();
- _backgroundThreadNamespaces.insert(nss);
- }
- return true;
+ if (storageGlobalParams.repair) {
+ LOG(1) << "not starting WiredTigerRecordStoreThread for " << ns
+ << " because we are in repair";
+ return false;
+ }
+
+ stdx::lock_guard<stdx::mutex> lock(_backgroundThreadMutex);
+ NamespaceString nss(ns);
+ if (_backgroundThreadNamespaces.count(nss)) {
+ log() << "WiredTigerRecordStoreThread " << ns << " already started";
+ } else {
+ log() << "Starting WiredTigerRecordStoreThread " << ns;
+ BackgroundJob* backgroundThread = new WiredTigerRecordStoreThread(nss);
+ backgroundThread->go();
+ _backgroundThreadNamespaces.insert(nss);
}
+ return true;
+}
} // namespace mongo
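
The mongod-specific file above wires up a background job that trims the capped oplog in batches and backs off when there is nothing to do: sleep a full second when a pass removes nothing, sleep briefly when it removes less than a full batch of 1000, and loop immediately otherwise. A stripped-down sketch of that loop; shuttingDown, oplogTrimLoop and the deleteBatch callback are placeholders standing in for inShutdown() and cappedDeleteAsNeeded_inlock.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <functional>
#include <thread>

std::atomic<bool> shuttingDown{false};

// 'deleteBatch' returns how many documents one trimming pass removed.
void oplogTrimLoop(const std::function<int64_t()>& deleteBatch) {
    while (!shuttingDown.load()) {
        const int64_t removed = deleteBatch();
        if (removed == 0) {
            // Nothing to trim: stay quiet while the workload is idle.
            std::this_thread::sleep_for(std::chrono::milliseconds(1000));
        } else if (removed < 1000) {
            // Under a full batch (1000 is the batch size): short pause.
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
        // A full batch loops immediately to keep up with insert pressure.
    }
}

The real thread additionally takes the database and collection locks and performs the deletes inside a WriteUnitOfWork under cappedDeleterMutex, as the diff above shows.
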
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 52a3f0cfefc..aae1056bf8b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -50,833 +50,819 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
-
- class WiredTigerHarnessHelper : public HarnessHelper {
- public:
- static WT_CONNECTION* createConnection(StringData dbpath, StringData extraStrings) {
- WT_CONNECTION* conn = NULL;
-
- std::stringstream ss;
- ss << "create,";
- ss << "statistics=(all),";
- ss << extraStrings;
- string config = ss.str();
- int ret = wiredtiger_open(dbpath.toString().c_str(), NULL, config.c_str(), &conn);
- ASSERT_OK(wtRCToStatus(ret));
- ASSERT(conn);
-
- return conn;
- }
-
- WiredTigerHarnessHelper()
- : _dbpath("wt_test"),
- _conn(createConnection(_dbpath.path(), "")),
- _sessionCache(new WiredTigerSessionCache(_conn)) { }
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+
+class WiredTigerHarnessHelper : public HarnessHelper {
+public:
+ static WT_CONNECTION* createConnection(StringData dbpath, StringData extraStrings) {
+ WT_CONNECTION* conn = NULL;
+
+ std::stringstream ss;
+ ss << "create,";
+ ss << "statistics=(all),";
+ ss << extraStrings;
+ string config = ss.str();
+ int ret = wiredtiger_open(dbpath.toString().c_str(), NULL, config.c_str(), &conn);
+ ASSERT_OK(wtRCToStatus(ret));
+ ASSERT(conn);
+
+ return conn;
+ }
- WiredTigerHarnessHelper(StringData extraStrings)
- : _dbpath("wt_test"),
- _conn(createConnection(_dbpath.path(), extraStrings)),
- _sessionCache(new WiredTigerSessionCache(_conn)) { }
+ WiredTigerHarnessHelper()
+ : _dbpath("wt_test"),
+ _conn(createConnection(_dbpath.path(), "")),
+ _sessionCache(new WiredTigerSessionCache(_conn)) {}
- ~WiredTigerHarnessHelper() {
- delete _sessionCache;
- _conn->close(_conn, NULL);
- }
+ WiredTigerHarnessHelper(StringData extraStrings)
+ : _dbpath("wt_test"),
+ _conn(createConnection(_dbpath.path(), extraStrings)),
+ _sessionCache(new WiredTigerSessionCache(_conn)) {}
- virtual RecordStore* newNonCappedRecordStore() { return newNonCappedRecordStore("a.b"); }
- RecordStore* newNonCappedRecordStore(const std::string& ns) {
- WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit( _sessionCache );
- OperationContextNoop txn( ru );
- string uri = "table:" + ns;
+ ~WiredTigerHarnessHelper() {
+ delete _sessionCache;
+ _conn->close(_conn, NULL);
+ }
- StatusWith<std::string> result =
- WiredTigerRecordStore::generateCreateString(ns, CollectionOptions(), "");
- ASSERT_TRUE(result.isOK());
- std::string config = result.getValue();
+ virtual RecordStore* newNonCappedRecordStore() {
+ return newNonCappedRecordStore("a.b");
+ }
+ RecordStore* newNonCappedRecordStore(const std::string& ns) {
+ WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit(_sessionCache);
+ OperationContextNoop txn(ru);
+ string uri = "table:" + ns;
- {
- WriteUnitOfWork uow(&txn);
- WT_SESSION* s = ru->getSession(&txn)->getSession();
- invariantWTOK( s->create( s, uri.c_str(), config.c_str() ) );
- uow.commit();
- }
+ StatusWith<std::string> result =
+ WiredTigerRecordStore::generateCreateString(ns, CollectionOptions(), "");
+ ASSERT_TRUE(result.isOK());
+ std::string config = result.getValue();
- return new WiredTigerRecordStore( &txn, ns, uri );
+ {
+ WriteUnitOfWork uow(&txn);
+ WT_SESSION* s = ru->getSession(&txn)->getSession();
+ invariantWTOK(s->create(s, uri.c_str(), config.c_str()));
+ uow.commit();
}
- virtual RecordStore* newCappedRecordStore( const std::string& ns,
- int64_t cappedMaxSize,
- int64_t cappedMaxDocs ) {
-
- WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit( _sessionCache );
- OperationContextNoop txn( ru );
- string uri = "table:a.b";
-
- CollectionOptions options;
- options.capped = true;
+ return new WiredTigerRecordStore(&txn, ns, uri);
+ }
- StatusWith<std::string> result =
- WiredTigerRecordStore::generateCreateString(ns, options, "");
- ASSERT_TRUE(result.isOK());
- std::string config = result.getValue();
+ virtual RecordStore* newCappedRecordStore(const std::string& ns,
+ int64_t cappedMaxSize,
+ int64_t cappedMaxDocs) {
+ WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit(_sessionCache);
+ OperationContextNoop txn(ru);
+ string uri = "table:a.b";
- {
- WriteUnitOfWork uow(&txn);
- WT_SESSION* s = ru->getSession(&txn)->getSession();
- invariantWTOK( s->create( s, uri.c_str(), config.c_str() ) );
- uow.commit();
- }
+ CollectionOptions options;
+ options.capped = true;
- return new WiredTigerRecordStore( &txn, ns, uri, true, cappedMaxSize, cappedMaxDocs );
- }
+ StatusWith<std::string> result =
+ WiredTigerRecordStore::generateCreateString(ns, options, "");
+ ASSERT_TRUE(result.isOK());
+ std::string config = result.getValue();
- virtual RecoveryUnit* newRecoveryUnit() {
- return new WiredTigerRecoveryUnit( _sessionCache );
+ {
+ WriteUnitOfWork uow(&txn);
+ WT_SESSION* s = ru->getSession(&txn)->getSession();
+ invariantWTOK(s->create(s, uri.c_str(), config.c_str()));
+ uow.commit();
}
- WT_CONNECTION* conn() const { return _conn; }
-
- private:
- unittest::TempDir _dbpath;
- WT_CONNECTION* _conn;
- WiredTigerSessionCache* _sessionCache;
- };
-
- HarnessHelper* newHarnessHelper() {
- return new WiredTigerHarnessHelper();
+ return new WiredTigerRecordStore(&txn, ns, uri, true, cappedMaxSize, cappedMaxDocs);
}
- TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyDocument) {
- BSONObj spec = fromjson("{}");
- StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
- ASSERT_OK(result.getStatus());
- ASSERT_EQ(result.getValue(), ""); // "," would also be valid.
+ virtual RecoveryUnit* newRecoveryUnit() {
+ return new WiredTigerRecoveryUnit(_sessionCache);
}
- TEST(WiredTigerRecordStoreTest, GenerateCreateStringUnknownField) {
- BSONObj spec = fromjson("{unknownField: 1}");
- StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
- const Status& status = result.getStatus();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::InvalidOptions, status);
+ WT_CONNECTION* conn() const {
+ return _conn;
}
- TEST(WiredTigerRecordStoreTest, GenerateCreateStringNonStringConfig) {
- BSONObj spec = fromjson("{configString: 12345}");
- StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
- const Status& status = result.getStatus();
- ASSERT_NOT_OK(status);
- ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
- }
+private:
+ unittest::TempDir _dbpath;
+ WT_CONNECTION* _conn;
+ WiredTigerSessionCache* _sessionCache;
+};
- TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyConfigString) {
- BSONObj spec = fromjson("{configString: ''}");
- StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
- ASSERT_OK(result.getStatus());
- ASSERT_EQ(result.getValue(), ","); // "" would also be valid.
- }
+HarnessHelper* newHarnessHelper() {
+ return new WiredTigerHarnessHelper();
+}
- TEST(WiredTigerRecordStoreTest, GenerateCreateStringValidConfigFormat) {
- // TODO eventually this should fail since "abc" is not a valid WT option.
- BSONObj spec = fromjson("{configString: 'abc=def'}");
- StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
- const Status& status = result.getStatus();
- ASSERT_OK(status);
- ASSERT_EQ(result.getValue(), "abc=def,");
- }
+TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyDocument) {
+ BSONObj spec = fromjson("{}");
+ StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQ(result.getValue(), ""); // "," would also be valid.
+}
- TEST(WiredTigerRecordStoreTest, Isolation1 ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
+TEST(WiredTigerRecordStoreTest, GenerateCreateStringUnknownField) {
+ BSONObj spec = fromjson("{unknownField: 1}");
+ StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
+ const Status& status = result.getStatus();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, status);
+}
- RecordId loc1;
- RecordId loc2;
+TEST(WiredTigerRecordStoreTest, GenerateCreateStringNonStringConfig) {
+ BSONObj spec = fromjson("{configString: 12345}");
+ StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
+ const Status& status = result.getStatus();
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
+}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
+TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyConfigString) {
+ BSONObj spec = fromjson("{configString: ''}");
+ StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
+ ASSERT_OK(result.getStatus());
+ ASSERT_EQ(result.getValue(), ","); // "" would also be valid.
+}
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
+TEST(WiredTigerRecordStoreTest, GenerateCreateStringValidConfigFormat) {
+ // TODO eventually this should fail since "abc" is not a valid WT option.
+ BSONObj spec = fromjson("{configString: 'abc=def'}");
+ StatusWith<std::string> result = WiredTigerRecordStore::parseOptionsField(spec);
+ const Status& status = result.getStatus();
+ ASSERT_OK(status);
+ ASSERT_EQ(result.getValue(), "abc=def,");
+}
- res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc2 = res.getValue();
+TEST(WiredTigerRecordStoreTest, Isolation1) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- uow.commit();
- }
- }
+ RecordId loc1;
+ RecordId loc2;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
- unique_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
-
- unique_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
- unique_ptr<WriteUnitOfWork> w2( new WriteUnitOfWork( t2.get() ) );
-
- rs->dataFor( t1.get(), loc1 );
- rs->dataFor( t2.get(), loc1 );
+ WriteUnitOfWork uow(opCtx.get());
- ASSERT_OK( rs->updateRecord( t1.get(), loc1, "b", 2, false, NULL ).getStatus() );
- ASSERT_OK( rs->updateRecord( t1.get(), loc2, "B", 2, false, NULL ).getStatus() );
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ loc1 = res.getValue();
- try {
- // this should fail
- rs->updateRecord( t2.get(), loc1, "c", 2, false, NULL );
- ASSERT( 0 );
- }
- catch ( WriteConflictException& dle ) {
- w2.reset( NULL );
- t2.reset( NULL );
- }
+ res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ loc2 = res.getValue();
- w1->commit(); // this should succeed
+ uow.commit();
}
}
- TEST(WiredTigerRecordStoreTest, Isolation2 ) {
- unique_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- RecordId loc1;
- RecordId loc2;
+ {
+ unique_ptr<OperationContext> t1(harnessHelper->newOperationContext());
+ unique_ptr<OperationContext> t2(harnessHelper->newOperationContext());
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
+ unique_ptr<WriteUnitOfWork> w1(new WriteUnitOfWork(t1.get()));
+ unique_ptr<WriteUnitOfWork> w2(new WriteUnitOfWork(t2.get()));
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
+ rs->dataFor(t1.get(), loc1);
+ rs->dataFor(t2.get(), loc1);
- res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc2 = res.getValue();
+ ASSERT_OK(rs->updateRecord(t1.get(), loc1, "b", 2, false, NULL).getStatus());
+ ASSERT_OK(rs->updateRecord(t1.get(), loc2, "B", 2, false, NULL).getStatus());
- uow.commit();
- }
+ try {
+ // this should fail
+ rs->updateRecord(t2.get(), loc1, "c", 2, false, NULL);
+ ASSERT(0);
+ } catch (WriteConflictException& dle) {
+ w2.reset(NULL);
+ t2.reset(NULL);
}
- {
- unique_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
- unique_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
+ w1->commit(); // this should succeed
+ }
+}
- // ensure we start transactions
- rs->dataFor( t1.get(), loc2 );
- rs->dataFor( t2.get(), loc2 );
+TEST(WiredTigerRecordStoreTest, Isolation2) {
+ unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- WriteUnitOfWork w( t1.get() );
- ASSERT_OK( rs->updateRecord( t1.get(), loc1, "b", 2, false, NULL ).getStatus() );
- w.commit();
- }
+ RecordId loc1;
+ RecordId loc2;
- {
- WriteUnitOfWork w( t2.get() );
- ASSERT_EQUALS( string("a"), rs->dataFor( t2.get(), loc1 ).data() );
- try {
- // this should fail as our version of loc1 is too old
- rs->updateRecord( t2.get(), loc1, "c", 2, false, NULL );
- ASSERT( 0 );
- }
- catch ( WriteConflictException& dle ) {
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ {
+ WriteUnitOfWork uow(opCtx.get());
- }
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ loc1 = res.getValue();
+
+ res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ loc2 = res.getValue();
+ uow.commit();
}
}
- TEST(WiredTigerRecordStoreTest, SizeStorer1 ) {
- unique_ptr<WiredTigerHarnessHelper> harnessHelper(new WiredTigerHarnessHelper());
- unique_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
-
- string uri = checked_cast<WiredTigerRecordStore*>( rs.get() )->getURI();
+ {
+ unique_ptr<OperationContext> t1(harnessHelper->newOperationContext());
+ unique_ptr<OperationContext> t2(harnessHelper->newOperationContext());
- string indexUri = "table:myindex";
- WiredTigerSizeStorer ss(harnessHelper->conn(), indexUri);
- checked_cast<WiredTigerRecordStore*>( rs.get() )->setSizeStorer( &ss );
-
- int N = 12;
+ // ensure we start transactions
+ rs->dataFor(t1.get(), loc2);
+ rs->dataFor(t2.get(), loc2);
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork uow( opCtx.get() );
- for ( int i = 0; i < N; i++ ) {
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- }
- uow.commit();
- }
+ WriteUnitOfWork w(t1.get());
+ ASSERT_OK(rs->updateRecord(t1.get(), loc1, "b", 2, false, NULL).getStatus());
+ w.commit();
}
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( N, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork w(t2.get());
+ ASSERT_EQUALS(string("a"), rs->dataFor(t2.get(), loc1).data());
+ try {
+ // this should fail as our version of loc1 is too old
+ rs->updateRecord(t2.get(), loc1, "c", 2, false, NULL);
+ ASSERT(0);
+ } catch (WriteConflictException& dle) {
+ }
}
+ }
+}
- rs.reset( NULL );
+TEST(WiredTigerRecordStoreTest, SizeStorer1) {
+ unique_ptr<WiredTigerHarnessHelper> harnessHelper(new WiredTigerHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
- {
- long long numRecords;
- long long dataSize;
- ss.loadFromCache( uri, &numRecords, &dataSize );
- ASSERT_EQUALS( N, numRecords );
- }
+ string uri = checked_cast<WiredTigerRecordStore*>(rs.get())->getURI();
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- rs.reset( new WiredTigerRecordStore( opCtx.get(), "a.b", uri,
- false, -1, -1, NULL, &ss ) );
- }
+ string indexUri = "table:myindex";
+ WiredTigerSizeStorer ss(harnessHelper->conn(), indexUri);
+ checked_cast<WiredTigerRecordStore*>(rs.get())->setSizeStorer(&ss);
+ int N = 12;
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- ASSERT_EQUALS( N, rs->numRecords( opCtx.get() ) );
+ WriteUnitOfWork uow(opCtx.get());
+ for (int i = 0; i < N; i++) {
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ }
+ uow.commit();
}
+ }
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- WiredTigerRecoveryUnit* ru =
- checked_cast<WiredTigerRecoveryUnit*>( opCtx->recoveryUnit() );
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(N, rs->numRecords(opCtx.get()));
+ }
- {
- WriteUnitOfWork uow( opCtx.get() );
- WT_SESSION* s = ru->getSession(opCtx.get())->getSession();
- invariantWTOK( s->create( s, indexUri.c_str(), "" ) );
- uow.commit();
- }
+ rs.reset(NULL);
- ss.syncCache(true);
- }
+ {
+ long long numRecords;
+ long long dataSize;
+ ss.loadFromCache(uri, &numRecords, &dataSize);
+ ASSERT_EQUALS(N, numRecords);
+ }
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ rs.reset(new WiredTigerRecordStore(opCtx.get(), "a.b", uri, false, -1, -1, NULL, &ss));
+ }
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ASSERT_EQUALS(N, rs->numRecords(opCtx.get()));
+ }
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ WiredTigerRecoveryUnit* ru = checked_cast<WiredTigerRecoveryUnit*>(opCtx->recoveryUnit());
{
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- WiredTigerSizeStorer ss2(harnessHelper->conn(), indexUri);
- ss2.fillCache();
- long long numRecords;
- long long dataSize;
- ss2.loadFromCache( uri, &numRecords, &dataSize );
- ASSERT_EQUALS( N, numRecords );
+ WriteUnitOfWork uow(opCtx.get());
+ WT_SESSION* s = ru->getSession(opCtx.get())->getSession();
+ invariantWTOK(s->create(s, indexUri.c_str(), ""));
+ uow.commit();
}
- rs.reset( NULL ); // this has to be deleted before ss
+ ss.syncCache(true);
}
-namespace {
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ WiredTigerSizeStorer ss2(harnessHelper->conn(), indexUri);
+ ss2.fillCache();
+ long long numRecords;
+ long long dataSize;
+ ss2.loadFromCache(uri, &numRecords, &dataSize);
+ ASSERT_EQUALS(N, numRecords);
+ }
- class GoodValidateAdaptor : public ValidateAdaptor {
- public:
- virtual Status validate(const RecordData& record, size_t* dataSize) {
- *dataSize = static_cast<size_t>(record.size());
- return Status::OK();
- }
- };
+ rs.reset(NULL); // this has to be deleted before ss
+}
- class BadValidateAdaptor : public ValidateAdaptor {
- public:
- virtual Status validate(const RecordData& record, size_t* dataSize) {
- *dataSize = static_cast<size_t>(record.size());
- return Status(ErrorCodes::UnknownError, "");
- }
- };
-
- class SizeStorerValidateTest : public mongo::unittest::Test {
- private:
- virtual void setUp() {
- harnessHelper.reset(new WiredTigerHarnessHelper());
- sizeStorer.reset(new WiredTigerSizeStorer(harnessHelper->conn(), "table:sizeStorer"));
- rs.reset(harnessHelper->newNonCappedRecordStore());
- WiredTigerRecordStore* wtrs = checked_cast<WiredTigerRecordStore*>(rs.get());
- wtrs->setSizeStorer(sizeStorer.get());
- uri = wtrs->getURI();
-
- expectedNumRecords = 10000;
- expectedDataSize = expectedNumRecords * 2;
- {
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- WriteUnitOfWork uow( opCtx.get() );
- for (int i=0; i < expectedNumRecords; i++) {
- ASSERT_OK(rs->insertRecord( opCtx.get(), "a", 2, false ).getStatus());
- }
- uow.commit();
- }
- ASSERT_EQUALS(expectedNumRecords, rs->numRecords(NULL));
- ASSERT_EQUALS(expectedDataSize, rs->dataSize(NULL));
- sizeStorer->storeToCache(uri, 0, 0);
- }
- virtual void tearDown() {
- expectedNumRecords = 0;
- expectedDataSize = 0;
-
- rs.reset(NULL);
- sizeStorer.reset(NULL);
- harnessHelper.reset(NULL);
- rs.reset(NULL);
- }
+namespace {
- protected:
- long long getNumRecords() const {
- long long numRecords;
- long long unused;
- sizeStorer->loadFromCache(uri, &numRecords, &unused);
- return numRecords;
- }
+class GoodValidateAdaptor : public ValidateAdaptor {
+public:
+ virtual Status validate(const RecordData& record, size_t* dataSize) {
+ *dataSize = static_cast<size_t>(record.size());
+ return Status::OK();
+ }
+};
- long long getDataSize() const {
- long long unused;
- long long dataSize;
- sizeStorer->loadFromCache(uri, &unused, &dataSize);
- return dataSize;
+class BadValidateAdaptor : public ValidateAdaptor {
+public:
+ virtual Status validate(const RecordData& record, size_t* dataSize) {
+ *dataSize = static_cast<size_t>(record.size());
+ return Status(ErrorCodes::UnknownError, "");
+ }
+};
+
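+// Fixture: builds a record store backed by a WiredTigerSizeStorer, pre-populates
+// it with expectedNumRecords two-byte records, then zeroes the cached counts so
+// each test can observe whether validate() repairs them.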
+class SizeStorerValidateTest : public mongo::unittest::Test {
+private:
+ virtual void setUp() {
+ harnessHelper.reset(new WiredTigerHarnessHelper());
+ sizeStorer.reset(new WiredTigerSizeStorer(harnessHelper->conn(), "table:sizeStorer"));
+ rs.reset(harnessHelper->newNonCappedRecordStore());
+ WiredTigerRecordStore* wtrs = checked_cast<WiredTigerRecordStore*>(rs.get());
+ wtrs->setSizeStorer(sizeStorer.get());
+ uri = wtrs->getURI();
+
+ expectedNumRecords = 10000;
+ expectedDataSize = expectedNumRecords * 2;
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ WriteUnitOfWork uow(opCtx.get());
+ for (int i = 0; i < expectedNumRecords; i++) {
+ ASSERT_OK(rs->insertRecord(opCtx.get(), "a", 2, false).getStatus());
+ }
+ uow.commit();
}
+ ASSERT_EQUALS(expectedNumRecords, rs->numRecords(NULL));
+ ASSERT_EQUALS(expectedDataSize, rs->dataSize(NULL));
+ sizeStorer->storeToCache(uri, 0, 0);
+ }
+ virtual void tearDown() {
+ expectedNumRecords = 0;
+ expectedDataSize = 0;
- std::unique_ptr<WiredTigerHarnessHelper> harnessHelper;
- std::unique_ptr<WiredTigerSizeStorer> sizeStorer;
- std::unique_ptr<RecordStore> rs;
- std::string uri;
+ rs.reset(NULL);
+ sizeStorer.reset(NULL);
+ harnessHelper.reset(NULL);
+ rs.reset(NULL);
+ }
- long long expectedNumRecords;
- long long expectedDataSize;
- };
+protected:
+ long long getNumRecords() const {
+ long long numRecords;
+ long long unused;
+ sizeStorer->loadFromCache(uri, &numRecords, &unused);
+ return numRecords;
+ }
- // Basic validation - size storer data is not updated.
- TEST_F(SizeStorerValidateTest, Basic) {
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- ValidateResults results;
- BSONObjBuilder output;
- ASSERT_OK(rs->validate(opCtx.get(), false, false, NULL, &results, &output));
- BSONObj obj = output.obj();
- ASSERT_EQUALS(expectedNumRecords, obj.getIntField("nrecords"));
- ASSERT_EQUALS(0, getNumRecords());
- ASSERT_EQUALS(0, getDataSize());
+ long long getDataSize() const {
+ long long unused;
+ long long dataSize;
+ sizeStorer->loadFromCache(uri, &unused, &dataSize);
+ return dataSize;
}
- // Full validation - size storer data is updated.
- TEST_F(SizeStorerValidateTest, FullWithGoodAdaptor) {
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- GoodValidateAdaptor adaptor;
- ValidateResults results;
- BSONObjBuilder output;
- ASSERT_OK(rs->validate(opCtx.get(), true, true, &adaptor, &results, &output));
- BSONObj obj = output.obj();
- ASSERT_EQUALS(expectedNumRecords, obj.getIntField("nrecords"));
- ASSERT_EQUALS(expectedNumRecords, getNumRecords());
- ASSERT_EQUALS(expectedDataSize, getDataSize());
- }
-
- // Full validation with a validation adaptor that fails - size storer data is not updated.
- TEST_F(SizeStorerValidateTest, FullWithBadAdapter) {
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- BadValidateAdaptor adaptor;
- ValidateResults results;
- BSONObjBuilder output;
- ASSERT_OK(rs->validate(opCtx.get(), true, true, &adaptor, &results, &output));
- BSONObj obj = output.obj();
- ASSERT_EQUALS(expectedNumRecords, obj.getIntField("nrecords"));
- ASSERT_EQUALS(0, getNumRecords());
- ASSERT_EQUALS(0, getDataSize());
- }
-
- // Load bad _numRecords and _dataSize values at record store creation.
- TEST_F(SizeStorerValidateTest, InvalidSizeStorerAtCreation) {
- rs.reset(NULL);
+ std::unique_ptr<WiredTigerHarnessHelper> harnessHelper;
+ std::unique_ptr<WiredTigerSizeStorer> sizeStorer;
+ std::unique_ptr<RecordStore> rs;
+ std::string uri;
+
+ long long expectedNumRecords;
+ long long expectedDataSize;
+};
+
+// Basic validation - size storer data is not updated.
+TEST_F(SizeStorerValidateTest, Basic) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ ValidateResults results;
+ BSONObjBuilder output;
+ ASSERT_OK(rs->validate(opCtx.get(), false, false, NULL, &results, &output));
+ BSONObj obj = output.obj();
+ ASSERT_EQUALS(expectedNumRecords, obj.getIntField("nrecords"));
+ ASSERT_EQUALS(0, getNumRecords());
+ ASSERT_EQUALS(0, getDataSize());
+}
- unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
- sizeStorer->storeToCache(uri, expectedNumRecords*2, expectedDataSize*2);
- rs.reset(new WiredTigerRecordStore(opCtx.get(), "a.b", uri, false, -1, -1, NULL,
- sizeStorer.get()));
- ASSERT_EQUALS(expectedNumRecords*2, rs->numRecords(NULL));
- ASSERT_EQUALS(expectedDataSize*2, rs->dataSize(NULL));
-
- // Full validation should fix record and size counters.
- GoodValidateAdaptor adaptor;
- ValidateResults results;
- BSONObjBuilder output;
- ASSERT_OK(rs->validate(opCtx.get(), true, true, &adaptor, &results, &output));
- BSONObj obj = output.obj();
- ASSERT_EQUALS(expectedNumRecords, obj.getIntField("nrecords"));
- ASSERT_EQUALS(expectedNumRecords, getNumRecords());
- ASSERT_EQUALS(expectedDataSize, getDataSize());
+// Full validation - size storer data is updated.
+TEST_F(SizeStorerValidateTest, FullWithGoodAdaptor) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ GoodValidateAdaptor adaptor;
+ ValidateResults results;
+ BSONObjBuilder output;
+ ASSERT_OK(rs->validate(opCtx.get(), true, true, &adaptor, &results, &output));
+ BSONObj obj = output.obj();
+ ASSERT_EQUALS(expectedNumRecords, obj.getIntField("nrecords"));
+ ASSERT_EQUALS(expectedNumRecords, getNumRecords());
+ ASSERT_EQUALS(expectedDataSize, getDataSize());
+}
- ASSERT_EQUALS(expectedNumRecords, rs->numRecords(NULL));
- ASSERT_EQUALS(expectedDataSize, rs->dataSize(NULL));
+// Full validation with a validation adaptor that fails - size storer data is not updated.
+TEST_F(SizeStorerValidateTest, FullWithBadAdapter) {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ BadValidateAdaptor adaptor;
+ ValidateResults results;
+ BSONObjBuilder output;
+ ASSERT_OK(rs->validate(opCtx.get(), true, true, &adaptor, &results, &output));
+ BSONObj obj = output.obj();
+ ASSERT_EQUALS(expectedNumRecords, obj.getIntField("nrecords"));
+ ASSERT_EQUALS(0, getNumRecords());
+ ASSERT_EQUALS(0, getDataSize());
+}
+
+// Load bad _numRecords and _dataSize values at record store creation.
+TEST_F(SizeStorerValidateTest, InvalidSizeStorerAtCreation) {
+ rs.reset(NULL);
+
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ sizeStorer->storeToCache(uri, expectedNumRecords * 2, expectedDataSize * 2);
+ rs.reset(
+ new WiredTigerRecordStore(opCtx.get(), "a.b", uri, false, -1, -1, NULL, sizeStorer.get()));
+ ASSERT_EQUALS(expectedNumRecords * 2, rs->numRecords(NULL));
+ ASSERT_EQUALS(expectedDataSize * 2, rs->dataSize(NULL));
+
+ // Full validation should fix record and size counters.
+ GoodValidateAdaptor adaptor;
+ ValidateResults results;
+ BSONObjBuilder output;
+ ASSERT_OK(rs->validate(opCtx.get(), true, true, &adaptor, &results, &output));
+ BSONObj obj = output.obj();
+ ASSERT_EQUALS(expectedNumRecords, obj.getIntField("nrecords"));
+ ASSERT_EQUALS(expectedNumRecords, getNumRecords());
+ ASSERT_EQUALS(expectedDataSize, getDataSize());
+
+ ASSERT_EQUALS(expectedNumRecords, rs->numRecords(NULL));
+ ASSERT_EQUALS(expectedDataSize, rs->dataSize(NULL));
}
} // namespace
- StatusWith<RecordId> insertBSON(unique_ptr<OperationContext>& opCtx,
- unique_ptr<RecordStore>& rs,
- const Timestamp& opTime) {
- BSONObj obj = BSON( "ts" << opTime );
- WriteUnitOfWork wuow(opCtx.get());
- WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get());
- invariant( wrs );
- Status status = wrs->oplogDiskLocRegister( opCtx.get(), opTime );
- if (!status.isOK())
- return StatusWith<RecordId>( status );
- StatusWith<RecordId> res = rs->insertRecord(opCtx.get(),
- obj.objdata(),
- obj.objsize(),
- false);
- if (res.isOK())
- wuow.commit();
- return res;
- }
-
- // TODO make generic
- TEST(WiredTigerRecordStoreTest, OplogHack) {
- WiredTigerHarnessHelper harnessHelper;
- unique_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("local.oplog.foo"));
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
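+// Helper for the oplog tests: registers opTime with the oplog and inserts the
+// matching {ts: opTime} document in its own WriteUnitOfWork, committing only if
+// the insert succeeds.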
+StatusWith<RecordId> insertBSON(unique_ptr<OperationContext>& opCtx,
+ unique_ptr<RecordStore>& rs,
+ const Timestamp& opTime) {
+ BSONObj obj = BSON("ts" << opTime);
+ WriteUnitOfWork wuow(opCtx.get());
+ WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get());
+ invariant(wrs);
+ Status status = wrs->oplogDiskLocRegister(opCtx.get(), opTime);
+ if (!status.isOK())
+ return StatusWith<RecordId>(status);
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(), false);
+ if (res.isOK())
+ wuow.commit();
+ return res;
+}
- // always illegal
- ASSERT_EQ(insertBSON(opCtx, rs, Timestamp(2,-1)).getStatus(),
- ErrorCodes::BadValue);
+// TODO make generic
+TEST(WiredTigerRecordStoreTest, OplogHack) {
+ WiredTigerHarnessHelper harnessHelper;
+ unique_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("local.oplog.foo"));
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- {
- BSONObj obj = BSON("not_ts" << Timestamp(2,1));
- ASSERT_EQ(rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(),
- false ).getStatus(),
- ErrorCodes::BadValue);
-
- obj = BSON( "ts" << "not a Timestamp" );
- ASSERT_EQ(rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(),
- false ).getStatus(),
- ErrorCodes::BadValue);
- }
+ // always illegal
+ ASSERT_EQ(insertBSON(opCtx, rs, Timestamp(2, -1)).getStatus(), ErrorCodes::BadValue);
- // currently dasserts
- // ASSERT_EQ(insertBSON(opCtx, rs, BSON("ts" << Timestamp(-2,1))).getStatus(),
- // ErrorCodes::BadValue);
+ {
+ BSONObj obj = BSON("not_ts" << Timestamp(2, 1));
+ ASSERT_EQ(
+ rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(), false).getStatus(),
+ ErrorCodes::BadValue);
- // success cases
- ASSERT_EQ(insertBSON(opCtx, rs, Timestamp(1,1)).getValue(),
- RecordId(1,1));
+ obj = BSON("ts"
+ << "not a Timestamp");
+ ASSERT_EQ(
+ rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(), false).getStatus(),
+ ErrorCodes::BadValue);
+ }
- ASSERT_EQ(insertBSON(opCtx, rs, Timestamp(1,2)).getValue(),
- RecordId(1,2));
+ // currently dasserts
+ // ASSERT_EQ(insertBSON(opCtx, rs, BSON("ts" << Timestamp(-2,1))).getStatus(),
+ // ErrorCodes::BadValue);
- ASSERT_EQ(insertBSON(opCtx, rs, Timestamp(2,2)).getValue(),
- RecordId(2,2));
- }
+ // success cases
+ ASSERT_EQ(insertBSON(opCtx, rs, Timestamp(1, 1)).getValue(), RecordId(1, 1));
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- // find start
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0,1)), RecordId()); // nothing <=
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,1)), RecordId(1,2)); // between
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,2)), RecordId(2,2)); // ==
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(2,2)); // > highest
- }
+ ASSERT_EQ(insertBSON(opCtx, rs, Timestamp(1, 2)).getValue(), RecordId(1, 2));
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(2,2), false); // no-op
- }
+ ASSERT_EQ(insertBSON(opCtx, rs, Timestamp(2, 2)).getValue(), RecordId(2, 2));
+ }
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(2,2));
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ // find start
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0, 1)), RecordId()); // nothing <=
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2, 1)), RecordId(1, 2)); // between
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2, 2)), RecordId(2, 2)); // ==
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2, 3)), RecordId(2, 2)); // > highest
+ }
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1,2), false); // deletes 2,2
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(2, 2), false); // no-op
+ }
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(1,2));
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2, 3)), RecordId(2, 2));
+ }
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1,2), true); // deletes 1,2
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1, 2), false); // deletes 2,2
+ }
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId(1,1));
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2, 3)), RecordId(1, 2));
+ }
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- WriteUnitOfWork wuow(opCtx.get());
- ASSERT_OK(rs->truncate(opCtx.get())); // deletes 1,1 and leaves collection empty
- wuow.commit();
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ rs->temp_cappedTruncateAfter(opCtx.get(), RecordId(1, 2), true); // deletes 1,2
+ }
- {
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2,3)), RecordId());
- }
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2, 3)), RecordId(1, 1));
}
- TEST(WiredTigerRecordStoreTest, OplogHackOnNonOplog) {
- WiredTigerHarnessHelper harnessHelper;
- unique_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("local.NOT_oplog.foo"));
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(rs->truncate(opCtx.get())); // deletes 1,1 and leaves collection empty
+ wuow.commit();
+ }
+ {
unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(2, 3)), RecordId());
+ }
+}
+
+TEST(WiredTigerRecordStoreTest, OplogHackOnNonOplog) {
+ WiredTigerHarnessHelper harnessHelper;
+ unique_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("local.NOT_oplog.foo"));
- BSONObj obj = BSON( "ts" << Timestamp(2,-1) );
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+
+ BSONObj obj = BSON("ts" << Timestamp(2, -1));
+ {
+ WriteUnitOfWork wuow(opCtx.get());
+ ASSERT_OK(rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(), false).getStatus());
+ wuow.commit();
+ }
+ ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0, 1)), boost::none);
+}
+
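+// CappedOrder: in a capped collection, a later-committed insert must not become
+// visible before an earlier, still-uncommitted insert.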
+TEST(WiredTigerRecordStoreTest, CappedOrder) {
+ unique_ptr<WiredTigerHarnessHelper> harnessHelper(new WiredTigerHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 100000, 10000));
+
+ RecordId loc1;
+
+ { // first insert a document
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- WriteUnitOfWork wuow( opCtx.get() );
- ASSERT_OK(rs->insertRecord(opCtx.get(), obj.objdata(),
- obj.objsize(), false ).getStatus());
- wuow.commit();
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ loc1 = res.getValue();
+ uow.commit();
}
- ASSERT_EQ(rs->oplogStartHack(opCtx.get(), RecordId(0,1)), boost::none);
}
- TEST(WiredTigerRecordStoreTest, CappedOrder) {
- unique_ptr<WiredTigerHarnessHelper> harnessHelper( new WiredTigerHarnessHelper() );
- unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 100000,10000));
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(opCtx.get());
+ auto record = cursor->seekExact(loc1);
+ ASSERT_EQ(loc1, record->id);
+ ASSERT(!cursor->next());
+ }
- RecordId loc1;
+ {
+        // now we insert 2 docs, but commit the 2nd one first
+        // we make sure we can't find the 2nd until the first is committed
+ unique_ptr<OperationContext> t1(harnessHelper->newOperationContext());
+ unique_ptr<WriteUnitOfWork> w1(new WriteUnitOfWork(t1.get()));
+ rs->insertRecord(t1.get(), "b", 2, false);
+ // do not commit yet
- { // first insert a document
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ { // create 2nd doc
+ unique_ptr<OperationContext> t2(harnessHelper->newOperationContext());
{
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
- uow.commit();
+ WriteUnitOfWork w2(t2.get());
+ rs->insertRecord(t2.get(), "c", 2, false);
+ w2.commit();
}
}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ { // state should be the same
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
auto cursor = rs->getCursor(opCtx.get());
auto record = cursor->seekExact(loc1);
- ASSERT_EQ( loc1, record->id );
+ ASSERT_EQ(loc1, record->id);
ASSERT(!cursor->next());
}
- {
- // now we insert 2 docs, but commit the 2nd one fiirst
- // we make sure we can't find the 2nd until the first is commited
- unique_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
- unique_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
- rs->insertRecord( t1.get(), "b", 2, false );
- // do not commit yet
-
- { // create 2nd doc
- unique_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork w2( t2.get() );
- rs->insertRecord( t2.get(), "c", 2, false );
- w2.commit();
- }
- }
+ w1->commit();
+ }
- { // state should be the same
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(opCtx.get());
- auto record = cursor->seekExact(loc1);
- ASSERT_EQ( loc1, record->id );
- ASSERT(!cursor->next());
- }
+ { // now all 3 docs should be visible
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(opCtx.get());
+ auto record = cursor->seekExact(loc1);
+ ASSERT_EQ(loc1, record->id);
+ ASSERT(cursor->next());
+ ASSERT(cursor->next());
+ ASSERT(!cursor->next());
+ }
+}
- w1->commit();
- }
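+// CappedCursorRollover: once capped deletes remove the document a saved cursor
+// pointed at, restoring that cursor should fail and yield no further records.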
+TEST(WiredTigerRecordStoreTest, CappedCursorRollover) {
+ unique_ptr<WiredTigerHarnessHelper> harnessHelper(new WiredTigerHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 10000, 5));
- { // now all 3 docs should be visible
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(opCtx.get());
- auto record = cursor->seekExact(loc1);
- ASSERT_EQ( loc1, record->id );
- ASSERT(cursor->next());
- ASSERT(cursor->next());
- ASSERT(!cursor->next());
+ { // first insert 3 documents
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ for (int i = 0; i < 3; ++i) {
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
}
- TEST(WiredTigerRecordStoreTest, CappedCursorRollover) {
- unique_ptr<WiredTigerHarnessHelper> harnessHelper( new WiredTigerHarnessHelper() );
- unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 10000, 5));
+ // set up our cursor that should rollover
+ unique_ptr<OperationContext> cursorCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(cursorCtx.get());
+ ASSERT(cursor->next());
+ cursor->savePositioned();
+ cursorCtx->recoveryUnit()->abandonSnapshot();
- { // first insert 3 documents
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- for ( int i = 0; i < 3; ++i ) {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
+ { // insert 100 documents which causes rollover
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ for (int i = 0; i < 100; i++) {
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ uow.commit();
}
+ }
- // set up our cursor that should rollover
- unique_ptr<OperationContext> cursorCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(cursorCtx.get());
- ASSERT(cursor->next());
- cursor->savePositioned();
- cursorCtx->recoveryUnit()->abandonSnapshot();
-
- { // insert 100 documents which causes rollover
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- for ( int i = 0; i < 100; i++ ) {
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- uow.commit();
- }
- }
+ // cursor should now be dead
+ ASSERT_FALSE(cursor->restore(cursorCtx.get()));
+ ASSERT(!cursor->next());
+}
- // cursor should now be dead
- ASSERT_FALSE(cursor->restore(cursorCtx.get()));
- ASSERT(!cursor->next());
- }
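+// Helper: registers Timestamp(5, inc) with the oplog and inserts the matching
+// {ts: ...} document, returning its RecordId.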
+RecordId _oplogOrderInsertOplog(OperationContext* txn, unique_ptr<RecordStore>& rs, int inc) {
+ Timestamp opTime = Timestamp(5, inc);
+ WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get());
+ Status status = wrs->oplogDiskLocRegister(txn, opTime);
+ ASSERT_OK(status);
+ BSONObj obj = BSON("ts" << opTime);
+ StatusWith<RecordId> res = rs->insertRecord(txn, obj.objdata(), obj.objsize(), false);
+ ASSERT_OK(res.getStatus());
+ return res.getValue();
+}
- RecordId _oplogOrderInsertOplog( OperationContext* txn,
- unique_ptr<RecordStore>& rs,
- int inc ) {
- Timestamp opTime = Timestamp(5,inc);
- WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get());
- Status status = wrs->oplogDiskLocRegister( txn, opTime );
- ASSERT_OK( status );
- BSONObj obj = BSON( "ts" << opTime );
- StatusWith<RecordId> res = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
- ASSERT_OK( res.getStatus() );
- return res.getValue();
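+// OplogOrder: an oplog entry must stay invisible to readers until every earlier,
+// still-uncommitted entry has committed.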
+TEST(WiredTigerRecordStoreTest, OplogOrder) {
+ unique_ptr<WiredTigerHarnessHelper> harnessHelper(new WiredTigerHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("local.oplog.foo", 100000, -1));
+
+ {
+ const WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get());
+ ASSERT(wrs->isOplog());
+ ASSERT(wrs->usingOplogHack());
}
- TEST(WiredTigerRecordStoreTest, OplogOrder) {
- unique_ptr<WiredTigerHarnessHelper> harnessHelper( new WiredTigerHarnessHelper() );
- unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("local.oplog.foo",
- 100000,
- -1));
+ RecordId loc1;
+ { // first insert a document
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
{
- const WiredTigerRecordStore* wrs = checked_cast<WiredTigerRecordStore*>(rs.get());
- ASSERT( wrs->isOplog() );
- ASSERT( wrs->usingOplogHack() );
+ WriteUnitOfWork uow(opCtx.get());
+ loc1 = _oplogOrderInsertOplog(opCtx.get(), rs, 1);
+ uow.commit();
}
+ }
+
+ {
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(opCtx.get());
+ auto record = cursor->seekExact(loc1);
+ ASSERT_EQ(loc1, record->id);
+ ASSERT(!cursor->next());
+ }
- RecordId loc1;
+ {
+        // now we insert 2 docs, but commit the 2nd one first
+        // we make sure we can't find the 2nd until the first is committed
+ unique_ptr<OperationContext> t1(harnessHelper->newOperationContext());
+ unique_ptr<WriteUnitOfWork> w1(new WriteUnitOfWork(t1.get()));
+ _oplogOrderInsertOplog(t1.get(), rs, 2);
+ // do not commit yet
- { // first insert a document
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ { // create 2nd doc
+ unique_ptr<OperationContext> t2(harnessHelper->newOperationContext());
{
- WriteUnitOfWork uow( opCtx.get() );
- loc1 = _oplogOrderInsertOplog( opCtx.get(), rs, 1 );
- uow.commit();
+ WriteUnitOfWork w2(t2.get());
+ _oplogOrderInsertOplog(t2.get(), rs, 3);
+ w2.commit();
}
}
- {
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
+ { // state should be the same
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
auto cursor = rs->getCursor(opCtx.get());
auto record = cursor->seekExact(loc1);
- ASSERT_EQ( loc1, record->id );
+ ASSERT_EQ(loc1, record->id);
ASSERT(!cursor->next());
}
- {
- // now we insert 2 docs, but commit the 2nd one fiirst
- // we make sure we can't find the 2nd until the first is commited
- unique_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
- unique_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
- _oplogOrderInsertOplog( t1.get(), rs, 2 );
- // do not commit yet
-
- { // create 2nd doc
- unique_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
- {
- WriteUnitOfWork w2( t2.get() );
- _oplogOrderInsertOplog( t2.get(), rs, 3 );
- w2.commit();
- }
- }
-
- { // state should be the same
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(opCtx.get());
- auto record = cursor->seekExact(loc1);
- ASSERT_EQ( loc1, record->id );
- ASSERT(!cursor->next());
- }
-
- w1->commit();
- }
-
- { // now all 3 docs should be visible
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(opCtx.get());
- auto record = cursor->seekExact(loc1);
- ASSERT_EQ( loc1, record->id );
- ASSERT(cursor->next());
- ASSERT(cursor->next());
- ASSERT(!cursor->next());
- }
+ w1->commit();
}
- TEST(WiredTigerRecordStoreTest, StorageSizeStatisticsDisabled) {
- WiredTigerHarnessHelper harnessHelper("statistics=(none)");
- unique_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("a.b"));
-
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- ASSERT_THROWS(rs->storageSize(opCtx.get()), UserException);
+ { // now all 3 docs should be visible
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(opCtx.get());
+ auto record = cursor->seekExact(loc1);
+ ASSERT_EQ(loc1, record->id);
+ ASSERT(cursor->next());
+ ASSERT(cursor->next());
+ ASSERT(!cursor->next());
}
+}
- TEST(WiredTigerRecordStoreTest, AppendCustomStatsMetadata) {
- WiredTigerHarnessHelper harnessHelper;
- unique_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("a.b"));
+TEST(WiredTigerRecordStoreTest, StorageSizeStatisticsDisabled) {
+ WiredTigerHarnessHelper harnessHelper("statistics=(none)");
+ unique_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("a.b"));
- unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
- BSONObjBuilder builder;
- rs->appendCustomStats(opCtx.get(), &builder, 1.0);
- BSONObj customStats = builder.obj();
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ ASSERT_THROWS(rs->storageSize(opCtx.get()), UserException);
+}
- BSONElement wiredTigerElement = customStats.getField(kWiredTigerEngineName);
- ASSERT_TRUE(wiredTigerElement.isABSONObj());
- BSONObj wiredTiger = wiredTigerElement.Obj();
+TEST(WiredTigerRecordStoreTest, AppendCustomStatsMetadata) {
+ WiredTigerHarnessHelper harnessHelper;
+ unique_ptr<RecordStore> rs(harnessHelper.newNonCappedRecordStore("a.b"));
- BSONElement metadataElement = wiredTiger.getField("metadata");
- ASSERT_TRUE(metadataElement.isABSONObj());
- BSONObj metadata = metadataElement.Obj();
+ unique_ptr<OperationContext> opCtx(harnessHelper.newOperationContext());
+ BSONObjBuilder builder;
+ rs->appendCustomStats(opCtx.get(), &builder, 1.0);
+ BSONObj customStats = builder.obj();
- BSONElement versionElement = metadata.getField("formatVersion");
- ASSERT_TRUE(versionElement.isNumber());
+ BSONElement wiredTigerElement = customStats.getField(kWiredTigerEngineName);
+ ASSERT_TRUE(wiredTigerElement.isABSONObj());
+ BSONObj wiredTiger = wiredTigerElement.Obj();
- BSONElement creationStringElement = wiredTiger.getField("creationString");
- ASSERT_EQUALS(creationStringElement.type(), String);
- }
+ BSONElement metadataElement = wiredTiger.getField("metadata");
+ ASSERT_TRUE(metadataElement.isABSONObj());
+ BSONObj metadata = metadataElement.Obj();
- TEST(WiredTigerRecordStoreTest, CappedCursorYieldFirst) {
- unique_ptr<WiredTigerHarnessHelper> harnessHelper( new WiredTigerHarnessHelper() );
- unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 10000, 50));
+ BSONElement versionElement = metadata.getField("formatVersion");
+ ASSERT_TRUE(versionElement.isNumber());
- RecordId loc1;
+ BSONElement creationStringElement = wiredTiger.getField("creationString");
+ ASSERT_EQUALS(creationStringElement.type(), String);
+}
- { // first insert a document
- unique_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
- WriteUnitOfWork uow( opCtx.get() );
- StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
- ASSERT_OK( res.getStatus() );
- loc1 = res.getValue();
- uow.commit();
- }
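+// CappedCursorYieldFirst: saving and restoring a cursor before the first call to
+// next() should still return the first (and only) document.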
+TEST(WiredTigerRecordStoreTest, CappedCursorYieldFirst) {
+ unique_ptr<WiredTigerHarnessHelper> harnessHelper(new WiredTigerHarnessHelper());
+ unique_ptr<RecordStore> rs(harnessHelper->newCappedRecordStore("a.b", 10000, 50));
- unique_ptr<OperationContext> cursorCtx( harnessHelper->newOperationContext() );
- auto cursor = rs->getCursor(cursorCtx.get());
+ RecordId loc1;
- // See that things work if you yield before you first call getNext().
- cursor->savePositioned();
- cursorCtx->recoveryUnit()->abandonSnapshot();
- ASSERT_TRUE(cursor->restore(cursorCtx.get()));
- auto record = cursor->next();
- ASSERT_EQ( loc1, record->id );
- ASSERT(!cursor->next());
+ { // first insert a document
+ unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
+ WriteUnitOfWork uow(opCtx.get());
+ StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "a", 2, false);
+ ASSERT_OK(res.getStatus());
+ loc1 = res.getValue();
+ uow.commit();
}
+ unique_ptr<OperationContext> cursorCtx(harnessHelper->newOperationContext());
+ auto cursor = rs->getCursor(cursorCtx.get());
+
+ // See that things work if you yield before you first call getNext().
+ cursor->savePositioned();
+ cursorCtx->recoveryUnit()->abandonSnapshot();
+ ASSERT_TRUE(cursor->restore(cursorCtx.get()));
+ auto record = cursor->next();
+ ASSERT_EQ(loc1, record->id);
+ ASSERT(!cursor->next());
+}
+
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 4c71b448804..a248085ff36 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -47,363 +47,347 @@
namespace mongo {
- namespace {
- struct WaitUntilDurableData {
- WaitUntilDurableData() :
- numWaitingForSync(0),
- lastSyncTime(0) {
- }
-
- void syncHappend() {
- stdx::lock_guard<stdx::mutex> lk( mutex );
- lastSyncTime++;
- condvar.notify_all();
- }
-
- // return true if happened
- bool waitUntilDurable() {
- stdx::unique_lock<stdx::mutex> lk( mutex );
- long long start = lastSyncTime;
- numWaitingForSync.fetchAndAdd(1);
- condvar.timed_wait(lk,boost::posix_time::milliseconds(50));
- numWaitingForSync.fetchAndAdd(-1);
- return lastSyncTime > start;
- }
-
- AtomicUInt32 numWaitingForSync;
-
- stdx::mutex mutex; // this just protects lastSyncTime
- stdx::condition_variable condvar;
- long long lastSyncTime;
- } waitUntilDurableData;
+namespace {
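+// Tracks threads waiting for a journal flush: waitUntilDurable() blocks for up to
+// 50ms and reports whether a sync happened while it waited.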
+struct WaitUntilDurableData {
+ WaitUntilDurableData() : numWaitingForSync(0), lastSyncTime(0) {}
+
+ void syncHappend() {
+ stdx::lock_guard<stdx::mutex> lk(mutex);
+ lastSyncTime++;
+ condvar.notify_all();
}
- WiredTigerRecoveryUnit::WiredTigerRecoveryUnit(WiredTigerSessionCache* sc) :
- _sessionCache( sc ),
- _session( NULL ),
- _inUnitOfWork(false),
- _active( false ),
- _myTransactionCount( 1 ),
- _everStartedWrite( false ),
- _currentlySquirreled( false ),
- _syncing( false ),
- _noTicketNeeded( false ) {
+    // returns true if a sync happened while we were waiting
+ bool waitUntilDurable() {
+ stdx::unique_lock<stdx::mutex> lk(mutex);
+ long long start = lastSyncTime;
+ numWaitingForSync.fetchAndAdd(1);
+ condvar.timed_wait(lk, boost::posix_time::milliseconds(50));
+ numWaitingForSync.fetchAndAdd(-1);
+ return lastSyncTime > start;
}
- WiredTigerRecoveryUnit::~WiredTigerRecoveryUnit() {
- invariant(!_inUnitOfWork);
- _abort();
- if ( _session ) {
- _sessionCache->releaseSession( _session );
- _session = NULL;
- }
- }
+ AtomicUInt32 numWaitingForSync;
- void WiredTigerRecoveryUnit::reportState( BSONObjBuilder* b ) const {
- b->append("wt_inUnitOfWork", _inUnitOfWork);
- b->append("wt_active", _active);
- b->append("wt_everStartedWrite", _everStartedWrite);
- b->append("wt_hasTicket", _ticket.hasTicket());
- b->appendNumber("wt_myTransactionCount", static_cast<long long>(_myTransactionCount));
- if (_active)
- b->append("wt_millisSinceCommit", _timer.millis());
- }
+ stdx::mutex mutex; // this just protects lastSyncTime
+ stdx::condition_variable condvar;
+ long long lastSyncTime;
+} waitUntilDurableData;
+}
- void WiredTigerRecoveryUnit::_commit() {
- try {
- if ( _session && _active ) {
- _txnClose( true );
- }
+WiredTigerRecoveryUnit::WiredTigerRecoveryUnit(WiredTigerSessionCache* sc)
+ : _sessionCache(sc),
+ _session(NULL),
+ _inUnitOfWork(false),
+ _active(false),
+ _myTransactionCount(1),
+ _everStartedWrite(false),
+ _currentlySquirreled(false),
+ _syncing(false),
+ _noTicketNeeded(false) {}
+
+WiredTigerRecoveryUnit::~WiredTigerRecoveryUnit() {
+ invariant(!_inUnitOfWork);
+ _abort();
+ if (_session) {
+ _sessionCache->releaseSession(_session);
+ _session = NULL;
+ }
+}
- for (Changes::const_iterator it = _changes.begin(), end = _changes.end(); it != end;
- ++it) {
- (*it)->commit();
- }
- _changes.clear();
+void WiredTigerRecoveryUnit::reportState(BSONObjBuilder* b) const {
+ b->append("wt_inUnitOfWork", _inUnitOfWork);
+ b->append("wt_active", _active);
+ b->append("wt_everStartedWrite", _everStartedWrite);
+ b->append("wt_hasTicket", _ticket.hasTicket());
+ b->appendNumber("wt_myTransactionCount", static_cast<long long>(_myTransactionCount));
+ if (_active)
+ b->append("wt_millisSinceCommit", _timer.millis());
+}
- invariant(!_active);
+void WiredTigerRecoveryUnit::_commit() {
+ try {
+ if (_session && _active) {
+ _txnClose(true);
}
- catch (...) {
- std::terminate();
+
+ for (Changes::const_iterator it = _changes.begin(), end = _changes.end(); it != end; ++it) {
+ (*it)->commit();
}
+ _changes.clear();
+
+ invariant(!_active);
+ } catch (...) {
+ std::terminate();
}
+}
- void WiredTigerRecoveryUnit::_abort() {
- try {
- if ( _session && _active ) {
- _txnClose( false );
- }
-
- for (Changes::const_reverse_iterator it = _changes.rbegin(), end = _changes.rend();
- it != end; ++it) {
- Change* change = *it;
- LOG(2) << "CUSTOM ROLLBACK " << demangleName(typeid(*change));
- change->rollback();
- }
- _changes.clear();
-
- invariant(!_active);
+void WiredTigerRecoveryUnit::_abort() {
+ try {
+ if (_session && _active) {
+ _txnClose(false);
}
- catch (...) {
- std::terminate();
+
+ for (Changes::const_reverse_iterator it = _changes.rbegin(), end = _changes.rend();
+ it != end;
+ ++it) {
+ Change* change = *it;
+ LOG(2) << "CUSTOM ROLLBACK " << demangleName(typeid(*change));
+ change->rollback();
}
- }
+ _changes.clear();
- void WiredTigerRecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
- invariant(!_inUnitOfWork);
- invariant(!_currentlySquirreled);
- _inUnitOfWork = true;
- _everStartedWrite = true;
- _getTicket(opCtx);
+ invariant(!_active);
+ } catch (...) {
+ std::terminate();
}
+}
- void WiredTigerRecoveryUnit::commitUnitOfWork() {
- invariant(_inUnitOfWork);
- _inUnitOfWork = false;
- _commit();
- }
+void WiredTigerRecoveryUnit::beginUnitOfWork(OperationContext* opCtx) {
+ invariant(!_inUnitOfWork);
+ invariant(!_currentlySquirreled);
+ _inUnitOfWork = true;
+ _everStartedWrite = true;
+ _getTicket(opCtx);
+}
- void WiredTigerRecoveryUnit::abortUnitOfWork() {
- invariant(_inUnitOfWork);
- _inUnitOfWork = false;
- _abort();
- }
+void WiredTigerRecoveryUnit::commitUnitOfWork() {
+ invariant(_inUnitOfWork);
+ _inUnitOfWork = false;
+ _commit();
+}
- void WiredTigerRecoveryUnit::goingToWaitUntilDurable() {
- if ( _active ) {
- // too late, can't change config
- return;
- }
- // yay, we've configured ourselves for sync
- _syncing = true;
+void WiredTigerRecoveryUnit::abortUnitOfWork() {
+ invariant(_inUnitOfWork);
+ _inUnitOfWork = false;
+ _abort();
+}
+
+void WiredTigerRecoveryUnit::goingToWaitUntilDurable() {
+ if (_active) {
+ // too late, can't change config
+ return;
}
+ // yay, we've configured ourselves for sync
+ _syncing = true;
+}
- bool WiredTigerRecoveryUnit::waitUntilDurable() {
- if ( _syncing && _everStartedWrite ) {
- // we did a sync, so we're good
- return true;
- }
- waitUntilDurableData.waitUntilDurable();
+bool WiredTigerRecoveryUnit::waitUntilDurable() {
+ if (_syncing && _everStartedWrite) {
+ // we did a sync, so we're good
return true;
}
+ waitUntilDurableData.waitUntilDurable();
+ return true;
+}
+
+void WiredTigerRecoveryUnit::registerChange(Change* change) {
+ invariant(_inUnitOfWork);
+ _changes.push_back(change);
+}
+
+WiredTigerRecoveryUnit* WiredTigerRecoveryUnit::get(OperationContext* txn) {
+ invariant(txn);
+ return checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit());
+}
- void WiredTigerRecoveryUnit::registerChange(Change* change) {
- invariant(_inUnitOfWork);
- _changes.push_back(change);
+void WiredTigerRecoveryUnit::assertInActiveTxn() const {
+ fassert(28575, _active);
+}
+
+WiredTigerSession* WiredTigerRecoveryUnit::getSession(OperationContext* opCtx) {
+ if (!_session) {
+ _session = _sessionCache->getSession();
}
- WiredTigerRecoveryUnit* WiredTigerRecoveryUnit::get(OperationContext *txn) {
- invariant( txn );
- return checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit());
+ if (!_active) {
+ _txnOpen(opCtx);
}
+ return _session;
+}
- void WiredTigerRecoveryUnit::assertInActiveTxn() const {
- fassert( 28575, _active );
+void WiredTigerRecoveryUnit::abandonSnapshot() {
+ invariant(!_inUnitOfWork);
+ if (_active) {
+ // Can't be in a WriteUnitOfWork, so safe to rollback
+ _txnClose(false);
}
+}
- WiredTigerSession* WiredTigerRecoveryUnit::getSession(OperationContext* opCtx) {
- if ( !_session ) {
- _session = _sessionCache->getSession();
- }
+void WiredTigerRecoveryUnit::setOplogReadTill(const RecordId& loc) {
+ _oplogReadTill = loc;
+}
- if ( !_active ) {
- _txnOpen(opCtx);
- }
- return _session;
- }
+namespace {
- void WiredTigerRecoveryUnit::abandonSnapshot() {
- invariant(!_inUnitOfWork);
- if (_active) {
- // Can't be in a WriteUnitOfWork, so safe to rollback
- _txnClose(false);
- }
- }
- void WiredTigerRecoveryUnit::setOplogReadTill( const RecordId& loc ) {
- _oplogReadTill = loc;
- }
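+// Server parameter wrapping a TicketHolder (e.g. wiredTigerConcurrentWriteTransactions)
+// so the number of available tickets can be read and resized (to any value > 0) at runtime.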
+class TicketServerParameter : public ServerParameter {
+ MONGO_DISALLOW_COPYING(TicketServerParameter);
- namespace {
-
-
- class TicketServerParameter : public ServerParameter {
- MONGO_DISALLOW_COPYING(TicketServerParameter);
- public:
- TicketServerParameter(TicketHolder* holder, const std::string& name)
- : ServerParameter(ServerParameterSet::getGlobal(),
- name,
- true,
- true),
- _holder( holder ) {
- }
-
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b.append(name, _holder->outof());
- }
-
- virtual Status set( const BSONElement& newValueElement ) {
- if (!newValueElement.isNumber())
- return Status(ErrorCodes::BadValue,
- str::stream() << name() << " has to be a number");
- return _set(newValueElement.numberInt());
- }
-
- virtual Status setFromString( const std::string& str ) {
- int num = 0;
- Status status = parseNumberFromString(str, &num);
- if (!status.isOK())
- return status;
- return _set(num);
- }
-
- Status _set(int newNum) {
- if (newNum <= 0) {
- return Status(ErrorCodes::BadValue,
- str::stream() << name() << " has to be > 0");
- }
-
- return _holder->resize(newNum);
- }
-
- private:
- TicketHolder* _holder;
- };
-
- TicketHolder openWriteTransaction(128);
- TicketServerParameter openWriteTransactionParam(&openWriteTransaction,
- "wiredTigerConcurrentWriteTransactions");
-
- TicketHolder openReadTransaction(128);
- TicketServerParameter openReadTransactionParam(&openReadTransaction,
- "wiredTigerConcurrentReadTransactions");
+public:
+ TicketServerParameter(TicketHolder* holder, const std::string& name)
+ : ServerParameter(ServerParameterSet::getGlobal(), name, true, true), _holder(holder) {}
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b.append(name, _holder->outof());
}
- void WiredTigerRecoveryUnit::appendGlobalStats(BSONObjBuilder& b) {
- BSONObjBuilder bb(b.subobjStart("concurrentTransactions"));
- {
- BSONObjBuilder bbb(bb.subobjStart("write"));
- bbb.append("out", openWriteTransaction.used());
- bbb.append("available", openWriteTransaction.available());
- bbb.append("totalTickets", openWriteTransaction.outof());
- bbb.done();
- }
- {
- BSONObjBuilder bbb(bb.subobjStart("read"));
- bbb.append("out", openReadTransaction.used());
- bbb.append("available", openReadTransaction.available());
- bbb.append("totalTickets", openReadTransaction.outof());
- bbb.done();
- }
- bb.done();
+ virtual Status set(const BSONElement& newValueElement) {
+ if (!newValueElement.isNumber())
+ return Status(ErrorCodes::BadValue, str::stream() << name() << " has to be a number");
+ return _set(newValueElement.numberInt());
}
- void WiredTigerRecoveryUnit::_txnClose( bool commit ) {
- invariant( _active );
- WT_SESSION *s = _session->getSession();
- if ( commit ) {
- invariantWTOK( s->commit_transaction(s, NULL) );
- LOG(2) << "WT commit_transaction";
- if ( _syncing )
- waitUntilDurableData.syncHappend();
- }
- else {
- invariantWTOK( s->rollback_transaction(s, NULL) );
- LOG(2) << "WT rollback_transaction";
+ virtual Status setFromString(const std::string& str) {
+ int num = 0;
+ Status status = parseNumberFromString(str, &num);
+ if (!status.isOK())
+ return status;
+ return _set(num);
+ }
+
+ Status _set(int newNum) {
+ if (newNum <= 0) {
+ return Status(ErrorCodes::BadValue, str::stream() << name() << " has to be > 0");
}
- _active = false;
- _myTransactionCount++;
- _ticket.reset(NULL);
+
+ return _holder->resize(newNum);
}
- SnapshotId WiredTigerRecoveryUnit::getSnapshotId() const {
- // TODO: use actual wiredtiger txn id
- return SnapshotId(_myTransactionCount);
+private:
+ TicketHolder* _holder;
+};
+
+TicketHolder openWriteTransaction(128);
+TicketServerParameter openWriteTransactionParam(&openWriteTransaction,
+ "wiredTigerConcurrentWriteTransactions");
+
+TicketHolder openReadTransaction(128);
+TicketServerParameter openReadTransactionParam(&openReadTransaction,
+ "wiredTigerConcurrentReadTransactions");
+}
+
+void WiredTigerRecoveryUnit::appendGlobalStats(BSONObjBuilder& b) {
+ BSONObjBuilder bb(b.subobjStart("concurrentTransactions"));
+ {
+ BSONObjBuilder bbb(bb.subobjStart("write"));
+ bbb.append("out", openWriteTransaction.used());
+ bbb.append("available", openWriteTransaction.available());
+ bbb.append("totalTickets", openWriteTransaction.outof());
+ bbb.done();
+ }
+ {
+ BSONObjBuilder bbb(bb.subobjStart("read"));
+ bbb.append("out", openReadTransaction.used());
+ bbb.append("available", openReadTransaction.available());
+ bbb.append("totalTickets", openReadTransaction.outof());
+ bbb.done();
}
+ bb.done();
+}
- void WiredTigerRecoveryUnit::markNoTicketRequired() {
- invariant(!_ticket.hasTicket());
- _noTicketNeeded = true;
+void WiredTigerRecoveryUnit::_txnClose(bool commit) {
+ invariant(_active);
+ WT_SESSION* s = _session->getSession();
+ if (commit) {
+ invariantWTOK(s->commit_transaction(s, NULL));
+ LOG(2) << "WT commit_transaction";
+ if (_syncing)
+ waitUntilDurableData.syncHappend();
+ } else {
+ invariantWTOK(s->rollback_transaction(s, NULL));
+ LOG(2) << "WT rollback_transaction";
}
+ _active = false;
+ _myTransactionCount++;
+ _ticket.reset(NULL);
+}
- void WiredTigerRecoveryUnit::_getTicket(OperationContext* opCtx) {
- // already have a ticket
- if (_ticket.hasTicket())
- return;
+SnapshotId WiredTigerRecoveryUnit::getSnapshotId() const {
+ // TODO: use actual wiredtiger txn id
+ return SnapshotId(_myTransactionCount);
+}
- if (_noTicketNeeded)
- return;
+void WiredTigerRecoveryUnit::markNoTicketRequired() {
+ invariant(!_ticket.hasTicket());
+ _noTicketNeeded = true;
+}
- bool writeLocked;
+void WiredTigerRecoveryUnit::_getTicket(OperationContext* opCtx) {
+ // already have a ticket
+ if (_ticket.hasTicket())
+ return;
- // If we have a strong lock, waiting for a ticket can cause a deadlock.
- if (opCtx != NULL &&
- opCtx->lockState() != NULL) {
- if (opCtx->lockState()->hasStrongLocks())
- return;
- writeLocked = opCtx->lockState()->isWriteLocked();
- }
- else {
- writeLocked = _everStartedWrite;
- }
+ if (_noTicketNeeded)
+ return;
- TicketHolder* holder = writeLocked ? &openWriteTransaction : &openReadTransaction;
+ bool writeLocked;
- holder->waitForTicket();
- _ticket.reset(holder);
+ // If we have a strong lock, waiting for a ticket can cause a deadlock.
+ if (opCtx != NULL && opCtx->lockState() != NULL) {
+ if (opCtx->lockState()->hasStrongLocks())
+ return;
+ writeLocked = opCtx->lockState()->isWriteLocked();
+ } else {
+ writeLocked = _everStartedWrite;
}
- void WiredTigerRecoveryUnit::_txnOpen(OperationContext* opCtx) {
- invariant( !_active );
- _getTicket(opCtx);
+ TicketHolder* holder = writeLocked ? &openWriteTransaction : &openReadTransaction;
- WT_SESSION *s = _session->getSession();
- _syncing = _syncing || waitUntilDurableData.numWaitingForSync.load() > 0;
- invariantWTOK( s->begin_transaction(s, _syncing ? "sync=true" : NULL) );
- LOG(2) << "WT begin_transaction";
- _timer.reset();
- _active = true;
- }
+ holder->waitForTicket();
+ _ticket.reset(holder);
+}
- void WiredTigerRecoveryUnit::beingReleasedFromOperationContext() {
- LOG(2) << "WiredTigerRecoveryUnit::beingReleased";
- _currentlySquirreled = true;
- if ( _active == false && !wt_keeptxnopen() ) {
- _commit();
- }
- }
- void WiredTigerRecoveryUnit::beingSetOnOperationContext() {
- LOG(2) << "WiredTigerRecoveryUnit::broughtBack";
- _currentlySquirreled = false;
+void WiredTigerRecoveryUnit::_txnOpen(OperationContext* opCtx) {
+ invariant(!_active);
+ _getTicket(opCtx);
+
+ WT_SESSION* s = _session->getSession();
+ _syncing = _syncing || waitUntilDurableData.numWaitingForSync.load() > 0;
+ invariantWTOK(s->begin_transaction(s, _syncing ? "sync=true" : NULL));
+ LOG(2) << "WT begin_transaction";
+ _timer.reset();
+ _active = true;
+}
+
+void WiredTigerRecoveryUnit::beingReleasedFromOperationContext() {
+ LOG(2) << "WiredTigerRecoveryUnit::beingReleased";
+ _currentlySquirreled = true;
+ if (_active == false && !wt_keeptxnopen()) {
+ _commit();
}
+}
+void WiredTigerRecoveryUnit::beingSetOnOperationContext() {
+ LOG(2) << "WiredTigerRecoveryUnit::broughtBack";
+ _currentlySquirreled = false;
+}
- // ---------------------
+// ---------------------
- WiredTigerCursor::WiredTigerCursor(const std::string& uri,
- uint64_t id,
- bool forRecordStore,
- OperationContext* txn) {
- _uriID = id;
- _ru = WiredTigerRecoveryUnit::get( txn );
- _session = _ru->getSession(txn);
- _cursor = _session->getCursor( uri, id, forRecordStore );
- if ( !_cursor ) {
- error() << "no cursor for uri: " << uri;
- }
+WiredTigerCursor::WiredTigerCursor(const std::string& uri,
+ uint64_t id,
+ bool forRecordStore,
+ OperationContext* txn) {
+ _uriID = id;
+ _ru = WiredTigerRecoveryUnit::get(txn);
+ _session = _ru->getSession(txn);
+ _cursor = _session->getCursor(uri, id, forRecordStore);
+ if (!_cursor) {
+ error() << "no cursor for uri: " << uri;
}
+}
- WiredTigerCursor::~WiredTigerCursor() {
- _session->releaseCursor( _uriID, _cursor );
- _cursor = NULL;
- }
+WiredTigerCursor::~WiredTigerCursor() {
+ _session->releaseCursor(_uriID, _cursor);
+ _cursor = NULL;
+}
- void WiredTigerCursor::reset() {
- invariantWTOK( _cursor->reset( _cursor ) );
- }
+void WiredTigerCursor::reset() {
+ invariantWTOK(_cursor->reset(_cursor));
+}
- WT_SESSION* WiredTigerCursor::getWTSession() {
- return _session->getSession();
- }
+WT_SESSION* WiredTigerCursor::getWTSession() {
+ return _session->getSession();
+}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
index 4dcb216c060..6979f47af09 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
@@ -44,116 +44,131 @@
namespace mongo {
- class BSONObjBuilder;
- class WiredTigerSession;
- class WiredTigerSessionCache;
+class BSONObjBuilder;
+class WiredTigerSession;
+class WiredTigerSessionCache;
- class WiredTigerRecoveryUnit : public RecoveryUnit {
- public:
- WiredTigerRecoveryUnit(WiredTigerSessionCache* sc);
+class WiredTigerRecoveryUnit : public RecoveryUnit {
+public:
+ WiredTigerRecoveryUnit(WiredTigerSessionCache* sc);
- virtual ~WiredTigerRecoveryUnit();
+ virtual ~WiredTigerRecoveryUnit();
- virtual void reportState( BSONObjBuilder* b ) const;
+ virtual void reportState(BSONObjBuilder* b) const;
- void beginUnitOfWork(OperationContext* opCtx) final;
- void commitUnitOfWork() final;
- void abortUnitOfWork() final;
+ void beginUnitOfWork(OperationContext* opCtx) final;
+ void commitUnitOfWork() final;
+ void abortUnitOfWork() final;
- virtual bool waitUntilDurable();
- virtual void goingToWaitUntilDurable();
+ virtual bool waitUntilDurable();
+ virtual void goingToWaitUntilDurable();
- virtual void registerChange(Change *);
+ virtual void registerChange(Change*);
- virtual void beingReleasedFromOperationContext();
- virtual void beingSetOnOperationContext();
+ virtual void beingReleasedFromOperationContext();
+ virtual void beingSetOnOperationContext();
- virtual void abandonSnapshot();
+ virtual void abandonSnapshot();
- // un-used API
- virtual void* writingPtr(void* data, size_t len) { invariant(!"don't call writingPtr"); }
+ // un-used API
+ virtual void* writingPtr(void* data, size_t len) {
+ invariant(!"don't call writingPtr");
+ }
- virtual void setRollbackWritesDisabled() {}
+ virtual void setRollbackWritesDisabled() {}
- virtual SnapshotId getSnapshotId() const;
+ virtual SnapshotId getSnapshotId() const;
- // ---- WT STUFF
+ // ---- WT STUFF
- WiredTigerSession* getSession(OperationContext* opCtx);
- WiredTigerSessionCache* getSessionCache() { return _sessionCache; }
- bool inActiveTxn() const { return _active; }
- void assertInActiveTxn() const;
+ WiredTigerSession* getSession(OperationContext* opCtx);
+ WiredTigerSessionCache* getSessionCache() {
+ return _sessionCache;
+ }
+ bool inActiveTxn() const {
+ return _active;
+ }
+ void assertInActiveTxn() const;
- bool everStartedWrite() const { return _everStartedWrite; }
+ bool everStartedWrite() const {
+ return _everStartedWrite;
+ }
- void setOplogReadTill( const RecordId& loc );
- RecordId getOplogReadTill() const { return _oplogReadTill; }
+ void setOplogReadTill(const RecordId& loc);
+ RecordId getOplogReadTill() const {
+ return _oplogReadTill;
+ }
- void markNoTicketRequired();
+ void markNoTicketRequired();
- static WiredTigerRecoveryUnit* get(OperationContext *txn);
+ static WiredTigerRecoveryUnit* get(OperationContext* txn);
- static void appendGlobalStats(BSONObjBuilder& b);
- private:
+ static void appendGlobalStats(BSONObjBuilder& b);
- void _abort();
- void _commit();
+private:
+ void _abort();
+ void _commit();
- void _txnClose( bool commit );
- void _txnOpen(OperationContext* opCtx);
+ void _txnClose(bool commit);
+ void _txnOpen(OperationContext* opCtx);
- WiredTigerSessionCache* _sessionCache; // not owned
- WiredTigerSession* _session; // owned, but from pool
- bool _defaultCommit;
- bool _inUnitOfWork;
- bool _active;
- uint64_t _myTransactionCount;
- bool _everStartedWrite;
- Timer _timer;
- bool _currentlySquirreled;
- bool _syncing;
- RecordId _oplogReadTill;
+ WiredTigerSessionCache* _sessionCache; // not owned
+ WiredTigerSession* _session; // owned, but from pool
+ bool _defaultCommit;
+ bool _inUnitOfWork;
+ bool _active;
+ uint64_t _myTransactionCount;
+ bool _everStartedWrite;
+ Timer _timer;
+ bool _currentlySquirreled;
+ bool _syncing;
+ RecordId _oplogReadTill;
- typedef OwnedPointerVector<Change> Changes;
- Changes _changes;
+ typedef OwnedPointerVector<Change> Changes;
+ Changes _changes;
- bool _noTicketNeeded;
- void _getTicket(OperationContext* opCtx);
- TicketHolderReleaser _ticket;
- };
-
- /**
- * This is a smart pointer that wraps a WT_CURSOR and knows how to obtain and get from pool.
- */
- class WiredTigerCursor {
- public:
- WiredTigerCursor(const std::string& uri,
- uint64_t uriID,
- bool forRecordStore,
- OperationContext* txn);
-
- ~WiredTigerCursor();
-
-
- WT_CURSOR* get() const {
- // TODO(SERVER-16816): assertInActiveTxn();
- return _cursor;
- }
-
- WT_CURSOR* operator->() const { return get(); }
-
- WiredTigerSession* getSession() { return _session; }
- WT_SESSION* getWTSession();
-
- void reset();
-
- void assertInActiveTxn() const { _ru->assertInActiveTxn(); }
-
- private:
- uint64_t _uriID;
- WiredTigerRecoveryUnit* _ru; // not owned
- WiredTigerSession* _session;
- WT_CURSOR* _cursor; // owned, but pulled
- };
+ bool _noTicketNeeded;
+ void _getTicket(OperationContext* opCtx);
+ TicketHolderReleaser _ticket;
+};
+/**
+ * This is a smart pointer that wraps a WT_CURSOR and knows how to obtain and get from pool.
+ */
+class WiredTigerCursor {
+public:
+ WiredTigerCursor(const std::string& uri,
+ uint64_t uriID,
+ bool forRecordStore,
+ OperationContext* txn);
+
+ ~WiredTigerCursor();
+
+
+ WT_CURSOR* get() const {
+ // TODO(SERVER-16816): assertInActiveTxn();
+ return _cursor;
+ }
+
+ WT_CURSOR* operator->() const {
+ return get();
+ }
+
+ WiredTigerSession* getSession() {
+ return _session;
+ }
+ WT_SESSION* getWTSession();
+
+ void reset();
+
+ void assertInActiveTxn() const {
+ _ru->assertInActiveTxn();
+ }
+
+private:
+ uint64_t _uriID;
+ WiredTigerRecoveryUnit* _ru; // not owned
+ WiredTigerSession* _session;
+ WT_CURSOR* _cursor; // owned, but pulled
+};
}
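
WiredTigerCursor above is an RAII handle: the constructor obtains a cursor for a URI through the recovery unit's session, and the destructor gives it back. A reduced standalone sketch of the same idiom that opens and closes the cursor directly instead of going through the per-session cache (ScopedCursor is illustrative, not MongoDB code):

    // Minimal RAII cursor wrapper; the real WiredTigerCursor instead pulls the
    // cursor from a per-session cache keyed by uriID and returns it on destruction.
    #include <cassert>
    #include <string>
    #include <wiredtiger.h>

    class ScopedCursor {
    public:
        ScopedCursor(WT_SESSION* session, const std::string& uri) {
            // "overwrite=false" mirrors the non-record-store path in this change.
            int ret = session->open_cursor(session, uri.c_str(), nullptr, "overwrite=false", &_cursor);
            assert(ret == 0);
            (void)ret;
        }
        ~ScopedCursor() {
            if (_cursor)
                _cursor->close(_cursor);
        }
        ScopedCursor(const ScopedCursor&) = delete;
        ScopedCursor& operator=(const ScopedCursor&) = delete;

        WT_CURSOR* get() const {
            return _cursor;
        }
        WT_CURSOR* operator->() const {
            return get();
        }

    private:
        WT_CURSOR* _cursor = nullptr;  // owned here; owned-but-pooled in the real class
    };
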
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
index d546a4997da..eadd39ebde7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp
@@ -45,41 +45,36 @@
namespace mongo {
- using std::string;
-
- WiredTigerServerStatusSection::WiredTigerServerStatusSection(WiredTigerKVEngine* engine)
- : ServerStatusSection(kWiredTigerEngineName),
- _engine(engine) { }
-
- bool WiredTigerServerStatusSection::includeByDefault() const {
- return true;
+using std::string;
+
+WiredTigerServerStatusSection::WiredTigerServerStatusSection(WiredTigerKVEngine* engine)
+ : ServerStatusSection(kWiredTigerEngineName), _engine(engine) {}
+
+bool WiredTigerServerStatusSection::includeByDefault() const {
+ return true;
+}
+
+BSONObj WiredTigerServerStatusSection::generateSection(OperationContext* txn,
+ const BSONElement& configElement) const {
+ WiredTigerSession* session =
+ checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit())->getSession(txn);
+ invariant(session);
+
+ WT_SESSION* s = session->getSession();
+ invariant(s);
+ const string uri = "statistics:";
+
+ BSONObjBuilder bob;
+ Status status = WiredTigerUtil::exportTableToBSON(s, uri, "statistics=(fast)", &bob);
+ if (!status.isOK()) {
+ bob.append("error", "unable to retrieve statistics");
+ bob.append("code", static_cast<int>(status.code()));
+ bob.append("reason", status.reason());
}
- BSONObj WiredTigerServerStatusSection::generateSection(
- OperationContext* txn,
- const BSONElement& configElement) const {
-
- WiredTigerSession* session =
- checked_cast<WiredTigerRecoveryUnit*>(txn->recoveryUnit())->getSession(txn);
- invariant(session);
-
- WT_SESSION* s = session->getSession();
- invariant(s);
- const string uri = "statistics:";
+ WiredTigerRecoveryUnit::appendGlobalStats(bob);
- BSONObjBuilder bob;
- Status status = WiredTigerUtil::exportTableToBSON(s, uri,
- "statistics=(fast)", &bob);
- if (!status.isOK()) {
- bob.append("error", "unable to retrieve statistics");
- bob.append("code", static_cast<int>(status.code()));
- bob.append("reason", status.reason());
- }
-
- WiredTigerRecoveryUnit::appendGlobalStats(bob);
-
- return bob.obj();
- }
+ return bob.obj();
+}
} // namespace mongo
-
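
generateSection() above delegates to WiredTigerUtil::exportTableToBSON() on the "statistics:" URI. The underlying mechanism is an ordinary WiredTiger statistics cursor; a minimal sketch of walking one directly, printing rather than building BSON:

    // Walking a WiredTiger statistics cursor, the raw source of the data that
    // exportTableToBSON() reshapes into the serverStatus "wiredTiger" section.
    #include <cstdint>
    #include <cstdio>
    #include <wiredtiger.h>

    void dumpConnectionStats(WT_SESSION* session) {
        WT_CURSOR* c = nullptr;
        // "statistics=(fast)" restricts the cursor to cheap-to-compute statistics.
        if (session->open_cursor(session, "statistics:", nullptr, "statistics=(fast)", &c) != 0)
            return;  // a real caller would turn this into a Status, as the diff does

        const char* desc;
        uint64_t value;
        // Each row is (description, printable value, numeric value); the printable
        // form is skipped by passing nullptr, exactly as exportTableToBSON() does.
        while (c->next(c) == 0 && c->get_value(c, &desc, nullptr, &value) == 0)
            std::printf("%s = %llu\n", desc, static_cast<unsigned long long>(value));

        c->close(c);
    }
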
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
index 21f9871a82a..5e7c3b3e8a1 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h
@@ -34,19 +34,19 @@
namespace mongo {
- class WiredTigerKVEngine;
-
- /**
- * Adds "wiredTiger" to the results of db.serverStatus().
- */
- class WiredTigerServerStatusSection : public ServerStatusSection {
- public:
- WiredTigerServerStatusSection(WiredTigerKVEngine* engine);
- virtual bool includeByDefault() const;
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const;
- private:
- WiredTigerKVEngine* _engine;
- };
+class WiredTigerKVEngine;
+
+/**
+ * Adds "wiredTiger" to the results of db.serverStatus().
+ */
+class WiredTigerServerStatusSection : public ServerStatusSection {
+public:
+ WiredTigerServerStatusSection(WiredTigerKVEngine* engine);
+ virtual bool includeByDefault() const;
+ virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+
+private:
+ WiredTigerKVEngine* _engine;
+};
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
index dcdde08de69..9bc0f9687a2 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
@@ -40,206 +40,194 @@
namespace mongo {
- WiredTigerSession::WiredTigerSession(WT_CONNECTION* conn, int cachePartition, int epoch)
- : _cachePartition(cachePartition),
- _epoch(epoch),
- _session(NULL),
- _cursorsOut(0) {
+WiredTigerSession::WiredTigerSession(WT_CONNECTION* conn, int cachePartition, int epoch)
+ : _cachePartition(cachePartition), _epoch(epoch), _session(NULL), _cursorsOut(0) {
+ int ret = conn->open_session(conn, NULL, "isolation=snapshot", &_session);
+ invariantWTOK(ret);
+}
- int ret = conn->open_session(conn, NULL, "isolation=snapshot", &_session);
+WiredTigerSession::~WiredTigerSession() {
+ if (_session) {
+ int ret = _session->close(_session, NULL);
invariantWTOK(ret);
}
+}
- WiredTigerSession::~WiredTigerSession() {
- if (_session) {
- int ret = _session->close(_session, NULL);
- invariantWTOK(ret);
+WT_CURSOR* WiredTigerSession::getCursor(const std::string& uri, uint64_t id, bool forRecordStore) {
+ {
+ Cursors& cursors = _curmap[id];
+ if (!cursors.empty()) {
+ WT_CURSOR* save = cursors.back();
+ cursors.pop_back();
+ _cursorsOut++;
+ return save;
}
}
+ WT_CURSOR* c = NULL;
+ int ret = _session->open_cursor(
+ _session, uri.c_str(), NULL, forRecordStore ? "" : "overwrite=false", &c);
+ if (ret != ENOENT)
+ invariantWTOK(ret);
+ if (c)
+ _cursorsOut++;
+ return c;
+}
- WT_CURSOR* WiredTigerSession::getCursor(const std::string& uri,
- uint64_t id,
- bool forRecordStore) {
- {
- Cursors& cursors = _curmap[id];
- if ( !cursors.empty() ) {
- WT_CURSOR* save = cursors.back();
- cursors.pop_back();
- _cursorsOut++;
- return save;
- }
- }
- WT_CURSOR* c = NULL;
- int ret = _session->open_cursor(_session,
- uri.c_str(),
- NULL,
- forRecordStore ? "" : "overwrite=false",
- &c);
- if (ret != ENOENT)
- invariantWTOK(ret);
- if ( c ) _cursorsOut++;
- return c;
- }
-
- void WiredTigerSession::releaseCursor(uint64_t id, WT_CURSOR *cursor) {
- invariant( _session );
- invariant( cursor );
- _cursorsOut--;
-
- Cursors& cursors = _curmap[id];
- if ( cursors.size() > 10u ) {
- invariantWTOK( cursor->close(cursor) );
- }
- else {
- invariantWTOK( cursor->reset( cursor ) );
- cursors.push_back( cursor );
- }
+void WiredTigerSession::releaseCursor(uint64_t id, WT_CURSOR* cursor) {
+ invariant(_session);
+ invariant(cursor);
+ _cursorsOut--;
+
+ Cursors& cursors = _curmap[id];
+ if (cursors.size() > 10u) {
+ invariantWTOK(cursor->close(cursor));
+ } else {
+ invariantWTOK(cursor->reset(cursor));
+ cursors.push_back(cursor);
}
+}
- void WiredTigerSession::closeAllCursors() {
- invariant( _session );
- for (CursorMap::iterator i = _curmap.begin(); i != _curmap.end(); ++i ) {
- Cursors& cursors = i->second;
- for ( size_t j = 0; j < cursors.size(); j++ ) {
- WT_CURSOR *cursor = cursors[j];
- if (cursor) {
- int ret = cursor->close(cursor);
- invariantWTOK(ret);
- }
+void WiredTigerSession::closeAllCursors() {
+ invariant(_session);
+ for (CursorMap::iterator i = _curmap.begin(); i != _curmap.end(); ++i) {
+ Cursors& cursors = i->second;
+ for (size_t j = 0; j < cursors.size(); j++) {
+ WT_CURSOR* cursor = cursors[j];
+ if (cursor) {
+ int ret = cursor->close(cursor);
+ invariantWTOK(ret);
}
}
- _curmap.clear();
}
+ _curmap.clear();
+}
- namespace {
- AtomicUInt64 nextCursorId(1);
- AtomicUInt64 cachePartitionGen(0);
- }
- // static
- uint64_t WiredTigerSession::genCursorId() {
- return nextCursorId.fetchAndAdd(1);
- }
+namespace {
+AtomicUInt64 nextCursorId(1);
+AtomicUInt64 cachePartitionGen(0);
+}
+// static
+uint64_t WiredTigerSession::genCursorId() {
+ return nextCursorId.fetchAndAdd(1);
+}
- // -----------------------
+// -----------------------
- WiredTigerSessionCache::WiredTigerSessionCache( WiredTigerKVEngine* engine )
- : _engine( engine ), _conn( engine->getConnection() ), _shuttingDown(0) {
+WiredTigerSessionCache::WiredTigerSessionCache(WiredTigerKVEngine* engine)
+ : _engine(engine), _conn(engine->getConnection()), _shuttingDown(0) {}
- }
+WiredTigerSessionCache::WiredTigerSessionCache(WT_CONNECTION* conn)
+ : _engine(NULL), _conn(conn), _shuttingDown(0) {}
+
+WiredTigerSessionCache::~WiredTigerSessionCache() {
+ shuttingDown();
+}
- WiredTigerSessionCache::WiredTigerSessionCache( WT_CONNECTION* conn )
- : _engine( NULL ), _conn( conn ), _shuttingDown(0) {
+void WiredTigerSessionCache::shuttingDown() {
+ if (_shuttingDown.load())
+ return;
+ _shuttingDown.store(1);
+ {
+ // This ensures that any calls, which are currently inside of getSession/releaseSession
+ // will be able to complete before we start cleaning up the pool. Any others, which are
+ // about to enter will return immediately because of _shuttingDown == true.
+ stdx::lock_guard<boost::shared_mutex> lk(_shutdownLock);
}
- WiredTigerSessionCache::~WiredTigerSessionCache() {
- shuttingDown();
- }
+ closeAll();
+}
- void WiredTigerSessionCache::shuttingDown() {
- if (_shuttingDown.load()) return;
- _shuttingDown.store(1);
+void WiredTigerSessionCache::closeAll() {
+ for (int i = 0; i < NumSessionCachePartitions; i++) {
+ SessionPool swapPool;
{
- // This ensures that any calls, which are currently inside of getSession/releaseSession
- // will be able to complete before we start cleaning up the pool. Any others, which are
- // about to enter will return immediately because of _shuttingDown == true.
- stdx::lock_guard<boost::shared_mutex> lk(_shutdownLock);
+ stdx::unique_lock<SpinLock> scopedLock(_cache[i].lock);
+ _cache[i].pool.swap(swapPool);
+ _cache[i].epoch++;
}
- closeAll();
- }
-
- void WiredTigerSessionCache::closeAll() {
- for (int i = 0; i < NumSessionCachePartitions; i++) {
- SessionPool swapPool;
-
- {
- stdx::unique_lock<SpinLock> scopedLock(_cache[i].lock);
- _cache[i].pool.swap(swapPool);
- _cache[i].epoch++;
- }
-
- // New sessions will be created if need be outside of the lock
- for (size_t i = 0; i < swapPool.size(); i++) {
- delete swapPool[i];
- }
-
- swapPool.clear();
+ // New sessions will be created if need be outside of the lock
+ for (size_t i = 0; i < swapPool.size(); i++) {
+ delete swapPool[i];
}
+
+ swapPool.clear();
}
+}
- WiredTigerSession* WiredTigerSessionCache::getSession() {
- boost::shared_lock<boost::shared_mutex> shutdownLock(_shutdownLock);
+WiredTigerSession* WiredTigerSessionCache::getSession() {
+ boost::shared_lock<boost::shared_mutex> shutdownLock(_shutdownLock);
- // We should never be able to get here after _shuttingDown is set, because no new
- // operations should be allowed to start.
- invariant(!_shuttingDown.loadRelaxed());
+ // We should never be able to get here after _shuttingDown is set, because no new
+ // operations should be allowed to start.
+ invariant(!_shuttingDown.loadRelaxed());
- // Spread sessions uniformly across the cache partitions
- const int cachePartition = cachePartitionGen.addAndFetch(1) % NumSessionCachePartitions;
+ // Spread sessions uniformly across the cache partitions
+ const int cachePartition = cachePartitionGen.addAndFetch(1) % NumSessionCachePartitions;
- int epoch;
+ int epoch;
- {
- stdx::unique_lock<SpinLock> cachePartitionLock(_cache[cachePartition].lock);
- epoch = _cache[cachePartition].epoch;
+ {
+ stdx::unique_lock<SpinLock> cachePartitionLock(_cache[cachePartition].lock);
+ epoch = _cache[cachePartition].epoch;
- if (!_cache[cachePartition].pool.empty()) {
- WiredTigerSession* cachedSession = _cache[cachePartition].pool.back();
- _cache[cachePartition].pool.pop_back();
+ if (!_cache[cachePartition].pool.empty()) {
+ WiredTigerSession* cachedSession = _cache[cachePartition].pool.back();
+ _cache[cachePartition].pool.pop_back();
- return cachedSession;
- }
+ return cachedSession;
}
-
- // Outside of the cache partition lock, but on release will be put back on the cache
- return new WiredTigerSession(_conn, cachePartition, epoch);
}
- void WiredTigerSessionCache::releaseSession( WiredTigerSession* session ) {
- invariant( session );
- invariant(session->cursorsOut() == 0);
-
- boost::shared_lock<boost::shared_mutex> shutdownLock(_shutdownLock);
- if (_shuttingDown.loadRelaxed()) {
- // Leak the session in order to avoid race condition with clean shutdown, where the
- // storage engine is ripped from underneath transactions, which are not "active"
- // (i.e., do not have any locks), but are just about to delete the recovery unit.
- // See SERVER-16031 for more information.
- return;
- }
+ // Outside of the cache partition lock, but on release will be put back on the cache
+ return new WiredTigerSession(_conn, cachePartition, epoch);
+}
- // This checks that we are only caching idle sessions and not something which might hold
- // locks or otherwise prevent truncation.
- {
- WT_SESSION* ss = session->getSession();
- uint64_t range;
- invariantWTOK(ss->transaction_pinned_range(ss, &range));
- invariant(range == 0);
- }
+void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
+ invariant(session);
+ invariant(session->cursorsOut() == 0);
+
+ boost::shared_lock<boost::shared_mutex> shutdownLock(_shutdownLock);
+ if (_shuttingDown.loadRelaxed()) {
+ // Leak the session in order to avoid race condition with clean shutdown, where the
+ // storage engine is ripped from underneath transactions, which are not "active"
+ // (i.e., do not have any locks), but are just about to delete the recovery unit.
+ // See SERVER-16031 for more information.
+ return;
+ }
- const int cachePartition = session->_getCachePartition();
- bool returnedToCache = false;
+ // This checks that we are only caching idle sessions and not something which might hold
+ // locks or otherwise prevent truncation.
+ {
+ WT_SESSION* ss = session->getSession();
+ uint64_t range;
+ invariantWTOK(ss->transaction_pinned_range(ss, &range));
+ invariant(range == 0);
+ }
- if (cachePartition >= 0) {
- stdx::unique_lock<SpinLock> cachePartitionLock(_cache[cachePartition].lock);
+ const int cachePartition = session->_getCachePartition();
+ bool returnedToCache = false;
- invariant(session->_getEpoch() <= _cache[cachePartition].epoch);
+ if (cachePartition >= 0) {
+ stdx::unique_lock<SpinLock> cachePartitionLock(_cache[cachePartition].lock);
- if (session->_getEpoch() == _cache[cachePartition].epoch) {
- _cache[cachePartition].pool.push_back(session);
- returnedToCache = true;
- }
- }
+ invariant(session->_getEpoch() <= _cache[cachePartition].epoch);
- // Do all cleanup outside of the cache partition spinlock.
- if (!returnedToCache) {
- delete session;
+ if (session->_getEpoch() == _cache[cachePartition].epoch) {
+ _cache[cachePartition].pool.push_back(session);
+ returnedToCache = true;
}
+ }
- if (_engine && _engine->haveDropsQueued()) {
- _engine->dropAllQueued();
- }
+ // Do all cleanup outside of the cache partition spinlock.
+ if (!returnedToCache) {
+ delete session;
}
+
+ if (_engine && _engine->haveDropsQueued()) {
+ _engine->dropAllQueued();
+ }
+}
}
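
The getCursor()/releaseCursor() pair above is a small per-URI-id free list capped at roughly ten idle cursors per id. A condensed standalone sketch of the same bookkeeping (the cap and the ENOENT handling come from the diff; CursorCache itself is illustrative):

    // Per-URI-id cursor free list, mirroring WiredTigerSession::getCursor/releaseCursor.
    #include <cassert>
    #include <cerrno>
    #include <cstdint>
    #include <map>
    #include <vector>
    #include <wiredtiger.h>

    class CursorCache {
    public:
        explicit CursorCache(WT_SESSION* session) : _session(session) {}

        WT_CURSOR* get(const char* uri, uint64_t id) {
            std::vector<WT_CURSOR*>& pool = _byId[id];
            if (!pool.empty()) {  // reuse an idle cursor when one is available
                WT_CURSOR* c = pool.back();
                pool.pop_back();
                return c;
            }
            WT_CURSOR* c = nullptr;
            int ret = _session->open_cursor(_session, uri, nullptr, "overwrite=false", &c);
            assert(ret == 0 || ret == ENOENT);  // ENOENT: the table does not exist yet
            (void)ret;
            return c;  // may be nullptr on ENOENT, as in the diff
        }

        void release(uint64_t id, WT_CURSOR* c) {
            std::vector<WT_CURSOR*>& pool = _byId[id];
            int ret;
            if (pool.size() > 10u) {  // same cap as the diff above
                ret = c->close(c);    // too many idle cursors: drop this one
            } else {
                ret = c->reset(c);    // unpin, then cache for reuse
                pool.push_back(c);
            }
            assert(ret == 0);
            (void)ret;
        }

    private:
        WT_SESSION* _session;  // not owned
        std::map<uint64_t, std::vector<WT_CURSOR*>> _byId;
    };
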
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index 2f9e8d64d4d..9fd575232b9 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -44,113 +44,118 @@
namespace mongo {
- class WiredTigerKVEngine;
+class WiredTigerKVEngine;
+/**
+ * This is a structure that caches 1 cursor for each uri.
+ * The idea is that there is a pool of these somewhere.
+ * NOT THREADSAFE
+ */
+class WiredTigerSession {
+public:
/**
- * This is a structure that caches 1 cursor for each uri.
- * The idea is that there is a pool of these somewhere.
- * NOT THREADSAFE
+ * Creates a new WT session on the specified connection.
+ *
+ * @param conn WT connection
+ * @param cachePartition If the session comes from the session cache, this indicates to
+ * which partition it should be returned. Value of -1 means it doesn't come from
+ * cache and that it should not be cached, but closed directly.
+ * @param epoch In which session cache cleanup epoch was this session instantiated. Value
+ * of -1 means that this value is not necessary since the session will not be
+ * cached.
*/
- class WiredTigerSession {
- public:
-
- /**
- * Creates a new WT session on the specified connection.
- *
- * @param conn WT connection
- * @param cachePartition If the session comes from the session cache, this indicates to
- * which partition it should be returned. Value of -1 means it doesn't come from
- * cache and that it should not be cached, but closed directly.
- * @param epoch In which session cache cleanup epoch was this session instantiated. Value
- * of -1 means that this value is not necessary since the session will not be
- * cached.
- */
- WiredTigerSession(WT_CONNECTION* conn, int cachePartition = -1, int epoch = -1);
- ~WiredTigerSession();
-
- WT_SESSION* getSession() const { return _session; }
-
- WT_CURSOR* getCursor(const std::string& uri,
- uint64_t id,
- bool forRecordStore);
- void releaseCursor(uint64_t id, WT_CURSOR *cursor);
+ WiredTigerSession(WT_CONNECTION* conn, int cachePartition = -1, int epoch = -1);
+ ~WiredTigerSession();
- void closeAllCursors();
+ WT_SESSION* getSession() const {
+ return _session;
+ }
- int cursorsOut() const { return _cursorsOut; }
+ WT_CURSOR* getCursor(const std::string& uri, uint64_t id, bool forRecordStore);
+ void releaseCursor(uint64_t id, WT_CURSOR* cursor);
- static uint64_t genCursorId();
+ void closeAllCursors();
- /**
- * For "metadata:" cursors. Guaranteed never to collide with genCursorId() ids.
- */
- static const uint64_t kMetadataCursorId = 0;
+ int cursorsOut() const {
+ return _cursorsOut;
+ }
- private:
- friend class WiredTigerSessionCache;
+ static uint64_t genCursorId();
- typedef std::vector<WT_CURSOR*> Cursors;
- typedef std::map<uint64_t, Cursors> CursorMap;
+ /**
+ * For "metadata:" cursors. Guaranteed never to collide with genCursorId() ids.
+ */
+ static const uint64_t kMetadataCursorId = 0;
+private:
+ friend class WiredTigerSessionCache;
- // Used internally by WiredTigerSessionCache
- int _getEpoch() const { return _epoch; }
- int _getCachePartition() const { return _cachePartition; }
+ typedef std::vector<WT_CURSOR*> Cursors;
+ typedef std::map<uint64_t, Cursors> CursorMap;
- const int _cachePartition;
- const int _epoch;
- WT_SESSION* _session; // owned
- CursorMap _curmap; // owned
- int _cursorsOut;
- };
+ // Used internally by WiredTigerSessionCache
+ int _getEpoch() const {
+ return _epoch;
+ }
+ int _getCachePartition() const {
+ return _cachePartition;
+ }
- class WiredTigerSessionCache {
- public:
- WiredTigerSessionCache( WiredTigerKVEngine* engine );
- WiredTigerSessionCache( WT_CONNECTION* conn );
- ~WiredTigerSessionCache();
+ const int _cachePartition;
+ const int _epoch;
+ WT_SESSION* _session; // owned
+ CursorMap _curmap; // owned
+ int _cursorsOut;
+};
- WiredTigerSession* getSession();
- void releaseSession( WiredTigerSession* session );
+class WiredTigerSessionCache {
+public:
+ WiredTigerSessionCache(WiredTigerKVEngine* engine);
+ WiredTigerSessionCache(WT_CONNECTION* conn);
+ ~WiredTigerSessionCache();
- void closeAll();
+ WiredTigerSession* getSession();
+ void releaseSession(WiredTigerSession* session);
- void shuttingDown();
+ void closeAll();
- WT_CONNECTION* conn() const { return _conn; }
+ void shuttingDown();
- private:
- typedef std::vector<WiredTigerSession*> SessionPool;
+ WT_CONNECTION* conn() const {
+ return _conn;
+ }
- enum { NumSessionCachePartitions = 64 };
+private:
+ typedef std::vector<WiredTigerSession*> SessionPool;
- struct SessionCachePartition {
- SessionCachePartition() : epoch(0) { }
- ~SessionCachePartition() {
- invariant(pool.empty());
- }
+ enum { NumSessionCachePartitions = 64 };
- SpinLock lock;
- int epoch;
- SessionPool pool;
- };
+ struct SessionCachePartition {
+ SessionCachePartition() : epoch(0) {}
+ ~SessionCachePartition() {
+ invariant(pool.empty());
+ }
+ SpinLock lock;
+ int epoch;
+ SessionPool pool;
+ };
- WiredTigerKVEngine* _engine; // not owned, might be NULL
- WT_CONNECTION* _conn; // not owned
- // Partitioned cache of WT sessions. The partition key is not important, but it is
- // important that sessions be returned to the same partition they were taken from in order
- // to have some form of balance between the partitions.
- SessionCachePartition _cache[NumSessionCachePartitions];
+ WiredTigerKVEngine* _engine; // not owned, might be NULL
+ WT_CONNECTION* _conn; // not owned
- // Regular operations take it in shared mode. Shutdown sets the _shuttingDown flag and
- // then takes it in exclusive mode. This ensures that all threads, which would return
- // sessions to the cache would leak them.
- boost::shared_mutex _shutdownLock;
- AtomicUInt32 _shuttingDown; // Used as boolean - 0 = false, 1 = true
- };
+ // Partitioned cache of WT sessions. The partition key is not important, but it is
+ // important that sessions be returned to the same partition they were taken from in order
+ // to have some form of balance between the partitions.
+ SessionCachePartition _cache[NumSessionCachePartitions];
+ // Regular operations take it in shared mode. Shutdown sets the _shuttingDown flag and
+ // then takes it in exclusive mode. This ensures that all threads, which would return
+ // sessions to the cache would leak them.
+ boost::shared_mutex _shutdownLock;
+ AtomicUInt32 _shuttingDown; // Used as boolean - 0 = false, 1 = true
+};
}
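
One detail worth keeping in mind when reading the header above: each cache partition carries an epoch that closeAll() bumps, and releaseSession() only re-pools a session whose recorded epoch still matches, so sessions handed out before a closeAll() are destroyed rather than cached. A standalone miniature of that check (std::mutex and unique_ptr stand in for the SpinLock and raw pointers of the real code):

    #include <memory>
    #include <mutex>
    #include <vector>

    struct Session {  // stand-in for WiredTigerSession
        explicit Session(int e) : epoch(e) {}
        const int epoch;
    };

    struct Partition {
        std::mutex lock;  // the real code uses a SpinLock
        int epoch = 0;
        std::vector<std::unique_ptr<Session>> pool;
    };

    void releaseToPartition(Partition& p, std::unique_ptr<Session> s) {
        {
            std::lock_guard<std::mutex> lk(p.lock);
            if (s->epoch == p.epoch) {
                p.pool.push_back(std::move(s));  // still current: cache it
                return;
            }
        }
        // Stale session (closeAll() bumped the epoch since it was taken out):
        // destroy it outside the partition lock, as the diff does with `delete session`.
        s.reset();
    }
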
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
index bbf12fc075f..89d7438ea9a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp
@@ -47,196 +47,192 @@
namespace mongo {
- using std::string;
+using std::string;
- namespace {
- int MAGIC = 123123;
+namespace {
+int MAGIC = 123123;
+}
+
+WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn, const std::string& storageUri)
+ : _session(conn) {
+ WT_SESSION* session = _session.getSession();
+ int ret = session->open_cursor(session, storageUri.c_str(), NULL, "overwrite=true", &_cursor);
+ if (ret == ENOENT) {
+ // Need to create table.
+ std::string config =
+ WiredTigerCustomizationHooks::get(getGlobalServiceContext())->getOpenConfig(storageUri);
+ invariantWTOK(session->create(session, storageUri.c_str(), config.c_str()));
+ ret = session->open_cursor(session, storageUri.c_str(), NULL, "overwrite=true", &_cursor);
}
+ invariantWTOK(ret);
- WiredTigerSizeStorer::WiredTigerSizeStorer(WT_CONNECTION* conn, const std::string& storageUri)
- : _session(conn)
- {
- WT_SESSION* session = _session.getSession();
- int ret = session->open_cursor(session, storageUri.c_str(), NULL,
- "overwrite=true", &_cursor);
- if (ret == ENOENT) {
- // Need to create table.
- std::string config = WiredTigerCustomizationHooks::get(
- getGlobalServiceContext())->getOpenConfig(storageUri);
- invariantWTOK(session->create(session, storageUri.c_str(), config.c_str()));
- ret = session->open_cursor(session, storageUri.c_str(), NULL,
- "overwrite=true", &_cursor);
- }
- invariantWTOK(ret);
+ _magic = MAGIC;
+}
- _magic = MAGIC;
- }
+WiredTigerSizeStorer::~WiredTigerSizeStorer() {
+ // This shouldn't be necessary, but protects us if we screw up.
+ stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
- WiredTigerSizeStorer::~WiredTigerSizeStorer() {
- // This shouldn't be necessary, but protects us if we screw up.
- stdx::lock_guard<stdx::mutex> cursorLock( _cursorMutex );
+ _magic = 11111;
+ _cursor->close(_cursor);
+}
- _magic = 11111;
- _cursor->close(_cursor);
- }
+void WiredTigerSizeStorer::_checkMagic() const {
+ if (MONGO_likely(_magic == MAGIC))
+ return;
+ log() << "WiredTigerSizeStorer magic wrong: " << _magic;
+ invariant(_magic == MAGIC);
+}
- void WiredTigerSizeStorer::_checkMagic() const {
- if ( MONGO_likely(_magic == MAGIC) )
- return;
- log() << "WiredTigerSizeStorer magic wrong: " << _magic;
- invariant( _magic == MAGIC );
- }
+void WiredTigerSizeStorer::onCreate(WiredTigerRecordStore* rs,
+ long long numRecords,
+ long long dataSize) {
+ _checkMagic();
+ stdx::lock_guard<stdx::mutex> lk(_entriesMutex);
+ Entry& entry = _entries[rs->getURI()];
+ entry.rs = rs;
+ entry.numRecords = numRecords;
+ entry.dataSize = dataSize;
+ entry.dirty = true;
+}
- void WiredTigerSizeStorer::onCreate( WiredTigerRecordStore* rs,
- long long numRecords, long long dataSize ) {
- _checkMagic();
- stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
- Entry& entry = _entries[rs->getURI()];
- entry.rs = rs;
- entry.numRecords = numRecords;
- entry.dataSize = dataSize;
- entry.dirty = true;
- }
+void WiredTigerSizeStorer::onDestroy(WiredTigerRecordStore* rs) {
+ _checkMagic();
+ stdx::lock_guard<stdx::mutex> lk(_entriesMutex);
+ Entry& entry = _entries[rs->getURI()];
+ entry.numRecords = rs->numRecords(NULL);
+ entry.dataSize = rs->dataSize(NULL);
+ entry.dirty = true;
+ entry.rs = NULL;
+}
- void WiredTigerSizeStorer::onDestroy( WiredTigerRecordStore* rs ) {
- _checkMagic();
- stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
- Entry& entry = _entries[rs->getURI()];
- entry.numRecords = rs->numRecords( NULL );
- entry.dataSize = rs->dataSize( NULL );
- entry.dirty = true;
- entry.rs = NULL;
- }
+void WiredTigerSizeStorer::storeToCache(StringData uri, long long numRecords, long long dataSize) {
+ _checkMagic();
+ stdx::lock_guard<stdx::mutex> lk(_entriesMutex);
+ Entry& entry = _entries[uri.toString()];
+ entry.numRecords = numRecords;
+ entry.dataSize = dataSize;
+ entry.dirty = true;
+}
- void WiredTigerSizeStorer::storeToCache( StringData uri,
- long long numRecords, long long dataSize ) {
- _checkMagic();
- stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
- Entry& entry = _entries[uri.toString()];
- entry.numRecords = numRecords;
- entry.dataSize = dataSize;
- entry.dirty = true;
+void WiredTigerSizeStorer::loadFromCache(StringData uri,
+ long long* numRecords,
+ long long* dataSize) const {
+ _checkMagic();
+ stdx::lock_guard<stdx::mutex> lk(_entriesMutex);
+ Map::const_iterator it = _entries.find(uri.toString());
+ if (it == _entries.end()) {
+ *numRecords = 0;
+ *dataSize = 0;
+ return;
}
+ *numRecords = it->second.numRecords;
+ *dataSize = it->second.dataSize;
+}
- void WiredTigerSizeStorer::loadFromCache( StringData uri,
- long long* numRecords, long long* dataSize ) const {
- _checkMagic();
- stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
- Map::const_iterator it = _entries.find( uri.toString() );
- if ( it == _entries.end() ) {
- *numRecords = 0;
- *dataSize = 0;
- return;
- }
- *numRecords = it->second.numRecords;
- *dataSize = it->second.dataSize;
- }
+void WiredTigerSizeStorer::fillCache() {
+ stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ _checkMagic();
- void WiredTigerSizeStorer::fillCache() {
- stdx::lock_guard<stdx::mutex> cursorLock( _cursorMutex );
- _checkMagic();
+ Map m;
+ {
+ // Seek to beginning if needed.
+ invariantWTOK(_cursor->reset(_cursor));
- Map m;
- {
- // Seek to beginning if needed.
- invariantWTOK(_cursor->reset(_cursor));
-
- // Intentionally ignoring return value.
- ON_BLOCK_EXIT(_cursor->reset, _cursor);
-
- int cursorNextRet;
- while ((cursorNextRet = _cursor->next(_cursor)) != WT_NOTFOUND) {
- invariantWTOK(cursorNextRet);
-
- WT_ITEM key;
- WT_ITEM value;
- invariantWTOK( _cursor->get_key(_cursor, &key ) );
- invariantWTOK( _cursor->get_value(_cursor, &value ) );
- std::string uriKey( reinterpret_cast<const char*>( key.data ), key.size );
- BSONObj data( reinterpret_cast<const char*>( value.data ) );
-
- LOG(2) << "WiredTigerSizeStorer::loadFrom " << uriKey << " -> " << data;
-
- Entry& e = m[uriKey];
- e.numRecords = data["numRecords"].safeNumberLong();
- e.dataSize = data["dataSize"].safeNumberLong();
- e.dirty = false;
- e.rs = NULL;
- }
- }
+ // Intentionally ignoring return value.
+ ON_BLOCK_EXIT(_cursor->reset, _cursor);
- stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
- _entries.swap(m);
- }
+ int cursorNextRet;
+ while ((cursorNextRet = _cursor->next(_cursor)) != WT_NOTFOUND) {
+ invariantWTOK(cursorNextRet);
- void WiredTigerSizeStorer::syncCache(bool syncToDisk) {
- stdx::lock_guard<stdx::mutex> cursorLock( _cursorMutex );
- _checkMagic();
+ WT_ITEM key;
+ WT_ITEM value;
+ invariantWTOK(_cursor->get_key(_cursor, &key));
+ invariantWTOK(_cursor->get_value(_cursor, &value));
+ std::string uriKey(reinterpret_cast<const char*>(key.data), key.size);
+ BSONObj data(reinterpret_cast<const char*>(value.data));
- Map myMap;
- {
- stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
- for ( Map::iterator it = _entries.begin(); it != _entries.end(); ++it ) {
- std::string uriKey = it->first;
- Entry& entry = it->second;
- if ( entry.rs ) {
- if ( entry.dataSize != entry.rs->dataSize( NULL ) ) {
- entry.dataSize = entry.rs->dataSize( NULL );
- entry.dirty = true;
- }
- if ( entry.numRecords != entry.rs->numRecords( NULL ) ) {
- entry.numRecords = entry.rs->numRecords( NULL );
- entry.dirty = true;
- }
- }
+ LOG(2) << "WiredTigerSizeStorer::loadFrom " << uriKey << " -> " << data;
- if ( !entry.dirty )
- continue;
- myMap[uriKey] = entry;
- }
+ Entry& e = m[uriKey];
+ e.numRecords = data["numRecords"].safeNumberLong();
+ e.dataSize = data["dataSize"].safeNumberLong();
+ e.dirty = false;
+ e.rs = NULL;
}
+ }
- if (myMap.empty())
- return; // Nothing to do.
+ stdx::lock_guard<stdx::mutex> lk(_entriesMutex);
+ _entries.swap(m);
+}
- WT_SESSION* session = _session.getSession();
- invariantWTOK(session->begin_transaction(session, syncToDisk ? "sync=true" : ""));
- ScopeGuard rollbacker = MakeGuard(session->rollback_transaction, session, "");
+void WiredTigerSizeStorer::syncCache(bool syncToDisk) {
+ stdx::lock_guard<stdx::mutex> cursorLock(_cursorMutex);
+ _checkMagic();
- for ( Map::iterator it = myMap.begin(); it != myMap.end(); ++it ) {
- string uriKey = it->first;
+ Map myMap;
+ {
+ stdx::lock_guard<stdx::mutex> lk(_entriesMutex);
+ for (Map::iterator it = _entries.begin(); it != _entries.end(); ++it) {
+ std::string uriKey = it->first;
Entry& entry = it->second;
-
- BSONObj data;
- {
- BSONObjBuilder b;
- b.append( "numRecords", entry.numRecords );
- b.append( "dataSize", entry.dataSize );
- data = b.obj();
+ if (entry.rs) {
+ if (entry.dataSize != entry.rs->dataSize(NULL)) {
+ entry.dataSize = entry.rs->dataSize(NULL);
+ entry.dirty = true;
+ }
+ if (entry.numRecords != entry.rs->numRecords(NULL)) {
+ entry.numRecords = entry.rs->numRecords(NULL);
+ entry.dirty = true;
+ }
}
- LOG(2) << "WiredTigerSizeStorer::storeInto " << uriKey << " -> " << data;
-
- WiredTigerItem key( uriKey.c_str(), uriKey.size() );
- WiredTigerItem value( data.objdata(), data.objsize() );
- _cursor->set_key( _cursor, key.Get() );
- _cursor->set_value( _cursor, value.Get() );
- invariantWTOK( _cursor->insert(_cursor) );
+ if (!entry.dirty)
+ continue;
+ myMap[uriKey] = entry;
}
+ }
- invariantWTOK(_cursor->reset(_cursor));
+ if (myMap.empty())
+ return; // Nothing to do.
+
+ WT_SESSION* session = _session.getSession();
+ invariantWTOK(session->begin_transaction(session, syncToDisk ? "sync=true" : ""));
+ ScopeGuard rollbacker = MakeGuard(session->rollback_transaction, session, "");
- rollbacker.Dismiss();
- invariantWTOK(session->commit_transaction(session, NULL));
+ for (Map::iterator it = myMap.begin(); it != myMap.end(); ++it) {
+ string uriKey = it->first;
+ Entry& entry = it->second;
+ BSONObj data;
{
- stdx::lock_guard<stdx::mutex> lk( _entriesMutex );
- for (Map::iterator it = _entries.begin(); it != _entries.end(); ++it) {
- it->second.dirty = false;
- }
+ BSONObjBuilder b;
+ b.append("numRecords", entry.numRecords);
+ b.append("dataSize", entry.dataSize);
+ data = b.obj();
}
+
+ LOG(2) << "WiredTigerSizeStorer::storeInto " << uriKey << " -> " << data;
+
+ WiredTigerItem key(uriKey.c_str(), uriKey.size());
+ WiredTigerItem value(data.objdata(), data.objsize());
+ _cursor->set_key(_cursor, key.Get());
+ _cursor->set_value(_cursor, value.Get());
+ invariantWTOK(_cursor->insert(_cursor));
}
+ invariantWTOK(_cursor->reset(_cursor));
+ rollbacker.Dismiss();
+ invariantWTOK(session->commit_transaction(session, NULL));
+
+ {
+ stdx::lock_guard<stdx::mutex> lk(_entriesMutex);
+ for (Map::iterator it = _entries.begin(); it != _entries.end(); ++it) {
+ it->second.dirty = false;
+ }
+ }
+}
}
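
syncCache() above batches every dirty entry into a single WiredTiger transaction, with a rollback guard in case any insert fails. A reduced sketch of that flush pattern over a table with string keys and values (the real code stores BSON documents and uses ScopeGuard; flushSizes and the plain std::map are illustrative):

    // Flush all dirty entries in one transaction; the cursor is assumed to have
    // been opened with "overwrite=true" so insert() behaves as an upsert.
    #include <map>
    #include <string>
    #include <wiredtiger.h>

    bool flushSizes(WT_SESSION* session,
                    WT_CURSOR* cursor,  // cursor on the size table
                    const std::map<std::string, std::string>& dirty,
                    bool syncToDisk) {
        if (dirty.empty())
            return true;  // nothing to do

        if (session->begin_transaction(session, syncToDisk ? "sync=true" : "") != 0)
            return false;

        for (const auto& kv : dirty) {
            cursor->set_key(cursor, kv.first.c_str());     // assumes key_format=S
            cursor->set_value(cursor, kv.second.c_str());  // assumes value_format=S
            if (cursor->insert(cursor) != 0) {
                session->rollback_transaction(session, nullptr);
                return false;
            }
        }

        cursor->reset(cursor);  // unpin the cursor before committing, as the diff does
        return session->commit_transaction(session, nullptr) == 0;
    }
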
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
index 488696424a0..3dcbc6622a8 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h
@@ -41,53 +41,51 @@
namespace mongo {
- class WiredTigerRecordStore;
- class WiredTigerSession;
+class WiredTigerRecordStore;
+class WiredTigerSession;
- class WiredTigerSizeStorer {
- public:
- WiredTigerSizeStorer(WT_CONNECTION* conn, const std::string& storageUri);
- ~WiredTigerSizeStorer();
+class WiredTigerSizeStorer {
+public:
+ WiredTigerSizeStorer(WT_CONNECTION* conn, const std::string& storageUri);
+ ~WiredTigerSizeStorer();
- void onCreate( WiredTigerRecordStore* rs, long long nr, long long ds );
- void onDestroy( WiredTigerRecordStore* rs );
+ void onCreate(WiredTigerRecordStore* rs, long long nr, long long ds);
+ void onDestroy(WiredTigerRecordStore* rs);
- void storeToCache( StringData uri, long long numRecords, long long dataSize );
+ void storeToCache(StringData uri, long long numRecords, long long dataSize);
- void loadFromCache( StringData uri, long long* numRecords, long long* dataSize ) const;
+ void loadFromCache(StringData uri, long long* numRecords, long long* dataSize) const;
- /**
- * Loads from the underlying table.
- */
- void fillCache();
+ /**
+ * Loads from the underlying table.
+ */
+ void fillCache();
- /**
- * Writes all changes to the underlying table.
- */
- void syncCache(bool syncToDisk);
+ /**
+ * Writes all changes to the underlying table.
+ */
+ void syncCache(bool syncToDisk);
- private:
- void _checkMagic() const;
+private:
+ void _checkMagic() const;
- struct Entry {
- Entry() : numRecords(0), dataSize(0), dirty(false), rs(NULL){}
- long long numRecords;
- long long dataSize;
- bool dirty;
- WiredTigerRecordStore* rs; // not owned
- };
-
- int _magic;
-
- // Guards _cursor. Acquire *before* _entriesMutex.
- mutable stdx::mutex _cursorMutex;
- const WiredTigerSession _session;
- WT_CURSOR* _cursor; // pointer is const after constructor
+ struct Entry {
+ Entry() : numRecords(0), dataSize(0), dirty(false), rs(NULL) {}
+ long long numRecords;
+ long long dataSize;
+ bool dirty;
+ WiredTigerRecordStore* rs; // not owned
+ };
- typedef std::map<std::string,Entry> Map;
- Map _entries;
- mutable stdx::mutex _entriesMutex;
+ int _magic;
- };
+ // Guards _cursor. Acquire *before* _entriesMutex.
+ mutable stdx::mutex _cursorMutex;
+ const WiredTigerSession _session;
+ WT_CURSOR* _cursor; // pointer is const after constructor
+ typedef std::map<std::string, Entry> Map;
+ Map _entries;
+ mutable stdx::mutex _entriesMutex;
+};
}
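
A short usage sketch of the interface declared above; the call order follows the .cpp in this change, but the URI and the numbers are made up and the include path is the one shown in the diff header:

    #include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h"

    void exampleSizeStorerUsage(mongo::WiredTigerSizeStorer& sizeStorer) {
        // Prime the in-memory map from the on-disk size table at startup.
        sizeStorer.fillCache();

        // Record updated count/size for a collection's table; this marks the entry dirty.
        sizeStorer.storeToCache("table:collection-7--1234567890", 42, 8192);

        // Read the cached values back (unknown URIs yield zeros).
        long long numRecords = 0, dataSize = 0;
        sizeStorer.loadFromCache("table:collection-7--1234567890", &numRecords, &dataSize);

        // Periodically persist every dirty entry in one WiredTiger transaction.
        sizeStorer.syncCache(false /* syncToDisk */);
    }
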
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index d4cc5109303..ef1231282bd 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -49,115 +49,113 @@
namespace mongo {
- using std::string;
+using std::string;
- Status wtRCToStatus_slow(int retCode, const char* prefix ) {
- if (retCode == 0)
- return Status::OK();
-
- if ( retCode == WT_ROLLBACK ) {
- throw WriteConflictException();
- }
+Status wtRCToStatus_slow(int retCode, const char* prefix) {
+ if (retCode == 0)
+ return Status::OK();
- fassert( 28559, retCode != WT_PANIC );
+ if (retCode == WT_ROLLBACK) {
+ throw WriteConflictException();
+ }
- str::stream s;
- if ( prefix )
- s << prefix << " ";
- s << retCode << ": " << wiredtiger_strerror(retCode);
+ fassert(28559, retCode != WT_PANIC);
- if (retCode == EINVAL) {
- return Status(ErrorCodes::BadValue, s);
- }
+ str::stream s;
+ if (prefix)
+ s << prefix << " ";
+ s << retCode << ": " << wiredtiger_strerror(retCode);
- // TODO convert specific codes rather than just using UNKNOWN_ERROR for everything.
- return Status(ErrorCodes::UnknownError, s);
+ if (retCode == EINVAL) {
+ return Status(ErrorCodes::BadValue, s);
}
- void WiredTigerUtil::fetchTypeAndSourceURI(OperationContext* opCtx,
- const std::string& tableUri,
- std::string* type,
- std::string* source) {
- std::string colgroupUri = "colgroup";
- const size_t colon = tableUri.find(':');
- invariant(colon != string::npos);
- colgroupUri += tableUri.substr(colon);
- StatusWith<std::string> colgroupResult = getMetadata(opCtx, colgroupUri);
- invariant(colgroupResult.isOK());
- WiredTigerConfigParser parser(colgroupResult.getValue());
-
- WT_CONFIG_ITEM typeItem;
- invariant(parser.get("type", &typeItem) == 0);
- invariant(typeItem.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_ID);
- *type = std::string(typeItem.str, typeItem.len);
-
- WT_CONFIG_ITEM sourceItem;
- invariant(parser.get("source", &sourceItem) == 0);
- invariant(sourceItem.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRING);
- *source = std::string(sourceItem.str, sourceItem.len);
+ // TODO convert specific codes rather than just using UNKNOWN_ERROR for everything.
+ return Status(ErrorCodes::UnknownError, s);
+}
+
+void WiredTigerUtil::fetchTypeAndSourceURI(OperationContext* opCtx,
+ const std::string& tableUri,
+ std::string* type,
+ std::string* source) {
+ std::string colgroupUri = "colgroup";
+ const size_t colon = tableUri.find(':');
+ invariant(colon != string::npos);
+ colgroupUri += tableUri.substr(colon);
+ StatusWith<std::string> colgroupResult = getMetadata(opCtx, colgroupUri);
+ invariant(colgroupResult.isOK());
+ WiredTigerConfigParser parser(colgroupResult.getValue());
+
+ WT_CONFIG_ITEM typeItem;
+ invariant(parser.get("type", &typeItem) == 0);
+ invariant(typeItem.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_ID);
+ *type = std::string(typeItem.str, typeItem.len);
+
+ WT_CONFIG_ITEM sourceItem;
+ invariant(parser.get("source", &sourceItem) == 0);
+ invariant(sourceItem.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRING);
+ *source = std::string(sourceItem.str, sourceItem.len);
+}
+
+StatusWith<std::string> WiredTigerUtil::getMetadata(OperationContext* opCtx, StringData uri) {
+ invariant(opCtx);
+ WiredTigerCursor curwrap("metadata:", WiredTigerSession::kMetadataCursorId, false, opCtx);
+ WT_CURSOR* cursor = curwrap.get();
+ invariant(cursor);
+ std::string strUri = uri.toString();
+ cursor->set_key(cursor, strUri.c_str());
+ int ret = cursor->search(cursor);
+ if (ret == WT_NOTFOUND) {
+ return StatusWith<std::string>(ErrorCodes::NoSuchKey,
+ str::stream() << "Unable to find metadata for " << uri);
+ } else if (ret != 0) {
+ return StatusWith<std::string>(wtRCToStatus(ret));
}
+ const char* metadata = NULL;
+ ret = cursor->get_value(cursor, &metadata);
+ if (ret != 0) {
+ return StatusWith<std::string>(wtRCToStatus(ret));
+ }
+ invariant(metadata);
+ return StatusWith<std::string>(metadata);
+}
- StatusWith<std::string> WiredTigerUtil::getMetadata(OperationContext* opCtx,
- StringData uri) {
- invariant(opCtx);
- WiredTigerCursor curwrap("metadata:", WiredTigerSession::kMetadataCursorId, false, opCtx);
- WT_CURSOR* cursor = curwrap.get();
- invariant(cursor);
- std::string strUri = uri.toString();
- cursor->set_key(cursor, strUri.c_str());
- int ret = cursor->search(cursor);
- if (ret == WT_NOTFOUND) {
- return StatusWith<std::string>(ErrorCodes::NoSuchKey, str::stream()
- << "Unable to find metadata for " << uri);
- }
- else if (ret != 0) {
- return StatusWith<std::string>(wtRCToStatus(ret));
- }
- const char* metadata = NULL;
- ret = cursor->get_value(cursor, &metadata);
- if (ret != 0) {
- return StatusWith<std::string>(wtRCToStatus(ret));
- }
- invariant(metadata);
- return StatusWith<std::string>(metadata);
+Status WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
+ StringData uri,
+ BSONObjBuilder* bob) {
+ StatusWith<std::string> metadataResult = getMetadata(opCtx, uri);
+ if (!metadataResult.isOK()) {
+ return metadataResult.getStatus();
+ }
+ WiredTigerConfigParser topParser(metadataResult.getValue());
+ WT_CONFIG_ITEM appMetadata;
+ if (topParser.get("app_metadata", &appMetadata) != 0) {
+ return Status::OK();
+ }
+ if (appMetadata.len == 0) {
+ return Status::OK();
+ }
+ if (appMetadata.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT) {
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "app_metadata must be a nested struct. Actual value: "
+ << StringData(appMetadata.str, appMetadata.len));
}
- Status WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
- StringData uri,
- BSONObjBuilder* bob) {
- StatusWith<std::string> metadataResult = getMetadata(opCtx, uri);
- if (!metadataResult.isOK()) {
- return metadataResult.getStatus();
- }
- WiredTigerConfigParser topParser(metadataResult.getValue());
- WT_CONFIG_ITEM appMetadata;
- if (topParser.get("app_metadata", &appMetadata) != 0) {
- return Status::OK();
- }
- if (appMetadata.len == 0) {
- return Status::OK();
+ WiredTigerConfigParser parser(appMetadata);
+ WT_CONFIG_ITEM keyItem;
+ WT_CONFIG_ITEM valueItem;
+ int ret;
+ unordered_set<StringData, StringData::Hasher> keysSeen;
+ while ((ret = parser.next(&keyItem, &valueItem)) == 0) {
+ const StringData key(keyItem.str, keyItem.len);
+ if (keysSeen.count(key)) {
+ return Status(ErrorCodes::DuplicateKey,
+ str::stream() << "app_metadata must not contain duplicate keys. "
+ << "Found multiple instances of key '" << key << "'.");
}
- if (appMetadata.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT) {
- return Status(ErrorCodes::FailedToParse, str::stream()
- << "app_metadata must be a nested struct. Actual value: "
- << StringData(appMetadata.str, appMetadata.len));
- }
-
- WiredTigerConfigParser parser(appMetadata);
- WT_CONFIG_ITEM keyItem;
- WT_CONFIG_ITEM valueItem;
- int ret;
- unordered_set<StringData, StringData::Hasher> keysSeen;
- while ((ret = parser.next(&keyItem, &valueItem)) == 0) {
- const StringData key(keyItem.str, keyItem.len);
- if (keysSeen.count(key)) {
- return Status(ErrorCodes::DuplicateKey, str::stream()
- << "app_metadata must not contain duplicate keys. "
- << "Found multiple instances of key '" << key << "'.");
- }
- keysSeen.insert(key);
+ keysSeen.insert(key);
- switch (valueItem.type) {
+ switch (valueItem.type) {
case WT_CONFIG_ITEM::WT_CONFIG_ITEM_BOOL:
bob->appendBool(key, valueItem.val);
break;
@@ -167,285 +165,277 @@ namespace mongo {
default:
bob->append(key, StringData(valueItem.str, valueItem.len));
break;
- }
- }
- if (ret != WT_NOTFOUND) {
- return wtRCToStatus(ret);
}
-
- return Status::OK();
}
-
- StatusWith<BSONObj> WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
- StringData uri) {
- BSONObjBuilder bob;
- Status status = getApplicationMetadata(opCtx, uri, &bob);
- if (!status.isOK()) {
- return StatusWith<BSONObj>(status);
- }
- return StatusWith<BSONObj>(bob.obj());
+ if (ret != WT_NOTFOUND) {
+ return wtRCToStatus(ret);
}
- Status WiredTigerUtil::checkApplicationMetadataFormatVersion(OperationContext* opCtx,
- StringData uri,
- int64_t minimumVersion,
- int64_t maximumVersion) {
-
- StatusWith<std::string> result = getMetadata(opCtx, uri);
- if (result.getStatus().code() == ErrorCodes::NoSuchKey) {
- return result.getStatus();
- }
- invariantOK(result.getStatus());
-
- WiredTigerConfigParser topParser(result.getValue());
- WT_CONFIG_ITEM metadata;
- if (topParser.get("app_metadata", &metadata) != 0)
- return Status(ErrorCodes::UnsupportedFormat, str::stream()
- << "application metadata for " << uri
- << " is missing ");
-
- WiredTigerConfigParser parser(metadata);
-
- int64_t version = 0;
- WT_CONFIG_ITEM versionItem;
- if (parser.get("formatVersion", &versionItem) != 0) {
- // If 'formatVersion' is missing, this metadata was introduced by
- // one of the RC versions (where the format version is 1).
- version = 1;
- }
- else if (versionItem.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_NUM) {
- version = versionItem.val;
- }
- else {
- return Status(ErrorCodes::UnsupportedFormat, str::stream()
- << "'formatVersion' in application metadata for " << uri
- << " must be a number. Current value: "
- << StringData(versionItem.str, versionItem.len));
- }
+ return Status::OK();
+}
- if (version < minimumVersion || version > maximumVersion) {
- return Status(ErrorCodes::UnsupportedFormat, str::stream()
- << "Application metadata for " << uri
- << " has unsupported format version " << version);
- }
+StatusWith<BSONObj> WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
+ StringData uri) {
+ BSONObjBuilder bob;
+ Status status = getApplicationMetadata(opCtx, uri, &bob);
+ if (!status.isOK()) {
+ return StatusWith<BSONObj>(status);
+ }
+ return StatusWith<BSONObj>(bob.obj());
+}
- LOG(2) << "WiredTigerUtil::checkApplicationMetadataFormatVersion "
- << " uri: " << uri
- << " ok range " << minimumVersion << " -> " << maximumVersion
- << " current: " << version;
+Status WiredTigerUtil::checkApplicationMetadataFormatVersion(OperationContext* opCtx,
+ StringData uri,
+ int64_t minimumVersion,
+ int64_t maximumVersion) {
+ StatusWith<std::string> result = getMetadata(opCtx, uri);
+ if (result.getStatus().code() == ErrorCodes::NoSuchKey) {
+ return result.getStatus();
+ }
+ invariantOK(result.getStatus());
+
+ WiredTigerConfigParser topParser(result.getValue());
+ WT_CONFIG_ITEM metadata;
+ if (topParser.get("app_metadata", &metadata) != 0)
+ return Status(ErrorCodes::UnsupportedFormat,
+ str::stream() << "application metadata for " << uri << " is missing ");
+
+ WiredTigerConfigParser parser(metadata);
+
+ int64_t version = 0;
+ WT_CONFIG_ITEM versionItem;
+ if (parser.get("formatVersion", &versionItem) != 0) {
+ // If 'formatVersion' is missing, this metadata was introduced by
+ // one of the RC versions (where the format version is 1).
+ version = 1;
+ } else if (versionItem.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_NUM) {
+ version = versionItem.val;
+ } else {
+ return Status(ErrorCodes::UnsupportedFormat,
+ str::stream() << "'formatVersion' in application metadata for " << uri
+ << " must be a number. Current value: "
+ << StringData(versionItem.str, versionItem.len));
+ }
- return Status::OK();
+ if (version < minimumVersion || version > maximumVersion) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ str::stream() << "Application metadata for " << uri
+ << " has unsupported format version " << version);
}
- // static
- StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
- const std::string& uri,
- const std::string& config,
- int statisticsKey) {
- invariant(session);
- WT_CURSOR* cursor = NULL;
- const char* cursorConfig = config.empty() ? NULL : config.c_str();
- int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &cursor);
- if (ret != 0) {
- return StatusWith<uint64_t>(ErrorCodes::CursorNotFound, str::stream()
- << "unable to open cursor at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
- }
- invariant(cursor);
- ON_BLOCK_EXIT(cursor->close, cursor);
-
- cursor->set_key(cursor, statisticsKey);
- ret = cursor->search(cursor);
- if (ret != 0) {
- return StatusWith<uint64_t>(ErrorCodes::NoSuchKey, str::stream()
- << "unable to find key " << statisticsKey << " at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
- }
+ LOG(2) << "WiredTigerUtil::checkApplicationMetadataFormatVersion "
+ << " uri: " << uri << " ok range " << minimumVersion << " -> " << maximumVersion
+ << " current: " << version;
- uint64_t value;
- ret = cursor->get_value(cursor, NULL, NULL, &value);
- if (ret != 0) {
- return StatusWith<uint64_t>(ErrorCodes::BadValue, str::stream()
- << "unable to get value for key " << statisticsKey << " at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
- }
+ return Status::OK();
+}
- return StatusWith<uint64_t>(value);
+// static
+StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
+ const std::string& uri,
+ const std::string& config,
+ int statisticsKey) {
+ invariant(session);
+ WT_CURSOR* cursor = NULL;
+ const char* cursorConfig = config.empty() ? NULL : config.c_str();
+ int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &cursor);
+ if (ret != 0) {
+ return StatusWith<uint64_t>(ErrorCodes::CursorNotFound,
+ str::stream() << "unable to open cursor at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
+ }
+ invariant(cursor);
+ ON_BLOCK_EXIT(cursor->close, cursor);
+
+ cursor->set_key(cursor, statisticsKey);
+ ret = cursor->search(cursor);
+ if (ret != 0) {
+ return StatusWith<uint64_t>(ErrorCodes::NoSuchKey,
+ str::stream() << "unable to find key " << statisticsKey
+ << " at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
}
- int64_t WiredTigerUtil::getIdentSize(WT_SESSION* s,
- const std::string& uri ) {
- StatusWith<int64_t> result = WiredTigerUtil::getStatisticsValueAs<int64_t>(
- s,
- "statistics:" + uri, "statistics=(size)", WT_STAT_DSRC_BLOCK_SIZE);
- const Status& status = result.getStatus();
- if ( !status.isOK() ) {
- if ( status.code() == ErrorCodes::CursorNotFound ) {
- // ident gone, so its 0
- return 0;
- }
- uassertStatusOK( status );
- }
- return result.getValue();
+ uint64_t value;
+ ret = cursor->get_value(cursor, NULL, NULL, &value);
+ if (ret != 0) {
+ return StatusWith<uint64_t>(ErrorCodes::BadValue,
+ str::stream() << "unable to get value for key " << statisticsKey
+ << " at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
}
-namespace {
- int mdb_handle_error(WT_EVENT_HANDLER *handler, WT_SESSION *session,
- int errorCode, const char *message) {
- try {
- error() << "WiredTiger (" << errorCode << ") " << message;
- fassert( 28558, errorCode != WT_PANIC );
- }
- catch (...) {
- std::terminate();
+ return StatusWith<uint64_t>(value);
+}
+
+int64_t WiredTigerUtil::getIdentSize(WT_SESSION* s, const std::string& uri) {
+ StatusWith<int64_t> result = WiredTigerUtil::getStatisticsValueAs<int64_t>(
+ s, "statistics:" + uri, "statistics=(size)", WT_STAT_DSRC_BLOCK_SIZE);
+ const Status& status = result.getStatus();
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::CursorNotFound) {
+ // ident gone, so its 0
+ return 0;
}
- return 0;
+ uassertStatusOK(status);
}
+ return result.getValue();
+}
- int mdb_handle_message( WT_EVENT_HANDLER *handler, WT_SESSION *session,
- const char *message) {
- try {
- log() << "WiredTiger " << message;
- }
- catch (...) {
- std::terminate();
- }
- return 0;
+namespace {
+int mdb_handle_error(WT_EVENT_HANDLER* handler,
+ WT_SESSION* session,
+ int errorCode,
+ const char* message) {
+ try {
+ error() << "WiredTiger (" << errorCode << ") " << message;
+ fassert(28558, errorCode != WT_PANIC);
+ } catch (...) {
+ std::terminate();
}
+ return 0;
+}
- int mdb_handle_progress( WT_EVENT_HANDLER *handler, WT_SESSION *session,
- const char *operation, uint64_t progress) {
- try {
- log() << "WiredTiger progress " << operation << " " << progress;
- }
- catch (...) {
- std::terminate();
- }
+int mdb_handle_message(WT_EVENT_HANDLER* handler, WT_SESSION* session, const char* message) {
+ try {
+ log() << "WiredTiger " << message;
+ } catch (...) {
+ std::terminate();
+ }
+ return 0;
+}
- return 0;
+int mdb_handle_progress(WT_EVENT_HANDLER* handler,
+ WT_SESSION* session,
+ const char* operation,
+ uint64_t progress) {
+ try {
+ log() << "WiredTiger progress " << operation << " " << progress;
+ } catch (...) {
+ std::terminate();
}
+ return 0;
+}
}
- WT_EVENT_HANDLER WiredTigerUtil::defaultEventHandlers() {
- WT_EVENT_HANDLER handlers = {};
- handlers.handle_error = mdb_handle_error;
- handlers.handle_message = mdb_handle_message;
- handlers.handle_progress = mdb_handle_progress;
- return handlers;
- }
+WT_EVENT_HANDLER WiredTigerUtil::defaultEventHandlers() {
+ WT_EVENT_HANDLER handlers = {};
+ handlers.handle_error = mdb_handle_error;
+ handlers.handle_message = mdb_handle_message;
+ handlers.handle_progress = mdb_handle_progress;
+ return handlers;
+}
- int WiredTigerUtil::verifyTable(OperationContext* txn, const std::string& uri,
- std::vector<std::string>* errors) {
-
- class MyEventHandlers : public WT_EVENT_HANDLER {
- public:
- MyEventHandlers(std::vector<std::string>* errors)
- : WT_EVENT_HANDLER(defaultEventHandlers())
- , _errors(errors)
- , _defaultErrorHandler(handle_error)
- {
- handle_error = onError;
- }
+int WiredTigerUtil::verifyTable(OperationContext* txn,
+ const std::string& uri,
+ std::vector<std::string>* errors) {
+ class MyEventHandlers : public WT_EVENT_HANDLER {
+ public:
+ MyEventHandlers(std::vector<std::string>* errors)
+ : WT_EVENT_HANDLER(defaultEventHandlers()),
+ _errors(errors),
+ _defaultErrorHandler(handle_error) {
+ handle_error = onError;
+ }
- private:
- static int onError(WT_EVENT_HANDLER* handler, WT_SESSION* session, int error,
- const char* message) {
- try {
- MyEventHandlers* self = static_cast<MyEventHandlers*>(handler);
- self->_errors->push_back(message);
- return self->_defaultErrorHandler(handler, session, error, message);
- }
- catch (...) {
- std::terminate();
- }
+ private:
+ static int onError(WT_EVENT_HANDLER* handler,
+ WT_SESSION* session,
+ int error,
+ const char* message) {
+ try {
+ MyEventHandlers* self = static_cast<MyEventHandlers*>(handler);
+ self->_errors->push_back(message);
+ return self->_defaultErrorHandler(handler, session, error, message);
+ } catch (...) {
+ std::terminate();
}
+ }
- typedef int(*ErrorHandler)(WT_EVENT_HANDLER*, WT_SESSION*, int, const char*);
+ typedef int (*ErrorHandler)(WT_EVENT_HANDLER*, WT_SESSION*, int, const char*);
- std::vector<std::string>* const _errors;
- const ErrorHandler _defaultErrorHandler;
- } eventHandler(errors);
+ std::vector<std::string>* const _errors;
+ const ErrorHandler _defaultErrorHandler;
+ } eventHandler(errors);
- // Try to close as much as possible to avoid EBUSY errors.
- WiredTigerRecoveryUnit::get(txn)->getSession(txn)->closeAllCursors();
- WiredTigerSessionCache* sessionCache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
- sessionCache->closeAll();
+ // Try to close as much as possible to avoid EBUSY errors.
+ WiredTigerRecoveryUnit::get(txn)->getSession(txn)->closeAllCursors();
+ WiredTigerSessionCache* sessionCache = WiredTigerRecoveryUnit::get(txn)->getSessionCache();
+ sessionCache->closeAll();
- // Open a new session with custom error handlers.
- WT_CONNECTION* conn = WiredTigerRecoveryUnit::get(txn)->getSessionCache()->conn();
- WT_SESSION* session;
- invariantWTOK(conn->open_session(conn, errors ? &eventHandler : NULL, NULL, &session));
- ON_BLOCK_EXIT(session->close, session, "");
+ // Open a new session with custom error handlers.
+ WT_CONNECTION* conn = WiredTigerRecoveryUnit::get(txn)->getSessionCache()->conn();
+ WT_SESSION* session;
+ invariantWTOK(conn->open_session(conn, errors ? &eventHandler : NULL, NULL, &session));
+ ON_BLOCK_EXIT(session->close, session, "");
- // Do the verify. Weird parens prevent treating "verify" as a macro.
- return (session->verify)(session, uri.c_str(), NULL);
- }
+ // Do the verify. Weird parens prevent treating "verify" as a macro.
+ return (session->verify)(session, uri.c_str(), NULL);
+}
- Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session,
- const std::string& uri, const std::string& config,
- BSONObjBuilder* bob) {
- invariant(session);
- invariant(bob);
- WT_CURSOR* c = NULL;
- const char* cursorConfig = config.empty() ? NULL : config.c_str();
- int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &c);
- if (ret != 0) {
- return Status(ErrorCodes::CursorNotFound, str::stream()
- << "unable to open cursor at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session,
+ const std::string& uri,
+ const std::string& config,
+ BSONObjBuilder* bob) {
+ invariant(session);
+ invariant(bob);
+ WT_CURSOR* c = NULL;
+ const char* cursorConfig = config.empty() ? NULL : config.c_str();
+ int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &c);
+ if (ret != 0) {
+ return Status(ErrorCodes::CursorNotFound,
+ str::stream() << "unable to open cursor at URI " << uri
+ << ". reason: " << wiredtiger_strerror(ret));
+ }
+ bob->append("uri", uri);
+ invariant(c);
+ ON_BLOCK_EXIT(c->close, c);
+
+ std::map<string, BSONObjBuilder*> subs;
+ const char* desc;
+ uint64_t value;
+ while (c->next(c) == 0 && c->get_value(c, &desc, NULL, &value) == 0) {
+ StringData key(desc);
+
+ StringData prefix;
+ StringData suffix;
+
+ size_t idx = key.find(':');
+ if (idx != string::npos) {
+ prefix = key.substr(0, idx);
+ suffix = key.substr(idx + 1);
+ } else {
+ idx = key.find(' ');
}
- bob->append("uri", uri);
- invariant(c);
- ON_BLOCK_EXIT(c->close, c);
-
- std::map<string,BSONObjBuilder*> subs;
- const char* desc;
- uint64_t value;
- while (c->next(c) == 0 && c->get_value(c, &desc, NULL, &value) == 0) {
- StringData key( desc );
-
- StringData prefix;
- StringData suffix;
-
- size_t idx = key.find( ':' );
- if ( idx != string::npos ) {
- prefix = key.substr( 0, idx );
- suffix = key.substr( idx + 1 );
- }
- else {
- idx = key.find( ' ' );
- }
- if ( idx != string::npos ) {
- prefix = key.substr( 0, idx );
- suffix = key.substr( idx + 1 );
- }
- else {
- prefix = key;
- suffix = "num";
- }
+ if (idx != string::npos) {
+ prefix = key.substr(0, idx);
+ suffix = key.substr(idx + 1);
+ } else {
+ prefix = key;
+ suffix = "num";
+ }
- long long v = _castStatisticsValue<long long>(value);
+ long long v = _castStatisticsValue<long long>(value);
- if ( prefix.size() == 0 ) {
- bob->appendNumber(desc, v);
- }
- else {
- BSONObjBuilder*& sub = subs[prefix.toString()];
- if ( !sub )
- sub = new BSONObjBuilder();
- sub->appendNumber(mongoutils::str::ltrim(suffix.toString()), v);
- }
+ if (prefix.size() == 0) {
+ bob->appendNumber(desc, v);
+ } else {
+ BSONObjBuilder*& sub = subs[prefix.toString()];
+ if (!sub)
+ sub = new BSONObjBuilder();
+ sub->appendNumber(mongoutils::str::ltrim(suffix.toString()), v);
}
+ }
- for ( std::map<string,BSONObjBuilder*>::const_iterator it = subs.begin();
- it != subs.end(); ++it ) {
- const std::string& s = it->first;
- bob->append( s, it->second->obj() );
- delete it->second;
- }
- return Status::OK();
+ for (std::map<string, BSONObjBuilder*>::const_iterator it = subs.begin(); it != subs.end();
+ ++it) {
+ const std::string& s = it->first;
+ bob->append(s, it->second->obj());
+ delete it->second;
}
+ return Status::OK();
+}
} // namespace mongo
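
The reformatted exportTableToBSON above keeps its grouping logic intact: each WiredTiger statistics description is split into a prefix and a suffix at the first ':' (or, failing that, the first space), and entries sharing a prefix are collected into one sub-document. A minimal standalone sketch of that split, assuming a hypothetical splitStatKey helper rather than the MongoDB/WiredTiger APIs (the real code also left-trims the suffix):

    #include <iostream>
    #include <string>
    #include <utility>

    // Hypothetical helper mirroring the prefix/suffix split in exportTableToBSON.
    std::pair<std::string, std::string> splitStatKey(const std::string& key) {
        size_t idx = key.find(':');
        if (idx == std::string::npos)
            idx = key.find(' ');
        if (idx == std::string::npos)
            return {key, "num"};  // no separator: the value is reported under "num"
        return {key.substr(0, idx), key.substr(idx + 1)};
    }

    int main() {
        // "block-manager: blocks read" would be grouped under a "block-manager" sub-document.
        std::pair<std::string, std::string> parts = splitStatKey("block-manager: blocks read");
        std::cout << parts.first << " | " << parts.second << "\n";
        return 0;
    }
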
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
index 72ad5e88c15..1d69a382eec 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h
@@ -41,233 +41,239 @@
namespace mongo {
- class BSONObjBuilder;
- class OperationContext;
- class WiredTigerConfigParser;
+class BSONObjBuilder;
+class OperationContext;
+class WiredTigerConfigParser;
- inline bool wt_keeptxnopen() {
- return false;
+inline bool wt_keeptxnopen() {
+ return false;
+}
+
+Status wtRCToStatus_slow(int retCode, const char* prefix);
+
+/**
+ * converts wiredtiger return codes to mongodb statuses.
+ */
+inline Status wtRCToStatus(int retCode, const char* prefix = NULL) {
+ if (MONGO_likely(retCode == 0))
+ return Status::OK();
+
+ return wtRCToStatus_slow(retCode, prefix);
+}
+
+#define invariantWTOK(expression) \
+ do { \
+ int _invariantWTOK_retCode = expression; \
+ if (MONGO_unlikely(_invariantWTOK_retCode != 0)) { \
+ invariantOKFailed( \
+ #expression, wtRCToStatus(_invariantWTOK_retCode), __FILE__, __LINE__); \
+ } \
+ } while (false)
+
+struct WiredTigerItem : public WT_ITEM {
+ WiredTigerItem(const void* d, size_t s) {
+ data = d;
+ size = s;
+ }
+ WiredTigerItem(const std::string& str) {
+ data = str.c_str();
+ size = str.size();
}
+ // NOTE: do not call Get() on a temporary.
+ // The pointer returned by Get() must not be allowed to live longer than *this.
+ WT_ITEM* Get() {
+ return this;
+ }
+ const WT_ITEM* Get() const {
+ return this;
+ }
+};
+
+class WiredTigerUtil {
+ MONGO_DISALLOW_COPYING(WiredTigerUtil);
- Status wtRCToStatus_slow(int retCode, const char* prefix );
+private:
+ WiredTigerUtil();
+public:
/**
- * converts wiredtiger return codes to mongodb statuses.
+ * Fetch the type and source fields out of the colgroup metadata. 'tableUri' must be a
+ * valid table: uri.
*/
- inline Status wtRCToStatus(int retCode, const char* prefix = NULL ) {
- if (MONGO_likely(retCode == 0))
- return Status::OK();
+ static void fetchTypeAndSourceURI(OperationContext* opCtx,
+ const std::string& tableUri,
+ std::string* type,
+ std::string* source);
- return wtRCToStatus_slow(retCode, prefix);
- }
+ /**
+ * Reads contents of table using URI and exports all keys to BSON as string elements.
+ * Additionally, adds a 'uri' field to the output document.
+ */
+ static Status exportTableToBSON(WT_SESSION* s,
+ const std::string& uri,
+ const std::string& config,
+ BSONObjBuilder* bob);
-#define invariantWTOK(expression) do { \
- int _invariantWTOK_retCode = expression; \
- if (MONGO_unlikely(_invariantWTOK_retCode != 0)) { \
- invariantOKFailed(#expression, wtRCToStatus(_invariantWTOK_retCode), \
- __FILE__, __LINE__); \
- } \
- } while (false)
+ /**
+ * Gets entire metadata string for collection/index at URI.
+ */
+ static StatusWith<std::string> getMetadata(OperationContext* opCtx, StringData uri);
+
+ /**
+ * Reads app_metadata for collection/index at URI as a BSON document.
+ */
+ static Status getApplicationMetadata(OperationContext* opCtx,
+ StringData uri,
+ BSONObjBuilder* bob);
+
+ static StatusWith<BSONObj> getApplicationMetadata(OperationContext* opCtx, StringData uri);
+
+ /**
+ * Validates formatVersion in application metadata for 'uri'.
+ * Version must be numeric and be in the range [minimumVersion, maximumVersion].
+ * URI is used in error messages only.
+ */
+ static Status checkApplicationMetadataFormatVersion(OperationContext* opCtx,
+ StringData uri,
+ int64_t minimumVersion,
+ int64_t maximumVersion);
+ /**
+ * Reads individual statistics using URI.
+ * List of statistics keys WT_STAT_* can be found in wiredtiger.h.
+ */
+ static StatusWith<uint64_t> getStatisticsValue(WT_SESSION* session,
+ const std::string& uri,
+ const std::string& config,
+ int statisticsKey);
- struct WiredTigerItem : public WT_ITEM {
- WiredTigerItem(const void *d, size_t s) {
- data = d;
- size = s;
- }
- WiredTigerItem(const std::string &str) {
- data = str.c_str();
- size = str.size();
- }
- // NOTE: do not call Get() on a temporary.
- // The pointer returned by Get() must not be allowed to live longer than *this.
- WT_ITEM *Get() { return this; }
- const WT_ITEM *Get() const { return this; }
- };
-
- class WiredTigerUtil {
- MONGO_DISALLOW_COPYING(WiredTigerUtil);
- private:
- WiredTigerUtil();
-
- public:
-
- /**
- * Fetch the type and source fields out of the colgroup metadata. 'tableUri' must be a
- * valid table: uri.
- */
- static void fetchTypeAndSourceURI(OperationContext* opCtx,
- const std::string& tableUri,
- std::string* type,
- std::string* source);
-
- /**
- * Reads contents of table using URI and exports all keys to BSON as string elements.
- * Additional, adds 'uri' field to output document.
- */
- static Status exportTableToBSON(WT_SESSION* s,
- const std::string& uri,
- const std::string& config,
- BSONObjBuilder* bob);
-
- /**
- * Gets entire metadata string for collection/index at URI.
- */
- static StatusWith<std::string> getMetadata(OperationContext* opCtx,
- StringData uri);
-
- /**
- * Reads app_metadata for collection/index at URI as a BSON document.
- */
- static Status getApplicationMetadata(OperationContext* opCtx,
- StringData uri,
- BSONObjBuilder* bob);
-
- static StatusWith<BSONObj> getApplicationMetadata(OperationContext* opCtx,
- StringData uri);
-
- /**
- * Validates formatVersion in application metadata for 'uri'.
- * Version must be numeric and be in the range [minimumVersion, maximumVersion].
- * URI is used in error messages only.
- */
- static Status checkApplicationMetadataFormatVersion(OperationContext* opCtx,
- StringData uri,
- int64_t minimumVersion,
- int64_t maximumVersion);
- /**
- * Reads individual statistics using URI.
- * List of statistics keys WT_STAT_* can be found in wiredtiger.h.
- */
- static StatusWith<uint64_t> getStatisticsValue(WT_SESSION* session,
+ /**
+ * Reads individual statistics using URI and casts to type ResultType.
+ * Caps statistics value at max(ResultType) in case of overflow.
+ */
+ template <typename ResultType>
+ static StatusWith<ResultType> getStatisticsValueAs(WT_SESSION* session,
const std::string& uri,
const std::string& config,
int statisticsKey);
- /**
- * Reads individual statistics using URI and casts to type ResultType.
- * Caps statistics value at max(ResultType) in case of overflow.
- */
- template<typename ResultType>
- static StatusWith<ResultType> getStatisticsValueAs(WT_SESSION* session,
- const std::string& uri,
- const std::string& config,
- int statisticsKey);
-
- /**
- * Reads individual statistics using URI and casts to type ResultType.
- * Caps statistics value at 'maximumResultType'.
- */
- template<typename ResultType>
- static StatusWith<ResultType> getStatisticsValueAs(WT_SESSION* session,
- const std::string& uri,
- const std::string& config,
- int statisticsKey,
- ResultType maximumResultType);
-
- static int64_t getIdentSize(WT_SESSION* s, const std::string& uri );
-
- /**
- * Returns a WT_EVENT_HANDER with MongoDB's default handlers.
- * The default handlers just log so it is recommended that you consider calling them even if
- * you are capturing the output.
- *
- * There is no default "close" handler. You only need to provide one if you need to call a
- * destructor.
- */
- static WT_EVENT_HANDLER defaultEventHandlers();
-
- /**
- * Calls WT_SESSION::validate() on a side-session to ensure that your current transaction
- * isn't left in an invalid state.
- *
- * If errors is non-NULL, all error messages will be appended to the array.
- */
- static int verifyTable(OperationContext* txn, const std::string& uri,
- std::vector<std::string>* errors = NULL);
-
- private:
- /**
- * Casts unsigned 64-bit statistics value to T.
- * If original value exceeds maximum value of T, return max(T).
- */
- template<typename T>
- static T _castStatisticsValue(uint64_t statisticsValue);
-
- /**
- * Casts unsigned 64-bit statistics value to T.
- * If original value exceeds 'maximumResultType', return 'maximumResultType'.
- */
- template<typename T>
- static T _castStatisticsValue(uint64_t statisticsValue, T maximumResultType);
- };
-
- class WiredTigerConfigParser {
- MONGO_DISALLOW_COPYING(WiredTigerConfigParser);
- public:
- WiredTigerConfigParser(StringData config) {
- invariantWTOK(wiredtiger_config_parser_open(NULL, config.rawData(), config.size(),
- &_parser));
- }
-
- WiredTigerConfigParser(const WT_CONFIG_ITEM& nested) {
- invariant(nested.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT);
- invariantWTOK(wiredtiger_config_parser_open(NULL, nested.str, nested.len, &_parser));
- }
-
- ~WiredTigerConfigParser() {
- invariantWTOK(_parser->close(_parser));
- }
-
- int next(WT_CONFIG_ITEM* key, WT_CONFIG_ITEM* value) {
- return _parser->next(_parser, key, value);
- }
-
- int get(const char* key, WT_CONFIG_ITEM* value) {
- return _parser->get(_parser, key, value);
- }
-
- private:
- WT_CONFIG_PARSER* _parser;
- };
-
- // static
- template<typename ResultType>
- StatusWith<ResultType> WiredTigerUtil::getStatisticsValueAs(WT_SESSION* session,
- const std::string& uri,
- const std::string& config,
- int statisticsKey) {
- return getStatisticsValueAs<ResultType>(session, uri, config, statisticsKey,
- std::numeric_limits<ResultType>::max());
+ /**
+ * Reads individual statistics using URI and casts to type ResultType.
+ * Caps statistics value at 'maximumResultType'.
+ */
+ template <typename ResultType>
+ static StatusWith<ResultType> getStatisticsValueAs(WT_SESSION* session,
+ const std::string& uri,
+ const std::string& config,
+ int statisticsKey,
+ ResultType maximumResultType);
+
+ static int64_t getIdentSize(WT_SESSION* s, const std::string& uri);
+
+ /**
+ * Returns a WT_EVENT_HANDLER with MongoDB's default handlers.
+ * The default handlers just log, so it is recommended that you consider calling them even if
+ * you are capturing the output.
+ *
+ * There is no default "close" handler. You only need to provide one if you need to call a
+ * destructor.
+ */
+ static WT_EVENT_HANDLER defaultEventHandlers();
+
+ /**
+ * Calls WT_SESSION::verify() on a side-session to ensure that your current transaction
+ * isn't left in an invalid state.
+ *
+ * If errors is non-NULL, all error messages will be appended to the array.
+ */
+ static int verifyTable(OperationContext* txn,
+ const std::string& uri,
+ std::vector<std::string>* errors = NULL);
+
+private:
+ /**
+ * Casts unsigned 64-bit statistics value to T.
+ * If original value exceeds maximum value of T, return max(T).
+ */
+ template <typename T>
+ static T _castStatisticsValue(uint64_t statisticsValue);
+
+ /**
+ * Casts unsigned 64-bit statistics value to T.
+ * If original value exceeds 'maximumResultType', return 'maximumResultType'.
+ */
+ template <typename T>
+ static T _castStatisticsValue(uint64_t statisticsValue, T maximumResultType);
+};
+
+class WiredTigerConfigParser {
+ MONGO_DISALLOW_COPYING(WiredTigerConfigParser);
+
+public:
+ WiredTigerConfigParser(StringData config) {
+ invariantWTOK(
+ wiredtiger_config_parser_open(NULL, config.rawData(), config.size(), &_parser));
+ }
+
+ WiredTigerConfigParser(const WT_CONFIG_ITEM& nested) {
+ invariant(nested.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT);
+ invariantWTOK(wiredtiger_config_parser_open(NULL, nested.str, nested.len, &_parser));
+ }
+
+ ~WiredTigerConfigParser() {
+ invariantWTOK(_parser->close(_parser));
}
- // static
- template<typename ResultType>
- StatusWith<ResultType> WiredTigerUtil::getStatisticsValueAs(WT_SESSION* session,
- const std::string& uri,
- const std::string& config,
- int statisticsKey,
- ResultType maximumResultType) {
- StatusWith<uint64_t> result = getStatisticsValue(session, uri, config, statisticsKey);
- if (!result.isOK()) {
- return StatusWith<ResultType>(result.getStatus());
- }
- return StatusWith<ResultType>(_castStatisticsValue<ResultType>(result.getValue(),
- maximumResultType));
+ int next(WT_CONFIG_ITEM* key, WT_CONFIG_ITEM* value) {
+ return _parser->next(_parser, key, value);
}
- // static
- template<typename ResultType>
- ResultType WiredTigerUtil::_castStatisticsValue(uint64_t statisticsValue) {
- return _castStatisticsValue<ResultType>(statisticsValue,
- std::numeric_limits<ResultType>::max());
+ int get(const char* key, WT_CONFIG_ITEM* value) {
+ return _parser->get(_parser, key, value);
}
- // static
- template<typename ResultType>
- ResultType WiredTigerUtil::_castStatisticsValue(uint64_t statisticsValue,
- ResultType maximumResultType) {
- return statisticsValue > static_cast<uint64_t>(maximumResultType) ?
- maximumResultType : static_cast<ResultType>(statisticsValue);
+private:
+ WT_CONFIG_PARSER* _parser;
+};
+
+// static
+template <typename ResultType>
+StatusWith<ResultType> WiredTigerUtil::getStatisticsValueAs(WT_SESSION* session,
+ const std::string& uri,
+ const std::string& config,
+ int statisticsKey) {
+ return getStatisticsValueAs<ResultType>(
+ session, uri, config, statisticsKey, std::numeric_limits<ResultType>::max());
+}
+
+// static
+template <typename ResultType>
+StatusWith<ResultType> WiredTigerUtil::getStatisticsValueAs(WT_SESSION* session,
+ const std::string& uri,
+ const std::string& config,
+ int statisticsKey,
+ ResultType maximumResultType) {
+ StatusWith<uint64_t> result = getStatisticsValue(session, uri, config, statisticsKey);
+ if (!result.isOK()) {
+ return StatusWith<ResultType>(result.getStatus());
}
+ return StatusWith<ResultType>(
+ _castStatisticsValue<ResultType>(result.getValue(), maximumResultType));
+}
+
+// static
+template <typename ResultType>
+ResultType WiredTigerUtil::_castStatisticsValue(uint64_t statisticsValue) {
+ return _castStatisticsValue<ResultType>(statisticsValue,
+ std::numeric_limits<ResultType>::max());
+}
+
+// static
+template <typename ResultType>
+ResultType WiredTigerUtil::_castStatisticsValue(uint64_t statisticsValue,
+ ResultType maximumResultType) {
+ return statisticsValue > static_cast<uint64_t>(maximumResultType)
+ ? maximumResultType
+ : static_cast<ResultType>(statisticsValue);
+}
} // namespace mongo
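
The wtRCToStatus split declared above keeps the common success case inline and pushes the string formatting for errors into an out-of-line slow path. A minimal sketch of the same pattern, assuming stand-in types (SimpleStatus and toStatusSlow are hypothetical, not MongoDB's Status API):

    #include <iostream>
    #include <string>

    struct SimpleStatus {
        bool ok;
        std::string reason;
    };

    // Out-of-line slow path: only reached on error, so no string building is
    // paid for on the success path.
    SimpleStatus toStatusSlow(int retCode, const char* prefix) {
        std::string msg = prefix ? std::string(prefix) + ": " : std::string();
        msg += "error code " + std::to_string(retCode);
        return {false, msg};
    }

    inline SimpleStatus toStatus(int retCode, const char* prefix = nullptr) {
        if (retCode == 0)
            return {true, std::string()};  // fast path, expected to be the common case
        return toStatusSlow(retCode, prefix);
    }

    int main() {
        std::cout << toStatus(0).ok << "\n";                          // prints 1
        std::cout << toStatus(-31802, "open_cursor").reason << "\n";  // prints "open_cursor: error code -31802"
        return 0;
    }
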
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp
index d699c3903ba..b9df4acc22e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp
@@ -43,292 +43,297 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- class WiredTigerConnection {
- public:
- WiredTigerConnection(StringData dbpath, StringData extraStrings) : _conn(NULL) {
- std::stringstream ss;
- ss << "create,";
- ss << extraStrings;
- string config = ss.str();
- int ret = wiredtiger_open(dbpath.toString().c_str(), NULL, config.c_str(), &_conn);
- ASSERT_OK(wtRCToStatus(ret));
- ASSERT(_conn);
- }
- ~WiredTigerConnection() {
- _conn->close(_conn, NULL);
- }
- WT_CONNECTION* getConnection() const { return _conn; }
- private:
- WT_CONNECTION* _conn;
- };
-
- class WiredTigerUtilHarnessHelper {
- public:
- WiredTigerUtilHarnessHelper(StringData extraStrings)
- : _dbpath("wt_test"),
- _connection(_dbpath.path(), extraStrings),
- _sessionCache(_connection.getConnection()) { }
-
-
- WiredTigerSessionCache* getSessionCache() {
- return &_sessionCache;
- }
-
- OperationContext* newOperationContext() {
- return new OperationContextNoop(new WiredTigerRecoveryUnit(getSessionCache()));
- }
-
- private:
- unittest::TempDir _dbpath;
- WiredTigerConnection _connection;
- WiredTigerSessionCache _sessionCache;
- };
-
- class WiredTigerUtilMetadataTest : public mongo::unittest::Test {
- public:
- virtual void setUp() {
- _harnessHelper.reset(new WiredTigerUtilHarnessHelper(""));
- _opCtx.reset(_harnessHelper->newOperationContext());
- }
-
- virtual void tearDown() {
- _opCtx.reset(NULL);
- _harnessHelper.reset(NULL);
- }
-
- protected:
- const char* getURI() const {
- return "table:mytable";
- }
-
- OperationContext* getOperationContext() const {
- ASSERT(_opCtx.get());
- return _opCtx.get();
- }
-
- void createSession(const char* config) {
- WT_SESSION* wtSession =
- WiredTigerRecoveryUnit::get(_opCtx.get())->getSession(_opCtx.get())->getSession();
- ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, getURI(), config)));
- }
- private:
- std::unique_ptr<WiredTigerUtilHarnessHelper> _harnessHelper;
- std::unique_ptr<OperationContext> _opCtx;
- };
-
- TEST_F(WiredTigerUtilMetadataTest, GetConfigurationStringInvalidURI) {
- StatusWith<std::string> result =
- WiredTigerUtil::getMetadata(getOperationContext(), getURI());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+using std::string;
+using std::stringstream;
+
+class WiredTigerConnection {
+public:
+ WiredTigerConnection(StringData dbpath, StringData extraStrings) : _conn(NULL) {
+ std::stringstream ss;
+ ss << "create,";
+ ss << extraStrings;
+ string config = ss.str();
+ int ret = wiredtiger_open(dbpath.toString().c_str(), NULL, config.c_str(), &_conn);
+ ASSERT_OK(wtRCToStatus(ret));
+ ASSERT(_conn);
}
-
- TEST_F(WiredTigerUtilMetadataTest, GetConfigurationStringNull) {
- const char* config = NULL;
- createSession(config);
- StatusWith<std::string> result =
- WiredTigerUtil::getMetadata(getOperationContext(), getURI());
- ASSERT_OK(result.getStatus());
- ASSERT_FALSE(result.getValue().empty());
- }
-
- TEST_F(WiredTigerUtilMetadataTest, GetConfigurationStringSimple) {
- const char* config = "app_metadata=(abc=123)";
- createSession(config);
- StatusWith<std::string> result =
- WiredTigerUtil::getMetadata(getOperationContext(), getURI());
- ASSERT_OK(result.getStatus());
- ASSERT_STRING_CONTAINS(result.getValue(), config);
- }
-
- TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataInvalidURI) {
- StatusWith<BSONObj> result =
- WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+ ~WiredTigerConnection() {
+ _conn->close(_conn, NULL);
}
-
- TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataNull) {
- const char* config = NULL;
- createSession(config);
- StatusWith<BSONObj> result =
- WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
- ASSERT_OK(result.getStatus());
- ASSERT_TRUE(result.getValue().isEmpty());
+ WT_CONNECTION* getConnection() const {
+ return _conn;
}
- TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataString) {
- const char* config = "app_metadata=\"abc\"";
- createSession(config);
- StatusWith<BSONObj> result =
- WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
- }
+private:
+ WT_CONNECTION* _conn;
+};
- TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataDuplicateKeys) {
- const char* config = "app_metadata=(abc=123,abc=456)";
- createSession(config);
- StatusWith<BSONObj> result =
- WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::DuplicateKey, result.getStatus().code());
- }
+class WiredTigerUtilHarnessHelper {
+public:
+ WiredTigerUtilHarnessHelper(StringData extraStrings)
+ : _dbpath("wt_test"),
+ _connection(_dbpath.path(), extraStrings),
+ _sessionCache(_connection.getConnection()) {}
- TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataTypes) {
- const char* config = "app_metadata=(stringkey=\"abc\",boolkey1=true,boolkey2=false,"
- "idkey=def,numkey=123,"
- "structkey=(k1=v2,k2=v2))";
- createSession(config);
- StatusWith<BSONObj> result =
- WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
- ASSERT_OK(result.getStatus());
- const BSONObj& obj = result.getValue();
-
- BSONElement stringElement = obj.getField("stringkey");
- ASSERT_EQUALS(mongo::String, stringElement.type());
- ASSERT_EQUALS("abc", stringElement.String());
-
- BSONElement boolElement1 = obj.getField("boolkey1");
- ASSERT_TRUE(boolElement1.isBoolean());
- ASSERT_TRUE(boolElement1.boolean());
-
- BSONElement boolElement2 = obj.getField("boolkey2");
- ASSERT_TRUE(boolElement2.isBoolean());
- ASSERT_FALSE(boolElement2.boolean());
-
- BSONElement identifierElement = obj.getField("idkey");
- ASSERT_EQUALS(mongo::String, identifierElement.type());
- ASSERT_EQUALS("def", identifierElement.String());
-
- BSONElement numberElement = obj.getField("numkey");
- ASSERT_TRUE(numberElement.isNumber());
- ASSERT_EQUALS(123, numberElement.numberInt());
-
- BSONElement structElement = obj.getField("structkey");
- ASSERT_EQUALS(mongo::String, structElement.type());
- ASSERT_EQUALS("(k1=v2,k2=v2)", structElement.String());
- }
- TEST_F(WiredTigerUtilMetadataTest, CheckApplicationMetadataFormatVersionMissingKey) {
- createSession("app_metadata=(abc=123)");
- ASSERT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(getOperationContext(),
- getURI(),
- 1,
- 1));
- ASSERT_NOT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(getOperationContext(),
- getURI(),
- 2,
- 2));
+ WiredTigerSessionCache* getSessionCache() {
+ return &_sessionCache;
}
- TEST_F(WiredTigerUtilMetadataTest, CheckApplicationMetadataFormatVersionString) {
- createSession("app_metadata=(formatVersion=\"bar\")");
- ASSERT_NOT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(getOperationContext(),
- getURI(),
- 1,
- 1));
+ OperationContext* newOperationContext() {
+ return new OperationContextNoop(new WiredTigerRecoveryUnit(getSessionCache()));
}
- TEST_F(WiredTigerUtilMetadataTest, CheckApplicationMetadataFormatVersionNumber) {
- createSession("app_metadata=(formatVersion=2)");
- ASSERT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(getOperationContext(),
- getURI(),
- 2,
- 3));
- ASSERT_NOT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(getOperationContext(),
- getURI(),
- 1,
- 1));
- ASSERT_NOT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(getOperationContext(),
- getURI(),
- 3,
- 3));
+private:
+ unittest::TempDir _dbpath;
+ WiredTigerConnection _connection;
+ WiredTigerSessionCache _sessionCache;
+};
+
+class WiredTigerUtilMetadataTest : public mongo::unittest::Test {
+public:
+ virtual void setUp() {
+ _harnessHelper.reset(new WiredTigerUtilHarnessHelper(""));
+ _opCtx.reset(_harnessHelper->newOperationContext());
}
- TEST(WiredTigerUtilTest, GetStatisticsValueMissingTable) {
- WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
- WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
- WiredTigerSession* session = recoveryUnit.getSession(NULL);
- StatusWith<uint64_t> result = WiredTigerUtil::getStatisticsValue(session->getSession(),
- "statistics:table:no_such_table", "statistics=(fast)", WT_STAT_DSRC_BLOCK_SIZE);
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::CursorNotFound, result.getStatus().code());
+ virtual void tearDown() {
+ _opCtx.reset(NULL);
+ _harnessHelper.reset(NULL);
}
- TEST(WiredTigerUtilTest, GetStatisticsValueStatisticsDisabled) {
- WiredTigerUtilHarnessHelper harnessHelper("statistics=(none)");
- WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
- WiredTigerSession* session = recoveryUnit.getSession(NULL);
- WT_SESSION* wtSession = session->getSession();
- ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
- StatusWith<uint64_t> result = WiredTigerUtil::getStatisticsValue(session->getSession(),
- "statistics:table:mytable", "statistics=(fast)", WT_STAT_DSRC_BLOCK_SIZE);
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::CursorNotFound, result.getStatus().code());
+protected:
+ const char* getURI() const {
+ return "table:mytable";
}
- TEST(WiredTigerUtilTest, GetStatisticsValueInvalidKey) {
- WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
- WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
- WiredTigerSession* session = recoveryUnit.getSession(NULL);
- WT_SESSION* wtSession = session->getSession();
- ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
- // Use connection statistics key which does not apply to a table.
- StatusWith<uint64_t> result = WiredTigerUtil::getStatisticsValue(session->getSession(),
- "statistics:table:mytable", "statistics=(fast)", WT_STAT_CONN_SESSION_OPEN);
- ASSERT_NOT_OK(result.getStatus());
- ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+ OperationContext* getOperationContext() const {
+ ASSERT(_opCtx.get());
+ return _opCtx.get();
}
- TEST(WiredTigerUtilTest, GetStatisticsValueValidKey) {
- WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
- WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
- WiredTigerSession* session = recoveryUnit.getSession(NULL);
- WT_SESSION* wtSession = session->getSession();
- ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
- // Use connection statistics key which does not apply to a table.
- StatusWith<uint64_t> result = WiredTigerUtil::getStatisticsValue(session->getSession(),
- "statistics:table:mytable", "statistics=(fast)", WT_STAT_DSRC_LSM_CHUNK_COUNT);
- ASSERT_OK(result.getStatus());
- // Expect statistics value to be zero for a LSM key on a Btree.
- ASSERT_EQUALS(0U, result.getValue());
+ void createSession(const char* config) {
+ WT_SESSION* wtSession =
+ WiredTigerRecoveryUnit::get(_opCtx.get())->getSession(_opCtx.get())->getSession();
+ ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, getURI(), config)));
}
- TEST(WiredTigerUtilTest, GetStatisticsValueAsUInt8) {
- WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
- WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
- WiredTigerSession* session = recoveryUnit.getSession(NULL);
- WT_SESSION* wtSession = session->getSession();
- ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
-
- // Use data source statistics that has a value > 256 on an empty table.
- StatusWith<uint64_t> resultUInt64 = WiredTigerUtil::getStatisticsValue(
- session->getSession(),
- "statistics:table:mytable", "statistics=(fast)", WT_STAT_DSRC_ALLOCATION_SIZE);
- ASSERT_OK(resultUInt64.getStatus());
- ASSERT_GREATER_THAN(resultUInt64.getValue(),
- static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()));
-
- // Ensure that statistics value retrieved as an 8-bit unsigned value
- // is capped at maximum value for that type.
- StatusWith<uint8_t> resultUInt8 = WiredTigerUtil::getStatisticsValueAs<uint8_t>(
- session->getSession(),
- "statistics:table:mytable", "statistics=(fast)", WT_STAT_DSRC_ALLOCATION_SIZE);
- ASSERT_OK(resultUInt8.getStatus());
- ASSERT_EQUALS(std::numeric_limits<uint8_t>::max(), resultUInt8.getValue());
-
- // Read statistics value as signed 16-bit value with alternative maximum value to
- // std::numeric_limits.
- StatusWith<int16_t> resultInt16 = WiredTigerUtil::getStatisticsValueAs<int16_t>(
- session->getSession(),
- "statistics:table:mytable", "statistics=(fast)", WT_STAT_DSRC_ALLOCATION_SIZE,
- static_cast<int16_t>(100));
- ASSERT_OK(resultInt16.getStatus());
- ASSERT_EQUALS(static_cast<uint8_t>(100), resultInt16.getValue());
- }
+private:
+ std::unique_ptr<WiredTigerUtilHarnessHelper> _harnessHelper;
+ std::unique_ptr<OperationContext> _opCtx;
+};
+
+TEST_F(WiredTigerUtilMetadataTest, GetConfigurationStringInvalidURI) {
+ StatusWith<std::string> result = WiredTigerUtil::getMetadata(getOperationContext(), getURI());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+}
+
+TEST_F(WiredTigerUtilMetadataTest, GetConfigurationStringNull) {
+ const char* config = NULL;
+ createSession(config);
+ StatusWith<std::string> result = WiredTigerUtil::getMetadata(getOperationContext(), getURI());
+ ASSERT_OK(result.getStatus());
+ ASSERT_FALSE(result.getValue().empty());
+}
+
+TEST_F(WiredTigerUtilMetadataTest, GetConfigurationStringSimple) {
+ const char* config = "app_metadata=(abc=123)";
+ createSession(config);
+ StatusWith<std::string> result = WiredTigerUtil::getMetadata(getOperationContext(), getURI());
+ ASSERT_OK(result.getStatus());
+ ASSERT_STRING_CONTAINS(result.getValue(), config);
+}
+
+TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataInvalidURI) {
+ StatusWith<BSONObj> result =
+ WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+}
+
+TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataNull) {
+ const char* config = NULL;
+ createSession(config);
+ StatusWith<BSONObj> result =
+ WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
+ ASSERT_OK(result.getStatus());
+ ASSERT_TRUE(result.getValue().isEmpty());
+}
+
+TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataString) {
+ const char* config = "app_metadata=\"abc\"";
+ createSession(config);
+ StatusWith<BSONObj> result =
+ WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
+}
+
+TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataDuplicateKeys) {
+ const char* config = "app_metadata=(abc=123,abc=456)";
+ createSession(config);
+ StatusWith<BSONObj> result =
+ WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey, result.getStatus().code());
+}
+
+TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataTypes) {
+ const char* config =
+ "app_metadata=(stringkey=\"abc\",boolkey1=true,boolkey2=false,"
+ "idkey=def,numkey=123,"
+ "structkey=(k1=v2,k2=v2))";
+ createSession(config);
+ StatusWith<BSONObj> result =
+ WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI());
+ ASSERT_OK(result.getStatus());
+ const BSONObj& obj = result.getValue();
+
+ BSONElement stringElement = obj.getField("stringkey");
+ ASSERT_EQUALS(mongo::String, stringElement.type());
+ ASSERT_EQUALS("abc", stringElement.String());
+
+ BSONElement boolElement1 = obj.getField("boolkey1");
+ ASSERT_TRUE(boolElement1.isBoolean());
+ ASSERT_TRUE(boolElement1.boolean());
+
+ BSONElement boolElement2 = obj.getField("boolkey2");
+ ASSERT_TRUE(boolElement2.isBoolean());
+ ASSERT_FALSE(boolElement2.boolean());
+
+ BSONElement identifierElement = obj.getField("idkey");
+ ASSERT_EQUALS(mongo::String, identifierElement.type());
+ ASSERT_EQUALS("def", identifierElement.String());
+
+ BSONElement numberElement = obj.getField("numkey");
+ ASSERT_TRUE(numberElement.isNumber());
+ ASSERT_EQUALS(123, numberElement.numberInt());
+
+ BSONElement structElement = obj.getField("structkey");
+ ASSERT_EQUALS(mongo::String, structElement.type());
+ ASSERT_EQUALS("(k1=v2,k2=v2)", structElement.String());
+}
+
+TEST_F(WiredTigerUtilMetadataTest, CheckApplicationMetadataFormatVersionMissingKey) {
+ createSession("app_metadata=(abc=123)");
+ ASSERT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ getOperationContext(), getURI(), 1, 1));
+ ASSERT_NOT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ getOperationContext(), getURI(), 2, 2));
+}
+
+TEST_F(WiredTigerUtilMetadataTest, CheckApplicationMetadataFormatVersionString) {
+ createSession("app_metadata=(formatVersion=\"bar\")");
+ ASSERT_NOT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ getOperationContext(), getURI(), 1, 1));
+}
+
+TEST_F(WiredTigerUtilMetadataTest, CheckApplicationMetadataFormatVersionNumber) {
+ createSession("app_metadata=(formatVersion=2)");
+ ASSERT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ getOperationContext(), getURI(), 2, 3));
+ ASSERT_NOT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ getOperationContext(), getURI(), 1, 1));
+ ASSERT_NOT_OK(WiredTigerUtil::checkApplicationMetadataFormatVersion(
+ getOperationContext(), getURI(), 3, 3));
+}
+
+TEST(WiredTigerUtilTest, GetStatisticsValueMissingTable) {
+ WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
+ WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
+ WiredTigerSession* session = recoveryUnit.getSession(NULL);
+ StatusWith<uint64_t> result =
+ WiredTigerUtil::getStatisticsValue(session->getSession(),
+ "statistics:table:no_such_table",
+ "statistics=(fast)",
+ WT_STAT_DSRC_BLOCK_SIZE);
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CursorNotFound, result.getStatus().code());
+}
+
+TEST(WiredTigerUtilTest, GetStatisticsValueStatisticsDisabled) {
+ WiredTigerUtilHarnessHelper harnessHelper("statistics=(none)");
+ WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
+ WiredTigerSession* session = recoveryUnit.getSession(NULL);
+ WT_SESSION* wtSession = session->getSession();
+ ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
+ StatusWith<uint64_t> result = WiredTigerUtil::getStatisticsValue(session->getSession(),
+ "statistics:table:mytable",
+ "statistics=(fast)",
+ WT_STAT_DSRC_BLOCK_SIZE);
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::CursorNotFound, result.getStatus().code());
+}
+
+TEST(WiredTigerUtilTest, GetStatisticsValueInvalidKey) {
+ WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
+ WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
+ WiredTigerSession* session = recoveryUnit.getSession(NULL);
+ WT_SESSION* wtSession = session->getSession();
+ ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
+ // Use connection statistics key which does not apply to a table.
+ StatusWith<uint64_t> result = WiredTigerUtil::getStatisticsValue(session->getSession(),
+ "statistics:table:mytable",
+ "statistics=(fast)",
+ WT_STAT_CONN_SESSION_OPEN);
+ ASSERT_NOT_OK(result.getStatus());
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code());
+}
+
+TEST(WiredTigerUtilTest, GetStatisticsValueValidKey) {
+ WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
+ WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
+ WiredTigerSession* session = recoveryUnit.getSession(NULL);
+ WT_SESSION* wtSession = session->getSession();
+ ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
+ // Use connection statistics key which does not apply to a table.
+ StatusWith<uint64_t> result = WiredTigerUtil::getStatisticsValue(session->getSession(),
+ "statistics:table:mytable",
+ "statistics=(fast)",
+ WT_STAT_DSRC_LSM_CHUNK_COUNT);
+ ASSERT_OK(result.getStatus());
+ // Expect statistics value to be zero for a LSM key on a Btree.
+ ASSERT_EQUALS(0U, result.getValue());
+}
+
+TEST(WiredTigerUtilTest, GetStatisticsValueAsUInt8) {
+ WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)");
+ WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache());
+ WiredTigerSession* session = recoveryUnit.getSession(NULL);
+ WT_SESSION* wtSession = session->getSession();
+ ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL)));
+
+ // Use data source statistics that has a value > 256 on an empty table.
+ StatusWith<uint64_t> resultUInt64 =
+ WiredTigerUtil::getStatisticsValue(session->getSession(),
+ "statistics:table:mytable",
+ "statistics=(fast)",
+ WT_STAT_DSRC_ALLOCATION_SIZE);
+ ASSERT_OK(resultUInt64.getStatus());
+ ASSERT_GREATER_THAN(resultUInt64.getValue(),
+ static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()));
+
+ // Ensure that statistics value retrieved as an 8-bit unsigned value
+ // is capped at maximum value for that type.
+ StatusWith<uint8_t> resultUInt8 =
+ WiredTigerUtil::getStatisticsValueAs<uint8_t>(session->getSession(),
+ "statistics:table:mytable",
+ "statistics=(fast)",
+ WT_STAT_DSRC_ALLOCATION_SIZE);
+ ASSERT_OK(resultUInt8.getStatus());
+ ASSERT_EQUALS(std::numeric_limits<uint8_t>::max(), resultUInt8.getValue());
+
+ // Read statistics value as signed 16-bit value with alternative maximum value to
+ // std::numeric_limits.
+ StatusWith<int16_t> resultInt16 =
+ WiredTigerUtil::getStatisticsValueAs<int16_t>(session->getSession(),
+ "statistics:table:mytable",
+ "statistics=(fast)",
+ WT_STAT_DSRC_ALLOCATION_SIZE,
+ static_cast<int16_t>(100));
+ ASSERT_OK(resultInt16.getStatus());
+ ASSERT_EQUALS(static_cast<uint8_t>(100), resultInt16.getValue());
+}
} // namespace mongo
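
The GetStatisticsValueAsUInt8 test above exercises the capping behaviour of getStatisticsValueAs/_castStatisticsValue: a 64-bit statistic larger than the destination type's maximum (or a caller-supplied maximum) is clamped rather than truncated. A standalone sketch of that clamping, using a hypothetical capStat helper in place of the private MongoDB template:

    #include <cstdint>
    #include <iostream>
    #include <limits>

    // Hypothetical stand-in for _castStatisticsValue: clamp a 64-bit counter to
    // the destination type, or to an explicit maximum.
    template <typename T>
    T capStat(uint64_t value, T maximum = std::numeric_limits<T>::max()) {
        return value > static_cast<uint64_t>(maximum) ? maximum : static_cast<T>(value);
    }

    int main() {
        std::cout << static_cast<int>(capStat<uint8_t>(4096)) << "\n";  // prints 255
        std::cout << capStat<int16_t>(4096, int16_t(100)) << "\n";      // prints 100
        return 0;
    }
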
diff --git a/src/mongo/db/storage_options.cpp b/src/mongo/db/storage_options.cpp
index cb0becdc4e5..5ca50ee7332 100644
--- a/src/mongo/db/storage_options.cpp
+++ b/src/mongo/db/storage_options.cpp
@@ -34,39 +34,33 @@
namespace mongo {
- StorageGlobalParams storageGlobalParams;
+StorageGlobalParams storageGlobalParams;
- /**
- * The directory where the mongod instance stores its data.
- */
+/**
+ * The directory where the mongod instance stores its data.
+ */
#ifdef _WIN32
- const char* StorageGlobalParams::kDefaultDbPath = "\\data\\db\\";
- const char* StorageGlobalParams::kDefaultConfigDbPath = "\\data\\configdb\\";
+const char* StorageGlobalParams::kDefaultDbPath = "\\data\\db\\";
+const char* StorageGlobalParams::kDefaultConfigDbPath = "\\data\\configdb\\";
#else
- const char* StorageGlobalParams::kDefaultDbPath = "/data/db";
- const char* StorageGlobalParams::kDefaultConfigDbPath = "/data/configdb";
+const char* StorageGlobalParams::kDefaultDbPath = "/data/db";
+const char* StorageGlobalParams::kDefaultConfigDbPath = "/data/configdb";
#endif
- /**
- * Specify whether all queries must use indexes.
- * If 1, MongoDB will not execute queries that require a table scan and will return an error.
- * NOT recommended for production use.
- */
- ExportedServerParameter<bool> NoTableScanSetting(ServerParameterSet::getGlobal(),
- "notablescan",
- &storageGlobalParams.noTableScan,
- true,
- true);
+/**
+ * Specify whether all queries must use indexes.
+ * If 1, MongoDB will not execute queries that require a table scan and will return an error.
+ * NOT recommended for production use.
+ */
+ExportedServerParameter<bool> NoTableScanSetting(
+ ServerParameterSet::getGlobal(), "notablescan", &storageGlobalParams.noTableScan, true, true);
- /**
- * Specify the interval in seconds between fsync operations where mongod flushes its
- * working memory to disk. By default, mongod flushes memory to disk every 60 seconds.
- * In almost every situation you should not set this value and use the default setting.
- */
- ExportedServerParameter<double> SyncdelaySetting(ServerParameterSet::getGlobal(),
- "syncdelay",
- &storageGlobalParams.syncdelay,
- true,
- true);
+/**
+ * Specify the interval in seconds between fsync operations where mongod flushes its
+ * working memory to disk. By default, mongod flushes memory to disk every 60 seconds.
+ * In almost every situation you should not set this value and use the default setting.
+ */
+ExportedServerParameter<double> SyncdelaySetting(
+ ServerParameterSet::getGlobal(), "syncdelay", &storageGlobalParams.syncdelay, true, true);
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/storage_options.h b/src/mongo/db/storage_options.h
index 3967af7139a..4ad9cf85034 100644
--- a/src/mongo/db/storage_options.h
+++ b/src/mongo/db/storage_options.h
@@ -39,75 +39,74 @@
namespace mongo {
- struct StorageGlobalParams {
-
- // Default data directory for mongod when running in non-config server mode.
- static const char* kDefaultDbPath;
-
- // Default data directory for mongod when running as the config database of
- // a sharded cluster.
- static const char* kDefaultConfigDbPath;
-
- StorageGlobalParams() :
- engine("wiredTiger"),
- engineSetByUser(false),
- dbpath(kDefaultDbPath),
- upgrade(false),
- repair(false),
- noTableScan(false),
- directoryperdb(false),
- syncdelay(60.0) {
- dur = false;
- if (sizeof(void*) == 8)
- dur = true;
- }
-
- // --storageEngine
- // storage engine for this instance of mongod.
- std::string engine;
-
- // True if --storageEngine was passed on the command line, and false otherwise.
- bool engineSetByUser;
-
- // The directory where the mongod instance stores its data.
- std::string dbpath;
-
- // --upgrade
- // Upgrades the on-disk data format of the files specified by the --dbpath to the
- // latest version, if needed.
- bool upgrade;
-
- // --repair
- // Runs a repair routine on all databases. This is equivalent to shutting down and
- // running the repairDatabase database command on all databases.
- bool repair;
-
- // --repairpath
- // Specifies the root directory containing MongoDB data files to use for the --repair
- // operation.
- // Default: A _tmp directory within the path specified by the dbPath option.
- std::string repairpath;
-
- bool dur; // --dur durability (now --journal)
-
- // --notablescan
- // no table scans allowed
- bool noTableScan;
-
- // --directoryperdb
- // Stores each database’s files in its own folder in the data directory.
- // When applied to an existing system, the directoryPerDB option alters
- // the storage pattern of the data directory.
- bool directoryperdb;
-
- // --syncdelay
- // Controls how much time can pass before MongoDB flushes data to the data files
- // via an fsync operation.
- // Do not set this value on production systems.
- // In almost every situation, you should use the default setting.
- double syncdelay; // seconds between fsyncs
- };
-
- extern StorageGlobalParams storageGlobalParams;
-
-} // namespace mongo
+struct StorageGlobalParams {
+ // Default data directory for mongod when running in non-config server mode.
+ static const char* kDefaultDbPath;
+
+ // Default data directory for mongod when running as the config database of
+ // a sharded cluster.
+ static const char* kDefaultConfigDbPath;
+
+ StorageGlobalParams()
+ : engine("wiredTiger"),
+ engineSetByUser(false),
+ dbpath(kDefaultDbPath),
+ upgrade(false),
+ repair(false),
+ noTableScan(false),
+ directoryperdb(false),
+ syncdelay(60.0) {
+ dur = false;
+ if (sizeof(void*) == 8)
+ dur = true;
+ }
+
+ // --storageEngine
+ // storage engine for this instance of mongod.
+ std::string engine;
+
+ // True if --storageEngine was passed on the command line, and false otherwise.
+ bool engineSetByUser;
+
+ // The directory where the mongod instance stores its data.
+ std::string dbpath;
+
+ // --upgrade
+ // Upgrades the on-disk data format of the files specified by the --dbpath to the
+ // latest version, if needed.
+ bool upgrade;
+
+ // --repair
+ // Runs a repair routine on all databases. This is equivalent to shutting down and
+ // running the repairDatabase database command on all databases.
+ bool repair;
+
+ // --repairpath
+ // Specifies the root directory containing MongoDB data files to use for the --repair
+ // operation.
+ // Default: A _tmp directory within the path specified by the dbPath option.
+ std::string repairpath;
+
+ bool dur; // --dur durability (now --journal)
+
+ // --notablescan
+ // no table scans allowed
+ bool noTableScan;
+
+ // --directoryperdb
+ // Stores each database’s files in its own folder in the data directory.
+ // When applied to an existing system, the directoryPerDB option alters
+ // the storage pattern of the data directory.
+ bool directoryperdb;
+
+ // --syncdelay
+ // Controls how much time can pass before MongoDB flushes data to the data files
+ // via an fsync operation.
+ // Do not set this value on production systems.
+ // In almost every situation, you should use the default setting.
+ double syncdelay; // seconds between fsyncs
+};
+
+extern StorageGlobalParams storageGlobalParams;
+
+} // namespace mongo
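
The StorageGlobalParams constructor above enables journaling by default only on 64-bit builds, using the pointer size as the architecture check. A minimal sketch of that check in isolation:

    #include <iostream>

    int main() {
        // Same heuristic as the constructor: an 8-byte pointer means a 64-bit process.
        bool dur = false;
        if (sizeof(void*) == 8)
            dur = true;
        std::cout << "journal on by default: " << (dur ? "yes" : "no") << "\n";
        return 0;
    }
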
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index e680d6bd671..383f276d5a3 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -60,298 +60,282 @@
namespace mongo {
- using std::set;
- using std::endl;
- using std::list;
- using std::string;
- using std::vector;
- using std::unique_ptr;
-
- Counter64 ttlPasses;
- Counter64 ttlDeletedDocuments;
-
- ServerStatusMetricField<Counter64> ttlPassesDisplay("ttl.passes", &ttlPasses);
- ServerStatusMetricField<Counter64> ttlDeletedDocumentsDisplay("ttl.deletedDocuments", &ttlDeletedDocuments);
-
- MONGO_EXPORT_SERVER_PARAMETER( ttlMonitorEnabled, bool, true );
- MONGO_EXPORT_SERVER_PARAMETER( ttlMonitorSleepSecs, int, 60 ); //used for testing
-
- class TTLMonitor : public BackgroundJob {
- public:
- TTLMonitor(){}
- virtual ~TTLMonitor(){}
-
- virtual string name() const { return "TTLMonitor"; }
-
- static string secondsExpireField;
+using std::set;
+using std::endl;
+using std::list;
+using std::string;
+using std::vector;
+using std::unique_ptr;
+
+Counter64 ttlPasses;
+Counter64 ttlDeletedDocuments;
+
+ServerStatusMetricField<Counter64> ttlPassesDisplay("ttl.passes", &ttlPasses);
+ServerStatusMetricField<Counter64> ttlDeletedDocumentsDisplay("ttl.deletedDocuments",
+ &ttlDeletedDocuments);
+
+MONGO_EXPORT_SERVER_PARAMETER(ttlMonitorEnabled, bool, true);
+MONGO_EXPORT_SERVER_PARAMETER(ttlMonitorSleepSecs, int, 60); // used for testing
+
+class TTLMonitor : public BackgroundJob {
+public:
+ TTLMonitor() {}
+ virtual ~TTLMonitor() {}
+
+ virtual string name() const {
+ return "TTLMonitor";
+ }
- virtual void run() {
- Client::initThread( name().c_str() );
- AuthorizationSession::get(cc())->grantInternalAuthorization();
+ static string secondsExpireField;
- while ( ! inShutdown() ) {
- sleepsecs( ttlMonitorSleepSecs );
+ virtual void run() {
+ Client::initThread(name().c_str());
+ AuthorizationSession::get(cc())->grantInternalAuthorization();
- LOG(3) << "TTLMonitor thread awake" << endl;
+ while (!inShutdown()) {
+ sleepsecs(ttlMonitorSleepSecs);
- if ( !ttlMonitorEnabled ) {
- LOG(1) << "TTLMonitor is disabled" << endl;
- continue;
- }
+ LOG(3) << "TTLMonitor thread awake" << endl;
- if ( lockedForWriting() ) {
- // note: this is not perfect as you can go into fsync+lock between
- // this and actually doing the delete later
- LOG(3) << " locked for writing" << endl;
- continue;
- }
+ if (!ttlMonitorEnabled) {
+ LOG(1) << "TTLMonitor is disabled" << endl;
+ continue;
+ }
- try {
- doTTLPass();
- }
- catch ( const WriteConflictException& e ) {
- LOG(1) << "Got WriteConflictException in TTL thread";
- }
+ if (lockedForWriting()) {
+ // note: this is not perfect as you can go into fsync+lock between
+ // this and actually doing the delete later
+ LOG(3) << " locked for writing" << endl;
+ continue;
+ }
+ try {
+ doTTLPass();
+ } catch (const WriteConflictException& e) {
+ LOG(1) << "Got WriteConflictException in TTL thread";
}
}
+ }
- private:
-
- void doTTLPass() {
- // Count it as active from the moment the TTL thread wakes up
- OperationContextImpl txn;
+private:
+ void doTTLPass() {
+ // Count it as active from the moment the TTL thread wakes up
+ OperationContextImpl txn;
- // if part of replSet but not in a readable state (e.g. during initial sync), skip.
- if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
+ // if part of replSet but not in a readable state (e.g. during initial sync), skip.
+ if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
repl::ReplicationCoordinator::modeReplSet &&
- !repl::getGlobalReplicationCoordinator()->getMemberState().readable())
- return;
-
- set<string> dbs;
- dbHolder().getAllShortNames( dbs );
+ !repl::getGlobalReplicationCoordinator()->getMemberState().readable())
+ return;
- ttlPasses.increment();
+ set<string> dbs;
+ dbHolder().getAllShortNames(dbs);
- for ( set<string>::const_iterator i=dbs.begin(); i!=dbs.end(); ++i ) {
- string db = *i;
+ ttlPasses.increment();
- vector<BSONObj> indexes;
- getTTLIndexesForDB(&txn, db, &indexes);
+ for (set<string>::const_iterator i = dbs.begin(); i != dbs.end(); ++i) {
+ string db = *i;
- for ( vector<BSONObj>::const_iterator it = indexes.begin();
- it != indexes.end(); ++it ) {
+ vector<BSONObj> indexes;
+ getTTLIndexesForDB(&txn, db, &indexes);
- BSONObj idx = *it;
- try {
- if ( !doTTLForIndex( &txn, db, idx ) ) {
- break; // stop processing TTL indexes on this database
- }
- } catch (const DBException& dbex) {
- error() << "Error processing ttl index: " << idx
- << " -- " << dbex.toString();
- // continue on to the next index
- continue;
+ for (vector<BSONObj>::const_iterator it = indexes.begin(); it != indexes.end(); ++it) {
+ BSONObj idx = *it;
+ try {
+ if (!doTTLForIndex(&txn, db, idx)) {
+ break; // stop processing TTL indexes on this database
}
+ } catch (const DBException& dbex) {
+ error() << "Error processing ttl index: " << idx << " -- " << dbex.toString();
+ // continue on to the next index
+ continue;
}
}
}
+ }
- /**
- * Acquire an IS-mode lock on the specified database and for each
- * collection in the database, append the specification of all
- * TTL indexes on those collections to the supplied vector.
- *
- * The index specifications are grouped by the collection to which
- * they belong.
- */
- void getTTLIndexesForDB( OperationContext* txn, const string& dbName,
- vector<BSONObj>* indexes ) {
-
- invariant( indexes && indexes->empty() );
- ScopedTransaction transaction( txn, MODE_IS );
- Lock::DBLock dbLock( txn->lockState(), dbName, MODE_IS );
-
- Database* db = dbHolder().get( txn, dbName );
- if ( !db ) {
- return; // skip since database no longer exists
- }
-
- const DatabaseCatalogEntry* dbEntry = db->getDatabaseCatalogEntry();
+ /**
+ * Acquire an IS-mode lock on the specified database and for each
+ * collection in the database, append the specification of all
+ * TTL indexes on those collections to the supplied vector.
+ *
+ * The index specifications are grouped by the collection to which
+ * they belong.
+ */
+ void getTTLIndexesForDB(OperationContext* txn, const string& dbName, vector<BSONObj>* indexes) {
+ invariant(indexes && indexes->empty());
+ ScopedTransaction transaction(txn, MODE_IS);
+ Lock::DBLock dbLock(txn->lockState(), dbName, MODE_IS);
+
+ Database* db = dbHolder().get(txn, dbName);
+ if (!db) {
+ return; // skip since database no longer exists
+ }
- list<string> namespaces;
- dbEntry->getCollectionNamespaces( &namespaces );
+ const DatabaseCatalogEntry* dbEntry = db->getDatabaseCatalogEntry();
- for ( list<string>::const_iterator it = namespaces.begin();
- it != namespaces.end(); ++it ) {
+ list<string> namespaces;
+ dbEntry->getCollectionNamespaces(&namespaces);
- string ns = *it;
- Lock::CollectionLock collLock( txn->lockState(), ns, MODE_IS );
- CollectionCatalogEntry* coll = dbEntry->getCollectionCatalogEntry( ns );
+ for (list<string>::const_iterator it = namespaces.begin(); it != namespaces.end(); ++it) {
+ string ns = *it;
+ Lock::CollectionLock collLock(txn->lockState(), ns, MODE_IS);
+ CollectionCatalogEntry* coll = dbEntry->getCollectionCatalogEntry(ns);
- if ( !coll ) {
- continue; // skip since collection not found in catalog
- }
+ if (!coll) {
+ continue; // skip since collection not found in catalog
+ }
- vector<string> indexNames;
- coll->getAllIndexes( txn, &indexNames );
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- const string& name = indexNames[i];
- BSONObj spec = coll->getIndexSpec( txn, name );
+ vector<string> indexNames;
+ coll->getAllIndexes(txn, &indexNames);
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ const string& name = indexNames[i];
+ BSONObj spec = coll->getIndexSpec(txn, name);
- if ( spec.hasField( secondsExpireField ) ) {
- indexes->push_back( spec.getOwned() );
- }
+ if (spec.hasField(secondsExpireField)) {
+ indexes->push_back(spec.getOwned());
}
}
}
+ }
- /**
- * Remove documents from the collection using the specified TTL index
- * after a sufficient amount of time has passed according to its expiry
- * specification.
- *
- * @return true if caller should continue processing TTL indexes of collections
- * on the specified database, and false otherwise
- */
- bool doTTLForIndex(OperationContext* txn, const string& dbName, BSONObj idx) {
- const string ns = idx["ns"].String();
- NamespaceString nss(ns);
- if (!userAllowedWriteNS(nss).isOK()) {
- error() << "namespace '" << ns << "' doesn't allow deletes, skipping ttl job for: "
- << idx;
- return true;
- }
-
- BSONObj key = idx["key"].Obj();
- if (key.nFields() != 1) {
- error() << "key for ttl index can only have 1 field, skipping ttl job for: " << idx;
- return true;
- }
+ /**
+ * Remove documents from the collection using the specified TTL index
+ * after a sufficient amount of time has passed according to its expiry
+ * specification.
+ *
+ * @return true if caller should continue processing TTL indexes of collections
+ * on the specified database, and false otherwise
+ */
+ bool doTTLForIndex(OperationContext* txn, const string& dbName, BSONObj idx) {
+ const string ns = idx["ns"].String();
+ NamespaceString nss(ns);
+ if (!userAllowedWriteNS(nss).isOK()) {
+ error() << "namespace '" << ns
+ << "' doesn't allow deletes, skipping ttl job for: " << idx;
+ return true;
+ }
- LOG(1) << "TTL -- ns: " << ns << " key: " << key;
+ BSONObj key = idx["key"].Obj();
+ if (key.nFields() != 1) {
+ error() << "key for ttl index can only have 1 field, skipping ttl job for: " << idx;
+ return true;
+ }
- // Read the current time outside of the while loop, so that we don't expand our index
- // bounds after every WriteConflictException.
- const Date_t now = Date_t::now();
+ LOG(1) << "TTL -- ns: " << ns << " key: " << key;
- long long numDeleted = 0;
- int attempt = 1;
- while (1) {
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_IX);
- Database* db = autoDb.getDb();
- if (!db) {
- return false;
- }
+ // Read the current time outside of the while loop, so that we don't expand our index
+ // bounds after every WriteConflictException.
+ const Date_t now = Date_t::now();
- Lock::CollectionLock collLock(txn->lockState(), ns, MODE_IX);
+ long long numDeleted = 0;
+ int attempt = 1;
+ while (1) {
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbName, MODE_IX);
+ Database* db = autoDb.getDb();
+ if (!db) {
+ return false;
+ }
- Collection* collection = db->getCollection(ns);
- if (!collection) {
- // Collection was dropped.
- return true;
- }
+ Lock::CollectionLock collLock(txn->lockState(), ns, MODE_IX);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- // We've stepped down since we started this function, so we should stop working
- // as we only do deletes on the primary.
- return false;
- }
+ Collection* collection = db->getCollection(ns);
+ if (!collection) {
+ // Collection was dropped.
+ return true;
+ }
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByKeyPattern(txn,
- key);
- if (!desc) {
- LOG(1) << "index not found (index build in progress? index dropped?), skipping "
- << "ttl job for: " << idx;
- return true;
- }
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
+ // We've stepped down since we started this function, so we should stop working
+ // as we only do deletes on the primary.
+ return false;
+ }
- // Re-read 'idx' from the descriptor, in case the collection or index definition
- // changed before we re-acquired the collection lock.
- idx = desc->infoObj();
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByKeyPattern(txn, key);
+ if (!desc) {
+ LOG(1) << "index not found (index build in progress? index dropped?), skipping "
+ << "ttl job for: " << idx;
+ return true;
+ }
- if (IndexType::INDEX_BTREE != IndexNames::nameToType(desc->getAccessMethodName())) {
- error() << "special index can't be used as a ttl index, skipping ttl job for: "
- << idx;
- return true;
- }
+ // Re-read 'idx' from the descriptor, in case the collection or index definition
+ // changed before we re-acquired the collection lock.
+ idx = desc->infoObj();
- BSONElement secondsExpireElt = idx[secondsExpireField];
- if (!secondsExpireElt.isNumber()) {
- error() << "ttl indexes require the " << secondsExpireField << " field to be "
- << "numeric but received a type of "
- << typeName(secondsExpireElt.type()) << ", skipping ttl job for: "
- << idx;
- return true;
- }
+ if (IndexType::INDEX_BTREE != IndexNames::nameToType(desc->getAccessMethodName())) {
+ error() << "special index can't be used as a ttl index, skipping ttl job for: "
+ << idx;
+ return true;
+ }
- const Date_t kDawnOfTime =
- Date_t::fromMillisSinceEpoch(std::numeric_limits<long long>::min());
- const BSONObj startKey = BSON("" << kDawnOfTime);
- const BSONObj endKey =
- BSON("" << now - Seconds(secondsExpireElt.numberLong()));
- const bool endKeyInclusive = true;
- // The canonical check as to whether a key pattern element is "ascending" or
- // "descending" is (elt.number() >= 0). This is defined by the Ordering class.
- const InternalPlanner::Direction direction =
- (key.firstElement().number() >= 0) ? InternalPlanner::Direction::FORWARD
- : InternalPlanner::Direction::BACKWARD;
- unique_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn,
- collection,
- desc,
- startKey,
- endKey,
- endKeyInclusive,
- direction));
- exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ BSONElement secondsExpireElt = idx[secondsExpireField];
+ if (!secondsExpireElt.isNumber()) {
+ error() << "ttl indexes require the " << secondsExpireField << " field to be "
+ << "numeric but received a type of " << typeName(secondsExpireElt.type())
+ << ", skipping ttl job for: " << idx;
+ return true;
+ }
- try {
- PlanExecutor::ExecState state;
- BSONObj obj;
- RecordId rid;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, &rid))) {
- exec->saveState();
- {
- WriteUnitOfWork wunit(txn);
- collection->deleteDocument(txn, rid);
- wunit.commit();
- }
- ++numDeleted;
- ttlDeletedDocuments.increment();
- if (!exec->restoreState(txn)) {
- return true;
- }
+ const Date_t kDawnOfTime =
+ Date_t::fromMillisSinceEpoch(std::numeric_limits<long long>::min());
+ const BSONObj startKey = BSON("" << kDawnOfTime);
+ const BSONObj endKey = BSON("" << now - Seconds(secondsExpireElt.numberLong()));
+ const bool endKeyInclusive = true;
+ // The canonical check as to whether a key pattern element is "ascending" or
+ // "descending" is (elt.number() >= 0). This is defined by the Ordering class.
+ const InternalPlanner::Direction direction = (key.firstElement().number() >= 0)
+ ? InternalPlanner::Direction::FORWARD
+ : InternalPlanner::Direction::BACKWARD;
+ unique_ptr<PlanExecutor> exec(InternalPlanner::indexScan(
+ txn, collection, desc, startKey, endKey, endKeyInclusive, direction));
+ exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ try {
+ PlanExecutor::ExecState state;
+ BSONObj obj;
+ RecordId rid;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, &rid))) {
+ exec->saveState();
+ {
+ WriteUnitOfWork wunit(txn);
+ collection->deleteDocument(txn, rid);
+ wunit.commit();
}
-
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- if (WorkingSetCommon::isValidStatusMemberObject(obj)) {
- error() << "ttl query execution for index " << idx << " failed with: "
- << WorkingSetCommon::getMemberObjectStatus(obj);
- return true;
- }
- error() << "ttl query execution for index " << idx << " failed with state: "
- << PlanExecutor::statestr(state);
+ ++numDeleted;
+ ttlDeletedDocuments.increment();
+ if (!exec->restoreState(txn)) {
return true;
}
-
- invariant(PlanExecutor::IS_EOF == state);
- break;
}
- catch (const WriteConflictException& dle) {
- WriteConflictException::logAndBackoff(attempt++, "ttl", ns);
+
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ if (WorkingSetCommon::isValidStatusMemberObject(obj)) {
+ error() << "ttl query execution for index " << idx
+ << " failed with: " << WorkingSetCommon::getMemberObjectStatus(obj);
+ return true;
+ }
+ error() << "ttl query execution for index " << idx
+ << " failed with state: " << PlanExecutor::statestr(state);
+ return true;
}
- }
- LOG(1) << "\tTTL deleted: " << numDeleted << endl;
- return true;
+ invariant(PlanExecutor::IS_EOF == state);
+ break;
+ } catch (const WriteConflictException& dle) {
+ WriteConflictException::logAndBackoff(attempt++, "ttl", ns);
+ }
}
- };
- void startTTLBackgroundJob() {
- TTLMonitor* ttl = new TTLMonitor();
- ttl->go();
+ LOG(1) << "\tTTL deleted: " << numDeleted << endl;
+ return true;
}
+};
+
+void startTTLBackgroundJob() {
+ TTLMonitor* ttl = new TTLMonitor();
+ ttl->go();
+}
- string TTLMonitor::secondsExpireField = "expireAfterSeconds";
+string TTLMonitor::secondsExpireField = "expireAfterSeconds";
}
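
The delete pass above computes its index-scan bounds once per collection: everything from the dawn of time up to now - expireAfterSeconds is eligible, and the scan runs forward or backward depending on the sign of the key element. Below is a minimal standalone sketch of that bound computation, using only <chrono> and illustrative names rather than the server's Date_t/BSON types:

#include <chrono>
#include <cstdint>
#include <iostream>

int main() {
    using namespace std::chrono;

    const std::int64_t expireAfterSeconds = 3600;  // taken from the index spec
    const int keyDirection = 1;                    // e.g. {lastAccessed: 1}

    // Documents whose indexed date falls in [dawn of time, cutoff] are
    // eligible for deletion on this pass.
    const auto now = system_clock::now();
    const auto cutoff = now - seconds(expireAfterSeconds);

    // Same rule as the Ordering-based check above: >= 0 means ascending.
    const bool forwardScan = keyDirection >= 0;

    std::cout << "cutoff (seconds since epoch): "
              << duration_cast<seconds>(cutoff.time_since_epoch()).count()
              << ", scan direction: " << (forwardScan ? "FORWARD" : "BACKWARD")
              << '\n';
    return 0;
}

Reading the clock once, outside the retry loop, matches the comment above: a WriteConflictException retry must not widen the bound.
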
diff --git a/src/mongo/db/ttl.h b/src/mongo/db/ttl.h
index dc64fb09349..c015f394ce1 100644
--- a/src/mongo/db/ttl.h
+++ b/src/mongo/db/ttl.h
@@ -31,5 +31,5 @@
#pragma once
namespace mongo {
- void startTTLBackgroundJob();
+void startTTLBackgroundJob();
}
diff --git a/src/mongo/db/update_index_data.cpp b/src/mongo/db/update_index_data.cpp
index 3a6dcb56676..9d66a4a7a6e 100644
--- a/src/mongo/db/update_index_data.cpp
+++ b/src/mongo/db/update_index_data.cpp
@@ -34,144 +34,136 @@
namespace mongo {
- using std::string;
+using std::string;
- UpdateIndexData::UpdateIndexData() : _allPathsIndexed( false ) { }
+UpdateIndexData::UpdateIndexData() : _allPathsIndexed(false) {}
- void UpdateIndexData::addPath( StringData path ) {
- string s;
- if ( getCanonicalIndexField( path, &s ) ) {
- _canonicalPaths.insert( s );
- }
- else {
- _canonicalPaths.insert( path.toString() );
- }
+void UpdateIndexData::addPath(StringData path) {
+ string s;
+ if (getCanonicalIndexField(path, &s)) {
+ _canonicalPaths.insert(s);
+ } else {
+ _canonicalPaths.insert(path.toString());
}
+}
- void UpdateIndexData::addPathComponent( StringData pathComponent ) {
- _pathComponents.insert( pathComponent.toString() );
- }
+void UpdateIndexData::addPathComponent(StringData pathComponent) {
+ _pathComponents.insert(pathComponent.toString());
+}
- void UpdateIndexData::allPathsIndexed() {
- _allPathsIndexed = true;
- }
+void UpdateIndexData::allPathsIndexed() {
+ _allPathsIndexed = true;
+}
- void UpdateIndexData::clear() {
- _canonicalPaths.clear();
- _pathComponents.clear();
- _allPathsIndexed = false;
- }
+void UpdateIndexData::clear() {
+ _canonicalPaths.clear();
+ _pathComponents.clear();
+ _allPathsIndexed = false;
+}
- bool UpdateIndexData::mightBeIndexed( StringData path ) const {
- if ( _allPathsIndexed ) {
- return true;
- }
+bool UpdateIndexData::mightBeIndexed(StringData path) const {
+ if (_allPathsIndexed) {
+ return true;
+ }
- StringData use = path;
- string x;
- if ( getCanonicalIndexField( path, &x ) )
- use = StringData( x );
+ StringData use = path;
+ string x;
+ if (getCanonicalIndexField(path, &x))
+ use = StringData(x);
- for ( std::set<string>::const_iterator i = _canonicalPaths.begin();
- i != _canonicalPaths.end();
- ++i ) {
+ for (std::set<string>::const_iterator i = _canonicalPaths.begin(); i != _canonicalPaths.end();
+ ++i) {
+ StringData idx(*i);
- StringData idx( *i );
+ if (_startsWith(use, idx))
+ return true;
- if ( _startsWith( use, idx ) )
- return true;
+ if (_startsWith(idx, use))
+ return true;
+ }
- if ( _startsWith( idx, use ) )
+ FieldRef pathFieldRef(path);
+ for (std::set<string>::const_iterator i = _pathComponents.begin(); i != _pathComponents.end();
+ ++i) {
+ const string& pathComponent = *i;
+ for (size_t partIdx = 0; partIdx < pathFieldRef.numParts(); ++partIdx) {
+ if (pathComponent == pathFieldRef.getPart(partIdx)) {
return true;
- }
-
- FieldRef pathFieldRef( path );
- for ( std::set<string>::const_iterator i = _pathComponents.begin();
- i != _pathComponents.end();
- ++i ) {
- const string& pathComponent = *i;
- for ( size_t partIdx = 0; partIdx < pathFieldRef.numParts(); ++partIdx ) {
- if ( pathComponent == pathFieldRef.getPart( partIdx ) ) {
- return true;
- }
}
}
+ }
+
+ return false;
+}
+bool UpdateIndexData::_startsWith(StringData a, StringData b) const {
+ if (!a.startsWith(b))
return false;
- }
- bool UpdateIndexData::_startsWith( StringData a, StringData b ) const {
- if ( !a.startsWith( b ) )
- return false;
+ // make sure there is a dot or EOL right after
- // make sure there is a dot or EOL right after
+ if (a.size() == b.size())
+ return true;
- if ( a.size() == b.size() )
- return true;
+ return a[b.size()] == '.';
+}
- return a[b.size()] == '.';
- }
+bool getCanonicalIndexField(StringData fullName, string* out) {
+ // check if fieldName contains ".$" or ".###" substrings (#=digit) and skip them
+ // however do not skip the first field even if it meets these criteria
- bool getCanonicalIndexField( StringData fullName, string* out ) {
- // check if fieldName contains ".$" or ".###" substrings (#=digit) and skip them
- // however do not skip the first field even if it meets these criteria
+ if (fullName.find('.') == string::npos)
+ return false;
- if ( fullName.find( '.' ) == string::npos )
- return false;
+ bool modified = false;
- bool modified = false;
+ StringBuilder buf;
+ for (size_t i = 0; i < fullName.size(); i++) {
+ char c = fullName[i];
- StringBuilder buf;
- for ( size_t i=0; i<fullName.size(); i++ ) {
+ if (c != '.') {
+ buf << c;
+ continue;
+ }
- char c = fullName[i];
+ if (i + 1 == fullName.size()) {
+ // ends with '.'
+ buf << c;
+ continue;
+ }
- if ( c != '.' ) {
- buf << c;
+ // check for ".$", skip if present
+ if (fullName[i + 1] == '$') {
+            // only do this if it's not something like $a
+ if (i + 2 >= fullName.size() || fullName[i + 2] == '.') {
+ i++;
+ modified = true;
continue;
}
+ }
- if ( i + 1 == fullName.size() ) {
- // ends with '.'
- buf << c;
+ // check for ".###" for any number of digits (no letters)
+ if (isdigit(fullName[i + 1])) {
+ size_t j = i;
+ // skip digits
+ while (j + 1 < fullName.size() && isdigit(fullName[j + 1]))
+ j++;
+
+ if (j + 1 == fullName.size() || fullName[j + 1] == '.') {
+ // only digits found, skip forward
+ i = j;
+ modified = true;
continue;
}
-
- // check for ".$", skip if present
- if ( fullName[i+1] == '$' ) {
- // only do this if its not something like $a
- if ( i + 2 >= fullName.size() || fullName[i+2] == '.' ) {
- i++;
- modified = true;
- continue;
- }
- }
-
- // check for ".###" for any number of digits (no letters)
- if ( isdigit( fullName[i+1] ) ) {
- size_t j = i;
- // skip digits
- while ( j+1 < fullName.size() && isdigit( fullName[j+1] ) )
- j++;
-
- if ( j+1 == fullName.size() || fullName[j+1] == '.' ) {
- // only digits found, skip forward
- i = j;
- modified = true;
- continue;
- }
- }
-
- buf << c;
}
- if ( !modified )
- return false;
-
- *out = buf.str();
- return true;
+ buf << c;
}
+ if (!modified)
+ return false;
+ *out = buf.str();
+ return true;
+}
}
-
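
For reference, the canonicalization rule implemented above: positional "$" components and purely numeric components after the first field are dropped, so "a.$.b" and "a.0.b" both canonicalize to "a.b", while "a.$ref" is left untouched. A standalone std::string sketch of the same rule (not the StringData/StringBuilder code above):

#include <cctype>
#include <iostream>
#include <string>

static bool canonicalize(const std::string& fullName, std::string* out) {
    if (fullName.find('.') == std::string::npos)
        return false;

    bool modified = false;
    std::string buf;
    for (size_t i = 0; i < fullName.size(); i++) {
        char c = fullName[i];
        // Non-dot characters, and a trailing dot, are copied through.
        if (c != '.' || i + 1 == fullName.size()) {
            buf += c;
            continue;
        }
        // ".$" component (but not something like ".$ref") is skipped.
        if (fullName[i + 1] == '$' &&
            (i + 2 >= fullName.size() || fullName[i + 2] == '.')) {
            i++;
            modified = true;
            continue;
        }
        // ".123" component (digits only up to the next dot or the end) is skipped.
        if (isdigit(static_cast<unsigned char>(fullName[i + 1]))) {
            size_t j = i;
            while (j + 1 < fullName.size() &&
                   isdigit(static_cast<unsigned char>(fullName[j + 1])))
                j++;
            if (j + 1 == fullName.size() || fullName[j + 1] == '.') {
                i = j;
                modified = true;
                continue;
            }
        }
        buf += c;
    }
    if (!modified)
        return false;
    *out = buf;
    return true;
}

int main() {
    std::string out;
    std::cout << canonicalize("a.$.b", &out) << " " << out << '\n';  // 1 a.b
    std::cout << canonicalize("a.123", &out) << " " << out << '\n';  // 1 a
    std::cout << canonicalize("a.$ref", &out) << '\n';               // 0
    return 0;
}
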
diff --git a/src/mongo/db/update_index_data.h b/src/mongo/db/update_index_data.h
index d23ee144a67..52113663df0 100644
--- a/src/mongo/db/update_index_data.h
+++ b/src/mongo/db/update_index_data.h
@@ -36,51 +36,49 @@
namespace mongo {
- /**
- * a.$ -> a
- * @return true if out is set and we made a change
- */
- bool getCanonicalIndexField( StringData fullName, std::string* out );
+/**
+ * a.$ -> a
+ * @return true if out is set and we made a change
+ */
+bool getCanonicalIndexField(StringData fullName, std::string* out);
+/**
+ * Holds pre-processed index spec information to allow update to quickly determine if an update
+ * can be applied as a delta to a document, or if the document must be re-indexed.
+ */
+class UpdateIndexData {
+public:
+ UpdateIndexData();
+
/**
- * Holds pre-processed index spec information to allow update to quickly determine if an update
- * can be applied as a delta to a document, or if the document must be re-indexed.
+ * Register a path. Any update targeting this path (or a parent of this path) will
+ * trigger a recomputation of the document's index keys.
*/
- class UpdateIndexData {
- public:
- UpdateIndexData();
-
- /**
- * Register a path. Any update targeting this path (or a parent of this path) will
- * trigger a recomputation of the document's index keys.
- */
- void addPath( StringData path );
+ void addPath(StringData path);
- /**
- * Register a path component. Any update targeting a path that contains this exact
- * component will trigger a recomputation of the document's index keys.
- */
- void addPathComponent( StringData pathComponent );
-
- /**
- * Register the "wildcard" path. All updates will trigger a recomputation of the document's
- * index keys.
- */
- void allPathsIndexed();
-
- void clear();
+ /**
+ * Register a path component. Any update targeting a path that contains this exact
+ * component will trigger a recomputation of the document's index keys.
+ */
+ void addPathComponent(StringData pathComponent);
- bool mightBeIndexed( StringData path ) const;
+ /**
+ * Register the "wildcard" path. All updates will trigger a recomputation of the document's
+ * index keys.
+ */
+ void allPathsIndexed();
- private:
+ void clear();
- bool _startsWith( StringData a, StringData b ) const;
+ bool mightBeIndexed(StringData path) const;
- std::set<std::string> _canonicalPaths;
- std::set<std::string> _pathComponents;
+private:
+ bool _startsWith(StringData a, StringData b) const;
- bool _allPathsIndexed;
- };
+ std::set<std::string> _canonicalPaths;
+ std::set<std::string> _pathComponents;
+ bool _allPathsIndexed;
+};
}

diff --git a/src/mongo/db/update_index_data_test.cpp b/src/mongo/db/update_index_data_test.cpp
index d011889e8fa..167cb632ab1 100644
--- a/src/mongo/db/update_index_data_test.cpp
+++ b/src/mongo/db/update_index_data_test.cpp
@@ -33,98 +33,97 @@
namespace mongo {
- using std::string;
-
- TEST( UpdateIndexDataTest, Simple1 ) {
- UpdateIndexData a;
- a.addPath( "a.b" );
- ASSERT_TRUE( a.mightBeIndexed( "a.b" ) );
- ASSERT_TRUE( a.mightBeIndexed( "a" ) );
- ASSERT_TRUE( a.mightBeIndexed( "a.b.c" ) );
- ASSERT_TRUE( a.mightBeIndexed( "a.$.b" ) );
-
- ASSERT_FALSE( a.mightBeIndexed( "b" ) );
- ASSERT_FALSE( a.mightBeIndexed( "a.c" ) );
-
- a.clear();
- ASSERT_FALSE( a.mightBeIndexed( "a.b" ) );
- }
-
- TEST( UpdateIndexDataTest, Simple2 ) {
- UpdateIndexData a;
- a.addPath( "ab" );
- ASSERT_FALSE( a.mightBeIndexed( "a" ) );
- a.clear();
- ASSERT_FALSE( a.mightBeIndexed( "ab" ) );
- }
-
- TEST( UpdateIndexDataTest, Component1 ) {
- UpdateIndexData a;
- a.addPathComponent( "a" );
- ASSERT_FALSE( a.mightBeIndexed( "" ) );
- ASSERT_TRUE( a.mightBeIndexed( "a" ) );
- ASSERT_TRUE( a.mightBeIndexed( "b.a" ) );
- ASSERT_TRUE( a.mightBeIndexed( "a.b" ) );
- ASSERT_TRUE( a.mightBeIndexed( "b.a.c" ) );
- ASSERT_FALSE( a.mightBeIndexed( "b.c" ) );
- ASSERT_FALSE( a.mightBeIndexed( "ab" ) );
- a.clear();
- ASSERT_FALSE( a.mightBeIndexed( "a" ) );
- }
-
- TEST( UpdateIndexDataTest, AllPathsIndexed1 ) {
- UpdateIndexData a;
- a.allPathsIndexed();
- ASSERT_TRUE( a.mightBeIndexed( "a" ) );
- a.clear();
- ASSERT_FALSE( a.mightBeIndexed( "a" ) );
- }
-
- TEST( UpdateIndexDataTest, AllPathsIndexed2 ) {
- UpdateIndexData a;
- a.allPathsIndexed();
- ASSERT_TRUE( a.mightBeIndexed( "a" ) );
- ASSERT_TRUE( a.mightBeIndexed( "" ) );
- a.addPathComponent( "a" );
- ASSERT_TRUE( a.mightBeIndexed( "a" ) );
- ASSERT_TRUE( a.mightBeIndexed( "b" ) );
- a.clear();
- ASSERT_FALSE( a.mightBeIndexed( "a" ) );
- }
-
- TEST( UpdateIndexDataTest, getCanonicalIndexField1 ) {
- string x;
-
- ASSERT_FALSE( getCanonicalIndexField( "a", &x ) );
- ASSERT_FALSE( getCanonicalIndexField( "aaa", &x ) );
- ASSERT_FALSE( getCanonicalIndexField( "a.b", &x ) );
-
- ASSERT_TRUE( getCanonicalIndexField( "a.$", &x ) );
- ASSERT_EQUALS( x, "a" );
- ASSERT_TRUE( getCanonicalIndexField( "a.0", &x ) );
- ASSERT_EQUALS( x, "a" );
- ASSERT_TRUE( getCanonicalIndexField( "a.123", &x ) );
- ASSERT_EQUALS( x, "a" );
-
- ASSERT_TRUE( getCanonicalIndexField( "a.$.b", &x ) );
- ASSERT_EQUALS( x, "a.b" );
- ASSERT_TRUE( getCanonicalIndexField( "a.0.b", &x ) );
- ASSERT_EQUALS( x, "a.b" );
- ASSERT_TRUE( getCanonicalIndexField( "a.123.b", &x ) );
- ASSERT_EQUALS( x, "a.b" );
-
- ASSERT_FALSE( getCanonicalIndexField( "a.$ref", &x ) );
- ASSERT_FALSE( getCanonicalIndexField( "a.$ref.b", &x ) );
-
-
- ASSERT_FALSE( getCanonicalIndexField( "a.c$d.b", &x ) );
-
- ASSERT_FALSE( getCanonicalIndexField( "a.123a", &x ) );
- ASSERT_FALSE( getCanonicalIndexField( "a.a123", &x ) );
- ASSERT_FALSE( getCanonicalIndexField( "a.123a.b", &x ) );
- ASSERT_FALSE( getCanonicalIndexField( "a.a123.b", &x ) );
-
- ASSERT_FALSE( getCanonicalIndexField( "a.", &x ) );
- }
+using std::string;
+TEST(UpdateIndexDataTest, Simple1) {
+ UpdateIndexData a;
+ a.addPath("a.b");
+ ASSERT_TRUE(a.mightBeIndexed("a.b"));
+ ASSERT_TRUE(a.mightBeIndexed("a"));
+ ASSERT_TRUE(a.mightBeIndexed("a.b.c"));
+ ASSERT_TRUE(a.mightBeIndexed("a.$.b"));
+
+ ASSERT_FALSE(a.mightBeIndexed("b"));
+ ASSERT_FALSE(a.mightBeIndexed("a.c"));
+
+ a.clear();
+ ASSERT_FALSE(a.mightBeIndexed("a.b"));
+}
+
+TEST(UpdateIndexDataTest, Simple2) {
+ UpdateIndexData a;
+ a.addPath("ab");
+ ASSERT_FALSE(a.mightBeIndexed("a"));
+ a.clear();
+ ASSERT_FALSE(a.mightBeIndexed("ab"));
+}
+
+TEST(UpdateIndexDataTest, Component1) {
+ UpdateIndexData a;
+ a.addPathComponent("a");
+ ASSERT_FALSE(a.mightBeIndexed(""));
+ ASSERT_TRUE(a.mightBeIndexed("a"));
+ ASSERT_TRUE(a.mightBeIndexed("b.a"));
+ ASSERT_TRUE(a.mightBeIndexed("a.b"));
+ ASSERT_TRUE(a.mightBeIndexed("b.a.c"));
+ ASSERT_FALSE(a.mightBeIndexed("b.c"));
+ ASSERT_FALSE(a.mightBeIndexed("ab"));
+ a.clear();
+ ASSERT_FALSE(a.mightBeIndexed("a"));
+}
+
+TEST(UpdateIndexDataTest, AllPathsIndexed1) {
+ UpdateIndexData a;
+ a.allPathsIndexed();
+ ASSERT_TRUE(a.mightBeIndexed("a"));
+ a.clear();
+ ASSERT_FALSE(a.mightBeIndexed("a"));
+}
+
+TEST(UpdateIndexDataTest, AllPathsIndexed2) {
+ UpdateIndexData a;
+ a.allPathsIndexed();
+ ASSERT_TRUE(a.mightBeIndexed("a"));
+ ASSERT_TRUE(a.mightBeIndexed(""));
+ a.addPathComponent("a");
+ ASSERT_TRUE(a.mightBeIndexed("a"));
+ ASSERT_TRUE(a.mightBeIndexed("b"));
+ a.clear();
+ ASSERT_FALSE(a.mightBeIndexed("a"));
+}
+
+TEST(UpdateIndexDataTest, getCanonicalIndexField1) {
+ string x;
+
+ ASSERT_FALSE(getCanonicalIndexField("a", &x));
+ ASSERT_FALSE(getCanonicalIndexField("aaa", &x));
+ ASSERT_FALSE(getCanonicalIndexField("a.b", &x));
+
+ ASSERT_TRUE(getCanonicalIndexField("a.$", &x));
+ ASSERT_EQUALS(x, "a");
+ ASSERT_TRUE(getCanonicalIndexField("a.0", &x));
+ ASSERT_EQUALS(x, "a");
+ ASSERT_TRUE(getCanonicalIndexField("a.123", &x));
+ ASSERT_EQUALS(x, "a");
+
+ ASSERT_TRUE(getCanonicalIndexField("a.$.b", &x));
+ ASSERT_EQUALS(x, "a.b");
+ ASSERT_TRUE(getCanonicalIndexField("a.0.b", &x));
+ ASSERT_EQUALS(x, "a.b");
+ ASSERT_TRUE(getCanonicalIndexField("a.123.b", &x));
+ ASSERT_EQUALS(x, "a.b");
+
+ ASSERT_FALSE(getCanonicalIndexField("a.$ref", &x));
+ ASSERT_FALSE(getCanonicalIndexField("a.$ref.b", &x));
+
+
+ ASSERT_FALSE(getCanonicalIndexField("a.c$d.b", &x));
+
+ ASSERT_FALSE(getCanonicalIndexField("a.123a", &x));
+ ASSERT_FALSE(getCanonicalIndexField("a.a123", &x));
+ ASSERT_FALSE(getCanonicalIndexField("a.123a.b", &x));
+ ASSERT_FALSE(getCanonicalIndexField("a.a123.b", &x));
+
+ ASSERT_FALSE(getCanonicalIndexField("a.", &x));
+}
}
diff --git a/src/mongo/db/wire_version.h b/src/mongo/db/wire_version.h
index f27f0962655..d79b6837a18 100644
--- a/src/mongo/db/wire_version.h
+++ b/src/mongo/db/wire_version.h
@@ -28,41 +28,41 @@
namespace mongo {
- /**
- * The 'WireVersion' captures all "protocol events" the write protocol went through. A
- * protocol event is a change in the syntax of messages on the wire or the semantics of
- * existing messages. We may also add "logical" entries for releases, although that's not
- * mandatory.
- *
- * We use the wire version to determine if two agents (a driver, a mongos, or a mongod) can
- * interact. Each agent carries two versions, a 'max' and a 'min' one. If the two agents
- * are on the same 'max' number, they stricly speak the same wire protocol and it is safe
- * to allow them to communicate. If two agents' ranges do not intersect, they should not be
- * allowed to communicate.
- *
- * If two agents have at least one version in common they can communicate, but one of the
- * sides has to be ready to compensate for not being on its partner version.
- */
- enum WireVersion {
- // Everything before we started tracking.
- RELEASE_2_4_AND_BEFORE = 0,
+/**
+ * The 'WireVersion' captures all "protocol events" the write protocol went through. A
+ * protocol event is a change in the syntax of messages on the wire or the semantics of
+ * existing messages. We may also add "logical" entries for releases, although that's not
+ * mandatory.
+ *
+ * We use the wire version to determine if two agents (a driver, a mongos, or a mongod) can
+ * interact. Each agent carries two versions, a 'max' and a 'min' one. If the two agents
+ * are on the same 'max' number, they strictly speak the same wire protocol and it is safe
+ * to allow them to communicate. If two agents' ranges do not intersect, they should not be
+ * allowed to communicate.
+ *
+ * If two agents have at least one version in common they can communicate, but one of the
+ * sides has to be ready to compensate for not being on its partner version.
+ */
+enum WireVersion {
+ // Everything before we started tracking.
+ RELEASE_2_4_AND_BEFORE = 0,
- // The aggregation command may now be requested to return cursors.
- AGG_RETURNS_CURSORS = 1,
+ // The aggregation command may now be requested to return cursors.
+ AGG_RETURNS_CURSORS = 1,
- // insert, update, and delete batch command
- BATCH_COMMANDS = 2,
+ // insert, update, and delete batch command
+ BATCH_COMMANDS = 2,
- // support SCRAM-SHA1, listIndexes, listCollections, new explain
- RELEASE_2_7_7 = 3
- };
+ // support SCRAM-SHA1, listIndexes, listCollections, new explain
+ RELEASE_2_7_7 = 3
+};
- // Latest version that the server accepts. This should always be at the latest entry in
- // WireVersion.
- static const int maxWireVersion = RELEASE_2_7_7;
+// Latest version that the server accepts. This should always be at the latest entry in
+// WireVersion.
+static const int maxWireVersion = RELEASE_2_7_7;
- // Minimum version that the server accepts. We should bump this whenever we don't want
- // to allow communication with too old agents.
- static const int minWireVersion = RELEASE_2_4_AND_BEFORE;
+// Minimum version that the server accepts. We should bump this whenever we don't want
+// to allow communication with too old agents.
+static const int minWireVersion = RELEASE_2_4_AND_BEFORE;
-} // namespace mongo
+} // namespace mongo
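
A small standalone sketch of the compatibility rule the comment above describes: two agents may communicate only if their [min, max] wire-version ranges intersect (illustrative types, not server code):

#include <iostream>

struct WireRange {
    int minVersion;
    int maxVersion;
};

static bool canCommunicate(const WireRange& a, const WireRange& b) {
    // Ranges intersect when neither lies entirely above the other.
    return a.minVersion <= b.maxVersion && b.minVersion <= a.maxVersion;
}

int main() {
    const WireRange server{0, 3};  // RELEASE_2_4_AND_BEFORE .. RELEASE_2_7_7
    const WireRange oldDriver{0, 1};
    const WireRange futureAgent{4, 5};

    std::cout << canCommunicate(server, oldDriver) << '\n';    // 1
    std::cout << canCommunicate(server, futureAgent) << '\n';  // 0
    return 0;
}
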
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index e1426b20587..cbac6129001 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -44,193 +44,181 @@
namespace mongo {
- using std::string;
- using repl::OpTime;
+using std::string;
+using repl::OpTime;
- static TimerStats gleWtimeStats;
- static ServerStatusMetricField<TimerStats> displayGleLatency("getLastError.wtime",
- &gleWtimeStats );
+static TimerStats gleWtimeStats;
+static ServerStatusMetricField<TimerStats> displayGleLatency("getLastError.wtime", &gleWtimeStats);
- static Counter64 gleWtimeouts;
- static ServerStatusMetricField<Counter64> gleWtimeoutsDisplay("getLastError.wtimeouts",
- &gleWtimeouts );
+static Counter64 gleWtimeouts;
+static ServerStatusMetricField<Counter64> gleWtimeoutsDisplay("getLastError.wtimeouts",
+ &gleWtimeouts);
- void setupSynchronousCommit(OperationContext* txn) {
- const WriteConcernOptions& writeConcern = txn->getWriteConcern();
+void setupSynchronousCommit(OperationContext* txn) {
+ const WriteConcernOptions& writeConcern = txn->getWriteConcern();
- if ( writeConcern.syncMode == WriteConcernOptions::JOURNAL ||
- writeConcern.syncMode == WriteConcernOptions::FSYNC ) {
- txn->recoveryUnit()->goingToWaitUntilDurable();
- }
+ if (writeConcern.syncMode == WriteConcernOptions::JOURNAL ||
+ writeConcern.syncMode == WriteConcernOptions::FSYNC) {
+ txn->recoveryUnit()->goingToWaitUntilDurable();
}
-
- namespace {
- // The consensus protocol requires that w: majority implies j: true on all nodes.
- void addJournalSyncForWMajority(WriteConcernOptions* writeConcern) {
- if (repl::getGlobalReplicationCoordinator()->isV1ElectionProtocol()
- && writeConcern->wMode == WriteConcernOptions::kMajority
- && writeConcern->syncMode == WriteConcernOptions::NONE)
- {
- writeConcern->syncMode = WriteConcernOptions::JOURNAL;
- }
- }
- } // namespace
-
- StatusWith<WriteConcernOptions> extractWriteConcern(const BSONObj& cmdObj) {
- // The default write concern if empty is w : 1
- // Specifying w : 0 is/was allowed, but is interpreted identically to w : 1
- WriteConcernOptions writeConcern = repl::getGlobalReplicationCoordinator()
- ->getGetLastErrorDefault();
- if (writeConcern.wNumNodes == 0 && writeConcern.wMode.empty()) {
- writeConcern.wNumNodes = 1;
- }
- // Upgrade default write concern if necessary.
- addJournalSyncForWMajority(&writeConcern);
-
- BSONElement writeConcernElement;
- Status wcStatus = bsonExtractTypedField(cmdObj,
- "writeConcern",
- Object,
- &writeConcernElement);
- if (!wcStatus.isOK()) {
- if (wcStatus == ErrorCodes::NoSuchKey) {
- // Return default write concern if no write concern is given.
- return writeConcern;
- }
- return wcStatus;
- }
-
- BSONObj writeConcernObj = writeConcernElement.Obj();
- // Empty write concern is interpreted to default.
- if (writeConcernObj.isEmpty()) {
+}
+
+namespace {
+// The consensus protocol requires that w: majority implies j: true on all nodes.
+void addJournalSyncForWMajority(WriteConcernOptions* writeConcern) {
+ if (repl::getGlobalReplicationCoordinator()->isV1ElectionProtocol() &&
+ writeConcern->wMode == WriteConcernOptions::kMajority &&
+ writeConcern->syncMode == WriteConcernOptions::NONE) {
+ writeConcern->syncMode = WriteConcernOptions::JOURNAL;
+ }
+}
+} // namespace
+
+StatusWith<WriteConcernOptions> extractWriteConcern(const BSONObj& cmdObj) {
+ // The default write concern if empty is w : 1
+ // Specifying w : 0 is/was allowed, but is interpreted identically to w : 1
+ WriteConcernOptions writeConcern =
+ repl::getGlobalReplicationCoordinator()->getGetLastErrorDefault();
+ if (writeConcern.wNumNodes == 0 && writeConcern.wMode.empty()) {
+ writeConcern.wNumNodes = 1;
+ }
+ // Upgrade default write concern if necessary.
+ addJournalSyncForWMajority(&writeConcern);
+
+ BSONElement writeConcernElement;
+ Status wcStatus = bsonExtractTypedField(cmdObj, "writeConcern", Object, &writeConcernElement);
+ if (!wcStatus.isOK()) {
+ if (wcStatus == ErrorCodes::NoSuchKey) {
+ // Return default write concern if no write concern is given.
return writeConcern;
}
+ return wcStatus;
+ }
- wcStatus = writeConcern.parse(writeConcernObj);
- if (!wcStatus.isOK()) {
- return wcStatus;
- }
-
- wcStatus = validateWriteConcern(writeConcern);
- if (!wcStatus.isOK()) {
- return wcStatus;
- }
+ BSONObj writeConcernObj = writeConcernElement.Obj();
+ // Empty write concern is interpreted to default.
+ if (writeConcernObj.isEmpty()) {
+ return writeConcern;
+ }
- // Upgrade parsed write concern if necessary.
- addJournalSyncForWMajority(&writeConcern);
+ wcStatus = writeConcern.parse(writeConcernObj);
+ if (!wcStatus.isOK()) {
+ return wcStatus;
+ }
- return writeConcern;
+ wcStatus = validateWriteConcern(writeConcern);
+ if (!wcStatus.isOK()) {
+ return wcStatus;
}
- Status validateWriteConcern( const WriteConcernOptions& writeConcern ) {
- const bool isJournalEnabled = getGlobalServiceContext()->getGlobalStorageEngine()->isDurable();
+ // Upgrade parsed write concern if necessary.
+ addJournalSyncForWMajority(&writeConcern);
- if ( writeConcern.syncMode == WriteConcernOptions::JOURNAL && !isJournalEnabled ) {
- return Status( ErrorCodes::BadValue,
- "cannot use 'j' option when a host does not have journaling enabled" );
- }
+ return writeConcern;
+}
- const bool isConfigServer = serverGlobalParams.configsvr;
- const repl::ReplicationCoordinator::Mode replMode =
- repl::getGlobalReplicationCoordinator()->getReplicationMode();
+Status validateWriteConcern(const WriteConcernOptions& writeConcern) {
+ const bool isJournalEnabled = getGlobalServiceContext()->getGlobalStorageEngine()->isDurable();
- if ( isConfigServer || replMode == repl::ReplicationCoordinator::modeNone ) {
+ if (writeConcern.syncMode == WriteConcernOptions::JOURNAL && !isJournalEnabled) {
+ return Status(ErrorCodes::BadValue,
+ "cannot use 'j' option when a host does not have journaling enabled");
+ }
- // Note that config servers can be replicated (have an oplog), but we still don't allow
- // w > 1
+ const bool isConfigServer = serverGlobalParams.configsvr;
+ const repl::ReplicationCoordinator::Mode replMode =
+ repl::getGlobalReplicationCoordinator()->getReplicationMode();
- if ( writeConcern.wNumNodes > 1 ) {
- return Status( ErrorCodes::BadValue,
- string( "cannot use 'w' > 1 " ) +
- ( isConfigServer ? "on a config server host" :
- "when a host is not replicated" ) );
- }
- }
+ if (isConfigServer || replMode == repl::ReplicationCoordinator::modeNone) {
+ // Note that config servers can be replicated (have an oplog), but we still don't allow
+ // w > 1
- if ( replMode != repl::ReplicationCoordinator::modeReplSet &&
- !writeConcern.wMode.empty() &&
- writeConcern.wMode != WriteConcernOptions::kMajority ) {
- return Status( ErrorCodes::BadValue,
- string( "cannot use non-majority 'w' mode " ) + writeConcern.wMode
- + " when a host is not a member of a replica set" );
+ if (writeConcern.wNumNodes > 1) {
+ return Status(ErrorCodes::BadValue,
+ string("cannot use 'w' > 1 ") + (isConfigServer
+ ? "on a config server host"
+ : "when a host is not replicated"));
}
+ }
- return Status::OK();
+ if (replMode != repl::ReplicationCoordinator::modeReplSet && !writeConcern.wMode.empty() &&
+ writeConcern.wMode != WriteConcernOptions::kMajority) {
+ return Status(ErrorCodes::BadValue,
+ string("cannot use non-majority 'w' mode ") + writeConcern.wMode +
+ " when a host is not a member of a replica set");
}
- void WriteConcernResult::appendTo( const WriteConcernOptions& writeConcern,
- BSONObjBuilder* result ) const {
+ return Status::OK();
+}
- if ( syncMillis >= 0 )
- result->appendNumber( "syncMillis", syncMillis );
+void WriteConcernResult::appendTo(const WriteConcernOptions& writeConcern,
+ BSONObjBuilder* result) const {
+ if (syncMillis >= 0)
+ result->appendNumber("syncMillis", syncMillis);
- if ( fsyncFiles >= 0 )
- result->appendNumber( "fsyncFiles", fsyncFiles );
+ if (fsyncFiles >= 0)
+ result->appendNumber("fsyncFiles", fsyncFiles);
- if ( wTime >= 0 ) {
- if ( wTimedOut )
- result->appendNumber( "waited", wTime );
- else
- result->appendNumber( "wtime", wTime );
- }
+ if (wTime >= 0) {
+ if (wTimedOut)
+ result->appendNumber("waited", wTime);
+ else
+ result->appendNumber("wtime", wTime);
+ }
- if ( wTimedOut )
- result->appendBool( "wtimeout", true );
+ if (wTimedOut)
+ result->appendBool("wtimeout", true);
- if (writtenTo.size()) {
- BSONArrayBuilder hosts(result->subarrayStart("writtenTo"));
- for (size_t i = 0; i < writtenTo.size(); ++i) {
- hosts.append(writtenTo[i].toString());
- }
- }
- else {
- result->appendNull( "writtenTo" );
+ if (writtenTo.size()) {
+ BSONArrayBuilder hosts(result->subarrayStart("writtenTo"));
+ for (size_t i = 0; i < writtenTo.size(); ++i) {
+ hosts.append(writtenTo[i].toString());
}
+ } else {
+ result->appendNull("writtenTo");
+ }
- if ( err.empty() )
- result->appendNull( "err" );
- else
- result->append( "err", err );
-
- // *** 2.4 SyncClusterConnection compatibility ***
- // 2.4 expects either fsync'd files, or a "waited" field exist after running an fsync : true
- // GLE, but with journaling we don't actually need to run the fsync (fsync command is
- // preferred in 2.6). So we add a "waited" field if one doesn't exist.
-
- if ( writeConcern.syncMode == WriteConcernOptions::FSYNC ) {
+ if (err.empty())
+ result->appendNull("err");
+ else
+ result->append("err", err);
- if ( fsyncFiles < 0 && ( wTime < 0 || !wTimedOut ) ) {
- dassert( result->asTempObj()["waited"].eoo() );
- result->appendNumber( "waited", syncMillis );
- }
+ // *** 2.4 SyncClusterConnection compatibility ***
+    // 2.4 expects either fsync'd files or a "waited" field after running an fsync : true
+ // GLE, but with journaling we don't actually need to run the fsync (fsync command is
+ // preferred in 2.6). So we add a "waited" field if one doesn't exist.
- dassert( result->asTempObj()["fsyncFiles"].numberInt() > 0 ||
- !result->asTempObj()["waited"].eoo() );
+ if (writeConcern.syncMode == WriteConcernOptions::FSYNC) {
+ if (fsyncFiles < 0 && (wTime < 0 || !wTimedOut)) {
+ dassert(result->asTempObj()["waited"].eoo());
+ result->appendNumber("waited", syncMillis);
}
- }
- Status waitForWriteConcern( OperationContext* txn,
- const OpTime& replOpTime,
- WriteConcernResult* result ) {
+ dassert(result->asTempObj()["fsyncFiles"].numberInt() > 0 ||
+ !result->asTempObj()["waited"].eoo());
+ }
+}
- const WriteConcernOptions& writeConcern = txn->getWriteConcern();
+Status waitForWriteConcern(OperationContext* txn,
+ const OpTime& replOpTime,
+ WriteConcernResult* result) {
+ const WriteConcernOptions& writeConcern = txn->getWriteConcern();
- // We assume all options have been validated earlier, if not, programming error
- dassert( validateWriteConcern( writeConcern ).isOK() );
+ // We assume all options have been validated earlier, if not, programming error
+ dassert(validateWriteConcern(writeConcern).isOK());
- // Next handle blocking on disk
+ // Next handle blocking on disk
- Timer syncTimer;
+ Timer syncTimer;
- switch( writeConcern.syncMode ) {
+ switch (writeConcern.syncMode) {
case WriteConcernOptions::NONE:
break;
case WriteConcernOptions::FSYNC: {
StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- if ( !storageEngine->isDurable() ) {
- result->fsyncFiles = storageEngine->flushAllFiles( true );
- }
- else {
+ if (!storageEngine->isDurable()) {
+ result->fsyncFiles = storageEngine->flushAllFiles(true);
+ } else {
// We only need to commit the journal if we're durable
txn->recoveryUnit()->waitUntilDurable();
}
@@ -239,40 +227,38 @@ namespace mongo {
case WriteConcernOptions::JOURNAL:
txn->recoveryUnit()->waitUntilDurable();
break;
- }
-
- result->syncMillis = syncTimer.millis();
+ }
- // Now wait for replication
+ result->syncMillis = syncTimer.millis();
- if (replOpTime.isNull()) {
- // no write happened for this client yet
- return Status::OK();
- }
+ // Now wait for replication
- // needed to avoid incrementing gleWtimeStats SERVER-9005
- if (writeConcern.wNumNodes <= 1 && writeConcern.wMode.empty()) {
- // no desired replication check
- return Status::OK();
- }
+ if (replOpTime.isNull()) {
+ // no write happened for this client yet
+ return Status::OK();
+ }
- // Now we wait for replication
- // Note that replica set stepdowns and gle mode changes are thrown as errors
- repl::ReplicationCoordinator::StatusAndDuration replStatus =
- repl::getGlobalReplicationCoordinator()->awaitReplication(txn,
- replOpTime,
- writeConcern);
- if (replStatus.status == ErrorCodes::WriteConcernFailed) {
- gleWtimeouts.increment();
- result->err = "timeout";
- result->wTimedOut = true;
- }
- // Add stats
- result->writtenTo = repl::getGlobalReplicationCoordinator()->getHostsWrittenTo(replOpTime);
- gleWtimeStats.recordMillis(replStatus.duration.count());
- result->wTime = replStatus.duration.count();
+ // needed to avoid incrementing gleWtimeStats SERVER-9005
+ if (writeConcern.wNumNodes <= 1 && writeConcern.wMode.empty()) {
+ // no desired replication check
+ return Status::OK();
+ }
- return replStatus.status;
+ // Now we wait for replication
+ // Note that replica set stepdowns and gle mode changes are thrown as errors
+ repl::ReplicationCoordinator::StatusAndDuration replStatus =
+ repl::getGlobalReplicationCoordinator()->awaitReplication(txn, replOpTime, writeConcern);
+ if (replStatus.status == ErrorCodes::WriteConcernFailed) {
+ gleWtimeouts.increment();
+ result->err = "timeout";
+ result->wTimedOut = true;
}
+ // Add stats
+ result->writtenTo = repl::getGlobalReplicationCoordinator()->getHostsWrittenTo(replOpTime);
+ gleWtimeStats.recordMillis(replStatus.duration.count());
+ result->wTime = replStatus.duration.count();
+
+ return replStatus.status;
+}
-} // namespace mongo
+} // namespace mongo
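
Two defaulting rules are applied above before and after parsing: an empty default write concern is interpreted as w: 1, and under the V1 election protocol w: "majority" with no explicit sync mode is upgraded to journaled. A standalone sketch of just those rules, using plain stand-in types rather than WriteConcernOptions and the replication coordinator:

#include <iostream>
#include <string>

struct SimpleWriteConcern {
    enum SyncMode { NONE, FSYNC, JOURNAL };
    SyncMode syncMode = NONE;
    int wNumNodes = 0;
    std::string wMode;  // empty means the numeric form (wNumNodes) is in effect
};

static void applyDefaults(SimpleWriteConcern* wc, bool isV1ElectionProtocol) {
    // An entirely empty default is interpreted as w: 1.
    if (wc->wNumNodes == 0 && wc->wMode.empty())
        wc->wNumNodes = 1;

    // The consensus protocol requires that w: majority implies j: true.
    if (isV1ElectionProtocol && wc->wMode == "majority" &&
        wc->syncMode == SimpleWriteConcern::NONE)
        wc->syncMode = SimpleWriteConcern::JOURNAL;
}

int main() {
    SimpleWriteConcern empty;
    applyDefaults(&empty, true);
    std::cout << "empty default becomes w: " << empty.wNumNodes << '\n';  // 1

    SimpleWriteConcern majority;
    majority.wMode = "majority";
    applyDefaults(&majority, true);
    std::cout << "majority journaled: "
              << (majority.syncMode == SimpleWriteConcern::JOURNAL) << '\n';  // 1
    return 0;
}
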
diff --git a/src/mongo/db/write_concern.h b/src/mongo/db/write_concern.h
index 0fa735aac4a..31e2bc6c588 100644
--- a/src/mongo/db/write_concern.h
+++ b/src/mongo/db/write_concern.h
@@ -33,73 +33,74 @@
namespace mongo {
- class OperationContext;
- template <typename T> class StatusWith;
+class OperationContext;
+template <typename T>
+class StatusWith;
- namespace repl {
- class OpTime;
+namespace repl {
+class OpTime;
+}
+
+/**
+ * If txn->getWriteConcern() indicates a durable commit level,
+ * marks the RecoveryUnit associated with "txn" appropriately.
+ * Provides a hint to the storage engine that
+ * particular operations will be waiting for their changes to become durable.
+ */
+void setupSynchronousCommit(OperationContext* txn);
+
+/**
+ * Attempts to extract a writeConcern from cmdObj.
+ * Verifies that the writeConcern is of type Object (BSON type) and
+ * that the resulting writeConcern is valid for this particular host.
+ */
+StatusWith<WriteConcernOptions> extractWriteConcern(const BSONObj& cmdObj);
+
+/**
+ * Verifies that a WriteConcern is valid for this particular host.
+ */
+Status validateWriteConcern(const WriteConcernOptions& writeConcern);
+
+struct WriteConcernResult {
+ WriteConcernResult() {
+ reset();
+ }
+
+ void reset() {
+ syncMillis = -1;
+ fsyncFiles = -1;
+ wTimedOut = false;
+ wTime = -1;
+ err = "";
}
- /**
- * If txn->getWriteConcern() indicates a durable commit level,
- * marks the RecoveryUnit associated with "txn" appropriately.
- * Provides a hint to the storage engine that
- * particular operations will be waiting for their changes to become durable.
- */
- void setupSynchronousCommit(OperationContext* txn);
-
- /**
- * Attempts to extract a writeConcern from cmdObj.
- * Verifies that the writeConcern is of type Object (BSON type) and
- * that the resulting writeConcern is valid for this particular host.
- */
- StatusWith<WriteConcernOptions> extractWriteConcern(const BSONObj& cmdObj);
-
- /**
- * Verifies that a WriteConcern is valid for this particular host.
- */
- Status validateWriteConcern( const WriteConcernOptions& writeConcern );
-
- struct WriteConcernResult {
- WriteConcernResult() {
- reset();
- }
-
- void reset() {
- syncMillis = -1;
- fsyncFiles = -1;
- wTimedOut = false;
- wTime = -1;
- err = "";
- }
-
- void appendTo( const WriteConcernOptions& writeConcern, BSONObjBuilder* result ) const;
-
- int syncMillis;
- int fsyncFiles;
-
- bool wTimedOut;
- int wTime;
- std::vector<HostAndPort> writtenTo;
-
- std::string err; // this is the old err field, should deprecate
- };
-
- /**
- * Blocks until the database is sure the specified user write concern has been fulfilled, or
- * returns an error status if the write concern fails. Does no validation of the input write
- * concern, it is an error to pass this function an invalid write concern for the host.
- *
- * Takes a user write concern as well as the replication opTime the write concern applies to -
- * if this opTime.isNull() no replication-related write concern options will be enforced.
- *
- * Returns result of the write concern if successful.
- * Returns NotMaster if the host steps down while waiting for replication
- * Returns UnknownReplWriteConcern if the wMode specified was not enforceable
- */
- Status waitForWriteConcern( OperationContext* txn,
- const repl::OpTime& replOpTime,
- WriteConcernResult* result );
-
-
-} // namespace mongo
+ void appendTo(const WriteConcernOptions& writeConcern, BSONObjBuilder* result) const;
+
+ int syncMillis;
+ int fsyncFiles;
+
+ bool wTimedOut;
+ int wTime;
+ std::vector<HostAndPort> writtenTo;
+
+ std::string err; // this is the old err field, should deprecate
+};
+
+/**
+ * Blocks until the database is sure the specified user write concern has been fulfilled, or
+ * returns an error status if the write concern fails. Does no validation of the input write
+ * concern, it is an error to pass this function an invalid write concern for the host.
+ *
+ * Takes a user write concern as well as the replication opTime the write concern applies to -
+ * if this opTime.isNull() no replication-related write concern options will be enforced.
+ *
+ * Returns result of the write concern if successful.
+ * Returns NotMaster if the host steps down while waiting for replication
+ * Returns UnknownReplWriteConcern if the wMode specified was not enforceable
+ */
+Status waitForWriteConcern(OperationContext* txn,
+ const repl::OpTime& replOpTime,
+ WriteConcernResult* result);
+
+
+} // namespace mongo
diff --git a/src/mongo/db/write_concern_options.cpp b/src/mongo/db/write_concern_options.cpp
index 8f0c8e9c25a..5fba6321b5e 100644
--- a/src/mongo/db/write_concern_options.cpp
+++ b/src/mongo/db/write_concern_options.cpp
@@ -32,180 +32,151 @@
namespace mongo {
- using std::string;
+using std::string;
namespace {
- /**
- * Controls how much a client cares about writes and serves as initializer for the pre-defined
- * write concern options.
- *
- * Default is NORMAL.
- */
- enum WriteConcern {
- W_NONE = 0 ,
- W_NORMAL = 1
- };
-
- const BSONField<bool> mongosSecondaryThrottleField("_secondaryThrottle", true);
- const BSONField<bool> secondaryThrottleField("secondaryThrottle", true);
- const BSONField<BSONObj> writeConcernField("writeConcern");
-
-} // namespace
-
- const char WriteConcernOptions::kMajority[] = "majority";
-
- const BSONObj WriteConcernOptions::Default = BSONObj();
- const BSONObj WriteConcernOptions::Acknowledged(BSON("w" << W_NORMAL));
- const BSONObj WriteConcernOptions::Unacknowledged(BSON("w" << W_NONE));
- const BSONObj WriteConcernOptions::Majority(BSON("w" << WriteConcernOptions::kMajority));
-
-
- WriteConcernOptions::WriteConcernOptions(int numNodes,
- SyncMode sync,
- int timeout):
- syncMode(sync),
- wNumNodes(numNodes),
- wTimeout(timeout) {
- }
+/**
+ * Controls how much a client cares about writes and serves as initializer for the pre-defined
+ * write concern options.
+ *
+ * Default is NORMAL.
+ */
+enum WriteConcern { W_NONE = 0, W_NORMAL = 1 };
- WriteConcernOptions::WriteConcernOptions(const std::string& mode,
- SyncMode sync,
- int timeout):
- syncMode(sync),
- wNumNodes(0),
- wMode(mode),
- wTimeout(timeout) {
- }
+const BSONField<bool> mongosSecondaryThrottleField("_secondaryThrottle", true);
+const BSONField<bool> secondaryThrottleField("secondaryThrottle", true);
+const BSONField<BSONObj> writeConcernField("writeConcern");
- Status WriteConcernOptions::parse( const BSONObj& obj ) {
- if ( obj.isEmpty() ) {
- return Status( ErrorCodes::FailedToParse, "write concern object cannot be empty" );
- }
+} // namespace
- BSONElement jEl = obj["j"];
- if ( !jEl.eoo() && !jEl.isNumber() && jEl.type() != Bool ) {
- return Status( ErrorCodes::FailedToParse, "j must be numeric or a boolean value" );
- }
+const char WriteConcernOptions::kMajority[] = "majority";
- const bool j = jEl.trueValue();
+const BSONObj WriteConcernOptions::Default = BSONObj();
+const BSONObj WriteConcernOptions::Acknowledged(BSON("w" << W_NORMAL));
+const BSONObj WriteConcernOptions::Unacknowledged(BSON("w" << W_NONE));
+const BSONObj WriteConcernOptions::Majority(BSON("w" << WriteConcernOptions::kMajority));
- BSONElement fsyncEl = obj["fsync"];
- if ( !fsyncEl.eoo() && !fsyncEl.isNumber() && fsyncEl.type() != Bool ) {
- return Status( ErrorCodes::FailedToParse, "fsync must be numeric or a boolean value" );
- }
- const bool fsync = fsyncEl.trueValue();
+WriteConcernOptions::WriteConcernOptions(int numNodes, SyncMode sync, int timeout)
+ : syncMode(sync), wNumNodes(numNodes), wTimeout(timeout) {}
- if ( j && fsync )
- return Status( ErrorCodes::FailedToParse,
- "fsync and j options cannot be used together" );
+WriteConcernOptions::WriteConcernOptions(const std::string& mode, SyncMode sync, int timeout)
+ : syncMode(sync), wNumNodes(0), wMode(mode), wTimeout(timeout) {}
- if ( j ) {
- syncMode = JOURNAL;
- }
- if ( fsync ) {
- syncMode = FSYNC;
- }
+Status WriteConcernOptions::parse(const BSONObj& obj) {
+ if (obj.isEmpty()) {
+ return Status(ErrorCodes::FailedToParse, "write concern object cannot be empty");
+ }
- BSONElement e = obj["w"];
- if ( e.isNumber() ) {
- wNumNodes = e.numberInt();
- }
- else if ( e.type() == String ) {
- wMode = e.valuestrsafe();
- }
- else if ( e.eoo() ||
- e.type() == jstNULL ||
- e.type() == Undefined ) {
- wNumNodes = 1;
- }
- else {
- return Status( ErrorCodes::FailedToParse, "w has to be a number or a string" );
- }
+ BSONElement jEl = obj["j"];
+ if (!jEl.eoo() && !jEl.isNumber() && jEl.type() != Bool) {
+ return Status(ErrorCodes::FailedToParse, "j must be numeric or a boolean value");
+ }
- wTimeout = obj["wtimeout"].numberInt();
+ const bool j = jEl.trueValue();
- return Status::OK();
+ BSONElement fsyncEl = obj["fsync"];
+ if (!fsyncEl.eoo() && !fsyncEl.isNumber() && fsyncEl.type() != Bool) {
+ return Status(ErrorCodes::FailedToParse, "fsync must be numeric or a boolean value");
}
- Status WriteConcernOptions::parseSecondaryThrottle(const BSONObj& doc,
- BSONObj* rawWriteConcernObj) {
- string errMsg;
- bool isSecondaryThrottle;
- FieldParser::FieldState fieldState = FieldParser::extract(doc,
- secondaryThrottleField,
- &isSecondaryThrottle,
- &errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) {
- return Status(ErrorCodes::FailedToParse, errMsg);
- }
+ const bool fsync = fsyncEl.trueValue();
- if (fieldState != FieldParser::FIELD_SET) {
- fieldState = FieldParser::extract(doc,
- mongosSecondaryThrottleField,
- &isSecondaryThrottle,
- &errMsg);
+ if (j && fsync)
+ return Status(ErrorCodes::FailedToParse, "fsync and j options cannot be used together");
- if (fieldState == FieldParser::FIELD_INVALID) {
- return Status(ErrorCodes::FailedToParse, errMsg);
- }
- }
+ if (j) {
+ syncMode = JOURNAL;
+ }
+ if (fsync) {
+ syncMode = FSYNC;
+ }
- BSONObj dummyBSON;
- if (!rawWriteConcernObj) {
- rawWriteConcernObj = &dummyBSON;
- }
+ BSONElement e = obj["w"];
+ if (e.isNumber()) {
+ wNumNodes = e.numberInt();
+ } else if (e.type() == String) {
+ wMode = e.valuestrsafe();
+ } else if (e.eoo() || e.type() == jstNULL || e.type() == Undefined) {
+ wNumNodes = 1;
+ } else {
+ return Status(ErrorCodes::FailedToParse, "w has to be a number or a string");
+ }
+
+ wTimeout = obj["wtimeout"].numberInt();
+
+ return Status::OK();
+}
+
+Status WriteConcernOptions::parseSecondaryThrottle(const BSONObj& doc,
+ BSONObj* rawWriteConcernObj) {
+ string errMsg;
+ bool isSecondaryThrottle;
+ FieldParser::FieldState fieldState =
+ FieldParser::extract(doc, secondaryThrottleField, &isSecondaryThrottle, &errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID) {
+ return Status(ErrorCodes::FailedToParse, errMsg);
+ }
+
+ if (fieldState != FieldParser::FIELD_SET) {
+ fieldState =
+ FieldParser::extract(doc, mongosSecondaryThrottleField, &isSecondaryThrottle, &errMsg);
- fieldState = FieldParser::extract(doc,
- writeConcernField,
- rawWriteConcernObj,
- &errMsg);
if (fieldState == FieldParser::FIELD_INVALID) {
return Status(ErrorCodes::FailedToParse, errMsg);
}
+ }
- if (!isSecondaryThrottle) {
- if (!rawWriteConcernObj->isEmpty()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "Cannot have write concern when secondary throttle is false");
- }
+ BSONObj dummyBSON;
+ if (!rawWriteConcernObj) {
+ rawWriteConcernObj = &dummyBSON;
+ }
- wNumNodes = 1;
- return Status::OK();
- }
+ fieldState = FieldParser::extract(doc, writeConcernField, rawWriteConcernObj, &errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID) {
+ return Status(ErrorCodes::FailedToParse, errMsg);
+ }
- if (rawWriteConcernObj->isEmpty()) {
- return Status(ErrorCodes::WriteConcernNotDefined,
- "Secondary throttle is on, but write concern is not specified");
+ if (!isSecondaryThrottle) {
+ if (!rawWriteConcernObj->isEmpty()) {
+ return Status(ErrorCodes::UnsupportedFormat,
+ "Cannot have write concern when secondary throttle is false");
}
- return parse(*rawWriteConcernObj);
+ wNumNodes = 1;
+ return Status::OK();
}
- BSONObj WriteConcernOptions::toBSON() const {
- BSONObjBuilder builder;
-
- if (wMode.empty()) {
- builder.append("w", wNumNodes);
- }
- else {
- builder.append("w", wMode);
- }
+ if (rawWriteConcernObj->isEmpty()) {
+ return Status(ErrorCodes::WriteConcernNotDefined,
+ "Secondary throttle is on, but write concern is not specified");
+ }
- if (syncMode == FSYNC) {
- builder.append("fsync", true);
- }
- else if (syncMode == JOURNAL) {
- builder.append("j", true);
- }
+ return parse(*rawWriteConcernObj);
+}
- builder.append("wtimeout", wTimeout);
+BSONObj WriteConcernOptions::toBSON() const {
+ BSONObjBuilder builder;
- return builder.obj();
+ if (wMode.empty()) {
+ builder.append("w", wNumNodes);
+ } else {
+ builder.append("w", wMode);
}
- bool WriteConcernOptions::shouldWaitForOtherNodes() const {
- return !wMode.empty() || wNumNodes > 1;
+ if (syncMode == FSYNC) {
+ builder.append("fsync", true);
+ } else if (syncMode == JOURNAL) {
+ builder.append("j", true);
}
+
+ builder.append("wtimeout", wTimeout);
+
+ return builder.obj();
+}
+
+bool WriteConcernOptions::shouldWaitForOtherNodes() const {
+ return !wMode.empty() || wNumNodes > 1;
+}
}
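
A standalone sketch of the field rules parse() enforces above, with plain C++ stand-ins instead of BSON elements: w may be numeric or a string, j and fsync cannot both be set, a missing or null w defaults to 1, and wtimeout is read as a number:

#include <iostream>
#include <optional>
#include <string>

struct ParsedConcern {
    enum SyncMode { NONE, FSYNC, JOURNAL };
    SyncMode syncMode = NONE;
    int wNumNodes = 0;
    std::string wMode;
    int wTimeoutMillis = 0;
};

// wNumber/wString model the numeric vs. string forms of "w"; at most one
// should be supplied. Returns false on the conditions parse() rejects.
static bool parseConcern(std::optional<int> wNumber,
                         std::optional<std::string> wString,
                         bool j,
                         bool fsync,
                         int wtimeoutMillis,
                         ParsedConcern* out,
                         std::string* errMsg) {
    if (j && fsync) {
        *errMsg = "fsync and j options cannot be used together";
        return false;
    }
    if (j)
        out->syncMode = ParsedConcern::JOURNAL;
    if (fsync)
        out->syncMode = ParsedConcern::FSYNC;

    if (wNumber)
        out->wNumNodes = *wNumber;
    else if (wString)
        out->wMode = *wString;
    else
        out->wNumNodes = 1;  // missing or null "w" defaults to 1

    out->wTimeoutMillis = wtimeoutMillis;
    return true;
}

int main() {
    ParsedConcern wc;
    std::string err;
    if (parseConcern(std::nullopt, std::string("majority"), true, false, 5000, &wc, &err)) {
        std::cout << "w: " << wc.wMode
                  << ", journal: " << (wc.syncMode == ParsedConcern::JOURNAL)
                  << ", wtimeout: " << wc.wTimeoutMillis << '\n';
    }
    return 0;
}
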
diff --git a/src/mongo/db/write_concern_options.h b/src/mongo/db/write_concern_options.h
index 4c9569b1089..217e864a2cf 100644
--- a/src/mongo/db/write_concern_options.h
+++ b/src/mongo/db/write_concern_options.h
@@ -33,83 +33,77 @@
namespace mongo {
- class Status;
-
- struct WriteConcernOptions {
- public:
-
- enum SyncMode { NONE, FSYNC, JOURNAL };
-
- static const int kNoTimeout = 0;
- static const int kNoWaiting = -1;
-
- static const BSONObj Default;
- static const BSONObj Acknowledged;
- static const BSONObj Unacknowledged;
- static const BSONObj Majority;
-
- static const char kMajority[]; // = "majority"
-
- WriteConcernOptions() { reset(); }
-
- WriteConcernOptions(int numNodes,
- SyncMode sync,
- int timeout);
-
- WriteConcernOptions(const std::string& mode,
- SyncMode sync,
- int timeout);
-
- Status parse( const BSONObj& obj );
-
- /**
- * Extracts the write concern settings from the BSONObj. The BSON object should have
- * the format:
- *
- * {
- * ...
- * secondaryThrottle: <bool>, // optional
- * _secondaryThrottle: <bool>, // optional
- * writeConcern: <BSONObj> // optional
- * }
- *
- * Note: secondaryThrottle takes precedence over _secondaryThrottle.
- *
- * Also sets output parameter rawWriteConcernObj if the writeCocnern field exists.
- *
- * Returns OK if the parse was successful. Also returns ErrorCodes::WriteConcernNotDefined
- * when secondary throttle is true but write concern was not specified.
- */
- Status parseSecondaryThrottle(const BSONObj& doc,
- BSONObj* rawWriteConcernObj);
-
- /**
- * Return true if the server needs to wait for other secondary nodes to satisfy this
- * write concern setting. Errs on the false positive for non-empty wMode.
- */
- bool shouldWaitForOtherNodes() const;
-
- void reset() {
- syncMode = NONE;
- wNumNodes = 0;
- wMode = "";
- wTimeout = 0;
- }
-
- // Returns the BSON representation of this object.
- // Warning: does not return the same object passed on the last parse() call.
- BSONObj toBSON() const;
-
- SyncMode syncMode;
-
- // The w parameter for this write concern. The wMode represents the string format and
- // takes precedence over the numeric format wNumNodes.
- int wNumNodes;
- std::string wMode;
-
- // Timeout in milliseconds.
- int wTimeout;
- };
-
+class Status;
+
+struct WriteConcernOptions {
+public:
+ enum SyncMode { NONE, FSYNC, JOURNAL };
+
+ static const int kNoTimeout = 0;
+ static const int kNoWaiting = -1;
+
+ static const BSONObj Default;
+ static const BSONObj Acknowledged;
+ static const BSONObj Unacknowledged;
+ static const BSONObj Majority;
+
+ static const char kMajority[]; // = "majority"
+
+ WriteConcernOptions() {
+ reset();
+ }
+
+ WriteConcernOptions(int numNodes, SyncMode sync, int timeout);
+
+ WriteConcernOptions(const std::string& mode, SyncMode sync, int timeout);
+
+ Status parse(const BSONObj& obj);
+
+ /**
+ * Extracts the write concern settings from the BSONObj. The BSON object should have
+ * the format:
+ *
+ * {
+ * ...
+ * secondaryThrottle: <bool>, // optional
+ * _secondaryThrottle: <bool>, // optional
+ * writeConcern: <BSONObj> // optional
+ * }
+ *
+ * Note: secondaryThrottle takes precedence over _secondaryThrottle.
+ *
+     * Also sets output parameter rawWriteConcernObj if the writeConcern field exists.
+ *
+ * Returns OK if the parse was successful. Also returns ErrorCodes::WriteConcernNotDefined
+ * when secondary throttle is true but write concern was not specified.
+ */
+ Status parseSecondaryThrottle(const BSONObj& doc, BSONObj* rawWriteConcernObj);
+
+ /**
+ * Return true if the server needs to wait for other secondary nodes to satisfy this
+ * write concern setting. Errs on the false positive for non-empty wMode.
+ */
+ bool shouldWaitForOtherNodes() const;
+
+ void reset() {
+ syncMode = NONE;
+ wNumNodes = 0;
+ wMode = "";
+ wTimeout = 0;
+ }
+
+ // Returns the BSON representation of this object.
+ // Warning: does not return the same object passed on the last parse() call.
+ BSONObj toBSON() const;
+
+ SyncMode syncMode;
+
+ // The w parameter for this write concern. The wMode represents the string format and
+ // takes precedence over the numeric format wNumNodes.
+ int wNumNodes;
+ std::string wMode;
+
+ // Timeout in milliseconds.
+ int wTimeout;
+};
}
-
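
A standalone sketch of the parseSecondaryThrottle() precedence and error rules documented above, assuming the default-to-true behavior implied by the BSONField defaults; plain C++ stand-ins, no BSON or FieldParser:

#include <iostream>
#include <optional>
#include <string>

enum class ThrottleParseResult { Ok, UnsupportedFormat, WriteConcernNotDefined };

static ThrottleParseResult checkSecondaryThrottle(std::optional<bool> secondaryThrottle,
                                                  std::optional<bool> mongosSecondaryThrottle,
                                                  bool hasWriteConcern) {
    // "secondaryThrottle" takes precedence over "_secondaryThrottle";
    // when neither is present, throttling defaults to on.
    const bool throttled =
        secondaryThrottle.value_or(mongosSecondaryThrottle.value_or(true));

    if (!throttled)
        return hasWriteConcern ? ThrottleParseResult::UnsupportedFormat
                               : ThrottleParseResult::Ok;
    return hasWriteConcern ? ThrottleParseResult::Ok
                           : ThrottleParseResult::WriteConcernNotDefined;
}

int main() {
    // Explicit secondaryThrottle: false wins over _secondaryThrottle: true.
    std::cout << (checkSecondaryThrottle(false, true, false) == ThrottleParseResult::Ok)
              << '\n';  // 1
    // Throttled, but no write concern sub-document supplied.
    std::cout << (checkSecondaryThrottle(std::nullopt, true, false) ==
                  ThrottleParseResult::WriteConcernNotDefined)
              << '\n';  // 1
    return 0;
}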